code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def process(self, batch, *args, **kwargs):
    """Postprocess a batch built from a list of examples.

    If a user-provided postprocessing ``Pipeline`` is configured, it is
    applied to the batch; otherwise the batch is returned unchanged.

    Args:
        batch (list(object)): Objects from a batch of examples.

    Returns:
        object: The batch after any custom postprocessing Pipeline.
    """
    if self.postprocessing is None:
        return batch
    return self.postprocessing(batch)
|
Process a list of examples to create a batch.
Postprocess the batch with user-provided Pipeline.
Args:
batch (list(object)): A list of object from a batch of examples.
Returns:
object: Processed object given the input and custom
postprocessing Pipeline.
|
def bench(client, n):
    """Benchmark n 'add' requests against client and print timing stats."""
    # Build the argument pairs up front so they are excluded from timing.
    pairs = [(x, x + 1) for x in range(n)]
    started = time.time()
    for a, b in pairs:
        res, err = client.call('add', a, b)
        # assert err is None
    duration = time.time() - started
    print('Client stats:')
    util.print_stats(n, duration)
|
Benchmark n requests
|
def rvs(self, size=1, **kwargs):
    """Return ``size`` random draws for all parameters.

    Rejection-samples: repeatedly draws candidates via ``self._draw`` and
    keeps only those passing ``self._constraints`` until the structured
    array is full.
    """
    size = int(size)
    out = numpy.zeros(size, dtype=[(p, float) for p in self.params])
    filled = 0
    while filled < size:
        candidates = self._draw(size=size - filled, **kwargs)
        accepted = candidates[self._constraints(candidates)]
        out[filled:filled + len(accepted)] = accepted
        filled += len(accepted)
    return out
|
Returns random values for all of the parameters.
|
def is_valid_pid_for_create(did):
    """Assert that ``did`` can be used as a PID for creating a new object with
    MNStorage.create() or MNStorage.update().

    Raises IdentifierNotUnique if the identifier is already in use.
    """
    if d1_gmn.app.did.is_valid_pid_for_create(did):
        return
    raise d1_common.types.exceptions.IdentifierNotUnique(
        0,
        'Identifier is already in use as {}. did="{}"'.format(
            d1_gmn.app.did.classify_identifier(did), did
        ),
        identifier=did,
    )
|
Assert that ``did`` can be used as a PID for creating a new object with
MNStorage.create() or MNStorage.update().
|
def maybe_convert_objects(values, convert_dates=True, convert_numeric=True,
                          convert_timedeltas=True, copy=True):
    """
    If we have an object dtype, try to coerce dates and/or numbers.

    Parameters
    ----------
    values : ndarray, list, tuple or scalar
        Candidate values. Non-ndarray input is wrapped in an object ndarray.
    convert_dates : bool or 'coerce', default True
        Attempt conversion to datetime64[ns]; 'coerce' turns unparseable
        entries into NaT instead of leaving the values as object.
    convert_numeric : bool, default True
        Attempt coercion to numeric, with failures becoming NaN.
    convert_timedeltas : bool or 'coerce', default True
        Same as convert_dates, but targeting timedelta64.
    copy : bool, default True
        Return a copy of the (possibly converted) values.

    Returns
    -------
    ndarray
    """
    # if we have passed in a list or scalar
    if isinstance(values, (list, tuple)):
        values = np.array(values, dtype=np.object_)
    if not hasattr(values, 'dtype'):
        values = np.array([values], dtype=np.object_)

    # convert dates
    if convert_dates and values.dtype == np.object_:

        # we take an aggressive stance and convert to datetime64[ns]
        if convert_dates == 'coerce':
            new_values = maybe_cast_to_datetime(
                values, 'M8[ns]', errors='coerce')

            # if we are all nans then leave me alone
            if not isna(new_values).all():
                values = new_values

        else:
            values = lib.maybe_convert_objects(values,
                                               convert_datetime=convert_dates)

    # convert timedeltas
    if convert_timedeltas and values.dtype == np.object_:

        if convert_timedeltas == 'coerce':
            from pandas.core.tools.timedeltas import to_timedelta
            new_values = to_timedelta(values, errors='coerce')

            # if we are all nans then leave me alone
            if not isna(new_values).all():
                values = new_values

        else:
            values = lib.maybe_convert_objects(
                values, convert_timedelta=convert_timedeltas)

    # convert to numeric
    if values.dtype == np.object_:
        if convert_numeric:
            try:
                new_values = lib.maybe_convert_numeric(values, set(),
                                                       coerce_numeric=True)

                # if we are all nans then leave me alone
                if not isna(new_values).all():
                    values = new_values

            except Exception:
                # best-effort conversion: any failure leaves values as object
                pass
        else:
            # soft-conversion
            values = lib.maybe_convert_objects(values)

    values = values.copy() if copy else values

    return values
|
if we have an object dtype, try to coerce dates and/or numbers
|
def gps2_raw_encode(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible, dgps_numch, dgps_age):
    '''
    Build a GPS2_RAW message: second GPS data. Coordinate frame is
    right-handed, Z-axis up (GPS frame).

    time_usec          : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
    fix_type           : See the GPS_FIX_TYPE enum. (uint8_t)
    lat                : Latitude (WGS84), in degrees * 1E7 (int32_t)
    lon                : Longitude (WGS84), in degrees * 1E7 (int32_t)
    alt                : Altitude (AMSL, not WGS84), in meters * 1000 (positive for up) (int32_t)
    eph                : GPS HDOP horizontal dilution of position in cm (m*100). If unknown, set to: UINT16_MAX (uint16_t)
    epv                : GPS VDOP vertical dilution of position in cm (m*100). If unknown, set to: UINT16_MAX (uint16_t)
    vel                : GPS ground speed (m/s * 100). If unknown, set to: UINT16_MAX (uint16_t)
    cog                : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: UINT16_MAX (uint16_t)
    satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t)
    dgps_numch         : Number of DGPS satellites (uint8_t)
    dgps_age           : Age of DGPS info (uint32_t)
    '''
    msg = MAVLink_gps2_raw_message(
        time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog,
        satellites_visible, dgps_numch, dgps_age)
    return msg
|
Second GPS data. Coordinate frame is right-handed, Z-axis up (GPS
frame).
time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t)
fix_type : See the GPS_FIX_TYPE enum. (uint8_t)
lat : Latitude (WGS84), in degrees * 1E7 (int32_t)
lon : Longitude (WGS84), in degrees * 1E7 (int32_t)
alt : Altitude (AMSL, not WGS84), in meters * 1000 (positive for up) (int32_t)
eph : GPS HDOP horizontal dilution of position in cm (m*100). If unknown, set to: UINT16_MAX (uint16_t)
epv : GPS VDOP vertical dilution of position in cm (m*100). If unknown, set to: UINT16_MAX (uint16_t)
vel : GPS ground speed (m/s * 100). If unknown, set to: UINT16_MAX (uint16_t)
cog : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: UINT16_MAX (uint16_t)
satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t)
dgps_numch : Number of DGPS satellites (uint8_t)
dgps_age : Age of DGPS info (uint32_t)
|
def _from_binary_ea_info(cls, binary_stream):
    """See base class.

    Binary layout of the EA_INFORMATION content parsed here:
        - Size of Extended Attribute entry                       (2 bytes)
        - Number of Extended Attributes which have NEED_EA set   (2 bytes)
        - Size of extended attribute data                        (4 bytes)
    """
    # NOTE: the layout notes above were previously a second, bare
    # triple-quoted string after the docstring — a no-op expression
    # statement, not documentation. Folded into the docstring.
    return cls(cls._REPR.unpack(binary_stream[:cls._REPR.size]))
|
See base class.
|
def update_cer(self, symbol, cer, account=None):
    """Update the Core Exchange Rate (CER) of an asset.

    :param str symbol: Symbol of the asset to publish feed for
    :param bitshares.price.Price cer: Core exchange Rate
    :param str account: (optional) the account to allow access
        to (defaults to ``default_account``)
    """
    assert isinstance(
        cer, Price
    ), "cer needs to be instance of `bitshares.price.Price`!"

    # Resolve the acting account, falling back to the configured default.
    if not account and "default_account" in self.config:
        account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account, blockchain_instance=self)

    asset = Asset(symbol, blockchain_instance=self, full=True)
    assert asset["id"] in (
        cer["base"]["asset"]["id"],
        cer["quote"]["asset"]["id"],
    ), "Price needs to contain the asset of the symbol you'd like to produce a feed for!"

    # The CER must be expressed with the asset as base and the core
    # asset (1.3.0) as quote.
    cer = cer.as_base(symbol)
    if cer["quote"]["asset"]["id"] != "1.3.0":
        raise ValueError("CER must be defined against core asset '1.3.0'")

    options = asset["options"]
    options["core_exchange_rate"] = cer.as_base(symbol).json()

    op = operations.Asset_update(
        **{
            "fee": {"amount": 0, "asset_id": "1.3.0"},
            "issuer": account["id"],
            "asset_to_update": asset["id"],
            "new_options": options,
            "extensions": [],
            "prefix": self.prefix,
        }
    )
    return self.finalizeOp(op, account["name"], "active")
|
Update the Core Exchange Rate (CER) of an asset
:param str symbol: Symbol of the asset to publish feed for
:param bitshares.price.Price cer: Core exchange Rate
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
|
def has_tag(self, model):
    """Return True if any tag of the given port matches this tag."""
    return any(self.is_tag(tag) for tag in model.tags)
|
Does the given port have this tag?
|
def load_fasta_file_as_dict_of_seqrecords(filename):
    """Load a FASTA file and return its sequences as a dict of {ID: SeqRecord}.

    Args:
        filename (str): Path to the FASTA file to load

    Returns:
        dict: Mapping of sequence IDs to their SeqRecords
    """
    return {record.id: record for record in load_fasta_file(filename)}
|
Load a FASTA file and return the sequences as a dict of {ID: SeqRecord}
Args:
filename (str): Path to the FASTA file to load
Returns:
dict: Dictionary of IDs to their SeqRecords
|
def _create_buffer_control(self, editor_buffer):
    """
    Create a new BufferControl for a given location.

    Wires up the input processors (whitespace display, tab rendering,
    error reporting, selection/search highlighting) and binds them to
    the editor buffer.
    """
    # Live condition: True while incremental search is active.
    @Condition
    def preview_search():
        return self.editor.incsearch

    input_processors = [
        # Processor for visualising spaces. (should come before the
        # selection processor, otherwise, we won't see these spaces
        # selected.)
        ConditionalProcessor(
            ShowTrailingWhiteSpaceProcessor(),
            Condition(lambda: self.editor.display_unprintable_characters)),

        # Replace tabs by spaces.
        TabsProcessor(
            tabstop=(lambda: self.editor.tabstop),
            # '|' marks the tab start when unprintables are shown.
            char1=(lambda: '|' if self.editor.display_unprintable_characters else ' '),
            # Fill character: dotted line glyph, with ASCII '.' fallback
            # when the output encoding can't render it.
            char2=(lambda: _try_char('\u2508', '.', get_app().output.encoding())
                   if self.editor.display_unprintable_characters else ' '),
        ),

        # Reporting of errors, for Pyflakes.
        ReportingProcessor(editor_buffer),
        HighlightSelectionProcessor(),
        ConditionalProcessor(
            HighlightSearchProcessor(),
            Condition(lambda: self.editor.highlight_search)),
        ConditionalProcessor(
            HighlightIncrementalSearchProcessor(),
            Condition(lambda: self.editor.highlight_search) & preview_search),
        HighlightMatchingBracketProcessor(),
        DisplayMultipleCursors(),
    ]

    return BufferControl(
        lexer=DocumentLexer(editor_buffer),
        include_default_input_processors=False,
        input_processors=input_processors,
        buffer=editor_buffer.buffer,
        preview_search=preview_search,
        search_buffer_control=self.search_control,
        focus_on_click=True)
|
Create a new BufferControl for a given location.
|
def overlap_summary(self):
    """Print a summary table of reconstruction overlap statistics.

    Shows the 5/25/50/75/95th percentiles plus min, mean and max of the
    overlaps returned by ``self.compute_overlaps()``.
    """
    olaps = self.compute_overlaps()
    table = [["5%: ", np.percentile(olaps, 5)],
             ["25%: ", np.percentile(olaps, 25)],
             ["50%: ", np.percentile(olaps, 50)],
             ["75%: ", np.percentile(olaps, 75)],
             ["95%: ", np.percentile(olaps, 95)],
             [" ", " "],
             ["Min: ", np.min(olaps)],
             ["Mean: ", np.mean(olaps)],
             ["Max: ", np.max(olaps)]]
    header = ["Percentile", "Overlap"]
    # Fixed: original used a Python 2 `print` statement, which is a
    # SyntaxError under Python 3 (the rest of this file uses print()).
    print(tabulate(table, header, tablefmt="rst"))
|
print summary of reconstruction overlaps
|
def get_names(file_dir, files):
    """
    Get the annotator name list based on a list of files

    Args:
        file_dir: AMR file folder
        files: a list of AMR names, e.g. nw_wsj_0001_1

    Returns:
        a list of user names who annotate all the files
    """
    # The immediate subdirectories of file_dir are the candidate annotators.
    # next() with a default preserves the original behavior (empty result)
    # when file_dir does not exist.
    _, total_list, _ = next(os.walk(file_dir), (file_dir, [], []))
    name_list = []
    for user in total_list:
        # Keep the user only if every requested file exists for them.
        # os.path.join works whether or not file_dir has a trailing
        # separator (the original "+"-concatenation required one).
        if all(os.path.exists(os.path.join(file_dir, user, f + ".txt"))
               for f in files):
            name_list.append(user)
    if not name_list:
        print("********Error: Cannot find any user who completes the files*************", file=ERROR_LOG)
    return name_list
|
Get the annotator name list based on a list of files
Args:
file_dir: AMR file folder
files: a list of AMR names, e.g. nw_wsj_0001_1
Returns:
a list of user names who annotate all the files
|
def _get_depthsr(self, goobj):
"""Return DNN or RNN depending on if relationships are loaded."""
if 'reldepth' in self.gosubdag.prt_attr['flds']:
return "R{R:02}".format(R=goobj.reldepth)
return "D{D:02}".format(D=goobj.depth)
|
Return DNN or RNN depending on if relationships are loaded.
|
def read_from(self, provider, **options):
    """ All :class:`Pointer` fields in the `Sequence` read the necessary
    number of bytes from the data :class:`Provider` for their referenced
    :attr:`~Pointer.data` object. Null pointer are ignored.

    :param Provider provider: data :class:`Provider`.
    :keyword bool nested: if ``True`` all :class:`Pointer` fields in the
        :attr:`~Pointer.data` objects of all :class:`Pointer` fields in the
        `Sequence` reads their referenced :attr:`~Pointer.data` object as
        well (chained method call).
        Each :class:`Pointer` field stores the bytes for its referenced
        :attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`.
    """
    # Delegate to every Container or Pointer member; plain fields are skipped.
    for member in self:
        if is_mixin(member):
            member.read_from(provider, **options)
|
All :class:`Pointer` fields in the `Sequence` read the necessary
number of bytes from the data :class:`Provider` for their referenced
:attr:`~Pointer.data` object. Null pointer are ignored.
:param Provider provider: data :class:`Provider`.
:keyword bool nested: if ``True`` all :class:`Pointer` fields in the
:attr:`~Pointer.data` objects of all :class:`Pointer` fields in the
`Sequence` reads their referenced :attr:`~Pointer.data` object as
well (chained method call).
Each :class:`Pointer` field stores the bytes for its referenced
:attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`.
|
def build_columns(self, X, verbose=False):
    """Construct the model-matrix column for this (constant) term.

    Parameters
    ----------
    X : array-like
        Input dataset with n rows
    verbose : bool
        whether to show warnings

    Returns
    -------
    scipy sparse array with n rows
    """
    n_rows = len(X)
    ones_column = np.ones((n_rows, 1))
    return sp.sparse.csc_matrix(ones_column)
|
construct the model matrix columns for the term
Parameters
----------
X : array-like
Input dataset with n rows
verbose : bool
whether to show warnings
Returns
-------
scipy sparse array with n rows
|
def cli(env, identifier, sortby, cpu, domain, hostname, memory, tag, columns):
    """List guests which are in a dedicated host server."""
    mgr = SoftLayer.DedicatedHostManager(env.client)
    guests = mgr.list_guests(
        host_id=identifier, cpus=cpu, hostname=hostname, domain=domain,
        memory=memory, tags=tag, mask=columns.mask())

    table = formatting.Table(columns.columns)
    table.sortby = sortby
    for guest in guests:
        # Blank out empty cells so the table renders consistently.
        row = [cell or formatting.blank() for cell in columns.row(guest)]
        table.add_row(row)
    env.fout(table)
|
List guests which are in a dedicated host server.
|
def _generate_struct_class_h(self, struct):
    """Defines an Obj C header file that represents a struct in Stone.

    Emits, in order: imports, the API object interface (fields and
    constructors), and the companion serializer class interface.
    """
    self._generate_init_imports_h(struct)
    self._generate_imports_h(self._get_imports_h(struct))

    self.emit()
    self.emit('NS_ASSUME_NONNULL_BEGIN')
    self.emit()

    self.emit('#pragma mark - API Object')
    self.emit()
    self._generate_class_comment(struct)

    struct_name = fmt_class_prefix(struct)

    # Main @interface block for the struct's API object.
    with self.block_h_from_data_type(struct, protocol=['DBSerializable', 'NSCopying']):
        self.emit('#pragma mark - Instance fields')
        self.emit()
        self._generate_struct_properties(struct.fields)

        self.emit('#pragma mark - Constructors')
        self.emit()
        self._generate_struct_cstor_signature(struct)
        self._generate_struct_cstor_signature_default(struct)
        # Mark plain -init as unavailable; designated initializers only.
        self._generate_init_unavailable_signature(struct)
        self.emit()

    self.emit()
    self.emit('#pragma mark - Serializer Object')
    self.emit()
    self.emit(comment_prefix)
    self.emit_wrapped_text(
        'The serialization class for the `{}` struct.'.format(
            fmt_class(struct.name)),
        prefix=comment_prefix)
    self.emit(comment_prefix)

    # Companion serializer @interface.
    with self.block_h(fmt_serial_class(struct_name)):
        self._generate_serializer_signatures(struct_name)

    self.emit()
    self.emit('NS_ASSUME_NONNULL_END')
    self.emit()
|
Defines an Obj C header file that represents a struct in Stone.
|
def next(self):
    """Return 'next' version. Eg, next(1.2) is 1.2_"""
    if not self.tokens:
        return Version.inf
    bumped = self.copy()
    # Bump only the final token; the rest of the version is unchanged.
    last = bumped.tokens.pop()
    bumped.tokens.append(last.next())
    return bumped
|
Return 'next' version. Eg, next(1.2) is 1.2_
|
def split_once(self, horizontal: bool, position: int) -> None:
    """Split this partition into 2 sub-partitions.

    Args:
        horizontal (bool): Split orientation flag passed straight to
            libtcod.  NOTE(review): presumably True splits along a
            horizontal line — confirm against the TCOD_bsp_split_once
            documentation.
        position (int): Coordinate at which to split; forwarded to
            libtcod unchanged.
    """
    cdata = self._as_cdata()
    lib.TCOD_bsp_split_once(cdata, horizontal, position)
    # Re-read the mutated C-side tree back into this Python object.
    self._unpack_bsp_tree(cdata)
|
Split this partition into 2 sub-partitions.
Args:
horizontal (bool):
position (int):
|
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch every object of the requested type along with the one property
    # we need, then scan for the first match by property value or by id.
    candidates = get_mors_with_properties(service_instance, object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], candidate_id):
            return candidate['object']
    return None
|
Returns the first managed object reference having the specified property value.
service_instance
The Service Instance from which to obtain managed object references.
object_type
The type of content for which to obtain managed object references.
property_value
The name of the property for which to obtain the managed object reference.
property_name
An object property used to return the specified object reference results. Defaults to ``name``.
container_ref
An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
rootFolder.
|
def check_type_compatibility(type_1_id, type_2_id):
    """
    When applying a type to a resource, it may be the case that the resource already
    has an attribute specified in the new type, but the template which defines this
    pre-existing attribute has a different unit specification to the new template.

    This function checks for any situations where different types specify the same
    attributes, but with different units.

    Returns:
        list of str: one message per shared attribute whose unit
        specification differs between the two types; empty if compatible.
    """
    errors = []
    type_1 = db.DBSession.query(TemplateType).filter(TemplateType.id == type_1_id).options(joinedload_all('typeattrs')).one()
    type_2 = db.DBSession.query(TemplateType).filter(TemplateType.id == type_2_id).options(joinedload_all('typeattrs')).one()
    template_1_name = type_1.template.name
    template_2_name = type_2.template.name

    type_1_attrs = set(t.attr_id for t in type_1.typeattrs)
    type_2_attrs = set(t.attr_id for t in type_2.typeattrs)

    shared_attrs = type_1_attrs.intersection(type_2_attrs)
    if len(shared_attrs) == 0:
        return []

    # Index type_1's typeattrs by attr_id, for the shared attributes only.
    type_1_dict = {}
    for t in type_1.typeattrs:
        if t.attr_id in shared_attrs:
            type_1_dict[t.attr_id] = t

    for ta in type_2.typeattrs:
        # BUG FIX: type_1_dict only contains shared attributes, so the
        # original lookup raised KeyError whenever type_2 had an
        # attribute that type_1 does not. Skip non-shared attributes.
        if ta.attr_id not in shared_attrs:
            continue
        type_2_unit_id = ta.unit_id
        type_1_unit_id = type_1_dict[ta.attr_id].unit_id

        fmt_dict = {
            'template_1_name': template_1_name,
            'template_2_name': template_2_name,
            'attr_name': ta.attr.name,
            'type_1_unit_id': type_1_unit_id,
            'type_2_unit_id': type_2_unit_id,
            'type_name': type_1.name
        }

        if type_1_unit_id is None and type_2_unit_id is not None:
            # FIX: added the missing space before the template name (the
            # original adjacent string literals concatenated without one).
            errors.append("Type %(type_name)s in template %(template_1_name)s"
                          " stores %(attr_name)s with no units, while template"
                          " %(template_2_name)s stores it with unit %(type_2_unit_id)s" % fmt_dict)
        elif type_1_unit_id is not None and type_2_unit_id is None:
            errors.append("Type %(type_name)s in template %(template_1_name)s"
                          " stores %(attr_name)s in %(type_1_unit_id)s."
                          " Template %(template_2_name)s stores it with no unit." % fmt_dict)
        elif type_1_unit_id != type_2_unit_id:
            errors.append("Type %(type_name)s in template %(template_1_name)s"
                          " stores %(attr_name)s in %(type_1_unit_id)s, while"
                          " template %(template_2_name)s stores it in %(type_2_unit_id)s" % fmt_dict)
    return errors
|
When applying a type to a resource, it may be the case that the resource already
has an attribute specified in the new type, but the template which defines this
pre-existing attribute has a different unit specification to the new template.
This function checks for any situations where different types specify the same
attributes, but with different units.
|
def declareProvisioner(self, *args, **kwargs):
    """
    Update a provisioner

    Declare a provisioner, supplying some details about it.

    `declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are
    possessed. For example, a request to update the `aws-provisioner-v1`
    provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope
    `queue:declare-provisioner:aws-provisioner-v1#description`.

    The term "provisioner" is taken broadly to mean anything with a provisionerId.
    This does not necessarily mean there is an associated service performing any
    provisioning activity.

    This method takes input: ``v1/update-provisioner-request.json#``

    This method gives output: ``v1/provisioner-response.json#``

    This method is ``experimental``
    """
    # Thin wrapper: all request building/signing happens in _makeApiCall.
    return self._makeApiCall(self.funcinfo["declareProvisioner"], *args, **kwargs)
|
Update a provisioner
Declare a provisioner, supplying some details about it.
`declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are
possessed. For example, a request to update the `aws-provisioner-v1`
provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope
`queue:declare-provisioner:aws-provisioner-v1#description`.
The term "provisioner" is taken broadly to mean anything with a provisionerId.
This does not necessarily mean there is an associated service performing any
provisioning activity.
This method takes input: ``v1/update-provisioner-request.json#``
This method gives output: ``v1/provisioner-response.json#``
This method is ``experimental``
|
def compile(
        self, scss_string=None, scss_file=None, source_files=None,
        super_selector=None, filename=None, is_sass=None,
        line_numbers=True, import_static_css=False):
    """Compile Sass to CSS. Returns a single CSS string.

    This method is DEPRECATED; see :mod:`scss.compiler` instead.

    Sources are taken from, in priority order: ``source_files``,
    ``scss_string``, ``scss_file``, plus any files given to the
    constructor (``self._scss_files``).

    NOTE(review): the ``line_numbers`` parameter is accepted but not
    referenced anywhere in this body — confirm whether it is still
    meaningful before relying on it.
    """
    # Derive our root namespace
    self.scss_vars = _default_scss_vars.copy()
    if self._scss_vars is not None:
        self.scss_vars.update(self._scss_vars)

    root_namespace = Namespace(
        variables=self.scss_vars,
        functions=self._library,
    )

    # Figure out search paths. Fall back from provided explicitly to
    # defined globally to just searching the current directory
    search_paths = ['.']
    if self._search_paths is not None:
        assert not isinstance(self._search_paths, six.string_types), \
            "`search_paths` should be an iterable, not a string"
        search_paths.extend(self._search_paths)
    else:
        if config.LOAD_PATHS:
            if isinstance(config.LOAD_PATHS, six.string_types):
                # Back-compat: allow comma-delimited
                search_paths.extend(config.LOAD_PATHS.split(','))
            else:
                search_paths.extend(config.LOAD_PATHS)

        search_paths.extend(self._scss_opts.get('load_paths', []))

    # Normalize a few old styles of options
    output_style = self._scss_opts.get('style', config.STYLE)
    if output_style is True:
        output_style = 'compressed'
    elif output_style is False:
        output_style = 'legacy'

    # Coerce plain string paths into Path objects.
    fixed_search_path = []
    for path in search_paths:
        if isinstance(path, six.string_types):
            fixed_search_path.append(Path(path))
        else:
            fixed_search_path.append(path)

    # Build the compiler
    compiler = Compiler(
        namespace=root_namespace,
        extensions=[
            CoreExtension,
            ExtraExtension,
            FontsExtension,
            CompassExtension,
            BootstrapExtension,
        ],
        search_path=fixed_search_path,
        import_static_css=import_static_css,
        live_errors=self.live_errors,
        generate_source_map=self._scss_opts.get('debug_info', False),
        output_style=output_style,
        warn_unused_imports=self._scss_opts.get('warn_unused', False),
        ignore_parse_errors=config.DEBUG,
        loops_have_own_scopes=config.CONTROL_SCOPING,
        undefined_variables_fatal=config.FATAL_UNDEFINED,
        super_selector=super_selector or self.super_selector,
    )
    # Gonna add the source files manually
    compilation = compiler.make_compilation()

    # Inject the files we know about
    # TODO how does this work with the expectation of absoluteness
    if source_files is not None:
        for source in source_files:
            compilation.add_source(source)
    elif scss_string is not None:
        source = SourceFile.from_string(
            scss_string,
            relpath=filename,
            is_sass=is_sass,
        )
        compilation.add_source(source)
    elif scss_file is not None:
        # This is now the only way to allow forcibly overriding the
        # filename a source "thinks" it is
        with open(scss_file, 'rb') as f:
            source = SourceFile.from_file(
                f,
                relpath=filename or scss_file,
                is_sass=is_sass,
            )
        compilation.add_source(source)

    # Plus the ones from the constructor
    if self._scss_files:
        for name, contents in list(self._scss_files.items()):
            source = SourceFile.from_string(contents, relpath=name)
            compilation.add_source(source)

    compiled = compiler.call_and_catch_errors(compilation.run)
    # Record (dirname, basename) tuples of everything that was compiled.
    self.source_files = list(SourceFileTuple(*os.path.split(s.path)) for s in compilation.source_index.values())
    return compiled
|
Compile Sass to CSS. Returns a single CSS string.
This method is DEPRECATED; see :mod:`scss.compiler` instead.
|
def save(self, directory_path):
    """
    Save the material (the unencrypted PEM encoded RSA private key)
    of a newly created KeyPair to a local file.

    :type directory_path: string
    :param directory_path: The fully qualified path to the directory
                           in which the keypair will be saved. The
                           keypair file will be named using the name
                           of the keypair as the base name and .pem
                           for the file extension. If a file of that
                           name already exists in the directory, an
                           exception will be raised and the old file
                           will not be overwritten.

    :rtype: bool
    :return: True if successful.
    """
    if not self.material:
        raise BotoClientError('KeyPair contains no material')
    directory_path = os.path.expanduser(directory_path)
    file_path = os.path.join(directory_path, '%s.pem' % self.name)
    if os.path.exists(file_path):
        raise BotoClientError('%s already exists, it will not be overwritten' % file_path)
    # Use a context manager so the handle is closed even if write fails
    # (the original leaked the handle on error).
    with open(file_path, 'wb') as fp:
        fp.write(self.material)
    # Private key: restrict to owner read/write. 0o600 replaces the
    # Python 2-only literal 0600, which is a SyntaxError in Python 3.
    os.chmod(file_path, 0o600)
    return True
|
Save the material (the unencrypted PEM encoded RSA private key)
of a newly created KeyPair to a local file.
:type directory_path: string
:param directory_path: The fully qualified path to the directory
in which the keypair will be saved. The
keypair file will be named using the name
of the keypair as the base name and .pem
for the file extension. If a file of that
name already exists in the directory, an
exception will be raised and the old file
will not be overwritten.
:rtype: bool
:return: True if successful.
|
def setupFeatures(self):
    """
    Make the features source.

    **This should not be called externally.** Subclasses
    may override this method to handle the file creation
    in a different way if desired.
    """
    if not self.featureWriters:
        # no featureWriters, simply read existing features' text
        self.features = tounicode(self.ufo.features.text or "", "utf-8")
        return

    featureFile = parseLayoutFeatures(self.ufo)
    for writer in self.featureWriters:
        writer.write(self.ufo, featureFile, compiler=self)
    # stringify AST to get correct line numbers in error messages
    self.features = featureFile.asFea()
|
Make the features source.
**This should not be called externally.** Subclasses
may override this method to handle the file creation
in a different way if desired.
|
def from_gff3(path, attributes=None, region=None, score_fill=-1, phase_fill=-1,
              attributes_fill='.', dtype=None):
    """Read a feature table from a GFF3 format file.

    Parameters
    ----------
    path : string
        File path.
    attributes : list of strings, optional
        List of columns to extract from the "attributes" field.
    region : string, optional
        Genome region to extract. If given, file must be position
        sorted, bgzipped and tabix indexed. Tabix must also be installed
        and on the system path.
    score_fill : int, optional
        Value to use where score field has a missing value.
    phase_fill : int, optional
        Value to use where phase field has a missing value.
    attributes_fill : object or list of objects, optional
        Value(s) to use where attribute field(s) have a missing value.
    dtype : numpy dtype, optional
        Manually specify a dtype.

    Returns
    -------
    ft : FeatureTable
    """
    recarr = gff3_to_recarray(
        path, attributes=attributes, region=region, score_fill=score_fill,
        phase_fill=phase_fill, attributes_fill=attributes_fill, dtype=dtype)
    return None if recarr is None else FeatureTable(recarr, copy=False)
|
Read a feature table from a GFF3 format file.
Parameters
----------
path : string
File path.
attributes : list of strings, optional
List of columns to extract from the "attributes" field.
region : string, optional
Genome region to extract. If given, file must be position
sorted, bgzipped and tabix indexed. Tabix must also be installed
and on the system path.
score_fill : int, optional
Value to use where score field has a missing value.
phase_fill : int, optional
Value to use where phase field has a missing value.
attributes_fill : object or list of objects, optional
Value(s) to use where attribute field(s) have a missing value.
dtype : numpy dtype, optional
Manually specify a dtype.
Returns
-------
ft : FeatureTable
|
def get_platform_settings():
    """
    Returns the content of `settings.PLATFORMS` with a twist.

    The platforms settings was created to stay compatible with the old way of
    declaring the FB configuration, in order not to break production bots. This
    function will convert the legacy configuration into the new configuration
    if required. As a result, it should be the only used way to access the
    platform configuration.
    """
    # Work on a copy: the original code appended to settings.PLATFORMS
    # itself, so every call permanently grew the shared settings list
    # with another (duplicate) Facebook entry.
    platforms = list(settings.PLATFORMS)
    if hasattr(settings, 'FACEBOOK') and settings.FACEBOOK:
        platforms.append({
            'class': 'bernard.platforms.facebook.platform.Facebook',
            'settings': settings.FACEBOOK,
        })
    return platforms
|
Returns the content of `settings.PLATFORMS` with a twist.
The platforms settings was created to stay compatible with the old way of
declaring the FB configuration, in order not to break production bots. This
function will convert the legacy configuration into the new configuration
if required. As a result, it should be the only used way to access the
platform configuration.
|
def colorbar(fig, ax, im,
             width=0.05,
             height=1.0,
             hoffset=0.01,
             voffset=0.0,
             orientation='vertical'):
    '''
    draw colorbar without resizing the axes object to make room

    kwargs:
    ::

        fig : matplotlib.figure.Figure
        ax : matplotlib.axes.AxesSubplot
        im : matplotlib.image.AxesImage
        width : float, colorbar width in fraction of ax width
        height : float, colorbar height in fraction of ax height
        hoffset : float, horizontal spacing to main axes in fraction of width
        voffset : float, vertical spacing to main axis in fraction of height
        orientation : str, 'horizontal' or 'vertical'

    return:
    ::

        object : colorbar handle
    '''
    # (x0, y0, w, h) of the main axes in figure coordinates.
    # The original computed this twice; the duplicate statement is removed.
    rect = np.array(ax.get_position().bounds)
    caxrect = [0] * 4
    caxrect[0] = rect[0] + rect[2] + hoffset * rect[2]  # just right of ax
    caxrect[1] = rect[1] + voffset * rect[3]
    caxrect[2] = rect[2] * width
    caxrect[3] = rect[3] * height
    cax = fig.add_axes(caxrect)
    cb = fig.colorbar(im, cax=cax, orientation=orientation)
    return cb
|
draw colorbar without resizing the axes object to make room
kwargs:
::
fig : matplotlib.figure.Figure
ax : matplotlib.axes.AxesSubplot
im : matplotlib.image.AxesImage
width : float, colorbar width in fraction of ax width
height : float, colorbar height in fraction of ax height
hoffset : float, horizontal spacing to main axes in fraction of width
voffset : float, vertical spacing to main axis in fraction of height
orientation : str, 'horizontal' or 'vertical'
return:
::
object : colorbar handle
|
def GET(self):
    """Render the index page with the current todos and an empty form."""
    return render.index(model.get_todos(), self.form())
|
Show page
|
def insert_func(self, index, func, *args, **kwargs):
    '''
    Insert ``func``, pre-bound with the given positional and keyword
    arguments, at position ``index``.
    '''
    self.insert(index, partial(func, *args, **kwargs))
|
insert func with given arguments and keywords.
|
def _generate_response_head_bytes(status_code, headers):
    """
    Serialize the status code plus the signable response headers.

    :type status_code: int
    :type headers: dict[str, str]
    :rtype: bytes
    """
    parts = [str(status_code), _DELIMITER_NEWLINE]
    # Iterate in sorted order so the serialized head is deterministic.
    for raw_name, value in sorted(headers.items()):
        cased_name = _get_header_correctly_cased(raw_name)
        if _should_sign_response_header(cased_name):
            parts.append(_FORMAT_HEADER_STRING.format(cased_name, value))
    parts.append(_DELIMITER_NEWLINE)
    return ''.join(parts).encode()
|
:type status_code: int
:type headers: dict[str, str]
:rtype: bytes
|
def add_properties(props, mol):
    """Apply the property block entries to the molecule object.

    Returns:
        None (the molecule object is altered in place)
    """
    if not props:
        return
    # The property block supersedes all charge, radical and isotope values
    # from the atom block, so reset those fields on every atom first.
    for _, atom in mol.atoms_iter():
        atom.charge = 0
        atom.multi = 1
        atom.mass = None
    # Each property entry carries (atom index, value).
    for prop_key, attr_name in (("CHG", "charge"), ("RAD", "multi"), ("ISO", "mass")):
        for entry in props.get(prop_key, []):
            setattr(mol.atom(entry[0]), attr_name, entry[1])
|
apply properties to the molecule object
Returns:
None (alter molecule object directly)
|
def setedgeval(delta, is_multigraph, graph, orig, dest, idx, key, value):
    """Change a delta to say that an edge stat was set to a certain value"""
    multi = is_multigraph(graph)
    # Key path that would mark this particular edge as deleted in the delta;
    # multigraphs carry an extra per-edge index level.
    lookup = [graph, 'edges', orig, dest] + ([idx] if multi else [])
    node = delta
    for step in lookup:
        if not isinstance(node, dict) or step not in node:
            break
        node = node[step]
    else:
        # Every key was present; a falsy leaf means the edge is recorded as
        # deleted, so setting a stat on it would be meaningless — bail out.
        if not node:
            return
    target = delta.setdefault(graph, {}).setdefault('edge_val', {}) \
        .setdefault(orig, {}).setdefault(dest, {})
    if multi:
        target = target.setdefault(idx, {})
    target[key] = value
|
Change a delta to say that an edge stat was set to a certain value
|
def retry(*r_args, **r_kwargs):
    """
    Decorator wrapper for retry_call. Accepts the same arguments as
    retry_call except ``func`` and returns a decorator for the decorated
    function.

    Ex:

    >>> @retry(retries=3)
    ... def my_func(a, b):
    ...     "this is my funk"
    ...     print(a, b)
    >>> my_func.__doc__
    'this is my funk'
    """
    def decorate(func):
        @functools.wraps(func)
        def wrapper(*f_args, **f_kwargs):
            # Bind the call-site arguments, then hand the zero-arg callable
            # to retry_call together with the retry configuration.
            return retry_call(
                functools.partial(func, *f_args, **f_kwargs),
                *r_args, **r_kwargs)
        return wrapper
    return decorate
|
Decorator wrapper for retry_call. Accepts arguments to retry_call
except func and then returns a decorator for the decorated function.
Ex:
>>> @retry(retries=3)
... def my_func(a, b):
... "this is my funk"
... print(a, b)
>>> my_func.__doc__
'this is my funk'
|
def read_input_file(self, fn):
    """
    This method may be overridden to implement a custom lookup mechanism when
    encountering ``\\input`` or ``\\include`` directives.

    The default implementation looks for a file of the given name relative
    to the directory set by :py:meth:`set_tex_input_directory()`. If
    `strict_input=True` was set, we ensure strictly that the file resides in
    a subtree of the reference input directory (after canonicalizing the
    paths and resolving all symlinks).

    You may override this method to obtain the input data in however way you
    see fit. (In that case, a call to `set_tex_input_directory()` may not
    be needed as that function simply sets properties which are used by the
    default implementation of `read_input_file()`.)

    This function accepts the referred filename as argument (the argument to
    the ``\\input`` macro), and should return a string with the file
    contents (or generate a warning or raise an error).
    """
    fnfull = os.path.realpath(os.path.join(self.tex_input_directory, fn))
    if self.strict_input:
        # Make sure the input file is strictly within dirfull and didn't
        # escape with '../..' tricks or via symlinks. A plain startswith()
        # prefix test is NOT enough: '/base/evil' starts with '/base/evi',
        # so require either an exact match or a path-separator boundary
        # right after the directory prefix.
        dirfull = os.path.realpath(self.tex_input_directory)
        prefix = dirfull if dirfull.endswith(os.sep) else dirfull + os.sep
        if fnfull != dirfull and not fnfull.startswith(prefix):
            logger.warning(
                "Can't access path '%s' leading outside of mandated directory [strict input mode]",
                fn
            )
            return ''
    # Accept names with an implicit .tex/.latex extension, as TeX does.
    if not os.path.exists(fnfull) and os.path.exists(fnfull + '.tex'):
        fnfull = fnfull + '.tex'
    if not os.path.exists(fnfull) and os.path.exists(fnfull + '.latex'):
        fnfull = fnfull + '.latex'
    if not os.path.isfile(fnfull):
        logger.warning(u"Error, file doesn't exist: '%s'", fn)
        return ''
    logger.debug("Reading input file %r", fnfull)
    try:
        with open(fnfull) as f:
            return f.read()
    except IOError as e:
        logger.warning(u"Error, can't access '%s': %s", fn, e)
        return ''
|
This method may be overridden to implement a custom lookup mechanism when
encountering ``\\input`` or ``\\include`` directives.
The default implementation looks for a file of the given name relative
to the directory set by :py:meth:`set_tex_input_directory()`. If
`strict_input=True` was set, we ensure strictly that the file resides in
a subtree of the reference input directory (after canonicalizing the
paths and resolving all symlinks).
You may override this method to obtain the input data in however way you
see fit. (In that case, a call to `set_tex_input_directory()` may not
be needed as that function simply sets properties which are used by the
default implementation of `read_input_file()`.)
This function accepts the referred filename as argument (the argument to
the ``\\input`` macro), and should return a string with the file
contents (or generate a warning or raise an error).
|
def file_can_be_read(path):
    """
    Return ``True`` if the file at the given ``path`` can be read.

    :param string path: the file path
    :rtype: bool

    .. versionadded:: 1.4.0
    """
    if path is None:
        return False
    try:
        # Actually opening the file for binary read is the capability check.
        with io.open(path, "rb"):
            return True
    except (IOError, OSError):
        return False
|
Return ``True`` if the file at the given ``path`` can be read.
:param string path: the file path
:rtype: bool
.. versionadded:: 1.4.0
|
def visitLexerRuleSpec(self, ctx: jsgParser.LexerRuleSpecContext):
    """ lexerRuleSpec: LEXER_ID COLON lexerRuleBlock SEMI """
    # Build the rule block first, then register it under its token name.
    rule_block = JSGLexerRuleBlock(self._context, ctx.lexerRuleBlock())
    self._context.grammarelts[as_token(ctx)] = rule_block
|
lexerRuleSpec: LEXER_ID COLON lexerRuleBlock SEMI
|
def translations_link(self):
    """
    Used on the admin change list: return an HTML anchor pointing at the
    Translation admin changelist filtered down to this object's translations.
    """
    translation_type = ContentType.objects.get_for_model(Translation)
    changelist_url = urlresolvers.reverse(
        'admin:%s_%s_changelist' % (translation_type.app_label,
                                    translation_type.model),
    )
    # Filter the changelist to translations attached to this exact object.
    object_type = ContentType.objects.get_for_model(self)
    query = '?content_type__id__exact=%s&object_id=%s' % (object_type.id, self.id)
    return '<a href="%s">translate</a>' % (changelist_url + query)
|
Print, on the admin change list, the link to see all translations for this object.
@type text: string
@param text: a string with the HTML linking to the translations admin interface
|
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # extracting dictionary of coefficients specific to required
    # intensity measure type.
    C = self.COEFFS_SINTER[imt]
    # cap magnitude values at 8.5, see page 1709
    mag = rup.mag
    if mag > 8.5:
        mag = 8.5
    # compute PGA on rock (needed for site amplification calculation)
    G = 10 ** (1.2 - 0.18 * mag)
    pga_rock = self._compute_mean(self.COEFFS_SINTER[PGA()], G, mag,
                                  rup.hypo_depth, dists.rrup, sites.vs30,
                                  # by passing pga_rock > 500 the soil
                                  # amplification is 0
                                  np.zeros_like(sites.vs30) + 600,
                                  PGA())
    # _compute_mean works in log10; convert the rock PGA back to linear units
    pga_rock = 10 ** (pga_rock)
    # periods 0.4 s (2.5 Hz) and 0.2 s (5 Hz) need a special case because
    # of the erratum. SA for 0.4s and 0.2s is computed and a weighted sum
    # is returned
    if imt.period in (0.2, 0.4):
        C04 = self.COEFFS_SINTER[SA(period=0.4, damping=5.0)]
        C02 = self.COEFFS_SINTER[SA(period=0.2, damping=5.0)]
        mean04 = self._compute_mean(C04, G, mag, rup.hypo_depth,
                                    dists.rrup, sites.vs30, pga_rock, imt)
        mean02 = self._compute_mean(C02, G, mag, rup.hypo_depth,
                                    dists.rrup, sites.vs30, pga_rock, imt)
        # the 1/3 vs 2/3 weighting is mirrored depending on which of the two
        # special-cased periods was actually requested
        if imt.period == 0.2:
            mean = 0.333 * mean02 + 0.667 * mean04
        else:
            mean = 0.333 * mean04 + 0.667 * mean02
    else:
        mean = self._compute_mean(C, G, mag, rup.hypo_depth, dists.rrup,
                                  sites.vs30, pga_rock, imt)
    # convert from log10 to ln and units from cm/s**2 to g
    mean = np.log((10 ** mean) * 1e-2 / g)
    # NOTE(review): extra scaling applied only at T=4.0 s — presumably the
    # long-period correction from the source publication; confirm factor 0.550
    if imt.period == 4.0:
        mean /= 0.550
    stddevs = self._get_stddevs(C, stddev_types, sites.vs30.shape[0])
    return mean, stddevs
|
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
|
def disabledColor(self):
    """
    Returns the color this node should render when it's disabled.

    :return     <QColor>
    """
    node_palette = self.palette()
    return node_palette.color(node_palette.Disabled,
                              node_palette.NodeBackground)
|
Returns the color this node should render when it's disabled.
:return     <QColor>
|
def is_sqlatype_text_of_length_at_least(
        coltype: Union[TypeEngine, VisitableType],
        min_length: int = 1000) -> bool:
    """
    Is the SQLAlchemy column type a string type whose declared length is at
    least ``min_length``? An unlimited-length string always qualifies.
    """
    coltype = _coltype_to_typeengine(coltype)
    if not isinstance(coltype, sqltypes.String):
        # Not a string/text type at all.
        return False
    declared = coltype.length
    # A length of None means "unlimited", which satisfies any bound.
    return declared is None or declared >= min_length
|
Is the SQLAlchemy column type a string type that's at least the specified
length?
|
def add_bundle(name, scripts=(), files=(), scriptsdir=SCRIPTSDIR, filesdir=FILESDIR):
    """High level, simplified interface for creating a bundle which
    takes the bundle name, a list of script file names in a common
    scripts directory, and a list of absolute target file paths, of
    which the basename is also located in a common files directory.
    It converts those lists into maps and then calls new_bundle() to
    actually create the Bundle and add it to BUNDLEMAP.

    The sequence defaults are immutable tuples (rather than the mutable
    list defaults used previously) so a shared default can never be
    mutated across calls; any iterable of names/paths is accepted.
    """
    scriptmap = makemap(scripts, join(PATH, scriptsdir))
    # Map each absolute target path to its source file in filesdir.
    filemap = {f: join(PATH, filesdir, os.path.basename(f)) for f in files}
    new_bundle(name, scriptmap, filemap)
|
High level, simplified interface for creating a bundle which
takes the bundle name, a list of script file names in a common
scripts directory, and a list of absolute target file paths, of
which the basename is also located in a common files directory.
It converts those lists into maps and then calls new_bundle() to
actually create the Bundle and add it to BUNDLEMAP
|
def path_completer(text, expected=None, classes=None, perm_level=None,
                   include_current_proj=False, typespec=None, visibility=None):
    '''
    :param text: String to tab-complete to a path matching the syntax project-name:folder/entity_or_folder_name
    :type text: string
    :param expected: "folder", "entity", "project", or None (no restriction) as to the types of answers to look for
    :type expected: string
    :param classes: if expected="entity", the possible data object classes that are acceptable
    :type classes: list of strings
    :param perm_level: the minimum permissions level required, e.g. "VIEW" or "CONTRIBUTE"
    :type perm_level: string
    :param include_current_proj: Indicate whether the current project's name should be a potential result
    :type include_current_proj: boolean
    :param visibility: Visibility with which to restrict the completion (one of "either", "visible", or "hidden") (default behavior is dependent on *text*)

    Returns a list of matches to the text and restricted by the
    requested parameters.
    '''
    colon_pos = get_last_pos_of_char(':', text)
    slash_pos = get_last_pos_of_char('/', text)
    delim_pos = max(colon_pos, slash_pos)

    # First get projects if necessary
    matches = []
    if expected == 'project' and colon_pos > 0 and colon_pos == len(text) - 1:
        # Text already ends in ':' — if it names exactly one project, the
        # completion is done; append a space to move the cursor on.
        if dxpy.find_one_project(zero_ok=True, name=text[:colon_pos]) is not None:
            return [text + " "]

    if colon_pos < 0 and slash_pos < 0:
        # Might be tab-completing a project, but don't ever include
        # whatever's set as dxpy.WORKSPACE_ID unless expected == "project"
        # Also, don't bother if text=="" and expected is NOT "project"
        # Also, add space if expected == "project"
        if text != "" or expected == 'project':
            results = dxpy.find_projects(describe=True, level=perm_level)
            if not include_current_proj:
                results = [r for r in results if r['id'] != dxpy.WORKSPACE_ID]
            matches += [escape_colon(r['describe']['name'])+':' for r in results if r['describe']['name'].startswith(text)]

    if expected == 'project':
        return matches

    # Attempt to tab-complete to a folder or data object name
    if colon_pos < 0 and slash_pos >= 0:
        # Not tab-completing a project, and the project is unambiguous
        # (use dxpy.WORKSPACE_ID)
        if dxpy.WORKSPACE_ID is not None:
            # try-except block in case dxpy.WORKSPACE_ID is garbage.
            # Previously a bare 'except:', which also swallowed SystemExit
            # and KeyboardInterrupt; 'except Exception' keeps the
            # best-effort behavior without masking interpreter signals.
            try:
                dxproj = dxpy.get_handler(dxpy.WORKSPACE_ID)
                folderpath, entity_name = clean_folder_path(text)
                matches += get_folder_matches(text, slash_pos, dxproj, folderpath)
                if expected != 'folder':
                    if classes is not None:
                        for classname in classes:
                            matches += get_data_matches(text, slash_pos, dxproj,
                                                        folderpath, classname=classname,
                                                        typespec=typespec,
                                                        visibility=visibility)
                    else:
                        matches += get_data_matches(text, slash_pos, dxproj,
                                                    folderpath, typespec=typespec,
                                                    visibility=visibility)
            except Exception:
                pass
    else:
        # project is given by a path, but attempt to resolve to an
        # object or folder anyway
        try:
            proj_ids, folderpath, entity_name = resolve_path(text, multi_projects=True)
        except ResolutionError as details:
            sys.stderr.write("\n" + fill(str(details)))
            return matches
        for proj in proj_ids:
            # protects against dxpy.WORKSPACE_ID being garbage
            # (narrowed from a bare 'except:' — see note above)
            try:
                dxproj = dxpy.get_handler(proj)
                matches += get_folder_matches(text, delim_pos, dxproj, folderpath)
                if expected != 'folder':
                    if classes is not None:
                        for classname in classes:
                            matches += get_data_matches(text, delim_pos, dxproj,
                                                        folderpath, classname=classname,
                                                        typespec=typespec, visibility=visibility)
                    else:
                        matches += get_data_matches(text, delim_pos, dxproj,
                                                    folderpath, typespec=typespec,
                                                    visibility=visibility)
            except Exception:
                pass

    return matches
|
:param text: String to tab-complete to a path matching the syntax project-name:folder/entity_or_folder_name
:type text: string
:param expected: "folder", "entity", "project", or None (no restriction) as to the types of answers to look for
:type expected: string
:param classes: if expected="entity", the possible data object classes that are acceptable
:type classes: list of strings
:param perm_level: the minimum permissions level required, e.g. "VIEW" or "CONTRIBUTE"
:type perm_level: string
:param include_current_proj: Indicate whether the current project's name should be a potential result
:type include_current_proj: boolean
:param visibility: Visibility with which to restrict the completion (one of "either", "visible", or "hidden") (default behavior is dependent on *text*)
Returns a list of matches to the text and restricted by the
requested parameters.
|
def fromPattern(cls, datetime_string, datetime_pattern, time_zone, require_hour=True):
    '''
    a method for constructing labDT from a strptime pattern in a string
    https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior

        iso_pattern: '%Y-%m-%dT%H:%M:%S.%f%z'
        human_friendly_pattern: '%A, %B %d, %Y %I:%M:%S.%f%p'

    :param datetime_string: string with date and time info
    :param datetime_pattern: string with python formatted pattern
    :param time_zone: string with timezone info
    :param require_hour: [optional] boolean to disable hour requirement
    :return: labDT object with datetime
    '''

# validate inputs
    title = 'input for labDT.fromPattern'
    dT_req = [['%Y','%y'],['%b','%B','%m'],['%d'],['%H','%I']]
    # Require at least one directive from each group (year, month, day,
    # hour). The previous implementation counted total directive hits and
    # compared to exactly 4, which wrongly accepted e.g. a pattern with two
    # year tokens but no hour, and wrongly rejected redundant-but-valid
    # patterns.
    has_all_fields = all(
        any(token in datetime_pattern for token in group) for group in dT_req
    )
    if require_hour and not has_all_fields:
        raise Exception('Datetime pattern %s must contain at least year, month, day and hour.' % title)
    # dateutil's tz.gettz() returns None for an unrecognized zone name — it
    # does not raise — so the old try/except validation never triggered.
    # Validate the result explicitly and reuse it below instead of calling
    # gettz() a second time.
    tz_info = tz.gettz(time_zone)
    if tz_info is None:
        raise ValueError('Timezone %s is not a valid timezone format.' % title)

# decipher datetime
    python_datetime = datetime.strptime(datetime_string, datetime_pattern)
    python_datetime = python_datetime.replace(tzinfo=tz_info)
    dT = python_datetime.astimezone(pytz.utc)
    dt_kwargs = {
        'year': dT.year,
        'month': dT.month,
        'day': dT.day,
        'hour': dT.hour,
        'minute': dT.minute,
        'second': dT.second,
        'microsecond': dT.microsecond,
        'tzinfo': dT.tzinfo
    }
    return labDT(**dt_kwargs)
|
a method for constructing labDT from a strptime pattern in a string
https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
iso_pattern: '%Y-%m-%dT%H:%M:%S.%f%z'
human_friendly_pattern: '%A, %B %d, %Y %I:%M:%S.%f%p'
:param datetime_string: string with date and time info
:param datetime_pattern: string with python formatted pattern
:param time_zone: string with timezone info
:param require_hour: [optional] boolean to disable hour requirement
:return: labDT object with datetime
|
def asDictionary(self):
    """Return the spatial reference as a dictionary for use in JSON calls.

    The well-known id (``wkid``) is preferred; the well-known text
    (``wkt``) is used only when no wkid is set.
    """
    # Identity comparison with None ('is None') instead of '== None'
    # (PEP 8): equality can be hijacked by a custom __eq__.
    if self._wkid is None and self._wkt is not None:
        return {"wkt": self._wkt}
    return {"wkid": self._wkid}
|
returns the wkid id for use in json calls
|
def create_from_name_and_dictionary(self, name, datas):
    """Return a populated object Object from dictionary datas

    Dispatches on datas["type"] (defaulting to "any") to build the matching
    ObjectRaw subclass, then applies the shared attributes (common datas,
    constraints, type, optional flag) before returning it.
    """
    # Missing type defaults to "any"; comparison is case-insensitive.
    if "type" not in datas:
        str_type = "any"
    else:
        str_type = str(datas["type"]).lower()
    # NOTE(review): unknown type names fall back to the literal Types("type")
    # member rather than "any" — confirm this fallback is intentional.
    if str_type not in ObjectRaw.Types:
        type = ObjectRaw.Types("type")
    else:
        type = ObjectRaw.Types(str_type)
    if type is ObjectRaw.Types.object:
        object = ObjectObject()
        if "properties" in datas:
            object.properties = self.create_dictionary_of_element_from_dictionary("properties", datas)
        if "patternProperties" in datas:
            object.pattern_properties = self.create_dictionary_of_element_from_dictionary("patternProperties", datas)
        if "additionalProperties" in datas:
            # A dict describes the additional-properties schema recursively;
            # a false-y scalar disables them; anything else is rejected.
            if isinstance(datas["additionalProperties"], dict):
                object.additional_properties = self.create_from_name_and_dictionary("additionalProperties", datas["additionalProperties"])
            elif not to_boolean(datas["additionalProperties"]):
                object.additional_properties = None
            else:
                raise ValueError("AdditionalProperties doe not allow empty value (yet)")
    elif type is ObjectRaw.Types.array:
        object = ObjectArray()
        # Array items default to a generic object schema when unspecified.
        if "items" in datas:
            object.items = self.create_from_name_and_dictionary("items", datas["items"])
        else:
            object.items = ObjectObject()
        if "sample_count" in datas:
            object.sample_count = int(datas["sample_count"])
    elif type is ObjectRaw.Types.number:
        object = ObjectNumber()
    elif type is ObjectRaw.Types.integer:
        object = ObjectInteger()
    elif type is ObjectRaw.Types.string:
        object = ObjectString()
    elif type is ObjectRaw.Types.boolean:
        object = ObjectBoolean()
        if "sample" in datas:
            object.sample = to_boolean(datas["sample"])
    elif type is ObjectRaw.Types.reference:
        object = ObjectReference()
        if "reference" in datas:
            object.reference_name = str(datas["reference"])
    elif type is ObjectRaw.Types.type:
        object = ObjectType()
        object.type_name = str(datas["type"])
    elif type is ObjectRaw.Types.none:
        object = ObjectNone()
    elif type is ObjectRaw.Types.dynamic:
        object = ObjectDynamic()
        if "items" in datas:
            object.items = self.create_from_name_and_dictionary("items", datas["items"])
        if "sample" in datas:
            # Dynamic samples must be a mapping; keys and values are
            # coerced to strings.
            if isinstance(datas["sample"], dict):
                object.sample = {}
                for k, v in datas["sample"].items():
                    object.sample[str(k)] = str(v)
            else:
                # NOTE(review): message contains typos and a stray '\s'
                # escape — left untouched here as it is runtime output.
                raise ValueError("A dictionnary is expected for dynamic\s object in \"%s\"" % name)
    elif type is ObjectRaw.Types.const:
        object = ObjectConst()
        # NOTE(review): when "const_type" is given and valid, const_type
        # stays a plain str rather than a Types member — confirm intended.
        if "const_type" in datas:
            const_type = str(datas["const_type"])
            if const_type not in ObjectConst.Types:
                raise ValueError("Const type \"%s\" unknwon" % const_type)
        else:
            const_type = ObjectConst.Types.string
        object.const_type = const_type
        if "value" not in datas:
            raise ValueError("Missing const value")
        object.value = datas["value"]
    elif type is ObjectRaw.Types.enum:
        object = ObjectEnum()
        if "values" not in datas or not isinstance(datas['values'], list):
            raise ValueError("Missing enum values")
        object.values = [str(value) for value in datas["values"]]
        # Optional per-value descriptions; every enum value ends up with a
        # (possibly description-less) EnumValue entry.
        if "descriptions" in datas and isinstance(datas['descriptions'], dict):
            for (value_name, value_description) in datas["descriptions"].items():
                value = EnumValue()
                value.name = value_name
                value.description = value_description
                object.descriptions.append(value)
        descriptions = [description.name for description in object.descriptions]
        for value_name in [x for x in object.values if x not in descriptions]:
            value = EnumValue()
            value.name = value_name
            object.descriptions.append(value)
    else:
        object = ObjectRaw()
    # Shared post-processing for every constructed object kind.
    self.set_common_datas(object, name, datas)
    if isinstance(object, Constraintable):
        self.set_constraints(object, datas)
    object.type = type
    if "optional" in datas:
        object.optional = to_boolean(datas["optional"])
    return object
|
Return a populated object Object from dictionary datas
|
def _find_child(self, tag):
    """Find the child C{etree.Element} with the matching C{tag}.

    Returns the single matching child, or C{None} when there is none.

    @raises L{WSDLParseError}: If more than one such elements are found.
    """
    qualified = self._get_namespace_tag(tag)
    found = self._root.findall(qualified)
    if len(found) > 1:
        raise WSDLParseError("Duplicate tag '%s'" % qualified)
    return found[0] if found else None
|
Find the child C{etree.Element} with the matching C{tag}.
@raises L{WSDLParseError}: If more than one such elements are found.
|
def query(starttime, endtime, output=None, *filenames):
    '''Given a time range and input file, query creates a new file with only
    that subset of data. If no outfile name is given, the new file name is the
    old file name with the time range appended.

    Args:
        starttime:
            The datetime of the beginning time range to be extracted from the files.
        endtime:
            The datetime of the end of the time range to be extracted from the files.
        output:
            Optional: The output file name. Defaults to
            [first filename in filenames][starttime]-[endtime].pcap
        filenames:
            A tuple of one or more file names to extract data from.
    '''
    if not output:
        # Derive the default output name from the first input file plus the
        # requested time range. (The redundant 'else: output = output'
        # branch was removed.)
        output = (filenames[0].replace('.pcap', '') + starttime.isoformat() +
                  '-' + endtime.isoformat() + '.pcap')
    # NOTE(review): 'open' here appears to be this module's pcap open — the
    # streams yield (header, packet) pairs and the outfile supports
    # write(packet, header=...) — not the builtin. TODO confirm.
    with open(output, 'w') as outfile:
        for filename in filenames:
            log.info("pcap.query: processing %s..." % filename)
            with open(filename, 'r') as stream:
                for header, packet in stream:
                    # Keep only packets whose timestamp falls in the
                    # inclusive [starttime, endtime] window.
                    if packet is not None and starttime <= header.timestamp <= endtime:
                        outfile.write(packet, header=header)
|
Given a time range and input file, query creates a new file with only
that subset of data. If no outfile name is given, the new file name is the
old file name with the time range appended.
Args:
starttime:
The datetime of the beginning time range to be extracted from the files.
endtime:
The datetime of the end of the time range to be extracted from the files.
output:
Optional: The output file name. Defaults to
[first filename in filenames][starttime]-[endtime].pcap
filenames:
A tuple of one or more file names to extract data from.
|
def get_features_all(self):
    """
    Return every known feature with its name, regardless of whether it is
    used for training or prediction, sorted by feature name.

    Returns
    -------
    all_features : OrderedDict
        Features dictionary, keyed and ordered by feature name.
    """
    instance_vars = vars(self)
    # Keep only attributes that are registered feature names, emitting them
    # in sorted-name order so the OrderedDict is deterministic.
    return OrderedDict(
        (feature_name, instance_vars[feature_name])
        for feature_name in sorted(instance_vars)
        if feature_name in feature_names_list_all
    )
|
Return all features with their names,
regardless of whether they are used for training or prediction. Sorted by name.
Returns
-------
all_features : OrderedDict
Features dictionary.
|
def show_instance(name, call=None):
    '''
    Show the details from AzureARM concerning an instance
    '''
    # Only valid when invoked as an --action on a specific instance.
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )
    all_nodes = list_nodes_full('function')
    try:
        node = all_nodes[name]
    except KeyError:
        log.debug('Failed to get data for node \'%s\'', name)
        node = {}
    # Refresh the local cache entry for this node before returning it.
    __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
    return node
|
Show the details from AzureARM concerning an instance
|
def _deriv_logaddexp2(x1, x2):
"""The derivative of f(x, y) = log2(2^x + 2^y)"""
y1 = np.exp2(x1)
y2 = np.exp2(x2)
df_dx1 = y1 / (y1 + y2)
df_dx2 = y2 / (y1 + y2)
return np.vstack([df_dx1, df_dx2]).T
|
The derivative of f(x, y) = log2(2^x + 2^y)
|
def velocity_graph(adata, basis=None, vkey='velocity', which_graph='velocity', n_neighbors=10,
                   alpha=.8, perc=90, edge_width=.2, edge_color='grey', color=None, use_raw=None, layer=None,
                   color_map=None, colorbar=True, palette=None, size=None, sort_order=True, groups=None,
                   components=None, projection='2d', legend_loc='on data', legend_fontsize=None, legend_fontweight=None,
                   right_margin=None, left_margin=None, xlabel=None, ylabel=None, title=None, fontsize=None,
                   figsize=None, dpi=None, frameon=None, show=True, save=None, ax=None):
    """\
    Plot of the velocity graph.

    Arguments
    ---------
    adata: :class:`~anndata.AnnData`
        Annotated data matrix.
    vkey: `str` or `None` (default: `None`)
        Key for annotations of observations/cells or variables/genes.
    which_graph: `'velocity'` or `'neighbors'` (default: `'velocity'`)
        Whether to show transitions from velocity graph or connectivities from neighbors graph.

    {scatter}

    Returns
    -------
    `matplotlib.Axis` if `show==False`
    """
    basis = default_basis(adata) if basis is None else basis
    title = which_graph + ' graph' if title is None else title
    # Forward all styling options to scatter(); the scatter is drawn first at
    # zorder=0 so the graph edges can later be layered beneath the points.
    scatter_kwargs = {"basis": basis, "perc": perc, "use_raw": use_raw, "sort_order": sort_order, "alpha": alpha,
                      "components": components, "projection": projection, "legend_loc": legend_loc, "groups": groups,
                      "legend_fontsize": legend_fontsize, "legend_fontweight": legend_fontweight, "palette": palette,
                      "color_map": color_map, "frameon": frameon, "title": title, "xlabel": xlabel, "ylabel": ylabel,
                      "right_margin": right_margin, "left_margin": left_margin, "colorbar": colorbar, "dpi": dpi,
                      "fontsize": fontsize, "show": False, "save": None, "figsize": figsize, }
    ax = scatter(adata, layer=layer, color=color, size=size, ax=ax, zorder=0, **scatter_kwargs)

    from networkx import Graph, draw_networkx_edges
    if which_graph == 'neighbors':
        # Use the kNN connectivity graph; optionally drop the weakest edges
        # below the given percentile to declutter the plot.
        T = adata.uns['neighbors']['connectivities']
        if perc is not None:
            threshold = np.percentile(T.data, perc)
            T.data[T.data < threshold] = 0
            T.eliminate_zeros()
    else:
        T = transition_matrix(adata, vkey=vkey, weight_indirect_neighbors=0, n_neighbors=n_neighbors, perc=perc)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # Draw edges on the embedding coordinates, below the scatter points.
        edges = draw_networkx_edges(Graph(T), adata.obsm['X_' + basis], width=edge_width, edge_color=edge_color, ax=ax)
        edges.set_zorder(-2)
        edges.set_rasterized(settings._vector_friendly)

    savefig_or_show('' if basis is None else basis, dpi=dpi, save=save, show=show)
    if not show: return ax
|
\
Plot of the velocity graph.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
vkey: `str` or `None` (default: `None`)
Key for annotations of observations/cells or variables/genes.
which_graph: `'velocity'` or `'neighbors'` (default: `'velocity'`)
Whether to show transitions from velocity graph or connectivities from neighbors graph.
{scatter}
Returns
-------
`matplotlib.Axis` if `show==False`
|
def _register_custom_filters(self):
    """Register any custom filter modules listed in ``CUSTOM_FILTERS``."""
    module_names = self.settings.get('CUSTOM_FILTERS', [])
    if not isinstance(module_names, list):
        raise KeyError("`CUSTOM_FILTERS` setting must be a list.")
    for filter_module_name in module_names:
        try:
            filter_module = import_module(filter_module_name)
        except ImportError as error:
            raise ImproperlyConfigured(
                "Failed to load custom filter module '{}'.\n"
                "Error was: {}".format(filter_module_name, error)
            )
        # Each module must expose a dict attribute named 'filters'.
        filter_map = getattr(filter_module, 'filters', None)
        if not isinstance(filter_map, dict):
            raise ImproperlyConfigured(
                "Filter module '{}' does not define a 'filters' dictionary".format(filter_module_name)
            )
        self._environment.filters.update(filter_map)
|
Register any custom filter modules.
|
async def _download_photo(self, photo, file, date, thumb, progress_callback):
    """Specialized version of .download_media() for photos"""
    # Determine the photo and its largest size
    if isinstance(photo, types.MessageMediaPhoto):
        photo = photo.photo
    if not isinstance(photo, types.Photo):
        # Not a downloadable photo object; nothing to do.
        return
    size = self._get_thumb(photo.sizes, thumb)
    if not size or isinstance(size, types.PhotoSizeEmpty):
        return
    file = self._get_proper_filename(file, 'photo', '.jpg', date=date)
    # Cached/stripped sizes already carry their bytes inline, so they can be
    # written out without hitting the network.
    if isinstance(size, (types.PhotoCachedSize, types.PhotoStrippedSize)):
        return self._download_cached_photo_size(size, file)
    result = await self.download_file(
        types.InputPhotoFileLocation(
            id=photo.id,
            access_hash=photo.access_hash,
            file_reference=photo.file_reference,
            thumb_size=size.type
        ),
        file,
        file_size=size.size,
        progress_callback=progress_callback
    )
    # When the caller asked for raw bytes (file is the 'bytes' type), return
    # the downloaded data; otherwise return the path that was written.
    return result if file is bytes else file
|
Specialized version of .download_media() for photos
|
def absent(name, vname=None, use_32bit_registry=False):
    r'''
    Ensure a registry value is removed. To remove a key use key_absent.

    Args:

        name (str):
            Full path of the key including the HIVE, Key, and all Subkeys,
            e.g. ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``. Valid hive values:

            - HKEY_CURRENT_USER or HKCU
            - HKEY_LOCAL_MACHINE or HKLM
            - HKEY_USERS or HKU

        vname (str):
            Name of the value beneath the key. When omitted, the
            ``(Default)`` value is targeted.

        use_32bit_registry (bool):
            Use the 32bit portion of the registry. Applies only to 64bit
            windows. 32bit Windows will ignore this parameter. Default is
            False.

    Returns:
        dict: A dictionary showing the results of the registry operation.

    CLI Example:

    .. code-block:: yaml

        'HKEY_CURRENT_USER\\SOFTWARE\\Salt':
          reg.absent
            - vname: version
    '''
    ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}
    hive, key = _parse_key(name)

    # Nothing to do when the value is already missing (or unset).
    existing = __utils__['reg.read_value'](hive=hive,
                                           key=key,
                                           vname=vname,
                                           use_32bit_registry=use_32bit_registry)
    if not existing['success'] or existing['vdata'] == '(value not set)':
        ret['comment'] = '{0} is already absent'.format(name)
        return ret

    remove_change = {'Key': r'{0}\{1}'.format(hive, key),
                     'Entry': '{0}'.format(vname if vname else '(Default)')}

    # In test mode, only report what would be removed.
    if __opts__['test']:
        ret['result'] = None
        ret['changes'] = {'reg': {'Will remove': remove_change}}
        return ret

    # Delete the value for real and report the outcome.
    ret['result'] = __utils__['reg.delete_value'](hive=hive,
                                                  key=key,
                                                  vname=vname,
                                                  use_32bit_registry=use_32bit_registry)
    if ret['result']:
        ret['changes'] = {'reg': {'Removed': remove_change}}
        ret['comment'] = r'Removed {0} from {1}'.format(key, hive)
    else:
        ret['changes'] = {}
        ret['comment'] = r'Failed to remove {0} from {1}'.format(key, hive)
    return ret
|
r'''
Ensure a registry value is removed. To remove a key use key_absent.
Args:
name (str):
A string value representing the full path of the key to include the
HIVE, Key, and all Subkeys. For example:
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
vname (str):
The name of the value you'd like to create beneath the Key. If this
parameter is not passed it will assume you want to set the
``(Default)`` value
use_32bit_registry (bool):
Use the 32bit portion of the registry. Applies only to 64bit
windows. 32bit Windows will ignore this parameter. Default is False.
Returns:
dict: A dictionary showing the results of the registry operation.
CLI Example:
.. code-block:: yaml
'HKEY_CURRENT_USER\\SOFTWARE\\Salt':
reg.absent
- vname: version
In the above example the value named ``version`` will be removed from
the SOFTWARE\\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was
not passed, the ``(Default)`` value would be deleted.
|
def tomask(self, pores=None, throats=None):
    r"""
    Build a boolean mask of length Np or Nt from a list of pore or
    throat indices.

    Parameters
    ----------
    pores or throats : array_like
        Indices to mark True.  Exactly one of the two arguments may be
        given per call; the length of the returned mask matches the
        chosen element type.

    Returns
    -------
    A boolean mask of length Np or Nt with True in the specified pore or
    throat locations.

    See Also
    --------
    toindices

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic(shape=[5, 5, 5])
    >>> mask = pn.tomask(pores=[0, 10, 20])
    >>> sum(mask)  # 3 non-zero elements exist in the mask (0, 10 and 20)
    3
    >>> len(mask)  # Mask size is equal to the number of pores in network
    125
    >>> mask = pn.tomask(throats=[0, 10, 20])
    >>> len(mask)  # Mask is now equal to number of throats in network
    300
    """
    have_pores = pores is not None
    have_throats = throats is not None
    if have_pores and not have_throats:
        return self._tomask(element='pore', indices=pores)
    if have_throats and not have_pores:
        return self._tomask(element='throat', indices=throats)
    # Both given, or neither given: the request is ambiguous.
    raise Exception('Cannot specify both pores and throats')
|
r"""
Convert a list of pore or throat indices into a boolean mask of the
correct length
Parameters
----------
pores or throats : array_like
List of pore or throat indices. Only one of these can be specified
at a time, and the returned result will be of the corresponding
length.
Returns
-------
A boolean mask of length Np or Nt with True in the specified pore or
throat locations.
See Also
--------
toindices
Examples
--------
>>> import openpnm as op
>>> pn = op.network.Cubic(shape=[5, 5, 5])
>>> mask = pn.tomask(pores=[0, 10, 20])
>>> sum(mask) # 3 non-zero elements exist in the mask (0, 10 and 20)
3
>>> len(mask) # Mask size is equal to the number of pores in network
125
>>> mask = pn.tomask(throats=[0, 10, 20])
>>> len(mask) # Mask is now equal to number of throats in network
300
|
def make_call_types(f, globals_d):
    # type: (Callable, Dict) -> Tuple[Dict[str, Anno], Anno]
    """Make a call_types dictionary that describes what arguments to pass to f

    Args:
        f: The function to inspect for argument names (without self)
        globals_d: A dictionary of globals to lookup annotation definitions in
    """
    arg_spec = getargspec(f)
    arg_names = [name for name in arg_spec.args if name != "self"]
    # Map each defaulted argument name onto its default value.
    defaults = {}  # type: Dict[str, Any]
    if arg_spec.defaults:
        defaults = dict(zip(arg_names[-len(arg_spec.defaults):],
                            arg_spec.defaults))
    annotations = getattr(f, "__annotations__", None)
    if not annotations:
        # No real annotations: make string annotations from the type
        # comment if there is one.
        annotations = make_annotations(f, globals_d)
    call_types = OrderedDict()  # type: Dict[str, Anno]
    for name in arg_names:
        anno = anno_with_default(annotations[name],
                                 defaults.get(name, NO_DEFAULT))
        assert isinstance(anno, Anno), \
            "Argument %r has type %r which is not an Anno" % (name, anno)
        call_types[name] = anno
    return_type = anno_with_default(annotations.get("return", None))
    if return_type is Any:
        return_type = Anno("Any return value", Any, "return")
    assert return_type is None or isinstance(return_type, Anno), \
        "Return has type %r which is not an Anno" % (return_type,)
    return call_types, return_type
|
Make a call_types dictionary that describes what arguments to pass to f
Args:
f: The function to inspect for argument names (without self)
globals_d: A dictionary of globals to lookup annotation definitions in
|
def _ensure_device_active(self, device):
    '''Ensure a single device is in an active state

    :param device: ManagementRoot object -- device to inspect
    :raises: UnexpectedDeviceGroupState -- if the device's failover
        state is anything other than ``'active'``
    '''
    device_name = get_device_info(device).name
    act = device.tm.cm.devices.device.load(
        name=device_name,
        partition=self.partition
    )
    if act.failoverState != 'active':
        raise UnexpectedDeviceGroupState(
            "A device in the cluster was not in the 'Active' state.")
|
Ensure a single device is in an active state
:param device: ManagementRoot object -- device to inspect
:raises: UnexpectedDeviceGroupState
|
def init(host='0.0.0.0', port=1338):
    """
    Initialize PyMLGame. This creates a controller thread that listens for game controllers and events.

    :param host: Bind to this address
    :param port: Bind to this port
    :type host: str
    :type port: int
    """
    CONTROLLER.host = host
    CONTROLLER.port = port
    # Daemon thread: it exits together with the main thread.
    # (The ``daemon`` attribute replaces the deprecated setDaemon() call.)
    CONTROLLER.daemon = True
    CONTROLLER.start()
|
Initialize PyMLGame. This creates a controller thread that listens for game controllers and events.
:param host: Bind to this address
:param port: Bind to this port
:type host: str
:type port: int
|
async def main():
    """
    Main code (asynchronous requests)

    You can send one millions request with aiohttp :
    https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html
    But don't do that on one server, it's DDOS !
    """
    # Create Client from endpoint string in Duniter format
    client = Client(BMAS_ENDPOINT)
    # Get the node summary infos by dedicated method (with json schema validation)
    print("\nCall bma.node.summary:")
    summary_future = asyncio.ensure_future(client(bma.node.summary))
    # Get the money parameters located in the first block
    print("\nCall bma.blockchain.parameters:")
    parameters_future = asyncio.ensure_future(client(bma.blockchain.parameters))
    # you now have all response bodies in this variable
    responses = await asyncio.gather(summary_future, parameters_future)
    print("\nResponses:")
    print(responses)
    # Close client aiohttp session
    await client.close()
|
Main code (asynchronous requests)
You can send one millions request with aiohttp :
https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html
But don't do that on one server, it's DDOS !
|
def dedent_description(pkg_info):
    """
    Dedent and convert pkg_info['Description'] to Unicode.
    """
    description = pkg_info['Description']
    # Python 3 Unicode handling, sorta: non-str descriptions carry
    # surrogate escapes and need round-tripping at the end.
    surrogates = not isinstance(description, str)
    if surrogates:
        description = pkginfo_unicode(pkg_info, 'Description')
    lines = description.splitlines()
    # The first line of long_description is never indented by header
    # folding, so strip it separately and dedent only the remainder.
    first_line = lines[0].lstrip()
    remainder = textwrap.dedent('\n'.join(lines[1:]))
    result = '\n'.join((first_line, remainder, '\n'))
    if surrogates:
        result = result.encode("utf8").decode("ascii", "surrogateescape")
    return result
|
Dedent and convert pkg_info['Description'] to Unicode.
|
def improvise_step(oracle, i, lrs=0, weight=None, prune=False):
    """ Given the current time step, improvise (generate) the next time step based on the oracle structure.

    :param oracle: an indexed vmo object
    :param i: current improvisation time step
    :param lrs: the length of minimum longest repeated suffixes allowed to jump
    :param weight: if None, jump to possible candidate time step uniformly, if "lrs", the probability is proportional
        to the LRS of each candidate time step
    :param prune: whether to prune improvisation steps based on regular beat structure or not
    :return: the next time step
    """
    candidates = oracle.latent[oracle.data[i]]

    def _eligible(s):
        # A candidate needs a long-enough repeated suffix and a valid
        # successor state inside the oracle.
        return oracle.lrs[s] >= lrs and (s + 1) < oracle.n_states

    if prune:
        # Keep only candidates that fall on the regular beat grid.
        beat_states = range(i % prune, oracle.n_states - 1, prune)
        trn_link = [s + 1 for s in candidates
                    if _eligible(s) and s in beat_states]
    else:
        trn_link = [s + 1 for s in candidates if _eligible(s)]

    if not trn_link:
        # No forward link available: wrap around at the end, otherwise
        # simply advance one step.
        return 1 if i == oracle.n_states - 1 else i + 1

    if weight == 'lrs':
        # Draw with probability proportional to each candidate's LRS by
        # repeating every state LRS-many times in the sample population.
        # NOTE(review): when prune is set, lrs_link below is NOT pruned
        # the same way as trn_link, so the zip may misalign — behavior
        # preserved from the original; confirm intent upstream.
        lrs_link = [oracle.lrs[s] for s in candidates if _eligible(s)]
        lrs_pop = [state for state, count in zip(trn_link, lrs_link)
                   for _ in range(count)]
        return np.random.choice(lrs_pop)
    # Uniform choice among the eligible forward links.
    return trn_link[int(np.floor(random.random() * len(trn_link)))]
|
Given the current time step, improvise (generate) the next time step based on the oracle structure.
:param oracle: an indexed vmo object
:param i: current improvisation time step
:param lrs: the length of minimum longest repeated suffixes allowed to jump
:param weight: if None, jump to possible candidate time step uniformly, if "lrs", the probability is proportional
to the LRS of each candidate time step
:param prune: whether to prune improvisation steps based on regular beat structure or not
:return: the next time step
|
def find_closing_parenthesis(sql, startpos):
    """Find the pair of opening and closing parentheses.

    Starts search at the position startpos.
    Returns tuple of positions (opening, closing) if search succeeds,
    otherwise None.

    :param sql: string to scan
    :param startpos: index at which scanning starts
    """
    pattern = re.compile(r'[()]')
    level = 0
    # Position of the outermost '(' seen so far; None until one is found.
    # (Was initialized to a misleading empty-list sentinel.)
    opening = None
    for match in pattern.finditer(sql, startpos):
        par = match.group()
        if par == '(':
            if level == 0:
                opening = match.start()
            level += 1
        elif par == ')':
            assert level > 0, "missing '(' before ')'"
            level -= 1
            if level == 0:
                closing = match.end()
                return opening, closing
    # No balanced pair found after startpos.
    return None
|
Find the pair of opening and closing parentheses.
Starts search at the position startpos.
Returns tuple of positions (opening, closing) if search succeeds, otherwise None.
|
def initial(self, request, *args, **kwargs):
    """
    Overrides DRF's `initial` in order to set the `_sorting_field` from corresponding property in view.

    Protected property is required in order to support overriding of `sorting_field` via `@property`, we do this
    after original `initial` has been ran in order to make sure that view has all its properties set up.
    """
    super(FlatMultipleModelMixin, self).initial(request, *args, **kwargs)
    conflict_msg = (
        '{} should either define ``sorting_field`` or ``sorting_fields`` property, not both.'
        .format(self.__class__.__name__)
    )
    assert not (self.sorting_field and self.sorting_fields), conflict_msg
    if self.sorting_field:
        # Legacy single-field property: translate it to the list form.
        warnings.warn(
            '``sorting_field`` property is pending its deprecation. Use ``sorting_fields`` instead.',
            DeprecationWarning
        )
        self.sorting_fields = [self.sorting_field]
    self._sorting_fields = self.sorting_fields
|
Overrides DRF's `initial` in order to set the `_sorting_field` from corresponding property in view.
Protected property is required in order to support overriding of `sorting_field` via `@property`, we do this
after original `initial` has been ran in order to make sure that view has all its properties set up.
|
def get_courses(self, **kwargs):
    """
    Return a list of active courses for the current user.

    :calls: `GET /api/v1/courses \
    <https://canvas.instructure.com/doc/api/courses.html#method.courses.index>`_

    :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
        :class:`canvasapi.course.Course`
    """
    request_kwargs = combine_kwargs(**kwargs)
    return PaginatedList(
        Course, self.__requester, 'GET', 'courses', _kwargs=request_kwargs
    )
|
Return a list of active courses for the current user.
:calls: `GET /api/v1/courses \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.course.Course`
|
def _add(self, name, *args, **kw):
    """
    Register one argument on the wrapped parser group and record it in
    .all_arguments and .names.  Arguments must be added in the same
    order in which they are declared in self.argdict.
    """
    expected = list(self.argdict)[self._argno]
    if name != expected:
        raise NameError(
            'Setting argument %s, but it should be %s' % (name, expected))
    self._group.add_argument(*args, **kw)
    self.all_arguments.append((args, kw))
    self.names.append(name)
    self._argno += 1
|
Add an argument to the underlying parser and grow the list
.all_arguments and the set .names
|
def format_file_params(files):
    '''
    Utility method for formatting file parameters for transmission
    '''
    if not files:
        return {}
    # Each file is keyed as file[0], file[1], ... and opened in binary
    # mode; the transport layer is responsible for closing the handles.
    return {
        'file[{0}]'.format(idx): open(filename, 'rb')
        for idx, filename in enumerate(files)
    }
|
Utility method for formatting file parameters for transmission
|
def database(self, name=None):
    """
    Connect to a database called `name`.

    Parameters
    ----------
    name : str, optional
        The name of the database to connect to. If ``None``, return
        the database named ``self.current_database``.

    Returns
    -------
    db : Database
        An :class:`ibis.client.Database` instance.

    Notes
    -----
    A new client connection is created only when `name` is not ``None``
    and differs from the current database.
    """
    # Reuse this client when no database switch is required.
    if name is None or name == self.current_database:
        return self.database_class(self.current_database, self)
    # Otherwise open a fresh client of the same type pointed at the
    # requested database.
    new_client = type(self)(
        uri=self.uri,
        user=self.user,
        password=self.password,
        host=self.host,
        port=self.port,
        database=name,
        protocol=self.protocol,
        execution_type=self.execution_type,
    )
    return self.database_class(name, new_client)
|
Connect to a database called `name`.
Parameters
----------
name : str, optional
The name of the database to connect to. If ``None``, return
the database named ``self.current_database``.
Returns
-------
db : Database
An :class:`ibis.client.Database` instance.
Notes
-----
This creates a new connection if `name` is both not ``None`` and not
equal to the current database.
|
def _load_libcrypto():
    '''
    Load OpenSSL libcrypto

    Returns a ctypes handle to the platform-appropriate libcrypto
    shared library.

    :raises OSError: if no libcrypto candidate can be located
    '''
    if sys.platform.startswith('win'):
        # cdll.LoadLibrary on windows requires an 'str' argument
        return cdll.LoadLibrary(str('libeay32')) # future lint: disable=blacklisted-function
    elif getattr(sys, 'frozen', False) and salt.utils.platform.is_smartos():
        # Frozen builds on SmartOS ship libcrypto next to the executable;
        # glob for it there and load the first match.
        return cdll.LoadLibrary(glob.glob(os.path.join(
            os.path.dirname(sys.executable),
            'libcrypto.so*'))[0])
    else:
        lib = find_library('crypto')
        if not lib and sys.platform.startswith('sunos5'):
            # ctypes.util.find_library defaults to 32 bit library path on sunos5, test for 64 bit python execution
            lib = find_library('crypto', sys.maxsize > 2**32)
        if not lib and salt.utils.platform.is_sunos():
            # Solaris-like distribution that use pkgsrc have
            # libraries in a non standard location.
            # (SmartOS, OmniOS, OpenIndiana, ...)
            # This could be /opt/tools/lib (Global Zone)
            # or /opt/local/lib (non-Global Zone), thus the
            # two checks below
            lib = glob.glob('/opt/local/lib/libcrypto.so*') + glob.glob('/opt/tools/lib/libcrypto.so*')
            # Take the first match if any; otherwise fall through to the
            # OSError below.
            lib = lib[0] if lib else None
        if lib:
            return cdll.LoadLibrary(lib)
        raise OSError('Cannot locate OpenSSL libcrypto')
|
Load OpenSSL libcrypto
|
def delete(self, eid, token):
    """
    Delete a library entry.

    :param eid str: Entry ID
    :param token str: OAuth Token
    :return: True on success
    :rtype: bool
    :raises ConnectionError: if the API does not answer with HTTP 204
    """
    # Copy the shared header dict instead of mutating it in place: the
    # original aliased self.header, permanently injecting the caller's
    # Authorization token into every subsequent request.
    final_headers = dict(self.header)
    final_headers['Authorization'] = "Bearer {}".format(token)
    r = requests.delete(self.apiurl + "/library-entries/{}".format(eid), headers=final_headers)
    if r.status_code != 204:
        print(r.status_code)
        raise ConnectionError(r.text)
    return True
|
Delete a library entry.
:param eid str: Entry ID
:param token str: OAuth Token
:return: True or ServerError
:rtype: Bool or Exception
|
def _available_services():
    '''
    Return a dictionary of all available services on the system

    Walks every launchd search path, parses each service plist (falling
    back to ``plutil`` for binary plists) and keys the result by the
    lower-cased job ``Label``.
    '''
    available_services = dict()
    for launch_dir in _launchd_paths():
        for root, dirs, files in salt.utils.path.os_walk(launch_dir):
            for filename in files:
                file_path = os.path.join(root, filename)
                # Follow symbolic links of files in _launchd_paths
                true_path = os.path.realpath(file_path)
                # ignore broken symlinks
                if not os.path.exists(true_path):
                    continue
                try:
                    # This assumes most of the plist files
                    # will be already in XML format
                    with salt.utils.files.fopen(file_path):
                        plist = plistlib.readPlist(
                            salt.utils.data.decode(true_path)
                        )
                except Exception:
                    # If plistlib is unable to read the file we'll need to use
                    # the system provided plutil program to do the conversion
                    cmd = '/usr/bin/plutil -convert xml1 -o - -- "{0}"'.format(
                        true_path)
                    plist_xml = __salt__['cmd.run_all'](
                        cmd, python_shell=False)['stdout']
                    if six.PY2:
                        plist = plistlib.readPlistFromString(plist_xml)
                    else:
                        plist = plistlib.readPlistFromBytes(
                            salt.utils.stringutils.to_bytes(plist_xml))
                try:
                    available_services[plist.Label.lower()] = {
                        'filename': filename,
                        'file_path': true_path,
                        'plist': plist,
                    }
                except AttributeError:
                    # As of MacOS 10.12 there might be plist files without Label key
                    # in the searched directories. As these files do not represent
                    # services, they are not added to the list.
                    pass
    return available_services
|
Return a dictionary of all available services on the system
|
def str2et(time):
    """
    Convert a string representing an epoch to a double precision
    value representing the number of TDB seconds past the J2000
    epoch corresponding to the input epoch.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/str2et_c.html

    :param time: A string representing an epoch.
    :type time: str
    :return: The equivalent value in seconds past J2000, TDB.
    :rtype: float
    """
    # Vectorize: a list of epoch strings maps to a numpy array of floats.
    if isinstance(time, list):
        return numpy.array([str2et(epoch) for epoch in time])
    epoch_p = stypes.stringToCharP(time)
    et = ctypes.c_double()
    libspice.str2et_c(epoch_p, ctypes.byref(et))
    return et.value
|
Convert a string representing an epoch to a double precision
value representing the number of TDB seconds past the J2000
epoch corresponding to the input epoch.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/str2et_c.html
:param time: A string representing an epoch.
:type time: str
:return: The equivalent value in seconds past J2000, TDB.
:rtype: float
|
def send_command_response(self,
                          source: list,
                          command: str,
                          *args,
                          **kwargs):
    """
    Used in bot observer `on_next` method

    Serializes args/kwargs to JSON bytes and sends the multipart frame
    on the command socket, either directly or via the control loop.
    """
    encoded_args = _json.dumps(args).encode('utf8')
    encoded_kwargs = _json.dumps(kwargs).encode('utf8')
    tail = (command.encode('utf8'), encoded_args, encoded_kwargs)
    if isinstance(source, list):
        # Routed reply: prepend the routing envelope before the delimiter.
        frame = (*source, b'', *tail)
    else:
        frame = (b'', *tail)
    if self._run_control_loop:
        self.add_callback(self.command_socket.send_multipart, frame)
    else:
        self.command_socket.send_multipart(frame)
|
Used in bot observer `on_next` method
|
def put_resource(self, resource):
    """
    Adds a resource back to the pool or discards it if the pool is full.

    :param resource: A resource object.
    :raises UnknownResourceError: If resource was not made by the pool.
    """
    tracker = self._get_tracker(resource)
    try:
        self._put(tracker)
    except PoolFullError:
        # Pool is at capacity: drop the resource instead of queueing it.
        self._remove(tracker)
|
Adds a resource back to the pool or discards it if the pool is full.
:param resource: A resource object.
:raises UnknownResourceError: If resource was not made by the
pool.
|
def _setup_ipc(self):
    '''
    Setup the listener IPC pusher.

    Creates a ZeroMQ PUSH socket connected to the listener IPC endpoint
    and applies the configured high-water mark.
    '''
    log.debug('Setting up the listener IPC pusher')
    self.ctx = zmq.Context()
    self.pub = self.ctx.socket(zmq.PUSH)
    self.pub.connect(LST_IPC_URL)
    log.debug('Setting HWM for the listener: %d', self.opts['hwm'])
    try:
        self.pub.setsockopt(zmq.HWM, self.opts['hwm'])
        # zmq 2
    except AttributeError:
        # zmq 3 split HWM into SNDHWM/RCVHWM; fall back accordingly.
        self.pub.setsockopt(zmq.SNDHWM, self.opts['hwm'])
|
Setup the listener ICP pusher.
|
def create_revert(self, revert_to_create, project, repository_id):
    """CreateRevert.
    [Preview API] Starts the operation to create a new branch which reverts changes introduced by either a specific commit or commits that are associated to a pull request.

    :param :class:`<GitAsyncRefOperationParameters> <azure.devops.v5_1.git.models.GitAsyncRefOperationParameters>` revert_to_create:
    :param str project: Project ID or project name
    :param str repository_id: ID of the repository.
    :rtype: :class:`<GitRevert> <azure.devops.v5_1.git.models.GitRevert>`
    """
    # Serialize only the route parameters that were actually supplied.
    route_values = {}
    for route_key, arg_name, value in (
            ('project', 'project', project),
            ('repositoryId', 'repository_id', repository_id)):
        if value is not None:
            route_values[route_key] = self._serialize.url(arg_name, value, 'str')
    content = self._serialize.body(revert_to_create, 'GitAsyncRefOperationParameters')
    response = self._send(http_method='POST',
                          location_id='bc866058-5449-4715-9cf1-a510b6ff193c',
                          version='5.1-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('GitRevert', response)
|
CreateRevert.
[Preview API] Starts the operation to create a new branch which reverts changes introduced by either a specific commit or commits that are associated to a pull request.
:param :class:`<GitAsyncRefOperationParameters> <azure.devops.v5_1.git.models.GitAsyncRefOperationParameters>` revert_to_create:
:param str project: Project ID or project name
:param str repository_id: ID of the repository.
:rtype: :class:`<GitRevert> <azure.devops.v5_1.git.models.GitRevert>`
|
def fast_hash(self):
    """
    Get a CRC32 or xxhash.xxh64 reflecting the DataStore.

    Returns
    ------------
    hashed: int, checksum of data
    """
    # The store's hash is the sum of each contained item's fast hash.
    total = 0
    for item in self.data.values():
        total += item.fast_hash()
    return total
|
Get a CRC32 or xxhash.xxh64 reflecting the DataStore.
Returns
------------
hashed: int, checksum of data
|
def train_batch(self, batch_info, data, target):
    """ Train single batch of data """
    batch_info.optimizer.zero_grad()
    loss = self.feed_batch(batch_info, data, target)
    loss.backward()
    if self.max_grad_norm is not None:
        # Clip only trainable parameters; record the pre-clip norm so
        # callers can monitor gradient magnitude.
        trainable = (p for p in self.model.parameters() if p.requires_grad)
        batch_info['grad_norm'] = torch.nn.utils.clip_grad_norm_(
            trainable,
            max_norm=self.max_grad_norm
        )
    batch_info.optimizer.step()
|
Train single batch of data
|
def surface_normal(self, param):
    """Unit vector perpendicular to the detector surface at ``param``.

    The orientation is chosen as follows:

    - In 2D, the system ``(normal, tangent)`` should be right-handed.
    - In 3D, the system ``(tangent[0], tangent[1], normal)`` should be
      right-handed.

    Here, ``tangent`` is the return value of `surface_deriv` at ``param``.

    Parameters
    ----------
    param : `array-like` or sequence
        Parameter value(s) at which to evaluate. If ``ndim >= 2``,
        a sequence of length `ndim` must be provided.

    Returns
    -------
    normal : `numpy.ndarray`
        Unit vector(s) perpendicular to the detector surface at
        ``param``.  For a single parameter the shape is
        ``(space_ndim,)``; otherwise it is
        ``param.shape + (space_ndim,)`` if `ndim` is 1, and
        ``param.shape[:-1] + (space_ndim,)`` otherwise.
    """
    # Parameter checking is done inside `surface_deriv`.
    if self.ndim == 1 and self.space_ndim == 2:
        # 2D: rotate the tangent by -90 degrees for right-handedness.
        return -perpendicular_vector(self.surface_deriv(param))
    if self.ndim == 2 and self.space_ndim == 3:
        deriv = self.surface_deriv(param)
        if deriv.ndim > 2:
            # Vectorized, need to reshape (N, 2, 3) to (2, N, 3)
            deriv = moveaxis(deriv, -2, 0)
        # Cross product of the two tangents, normalized to unit length.
        normal = np.cross(*deriv, axis=-1)
        normal /= np.linalg.norm(normal, axis=-1, keepdims=True)
        return normal
    raise NotImplementedError(
        'no default implementation of `surface_normal` available '
        'for `ndim = {}` and `space_ndim = {}`'
        ''.format(self.ndim, self.space_ndim))
|
Unit vector perpendicular to the detector surface at ``param``.
The orientation is chosen as follows:
- In 2D, the system ``(normal, tangent)`` should be
right-handed.
- In 3D, the system ``(tangent[0], tangent[1], normal)``
should be right-handed.
Here, ``tangent`` is the return value of `surface_deriv` at
``param``.
Parameters
----------
param : `array-like` or sequence
Parameter value(s) at which to evaluate. If ``ndim >= 2``,
a sequence of length `ndim` must be provided.
Returns
-------
normal : `numpy.ndarray`
Unit vector(s) perpendicular to the detector surface at
``param``.
If ``param`` is a single parameter, an array of shape
``(space_ndim,)`` representing a single vector is returned.
Otherwise the shape of the returned array is
- ``param.shape + (space_ndim,)`` if `ndim` is 1,
- ``param.shape[:-1] + (space_ndim,)`` otherwise.
|
def get_tunnel_info_input_filter_type_filter_by_site_site_name(self, **kwargs):
    """Auto Generated Code

    Build the ``get-tunnel-info`` RPC payload filtered by site name and
    hand it to the callback.

    :param site_name: value for the ``site-name`` leaf (required)
    :param callback: optional callable applied to the built element;
        defaults to ``self._callback``
    :return: whatever the callback returns
    """
    # The original created a throwaway ET.Element("config") that was
    # immediately overwritten; the RPC element itself is the payload root.
    rpc = ET.Element("get_tunnel_info")
    input_el = ET.SubElement(rpc, "input")  # renamed: don't shadow builtin input
    filter_type = ET.SubElement(input_el, "filter-type")
    filter_by_site = ET.SubElement(filter_type, "filter-by-site")
    site_name_el = ET.SubElement(filter_by_site, "site-name")
    site_name_el.text = kwargs.pop('site_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(rpc)
|
Auto Generated Code
|
def needsEncoding(self, s):
    """
    Get whether string I{s} contains special characters.

    @param s: A string to check.
    @type s: str
    @return: True if needs encoding.
    @rtype: boolean
    """
    if not isinstance(s, str):
        # Non-string input yields no answer (historical behavior:
        # the original fell off the end and returned None).
        return None
    return any(ch in s for ch in self.special)
|
Get whether string I{s} contains special characters.
@param s: A string to check.
@type s: str
@return: True if needs encoding.
@rtype: boolean
|
def dump_to_path(self, cnf, filepath, **kwargs):
    """
    Dump config 'cnf' to a file 'filepath'.

    :param cnf: Configuration data to dump
    :param filepath: Config file path
    :param kwargs: optional keyword parameters to be sanitized :: dict
    """
    with self.wopen(filepath) as out:
        serialized = self.dump_to_string(cnf, **kwargs)
        out.write(serialized)
|
Dump config 'cnf' to a file 'filepath'.
:param cnf: Configuration data to dump
:param filepath: Config file path
:param kwargs: optional keyword parameters to be sanitized :: dict
|
def delete(self, file_, delete_file=True):
    """
    Remove all key-value store references for ``file_`` and, when
    ``delete_file`` is True, delete the underlying file as well.
    """
    image = ImageFile(file_)
    if delete_file:
        # Remove the actual file from storage first.
        image.delete()
    # Always drop the key-value store references.
    default.kvstore.delete(image)
|
Deletes file_ references in Key Value store and optionally the file_
it self.
|
def determine_types(self):
    """ Determine ES type names from request data.

    In particular `request.matchdict['collections']` is used to
    determine types names. Its value is comma-separated sequence
    of collection names under which views have been registered.
    """
    from nefertari.elasticsearch import ES
    resources = self.get_resources(self.get_collections())
    models = {res.view.Model for res in resources}
    # Only models that exist and opted in to indexing are searchable.
    indexed = (mdl for mdl in models
               if mdl and getattr(mdl, '_index_enabled', False))
    return [ES.src2type(mdl.__name__) for mdl in indexed]
|
Determine ES type names from request data.
In particular `request.matchdict['collections']` is used to
determine types names. Its value is comma-separated sequence
of collection names under which views have been registered.
|
def calc_fft_with_PyCUDA(Signal):
    """
    Calculates the FFT of the passed signal by using
    the scikit-cuda library which relies on PyCUDA

    Parameters
    ----------
    Signal : ndarray
        Signal to be transformed into Fourier space

    Returns
    -------
    Signalfft : ndarray
        Array containing the signal's FFT
    """
    print("starting fft")
    signal32 = Signal.astype(_np.float32)
    n = len(signal32)
    signal_gpu = _gpuarray.to_gpu(signal32)
    # Real-input FFT only yields the n//2 + 1 non-negative frequency bins.
    fft_gpu = _gpuarray.empty(n // 2 + 1, _np.complex64)
    plan = _Plan(signal32.shape, _np.float32, _np.complex64)
    _fft(signal_gpu, fft_gpu, plan)
    half_spectrum = fft_gpu.get()  # only the first n//2 + 1 bins
    # Rebuild the full spectrum by mirroring the conjugate of the interior
    # bins (a real-input FFT is Hermitian-symmetric).
    Signalfft = _np.hstack(
        (half_spectrum, _np.conj(_np.flipud(half_spectrum[1:n // 2]))))
    print("fft done")
    return Signalfft
|
Calculates the FFT of the passed signal by using
the scikit-cuda library which relies on PyCUDA
Parameters
----------
Signal : ndarray
Signal to be transformed into Fourier space
Returns
-------
Signalfft : ndarray
Array containing the signal's FFT
|
def makeStylesheetResource(self, path, registry):
    """
    Return a resource for the css at the given path with its urls rewritten
    based on self.rootURL.
    """
    file_resource = File(path)
    return StylesheetRewritingResourceWrapper(
        file_resource, self.installedOfferingNames, self.rootURL)
|
Return a resource for the css at the given path with its urls rewritten
based on self.rootURL.
|
def update(self):
    """
    Push local changes to this function up to the parent.

    :return: result of ``self.parent.update``
    """
    if self._owner_changed:
        self.update_owner(self.owner)
    # Refresh the cached resource-name list before delegating upward.
    self._resources = [res.name for res in self.resources]
    return self.parent.update(self)
|
Update this function.
:return: None
|
def _add_task(self, tile_address, coroutine):
    """Add a task from within the event loop.

    All tasks are associated with a tile so that they can be cleanly
    stopped when that tile is reset.
    """
    self.verify_calling_thread(True, "_add_task is not thread safe")
    # Group tasks per tile so a tile reset can cancel just its own tasks.
    tile_tasks = self._tasks.setdefault(tile_address, [])
    task = self._loop.create_task(coroutine)
    tile_tasks.append(task)
|
Add a task from within the event loop.
All tasks are associated with a tile so that they can be cleanly
stopped when that tile is reset.
|
def check(self, line_info):
    """If the ifun is magic, and automagic is on, run it. Note: normal,
    non-auto magic would already have been triggered via '%' in
    check_esc_chars. This just checks for automagic. Also, before
    triggering the magic handler, make sure that there is nothing in the
    user namespace which could shadow it."""
    shell = self.shell
    if not shell.automagic or not shell.find_magic(line_info.ifun):
        return None
    # Multi-line specials may be disabled during continuation prompts.
    if line_info.continue_prompt and not self.prefilter_manager.multi_line_specials:
        return None
    # Don't fire if a user-namespace object would shadow the magic name.
    head = line_info.ifun.split('.', 1)[0]
    if is_shadowed(head, shell):
        return None
    return self.prefilter_manager.get_handler_by_name('magic')
|
If the ifun is magic, and automagic is on, run it. Note: normal,
non-auto magic would already have been triggered via '%' in
check_esc_chars. This just checks for automagic. Also, before
triggering the magic handler, make sure that there is nothing in the
user namespace which could shadow it.
|
def restore(delta, which):
    r"""
    Generate one of the two sequences that generated a delta.

    Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
    lines originating from file 1 or 2 (parameter `which`), stripping off
    line prefixes.

    Examples:

    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
    ...              'ore\ntree\nemu\n'.splitlines(1))
    >>> diff = list(diff)
    >>> print(''.join(restore(diff, 1)), end='')
    one
    two
    three
    >>> print(''.join(restore(diff, 2)), end='')
    ore
    tree
    emu
    """
    try:
        # Lines from file 1 are prefixed "- ", lines from file 2 "+ ".
        tag = {1: "- ", 2: "+ "}[int(which)]
    except KeyError:
        # Python-3-compatible raise; the original used the Python-2-only
        # ``raise ValueError, (...)`` statement form.
        raise ValueError('unknown delta choice (must be 1 or 2): %r'
                         % which)
    # Common lines carry a two-space prefix (difflib convention); a
    # one-space prefix can never match a two-character slice.
    prefixes = ("  ", tag)
    for line in delta:
        if line[:2] in prefixes:
            yield line[2:]
|
r"""
Generate one of the two sequences that generated a delta.
Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
lines originating from file 1 or 2 (parameter `which`), stripping off line
prefixes.
Examples:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
... 'ore\ntree\nemu\n'.splitlines(1))
>>> diff = list(diff)
>>> print ''.join(restore(diff, 1)),
one
two
three
>>> print ''.join(restore(diff, 2)),
ore
tree
emu
|
def process_mutect_vcf(job, mutect_vcf, work_dir, univ_options):
    """
    Process the MuTect vcf for accepted calls.

    :param toil.fileStore.FileID mutect_vcf: fsID for a MuTect generated chromosome vcf
    :param str work_dir: Working directory
    :param dict univ_options: Dict of universal options used by almost all tools
    :return: Path to the processed vcf
    :rtype: str
    """
    local_vcf = job.fileStore.readGlobalFile(mutect_vcf)
    out_path = local_vcf + 'mutect_parsed.tmp'
    with open(local_vcf, 'r') as infile, open(out_path, 'w') as outfile:
        for raw_line in infile:
            record = raw_line.strip()
            if record.startswith('#'):
                # Header lines pass through untouched.
                print(record, file=outfile)
                continue
            fields = record.split('\t')
            # Keep every call MuTect did not explicitly reject
            # (FILTER is the 7th VCF column).
            if fields[6] != 'REJECT':
                print('\t'.join(fields), file=outfile)
    return out_path
|
Process the MuTect vcf for accepted calls.
:param toil.fileStore.FileID mutect_vcf: fsID for a MuTect generated chromosome vcf
:param str work_dir: Working directory
:param dict univ_options: Dict of universal options used by almost all tools
:return: Path to the processed vcf
:rtype: str
|
def notebook_executed(pth):
    """Determine whether the notebook at `pth` has been executed.

    A notebook counts as executed when every code cell has a non-None
    ``execution_count``.
    """
    nb = nbformat.read(pth, as_version=4)
    # Iterate the cells directly rather than indexing by position
    # (replaces the range(len(...)) anti-idiom).
    return all(cell.cell_type != 'code' or cell.execution_count is not None
               for cell in nb['cells'])
|
Determine whether the notebook at `pth` has been executed.
|
def get_instance(self, payload):
    """
    Build an instance of ThisMonthInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
    :rtype: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
    """
    account_sid = self._solution['account_sid']
    return ThisMonthInstance(self._version, payload, account_sid=account_sid)
|
Build an instance of ThisMonthInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
:rtype: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
|
def add_child_bin(self, bin_id, child_id):
    """Adds a child to a bin.

    arg: bin_id (osid.id.Id): the ``Id`` of a bin
    arg: child_id (osid.id.Id): the ``Id`` of the new child
    raise: AlreadyExists - ``bin_id`` is already a parent of
        ``child_id``
    raise: NotFound - ``bin_id`` or ``child_id`` not found
    raise: NullArgument - ``bin_id`` or ``child_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchyDesignSession.add_child_bin_template
    # No catalog session configured: talk to the hierarchy session directly.
    if self._catalog_session is None:
        return self._hierarchy_session.add_child(id_=bin_id, child_id=child_id)
    # Otherwise delegate to the catalog session.
    return self._catalog_session.add_child_catalog(catalog_id=bin_id, child_id=child_id)
|
Adds a child to a bin.
arg: bin_id (osid.id.Id): the ``Id`` of a bin
arg: child_id (osid.id.Id): the ``Id`` of the new child
raise: AlreadyExists - ``bin_id`` is already a parent of
``child_id``
raise: NotFound - ``bin_id`` or ``child_id`` not found
raise: NullArgument - ``bin_id`` or ``child_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
|
def _hosts_in_unenumerated_pattern(self, pattern):
""" Get all host names matching the pattern """
hosts = {}
# ignore any negative checks here, this is handled elsewhere
pattern = pattern.replace("!","").replace("&", "")
groups = self.get_groups()
for group in groups:
for host in group.get_hosts():
if pattern == 'all' or self._match(group.name, pattern) or self._match(host.name, pattern):
hosts[host.name] = host
return sorted(hosts.values(), key=lambda x: x.name)
|
Get all host names matching the pattern
|
def _set_notification(self, conn, char, enabled, timeout=1.0):
"""Enable/disable notifications on a GATT characteristic
Args:
conn (int): The connection handle for the device we should interact with
char (dict): The characteristic we should modify
enabled (bool): Should we enable or disable notifications
timeout (float): How long to wait before failing
"""
if 'client_configuration' not in char:
return False, {'reason': 'Cannot enable notification without a client configuration attribute for characteristic'}
props = char['properties']
if not props.notify:
return False, {'reason': 'Cannot enable notification on a characteristic that does not support it'}
value = char['client_configuration']['value']
#Check if we don't have to do anything
current_state = bool(value & (1 << 0))
if current_state == enabled:
return
if enabled:
value |= 1 << 0
else:
value &= ~(1 << 0)
char['client_configuration']['value'] = value
valarray = struct.pack("<H", value)
return self._write_handle(conn, char['client_configuration']['handle'], True, valarray, timeout)
|
Enable/disable notifications on a GATT characteristic
Args:
conn (int): The connection handle for the device we should interact with
char (dict): The characteristic we should modify
enabled (bool): Should we enable or disable notifications
timeout (float): How long to wait before failing
|
def gmst(utc_time):
    """Greenwich mean sidereal utc_time, in radians.

    As defined in the AIAA 2006 implementation:
    http://www.celestrak.com/publications/AIAA/2006-6753/

    :param utc_time: UTC time, in whatever form `jdays2000` accepts
    :return: GMST angle in radians, wrapped into [0, 2*pi)
    """
    # Julian centuries since J2000.0.
    ut1 = jdays2000(utc_time) / 36525.0
    # GMST in seconds of time (AIAA 2006-6753 / Vallado):
    #   67310.54841 + (876600*3600 + 8640184.812866)*T + 0.093104*T^2 - 6.2e-6*T^3
    # The cubic coefficient is 6.2e-6; the previous code wrote it as
    # `6.2 * 10e-6` (= 6.2e-5), a factor-of-ten error.  The numerical impact
    # is negligible for |T| < 1 century, but the published formula uses 6.2e-6.
    theta = 67310.54841 + ut1 * (876600 * 3600 + 8640184.812866 + ut1 *
                                 (0.093104 - ut1 * 6.2e-6))
    # 240 seconds of time per degree (86400 s / 360 deg), then wrap to [0, 2*pi).
    return np.deg2rad(theta / 240.0) % (2 * np.pi)
|
Greenwich mean sidereal utc_time, in radians.
As defined in the AIAA 2006 implementation:
http://www.celestrak.com/publications/AIAA/2006-6753/
|
def set_connection(connection=defaults.sqlalchemy_connection_string_default):
    """Set the connection string for sqlalchemy and write it to the config file.

    .. code-block:: python

        import pyhgnc
        pyhgnc.set_connection('mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}')

    .. hint::

        valid connection strings

        - mysql+pymysql://user:passwd@localhost/database?charset=utf8
        - postgresql://scott:tiger@localhost/mydatabase
        - mssql+pyodbc://user:passwd@database
        - oracle://user:passwd@127.0.0.1:1521/database
        - Linux: sqlite:////absolute/path/to/database.db
        - Windows: sqlite:///C:\\path\\to\\database.db

    :param str connection: sqlalchemy connection string
    """
    config_path = defaults.config_file_path
    config = RawConfigParser()

    if not os.path.exists(config_path):
        # First run: create the file with a fresh [database] section.
        config['database'] = {'sqlalchemy_connection_string': connection}
        with open(config_path, 'w') as config_file:
            config.write(config_file)
        log.info('create configuration file {}'.format(config_path))
    else:
        config.read(config_path)
        # Guard against a pre-existing config file that lacks the section;
        # RawConfigParser.set raises NoSectionError otherwise.
        if not config.has_section('database'):
            config.add_section('database')
        config.set('database', 'sqlalchemy_connection_string', connection)
        with open(config_path, 'w') as config_file:
            config.write(config_file)
|
Set the connection string for sqlalchemy and write it to the config file.
.. code-block:: python
import pyhgnc
pyhgnc.set_connection('mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}')
.. hint::
valid connection strings
- mysql+pymysql://user:passwd@localhost/database?charset=utf8
- postgresql://scott:tiger@localhost/mydatabase
- mssql+pyodbc://user:passwd@database
- oracle://user:passwd@127.0.0.1:1521/database
- Linux: sqlite:////absolute/path/to/database.db
- Windows: sqlite:///C:\path\to\database.db
:param str connection: sqlalchemy connection string
|
def star(self) -> snug.Query[bool]:
    """star this repo"""
    # Starring is a bodiless PUT; GitHub answers 204 No Content on success.
    url = BASE + '/user/starred/' + f'{self.owner}/{self.name}'
    response = yield snug.PUT(url)
    return response.status_code == 204
|
star this repo
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.