| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/engine/shared_eng_sorosuub_tuned_l_337_ion_engine.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","eng_sorosuub_tuned_l_337_ion_engine_n")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "87ad0b5b8a6b8c35d789b11bfeefd19c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 106,
"avg_line_length": 28.46153846153846,
"alnum_prop": 0.7162162162162162,
"repo_name": "obi-two/Rebelion",
"id": "7da68a7b7088b39a11a075204642b771d10753c6",
"size": "515",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/ship/components/engine/shared_eng_sorosuub_tuned_l_337_ion_engine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
#!/usr/bin/python
#//////////////////////////////////////////////////////////////////////////////
#
# Copyright (c) 2007,2009 Daniel Adler <dadler@uni-goettingen.de>,
# Tassilo Philipp <tphilipp@potion-studios.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
#//////////////////////////////////////////////////////////////////////////////
import sys
# parameters
nargs = 3
# all types except float
types = ["DCint","DClonglong","DCdouble","DCpointer"]
sigs = ['i','l','d','p']
# generator
ntypes = len(types)
sys.stderr.write("/* auto generated by mkcase (on stderr) */\n");
sys.stderr.write("".join(["#define NARGS ",str(nargs),"\n"]))
sys.stderr.write("".join(["#define NTYPES ",str(ntypes),"\n"]))
def powerfact(x, n):
if n==0:
return 0
else:
return x**n+powerfact(x,n-1)
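# e.g. powerfact(4, 3) = 4**3 + 4**2 + 4**1 + 0 = 84: the number of possible
# signatures using 1..nargs arguments drawn from the ntypes types above.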
x = 0
end = powerfact(ntypes,nargs)+1
sys.stdout.write("/* auto generated by mkcase.py (on stdout) */\n");
while x < end:
args = [str(x)]
sig = ["f_"]
pos = 0
y = x
while y > 0:
s = (y-1) % ntypes
        y = (y-1) // ntypes  # floor division (Python 2's "/" truncated implicitly)
args += [ types[s] ]
        sig += [ sigs[s] ]
pos += 1
sig = "".join(sig)
args += [ sig ]
args = ",".join(args)
sys.stdout.write( "".join(["VF",str(pos),"(",args,")\n"]) )
x += 1
|
{
"content_hash": "1c327bc047f2706e0e34fe7c0e72ffb6",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 79,
"avg_line_length": 29.507462686567163,
"alnum_prop": 0.5892766818411735,
"repo_name": "atsushieno/jenoa",
"id": "3fea7db3ea35d92a468e2dfd1cac13807f6e71cc",
"size": "1977",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "jni/dyncall/test/ellipsis/mkcase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "16690"
},
{
"name": "C",
"bytes": "783081"
},
{
"name": "C++",
"bytes": "91970"
},
{
"name": "Java",
"bytes": "52442"
},
{
"name": "Lua",
"bytes": "9535"
},
{
"name": "Python",
"bytes": "22249"
},
{
"name": "Shell",
"bytes": "3451"
}
],
"symlink_target": ""
}
|
"""
Volume driver for NetApp NFS storage.
"""
import os
from oslo_log import log as logging
from oslo_log import versionutils
import six
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap.performance import perf_7mode
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
@interface.volumedriver
class NetApp7modeNfsDriver(nfs_base.NetAppNfsDriver):
"""NetApp NFS driver for Data ONTAP (7-mode)."""
# ThirdPartySystems wiki page
CI_WIKI_NAME = "NetApp_CI"
def __init__(self, *args, **kwargs):
super(NetApp7modeNfsDriver, self).__init__(*args, **kwargs)
self.driver_name = 'NetApp_NFS_7mode_direct'
self.driver_mode = '7mode'
self.configuration.append_config_values(na_opts.netapp_7mode_opts)
def do_setup(self, context):
"""Do the customized set up on client if any for 7 mode."""
super(NetApp7modeNfsDriver, self).do_setup(context)
self.zapi_client = client_7mode.Client(
transport_type=self.configuration.netapp_transport_type,
username=self.configuration.netapp_login,
password=self.configuration.netapp_password,
hostname=self.configuration.netapp_server_hostname,
port=self.configuration.netapp_server_port,
vfiler=self.configuration.netapp_vfiler)
self.perf_library = perf_7mode.Performance7modeLibrary(
self.zapi_client)
# This driver has been marked 'deprecated' in the Ocata release and
# can be removed in Queens.
msg = _("The 7-mode Data ONTAP driver is deprecated and will be "
"removed in a future release.")
versionutils.report_deprecated_feature(LOG, msg)
def check_for_setup_error(self):
"""Checks if setup occurred properly."""
api_version = self.zapi_client.get_ontapi_version()
if api_version:
major, minor = api_version
if major == 1 and minor < 9:
msg = _("Unsupported Data ONTAP version."
" Data ONTAP version 7.3.1 and above is supported.")
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = _("Data ONTAP API version could not be determined.")
raise exception.VolumeBackendAPIException(data=msg)
self._add_looping_tasks()
super(NetApp7modeNfsDriver, self).check_for_setup_error()
def _add_looping_tasks(self):
"""Add tasks that need to be executed at a fixed interval."""
super(NetApp7modeNfsDriver, self)._add_looping_tasks()
def _handle_ems_logging(self):
"""Log autosupport messages."""
base_ems_message = dot_utils.build_ems_log_message_0(
self.driver_name, self.app_version, self.driver_mode)
self.zapi_client.send_ems_log_message(base_ems_message)
pool_ems_message = dot_utils.build_ems_log_message_1(
self.driver_name, self.app_version, None,
self._get_backing_flexvol_names(), [])
self.zapi_client.send_ems_log_message(pool_ems_message)
def _clone_backing_file_for_volume(self, volume_name, clone_name,
volume_id, share=None,
is_snapshot=False,
source_snapshot=None):
"""Clone backing file for Cinder volume.
        :param is_snapshot: Not used, present for method signature consistency
"""
(_host_ip, export_path) = self._get_export_ip_path(volume_id, share)
storage_path = self.zapi_client.get_actual_path_for_export(export_path)
target_path = '%s/%s' % (storage_path, clone_name)
self.zapi_client.clone_file('%s/%s' % (storage_path, volume_name),
target_path, source_snapshot)
def _update_volume_stats(self):
"""Retrieve stats info from vserver."""
self._ensure_shares_mounted()
LOG.debug('Updating volume stats')
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.driver_name
data['vendor_name'] = 'NetApp'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'nfs'
data['pools'] = self._get_pool_stats(
filter_function=self.get_filter_function(),
goodness_function=self.get_goodness_function())
data['sparse_copy_volume'] = True
self._spawn_clean_cache_job()
self._stats = data
def _get_pool_stats(self, filter_function=None, goodness_function=None):
"""Retrieve pool (i.e. NFS share) stats info from SSC volumes."""
pools = []
self.perf_library.update_performance_cache()
for nfs_share in self._mounted_shares:
capacity = self._get_share_capacity_info(nfs_share)
pool = dict()
pool['pool_name'] = nfs_share
pool['QoS_support'] = False
pool['multiattach'] = True
pool.update(capacity)
thick = not self.configuration.nfs_sparsed_volumes
pool['thick_provisioning_support'] = thick
pool['thin_provisioning_support'] = not thick
utilization = self.perf_library.get_node_utilization()
pool['utilization'] = na_utils.round_down(utilization, '0.01')
pool['filter_function'] = filter_function
pool['goodness_function'] = goodness_function
pool['consistencygroup_support'] = True
pools.append(pool)
return pools
def _shortlist_del_eligible_files(self, share, old_files):
"""Prepares list of eligible files to be deleted from cache."""
file_list = []
(_, export_path) = self._get_export_ip_path(share=share)
exported_volume = self.zapi_client.get_actual_path_for_export(
export_path)
for old_file in old_files:
path = os.path.join(exported_volume, old_file)
u_bytes = self.zapi_client.get_file_usage(path)
file_list.append((old_file, u_bytes))
LOG.debug('Shortlisted files eligible for deletion: %s', file_list)
return file_list
def _is_filer_ip(self, ip):
"""Checks whether ip is on the same filer."""
try:
ifconfig = self.zapi_client.get_ifconfig()
if_info = ifconfig.get_child_by_name('interface-config-info')
if if_info:
ifs = if_info.get_children()
for intf in ifs:
v4_addr = intf.get_child_by_name('v4-primary-address')
if v4_addr:
ip_info = v4_addr.get_child_by_name('ip-address-info')
if ip_info:
address = ip_info.get_child_content('address')
if ip == address:
return True
else:
continue
except Exception:
return False
return False
def _share_match_for_ip(self, ip, shares):
"""Returns the share that is served by ip.
Multiple shares can have same dir path but
can be served using different ips. It finds the
share which is served by ip on same nfs server.
"""
if self._is_filer_ip(ip) and shares:
for share in shares:
ip_sh = share.split(':')[0]
if self._is_filer_ip(ip_sh):
LOG.debug('Share match found for ip %s', ip)
return share
LOG.debug('No share match found for ip %s', ip)
return None
def _is_share_clone_compatible(self, volume, share):
"""Checks if share is compatible with volume to host its clone."""
thin = self.configuration.nfs_sparsed_volumes
return self._share_has_space_for_clone(share, volume['size'], thin)
def _check_volume_type(self, volume, share, file_name, extra_specs):
"""Matches a volume type for share file."""
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
if extra_specs else None
if qos_policy_group:
raise exception.ManageExistingVolumeTypeMismatch(
reason=(_("Setting file qos policy group is not supported"
" on this storage family and ontap version.")))
volume_type = na_utils.get_volume_type_from_volume(volume)
if volume_type and 'qos_spec_id' in volume_type:
raise exception.ManageExistingVolumeTypeMismatch(
reason=_("QoS specs are not supported"
" on this storage family and ONTAP version."))
def _do_qos_for_volume(self, volume, extra_specs, cleanup=False):
"""Set QoS policy on backend from volume type information."""
# 7-mode DOT does not support QoS.
return
def _get_volume_model_update(self, volume):
"""Provide any updates necessary for a volume being created/managed."""
def _get_backing_flexvol_names(self):
"""Returns a list of backing flexvol names."""
flexvol_names = []
for nfs_share in self._mounted_shares:
flexvol_name = nfs_share.rsplit('/', 1)[1]
flexvol_names.append(flexvol_name)
LOG.debug("Found flexvol %s", flexvol_name)
return flexvol_names
def _get_flexvol_names_from_hosts(self, hosts):
"""Returns a set of flexvol names."""
flexvols = set()
for host in hosts:
pool_name = volume_utils.extract_host(host, level='pool')
flexvol_name = pool_name.rsplit('/', 1)[1]
flexvols.add(flexvol_name)
return flexvols
@utils.trace_method
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Delete files backing each snapshot in the cgsnapshot.
:return: An implicit update of snapshot models that the manager will
interpret and subsequently set the model state to deleted.
"""
for snapshot in snapshots:
self._delete_file(snapshot['volume_id'], snapshot['name'])
LOG.debug("Snapshot %s deletion successful", snapshot['name'])
return None, None
|
{
"content_hash": "261166ecd63f4c9094f6d00dbfabd2f9",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 79,
"avg_line_length": 41.09848484848485,
"alnum_prop": 0.6068202764976959,
"repo_name": "ge0rgi/cinder",
"id": "478879c4a98693e63e9a6713d59e04b49de10d5f",
"size": "11825",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/ocata",
"path": "cinder/volume/drivers/netapp/dataontap/nfs_7mode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19963591"
}
],
"symlink_target": ""
}
|
""" History related magics and functionality """
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import atexit
import datetime
import os
import re
try:
import sqlite3
except ImportError:
try:
from pysqlite2 import dbapi2 as sqlite3
except ImportError:
sqlite3 = None
import threading
# Our own packages
from traitlets.config.configurable import Configurable
from decorator import decorator
from IPython.utils.decorators import undoc
from IPython.utils.path import locate_profile
from IPython.utils import py3compat
from traitlets import (
Any, Bool, Dict, Instance, Integer, List, Unicode, TraitError,
)
from IPython.utils.warn import warn
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
@undoc
class DummyDB(object):
"""Dummy DB that will act as a black hole for history.
Only used in the absence of sqlite"""
def execute(*args, **kwargs):
return []
def commit(self, *args, **kwargs):
pass
def __enter__(self, *args, **kwargs):
pass
def __exit__(self, *args, **kwargs):
pass
@decorator
def needs_sqlite(f, self, *a, **kw):
"""Decorator: return an empty list in the absence of sqlite."""
if sqlite3 is None or not self.enabled:
return []
else:
return f(self, *a, **kw)
if sqlite3 is not None:
DatabaseError = sqlite3.DatabaseError
OperationalError = sqlite3.OperationalError
else:
@undoc
class DatabaseError(Exception):
"Dummy exception when sqlite could not be imported. Should never occur."
@undoc
class OperationalError(Exception):
"Dummy exception when sqlite could not be imported. Should never occur."
@decorator
def catch_corrupt_db(f, self, *a, **kw):
"""A decorator which wraps HistoryAccessor method calls to catch errors from
a corrupt SQLite database, move the old database out of the way, and create
a new one.
"""
try:
return f(self, *a, **kw)
except (DatabaseError, OperationalError):
if os.path.isfile(self.hist_file):
# Try to move the file out of the way
base,ext = os.path.splitext(self.hist_file)
newpath = base + '-corrupt' + ext
os.rename(self.hist_file, newpath)
self.init_db()
print("ERROR! History file wasn't a valid SQLite database.",
"It was moved to %s" % newpath, "and a new file created.")
return []
else:
# The hist_file is probably :memory: or something else.
raise
class HistoryAccessorBase(Configurable):
"""An abstract class for History Accessors """
def get_tail(self, n=10, raw=True, output=False, include_latest=False):
raise NotImplementedError
def search(self, pattern="*", raw=True, search_raw=True,
output=False, n=None, unique=False):
raise NotImplementedError
def get_range(self, session, start=1, stop=None, raw=True,output=False):
raise NotImplementedError
def get_range_by_str(self, rangestr, raw=True, output=False):
raise NotImplementedError
class HistoryAccessor(HistoryAccessorBase):
"""Access the history database without adding to it.
This is intended for use by standalone history tools. IPython shells use
HistoryManager, below, which is a subclass of this."""
# String holding the path to the history file
hist_file = Unicode(config=True,
help="""Path to file to use for SQLite history database.
By default, IPython will put the history database in the IPython
profile directory. If you would rather share one history among
profiles, you can set this value in each, so that they are consistent.
Due to an issue with fcntl, SQLite is known to misbehave on some NFS
mounts. If you see IPython hanging, try setting this to something on a
local disk, e.g::
ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
""")
enabled = Bool(True, config=True,
help="""enable the SQLite history
set enabled=False to disable the SQLite history,
in which case there will be no stored history, no SQLite connection,
and no background saving thread. This may be necessary in some
threaded environments where IPython is embedded.
"""
)
connection_options = Dict(config=True,
help="""Options for configuring the SQLite connection
These options are passed as keyword args to sqlite3.connect
        when establishing database connections.
"""
)
# The SQLite database
db = Any()
def _db_changed(self, name, old, new):
"""validate the db, since it can be an Instance of two different types"""
connection_types = (DummyDB,)
if sqlite3 is not None:
connection_types = (DummyDB, sqlite3.Connection)
if not isinstance(new, connection_types):
msg = "%s.db must be sqlite3 Connection or DummyDB, not %r" % \
(self.__class__.__name__, new)
raise TraitError(msg)
def __init__(self, profile='default', hist_file=u'', **traits):
"""Create a new history accessor.
Parameters
----------
profile : str
The name of the profile from which to open history.
hist_file : str
Path to an SQLite history database stored by IPython. If specified,
hist_file overrides profile.
config : :class:`~traitlets.config.loader.Config`
Config object. hist_file can also be set through this.
"""
# We need a pointer back to the shell for various tasks.
super(HistoryAccessor, self).__init__(**traits)
# defer setting hist_file from kwarg until after init,
# otherwise the default kwarg value would clobber any value
# set by config
if hist_file:
self.hist_file = hist_file
if self.hist_file == u'':
# No one has set the hist_file, yet.
self.hist_file = self._get_hist_file_name(profile)
if sqlite3 is None and self.enabled:
warn("IPython History requires SQLite, your history will not be saved")
self.enabled = False
self.init_db()
def _get_hist_file_name(self, profile='default'):
"""Find the history file for the given profile name.
This is overridden by the HistoryManager subclass, to use the shell's
active profile.
Parameters
----------
profile : str
The name of a profile which has a history file.
"""
return os.path.join(locate_profile(profile), 'history.sqlite')
@catch_corrupt_db
def init_db(self):
"""Connect to the database, and create tables if necessary."""
if not self.enabled:
self.db = DummyDB()
return
# use detect_types so that timestamps return datetime objects
kwargs = dict(detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
kwargs.update(self.connection_options)
self.db = sqlite3.connect(self.hist_file, **kwargs)
self.db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
primary key autoincrement, start timestamp,
end timestamp, num_cmds integer, remark text)""")
self.db.execute("""CREATE TABLE IF NOT EXISTS history
(session integer, line integer, source text, source_raw text,
PRIMARY KEY (session, line))""")
# Output history is optional, but ensure the table's there so it can be
# enabled later.
self.db.execute("""CREATE TABLE IF NOT EXISTS output_history
(session integer, line integer, output text,
PRIMARY KEY (session, line))""")
self.db.commit()
def writeout_cache(self):
"""Overridden by HistoryManager to dump the cache before certain
database lookups."""
pass
## -------------------------------
## Methods for retrieving history:
## -------------------------------
def _run_sql(self, sql, params, raw=True, output=False):
"""Prepares and runs an SQL query for the history database.
Parameters
----------
sql : str
Any filtering expressions to go after SELECT ... FROM ...
params : tuple
Parameters passed to the SQL query (to replace "?")
raw, output : bool
See :meth:`get_range`
Returns
-------
Tuples as :meth:`get_range`
"""
toget = 'source_raw' if raw else 'source'
sqlfrom = "history"
if output:
sqlfrom = "history LEFT JOIN output_history USING (session, line)"
toget = "history.%s, output_history.output" % toget
cur = self.db.execute("SELECT session, line, %s FROM %s " %\
(toget, sqlfrom) + sql, params)
if output: # Regroup into 3-tuples, and parse JSON
return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur)
return cur
@needs_sqlite
@catch_corrupt_db
def get_session_info(self, session):
"""Get info about a session.
Parameters
----------
session : int
Session number to retrieve.
Returns
-------
session_id : int
Session ID number
start : datetime
Timestamp for the start of the session.
end : datetime
Timestamp for the end of the session, or None if IPython crashed.
num_cmds : int
Number of commands run, or None if IPython crashed.
remark : unicode
A manually set description.
"""
query = "SELECT * from sessions where session == ?"
return self.db.execute(query, (session,)).fetchone()
@catch_corrupt_db
def get_last_session_id(self):
"""Get the last session ID currently in the database.
Within IPython, this should be the same as the value stored in
:attr:`HistoryManager.session_number`.
"""
for record in self.get_tail(n=1, include_latest=True):
return record[0]
@catch_corrupt_db
def get_tail(self, n=10, raw=True, output=False, include_latest=False):
"""Get the last n lines from the history database.
Parameters
----------
n : int
The number of lines to get
raw, output : bool
See :meth:`get_range`
include_latest : bool
            If False (default), n+1 lines are fetched, and the latest one
            is discarded, so that the command which invoked this function is
            not itself included in the results.
Returns
-------
Tuples as :meth:`get_range`
"""
self.writeout_cache()
if not include_latest:
n += 1
cur = self._run_sql("ORDER BY session DESC, line DESC LIMIT ?",
(n,), raw=raw, output=output)
if not include_latest:
return reversed(list(cur)[1:])
return reversed(list(cur))
@catch_corrupt_db
def search(self, pattern="*", raw=True, search_raw=True,
output=False, n=None, unique=False):
"""Search the database using unix glob-style matching (wildcards
* and ?).
Parameters
----------
pattern : str
The wildcarded pattern to match when searching
search_raw : bool
If True, search the raw input, otherwise, the parsed input
raw, output : bool
See :meth:`get_range`
n : None or int
If an integer is given, it defines the limit of
returned entries.
unique : bool
When it is true, return only unique entries.
Returns
-------
Tuples as :meth:`get_range`
"""
tosearch = "source_raw" if search_raw else "source"
if output:
tosearch = "history." + tosearch
self.writeout_cache()
sqlform = "WHERE %s GLOB ?" % tosearch
params = (pattern,)
if unique:
sqlform += ' GROUP BY {0}'.format(tosearch)
if n is not None:
sqlform += " ORDER BY session DESC, line DESC LIMIT ?"
params += (n,)
elif unique:
sqlform += " ORDER BY session, line"
cur = self._run_sql(sqlform, params, raw=raw, output=output)
if n is not None:
return reversed(list(cur))
return cur
@catch_corrupt_db
def get_range(self, session, start=1, stop=None, raw=True,output=False):
"""Retrieve input by session.
Parameters
----------
session : int
Session number to retrieve.
start : int
First line to retrieve.
stop : int
End of line range (excluded from output itself). If None, retrieve
to the end of the session.
raw : bool
If True, return untranslated input
output : bool
If True, attempt to include output. This will be 'real' Python
objects for the current session, or text reprs from previous
sessions if db_log_output was enabled at the time. Where no output
is found, None is used.
Returns
-------
entries
An iterator over the desired lines. Each line is a 3-tuple, either
(session, line, input) if output is False, or
(session, line, (input, output)) if output is True.
"""
if stop:
lineclause = "line >= ? AND line < ?"
params = (session, start, stop)
else:
lineclause = "line>=?"
params = (session, start)
return self._run_sql("WHERE session==? AND %s" % lineclause,
params, raw=raw, output=output)
def get_range_by_str(self, rangestr, raw=True, output=False):
"""Get lines of history from a string of ranges, as used by magic
commands %hist, %save, %macro, etc.
Parameters
----------
rangestr : str
A string specifying ranges, e.g. "5 ~2/1-4". See
:func:`magic_history` for full details.
raw, output : bool
As :meth:`get_range`
Returns
-------
Tuples as :meth:`get_range`
"""
for sess, s, e in extract_hist_ranges(rangestr):
for line in self.get_range(sess, s, e, raw=raw, output=output):
yield line
class HistoryManager(HistoryAccessor):
"""A class to organize all history-related functionality in one place.
"""
# Public interface
# An instance of the IPython shell we are attached to
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
# Lists to hold processed and raw history. These start with a blank entry
# so that we can index them starting from 1
input_hist_parsed = List([""])
input_hist_raw = List([""])
# A list of directories visited during session
dir_hist = List()
def _dir_hist_default(self):
try:
return [py3compat.getcwd()]
except OSError:
return []
# A dict of output history, keyed with ints from the shell's
# execution count.
output_hist = Dict()
# The text/plain repr of outputs.
output_hist_reprs = Dict()
# The number of the current session in the history database
session_number = Integer()
db_log_output = Bool(False, config=True,
help="Should the history database include output? (default: no)"
)
db_cache_size = Integer(0, config=True,
help="Write to database every x commands (higher values save disk access & power).\n"
"Values of 1 or less effectively disable caching."
)
# The input and output caches
db_input_cache = List()
db_output_cache = List()
# History saving in separate thread
save_thread = Instance('IPython.core.history.HistorySavingThread',
allow_none=True)
try: # Event is a function returning an instance of _Event...
save_flag = Instance(threading._Event, allow_none=True)
except AttributeError: # ...until Python 3.3, when it's a class.
save_flag = Instance(threading.Event, allow_none=True)
# Private interface
# Variables used to store the three last inputs from the user. On each new
# history update, we populate the user's namespace with these, shifted as
# necessary.
_i00 = Unicode(u'')
_i = Unicode(u'')
_ii = Unicode(u'')
_iii = Unicode(u'')
# A regex matching all forms of the exit command, so that we don't store
# them in the history (it's annoying to rewind the first entry and land on
# an exit call).
_exit_re = re.compile(r"(exit|quit)(\s*\(.*\))?$")
def __init__(self, shell=None, config=None, **traits):
"""Create a new history manager associated with a shell instance.
"""
# We need a pointer back to the shell for various tasks.
super(HistoryManager, self).__init__(shell=shell, config=config,
**traits)
self.save_flag = threading.Event()
self.db_input_cache_lock = threading.Lock()
self.db_output_cache_lock = threading.Lock()
if self.enabled and self.hist_file != ':memory:':
self.save_thread = HistorySavingThread(self)
self.save_thread.start()
self.new_session()
def _get_hist_file_name(self, profile=None):
"""Get default history file name based on the Shell's profile.
The profile parameter is ignored, but must exist for compatibility with
the parent class."""
profile_dir = self.shell.profile_dir.location
return os.path.join(profile_dir, 'history.sqlite')
@needs_sqlite
def new_session(self, conn=None):
"""Get a new session number."""
if conn is None:
conn = self.db
with conn:
cur = conn.execute("""INSERT INTO sessions VALUES (NULL, ?, NULL,
NULL, "") """, (datetime.datetime.now(),))
self.session_number = cur.lastrowid
def end_session(self):
"""Close the database session, filling in the end time and line count."""
self.writeout_cache()
with self.db:
self.db.execute("""UPDATE sessions SET end=?, num_cmds=? WHERE
session==?""", (datetime.datetime.now(),
len(self.input_hist_parsed)-1, self.session_number))
self.session_number = 0
def name_session(self, name):
"""Give the current session a name in the history database."""
with self.db:
self.db.execute("UPDATE sessions SET remark=? WHERE session==?",
(name, self.session_number))
def reset(self, new_session=True):
"""Clear the session history, releasing all object references, and
optionally open a new session."""
self.output_hist.clear()
# The directory history can't be completely empty
self.dir_hist[:] = [py3compat.getcwd()]
if new_session:
if self.session_number:
self.end_session()
self.input_hist_parsed[:] = [""]
self.input_hist_raw[:] = [""]
self.new_session()
# ------------------------------
# Methods for retrieving history
# ------------------------------
def get_session_info(self, session=0):
"""Get info about a session.
Parameters
----------
session : int
Session number to retrieve. The current session is 0, and negative
numbers count back from current session, so -1 is the previous session.
Returns
-------
session_id : int
Session ID number
start : datetime
Timestamp for the start of the session.
end : datetime
Timestamp for the end of the session, or None if IPython crashed.
num_cmds : int
Number of commands run, or None if IPython crashed.
remark : unicode
A manually set description.
"""
if session <= 0:
session += self.session_number
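        # e.g. session=0 is the current session; session=-1 with
        # session_number=5 resolves to session 4 (the previous one).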
return super(HistoryManager, self).get_session_info(session=session)
def _get_range_session(self, start=1, stop=None, raw=True, output=False):
"""Get input and output history from the current session. Called by
get_range, and takes similar parameters."""
input_hist = self.input_hist_raw if raw else self.input_hist_parsed
n = len(input_hist)
if start < 0:
start += n
if not stop or (stop > n):
stop = n
elif stop < 0:
stop += n
for i in range(start, stop):
if output:
line = (input_hist[i], self.output_hist_reprs.get(i))
else:
line = input_hist[i]
yield (0, i, line)
def get_range(self, session=0, start=1, stop=None, raw=True,output=False):
"""Retrieve input by session.
Parameters
----------
session : int
Session number to retrieve. The current session is 0, and negative
numbers count back from current session, so -1 is previous session.
start : int
First line to retrieve.
stop : int
End of line range (excluded from output itself). If None, retrieve
to the end of the session.
raw : bool
If True, return untranslated input
output : bool
If True, attempt to include output. This will be 'real' Python
objects for the current session, or text reprs from previous
sessions if db_log_output was enabled at the time. Where no output
is found, None is used.
Returns
-------
entries
An iterator over the desired lines. Each line is a 3-tuple, either
(session, line, input) if output is False, or
(session, line, (input, output)) if output is True.
"""
if session <= 0:
session += self.session_number
if session==self.session_number: # Current session
return self._get_range_session(start, stop, raw, output)
return super(HistoryManager, self).get_range(session, start, stop, raw,
output)
## ----------------------------
## Methods for storing history:
## ----------------------------
def store_inputs(self, line_num, source, source_raw=None):
"""Store source and raw input in history and create input cache
variables ``_i*``.
Parameters
----------
line_num : int
The prompt number of this input.
source : str
Python input.
source_raw : str, optional
If given, this is the raw input without any IPython transformations
applied to it. If not given, ``source`` is used.
"""
if source_raw is None:
source_raw = source
source = source.rstrip('\n')
source_raw = source_raw.rstrip('\n')
# do not store exit/quit commands
if self._exit_re.match(source_raw.strip()):
return
self.input_hist_parsed.append(source)
self.input_hist_raw.append(source_raw)
with self.db_input_cache_lock:
self.db_input_cache.append((line_num, source, source_raw))
# Trigger to flush cache and write to DB.
if len(self.db_input_cache) >= self.db_cache_size:
self.save_flag.set()
# update the auto _i variables
self._iii = self._ii
self._ii = self._i
self._i = self._i00
self._i00 = source_raw
# hackish access to user namespace to create _i1,_i2... dynamically
new_i = '_i%s' % line_num
to_main = {'_i': self._i,
'_ii': self._ii,
'_iii': self._iii,
new_i : self._i00 }
if self.shell is not None:
self.shell.push(to_main, interactive=False)
def store_output(self, line_num):
"""If database output logging is enabled, this saves all the
outputs from the indicated prompt number to the database. It's
called by run_cell after code has been executed.
Parameters
----------
line_num : int
The line number from which to save outputs
"""
if (not self.db_log_output) or (line_num not in self.output_hist_reprs):
return
output = self.output_hist_reprs[line_num]
with self.db_output_cache_lock:
self.db_output_cache.append((line_num, output))
if self.db_cache_size <= 1:
self.save_flag.set()
def _writeout_input_cache(self, conn):
with conn:
for line in self.db_input_cache:
conn.execute("INSERT INTO history VALUES (?, ?, ?, ?)",
(self.session_number,)+line)
def _writeout_output_cache(self, conn):
with conn:
for line in self.db_output_cache:
conn.execute("INSERT INTO output_history VALUES (?, ?, ?)",
(self.session_number,)+line)
@needs_sqlite
def writeout_cache(self, conn=None):
"""Write any entries in the cache to the database."""
if conn is None:
conn = self.db
with self.db_input_cache_lock:
try:
self._writeout_input_cache(conn)
except sqlite3.IntegrityError:
self.new_session(conn)
print("ERROR! Session/line number was not unique in",
"database. History logging moved to new session",
self.session_number)
try:
# Try writing to the new session. If this fails, don't
# recurse
self._writeout_input_cache(conn)
except sqlite3.IntegrityError:
pass
finally:
self.db_input_cache = []
with self.db_output_cache_lock:
try:
self._writeout_output_cache(conn)
except sqlite3.IntegrityError:
print("!! Session/line number for output was not unique",
"in database. Output will not be stored.")
finally:
self.db_output_cache = []
class HistorySavingThread(threading.Thread):
"""This thread takes care of writing history to the database, so that
the UI isn't held up while that happens.
It waits for the HistoryManager's save_flag to be set, then writes out
the history cache. The main thread is responsible for setting the flag when
the cache size reaches a defined threshold."""
daemon = True
stop_now = False
enabled = True
def __init__(self, history_manager):
super(HistorySavingThread, self).__init__(name="IPythonHistorySavingThread")
self.history_manager = history_manager
self.enabled = history_manager.enabled
atexit.register(self.stop)
@needs_sqlite
def run(self):
# We need a separate db connection per thread:
try:
self.db = sqlite3.connect(self.history_manager.hist_file,
**self.history_manager.connection_options
)
while True:
self.history_manager.save_flag.wait()
if self.stop_now:
self.db.close()
return
self.history_manager.save_flag.clear()
self.history_manager.writeout_cache(self.db)
except Exception as e:
print(("The history saving thread hit an unexpected error (%s)."
"History will not be written to the database.") % repr(e))
def stop(self):
"""This can be called from the main thread to safely stop this thread.
Note that it does not attempt to write out remaining history before
exiting. That should be done by calling the HistoryManager's
end_session method."""
self.stop_now = True
self.history_manager.save_flag.set()
self.join()
# To match, e.g. ~5/8-~2/3
range_re = re.compile(r"""
((?P<startsess>~?\d+)/)?
(?P<start>\d+)?
((?P<sep>[\-:])
((?P<endsess>~?\d+)/)?
(?P<end>\d+))?
$""", re.VERBOSE)
def extract_hist_ranges(ranges_str):
"""Turn a string of history ranges into 3-tuples of (session, start, stop).
Examples
--------
>>> list(extract_hist_ranges("~8/5-~7/4 2"))
[(-8, 5, None), (-7, 1, 5), (0, 2, 3)]
"""
for range_str in ranges_str.split():
rmatch = range_re.match(range_str)
if not rmatch:
continue
start = rmatch.group("start")
if start:
start = int(start)
end = rmatch.group("end")
# If no end specified, get (a, a + 1)
end = int(end) if end else start + 1
else: # start not specified
if not rmatch.group('startsess'): # no startsess
continue
start = 1
end = None # provide the entire session hist
if rmatch.group("sep") == "-": # 1-3 == 1:4 --> [1, 2, 3]
end += 1
startsess = rmatch.group("startsess") or "0"
endsess = rmatch.group("endsess") or startsess
startsess = int(startsess.replace("~","-"))
endsess = int(endsess.replace("~","-"))
assert endsess >= startsess, "start session must be earlier than end session"
if endsess == startsess:
yield (startsess, start, end)
continue
# Multiple sessions in one range:
yield (startsess, start, None)
for sess in range(startsess+1, endsess):
yield (sess, 1, None)
yield (endsess, 1, end)
def _format_lineno(session, line):
"""Helper function to format line numbers properly."""
if session == 0:
return str(line)
return "%s#%s" % (session, line)
|
{
"content_hash": "2bf58f3667e79eda61edc1de7c8bbc92",
"timestamp": "",
"source": "github",
"line_count": 877,
"max_line_length": 93,
"avg_line_length": 35.93728620296465,
"alnum_prop": 0.5611574705714376,
"repo_name": "bdh1011/wau",
"id": "adae60dc2bebdd123ffdd765207e9b0da206ccf0",
"size": "31517",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/IPython/core/history.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1176"
},
{
"name": "C",
"bytes": "5022853"
},
{
"name": "C++",
"bytes": "43676"
},
{
"name": "CSS",
"bytes": "10359"
},
{
"name": "D",
"bytes": "1841"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Groff",
"bytes": "7236"
},
{
"name": "HTML",
"bytes": "1709320"
},
{
"name": "JavaScript",
"bytes": "1200059"
},
{
"name": "Jupyter Notebook",
"bytes": "310219"
},
{
"name": "Lua",
"bytes": "11887"
},
{
"name": "Makefile",
"bytes": "112163"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Perl",
"bytes": "171375"
},
{
"name": "Python",
"bytes": "49407229"
},
{
"name": "Ruby",
"bytes": "58403"
},
{
"name": "Shell",
"bytes": "47672"
},
{
"name": "Smarty",
"bytes": "22599"
},
{
"name": "Tcl",
"bytes": "426334"
},
{
"name": "XSLT",
"bytes": "153073"
}
],
"symlink_target": ""
}
|
"""A collection of metrics which sample per-example values."""
from typing import Any, List, Optional, Text, Tuple
import apache_beam as beam
import numpy as np
from tensorflow_model_analysis.metrics import metric_types
from tensorflow_model_analysis.utils import beam_util
from tensorflow_model_analysis.utils import util
FIXED_SIZE_SAMPLE_NAME = 'fixed_size_sample'
# This corresponds to the comments in apache_beam/transforms/combiners.py
_HeapType = Tuple[bool, List[Any]]
class FixedSizeSample(metric_types.Metric):
"""Computes a fixed-size sample per slice."""
def __init__(self,
sampled_key: Text,
size: int,
name: Text = FIXED_SIZE_SAMPLE_NAME,
random_seed: Optional[int] = None):
"""Initializes a FixedSizeSample metric.
Args:
sampled_key: The key whose values should be sampled
size: The number of samples to collect (per slice)
name: Metric name.
      random_seed: The random_seed to be used for initializing the per-worker
        np.random.Generator in the CombineFn setup. Note that when more than
        one worker is used, setting this is not sufficient to guarantee
        determinism.
"""
super().__init__(
_fixed_size_sample,
sampled_key=sampled_key,
size=size,
name=name,
random_seed=random_seed)
metric_types.register_metric(FixedSizeSample)
def _fixed_size_sample(
sampled_key: Text,
size: int,
name: Text,
random_seed: Optional[int],
model_names: Optional[List[Text]] = None,
output_names: Optional[List[Text]] = None,
sub_keys: Optional[List[metric_types.SubKey]] = None,
example_weighted: bool = False) -> metric_types.MetricComputations:
"""Returns metrics computations for FixedSizeSample metrcs."""
keys = []
for model_name in model_names or ['']:
for output_name in output_names or ['']:
for sub_key in sub_keys or [None]:
keys.append(
metric_types.MetricKey(
name,
model_name=model_name,
output_name=output_name,
sub_key=sub_key,
example_weighted=example_weighted))
return [
metric_types.MetricComputation(
keys=keys,
preprocessors=[
metric_types.FeaturePreprocessor(feature_keys=[sampled_key])
],
combiner=_FixedSizeSampleCombineFn(
metric_keys=keys,
sampled_key=sampled_key,
size=size,
example_weighted=example_weighted,
random_seed=random_seed))
]
class _FixedSizeSampleCombineFn(beam_util.DelegatingCombineFn):
"""A fixed size sample combiner which samples values of a specified key.
This CombineFn is similar to beam.combiners.SampleCombineFn except it makes
use of the numpy random generator which means that it accepts a seed for use
with deterministic testing.
"""
def __init__(self, metric_keys: List[metric_types.MetricKey],
sampled_key: Text, size: int, example_weighted: bool,
random_seed: Optional[int]):
self._metric_keys = metric_keys
self._sampled_key = sampled_key
self._example_weighted = example_weighted
self._random_seed = random_seed
# We delegate to the TopCombineFn rather than subclass because the use of a
# TopCombineFn is an implementation detail.
super().__init__(combine_fn=beam.combiners.TopCombineFn(n=size))
def setup(self):
self._random_generator = np.random.default_rng(self._random_seed)
def add_input(self, heap: _HeapType,
element: metric_types.StandardMetricInputs) -> _HeapType:
# TODO(b/206546545): add support for sampling derived features
sampled_value = util.get_by_keys(element.features, [self._sampled_key])
random_tag = self._random_generator.random()
if self._example_weighted:
# For details, see Weighted Random Sampling over Data Streams:
# https://arxiv.org/abs/1012.0256
weight = element.example_weight
random_tag = random_tag**(1 / weight)
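      # Raising the uniform draw to the 1/weight power skews keys toward 1 for
      # heavily weighted examples, so the delegated TopCombineFn retains them
      # with probability proportional to weight (Efraimidis-Spirakis A-Res).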
return super().add_input(heap, (random_tag, sampled_value))
def extract_output(self, heap: _HeapType) -> metric_types.MetricsDict:
# drop random numbers used for sampling
sampled_values = np.array([v for _, v in super().extract_output(heap)])
return {k: sampled_values for k in self._metric_keys}
|
{
"content_hash": "3f4dfc433109d82c2adeac46fc420812",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 36.825,
"alnum_prop": 0.662367051369088,
"repo_name": "tensorflow/model-analysis",
"id": "10abcee60b68131ab723b4c5f4464305dae790d2",
"size": "4994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_model_analysis/metrics/sample_metrics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "125312"
},
{
"name": "JavaScript",
"bytes": "1415355"
},
{
"name": "Python",
"bytes": "3261298"
},
{
"name": "Shell",
"bytes": "813"
},
{
"name": "Starlark",
"bytes": "11590"
}
],
"symlink_target": ""
}
|
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v11.services.types import customizer_attribute_service
from .base import CustomizerAttributeServiceTransport, DEFAULT_CLIENT_INFO
class CustomizerAttributeServiceGrpcTransport(
CustomizerAttributeServiceTransport
):
"""gRPC backend transport for CustomizerAttributeService.
    Service to manage customizer attributes.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn(
"client_cert_source is deprecated", DeprecationWarning
)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = (
SslCredentials().ssl_credentials
)
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def mutate_customizer_attributes(
self,
) -> Callable[
[customizer_attribute_service.MutateCustomizerAttributesRequest],
customizer_attribute_service.MutateCustomizerAttributesResponse,
]:
r"""Return a callable for the mutate customizer attributes method over gRPC.
Creates, updates or removes customizer attributes.
Operation statuses are returned.
Returns:
Callable[[~.MutateCustomizerAttributesRequest],
~.MutateCustomizerAttributesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_customizer_attributes" not in self._stubs:
self._stubs[
"mutate_customizer_attributes"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v11.services.CustomizerAttributeService/MutateCustomizerAttributes",
request_serializer=customizer_attribute_service.MutateCustomizerAttributesRequest.serialize,
response_deserializer=customizer_attribute_service.MutateCustomizerAttributesResponse.deserialize,
)
return self._stubs["mutate_customizer_attributes"]
def close(self):
self.grpc_channel.close()
__all__ = ("CustomizerAttributeServiceGrpcTransport",)
|
{
"content_hash": "5f9f3da866d5b8b8d58a9ec90e2f1111",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 114,
"avg_line_length": 44.31782945736434,
"alnum_prop": 0.6107224068567431,
"repo_name": "googleads/google-ads-python",
"id": "07937681f6c5c53381b0ccf4b5b32711f140ea16",
"size": "12034",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v11/services/services/customizer_attribute_service/transports/grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class ErrorEntity(Model):
"""Body of the error response returned from the API.
:param code: Basic error code.
:type code: str
:param message: Any details of the error.
:type message: str
:param extended_code: Type of error.
:type extended_code: str
:param message_template: Message template.
:type message_template: str
:param parameters: Parameters for the template.
:type parameters: list of str
:param inner_errors: Inner errors.
:type inner_errors: list of :class:`ErrorEntity
<azure.mgmt.web.models.ErrorEntity>`
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'extended_code': {'key': 'extendedCode', 'type': 'str'},
'message_template': {'key': 'messageTemplate', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': '[str]'},
'inner_errors': {'key': 'innerErrors', 'type': '[ErrorEntity]'},
}
def __init__(self, code=None, message=None, extended_code=None, message_template=None, parameters=None, inner_errors=None):
self.code = code
self.message = message
self.extended_code = extended_code
self.message_template = message_template
self.parameters = parameters
self.inner_errors = inner_errors
|
{
"content_hash": "5e9aec2cdee39a1d0473c40351a8eb81",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 127,
"avg_line_length": 37.37837837837838,
"alnum_prop": 0.6276211135213304,
"repo_name": "rjschwei/azure-sdk-for-python",
"id": "e559412a967f75fafa517e1e4685a03e10913ba2",
"size": "1857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-web/azure/mgmt/web/models/error_entity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8317911"
}
],
"symlink_target": ""
}
|
import warnings
import numpy as np
from numbers import Integral, Real
from scipy import sparse
from math import sqrt
from ..metrics import pairwise_distances_argmin
from ..metrics.pairwise import euclidean_distances
from ..base import (
TransformerMixin,
ClusterMixin,
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
)
from ..utils.extmath import row_norms
from ..utils._param_validation import Interval
from ..utils.validation import check_is_fitted
from ..exceptions import ConvergenceWarning
from . import AgglomerativeClustering
from .._config import config_context
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in range(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
"""The node has to be split if there is no place for a new subcluster
in the node.
1. Two empty nodes and two empty subclusters are initialized.
2. The pair of distant subclusters are found.
3. The properties of the empty subclusters and nodes are updated
according to the nearest distance between the subclusters to the
pair of distant subclusters.
4. The two nodes are set as children to the two subclusters.
"""
new_subcluster1 = _CFSubcluster()
new_subcluster2 = _CFSubcluster()
new_node1 = _CFNode(
threshold=threshold,
branching_factor=branching_factor,
is_leaf=node.is_leaf,
n_features=node.n_features,
dtype=node.init_centroids_.dtype,
)
new_node2 = _CFNode(
threshold=threshold,
branching_factor=branching_factor,
is_leaf=node.is_leaf,
n_features=node.n_features,
dtype=node.init_centroids_.dtype,
)
new_subcluster1.child_ = new_node1
new_subcluster2.child_ = new_node2
if node.is_leaf:
if node.prev_leaf_ is not None:
node.prev_leaf_.next_leaf_ = new_node1
new_node1.prev_leaf_ = node.prev_leaf_
new_node1.next_leaf_ = new_node2
new_node2.prev_leaf_ = new_node1
new_node2.next_leaf_ = node.next_leaf_
if node.next_leaf_ is not None:
node.next_leaf_.prev_leaf_ = new_node2
dist = euclidean_distances(
node.centroids_, Y_norm_squared=node.squared_norm_, squared=True
)
n_clusters = dist.shape[0]
farthest_idx = np.unravel_index(dist.argmax(), (n_clusters, n_clusters))
node1_dist, node2_dist = dist[(farthest_idx,)]
node1_closer = node1_dist < node2_dist
# make sure node1 is closest to itself even if all distances are equal.
# This can only happen when all node.centroids_ are duplicates leading to all
# distances between centroids being zero.
node1_closer[farthest_idx[0]] = True
for idx, subcluster in enumerate(node.subclusters_):
if node1_closer[idx]:
new_node1.append_subcluster(subcluster)
new_subcluster1.update(subcluster)
else:
new_node2.append_subcluster(subcluster)
new_subcluster2.update(subcluster)
return new_subcluster1, new_subcluster2
class _CFNode:
"""Each node in a CFTree is called a CFNode.
The CFNode can have a maximum of branching_factor
number of CFSubclusters.
Parameters
----------
threshold : float
Threshold needed for a new subcluster to enter a CFSubcluster.
branching_factor : int
Maximum number of CF subclusters in each node.
is_leaf : bool
We need to know if the CFNode is a leaf or not, in order to
retrieve the final subclusters.
n_features : int
The number of features.
Attributes
----------
subclusters_ : list
List of subclusters for a particular CFNode.
prev_leaf_ : _CFNode
Useful only if is_leaf is True.
next_leaf_ : _CFNode
        Next leaf node. Useful only if is_leaf is True, in order to
        retrieve the final subclusters.
init_centroids_ : ndarray of shape (branching_factor + 1, n_features)
Manipulate ``init_centroids_`` throughout rather than centroids_ since
        the centroids are just a view of ``init_centroids_``.
    init_sq_norm_ : ndarray of shape (branching_factor + 1,)
        Manipulate ``init_sq_norm_`` throughout. Similar to ``init_centroids_``.
centroids_ : ndarray of shape (branching_factor + 1, n_features)
View of ``init_centroids_``.
squared_norm_ : ndarray of shape (branching_factor + 1,)
View of ``init_sq_norm_``.
"""
def __init__(self, *, threshold, branching_factor, is_leaf, n_features, dtype):
self.threshold = threshold
self.branching_factor = branching_factor
self.is_leaf = is_leaf
self.n_features = n_features
# The list of subclusters, centroids and squared norms
# to manipulate throughout.
self.subclusters_ = []
self.init_centroids_ = np.zeros((branching_factor + 1, n_features), dtype=dtype)
self.init_sq_norm_ = np.zeros((branching_factor + 1), dtype)
self.squared_norm_ = []
self.prev_leaf_ = None
self.next_leaf_ = None
def append_subcluster(self, subcluster):
n_samples = len(self.subclusters_)
self.subclusters_.append(subcluster)
self.init_centroids_[n_samples] = subcluster.centroid_
self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norm as views. In this way,
        # updating init_centroids_ and init_sq_norm_ is
        # sufficient.
self.centroids_ = self.init_centroids_[: n_samples + 1, :]
self.squared_norm_ = self.init_sq_norm_[: n_samples + 1]
def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2):
"""Remove a subcluster from a node and update it with the
split subclusters.
"""
ind = self.subclusters_.index(subcluster)
self.subclusters_[ind] = new_subcluster1
self.init_centroids_[ind] = new_subcluster1.centroid_
self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
self.append_subcluster(new_subcluster2)
def insert_cf_subcluster(self, subcluster):
"""Insert a new subcluster into the node."""
if not self.subclusters_:
self.append_subcluster(subcluster)
return False
threshold = self.threshold
branching_factor = self.branching_factor
# We need to find the closest subcluster among all the
# subclusters so that we can insert our new subcluster.
dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
dist_matrix *= -2.0
dist_matrix += self.squared_norm_
closest_index = np.argmin(dist_matrix)
closest_subcluster = self.subclusters_[closest_index]
# If the subcluster has a child, we need a recursive strategy.
if closest_subcluster.child_ is not None:
split_child = closest_subcluster.child_.insert_cf_subcluster(subcluster)
if not split_child:
# If it is determined that the child need not be split, we
# can just update the closest_subcluster
closest_subcluster.update(subcluster)
self.init_centroids_[closest_index] = self.subclusters_[
closest_index
].centroid_
self.init_sq_norm_[closest_index] = self.subclusters_[
closest_index
].sq_norm_
return False
            # Otherwise the child node has to be split: redistribute the
            # subclusters in the child node, and add a new subcluster in
            # the parent subcluster to accommodate the new child.
else:
new_subcluster1, new_subcluster2 = _split_node(
closest_subcluster.child_,
threshold,
branching_factor,
)
self.update_split_subclusters(
closest_subcluster, new_subcluster1, new_subcluster2
)
if len(self.subclusters_) > self.branching_factor:
return True
return False
# good to go!
else:
merged = closest_subcluster.merge_subcluster(subcluster, self.threshold)
if merged:
self.init_centroids_[closest_index] = closest_subcluster.centroid_
self.init_sq_norm_[closest_index] = closest_subcluster.sq_norm_
return False
# not close to any other subclusters, and we still
# have space, so add.
elif len(self.subclusters_) < self.branching_factor:
self.append_subcluster(subcluster)
return False
            # We do not have enough space, nor is it close enough to any
            # other subcluster. We need to split.
else:
self.append_subcluster(subcluster)
return True
class _CFSubcluster:
"""Each subcluster in a CFNode is called a CFSubcluster.
    A CFSubcluster can have a CFNode as its child.
Parameters
----------
linear_sum : ndarray of shape (n_features,), default=None
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray of shape (branching_factor + 1, n_features)
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
        Child node of the subcluster. Once a given _CFNode is set as the
        child of this subcluster, it is stored in ``self.child_``.
sq_norm_ : ndarray of shape (branching_factor + 1,)
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, *, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.centroid_ = self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_
)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_sq_norm = np.dot(new_centroid, new_centroid)
# The squared radius of the cluster is defined:
# r^2 = sum_i ||x_i - c||^2 / n
# with x_i the n points assigned to the cluster and c its centroid:
# c = sum_i x_i / n
# This can be expanded to:
# r^2 = sum_i ||x_i||^2 / n - 2 < sum_i x_i / n, c> + n ||c||^2 / n
# and therefore simplifies to:
# r^2 = sum_i ||x_i||^2 / n - ||c||^2
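        # Illustrative numeric check of this identity (not part of the
        # algorithm): for the 1-D points {0, 2}, n = 2, sum_i ||x_i||^2 = 4
        # and c = 1, so r^2 = 4 / 2 - 1^2 = 1, which matches the direct
        # computation ((0 - 1)^2 + (2 - 1)^2) / 2 = 1.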
sq_radius = new_ss / new_n - new_sq_norm
if sq_radius <= threshold**2:
(
self.n_samples_,
self.linear_sum_,
self.squared_sum_,
self.centroid_,
self.sq_norm_,
) = (new_n, new_ls, new_ss, new_centroid, new_sq_norm)
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
# Because of numerical issues, this could become negative
sq_radius = self.squared_sum_ / self.n_samples_ - self.sq_norm_
return sqrt(max(0, sq_radius))
class Birch(
ClassNamePrefixFeaturesOutMixin, ClusterMixin, TransformerMixin, BaseEstimator
):
"""Implements the BIRCH clustering algorithm.
It is a memory-efficient, online-learning algorithm provided as an
alternative to :class:`MiniBatchKMeans`. It constructs a tree
data structure with the cluster centroids being read off the leaf.
These can be either the final cluster centroids or can be provided as input
to another clustering algorithm such as :class:`AgglomerativeClustering`.
Read more in the :ref:`User Guide <birch>`.
.. versionadded:: 0.16
Parameters
----------
threshold : float, default=0.5
The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be less than the threshold. Otherwise a new
subcluster is started. Setting this value to be very low promotes
splitting and vice-versa.
branching_factor : int, default=50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor, then
that node is split into two nodes with the subclusters redistributed
in each. The parent subcluster of that node is removed and two new
subclusters are added as parents of the 2 split nodes.
n_clusters : int, instance of sklearn.cluster model or None, default=3
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples.
- `None` : the final clustering step is not performed and the
subclusters are returned as they are.
- :mod:`sklearn.cluster` Estimator : If a model is provided, the model
is fit treating the subclusters as new samples and the initial data
is mapped to the label of the closest subcluster.
- `int` : the model fit is :class:`AgglomerativeClustering` with
`n_clusters` set to be equal to the int.
compute_labels : bool, default=True
Whether or not to compute labels for each fit.
copy : bool, default=True
Whether or not to make a copy of the given data. If set to False,
the initial data will be overwritten.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray of shape (n_samples,)
Array of labels assigned to the input data.
        If partial_fit is used instead of fit, they are assigned to the
last batch of data.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
MiniBatchKMeans : Alternative implementation that does incremental updates
of the centers' positions using mini-batches.
Notes
-----
The tree data structure consists of nodes with each node consisting of
a number of subclusters. The maximum number of subclusters in a node
is determined by the branching factor. Each subcluster maintains a
linear sum, squared sum and the number of samples in that subcluster.
In addition, each subcluster can also have a node as its child, if the
subcluster is not a member of a leaf node.
For a new point entering the root, it is merged with the subcluster closest
to it and the linear sum, squared sum and the number of samples of that
subcluster are updated. This is done recursively till the properties of
the leaf node are updated.
References
----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
BIRCH: An efficient data clustering method for large databases.
https://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/archive/p/jbirch
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(n_clusters=None)
>>> brc.fit(X)
Birch(n_clusters=None)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
"""
_parameter_constraints: dict = {
"threshold": [Interval(Real, 0.0, None, closed="neither")],
"branching_factor": [Interval(Integral, 1, None, closed="neither")],
"n_clusters": [None, ClusterMixin, Interval(Integral, 1, None, closed="left")],
"compute_labels": ["boolean"],
"copy": ["boolean"],
}
def __init__(
self,
*,
threshold=0.5,
branching_factor=50,
n_clusters=3,
compute_labels=True,
copy=True,
):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
self.copy = copy
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
"""
self._validate_params()
return self._fit(X, partial=False)
def _fit(self, X, partial):
has_root = getattr(self, "root_", None)
first_call = not (partial and has_root)
X = self._validate_data(
X,
accept_sparse="csr",
copy=self.copy,
reset=first_call,
dtype=[np.float64, np.float32],
)
threshold = self.threshold
branching_factor = self.branching_factor
n_samples, n_features = X.shape
# If partial_fit is called for the first time or fit is called, we
# start a new tree.
if first_call:
# The first root is the leaf. Manipulate this object throughout.
self.root_ = _CFNode(
threshold=threshold,
branching_factor=branching_factor,
is_leaf=True,
n_features=n_features,
dtype=X.dtype,
)
# To enable getting back subclusters.
self.dummy_leaf_ = _CFNode(
threshold=threshold,
branching_factor=branching_factor,
is_leaf=True,
n_features=n_features,
dtype=X.dtype,
)
self.dummy_leaf_.next_leaf_ = self.root_
self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize this loop; reason enough to move it to Cython.
if not sparse.issparse(X):
iter_func = iter
else:
iter_func = _iterate_sparse_X
for sample in iter_func(X):
subcluster = _CFSubcluster(linear_sum=sample)
split = self.root_.insert_cf_subcluster(subcluster)
if split:
new_subcluster1, new_subcluster2 = _split_node(
self.root_, threshold, branching_factor
)
del self.root_
self.root_ = _CFNode(
threshold=threshold,
branching_factor=branching_factor,
is_leaf=False,
n_features=n_features,
dtype=X.dtype,
)
self.root_.append_subcluster(new_subcluster1)
self.root_.append_subcluster(new_subcluster2)
centroids = np.concatenate([leaf.centroids_ for leaf in self._get_leaves()])
self.subcluster_centers_ = centroids
self._n_features_out = self.subcluster_centers_.shape[0]
self._global_clustering(X)
return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves : list of shape (n_leaves,)
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), \
default=None
Input data. If X is not provided, only the global clustering
step is done.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
"""
self._validate_params()
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
return self._fit(X, partial=True)
def _check_fit(self, X):
check_is_fitted(self)
if (
hasattr(self, "subcluster_centers_")
and X.shape[1] != self.subcluster_centers_.shape[1]
):
raise ValueError(
"Training data and predicted data do not have same number of features."
)
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
labels : ndarray of shape(n_samples,)
Labelled data.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse="csr", reset=False)
return self._predict(X)
def _predict(self, X):
"""Predict data using the ``centroids_`` of subclusters."""
kwargs = {"Y_norm_squared": self._subcluster_norms}
with config_context(assume_finite=True):
argmin = pairwise_distances_argmin(
X, self.subcluster_centers_, metric_kwargs=kwargs
)
return self.subcluster_labels_[argmin]
def transform(self, X):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse="csr", reset=False)
with config_context(assume_finite=True):
return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
"""
Global clustering for the subclusters obtained after fitting
"""
clusterer = self.n_clusters
centroids = self.subcluster_centers_
compute_labels = (X is not None) and self.compute_labels
# Preprocessing for the global clustering.
not_enough_centroids = False
if isinstance(clusterer, Integral):
clusterer = AgglomerativeClustering(n_clusters=self.n_clusters)
# There is no need to perform the global clustering step.
if len(centroids) < self.n_clusters:
not_enough_centroids = True
# To use in predict to avoid recalculation.
self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True)
if clusterer is None or not_enough_centroids:
self.subcluster_labels_ = np.arange(len(centroids))
if not_enough_centroids:
warnings.warn(
"Number of subclusters found (%d) by BIRCH is less "
"than (%d). Decrease the threshold."
% (len(centroids), self.n_clusters),
ConvergenceWarning,
)
else:
# The global clustering step that clusters the subclusters of
# the leaves. It assumes the centroids of the subclusters as
# samples and finds the final centroids.
self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_)
if compute_labels:
self.labels_ = self._predict(X)
def _more_tags(self):
return {"preserves_dtype": [np.float64, np.float32]}
|
{
"content_hash": "cf21586b1627e465590b9d8801e88a2a",
"timestamp": "",
"source": "github",
"line_count": 737,
"max_line_length": 88,
"avg_line_length": 35.260515603799185,
"alnum_prop": 0.6011467272097587,
"repo_name": "espg/scikit-learn",
"id": "4c9d7921fdc704fc95496fd683c2eaab1deb0b70",
"size": "26189",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sklearn/cluster/_birch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "668672"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10476969"
},
{
"name": "Shell",
"bytes": "41551"
}
],
"symlink_target": ""
}
|
"""Test runner for typeshed.
Depends on mypy and pytype being installed.
If pytype is installed:
1. For every pyi, run "pytd <foo.pyi>" in a separate process
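Example invocation (flags as defined by the argument parser below):
    $ python pytype_test.py --num-parallel 4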
"""
import os
import re
import sys
import argparse
import subprocess
import collections
parser = argparse.ArgumentParser(description="Pytype tests.")
parser.add_argument('-n', '--dry-run', action='store_true', help="Don't actually run tests")
parser.add_argument('--num-parallel', type=int, default=1,
help="Number of test processes to spawn")
def main():
args = parser.parse_args()
code, runs = pytype_test(args)
if code:
print("--- exit status %d ---" % code)
sys.exit(code)
if not runs:
print("--- nothing to do; exit 1 ---")
sys.exit(1)
def load_blacklist():
filename = os.path.join(os.path.dirname(__file__), "pytype_blacklist.txt")
regex = r"^\s*([^\s#]+)\s*(?:#.*)?$"
with open(filename) as f:
return re.findall(regex, f.read(), flags=re.M)
class PytdRun(object):
def __init__(self, args, dry_run=False):
self.args = args
self.dry_run = dry_run
self.results = None
if dry_run:
self.results = (0, "", "")
else:
self.proc = subprocess.Popen(
["pytd"] + args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def communicate(self):
if self.results:
return self.results
stdout, stderr = self.proc.communicate()
self.results = self.proc.returncode, stdout, stderr
return self.results
def pytype_test(args):
try:
PytdRun(["-h"]).communicate()
except OSError:
print("Cannot run pytd. Did you install pytype?")
return 0, 0
wanted = re.compile(r"stdlib/(2|2\.7|2and3)/.*\.pyi$")
skipped = re.compile("(%s)$" % "|".join(load_blacklist()))
files = []
for root, _, filenames in os.walk("stdlib"):
for f in sorted(filenames):
f = os.path.join(root, f)
if wanted.search(f) and not skipped.search(f):
files.append(f)
running_tests = collections.deque()
max_code, runs, errors = 0, 0, 0
print("Running pytype tests...")
    while True:
while files and len(running_tests) < args.num_parallel:
test_run = PytdRun([files.pop()], dry_run=args.dry_run)
running_tests.append(test_run)
if not running_tests:
break
test_run = running_tests.popleft()
code, stdout, stderr = test_run.communicate()
max_code = max(max_code, code)
runs += 1
if code:
print("pytd error processing \"%s\":" % test_run.args[0])
print(stderr)
errors += 1
print("Ran pytype with %d pyis, got %d errors." % (runs, errors))
return max_code, runs
if __name__ == '__main__':
main()
|
{
"content_hash": "12f7312d2d5c72a2398e79c9e2bd5fa2",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 92,
"avg_line_length": 27.02777777777778,
"alnum_prop": 0.5686879068174032,
"repo_name": "vvv1559/intellij-community",
"id": "e7475c0be115903003c1420c3dfa08507779849b",
"size": "2941",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "python/helpers/typeshed/tests/pytype_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "20665"
},
{
"name": "AspectJ",
"bytes": "182"
},
{
"name": "Batchfile",
"bytes": "60827"
},
{
"name": "C",
"bytes": "211454"
},
{
"name": "C#",
"bytes": "1264"
},
{
"name": "C++",
"bytes": "199030"
},
{
"name": "CMake",
"bytes": "1675"
},
{
"name": "CSS",
"bytes": "201445"
},
{
"name": "CoffeeScript",
"bytes": "1759"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "Groovy",
"bytes": "3246752"
},
{
"name": "HLSL",
"bytes": "57"
},
{
"name": "HTML",
"bytes": "1901858"
},
{
"name": "J",
"bytes": "5050"
},
{
"name": "Java",
"bytes": "166889152"
},
{
"name": "JavaScript",
"bytes": "570364"
},
{
"name": "Jupyter Notebook",
"bytes": "93222"
},
{
"name": "Kotlin",
"bytes": "4758504"
},
{
"name": "Lex",
"bytes": "147486"
},
{
"name": "Makefile",
"bytes": "2352"
},
{
"name": "NSIS",
"bytes": "51370"
},
{
"name": "Objective-C",
"bytes": "28061"
},
{
"name": "Perl",
"bytes": "903"
},
{
"name": "Perl 6",
"bytes": "26"
},
{
"name": "Protocol Buffer",
"bytes": "6680"
},
{
"name": "Python",
"bytes": "25489147"
},
{
"name": "Roff",
"bytes": "37534"
},
{
"name": "Ruby",
"bytes": "1217"
},
{
"name": "Shell",
"bytes": "64141"
},
{
"name": "Smalltalk",
"bytes": "338"
},
{
"name": "TeX",
"bytes": "25473"
},
{
"name": "Thrift",
"bytes": "1846"
},
{
"name": "TypeScript",
"bytes": "9469"
},
{
"name": "Visual Basic",
"bytes": "77"
},
{
"name": "XSLT",
"bytes": "113040"
}
],
"symlink_target": ""
}
|
"""
Sponge Knowledge Base
Demo
"""
from java.util import Random
class SubmittableActionArg(Action):
def onConfigure(self):
self.withLabel("Submittable argument")
self.withArgs([
StringType("arg1").withLabel("Argument 1").withNullable().withProvided(
ProvidedMeta().withValue().withSubmittable(
SubmittableMeta().withInfluences(["arg2", "arg3", "arg4"]))).withFeatures({"responsive":True}),
StringType("arg2").withLabel("Argument 2").withNullable().withReadOnly().withProvided(
ProvidedMeta().withValue().withDependency("arg1").withLazyUpdate().withOptionalMode()),
StringType("arg3").withLabel("Argument 3").withNullable().withReadOnly().withProvided(
ProvidedMeta().withValue().withDependency("arg1").withOptionalMode()),
StringType("arg4").withLabel("Argument 4").withReadOnly().withProvided(
ProvidedMeta().withValue().withDependency("arg1").withOptionalMode()),
]).withNonCallable()
self.withFeatures({"cancelLabel":"Close"})
def onProvideArgs(self, context):
if "arg1" in context.provide:
context.provided["arg1"] = ProvidedValue().withValue("a")
# Current arg1 is set when it is submitted or any of arg2, arg3 or arg4 is to be provided.
arg1 = context.current.get("arg1")
if "arg2" in context.provide or "arg1" in context.submit:
context.provided["arg2"] = ProvidedValue().withValue(arg1.upper() if arg1 else None)
if "arg3" in context.provide or "arg1" in context.submit:
context.provided["arg3"] = ProvidedValue().withValue(arg1.upper() if arg1 else None)
if "arg4" in context.provide or "arg1" in context.submit:
context.provided["arg4"] = ProvidedValue().withValue(arg1.upper() if arg1 else None)
|
{
"content_hash": "57b6d19902b64c2a96e63c8464e53422",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 115,
"avg_line_length": 52.22222222222222,
"alnum_prop": 0.65,
"repo_name": "softelnet/sponge",
"id": "f144280a19bb4c2b43cc670e7ea6a6a3a48aa06f",
"size": "1880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sponge-app/sponge-app-demo-service/sponge/sponge_demo_submittable_action_arg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "482"
},
{
"name": "Dockerfile",
"bytes": "2389"
},
{
"name": "Groovy",
"bytes": "70914"
},
{
"name": "HTML",
"bytes": "6759"
},
{
"name": "Java",
"bytes": "3300560"
},
{
"name": "JavaScript",
"bytes": "70716"
},
{
"name": "Kotlin",
"bytes": "113542"
},
{
"name": "Mustache",
"bytes": "38"
},
{
"name": "Python",
"bytes": "426240"
},
{
"name": "Ruby",
"bytes": "65491"
},
{
"name": "SCSS",
"bytes": "6217"
},
{
"name": "Shell",
"bytes": "1388"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = 'django-timesheet',
version = '0.1',
url = 'http://github.com/myles/django-timesheet',
license = 'BSD License',
description = 'A Django timesheet application.',
long_description = read('README'),
author = 'Myles Braithwaite',
author_email = 'me@mylesbraithwaite.com',
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = [
'setuptools',
'python-dateutil',
],
classifiers = [
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
],
)
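# A typical development install using this setup script (standard setuptools
# workflow; shown as a hint, not project documentation):
#     $ pip install -e .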
|
{
"content_hash": "111c7f50aa782dffe59c46fde4c46cf0",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 67,
"avg_line_length": 23.36111111111111,
"alnum_prop": 0.6611177170035671,
"repo_name": "philippeowagner/django-timesheet",
"id": "814bab92faadc081bb52e9e78f4d98194afac296",
"size": "841",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Web screen capture script with QtWebKit
How to use
==========
$ python screenshot.py -h
usage: screenshot.py [-h] [-a AGENT] [-l LANGUAGE] [-w WIDTH] [-H HEIGHT]
[-p PREFIX] [-s]
url
positional arguments:
url specify request url
optional arguments:
-h, --help show this help message and exit
-a AGENT, --agent AGENT
UA strings for HTTP Header 'User-Agent'
-l LANGUAGE, --language LANGUAGE
specify langs for HTTP Header 'Accept-Language'
-w WIDTH, --width WIDTH
specify window width to capture screen
-H HEIGHT, --height HEIGHT
specify minimum window height to capture screen
-p PREFIX, --prefix PREFIX
specify PNG file prefix (timestamp follows)
-s, --with-smooth-scroll
                        whether to scroll down to the bottom when capturing
                        the page
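Example invocation (values are illustrative):
$ python screenshot.py -w 1280 -s -p example https://example.com/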
"""
import datetime
import sys
from argparse import ArgumentParser
try:
from PySide.QtCore import QUrl, QTimer, Qt
from PySide.QtGui import QApplication, QImage, QPainter
from PySide.QtNetwork import QNetworkRequest
from PySide.QtWebKit import QWebView, QWebPage, QWebSettings
except ImportError:
# Use PyQt5 when it couldn't have found PySide modules
from PyQt5.QtCore import QUrl, QTimer, Qt
from PyQt5.QtGui import QImage, QPainter
from PyQt5.QtNetwork import QNetworkRequest
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWebKitWidgets import QWebView, QWebPage
from PyQt5.QtWidgets import QApplication
DEFAULT_WIDTH = 1024
DEFAULT_HEIGHT = 768
DEFAULT_USERAGENT = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5)'
' AppleWebKit/537.36 (KHTML, like Gecko)'
' CDP/47.0.2526.73 Safari/537.36')
DEFAULT_PREFIX = 'screenshot'
class Page(QWebPage):
"""psedo webpage class
"""
def __init__(self, ua):
QWebPage.__init__(self)
self.ua = ua
def userAgentForUrl(self, url):
"""override 'userAgentForUrl' method
"""
return self.ua
class Browser(QWebView):
"""psedo browser class
"""
def __init__(self, page=None):
"""Initialize browser class
"""
QWebView.__init__(self)
if page:
self.setPage(page)
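        # ``args`` is the module-level namespace parsed under __main__, so
        # this class is only usable when the file is run as a script.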
self.use_smooth_scroll = args.with_smooth_scroll
self.scrollStarted = False
self.initialize()
def _private_browse(self):
print("Enable private browsing mode")
self.settings().setAttribute(QWebSettings.PrivateBrowsingEnabled, True)
def _hide_scroll_bars(self):
print("Disable scroll bars")
self.page().mainFrame().setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff)
def initialize(self):
self.timerDelay = QTimer()
self.timerDelay.setInterval(50)
self.timerDelay.setSingleShot(True)
self.timerDelay.timeout.connect(self.delay_action)
self.loadFinished.connect(self.load_finished_slot)
self.loadProgress.connect(self.load_progress_slot)
self._private_browse()
self._hide_scroll_bars()
def load_progress_slot(self, progress):
"""Callback function when content loading status updated.
"""
print("Loading progress: {:d}%...".format(progress))
def load_finished_slot(self, ok):
"""Callback function when content loading finished
"""
if not ok:
print("Loaded but not completed: {}".format(self.url))
return
print("Load completed: {}".format(self.url))
print("Loaded content size: {:,d} x {:,d}".format(
self.page().mainFrame().contentsSize().width(),
self.page().mainFrame().contentsSize().height(),
))
self.delay_action()
def delay_action(self):
frame = self.page().mainFrame()
target_y = frame.scrollBarMaximum(Qt.Vertical)
current_y = frame.scrollBarValue(Qt.Vertical)
print("target: {:d}, current: {:d}".format(target_y, current_y))
if self.use_smooth_scroll:
y = current_y - 50 if self.scrollStarted else target_y
if y > 0:
frame.evaluateJavaScript("window.scrollTo(0, {:d});".format(y))
print("Scroll to y: {:,d}".format(y))
if not self.scrollStarted:
self.scrollStarted = True
self.timerDelay.start()
else:
self.take_screenshot()
else:
self.take_screenshot()
def take_screenshot(self):
frame = self.page().mainFrame()
size = frame.contentsSize()
self.page().setViewportSize(size)
image = QImage(size, QImage.Format_ARGB32)
painter = QPainter(image)
frame.render(painter)
painter.end()
timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
file_name = "{}_{}.png".format(args.prefix, timestamp)
print("page title: [{:s}] --> save as {:s}".format(self.title(), file_name))
image.save(file_name)
sys.exit()
def run(self, args):
"""prepare request object, then call 'load' method of QWebView object
"""
request = QNetworkRequest()
request.setUrl(QUrl(args.url))
request.setRawHeader(bytes("Accept-Languages", 'utf-8'), bytes(', '.join(args.language), 'utf-8'))
request.setRawHeader(bytes("User-Agent", 'utf-8'), bytes(args.agent, 'utf-8'))
self.resize(int(args.width) + 15, int(args.height))
self.load(request)
def main(args):
"""main function
"""
print(args)
app = QApplication(sys.argv)
page = Page(args.agent) if args.agent else None
browser = Browser(page)
browser.run(args)
browser.show()
app.exec_()
if __name__ == "__main__":
ap = ArgumentParser()
ap.add_argument('-a', '--agent', default=DEFAULT_USERAGENT,
help="UA strings for HTTP Header 'User-Agent'")
ap.add_argument('-l', '--language', action="append",
help="specify langs for HTTP Header 'Accept-Language'")
ap.add_argument('-w', '--width', default=DEFAULT_WIDTH,
help="specify window width to capture screen")
ap.add_argument('-H', '--height', default=DEFAULT_HEIGHT,
help="specify minimum window height to capture screen")
ap.add_argument('-p', '--prefix', default=DEFAULT_PREFIX,
help="specify PNG file prefix (timestamp follows)")
ap.add_argument('-s', '--with-smooth-scroll', default=False, action="store_true",
help="whether scroll down to bottom when capture the page or not")
ap.add_argument('url', help="specify request url")
args = ap.parse_args()
if not args.language:
args.language = ['ja']
main(args)
|
{
"content_hash": "7aec2e82d53175bf97963c5713d89dde",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 106,
"avg_line_length": 34.11219512195122,
"alnum_prop": 0.5958815958815958,
"repo_name": "shkumagai/pyside-sandbox",
"id": "0aa07f0aaaedf3227329c069d5a8fa5bd475f721",
"size": "7017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "screenshot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "60584"
},
{
"name": "Shell",
"bytes": "376"
}
],
"symlink_target": ""
}
|
import pathlib
from typing import Any, BinaryIO, Dict, List, Tuple, Union
import numpy as np
from torchdata.datapipes.iter import IterDataPipe, Mapper, UnBatcher
from torchvision.prototype.datasets.utils import Dataset, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import hint_sharding, hint_shuffling, read_mat
from torchvision.prototype.features import Image, Label
from .._api import register_dataset, register_info
NAME = "svhn"
@register_info(NAME)
def _info() -> Dict[str, Any]:
return dict(categories=[str(c) for c in range(10)])
@register_dataset(NAME)
class SVHN(Dataset):
"""SVHN Dataset.
homepage="http://ufldl.stanford.edu/housenumbers/",
dependencies = scipy
"""
def __init__(
self,
root: Union[str, pathlib.Path],
*,
split: str = "train",
skip_integrity_check: bool = False,
) -> None:
self._split = self._verify_str_arg(split, "split", {"train", "test", "extra"})
self._categories = _info()["categories"]
super().__init__(root, skip_integrity_check=skip_integrity_check, dependencies=("scipy",))
_CHECKSUMS = {
"train": "435e94d69a87fde4fd4d7f3dd208dfc32cb6ae8af2240d066de1df7508d083b8",
"test": "cdce80dfb2a2c4c6160906d0bd7c68ec5a99d7ca4831afa54f09182025b6a75b",
"extra": "a133a4beb38a00fcdda90c9489e0c04f900b660ce8a316a5e854838379a71eb3",
}
def _resources(self) -> List[OnlineResource]:
data = HttpResource(
f"http://ufldl.stanford.edu/housenumbers/{self._split}_32x32.mat",
sha256=self._CHECKSUMS[self._split],
)
return [data]
def _read_images_and_labels(self, data: Tuple[str, BinaryIO]) -> List[Tuple[np.ndarray, np.ndarray]]:
_, buffer = data
content = read_mat(buffer)
return list(
zip(
content["X"].transpose((3, 0, 1, 2)),
content["y"].squeeze(),
)
)
def _prepare_sample(self, data: Tuple[np.ndarray, np.ndarray]) -> Dict[str, Any]:
image_array, label_array = data
return dict(
image=Image(image_array.transpose((2, 0, 1))),
label=Label(int(label_array) % 10, categories=self._categories),
)
def _datapipe(self, resource_dps: List[IterDataPipe]) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Mapper(dp, self._read_images_and_labels)
dp = UnBatcher(dp)
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def __len__(self) -> int:
return {
"train": 73_257,
"test": 26_032,
"extra": 531_131,
}[self._split]
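# A minimal usage sketch (the path is illustrative; the prototype datasets API
# is experimental and may change between torchvision releases):
#
#     dataset = SVHN("~/data/svhn", split="train", skip_integrity_check=True)
#     sample = next(iter(dataset))  # dict with "image" and "label" entries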
|
{
"content_hash": "105e34279e20cec038754ecd0e46ebf6",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 105,
"avg_line_length": 33.21686746987952,
"alnum_prop": 0.6180631120783461,
"repo_name": "pytorch/vision",
"id": "6dd55a77c99f2d17457fe94db54fa53f3e78490d",
"size": "2757",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "torchvision/prototype/datasets/_builtin/svhn.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "20242"
},
{
"name": "C",
"bytes": "930"
},
{
"name": "C++",
"bytes": "366825"
},
{
"name": "CMake",
"bytes": "18266"
},
{
"name": "Cuda",
"bytes": "90174"
},
{
"name": "Dockerfile",
"bytes": "1608"
},
{
"name": "Java",
"bytes": "21833"
},
{
"name": "Objective-C",
"bytes": "2715"
},
{
"name": "Objective-C++",
"bytes": "3284"
},
{
"name": "PowerShell",
"bytes": "2874"
},
{
"name": "Python",
"bytes": "3952070"
},
{
"name": "Ruby",
"bytes": "1086"
},
{
"name": "Shell",
"bytes": "35660"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django_dynamic_fixture import get
from readthedocs_build.config import BuildConfig
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project
from readthedocs.doc_builder.config import ConfigWrapper
def get_build_config(config, env_config=None, source_file='readthedocs.yml',
source_position=0):
config['name'] = 'test'
config['type'] = 'sphinx'
ret_config = BuildConfig(
{'output_base': ''},
config,
source_file=source_file,
source_position=source_position)
ret_config.validate()
return ret_config
class ConfigWrapperTests(TestCase):
def setUp(self):
self.project = get(Project, slug='test', python_interpreter='python',
install_project=False, requirements_file='urls.py')
self.version = get(Version, project=self.project, slug='foobar')
def test_python_version(self):
yaml_config = get_build_config({'python': {'version': 3}})
config = ConfigWrapper(version=self.version, yaml_config=yaml_config)
self.assertEqual(config.python_version, 3)
yaml_config = get_build_config({})
config = ConfigWrapper(version=self.version, yaml_config=yaml_config)
self.assertEqual(config.python_version, 2)
def test_python_interpreter(self):
yaml_config = get_build_config({'python': {'version': 3}})
config = ConfigWrapper(version=self.version, yaml_config=yaml_config)
self.assertEqual(config.python_interpreter, 'python3')
yaml_config = get_build_config({})
config = ConfigWrapper(version=self.version, yaml_config=yaml_config)
self.assertEqual(config.python_interpreter, 'python')
def test_install_project(self):
yaml_config = get_build_config({'python': {'setup_py_install': True}})
config = ConfigWrapper(version=self.version, yaml_config=yaml_config)
self.assertEqual(config.install_project, True)
yaml_config = get_build_config({})
config = ConfigWrapper(version=self.version, yaml_config=yaml_config)
self.assertEqual(config.install_project, False)
def test_conda(self):
to_find = 'urls.py'
yaml_config = get_build_config({'conda': {'file': to_find}})
config = ConfigWrapper(version=self.version, yaml_config=yaml_config)
self.assertEqual(config.use_conda, True)
self.assertTrue(config.conda_file[-len(to_find):] == to_find)
yaml_config = get_build_config({})
config = ConfigWrapper(version=self.version, yaml_config=yaml_config)
self.assertEqual(config.use_conda, False)
self.assertEqual(config.conda_file, None)
def test_requirements_file(self):
yaml_config = get_build_config({'requirements_file': 'wsgi.py'})
config = ConfigWrapper(version=self.version, yaml_config=yaml_config)
self.assertEqual(config.requirements_file, 'wsgi.py')
yaml_config = get_build_config({})
config = ConfigWrapper(version=self.version, yaml_config=yaml_config)
self.assertEqual(config.requirements_file, 'urls.py')
|
{
"content_hash": "b29fdea95a733fedfbfbcb1b852cc3a7",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 78,
"avg_line_length": 41.324675324675326,
"alnum_prop": 0.6731615336266499,
"repo_name": "espdev/readthedocs.org",
"id": "dda9c8ac3fae1005dc1df02fc0324dc5c3220d25",
"size": "3182",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "readthedocs/rtd_tests/tests/test_config_wrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "65340"
},
{
"name": "HTML",
"bytes": "216474"
},
{
"name": "JavaScript",
"bytes": "1437755"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Nginx",
"bytes": "891"
},
{
"name": "Perl",
"bytes": "6814"
},
{
"name": "Python",
"bytes": "1452456"
},
{
"name": "Shell",
"bytes": "1501"
}
],
"symlink_target": ""
}
|
"""
Lasagne implementation of ILSVRC2015 winner on the mnist dataset
Deep Residual Learning for Image Recognition
http://arxiv.org/abs/1512.03385
"""
from __future__ import print_function
import sys
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
import BatchNormLayer
sys.setrecursionlimit(10000)
# ################## Download and prepare the MNIST dataset ##################
# This is just some way of getting the MNIST dataset from an online location
# and loading it into numpy arrays. It doesn't involve Lasagne at all.
def load_dataset():
# We first define a download function, supporting both Python 2 and 3.
if sys.version_info[0] == 2:
from urllib import urlretrieve
else:
from urllib.request import urlretrieve
def download(filename, source='http://yann.lecun.com/exdb/mnist/'):
print("Downloading %s" % filename)
urlretrieve(source + filename, filename)
# We then define functions for loading MNIST images and labels.
# For convenience, they also download the requested files if needed.
import gzip
def load_mnist_images(filename):
if not os.path.exists(filename):
download(filename)
# Read the inputs in Yann LeCun's binary format.
with gzip.open(filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=16)
# The inputs are vectors now, we reshape them to monochrome 2D images,
# following the shape convention: (examples, channels, rows, columns)
data = data.reshape(-1, 1, 28, 28)
# The inputs come as bytes, we convert them to float32 in range [0,1].
# (Actually to range [0, 255/256], for compatibility to the version
# provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
return data / np.float32(256)
def load_mnist_labels(filename):
if not os.path.exists(filename):
download(filename)
# Read the labels in Yann LeCun's binary format.
with gzip.open(filename, 'rb') as f:
data = np.frombuffer(f.read(), np.uint8, offset=8)
# The labels are vectors of integers now, that's exactly what we want.
return data
# We can now download and read the training and test set images and labels.
X_train = load_mnist_images('train-images-idx3-ubyte.gz')
y_train = load_mnist_labels('train-labels-idx1-ubyte.gz')
X_test = load_mnist_images('t10k-images-idx3-ubyte.gz')
y_test = load_mnist_labels('t10k-labels-idx1-ubyte.gz')
# We reserve the last 10000 training examples for validation.
X_train, X_val = X_train[:-10000], X_train[-10000:]
y_train, y_val = y_train[:-10000], y_train[-10000:]
# We just return all the arrays in order, as expected in main().
# (It doesn't matter how we do this as long as we can read them again.)
return X_train, y_train, X_val, y_val, X_test, y_test
# ##################### Build the neural network model #######################
# helper function for projection_b
def ceildiv(a, b):
return -(-a // b)
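# e.g. ceildiv(7, 2) == 4, whereas 7 // 2 == 3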
def build_cnn(input_var=None, n=1, num_filters=8, cudnn='no'):
import lasagne # For some odd reason it can't read the global import, please PR/Issue if you know why
# Setting up layers
if cudnn == 'yes':
import lasagne.layers.dnn
conv = lasagne.layers.dnn.Conv2DDNNLayer # cuDNN
else:
conv = lasagne.layers.Conv2DLayer
nonlin = lasagne.nonlinearities.rectify
nonlin_layer = lasagne.layers.NonlinearityLayer
sumlayer = lasagne.layers.ElemwiseSumLayer
#batchnorm = BatchNormLayer.BatchNormLayer
batchnorm = lasagne.layers.BatchNormLayer
# Setting the projection type for when reducing height/width
# and increasing dimensions.
# Default is 'B' as B performs slightly better
# and A requires newer version of lasagne with ExpressionLayer
projection_type = 'B'
if projection_type == 'A':
expression = lasagne.layers.ExpressionLayer
pad = lasagne.layers.PadLayer
if projection_type == 'A':
# option A for projection as described in paper
# (should perform slightly worse than B)
def projection(l_inp):
n_filters = l_inp.output_shape[1]*2
l = expression(l_inp, lambda X: X[:, :, ::2, ::2], lambda s: (s[0], s[1], ceildiv(s[2], 2), ceildiv(s[3], 2)))
l = pad(l, [n_filters//4,0,0], batch_ndim=1)
return l
if projection_type == 'B':
# option B for projection as described in paper
def projection(l_inp):
# twice normal channels when projecting!
n_filters = l_inp.output_shape[1]*2
l = conv(l_inp, num_filters=n_filters, filter_size=(1, 1),
stride=(2, 2), nonlinearity=None, pad='same', b=None)
l = batchnorm(l)
return l
# helper function to handle filters/strides when increasing dims
def filters_increase_dims(l, increase_dims):
in_num_filters = l.output_shape[1]
if increase_dims:
first_stride = (2, 2)
out_num_filters = in_num_filters*2
else:
first_stride = (1, 1)
out_num_filters = in_num_filters
return out_num_filters, first_stride
# block as described and used in cifar in the original paper:
# http://arxiv.org/abs/1512.03385
def res_block_v1(l_inp, nonlinearity=nonlin, increase_dim=False):
# first figure filters/strides
n_filters, first_stride = filters_increase_dims(l_inp, increase_dim)
# conv -> BN -> nonlin -> conv -> BN -> sum -> nonlin
l = conv(l_inp, num_filters=n_filters, filter_size=(3, 3),
stride=first_stride, nonlinearity=None, pad='same',
W=lasagne.init.HeNormal(gain='relu'))
l = batchnorm(l)
l = nonlin_layer(l, nonlinearity=nonlin)
l = conv(l, num_filters=n_filters, filter_size=(3, 3),
stride=(1, 1), nonlinearity=None, pad='same',
W=lasagne.init.HeNormal(gain='relu'))
l = batchnorm(l)
if increase_dim:
# Use projection (A, B) as described in paper
p = projection(l_inp)
else:
# Identity shortcut
p = l_inp
l = sumlayer([l, p])
l = nonlin_layer(l, nonlinearity=nonlin)
return l
# block as described in second paper on the subject (by same authors):
# http://arxiv.org/abs/1603.05027
def res_block_v2(l_inp, nonlinearity=nonlin, increase_dim=False):
# first figure filters/strides
n_filters, first_stride = filters_increase_dims(l_inp, increase_dim)
# BN -> nonlin -> conv -> BN -> nonlin -> conv -> sum
l = batchnorm(l_inp)
l = nonlin_layer(l, nonlinearity=nonlin)
l = conv(l, num_filters=n_filters, filter_size=(3, 3),
stride=first_stride, nonlinearity=None, pad='same',
W=lasagne.init.HeNormal(gain='relu'))
l = batchnorm(l)
l = nonlin_layer(l, nonlinearity=nonlin)
l = conv(l, num_filters=n_filters, filter_size=(3, 3),
stride=(1, 1), nonlinearity=None, pad='same',
W=lasagne.init.HeNormal(gain='relu'))
if increase_dim:
# Use projection (A, B) as described in paper
p = projection(l_inp)
else:
# Identity shortcut
p = l_inp
l = sumlayer([l, p])
return l
def bottleneck_block(l_inp, nonlinearity=nonlin, increase_dim=False):
# first figure filters/strides
n_filters, first_stride = filters_increase_dims(l_inp, increase_dim)
# conv -> BN -> nonlin -> conv -> BN -> nonlin -> conv -> BN -> sum
# -> nonlin
# first make the bottleneck, scale the filters ..!
scale = 4 # as per bottleneck architecture used in paper
        scaled_filters = n_filters // scale  # integer division keeps the filter count an int
l = conv(l_inp, num_filters=scaled_filters, filter_size=(1, 1),
stride=first_stride, nonlinearity=None, pad='same',
W=lasagne.init.HeNormal(gain='relu'))
l = batchnorm(l)
l = nonlin_layer(l, nonlinearity=nonlin)
l = conv(l, num_filters=scaled_filters, filter_size=(3, 3),
stride=(1, 1), nonlinearity=None, pad='same',
W=lasagne.init.HeNormal(gain='relu'))
l = batchnorm(l)
l = nonlin_layer(l, nonlinearity=nonlin)
l = conv(l, num_filters=n_filters, filter_size=(1, 1),
stride=(1, 1), nonlinearity=None, pad='same',
W=lasagne.init.HeNormal(gain='relu'))
if increase_dim:
# Use projection (A, B) as described in paper
p = projection(l_inp)
else:
# Identity shortcut
p = l_inp
l = sumlayer([l, p])
l = nonlin_layer(l, nonlinearity=nonlin)
return l
# Bottleneck architecture with more efficiency (the post with Kaiming He's response)
# https://www.reddit.com/r/MachineLearning/comments/3ywi6x/deep_residual_learning_the_bottleneck/
def bottleneck_block_fast(l_inp, nonlinearity=nonlin, increase_dim=False):
# first figure filters/strides
n_filters, last_stride = filters_increase_dims(l_inp, increase_dim)
# conv -> BN -> nonlin -> conv -> BN -> nonlin -> conv -> BN -> sum
# -> nonlin
# first make the bottleneck, scale the filters ..!
scale = 4 # as per bottleneck architecture used in paper
        scaled_filters = n_filters // scale  # integer division keeps the filter count an int
l = conv(l_inp, num_filters=scaled_filters, filter_size=(1, 1),
stride=(1, 1), nonlinearity=None, pad='same',
W=lasagne.init.HeNormal(gain='relu'))
l = batchnorm(l)
l = nonlin_layer(l, nonlinearity=nonlin)
l = conv(l, num_filters=scaled_filters, filter_size=(3, 3),
stride=(1, 1), nonlinearity=None, pad='same',
W=lasagne.init.HeNormal(gain='relu'))
l = batchnorm(l)
l = nonlin_layer(l, nonlinearity=nonlin)
l = conv(l, num_filters=n_filters, filter_size=(1, 1),
stride=last_stride, nonlinearity=None, pad='same',
W=lasagne.init.HeNormal(gain='relu'))
if increase_dim:
# Use projection (A, B) as described in paper
p = projection(l_inp)
else:
# Identity shortcut
p = l_inp
l = sumlayer([l, p])
l = nonlin_layer(l, nonlinearity=nonlin)
return l
res_block = res_block_v1
    # Stacks the residual blocks; makes it easy to scale the architecture with the integer n
def blockstack(l, n, nonlinearity=nonlin):
for _ in range(n):
l = res_block(l, nonlinearity=nonlin)
return l
# Building the network
l_in = lasagne.layers.InputLayer(shape=(None, 1, 28, 28),
input_var=input_var)
# First layer! just a plain convLayer
l1 = conv(l_in, num_filters=num_filters, stride=(1, 1),
filter_size=(3, 3), nonlinearity=None, pad='same')
l1 = batchnorm(l1)
l1 = nonlin_layer(l1, nonlinearity=nonlin)
# Stacking bottlenecks and increasing dims! (while reducing shape size)
l1_bs = blockstack(l1, n=n)
l1_id = res_block(l1_bs, increase_dim=True)
l2_bs = blockstack(l1_id, n=n)
l2_id = res_block(l2_bs, increase_dim=True)
l3_bs = blockstack(l2_id, n=n)
# And, finally, the 10-unit output layer:
network = lasagne.layers.DenseLayer(
l3_bs,
num_units=10,
nonlinearity=lasagne.nonlinearities.softmax)
return network
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
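# A minimal usage sketch of the iterator above (shapes are illustrative):
#
#     X = np.zeros((1000, 1, 28, 28), dtype=np.float32)
#     y = np.zeros(1000, dtype=np.uint8)
#     for xb, yb in iterate_minibatches(X, y, 100, shuffle=True):
#         pass  # xb.shape == (100, 1, 28, 28); yb.shape == (100,)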
# ############################## Main program ################################
# Everything else will be handled in our main program now. We could pull out
# more functions to better separate the code, but it wouldn't make it any
# easier to read.
def main(n=1, num_filters=8, num_epochs=500, cudnn='no'):
assert n>=0
assert num_filters>0
assert num_epochs>0
assert cudnn in ['yes', 'no']
print("Amount of bottlenecks: %d" % n)
# Load the dataset
print("Loading data...")
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
# Prepare Theano variables for inputs and targets
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
# Create neural network model (depending on first command line parameter)
print("Building model and compiling functions...")
network = build_cnn(input_var, n, num_filters, cudnn)
all_layers = lasagne.layers.get_all_layers(network)
num_params = lasagne.layers.count_params(network)
num_conv = 0
num_nonlin = 0
num_input = 0
num_batchnorm = 0
num_elemsum = 0
num_dense = 0
num_unknown = 0
print(" layer output shapes:")
for layer in all_layers:
        name = layer.__class__.__name__.ljust(32)
print(" %s %s" %(name, lasagne.layers.get_output_shape(layer)))
if "Conv2D" in name:
num_conv += 1
elif "NonlinearityLayer" in name:
num_nonlin += 1
elif "InputLayer" in name:
num_input += 1
elif "BatchNormLayer" in name:
num_batchnorm += 1
elif "ElemwiseSumLayer" in name:
num_elemsum += 1
elif "DenseLayer" in name:
num_dense += 1
else:
num_unknown += 1
print(" no. of InputLayers: %d" % num_input)
print(" no. of Conv2DLayers: %d" % num_conv)
print(" no. of BatchNormLayers: %d" % num_batchnorm)
print(" no. of NonlinearityLayers: %d" % num_nonlin)
print(" no. of DenseLayers: %d" % num_dense)
print(" no. of ElemwiseSumLayers: %d" % num_elemsum)
print(" no. of Unknown Layers: %d" % num_unknown)
print(" total no. of layers: %d" % len(all_layers))
print(" no. of parameters: %d" % num_params)
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
# We could add some weight decay as well here, see lasagne.regularization.
# Create update expressions for training, i.e., how to modify the
# parameters at each training step. Here, we'll use Stochastic Gradient
# Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
params = lasagne.layers.get_all_params(network, trainable=True)
    # Several learning rates: a low initial learning rate followed by
    # learning rate annealing (the dict key is the epoch)
learning_rate_schedule = {
0: 0.0001, # low initial learning rate as described in paper
2: 0.01,
100: 0.001,
150: 0.0001
}
learning_rate = theano.shared(np.float32(learning_rate_schedule[0]))
updates = lasagne.updates.nesterov_momentum(
loss, params, learning_rate=learning_rate, momentum=0.9)
# Create a loss expression for validation/testing. The crucial difference
# here is that we do a deterministic forward pass through the network,
# disabling dropout layers.
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
target_var)
test_loss = test_loss.mean()
# As a bonus, also create an expression for the classification accuracy:
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
dtype=theano.config.floatX)
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function([input_var, target_var], loss, updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
# Finally, launch the training loop.
print("Starting training...")
# We iterate over epochs:
for epoch in range(num_epochs):
if epoch in learning_rate_schedule:
lr = np.float32(learning_rate_schedule[epoch])
print(" setting learning rate to %.7f" % lr)
learning_rate.set_value(lr)
# In each epoch, we do a full pass over the training data:
train_err = 0
train_batches = 0
start_time = time.time()
for batch in iterate_minibatches(X_train, y_train, 500, shuffle=True):
inputs, targets = batch
train_err += train_fn(inputs, targets)
train_batches += 1
# And a full pass over the validation data:
val_err = 0
val_acc = 0
val_batches = 0
for batch in iterate_minibatches(X_val, y_val, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
# Then we print the results for this epoch:
print("Epoch {} of {} took {:.3f}s".format(
epoch + 1, num_epochs, time.time() - start_time))
print(" training loss:\t\t{:.6f}".format(train_err / train_batches))
print(" validation loss:\t\t{:.6f}".format(val_err / val_batches))
print(" validation accuracy:\t\t{:.2f} %".format(
val_acc / val_batches * 100))
# After training, we compute and print the test error:
test_err = 0
test_acc = 0
test_batches = 0
for batch in iterate_minibatches(X_test, y_test, 500, shuffle=False):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
test_err += err
test_acc += acc
test_batches += 1
print("Final results:")
print(" test loss:\t\t\t{:.6f}".format(test_err / test_batches))
print(" test accuracy:\t\t{:.2f} %".format(
test_acc / test_batches * 100))
# Optionally, you could now dump the network weights to a file like this:
# np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
#
# And load them again later on like this:
# with np.load('model.npz') as f:
# param_values = [f['arr_%d' % i] for i in range(len(f.files))]
# lasagne.layers.set_all_param_values(network, param_values)
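    #
    # A hedged sketch (not in the original script): with the weights restored
    # and `input_var`/`network` still in scope, a deterministic prediction
    # function could be compiled like this:
    #
    # predict_fn = theano.function(
    #     [input_var],
    #     T.argmax(lasagne.layers.get_output(network, deterministic=True),
    #              axis=1))
    # print(predict_fn(X_test[:5]))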
if __name__ == '__main__':
if ('--help' in sys.argv) or ('-h' in sys.argv):
print("Trains a Deep Residual neural network on MNIST using Lasagne.")
print("Usage: %s [NUM_BOTTLENECKS] [NUM_FILTERS] [EPOCHS]" % sys.argv[0])
print()
print("NUM_BOTTLENECKS: Define amount of bottlenecks with integer, e.g. 3")
print("NUM_FILTERS: Defines the amount of filters in the first layer(doubled at each filter halfing)")
print("EPOCHS: number of training epochs to perform (default: 500)")
print("CUDNN: no to not use, yes to use (default: no)")
else:
kwargs = {}
if len(sys.argv) > 1:
kwargs['n'] = int(sys.argv[1])
if len(sys.argv) > 2:
kwargs['num_filters'] = int(sys.argv[2])
if len(sys.argv) > 3:
kwargs['num_epochs'] = int(sys.argv[3])
if len(sys.argv) > 4:
kwargs['cudnn'] = sys.argv[4]
main(**kwargs)
|
{
"content_hash": "2f76a9fffa7436bb29d6dfe65fd49415",
"timestamp": "",
"source": "github",
"line_count": 491,
"max_line_length": 122,
"avg_line_length": 41.78615071283096,
"alnum_prop": 0.6134912511575766,
"repo_name": "VCG/gp",
"id": "7d172a566d076c04cb52120ecd527543b719cbb2",
"size": "20540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gp/nets/Deep_Residual_Network_mnist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1231"
},
{
"name": "HTML",
"bytes": "59805"
},
{
"name": "JavaScript",
"bytes": "4075"
},
{
"name": "Jupyter Notebook",
"bytes": "19996593"
},
{
"name": "Matlab",
"bytes": "19322"
},
{
"name": "Python",
"bytes": "867475"
},
{
"name": "Shell",
"bytes": "117697"
},
{
"name": "TeX",
"bytes": "489980"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_rebel_brigadier_general_trandoshan_female.iff"
result.attribute_template_id = 9
result.stfName("npc_name","trandoshan_base_female")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "8de39edc5be90580bd40b684cc328738",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 95,
"avg_line_length": 25.846153846153847,
"alnum_prop": 0.7172619047619048,
"repo_name": "obi-two/Rebelion",
"id": "a1317bcdf52cf2d76b3d7686c8c832f4667b46e7",
"size": "481",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_dressed_rebel_brigadier_general_trandoshan_female.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""
The top-most bit of code that ties everything together into one runnable
server process. The two important parts here are:
* The RESTful JSON API that runs under twisted.web. This is primarily used
to pipe commands into the Minecraft process from websites or other
applications.
* The Minecraft server process whose stdin we can pipe commands into, and whose
stdout we can monitor for activity.
"""
import sys
import os
from twisted.application.service import Application
from twisted.internet import reactor
from twisted.python import log
from zombiepygman.web_api.service import ZombiePygManWebAPIService
from zombiepygman.notchian_wrapper.process import NotchianProcess
from zombiepygman.conf import settings
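# A hypothetical conf.py sketch for reference; the setting names match those
# checked below, but the values are illustrative only:
#
#   MINECRAFT_SERVER_DATA_DIR = '/srv/minecraft'
#   MINECRAFT_SERVER_JAR_PATH = '/srv/minecraft/minecraft_server.jar'
#   API_SECURITY_ENABLED = True
#   API_SECURITY_TOKEN = 'change-me'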
# The data dir holds server settings, the white-list, the world data, and etc.
if not os.path.exists(settings.MINECRAFT_SERVER_DATA_DIR):
log.err("ERROR: The directory you specified in your "\
"settings.MINECRAFT_SERVER_DATA_DIR doesn't exist:")
log.err(" %s" % settings.MINECRAFT_SERVER_DATA_DIR)
sys.exit(1)
# Minecraft server's Java archive file. Can't go far without this.
if not os.path.exists(settings.MINECRAFT_SERVER_JAR_PATH):
log.err("ERROR: The minecraft_server.jar you specified in your "\
"settings.MINECRAFT_SERVER_JAR_PATH can't be found.")
sys.exit(1)
if settings.API_SECURITY_ENABLED is False:
# A stern warning.
log.err("WARNING: API security is disabled. Open season.")
# They need to set this to something, or we'll have a bunch of people running
# around with wide-open API servers.
if settings.API_SECURITY_ENABLED and settings.API_SECURITY_TOKEN is None:
    log.err("ERROR: You have not set a new value for API_SECURITY_TOKEN in "\
            "your conf.py. Do that now, then start zombiepygman again.")
log.err("ERROR: zombiepygman startup aborted.")
sys.exit(1)
application = Application("ZombiePygman")
# Set up the RESTful JSON API for interacting with the Minecraft server
# process. Goes through the transport set up later in this module.
service = ZombiePygManWebAPIService()
service.setServiceParent(application)
# Runs the java -jar minecraft_server.jar command in a separate process.
# Sets up a transport in notchian_wrapper.process.transport.
reactor.callWhenRunning(NotchianProcess.start_minecraft_server)
|
{
"content_hash": "22ded7a20e03265434282be4727b30f2",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 79,
"avg_line_length": 43.2037037037037,
"alnum_prop": 0.7625375053579083,
"repo_name": "gtaylor/zombiepygman",
"id": "c529781673af87fe36868000059d204815f9a51e",
"size": "2333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zombiepygman/server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "43158"
}
],
"symlink_target": ""
}
|
"""
NewzbinDownloader - Downloads NZBs directly from v3.newzbin.com via the
DirectNZB API: http://docs.newzbin.com/Newzbin::DirectNZB
(c) Copyright 2005-2007 Philip Jenvey
Thomas Hurst <freaky@newzbin.com>
Dan Borello
[See end of file]
"""
import os, random, Hellanzb.NZBQueue
from twisted.internet import reactor
from twisted.internet.error import ConnectionRefusedError, DNSLookupError, TimeoutError
from twisted.web.client import HTTPClientFactory
from twisted.web.error import Error
from Hellanzb.Log import *
from Hellanzb.NZBDownloader import NZBDownloader, StoreHeadersHTTPDownloader
from Hellanzb.Util import tempFilename
__id__ = '$Id$'
class NewzbinDownloader(NZBDownloader):
""" Download the NZB file with the specified msgid from www.newzbin.com, by instantiating
this class and calling download() """
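    # A minimal usage sketch (assumes NEWZBIN_USERNAME/PASSWORD are set in the
    # Hellanzb config and the twisted reactor is running):
    #
    #   NewzbinDownloader('123456').download()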
HEADERS = {
'Content-Type': 'application/x-www-form-urlencoded',
'Accept-Encoding': 'gzip',
'Accept': 'text/plain'
}
url = 'http://www.newzbin.com/api/dnzb/'
def __init__(self, msgId):
""" Initialize the downloader with the specified msgId string """
self.msgId = msgId
# The HTTPDownloader
self.downloader = None
# Write the downloaded NZB here temporarily
self.tempFilename = os.path.join(Hellanzb.TEMP_DIR,
tempFilename(self.TEMP_FILENAME_PREFIX) + '.nzb')
# The real NZB filename determined from HTTP headers
self.nzbFilename = None
        # Whether it appears that no NZB with this msgId exists on newzbin
self.nonExistantNZB = False
# DNZB error message
self.errMessage = False
# Number of attempts to download this NZB
self.attempt = 0
def gotHeaders(self, headers):
""" The downloader will feeds headers via this function """
super(self.__class__, self).gotHeaders(headers)
if headers.has_key('x-dnzb-name'):
name = headers.get('x-dnzb-name')[0]
# XXX may want to sanitize a little more
cleanName = name.replace('/', '_').replace('\\','_')
self.nzbFilename = '%s_%s.nzb' % (self.msgId, cleanName)
else:
            # The failure case will go to the generic error handler at the
            # moment, so this is most likely unused
if headers.has_key('x-dnzb-rtext'):
self.errMessage = headers.get('x-dnzb-rtext')[0]
else:
self.errMessage = 'DNZB service error'
info('DNZB request failed: %s' % self.errMessage)
self.nzbFilename = None
if headers.has_key('x-dnzb-rcode') and headers.get('x-dnzb-rcode')[0] == '404':
self.nonExistantNZB = True
self.nzbCategory = headers.get('x-dnzb-category')[0]
def download(self):
""" Start the NZB download process """
debug(str(self) + ' Downloading from newzbin.com..')
if not NewzbinDownloader.canDownload():
debug(str(self) + ' download: No www.newzbin.com login information')
return
info('Downloading newzbin NZB: %s ' % self.msgId)
self.handleNZBDownloadFromNewzbin()
def handleNZBDownloadFromNewzbin(self):
""" Download the NZB """
debug(str(self) + ' handleNZBDownloadFromNewzbin')
# XXX erm, URL encoding needed?
postdata = 'username=' + Hellanzb.NEWZBIN_USERNAME
postdata += '&password=' + Hellanzb.NEWZBIN_PASSWORD
postdata += '&reportid=' + self.msgId
# This will be www.newzbin.com eventually
self.downloader = StoreHeadersHTTPDownloader(self.url, self.tempFilename, method = 'POST',
headers = self.HEADERS, postdata = postdata,
agent = self.AGENT)
self.downloader.deferred.addCallback(self.handleEnqueueNZB)
self.downloader.deferred.addErrback(self.errBack)
reactor.connectTCP('v3.newzbin.com', 80, self.downloader)
def handleEnqueueNZB(self, page):
""" Add the new NZB to the queue"""
if super(self.__class__, self).handleEnqueueNZB(page):
Hellanzb.NZBQueue.writeStateXML()
else:
msg = 'Unable to download newzbin NZB: %s' % self.msgId
if self.errMessage:
                error('%s (%s)' % (msg, self.errMessage))
elif self.nonExistantNZB:
error('%s (This appears to be an invalid msgid)' % msg)
else:
error('%s (Incorrect NEWZBIN_USERNAME/PASSWORD?)' % msg)
# Invalidate the cached cookies
Hellanzb.NZBQueue.writeStateXML()
def errBack(self, reason):
if not reason.check(Error):
return super(self.__class__, self).errBack(reason)
headers = self.downloader.response_headers
rcode = headers.get('x-dnzb-rcode', [None])[0]
if rcode == '450':
self.attempt += 1
if self.attempt >= 5:
error('Unable to download newzbin NZB: %s due to rate limiting. Will '
'not retry' % (self.msgId))
return
rtext = headers.get('x-dnzb-rtext', [''])[0]
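            # The wait time is assumed to be the fourth whitespace-separated
            # token of the X-DNZB-RText header; fall back to 60 seconds if it
            # cannot be parsed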
try:
newzbinWait = int(rtext.split(' ')[3])
            except (IndexError, ValueError):
# Invalid DNZB-RText
newzbinWait = 60
wait = round(newzbinWait + random.random() * 15, 0)
if not rtext:
rtext = "'no error message'"
error('Unable to download newzbin NZB: %s (newzbin said: %s) will '
'retry in %i seconds (attempt: %i)' % \
(self.msgId, rtext, wait, self.attempt))
reactor.callLater(wait, self.download)
return
elif rcode != '200':
error('Unable to download newzbin NZB: %s (%s: %s)' % \
(self.msgId,
headers.get('x-dnzb-rcode', ['No Code'])[0],
headers.get('x-dnzb-rtext', ['No Error Text'])[0]))
return
def __str__(self):
return '%s(%s):' % (self.__class__.__name__, self.msgId)
def canDownload():
""" Whether or not the conf file supplied www.newzbin.com login info """
noInfo = lambda var : not hasattr(Hellanzb, var) or getattr(Hellanzb, var) == None
if noInfo('NEWZBIN_USERNAME') or noInfo('NEWZBIN_PASSWORD'):
return False
return True
canDownload = staticmethod(canDownload)
"""
Copyright (c) 2005-2007 Philip Jenvey <pjenvey@groovie.org>
Thomas Hurst <freaky@newzbin.com>
Dan Borello
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author or contributors may not be used to endorse or
promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
$Id$
"""
|
{
"content_hash": "7d757c7d038809c2891f5aa31325cd81",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 102,
"avg_line_length": 41.81725888324873,
"alnum_prop": 0.6188395241563486,
"repo_name": "emikulic/hellanzb",
"id": "6d25ba5f55533a14c3c45501c6a580af53022403",
"size": "8238",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Hellanzb/NewzbinDownloader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "667124"
},
{
"name": "Shell",
"bytes": "817"
}
],
"symlink_target": ""
}
|
'''
I N S T A L L A T I O N::
Step 1:
Copy "no_flip_pole_vector_tool.py" to your Maya plugins directory.
Windows: C:\Users\UserName\Documents\maya\scripts
Step 2:
Run this in the Maya's Script Editor under the Python tab...
import no_flip_pole_vector_tool as nfpv
nfpv.No_Flip_Pole_Vector().show_ui()
If you have any problems email me at Nicholas.Silveira@gmail.com
'''
import sys
import functools
import maya.cmds as cmds
import maya.OpenMaya as OpenMaya
VERSION = 1.0
'''
========================================================================
----> No Flip Pole Vector <----
========================================================================
'''
class No_Flip_Pole_Vector():
"""
*Examples:* ::
import no_flip_pole_vector_tool as nfpv
# Show ui
nfpv.No_Flip_Pole_Vector().show_ui()
*Author:*
* nicholas.silveira, Nicholas.Silveira@gmail.com, Jun 13, 2013 8:53:53 AM
"""
'''
========================================================================
----> Shows No Flip Pole Vector ui <----
========================================================================
'''
def show_ui( self ):
"""
*Examples:* ::
import no_flip_pole_vector_tool as nfpv
# Show ui
nfpv.No_Flip_Pole_Vector().show_ui()
"""
if cmds.window( 'no_flip_pole_vector_window', exists = True, q = True ):
cmds.deleteUI( 'no_flip_pole_vector_window' )
self.no_flip_pole_vector_ui()
'''
========================================================================
----> No Flip Pole Vector ui <----
========================================================================
'''
def no_flip_pole_vector_ui( self ):
self.root_joint = None
self.controller = None
self.pole_vector = None
window = cmds.window( 'no_flip_pole_vector_window', title = 'No Flip Pole Vector {0}'.format( VERSION ), menuBar = True )
cmds.menu( label = 'Help' )
cmds.menuItem( 'sample"', label = 'Build Sample', c = self.sample )
cmds.menuItem( 'code_sample"', label = 'Code Sample', c = self.code_sample )
cmds.menuItem( 'about"', label = 'About No Flip Pole Vector', c = self.about )
cmds.columnLayout()
cmds.rowColumnLayout ( nc = 2, columnWidth = [( 1, 100 ), ( 2, 200 )] )
cmds.text( label = 'Name: ', align = 'right' )
self.name_text = cmds.textField()
cmds.setParent( '..' )
cmds.rowColumnLayout ( nc = 1, columnWidth = ( 1, 300 ) )
cmds.separator( height = 20, style = 'in' )
cmds.rowColumnLayout ( nc = 2, columnWidth = [( 1, 100 ), ( 2, 200 )] )
cmds.button( label = 'Root Joint', c = functools.partial( self.set_text_field, 'root_joint' ) )
self.root_joint_text = cmds.textField()
cmds.button( label = 'Controller', c = functools.partial( self.set_text_field, 'controller' ) )
self.controller_text = cmds.textField()
cmds.button( label = 'Pole Vector', c = functools.partial( self.set_text_field, 'pole_vector' ) )
self.pole_vector_text = cmds.textField()
cmds.setParent( '..' )
cmds.rowColumnLayout ( nc = 1, columnWidth = ( 1, 300 ) )
cmds.separator( height = 20, style = 'in' )
cmds.button( label = 'Build No Flip Pole Vector', c = self.run_setup )
cmds.showWindow( window )
'''
========================================================================
----> Set Maya ui text field <----
========================================================================
'''
def set_text_field( self, text_field_name, *args ):
"""
*Arguments:*
* ``text_field_name`` Pass a text field name that will take on the selected objects name.
*Examples:* ::
        import maya.cmds as cmds
        import no_flip_pole_vector_tool as nfpv
        reload( nfpv )
        # Show ui
        no_flip_pole_vector = nfpv.No_Flip_Pole_Vector()
        no_flip_pole_vector.show_ui()
# Create locator
cmds.spaceLocator()
# Add selected to text field
no_flip_pole_vector.set_text_field('controller')
"""
objs = cmds.ls( sl = True )
if len( objs ) == 1:
obj_name = objs[0].split( '|' )[-1]
obj_dag = DAG_Node( cmds.ls( sl = True )[0] )
if text_field_name == 'root_joint':
self.root_joint = obj_dag
cmds.textField( self.root_joint_text, edit = True, text = obj_name )
elif text_field_name == 'controller':
self.controller = obj_dag
cmds.textField( self.controller_text, edit = True, text = obj_name )
elif text_field_name == 'pole_vector':
self.pole_vector = obj_dag
cmds.textField( self.pole_vector_text, edit = True, text = obj_name )
        elif len( objs ) > 1:
            OpenMaya.MGlobal.displayError( "There are too many objects selected!" )
        else:
            OpenMaya.MGlobal.displayError( "There are no objects selected!" )
'''
========================================================================
----> Run Setup gets ui data and runs build <----
========================================================================
'''
def run_setup( self, *args ):
self.name = cmds.textField( self.name_text, text = True, q = True )
if self.root_joint:
self.root_joint = self.root_joint.name()
if self.controller:
self.controller = self.controller.name()
if self.pole_vector:
self.pole_vector = self.pole_vector.name()
self.build( root_joint = self.root_joint,
controller = self.controller,
pole_vector = self.pole_vector,
name = self.name )
'''
========================================================================
----> Builds No Flip Pole Vector <----
========================================================================
'''
def build( self, root_joint = None, controller = None, pole_vector = None, name = '', *args ):
"""
*Keyword Arguments:*
* ``root_joint`` Pass the top of the joint chain.
* ``controller`` Pass the main controller.
* ``pole_vector`` Pass the pole vector controller.
* ``name`` Add prefix to all created nodes
*Returns:*
* ``True`` If process finishes.
*Examples:* ::
        import maya.cmds as cmds
import no_flip_pole_vector_tool as nfpv
reload( nfpv )
# Build example rig
# Build joint chain
cmds.select( cl = True )
chain1_jnt = cmds.joint( n = 'chain1_jnt', p = [0, 6, 0] )
chain2_jnt = cmds.joint( n = 'chain2_jnt', p = [0, 3, 1] )
chain3_jnt = cmds.joint( n = 'chain3_jnt', p = [0, 0, 0] )
        # Build ikHandle
        chain_ikHandle = cmds.ikHandle ( n = 'chain_ikHandle', startJoint = chain1_jnt, endEffector = chain3_jnt, sol = 'ikRPsolver' )[0]
        # Build pole vector
        pole_vector_loc = cmds.spaceLocator( n = 'pole_vector_loc' )[0]
        cmds.setAttr( '{0}.translateY'.format( pole_vector_loc ), 3 )
        cmds.setAttr( '{0}.translateZ'.format( pole_vector_loc ), 2 )
cmds.poleVectorConstraint( pole_vector_loc, chain_ikHandle )
# Build controller
controller = cmds.circle ( nr = [0, 1, 0], r = 1 )[0]
cmds.pointConstraint( controller, chain_ikHandle )
# Standalone code
nfpv.No_Flip_Pole_Vector().build( root_joint = chain1_jnt, controller = controller, pole_vector = pole_vector_loc, name = 'example' )
"""
if root_joint == None or controller == None or pole_vector == None:
get_selected_objs = cmds.ls( sl = True )
if len( get_selected_objs ) == 3:
root_joint = DAG_Node( get_selected_objs[0] )
controller = DAG_Node( get_selected_objs[1] )
pole_vector = DAG_Node( get_selected_objs[2] )
            elif len( get_selected_objs ) > 3:
                OpenMaya.MGlobal.displayError( "There are more than 3 objects selected!" )
                return False
            else:
                OpenMaya.MGlobal.displayError( "There are fewer than 3 objects selected!" )
                return False
else:
root_joint = DAG_Node( root_joint )
controller = DAG_Node( controller )
pole_vector = DAG_Node( pole_vector )
cmds.select( cl = True )
# Get pole vector parent
pole_parent = pole_vector.parent()
# Create pole main grp
self.pole_main_grp = DAG_Node( cmds.group( n = '{0}_poleMain_grp'.format( name ), em = True ) )
# Create pole parent grp
pole_parent_grp = DAG_Node( cmds.group( n = '{0}_poleParent_grp'.format( name ), em = True ) )
if pole_parent:
pole_parent_grp.set_parent( pole_parent )
controller_pivot = cmds.xform( controller.name(), ws = True, rp = True, q = True )
controller_rotation = cmds.xform( controller.name(), ws = True, rotation = True, q = True )
cmds.xform( pole_parent_grp.name(), translation = controller_pivot, ws = True )
cmds.xform( pole_parent_grp.name(), rotation = controller_rotation, ws = True )
pole_vector.set_parent( pole_parent_grp )
# Create pole world grp
pole_world_grp = DAG_Node( cmds.group( n = '{0}_poleWorld_grp'.format( name ), em = True ) )
pole_world_grp.set_parent( self.pole_main_grp )
cmds.xform( pole_world_grp.name(), translation = controller_pivot, ws = True )
cmds.xform( pole_world_grp.name(), rotation = controller_rotation, ws = True )
# Object up vector
up_vector_grp = DAG_Node( cmds.group( n = '{0}_upVector_grp'.format( name ), em = True ) )
up_vector_grp.set_parent( self.pole_main_grp )
cmds.pointConstraint( root_joint.name() , up_vector_grp.name() )
# Create bottom chain aim locator
aim_grp = DAG_Node( cmds.group( n = '{0}_aim_grp'.format( name ), em = True ) )
aim_grp.set_parent( self.pole_main_grp )
cmds.aimConstraint ( root_joint.name(), aim_grp.name(),
aimVector = [1, 0, 0],
upVector = [0, 1, 0],
worldUpType = "objectrotation",
worldUpVector = [-1, 0, 0],
worldUpObject = up_vector_grp.name() )
cmds.pointConstraint( controller.name(), aim_grp.name() )
# Create pole vector parent groups
pole_controller_grp = DAG_Node( cmds.group( n = '{0}_poleController_grp'.format( name ), em = True ) )
pole_rotate_grp = DAG_Node( cmds.group( n = '{0}_poleRotate_grp'.format( name ), em = True ) )
pole_rotate_grp.set_parent( pole_controller_grp )
pole_controller_grp.set_parent( aim_grp )
# Set controller orientation on main pole group
cmds.xform( pole_controller_grp.name(), translation = controller_pivot, ws = True )
cmds.xform( pole_controller_grp.name(), rotation = controller_rotation, ws = True )
# Connect rotate group's rotation Y,Z for twist follow
cmds.connectAttr( '{0}.rotateY'.format( controller.name() ), '{0}.rotateY'.format( pole_rotate_grp.name() ) )
cmds.connectAttr( '{0}.rotateZ'.format( controller.name() ), '{0}.rotateZ'.format( pole_rotate_grp.name() ) )
# Create and attach new custom attribute
position_follow_str = 'position_follow'
rotation_follow_str = 'rotation_follow'
if not cmds.objExists( '{0}.{1}'.format( pole_vector.name(), position_follow_str ) ):
cmds.addAttr( pole_vector.name(), longName = position_follow_str, attributeType = 'double', min = 0, max = 1, k = True )
if not cmds.objExists( '{0}.{1}'.format( pole_vector.name(), rotation_follow_str ) ):
cmds.addAttr( pole_vector.name(), longName = rotation_follow_str, attributeType = 'double', min = 0, max = 1, k = True )
cmds.setAttr( '{0}.{1}'.format( pole_vector.name(), position_follow_str ), 1 )
cmds.setAttr( '{0}.{1}'.format( pole_vector.name(), rotation_follow_str ), 1 )
# Constraint pole parent to world and follow grps
point_constraint = DAG_Node( cmds.pointConstraint( pole_world_grp.name(), pole_rotate_grp.name(), pole_parent_grp.name() )[0] )
orient_constraint = DAG_Node( cmds.orientConstraint( pole_world_grp.name(), pole_rotate_grp.name(), pole_parent_grp.name() )[0] )
position_constraint_weights = cmds.pointConstraint( point_constraint.name(), weightAliasList = True, query = True )
rotation_constraint_weights = cmds.orientConstraint( orient_constraint.name(), weightAliasList = True, query = True )
cmds.connectAttr( '{0}.{1}'.format( pole_vector.name(), position_follow_str ), '{0}.{1}'.format( point_constraint.name(), position_constraint_weights[1] ) )
cmds.connectAttr( '{0}.{1}'.format( pole_vector.name(), rotation_follow_str ), '{0}.{1}'.format( orient_constraint.name(), rotation_constraint_weights[1] ) )
Maya_Util().reverse_node( parent_attr = '{0}.{1}'.format( pole_vector.name(), position_follow_str ),
child_attr = '{0}.{1}'.format( point_constraint.name(), position_constraint_weights[0] ),
node_name = '{0}_positionFollow_node'.format( name ) )
Maya_Util().reverse_node( parent_attr = '{0}.{1}'.format( pole_vector.name(), rotation_follow_str ),
child_attr = '{0}.{1}'.format( orient_constraint.name(), rotation_constraint_weights[0] ),
node_name = '{0}_rotationFollow_node'.format( name ) )
cmds.select( cl = True )
        sys.stdout.write( '// Result: No Flip Pole Vector is finished!' )
return True
'''
========================================================================
----> Build Rig Sample <----
========================================================================
'''
def sample( self, *args ):
# Build joint chain
cmds.select( cl = True )
chain1_jnt = cmds.joint( n = 'chain1_jnt', p = [0, 6, 0] )
cmds.joint( n = 'chain2_jnt', p = [0, 3, 1] )
chain3_jnt = cmds.joint( n = 'chain3_jnt', p = [0, 0, 0] )
# Build ikHandle
chain_ikHandle = cmds.ikHandle ( n = 'chain_ikHandle', startJoint = chain1_jnt, endEffector = chain3_jnt, sol = 'ikRPsolver' )[0]
# Build pole vector
pole_vector_loc = cmds.spaceLocator( n = 'pole_vector_loc' )[0]
cmds.setAttr( '{0}.translateY'.format( pole_vector_loc ), 3 )
cmds.setAttr( '{0}.translateZ'.format( pole_vector_loc ), 2 )
cmds.poleVectorConstraint( pole_vector_loc, chain_ikHandle )
# Build controller
controller = cmds.circle ( nr = [0, 1, 0], r = 1 )[0]
cmds.pointConstraint( controller, chain_ikHandle )
# Run Standalone code
No_Flip_Pole_Vector().build( root_joint = chain1_jnt, controller = controller, pole_vector = pole_vector_loc, name = 'example' )
'''
========================================================================
----> Code Sample <----
========================================================================
'''
def code_sample( self, *args ):
code = '''
import maya.cmds
import no_flip_pole_vector_tool as nfpv
# Show ui
nfpv.No_Flip_Pole_Vector().show_ui()
"""
========================================================================
----> Run Standalone code <----
========================================================================
"""
nfpv.No_Flip_Pole_Vector().build( root_joint = None, controller = None, pole_vector = None, name = 'example' )
'''
if cmds.window( 'code_sample_window', exists = True, q = True ):
cmds.deleteUI( 'code_sample_window' )
cmds.window( 'code_sample_window', title = 'Code Sample' )
cmds.paneLayout()
cmds.scrollField( editable = False, text = code.replace( ' ', '' ) )
cmds.showWindow()
'''
========================================================================
----> About No Flip Pole Vector <----
========================================================================
'''
def about( self, *args ):
about = '''
"""
========================================================================
----> No Flip Pole Vector <----
========================================================================
"""
This tool builds a no flip pole vector. After passing in a root joint,
main controller, and pole vector the tool will allow the pole vector to
follow the main controller or switch to world space.
If you have any questions email me at Nicholas.Silveira@gmail.com
'''
if cmds.window( 'about_window', exists = True, q = True ):
cmds.deleteUI( 'about_window' )
cmds.window( 'about_window', title = 'About' )
cmds.paneLayout()
cmds.scrollField( editable = False, text = about.replace( ' ', '' ) )
cmds.showWindow()
'''
========================================================================
----> Maya Utilities <----
========================================================================
'''
class Maya_Util():
'''
========================================================================
----> Create a Maya reverse node <----
========================================================================
'''
def reverse_node ( self, parent_attr, child_attr, node_name = '' ):
"""
*Arguments:*
* ``parent_attr`` Pass the parent attribute.
* ``child_attr`` Pass the child attribute.
*Keyword Arguments:*
* ``node_name`` Pass a node name.
*Returns:*
* ``node`` Returns reverse node
"""
        node = cmds.shadingNode( 'reverse', name = node_name, asUtility = True )
        cmds.connectAttr( parent_attr, '{0}.inputX'.format( node ) )
        cmds.connectAttr( '{0}.outputX'.format( node ), child_attr )
        return node
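    # Example usage (hypothetical attribute names):
    #   Maya_Util().reverse_node( 'locator1.follow', 'constraint1.w0',
    #                             node_name = 'follow_rev' )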
'''
========================================================================
----> DAG Node Utilities <----
========================================================================
'''
class DAG_Node():
"""
*Arguments:*
* ``node`` Makes a DAG instance from passed node
*Examples:* ::
import maya.cmds as cmds
import no_flip_pole_vector_tool as nfpv
exampleA_grp = nfpv.DAG_Node( cmds.group( n = 'exampleA_grp', em = True ) )
exampleB_grp = nfpv.DAG_Node( cmds.group( n = 'exampleB_grp', em = True ) )
exampleA_grp.set_parent(exampleB_grp)
        print exampleA_grp.parent().name()
print exampleA_grp.name()
"""
def __init__( self, node ):
selection_list = OpenMaya.MSelectionList()
selection_list.add( node )
self.m_obj = OpenMaya.MObject()
selection_list.getDependNode( 0, self.m_obj )
'''
========================================================================
----> DAG Full Path Name <----
========================================================================
'''
def name( self ):
"""
*Returns:*
* ``node_name`` Returns DAG's full path name.
"""
nodeFn = OpenMaya.MFnDagNode( self.m_obj )
node_name = nodeFn.fullPathName()
return node_name
'''
========================================================================
----> DAG Parent <----
========================================================================
'''
def parent( self ):
"""
*Returns:*
* ``node_parent`` Returns DAG's parent or None.
"""
node_parent = cmds.listRelatives( self.name(), parent = True, f = True )
if node_parent:
return DAG_Node( node_parent[0] )
else:
return None
'''
========================================================================
----> Set DAG Parent <----
========================================================================
'''
def set_parent( self, parent ):
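        """ Parents this DAG node under the passed DAG_Node instance """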
cmds.parent( self.name(), parent.name() )
|
{
"content_hash": "8e0258e8f695ba6a2ccdd27624c7b6fb",
"timestamp": "",
"source": "github",
"line_count": 529,
"max_line_length": 159,
"avg_line_length": 35.119092627599244,
"alnum_prop": 0.5504898266767144,
"repo_name": "nicholas-silveira/art_pipeline",
"id": "f707dabc00b526292b8df6dd3bfa36d64cc75e28",
"size": "18578",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "maya/packages/oop_maya/tools/rigging/no_flip_pole_vector_tool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "189773"
}
],
"symlink_target": ""
}
|
"""
pygments.lexers.foxpro
~~~~~~~~~~~~~~~~~~~~~~
Simple lexer for Microsoft Visual FoxPro source code.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String
__all__ = ['FoxProLexer']
class FoxProLexer(RegexLexer):
"""Lexer for Microsoft Visual FoxPro language.
    FoxPro syntax allows all keywords and function names to be shortened
    to 4 characters. Shortened forms are not recognized by this lexer.
.. versionadded:: 1.6
"""
name = 'FoxPro'
aliases = ['foxpro', 'vfp', 'clipper', 'xbase']
filenames = ['*.PRG', '*.prg']
    mimetypes = []
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r';\s*\n', Punctuation), # consume newline
(r'(^|\n)\s*', Text, 'newline'),
# Square brackets may be used for array indices
# and for string literal. Look for arrays
# before matching string literals.
(r'(?<=\w)\[[0-9, ]+\]', Text),
            (r'\'[^\'\n]*\'|"[^"\n]*"|\[[^\]\n]*\]', String),
            (r'(^\s*\*|&&).*?\n', Comment.Single),
(r'(ABS|ACLASS|ACOPY|ACOS|ADATABASES|ADBOBJECTS|ADDBS|'
r'ADDPROPERTY|ADEL|ADIR|ADLLS|ADOCKSTATE|AELEMENT|AERROR|'
r'AEVENTS|AFIELDS|AFONT|AGETCLASS|AGETFILEVERSION|AINS|'
r'AINSTANCE|ALANGUAGE|ALEN|ALIAS|ALINES|ALLTRIM|'
r'AMEMBERS|AMOUSEOBJ|ANETRESOURCES|APRINTERS|APROCINFO|'
r'ASC|ASCAN|ASELOBJ|ASESSIONS|ASIN|ASORT|ASQLHANDLES|'
r'ASTACKINFO|ASUBSCRIPT|AT|AT_C|ATAGINFO|ATAN|ATC|ATCC|'
r'ATCLINE|ATLINE|ATN2|AUSED|AVCXCLASSES|BAR|BARCOUNT|'
r'BARPROMPT|BETWEEN|BINDEVENT|BINTOC|BITAND|BITCLEAR|'
r'BITLSHIFT|BITNOT|BITOR|BITRSHIFT|BITSET|BITTEST|BITXOR|'
r'BOF|CANDIDATE|CAPSLOCK|CAST|CDOW|CDX|CEILING|CHR|CHRSAW|'
r'CHRTRAN|CHRTRANC|CLEARRESULTSET|CMONTH|CNTBAR|CNTPAD|COL|'
             r'COM|COMARRAY|COMCLASSINFO|COMPOBJ|COMPROP|'
r'COMRETURNERROR|COS|CPCONVERT|CPCURRENT|CPDBF|CREATEBINARY|'
r'CREATEOBJECT|CREATEOBJECTEX|CREATEOFFLINE|CTOBIN|CTOD|'
r'CTOT|CURDIR|CURSORGETPROP|CURSORSETPROP|CURSORTOXML|'
r'CURVAL|DATE|DATETIME|DAY|DBC|DBF|DBGETPROP|DBSETPROP|'
r'DBUSED|DDEAbortTrans|DDEAdvise|DDEEnabled|DDEExecute|'
r'DDEInitiate|DDELastError|DDEPoke|DDERequest|DDESetOption|'
r'DDESetService|DDESetTopic|DDETerminate|DEFAULTEXT|'
r'DELETED|DESCENDING|DIFFERENCE|DIRECTORY|DISKSPACE|'
r'DisplayPath|DMY|DODEFAULT|DOW|DRIVETYPE|DROPOFFLINE|'
r'DTOC|DTOR|DTOS|DTOT|EDITSOURCE|EMPTY|EOF|ERROR|EVAL(UATE)?|'
r'EVENTHANDLER|EVL|EXECSCRIPT|EXP|FCHSIZE|FCLOSE|FCOUNT|'
r'FCREATE|FDATE|FEOF|FERROR|FFLUSH|FGETS|FIELD|FILE|'
r'FILETOSTR|FILTER|FKLABEL|FKMAX|FLDLIST|FLOCK|FLOOR|'
r'FONTMETRIC|FOPEN|FOR|FORCEEXT|FORCEPATH|FOUND|FPUTS|'
r'FREAD|FSEEK|FSIZE|FTIME|FULLPATH|FV|FWRITE|'
r'GETAUTOINCVALUE|GETBAR|GETCOLOR|GETCP|GETDIR|GETENV|'
r'GETFILE|GETFLDSTATE|GETFONT|GETINTERFACE|'
r'GETNEXTMODIFIED|GETOBJECT|GETPAD|GETPEM|GETPICT|'
r'GETPRINTER|GETRESULTSET|GETWORDCOUNT|GETWORDNUM|'
r'GETCURSORADAPTER|GOMONTH|HEADER|HOME|HOUR|ICASE|'
r'IDXCOLLATE|IIF|IMESTATUS|INDBC|INDEXSEEK|INKEY|INLIST|'
r'INPUTBOX|INSMODE|INT|ISALPHA|ISBLANK|ISCOLOR|ISDIGIT|'
r'ISEXCLUSIVE|ISFLOCKED|ISLEADBYTE|ISLOWER|ISMEMOFETCHED|'
r'ISMOUSE|ISNULL|ISPEN|ISREADONLY|ISRLOCKED|'
r'ISTRANSACTABLE|ISUPPER|JUSTDRIVE|JUSTEXT|JUSTFNAME|'
r'JUSTPATH|JUSTSTEM|KEY|KEYMATCH|LASTKEY|LEFT|LEFTC|LEN|'
r'LENC|LIKE|LIKEC|LINENO|LOADPICTURE|LOCFILE|LOCK|LOG|'
r'LOG10|LOOKUP|LOWER|LTRIM|LUPDATE|MAKETRANSACTABLE|MAX|'
r'MCOL|MDOWN|MDX|MDY|MEMLINES|MEMORY|MENU|MESSAGE|'
r'MESSAGEBOX|MIN|MINUTE|MLINE|MOD|MONTH|MRKBAR|MRKPAD|'
r'MROW|MTON|MWINDOW|NDX|NEWOBJECT|NORMALIZE|NTOM|NUMLOCK|'
r'NVL|OBJNUM|OBJTOCLIENT|OBJVAR|OCCURS|OEMTOANSI|OLDVAL|'
r'ON|ORDER|OS|PAD|PADL|PARAMETERS|PAYMENT|PCOL|PCOUNT|'
r'PEMSTATUS|PI|POPUP|PRIMARY|PRINTSTATUS|PRMBAR|PRMPAD|'
r'PROGRAM|PROMPT|PROPER|PROW|PRTINFO|PUTFILE|PV|QUARTER|'
r'RAISEEVENT|RAND|RAT|RATC|RATLINE|RDLEVEL|READKEY|RECCOUNT|'
r'RECNO|RECSIZE|REFRESH|RELATION|REPLICATE|REQUERY|RGB|'
r'RGBSCHEME|RIGHT|RIGHTC|RLOCK|ROUND|ROW|RTOD|RTRIM|'
r'SAVEPICTURE|SCHEME|SCOLS|SEC|SECONDS|SEEK|SELECT|SET|'
r'SETFLDSTATE|SETRESULTSET|SIGN|SIN|SKPBAR|SKPPAD|SOUNDEX|'
r'SPACE|SQLCANCEL|SQLCOLUMNS|SQLCOMMIT|SQLCONNECT|'
r'SQLDISCONNECT|SQLEXEC|SQLGETPROP|SQLIDLEDISCONNECT|'
r'SQLMORERESULTS|SQLPREPARE|SQLROLLBACK|SQLSETPROP|'
r'SQLSTRINGCONNECT|SQLTABLES|SQRT|SROWS|STR|STRCONV|'
r'STREXTRACT|STRTOFILE|STRTRAN|STUFF|STUFFC|SUBSTR|'
r'SUBSTRC|SYS|SYSMETRIC|TABLEREVERT|TABLEUPDATE|TAG|'
r'TAGCOUNT|TAGNO|TAN|TARGET|TEXTMERGE|TIME|TRANSFORM|'
r'TRIM|TTOC|TTOD|TXNLEVEL|TXTWIDTH|TYPE|UNBINDEVENTS|'
r'UNIQUE|UPDATED|UPPER|USED|VAL|VARREAD|VARTYPE|VERSION|'
r'WBORDER|WCHILD|WCOLS|WDOCKABLE|WEEK|WEXIST|WFONT|WLAST|'
r'WLCOL|WLROW|WMAXIMUM|WMINIMUM|WONTOP|WOUTPUT|WPARENT|'
r'WREAD|WROWS|WTITLE|WVISIBLE|XMLTOCURSOR|XMLUPDATEGRAM|'
r'YEAR)(?=\s*\()', Name.Function),
(r'_ALIGNMENT|_ASCIICOLS|_ASCIIROWS|_ASSIST|_BEAUTIFY|_BOX|'
r'_BROWSER|_BUILDER|_CALCMEM|_CALCVALUE|_CLIPTEXT|_CONVERTER|'
r'_COVERAGE|_CUROBJ|_DBLCLICK|_DIARYDATE|_DOS|_FOXDOC|_FOXREF|'
r'_GALLERY|_GENGRAPH|_GENHTML|_GENMENU|_GENPD|_GENSCRN|'
r'_GENXTAB|_GETEXPR|_INCLUDE|_INCSEEK|_INDENT|_LMARGIN|_MAC|'
r'_MENUDESIGNER|_MLINE|_PADVANCE|_PAGENO|_PAGETOTAL|_PBPAGE|'
r'_PCOLNO|_PCOPIES|_PDRIVER|_PDSETUP|_PECODE|_PEJECT|_PEPAGE|'
r'_PLENGTH|_PLINENO|_PLOFFSET|_PPITCH|_PQUALITY|_PRETEXT|'
r'_PSCODE|_PSPACING|_PWAIT|_RMARGIN|_REPORTBUILDER|'
r'_REPORTOUTPUT|_REPORTPREVIEW|_SAMPLES|_SCCTEXT|_SCREEN|'
r'_SHELL|_SPELLCHK|_STARTUP|_TABS|_TALLY|_TASKPANE|_TEXT|'
r'_THROTTLE|_TOOLBOX|_TOOLTIPTIMEOUT|_TRANSPORT|_TRIGGERLEVEL|'
r'_UNIX|_VFP|_WINDOWS|_WIZARD|_WRAP', Keyword.Pseudo),
(r'THISFORMSET|THISFORM|THIS', Name.Builtin),
(r'Application|CheckBox|Collection|Column|ComboBox|'
r'CommandButton|CommandGroup|Container|Control|CursorAdapter|'
r'Cursor|Custom|DataEnvironment|DataObject|EditBox|'
r'Empty|Exception|Fields|Files|File|FormSet|Form|FoxCode|'
r'Grid|Header|Hyperlink|Image|Label|Line|ListBox|Objects|'
r'OptionButton|OptionGroup|PageFrame|Page|ProjectHook|Projects|'
r'Project|Relation|ReportListener|Separator|Servers|Server|'
r'Session|Shape|Spinner|Tables|TextBox|Timer|ToolBar|'
r'XMLAdapter|XMLField|XMLTable', Name.Class),
(r'm\.[a-z_]\w*', Name.Variable),
(r'\.(F|T|AND|OR|NOT|NULL)\.|\b(AND|OR|NOT|NULL)\b', Operator.Word),
(r'\.(ActiveColumn|ActiveControl|ActiveForm|ActivePage|'
r'ActiveProject|ActiveRow|AddLineFeeds|ADOCodePage|Alias|'
r'Alignment|Align|AllowAddNew|AllowAutoColumnFit|'
r'AllowCellSelection|AllowDelete|AllowHeaderSizing|'
r'AllowInsert|AllowModalMessages|AllowOutput|AllowRowSizing|'
r'AllowSimultaneousFetch|AllowTabs|AllowUpdate|'
r'AlwaysOnBottom|AlwaysOnTop|Anchor|Application|'
r'AutoActivate|AutoCenter|AutoCloseTables|AutoComplete|'
r'AutoCompSource|AutoCompTable|AutoHideScrollBar|'
r'AutoIncrement|AutoOpenTables|AutoRelease|AutoSize|'
r'AutoVerbMenu|AutoYield|BackColor|ForeColor|BackStyle|'
r'BaseClass|BatchUpdateCount|BindControls|BorderColor|'
r'BorderStyle|BorderWidth|BoundColumn|BoundTo|Bound|'
r'BreakOnError|BufferModeOverride|BufferMode|'
r'BuildDateTime|ButtonCount|Buttons|Cancel|Caption|'
r'Centered|Century|ChildAlias|ChildOrder|ChildTable|'
r'ClassLibrary|Class|ClipControls|Closable|CLSID|CodePage|'
r'ColorScheme|ColorSource|ColumnCount|ColumnLines|'
r'ColumnOrder|Columns|ColumnWidths|CommandClauses|'
r'Comment|CompareMemo|ConflictCheckCmd|ConflictCheckType|'
r'ContinuousScroll|ControlBox|ControlCount|Controls|'
r'ControlSource|ConversionFunc|Count|CurrentControl|'
r'CurrentDataSession|CurrentPass|CurrentX|CurrentY|'
r'CursorSchema|CursorSource|CursorStatus|Curvature|'
r'Database|DataSessionID|DataSession|DataSourceType|'
r'DataSource|DataType|DateFormat|DateMark|Debug|'
r'DeclareXMLPrefix|DEClassLibrary|DEClass|DefaultFilePath|'
r'Default|DefOLELCID|DeleteCmdDataSourceType|DeleteCmdDataSource|'
r'DeleteCmd|DeleteMark|Description|Desktop|'
r'Details|DisabledBackColor|DisabledForeColor|'
r'DisabledItemBackColor|DisabledItemForeColor|'
r'DisabledPicture|DisableEncode|DisplayCount|'
r'DisplayValue|Dockable|Docked|DockPosition|'
r'DocumentFile|DownPicture|DragIcon|DragMode|DrawMode|'
r'DrawStyle|DrawWidth|DynamicAlignment|DynamicBackColor|'
r'DynamicForeColor|DynamicCurrentControl|DynamicFontBold|'
r'DynamicFontItalic|DynamicFontStrikethru|'
r'DynamicFontUnderline|DynamicFontName|DynamicFontOutline|'
r'DynamicFontShadow|DynamicFontSize|DynamicInputMask|'
r'DynamicLineHeight|EditorOptions|Enabled|'
r'EnableHyperlinks|Encrypted|ErrorNo|Exclude|Exclusive|'
r'FetchAsNeeded|FetchMemoCmdList|FetchMemoDataSourceType|'
r'FetchMemoDataSource|FetchMemo|FetchSize|'
r'FileClassLibrary|FileClass|FillColor|FillStyle|Filter|'
r'FirstElement|FirstNestedTable|Flags|FontBold|FontItalic|'
r'FontStrikethru|FontUnderline|FontCharSet|FontCondense|'
r'FontExtend|FontName|FontOutline|FontShadow|FontSize|'
r'ForceCloseTag|Format|FormCount|FormattedOutput|Forms|'
r'FractionDigits|FRXDataSession|FullName|GDIPlusGraphics|'
r'GridLineColor|GridLines|GridLineWidth|HalfHeightCaption|'
r'HeaderClassLibrary|HeaderClass|HeaderHeight|Height|'
r'HelpContextID|HideSelection|HighlightBackColor|'
r'HighlightForeColor|HighlightStyle|HighlightRowLineWidth|'
r'HighlightRow|Highlight|HomeDir|Hours|HostName|'
r'HScrollSmallChange|hWnd|Icon|IncrementalSearch|Increment|'
r'InitialSelectedAlias|InputMask|InsertCmdDataSourceType|'
r'InsertCmdDataSource|InsertCmdRefreshCmd|'
r'InsertCmdRefreshFieldList|InsertCmdRefreshKeyFieldList|'
r'InsertCmd|Instancing|IntegralHeight|'
r'Interval|IMEMode|IsAttribute|IsBase64|IsBinary|IsNull|'
r'IsDiffGram|IsLoaded|ItemBackColor,|ItemData|ItemIDData|'
r'ItemTips|IXMLDOMElement|KeyboardHighValue|KeyboardLowValue|'
r'Keyfield|KeyFieldList|KeyPreview|KeySort|LanguageOptions|'
r'LeftColumn|Left|LineContents|LineNo|LineSlant|LinkMaster|'
r'ListCount|ListenerType|ListIndex|ListItemID|ListItem|'
r'List|LockColumnsLeft|LockColumns|LockScreen|MacDesktop|'
r'MainFile|MapN19_4ToCurrency|MapBinary|MapVarchar|Margin|'
r'MaxButton|MaxHeight|MaxLeft|MaxLength|MaxRecords|MaxTop|'
r'MaxWidth|MDIForm|MemberClassLibrary|MemberClass|'
r'MemoWindow|Message|MinButton|MinHeight|MinWidth|'
r'MouseIcon|MousePointer|Movable|MoverBars|MultiSelect|'
r'Name|NestedInto|NewIndex|NewItemID|NextSiblingTable|'
r'NoCpTrans|NoDataOnLoad|NoData|NullDisplay|'
r'NumberOfElements|Object|OLEClass|OLEDragMode|'
r'OLEDragPicture|OLEDropEffects|OLEDropHasData|'
r'OLEDropMode|OLEDropTextInsertion|OLELCID|'
r'OLERequestPendingTimeout|OLEServerBusyRaiseError|'
r'OLEServerBusyTimeout|OLETypeAllowed|OneToMany|'
r'OpenViews|OpenWindow|Optimize|OrderDirection|Order|'
r'OutputPageCount|OutputType|PageCount|PageHeight|'
r'PageNo|PageOrder|Pages|PageTotal|PageWidth|'
r'PanelLink|Panel|ParentAlias|ParentClass|ParentTable|'
r'Parent|Partition|PasswordChar|PictureMargin|'
r'PicturePosition|PictureSpacing|PictureSelectionDisplay|'
r'PictureVal|Picture|Prepared|'
r'PolyPoints|PreserveWhiteSpace|PreviewContainer|'
r'PrintJobName|Procedure|PROCESSID|ProgID|ProjectHookClass|'
r'ProjectHookLibrary|ProjectHook|QuietMode|'
r'ReadCycle|ReadLock|ReadMouse|ReadObject|ReadOnly|'
r'ReadSave|ReadTimeout|RecordMark|RecordSourceType|'
r'RecordSource|RefreshAlias|'
r'RefreshCmdDataSourceType|RefreshCmdDataSource|RefreshCmd|'
r'RefreshIgnoreFieldList|RefreshTimeStamp|RelationalExpr|'
r'RelativeColumn|RelativeRow|ReleaseType|Resizable|'
r'RespectCursorCP|RespectNesting|RightToLeft|RotateFlip|'
r'Rotation|RowColChange|RowHeight|RowSourceType|'
r'RowSource|ScaleMode|SCCProvider|SCCStatus|ScrollBars|'
r'Seconds|SelectCmd|SelectedID|'
r'SelectedItemBackColor|SelectedItemForeColor|Selected|'
r'SelectionNamespaces|SelectOnEntry|SelLength|SelStart|'
r'SelText|SendGDIPlusImage|SendUpdates|ServerClassLibrary|'
r'ServerClass|ServerHelpFile|ServerName|'
r'ServerProject|ShowTips|ShowInTaskbar|ShowWindow|'
r'Sizable|SizeBox|SOM|Sorted|Sparse|SpecialEffect|'
r'SpinnerHighValue|SpinnerLowValue|SplitBar|StackLevel|'
r'StartMode|StatusBarText|StatusBar|Stretch|StrictDateEntry|'
r'Style|TabIndex|Tables|TabOrientation|Tabs|TabStop|'
r'TabStretch|TabStyle|Tag|TerminateRead|Text|Themes|'
r'ThreadID|TimestampFieldList|TitleBar|ToolTipText|'
r'TopIndex|TopItemID|Top|TwoPassProcess|TypeLibCLSID|'
r'TypeLibDesc|TypeLibName|Type|Unicode|UpdatableFieldList|'
r'UpdateCmdDataSourceType|UpdateCmdDataSource|'
r'UpdateCmdRefreshCmd|UpdateCmdRefreshFieldList|'
r'UpdateCmdRefreshKeyFieldList|UpdateCmd|'
r'UpdateGramSchemaLocation|UpdateGram|UpdateNameList|UpdateType|'
r'UseCodePage|UseCursorSchema|UseDeDataSource|UseMemoSize|'
r'UserValue|UseTransactions|UTF8Encoded|Value|VersionComments|'
r'VersionCompany|VersionCopyright|VersionDescription|'
r'VersionNumber|VersionProduct|VersionTrademarks|Version|'
r'VFPXMLProgID|ViewPortHeight|ViewPortLeft|'
r'ViewPortTop|ViewPortWidth|VScrollSmallChange|View|Visible|'
r'VisualEffect|WhatsThisButton|WhatsThisHelpID|WhatsThisHelp|'
r'WhereType|Width|WindowList|WindowState|WindowType|WordWrap|'
r'WrapCharInCDATA|WrapInCDATA|WrapMemoInCDATA|XMLAdapter|'
r'XMLConstraints|XMLNameIsXPath|XMLNamespace|XMLName|'
r'XMLPrefix|XMLSchemaLocation|XMLTable|XMLType|'
r'XSDfractionDigits|XSDmaxLength|XSDtotalDigits|'
r'XSDtype|ZoomBox)', Name.Attribute),
(r'\.(ActivateCell|AddColumn|AddItem|AddListItem|AddObject|'
r'AddProperty|AddTableSchema|AddToSCC|Add|'
r'ApplyDiffgram|Attach|AutoFit|AutoOpen|Box|Build|'
r'CancelReport|ChangesToCursor|CheckIn|CheckOut|Circle|'
r'CleanUp|ClearData|ClearStatus|Clear|CloneObject|CloseTables|'
r'Close|Cls|CursorAttach|CursorDetach|CursorFill|'
r'CursorRefresh|DataToClip|DelayedMemoFetch|DeleteColumn|'
r'Dock|DoMessage|DoScroll|DoStatus|DoVerb|Drag|Draw|Eval|'
r'GetData|GetDockState|GetFormat|GetKey|GetLatestVersion|'
r'GetPageHeight|GetPageWidth|Help|Hide|IncludePageInOutput|'
r'IndexToItemID|ItemIDToIndex|Item|LoadXML|Line|Modify|'
r'MoveItem|Move|Nest|OLEDrag|OnPreviewClose|OutputPage|'
r'Point|Print|PSet|Quit|ReadExpression|ReadMethod|'
r'RecordRefresh|Refresh|ReleaseXML|Release|RemoveFromSCC|'
r'RemoveItem|RemoveListItem|RemoveObject|Remove|'
r'Render|Requery|RequestData|ResetToDefault|Reset|Run|'
r'SaveAsClass|SaveAs|SetAll|SetData|SetFocus|SetFormat|'
r'SetMain|SetVar|SetViewPort|ShowWhatsThis|Show|'
r'SupportsListenerType|TextHeight|TextWidth|ToCursor|'
r'ToXML|UndoCheckOut|Unnest|UpdateStatus|WhatsThisMode|'
r'WriteExpression|WriteMethod|ZOrder)', Name.Function),
(r'\.(Activate|AdjustObjectSize|AfterBand|AfterBuild|'
r'AfterCloseTables|AfterCursorAttach|AfterCursorClose|'
r'AfterCursorDetach|AfterCursorFill|AfterCursorRefresh|'
r'AfterCursorUpdate|AfterDelete|AfterInsert|'
r'AfterRecordRefresh|AfterUpdate|AfterDock|AfterReport|'
r'AfterRowColChange|BeforeBand|BeforeCursorAttach|'
r'BeforeCursorClose|BeforeCursorDetach|BeforeCursorFill|'
r'BeforeCursorRefresh|BeforeCursorUpdate|BeforeDelete|'
r'BeforeInsert|BeforeDock|BeforeOpenTables|'
r'BeforeRecordRefresh|BeforeReport|BeforeRowColChange|'
r'BeforeUpdate|Click|dbc_Activate|dbc_AfterAddTable|'
r'dbc_AfterAppendProc|dbc_AfterCloseTable|dbc_AfterCopyProc|'
r'dbc_AfterCreateConnection|dbc_AfterCreateOffline|'
r'dbc_AfterCreateTable|dbc_AfterCreateView|dbc_AfterDBGetProp|'
r'dbc_AfterDBSetProp|dbc_AfterDeleteConnection|'
r'dbc_AfterDropOffline|dbc_AfterDropTable|'
r'dbc_AfterModifyConnection|dbc_AfterModifyProc|'
r'dbc_AfterModifyTable|dbc_AfterModifyView|dbc_AfterOpenTable|'
r'dbc_AfterRemoveTable|dbc_AfterRenameConnection|'
r'dbc_AfterRenameTable|dbc_AfterRenameView|'
r'dbc_AfterValidateData|dbc_BeforeAddTable|'
r'dbc_BeforeAppendProc|dbc_BeforeCloseTable|'
r'dbc_BeforeCopyProc|dbc_BeforeCreateConnection|'
r'dbc_BeforeCreateOffline|dbc_BeforeCreateTable|'
r'dbc_BeforeCreateView|dbc_BeforeDBGetProp|'
r'dbc_BeforeDBSetProp|dbc_BeforeDeleteConnection|'
r'dbc_BeforeDropOffline|dbc_BeforeDropTable|'
r'dbc_BeforeModifyConnection|dbc_BeforeModifyProc|'
r'dbc_BeforeModifyTable|dbc_BeforeModifyView|'
r'dbc_BeforeOpenTable|dbc_BeforeRemoveTable|'
r'dbc_BeforeRenameConnection|dbc_BeforeRenameTable|'
r'dbc_BeforeRenameView|dbc_BeforeValidateData|'
r'dbc_CloseData|dbc_Deactivate|dbc_ModifyData|dbc_OpenData|'
r'dbc_PackData|DblClick|Deactivate|Deleted|Destroy|DoCmd|'
r'DownClick|DragDrop|DragOver|DropDown|ErrorMessage|Error|'
r'EvaluateContents|GotFocus|Init|InteractiveChange|KeyPress|'
r'LoadReport|Load|LostFocus|Message|MiddleClick|MouseDown|'
r'MouseEnter|MouseLeave|MouseMove|MouseUp|MouseWheel|Moved|'
r'OLECompleteDrag|OLEDragOver|OLEGiveFeedback|OLESetData|'
r'OLEStartDrag|OnMoveItem|Paint|ProgrammaticChange|'
r'QueryAddFile|QueryModifyFile|QueryNewFile|QueryRemoveFile|'
r'QueryRunFile|QueryUnload|RangeHigh|RangeLow|ReadActivate|'
r'ReadDeactivate|ReadShow|ReadValid|ReadWhen|Resize|'
r'RightClick|SCCInit|SCCDestroy|Scrolled|Timer|UIEnable|'
r'UnDock|UnloadReport|Unload|UpClick|Valid|When)', Name.Function),
(r'\s+', Text),
# everything else is not colored
(r'.', Text),
],
'newline': [
(r'\*.*?$', Comment.Single, '#pop'),
(r'(ACCEPT|ACTIVATE\s*MENU|ACTIVATE\s*POPUP|ACTIVATE\s*SCREEN|'
r'ACTIVATE\s*WINDOW|APPEND|APPEND\s*FROM|APPEND\s*FROM\s*ARRAY|'
r'APPEND\s*GENERAL|APPEND\s*MEMO|ASSIST|AVERAGE|BLANK|BROWSE|'
r'BUILD\s*APP|BUILD\s*EXE|BUILD\s*PROJECT|CALCULATE|CALL|'
r'CANCEL|CHANGE|CLEAR|CLOSE|CLOSE\s*MEMO|COMPILE|CONTINUE|'
r'COPY\s*FILE|COPY\s*INDEXES|COPY\s*MEMO|COPY\s*STRUCTURE|'
r'COPY\s*STRUCTURE\s*EXTENDED|COPY\s*TAG|COPY\s*TO|'
r'COPY\s*TO\s*ARRAY|COUNT|CREATE|CREATE\s*COLOR\s*SET|'
r'CREATE\s*CURSOR|CREATE\s*FROM|CREATE\s*LABEL|CREATE\s*MENU|'
r'CREATE\s*PROJECT|CREATE\s*QUERY|CREATE\s*REPORT|'
r'CREATE\s*SCREEN|CREATE\s*TABLE|CREATE\s*VIEW|DDE|'
r'DEACTIVATE\s*MENU|DEACTIVATE\s*POPUP|DEACTIVATE\s*WINDOW|'
r'DECLARE|DEFINE\s*BAR|DEFINE\s*BOX|DEFINE\s*MENU|'
r'DEFINE\s*PAD|DEFINE\s*POPUP|DEFINE\s*WINDOW|DELETE|'
r'DELETE\s*FILE|DELETE\s*TAG|DIMENSION|DIRECTORY|DISPLAY|'
r'DISPLAY\s*FILES|DISPLAY\s*MEMORY|DISPLAY\s*STATUS|'
r'DISPLAY\s*STRUCTURE|DO|EDIT|EJECT|EJECT\s*PAGE|ERASE|'
r'EXIT|EXPORT|EXTERNAL|FILER|FIND|FLUSH|FUNCTION|GATHER|'
r'GETEXPR|GO|GOTO|HELP|HIDE\s*MENU|HIDE\s*POPUP|'
r'HIDE\s*WINDOW|IMPORT|INDEX|INPUT|INSERT|JOIN|KEYBOARD|'
r'LABEL|LIST|LOAD|LOCATE|LOOP|MENU|MENU\s*TO|MODIFY\s*COMMAND|'
r'MODIFY\s*FILE|MODIFY\s*GENERAL|MODIFY\s*LABEL|MODIFY\s*MEMO|'
r'MODIFY\s*MENU|MODIFY\s*PROJECT|MODIFY\s*QUERY|'
r'MODIFY\s*REPORT|MODIFY\s*SCREEN|MODIFY\s*STRUCTURE|'
r'MODIFY\s*WINDOW|MOVE\s*POPUP|MOVE\s*WINDOW|NOTE|'
r'ON\s*APLABOUT|ON\s*BAR|ON\s*ERROR|ON\s*ESCAPE|'
r'ON\s*EXIT\s*BAR|ON\s*EXIT\s*MENU|ON\s*EXIT\s*PAD|'
r'ON\s*EXIT\s*POPUP|ON\s*KEY|ON\s*KEY\s*=|ON\s*KEY\s*LABEL|'
r'ON\s*MACHELP|ON\s*PAD|ON\s*PAGE|ON\s*READERROR|'
r'ON\s*SELECTION\s*BAR|ON\s*SELECTION\s*MENU|'
r'ON\s*SELECTION\s*PAD|ON\s*SELECTION\s*POPUP|ON\s*SHUTDOWN|'
r'PACK|PARAMETERS|PLAY\s*MACRO|POP\s*KEY|POP\s*MENU|'
r'POP\s*POPUP|PRIVATE|PROCEDURE|PUBLIC|PUSH\s*KEY|'
r'PUSH\s*MENU|PUSH\s*POPUP|QUIT|READ|READ\s*MENU|RECALL|'
r'REINDEX|RELEASE|RELEASE\s*MODULE|RENAME|REPLACE|'
r'REPLACE\s*FROM\s*ARRAY|REPORT|RESTORE\s*FROM|'
r'RESTORE\s*MACROS|RESTORE\s*SCREEN|RESTORE\s*WINDOW|'
r'RESUME|RETRY|RETURN|RUN|RUN\s*\/N"|RUNSCRIPT|'
r'SAVE\s*MACROS|SAVE\s*SCREEN|SAVE\s*TO|SAVE\s*WINDOWS|'
r'SCATTER|SCROLL|SEEK|SELECT|SET|SET\s*ALTERNATE|'
r'SET\s*ANSI|SET\s*APLABOUT|SET\s*AUTOSAVE|SET\s*BELL|'
r'SET\s*BLINK|SET\s*BLOCKSIZE|SET\s*BORDER|SET\s*BRSTATUS|'
r'SET\s*CARRY|SET\s*CENTURY|SET\s*CLEAR|SET\s*CLOCK|'
r'SET\s*COLLATE|SET\s*COLOR\s*OF|SET\s*COLOR\s*OF\s*SCHEME|'
r'SET\s*COLOR\s*SET|SET\s*COLOR\s*TO|SET\s*COMPATIBLE|'
r'SET\s*CONFIRM|SET\s*CONSOLE|SET\s*CURRENCY|SET\s*CURSOR|'
r'SET\s*DATE|SET\s*DEBUG|SET\s*DECIMALS|SET\s*DEFAULT|'
r'SET\s*DELETED|SET\s*DELIMITERS|SET\s*DEVELOPMENT|'
r'SET\s*DEVICE|SET\s*DISPLAY|SET\s*DOHISTORY|SET\s*ECHO|'
r'SET\s*ESCAPE|SET\s*EXACT|SET\s*EXCLUSIVE|SET\s*FIELDS|'
r'SET\s*FILTER|SET\s*FIXED|SET\s*FORMAT|SET\s*FULLPATH|'
r'SET\s*FUNCTION|SET\s*HEADINGS|SET\s*HELP|SET\s*HELPFILTER|'
r'SET\s*HOURS|SET\s*INDEX|SET\s*INTENSITY|SET\s*KEY|'
r'SET\s*KEYCOMP|SET\s*LIBRARY|SET\s*LOCK|SET\s*LOGERRORS|'
r'SET\s*MACDESKTOP|SET\s*MACHELP|SET\s*MACKEY|SET\s*MARGIN|'
r'SET\s*MARK\s*OF|SET\s*MARK\s*TO|SET\s*MEMOWIDTH|'
r'SET\s*MESSAGE|SET\s*MOUSE|SET\s*MULTILOCKS|SET\s*NEAR|'
r'SET\s*NOCPTRANS|SET\s*NOTIFY|SET\s*ODOMETER|SET\s*OPTIMIZE|'
r'SET\s*ORDER|SET\s*PALETTE|SET\s*PATH|SET\s*PDSETUP|'
r'SET\s*POINT|SET\s*PRINTER|SET\s*PROCEDURE|SET\s*READBORDER|'
r'SET\s*REFRESH|SET\s*RELATION|SET\s*RELATION\s*OFF|'
r'SET\s*REPROCESS|SET\s*RESOURCE|SET\s*SAFETY|SET\s*SCOREBOARD|'
r'SET\s*SEPARATOR|SET\s*SHADOWS|SET\s*SKIP|SET\s*SKIP\s*OF|'
r'SET\s*SPACE|SET\s*STATUS|SET\s*STATUS\s*BAR|SET\s*STEP|'
r'SET\s*STICKY|SET\s*SYSMENU|SET\s*TALK|SET\s*TEXTMERGE|'
r'SET\s*TEXTMERGE\s*DELIMITERS|SET\s*TOPIC|SET\s*TRBETWEEN|'
r'SET\s*TYPEAHEAD|SET\s*UDFPARMS|SET\s*UNIQUE|SET\s*VIEW|'
r'SET\s*VOLUME|SET\s*WINDOW\s*OF\s*MEMO|SET\s*XCMDFILE|'
r'SHOW\s*GET|SHOW\s*GETS|SHOW\s*MENU|SHOW\s*OBJECT|'
r'SHOW\s*POPUP|SHOW\s*WINDOW|SIZE\s*POPUP|SKIP|SORT|'
r'STORE|SUM|SUSPEND|TOTAL|TYPE|UNLOCK|UPDATE|USE|WAIT|'
r'ZAP|ZOOM\s*WINDOW|DO\s*CASE|CASE|OTHERWISE|ENDCASE|'
r'DO\s*WHILE|ENDDO|FOR|ENDFOR|NEXT|IF|ELSE|ENDIF|PRINTJOB|'
r'ENDPRINTJOB|SCAN|ENDSCAN|TEXT|ENDTEXT|=)',
Keyword.Reserved, '#pop'),
(r'#\s*(IF|ELIF|ELSE|ENDIF|DEFINE|IFDEF|IFNDEF|INCLUDE)',
Comment.Preproc, '#pop'),
(r'(m\.)?[a-z_]\w*', Name.Variable, '#pop'),
(r'.', Text, '#pop'),
],
}
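# A minimal usage sketch (not part of the original module): highlighting a
# FoxPro snippet with this lexer through the standard Pygments API.
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   code = "SET TALK OFF\n? 'hello'\n"
#   print(highlight(code, FoxProLexer(), TerminalFormatter()))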
|
{
"content_hash": "9d534679b92eb77c3e01fdf302cd3a3c",
"timestamp": "",
"source": "github",
"line_count": 427,
"max_line_length": 80,
"avg_line_length": 61.386416861826696,
"alnum_prop": 0.6577521745765298,
"repo_name": "jinie/sublime-wakatime",
"id": "868a44d820c4a3e1a2bad92a0d707bc8fce16bc4",
"size": "26236",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "packages/wakatime/packages/py27/pygments/lexers/foxpro.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "106976"
},
{
"name": "JavaScript",
"bytes": "1896"
},
{
"name": "Python",
"bytes": "6232183"
}
],
"symlink_target": ""
}
|
import os
import sys
CURRENT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
sys.path.append(CURRENT_DIRECTORY + '/')
from webpages import homepage, weather
PAGES = {
'/': homepage,
'/weather': weather
}
def application(environ, start_response):
page = environ.get('PATH_INFO')
request_type = environ.get('REQUEST_METHOD')
request_body = environ.get('wsgi.input').read()
try:
status = '200 OK'
content = get_content(page, request_type, request_body)
    except Exception:
status = '404 NOT FOUND'
content = 'Page not found.'
    # Encode first so Content-Length reflects the byte length rather than the
    # character count
    body = content.encode('utf8')
    response_headers = [('Content-Type', 'text/html'),
                        ('Content-Length', str(len(body)))]
    start_response(status, response_headers)
    yield body
def get_content(page, request_type, request_body):
"""Returns content for page given"""
f = PAGES.get(page)
return f(request_type, request_body)
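# A hedged sketch of what a page module's callable is assumed to look like
# (the real homepage/weather functions live in the webpages package, which is
# not part of this file):
#
#   def homepage(request_type, request_body):
#       if request_type == 'POST':
#           pass  # handle the posted form data here
#       return '<html><body>Hello!</body></html>'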
|
{
"content_hash": "88dcc5fdfde3382c3d80873179efb3af",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 63,
"avg_line_length": 23.27027027027027,
"alnum_prop": 0.6957026713124274,
"repo_name": "chughtaimh/chughtaimh-pythonanywhere",
"id": "50c26f8ccea50a22e282f9e836c3780dcec3dbdb",
"size": "1292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chughtaimh_pythonanywhere_com_wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2143"
},
{
"name": "Python",
"bytes": "3388"
}
],
"symlink_target": ""
}
|
from xscontainer import api_helper
from xscontainer import util
from xscontainer.util import log
from xscontainer.docker_monitor import api as docker_monitor_api
import glob
import os
import random
import re
import shutil
import tempfile
CLOUD_CONFIG_OVERRIDE_PATH = (
"/opt/xensource/packages/files/xscontainer/cloud-config.template")
XS_TOOLS_ISO_PATH = '/opt/xensource/packages/iso/*tools-*.iso'
OTHER_CONFIG_CONFIG_DRIVE_KEY = "config-drive"
def remove_disks_in_vm_provisioning(session, vm_ref):
"""Re-write the xml for provisioning disks to set a SR"""
other_config = session.xenapi.VM.get_other_config(vm_ref)
del other_config['disks']
session.xenapi.VM.set_other_config(vm_ref, other_config)
def install_vm(session, urlvhdbz2, sruuid,
vmname='CoreOs-%d' % (random.randint(0, 1000)),
templatename='CoreOS'):
# devmode only
log.info("install_vm from url %s to sr %s" % (urlvhdbz2, sruuid))
atempfile = tempfile.mkstemp(suffix='.vhd.bz2')[1]
atempfileunpacked = atempfile.replace('.bz2', '')
# @todo: pipe instead, so the file never actually touches Dom0
cmd = ['curl', '-o', atempfile, urlvhdbz2]
util.runlocal(cmd)
try:
cmd = ['bzip2', '-d', atempfile]
util.runlocal(cmd)
vdiref = api_helper.import_disk(session, sruuid, atempfileunpacked,
'vhd', 'Disk')
finally:
if os.path.exists(atempfile):
os.remove(atempfile)
if os.path.exists(atempfileunpacked):
os.remove(atempfileunpacked)
templateref = session.xenapi.VM.get_by_name_label(templatename)[0]
vmref = session.xenapi.VM.clone(templateref, vmname)
vmuuid = session.xenapi.VM.get_record(vmref)['uuid']
log.info("install_vm created vm %s" % (vmuuid))
remove_disks_in_vm_provisioning(session, vmref)
session.xenapi.VM.provision(vmref)
api_helper.create_vbd(session, vmref, vdiref, 'rw', True)
setup_network_on_lowest_pif(session, vmref)
return vmuuid
def setup_network_on_lowest_pif(session, vmref):
# devmode only
pifs = session.xenapi.PIF.get_all_records()
lowest = None
for pifref in pifs.keys():
if ((lowest is None) or
(pifs[pifref]['device'] < pifs[lowest]['device'])):
lowest = pifref
if lowest:
networkref = session.xenapi.PIF.get_network(lowest)
api_helper.create_vif(session, networkref, vmref)
def prepare_vm_for_config_drive(session, vmref, vmuuid):
if api_helper.get_hi_preferene_on(session):
# Setup host internal network
api_helper.disable_gw_of_hi_mgmtnet_ref(session)
mgmtnet_device = api_helper.get_hi_mgmtnet_device(session, vmuuid)
if not mgmtnet_device:
api_helper.create_vif(session,
api_helper.get_hi_mgmtnet_ref(session),
vmref)
def filterxshinexists(text):
# Include legacy support for 'XS' prefixed keys
patterns = [r'(\%HINEXISTS\%(.)*\%ENDHINEXISTS\%)',
r'(\%XSHINEXISTS\%(.)*\%XSENDHINEXISTS\%)']
for p in patterns:
match = re.search(p, text, re.DOTALL)
if match:
text = text.replace(match.group(0), '')
return text
def _fmt_vm_name(session, vmuuid):
vmname = api_helper.get_vm_record_by_uuid(session, vmuuid)['name_label']
vmname = re.sub(r'[\W_]+', '', vmname).lower()
return vmname
def get_template_data(session, vmuuid):
data = {
'vm_name': _fmt_vm_name(session, vmuuid),
'rsa_pub': api_helper.get_idrsa_secret_public_keyonly(session),
'mgmt_dev': api_helper.get_hi_mgmtnet_device(session, vmuuid),
}
return data
def customize_userdata(template, data):
def sub(tpl, keys, value):
for key in keys:
tpl = tpl.replace(key, value)
return tpl
hostname_keys = ['%XSVMNAMETOHOSTNAME%', '%VMNAMETOHOSTNAME%']
rsa_pub_keys = ['%XSCONTAINERRSAPUB%', '%CONTAINERRSAPUB%']
mgmt_dev_keys = ['%XSHIN%', '%HIN%']
template = sub(template, hostname_keys, data['vm_name'])
template = sub(template, rsa_pub_keys, data['rsa_pub'])
if data['mgmt_dev']:
template = sub(template, mgmt_dev_keys, data['mgmt_dev'])
else:
template = filterxshinexists(template)
return template
def load_cloud_config_template(template_path=None):
if template_path:
# Do nothing, specifying the path takes precedence.
pass
elif os.path.exists(CLOUD_CONFIG_OVERRIDE_PATH):
# Use the override file
template_path = CLOUD_CONFIG_OVERRIDE_PATH
else:
# Use the inbuilt default template
template_path = util.get_data_file_path("cloud-config.template")
log.info("load_cloud_config_template from %s" % (template_path))
filehandle = open(template_path)
try:
template_data = filehandle.read()
finally:
filehandle.close()
# Append template location to make it clear where it was loaded from.
template_data = ("%s\n\n# Template loaded from %s"
% (template_data, template_path))
return template_data
def get_config_drive_default(session):
userdata = load_cloud_config_template()
if not api_helper.get_hi_preferene_on(session):
userdata = filterxshinexists(userdata)
return userdata
def find_latest_tools_iso_path():
tools_iso_paths = glob.glob(XS_TOOLS_ISO_PATH)
if len(tools_iso_paths) == 0:
raise util.XSContainerException("Can't locate XS tools in %s."
% (XS_TOOLS_ISO_PATH))
elif len(tools_iso_paths) == 1:
return tools_iso_paths[0]
else:
        # Let's first lose (drop) any xs-tools.iso that lacks a release component
tools_iso_path_wo_releaseless = []
for path in tools_iso_paths:
basename = os.path.basename(path)
if basename.count("-") != 2:
tools_iso_path_wo_releaseless.append(path)
# Then sort the remaining
tools_iso_path_wo_releaseless.sort(
key=lambda s: map(str, re.split('[.-]', s)))
        # And return the last (highest-version) entry of the sorted list
return tools_iso_path_wo_releaseless[-1]
def create_config_drive_iso(session, userdata_template, vmuuid):
log.info("create_config_drive_iso for vm %s" % (vmuuid))
umountrequired = False
temptoolsisodir = None
userdatafile = None
latestfolder = None
openstackfolder = None
agentfilepaths = []
agentpath = None
tempisodir = None
try:
tempisodir = tempfile.mkdtemp()
tempisofile = tempfile.mkstemp()[1]
# add the userdata-file
openstackfolder = os.path.join(tempisodir, 'openstack')
latestfolder = os.path.join(openstackfolder, 'latest')
os.makedirs(latestfolder)
userdatafile = os.path.join(latestfolder, 'user_data')
userdatatemplatefile = "%s.template" % userdatafile
template_data = get_template_data(session, vmuuid)
userdata = customize_userdata(userdata_template, template_data)
util.write_file(userdatafile, userdata)
util.write_file(userdatatemplatefile, userdata_template)
log.debug("Userdata: %s" % (userdata))
# Also add the Linux guest agent
temptoolsisodir = tempfile.mkdtemp()
tools_iso_path = find_latest_tools_iso_path()
cmd = ['mount', '-o', 'loop',
tools_iso_path, temptoolsisodir]
util.runlocal(cmd)
umountrequired = True
agentpath = os.path.join(tempisodir, 'agent')
os.makedirs(agentpath)
agentfiles = ['xe-daemon', 'xe-linux-distribution',
'xe-linux-distribution.service',
'xen-vcpu-hotplug.rules', 'install.sh',
'versions.deb', 'versions.rpm', "versions.tgz"]
for filename in agentfiles:
path = os.path.join(temptoolsisodir, 'Linux', filename)
shutil.copy(path, agentpath)
agentfilepaths.append(os.path.join(agentpath, filename))
# Finally wrap up the iso
util.make_iso('config-2', tempisodir, tempisofile)
finally:
# And tidy
if umountrequired:
cmd = ['umount', temptoolsisodir]
util.runlocal(cmd)
for path in [temptoolsisodir, userdatafile, userdatatemplatefile,
latestfolder, openstackfolder] + agentfilepaths + \
[agentpath, tempisodir]:
if path is not None:
if os.path.isdir(path):
os.rmdir(path)
elif os.path.isfile(path):
os.remove(path)
else:
log.debug("create_config_drive_iso: Not tidying %s because"
" it could not be found" % (path))
return tempisofile
def remove_config_drive(session, vmrecord, configdisk_namelabel):
for vbd in vmrecord['VBDs']:
vbdrecord = session.xenapi.VBD.get_record(vbd)
vdirecord = None
if vbdrecord['VDI'] != api_helper.NULLREF:
vdirecord = session.xenapi.VDI.get_record(vbdrecord['VDI'])
        # vdirecord is None for VBDs without a VDI (e.g. empty CD drives).
        if ((vdirecord is not None and
             OTHER_CONFIG_CONFIG_DRIVE_KEY in vdirecord['other_config']) or
                OTHER_CONFIG_CONFIG_DRIVE_KEY in vbdrecord['other_config']):
log.info("remove_config_drive will destroy vdi %s"
% (vdirecord['uuid']))
if vbdrecord['currently_attached']:
session.xenapi.VBD.unplug(vbd)
session.xenapi.VBD.destroy(vbd)
session.xenapi.VDI.destroy(vbdrecord['VDI'])
def create_config_drive(session, vmuuid, sruuid, userdata):
log.info("create_config_drive for vm %s on sr %s" % (vmuuid, sruuid))
vmref = session.xenapi.VM.get_by_uuid(vmuuid)
vmrecord = session.xenapi.VM.get_record(vmref)
prepare_vm_for_config_drive(session, vmref, vmuuid)
isofile = create_config_drive_iso(session, userdata, vmuuid)
other_config_keys = {OTHER_CONFIG_CONFIG_DRIVE_KEY: 'True'}
try:
configdisk_namelabel = 'Automatic Config Drive'
vdiref = api_helper.import_disk(session, sruuid, isofile, 'raw',
configdisk_namelabel,
other_config_keys=other_config_keys)
finally:
os.remove(isofile)
remove_config_drive(session, vmrecord, configdisk_namelabel)
vbdref = api_helper.create_vbd(session, vmref, vdiref, 'ro', False,
other_config_keys=other_config_keys)
if vmrecord['power_state'] == 'Running':
session.xenapi.VBD.plug(vbdref)
    if re.search(r"\n\s*- ssh-rsa %[XS]*CONTAINERRSAPUB%", userdata):
        # If the %CONTAINERRSAPUB% key line isn't commented out,
        # automatically mark the VM as monitorable.
docker_monitor_api.mark_monitorable_vm(vmuuid, session)
vdirecord = session.xenapi.VDI.get_record(vdiref)
return vdirecord['uuid']
def get_config_drive_configuration(session, vdiuuid):
log.info("get_config_drive_configuration from vdi %s" % (vdiuuid))
tempdir = None
umountrequired = False
filename = api_helper.export_disk(session, vdiuuid)
try:
tempdir = tempfile.mkdtemp()
cmd = ['mount', '-o', 'loop', '-t', 'iso9660', filename, tempdir]
util.runlocal(cmd)
umountrequired = True
userdatapath_template = os.path.join(
tempdir, 'openstack', 'latest', 'user_data.template')
content = util.read_file(userdatapath_template)
finally:
os.remove(filename)
if umountrequired:
cmd = ['umount', tempdir]
util.runlocal(cmd)
if tempdir:
os.rmdir(tempdir)
return content
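# A minimal sketch (not part of the original module) exercising the pure
# template helpers above with hypothetical inputs; the real template is
# shipped as cloud-config.template.
if __name__ == '__main__':
    demo_template = ("hostname: %VMNAMETOHOSTNAME%\n"
                     "ssh_key: %CONTAINERRSAPUB%\n"
                     "%HINEXISTS%eth: %HIN%%ENDHINEXISTS%")
    demo_data = {'vm_name': 'demovm',
                 'rsa_pub': 'ssh-rsa AAAA-example',
                 'mgmt_dev': None}
    # With no management device, the %HINEXISTS% block is stripped.
    print(customize_userdata(demo_template, demo_data))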
|
{
"content_hash": "6023d81079c612dcf62a534267333269",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 79,
"avg_line_length": 38.02250803858521,
"alnum_prop": 0.6178435517970402,
"repo_name": "xenserver/xscontainer",
"id": "f6152f39e24bb9dd54ca82ec028f1d08a4de483f",
"size": "11825",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/xscontainer/coreos.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2630"
},
{
"name": "Makefile",
"bytes": "654"
},
{
"name": "Python",
"bytes": "129835"
},
{
"name": "Shell",
"bytes": "5456"
}
],
"symlink_target": ""
}
|
"""
[2016-05-04] Challenge #265 [Easy] Permutations and combinations part 2
https://www.reddit.com/r/dailyprogrammer/comments/4htg9t/20160504_challenge_265_easy_permutations_and/
Basically the same challenge as Monday's, but with much larger numbers and so code that must find permutation and
combination numbers without generating the full list.
# permutation number
https://en.wikipedia.org/wiki/Factorial_number_system is the traditional technique used to solve this, but a very
similar recursive approach can calculate how many permutation indexes were skipped in order to set the next position.
**input:**
what is the 12345678901234 permutation index of 42-length list
**output:**
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 35 32 36 34 39 29 27 33 26 37 40 30 31 41 28
38
**input2:**
what is the permutation number of: 25 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 35 32 36 34
39 29 27 33 26 37 40 30 31 41 28 38
**output:**
836313165329095177704251551336018791641145678901234
# combination number
https://en.wikipedia.org/wiki/Combinatorial_number_system and
https://msdn.microsoft.com/en-us/library/aa289166%28VS.71%29.aspx show the theory.
It may also be useful to know that the numbers of combinations of 4 out of 10 that start with 0 1 2 3 4 5 6 are (in J
notation, `!` is the "out of" operator)
3 ! 9 8 7 6 5 4 3
84 56 35 20 10 4 1
with the last combination `6 7 8 9` (84 combinations for 4 out of 10 start with 0, 56 start with 1...)
**input:** (find the combination number)
0 1 2 88 from 4 out of 100
**output:**
85
**challenge input:** (find the combination number)
0 1 2 88 111 from 5 out of 120
15 25 35 45 55 65 85 from 7 out of 100
**challenge input 2**
what is the 123456789 combination index for 5 out of 100
**bonus:**
how many combinations from `30 out of 100` start with `10 30 40`
**bonus2:**
write a function that compresses a sorted list of numbers based on its lowest and highest values. Should return: low,
high, count, combination number.
example list:
15 25 35 45 55 65 85
output with missing combination number (x):
15 85 7 x
"""
def main():
pass
if __name__ == "__main__":
main()
|
{
"content_hash": "a896f7307b601d8f213c90983db8336b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 119,
"avg_line_length": 40.345454545454544,
"alnum_prop": 0.7210455159981974,
"repo_name": "DayGitH/Python-Challenges",
"id": "a264ec89a30d6ff33bb9ffb417f7251d78e8e84f",
"size": "2219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DailyProgrammer/DP20160504A.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "5002"
},
{
"name": "Python",
"bytes": "2471582"
}
],
"symlink_target": ""
}
|
__author__ = 'adeb'
# Hack to be able to run this module
import sys
import os
sys.path.insert(0, os.path.abspath('../..'))
import matplotlib
matplotlib.use('Agg')
import PIL
from spynet.models.network import *
from spynet.models.neuron_type import *
from spynet.data.dataset import Dataset
from spynet.utils.utilities import open_h5file, tile_raster_images, MSE
if __name__ == '__main__':
mode = "drop"
experiment_path = "./experiments/mnist_example/"
data_path = "./datasets/mnist/"
testing_data_path = data_path + "test.h5"
ds_testing = Dataset.create_and_read(testing_data_path)
# Load the network
net = AutoEncoder()
net.init([28**2, 256, 28**2], dropout=True, dropout_p=[0.5], neuron_function=NeuronSigmoid())
net.load_parameters(open_h5file(experiment_path + "netdrop.net"))
i = ds_testing.inputs[0:10,:]
e = net.predict(i, 10)
print ""
print MSE(e,i)
image = PIL.Image.fromarray(tile_raster_images(X=net.ls_layers[0].ls_layer_blocks[0].w.get_value(borrow=True).T,
img_shape=(28, 28), tile_shape=(16, 16),
tile_spacing=(1, 1)))
image.save(experiment_path + "filters" + mode + ".png")
image = PIL.Image.fromarray(tile_raster_images(X=i,
img_shape=(28, 28), tile_shape=(1, 10),
tile_spacing=(1, 1)))
image.save(experiment_path + "i" + mode + ".png")
image = PIL.Image.fromarray(tile_raster_images(X=e,
img_shape=(28, 28), tile_shape=(1, 10),
tile_spacing=(1, 1)))
image.save(experiment_path + "e" + mode + ".png")
|
{
"content_hash": "bab2d120c0a91e6a802e3bbe76352d8c",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 116,
"avg_line_length": 33.55769230769231,
"alnum_prop": 0.5684813753581662,
"repo_name": "adbrebs/spynet",
"id": "c1961dd813387c28a82043308de1b267d2f645b5",
"size": "1745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mnist_example/test_mnist_autoencoder.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "114487"
}
],
"symlink_target": ""
}
|
import yaml
import json
import urllib.request
import time
from wallet import Wallet
from database.database import Database
config = yaml.safe_load(open("config.yml"))
f = open('dataset.txt', 'r')
dataset = f.readlines()
dataset = [float(i) for i in dataset]
walletBitcoin = 0.0
cash_wallet = Wallet()
firstrun = True
transection = 0.002  # transaction fee as a fraction of the price (0.2%)
print ('Starting simulator...')
# Unused in this offline version; the live price fetch is kept for reference.
def getSellPrice():
with urllib.request.urlopen('https://api.bitfinex.com/v1/pubticker/btcusd') as response:
html = response.read()
return float(json.loads(html)['last_price'])
oldPrice = currentPrice = initPrice = dataset[0]
def getTransectionfee():
return transection * currentPrice
def getTotalNetWorth():
return cash_wallet.cash + (walletBitcoin * currentPrice)
def showTotalNetWorth():
print ('Total bitcoin:', walletBitcoin, 'walletDollar', cash_wallet.cash)
print ('Total networth:', getTotalNetWorth(), 'profit', getTotalNetWorth() - totalNetWorthStart )
totalNetWorthStart = getTotalNetWorth()
def sell():
global walletBitcoin
# print ("sell")
walletBitcoin = walletBitcoin - 1
cash_wallet.collect(currentPrice)
cash_wallet.give(getTransectionfee())
def buy():
global walletBitcoin
# print ("buy")
walletBitcoin = walletBitcoin + 1
cash_wallet.give(currentPrice)
cash_wallet.give(getTransectionfee())
# Remove first price datapoint because we don't need it anymore
dataset.pop(0)
results = []
smallestWindow = 2
biggestWindow = 1000
for windowSize in range(smallestWindow,biggestWindow):
window = []
for i in range(0, windowSize):
window.append(0.0)
    # reset the wallets for this window size
walletBitcoin = 0.0
cash_wallet = Wallet()
currentIteration = 0
oldAverage = 0
trend = 0
lastTransactionPrice = initPrice
print("Running test for window size")
print(windowSize)
totalBuys = 0
totalSells = 0
    # Now iterate over all consecutive datapoints
for currentPrice in dataset:
window.append(currentPrice)
window.pop(0)
currentIteration = currentIteration + 1
if currentIteration == windowSize:
currentAverage = round(sum(window) / len(window), 2)
oldAverage = currentAverage
if currentIteration > windowSize:
currentAverage = round(sum(window) / len(window), 2)
didSomething = False
oldTrend = trend
if currentPrice > currentAverage:
trend = 1
elif currentPrice < currentAverage:
trend = -1
else:
trend = 0
# smartness
if trend != oldTrend:
if trend == 1:
if currentAverage < oldAverage:
if lastTransactionPrice < (currentPrice + currentPrice * transection):
if walletBitcoin > 0:
sell()
totalSells += 1
lastTransactionPrice = currentPrice
didSomething = True
elif trend == -1:
if currentAverage > oldAverage:
if lastTransactionPrice > (currentPrice + currentPrice * transection):
buy()
totalBuys += 1
lastTransactionPrice = currentPrice
didSomething = True
oldAverage = currentAverage
# if not didSomething:
# print("did nothing")
print("Total buys")
print(totalBuys)
print("Total sells")
print(totalSells)
showTotalNetWorth()
results.append(getTotalNetWorth())
    # Everything below in this loop body is unreachable legacy code from
    # the earlier naive strategy; this `continue` skips it on every pass.
    continue
print ('old;',oldPrice,'currentPrice;',currentPrice)
if currentPrice > oldPrice and walletBitcoin > 0:
print ("sell")
walletBitcoin = walletBitcoin - 1
cash_wallet.collect(currentPrice)
cash_wallet.give(getTransectionfee())
elif currentPrice < oldPrice and cash_wallet.cash > 0:
print ("buy")
walletBitcoin = walletBitcoin + 1
cash_wallet.give(currentPrice)
cash_wallet.give(getTransectionfee())
showTotalNetWorth()
oldPrice = currentPrice
# Don't sleep anymore because we don't pull data from the internet
# anymore in this version of the code.
# time.sleep(config['simulator']['seconds_between_refresh'])
print(results)
print(results.index(max(results)))
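# A compact alternative (my own sketch, unused above) to the manual window
# list: collections.deque with maxlen evicts the oldest price automatically.
def moving_average_stream(prices, window_size):
    """Yield the rolling mean once the window has filled."""
    from collections import deque
    window = deque(maxlen=window_size)
    for price in prices:
        window.append(price)
        if len(window) == window_size:
            yield round(sum(window) / len(window), 2)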
|
{
"content_hash": "4aa1dc9d768654ebd1a554f66142fa99",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 101,
"avg_line_length": 26.83431952662722,
"alnum_prop": 0.6116868798235943,
"repo_name": "ifcodingmaastricht/blockchainbot",
"id": "42dd0d9db5b06bf7e23693b44f3c48aae1381257",
"size": "4535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simulator_offline_newkidsontheblockchain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27454"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
from flexget.manager import Session
from flexget.plugins.filter.delay import DelayedEntry
class TestDelay:
config = """
tasks:
test:
mock:
- title: entry 1
delay: 1 hours
"""
def test_delay(self, execute_task):
task = execute_task('test')
assert not task.entries, 'No entries should have passed delay'
# Age the entry in the db
session = Session()
delayed_entries = session.query(DelayedEntry).all()
for entry in delayed_entries:
entry.expire = entry.expire - timedelta(hours=1)
session.commit()
task = execute_task('test')
assert task.entries, 'Entry should have passed delay and been inserted'
# Make sure entry is only injected once
task = execute_task('test')
        assert not task.entries, 'Entry should only be inserted once'
|
{
"content_hash": "32f2bc877a34378eaa4622526d9cf297",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 32.206896551724135,
"alnum_prop": 0.6134903640256959,
"repo_name": "Flexget/Flexget",
"id": "6a83c9d76996a3a3927b43c6f6c24440d6c00416",
"size": "934",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/tests/test_delay.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1237"
},
{
"name": "HTML",
"bytes": "82565"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3797883"
},
{
"name": "SCSS",
"bytes": "11875"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1568"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
import cgi
try: # Python 3
from urllib.parse import urlencode
except ImportError: # Python 2
from urllib import urlencode
from owslib.etree import etree
from owslib.util import openURL, strip_bom
class WMSCapabilitiesReader(object):
"""Read and parse capabilities document into a lxml.etree infoset
"""
def __init__(self, version='1.1.1', url=None, un=None, pw=None, headers=None):
"""Initialize"""
self.version = version
self._infoset = None
self.url = url
self.username = un
self.password = pw
self.headers = headers
self.request = None
#if self.username and self.password:
## Provide login information in order to use the WMS server
## Create an OpenerDirector with support for Basic HTTP
## Authentication...
#passman = HTTPPasswordMgrWithDefaultRealm()
#passman.add_password(None, self.url, self.username, self.password)
#auth_handler = HTTPBasicAuthHandler(passman)
#opener = build_opener(auth_handler)
#self._open = opener.open
def capabilities_url(self, service_url):
"""Return a capabilities url
"""
qs = []
if service_url.find('?') != -1:
qs = cgi.parse_qsl(service_url.split('?')[1])
params = [x[0] for x in qs]
if 'service' not in params:
qs.append(('service', 'WMS'))
if 'request' not in params:
qs.append(('request', 'GetCapabilities'))
if 'version' not in params:
qs.append(('version', self.version))
urlqs = urlencode(tuple(qs))
return service_url.split('?')[0] + '?' + urlqs
def read(self, service_url, timeout=30):
"""Get and parse a WMS capabilities document, returning an
elementtree instance
service_url is the base url, to which is appended the service,
version, and request parameters
"""
self.request = self.capabilities_url(service_url)
# now split it up again to use the generic openURL function...
spliturl = self.request.split('?')
u = openURL(spliturl[0], spliturl[1], method='Get',
username=self.username,
password=self.password,
timeout=timeout,
headers=self.headers)
raw_text = strip_bom(u.read())
return etree.fromstring(raw_text)
def readString(self, st):
"""Parse a WMS capabilities document, returning an elementtree instance.
string should be an XML capabilities document
"""
if not isinstance(st, str) and not isinstance(st, bytes):
raise ValueError("String must be of type string or bytes, not %s" % type(st))
raw_text = strip_bom(st)
return etree.fromstring(raw_text)
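# A minimal sketch (not part of the original module): capabilities_url is
# pure string handling, so it can be exercised without any WMS server; the
# endpoint below is hypothetical.
if __name__ == '__main__':
    reader = WMSCapabilitiesReader(version='1.3.0')
    print(reader.capabilities_url('http://example.com/wms?map=demo'))
    # -> http://example.com/wms?map=demo&service=WMS&request=GetCapabilities&version=1.3.0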
|
{
"content_hash": "a5e757cbb11da03c5813a68cbad46794",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 89,
"avg_line_length": 35.36904761904762,
"alnum_prop": 0.5950858296869741,
"repo_name": "geographika/OWSLib",
"id": "7de5d6e2ab97b352f913c78c8179f5d6fb87df3a",
"size": "2971",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "owslib/map/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "67"
},
{
"name": "Python",
"bytes": "678963"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
if sys.version_info[0] != 2:
basestring = str
class Processor:
"""
Framework for allowing modules to modify the input data as a set of
transforms. Once the original input data is loaded, the preprocessor
iteratively allows Modules to inspect the data and generate a list of
Transforms against the data. The Transforms are applied in descending
order by line number, and the resulting data is used for the next pass.
Once all modules have transformed the data, it is ready for writing out
to a file.
"""
    def __init__(self):
        # Instance attributes: class-level mutable defaults would be
        # shared across every Processor instance.
        self.data = []
        self.transforms = {}
        self.modules = []
def register(self, module):
"""
This method registers an individual module to be called when processing
"""
self.modules.append(module)
def input(self, file):
"""
This method reads the original data from an object following
the file interface.
"""
self.data = file.readlines()
def process(self):
"""
This method handles the actual processing of Modules and Transforms
"""
self.modules.sort(key=lambda x: x.priority)
for module in self.modules:
transforms = module.transform(self.data)
transforms.sort(key=lambda x: x.linenum, reverse=True)
for transform in transforms:
linenum = transform.linenum
if isinstance(transform.data, basestring):
transform.data = [transform.data]
if transform.oper == "prepend":
self.data[linenum:linenum] = transform.data
elif transform.oper == "append":
self.data[linenum+1:linenum+1] = transform.data
elif transform.oper == "swap":
self.data[linenum:linenum+1] = transform.data
elif transform.oper == "drop":
self.data[linenum:linenum+1] = []
elif transform.oper == "noop":
pass
def output(self, file):
"""
This method writes the resulting data to an object following
the file interface.
"""
file.writelines(self.data)
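# A minimal sketch (my own, not shipped with MarkdownPP) of the interface
# process() implies: a module exposes `priority` and `transform(data)` and
# returns Transform-like objects carrying linenum/oper/data.
if __name__ == '__main__':
    import io
    from collections import namedtuple
    Transform = namedtuple('Transform', 'linenum oper data')
    class UpperFirstLine(object):
        """Toy module: swap line 0 for its upper-cased version."""
        priority = 0
        def transform(self, data):
            return [Transform(linenum=0, oper='swap', data=data[0].upper())]
    proc = Processor()
    proc.register(UpperFirstLine())
    proc.input(io.StringIO(u'hello\nworld\n'))
    proc.process()
    proc.output(sys.stdout)  # prints HELLO then world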
|
{
"content_hash": "99eb858d3f798ca5ccdac9a21f645ef7",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 31,
"alnum_prop": 0.5956989247311828,
"repo_name": "csparkresearch/ExpEYES17-Qt",
"id": "cb7f029ad2604cb29ab0710082f776b758cf3c44",
"size": "2387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SPARK17/textManual/MarkdownPP/Processor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19579"
},
{
"name": "HTML",
"bytes": "35913"
},
{
"name": "JavaScript",
"bytes": "667"
},
{
"name": "Makefile",
"bytes": "15356"
},
{
"name": "Python",
"bytes": "621843"
},
{
"name": "Ruby",
"bytes": "1448"
},
{
"name": "Shell",
"bytes": "822"
}
],
"symlink_target": ""
}
|
"""Tools to allow different io formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import _read_keyed_batch_examples_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import _read_keyed_batch_features_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import queue_parsed_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.numpy_io import numpy_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import pandas_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.generator_io import generator_input_fn
|
{
"content_hash": "29c71a6f2fadf5e1d57cdd295475a44c",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 107,
"avg_line_length": 74.04166666666667,
"alnum_prop": 0.8300506471581317,
"repo_name": "mengxn/tensorflow",
"id": "456792835827f86c0fbc42822e688240e6643ed4",
"size": "2466",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/learn_io/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "177254"
},
{
"name": "C++",
"bytes": "22804170"
},
{
"name": "CMake",
"bytes": "140337"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "794578"
},
{
"name": "HTML",
"bytes": "593171"
},
{
"name": "Java",
"bytes": "286562"
},
{
"name": "JavaScript",
"bytes": "13906"
},
{
"name": "Jupyter Notebook",
"bytes": "1833654"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37240"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64166"
},
{
"name": "Protocol Buffer",
"bytes": "209604"
},
{
"name": "Python",
"bytes": "20006123"
},
{
"name": "Shell",
"bytes": "331908"
},
{
"name": "TypeScript",
"bytes": "789019"
}
],
"symlink_target": ""
}
|
from .data_viz import DataVisualizationNode, SplomPlotNode
from .cv_plot import CrossValidationVisualizationNode
__all__ = ["DataVisualizationNode",
"SplomPlotNode",
"CrossValidationVisualizationNode"]
|
{
"content_hash": "64a37595afa17eeefb0caa26ff78e42c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 58,
"avg_line_length": 37.5,
"alnum_prop": 0.7466666666666667,
"repo_name": "CurryBoy/ProtoML-Deprecated",
"id": "a2d074068423e6c6bcb4235fd1b83f73009fbb4d",
"size": "225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "protoml/viz/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9383"
},
{
"name": "C++",
"bytes": "43265"
},
{
"name": "Matlab",
"bytes": "2905"
},
{
"name": "Python",
"bytes": "126308"
},
{
"name": "Shell",
"bytes": "1345"
}
],
"symlink_target": ""
}
|
import flask
from openahjo_activity_streams import convert
import requests
import logging
import json
OPENAHJO_URL = 'http://dev.hel.fi/paatokset/v1/agenda_item/?order_by=-last_modified_time'
def create_app(remote_url=OPENAHJO_URL, converter=convert.to_activity_stream):
logging.basicConfig(level=logging.INFO)
application = flask.Flask(__name__)
application.config['REMOTE_URL'] = remote_url
application.config['CONVERTER'] = converter
@application.route('/')
def show_something():
openahjo_data = requests.get(application.config['REMOTE_URL'])
converted_data = application.config['CONVERTER'](openahjo_data.json())
return application.response_class(json.dumps(converted_data), mimetype='application/activity+json')
return application
application = create_app()
if __name__ == '__main__':
application.run()
|
{
"content_hash": "e7b411368e9cb9f680da9b5e49baa3a4",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 107,
"avg_line_length": 33.46153846153846,
"alnum_prop": 0.7218390804597701,
"repo_name": "ThoughtWorksInc/HelsinkiActivityStream",
"id": "1e0b094ff56f9301f3986dcc6adbfbde06313b6c",
"size": "954",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openahjo_activity_streams/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35922"
},
{
"name": "Ruby",
"bytes": "671"
},
{
"name": "Shell",
"bytes": "1067"
}
],
"symlink_target": ""
}
|
"""Provides the Session class and related utilities."""
import weakref
from .. import util, sql, engine, exc as sa_exc, event
from ..sql import util as sql_util, expression
from . import (
SessionExtension, attributes, exc, query, util as orm_util,
loading, identity
)
from .util import (
object_mapper, class_mapper,
_class_to_mapper, _state_mapper, object_state,
_none_set
)
from .unitofwork import UOWTransaction
from .mapper import Mapper
from .events import SessionEvents
statelib = util.importlater("sqlalchemy.orm", "state")
import sys
__all__ = ['Session', 'SessionTransaction', 'SessionExtension', 'sessionmaker']
class _SessionClassMethods(object):
"""Class-level methods for :class:`.Session`, :class:`.sessionmaker`."""
@classmethod
def close_all(cls):
"""Close *all* sessions in memory."""
for sess in _sessions.values():
sess.close()
@classmethod
def identity_key(cls, *args, **kwargs):
"""Return an identity key.
This is an alias of :func:`.util.identity_key`.
"""
return orm_util.identity_key(*args, **kwargs)
@classmethod
def object_session(cls, instance):
"""Return the :class:`.Session` to which an object belongs.
This is an alias of :func:`.object_session`.
"""
return object_session(instance)
ACTIVE = util.symbol('ACTIVE')
PREPARED = util.symbol('PREPARED')
COMMITTED = util.symbol('COMMITTED')
DEACTIVE = util.symbol('DEACTIVE')
CLOSED = util.symbol('CLOSED')
class SessionTransaction(object):
"""A :class:`.Session`-level transaction.
:class:`.SessionTransaction` is a mostly behind-the-scenes object
not normally referenced directly by application code. It coordinates
among multiple :class:`.Connection` objects, maintaining a database
transaction for each one individually, committing or rolling them
back all at once. It also provides optional two-phase commit behavior
which can augment this coordination operation.
The :attr:`.Session.transaction` attribute of :class:`.Session`
refers to the current :class:`.SessionTransaction` object in use, if any.
A :class:`.SessionTransaction` is associated with a :class:`.Session`
in its default mode of ``autocommit=False`` immediately, associated
with no database connections. As the :class:`.Session` is called upon
to emit SQL on behalf of various :class:`.Engine` or :class:`.Connection`
objects, a corresponding :class:`.Connection` and associated
:class:`.Transaction` is added to a collection within the
:class:`.SessionTransaction` object, becoming one of the
connection/transaction pairs maintained by the
:class:`.SessionTransaction`.
The lifespan of the :class:`.SessionTransaction` ends when the
:meth:`.Session.commit`, :meth:`.Session.rollback` or
:meth:`.Session.close` methods are called. At this point, the
:class:`.SessionTransaction` removes its association with its parent
:class:`.Session`. A :class:`.Session` that is in ``autocommit=False``
mode will create a new :class:`.SessionTransaction` to replace it
immediately, whereas a :class:`.Session` that's in ``autocommit=True``
mode will remain without a :class:`.SessionTransaction` until the
:meth:`.Session.begin` method is called.
Another detail of :class:`.SessionTransaction` behavior is that it is
capable of "nesting". This means that the :meth:`.Session.begin` method
can be called while an existing :class:`.SessionTransaction` is already
present, producing a new :class:`.SessionTransaction` that temporarily
replaces the parent :class:`.SessionTransaction`. When a
:class:`.SessionTransaction` is produced as nested, it assigns itself to
the :attr:`.Session.transaction` attribute. When it is ended via
:meth:`.Session.commit` or :meth:`.Session.rollback`, it restores its
parent :class:`.SessionTransaction` back onto the
:attr:`.Session.transaction` attribute. The behavior is effectively a
stack, where :attr:`.Session.transaction` refers to the current head of
the stack.
The purpose of this stack is to allow nesting of
:meth:`.Session.rollback` or :meth:`.Session.commit` calls in context
with various flavors of :meth:`.Session.begin`. This nesting behavior
applies to when :meth:`.Session.begin_nested` is used to emit a
SAVEPOINT transaction, and is also used to produce a so-called
"subtransaction" which allows a block of code to use a
begin/rollback/commit sequence regardless of whether or not its enclosing
code block has begun a transaction. The :meth:`.flush` method, whether
called explicitly or via autoflush, is the primary consumer of the
"subtransaction" feature, in that it wishes to guarantee that it works
    within a transaction block regardless of whether or not the
:class:`.Session` is in transactional mode when the method is called.
See also:
:meth:`.Session.rollback`
:meth:`.Session.commit`
:meth:`.Session.begin`
:meth:`.Session.begin_nested`
:attr:`.Session.is_active`
:meth:`.SessionEvents.after_commit`
:meth:`.SessionEvents.after_rollback`
:meth:`.SessionEvents.after_soft_rollback`
"""
_rollback_exception = None
def __init__(self, session, parent=None, nested=False):
self.session = session
self._connections = {}
self._parent = parent
self.nested = nested
self._state = ACTIVE
if not parent and nested:
raise sa_exc.InvalidRequestError(
"Can't start a SAVEPOINT transaction when no existing "
"transaction is in progress")
if self.session._enable_transaction_accounting:
self._take_snapshot()
if self.session.dispatch.after_transaction_create:
self.session.dispatch.after_transaction_create(self.session, self)
@property
def is_active(self):
return self.session is not None and self._state is ACTIVE
def _assert_active(self, prepared_ok=False,
rollback_ok=False,
closed_msg="This transaction is closed"):
if self._state is COMMITTED:
raise sa_exc.InvalidRequestError(
"This session is in 'committed' state; no further "
"SQL can be emitted within this transaction."
)
elif self._state is PREPARED:
if not prepared_ok:
raise sa_exc.InvalidRequestError(
"This session is in 'prepared' state; no further "
"SQL can be emitted within this transaction."
)
elif self._state is DEACTIVE:
if not rollback_ok:
if self._rollback_exception:
raise sa_exc.InvalidRequestError(
"This Session's transaction has been rolled back "
"due to a previous exception during flush."
" To begin a new transaction with this Session, "
"first issue Session.rollback()."
" Original exception was: %s"
% self._rollback_exception
)
else:
raise sa_exc.InvalidRequestError(
"This Session's transaction has been rolled back "
"by a nested rollback() call. To begin a new "
"transaction, issue Session.rollback() first."
)
elif self._state is CLOSED:
raise sa_exc.ResourceClosedError(closed_msg)
@property
def _is_transaction_boundary(self):
return self.nested or not self._parent
def connection(self, bindkey, **kwargs):
self._assert_active()
bind = self.session.get_bind(bindkey, **kwargs)
return self._connection_for_bind(bind)
def _begin(self, nested=False):
self._assert_active()
return SessionTransaction(
self.session, self, nested=nested)
def _iterate_parents(self, upto=None):
if self._parent is upto:
return (self,)
else:
if self._parent is None:
raise sa_exc.InvalidRequestError(
"Transaction %s is not on the active transaction list" % (
upto))
return (self,) + self._parent._iterate_parents(upto)
def _take_snapshot(self):
if not self._is_transaction_boundary:
self._new = self._parent._new
self._deleted = self._parent._deleted
self._dirty = self._parent._dirty
self._key_switches = self._parent._key_switches
return
if not self.session._flushing:
self.session.flush()
self._new = weakref.WeakKeyDictionary()
self._deleted = weakref.WeakKeyDictionary()
self._dirty = weakref.WeakKeyDictionary()
self._key_switches = weakref.WeakKeyDictionary()
def _restore_snapshot(self, dirty_only=False):
assert self._is_transaction_boundary
for s in set(self._new).union(self.session._new):
self.session._expunge_state(s)
if s.key:
del s.key
for s, (oldkey, newkey) in self._key_switches.items():
self.session.identity_map.discard(s)
s.key = oldkey
self.session.identity_map.replace(s)
for s in set(self._deleted).union(self.session._deleted):
if s.deleted:
#assert s in self._deleted
del s.deleted
self.session._update_impl(s, discard_existing=True)
assert not self.session._deleted
for s in self.session.identity_map.all_states():
if not dirty_only or s.modified or s in self._dirty:
s._expire(s.dict, self.session.identity_map._modified)
def _remove_snapshot(self):
assert self._is_transaction_boundary
if not self.nested and self.session.expire_on_commit:
for s in self.session.identity_map.all_states():
s._expire(s.dict, self.session.identity_map._modified)
for s in self._deleted:
s.session_id = None
self._deleted.clear()
def _connection_for_bind(self, bind):
self._assert_active()
if bind in self._connections:
return self._connections[bind][0]
if self._parent:
conn = self._parent._connection_for_bind(bind)
if not self.nested:
return conn
else:
if isinstance(bind, engine.Connection):
conn = bind
if conn.engine in self._connections:
raise sa_exc.InvalidRequestError(
"Session already has a Connection associated for the "
"given Connection's Engine")
else:
conn = bind.contextual_connect()
if self.session.twophase and self._parent is None:
transaction = conn.begin_twophase()
elif self.nested:
transaction = conn.begin_nested()
else:
transaction = conn.begin()
self._connections[conn] = self._connections[conn.engine] = \
(conn, transaction, conn is not bind)
self.session.dispatch.after_begin(self.session, self, conn)
return conn
def prepare(self):
if self._parent is not None or not self.session.twophase:
raise sa_exc.InvalidRequestError(
"'twophase' mode not enabled, or not root transaction; "
"can't prepare.")
self._prepare_impl()
def _prepare_impl(self):
self._assert_active()
if self._parent is None or self.nested:
self.session.dispatch.before_commit(self.session)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_parents(upto=self):
subtransaction.commit()
if not self.session._flushing:
for _flush_guard in xrange(100):
if self.session._is_clean():
break
self.session.flush()
else:
raise exc.FlushError(
"Over 100 subsequent flushes have occurred within "
"session.commit() - is an after_flush() hook "
"creating new objects?")
if self._parent is None and self.session.twophase:
try:
for t in set(self._connections.values()):
t[1].prepare()
except:
self.rollback()
raise
self._state = PREPARED
def commit(self):
self._assert_active(prepared_ok=True)
if self._state is not PREPARED:
self._prepare_impl()
if self._parent is None or self.nested:
for t in set(self._connections.values()):
t[1].commit()
self._state = COMMITTED
self.session.dispatch.after_commit(self.session)
if self.session._enable_transaction_accounting:
self._remove_snapshot()
self.close()
return self._parent
def rollback(self, _capture_exception=False):
self._assert_active(prepared_ok=True, rollback_ok=True)
stx = self.session.transaction
if stx is not self:
for subtransaction in stx._iterate_parents(upto=self):
subtransaction.close()
if self._state in (ACTIVE, PREPARED):
for transaction in self._iterate_parents():
if transaction._parent is None or transaction.nested:
transaction._rollback_impl()
transaction._state = DEACTIVE
break
else:
transaction._state = DEACTIVE
sess = self.session
if self.session._enable_transaction_accounting and \
not sess._is_clean():
# if items were added, deleted, or mutated
# here, we need to re-restore the snapshot
util.warn(
"Session's state has been changed on "
"a non-active transaction - this state "
"will be discarded.")
self._restore_snapshot(dirty_only=self.nested)
self.close()
if self._parent and _capture_exception:
self._parent._rollback_exception = sys.exc_info()[1]
sess.dispatch.after_soft_rollback(sess, self)
return self._parent
def _rollback_impl(self):
for t in set(self._connections.values()):
t[1].rollback()
if self.session._enable_transaction_accounting:
self._restore_snapshot(dirty_only=self.nested)
self.session.dispatch.after_rollback(self.session)
def close(self):
self.session.transaction = self._parent
if self._parent is None:
for connection, transaction, autoclose in \
set(self._connections.values()):
if autoclose:
connection.close()
else:
transaction.close()
self._state = CLOSED
if self.session.dispatch.after_transaction_end:
self.session.dispatch.after_transaction_end(self.session, self)
if self._parent is None:
if not self.session.autocommit:
self.session.begin()
self.session = None
self._connections = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self._assert_active(prepared_ok=True)
if self.session.transaction is None:
return
if type is None:
try:
self.commit()
except:
self.rollback()
raise
else:
self.rollback()
class Session(_SessionClassMethods):
"""Manages persistence operations for ORM-mapped objects.
The Session's usage paradigm is described at :doc:`/orm/session`.
"""
public_methods = (
'__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested',
'close', 'commit', 'connection', 'delete', 'execute', 'expire',
'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind',
'is_modified',
'merge', 'query', 'refresh', 'rollback',
'scalar')
def __init__(self, bind=None, autoflush=True, expire_on_commit=True,
_enable_transaction_accounting=True,
autocommit=False, twophase=False,
weak_identity_map=True, binds=None, extension=None,
query_cls=query.Query):
"""Construct a new Session.
See also the :class:`.sessionmaker` function which is used to
generate a :class:`.Session`-producing callable with a given
set of arguments.
:param autocommit:
.. warning::
The autocommit flag is **not for general use**, and if it is used,
queries should only be invoked within the span of a
:meth:`.Session.begin` / :meth:`.Session.commit` pair. Executing
queries outside of a demarcated transaction is a legacy mode
of usage, and can in some cases lead to concurrent connection
checkouts.
Defaults to ``False``. When ``True``, the
:class:`.Session` does not keep a persistent transaction running, and
will acquire connections from the engine on an as-needed basis,
returning them immediately after their use. Flushes will begin and
commit (or possibly rollback) their own transaction if no
transaction is present. When using this mode, the
:meth:`.Session.begin` method is used to explicitly start
transactions.
.. seealso::
:ref:`session_autocommit`
:param autoflush: When ``True``, all query operations will issue a
``flush()`` call to this ``Session`` before proceeding. This is a
convenience feature so that ``flush()`` need not be called
repeatedly in order for database queries to retrieve results. It's
typical that ``autoflush`` is used in conjunction with
``autocommit=False``. In this scenario, explicit calls to
``flush()`` are rarely needed; you usually only need to call
``commit()`` (which flushes) to finalize changes.
:param bind: An optional ``Engine`` or ``Connection`` to which this
``Session`` should be bound. When specified, all SQL operations
performed by this session will execute via this connectable.
:param binds: An optional dictionary which contains more granular
"bind" information than the ``bind`` parameter provides. This
dictionary can map individual ``Table`` instances as well as
``Mapper`` instances to individual ``Engine`` or ``Connection``
objects. Operations which proceed relative to a particular
``Mapper`` will consult this dictionary for the direct ``Mapper``
instance as well as the mapper's ``mapped_table`` attribute in
          order to locate a connectable to use. The full resolution is
described in the ``get_bind()`` method of ``Session``.
Usage looks like::
Session = sessionmaker(binds={
SomeMappedClass: create_engine('postgresql://engine1'),
somemapper: create_engine('postgresql://engine2'),
some_table: create_engine('postgresql://engine3'),
})
Also see the :meth:`.Session.bind_mapper`
and :meth:`.Session.bind_table` methods.
:param \class_: Specify an alternate class other than
``sqlalchemy.orm.session.Session`` which should be used by the
returned class. This is the only argument that is local to the
``sessionmaker()`` function, and is not sent directly to the
constructor for ``Session``.
:param _enable_transaction_accounting: Defaults to ``True``. A
legacy-only flag which when ``False`` disables *all* 0.5-style
object accounting on transaction boundaries, including auto-expiry
of instances on rollback and commit, maintenance of the "new" and
"deleted" lists upon rollback, and autoflush of pending changes upon
begin(), all of which are interdependent.
:param expire_on_commit: Defaults to ``True``. When ``True``, all
instances will be fully expired after each ``commit()``, so that
all attribute/object access subsequent to a completed transaction
will load from the most recent database state.
:param extension: An optional
:class:`~.SessionExtension` instance, or a list
of such instances, which will receive pre- and post- commit and
flush events, as well as a post-rollback event. **Deprecated.**
Please see :class:`.SessionEvents`.
:param query_cls: Class which should be used to create new Query
objects, as returned by the ``query()`` method. Defaults to
:class:`~sqlalchemy.orm.query.Query`.
:param twophase: When ``True``, all transactions will be started as
a "two phase" transaction, i.e. using the "two phase" semantics
of the database in use along with an XID. During a ``commit()``,
after ``flush()`` has been issued for all attached databases, the
``prepare()`` method on each database's ``TwoPhaseTransaction``
will be called. This allows each database to roll back the entire
transaction, before each transaction is committed.
:param weak_identity_map: Defaults to ``True`` - when set to
``False``, objects placed in the :class:`.Session` will be
strongly referenced until explicitly removed or the
:class:`.Session` is closed. **Deprecated** - this option
is obsolete.
"""
if weak_identity_map:
self._identity_cls = identity.WeakInstanceDict
else:
util.warn_deprecated("weak_identity_map=False is deprecated. "
"This feature is not needed.")
self._identity_cls = identity.StrongInstanceDict
self.identity_map = self._identity_cls()
self._new = {} # InstanceState->object, strong refs object
self._deleted = {} # same
self.bind = bind
self.__binds = {}
self._flushing = False
self._warn_on_events = False
self.transaction = None
self.hash_key = _new_sessionid()
self.autoflush = autoflush
self.autocommit = autocommit
self.expire_on_commit = expire_on_commit
self._enable_transaction_accounting = _enable_transaction_accounting
self.twophase = twophase
self._query_cls = query_cls
if extension:
for ext in util.to_list(extension):
SessionExtension._adapt_listener(self, ext)
if binds is not None:
for mapperortable, bind in binds.iteritems():
if isinstance(mapperortable, (type, Mapper)):
self.bind_mapper(mapperortable, bind)
else:
self.bind_table(mapperortable, bind)
if not self.autocommit:
self.begin()
_sessions[self.hash_key] = self
dispatch = event.dispatcher(SessionEvents)
connection_callable = None
transaction = None
"""The current active or inactive :class:`.SessionTransaction`."""
def begin(self, subtransactions=False, nested=False):
"""Begin a transaction on this Session.
If this Session is already within a transaction, either a plain
transaction or nested transaction, an error is raised, unless
``subtransactions=True`` or ``nested=True`` is specified.
The ``subtransactions=True`` flag indicates that this
:meth:`~.Session.begin` can create a subtransaction if a transaction
is already in progress. For documentation on subtransactions, please
see :ref:`session_subtransactions`.
The ``nested`` flag begins a SAVEPOINT transaction and is equivalent
to calling :meth:`~.Session.begin_nested`. For documentation on
SAVEPOINT transactions, please see :ref:`session_begin_nested`.
"""
if self.transaction is not None:
if subtransactions or nested:
self.transaction = self.transaction._begin(
nested=nested)
else:
raise sa_exc.InvalidRequestError(
"A transaction is already begun. Use "
"subtransactions=True to allow subtransactions.")
else:
self.transaction = SessionTransaction(
self, nested=nested)
return self.transaction # needed for __enter__/__exit__ hook
def begin_nested(self):
"""Begin a `nested` transaction on this Session.
The target database(s) must support SQL SAVEPOINTs or a
SQLAlchemy-supported vendor implementation of the idea.
For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
"""
return self.begin(nested=True)
def rollback(self):
"""Rollback the current transaction in progress.
If no transaction is in progress, this method is a pass-through.
This method rolls back the current transaction or nested transaction
regardless of subtransactions being in effect. All subtransactions up
to the first real transaction are closed. Subtransactions occur when
begin() is called multiple times.
.. seealso::
:ref:`session_rollback`
"""
if self.transaction is None:
pass
else:
self.transaction.rollback()
def commit(self):
"""Flush pending changes and commit the current transaction.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
By default, the :class:`.Session` also expires all database
loaded state on all ORM-managed attributes after transaction commit.
        This is so that subsequent operations load the most recent
data from the database. This behavior can be disabled using
the ``expire_on_commit=False`` option to :class:`.sessionmaker` or
the :class:`.Session` constructor.
If a subtransaction is in effect (which occurs when begin() is called
multiple times), the subtransaction will be closed, and the next call
to ``commit()`` will operate on the enclosing transaction.
When using the :class:`.Session` in its default mode of
``autocommit=False``, a new transaction will
be begun immediately after the commit, but note that the newly begun
transaction does *not* use any connection resources until the first
SQL is actually emitted.
.. seealso::
:ref:`session_committing`
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.commit()
def prepare(self):
"""Prepare the current transaction in progress for two phase commit.
If no transaction is in progress, this method raises an
:exc:`~sqlalchemy.exc.InvalidRequestError`.
Only root transactions of two phase sessions can be prepared. If the
current transaction is not such, an
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
if self.transaction is None:
if not self.autocommit:
self.begin()
else:
raise sa_exc.InvalidRequestError("No transaction is begun.")
self.transaction.prepare()
def connection(self, mapper=None, clause=None,
bind=None,
close_with_result=False,
**kw):
"""Return a :class:`.Connection` object corresponding to this
:class:`.Session` object's transactional state.
If this :class:`.Session` is configured with ``autocommit=False``,
either the :class:`.Connection` corresponding to the current
transaction is returned, or if no transaction is in progress, a new
one is begun and the :class:`.Connection` returned (note that no
transactional state is established with the DBAPI until the first
SQL statement is emitted).
Alternatively, if this :class:`.Session` is configured with
``autocommit=True``, an ad-hoc :class:`.Connection` is returned
using :meth:`.Engine.contextual_connect` on the underlying
:class:`.Engine`.
Ambiguity in multi-bind or unbound :class:`.Session` objects can be
resolved through any of the optional keyword arguments. This
ultimately makes usage of the :meth:`.get_bind` method for resolution.
:param bind:
Optional :class:`.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes precedence
over ``mapper``, ``clause``.
:param mapper:
Optional :func:`.mapper` mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause``.
:param clause:
A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
:func:`~.sql.expression.text`,
etc.) which will be used to locate a bind, if a bind
cannot otherwise be identified.
:param close_with_result: Passed to :meth:`Engine.connect`, indicating
the :class:`.Connection` should be considered "single use",
automatically closing when the first result set is closed. This
flag only has an effect if this :class:`.Session` is configured with
``autocommit=True`` and does not already have a transaction
in progress.
:param \**kw:
Additional keyword arguments are sent to :meth:`get_bind()`,
allowing additional arguments to be passed to custom
implementations of :meth:`get_bind`.
"""
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind,
close_with_result=close_with_result)
def _connection_for_bind(self, engine, **kwargs):
if self.transaction is not None:
return self.transaction._connection_for_bind(engine)
else:
return engine.contextual_connect(**kwargs)
def execute(self, clause, params=None, mapper=None, bind=None, **kw):
"""Execute a SQL expression construct or string statement within
the current transaction.
Returns a :class:`.ResultProxy` representing
results of the statement execution, in the same manner as that of an
:class:`.Engine` or
:class:`.Connection`.
E.g.::
result = session.execute(
user_table.select().where(user_table.c.id == 5)
)
:meth:`~.Session.execute` accepts any executable clause construct, such
as :func:`~.sql.expression.select`,
:func:`~.sql.expression.insert`,
:func:`~.sql.expression.update`,
:func:`~.sql.expression.delete`, and
:func:`~.sql.expression.text`. Plain SQL strings can be passed
as well, which in the case of :meth:`.Session.execute` only
will be interpreted the same as if it were passed via a
:func:`~.expression.text` construct. That is, the following usage::
result = session.execute(
"SELECT * FROM user WHERE id=:param",
{"param":5}
)
is equivalent to::
from sqlalchemy import text
result = session.execute(
text("SELECT * FROM user WHERE id=:param"),
{"param":5}
)
The second positional argument to :meth:`.Session.execute` is an
optional parameter set. Similar to that of
:meth:`.Connection.execute`, whether this is passed as a single
dictionary, or a list of dictionaries, determines whether the DBAPI
cursor's ``execute()`` or ``executemany()`` is used to execute the
statement. An INSERT construct may be invoked for a single row::
result = session.execute(users.insert(), {"id": 7, "name": "somename"})
or for multiple rows::
result = session.execute(users.insert(), [
{"id": 7, "name": "somename7"},
{"id": 8, "name": "somename8"},
{"id": 9, "name": "somename9"}
])
The statement is executed within the current transactional context of
this :class:`.Session`. The :class:`.Connection` which is used
to execute the statement can also be acquired directly by
calling the :meth:`.Session.connection` method. Both methods use
a rule-based resolution scheme in order to determine the
:class:`.Connection`, which in the average case is derived directly
from the "bind" of the :class:`.Session` itself, and in other cases
can be based on the :func:`.mapper`
and :class:`.Table` objects passed to the method; see the documentation
for :meth:`.Session.get_bind` for a full description of this scheme.
The :meth:`.Session.execute` method does *not* invoke autoflush.
The :class:`.ResultProxy` returned by the :meth:`.Session.execute`
method is returned with the "close_with_result" flag set to true;
the significance of this flag is that if this :class:`.Session` is
autocommitting and does not have a transaction-dedicated
:class:`.Connection` available, a temporary :class:`.Connection` is
established for the statement execution, which is closed (meaning,
returned to the connection pool) when the :class:`.ResultProxy` has
consumed all available data. This applies *only* when the
:class:`.Session` is configured with autocommit=True and no
transaction has been started.
:param clause:
An executable statement (i.e. an :class:`.Executable` expression
such as :func:`.expression.select`) or string SQL statement
to be executed.
:param params:
Optional dictionary, or list of dictionaries, containing
bound parameter values. If a single dictionary, single-row
execution occurs; if a list of dictionaries, an
"executemany" will be invoked. The keys in each dictionary
must correspond to parameter names present in the statement.
:param mapper:
Optional :func:`.mapper` or mapped class, used to identify
the appropriate bind. This argument takes precedence over
``clause`` when locating a bind. See :meth:`.Session.get_bind`
for more details.
:param bind:
Optional :class:`.Engine` to be used as the bind. If
this engine is already involved in an ongoing transaction,
that connection will be used. This argument takes
precedence over ``mapper`` and ``clause`` when locating
a bind.
:param \**kw:
Additional keyword arguments are sent to :meth:`.Session.get_bind()`
to allow extensibility of "bind" schemes.
.. seealso::
:ref:`sqlexpression_toplevel` - Tutorial on using Core SQL
constructs.
:ref:`connections_toplevel` - Further information on direct
statement execution.
:meth:`.Connection.execute` - core level statement execution
method, which is :meth:`.Session.execute` ultimately uses
in order to execute the statement.
"""
clause = expression._literal_as_text(clause)
if bind is None:
bind = self.get_bind(mapper, clause=clause, **kw)
return self._connection_for_bind(bind, close_with_result=True).execute(
clause, params or {})
def scalar(self, clause, params=None, mapper=None, bind=None, **kw):
"""Like :meth:`~.Session.execute` but return a scalar result."""
return self.execute(
clause, params=params, mapper=mapper, bind=bind, **kw).scalar()
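    # Illustrative usage (not part of the original source): a minimal
    # sketch assuming a Session bound to an Engine and a hypothetical
    # "user" table:
    #
    #   user_count = session.scalar(
    #       text("SELECT COUNT(*) FROM user WHERE name = :name"),
    #       {"name": "ed"})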
def close(self):
"""Close this Session.
This clears all items and ends any transaction in progress.
If this session were created with ``autocommit=False``, a new
transaction is immediately begun. Note that this new transaction does
not use any connection resources until they are first needed.
"""
self.expunge_all()
if self.transaction is not None:
for transaction in self.transaction._iterate_parents():
transaction.close()
def expunge_all(self):
"""Remove all object instances from this ``Session``.
This is equivalent to calling ``expunge(obj)`` on all objects in this
``Session``.
"""
for state in self.identity_map.all_states() + list(self._new):
state._detach()
self.identity_map = self._identity_cls()
self._new = {}
self._deleted = {}
# TODO: need much more test coverage for bind_mapper() and similar !
# TODO: + crystalize + document resolution order
# vis. bind_mapper/bind_table
def bind_mapper(self, mapper, bind):
"""Bind operations for a mapper to a Connectable.
mapper
A mapper instance or mapped class
bind
          Any Connectable: an ``Engine`` or ``Connection``.
All subsequent operations involving this mapper will use the given
`bind`.
"""
if isinstance(mapper, type):
mapper = class_mapper(mapper)
self.__binds[mapper.base_mapper] = bind
for t in mapper._all_tables:
self.__binds[t] = bind
def bind_table(self, table, bind):
"""Bind operations on a Table to a Connectable.
table
A ``Table`` instance
bind
          Any Connectable: an ``Engine`` or ``Connection``.
All subsequent operations involving this ``Table`` will use the
given `bind`.
"""
self.__binds[table] = bind
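    # Illustrative usage (not part of the original source): a sketch of a
    # "vertically partitioned" Session; ``User``, ``users_engine``,
    # ``log_table`` and ``logging_engine`` are hypothetical names.
    #
    #   session = Session()
    #   session.bind_mapper(User, users_engine)
    #   session.bind_table(log_table, logging_engine)
    #   # flushes involving User go to users_engine; statements against
    #   # log_table go to logging_engine, per the get_bind() resolution
    #   # scheme below.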
def get_bind(self, mapper=None, clause=None):
"""Return a "bind" to which this :class:`.Session` is bound.
The "bind" is usually an instance of :class:`.Engine`,
except in the case where the :class:`.Session` has been
explicitly bound directly to a :class:`.Connection`.
For a multiply-bound or unbound :class:`.Session`, the
``mapper`` or ``clause`` arguments are used to determine the
appropriate bind to return.
Note that the "mapper" argument is usually present
when :meth:`.Session.get_bind` is called via an ORM
operation such as a :meth:`.Session.query`, each
individual INSERT/UPDATE/DELETE operation within a
        :meth:`.Session.flush` call, etc.
The order of resolution is:
1. if mapper given and session.binds is present,
locate a bind based on mapper.
2. if clause given and session.binds is present,
locate a bind based on :class:`.Table` objects
found in the given clause present in session.binds.
3. if session.bind is present, return that.
4. if clause given, attempt to return a bind
linked to the :class:`.MetaData` ultimately
associated with the clause.
5. if mapper given, attempt to return a bind
linked to the :class:`.MetaData` ultimately
associated with the :class:`.Table` or other
selectable to which the mapper is mapped.
6. No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError`
is raised.
:param mapper:
Optional :func:`.mapper` mapped class or instance of
:class:`.Mapper`. The bind can be derived from a :class:`.Mapper`
first by consulting the "binds" map associated with this
:class:`.Session`, and secondly by consulting the :class:`.MetaData`
associated with the :class:`.Table` to which the :class:`.Mapper`
is mapped for a bind.
:param clause:
A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
:func:`~.sql.expression.text`,
etc.). If the ``mapper`` argument is not present or could not
produce a bind, the given expression construct will be searched
for a bound element, typically a :class:`.Table` associated with
bound :class:`.MetaData`.
"""
if mapper is clause is None:
if self.bind:
return self.bind
else:
raise sa_exc.UnboundExecutionError(
"This session is not bound to a single Engine or "
"Connection, and no context was provided to locate "
"a binding.")
c_mapper = mapper is not None and _class_to_mapper(mapper) or None
# manually bound?
if self.__binds:
if c_mapper:
if c_mapper.base_mapper in self.__binds:
return self.__binds[c_mapper.base_mapper]
elif c_mapper.mapped_table in self.__binds:
return self.__binds[c_mapper.mapped_table]
if clause is not None:
for t in sql_util.find_tables(clause, include_crud=True):
if t in self.__binds:
return self.__binds[t]
if self.bind:
return self.bind
if isinstance(clause, sql.expression.ClauseElement) and clause.bind:
return clause.bind
if c_mapper and c_mapper.mapped_table.bind:
return c_mapper.mapped_table.bind
context = []
if mapper is not None:
context.append('mapper %s' % c_mapper)
if clause is not None:
context.append('SQL expression')
raise sa_exc.UnboundExecutionError(
"Could not locate a bind configured on %s or this Session" % (
', '.join(context)))
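    # Illustrative sketch of the resolution order above (not part of the
    # original source); ``User``, ``engine1`` and ``engine2`` are
    # hypothetical.
    #
    #   session = Session(binds={User: engine1}, bind=engine2)
    #   session.get_bind(mapper=User)  # step 1: engine1, via the binds map
    #   session.get_bind()             # step 3: engine2, the session-level bind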
def query(self, *entities, **kwargs):
"""Return a new ``Query`` object corresponding to this ``Session``."""
return self._query_cls(entities, self, **kwargs)
@property
@util.contextmanager
def no_autoflush(self):
"""Return a context manager that disables autoflush.
e.g.::
with session.no_autoflush:
some_object = SomeClass()
session.add(some_object)
# won't autoflush
some_object.related_thing = session.query(SomeRelated).first()
Operations that proceed within the ``with:`` block
will not be subject to flushes occurring upon query
access. This is useful when initializing a series
of objects which involve existing database queries,
where the uncompleted object should not yet be flushed.
.. versionadded:: 0.7.6
"""
        autoflush = self.autoflush
        self.autoflush = False
        try:
            yield self
        finally:
            # restore the previous setting even if the block raises,
            # so autoflush is not left permanently disabled
            self.autoflush = autoflush
def _autoflush(self):
if self.autoflush and not self._flushing:
self.flush()
def refresh(self, instance, attribute_names=None, lockmode=None):
"""Expire and refresh the attributes on the given instance.
A query will be issued to the database and all attributes will be
refreshed with their current database value.
Lazy-loaded relational attributes will remain lazily loaded, so that
the instance-wide refresh operation will be followed immediately by
the lazy load of that attribute.
Eagerly-loaded relational attributes will eagerly load within the
single refresh operation.
Note that a highly isolated transaction will return the same values as
were previously read in that same transaction, regardless of changes
in database state outside of that transaction - usage of
:meth:`~Session.refresh` usually only makes sense if non-ORM SQL
        statements were emitted in the ongoing transaction, or if autocommit
mode is turned on.
:param attribute_names: optional. An iterable collection of
string attribute names indicating a subset of attributes to
be refreshed.
:param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query`
as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
self._expire_state(state, attribute_names)
if loading.load_on_ident(
self.query(object_mapper(instance)),
state.key, refresh_state=state,
lockmode=lockmode,
only_load_props=attribute_names) is None:
raise sa_exc.InvalidRequestError(
"Could not refresh instance '%s'" %
orm_util.instance_str(instance))
def expire_all(self):
"""Expires all persistent instances within this Session.
        When any attribute on a persistent instance is next accessed,
a query will be issued using the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire individual objects and individual attributes
on those objects, use :meth:`Session.expire`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire_all` should not be needed when
autocommit is ``False``, assuming the transaction is isolated.
"""
for state in self.identity_map.all_states():
state._expire(state.dict, self.identity_map._modified)
def expire(self, instance, attribute_names=None):
"""Expire the attributes on an instance.
Marks the attributes of an instance as out of date. When an expired
attribute is next accessed, a query will be issued to the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire all objects in the :class:`.Session` simultaneously,
use :meth:`Session.expire_all`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire` only makes sense for the specific
case that a non-ORM SQL statement was emitted in the current
transaction.
:param instance: The instance to be refreshed.
:param attribute_names: optional list of string attribute names
indicating a subset of attributes to be expired.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
self._expire_state(state, attribute_names)
def _expire_state(self, state, attribute_names):
self._validate_persistent(state)
if attribute_names:
state._expire_attributes(state.dict, attribute_names)
else:
# pre-fetch the full cascade since the expire is going to
# remove associations
cascaded = list(state.manager.mapper.cascade_iterator(
'refresh-expire', state))
self._conditional_expire(state)
for o, m, st_, dct_ in cascaded:
self._conditional_expire(st_)
def _conditional_expire(self, state):
"""Expire a state if persistent, else expunge if pending"""
if state.key:
state._expire(state.dict, self.identity_map._modified)
elif state in self._new:
self._new.pop(state)
state._detach()
@util.deprecated("0.7", "The non-weak-referencing identity map "
"feature is no longer needed.")
def prune(self):
"""Remove unreferenced instances cached in the identity map.
Note that this method is only meaningful if "weak_identity_map" is set
to False. The default weak identity map is self-pruning.
Removes any object in this Session's identity map that is not
referenced in user code, modified, new or scheduled for deletion.
Returns the number of objects pruned.
"""
return self.identity_map.prune()
def expunge(self, instance):
"""Remove the `instance` from this ``Session``.
This will free all internal references to the instance. Cascading
will be applied according to the *expunge* cascade rule.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
if state.session_id is not self.hash_key:
raise sa_exc.InvalidRequestError(
"Instance %s is not present in this Session" %
orm_util.state_str(state))
cascaded = list(state.manager.mapper.cascade_iterator(
'expunge', state))
self._expunge_state(state)
for o, m, st_, dct_ in cascaded:
self._expunge_state(st_)
def _expunge_state(self, state):
if state in self._new:
self._new.pop(state)
state._detach()
elif self.identity_map.contains_state(state):
self.identity_map.discard(state)
self._deleted.pop(state, None)
state._detach()
elif self.transaction:
self.transaction._deleted.pop(state, None)
def _register_newly_persistent(self, states):
for state in states:
mapper = _state_mapper(state)
# prevent against last minute dereferences of the object
obj = state.obj()
if obj is not None:
instance_key = mapper._identity_key_from_state(state)
if _none_set.issubset(instance_key[1]) and \
not mapper.allow_partial_pks or \
_none_set.issuperset(instance_key[1]):
raise exc.FlushError(
"Instance %s has a NULL identity key. If this is an "
"auto-generated value, check that the database table "
"allows generation of new primary key values, and "
"that the mapped Column object is configured to "
"expect these generated values. Ensure also that "
"this flush() is not occurring at an inappropriate "
"time, such aswithin a load() event."
% orm_util.state_str(state)
)
if state.key is None:
state.key = instance_key
elif state.key != instance_key:
# primary key switch. use discard() in case another
# state has already replaced this one in the identity
# map (see test/orm/test_naturalpks.py ReversePKsTest)
self.identity_map.discard(state)
if state in self.transaction._key_switches:
orig_key = self.transaction._key_switches[state][0]
else:
orig_key = state.key
self.transaction._key_switches[state] = (
orig_key, instance_key)
state.key = instance_key
self.identity_map.replace(state)
statelib.InstanceState._commit_all_states(
((state, state.dict) for state in states),
self.identity_map
)
self._register_altered(states)
# remove from new last, might be the last strong ref
for state in set(states).intersection(self._new):
self._new.pop(state)
def _register_altered(self, states):
if self._enable_transaction_accounting and self.transaction:
for state in states:
if state in self._new:
self.transaction._new[state] = True
else:
self.transaction._dirty[state] = True
def _remove_newly_deleted(self, states):
for state in states:
if self._enable_transaction_accounting and self.transaction:
self.transaction._deleted[state] = True
self.identity_map.discard(state)
self._deleted.pop(state, None)
state.deleted = True
def add(self, instance, _warn=True):
"""Place an object in the ``Session``.
Its state will be persisted to the database on the next flush
operation.
Repeated calls to ``add()`` will be ignored. The opposite of ``add()``
is ``expunge()``.
"""
if _warn and self._warn_on_events:
self._flush_warning("Session.add()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
self._save_or_update_state(state)
def add_all(self, instances):
"""Add the given collection of instances to this ``Session``."""
if self._warn_on_events:
self._flush_warning("Session.add_all()")
for instance in instances:
self.add(instance, _warn=False)
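    # Illustrative usage (not part of the original source); ``User`` is a
    # hypothetical mapped class.
    #
    #   session.add(User(name="ed"))
    #   session.add_all([User(name="wendy"), User(name="mary")])
    #   session.commit()  # pending objects are INSERTed at flush time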
def _save_or_update_state(self, state):
self._save_or_update_impl(state)
mapper = _state_mapper(state)
for o, m, st_, dct_ in mapper.cascade_iterator(
'save-update',
state,
halt_on=self._contains_state):
self._save_or_update_impl(st_)
def delete(self, instance):
"""Mark an instance as deleted.
The database delete operation occurs upon ``flush()``.
"""
if self._warn_on_events:
self._flush_warning("Session.delete()")
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
if state.key is None:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" %
orm_util.state_str(state))
if state in self._deleted:
return
# ensure object is attached to allow the
# cascade operation to load deferred attributes
# and collections
self._attach(state, include_before=True)
# grab the cascades before adding the item to the deleted list
# so that autoflush does not delete the item
# the strong reference to the instance itself is significant here
cascade_states = list(state.manager.mapper.cascade_iterator(
'delete', state))
self._deleted[state] = state.obj()
self.identity_map.add(state)
for o, m, st_, dct_ in cascade_states:
self._delete_impl(st_)
def merge(self, instance, load=True):
"""Copy the state of a given instance into a corresponding instance
within this :class:`.Session`.
:meth:`.Session.merge` examines the primary key attributes of the
source instance, and attempts to reconcile it with an instance of the
same primary key in the session. If not found locally, it attempts
to load the object from the database based on primary key, and if
none can be located, creates a new instance. The state of each
attribute on the source instance is then copied to the target instance.
The resulting target instance is then returned by the method; the
original source instance is left unmodified, and un-associated with the
:class:`.Session` if not already.
This operation cascades to associated instances if the association is
mapped with ``cascade="merge"``.
See :ref:`unitofwork_merging` for a detailed discussion of merging.
:param instance: Instance to be merged.
:param load: Boolean, when False, :meth:`.merge` switches into
a "high performance" mode which causes it to forego emitting history
events as well as all database access. This flag is used for
cases such as transferring graphs of objects into a :class:`.Session`
from a second level cache, or to transfer just-loaded objects
into the :class:`.Session` owned by a worker thread or process
without re-querying the database.
The ``load=False`` use case adds the caveat that the given
object has to be in a "clean" state, that is, has no pending changes
to be flushed - even if the incoming object is detached from any
:class:`.Session`. This is so that when
the merge operation populates local attributes and
cascades to related objects and
collections, the values can be "stamped" onto the
target object as is, without generating any history or attribute
events, and without the need to reconcile the incoming data with
any existing related objects or collections that might not
be loaded. The resulting objects from ``load=False`` are always
produced as "clean", so it is only appropriate that the given objects
should be "clean" as well, else this suggests a mis-use of the method.
"""
if self._warn_on_events:
self._flush_warning("Session.merge()")
_recursive = {}
if load:
# flush current contents if we expect to load data
self._autoflush()
object_mapper(instance) # verify mapped
autoflush = self.autoflush
try:
self.autoflush = False
return self._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load, _recursive=_recursive)
finally:
self.autoflush = autoflush
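    # Illustrative usage (not part of the original source): merging an
    # object that carries a primary key but is not attached to this
    # Session, e.g. one rebuilt from a cache; ``User`` is hypothetical.
    #
    #   incoming = User(id=5, name="ed")      # not attached to any Session
    #   persistent = session.merge(incoming)  # loads or creates the copy
    #   # ``incoming`` itself is left unmodified and unattached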
def _merge(self, state, state_dict, load=True, _recursive=None):
mapper = _state_mapper(state)
if state in _recursive:
return _recursive[state]
new_instance = False
key = state.key
if key is None:
if not load:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects transient (i.e. unpersisted) objects. flush() "
"all changes on mapped instances before merging with "
"load=False.")
key = mapper._identity_key_from_state(state)
if key in self.identity_map:
merged = self.identity_map[key]
elif not load:
if state.modified:
raise sa_exc.InvalidRequestError(
"merge() with load=False option does not support "
"objects marked as 'dirty'. flush() all changes on "
"mapped instances before merging with load=False.")
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_state.key = key
self._update_impl(merged_state)
new_instance = True
elif not _none_set.issubset(key[1]) or \
(mapper.allow_partial_pks and
not _none_set.issuperset(key[1])):
merged = self.query(mapper.class_).get(key[1])
else:
merged = None
if merged is None:
merged = mapper.class_manager.new_instance()
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
new_instance = True
self._save_or_update_state(merged_state)
else:
merged_state = attributes.instance_state(merged)
merged_dict = attributes.instance_dict(merged)
_recursive[state] = merged
# check that we didn't just pull the exact same
# state out.
if state is not merged_state:
# version check if applicable
if mapper.version_id_col is not None:
existing_version = mapper._get_state_attr_by_column(
state,
state_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE)
merged_version = mapper._get_state_attr_by_column(
merged_state,
merged_dict,
mapper.version_id_col,
passive=attributes.PASSIVE_NO_INITIALIZE)
if existing_version is not attributes.PASSIVE_NO_RESULT and \
merged_version is not attributes.PASSIVE_NO_RESULT and \
existing_version != merged_version:
raise exc.StaleDataError(
"Version id '%s' on merged state %s "
"does not match existing version '%s'. "
"Leave the version attribute unset when "
"merging to update the most recent version."
% (
existing_version,
orm_util.state_str(merged_state),
merged_version
))
merged_state.load_path = state.load_path
merged_state.load_options = state.load_options
for prop in mapper.iterate_properties:
prop.merge(self, state, state_dict,
merged_state, merged_dict,
load, _recursive)
if not load:
# remove any history
merged_state._commit_all(merged_dict, self.identity_map)
if new_instance:
merged_state.manager.dispatch.load(merged_state, None)
return merged
def _validate_persistent(self, state):
if not self.identity_map.contains_state(state):
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persistent within this Session" %
orm_util.state_str(state))
def _save_impl(self, state):
if state.key is not None:
raise sa_exc.InvalidRequestError(
"Object '%s' already has an identity - it can't be registered "
"as pending" % orm_util.state_str(state))
self._before_attach(state)
if state not in self._new:
self._new[state] = state.obj()
state.insert_order = len(self._new)
self._attach(state)
def _update_impl(self, state, discard_existing=False):
if (self.identity_map.contains_state(state) and
state not in self._deleted):
return
if state.key is None:
raise sa_exc.InvalidRequestError(
"Instance '%s' is not persisted" %
orm_util.state_str(state))
if state.deleted:
raise sa_exc.InvalidRequestError(
"Instance '%s' has been deleted. Use the make_transient() "
"function to send this object back to the transient state." %
orm_util.state_str(state)
)
self._before_attach(state)
self._deleted.pop(state, None)
if discard_existing:
self.identity_map.replace(state)
else:
self.identity_map.add(state)
self._attach(state)
def _save_or_update_impl(self, state):
if state.key is None:
self._save_impl(state)
else:
self._update_impl(state)
def _delete_impl(self, state):
if state in self._deleted:
return
if state.key is None:
return
self._attach(state, include_before=True)
self._deleted[state] = state.obj()
self.identity_map.add(state)
def enable_relationship_loading(self, obj):
"""Associate an object with this :class:`.Session` for related
object loading.
.. warning::
:meth:`.enable_relationship_loading` exists to serve special
use cases and is not recommended for general use.
Accesses of attributes mapped with :func:`.relationship`
will attempt to load a value from the database using this
:class:`.Session` as the source of connectivity. The values
will be loaded based on foreign key values present on this
object - it follows that this functionality
        generally only works for many-to-one relationships.
The object will be attached to this session, but will
**not** participate in any persistence operations; its state
for almost all purposes will remain either "transient" or
"detached", except for the case of relationship loading.
Also note that backrefs will often not work as expected.
Altering a relationship-bound attribute on the target object
may not fire off a backref event, if the effective value
is what was already loaded from a foreign-key-holding value.
The :meth:`.Session.enable_relationship_loading` method supersedes
the ``load_on_pending`` flag on :func:`.relationship`. Unlike
that flag, :meth:`.Session.enable_relationship_loading` allows
an object to remain transient while still being able to load
related items.
To make a transient object associated with a :class:`.Session`
via :meth:`.Session.enable_relationship_loading` pending, add
it to the :class:`.Session` using :meth:`.Session.add` normally.
:meth:`.Session.enable_relationship_loading` does not improve
behavior when the ORM is used normally - object references should be
constructed at the object level, not at the foreign key level, so
that they are present in an ordinary way before flush()
proceeds. This method is not intended for general use.
.. versionadded:: 0.8
"""
state = attributes.instance_state(obj)
self._attach(state, include_before=True)
state._load_pending = True
def _before_attach(self, state):
if state.session_id != self.hash_key and \
self.dispatch.before_attach:
self.dispatch.before_attach(self, state.obj())
def _attach(self, state, include_before=False):
if state.key and \
state.key in self.identity_map and \
not self.identity_map.contains_state(state):
raise sa_exc.InvalidRequestError("Can't attach instance "
"%s; another instance with key %s is already "
"present in this session."
% (orm_util.state_str(state), state.key))
if state.session_id and \
state.session_id is not self.hash_key and \
state.session_id in _sessions:
raise sa_exc.InvalidRequestError(
"Object '%s' is already attached to session '%s' "
"(this is '%s')" % (orm_util.state_str(state),
state.session_id, self.hash_key))
if state.session_id != self.hash_key:
if include_before and \
self.dispatch.before_attach:
self.dispatch.before_attach(self, state.obj())
state.session_id = self.hash_key
if self.dispatch.after_attach:
self.dispatch.after_attach(self, state.obj())
def __contains__(self, instance):
"""Return True if the instance is associated with this session.
        The result is True if the instance is either pending or
        persistent within this Session.
"""
try:
state = attributes.instance_state(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
return self._contains_state(state)
def __iter__(self):
"""Iterate over all pending or persistent instances within this
Session.
"""
return iter(list(self._new.values()) + self.identity_map.values())
def _contains_state(self, state):
return state in self._new or self.identity_map.contains_state(state)
def flush(self, objects=None):
"""Flush all the object changes to the database.
Writes out all pending object creations, deletions and modifications
to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are
automatically ordered by the Session's unit of work dependency
solver.
Database operations will be issued in the current transactional
context and do not affect the state of the transaction, unless an
error occurs, in which case the entire transaction is rolled back.
You may flush() as often as you like within a transaction to move
changes from Python to the database's transaction buffer.
For ``autocommit`` Sessions with no active manual transaction, flush()
will create a transaction on the fly that surrounds the entire set of
        operations in the flush.
:param objects: Optional; restricts the flush operation to operate
only on elements that are in the given collection.
This feature is for an extremely narrow set of use cases where
particular objects may need to be operated upon before the
full flush() occurs. It is not intended for general use.
"""
if self._flushing:
raise sa_exc.InvalidRequestError("Session is already flushing")
if self._is_clean():
return
try:
self._flushing = True
self._flush(objects)
finally:
self._flushing = False
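    # Illustrative usage (not part of the original source): an explicit
    # flush so that server-generated primary keys become available before
    # commit; ``User`` is hypothetical.
    #
    #   obj = User(name="ed")
    #   session.add(obj)
    #   session.flush()  # emits the INSERT within the current transaction
    #   print(obj.id)    # the primary key attribute is now populated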
def _flush_warning(self, method):
util.warn(
"Usage of the '%s' operation is not currently supported "
"within the execution stage of the flush process. "
"Results may not be consistent. Consider using alternative "
"event listeners or connection-level operations instead."
% method)
def _is_clean(self):
return not self.identity_map.check_modified() and \
not self._deleted and \
not self._new
def _flush(self, objects=None):
dirty = self._dirty_states
if not dirty and not self._deleted and not self._new:
self.identity_map._modified.clear()
return
flush_context = UOWTransaction(self)
if self.dispatch.before_flush:
self.dispatch.before_flush(self, flush_context, objects)
# re-establish "dirty states" in case the listeners
# added
dirty = self._dirty_states
deleted = set(self._deleted)
new = set(self._new)
dirty = set(dirty).difference(deleted)
# create the set of all objects we want to operate upon
if objects:
# specific list passed in
objset = set()
for o in objects:
try:
state = attributes.instance_state(o)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(o)
objset.add(state)
else:
objset = None
# store objects whose fate has been decided
processed = set()
# put all saves/updates into the flush context. detect top-level
# orphans and throw them into deleted.
if objset:
proc = new.union(dirty).intersection(objset).difference(deleted)
else:
proc = new.union(dirty).difference(deleted)
for state in proc:
is_orphan = (
_state_mapper(state)._is_orphan(state) and state.has_identity)
flush_context.register_object(state, isdelete=is_orphan)
processed.add(state)
# put all remaining deletes into the flush context.
if objset:
proc = deleted.intersection(objset).difference(processed)
else:
proc = deleted.difference(processed)
for state in proc:
flush_context.register_object(state, isdelete=True)
if not flush_context.has_work:
return
flush_context.transaction = transaction = self.begin(
subtransactions=True)
try:
self._warn_on_events = True
try:
flush_context.execute()
finally:
self._warn_on_events = False
self.dispatch.after_flush(self, flush_context)
flush_context.finalize_flush_changes()
if not objects and self.identity_map._modified:
len_ = len(self.identity_map._modified)
statelib.InstanceState._commit_all_states(
[(state, state.dict) for state in
self.identity_map._modified],
instance_dict=self.identity_map)
util.warn("Attribute history events accumulated on %d "
"previously clean instances "
"within inner-flush event handlers have been reset, "
"and will not result in database updates. "
"Consider using set_committed_value() within "
"inner-flush event handlers to avoid this warning."
% len_)
# useful assertions:
#if not objects:
# assert not self.identity_map._modified
#else:
# assert self.identity_map._modified == \
# self.identity_map._modified.difference(objects)
self.dispatch.after_flush_postexec(self, flush_context)
transaction.commit()
except:
transaction.rollback(_capture_exception=True)
raise
def is_modified(self, instance, include_collections=True,
passive=True):
"""Return ``True`` if the given instance has locally
modified attributes.
This method retrieves the history for each instrumented
attribute on the instance and performs a comparison of the current
value to its previously committed value, if any.
It is in effect a more expensive and accurate
version of checking for the given instance in the
:attr:`.Session.dirty` collection; a full test for
each attribute's net "dirty" status is performed.
E.g.::
return session.is_modified(someobject)
.. versionchanged:: 0.8
When using SQLAlchemy 0.7 and earlier, the ``passive``
flag should **always** be explicitly set to ``True``,
else SQL loads/autoflushes may proceed which can affect
the modified state itself:
``session.is_modified(someobject, passive=True)``\ .
In 0.8 and above, the behavior is corrected and
this flag is ignored.
A few caveats to this method apply:
* Instances present in the :attr:`.Session.dirty` collection may report
``False`` when tested with this method. This is because
the object may have received change events via attribute
mutation, thus placing it in :attr:`.Session.dirty`,
but ultimately the state is the same as that loaded from
the database, resulting in no net change here.
* Scalar attributes may not have recorded the previously set
value when a new value was applied, if the attribute was not loaded,
or was expired, at the time the new value was received - in these
cases, the attribute is assumed to have a change, even if there is
ultimately no net change against its database value. SQLAlchemy in
most cases does not need the "old" value when a set event occurs, so
it skips the expense of a SQL call if the old value isn't present,
based on the assumption that an UPDATE of the scalar value is
usually needed, and in those few cases where it isn't, is less
expensive on average than issuing a defensive SELECT.
The "old" value is fetched unconditionally upon set only if the
attribute container has the ``active_history`` flag set to ``True``.
This flag is set typically for primary key attributes and scalar
object references that are not a simple many-to-one. To set this
flag for any arbitrary mapped column, use the ``active_history``
argument with :func:`.column_property`.
:param instance: mapped instance to be tested for pending changes.
:param include_collections: Indicates if multivalued collections
should be included in the operation. Setting this to ``False`` is a
way to detect only local-column based properties (i.e. scalar columns
or many-to-one foreign keys) that would result in an UPDATE for this
instance upon flush.
:param passive:
.. versionchanged:: 0.8
Ignored for backwards compatibility.
When using SQLAlchemy 0.7 and earlier, this flag should always
be set to ``True``.
"""
state = object_state(instance)
if not state.modified:
return False
dict_ = state.dict
for attr in state.manager.attributes:
if \
(
not include_collections and
hasattr(attr.impl, 'get_collection')
) or not hasattr(attr.impl, 'get_history'):
continue
(added, unchanged, deleted) = \
attr.impl.get_history(state, dict_,
passive=attributes.NO_CHANGE)
if added or deleted:
return True
else:
return False
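    # Illustrative usage (not part of the original source); ``someobject``
    # is assumed to be a persistent instance with a loaded scalar
    # attribute ``name``.
    #
    #   someobject.name = someobject.name  # attribute event, no net change
    #   someobject in session.dirty        # typically True - "dirty" is optimistic
    #   session.is_modified(someobject)    # False - no net change detected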
@property
def is_active(self):
"""True if this :class:`.Session` is in "transaction mode" and
is not in "partial rollback" state.
The :class:`.Session` in its default mode of ``autocommit=False``
is essentially always in "transaction mode", in that a
:class:`.SessionTransaction` is associated with it as soon as
it is instantiated. This :class:`.SessionTransaction` is immediately
replaced with a new one as soon as it is ended, due to a rollback,
commit, or close operation.
"Transaction mode" does *not* indicate whether
or not actual database connection resources are in use; the
:class:`.SessionTransaction` object coordinates among zero or more
actual database transactions, and starts out with none, accumulating
individual DBAPI connections as different data sources are used
within its scope. The best way to track when a particular
:class:`.Session` has actually begun to use DBAPI resources is to
implement a listener using the :meth:`.SessionEvents.after_begin`
method, which will deliver both the :class:`.Session` as well as the
target :class:`.Connection` to a user-defined event listener.
The "partial rollback" state refers to when an "inner" transaction,
typically used during a flush, encounters an error and emits a
rollback of the DBAPI connection. At this point, the
:class:`.Session` is in "partial rollback" and awaits for the user to
call :meth:`.Session.rollback`, in order to close out the
transaction stack. It is in this "partial rollback" period that the
:attr:`.is_active` flag returns False. After the call to
:meth:`.Session.rollback`, the :class:`.SessionTransaction` is replaced
with a new one and :attr:`.is_active` returns ``True`` again.
When a :class:`.Session` is used in ``autocommit=True`` mode, the
:class:`.SessionTransaction` is only instantiated within the scope
of a flush call, or when :meth:`.Session.begin` is called. So
:attr:`.is_active` will always be ``False`` outside of a flush or
:meth:`.Session.begin` block in this mode, and will be ``True``
within the :meth:`.Session.begin` block as long as it doesn't enter
"partial rollback" state.
From all the above, it follows that the only purpose to this flag is
        for application frameworks that wish to detect if a "rollback" is
necessary within a generic error handling routine, for
:class:`.Session` objects that would otherwise be in
"partial rollback" mode. In a typical integration case, this is also
not necessary as it is standard practice to emit
:meth:`.Session.rollback` unconditionally within the outermost
exception catch.
To track the transactional state of a :class:`.Session` fully,
use event listeners, primarily the :meth:`.SessionEvents.after_begin`,
:meth:`.SessionEvents.after_commit`,
:meth:`.SessionEvents.after_rollback` and related events.
"""
return self.transaction and self.transaction.is_active
identity_map = None
"""A mapping of object identities to objects themselves.
Iterating through ``Session.identity_map.values()`` provides
access to the full set of persistent objects (i.e., those
that have row identity) currently in the session.
See also:
:func:`.identity_key` - operations involving identity keys.
"""
@property
def _dirty_states(self):
"""The set of all persistent states considered dirty.
This method returns all states that were modified including
those that were possibly deleted.
"""
return self.identity_map._dirty_states()
@property
def dirty(self):
"""The set of all persistent instances considered dirty.
E.g.::
some_mapped_object in session.dirty
Instances are considered dirty when they were modified but not
deleted.
Note that this 'dirty' calculation is 'optimistic'; most
attribute-setting or collection modification operations will
mark an instance as 'dirty' and place it in this set, even if
there is no net change to the attribute's value. At flush
time, the value of each attribute is compared to its
previously saved value, and if there's no net change, no SQL
operation will occur (this is a more expensive operation so
it's only done at flush time).
To check if an instance has actionable net changes to its
attributes, use the :meth:`.Session.is_modified` method.
"""
return util.IdentitySet(
[state.obj()
for state in self._dirty_states
if state not in self._deleted])
@property
def deleted(self):
"The set of all instances marked as 'deleted' within this ``Session``"
return util.IdentitySet(self._deleted.values())
@property
def new(self):
"The set of all instances marked as 'new' within this ``Session``."
return util.IdentitySet(self._new.values())
class sessionmaker(_SessionClassMethods):
"""A configurable :class:`.Session` factory.
The :class:`.sessionmaker` factory generates new
:class:`.Session` objects when called, creating them given
the configurational arguments established here.
e.g.::
# global scope
Session = sessionmaker(autoflush=False)
# later, in a local scope, create and use a session:
sess = Session()
Any keyword arguments sent to the constructor itself will override the
"configured" keywords::
Session = sessionmaker()
# bind an individual session to a connection
sess = Session(bind=connection)
The class also includes a method :meth:`.configure`, which can
be used to specify additional keyword arguments to the factory, which
will take effect for subsequent :class:`.Session` objects generated.
This is usually used to associate one or more :class:`.Engine` objects
with an existing :class:`.sessionmaker` factory before it is first
used::
# application starts
Session = sessionmaker()
# ... later
engine = create_engine('sqlite:///foo.db')
Session.configure(bind=engine)
sess = Session()
    .. seealso::
:ref:`session_getting` - introductory text on creating
sessions using :class:`.sessionmaker`.
"""
def __init__(self, bind=None, class_=Session, autoflush=True,
autocommit=False,
expire_on_commit=True, **kw):
"""Construct a new :class:`.sessionmaker`.
All arguments here except for ``class_`` correspond to arguments
accepted by :class:`.Session` directly. See the
:meth:`.Session.__init__` docstring for more details on parameters.
:param bind: a :class:`.Engine` or other :class:`.Connectable` with
which newly created :class:`.Session` objects will be associated.
:param class_: class to use in order to create new :class:`.Session`
objects. Defaults to :class:`.Session`.
:param autoflush: The autoflush setting to use with newly created
:class:`.Session` objects.
:param autocommit: The autocommit setting to use with newly created
:class:`.Session` objects.
:param expire_on_commit=True: the expire_on_commit setting to use
with newly created :class:`.Session` objects.
:param \**kw: all other keyword arguments are passed to the constructor
of newly created :class:`.Session` objects.
"""
kw['bind'] = bind
kw['autoflush'] = autoflush
kw['autocommit'] = autocommit
kw['expire_on_commit'] = expire_on_commit
self.kw = kw
# make our own subclass of the given class, so that
# events can be associated with it specifically.
self.class_ = type(class_.__name__, (class_,), {})
def __call__(self, **local_kw):
"""Produce a new :class:`.Session` object using the configuration
established in this :class:`.sessionmaker`.
In Python, the ``__call__`` method is invoked on an object when
it is "called" in the same way as a function::
Session = sessionmaker()
session = Session() # invokes sessionmaker.__call__()
"""
for k, v in self.kw.items():
local_kw.setdefault(k, v)
return self.class_(**local_kw)
def configure(self, **new_kw):
"""(Re)configure the arguments for this sessionmaker.
e.g.::
Session = sessionmaker()
Session.configure(bind=create_engine('sqlite://'))
"""
self.kw.update(new_kw)
def __repr__(self):
return "%s(class_=%r%s)" % (
self.__class__.__name__,
self.class_.__name__,
", ".join("%s=%r" % (k, v) for k, v in self.kw.items())
)
_sessions = weakref.WeakValueDictionary()
def make_transient(instance):
"""Make the given instance 'transient'.
This will remove its association with any
session and additionally will remove its "identity key",
such that it's as though the object were newly constructed,
except retaining its values. It also resets the
"deleted" flag on the state if this object
had been explicitly deleted by its session.
Attributes which were "expired" or deferred at the
instance level are reverted to undefined, and
will not trigger any loads.
"""
state = attributes.instance_state(instance)
s = _state_session(state)
if s:
s._expunge_state(state)
# remove expired state and
# deferred callables
state.callables.clear()
if state.key:
del state.key
if state.deleted:
del state.deleted
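# Illustrative usage (not part of the original source); ``obj`` is assumed
# to be a persistent instance previously loaded by a session.
#
#   make_transient(obj)  # obj loses its identity key and session
#   session.add(obj)     # obj is now treated like a newly constructed
#                        # object and would be INSERTed at flush time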
def object_session(instance):
"""Return the ``Session`` to which instance belongs.
If the instance is not a mapped instance, an error is raised.
"""
try:
return _state_session(attributes.instance_state(instance))
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
def _state_session(state):
if state.session_id:
try:
return _sessions[state.session_id]
except KeyError:
pass
return None
_new_sessionid = util.counter()
|
{
"content_hash": "b4749ed42c0ec5d5f0548296f0c37a70",
"timestamp": "",
"source": "github",
"line_count": 2305,
"max_line_length": 83,
"avg_line_length": 39.52234273318872,
"alnum_prop": 0.6031789591543266,
"repo_name": "femmerling/DirMaker",
"id": "71e617e365787dd552e7c0b98d1ca95475c01a51",
"size": "91332",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "box/lib/python2.7/site-packages/sqlalchemy/orm/session.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1449"
},
{
"name": "JavaScript",
"bytes": "7702"
},
{
"name": "Python",
"bytes": "7333770"
},
{
"name": "Shell",
"bytes": "3518"
}
],
"symlink_target": ""
}
|
"""Query list of all contributors and reviewers in a release"""
import json
import re
import sys
import requests
from sh.contrib import git
if len(sys.argv) != 5:
print(f'Usage: {sys.argv[0]} [starting commit/tag] [ending commit/tag] [GitHub username] ' +
'[GitHub password]')
sys.exit(1)
from_commit = sys.argv[1]
to_commit = sys.argv[2]
username = sys.argv[3]
password = sys.argv[4]
contributors = set()
reviewers = set()
def paginate_request(url, callback):
r = requests.get(url, auth=(username, password))
assert r.status_code == requests.codes.ok, f'Code: {r.status_code}, Text: {r.text}'
callback(json.loads(r.text))
while 'next' in r.links:
r = requests.get(r.links['next']['url'], auth=(username, password))
callback(json.loads(r.text))
for line in git.log(f'{from_commit}..{to_commit}', '--pretty=format:%s', '--reverse', '--first-parent'):
    m = re.search(r'\(#([0-9]+)\)$', line.rstrip())  # raw string: avoids invalid escape sequences
if m:
pr_id = m.group(1)
print(f'PR #{pr_id}')
def process_commit_list(commit_list):
try:
contributors.update([commit['author']['login'] for commit in commit_list])
except TypeError:
prompt = (f'Error fetching contributors for PR #{pr_id}. Enter it manually, ' +
'as a space-separated list: ')
contributors.update(str(input(prompt)).split(' '))
def process_review_list(review_list):
reviewers.update([x['user']['login'] for x in review_list])
def process_comment_list(comment_list):
reviewers.update([x['user']['login'] for x in comment_list])
paginate_request(f'https://api.github.com/repos/dmlc/xgboost/pulls/{pr_id}/commits',
process_commit_list)
paginate_request(f'https://api.github.com/repos/dmlc/xgboost/pulls/{pr_id}/reviews',
process_review_list)
paginate_request(f'https://api.github.com/repos/dmlc/xgboost/issues/{pr_id}/comments',
process_comment_list)
print('Contributors: ', end='')
for x in sorted(contributors):
r = requests.get(f'https://api.github.com/users/{x}', auth=(username, password))
assert r.status_code == requests.codes.ok, f'Code: {r.status_code}, Text: {r.text}'
user_info = json.loads(r.text)
if user_info['name'] is None:
print(f"@{x}, ", end='')
else:
print(f"{user_info['name']} (@{x}), ", end='')
print('\nReviewers: ', end='')
for x in sorted(reviewers):
r = requests.get(f'https://api.github.com/users/{x}', auth=(username, password))
assert r.status_code == requests.codes.ok, f'Code: {r.status_code}, Text: {r.text}'
user_info = json.loads(r.text)
if user_info['name'] is None:
print(f"@{x}, ", end='')
else:
print(f"{user_info['name']} (@{x}), ", end='')
print('')
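# Example invocation (illustrative, not part of the original script); the
# tag names and credentials below are placeholders:
#
#   python query_contributors.py v1.0.0 v1.1.0 my-github-user my-password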
|
{
"content_hash": "7390487482c88c1a56149c71ad055a67",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 104,
"avg_line_length": 38.733333333333334,
"alnum_prop": 0.5938037865748709,
"repo_name": "dmlc/xgboost",
"id": "d57ad3f7c28ae22ab4c8548c5e9c3b7043379b45",
"size": "2905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/query_contributors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1383"
},
{
"name": "C",
"bytes": "23067"
},
{
"name": "C++",
"bytes": "2182522"
},
{
"name": "CMake",
"bytes": "52394"
},
{
"name": "CSS",
"bytes": "3812"
},
{
"name": "Cuda",
"bytes": "855374"
},
{
"name": "Dockerfile",
"bytes": "2364"
},
{
"name": "Groovy",
"bytes": "1251"
},
{
"name": "Java",
"bytes": "206549"
},
{
"name": "M4",
"bytes": "2131"
},
{
"name": "Makefile",
"bytes": "8179"
},
{
"name": "PowerShell",
"bytes": "4308"
},
{
"name": "Python",
"bytes": "1189411"
},
{
"name": "R",
"bytes": "342898"
},
{
"name": "Scala",
"bytes": "471040"
},
{
"name": "Shell",
"bytes": "45815"
},
{
"name": "TeX",
"bytes": "913"
}
],
"symlink_target": ""
}
|
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
import sys
# Define required packages.
requires = []
# Assume spidev is required on non-windows & non-mac platforms (i.e. linux).
if sys.platform != 'win32' and sys.platform != 'darwin':
requires.append('spidev')
setup(name = 'Adafruit_GPIO',
version = '0.9.0',
author = 'Tony DiCola',
author_email = 'tdicola@adafruit.com',
description = 'Library to provide a cross-platform GPIO interface on the Raspberry Pi and Beaglebone Black using the RPi.GPIO and Adafruit_BBIO libraries.',
license = 'MIT',
url = 'https://github.com/adafruit/Adafruit_Python_GPIO/',
install_requires = requires,
test_suite = 'tests',
packages = find_packages())
|
{
"content_hash": "c670da3cd38d5ddc52675715fa322eb1",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 168,
"avg_line_length": 40.04545454545455,
"alnum_prop": 0.619750283768445,
"repo_name": "MinnowBoard/minnow-maker",
"id": "394d08c119b1d39c1a67d4ca095aa70cb0ea640c",
"size": "881",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Adafruit_Python_GPIO/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2006"
},
{
"name": "Makefile",
"bytes": "336"
},
{
"name": "Python",
"bytes": "203960"
},
{
"name": "Shell",
"bytes": "3417"
}
],
"symlink_target": ""
}
|
"""Loss helper functions."""
import tensorflow.compat.v2 as tf
def softmax_cross_entropy(pos, neg):
"""softmax cross entropy loss.
Let d_p = pos, d_n = neg.
we minimize:
log(1+exp(d_p)) + log(1+exp(-d_n))
for stability, is it equivalent to
d_p + log(1+exp(-d_p)) + log(1+exp(-d_n))
Args:
pos: Tensor.
neg: Tensor of the same shape of pos.
Returns:
Tensor holding pointwise loss of the same shape as pos.
"""
log_exp_pos = tf.math.log1p(tf.math.exp(-pos))
log_exp_neg = tf.math.log1p(tf.math.exp(-neg))
return pos + log_exp_pos + log_exp_neg
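# Illustrative usage (not part of the original source): pointwise losses
# for a batch of positive/negative distances; the values are arbitrary.
#
#   pos = tf.constant([0.5, 2.0])  # distances to positive examples
#   neg = tf.constant([3.0, 0.1])  # distances to negative examples
#   loss = softmax_cross_entropy(pos, neg)  # shape (2,), same as pos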
|
{
"content_hash": "bfdcb0026d29bd07019d99b96b500e33",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 59,
"avg_line_length": 24.291666666666668,
"alnum_prop": 0.6363636363636364,
"repo_name": "google-research/google-research",
"id": "11f6c50dff84713f5269d2c155ff1acce8fa337b",
"size": "1191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyperbolic/utils/learn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
from models.scf import Element, SCF
from models.rasp import *
import re
## Extractor for RASP dependency format
# @author Adriano Zanette
# @version 0.1
class RaspDependencyExtractor:
  ## Class constructor
# @author Adriano Zanette
# @version 0.1
# @return DatabaseBuilder
def __init__(self):
pass
## It extracts frames
# @author Adriano Zanette
# @version 0.1
# @param sentence Sentence
# @return Dict Frames to be built
def extract(self, sentence):
raspSentence = self.buildSentence(sentence)
verbs = raspSentence.getVerbs()
frames = []
for verb in verbs:
frame = SCF()
frame.verb = verb.word
frame.isPassive = verb.isPassive
verbElement = Element(sintax = 'V', element = 'V', relevance = 0, position = verb.id, raw = verb.word)
frame.elements.append(verbElement)
for child in verb.children:
element = self.buildElement(child)
if element:
frame.elements.append(element)
frames.append(frame)
return frames
  ## Builds a sentence split into tokens
# @author Adriano Zanette
# @version 0.1
# @param sentence Sentence sentence
# @return Sentence
def buildSentence(self, sentence):
lines = sentence.parsed.split('\n')
raspSentence = Sentence(sentence)
for line in lines:
line = re.sub('[()|]', '', line.strip())
args = line.split(" ")
if line == "" or len(args) == 0:
continue
raspSentence.addRelationship(args)
return raspSentence
|
{
"content_hash": "303a8d55f2ea2d95cd843aaca6f9d478",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 108,
"avg_line_length": 25.46031746031746,
"alnum_prop": 0.614713216957606,
"repo_name": "adzanette/scf-extractor",
"id": "712b770445aa328aa877d2b193892bbfda14c8c3",
"size": "1604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scf-extractor/extractor/RaspDependencyExtractor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "51698"
},
{
"name": "PHP",
"bytes": "131430"
},
{
"name": "Python",
"bytes": "423162"
}
],
"symlink_target": ""
}
|
"""
**********
Exceptions
**********
Base exceptions and errors for NetworkX.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)\nPieter Swart (swart@lanl.gov)\nDan Schult(dschult@colgate.edu)"""
# Copyright (C) 2004-2008 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Exception handling
# the root of all Exceptions
class NetworkXException(Exception):
"""Base class for exceptions in NetworkX."""
class NetworkXError(NetworkXException):
"""Exception for a serious error in NetworkX"""
class NetworkXPointlessConcept(NetworkXException):
"""Harary, F. and Read, R. "Is the Null Graph a Pointless Concept?"
In Graphs and Combinatorics Conference, George Washington University.
New York: Springer-Verlag, 1973.
"""
|
{
"content_hash": "2ab5044cd496da23ace328d772060b93",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 114,
"avg_line_length": 26.71875,
"alnum_prop": 0.6970760233918128,
"repo_name": "JaneliaSciComp/Neuroptikon",
"id": "de2d958beb12051c06e11b4426fb7c6e50ca6d0f",
"size": "855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Source/lib/CrossPlatform/networkx/exception.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "GLSL",
"bytes": "3048"
},
{
"name": "HTML",
"bytes": "97998"
},
{
"name": "Inno Setup",
"bytes": "2349"
},
{
"name": "Python",
"bytes": "8142986"
}
],
"symlink_target": ""
}
|
from rx.core import Observable, AnonymousObservable
from rx.disposables import SerialDisposable, CompositeDisposable, \
SingleAssignmentDisposable
from rx.concurrency import immediate_scheduler
from rx.internal import extensionmethod
@extensionmethod(Observable)
def expand(self, selector, scheduler=None):
"""Expands an observable sequence by recursively invoking selector.
selector -- {Function} Selector function to invoke for each produced
element, resulting in another sequence to which the selector will be
invoked recursively again.
scheduler -- {Scheduler} [Optional] Scheduler on which to perform the
expansion. If not provided, this defaults to the current thread
scheduler.
Returns an observable {Observable} sequence containing all the elements
produced by the recursive expansion.
"""
scheduler = scheduler or immediate_scheduler
source = self
def subscribe(observer):
q = []
m = SerialDisposable()
d = CompositeDisposable(m)
active_count = [0]
is_acquired = [False]
def ensure_active():
is_owner = False
if len(q) > 0:
is_owner = not is_acquired[0]
is_acquired[0] = True
def action(scheduler, state):
if len(q) > 0:
work = q.pop(0)
else:
is_acquired[0] = False
return
sad = SingleAssignmentDisposable()
d.add(sad)
                def on_next(x):
                    observer.on_next(x)
                    result = None
                    try:
                        result = selector(x)
                    except Exception as ex:
                        observer.on_error(ex)
                        # bail out: without this return the undefined
                        # result would be queued and subscribed to after
                        # the error has already been signalled
                        return
                    q.append(result)
active_count[0] += 1
ensure_active()
def on_complete():
d.remove(sad)
active_count[0] -= 1
if active_count[0] == 0:
observer.on_completed()
sad.disposable = work.subscribe(on_next, observer.on_error, on_complete)
m.disposable = scheduler.schedule(action)
if is_owner:
m.disposable = scheduler.schedule(action)
q.append(source)
active_count[0] += 1
ensure_active()
return d
return AnonymousObservable(subscribe)
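# Usage sketch (not part of the original module), assuming the RxPY 1.x
# package layout this file ships with: starting from 1, each emitted value
# is fed back through the selector until it reaches the threshold.
if __name__ == '__main__':
    import rx  # assumed to register the RxPY 1.x extension methods
    def show(value):
        print(value)
    rx.Observable.return_value(1).expand(
        lambda x: rx.Observable.return_value(2 * x) if x < 10 else rx.Observable.empty()
    ).subscribe(show)  # emits 1, 2, 4, 8, 16 on the immediate scheduler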
|
{
"content_hash": "4c9cb1e34e5a62d31379efe2eba631ee",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 88,
"avg_line_length": 32.12820512820513,
"alnum_prop": 0.5494812450119713,
"repo_name": "Sprytile/Sprytile",
"id": "24f89cd51c3422d45b0d5baa432f4481a4616a41",
"size": "2506",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rx/linq/observable/expand.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "720766"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class BallerShotCallerConfig(AppConfig):
name = 'baller_shot_caller'
|
{
"content_hash": "3b049c5d3086d12a003bf064dffd9761",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 40,
"avg_line_length": 21.8,
"alnum_prop": 0.7798165137614679,
"repo_name": "kizzen/Baller-Shot-Caller",
"id": "f09e480cd8cdb49246f03910c36d56729a37f0a5",
"size": "109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web_site/baller_shot_caller/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "465952"
},
{
"name": "Dockerfile",
"bytes": "505"
},
{
"name": "HTML",
"bytes": "14778"
},
{
"name": "JavaScript",
"bytes": "258321"
},
{
"name": "Python",
"bytes": "62681"
},
{
"name": "Shell",
"bytes": "392"
}
],
"symlink_target": ""
}
|
from datahub.configuration.common import AllowDenyPattern
def test_allow_all() -> None:
pattern = AllowDenyPattern.allow_all()
assert pattern.allowed("foo.table")
def test_deny_all() -> None:
pattern = AllowDenyPattern(allow=[], deny=[".*"])
assert not pattern.allowed("foo.table")
def test_single_table() -> None:
pattern = AllowDenyPattern(allow=["foo.mytable"])
assert pattern.allowed("foo.mytable")
def test_default_deny() -> None:
pattern = AllowDenyPattern(allow=["foo.mytable"])
assert not pattern.allowed("foo.bar")
def test_fully_speced():
pattern = AllowDenyPattern(allow=["foo.mytable"])
assert pattern.is_fully_specified_allow_list()
pattern = AllowDenyPattern(allow=["foo.*", "foo.table"])
assert not pattern.is_fully_specified_allow_list()
pattern = AllowDenyPattern(allow=["foo.?", "foo.table"])
assert not pattern.is_fully_specified_allow_list()
def test_is_allowed():
pattern = AllowDenyPattern(allow=["foo.mytable"], deny=["foo.*"])
assert pattern.get_allowed_list() == []
def test_case_sensitivity():
pattern = AllowDenyPattern(allow=["Foo.myTable"])
assert pattern.allowed("foo.mytable")
assert pattern.allowed("FOO.MYTABLE")
assert pattern.allowed("Foo.MyTable")
pattern = AllowDenyPattern(allow=["Foo.myTable"], ignoreCase=False)
assert not pattern.allowed("foo.mytable")
assert pattern.allowed("Foo.myTable")
|
{
"content_hash": "54e124564007185c1ba6be228b15d7f5",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 71,
"avg_line_length": 31.955555555555556,
"alnum_prop": 0.694019471488178,
"repo_name": "linkedin/WhereHows",
"id": "6f3af9cba287fe93a6feb253dd4ba83b255871a3",
"size": "1438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metadata-ingestion/tests/unit/test_allow_deny.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "110129"
},
{
"name": "Dockerfile",
"bytes": "2521"
},
{
"name": "HTML",
"bytes": "131513"
},
{
"name": "Java",
"bytes": "1307442"
},
{
"name": "JavaScript",
"bytes": "148450"
},
{
"name": "Nearley",
"bytes": "2837"
},
{
"name": "Python",
"bytes": "1419332"
},
{
"name": "Shell",
"bytes": "2564"
},
{
"name": "TSQL",
"bytes": "42644"
},
{
"name": "TypeScript",
"bytes": "641014"
}
],
"symlink_target": ""
}
|
import json
from twisted.web.client import Agent
from twisted.internet import defer, reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
from ooni import errors as e
from ooni.settings import config
from ooni.utils import log
from ooni.utils.net import BodyReceiver, StringProducer, Downloader
from ooni.utils.trueheaders import TrueHeadersSOCKS5Agent
class OONIBClient(object):
retries = 3
def __init__(self, address):
self.address = address
def _request(self, method, urn, genReceiver, bodyProducer=None):
address = self.address
if self.address.startswith('httpo://'):
address = self.address.replace('httpo://', 'http://')
agent = TrueHeadersSOCKS5Agent(reactor,
proxyEndpoint=TCP4ClientEndpoint(reactor, '127.0.0.1',
config.tor.socks_port))
elif self.address.startswith('https://'):
log.err("HTTPS based bouncers are currently not supported.")
raise e.InvalidOONIBBouncerAddress
elif self.address.startswith('http://'):
log.msg("Warning using unencrypted backend")
agent = Agent(reactor)
attempts = 0
finished = defer.Deferred()
def perform_request(attempts):
uri = address + urn
d = agent.request(method, uri, bodyProducer=bodyProducer)
@d.addCallback
def callback(response):
try:
content_length = int(response.headers.getRawHeaders('content-length')[0])
except:
content_length = None
response.deliverBody(genReceiver(finished, content_length))
def errback(err, attempts):
                # We will recursively keep trying to perform a request until
# we have reached the retry count.
if attempts < self.retries:
log.err("Lookup failed. Retrying.")
attempts += 1
perform_request(attempts)
else:
log.err("Failed. Giving up.")
finished.errback(err)
d.addErrback(errback, attempts)
perform_request(attempts)
return finished
def queryBackend(self, method, urn, query=None):
bodyProducer = None
if query:
bodyProducer = StringProducer(json.dumps(query))
def genReceiver(finished, content_length):
def process_response(s):
# If empty string then don't parse it.
if not s:
return
try:
response = json.loads(s)
except ValueError:
raise e.get_error(None)
if 'error' in response:
log.err("Got this backend error message %s" % response)
raise e.get_error(response['error'])
return response
return BodyReceiver(finished, content_length, process_response)
return self._request(method, urn, genReceiver, bodyProducer)
def download(self, urn, download_path):
def genReceiver(finished, content_length):
return Downloader(download_path, finished, content_length)
return self._request('GET', urn, genReceiver)
def getInput(self, input_hash):
from ooni.deck import InputFile
input_file = InputFile(input_hash)
if input_file.descriptorCached:
return defer.succeed(input_file)
else:
d = self.queryBackend('GET', '/input/' + input_hash)
@d.addCallback
def cb(descriptor):
input_file.load(descriptor)
input_file.save()
return input_file
@d.addErrback
def err(err):
log.err("Failed to get descriptor for input %s" % input_hash)
log.exception(err)
return d
def getInputList(self):
return self.queryBackend('GET', '/input')
def downloadInput(self, input_hash):
from ooni.deck import InputFile
input_file = InputFile(input_hash)
if input_file.fileCached:
return defer.succeed(input_file)
else:
d = self.download('/input/' + input_hash + '/file', input_file.cached_file)
@d.addCallback
def cb(res):
input_file.verify()
return input_file
@d.addErrback
def err(err):
log.err("Failed to download the input file %s" % input_hash)
log.exception(err)
return d
def getInputPolicy(self):
return self.queryBackend('GET', '/policy/input')
def getNettestPolicy(self):
return self.queryBackend('GET', '/policy/nettest')
def getDeckList(self):
return self.queryBackend('GET', '/deck')
def getDeck(self, deck_hash):
from ooni.deck import Deck
deck = Deck(deck_hash)
if deck.descriptorCached:
return defer.succeed(deck)
else:
d = self.queryBackend('GET', '/deck/' + deck_hash)
@d.addCallback
def cb(descriptor):
deck.load(descriptor)
deck.save()
return deck
@d.addErrback
def err(err):
log.err("Failed to get descriptor for deck %s" % deck_hash)
log.exception(err)
return d
def downloadDeck(self, deck_hash):
from ooni.deck import Deck
deck = Deck(deck_hash)
if deck.fileCached:
return defer.succeed(deck)
else:
d = self.download('/deck/' + deck_hash + '/file', deck.cached_file)
@d.addCallback
def cb(res):
deck.verify()
return deck
@d.addErrback
def err(err):
log.err("Failed to download the deck %s" % deck_hash)
log.exception(err)
return d
@defer.inlineCallbacks
def lookupTestCollector(self, net_tests):
try:
test_collector = yield self.queryBackend('POST', '/bouncer/net-tests',
query={'net-tests': net_tests})
except Exception as exc:
log.exception(exc)
raise e.CouldNotFindTestCollector
defer.returnValue(test_collector)
@defer.inlineCallbacks
def lookupTestHelpers(self, test_helper_names):
try:
test_helper = yield self.queryBackend('POST', '/bouncer/test-helpers',
query={'test-helpers': test_helper_names})
except Exception as exc:
log.exception(exc)
raise e.CouldNotFindTestHelper
if not test_helper:
raise e.CouldNotFindTestHelper
defer.returnValue(test_helper)
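# Usage sketch (not part of the original module). The plain-HTTP backend
# address below is an assumption for illustration; real probes normally
# talk to an httpo:// (Tor hidden service) bouncer instead.
if __name__ == '__main__':
    client = OONIBClient('http://127.0.0.1:8888')
    d = client.getInputList()
    d.addCallbacks(log.msg, log.err)
    d.addBoth(lambda _: reactor.stop())
    reactor.run()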
|
{
"content_hash": "a52dc235f1bb88fc758c96b1a0c495c3",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 99,
"avg_line_length": 32.00452488687783,
"alnum_prop": 0.5496960271454828,
"repo_name": "0xPoly/ooni-probe",
"id": "388bb6817b95531dd15ccf8c6e446952744b767f",
"size": "7073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ooni/oonibclient.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "392"
},
{
"name": "Groff",
"bytes": "38425"
},
{
"name": "HTML",
"bytes": "3963"
},
{
"name": "JavaScript",
"bytes": "7778"
},
{
"name": "Makefile",
"bytes": "3786"
},
{
"name": "Python",
"bytes": "518736"
},
{
"name": "Shell",
"bytes": "77958"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from flexbe_core import EventState, Logger
import rospy
from wm_nlu.srv import AnswerQuestion
from std_msgs.msg import String
class SaraNLUspr(EventState):
'''
Use wm_nlu to parse a sentence and return the answer.
># sentence string sentence to parse
#> answer string answer
<= understood Finished job.
<= not_understood Finished job but no commands detected.
<= fail service unavailable.
'''
def __init__(self):
# See example_state.py for basic explanations.
super(SaraNLUspr, self).__init__(outcomes=['understood', 'not_understood', 'fail'], input_keys=['sentence'],
output_keys=['answer'])
serviceName = "/answer_question"
Logger.loginfo("waiting forservice: " + serviceName)
rospy.wait_for_service(serviceName)
self.service = rospy.ServiceProxy(serviceName, AnswerQuestion)
def execute(self, userdata):
# Call the NLU service
response = self.service(String(userdata.sentence))
# Checking the validity of the response
if response.str.data is "":
userdata.answer = response.str.data
return "fail"
userdata.answer = response.str.data
return "understood"
|
{
"content_hash": "b991d8a1ab45c617301ca431802895b1",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 116,
"avg_line_length": 32.023809523809526,
"alnum_prop": 0.6237918215613383,
"repo_name": "WalkingMachine/sara_behaviors",
"id": "7460556284461fa70f7d937d41175d7c42d74b24",
"size": "1383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sara_flexbe_states/src/sara_flexbe_states/sara_nlu_spr.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "6456"
},
{
"name": "CMake",
"bytes": "2065"
},
{
"name": "Python",
"bytes": "905600"
},
{
"name": "Shell",
"bytes": "2661"
}
],
"symlink_target": ""
}
|
import os
from pip.backwardcompat import urllib
from tests.lib.path import Path
from pip.index import package_to_requirement, HTMLPage
from pip.index import PackageFinder, Link, INSTALLED_VERSION
from tests.lib import path_to_url
from string import ascii_lowercase
from mock import patch
def test_package_name_should_be_converted_to_requirement():
"""
    Test that it translates a name like Foo-1.2 to Foo==1.2
"""
assert package_to_requirement('Foo-1.2') == 'Foo==1.2'
assert package_to_requirement('Foo-dev') == 'Foo==dev'
assert package_to_requirement('Foo') == 'Foo'
def test_html_page_should_be_able_to_scrap_rel_links():
"""
Test scraping page looking for url in href
"""
page = HTMLPage("""
<!-- The <th> elements below are a terrible terrible hack for setuptools -->
<li>
<strong>Home Page:</strong>
<!-- <th>Home Page -->
<a href="http://supervisord.org/">http://supervisord.org/</a>
</li>""", "supervisor")
links = list(page.scraped_rel_links())
assert len(links) == 1
assert links[0].url == 'http://supervisord.org/'
def test_sort_locations_file_find_link(data):
"""
Test that a file:// find-link dir gets listdir run
"""
finder = PackageFinder([data.find_links], [])
files, urls = finder._sort_locations([data.find_links])
assert files and not urls, "files and not urls should have been found at find-links url: %s" % data.find_links
def test_sort_locations_file_not_find_link(data):
"""
Test that a file:// url dir that's not a find-link, doesn't get a listdir run
"""
finder = PackageFinder([], [])
files, urls = finder._sort_locations(data.index_url("empty_with_pkg"))
assert urls and not files, "urls, but not files should have been found"
def test_INSTALLED_VERSION_greater():
"""Test INSTALLED_VERSION compares greater."""
assert INSTALLED_VERSION > Link("some link")
|
{
"content_hash": "f3e43f12fde30c850e1f8ab0c9e5e7e9",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 114,
"avg_line_length": 34.21052631578947,
"alnum_prop": 0.662051282051282,
"repo_name": "alquerci/pip",
"id": "63d49d63361ce1752e6949f712263b50f6e41ab4",
"size": "1950",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/unit/test_index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1158391"
},
{
"name": "Shell",
"bytes": "4534"
}
],
"symlink_target": ""
}
|
import logging
from collections import defaultdict
from paddle.nn import Layer
from paddle.jit import to_static, not_to_static
from paddle.fluid.framework import Parameter
from paddle.fluid.framework import program_guard
from paddle.fluid.executor import global_scope
from paddle.fluid.dygraph.dygraph_to_static.program_translator import (
StaticFunction,
)
from .utils import to_list
from .utils import get_logger
from .converter import Converter
class ProxyLayer(Layer):
"""
    ProxyLayer implements all logic for converting a dygraph model into
    static Program IR. Meanwhile, it provides convenient interfaces for
    auto parallel to access feed/fetch/loss/metric variables.
"""
def __init__(self, layer, loss_func, metrics):
super().__init__()
        # NOTE: All verification logic is done in Engine.Prepare
self.inner_layer = layer
self.loss_func = loss_func
self.metrics = metrics
# train / eval / predict
self.mode = None
# generated program vars
self._input_vars = defaultdict(list)
self._label_vars = defaultdict(list)
self._output_vars = defaultdict(list)
self._loss_vars = defaultdict(list)
self._metric_vars = defaultdict(list)
def _train(self, inputs, labels):
"""
Train process of inner_layer with forward/loss/metric logic.
"""
# step 1. save feed variables of Program
mode = 'train'
self._input_vars[mode] = inputs
self._label_vars[mode] = labels
# step 2. call inner_layer.forward
self._output_vars[mode] = self.inner_layer(*inputs)
# step 3. calculate loss if needed
new_inputs = self._prepare(self.output_vars, labels)
self._loss_vars[mode] = self.call_loss(new_inputs)
# step 4. calculate metrics if needed
self._metric_vars[mode] = self.call_metrics(new_inputs)
def _eval(self, inputs, labels):
"""
Evaluate process of inner_layer with forward/loss/metric logic.
"""
        # TODO(dev): we may be able to reuse the code in self._train after
        # making sure the two paths really match.
# step 1. save feed variables of Program
mode = 'eval'
self._input_vars[mode] = inputs
self._label_vars[mode] = labels
# step 2. call inner_layer.forward
self._output_vars[mode] = self.inner_layer(*inputs)
# step 3. calculate loss if needed
new_inputs = self._prepare(self.output_vars, labels)
self._loss_vars[mode] = self.call_loss(new_inputs)
# step 4. calculate metrics if needed
self._metric_vars[mode] = self.call_metrics(new_inputs)
def _predict(self, inputs, labels):
"""
Predict process of inner_layer with forward logic.
"""
# step 1. save feed variables of Program
mode = 'predict'
self._input_vars[mode] = inputs
self._label_vars[mode] = labels
# step 2. call inner_layer.forward
self._output_vars[mode] = self.inner_layer(*inputs)
@not_to_static
def _prepare(self, outputs, labels):
"""
Concat outputs and labels as a single list
NOTE(dev): We use @not_to_static to avoid AST Analysis.
"""
return to_list(outputs) + to_list(labels)
def call_loss(self, inputs):
"""
Apply Loss Function on outputs and labels.
Args:
inputs: List[Variable]
Returns: List[Variable]
"""
res = []
if self.loss_func is not None:
res = self.loss_func(*inputs)
return res
def call_metrics(self, inputs):
"""
Apply Metrics Function on outputs and labels.
Args:
inputs: List[Variable]
Returns: List[Variable]
"""
outs = []
for metric in self.metrics:
outs.append(to_list(metric.compute(*inputs)))
return outs
def set_mode(self, mode):
self.mode = mode
self.training = mode == 'train'
def clone(self):
return ProxyLayer(self.inner_layer, self.loss_func, self.metrics)
@property
def input_vars(self):
return self._input_vars[self.mode]
@property
def label_vars(self):
return self._label_vars[self.mode]
@property
def output_vars(self):
return self._output_vars[self.mode]
@property
def loss_vars(self):
return self._loss_vars[self.mode]
@property
def metric_vars(self):
return self._metric_vars[self.mode]
@property
def startup_program(self):
return self.inner_layer._startup_program()
class BuildInfo:
def __init__(self):
self.clear()
def has_cache(self, mode, update=False):
is_cache = self.states[mode]
if update:
self.cache(mode)
return is_cache
def cache(self, mode):
self.states[mode] = True
def clear(self):
self.states = defaultdict(bool)
class ProgramHelper:
"""
    A helper class for Engine that provides different Program IR
    according to the specified 'mode'.
"""
def __init__(self, layer, loss_func, metrics, inputs_spec, labels_spec):
# original model config information
        # TODO(Aurelius84): Implement append_backward and optimizer in ProxyLayer
        # after the distributed engine satisfies basic conditions.
self.proxy_layer = ProxyLayer(layer, loss_func, metrics)
self.inputs_spec = inputs_spec
self.labels_spec = labels_spec
self.build_info = BuildInfo()
self._logger = get_logger(logging.INFO)
self.lazy_init = False
def reset(self):
"""
Reset all state of current Object.
"""
self.build_info.clear()
self.proxy_layer = self.proxy_layer.clone()
def build_program(self, mode):
"""
Convert dygraph model into static Program IR.
"""
assert mode in ['train', 'eval', 'predict']
self.proxy_layer.set_mode(mode)
        # skip if we have already built the program.
if self.build_info.has_cache(mode, True):
self._logger.info(
"Already build program with mode = %s, use cached program."
% mode
)
return
self._logger.info("start to build program for mode = %s." % mode)
input_spec = [self.inputs_spec, self.labels_spec]
static_func = to_static(self.static_func(), input_spec=input_spec)
func_name = '_' + mode
setattr(self.proxy_layer, func_name, static_func)
# NOTE(dev): Because @to_static is a Lazy mechanism, so we explicitly call this to trigger
# generating Program IR immediately.
getattr(self.proxy_layer, func_name).concrete_program
self._build_startup_program()
def _build_startup_program(self):
"""
Create and Sync parameters into startup program.
"""
if len(self.startup_program.global_block().ops) > 1:
self.lazy_init = True
return
for param in self.concrete_program.parameters:
Parameter(
name=param.name,
desc=param,
type=param.type,
shape=param.shape,
dtype=param.dtype,
stop_gradient=param.stop_gradient,
block=self.startup_program.global_block(),
)
def apply_optimizer(self, optimizer):
"""
Append backward and generate optimizer operations.
"""
self._verify_optimizer(optimizer)
self._logger.info(
"start to apply optimizer: %s ", type(optimizer).__name__
)
# clear optimizer parameters
original_params = optimizer._parameter_list
optimizer._parameter_list = None
with program_guard(self.main_program, self.startup_program):
res = optimizer.minimize(self.loss_vars[0])
# restore optimizer parameters
optimizer._parameter_list = original_params
return res
def _verify_optimizer(self, optimizer):
assert optimizer is not None
assert hasattr(
optimizer, "minimize"
), "Optimizer must have minimize() method."
assert self.proxy_layer.mode == 'train', (
"Required mode == 'train', but received '%s'"
% self.proxy_layer.mode
)
assert len(self.loss_vars) == 1, (
"Required len(loss_vars) == 1, but received len(loss_vars) = %s"
% len(self.loss_vars)
)
def to(self, mode):
"""
        Switch the underlying proxy layer into the target mode.
"""
assert mode in ['train', 'eval', 'predict']
func = getattr(self.proxy_layer, '_' + mode)
assert isinstance(
func, StaticFunction
), "Please call build_program(mode) firstly."
self.proxy_layer.set_mode(mode)
def static_func(self):
"""
        Return the StaticFunction instance for the current target mode.
"""
assert self.proxy_layer.mode in [
'train',
'eval',
'predict',
], "Please call build_program(mode) firstly."
func_name = '_' + self.proxy_layer.mode
return getattr(self.proxy_layer, func_name)
def init(self, main_program, place, dist_context):
if self.lazy_init:
return
for param in self.concrete_program.parameters:
# create var in scope and share parameters to scope
if param.name not in main_program.global_block().vars:
continue
# get param_var's dist_attr
var = main_program.global_block().vars[param.name]
var_dist_attr = dist_context.get_tensor_dist_attr_for_program(var)
dist_attr = {
"dims_mapping": var_dist_attr.dims_mapping,
"process_shape": var_dist_attr.process_mesh.topology,
"process_group": var_dist_attr.process_mesh.processes,
}
# slice param_value with dist_attr
# share sliced_param_value with param_tensor in global_scope
param_tensor = global_scope().var(param.name).get_tensor()
sliced_param = Converter.slice_with_dist_attr(
param.numpy(), dist_attr
)
param_tensor.set(sliced_param, place)
@property
def concrete_program(self):
return self.static_func().concrete_program
@property
def main_program(self):
return self.concrete_program.main_program
@property
def startup_program(self):
try:
return self.proxy_layer.startup_program
except Exception as err:
self._logger.warning("`lazy init` failed.")
if isinstance(err, AssertionError):
return self.concrete_program.startup_program
raise err
@property
def input_vars(self):
return to_list(self.proxy_layer.input_vars)
@property
def output_vars(self):
return to_list(self.proxy_layer.output_vars)
@property
def label_vars(self):
return to_list(self.proxy_layer.label_vars)
@property
def loss_vars(self):
return to_list(self.proxy_layer.loss_vars)
@property
def metric_vars(self):
return to_list(self.proxy_layer.metric_vars)
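if __name__ == '__main__':
    # Usage sketch (not part of the original module): BuildInfo caches,
    # per mode, whether a static Program has already been built, so
    # build_program() can skip repeated conversions.
    _info = BuildInfo()
    assert not _info.has_cache('train', update=True)  # first build: miss, then cached
    assert _info.has_cache('train')  # later calls reuse the cached program
    _info.clear()
    assert not _info.has_cache('train')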
|
{
"content_hash": "d64d76a5583a4970e4d9ca246f80c670",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 98,
"avg_line_length": 31.392857142857142,
"alnum_prop": 0.5948192876520522,
"repo_name": "luotao1/Paddle",
"id": "31deaea4275eb969a5e9bed344663cd78b054236",
"size": "12038",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/distributed/auto_parallel/helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
}
|
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_array_equal
import os
import sqlite3
import tables
import numpy as np
from tools import check_cmd
from helper import tables_exist, find_ids, exit_times, \
h5out, sqliteout, clean_outs, to_ary, which_outfile
"""Tests"""
def test_source_to_sink():
"""Tests linear growth of sink inventory by checking if the transactions
were of equal quantities and only between sink and source facilities.
"""
clean_outs()
# Cyclus simulation input for Source and Sink
sim_inputs = ["./input/source_to_sink.xml"]
for sim_input in sim_inputs:
holdsrtn = [1] # needed because nose does not send() to test generator
outfile = which_outfile()
cmd = ["cyclus", "-o", outfile, "--input-file", sim_input]
yield check_cmd, cmd, '.', holdsrtn
rtn = holdsrtn[0]
if rtn != 0:
return # don't execute further commands
# Tables of interest
paths = ["/AgentEntry", "/Resources", "/Transactions", "/Info"]
# Check if these tables exist
yield assert_true, tables_exist(outfile, paths)
if not tables_exist(outfile, paths):
clean_outs()
return # don't execute further commands
# Get specific tables and columns
if outfile == h5out:
output = tables.open_file(h5out, mode = "r")
agent_entry = output.get_node("/AgentEntry")[:]
info = output.get_node("/Info")[:]
resources = output.get_node("/Resources")[:]
transactions = output.get_node("/Transactions")[:]
output.close()
else:
conn = sqlite3.connect(outfile)
conn.row_factory = sqlite3.Row
cur = conn.cursor()
exc = cur.execute
agent_entry = exc('SELECT * FROM AgentEntry').fetchall()
info = exc('SELECT * FROM Info').fetchall()
resources = exc('SELECT * FROM Resources').fetchall()
transactions = exc('SELECT * FROM Transactions').fetchall()
conn.close()
# Find agent ids of source and sink facilities
agent_ids = to_ary(agent_entry, "AgentId")
spec = to_ary(agent_entry, "Spec")
source_id = find_ids(":agents:Source", spec, agent_ids)
sink_id = find_ids(":agents:Sink", spec, agent_ids)
    # Test that only one source and one sink are deployed in the simulation
yield assert_equal, len(source_id), 1
yield assert_equal, len(sink_id), 1
# Check if transactions are only between source and sink
sender_ids = to_ary(transactions, "SenderId")
receiver_ids = to_ary(transactions, "ReceiverId")
expected_sender_array = np.empty(sender_ids.size)
expected_sender_array.fill(source_id[0])
expected_receiver_array = np.empty(receiver_ids.size)
expected_receiver_array.fill(sink_id[0])
yield assert_array_equal, sender_ids, expected_sender_array
yield assert_array_equal, receiver_ids, expected_receiver_array
    # Transaction ids must form a contiguous range from 0 to the number of rows
expected_trans_ids = np.arange(0, sender_ids.size, 1)
yield assert_array_equal, \
to_ary(transactions, "TransactionId"),\
expected_trans_ids
# Track transacted resources
resource_ids = to_ary(resources, "ResourceId")
quantities = to_ary(resources, "Quantity")
expected_quantities = np.empty(resource_ids.size)
# Expect that every transaction quantity is the same amount
expected_quantities.fill(quantities[0])
yield assert_array_equal, quantities, expected_quantities
clean_outs()
|
{
"content_hash": "533631922b2f9fc7654b876b3e6acf72",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 79,
"avg_line_length": 39.604166666666664,
"alnum_prop": 0.6212519726459758,
"repo_name": "rwcarlsen/cyclus",
"id": "ab9210d6c0dbee65292599d75510c4b08394c43f",
"size": "3826",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "tests/test_source_to_sink.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1654"
},
{
"name": "C++",
"bytes": "3899653"
},
{
"name": "CMake",
"bytes": "106618"
},
{
"name": "Python",
"bytes": "226770"
},
{
"name": "Shell",
"bytes": "4079"
}
],
"symlink_target": ""
}
|
import unittest
import openmesh
class TriMeshGarbageCollection(unittest.TestCase):
def setUp(self):
self.mesh = openmesh.TriMesh()
self.mesh.request_vertex_status()
self.mesh.request_edge_status()
self.mesh.request_halfedge_status()
self.mesh.request_face_status()
# Add some vertices
self.vhandle = []
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(-1, -1, 1)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d( 1, -1, 1)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d( 1, 1, 1)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(-1, 1, 1)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(-1, -1, -1)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d( 1, -1, -1)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d( 1, 1, -1)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(-1, 1, -1)))
# Add six faces to form a cube
self.mesh.add_face(self.vhandle[0], self.vhandle[1], self.vhandle[3])
self.mesh.add_face(self.vhandle[1], self.vhandle[2], self.vhandle[3])
self.mesh.add_face(self.vhandle[7], self.vhandle[6], self.vhandle[5])
self.mesh.add_face(self.vhandle[7], self.vhandle[5], self.vhandle[4])
self.mesh.add_face(self.vhandle[1], self.vhandle[0], self.vhandle[4])
self.mesh.add_face(self.vhandle[1], self.vhandle[4], self.vhandle[5])
self.mesh.add_face(self.vhandle[2], self.vhandle[1], self.vhandle[5])
self.mesh.add_face(self.vhandle[2], self.vhandle[5], self.vhandle[6])
self.mesh.add_face(self.vhandle[3], self.vhandle[2], self.vhandle[6])
self.mesh.add_face(self.vhandle[3], self.vhandle[6], self.vhandle[7])
self.mesh.add_face(self.vhandle[0], self.vhandle[3], self.vhandle[7])
self.mesh.add_face(self.vhandle[0], self.vhandle[7], self.vhandle[4])
# Test setup:
#
# 3 ======== 2
# / /|
# / / | z
# 0 ======== 1 | |
# | | | | y
# | 7 | 6 | /
# | | / | /
# | |/ |/
# 4 ======== 5 -------> x
def test_standard_garbage_collection(self):
# Check setup
self.assertEqual(self.mesh.n_vertices(), 8)
self.assertEqual(self.mesh.n_faces(), 12)
self.mesh.delete_vertex(self.vhandle[0])
# Check setup
self.assertEqual(self.mesh.n_vertices(), 8)
self.assertEqual(self.mesh.n_faces(), 12)
self.mesh.garbage_collection()
# Check setup
self.assertEqual(self.mesh.n_vertices(), 7)
self.assertEqual(self.mesh.n_faces(), 8)
def test_tracked_garbage_collection(self):
# Check setup
self.assertEqual(self.mesh.n_vertices(), 8)
self.assertEqual(self.mesh.n_faces(), 12)
#==================================================
# Create lists containing the current handles
#==================================================
vertexHandles = []
for v in self.mesh.vertices():
vertexHandles.append(v)
halfedgeHandles = []
for he in self.mesh.halfedges():
halfedgeHandles.append(he)
faceHandles = []
for f in self.mesh.faces():
faceHandles.append(f)
# Deleting vertex 0
self.mesh.delete_vertex(self.vhandle[0])
# Check setup
self.assertEqual(self.mesh.n_vertices(), 8)
self.assertEqual(self.mesh.n_faces(), 12)
self.mesh.garbage_collection(vertexHandles, halfedgeHandles, faceHandles, True, True, True)
# Check setup
self.assertEqual(self.mesh.n_vertices(), 7)
self.assertEqual(self.mesh.n_faces(), 8)
# Check setup of vertices
self.assertEqual(vertexHandles[0].idx(), -1)
self.assertEqual(vertexHandles[1].idx(), 1)
self.assertEqual(vertexHandles[2].idx(), 2)
self.assertEqual(vertexHandles[3].idx(), 3)
self.assertEqual(vertexHandles[4].idx(), 4)
self.assertEqual(vertexHandles[5].idx(), 5)
self.assertEqual(vertexHandles[6].idx(), 6)
self.assertEqual(vertexHandles[7].idx(), 0)
# Check setup of halfedge handles
self.assertEqual(halfedgeHandles[0 ].idx(), -1)
self.assertEqual(halfedgeHandles[1 ].idx(), -1)
self.assertEqual(halfedgeHandles[2 ].idx(), 2)
self.assertEqual(halfedgeHandles[3 ].idx(), 3)
self.assertEqual(halfedgeHandles[4 ].idx(), -1)
self.assertEqual(halfedgeHandles[5 ].idx(), -1)
self.assertEqual(halfedgeHandles[6 ].idx(), 6)
self.assertEqual(halfedgeHandles[7 ].idx(), 7)
self.assertEqual(halfedgeHandles[8 ].idx(), 8)
self.assertEqual(halfedgeHandles[9 ].idx(), 9)
self.assertEqual(halfedgeHandles[10].idx(), 10)
self.assertEqual(halfedgeHandles[11].idx(), 11)
self.assertEqual(halfedgeHandles[12].idx(), 12)
self.assertEqual(halfedgeHandles[13].idx(), 13)
self.assertEqual(halfedgeHandles[14].idx(), 14)
self.assertEqual(halfedgeHandles[15].idx(), 15)
self.assertEqual(halfedgeHandles[16].idx(), 16)
self.assertEqual(halfedgeHandles[17].idx(), 17)
self.assertEqual(halfedgeHandles[18].idx(), 18)
self.assertEqual(halfedgeHandles[19].idx(), 19)
self.assertEqual(halfedgeHandles[20].idx(), -1)
self.assertEqual(halfedgeHandles[21].idx(), -1)
self.assertEqual(halfedgeHandles[22].idx(), 22)
self.assertEqual(halfedgeHandles[23].idx(), 23)
self.assertEqual(halfedgeHandles[24].idx(), 24)
self.assertEqual(halfedgeHandles[25].idx(), 25)
self.assertEqual(halfedgeHandles[26].idx(), 26)
self.assertEqual(halfedgeHandles[27].idx(), 27)
self.assertEqual(halfedgeHandles[28].idx(), 20)
self.assertEqual(halfedgeHandles[29].idx(), 21)
self.assertEqual(halfedgeHandles[30].idx(), 4)
self.assertEqual(halfedgeHandles[31].idx(), 5)
self.assertEqual(halfedgeHandles[32].idx(), 0)
self.assertEqual(halfedgeHandles[33].idx(), 1)
self.assertEqual(halfedgeHandles[34].idx(), -1)
self.assertEqual(halfedgeHandles[35].idx(), -1)
# Check setup of faces
self.assertEqual(faceHandles[0 ].idx(), -1)
self.assertEqual(faceHandles[1 ].idx(), 1)
self.assertEqual(faceHandles[2 ].idx(), 2)
self.assertEqual(faceHandles[3 ].idx(), 3)
self.assertEqual(faceHandles[4 ].idx(), -1)
self.assertEqual(faceHandles[5 ].idx(), 5)
self.assertEqual(faceHandles[6 ].idx(), 6)
self.assertEqual(faceHandles[7 ].idx(), 7)
self.assertEqual(faceHandles[8 ].idx(), 4)
self.assertEqual(faceHandles[9 ].idx(), 0)
self.assertEqual(faceHandles[10].idx(), -1)
self.assertEqual(faceHandles[11].idx(), -1)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TriMeshGarbageCollection)
unittest.TextTestRunner(verbosity=2).run(suite)
|
{
"content_hash": "7cd6a389305536a61ed0802f49db3afa",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 99,
"avg_line_length": 43.45238095238095,
"alnum_prop": 0.5909589041095891,
"repo_name": "svn2github/OpenMesh4",
"id": "07c820b2c216acb7d00b57f7ef539fa9b9d6985c",
"size": "7300",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/Python/Unittests/test_trimesh_garbage_collection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "10150"
},
{
"name": "C++",
"bytes": "3487191"
},
{
"name": "CMake",
"bytes": "112489"
},
{
"name": "HTML",
"bytes": "385"
},
{
"name": "Prolog",
"bytes": "273"
},
{
"name": "Python",
"bytes": "186651"
},
{
"name": "QMake",
"bytes": "15228"
},
{
"name": "Shell",
"bytes": "6079"
},
{
"name": "TeX",
"bytes": "13595"
}
],
"symlink_target": ""
}
|
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import FileSystemStorage
from django.utils.importlib import import_module
from django.contrib.staticfiles import utils
class StaticFilesStorage(FileSystemStorage):
"""
Standard file system storage for site media files.
The defaults for ``location`` and ``base_url`` are
``STATICFILES_ROOT`` and ``STATICFILES_URL``.
"""
def __init__(self, location=None, base_url=None, *args, **kwargs):
if location is None:
location = settings.STATICFILES_ROOT
if base_url is None:
base_url = settings.STATICFILES_URL
if not location:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATICFILES_ROOT setting. Set it to "
"the absolute path of the directory that holds static media.")
if not base_url:
raise ImproperlyConfigured("You're using the staticfiles app "
"without having set the STATICFILES_URL setting. Set it to "
"URL that handles the files served from STATICFILES_ROOT.")
if settings.DEBUG:
utils.check_settings()
super(StaticFilesStorage, self).__init__(location, base_url, *args, **kwargs)
class AppStaticStorage(FileSystemStorage):
"""
A file system storage backend that takes an app module and works
for the ``static`` directory of it.
"""
source_dir = 'static'
def __init__(self, app, *args, **kwargs):
"""
Returns a static file storage if available in the given app.
"""
# app is actually the models module of the app. Remove the '.models'.
bits = app.__name__.split('.')[:-1]
self.app_name = bits[-1]
self.app_module = '.'.join(bits)
# The models module (app) may be a package in which case
# dirname(app.__file__) would be wrong. Import the actual app
# as opposed to the models module.
app = import_module(self.app_module)
location = self.get_location(os.path.dirname(app.__file__))
super(AppStaticStorage, self).__init__(location, *args, **kwargs)
def get_location(self, app_root):
"""
Given the app root, return the location of the static files of an app,
by default 'static'. We special case the admin app here since it has
its static files in 'media'.
"""
if self.app_module == 'django.contrib.admin':
return os.path.join(app_root, 'media')
return os.path.join(app_root, self.source_dir)
def get_prefix(self):
"""
Return the path name that should be prepended to files for this app.
"""
if self.app_module == 'django.contrib.admin':
return self.app_name
return None
def get_files(self, ignore_patterns=[]):
"""
Return a list containing the relative source paths for all files that
should be copied for an app.
"""
files = []
prefix = self.get_prefix()
for path in utils.get_files(self, ignore_patterns):
if prefix:
path = '/'.join([prefix, path])
files.append(path)
return files
|
{
"content_hash": "ee60521e6924fec33c24eacb10d63908",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 85,
"avg_line_length": 39.74418604651163,
"alnum_prop": 0.6012287887653599,
"repo_name": "GoSteven/Diary",
"id": "a7784b56453f318d00b5518b49425c3c9a64f795",
"size": "3418",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "django/contrib/staticfiles/storage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "96307"
},
{
"name": "Python",
"bytes": "4274191"
}
],
"symlink_target": ""
}
|
import shutil
import sys
from os.path import join
from test import base
class TestImportHook(base.TestPyConcreteBase):
def test_py_import_hook(self):
import os
assert os
import csv
assert csv
def test_py_relative_import(self):
import os
sys.path.insert(0, os.path.join(base.ROOT_DIR, 'test', 'data'))
from test.data.relative_import import main
self.assertEqual(main.data, 'main')
self.assertEqual(main.util.data, 'util')
sys.path.pop(0)
def test_pye_relative_import(self):
src_path = join(base.ROOT_DIR, 'test', 'data', 'relative_import')
shutil.copytree(src_path, join(self.tmp_dir, 'relative_import'))
base.touch(join(self.tmp_dir, '__init__.py'))
self.lib_compile_pye(self.tmp_dir, remove_py=True, remove_pyc=True)
from relative_import import main
self.assertEqual(main.data, 'main')
self.assertEqual(main.util.data, 'util')
|
{
"content_hash": "fbec9bb23f1e59e0a678c56a074a65b6",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 75,
"avg_line_length": 27.416666666666668,
"alnum_prop": 0.6362715298885512,
"repo_name": "Falldog/pyconcrete",
"id": "9081479a4d543d132e8e2c18a2ea7c76a73fc79c",
"size": "1610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_import_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1173"
},
{
"name": "C",
"bytes": "132942"
},
{
"name": "CMake",
"bytes": "4598"
},
{
"name": "Dockerfile",
"bytes": "2388"
},
{
"name": "PowerShell",
"bytes": "4022"
},
{
"name": "Python",
"bytes": "72171"
},
{
"name": "Shell",
"bytes": "1716"
}
],
"symlink_target": ""
}
|
"""
The __init__ module of the protocol package.
This module contains some functions common to all subpackages.
Attributes:
DATA_BLOCK (str): Data block type string.
METADATA_BLOCK (str): Metadata block type string.
build (function): Message building function.
parse (function): message parsing function.
"""
import msgpack
import struct
import zlib
DATA_BLOCK = 'data'
METADATA_BLOCK = 'metadata'
def parse(message):
"""
Parse a received message.
Args:
message (str): The received message.
Return:
dict: The parsed message.
"""
return msgpack.loads(zlib.decompress(message))
def build(message):
"""
Build a message to send.
Args:
message (dict): The message to send.
Return:
str: The message.
"""
return zlib.compress(msgpack.dumps(message))
def __message(message_type, **kwargs):
"""
Return a message as string.
Args:
message_type (str): The message type.
        **kwargs: Keyword arguments used to build the message.
Return:
str: The message as a string.
"""
kwargs['type'] = message_type
return wrap(build(kwargs))
def wrap(string):
"""
Wrap a string in a message frame for sending it in a socket.
Args:
string (str): The string to wrap.
Returns:
        str: The wrapped string.
"""
return struct.pack('>L', len(string)) + string
def get_size(string):
"""
    Return the size of a message wrapped by the wrap protocol.
    Args:
        string (str): The wrapped string.
Returns:
int: the size of the message.
"""
return struct.unpack('>L', string[:4])[0]
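if __name__ == '__main__':
    # Round-trip sketch (not part of the original module), assuming the
    # Python 2-era msgpack/zlib APIs this package was written against.
    framed = __message(DATA_BLOCK, block_number=1, content='abc')
    assert get_size(framed) == len(framed) - 4  # 4-byte big-endian length prefix
    assert parse(framed[4:])['type'] == DATA_BLOCK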
|
{
"content_hash": "8a5bce6277ac49da1aa3fb201887db4f",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 64,
"avg_line_length": 20.576470588235296,
"alnum_prop": 0.5900514579759862,
"repo_name": "doctoromer/haya-data",
"id": "568264d93d7d6ff5b7f0be8e7ef7f0efff9a58d4",
"size": "1749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/protocol/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "266693"
}
],
"symlink_target": ""
}
|
def extractBarnnnBlogspotCom(item):
'''
Parser for 'barnnn.blogspot.com'
'''
if 'Voice Drama' in item['tags']:
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if 'Lower Bound Volume' in item['title'] and vol is None:
vol = 2
if 'Upper Bound Volume' in item['title'] and vol is None:
vol = 1
tagmap = [
('yuri in which the world will end in ten days', 'yuri in which the world will end in ten days', 'translated'),
('Monster Hunter: Cross Soul', 'Monster Hunter: Cross Soul', 'translated'),
('The Girl Who Ate The Death God', 'The Girl Who Ate The Death God', 'translated'),
('kino\'s journey', 'Kino\'s Journey', 'translated'),
('Cross Road', 'Cross Road: In Their Cases', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
{
"content_hash": "cedc8dc6fbcbb47d18a1d70b5641ea0c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 140,
"avg_line_length": 39.53333333333333,
"alnum_prop": 0.5868465430016864,
"repo_name": "fake-name/ReadableWebProxy",
"id": "344522d525095cb68d61bede63575c5a31ef1561",
"size": "1186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractBarnnnBlogspotCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
import datetime
import unittest
import mock
from airflow.models.dag import DAG
from airflow.providers.microsoft.azure.sensors.wasb import WasbBlobSensor, WasbPrefixSensor
class TestWasbBlobSensor(unittest.TestCase):
_config = {
'container_name': 'container',
'blob_name': 'blob',
'wasb_conn_id': 'conn_id',
'timeout': 100,
}
def setUp(self):
args = {
'owner': 'airflow',
'start_date': datetime.datetime(2017, 1, 1)
}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
sensor = WasbBlobSensor(
task_id='wasb_sensor_1',
dag=self.dag,
**self._config
)
self.assertEqual(sensor.container_name, self._config['container_name'])
self.assertEqual(sensor.blob_name, self._config['blob_name'])
self.assertEqual(sensor.wasb_conn_id, self._config['wasb_conn_id'])
self.assertEqual(sensor.check_options, {})
self.assertEqual(sensor.timeout, self._config['timeout'])
sensor = WasbBlobSensor(
task_id='wasb_sensor_2',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
self.assertEqual(sensor.check_options, {'timeout': 2})
@mock.patch('airflow.providers.microsoft.azure.sensors.wasb.WasbHook',
autospec=True)
def test_poke(self, mock_hook):
mock_instance = mock_hook.return_value
sensor = WasbBlobSensor(
task_id='wasb_sensor',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
sensor.poke(None)
mock_instance.check_for_blob.assert_called_once_with(
'container', 'blob', timeout=2
)
class TestWasbPrefixSensor(unittest.TestCase):
_config = {
'container_name': 'container',
'prefix': 'prefix',
'wasb_conn_id': 'conn_id',
'timeout': 100,
}
def setUp(self):
args = {
'owner': 'airflow',
'start_date': datetime.datetime(2017, 1, 1)
}
self.dag = DAG('test_dag_id', default_args=args)
def test_init(self):
sensor = WasbPrefixSensor(
task_id='wasb_sensor_1',
dag=self.dag,
**self._config
)
self.assertEqual(sensor.container_name, self._config['container_name'])
self.assertEqual(sensor.prefix, self._config['prefix'])
self.assertEqual(sensor.wasb_conn_id, self._config['wasb_conn_id'])
self.assertEqual(sensor.check_options, {})
self.assertEqual(sensor.timeout, self._config['timeout'])
sensor = WasbPrefixSensor(
task_id='wasb_sensor_2',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
self.assertEqual(sensor.check_options, {'timeout': 2})
@mock.patch('airflow.providers.microsoft.azure.sensors.wasb.WasbHook',
autospec=True)
def test_poke(self, mock_hook):
mock_instance = mock_hook.return_value
sensor = WasbPrefixSensor(
task_id='wasb_sensor',
dag=self.dag,
check_options={'timeout': 2},
**self._config
)
sensor.poke(None)
mock_instance.check_for_prefix.assert_called_once_with(
'container', 'prefix', timeout=2
)
|
{
"content_hash": "4b7bba75f34db3d16bc3a00d3da4a9b4",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 91,
"avg_line_length": 31.63302752293578,
"alnum_prop": 0.5672853828306265,
"repo_name": "wooga/airflow",
"id": "74dc0f17f0692cceaed65b106542db9e065bc004",
"size": "4238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/microsoft/azure/sensors/test_wasb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4084"
},
{
"name": "HTML",
"bytes": "128446"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5879650"
},
{
"name": "Shell",
"bytes": "41820"
}
],
"symlink_target": ""
}
|
from fabric.api import *
root_dir = '/srv/http'
# the user to use for the remote commands
env.user = 'www-data'
# the servers where the commands are executed
env.hosts = ['c-hans.de']
def pack():
# create a new source distribution as tarball
local('python setup.py sdist --formats=gztar', capture=False)
def deploy():
# figure out the release name and version
dist = local('python setup.py --fullname', capture=True).strip()
# upload the source tarball to the temporary folder on the server
put('dist/%s.tar.gz' % dist, '/tmp/mensafeed.tar.gz')
# create a place where we can unzip the tarball, then enter
# that directory and unzip it
run('tar xzf /tmp/mensafeed.tar.gz -C /tmp')
with cd('/tmp/%s' % dist):
# now setup the package with our virtual environment's
# python interpreter
run('%s/mensafeed/env/bin/python setup.py install' % root_dir)
run('rsync -r --delete static/ %s/mensafeed/static' % root_dir)
run('cp fetch.py %s/mensafeed/fetch.py' % root_dir)
# now that all is set up, delete the folder again
run('rm -rf /tmp/%s /tmp/mensafeed.tar.gz' % dist)
# restart gunicorn webserver
#run('supervisorctl restart mensafeed')
|
{
"content_hash": "759b2f1ec66784dad32dfc7a4d16694a",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 71,
"avg_line_length": 34.361111111111114,
"alnum_prop": 0.6645109135004043,
"repo_name": "christianhans/mensafeed",
"id": "6e5129dfb5713a0a4b60c9bbbd816094f637b5e1",
"size": "1237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "539"
},
{
"name": "JavaScript",
"bytes": "430"
},
{
"name": "Python",
"bytes": "19142"
}
],
"symlink_target": ""
}
|
from proxy import Proxy
from .basespider import BaseSpider
from scrapy.selector import Selector
class ProxyRoxSpider(BaseSpider):
name = 'proxyrox'
def __init__(self, *a, **kwargs):
super(ProxyRoxSpider, self).__init__(*a, **kwargs)
self.urls = ['https://proxyrox.com/?p=%s' % n for n in range(1, 10)]
self.headers = {
'Host': 'proxyrox.com',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:50.0) Gecko/20100101 Firefox/50.0',
}
self.is_record_web_page = False
self.init()
def parse_page(self, response):
super(ProxyRoxSpider, self).parse_page(response)
data = response.xpath('//tr[@class="fat"]').extract()
for i, d in enumerate(data):
sel = Selector(text = d)
ip_port = sel.xpath('//td/a/text()').extract_first()
ip = ip_port.split(':')[0]
port = ip_port.split(':')[1]
country = sel.xpath('//td/span[@class="region"]/text()').extract_first()
anonymity = sel.xpath('//td/span/text()').extract_first()
proxy = Proxy()
proxy.set_value(
ip = ip,
port = port,
country = country,
anonymity = anonymity,
source = self.name
)
self.add_proxy(proxy = proxy)
|
{
"content_hash": "798ed7df88c6286f5dbece5dc540a05f",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 111,
"avg_line_length": 32.95454545454545,
"alnum_prop": 0.5206896551724138,
"repo_name": "meihuanyu/rental",
"id": "275944fdaa0ec028f9eccdf16a60bf8a3b0806f9",
"size": "1466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipproxytool/spiders/proxy/proxyrox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "104496"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from django.test import ignore_warnings
from django.utils.deprecation import RemovedInDjango50Warning
with ignore_warnings(category=RemovedInDjango50Warning):
from django.utils.baseconv import (
BaseConverter, base2, base16, base36, base56, base62, base64,
)
# RemovedInDjango50Warning
class TestBaseConv(TestCase):
def test_baseconv(self):
nums = [-10 ** 10, 10 ** 10, *range(-100, 100)]
for converter in [base2, base16, base36, base56, base62, base64]:
for i in nums:
self.assertEqual(i, converter.decode(converter.encode(i)))
def test_base11(self):
base11 = BaseConverter('0123456789-', sign='$')
self.assertEqual(base11.encode(1234), '-22')
self.assertEqual(base11.decode('-22'), 1234)
self.assertEqual(base11.encode(-1234), '$-22')
self.assertEqual(base11.decode('$-22'), -1234)
def test_base20(self):
base20 = BaseConverter('0123456789abcdefghij')
self.assertEqual(base20.encode(1234), '31e')
self.assertEqual(base20.decode('31e'), 1234)
self.assertEqual(base20.encode(-1234), '-31e')
self.assertEqual(base20.decode('-31e'), -1234)
def test_base64(self):
self.assertEqual(base64.encode(1234), 'JI')
self.assertEqual(base64.decode('JI'), 1234)
self.assertEqual(base64.encode(-1234), '$JI')
self.assertEqual(base64.decode('$JI'), -1234)
def test_base7(self):
base7 = BaseConverter('cjdhel3', sign='g')
self.assertEqual(base7.encode(1234), 'hejd')
self.assertEqual(base7.decode('hejd'), 1234)
self.assertEqual(base7.encode(-1234), 'ghejd')
self.assertEqual(base7.decode('ghejd'), -1234)
def test_exception(self):
with self.assertRaises(ValueError):
BaseConverter('abc', sign='a')
self.assertIsInstance(BaseConverter('abc', sign='d'), BaseConverter)
def test_repr(self):
base7 = BaseConverter('cjdhel3', sign='g')
self.assertEqual(repr(base7), '<BaseConverter: base7 (cjdhel3)>')
|
{
"content_hash": "17cc950fdb0bdcd08cbf8adb301ab78b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 76,
"avg_line_length": 38.30909090909091,
"alnum_prop": 0.6483151400094922,
"repo_name": "ghickman/django",
"id": "989e1eb0bf5e04564f13154c034b50cb14fb7e67",
"size": "2107",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "tests/utils_tests/test_baseconv.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52334"
},
{
"name": "HTML",
"bytes": "170436"
},
{
"name": "JavaScript",
"bytes": "255321"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11414242"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
"""
This module contains the default values for all settings used by Scrapy.
For more information about these settings you can read the settings
documentation in docs/topics/settings.rst
Scrapy developers, if you add a setting here remember to:
* add it in alphabetical order
* group similar settings without leaving blank lines
* add its documentation to the available settings documentation
(docs/topics/settings.rst)
"""
import os
import sys
from importlib import import_module
from os.path import join, abspath, dirname
AJAXCRAWL_ENABLED = False
BOT_NAME = 'scrapybot'
CLOSESPIDER_TIMEOUT = 0
CLOSESPIDER_PAGECOUNT = 0
CLOSESPIDER_ITEMCOUNT = 0
CLOSESPIDER_ERRORCOUNT = 0
COMMANDS_MODULE = ''
COMPRESSION_ENABLED = True
CONCURRENT_ITEMS = 100
CONCURRENT_REQUESTS = 16
CONCURRENT_REQUESTS_PER_DOMAIN = 8
CONCURRENT_REQUESTS_PER_IP = 0
COOKIES_ENABLED = True
COOKIES_DEBUG = False
DEFAULT_ITEM_CLASS = 'scrapy.item.Item'
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
}
DEPTH_LIMIT = 0
DEPTH_STATS = True
DEPTH_PRIORITY = 0
DNSCACHE_ENABLED = True
DOWNLOAD_DELAY = 0
DOWNLOAD_HANDLERS = {}
DOWNLOAD_HANDLERS_BASE = {
'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',
'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',
'https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',
's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler',
'ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler',
}
DOWNLOAD_TIMEOUT = 180 # 3mins
DOWNLOADER_DEBUG = False
DOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory'
DOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory'
DOWNLOADER_MIDDLEWARES = {}
DOWNLOADER_MIDDLEWARES_BASE = {
# Engine side
'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350,
'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
'scrapy.contrib.downloadermiddleware.ajaxcrawl.AjaxCrawlMiddleware': 560,
'scrapy.contrib.downloadermiddleware.redirect.MetaRefreshMiddleware': 580,
'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 590,
'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830,
'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
# Downloader side
}
DOWNLOADER_STATS = True
DUPEFILTER_CLASS = 'scrapy.dupefilter.RFPDupeFilter'
try:
EDITOR = os.environ['EDITOR']
except KeyError:
if sys.platform == 'win32':
EDITOR = '%s -m idlelib.idle'
else:
EDITOR = 'vi'
EXTENSIONS = {}
EXTENSIONS_BASE = {
'scrapy.contrib.corestats.CoreStats': 0,
'scrapy.webservice.WebService': 0,
'scrapy.telnet.TelnetConsole': 0,
'scrapy.contrib.memusage.MemoryUsage': 0,
'scrapy.contrib.memdebug.MemoryDebugger': 0,
'scrapy.contrib.closespider.CloseSpider': 0,
'scrapy.contrib.feedexport.FeedExporter': 0,
'scrapy.contrib.logstats.LogStats': 0,
'scrapy.contrib.spiderstate.SpiderState': 0,
'scrapy.contrib.throttle.AutoThrottle': 0,
}
FEED_URI = None
FEED_URI_PARAMS = None # a function to extend uri arguments
FEED_FORMAT = 'jsonlines'
FEED_STORE_EMPTY = False
FEED_STORAGES = {}
FEED_STORAGES_BASE = {
'': 'scrapy.contrib.feedexport.FileFeedStorage',
'file': 'scrapy.contrib.feedexport.FileFeedStorage',
'stdout': 'scrapy.contrib.feedexport.StdoutFeedStorage',
's3': 'scrapy.contrib.feedexport.S3FeedStorage',
'ftp': 'scrapy.contrib.feedexport.FTPFeedStorage',
}
FEED_EXPORTERS = {}
FEED_EXPORTERS_BASE = {
'json': 'scrapy.contrib.exporter.JsonItemExporter',
'jsonlines': 'scrapy.contrib.exporter.JsonLinesItemExporter',
'csv': 'scrapy.contrib.exporter.CsvItemExporter',
'xml': 'scrapy.contrib.exporter.XmlItemExporter',
'marshal': 'scrapy.contrib.exporter.MarshalItemExporter',
'pickle': 'scrapy.contrib.exporter.PickleItemExporter',
}
HTTPCACHE_ENABLED = False
HTTPCACHE_DIR = 'httpcache'
HTTPCACHE_IGNORE_MISSING = False
HTTPCACHE_STORAGE = 'scrapy.contrib.httpcache.FilesystemCacheStorage'
HTTPCACHE_EXPIRATION_SECS = 0
HTTPCACHE_IGNORE_HTTP_CODES = []
HTTPCACHE_IGNORE_SCHEMES = ['file']
HTTPCACHE_DBM_MODULE = 'anydbm'
HTTPCACHE_POLICY = 'scrapy.contrib.httpcache.DummyPolicy'
ITEM_PROCESSOR = 'scrapy.contrib.pipeline.ItemPipelineManager'
ITEM_PIPELINES = {}
ITEM_PIPELINES_BASE = {}
LOG_ENABLED = True
LOG_ENCODING = 'utf-8'
LOG_FORMATTER = 'scrapy.logformatter.LogFormatter'
LOG_STDOUT = False
LOG_LEVEL = 'DEBUG'
LOG_FILE = None
LOG_UNSERIALIZABLE_REQUESTS = False
LOGSTATS_INTERVAL = 60.0
MAIL_DEBUG = False
MAIL_HOST = 'localhost'
MAIL_PORT = 25
MAIL_FROM = 'scrapy@localhost'
MAIL_PASS = None
MAIL_USER = None
MEMDEBUG_ENABLED = False # enable memory debugging
MEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown
MEMUSAGE_ENABLED = False
MEMUSAGE_LIMIT_MB = 0
MEMUSAGE_NOTIFY_MAIL = []
MEMUSAGE_REPORT = False
MEMUSAGE_WARNING_MB = 0
METAREFRESH_ENABLED = True
METAREFRESH_MAXDELAY = 100
NEWSPIDER_MODULE = ''
RANDOMIZE_DOWNLOAD_DELAY = True
REDIRECT_ENABLED = True
REDIRECT_MAX_TIMES = 20 # uses Firefox default setting
REDIRECT_PRIORITY_ADJUST = +2
REFERER_ENABLED = True
RETRY_ENABLED = True
RETRY_TIMES = 2 # initial response + 2 retries = 3 requests
RETRY_HTTP_CODES = [500, 502, 503, 504, 400, 408]
RETRY_PRIORITY_ADJUST = -1
ROBOTSTXT_OBEY = False
SCHEDULER = 'scrapy.core.scheduler.Scheduler'
SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleLifoDiskQueue'
SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.LifoMemoryQueue'
SPIDER_MANAGER_CLASS = 'scrapy.spidermanager.SpiderManager'
SPIDER_MIDDLEWARES = {}
SPIDER_MIDDLEWARES_BASE = {
# Engine side
'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50,
'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700,
'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800,
'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900,
# Spider side
}
SPIDER_MODULES = []
STATS_CLASS = 'scrapy.statscol.MemoryStatsCollector'
STATS_DUMP = True
STATSMAILER_RCPTS = []
TEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates'))
URLLENGTH_LIMIT = 2083
USER_AGENT = 'Scrapy/%s (+http://scrapy.org)' % import_module('scrapy').__version__
TELNETCONSOLE_ENABLED = 1
TELNETCONSOLE_PORT = [6023, 6073]
TELNETCONSOLE_HOST = '0.0.0.0'
WEBSERVICE_ENABLED = True
WEBSERVICE_LOGFILE = None
WEBSERVICE_PORT = [6080, 7030]
WEBSERVICE_HOST = '0.0.0.0'
WEBSERVICE_RESOURCES = {}
WEBSERVICE_RESOURCES_BASE = {
'scrapy.contrib.webservice.crawler.CrawlerResource': 1,
'scrapy.contrib.webservice.enginestatus.EngineStatusResource': 1,
'scrapy.contrib.webservice.stats.StatsResource': 1,
}
SPIDER_CONTRACTS = {}
SPIDER_CONTRACTS_BASE = {
'scrapy.contracts.default.UrlContract': 1,
'scrapy.contracts.default.ReturnsContract': 2,
'scrapy.contracts.default.ScrapesContract': 3,
}
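# Illustrative note (not part of the defaults): a project overrides these
# values from its own settings module, and each *_BASE dict above is merged
# with the corresponding user-facing dict. For example, a hypothetical project
# could disable a base downloader middleware by mapping it to None:
#   DOWNLOADER_MIDDLEWARES = {
#       'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': None,
#   }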
|
{
"content_hash": "5a9adb536a62f7498a30aadb8a63579d",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 100,
"avg_line_length": 30.2984496124031,
"alnum_prop": 0.75399769732634,
"repo_name": "xtmhm2000/scrapy-0.22",
"id": "8acca09285439019b514529a3241492de2aa570b",
"size": "7817",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scrapy/settings/default_settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9681"
},
{
"name": "Makefile",
"bytes": "2225"
},
{
"name": "Python",
"bytes": "1183205"
},
{
"name": "Shell",
"bytes": "1723"
}
],
"symlink_target": ""
}
|
import string,random
import hashlib
import datetime
from faker import Faker
from web.util.db import db
from .feed import FeedSite,Feed
class UserInfo(db.EmbeddedDocument):
nickname = db.StringField()
class UserSetting(db.EmbeddedDocument):
unread_only = db.BooleanField(default=True)
theme = db.StringField(default="google")
class User(db.Document):
username = db.StringField()
password = db.StringField()
activate = db.BooleanField(default=False)
info = db.EmbeddedDocumentField("UserInfo")
setting = db.EmbeddedDocumentField("UserSetting")
type = "user"
meta = {
'allow_inheritance': True,
'index_types': False,
'indexes': [
]
}
@classmethod
def validate_user(cls, username=None, password=None):
return cls.objects(username=username,password=password).first()
@classmethod
def gen_user(cls):
return cls(info=UserInfo(), setting=UserSetting()).save()
@classmethod
def get_user_by_id(cls,id):
return cls.objects(id=id).first()
@classmethod
def get_user_by_nickname(cls,nickname):
return cls.objects(info__nickname=nickname).first()
def is_activate(self):
return self.activate
def activate_me(self):
self.activate = True
return self.save()
def get_rencent_unread_feeds(self):
from user_feed import ReadFeed
return ReadFeed.get_rencent_unread_feeds_by_user(user=self)
def get_unread_feeds_on_feedsite(self, feedsite=None, limit=15, page=1):
from user_feed import ReadFeed
return ReadFeed.get_rencent_unread_feeds_by_user_feedsite(user=self,
feedsite=feedsite,
limit=limit,
page=page)
def get_feedsite(self):
from user_feed import Sub
return Sub.get_feedsite_by_user(user=self)
#
def has_feedsite(self,feedsite):
from user_feed import Sub
return Sub.exist_sub(self,feedsite)
def read_feed(self,feed):
from user_feed import ReadFeed,Sub
rf = ReadFeed.get_readfeed_by_feed_and_user(feed=feed,
user=self)
if rf is None:
return
if rf.unread:
sub = Sub.get_sub_by_user_feedsite(user=self,
feedsite=feed.feedsite)
            sub.unread_counter -= 1
sub.save()
rf.unread = False
rf.safe_save()
def unread_feed(self,feed):
pass
def has_read(self,feed=None):
from user_feed import ReadFeed
rf = ReadFeed.get_readfeed_by_feed_and_user(feed=feed,user=self)
return not rf.unread
def has_feed(self, feed=None):
from user_feed import ReadFeed
rf = ReadFeed.get_readfeed_by_feed_and_user(feed=feed,user=self)
return rf is not None
def has_stared_feed(self, feed=None):
from .user_feed import StarFeed
return StarFeed.is_user_star_feed(user=self, feed=feed)
def star_feed(self, feed):
from .user_feed import StarFeed
StarFeed.user_star_feed(user=self, feed=feed)
def unstar_feed(self, feed):
from .user_feed import StarFeed
StarFeed.user_unstar_feed(user=self, feed=feed)
def get_star_feeds(self):
from .user_feed import StarFeed
return StarFeed.get_feed_by_user(user=self)
def sub_feedsite(self, feedsite=None):
from user_feed import Sub
from feed import FeedSite
if self.has_feedsite(feedsite):
return None
Sub.add_sub(self,feedsite)
return feedsite
def unsub_feedsite(self, feedsite=None):
from user_feed import Sub, ReadFeed
from feed import FeedSite
if self.has_feedsite(feedsite):
Sub.objects(user=self,feedsite=feedsite).delete()
# ReadFeed.objects(user=self, feedsite=feedsite).delete()
return True
def add_feedsite(self,feed_url=None):
from user_feed import Sub
from feed import FeedSite
fs = FeedSite.get_from_feed_url(feed_url)
if self.has_feedsite(fs):
return None
fs = FeedSite.add_from_feed_url(feed_url)
Sub.add_sub(self,fs)
return fs
def get_unread_counter(self):
from user_feed import Sub
return Sub.get_unread_counter_by_user(user=self)
def get_unread_counter_on_feedsite(self,feedsite):
from user_feed import Sub
counter = Sub.get_unread_counter_by_user_feedsite(user=self,
feedsite=feedsite)
return counter
def to_dict(self):
return {"id":str(self.id),
"nickname":self.info.nickname,
"type":self.type
}
@property
def nickname(self):
return self.info.nickname
class BasicUser(User):
type = "basic"
@classmethod
def register(cls, username=None,
nickname=None, password=None):
from flask import g
g.user.username = username
g.user.info.nickname = nickname
g.user.password = password
g.user.activate = True
return g.user.save()
def upgrade(self):
pass
def subscribe(self,site):
pass
@classmethod
def get_guest(cls):
return cls.objects(username="guest").first()
class AdvancedUser(User):
pass
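# Illustrative usage sketch (assumes a configured MongoEngine connection; the
# username and password hash below are hypothetical):
#   user = User.gen_user()
#   user.username = 'alice'
#   user.info.nickname = 'alice'
#   user.password = hashlib.sha1('secret').hexdigest()
#   user.activate_me()
#   User.validate_user(username='alice', password=user.password)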
|
{
"content_hash": "ceb55f4e5ce211cce5985bc5e9b89772",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 84,
"avg_line_length": 28.26865671641791,
"alnum_prop": 0.5850052798310454,
"repo_name": "zhy0216/Conuread",
"id": "dbeaa1ab539cc74a6d8dd7c243833c21d70af4c3",
"size": "5707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/model/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5158"
},
{
"name": "HTML",
"bytes": "13165"
},
{
"name": "JavaScript",
"bytes": "3019"
},
{
"name": "Python",
"bytes": "29040"
}
],
"symlink_target": ""
}
|
"""
"""
import argparse
import subprocess
import tempfile
import logging
import os
from bingo import bingo
from bingo import card_maker
def get_words_from_textfile(textfile_handler):
"""Yield the words by parsing a given text fileahandler."""
for line in textfile_handler:
line = line.rstrip()
yield line
def make_bingo_card_deck(words, size, count):
bingo_generator = bingo.BingoGenerator(size=size)
bingo_generator.words = words
bingo_card_maker = card_maker.LaTeXCardMaker(size)
bingo_generator.maker = bingo_card_maker
tempdir = tempfile.mkdtemp()
pages = []
for index, card in enumerate(bingo_generator.make_lots_of_cards(count)):
filename = os.path.join(tempdir, "%s.tex" % index)
with open(filename, 'w') as f:
f.write(card)
cmd = "pdflatex --output-directory=%s %s" % (tempdir, filename)
logging.info("Generating PDF: %s" % cmd)
subprocess.call(cmd, shell=True)
# subprocess.Popen(cmd, shell=True,
# stdout=subprocess.PIPE,
# cwd=tempdir).communicate()
pages.append(os.path.join(tempdir, "%s.pdf" % index))
_merge_PDFs(pages)
def _merge_PDFs(pages):
"""Merge the given PDFs into one."""
    logging.info("Merging PDF pages: %s", pages)
output_filename = "bingo_deck.pdf"
output_filepath = output_filename
has_pyPdf = False
try:
import pyPdf
has_pyPdf = True
except ImportError:
pass
if has_pyPdf:
logging.info("Using 'pyPdf' to join PDFs")
output = pyPdf.PdfFileWriter()
inputfiles = []
for page in pages:
inputstream = file(page, "rb")
inputfiles.append(inputstream)
reader = pyPdf.PdfFileReader(inputstream)
output.addPage(reader.getPage(0))
outputStream = file(output_filepath, "wb")
output.write(outputStream)
outputStream.close()
for f in inputfiles:
f.close()
else:
logging.warning("PyPDF not installed, cannot merge PDF slides")
def main():
parser = argparse.ArgumentParser(description="Bingo generator")
parser.add_argument("-s", "--size",
type=int,
dest="size",
metavar="SIZE",
required=False,
default=5,
help="The size of the bingo grid")
parser.add_argument("-l", "--list", metavar="LIST",
dest="word_list",
type=argparse.FileType('r'),
help='A list of words')
parser.add_argument("-c", "--count", metavar="COUNT",
dest="count",
required=False,
default=1,
type=int,
help='Number of grids to generate')
parser.add_argument("words",
nargs='*',
metavar="WORDS",
help='A list of words')
args = parser.parse_args()
size = args.size
log_level = logging.INFO
logging.basicConfig(level=log_level)
if args.word_list:
words = list(get_words_from_textfile(args.word_list))
make_bingo_card_deck(words, size, args.count)
elif args.words:
make_bingo_card_deck(args.words, size, args.count)
else:
parser.print_help()
if __name__ == "__main__":
main()
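# Example invocation (the word list file is hypothetical):
#   python make_bingo.py --size 5 --count 10 --list words.txt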
|
{
"content_hash": "d2e8051a5960d496426f9a1a50ab7862",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 76,
"avg_line_length": 32.00917431192661,
"alnum_prop": 0.5543135568930926,
"repo_name": "Commonists/bingo",
"id": "c5ffc1f80ebfc1a37b791fe3313e81bf65123b4e",
"size": "3489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/make_bingo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8530"
},
{
"name": "TeX",
"bytes": "1352"
}
],
"symlink_target": ""
}
|
"""
Summarize the deny log messages from ipfw.
Use the given file names, or /var/log/security if no filenames are given.
This program can handle compressed files like /var/log/security.?.bz2.
"""
import argparse
import bz2
import logging
import re
import sys
__version__ = "2020.03.31"
def main():
"""
Entry point for denylog.py.
"""
args = setup()
if not args.files:
args.files = ["/var/log/security"]
reps = " IP: {:16s} port: {:10s} rule: {}"
serv = services()
for f in args.files:
print("File:", f)
try:
matches = parselogfile(f)
except FileNotFoundError as e:
print(e)
continue
if not matches:
print(" Nothing to report.")
continue
for rule, IP, port in matches:
print(reps.format(IP + ",", serv[int(port)] + ",", rule))
def setup():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-v", "--version", action="version", version=__version__)
parser.add_argument(
"--log",
default="warning",
choices=["debug", "info", "warning", "error"],
help="logging level (defaults to 'warning')",
)
parser.add_argument(
"files", metavar="file", nargs="*", help="one or more files to process"
)
args = parser.parse_args(sys.argv[1:])
logging.basicConfig(
level=getattr(logging, args.log.upper(), None),
format="%(levelname)s: %(message)s",
)
return args
def services(filename="/etc/services"):
"""
Generate a dictionary of the available services from /etc/services.
Arguments:
filename: Name of the services file.
Returns:
A dict in the form of {25: 'smtp', 80: 'http', ...}
"""
with open(filename) as serv:
data = serv.read()
matches = re.findall("\n" + r"(\S+)\s+(\d+)/", data)
return {int(num): name for name, num in set(matches)}
def parselogfile(filename):
"""
Extract deny rules for incoming packets from file and parse them.
Arguments:
filename: Name of the file to read.
Returns:
A tuple of (rule, source IP, port) tuples
"""
if filename.endswith(".bz2"):
df = bz2.open(filename, "rt")
else:
df = open(filename)
data = df.read()
df.close()
patt = (
r"ipfw: (\d+) Deny (?:\S+) " r"(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d+).*in"
)
return tuple(set(re.findall(patt, data)))
if __name__ == "__main__":
main()
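# Example invocation (log file names are hypothetical):
#   python denylog.py /var/log/security /var/log/security.0.bz2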
|
{
"content_hash": "534e55d28b1bdc52bd112d839f85a5f4",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 86,
"avg_line_length": 25.84,
"alnum_prop": 0.5661764705882353,
"repo_name": "rsmith-nl/scripts",
"id": "3ddf3106343394e54bb665ed1bcaf645b7d9c6e4",
"size": "2839",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "denylog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "244258"
},
{
"name": "Shell",
"bytes": "22630"
}
],
"symlink_target": ""
}
|
import sys
# sys.path.append('<path to macfp-db>/macfp-db/Utilities/')
sys.path.append('../../../../../../macfp-db/Utilities/')
import macfp
import importlib
importlib.reload(macfp) # use for development (while making changes to macfp.py)
import matplotlib.pyplot as plt
macfp.dataplot(config_filename='UConn_FM_Burner_dataplot_config.csv',
institute='UConn',
expdir='../../../Experimental_Data/',
pltdir='./Plots/',
close_figs=True,
verbose=True,
plot_range=range(1000))
# plt.show()
|
{
"content_hash": "d05c20f800d5593fc484192af7f35e42",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 80,
"avg_line_length": 32.05555555555556,
"alnum_prop": 0.6065857885615251,
"repo_name": "rmcdermo/macfp-db",
"id": "56ba9350a6638cc9ddcb2d5c7c9694f45e83d70f",
"size": "821",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Extinction/FM_Burner/Computational_Results/2021/UConn/UConn_FM_Burner_plot_results.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "85703"
},
{
"name": "MATLAB",
"bytes": "68987"
},
{
"name": "Python",
"bytes": "88892"
},
{
"name": "TeX",
"bytes": "3022"
}
],
"symlink_target": ""
}
|
from revscoring.dependencies import DependentSet
from ...feature import Feature
from ...meta import aggregators, bools
from .diff import Diff
class Revision(DependentSet):
def __init__(self, name, revision_datasources):
super().__init__(name)
self.datasources = revision_datasources
self.sitelinks = aggregators.len(self.datasources.sitelinks)
"`int` : A count of sitelinks in the revision"
self.labels = aggregators.len(self.datasources.labels)
"`int` : A count of labels in the revision"
self.aliases = aggregators.len(self.datasources.aliases)
"`int` : A count of aliases in the revision"
self.descriptions = aggregators.len(self.datasources.descriptions)
"`int` : A count of descriptions in the revision"
self.properties = aggregators.len(self.datasources.properties)
"`int` : A count of properties in the revision"
self.claims = aggregators.len(self.datasources.claims)
"`int` : A count of claims in the revision"
self.sources = aggregators.len(self.datasources.sources)
"`int` : A count of sources in the revision"
self.reference_claims = aggregators.len(
self.datasources.reference_claims)
"`int` : A count of reference claims in the revision"
self.qualifiers = aggregators.len(self.datasources.qualifiers)
"`int` : A count of qualifiers in the revision"
self.badges = aggregators.len(self.datasources.badges)
"`int` : A count of badges in the revision"
if hasattr(self.datasources, "parent"):
self.parent = Revision(name + ".parent", self.datasources.parent)
"""
:class:`revscoring.features.wikibase.Revision` : The
parent (aka "previous") revision of the page.
"""
if hasattr(self.datasources, "diff"):
self.diff = Diff(name + ".diff", self.datasources.diff)
"""
:class:`~revscoring.features.wikibase.Diff` : The
difference between this revision and the parent revision.
"""
def has_property(self, property, name=None):
"""
Returns True if the specified property exists
:Parameters:
property : `str`
            The name of a property (usually preceded by "P")
name : `str`
A name to associate with the feature. If not set, the
feature's name will be 'has_property(<property>)'
"""
if name is None:
name = self._name + ".has_property({0})".format(repr(property))
return bools.item_in_set(property, self.datasources.properties,
name=name)
def has_property_value(self, property, value, name=None):
"""
Returns True if the specified property matches the provided value.
:Parameters:
property : `str`
            The name of a property (usually preceded by "P")
value : `mixed`
The value to match
name : `str`
A name to associate with the Feature. If not set, the
feature's name will be
'has_property_value(<property>, <value>)'
"""
if name is None:
name = self._name + ".has_property_value({0}, {1})" \
.format(repr(property), repr(value))
return HasPropertyValue(name, property, value, self.datasources.entity)
class HasPropertyValue(Feature):
def __init__(self, name, property, value, item_datasource):
self.property = property
self.value = value
super().__init__(name, self._process, returns=bool,
depends_on=[item_datasource])
def _process(self, item):
statements = item.properties.get(self.property, [])
return self.value in (str(s.claim.datavalue) for s in statements)
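# Illustrative usage sketch ("revision_datasources" and the P31/Q5 pair are
# assumptions for the example, not defined in this module):
#   revision = Revision("revision", revision_datasources)
#   is_human = revision.has_property_value("P31", "Q5")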
|
{
"content_hash": "a4aac2178fb9e2cab4ca964da47334e3",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 79,
"avg_line_length": 40.275510204081634,
"alnum_prop": 0.601216113503927,
"repo_name": "he7d3r/revscoring",
"id": "918e5be23ca2b9a15573e427536d5f260020c461",
"size": "3947",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "revscoring/features/wikibase/features/revision_oriented.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "19605"
},
{
"name": "Python",
"bytes": "341481"
}
],
"symlink_target": ""
}
|
"""AWS platform for notify component."""
import asyncio
import base64
import json
import logging
from homeassistant.components.notify import (
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
BaseNotificationService,
)
from homeassistant.const import CONF_PLATFORM, CONF_NAME
from homeassistant.helpers.json import JSONEncoder
from .const import (
CONF_CONTEXT,
CONF_CREDENTIAL_NAME,
CONF_PROFILE_NAME,
CONF_REGION,
CONF_SERVICE,
DATA_SESSIONS,
)
_LOGGER = logging.getLogger(__name__)
async def get_available_regions(hass, service):
"""Get available regions for a service."""
import aiobotocore
session = aiobotocore.get_session()
    # get_available_regions is not a coroutine since it does not perform
    # network I/O. But it still performs heavy file I/O, so run it in an
    # executor thread to avoid blocking the event loop.
return await hass.async_add_executor_job(session.get_available_regions, service)
async def async_get_service(hass, config, discovery_info=None):
"""Get the AWS notification service."""
if discovery_info is None:
_LOGGER.error("Please config aws notify platform in aws component")
return None
import aiobotocore
session = None
conf = discovery_info
service = conf[CONF_SERVICE]
region_name = conf[CONF_REGION]
available_regions = await get_available_regions(hass, service)
if region_name not in available_regions:
_LOGGER.error(
"Region %s is not available for %s service, must in %s",
region_name,
service,
available_regions,
)
return None
aws_config = conf.copy()
del aws_config[CONF_SERVICE]
del aws_config[CONF_REGION]
if CONF_PLATFORM in aws_config:
del aws_config[CONF_PLATFORM]
if CONF_NAME in aws_config:
del aws_config[CONF_NAME]
if CONF_CONTEXT in aws_config:
del aws_config[CONF_CONTEXT]
if not aws_config:
# no platform config, use the first aws component credential instead
if hass.data[DATA_SESSIONS]:
session = next(iter(hass.data[DATA_SESSIONS].values()))
else:
_LOGGER.error("Missing aws credential for %s", config[CONF_NAME])
return None
if session is None:
credential_name = aws_config.get(CONF_CREDENTIAL_NAME)
if credential_name is not None:
session = hass.data[DATA_SESSIONS].get(credential_name)
if session is None:
_LOGGER.warning("No available aws session for %s", credential_name)
del aws_config[CONF_CREDENTIAL_NAME]
if session is None:
profile = aws_config.get(CONF_PROFILE_NAME)
if profile is not None:
session = aiobotocore.AioSession(profile=profile)
del aws_config[CONF_PROFILE_NAME]
else:
session = aiobotocore.AioSession()
aws_config[CONF_REGION] = region_name
if service == "lambda":
context_str = json.dumps(
{"custom": conf.get(CONF_CONTEXT, {})}, cls=JSONEncoder
)
context_b64 = base64.b64encode(context_str.encode("utf-8"))
context = context_b64.decode("utf-8")
return AWSLambda(session, aws_config, context)
if service == "sns":
return AWSSNS(session, aws_config)
if service == "sqs":
return AWSSQS(session, aws_config)
# should not reach here since service was checked in schema
return None
class AWSNotify(BaseNotificationService):
"""Implement the notification service for the AWS service."""
def __init__(self, session, aws_config):
"""Initialize the service."""
self.session = session
self.aws_config = aws_config
class AWSLambda(AWSNotify):
"""Implement the notification service for the AWS Lambda service."""
service = "lambda"
def __init__(self, session, aws_config, context):
"""Initialize the service."""
super().__init__(session, aws_config)
self.context = context
async def async_send_message(self, message="", **kwargs):
"""Send notification to specified LAMBDA ARN."""
if not kwargs.get(ATTR_TARGET):
_LOGGER.error("At least one target is required")
return
cleaned_kwargs = {k: v for k, v in kwargs.items() if v is not None}
payload = {"message": message}
payload.update(cleaned_kwargs)
json_payload = json.dumps(payload)
async with self.session.create_client(
self.service, **self.aws_config
) as client:
tasks = []
for target in kwargs.get(ATTR_TARGET, []):
tasks.append(
client.invoke(
FunctionName=target,
Payload=json_payload,
ClientContext=self.context,
)
)
if tasks:
await asyncio.gather(*tasks)
class AWSSNS(AWSNotify):
"""Implement the notification service for the AWS SNS service."""
service = "sns"
async def async_send_message(self, message="", **kwargs):
"""Send notification to specified SNS ARN."""
if not kwargs.get(ATTR_TARGET):
_LOGGER.error("At least one target is required")
return
message_attributes = {
k: {"StringValue": json.dumps(v), "DataType": "String"}
for k, v in kwargs.items()
if v is not None
}
subject = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
async with self.session.create_client(
self.service, **self.aws_config
) as client:
tasks = []
for target in kwargs.get(ATTR_TARGET, []):
tasks.append(
client.publish(
TargetArn=target,
Message=message,
Subject=subject,
MessageAttributes=message_attributes,
)
)
if tasks:
await asyncio.gather(*tasks)
class AWSSQS(AWSNotify):
"""Implement the notification service for the AWS SQS service."""
service = "sqs"
async def async_send_message(self, message="", **kwargs):
"""Send notification to specified SQS ARN."""
if not kwargs.get(ATTR_TARGET):
_LOGGER.error("At least one target is required")
return
cleaned_kwargs = {k: v for k, v in kwargs.items() if v is not None}
message_body = {"message": message}
message_body.update(cleaned_kwargs)
json_body = json.dumps(message_body)
message_attributes = {}
for key, val in cleaned_kwargs.items():
message_attributes[key] = {
"StringValue": json.dumps(val),
"DataType": "String",
}
async with self.session.create_client(
self.service, **self.aws_config
) as client:
tasks = []
for target in kwargs.get(ATTR_TARGET, []):
tasks.append(
client.send_message(
QueueUrl=target,
MessageBody=json_body,
MessageAttributes=message_attributes,
)
)
if tasks:
await asyncio.gather(*tasks)
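# Illustrative Home Assistant service call for one of these platforms (the
# service name and ARN are hypothetical):
#   service: notify.aws_sns
#   data:
#     message: "Door opened"
#     target: arn:aws:sns:us-east-1:123456789012:alerts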
|
{
"content_hash": "6723e494a7b642b4e912477eb2cb9543",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 84,
"avg_line_length": 31.421940928270043,
"alnum_prop": 0.5847992480193367,
"repo_name": "fbradyirl/home-assistant",
"id": "fa1cf3fa36334567a47b897433b6c7ac6a69b686",
"size": "7447",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/aws/notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
}
|
from flask import Flask
from flask import escape
def build_flask_app():
flask_app = Flask(__name__, static_folder="../../dashboardapp/dist", static_url_path="/app")
    # The following settings will likely be overridden.
flask_app.config["DEBUG"] = False
flask_app.config["SECRET_KEY"] = "SECRET"
flask_app.config.from_pyfile("../config.py")
    # Mostly for debugging purposes, this route prints the site map so that we
    # can check which routes are registered.
@flask_app.route("/site-map")
def site_map():
lines = []
for rule in flask_app.url_map.iter_rules():
line = str(escape(repr(rule)))
lines.append(line)
ret = "<br>".join(lines)
return ret
return flask_app
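# Illustrative usage (host and port are hypothetical):
#   app = build_flask_app()
#   app.run(host="127.0.0.1", port=5000)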
|
{
"content_hash": "0b5d31b6a2e9f4393c4b00d947c1086c",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 96,
"avg_line_length": 29.46153846153846,
"alnum_prop": 0.6266318537859008,
"repo_name": "zstars/weblabdeusto",
"id": "6870bafbc3150513221a6ef1c4c9c9ea1889e371",
"size": "766",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/dashboard/dashboardserver/dashboardserver/flask_app_builder.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "4785"
},
{
"name": "ActionScript",
"bytes": "8508"
},
{
"name": "ApacheConf",
"bytes": "122186"
},
{
"name": "Batchfile",
"bytes": "7753"
},
{
"name": "C",
"bytes": "19456"
},
{
"name": "C#",
"bytes": "315160"
},
{
"name": "C++",
"bytes": "9547"
},
{
"name": "CSS",
"bytes": "150709"
},
{
"name": "CoffeeScript",
"bytes": "30909"
},
{
"name": "Go",
"bytes": "7076"
},
{
"name": "HTML",
"bytes": "452001"
},
{
"name": "Java",
"bytes": "1234794"
},
{
"name": "JavaScript",
"bytes": "1656027"
},
{
"name": "Makefile",
"bytes": "1571"
},
{
"name": "Mako",
"bytes": "824"
},
{
"name": "PHP",
"bytes": "155137"
},
{
"name": "Python",
"bytes": "3435335"
},
{
"name": "Shell",
"bytes": "2596"
},
{
"name": "Smarty",
"bytes": "20160"
},
{
"name": "VHDL",
"bytes": "5874"
}
],
"symlink_target": ""
}
|
import __builtin__
import __future__
import functools
import imp
import inspect
import json
from pathlib import Path
import optparse
import os
import os.path
import subprocess
import sys
# When build files are executed, the functions in this file tagged with
# @provide_for_build will be provided in the build file's local symbol table.
#
# When these functions are called from a build file, they will be passed
# a keyword parameter, build_env, which is a object with information about
# the environment of the build file which is currently being processed.
# It contains the following attributes:
#
# "dirname" - The directory containing the build file.
#
# "base_path" - The base path of the build file.
BUILD_FUNCTIONS = []
class SyncCookieState(object):
"""
Process-wide state used to enable Watchman sync cookies only on
the first query issued.
"""
def __init__(self):
self.use_sync_cookies = True
class BuildContextType(object):
"""
Identifies the type of input file to the processor.
"""
BUILD_FILE = 'build_file'
INCLUDE = 'include'
class BuildFileContext(object):
"""
The build context used when processing a build file.
"""
type = BuildContextType.BUILD_FILE
def __init__(self, base_path, dirname, allow_empty_globs, watchman_client,
watchman_watch_root, watchman_project_prefix, sync_cookie_state,
watchman_error):
self.globals = {}
self.includes = set()
self.base_path = base_path
self.dirname = dirname
self.allow_empty_globs = allow_empty_globs
self.watchman_client = watchman_client
self.watchman_watch_root = watchman_watch_root
self.watchman_project_prefix = watchman_project_prefix
self.sync_cookie_state = sync_cookie_state
self.watchman_error = watchman_error
self.rules = {}
class IncludeContext(object):
"""
The build context used when processing an include.
"""
type = BuildContextType.INCLUDE
def __init__(self):
self.globals = {}
self.includes = set()
class LazyBuildEnvPartial(object):
"""Pairs a function with a build environment in which it will be executed.
Note that while the function is specified via the constructor, the build
environment must be assigned after construction, for the build environment
currently being used.
To call the function with its build environment, use the invoke() method of
this class, which will forward the arguments from invoke() to the
underlying function.
"""
def __init__(self, func):
self.func = func
self.build_env = None
def invoke(self, *args, **kwargs):
"""Invokes the bound function injecting 'build_env' into **kwargs."""
updated_kwargs = kwargs.copy()
updated_kwargs.update({'build_env': self.build_env})
return self.func(*args, **updated_kwargs)
def provide_for_build(func):
BUILD_FUNCTIONS.append(func)
return func
def add_rule(rule, build_env):
assert build_env.type == BuildContextType.BUILD_FILE, (
"Cannot use `{}()` at the top-level of an included file."
.format(rule['buck.type']))
# Include the base path of the BUILD file so the reader consuming this
# JSON will know which BUILD file the rule came from.
if 'name' not in rule:
raise ValueError(
'rules must contain the field \'name\'. Found %s.' % rule)
rule_name = rule['name']
if rule_name in build_env.rules:
raise ValueError('Duplicate rule definition found. Found %s and %s' %
(rule, build_env.rules[rule_name]))
rule['buck.base_path'] = build_env.base_path
build_env.rules[rule_name] = rule
class memoized(object):
'''Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
'''
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
args_key = repr(args)
if args_key in self.cache:
return self.cache[args_key]
else:
value = self.func(*args)
self.cache[args_key] = value
return value
def __repr__(self):
'''Return the function's docstring.'''
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
@provide_for_build
def glob(includes, excludes=[], include_dotfiles=False, build_env=None):
assert build_env.type == BuildContextType.BUILD_FILE, (
"Cannot use `glob()` at the top-level of an included file.")
# Ensure the user passes lists of strings rather than just a string.
assert not isinstance(includes, basestring), \
"The first argument to glob() must be a list of strings."
assert not isinstance(excludes, basestring), \
"The excludes argument must be a list of strings."
results = None
if not includes:
results = []
elif build_env.watchman_client:
try:
results = glob_watchman(
includes,
excludes,
include_dotfiles,
build_env.base_path,
build_env.watchman_watch_root,
build_env.watchman_project_prefix,
build_env.sync_cookie_state,
build_env.watchman_client)
except build_env.watchman_error, e:
print >>sys.stderr, 'Watchman error, falling back to slow glob: ' + str(e)
try:
build_env.watchman_client.close()
            except Exception:
pass
build_env.watchman_client = None
if results is None:
search_base = Path(build_env.dirname)
results = glob_internal(
includes,
excludes,
include_dotfiles,
search_base)
assert build_env.allow_empty_globs or results, (
"glob(includes={includes}, excludes={excludes}, include_dotfiles={include_dotfiles}) " +
"returned no results. (allow_empty_globs is set to false in the Buck " +
"configuration)").format(
includes=includes,
excludes=excludes,
include_dotfiles=include_dotfiles)
return results
def format_watchman_query_params(includes, excludes, include_dotfiles, relative_root):
match_exprs = ["allof", "exists", ["anyof", ["type", "f"], ["type", "l"]]]
match_flags = {}
if include_dotfiles:
match_flags["includedotfiles"] = True
if includes:
match_exprs.append(
["anyof"] + [["match", i, "wholename", match_flags] for i in includes])
if excludes:
match_exprs.append(
["not",
["anyof"] + [["match", x, "wholename", match_flags] for x in excludes]])
return {
"relative_root": relative_root,
# Explicitly pass an empty path so Watchman queries only the tree of files
# starting at base_path.
"path": [''],
"fields": ["name"],
"expression": match_exprs,
}
@memoized
def glob_watchman(includes, excludes, include_dotfiles, base_path, watchman_watch_root,
watchman_project_prefix, sync_cookie_state, watchman_client):
assert includes, "The includes argument must be a non-empty list of strings."
if watchman_project_prefix:
relative_root = os.path.join(watchman_project_prefix, base_path)
else:
relative_root = base_path
query_params = format_watchman_query_params(
includes, excludes, include_dotfiles, relative_root)
# Sync cookies cause a massive overhead when issuing thousands of
# glob queries. Only enable them (by not setting sync_timeout to 0)
# for the very first request issued by this process.
if sync_cookie_state.use_sync_cookies:
sync_cookie_state.use_sync_cookies = False
else:
query_params["sync_timeout"] = 0
query = ["query", watchman_watch_root, query_params]
res = watchman_client.query(*query)
if res.get('warning'):
print >> sys.stderr, 'Watchman warning from query {}: {}'.format(
query,
res.get('warning'))
result = res.get('files', [])
return sorted(result)
def glob_internal(includes, excludes, include_dotfiles, search_base):
def includes_iterator():
for pattern in includes:
for path in search_base.glob(pattern):
# TODO(user): Handle hidden files on Windows.
if path.is_file() and (include_dotfiles or not path.name.startswith('.')):
yield path.relative_to(search_base)
def is_special(pat):
return "*" in pat or "?" in pat or "[" in pat
non_special_excludes = set()
match_excludes = set()
for pattern in excludes:
if is_special(pattern):
match_excludes.add(pattern)
else:
non_special_excludes.add(pattern)
def exclusion(path):
if path.as_posix() in non_special_excludes:
return True
for pattern in match_excludes:
result = path.match(pattern, match_entire=True)
if result:
return True
return False
return sorted(set([str(p) for p in includes_iterator() if not exclusion(p)]))
@provide_for_build
def get_base_path(build_env=None):
"""Get the base path to the build file that was initially evaluated.
This function is intended to be used from within a build defs file that
likely contains macros that could be called from any build file.
Such macros may need to know the base path of the file in which they
are defining new build rules.
Returns: a string, such as "java/com/facebook". Note there is no
trailing slash. The return value will be "" if called from
the build file in the root of the project.
"""
assert build_env.type == BuildContextType.BUILD_FILE, (
"Cannot use `get_base_path()` at the top-level of an included file.")
return build_env.base_path
@provide_for_build
def add_deps(name, deps=[], build_env=None):
assert build_env.type == BuildContextType.BUILD_FILE, (
"Cannot use `add_deps()` at the top-level of an included file.")
if name not in build_env.rules:
raise ValueError(
'Invoked \'add_deps\' on non-existent rule %s.' % name)
rule = build_env.rules[name]
if 'deps' not in rule:
raise ValueError(
'Invoked \'add_deps\' on rule %s that has no \'deps\' field'
% name)
rule['deps'] = rule['deps'] + deps
class BuildFileProcessor(object):
def __init__(self, project_root, watchman_watch_root, watchman_project_prefix, build_file_name,
allow_empty_globs, watchman_client, watchman_error, implicit_includes=[],
extra_funcs=[]):
self._cache = {}
self._build_env_stack = []
self._sync_cookie_state = SyncCookieState()
self._project_root = project_root
self._watchman_watch_root = watchman_watch_root
self._watchman_project_prefix = watchman_project_prefix
self._build_file_name = build_file_name
self._implicit_includes = implicit_includes
self._allow_empty_globs = allow_empty_globs
self._watchman_client = watchman_client
self._watchman_error = watchman_error
lazy_functions = {}
for func in BUILD_FUNCTIONS + extra_funcs:
func_with_env = LazyBuildEnvPartial(func)
lazy_functions[func.__name__] = func_with_env
self._functions = lazy_functions
def _merge_globals(self, mod, dst):
"""
Copy the global definitions from one globals dict to another.
Ignores special attributes and attributes starting with '_', which
typically denote module-level private attributes.
"""
hidden = set([
'include_defs',
])
keys = getattr(mod, '__all__', mod.__dict__.keys())
for key in keys:
if not key.startswith('_') and key not in hidden:
dst[key] = mod.__dict__[key]
def _update_functions(self, build_env):
"""
Updates the build functions to use the given build context when called.
"""
for function in self._functions.itervalues():
function.build_env = build_env
def install_builtins(self, namespace):
"""
Installs the build functions, by their name, into the given namespace.
"""
for name, function in self._functions.iteritems():
namespace[name] = function.invoke
def _get_include_path(self, name):
"""
Resolve the given include def name to a full path.
"""
# Find the path from the include def name.
if not name.startswith('//'):
raise ValueError(
'include_defs argument "%s" must begin with //' % name)
relative_path = name[2:]
return os.path.join(self._project_root, relative_path)
def _include_defs(self, name, implicit_includes=[]):
"""
Pull the named include into the current caller's context.
This method is meant to be installed into the globals of any files or
includes that we process.
"""
# Grab the current build context from the top of the stack.
build_env = self._build_env_stack[-1]
# Resolve the named include to its path and process it to get its
# build context and module.
path = self._get_include_path(name)
inner_env, mod = self._process_include(
path,
implicit_includes=implicit_includes)
# Look up the caller's stack frame and merge the include's globals
        # into its symbol table.
frame = inspect.currentframe()
while frame.f_globals['__name__'] == __name__:
frame = frame.f_back
self._merge_globals(mod, frame.f_globals)
# Pull in the include's accounting of its own referenced includes
# into the current build context.
build_env.includes.add(path)
build_env.includes.update(inner_env.includes)
def _push_build_env(self, build_env):
"""
Set the given build context as the current context.
"""
self._build_env_stack.append(build_env)
self._update_functions(build_env)
def _pop_build_env(self):
"""
Restore the previous build context as the current context.
"""
self._build_env_stack.pop()
if self._build_env_stack:
self._update_functions(self._build_env_stack[-1])
def _process(self, build_env, path, implicit_includes=[]):
"""
Process a build file or include at the given path.
"""
# First check the cache.
cached = self._cache.get(path)
if cached is not None:
return cached
# Install the build context for this input as the current context.
self._push_build_env(build_env)
# The globals dict that this file will be executed under.
default_globals = {}
# Install the 'include_defs' function into our global object.
default_globals['include_defs'] = functools.partial(
self._include_defs,
implicit_includes=implicit_includes)
# If any implicit includes were specified, process them first.
for include in implicit_includes:
include_path = self._get_include_path(include)
inner_env, mod = self._process_include(include_path)
self._merge_globals(mod, default_globals)
build_env.includes.add(include_path)
build_env.includes.update(inner_env.includes)
# Build a new module for the given file, using the default globals
# created above.
module = imp.new_module(path)
module.__file__ = path
module.__dict__.update(default_globals)
with open(path) as f:
contents = f.read()
# Enable absolute imports. This prevents the compiler from trying to
# do a relative import first, and warning that this module doesn't
# exist in sys.modules.
future_features = __future__.absolute_import.compiler_flag
code = compile(contents, path, 'exec', future_features, 1)
exec(code, module.__dict__)
# Restore the previous build context.
self._pop_build_env()
self._cache[path] = build_env, module
return build_env, module
def _process_include(self, path, implicit_includes=[]):
"""
Process the include file at the given path.
"""
build_env = IncludeContext()
return self._process(
build_env,
path,
implicit_includes=implicit_includes)
def _process_build_file(self, path, implicit_includes=[]):
"""
Process the build file at the given path.
"""
# Create the build file context, including the base path and directory
# name of the given path.
relative_path_to_build_file = os.path.relpath(
path, self._project_root).replace('\\', '/')
len_suffix = -len('/' + self._build_file_name)
base_path = relative_path_to_build_file[:len_suffix]
dirname = os.path.dirname(path)
build_env = BuildFileContext(
base_path,
dirname,
self._allow_empty_globs,
self._watchman_client,
self._watchman_watch_root,
self._watchman_project_prefix,
self._sync_cookie_state,
self._watchman_error)
return self._process(
build_env,
path,
implicit_includes=implicit_includes)
def process(self, path):
"""
        Process a build file, returning a list of its rules and includes.
"""
build_env, mod = self._process_build_file(
os.path.join(self._project_root, path),
implicit_includes=self._implicit_includes)
values = build_env.rules.values()
values.append({"__includes": [path] + sorted(build_env.includes)})
return values
def cygwin_adjusted_path(path):
if sys.platform == 'cygwin':
return subprocess.check_output(['cygpath', path]).rstrip()
else:
return path
# Inexplicably, this script appears to run faster when the arguments passed
# into it are absolute paths. However, we want the "buck.base_path" property
# of each rule to be printed out to be the base path of the build target that
# identifies the rule. That means that when parsing a BUILD file, we must know
# its path relative to the root of the project to produce the base path.
#
# To that end, the first argument to this script must be an absolute path to
# the project root. It must be followed by one or more absolute paths to
# BUILD files under the project root. If no paths to BUILD files are
# specified, then it will traverse the project root for BUILD files, excluding
# directories of generated files produced by Buck.
#
# All of the build rules that are parsed from the BUILD files will be encoded
# (as JSON or BSER) and printed to stdout. That means that printing out other
# information for debugging purposes will likely break that parsing, so be
# careful!
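#
# Example invocation (all paths are hypothetical):
#
#   buck.py --project_root /abs/project --build_file_name BUCK \
#       /abs/project/java/com/example/BUCK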
def main():
# Our parent expects to read JSON from our stdout, so if anyone
# uses print, buck will complain with a helpful "but I wanted an
# array!" message and quit. Redirect stdout to stderr so that
# doesn't happen. Actually dup2 the file handle so that writing
# to file descriptor 1, os.system, and so on work as expected too.
to_parent = os.fdopen(os.dup(sys.stdout.fileno()), 'a')
os.dup2(sys.stderr.fileno(), sys.stdout.fileno())
parser = optparse.OptionParser()
parser.add_option(
'--project_root',
action='store',
type='string',
dest='project_root')
parser.add_option(
'--build_file_name',
action='store',
type='string',
dest="build_file_name")
parser.add_option(
'--allow_empty_globs',
action='store_true',
dest='allow_empty_globs',
help='Tells the parser not to raise an error when glob returns no results.')
parser.add_option(
'--use_watchman_glob',
action='store_true',
dest='use_watchman_glob',
help='Invokes `watchman query` to get lists of files instead of globbing in-process.')
parser.add_option(
'--watchman_watch_root',
action='store',
type='string',
dest='watchman_watch_root',
help='Path to root of watchman watch as returned by `watchman watch-project`.')
parser.add_option(
'--watchman_project_prefix',
action='store',
type='string',
dest='watchman_project_prefix',
help='Relative project prefix as returned by `watchman watch-project`.')
parser.add_option(
'--watchman_query_timeout_ms',
action='store',
type='int',
dest='watchman_query_timeout_ms',
help='Maximum time in milliseconds to wait for watchman query to respond.')
parser.add_option(
'--include',
action='append',
dest='include')
(options, args) = parser.parse_args()
# Even though project_root is absolute path, it may not be concise. For
# example, it might be like "C:\project\.\rule".
#
# Under cygwin, the project root will be invoked from buck as C:\path, but
# the cygwin python uses UNIX-style paths. They can be converted using
# cygpath, which is necessary because abspath will treat C:\path as a
# relative path.
options.project_root = cygwin_adjusted_path(options.project_root)
project_root = os.path.abspath(options.project_root)
watchman_client = None
watchman_error = None
output_format = 'JSON'
output_encode = lambda val: json.dumps(val, sort_keys=True)
if options.use_watchman_glob:
try:
# pywatchman may not be built, so fall back to non-watchman
# in that case.
import pywatchman
client_args = {}
if options.watchman_query_timeout_ms is not None:
# pywatchman expects a timeout as a nonnegative floating-point
# value in seconds.
client_args['timeout'] = max(0.0, options.watchman_query_timeout_ms / 1000.0)
watchman_client = pywatchman.client(**client_args)
watchman_error = pywatchman.WatchmanError
output_format = 'BSER'
output_encode = lambda val: pywatchman.bser.dumps(val)
except ImportError, e:
# TODO(agallagher): Restore this when the PEX builds pywatchman.
# print >> sys.stderr, \
# 'Could not import pywatchman (sys.path {}): {}'.format(
# sys.path,
# repr(e))
pass
buildFileProcessor = BuildFileProcessor(
project_root,
options.watchman_watch_root,
options.watchman_project_prefix,
options.build_file_name,
options.allow_empty_globs,
watchman_client,
watchman_error,
implicit_includes=options.include or [])
buildFileProcessor.install_builtins(__builtin__.__dict__)
to_parent.write(output_format + '\n')
to_parent.flush()
for build_file in args:
build_file = cygwin_adjusted_path(build_file)
values = buildFileProcessor.process(build_file)
to_parent.write(output_encode(values))
to_parent.flush()
# "for ... in sys.stdin" in Python 2.x hangs until stdin is closed.
for build_file in iter(sys.stdin.readline, ''):
build_file = cygwin_adjusted_path(build_file)
values = buildFileProcessor.process(build_file.rstrip())
to_parent.write(output_encode(values))
to_parent.flush()
# Python tries to flush/close stdout when it quits, and if there's a dead
# pipe on the other end, it will spit some warnings to stderr. This breaks
# tests sometimes. Prevent that by explicitly catching the error.
try:
to_parent.close()
except IOError:
pass
|
{
"content_hash": "5110b656651c9b9b8f5faf057d774a8e",
"timestamp": "",
"source": "github",
"line_count": 694,
"max_line_length": 99,
"avg_line_length": 35.22766570605187,
"alnum_prop": 0.6198053010471204,
"repo_name": "luiseduardohdbackup/buck",
"id": "e5cc66bbe43fde44705376eb7fc479482bb239e0",
"size": "24626",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/com/facebook/buck/json/buck.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "683"
},
{
"name": "C",
"bytes": "245769"
},
{
"name": "C++",
"bytes": "3765"
},
{
"name": "CSS",
"bytes": "54863"
},
{
"name": "D",
"bytes": "623"
},
{
"name": "Groff",
"bytes": "440"
},
{
"name": "HTML",
"bytes": "4938"
},
{
"name": "IDL",
"bytes": "128"
},
{
"name": "Java",
"bytes": "9977219"
},
{
"name": "JavaScript",
"bytes": "931262"
},
{
"name": "Lex",
"bytes": "2442"
},
{
"name": "Makefile",
"bytes": "1791"
},
{
"name": "Matlab",
"bytes": "47"
},
{
"name": "OCaml",
"bytes": "2956"
},
{
"name": "Objective-C",
"bytes": "67487"
},
{
"name": "Objective-C++",
"bytes": "34"
},
{
"name": "PowerShell",
"bytes": "143"
},
{
"name": "Python",
"bytes": "197714"
},
{
"name": "Rust",
"bytes": "938"
},
{
"name": "Shell",
"bytes": "30301"
},
{
"name": "Smalltalk",
"bytes": "438"
},
{
"name": "Yacc",
"bytes": "323"
}
],
"symlink_target": ""
}
|
import logging
import os
import re
import sys
from nose.plugins import Plugin
from base import Knows
def modname(qualname):
candidate = qualname[0]
for elem in qualname[1:]:
if ".".join((candidate, elem)) not in sys.modules:
return candidate
candidate = ".".join((candidate, elem))
# nose generates two types of test names: unittest-based test cases
# are named "Test(<module.class testMethod=method>)". Non-unittest
# test cases (functions and methods) simply have a dotted name like
# "Test(module.class.method)".
TESTMETH = re.compile(r"Test\(\<(?P<class>[\w_\.]+) "
"testMethod=(?P<method>[\w_]+)\>\)")
TESTFUNC = re.compile(r"Test\((?P<func>[\w_\.]+)\)")
def parse_test_name(test_name):
# Try to match both name styles we know.
mo = TESTMETH.match(test_name)
if mo:
qualname = mo.groupdict()["class"].split(".")
method_name = mo.groupdict()["method"]
module_name = ".".join(qualname[:-1])
class_name = qualname[-1]
return "%s:%s.%s" % (module_name, class_name, method_name)
mo = TESTFUNC.match(test_name)
if mo:
qualname = mo.groupdict()["func"].split(".")
module_name = modname(qualname)
func_name = qualname[-1]
return "%s:%s" % (module_name, func_name)
# No match
return ''
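# Illustrative examples of the two styles (names are hypothetical; the
# function case assumes "pkg.module" is already in sys.modules):
#   parse_test_name("Test(<pkg.module.MyTest testMethod=test_foo>)")
#       -> "pkg.module:MyTest.test_foo"
#   parse_test_name("Test(pkg.module.test_func)")
#       -> "pkg.module:test_func"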
class KnowsNosePlugin(Plugin):
name = 'knows'
def __init__(self, *args, **kwargs):
self.output = True
self.enableOpt = 'with-knows'
self.logger = logging.getLogger('nose.plugins.knows')
def options(self, parser, env=os.environ):
parser.add_option(
'--knows-file',
type='string',
dest='knows_file',
default='.knows',
help='Output file for knows plugin.',
)
parser.add_option(
'--knows-out',
action='store_true',
dest='knows_out',
help='Whether to output the mapping of files to unit tests.',
)
parser.add_option(
'--knows-dir',
type='string',
dest='knows_dir',
default='',
help='Include only this given directory. This should be your '
'project directory name, and does not need to be an absolute '
'path.',
)
parser.add_option(
'--knows-exclude',
type='string',
action='append',
dest='knows_exclude',
help='Exclude files having this string (can use multiple times).',
)
super(KnowsNosePlugin, self).options(parser, env=env)
def configure(self, options, config):
self.enabled = getattr(options, self.enableOpt)
if self.enabled:
self.knows = Knows(
knows_filename=options.knows_file,
output=options.knows_out,
knows_directory=options.knows_dir,
exclude=options.knows_exclude,
)
input_files = config.testNames
if not options.knows_out:
if input_files:
config.testNames = self.knows.get_tests_to_run(
input_files,
)
super(KnowsNosePlugin, self).configure(options, config)
def begin(self):
self.knows.begin()
def startTest(self, test):
self.knows.start_test(parse_test_name(repr(test)))
def stopTest(self, test):
self.knows.stop_test(parse_test_name(repr(test)))
def finalize(self, result):
self.knows.finalize()
|
{
"content_hash": "e10647e750bf35bc2029403782bee77c",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 31.339130434782607,
"alnum_prop": 0.5574361820199778,
"repo_name": "venmo/nose-knows",
"id": "a1258917f8b99ca033b65c7b124bab6b7bfd95cc",
"size": "3604",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/knows/nose_plugin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10696"
}
],
"symlink_target": ""
}
|
"""test_mock_chroot.py - Testing for mock_chroot.py
"""
import pytest
from subprocess import CalledProcessError
from mock_chroot import MockChroot
import mock_chroot
class TestMockChroot(object):
@pytest.mark.parametrize(
('init_args', 'more_args', 'expected'),
[
(
dict(root='a_root_file'),
('foo', 'bar'),
(MockChroot.mock_exe(), '--root=a_root_file', 'foo', 'bar'),
),
]
)
def test__mock_cmd(self, init_args, more_args, expected):
mock = MockChroot(**init_args)
result = mock._mock_cmd(*more_args)
assert result == expected
def test_chroot(self):
mc = MockChroot(root='epel-6-x86_64')
expected = 'CentOS release 6.7 (Final)\nKernel \\r on an \\m\n\n'
output = mc.chroot('cat', '/etc/issue')
assert output == expected
expected = 'testing\n'
output = mc.chroot('bash', '-c', 'echo testing | tee test.txt')
assert output == expected
output = mc.chroot('cat', 'test.txt')
assert output == expected
with pytest.raises(CalledProcessError):
output = mc.chroot('false')
# the following will raise an exception if we're in the wrong directory
output = mc.chroot('cat', 'hosts', cwd='etc')
def test_string_config(self, custom_mock_cfg):
cmc = MockChroot(config=custom_mock_cfg)
output = cmc.chroot('/usr/bin/yum', '-q', 'list', 'google-chrome-beta')
# If the package is not there, or any other error, yum will return a
# nonzero value, which will raise an exception. So output will contain a
# non blank string only if the package is found
assert output
def test_init_bad_params(self, custom_mock_cfg):
with pytest.raises(RuntimeError):
MockChroot(
root='epel-6-x86_64',
config=custom_mock_cfg
)
def test_clean(self):
mc = MockChroot(root='epel-6-x86_64')
expected = 'testing\n'
output = mc.chroot('bash', '-c', 'echo testing | tee test.txt')
assert output == expected
output = mc.chroot('cat', 'test.txt')
assert output == expected
mc.clean()
with pytest.raises(CalledProcessError):
output = mc.chroot('cat', 'test.txt')
expected = 'not there\n'
output = mc.chroot('bash', '-c', 'test -e test.txt || echo not there')
assert output == expected
@pytest.mark.parametrize(
('args', 'expected'),
[
(
dict(no_clean=True),
['--no-clean'],
),
(
dict(no_clean=False),
[],
),
(
dict(define='def1'),
['--define', 'def1'],
),
(
dict(define=('def1', 'def2')),
['--define', 'def1', '--define', 'def2'],
),
(
dict(resultdir='/a/dir'),
['--resultdir', '/a/dir'],
),
(
dict(define=7),
TypeError,
),
(
dict(no_clean=False, define='def1', resultdir='/a/dir'),
['--define', 'def1', '--resultdir', '/a/dir'],
),
]
)
def test__setup_mock_build_options(self, args, expected):
if isinstance(expected, type) and issubclass(expected, Exception):
with pytest.raises(expected):
MockChroot._setup_mock_build_options(**args)
else:
output = MockChroot._setup_mock_build_options(**args)
assert output == expected
def test_rebuild(self, monkeypatch):
        # Use a list so it is passed by reference into the closures
checked_cmd = [[]]
def check_output(cmd):
checked_cmd[0] = list(cmd)
return 'some output'
monkeypatch.setattr(mock_chroot, 'check_output', check_output)
mc = MockChroot(root='some_root')
output = mc.rebuild(src_rpm='some.src.rpm')
expected = [
MockChroot.mock_exe(),
'--root=some_root',
'--rebuild', 'some.src.rpm'
]
assert checked_cmd[0] == expected
assert output == 'some output'
mc.rebuild(src_rpm='some.src.rpm', define='def1')
expected = [
MockChroot.mock_exe(),
'--root=some_root',
'--rebuild', 'some.src.rpm',
'--define', 'def1',
]
assert checked_cmd[0] == expected
assert output == 'some output'
def test_buildsrpm(self, monkeypatch):
        # Use a list so it is passed by reference into the closures
checked_cmd = [[]]
def check_output(cmd):
checked_cmd[0] = list(cmd)
return 'some output'
monkeypatch.setattr(mock_chroot, 'check_output', check_output)
mc = MockChroot(root='some_root')
output = mc.buildsrpm(spec='some.spec', sources='/some/sources')
expected = [
MockChroot.mock_exe(),
'--root=some_root',
'--buildsrpm',
'--spec', 'some.spec',
'--sources', '/some/sources',
]
assert checked_cmd[0] == expected
assert output == 'some output'
output = mc.buildsrpm(
spec='some.spec', sources='/some/sources', define='def1'
)
expected = [
MockChroot.mock_exe(),
'--root=some_root',
'--buildsrpm',
'--spec', 'some.spec',
'--sources', '/some/sources',
'--define', 'def1',
]
assert checked_cmd[0] == expected
assert output == 'some output'
|
{
"content_hash": "b603606f9f08c6baa72021c67ccbb487",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 80,
"avg_line_length": 34.38461538461539,
"alnum_prop": 0.5105833763551885,
"repo_name": "ifireball/python-mockchroot",
"id": "46a7d38944c2c4fd491a85933f7eb35af3fb0d90",
"size": "5833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_mock_chroot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38549"
}
],
"symlink_target": ""
}
|
class LinkActionType(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
}
self.attributeMap = {
}
|
{
"content_hash": "49faccb32a548684b335f45c2568edad",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 97,
"avg_line_length": 26.2,
"alnum_prop": 0.5629770992366412,
"repo_name": "farooqsheikhpk/Aspose_Pdf_Cloud",
"id": "6b3363c1c994a9ce1cec14368da5cb8c22f69617",
"size": "547",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "SDKs/Aspose.Pdf-Cloud-SDK-for-Python/asposepdfcloud/models/LinkActionType.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1279"
},
{
"name": "Java",
"bytes": "308541"
},
{
"name": "JavaScript",
"bytes": "222677"
},
{
"name": "Objective-C",
"bytes": "538918"
},
{
"name": "PHP",
"bytes": "223730"
},
{
"name": "Python",
"bytes": "297747"
},
{
"name": "Ruby",
"bytes": "2035"
},
{
"name": "XSLT",
"bytes": "444"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
version = '1.8'
LONG_DESCRIPTION = """
How to use django-pagination
----------------------------
``django-pagination`` allows for easy Digg-style pagination without modifying
your views.
There are really 6 steps to setting it up with your projects (not including
installation, which is covered in INSTALL.txt in this same directory).
1. List this application in the ``INSTALLED_APPS`` portion of your settings
file. Your settings file might look something like::
INSTALLED_APPS = (
# ...
'pagination',
)
2. Install the pagination middleware. Your settings file might look something
like::
MIDDLEWARE_CLASSES = (
# ...
'pagination.middleware.PaginationMiddleware',
)
3. If it's not already added in your setup, add the request context processor.
Note that context processors are set implicitly by default, so to set them
explicitly you need to copy and paste this code into your settings file
under the value TEMPLATE_CONTEXT_PROCESSORS::
("django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request")
4. Add this line at the top of your template to load the pagination tags:
{% load pagination_tags %}
5. Decide on a variable that you would like to paginate, and use the
autopaginate tag on that variable before iterating over it. This could
take one of two forms (using the canonical ``object_list`` as an example
variable):
{% autopaginate object_list %}
This assumes that you would like to have the default 20 results per page.
If you would like to specify your own amount of results per page, you can
specify that like so:
{% autopaginate object_list 10 %}
Note that this replaces ``object_list`` with the list for the current page, so
you can iterate over the ``object_list`` like you normally would.
6. Now you want to display the current page and the available pages, so
somewhere after having used autopaginate, use the paginate inclusion tag:
{% paginate %}
This does not take any arguments, but does assume that you have already
called autopaginate, so make sure to do so first.
That's it! You have now paginated ``object_list`` and given users of the site
a way to navigate between the different pages--all without touching your views.
Optional Settings
------------------
In django-pagination, there are no required settings. There are, however, a
small set of optional settings useful for changing the default behavior of the
pagination tags. Here's an overview:
``PAGINATION_DEFAULT_PAGINATION``
The default amount of items to show on a page if no number is specified.
``PAGINATION_DEFAULT_WINDOW``
The number of items to the left and to the right of the current page to
display (accounting for ellipses).
``PAGINATION_DEFAULT_ORPHANS``
The number of orphans allowed. According to the Django documentation,
orphans are defined as::
The minimum number of items allowed on the last page, defaults to zero.
``PAGINATION_INVALID_PAGE_RAISES_404``
Determines whether an invalid page raises an ``Http404`` or just sets the
``invalid_page`` context variable. ``True`` does the former and ``False``
does the latter.
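For example, to override these defaults you might add something like the
following to your settings file (the values shown here are purely
illustrative, not the shipped defaults)::
    PAGINATION_DEFAULT_PAGINATION = 25
    PAGINATION_DEFAULT_WINDOW = 4
    PAGINATION_DEFAULT_ORPHANS = 2
    PAGINATION_INVALID_PAGE_RAISES_404 = True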
Update to work with Django 1.9
------------------------------
Some fixes have been made by Diego J. Romero to make this package work
under newer versions of Django, like the 1.9 one.
"""
setup(
name='django-pagination',
version=version,
description="django-pagination",
long_description=LONG_DESCRIPTION,
classifiers=[
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Framework :: Django",
"Environment :: Web Environment",
],
keywords='pagination,django',
author='Eric Florenzano',
author_email='floguy@gmail.com',
url='http://django-pagination.googlecode.com/',
license='BSD',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=['setuptools'],
)
|
{
"content_hash": "34133a8a513958e2781b71ef09b6f883",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 81,
"avg_line_length": 32.954198473282446,
"alnum_prop": 0.6905258281213806,
"repo_name": "intelligenia/django-pagination",
"id": "90da75ef3c591e5206a77e0e9372d2fdee3dc769",
"size": "4317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "990"
},
{
"name": "Python",
"bytes": "24329"
}
],
"symlink_target": ""
}
|
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
def test_tied_init():
network = tn.SequentialNode(
"s",
[tn.InputNode("i", shape=()),
tn.AddBiasNode("b1", inits=[treeano.inits.ConstantInit(42)]),
tn.AddBiasNode("b2", inits=[treeano.inits.TiedInit("b2", "b1")])]
).network()
fn = network.function(["i"], ["s"])
np.testing.assert_equal(84, fn(0)[0])
network["b1"].get_vw("bias").variable.set_value(43)
np.testing.assert_equal(86, fn(0)[0])
|
{
"content_hash": "84f24ab427b1bf5cf588ef4e520be065",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 74,
"avg_line_length": 27.40909090909091,
"alnum_prop": 0.6334991708126037,
"repo_name": "jagill/treeano",
"id": "699a6558a829a06f079a4940aed02e012b94659d",
"size": "603",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "treeano/inits/tests/inits_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1121"
},
{
"name": "JavaScript",
"bytes": "16041"
},
{
"name": "Python",
"bytes": "748632"
}
],
"symlink_target": ""
}
|
from django.db import models
from core.mixins import ObjectUrlMixin
from core.keyvalue.base_option import DHCPKeyValue, CommonOption
class Group(models.Model, ObjectUrlMixin):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=255)
#parent_group = models.ForeignKey('Group', null=True, blank=True)
def details(self):
return (
("Name", self.name),
)
class Meta:
db_table = "group"
unique_together = ("name",)
def __str__(self):
return "{0}".format(self.name)
def __repr__(self):
return "<Group: {0}>".format(self)
@classmethod
def get_api_fields(cls):
return ['name']
class GroupKeyValue(DHCPKeyValue, CommonOption):
obj = models.ForeignKey(Group, related_name='keyvalue_set', null=False)
class Meta:
db_table = "group_key_value"
unique_together = ("key", "value")
def _aa_description(self):
return
|
{
"content_hash": "b9325be39ba44e94fdb73862cc0f80ab",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 75,
"avg_line_length": 23.78048780487805,
"alnum_prop": 0.6235897435897436,
"repo_name": "rtucker-mozilla/inventory",
"id": "256437d33f0cdd38c9296b81f980eb1383b5eb2d",
"size": "975",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "core/group/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5104"
},
{
"name": "CSS",
"bytes": "362837"
},
{
"name": "CoffeeScript",
"bytes": "9538"
},
{
"name": "HTML",
"bytes": "1195738"
},
{
"name": "JavaScript",
"bytes": "1530665"
},
{
"name": "Makefile",
"bytes": "14421"
},
{
"name": "PHP",
"bytes": "27273"
},
{
"name": "Python",
"bytes": "3642241"
},
{
"name": "Shell",
"bytes": "1783"
}
],
"symlink_target": ""
}
|
import posixpath
from compiled_file_system import Unicode
from extensions_paths import (
API_FEATURES, JSON_TEMPLATES, MANIFEST_FEATURES, PERMISSION_FEATURES)
import features_utility
from future import Gettable, Future
from third_party.json_schema_compiler.json_parse import Parse
def _AddPlatformsFromDependencies(feature,
api_features,
manifest_features,
permission_features):
features_map = {
'api': api_features,
'manifest': manifest_features,
'permission': permission_features,
}
dependencies = feature.get('dependencies')
  if dependencies is None:
    feature['platforms'] = ['apps', 'extensions']
    return
platforms = set()
for dependency in dependencies:
dep_type, dep_name = dependency.split(':')
dependency_features = features_map[dep_type]
dependency_feature = dependency_features.get(dep_name)
# If the dependency can't be resolved, it is inaccessible and therefore
# so is this feature.
    if dependency_feature is None:
      feature['platforms'] = []
      return
platforms = platforms.union(dependency_feature['platforms'])
feature['platforms'] = list(platforms)
class _FeaturesCache(object):
def __init__(self, file_system, compiled_fs_factory, *json_paths):
self._cache = compiled_fs_factory.Create(
file_system, self._CreateCache, type(self))
self._text_cache = compiled_fs_factory.ForUnicode(file_system)
self._json_path = json_paths[0]
self._extra_paths = json_paths[1:]
@Unicode
def _CreateCache(self, _, features_json):
extra_path_futures = [self._text_cache.GetFromFile(path)
for path in self._extra_paths]
features = features_utility.Parse(Parse(features_json))
for path_future in extra_path_futures:
extra_json = path_future.Get()
features = features_utility.MergedWith(
features_utility.Parse(Parse(extra_json)), features)
return features
def GetFeatures(self):
if self._json_path is None:
return Future(value={})
return self._cache.GetFromFile(self._json_path)
class FeaturesBundle(object):
'''Provides access to properties of API, Manifest, and Permission features.
'''
def __init__(self, file_system, compiled_fs_factory, object_store_creator):
self._api_cache = _FeaturesCache(
file_system,
compiled_fs_factory,
API_FEATURES)
self._manifest_cache = _FeaturesCache(
file_system,
compiled_fs_factory,
MANIFEST_FEATURES,
posixpath.join(JSON_TEMPLATES, 'manifest.json'))
self._permission_cache = _FeaturesCache(
file_system,
compiled_fs_factory,
PERMISSION_FEATURES,
posixpath.join(JSON_TEMPLATES, 'permissions.json'))
self._object_store = object_store_creator.Create(_FeaturesCache, 'features')
def GetPermissionFeatures(self):
return self._permission_cache.GetFeatures()
def GetManifestFeatures(self):
return self._manifest_cache.GetFeatures()
def GetAPIFeatures(self):
api_features = self._object_store.Get('api_features').Get()
if api_features is not None:
return Future(value=api_features)
api_features_future = self._api_cache.GetFeatures()
manifest_features_future = self._manifest_cache.GetFeatures()
permission_features_future = self._permission_cache.GetFeatures()
def resolve():
api_features = api_features_future.Get()
manifest_features = manifest_features_future.Get()
permission_features = permission_features_future.Get()
# TODO(rockot): Handle inter-API dependencies more gracefully.
# Not yet a problem because there is only one such case (windows -> tabs).
# If we don't store this value before annotating platforms, inter-API
# dependencies will lead to infinite recursion.
for feature in api_features.itervalues():
_AddPlatformsFromDependencies(
feature, api_features, manifest_features, permission_features)
self._object_store.Set('api_features', api_features)
return api_features
return Future(delegate=Gettable(resolve))
|
{
"content_hash": "4bd08ea1770d8d87ad49486034189498",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 80,
"avg_line_length": 38.27777777777778,
"alnum_prop": 0.6823899371069182,
"repo_name": "ChromiumWebApps/chromium",
"id": "a81321492dcef55210888832f0cf5df54a3025a0",
"size": "4297",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chrome/common/extensions/docs/server2/features_bundle.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "52960"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "42286199"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "198616766"
},
{
"name": "CSS",
"bytes": "937333"
},
{
"name": "DOT",
"bytes": "2984"
},
{
"name": "Java",
"bytes": "5695686"
},
{
"name": "JavaScript",
"bytes": "21967126"
},
{
"name": "M",
"bytes": "2190"
},
{
"name": "Matlab",
"bytes": "2262"
},
{
"name": "Objective-C",
"bytes": "7602057"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "1210885"
},
{
"name": "Python",
"bytes": "10774996"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1316721"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "TypeScript",
"bytes": "1560024"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15243"
}
],
"symlink_target": ""
}
|
import math
import os
import random
import re
import sys
# Complete the flippingBits function below.
def flippingBits(n):
    swapped = [1 if x == '0' else 0 for x in "{:032b}".format(n)]
swapped_str = ''.join(str(x) for x in swapped)
i = int(swapped_str, 2)
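    # equivalently, the 32-bit complement can be computed directly as n ^ 0xFFFFFFFF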
return i
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
n = int(input())
result = flippingBits(n)
fptr.write(str(result) + '\n')
fptr.close()
|
{
"content_hash": "dd85295b4102e55f61823ab4ec72f09e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 65,
"avg_line_length": 22.304347826086957,
"alnum_prop": 0.5964912280701754,
"repo_name": "jekhokie/scriptbox",
"id": "bc95f472407e0fa118e5fe32da55dff1c49cfc5a",
"size": "643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python--learnings/coding-practice/flipping_bits.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1526"
},
{
"name": "DIGITAL Command Language",
"bytes": "521"
},
{
"name": "Dockerfile",
"bytes": "3686"
},
{
"name": "HTML",
"bytes": "3524"
},
{
"name": "Java",
"bytes": "2907"
},
{
"name": "JavaScript",
"bytes": "42537"
},
{
"name": "Jinja",
"bytes": "92573"
},
{
"name": "Mustache",
"bytes": "1897"
},
{
"name": "Open Policy Agent",
"bytes": "263"
},
{
"name": "PHP",
"bytes": "5342"
},
{
"name": "PowerShell",
"bytes": "2721"
},
{
"name": "Pug",
"bytes": "275"
},
{
"name": "Python",
"bytes": "129947"
},
{
"name": "Ruby",
"bytes": "26843"
},
{
"name": "Rust",
"bytes": "43138"
},
{
"name": "Shell",
"bytes": "77462"
}
],
"symlink_target": ""
}
|
"""Unit tests for scripts/concurrent_task_utils.py."""
from __future__ import annotations
import builtins
import threading
import time
from core import python_utils
from core.tests import test_utils
from . import concurrent_task_utils
def test_function(unused_arg):
return python_utils.OBJECT
class ConcurrentTaskUtilsTests(test_utils.GenericTestBase):
"""Test for concurrent_task_utils.py flie."""
def setUp(self):
super(ConcurrentTaskUtilsTests, self).setUp()
self.semaphore = threading.Semaphore(1)
self.task_stdout = []
def mock_print(*args):
"""Mock for print. Append the values to print to
task_stdout list.
Args:
*args: list(*). Variable length argument list of values to print
in the same line of output.
"""
self.task_stdout.append(' '.join(str(arg) for arg in args))
self.print_swap = self.swap(builtins, 'print', mock_print)
class TaskResultTests(ConcurrentTaskUtilsTests):
"""Tests for TaskResult class."""
def test_all_messages_with_success_message(self):
output_object = concurrent_task_utils.TaskResult(
'Test', False, [], [])
self.assertEqual(output_object.trimmed_messages, [])
self.assertEqual(
output_object.get_report(), ['SUCCESS Test check passed'])
self.assertFalse(output_object.failed)
self.assertEqual(output_object.name, 'Test')
def test_all_messages_with_failed_message(self):
output_object = concurrent_task_utils.TaskResult(
'Test', True, [], [])
self.assertEqual(output_object.trimmed_messages, [])
self.assertEqual(
output_object.get_report(), ['FAILED Test check failed'])
self.assertTrue(output_object.failed)
self.assertEqual(output_object.name, 'Test')
class CreateTaskTests(ConcurrentTaskUtilsTests):
"""Tests for create_task method."""
def test_create_task_with_success(self):
task = concurrent_task_utils.create_task(
test_function, True, self.semaphore)
self.assertTrue(isinstance(task, concurrent_task_utils.TaskThread))
class TaskThreadTests(ConcurrentTaskUtilsTests):
"""Tests for TaskThread class."""
def test_task_thread_with_success(self):
task = concurrent_task_utils.TaskThread(
test_function('unused_arg'), False, self.semaphore, name='test',
report_enabled=True)
self.semaphore.acquire()
task.start_time = time.time()
with self.print_swap:
task.start()
task.join()
expected_output = [s for s in self.task_stdout if 'FINISHED' in s]
self.assertTrue(len(expected_output) == 1)
def test_task_thread_with_exception(self):
task = concurrent_task_utils.TaskThread(
test_function, True, self.semaphore, name='test',
report_enabled=True)
self.semaphore.acquire()
task.start_time = time.time()
with self.print_swap:
task.start()
task.join()
self.assertIn(
'test_function() missing 1 required '
'positional argument: \'unused_arg\'',
self.task_stdout
)
def test_task_thread_with_verbose_mode_enabled(self):
class HelperTests:
def test_show(self):
return concurrent_task_utils.TaskResult('name', True, [], [])
def test_perform_all_check(self):
return [self.test_show()]
def test_func():
return HelperTests()
task = concurrent_task_utils.TaskThread(
test_func().test_perform_all_check, True,
self.semaphore, name='test', report_enabled=True)
self.semaphore.acquire()
task.start_time = time.time()
with self.print_swap:
task.start()
task.join()
self.assertRegexpMatches(
self.task_stdout[0],
r'\d+:\d+:\d+ Report from name check\n-+\nFAILED '
'name check failed')
def test_task_thread_with_task_report_disabled(self):
class HelperTests:
def test_show(self):
return concurrent_task_utils.TaskResult(
None, None, None, ['msg'])
def test_perform_all_check(self):
return [self.test_show()]
def test_func():
return HelperTests()
task = concurrent_task_utils.TaskThread(
test_func().test_perform_all_check, True,
self.semaphore, name='test', report_enabled=False)
self.semaphore.acquire()
task.start_time = time.time()
with self.print_swap:
task.start()
task.join()
expected_output = [s for s in self.task_stdout if 'FINISHED' in s]
self.assertTrue(len(expected_output) == 1)
class ExecuteTasksTests(ConcurrentTaskUtilsTests):
"""Tests for execute_tasks method."""
def test_execute_task_with_single_task(self):
task = concurrent_task_utils.create_task(
test_function('unused_arg'), False, self.semaphore, name='test')
with self.print_swap:
concurrent_task_utils.execute_tasks([task], self.semaphore)
expected_output = [s for s in self.task_stdout if 'FINISHED' in s]
self.assertTrue(len(expected_output) == 1)
def test_execute_task_with_multiple_task(self):
task_list = []
for _ in range(6):
task = concurrent_task_utils.create_task(
test_function('unused_arg'), False, self.semaphore)
task_list.append(task)
with self.print_swap:
concurrent_task_utils.execute_tasks(task_list, self.semaphore)
expected_output = [s for s in self.task_stdout if 'FINISHED' in s]
self.assertTrue(len(expected_output) == 6)
def test_execute_task_with_exception(self):
task_list = []
for _ in range(6):
task = concurrent_task_utils.create_task(
test_function, True, self.semaphore)
task_list.append(task)
with self.print_swap:
concurrent_task_utils.execute_tasks(task_list, self.semaphore)
self.assertIn(
'test_function() missing 1 required '
'positional argument: \'unused_arg\'',
self.task_stdout
)
|
{
"content_hash": "d685607de23fa1b1cafa28b5e805587c",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 80,
"avg_line_length": 35.66111111111111,
"alnum_prop": 0.6050786726904502,
"repo_name": "kevinlee12/oppia",
"id": "5c2491c43418e21e3f85dd1a6eb9e425aeb1e16d",
"size": "7042",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scripts/concurrent_task_utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "205771"
},
{
"name": "HTML",
"bytes": "1835761"
},
{
"name": "JavaScript",
"bytes": "1182599"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "13670639"
},
{
"name": "Shell",
"bytes": "2239"
},
{
"name": "TypeScript",
"bytes": "13024194"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
import jsonrpyc
this_dir = os.path.dirname(os.path.abspath(__file__))
keywords = [
"rpc", "json", "json-rpc", "2.0",
]
classifiers = [
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"Operating System :: OS Independent",
"License :: OSI Approved :: BSD License",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Intended Audience :: Information Technology"
]
# read the readme file
with open(os.path.join(this_dir, "README.md"), "r") as f:
long_description = f.read()
# load installation requirements
with open(os.path.join(this_dir, "requirements.txt"), "r") as f:
install_requires = [line.strip() for line in f.readlines() if line.strip()]
setup(
name=jsonrpyc.__name__,
version=jsonrpyc.__version__,
author=jsonrpyc.__author__,
author_email=jsonrpyc.__email__,
description=jsonrpyc.__doc__.strip().split("\n")[0].strip(),
license=jsonrpyc.__license__,
url=jsonrpyc.__contact__,
keywords=keywords,
classifiers=classifiers,
long_description=long_description,
long_description_content_type="text/markdown",
    python_requires=">=2.7",
    zip_safe=False,
    py_modules=[jsonrpyc.__name__],
    install_requires=install_requires,
)
|
{
"content_hash": "c171b2c87dab722caa87e10efd439ff3",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 26.705882352941178,
"alnum_prop": 0.6527165932452276,
"repo_name": "riga/jsonrpyc",
"id": "3b59617621ef5fbefcd5e720f7a814f9e32a12a3",
"size": "1381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22821"
}
],
"symlink_target": ""
}
|
import plyvel
import ast
import hashlib
import os
import sys
import threading
from processor import print_log, logger
from utils import bc_address_to_hash_160, hash_160_to_pubkey_address, Hash, \
bytes8_to_int, bytes4_to_int, int_to_bytes8, \
int_to_hex8, int_to_bytes4, int_to_hex4
"""
Patricia tree for hashing unspents
"""
global GENESIS_HASH
GENESIS_HASH = '12a765e31ffd4059bada1e25190f6e98c99d9714d334efa41a195a7e7e04bfe2'
# increase this when the database format needs to be updated
DB_VERSION = 3
KEYLENGTH = 56  # 20 + 32 + 4
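# a full leaf key is hash160(address) (20 bytes) + txid (32 bytes) + output index (4 bytes)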
class Node(object):
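    # serialized form: a 32-byte bitmap marking which child bytes are present,
    # followed by one 40-byte (hash 32 + value 8) entry per present child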
def __init__(self, s):
self.k = int(s[0:32].encode('hex'), 16)
self.s = s[32:]
if self.k==0 and self.s:
print "init error", len(self.s), "0x%0.64X" % self.k
raise BaseException("z")
def serialized(self):
k = "0x%0.64X" % self.k
k = k[2:].decode('hex')
assert len(k) == 32
return k + self.s
def has(self, c):
return (self.k & (1<<(ord(c)))) != 0
def is_singleton(self, key):
assert self.s != ''
return len(self.s) == 40
def get_singleton(self):
for i in xrange(256):
if self.k == (1<<i):
return chr(i)
raise BaseException("get_singleton")
def indexof(self, c):
assert self.k != 0 or self.s == ''
x = 0
for i in xrange(ord(c)):
if (self.k & (1<<i)) != 0:
x += 40
return x
def get(self, c):
x = self.indexof(c)
ss = self.s[x:x+40]
_hash = ss[0:32]
value = bytes8_to_int(ss[32:40])
return _hash, value
def set(self, c, h, value):
if h is None:
h = chr(0)*32
vv = int_to_bytes8(value)
item = h + vv
assert len(item) == 40
if self.has(c):
self.remove(c)
x = self.indexof(c)
self.s = self.s[0:x] + item + self.s[x:]
self.k |= (1<<ord(c))
assert self.k != 0
def remove(self, c):
x = self.indexof(c)
self.k &= ~(1<<ord(c))
self.s = self.s[0:x] + self.s[x+40:]
def get_hash(self, x, parent):
if x:
assert self.k != 0
skip_string = x[len(parent)+1:] if x != '' else ''
x = 0
v = 0
hh = ''
for i in xrange(256):
if (self.k&(1<<i)) != 0:
ss = self.s[x:x+40]
hh += ss[0:32]
v += bytes8_to_int(ss[32:40])
x += 40
try:
_hash = Hash(skip_string + hh)
except:
_hash = None
if x:
assert self.k != 0
return _hash, v
@classmethod
def from_dict(klass, d):
k = 0
s = ''
for i in xrange(256):
if chr(i) in d:
k += 1<<i
h, value = d[chr(i)]
if h is None: h = chr(0)*32
vv = int_to_bytes8(value)
item = h + vv
assert len(item) == 40
s += item
k = "0x%0.64X" % k # 32 bytes
k = k[2:].decode('hex')
assert len(k) == 32
out = k + s
return Node(out)
class DB(object):
def __init__(self, path, name, cache_size):
self.db = plyvel.DB(os.path.join(path, name), create_if_missing=True, compression=None, lru_cache_size=cache_size)
self.batch = self.db.write_batch()
self.cache = {}
self.lock = threading.Lock()
def put(self, key, s):
self.batch.put(key, s)
self.cache[key] = s
def get(self, key):
s = self.cache.get(key)
if s == 'deleted':
return None
if s is None:
with self.lock:
s = self.db.get(key)
return s
def delete(self, key):
self.batch.delete(key)
self.cache[key] = 'deleted'
def close(self):
self.db.close()
def write(self):
with self.lock:
self.batch.write()
self.batch.clear()
self.cache.clear()
def get_next(self, key):
with self.lock:
i = self.db.iterator(start=key)
k, _ = i.next()
return k
class Storage(object):
def __init__(self, config, shared, test_reorgs):
self.shared = shared
self.hash_list = {}
self.parents = {}
self.skip_batch = {}
self.test_reorgs = test_reorgs
# init path
self.dbpath = config.get('leveldb', 'path')
if not os.path.exists(self.dbpath):
os.mkdir(self.dbpath)
try:
self.db_utxo = DB(self.dbpath, 'utxo', config.getint('leveldb', 'utxo_cache'))
self.db_hist = DB(self.dbpath, 'hist', config.getint('leveldb', 'hist_cache'))
self.db_addr = DB(self.dbpath, 'addr', config.getint('leveldb', 'addr_cache'))
self.db_undo = DB(self.dbpath, 'undo', None)
except:
logger.error('db init', exc_info=True)
self.shared.stop()
try:
self.last_hash, self.height, db_version = ast.literal_eval(self.db_undo.get('height'))
except:
print_log('Initializing database')
self.height = 0
self.last_hash = GENESIS_HASH
self.pruning_limit = config.getint('leveldb', 'pruning_limit')
db_version = DB_VERSION
self.put_node('', Node.from_dict({}))
# check version
if db_version != DB_VERSION:
print_log("Your database '%s' is deprecated. Please create a new database"%self.dbpath)
self.shared.stop()
return
# pruning limit
try:
self.pruning_limit = ast.literal_eval(self.db_undo.get('limit'))
except:
self.pruning_limit = config.getint('leveldb', 'pruning_limit')
            self.db_undo.put('limit', repr(self.pruning_limit))
# reorg limit
try:
self.reorg_limit = ast.literal_eval(self.db_undo.get('reorg_limit'))
except:
self.reorg_limit = config.getint('leveldb', 'reorg_limit')
self.db_undo.put('reorg_limit', repr(self.reorg_limit))
# compute root hash
root_node = self.get_node('')
self.root_hash, coins = root_node.get_hash('', None)
# print stuff
print_log("Database version %d."%db_version)
print_log("Pruning limit for spent outputs is %d."%self.pruning_limit)
print_log("Reorg limit is %d blocks."%self.reorg_limit)
print_log("Blockchain height", self.height)
print_log("UTXO tree root hash:", self.root_hash.encode('hex'))
print_log("Coins in database:", coins)
# convert between bitcoin addresses and 20 bytes keys used for storage.
@staticmethod
def address_to_key(addr):
return bc_address_to_hash_160(addr)
def get_skip(self, key):
o = self.skip_batch.get(key)
if o is not None:
return o
k = self.db_utxo.get_next(key)
assert k.startswith(key)
return k[len(key):]
def set_skip(self, key, skip):
self.skip_batch[key] = skip
def get_proof(self, addr):
key = self.address_to_key(addr)
k = self.db_utxo.get_next(key)
p = self.get_path(k)
p.append(k)
out = []
for item in p:
v = self.db_utxo.get(item)
out.append((item.encode('hex'), v.encode('hex')))
return out
def get_balance(self, addr):
key = self.address_to_key(addr)
k = self.db_utxo.get_next(key)
if not k.startswith(key):
return 0
p = self.get_parent(k)
d = self.get_node(p)
letter = k[len(p)]
return d.get(letter)[1]
def listunspent(self, addr):
key = self.address_to_key(addr)
if key is None:
raise BaseException('Invalid Litecoin address', addr)
out = []
with self.db_utxo.lock:
for k, v in self.db_utxo.db.iterator(start=key):
if not k.startswith(key):
break
if len(k) == KEYLENGTH:
txid = k[20:52].encode('hex')
txpos = bytes4_to_int(k[52:56])
h = bytes4_to_int(v[8:12])
v = bytes8_to_int(v[0:8])
out.append({'tx_hash': txid, 'tx_pos':txpos, 'height': h, 'value':v})
if len(out) == 1000:
print_log('max utxo reached', addr)
break
out.sort(key=lambda x:x['height'])
return out
def get_history(self, addr):
out = []
o = self.listunspent(addr)
for item in o:
out.append((item['height'], item['tx_hash']))
h = self.db_hist.get(addr)
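        # each 80-byte record written by set_spent is: the spent outpoint
        # (txid 32 + index 4), its funding height (4), then the spending tx
        # (txid 32 + position 4 + height 4)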
while h:
item = h[0:80]
h = h[80:]
txi = item[0:32].encode('hex')
hi = bytes4_to_int(item[36:40])
txo = item[40:72].encode('hex')
ho = bytes4_to_int(item[76:80])
out.append((hi, txi))
out.append((ho, txo))
# uniqueness
out = set(out)
# sort by height then tx_hash
out = sorted(out)
return map(lambda x: {'height':x[0], 'tx_hash':x[1]}, out)
def get_address(self, txi):
return self.db_addr.get(txi)
def get_undo_info(self, height):
s = self.db_undo.get("undo_info_%d" % (height % self.reorg_limit))
if s is None:
print_log("no undo info for ", height)
return eval(s)
def write_undo_info(self, height, bitcoind_height, undo_info):
if height > bitcoind_height - self.reorg_limit or self.test_reorgs:
self.db_undo.put("undo_info_%d" % (height % self.reorg_limit), repr(undo_info))
@staticmethod
def common_prefix(word1, word2):
max_len = min(len(word1),len(word2))
for i in xrange(max_len):
if word2[i] != word1[i]:
index = i
break
else:
index = max_len
return word1[0:index]
def put_node(self, key, node):
self.db_utxo.put(key, node.serialized())
def get_node(self, key):
s = self.db_utxo.get(key)
if s is None:
return
return Node(s)
def add_key(self, target, value, height):
assert len(target) == KEYLENGTH
path = self.get_path(target, new=True)
if path is True:
return
#print "add key: target", target.encode('hex'), "path", map(lambda x: x.encode('hex'), path)
parent = path[-1]
parent_node = self.get_node(parent)
n = len(parent)
c = target[n]
if parent_node.has(c):
h, v = parent_node.get(c)
skip = self.get_skip(parent + c)
child = parent + c + skip
assert not target.startswith(child)
prefix = self.common_prefix(child, target)
index = len(prefix)
if len(child) == KEYLENGTH:
# if it's a leaf, get hash and value of new_key from parent
d = Node.from_dict({
target[index]: (None, 0),
child[index]: (h, v)
})
else:
# if it is not a leaf, update its hash because skip_string changed
child_node = self.get_node(child)
h, v = child_node.get_hash(child, prefix)
d = Node.from_dict({
target[index]: (None, 0),
child[index]: (h, v)
})
self.set_skip(prefix + target[index], target[index+1:])
self.set_skip(prefix + child[index], child[index+1:])
self.put_node(prefix, d)
path.append(prefix)
self.parents[child] = prefix
# update parent skip
new_skip = prefix[n+1:]
self.set_skip(parent+c, new_skip)
parent_node.set(c, None, 0)
self.put_node(parent, parent_node)
else:
# add new letter to parent
skip = target[n+1:]
self.set_skip(parent+c, skip)
parent_node.set(c, None, 0)
self.put_node(parent, parent_node)
# write the new leaf
s = (int_to_hex8(value) + int_to_hex4(height)).decode('hex')
self.db_utxo.put(target, s)
# the hash of a leaf is the txid
_hash = target[20:52]
self.update_node_hash(target, path, _hash, value)
def update_node_hash(self, node, path, _hash, value):
c = node
for x in path[::-1]:
self.parents[c] = x
c = x
self.hash_list[node] = (_hash, value)
def update_hashes(self):
nodes = {} # nodes to write
for i in xrange(KEYLENGTH, -1, -1):
for node in self.hash_list.keys():
if len(node) != i:
continue
node_hash, node_value = self.hash_list.pop(node)
parent = self.parents[node] if node!='' else ''
if i != KEYLENGTH and node_hash is None:
n = self.get_node(node)
node_hash, node_value = n.get_hash(node, parent)
assert node_hash is not None
if node == '':
self.root_hash = node_hash
self.root_value = node_value
assert self.root_hash is not None
break
# read parent
d = nodes.get(parent)
if d is None:
d = self.get_node(parent)
assert d is not None
# write value into parent
letter = node[len(parent)]
d.set(letter, node_hash, node_value)
nodes[parent] = d
# iterate
grandparent = self.parents[parent] if parent != '' else None
parent_hash, parent_value = d.get_hash(parent, grandparent)
if parent_hash is not None:
self.hash_list[parent] = (parent_hash, parent_value)
for k, v in nodes.iteritems():
self.put_node(k, v)
# cleanup
assert self.hash_list == {}
self.parents = {}
self.skip_batch = {}
def get_path(self, target, new=False):
x = self.db_utxo.get(target)
if not new and x is None:
raise BaseException('key not in tree', target.encode('hex'))
if new and x is not None:
# raise BaseException('key already in tree', target.encode('hex'))
# occurs at block 91880 (duplicate txid)
print_log('key already in tree', target.encode('hex'))
return True
remaining = target
key = ''
path = []
while key != target:
node = self.get_node(key)
if node is None:
break
#raise # should never happen
path.append(key)
c = remaining[0]
if not node.has(c):
break
skip = self.get_skip(key + c)
key = key + c + skip
if not target.startswith(key):
break
remaining = target[len(key):]
return path
def delete_key(self, leaf):
path = self.get_path(leaf)
#print "delete key", leaf.encode('hex'), map(lambda x: x.encode('hex'), path)
s = self.db_utxo.get(leaf)
self.db_utxo.delete(leaf)
if leaf in self.hash_list:
del self.hash_list[leaf]
parent = path[-1]
letter = leaf[len(parent)]
parent_node = self.get_node(parent)
parent_node.remove(letter)
# remove key if it has a single child
if parent_node.is_singleton(parent):
#print "deleting parent", parent.encode('hex')
self.db_utxo.delete(parent)
if parent in self.hash_list:
del self.hash_list[parent]
l = parent_node.get_singleton()
_hash, value = parent_node.get(l)
skip = self.get_skip(parent + l)
otherleaf = parent + l + skip
# update skip value in grand-parent
gp = path[-2]
gp_items = self.get_node(gp)
letter = otherleaf[len(gp)]
new_skip = otherleaf[len(gp)+1:]
gp_items.set(letter, None, 0)
self.set_skip(gp+ letter, new_skip)
#print "gp new_skip", gp.encode('hex'), new_skip.encode('hex')
self.put_node(gp, gp_items)
# note: k is not necessarily a leaf
if len(otherleaf) == KEYLENGTH:
ss = self.db_utxo.get(otherleaf)
_hash, value = otherleaf[20:52], bytes8_to_int(ss[0:8])
else:
_hash, value = None, None
self.update_node_hash(otherleaf, path[:-1], _hash, value)
else:
self.put_node(parent, parent_node)
_hash, value = None, None
self.update_node_hash(parent, path[:-1], _hash, value)
return s
def get_parent(self, x):
p = self.get_path(x)
return p[-1]
def get_root_hash(self):
return self.root_hash if self.root_hash else ''
def batch_write(self):
for db in [self.db_utxo, self.db_addr, self.db_hist, self.db_undo]:
db.write()
def close(self):
for db in [self.db_utxo, self.db_addr, self.db_hist, self.db_undo]:
db.close()
def save_height(self, block_hash, block_height):
self.db_undo.put('height', repr((block_hash, block_height, DB_VERSION)))
def add_to_history(self, addr, tx_hash, tx_pos, value, tx_height):
key = self.address_to_key(addr)
txo = (tx_hash + int_to_hex4(tx_pos)).decode('hex')
# write the new history
self.add_key(key + txo, value, tx_height)
# backlink
self.db_addr.put(txo, addr)
def revert_add_to_history(self, addr, tx_hash, tx_pos, value, tx_height):
key = self.address_to_key(addr)
txo = (tx_hash + int_to_hex4(tx_pos)).decode('hex')
# delete
self.delete_key(key + txo)
# backlink
self.db_addr.delete(txo)
def get_utxo_value(self, addr, txi):
key = self.address_to_key(addr)
leaf = key + txi
s = self.db_utxo.get(leaf)
value = bytes8_to_int(s[0:8])
return value
def set_spent(self, addr, txi, txid, index, height, undo):
key = self.address_to_key(addr)
leaf = key + txi
s = self.delete_key(leaf)
value = bytes8_to_int(s[0:8])
in_height = bytes4_to_int(s[8:12])
undo[leaf] = value, in_height
# delete backlink txi-> addr
self.db_addr.delete(txi)
# add to history
s = self.db_hist.get(addr)
if s is None: s = ''
txo = (txid + int_to_hex4(index) + int_to_hex4(height)).decode('hex')
s += txi + int_to_bytes4(in_height) + txo
s = s[ -80*self.pruning_limit:]
self.db_hist.put(addr, s)
def revert_set_spent(self, addr, txi, undo):
key = self.address_to_key(addr)
leaf = key + txi
# restore backlink
self.db_addr.put(txi, addr)
v, height = undo.pop(leaf)
self.add_key(leaf, v, height)
# revert add to history
s = self.db_hist.get(addr)
# s might be empty if pruning limit was reached
if not s:
return
assert s[-80:-44] == txi
s = s[:-80]
self.db_hist.put(addr, s)
def import_transaction(self, txid, tx, block_height, touched_addr):
        # undo contains the list of pruned items for each address in the tx;
        # 'prev_addr' is a list of the previous addresses
        undo = { 'prev_addr':[] }
prev_addr = []
for i, x in enumerate(tx.get('inputs')):
txi = (x.get('prevout_hash') + int_to_hex4(x.get('prevout_n'))).decode('hex')
addr = self.get_address(txi)
if addr is not None:
self.set_spent(addr, txi, txid, i, block_height, undo)
touched_addr.add(addr)
prev_addr.append(addr)
undo['prev_addr'] = prev_addr
# here I add only the outputs to history; maybe I want to add inputs too (that's in the other loop)
for x in tx.get('outputs'):
addr = x.get('address')
if addr is None: continue
self.add_to_history(addr, txid, x.get('index'), x.get('value'), block_height)
touched_addr.add(addr)
return undo
def revert_transaction(self, txid, tx, block_height, touched_addr, undo):
#print_log("revert tx", txid)
for x in reversed(tx.get('outputs')):
addr = x.get('address')
if addr is None: continue
self.revert_add_to_history(addr, txid, x.get('index'), x.get('value'), block_height)
touched_addr.add(addr)
prev_addr = undo.pop('prev_addr')
for i, x in reversed(list(enumerate(tx.get('inputs')))):
addr = prev_addr[i]
if addr is not None:
txi = (x.get('prevout_hash') + int_to_hex4(x.get('prevout_n'))).decode('hex')
self.revert_set_spent(addr, txi, undo)
touched_addr.add(addr)
assert undo == {}
|
{
"content_hash": "20e5ba5c31271a2a9226a6356ccd103e",
"timestamp": "",
"source": "github",
"line_count": 665,
"max_line_length": 143,
"avg_line_length": 32.25714285714286,
"alnum_prop": 0.5126101347256539,
"repo_name": "pooler/electrum-ltc-server",
"id": "506c74a02fb16bfa13bb451827b14dbc7c8773c8",
"size": "22576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "118574"
},
{
"name": "Shell",
"bytes": "2254"
}
],
"symlink_target": ""
}
|
import unittest
from asp.config import *
class CompilerDetectorTests(unittest.TestCase):
def test_detect(self):
self.assertTrue(CompilerDetector().detect("gcc"))
self.assertFalse(CompilerDetector().detect("lkasdfj"))
class CPUInfoTests(unittest.TestCase):
def test_num_cores(self):
def read_cpu_info(self):
return open("tests/cpuinfo").readlines()
PlatformDetector.read_cpu_info = read_cpu_info
pd = PlatformDetector()
info = pd.get_cpu_info()
self.assertEqual(info['numCores'], 8)
def test_vendor_and_model(self):
def read_cpu_info(self):
return open("tests/cpuinfo").readlines()
PlatformDetector.read_cpu_info = read_cpu_info
pd = PlatformDetector()
info = pd.get_cpu_info()
self.assertEqual(info['vendorID'], "GenuineIntel")
self.assertEqual(info['model'], 30)
self.assertEqual(info['cpuFamily'], 6)
def test_cache_size(self):
def read_cpu_info(self):
return open("tests/cpuinfo").readlines()
PlatformDetector.read_cpu_info = read_cpu_info
pd = PlatformDetector()
info = pd.get_cpu_info()
self.assertEqual(info['cacheSize'], 8192)
def test_capabilities(self):
def read_cpu_info(self):
return open("tests/cpuinfo").readlines()
PlatformDetector.read_cpu_info = read_cpu_info
pd = PlatformDetector()
info = pd.get_cpu_info()
self.assertEqual(info['capabilities'].count("sse"), 1)
def test_compilers(self):
compilers = PlatformDetector().get_compilers()
self.assertTrue("gcc" in compilers)
class GPUInfoTest(unittest.TestCase):
def test_properties(self):
pd = PlatformDetector()
compilers = pd.get_compilers()
if "nvcc" in compilers and pd.get_num_cuda_devices() > 0:
info = {}
pd.set_cuda_device(0)
info = pd.get_cuda_info()
self.assertTrue(info['total_mem'] > 0)
        else: self.assertTrue(True)  # undesirable to have the test fail on machines without GPUs
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "9ab2361d1a955ae23e86c6c94c61e2ce",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 97,
"avg_line_length": 31,
"alnum_prop": 0.6048387096774194,
"repo_name": "richardxia/asp-multilevel-debug",
"id": "8f6ef85b077c414458004dd7461a9ad5dead84ab",
"size": "2232",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/platform_detector_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "102604"
},
{
"name": "Objective-C",
"bytes": "6902"
},
{
"name": "Python",
"bytes": "551438"
},
{
"name": "Shell",
"bytes": "7110"
}
],
"symlink_target": ""
}
|
try:
import smbus
except ImportError:
raise ImportError("python3-smbus not found. Install with 'sudo apt-get install python3-smbus'")
try:
import spidev
except ImportError:
raise ImportError("spidev not found. Visit https://www.abelectronics.co.uk/kb/article/2/spi-and-raspbian-linux-on-a-raspberry-pi for installing spidev'")
import re
import os
import time
import datetime
import sys
import math
import struct
import ctypes
"""
================================================
ABElectronics Expander Pi
Version 1.0 Created 20/05/2014
Version 1.1 16/11/2014 updated code and functions to PEP8 format
Version 1.2 10/06/2017 updated to include additional functions for DAC and RTC
Requires python3 smbus to be installed
================================================
"""
"""
Private Classes
"""
class _ABE_Helpers:
"""
Local Functions used across all Expander Pi classes
"""
def updatebyte(self, byte, bit, value):
""" internal method for setting the value of a single bit
within a byte """
if value == 0:
return byte & ~(1 << bit)
elif value == 1:
return byte | (1 << bit)
def get_smbus(self):
i2c__bus = 1
# detect the device that is being used
device = os.uname()[1]
if (device == "orangepione"): # running on orange pi one
i2c__bus = 0
elif (device == "orangepiplus"): # running on orange pi one
i2c__bus = 0
elif (device == "linaro-alip"): # running on Asus Tinker Board
i2c__bus = 1
elif (device == "raspberrypi"): # running on raspberry pi
# detect i2C port number and assign to i2c__bus
for line in open('/proc/cpuinfo').readlines():
                m = re.match(r'(.*?)\s*:\s*(.*)', line)
if m:
(name, value) = (m.group(1), m.group(2))
if name == "Revision":
if value[-4:] in ('0002', '0003'):
i2c__bus = 0
else:
i2c__bus = 1
break
try:
return smbus.SMBus(i2c__bus)
except IOError:
print ('Could not open the i2c bus.')
print ('Please check that i2c is enabled and python3-smbus and i2c-tools are installed.')
print ('Visit https://www.abelectronics.co.uk/kb/article/1/i2c--smbus-and-raspbian-linux for more information.')
class _Dac_bits(ctypes.LittleEndianStructure):
"""Class to define the DAC command register bitfields.
See Microchip mcp4822 datasheet for more information
"""
_fields_ = [("data", ctypes.c_uint16, 12), #Bits 0:11
("shutdown", ctypes.c_uint16, 1), #Bit 12
("gain", ctypes.c_uint16, 1), #Bit 13
("reserved1", ctypes.c_uint16, 1), #Bit 14
("channel", ctypes.c_uint16, 1) #Bit 15
]
#GA field value lookup. <gainFactor>:<bitfield val>
__ga_field__ = {1:1, 2:0}
def gain_to_field_val(self, gainFactor):
"""Returns bitfield value based on desired gain"""
return self.__ga_field__[gainFactor]
class _Dac_register(ctypes.Union):
"""Union to represent the DAC's command register
See Microchip mcp4822 datasheet for more information
"""
_fields_ = [("bits", _Dac_bits), ("bytes", ctypes.c_uint8 * 2), ("reg", ctypes.c_uint16)]
"""
Public Classes
"""
class ADC:
"""
Based on the Microchip MCP3208
"""
# variables
__adcrefvoltage = 4.096 # reference voltage for the ADC chip.
# Define SPI bus and init
__spiADC = spidev.SpiDev()
__spiADC.open(0, 0)
__spiADC.max_speed_hz = (50000)
# public methods
def read_adc_voltage(self, channel, mode):
"""
Read the voltage from the selected channel on the ADC
Channel = 1 to 8
Mode = 0 or 1 - 0 = single ended, 1 = differential
"""
if (mode < 0) or (mode > 1):
print ('mode needs to be 0 (single ended) or 1 (differential)')
return 0
if (channel > 4) and (mode == 1):
print ('ADC channel needs to be 1 to 4 when using differential mode')
return 0
if ((channel > 8) or (channel < 1)):
print ('ADC channel needs to be 1 to 8')
return 0
raw = self.read_adc_raw(channel, mode)
voltage = (self.__adcrefvoltage / 4096) * raw
return voltage
def read_adc_raw(self, channel, mode):
"""
Read the raw value from the selected channel on the ADC
Channel = 1 to 8
Mode = 0 or 1 - 0 = single ended, 1 = differential
"""
if (mode < 0) or (mode > 1):
print ('mode needs to be 0 (single ended) or 1 (differential)')
return 0
if (channel > 4) and (mode == 1):
print ('ADC channel needs to be 1 to 4 when using differential mode')
return 0
if ((channel > 8) or (channel < 1)):
print ('ADC channel needs to be 1 to 8')
return 0.0
channel = channel - 1
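        # MCP3208 command word: a start bit, a single-ended/differential bit,
        # then 3 channel-select bits (see the Microchip datasheet); the reply
        # carries 12 data bits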
if (mode == 0):
r = self.__spiADC.xfer2([6 + (channel >> 2), (channel & 3) << 6, 0])
if (mode == 1):
r = self.__spiADC.xfer2([4 + (channel >> 2), (channel & 3) << 6, 0])
ret = ((r[1] & 0x0F) << 8) + (r[2])
return ret
def set_adc_refvoltage(self, voltage):
"""
set the reference voltage for the analogue to digital converter.
By default the ADC uses an onboard 4.096V voltage reference. If you
choose to use an external voltage reference you will need to
use this method to set the ADC reference voltage to match the
supplied reference voltage.
The reference voltage must be less than or equal to the voltage on
the Raspberry Pi 5V rail.
"""
if (voltage >= 0.0) and (voltage <= 5.5):
self.__adcrefvoltage = voltage
else:
print ('reference voltage out of range')
return
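# Illustrative usage of the ADC class (example values, not part of the library):
#   adc = ADC()
#   volts = adc.read_adc_voltage(1, 0)  # channel 1, single-ended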
class DAC:
"""
Based on the Microchip MCP4822
Define SPI bus and init
"""
spiDAC = spidev.SpiDev()
spiDAC.open(0, 1)
spiDAC.max_speed_hz = (4000000)
#Max DAC output voltage. Depends on gain factor
#The following table is in the form <gain factor>:<max voltage>
__dacMaxOutput__ = {
1:2.048, #This is Vref
2:4.096 #This is double Vref
}
maxDacVoltage = 2.048
# public methods
def __init__(self, gainFactor = 1):
"""Class Constructor
gainFactor -- Set the DAC's gain factor. The value should
be 1 or 2. Gain factor is used to determine output voltage
from the formula: Vout = G * Vref * D/4096
Where G is gain factor, Vref (for this chip) is 2.048 and
D is the 12-bit digital value
"""
if (gainFactor != 1) and (gainFactor != 2):
print ('Invalid gain factor. Must be 1 or 2')
self.gain = 1
else:
self.gain = gainFactor
self.maxDacVoltage = self.__dacMaxOutput__[self.gain]
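    # Worked example (illustrative): with gainFactor=2, Vout = 2 * 2.048 * D/4096,
    # so set_dac_voltage(1, 1.5) writes the raw value int((1.5 / 2.048) * 4096 / 2) = 1500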
def set_dac_voltage(self, channel, voltage):
"""
set the voltage for the selected channel on the DAC
voltage can be between 0 and 2.047 volts when gain is set to 1 or 4.096 when gain is set to 2
"""
        if ((channel > 2) or (channel < 1)):
            print ('DAC channel needs to be 1 or 2')
            return
if (voltage >= 0.0) and (voltage < self.maxDacVoltage):
rawval = (voltage / 2.048) * 4096 / self.gain
self.set_dac_raw(channel, int(rawval))
else:
print ('Invalid DAC Vout value %f. Must be between 0 and %f (non-inclusive) ' % (voltage, self.maxDacVoltage))
return
def set_dac_raw(self, channel, value):
"""
Set the raw value from the selected channel on the DAC
Channel = 1 or 2
Value between 0 and 4095
"""
reg = _Dac_register()
#Configurable fields
reg.bits.data = value
reg.bits.channel = channel - 1
reg.bits.gain = reg.bits.gain_to_field_val(self.gain)
#Fixed fields:
reg.bits.shutdown = 1 #Active low
#Write to device
self.spiDAC.xfer2([reg.bytes[1], reg.bytes[0]])
return
class IO:
"""
The MCP23017 chip is split into two 8-bit ports. port 0 controls pins
1 to 8 while port 1 controls pins 9 to 16.
When writing to or reading from a port the least significant bit
represents the lowest numbered pin on the selected port.
    """
# Define registers values from datasheet
IODIRA = 0x00 # IO direction A - 1= input 0 = output
IODIRB = 0x01 # IO direction B - 1= input 0 = output
# Input polarity A - If a bit is set, the corresponding GPIO register bit
# will reflect the inverted value on the pin.
IPOLA = 0x02
# Input polarity B - If a bit is set, the corresponding GPIO register bit
# will reflect the inverted value on the pin.
IPOLB = 0x03
# The GPINTEN register controls the interrupt-onchange feature for each
# pin on port A.
GPINTENA = 0x04
# The GPINTEN register controls the interrupt-onchange feature for each
# pin on port B.
GPINTENB = 0x05
# Default value for port A - These bits set the compare value for pins
# configured for interrupt-on-change. If the associated pin level is the
# opposite from the register bit, an interrupt occurs.
DEFVALA = 0x06
# Default value for port B - These bits set the compare value for pins
# configured for interrupt-on-change. If the associated pin level is the
# opposite from the register bit, an interrupt occurs.
DEFVALB = 0x07
# Interrupt control register for port A. If 1 interrupt is fired when the
# pin matches the default value, if 0 the interrupt is fired on state
# change
INTCONA = 0x08
# Interrupt control register for port B. If 1 interrupt is fired when the
# pin matches the default value, if 0 the interrupt is fired on state
# change
INTCONB = 0x09
IOCON = 0x0A # see datasheet for configuration register
GPPUA = 0x0C # pull-up resistors for port A
GPPUB = 0x0D # pull-up resistors for port B
# The INTF register reflects the interrupt condition on the port A pins of
# any pin that is enabled for interrupts. A set bit indicates that the
# associated pin caused the interrupt.
INTFA = 0x0E
# The INTF register reflects the interrupt condition on the port B pins of
# any pin that is enabled for interrupts. A set bit indicates that the
# associated pin caused the interrupt.
INTFB = 0x0F
# The INTCAP register captures the GPIO port A value at the time the
# interrupt occurred.
INTCAPA = 0x10
# The INTCAP register captures the GPIO port B value at the time the
# interrupt occurred.
INTCAPB = 0x11
GPIOA = 0x12 # data port A
GPIOB = 0x13 # data port B
OLATA = 0x14 # output latches A
OLATB = 0x15 # output latches B
# variables
__ioaddress = 0x20 # I2C address
__portA_dir = 0x00 # port a direction
__portB_dir = 0x00 # port b direction
__portA_val = 0x00 # port a value
__portB_val = 0x00 # port b value
__portA_pullup = 0x00 # port a pull-up resistors
__portB_pullup = 0x00 # port a pull-up resistors
__portA_polarity = 0x00 # input polarity for port a
__portB_polarity = 0x00 # input polarity for port b
__intA = 0x00 # interrupt control for port a
__intB = 0x00 # interrupt control for port a
# initial configuration - see IOCON page in the MCP23017 datasheet for
# more information.
__ioconfig = 0x22
__helper = None
__bus = None
def __init__(self):
"""
init object with i2c address, default is 0x20, 0x21 for IOPi board,
load default configuration
"""
self.__helper = _ABE_Helpers()
self.__bus = self.__helper.get_smbus()
self.__bus.write_byte_data(self.__ioaddress, self.IOCON, self.__ioconfig)
self.__portA_val = self.__bus.read_byte_data(self.__ioaddress, self.GPIOA)
self.__portB_val = self.__bus.read_byte_data(self.__ioaddress, self.GPIOB)
self.__bus.write_byte_data(self.__ioaddress, self.IODIRA, 0xFF)
self.__bus.write_byte_data(self.__ioaddress, self.IODIRB, 0xFF)
self.set_port_pullups(0,0x00)
self.set_port_pullups(1,0x00)
self.invert_port(0, 0x00)
self.invert_port(1, 0x00)
return
# local methods
def __checkbit(self, byte, bit):
""" internal method for reading the value of a single bit
within a byte """
if byte & (1 << bit):
return 1
else:
return 0
# public methods
def set_pin_direction(self, pin, direction):
"""
set IO direction for an individual pin
pins 1 to 16
direction 1 = input, 0 = output
"""
pin = pin - 1
if pin < 8:
self.__portA_dir = self.__helper.updatebyte(self.__portA_dir, pin, direction)
self.__bus.write_byte_data(self.__ioaddress, self.IODIRA, self.__portA_dir)
else:
self.__portB_dir = self.__helper.updatebyte(self.__portB_dir, pin - 8, direction)
self.__bus.write_byte_data(self.__ioaddress, self.IODIRB, self.__portB_dir)
return
def set_port_direction(self, port, direction):
"""
set direction for an IO port
port 0 = pins 1 to 8, port 1 = pins 9 to 16
1 = input, 0 = output
"""
if port == 1:
self.__bus.write_byte_data(self.__ioaddress, self.IODIRB, direction)
self.__portB_dir = direction
else:
self.__bus.write_byte_data(self.__ioaddress, self.IODIRA, direction)
self.__portA_dir = direction
return
def set_pin_pullup(self, pin, value):
"""
set the internal 100K pull-up resistors for an individual pin
pins 1 to 16
value 1 = enabled, 0 = disabled
"""
pin = pin - 1
if pin < 8:
self.__portA_pullup = self.__helper.updatebyte(self.__portA_pullup, pin, value)
self.__bus.write_byte_data(self.__ioaddress, self.GPPUA, self.__portA_pullup)
else:
self.__portB_pullup = self.__helper.updatebyte(self.__portB_pullup,pin - 8,value)
self.__bus.write_byte_data(self.__ioaddress, self.GPPUB, self.__portB_pullup)
return
def set_port_pullups(self, port, value):
"""
set the internal 100K pull-up resistors for the selected IO port
"""
        if port == 1:
            self.__portB_pullup = value
            self.__bus.write_byte_data(self.__ioaddress, self.GPPUB, value)
        else:
            self.__portA_pullup = value
            self.__bus.write_byte_data(self.__ioaddress, self.GPPUA, value)
return
def write_pin(self, pin, value):
"""
write to an individual pin 1 - 16
"""
pin = pin - 1
if pin < 8:
self.__portA_val = self.__helper.updatebyte(self.__portA_val, pin, value)
self.__bus.write_byte_data(self.__ioaddress, self.GPIOA, self.__portA_val)
else:
self.__portB_val = self.__helper.updatebyte(self.__portB_val, pin - 8, value)
self.__bus.write_byte_data(self.__ioaddress, self.GPIOB, self.__portB_val)
return
def write_port(self, port, value):
"""
write to all pins on the selected port
port 0 = pins 1 to 8, port 1 = pins 9 to 16
value = number between 0 and 255 or 0x00 and 0xFF
"""
if port == 1:
self.__bus.write_byte_data(self.__ioaddress, self.GPIOB, value)
self.__portB_val = value
else:
self.__bus.write_byte_data(self.__ioaddress, self.GPIOA, value)
self.__portA_val = value
return
def read_pin(self, pin):
"""
read the value of an individual pin 1 - 16
returns 0 = logic level low, 1 = logic level high
"""
pin = pin - 1
if pin < 8:
self.__portA_val = self.__bus.read_byte_data(self.__ioaddress, self.GPIOA)
return self.__checkbit(self.__portA_val, pin)
else:
pin = pin - 8
self.__portB_val = self.__bus.read_byte_data(self.__ioaddress, self.GPIOB)
return self.__checkbit(self.__portB_val, pin)
def read_port(self, port):
"""
read all pins on the selected port
port 0 = pins 1 to 8, port 1 = pins 9 to 16
returns number between 0 and 255 or 0x00 and 0xFF
"""
if port == 1:
self.__portB_val = self.__bus.read_byte_data(self.__ioaddress, self.GPIOB)
return self.__portB_val
else:
self.__portA_val = self.__bus.read_byte_data(self.__ioaddress, self.GPIOA)
return self.__portA_val
def invert_port(self, port, polarity):
"""
invert the polarity of the pins on a selected port
port 0 = pins 1 to 8, port 1 = pins 9 to 16
polarity 0 = same logic state of the input pin, 1 = inverted logic
state of the input pin
"""
if port == 1:
self.__bus.write_byte_data(self.__ioaddress, self.IPOLB, polarity)
self.__portB_polarity = polarity
else:
self.__bus.write_byte_data(self.__ioaddress, self.IPOLA, polarity)
self.__portA_polarity = polarity
return
def invert_pin(self, pin, polarity):
"""
invert the polarity of the selected pin
pins 1 to 16
polarity 0 = same logic state of the input pin, 1 = inverted logic
state of the input pin
"""
pin = pin - 1
if pin < 8:
            self.__portA_polarity = self.__helper.updatebyte(self.__portA_polarity,
                                                             pin,
                                                             polarity)
self.__bus.write_byte_data(self.__ioaddress, self.IPOLA, self.__portA_polarity)
else:
            self.__portB_polarity = self.__helper.updatebyte(self.__portB_polarity,
                                                             pin - 8,
                                                             polarity)
self.__bus.write_byte_data(self.__ioaddress, self.IPOLB, self.__portB_polarity)
return
def mirror_interrupts(self, value):
"""
1 = The INT pins are internally connected, 0 = The INT pins are not
connected. __intA is associated with PortA and __intB is associated
with PortB
"""
        if value == 0:
            self.__ioconfig = self.__helper.updatebyte(self.__ioconfig, 6, 0)
            self.__bus.write_byte_data(self.__ioaddress, self.IOCON, self.__ioconfig)
        if value == 1:
            self.__ioconfig = self.__helper.updatebyte(self.__ioconfig, 6, 1)
            self.__bus.write_byte_data(self.__ioaddress, self.IOCON, self.__ioconfig)
return
def set_interrupt_polarity(self, value):
"""
This sets the polarity of the INT output pins - 1 = Active-high. 0 =
Active-low.
"""
        if value == 0:
            self.__ioconfig = self.__helper.updatebyte(self.__ioconfig, 1, 0)
            self.__bus.write_byte_data(self.__ioaddress, self.IOCON, self.__ioconfig)
        if value == 1:
            self.__ioconfig = self.__helper.updatebyte(self.__ioconfig, 1, 1)
            self.__bus.write_byte_data(self.__ioaddress, self.IOCON, self.__ioconfig)
        return
def set_interrupt_type(self, port, value):
"""
Sets the type of interrupt for each pin on the selected port
1 = interrupt is fired when the pin matches the default value, 0 =
the interrupt is fired on state change
"""
if port == 0:
self.__bus.write_byte_data(self.__ioaddress, self.INTCONA, value)
else:
self.__bus.write_byte_data(self.__ioaddress, self.INTCONB, value)
return
def set_interrupt_defaults(self, port, value):
"""
These bits set the compare value for pins configured for
interrupt-on-change on the selected port.
If the associated pin level is the opposite from the register bit, an
interrupt occurs.
"""
if port == 0:
self.__bus.write_byte_data(self.__ioaddress, self.DEFVALA, value)
else:
self.__bus.write_byte_data(self.__ioaddress, self.DEFVALB, value)
return
def set_interrupt_on_port(self, port, value):
"""
Enable interrupts for the pins on the selected port
port 0 = pins 1 to 8, port 1 = pins 9 to 16
value = number between 0 and 255 or 0x00 and 0xFF
"""
if port == 0:
self.__bus.write_byte_data(self.__ioaddress, self.GPINTENA, value)
self.__intA = value
else:
self.__bus.write_byte_data(self.__ioaddress, self.GPINTENB, value)
self.__intB = value
return
def set_interrupt_on_pin(self, pin, value):
"""
Enable interrupts for the selected pin
Pin = 1 to 16
Value 0 = interrupt disabled, 1 = interrupt enabled
"""
pin = pin - 1
if pin < 8:
self.__intA = self.__helper.updatebyte(self.__intA, pin, value)
self.__bus.write_byte_data(self.__ioaddress, self.GPINTENA, self.__intA)
else:
self.__intB = self.__helper.updatebyte(self.__intB, pin - 8, value)
self.__bus.write_byte_data(self.__ioaddress, self.GPINTENB, self.__intB)
return
def read_interrupt_status(self, port):
"""
read the interrupt status for the pins on the selected port
port 0 = pins 1 to 8, port 1 = pins 9 to 16
"""
if port == 0:
return self.__bus.read_byte_data(self.__ioaddress, self.INTFA)
else:
return self.__bus.read_byte_data(self.__ioaddress, self.INTFB)
def read_interrupt_capture(self, port):
"""
read the value from the selected port at the time of the last
interrupt trigger
port 0 = pins 1 to 8, port 1 = pins 9 to 16
"""
if port == 0:
return self.__bus.read_byte_data(self.__ioaddress, self.INTCAPA)
else:
return self.__bus.read_byte_data(self.__ioaddress, self.INTCAPB)
def reset_interrupts(self):
"""
Reset the interrupts A and B to 0
"""
self.read_interrupt_capture(0)
self.read_interrupt_capture(1)
return
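# Illustrative usage sketch (added for clarity, not part of the library):
# a typical session with the IO expander class above might look like the
# following. The class and constructor names are assumed from this file's
# library; pin numbers and values are hypothetical.
#
#     io = IO()
#     io.set_port_direction(0, 0x00)   # pins 1 to 8 as outputs
#     io.write_pin(1, 1)               # drive pin 1 high
#     io.set_pin_direction(9, 1)       # pin 9 as an input
#     io.set_pin_pullup(9, 1)          # enable its 100K pull-up
#     state = io.read_pin(9)           # returns 0 or 1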
class RTC:
"""
Based on the Maxim DS1307
Define registers values from datasheet
"""
SECONDS = 0x00
MINUTES = 0x01
HOURS = 0x02
DAYOFWEEK = 0x03
DAY = 0x04
MONTH = 0x05
YEAR = 0x06
CONTROL = 0x07
# variables
__rtcaddress = 0x68 # I2C address
# initial configuration - square wave and output disabled, frequency set
# to 32.768KHz.
__rtcconfig = 0x03
# the DS1307 does not store the current century so that has to be added on
# manually.
__century = 2000
__helper = None
__bus = None
# local methods
def __init__(self):
self.__helper = _ABE_Helpers()
self.__bus = self.__helper.get_smbus()
self.__bus.write_byte_data(self.__rtcaddress, self.CONTROL, self.__rtcconfig)
return
def __bcd_to_dec(self,bcd):
"""
internal method for converting BCD formatted number to decimal
"""
dec = 0
for a in (bcd >> 4, bcd):
            for b in (1, 2, 4, 8):
if a & 1:
dec += b
a >>= 1
dec *= 10
        return dec // 10
def __dec_to_bcd(self,dec):
"""
internal method for converting decimal formatted number to BCD
"""
bcd = 0
for a in (dec // 10, dec % 10):
for b in (8, 4, 2, 1):
if a >= b:
bcd += 1
a -= b
bcd <<= 1
return bcd >> 1
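    # Worked example (illustrative): __dec_to_bcd(59) packs the tens digit 5
    # (0b0101) into the high nibble and the ones digit 9 (0b1001) into the
    # low nibble, giving 0x59; __bcd_to_dec(0x59) reverses this back to 59.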
def __get_century(self, val):
if len(val) > 2:
y = val[0] + val[1]
self.__century = int(y) * 100
return
# public methods
def set_date(self, date):
"""
set the date and time on the RTC
date must be in ISO 8601 format - YYYY-MM-DDTHH:MM:SS
"""
d = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S")
self.__get_century(date)
self.__bus.write_byte_data(self.__rtcaddress,
self.SECONDS,
self.__dec_to_bcd(d.second))
self.__bus.write_byte_data(self.__rtcaddress,
self.MINUTES,
self.__dec_to_bcd(d.minute))
self.__bus.write_byte_data(self.__rtcaddress,
self.HOURS,
self.__dec_to_bcd(d.hour))
self.__bus.write_byte_data(self.__rtcaddress,
self.DAYOFWEEK,
self.__dec_to_bcd(d.weekday()))
self.__bus.write_byte_data(self.__rtcaddress,
self.DAY,
self.__dec_to_bcd(d.day))
self.__bus.write_byte_data(self.__rtcaddress,
self.MONTH,
self.__dec_to_bcd(d.month))
self.__bus.write_byte_data(self.__rtcaddress,
self.YEAR,
self.__dec_to_bcd(d.year - self.__century))
return
def read_date(self):
"""
read the date and time from the RTC in ISO 8601 format -
YYYY-MM-DDTHH:MM:SS
"""
seconds, minutes, hours, dayofweek, day, month, year \
= self.__bus.read_i2c_block_data(self.__rtcaddress, 0, 7)
date = ("%02d-%02d-%02dT%02d:%02d:%02d " % (self.__bcd_to_dec(year) + self.__century,
self.__bcd_to_dec(month),
self.__bcd_to_dec(day),
self.__bcd_to_dec(hours),
self.__bcd_to_dec(minutes),
self.__bcd_to_dec(seconds)))
return date
def enable_output(self):
"""
Enable the output pin
"""
        self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 7, 1)
        self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 4, 1)
        self.__bus.write_byte_data(self.__rtcaddress, self.CONTROL, self.__rtcconfig)
return
def disable_output(self):
"""
Disable the output pin
"""
        self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 7, 0)
        self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 4, 0)
        self.__bus.write_byte_data(self.__rtcaddress, self.CONTROL, self.__rtcconfig)
return
def set_frequency(self, frequency):
"""
set the frequency of the output pin square-wave
options are: 1 = 1Hz, 2 = 4.096KHz, 3 = 8.192KHz, 4 = 32.768KHz
"""
        if frequency == 1:
            self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 0)
            self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 0)
        if frequency == 2:
            self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 1)
            self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 0)
        if frequency == 3:
            self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 0)
            self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 1)
        if frequency == 4:
            self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 1)
            self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 1)
        self.__bus.write_byte_data(self.__rtcaddress, self.CONTROL, self.__rtcconfig)
return
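    # Illustrative usage (values hypothetical): to put a 32.768KHz square
    # wave on the output pin, combine the two calls:
    #
    #     rtc = RTC()
    #     rtc.set_frequency(4)   # 4 = 32.768KHz
    #     rtc.enable_output()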
def write_memory(self, address, valuearray):
"""
write to the memory on the ds1307
the ds1307 contains 56-Byte, battery-backed RAM with Unlimited Writes
variables are:
address: 0x08 to 0x3F
valuearray: byte array containing data to be written to memory
"""
if address >= 0x08 and address <= 0x3F:
if address + len(valuearray) <= 0x3F:
self.__bus.write_i2c_block_data(self.__rtcaddress, address, valuearray)
else:
print ('memory overflow error: address + length exceeds 0x3F')
else:
print ('address out of range')
def read_memory(self, address, length):
"""
read from the memory on the ds1307
the ds1307 contains 56-Byte, battery-backed RAM with Unlimited Writes
variables are:
address: 0x08 to 0x3F
        length: up to 32 bytes. length cannot exceed the available address space.
"""
if address >= 0x08 and address <= 0x3F:
if address <= (0x3F - length):
return self.__bus.read_i2c_block_data(self.__rtcaddress, address, length)
else:
print ('memory overflow error: address + length exceeds 0x3F')
else:
print ('address out of range' 'address out of range')
|
{
"content_hash": "68b3ea6c8cf9ca265d60b0e1163a6b94",
"timestamp": "",
"source": "github",
"line_count": 841,
"max_line_length": 157,
"avg_line_length": 34.902497027348396,
"alnum_prop": 0.5636221169897455,
"repo_name": "abelectronicsuk/ABElectronics_Python3_Libraries",
"id": "8ee340d28477b74233fc66f1cfdce10e3afc6040",
"size": "29372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ExpanderPi/ABE_ExpanderPi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "17962"
},
{
"name": "Python",
"bytes": "158850"
}
],
"symlink_target": ""
}
|
import os
current_path = os.path.abspath(os.path.curdir)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db',
}
}
USE_TZ = True
SITE_ID = 1
SECRET_KEY = 'keepitsecretkeepitsafe'
ROOT_URLCONF = 'test_app.urls'
STATIC_URL = '/static/'
MEDIA_ROOT = '%s/files' % current_path
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_nose',
'south',
'django_extensions',
'jingo',
'mptt',
'crispy_forms',
'accounts',
'base',
'tools',
'hubs',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ('--nocapture', )
SOUTH_TESTS_MIGRATE = False
CELERY_ALWAYS_EAGER = True
COMPRESS_ROOT = STATIC_URL
|
{
"content_hash": "9dd072fa272d6768a426d1ceee5b2004",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 47,
"avg_line_length": 20.02173913043478,
"alnum_prop": 0.6351791530944625,
"repo_name": "toolhub/toolhub.co",
"id": "36ee83cec5bf991cd6e93b537e7c3836f5e77da4",
"size": "921",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "test_app/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9685"
},
{
"name": "JavaScript",
"bytes": "2800"
},
{
"name": "Python",
"bytes": "123001"
},
{
"name": "Shell",
"bytes": "7558"
}
],
"symlink_target": ""
}
|
"""
This code is adapted from Deep Learning Tutorials
http://deeplearning.net/tutorial/logreg.html
Copyright (c) 2008-2013, Theano Development Team. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
Neither the name of Theano nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
This tutorial introduces logistic regression using Theano and stochastic
gradient descent.
Logistic regression is a probabilistic, linear classifier. It is parametrized
by a weight matrix :math:`W` and a bias vector :math:`b`. Classification is
done by projecting data points onto a set of hyperplanes, the distance to
which is used to determine a class membership probability.
Mathematically, this can be written as:
.. math::
P(Y=i|x, W,b) &= softmax_i(W x + b) \\
&= \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}}
The output of the model or prediction is then done by taking the argmax of
the vector whose i'th element is P(Y=i|x).
.. math::
y_{pred} = argmax_i P(Y=i|x,W,b)
This tutorial presents a stochastic gradient descent optimization method
suitable for large datasets.
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 4.3.2
"""
from __future__ import print_function
__docformat__ = 'restructedtext en'
import six.moves.cPickle as pickle
import gzip
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)))
import timeit
import numpy
import theano
import theano.tensor as T
# MULTIVERSO: import multiverso
import multiverso as mv
# MULTIVERSO: the sharedvar in theano_ext acts same like Theano's
# sharedVariables. But it use multiverso as the backend
from multiverso.theano_ext import sharedvar
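# Illustrative-only sketch (not part of the original tutorial): the
# prediction rule from the module docstring written with plain numpy.
# The function and its argument names are hypothetical and unused below.
def _softmax_predict_demo(x_row, W, b):
    """Return argmax_i P(Y=i|x) for one input row (illustration only)."""
    scores = numpy.dot(x_row, W) + b
    p_y_given_x = numpy.exp(scores - scores.max())  # shift for numerical stability
    p_y_given_x = p_y_given_x / p_y_given_x.sum()
    return int(numpy.argmax(p_y_given_x))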
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W`
and bias vector :math:`b`. Classification is done by projecting data
points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, input, n_in, n_out):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# start-snippet-1
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
# MULTIVERSO: replace the shared variable with mv_shared
self.W = sharedvar.mv_shared(
value=numpy.zeros(
(n_in, n_out),
dtype=theano.config.floatX
),
name='W',
borrow=True
)
# initialize the biases b as a vector of n_out 0s
# MULTIVERSO: replace the shared variable with mv_shared
self.b = sharedvar.mv_shared(
value=numpy.zeros(
(n_out,),
dtype=theano.config.floatX
),
name='b',
borrow=True
)
# symbolic expression for computing the matrix of class-membership
# probabilities
# Where:
# W is a matrix where column-k represent the separation hyperplane for
# class-k
# x is a matrix where row-j represents input training sample-j
# b is a vector where element-k represent the free parameter of
# hyperplane-k
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# symbolic description of how to compute prediction as class whose
# probability is maximal
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# end-snippet-1
# parameters of the model
self.params = [self.W, self.b]
# keep track of model input
self.input = input
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|}
\log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
# start-snippet-2
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
# end-snippet-2
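    # Concrete illustration of the indexing trick above (numbers are
    # hypothetical): with a minibatch of n=3 examples and y = [2, 0, 1],
    # LP[T.arange(3), y] selects [LP[0,2], LP[1,0], LP[2,1]], i.e. the
    # log-probability each example assigns to its own correct label.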
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError(
'y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type)
)
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
def load_data(dataset):
''' Loads the dataset
:type dataset: string
:param dataset: the path to the dataset (here MNIST)
'''
#############
# LOAD DATA #
#############
# Download the MNIST dataset if it is not present
data_dir, data_file = os.path.split(dataset)
if data_dir == "" and not os.path.isfile(dataset):
# Check if dataset is in the data directory.
new_dir = os.path.join(
os.path.split(__file__)[0],
"..",
"data",
)
if not os.path.exists(new_dir):
os.mkdir(new_dir)
new_path = os.path.join(new_dir, dataset)
if os.path.isfile(new_path) or data_file == 'mnist.pkl.gz':
dataset = new_path
if (not os.path.isfile(dataset)) and data_file == 'mnist.pkl.gz':
from six.moves import urllib
origin = (
'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
)
print('Downloading data from %s' % origin)
urllib.request.urlretrieve(origin, dataset)
print('... loading data')
# Load the dataset
with gzip.open(dataset, 'rb') as f:
try:
train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
except:
train_set, valid_set, test_set = pickle.load(f)
# train_set, valid_set, test_set format: tuple(input, target)
# input is a numpy.ndarray of 2 dimensions (a matrix)
# where each row corresponds to an example. target is a
# numpy.ndarray of 1 dimension (vector) that has the same length as
# the number of rows in the input. It should give the target
# to the example with the same index in the input.
def shared_dataset(data_xy, borrow=True):
""" Function that loads the dataset into shared variables
The reason we store our dataset in shared variables is to allow
Theano to copy it into the GPU memory (when code is run on GPU).
Since copying data into the GPU is slow, copying a minibatch everytime
is needed (the default behaviour if the data is not in a shared
variable) would lead to a large decrease in performance.
"""
data_x, data_y = data_xy
shared_x = theano.shared(numpy.asarray(data_x,
dtype=theano.config.floatX),
borrow=borrow)
shared_y = theano.shared(numpy.asarray(data_y,
dtype=theano.config.floatX),
borrow=borrow)
# When storing data on the GPU it has to be stored as floats
# therefore we will store the labels as ``floatX`` as well
# (``shared_y`` does exactly that). But during our computations
# we need them as ints (we use labels as index, and if they are
# floats it doesn't make sense) therefore instead of returning
# ``shared_y`` we will have to cast it to int. This little hack
        # lets us get around this issue
return shared_x, T.cast(shared_y, 'int32')
test_set_x, test_set_y = shared_dataset(test_set)
valid_set_x, valid_set_y = shared_dataset(valid_set)
train_set_x, train_set_y = shared_dataset(train_set)
rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),
(test_set_x, test_set_y)]
return rval
def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,
dataset='mnist.pkl.gz',
batch_size=600):
"""
Demonstrate stochastic gradient descent optimization of a log-linear
model
This is demonstrated on MNIST.
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: the path of the MNIST dataset file from
http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size
######################
# BUILD ACTUAL MODEL #
######################
print('... building the model')
# MULTIVERSO: you should call mv.init before call multiverso apis
mv.init()
# MULTIVERSO: every process has distinct worker id
worker_id = mv.worker_id()
# MULTIVERSO: mv.workers_num will return the number of workers
total_worker = mv.workers_num()
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
# generate symbolic variables for input (x and y represent a
# minibatch)
x = T.matrix('x') # data, presented as rasterized images
y = T.ivector('y') # labels, presented as 1D vector of [int] labels
# construct the logistic regression class
# Each MNIST image has size 28*28
classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)
# the cost we minimize during training is the negative log likelihood of
# the model in symbolic format
cost = classifier.negative_log_likelihood(y)
# compiling a Theano function that computes the mistakes that are made by
# the model on a minibatch
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size: (index + 1) * batch_size],
y: test_set_y[index * batch_size: (index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size: (index + 1) * batch_size],
y: valid_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)
# start-snippet-3
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
(classifier.b, classifier.b - learning_rate * g_b)]
# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-3
###############
# TRAIN MODEL #
###############
print('... training the model')
validation_frequency = n_train_batches
start_time = timeit.default_timer()
done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in range(n_train_batches):
# MULTIVERSO: we distribute the batches to different workers.
# A worker will only train batches belonged to itself
if minibatch_index % total_worker == worker_id:
minibatch_avg_cost = train_model(minibatch_index)
# MULTIVERSO: when you want to commit all the delta of
# parameters produced by mv_shared and update the latest
# parameters from parameter server, you can call this function to
# synchronize the values
sharedvar.sync_all_mv_shared_vars()
iter = (epoch - 1) * n_train_batches + minibatch_index
# MULTIVERSO: only master worker will output the model
if mv.is_master_worker() and (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i)
for i in range(n_valid_batches)]
validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
validation_loss * 100.
)
)
# MULTIVERSO: all the workers will synchronize at the place you call barrier
mv.barrier()
# MULTIVERSO: You should make sure only one process will output the result.
# Otherwise results will be outputted repeatedly
if mv.is_master_worker():
end_time = timeit.default_timer()
test_losses = [test_model(i)
for i in range(n_test_batches)]
test_score = numpy.mean(test_losses)
print(
(
'Optimization complete with validation score of %f %%,'
'with test performance %f %%'
)
% (validation_loss * 100., test_score * 100.)
)
        print('The code ran for %d epochs, with %f epochs/sec' % (
epoch, 1. * epoch / (end_time - start_time)))
print(('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.1fs' % ((end_time - start_time))), file=sys.stderr)
# save the model
with open('model.pkl', 'wb') as f:
pickle.dump(classifier, f)
# MULTIVERSO: You must call shutdown at the end of the file
mv.shutdown()
def predict():
"""
An example of how to load a trained model and use it
to predict labels.
"""
# load the saved model
    classifier = pickle.load(open('model.pkl', 'rb'))
# compile a predictor function
predict_model = theano.function(
inputs=[classifier.input],
outputs=classifier.y_pred)
    # We can test it on some examples from the test set
dataset = 'mnist.pkl.gz'
datasets = load_data(dataset)
test_set_x, test_set_y = datasets[2]
test_set_x = test_set_x.get_value()
predicted_values = predict_model(test_set_x[:10])
print("Predicted values for the first 10 examples in test set:")
print(predicted_values)
if __name__ == '__main__':
sgd_optimization_mnist(n_epochs=100)
|
{
"content_hash": "e26a68550a86bd05d1b67ccafd937b80",
"timestamp": "",
"source": "github",
"line_count": 504,
"max_line_length": 105,
"avg_line_length": 37.226190476190474,
"alnum_prop": 0.6132075471698113,
"repo_name": "zhengsx/multiverso",
"id": "cc81373445fec6197c5e2c5bdda21dfe7c8f41a9",
"size": "18808",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "binding/python/examples/theano/logistic_regression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1116"
},
{
"name": "C",
"bytes": "2237"
},
{
"name": "C++",
"bytes": "445808"
},
{
"name": "CMake",
"bytes": "7526"
},
{
"name": "Lua",
"bytes": "13061"
},
{
"name": "Makefile",
"bytes": "1223"
},
{
"name": "PowerShell",
"bytes": "77"
},
{
"name": "Python",
"bytes": "84201"
},
{
"name": "Shell",
"bytes": "784"
}
],
"symlink_target": ""
}
|
"""
NLTK corpus readers. The modules in this package provide functions
that can be used to read corpus fileids in a variety of formats. These
functions can be used to read both the corpus fileids that are
distributed in the NLTK corpus package, and corpus fileids that are part
of external corpora.
Corpus Reader Functions
=======================
Each corpus module defines one or more "corpus reader functions",
which can be used to read documents from that corpus. These functions
take an argument, ``item``, which is used to indicate which document
should be read from the corpus:
- If ``item`` is one of the unique identifiers listed in the corpus
module's ``items`` variable, then the corresponding document will
be loaded from the NLTK corpus package.
- If ``item`` is a fileid, then that file will be read.
Additionally, corpus reader functions can be given lists of item
names; in which case, they will return a concatenation of the
corresponding documents.
Corpus reader functions are named based on the type of information
they return. Some common examples, and their return types, are:
- words(): list of str
- sents(): list of (list of str)
- paras(): list of (list of (list of str))
- tagged_words(): list of (str,str) tuple
- tagged_sents(): list of (list of (str,str))
- tagged_paras(): list of (list of (list of (str,str)))
- chunked_sents(): list of (Tree w/ (str,str) leaves)
- parsed_sents(): list of (Tree with str leaves)
- parsed_paras(): list of (list of (Tree with str leaves))
- xml(): A single xml ElementTree
- raw(): unprocessed corpus contents
For example, to read a list of the words in the Brown Corpus, use
``nltk.corpus.brown.words()``:
>>> from nltk.corpus import brown
>>> print(", ".join(brown.words()))
The, Fulton, County, Grand, Jury, said, ...
"""
from nltk.corpus.reader.plaintext import *
from nltk.corpus.reader.util import *
from nltk.corpus.reader.api import *
from nltk.corpus.reader.tagged import *
from nltk.corpus.reader.cmudict import *
from nltk.corpus.reader.conll import *
from nltk.corpus.reader.chunked import *
from nltk.corpus.reader.wordlist import *
from nltk.corpus.reader.xmldocs import *
from nltk.corpus.reader.ppattach import *
from nltk.corpus.reader.senseval import *
from nltk.corpus.reader.ieer import *
from nltk.corpus.reader.sinica_treebank import *
from nltk.corpus.reader.bracket_parse import *
from nltk.corpus.reader.indian import *
from nltk.corpus.reader.toolbox import *
from nltk.corpus.reader.timit import *
from nltk.corpus.reader.ycoe import *
from nltk.corpus.reader.rte import *
from nltk.corpus.reader.string_category import *
from nltk.corpus.reader.propbank import *
from nltk.corpus.reader.verbnet import *
from nltk.corpus.reader.bnc import *
from nltk.corpus.reader.nps_chat import *
from nltk.corpus.reader.wordnet import *
from nltk.corpus.reader.switchboard import *
from nltk.corpus.reader.dependency import *
from nltk.corpus.reader.nombank import *
from nltk.corpus.reader.ipipan import *
from nltk.corpus.reader.pl196x import *
from nltk.corpus.reader.knbc import *
from nltk.corpus.reader.chasen import *
from nltk.corpus.reader.childes import *
from nltk.corpus.reader.aligned import *
from nltk.corpus.reader.lin import *
from nltk.corpus.reader.semcor import *
from nltk.corpus.reader.framenet import *
from nltk.corpus.reader.udhr import *
from nltk.corpus.reader.sentiwordnet import *
from nltk.corpus.reader.nkjp import *
# Make sure that nltk.corpus.reader.bracket_parse gives the module, not
# the function bracket_parse() defined in nltk.tree:
from nltk.corpus.reader import bracket_parse
__all__ = [
'CorpusReader', 'CategorizedCorpusReader',
'PlaintextCorpusReader', 'find_corpus_fileids',
'TaggedCorpusReader', 'CMUDictCorpusReader',
'ConllChunkCorpusReader', 'WordListCorpusReader',
'PPAttachmentCorpusReader', 'SensevalCorpusReader',
'IEERCorpusReader', 'ChunkedCorpusReader',
'SinicaTreebankCorpusReader', 'BracketParseCorpusReader',
'IndianCorpusReader', 'ToolboxCorpusReader',
'TimitCorpusReader', 'YCOECorpusReader',
'MacMorphoCorpusReader', 'SyntaxCorpusReader',
'AlpinoCorpusReader', 'RTECorpusReader',
    'StringCategoryCorpusReader', 'EuroparlCorpusReader',
'CategorizedBracketParseCorpusReader',
'CategorizedTaggedCorpusReader',
'CategorizedPlaintextCorpusReader',
'PortugueseCategorizedPlaintextCorpusReader',
'tagged_treebank_para_block_reader',
'PropbankCorpusReader', 'VerbnetCorpusReader',
'BNCCorpusReader', 'ConllCorpusReader',
'XMLCorpusReader', 'NPSChatCorpusReader',
'SwadeshCorpusReader', 'WordNetCorpusReader',
'WordNetICCorpusReader', 'SwitchboardCorpusReader',
'DependencyCorpusReader', 'NombankCorpusReader',
'IPIPANCorpusReader', 'Pl196xCorpusReader',
'TEICorpusView', 'KNBCorpusReader', 'ChasenCorpusReader',
'CHILDESCorpusReader', 'AlignedCorpusReader',
'TimitTaggedCorpusReader', 'LinThesaurusCorpusReader',
'SemcorCorpusReader', 'FramenetCorpusReader', 'UdhrCorpusReader',
    'SentiWordNetCorpusReader', 'SentiSynset',
'NKJPCorpusReader'
]
|
{
"content_hash": "709d7002a92580b85154b33b9d52715a",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 72,
"avg_line_length": 41.448,
"alnum_prop": 0.7598919127581548,
"repo_name": "devs4v/devs4v-information-retrieval15",
"id": "e48be766bf67a2881591035b2cab4e584961f243",
"size": "5424",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "project/venv/lib/python2.7/site-packages/nltk/corpus/reader/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5652"
},
{
"name": "C++",
"bytes": "46179369"
},
{
"name": "CSS",
"bytes": "48509"
},
{
"name": "GAP",
"bytes": "10699"
},
{
"name": "Gnuplot",
"bytes": "2444"
},
{
"name": "Groff",
"bytes": "22"
},
{
"name": "HTML",
"bytes": "154190600"
},
{
"name": "Java",
"bytes": "28482713"
},
{
"name": "JavaScript",
"bytes": "125675"
},
{
"name": "Makefile",
"bytes": "14512"
},
{
"name": "Perl",
"bytes": "63759"
},
{
"name": "Python",
"bytes": "11523468"
},
{
"name": "Shell",
"bytes": "4083"
},
{
"name": "XSLT",
"bytes": "14748"
}
],
"symlink_target": ""
}
|
import unittest
from CelestialMechanics.orbits.hyperbola import angle_asymptotic
class MyTestCase(unittest.TestCase):
    def test_angle_asymptotic(self):
angle1, angle2 = angle_asymptotic(1.5)
self.assertAlmostEqual(131.81031489577862, angle1)
self.assertAlmostEqual(228.18968510422138, angle2)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "9a026cc5ea1338eebd6d26b325dcc0d9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 64,
"avg_line_length": 26.071428571428573,
"alnum_prop": 0.7178082191780822,
"repo_name": "Camiloasc1/AstronomyUNAL",
"id": "4e549e1f581cd03b7075950c9350d1317b727c9a",
"size": "365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CelestialMechanics/orbits/test/test_hyperbola.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85858"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
from rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token
from api import views
urlpatterns = format_suffix_patterns([
# swagger documentation url
url(r'^docs/', include('rest_framework_swagger.urls')),
# login and register url
url(r'^auth/register$',
views.UserList.as_view(),
name='user-register'),
# url(r'^auth/login/',
# authtoken_views.obtain_auth_token,
# name='user-login'),
url(r'^auth/login', obtain_jwt_token, name='user-login'),
url(r'^auth/api-token-refresh/', refresh_jwt_token),
url(r'^users/(?P<pk>[0-9]+)/$',
views.UserDetail.as_view(),
name='user-detail'),
# bucketlist related urls
url(r'^bucketlists/$',
views.BucketListAll.as_view(),
name='bucketlist-list'),
url(r'^bucketlists/(?P<pk>[0-9]+)$',
views.BucketListDetail.as_view(),
name='bucketlist-detail'),
# bucketlist item related urls
url(r'^bucketlists/(?P<bucketlist>[0-9]+)/items/$',
views.BucketListItemAll.as_view(),
name='item-list'),
url(r'^bucketlists/(?P<bucketlist>[0-9]+)/items/'
'(?P<pk>[0-9]+)$', views.BucketListItemDetail.as_view(),
name='item-detail'),
])
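# Illustrative reverse() lookups for the names above (paths shown relative to
# wherever this module is included; the pk values are hypothetical):
#
#     reverse('bucketlist-detail', kwargs={'pk': 1})
#     # -> 'bucketlists/1'
#     reverse('item-detail', kwargs={'bucketlist': 1, 'pk': 2})
#     # -> 'bucketlists/1/items/2'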
|
{
"content_hash": "6aaf3f9196e4dddaec1aadcfa70cb9b2",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 72,
"avg_line_length": 33.475,
"alnum_prop": 0.6250933532486931,
"repo_name": "andela-cdike/django_bucketlist_app",
"id": "1bfbab4e100be92ea29cfdcda3697ff189fdff7d",
"size": "1339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bucketlist/api/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8413"
},
{
"name": "HTML",
"bytes": "10033"
},
{
"name": "JavaScript",
"bytes": "72826"
},
{
"name": "Python",
"bytes": "52949"
}
],
"symlink_target": ""
}
|
from runner.koan import *
class AboutClasses(Koan):
class Dog(object):
"Dogs need regular walkies. Never, ever let them drive."
def test_instances_of_classes_can_be_created_adding_parentheses(self):
fido = self.Dog()
self.assertEqual(__, type(fido).__name__)
def test_classes_have_docstrings(self):
self.assertMatch(__, self.Dog.__doc__)
# ------------------------------------------------------------------
class Dog2(object):
def __init__(self):
self._name = 'Paul'
def set_name(self, a_name):
self._name = a_name
def test_init_method_is_the_constructor(self):
dog = self.Dog2()
self.assertEqual(__, dog._name)
def test_private_attributes_are_not_really_private(self):
dog = self.Dog2()
dog.set_name("Fido")
self.assertEqual(__, dog._name)
# The _ prefix in _name implies private ownership, but nothing is truly
# private in Python.
def test_you_can_also_access_the_value_out_using_getattr_and_dict(self):
fido = self.Dog2()
fido.set_name("Fido")
self.assertEqual(__, getattr(fido, "_name"))
# getattr(), setattr() and delattr() are a way of accessing attributes
# by method rather than through assignment operators
self.assertEqual(__, fido.__dict__["_name"])
# Yes, this works here, but don't rely on the __dict__ object! Some
# class implementations use optimization which result in __dict__ not
# showing everything.
# ------------------------------------------------------------------
class Dog3(object):
def __init__(self):
self._name = None
def set_name(self, a_name):
self._name = a_name
def get_name(self):
return self._name
name = property(get_name, set_name)
def test_that_name_can_be_read_as_a_property(self):
fido = self.Dog3()
fido.set_name("Fido")
self.assertEqual(__, fido.get_name()) # access as method
self.assertEqual(__, fido.name) # access as property
# ------------------------------------------------------------------
class Dog4(object):
def __init__(self):
self._name = None
@property
def name(self):
return self._name
@name.setter
def name(self, a_name):
self._name = a_name
def test_creating_properties_with_decorators_is_slightly_easier(self):
fido = self.Dog4()
fido.name = "Fido"
self.assertEqual(__, fido.name)
# ------------------------------------------------------------------
class Dog5(object):
def __init__(self, initial_name):
self._name = initial_name
@property
def name(self):
return self._name
def test_init_provides_initial_values_for_instance_variables(self):
fido = self.Dog5("Fido")
self.assertEqual(__, fido.name)
def test_args_must_match_init(self):
self.assertRaises(___, self.Dog5) # Evaluates self.Dog5()
# THINK ABOUT IT:
# Why is this so?
    def test_different_objects_have_different_instance_variables(self):
fido = self.Dog5("Fido")
rover = self.Dog5("Rover")
self.assertEqual(____, rover.name == fido.name)
# ------------------------------------------------------------------
class Dog6(object):
def __init__(self, initial_name):
self._name = initial_name
def get_self(self):
return self
def __str__(self):
return __
def __repr__(self):
return "<Dog named '" + self._name + "'>"
def test_inside_a_method_self_refers_to_the_containing_object(self):
fido = self.Dog6("Fido")
self.assertEqual(__, fido.get_self()) # Not a string!
def test_str_provides_a_string_version_of_the_object(self):
fido = self.Dog6("Fido")
self.assertEqual("Fido", str(fido))
def test_str_is_used_explicitly_in_string_interpolation(self):
fido = self.Dog6("Fido")
self.assertEqual(__, "My dog is " + str(fido))
def test_repr_provides_a_more_complete_string_version(self):
fido = self.Dog6("Fido")
self.assertEqual(__, repr(fido))
def test_all_objects_support_str_and_repr(self):
seq = [1, 2, 3]
self.assertEqual(__, str(seq))
self.assertEqual(__, repr(seq))
self.assertEqual(__, str("STRING"))
self.assertEqual(__, repr("STRING"))
|
{
"content_hash": "0a85d8fb2b4c897fa462ef4cca135c21",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 79,
"avg_line_length": 29.954545454545453,
"alnum_prop": 0.5289399523086928,
"repo_name": "brainstorm/python_koans",
"id": "564554bbad66969d709225ff8858c2045e0f3d83",
"size": "4660",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "python2/koans/about_classes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "325345"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
}
|
import txaio
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
from autobahn.wamp.types import SubscribeOptions, RegisterOptions
from autobahn.wamp.cryptobox import KeyRing, Key
from autobahn.wamp.exception import ApplicationError
from sample_keys import PRIVKEY, RESPONDER_PRIV, ORIGINATOR_PUB
class Component2(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
self.log.info('session joined: {details}', details=details)
# setup application payload end-to-end encryption ("WAMP-cryptobox")
# when a keyring was set, end-to-end encryption is performed automatically
if False:
# this is simplest keyring: for all URIs, use one key for both
# originators and responders.
keyring = KeyRing(PRIVKEY)
else:
# this is a more specialized keyring: we only make URIs starting
# with "com.myapp.encrypted." encrypted, and only with private key
# for originator (= this session, as it is "calling" and "publishing")
keyring = KeyRing()
            # since we want to act as "callee" and "subscriber", we are a "responder",
            # and responders need the responder private key. however, we don't act as "callers"
            # or "publishers", and hence can get away with the public key of the originator only!
key = Key(originator_pub=ORIGINATOR_PUB, responder_priv=RESPONDER_PRIV)
keyring.set_key('com.myapp.encrypted.', key)
self.set_payload_codec(keyring)
# now start the testing ..
def add2(a, b, details=None):
self.log.info('call received: a={a}, b={b}, details={details}', a=a, b=b, details=details)
# when the procedure args were encrypted, the result will be always encrypted too!
return a + b
options = RegisterOptions(details_arg='details')
reg1 = yield self.register(add2, 'com.myapp.add2', options=options)
reg2 = yield self.register(add2, 'com.myapp.encrypted.add2', options=options)
def failme(encrypted_error, details=None):
# IMPORTANT: independent of whether the "failme" procedure args were encrypted or not,
            # an error returned to the caller will be encrypted or not depending solely
# on the error URI!
if encrypted_error:
raise ApplicationError("com.myapp.encrypted.error1", custom1=23, custom2='Hello')
else:
raise ApplicationError("com.myapp.error1", custom1=23, custom2='Hello')
reg3 = yield self.register(failme, 'com.myapp.failme', options=options)
reg4 = yield self.register(failme, 'com.myapp.encrypted.failme', options=options)
def on_hello(msg, details=None):
self.log.info('event received: msg="{msg}", details={details}', msg=msg, details=details)
options = SubscribeOptions(details=True)
sub1 = yield self.subscribe(on_hello, 'com.myapp.hello', options=options)
sub2 = yield self.subscribe(on_hello, 'com.myapp.encrypted.hello', options=options)
self.log.info('session ready!')
def onLeave(self, details):
        self.log.info('session left: {details}', details=details)
ApplicationSession.onLeave(self, details)
def onDisconnect(self):
ApplicationSession.onDisconnect(self)
from twisted.internet import reactor
reactor.stop()
if __name__ == '__main__':
txaio.start_logging(level='info')
runner = ApplicationRunner("ws://127.0.0.1:8080", "realm1")
runner.run(Component2)
|
{
"content_hash": "949516185eb0e546854173f227b08135",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 102,
"avg_line_length": 43.73809523809524,
"alnum_prop": 0.6622210125204138,
"repo_name": "crossbario/crossbar-examples",
"id": "9d0b6c84aba41e5eb27e6670c7a2e9fed53934cc",
"size": "4952",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "encryption/cryptobox/client2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5120"
},
{
"name": "C#",
"bytes": "22931"
},
{
"name": "C++",
"bytes": "77209"
},
{
"name": "CSS",
"bytes": "216506"
},
{
"name": "Dockerfile",
"bytes": "1423"
},
{
"name": "Erlang",
"bytes": "16493"
},
{
"name": "HTML",
"bytes": "4701160"
},
{
"name": "Hack",
"bytes": "4082"
},
{
"name": "Java",
"bytes": "20795"
},
{
"name": "JavaScript",
"bytes": "2989112"
},
{
"name": "Jupyter Notebook",
"bytes": "335655"
},
{
"name": "Lua",
"bytes": "1233"
},
{
"name": "Makefile",
"bytes": "68685"
},
{
"name": "PHP",
"bytes": "45600"
},
{
"name": "PLSQL",
"bytes": "157154"
},
{
"name": "PLpgSQL",
"bytes": "5053"
},
{
"name": "Python",
"bytes": "856797"
},
{
"name": "SCSS",
"bytes": "58669"
},
{
"name": "Shell",
"bytes": "46285"
}
],
"symlink_target": ""
}
|
import json
import tempfile
import unittest
from unittest import mock
from unittest.mock import patch
import kubernetes
from parameterized import parameterized
from airflow import AirflowException
from airflow.models import Connection
from airflow.providers.cncf.kubernetes.hooks.kubernetes import KubernetesHook
from airflow.utils import db
from tests.test_utils.db import clear_db_connections
class TestKubernetesHook(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
db.merge_conn(
Connection(
conn_id='kubernetes_in_cluster',
conn_type='kubernetes',
extra=json.dumps({'extra__kubernetes__in_cluster': True}),
)
)
db.merge_conn(
Connection(
conn_id='kubernetes_kube_config',
conn_type='kubernetes',
extra=json.dumps({'extra__kubernetes__kube_config': '{"test": "kube"}'}),
)
)
db.merge_conn(
Connection(
conn_id='kubernetes_kube_config_path',
conn_type='kubernetes',
extra=json.dumps({'extra__kubernetes__kube_config_path': 'path/to/file'}),
)
)
db.merge_conn(
Connection(conn_id='kubernetes_default_kube_config', conn_type='kubernetes', extra=json.dumps({}))
)
db.merge_conn(
Connection(
conn_id='kubernetes_with_namespace',
conn_type='kubernetes',
extra=json.dumps({'extra__kubernetes__namespace': 'mock_namespace'}),
)
)
@classmethod
def tearDownClass(cls) -> None:
clear_db_connections()
@patch("kubernetes.config.incluster_config.InClusterConfigLoader")
def test_in_cluster_connection(self, mock_kube_config_loader):
kubernetes_hook = KubernetesHook(conn_id='kubernetes_in_cluster')
api_conn = kubernetes_hook.get_conn()
mock_kube_config_loader.assert_called_once()
self.assertIsInstance(api_conn, kubernetes.client.api_client.ApiClient)
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
    def test_kube_config_path(self, mock_kube_config_merger, mock_kube_config_loader):
        # @patch mocks are passed bottom-up, so the KubeConfigMerger mock comes
        # first; KubeConfigMerger is the component that receives the file path
        kubernetes_hook = KubernetesHook(conn_id='kubernetes_kube_config_path')
        api_conn = kubernetes_hook.get_conn()
        mock_kube_config_merger.assert_called_once_with("path/to/file")
        mock_kube_config_loader.assert_called_once()
        self.assertIsInstance(api_conn, kubernetes.client.api_client.ApiClient)
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
@patch.object(tempfile, 'NamedTemporaryFile')
    def test_kube_config_connection(self, mock_tempfile, mock_kube_config_merger, mock_kube_config_loader):
        kubernetes_hook = KubernetesHook(conn_id='kubernetes_kube_config')
        api_conn = kubernetes_hook.get_conn()
        mock_tempfile.assert_called_once()
        mock_kube_config_loader.assert_called_once()
        mock_kube_config_merger.assert_called_once()
        self.assertIsInstance(api_conn, kubernetes.client.api_client.ApiClient)
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
@patch("kubernetes.config.kube_config.KUBE_CONFIG_DEFAULT_LOCATION", "/mock/config")
    def test_default_kube_config_connection(
        self,
        mock_kube_config_merger,
        mock_kube_config_loader,
    ):
        kubernetes_hook = KubernetesHook(conn_id='kubernetes_default_kube_config')
        api_conn = kubernetes_hook.get_conn()
        mock_kube_config_merger.assert_called_once_with("/mock/config")
        mock_kube_config_loader.assert_called_once()
        self.assertIsInstance(api_conn, kubernetes.client.api_client.ApiClient)
def test_get_namespace(self):
kubernetes_hook_with_namespace = KubernetesHook(conn_id='kubernetes_with_namespace')
kubernetes_hook_without_namespace = KubernetesHook(conn_id='kubernetes_default_kube_config')
self.assertEqual(kubernetes_hook_with_namespace.get_namespace(), 'mock_namespace')
self.assertEqual(kubernetes_hook_without_namespace.get_namespace(), 'default')
class TestKubernetesHookIncorrectConfiguration(unittest.TestCase):
@parameterized.expand(
(
"kubernetes://?extra__kubernetes__kube_config_path=/tmp/&extra__kubernetes__kube_config=[1,2,3]",
"kubernetes://?extra__kubernetes__kube_config_path=/tmp/&extra__kubernetes__in_cluster=[1,2,3]",
"kubernetes://?extra__kubernetes__kube_config=/tmp/&extra__kubernetes__in_cluster=[1,2,3]",
)
)
def test_should_raise_exception_on_invalid_configuration(self, conn_uri):
with mock.patch.dict("os.environ", AIRFLOW_CONN_KUBERNETES_DEFAULT=conn_uri), self.assertRaisesRegex(
AirflowException, "Invalid connection configuration"
):
kubernetes_hook = KubernetesHook()
kubernetes_hook.get_conn()
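# Illustrative hook usage mirroring the fixtures above (the conn_id values
# are the test connections defined in setUpClass, not Airflow defaults):
#
#     hook = KubernetesHook(conn_id='kubernetes_in_cluster')
#     client = hook.get_conn()          # kubernetes.client.ApiClient
#     namespace = hook.get_namespace()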
|
{
"content_hash": "a10f0899b5f61292a1c350f98786ae19",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 110,
"avg_line_length": 44.06837606837607,
"alnum_prop": 0.6662141194724592,
"repo_name": "DinoCow/airflow",
"id": "9a01d8fa5007be77f4bb3c74b3f84f5a924167e8",
"size": "5946",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/providers/cncf/kubernetes/hooks/test_kubernetes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "56963"
},
{
"name": "HTML",
"bytes": "140781"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1473771"
},
{
"name": "Shell",
"bytes": "18638"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from django.contrib.auth import get_user_model
from django.utils import timezone
import factory
from factory.django import DjangoModelFactory
class UserFactory(DjangoModelFactory):
class Meta:
model = get_user_model()
first_name = "Jane"
last_name = factory.Sequence(lambda n: "D%se" % ("o" * min(20, int(n))))
username = factory.Sequence(lambda n: "user_%s" % n)
email = factory.Sequence(lambda n: "user%s@example.org" % n)
password = factory.PostGenerationMethodCall("set_password", "froide")
is_staff = False
is_active = True
is_superuser = False
last_login = datetime(2000, 1, 1).replace(tzinfo=timezone.utc)
date_joined = datetime(1999, 1, 1).replace(tzinfo=timezone.utc)
private = False
address = "Dummystreet5\n12345 Town"
organization = ""
organization_url = ""
|
{
"content_hash": "010e3babb248d360637b83b75b9d4519",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.6895761741122566,
"repo_name": "fin/froide",
"id": "e7b7303399b9370581a81f0bf756019cb367f902",
"size": "873",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "froide/account/factories.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "302838"
},
{
"name": "JavaScript",
"bytes": "47357"
},
{
"name": "Makefile",
"bytes": "535"
},
{
"name": "Python",
"bytes": "1706123"
},
{
"name": "SCSS",
"bytes": "39397"
},
{
"name": "TypeScript",
"bytes": "57910"
},
{
"name": "Vue",
"bytes": "218866"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
from cryptography.hazmat._oid import ObjectIdentifier
from cryptography.hazmat.primitives import hashes
class ExtensionOID(object):
SUBJECT_DIRECTORY_ATTRIBUTES = ObjectIdentifier("2.5.29.9")
SUBJECT_KEY_IDENTIFIER = ObjectIdentifier("2.5.29.14")
KEY_USAGE = ObjectIdentifier("2.5.29.15")
SUBJECT_ALTERNATIVE_NAME = ObjectIdentifier("2.5.29.17")
ISSUER_ALTERNATIVE_NAME = ObjectIdentifier("2.5.29.18")
BASIC_CONSTRAINTS = ObjectIdentifier("2.5.29.19")
NAME_CONSTRAINTS = ObjectIdentifier("2.5.29.30")
CRL_DISTRIBUTION_POINTS = ObjectIdentifier("2.5.29.31")
CERTIFICATE_POLICIES = ObjectIdentifier("2.5.29.32")
POLICY_MAPPINGS = ObjectIdentifier("2.5.29.33")
AUTHORITY_KEY_IDENTIFIER = ObjectIdentifier("2.5.29.35")
POLICY_CONSTRAINTS = ObjectIdentifier("2.5.29.36")
EXTENDED_KEY_USAGE = ObjectIdentifier("2.5.29.37")
FRESHEST_CRL = ObjectIdentifier("2.5.29.46")
INHIBIT_ANY_POLICY = ObjectIdentifier("2.5.29.54")
ISSUING_DISTRIBUTION_POINT = ObjectIdentifier("2.5.29.28")
AUTHORITY_INFORMATION_ACCESS = ObjectIdentifier("1.3.6.1.5.5.7.1.1")
SUBJECT_INFORMATION_ACCESS = ObjectIdentifier("1.3.6.1.5.5.7.1.11")
OCSP_NO_CHECK = ObjectIdentifier("1.3.6.1.5.5.7.48.1.5")
TLS_FEATURE = ObjectIdentifier("1.3.6.1.5.5.7.1.24")
CRL_NUMBER = ObjectIdentifier("2.5.29.20")
DELTA_CRL_INDICATOR = ObjectIdentifier("2.5.29.27")
PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS = ObjectIdentifier(
"1.3.6.1.4.1.11129.2.4.2"
)
PRECERT_POISON = ObjectIdentifier("1.3.6.1.4.1.11129.2.4.3")
SIGNED_CERTIFICATE_TIMESTAMPS = ObjectIdentifier("1.3.6.1.4.1.11129.2.4.5")
class OCSPExtensionOID(object):
NONCE = ObjectIdentifier("1.3.6.1.5.5.7.48.1.2")
class CRLEntryExtensionOID(object):
CERTIFICATE_ISSUER = ObjectIdentifier("2.5.29.29")
CRL_REASON = ObjectIdentifier("2.5.29.21")
INVALIDITY_DATE = ObjectIdentifier("2.5.29.24")
class NameOID(object):
COMMON_NAME = ObjectIdentifier("2.5.4.3")
COUNTRY_NAME = ObjectIdentifier("2.5.4.6")
LOCALITY_NAME = ObjectIdentifier("2.5.4.7")
STATE_OR_PROVINCE_NAME = ObjectIdentifier("2.5.4.8")
STREET_ADDRESS = ObjectIdentifier("2.5.4.9")
ORGANIZATION_NAME = ObjectIdentifier("2.5.4.10")
ORGANIZATIONAL_UNIT_NAME = ObjectIdentifier("2.5.4.11")
SERIAL_NUMBER = ObjectIdentifier("2.5.4.5")
SURNAME = ObjectIdentifier("2.5.4.4")
GIVEN_NAME = ObjectIdentifier("2.5.4.42")
TITLE = ObjectIdentifier("2.5.4.12")
GENERATION_QUALIFIER = ObjectIdentifier("2.5.4.44")
X500_UNIQUE_IDENTIFIER = ObjectIdentifier("2.5.4.45")
DN_QUALIFIER = ObjectIdentifier("2.5.4.46")
PSEUDONYM = ObjectIdentifier("2.5.4.65")
USER_ID = ObjectIdentifier("0.9.2342.19200300.100.1.1")
DOMAIN_COMPONENT = ObjectIdentifier("0.9.2342.19200300.100.1.25")
EMAIL_ADDRESS = ObjectIdentifier("1.2.840.113549.1.9.1")
JURISDICTION_COUNTRY_NAME = ObjectIdentifier("1.3.6.1.4.1.311.60.2.1.3")
JURISDICTION_LOCALITY_NAME = ObjectIdentifier("1.3.6.1.4.1.311.60.2.1.1")
JURISDICTION_STATE_OR_PROVINCE_NAME = ObjectIdentifier(
"1.3.6.1.4.1.311.60.2.1.2"
)
BUSINESS_CATEGORY = ObjectIdentifier("2.5.4.15")
POSTAL_ADDRESS = ObjectIdentifier("2.5.4.16")
POSTAL_CODE = ObjectIdentifier("2.5.4.17")
INN = ObjectIdentifier("1.2.643.3.131.1.1")
OGRN = ObjectIdentifier("1.2.643.100.1")
SNILS = ObjectIdentifier("1.2.643.100.3")
UNSTRUCTURED_NAME = ObjectIdentifier("1.2.840.113549.1.9.2")
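# Illustrative use of these constants (a sketch assuming this package's x509
# module; the common-name value is hypothetical):
#
#     from cryptography import x509
#     x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, u"example.com")])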
class SignatureAlgorithmOID(object):
RSA_WITH_MD5 = ObjectIdentifier("1.2.840.113549.1.1.4")
RSA_WITH_SHA1 = ObjectIdentifier("1.2.840.113549.1.1.5")
# This is an alternate OID for RSA with SHA1 that is occasionally seen
_RSA_WITH_SHA1 = ObjectIdentifier("1.3.14.3.2.29")
RSA_WITH_SHA224 = ObjectIdentifier("1.2.840.113549.1.1.14")
RSA_WITH_SHA256 = ObjectIdentifier("1.2.840.113549.1.1.11")
RSA_WITH_SHA384 = ObjectIdentifier("1.2.840.113549.1.1.12")
RSA_WITH_SHA512 = ObjectIdentifier("1.2.840.113549.1.1.13")
RSASSA_PSS = ObjectIdentifier("1.2.840.113549.1.1.10")
ECDSA_WITH_SHA1 = ObjectIdentifier("1.2.840.10045.4.1")
ECDSA_WITH_SHA224 = ObjectIdentifier("1.2.840.10045.4.3.1")
ECDSA_WITH_SHA256 = ObjectIdentifier("1.2.840.10045.4.3.2")
ECDSA_WITH_SHA384 = ObjectIdentifier("1.2.840.10045.4.3.3")
ECDSA_WITH_SHA512 = ObjectIdentifier("1.2.840.10045.4.3.4")
DSA_WITH_SHA1 = ObjectIdentifier("1.2.840.10040.4.3")
DSA_WITH_SHA224 = ObjectIdentifier("2.16.840.1.101.3.4.3.1")
DSA_WITH_SHA256 = ObjectIdentifier("2.16.840.1.101.3.4.3.2")
ED25519 = ObjectIdentifier("1.3.101.112")
ED448 = ObjectIdentifier("1.3.101.113")
GOSTR3411_94_WITH_3410_2001 = ObjectIdentifier("1.2.643.2.2.3")
GOSTR3410_2012_WITH_3411_2012_256 = ObjectIdentifier("1.2.643.7.1.1.3.2")
GOSTR3410_2012_WITH_3411_2012_512 = ObjectIdentifier("1.2.643.7.1.1.3.3")
_SIG_OIDS_TO_HASH = {
SignatureAlgorithmOID.RSA_WITH_MD5: hashes.MD5(),
SignatureAlgorithmOID.RSA_WITH_SHA1: hashes.SHA1(),
SignatureAlgorithmOID._RSA_WITH_SHA1: hashes.SHA1(),
SignatureAlgorithmOID.RSA_WITH_SHA224: hashes.SHA224(),
SignatureAlgorithmOID.RSA_WITH_SHA256: hashes.SHA256(),
SignatureAlgorithmOID.RSA_WITH_SHA384: hashes.SHA384(),
SignatureAlgorithmOID.RSA_WITH_SHA512: hashes.SHA512(),
SignatureAlgorithmOID.ECDSA_WITH_SHA1: hashes.SHA1(),
SignatureAlgorithmOID.ECDSA_WITH_SHA224: hashes.SHA224(),
SignatureAlgorithmOID.ECDSA_WITH_SHA256: hashes.SHA256(),
SignatureAlgorithmOID.ECDSA_WITH_SHA384: hashes.SHA384(),
SignatureAlgorithmOID.ECDSA_WITH_SHA512: hashes.SHA512(),
SignatureAlgorithmOID.DSA_WITH_SHA1: hashes.SHA1(),
SignatureAlgorithmOID.DSA_WITH_SHA224: hashes.SHA224(),
SignatureAlgorithmOID.DSA_WITH_SHA256: hashes.SHA256(),
SignatureAlgorithmOID.ED25519: None,
SignatureAlgorithmOID.ED448: None,
SignatureAlgorithmOID.GOSTR3411_94_WITH_3410_2001: None,
SignatureAlgorithmOID.GOSTR3410_2012_WITH_3411_2012_256: None,
SignatureAlgorithmOID.GOSTR3410_2012_WITH_3411_2012_512: None,
}
class ExtendedKeyUsageOID(object):
SERVER_AUTH = ObjectIdentifier("1.3.6.1.5.5.7.3.1")
CLIENT_AUTH = ObjectIdentifier("1.3.6.1.5.5.7.3.2")
CODE_SIGNING = ObjectIdentifier("1.3.6.1.5.5.7.3.3")
EMAIL_PROTECTION = ObjectIdentifier("1.3.6.1.5.5.7.3.4")
TIME_STAMPING = ObjectIdentifier("1.3.6.1.5.5.7.3.8")
OCSP_SIGNING = ObjectIdentifier("1.3.6.1.5.5.7.3.9")
ANY_EXTENDED_KEY_USAGE = ObjectIdentifier("2.5.29.37.0")
class AuthorityInformationAccessOID(object):
CA_ISSUERS = ObjectIdentifier("1.3.6.1.5.5.7.48.2")
OCSP = ObjectIdentifier("1.3.6.1.5.5.7.48.1")
class SubjectInformationAccessOID(object):
CA_REPOSITORY = ObjectIdentifier("1.3.6.1.5.5.7.48.5")
class CertificatePoliciesOID(object):
CPS_QUALIFIER = ObjectIdentifier("1.3.6.1.5.5.7.2.1")
CPS_USER_NOTICE = ObjectIdentifier("1.3.6.1.5.5.7.2.2")
ANY_POLICY = ObjectIdentifier("2.5.29.32.0")
class AttributeOID(object):
CHALLENGE_PASSWORD = ObjectIdentifier("1.2.840.113549.1.9.7")
UNSTRUCTURED_NAME = ObjectIdentifier("1.2.840.113549.1.9.2")
_OID_NAMES = {
NameOID.COMMON_NAME: "commonName",
NameOID.COUNTRY_NAME: "countryName",
NameOID.LOCALITY_NAME: "localityName",
NameOID.STATE_OR_PROVINCE_NAME: "stateOrProvinceName",
NameOID.STREET_ADDRESS: "streetAddress",
NameOID.ORGANIZATION_NAME: "organizationName",
NameOID.ORGANIZATIONAL_UNIT_NAME: "organizationalUnitName",
NameOID.SERIAL_NUMBER: "serialNumber",
NameOID.SURNAME: "surname",
NameOID.GIVEN_NAME: "givenName",
NameOID.TITLE: "title",
NameOID.GENERATION_QUALIFIER: "generationQualifier",
NameOID.X500_UNIQUE_IDENTIFIER: "x500UniqueIdentifier",
NameOID.DN_QUALIFIER: "dnQualifier",
NameOID.PSEUDONYM: "pseudonym",
NameOID.USER_ID: "userID",
NameOID.DOMAIN_COMPONENT: "domainComponent",
NameOID.EMAIL_ADDRESS: "emailAddress",
NameOID.JURISDICTION_COUNTRY_NAME: "jurisdictionCountryName",
NameOID.JURISDICTION_LOCALITY_NAME: "jurisdictionLocalityName",
NameOID.JURISDICTION_STATE_OR_PROVINCE_NAME: (
"jurisdictionStateOrProvinceName"
),
NameOID.BUSINESS_CATEGORY: "businessCategory",
NameOID.POSTAL_ADDRESS: "postalAddress",
NameOID.POSTAL_CODE: "postalCode",
NameOID.INN: "INN",
NameOID.OGRN: "OGRN",
NameOID.SNILS: "SNILS",
NameOID.UNSTRUCTURED_NAME: "unstructuredName",
SignatureAlgorithmOID.RSA_WITH_MD5: "md5WithRSAEncryption",
SignatureAlgorithmOID.RSA_WITH_SHA1: "sha1WithRSAEncryption",
SignatureAlgorithmOID.RSA_WITH_SHA224: "sha224WithRSAEncryption",
SignatureAlgorithmOID.RSA_WITH_SHA256: "sha256WithRSAEncryption",
SignatureAlgorithmOID.RSA_WITH_SHA384: "sha384WithRSAEncryption",
SignatureAlgorithmOID.RSA_WITH_SHA512: "sha512WithRSAEncryption",
SignatureAlgorithmOID.RSASSA_PSS: "RSASSA-PSS",
SignatureAlgorithmOID.ECDSA_WITH_SHA1: "ecdsa-with-SHA1",
SignatureAlgorithmOID.ECDSA_WITH_SHA224: "ecdsa-with-SHA224",
SignatureAlgorithmOID.ECDSA_WITH_SHA256: "ecdsa-with-SHA256",
SignatureAlgorithmOID.ECDSA_WITH_SHA384: "ecdsa-with-SHA384",
SignatureAlgorithmOID.ECDSA_WITH_SHA512: "ecdsa-with-SHA512",
SignatureAlgorithmOID.DSA_WITH_SHA1: "dsa-with-sha1",
SignatureAlgorithmOID.DSA_WITH_SHA224: "dsa-with-sha224",
SignatureAlgorithmOID.DSA_WITH_SHA256: "dsa-with-sha256",
SignatureAlgorithmOID.ED25519: "ed25519",
SignatureAlgorithmOID.ED448: "ed448",
SignatureAlgorithmOID.GOSTR3411_94_WITH_3410_2001: (
"GOST R 34.11-94 with GOST R 34.10-2001"
),
SignatureAlgorithmOID.GOSTR3410_2012_WITH_3411_2012_256: (
"GOST R 34.10-2012 with GOST R 34.11-2012 (256 bit)"
),
SignatureAlgorithmOID.GOSTR3410_2012_WITH_3411_2012_512: (
"GOST R 34.10-2012 with GOST R 34.11-2012 (512 bit)"
),
ExtendedKeyUsageOID.SERVER_AUTH: "serverAuth",
ExtendedKeyUsageOID.CLIENT_AUTH: "clientAuth",
ExtendedKeyUsageOID.CODE_SIGNING: "codeSigning",
ExtendedKeyUsageOID.EMAIL_PROTECTION: "emailProtection",
ExtendedKeyUsageOID.TIME_STAMPING: "timeStamping",
ExtendedKeyUsageOID.OCSP_SIGNING: "OCSPSigning",
ExtensionOID.SUBJECT_DIRECTORY_ATTRIBUTES: "subjectDirectoryAttributes",
ExtensionOID.SUBJECT_KEY_IDENTIFIER: "subjectKeyIdentifier",
ExtensionOID.KEY_USAGE: "keyUsage",
ExtensionOID.SUBJECT_ALTERNATIVE_NAME: "subjectAltName",
ExtensionOID.ISSUER_ALTERNATIVE_NAME: "issuerAltName",
ExtensionOID.BASIC_CONSTRAINTS: "basicConstraints",
ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS: (
"signedCertificateTimestampList"
),
ExtensionOID.SIGNED_CERTIFICATE_TIMESTAMPS: (
"signedCertificateTimestampList"
),
ExtensionOID.PRECERT_POISON: "ctPoison",
CRLEntryExtensionOID.CRL_REASON: "cRLReason",
CRLEntryExtensionOID.INVALIDITY_DATE: "invalidityDate",
CRLEntryExtensionOID.CERTIFICATE_ISSUER: "certificateIssuer",
ExtensionOID.NAME_CONSTRAINTS: "nameConstraints",
ExtensionOID.CRL_DISTRIBUTION_POINTS: "cRLDistributionPoints",
ExtensionOID.CERTIFICATE_POLICIES: "certificatePolicies",
ExtensionOID.POLICY_MAPPINGS: "policyMappings",
ExtensionOID.AUTHORITY_KEY_IDENTIFIER: "authorityKeyIdentifier",
ExtensionOID.POLICY_CONSTRAINTS: "policyConstraints",
ExtensionOID.EXTENDED_KEY_USAGE: "extendedKeyUsage",
ExtensionOID.FRESHEST_CRL: "freshestCRL",
ExtensionOID.INHIBIT_ANY_POLICY: "inhibitAnyPolicy",
    ExtensionOID.ISSUING_DISTRIBUTION_POINT: "issuingDistributionPoint",
ExtensionOID.AUTHORITY_INFORMATION_ACCESS: "authorityInfoAccess",
ExtensionOID.SUBJECT_INFORMATION_ACCESS: "subjectInfoAccess",
ExtensionOID.OCSP_NO_CHECK: "OCSPNoCheck",
ExtensionOID.CRL_NUMBER: "cRLNumber",
ExtensionOID.DELTA_CRL_INDICATOR: "deltaCRLIndicator",
ExtensionOID.TLS_FEATURE: "TLSFeature",
AuthorityInformationAccessOID.OCSP: "OCSP",
AuthorityInformationAccessOID.CA_ISSUERS: "caIssuers",
SubjectInformationAccessOID.CA_REPOSITORY: "caRepository",
CertificatePoliciesOID.CPS_QUALIFIER: "id-qt-cps",
CertificatePoliciesOID.CPS_USER_NOTICE: "id-qt-unotice",
OCSPExtensionOID.NONCE: "OCSPNonce",
AttributeOID.CHALLENGE_PASSWORD: "challengePassword",
}
|
{
"content_hash": "5647d62fb6174b5ea339da126bc7f1ad",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 79,
"avg_line_length": 47.559386973180075,
"alnum_prop": 0.7226295013292516,
"repo_name": "kimjinyong/i2nsf-framework",
"id": "2bf606e50d6bbadac859f3f147153f7a99d999c5",
"size": "12594",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Hackathon-112/analyzer/.local/lib/python3.5/site-packages/cryptography/x509/oid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4396520"
},
{
"name": "C++",
"bytes": "9389"
},
{
"name": "CSS",
"bytes": "51736"
},
{
"name": "Dockerfile",
"bytes": "3839"
},
{
"name": "Emacs Lisp",
"bytes": "24812"
},
{
"name": "Erlang",
"bytes": "1364078"
},
{
"name": "HTML",
"bytes": "42486541"
},
{
"name": "Hack",
"bytes": "6349"
},
{
"name": "Java",
"bytes": "7976"
},
{
"name": "JavaScript",
"bytes": "533000"
},
{
"name": "Makefile",
"bytes": "401170"
},
{
"name": "PHP",
"bytes": "164007"
},
{
"name": "Perl",
"bytes": "2188"
},
{
"name": "Python",
"bytes": "3004949"
},
{
"name": "QMake",
"bytes": "360"
},
{
"name": "Roff",
"bytes": "3906372"
},
{
"name": "Shell",
"bytes": "83872"
},
{
"name": "XSLT",
"bytes": "167018"
}
],
"symlink_target": ""
}
|
from unittest import mock
import pytest
@pytest.mark.django_db
@mock.patch("django_rocket.models.uuid4")
@mock.patch("django_rocket.models.get_random_string")
def test_invitation_tokens(get_random_string, uuid4):
get_random_string.return_value = "CODE"
uuid4.return_value.hex = "TOKEN"
from django_rocket.models import InvitationToken
obj = InvitationToken.objects.create()
assert obj.code == "CODE"
assert obj.token == "TOKEN"
|
{
"content_hash": "3f9f387e45645afa9f9ac08eef63ec32",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 53,
"avg_line_length": 25.444444444444443,
"alnum_prop": 0.7270742358078602,
"repo_name": "mariocesar/django-rocket",
"id": "bb148185a95240498e1fddd1273895934abaab07",
"size": "458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1283"
},
{
"name": "Makefile",
"bytes": "680"
},
{
"name": "Python",
"bytes": "16878"
}
],
"symlink_target": ""
}
|
from unittest import mock
from keystoneauth1 import session
from requests_mock.contrib import fixture
import testtools
from barbicanclient import client
from barbicanclient import exceptions
class TestClient(testtools.TestCase):
def setUp(self):
super(TestClient, self).setUp()
self.responses = self.useFixture(fixture.Fixture())
self.endpoint = 'http://localhost:9311'
self.project_id = 'project_id'
self.session = session.Session()
self.httpclient = client._HTTPClient(session=self.session,
endpoint=self.endpoint,
project_id=self.project_id)
class WhenTestingClientInit(TestClient):
def test_api_version_is_appended_to_endpoint(self):
c = client._HTTPClient(session=self.session,
endpoint=self.endpoint,
project_id=self.project_id)
self.assertEqual('http://localhost:9311/v1', c.endpoint_override)
def test_default_headers_are_empty(self):
c = client._HTTPClient(session=self.session, endpoint=self.endpoint)
self.assertIsInstance(c._default_headers, dict)
self.assertFalse(bool(c._default_headers))
def test_project_id_is_added_to_default_headers(self):
c = client._HTTPClient(session=self.session,
endpoint=self.endpoint,
project_id=self.project_id)
self.assertIn('X-Project-Id', c._default_headers.keys())
self.assertEqual(self.project_id, c._default_headers['X-Project-Id'])
def test_error_thrown_when_no_session_and_no_endpoint(self):
self.assertRaises(ValueError, client.Client,
**{"project_id": self.project_id})
def test_error_thrown_when_no_session_and_no_project_id(self):
self.assertRaises(ValueError, client.Client,
**{"endpoint": self.endpoint})
def test_endpoint_override_starts_with_endpoint_url(self):
c = client._HTTPClient(session=self.session,
endpoint=self.endpoint,
project_id=self.project_id)
self.assertTrue(c.endpoint_override.startswith(self.endpoint))
def test_endpoint_override_ends_with_default_api_version(self):
c = client._HTTPClient(session=self.session,
endpoint=self.endpoint,
project_id=self.project_id)
self.assertTrue(
c.endpoint_override.endswith(client._DEFAULT_API_VERSION))
class WhenTestingClientPost(TestClient):
def setUp(self):
super(WhenTestingClientPost, self).setUp()
self.httpclient = client._HTTPClient(session=self.session,
endpoint=self.endpoint)
self.href = self.endpoint + '/v1/secrets/'
self.post_mock = self.responses.post(self.href, json={})
    def test_post_normalizes_url_with_trailing_slash(self):
self.httpclient.post(path='secrets', json={'test_data': 'test'})
self.assertTrue(self.post_mock.last_request.url.endswith('/'))
def test_post_includes_content_type_header_of_application_json(self):
self.httpclient.post(path='secrets', json={'test_data': 'test'})
self.assertEqual('application/json',
self.post_mock.last_request.headers['Content-Type'])
def test_post_includes_default_headers(self):
self.httpclient._default_headers = {'Test-Default-Header': 'test'}
self.httpclient.post(path='secrets', json={'test_data': 'test'})
self.assertEqual(
'test',
self.post_mock.last_request.headers['Test-Default-Header'])
def test_post_checks_status_code(self):
self.httpclient._check_status_code = mock.MagicMock()
self.httpclient.post(path='secrets', json={'test_data': 'test'})
self.httpclient._check_status_code.assert_has_calls([])
class WhenTestingClientPut(TestClient):
def setUp(self):
super(WhenTestingClientPut, self).setUp()
self.httpclient = client._HTTPClient(session=self.session,
endpoint=self.endpoint)
self.href = 'http://test_href/'
self.put_mock = self.responses.put(self.href, status_code=204)
def test_put_uses_href_as_is(self):
self.httpclient.put(self.href)
self.assertTrue(self.put_mock.called)
def test_put_passes_data(self):
data = "test"
self.httpclient.put(self.href, data=data)
self.assertEqual("test", self.put_mock.last_request.text)
def test_put_includes_default_headers(self):
self.httpclient._default_headers = {'Test-Default-Header': 'test'}
self.httpclient.put(self.href)
self.assertEqual(
'test',
self.put_mock.last_request.headers['Test-Default-Header'])
def test_put_checks_status_code(self):
self.httpclient._check_status_code = mock.MagicMock()
self.httpclient.put(self.href, data='test')
self.httpclient._check_status_code.assert_has_calls([])
class WhenTestingClientGet(TestClient):
def setUp(self):
super(WhenTestingClientGet, self).setUp()
self.httpclient = client._HTTPClient(session=self.session,
endpoint=self.endpoint)
self.headers = dict()
self.href = 'http://test_href/'
self.get_mock = self.responses.get(self.href, json={})
def test_get_uses_href_as_is(self):
self.httpclient.get(self.href)
self.assertEqual(self.href, self.get_mock.last_request.url)
def test_get_passes_params(self):
params = {'test': 'test1'}
self.httpclient.get(self.href, params=params)
self.assertEqual(self.href,
self.get_mock.last_request.url.split('?')[0])
self.assertEqual(['test1'], self.get_mock.last_request.qs['test'])
def test_get_includes_accept_header_of_application_json(self):
self.httpclient.get(self.href)
self.assertEqual('application/json',
self.get_mock.last_request.headers['Accept'])
def test_get_includes_default_headers(self):
self.httpclient._default_headers = {'Test-Default-Header': 'test'}
self.httpclient.get(self.href)
self.assertEqual(
'test',
self.get_mock.last_request.headers['Test-Default-Header'])
def test_get_checks_status_code(self):
self.httpclient._check_status_code = mock.MagicMock()
self.httpclient.get(self.href)
self.httpclient._check_status_code.assert_has_calls([])
def test_get_raw_uses_href_as_is(self):
self.httpclient._get_raw(self.href, headers=self.headers)
self.assertEqual(self.href, self.get_mock.last_request.url)
def test_get_raw_passes_headers(self):
self.httpclient._get_raw(self.href, headers={'test': 'test'})
self.assertEqual('test', self.get_mock.last_request.headers['test'])
def test_get_raw_includes_default_headers(self):
self.httpclient._default_headers = {'Test-Default-Header': 'test'}
self.httpclient._get_raw(self.href, headers=self.headers)
self.assertIn('Test-Default-Header',
self.get_mock.last_request.headers)
def test_get_raw_checks_status_code(self):
self.httpclient._check_status_code = mock.MagicMock()
self.httpclient._get_raw(self.href, headers=self.headers)
self.httpclient._check_status_code.assert_has_calls([])
class WhenTestingClientDelete(TestClient):
def setUp(self):
super(WhenTestingClientDelete, self).setUp()
self.httpclient = client._HTTPClient(session=self.session,
endpoint=self.endpoint)
self.href = 'http://test_href/'
self.del_mock = self.responses.delete(self.href, status_code=204)
def test_delete_uses_href_as_is(self):
self.httpclient.delete(self.href)
self.assertTrue(self.del_mock.called)
def test_delete_passes_json(self):
json = {"test": "test"}
self.httpclient.delete(self.href, json=json)
self.assertEqual('{"test": "test"}', self.del_mock.last_request.text)
def test_delete_includes_default_headers(self):
self.httpclient._default_headers = {'Test-Default-Header': 'test'}
self.httpclient.delete(self.href)
self.assertEqual(
'test',
self.del_mock.last_request.headers['Test-Default-Header'])
def test_delete_checks_status_code(self):
self.httpclient._check_status_code = mock.MagicMock()
self.httpclient.delete(self.href)
self.httpclient._check_status_code.assert_has_calls([])
class WhenTestingCheckStatusCodes(TestClient):
def test_raises_http_auth_error_for_401_response(self):
resp = mock.MagicMock()
resp.status_code = 401
self.assertRaises(exceptions.HTTPAuthError,
self.httpclient._check_status_code,
resp)
def test_raises_http_server_error_for_500_response(self):
resp = mock.MagicMock()
resp.status_code = 500
self.assertRaises(exceptions.HTTPServerError,
self.httpclient._check_status_code, resp)
def test_raises_http_client_error_for_400_response(self):
resp = mock.MagicMock()
resp.status_code = 400
self.assertRaises(exceptions.HTTPClientError,
self.httpclient._check_status_code, resp)
class WhenTestingGetErrorMessage(TestClient):
def test_gets_error_message_from_title_in_json(self):
resp = mock.MagicMock()
resp.json.return_value = {'title': 'test_text'}
msg = self.httpclient._get_error_message(resp)
self.assertEqual('test_text', msg)
def test_gets_error_message_from_content_when_no_json(self):
resp = mock.MagicMock()
resp.json.side_effect = ValueError()
resp.content = content = 'content'
msg = self.httpclient._get_error_message(resp)
self.assertEqual(content, msg)
def test_gets_error_message_from_description_in_json(self):
resp = mock.MagicMock()
resp.json.return_value = {'title': 'test_text',
'description': 'oopsie'}
msg = self.httpclient._get_error_message(resp)
self.assertEqual('test_text: oopsie', msg)
class BaseEntityResource(testtools.TestCase):
def _setUp(self, entity, entity_id='abcd1234-eabc-5678-9abc-abcdef012345'):
super(BaseEntityResource, self).setUp()
self.responses = self.useFixture(fixture.Fixture())
self.endpoint = 'http://localhost:9311'
self.project_id = '1234567'
self.entity = entity
self.entity_id = entity_id
self.entity_base = self.endpoint + "/v1/" + self.entity
self.entity_href = self.entity_base + "/" + self.entity_id
self.entity_payload_href = self.entity_href + "/payload"
self.client = client.Client(endpoint=self.endpoint,
project_id=self.project_id)
|
{
"content_hash": "d425bcded1f98a827b91b3427d1fe037",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 79,
"avg_line_length": 40.711191335740075,
"alnum_prop": 0.6250775915580384,
"repo_name": "openstack/python-barbicanclient",
"id": "87c7296981b717cfcd3329a227f164b9c0a36534",
"size": "11862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "barbicanclient/tests/test_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "495148"
}
],
"symlink_target": ""
}
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Limit(A10BaseClass):
"""Class Description::
Specify limit for GSLB Message Protocol.
Class limit supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.
:param ardt_response: {"description": "Response Messages of Active RDT, default is 1000 (Number)", "format": "number", "default": 1000, "optional": true, "maximum": 1000000, "minimum": 0, "type": "number"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param conn_response: {"description": "Response Messages of Connection Load, default is no limit (Number)", "format": "number", "default": 0, "optional": true, "maximum": 1000000, "minimum": 0, "type": "number"}
:param ardt_session: {"description": "Sessions of Active RDT, default is 32768 (Number)", "format": "number", "default": 32768, "optional": true, "maximum": 1000000, "minimum": 0, "type": "number"}
:param ardt_query: {"description": "Query Messages of Active RDT, default is 200 (Number)", "format": "number", "default": 200, "optional": true, "maximum": 1000000, "minimum": 0, "type": "number"}
:param message: {"description": "Amount of Messages, default is 10000 (Number)", "format": "number", "default": 10000, "optional": true, "maximum": 1000000, "minimum": 0, "type": "number"}
:param response: {"description": "Amount of Response Messages, default is 3600 (Number)", "format": "number", "default": 3600, "optional": true, "maximum": 1000000, "minimum": 0, "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/gslb/protocol/limit`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "limit"
self.a10_url="/axapi/v3/gslb/protocol/limit"
self.DeviceProxy = ""
self.ardt_response = ""
self.uuid = ""
self.conn_response = ""
self.ardt_session = ""
self.ardt_query = ""
self.message = ""
self.response = ""
        for key, value in kwargs.items():
            setattr(self, key, value)
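# Minimal usage sketch (illustrative; assumes an authenticated a10sdk session
# object, here called `device`; the parameter values are arbitrary):
#
#     limit = Limit(DeviceProxy=device, message=20000, response=5000)
#     # CRUD operations are then available via the inherited A10BaseClass API.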
|
{
"content_hash": "186757d54d413c1e9bb881c548b8b775",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 215,
"avg_line_length": 51.319148936170215,
"alnum_prop": 0.6364013266998342,
"repo_name": "a10networks/a10sdk-python",
"id": "c11bb7c3b8dcd2a58f82538520d805f9f7bcaa90",
"size": "2412",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/gslb/gslb_protocol_limit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956372"
}
],
"symlink_target": ""
}
|
"""
@file test_temperature_lps331ap.py
"""
##
# @addtogroup soletta sensor
# @brief This is sensor test based on soletta app
# @brief test temperature function of sensor lps331ap on Galileo/MinnowMax
import os
import time
from oeqa.utils.helper import shell_cmd
from oeqa.oetest import oeRuntimeTest
from EnvirSetup import EnvirSetup
from oeqa.utils.decorators import tag
@tag(TestType="FVT", FeatureID="IOTOS-757")
class TestTemperatureLPS331AP(oeRuntimeTest):
"""
@class TestTemperatureLPS331AP
"""
def setUp(self):
'''Generate test app on target
@fn setUp
@param self
@return'''
print 'start!\n'
#connect sensor and DUT through board
#shell_cmd("sudo python "+ os.path.dirname(__file__) + "/Connector.py lps331ap")
envir = EnvirSetup(self.target)
envir.envirSetup("lps331ap","temperature")
def tearDown(self):
'''unload lps331ap driver
@fn tearDown
@param self
@return'''
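        # Detach the lps331ap sensor (i2c address 0x5d) from the bus wired up
        # for the detected board: bus 1 on MinnowMax, bus 0 on Galileo.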
(status, output) = self.target.run("cat /sys/devices/virtual/dmi/id/board_name")
if "Minnow" in output:
(status, output) = self.target.run(
"cd /sys/bus/i2c/devices; \
echo 0x5d >i2c-1/delete_device")
if "Galileo" in output:
(status, output) = self.target.run(
"cd /sys/bus/i2c/devices; \
echo 0x5d >i2c-0/delete_device")
def test_Temperature_LPS331AP(self):
'''Execute the test app and verify sensor data
@fn test_Temperature_LPS331AP
@param self
@return'''
print 'start reading data!'
(status, output) = self.target.run(
"chmod 777 /opt/apps/test_temperature_lps331ap.fbp")
        (status, output) = self.target.run(
                 "cd /opt/apps; ./test_temperature_lps331ap.fbp >re.log")
error = output
(status, output) = self.target.run(
"cp /opt/apps/re.log /home/root/lps331ap_temperature.log")
#verification of target sensor data
(status, output) = self.target.run("cat /opt/apps/re.log|grep float")
print output + "\n"
self.assertEqual(status, 0, msg="Error messages: %s" % error)
#make sure sensor data is valid
(status, output) = self.target.run("cat /opt/apps/re.log|grep ' 0.000000'")
self.assertEqual(status, 1, msg="Error messages: %s" % output)
|
{
"content_hash": "b153cc526d28c996387fb4bc0e6c3170",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 88,
"avg_line_length": 38.80882352941177,
"alnum_prop": 0.5892383478590375,
"repo_name": "daweiwu/meta-iotqa-1",
"id": "c472def0d82548a501cc590f1310da93a910fee3",
"size": "2639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/oeqa/runtime/sensor/test_temperature_lps331ap.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "BitBake",
"bytes": "6677"
},
{
"name": "C",
"bytes": "5625"
},
{
"name": "Java",
"bytes": "504"
},
{
"name": "JavaScript",
"bytes": "32196"
},
{
"name": "M4",
"bytes": "5945"
},
{
"name": "Makefile",
"bytes": "392"
},
{
"name": "Python",
"bytes": "524122"
},
{
"name": "Shell",
"bytes": "10369"
}
],
"symlink_target": ""
}
|
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_gap04.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
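        # Pin the axis IDs so the generated chart XML matches the reference
        # file created by Excel during the comparison.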
chart.axis_ids = [45938176, 59715584]
chart.axis2_ids = [62526208, 59718272]
data = [[1, 2, 3, 4, 5],
[6, 8, 6, 4, 2]]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
chart.add_series({'values': '=Sheet1!$A$1:$A$5',
'gap': 51,
'overlap': 12})
chart.add_series({'values': '=Sheet1!$B$1:$B$5',
'y2_axis': 1,
'gap': 251,
'overlap': -27})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
{
"content_hash": "9bb34a894681f13903392ec5deacfa17",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 68,
"avg_line_length": 27.522727272727273,
"alnum_prop": 0.5309661436829067,
"repo_name": "jmcnamara/XlsxWriter",
"id": "34a51aca8dac7c801ee7ea7ac2504190a0ab1d08",
"size": "1424",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xlsxwriter/test/comparison/test_chart_gap04.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
}
|
import sys
import os
from os.path import abspath, join, dirname
sys.path.append(abspath(join(dirname(__file__), "..")))
from shared_conf import *
# Releases changelog extension
extensions.append("releases")
releases_document_name = ["changelog", "changelog-v1"]
releases_github_path = "fabric/fabric"
# Intersphinx for referencing API/usage docs
extensions.append("sphinx.ext.intersphinx")
# Default is 'local' building, but reference the public docs site when building
# under RTD.
target = join(dirname(__file__), "..", "docs", "_build")
if on_rtd:
target = "https://docs.fabfile.org/en/latest/"
intersphinx_mapping.update({"docs": (target, None)})
# Sister-site links to API docs
html_theme_options["extra_nav_links"] = {
"API Docs": "https://docs.fabfile.org"
}
|
{
"content_hash": "af69f089d0b0cf867acdb719a61445b3",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 29.96153846153846,
"alnum_prop": 0.7150192554557124,
"repo_name": "fabric/fabric",
"id": "a0a3d72860720adfe3c038d0dd7d0805cd880142",
"size": "809",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sites/www/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "295293"
}
],
"symlink_target": ""
}
|
"""Basic tests for TF-TensorRT integration."""
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class SimpleSingleEngineTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing single segment."""
dtype = inp.dtype
conv_filter = constant_op.constant(
[[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
name="weights",
dtype=dtype)
conv = nn.conv2d(
input=inp,
filter=conv_filter,
strides=[1, 2, 2, 1],
padding="SAME",
name="conv")
bias = constant_op.constant([4., 1.5, 2., 3., 5., 7.],
name="bias",
dtype=dtype)
added = nn.bias_add(conv, bias, name="bias_add")
relu = nn.relu(added, "relu")
identity = array_ops.identity(relu, "identity")
pool = nn_ops.max_pool(
identity, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
return array_ops.squeeze(pool, name="output_0")
def GetParams(self):
# TODO(aaroey): test graph with different dtypes.
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]],
[[100, 6, 6, 6]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": [
"weights", "conv", "bias", "bias_add", "relu", "identity",
"max_pool"
]
}
class SimpleMultiEnginesTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing multiple segment."""
dtype = inp.dtype
conv_filter = constant_op.constant(
[[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
name="weights",
dtype=dtype)
conv = nn.conv2d(
input=inp,
filter=conv_filter,
strides=[1, 2, 2, 1],
padding="SAME",
name="conv")
c1 = constant_op.constant(
np.random.randn(12, 12, 6), dtype=dtype, name="c1")
p = math_ops.mul(conv, c1, name="mul")
c2 = constant_op.constant(
np.random.randn(12, 12, 6), dtype=dtype, name="c2")
q = math_ops.div(conv, c2, name="div")
edge = self.trt_incompatible_op(q, name="incompatible")
one = constant_op.constant(1, name="one", dtype=dtype)
edge = math_ops.sub(one, edge, name="one_sub")
edge = math_ops.div(edge, edge, name="div1")
r = math_ops.add(edge, edge, name="add")
p = math_ops.sub(p, edge, name="sub")
q = math_ops.mul(q, edge, name="mul1")
s = math_ops.add(p, q, name="add1")
s = math_ops.sub(s, r, name="sub1")
return array_ops.squeeze(s, name="output_0")
def GetParams(self):
# TODO(aaroey): test graph with different dtypes.
return self.BuildParams(self.GraphFn, dtypes.float32, [[100, 24, 24, 2]],
[[100, 12, 12, 6]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": [
"add", "add1", "c1", "div1", "mul", "mul1", "sub", "sub1", "one",
"one_sub"
],
"TRTEngineOp_1": ["c2", "conv", "div", "weights"]
}
def setUp(self):
super(trt_test.TfTrtIntegrationTestBase, self).setUp() # pylint: disable=bad-super-call
# Disable layout optimizer, since it will convert BiasAdd with NHWC
    # format to NCHW format under four-dimensional input.
self.DisableNonTrtOptimizers()
class SimpleMultiEnginesTest2(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing two segments."""
n = inp
for i in range(2):
c = constant_op.constant(float(i), name="c%d" % i)
n = math_ops.add(n, c, name="add%d" % i)
n = math_ops.mul(n, n, name="mul%d" % i)
n = self.trt_incompatible_op(n, name="incompatible")
c = constant_op.constant(2.0, name="c2")
n = math_ops.add(n, c, name="add2")
n = math_ops.mul(n, n, name="mul2")
c = constant_op.constant(3.0, name="c3")
n = math_ops.add(n, c, name="add3")
n = math_ops.mul(n, n, name="mul3")
return array_ops.squeeze(n, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["c0", "c1", "add0", "add1", "mul0", "mul1"],
"TRTEngineOp_1": ["c2", "c3", "add2", "add3", "mul2", "mul3"]
}
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
# Disable the test in fp16 mode since multiple matmul and add ops together
# can cause overflow.
return (
(run_params.precision_mode != "FP16") and
not (trt_test.IsQuantizationMode(run_params.precision_mode) and
not run_params.use_calibration)), "test FP32 and non-calibration"
class ConstInputTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing multiple segment."""
n = inp
c = constant_op.constant(1.0, name="c")
# Adds data dependency from the constant op to a trt incompatible op,
# and adds data dependency from the trt incompatible op to the other
# ops, to make sure the constant op cannot be contracted with any trt
# segment that depends on it.
n = self.trt_incompatible_binary_op(n, c, name="incompatible")
n = math_ops.add(n, c, name="add")
n = math_ops.mul(n, n, name="mul")
n = math_ops.add(n, n, name="add1")
n = self.trt_incompatible_op(n, name="incompatible1")
n = math_ops.add(n, c, name="add2")
n = math_ops.mul(n, n, name="mul1")
n = math_ops.add(n, n, name="add3")
return array_ops.squeeze(n, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["add", "add1", "mul"],
"TRTEngineOp_1": ["add2", "add3", "mul1"]
}
def ExpectedConnections(self, run_params):
"""Returns the expected edges."""
return {
"input_0": set(),
"c": set(),
"incompatible": {"input_0", "c"},
"TRTEngineOp_0": {"incompatible"},
"incompatible1": {"TRTEngineOp_0"},
"TRTEngineOp_1": {"incompatible1"},
"output_0": {"TRTEngineOp_1"},
}
class ConstDataInputSingleEngineTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing single segment."""
n = inp
c = constant_op.constant(1.0, name="c")
n = math_ops.add(n, c, name="add")
n = math_ops.mul(n, n, name="mul")
n = math_ops.add(n, n, name="add1")
return array_ops.squeeze(n, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {"TRTEngineOp_0": ["c", "add", "add1", "mul"]}
class ConstDataInputMultipleEnginesTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing multiple segment."""
n = inp
c = constant_op.constant(1.0, name="c")
n = math_ops.add(n, c, name="add")
n = math_ops.mul(n, n, name="mul")
n = math_ops.add(n, n, name="add1")
n = self.trt_incompatible_op(n, name="incompatible1")
n = math_ops.add(n, c, name="add2")
n = math_ops.mul(n, n, name="mul1")
n = math_ops.add(n, n, name="add3")
return array_ops.squeeze(n, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["add2", "add3", "mul1"],
# Why segment ["add", "add1", "mul"] was assigned segment id 1
# instead of 0: the parent node of this segment is actually const
# node 'c', but it's removed later since it's const output of the
# segment which is not allowed.
"TRTEngineOp_1": ["add", "add1", "mul"]
}
class ControlDependencyTest(trt_test.TfTrtIntegrationTestBase):
def GraphFn(self, inp):
"""Create a graph containing multiple segments."""
c1 = constant_op.constant(1.0, name="c1")
c2 = constant_op.constant(2.0, name="c2")
d1 = self.trt_incompatible_op(inp, name="d1")
d2 = self.trt_incompatible_binary_op(inp, inp, name="d2")
with ops.control_dependencies([d1]):
add = math_ops.add(inp, c1, name="add")
mul = math_ops.mul(add, add, name="mul")
add1 = math_ops.add(mul, mul, name="add1")
edge = self.trt_incompatible_op(add1, name="incompatible")
with ops.control_dependencies([d1, d2, add1]):
add2 = math_ops.add(edge, c2, name="add2")
mul1 = math_ops.mul(add2, add2, name="mul1")
add3 = math_ops.add(mul1, mul1, name="add3")
inc1 = self.trt_incompatible_binary_op(d1, add3, name="incompatible1")
inc2 = self.trt_incompatible_binary_op(d2, inc1, name="incompatible2")
return array_ops.squeeze(inc2, name="output_0")
def GetParams(self):
shapes = [[2, 32, 32, 3]]
return self.BuildParams(self.GraphFn, dtypes.float32, input_shapes=shapes,
output_shapes=shapes)
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return {
"TRTEngineOp_0": ["c1", "add", "add1", "mul"],
"TRTEngineOp_1": ["c2", "add2", "add3", "mul1"]
}
def ExpectedConnections(self, run_params):
"""Returns the expected edges."""
return {
"input_0": set(),
"d1": {"input_0"},
"d2": {"input_0"},
"TRTEngineOp_0": {"input_0", "^d1"},
"incompatible": {"TRTEngineOp_0"},
"TRTEngineOp_1": {"incompatible", "^d2"},
"incompatible1": {"TRTEngineOp_1", "d1"},
"incompatible2": {"incompatible1", "d2"},
"output_0": {"incompatible2"},
}
if __name__ == "__main__":
test.main()
|
{
"content_hash": "c64cbfc1a4547f111dc10ce6856ed3ef",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 93,
"avg_line_length": 36.4496644295302,
"alnum_prop": 0.6032958939421837,
"repo_name": "Intel-Corporation/tensorflow",
"id": "cbdabaec942988eaa41f481b3f74a61d77f3ed5c",
"size": "11551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/compiler/tensorrt/test/base_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
}
|
import urllib2
import re
import time
# Default retry timeout (seconds) and sleep-time
VERIFY_TIMEOUT = 300
SLEEP_INTERVAL = 10
class VerifyRetry(object):
def __init__(self, timeout=VERIFY_TIMEOUT, interval=SLEEP_INTERVAL):
"""
Decorator to wrap verify operations to retry until timeout
"""
self.timeout = timeout
self.interval = interval
def __call__(self, f):
def fn(*args, **kwargs):
elapsed = 0
result = False
while elapsed < self.timeout and not result:
result = f(*args, **kwargs)
if not result:
print "Failed verify, sleeping for %ss (%s/%s)" %\
(self.interval, elapsed, self.timeout)
time.sleep(self.interval)
elapsed += self.interval
return result
return fn
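# Illustrative usage of VerifyRetry (the check function below is hypothetical):
#
#     @VerifyRetry(timeout=60, interval=5)
#     def service_responding(url):
#         ...  # return True once the service answers as expected
#
# The wrapped callable is re-invoked every `interval` seconds until it
# returns a truthy value or `timeout` seconds have elapsed.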
class VerifyStack:
'''
Class containing helper-functions to prove a stack resource or service
    has been created correctly, e.g. by accessing the service and checking
the result is as expected
'''
@VerifyRetry()
def verify_url(self, url, timeout, regex):
print "Reading html from %s" % url
try:
content = urllib2.urlopen(url).read()
except IOError:
return False
matches = re.findall(regex, content)
if len(matches):
print "VERIFY : looks OK!"
return True
else:
return False
@VerifyRetry()
def verify_wordpress(self, url, timeout=VERIFY_TIMEOUT):
'''
Verify the url provided has a functional wordpress installation
for now we simply scrape the page and do a regex for an expected
string
'''
WORDPRESS_REGEX = "<p>Welcome to the famous five minute WordPress"
return self.verify_url(url, timeout, WORDPRESS_REGEX)
def verify_openshift(self, url, timeout=VERIFY_TIMEOUT):
OPENSHIFT_REGEX = "<title>Welcome to OpenShift</title>"
return self.verify_url(url, timeout, OPENSHIFT_REGEX)
|
{
"content_hash": "ec4c8b545e8ee210bb974605d80c5728",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 74,
"avg_line_length": 29.619718309859156,
"alnum_prop": 0.5915359010936757,
"repo_name": "Triv90/Heat",
"id": "eab1070067f4210cf555361cee55090ac63fe54d",
"size": "2723",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/grizzly",
"path": "heat/tests/functional/verify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1733741"
},
{
"name": "Shell",
"bytes": "19255"
}
],
"symlink_target": ""
}
|
import json
import uuid
import mimetypes
from collections import OrderedDict
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.mail import send_mail
from django.core.exceptions import ValidationError
from django.contrib.staticfiles.storage import staticfiles_storage
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.template.loader import render_to_string
from django.contrib.auth.models import BaseUserManager
from utils.localized import BaseLocalizedObject, BaseLocalizedContent
ARTICLE_STATUSES = (
(0, _(u'Adding / Overview')),
(1, _(u'Adding / Abstract')),
(2, _(u'Adding / Authors')),
(3, _(u'Adding / Media')),
(11, _(u'New')),
(13, _(u'In review')),
(15, _(u'In rework')),
(16, _(u'Reworked')),
(10, _(u'Published')),
(12, _(u'Rejected')),
)
ARTICLE_ADDING_STATUSES = (0, 1, 2, 3)
ARTICLE_TYPES = (
(1, _(u'Article')),
(2, _(u'Short message')),
(3, _(u'Presentation')),
(4, _(u'Data')),
)
REVIEW_STATUSES = (
(0, _(u'Pending')),
(1, _(u'Unfinished')),
(2, _(u'Done')),
)
RESOLUTIONS = (
(0, _(u'None')),
(1, _(u'Rejected')),
(2, _(u'Rework required')),
(3, _(u'Approved')),
)
MODERATION_STATUSES = (
(0, _(u'New')),
(1, _(u'Rejected')),
(2, _(u'Approved')),
)
REVIEW_FIELD_TYPES = (
(0, _(u'Header')),
(1, _(u'Choices field')),
(2, _(u'Text string')),
(3, _(u'Text field')),
(4, _(u'Checkbox')),
)
ATTACH_TYPES = (
(1, _(u'Image')),
(2, _(u'Video')),
(0, _(u'Generic')),
)
def default_key():
return uuid.uuid4().hex
class ModeratedObject(models.Model):
moderation_status = models.PositiveSmallIntegerField(choices=MODERATION_STATUSES, default=0, verbose_name=_(u'Moderation status'))
class Meta:
abstract = True
class OrderedEntry(models.Model):
order = models.PositiveIntegerField(verbose_name=_(u'Order'), blank=True, default=0)
_order_lookup_field = None
class Meta:
abstract = True
ordering = ['order']
def save(self, *args, **kwargs):
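        # Auto-assign the next order value on first save, scoped by
        # _order_lookup_field when a subclass sets it, and capped at the
        # largest 32-bit signed integer.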
if not self.order:
if self._order_lookup_field:
lookups = {self._order_lookup_field: getattr(self, self._order_lookup_field)}
else:
lookups = {}
qs = self.__class__.objects.filter(**lookups)
if self.pk:
qs = qs.exclude(pk=self.pk)
self.order = min(2 ** 31 - 1, 1 + qs.count())
super(OrderedEntry, self).save(*args, **kwargs)
# TODO: multiple emails for user
class JournalUserManager(BaseUserManager):
def _create_user(self, email, password, is_staff, is_superuser, **extra_fields):
now = timezone.now()
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(email=email, is_staff=is_staff, is_active=True, is_superuser=is_superuser,
last_login=now, date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
return self._create_user(email, password, False, False, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
return self._create_user(email, password, True, True, **extra_fields)
class JournalUser(AbstractBaseUser, PermissionsMixin, ModeratedObject, BaseLocalizedObject): # Moderation only applied to author role
email = models.EmailField(_('email address'))
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
degree = models.CharField(max_length=200, verbose_name=_(u'Degree'), blank=True, default='')
objects = JournalUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def __unicode__(self):
return self.get_full_name() or self.email
@models.permalink
def get_absolute_url(self):
return 'show_author', [self.id]
def clean(self):
if self.moderation_status == 2:
qs = self.__class__.objects.filter(email=self.email)
if self.id:
qs = qs.exclude(id=self.id)
if qs.exists():
raise ValidationError(_(u'Duplicated email for approved users'))
def get_full_name(self):
return (u'%s %s' % (self.first_name, self.last_name)).strip()
get_full_name.short_description = _(u'Full name')
def get_short_name(self):
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
send_mail(subject, message, from_email, [self.email], **kwargs)
def str_compact(self):
return u'%s %s' % (self.last_name, u' '.join(u'%s.' % i[0] for i in self.first_name.split() if i))
@property
def first_name(self):
return self.get_localized('first_name') or ''
@property
def last_name(self):
return self.get_localized('last_name') or ''
def published_articles(self):
return Article.objects.filter(status=10, articleauthor__user=self).distinct()
def unpublished_articles(self):
return Article.objects.exclude(status=10).filter(models.Q(articleauthor__user=self) | models.Q(senders=self)).distinct()
def pending_reviews(self):
return self.review_set.filter(status__in=(0, 1)).distinct()
def has_journal_profile(self):
return bool(self.is_active and self.moderation_status == 2 and self.published_articles())
# override user default moderation status
JournalUser._meta.get_field('moderation_status').default = 2
class Section(OrderedEntry, BaseLocalizedObject):
moderators = models.ManyToManyField(JournalUser, verbose_name=_(u'Moderators'), blank=True)
class Meta:
ordering = OrderedEntry.Meta.ordering
verbose_name = _(u'Section')
verbose_name_plural = _(u'Sections')
def __unicode__(self):
return self.name
@property
def name(self):
return self.get_localized('name') or ''
class SectionName(BaseLocalizedContent):
section = models.ForeignKey(Section, verbose_name=Section._meta.verbose_name)
name = models.CharField(max_length=100, verbose_name=_(u'Name'))
class Meta:
verbose_name = _(u'Section name')
verbose_name_plural = _(u'Section names')
unique_together = [('lang', 'section')]
class StaffMember(models.Model):
user = models.OneToOneField(JournalUser, verbose_name=_(u'User'))
chief_editor = models.BooleanField(default=False, verbose_name=_(u'Chief editor'))
editor = models.BooleanField(default=False, verbose_name=_(u'Editor'))
reviewer = models.BooleanField(default=False, verbose_name=_(u'Reviewer'))
class Meta:
ordering = ('chief_editor', 'editor', 'reviewer', 'user__localizedname__last_name')
verbose_name = _(u'Staff member')
verbose_name_plural = _(u'Staff members')
def save(self, *args, **kwargs):
if self.chief_editor:
StaffMember.objects.filter(chief_editor=True).update(chief_editor=False)
super(StaffMember, self).save(*args, **kwargs)
def __unicode__(self):
return self.user.get_full_name() or self.user.email
class Organization(ModeratedObject, BaseLocalizedObject):
short_name = models.CharField(max_length=32, verbose_name=_(u'Short name'), help_text=_(u'for admin site'), default='', blank=True)
alt_names = models.TextField(verbose_name=_(u'Alternative names'), help_text=_(u'one per line'), default='', blank=True)
site = models.URLField(blank=True, default='', verbose_name=_(u'Site URL'))
obsolete = models.BooleanField(default=False, verbose_name=_(u'Obsolete'))
previous = models.ManyToManyField('self', verbose_name=_(u'Previous versions'), blank=True, limit_choices_to={'obsolete': True})
class Meta:
ordering = ['short_name']
verbose_name = _(u'Organization')
verbose_name_plural = _(u'Organizations')
def __unicode__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return 'show_organization', [self.id]
@property
def name(self):
return self.get_localized('name') or ''
@property
def address(self):
return self.get_localized('address') or ''
@property
def country(self):
return self.get_localized('country') or ''
@property
def city(self):
return self.get_localized('city') or ''
class OrganizationLocalizedContent(BaseLocalizedContent):
org = models.ForeignKey(Organization)
name = models.TextField(verbose_name=_(u'Name'))
country = models.CharField(max_length=100, verbose_name=_(u'Country'), blank=True, default='')
city = models.CharField(max_length=100, verbose_name=_(u'City'), blank=True, default='')
address = models.TextField(verbose_name=_(u'Address'), default='', blank=True)
class Meta:
unique_together = [('lang', 'org')]
verbose_name = _(u'Organization localized content')
verbose_name_plural = _(u'Organization localized content')
def __unicode__(self):
return self.name
class LocalizedName(BaseLocalizedContent):
user = models.ForeignKey(JournalUser, verbose_name=_(u'User'))
first_name = models.CharField(_('First name'), max_length=60, blank=True)
last_name = models.CharField(_('Last name'), max_length=60, blank=True)
class Meta:
ordering = ('user', 'lang')
verbose_name = _(u'Localized name')
verbose_name_plural = _(u'Localized names')
unique_together = [('user', 'lang')]
def __unicode__(self):
return (u'%s %s' % (self.last_name, self.first_name)) or self.user.username
class PositionInOrganization(models.Model):
user = models.ForeignKey(JournalUser, verbose_name=_(u'User'))
organization = models.ForeignKey(Organization, verbose_name=_(u'Organization'))
position = models.CharField(max_length=200, verbose_name=_(u'Position'), default='', blank=True)
class Meta:
ordering = ['id']
verbose_name = _(u'Position in organization')
verbose_name_plural = _(u'Position in organizations')
unique_together = [('user', 'organization')]
def __unicode__(self):
return u'%s (%s, %s)' % (self.user.get_full_name() or self.user, self.position, self.organization)
def article_upload_to(instance, filename):
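    # Store the upload under a random UUID-based name, keeping a lowercased
    # extension truncated to 16 characters.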
out = uuid.uuid4().hex
if '.' in filename:
out += '.' + filename.rsplit('.', 1)[-1][:16].lower()
return 'articles/' + out
def report_upload_to(instance, filename):
out = uuid.uuid4().hex
if '.' in filename:
out += '.' + filename.rsplit('.', 1)[-1][:16].lower()
return 'reports/' + out
class Article(BaseLocalizedObject):
status = models.PositiveSmallIntegerField(default=0, choices=ARTICLE_STATUSES, verbose_name=_(u'Status'))
date_in = models.DateTimeField(default=timezone.now, verbose_name=_(u'Date in'))
date_published = models.DateTimeField(null=True, blank=True, verbose_name=_(u'Publish date'))
old_number = models.CharField(default='', blank=True, verbose_name=_(u'Old number'), max_length=20,
                                  help_text=_(u'to keep links to old articles consistent'))
image = models.ImageField(verbose_name=_(u'Image'), upload_to=article_upload_to, blank=True, default='')
doi = models.URLField(verbose_name='DOI', blank=True, null=True)
type = models.PositiveSmallIntegerField(verbose_name=_(u'Article type'), choices=ARTICLE_TYPES, default=1)
lang = models.CharField(max_length=2, choices=settings.LANGUAGES, verbose_name=_(u'Article language'), default=settings.LANGUAGE_CODE)
    report = models.FileField(verbose_name=_(u'Expert examination report'), upload_to=report_upload_to, default='', blank=True)
content = models.FileField(verbose_name=_(u'Content'), upload_to='published', default='', blank=True)
senders = models.ManyToManyField(JournalUser, verbose_name=_(u'Senders'), blank=True)
issue = models.ForeignKey('Issue', null=True, blank=True, verbose_name=_(u'Issue'))
sections = models.ManyToManyField(Section, blank=True, verbose_name=_(u'Sections'))
class Meta:
ordering = ('-date_published', 'id')
verbose_name = _(u'Article')
verbose_name_plural = _(u'Articles')
def __unicode__(self):
return self.title or ((_(u'Article %s') % self.id) if self.id else _(u'New article'))
__unicode__.short_description = _(u'Title')
def get_absolute_url(self):
if self.issue:
kwargs = {'year': self.issue.year, 'volume': self.issue.volume, 'id': self.id}
if self.issue.number:
kwargs['number'] = self.issue.number
return reverse('show_article', kwargs=kwargs)
return u''
def clean(self):
if self.status == 10:
if not self.content:
raise ValidationError(_(u'Published article must have content file'))
if not self.date_published:
raise ValidationError(_(u'Published article must have published date'))
if self.date_published and self.date_published < self.date_in:
raise ValidationError(_(u'Published date must be after date in'))
if self.old_number:
qs = self.__class__.objects.filter(old_number=self.old_number)
if self.id:
qs = qs.exclude(id=self.id)
if qs.exists():
raise ValidationError(_(u'Duplicated %s field') % self._meta.get_field('old_number').verbose_name)
def get_authors(self):
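        # Map each author to the organizations they publish under for this
        # article, preserving the ArticleAuthor ordering.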
authors = OrderedDict()
for aa in self.articleauthor_set.all():
authors.setdefault(aa.user, []).append(aa.organization)
return authors
@property
def title(self):
return self.get_localized('title') or ''
@property
def abstract(self):
return self.get_localized('abstract') or ''
@property
def keywords(self):
return self.get_localized('keywords') or ''
@property
def references(self):
return self.get_localized('references') or ''
def adding(self):
return self.status in ARTICLE_ADDING_STATUSES
def str_authors(self):
return u', '.join(a.str_compact() for a in self.get_authors().keys())
def has_video(self):
return self.articleattach_set.filter(type=2).exists()
class LocalizedArticleContent(BaseLocalizedContent):
article = models.ForeignKey(Article, verbose_name=Article._meta.verbose_name)
title = models.TextField(verbose_name=_(u'Title'), default='', blank=True)
abstract = models.TextField(verbose_name=_(u'Abstract'), default=u'', blank=True)
keywords = models.TextField(verbose_name=_(u'Keywords'), default=u'', blank=True)
references = models.TextField(verbose_name=_(u'References'), default='', blank=True)
class Meta:
ordering = ('article', 'lang')
verbose_name = _(u'Localized article content')
verbose_name_plural = _(u'Localized article content')
unique_together = [('article', 'lang')]
def __unicode__(self):
        return _(u'Localized content for %s') % self.article
def is_filled(self):
return any((self.title, self.abstract, self.keywords))
class ArticleAuthor(OrderedEntry):
article = models.ForeignKey(Article, verbose_name=Article._meta.verbose_name)
user = models.ForeignKey(JournalUser, verbose_name=_(u'User'))
organization = models.ForeignKey(Organization, verbose_name=_(u'Organization'))
_order_lookup_field = 'article'
class Meta:
verbose_name = _(u'Article author')
verbose_name_plural = _(u'Article authors')
ordering = OrderedEntry.Meta.ordering
unique_together = [('article', 'user', 'organization')]
def __unicode__(self):
return u'%s (%s)' % (self.user, self.organization)
class ArticleAttach(OrderedEntry):
article = models.ForeignKey(Article, verbose_name=Article._meta.verbose_name)
type = models.PositiveSmallIntegerField(choices=ATTACH_TYPES, verbose_name=_(u'Attach type'), default=1)
file = models.FileField(upload_to='attaches', verbose_name=_(u'File'))
comment = models.TextField(default='', blank=True, verbose_name=_(u'Comment to file'))
date_created = models.DateTimeField(default=timezone.now, verbose_name=_(u'Date created'))
_order_lookup_field = 'article'
class Meta:
verbose_name = _(u'Article attach')
verbose_name_plural = _(u'Article attaches')
ordering = OrderedEntry.Meta.ordering
get_latest_by = 'date_created'
def __unicode__(self):
return _(u'Attach for %s') % self.article
def icon_url(self):
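        # Pick an icon by guessed MIME type first, then by file extension,
        # and fall back to a generic "unknown" icon.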
mt = mimetypes.guess_type(self.file.path)[0]
if mt:
path = u'img/mimetypes/%s.png' % mt.replace('/', '-')
if staticfiles_storage.exists(path):
return staticfiles_storage.url(path)
path = u'img/mimetypes/%s.png' % self.file.path.rsplit('.', 1)[-1]
if staticfiles_storage.exists(path):
return staticfiles_storage.url(path)
return staticfiles_storage.url(u'img/mimetypes/unknown.png')
class ArticleSource(models.Model):
article = models.ForeignKey(Article, verbose_name=Article._meta.verbose_name)
date_created = models.DateTimeField(default=timezone.now, verbose_name=_(u'Date created'))
file = models.FileField(upload_to='sources', verbose_name=_(u'File'))
comment = models.TextField(default='', blank=True, verbose_name=_(u'Staff comment'))
class Meta:
ordering = ['date_created']
verbose_name = _(u'Article source')
verbose_name_plural = _(u'Article sources')
get_latest_by = 'date_created'
def __unicode__(self):
return _(u'Source of %s') % self.article
class ArticleResolution(models.Model):
article = models.ForeignKey(Article, verbose_name=Article._meta.verbose_name)
date_created = models.DateTimeField(default=timezone.now, verbose_name=_(u'Date created'))
reviews = models.ManyToManyField('Review', verbose_name=_(u'Reviews'))
status = models.PositiveSmallIntegerField(choices=RESOLUTIONS, verbose_name=_(u'Status'))
text = models.TextField(verbose_name=_(u'Text'))
class Meta:
ordering = ['date_created']
verbose_name = _(u'Article resolution')
verbose_name_plural = _(u'Article resolutions')
get_latest_by = 'date_created'
def __unicode__(self):
return _(u'Resolution for %s') % self.article
class ReviewField(OrderedEntry):
field_type = models.PositiveSmallIntegerField(choices=REVIEW_FIELD_TYPES, verbose_name=_(u'Field type'))
name = models.CharField(max_length=64, verbose_name=_(u'Name'))
description = models.TextField(default='', blank=True, verbose_name=_(u'Description'))
choices = models.TextField(default='', blank=True, verbose_name=_(u'Choices'))
class Meta:
ordering = OrderedEntry.Meta.ordering
verbose_name = _(u'Review field')
verbose_name_plural = _(u'Review fields')
def __unicode__(self):
return self.name
def formfield(self):
from django import forms
from django.utils.html import escape
class HeaderField(forms.Field):
def __init__(self, *args, **kwargs):
label = kwargs.get('label')
kwargs['label'] = ''
class HeaderWidget(forms.Widget):
def render(self, name, value, attrs=None):
return '<h4>%s</h4>' % escape(label)
kwargs['widget'] = HeaderWidget
super(HeaderField, self).__init__(*args, **kwargs)
kwargs = {'label': self.name,
'help_text': self.description,
'required': False}
if self.field_type == 0: # Header
return HeaderField(**kwargs)
elif self.field_type == 1: # Choice field
choices = filter(None, self.choices.strip().splitlines())
choices = map(unicode.strip, choices)
choices = zip(choices, choices)
choices.insert(0, ('', '-' * 8))
kwargs['choices'] = choices
return forms.ChoiceField(**kwargs)
elif self.field_type == 2: # Text string
return forms.CharField(**kwargs)
elif self.field_type == 3: # Text field
kwargs['widget'] = forms.Textarea
return forms.CharField(**kwargs)
elif self.field_type == 4: # Checkbox
return forms.BooleanField(**kwargs)
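# Hedged sketch (editor's addition): one way ``ReviewField.formfield`` can be
# consumed -- assembling a plain Django form class from the configured fields.
# ``build_review_form_class`` and the ``field_<pk>`` naming are illustrative
# assumptions, not part of the project's actual API.
def build_review_form_class():
    from django import forms
    attrs = {}
    for f in ReviewField.objects.all():
        # key by primary key so submitted values can be matched back to fields
        attrs['field_%d' % f.pk] = f.formfield()
    return type(str('ReviewForm'), (forms.Form,), attrs)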
class Review(models.Model):
key = models.CharField(max_length=32, verbose_name=_(u'Key'), unique=True, default=default_key, editable=False)
article = models.ForeignKey(Article, verbose_name=Article._meta.verbose_name)
reviewer = models.ForeignKey(JournalUser, verbose_name=_(u'Reviewer'), limit_choices_to={'staffmember__reviewer': True})
status = models.PositiveSmallIntegerField(choices=REVIEW_STATUSES, default=0, verbose_name=_(u'Status'))
date_created = models.DateTimeField(default=timezone.now, verbose_name=_(u'Created'))
field_values = models.TextField(default='', editable=False, verbose_name=_(u'Field values'))
comment_for_authors = models.TextField(default=u'', blank=True, verbose_name=_(u'Comment for authors'))
comment_for_editors = models.TextField(default=u'', blank=True, verbose_name=_(u'Comment for editors'))
resolution = models.PositiveSmallIntegerField(choices=RESOLUTIONS, default=0, verbose_name=_(u'Resolution'))
class Meta:
ordering = ['date_created']
verbose_name = _(u'Review')
verbose_name_plural = _(u'Reviews')
def __unicode__(self):
return _(u'Review for %s') % self.article
def save(self, *args, **kwargs):
new = not self.id
super(Review, self).save(*args, **kwargs)
if new:
self.send()
def send(self):
msg = render_to_string('journal/mail/review.txt', {
'link': settings.SITE_URL + reverse('edit_review_login', args=[self.key]),
'review': self,
'user': self.reviewer,
'article': self.article,
})
self.reviewer.email_user(u'Review request', msg)
@models.permalink
def get_absolute_url(self):
return 'edit_review', [self.key]
@property
def values(self):
if self.field_values:
return json.loads(self.field_values)
return []
@values.setter
def values(self, value):
        # beware: if you mutate value after assignment, the changes will not be saved
self.field_values = json.dumps(value)
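        # Illustration of the pitfall above (editor's note, hypothetical names):
        #     review.values = collected     # serialized into field_values
        #     collected.append(extra)       # NOT persisted; re-assign to save:
        #     review.values = collected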
def render(self):
from django.utils.html import strip_spaces_between_tags
return strip_spaces_between_tags(render_to_string('journal/review_result.html', {'data': self.values}))
render.short_description = _(u'Data')
class Issue(OrderedEntry, BaseLocalizedObject):
is_active = models.BooleanField(verbose_name=_(u'Active'), default=False, blank=True)
number = models.CharField(max_length=100, verbose_name=_(u'Number'), blank=True, null=True)
volume = models.PositiveIntegerField(verbose_name=_(u'Volume'))
year = models.PositiveIntegerField(verbose_name=_(u'Year'))
class Meta:
verbose_name = _(u'Issue')
verbose_name_plural = _(u'Issues')
ordering = ['order']
unique_together = ('number', 'volume', 'year')
def __unicode__(self):
if self.number:
return ugettext(u'Volume %(volume)s, issue %(number)s, %(year)s year') % self.__dict__
else:
return ugettext(u'Volume %(volume)s, %(year)s year') % self.__dict__
def to_str_no_year(self):
if self.number:
return ugettext(u'Volume %(volume)s, issue %(number)s') % self.__dict__
else:
return ugettext(u'Volume %(volume)s') % self.__dict__
def str_compact(self):
if self.number:
return ugettext(u'%(year)s. V.%(volume)s, iss. %(number)s') % self.__dict__
else:
return ugettext(u'%(year)s. V.%(volume)s') % self.__dict__
@property
def description(self):
return self.get_localized('description') or ''
@property
def title(self):
return self.get_localized('title') or ''
def get_absolute_url(self):
kwargs = {'year': self.year, 'volume': self.volume}
if self.number:
kwargs['number'] = self.number
return reverse('show_issue', kwargs=kwargs)
def published_count(self):
return self.article_set.filter(status=10).count()
class LocalizedIssueContent(BaseLocalizedContent):
issue = models.ForeignKey(Issue, verbose_name=Issue._meta.verbose_name)
description = models.TextField(verbose_name=_(u'Description'), default=u'', blank=True)
title = models.CharField(max_length=200, default='', blank=True, verbose_name=_(u'Title'))
class Meta:
unique_together = ('lang', 'issue')
def __unicode__(self):
return unicode(self.issue)
# TODO: article and profile user input escaping
|
{
"content_hash": "821530d2e7e7aeda8fe4b8f189458eea",
"timestamp": "",
"source": "github",
"line_count": 707,
"max_line_length": 153,
"avg_line_length": 36.64639321074964,
"alnum_prop": 0.6335636265390405,
"repo_name": "Nnonexistent/chemphys",
"id": "ac4ad393e672054ccd20d8082073795e8e543a1d",
"size": "25947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/journal/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22804"
},
{
"name": "HTML",
"bytes": "61688"
},
{
"name": "JavaScript",
"bytes": "4793"
},
{
"name": "Python",
"bytes": "141470"
},
{
"name": "Shell",
"bytes": "611"
}
],
"symlink_target": ""
}
|
'''
Return salt data via pushover (http://www.pushover.net)
.. versionadded:: 2016.3.0
The following fields can be set in the minion conf file::
pushover.user (required)
pushover.token (required)
pushover.title (optional)
pushover.device (optional)
pushover.priority (optional)
pushover.expire (optional)
pushover.retry (optional)
pushover.profile (optional)
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location::
alternative.pushover.user
alternative.pushover.token
alternative.pushover.title
alternative.pushover.device
alternative.pushover.priority
alternative.pushover.expire
alternative.pushover.retry
PushOver settings may also be configured as::
pushover:
user: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
title: Salt Returner
device: phone
priority: -1
expire: 3600
retry: 5
alternative.pushover:
user: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
title: Salt Returner
device: phone
priority: 1
expire: 4800
retry: 2
pushover_profile:
pushover.token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
pushover:
user: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
profile: pushover_profile
alternative.pushover:
user: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
profile: pushover_profile
To use the PushOver returner, append '--return pushover' to the salt command. ex:
.. code-block:: bash
salt '*' test.ping --return pushover
To use the alternative configuration, append '--return_config alternative' to the salt command. ex:
.. code-block:: bash
    salt '*' test.ping --return pushover --return_config alternative
To override individual configuration items, append --return_kwargs '{"key:": "value"}' to the salt command.
.. code-block:: bash
salt '*' test.ping --return pushover --return_kwargs '{"title": "Salt is awesome!"}'
'''
from __future__ import absolute_import
# Import Python libs
import pprint
import logging
# Import 3rd-party libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext.six.moves.urllib.parse import urlencode as _urlencode
# pylint: enable=import-error,no-name-in-module,redefined-builtin
# Import Salt Libs
import salt.returners
import salt.utils.pushover
from salt.exceptions import SaltInvocationError
log = logging.getLogger(__name__)
__virtualname__ = 'pushover'
def _get_options(ret=None):
'''
Get the pushover options from salt.
'''
defaults = {'priority': '0'}
attrs = {'pushover_profile': 'profile',
'user': 'user',
'device': 'device',
'token': 'token',
'priority': 'priority',
'title': 'title',
'api_version': 'api_version',
'expire': 'expire',
'retry': 'retry',
'sound': 'sound',
}
profile_attr = 'pushover_profile'
profile_attrs = {'user': 'user',
'device': 'device',
'token': 'token',
'priority': 'priority',
'title': 'title',
'api_version': 'api_version',
'expire': 'expire',
'retry': 'retry',
'sound': 'sound',
}
_options = salt.returners.get_returner_options(__virtualname__,
ret,
attrs,
profile_attr=profile_attr,
profile_attrs=profile_attrs,
__salt__=__salt__,
__opts__=__opts__,
defaults=defaults)
return _options
def __virtual__():
'''
Return virtual name of the module.
:return: The virtual name of the module.
'''
return __virtualname__
def _post_message(user,
device,
message,
title,
priority,
expire,
retry,
sound,
api_version=1,
token=None):
'''
Send a message to a Pushover user or group.
:param user: The user or group to send to, must be key of user or group not email address.
:param message: The message to send to the PushOver user or group.
:param title: Specify who the message is from.
    :param device: The device to send the message to, if any.
    :param priority: The priority of the message, defaults to 0.
    :param expire: For emergency priority (2), how long in seconds to keep retrying.
    :param retry: For emergency priority (2), how often in seconds to retry.
    :param sound: The notification sound to play, if any.
    :param api_version: The PushOver API version, if not specified in the configuration.
:param token: The PushOver token, if not specified in the configuration.
:return: Boolean if message was sent successfully.
'''
user_validate = salt.utils.pushover.validate_user(user, device, token)
if not user_validate['result']:
return user_validate
parameters = dict()
parameters['user'] = user
parameters['device'] = device
parameters['token'] = token
parameters['title'] = title
parameters['priority'] = priority
parameters['expire'] = expire
parameters['retry'] = retry
parameters['message'] = message
if sound:
sound_validate = salt.utils.pushover.validate_sound(sound, token)
if sound_validate['res']:
parameters['sound'] = sound
result = salt.utils.pushover.query(function='message',
method='POST',
header_dict={'Content-Type': 'application/x-www-form-urlencoded'},
data=_urlencode(parameters),
opts=__opts__)
return result
def returner(ret):
'''
    Send a PushOver message with the data
'''
_options = _get_options(ret)
user = _options.get('user')
device = _options.get('device')
token = _options.get('token')
title = _options.get('title')
priority = _options.get('priority')
expire = _options.get('expire')
retry = _options.get('retry')
sound = _options.get('sound')
if not token:
raise SaltInvocationError('Pushover token is unavailable.')
if not user:
raise SaltInvocationError('Pushover user key is unavailable.')
if priority and priority == 2:
if not expire and not retry:
raise SaltInvocationError('Priority 2 requires pushover.expire and pushover.retry options.')
message = ('id: {0}\r\n'
'function: {1}\r\n'
'function args: {2}\r\n'
'jid: {3}\r\n'
'return: {4}\r\n').format(
ret.get('id'),
ret.get('fun'),
ret.get('fun_args'),
ret.get('jid'),
pprint.pformat(ret.get('return')))
result = _post_message(user=user,
device=device,
message=message,
title=title,
priority=priority,
expire=expire,
retry=retry,
sound=sound,
token=token)
log.debug('result {0}'.format(result))
if not result['res']:
log.info('Error: {0}'.format(result['message']))
return
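# Hedged usage sketch (editor's addition): exercising the returner by hand.
# ``__opts__`` and ``__salt__`` are injected by the salt loader, so this only
# runs inside a salt environment; the job dict below is purely illustrative.
#     ret = {'id': 'minion1', 'fun': 'test.ping', 'fun_args': [],
#            'jid': '20160101000000000000', 'return': True}
#     returner(ret)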
|
{
"content_hash": "d50704fe42e8cacad5b25ee57bd4ccc8",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 107,
"avg_line_length": 31.128,
"alnum_prop": 0.5537136982780776,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "ece9d631590cb478c647ac62561676270c7fe34b",
"size": "7806",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.3/salt/returners/pushover_returner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
}
|
from .mutex import Mutex
from .sema import Semaphore
class Cond(object):
""" Cond implements a condition variable, a rendezvous point for coroutines
waiting for or announcing the occurrence of an event.
Each Cond has an associated Locker L (often a Mutex or RWMutex), which
must be held when changing the condition and when calling the ``wait`` method.
"""
def __init__(self, l):
self.l = l
self.m = Mutex()
# We must be careful to make sure that when ``signal``
# releases a semaphore, the corresponding acquire is
# executed by a coroutine that was already waiting at
# the time of the call to ``signal``, not one that arrived later.
# To ensure this, we segment waiting coroutines into
# generations punctuated by calls to ``signal``. Each call to
# ``signal`` begins another generation if there are no coroutines
# left in older generations for it to wake. Because of this
# optimization (only begin another generation if there
# are no older coroutines left), we only need to keep track
# of the two most recent generations, which we call old
# and new.
self.old_waiters = 0 # number of waiters in old generation...
self.old_sema = Semaphore() # ... waiting on this semaphore
self.new_waiters = 0 # number of waiters in new generation...
self.new_sema = Semaphore() # ... waiting on this semaphore
def wait(self):
"""``wait`` atomically unlocks cond.l and suspends execution of the calling
coroutine. After later resuming execution, ``wait`` locks cond.l before
returning. Unlike in other systems, ``wait`` cannot return unless awoken by
        ``broadcast`` or ``signal``.
Because cond.l is not locked when ``wait`` first resumes, the caller typically
cannot assume that the condition is true when ``wait`` returns. Instead,
the caller should ``wait`` in a loop::
with m:
            while not condition():
                cond.wait()
            # ... the condition now holds; handle it
"""
self.m.lock()
if self.new_sema is None:
self.new_sema = Semaphore()
self.new_waiters += 1
self.m.unlock()
self.l.unlock()
self.new_sema.acquire()
self.l.lock()
def signal(self):
""" ``signal`` wakes one coroutine waiting on cond, if there is any.
It is allowed but not required for the caller to hold cond.l
during the call.
"""
self.m.lock()
if self.old_waiters == 0 and self.new_waiters > 0:
self.old_waiters = self.new_waiters
self.old_sema = self.new_sema
self.new_waiters = 0
self.new_sema = None
if self.old_waiters > 0:
self.old_waiters -= 1
self.old_sema.release()
self.m.unlock()
def broadcast(self):
""" Broadcast wakes all coroutines waiting on cond.
It is allowed but not required for the caller to hold cond.l
during the call.
"""
self.m.lock()
        # wake every waiter in both generations
        if self.old_waiters > 0:
            for i in range(self.old_waiters):
                self.old_sema.release()
            self.old_waiters = 0
        if self.new_waiters > 0:
            for i in range(self.new_waiters):
                self.new_sema.release()
            self.new_waiters = 0
            self.new_sema = None
self.m.unlock()
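# Hedged usage sketch (editor's addition): a one-slot mailbox built on Cond.
# Scheduling is assumed to be driven by offset coroutines; the class name and
# structure are illustrative, not part of the original module.
class _ExampleMailbox(object):
    def __init__(self):
        self.lock = Mutex()
        self.cond = Cond(self.lock)
        self.item = None
    def put(self, item):
        self.lock.lock()
        self.item = item
        self.cond.signal()            # wake one waiting consumer
        self.lock.unlock()
    def get(self):
        self.lock.lock()
        while self.item is None:      # re-check the condition after waking
            self.cond.wait()          # releases self.lock while suspended
        item, self.item = self.item, None
        self.lock.unlock()
        return item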
|
{
"content_hash": "757678108f0ee7736d670153863f398c",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 86,
"avg_line_length": 34.2,
"alnum_prop": 0.5906432748538012,
"repo_name": "benoitc/offset",
"id": "0cf86b2147bf0eb8f23f7f2dd7096169b514cd62",
"size": "3422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "offset/sync/cond.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "138718"
}
],
"symlink_target": ""
}
|
import unittest
import datetime
from pyetherscan import response, ethereum, error
class BaseEthereumTestCase(unittest.TestCase):
def setUp(self):
pass
class TestAddressObject(BaseEthereumTestCase):
def test_retrieve_balance(self):
_address = '0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae'
address = ethereum.Address(address=_address)
self.assertEqual(address.balance, 744997704382925139479303.0)
with self.assertRaises(error.EtherscanInitializationError):
_bad_address = 5
ethereum.Address(_bad_address)
def test_transaction_property(self):
_address = '0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae'
address = ethereum.Address(address=_address)
self.assertIsInstance(
address.transactions,
ethereum.TransactionContainer
)
def test_token_balance(self):
contract_address = '0x57d90b64a1a57749b0f932f1a3395792e12e7055'
_address = '0xe04f27eb70e025b78871a2ad7eabe85e61212761'
address = ethereum.Address(address=_address)
token_balance = address.token_balance(contract_address)
self.assertEqual(token_balance, 135499.0)
def test_blocks_mined(self):
_address = '0x9dd134d14d1e65f84b706d6f205cd5b1cd03a46b'
address = ethereum.Address(address=_address)
expected_block_number = 3462296
block_number = address.blocks_mined[0].block_number
self.assertEqual(expected_block_number, block_number)
class TestTransactionObject(BaseEthereumTestCase):
data = {
"blockNumber": "80240",
"timeStamp": "1439482422",
"hash": "0x72f2508c262763d5ae0e51d71c0d50c881cc75c872152716b04256"
"fe07797dcd",
"nonce": "2",
"blockHash": "0xb9367a1bc9094d6275ab50f4a58ce13186e35a46de68f505"
"3487a578abf00361",
"transactionIndex": "0",
"from": "0xc5a96db085dda36ffbe390f455315d30d6d3dc52",
"to": "0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae",
"value": "0",
"gas": "377583",
"gasPrice": "500000000000",
"isError": "0",
"input": "0xf00d4b5d00000000000000000000000005096a47749d8bfab0a90"
"c1bb7a95115dbe4cea60000000000000000000000005ed8cee6b63b1c6a"
"fce3ad7c92f4fd7e1b8fad9f",
"contractAddress": "",
"cumulativeGasUsed": "122207",
"gasUsed": "122207",
"confirmations": "3929454"
}
def test_initialization(self):
with self.assertRaises(error.EtherscanInitializationError):
ethereum.Transaction('')
def test_transaction_attributes(self):
transaction = ethereum.Transaction(data=self.data)
self.assertEqual(transaction._data, self.data)
self.assertEqual(transaction.from_, self.data.get('from'))
self.assertEqual(transaction.hash, self.data.get('hash'))
self.assertEqual(transaction.nonce, self.data.get('nonce'))
self.assertEqual(transaction.block_hash, self.data.get('blockHash'))
self.assertEqual(transaction.to, self.data.get('to'))
self.assertEqual(transaction.value, float(self.data.get('value')))
self.assertEqual(transaction.gas, float(self.data.get('gas')))
self.assertEqual(transaction.input, self.data.get('input'))
self.assertEqual(transaction.gas_used, float(self.data.get('gasUsed')))
self.assertEqual(
transaction.gas_price,
float(self.data.get('gasPrice')))
self.assertEqual(
transaction.confirmations,
self.data.get('confirmations'))
self.assertEqual(
transaction.cumulative_gas_used,
float(self.data.get('cumulativeGasUsed')))
self.assertEqual(
transaction.contract_address,
self.data.get('contractAddress'))
self.assertEqual(
transaction.transaction_index,
int(self.data.get('transactionIndex')))
self.assertEqual(
transaction.time_stamp,
int(self.data.get('timeStamp')))
self.assertEqual(
transaction.block_number,
int(self.data.get('blockNumber')))
datetime_ex = datetime.datetime.utcfromtimestamp(
int(self.data.get('timeStamp'))
)
self.assertEqual(transaction.datetime_executed, datetime_ex)
def test_transaction_block(self):
transaction = ethereum.Transaction(data=self.data)
block = ethereum.Block(80240)
expected_miner = block.block_miner
expected_reward = block.block_reward
expected_datetime_mined = block.datetime_mined
self.assertEqual(
expected_miner,
transaction.block.block_miner
)
self.assertEqual(
expected_reward,
transaction.block.block_reward
)
self.assertEqual(
expected_datetime_mined,
transaction.block.datetime_mined
)
def test_transaction_type(self):
data = {
"blockNumber": "2535368",
"timeStamp": "1477837690",
"hash": "0x8a1a9989bda84f80143181a68bc137ecefa64d0d4ebde45dd9' \
'4fc0cf49e70cb6",
"from": "0x20d42f2e99a421147acf198d775395cac2e8b03d",
"to": "",
"value": "0",
"contractAddress": "0x2c1ba59d6f58433fb1eaee7d20b26ed83bda51a3",
"input": "",
"type": "create",
"gas": "254791",
"gasUsed": "46750",
"traceId": "0",
"isError": "0",
"errCode": ""
}
transaction = ethereum.Transaction(data=data)
self.assertEqual(transaction.type, 'create')
class TestTransactionContainer(BaseEthereumTestCase):
data = {
"blockNumber": "80240",
"timeStamp": "1439482422",
"hash": "0x72f2508c262763d5ae0e51d71c0d50c881cc75c872152716b04256"
"fe07797dcd",
"nonce": "2",
"blockHash": "0xb9367a1bc9094d6275ab50f4a58ce13186e35a46de68f505"
"3487a578abf00361",
"transactionIndex": "0",
"from": "0xc5a96db085dda36ffbe390f455315d30d6d3dc52",
"to": "0xde0b295669a9fd93d5f28d9ec85e40f4cb697bae",
"value": "0",
"gas": "377583",
"gasPrice": "500000000000",
"isError": "0",
"input": "0xf00d4b5d00000000000000000000000005096a47749d8bfab0a90"
"c1bb7a95115dbe4cea60000000000000000000000005ed8cee6b63b1c6a"
"fce3ad7c92f4fd7e1b8fad9f",
"contractAddress": "",
"cumulativeGasUsed": "122207",
"gasUsed": "122207",
"confirmations": "3929454"
}
def test_retrieval(self):
data_list = [self.data for n in range(5)]
container = ethereum.TransactionContainer(data_list)
self.assertEqual(
container[0].hash,
ethereum.Transaction(self.data).hash
)
for txn in container:
self.assertEqual(
txn.hash,
ethereum.Transaction(self.data).hash
)
class TestBlockObject(BaseEthereumTestCase):
data = {
"blockNumber": "2165403",
"timeStamp": "1472533979",
"blockMiner": "0x13a06d3dfe21e0db5c016c03ea7d2509f7f8d1e3",
"blockReward": "5314181600000000000",
"uncles": [
{
"miner": "0xbcdfc35b86bedf72f0cda046a3c16829a2ef41d1",
"unclePosition": "0",
"blockreward": "3750000000000000000"
}, {
"miner": "0x0d0c9855c722ff0c78f21e43aa275a5b8ea60dce",
"unclePosition": "1",
"blockreward": "3750000000000000000"
}
],
"uncleInclusionReward": "312500000000000000"
}
uncles = [
{
"miner": ethereum.Address(
"0xbcdfc35b86bedf72f0cda046a3c16829a2ef41d1"),
"block_reward": float("3750000000000000000")
}, {
"miner": ethereum.Address(
"0x0d0c9855c722ff0c78f21e43aa275a5b8ea60dce"),
"block_reward": float("3750000000000000000")
}
]
def test_initialization(self):
with self.assertRaises(error.EtherscanInitializationError):
ethereum.Block(2.0)
def test_block_attributes(self):
block_rewards = ethereum.Block(2165403)
self.assertEqual(
block_rewards.time_stamp,
int(self.data.get(
'timeStamp')
)
)
self.assertEqual(
block_rewards.block_miner,
self.data.get('blockMiner')
)
self.assertEqual(
block_rewards.block_reward,
float(self.data.get(
'blockReward')
)
)
self.assertEqual(
block_rewards.uncle_inclusion_reward,
float(self.data.get('uncleInclusionReward'))
)
datetime_mined = datetime.datetime.utcfromtimestamp(
int(self.data.get('timeStamp'))
)
self.assertEqual(block_rewards.datetime_mined, datetime_mined)
# test uncles
uncle_one_address = block_rewards.uncles[0]['miner']
uncle_one_reward = block_rewards.uncles[0]['block_reward']
expected_uncle_address = self.uncles[0]['miner'].address
expected_uncle_reward = self.uncles[0]['block_reward']
self.assertEqual(uncle_one_address, expected_uncle_address)
self.assertEqual(uncle_one_reward, expected_uncle_reward)
class TestBlockContainer(BaseEthereumTestCase):
data = {
"blockNumber": "2691400",
"timeStamp": "1480072029",
"blockReward": "5086562212310617100"
}
def test_retrieval(self):
data_list = [self.data for _ in range(5)]
container = ethereum.BlockContainer(data_list)
expected_block_number = int(ethereum.Block(
self.data.get('blockNumber')
).block_number)
self.assertEqual(
container[0].block_number,
expected_block_number
)
for block in container:
self.assertEqual(
block.block_number,
expected_block_number
)
class TestTokenObject(BaseEthereumTestCase):
def test_initialization(self):
with self.assertRaises(error.EtherscanInitializationError):
_bad_address = 5
ethereum.Token(_bad_address)
def test_token_balance(self):
expected = {
"status": "1",
"message": "OK",
"result": "135499"
}
_contract_address = '0x57d90b64a1a57749b0f932f1a3395792e12e7055'
_address = '0xe04f27eb70e025b78871a2ad7eabe85e61212761'
token = ethereum.Token(contract_address=_contract_address)
self.assertEqual(
token.token_balance(_address),
float(expected.get('result'))
)
def test_token_supply(self):
expected = 21265524714464.0
_contract_address = '0x57d90b64a1a57749b0f932f1a3395792e12e7055'
token = ethereum.Token(contract_address=_contract_address)
self.assertEqual(
token.supply,
expected
)
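# Editor's note (hedged): these tests exercise the live Etherscan API, so they
# need network access plus whatever API-key configuration pyetherscan expects.
if __name__ == '__main__':
    unittest.main()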
|
{
"content_hash": "3c015af2c4f3eb72dd1fdc836a3557a3",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 79,
"avg_line_length": 33.2212389380531,
"alnum_prop": 0.6092168353755993,
"repo_name": "Marto32/pyetherscan",
"id": "b67461cec0f5d311177585eaeeb1f0533e055ef8",
"size": "11262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_ethereum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91218"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from datetime import timedelta
from functools import wraps
import json
import logging
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from axes import get_version
from axes.conf import settings
from axes.attempts import is_already_locked
from axes.utils import iso8601, get_client_username, get_lockout_message
log = logging.getLogger(settings.AXES_LOGGER)
if settings.AXES_VERBOSE:
log.info('AXES: BEGIN LOG')
log.info('AXES: Using django-axes %s', get_version())
if settings.AXES_ONLY_USER_FAILURES:
log.info('AXES: blocking by username only.')
elif settings.AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP:
log.info('AXES: blocking by combination of username and IP.')
else:
log.info('AXES: blocking by IP only.')
def axes_dispatch(func):
def inner(request, *args, **kwargs):
if is_already_locked(request):
return lockout_response(request)
return func(request, *args, **kwargs)
return inner
def axes_form_invalid(func):
@wraps(func)
def inner(self, *args, **kwargs):
if is_already_locked(self.request):
return lockout_response(self.request)
return func(self, *args, **kwargs)
return inner
def lockout_response(request):
context = {
'failure_limit': settings.AXES_FAILURE_LIMIT,
'username': get_client_username(request) or ''
}
cool_off = settings.AXES_COOLOFF_TIME
if cool_off:
if isinstance(cool_off, (int, float)):
cool_off = timedelta(hours=cool_off)
context.update({
'cooloff_time': iso8601(cool_off)
})
if request.is_ajax():
return HttpResponse(
json.dumps(context),
content_type='application/json',
status=403,
)
if settings.AXES_LOCKOUT_TEMPLATE:
return render(
request, settings.AXES_LOCKOUT_TEMPLATE, context, status=403
)
if settings.AXES_LOCKOUT_URL:
return HttpResponseRedirect(settings.AXES_LOCKOUT_URL)
return HttpResponse(get_lockout_message(), status=403)
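# Hedged usage sketch (editor's addition): how the two decorators are applied.
# ``my_login_view`` and ``MyLoginView`` are hypothetical names, not part of
# django-axes itself.
#     @axes_dispatch
#     def my_login_view(request):
#         ...
#     class MyLoginView(FormView):
#         @axes_form_invalid
#         def form_invalid(self, form):
#             return super(MyLoginView, self).form_invalid(form)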
|
{
"content_hash": "617977cee59c30244faf7c2a3585a430",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 72,
"avg_line_length": 27.525,
"alnum_prop": 0.6639418710263397,
"repo_name": "cloudera/hue",
"id": "7a5085727393d2c06789c3d492ffefc729040163",
"size": "2202",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/django-axes-4.5.4/axes/decorators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
import django_filters
from django.db.models import Q
from django.forms import SelectMultiple
from django_filters import FilterSet, MethodFilter
from pdc.apps.common.filters import MultiValueFilter, value_is_not_empty
from . import models
from .models import (Person,
Maillist,
GlobalComponentContact,
ReleaseComponentContact)
class PersonFilterSet(django_filters.FilterSet):
username = MultiValueFilter()
email = MultiValueFilter()
class Meta:
model = models.Person
fields = ('username', 'email')
class MaillistFilterSet(django_filters.FilterSet):
mail_name = MultiValueFilter()
email = MultiValueFilter()
class Meta:
model = models.Maillist
fields = ('mail_name', 'email')
class ContactRoleFilterSet(django_filters.FilterSet):
name = MultiValueFilter()
class Meta:
model = models.ContactRole
fields = ('name',)
def _filter_contacts(people_filter, maillist_filter, qs, values):
"""Helper for filtering based on subclassed contacts.
Runs the filter on separately on each subclass (field defined by argument,
the same values are used), then filters the queryset to only keep items
that have matching.
"""
people = Person.objects.filter(**{people_filter + '__in': values})
mailing_lists = Maillist.objects.filter(**{maillist_filter + '__in': values})
return qs.filter(Q(contact__in=people) | Q(contact__in=mailing_lists))
class _BaseComponentContactFilter(FilterSet):
contact = MethodFilter(action='filter_by_contact', widget=SelectMultiple)
email = MethodFilter(action='filter_by_email', widget=SelectMultiple)
role = MultiValueFilter(name='role__name')
component = MultiValueFilter(name='component__name')
@value_is_not_empty
def filter_by_contact(self, qs, value):
return _filter_contacts('username', 'mail_name', qs, value)
@value_is_not_empty
def filter_by_email(self, qs, value):
return _filter_contacts('email', 'email', qs, value)
class GlobalComponentContactFilter(_BaseComponentContactFilter):
class Meta:
model = GlobalComponentContact
fields = ('role', 'email', 'contact', 'component')
class ReleaseComponentContactFilter(_BaseComponentContactFilter):
dist_git_branch = MultiValueFilter(name='component__dist_git_branch')
release = MultiValueFilter(name='component__release__release_id')
global_component = MultiValueFilter(name='component__global_component__name')
class Meta:
model = ReleaseComponentContact
fields = ('role', 'email', 'contact', 'component', 'dist_git_branch', 'release',
'global_component')
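# Hedged usage sketch (editor's addition): a filter set can also be applied to
# a queryset directly, outside the REST views; the query values here are
# illustrative.
#     qs = GlobalComponentContactFilter(
#         {'role': ['qe_ack'], 'contact': ['jdoe']},
#         queryset=GlobalComponentContact.objects.all(),
#     ).qs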
|
{
"content_hash": "209cb5f50a2e6d5e422b9f1eb4d6666d",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 88,
"avg_line_length": 33.36585365853659,
"alnum_prop": 0.689327485380117,
"repo_name": "tzhaoredhat/automation",
"id": "39688834fb65a9e25183d40b35a84f5684a2775d",
"size": "2845",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pdc/apps/contact/filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1767"
},
{
"name": "HTML",
"bytes": "49433"
},
{
"name": "JavaScript",
"bytes": "6629"
},
{
"name": "Makefile",
"bytes": "2828"
},
{
"name": "Python",
"bytes": "1190922"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
}
|