| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
from setuptools import setup, find_packages
PACKAGES = find_packages()
setup(
name='letsencrypt-appengine',
version='1.0',
description=(
"A Django app to make letsencrypt SSL certificates easier to create/renew on Google App Engine."
),
author='Adam Alton',
author_email='adamalton@gmail.com',
url='https://github.com/adamalton/letsencrypt-appengine',
packages=PACKAGES,
include_package_data=True,
# dependencies
)
|
{
"content_hash": "6a9a8f101181992c50b35f8c11e709c7",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 104,
"avg_line_length": 26.105263157894736,
"alnum_prop": 0.6975806451612904,
"repo_name": "adamalton/letsencrypt-appengine",
"id": "0224b56b6f663a356a57eb5344cb2b689d3b1185",
"size": "519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2150"
}
],
"symlink_target": ""
}
|
import io
from steel.common import meta, args, fields
from steel.fields import Field
from steel.base import Structure
from steel.fields.strings import Bytes
__all__ = ['Chunk', 'Payload', 'ChunkList', 'ChunkStreamer']
class ChunkMetaclass(meta.DeclarativeMetaclass):
def __init__(cls, name, bases, attrs, **options):
cls.structure = meta.DeclarativeMetaclass(name, (Structure,), attrs, **options)
for name, attr in attrs.items():
if isinstance(attr, Field):
delattr(cls, name)
class Chunk(metaclass=ChunkMetaclass):
def __init__(self, id, multiple=False):
self.id = id
self.multiple = multiple
def __call__(self, cls):
cls._chunk = self
if not issubclass(cls, ChunkMixin):
cls.__bases__ = (ChunkMixin,) + cls.__bases__
return cls
@classmethod
def read(cls, file):
value = cls.structure(file)
# Force the evaluation of the entire structure in
# order to make sure other fields work properly
value_bytes = b''
for name in cls.structure._fields:
getattr(value, name)
value_bytes += value._raw_values[name]
return value_bytes, value
def _extract(self, field):
return self.structure._extract(field)
class ChunkMixin:
def __init__(self, *args, process_chunk=True, **kwargs):
if process_chunk and not args:
process_chunk = False
if process_chunk:
chunk = self._chunk.structure(*args, **kwargs)
for name in chunk._fields:
getattr(chunk, name)
            if chunk.id != self._chunk.id:
                raise ValueError('Expected %r, got %r' % (self._chunk.id, chunk.id))
super(ChunkMixin, self).__init__(chunk.payload)
self._chunk_data = chunk
else:
super(ChunkMixin, self).__init__(*args, **kwargs)
def save(self, file):
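        # Serialize the wrapped structure into a buffer first, then emit the
        # chunk envelope (id, size, payload) around those bytes.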
payload = io.BytesIO()
super(ChunkMixin, self).save(payload)
chunk = self._chunk.structure(id=self._chunk.id)
chunk.payload = payload.getvalue()
chunk.size = len(chunk.payload)
chunk.save(file)
class Payload(Bytes):
def read(self, file):
value_bytes = super(Payload, self).read(file)
raise fields.FullyDecoded(value_bytes, io.BytesIO(value_bytes))
class ChunkList(Field):
size = args.Override(default=None)
def __init__(self, base_chunk, known_classes=(), terminator=None, **options):
self.base_chunk = base_chunk
self.terminator = terminator
self.known_types = {cls._chunk.id: cls for cls in known_classes}
        super(ChunkList, self).__init__(**options)
def read(self, file):
chunks_bytes = b''
chunks = ChunkValueList()
        while True:
chunk_bytes, chunk = self.base_chunk.read(file)
chunks_bytes += chunk_bytes
if chunk.id in self.known_types:
value = self.known_types[chunk.id](chunk.payload, process_chunk=False)
if self.terminator and isinstance(chunk, self.terminator):
break
chunks.append(value)
elif chunk.id:
# This is a valid chunk, just not a recognized type
continue
else:
# This is not a valid chunk, which is probably the end of the file
break
raise fields.FullyDecoded(chunks_bytes, chunks)
def encode(self, chunks):
output = io.BytesIO()
for chunk in chunks:
if not isinstance(chunk, tuple(self.known_types.values())):
raise TypeError("Unknown chunk type %r" % chunk._chunk.id)
chunk.save(output)
if self.terminator and not isinstance(chunk, self.terminator):
# The last chunk wasn't a terminator, so add one automatically
self.terminator().save(output)
return output.getvalue()
class ChunkValueList(list):
def of_type(self, type):
return [chunk for chunk in self if isinstance(chunk, type)]
class ChunkStreamer:
def __init__(self, base_chunk, terminator=None):
self.base_chunk = base_chunk
self.terminator = terminator
self.parsers = {}
def parser(self, *chunk_classes):
def wrapper(func):
for cls in chunk_classes:
                self.parsers[cls._chunk.id] = func
            return func
        return wrapper
def parse(self, file):
        while True:
chunk = self.base_chunk.structure(file)
if chunk.id in self.parsers:
for name in chunk._fields:
getattr(chunk, name)
value = self.parsers[chunk.id](chunk.payload, process_chunk=False)
if self.terminator and isinstance(chunk, self.terminator):
break
yield value
elif chunk.id:
# This is a valid chunk, just not a recognized type
for name in chunk._fields:
getattr(chunk, name)
yield chunk
else:
# This is not a valid chunk, which is probably the end of the file
break
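# A minimal, hypothetical usage sketch (the field layout is illustrative and
# not taken from this module): subclass Chunk to describe the envelope, then
# decorate payload structures with instances of it.
#
#     class MyChunk(Chunk):
#         id = Bytes(size=4)
#         size = Integer(size=4)
#         payload = Payload(size=size)
#
#     @MyChunk(b'DATA')
#     class Data(Structure):
#         ...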
|
{
"content_hash": "72bbbc7cc03ac6c351d13dde33339c0c",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 87,
"avg_line_length": 35.44444444444444,
"alnum_prop": 0.5618661257606491,
"repo_name": "gulopine/steel",
"id": "883dfc817a2f93a19f933a2093eb1d42be751846",
"size": "5423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "steel/chunks/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "103498"
}
],
"symlink_target": ""
}
|
'''Module that configures setuptools to package libthumbor'''
from setuptools import setup, find_packages
from libthumbor import __version__
tests_require = [
'mock',
'nose',
'coverage',
'yanc',
'preggy',
'ipdb',
'coveralls',
'thumbor',
]
setup(
    name='libthumbor',
    version=__version__,
    description="libthumbor is the python extension to thumbor",
    long_description="""
libthumbor is the python extension to thumbor.
It allows users to generate safe urls easily.
""",
    keywords='imaging face detection feature thumbor thumbnail'
             ' imagemagick pil opencv',
    author='Bernardo Heynemann',
    author_email='heynemann@gmail.com',
    url='http://github.com/heynemann/libthumbor',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: MacOS',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 2.6',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Multimedia :: Graphics :: Presentation',
    ],
    packages=find_packages(),
    package_dir={"libthumbor": "libthumbor"},
    include_package_data=True,
    package_data={},
    extras_require={
        'tests': tests_require,
    },
    install_requires=[
        'PyCryptodome',
    ],
)
|
{
"content_hash": "09b2ab2a4597cf9bbb5b02f57823b0ca",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 70,
"avg_line_length": 29.166666666666668,
"alnum_prop": 0.5726984126984127,
"repo_name": "webus/libthumbor",
"id": "607605c74ddd1bd1cf620ff42b9fb9f7bc1c6400",
"size": "1847",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "141"
},
{
"name": "Python",
"bytes": "60496"
}
],
"symlink_target": ""
}
|
class Parameter:
"""Defines a parameter with a name and a value.
Must provide a name and value.
Parameters have a generic metadata property stored as a dictionary.
.. code-block:: python
parameter = Parameter('warp speed', 9.0, {'max': 10.0})
parameter.name #=> 'warp speed'
parameter.value #=> 9.0
parameter.meta['max'] #=> 10.0
"""
    def __init__(self, name, value, meta=None):
        self.name = name
        self.value = value
        if meta is not None:
            self.meta = meta
@property
def name(self):
"""The name of this parameter.
:return: Parameter name.
:rtype: str
"""
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def value(self):
"""The value of this parameter.
        :return: Parameter value.
:rtype: float
"""
return self._value
@value.setter
def value(self, value):
self._value = value
@property
def meta(self):
"""The metadata of this parameter.
:return: Parameter metadata.
:rtype: dict
:default: ``{}``
"""
        if not hasattr(self, '_meta'):
            self._meta = {}
return self._meta
@meta.setter
def meta(self, value):
self._meta = value
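# Note: `meta` is optional; when it is never assigned, the `meta` property
# lazily initialises an empty dict, so Parameter('x', 1.0).meta == {}.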
|
{
"content_hash": "bd978af6d1c015fc946c4a00aab93b50",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 71,
"avg_line_length": 22.473684210526315,
"alnum_prop": 0.5448868071818892,
"repo_name": "razor-x/dichalcogenides",
"id": "d386544879e9ccbc9052e47501eaa445e1c23fa0",
"size": "1281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dichalcogenides/parameters/parameter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "285"
},
{
"name": "Python",
"bytes": "20364"
}
],
"symlink_target": ""
}
|
"""Test runner for typeshed.
Depends on mypy being installed.
Approach:
1. Parse sys.argv
2. Compute appropriate arguments for mypy
3. Stuff those arguments into sys.argv
4. Run mypy.main('')
5. Repeat steps 2-4 for other mypy runs (e.g. --py2)
"""
import os
import re
import sys
import argparse
parser = argparse.ArgumentParser(description="Test runner for typeshed. "
"Patterns are unanchored regexps on the full path.")
parser.add_argument('-v', '--verbose', action='count', default=0, help="More output")
parser.add_argument('-n', '--dry-run', action='store_true', help="Don't actually run mypy")
parser.add_argument('-x', '--exclude', type=str, nargs='*', help="Exclude pattern")
parser.add_argument('-p', '--python-version', type=str, nargs='*',
help="These versions only (major[.minor])")
parser.add_argument('--platform',
help="Run mypy for a certain OS platform (defaults to sys.platform)")
parser.add_argument('--warn-unused-ignores', action='store_true',
help="Run mypy with --warn-unused-ignores "
"(hint: only get rid of warnings that are "
"unused for all platforms and Python versions)")
parser.add_argument('filter', type=str, nargs='*', help="Include pattern (default all)")
def log(args, *varargs):
if args.verbose >= 2:
print(*varargs)
def match(fn, args, blacklist):
if blacklist.match(fn):
        log(args, fn, 'excluded by blacklist')
return False
if not args.filter and not args.exclude:
log(args, fn, 'accept by default')
return True
if args.exclude:
for f in args.exclude:
if re.search(f, fn):
log(args, fn, 'excluded by pattern', f)
return False
if args.filter:
for f in args.filter:
if re.search(f, fn):
log(args, fn, 'accepted by pattern', f)
return True
if args.filter:
log(args, fn, 'rejected (no pattern matches)')
return False
log(args, fn, 'accepted (no exclude pattern matches)')
return True
def libpath(major, minor):
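    # Build the stub search path for a Python version: for (3, 7) this checks
    # 3.7, 3.6, ..., 3.0, then '3', then '2and3', under both stdlib/ and
    # third_party/.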
versions = ['%d.%d' % (major, minor)
for minor in reversed(range(minor + 1))]
versions.append(str(major))
versions.append('2and3')
paths = []
for v in versions:
for top in ['stdlib', 'third_party']:
p = os.path.join(top, v)
if os.path.isdir(p):
paths.append(p)
return paths
def main():
args = parser.parse_args()
with open(os.path.join(os.path.dirname(__file__), "mypy_blacklist.txt")) as f:
blacklist = re.compile("(%s)$" % "|".join(
re.findall(r"^\s*([^\s#]+)\s*(?:#.*)?$", f.read(), flags=re.M)))
try:
from mypy.main import main as mypy_main
except ImportError:
print("Cannot import mypy. Did you install it?")
sys.exit(1)
versions = [(3, 8), (3, 7), (3, 6), (3, 5), (2, 7)]
if args.python_version:
versions = [v for v in versions
if any(('%d.%d' % v).startswith(av) for av in args.python_version)]
if not versions:
print("--- no versions selected ---")
sys.exit(1)
code = 0
runs = 0
for major, minor in versions:
roots = libpath(major, minor)
files = []
seen = {'__builtin__', 'builtins', 'typing'} # Always ignore these.
for root in roots:
names = os.listdir(root)
for name in names:
full = os.path.join(root, name)
mod, ext = os.path.splitext(name)
if mod in seen or mod.startswith('.'):
continue
if ext in ['.pyi', '.py']:
if match(full, args, blacklist):
seen.add(mod)
files.append(full)
elif (os.path.isfile(os.path.join(full, '__init__.pyi')) or
os.path.isfile(os.path.join(full, '__init__.py'))):
for r, ds, fs in os.walk(full):
ds.sort()
fs.sort()
for f in fs:
m, x = os.path.splitext(f)
if x in ['.pyi', '.py']:
fn = os.path.join(r, f)
if match(fn, args, blacklist):
seen.add(mod)
files.append(fn)
if files:
runs += 1
flags = ['--python-version', '%d.%d' % (major, minor)]
flags.append('--strict-optional')
flags.append('--no-site-packages')
flags.append('--show-traceback')
flags.append('--no-implicit-optional')
flags.append('--disallow-any-generics')
if args.warn_unused_ignores:
flags.append('--warn-unused-ignores')
if args.platform:
flags.extend(['--platform', args.platform])
sys.argv = ['mypy'] + flags + files
if args.verbose:
print("running", ' '.join(sys.argv))
else:
print("running mypy", ' '.join(flags), "# with", len(files), "files")
try:
if not args.dry_run:
mypy_main('', sys.stdout, sys.stderr)
except SystemExit as err:
code = max(code, err.code)
if code:
print("--- exit status", code, "---")
sys.exit(code)
if not runs:
print("--- nothing to do; exit 1 ---")
sys.exit(1)
if __name__ == '__main__':
main()
|
{
"content_hash": "d491836a7aaacb1cb50b641cbb84058e",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 97,
"avg_line_length": 35.85,
"alnum_prop": 0.5097629009762901,
"repo_name": "srusskih/SublimeJEDI",
"id": "48ad62e98a2ea72c9e5d72d387cdc3dd4bea829e",
"size": "5759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dependencies/jedi/third_party/typeshed/tests/mypy_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "792"
},
{
"name": "Python",
"bytes": "53199"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.conf.urls import patterns, url, include
from django.conf.urls.static import static
from haystack.views import FacetedSearchView
from haystack.forms import FacetedSearchForm
from haystack.query import SearchQuerySet
# Uncomment the next two lines to enable the admin:
from cms.sitemaps import CMSSitemap
from django.contrib import admin
admin.autodiscover()
sqs = SearchQuerySet().facet('model_type').facet('sector').facet('sub_sector')
urlpatterns = patterns(
'',
# Examples:
# url(r'^$', 'admin.views.site.home', name='home'),
# url(r'^pursuite/', include('pursuite.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^analytics/', include('analytics.urls')),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^account/profile/$', 'account.views.profile', name="profile"),
url(r'^account/competency/$', 'account.views.check_competency',
name="check_competency"),
url(r'^account/', include('allauth.urls')),
url(r'^search/$', FacetedSearchView(
form_class=FacetedSearchForm,
template='search-result.html',
searchqueryset=sqs,
results_per_page=10,
), name='haystack_search'),
url(
r'^occupational-standard/(?P<code>[A-z]{3}/[NO]\d{4})/$',
'admin.views.occupational_standard.view_occupational_standard',
name="occupational_standard"
),
url(
r'^career-map/(?P<slug>.*).svg$',
'admin.views.occupation.view_career_map',
name="career_map"
),
url(
r'^occupation/(?P<slug>.*)/$',
'admin.views.occupation.render',
name="render_occupation"
),
url(
        r'^occupational-standard/(?P<code>[A-z]{3}/[NO]\d{4})/'
        r'(?P<version>\d+\.\d+)/$',
'admin.views.occupational_standard.view_occupational_standard',
name="occupational_standard"
),
url(
r'^qualification-pack/(?P<id>\d+)/$',
'admin.views.qualification_pack.view_qualification_pack_id',
name="qualification_pack"
),
url(
r'^qualification-pack/(?P<code>[A-z]{3}/Q\d{4})/$',
'admin.views.qualification_pack.view_qualification_pack',
name="qualification_pack"
),
    url(
        r'^qualification-pack/(?P<code>[A-z]{3}/Q\d{4})/'
        r'(?P<version>\d+\.\d+)/$',
        'admin.views.qualification_pack.view_qualification_pack',
        name="qualification_pack"
    ),
url(
r'^wfmis-json/$', 'admin.views.common.wfmis_json', name="wfmis_json"
),
# Job URLs
url(
r'^job/(?P<id>\d+)/$', 'admin.views.job.render', name="render_job"
),
url(
r'^jobs/$', 'admin.views.job.render_list', name="render_jobs"
),
url(
r'^jobs/-new$', 'admin.views.job.new_job', name="new_job"
),
url(
r'^job/(?P<id>\d+)/-delete$', 'admin.views.job.delete_job',
name="delete_job"
),
# Training URLs
url(
r'^training/(?P<id>\d+)/$', 'admin.views.training.render',
name="render_training"
),
url(
r'^trainings/$', 'admin.views.training.render_list',
name="render_trainings"
),
url(
r'^trainings/-new$', 'admin.views.training.new_training',
name="new_training"
),
url(
r'^training/(?P<id>\d+)/-delete$',
'admin.views.training.delete_training',
name="delete_training"
),
# CMS urls
url(r'^', include('cms.urls')),
url(
r'^sitemap.xml$', 'django.contrib.sitemaps.views.sitemap',
{'sitemaps': {'cmspages': CMSSitemap}}
),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
{
"content_hash": "cc53158b3958b0f3f28682d0b812445f",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 79,
"avg_line_length": 32.47899159663866,
"alnum_prop": 0.5981888745148771,
"repo_name": "arpitprogressive/arpittest",
"id": "1094bf79e2d4559e535bfbfa4e75983777562ba3",
"size": "3865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pursuite/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "133532"
},
{
"name": "JavaScript",
"bytes": "227983"
},
{
"name": "Python",
"bytes": "782274"
},
{
"name": "Shell",
"bytes": "290"
}
],
"symlink_target": ""
}
|
from enum import Enum
from azure.core import CaseInsensitiveEnumMeta
class AssessmentStatusCode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Programmatic code for the status of the assessment."""
#: The resource is healthy
HEALTHY = "Healthy"
#: The resource has a security issue that needs to be addressed
UNHEALTHY = "Unhealthy"
#: Assessment for this resource did not happen
NOT_APPLICABLE = "NotApplicable"
class AssessmentType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""BuiltIn if the assessment based on built-in Azure Policy definition, Custom if the assessment
based on custom Azure Policy definition.
"""
#: Microsoft Defender for Cloud managed assessments
BUILT_IN = "BuiltIn"
#: User defined policies that are automatically ingested from Azure Policy to Microsoft Defender
#: for Cloud
CUSTOM_POLICY = "CustomPolicy"
#: User assessments pushed directly by the user or other third party to Microsoft Defender for
#: Cloud
CUSTOMER_MANAGED = "CustomerManaged"
#: An assessment that was created by a verified 3rd party if the user connected it to ASC
VERIFIED_PARTNER = "VerifiedPartner"
class Categories(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The categories of resource that is at risk when the assessment is unhealthy."""
COMPUTE = "Compute"
NETWORKING = "Networking"
DATA = "Data"
IDENTITY_AND_ACCESS = "IdentityAndAccess"
IO_T = "IoT"
class Enum1(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Enum1."""
MCAS = "MCAS"
WDATP = "WDATP"
SENTINEL = "Sentinel"
class ExpandEnum(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""ExpandEnum."""
#: All links associated with an assessment
LINKS = "links"
#: Assessment metadata
METADATA = "metadata"
class ImplementationEffort(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The implementation effort required to remediate this assessment."""
LOW = "Low"
MODERATE = "Moderate"
HIGH = "High"
class SettingKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""the kind of the settings string."""
DATA_EXPORT_SETTINGS = "DataExportSettings"
ALERT_SUPPRESSION_SETTING = "AlertSuppressionSetting"
ALERT_SYNC_SETTINGS = "AlertSyncSettings"
class Severity(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The severity level of the assessment."""
LOW = "Low"
MEDIUM = "Medium"
HIGH = "High"
class Source(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The platform where the assessed resource resides."""
#: Resource is in Azure
AZURE = "Azure"
#: Resource in an on premise machine connected to Azure cloud
ON_PREMISE = "OnPremise"
#: SQL Resource in an on premise machine connected to Azure cloud
ON_PREMISE_SQL = "OnPremiseSql"
class Tactics(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Tactic of the assessment."""
RECONNAISSANCE = "Reconnaissance"
RESOURCE_DEVELOPMENT = "Resource Development"
INITIAL_ACCESS = "Initial Access"
EXECUTION = "Execution"
PERSISTENCE = "Persistence"
PRIVILEGE_ESCALATION = "Privilege Escalation"
DEFENSE_EVASION = "Defense Evasion"
CREDENTIAL_ACCESS = "Credential Access"
DISCOVERY = "Discovery"
LATERAL_MOVEMENT = "Lateral Movement"
COLLECTION = "Collection"
COMMAND_AND_CONTROL = "Command and Control"
EXFILTRATION = "Exfiltration"
IMPACT = "Impact"
class Techniques(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Techniques of the assessment."""
ABUSE_ELEVATION_CONTROL_MECHANISM = "Abuse Elevation Control Mechanism"
ACCESS_TOKEN_MANIPULATION = "Access Token Manipulation"
ACCOUNT_DISCOVERY = "Account Discovery"
ACCOUNT_MANIPULATION = "Account Manipulation"
ACTIVE_SCANNING = "Active Scanning"
APPLICATION_LAYER_PROTOCOL = "Application Layer Protocol"
AUDIO_CAPTURE = "Audio Capture"
BOOT_OR_LOGON_AUTOSTART_EXECUTION = "Boot or Logon Autostart Execution"
BOOT_OR_LOGON_INITIALIZATION_SCRIPTS = "Boot or Logon Initialization Scripts"
BRUTE_FORCE = "Brute Force"
CLOUD_INFRASTRUCTURE_DISCOVERY = "Cloud Infrastructure Discovery"
CLOUD_SERVICE_DASHBOARD = "Cloud Service Dashboard"
CLOUD_SERVICE_DISCOVERY = "Cloud Service Discovery"
COMMAND_AND_SCRIPTING_INTERPRETER = "Command and Scripting Interpreter"
COMPROMISE_CLIENT_SOFTWARE_BINARY = "Compromise Client Software Binary"
COMPROMISE_INFRASTRUCTURE = "Compromise Infrastructure"
CONTAINER_AND_RESOURCE_DISCOVERY = "Container and Resource Discovery"
CREATE_ACCOUNT = "Create Account"
CREATE_OR_MODIFY_SYSTEM_PROCESS = "Create or Modify System Process"
CREDENTIALS_FROM_PASSWORD_STORES = "Credentials from Password Stores"
DATA_DESTRUCTION = "Data Destruction"
DATA_ENCRYPTED_FOR_IMPACT = "Data Encrypted for Impact"
DATA_FROM_CLOUD_STORAGE_OBJECT = "Data from Cloud Storage Object"
DATA_FROM_CONFIGURATION_REPOSITORY = "Data from Configuration Repository"
DATA_FROM_INFORMATION_REPOSITORIES = "Data from Information Repositories"
DATA_FROM_LOCAL_SYSTEM = "Data from Local System"
DATA_MANIPULATION = "Data Manipulation"
DATA_STAGED = "Data Staged"
DEFACEMENT = "Defacement"
DEOBFUSCATE_DECODE_FILES_OR_INFORMATION = "Deobfuscate/Decode Files or Information"
DISK_WIPE = "Disk Wipe"
DOMAIN_TRUST_DISCOVERY = "Domain Trust Discovery"
DRIVE_BY_COMPROMISE = "Drive-by Compromise"
DYNAMIC_RESOLUTION = "Dynamic Resolution"
ENDPOINT_DENIAL_OF_SERVICE = "Endpoint Denial of Service"
EVENT_TRIGGERED_EXECUTION = "Event Triggered Execution"
EXFILTRATION_OVER_ALTERNATIVE_PROTOCOL = "Exfiltration Over Alternative Protocol"
EXPLOIT_PUBLIC_FACING_APPLICATION = "Exploit Public-Facing Application"
EXPLOITATION_FOR_CLIENT_EXECUTION = "Exploitation for Client Execution"
EXPLOITATION_FOR_CREDENTIAL_ACCESS = "Exploitation for Credential Access"
EXPLOITATION_FOR_DEFENSE_EVASION = "Exploitation for Defense Evasion"
EXPLOITATION_FOR_PRIVILEGE_ESCALATION = "Exploitation for Privilege Escalation"
EXPLOITATION_OF_REMOTE_SERVICES = "Exploitation of Remote Services"
EXTERNAL_REMOTE_SERVICES = "External Remote Services"
FALLBACK_CHANNELS = "Fallback Channels"
FILE_AND_DIRECTORY_DISCOVERY = "File and Directory Discovery"
GATHER_VICTIM_NETWORK_INFORMATION = "Gather Victim Network Information"
HIDE_ARTIFACTS = "Hide Artifacts"
HIJACK_EXECUTION_FLOW = "Hijack Execution Flow"
IMPAIR_DEFENSES = "Impair Defenses"
IMPLANT_CONTAINER_IMAGE = "Implant Container Image"
INDICATOR_REMOVAL_ON_HOST = "Indicator Removal on Host"
INDIRECT_COMMAND_EXECUTION = "Indirect Command Execution"
INGRESS_TOOL_TRANSFER = "Ingress Tool Transfer"
INPUT_CAPTURE = "Input Capture"
INTER_PROCESS_COMMUNICATION = "Inter-Process Communication"
LATERAL_TOOL_TRANSFER = "Lateral Tool Transfer"
MAN_IN_THE_MIDDLE = "Man-in-the-Middle"
MASQUERADING = "Masquerading"
MODIFY_AUTHENTICATION_PROCESS = "Modify Authentication Process"
MODIFY_REGISTRY = "Modify Registry"
NETWORK_DENIAL_OF_SERVICE = "Network Denial of Service"
NETWORK_SERVICE_SCANNING = "Network Service Scanning"
NETWORK_SNIFFING = "Network Sniffing"
NON_APPLICATION_LAYER_PROTOCOL = "Non-Application Layer Protocol"
NON_STANDARD_PORT = "Non-Standard Port"
OBTAIN_CAPABILITIES = "Obtain Capabilities"
OBFUSCATED_FILES_OR_INFORMATION = "Obfuscated Files or Information"
OFFICE_APPLICATION_STARTUP = "Office Application Startup"
OS_CREDENTIAL_DUMPING = "OS Credential Dumping"
PERMISSION_GROUPS_DISCOVERY = "Permission Groups Discovery"
PHISHING = "Phishing"
PRE_OS_BOOT = "Pre-OS Boot"
PROCESS_DISCOVERY = "Process Discovery"
PROCESS_INJECTION = "Process Injection"
PROTOCOL_TUNNELING = "Protocol Tunneling"
PROXY = "Proxy"
QUERY_REGISTRY = "Query Registry"
REMOTE_ACCESS_SOFTWARE = "Remote Access Software"
REMOTE_SERVICE_SESSION_HIJACKING = "Remote Service Session Hijacking"
REMOTE_SERVICES = "Remote Services"
REMOTE_SYSTEM_DISCOVERY = "Remote System Discovery"
RESOURCE_HIJACKING = "Resource Hijacking"
SCHEDULED_TASK_JOB = "Scheduled Task/Job"
SCREEN_CAPTURE = "Screen Capture"
SEARCH_VICTIM_OWNED_WEBSITES = "Search Victim-Owned Websites"
SERVER_SOFTWARE_COMPONENT = "Server Software Component"
SERVICE_STOP = "Service Stop"
SIGNED_BINARY_PROXY_EXECUTION = "Signed Binary Proxy Execution"
SOFTWARE_DEPLOYMENT_TOOLS = "Software Deployment Tools"
SQL_STORED_PROCEDURES = "SQL Stored Procedures"
STEAL_OR_FORGE_KERBEROS_TICKETS = "Steal or Forge Kerberos Tickets"
SUBVERT_TRUST_CONTROLS = "Subvert Trust Controls"
SUPPLY_CHAIN_COMPROMISE = "Supply Chain Compromise"
SYSTEM_INFORMATION_DISCOVERY = "System Information Discovery"
TAINT_SHARED_CONTENT = "Taint Shared Content"
TRAFFIC_SIGNALING = "Traffic Signaling"
TRANSFER_DATA_TO_CLOUD_ACCOUNT = "Transfer Data to Cloud Account"
TRUSTED_RELATIONSHIP = "Trusted Relationship"
UNSECURED_CREDENTIALS = "Unsecured Credentials"
USER_EXECUTION = "User Execution"
VALID_ACCOUNTS = "Valid Accounts"
WINDOWS_MANAGEMENT_INSTRUMENTATION = "Windows Management Instrumentation"
FILE_AND_DIRECTORY_PERMISSIONS_MODIFICATION = "File and Directory Permissions Modification"
class Threats(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Threats impact of the assessment."""
ACCOUNT_BREACH = "accountBreach"
DATA_EXFILTRATION = "dataExfiltration"
DATA_SPILLAGE = "dataSpillage"
MALICIOUS_INSIDER = "maliciousInsider"
ELEVATION_OF_PRIVILEGE = "elevationOfPrivilege"
THREAT_RESISTANCE = "threatResistance"
MISSING_COVERAGE = "missingCoverage"
DENIAL_OF_SERVICE = "denialOfService"
class UserImpact(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The user impact of the assessment."""
LOW = "Low"
MODERATE = "Moderate"
HIGH = "High"
|
{
"content_hash": "f45a817b4bdfb3fbb9040bd103f220f5",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 100,
"avg_line_length": 41.70124481327801,
"alnum_prop": 0.7340298507462687,
"repo_name": "Azure/azure-sdk-for-python",
"id": "4326c66f40e8fdbb1ae13f91212f4cfd6a8e1d45",
"size": "10518",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/security/azure-mgmt-security/azure/mgmt/security/v2021_06_01/models/_security_center_enums.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""
Bootstrapper for test framework plugins.
The entire rationale for this system is to get the modules in plugin/
imported without importing all of the supporting library, so that we can
set up things for testing before coverage starts.
The rationale for all of plugin/ being *in* the supporting library in the
first place is so that the testing and plugin suite is available to other
libraries, mainly external SQLAlchemy and Alembic dialects, to make use
of the same test environment and standard suites available to
SQLAlchemy/Alembic themselves without the need to ship/install a separate
package outside of SQLAlchemy.
"""
import importlib.util
import os
import sys
bootstrap_file = locals()["bootstrap_file"]
to_bootstrap = locals()["to_bootstrap"]
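# `bootstrap_file` and `to_bootstrap` are injected into this module's
# namespace by the code that exec()s this file; locals() keeps linters from
# flagging them as undefined.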
def load_file_as_module(name):
path = os.path.join(os.path.dirname(bootstrap_file), "%s.py" % name)
spec = importlib.util.spec_from_file_location(name, path)
assert spec is not None
assert spec.loader is not None
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
if to_bootstrap == "pytest":
sys.modules["sqla_plugin_base"] = load_file_as_module("plugin_base")
sys.modules["sqla_plugin_base"].bootstrapped_as_sqlalchemy = True
sys.modules["sqla_pytestplugin"] = load_file_as_module("pytestplugin")
else:
raise Exception("unknown bootstrap: %s" % to_bootstrap) # noqa
|
{
"content_hash": "fb47307e9ba305d65c974c2a512937b4",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 74,
"avg_line_length": 32.883720930232556,
"alnum_prop": 0.7475247524752475,
"repo_name": "zzzeek/sqlalchemy",
"id": "f93b8d3e629eca50e60a73f8bcc6268d2fe997de",
"size": "1437",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "lib/sqlalchemy/testing/plugin/bootstrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "21698"
},
{
"name": "Python",
"bytes": "16838583"
}
],
"symlink_target": ""
}
|
import datetime
import json
import os
import sys
import mysql.connector
from mysql.connector import Error
from tweepy import OAuthHandler
from tweepy import Stream
from tweepy.streaming import StreamListener
from svm import Classifier
from location import Location
import settings
os.environ['TZ'] = 'Asia/Jakarta'
auth = OAuthHandler(settings.consumer_key, settings.consumer_secret)
auth.set_access_token(settings.access_token, settings.access_secret)
labeled_tweets = (
[(line, 'traffic') for line in open('tweets_corpus/traffic_tweets_combined.txt')] +
[(line, 'non_traffic') for line in open('tweets_corpus/random_tweets.txt')] +
[(line, 'non_traffic') for line in open('tweets_corpus/non_traffic_tweets.txt')]
)
class TwitterStreamer(StreamListener):
def __init__(self):
super(TwitterStreamer, self).__init__()
self.classifier = Classifier(labeled_tweets)
print('Using', self.classifier.get_data_count(), 'training data.')
print('SVM Classifier training time:', self.classifier.get_training_time(), 'seconds')
self.location = Location()
print('\nTweets:')
        with open(os.path.join(os.path.dirname(__file__), 'classified_tweets.txt'), 'a') as f:
f.write('\nTweets:')
def on_data(self, data):
try:
tweet = json.loads(data)['text'].replace('\n', ' ')
svm_result = str(self.classifier.classify(tweet))
if sys.argv[1] == "dev":
print('| ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'\t| ' + svm_result,
'\t| ' + tweet)
            with open(os.path.join(os.path.dirname(__file__), 'classified_tweets.txt'), 'a') as f:
f.write('\n| ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") +
'\t| ' + svm_result +
'\t| ' + tweet)
            with open(os.path.join(os.path.dirname(__file__), 'classified_tweets.csv'), 'a') as f:
f.write('"' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") +
'","' + svm_result +
'","' + tweet + '"\n')
if svm_result == "traffic":
ts = datetime.datetime.strftime(datetime.datetime.strptime(json.loads(data)['created_at'],
'%a %b %d %H:%M:%S +0000 %Y') + datetime.timedelta(hours=7), '%Y-%m-%d %H:%M:%S')
con = mysql.connector.connect(host=settings.mysql_host, database=settings.mysql_db, user=settings.mysql_user, password=settings.mysql_password)
cur = con.cursor()
add_tweet = (
"INSERT INTO tweets(datetime, twitter_user_id, text, category, locations) VALUES(%s, %s, %s, %s, %s)")
tweet_data = (
ts,
json.loads(data)['user']['id_str'],
tweet,
svm_result,
str(self.location.find_locations(tweet))
)
cur.execute(add_tweet, tweet_data)
con.commit()
cur.close()
except BaseException as e:
print("Error on_data: %s" % str(e))
return True
def on_error(self, status):
print(status)
return True
twitter_stream = Stream(auth, TwitterStreamer())
# keywords = [line.rstrip('\n') for line in open(os.path.dirname(__file__) + 'name_list.txt')]
users = ['250022672', '187397386', '1118238337', '4675666764', '128175561', '537556372', '106780531', '62327666',
'454564576', '223476605', '201720189']
keywords = ['Yogyakarta', 'Jogjakarta', 'Jogja', 'Yogya', 'Adisutjipto', 'Adi Sutjipto', 'lalinjogja', 'RTMC_Jogja',
'ATCS_DIY', 'jogjaupdate', 'jogja24jam', 'infojogja', 'yogyakartacity', 'jogjamedia', 'tribunjogja', 'unisifmyk',
'UGM', 'UII', 'UNY', 'UMY', 'lalinyk']
twitter_stream.filter(track=keywords, follow=users)
|
{
"content_hash": "15938ca891fdc1b4f26fcc4fc7552c67",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 159,
"avg_line_length": 42.180851063829785,
"alnum_prop": 0.5604035308953341,
"repo_name": "dwiajik/twit-macet-mining-v2",
"id": "a2d8c643370189a92a00bbd6c0caad89579d90a3",
"size": "3965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old/stream_classify_save_db.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94488"
}
],
"symlink_target": ""
}
|
from .base_resource import BaseMediaResource, parse_media_id
from synapse.http.servlet import parse_string, parse_integer
from synapse.http.server import request_handler
from twisted.web.server import NOT_DONE_YET
from twisted.internet import defer
import logging
logger = logging.getLogger(__name__)
class ThumbnailResource(BaseMediaResource):
isLeaf = True
def render_GET(self, request):
self._async_render_GET(request)
return NOT_DONE_YET
@request_handler
@defer.inlineCallbacks
def _async_render_GET(self, request):
server_name, media_id = parse_media_id(request)
width = parse_integer(request, "width")
height = parse_integer(request, "height")
method = parse_string(request, "method", "scale")
m_type = parse_string(request, "type", "image/png")
if server_name == self.server_name:
yield self._respond_local_thumbnail(
request, media_id, width, height, method, m_type
)
else:
yield self._respond_remote_thumbnail(
request, server_name, media_id,
width, height, method, m_type
)
@defer.inlineCallbacks
def _respond_local_thumbnail(self, request, media_id, width, height,
method, m_type):
media_info = yield self.store.get_local_media(media_id)
if not media_info:
self._respond_404(request)
return
thumbnail_infos = yield self.store.get_local_media_thumbnails(media_id)
if thumbnail_infos:
thumbnail_info = self._select_thumbnail(
width, height, method, m_type, thumbnail_infos
)
t_width = thumbnail_info["thumbnail_width"]
t_height = thumbnail_info["thumbnail_height"]
t_type = thumbnail_info["thumbnail_type"]
t_method = thumbnail_info["thumbnail_method"]
file_path = self.filepaths.local_media_thumbnail(
media_id, t_width, t_height, t_type, t_method,
)
yield self._respond_with_file(request, t_type, file_path)
else:
yield self._respond_default_thumbnail(
request, media_info, width, height, method, m_type,
)
@defer.inlineCallbacks
def _respond_remote_thumbnail(self, request, server_name, media_id, width,
height, method, m_type):
# TODO: Don't download the whole remote file
# We should proxy the thumbnail from the remote server instead.
media_info = yield self._get_remote_media(server_name, media_id)
thumbnail_infos = yield self.store.get_remote_media_thumbnails(
server_name, media_id,
)
if thumbnail_infos:
thumbnail_info = self._select_thumbnail(
width, height, method, m_type, thumbnail_infos
)
t_width = thumbnail_info["thumbnail_width"]
t_height = thumbnail_info["thumbnail_height"]
t_type = thumbnail_info["thumbnail_type"]
t_method = thumbnail_info["thumbnail_method"]
file_id = thumbnail_info["filesystem_id"]
t_length = thumbnail_info["thumbnail_length"]
file_path = self.filepaths.remote_media_thumbnail(
server_name, file_id, t_width, t_height, t_type, t_method,
)
yield self._respond_with_file(request, t_type, file_path, t_length)
else:
yield self._respond_default_thumbnail(
request, media_info, width, height, method, m_type,
)
@defer.inlineCallbacks
def _respond_default_thumbnail(self, request, media_info, width, height,
method, m_type):
media_type = media_info["media_type"]
top_level_type = media_type.split("/")[0]
sub_type = media_type.split("/")[-1].split(";")[0]
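        # Fall back from the exact (type, subtype) default thumbnail to
        # (type, _default), then (_default, _default), before giving up.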
thumbnail_infos = yield self.store.get_default_thumbnails(
top_level_type, sub_type,
)
if not thumbnail_infos:
thumbnail_infos = yield self.store.get_default_thumbnails(
top_level_type, "_default",
)
if not thumbnail_infos:
thumbnail_infos = yield self.store.get_default_thumbnails(
"_default", "_default",
)
if not thumbnail_infos:
self._respond_404(request)
return
thumbnail_info = self._select_thumbnail(
width, height, "crop", m_type, thumbnail_infos
)
t_width = thumbnail_info["thumbnail_width"]
t_height = thumbnail_info["thumbnail_height"]
t_type = thumbnail_info["thumbnail_type"]
t_method = thumbnail_info["thumbnail_method"]
t_length = thumbnail_info["thumbnail_length"]
file_path = self.filepaths.default_thumbnail(
top_level_type, sub_type, t_width, t_height, t_type, t_method,
)
        yield self._respond_with_file(request, t_type, file_path, t_length)
def _select_thumbnail(self, desired_width, desired_height, desired_method,
desired_type, thumbnail_infos):
d_w = desired_width
d_h = desired_height
if desired_method.lower() == "crop":
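            # Rank each candidate by aspect-ratio mismatch, then area
            # difference, then content-type mismatch, then byte size;
            # min() picks the best tuple lexicographically.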
info_list = []
for info in thumbnail_infos:
t_w = info["thumbnail_width"]
t_h = info["thumbnail_height"]
t_method = info["thumbnail_method"]
if t_method == "scale" or t_method == "crop":
aspect_quality = abs(d_w * t_h - d_h * t_w)
size_quality = abs((d_w - t_w) * (d_h - t_h))
type_quality = desired_type != info["thumbnail_type"]
length_quality = info["thumbnail_length"]
info_list.append((
aspect_quality, size_quality, type_quality,
length_quality, info
))
if info_list:
return min(info_list)[-1]
else:
info_list = []
info_list2 = []
for info in thumbnail_infos:
t_w = info["thumbnail_width"]
t_h = info["thumbnail_height"]
t_method = info["thumbnail_method"]
size_quality = abs((d_w - t_w) * (d_h - t_h))
type_quality = desired_type != info["thumbnail_type"]
length_quality = info["thumbnail_length"]
if t_method == "scale" and (t_w >= d_w or t_h >= d_h):
info_list.append((
size_quality, type_quality, length_quality, info
))
elif t_method == "scale":
info_list2.append((
size_quality, type_quality, length_quality, info
))
if info_list:
return min(info_list)[-1]
else:
return min(info_list2)[-1]
|
{
"content_hash": "fd67ab63ebf9fd2d1af677dfb2892a42",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 79,
"avg_line_length": 39.55865921787709,
"alnum_prop": 0.548086428470555,
"repo_name": "illicitonion/synapse",
"id": "1dadd880b27f44be2c0b396a08a2cef7b42f78d8",
"size": "7691",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "synapse/rest/media/v1/thumbnail_resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1020"
},
{
"name": "HTML",
"bytes": "1223"
},
{
"name": "JavaScript",
"bytes": "172643"
},
{
"name": "Perl",
"bytes": "31420"
},
{
"name": "Python",
"bytes": "1571632"
},
{
"name": "Shell",
"bytes": "3281"
}
],
"symlink_target": ""
}
|
class ConfigurationError(Exception):
"""Raised when the cache isn't configured correctly."""
class QueueEmpty(Exception):
"""Raised when a connection cannot be acquired."""
|
{
"content_hash": "7c66dac6bcec039b8fa0131e62b1008d",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 59,
"avg_line_length": 30.5,
"alnum_prop": 0.73224043715847,
"repo_name": "citrix-openstack-build/oslo.cache",
"id": "3fb31c441f768b64902b48048e7a913a9c733845",
"size": "770",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oslo_cache/exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "107333"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, unicode_literals
from future.builtins import input, open
import os
import re
import sys
from functools import wraps
from getpass import getpass, getuser
from glob import glob
from contextlib import contextmanager
from posixpath import join
import boto.ec2
import boto.route53
from fabric.api import env, cd, prefix, sudo as _sudo, run as _run, hide, task
from fabric.contrib.files import exists, upload_template
from fabric.colors import yellow, green, blue, red
################
# Config setup #
################
conf = {}
if sys.argv[0].split(os.sep)[-1] in ("fab", "fab-script.py"):
# Ensure we import settings from the current dir
try:
conf = __import__("settings", globals(), locals(), [], 0).FABRIC
try:
conf["HOSTS"][0]
except (KeyError, ValueError):
raise ImportError
except (ImportError, AttributeError):
print("Aborting, no hosts defined.")
exit()
env.db_pass = conf.get("DB_PASS", None)
env.admin_pass = conf.get("ADMIN_PASS", None)
env.user = conf.get("SSH_USER", getuser())
env.password = conf.get("SSH_PASS", None)
env.key_filename = conf.get("SSH_KEY_PATH", None)
env.hosts = conf.get("HOSTS", [""])
env.proj_name = conf.get("PROJECT_NAME", os.getcwd().split(os.sep)[-1])
env.venv_home = conf.get("VIRTUALENV_HOME", "/home/%s" % env.user)
env.venv_path = "%s/%s" % (env.venv_home, env.proj_name)
env.proj_dirname = "project"
env.proj_path = "%s/%s" % (env.venv_path, env.proj_dirname)
env.manage = "%s/bin/python %s/project/manage.py" % ((env.venv_path,) * 2)
env.domains = conf.get("DOMAINS", [conf.get("LIVE_HOSTNAME", env.hosts[0])])
env.domains_nginx = " ".join(env.domains)
env.domains_python = ", ".join(["'%s'" % s for s in env.domains])
env.ssl_disabled = "#" if len(env.domains) > 1 else ""
env.repo_url = conf.get("REPO_URL", "")
env.git = env.repo_url.startswith("git") or env.repo_url.endswith(".git")
env.reqs_path = conf.get("REQUIREMENTS_PATH", None)
env.gunicorn_port = conf.get("GUNICORN_PORT", 8000)
env.locale = conf.get("LOCALE", "en_US.UTF-8")
env.secret_key = conf.get("SECRET_KEY", "")
env.nevercache_key = conf.get("NEVERCACHE_KEY", "")
env.aws_region = conf.get("AWS_REGION")
env.aws_avail_zone = conf.get("AWS_AVAIL_ZONE")
env.aws_hosted_zone = conf.get("AWS_HOSTED_ZONE")
env.aws_access_key_id = conf.get("AWS_ACCESS_KEY_ID")
env.aws_secret_access_key = conf.get("AWS_SECRET_ACCESS_KEY")
env.aws_ses_region_name = conf.get("AWS_SES_REGION_NAME")
env.aws_ses_region_endpoint = conf.get("AWS_SES_REGION_ENDPOINT")
env.email_backend = conf.get(
"EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend")
env.email_host = conf.get("EMAIL_HOST", "")
env.email_port = conf.get("EMAIL_PORT", 25)
env.email_host_user = conf.get("EMAIL_HOST_USER", "")
env.email_host_password = conf.get("EMAIL_HOST_PASSWORD", "")
env.email_use_tls = conf.get("EMAIL_USE_TLS", False)
env.default_from_email = conf.get("DEFAULT_FROM_EMAIL", "")
env.raven_config_dsn = conf.get("RAVEN_CONFIG_DSN", "")
env.youtube_api_key = conf.get("YOUTUBE_API_KEY", "")
##################
# Template setup #
##################
# Each template gets uploaded at deploy time, only if their
# contents has changed, in which case, the reload command is
# also run.
templates = {
"nginx": {
"local_path": "deploy/nginx.conf",
"remote_path": "/etc/nginx/sites-enabled/%(proj_name)s.conf",
"reload_command": "service nginx restart",
},
"supervisor": {
"local_path": "deploy/supervisor.conf",
"remote_path": "/etc/supervisor/conf.d/%(proj_name)s.conf",
"reload_command": "supervisorctl reload",
},
"cron": {
"local_path": "deploy/crontab",
"remote_path": "/etc/cron.d/%(proj_name)s",
"owner": "root",
"mode": "600",
},
"gunicorn": {
"local_path": "deploy/gunicorn.conf.py.template",
"remote_path": "%(proj_path)s/gunicorn.conf.py",
},
"settings": {
"local_path": "deploy/local_settings.py.template",
"remote_path": "%(proj_path)s/local_settings.py",
},
}
######################################
# Context for virtualenv and project #
######################################
@contextmanager
def virtualenv():
"""
Runs commands within the project's virtualenv.
"""
with cd(env.venv_path):
with prefix("source %s/bin/activate" % env.venv_path):
yield
@contextmanager
def project():
"""
Runs commands within the project's directory.
"""
with virtualenv():
with cd(env.proj_dirname):
yield
@contextmanager
def update_changed_requirements():
"""
Checks for changes in the requirements file across an update,
and gets new requirements if changes have occurred.
"""
reqs_path = join(env.proj_path, env.reqs_path)
get_reqs = lambda: run("cat %s" % reqs_path, show=False)
old_reqs = get_reqs() if env.reqs_path else ""
yield
if old_reqs:
new_reqs = get_reqs()
if old_reqs == new_reqs:
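            # Contents unchanged: still reinstall if anything is unpinned;
            # the for/else returns only when every requirement is pinned.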
# Unpinned requirements should always be checked.
for req in new_reqs.split("\n"):
if req.startswith("-e"):
if "@" not in req:
# Editable requirement without pinned commit.
break
elif req.strip() and not req.startswith("#"):
if not set(">=<") & set(req):
# PyPI requirement without version.
break
else:
# All requirements are pinned.
return
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
###########################################
# Utils and wrappers for various commands #
###########################################
def _print(output):
print()
print(output)
print()
def print_command(command):
_print(blue("$ ", bold=True) +
yellow(command, bold=True) +
red(" ->", bold=True))
@task
def run(command, show=True):
"""
    Runs a shell command on the remote server.
"""
if show:
print_command(command)
with hide("running"):
return _run(command)
@task
def sudo(command, show=True):
"""
Runs a command as sudo.
"""
if show:
print_command(command)
with hide("running"):
return _sudo(command)
def log_call(func):
@wraps(func)
    def logged(*args, **kwargs):
        header = "-" * len(func.__name__)
        _print(green("\n".join([header, func.__name__, header]), bold=True))
        return func(*args, **kwargs)
return logged
def get_templates():
"""
Returns each of the templates with env vars injected.
"""
injected = {}
for name, data in templates.items():
injected[name] = dict([(k, v % env) for k, v in data.items()])
return injected
def upload_template_and_reload(name):
"""
Uploads a template only if it has changed, and if so, reload a
related service.
"""
template = get_templates()[name]
local_path = template["local_path"]
if not os.path.exists(local_path):
project_root = os.path.dirname(os.path.abspath(__file__))
local_path = os.path.join(project_root, local_path)
remote_path = template["remote_path"]
reload_command = template.get("reload_command")
owner = template.get("owner")
mode = template.get("mode")
remote_data = ""
if exists(remote_path):
with hide("stdout"):
remote_data = sudo("cat %s" % remote_path, show=False)
with open(local_path, "r") as f:
local_data = f.read()
# Escape all non-string-formatting-placeholder occurrences of '%':
local_data = re.sub(r"%(?!\(\w+\)s)", "%%", local_data)
if "%(db_pass)s" in local_data:
env.db_pass = db_pass()
local_data %= env
clean = lambda s: s.replace("\n", "").replace("\r", "").strip()
if clean(remote_data) == clean(local_data):
return
upload_template(local_path, remote_path, env, use_sudo=True, backup=False)
if owner:
sudo("chown %s %s" % (owner, remote_path))
if mode:
sudo("chmod %s %s" % (mode, remote_path))
if reload_command:
sudo(reload_command)
def db_pass():
"""
Prompts for the database password if unknown.
"""
if not env.db_pass:
env.db_pass = getpass("Enter the database password: ")
return env.db_pass
@task
def apt(packages):
"""
Installs one or more system packages via apt.
"""
return sudo("apt-get install -y -q " + packages)
@task
def pip(packages):
"""
Installs one or more Python packages within the virtual environment.
"""
with virtualenv():
return sudo("pip3 install %s" % packages)
def postgres(command):
"""
Runs the given command as the postgres user.
"""
show = not command.startswith("psql")
return run("sudo -u root sudo -u postgres %s" % command, show=show)
@task
def psql(sql, show=True):
"""
Runs SQL against the project's database.
"""
out = postgres('psql -c "%s"' % sql)
if show:
print_command(sql)
return out
@task
def backup(filename):
"""
Backs up the database.
"""
return postgres("pg_dump -Fc %s > %s" % (env.proj_name, filename))
@task
def restore(filename):
"""
Restores the database.
"""
return postgres("pg_restore -c -d %s %s" % (env.proj_name, filename))
@task
def python(code, show=True):
"""
Runs Python code in the project's virtual environment, with Django loaded.
"""
setup = "import os; os.environ[\'DJANGO_SETTINGS_MODULE\']=\'settings\';"
full_code = 'python -c "%s%s"' % (setup, code.replace("`", "\\\`"))
with project():
result = run(full_code, show=False)
if show:
print_command(code)
return result
def static():
"""
Returns the live STATIC_ROOT directory.
"""
return python("from django.conf import settings;"
"print(settings.STATIC_ROOT)", show=False).split("\n")[-1]
@task
def manage(command):
"""
Runs a Django management command.
"""
return run("%s %s" % (env.manage, command))
#########################
# Install and configure #
#########################
@task
@log_call
def install():
"""
Installs the base system and Python requirements for the entire server.
"""
locale = "LC_ALL=%s" % env.locale
with hide("stdout"):
if locale not in sudo("cat /etc/default/locale"):
sudo("update-locale %s" % locale)
run("exit")
sudo("apt-get update -y -q")
apt("nginx libjpeg-dev python3-dev python3-setuptools git-core "
"postgresql libpq-dev memcached supervisor")
sudo("easy_install3 pip")
sudo("pip3 install virtualenv")
@task
@log_call
def create():
"""
Create a new virtual environment for a project.
Pulls the project's repo from version control, adds system-level
configs for the project, and initialises the database with the
live host.
"""
# Create virtualenv
with cd(env.venv_home):
if exists(env.proj_name):
prompt = input("\nVirtualenv exists: %s"
"\nWould you like to replace it? (yes/no) "
% env.proj_name)
if prompt.lower() != "yes":
print("\nAborting!")
return False
remove()
run("virtualenv %s --distribute -p /usr/bin/python3" % env.proj_name)
vcs = "git" if env.git else "hg"
run("%s clone %s %s" % (vcs, env.repo_url, env.proj_path))
# Create DB and DB user.
pw = db_pass()
    user_sql_args = (env.proj_name, pw.replace("'", "''"))
    user_sql = "CREATE USER %s WITH ENCRYPTED PASSWORD '%s';" % user_sql_args
psql(user_sql, show=False)
shadowed = "*" * len(pw)
print_command(user_sql.replace("'%s'" % pw, "'%s'" % shadowed))
psql("CREATE DATABASE %s WITH OWNER %s ENCODING = 'UTF8' "
"LC_CTYPE = '%s' LC_COLLATE = '%s' TEMPLATE template0;" %
(env.proj_name, env.proj_name, env.locale, env.locale))
# Set up SSL certificate.
if not env.ssl_disabled:
conf_path = "/etc/nginx/conf"
if not exists(conf_path):
sudo("mkdir %s" % conf_path)
with cd(conf_path):
crt_file = env.proj_name + ".crt"
key_file = env.proj_name + ".key"
if not exists(crt_file) and not exists(key_file):
try:
crt_local, = glob(join("deploy", "*.crt"))
key_local, = glob(join("deploy", "*.key"))
except ValueError:
parts = (crt_file, key_file, env.domains[0])
sudo("openssl req -new -x509 -nodes -out %s -keyout %s "
"-subj '/CN=%s' -days 3650" % parts)
else:
upload_template(crt_local, crt_file, use_sudo=True)
upload_template(key_local, key_file, use_sudo=True)
# Set up project.
upload_template_and_reload("settings")
with project():
if env.reqs_path:
pip("-r %s/%s" % (env.proj_path, env.reqs_path))
pip("gunicorn setproctitle south psycopg2 "
"django-compressor python3-memcached")
manage("createdb --noinput --nodata")
python("from django.conf import settings;"
"from django.contrib.sites.models import Site;"
"Site.objects.filter(id=settings.SITE_ID).update(domain='%s');"
% env.domains[0])
for domain in env.domains:
python("from django.contrib.sites.models import Site;"
"Site.objects.get_or_create(domain='%s');" % domain)
if env.admin_pass:
pw = env.admin_pass
user_py = ("from mezzanine.utils.models import get_user_model;"
"User = get_user_model();"
"u, _ = User.objects.get_or_create(username='admin');"
"u.is_staff = u.is_superuser = True;"
"u.set_password('%s');"
"u.save();" % pw)
python(user_py, show=False)
shadowed = "*" * len(pw)
print_command(user_py.replace("'%s'" % pw, "'%s'" % shadowed))
return True
@task
@log_call
def remove():
"""
Blow away the current project.
"""
if exists(env.venv_path):
sudo("rm -rf %s" % env.venv_path)
for template in get_templates().values():
remote_path = template["remote_path"]
if exists(remote_path):
sudo("rm %s" % remote_path)
psql("DROP DATABASE IF EXISTS %s;" % env.proj_name)
psql("DROP USER IF EXISTS %s;" % env.proj_name)
##############
# Deployment #
##############
@task
@log_call
def restart():
"""
Restart gunicorn worker processes for the project.
"""
pid_path = "%s/gunicorn.pid" % env.proj_path
if exists(pid_path):
sudo("kill -HUP `cat %s`" % pid_path)
else:
start_args = (env.proj_name, env.proj_name)
sudo("supervisorctl start %s:gunicorn_%s" % start_args)
@task
@log_call
def deploy():
"""
Deploy latest version of the project.
Check out the latest version of the project from version
control, install new requirements, sync and migrate the database,
collect any new static assets, and restart gunicorn's work
processes for the project.
"""
if not exists(env.venv_path):
prompt = input("\nVirtualenv doesn't exist: %s"
"\nWould you like to create it? (yes/no) "
% env.proj_name)
if prompt.lower() != "yes":
print("\nAborting!")
return False
create()
for name in get_templates():
upload_template_and_reload(name)
with project():
backup("last.db")
static_dir = static()
if exists(static_dir):
run("tar -cf last.tar %s" % static_dir)
git = env.git
last_commit = "git rev-parse HEAD" if git else "hg id -i"
run("%s > last.commit" % last_commit)
with update_changed_requirements():
run("git pull origin master -f" if git else "hg pull && hg up -C")
manage("collectstatic -v 0 --noinput")
manage("syncdb --noinput")
manage("migrate --noinput")
restart()
return True
@task
@log_call
def rollback():
"""
Reverts project state to the last deploy.
When a deploy is performed, the current state of the project is
backed up. This includes the last commit checked out, the database,
and all static files. Calling rollback will revert all of these to
their state prior to the last deploy.
"""
with project():
with update_changed_requirements():
update = "git checkout" if env.git else "hg up -C"
run("%s `cat last.commit`" % update)
with cd(join(static(), "..")):
run("tar -xf %s" % join(env.proj_path, "last.tar"))
restore("last.db")
restart()
@task
@log_call
def create_zone():
conn = boto.route53.connect_to_region(env.aws_region)
zones = conn.get_zones()
if zones:
zone = zones[0]
else:
zone = conn.create_zone(env.aws_hosted_zone)
return zone
@task
@log_call
def create_security_groups(conn=None):
if conn is None:
conn = boto.ec2.connect_to_region(env.aws_region)
groups = conn.get_all_security_groups()
if len(groups) == 1:
web = conn.create_security_group('nginx', 'nginx')
web.authorize('tcp', 80, 80, '0.0.0.0/0')
web.authorize('tcp', 443, 443, '0.0.0.0/0')
ssh = conn.create_security_group('ssh', 'ssh')
ssh.authorize('tcp', 22, 22, '0.0.0.0/0')
smtp = conn.create_security_group('smtp', 'smtp')
smtp.authorize('tcp', 587, 587, '0.0.0.0/0')
groups.extend([web, ssh, smtp])
return [group.name for group in groups if group.name != 'default']
@task
@log_call
def create_volume(conn=None):
if conn is None:
conn = boto.ec2.connect_to_region(env.aws_region)
volumes = conn.get_all_volumes()
if not volumes:
volume = conn.create_volume(10, env.aws_avail_zone)
else:
volume = volumes[0]
return volume
@task
@log_call
def create_server():
zone = create_zone()
conn = boto.ec2.connect_to_region(env.aws_region)
groups = create_security_groups(conn)
volume = create_volume(conn)
instances = conn.get_all_instances()
if not instances:
instance = conn.run_instances(
'ami-d5a30ac8', instance_type='t2.micro', security_groups=groups)
else:
instance = instances[0]
import ipdb; ipdb.set_trace()
@task
@log_call
def all():
"""
Installs everything required on a new system and deploy.
From the base software, up to the deployed project.
"""
install()
if create():
deploy()
|
{
"content_hash": "643823c801826cdd65fed84115eecef0",
"timestamp": "",
"source": "github",
"line_count": 631,
"max_line_length": 78,
"avg_line_length": 30.253565768621236,
"alnum_prop": 0.5776846516500785,
"repo_name": "diogobaeder/giva",
"id": "9177252f28c1675632c346f56407f163e5bc4be3",
"size": "19090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3055"
},
{
"name": "JavaScript",
"bytes": "45837"
},
{
"name": "Python",
"bytes": "26172"
}
],
"symlink_target": ""
}
|
from django.urls import reverse
from django.contrib.auth.decorators import login_required, permission_required
from django.shortcuts import redirect, render
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _
from django.http import HttpRequest
from course.models import news
from course.forms import NewsForm
from util.error.reporting import db_error
@login_required()
@permission_required('news.add_news')
def create(request):
if request.method == 'POST':
form = NewsForm(request.POST)
if form.is_valid():
news_item = form.save(commit=False)
news_item.author = "{} {}".format(request.user.first_name, request.user.last_name)
news_item.save()
return redirect('index')
else:
form = NewsForm()
form.initial['entry'] = _((
'English is a weakly typed, interpreted language and runs on a '
'large number of modern humanoids with varying support for '
'advanced syntax features. Website: https://oed.com'
))
return render(
request,
'news/edit.html',
{
'title': _('New News'),
'create': True,
'form': form
}
)
@login_required()
@permission_required('news.change_news')
def edit(request: HttpRequest, news_id: str):
"""
    Edit form for changing a news item and a handler for submitted data.
:param request: request object
:param news_id: id for the news
:return:
"""
try:
cur_news = news.News.objects.get(id=news_id)
except news.News.DoesNotExist:
return db_error(request, _('Requested News does not exist.'))
if request.method == 'POST':
form = NewsForm(request.POST, instance=cur_news)
        if form.is_valid():
            form.save()
return redirect('index')
else:
form = NewsForm(instance=cur_news)
return render(
request,
'news/edit.html',
{
'title': _('Edit News'),
'create': False,
'form': form,
'news_id': news_id
}
)
@login_required
@permission_required('news.delete_news')
@require_POST
def delete(request: HttpRequest, news_id: str):
"""
Delete a news entry.
:param request: request object
:param news_id: id for the news
:return:
"""
try:
cur_news = news.News.objects.get(id=news_id)
except news.News.DoesNotExist:
return db_error(request, _('Requested News does not exist.'))
cur_news.delete()
return redirect('index')
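# Hypothetical URL wiring for these views (illustrative only; the project's
# actual urlconf may differ):
#
#   from django.urls import path
#   from course.views import news
#
#   urlpatterns = [
#       path('news/create/', news.create, name='news-create'),
#       path('news/<news_id>/edit/', news.edit, name='news-edit'),
#       path('news/<news_id>/delete/', news.delete, name='news-delete'),
#   ]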
|
{
"content_hash": "84b7461d83ef01e62f68a7a11361648d",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 94,
"avg_line_length": 27.229166666666668,
"alnum_prop": 0.608645753634277,
"repo_name": "fsr/course-management",
"id": "e2f3828b10ad17e37162b64c389684c2949b157a",
"size": "2614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "course/views/news.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "45"
},
{
"name": "HTML",
"bytes": "103817"
},
{
"name": "Python",
"bytes": "77635"
}
],
"symlink_target": ""
}
|
from pso import pso
from optitestfuns import ackley
import unittest
import math
intVar = []
result = pso(ackley, [-5,-5], [5,5], intVar)
print(result.exit)
print('x_opt: {}'.format(result.xopt))
print('FO: {:.2e}'.format(result.FO))
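# The Ackley function has its global minimum f(0, 0) = 0, so a successful run
# should report xopt close to [0, 0] and an FO value near zero. Illustrative
# output (actual numbers vary between runs):
#
#   x_opt: [ 1.2e-05 -3.4e-06]
#   FO: 4.56e-09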
|
{
"content_hash": "43f3f27eb4d6632941682e1549659a70",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 44,
"avg_line_length": 19.5,
"alnum_prop": 0.6923076923076923,
"repo_name": "CAChemE/stochastic-optimization",
"id": "31074cf79232eb947d4c05669f893445039be22a",
"size": "234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PSO/nD/DemoAckley.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3363048"
},
{
"name": "Matlab",
"bytes": "12828"
},
{
"name": "Python",
"bytes": "74044"
}
],
"symlink_target": ""
}
|
import json
from sqlalchemy import (
and_, Boolean, Column, Integer, String, Text,
)
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import foreign, relationship
from superset import utils
from superset.models.core import Slice
from superset.models.helpers import AuditMixinNullable, ImportMixin
class BaseDatasource(AuditMixinNullable, ImportMixin):
"""A common interface to objects that are queryable
(tables and datasources)"""
# ---------------------------------------------------------------
# class attributes to define when deriving BaseDatasource
# ---------------------------------------------------------------
__tablename__ = None # {connector_name}_datasource
    type = None  # datasource type, str to be defined when deriving this class
baselink = None # url portion pointing to ModelView endpoint
column_class = None # link to derivative of BaseColumn
metric_class = None # link to derivative of BaseMetric
# Used to do code highlighting when displaying the query in the UI
query_language = None
name = None # can be a Column or a property pointing to one
# ---------------------------------------------------------------
# Columns
id = Column(Integer, primary_key=True)
description = Column(Text)
default_endpoint = Column(Text)
is_featured = Column(Boolean, default=False) # TODO deprecating
filter_select_enabled = Column(Boolean, default=False)
offset = Column(Integer, default=0)
cache_timeout = Column(Integer)
params = Column(String(1000))
perm = Column(String(1000))
@declared_attr
def slices(self):
return relationship(
'Slice',
primaryjoin=lambda: and_(
foreign(Slice.datasource_id) == self.id,
foreign(Slice.datasource_type) == self.type,
),
)
# placeholder for a relationship to a derivative of BaseColumn
columns = []
# placeholder for a relationship to a derivative of BaseMetric
metrics = []
@property
def uid(self):
"""Unique id across datasource types"""
return '{self.id}__{self.type}'.format(**locals())
@property
def column_names(self):
return sorted([c.column_name for c in self.columns])
@property
def columns_types(self):
return {c.column_name: c.type for c in self.columns}
@property
def main_dttm_col(self):
return 'timestamp'
@property
def connection(self):
"""String representing the context of the Datasource"""
return None
@property
def schema(self):
"""String representing the schema of the Datasource (if it applies)"""
return None
@property
def groupby_column_names(self):
return sorted([c.column_name for c in self.columns if c.groupby])
@property
def filterable_column_names(self):
return sorted([c.column_name for c in self.columns if c.filterable])
@property
def dttm_cols(self):
return []
@property
def url(self):
return '/{}/edit/{}'.format(self.baselink, self.id)
@property
def explore_url(self):
if self.default_endpoint:
return self.default_endpoint
else:
return '/superset/explore/{obj.type}/{obj.id}/'.format(obj=self)
@property
def column_formats(self):
return {
m.metric_name: m.d3format
for m in self.metrics
if m.d3format
}
@property
def metrics_combo(self):
return sorted(
[
(m.metric_name, m.verbose_name or m.metric_name)
for m in self.metrics],
key=lambda x: x[1])
@property
def short_data(self):
"""Data representation of the datasource sent to the frontend"""
return {
'edit_url': self.url,
'id': self.id,
'uid': self.uid,
'schema': self.schema,
'name': self.name,
'type': self.type,
'connection': self.connection,
'creator': str(self.created_by),
}
@property
def data(self):
"""Data representation of the datasource sent to the frontend"""
order_by_choices = []
for s in sorted(self.column_names):
order_by_choices.append((json.dumps([s, True]), s + ' [asc]'))
order_by_choices.append((json.dumps([s, False]), s + ' [desc]'))
verbose_map = {
o.metric_name: o.verbose_name or o.metric_name
for o in self.metrics
}
verbose_map.update({
o.column_name: o.verbose_name or o.column_name
for o in self.columns
})
return {
'all_cols': utils.choicify(self.column_names),
'column_formats': self.column_formats,
'edit_url': self.url,
'filter_select': self.filter_select_enabled,
'filterable_cols': utils.choicify(self.filterable_column_names),
'gb_cols': utils.choicify(self.groupby_column_names),
'id': self.id,
'metrics_combo': self.metrics_combo,
'name': self.name,
'order_by_choices': order_by_choices,
'type': self.type,
'metrics': [o.data for o in self.metrics],
'columns': [o.data for o in self.columns],
'verbose_map': verbose_map,
}
def get_query_str(self, query_obj):
"""Returns a query as a string
This is used to be displayed to the user so that she/he can
understand what is taking place behind the scene"""
raise NotImplementedError()
def query(self, query_obj):
"""Executes the query and returns a dataframe
query_obj is a dictionary representing Superset's query interface.
Should return a ``superset.models.helpers.QueryResult``
"""
raise NotImplementedError()
def values_for_column(self, column_name, limit=10000):
"""Given a column, returns an iterable of distinct values
This is used to populate the dropdown showing a list of
values in filters in the explore view"""
raise NotImplementedError()
class BaseColumn(AuditMixinNullable, ImportMixin):
"""Interface for column"""
__tablename__ = None # {connector_name}_column
id = Column(Integer, primary_key=True)
column_name = Column(String(255))
verbose_name = Column(String(1024))
is_active = Column(Boolean, default=True)
type = Column(String(32))
groupby = Column(Boolean, default=False)
count_distinct = Column(Boolean, default=False)
sum = Column(Boolean, default=False)
avg = Column(Boolean, default=False)
max = Column(Boolean, default=False)
min = Column(Boolean, default=False)
filterable = Column(Boolean, default=False)
description = Column(Text)
is_dttm = None
# [optional] Set this to support import/export functionality
export_fields = []
def __repr__(self):
return self.column_name
num_types = (
'DOUBLE', 'FLOAT', 'INT', 'BIGINT',
'LONG', 'REAL', 'NUMERIC', 'DECIMAL',
)
date_types = ('DATE', 'TIME', 'DATETIME')
str_types = ('VARCHAR', 'STRING', 'CHAR')
@property
def is_num(self):
return (
self.type and
any([t in self.type.upper() for t in self.num_types])
)
@property
def is_time(self):
return (
self.type and
any([t in self.type.upper() for t in self.date_types])
)
@property
def is_string(self):
return (
self.type and
any([t in self.type.upper() for t in self.str_types])
)
@property
def expression(self):
raise NotImplementedError()
@property
def data(self):
attrs = (
'column_name', 'verbose_name', 'description', 'expression',
'filterable', 'groupby', 'is_dttm')
return {s: getattr(self, s) for s in attrs}
class BaseMetric(AuditMixinNullable, ImportMixin):
"""Interface for Metrics"""
__tablename__ = None # {connector_name}_metric
id = Column(Integer, primary_key=True)
metric_name = Column(String(512))
verbose_name = Column(String(1024))
metric_type = Column(String(32))
description = Column(Text)
is_restricted = Column(Boolean, default=False, nullable=True)
d3format = Column(String(128))
warning_text = Column(Text)
"""
The interface should also declare a datasource relationship pointing
to a derivative of BaseDatasource, along with a FK
datasource_name = Column(
String(255),
ForeignKey('datasources.datasource_name'))
datasource = relationship(
# needs to be altered to point to {Connector}Datasource
'BaseDatasource',
backref=backref('metrics', cascade='all, delete-orphan'),
enable_typechecks=False)
"""
@property
def perm(self):
raise NotImplementedError()
@property
def expression(self):
raise NotImplementedError()
@property
def data(self):
attrs = (
'metric_name', 'verbose_name', 'description', 'expression',
'warning_text')
return {s: getattr(self, s) for s in attrs}
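# Hedged sketch of how a connector might derive these interfaces; the class
# and table names below are illustrative, not Superset's actual connectors:
#
#   class ExampleColumn(Model, BaseColumn):
#       __tablename__ = 'example_columns'
#
#       @property
#       def expression(self):
#           return self.column_name
#
#   class ExampleDatasource(Model, BaseDatasource):
#       __tablename__ = 'example_datasources'
#       type = 'example'
#       baselink = 'exampledatasourcemodelview'
#       column_class = ExampleColumn
#       name = Column(String(255))
#
#       def get_query_str(self, query_obj):
#           return 'SELECT ...'  # rendered for display only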
|
{
"content_hash": "f4147bc34a0681e07201682a65f80f7c",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 78,
"avg_line_length": 30.933993399339933,
"alnum_prop": 0.5939400405419822,
"repo_name": "alanmcruickshank/superset-dev",
"id": "8042ac9c1e03725fad973450bcebed0f4e63e8ca",
"size": "9373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "superset/connectors/base/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "65422"
},
{
"name": "HTML",
"bytes": "101728"
},
{
"name": "JavaScript",
"bytes": "783366"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "815898"
},
{
"name": "Shell",
"bytes": "1367"
}
],
"symlink_target": ""
}
|
from django.db import models
import common
import ptree.constants as constants
from django.template import defaultfilters
import random
from urlparse import urljoin
from django.conf import settings
class BaseExperiment(models.Model):
"""
Base class for all Experiments.
"""
name = models.CharField(max_length = 500, null = True, blank = True)
code = common.RandomCharField(length=8)
is_for_mturk = models.BooleanField(verbose_name='Is for MTurk', default=True)
payment_was_sent = models.BooleanField(verbose_name='Payment was sent', default=False)
experimenter_access_code = common.RandomCharField(length=8)
def __unicode__(self):
return self.name or str(self.pk)
def experimenter_input_url(self):
return urljoin(settings.DOMAIN,
'/{}/ExperimenterLaunch/?{}={}&{}={}'.format(self.url_base,
constants.experiment_code,
self.code,
constants.experimenter_access_code,
self.experimenter_access_code
))
def mturk_start_url(self):
"""The URL that a user is redirected to in order to start a treatment"""
return urljoin(settings.DOMAIN,
'/{}/GetTreatmentOrParticipant/?{}={}'.format(self.url_base,
constants.experiment_code_obfuscated,
self.code))
def pick_treatment_for_incoming_participant(self):
return random.choice(self.treatments())
def treatments(self):
return self.treatment_set.all()
def matches(self):
return self.match_set.all()
def participants(self):
return self.participant_set.all()
def experimenter_sequence_of_views(self):
raise NotImplementedError()
    def experimenter_sequence_as_urls(self):
        """Converts the sequence of views to URLs.
        e.g.:
        experimenter_sequence_of_views() returns something like [views.IntroPage, ...]
        experimenter_sequence_as_urls() returns something like ['mygame/IntroPage', ...]
        """
        return [View.url() for View in self.experimenter_sequence_of_views()]
class Meta:
abstract = True
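# Hypothetical concrete subclass (illustrative only):
#
#   class MyExperiment(BaseExperiment):
#       url_base = 'mygame'
#
#       def experimenter_sequence_of_views(self):
#           from mygame import views
#           return [views.IntroPage, views.ResultsPage]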
|
{
"content_hash": "907b164ebc86a8142ba1020d55b08b83",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 95,
"avg_line_length": 37.707692307692305,
"alnum_prop": 0.5622195022439821,
"repo_name": "pombredanne/django-ptree",
"id": "6283f5ca572d39896fb9280bc8595878f4c7e5da",
"size": "2451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ptree/models/experiments.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
Disk File Interface for the Swift Object Server
The `DiskFile`, `DiskFileWriter` and `DiskFileReader` classes combined define
the on-disk abstraction layer for supporting the object server REST API
interfaces (excluding `REPLICATE`). Other implementations wishing to provide
an alternative backend for the object server must implement the three
classes. An example alternative implementation can be found in the
`mem_server.py` and `mem_diskfile.py` modules alongside this one.
The `DiskFileManager` is a reference-implementation-specific class and is not
part of the backend API.
The remaining methods in this module are considered implementation specific and
are also not considered part of the backend API.
"""
import six.moves.cPickle as pickle
import copy
import errno
import fcntl
import json
import os
import re
import time
import uuid
import hashlib
import logging
import traceback
import xattr
from os.path import basename, dirname, exists, join, splitext
from random import shuffle
from tempfile import mkstemp
from contextlib import contextmanager
from collections import defaultdict
from datetime import timedelta
from eventlet import Timeout
from eventlet.hubs import trampoline
import six
from pyeclib.ec_iface import ECDriverError, ECInvalidFragmentMetadata, \
ECBadFragmentChecksum, ECInvalidParameter
from swift import gettext_ as _
from swift.common.constraints import check_mount, check_dir
from swift.common.request_helpers import is_sys_meta
from swift.common.utils import mkdirs, Timestamp, \
storage_directory, hash_path, renamer, fallocate, fsync, fdatasync, \
fsync_dir, drop_buffer_cache, lock_path, write_pickle, \
config_true_value, listdir, split_path, ismount, remove_file, \
get_md5_socket, F_SETPIPE_SZ, decode_timestamps, encode_timestamps, \
tpool_reraise, MD5_OF_EMPTY_STRING, link_fd_to_path, o_tmpfile_supported, \
O_TMPFILE, makedirs_count, replace_partition_in_path
from swift.common.splice import splice, tee
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
DiskFileCollision, DiskFileNoSpace, DiskFileDeviceUnavailable, \
DiskFileDeleted, DiskFileError, DiskFileNotOpen, PathNotDir, \
ReplicationLockTimeout, DiskFileExpired, DiskFileXattrNotSupported
from swift.common.swob import multi_range_iterator
from swift.common.storage_policy import (
get_policy_string, split_policy_string, PolicyError, POLICIES,
REPL_POLICY, EC_POLICY)
from functools import partial
PICKLE_PROTOCOL = 2
DEFAULT_RECLAIM_AGE = timedelta(weeks=1).total_seconds()
HASH_FILE = 'hashes.pkl'
HASH_INVALIDATIONS_FILE = 'hashes.invalid'
METADATA_KEY = 'user.swift.metadata'
DROP_CACHE_WINDOW = 1024 * 1024
# These are system-set metadata keys that cannot be changed with a POST.
# They should be lowercase.
DATAFILE_SYSTEM_META = set('content-length deleted etag'.split())
DATADIR_BASE = 'objects'
ASYNCDIR_BASE = 'async_pending'
TMP_BASE = 'tmp'
get_data_dir = partial(get_policy_string, DATADIR_BASE)
get_async_dir = partial(get_policy_string, ASYNCDIR_BASE)
get_tmp_dir = partial(get_policy_string, TMP_BASE)
MIN_TIME_UPDATE_AUDITOR_STATUS = 60
# This matches rsync tempfiles, like ".<timestamp>.data.Xy095a"
RE_RSYNC_TEMPFILE = re.compile(r'^\..*\.([a-zA-Z0-9_]){6}$')
def _get_filename(fd):
"""
    Helper function to get the file name from a file descriptor or filename.
:param fd: file descriptor or filename.
:returns: the filename.
"""
if hasattr(fd, 'name'):
# fd object
return fd.name
# fd is a filename
return fd
def _encode_metadata(metadata):
"""
UTF8 encode any unicode keys or values in given metadata dict.
:param metadata: a dict
"""
def encode_str(item):
if isinstance(item, six.text_type):
return item.encode('utf8')
return item
return dict(((encode_str(k), encode_str(v)) for k, v in metadata.items()))
def read_metadata(fd):
"""
Helper function to read the pickled metadata from an object file.
:param fd: file descriptor or filename to load the metadata from
:returns: dictionary of metadata
"""
metadata = b''
key = 0
try:
while True:
metadata += xattr.getxattr(fd, '%s%s' % (METADATA_KEY,
(key or '')))
key += 1
except (IOError, OSError) as e:
for err in 'ENOTSUP', 'EOPNOTSUPP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
msg = "Filesystem at %s does not support xattr" % \
_get_filename(fd)
logging.exception(msg)
raise DiskFileXattrNotSupported(e)
if e.errno == errno.ENOENT:
raise DiskFileNotExist()
# TODO: we might want to re-raise errors that don't denote a missing
# xattr here. Seems to be ENODATA on linux and ENOATTR on BSD/OSX.
# strings are utf-8 encoded when written, but have not always been
# (see https://bugs.launchpad.net/swift/+bug/1678018) so encode them again
# when read
return _encode_metadata(pickle.loads(metadata))
def write_metadata(fd, metadata, xattr_size=65536):
"""
Helper function to write pickled metadata for an object file.
:param fd: file descriptor or filename to write the metadata
:param metadata: metadata to write
"""
metastr = pickle.dumps(_encode_metadata(metadata), PICKLE_PROTOCOL)
key = 0
while metastr:
try:
xattr.setxattr(fd, '%s%s' % (METADATA_KEY, key or ''),
metastr[:xattr_size])
metastr = metastr[xattr_size:]
key += 1
except IOError as e:
for err in 'ENOTSUP', 'EOPNOTSUPP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
msg = "Filesystem at %s does not support xattr" % \
_get_filename(fd)
logging.exception(msg)
raise DiskFileXattrNotSupported(e)
if e.errno in (errno.ENOSPC, errno.EDQUOT):
msg = "No space left on device for %s" % _get_filename(fd)
logging.exception(msg)
raise DiskFileNoSpace()
raise
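# Illustration of the chunking scheme implemented by read_metadata() and
# write_metadata() above: a pickled blob larger than xattr_size is spread
# across numbered keys and reassembled in order on read:
#
#   user.swift.metadata    -> bytes [0:65536)
#   user.swift.metadata1   -> bytes [65536:131072)
#   user.swift.metadata2   -> ...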
def extract_policy(obj_path):
"""
Extracts the policy for an object (based on the name of the objects
directory) given the device-relative path to the object. Returns None in
the event that the path is malformed in some way.
The device-relative path is everything after the mount point; for example:
/srv/node/d42/objects-5/30/179/
485dc017205a81df3af616d917c90179/1401811134.873649.data
would have device-relative path:
objects-5/30/179/485dc017205a81df3af616d917c90179/1401811134.873649.data
:param obj_path: device-relative path of an object, or the full path
:returns: a :class:`~swift.common.storage_policy.BaseStoragePolicy` or None
"""
try:
obj_portion = obj_path[obj_path.rindex(DATADIR_BASE):]
obj_dirname = obj_portion[:obj_portion.index('/')]
except Exception:
return None
try:
base, policy = split_policy_string(obj_dirname)
except PolicyError:
return None
return policy
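# Illustration (mirrors the docstring example above):
#   extract_policy('objects-5/30/179/.../1401811134.873649.data')
#       -> the BaseStoragePolicy with index 5
#   extract_policy('tmp/foo') -> None (no objects* component in the path)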
def quarantine_renamer(device_path, corrupted_file_path):
"""
In the case that a file is corrupted, move it to a quarantined
area to allow replication to fix it.
:params device_path: The path to the device the corrupted file is on.
:params corrupted_file_path: The path to the file you want quarantined.
:returns: path (str) of directory the file was moved to
:raises OSError: re-raises non errno.EEXIST / errno.ENOTEMPTY
exceptions from rename
"""
policy = extract_policy(corrupted_file_path)
if policy is None:
# TODO: support a quarantine-unknown location
policy = POLICIES.legacy
from_dir = dirname(corrupted_file_path)
to_dir = join(device_path, 'quarantined',
get_data_dir(policy),
basename(from_dir))
invalidate_hash(dirname(from_dir))
try:
renamer(from_dir, to_dir, fsync=False)
except OSError as e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
to_dir = "%s-%s" % (to_dir, uuid.uuid4().hex)
renamer(from_dir, to_dir, fsync=False)
return to_dir
def read_hashes(partition_dir):
"""
Read the existing hashes.pkl
:returns: a dict, the suffix hashes (if any), the key 'valid' will be False
if hashes.pkl is corrupt, cannot be read or does not exist
"""
hashes_file = join(partition_dir, HASH_FILE)
hashes = {'valid': False}
try:
with open(hashes_file, 'rb') as hashes_fp:
pickled_hashes = hashes_fp.read()
except (IOError, OSError):
pass
else:
try:
hashes = pickle.loads(pickled_hashes)
except Exception:
# pickle.loads() can raise a wide variety of exceptions when
# given invalid input depending on the way in which the
# input is invalid.
pass
# hashes.pkl w/o valid updated key is "valid" but "forever old"
hashes.setdefault('valid', True)
hashes.setdefault('updated', -1)
return hashes
def write_hashes(partition_dir, hashes):
"""
Write hashes to hashes.pkl
The updated key is added to hashes before it is written.
"""
hashes_file = join(partition_dir, HASH_FILE)
# 'valid' key should always be set by the caller; however, if there's a bug
# setting invalid is most safe
hashes.setdefault('valid', False)
hashes['updated'] = time.time()
write_pickle(hashes, hashes_file, partition_dir, PICKLE_PROTOCOL)
def consolidate_hashes(partition_dir):
"""
Take what's in hashes.pkl and hashes.invalid, combine them, write the
result back to hashes.pkl, and clear out hashes.invalid.
:param partition_dir: absolute path to partition dir containing hashes.pkl
and hashes.invalid
:returns: a dict, the suffix hashes (if any), the key 'valid' will be False
if hashes.pkl is corrupt, cannot be read or does not exist
"""
invalidations_file = join(partition_dir, HASH_INVALIDATIONS_FILE)
with lock_path(partition_dir):
hashes = read_hashes(partition_dir)
found_invalidation_entry = False
try:
with open(invalidations_file, 'rb') as inv_fh:
for line in inv_fh:
found_invalidation_entry = True
suffix = line.strip()
hashes[suffix] = None
except (IOError, OSError) as e:
if e.errno != errno.ENOENT:
raise
if found_invalidation_entry:
write_hashes(partition_dir, hashes)
# Now that all the invalidations are reflected in hashes.pkl, it's
# safe to clear out the invalidations file.
with open(invalidations_file, 'wb') as inv_fh:
pass
return hashes
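# Worked example of the invalidation flow: invalidate_hash() below appends a
# suffix to hashes.invalid, and consolidate_hashes() folds it back in:
#
#   hashes.invalid contains:  "a83\n07c\n"
#   => hashes['a83'] = None, hashes['07c'] = None  (forces a rehash later)
#   => hashes.pkl is rewritten and hashes.invalid truncated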
def invalidate_hash(suffix_dir):
"""
Invalidates the hash for a suffix_dir in the partition's hashes file.
:param suffix_dir: absolute path to suffix dir whose hash needs
invalidating
"""
suffix = basename(suffix_dir)
partition_dir = dirname(suffix_dir)
invalidations_file = join(partition_dir, HASH_INVALIDATIONS_FILE)
with lock_path(partition_dir):
with open(invalidations_file, 'ab') as inv_fh:
inv_fh.write(suffix + "\n")
def relink_paths(target_path, new_target_path, check_existing=False):
"""
Hard-links a file located in target_path using the second path
new_target_path. Creates intermediate directories if required.
:param target_path: current absolute filename
:param new_target_path: new absolute filename for the hardlink
:param check_existing: if True, check whether the link is already present
before attempting to create a new one
"""
if target_path != new_target_path:
logging.debug('Relinking %s to %s due to next_part_power set',
target_path, new_target_path)
new_target_dir = os.path.dirname(new_target_path)
if not os.path.isdir(new_target_dir):
os.makedirs(new_target_dir)
link_exists = False
if check_existing:
try:
new_stat = os.stat(new_target_path)
orig_stat = os.stat(target_path)
link_exists = (new_stat.st_ino == orig_stat.st_ino)
except OSError:
pass # if anything goes wrong, try anyway
if not link_exists:
os.link(target_path, new_target_path)
def get_part_path(dev_path, policy, partition):
"""
Given the device path, policy, and partition, returns the full
path to the partition
"""
return os.path.join(dev_path, get_data_dir(policy), str(partition))
class AuditLocation(object):
"""
Represents an object location to be audited.
Other than being a bucket of data, the only useful thing this does is
stringify to a filesystem path so the auditor's logs look okay.
"""
def __init__(self, path, device, partition, policy):
self.path, self.device, self.partition, self.policy = (
path, device, partition, policy)
def __str__(self):
return str(self.path)
def object_audit_location_generator(devices, mount_check=True, logger=None,
device_dirs=None, auditor_type="ALL"):
"""
Given a devices path (e.g. "/srv/node"), yield an AuditLocation for all
objects stored under that directory if device_dirs isn't set. If
device_dirs is set, only yield AuditLocation for the objects under the
entries in device_dirs. The AuditLocation only knows the path to the hash
directory, not to the .data file therein (if any). This is to avoid a
double listdir(hash_dir); the DiskFile object will always do one, so
we don't.
:param devices: parent directory of the devices to be audited
:param mount_check: flag to check if a mount check should be performed
on devices
:param logger: a logger object
:param device_dirs: a list of directories under devices to traverse
:param auditor_type: either ALL or ZBF
"""
if not device_dirs:
device_dirs = listdir(devices)
else:
# remove bogus devices and duplicates from device_dirs
device_dirs = list(
set(listdir(devices)).intersection(set(device_dirs)))
# randomize devices in case of process restart before sweep completed
shuffle(device_dirs)
for device in device_dirs:
if mount_check and not \
ismount(os.path.join(devices, device)):
if logger:
logger.debug(
_('Skipping %s as it is not mounted'), device)
continue
# loop through object dirs for all policies
device_dir = os.path.join(devices, device)
try:
dirs = os.listdir(device_dir)
except OSError as e:
if logger:
logger.debug(
_('Skipping %(dir)s: %(err)s') % {'dir': device_dir,
'err': e.strerror})
continue
for dir_ in dirs:
if not dir_.startswith(DATADIR_BASE):
continue
try:
base, policy = split_policy_string(dir_)
except PolicyError as e:
if logger:
logger.warning(_('Directory %(directory)r does not map '
'to a valid policy (%(error)s)') % {
'directory': dir_, 'error': e})
continue
datadir_path = os.path.join(devices, device, dir_)
partitions = get_auditor_status(datadir_path, logger, auditor_type)
for pos, partition in enumerate(partitions):
update_auditor_status(datadir_path, logger,
partitions[pos:], auditor_type)
part_path = os.path.join(datadir_path, partition)
try:
suffixes = listdir(part_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
try:
hashes = listdir(suff_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for hsh in hashes:
hsh_path = os.path.join(suff_path, hsh)
yield AuditLocation(hsh_path, device, partition,
policy)
update_auditor_status(datadir_path, logger, [], auditor_type)
def get_auditor_status(datadir_path, logger, auditor_type):
auditor_status = os.path.join(
datadir_path, "auditor_status_%s.json" % auditor_type)
status = {}
try:
if six.PY3:
statusfile = open(auditor_status, encoding='utf8')
else:
statusfile = open(auditor_status, 'rb')
with statusfile:
status = statusfile.read()
except (OSError, IOError) as e:
if e.errno != errno.ENOENT and logger:
logger.warning(_('Cannot read %(auditor_status)s (%(err)s)') %
{'auditor_status': auditor_status, 'err': e})
return listdir(datadir_path)
try:
status = json.loads(status)
except ValueError as e:
logger.warning(_('Loading JSON from %(auditor_status)s failed'
' (%(err)s)') %
{'auditor_status': auditor_status, 'err': e})
return listdir(datadir_path)
return status['partitions']
def update_auditor_status(datadir_path, logger, partitions, auditor_type):
status = json.dumps({'partitions': partitions})
if six.PY3:
status = status.encode('utf8')
auditor_status = os.path.join(
datadir_path, "auditor_status_%s.json" % auditor_type)
try:
mtime = os.stat(auditor_status).st_mtime
except OSError:
mtime = 0
recently_updated = (mtime + MIN_TIME_UPDATE_AUDITOR_STATUS) > time.time()
if recently_updated and len(partitions) > 0:
if logger:
logger.debug(
'Skipping the update of recently changed %s' % auditor_status)
return
try:
with open(auditor_status, "wb") as statusfile:
statusfile.write(status)
except (OSError, IOError) as e:
if logger:
logger.warning(_('Cannot write %(auditor_status)s (%(err)s)') %
{'auditor_status': auditor_status, 'err': e})
def clear_auditor_status(devices, auditor_type="ALL"):
for device in os.listdir(devices):
for dir_ in os.listdir(os.path.join(devices, device)):
if not dir_.startswith("objects"):
continue
datadir_path = os.path.join(devices, device, dir_)
auditor_status = os.path.join(
datadir_path, "auditor_status_%s.json" % auditor_type)
remove_file(auditor_status)
def strip_self(f):
"""
Wrapper to attach module level functions to base class.
"""
def wrapper(self, *args, **kwargs):
return f(*args, **kwargs)
return wrapper
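# e.g. BaseDiskFileManager below sets
# `invalidate_hash = strip_self(invalidate_hash)` so that
# `self.invalidate_hash(suffix_dir)` dispatches to the module-level function,
# discarding `self`, while subclasses remain free to override the attribute.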
class DiskFileRouter(object):
policy_type_to_manager_cls = {}
@classmethod
def register(cls, policy_type):
"""
Decorator for Storage Policy implementations to register
their DiskFile implementation.
"""
def register_wrapper(diskfile_cls):
if policy_type in cls.policy_type_to_manager_cls:
raise PolicyError(
'%r is already registered for the policy_type %r' % (
cls.policy_type_to_manager_cls[policy_type],
policy_type))
cls.policy_type_to_manager_cls[policy_type] = diskfile_cls
return diskfile_cls
return register_wrapper
def __init__(self, *args, **kwargs):
self.policy_to_manager = {}
for policy in POLICIES:
manager_cls = self.policy_type_to_manager_cls[policy.policy_type]
self.policy_to_manager[policy] = manager_cls(*args, **kwargs)
def __getitem__(self, policy):
return self.policy_to_manager[policy]
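# Usage sketch (illustrative; concrete managers later in this module register
# themselves this way):
#
#   @DiskFileRouter.register(REPL_POLICY)
#   class DiskFileManager(BaseDiskFileManager):
#       diskfile_cls = DiskFile
#
#   router = DiskFileRouter(conf, logger)
#   mgr = router[POLICIES.default]  # manager instance for that policy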
class BaseDiskFileManager(object):
"""
Management class for devices, providing common place for shared parameters
and methods not provided by the DiskFile class (which primarily services
the object server REST API layer).
The `get_diskfile()` method is how this implementation creates a `DiskFile`
object.
.. note::
This class is reference implementation specific and not part of the
pluggable on-disk backend API.
.. note::
TODO(portante): Not sure what the right name to recommend here, as
"manager" seemed generic enough, though suggestions are welcome.
:param conf: caller provided configuration object
:param logger: caller provided logger
"""
diskfile_cls = None # must be set by subclasses
invalidate_hash = strip_self(invalidate_hash)
consolidate_hashes = strip_self(consolidate_hashes)
quarantine_renamer = strip_self(quarantine_renamer)
def __init__(self, conf, logger):
self.logger = logger
self.devices = conf.get('devices', '/srv/node')
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.keep_cache_size = int(conf.get('keep_cache_size', 5242880))
self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.reclaim_age = int(conf.get('reclaim_age', DEFAULT_RECLAIM_AGE))
self.replication_one_per_device = config_true_value(
conf.get('replication_one_per_device', 'true'))
self.replication_lock_timeout = int(conf.get(
'replication_lock_timeout', 15))
self.use_splice = False
self.pipe_size = None
conf_wants_splice = config_true_value(conf.get('splice', 'no'))
# If the operator wants zero-copy with splice() but we don't have the
# requisite kernel support, complain so they can go fix it.
if conf_wants_splice and not splice.available:
self.logger.warning(
"Use of splice() requested (config says \"splice = %s\"), "
"but the system does not support it. "
"splice() will not be used." % conf.get('splice'))
elif conf_wants_splice and splice.available:
try:
sockfd = get_md5_socket()
os.close(sockfd)
except IOError as err:
# AF_ALG socket support was introduced in kernel 2.6.38; on
# systems with older kernels (or custom-built kernels lacking
# AF_ALG support), we can't use zero-copy.
if err.errno != errno.EAFNOSUPPORT:
raise
self.logger.warning("MD5 sockets not supported. "
"splice() will not be used.")
else:
self.use_splice = True
with open('/proc/sys/fs/pipe-max-size') as f:
max_pipe_size = int(f.read())
self.pipe_size = min(max_pipe_size, self.disk_chunk_size)
self.use_linkat = o_tmpfile_supported()
def make_on_disk_filename(self, timestamp, ext=None,
ctype_timestamp=None, *a, **kw):
"""
Returns filename for given timestamp.
:param timestamp: the object timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
:param ext: an optional string representing a file extension to be
appended to the returned file name
:param ctype_timestamp: an optional content-type timestamp, an instance
of :class:`~swift.common.utils.Timestamp`
:returns: a file name
"""
rv = timestamp.internal
if ext == '.meta' and ctype_timestamp:
# If ctype_timestamp is None then the filename is simply the
# internal form of the timestamp. If ctype_timestamp is not None
# then the difference between the raw values of the two timestamps
# is appended as a hex number, with its sign.
#
# There are two reasons for encoding the content-type timestamp
# in the filename in this way. First, it means that two .meta files
# having the same timestamp but different content-type timestamps
# (and potentially different content-type values) will be distinct
# and therefore will be independently replicated when rsync
# replication is used. That ensures that all nodes end up having
# all content-type values after replication (with the most recent
# value being selected when the diskfile is opened). Second, having
# the content-type encoded in timestamp in the filename makes it
# possible for the on disk file search code to determine that
# timestamp by inspecting only the filename, and not needing to
# open the file and read its xattrs.
rv = encode_timestamps(timestamp, ctype_timestamp, explicit=True)
if ext:
rv = '%s%s' % (rv, ext)
return rv
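    # Illustrative filenames produced above (hedged: the exact signed hex
    # offset comes from encode_timestamps and is shown only schematically):
    #   ext='.data'                        -> '1401811134.87365.data'
    #   ext='.meta', no ctype_timestamp    -> '1401811134.87365.meta'
    #   ext='.meta', ctype_timestamp given -> '1401811134.87365+<hex>.meta'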
def parse_on_disk_filename(self, filename):
"""
Parse an on disk file name.
:param filename: the file name including extension
:returns: a dict, with keys for timestamp, ext and ctype_timestamp:
* timestamp is a :class:`~swift.common.utils.Timestamp`
        * ctype_timestamp is a :class:`~swift.common.utils.Timestamp` or
          None for .meta files; for all other file types it is None
* ext is a string, the file extension including the leading dot or
the empty string if the filename has no extension.
Subclasses may override this method to add further keys to the
returned dict.
:raises DiskFileError: if any part of the filename is not able to be
validated.
"""
ts_ctype = None
fname, ext = splitext(filename)
try:
if ext == '.meta':
timestamp, ts_ctype = decode_timestamps(
fname, explicit=True)[:2]
else:
timestamp = Timestamp(fname)
except ValueError:
raise DiskFileError('Invalid Timestamp value in filename %r'
% filename)
return {
'timestamp': timestamp,
'ext': ext,
'ctype_timestamp': ts_ctype
}
def _process_ondisk_files(self, exts, results, **kwargs):
"""
Called by get_ondisk_files(). Should be over-ridden to implement
subclass specific handling of files.
:param exts: dict of lists of file info, keyed by extension
:param results: a dict that may be updated with results
"""
raise NotImplementedError
def _verify_ondisk_files(self, results, **kwargs):
"""
Verify that the final combination of on disk files complies with the
diskfile contract.
:param results: files that have been found and accepted
:returns: True if the file combination is compliant, False otherwise
"""
data_file, meta_file, ts_file = tuple(
[results[key]
for key in ('data_file', 'meta_file', 'ts_file')])
return ((data_file is None and meta_file is None and ts_file is None)
or (ts_file is not None and data_file is None
and meta_file is None)
or (data_file is not None and ts_file is None))
def _split_list(self, original_list, condition):
"""
Split a list into two lists. The first list contains the first N items
of the original list, in their original order, where 0 < N <=
len(original list). The second list contains the remaining items of the
original list, in their original order.
The index, N, at which the original list is split is the index of the
first item in the list that does not satisfy the given condition. Note
that the original list should be appropriately sorted if the second
list is to contain no items that satisfy the given condition.
:param original_list: the list to be split.
:param condition: a single argument function that will be used to test
for the list item to split on.
:return: a tuple of two lists.
"""
for i, item in enumerate(original_list):
if not condition(item):
return original_list[:i], original_list[i:]
return original_list, []
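    # e.g. _split_list([4, 3, 2, 1], lambda x: x > 2) -> ([4, 3], [2, 1]).
    # The _split_gt_timestamp/_split_gte_timestamp helpers below use this to
    # partition reverse-time-ordered file_info lists around a timestamp.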
def _split_gt_timestamp(self, file_info_list, timestamp):
"""
Given a list of file info dicts, reverse sorted by timestamp, split the
list into two: items newer than timestamp, and items at same time or
older than timestamp.
:param file_info_list: a list of file_info dicts.
:param timestamp: a Timestamp.
:return: a tuple of two lists.
"""
return self._split_list(
file_info_list, lambda x: x['timestamp'] > timestamp)
def _split_gte_timestamp(self, file_info_list, timestamp):
"""
Given a list of file info dicts, reverse sorted by timestamp, split the
list into two: items newer than or at same time as the timestamp, and
items older than timestamp.
:param file_info_list: a list of file_info dicts.
:param timestamp: a Timestamp.
:return: a tuple of two lists.
"""
return self._split_list(
file_info_list, lambda x: x['timestamp'] >= timestamp)
def get_ondisk_files(self, files, datadir, verify=True, **kwargs):
"""
Given a simple list of files names, determine the files that constitute
a valid fileset i.e. a set of files that defines the state of an
object, and determine the files that are obsolete and could be deleted.
Note that some files may fall into neither category.
If a file is considered part of a valid fileset then its info dict will
be added to the results dict, keyed by <extension>_info. Any files that
are no longer required will have their info dicts added to a list
stored under the key 'obsolete'.
The results dict will always contain entries with keys 'ts_file',
'data_file' and 'meta_file'. Their values will be the fully qualified
path to a file of the corresponding type if there is such a file in the
valid fileset, or None.
:param files: a list of file names.
:param datadir: directory name files are from.
:param verify: if True verify that the ondisk file contract has not
been violated, otherwise do not verify.
:returns: a dict that will contain keys:
ts_file -> path to a .ts file or None
data_file -> path to a .data file or None
meta_file -> path to a .meta file or None
ctype_file -> path to a .meta file or None
and may contain keys:
ts_info -> a file info dict for a .ts file
data_info -> a file info dict for a .data file
meta_info -> a file info dict for a .meta file
ctype_info -> a file info dict for a .meta file which
contains the content-type value
unexpected -> a list of file paths for unexpected
files
possible_reclaim -> a list of file info dicts for possible
reclaimable files
obsolete -> a list of file info dicts for obsolete files
"""
# Build the exts data structure:
# exts is a dict that maps file extensions to a list of file_info
# dicts for the files having that extension. The file_info dicts are of
# the form returned by parse_on_disk_filename, with the filename added.
# Each list is sorted in reverse timestamp order.
# the results dict is used to collect results of file filtering
results = {}
# The exts dict will be modified during subsequent processing as files
# are removed to be discarded or ignored.
exts = defaultdict(list)
for afile in files:
# Categorize files by extension
try:
file_info = self.parse_on_disk_filename(afile)
file_info['filename'] = afile
exts[file_info['ext']].append(file_info)
except DiskFileError as e:
file_path = os.path.join(datadir or '', afile)
results.setdefault('unexpected', []).append(file_path)
# log warnings if it's not a rsync temp file
if RE_RSYNC_TEMPFILE.match(afile):
self.logger.debug('Rsync tempfile: %s', file_path)
else:
self.logger.warning('Unexpected file %s: %s',
file_path, e)
for ext in exts:
# For each extension sort files into reverse chronological order.
exts[ext] = sorted(
exts[ext], key=lambda info: info['timestamp'], reverse=True)
if exts.get('.ts'):
# non-tombstones older than or equal to latest tombstone are
# obsolete
for ext in filter(lambda ext: ext != '.ts', exts.keys()):
exts[ext], older = self._split_gt_timestamp(
exts[ext], exts['.ts'][0]['timestamp'])
results.setdefault('obsolete', []).extend(older)
# all but most recent .ts are obsolete
results.setdefault('obsolete', []).extend(exts['.ts'][1:])
exts['.ts'] = exts['.ts'][:1]
if exts.get('.meta'):
# retain the newest meta file
retain = 1
if exts['.meta'][1:]:
# there are other meta files so find the one with newest
# ctype_timestamp...
exts['.meta'][1:] = sorted(
exts['.meta'][1:],
key=lambda info: info['ctype_timestamp'],
reverse=True)
                # ...and retain this IFF its ctype_timestamp is greater than
                # that of the newest meta file
if (exts['.meta'][1]['ctype_timestamp'] >
exts['.meta'][0]['ctype_timestamp']):
if (exts['.meta'][1]['timestamp'] ==
exts['.meta'][0]['timestamp']):
# both at same timestamp so retain only the one with
# newest ctype
exts['.meta'][:2] = [exts['.meta'][1],
exts['.meta'][0]]
retain = 1
else:
# retain both - first has newest metadata, second has
# newest ctype
retain = 2
# discard all meta files not being retained...
results.setdefault('obsolete', []).extend(exts['.meta'][retain:])
exts['.meta'] = exts['.meta'][:retain]
# delegate to subclass handler
self._process_ondisk_files(exts, results, **kwargs)
# set final choice of files
if 'data_info' in results:
if exts.get('.meta'):
# only report a meta file if a data file has been chosen
results['meta_info'] = exts['.meta'][0]
ctype_info = exts['.meta'].pop()
if (ctype_info['ctype_timestamp']
> results['data_info']['timestamp']):
results['ctype_info'] = ctype_info
elif exts.get('.ts'):
# only report a ts file if a data file has not been chosen
# (ts files will commonly already have been removed from exts if
# a data file was chosen, but that may not be the case if
# non-durable EC fragment(s) were chosen, hence the elif here)
results['ts_info'] = exts['.ts'][0]
# set ts_file, data_file, meta_file and ctype_file with path to
# chosen file or None
for info_key in ('data_info', 'meta_info', 'ts_info', 'ctype_info'):
info = results.get(info_key)
key = info_key[:-5] + '_file'
results[key] = join(datadir, info['filename']) if info else None
if verify:
assert self._verify_ondisk_files(
results, **kwargs), \
"On-disk file search algorithm contract is broken: %s" \
% str(results)
return results
def cleanup_ondisk_files(self, hsh_path, **kwargs):
"""
Clean up on-disk files that are obsolete and gather the set of valid
on-disk files for an object.
:param hsh_path: object hash path
:param frag_index: if set, search for a specific fragment index .data
file, otherwise accept the first valid .data file
:returns: a dict that may contain: valid on disk files keyed by their
filename extension; a list of obsolete files stored under the
key 'obsolete'; a list of files remaining in the directory,
reverse sorted, stored under the key 'files'.
"""
def is_reclaimable(timestamp):
return (time.time() - float(timestamp)) > self.reclaim_age
files = listdir(hsh_path)
files.sort(reverse=True)
results = self.get_ondisk_files(
files, hsh_path, verify=False, **kwargs)
if 'ts_info' in results and is_reclaimable(
results['ts_info']['timestamp']):
remove_file(join(hsh_path, results['ts_info']['filename']))
files.remove(results.pop('ts_info')['filename'])
for file_info in results.get('possible_reclaim', []):
# stray files are not deleted until reclaim-age
if is_reclaimable(file_info['timestamp']):
results.setdefault('obsolete', []).append(file_info)
for file_info in results.get('obsolete', []):
remove_file(join(hsh_path, file_info['filename']))
files.remove(file_info['filename'])
results['files'] = files
return results
def _update_suffix_hashes(self, hashes, ondisk_info):
"""
Applies policy specific updates to the given dict of md5 hashes for
the given ondisk_info.
:param hashes: a dict of md5 hashes to be updated
:param ondisk_info: a dict describing the state of ondisk files, as
returned by get_ondisk_files
"""
raise NotImplementedError
def _hash_suffix_dir(self, path):
"""
:param path: full path to directory
"""
hashes = defaultdict(hashlib.md5)
try:
path_contents = sorted(os.listdir(path))
except OSError as err:
if err.errno in (errno.ENOTDIR, errno.ENOENT):
raise PathNotDir()
raise
for hsh in path_contents:
hsh_path = join(path, hsh)
try:
ondisk_info = self.cleanup_ondisk_files(hsh_path)
except OSError as err:
if err.errno == errno.ENOTDIR:
partition_path = dirname(path)
objects_path = dirname(partition_path)
device_path = dirname(objects_path)
quar_path = quarantine_renamer(device_path, hsh_path)
logging.exception(
_('Quarantined %(hsh_path)s to %(quar_path)s because '
'it is not a directory'), {'hsh_path': hsh_path,
'quar_path': quar_path})
continue
raise
if not ondisk_info['files']:
try:
os.rmdir(hsh_path)
except OSError:
pass
continue
# ondisk_info has info dicts containing timestamps for those
# files that could determine the state of the diskfile if it were
# to be opened. We update the suffix hash with the concatenation of
# each file's timestamp and extension. The extension is added to
# guarantee distinct hash values from two object dirs that have
# different file types at the same timestamp(s).
#
# Files that may be in the object dir but would have no effect on
# the state of the diskfile are not used to update the hash.
for key in (k for k in ('meta_info', 'ts_info')
if k in ondisk_info):
info = ondisk_info[key]
hashes[None].update(info['timestamp'].internal + info['ext'])
# delegate to subclass for data file related updates...
self._update_suffix_hashes(hashes, ondisk_info)
if 'ctype_info' in ondisk_info:
# We have a distinct content-type timestamp so update the
# hash. As a precaution, append '_ctype' to differentiate this
                # value from any other timestamp value that might be included in
# the hash in future. There is no .ctype file so use _ctype to
# avoid any confusion.
info = ondisk_info['ctype_info']
hashes[None].update(info['ctype_timestamp'].internal
+ '_ctype')
try:
os.rmdir(path)
except OSError as e:
if e.errno == errno.ENOENT:
raise PathNotDir()
else:
# if we remove it, pretend like it wasn't there to begin with so
# that the suffix key gets removed
raise PathNotDir()
return hashes
def _hash_suffix(self, path):
"""
Performs reclamation and returns an md5 of all (remaining) files.
:param path: full path to directory
:raises PathNotDir: if given path is not a valid directory
:raises OSError: for non-ENOTDIR errors
"""
raise NotImplementedError
def _get_hashes(self, *args, **kwargs):
hashed, hashes = self.__get_hashes(*args, **kwargs)
hashes.pop('updated', None)
hashes.pop('valid', None)
return hashed, hashes
def __get_hashes(self, device, partition, policy, recalculate=None,
do_listdir=False):
"""
Get hashes for each suffix dir in a partition. do_listdir causes it to
mistrust the hash cache for suffix existence at the (unexpectedly high)
cost of a listdir.
:param device: name of target device
:param partition: partition on the device in which the object lives
:param policy: the StoragePolicy instance
:param recalculate: list of suffixes which should be recalculated when
got
:param do_listdir: force existence check for all hashes in the
partition
:returns: tuple of (number of suffix dirs hashed, dictionary of hashes)
"""
hashed = 0
dev_path = self.get_dev_path(device)
partition_path = get_part_path(dev_path, policy, partition)
hashes_file = join(partition_path, HASH_FILE)
modified = False
orig_hashes = {'valid': False}
if recalculate is None:
recalculate = []
try:
orig_hashes = self.consolidate_hashes(partition_path)
except Exception:
self.logger.warning('Unable to read %r', hashes_file,
exc_info=True)
if not orig_hashes['valid']:
# This is the only path to a valid hashes from invalid read (e.g.
# does not exist, corrupt, etc.). Moreover, in order to write this
# valid hashes we must read *the exact same* invalid state or we'll
# trigger race detection.
do_listdir = True
hashes = {'valid': True}
# If the exception handling around consolidate_hashes fired we're
# going to do a full rehash regardless; but we need to avoid
# needless recursion if the on-disk hashes.pkl is actually readable
# (worst case is consolidate_hashes keeps raising exceptions and we
# eventually run out of stack).
            # N.B. orig_hashes invalid only affects new parts and error/edge
# conditions - so try not to get overly caught up trying to
# optimize it out unless you manage to convince yourself there's a
# bad behavior.
orig_hashes = read_hashes(partition_path)
else:
hashes = copy.deepcopy(orig_hashes)
if do_listdir:
for suff in os.listdir(partition_path):
if len(suff) == 3:
hashes.setdefault(suff, None)
modified = True
self.logger.debug('Run listdir on %s', partition_path)
hashes.update((suffix, None) for suffix in recalculate)
for suffix, hash_ in hashes.items():
if not hash_:
suffix_dir = join(partition_path, suffix)
try:
hashes[suffix] = self._hash_suffix(suffix_dir)
hashed += 1
except PathNotDir:
del hashes[suffix]
except OSError:
logging.exception(_('Error hashing suffix'))
modified = True
if modified:
with lock_path(partition_path):
if read_hashes(partition_path) == orig_hashes:
write_hashes(partition_path, hashes)
return hashed, hashes
return self.__get_hashes(device, partition, policy,
recalculate=recalculate,
do_listdir=do_listdir)
else:
return hashed, hashes
def construct_dev_path(self, device):
"""
Construct the path to a device without checking if it is mounted.
:param device: name of target device
:returns: full path to the device
"""
return os.path.join(self.devices, device)
def get_dev_path(self, device, mount_check=None):
"""
Return the path to a device, first checking to see if either it
is a proper mount point, or at least a directory depending on
the mount_check configuration option.
:param device: name of target device
:param mount_check: whether or not to check mountedness of device.
Defaults to bool(self.mount_check).
:returns: full path to the device, None if the path to the device is
not a proper mount point or directory.
"""
# we'll do some kind of check unless explicitly forbidden
if mount_check is not False:
if mount_check or self.mount_check:
check = check_mount
else:
check = check_dir
if not check(self.devices, device):
return None
return os.path.join(self.devices, device)
@contextmanager
def replication_lock(self, device):
"""
A context manager that will lock on the device given, if
configured to do so.
:param device: name of target device
:raises ReplicationLockTimeout: If the lock on the device
cannot be granted within the configured timeout.
"""
if self.replication_one_per_device:
dev_path = self.get_dev_path(device)
with lock_path(
dev_path,
timeout=self.replication_lock_timeout,
timeout_class=ReplicationLockTimeout):
yield True
else:
yield True
def pickle_async_update(self, device, account, container, obj, data,
timestamp, policy):
"""
Write data describing a container update notification to a pickle file
in the async_pending directory.
:param device: name of target device
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param data: update data to be written to pickle file
:param timestamp: a Timestamp
:param policy: the StoragePolicy instance
"""
device_path = self.construct_dev_path(device)
async_dir = os.path.join(device_path, get_async_dir(policy))
tmp_dir = os.path.join(device_path, get_tmp_dir(policy))
mkdirs(tmp_dir)
ohash = hash_path(account, container, obj)
write_pickle(
data,
os.path.join(async_dir, ohash[-3:], ohash + '-' +
Timestamp(timestamp).internal),
tmp_dir)
self.logger.increment('async_pendings')
def get_diskfile(self, device, partition, account, container, obj,
policy, **kwargs):
"""
Returns a BaseDiskFile instance for an object based on the object's
partition, path parts and policy.
:param device: name of target device
:param partition: partition on device in which the object lives
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param policy: the StoragePolicy instance
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
return self.diskfile_cls(self, dev_path,
partition, account, container, obj,
policy=policy, use_splice=self.use_splice,
pipe_size=self.pipe_size,
use_linkat=self.use_linkat, **kwargs)
def object_audit_location_generator(self, device_dirs=None,
auditor_type="ALL"):
"""
Yield an AuditLocation for all objects stored under device_dirs.
:param device_dirs: directory of target device
:param auditor_type: either ALL or ZBF
"""
return object_audit_location_generator(self.devices, self.mount_check,
self.logger, device_dirs,
auditor_type)
def get_diskfile_from_audit_location(self, audit_location):
"""
Returns a BaseDiskFile instance for an object at the given
AuditLocation.
:param audit_location: object location to be audited
"""
dev_path = self.get_dev_path(audit_location.device, mount_check=False)
return self.diskfile_cls.from_hash_dir(
self, audit_location.path, dev_path,
audit_location.partition, policy=audit_location.policy)
def get_diskfile_from_hash(self, device, partition, object_hash,
policy, **kwargs):
"""
Returns a DiskFile instance for an object at the given
object_hash. Just in case someone thinks of refactoring, be
sure DiskFileDeleted is *not* raised, but the DiskFile
instance representing the tombstoned object is returned
instead.
:param device: name of target device
:param partition: partition on the device in which the object lives
:param object_hash: the hash of an object path
:param policy: the StoragePolicy instance
:raises DiskFileNotExist: if the object does not exist
:returns: an instance of BaseDiskFile
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
object_path = os.path.join(
dev_path, get_data_dir(policy), str(partition), object_hash[-3:],
object_hash)
try:
filenames = self.cleanup_ondisk_files(object_path)['files']
except OSError as err:
if err.errno == errno.ENOTDIR:
quar_path = self.quarantine_renamer(dev_path, object_path)
logging.exception(
_('Quarantined %(object_path)s to %(quar_path)s because '
'it is not a directory'), {'object_path': object_path,
'quar_path': quar_path})
raise DiskFileNotExist()
if err.errno != errno.ENOENT:
raise
raise DiskFileNotExist()
if not filenames:
raise DiskFileNotExist()
try:
metadata = read_metadata(os.path.join(object_path, filenames[-1]))
except EOFError:
raise DiskFileNotExist()
try:
account, container, obj = split_path(
metadata.get('name', ''), 3, 3, True)
except ValueError:
raise DiskFileNotExist()
return self.diskfile_cls(self, dev_path,
partition, account, container, obj,
policy=policy, **kwargs)
def get_hashes(self, device, partition, suffixes, policy):
"""
:param device: name of target device
:param partition: partition name
:param suffixes: a list of suffix directories to be recalculated
:param policy: the StoragePolicy instance
        :returns: a dictionary that maps suffix directories to their hashes
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
partition_path = get_part_path(dev_path, policy, partition)
if not os.path.exists(partition_path):
mkdirs(partition_path)
_junk, hashes = tpool_reraise(
self._get_hashes, device, partition, policy, recalculate=suffixes)
return hashes
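    # Illustrative return value (assumed, replication policy): a dict that
    # maps each 3-hex-digit suffix directory to the md5 hexdigest of its
    # contents, e.g.
    #
    #   {'a83': '0db7b416c9808517a1bb2157af20b09b'}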
def _listdir(self, path):
"""
:param path: full path to directory
"""
try:
return os.listdir(path)
except OSError as err:
if err.errno != errno.ENOENT:
self.logger.error(
'ERROR: Skipping %r due to error with listdir attempt: %s',
path, err)
return []
def yield_suffixes(self, device, partition, policy):
"""
Yields tuples of (full_path, suffix_only) for suffixes stored
on the given device and partition.
:param device: name of target device
:param partition: partition name
:param policy: the StoragePolicy instance
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
partition_path = get_part_path(dev_path, policy, partition)
for suffix in self._listdir(partition_path):
if len(suffix) != 3:
continue
try:
int(suffix, 16)
except ValueError:
continue
yield (os.path.join(partition_path, suffix), suffix)
def yield_hashes(self, device, partition, policy,
suffixes=None, **kwargs):
"""
Yields tuples of (hash_only, timestamps) for object
information stored for the given device, partition, and
(optionally) suffixes. If suffixes is None, all stored
suffixes will be searched for object hashes. Note that if
suffixes is not None but empty, such as [], then nothing will
be yielded.
timestamps is a dict which may contain items mapping:
- ts_data -> timestamp of data or tombstone file,
- ts_meta -> timestamp of meta file, if one exists
- ts_ctype -> timestamp of meta file containing most recent
content-type value, if one exists
where timestamps are instances of
:class:`~swift.common.utils.Timestamp`
:param device: name of target device
:param partition: partition name
:param policy: the StoragePolicy instance
:param suffixes: optional list of suffix directories to be searched
"""
dev_path = self.get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
if suffixes is None:
suffixes = self.yield_suffixes(device, partition, policy)
else:
partition_path = get_part_path(dev_path, policy, partition)
suffixes = (
(os.path.join(partition_path, suffix), suffix)
for suffix in suffixes)
key_preference = (
('ts_meta', 'meta_info', 'timestamp'),
('ts_data', 'data_info', 'timestamp'),
('ts_data', 'ts_info', 'timestamp'),
('ts_ctype', 'ctype_info', 'ctype_timestamp'),
)
for suffix_path, suffix in suffixes:
for object_hash in self._listdir(suffix_path):
object_path = os.path.join(suffix_path, object_hash)
try:
results = self.cleanup_ondisk_files(
object_path, **kwargs)
timestamps = {}
for ts_key, info_key, info_ts_key in key_preference:
if info_key not in results:
continue
timestamps[ts_key] = results[info_key][info_ts_key]
if 'ts_data' not in timestamps:
# file sets that do not include a .data or .ts
# file cannot be opened and therefore cannot
# be ssync'd
continue
yield (object_hash, timestamps)
except AssertionError as err:
self.logger.debug('Invalid file set in %s (%s)' % (
object_path, err))
except DiskFileError as err:
self.logger.debug(
'Invalid diskfile filename in %r (%s)' % (
object_path, err))
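    # Example of a single yielded item (illustrative values only): for an
    # object with a .data file and a newer .meta file, yield_hashes would
    # produce something like:
    #
    #   ('06fbf0b514e5199dfc4e00f42eb5ea83',
    #    {'ts_data': Timestamp('1493661058.72491'),
    #     'ts_meta': Timestamp('1493661061.00001')})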
class BaseDiskFileWriter(object):
"""
Encapsulation of the write context for servicing PUT REST API
requests. Serves as the context manager object for the
:class:`swift.obj.diskfile.DiskFile` class's
:func:`swift.obj.diskfile.DiskFile.create` method.
.. note::
It is the responsibility of the
:func:`swift.obj.diskfile.DiskFile.create` method context manager to
close the open file descriptor.
.. note::
The arguments to the constructor are considered implementation
specific. The API does not define the constructor arguments.
:param name: name of object from REST API
:param datadir: on-disk directory object will end up in on
:func:`swift.obj.diskfile.DiskFileWriter.put`
:param fd: open file descriptor of temporary file to receive data
:param tmppath: full path name of the opened file descriptor
    :param bytes_per_sync: number of bytes written between sync calls
:param diskfile: the diskfile creating this DiskFileWriter instance
:param next_part_power: the next partition power to be used
"""
def __init__(self, name, datadir, fd, tmppath, bytes_per_sync, diskfile,
next_part_power):
# Parameter tracking
self._name = name
self._datadir = datadir
self._fd = fd
self._tmppath = tmppath
self._bytes_per_sync = bytes_per_sync
self._diskfile = diskfile
self.next_part_power = next_part_power
# Internal attributes
self._upload_size = 0
self._last_sync = 0
self._extension = '.data'
self._put_succeeded = False
@property
def manager(self):
return self._diskfile.manager
@property
def put_succeeded(self):
return self._put_succeeded
def write(self, chunk):
"""
        Write a chunk of data to disk. All invocations of this method must
        come before invoking the
        :func:`swift.obj.diskfile.DiskFileWriter.put` method.
        For this implementation, the data is written into a temporary file.
:param chunk: the chunk of data to write as a string object
:returns: the total number of bytes written to an object
"""
while chunk:
written = os.write(self._fd, chunk)
self._upload_size += written
chunk = chunk[written:]
# For large files sync every 512MB (by default) written
diff = self._upload_size - self._last_sync
if diff >= self._bytes_per_sync:
tpool_reraise(fdatasync, self._fd)
drop_buffer_cache(self._fd, self._last_sync, diff)
self._last_sync = self._upload_size
return self._upload_size
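    # Worked example of the sync window above (numbers assumed): with the
    # default bytes_per_sync of 512MB, once 600MB have been written diff is
    # 600MB >= 512MB, so fdatasync() runs, the buffer cache is dropped for
    # the bytes written so far, and _last_sync moves to 600MB; the next
    # fdatasync() then happens around the 1112MB mark.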
def _finalize_put(self, metadata, target_path, cleanup):
# Write the metadata before calling fsync() so that both data and
# metadata are flushed to disk.
write_metadata(self._fd, metadata)
# We call fsync() before calling drop_cache() to lower the amount of
# redundant work the drop cache code will perform on the pages (now
# that after fsync the pages will be all clean).
fsync(self._fd)
# From the Department of the Redundancy Department, make sure we call
# drop_cache() after fsync() to avoid redundant work (pages all
# clean).
drop_buffer_cache(self._fd, 0, self._upload_size)
self.manager.invalidate_hash(dirname(self._datadir))
# After the rename/linkat completes, this object will be available for
# requests to reference.
if self._tmppath:
# It was a named temp file created by mkstemp()
renamer(self._tmppath, target_path)
else:
# It was an unnamed temp file created by open() with O_TMPFILE
link_fd_to_path(self._fd, target_path,
self._diskfile._dirs_created)
# Check if the partition power will/has been increased
new_target_path = None
if self.next_part_power:
new_target_path = replace_partition_in_path(
target_path, self.next_part_power)
if target_path != new_target_path:
try:
fsync_dir(os.path.dirname(target_path))
relink_paths(target_path, new_target_path)
except OSError as exc:
self.manager.logger.exception(
'Relinking %s to %s failed: %s',
target_path, new_target_path, exc)
# If rename is successful, flag put as succeeded. This is done to avoid
# unnecessary os.unlink() of tempfile later. As renamer() has
# succeeded, the tempfile would no longer exist at its original path.
self._put_succeeded = True
if cleanup:
try:
self.manager.cleanup_ondisk_files(self._datadir)
except OSError:
logging.exception(_('Problem cleaning up %s'), self._datadir)
self._part_power_cleanup(target_path, new_target_path)
def _put(self, metadata, cleanup=True, *a, **kw):
"""
Helper method for subclasses.
For this implementation, this method is responsible for renaming the
temporary file to the final name and directory location. This method
should be called after the final call to
:func:`swift.obj.diskfile.DiskFileWriter.write`.
:param metadata: dictionary of metadata to be associated with the
object
:param cleanup: a Boolean. If True then obsolete files will be removed
from the object dir after the put completes, otherwise
obsolete files are left in place.
"""
timestamp = Timestamp(metadata['X-Timestamp'])
ctype_timestamp = metadata.get('Content-Type-Timestamp')
if ctype_timestamp:
ctype_timestamp = Timestamp(ctype_timestamp)
filename = self.manager.make_on_disk_filename(
timestamp, self._extension, ctype_timestamp=ctype_timestamp,
*a, **kw)
metadata['name'] = self._name
target_path = join(self._datadir, filename)
tpool_reraise(self._finalize_put, metadata, target_path, cleanup)
def put(self, metadata):
"""
Finalize writing the file on disk.
:param metadata: dictionary of metadata to be associated with the
object
"""
raise NotImplementedError
def commit(self, timestamp):
"""
Perform any operations necessary to mark the object as durable. For
replication policy type this is a no-op.
:param timestamp: object put timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
"""
pass
def _part_power_cleanup(self, cur_path, new_path):
"""
        Clean up the DiskFile directories related to a partition power change.
        If the partition power is about to be increased, or has just been
        increased but the relinker has not yet cleaned up the old files, an
        additional cleanup of the related dirs has to be done. Otherwise some
        unused files might be left behind if a PUT or DELETE is done in the
        meantime.
:param cur_path: current full path to an object file
:param new_path: recomputed path to an object file, based on the
next_part_power set in the ring
"""
if new_path is None:
return
# Partition power will be increased soon
if new_path != cur_path:
new_target_dir = os.path.dirname(new_path)
try:
self.manager.cleanup_ondisk_files(new_target_dir)
except OSError:
logging.exception(
_('Problem cleaning up %s'), new_target_dir)
# Partition power has been increased, cleanup not yet finished
else:
prev_part_power = int(self.next_part_power) - 1
old_target_path = replace_partition_in_path(
cur_path, prev_part_power)
old_target_dir = os.path.dirname(old_target_path)
try:
self.manager.cleanup_ondisk_files(old_target_dir)
except OSError:
logging.exception(
_('Problem cleaning up %s'), old_target_dir)
class BaseDiskFileReader(object):
"""
Encapsulation of the WSGI read context for servicing GET REST API
requests. Serves as the context manager object for the
:class:`swift.obj.diskfile.DiskFile` class's
:func:`swift.obj.diskfile.DiskFile.reader` method.
.. note::
The quarantining behavior of this method is considered implementation
specific, and is not required of the API.
.. note::
The arguments to the constructor are considered implementation
specific. The API does not define the constructor arguments.
:param fp: open file object pointer reference
:param data_file: on-disk data file name for the object
:param obj_size: verified on-disk size of the object
:param etag: expected metadata etag value for entire file
:param disk_chunk_size: size of reads from disk in bytes
:param keep_cache_size: maximum object size that will be kept in cache
:param device_path: on-disk device path, used when quarantining an obj
:param logger: logger caller wants this object to use
:param quarantine_hook: 1-arg callable called w/reason when quarantined
:param use_splice: if true, use zero-copy splice() to send data
:param pipe_size: size of pipe buffer used in zero-copy operations
:param diskfile: the diskfile creating this DiskFileReader instance
:param keep_cache: should resulting reads be kept in the buffer cache
"""
def __init__(self, fp, data_file, obj_size, etag,
disk_chunk_size, keep_cache_size, device_path, logger,
quarantine_hook, use_splice, pipe_size, diskfile,
keep_cache=False):
# Parameter tracking
self._fp = fp
self._data_file = data_file
self._obj_size = obj_size
self._etag = etag
self._diskfile = diskfile
self._disk_chunk_size = disk_chunk_size
self._device_path = device_path
self._logger = logger
self._quarantine_hook = quarantine_hook
self._use_splice = use_splice
self._pipe_size = pipe_size
if keep_cache:
# Caller suggests we keep this in cache, only do it if the
# object's size is less than the maximum.
self._keep_cache = obj_size < keep_cache_size
else:
self._keep_cache = False
# Internal Attributes
self._iter_etag = None
self._bytes_read = 0
self._started_at_0 = False
self._read_to_eof = False
self._md5_of_sent_bytes = None
self._suppress_file_closing = False
self._quarantined_dir = None
@property
def manager(self):
return self._diskfile.manager
def _init_checks(self):
if self._fp.tell() == 0:
self._started_at_0 = True
self._iter_etag = hashlib.md5()
def _update_checks(self, chunk):
if self._iter_etag:
self._iter_etag.update(chunk)
def __iter__(self):
"""Returns an iterator over the data file."""
try:
dropped_cache = 0
self._bytes_read = 0
self._started_at_0 = False
self._read_to_eof = False
self._init_checks()
while True:
chunk = self._fp.read(self._disk_chunk_size)
if chunk:
self._update_checks(chunk)
self._bytes_read += len(chunk)
if self._bytes_read - dropped_cache > DROP_CACHE_WINDOW:
self._drop_cache(self._fp.fileno(), dropped_cache,
self._bytes_read - dropped_cache)
dropped_cache = self._bytes_read
yield chunk
else:
self._read_to_eof = True
self._drop_cache(self._fp.fileno(), dropped_cache,
self._bytes_read - dropped_cache)
break
finally:
if not self._suppress_file_closing:
self.close()
def can_zero_copy_send(self):
return self._use_splice
def zero_copy_send(self, wsockfd):
"""
Does some magic with splice() and tee() to move stuff from disk to
network without ever touching userspace.
        :param wsockfd: file descriptor (integer) of the socket out of which
                        to send data
"""
# Note: if we ever add support for zero-copy ranged GET responses,
# we'll have to make this conditional.
self._started_at_0 = True
rfd = self._fp.fileno()
client_rpipe, client_wpipe = os.pipe()
hash_rpipe, hash_wpipe = os.pipe()
md5_sockfd = get_md5_socket()
# The actual amount allocated to the pipe may be rounded up to the
# nearest multiple of the page size. If we have the memory allocated,
# we may as well use it.
#
# Note: this will raise IOError on failure, so we don't bother
# checking the return value.
pipe_size = fcntl.fcntl(client_rpipe, F_SETPIPE_SZ, self._pipe_size)
fcntl.fcntl(hash_rpipe, F_SETPIPE_SZ, pipe_size)
dropped_cache = 0
self._bytes_read = 0
try:
while True:
# Read data from disk to pipe
(bytes_in_pipe, _1, _2) = splice(
rfd, None, client_wpipe, None, pipe_size, 0)
if bytes_in_pipe == 0:
self._read_to_eof = True
self._drop_cache(rfd, dropped_cache,
self._bytes_read - dropped_cache)
break
self._bytes_read += bytes_in_pipe
# "Copy" data from pipe A to pipe B (really just some pointer
# manipulation in the kernel, not actual copying).
bytes_copied = tee(client_rpipe, hash_wpipe, bytes_in_pipe, 0)
if bytes_copied != bytes_in_pipe:
# We teed data between two pipes of equal size, and the
# destination pipe was empty. If, somehow, the destination
# pipe was full before all the data was teed, we should
# fail here. If we don't raise an exception, then we will
# have the incorrect MD5 hash once the object has been
# sent out, causing a false-positive quarantine.
raise Exception("tee() failed: tried to move %d bytes, "
"but only moved %d" %
(bytes_in_pipe, bytes_copied))
# Take the data and feed it into an in-kernel MD5 socket. The
# MD5 socket hashes data that is written to it. Reading from
# it yields the MD5 checksum of the written data.
#
# Note that we don't have to worry about splice() returning
# None here (which happens on EWOULDBLOCK); we're splicing
# $bytes_in_pipe bytes from a pipe with exactly that many
# bytes in it, so read won't block, and we're splicing it into
# an MD5 socket, which synchronously hashes any data sent to
# it, so writing won't block either.
(hashed, _1, _2) = splice(hash_rpipe, None, md5_sockfd, None,
bytes_in_pipe, splice.SPLICE_F_MORE)
if hashed != bytes_in_pipe:
raise Exception("md5 socket didn't take all the data? "
"(tried to write %d, but wrote %d)" %
(bytes_in_pipe, hashed))
while bytes_in_pipe > 0:
try:
res = splice(client_rpipe, None, wsockfd, None,
bytes_in_pipe, 0)
bytes_in_pipe -= res[0]
except IOError as exc:
if exc.errno == errno.EWOULDBLOCK:
trampoline(wsockfd, write=True)
else:
raise
if self._bytes_read - dropped_cache > DROP_CACHE_WINDOW:
self._drop_cache(rfd, dropped_cache,
self._bytes_read - dropped_cache)
dropped_cache = self._bytes_read
finally:
# Linux MD5 sockets return '00000000000000000000000000000000' for
# the checksum if you didn't write any bytes to them, instead of
# returning the correct value.
if self._bytes_read > 0:
bin_checksum = os.read(md5_sockfd, 16)
hex_checksum = ''.join("%02x" % ord(c) for c in bin_checksum)
else:
hex_checksum = MD5_OF_EMPTY_STRING
self._md5_of_sent_bytes = hex_checksum
os.close(client_rpipe)
os.close(client_wpipe)
os.close(hash_rpipe)
os.close(hash_wpipe)
os.close(md5_sockfd)
self.close()
def app_iter_range(self, start, stop):
"""
Returns an iterator over the data file for range (start, stop)
"""
if start or start == 0:
self._fp.seek(start)
if stop is not None:
length = stop - start
else:
length = None
try:
for chunk in self:
if length is not None:
length -= len(chunk)
if length < 0:
# Chop off the extra:
yield chunk[:length]
break
yield chunk
finally:
if not self._suppress_file_closing:
self.close()
def app_iter_ranges(self, ranges, content_type, boundary, size):
"""
Returns an iterator over the data file for a set of ranges
"""
if not ranges:
yield ''
else:
try:
self._suppress_file_closing = True
for chunk in multi_range_iterator(
ranges, content_type, boundary, size,
self.app_iter_range):
yield chunk
finally:
self._suppress_file_closing = False
self.close()
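    # Hedged usage sketch (range and boundary values assumed): serving bytes
    # 0-99 and 200-299 of an object could look like:
    #
    #   resp_iter = reader.app_iter_ranges(
    #       [(0, 100), (200, 300)], 'application/octet-stream',
    #       boundary='boundary123', size=obj_size)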
def _drop_cache(self, fd, offset, length):
"""
        Drop the buffer cache for the given byte range of the given file,
        unless the caller asked for the data to be kept in cache.
        :param fd: file descriptor
        :param offset: start offset of the range to drop
        :param length: length of the range to drop
"""
if not self._keep_cache:
drop_buffer_cache(fd, offset, length)
def _quarantine(self, msg):
self._quarantined_dir = self.manager.quarantine_renamer(
self._device_path, self._data_file)
self._logger.warning("Quarantined object %s: %s" % (
self._data_file, msg))
self._logger.increment('quarantines')
self._quarantine_hook(msg)
def _handle_close_quarantine(self):
"""Check if file needs to be quarantined"""
if self._iter_etag and not self._md5_of_sent_bytes:
self._md5_of_sent_bytes = self._iter_etag.hexdigest()
if self._bytes_read != self._obj_size:
self._quarantine(
"Bytes read: %s, does not match metadata: %s" % (
self._bytes_read, self._obj_size))
elif self._md5_of_sent_bytes and \
self._etag != self._md5_of_sent_bytes:
self._quarantine(
"ETag %s and file's md5 %s do not match" % (
self._etag, self._md5_of_sent_bytes))
def close(self):
"""
Close the open file handle if present.
For this specific implementation, this method will handle quarantining
the file if necessary.
"""
if self._fp:
try:
if self._started_at_0 and self._read_to_eof:
self._handle_close_quarantine()
except DiskFileQuarantined:
raise
except (Exception, Timeout) as e:
self._logger.error(_(
'ERROR DiskFile %(data_file)s'
' close failure: %(exc)s : %(stack)s'),
{'exc': e, 'stack': ''.join(traceback.format_stack()),
'data_file': self._data_file})
finally:
fp, self._fp = self._fp, None
fp.close()
class BaseDiskFile(object):
"""
Manage object files.
This specific implementation manages object files on a disk formatted with
a POSIX-compliant file system that supports extended attributes as
metadata on a file or directory.
.. note::
The arguments to the constructor are considered implementation
specific. The API does not define the constructor arguments.
The following path format is used for data file locations:
    <devices_path>/<device_dir>/<datadir>/<partdir>/<suffixdir>/<hashdir>/
<datafile>.<ext>
:param mgr: associated DiskFileManager instance
:param device_path: path to the target device or drive
:param partition: partition on the device in which the object lives
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param _datadir: override the full datadir otherwise constructed here
:param policy: the StoragePolicy instance
:param use_splice: if true, use zero-copy splice() to send data
:param pipe_size: size of pipe buffer used in zero-copy operations
:param use_linkat: if True, use open() with linkat() to create obj file
:param open_expired: if True, open() will not raise a DiskFileExpired if
object is expired
:param next_part_power: the next partition power to be used
"""
reader_cls = None # must be set by subclasses
writer_cls = None # must be set by subclasses
def __init__(self, mgr, device_path, partition,
account=None, container=None, obj=None, _datadir=None,
policy=None, use_splice=False, pipe_size=None,
use_linkat=False, open_expired=False, next_part_power=None,
**kwargs):
self._manager = mgr
self._device_path = device_path
self._logger = mgr.logger
self._disk_chunk_size = mgr.disk_chunk_size
self._bytes_per_sync = mgr.bytes_per_sync
self._use_splice = use_splice
self._pipe_size = pipe_size
self._use_linkat = use_linkat
self._open_expired = open_expired
        # This might look a little hacky, i.e. tracking the number of newly
        # created dirs so that only that many are fsync'd later. If there is
        # a better way, please suggest.
# Or one could consider getting rid of doing fsyncs on dirs altogether
# and mounting XFS with the 'dirsync' mount option which should result
# in all entry fops being carried out synchronously.
self._dirs_created = 0
self.policy = policy
self.next_part_power = next_part_power
if account and container and obj:
self._name = '/' + '/'.join((account, container, obj))
self._account = account
self._container = container
self._obj = obj
else:
# gets populated when we read the metadata
self._name = None
self._account = None
self._container = None
self._obj = None
self._tmpdir = join(device_path, get_tmp_dir(policy))
self._ondisk_info = None
self._metadata = None
self._datafile_metadata = None
self._metafile_metadata = None
self._data_file = None
self._fp = None
self._quarantined_dir = None
self._content_length = None
if _datadir:
self._datadir = _datadir
else:
name_hash = hash_path(account, container, obj)
self._datadir = join(
device_path, storage_directory(get_data_dir(policy),
partition, name_hash))
@property
def manager(self):
return self._manager
@property
def account(self):
return self._account
@property
def container(self):
return self._container
@property
def obj(self):
return self._obj
@property
def content_length(self):
if self._metadata is None:
raise DiskFileNotOpen()
return self._content_length
@property
def timestamp(self):
if self._metadata is None:
raise DiskFileNotOpen()
return Timestamp(self._metadata.get('X-Timestamp'))
@property
def data_timestamp(self):
if self._datafile_metadata is None:
raise DiskFileNotOpen()
return Timestamp(self._datafile_metadata.get('X-Timestamp'))
@property
def durable_timestamp(self):
"""
Provides the timestamp of the newest data file found in the object
directory.
:return: A Timestamp instance, or None if no data file was found.
:raises DiskFileNotOpen: if the open() method has not been previously
called on this instance.
"""
if self._ondisk_info is None:
raise DiskFileNotOpen()
if self._datafile_metadata:
return Timestamp(self._datafile_metadata.get('X-Timestamp'))
return None
@property
def fragments(self):
return None
@property
def content_type(self):
if self._metadata is None:
raise DiskFileNotOpen()
return self._metadata.get('Content-Type')
@property
def content_type_timestamp(self):
if self._metadata is None:
raise DiskFileNotOpen()
t = self._metadata.get('Content-Type-Timestamp',
self._datafile_metadata.get('X-Timestamp'))
return Timestamp(t)
@classmethod
def from_hash_dir(cls, mgr, hash_dir_path, device_path, partition, policy):
        return cls(mgr, device_path, partition, _datadir=hash_dir_path,
                   policy=policy)
def open(self):
"""
Open the object.
This implementation opens the data file representing the object, reads
the associated metadata in the extended attributes, additionally
combining metadata from fast-POST `.meta` files.
.. note::
An implementation is allowed to raise any of the following
exceptions, but is only required to raise `DiskFileNotExist` when
the object representation does not exist.
:raises DiskFileCollision: on name mis-match with metadata
:raises DiskFileNotExist: if the object does not exist
:raises DiskFileDeleted: if the object was previously deleted
        :raises DiskFileQuarantined: if while reading metadata of the file
            some data did not pass cross checks
:returns: itself for use as a context manager
"""
# First figure out if the data directory exists
try:
files = os.listdir(self._datadir)
except OSError as err:
if err.errno == errno.ENOTDIR:
# If there's a file here instead of a directory, quarantine
# it; something's gone wrong somewhere.
raise self._quarantine(
# hack: quarantine_renamer actually renames the directory
# enclosing the filename you give it, but here we just
# want this one file and not its parent.
os.path.join(self._datadir, "made-up-filename"),
"Expected directory, found file at %s" % self._datadir)
elif err.errno != errno.ENOENT:
raise DiskFileError(
"Error listing directory %s: %s" % (self._datadir, err))
# The data directory does not exist, so the object cannot exist.
files = []
# gather info about the valid files to use to open the DiskFile
file_info = self._get_ondisk_files(files)
self._data_file = file_info.get('data_file')
if not self._data_file:
raise self._construct_exception_from_ts_file(**file_info)
self._fp = self._construct_from_data_file(**file_info)
# This method must populate the internal _metadata attribute.
self._metadata = self._metadata or {}
return self
def __enter__(self):
"""
Context enter.
.. note::
            An implementation shall raise `DiskFileNotOpen` when the caller
            has not previously invoked the
            :func:`swift.obj.diskfile.DiskFile.open` method.
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self
def __exit__(self, t, v, tb):
"""
Context exit.
.. note::
This method will be invoked by the object server while servicing
the REST API *before* the object has actually been read. It is the
responsibility of the implementation to properly handle that.
"""
if self._fp is not None:
fp, self._fp = self._fp, None
fp.close()
def _quarantine(self, data_file, msg):
"""
Quarantine a file; responsible for incrementing the associated logger's
count of quarantines.
:param data_file: full path of data file to quarantine
:param msg: reason for quarantining to be included in the exception
:returns: DiskFileQuarantined exception object
"""
self._quarantined_dir = self.manager.quarantine_renamer(
self._device_path, data_file)
self._logger.warning("Quarantined object %s: %s" % (
data_file, msg))
self._logger.increment('quarantines')
return DiskFileQuarantined(msg)
def _get_ondisk_files(self, files):
"""
Determine the on-disk files to use.
:param files: a list of files in the object's dir
:returns: dict of files to use having keys 'data_file', 'ts_file',
'meta_file'
"""
raise NotImplementedError
def _construct_exception_from_ts_file(self, ts_file, **kwargs):
"""
If a tombstone is present it means the object is considered
deleted. We just need to pull the metadata from the tombstone file
which has the timestamp to construct the deleted exception. If there
was no tombstone, just report it does not exist.
:param ts_file: the tombstone file name found on disk
:returns: DiskFileDeleted if the ts_file was provided, else
DiskFileNotExist
"""
if not ts_file:
exc = DiskFileNotExist()
else:
try:
metadata = self._failsafe_read_metadata(ts_file, ts_file)
except DiskFileQuarantined:
# If the tombstone's corrupted, quarantine it and pretend it
# wasn't there
exc = DiskFileNotExist()
else:
# All well and good that we have found a tombstone file, but
# we don't have a data file so we are just going to raise an
# exception that we could not find the object, providing the
# tombstone's timestamp.
exc = DiskFileDeleted(metadata=metadata)
return exc
def _verify_name_matches_hash(self, data_file):
"""
:param data_file: data file name, used when quarantines occur
"""
hash_from_fs = os.path.basename(self._datadir)
hash_from_name = hash_path(self._name.lstrip('/'))
if hash_from_fs != hash_from_name:
raise self._quarantine(
data_file,
"Hash of name in metadata does not match directory name")
def _verify_data_file(self, data_file, fp):
"""
Verify the metadata's name value matches what we think the object is
named.
        :param data_file: data file name being considered, used when
            quarantines occur
:param fp: open file pointer so that we can `fstat()` the file to
verify the on-disk size with Content-Length metadata value
:raises DiskFileCollision: if the metadata stored name does not match
the referenced name of the file
:raises DiskFileExpired: if the object has expired
:raises DiskFileQuarantined: if data inconsistencies were detected
between the metadata and the file-system
metadata
"""
try:
mname = self._metadata['name']
except KeyError:
raise self._quarantine(data_file, "missing name metadata")
else:
if mname != self._name:
self._logger.error(
_('Client path %(client)s does not match '
'path stored in object metadata %(meta)s'),
{'client': self._name, 'meta': mname})
raise DiskFileCollision('Client path does not match path '
'stored in object metadata')
try:
x_delete_at = int(self._metadata['X-Delete-At'])
except KeyError:
pass
except ValueError:
# Quarantine, the x-delete-at key is present but not an
# integer.
raise self._quarantine(
data_file, "bad metadata x-delete-at value %s" % (
self._metadata['X-Delete-At']))
else:
if x_delete_at <= time.time() and not self._open_expired:
raise DiskFileExpired(metadata=self._metadata)
try:
metadata_size = int(self._metadata['Content-Length'])
except KeyError:
raise self._quarantine(
data_file, "missing content-length in metadata")
except ValueError:
# Quarantine, the content-length key is present but not an
# integer.
raise self._quarantine(
data_file, "bad metadata content-length value %s" % (
self._metadata['Content-Length']))
fd = fp.fileno()
try:
statbuf = os.fstat(fd)
except OSError as err:
# Quarantine, we can't successfully stat the file.
raise self._quarantine(data_file, "not stat-able: %s" % err)
else:
obj_size = statbuf.st_size
if obj_size != metadata_size:
raise self._quarantine(
data_file, "metadata content-length %s does"
" not match actual object size %s" % (
metadata_size, statbuf.st_size))
self._content_length = obj_size
return obj_size
def _failsafe_read_metadata(self, source, quarantine_filename=None):
"""
Read metadata from source object file. In case of failure, quarantine
the file.
Takes source and filename separately so we can read from an open
file if we have one.
:param source: file descriptor or filename to load the metadata from
        :param quarantine_filename: full path of the file to quarantine if
            reading the metadata fails
"""
try:
return read_metadata(source)
except (DiskFileXattrNotSupported, DiskFileNotExist):
raise
except Exception as err:
raise self._quarantine(
quarantine_filename,
"Exception reading metadata: %s" % err)
def _merge_content_type_metadata(self, ctype_file):
"""
When a second .meta file is providing the most recent Content-Type
metadata then merge it into the metafile_metadata.
:param ctype_file: An on-disk .meta file
"""
ctypefile_metadata = self._failsafe_read_metadata(
ctype_file, ctype_file)
if ('Content-Type' in ctypefile_metadata
and (ctypefile_metadata.get('Content-Type-Timestamp') >
self._metafile_metadata.get('Content-Type-Timestamp'))
and (ctypefile_metadata.get('Content-Type-Timestamp') >
self.data_timestamp)):
self._metafile_metadata['Content-Type'] = \
ctypefile_metadata['Content-Type']
self._metafile_metadata['Content-Type-Timestamp'] = \
ctypefile_metadata.get('Content-Type-Timestamp')
def _construct_from_data_file(self, data_file, meta_file, ctype_file,
**kwargs):
"""
Open the `.data` file to fetch its metadata, and fetch the metadata
from fast-POST `.meta` files as well if any exist, merging them
properly.
:param data_file: on-disk `.data` file being considered
:param meta_file: on-disk fast-POST `.meta` file being considered
:param ctype_file: on-disk fast-POST `.meta` file being considered that
contains content-type and content-type timestamp
:returns: an opened data file pointer
:raises DiskFileError: various exceptions from
:func:`swift.obj.diskfile.DiskFile._verify_data_file`
"""
fp = open(data_file, 'rb')
self._datafile_metadata = self._failsafe_read_metadata(fp, data_file)
self._metadata = {}
if meta_file:
self._metafile_metadata = self._failsafe_read_metadata(
meta_file, meta_file)
if ctype_file and ctype_file != meta_file:
self._merge_content_type_metadata(ctype_file)
sys_metadata = dict(
[(key, val) for key, val in self._datafile_metadata.items()
if key.lower() in DATAFILE_SYSTEM_META
or is_sys_meta('object', key)])
self._metadata.update(self._metafile_metadata)
self._metadata.update(sys_metadata)
# diskfile writer added 'name' to metafile, so remove it here
self._metafile_metadata.pop('name', None)
# TODO: the check for Content-Type is only here for tests that
# create .data files without Content-Type
if ('Content-Type' in self._datafile_metadata and
(self.data_timestamp >
self._metafile_metadata.get('Content-Type-Timestamp'))):
self._metadata['Content-Type'] = \
self._datafile_metadata['Content-Type']
self._metadata.pop('Content-Type-Timestamp', None)
else:
self._metadata.update(self._datafile_metadata)
if self._name is None:
# If we don't know our name, we were just given a hash dir at
# instantiation, so we'd better validate that the name hashes back
# to us
self._name = self._metadata['name']
self._verify_name_matches_hash(data_file)
self._verify_data_file(data_file, fp)
return fp
def get_metafile_metadata(self):
"""
Provide the metafile metadata for a previously opened object as a
dictionary. This is metadata that was written by a POST and does not
include any persistent metadata that was set by the original PUT.
:returns: object's .meta file metadata dictionary, or None if there is
no .meta file
:raises DiskFileNotOpen: if the
:func:`swift.obj.diskfile.DiskFile.open` method was not previously
invoked
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self._metafile_metadata
def get_datafile_metadata(self):
"""
Provide the datafile metadata for a previously opened object as a
dictionary. This is metadata that was included when the object was
first PUT, and does not include metadata set by any subsequent POST.
:returns: object's datafile metadata dictionary
:raises DiskFileNotOpen: if the
:func:`swift.obj.diskfile.DiskFile.open` method was not previously
invoked
"""
if self._datafile_metadata is None:
raise DiskFileNotOpen()
return self._datafile_metadata
def get_metadata(self):
"""
Provide the metadata for a previously opened object as a dictionary.
:returns: object's metadata dictionary
:raises DiskFileNotOpen: if the
:func:`swift.obj.diskfile.DiskFile.open` method was not previously
invoked
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self._metadata
def read_metadata(self):
"""
Return the metadata for an object without requiring the caller to open
the object first.
:returns: metadata dictionary for an object
:raises DiskFileError: this implementation will raise the same
errors as the `open()` method.
"""
with self.open():
return self.get_metadata()
def reader(self, keep_cache=False,
_quarantine_hook=lambda m: None):
"""
Return a :class:`swift.common.swob.Response` class compatible
"`app_iter`" object as defined by
:class:`swift.obj.diskfile.DiskFileReader`.
For this implementation, the responsibility of closing the open file
is passed to the :class:`swift.obj.diskfile.DiskFileReader` object.
:param keep_cache: caller's preference for keeping data read in the
OS buffer cache
:param _quarantine_hook: 1-arg callable called when obj quarantined;
the arg is the reason for quarantine.
Default is to ignore it.
Not needed by the REST layer.
:returns: a :class:`swift.obj.diskfile.DiskFileReader` object
"""
dr = self.reader_cls(
self._fp, self._data_file, int(self._metadata['Content-Length']),
self._metadata['ETag'], self._disk_chunk_size,
self._manager.keep_cache_size, self._device_path, self._logger,
use_splice=self._use_splice, quarantine_hook=_quarantine_hook,
pipe_size=self._pipe_size, diskfile=self, keep_cache=keep_cache)
# At this point the reader object is now responsible for closing
# the file pointer.
self._fp = None
return dr
def _get_tempfile(self):
fallback_to_mkstemp = False
tmppath = None
if self._use_linkat:
self._dirs_created = makedirs_count(self._datadir)
try:
fd = os.open(self._datadir, O_TMPFILE | os.O_WRONLY)
except OSError as err:
if err.errno in (errno.EOPNOTSUPP, errno.EISDIR, errno.EINVAL):
                    msg = ('open(%s, O_TMPFILE | O_WRONLY) failed: %s. '
                           'Falling back to using mkstemp()'
                           % (self._datadir, os.strerror(err.errno)))
self._logger.warning(msg)
fallback_to_mkstemp = True
else:
raise
if not self._use_linkat or fallback_to_mkstemp:
if not exists(self._tmpdir):
mkdirs(self._tmpdir)
fd, tmppath = mkstemp(dir=self._tmpdir)
return fd, tmppath
@contextmanager
def create(self, size=None):
"""
Context manager to create a file. We create a temporary file first, and
then return a DiskFileWriter object to encapsulate the state.
.. note::
An implementation is not required to perform on-disk
preallocations even if the parameter is specified. But if it does
and it fails, it must raise a `DiskFileNoSpace` exception.
:param size: optional initial size of file to explicitly allocate on
disk
:raises DiskFileNoSpace: if a size is specified and allocation fails
"""
try:
fd, tmppath = self._get_tempfile()
except OSError as err:
if err.errno in (errno.ENOSPC, errno.EDQUOT):
                # No space (or no more inodes) left on the filesystem
raise DiskFileNoSpace()
raise
dfw = None
try:
if size is not None and size > 0:
try:
fallocate(fd, size)
except OSError as err:
if err.errno in (errno.ENOSPC, errno.EDQUOT):
raise DiskFileNoSpace()
raise
dfw = self.writer_cls(self._name, self._datadir, fd, tmppath,
bytes_per_sync=self._bytes_per_sync,
diskfile=self,
next_part_power=self.next_part_power)
yield dfw
finally:
try:
os.close(fd)
except OSError:
pass
if (dfw is None) or (not dfw.put_succeeded):
# Try removing the temp file only if put did NOT succeed.
#
# dfw.put_succeeded is set to True after renamer() succeeds in
# DiskFileWriter._finalize_put()
try:
if tmppath:
# when mkstemp() was used
os.unlink(tmppath)
except OSError:
self._logger.exception('Error removing tempfile: %s' %
tmppath)
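    # Hedged usage sketch (metadata values assumed): the expected write path
    # pairs create() with put(), and commit() for policies that need it:
    #
    #   with df.create(size=len(body)) as writer:
    #       writer.write(body)
    #       writer.put({'X-Timestamp': timestamp.internal,
    #                   'Content-Length': str(len(body)),
    #                   'ETag': etag,
    #                   'Content-Type': 'text/plain'})
    #       writer.commit(timestamp)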
def write_metadata(self, metadata):
"""
Write a block of metadata to an object without requiring the caller to
create the object first. Supports fast-POST behavior semantics.
:param metadata: dictionary of metadata to be associated with the
object
:raises DiskFileError: this implementation will raise the same
errors as the `create()` method.
"""
with self.create() as writer:
writer._extension = '.meta'
writer.put(metadata)
def delete(self, timestamp):
"""
Delete the object.
This implementation creates a tombstone file using the given
timestamp, and removes any older versions of the object file. Any
file that has an older timestamp than timestamp will be deleted.
.. note::
An implementation is free to use or ignore the timestamp
parameter.
:param timestamp: timestamp to compare with each file
:raises DiskFileError: this implementation will raise the same
errors as the `create()` method.
"""
# this is dumb, only tests send in strings
timestamp = Timestamp(timestamp)
with self.create() as deleter:
deleter._extension = '.ts'
deleter.put({'X-Timestamp': timestamp.internal})
class DiskFileReader(BaseDiskFileReader):
pass
class DiskFileWriter(BaseDiskFileWriter):
def put(self, metadata):
"""
Finalize writing the file on disk.
:param metadata: dictionary of metadata to be associated with the
object
"""
super(DiskFileWriter, self)._put(metadata, True)
class DiskFile(BaseDiskFile):
reader_cls = DiskFileReader
writer_cls = DiskFileWriter
def _get_ondisk_files(self, files):
self._ondisk_info = self.manager.get_ondisk_files(files, self._datadir)
return self._ondisk_info
@DiskFileRouter.register(REPL_POLICY)
class DiskFileManager(BaseDiskFileManager):
diskfile_cls = DiskFile
def _process_ondisk_files(self, exts, results, **kwargs):
"""
Implement replication policy specific handling of .data files.
:param exts: dict of lists of file info, keyed by extension
:param results: a dict that may be updated with results
"""
if exts.get('.data'):
for ext in exts.keys():
if ext == '.data':
# older .data's are obsolete
exts[ext], obsolete = self._split_gte_timestamp(
exts[ext], exts['.data'][0]['timestamp'])
else:
# other files at same or older timestamp as most recent
# data are obsolete
exts[ext], obsolete = self._split_gt_timestamp(
exts[ext], exts['.data'][0]['timestamp'])
results.setdefault('obsolete', []).extend(obsolete)
# set results
results['data_info'] = exts['.data'][0]
# .meta files *may* be ready for reclaim if there is no data
if exts.get('.meta') and not exts.get('.data'):
results.setdefault('possible_reclaim', []).extend(
exts.get('.meta'))
def _update_suffix_hashes(self, hashes, ondisk_info):
"""
Applies policy specific updates to the given dict of md5 hashes for
the given ondisk_info.
:param hashes: a dict of md5 hashes to be updated
:param ondisk_info: a dict describing the state of ondisk files, as
returned by get_ondisk_files
"""
if 'data_info' in ondisk_info:
file_info = ondisk_info['data_info']
hashes[None].update(
file_info['timestamp'].internal + file_info['ext'])
def _hash_suffix(self, path):
"""
Performs reclamation and returns an md5 of all (remaining) files.
:param path: full path to directory
:raises PathNotDir: if given path is not a valid directory
:raises OSError: for non-ENOTDIR errors
:returns: md5 of files in suffix
"""
hashes = self._hash_suffix_dir(path)
return hashes[None].hexdigest()
class ECDiskFileReader(BaseDiskFileReader):
def __init__(self, fp, data_file, obj_size, etag,
disk_chunk_size, keep_cache_size, device_path, logger,
quarantine_hook, use_splice, pipe_size, diskfile,
keep_cache=False):
super(ECDiskFileReader, self).__init__(
fp, data_file, obj_size, etag,
disk_chunk_size, keep_cache_size, device_path, logger,
quarantine_hook, use_splice, pipe_size, diskfile, keep_cache)
self.frag_buf = None
self.frag_offset = 0
self.frag_size = self._diskfile.policy.fragment_size
def _init_checks(self):
super(ECDiskFileReader, self)._init_checks()
# for a multi-range GET this will be called at the start of each range;
# only initialise the frag_buf for reads starting at 0.
# TODO: reset frag buf to '' if tell() shows that start is on a frag
# boundary so that we check frags selected by a range not starting at 0
if self._started_at_0:
self.frag_buf = ''
else:
self.frag_buf = None
def _check_frag(self, frag):
if not frag:
return
if not isinstance(frag, six.binary_type):
            # ECInvalidParameter can be returned if the frag violates the
            # input format, so for safety check that the input chunk is
            # binary to avoid quarantining a valid fragment archive.
self._diskfile._logger.warn(
                _('Unexpected fragment data type (not quarantined) '
                  '%(datadir)s: %(type)s at offset 0x%(offset)x'),
{'datadir': self._diskfile._datadir,
'type': type(frag),
'offset': self.frag_offset})
return
try:
self._diskfile.policy.pyeclib_driver.get_metadata(frag)
except (ECInvalidFragmentMetadata, ECBadFragmentChecksum,
ECInvalidParameter):
# Any of these exceptions may be returned from ECDriver with a
# corrupted fragment.
msg = 'Invalid EC metadata at offset 0x%x' % self.frag_offset
self._quarantine(msg)
            # We have to terminate the response iter with an exception, but
            # it can't be StopIteration: finishing the iterator cleanly won't
            # trigger the needed error-handling cleanup, and skipping that
            # cleanup while failing to deliver all promised bytes will hang
            # the HTTP connection. (The exception raised here will produce a
            # STDERR traceback in eventlet.wsgi if eventlet_debug is on.)
raise DiskFileQuarantined(msg)
except ECDriverError as err:
self._diskfile._logger.warn(
_('Problem checking EC fragment %(datadir)s: %(err)s'),
{'datadir': self._diskfile._datadir, 'err': err})
def _update_checks(self, chunk):
super(ECDiskFileReader, self)._update_checks(chunk)
if self.frag_buf is not None:
self.frag_buf += chunk
cursor = 0
while len(self.frag_buf) >= cursor + self.frag_size:
self._check_frag(self.frag_buf[cursor:cursor + self.frag_size])
cursor += self.frag_size
self.frag_offset += self.frag_size
if cursor:
self.frag_buf = self.frag_buf[cursor:]
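    # Worked example of the windowing above (sizes assumed): with a policy
    # fragment_size of 1024, a stream of 2500 bytes is checked as two full
    # 1024-byte frames here as the chunks arrive, and the trailing 452 bytes
    # left in frag_buf are checked by _handle_close_quarantine() below.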
def _handle_close_quarantine(self):
super(ECDiskFileReader, self)._handle_close_quarantine()
self._check_frag(self.frag_buf)
class ECDiskFileWriter(BaseDiskFileWriter):
def _finalize_durable(self, data_file_path, durable_data_file_path):
exc = None
new_data_file_path = new_durable_data_file_path = None
if self.next_part_power:
new_data_file_path = replace_partition_in_path(
data_file_path, self.next_part_power)
new_durable_data_file_path = replace_partition_in_path(
durable_data_file_path, self.next_part_power)
try:
try:
os.rename(data_file_path, durable_data_file_path)
fsync_dir(self._datadir)
if self.next_part_power and \
data_file_path != new_data_file_path:
try:
os.rename(new_data_file_path,
new_durable_data_file_path)
                    except OSError as rename_err:
                        # use a distinct name so the 'exc' flag checked at
                        # the end of this method is not clobbered
                        self.manager.logger.exception(
                            'Renaming new path %s to %s failed: %s',
                            new_data_file_path, new_durable_data_file_path,
                            rename_err)
except (OSError, IOError) as err:
if err.errno not in (errno.ENOSPC, errno.EDQUOT):
# re-raise to catch all handler
raise
params = {'file': durable_data_file_path, 'err': err}
self.manager.logger.exception(
_('No space left on device for %(file)s (%(err)s)'),
params)
exc = DiskFileNoSpace(
'No space left on device for %(file)s (%(err)s)' % params)
else:
try:
self.manager.cleanup_ondisk_files(self._datadir)
except OSError as os_err:
self.manager.logger.exception(
_('Problem cleaning up %(datadir)s (%(err)s)'),
{'datadir': self._datadir, 'err': os_err})
self._part_power_cleanup(
durable_data_file_path, new_durable_data_file_path)
except Exception as err:
params = {'file': durable_data_file_path, 'err': err}
self.manager.logger.exception(
_('Problem making data file durable %(file)s (%(err)s)'),
params)
exc = DiskFileError(
'Problem making data file durable %(file)s (%(err)s)' % params)
if exc:
raise exc
def commit(self, timestamp):
"""
Finalize put by renaming the object data file to include a durable
marker. We do this for EC policy because it requires a 2-phase put
commit confirmation.
:param timestamp: object put timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
:raises DiskFileError: if the diskfile frag_index has not been set
(either during initialisation or a call to put())
"""
data_file_path = join(
self._datadir, self.manager.make_on_disk_filename(
timestamp, '.data', self._diskfile._frag_index))
durable_data_file_path = os.path.join(
self._datadir, self.manager.make_on_disk_filename(
timestamp, '.data', self._diskfile._frag_index, durable=True))
tpool_reraise(
self._finalize_durable, data_file_path, durable_data_file_path)
def put(self, metadata):
"""
The only difference between this method and the replication policy
DiskFileWriter method is adding the frag index to the metadata.
:param metadata: dictionary of metadata to be associated with object
"""
fi = None
cleanup = True
if self._extension == '.data':
            # generally we treat the fragment index provided in metadata as
            # canonical, but if it's unavailable (e.g. tests) it's reasonable
            # to use the frag_index provided at instantiation. Either way,
            # make sure that the fragment index is included in object sysmeta.
fi = metadata.setdefault('X-Object-Sysmeta-Ec-Frag-Index',
self._diskfile._frag_index)
fi = self.manager.validate_fragment_index(fi)
self._diskfile._frag_index = fi
            # defer cleanup until commit() makes the diskfile durable
cleanup = False
super(ECDiskFileWriter, self)._put(metadata, cleanup, frag_index=fi)
class ECDiskFile(BaseDiskFile):
reader_cls = ECDiskFileReader
writer_cls = ECDiskFileWriter
def __init__(self, *args, **kwargs):
super(ECDiskFile, self).__init__(*args, **kwargs)
frag_index = kwargs.get('frag_index')
self._frag_index = None
if frag_index is not None:
self._frag_index = self.manager.validate_fragment_index(frag_index)
self._frag_prefs = self._validate_frag_prefs(kwargs.get('frag_prefs'))
self._durable_frag_set = None
def _validate_frag_prefs(self, frag_prefs):
"""
Validate that frag_prefs is a list of dicts containing expected keys
'timestamp' and 'exclude'. Convert timestamp values to Timestamp
instances and validate that exclude values are valid fragment indexes.
:param frag_prefs: data to validate, should be a list of dicts.
:raise DiskFileError: if the frag_prefs data is invalid.
:return: a list of dicts with converted and validated values.
"""
# We *do* want to preserve an empty frag_prefs list because it
# indicates that a durable file is not required.
if frag_prefs is None:
return None
try:
return [
{'timestamp': Timestamp(pref['timestamp']),
'exclude': [self.manager.validate_fragment_index(fi)
for fi in pref['exclude']]}
for pref in frag_prefs]
except ValueError as e:
raise DiskFileError(
'Bad timestamp in frag_prefs: %r: %s'
% (frag_prefs, e))
except DiskFileError as e:
raise DiskFileError(
'Bad fragment index in frag_prefs: %r: %s'
% (frag_prefs, e))
except (KeyError, TypeError) as e:
raise DiskFileError(
'Bad frag_prefs: %r: %s' % (frag_prefs, e))
@property
def durable_timestamp(self):
"""
Provides the timestamp of the newest durable file found in the object
directory.
:return: A Timestamp instance, or None if no durable file was found.
:raises DiskFileNotOpen: if the open() method has not been previously
called on this instance.
"""
if self._ondisk_info is None:
raise DiskFileNotOpen()
if self._ondisk_info.get('durable_frag_set'):
return self._ondisk_info['durable_frag_set'][0]['timestamp']
return None
@property
def fragments(self):
"""
Provides information about all fragments that were found in the object
directory, including fragments without a matching durable file, and
including any fragment chosen to construct the opened diskfile.
:return: A dict mapping <Timestamp instance> -> <list of frag indexes>,
or None if the diskfile has not been opened or no fragments
were found.
"""
if self._ondisk_info:
frag_sets = self._ondisk_info['frag_sets']
return dict([(ts, [info['frag_index'] for info in frag_set])
for ts, frag_set in frag_sets.items()])
def _get_ondisk_files(self, files):
"""
The only difference between this method and the replication policy
DiskFile method is passing in the frag_index and frag_prefs kwargs to
our manager's get_ondisk_files method.
:param files: list of file names
"""
self._ondisk_info = self.manager.get_ondisk_files(
files, self._datadir, frag_index=self._frag_index,
frag_prefs=self._frag_prefs)
return self._ondisk_info
def purge(self, timestamp, frag_index):
"""
Remove a tombstone file matching the specified timestamp or
datafile matching the specified timestamp and fragment index
from the object directory.
This provides the EC reconstructor/ssync process with a way to
remove a tombstone or fragment from a handoff node after
reverting it to its primary node.
        The suffix hash will be invalidated, and if the hash dir is left
        empty or invalid it will be removed on the next cleanup_ondisk_files.
:param timestamp: the object timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
:param frag_index: fragment archive index, must be
a whole number or None.
"""
purge_file = self.manager.make_on_disk_filename(
timestamp, ext='.ts')
remove_file(os.path.join(self._datadir, purge_file))
if frag_index is not None:
# data file may or may not be durable so try removing both filename
# possibilities
purge_file = self.manager.make_on_disk_filename(
timestamp, ext='.data', frag_index=frag_index)
remove_file(os.path.join(self._datadir, purge_file))
purge_file = self.manager.make_on_disk_filename(
timestamp, ext='.data', frag_index=frag_index, durable=True)
remove_file(os.path.join(self._datadir, purge_file))
self.manager.invalidate_hash(dirname(self._datadir))
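    # Illustrative effect (timestamp value assumed): purge(ts, 3) attempts to
    # remove '<ts>.ts', '<ts>#3.data' and '<ts>#3#d.data' from the hash dir,
    # then invalidates the containing suffix hash.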
@DiskFileRouter.register(EC_POLICY)
class ECDiskFileManager(BaseDiskFileManager):
diskfile_cls = ECDiskFile
def validate_fragment_index(self, frag_index):
"""
Return int representation of frag_index, or raise a DiskFileError if
frag_index is not a whole number.
:param frag_index: a fragment archive index
"""
try:
frag_index = int(str(frag_index))
except (ValueError, TypeError) as e:
raise DiskFileError(
'Bad fragment index: %s: %s' % (frag_index, e))
if frag_index < 0:
raise DiskFileError(
'Fragment index must not be negative: %s' % frag_index)
return frag_index
def make_on_disk_filename(self, timestamp, ext=None, frag_index=None,
ctype_timestamp=None, durable=False, *a, **kw):
"""
Returns the EC specific filename for given timestamp.
:param timestamp: the object timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
:param ext: an optional string representing a file extension to be
appended to the returned file name
:param frag_index: a fragment archive index, used with .data extension
only, must be a whole number.
:param ctype_timestamp: an optional content-type timestamp, an instance
of :class:`~swift.common.utils.Timestamp`
:param durable: if True then include a durable marker in data filename.
:returns: a file name
:raises DiskFileError: if ext=='.data' and the kwarg frag_index is not
a whole number
"""
if ext == '.data':
# for datafiles only we encode the fragment index in the filename
# to allow archives of different indexes to temporarily be stored
# on the same node in certain situations
frag_index = self.validate_fragment_index(frag_index)
rv = timestamp.internal + '#' + str(frag_index)
if durable:
rv += '#d'
return '%s%s' % (rv, ext)
return super(ECDiskFileManager, self).make_on_disk_filename(
timestamp, ext, ctype_timestamp, *a, **kw)
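    # Example filenames produced above (timestamp value assumed):
    #
    #   1493661058.72491#3.data    fragment index 3, not marked durable
    #   1493661058.72491#3#d.data  fragment index 3, with the durable marker
    #   1493661058.72491.ts        non-.data files use the base behaviour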
def parse_on_disk_filename(self, filename):
"""
Returns timestamp(s) and other info extracted from a policy specific
file name. For EC policy the data file name includes a fragment index
        and possibly a durable marker, both of which must be stripped off
to retrieve the timestamp.
:param filename: the file name including extension
:returns: a dict, with keys for timestamp, frag_index, durable, ext and
ctype_timestamp:
* timestamp is a :class:`~swift.common.utils.Timestamp`
* frag_index is an int or None
        * ctype_timestamp is a :class:`~swift.common.utils.Timestamp` for
            .meta files if one is encoded in the filename, otherwise None
* ext is a string, the file extension including the leading dot or
the empty string if the filename has no extension
* durable is a boolean that is True if the filename is a data file
that includes a durable marker
:raises DiskFileError: if any part of the filename is not able to be
validated.
"""
frag_index = None
float_frag, ext = splitext(filename)
if ext == '.data':
parts = float_frag.split('#')
try:
timestamp = Timestamp(parts[0])
except ValueError:
raise DiskFileError('Invalid Timestamp value in filename %r'
% filename)
# it is an error for an EC data file to not have a valid
# fragment index
try:
frag_index = parts[1]
except IndexError:
                # expect validate_fragment_index to raise DiskFileError
pass
frag_index = self.validate_fragment_index(frag_index)
try:
durable = parts[2] == 'd'
except IndexError:
durable = False
return {
'timestamp': timestamp,
'frag_index': frag_index,
'ext': ext,
'ctype_timestamp': None,
'durable': durable
}
rv = super(ECDiskFileManager, self).parse_on_disk_filename(filename)
rv['frag_index'] = None
return rv
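    # Illustrative parse (filename assumed):
    #
    #   parse_on_disk_filename('1493661058.72491#3#d.data')
    #   -> {'timestamp': Timestamp('1493661058.72491'), 'frag_index': 3,
    #       'ext': '.data', 'ctype_timestamp': None, 'durable': True}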
def _process_ondisk_files(self, exts, results, frag_index=None,
frag_prefs=None, **kwargs):
"""
Implement EC policy specific handling of .data and legacy .durable
files.
If a frag_prefs keyword arg is provided then its value may determine
which fragment index at which timestamp is used to construct the
diskfile. The value of frag_prefs should be a list. Each item in the
frag_prefs list should be a dict that describes per-timestamp
preferences using the following items:
* timestamp: An instance of :class:`~swift.common.utils.Timestamp`.
* exclude: A list of valid fragment indexes (i.e. whole numbers)
that should be EXCLUDED when choosing a fragment at the
timestamp. This list may be empty.
For example::
[
{'timestamp': <Timestamp instance>, 'exclude': [1,3]},
{'timestamp': <Timestamp instance>, 'exclude': []}
]
The order of per-timestamp dicts in the frag_prefs list is significant
and indicates descending preference for fragments from each timestamp
i.e. a fragment that satisfies the first per-timestamp preference in
the frag_prefs will be preferred over a fragment that satisfies a
        subsequent per-timestamp preference, and so on.
If a timestamp is not cited in any per-timestamp preference dict then
it is assumed that any fragment index at that timestamp may be used to
construct the diskfile.
When a frag_prefs arg is provided, including an empty list, there is no
requirement for there to be a durable file at the same timestamp as a
        data file that is chosen to construct the disk file.
:param exts: dict of lists of file info, keyed by extension
:param results: a dict that may be updated with results
:param frag_index: if set, search for a specific fragment index .data
file, otherwise accept the first valid .data file.
:param frag_prefs: if set, search for any fragment index .data file
that satisfies the frag_prefs.
"""
durable_info = None
if exts.get('.durable'):
# in older versions, separate .durable files were used to indicate
# the durability of data files having the same timestamp
durable_info = exts['.durable'][0]
# Split the list of .data files into sets of frags having the same
# timestamp, identifying the durable and newest sets (if any) as we go.
# To do this we can take advantage of the list of .data files being
# reverse-time ordered. Keep the resulting per-timestamp frag sets in
# a frag_sets dict mapping a Timestamp instance -> frag_set.
all_frags = exts.get('.data')
frag_sets = {}
durable_frag_set = None
while all_frags:
frag_set, all_frags = self._split_gte_timestamp(
all_frags, all_frags[0]['timestamp'])
# sort the frag set into ascending frag_index order
frag_set.sort(key=lambda info: info['frag_index'])
timestamp = frag_set[0]['timestamp']
frag_sets[timestamp] = frag_set
for frag in frag_set:
# a data file marked as durable may supersede a legacy durable
# file if it is newer
if frag['durable']:
if (not durable_info or
durable_info['timestamp'] < timestamp):
# this frag defines the durable timestamp
durable_info = frag
break
if durable_info and durable_info['timestamp'] == timestamp:
durable_frag_set = frag_set
break # ignore frags that are older than durable timestamp
# Choose which frag set to use
chosen_frag_set = None
if frag_prefs is not None:
candidate_frag_sets = dict(frag_sets)
# For each per-timestamp frag preference dict, do we have any frag
# indexes at that timestamp that are not in the exclusion list for
# that timestamp? If so choose the highest of those frag_indexes.
for ts, exclude_indexes in [
(ts_pref['timestamp'], ts_pref['exclude'])
for ts_pref in frag_prefs
if ts_pref['timestamp'] in candidate_frag_sets]:
available_indexes = [info['frag_index']
for info in candidate_frag_sets[ts]]
acceptable_indexes = list(set(available_indexes) -
set(exclude_indexes))
if acceptable_indexes:
chosen_frag_set = candidate_frag_sets[ts]
# override any frag_index passed in as method param with
# the last (highest) acceptable_index
frag_index = acceptable_indexes[-1]
break
else:
# this frag_set has no acceptable frag index so
# remove it from the candidate frag_sets
candidate_frag_sets.pop(ts)
else:
# No acceptable frag index was found at any timestamp mentioned
# in the frag_prefs. Choose the newest remaining candidate
# frag_set - the proxy can decide if it wants the returned
# fragment with that time.
if candidate_frag_sets:
ts_newest = sorted(candidate_frag_sets.keys())[-1]
chosen_frag_set = candidate_frag_sets[ts_newest]
else:
chosen_frag_set = durable_frag_set
# Select a single chosen frag from the chosen frag_set, by either
# matching against a specified frag_index or taking the highest index.
chosen_frag = None
if chosen_frag_set:
if frag_index is not None:
# search the frag set to find the exact frag_index
for info in chosen_frag_set:
if info['frag_index'] == frag_index:
chosen_frag = info
break
else:
chosen_frag = chosen_frag_set[-1]
# If we successfully found a frag then set results
if chosen_frag:
results['data_info'] = chosen_frag
results['durable_frag_set'] = durable_frag_set
results['chosen_frag_set'] = chosen_frag_set
if chosen_frag_set != durable_frag_set:
# hide meta files older than data file but newer than durable
# file so they don't get marked as obsolete (we already threw
# out .meta's that are older than a .durable)
exts['.meta'], _older = self._split_gt_timestamp(
exts['.meta'], chosen_frag['timestamp'])
results['frag_sets'] = frag_sets
# Mark everything older than most recent durable data as obsolete
# and remove from the exts dict.
if durable_info:
for ext in exts.keys():
exts[ext], older = self._split_gte_timestamp(
exts[ext], durable_info['timestamp'])
results.setdefault('obsolete', []).extend(older)
# Mark any isolated legacy .durable as obsolete
if exts.get('.durable') and not durable_frag_set:
results.setdefault('obsolete', []).extend(exts['.durable'])
exts.pop('.durable')
# Fragments *may* be ready for reclaim, unless they are durable
for frag_set in frag_sets.values():
if frag_set in (durable_frag_set, chosen_frag_set):
continue
results.setdefault('possible_reclaim', []).extend(frag_set)
# .meta files *may* be ready for reclaim if there is no durable data
if exts.get('.meta') and not durable_frag_set:
results.setdefault('possible_reclaim', []).extend(
exts.get('.meta'))
def _verify_ondisk_files(self, results, frag_index=None,
frag_prefs=None, **kwargs):
"""
Verify that the final combination of on disk files complies with the
erasure-coded diskfile contract.
:param results: files that have been found and accepted
:param frag_index: specifies a specific fragment index .data file
:param frag_prefs: if set, indicates that fragment preferences have
been specified and therefore that a selected fragment is not
required to be durable.
:returns: True if the file combination is compliant, False otherwise
"""
if super(ECDiskFileManager, self)._verify_ondisk_files(
results, **kwargs):
have_data_file = results['data_file'] is not None
have_durable = (results.get('durable_frag_set') is not None or
(have_data_file and frag_prefs is not None))
return have_data_file == have_durable
return False
def _update_suffix_hashes(self, hashes, ondisk_info):
"""
Applies policy specific updates to the given dict of md5 hashes for
the given ondisk_info.
The only difference between this method and the replication policy
function is the way that data files update hashes dict. Instead of all
filenames hashed into a single hasher, each data file name will fall
into a bucket keyed by its fragment index.
:param hashes: a dict of md5 hashes to be updated
:param ondisk_info: a dict describing the state of ondisk files, as
returned by get_ondisk_files
"""
for frag_set in ondisk_info['frag_sets'].values():
for file_info in frag_set:
fi = file_info['frag_index']
hashes[fi].update(file_info['timestamp'].internal)
if 'durable_frag_set' in ondisk_info:
# The durable_frag_set may be indicated by a legacy
# <timestamp>.durable file or by a durable <timestamp>#fi#d.data
# file. Either way we update hashes[None] with the string
# <timestamp>.durable which is a consistent representation of the
# abstract state of the object regardless of the actual file set.
# That way if we use a local combination of a legacy t1.durable and
# t1#0.data to reconstruct a remote t1#0#d.data then, when next
# hashed, the local and remote will make identical updates to their
# suffix hashes.
file_info = ondisk_info['durable_frag_set'][0]
hashes[None].update(file_info['timestamp'].internal + '.durable')
def _hash_suffix(self, path):
"""
Performs reclamation and returns an md5 of all (remaining) files.
:param path: full path to directory
:raises PathNotDir: if given path is not a valid directory
:raises OSError: for non-ENOTDIR errors
:returns: dict of md5 hex digests
"""
        # hash_per_fi instead of a single hash for the whole suffix:
        # here we flatten the per-frag-index hashers' hexdigests into a
        # dictionary rather than returning one hexdigest for the suffix
hash_per_fi = self._hash_suffix_dir(path)
return dict((fi, md5.hexdigest()) for fi, md5 in hash_per_fi.items())
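    # Sketch of the returned mapping (digests abbreviated and assumed):
    #   {0: 'c7f5d4...', 2: '9ab1e0...', None: '5d41f2...'}
    # Integer keys are fragment indexes; the None key carries the durable
    # timestamp contribution added in _update_suffix_hashes.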
|
{
"content_hash": "84d4e665f9984137b5f1a27c83eafeb9",
"timestamp": "",
"source": "github",
"line_count": 3348,
"max_line_length": 79,
"avg_line_length": 41.75776583034648,
"alnum_prop": 0.5781266764421873,
"repo_name": "notmyname/swift",
"id": "362b3a3f34084c550bc9f2fe2c5e5cba049f05c6",
"size": "140400",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "swift/obj/diskfile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "248"
},
{
"name": "PHP",
"bytes": "377"
},
{
"name": "Python",
"bytes": "8556180"
},
{
"name": "Shell",
"bytes": "1804"
}
],
"symlink_target": ""
}
|
"""
Brute force contiguity builders used for testing results of parallel
algorithms
"""
__author__ = "Serge Rey <sjsrey@gmail.com>"
import pysal as ps
import numpy as np
from itertools import combinations
#sf = ps.open(ps.examples.get_path("nat.shp"))
#sf = ps.open(ps.examples.get_path("columbus.shp"))
#sf = ps.open(ps.examples.get_path("sids2.shp"))
#bb = sf.bbox
def bf_contiguity(shps, wttype = "QUEEN"):
"""
Brute force contiguity builder
Arguments
---------
shps: list of pysal.cg.Polygon shapes
wttype: string
contiguity type
Returns
-------
neighbors: dict
key is id, value is list of neighboring ids
"""
neighbors = {}
if wttype.upper() == "QUEEN":
vertices = {}
for i, shp in enumerate(shps):
si = set([i])
for vertex in shp.vertices:
if vertex not in vertices:
vertices[vertex] = set()
vertices[vertex] = vertices[vertex].union(si)
for vertex in vertices:
pairs = combinations(vertices[vertex], 2)
for l,r in pairs:
if l not in neighbors:
neighbors[l] = set()
if r not in neighbors:
neighbors[r] = set()
neighbors[l] = neighbors[l].union([r])
neighbors[r] = neighbors[r].union([l])
return neighbors
elif wttype.upper() == 'ROOK':
edges = {}
neighbors = {}
for i, shp in enumerate(shps):
neighbors[i] = set()
nv = len(shp.vertices)
for o in range(nv-1):
d = o + 1
edge = [shp.vertices[o], shp.vertices[d]]
edge.sort()
edge = tuple(edge)
if edge not in edges:
edges[edge] = set()
edges[edge] = edges[edge].union([i])
checked = {}
for edge in edges:
pairs = combinations(edges[edge], 2)
for pair in pairs:
l,r = pair
if pair not in checked and (r,l) not in checked:
neighbors[l] = neighbors[l].union([r])
neighbors[r] = neighbors[r].union([l])
checked[pair] = pair
checked[(r,l)] = (r,l)
return neighbors
else:
print "Weight type not supported: ", wttype
def qf_shapefile(sf):
shps = []
f = ps.open(sf)
for shp in f:
shps.append(shp)
f.close()
return bf_contiguity(shps, wttype = 'QUEEN')
def rf_shapefile(sf):
shps = []
f = ps.open(sf)
for shp in f:
shps.append(shp)
f.close()
return bf_contiguity(shps, wttype = 'ROOK')
if __name__ == '__main__':
sf = ps.examples.get_path("columbus.shp")
queen_col = qf_shapefile(sf)
rook_col = rf_shapefile(sf)
wrc = ps.W(rook_col)
    print(wrc.histogram)
import time
sf = ps.examples.get_path("NAT.shp")
t1 = time.time()
queen = qf_shapefile(sf)
wq = ps.W(queen)
t2 = time.time()
print "National queen: ", t2-t1
sf = ps.examples.get_path("NAT.shp")
t1 = time.time()
rook = rf_shapefile(sf)
wr = ps.W(rook)
t2 = time.time()
print "National rook: ", t2-t1
t1 = time.time()
wrps = ps.rook_from_shapefile(sf)
t2 = time.time()
print "PySAL rook: ", t2-t1
t1 = time.time()
wqps = ps.queen_from_shapefile(sf)
t2 = time.time()
print "PySAL queen: ", t2-t1
|
{
"content_hash": "7d68101306237e96b273ec2f83eb0ea3",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 68,
"avg_line_length": 26.772727272727273,
"alnum_prop": 0.5206564799094511,
"repo_name": "pysal/pPysal",
"id": "7071998d51f3a55637952b543d21bf3f0a6ca073",
"size": "3535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weights/bf_tester.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2559"
},
{
"name": "FORTRAN",
"bytes": "241"
},
{
"name": "HTML",
"bytes": "42412"
},
{
"name": "Python",
"bytes": "429423"
},
{
"name": "Shell",
"bytes": "2658"
}
],
"symlink_target": ""
}
|
import itk
from sys import argv
itk.auto_progress(2)
reader = itk.ImageFileReader.IUC2.New(FileName=argv[1])
filter = itk.SmoothingRecursiveGaussianImageFilter.New(
reader,
Sigma=eval(argv[3]))
itk.imwrite(filter, argv[2])
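# Hedged invocation sketch (file names and sigma value assumed):
#   python SmoothingRecursiveGaussianImageFilterTest.py input.png output.png 3.0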
|
{
"content_hash": "c513ae0c79534ee18144178cb3ee5138",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 55,
"avg_line_length": 25.77777777777778,
"alnum_prop": 0.7586206896551724,
"repo_name": "malaterre/ITK",
"id": "da956f812111beedf6ade22bfd1392234eb411f7",
"size": "1057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Modules/Filtering/AnisotropicSmoothing/wrapping/test/SmoothingRecursiveGaussianImageFilterTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "435417"
},
{
"name": "C++",
"bytes": "34591024"
},
{
"name": "CMake",
"bytes": "1452219"
},
{
"name": "CSS",
"bytes": "17428"
},
{
"name": "HTML",
"bytes": "8263"
},
{
"name": "Java",
"bytes": "28585"
},
{
"name": "JavaScript",
"bytes": "1522"
},
{
"name": "Objective-C++",
"bytes": "5773"
},
{
"name": "Perl",
"bytes": "6029"
},
{
"name": "Python",
"bytes": "448031"
},
{
"name": "Ruby",
"bytes": "296"
},
{
"name": "Shell",
"bytes": "162676"
},
{
"name": "Tcl",
"bytes": "72988"
},
{
"name": "XSLT",
"bytes": "8634"
}
],
"symlink_target": ""
}
|
r"""
For usage and a list of options, try this:
$ python wpadmin.py -h
This program lives here:
https://github.com/raulchacon/wpadmin.py
"""
__version__ = '0.1.1'
import os
from functools import wraps
class NotInWordPressRootError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def wp_root(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if (os.path.isdir(os.path.join(args[0].project_root, 'wp-content',
'themes')) and
os.path.isdir(os.path.join(args[0].project_root, 'wp-content',
'plugins'))):
return f(*args, **kwargs)
        raise NotInWordPressRootError('You must run this script from '
                                      'the WordPress root folder.')
return decorated_function
@wp_root
def starttheme(args):
"""Creates theme folder with the following empty files/folders:
index.php, style.css, images/, css/ and js/. If with_timber is
True then it additionally creates views/base.twig
"""
# Create Theme folder
theme_root = os.path.join(args.project_root, 'wp-content', 'themes',
args.name)
os.makedirs(theme_root)
# Create files
theme_files = [
'404.php',
'functions.php',
'index.php',
'style.css',
'README.md',
'.gitignore'
]
for f in theme_files:
fh = open(os.path.join(theme_root, f), 'w')
if '.php' in f:
fh.write("<?php\n\n")
fh.close()
if args.classic:
static_dir = theme_root
else:
# Create a static sub directory
static_dir = os.path.join(theme_root, 'static')
os.makedirs(static_dir)
# Change default twig directory from "views" to "templates"
functionsphp = open(os.path.join(theme_root, 'functions.php'), 'a')
functionsphp.write("Timber::$dirname = 'templates';\n")
functionsphp.close()
twig_templates = os.path.join(theme_root, 'templates')
os.makedirs(os.path.join(twig_templates))
open(os.path.join(twig_templates, 'base.twig'), 'a').close()
# Create sub directories
for d in ['images', 'css', 'js']:
os.makedirs(os.path.join(static_dir, d))
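# Hedged sketch of the default (Timber-style) layout that starttheme produces
# for `python wpadmin.py starttheme my-theme` (theme name assumed):
#   wp-content/themes/my-theme/
#       404.php  functions.php  index.php  style.css  README.md  .gitignore
#       static/images/  static/css/  static/js/
#       templates/base.twig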
@wp_root
def startplugin(args):
"""Creates plugin folder with a php file of the same name."""
plugin_root = os.path.join(
args.project_root,
'wp-content',
'plugins',
args.name
)
os.makedirs(plugin_root)
open(os.path.join(plugin_root, 'README.md'), 'a').close()
open(os.path.join(plugin_root, '.gitignore'), 'a').close()
with open(os.path.join(plugin_root, args.name + '.php'), 'w') as f:
f.write("<?php\n\n")
def _main():
"""Parse options and execute wpadmin commands"""
import argparse
# Create top level parser
    parser = argparse.ArgumentParser(
        description="Create WordPress theme/plugin skeleton")
subparsers = parser.add_subparsers()
# Create the parser for the "starttheme" command
parser_starttheme = subparsers.add_parser('starttheme')
parser_starttheme.add_argument('name')
parser_starttheme.add_argument("-c", "--classic", help="create classic theme \
skeleton", action="store_true")
parser_starttheme.set_defaults(project_root=os.getcwd())
parser_starttheme.set_defaults(func=starttheme)
# Create the parser for the "startplugin" command
parser_startplugin = subparsers.add_parser('startplugin')
parser_startplugin.add_argument('name')
parser_startplugin.set_defaults(project_root=os.getcwd())
parser_startplugin.set_defaults(func=startplugin)
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
_main()
|
{
"content_hash": "1c74f90d6d1e51afefd9d5082f026f38",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 82,
"avg_line_length": 29.641221374045802,
"alnum_prop": 0.603399433427762,
"repo_name": "raulchacon/wpadmin.py",
"id": "690ec52d0400f86a697595c661930ad3bcf635da",
"size": "5095",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wpadmin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13203"
}
],
"symlink_target": ""
}
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_SGIS_generate_mipmap'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_SGIS_generate_mipmap',error_checker=_errors._error_checker)
GL_GENERATE_MIPMAP_HINT_SGIS=_C('GL_GENERATE_MIPMAP_HINT_SGIS',0x8192)
GL_GENERATE_MIPMAP_SGIS=_C('GL_GENERATE_MIPMAP_SGIS',0x8191)
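# Hedged usage sketch (assumes a live GL context; glTexParameteri and GL_TRUE
# come from the core PyOpenGL bindings):
#   from OpenGL.GL import glTexParameteri, GL_TEXTURE_2D, GL_TRUE
#   glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP_SGIS, GL_TRUE)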
|
{
"content_hash": "e1c5d54734932a9c7dfb65bcda643002",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 117,
"avg_line_length": 39.625,
"alnum_prop": 0.7555205047318612,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "3615c9599622e57788dc8e9336cc9464c27fce48",
"size": "634",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/raw/GL/SGIS/generate_mipmap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
}
|
import scrapy
import astatsscraper.parsing
import astatsscraper.pipelines
# Info on spiders with args:
# http://doc.scrapy.org/en/latest/topics/spiders.html#spider-arguments
A_STATS_OWNED_GAMES_URL_BASE = 'https://astats.astats.nl/astats/User_Games.php?SPL=0&CTO=0&Limit=0&ToPlay=0&PerfectOnly=0&Hidden=0&AchievementsOnly=0&DisplayType=2>F=0&SteamID64='
class OwnedGameIdsSpider(scrapy.Spider):
name = 'OwnedGamesSpider'
pipeline = [astatsscraper.pipelines.AppOwnerPipeline]
def __init__(self, steam_id, *args, **kwargs):
super(OwnedGameIdsSpider, self).__init__(*args, **kwargs)
self.start_urls = [A_STATS_OWNED_GAMES_URL_BASE + str(steam_id)]
def parse(self, response):
return astatsscraper.parsing.parse_owned_games_for_apps(response)
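# Hedged invocation sketch (the SteamID64 value below is made up):
#   scrapy crawl OwnedGamesSpider -a steam_id=76561197960287930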
|
{
"content_hash": "a92f853af7b50f9f549afb6f394d47be",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 181,
"avg_line_length": 37.476190476190474,
"alnum_prop": 0.7331639135959339,
"repo_name": "SingingTree/AStatsScraper",
"id": "704c46de1c7aa7b47dae405d379c741c16679be3",
"size": "787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "astatsscraper/spiders/ownedgamesspider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "88"
},
{
"name": "HTML",
"bytes": "4922"
},
{
"name": "Python",
"bytes": "21636"
}
],
"symlink_target": ""
}
|
import logging
logging.basicConfig(level=logging.ERROR)
from pyblog.storage.qiniu_storage import QiniuStorageAdapter
from pyblog.storage.storage_abstract import StorageAbstractAdapter
from collections import deque
class NoStorageDriverError(Exception):
pass
class StorageDriverFactory(object):
__drivers = {'qiniu': QiniuStorageAdapter}
def __new__(cls, *args, **kw):
assert isinstance(args[0], str)
assert isinstance(args[1], dict)
cls.__driver = args[0]
cls.__config = args[1]
return cls._resolve_storage_driver(cls.__driver, cls.__config)
    @classmethod
    def register(cls, driver_name, driver_class):
        assert isinstance(driver_name, str)
        # drivers are registered as classes, so check subclassing rather
        # than instance-ness
        assert issubclass(
            driver_class, StorageAbstractAdapter), "driver class must extend StorageAbstractAdapter"
        cls.__drivers[driver_name] = driver_class
@classmethod
def _resolve_storage_driver(cls, driver_name, config):
if driver_name in cls.__drivers:
return cls.__drivers[driver_name](config)
return None
class StorageCacheFactory(object):
def __init__(self, config, min_cache=2, max_cache=4):
assert isinstance(config, dict)
assert isinstance(min_cache, int)
assert isinstance(max_cache, int)
self.__config = config
self.__min_cache = min_cache
self.__max_cache = max_cache
self.__cache_size = 0
self.__storage_cache = deque()
self.__driver_name = self.__config.get("driver").lower()
if not hasattr(self, 'get_%s_storage' % self.__driver_name):
raise NoStorageDriverError(self.__driver_name)
for i in range(0, self.__min_cache):
self.__storage_cache.append(
getattr(self, 'get_%s_storage' % self.__driver_name)(self.__config))
self.__cache_size += 1
def get_qiniu_storage(self, config):
return QiniuStorageAdapter(config)
def get_storage(self):
return self._get_storage()
def _get_storage(self):
self._check_cache()
instance = self.__storage_cache.popleft()
self.__cache_size -= 1
return instance
def _check_cache(self):
if self.__cache_size < self.__min_cache:
cache_size = self.__cache_size
for i in range(0, (self.__max_cache - cache_size) // 2):
self.__storage_cache.append(
getattr(self, 'get_%s_storage' % self.__driver_name)(self.__config))
self.__cache_size += 1
def _add_to_cache(self, storage_instance):
if not isinstance(storage_instance, StorageAbstractAdapter):
return False
if self.__cache_size >= self.__max_cache:
return False
self.__storage_cache.append(storage_instance)
self.__cache_size += 1
return True
def cache(self, storage_instance):
return self._add_to_cache(storage_instance)
if __name__ == '__main__':
pass
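    # Hedged usage sketch (config keys assumed by the adapters):
    #   config = {'driver': 'qiniu', 'access_key': '...', 'secret_key': '...'}
    #   storage = StorageDriverFactory('qiniu', config)
    #   pool = StorageCacheFactory(config, min_cache=2, max_cache=4)
    #   instance = pool.get_storage()
    #   pool.cache(instance)  # hand the instance back to the pool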
|
{
"content_hash": "94b74e88842953a173cab25ebbffbaa0",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 96,
"avg_line_length": 34.03409090909091,
"alnum_prop": 0.6143572621035058,
"repo_name": "free-free/pyblog",
"id": "5c2ab547c70a6ef7e475a35cbbf66248d590cdf6",
"size": "3017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyblog/storage/storage_factory.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "137485"
},
{
"name": "HTML",
"bytes": "44683"
},
{
"name": "JavaScript",
"bytes": "79158"
},
{
"name": "PHP",
"bytes": "8877"
},
{
"name": "Python",
"bytes": "187239"
}
],
"symlink_target": ""
}
|
import sys
import os
import shlex
from pyhaystack import info
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
]
autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pyhaystack"
copyright = info.__copyright__
author = info.__author__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = info.__version__
# The full version, including alpha/beta/rc tags.
release = info.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "pyhaystackdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"pyhaystack.tex",
"pyhaystack Documentation",
"Christian Tremblay, P.Eng.",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pyhaystack", "pyhaystack Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pyhaystack",
"pyhaystack Documentation",
author,
"pyhaystack",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}
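# Newer Sphinx releases expect named intersphinx keys; a hedged sketch of the
# equivalent form:
# intersphinx_mapping = {"python": ("https://docs.python.org/", None)}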
|
{
"content_hash": "e2764830b9ec9b785c765f33b208b521",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 81,
"avg_line_length": 31.754874651810585,
"alnum_prop": 0.6952631578947368,
"repo_name": "ChristianTremblay/pyhaystack",
"id": "cce2ebb70af535d81b0afb61288f7068bebfc4f5",
"size": "11846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "209884"
},
{
"name": "Shell",
"bytes": "721"
}
],
"symlink_target": ""
}
|
"""Test the allgather API on a distributed Ray cluster."""
import pytest
import ray
import cupy as cp
import torch
from ray.util.collective.tests.util import (
create_collective_multigpu_workers,
init_tensors_for_gather_scatter_multigpu,
)
@pytest.mark.parametrize("tensor_backend", ["cupy", "torch"])
@pytest.mark.parametrize(
"array_size", [2, 2**5, 2**10, 2**15, 2**20, [2, 2], [5, 5, 5]]
)
def test_allgather_different_array_size(
ray_start_distributed_multigpu_2_nodes_4_gpus, array_size, tensor_backend
):
world_size = 2
num_gpu_per_worker = 2
actual_world_size = world_size * num_gpu_per_worker
actors, _ = create_collective_multigpu_workers(world_size)
init_tensors_for_gather_scatter_multigpu(
actors, array_size=array_size, tensor_backend=tensor_backend
)
results = ray.get([a.do_allgather_multigpu.remote() for a in actors])
for i in range(world_size):
for j in range(num_gpu_per_worker):
for k in range(actual_world_size):
if tensor_backend == "cupy":
assert (
results[i][j][k] == cp.ones(array_size, dtype=cp.float32)
).all()
else:
assert (
results[i][j][k]
== torch.ones(array_size, dtype=torch.float32).cuda(j)
).all()
def test_allgather_torch_cupy(ray_start_distributed_multigpu_2_nodes_4_gpus):
world_size = 2
num_gpu_per_worker = 2
actual_world_size = world_size * num_gpu_per_worker
shape = [10, 10]
actors, _ = create_collective_multigpu_workers(world_size)
# tensor is pytorch, list is cupy
for i, a in enumerate(actors):
ray.get(
[a.set_buffer.remote(shape, tensor_type0="torch", tensor_type1="torch")]
)
ray.get(
[a.set_list_buffer.remote(shape, tensor_type0="cupy", tensor_type1="cupy")]
)
results = ray.get([a.do_allgather_multigpu.remote() for a in actors])
for i in range(world_size):
for j in range(num_gpu_per_worker):
for k in range(actual_world_size):
assert (results[i][j][k] == cp.ones(shape, dtype=cp.float32)).all()
# tensor is cupy, list is pytorch
for i, a in enumerate(actors):
ray.get([a.set_buffer.remote(shape, tensor_type0="cupy", tensor_type1="cupy")])
ray.get(
[
a.set_list_buffer.remote(
shape, tensor_type0="torch", tensor_type1="torch"
)
]
)
results = ray.get([a.do_allgather_multigpu.remote() for a in actors])
for i in range(world_size):
for j in range(num_gpu_per_worker):
for k in range(actual_world_size):
assert (
results[i][j][k] == torch.ones(shape, dtype=torch.float32).cuda(j)
).all()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
|
{
"content_hash": "d933d144c91ee4ce4ee24dc1d006878d",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 87,
"avg_line_length": 34.93103448275862,
"alnum_prop": 0.5725567620927937,
"repo_name": "ray-project/ray",
"id": "74ea2ebc11df2f7668d906f66130e8d9fe664e33",
"size": "3039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/util/collective/tests/distributed_multigpu_tests/test_distributed_multigpu_allgather.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import matplotlib.pyplot as plt
N = 4
D = 2
# XOR
X = np.array([
[0, 0],
[0, 1],
[1, 0],
[1, 1],
])
T = np.array([0, 1, 1, 0])
# add a column of ones
ones = np.ones((N, 1))
# stack the bias column with the raw inputs (note: no x*y interaction
# feature is added, which is why logistic regression fails on XOR)
Xb = np.concatenate((ones, X), axis=1)
# randomly initialize the weights
w = np.random.randn(D + 1)
# calculate the model output
z = Xb.dot(w)
def sigmoid(z):
return 1/(1 + np.exp(-z))
Y = sigmoid(z)
# calculate the cross-entropy error
def cross_entropy(T, Y):
return -(T*np.log(Y) + (1-T)*np.log(1-Y)).sum()
# let's do gradient descent 100,000 times
learning_rate = 0.001
error = []
w_mags = []
for i in range(100000):
e = cross_entropy(T, Y)
error.append(e)
if i % 1000 == 0:
print(e)
    # gradient descent weight update (no regularization term here)
w += learning_rate * Xb.T.dot(T - Y)
w_mags.append(w.dot(w))
# recalculate Y
Y = sigmoid(Xb.dot(w))
plt.plot(error)
plt.title("Cross-entropy per iteration")
plt.show()
plt.plot(w_mags)
plt.title("w^2 magnitudes")
plt.show()
print("Final w:", w)
print("Final classification rate:", 1 - np.abs(T - np.round(Y)).sum() / N)
|
{
"content_hash": "0f0888e5eb001dd2139bba911a1752e9",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 74,
"avg_line_length": 18.291666666666668,
"alnum_prop": 0.6203492786636294,
"repo_name": "balazssimon/ml-playground",
"id": "ca39f0b525d974ede3379f6a4892d2d17b18e860",
"size": "1565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "udemy/lazyprogrammer/logistic-regression-python/bad_xor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "468040"
},
{
"name": "Python",
"bytes": "446476"
},
{
"name": "R",
"bytes": "60424"
}
],
"symlink_target": ""
}
|
"""
Usage:
cm-tasks menu
cm-tasks queue
cm-tasks start
cm-tasks stop
cm-tasks halt
cm-tasks kill
"""
from docopt import docopt
import os
import sys
from cloudmesh.util.menu import ascii_menu
def hallo():
print "hallo"
def celery_start():
os.system("celery worker --concurrency=10 --app=cloudmesh_task -l info")
def celery_worker_kill():
    # field 2 of `ps auxww` output is the PID; '$$2' was a Makefile-style
    # escape that does not apply inside a Python string
    os.system(
        "ps auxww | grep 'celery worker' | awk '{print $2}' | xargs kill -9")
def celery_worker_halt():
    os.system(
        "ps auxww | grep 'celery worker' | awk '{print $2}' | xargs kill -9")
def rabbit_start():
os.system("sudo rabbitmq-server -detached")
def rabbit_stop():
os.system("sudo rabbitmqctl stop")
def menu():
ascii_menu("Queue Management",
               [('start rabbitmq', rabbit_start),
('start celery', celery_start)
])
def main(arguments):
if arguments["menu"]:
menu()
elif arguments["queue"]:
rabbit_start()
elif arguments["start"]:
celery_start()
elif arguments["stop"]:
rabbit_stop()
elif arguments["kill"]:
celery_worker_kill()
elif arguments["halt"]:
celery_worker_halt()
if __name__ == '__main__':
arguments = docopt(__doc__)
main(arguments)
|
{
"content_hash": "623e1614b74a00945646fc22e9c76d3c",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 78,
"avg_line_length": 18.26027397260274,
"alnum_prop": 0.5723930982745686,
"repo_name": "rajpushkar83/cloudmesh",
"id": "d39db77319c57de846ac4fe0405b42b03ac4aac8",
"size": "1356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudmesh_examples/example_1/cm-tasks.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "390396"
},
{
"name": "HTML",
"bytes": "4158355"
},
{
"name": "Java",
"bytes": "369"
},
{
"name": "JavaScript",
"bytes": "2803977"
},
{
"name": "Makefile",
"bytes": "7572"
},
{
"name": "PHP",
"bytes": "183557"
},
{
"name": "Python",
"bytes": "1736957"
},
{
"name": "Ruby",
"bytes": "10670"
},
{
"name": "Shell",
"bytes": "32263"
}
],
"symlink_target": ""
}
|
import sys
import warnings
import functools
import operator
import numpy as np
from numpy.core._multiarray_tests import array_indexing
from itertools import product
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, assert_warns,
HAS_REFCOUNT,
)
class TestIndexing:
def test_index_no_floats(self):
a = np.array([[[5]]])
assert_raises(IndexError, lambda: a[0.0])
assert_raises(IndexError, lambda: a[0, 0.0])
assert_raises(IndexError, lambda: a[0.0, 0])
assert_raises(IndexError, lambda: a[0.0,:])
assert_raises(IndexError, lambda: a[:, 0.0])
assert_raises(IndexError, lambda: a[:, 0.0,:])
assert_raises(IndexError, lambda: a[0.0,:,:])
assert_raises(IndexError, lambda: a[0, 0, 0.0])
assert_raises(IndexError, lambda: a[0.0, 0, 0])
assert_raises(IndexError, lambda: a[0, 0.0, 0])
assert_raises(IndexError, lambda: a[-1.4])
assert_raises(IndexError, lambda: a[0, -1.4])
assert_raises(IndexError, lambda: a[-1.4, 0])
assert_raises(IndexError, lambda: a[-1.4,:])
assert_raises(IndexError, lambda: a[:, -1.4])
assert_raises(IndexError, lambda: a[:, -1.4,:])
assert_raises(IndexError, lambda: a[-1.4,:,:])
assert_raises(IndexError, lambda: a[0, 0, -1.4])
assert_raises(IndexError, lambda: a[-1.4, 0, 0])
assert_raises(IndexError, lambda: a[0, -1.4, 0])
assert_raises(IndexError, lambda: a[0.0:, 0.0])
assert_raises(IndexError, lambda: a[0.0:, 0.0,:])
def test_slicing_no_floats(self):
a = np.array([[5]])
# start as float.
assert_raises(TypeError, lambda: a[0.0:])
assert_raises(TypeError, lambda: a[0:, 0.0:2])
assert_raises(TypeError, lambda: a[0.0::2, :0])
assert_raises(TypeError, lambda: a[0.0:1:2,:])
assert_raises(TypeError, lambda: a[:, 0.0:])
# stop as float.
assert_raises(TypeError, lambda: a[:0.0])
assert_raises(TypeError, lambda: a[:0, 1:2.0])
assert_raises(TypeError, lambda: a[:0.0:2, :0])
assert_raises(TypeError, lambda: a[:0.0,:])
assert_raises(TypeError, lambda: a[:, 0:4.0:2])
# step as float.
assert_raises(TypeError, lambda: a[::1.0])
assert_raises(TypeError, lambda: a[0:, :2:2.0])
assert_raises(TypeError, lambda: a[1::4.0, :0])
assert_raises(TypeError, lambda: a[::5.0,:])
assert_raises(TypeError, lambda: a[:, 0:4:2.0])
# mixed.
assert_raises(TypeError, lambda: a[1.0:2:2.0])
assert_raises(TypeError, lambda: a[1.0::2.0])
assert_raises(TypeError, lambda: a[0:, :2.0:2.0])
assert_raises(TypeError, lambda: a[1.0:1:4.0, :0])
assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:])
assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0])
# should still get the DeprecationWarning if step = 0.
assert_raises(TypeError, lambda: a[::0.0])
def test_index_no_array_to_index(self):
# No non-scalar arrays.
a = np.array([[[1]]])
assert_raises(TypeError, lambda: a[a:a:a])
def test_none_index(self):
# `None` index adds newaxis
a = np.array([1, 2, 3])
assert_equal(a[None], a[np.newaxis])
assert_equal(a[None].ndim, a.ndim + 1)
def test_empty_tuple_index(self):
# Empty tuple index creates a view
a = np.array([1, 2, 3])
assert_equal(a[()], a)
assert_(a[()].base is a)
a = np.array(0)
assert_(isinstance(a[()], np.int_))
def test_void_scalar_empty_tuple(self):
s = np.zeros((), dtype='V4')
assert_equal(s[()].dtype, s.dtype)
assert_equal(s[()], s)
assert_equal(type(s[...]), np.ndarray)
def test_same_kind_index_casting(self):
# Indexes should be cast with same-kind and not safe, even if that
# is somewhat unsafe. So test various different code paths.
index = np.arange(5)
u_index = index.astype(np.uintp)
arr = np.arange(10)
assert_array_equal(arr[index], arr[u_index])
arr[u_index] = np.arange(5)
assert_array_equal(arr, np.arange(10))
arr = np.arange(10).reshape(5, 2)
assert_array_equal(arr[index], arr[u_index])
arr[u_index] = np.arange(5)[:,None]
assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1))
arr = np.arange(25).reshape(5, 5)
assert_array_equal(arr[u_index, u_index], arr[index, index])
def test_empty_fancy_index(self):
# Empty list index creates an empty array
# with the same dtype (but with weird shape)
a = np.array([1, 2, 3])
assert_equal(a[[]], [])
assert_equal(a[[]].dtype, a.dtype)
b = np.array([], dtype=np.intp)
assert_equal(a[[]], [])
assert_equal(a[[]].dtype, a.dtype)
b = np.array([])
assert_raises(IndexError, a.__getitem__, b)
def test_ellipsis_index(self):
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_(a[...] is not a)
assert_equal(a[...], a)
# `a[...]` was `a` in numpy <1.9.
assert_(a[...].base is a)
# Slicing with ellipsis can skip an
# arbitrary number of dimensions
assert_equal(a[0, ...], a[0])
assert_equal(a[0, ...], a[0,:])
assert_equal(a[..., 0], a[:, 0])
# Slicing with ellipsis always results
# in an array, not a scalar
assert_equal(a[0, ..., 1], np.array(2))
# Assignment with `(Ellipsis,)` on 0-d arrays
b = np.array(1)
b[(Ellipsis,)] = 2
assert_equal(b, 2)
def test_single_int_index(self):
# Single integer index selects one row
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_equal(a[0], [1, 2, 3])
assert_equal(a[-1], [7, 8, 9])
# Index out of bounds produces IndexError
assert_raises(IndexError, a.__getitem__, 1 << 30)
# Index overflow produces IndexError
assert_raises(IndexError, a.__getitem__, 1 << 64)
def test_single_bool_index(self):
# Single boolean index
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
assert_equal(a[np.array(True)], a[None])
assert_equal(a[np.array(False)], a[None][0:0])
def test_boolean_shape_mismatch(self):
arr = np.ones((5, 4, 3))
index = np.array([True])
assert_raises(IndexError, arr.__getitem__, index)
index = np.array([False] * 6)
assert_raises(IndexError, arr.__getitem__, index)
index = np.zeros((4, 4), dtype=bool)
assert_raises(IndexError, arr.__getitem__, index)
assert_raises(IndexError, arr.__getitem__, (slice(None), index))
def test_boolean_indexing_onedim(self):
# Indexing a 2-dimensional array with
# boolean array of length one
a = np.array([[ 0., 0., 0.]])
b = np.array([ True], dtype=bool)
assert_equal(a[b], a)
# boolean assignment
a[b] = 1.
assert_equal(a, [[1., 1., 1.]])
def test_boolean_assignment_value_mismatch(self):
# A boolean assignment should fail when the shape of the values
# cannot be broadcast to the subscription. (see also gh-3458)
a = np.arange(4)
def f(a, v):
a[a > -1] = v
assert_raises(ValueError, f, a, [])
assert_raises(ValueError, f, a, [1, 2, 3])
assert_raises(ValueError, f, a[:1], [1, 2, 3])
def test_boolean_assignment_needs_api(self):
# See also gh-7666
# This caused a segfault on Python 2 due to the GIL not being
# held when the iterator does not need it, but the transfer function
# does
arr = np.zeros(1000)
indx = np.zeros(1000, dtype=bool)
indx[:100] = True
arr[indx] = np.ones(100, dtype=object)
expected = np.zeros(1000)
expected[:100] = 1
assert_array_equal(arr, expected)
def test_boolean_indexing_twodim(self):
# Indexing a 2-dimensional array with
# 2-dimensional boolean array
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
b = np.array([[ True, False, True],
[False, True, False],
[ True, False, True]])
assert_equal(a[b], [1, 3, 5, 7, 9])
assert_equal(a[b[1]], [[4, 5, 6]])
assert_equal(a[b[0]], a[b[2]])
# boolean assignment
a[b] = 0
assert_equal(a, [[0, 2, 0],
[4, 0, 6],
[0, 8, 0]])
def test_boolean_indexing_list(self):
# Regression test for #13715. It's a use-after-free bug which the
# test won't directly catch, but it will show up in valgrind.
a = np.array([1, 2, 3])
b = [True, False, True]
# Two variants of the test because the first takes a fast path
assert_equal(a[b], [1, 3])
assert_equal(a[None, b], [[1, 3]])
def test_reverse_strides_and_subspace_bufferinit(self):
# This tests that the strides are not reversed for simple and
# subspace fancy indexing.
a = np.ones(5)
b = np.zeros(5, dtype=np.intp)[::-1]
c = np.arange(5)[::-1]
a[b] = c
# If the strides are not reversed, the 0 in the arange comes last.
assert_equal(a[0], 0)
# This also tests that the subspace buffer is initialized:
a = np.ones((5, 2))
c = np.arange(10).reshape(5, 2)[::-1]
a[b, :] = c
assert_equal(a[0], [0, 1])
def test_reversed_strides_result_allocation(self):
# Test a bug when calculating the output strides for a result array
# when the subspace size was 1 (and test other cases as well)
a = np.arange(10)[:, None]
i = np.arange(10)[::-1]
assert_array_equal(a[i], a[i.copy('C')])
a = np.arange(20).reshape(-1, 2)
def test_uncontiguous_subspace_assignment(self):
# During development there was a bug activating a skip logic
# based on ndim instead of size.
a = np.full((3, 4, 2), -1)
b = np.full((3, 4, 2), -1)
a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T
b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()
assert_equal(a, b)
def test_too_many_fancy_indices_special_case(self):
# Just documents behaviour, this is a small limitation.
a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS
assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)
def test_scalar_array_bool(self):
        # NumPy bools can be used as boolean index (plain Python bools not yet)
a = np.array(1)
assert_equal(a[np.bool_(True)], a[np.array(True)])
assert_equal(a[np.bool_(False)], a[np.array(False)])
# After deprecating bools as integers:
#a = np.array([0,1,2])
#assert_equal(a[True, :], a[None, :])
#assert_equal(a[:, True], a[:, None])
#
#assert_(not np.may_share_memory(a, a[True, :]))
def test_everything_returns_views(self):
# Before `...` would return a itself.
a = np.arange(5)
assert_(a is not a[()])
assert_(a is not a[...])
assert_(a is not a[:])
def test_broaderrors_indexing(self):
a = np.zeros((5, 5))
assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)
def test_trivial_fancy_out_of_bounds(self):
a = np.zeros(5)
ind = np.ones(20, dtype=np.intp)
ind[-1] = 10
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
ind = np.ones(20, dtype=np.intp)
ind[0] = 11
assert_raises(IndexError, a.__getitem__, ind)
assert_raises(IndexError, a.__setitem__, ind, 0)
def test_trivial_fancy_not_possible(self):
# Test that the fast path for trivial assignment is not incorrectly
# used when the index is not contiguous or 1D, see also gh-11467.
a = np.arange(6)
idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0]
assert_array_equal(a[idx], idx)
        # this case must not go into the fast path, note that idx is
        # a non-contiguous, non-1D array here.
a[idx] = -1
res = np.arange(6)
res[0] = -1
res[3] = -1
assert_array_equal(a, res)
def test_nonbaseclass_values(self):
class SubClass(np.ndarray):
def __array_finalize__(self, old):
# Have array finalize do funny things
self.fill(99)
a = np.zeros((5, 5))
s = a.copy().view(type=SubClass)
s.fill(1)
a[[0, 1, 2, 3, 4], :] = s
assert_((a == 1).all())
# Subspace is last, so transposing might want to finalize
a[:, [0, 1, 2, 3, 4]] = s
assert_((a == 1).all())
a.fill(0)
a[...] = s
assert_((a == 1).all())
def test_subclass_writeable(self):
d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)],
dtype=[('target', 'S20'), ('V_mag', '>f4')])
ind = np.array([False, True, True], dtype=bool)
assert_(d[ind].flags.writeable)
ind = np.array([0, 1])
assert_(d[ind].flags.writeable)
assert_(d[...].flags.writeable)
assert_(d[0].flags.writeable)
def test_memory_order(self):
# This is not necessary to preserve. Memory layouts for
# more complex indices are not as simple.
a = np.arange(10)
b = np.arange(10).reshape(5,2).T
assert_(a[b].flags.f_contiguous)
# Takes a different implementation branch:
a = a.reshape(-1, 1)
assert_(a[b, 0].flags.f_contiguous)
def test_scalar_return_type(self):
# Full scalar indices should return scalars and object
# arrays should not call PyArray_Return on their items
class Zero:
# The most basic valid indexing
def __index__(self):
return 0
z = Zero()
class ArrayLike:
# Simple array, should behave like the array
def __array__(self):
return np.array(0)
a = np.zeros(())
assert_(isinstance(a[()], np.float_))
a = np.zeros(1)
assert_(isinstance(a[z], np.float_))
a = np.zeros((1, 1))
assert_(isinstance(a[z, np.array(0)], np.float_))
assert_(isinstance(a[z, ArrayLike()], np.float_))
# And object arrays do not call it too often:
b = np.array(0)
a = np.array(0, dtype=object)
a[()] = b
assert_(isinstance(a[()], np.ndarray))
a = np.array([b, None])
assert_(isinstance(a[z], np.ndarray))
a = np.array([[b, None]])
assert_(isinstance(a[z, np.array(0)], np.ndarray))
assert_(isinstance(a[z, ArrayLike()], np.ndarray))
def test_small_regressions(self):
# Reference count of intp for index checks
a = np.array([0])
if HAS_REFCOUNT:
refcount = sys.getrefcount(np.dtype(np.intp))
# item setting always checks indices in separate function:
a[np.array([0], dtype=np.intp)] = 1
a[np.array([0], dtype=np.uint8)] = 1
assert_raises(IndexError, a.__setitem__,
np.array([1], dtype=np.intp), 1)
assert_raises(IndexError, a.__setitem__,
np.array([1], dtype=np.uint8), 1)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
def test_unaligned(self):
v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
d = v.view(np.dtype("S8"))
# unaligned source
x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]
x = x.view(np.dtype("S8"))
x[...] = np.array("b" * 8, dtype="S")
b = np.arange(d.size)
#trivial
assert_equal(d[b], d)
d[b] = x
# nontrivial
# unaligned index array
b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]
b = b.view(np.intp)[:d.size]
b[...] = np.arange(d.size)
assert_equal(d[b.astype(np.int16)], d)
d[b.astype(np.int16)] = x
# boolean
d[b % 2 == 0]
d[b % 2 == 0] = x[::2]
def test_tuple_subclass(self):
arr = np.ones((5, 5))
# A tuple subclass should also be an nd-index
class TupleSubclass(tuple):
pass
index = ([1], [1])
index = TupleSubclass(index)
assert_(arr[index].shape == (1,))
# Unlike the non nd-index:
assert_(arr[index,].shape != (1,))
def test_broken_sequence_not_nd_index(self):
# See gh-5063:
# If we have an object which claims to be a sequence, but fails
# on item getting, this should not be converted to an nd-index (tuple)
# If this object happens to be a valid index otherwise, it should work
# This object here is very dubious and probably bad though:
class SequenceLike:
def __index__(self):
return 0
def __len__(self):
return 1
def __getitem__(self, item):
raise IndexError('Not possible')
arr = np.arange(10)
assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
# also test that field indexing does not segfault
# for a similar reason, by indexing a structured array
arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')])
assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
def test_indexing_array_weird_strides(self):
# See also gh-6221
# the shapes used here come from the issue and create the correct
# size for the iterator buffering size.
x = np.ones(10)
x2 = np.ones((10, 2))
ind = np.arange(10)[:, None, None, None]
ind = np.broadcast_to(ind, (10, 55, 4, 4))
# single advanced index case
assert_array_equal(x[ind], x[ind.copy()])
# higher dimensional advanced index
zind = np.zeros(4, dtype=np.intp)
assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])
def test_indexing_array_negative_strides(self):
# From gh-8264,
# core dumps if negative strides are used in iteration
arro = np.zeros((4, 4))
arr = arro[::-1, ::-1]
slices = (slice(None), [0, 1, 2, 3])
arr[slices] = 10
assert_array_equal(arr, 10.)
class TestFieldIndexing:
def test_scalar_return_type(self):
# Field access on an array should return an array, even if it
# is 0-d.
a = np.zeros((), [('a','f8')])
assert_(isinstance(a['a'], np.ndarray))
assert_(isinstance(a[['a']], np.ndarray))
class TestBroadcastedAssignments:
def assign(self, a, ind, val):
a[ind] = val
return a
def test_prepending_ones(self):
a = np.zeros((3, 2))
a[...] = np.ones((1, 3, 2))
# Fancy with subspace with and without transpose
a[[0, 1, 2], :] = np.ones((1, 3, 2))
a[:, [0, 1]] = np.ones((1, 3, 2))
# Fancy without subspace (with broadcasting)
a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2))
def test_prepend_not_one(self):
assign = self.assign
s_ = np.s_
a = np.zeros(5)
# Too large and not only ones.
assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1)))
assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1)))
assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1)))
def test_simple_broadcasting_errors(self):
assign = self.assign
s_ = np.s_
a = np.zeros((5, 1))
assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))
assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))
assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))
assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))
assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))
def test_index_is_larger(self):
# Simple case of fancy index broadcasting of the index.
a = np.zeros((5, 5))
a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4]
assert_((a[:3, :3] == [2, 3, 4]).all())
def test_broadcast_subspace(self):
a = np.zeros((100, 100))
v = np.arange(100)[:,None]
b = np.arange(100)[::-1]
a[b] = v
assert_((a[::-1] == v).all())
class TestSubclasses:
def test_basic(self):
# Test that indexing in various ways produces SubClass instances,
# and that the base is set up correctly: the original subclass
# instance for views, and a new ndarray for advanced/boolean indexing
# where a copy was made (latter a regression test for gh-11983).
class SubClass(np.ndarray):
pass
a = np.arange(5)
s = a.view(SubClass)
s_slice = s[:3]
assert_(type(s_slice) is SubClass)
assert_(s_slice.base is s)
assert_array_equal(s_slice, a[:3])
s_fancy = s[[0, 1, 2]]
assert_(type(s_fancy) is SubClass)
assert_(s_fancy.base is not s)
assert_(type(s_fancy.base) is np.ndarray)
assert_array_equal(s_fancy, a[[0, 1, 2]])
assert_array_equal(s_fancy.base, a[[0, 1, 2]])
s_bool = s[s > 0]
assert_(type(s_bool) is SubClass)
assert_(s_bool.base is not s)
assert_(type(s_bool.base) is np.ndarray)
assert_array_equal(s_bool, a[a > 0])
assert_array_equal(s_bool.base, a[a > 0])
def test_fancy_on_read_only(self):
# Test that fancy indexing on read-only SubClass does not make a
# read-only copy (gh-14132)
class SubClass(np.ndarray):
pass
a = np.arange(5)
s = a.view(SubClass)
s.flags.writeable = False
s_fancy = s[[0, 1, 2]]
assert_(s_fancy.flags.writeable)
def test_finalize_gets_full_info(self):
# Array finalize should be called on the filled array.
class SubClass(np.ndarray):
def __array_finalize__(self, old):
self.finalize_status = np.array(self)
self.old = old
s = np.arange(10).view(SubClass)
new_s = s[:3]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
new_s = s[[0,1,2,3]]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
new_s = s[s > 0]
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
class TestFancyIndexingCast:
def test_boolean_index_cast_assign(self):
# Setup the boolean index and float arrays.
shape = (8, 63)
bool_index = np.zeros(shape).astype(bool)
bool_index[0, 1] = True
zero_array = np.zeros(shape)
# Assigning float is fine.
zero_array[bool_index] = np.array([1])
assert_equal(zero_array[0, 1], 1)
# Fancy indexing works, although we get a cast warning.
assert_warns(np.ComplexWarning,
zero_array.__setitem__, ([0], [1]), np.array([2 + 1j]))
assert_equal(zero_array[0, 1], 2) # No complex part
# Cast complex to float, throwing away the imaginary portion.
assert_warns(np.ComplexWarning,
zero_array.__setitem__, bool_index, np.array([1j]))
assert_equal(zero_array[0, 1], 0)
class TestFancyIndexingEquivalence:
def test_object_assign(self):
# Check that the field and object special case using copyto is active.
# The right hand side cannot be converted to an array here.
a = np.arange(5, dtype=object)
b = a.copy()
a[:3] = [1, (1,2), 3]
b[[0, 1, 2]] = [1, (1,2), 3]
assert_array_equal(a, b)
# test same for subspace fancy indexing
b = np.arange(5, dtype=object)[None, :]
b[[0], :3] = [[1, (1,2), 3]]
assert_array_equal(a, b[0])
# Check that swapping of axes works.
# There was a bug that made the later assignment throw a ValueError
        # due to an incorrectly transposed temporary right hand side (gh-5714)
b = b.T
b[:3, [0]] = [[1], [(1,2)], [3]]
assert_array_equal(a, b[:, 0])
# Another test for the memory order of the subspace
arr = np.ones((3, 4, 5), dtype=object)
# Equivalent slicing assignment for comparison
cmp_arr = arr.copy()
cmp_arr[:1, ...] = [[[1], [2], [3], [4]]]
arr[[0], ...] = [[[1], [2], [3], [4]]]
assert_array_equal(arr, cmp_arr)
arr = arr.copy('F')
arr[[0], ...] = [[[1], [2], [3], [4]]]
assert_array_equal(arr, cmp_arr)
def test_cast_equivalence(self):
# Yes, normal slicing uses unsafe casting.
a = np.arange(5)
b = a.copy()
a[:3] = np.array(['2', '-3', '-1'])
b[[0, 2, 1]] = np.array(['2', '-1', '-3'])
assert_array_equal(a, b)
# test the same for subspace fancy indexing
b = np.arange(5)[None, :]
b[[0], :3] = np.array([['2', '-3', '-1']])
assert_array_equal(a, b[0])
class TestMultiIndexingAutomated:
"""
These tests use code to mimic the C-Code indexing for selection.
NOTE:
* This still lacks tests for complex item setting.
* If you change behavior of indexing, you might want to modify
these tests to try more combinations.
* Behavior was written to match numpy version 1.8. (though a
first version matched 1.7.)
* Only tuple indices are supported by the mimicking code.
(and tested as of writing this)
* Error types should match most of the time as long as there
is only one error. For multiple errors, what gets raised
will usually not be the same one. They are *not* tested.
Update 2016-11-30: It is probably not worth maintaining this test
indefinitely and it can be dropped if maintenance becomes a burden.
"""
def setup(self):
self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
self.b = np.empty((3, 0, 5, 6))
self.complex_indices = ['skip', Ellipsis,
0,
# Boolean indices, up to 3-d for some special cases of eating up
# dimensions, also need to test all False
np.array([True, False, False]),
np.array([[True, False], [False, True]]),
np.array([[[False, False], [False, False]]]),
# Some slices:
slice(-5, 5, 2),
slice(1, 1, 100),
slice(4, -1, -2),
slice(None, None, -3),
# Some Fancy indexes:
np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast
np.array([0, 1, -2]),
np.array([[2], [0], [1]]),
np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
np.array([2, -1], dtype=np.int8),
np.zeros([1]*31, dtype=int), # trigger too large array.
np.array([0., 1.])] # invalid datatype
# Some simpler indices that still cover a bit more
self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]),
'skip']
# Very simple ones to fill the rest:
self.fill_indices = [slice(None, None), 0]
def _get_multi_index(self, arr, indices):
"""Mimic multi dimensional indexing.
Parameters
----------
arr : ndarray
Array to be indexed.
indices : tuple of index objects
Returns
-------
out : ndarray
An array equivalent to the indexing operation (but always a copy).
`arr[indices]` should be identical.
no_copy : bool
Whether the indexing operation requires a copy. If this is `True`,
`np.may_share_memory(arr, arr[indices])` should be `True` (with
some exceptions for scalars and possibly 0-d arrays).
Notes
-----
While the function may mostly match the errors of normal indexing this
is generally not the case.
"""
in_indices = list(indices)
indices = []
# if False, this is a fancy or boolean index
no_copy = True
# number of fancy/scalar indexes that are not consecutive
num_fancy = 0
# number of dimensions indexed by a "fancy" index
fancy_dim = 0
# NOTE: This is a funny twist (and probably OK to change).
# The boolean array has illegal indexes, but this is
# allowed if the broadcast fancy-indices are 0-sized.
# This variable is to catch that case.
error_unless_broadcast_to_empty = False
# We need to handle Ellipsis and make arrays from indices, also
# check if this is fancy indexing (set no_copy).
ndim = 0
ellipsis_pos = None # define here mostly to replace all but first.
for i, indx in enumerate(in_indices):
if indx is None:
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool:
no_copy = False
if indx.ndim == 0:
raise IndexError
# boolean indices can have higher dimensions
ndim += indx.ndim
fancy_dim += indx.ndim
continue
if indx is Ellipsis:
if ellipsis_pos is None:
ellipsis_pos = i
continue # do not increment ndim counter
raise IndexError
if isinstance(indx, slice):
ndim += 1
continue
if not isinstance(indx, np.ndarray):
# This could be open for changes in numpy.
# numpy should maybe raise an error if casting to intp
# is not safe. It rejects np.array([1., 2.]) but not
            # [1., 2.] as an index (same for e.g. np.take).
# (Note the importance of empty lists if changing this here)
try:
indx = np.array(indx, dtype=np.intp)
except ValueError:
raise IndexError
in_indices[i] = indx
elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
raise IndexError('arrays used as indices must be of '
'integer (or boolean) type')
if indx.ndim != 0:
no_copy = False
ndim += 1
fancy_dim += 1
if arr.ndim - ndim < 0:
            # we can't take more dimensions than we have, not even for 0-d
# arrays. since a[()] makes sense, but not a[(),]. We will
# raise an error later on, unless a broadcasting error occurs
# first.
raise IndexError
if ndim == 0 and None not in in_indices:
# Well we have no indexes or one Ellipsis. This is legal.
return arr.copy(), no_copy
if ellipsis_pos is not None:
in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] *
(arr.ndim - ndim))
for ax, indx in enumerate(in_indices):
if isinstance(indx, slice):
# convert to an index array
indx = np.arange(*indx.indices(arr.shape[ax]))
indices.append(['s', indx])
continue
elif indx is None:
# this is like taking a slice with one element from a new axis:
indices.append(['n', np.array([0], dtype=np.intp)])
arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:]))
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool:
if indx.shape != arr.shape[ax:ax+indx.ndim]:
raise IndexError
try:
flat_indx = np.ravel_multi_index(np.nonzero(indx),
arr.shape[ax:ax+indx.ndim], mode='raise')
except Exception:
error_unless_broadcast_to_empty = True
# fill with 0s instead, and raise error later
flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
# concatenate axis into a single one:
if indx.ndim != 0:
arr = arr.reshape((arr.shape[:ax]
+ (np.prod(arr.shape[ax:ax+indx.ndim]),)
+ arr.shape[ax+indx.ndim:]))
indx = flat_indx
else:
# This could be changed, a 0-d boolean index can
# make sense (even outside the 0-d indexed array case)
                    # Note that originally this could be interpreted as an
                    # integer in the full integer special case.
raise IndexError
else:
# If the index is a singleton, the bounds check is done
# before the broadcasting. This used to be different in <1.9
if indx.ndim == 0:
if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
raise IndexError
if indx.ndim == 0:
                # The index is a scalar. This used to be twofold, but if
# fancy indexing was active, the check was done later,
# possibly after broadcasting it away (1.7. or earlier).
# Now it is always done.
if indx >= arr.shape[ax] or indx < - arr.shape[ax]:
raise IndexError
if (len(indices) > 0 and
indices[-1][0] == 'f' and
ax != ellipsis_pos):
# NOTE: There could still have been a 0-sized Ellipsis
# between them. Checked that with ellipsis_pos.
indices[-1].append(indx)
else:
# We have a fancy index that is not after an existing one.
# NOTE: A 0-d array triggers this as well, while one may
# expect it to not trigger it, since a scalar would not be
# considered fancy indexing.
num_fancy += 1
indices.append(['f', indx])
if num_fancy > 1 and not no_copy:
# We have to flush the fancy indexes left
new_indices = indices[:]
axes = list(range(arr.ndim))
fancy_axes = []
new_indices.insert(0, ['f'])
ni = 0
ai = 0
for indx in indices:
ni += 1
if indx[0] == 'f':
new_indices[0].extend(indx[1:])
del new_indices[ni]
ni -= 1
for ax in range(ai, ai + len(indx[1:])):
fancy_axes.append(ax)
axes.remove(ax)
ai += len(indx) - 1 # axis we are at
indices = new_indices
# and now we need to transpose arr:
arr = arr.transpose(*(fancy_axes + axes))
# We only have one 'f' index now and arr is transposed accordingly.
# Now handle newaxis by reshaping...
ax = 0
for indx in indices:
if indx[0] == 'f':
if len(indx) == 1:
continue
# First of all, reshape arr to combine fancy axes into one:
orig_shape = arr.shape
orig_slice = orig_shape[ax:ax + len(indx[1:])]
arr = arr.reshape((arr.shape[:ax]
+ (np.prod(orig_slice).astype(int),)
+ arr.shape[ax + len(indx[1:]):]))
# Check if broadcasting works
res = np.broadcast(*indx[1:])
# unfortunately the indices might be out of bounds. So check
# that first, and use mode='wrap' then. However only if
# there are any indices...
if res.size != 0:
if error_unless_broadcast_to_empty:
raise IndexError
for _indx, _size in zip(indx[1:], orig_slice):
if _indx.size == 0:
continue
if np.any(_indx >= _size) or np.any(_indx < -_size):
raise IndexError
if len(indx[1:]) == len(orig_slice):
                    if np.prod(orig_slice) == 0:
# Work around for a crash or IndexError with 'wrap'
# in some 0-sized cases.
try:
mi = np.ravel_multi_index(indx[1:], orig_slice,
mode='raise')
except Exception:
# This happens with 0-sized orig_slice (sometimes?)
# here it is a ValueError, but indexing gives a:
raise IndexError('invalid index into 0-sized')
else:
mi = np.ravel_multi_index(indx[1:], orig_slice,
mode='wrap')
else:
# Maybe never happens...
raise ValueError
arr = arr.take(mi.ravel(), axis=ax)
try:
arr = arr.reshape((arr.shape[:ax]
+ mi.shape
+ arr.shape[ax+1:]))
except ValueError:
# too many dimensions, probably
raise IndexError
ax += mi.ndim
continue
# If we are here, we have a 1D array for take:
arr = arr.take(indx[1], axis=ax)
ax += 1
return arr, no_copy
def _check_multi_index(self, arr, index):
"""Check a multi index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be a reshaped arange.
index : tuple of indexing objects
Index being tested.
"""
# Test item getting
try:
mimic_get, no_copy = self._get_multi_index(arr, index)
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
assert_raises(type(e), arr.__getitem__, index)
assert_raises(type(e), arr.__setitem__, index, 0)
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _check_single_index(self, arr, index):
"""Check a single index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be an arange.
index : indexing object
Index being tested. Must be a single index and not a tuple
of indexing objects (see also `_check_multi_index`).
"""
try:
mimic_get, no_copy = self._get_multi_index(arr, (index,))
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
assert_raises(type(e), arr.__getitem__, index)
assert_raises(type(e), arr.__setitem__, index, 0)
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _compare_index_result(self, arr, index, mimic_get, no_copy):
"""Compare mimicked result to indexing result.
"""
arr = arr.copy()
indexed_arr = arr[index]
assert_array_equal(indexed_arr, mimic_get)
        # Check if we got a view, unless it's a 0-sized or 0-d array.
        # (then it's not a view, and that does not matter)
if indexed_arr.size != 0 and indexed_arr.ndim != 0:
assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
# Check reference count of the original array
if HAS_REFCOUNT:
if no_copy:
# refcount increases by one:
assert_equal(sys.getrefcount(arr), 3)
else:
assert_equal(sys.getrefcount(arr), 2)
# Test non-broadcast setitem:
b = arr.copy()
b[index] = mimic_get + 1000
if b.size == 0:
return # nothing to compare here...
if no_copy and indexed_arr.ndim != 0:
# change indexed_arr in-place to manipulate original:
indexed_arr += 1000
assert_array_equal(arr, b)
return
# Use the fact that the array is originally an arange:
arr.flat[indexed_arr.ravel()] += 1000
assert_array_equal(arr, b)
def test_boolean(self):
a = np.array(5)
assert_equal(a[np.array(True)], 5)
a[np.array(True)] = 1
assert_equal(a, 1)
# NOTE: This is different from normal broadcasting, as
# arr[boolean_array] works like in a multi index. Which means
# it is aligned to the left. This is probably correct for
# consistency with arr[boolean_array,] also no broadcasting
# is done at all
self._check_multi_index(
self.a, (np.zeros_like(self.a, dtype=bool),))
self._check_multi_index(
self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
self._check_multi_index(
self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
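        # A minimal sketch (not part of the original test) of the
        # left-alignment described above: a boolean index consumes leading
        # dimensions whose shape it matches exactly, with no broadcasting:
        #
        #     a = np.zeros((3, 1, 5, 6))
        #     mask = np.zeros((3, 1), dtype=bool)  # matches a.shape[:2]
        #     a[mask].shape                        # -> (0, 5, 6)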
def test_multidim(self):
# Automatically test combinations with complex indexes on 2nd (or 1st)
# spot and the simple ones in one other spot.
with warnings.catch_warnings():
# This is so that np.array(True) is not accepted in a full integer
# index, when running the file separately.
warnings.filterwarnings('error', '', DeprecationWarning)
warnings.filterwarnings('error', '', np.VisibleDeprecationWarning)
def isskip(idx):
return isinstance(idx, str) and idx == "skip"
for simple_pos in [0, 2, 3]:
tocheck = [self.fill_indices, self.complex_indices,
self.fill_indices, self.fill_indices]
tocheck[simple_pos] = self.simple_indices
for index in product(*tocheck):
index = tuple(i for i in index if not isskip(i))
self._check_multi_index(self.a, index)
self._check_multi_index(self.b, index)
# Check very simple item getting:
self._check_multi_index(self.a, (0, 0, 0, 0))
self._check_multi_index(self.b, (0, 0, 0, 0))
# Also check (simple cases of) too many indices:
assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0))
assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0)
assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))
assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)
def test_1d(self):
a = np.arange(10)
for index in self.complex_indices:
self._check_single_index(a, index)
class TestFloatNonIntegerArgument:
"""
These test that ``TypeError`` is raised when you try to use
    non-integers as arguments for indexing and slicing, e.g. ``a[0.0:5]``
and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.
"""
def test_valid_indexing(self):
# These should raise no errors.
a = np.array([[[5]]])
a[np.array([0])]
a[[0, 0]]
a[:, [0, 0]]
a[:, 0,:]
a[:,:,:]
def test_valid_slicing(self):
# These should raise no errors.
a = np.array([[[5]]])
a[::]
a[0:]
a[:2]
a[0:2]
a[::2]
a[1::2]
a[:2:2]
a[1:2:2]
def test_non_integer_argument_errors(self):
a = np.array([[5]])
assert_raises(TypeError, np.reshape, a, (1., 1., -1))
assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))
assert_raises(TypeError, np.take, a, [0], 1.)
assert_raises(TypeError, np.take, a, [0], np.float64(1.))
def test_non_integer_sequence_multiplication(self):
# NumPy scalar sequence multiply should not work with non-integers
def mult(a, b):
return a * b
assert_raises(TypeError, mult, [1], np.float_(3))
# following should be OK
mult([1], np.int_(3))
def test_reduce_axis_float_index(self):
d = np.zeros((3,3,3))
assert_raises(TypeError, np.min, d, 0.5)
assert_raises(TypeError, np.min, d, (0.5, 1))
assert_raises(TypeError, np.min, d, (1, 2.2))
assert_raises(TypeError, np.min, d, (.2, 1.2))
class TestBooleanIndexing:
# Using a boolean as integer argument/indexing is an error.
def test_bool_as_int_argument_errors(self):
a = np.array([[[1]]])
assert_raises(TypeError, np.reshape, a, (True, -1))
assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
        # Note that operator.index(np.array(True)) does not work; a boolean
        # array is thus also deprecated, but not with the same message:
assert_raises(TypeError, operator.index, np.array(True))
assert_warns(DeprecationWarning, operator.index, np.True_)
assert_raises(TypeError, np.take, args=(a, [0], False))
def test_boolean_indexing_weirdness(self):
# Weird boolean indexing things
a = np.ones((2, 3, 4))
        assert_(a[False, True, ...].shape == (0, 2, 3, 4))
        assert_(a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2))
assert_raises(IndexError, lambda: a[False, [0, 1], ...])
class TestArrayToIndexDeprecation:
"""Creating an an index from array not 0-D is an error.
"""
def test_array_to_index_error(self):
        # Only non-0-d arrays are invalid as an index, so creating the test
        # array raises no exception; the raising itself is effectively tested
        # above as well.
a = np.array([[[1]]])
assert_raises(TypeError, operator.index, np.array([1]))
assert_raises(TypeError, np.reshape, a, (a, -1))
assert_raises(TypeError, np.take, a, [0], a)
class TestNonIntegerArrayLike:
"""Tests that array_likes only valid if can safely cast to integer.
For instance, lists give IndexError when they cannot be safely cast to
an integer.
"""
def test_basic(self):
a = np.arange(10)
assert_raises(IndexError, a.__getitem__, [0.5, 1.5])
assert_raises(IndexError, a.__getitem__, (['1', '2'],))
# The following is valid
a.__getitem__([])
class TestMultipleEllipsisError:
"""An index can only have a single ellipsis.
"""
def test_basic(self):
a = np.arange(10)
assert_raises(IndexError, lambda: a[..., ...])
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
class TestCApiAccess:
def test_getitem(self):
subscript = functools.partial(array_indexing, 0)
# 0-d arrays don't work:
assert_raises(IndexError, subscript, np.ones(()), 0)
# Out of bound values:
assert_raises(IndexError, subscript, np.ones(10), 11)
assert_raises(IndexError, subscript, np.ones(10), -11)
assert_raises(IndexError, subscript, np.ones((10, 10)), 11)
assert_raises(IndexError, subscript, np.ones((10, 10)), -11)
a = np.arange(10)
assert_array_equal(a[4], subscript(a, 4))
a = a.reshape(5, 2)
assert_array_equal(a[-4], subscript(a, -4))
def test_setitem(self):
assign = functools.partial(array_indexing, 1)
# Deletion is impossible:
assert_raises(ValueError, assign, np.ones(10), 0)
# 0-d arrays don't work:
assert_raises(IndexError, assign, np.ones(()), 0, 0)
# Out of bound values:
assert_raises(IndexError, assign, np.ones(10), 11, 0)
assert_raises(IndexError, assign, np.ones(10), -11, 0)
assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0)
assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0)
a = np.arange(10)
assign(a, 4, 10)
assert_(a[4] == 10)
a = a.reshape(5, 2)
assign(a, 4, 10)
assert_array_equal(a[-1], [10, 10])
|
{
"content_hash": "95bfd1e4646caaceccd433fd4b1a4016",
"timestamp": "",
"source": "github",
"line_count": 1296,
"max_line_length": 79,
"avg_line_length": 38.00925925925926,
"alnum_prop": 0.5296995533901746,
"repo_name": "WarrenWeckesser/numpy",
"id": "4bb5cb11ac3c856d86ae37f47075fb76b6e53aa4",
"size": "49260",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "numpy/core/tests/test_indexing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9059444"
},
{
"name": "C++",
"bytes": "174989"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "8313055"
},
{
"name": "Shell",
"bytes": "9612"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
}
|
"""
Django settings for todosite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'jtufmx3n*f9h8o8n0xwk6!#6rp$)@!h0y0759#4o2uwnhrocg8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'todo',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'todosite.urls'
WSGI_APPLICATION = 'todosite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Template DIR
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
|
{
"content_hash": "e7d41fcc4c5af792b1535d0817e6fec5",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 71,
"avg_line_length": 24.738636363636363,
"alnum_prop": 0.7253100597152045,
"repo_name": "KellyChan/python-examples",
"id": "8358813c665adb223447654381f6aeb1182436bc",
"size": "2177",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/django/elf/todo/todosite/todosite/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "86277"
},
{
"name": "HTML",
"bytes": "320182"
},
{
"name": "JavaScript",
"bytes": "154998"
},
{
"name": "Jupyter Notebook",
"bytes": "30660"
},
{
"name": "Python",
"bytes": "238130"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('info_timers', '0003_auto_20150228_1213'),
]
operations = [
migrations.AddField(
model_name='timer',
name='auto_remove',
field=models.IntegerField(null=True),
preserve_default=True,
),
migrations.AddField(
model_name='timer',
name='no_refresh',
field=models.BooleanField(default=False),
preserve_default=True,
),
]
|
{
"content_hash": "c74f517899224463013fe4174d9d1822",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 53,
"avg_line_length": 24.4,
"alnum_prop": 0.5655737704918032,
"repo_name": "ojarva/home-info-display",
"id": "4704834dbd079b1b8b4cce04020c87dfc0003d9e",
"size": "634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homedisplay/info_timers/migrations/0004_auto_20150228_1347.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "22171"
},
{
"name": "CoffeeScript",
"bytes": "115283"
},
{
"name": "HTML",
"bytes": "51598"
},
{
"name": "JavaScript",
"bytes": "9902"
},
{
"name": "Python",
"bytes": "310675"
},
{
"name": "Shell",
"bytes": "1617"
}
],
"symlink_target": ""
}
|
from setuptools import setup
import re
VERSIONFILE="pymailinator/_version.py"
verstrline = open(VERSIONFILE, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
verstr = mo.group(1)
major, minor, patch = verstr.split('.')
release = "%s.%s" %(major, minor)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
# Setup
setup(
name='py-mailinator',
version=verstr,
url='https://github.com/mc706/py-mailinator',
author='Ryan McDevitt',
author_email='mcdevitt.ryan@gmail.com',
license='MIT License',
packages=['pymailinator'],
include_package_data=True,
description='Python API wrapper for mailinator',
download_url = 'https://github.com/mc706/py-mailinator/tarball/' + release,
keywords = ['mailinator', 'api', 'email'],
classifiers = [
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Communications :: Email",
"Topic :: Software Development :: Testing",
"Topic :: Utilities",
],
)
|
{
"content_hash": "992fa42cba00cdbd166f0f4ea653aba1",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 79,
"avg_line_length": 35.146341463414636,
"alnum_prop": 0.6190145732130465,
"repo_name": "mc706/py-mailinator",
"id": "ee3b16e2e758e3311c19f5bc4450fe389777ea7e",
"size": "1441",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12905"
}
],
"symlink_target": ""
}
|
try:
from pipeline.component.nn.backend.fate_torch import nn, init, operation, optim, serialization
except ImportError:
nn, init, operation, optim, serialization = None, None, None, None, None
__all__ = ['nn', 'init', 'operation', 'optim', 'serialization']
|
{
"content_hash": "bc9740c328f009fa0934dbc375810c1e",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 98,
"avg_line_length": 44.333333333333336,
"alnum_prop": 0.7030075187969925,
"repo_name": "FederatedAI/FATE",
"id": "52d08399b47aa9761cdff49b36c6b35081589ae3",
"size": "266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/fate_client/pipeline/component/nn/backend/fate_torch/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "19716"
},
{
"name": "Python",
"bytes": "5121767"
},
{
"name": "Rust",
"bytes": "3971"
},
{
"name": "Shell",
"bytes": "19676"
}
],
"symlink_target": ""
}
|
from django.core.serializers import json
from python import Deserializer
json.PythonDeserializer = Deserializer
from django.core.serializers.json import *
|
{
"content_hash": "32cece429f9f36d638e3f0b495339c5c",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 42,
"avg_line_length": 38.75,
"alnum_prop": 0.8516129032258064,
"repo_name": "certik/chess",
"id": "16c6bf6b27d64aebb7006745b72e613a8dbc09fc",
"size": "155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/appenginepatch/appenginepatcher/serializers/json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "332405"
}
],
"symlink_target": ""
}
|
import sys, os, tarfile
from ftplib import FTP
from src.utils import select_files, get_local_timestamp, get_remote_timestamp, \
update_progress, create_folder, is_compressed, is_tgz, is_gz, \
extract_gz, extract_tar
from src.exceptions import ConnectionException, FtpPathException, FtpIndexException, \
                           FtpDownloadException, ActualException, ExtractionException
class ftp_functions:
'''provide all needed functions for interacting with the ftp backend'''
FTP_SERVER = ''
FTP_ROOT = ''
CONNECTION = ''
UP = "../"
DOWNLOAD_FOLDER = ''
DEBUG = False
def __init__(self, FTP_SERVER, FTP_ROOT, DOWNLOAD_FOLDER, DEBUG):
self.FTP_SERVER = FTP_SERVER
self.FTP_ROOT = FTP_ROOT
self.DOWNLOAD_FOLDER = DOWNLOAD_FOLDER
self.DEBUG = DEBUG
def get_connection(self):
return self.CONNECTION
def set_connection(self, connection):
self.CONNECTION = connection
def connect(self):
        '''wrapper to open a connection as an anonymous user on an FTP server'''
try:
ftp = FTP(self.FTP_SERVER)
ftp.login()
sys.stdout.write("CONNECTED TO %s \n" % (self.FTP_SERVER))
self.set_connection(ftp)
except:
raise ConnectionException(self.FTP_SERVER)
def go_to_root(self):
'''navigate to the starting point on ftp server site '''
try:
self.get_connection().cwd(self.FTP_ROOT)
# only for debug mode
if self.DEBUG: sys.stdout.write("\nloc: " + self.get_connection().pwd() + "\n")
except:
raise FtpPathException(self.FTP_ROOT)
def go_down(self, folder):
'''move to specific folder on ftp site'''
try:
self.get_connection().cwd(folder)
# only for debug mode
if self.DEBUG: sys.stdout.write("\nloc: " + self.get_connection().pwd() + "\n")
except:
raise FtpPathException(folder)
def go_up(self):
'''move one level up in ftp file tree'''
try:
self.get_connection().cwd(self.UP)
# only for debug mode
if self.DEBUG: sys.stdout.write("\nloc: " + self.get_connection().pwd() + "\n")
except:
raise FtpPathException(self.UP)
def ls(self):
'''wrapper to get a list of all items on current file level'''
return self.get_connection().nlst()
def close(self):
'''wrapper for closing open ftp connection'''
self.get_connection().close()
def get_folder_index(self, folder):
'''generate a list of all subfolders (without single files)'''
dirs = []
try:
sys.stdout.write("Identify folders for Downloading ... \n")
self.get_connection().dir("", dirs.append)
# select only folder
dirs = [x.split()[-1] for x in dirs if x.startswith("d")]
sys.stdout.write("Found %d folders\n" % (len(dirs)))
return dirs if self.DEBUG is False else dirs[:10]
except:
raise FtpIndexException(self.FTP_ROOT)
def get_file_index(self, db_type):
'''generate a list of all files matching "nucl" or "prot" conditions'''
return select_files(self.ls(), db_type)
    def is_actual(self, local, remote):
        '''determine if the local timestamp is newer than or equal to the remote timestamp'''
        return get_local_timestamp(local + os.sep + remote) >= \
               get_remote_timestamp(remote, self.get_connection())
# download a folder with all subfolder from ftp source
def download_folder(self, remote_folder, db_type):
'''download a folder with all subfolders and matching files from ftp site'''
local = self.DOWNLOAD_FOLDER + os.sep + remote_folder
# go to remote dir
self.go_down(remote_folder)
# get list of subfolder
folder_list = self.get_folder_index(remote_folder)
# only for cmd output
downloaded = actual = all = 0
# init progressbar
total = float(len(folder_list))
count = float(downloaded + actual)
update_progress(count)
# loop over folder in remote_dir
for item in folder_list:
# create local folder
local_folder = local + os.sep + item
create_folder(local_folder)
# go down in ftp file structure and get a list of matching files
self.go_down(item)
file_list = self.get_file_index(db_type)
# update cmd values
all += len(file_list)
# update progressbar
count += float(1 / total)
update_progress(count)
# loop over files
for x in file_list:
                # test timestamps
if not self.is_actual(local_folder, x):
self.download_file(local_folder, x)
downloaded += 1
else:
actual += 1
# go up file structure
self.go_up()
# write status information to stdout
sys.stdout.write("\nDownloaded: %d Actual: %d Total: %d \n\n" %
(downloaded, actual, all))
# go up in file structure to root dir
self.go_up()
def download_file(self, local_folder, item):
'''download and extract a single file from ftp source'''
# specify save location
local_file = local_folder + os.sep + item
try:
# open local file stream
f = open(local_file, "wb")
# copy remote stream to local stream
self.get_connection().retrbinary("RETR " + item, f.write)
f.close()
except:
raise FtpDownloadException(item)
# check endings of files for typical compressing endings
if is_compressed(local_file):
# extract *.tgz files
if is_tgz(local_file):
extract_tar(local_file, local_folder)
# unpack gunzip files and ignore *.tar.gz (because of metacv db creation)
elif is_gz(local_file):
extract_gz(local_file)
else:
pass
def get_gi_map(self, gi_map):
'''download gi_taxid_prot.dmp.gz from ncbi server and extract the complete file'''
local_version = self.DOWNLOAD_FOLDER + os.sep + gi_map
# test for actual or existing file
if not os.path.exists(local_version) or not self.is_actual(self.DOWNLOAD_FOLDER, gi_map):
try:
sys.stdout.write("Download %s from %s ...\n" % (gi_map, self.FTP_SERVER))
# download file
f = open(local_version, "wb")
self.get_connection().retrbinary("RETR " + gi_map, f.write)
f.close()
except:
raise FtpDownloadException(gi_map)
        else:
            # NOTE: the original constructs ActualException without raising it
            # (here and in get_taxdump/get_idmapping below), so an up-to-date
            # file simply falls through to the extraction step
            ActualException(gi_map)
        # extract file, if no extracted content exists
        # (str.strip(name, '.gz') would strip the *characters* '.', 'g', 'z'
        # from both ends, mangling names like 'gi_taxid_prot.dmp.gz', so
        # slice off the suffix instead)
        extracted = local_version[:-len('.gz')] if local_version.endswith('.gz') else local_version
        if not os.path.exists(extracted):
            extract_gz(local_version, self.DOWNLOAD_FOLDER)
        return os.path.abspath(extracted)
def get_taxdump(self, taxdump):
'''download taxdump.tar.gz from ncbi server and extract needed files'''
local_taxdump = self.DOWNLOAD_FOLDER + os.sep + taxdump
# test for actual or existing file
if not os.path.exists(local_taxdump) or not self.is_actual(self.DOWNLOAD_FOLDER, taxdump):
try:
sys.stdout.write("Download %s from %s ...\n" % (taxdump, self.FTP_SERVER))
# download file
f = open(local_taxdump, "wb")
self.get_connection().retrbinary("RETR " + taxdump, f.write)
f.close()
except:
raise FtpDownloadException(taxdump)
else:
ActualException(taxdump)
# extract parts of files , if no extracted content exists
if not os.path.exists(self.DOWNLOAD_FOLDER + os.sep + 'names.dmp'):
try:
with tarfile.open(local_taxdump) as tar:
# extract only names.dmp and nodes.dmp from taxdump
for tarinfo in tar:
if tarinfo.name in 'names.dmp':
tar.extract(tarinfo, self.DOWNLOAD_FOLDER)
if tarinfo.name in 'nodes.dmp':
tar.extract(tarinfo, self.DOWNLOAD_FOLDER)
tar.close()
except:
raise ExtractionException(taxdump)
return [os.path.abspath(self.DOWNLOAD_FOLDER + os.sep + 'names.dmp'),
os.path.abspath(self.DOWNLOAD_FOLDER + os.sep + 'nodes.dmp')]
def get_idmapping(self, idmapping):
'''download idmapping.dat.gz from uniprot server and extract the complete file'''
local_idmapping = self.DOWNLOAD_FOLDER + os.sep + idmapping
# test for actual or existing file
if not os.path.exists(local_idmapping) or not self.is_actual(self.DOWNLOAD_FOLDER, idmapping):
try:
sys.stdout.write("Download %s from %s ...\n" % (idmapping, self.FTP_SERVER))
# download file
f = open(local_idmapping, "wb")
self.get_connection().retrbinary("RETR " + idmapping, f.write)
f.close()
except:
raise FtpDownloadException(idmapping)
else:
ActualException(idmapping)
# extract file, if no extracted content exists
        # (avoid str.strip(name, '.gz'), which strips characters rather than
        # the suffix)
        extracted = local_idmapping[:-len('.gz')] if local_idmapping.endswith('.gz') else local_idmapping
        if not os.path.exists(extracted):
            try:
                extract_gz(local_idmapping, self.DOWNLOAD_FOLDER)
            except:
                raise ExtractionException(idmapping)
        return os.path.abspath(extracted)
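# A minimal usage sketch (hypothetical server and paths, not part of this
# module):
#
#   ftp = ftp_functions('ftp.example.org', '/pub/db', 'downloads', False)
#   ftp.connect()
#   ftp.go_to_root()
#   for folder in ftp.get_folder_index('/pub/db'):
#       ftp.download_folder(folder, 'prot')
#   ftp.close()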
|
{
"content_hash": "ef20d1244617699b51f155e722760ec9",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 102,
"avg_line_length": 41.51652892561984,
"alnum_prop": 0.5602667462924256,
"repo_name": "psikon/pyBlastDB",
"id": "9dbad579268a6219da224b0af749c5180f6d8053",
"size": "10047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ftp_functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28404"
}
],
"symlink_target": ""
}
|
__name__ = 'datatableview'
__author__ = 'Tim Valenta'
__version_info__ = (0, 8, 3)
__version__ = '.'.join(map(str, __version_info__))
__date__ = '2013/11/14 2:00:00 PM'
__credits__ = ['Tim Valenta', 'Steven Klass']
__license__ = 'See the file LICENSE.txt for licensing information.'
|
{
"content_hash": "d701be3d0c5a856c2077031652a846bf",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 67,
"avg_line_length": 40.42857142857143,
"alnum_prop": 0.607773851590106,
"repo_name": "annabed/django-datatable-view",
"id": "43ccc3ba69b3b0bc6dc2cd54335edde0a7f5bbbe",
"size": "310",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "datatableview/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "17075"
},
{
"name": "HTML",
"bytes": "41334"
},
{
"name": "JavaScript",
"bytes": "6778"
},
{
"name": "Python",
"bytes": "143918"
}
],
"symlink_target": ""
}
|
from datetime import date, datetime, timedelta
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext as _
User = settings.AUTH_USER_MODEL
DURATION = 30
# summer starts 1st June, ends 15th August
SUMMER = ((6, 1), (8, 15))
# winter starts 1st December, ends 15th January
WINTER = ((12, 1), (1, 15))
def get_expiration_date(user):
if user:
marks = MarkUser.objects.filter(user=user).order_by('-expiration_date')
if marks:
return marks[0].expiration_date
return None
class MarksManager(models.Manager):
@staticmethod
def all_active():
return Mark.objects.filter(given_to__expiration_date__gt=timezone.now().date())
@staticmethod
def active(user):
return MarkUser.objects.filter(user=user).filter(expiration_date__gt=timezone.now().date())
@staticmethod
def inactive(user=None):
return MarkUser.objects.filter(user=user).filter(expiration_date__lte=timezone.now().date())
class Mark(models.Model):
CATEGORY_CHOICES = (
(0, _("Ingen")),
(1, _("Sosialt")),
(2, _("Bedriftspresentasjon")),
(3, _("Kurs")),
(4, _("Tilbakemelding")),
(5, _("Kontoret")),
(6, _("Betaling")),
)
title = models.CharField(_("tittel"), max_length=155)
added_date = models.DateField(_("utdelt dato"))
given_by = models.ForeignKey(
User,
related_name="mark_given_by",
verbose_name=_("gitt av"),
editable=False,
null=True,
blank=True,
on_delete=models.CASCADE
)
last_changed_date = models.DateTimeField(_("sist redigert"), auto_now=True, editable=False)
last_changed_by = models.ForeignKey(
User,
related_name="marks_last_changed_by",
verbose_name=_("sist redigert av"),
editable=False,
null=True,
blank=False,
on_delete=models.CASCADE
)
description = models.CharField(
_("beskrivelse"),
max_length=255,
help_text=_(
"Hvis dette feltet etterlates blankt vil det fylles med en standard grunn for typen prikk som er valgt."
),
blank=True
)
category = models.SmallIntegerField(_("kategori"), choices=CATEGORY_CHOICES, default=0)
# managers
objects = models.Manager() # default manager
marks = MarksManager() # active marks manager
def __str__(self):
return _("Prikk for %s") % self.title
def save(self, *args, **kwargs):
if not self.added_date:
self.added_date = timezone.now().date()
super(Mark, self).save(*args, **kwargs)
def delete(self, **kwargs):
given_to = [mu.user for mu in self.given_to.all()]
super(Mark, self).delete()
for user in given_to:
_fix_mark_history(user)
class Meta(object):
verbose_name = _("Prikk")
verbose_name_plural = _("Prikker")
permissions = (
('view_mark', 'View Mark'),
)
class MarkUser(models.Model):
"""
One entry for a user that has received a mark.
"""
mark = models.ForeignKey(Mark, related_name="given_to", on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
expiration_date = models.DateField(_("utløpsdato"), editable=False)
def save(self, *args, **kwargs):
run_history_update = False
if not self.expiration_date:
self.expiration_date = timezone.now().date()
run_history_update = True
super(MarkUser, self).save(*args, **kwargs)
if run_history_update:
_fix_mark_history(self.user)
def delete(self):
super(MarkUser, self).delete()
_fix_mark_history(self.user)
def __str__(self):
return _("Mark entry for user: %s") % self.user.get_full_name()
class Meta:
unique_together = ("user", "mark")
ordering = ('expiration_date',)
permissions = (
('view_userentry', 'View UserEntry'),
)
def _fix_mark_history(user):
"""
    Goes through a user's complete mark history and resets all expiration dates.
    The reason for doing it this way is that the mark rules now insist on marks building
    on previous expiration dates if such exist. Instead of turning the entire mark database
    into a linked list structure, we can guarantee the integrity of the
    expiration dates by running this whenever:
* new Mark is saved or deleted
* a new MarkUser entry is made
* an existing MarkUser entry is deleted
"""
markusers = MarkUser.objects.filter(user=user).order_by('mark__added_date')
last_expiry_date = None
for entry in markusers:
# If there's a last_expiry date, it means a mark has been processed already.
# If that expiration date is within a DURATION of this added date, build on it.
if last_expiry_date and entry.mark.added_date - timedelta(days=DURATION) < last_expiry_date:
entry.expiration_date = _get_with_duration_and_vacation(last_expiry_date)
        # If there is no last_expiry_date or the last expiry date is over a DURATION old,
        # we add DURATION days from the added date of the mark.
else:
entry.expiration_date = _get_with_duration_and_vacation(entry.mark.added_date)
entry.save()
last_expiry_date = entry.expiration_date
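# Worked example for _fix_mark_history (hypothetical dates, DURATION = 30,
# no vacation windows involved): a mark added 2015-03-10 expires 2015-04-09;
# a second mark added 2015-03-20 lies within 30 days of that expiry, so it
# builds on it and expires 2015-05-09 rather than 30 days after its own
# added_date.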
def _get_with_duration_and_vacation(added_date=None):
    """
    Checks whether the span of a mark's duration needs to have vacation durations added.
    """
    # Resolve the default at call time; a ``timezone.now()`` default argument
    # would be evaluated once at import time and then frozen.
    if added_date is None:
        added_date = timezone.now()
    if isinstance(added_date, datetime):
        added_date = added_date.date()
# Add the duration
expiry_date = added_date + timedelta(days=DURATION)
# Set up the summer and winter vacations
summer_start_date = date(added_date.year, SUMMER[0][0], SUMMER[0][1])
summer_end_date = date(added_date.year, SUMMER[1][0], SUMMER[1][1])
first_winter_start_date = date(added_date.year, WINTER[0][0], WINTER[0][1])
first_winter_end_date = date(added_date.year + 1, WINTER[1][0], WINTER[1][1])
second_winter_end_date = date(added_date.year, WINTER[1][0], WINTER[1][1])
# If we're in the middle of summer, add the days remaining of summer
if summer_start_date < added_date < summer_end_date:
expiry_date += timedelta(days=(summer_end_date - added_date).days)
# If the number of days between added_date and the beginning of summer vacation is less
# than the duration, we need to add the length of summer to the expiry date
elif 0 < (summer_start_date - added_date).days < DURATION:
expiry_date += timedelta(days=(summer_end_date - summer_start_date).days)
# Same for middle of winter vacation, which will be at the end of the year
elif first_winter_start_date < added_date < first_winter_end_date:
expiry_date += timedelta(days=(first_winter_end_date - added_date).days)
# And for before the vacation
elif 0 < (first_winter_start_date - added_date).days < DURATION:
expiry_date += timedelta(days=(first_winter_end_date - first_winter_start_date).days)
    # Then we need to check the edge case where now is between New Year's and the end of the winter vacation
elif second_winter_end_date > added_date:
expiry_date += timedelta(days=(second_winter_end_date - added_date).days)
return expiry_date
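# Example (hypothetical date, DURATION = 30): a mark added 2015-05-15 would
# naively expire 2015-06-14, but the summer vacation (1st June - 15th August)
# starts within the duration, so its full length (75 days) is added and the
# expiry becomes 2015-08-28.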
class Suspension(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
title = models.CharField(_('tittel'), max_length=64)
description = models.CharField(_("beskrivelse"), max_length=255)
active = models.BooleanField(default=True)
added_date = models.DateTimeField(auto_now=True, editable=False)
expiration_date = models.DateField(_("utløpsdato"), null=True, blank=True)
# Using id because foreign key to Payment caused circular dependencies
payment_id = models.IntegerField(null=True, blank=True)
def __str__(self):
return "Suspension: " + str(self.user)
# TODO URL
|
{
"content_hash": "2d94d365c6dfe54855b200bc9fa1e587",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 116,
"avg_line_length": 36.588235294117645,
"alnum_prop": 0.6446945337620579,
"repo_name": "dotKom/onlineweb4",
"id": "423eb0fa2765042f549325fdb5400d6fc72d6cef",
"size": "8113",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "apps/marks/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "71414"
},
{
"name": "HTML",
"bytes": "463894"
},
{
"name": "JavaScript",
"bytes": "745404"
},
{
"name": "Python",
"bytes": "925584"
},
{
"name": "Shell",
"bytes": "3130"
},
{
"name": "Standard ML",
"bytes": "1088"
}
],
"symlink_target": ""
}
|
"""
WSGI config for travel_itinerary project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "travel_itinerary.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
{
"content_hash": "ff8d46541beb1003a25d9cc8343b3745",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 29.071428571428573,
"alnum_prop": 0.7788697788697788,
"repo_name": "lwl27/travel_itinerary",
"id": "7685fa7f3a42703fb167483eb04126bca5e17294",
"size": "407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "travel_itinerary/travel_itinerary/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1140"
},
{
"name": "Python",
"bytes": "6380"
}
],
"symlink_target": ""
}
|
class Security:
def __init__(self):
import os
env = os.environ
if env.has_key('PYTHON_KEYFILE'):
keyfile = env['PYTHON_KEYFILE']
else:
keyfile = '.python_keyfile'
if env.has_key('HOME'):
keyfile = os.path.join(env['HOME'], keyfile)
if not os.path.exists(keyfile):
import sys
for dir in sys.path:
kf = os.path.join(dir, keyfile)
if os.path.exists(kf):
keyfile = kf
break
try:
self._key = eval(open(keyfile).readline())
except IOError:
raise IOError, "python keyfile %s: cannot open" % keyfile
def _generate_challenge(self):
import random
return random.randint(100, 100000)
def _compare_challenge_response(self, challenge, response):
return self._encode_challenge(challenge) == response
def _encode_challenge(self, challenge):
p, m = self._key
return pow(long(challenge), p, m)
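# A minimal sketch of the intended challenge/response round trip (names are
# illustrative; assumes both ends read the same keyfile):
#
#   server = Security()
#   challenge = server._generate_challenge()
#   response = Security()._encode_challenge(challenge)   # client side
#   assert server._compare_challenge_response(challenge, response)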
|
{
"content_hash": "8154e607829b92d81ea3ccf62e9ecb46",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 60,
"avg_line_length": 26,
"alnum_prop": 0.662004662004662,
"repo_name": "OS2World/APP-INTERNET-torpak_2",
"id": "0ffd511a079d5a0895d8207d2e319da1b621b16f",
"size": "858",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Demo/pdist/security.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models
class Server(models.Model):
name = models.CharField(
null=True,
blank=True,
max_length=500
)
ip = models.CharField(
null=True,
blank=True,
max_length=500
)
|
{
"content_hash": "8450b862c12a0a574d1d3195c7d90058",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 39,
"avg_line_length": 17.235294117647058,
"alnum_prop": 0.5836177474402731,
"repo_name": "prontodev/stillwithus",
"id": "e81abbb9b1e7d9a34e87bd52a5023781c8e90382",
"size": "293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stillwithus/servers/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1229"
},
{
"name": "Python",
"bytes": "16436"
}
],
"symlink_target": ""
}
|
"""Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. The interfaces listed
as "safe" just below can be used without fear of race conditions.
Those listed as "unsafe" cannot, and are provided for backward
compatibility only.
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
template - the default prefix for all temporary names.
You may change this to control the default prefix.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"SpooledTemporaryFile",
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir"
]
# Imports.
import io as _io
import os as _os
import errno as _errno
from random import Random as _Random
try:
from cStringIO import StringIO as _StringIO
except ImportError:
from StringIO import StringIO as _StringIO
try:
import fcntl as _fcntl
except ImportError:
def _set_cloexec(fd):
pass
else:
def _set_cloexec(fd):
try:
flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
except IOError:
pass
else:
# flags read successfully, modify
flags |= _fcntl.FD_CLOEXEC
_fcntl.fcntl(fd, _fcntl.F_SETFD, flags)
try:
import thread as _thread
except ImportError:
import dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOINHERIT'):
_text_openflags |= _os.O_NOINHERIT
if hasattr(_os, 'O_NOFOLLOW'):
_text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
_bin_openflags |= _os.O_BINARY
if hasattr(_os, 'TMP_MAX'):
TMP_MAX = _os.TMP_MAX
else:
TMP_MAX = 10000
template = "tmp"
# Internal routines.
_once_lock = _allocate_lock()
if hasattr(_os, "lstat"):
_stat = _os.lstat
elif hasattr(_os, "stat"):
_stat = _os.stat
else:
# Fallback. All we need is something that raises os.error if the
# file doesn't exist.
def _stat(fn):
try:
f = open(fn)
except IOError:
raise _os.error
f.close()
def _exists(fn):
try:
_stat(fn)
except _os.error:
return False
else:
return True
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
into file names. Each string is six characters long. Multiple
threads can safely use the same instance at the same time.
_RandomNameSequence is an iterator."""
characters = ("abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
"0123456789_")
def __init__(self):
self.mutex = _allocate_lock()
self.normcase = _os.path.normcase
@property
def rng(self):
cur_pid = _os.getpid()
if cur_pid != getattr(self, '_rng_pid', None):
self._rng = _Random()
self._rng_pid = cur_pid
return self._rng
def __iter__(self):
return self
def next(self):
m = self.mutex
c = self.characters
choose = self.rng.choice
m.acquire()
try:
letters = [choose(c) for dummy in "123456"]
finally:
m.release()
return self.normcase(''.join(letters))
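# Illustrative note (not part of the original module): each call to next()
# yields a fresh six-character name such as 'k3b_Zq', and the rng property
# reseeds after a fork, so child processes do not repeat the parent's
# name sequence.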
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'riscos':
dirname = _os.getenv('Wimp$ScrapDir')
if dirname: dirlist.append(dirname)
elif _os.name == 'nt':
dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (AttributeError, _os.error):
dirlist.append(_os.curdir)
return dirlist
def _get_default_tempdir():
"""Calculate the default directory to use for temporary files.
This routine should be called exactly once.
We determine whether or not a candidate temp dir is usable by
trying to create and write to a file in that directory. If this
is successful, the test file is deleted. To prevent denial of
service, the name of the test file must be randomized."""
namer = _RandomNameSequence()
dirlist = _candidate_tempdir_list()
flags = _text_openflags
for dir in dirlist:
if dir != _os.curdir:
dir = _os.path.normcase(_os.path.abspath(dir))
# Try only a few names per directory.
for seq in xrange(100):
name = namer.next()
filename = _os.path.join(dir, name)
try:
fd = _os.open(filename, flags, 0o600)
try:
try:
with _io.open(fd, 'wb', closefd=False) as fp:
fp.write(b'blat')
finally:
_os.close(fd)
finally:
_os.unlink(filename)
return dir
except (OSError, IOError) as e:
if e.args[0] != _errno.EEXIST:
break # no point trying more names in this directory
pass
raise IOError, (_errno.ENOENT,
("No usable temporary directory found in %s" % dirlist))
_name_sequence = None
def _get_candidate_names():
"""Common setup sequence for all user-callable interfaces."""
global _name_sequence
if _name_sequence is None:
_once_lock.acquire()
try:
if _name_sequence is None:
_name_sequence = _RandomNameSequence()
finally:
_once_lock.release()
return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags):
"""Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""
names = _get_candidate_names()
for seq in xrange(TMP_MAX):
name = names.next()
file = _os.path.join(dir, pre + name + suf)
try:
fd = _os.open(file, flags, 0600)
_set_cloexec(fd)
return (fd, _os.path.abspath(file))
except OSError, e:
if e.errno == _errno.EEXIST:
continue # try again
if _os.name == 'nt' and e.errno == _errno.EACCES:
# On windows, when a directory with the chosen name already
# exists, EACCES error code is returned instead of EEXIST.
continue
raise
raise IOError, (_errno.EEXIST, "No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
"""Accessor for tempdir.template."""
return template
tempdir = None
def gettempdir():
"""Accessor for tempfile.tempdir."""
global tempdir
if tempdir is None:
_once_lock.acquire()
try:
if tempdir is None:
tempdir = _get_default_tempdir()
finally:
_once_lock.release()
return tempdir
def mkstemp(suffix="", prefix=template, dir=None, text=False):
"""User-callable function to create and return a unique temporary
file. The return value is a pair (fd, name) where fd is the
file descriptor returned by os.open, and name is the filename.
If 'suffix' is specified, the file name will end with that suffix,
otherwise there will be no suffix.
If 'prefix' is specified, the file name will begin with that prefix,
otherwise a default prefix is used.
If 'dir' is specified, the file will be created in that directory,
otherwise a default directory is used.
If 'text' is specified and true, the file is opened in text
mode. Else (the default) the file is opened in binary mode. On
some operating systems, this makes no difference.
The file is readable and writable only by the creating user ID.
If the operating system uses permission bits to indicate whether a
file is executable, the file is executable by no one. The file
descriptor is not inherited by children of this process.
Caller is responsible for deleting the file when done with it.
"""
if dir is None:
dir = gettempdir()
if text:
flags = _text_openflags
else:
flags = _bin_openflags
return _mkstemp_inner(dir, prefix, suffix, flags)
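# Editor's note: an editor-added sketch of the usual mkstemp() calling
# pattern, not part of the original module. mkstemp() hands back a raw
# OS-level descriptor, so the caller must close it and unlink the file;
# the suffix/prefix values below are arbitrary.
def _demo_mkstemp_usage():
    fd, path = mkstemp(suffix=".txt", prefix="demo-")
    try:
        _os.write(fd, b"hello")
    finally:
        _os.close(fd)
        _os.unlink(path)  # caller is responsible for deletion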
def mkdtemp(suffix="", prefix=template, dir=None):
"""User-callable function to create and return a unique temporary
directory. The return value is the pathname of the directory.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
The directory is readable, writable, and searchable only by the
creating user.
Caller is responsible for deleting the directory when done with it.
"""
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in xrange(TMP_MAX):
name = names.next()
file = _os.path.join(dir, prefix + name + suffix)
try:
_os.mkdir(file, 0700)
return file
except OSError, e:
if e.errno == _errno.EEXIST:
continue # try again
raise
raise IOError, (_errno.EEXIST, "No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
"""User-callable function to return a unique temporary file name. The
file is not created.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
This function is unsafe and should not be used. The file name
refers to a file that did not exist at some point, but by the time
you get around to creating it, someone else may have beaten you to
the punch.
"""
## from warnings import warn as _warn
## _warn("mktemp is a potential security risk to your program",
## RuntimeWarning, stacklevel=2)
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in xrange(TMP_MAX):
name = names.next()
file = _os.path.join(dir, prefix + name + suffix)
if not _exists(file):
return file
raise IOError, (_errno.EEXIST, "No usable temporary filename found")
class _TemporaryFileWrapper:
"""Temporary file wrapper
This class provides a wrapper around files opened for
temporary use. In particular, it seeks to automatically
remove the file when it is no longer needed.
"""
def __init__(self, file, name, delete=True):
self.file = file
self.name = name
self.close_called = False
self.delete = delete
def __getattr__(self, name):
# Attribute lookups are delegated to the underlying file
# and cached for non-numeric results
# (i.e. methods are cached, closed and friends are not)
file = self.__dict__['file']
a = getattr(file, name)
if not issubclass(type(a), type(0)):
setattr(self, name, a)
return a
# The underlying __enter__ method returns the wrong object
# (self.file) so override it to return the wrapper
def __enter__(self):
self.file.__enter__()
return self
# NT provides delete-on-close as a primitive, so we don't need
# the wrapper to do anything special. We still use it so that
# file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
if _os.name != 'nt':
# Cache the unlinker so we don't get spurious errors at
# shutdown when the module-level "os" is None'd out. Note
# that this must be referenced as self.unlink, because the
# name TemporaryFileWrapper may also get None'd out before
# __del__ is called.
unlink = _os.unlink
def close(self):
if not self.close_called:
self.close_called = True
self.file.close()
if self.delete:
self.unlink(self.name)
def __del__(self):
self.close()
# Need to trap __exit__ as well to ensure the file gets
# deleted when used in a with statement
def __exit__(self, exc, value, tb):
result = self.file.__exit__(exc, value, tb)
self.close()
return result
else:
def __exit__(self, exc, value, tb):
self.file.__exit__(exc, value, tb)
def NamedTemporaryFile(mode='w+b', bufsize=-1, suffix="",
prefix=template, dir=None, delete=True):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to os.fdopen (default "w+b").
'bufsize' -- the buffer size argument to os.fdopen (default -1).
'delete' -- whether the file is deleted on close (default True).
The file is created as mkstemp() would do it.
Returns an object with a file-like interface; the name of the file
is accessible as file.name. The file will be automatically deleted
when it is closed unless the 'delete' argument is set to False.
"""
if dir is None:
dir = gettempdir()
if 'b' in mode:
flags = _bin_openflags
else:
flags = _text_openflags
# Setting O_TEMPORARY in the flags causes the OS to delete
# the file when it is closed. This is only supported by Windows.
if _os.name == 'nt' and delete:
flags |= _os.O_TEMPORARY
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
file = _os.fdopen(fd, mode, bufsize)
return _TemporaryFileWrapper(file, name, delete)
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
# On non-POSIX and Cygwin systems, assume that we cannot unlink a file
# while it is open.
TemporaryFile = NamedTemporaryFile
else:
def TemporaryFile(mode='w+b', bufsize=-1, suffix="",
prefix=template, dir=None):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to os.fdopen (default "w+b").
'bufsize' -- the buffer size argument to os.fdopen (default -1).
The file is created as mkstemp() would do it.
Returns an object with a file-like interface. The file has no
name, and will cease to exist when it is closed.
"""
if dir is None:
dir = gettempdir()
if 'b' in mode:
flags = _bin_openflags
else:
flags = _text_openflags
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
try:
_os.unlink(name)
return _os.fdopen(fd, mode, bufsize)
except:
_os.close(fd)
raise
class SpooledTemporaryFile:
"""Temporary file wrapper, specialized to switch from
StringIO to a real file when it exceeds a certain size or
when a fileno is needed.
"""
_rolled = False
def __init__(self, max_size=0, mode='w+b', bufsize=-1,
suffix="", prefix=template, dir=None):
self._file = _StringIO()
self._max_size = max_size
self._rolled = False
self._TemporaryFileArgs = (mode, bufsize, suffix, prefix, dir)
def _check(self, file):
if self._rolled: return
max_size = self._max_size
if max_size and file.tell() > max_size:
self.rollover()
def rollover(self):
if self._rolled: return
file = self._file
newfile = self._file = TemporaryFile(*self._TemporaryFileArgs)
del self._TemporaryFileArgs
newfile.write(file.getvalue())
newfile.seek(file.tell(), 0)
self._rolled = True
# The method caching trick from NamedTemporaryFile
# won't work here, because _file may change from a
# _StringIO instance to a real file. So we list
# all the methods directly.
# Context management protocol
def __enter__(self):
if self._file.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc, value, tb):
self._file.close()
# file protocol
def __iter__(self):
return self._file.__iter__()
def close(self):
self._file.close()
@property
def closed(self):
return self._file.closed
def fileno(self):
self.rollover()
return self._file.fileno()
def flush(self):
self._file.flush()
def isatty(self):
return self._file.isatty()
@property
def mode(self):
try:
return self._file.mode
except AttributeError:
return self._TemporaryFileArgs[0]
@property
def name(self):
try:
return self._file.name
except AttributeError:
return None
    def next(self):
        # Call the underlying iterator rather than returning the bound
        # method itself (the original line returned ``self._file.next``,
        # which hands the caller a method object instead of the next line).
        return self._file.next()
def read(self, *args):
return self._file.read(*args)
def readline(self, *args):
return self._file.readline(*args)
def readlines(self, *args):
return self._file.readlines(*args)
def seek(self, *args):
self._file.seek(*args)
@property
def softspace(self):
return self._file.softspace
def tell(self):
return self._file.tell()
def truncate(self):
self._file.truncate()
def write(self, s):
file = self._file
rv = file.write(s)
self._check(file)
return rv
def writelines(self, iterable):
file = self._file
rv = file.writelines(iterable)
self._check(file)
return rv
def xreadlines(self, *args):
if hasattr(self._file, 'xreadlines'): # real file
return iter(self._file)
else: # StringIO()
return iter(self._file.readlines(*args))
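# Editor's note: an editor-added sketch, not part of the original module.
# Data stays in the in-memory _StringIO until max_size is exceeded (or
# fileno() is requested), at which point rollover() migrates it to a real
# temporary file.
def _demo_spooled_usage():
    spool = SpooledTemporaryFile(max_size=10)
    spool.write(b"tiny")           # still buffered in memory
    spool.write(b" but growing")   # pushes tell() past max_size -> rollover()
    spool.seek(0)
    data = spool.read()
    spool.close()
    return data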
|
{
"content_hash": "fa78390c30514c6ae96c878deab6e8a1",
"timestamp": "",
"source": "github",
"line_count": 622,
"max_line_length": 78,
"avg_line_length": 30.909967845659164,
"alnum_prop": 0.579735774472069,
"repo_name": "jt6562/XX-Net",
"id": "e6843faee887b00d2a81304a28c4a573d9fb080a",
"size": "19226",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python27/1.0/lib/tempfile.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "132"
},
{
"name": "CSS",
"bytes": "85439"
},
{
"name": "HTML",
"bytes": "136897"
},
{
"name": "JavaScript",
"bytes": "345998"
},
{
"name": "PHP",
"bytes": "10671"
},
{
"name": "Python",
"bytes": "6958955"
},
{
"name": "Shell",
"bytes": "4007"
},
{
"name": "Visual Basic",
"bytes": "388"
}
],
"symlink_target": ""
}
|
import sys
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'source_file',
nargs='?',
type=argparse.FileType('r'),
default=sys.stdin
)
parser.add_argument(
'-v',
'--version',
action='version',
version='%(prog)s 1.3'
)
parser.add_argument(
'-f',
dest='target_file',
type=argparse.FileType('w'),
help='output file',
# required=True,
default=sys.stdout
)
args = parser.parse_args()
print(args)
# parser.print_help()
if __name__ == '__main__':
main()
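# Editor's note: an editor-added illustration, not part of the original
# script. parse_args() accepts an explicit argv list, which makes the parser
# easy to exercise without touching sys.argv; the file names below are
# hypothetical, and FileType is swapped for plain strings so nothing is
# opened on disk.
def demo_parse():
    parser = argparse.ArgumentParser()
    parser.add_argument('source_file', nargs='?', default='-')
    parser.add_argument('-f', dest='target_file', help='output file', default='-')
    return parser.parse_args(['-f', 'out.txt', 'in.txt'])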
|
{
"content_hash": "00f048f06fb3361a4b72e149e4ffec03",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 38,
"avg_line_length": 17.45945945945946,
"alnum_prop": 0.5108359133126935,
"repo_name": "wangyanxi/web-demos",
"id": "5def48835066ca7a4daac1df86bd7fdb77dc15df",
"size": "694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/arg/arg.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44475"
},
{
"name": "HTML",
"bytes": "30795"
},
{
"name": "JavaScript",
"bytes": "86662"
},
{
"name": "Objective-C",
"bytes": "2160"
},
{
"name": "PHP",
"bytes": "2543"
},
{
"name": "Python",
"bytes": "2242"
},
{
"name": "Shell",
"bytes": "224"
}
],
"symlink_target": ""
}
|
'''
@author: Frank
'''
import unittest
from kvmagent import kvmagent
from kvmagent.plugins import vm_plugin
class Test(unittest.TestCase):
def testName(self):
cmd = vm_plugin.StartVmCmd()
cmd.bootDev = 'hd'
cmd.cpuNum = 2
cmd.cpuSpeed = 2600
cmd.dataVolumePath = ['/tmp/data.qcow2']
cmd.rootVolumePath = '/tmp/root.qcow2'
cmd.memory = 2048
cmd.nics = []
nic1 = vm_plugin.NicTO()
nic1.mac = 'xxxxxxxx'
nic1.bridgeName = 'br0'
nic1.deviceId = 0
cmd.nics.append(nic1)
cmd.vmName = 'test'
cmd.vmUuid = 'uuid'
vm = vm_plugin.Vm()
vm.boot_dev = cmd.bootDev
vm.cpu_num = cmd.cpuNum
vm.cpu_speed = cmd.cpuSpeed
vm.memory = cmd.memory
vm.name = cmd.vmName
vm.uuid = cmd.vmUuid
vm.nics = cmd.nics
vm.root_volume = cmd.rootVolumePath
vm.data_volumes = cmd.dataVolumePath
vm.qemu_args = ['-append', 'mgmtNicIp=192.168.0.216', 'mgmtNicNetmask=255.255.255.0']
print vm.to_xml(True)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
{
"content_hash": "ee5bd1f0fe85a26f181061520d9e297c",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 93,
"avg_line_length": 26.065217391304348,
"alnum_prop": 0.5654712260216848,
"repo_name": "mrwangxc/zstack-utility",
"id": "7f56277a65f5ace26babc5b36131541277bd0883",
"size": "1199",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "kvmagent/kvmagent/test/test_vm_xml.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "1147"
},
{
"name": "HTML",
"bytes": "4445"
},
{
"name": "Pascal",
"bytes": "187"
},
{
"name": "Puppet",
"bytes": "10417"
},
{
"name": "Python",
"bytes": "2222290"
},
{
"name": "Shell",
"bytes": "234492"
}
],
"symlink_target": ""
}
|
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
{
"content_hash": "64cdbaf6ac9ec0b5e328c7dcbe445414",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 42,
"avg_line_length": 26.5,
"alnum_prop": 0.6792452830188679,
"repo_name": "aniketpuranik/pynet_test",
"id": "d97c024c5baa8c01debb3571eec83fd132922861",
"size": "152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applied_python/bin/django-admin.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1005"
},
{
"name": "Python",
"bytes": "604699"
},
{
"name": "Shell",
"bytes": "3745"
}
],
"symlink_target": ""
}
|
"""
RimuHosting Driver
"""
try:
import simplejson as json
except ImportError:
import json
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.common.types import InvalidCredsError
from libcloud.compute.types import Provider, NodeState
from libcloud.compute.base import NodeDriver, NodeSize, Node, NodeLocation
from libcloud.compute.base import NodeImage
API_CONTEXT = '/r'
API_HOST = 'rimuhosting.com'
class RimuHostingException(Exception):
"""
Exception class for RimuHosting driver
"""
def __str__(self):
return self.args[0]
def __repr__(self):
return "<RimuHostingException '%s'>" % (self.args[0])
class RimuHostingResponse(JsonResponse):
"""
Response Class for RimuHosting driver
"""
def success(self):
if self.status == 403:
raise InvalidCredsError()
return True
def parse_body(self):
try:
js = super(RimuHostingResponse, self).parse_body()
keys = list(js.keys())
if js[keys[0]]['response_type'] == "ERROR":
raise RimuHostingException(
js[keys[0]]['human_readable_message']
)
return js[keys[0]]
except KeyError:
raise RimuHostingException('Could not parse body: %s'
% (self.body))
class RimuHostingConnection(ConnectionKey):
"""
Connection class for the RimuHosting driver
"""
api_context = API_CONTEXT
host = API_HOST
port = 443
responseCls = RimuHostingResponse
def __init__(self, key, secure=True):
# override __init__ so that we can set secure of False for testing
ConnectionKey.__init__(self, key, secure)
def add_default_headers(self, headers):
# We want JSON back from the server. Could be application/xml
# (but JSON is better).
headers['Accept'] = 'application/json'
# Must encode all data as json, or override this header.
headers['Content-Type'] = 'application/json'
headers['Authorization'] = 'rimuhosting apikey=%s' % (self.key)
return headers
def request(self, action, params=None, data='', headers=None,
method='GET'):
if not headers:
headers = {}
if not params:
params = {}
# Override this method to prepend the api_context
return ConnectionKey.request(self, self.api_context + action,
params, data, headers, method)
class RimuHostingNodeDriver(NodeDriver):
"""
RimuHosting node driver
"""
type = Provider.RIMUHOSTING
name = 'RimuHosting'
website = 'http://rimuhosting.com/'
connectionCls = RimuHostingConnection
features = {'create_node': ['password']}
def __init__(self, key, host=API_HOST, port=443,
api_context=API_CONTEXT, secure=True):
"""
:param key: API key (required)
:type key: ``str``
:param host: hostname for connection
:type host: ``str``
:param port: Override port used for connections.
:type port: ``int``
:param api_context: Optional API context.
:type api_context: ``str``
        :param secure: Whether to use HTTPS or HTTP.
:type secure: ``bool``
:rtype: ``None``
"""
        # Pass in some extra vars so that the connection can be configured
        # with a custom host, port and API context.
self.key = key
self.secure = secure
self.connection = self.connectionCls(key, secure)
self.connection.host = host
self.connection.api_context = api_context
self.connection.port = port
self.connection.driver = self
self.connection.connect()
def _order_uri(self, node, resource):
        # Returns the order URI with its resource appended.
return "/orders/%s/%s" % (node.id, resource)
# TODO: Get the node state.
def _to_node(self, order):
n = Node(id=order['slug'],
name=order['domain_name'],
state=NodeState.RUNNING,
public_ips=(
[order['allocated_ips']['primary_ip']]
+ order['allocated_ips']['secondary_ips']),
private_ips=[],
driver=self.connection.driver,
extra={
'order_oid': order['order_oid'],
'monthly_recurring_fee': order.get(
'billing_info').get('monthly_recurring_fee')})
return n
def _to_size(self, plan):
return NodeSize(
id=plan['pricing_plan_code'],
name=plan['pricing_plan_description'],
ram=plan['minimum_memory_mb'],
disk=plan['minimum_disk_gb'],
bandwidth=plan['minimum_data_transfer_allowance_gb'],
price=plan['monthly_recurring_amt']['amt_usd'],
driver=self.connection.driver
)
def _to_image(self, image):
return NodeImage(id=image['distro_code'],
name=image['distro_description'],
driver=self.connection.driver)
def list_sizes(self, location=None):
# Returns a list of sizes (aka plans)
# Get plans. Note this is really just for libcloud.
# We are happy with any size.
if location is None:
location = ''
else:
location = ";dc_location=%s" % (location.id)
res = self.connection.request(
'/pricing-plans;server-type=VPS%s' % (location)).object
return list(map(lambda x: self._to_size(x), res['pricing_plan_infos']))
def list_nodes(self):
# Returns a list of Nodes
# Will only include active ones.
res = self.connection.request('/orders;include_inactive=N').object
return list(map(lambda x: self._to_node(x), res['about_orders']))
def list_images(self, location=None):
# Get all base images.
# TODO: add other image sources. (Such as a backup of a VPS)
# All Images are available for use at all locations
res = self.connection.request('/distributions').object
return list(map(lambda x: self._to_image(x), res['distro_infos']))
def reboot_node(self, node):
# Reboot
# PUT the state of RESTARTING to restart a VPS.
# All data is encoded as JSON
data = {'reboot_request': {'running_state': 'RESTARTING'}}
uri = self._order_uri(node, 'vps/running-state')
self.connection.request(uri, data=json.dumps(data), method='PUT')
# XXX check that the response was actually successful
return True
def destroy_node(self, node):
# Shutdown a VPS.
uri = self._order_uri(node, 'vps')
self.connection.request(uri, method='DELETE')
# XXX check that the response was actually successful
return True
def create_node(self, **kwargs):
"""Creates a RimuHosting instance
@inherits: :class:`NodeDriver.create_node`
:keyword name: Must be a FQDN. e.g example.com.
:type name: ``str``
:keyword ex_billing_oid: If not set,
a billing method is automatically picked.
:type ex_billing_oid: ``str``
:keyword ex_host_server_oid: The host server to set the VPS up on.
:type ex_host_server_oid: ``str``
:keyword ex_vps_order_oid_to_clone: Clone another VPS to use as
the image for the new VPS.
:type ex_vps_order_oid_to_clone: ``str``
:keyword ex_num_ips: Number of IPs to allocate. Defaults to 1.
:type ex_num_ips: ``int``
:keyword ex_extra_ip_reason: Reason for needing the extra IPs.
:type ex_extra_ip_reason: ``str``
:keyword ex_memory_mb: Memory to allocate to the VPS.
:type ex_memory_mb: ``int``
:keyword ex_disk_space_mb: Diskspace to allocate to the VPS.
Defaults to 4096 (4GB).
:type ex_disk_space_mb: ``int``
:keyword ex_disk_space_2_mb: Secondary disk size allocation.
Disabled by default.
:type ex_disk_space_2_mb: ``int``
:keyword ex_control_panel: Control panel to install on the VPS.
:type ex_control_panel: ``str``
"""
# Note we don't do much error checking in this because we
# expect the API to error out if there is a problem.
name = kwargs['name']
image = kwargs['image']
size = kwargs['size']
data = {
'instantiation_options': {
'domain_name': name,
'distro': image.id
},
'pricing_plan_code': size.id,
'vps_parameters': {}
}
if 'ex_control_panel' in kwargs:
data['instantiation_options']['control_panel'] = \
kwargs['ex_control_panel']
auth = self._get_and_check_auth(kwargs.get('auth'))
data['instantiation_options']['password'] = auth.password
if 'ex_billing_oid' in kwargs:
# TODO check for valid oid.
data['billing_oid'] = kwargs['ex_billing_oid']
if 'ex_host_server_oid' in kwargs:
data['host_server_oid'] = kwargs['ex_host_server_oid']
if 'ex_vps_order_oid_to_clone' in kwargs:
data['vps_order_oid_to_clone'] = \
kwargs['ex_vps_order_oid_to_clone']
if 'ex_num_ips' in kwargs and int(kwargs['ex_num_ips']) > 1:
if 'ex_extra_ip_reason' not in kwargs:
raise RimuHostingException(
                    'Need a reason for having an extra IP')
else:
if 'ip_request' not in data:
data['ip_request'] = {}
data['ip_request']['num_ips'] = int(kwargs['ex_num_ips'])
data['ip_request']['extra_ip_reason'] = \
kwargs['ex_extra_ip_reason']
if 'ex_memory_mb' in kwargs:
data['vps_parameters']['memory_mb'] = kwargs['ex_memory_mb']
if 'ex_disk_space_mb' in kwargs:
data['vps_parameters']['disk_space_mb'] = \
kwargs['ex_disk_space_mb']
if 'ex_disk_space_2_mb' in kwargs:
data['vps_parameters']['disk_space_2_mb'] =\
kwargs['ex_disk_space_2_mb']
# Don't send empty 'vps_parameters' attribute
if not data['vps_parameters']:
del data['vps_parameters']
res = self.connection.request(
'/orders/new-vps',
method='POST',
data=json.dumps({"new-vps": data})
).object
node = self._to_node(res['about_order'])
node.extra['password'] = \
res['new_order_request']['instantiation_options']['password']
return node
def list_locations(self):
return [
NodeLocation('DCAUCKLAND', "RimuHosting Auckland", 'NZ', self),
NodeLocation('DCDALLAS', "RimuHosting Dallas", 'US', self),
NodeLocation('DCLONDON', "RimuHosting London", 'GB', self),
NodeLocation('DCSYDNEY', "RimuHosting Sydney", 'AU', self),
]
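# Editor's note: an editor-added, hedged usage sketch. 'MY-API-KEY' and
# 'example.com' are placeholders, and every call below talks to the live
# RimuHosting API, so treat this as illustration rather than a test.
def _demo_driver_usage():
    driver = RimuHostingNodeDriver('MY-API-KEY')
    sizes = driver.list_sizes()
    images = driver.list_images()
    # create_node requires a fully qualified domain name for 'name'.
    return driver.create_node(name='example.com', size=sizes[0],
                              image=images[0])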
|
{
"content_hash": "437f1b051788063e302be0eae0b7684a",
"timestamp": "",
"source": "github",
"line_count": 323,
"max_line_length": 79,
"avg_line_length": 35.11145510835913,
"alnum_prop": 0.5570055550656908,
"repo_name": "kun--hust/libcloud_with_cn",
"id": "acde57433680b2cece3cae06848062405a86c504",
"size": "12122",
"binary": false,
"copies": "5",
"ref": "refs/heads/development",
"path": "libcloud/compute/drivers/rimuhosting.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "Python",
"bytes": "3236824"
},
{
"name": "Shell",
"bytes": "12584"
}
],
"symlink_target": ""
}
|
import os,sys
import time
home_dir =os.getenv("DRC_BASE")
sys.path.append(home_dir + "/software/build/lib/python2.7/site-packages")
sys.path.append(home_dir + "/software/build/lib/python2.7/dist-packages")
import math
import numpy as np
import lcm
from bot_core.pose_t import pose_t
import time
import drc_utils as bot
pitch = 5.0
yaw = 10.0
pitch_rad = pitch*np.pi/180.0
yaw_rad =yaw*np.pi/180.0
lc = lcm.LCM()
print "Send DESIRED_HEAD_ORIENTATION..."
print pitch , pitch_rad
print yaw , yaw_rad
msg = pose_t();
msg.utime = bot.timestamp_now()
msg.orientation = bot.euler_to_quat([0, pitch_rad, yaw_rad ])
lc.publish("DESIRED_HEAD_ORIENTATION", msg.encode())
|
{
"content_hash": "c2de979247012dd34eed0ba62668fa51",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 73,
"avg_line_length": 23.785714285714285,
"alnum_prop": 0.7222222222222222,
"repo_name": "openhumanoids/oh-distro",
"id": "b681f73ddc038025cb266d6c5c828c312676c7b5",
"size": "684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "software/utils/drc_utils/python/send_head_command.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "131738"
},
{
"name": "C++",
"bytes": "2773796"
},
{
"name": "CMake",
"bytes": "1099155"
},
{
"name": "GLSL",
"bytes": "5320"
},
{
"name": "Java",
"bytes": "233603"
},
{
"name": "JavaScript",
"bytes": "232"
},
{
"name": "M",
"bytes": "3971"
},
{
"name": "Makefile",
"bytes": "82095"
},
{
"name": "Matlab",
"bytes": "1946915"
},
{
"name": "Mercury",
"bytes": "1487"
},
{
"name": "Objective-C",
"bytes": "10657"
},
{
"name": "Pascal",
"bytes": "3353"
},
{
"name": "Perl",
"bytes": "18915"
},
{
"name": "Python",
"bytes": "378988"
},
{
"name": "Shell",
"bytes": "35631"
},
{
"name": "XSLT",
"bytes": "73426"
}
],
"symlink_target": ""
}
|
import collections
import io
import sys
import unittest
def orderly(words):
unique = []
counted = collections.Counter()
for word in words:
if word not in counted:
unique.append(word)
counted[word] += 1
return unique, counted
def main():
n = int(input().strip())
words = []
for _ in range(n):
words.append(input().strip())
unique, counted = orderly(words)
print(len(unique))
print(' '.join([str(counted[word]) for word in unique]))
if __name__ == '__main__': # pragma: no cover
main()
class TestCode(unittest.TestCase):
def generalized_test(self, which):
sys.stdin = open(__file__.replace('.py', f'.{which}.in'), 'r')
sys.stdout = io.StringIO()
expected = open(__file__.replace('.py', f'.{which}.out'), 'r')
main()
self.assertEqual(sys.stdout.getvalue(), expected.read())
for handle in [sys.stdin, sys.stdout, expected]:
handle.close()
def test_0(self):
self.generalized_test('0')
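# Editor's note: an editor-added worked example of orderly(); the input words
# are the classic sample for this problem, not taken from the test fixtures.
def demo_orderly():
    unique, counted = orderly(['bcdef', 'abcdefg', 'bcde', 'bcdef'])
    assert unique == ['bcdef', 'abcdefg', 'bcde']        # first-seen order
    assert [counted[w] for w in unique] == [2, 1, 1]     # occurrence counts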
|
{
"content_hash": "d43d93346a6af23b1ccac0b6d193431d",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 70,
"avg_line_length": 24.904761904761905,
"alnum_prop": 0.5793499043977055,
"repo_name": "altermarkive/Coding-Interviews",
"id": "b142b4d9e839d76741f6f4c3ed379fadb89ab890",
"size": "1121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "algorithm-design/hackerrank/word_order/test_word_order.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""Package for managing security groups in AWS"""
from .create_securitygroup import SpinnakerSecurityGroup
from .destroy_sg import *
|
{
"content_hash": "aa3d021f493c7e02b7f7954198986d1e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 56,
"avg_line_length": 44.333333333333336,
"alnum_prop": 0.8045112781954887,
"repo_name": "gogoair/foremast",
"id": "12d5107d879b1367c8b66a28bc898f7567cd5335",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/foremast/securitygroup/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7614"
},
{
"name": "Python",
"bytes": "484364"
},
{
"name": "Shell",
"bytes": "180"
}
],
"symlink_target": ""
}
|
import csv
import sys
import codecs
import requests
from bs4 import BeautifulSoup
import os
import time
def UnicodeDictReader():
with open('Artworks.csv', 'rb') as csvfile:
csv_reader = csv.DictReader(csvfile)
for row in csv_reader:
yield dict([(key, unicode(value, 'utf-8')) for key, value in row.iteritems()])
for m in UnicodeDictReader():
url = m['URL']
oid = m['ObjectID']
fname = os.path.join('extras', oid + '.txt')
if url and not os.path.isfile(fname):
# time.sleep(1)
r = requests.get(url)
        soup = BeautifulSoup(r.text, 'html.parser')  # explicit parser avoids bs4's "no parser specified" warning
div = soup.find("div", {"class": "body-copy"})
if div:
txt = div.getText().encode('utf-8').strip()
if txt and len(txt) > 200 and not txt.startswith('In order to effectively'):
with open(fname, "w") as tf:
tf.write(txt)
print 'writing', fname, len(txt), 'bytes'
|
{
"content_hash": "5089071204d71d38609aed4822758092",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 84,
"avg_line_length": 27.40625,
"alnum_prop": 0.6305587229190421,
"repo_name": "darenr/art-dataset-nlp-experiments",
"id": "36a4a088babe0e8f246a84b9c02f27d01f269c62",
"size": "877",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moma/scrape-extra-text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7107"
},
{
"name": "JavaScript",
"bytes": "12780"
},
{
"name": "Jupyter Notebook",
"bytes": "249589"
},
{
"name": "Python",
"bytes": "45328"
},
{
"name": "Shell",
"bytes": "970"
}
],
"symlink_target": ""
}
|
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._model_containers_operations import build_create_or_update_request_initial, build_delete_request, build_get_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ModelContainersOperations:
"""ModelContainersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.machinelearningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
registry_name: str,
skiptoken: Optional[str] = None,
list_view_type: Optional[Union[str, "_models.ListViewType"]] = None,
**kwargs: Any
) -> AsyncIterable["_models.ModelContainerResourceArmPaginatedResult"]:
"""List model containers.
List model containers.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param registry_name: Name of Azure Machine Learning registry.
:type registry_name: str
:param skiptoken: Continuation token for pagination.
:type skiptoken: str
:param list_view_type: View type for including/excluding (for example) archived entities.
:type list_view_type: str or ~azure.mgmt.machinelearningservices.models.ListViewType
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ModelContainerResourceArmPaginatedResult or the
result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ModelContainerResourceArmPaginatedResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ModelContainerResourceArmPaginatedResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
skiptoken=skiptoken,
list_view_type=list_view_type,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
skiptoken=skiptoken,
list_view_type=list_view_type,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ModelContainerResourceArmPaginatedResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models'} # type: ignore
@distributed_trace_async
async def delete(
self,
name: str,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> None:
"""Delete container.
Delete container.
:param name: Container name.
:type name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param registry_name: Name of Azure Machine Learning registry.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
name=name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}'} # type: ignore
@distributed_trace_async
async def get(
self,
name: str,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> "_models.ModelContainerData":
"""Get container.
Get container.
:param name: Container name. This is case-sensitive.
:type name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param registry_name: Name of Azure Machine Learning registry.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ModelContainerData, or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.ModelContainerData
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ModelContainerData"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
name=name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ModelContainerData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}'} # type: ignore
async def _create_or_update_initial(
self,
name: str,
resource_group_name: str,
registry_name: str,
body: "_models.ModelContainerData",
**kwargs: Any
) -> "_models.ModelContainerData":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ModelContainerData"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'ModelContainerData')
request = build_create_or_update_request_initial(
name=name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['x-ms-async-operation-timeout']=self._deserialize('duration', response.headers.get('x-ms-async-operation-timeout'))
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('ModelContainerData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
name: str,
resource_group_name: str,
registry_name: str,
body: "_models.ModelContainerData",
**kwargs: Any
) -> AsyncLROPoller["_models.ModelContainerData"]:
"""Create or update model container.
Create or update model container.
:param name: Container name.
:type name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param registry_name: Name of Azure Machine Learning registry.
:type registry_name: str
:param body: Container entity to create or update.
:type body: ~azure.mgmt.machinelearningservices.models.ModelContainerData
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ModelContainerData or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.ModelContainerData]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ModelContainerData"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
name=name,
resource_group_name=resource_group_name,
registry_name=registry_name,
body=body,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response_headers = {}
response = pipeline_response.http_response
response_headers['x-ms-async-operation-timeout']=self._deserialize('duration', response.headers.get('x-ms-async-operation-timeout'))
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('ModelContainerData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/registries/{registryName}/models/{name}'} # type: ignore
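# Editor's note: an editor-added, hedged sketch of exercising this operation
# group. The ``client`` object and the resource names are assumptions for
# illustration; in the generated SDK the group is normally reached as an
# attribute of a service client rather than constructed directly.
async def _demo_model_containers(client, body):
    # Long-running create: begin_create_or_update returns an AsyncLROPoller.
    poller = await client.model_containers.begin_create_or_update(
        name="my-model",
        resource_group_name="my-rg",
        registry_name="my-registry",
        body=body,
    )
    created = await poller.result()
    # list() returns an AsyncItemPaged that is consumed with ``async for``.
    async for container in client.model_containers.list(
            resource_group_name="my-rg", registry_name="my-registry"):
        print(container)
    return created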
|
{
"content_hash": "b1361edee269dc052d31c075fe72e9c6",
"timestamp": "",
"source": "github",
"line_count": 375,
"max_line_length": 218,
"avg_line_length": 45.55733333333333,
"alnum_prop": 0.657106064153594,
"repo_name": "Azure/azure-sdk-for-python",
"id": "ba6078759086a5779262d8b7fab37f8845e3a712",
"size": "17551",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2021_10_01_dataplanepreview/aio/operations/_model_containers_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""Setup for data structures."""
from setuptools import setup
extra_packages = {
'testing': ['ipython', 'pytest', 'pytest-watch', 'pytest-cov', 'tox']
}
setup(
name='Data structures',
description='Python implementation of various data structures.',
version='0.1',
author='Sean Beseler, Morgan Nomura',
author_email='seanwbeseler@gmail.com, morganelle@gmail.com',
license='MIT',
py_modules=['bst'],
package_dir={'': 'src'},
install_requires=[],
extras_require=extra_packages,
entry_points={}
)
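# Editor's note: an editor-added pointer (not part of the original file) on
# consuming the 'testing' extra declared above; the commands are standard
# pip usage, shown here as comments:
#
#   pip install -e .              # core package only
#   pip install -e ".[testing]"   # core plus pytest/tox tooling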
|
{
"content_hash": "4ea6b46a5d54e926df21d12d46109b8e",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 73,
"avg_line_length": 27.15,
"alnum_prop": 0.6519337016574586,
"repo_name": "morganelle/data-structures",
"id": "5cd7e9640e6a9a6cf0093d7fd743ccd2a34ad508",
"size": "543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24426"
}
],
"symlink_target": ""
}
|
"""
homeassistant.components.device_tracker.ubus
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Device tracker platform that supports scanning a OpenWRT router for device
presence.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.ubus/
"""
import logging
import json
from datetime import timedelta
import re
import threading
import requests
from homeassistant.const import CONF_HOST, CONF_USERNAME, CONF_PASSWORD
from homeassistant.helpers import validate_config
from homeassistant.util import Throttle
from homeassistant.components.device_tracker import DOMAIN
# Return cached results if last scan was less then this time ago
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
def get_scanner(hass, config):
""" Validates config and returns a Luci scanner. """
if not validate_config(config,
{DOMAIN: [CONF_HOST, CONF_USERNAME, CONF_PASSWORD]},
_LOGGER):
return None
scanner = UbusDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
# pylint: disable=too-many-instance-attributes
class UbusDeviceScanner(object):
"""
This class queries a wireless router running OpenWrt firmware
for connected devices. Adapted from Tomato scanner.
    Configure your router's ubus ACL based on the following instructions:
    http://wiki.openwrt.org/doc/techref/ubus
    Read-only access will be fine.
    To use this class you have to install the rpcd-mod-file package
    on your OpenWrt router:
    opkg install rpcd-mod-file
"""
def __init__(self, config):
host = config[CONF_HOST]
username, password = config[CONF_USERNAME], config[CONF_PASSWORD]
self.parse_api_pattern = re.compile(r"(?P<param>\w*) = (?P<value>.*);")
self.lock = threading.Lock()
self.last_results = {}
self.url = 'http://{}/ubus'.format(host)
self.session_id = _get_session_id(self.url, username, password)
self.hostapd = []
self.leasefile = None
self.mac2name = None
self.success_init = self.session_id is not None
def scan_devices(self):
"""
        Scans for new devices and returns a list containing found device IDs.
"""
self._update_info()
return self.last_results
def get_device_name(self, device):
""" Returns the name of the given device or None if we don't know. """
with self.lock:
if self.leasefile is None:
result = _req_json_rpc(self.url, self.session_id,
'call', 'uci', 'get',
config="dhcp", type="dnsmasq")
if result:
values = result["values"].values()
self.leasefile = next(iter(values))["leasefile"]
else:
return
if self.mac2name is None:
result = _req_json_rpc(self.url, self.session_id,
'call', 'file', 'read',
path=self.leasefile)
if result:
self.mac2name = dict()
for line in result["data"].splitlines():
hosts = line.split(" ")
self.mac2name[hosts[1].upper()] = hosts[3]
else:
# Error, handled in the _req_json_rpc
return
return self.mac2name.get(device.upper(), None)
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""
        Ensures the information from the OpenWrt router is up to date.
        Returns a boolean indicating whether the scan was successful.
"""
if not self.success_init:
return False
with self.lock:
_LOGGER.info("Checking ARP")
if not self.hostapd:
hostapd = _req_json_rpc(self.url, self.session_id,
'list', 'hostapd.*', '')
self.hostapd.extend(hostapd.keys())
self.last_results = []
results = 0
for hostapd in self.hostapd:
result = _req_json_rpc(self.url, self.session_id,
'call', hostapd, 'get_clients')
if result:
results = results + 1
self.last_results.extend(result['clients'].keys())
return bool(results)
def _req_json_rpc(url, session_id, rpcmethod, subsystem, method, **params):
""" Perform one JSON RPC operation. """
data = json.dumps({"jsonrpc": "2.0",
"id": 1,
"method": rpcmethod,
"params": [session_id,
subsystem,
method,
params]})
try:
res = requests.post(url, data=data, timeout=5)
except requests.exceptions.Timeout:
return
if res.status_code == 200:
response = res.json()
if rpcmethod == "call":
return response["result"][1]
else:
return response["result"]
def _get_session_id(url, username, password):
""" Get authentication token for the given host+username+password. """
res = _req_json_rpc(url, "00000000000000000000000000000000", 'call',
'session', 'login', username=username,
password=password)
return res["ubus_rpc_session"]
|
{
"content_hash": "e6e52269909156ef1b2055e8ba322cd5",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 79,
"avg_line_length": 32.641618497109825,
"alnum_prop": 0.5519744997343722,
"repo_name": "caiuspb/home-assistant",
"id": "0355680a31da87e0b4759c42ee1dac15951ad150",
"size": "5647",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/device_tracker/ubus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1328099"
},
{
"name": "Python",
"bytes": "1268986"
},
{
"name": "Shell",
"bytes": "4079"
}
],
"symlink_target": ""
}
|
from django.db import models, migrations
from decimal import Decimal
import dezede.models
import django.db.models.deletion
from django.conf import settings
import image_cropping.fields
import libretto.models.base
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
('libretto', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Diapositive',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField(verbose_name='identifiant de l\u2019objet li\xe9')),
('title', models.CharField(max_length=70, verbose_name='titre')),
('subtitle', models.CharField(max_length=100, verbose_name='sous-titre', blank=True)),
('text_align', models.CharField(default='text-left', max_length=11, verbose_name='alignement du texte', choices=[('text-left', 'Gauche'), ('text-center', 'Centre'), ('text-right', 'Droite')])),
('text_background', models.BooleanField(default=False, help_text='Ajoute un cadre semi-transparent derri\xe8re le texte pour faciliter la lecture.', verbose_name='cadre derri\xe8re le texte')),
('image', models.ImageField(upload_to='accueil', verbose_name='image')),
('cropping', image_cropping.fields.ImageRatioField('image', '450x450', hide_image_field=False, size_warning=True, allow_fullsize=False, free_crop=True, adapt_rotation=False, help_text=None, verbose_name='d\xe9coupage de l\u2019image')),
('image_align', models.CharField(default='text-right', max_length=11, verbose_name='alignement de l\u2019image', choices=[('text-left', 'Gauche'), ('text-center', 'Centre'), ('text-right', 'Droite')])),
('opacity', models.DecimalField(default=0.6, verbose_name='opacit\xe9', max_digits=2, decimal_places=1, choices=[(Decimal('1.0'), 'Opaque'), (Decimal('0.9'), '90 %'), (Decimal('0.8'), '80 %'), (Decimal('0.7'), '70 %'), (Decimal('0.6'), '60 %'), (Decimal('0.5'), '50 %'), (Decimal('0.4'), '40 %'), (Decimal('0.3'), '30 %'), (Decimal('0.2'), '20 %'), (Decimal('0.1'), '10 %')])),
('position', models.PositiveSmallIntegerField(default=1, verbose_name='position')),
('content_type', models.ForeignKey(verbose_name='type d\u2019objet li\xe9', to='contenttypes.ContentType', on_delete=django.db.models.CASCADE)),
('etat', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, default=libretto.models.base._get_default_etat, verbose_name='\xe9tat', to='libretto.Etat')),
('owner', models.ForeignKey(related_name='diapositive', on_delete=django.db.models.deletion.PROTECT, verbose_name='propri\xe9taire', blank=True, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ('position',),
'verbose_name': 'diapositive',
'verbose_name_plural': 'diapositives',
},
bases=(models.Model,),
),
]
|
{
"content_hash": "da6125f5dee8aabb860c856663f985b9",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 393,
"avg_line_length": 73.06818181818181,
"alnum_prop": 0.6289269051321928,
"repo_name": "dezede/dezede",
"id": "5da71efba64085dc9245bdcf560a306996675448",
"size": "3215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dezede/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10100"
},
{
"name": "HTML",
"bytes": "205803"
},
{
"name": "JavaScript",
"bytes": "53836"
},
{
"name": "Less",
"bytes": "21716"
},
{
"name": "Python",
"bytes": "818952"
},
{
"name": "Shell",
"bytes": "433"
},
{
"name": "TeX",
"bytes": "5922"
}
],
"symlink_target": ""
}
|
"""Class of Augeas Configurators."""
import logging
import augeas
from letsencrypt import errors
from letsencrypt import reverter
from letsencrypt.plugins import common
from letsencrypt_apache import constants
logger = logging.getLogger(__name__)
class AugeasConfigurator(common.Plugin):
"""Base Augeas Configurator class.
:ivar config: Configuration.
:type config: :class:`~letsencrypt.interfaces.IConfig`
:ivar aug: Augeas object
:type aug: :class:`augeas.Augeas`
:ivar str save_notes: Human-readable configuration change notes
:ivar reverter: saves and reverts checkpoints
:type reverter: :class:`letsencrypt.reverter.Reverter`
"""
def __init__(self, *args, **kwargs):
super(AugeasConfigurator, self).__init__(*args, **kwargs)
self.aug = augeas.Augeas(
# specify a directory to load our preferred lens from
loadpath=constants.AUGEAS_LENS_DIR,
# Do not save backup (we do it ourselves), do not load
# anything by default
flags=(augeas.Augeas.NONE | augeas.Augeas.NO_MODL_AUTOLOAD))
self.save_notes = ""
# See if any temporary changes need to be recovered
# This needs to occur before VirtualHost objects are setup...
# because this will change the underlying configuration and potential
# vhosts
self.reverter = reverter.Reverter(self.config)
self.recovery_routine()
def check_parsing_errors(self, lens):
"""Verify Augeas can parse all of the lens files.
:param str lens: lens to check for errors
:raises .errors.PluginError: If there has been an error in parsing with
the specified lens.
"""
error_files = self.aug.match("/augeas//error")
for path in error_files:
# Check to see if it was an error resulting from the use of
# the httpd lens
lens_path = self.aug.get(path + "/lens")
# As aug.get may return null
if lens_path and lens in lens_path:
msg = (
"There has been an error in parsing the file (%s): %s",
# Strip off /augeas/files and /error
path[13:len(path) - 6], self.aug.get(path + "/message"))
raise errors.PluginError(msg)
# TODO: Cleanup this function
def save(self, title=None, temporary=False):
"""Saves all changes to the configuration files.
        This function first checks for save errors; if none are found,
        all configuration changes made are saved according to the
        function parameters. If an exception is raised, a new checkpoint
        was not created.
:param str title: The title of the save. If a title is given, the
configuration will be saved as a new checkpoint and put in a
timestamped directory.
:param bool temporary: Indicates whether the changes made will
be quickly reversed in the future (ie. challenges)
:raises .errors.PluginError: If there was an error in Augeas, in
an attempt to save the configuration, or an error creating a
checkpoint
"""
save_state = self.aug.get("/augeas/save")
self.aug.set("/augeas/save", "noop")
# Existing Errors
ex_errs = self.aug.match("/augeas//error")
try:
# This is a noop save
self.aug.save()
except (RuntimeError, IOError):
self._log_save_errors(ex_errs)
# Erase Save Notes
self.save_notes = ""
raise errors.PluginError(
"Error saving files, check logs for more info.")
# Retrieve list of modified files
# Note: Noop saves can cause the file to be listed twice, I used a
# set to remove this possibility. This is a known augeas 0.10 error.
save_paths = self.aug.match("/augeas/events/saved")
# If the augeas tree didn't change, no files were saved and a backup
# should not be created
if save_paths:
save_files = set()
for path in save_paths:
save_files.add(self.aug.get(path)[6:])
try:
# Create Checkpoint
if temporary:
self.reverter.add_to_temp_checkpoint(
save_files, self.save_notes)
else:
self.reverter.add_to_checkpoint(save_files,
self.save_notes)
except errors.ReverterError as err:
raise errors.PluginError(str(err))
self.aug.set("/augeas/save", save_state)
self.save_notes = ""
self.aug.save()
if title and not temporary:
try:
self.reverter.finalize_checkpoint(title)
except errors.ReverterError as err:
raise errors.PluginError(str(err))
def _log_save_errors(self, ex_errs):
"""Log errors due to bad Augeas save.
:param list ex_errs: Existing errors before save
"""
# Check for the root of save problems
new_errs = self.aug.match("/augeas//error")
# logger.error("During Save - %s", mod_conf)
logger.error("Unable to save files: %s. Attempted Save Notes: %s",
", ".join(err[13:len(err) - 6] for err in new_errs
# Only new errors caused by recent save
if err not in ex_errs), self.save_notes)
# Wrapper functions for Reverter class
def recovery_routine(self):
"""Revert all previously modified files.
Reverts all modified files that have not been saved as a checkpoint
:raises .errors.PluginError: If unable to recover the configuration
"""
try:
self.reverter.recovery_routine()
except errors.ReverterError as err:
raise errors.PluginError(str(err))
# Need to reload configuration after these changes take effect
self.aug.load()
def revert_challenge_config(self):
"""Used to cleanup challenge configurations.
:raises .errors.PluginError: If unable to revert the challenge config.
"""
try:
self.reverter.revert_temporary_config()
except errors.ReverterError as err:
raise errors.PluginError(str(err))
self.aug.load()
def rollback_checkpoints(self, rollback=1):
"""Rollback saved checkpoints.
:param int rollback: Number of checkpoints to revert
:raises .errors.PluginError: If there is a problem with the input or
the function is unable to correctly revert the configuration
"""
try:
self.reverter.rollback_checkpoints(rollback)
except errors.ReverterError as err:
raise errors.PluginError(str(err))
self.aug.load()
def view_config_changes(self):
"""Show all of the configuration changes that have taken place.
:raises .errors.PluginError: If there is a problem while processing
the checkpoints directories.
"""
try:
self.reverter.view_config_changes()
except errors.ReverterError as err:
raise errors.PluginError(str(err))
|
{
"content_hash": "7156c0cde456528dceae02a1b2da795d",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 79,
"avg_line_length": 36.03414634146341,
"alnum_prop": 0.6005144172194395,
"repo_name": "mitnk/letsencrypt",
"id": "9b51c32a9a0eaac32477a13cb2c71ba18aa32ab6",
"size": "7387",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "letsencrypt-apache/letsencrypt_apache/augeas_configurator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "48432"
},
{
"name": "Augeas",
"bytes": "5062"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1374377"
},
{
"name": "Shell",
"bytes": "124081"
}
],
"symlink_target": ""
}
|
"""Implementation of seq2seq model.
The model is based on: https://github.com/google/flax/tree/main/examples/seq2seq
"""
import logging
import numpy as np
from typing import Tuple
from typing import Any
from functools import partial
import jax
import jax.numpy as jnp
from jax import random
from flax import linen as nn
from evojax.policy.base import PolicyNetwork
from evojax.policy.base import PolicyState
from evojax.task.base import TaskState
from evojax.util import create_logger
from evojax.util import get_params_format_fn
class CharacterTable(object):
"""Encode/decodes between strings and integer representations."""
def __init__(self):
self._chars = '0123456789+= '
self.pad_id = len(self._chars)
self.eos_id = self.pad_id + 1
self.vocab_size = len(self._chars) + 2
self._indices_char = dict(
(idx, ch) for idx, ch in enumerate(self._chars))
self._indices_char[self.pad_id] = '_'
def encode(self, inputs: jnp.ndarray) -> jnp.ndarray:
return jnp.concatenate([inputs, jnp.array([self.eos_id])])
def decode(self, inputs):
"""Decode from list of integers to string."""
chars = []
for elem in inputs.tolist():
if elem == self.eos_id:
break
chars.append(self._indices_char[elem])
return ''.join(chars)
char_table = CharacterTable()
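# Illustrative usage sketch (not part of the original module):
#   char_table.encode(jnp.array([1, 2, 10]))   # appends EOS -> [1, 2, 10, 14]
#   char_table.decode(jnp.array([1, 2, 10, char_table.eos_id]))  # -> '12+'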
class EncoderLSTM(nn.Module):
"""LSTM in the encoder part of the seq2seq model."""
@partial(
nn.transforms.scan,
variable_broadcast='params',
in_axes=1,
out_axes=1,
split_rngs={'params': False})
@nn.compact
def __call__(self, carry, x):
lstm_state, is_eos = carry
new_lstm_state, y = nn.LSTMCell()(lstm_state, x)
# Pass forward the previous state if EOS has already been reached.
def select_carried_state(new_state, old_state):
return jnp.where(is_eos[:, np.newaxis], old_state, new_state)
# LSTM state is a tuple (c, h).
carried_lstm_state = tuple(
select_carried_state(*s) for s in zip(new_lstm_state, lstm_state))
# Update `is_eos`.
is_eos = jnp.logical_or(is_eos, x[:, char_table.eos_id])
return (carried_lstm_state, is_eos), y
@staticmethod
def initialize_carry(batch_size, hidden_size):
# use dummy key since default state init fn is just zeros.
return nn.LSTMCell.initialize_carry(
jax.random.PRNGKey(0), (batch_size,), hidden_size)
class Encoder(nn.Module):
"""LSTM encoder, returning state after EOS is input."""
hidden_size: int
@nn.compact
def __call__(self, inputs):
# inputs.shape = (batch_size, seq_length, vocab_size).
batch_size = inputs.shape[0]
lstm = EncoderLSTM(name='encoder_lstm')
init_lstm_state = lstm.initialize_carry(batch_size, self.hidden_size)
        # dtype=bool: np.bool is a deprecated alias of the builtin bool.
        init_is_eos = jnp.zeros(batch_size, dtype=bool)
init_carry = (init_lstm_state, init_is_eos)
(final_state, _), _ = lstm(init_carry, inputs)
return final_state
class DecoderLSTM(nn.Module):
"""LSTM in the decoder part of the seq2seq model."""
teacher_force: bool
@partial(
nn.transforms.scan,
variable_broadcast='params',
in_axes=1,
out_axes=1,
split_rngs={'params': False})
@nn.compact
def __call__(self, carry, x):
lstm_state, last_prediction = carry
if not self.teacher_force:
x = last_prediction
lstm_state, y = nn.LSTMCell()(lstm_state, x)
logits = nn.Dense(features=char_table.vocab_size)(y)
predicted_token = jnp.argmax(logits, axis=-1)
prediction = jax.nn.one_hot(
predicted_token, char_table.vocab_size, dtype=jnp.float32)
return (lstm_state, prediction), (logits, prediction)
class Decoder(nn.Module):
"""LSTM decoder."""
init_state: Tuple[Any]
teacher_force: bool
@nn.compact
def __call__(self, inputs):
# inputs.shape = (seq_length, vocab_size).
lstm = DecoderLSTM(teacher_force=self.teacher_force)
init_carry = (self.init_state, inputs[:, 0])
_, (logits, predictions) = lstm(init_carry, inputs)
return logits, predictions
class Seq2seq(nn.Module):
"""Sequence-to-sequence class using encoder/decoder architecture."""
teacher_force: bool
hidden_size: int
@nn.compact
def __call__(self, encoder_inputs, decoder_inputs):
# Encode inputs.
init_decoder_state = Encoder(
hidden_size=self.hidden_size)(encoder_inputs)
# Decode outputs.
logits, predictions = Decoder(
init_state=init_decoder_state,
teacher_force=self.teacher_force)(decoder_inputs[:, :-1])
return logits, predictions
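# Shape sketch (hypothetical sizes): with batch=2, encoder length 7, decoder
# length 6 and vocab_size=15, Seq2seq returns logits of shape (2, 5, 15),
# since the decoder consumes decoder_inputs[:, :-1].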
class Seq2seqPolicy(PolicyNetwork):
"""A seq2seq policy that deals with simple additions."""
def __init__(self,
hidden_size: int = 256,
teacher_force: bool = False,
max_len_query_digit: int = 3,
logger: logging.Logger = None):
if logger is None:
self._logger = create_logger('Seq2seqPolicy')
else:
self._logger = logger
max_input_len = max_len_query_digit + 2 + 2
max_output_len = max_len_query_digit + 3
encoder_shape = jnp.ones(
(1, max_input_len, char_table.vocab_size), dtype=jnp.float32)
decoder_shape = jnp.ones(
(1, max_output_len, char_table.vocab_size), dtype=jnp.float32)
model = Seq2seq(hidden_size=hidden_size, teacher_force=teacher_force)
key = random.PRNGKey(0)
params = model.init({'params': key, 'lstm': key},
encoder_shape, decoder_shape)['params']
self.num_params, format_params_fn = get_params_format_fn(params)
self._logger.info(
'Seq2seqPolicy.num_params = {}'.format(self.num_params))
self._format_params_fn = jax.vmap(format_params_fn)
def forward_fn(p, o):
x = jax.nn.one_hot(
char_table.encode(jnp.array([11]))[0:1], char_table.vocab_size,
dtype=jnp.float32)
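            # Token id 11 is '=' in CharacterTable._chars, so the decoder is
            # seeded with a one-hot '=' start token, tiled across the batch.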
x = jnp.tile(x, (o.shape[0], max_output_len, 1))
logits, predictions = model.apply({'params': p}, o, x)
return logits
self._forward_fn = jax.vmap(forward_fn)
def get_actions(self,
t_states: TaskState,
params: jnp.ndarray,
p_states: PolicyState) -> Tuple[jnp.ndarray, PolicyState]:
params = self._format_params_fn(params)
return self._forward_fn(params, t_states.obs), p_states
|
{
"content_hash": "ed573f8126e958d36c13d23a0ae66cf0",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 80,
"avg_line_length": 33.61194029850746,
"alnum_prop": 0.6039076376554174,
"repo_name": "google/evojax",
"id": "11ba39b106ca323d6b110bf688ef4ec1f6fce97d",
"size": "7340",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "evojax/policy/seq2seq.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "893553"
},
{
"name": "Python",
"bytes": "270885"
}
],
"symlink_target": ""
}
|
"""Code for encoding protocol message primitives.
Contains the logic for encoding every logical protocol field type
into one of the 5 physical wire types.
This code is designed to push the Python interpreter's performance to the
limits.
The basic idea is that at startup time, for every field (i.e. every
FieldDescriptor) we construct two functions: a "sizer" and an "encoder". The
sizer takes a value of this field's type and computes its byte size. The
encoder takes a writer function and a value. It encodes the value into byte
strings and invokes the writer function to write those strings. Typically the
writer function is the write() method of a BytesIO.
We try to do as much work as possible when constructing the writer and the
sizer rather than when calling them. In particular:
* We copy any needed global functions to local variables, so that we do not need
to do costly global table lookups at runtime.
* Similarly, we try to do any attribute lookups at startup time if possible.
* Every field's tag is encoded to bytes at startup, since it can't change at
runtime.
* Whatever component of the field size we can compute at startup, we do.
* We *avoid* sharing code if doing so would make the code slower and not sharing
does not burden us too much. For example, encoders for repeated fields do
not just call the encoders for singular fields in a loop because this would
add an extra function call overhead for every loop iteration; instead, we
manually inline the single-value encoder into the loop.
* If a Python function lacks a return statement, Python actually generates
instructions to pop the result of the last statement off the stack, push
None onto the stack, and then return that. If we really don't care what
value is returned, then we can save two instructions by returning the
result of the last statement. It looks funny but it helps.
* We assume that type and bounds checking has happened at a higher level.
"""
__author__ = 'kenton@google.com (Kenton Varda)'
import struct
import six
from google.protobuf.internal import wire_format
# This will overflow and thus become IEEE-754 "infinity". We would use
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
_POS_INF = 1e10000
_NEG_INF = -_POS_INF
def _VarintSize(value):
"""Compute the size of a varint value."""
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
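# For example, _VarintSize(300) == 2, since 0x7f < 300 <= 0x3fff.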
def _SignedVarintSize(value):
"""Compute the size of a signed varint value."""
if value < 0: return 10
if value <= 0x7f: return 1
if value <= 0x3fff: return 2
if value <= 0x1fffff: return 3
if value <= 0xfffffff: return 4
if value <= 0x7ffffffff: return 5
if value <= 0x3ffffffffff: return 6
if value <= 0x1ffffffffffff: return 7
if value <= 0xffffffffffffff: return 8
if value <= 0x7fffffffffffffff: return 9
return 10
def _TagSize(field_number):
"""Returns the number of bytes required to serialize a tag with this field
number."""
# Just pass in type 0, since the type won't affect the tag+type size.
return _VarintSize(wire_format.PackTag(field_number, 0))
# --------------------------------------------------------------------
# In this section we define some generic sizers. Each of these functions
# takes parameters specific to a particular field type, e.g. int32 or fixed64.
# It returns another function which in turn takes parameters specific to a
# particular field, e.g. the field number and whether it is repeated or packed.
# Look at the next section to see how these are used.
def _SimpleSizer(compute_value_size):
"""A sizer which uses the function compute_value_size to compute the size of
each value. Typically compute_value_size is _VarintSize."""
def SpecificSizer(field_number, is_repeated, is_packed):
tag_size = _TagSize(field_number)
if is_packed:
local_VarintSize = _VarintSize
def PackedFieldSize(value):
result = 0
for element in value:
result += compute_value_size(element)
return result + local_VarintSize(result) + tag_size
return PackedFieldSize
elif is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
result += compute_value_size(element)
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return tag_size + compute_value_size(value)
return FieldSize
return SpecificSizer
def _ModifiedSizer(compute_value_size, modify_value):
"""Like SimpleSizer, but modify_value is invoked on each value before it is
passed to compute_value_size. modify_value is typically ZigZagEncode."""
def SpecificSizer(field_number, is_repeated, is_packed):
tag_size = _TagSize(field_number)
if is_packed:
local_VarintSize = _VarintSize
def PackedFieldSize(value):
result = 0
for element in value:
result += compute_value_size(modify_value(element))
return result + local_VarintSize(result) + tag_size
return PackedFieldSize
elif is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
result += compute_value_size(modify_value(element))
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return tag_size + compute_value_size(modify_value(value))
return FieldSize
return SpecificSizer
def _FixedSizer(value_size):
"""Like _SimpleSizer except for a fixed-size field. The input is the size
of one value."""
def SpecificSizer(field_number, is_repeated, is_packed):
tag_size = _TagSize(field_number)
if is_packed:
local_VarintSize = _VarintSize
def PackedFieldSize(value):
result = len(value) * value_size
return result + local_VarintSize(result) + tag_size
return PackedFieldSize
elif is_repeated:
element_size = value_size + tag_size
def RepeatedFieldSize(value):
return len(value) * element_size
return RepeatedFieldSize
else:
field_size = value_size + tag_size
def FieldSize(value):
return field_size
return FieldSize
return SpecificSizer
# ====================================================================
# Here we declare a sizer constructor for each field type. Each "sizer
# constructor" is a function that takes (field_number, is_repeated, is_packed)
# as parameters and returns a sizer, which in turn takes a field value as
# a parameter and returns its encoded size.
Int32Sizer = Int64Sizer = EnumSizer = _SimpleSizer(_SignedVarintSize)
UInt32Sizer = UInt64Sizer = _SimpleSizer(_VarintSize)
SInt32Sizer = SInt64Sizer = _ModifiedSizer(
_SignedVarintSize, wire_format.ZigZagEncode)
Fixed32Sizer = SFixed32Sizer = FloatSizer = _FixedSizer(4)
Fixed64Sizer = SFixed64Sizer = DoubleSizer = _FixedSizer(8)
BoolSizer = _FixedSizer(1)
def StringSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a string field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
local_len = len
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = local_len(element.encode('utf-8'))
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = local_len(value.encode('utf-8'))
return tag_size + local_VarintSize(l) + l
return FieldSize
def BytesSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a bytes field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
local_len = len
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = local_len(element)
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = local_len(value)
return tag_size + local_VarintSize(l) + l
return FieldSize
def GroupSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a group field."""
tag_size = _TagSize(field_number) * 2
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
result += element.ByteSize()
return result
return RepeatedFieldSize
else:
def FieldSize(value):
return tag_size + value.ByteSize()
return FieldSize
def MessageSizer(field_number, is_repeated, is_packed):
"""Returns a sizer for a message field."""
tag_size = _TagSize(field_number)
local_VarintSize = _VarintSize
assert not is_packed
if is_repeated:
def RepeatedFieldSize(value):
result = tag_size * len(value)
for element in value:
l = element.ByteSize()
result += local_VarintSize(l) + l
return result
return RepeatedFieldSize
else:
def FieldSize(value):
l = value.ByteSize()
return tag_size + local_VarintSize(l) + l
return FieldSize
# --------------------------------------------------------------------
# MessageSet is special: it needs custom logic to compute its size properly.
def MessageSetItemSizer(field_number):
"""Returns a sizer for extensions of MessageSet.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) +
_TagSize(3))
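  # static_size covers the Item group's start/end tags (field 1), the type_id
  # tag (field 2) plus its varint value, and the message tag (field 3).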
local_VarintSize = _VarintSize
def FieldSize(value):
l = value.ByteSize()
return static_size + local_VarintSize(l) + l
return FieldSize
# --------------------------------------------------------------------
# Map is special: it needs custom logic to compute its size properly.
def MapSizer(field_descriptor, is_message_map):
"""Returns a sizer for a map field."""
# Can't look at field_descriptor.message_type._concrete_class because it may
# not have been initialized yet.
message_type = field_descriptor.message_type
message_sizer = MessageSizer(field_descriptor.number, False, False)
def FieldSize(map_value):
total = 0
for key in map_value:
value = map_value[key]
# It's wasteful to create the messages and throw them away one second
# later since we'll do the same for the actual encode. But there's not an
# obvious way to avoid this within the current design without tons of code
# duplication. For message map, value.ByteSize() should be called to
# update the status.
entry_msg = message_type._concrete_class(key=key, value=value)
total += message_sizer(entry_msg)
if is_message_map:
value.ByteSize()
return total
return FieldSize
# ====================================================================
# Encoders!
def _VarintEncoder():
"""Return an encoder for a basic varint value (does not include tag)."""
def EncodeVarint(write, value):
bits = value & 0x7f
value >>= 7
while value:
write(six.int2byte(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(six.int2byte(bits))
return EncodeVarint
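# Worked example (illustrative): 300 == 0b100101100 encodes to b'\xac\x02' --
# low 7 bits (0x2c) with the continuation bit set, then the remaining 0b10.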
def _SignedVarintEncoder():
"""Return an encoder for a basic signed varint value (does not include
tag)."""
def EncodeSignedVarint(write, value):
if value < 0:
value += (1 << 64)
bits = value & 0x7f
value >>= 7
while value:
write(six.int2byte(0x80|bits))
bits = value & 0x7f
value >>= 7
return write(six.int2byte(bits))
return EncodeSignedVarint
_EncodeVarint = _VarintEncoder()
_EncodeSignedVarint = _SignedVarintEncoder()
def _VarintBytes(value):
"""Encode the given integer as a varint and return the bytes. This is only
called at startup time so it doesn't need to be fast."""
pieces = []
_EncodeVarint(pieces.append, value)
return b"".join(pieces)
def TagBytes(field_number, wire_type):
"""Encode the given tag and return the bytes. Only called at startup."""
return _VarintBytes(wire_format.PackTag(field_number, wire_type))
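# For example, TagBytes(1, wire_format.WIRETYPE_VARINT) == b'\x08', since
# PackTag gives (1 << 3) | 0 == 8, encoded as a one-byte varint.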
# --------------------------------------------------------------------
# As with sizers (see above), we have a number of common encoder
# implementations.
def _SimpleEncoder(wire_type, encode_value, compute_value_size):
"""Return a constructor for an encoder for fields of a particular type.
Args:
wire_type: The field's wire type, for encoding tags.
encode_value: A function which encodes an individual value, e.g.
_EncodeVarint().
compute_value_size: A function which computes the size of an individual
value, e.g. _VarintSize().
"""
def SpecificEncoder(field_number, is_repeated, is_packed):
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
size = 0
for element in value:
size += compute_value_size(element)
local_EncodeVarint(write, size)
for element in value:
encode_value(write, element)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
encode_value(write, element)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return encode_value(write, value)
return EncodeField
return SpecificEncoder
def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value):
"""Like SimpleEncoder but additionally invokes modify_value on every value
before passing it to encode_value. Usually modify_value is ZigZagEncode."""
def SpecificEncoder(field_number, is_repeated, is_packed):
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
size = 0
for element in value:
size += compute_value_size(modify_value(element))
local_EncodeVarint(write, size)
for element in value:
encode_value(write, modify_value(element))
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
encode_value(write, modify_value(element))
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return encode_value(write, modify_value(value))
return EncodeField
return SpecificEncoder
def _StructPackEncoder(wire_type, format):
"""Return a constructor for an encoder for a fixed-width field.
Args:
wire_type: The field's wire type, for encoding tags.
format: The format string to pass to struct.pack().
"""
value_size = struct.calcsize(format)
def SpecificEncoder(field_number, is_repeated, is_packed):
local_struct_pack = struct.pack
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value) * value_size)
for element in value:
write(local_struct_pack(format, element))
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
write(local_struct_pack(format, element))
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
return write(local_struct_pack(format, value))
return EncodeField
return SpecificEncoder
def _FloatingPointEncoder(wire_type, format):
"""Return a constructor for an encoder for float fields.
This is like StructPackEncoder, but catches errors that may be due to
passing non-finite floating-point values to struct.pack, and makes a
second attempt to encode those values.
Args:
wire_type: The field's wire type, for encoding tags.
format: The format string to pass to struct.pack().
"""
value_size = struct.calcsize(format)
if value_size == 4:
def EncodeNonFiniteOrRaise(write, value):
# Remember that the serialized form uses little-endian byte order.
if value == _POS_INF:
write(b'\x00\x00\x80\x7F')
elif value == _NEG_INF:
write(b'\x00\x00\x80\xFF')
elif value != value: # NaN
write(b'\x00\x00\xC0\x7F')
else:
raise
elif value_size == 8:
def EncodeNonFiniteOrRaise(write, value):
if value == _POS_INF:
write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F')
elif value == _NEG_INF:
write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF')
elif value != value: # NaN
write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F')
else:
raise
else:
raise ValueError('Can\'t encode floating-point values that are '
'%d bytes long (only 4 or 8)' % value_size)
def SpecificEncoder(field_number, is_repeated, is_packed):
local_struct_pack = struct.pack
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value) * value_size)
for element in value:
# This try/except block is going to be faster than any code that
# we could write to check whether element is finite.
try:
write(local_struct_pack(format, element))
except SystemError:
EncodeNonFiniteOrRaise(write, element)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
try:
write(local_struct_pack(format, element))
except SystemError:
EncodeNonFiniteOrRaise(write, element)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_type)
def EncodeField(write, value):
write(tag_bytes)
try:
write(local_struct_pack(format, value))
except SystemError:
EncodeNonFiniteOrRaise(write, value)
return EncodeField
return SpecificEncoder
# ====================================================================
# Here we declare an encoder constructor for each field type. These work
# very similarly to sizer constructors, described earlier.
Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder(
wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize)
UInt32Encoder = UInt64Encoder = _SimpleEncoder(
wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize)
SInt32Encoder = SInt64Encoder = _ModifiedEncoder(
wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize,
wire_format.ZigZagEncode)
# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED32, '<f')
DoubleEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED64, '<d')
def BoolEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a boolean field."""
false_byte = b'\x00'
true_byte = b'\x01'
if is_packed:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
def EncodePackedField(write, value):
write(tag_bytes)
local_EncodeVarint(write, len(value))
for element in value:
if element:
write(true_byte)
else:
write(false_byte)
return EncodePackedField
elif is_repeated:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeRepeatedField(write, value):
for element in value:
write(tag_bytes)
if element:
write(true_byte)
else:
write(false_byte)
return EncodeRepeatedField
else:
tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
def EncodeField(write, value):
write(tag_bytes)
if value:
return write(true_byte)
return write(false_byte)
return EncodeField
def StringEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a string field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
local_len = len
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
encoded = element.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
write(encoded)
return EncodeRepeatedField
else:
def EncodeField(write, value):
encoded = value.encode('utf-8')
write(tag)
local_EncodeVarint(write, local_len(encoded))
return write(encoded)
return EncodeField
def BytesEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a bytes field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
local_len = len
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(tag)
local_EncodeVarint(write, local_len(element))
write(element)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(tag)
local_EncodeVarint(write, local_len(value))
return write(value)
return EncodeField
def GroupEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a group field."""
start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP)
end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP)
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(start_tag)
element._InternalSerialize(write)
write(end_tag)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(start_tag)
value._InternalSerialize(write)
return write(end_tag)
return EncodeField
def MessageEncoder(field_number, is_repeated, is_packed):
"""Returns an encoder for a message field."""
tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
local_EncodeVarint = _EncodeVarint
assert not is_packed
if is_repeated:
def EncodeRepeatedField(write, value):
for element in value:
write(tag)
local_EncodeVarint(write, element.ByteSize())
element._InternalSerialize(write)
return EncodeRepeatedField
else:
def EncodeField(write, value):
write(tag)
local_EncodeVarint(write, value.ByteSize())
return value._InternalSerialize(write)
return EncodeField
# --------------------------------------------------------------------
# As before, MessageSet is special.
def MessageSetItemEncoder(field_number):
"""Encoder for extensions of MessageSet.
The message set message looks like this:
message MessageSet {
repeated group Item = 1 {
required int32 type_id = 2;
required string message = 3;
}
}
"""
start_bytes = b"".join([
TagBytes(1, wire_format.WIRETYPE_START_GROUP),
TagBytes(2, wire_format.WIRETYPE_VARINT),
_VarintBytes(field_number),
TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED)])
end_bytes = TagBytes(1, wire_format.WIRETYPE_END_GROUP)
local_EncodeVarint = _EncodeVarint
def EncodeField(write, value):
write(start_bytes)
local_EncodeVarint(write, value.ByteSize())
value._InternalSerialize(write)
return write(end_bytes)
return EncodeField
# --------------------------------------------------------------------
# As before, Map is special.
def MapEncoder(field_descriptor):
"""Encoder for extensions of MessageSet.
Maps always have a wire format like this:
message MapEntry {
key_type key = 1;
value_type value = 2;
}
repeated MapEntry map = N;
"""
# Can't look at field_descriptor.message_type._concrete_class because it may
# not have been initialized yet.
message_type = field_descriptor.message_type
encode_message = MessageEncoder(field_descriptor.number, False, False)
def EncodeField(write, value):
for key in value:
entry_msg = message_type._concrete_class(key=key, value=value[key])
encode_message(write, entry_msg)
return EncodeField
|
{
"content_hash": "2d1ca11ea3f3fedf7ad55b8ef3c106f7",
"timestamp": "",
"source": "github",
"line_count": 796,
"max_line_length": 80,
"avg_line_length": 32.65577889447236,
"alnum_prop": 0.6682696006770793,
"repo_name": "dstrockis/outlook-autocategories",
"id": "80e59cab0adecaf245786cde94d00e019e5a7a76",
"size": "27625",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "lib/google/protobuf/internal/encoder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39286"
},
{
"name": "CSS",
"bytes": "6267"
},
{
"name": "HTML",
"bytes": "449"
},
{
"name": "JavaScript",
"bytes": "6187"
},
{
"name": "Jupyter Notebook",
"bytes": "163002"
},
{
"name": "Python",
"bytes": "11957653"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
}
|
from twython import Twython
from easy_twitter.models import Settings
def get_tweets(account):
feed_account = Settings.objects.get(display_name=account)
twitter = Twython(feed_account.consumer_key, feed_account.consumer_secret, feed_account.access_token, feed_account.access_token_secret)
    time_line = twitter.get_user_timeline(
        # The Twitter API parameter is 'screen_name'; 'display_name' is not
        # a valid statuses/user_timeline argument.
        screen_name=feed_account.display_name,
        include_rts=feed_account.show_retweets,
        count=feed_account.count_limit,
        exclude_replies=feed_account.exclude_replies)
return time_line
def show_tweets(account):
tweets = get_tweets(account)
return tweets
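# Illustrative usage sketch (assumes a Settings row whose display_name is
# 'my_account' exists):
#   for tweet in show_tweets('my_account'):
#       print(tweet['text'])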
|
{
"content_hash": "721a0476934d8d4155f4e3fdcf49f044",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 199,
"avg_line_length": 38.75,
"alnum_prop": 0.75,
"repo_name": "publicFunction/django-easy-twitter",
"id": "91862464e8a5ad2422cc200d2f6ed8350f9362bb",
"size": "620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "easy_twitter/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9261"
}
],
"symlink_target": ""
}
|
"""
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
from lib import unwise
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class CloudyResolver(UrlResolver):
name = "cloudy.ec"
domains = ["cloudy.ec", "cloudy.eu", "cloudy.sx", "cloudy.ch", "cloudy.com"]
pattern = '(?://|\.)(cloudy\.(?:ec|eu|sx|ch|com))/(?:video/|v/|embed\.php\?id=)([0-9A-Za-z]+)'
def __init__(self):
self.net = common.Net()
def __get_stream_url(self, media_id, filekey, error_num=0, error_url=None):
'''
Get stream url.
If previously found stream url is a dead link, add error params and try again
'''
if error_num > 0 and error_url:
_error_params = '&numOfErrors={0}&errorCode=404&errorUrl={1}'.format(error_num, urllib.quote_plus(error_url).replace('.', '%2E'))
else:
_error_params = ''
# use api to find stream address
api_call = 'http://www.cloudy.ec/api/player.api.php?{0}&file={1}&key={2}{3}'.format(
'user=undefined&pass=undefined', media_id, urllib.quote_plus(filekey).replace('.', '%2E'), _error_params)
api_html = self.net.http_GET(api_call).content
rapi = re.search('url=(.+?)&title=', api_html)
if rapi:
return urllib.unquote(rapi.group(1))
return None
def __is_stream_url_active(self, web_url):
try:
header = self.net.http_HEAD(web_url)
if header.get_headers():
return True
return False
except:
return False
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
# grab stream details
html = self.net.http_GET(web_url).content
html = unwise.unwise_process(html)
filekey = unwise.resolve_var(html, "vars.key")
error_url = None
stream_url = None
        # try to resolve up to 3 times, then give up
        for x in range(0, 3):
link = self.__get_stream_url(media_id, filekey, error_num=x, error_url=error_url)
if link:
active = self.__is_stream_url_active(link)
if active:
stream_url = urllib.unquote(link)
break
else:
# link inactive
error_url = link
else:
# no link found
raise ResolverError('File Not Found or removed')
if stream_url:
return stream_url
else:
raise ResolverError('File Not Found or removed')
def get_url(self, host, media_id):
return 'http://www.cloudy.ec/embed.php?id=%s' % media_id
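    # Illustrative sketch (hypothetical media id):
    #   CloudyResolver().get_url('cloudy.ec', 'abc123')
    #   -> 'http://www.cloudy.ec/embed.php?id=abc123'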
|
{
"content_hash": "76403794a09e69d1012cf0d014aa91cf",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 141,
"avg_line_length": 35.552083333333336,
"alnum_prop": 0.5915616759449165,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "fedf8a002379bf67960a8873eb3297b3a024fe0a",
"size": "3413",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "script.module.urlresolver/lib/urlresolver/plugins/cloudy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import jobs.models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0027_auto_20160209_1709'),
]
operations = [
migrations.AlterField(
model_name='invitationcode',
name='code',
field=models.CharField(default=jobs.models.get_token, max_length=60, unique=True, verbose_name='Invitation code'),
),
migrations.AlterField(
model_name='invitationcode',
name='is_issued',
field=models.BooleanField(default=False, verbose_name='Issued?'),
),
migrations.AlterField(
model_name='invitationcode',
name='is_used',
field=models.BooleanField(default=False, verbose_name='Used?'),
),
migrations.AlterField(
model_name='job',
name='pub_date',
field=models.DateTimeField(default=datetime.datetime.now, editable=False, verbose_name='publish date'),
),
]
|
{
"content_hash": "3c80aab3c281340bbdc63f3c04cab9c7",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 126,
"avg_line_length": 31.057142857142857,
"alnum_prop": 0.6016559337626495,
"repo_name": "Santiago-vdk/jabbs",
"id": "c0ab6458f0f8b6983ea43bbf3e23066b871daab5",
"size": "1159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jobs/migrations/0028_auto_20160209_1806.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "114734"
},
{
"name": "HTML",
"bytes": "20903"
},
{
"name": "JavaScript",
"bytes": "428326"
},
{
"name": "Python",
"bytes": "45843"
}
],
"symlink_target": ""
}
|
"""
H. Déjean
copyright Xerox 2016
READ project
mine a document (itemset generation)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys, os.path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))
sys.path.append(os.path.dirname(os.path.abspath(sys.argv[0])))
from lxml import etree
import numpy as np
import common.Component as Component
from common.chrono import chronoOff , chronoOn
from spm.structuralMining import sequenceMiner
from spm.feature import featureObject
from ObjectModel.xmlDSDocumentClass import XMLDSDocument
from ObjectModel.XMLDSObjectClass import XMLDSObjectClass
from ObjectModel.XMLDSGRAHPLINEClass import XMLDSGRAPHLINEClass
from ObjectModel.XMLDSTEXTClass import XMLDSTEXTClass
from ObjectModel.XMLDSTOKENClass import XMLDSTOKENClass
from ObjectModel.XMLDSPageClass import XMLDSPageClass
from ObjectModel.treeTemplateClass import treeTemplateClass
from ObjectModel.XMLDSCELLClass import XMLDSTABLECELLClass
# from spm.spm2 import PrefixSpan
class pageVerticalMiner(Component.Component):
"""
pageVerticalMiner class: a component to mine column-like page layout
"""
#DEFINE the version, usage and description of this particular component
usage = ""
version = "v.01"
description = "description: page vertical Zones miner "
kContentSize ='contentSize'
#--- INIT -------------------------------------------------------------------------------------------------------------
def __init__(self):
"""
Always call first the Component constructor.
"""
Component.Component.__init__(self, "pageVerticalMiner", self.usage, self.version, self.description)
# tag level
self.sTag= XMLDSTEXTClass
# TH for comparing numerical features for X
self.THNUMERICAL = 30
# use for evaluation
        self.testTH = 30 # used for --test BUT ALSO IN RUN !!
self.THCOMP = 10
self.evalData= None
self.bDomTag=True
# TH for sequentiality detection (see structuralMining)
self.fKleenPlusTH =1.5
# pattern provided manually
self.bManual = False
# evaluation using the baseline
self.baselineMode = 0
# ignore existing regions
self.bIgnoreRegions=True
# do not use graphical lines
self.bNOGline = False
# only use GL
self.bGLOnly= False
# does the scanning introduce vertical shift?
self.bScanShift = False
def setParams(self, dParams):
"""
Always call first the Component setParams
Here, we set our internal attribute according to a possibly specified value (otherwise it stays at its default value)
"""
Component.Component.setParams(self, dParams)
if "pattern" in dParams:
self.manualPattern = eval( dParams["pattern"])
self.bManual=True
if "THNUM" in dParams:
self.testTH = dParams["THNUM"]
if "KLEENETH" in dParams:
self.fKleenPlusTH = dParams["KLEENETH"]
if 'baseline' in dParams:
self.baselineMode = dParams['baseline']
if 'bIgnoreRegions' in dParams:
self.bIgnoreRegions = dParams['bIgnoreRegions']
if 'nogline' in dParams:
self.bGLOnly = dParams['nogline']
if 'glonly' in dParams:
self.bNOGline = dParams['glonly']
if 'tag' in dParams:
self.sTag = dParams['tag']
def minePageDimensions(self,lPages):
"""
        use page dimensions to build the highest-level structure
        needs iterations!
"""
        self.THNUMERICAL = 60  # 2 cm
## initialization for iter 0
for page in lPages:
page.setFeatureFunction(page.getSetOfFeaturesPageSize,self.THNUMERICAL)
page.computeSetofFeatures()
seqGen = sequenceMiner()
seqGen.setMaxSequenceLength(1)
seqGen.setObjectLevel(XMLDSPageClass)
lSortedFeatures = seqGen.featureGeneration(lPages,2)
for _,p in enumerate(lPages):
p.lFeatureForParsing=p.getSetofFeatures()
icpt=0
lCurList=lPages[:]
lTerminalTemplates=[]
while icpt <=0:
if icpt > 0:
# N ?
seqGen.setMaxSequenceLength(1)
# print '***'*20
seqGen.bDebug = False
for elt in lCurList:
if elt.getSetofFeatures() is None:
elt.resetFeatures()
elt.setFeatureFunction(elt.getSetOfListedAttributes,self.THNUMERICAL,lFeatureList=['virtual'],myLevel=XMLDSPageClass)
elt.computeSetofFeatures()
elt.lFeatureForParsing=elt.getCanonicalFeatures()
else:
elt.setSequenceOfFeatures(elt.lFeatureForParsing)
lSortedFeatures = seqGen.featureGeneration(lCurList,1)
lmaxSequence = seqGen.generateItemsets(lCurList)
seqGen.bDebug = False
            # mis (minimum item support) is very small since the space is
            # small and some specific page types can be rare
            ## replace by PrefixScan
lSeq, _ = seqGen.generateMSPSData(lmaxSequence,lSortedFeatures + lTerminalTemplates,mis = 0.002)
lPatterns = seqGen.miningSequencePrefixScan(lSeq,minSupport=0.01,maxPatternLength=3)
# lPatterns = seqGen.beginMiningSequences(lSeq,lSortedFeatures,lMIS)
if lPatterns is None:
return [lPages]
# ignore unigram: covered by previous steps
if icpt < 3:
lPatterns = list(filter(lambda p_s:len(p_s[0][0])>1,lPatterns))
lPatterns.sort(key=lambda x_y:x_y[1], reverse=True)
seqGen.bDebug = False
seqGen.THRULES = 0.8
lSeqRules = seqGen.generateSequentialRules(lPatterns)
_,dCP = self.getPatternGraph(lSeqRules)
dTemplatesCnd = self.pattern2PageTemplate(lPatterns,dCP,icpt)
#no new template: stop here
if dTemplatesCnd == {}:
icpt=9e9
break
_,lTerminalTemplates,_ = seqGen.testTreeKleeneageTemplates(dTemplatesCnd, lCurList)
# print tranprob
# self.pageSelectFinalTemplates(lTerminalTemplates,tranprob,lCurList)
## store parsed sequences in mytemplate
for templateType in dTemplatesCnd.keys():
for _,_, mytemplate in dTemplatesCnd[templateType]:
# _,lCurList = self.parseWithTemplate(mytemplate,lCurList,bReplace=True)
_,_,lCurList = seqGen.parseWithTreeTemplate(mytemplate, lCurList, bReplace=True)
for elt in lCurList:
if elt.getSetofFeatures() is None:
elt.resetFeatures()
elt.setFeatureFunction(elt.getSetOfListedAttributes,self.THNUMERICAL,lFeatureList=['virtual'],myLevel=XMLDSPageClass)
elt.computeSetofFeatures()
elt.lFeatureForParsing=elt.getSetofFeatures()
icpt +=1
# if self.bDebug:self.printTreeView(lCurList)
lList = self.getFlatStructure(lCurList)
# print lList
del seqGen
# return also the tree ; also organize elements per level/pattern
return lList
def pattern2PageTemplate(self,lPatterns,dCA,step):
"""
select patterns and convert them into appropriate templates.
Need to specify the template for terminals; or simply the registration function ?
"""
dTemplatesTypes = {}
for pattern,support in filter(lambda x_y:x_y[1]>1,lPatterns):
try:
dCA[str(pattern)]
bSkip = True
# print 'skip:',pattern
except KeyError:bSkip=False
# first iter: keep only length=1
# test type of patterns
bSkip = bSkip or (step > 0 and len(pattern) == 1)
bSkip = bSkip or (len(pattern) == 2 and pattern[0] == pattern[1])
# print pattern, bSkip
if not bSkip:
print ('========',pattern, support, self.isMirroredPattern(pattern))
## test is pattern is mirrored
template = treeTemplateClass()
template.setPattern(pattern)
template.buildTreeFromPattern(pattern)
template.setType('lineTemplate')
try:dTemplatesTypes[template.__class__.__name__].append((pattern, support, template))
except KeyError: dTemplatesTypes[template.__class__.__name__] = [(pattern,support,template)]
return dTemplatesTypes
def pattern2TAZonesTemplate(self,lPatterns,dCA):
"""
TA patterns
select patterns and convert them into appropriate templates.
"""
dTemplatesTypes = {}
for pattern,support in filter(lambda x_y:x_y[1]>1,lPatterns):
bSkip=False
try:
dCA[str(pattern)]
bSkip = True
# if len(pattern)==2 and len( pattern[0] ) == len( pattern[1]) == 2:
except KeyError:bSkip=False
if not bSkip:
# if ( len(pattern) == 1 and len(pattern[0])== 2 and pattern[0][0].getValue() !=pattern[0][1].getValue()) or (len(pattern)==2 and len( pattern[0] ) == len( pattern[1] ) == 2 ):
if ( len(pattern) == 1 and len(pattern[0]) >= 1) :
# print pattern, support
template = treeTemplateClass()
template.setPattern(pattern)
template.buildTreeFromPattern(pattern)
template.setType('lineTemplate')
try:dTemplatesTypes[template.__class__.__name__].append((pattern, support, template))
except KeyError: dTemplatesTypes[template.__class__.__name__] = [(pattern,support,template)]
for ttype in dTemplatesTypes.keys():
dTemplatesTypes[ttype].sort(key=lambda x_y_t:len(x_y_t[0]), reverse=True)
return dTemplatesTypes
def isCorrectPattern(self,pattern):
"""
if length = 1: at least 2 elements (one zone)
if length =2:
- same number of elements
- at least one width similar
"""
if len(pattern) == 1:
            # still useful?
return len(pattern[0])>=2 and pattern[0][0].getValue() !=pattern[0][1].getValue()
elif len(pattern) == 2:
# if len(pattern[0]) != 2: return False
# bOK = len( pattern[0] ) == len( pattern[1] ) >= 2
# same width: the longest width must be shared
inv1 = pattern[1][:]
lcouple1= zip(inv1,inv1[1:])
lw1= map(lambda x_y:abs(x_y[1].getValue()-x_y[0].getValue()),lcouple1)
max1 =max(lw1)
lcouple0= zip(pattern[0],pattern[0][1:])
lw0= map(lambda x_y:abs(x_y[1].getValue()-x_y[0].getValue()),lcouple0)
max0= max(lw0)
## all width similar???
# print pattern, zip(lw0,lw1) , max0,max1, abs(max1 - max0) < self.THNUMERICAL*2
return abs(max1 - max0) < self.THNUMERICAL*2
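    # Illustrative check with hypothetical feature values and THNUMERICAL=30:
    # pattern [[x=100, x=400], [x=90, x=410]] gives widths 300 vs 320, and
    # |320 - 300| = 20 < 2*30, so the two-column pattern is accepted.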
def pattern2VerticalZonesTemplate(self,lPatterns,dCA):
"""
select patterns and convert them into appropriate templates.
Need to specify the template for terminals; or simply the registration function ?
"""
dTemplatesTypes = {}
iNbClosed = 0
for pattern,support in filter(lambda x_y:x_y[1]>1,lPatterns):
bSkip=False
try:
dCA[str(pattern)]
bSkip = True
# if len(pattern)==2 and len( pattern[0] ) == len( pattern[1]) == 2:
except KeyError:bSkip=False
# duplicated a,b
bSkip = bSkip or (len(pattern) == 2 and pattern[0] == pattern[1])
if not bSkip:
iNbClosed+=1
# if ( len(pattern) == 1 and len(pattern[0])>=2 and pattern[0][0].getValue() !=pattern[0][1].getValue()) or (len(pattern)==2 and len( pattern[0] ) == len( pattern[1] ) >= 2 ):
if (len(pattern[0]) == 3 ):
if self.isCorrectPattern(pattern):
                    ## also widths must be similar: [['x=115.0', 'x=433.0'], ['x=403.0', 'x=433.0']] is not possible !!
template = treeTemplateClass()
template.setPattern(pattern)
template.buildTreeFromPattern(pattern)
template.setType('VTemplate') # ??
try:dTemplatesTypes[template.__class__.__name__].append((pattern, support, template))
except KeyError: dTemplatesTypes[template.__class__.__name__] = [(pattern,support,template)]
print ("closed-patterns: ", iNbClosed)
for ttype in dTemplatesTypes.keys():
# dTemplatesTypes[ttype].sort(key=lambda (x,y,t):len(x[0]), reverse=True)
dTemplatesTypes[ttype].sort(key=lambda p_s_t:p_s_t[1], reverse=True)
return dTemplatesTypes
def computeObjectProfile(self,lPages):
"""
Compute the following information:
* for Pages
- surface covered by lines
            - # lines per page : issue with noisy elements
- avg BB
* for lines
- avg width (gaussian?)
- avg height (gaussian)? would be strange
"""
lPageProfiles={}
lPageProfiles[self.kContentSize]=[]
for page in lPages:
lElts= page.getAllNamedObjects(XMLDSTEXTClass)
surface= sum( (x.getWidth()*x.getHeight() for x in lElts))/ (page.getWidth()*page.getHeight())
lPageProfiles[self.kContentSize].append(surface)
return lPageProfiles
def minePageVerticalFeature2D(self,lPages,lFeatureList,level=XMLDSTEXTClass):
"""
Input:
lPages: list of pages
lFeatureList: list of features to be used
level: level ot be used
        Output: pages decorated with features
"""
chronoOn()
## COMMUN PROCESSING
### DEPENDS ON OBJECT LEVEL !! TEXT/TOKEN!!
lLElts=[ [] for i in range(0,len(lPages))]
for i,page in enumerate(lPages):
lElts= page.getAllNamedObjects(level)
lElts.sort(key=lambda x:x.getY())
lLElts[i]=lElts
### VerticalZones START
for i,page, in enumerate(lPages):
page._VX1X2Info=[]
lElts=[]
if not self.bGLOnly:
lElts= lLElts[i]
for elt in lElts:
elt.resetFeatures()
elt._canonicalFeatures = None
elt.setFeatureFunction(elt.getSetOfX1X2Attributes,50,lFeatureList=lFeatureList,myLevel=level)
elt.computeSetofFeatures()
# GRAPHICAL LINES
gl = []
if not self.bNOGline:
for graphline in page.getAllNamedObjects(XMLDSGRAPHLINEClass):
graphline.resetFeatures()
graphline._canonicalFeatures = None
if graphline.getHeight() > graphline.getWidth() and graphline.getHeight() > 30: #30 twice the height of a line
gl.append(graphline)
# create a feature
f = featureObject()
f.setType(featureObject.NUMERICAL)
f.setTH(self.THNUMERICAL)
f.setWeight(graphline.getHeight()/64000)
f.setName("x")
f.setObjectName(graphline)
f.addNode(graphline)
f.setValue(round(graphline.getX()))
graphline.addFeature(f)
page.setVGLFeatures(f)
## select regular x
seqGen = sequenceMiner()
# seqGen.bDebug =self.bDebug
_fullFeatures = seqGen.featureGeneration(lElts+gl,1)
for fx in _fullFeatures:
fx.setWeight(sum(x.getHeight() for x in fx.getNodes())/64000)
fx.setWeight(len(fx.getNodes()))
# print(fx,fx.getWeight())
page.setX1X2(_fullFeatures)
del seqGen
self.buildPartitions(lPages)
print ('chronoFeature',chronoOff())
return lPages
def minePageVerticalFeature2(self,lPages,lFeatureList,level=XMLDSTEXTClass):
"""
        get page features for vertical zones: find regular vertical blocks/text structure
"""
import util.TwoDNeighbourhood as TwoDRel
chronoOn()
## COMMUN PROCESSING
### DEPENDS ON OBJECT LEVEL !! TEXT/TOKEN!!
lVEdge = []
lLElts=[ [] for i in range(0,len(lPages))]
for i,page in enumerate(lPages):
page.resetFeatures()
page._canonicalFeatures=None
lElts= page.getAllNamedObjects(level)
for e in lElts:
e.next=[]
lElts.sort(key = lambda x:x.getY())
lLElts[i]=lElts
lVEdge = TwoDRel.findVerticalNeighborEdges(lElts)
for a,b in lVEdge:
a.next.append( b )
### VerticalZones START
for i,page, in enumerate(lPages):
page._VX1Info=[]
lElts=[]
if not self.bGLOnly:
lElts= lLElts[i]
for elt in lElts:
elt.resetFeatures()
elt._canonicalFeatures = None
elt.setFeatureFunction(elt.getSetOfListedAttributes,self.THNUMERICAL,lFeatureList=lFeatureList,myLevel=level)
# elt.setFeatureFunction(elt.getSetOfListedAttributes,self.THNUMERICAL,lFeatureList=lFeatureList,myLevel=level)
# elt.setFeatureFunction(elt.getSetOfListedAttributes,self.THNUMERICAL,lFeatureList=lFeatureList,myLevel=XMLDSTOKENClass)
elt.computeSetofFeatures()
## rename all as 'x'
[x.setName('x') for x in elt.getSetofFeatures()]
## select regular x
seqGen = sequenceMiner()
# seqGen.bDebug =self.bDebug
_fullFeatures = seqGen.featureGeneration(lElts,1)
for fx in _fullFeatures:
fx.setWeight(sum(x.getHeight() for x in fx.getNodes())/64000)
# for article
fx.setWeight(len(fx.getNodes()))
# lKleendPlus = self.getKleenePlusFeatures(lElts)
page.setVX1Info(_fullFeatures)
# page.setVX1Info(_fullFeatures)
del seqGen
self.buildVZones(lPages)
print ('chronoFeature',chronoOff())
return lPages
def minePageVerticalFeature(self,lPages,lFeatureList,level=XMLDSTEXTClass):
"""
        get page features for vertical zones: find regular vertical blocks/text structure
"""
import util.TwoDNeighbourhood as TwoDRel
chronoOn()
## COMMUN PROCESSING
### DEPENDS ON OBJECT LEVEL !! TEXT/TOKEN!!
lVEdge = []
lLElts=[ [] for i in range(0,len(lPages))]
for i,page in enumerate(lPages):
page.resetFeatures()
page._canonicalFeatures=None
lElts= page.getAllNamedObjects(level)
# lElts= page.getAllNamedObjects(XMLDSTOKENClass)
for e in lElts:
e.next=[]
lElts.sort(key=lambda x:x.getY())
lLElts[i]=lElts
lVEdge = TwoDRel.findVerticalNeighborEdges(lElts)
for a,b in lVEdge:
a.next.append( b )
### VerticalZones START
for i,page, in enumerate(lPages):
page._VX1Info=[]
lElts=[]
if not self.bGLOnly:
lElts= lLElts[i]
for elt in lElts:
elt.resetFeatures()
elt._canonicalFeatures = None
elt.setFeatureFunction(elt.getSetOfListedAttributes,self.THNUMERICAL,lFeatureList=lFeatureList,myLevel=level)
# elt.setFeatureFunction(elt.getSetOfListedAttributes,self.THNUMERICAL,lFeatureList=lFeatureList,myLevel=XMLDSTOKENClass)
elt.computeSetofFeatures()
## rename all as 'x'
[x.setName('x') for x in elt.getSetofFeatures()]
# GRAPHICAL LINES
gl = []
if not self.bNOGline:
for graphline in page.getAllNamedObjects(XMLDSGRAPHLINEClass):
# Y
# graphline.resetFeatures()
# graphline._canonicalFeatures = None
# if graphline.getHeight() > graphline.getWidth(): # and graphline.getHeight() > 30: #30 twice the height of a line
#
# gl.append(graphline)
# # create a feature
# f = featureObject()
# f.setType(featureObject.NUMERICAL)
# f.setTH(self.THNUMERICAL)
# f.setWeight(graphline.getHeight()/64000)
# f.setName("x")
# f.setObjectName(graphline)
# f.addNode(graphline)
# f.setValue(round(graphline.getX()))
# graphline.addFeature(f)
# page.setVGLFeatures(f)
#X
graphline.resetFeatures()
graphline._canonicalFeatures = None
if graphline.getWidth() > graphline.getHeight(): # and graphline.getHeight() > 30: #30 twice the height of a line
gl.append(graphline)
# create a feature
f = featureObject()
f.setType(featureObject.NUMERICAL)
f.setTH(self.THNUMERICAL)
f.setWeight(graphline.getWidth())
f.setName("x")
f.setObjectName(graphline)
f.addNode(graphline)
f.setValue(round(graphline.getY()))
graphline.addFeature(f)
page.setVGLFeatures(f)
## select regular x
seqGen = sequenceMiner()
# seqGen.bDebug =self.bDebug
_fullFeatures = seqGen.featureGeneration(lElts+gl,1)
for fx in _fullFeatures:
fx.setWeight(sum(x.getHeight() for x in fx.getNodes())/64000)
# for article
fx.setWeight(len(fx.getNodes()))
lKleendPlus = self.getKleenePlusFeatures(lElts)
page.setVX1Info(lKleendPlus)
# page.setVX1Info(_fullFeatures)
del seqGen
self.buildVZones(lPages)
print ('chronoFeature',chronoOff())
return lPages
def getKleenePlusFeatures(self,lElts):
"""
select KleenePlus elements based on .next (only possible for unigrams)
"""
dFreqFeatures={}
dKleenePlusFeatures = {}
lKleenePlus=[]
for elt in lElts:
if elt.getCanonicalFeatures() is not None:
for fea in elt.getCanonicalFeatures():
if len(fea.getNodes())>0:
try:dFreqFeatures[fea] +=1
except KeyError:dFreqFeatures[fea] = 1
for nextE in elt.next:
if fea in nextE.getSetofFeatures():
try:
dKleenePlusFeatures[fea].append((elt,nextE))
# dKleenePlusFeatures[fea].append(elt)
except KeyError:
dKleenePlusFeatures[fea]=[(elt,nextE)]
# dKleenePlusFeatures[fea].append(nextE)
for fea in dFreqFeatures:
try:
dKleenePlusFeatures[fea]
# print ("###",fea, len(set(dKleenePlusFeatures[fea])), dFreqFeatures[fea] ) #, dKleenePlusFeatures[fea]
if len(set(dKleenePlusFeatures[fea])) >= 0.5 * dFreqFeatures[fea]:
lKleenePlus.append(fea)
fea.setKleenePlus(1.0*len(set(dKleenePlusFeatures[fea])) / dFreqFeatures[fea])
except KeyError:
pass
return lKleenePlus
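    # A feature is promoted to Kleene-plus when at least half of the elements
    # carrying it are directly followed (via .next) by another element with
    # the same feature, i.e. the feature forms a vertical chain; the exact
    # ratio is stored on the feature with setKleenePlus.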
def recPathGeneration(self,curSeq,lSeq):
"""
input: list of (x1,x2) features
        output: combinatorial generation of all possible layouts
"""
cur,lRest = lSeq[0],lSeq[1:]
curSeq.append(cur)
lRecList=[curSeq]
curmax=cur.getValue()[-1]
# print ("x",cur,lRest,curmax)
for i,xx2 in enumerate(lRest):
# print (xx2, xx2.getValue()[0] , curmax)
if xx2.getValue()[0] > curmax:
curmax= xx2.getValue()[-1]
ln = self.recPathGeneration(curSeq[:],lRest[i:])
# print ('\t',ln)
lRecList.extend(ln)
# print (lRecList[-1])
return lRecList
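    # recPathGeneration enumerates every increasing, non-overlapping sequence
    # of (x1,x2) intervals: it branches on each compatible continuation, so
    # the number of generated paths can grow exponentially with the number of
    # candidate intervals.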
def buildPartitions(self,lp):
"""
input: list of pages with horizontal 2Dfeatures
        output: list of possible horizontal compatible partitions (sequences of compatible horizontal intervals)
"""
from spm.feature import setOfPointsFeatureObject
for page in lp:
page._Vpartitions=[]
# page.getX1X2().sort(key=lambda x:x.getWeight(),reverse=True)
page.getX1X2().sort(key=lambda x:x.getValue()[0])
lLSeq=[]
for i,x in enumerate(page.getX1X2()):
lLSeq.extend(self.recPathGeneration([], page.getX1X2()[i:]))
lLSeq.sort (key=lambda lx: sum(map(lambda x:x.getWeight(),lx)),reverse=True)
for ln in lLSeq:
feature = setOfPointsFeatureObject()
feature.setName('seqV')
feature.setTH(5)
feature.addNode(page)
feature.setObjectName(page)
feature.setValue(list(map(lambda x:x.getValue(),ln)))
feature.setType(featureObject.COMPLEX)
page.addFeature(feature)
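    # Each candidate sequence becomes a COMPLEX setOfPointsFeatureObject on
    # the page; sequences are added best-first, ordered by the summed weights
    # of their intervals.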
def buildVZones(self,lp):
"""
store vertical positions in each page
"""
for _, p in enumerate(lp):
p.lf_XCut=[]
p.getVX1Info().sort(key=lambda x:x.getWeight(),reverse=True)
# print (p, p.getVX1Info(),p.getVGLFeatures(), p.lf_XCut)
for fi in p.getVX1Info():
if fi not in p.lf_XCut:
# l = sum(x.getWidth()*x.getHeight() for x in fi.getNodes())
l = sum(x.getHeight() for x in fi.getNodes())
fi.setWeight(l)
p.lf_XCut.append(fi)
# else:
# print 'skip!',p, fi, fi.getWeight()
p.getVGLFeatures().sort(key=lambda x:x.getWeight(),reverse=True)
for fi in p.getVGLFeatures():
if fi not in p.lf_XCut:
l = sum(x.getWidth()*x.getHeight() for x in fi.getNodes())
fi.setWeight(l)
p.lf_XCut.append(fi)
# else:
# print 'skip!',p, fi, fi.getWeight()
p.lf_XCut.sort(key=lambda x:x.getWeight(),reverse=True)
p.lf_XCut = p.lf_XCut #[:15]
p.lf_XCut.sort(key=lambda x:x.getValue())
# if self.bDebug : print p, map(lambda x:(x.getTH(),x.getCanonical().getValue(),x.getCanonical().getWeight()),p.lf_XCut)
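    # After buildVZones, p.lf_XCut holds the candidate vertical cuts of the
    # page: text-based features weighted by the total height they cover and
    # graphical-line features weighted by their area, finally sorted by value.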
def getTerminals(self,node):
"""
get terminal objects
"""
if not node.getAttribute('virtual'):
return [node]
lReturn=[]
for obj in node.getObjects():
lReturn.extend(self.getTerminals(obj))
return lReturn
def getFlatStructure(self,lElts,level=1):
lRes=[]
for elt in lElts:
if elt.getAttribute('virtual'):
lRes.append(self.getFlatStructure(elt.getObjects(),level+1))
else:
lRes.append([elt])
try:
if len(lRes) == len([ x for y in lRes for x in y]):
lRes= [ x for y in lRes for x in y]
except TypeError:pass
return lRes
def printTreeView(self,lElts,level=0):
"""
recursive
"""
for elt in lElts:
if elt.getAttribute('virtual'):
print (" "*level, 'Node', elt.getAttribute('virtual'))
self.printTreeView(elt.getObjects(),level+1)
# else:
# print (" "*level, elt.getContent(), elt.lFeatureForParsing)
def processWithTemplate(self,lPattern,lPages):
"""
        process a sequence of pages with a given pattern
        create table
"""
        ## artificial construction Motter12
# lPattern= [ [41.0, 110,442.0] , [40,340,442] ]
## MO12 pattern2
# lPattern= [ [41.0, 110,250,340,420,442.0]]
## RRB
# lPattern= [ [15,97.0, 311.0]]
#MF012
# lPattern= [ [27.0, 361,430.0] , [27.0,86.0,430.0] ]
# lPattern = [[19,104,277,371,470,],[19,60,104,371,470]]
# #nn_n0171 (hub) [['x=295.0', 'x=850.0'], ['x=34.0', 'x=572.0']]
# lPattern = [ [34.0, 564.0, 738.0], [156.0, 339.0, 846.0] ]
# lPattern = [[295,850,],[34,572]]
#lib
# lPattern = [[28,321],[144,449]]
# lPattern = [ [144.0, 449]]
lfPattern= []
for itemset in lPattern:
fItemset = []
for item in itemset:
f= featureObject()
f.setName("x")
f.setType(featureObject.NUMERICAL)
f.setValue(item)
f.setTH(self.THNUMERICAL)
fItemset.append(f)
f.setWeight(64000)
lfPattern.append(fItemset)
pattern = lfPattern
print (pattern)
print (self.THNUMERICAL)
maintemplate = treeTemplateClass()
maintemplate.buildTreeFromPattern(pattern)
### in prodf: mytemplate given by page.getVerticalTemplates()
mytemplate1 = treeTemplateClass()
mytemplate1.setPattern(pattern[0])
mytemplate2 = treeTemplateClass()
# mytemplate2.setPattern(pattern [1])
if len(lPattern)==2:
mytemplate2.setPattern(pattern[1])
mytemplate2.setParent(maintemplate)
mytemplate1.setParent(maintemplate)
else:
mytemplate2.setPattern(pattern[0])
# registration provides best matching
## from registration matched: select the final cuts
lScores = []
for i,p in enumerate(lPages):
if i %2==0:
mytemplate= mytemplate1
else:
mytemplate = mytemplate2
p.lFeatureForParsing = p.lf_XCut
# print p, p.lf_XCut
sys.stdout.flush()
registeredPoints, _, score = mytemplate.registration(p)
# registeredPoints2, lMissing2, score2 = mytemplate2.registration(p)
# print i,p,registeredPoints
# if score1 == score 2 !!
if score > 0 : # and score1 >= score2:
lfinalCuts= list(map(lambda x_y:x_y[1],list(filter(lambda x_y:x_y[0] != 'EMPTY',registeredPoints))))
# print p,'final1:',lfinalCuts, lMissing, score, '\t\t',registeredPoints
lScores.append(score)
p.addVerticalTemplate(mytemplate)
p.addVSeparator(mytemplate,lfinalCuts)
# elif score2 > 0 and score2 > score1:
# lfinalCuts= map(lambda x_y:x_y[1],filter(lambda (x,y): x!= 'EMPTY',registeredPoints2))
# print registeredPoints2
# print 'final2:',lfinalCuts, lMissing2
# p.addVerticalTemplate(mytemplate2)
# p.addVSeparator(mytemplate,lfinalCuts)
else:
print ('NO REGISTRATION')
fscore= np.average(lScores)
print ('final score:', fscore)
self.tagAsRegion(lPages)
return 1
def computePatternScore(self,pattern):
"""
consider the frequency of the pattern and the weights of the features
"""
fScore = 0
#terminal
if not isinstance(pattern, list):
fScore += pattern.getCanonical().getWeight()
else:
for child in pattern:
fScore += self.computePatternScore(child)
# print 'score:',pattern ,fScore
return fScore
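    # computePatternScore sums the canonical weights of all terminal features
    # of a (possibly nested) pattern, e.g. for [[f1, f2], [f3]] it returns
    # w(f1) + w(f2) + w(f3); it is used to rank the mined patterns.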
def filterNonRegularPatterns(self,lPatterns):
"""
a template must provide a simplr way to compute the reading order
when a template is unigram: no issue
when a template is bigram: it has to be balanced (same number of vertical zones)
"""
"""
it has an impact on getPatternGraph
"""
return list(filter(lambda x_y: (len(x_y[0]) != 2) or (len(x_y[0][0])==len(x_y[0][1])) , lPatterns))
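    # In other words: patterns that are not bigrams always pass; a bigram
    # passes only when both of its itemsets have the same length (a balanced
    # number of vertical zones).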
def highLevelSegmentation(self,lPages):
"""
use: image size and content (empty pages)
"""
lSubList= self.minePageDimensions(lPages)
return lSubList
    # useful???? seems there is a 'batch size' later on
lNewSub=[]
for lp in lSubList:
lProfiles = self.computeObjectProfile(lp)
lS = self.segmentWithProfile(lp,lProfiles[self.kContentSize])
lNewSub.extend(lS)
return lNewSub
def segmentWithProfile(self,lPages,lListSurface):
"""
compute average surface, min, max
and find a threshold for "empty" pages
"""
mean = np.mean(lListSurface)
std = np.std(lListSurface)
# print mean, std
lSubList=[]
lcur=[]
for i,surface in enumerate(lListSurface):
# print i, surface, surface < mean-std*2
if surface < mean-std*2:
lcur.append(lPages[i])
lSubList.append(lcur)
lcur =[]
# print lSubList, lcur
else:
lcur.append(lPages[i])
if lcur !=[]:
lSubList.append(lcur)
return lSubList
def testHighSupport(self,sequences):
"""
compute unigram support
"""
# from mssp
from collections import Counter
import itertools
sequence_count = len(sequences)
flattened_sequences = [ list(set(itertools.chain(*sequence))) for sequence in sequences ]
support_counts = dict(Counter(item for flattened_sequence in flattened_sequences for item in flattened_sequence))
actual_supports = {item:support_counts.get(item)/float(sequence_count) for item in support_counts.keys()}
print(actual_supports.items())
lOneSupport= [k for k,v in actual_supports.items() if v >= 0.5 ]
return lOneSupport
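    # Worked example (hypothetical items): for sequences
    # [[['a','b']], [['a']], [['b','c']]] the per-sequence item sets are
    # {a,b}, {a}, {b,c}; the supports are a=2/3, b=2/3, c=1/3, so
    # lOneSupport contains 'a' and 'b' (items with support >= 0.5).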
def createTableTemplate(self,lTemplate):
"""
create a pxml for each template
"""
from xml_formats.PageXml import PageXml
from util.unitConversion import convertDot2Pixel
page= self.lPages
for temp in lTemplate:
pageXmlDoc,pageNode = PageXml.createPageXmlDocument(creatorName='NLE', filename = "%s_%s"% (self.getInputFileName(),'tt.pxml'), imgW = convertDot2Pixel(self.dpi,page.getWidth()), imgH = convertDot2Pixel(self.dpi,page.getHeight()))
tablenode= PageXml.createPageXmlNode('TableRegion')
pageNode.append(tablenode)
            # create a table and one cell per column
for child in temp.getChildren():
cellnode = PageXml.createPageXmlNode('TableCell')
tablenode.append(cellnode)
print (child.getPattern().getValue())
def iterativeProcessVSegmentation(self, lLPages):
"""
process lPages by batch
parameter: NBATCH, THNUM?
        W unigram: if not an issue, this is mandatory!!
        -> with table???
        very nice for segmenting: it means a different template ??
        for W /TA
        identify sequences where empty states are in the majority (by dichotomy?)
        collect information for the cut !! a la specbook!!
"""
setFullListTemplates= set()
for lPages in lLPages:
# lsubList = [lPages]
self.THNUMERICAL = self.testTH
for p in lPages:
p.resetVerticalTemplate()
# self.bDebug=True
# V positions
NBATCH = 3
for nbPage in range(0,len(lPages),NBATCH):
# print nbPage, nbPage + NBATCH
# print lPages[nbPage:nbPage+NBATCH]
sys.stdout.flush()
print ("LENGTH = 1", self.sTag)
# self.minePageVerticalFeature(lPages[nbPage:nbPage+NBATCH], ['x','x2'],level=self.sTag)
self.minePageVerticalFeature2(lPages[nbPage:nbPage+NBATCH], ['x','xxxx2'],level=self.sTag)
## V ZONES
# length 1
lT1, lScore1, score1 = self.processVSegmentation(lPages[nbPage:nbPage+NBATCH],[],bTAMode=False,iMinLen=1,iMaxLen=1)
print( nbPage, nbPage + NBATCH, lT1, score1)
print( '\t',lScore1)
sys.stdout.flush()
bTable = True #lT1 is not None and lT1 !=[] and len(lT1[0].getPattern()) > 6
# if lT1 is not None:
# self.createTableTemplate(lT1)
# ss
##" If parsing structure K+ has good coverage: skip length2?
# if high enough score: skip len=2?
# sys.stdout.flush()
lOneSupport=[]
print( "LENGTH = 2")
if len( lPages) > 1 and bTable or len(lOneSupport) > 18:
score2=0
lT2=None
lScore2=[]
else:
if lT1 is not None:
lNegativesPatterns=list(map(lambda x:x.getPattern(),lT1))
else: lNegativesPatterns=[]
lT2, lScore2, score2 = self.processVSegmentation(lPages[nbPage:nbPage+NBATCH],lNegativesPatterns,bTAMode=False,iMinLen=2,iMaxLen=2)
# score2=0
# lT2=None
# lScore2=[]
# test if cut somewhere
# segment and relearn : if better score: keep the cut
print (nbPage, nbPage+NBATCH, lT2, score2)
print ('\t',lScore2)
# for ii,(_,score) in enumerate(lScore2):
# print '\t',ii,score
bTwo=False
lT=None
# update
if score1 is None: score1 = -1
if score2 is None:score2 = -1
if score2 > score1:
bTwo=True
ldeltemplateset=lT1
lT=lT2
else:
ldeltemplateset=lT2
lT=lT1
if ldeltemplateset:
for p in lPages[nbPage:nbPage+NBATCH]:
# print p.getVerticalTemplates()
for deltemplate in ldeltemplateset:
try:
p.getVerticalTemplates().remove(deltemplate)
except ValueError:pass # page not associated
print ("#",lPages[nbPage:nbPage+NBATCH], bTwo , lT)
## final step: finetuning and creation of separator???
## consider tokens and find best line: even correction with new nomacs??
# self.correctionStep(lPages)
# for visu
# if self.bDomTag:self.tagDomAsTable(lPages)
if self.bDomTag:self.tagAsRegion(lPages)
## build full list of templates
### smooth: see bar 13685: if subpatterns (due to content break)
for p in lPages:
# what is stored in VerticalTemplate: the terminal element: take the parent for the full template
## this is needed by the Viterbi/matching step
for t in p.getVerticalTemplates():
parentTemplate = t.getParent()
setFullListTemplates.add(parentTemplate)
if parentTemplate is not None:
parentTemplate.bIsMirrored = self.isMirroredPattern(parentTemplate.getPattern())
# print setFullListTemplates
return setFullListTemplates
def processVSegmentation(self,lPages,lNegativePatterns,bTAMode= False,iMinLen=1, iMaxLen=1):
"""
        use vertical block/text info to find vertical patterns at page level
Should correspond to column-like zones
"""
# self.bDebug = False
for _,p in enumerate(lPages):
p._lBasicFeatures=p.lf_XCut[:]
# print (p, list(map(lambda x:(x,x.getTH()),p.getSetofFeatures())))
# print p, map(lambda x:(x,x.getTH()),p.lf_XCut)
seqGen = sequenceMiner()
seqGen.bDebug = False
seqGen.setMinSequenceLength(iMinLen)
seqGen.setMaxSequenceLength(iMaxLen)
seqGen.setObjectLevel(XMLDSPageClass)
chronoOn()
print ('featuring...',chronoOff())
lSortedFeatures = seqGen.featureGeneration(lPages,2)
for cf in lSortedFeatures:
            # weights must not be too large because they are used in np.obs (max=64000)
# cf.setWeight(sum(x.getHeight() * x.getWidth() for x in cf.getNodes())/64000)
cf.setWeight(sum(x.getHeight() for x in cf.getNodes()) / 64000)
# cf.setWeight(1)
# cf.setWeight(len(cf.getNodes()))
# print (cf, cf.getWeight(),len(cf.getNodes())) # map(lambda x:x.getX(),cf.getNodes())
# print lSortedFeatures
for _,p in enumerate(lPages):
p.lFeatureForParsing = p.getCanonicalFeatures()
if self.bDebug:print (p, p.lFeatureForParsing)
sys.stdout.flush()
if lSortedFeatures == []:
print ("No template found in this document")
return None,None,-1
seqGen.bDebug = False
lmaxSequence = seqGen.generateItemsets(lPages)
lSeq, _ = seqGen.generateMSPSData(lmaxSequence,lSortedFeatures,mis = 0.2)
# if one page: what to do ??
if len(lPages) > 1:
lOneSupport = self.testHighSupport(lSeq)
else : lOneSupport=[]
print ('L1OneSupport: ', lOneSupport)
## reference solution
if len(lOneSupport) < 0:
# MIS also for patterns, not only item!!
## can be used to assess the noise level or here
chronoOn()
print( "generation...")
sys.stdout.flush()
lSeq, _ = seqGen.generateMSPSData(lmaxSequence,lSortedFeatures,mis = 0.1,L1Support = [])
sys.stdout.flush()
## if many MIS=1.0 -> table with many columns!
##actual supports: {'x=40.0': 0.5, 'x=473.0': 1.0, 'x=73.0': 0.75, 'x=558.0': 1.0, 'x=327.0': 1.0, 'x=145.0': 1.0, 'x=243.0': 1.0, 'x=1180.0': 0.25, 'x=726.0': 1.0, 'x=408.0': 1.0, 'x=886.0': 1.0, 'x=1027.0': 0.75, 'x=803.0': 1.0, 'x=952.0': 1.0, 'x=636.0': 1.0, 'x=1136.0': 1.0, 'x=839.0': 0.25}
# lFSortedFeatures = self.factorizeHighlyFrequentItems()
lPatterns = seqGen.miningSequencePrefixScan(lSeq)
if lPatterns is None:
return None, None, -1
# lPatterns = seqGen.beginMiningSequences(lSeq,lSortedFeatures,lMIS)
print( "chronoTraining", chronoOff())
print( 'nb patterns: ',len(list(lPatterns)))
sys.stdout.flush()
lPatterns = self.filterNonRegularPatterns(lPatterns)
lPatterns.sort(key=lambda p_s:self.computePatternScore(p_s[0]), reverse=True)
# lPatterns.sort(key=lambda (p,s):s, reverse=True)
lPatterns = list(filter(lambda p_:p_[0] not in lNegativePatterns, lPatterns))
# if self.bDebug:
# for p,s in lPatterns:
# if s >= 1:
# print p,s, self.computePatternScore(p)
sys.stdout.flush()
### GENERATE SEQUENTIAL RULES
seqGen.bDebug = True
seqGen.THRULES = 0.80
lSeqRules = seqGen.generateSequentialRules(lPatterns)
_,dCP = self.getPatternGraph(lSeqRules)
if bTAMode:
dTemplatesCnd = self.pattern2TAZonesTemplate(lPatterns,dCP)
else:
dTemplatesCnd = self.pattern2VerticalZonesTemplate(lPatterns,dCP)
# print 'patterns:', dTemplatesCnd
chronoOn()
# seqGen.setKleenePlusTH(self.fKleenPlusTH)
seqGen.setKleenePlusTH(1.6)
_, lVTemplates,tranprob = seqGen.testTreeKleeneageTemplates(dTemplatesCnd, lPages,iterMax=10)
print("chronoParsing", chronoOff())
## merge if similar patterns (see testV/nn)
## usually +1 element
else:
### if length=2 how to build the pattern!!!!!
## TABLE:
tableTemplate=treeTemplateClass()
lOneSupport.sort(key=lambda x:x.getValue())
tableTemplate.buildTreeFromPattern(lOneSupport)
lVTemplates= [tableTemplate]
tranprob = np.ones((2,2), dtype = np.float16)
for p in lPages:
p.lFeatureForParsing = p.lf_XCut
        ## for each n-cut category:
# template with 2 , 3 ,4, 5 .. cuts
chronoOn()
# for i,t in enumerate(lVTemplates):
# print i,lPages[-2], t, t.registration(lPages[-2])
# print i,lPages[-1], t, t.registration(lPages[-1])
# print
lT, lScores, score= self.selectFinalTemplate(lVTemplates,tranprob,lPages)
print ("chronoFinalViterbi: %s score= %s" % ( chronoOff(), score))
# del seqGen
return lT, lScores, score
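    # Note: as written, the `len(lOneSupport) < 0` guard in
    # processVSegmentation above can never be true, so the PrefixScan mining
    # branch is skipped and a single table template is always built from the
    # high-support cuts (lOneSupport).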
def selectFinalTemplate(self,lTemplates,transProb,lPages):
"""
apply viterbi to select best sequence of templates
"""
import spm.viterbi as viterbi
if lTemplates == []:
return None,None,None
def buildObs(lTemplates,lPages):
"""
build observation prob
"""
N = len(lTemplates) + 1
# print 'N:',N
obs = np.zeros((N,len(lPages)), dtype=np.float16)
for i,temp in enumerate(lTemplates):
for j,page in enumerate(lPages):
x, y, score= temp.registration(page)
# print (page, i, page.lf_XCut,temp, score,x,y)
if score < 0:
score= 0
# # no template
# print ('\t xxx',score)
# obs[-1,j]=1.0
obs[i,j]= score
if np.isinf(obs[i,j]):
obs[i,j] = 64000
if np.isnan(obs[i,j]):
obs[i,j] = 0.0
# print i,j,page, temp,score
#add no-template:-1
return obs / np.amax(obs)
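        # obs has shape (n_templates+1, n_pages); row i holds the registration
        # score of template i on each page, normalised by the global maximum.
        # The extra last row, meant for a "no template" state, stays at zero.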
N= len(lTemplates) + 1
# build transition score matrix
## use the support to initialize ?? why
initialProb = np.ones(N) * 1
initialProb = np.reshape(initialProb,(N,1))
obs = buildObs(lTemplates,lPages)
d = viterbi.Decoder(initialProb, transProb, obs)
states,fscore = d.Decode(np.arange(len(lPages)))
np.set_printoptions(precision= 3, linewidth =1000)
print ("viterbi scores:",fscore)
# print (transProb)
# print (obs)
lTemplate=[]
lScores=[]
        # assign to each page the template selected by Viterbi
for i,page, in enumerate(lPages):
try:
mytemplate= lTemplates[states[i]]
if mytemplate not in lTemplate:
lTemplate.append(mytemplate)
except:# no template
mytemplate = None
if mytemplate is not None:
# page.resetVerticalTemplate()
page.addVerticalTemplate(mytemplate)
registeredPoints, _, score = mytemplate.registration(page)
# print ("??",page, states[i], mytemplate, registeredPoints, score, page.lFeatureForParsing)
if registeredPoints:
registeredPoints.sort(key=lambda x_y:x_y[1].getValue())
lcuts = list(map(lambda ref_cut:ref_cut[1],registeredPoints))
print (page, score, lcuts)
# print page, score, lcuts, map(lambda x:x.getWeight(), lcuts),registeredPoints
# print '\t', page.lFeatureForParsing,map(lambda x:x.getWeight(), page.lFeatureForParsing)
page.addVSeparator(mytemplate,lcuts)
lScores.append((states[i],score))
else:
# lScores.append((N,-1))
lScores.append((N,0))
# for t in lTemplate:
# print t, t.getParent()
fscore= np.average(list(map(lambda x_y:x_y[1],lScores)))
return lTemplate, lScores, fscore
def getPatternGraph(self,lRules):
"""
        create a graph which links expanded patterns
(a) -> (ab)
(abc) -> (abcd)
rule = (newPattern,item,i,pattern, fConfidence)
        RULE: [['x=19.0', 'x=48.0', 'x=345.0'], ['x=19.0', 'x=126.0', 'x=345.0']] => 'x=464.0'[0] (19.0/22.0 = 0.863636363636)
        can be used for tagging: go up until there is no parent
for balanced bigram: extension only: odd bigrams are filtered out
"""
dParentChild= {}
dChildParent= {}
for lhs, _, _, fullpattern, _ in lRules:
try:dParentChild[str(fullpattern)].append(lhs)
except KeyError:dParentChild[str(fullpattern)] = [lhs]
try:dChildParent[str(lhs)].append(fullpattern)
except KeyError:dChildParent[str(lhs)] = [fullpattern]
# for bigram: extend to grammy
for child in dChildParent.keys():
ltmp=[]
if len(eval(child)) == 2:
for parent in dChildParent[child]:
try:
ltmp.extend(dChildParent[str(parent)])
except KeyError:pass
dChildParent[child].extend(ltmp)
return dParentChild, dChildParent
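    # dParentChild maps an expanded pattern to the sub-patterns it extends;
    # dChildParent is the inverse and, for bigram children, is extended one
    # extra level ("grandparents") so that tagging can climb up until a
    # pattern has no parent.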
def isMirroredPattern(self,pattern):
"""
        to be replaced by a classification model!!
test if a pattern is mirrored or not
[A,B,C] [C,B,A]
left must be different from right !
AT LEAST 3 VALUES -> 2 values define one region: if mirrored: 2 regions needed!
        -> NONO!! BAR: the body is detected, and margins are mirrored
take into account scan shift
"""
# deprecated if O,max are added to the features
if self.bScanShift:
MTH = 3
else:
MTH = 2
if len(pattern) !=2 or len(pattern[0]) != len(pattern[1]) or (pattern[0] == pattern[1]) or len(pattern[0]) < MTH:
return False
else:
## add zeros (margins) if len(pattern[0]) == 2??
zeroFeature=featureObject()
zeroFeature.setValue(0)
inv1 = pattern[1][:]
inv1.reverse()
lcouple1= zip(inv1,inv1[1:])
lw1= map(lambda x_y:abs(x_y[1].getValue()-x_y[0].getValue()),lcouple1)
lcouple0 = zip(pattern[0],pattern[0][1:])
lw0 = map(lambda x_y:abs(x_y[1].getValue()-x_y[0].getValue()),lcouple0)
final = set(map(lambda x_y: abs(x_y[0] -x_y[1]) < self.THNUMERICAL * 2,zip(lw0,lw1)))
return set(final) == set([True])
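    # Worked example (hypothetical values, THNUMERICAL=30): for the pattern
    # [[40, 110, 442], [40, 340, 442]] the left-page gaps are [70, 332] and
    # the reversed right-page gaps are [102, 300]; both pairwise differences
    # equal 32 < 2*TH = 60, so the pattern is considered mirrored.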
##########CORRECTION STEP #################
def correctionStep(self,lPages):
"""
correct segmentation wrt template
- according to the template:
            look at token level for the best cuts?
resegment text if needed
still need to consider graphline
"""
for page in lPages:
# lText = page.getAllNamedObjects(XMLDSTEXTClass)
lTokens = page.getAllNamedObjects(XMLDSTOKENClass)
# xTokenHisto = Counter(map(lambda x:round(x.getX()),lTokens))
# print page, xTokenHisto.most_common(15)
prevcuts= page.lf_XCut
self.minePageVerticalFeature([page], ['x','x2'], level=XMLDSTOKENClass)
page.lFeatureForParsing = page.lf_XCut
if lTokens != []:
for template in page.getVerticalTemplates():
sys.stdout.flush()
registeredPoints, lMissing, score = template.registration(page)
if registeredPoints is not None:
                        lfinalCuts = list(map(lambda x_y: x_y[1], filter(lambda x: x[0] != 'EMPTY', registeredPoints)))
# print page,registeredPoints, lMissing, score
page.resetVSeparator(template)
page.addVSeparator(template,lfinalCuts)
#### delete page break ##########################
# here ??
def flattencontent(self,lPages,lTemplates):
"""
build reading order according to templates
for each template create a stream
if bigram (mirrored)
take 0 as ref; and reverse 1
"""
lTemplateRO={}
for template in lTemplates:
# how many stream?
# unigram
if template.getChildren() is None:
N=len(template.getPattern())
else:
# get first child length
N=len(template.getChildren()[0].getPattern())
lTemplateRO[template]= [ [] for i in range(0,N+1)]
# print (template, lTemplateRO[template])
# return
for page in lPages:
for template in page.getVerticalTemplates():
                ## what is stored is the terminal: need to access the parent
page.createVerticalZones(template)
parentTemplate= template.getParent()
bReverse=False
## need a real test for isMirrored(self)
# print (parentTemplate,parentTemplate.isMirrored())
if parentTemplate.isMirrored():
bReverse = template.getPattern() == parentTemplate.getPattern()[1]
lregions= page.getVerticalObjects(template)
if bReverse:
lregions.reverse()
# print (page, bReverse)
for i,region in enumerate(lregions):
print (region.getContent().encode('utf-8'))
lTemplateRO[parentTemplate][i].append(region)
# print page, i, parentTemplate,len(lTemplateRO[parentTemplate][i]) #.getContent().encode('utf-8')
## need a 2D smoothing for line: need to find the line grid and match it against the page.
## baseline method! if overlap :merge?
### build blocks for pageTemplate: get headers,...
## from block -> create lines (allow for merge and ordering)
# once blocks: pageTemplate
### now patterns with indentation
for tem in lTemplateRO:
print ("###################################", tem)
lColumnElts=[ [] for i in range(len(lTemplateRO[tem]))]
for i,lreg in enumerate(lTemplateRO[tem]):
# print "%d\t\t%s"%(i,"")
for reg in lreg:
lElts= reg.getObjects()
lColumnElts[i].extend(lElts)
# for elt in lElts:
## compute justification
# try: print elt.getPage(), elt.getContent().encode('utf-8')[:30]
# except:pass
return lColumnElts
############################# DOM TAGGING ###################
def tagDomWithBestTemplate(self,lPages):
"""
Create (empty) REGIONS (for end-to-end; for GT: create table)
"""
for page in lPages:
if page.getNode():
best = None
bestRegisteredPoints =None
lMissingBest= None
bestScore = 0
for mytemplate in page.getVerticalTemplates():
registeredPoints, lMissing, score= mytemplate.registration(page)
print( page, mytemplate, score)
if score > bestScore:
best = mytemplate
bestRegisteredPoints= registeredPoints
lMissingBest=lMissing
bestScore = score
print( page,best, bestScore) # bestRegisteredPoints, lMissingBest
if best:
prevcut=0
for refcut,realcut in bestRegisteredPoints:
if realcut != prevcut:
region = etree.Element('REGION')
region.set("x",str(prevcut))
region.set("y",'0')
region.set("height",str(page.getHeight()))
region.set("width", str(realcut.getValue() - prevcut))
region.set('points', '%f,%f,%f,%f,%f,%f,%f,%f'%(prevcut,0, realcut.getValue(),0 ,realcut.getValue(),page.getHeight(),prevcut,page.getHeight()))
page.getNode().append(region)
prevcut = realcut.getValue()
#final col
if prevcut != page.getWidth():
region = etree.Element('REGION')
width = page.getWidth() - prevcut
region.set("x",str(prevcut))
region.set("y",'0')
region.set("height",str(page.getHeight()))
region.set("width", str(width))
region.set('points', '%f,%f,%f,%f,%f,%f,%f,%f'%(prevcut,0, page.getWidth(),0,page.getWidth(),page.getHeight(),prevcut,page.getHeight()))
page.getNode().append(region)
def deleteRegions(self,page):
"""
delete regions of the page
"""
for region in page.getAllNamedObjects('REGION'):
region.getNode().getparent().remove(region.getNode())
def storeLineInRegions(self,page,lRegions):
"""
assign lines to region
"""
for line in page.getAllNamedObjects(XMLDSTEXTClass):
# print line, lRegions
myReg= line.bestRegionsAssignment(lRegions)
# print (line,myReg)
if myReg:
myReg.addObject(line)
# line.getNode().unlinkNode()
myReg.getNode().append(line.getNode())
def tagAsRegion(self,lPages):
"""
create regions
        if bIgnoreRegions: delete previous regions
        and assign new regions to textlines
        if border page regions are missing: add them?
or don't put them for tagging
"""
for page in lPages:
if page.getNode() is not None:
# if self.bIgnoreRegions:
# self.deleteRegions(page)
lRegions=[]
for template in page.getVerticalTemplates():
# print (page, template, template.getParent())
page.getdVSeparator(template).sort(key=lambda x:x.getValue())
# print page.getdVSeparator(template)
page.getNode().set('template',str(list(map(lambda x:x.getValue(),page.getdVSeparator(template)))))
# print (page,page.getNode().get('template'))
if template.getParent() is not None and len(template.getParent().getPattern())==2:
pos = -1
if template.getPattern() == template.getParent().getPattern()[0]:
pos = 0
elif template.getPattern() == template.getParent().getPattern()[1]:
pos = 1
else:
                            raise ValueError('template index issue')
page.getNode().set('reftemplate',str((pos,list(map(lambda x:x.getValue(),template.getParent().getPattern()[0])),list(map(lambda x:x.getValue(),template.getParent().getPattern()[1])))))
else:
                        # single: add () for comparison/evaluation
page.getNode().set('reftemplate',str((0,(list(map(lambda x:x.getValue(),template.getPattern()))))))
prevcut = 1
lCuts=[prevcut]
lRegions=[]
# print page, page.getdVSeparator(template)
for cut in page.getdVSeparator(template):
newReg= XMLDSObjectClass()
domNode = etree.Element('REGION')
domNode.set("x",str(prevcut))
                        ## it is better to avoid y=0: shrink the region by YMinus at top and bottom
YMinus= 1
domNode.set("y",str(YMinus))
domNode.set("height",str(page.getHeight()-2 * YMinus))
domNode.set("width", str(cut.getValue() - prevcut))
lCuts.append(cut.getValue() )
newReg.setNode(domNode)
page.getNode().append(domNode)
newReg.setDimensions(prevcut,YMinus, page.getHeight()-2 * YMinus,cut.getValue() - prevcut)
# print newReg.getX(),newReg.getY(),newReg.getHeight(),newReg.getWidth(),cut.getValue() - prevcut
lRegions.append(newReg)
prevcut = cut.getValue()
# if lRegions != []:
# if self.bIgnoreRegions:
# self.deleteRegions(page)
# self.storeLineInRegions(page,lRegions)
def tagDomAsTable(self,lPages):
"""
create a table object:
table zone: page
columns: the created vertical zones
"""
for page in lPages:
if page.getNode():
# if several template ???
for template in page.getVerticalTemplates():
### create a table
tableNode = etree.Element('TABLE')
tableNode.set('x','0')
tableNode.set('y','0')
tableNode.set('height',str(page.getHeight()))
tableNode.set('width',str(page.getWidth()))
page.getNode().append(tableNode)
page.getdVSeparator(template).sort(key=lambda x:x.getValue())
# print page.getdVSeparator(template)
prevcut=1
lCells = []
for i,cut in enumerate(page.getdVSeparator(template)):
newCell= XMLDSTABLECELLClass()
cellNode = etree.Element('CELL')
cellNode.set("x",str(prevcut))
cellNode.set("y",'1')
cellNode.set("row","0")
cellNode.set("col",str(i))
cellNode.set("height",str(page.getHeight()))
cellNode.set("width", str(cut.getValue() - prevcut))
cellNode.set('points', '%f,%f,%f,%f,%f,%f,%f,%f'%(prevcut,1, cut.getValue(),1 ,cut.getValue(),page.getHeight(),prevcut,page.getHeight()))
newCell.setNode(cellNode)
newCell.fromDom(cellNode)
tableNode.append(cellNode)
lCells.append(newCell)
# print (newCell.getX(),newCell.getWidth(),newCell.getY(),newCell.getHeight()),
# newCell.setDimensions(prevcut,1, page.getHeight()-2 * 1,cut.getValue() - prevcut)
prevcut = cut.getValue()
if lCells != []:
self.storeLineInRegions(page,lCells)
def testCliping(self,lPages):
"""
all in the name
"""
for page in lPages:
region=XMLDSObjectClass()
region.addAttribute('x', 0)
region.addAttribute('y', 0)
region.addAttribute('height', page.getAttribute('height'))
region.addAttribute('width', 110)
print (region.getX(),region.getY(),region.getWidth(),region.getHeight())
print (page.getAttributes(), page.getX2())
lObjects = page.clipMe(region)
region.setObjectsList(lObjects)
def cleanInput(self,lPages):
"""
        Delete tokens which are too close to x=0 or x=max
        Does not touch the DOM!!
"""
for page in lPages:
for txt in page.getAllNamedObjects(XMLDSTEXTClass):
ltobd=[]
for word in txt.getAllNamedObjects(XMLDSTOKENClass):
if word.getX() < 5:
ltobd.append(word)
elif word.getX2() +5 > page.getWidth():
ltobd.append(word)
elif word.getWidth() < 10:
ltobd.append(word)
# resize text
for x in ltobd:
# print x.getAttribute('id')
txt.getObjects().remove(x)
# resize DOM node as well??
if len(txt.getAllNamedObjects(XMLDSTOKENClass)) == 0:
try:
page.getObjects().remove(txt)
except ValueError:
print(txt)
else:
txt.resizeMe(XMLDSTOKENClass)
def generateTestOutput(self,lPages):
"""
create a run XML file
"""
root = etree.Element('DOCUMENT')
self.evalData = etree.ElementTree(root)
for page in lPages:
domp=etree.Element('PAGE')
domp.set('number',page.getAttribute('number'))
root.append(domp)
if page.getNode().get('template'):
domp.set('template',page.getNode().get('template'))
if page.getNode().get('reftemplate'):
domp.set('reftemplate',page.getNode().get('reftemplate'))
return self.evalData
#--- RUN ---------------------------------------------------------------------------------------------------------------
def loadDSDoc(self,doc):
"""
"""
self.doc= doc
self.ODoc = XMLDSDocument()
chronoOn()
self.ODoc.loadFromDom(self.doc,listPages=range(self.firstPage,self.lastPage+1))
self.lPages= self.ODoc.getPages()
# self.lPages= self.lPages[:1]
# self.cleanInput(self.lPages)
print('chronoloading:', chronoOff())
sys.stdout.flush()
def run(self):
"""
for a set of pages, associate each page with several vertical zones aka column-like elements
Populate the vertical zones with page elements (text)
indicate if bigram page template (mirrored pages)
"""
if self.bManual:
# self.tagWithTemplate(self.manualPattern,self.lPages)
self.THNUMERICAL = 30
# level=XMLDSTEXTClass
self.minePageVerticalFeature(self.lPages, ['x','x2'],level=self.sTag)
self.processWithTemplate(self.manualPattern,self.lPages)
else:
chronoOn()
# first mine page size!!
            ## if width is not the 'same', then initial values are not comparable (x = end of page)
# lSubPagesList = self.highLevelSegmentation(self.lPages)
lSubPagesList = [self.lPages]
## need to regroup similar
# self.arrayApproach(lSubPagesList)
# return
if self.baselineMode > 0:
# not implemented
self.baselineSegmentation(lSubPagesList)
else:
lTemplates = self.iterativeProcessVSegmentation(lSubPagesList)
return self.ODoc, lTemplates, self.lPages
## in this package?? current yes
self.flattencontent(self.lPages,lTemplates)
# self.processVSegmentation(self.lPages)
print( 'chronoprocessing: ', chronoOff())
# self.addTagProcessToMetadata(self.doc)
return self.doc
#--- TESTS -------------------------------------------------------------------------------------------------------------
#
# Here we have the code used to test this component on a prepared testset (see under <ROOT>/test/common)
# Do: python ../../src/common/TypicalComponent.py --test REF_TypicalComponent/
#
def testComparePageVertical(self,runElt,refElt):
"""
input: <SeparatorRegion x="51.36" y="7.44" height="764.4" width="2.88"/>
"""
self.THNUMERICAL = 30.0
## x=XX
return abs(runElt - refElt) < (self.THNUMERICAL *2.0)
def testTemplateType(self,srefData,srunData, bVisual):
"""
run PAGE @template
        ref PAGE @reftemplate
"""
cntOk = cntErr = cntMissed = 0
RefData = etree.XML(srefData.strip("\n").encode('utf-8'))
RunData = etree.XML(srunData.strip("\n").encode('utf-8'))
lRun = []
if RunData:
lpages = RunData.xpath('//%s' % ('PAGE'))
for page in lpages:
if page.get('reftemplate'):
lRun.append(eval(page.get('reftemplate')))
else:lRun.append([])
lRef = []
lPages = RefData.xpath('//%s' % ('PAGE'))
for page in lPages:
if page.get('reftemplate'):
lRef.append(eval(page.get('reftemplate')))
else: lRef.append([])
runLen = len(lRun)
refLen = len(lRef)
assert runLen == refLen
ltisRefsRunbErrbMiss= list()
for i in range(0,len(lRef)):
if lRun[i] != []:
runLen = len(lRun[i])
else:
runLen=0
if lRef[i] != []:
refLen = len(lRef[i])
else:
refLen=0
# print i, refLen, runLen
if runLen == refLen:
cntOk += 1
ltisRefsRunbErrbMiss.append( (i, lRef[i],lRun[i], False, False) )
else:
cntErr+=1
cntMissed+=1
ltisRefsRunbErrbMiss.append( (i, lRef[i],lRun[i], True, True) )
ltisRefsRunbErrbMiss.sort(key=lambda x_y_z_t_u:x_y_z_t_u[0])
return (cntOk, cntErr, cntMissed,ltisRefsRunbErrbMiss)
def testRUNREFVerticalSegmentation(self,srefData,srunData, bVisual):
"""
Test found run-template and run-reftemplate
"""
cntOk = cntErr = cntMissed = 0
RefData = etree.XML(srefData.strip("\n").encode('utf-8'))
RunData = etree.XML(srunData.strip("\n").encode('utf-8'))
lRun = []
if RunData:
lpages = RunData.xpath('//%s' % ('PAGE'))
for page in lpages:
if page.get('template'):
lRun.append(eval(page.get('template')))
else:lRun.append([])
lRef = []
#### NO LONGER REFDATA!!!
lPages = RunData.xpath('//%s' % ('PAGE'))
for page in lPages:
if page.get('reftemplate'):
lRef.append(eval(page.get('reftemplate')))
else: lRef.append([])
ltisRefsRunbErrbMiss= list()
for i in range(0,len(lRef)):
lRefCovered = []
runLen = len(lRun[i])
if lRef[i]==[]:
refLen=0
refElt=None
posref=None
else:
posref=lRef[i][0]
refLen= len(lRef[i][posref+1])
curRun = curRef = 0
while curRun <= runLen - 1: # or curRef <= refLen -1:
bErr, bMiss = False, False
try:
runElt = lRun[i][curRun]
except IndexError: runElt = None
# print '___',curRun,runElt
curRef = 0
bFound = False
while not bFound and curRef <= refLen - 1:
try: refElt = lRef[i][posref+1][curRef]
except IndexError: refElt = None
# self.compareString(runElt,runElt)
if runElt and refElt not in lRefCovered and self.testComparePageVertical(runElt, refElt):
bFound = True
lRefCovered.append(refElt)
resRef=refElt
else:
curRef += 1
if bFound:
if bVisual:print( "FOUND:", runElt, ' -- ', lRefCovered[-1])
cntOk += 1
curRun += 1
else:
resRef=''
curRun += 1
cntErr += 1
bErr = True
# bMiss = True
if bVisual:print ("ERROR:", runElt)
ltisRefsRunbErrbMiss.append( (i, resRef, runElt,bErr, bMiss) )
if posref is not None:
for ref in lRef[i][posref+1]:
if ref not in lRefCovered:
ltisRefsRunbErrbMiss.append( (i, ref, '',False, True) )
# add missed elements!
cntMissed += 1 #len(lRef[i][posref+1]) - len(lRefCovered)
ltisRefsRunbErrbMiss.sort(key=lambda x_y_z_t_u:x_y_z_t_u[0])
return (cntOk, cntErr, cntMissed,ltisRefsRunbErrbMiss)
def testREFVerticalSegmentation(self,srefData,srunData, bVisual):
"""
        Test the run reftemplate against the ref reftemplate
"""
cntOk = cntErr = cntMissed = 0
RefData = etree.XML(srefData.strip("\n").encode('utf-8'))
RunData = etree.XML(srunData.strip("\n").encode('utf-8'))
lRun = []
if RunData is not None:
lpages = RunData.xpath('//%s' % ('PAGE'))
for page in lpages:
if page.get('reftemplate'):
lRun.append(eval(page.get('reftemplate')))
else:
lRun.append([])
lRef = []
lPages = RefData.xpath('//%s' % ('PAGE'))
for page in lPages:
if page.get('reftemplate'):
lRef.append(eval(page.get('reftemplate')))
else: lRef.append([])
ltisRefsRunbErrbMiss= list()
for i in range(0,len(lRef)):
lRefCovered = []
if lRun[i] ==[]:
runLen=0
else:
posrun = lRun[i][0]
runLen = len(lRun[i][posrun+1])
if lRef[i]==[]:
refLen=0
refElt=None
posref=None
else:
posref=lRef[i][0]
refLen= len(lRef[i][posref+1])
curRun = curRef = 0
while curRun <= runLen - 1: # or curRef <= refLen -1:
bErr, bMiss = False, False
try:
runElt = lRun[i][posrun+1][curRun]
except IndexError: runElt = None
# print '___',curRun,runElt
curRef = 0
bFound = False
while not bFound and curRef <= refLen - 1:
try: refElt = lRef[i][posref+1][curRef]
except IndexError: refElt = None
# self.compareString(runElt,runElt)
if runElt and refElt not in lRefCovered and self.testComparePageVertical(runElt, refElt):
bFound = True
lRefCovered.append(refElt)
resRef=refElt
else:
curRef += 1
if bFound:
if bVisual:print ("FOUND:", runElt, ' -- ', lRefCovered[-1])
cntOk += 1
curRun += 1
else:
resRef=''
curRun += 1
cntErr += 1
bErr = True
# bMiss = True
if bVisual:print ("ERROR:", runElt)
ltisRefsRunbErrbMiss.append( (i, resRef, runElt,bErr, bMiss) )
if posref is not None:
for ref in lRef[i][posref+1]:
if ref not in lRefCovered:
ltisRefsRunbErrbMiss.append( (i, ref, '',False, True) )
# add missed elements!
# print 'missed', len(lRef[i][posref+1]) , len(lRefCovered), lRef[i][posref+1], lRefCovered
cntMissed += 1#(len(lRef[i][posref+1]) - len(lRefCovered))
ltisRefsRunbErrbMiss.sort(key=lambda x_y_z_t_u:x_y_z_t_u[0])
return (cntOk, cntErr, cntMissed,ltisRefsRunbErrbMiss)
def testVerticalSegmentation(self,srefData,srunData, bVisual):
"""
Test found cuts and reftemplate
"""
cntOk = cntErr = cntMissed = 0
RefData = etree.XML(srefData.strip("\n").encode('utf-8'))
RunData = etree.XML(srunData.strip("\n").encode('utf-8'))
lRun = []
if RunData:
lpages = RunData.xpath('//%s' % ('PAGE'))
for page in lpages:
if page.get('template'):
lRun.append(eval(page.get('template')))
else:lRun.append([])
lRef = []
lPages = RefData.xpath('//%s' % ('PAGE'))
for page in lPages:
if page.get('reftemplate'):
lRef.append(eval(page.get('reftemplate')))
else: lRef.append([])
ltisRefsRunbErrbMiss= list()
for i in range(0,len(lRef)):
lRefCovered = []
runLen = len(lRun[i])
if lRef[i]==[]:
refLen=0
refElt=None
posref=None
else:
posref=lRef[i][0]
refLen= len(lRef[i][posref+1])
curRun = curRef = 0
while curRun <= runLen - 1: # or curRef <= refLen -1:
bErr, bMiss = False, False
try:runElt = lRun[i][curRun]
except IndexError: runElt = None
# print '___',curRun,runElt
curRef = 0
bFound = False
while not bFound and curRef <= refLen - 1:
try: refElt = lRef[i][posref+1][curRef]
except IndexError: refElt = None
# self.compareString(runElt,runElt)
if runElt and refElt not in lRefCovered and self.testComparePageVertical(runElt, refElt):
bFound = True
lRefCovered.append(refElt)
resRef=refElt
else:
curRef += 1
if bFound:
if bVisual:print( "FOUND:", runElt, ' -- ', lRefCovered[-1])
cntOk += 1
curRun += 1
else:
resRef=''
curRun += 1
cntErr += 1
bErr = True
# bMiss = True
if bVisual:print ("ERROR:", runElt)
ltisRefsRunbErrbMiss.append( (i, resRef, runElt,bErr, bMiss) )
if posref is not None:
for ref in lRef[i][posref+1]:
if ref not in lRefCovered:
ltisRefsRunbErrbMiss.append( (i, ref, '',False, True) )
# add missed elements!
cntMissed += (len(lRef[i][posref+1]) - len(lRefCovered))
ltisRefsRunbErrbMiss.sort(key=lambda x_y_z_t_u:x_y_z_t_u[0])
return (cntOk, cntErr, cntMissed,ltisRefsRunbErrbMiss)
def testRun(self, filename, outFile=None):
"""
testRun is responsible for running the component on this file and returning a string that reflects the result in a way
that is understandable to a human and to a program. Nicely serialized Python data or XML is fine
"""
doc = self.loadDom(filename)
self.loadDSDoc(doc)
self.doc= doc
self.run()
# doc.freeDoc()
self.generateTestOutput(self.lPages)
if outFile: self.writeDom(doc)
return etree.tostring(self.evalData,encoding='unicode')
# return etree.tostring(self.evalData,encoding='utf-8',xml_declaration=True)
def testCompare(self, srefData, srunData, bVisual=False):
"""
        Our comparison is very simple: same or different.
        We anyway return this in terms of precision/recall.
        If we want to compute the error differently, we must define our own testInit, testRecord, testReport
"""
dicTestByTask = dict()
dicTestByTask['VREFzones']= self.testREFVerticalSegmentation(srefData, srunData,bVisual)
# dicTestByTask['Vzones']= self.testVerticalSegmentation(srefData, srunData,bVisual)
dicTestByTask['VRUNREFzones']= self.testRUNREFVerticalSegmentation(srefData, srunData,bVisual)
# dicTestByTask['templateType']= self.testTemplateType(srefData, srunData,bVisual)
return dicTestByTask
#--- MAIN -------------------------------------------------------------------------------------------------------------
#
# In case we want to use this component from a command line
#
# Do: python TypicalComponent.py -i toto.in.xml
#
if __name__ == "__main__":
docM = pageVerticalMiner()
#prepare for the parsing of the command line
docM.createCommandLineParser()
docM.add_option("-f", "--first", dest="first", action="store", type="int", help="first page number", metavar="NN")
docM.add_option("-l", "--last", dest="last", action="store", type="int", help="last page number", metavar="NN")
docM.add_option("-t", "--tag", dest="tag", action="store", type="string", help="tag level", metavar="S")
docM.add_option("--pattern", dest="pattern", action="store", type="string", help="pattern to be applied", metavar="[]")
docM.add_option("--TH", dest="THNUM", action="store", type="int", help="TH as eq delta", metavar="NN")
docM.add_option("--KTH", dest="KLEENETH", action="store", type="float", help="TH for sequentiality", metavar="NN")
docM.add_option("--baseline", dest="baseline", type='int', default=0, action="store", help="baseline method",metavar="N")
docM.add_option("--ignoreRegion", dest="bIgnoreRegions", default=True, action="store_true", help="Ignore existing TextRegions")
docM.add_option("--nogl", dest="nogline", action="store_true",default=False ,help="no graphical line used")
docM.add_option("--glonly", dest="glineonly", action="store_true",default=False ,help="graphical line only (no text)")
#parse the command line
dParams, args = docM.parseCommandLine()
    #Now we are back to the normal programmatic mode, we set the component parameters
docM.setParams(dParams)
doc = docM.loadDom()
docM.loadDSDoc(doc)
docM.bDebug = True
docM.run()
if doc and docM.getOutputFileName() != "-":
docM.writeDom(doc, True)
|
{
"content_hash": "cba4f409bff5d53563acf7f4a8667d81",
"timestamp": "",
"source": "github",
"line_count": 2163,
"max_line_length": 308,
"avg_line_length": 40.65695792880259,
"alnum_prop": 0.5082953343719084,
"repo_name": "Transkribus/TranskribusDU",
"id": "6238aa2b80c58da2d76d51aa01701b9bb24173d4",
"size": "87984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TranskribusDU/spm/spmPageVerticals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2140"
},
{
"name": "HTML",
"bytes": "7987"
},
{
"name": "Python",
"bytes": "3804398"
},
{
"name": "Shell",
"bytes": "2069"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import logging
import re
from django.core.urlresolvers import RegexURLResolver, RegexURLPattern
from django.conf.urls import patterns, include, url
from sentry.plugins import plugins
logger = logging.getLogger('sentry.plugins')
def ensure_url(u):
if isinstance(u, (tuple, list)):
return url(*u)
elif not isinstance(u, (RegexURLResolver, RegexURLPattern)):
raise TypeError(
'url must be RegexURLResolver or RegexURLPattern, not %r: %r' % (type(u).__name__, u)
)
return u
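# For illustration (hypothetical view): ensure_url((r'^hello/$', some_view))
# returns url(r'^hello/$', some_view), an already-built RegexURLPattern is
# passed through unchanged, and anything else raises TypeError.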
def load_plugin_urls(plugins):
urlpatterns = patterns('')
for plugin in plugins:
try:
urls = plugin.get_group_urls()
if not urls:
continue
urls = [ensure_url(u) for u in urls]
except Exception:
logger.exception('routes.failed', extra={
'plugin': type(plugin).__name__,
})
else:
urlpatterns.append(
url(r'^%s/' % re.escape(plugin.slug), include(urls))
)
return urlpatterns
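# Design note: exceptions raised while collecting a plugin's urls are logged
# and swallowed so that a single broken plugin cannot take down routing for
# every other plugin.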
urlpatterns = load_plugin_urls(plugins.all())
|
{
"content_hash": "61dd0c5852792a542f23690d3bc82f3f",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 97,
"avg_line_length": 26.227272727272727,
"alnum_prop": 0.6013864818024264,
"repo_name": "JackDanger/sentry",
"id": "63425aaae2f5af3aa8ec23da43d3a59db3c9bf85",
"size": "1154",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/sentry/plugins/base/group_api_urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "583430"
},
{
"name": "HTML",
"bytes": "319622"
},
{
"name": "JavaScript",
"bytes": "624672"
},
{
"name": "Makefile",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "6279717"
}
],
"symlink_target": ""
}
|
config = dict(
port='8009',
friendly_name='BitRanks',
app_dir='sigtrac',
name='sigtrac',
user='sigtrac',
env='env',
settings='settings.py.prod',
db='sigtrac',
db_user='sigtrac',
db_host='localhost',
custom_domains='sigtrac.nyaruka.com www.bitranks.com bitranks.com',
# compress=True,
celery=True)
|
{
"content_hash": "10c8b25746e1d14a6ae7190d7f0662bc",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 70,
"avg_line_length": 24,
"alnum_prop": 0.6398809523809523,
"repo_name": "nyaruka/sigtrac",
"id": "80fb9bfe7dba1ac3140a406c53ec64a635731a3a",
"size": "336",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabconfig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3249"
},
{
"name": "Groovy",
"bytes": "1318"
},
{
"name": "Java",
"bytes": "56803"
},
{
"name": "JavaScript",
"bytes": "31015"
},
{
"name": "Python",
"bytes": "66975"
},
{
"name": "Shell",
"bytes": "7484"
}
],
"symlink_target": ""
}
|
import m5
from m5.objects import *
m5.util.addToPath('../configs/common')
from Caches import *
nb_cores = 4
cpus = [ TimingSimpleCPU(cpu_id=i) for i in xrange(nb_cores) ]
# system simulated
system = System(cpu = cpus, physmem = SimpleMemory(), membus = CoherentBus())
# l2cache & bus
system.toL2Bus = CoherentBus(clock = '2GHz')
system.l2c = L2Cache(clock = '2GHz', size='4MB', assoc=8)
system.l2c.cpu_side = system.toL2Bus.master
# connect l2c to membus
system.l2c.mem_side = system.membus.slave
# add L1 caches
for cpu in cpus:
cpu.addPrivateSplitL1Caches(L1Cache(size = '32kB', assoc = 1),
L1Cache(size = '32kB', assoc = 4))
# create the interrupt controller
cpu.createInterruptController()
# connect cpu level-1 caches to shared level-2 cache
cpu.connectAllPorts(system.toL2Bus, system.membus)
cpu.clock = '2GHz'
system.system_port = system.membus.slave
# connect memory to membus
system.physmem.port = system.membus.master
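# Resulting hierarchy: four TimingSimpleCPUs, each with private split L1
# I/D caches, share the 4MB L2 via toL2Bus; the L2 sits on the memory bus,
# which fronts SimpleMemory.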
# -----------------------
# run simulation
# -----------------------
root = Root( full_system = False, system = system )
root.system.mem_mode = 'timing'
|
{
"content_hash": "d8d0f67fd8ca8e51736371f261616869",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 77,
"avg_line_length": 27.975609756097562,
"alnum_prop": 0.6652136006974717,
"repo_name": "Dexhub/MTX",
"id": "aad15a2ac0ffbf8bea226494a9678953192af5f0",
"size": "2728",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/configs/simple-timing-mp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "232142"
},
{
"name": "C",
"bytes": "938054"
},
{
"name": "C++",
"bytes": "8952982"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "JavaScript",
"bytes": "33785"
},
{
"name": "Perl",
"bytes": "760556"
},
{
"name": "Python",
"bytes": "3101104"
},
{
"name": "Ruby",
"bytes": "854209"
},
{
"name": "Shell",
"bytes": "2193"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
}
|
__all__ = ["EdgeSplitter"]
import datetime
import warnings
import networkx as nx
import pandas as pd
import numpy as np
from math import isclose
from ..core import StellarGraph
from ..globalvar import FEATURE_ATTR_NAME
class EdgeSplitter(object):
"""
Class for generating training and test data for link prediction in graphs.
The class requires as input a graph (in networkx format) and a percentage as a function of the total number of edges
in the given graph of the number of positive and negative edges to sample. For heterogeneous graphs, the caller
can also specify the type of edge and an edge property to split on. In the latter case, only a date property
    can be used and it must be in the format ``dd/mm/yyyy``. A date to be used as a threshold value must also be
    given, so that only edges dated after the threshold are sampled. This affects only the sampling of positive edges.
    Negative edges are sampled at random (for the 'global' method) by selecting two nodes in the graph and
    then checking whether these nodes are connected. If not, the pair of nodes is considered a negative sample.
Otherwise, it is discarded and the process repeats. Alternatively, negative edges are sampled (for 'local' method)
using DFS search at a distance from the source node (selected at random from all nodes in the graph)
sampled according to a given set of probabilities.
Positive edges can be sampled so that when they are subsequently removed from the graph, the reduced graph is either
guaranteed, or not guaranteed, to remain connected. In the former case, graph connectivity is maintained by first
calculating the minimum spanning tree. The edges that belong to the minimum spanning tree are protected from
removal, and therefore cannot be sampled for the training set. The edges that do not belong to the minimum spanning
tree are then sampled uniformly at random, until the required number of positive edges have been sampled for the
training set. In the latter case, when connectedness of the reduced graph is not guaranteed, positive edges are
sampled uniformly at random from all the edges in the graph, regardless of whether they belong to the spanning tree
(which is not calculated in this case).
Args:
g (StellarGraph or networkx object): The graph to sample edges from.
g_master (StellarGraph or networkx object): The graph representing the original dataset and a superset of the
graph g. If it is not None, then when positive and negative edges are sampled, care is taken to make sure
that a true positive edge is not sampled as a negative edge.
"""
def __init__(self, g, g_master=None):
# rather than rewrite this to use StellarGraph natively, this has the desired API (StellarGraphs in and
# StellarGraphs out) by converting to/from NetworkX at the boundaries
self._input_was_stellargraph = isinstance(g, StellarGraph)
if self._input_was_stellargraph:
g = g.to_networkx()
if isinstance(g_master, StellarGraph):
g_master = g_master.to_networkx()
# the original graph copied over
self.g = g.copy()
self.g_master = g_master
# placeholder: it will hold the subgraph of self.g after edges are removed as positive training samples
self.g_train = None
self.positive_edges_ids = None
self.positive_edges_labels = None
self.negative_edges_ids = None
self.negative_edges_labels = None
self.negative_edge_node_distances = None
self.minedges = None # the minimum spanning tree as a list of edges.
self.minedges_set = None # lookup dictionary for edges in minimum spanning tree
self._random = None
def _train_test_split_homogeneous(
self, p, method, probs=None, keep_connected=False
):
"""
Method for edge splitting applied to homogeneous graphs.
Args:
p (float): Percent of edges to be returned. It is calculated as a function of the total number of edges
in the original graph. If the graph is heterogeneous, the percentage is calculated
as a function of the total number of edges that satisfy the edge_label, edge_attribute_label and
edge_attribute_threshold values given.
method (string): Should be 'global' or 'local'. Specifies the method for selecting negative examples.
probs (list of float, optional): If method is 'local' then this vector of floats specifies the probabilities for
sampling at each depth from the source node. The first value should be 0.0 and all values should sum to 1.0.
keep_connected (bool): If True then when positive edges are removed care is taken that the reduced graph
remains connected. If False, positive edges are removed without guaranteeing the connectivity of the reduced graph.
Returns:
2 numpy arrays, the first Nx2 holding the node ids for the edges and the second Nx1 holding the edge
labels, 0 for negative and 1 for positive example.
"""
        # minedges are the edges that, if removed, could leave the graph disconnected after the positive edges
        # have been sampled.
if keep_connected:
self.minedges = self._get_minimum_spanning_edges()
else:
self.minedges = []
self.minedges_set = set()
# Sample the positive examples
positive_edges = self._reduce_graph(minedges=self.minedges_set, p=p)
df = pd.DataFrame(positive_edges)
self.positive_edges_ids = np.array(df.iloc[:, 0:2])
self.positive_edges_labels = np.array(df.iloc[:, 2])
if method == "global":
negative_edges = self._sample_negative_examples_global(
p=p, limit_samples=len(positive_edges)
)
else: # method == 'local'
            if probs is None:  # use default values if not given, but warn the user
probs = [0.0, 0.25, 0.50, 0.25]
warnings.warn(
"Using default sampling probabilities (distance from source node): {}".format(
probs
),
RuntimeWarning,
stacklevel=2,
)
negative_edges = self._sample_negative_examples_local_dfs(
p=p, probs=probs, limit_samples=len(positive_edges)
)
df = pd.DataFrame(negative_edges)
self.negative_edges_ids = np.array(df.iloc[:, 0:2])
self.negative_edges_labels = np.array(df.iloc[:, 2])
if len(self.positive_edges_ids) == 0:
raise Exception("Could not sample any positive edges")
if len(self.negative_edges_ids) == 0:
raise Exception("Could not sample any negative edges")
edge_data_ids = np.vstack((self.positive_edges_ids, self.negative_edges_ids))
edge_data_labels = np.hstack(
(self.positive_edges_labels, self.negative_edges_labels)
)
print(
"** Sampled {} positive and {} negative edges. **".format(
len(self.positive_edges_ids), len(self.negative_edges_ids)
)
)
return edge_data_ids, edge_data_labels
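    # The returned arrays list all positive examples first (label 1) and all
    # negative examples after them (label 0); callers typically shuffle the
    # pairs before using them for training.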
def _train_test_split_heterogeneous(
self,
p,
method,
edge_label,
probs=None,
keep_connected=False,
edge_attribute_label=None,
edge_attribute_threshold=None,
):
"""
Splitting edge data based on edge type or edge type and edge property. The edge property must be a date in the
format ``dd/mm/yyyy``. If splitting by date, then a threshold value must also be given such that only edges with
        date later than the threshold can be in the set of positive examples. The edge property does not affect the
        sampling of negative examples.
Args:
p (float): Percent of edges to be returned. It is calculated as a function of the total number of edges
in the original graph. If the graph is heterogeneous, the percentage is calculated
as a function of the total number of edges that satisfy the edge_label, edge_attribute_label and
edge_attribute_threshold values given.
method (str): Should be 'global' or 'local'. Specifies the method for selecting negative examples.
edge_label (str): The edge type to split on
probs (list of float, optional): If method=='local' then this vector of floats specifies the probabilities for
sampling at each depth from the source node. The first value should be 0.0 and all values should sum to 1.0.
keep_connected (bool): If True then when positive edges are removed care is taken that the reduced graph
remains connected. If False, positive edges are removed without guaranteeing the connectivity of the reduced graph.
edge_attribute_label (str): The label for the edge attribute to split on
edge_attribute_threshold (str, optional): The threshold value applied to the edge attribute when sampling positive
examples
Returns:
2 numpy arrays, the first N × 2 holding the node ids for the edges and the second N × 1 holding the edge
labels, 0 for negative and 1 for positive example.
"""
        # minedges are the edges that, if removed, could leave the graph disconnected after the positive edges
        # have been sampled.
if keep_connected:
self.minedges = self._get_minimum_spanning_edges()
else:
self.minedges = []
self.minedges_set = set()
# Note: The caller guarantees the edge_label is not None so we don't have to check here again.
if edge_attribute_threshold is None:
positive_edges = self._reduce_graph_by_edge_type(
minedges=self.minedges_set, p=p, edge_label=edge_label
)
else:
positive_edges = self._reduce_graph_by_edge_type_and_attribute(
minedges=self.minedges_set,
p=p,
edge_label=edge_label,
edge_attribute_label=edge_attribute_label,
edge_attribute_threshold=edge_attribute_threshold,
)
if len(positive_edges) == 0:
raise Exception(
"ERROR: Unable to sample any positive edges of type '{}'".format(
edge_label
)
)
df = pd.DataFrame(positive_edges)
self.positive_edges_ids = np.array(df.iloc[:, 0:2])
self.positive_edges_labels = np.array(df.iloc[:, 2])
if method == "global":
negative_edges = self._sample_negative_examples_by_edge_type_global(
p=p,
edges=positive_edges,
edge_label=edge_label,
limit_samples=len(positive_edges),
)
else: # method == 'local'
if probs is None:
probs = [0.0, 0.25, 0.50, 0.25]
warnings.warn(
"Using default sampling probabilities (distance from source node): {}".format(
probs
),
RuntimeWarning,
stacklevel=2,
)
negative_edges = self._sample_negative_examples_by_edge_type_local_dfs(
p=p,
probs=probs,
edges_positive=positive_edges,
edge_label=edge_label,
limit_samples=len(positive_edges),
)
df = pd.DataFrame(negative_edges)
self.negative_edges_ids = np.array(df.iloc[:, 0:2])
self.negative_edges_labels = np.array(df.iloc[:, 2])
if len(self.positive_edges_ids) == 0:
raise Exception("Could not sample any positive edges")
if len(self.negative_edges_ids) == 0:
raise Exception("Could not sample any negative edges")
edge_data_ids = np.vstack((self.positive_edges_ids, self.negative_edges_ids))
edge_data_labels = np.hstack(
(self.positive_edges_labels, self.negative_edges_labels)
)
print(
"** Sampled {} positive and {} negative edges. **".format(
len(self.positive_edges_ids), len(self.negative_edges_ids)
)
)
return edge_data_ids, edge_data_labels
def train_test_split(
self,
p=0.5,
method="global",
probs=None,
keep_connected=False,
edge_label=None,
edge_attribute_label=None,
edge_attribute_threshold=None,
attribute_is_datetime=None,
seed=None,
):
"""
Generates positive and negative edges and a graph that has the same nodes as the original but the positive
edges removed. It can be used to generate data from homogeneous and heterogeneous graphs.
For heterogeneous graphs, positive and negative examples can be generated based on specified edge type or
edge type and edge property given a threshold value for the latter.
Args:
p (float): Percent of edges to be returned. It is calculated as a function of the total number of edges
in the original graph. If the graph is heterogeneous, the percentage is calculated
as a function of the total number of edges that satisfy the edge_label, edge_attribute_label and
edge_attribute_threshold values given.
method (str): How negative edges are sampled. If 'global', then nodes are selected at random.
                If 'local' then the first node is sampled from all nodes in the graph, but the second node is
                chosen to be from the former's local neighbourhood.
            probs (list): The probabilities for sampling a node that is k-hops from the source node,
                e.g., [0.25, 0.75] means that there is a 0.25 probability that the target node will be 1 hop away
                from the source node and 0.75 that it will be 2 hops away. This only affects sampling of
negative edges if method is set to 'local'.
keep_connected (bool): If True then when positive edges are removed care is taken that the reduced graph
remains connected. If False, positive edges are removed without guaranteeing the connectivity of the reduced graph.
            edge_label (str, optional): If splitting based on edge type, then this parameter specifies the key for the type
of edges to split on.
edge_attribute_label (str, optional): The label for the edge attribute to split on.
edge_attribute_threshold (str, optional): The threshold value applied to the edge attribute when sampling positive
examples.
attribute_is_datetime (bool, optional): Specifies if edge attribute is datetime or not.
seed (int, optional): seed for random number generator, positive int or 0
Returns:
The reduced graph (positive edges removed) and the edge data as 2 numpy arrays, the first array of
dimensionality N × 2 (where N is the number of edges) holding the node ids for the edges and the second of
dimensionality N × 1 holding the edge labels, 0 for negative and 1 for positive examples. The graph
matches the input graph passed to the :class:`.EdgeSplitter` constructor: the returned graph is a
:class:`.StellarGraph` instance if the input graph was one, and, similarly, a NetworkX graph if the input
graph was one.
"""
if p <= 0 or p >= 1:
raise ValueError("The value of p must be in the interval (0,1)")
if method != "global" and method != "local":
raise ValueError(
"Invalid method {}; valid options are 'local' or 'global'".format(
method
)
)
if not isinstance(keep_connected, (bool,)):
raise ValueError(
"({}) The flag keep_connected be boolean type.".format(
type(self).__name__
)
)
        if seed is not None:
            # Check the type first so that a non-integer seed raises the
            # intended error rather than a TypeError from the comparison below.
            if type(seed) != int:
                raise ValueError(
                    "({}) The random number generator seed value, seed, should be integer type or None.".format(
                        type(self).__name__
                    )
                )
            if seed < 0:
                raise ValueError(
                    "({}) The random number generator seed value, seed, should be a non-negative integer or None.".format(
                        type(self).__name__
                    )
                )
        if self._random is None:  # only create the random state once
self._random = np.random.RandomState(seed=seed)
if edge_label is not None: # working with a heterogeneous graph
if (
edge_attribute_label
and edge_attribute_threshold
and not attribute_is_datetime
):
raise ValueError("You can only split by datetime edge attribute")
            else:  # either no edge attribute was given, or the attribute is a datetime
edge_data_ids, edge_data_labels = self._train_test_split_heterogeneous(
p=p,
method=method,
edge_label=edge_label,
edge_attribute_label=edge_attribute_label,
edge_attribute_threshold=edge_attribute_threshold,
keep_connected=keep_connected,
)
else: # working with a homogeneous graph
edge_data_ids, edge_data_labels = self._train_test_split_homogeneous(
p=p, method=method, probs=probs, keep_connected=keep_connected
)
if self._input_was_stellargraph:
# if the graphs came in as a StellarGraph, return one too
result_graph = StellarGraph.from_networkx(
self.g_train, node_features=FEATURE_ATTR_NAME
)
else:
result_graph = self.g_train
return result_graph, edge_data_ids, edge_data_labels
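    # Illustrative usage sketch (added for this write-up; not part of the
    # original module). It assumes the EdgeSplitter was constructed from a
    # graph ``g`` as ``EdgeSplitter(g)``; the constructor arguments and the
    # edge type/attribute names shown are assumptions. A heterogeneous split
    # might look like:
    #
    #     splitter = EdgeSplitter(g)
    #     g_train, edge_ids, edge_labels = splitter.train_test_split(
    #         p=0.1,
    #         method="global",
    #         edge_label="friend",                  # hypothetical edge type
    #         edge_attribute_label="date",          # hypothetical attribute
    #         edge_attribute_threshold="01/01/2010",
    #         attribute_is_datetime=True,
    #     )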
def _get_edges(
self, edge_label, edge_attribute_label=None, edge_attribute_threshold=None
):
"""
Method that filters the edges in the self.g (heterogeneous) graph based on either the edge type
specified by edge_label, or based on edges of edge_label type that have property edge_attribute_label and
the value of the latter property is larger than the edge_attribute_threshold.
Args:
edge_label (str): The type of edges to filter
edge_attribute_label (str, optional): The edge attribute to use for filtering graph edges
edge_attribute_threshold (str, optional): The threshold applied to the edge attribute for filtering edges.
Returns:
(list) List of edges that satisfy the filtering criteria.
"""
# the graph in networkx format is stored in self.g_train
if self.g.is_multigraph():
all_edges = list(self.g.edges(keys=True))
else:
all_edges = list(self.g.edges())
if edge_attribute_label is None or edge_attribute_threshold is None:
# filter by edge_label
edges_with_label = [
e for e in all_edges if self.g.get_edge_data(*e)["label"] == edge_label
]
else:
# filter by edge label, edge attribute and threshold value
edge_attribute_threshold_dt = datetime.datetime.strptime(
edge_attribute_threshold, "%d/%m/%Y"
)
edges_with_label = [
e
for e in all_edges
if (
self.g.get_edge_data(*e)["label"] == edge_label
and datetime.datetime.strptime(
self.g.get_edge_data(*e)[edge_attribute_label], "%d/%m/%Y"
)
> edge_attribute_threshold_dt
)
]
return edges_with_label
def _get_edge_source_and_target_node_types(self, edges):
"""
Method that given a list of edges, for each edge it determines the type of the source and target
nodes and then returns them as a list of tuples.
This routine is necessary because networkx does not provide a direct method for determining the type of nodes
given an edge.
Args:
edges (list): List of edges as returned by networkx graph method edges().
Returns: (list) Returns a list of 2-tuples such that each value in the tuple holds the type (as str) of the
source and target nodes for each element in edges.
"""
# uses self.g_train but any graph object would do since nodes are shared
all_nodes = self.g_train.nodes(data=True)
# dictionary that maps node id to node attributes
all_nodes_as_dict = {n[0]: n[1] for n in all_nodes}
edge_node_types = set()
for edge in edges:
edge_node_types.add(
(
all_nodes_as_dict[edge[0]]["label"],
all_nodes_as_dict[edge[1]]["label"],
)
)
return edge_node_types
def _reduce_graph_by_edge_type_and_attribute(
self,
minedges,
p=0.5,
edge_label=None,
edge_attribute_label=None,
edge_attribute_threshold=None,
):
"""
Reduces the graph self.g_train by a factor p by removing existing edges not on minedges list such that
the reduced tree remains connected. Edges are removed based on the edge type and the values of a given edge
attribute and a threshold applied to the latter.
Args:
minedges (list): Spanning tree edges that cannot be removed.
p (float): Factor by which to reduce the size of the graph.
edge_label (str): The edge type to consider.
edge_attribute_label (str): The edge attribute to consider.
edge_attribute_threshold (str): The threshold value; only edges with attribute value larger than the
threshold can be removed.
Returns:
Returns the list of edges removed from the graph (also modifies the graph self.g_train
by removing the said edges)
"""
# We check that the parameters are given values but we don't check if the graph has edges with label
# edge_label and edge attributes with label edge_attribute_label. For now, we assume that the given values
# are valid; if not, then some cryptic exception is bound to be raised later on in the code.
if edge_label is None:
raise ValueError("edge_label must be specified.")
if edge_attribute_label is None:
raise ValueError("edge_attribute_label must be specified.")
if edge_attribute_threshold is None:
raise ValueError("attribute_threshold must be specified.")
# copy the original graph and start over in case this is not the first time
# reduce_graph has been called.
self.g_train = self.g.copy()
# Filter the graph's edges based on the edge type, edge attribute, and attribute threshold value given.
all_edges = self._get_edges(
edge_label=edge_label,
edge_attribute_label=edge_attribute_label,
edge_attribute_threshold=edge_attribute_threshold,
)
# Also, calculate the number of these edges in the graph.
num_edges_total = len(all_edges)
# print("Graph has {} edges of type {}".format(num_edges_total, edge_label))
# Multiply this number by p to determine the number of positive edge examples to sample
num_edges_to_remove = int(num_edges_total * p)
# shuffle the edges
self._random.shuffle(all_edges)
#
# iterate over the list of edges and for each edge if the edge is not in minedges, remove it from the graph
# until num_edges_to_remove edges have been removed and the graph reduced to p of its original size
count = 0
removed_edges = []
for edge in all_edges:
# Support minedges having keys (NetworkX 2.x) or not (NetworkX 1.x)
if edge not in minedges and (edge[0], edge[1]) not in minedges:
removed_edges.append(
(
edge[0],
edge[1],
1,
) # should this be edge + (1,) to support multigraphs?
) # the last entry is the label
self.g_train.remove_edge(*edge)
count += 1
if count == num_edges_to_remove:
return removed_edges
if len(removed_edges) < num_edges_to_remove:
raise ValueError(
"Unable to sample {} positive edges (could only sample {} positive edges). Consider using smaller value for p or set keep_connected=False".format(
num_edges_to_remove, len(removed_edges)
)
)
def _reduce_graph_by_edge_type(self, minedges, p=0.5, edge_label=None):
"""
Reduces the graph self.g_train by a factor p by removing existing edges not on minedges list such that
the reduced tree remains connected. Edges are removed based on the edge type.
Args:
minedges (list): Minimum spanning tree edges that cannot be removed.
p (float): Factor by which to reduce the size of the graph.
edge_label (str): The edge type to consider.
Returns:
(list) Returns the list of edges removed from self.g_train (also modifies self.g_train by removing said
edges)
"""
if edge_label is None:
raise ValueError("edge_label must be specified")
# copy the original graph and start over in case this is not the first time
# reduce_graph has been called.
self.g_train = self.g.copy()
# Filter the graph's edges based on the specified edge_label
all_edges = self._get_edges(edge_label=edge_label)
num_edges_total = len(all_edges)
print("Network has {} edges of type {}".format(num_edges_total, edge_label))
# Multiply this number by p to determine the number of positive edge examples to sample
num_edges_to_remove = int(num_edges_total * p)
# shuffle the edges
self._random.shuffle(all_edges)
# iterate over the list of filtered edges and for each edge if the edge is not in minedges, remove it from
# the graph until num_edges_to_remove edges have been removed and the graph is reduced to p of its original
# size
count = 0
removed_edges = []
for edge in all_edges:
# Support minedges having keys (NetworkX 2.x) or not (NetworkX 1.x)
if edge not in minedges and (edge[0], edge[1]) not in minedges:
removed_edges.append(
(edge[0], edge[1], 1)
) # the last entry is the label
self.g_train.remove_edge(*edge)
count += 1
if count == num_edges_to_remove:
return removed_edges
if len(removed_edges) < num_edges_to_remove:
raise ValueError(
"Unable to sample {} positive edges (could only sample {} positive edges). Consider using smaller value for p or set keep_connected=False".format(
num_edges_to_remove, len(removed_edges)
)
)
def _reduce_graph(self, minedges, p=0.5):
"""
Reduces the graph self.g_train by a factor p by removing existing edges not on minedges list such that
the reduced tree remains connected. Edge type is ignored and all edges are treated equally.
Args:
minedges (list): Minimum spanning tree edges that cannot be removed.
p (float): Factor by which to reduce the size of the graph.
Returns:
(list) Returns the list of edges removed from self.g_train (also modifies self.g_train by removing the
said edges)
"""
# copy the original graph and start over in case this is not the first time
# reduce_graph has been called.
self.g_train = self.g.copy()
# For multigraphs we should probably use keys
use_keys_in_edges = self.g.is_multigraph()
        # For NX 1.x/2.x compatibility we need to match the length of minedges
if len(minedges) > 0:
use_keys_in_edges = len(next(iter(minedges))) == 3
if use_keys_in_edges:
all_edges = list(self.g_train.edges(keys=True))
else:
all_edges = list(self.g_train.edges())
num_edges_to_remove = int(self.g_train.number_of_edges() * p)
if num_edges_to_remove > (self.g_train.number_of_edges() - len(self.minedges)):
raise ValueError(
"Not enough positive edges to sample after reserving {} number of edges for maintaining graph connectivity. Consider setting keep_connected=False.".format(
len(self.minedges)
)
)
# shuffle the edges
self._random.shuffle(all_edges)
# iterate over the list of edges and for each edge if the edge is not in minedges, remove it from the graph
# until num_edges_to_remove edges have been removed and the graph reduced to p of its original size
count = 0
removed_edges = []
for edge in all_edges:
if edge not in minedges:
removed_edges.append(
(edge[0], edge[1], 1)
) # the last entry is the label
self.g_train.remove_edge(*edge)
count += 1
if count == num_edges_to_remove:
return removed_edges
def _sample_negative_examples_by_edge_type_local_dfs(
self,
p=0.5,
probs=None,
edges_positive=None,
edge_label=None,
limit_samples=None,
):
"""
This method produces a list of edges that don't exist in graph self.g (negative examples). The number of
        negative edges produced is equal to the number of edges in the graph times p (which should be in the range
        (0,1]), or is limited to at most limit_samples if the latter is not None. The negative samples are between node types
as inferred from the edge type of the positive examples previously removed from the graph and given in
edges_positive.
This method uses depth-first search to efficiently (memory-wise) sample negative edges based on the local
neighbourhood of randomly (uniformly) sampled source nodes at distances defined by the probabilities in probs.
The source graph is not modified.
Args:
p (float): Factor that multiplies the number of edges in the graph and determines the number of negative
edges to be sampled.
probs (list): Probability distribution for the distance between source and target nodes.
edges_positive (list): The positive edge examples that have previously been removed from the graph
edge_label (str): The edge type to sample negative examples of
limit_samples (int, optional): It limits the maximum number of samples to the given number, if not None
Returns:
(list) A list of 2-tuples that are pairs of node IDs that don't have an edge between them in the graph.
"""
if probs is None:
probs = [0.0, 0.25, 0.50, 0.25]
            warnings.warn(
                "Using default sampling probabilities up to 3 hops from source node with values {}".format(
                    probs
                ),
                RuntimeWarning,
            )
if not isclose(sum(probs), 1.0):
raise ValueError("Sampling probabilities do not sum to 1")
self.negative_edge_node_distances = []
n = len(probs)
# determine the number of edges in the graph that have edge_label type
# Multiply this number by p to determine the number of positive edge examples to sample
all_edges = self._get_edges(edge_label=edge_label)
num_edges_total = len(all_edges)
print("Network has {} edges of type {}".format(num_edges_total, edge_label))
#
num_edges_to_sample = int(num_edges_total * p)
if limit_samples is not None:
if num_edges_to_sample > limit_samples:
num_edges_to_sample = limit_samples
edge_source_target_node_types = self._get_edge_source_and_target_node_types(
edges=edges_positive
)
if self.g_master is None:
edges = self.g.edges()
else:
edges = self.g_master.edges()
        # To speed up edge lookups, store the edges in a set; both (u, v) and
        # (v, u) orderings are added so lookups are direction-agnostic.
edges_set = set(edges)
edges_set.update({(e[1], e[0]) for e in edges})
sampled_edges_set = set()
start_nodes = list(self.g.nodes(data=True))
nodes_dict = {node[0]: node[1]["label"] for node in start_nodes}
count = 0
sampled_edges = []
num_iter = int(np.ceil(num_edges_to_sample / (1.0 * len(start_nodes)))) + 1
for _ in np.arange(0, num_iter):
self._random.shuffle(start_nodes)
# sample the distance to the target node using probs
target_node_distances = (
self._random.choice(n, len(start_nodes), p=probs) + 1
)
for u, d in zip(start_nodes, target_node_distances):
# perform DFS search up to d distance from the start node u.
visited = {
node[0]: False for node in start_nodes
} # for marking already visited nodes
nodes_stack = list()
# start at node u
nodes_stack.append((u[0], 0)) # tuple is (node, depth)
while len(nodes_stack) > 0:
next_node = nodes_stack.pop()
v = next_node[0] # retrieve node id
dv = next_node[1] # retrieve node distance from u
if not visited[v]:
visited[v] = True
                        # Check if this node is at depth d; if it is, then it could be selected as the
# target node for a negative edge sample. Otherwise add its neighbours to the stack, only
# if the depth is less than the search depth d.
if dv == d:
u_v_edge_type = (nodes_dict[u[0]], nodes_dict[v])
# if no edge between u and next_node[0] then this is the sample, so record and stop
# searching
                            # Note: The if statement below is expensive to evaluate because it needs to
                            # check membership of an element in several sets that can grow very large for
                            # large graphs and many examples to sample. Later, we should have a closer look
                            # at how we can speed this up.
if (
(u_v_edge_type in edge_source_target_node_types)
and (u[0] != v)
and ((u[0], v) not in edges_set)
and ((u[0], v) not in sampled_edges_set)
):
sampled_edges.append(
(u[0], v, 0)
) # the last entry is the class label
sampled_edges_set.add((u[0], v))
sampled_edges_set.add((v, u[0]))
count += 1
self.negative_edge_node_distances.append(d)
break
elif dv < d:
neighbours = list(nx.neighbors(self.g, v))
self._random.shuffle(neighbours)
neighbours = [(k, dv + 1) for k in neighbours]
nodes_stack.extend(neighbours)
if count == num_edges_to_sample:
return sampled_edges
if len(sampled_edges) != num_edges_to_sample:
raise ValueError(
"Unable to sample {} negative edges. Consider using smaller value for p.".format(
num_edges_to_sample
)
)
def _sample_negative_examples_local_dfs(
self, p=0.5, probs=None, limit_samples=None
):
"""
This method produces a list of edges that don't exist in graph self.g (negative examples). The number of
        negative edges produced is equal to the number of edges in the graph times p (which should be in the range
        (0,1]), or is limited to at most limit_samples if the latter is not None.
This method uses depth-first search to efficiently (memory-wise) sample negative edges based on the local
neighbourhood of randomly (uniformly) sampled source nodes at distances defined by the probabilities in probs.
The source graph is not modified.
Args:
p (float): Factor that multiplies the number of edges in the graph and determines the number of no-edges to
be sampled.
probs (list): Probability distribution for the distance between source and target nodes.
limit_samples (int, optional): It limits the maximum number of samples to the given number, if not None
Returns:
(list) A list of 2-tuples that are pairs of node IDs that don't have an edge between them in the graph.
"""
if probs is None:
probs = [0.0, 0.25, 0.50, 0.25]
warnings.warn(
"Using default sampling probabilities up to 3 hops from source node with values {}".format(
probs
),
RuntimeWarning,
)
if not isclose(sum(probs), 1.0):
raise ValueError("Sampling probabilities do not sum to 1")
self.negative_edge_node_distances = []
n = len(probs)
num_edges_to_sample = int(self.g.number_of_edges() * p)
if limit_samples is not None:
if num_edges_to_sample > limit_samples:
num_edges_to_sample = limit_samples
if self.g_master is None:
edges = self.g.edges()
else:
edges = self.g_master.edges()
        # To speed up edge lookups, store the edges in a set; both (u, v) and
        # (v, u) orderings are added so lookups are direction-agnostic.
edges_set = set(edges)
edges_set.update({(e[1], e[0]) for e in edges})
sampled_edges_set = set()
start_nodes = list(self.g.nodes(data=False))
count = 0
sampled_edges = []
num_iter = int(np.ceil(num_edges_to_sample / (1.0 * len(start_nodes))))
for _ in np.arange(0, num_iter):
self._random.shuffle(start_nodes)
# sample the distance to the target node using probs
target_node_distances = (
self._random.choice(n, len(start_nodes), p=probs) + 1
)
for u, d in zip(start_nodes, target_node_distances):
# perform DFS search up to d distance from the start node u.
visited = {node: False for node in start_nodes}
nodes_stack = list()
# start at node u
nodes_stack.append((u, 0)) # tuple is node, depth
while len(nodes_stack) > 0:
next_node = nodes_stack.pop()
v = next_node[0]
dv = next_node[1]
if not visited[v]:
visited[v] = True
                        # Check if this node is at depth d; if it is, then it could be selected as the
# target node for a negative edge sample. Otherwise add its neighbours to the stack, only
# if the depth is less than the search depth d.
if dv == d:
# if no edge between u and next_node[0] then this is the sample, so record and stop
# searching
if (
(u != v)
and ((u, v) not in edges_set)
and ((u, v) not in sampled_edges_set)
):
sampled_edges.append(
(u, v, 0)
) # the last entry is the class label
sampled_edges_set.add((u, v))
sampled_edges_set.add((v, u))
count += 1
self.negative_edge_node_distances.append(d)
break
elif dv < d:
neighbours = list(nx.neighbors(self.g, v))
self._random.shuffle(neighbours)
neighbours = [(k, dv + 1) for k in neighbours]
nodes_stack.extend(neighbours)
if count == num_edges_to_sample:
return sampled_edges
if len(sampled_edges) != num_edges_to_sample:
raise ValueError(
"Unable to sample {} negative edges. Consider using smaller value for p.".format(
num_edges_to_sample
)
)
def _sample_negative_examples_global(self, p=0.5, limit_samples=None):
"""
        This method samples pairs of nodes uniformly at random from the graph and, if a pair has no edge between them
        in the graph, records the pair as a negative edge.
Args:
            p (float): Factor that multiplies the number of edges in the graph and determines the number of negative
                edges to be sampled.
            limit_samples (int, optional): It limits the maximum number of samples to the given number, if not None
Returns:
(list) A list of 2-tuples that are pairs of node IDs that don't have an edge between them in the graph.
"""
self.negative_edge_node_distances = []
num_edges_to_sample = int(self.g.number_of_edges() * p)
if limit_samples is not None:
if num_edges_to_sample > limit_samples:
num_edges_to_sample = limit_samples
if self.g_master is None:
edges = list(self.g.edges())
else:
edges = list(self.g_master.edges())
        # To speed up edge lookups, store the edges in a set; both (u, v) and
        # (v, u) orderings are added so lookups are direction-agnostic.
edges_set = set(edges)
edges_set.update({(u[1], u[0]) for u in edges})
sampled_edges_set = set()
start_nodes = list(self.g.nodes(data=False))
end_nodes = list(self.g.nodes(data=False))
count = 0
sampled_edges = []
num_iter = int(np.ceil(num_edges_to_sample / (1.0 * len(start_nodes)))) + 1
for _ in np.arange(0, num_iter):
self._random.shuffle(start_nodes)
self._random.shuffle(end_nodes)
for u, v in zip(start_nodes, end_nodes):
if (
(u != v)
and ((u, v) not in edges_set)
and ((u, v) not in sampled_edges_set)
):
sampled_edges.append((u, v, 0)) # the last entry is the class label
sampled_edges_set.update(
{(u, v), (v, u)}
) # test for bi-directional edges
count += 1
if count == num_edges_to_sample:
return sampled_edges
if len(sampled_edges) != num_edges_to_sample:
raise ValueError(
"Unable to sample {} negative edges. Consider using smaller value for p.".format(
num_edges_to_sample
)
)
def _sample_negative_examples_by_edge_type_global(
self, edges, edge_label, p=0.5, limit_samples=None
):
"""
This method produces a list of edges that don't exist in graph self.g (negative examples). The number of
        negative edges produced is equal to the number of edges with label edge_label in the graph times p (which
        should be in the range (0,1]), or is limited to at most limit_samples if the latter is not None. The negative samples are
between node types as inferred from the edge type of the positive examples previously removed from the graph
and given in edges_positive.
The source graph is not modified.
Args:
edges (list): The positive edge examples that have previously been removed from the graph
edge_label (str): The edge type to sample negative examples of
p (float): Factor that multiplies the number of edges in the graph and determines the number of negative
edges to be sampled.
limit_samples (int, optional): It limits the maximum number of samples to the given number, if not None
Returns:
(list) A list of 2-tuples that are pairs of node IDs that don't have an edge between them in the graph.
"""
self.negative_edge_node_distances = []
# determine the number of edges in the graph that have edge_label type
# Multiply this number by p to determine the number of positive edge examples to sample
all_edges = self._get_edges(edge_label=edge_label)
num_edges_total = len(all_edges)
print("Network has {} edges of type {}".format(num_edges_total, edge_label))
#
num_edges_to_sample = int(num_edges_total * p)
if limit_samples is not None:
if num_edges_to_sample > limit_samples:
num_edges_to_sample = limit_samples
edge_source_target_node_types = self._get_edge_source_and_target_node_types(
edges=edges
)
        # To speed up edge lookups, store the edges in a set; both (u, v) and
        # (v, u) orderings are added so lookups are direction-agnostic.
edges_set = set(edges)
edges_set.update({(u[1], u[0]) for u in edges})
sampled_edges_set = set()
start_nodes = list(self.g.nodes(data=True))
end_nodes = list(self.g.nodes(data=True))
count = 0
sampled_edges = []
num_iter = int(np.ceil(num_edges_to_sample / (1.0 * len(start_nodes)))) + 1
for _ in np.arange(0, num_iter):
self._random.shuffle(start_nodes)
self._random.shuffle(end_nodes)
for u, v in zip(start_nodes, end_nodes):
u_v_edge_type = (u[1]["label"], v[1]["label"])
if (
(u_v_edge_type in edge_source_target_node_types)
and (u != v)
and ((u[0], v[0]) not in edges_set)
and ((u[0], v[0]) not in sampled_edges_set)
):
sampled_edges.append(
(u[0], v[0], 0)
) # the last entry is the class label
sampled_edges_set.update({(u[0], v[0]), (v[0], u[0])})
count += 1
if count == num_edges_to_sample:
return sampled_edges
if len(sampled_edges) != num_edges_to_sample:
raise ValueError(
"Unable to sample {} negative edges. Consider using smaller value for p.".format(
num_edges_to_sample
)
)
def _get_minimum_spanning_edges(self):
"""
Given an undirected graph, it calculates the minimum set of edges such that graph connectivity is preserved.
Returns:
(list) The minimum spanning edges of the undirected graph self.g
"""
mst = nx.minimum_spanning_edges(self.g, data=False)
edges = list(mst)
        # To speed up edge lookups, store the edges in a set; both (u, v) and
        # (v, u) orderings are added so lookups are direction-agnostic.
self.minedges_set = {(u[0], u[1]) for u in edges}
self.minedges_set.update({(u[1], u[0]) for u in edges})
return edges
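# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal homogeneous-graph demo. It assumes EdgeSplitter is constructed
# directly from a NetworkX graph as EdgeSplitter(g); that constructor
# signature is an assumption here.
if __name__ == "__main__":
    _g = nx.barbell_graph(10, 2)  # small connected demo graph
    _splitter = EdgeSplitter(_g)  # assumed constructor
    _g_train, _edge_ids, _edge_labels = _splitter.train_test_split(
        p=0.1, method="global", keep_connected=True, seed=42
    )
    print(_edge_ids.shape, _edge_labels.shape)  # expected: (N, 2) and (N,)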
|
{
"content_hash": "e93fe60b1ad97bbfe55561d75e408640",
"timestamp": "",
"source": "github",
"line_count": 1055,
"max_line_length": 171,
"avg_line_length": 46.60663507109005,
"alnum_prop": 0.5820012202562538,
"repo_name": "stellargraph/stellargraph",
"id": "9de70cc4373ca1b050c66289cfacb76a756b2716",
"size": "49779",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "stellargraph/data/edge_splitter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3274"
},
{
"name": "Python",
"bytes": "1740018"
},
{
"name": "Shell",
"bytes": "18236"
}
],
"symlink_target": ""
}
|
import time
import jwt
from pandaharvester.harvesterconfig import harvester_config
class HarvesterToken():
"""
Methods of JSON Web Token used in harvester frontend
"""
algorithm = 'HS256'
def __init__(self, **kwarg):
# load secret from file
with open(harvester_config.frontend.secretFile) as _f:
self.secret = _f.read()
        # whether to verify the token
self.verify = True
if harvester_config.frontend.verifyToken is False:
self.verify = False
        # default token lifetime in seconds: 4 days.
self.default_lifetime = 345600
# default payload spec
self.default_payload_dict = {
'sub': 'Subject',
'exp': 0,
'iss': harvester_config.master.harvester_id,
'iat': 0,
}
def generate(self, payload=None, header=None):
"""
Generate a harvester token.
        Additional payload fields can be supplied as a dict and are merged into the defaults.
"""
timestamp_now = int(time.time())
payload_dict = self.default_payload_dict.copy()
payload_dict['iat'] = timestamp_now
payload_dict['exp'] = timestamp_now + self.default_lifetime
if payload:
payload_dict.update(payload)
token = jwt.encode(payload_dict, key=self.secret, algorithm=self.algorithm, headers=header)
return token
def get_payload(self, token):
"""
Decode a harvester token to a python object, typically dict.
One can set verify=False if other proxy/gateway service already verifies token.
"""
payload = jwt.decode(token, key=self.secret, algorithms=self.algorithm, verify=self.verify)
return payload
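# --- Usage sketch (illustrative; not part of the original module) ---
# Requires a configured harvester: harvester_config.frontend.secretFile must
# point at an existing secret file, otherwise HarvesterToken() will fail.
if __name__ == '__main__':
    token_handler = HarvesterToken()
    token = token_handler.generate(payload={'sub': 'test-subject'})
    print(token_handler.get_payload(token))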
|
{
"content_hash": "f0d1f368bae90b6f8cb635b5170ea157",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 99,
"avg_line_length": 33.38461538461539,
"alnum_prop": 0.6111751152073732,
"repo_name": "dougbenjamin/panda-harvester",
"id": "9b1762ecf593885b8234c7b6166cbcaa2645db4b",
"size": "1736",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pandaharvester/harvestermisc/frontend_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1540221"
},
{
"name": "Shell",
"bytes": "21117"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.test import TestCase
from django_openid_auth.models import (
Permission,
UserOpenID,
)
class UserOpenIDModelTestCase(TestCase):
def test_create_useropenid(self):
user = User.objects.create_user('someuser', 'someuser@example.com',
password=None)
user_openid, created = UserOpenID.objects.get_or_create(
user=user,
claimed_id='http://example.com/existing_identity',
display_id='http://example.com/existing_identity')
self.assertEqual('someuser', user_openid.user.username)
self.assertEqual(
user_openid.claimed_id, 'http://example.com/existing_identity')
self.assertEqual(
user_openid.display_id, 'http://example.com/existing_identity')
self.assertFalse(
User.objects.get(username='someuser').has_perm(
'django_openid_auth.account_verified'))
def test_delete_verified_useropenid(self):
user = User.objects.create_user('someuser', 'someuser@example.com',
password=None)
user_openid, created = UserOpenID.objects.get_or_create(
user=user,
claimed_id='http://example.com/existing_identity',
display_id='http://example.com/existing_identity')
permission = Permission.objects.get(codename='account_verified')
user.user_permissions.add(permission)
self.assertTrue(
User.objects.get(username='someuser').has_perm(
'django_openid_auth.account_verified'))
user_openid.delete()
self.assertFalse(
User.objects.get(username='someuser').has_perm(
'django_openid_auth.account_verified'))
|
{
"content_hash": "9482c548add8bdc565e0fc6b43b52494",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 75,
"avg_line_length": 40.32608695652174,
"alnum_prop": 0.6231805929919138,
"repo_name": "kawamon/hue",
"id": "d9a48c836aa37b0c92026dde4210ab9dc1ca3a5f",
"size": "3239",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/django-openid-auth-0.14/django_openid_auth/tests/test_models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
}
|
from optparse import OptionParser
import os
# Get the command line parameters
parser = OptionParser()
parser.add_option("-f", "--inputDirectory", dest="inputDirectory", default=os.path.dirname(os.path.realpath(__file__)),
help="Give the input directory containing ALL the files from TG-GATEs")
(options, args) = parser.parse_args()
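# Example invocation (the path below is hypothetical):
#     python readTGGates.py -f /path/to/tggates_files
# Without -f, inputDirectory defaults to the directory containing this script.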
|
{
"content_hash": "6695770fe655f03e453fc9f75ab81f54",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 119,
"avg_line_length": 39,
"alnum_prop": 0.717948717948718,
"repo_name": "J0bbie/AdverseEffectsPredictor",
"id": "cb36a6fad41334d7815f566ec39ac35cb9668c00",
"size": "611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "writeData2DB/readTGGates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14154"
},
{
"name": "R",
"bytes": "9038"
}
],
"symlink_target": ""
}
|
import sys
import struct
file = open("inst_rom.bin", "rb")
write_file = open("inst_rom_en.bin", "wb")
roidata_start = 0x4c70
x = 0x3000
while True:
_bytes = file.read(4)
x += 4
if not _bytes:
break
_byte_array = bytearray(_bytes)
    if x < roidata_start:
_byte_array.reverse()
write_file.write(_byte_array)
print(hex(x))
file.close()
write_file.close()
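# --- Alternative sketch (illustrative; not part of the original script) ---
# The loop above reverses each 4-byte word that ends before roidata_start,
# i.e. it swaps the endianness of the instruction words while leaving the
# ROI data untouched. The same transform can be written with struct (already
# imported above); this variant is untested against the real ROM layout.
def swap_instruction_words(data, base=0x3000, limit=0x4c70):
    out = bytearray()
    for off in range(0, len(data), 4):
        word = data[off:off + 4]
        # mirror the original test: x = base + off + 4, reverse while x < limit
        if len(word) == 4 and base + off + 4 < limit:
            word = struct.pack('>I', struct.unpack('<I', word)[0])
        out.extend(word)
    return bytes(out)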
|
{
"content_hash": "7138113f666dd9040b64e5f58e5206c0",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 42,
"avg_line_length": 17.818181818181817,
"alnum_prop": 0.6173469387755102,
"repo_name": "StephenChusang/OS",
"id": "7447bca9bd245761b758f4fe23fe43be8cfa8890",
"size": "414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin2bin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1553"
},
{
"name": "C",
"bytes": "22190"
},
{
"name": "C++",
"bytes": "108"
},
{
"name": "Makefile",
"bytes": "752"
},
{
"name": "Python",
"bytes": "414"
},
{
"name": "Shell",
"bytes": "406"
}
],
"symlink_target": ""
}
|
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
Base = declarative_base()
class User(Base):
__tablename__ = 'huahu'
id = Column(Integer, primary_key=True)
name = Column(String)
def __init__(self, id, name):
self.id = id
self.name = name
def __repr__(self):
return "<User('%s')>"%self.name
class DefConn(object):
    def __init__(self):
        self.engine = create_engine('mysql+mysqldb://root:@localhost/huahu', echo=True)
    def GetConn(self):
        self.db = scoped_session(sessionmaker(bind=self.engine))
        return self.db
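# --- Usage sketch (illustrative; not part of the original module) ---
# The MySQL URL above assumes a local server with a `huahu` database; for a
# quick self-contained check, an in-memory SQLite engine works the same way.
if __name__ == '__main__':
    engine = create_engine('sqlite://', echo=False)
    Base.metadata.create_all(engine)
    db = scoped_session(sessionmaker(bind=engine))
    db.add(User(1, 'demo'))
    db.commit()
    print(db.query(User).all())  # -> [<User('demo')>]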
|
{
"content_hash": "9cbede94a304c02c5841e03f475bc3cb",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 86,
"avg_line_length": 29,
"alnum_prop": 0.6500638569604087,
"repo_name": "huahu/sjianbing",
"id": "06f8dc76ca977aba1f7ea6e38bc3767dca702c7b",
"size": "783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3896"
},
{
"name": "JavaScript",
"bytes": "1123"
},
{
"name": "Python",
"bytes": "4984"
}
],
"symlink_target": ""
}
|
import json
from functools import wraps
import flask
import dci.auth_mechanism as am
from dci.common import exceptions as dci_exc
from dci.policies import ROLES
def reject():
"""Sends a 401 reject response that enables basic auth."""
    auth_message = ('Could not verify your access level for that URL. '
                    'Please login with proper credentials.')
auth_message = json.dumps({'_status': 'Unauthorized',
'message': auth_message})
headers = {'WWW-Authenticate': 'Basic realm="Login required"'}
return flask.Response(auth_message, 401, headers=headers,
content_type='application/json')
def _get_auth_class_from_headers(headers):
if 'Authorization' not in headers:
raise dci_exc.DCIException('Authorization header missing',
status_code=401)
auth_type = headers.get('Authorization').split(' ')[0]
if auth_type == 'Bearer':
return am.OpenIDCAuth
elif auth_type == 'DCI-HMAC-SHA256':
return am.HmacMechanism
elif auth_type == 'Basic':
return am.BasicAuthMechanism
raise dci_exc.DCIException('Authorization scheme %s unknown' % auth_type,
status_code=401)
def login_required(f):
@wraps(f)
def decorated(*args, **kwargs):
auth_class = _get_auth_class_from_headers(flask.request.headers)
auth_scheme = auth_class(flask.request)
auth_scheme.authenticate()
return f(auth_scheme.identity, *args, **kwargs)
return decorated
def check_roles(f):
@wraps(f)
def decorated(*args, **kwargs):
identity = args[0]
if identity.role_label in ROLES[f.__name__]:
return f(*args, **kwargs)
raise dci_exc.Unauthorized()
return decorated
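# --- Usage sketch (illustrative; not part of the original module) ---
# A hypothetical Flask view wired with the decorators above. login_required
# is applied outermost so that check_roles receives the identity as its
# first positional argument; the route and attribute names are assumptions.
#
#     @app.route('/api/jobs')
#     @login_required
#     @check_roles
#     def jobs(identity):
#         return flask.jsonify({'role': identity.role_label})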
|
{
"content_hash": "5c875c11c33c3502038972fb8eaa895f",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 77,
"avg_line_length": 30.466666666666665,
"alnum_prop": 0.6214442013129103,
"repo_name": "enovance/dci-control-server",
"id": "299904d0ef4400e736dda222c099917285aa6ad8",
"size": "2436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dci/decorators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1112"
},
{
"name": "Python",
"bytes": "314720"
},
{
"name": "Shell",
"bytes": "4082"
}
],
"symlink_target": ""
}
|
import requests
from .base import AbstractSharer
class FacebookFeedSharer(AbstractSharer):
def __init__(self, feed_id=None, access_token=None):
super(FacebookFeedSharer, self).__init__()
self.feed_id = feed_id
self.access_token = access_token
def send(self, message, **kw):
request = requests.post(
'https://graph.facebook.com/%s/feed' % self.feed_id,
data={
'message': message,
'access_token': self.access_token
}
)
return request.status_code == 200
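# --- Usage sketch (illustrative; not part of the original module) ---
# Posting requires a real feed id and a valid access token; the values below
# are placeholders.
if __name__ == '__main__':
    sharer = FacebookFeedSharer(feed_id='<feed-id>', access_token='<token>')
    posted = sharer.send('Hello from python-sharer')
    print('posted' if posted else 'failed')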
|
{
"content_hash": "03783a4289bcc0ebc27666ba63fcaec0",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 64,
"avg_line_length": 30.42105263157895,
"alnum_prop": 0.5761245674740484,
"repo_name": "FelixLoether/python-sharer",
"id": "137d63e18537cdf4e86aef03e21e6b370f3a0b5f",
"size": "578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sharer/facebook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17247"
}
],
"symlink_target": ""
}
|
import sys
import time
import os
import base64
import yaml
from pprint import pprint
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
def harvest_yaml(filename):
lines = []
with open(filename) as fh:
yaml_found = None
for line in fh.readlines():
line = line.strip()
if line.find("---") > 0 and yaml_found is None:
yaml_found = True
elif line.find("---") > 0 and yaml_found is True:
yaml_found = False
if yaml_found:
start_loc = line.find("%") +1
if start_loc > 5:
start_loc = 0
lines.append(line[start_loc:].strip())
elif yaml_found is False:
break
#lines.append("---")
return yaml.safe_load("\n".join(lines))
def main():
    # harvest_yaml already returns parsed YAML (a dict), so there is no need
    # to run it through yaml.safe_load a second time.
    data = harvest_yaml(sys.argv[1])
    pprint(data)
    #print(yaml.dump(data, Dumper=Dumper, allow_unicode=True, default_flow_style=False))
if __name__ == '__main__':
main()
|
{
"content_hash": "43175d727ad9cc021d3b31665b221ba2",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 81,
"avg_line_length": 19.18,
"alnum_prop": 0.6475495307612096,
"repo_name": "thaapaniemi/motonkone",
"id": "dba3e85b3d4a6a43e42a88c792c5370d5fe4ae19",
"size": "1006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/metadata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "TeX",
"bytes": "117076"
}
],
"symlink_target": ""
}
|
import os
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase
from api.models import Page, User
class PageTests(APITestCase):
def setUp(self):
self.user = User.objects.create_user(
username='one', email='one@example.com', password='one')
token = Token.objects.get(user__username=self.user.username)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)
def test_create_page_endpoint(self):
url = reverse('page-list')
data = {
'name': 'Endpoint Works',
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['name'], 'Endpoint Works')
def _create_test_file(self, path):
f = open(path, 'w')
f.write('.title { font-size: 1.5rem }\n')
f.close()
f = open(path, 'rb')
return f
def test_add_page_stylesheet(self):
page = Page.objects.create(name='First', user=self.user)
url = reverse('page-detail', kwargs={'id': page.id})
stylesheet = self._create_test_file(os.path.join('stylesheets', 'stylesheet.css'))
response = self.client.patch(url, data={'stylesheet': stylesheet}, format='multipart')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['stylesheet'], '/static/' + response.data['id'] + '.css')
def test_create_page_with_stylesheet(self):
url = reverse('page-list')
stylesheet = self._create_test_file(os.path.join('stylesheets', 'stylesheet.css'))
data = {
'name': 'Created',
'stylesheet': stylesheet
}
response = self.client.post(url, data, format='multipart')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['name'], 'Created')
self.assertEqual(response.data['stylesheet'], '/static/' + response.data['id'] + '.css')
def test_add_comment_to_page(self):
page = Page.objects.create(name='First', user=self.user)
url = reverse('page-comments', kwargs={'id': page.id})
response = self.client.post(url, data={'text': 'comment'})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_add_comment_with_parent_to_page(self):
page = Page.objects.create(name='First', user=self.user)
url = reverse('page-comments', kwargs={'id': page.id})
# First comment
response = self.client.post(url, data={'text': 'comment'})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
parent = response.data.get('id')
response = self.client.post(url, data={'text': 'comment', 'parent': parent})
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
|
{
"content_hash": "0a7b37be69192586708885243b72e136",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 96,
"avg_line_length": 37.924050632911396,
"alnum_prop": 0.6365153538050734,
"repo_name": "Siecje/agora-api",
"id": "5e2bb1ed00eb2e1ad154d1bbc4bd0c080dc3b550",
"size": "2996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/tests/test_pages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26218"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import json
import unittest
from google.appengine.api import files
from google.appengine.ext import testbed
import webapp2
from webapp2_extras import i18n
# Workaround for i18n; without this the tests will not run.
app = webapp2.WSGIApplication(
[webapp2.Route('/', None, name='upload_handler')])
request = webapp2.Request({'SERVER_NAME': 'test', 'SERVER_PORT': 80,
'wsgi.url_scheme': 'http'})
request.app = app
app.set_globals(app=app, request=request)
i18n.default_config['default_locale'] = 'en_US'
i18n.default_config['default_timezone'] = 'UTC'
# End of workaround
class GAETestCase(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.setup_env(app_id="_")
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_user_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_memcache_stub()
self.testbed.init_mail_stub()
self.testbed.init_taskqueue_stub()
def tearDown(self):
self.testbed.deactivate()
def assert_can_serialize_as_json(self, json_response):
"""
        Asserts that json_response contains JSON-serializable data; raises an exception otherwise.
        :param json_response: a JsonResponse or JsonUnsecureResponse instance
        :return:
"""
json.dumps(json_response.context)
class BlobstoreTestCase(GAETestCase):
def setUp(self):
GAETestCase.setUp(self)
self.testbed.init_blobstore_stub()
self.testbed.init_files_stub()
self.testbed.init_images_stub()
def save_blob(self, blobdata='blobdata'):
file_name = files.blobstore.create(mime_type='application/octet-stream')
with files.open(file_name, 'a') as f:
f.write(blobdata)
files.finalize(file_name)
blob_key = files.blobstore.get_blob_key(file_name)
return blob_key
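# --- Usage sketch (illustrative; not part of the original module) ---
# A hypothetical test case built on the helpers above; runnable only inside
# a Google App Engine SDK environment.
class ExampleBlobTest(BlobstoreTestCase):
    def test_save_blob_returns_key(self):
        blob_key = self.save_blob('payload')
        self.assertIsNotNone(blob_key)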
|
{
"content_hash": "418f793533128f969b664c7201cf7fa6",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 102,
"avg_line_length": 31.857142857142858,
"alnum_prop": 0.6681614349775785,
"repo_name": "renzon/blob_app",
"id": "abc4dea9813d9a225fd86b6a7ea258c425d8050f",
"size": "2031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23304"
}
],
"symlink_target": ""
}
|
from collections.abc import Iterable
from pathlib import Path
import time
import openmc
from openmc.checkvalue import check_type, check_value
class Model:
"""Model container.
This class can be used to store instances of :class:`openmc.Geometry`,
:class:`openmc.Materials`, :class:`openmc.Settings`,
:class:`openmc.Tallies`, :class:`openmc.Plots`, and :class:`openmc.CMFD`,
thus making a complete model. The :meth:`Model.export_to_xml` method will
export XML files for all attributes that have been set. If the
:meth:`Model.materials` attribute is not set, it will attempt to create a
``materials.xml`` file based on all materials appearing in the geometry.
Parameters
----------
geometry : openmc.Geometry, optional
Geometry information
materials : openmc.Materials, optional
Materials information
settings : openmc.Settings, optional
Settings information
tallies : openmc.Tallies, optional
Tallies information
plots : openmc.Plots, optional
Plot information
Attributes
----------
geometry : openmc.Geometry
Geometry information
materials : openmc.Materials
Materials information
settings : openmc.Settings
Settings information
tallies : openmc.Tallies
Tallies information
plots : openmc.Plots
Plot information
"""
def __init__(self, geometry=None, materials=None, settings=None,
tallies=None, plots=None):
self.geometry = openmc.Geometry()
self.materials = openmc.Materials()
self.settings = openmc.Settings()
self.tallies = openmc.Tallies()
self.plots = openmc.Plots()
if geometry is not None:
self.geometry = geometry
if materials is not None:
self.materials = materials
if settings is not None:
self.settings = settings
if tallies is not None:
self.tallies = tallies
if plots is not None:
self.plots = plots
@property
def geometry(self):
return self._geometry
@property
def materials(self):
return self._materials
@property
def settings(self):
return self._settings
@property
def tallies(self):
return self._tallies
@property
def plots(self):
return self._plots
@geometry.setter
def geometry(self, geometry):
check_type('geometry', geometry, openmc.Geometry)
self._geometry = geometry
@materials.setter
def materials(self, materials):
check_type('materials', materials, Iterable, openmc.Material)
if isinstance(materials, openmc.Materials):
self._materials = materials
else:
del self._materials[:]
for mat in materials:
self._materials.append(mat)
@settings.setter
def settings(self, settings):
check_type('settings', settings, openmc.Settings)
self._settings = settings
@tallies.setter
def tallies(self, tallies):
check_type('tallies', tallies, Iterable, openmc.Tally)
if isinstance(tallies, openmc.Tallies):
self._tallies = tallies
else:
del self._tallies[:]
for tally in tallies:
self._tallies.append(tally)
@plots.setter
def plots(self, plots):
check_type('plots', plots, Iterable, openmc.Plot)
if isinstance(plots, openmc.Plots):
self._plots = plots
else:
del self._plots[:]
for plot in plots:
self._plots.append(plot)
def deplete(self, timesteps, chain_file=None, method='cecm',
fission_q=None, **kwargs):
"""Deplete model using specified timesteps/power
Parameters
----------
timesteps : iterable of float
Array of timesteps in units of [s]. Note that values are not
cumulative.
chain_file : str, optional
Path to the depletion chain XML file. Defaults to the chain
found under the ``depletion_chain`` in the
:envvar:`OPENMC_CROSS_SECTIONS` environment variable if it exists.
method : str
Integration method used for depletion (e.g., 'cecm', 'predictor')
fission_q : dict, optional
Dictionary of nuclides and their fission Q values [eV].
If not given, values will be pulled from the ``chain_file``.
**kwargs
Keyword arguments passed to integration function (e.g.,
:func:`openmc.deplete.integrator.cecm`)
"""
# Import the depletion module. This is done here rather than the module
# header to delay importing openmc.lib (through openmc.deplete) which
# can be tough to install properly.
import openmc.deplete as dep
# Create OpenMC transport operator
op = dep.Operator(
self.geometry, self.settings, chain_file,
fission_q=fission_q,
)
# Perform depletion
check_value('method', method, ('cecm', 'predictor', 'cf4', 'epc_rk4',
'si_celi', 'si_leqi', 'celi', 'leqi'))
getattr(dep.integrator, method)(op, timesteps, **kwargs)
def export_to_xml(self, directory='.'):
"""Export model to XML files.
Parameters
----------
directory : str
Directory to write XML files to. If it doesn't exist already, it
will be created.
"""
# Create directory if required
d = Path(directory)
if not d.is_dir():
d.mkdir(parents=True)
self.settings.export_to_xml(d)
if not self.settings.dagmc:
self.geometry.export_to_xml(d)
# If a materials collection was specified, export it. Otherwise, look
# for all materials in the geometry and use that to automatically build
# a collection.
if self.materials:
self.materials.export_to_xml(d)
else:
materials = openmc.Materials(self.geometry.get_all_materials()
.values())
materials.export_to_xml(d)
if self.tallies:
self.tallies.export_to_xml(d)
if self.plots:
self.plots.export_to_xml(d)
def run(self, **kwargs):
"""Creates the XML files, runs OpenMC, and returns the path to the last
statepoint file generated.
.. versionchanged:: 0.12
Instead of returning the final k-effective value, this function now
returns the path to the final statepoint written.
Parameters
----------
**kwargs
Keyword arguments passed to :func:`openmc.run`
Returns
-------
Path
Path to the last statepoint written by this run
(None if no statepoint was written)
"""
self.export_to_xml()
# Setting tstart here ensures we don't pick up any pre-existing statepoint
# files in the output directory
tstart = time.time()
last_statepoint = None
openmc.run(**kwargs)
# Get output directory and return the last statepoint written by this run
if self.settings.output and 'path' in self.settings.output:
output_dir = Path(self.settings.output['path'])
else:
output_dir = Path.cwd()
for sp in output_dir.glob('statepoint.*.h5'):
mtime = sp.stat().st_mtime
if mtime >= tstart: # >= allows for poor clock resolution
tstart = mtime
last_statepoint = sp
return last_statepoint
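# --- Usage sketch (illustrative; not part of the original module) ---
# Building and running a model needs a working OpenMC installation plus
# cross-section data; the one-sphere geometry below is a minimal placeholder
# and the API calls reflect the OpenMC version this module targets.
if __name__ == '__main__':
    fuel = openmc.Material(name='fuel')
    fuel.add_nuclide('U235', 1.0)
    fuel.set_density('g/cm3', 10.0)

    sphere = openmc.Sphere(r=10.0, boundary_type='vacuum')
    cell = openmc.Cell(fill=fuel, region=-sphere)

    settings = openmc.Settings()
    settings.batches = 10
    settings.inactive = 2
    settings.particles = 1000

    model = Model(geometry=openmc.Geometry([cell]), settings=settings)
    model.export_to_xml()  # writes settings.xml, geometry.xml, materials.xml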
|
{
"content_hash": "6321bb75dfb1b2bc661be278cf5f9ded",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 82,
"avg_line_length": 32.391666666666666,
"alnum_prop": 0.593902752765629,
"repo_name": "liangjg/openmc",
"id": "7d6acb281ced4f38cd2e0bf55300bf1bae119273",
"size": "7774",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "openmc/model/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "10760"
},
{
"name": "C++",
"bytes": "1414270"
},
{
"name": "CMake",
"bytes": "15307"
},
{
"name": "Dockerfile",
"bytes": "1426"
},
{
"name": "Python",
"bytes": "2954151"
},
{
"name": "Shell",
"bytes": "2762"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse
from django.conf.urls import url, include
from django.views.generic import RedirectView
from .api import LinkedEventsAPIRouter
from helusers import admin
admin.autodiscover()
api_router = LinkedEventsAPIRouter()
class RedirectToAPIRootView(RedirectView):
permanent = False
def get_redirect_url(self, *args, **kwargs):
return reverse('api-root', kwargs={'version': 'v1'})
urlpatterns = [
url(r'^(?P<version>(v0.1|v1))/', include(api_router.urls)),
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include('allauth.urls')),
url(r'^$', RedirectToAPIRootView.as_view()),
]
|
{
"content_hash": "a8d2f998331a4d98f2aa5f9f7b7b2b54",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 63,
"avg_line_length": 27.75,
"alnum_prop": 0.7102102102102102,
"repo_name": "aapris/linkedevents",
"id": "21da6a6301de508eaf3577dc9460ec66c6af02ab",
"size": "666",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "linkedevents/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4464"
},
{
"name": "Python",
"bytes": "415096"
},
{
"name": "Shell",
"bytes": "2177"
}
],
"symlink_target": ""
}
|
"""Shortest paths and path lengths using the A* ("A star") algorithm.
"""
from heapq import heappush, heappop
from itertools import count
import networkx as nx
from networkx.utils import not_implemented_for
__all__ = ['astar_path', 'astar_path_length']
@not_implemented_for('multigraph')
def astar_path(G, source, target, heuristic=None, weight='weight'):
"""Return a list of nodes in a shortest path between source and target
using the A* ("A-star") algorithm.
There may be more than one shortest path. This returns only one.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
target : node
Ending node for path
heuristic : function
A function to evaluate the estimate of the distance
       from a node to the target. The function takes
       two node arguments and must return a number.
weight: string, optional (default='weight')
Edge data key corresponding to the edge weight.
Raises
------
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G=nx.path_graph(5)
>>> print(nx.astar_path(G,0,4))
[0, 1, 2, 3, 4]
>>> G=nx.grid_graph(dim=[3,3]) # nodes are two-tuples (x,y)
>>> def dist(a, b):
... (x1, y1) = a
... (x2, y2) = b
... return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
>>> print(nx.astar_path(G,(0,0),(2,2),dist))
[(0, 0), (0, 1), (1, 1), (1, 2), (2, 2)]
See Also
--------
shortest_path, dijkstra_path
"""
if source not in G or target not in G:
msg = 'Either source {} or target {} is not in G'
raise nx.NodeNotFound(msg.format(source, target))
if heuristic is None:
# The default heuristic is h=0 - same as Dijkstra's algorithm
def heuristic(u, v):
return 0
push = heappush
pop = heappop
# The queue stores priority, node, cost to reach, and parent.
# Uses Python heapq to keep in priority order.
# Add a counter to the queue to prevent the underlying heap from
    # attempting to compare the nodes themselves. The counter breaks ties in
    # the priority and is guaranteed unique for all nodes in the graph.
c = count()
queue = [(0, next(c), source, 0, None)]
# Maps enqueued nodes to distance of discovered paths and the
# computed heuristics to target. We avoid computing the heuristics
# more than once and inserting the node into the queue too many times.
enqueued = {}
# Maps explored nodes to parent closest to the source.
explored = {}
while queue:
# Pop the smallest item from queue.
_, __, curnode, dist, parent = pop(queue)
if curnode == target:
path = [curnode]
node = parent
while node is not None:
path.append(node)
node = explored[node]
path.reverse()
return path
if curnode in explored:
continue
explored[curnode] = parent
for neighbor, w in G[curnode].items():
if neighbor in explored:
continue
ncost = dist + w.get(weight, 1)
if neighbor in enqueued:
qcost, h = enqueued[neighbor]
# if qcost < ncost, a longer path to neighbor remains
# enqueued. Removing it would need to filter the whole
# queue, it's better just to leave it there and ignore
# it when we visit the node a second time.
if qcost <= ncost:
continue
else:
h = heuristic(neighbor, target)
enqueued[neighbor] = ncost, h
push(queue, (ncost + h, next(c), neighbor, ncost, curnode))
raise nx.NetworkXNoPath("Node %s not reachable from %s" % (source, target))
def astar_path_length(G, source, target, heuristic=None, weight='weight'):
"""Return the length of the shortest path between source and target using
the A* ("A-star") algorithm.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path
target : node
Ending node for path
heuristic : function
A function to evaluate the estimate of the distance
       from a node to the target. The function takes
       two node arguments and must return a number.
Raises
------
NetworkXNoPath
If no path exists between source and target.
See Also
--------
astar_path
"""
if source not in G or target not in G:
msg = 'Either source {} or target {} is not in G'
raise nx.NodeNotFound(msg.format(source, target))
path = astar_path(G, source, target, heuristic, weight)
return sum(G[u][v].get(weight, 1) for u, v in zip(path[:-1], path[1:]))
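# Usage sketch (mirrors the path_graph example in astar_path above; edges
# default to weight 1, so the 0->4 path has length 4):
#
#     >>> G = nx.path_graph(5)
#     >>> nx.astar_path_length(G, 0, 4)
#     4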
|
{
"content_hash": "d6527bc70ae418a4f1b3c725285b3572",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 79,
"avg_line_length": 30.3875,
"alnum_prop": 0.5909090909090909,
"repo_name": "JamesClough/networkx",
"id": "3ec06460efae62427fb065a26b8b054215994886",
"size": "5181",
"binary": false,
"copies": "4",
"ref": "refs/heads/inverse_line_graph",
"path": "networkx/algorithms/shortest_paths/astar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "PowerShell",
"bytes": "3311"
},
{
"name": "Python",
"bytes": "3246891"
}
],
"symlink_target": ""
}
|
"""
Given a set of non-overlapping intervals, insert a new interval into the intervals (merge if necessary).
You may assume that the intervals were initially sorted according to their start times.
Example 1:
Given intervals [1,3],[6,9], insert and merge [2,5] in as [1,5],[6,9].
Example 2:
Given [1,2],[3,5],[6,7],[8,10],[12,16], insert and merge [4,9] in as [1,2],[3,10],[12,16].
This is because the new interval [4,9] overlaps with [3,5],[6,7],[8,10].
ANSWER:
Iterate once over the already-sorted intervals, merging on overlap: time complexity is O(n)
"""
# Definition for an interval.
class Interval(object):
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution(object):
def insert(self, intervals, newInterval):
"""
:type intervals: List[Interval]
:type newInterval: Interval
:rtype: List[Interval]
"""
if not intervals:
return [newInterval]
ans = []
for i in xrange(len(intervals)):
if intervals[i].start > newInterval.end:
ans.append(newInterval)
ans.extend(intervals[i:])
return ans
if intervals[i].end < newInterval.start:
ans.append(intervals[i])
else:
newInterval = Interval(
min(intervals[i].start,newInterval.start),
max(intervals[i].end,newInterval.end))
ans.append(newInterval)
return ans
if __name__ == '__main__':
l = [1,2],[3,5],[6,7],[8,10],[12,16]
intervals = [Interval(x[0],x[1]) for x in l]
newInterval = Interval(4,9)
for i in Solution().insert(intervals,newInterval):
print i.start, i.end
|
{
"content_hash": "4b4dcfa6c5b457d7e53e3ea2fbaf6e52",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 104,
"avg_line_length": 31.81132075471698,
"alnum_prop": 0.5759193357058126,
"repo_name": "tktrungna/leetcode",
"id": "6e22be6a4e11ec0f28901b64e3268f0d1e98e2c0",
"size": "1686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/insert-interval.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "410956"
}
],
"symlink_target": ""
}
|
"""This script will check out llvm and clang, and then package the results up
to a number of tarballs."""
import argparse
import fnmatch
import itertools
import lzma
import multiprocessing.dummy
import os
import platform
import shutil
import subprocess
import sys
import tarfile
import time
from update import PACKAGE_VERSION, RELEASE_VERSION, STAMP_FILE
# Path constants.
THIS_DIR = os.path.dirname(__file__)
CHROMIUM_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..'))
THIRD_PARTY_DIR = os.path.join(THIS_DIR, '..', '..', '..', 'third_party')
BUILDTOOLS_DIR = os.path.join(THIS_DIR, '..', '..', '..', 'buildtools')
LLVM_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm')
LLVM_BOOTSTRAP_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-bootstrap')
LLVM_BOOTSTRAP_INSTALL_DIR = os.path.join(THIRD_PARTY_DIR,
'llvm-bootstrap-install')
LLVM_BUILD_DIR = os.path.join(THIRD_PARTY_DIR, 'llvm-build')
LLVM_RELEASE_DIR = os.path.join(LLVM_BUILD_DIR, 'Release+Asserts')
EU_STRIP = os.path.join(BUILDTOOLS_DIR, 'third_party', 'eu-strip', 'bin',
'eu-strip')
def Tee(output, logfile):
logfile.write(output)
print(output, end=' ')
def TeeCmd(cmd, logfile, fail_hard=True):
"""Runs cmd and writes the output to both stdout and logfile."""
# Reading from PIPE can deadlock if one buffer is full but we wait on a
# different one. To work around this, pipe the subprocess's stderr to
# its stdout buffer and don't give it a stdin.
# shell=True is required in cmd.exe since depot_tools has an svn.bat, and
# bat files only work with shell=True set.
proc = subprocess.Popen(cmd, bufsize=1, shell=sys.platform == 'win32',
stdin=open(os.devnull), stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
  for line in iter(proc.stdout.readline, b''):  # stdout is bytes; use a bytes sentinel
    Tee(line.decode(), logfile)
if proc.poll() is not None:
break
exit_code = proc.wait()
if exit_code != 0 and fail_hard:
print('Failed:', cmd)
sys.exit(1)
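# Usage sketch (hypothetical command; in this script TeeCmd is actually
# invoked with the build command and the buildlog.txt handle opened in main):
#
#     with open('buildlog.txt', 'w') as log:
#         TeeCmd([sys.executable, '--version'], log)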
def PrintTarProgress(tarinfo):
print('Adding', tarinfo.name)
return tarinfo
def GetGsutilPath():
if not 'find_depot_tools' in sys.modules:
sys.path.insert(0, os.path.join(CHROMIUM_DIR, 'build'))
global find_depot_tools
import find_depot_tools
depot_path = find_depot_tools.add_depot_tools_to_path()
if depot_path is None:
    print('depot_tools are not found in PATH. '
          'Follow the instructions in this document '
          'http://dev.chromium.org/developers/how-tos/install-depot-tools'
          ' to install depot_tools and then try again.')
sys.exit(1)
gsutil_path = os.path.join(depot_path, 'gsutil.py')
return gsutil_path
def RunGsutil(args):
return subprocess.call([sys.executable, GetGsutilPath()] + args)
def PackageInArchive(directory_path, archive_path):
bin_dir_path = os.path.join(directory_path, 'bin')
if sys.platform != 'win32' and os.path.exists(bin_dir_path):
for f in os.listdir(bin_dir_path):
file_path = os.path.join(bin_dir_path, f)
if not os.path.islink(file_path):
subprocess.call(['strip', file_path])
with tarfile.open(archive_path + '.tar.xz',
'w:xz',
preset=9 | lzma.PRESET_EXTREME) as tar_xz:
with tarfile.open(archive_path + '.tgz', 'w:gz') as tar_gz:
for f in sorted(os.listdir(directory_path)):
tar_xz.add(os.path.join(directory_path, f),
arcname=f,
filter=PrintTarProgress)
# TODO(crbug.com/1261812) Stop making gzip'ed archives once the
# goma/reclient push processes are updated to consume the .xz files
# instead.
tar_gz.add(os.path.join(directory_path, f), arcname=f)
def MaybeUpload(do_upload, filename, gcs_platform, extra_gsutil_args=[]):
gsutil_args = ['cp'] + extra_gsutil_args + [
'-n', '-a', 'public-read', filename,
'gs://chromium-browser-clang-staging/%s/' % (gcs_platform)
]
if do_upload:
print('Uploading %s to Google Cloud Storage...' % filename)
exit_code = RunGsutil(gsutil_args)
if exit_code != 0:
print("gsutil failed, exit_code: %s" % exit_code)
sys.exit(exit_code)
else:
print('To upload, run:')
print('gsutil %s' % ' '.join(gsutil_args))
def UploadPDBsToSymbolServer(binaries):
assert sys.platform == 'win32'
# Upload PDB and binary to the symbol server on Windows. Put them into the
# chromium-browser-symsrv bucket, since chrome devs have that in their
# _NT_SYMBOL_PATH already. Executable and PDB must be at paths following a
# certain pattern for the Microsoft debuggers to be able to load them.
# Executable:
# chromium-browser-symsrv/clang-cl.exe/ABCDEFAB01234/clang-cl.ex_
# ABCDEFAB is the executable's timestamp in %08X format, 01234 is the
# executable's image size in %x format. tools/symsrc/img_fingerprint.py
# can compute this ABCDEFAB01234 string for us, so use that.
# The .ex_ instead of .exe at the end means that the file is compressed.
# PDB:
# gs://chromium-browser-symsrv/clang-cl.exe.pdb/AABBCCDD/clang-cl.exe.pd_
# AABBCCDD here is computed from the output of
# dumpbin /all mybinary.exe | find "Format: RSDS"
# but tools/symsrc/pdb_fingerprint_from_img.py can compute it already, so
# again just use that.
sys.path.insert(0, os.path.join(CHROMIUM_DIR, 'tools', 'symsrc'))
import img_fingerprint, pdb_fingerprint_from_img
files = []
for binary_path in binaries:
binary_path = os.path.join(LLVM_RELEASE_DIR, binary_path)
binary_id = img_fingerprint.GetImgFingerprint(binary_path)
(pdb_id, pdb_path) = pdb_fingerprint_from_img.GetPDBInfoFromImg(binary_path)
files += [(binary_path, binary_id), (pdb_path, pdb_id)]
# The build process builds clang.exe and then copies it to clang-cl.exe
# (both are the same binary and they behave differently on what their
# filename is). Hence, the pdb is at clang.pdb, not at clang-cl.pdb.
# Likewise, lld-link.exe's PDB file is called lld.pdb.
# Compress and upload.
def compress(t):
subprocess.check_call(
['makecab', '/D', 'CompressionType=LZX', '/D', 'CompressionMemory=21',
t[0], '/L', os.path.dirname(t[0])], stdout=open(os.devnull, 'w'))
multiprocessing.dummy.Pool().map(compress, files)
for f, f_id in files:
f_cab = f[:-1] + '_'
dest = '%s/%s/%s' % (os.path.basename(f), f_id, os.path.basename(f_cab))
print('Uploading %s to Google Cloud Storage...' % dest)
gsutil_args = ['cp', '-n', '-a', 'public-read', f_cab,
'gs://chromium-browser-symsrv/' + dest]
exit_code = RunGsutil(gsutil_args)
if exit_code != 0:
print("gsutil failed, exit_code: %s" % exit_code)
sys.exit(exit_code)
def main():
parser = argparse.ArgumentParser(description='build and package clang')
parser.add_argument('--upload', action='store_true',
help='Upload the target archive to Google Cloud Storage.')
parser.add_argument('--build-mac-arm', action='store_true',
help='Build arm binaries. Only valid on macOS.')
args = parser.parse_args()
if args.build_mac_arm and sys.platform != 'darwin':
print('--build-mac-arm only valid on macOS')
return 1
if args.build_mac_arm and platform.machine() == 'arm64':
print('--build-mac-arm only valid on intel to cross-build arm')
return 1
expected_stamp = PACKAGE_VERSION
pdir = 'clang-' + expected_stamp
print(pdir)
if sys.platform == 'darwin':
# When we need to run this script on an arm machine, we need to add a
    # --build-mac-intel switch to pick which clang to build: pick 'Mac_arm64'
    # here when there's no flag and 'Mac' when --build-mac-intel is passed.
    # Also update the build script to explicitly pass a default triple then.
if args.build_mac_arm or platform.machine() == 'arm64':
gcs_platform = 'Mac_arm64'
else:
gcs_platform = 'Mac'
elif sys.platform == 'win32':
gcs_platform = 'Win'
else:
gcs_platform = 'Linux_x64'
with open('buildlog.txt', 'w') as log:
Tee('Starting build\n', log)
# Do a clobber build.
shutil.rmtree(LLVM_BOOTSTRAP_DIR, ignore_errors=True)
shutil.rmtree(LLVM_BOOTSTRAP_INSTALL_DIR, ignore_errors=True)
shutil.rmtree(LLVM_BUILD_DIR, ignore_errors=True)
build_cmd = [
sys.executable,
os.path.join(THIS_DIR, 'build.py'), '--bootstrap', '--disable-asserts',
'--run-tests', '--pgo'
]
if args.build_mac_arm:
build_cmd.append('--build-mac-arm')
if sys.platform != 'darwin':
build_cmd.append('--thinlto')
TeeCmd(build_cmd, log)
stamp = open(STAMP_FILE).read().rstrip()
if stamp != expected_stamp:
print('Actual stamp (%s) != expected stamp (%s).' % (stamp, expected_stamp))
return 1
shutil.rmtree(pdir, ignore_errors=True)
# Copy a list of files to the directory we're going to tar up.
# This supports the same patterns that the fnmatch module understands.
# '$V' is replaced by RELEASE_VERSION further down.
exe_ext = '.exe' if sys.platform == 'win32' else ''
want = [
'bin/llvm-pdbutil' + exe_ext,
'bin/llvm-symbolizer' + exe_ext,
'bin/llvm-undname' + exe_ext,
# Copy built-in headers (lib/clang/3.x.y/include).
'lib/clang/$V/include/*',
'lib/clang/$V/share/asan_*list.txt',
'lib/clang/$V/share/cfi_*list.txt',
]
if sys.platform == 'win32':
want.extend([
'bin/clang-cl.exe',
'bin/lld-link.exe',
'bin/llvm-ml.exe',
])
else:
want.extend([
'bin/clang',
# Add LLD.
'bin/lld',
# Add llvm-ar for LTO.
'bin/llvm-ar',
# llvm-ml for Windows cross builds.
'bin/llvm-ml',
# Include libclang_rt.builtins.a for Fuchsia targets.
'lib/clang/$V/lib/aarch64-unknown-fuchsia/libclang_rt.builtins.a',
'lib/clang/$V/lib/x86_64-unknown-fuchsia/libclang_rt.builtins.a',
# Add llvm-readobj (symlinked from llvm-readelf) for extracting SONAMEs.
'bin/llvm-readobj',
])
if not args.build_mac_arm:
# TODO(thakis): Figure out why this doesn't build in --build-mac-arm
# builds.
want.append(
'lib/clang/$V/lib/x86_64-unknown-fuchsia/libclang_rt.profile.a')
if sys.platform != 'darwin':
# The Fuchsia asan runtime is only built on non-Mac platforms.
want.append('lib/clang/$V/lib/x86_64-unknown-fuchsia/libclang_rt.asan.so')
want.append(
'lib/clang/$V/lib/x86_64-unknown-fuchsia/libclang_rt.asan-preinit.a')
want.append(
'lib/clang/$V/lib/x86_64-unknown-fuchsia/libclang_rt.asan_static.a')
if sys.platform == 'darwin':
want.extend([
# AddressSanitizer runtime.
'lib/clang/$V/lib/darwin/libclang_rt.asan_iossim_dynamic.dylib',
'lib/clang/$V/lib/darwin/libclang_rt.asan_osx_dynamic.dylib',
# OS X and iOS builtin libraries for the _IsOSVersionAtLeast runtime
# function.
'lib/clang/$V/lib/darwin/libclang_rt.ios.a',
'lib/clang/$V/lib/darwin/libclang_rt.iossim.a',
'lib/clang/$V/lib/darwin/libclang_rt.osx.a',
# Profile runtime (used by profiler and code coverage).
'lib/clang/$V/lib/darwin/libclang_rt.profile_iossim.a',
'lib/clang/$V/lib/darwin/libclang_rt.profile_osx.a',
# UndefinedBehaviorSanitizer runtime.
'lib/clang/$V/lib/darwin/libclang_rt.ubsan_iossim_dynamic.dylib',
'lib/clang/$V/lib/darwin/libclang_rt.ubsan_osx_dynamic.dylib',
])
elif sys.platform.startswith('linux'):
want.extend([
# pylint: disable=line-too-long
# Copy the stdlibc++.so.6 we linked the binaries against.
'lib/libstdc++.so.6',
# Add llvm-objcopy for partition extraction on Android.
'bin/llvm-objcopy',
# Add llvm-nm.
'bin/llvm-nm',
# AddressSanitizer C runtime (pure C won't link with *_cxx).
'lib/clang/$V/lib/i386-unknown-linux-gnu/libclang_rt.asan.a',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.asan.a',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.asan.a.syms',
'lib/clang/$V/lib/i386-unknown-linux-gnu/libclang_rt.asan_static.a',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.asan_static.a',
# AddressSanitizer C++ runtime.
'lib/clang/$V/lib/i386-unknown-linux-gnu/libclang_rt.asan_cxx.a',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.asan_cxx.a',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.asan_cxx.a.syms',
# AddressSanitizer Android runtime.
'lib/clang/$V/lib/linux/libclang_rt.asan-aarch64-android.so',
'lib/clang/$V/lib/linux/libclang_rt.asan-arm-android.so',
'lib/clang/$V/lib/linux/libclang_rt.asan-i686-android.so',
'lib/clang/$V/lib/linux/libclang_rt.asan_static-aarch64-android.a',
'lib/clang/$V/lib/linux/libclang_rt.asan_static-arm-android.a',
'lib/clang/$V/lib/linux/libclang_rt.asan_static-i686-android.a',
# Builtins for Android.
'lib/clang/$V/lib/linux/libclang_rt.builtins-aarch64-android.a',
'lib/clang/$V/lib/linux/libclang_rt.builtins-arm-android.a',
'lib/clang/$V/lib/linux/libclang_rt.builtins-i686-android.a',
'lib/clang/$V/lib/linux/libclang_rt.builtins-x86_64-android.a',
# Builtins for Lacros (and potentially Linux, but not used there atm).
'lib/clang/$V/lib/aarch64-unknown-linux-gnu/libclang_rt.builtins.a',
'lib/clang/$V/lib/armv7-unknown-linux-gnueabihf/libclang_rt.builtins.a',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.builtins.a',
# crtstart/crtend for Linux and Lacros.
'lib/clang/$V/lib/aarch64-unknown-linux-gnu/clang_rt.crtbegin.o',
'lib/clang/$V/lib/aarch64-unknown-linux-gnu/clang_rt.crtend.o',
'lib/clang/$V/lib/armv7-unknown-linux-gnueabihf/clang_rt.crtbegin.o',
'lib/clang/$V/lib/armv7-unknown-linux-gnueabihf/clang_rt.crtend.o',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/clang_rt.crtbegin.o',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/clang_rt.crtend.o',
# HWASAN Android runtime.
'lib/clang/$V/lib/linux/libclang_rt.hwasan-aarch64-android.so',
# MemorySanitizer C runtime (pure C won't link with *_cxx).
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.msan.a',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.msan.a.syms',
# MemorySanitizer C++ runtime.
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.msan_cxx.a',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.msan_cxx.a.syms',
# Profile runtime (used by profiler and code coverage).
'lib/clang/$V/lib/aarch64-unknown-linux-gnu/libclang_rt.profile.a',
'lib/clang/$V/lib/armv7-unknown-linux-gnueabihf/libclang_rt.profile.a',
'lib/clang/$V/lib/i386-unknown-linux-gnu/libclang_rt.profile.a',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.profile.a',
'lib/clang/$V/lib/linux/libclang_rt.profile-i686-android.a',
'lib/clang/$V/lib/linux/libclang_rt.profile-aarch64-android.a',
'lib/clang/$V/lib/linux/libclang_rt.profile-arm-android.a',
# ThreadSanitizer C runtime (pure C won't link with *_cxx).
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.tsan.a',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.tsan.a.syms',
# ThreadSanitizer C++ runtime.
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.tsan_cxx.a',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.tsan_cxx.a.syms',
# UndefinedBehaviorSanitizer C runtime (pure C won't link with *_cxx).
'lib/clang/$V/lib/i386-unknown-linux-gnu/libclang_rt.ubsan_standalone.a',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.ubsan_standalone.a',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.ubsan_standalone.a.syms',
# UndefinedBehaviorSanitizer C++ runtime.
'lib/clang/$V/lib/i386-unknown-linux-gnu/libclang_rt.ubsan_standalone_cxx.a',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.ubsan_standalone_cxx.a',
'lib/clang/$V/lib/x86_64-unknown-linux-gnu/libclang_rt.ubsan_standalone_cxx.a.syms',
# UndefinedBehaviorSanitizer Android runtime, needed for CFI.
'lib/clang/$V/lib/linux/libclang_rt.ubsan_standalone-aarch64-android.so',
'lib/clang/$V/lib/linux/libclang_rt.ubsan_standalone-arm-android.so',
# Ignorelist for MemorySanitizer (used on Linux only).
'lib/clang/$V/share/msan_*list.txt',
# pylint: enable=line-too-long
])
elif sys.platform == 'win32':
want.extend([
# AddressSanitizer C runtime (pure C won't link with *_cxx).
'lib/clang/$V/lib/windows/clang_rt.asan-x86_64.lib',
# AddressSanitizer C++ runtime.
'lib/clang/$V/lib/windows/clang_rt.asan_cxx-x86_64.lib',
# Thunk for AddressSanitizer needed for static build of a shared lib.
'lib/clang/$V/lib/windows/clang_rt.asan_dll_thunk-x86_64.lib',
# AddressSanitizer runtime for component build.
'lib/clang/$V/lib/windows/clang_rt.asan_dynamic-x86_64.dll',
'lib/clang/$V/lib/windows/clang_rt.asan_dynamic-x86_64.lib',
# Thunk for AddressSanitizer for component build of a shared lib.
'lib/clang/$V/lib/windows/clang_rt.asan_dynamic_runtime_thunk-x86_64.lib',
# Profile runtime (used by profiler and code coverage).
'lib/clang/$V/lib/windows/clang_rt.profile-i386.lib',
'lib/clang/$V/lib/windows/clang_rt.profile-x86_64.lib',
# UndefinedBehaviorSanitizer C runtime (pure C won't link with *_cxx).
'lib/clang/$V/lib/windows/clang_rt.ubsan_standalone-x86_64.lib',
# UndefinedBehaviorSanitizer C++ runtime.
'lib/clang/$V/lib/windows/clang_rt.ubsan_standalone_cxx-x86_64.lib',
])
# reclient is a tool for executing programs remotely. When uploading the
# binary to be executed, it needs to know which other files the binary depends
# on. This can include shared libraries, as well as other dependencies not
# explicitly mentioned in the source code (those would be found by reclient's
# include scanner) such as sanitizer ignore lists.
reclient_inputs = {
'clang': [
'lib/clang/$V/share/asan_*list.txt',
'lib/clang/$V/share/cfi_*list.txt',
],
}
# Check that all non-glob wanted files exist on disk.
want = [w.replace('$V', RELEASE_VERSION) for w in want]
found_all_wanted_files = True
for w in want:
if '*' in w: continue
if os.path.exists(os.path.join(LLVM_RELEASE_DIR, w)): continue
print('wanted file "%s" but it did not exist' % w, file=sys.stderr)
found_all_wanted_files = False
if not found_all_wanted_files:
return 1
# Check that all reclient inputs are in the package.
for tool in reclient_inputs:
reclient_inputs[tool] = [i.replace('$V', RELEASE_VERSION)
for i in reclient_inputs[tool]]
missing = set(reclient_inputs[tool]) - set(want)
if missing:
print('reclient inputs not part of package: ', missing, file=sys.stderr)
return 1
reclient_input_strings = {t: '' for t in reclient_inputs}
# TODO(thakis): Try walking over want and copying the files in there instead
# of walking the directory and doing fnmatch() against want.
for root, dirs, files in os.walk(LLVM_RELEASE_DIR):
dirs.sort() # Walk dirs in sorted order.
# root: third_party/llvm-build/Release+Asserts/lib/..., rel_root: lib/...
rel_root = root[len(LLVM_RELEASE_DIR)+1:]
rel_files = [os.path.join(rel_root, f) for f in files]
wanted_files = list(set(itertools.chain.from_iterable(
fnmatch.filter(rel_files, p) for p in want)))
if wanted_files:
# Guaranteed to not yet exist at this point:
os.makedirs(os.path.join(pdir, rel_root))
for f in sorted(wanted_files):
src = os.path.join(LLVM_RELEASE_DIR, f)
dest = os.path.join(pdir, f)
shutil.copy(src, dest)
# Strip libraries.
if 'libclang_rt.builtins' in f and 'android' in f:
# Keep the builtins' DWARF info for unwinding.
pass
elif sys.platform == 'darwin' and f.endswith('.dylib'):
subprocess.call(['strip', '-x', dest])
elif (sys.platform.startswith('linux') and
os.path.splitext(f)[1] in ['.so', '.a']):
subprocess.call([EU_STRIP, '-g', dest])
      # If this is a reclient input, add it to the inputs file(s).
for tool, inputs in reclient_inputs.items():
if any(fnmatch.fnmatch(f, i) for i in inputs):
rel_input = os.path.relpath(dest, os.path.join(pdir, 'bin'))
reclient_input_strings[tool] += ('%s\n' % rel_input)
# Write the reclient inputs files.
if sys.platform != 'win32':
reclient_input_strings['clang++'] = reclient_input_strings['clang']
reclient_input_strings['clang-cl'] = reclient_input_strings['clang']
else:
reclient_input_strings['clang-cl.exe'] = reclient_input_strings.pop('clang')
for tool, string in reclient_input_strings.items():
filename = os.path.join(pdir, 'bin', '%s_remote_toolchain_inputs' % tool)
print('%s:\n%s' % (filename, string))
with open(filename, 'w') as f:
f.write(string)
# Set up symlinks.
if sys.platform != 'win32':
os.symlink('clang', os.path.join(pdir, 'bin', 'clang++'))
os.symlink('clang', os.path.join(pdir, 'bin', 'clang-cl'))
os.symlink('lld', os.path.join(pdir, 'bin', 'ld.lld'))
os.symlink('lld', os.path.join(pdir, 'bin', 'ld64.lld'))
os.symlink('lld', os.path.join(pdir, 'bin', 'lld-link'))
os.symlink('lld', os.path.join(pdir, 'bin', 'wasm-ld'))
os.symlink('llvm-readobj', os.path.join(pdir, 'bin', 'llvm-readelf'))
if sys.platform.startswith('linux'):
os.symlink('llvm-objcopy', os.path.join(pdir, 'bin', 'llvm-strip'))
# Make `--target=*-cros-linux-gnu` work with
# LLVM_ENABLE_PER_TARGET_RUNTIME_DIR=ON.
for arch, abi in [('armv7', 'gnueabihf'), ('aarch64', 'gnu'),
('x86_64', 'gnu')]:
old = '%s-unknown-linux-%s' % (arch, abi)
new = old.replace('unknown', 'cros').replace('armv7', 'armv7a')
os.symlink(
old, os.path.join(pdir, 'lib', 'clang', RELEASE_VERSION, 'lib', new))
# Create main archive.
PackageInArchive(pdir, pdir)
MaybeUpload(args.upload, pdir + '.t*z', gcs_platform)
# Upload build log next to it.
os.rename('buildlog.txt', pdir + '-buildlog.txt')
MaybeUpload(args.upload,
pdir + '-buildlog.txt',
gcs_platform,
extra_gsutil_args=['-z', 'txt'])
os.remove(pdir + '-buildlog.txt')
# Zip up llvm-code-coverage for code coverage.
code_coverage_dir = 'llvm-code-coverage-' + stamp
shutil.rmtree(code_coverage_dir, ignore_errors=True)
os.makedirs(os.path.join(code_coverage_dir, 'bin'))
for filename in ['llvm-cov', 'llvm-profdata']:
shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', filename + exe_ext),
os.path.join(code_coverage_dir, 'bin'))
PackageInArchive(code_coverage_dir, code_coverage_dir)
MaybeUpload(args.upload, code_coverage_dir + '.t*z', gcs_platform)
# Zip up llvm-objdump and related tools for sanitizer coverage and Supersize.
objdumpdir = 'llvmobjdump-' + stamp
shutil.rmtree(objdumpdir, ignore_errors=True)
os.makedirs(os.path.join(objdumpdir, 'bin'))
for filename in [
'llvm-bcanalyzer', 'llvm-cxxfilt', 'llvm-dwarfdump', 'llvm-nm',
'llvm-objdump'
]:
shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', filename + exe_ext),
os.path.join(objdumpdir, 'bin'))
llvmobjdump_stamp_file_base = 'llvmobjdump_build_revision'
llvmobjdump_stamp_file = os.path.join(objdumpdir, llvmobjdump_stamp_file_base)
with open(llvmobjdump_stamp_file, 'w') as f:
f.write(expected_stamp)
f.write('\n')
if sys.platform != 'win32':
os.symlink('llvm-objdump', os.path.join(objdumpdir, 'bin', 'llvm-otool'))
PackageInArchive(objdumpdir, objdumpdir)
MaybeUpload(args.upload, objdumpdir + '.t*z', gcs_platform)
# Zip up clang-tidy for users who opt into it, and Tricium.
clang_tidy_dir = 'clang-tidy-' + stamp
shutil.rmtree(clang_tidy_dir, ignore_errors=True)
os.makedirs(os.path.join(clang_tidy_dir, 'bin'))
shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', 'clang-tidy' + exe_ext),
os.path.join(clang_tidy_dir, 'bin'))
PackageInArchive(clang_tidy_dir, clang_tidy_dir)
MaybeUpload(args.upload, clang_tidy_dir + '.t*z', gcs_platform)
# Zip up clangd for users who opt into it.
clangd_dir = 'clangd-' + stamp
shutil.rmtree(clangd_dir, ignore_errors=True)
os.makedirs(os.path.join(clangd_dir, 'bin'))
shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', 'clangd' + exe_ext),
os.path.join(clangd_dir, 'bin'))
PackageInArchive(clangd_dir, clangd_dir)
MaybeUpload(args.upload, clangd_dir + '.t*z', gcs_platform)
# Zip up clang-format so we can update it (separately from the clang roll).
clang_format_dir = 'clang-format-' + stamp
shutil.rmtree(clang_format_dir, ignore_errors=True)
os.makedirs(os.path.join(clang_format_dir, 'bin'))
shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', 'clang-format' + exe_ext),
os.path.join(clang_format_dir, 'bin'))
PackageInArchive(clang_format_dir, clang_format_dir)
MaybeUpload(args.upload, clang_format_dir + '.t*z', gcs_platform)
# Zip up clang-libs for users who opt into it. We want Clang and LLVM headers
# and libs, as well as a couple binaries. The LLVM parts are needed by the
# Rust build.
clang_libs_dir = 'clang-libs-' + stamp
shutil.rmtree(clang_libs_dir, ignore_errors=True)
os.makedirs(os.path.join(clang_libs_dir, 'include'))
# TODO(danakj): It's possible we need to also include headers from
# LLVM_DIR/clang/lib/AST/ and other subdirs of lib, but we won't include them
# unless we see it's needed, and we can document why.
shutil.copytree(os.path.join(LLVM_DIR, 'clang', 'include', 'clang'),
os.path.join(clang_libs_dir, 'include', 'clang'))
# Copy LLVM includes. The llvm source and build directory includes must be
# merged. llvm-c for C bindings is also included.
#
# Headers and libs are copied from LLVM_BOOTSTRAP_DIR, not LLVM_RELEASE_DIR,
# because the release libs have LTO so they contain LLVM bitcode while the
  # bootstrap libs do not. The Rust build consumes these, the first stage of
# which cannot handle newer LLVM bitcode. The stage 0 rustc is linked against
# an older LLVM.
shutil.copytree(os.path.join(LLVM_DIR, 'llvm', 'include', 'llvm'),
os.path.join(clang_libs_dir, 'include', 'llvm'))
shutil.copytree(os.path.join(LLVM_DIR, 'llvm', 'include', 'llvm-c'),
os.path.join(clang_libs_dir, 'include', 'llvm-c'))
shutil.copytree(os.path.join(LLVM_BOOTSTRAP_DIR, 'include', 'llvm'),
os.path.join(clang_libs_dir, 'include', 'llvm'),
dirs_exist_ok=True)
  # Copy llvm-config and FileCheck, which the Rust build needs.
os.makedirs(os.path.join(clang_libs_dir, 'bin'))
shutil.copy(os.path.join(LLVM_BOOTSTRAP_DIR, 'bin', 'llvm-config' + exe_ext),
os.path.join(clang_libs_dir, 'bin'))
shutil.copy(os.path.join(LLVM_BOOTSTRAP_DIR, 'bin', 'FileCheck' + exe_ext),
os.path.join(clang_libs_dir, 'bin'))
os.makedirs(os.path.join(clang_libs_dir, 'lib'))
if sys.platform == 'win32':
clang_libs_want = [
'*.lib',
]
else:
clang_libs_want = [
'*.a',
]
for lib_path in os.listdir(os.path.join(LLVM_BOOTSTRAP_DIR, 'lib')):
for lib_want in clang_libs_want:
if fnmatch.fnmatch(lib_path, lib_want):
shutil.copy(os.path.join(LLVM_BOOTSTRAP_DIR, 'lib', lib_path),
os.path.join(clang_libs_dir, 'lib'))
PackageInArchive(clang_libs_dir, clang_libs_dir)
MaybeUpload(args.upload, clang_libs_dir + '.t*z', gcs_platform)
if sys.platform == 'darwin':
# dsymutil isn't part of the main zip, and it gets periodically
# deployed to CIPD (manually, not as part of clang rolls) for use in the
# Mac build toolchain.
dsymdir = 'dsymutil-' + stamp
shutil.rmtree(dsymdir, ignore_errors=True)
os.makedirs(os.path.join(dsymdir, 'bin'))
shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', 'dsymutil'),
os.path.join(dsymdir, 'bin'))
PackageInArchive(dsymdir, dsymdir)
MaybeUpload(args.upload, dsymdir + '.t*z', gcs_platform)
# Zip up the translation_unit tool.
translation_unit_dir = 'translation_unit-' + stamp
shutil.rmtree(translation_unit_dir, ignore_errors=True)
os.makedirs(os.path.join(translation_unit_dir, 'bin'))
shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', 'translation_unit' +
exe_ext),
os.path.join(translation_unit_dir, 'bin'))
PackageInArchive(translation_unit_dir, translation_unit_dir)
MaybeUpload(args.upload, translation_unit_dir + '.t*z', gcs_platform)
# Zip up the libclang binaries.
libclang_dir = 'libclang-' + stamp
shutil.rmtree(libclang_dir, ignore_errors=True)
os.makedirs(os.path.join(libclang_dir, 'bin'))
os.makedirs(os.path.join(libclang_dir, 'bindings', 'python', 'clang'))
if sys.platform == 'win32':
shutil.copy(os.path.join(LLVM_RELEASE_DIR, 'bin', 'libclang.dll'),
os.path.join(libclang_dir, 'bin'))
for filename in ['__init__.py', 'cindex.py', 'enumerations.py']:
shutil.copy(os.path.join(LLVM_DIR, 'clang', 'bindings', 'python', 'clang',
filename),
os.path.join(libclang_dir, 'bindings', 'python', 'clang'))
PackageInArchive(libclang_dir, libclang_dir)
MaybeUpload(args.upload, libclang_dir + '.t*z', gcs_platform)
if sys.platform == 'win32' and args.upload:
binaries = [f for f in want if f.endswith('.exe') or f.endswith('.dll')]
assert 'bin/clang-cl.exe' in binaries
assert 'bin/lld-link.exe' in binaries
start = time.time()
UploadPDBsToSymbolServer(binaries)
end = time.time()
print('symbol upload took', end - start, 'seconds')
# FIXME: Warn if the file already exists on the server.
if __name__ == '__main__':
sys.exit(main())
|
{
"content_hash": "fdaa59e56ee9fbb49d87914b2a902c91",
"timestamp": "",
"source": "github",
"line_count": 704,
"max_line_length": 92,
"avg_line_length": 43.15767045454545,
"alnum_prop": 0.652404305039002,
"repo_name": "chromium/chromium",
"id": "c7411f0a997ce1b6ab1fa2080d5fdb9096cecd78",
"size": "30547",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "tools/clang/scripts/package.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
Django settings for myproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^g!wjn8m0t!c*7%hj3jj@(%a@o&*oa39j9$lj7(va3&dapvnj_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
TEMPLATE_DIRS = (
'/Users/natebecker/.virtualenvs/venv_address_booker/startover/myproject/cookie_app/templates',
    os.path.join(BASE_DIR, 'cookie_app/templates'),  # no leading slash, or os.path.join discards BASE_DIR
'/home/ubuntu/siter/business-contacter-django-app/myproject/cookie_app/templates',
)
# Application definition
INSTALLED_APPS = (
# 'grappelli',###django admin interface alternative https://github.com/sehmaschine/django-grappelli
# 'xadmin',###from https://xadmin.readthedocs.org/en/latest/quickstart.html#id2
# 'crispy_forms',
# 'reversion',
'suit',###django admin interface alternative http://django-suit.readthedocs.org/en/develop/
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.staticfiles',
'floppyforms',
'cookie_app',
'bootstrap3',
'django_tables2',
# 'csvimport.app.CSVImportConf', ####note that commenting this out removes csv importer from the left-hand menu side
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'myproject.urls'
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')###added as per http://stackoverflow.com/questions/23215581/unable-to-perform-collectstatic
# #####note that if you uncomment this, the locally deployed website looks like what the EC2 instance has (no assets are visible)
# STATICFILES_DIRS = (
# os.path.join(BASE_DIR, "static"),
# '/home/ubuntu/siter/business-contacter-django-app/myproject/static',
# '/Users/natebecker/.virtualenvs/venv_address_booker/startover/myproject',
# )
##########I added this for django_tables2 compatibility (see http://django-tables2.readthedocs.org/en/latest/)###also used in grappelli/django suit https://github.com/sehmaschine/django-grappelli
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
TEMPLATE_CONTEXT_PROCESSORS += ('django.core.context_processors.request',)
##########
######django suit configurations
SUIT_CONFIG = {
# header
'ADMIN_NAME': 'Quantile',
# 'HEADER_DATE_FORMAT': 'l, j. F Y',
# 'HEADER_TIME_FORMAT': 'H:i',
# forms
# 'SHOW_REQUIRED_ASTERISK': True, # Default True
# 'CONFIRM_UNSAVED_CHANGES': True, # Default True
# menu
# 'SEARCH_URL': '/admin/auth/user/',
'SEARCH_URL': '',# Set to empty string if you want to hide search from menu
'MENU_ICONS': {
'sites': 'icon-leaf',
# 'auth': 'icon-lock',
},
# 'MENU_OPEN_FIRST_CHILD': True, # Default True
'MENU_EXCLUDE': ('auth.group','cookie_app.datetime'),
'MENU': (
'sites',
# {'app': 'auth', 'icon':'icon-lock', 'models': ('user', 'group')},
# {'label': 'Settings', 'icon':'icon-cog', 'models': ('auth.user', 'auth.group')},
# {'label': 'Initial Data', 'icon':' icon-tasks', 'models': ('cookie_app.Initial_Borr_List_Page'), 'url': '/admin/cookie_app/initial_borr_list_page/'},####commented out to remove initial data tag
{'label': 'Followup Data', 'icon':'icon-ok', 'models': ('cookie_app.more_data_page'),'url': '/admin/cookie_app/more_data_page/'},
# {'label': 'Support', 'icon':'icon-question-sign', 'url': '/support/'},
),
# misc
'LIST_PER_PAGE': 100
}
|
{
"content_hash": "b8f1f9bd39291db76dfa8075f2966dd0",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 203,
"avg_line_length": 33.11333333333334,
"alnum_prop": 0.6845178175961345,
"repo_name": "nathanielbecker/business-contacter-django-app",
"id": "bfc436cf27182f68d9a60abbc6a68323f2e72444",
"size": "4967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myproject/myproject/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "12312"
},
{
"name": "Python",
"bytes": "118081"
},
{
"name": "Shell",
"bytes": "7046"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import pytest
import mock
import opentracing
def teardown_function(function):
opentracing._reset_global_tracer()
def test_opentracing_tracer():
assert opentracing.tracer is opentracing.global_tracer()
assert isinstance(opentracing.global_tracer(), opentracing.Tracer)
def test_is_global_tracer_registered():
assert opentracing.is_global_tracer_registered() is False
def test_set_global_tracer():
tracer = mock.Mock()
opentracing.set_global_tracer(tracer)
assert opentracing.global_tracer() is tracer
assert opentracing.is_global_tracer_registered()
# Register another value.
tracer = mock.Mock()
opentracing.set_global_tracer(tracer)
assert opentracing.global_tracer() is tracer
assert opentracing.is_global_tracer_registered()
def test_register_none():
with pytest.raises(ValueError):
opentracing.set_global_tracer(None)
|
{
"content_hash": "de46658ade8596cdac7746f120d04e3e",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 70,
"avg_line_length": 26.742857142857144,
"alnum_prop": 0.7435897435897436,
"repo_name": "kawamon/hue",
"id": "aa0227cd83e129998d87bd6a834e6bedc37447f6",
"size": "1515",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/opentracing-2.2.0/tests/test_globaltracer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "5786"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "118907"
},
{
"name": "C",
"bytes": "3196521"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "308860"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "1050129"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "10981"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "7312"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "24999718"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "471854"
},
{
"name": "JavaScript",
"bytes": "28075556"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "Jupyter Notebook",
"bytes": "73168"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Lex",
"bytes": "264449"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "269655"
},
{
"name": "Mako",
"bytes": "3614942"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "3204"
},
{
"name": "Python",
"bytes": "76440000"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "95764"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "190718"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TSQL",
"bytes": "10013"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "317058"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "2133855"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ebooks', '0110_auto_20200830_1750'),
]
operations = [
migrations.AlterField(
model_name='book',
name='icon_style',
field=models.CharField(blank=True, choices=[('r', 'r - regular'), ('s', 's - solid'), ('l', 'l - light'), ('d', 'd - duotone'), ('b', 'b - brand')], default='r', max_length=1, verbose_name='icon style'),
),
migrations.AlterField(
model_name='chapter',
name='icon_style',
field=models.CharField(blank=True, choices=[('r', 'r - regular'), ('s', 's - solid'), ('l', 'l - light'), ('d', 'd - duotone'), ('b', 'b - brand')], default='r', max_length=1, verbose_name='icon style'),
),
migrations.AlterField(
model_name='part',
name='icon_style',
field=models.CharField(blank=True, choices=[('r', 'r - regular'), ('s', 's - solid'), ('l', 'l - light'), ('d', 'd - duotone'), ('b', 'b - brand')], default='r', max_length=1, verbose_name='icon style'),
),
migrations.AlterField(
model_name='replacementrule',
name='icon_style',
field=models.CharField(blank=True, choices=[('r', 'r - regular'), ('s', 's - solid'), ('l', 'l - light'), ('d', 'd - duotone'), ('b', 'b - brand')], default='r', max_length=1, verbose_name='icon style'),
),
migrations.AlterField(
model_name='secret',
name='icon_style',
field=models.CharField(blank=True, choices=[('r', 'r - regular'), ('s', 's - solid'), ('l', 'l - light'), ('d', 'd - duotone'), ('b', 'b - brand')], default='r', max_length=1, verbose_name='icon style'),
),
migrations.AlterField(
model_name='section',
name='icon_style',
field=models.CharField(blank=True, choices=[('r', 'r - regular'), ('s', 's - solid'), ('l', 'l - light'), ('d', 'd - duotone'), ('b', 'b - brand')], default='r', max_length=1, verbose_name='icon style'),
),
migrations.AlterField(
model_name='sectionaspect',
name='icon_style',
field=models.CharField(blank=True, choices=[('r', 'r - regular'), ('s', 's - solid'), ('l', 'l - light'), ('d', 'd - duotone'), ('b', 'b - brand')], default='r', max_length=1, verbose_name='icon style'),
),
migrations.AlterField(
model_name='universalsection',
name='icon_style',
field=models.CharField(blank=True, choices=[('r', 'r - regular'), ('s', 's - solid'), ('l', 'l - light'), ('d', 'd - duotone'), ('b', 'b - brand')], default='r', max_length=1, verbose_name='icon style'),
),
]
|
{
"content_hash": "0ee10ea9f8900e905bd900edc49076f8",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 215,
"avg_line_length": 54.568627450980394,
"alnum_prop": 0.5127560186848724,
"repo_name": "flavoi/diventi",
"id": "0bf88d5fb7d075592785bb8f6ec4dada75b960b7",
"size": "2833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diventi/ebooks/migrations/0111_auto_20200830_1753.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "385265"
},
{
"name": "Procfile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "826530"
}
],
"symlink_target": ""
}
|
from sandglass.time.api import API
from sandglass.time.models.client import Client
from sandglass.time.resource.model import ModelResource
from sandglass.time.schemas.client import ClientListSchema
from sandglass.time.schemas.client import ClientSchema
class ClientResource(ModelResource):
"""
REST API resource for Client model.
"""
name = 'clients'
model = Client
schema = ClientSchema
list_schema = ClientListSchema
API.register('v1', ClientResource)
|
{
"content_hash": "a80df694725b7c175c5d26575b865b69",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 58,
"avg_line_length": 25.63157894736842,
"alnum_prop": 0.7659137577002053,
"repo_name": "sanglass/sandglass.time",
"id": "f248a44883a1d8b3a6d03eba0af8eb0b8530ea16",
"size": "487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandglass/time/api/v1/client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Nginx",
"bytes": "1262"
},
{
"name": "Python",
"bytes": "221820"
}
],
"symlink_target": ""
}
|
from google.cloud import language
def get_sentiment(in_text):
language_client = language.Client()
document = language_client.document_from_text(in_text)
sentiment = document.analyze_sentiment().sentiment
return (sentiment.score, sentiment.magnitude)
def get_entities(in_text):
language_client = language.Client()
document = language_client.document_from_text(in_text)
response = document.analyze_entities()
return {ent.name: ent.entity_type for ent in response.entities}
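# Usage sketch (note that language.Client() above is the legacy
# google-cloud-language client API and requires configured credentials; the
# sample strings are illustrative):
#
#     score, magnitude = get_sentiment('I love spaghetti!')
#     entities = get_entities('Guido van Rossum created Python.')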
|
{
"content_hash": "8d773568e5c6b99bcd4394420cf810b7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 67,
"avg_line_length": 33.8,
"alnum_prop": 0.7396449704142012,
"repo_name": "spmassot/Spaghetti-Twitter-Bot",
"id": "c670e3b9428d844399dd0282e4ab99c04ee4dc17",
"size": "507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google_language.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9834"
}
],
"symlink_target": ""
}
|
from itertools import chain, groupby
import numpy as np
from ._base import Descriptor
from ._graph_matrix import DistanceMatrix
__all__ = (
"InformationContent",
"TotalIC",
"StructuralIC",
"BondingIC",
"ComplementaryIC",
"ModifiedIC",
"ZModifiedIC",
)
class BFSTree(object):
__slots__ = ("tree", "visited", "bonds", "atoms")
def __init__(self, mol):
self.tree = {}
self.visited = set()
self.bonds = {}
for b in mol.GetBonds():
s = b.GetBeginAtomIdx()
d = b.GetEndAtomIdx()
t = b.GetBondType()
self.bonds[s, d] = t
self.bonds[d, s] = t
self.atoms = [
(a.GetAtomicNum(), a.GetDegree(), a.GetNeighbors()) for a in mol.GetAtoms()
]
def reset(self, i):
self.tree.clear()
self.visited.clear()
self.tree[i] = ()
self.visited.add(i)
def expand(self):
self._expand(self.tree)
def _expand(self, tree):
for src, dst in list(tree.items()):
self.visited.add(src)
            if dst == ():  # value comparison; 'is ()' relies on tuple interning
tree[src] = {
n.GetIdx(): ()
for n in self.atoms[src][2]
if n.GetIdx() not in self.visited
}
else:
self._expand(dst)
def _code(self, tree, before, trail):
if len(tree) == 0:
yield trail
else:
for src, dst in tree.items():
code = []
if before is not None:
bt = self.bonds[before, src]
code.append(bt)
code.append(self.atoms[src][:2])
nxt = tuple(chain(trail, code))
for t in self._code(dst, src, nxt):
yield t
def get_code(self, i, order):
self.reset(i)
for _ in range(order):
self.expand()
return tuple(sorted(self._code(self.tree, None, ())))
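# Usage sketch (assumes RDKit is available; the SMILES string is
# illustrative): enumerate first-order neighborhood codes for ethanol.
#
#     from rdkit import Chem
#     mol = Chem.MolFromSmiles('CCO')
#     tree = BFSTree(mol)
#     codes = [tree.get_code(i, 1) for i in range(mol.GetNumAtoms())]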
class InformationContentBase(Descriptor):
__slots__ = ("_order",)
kekulize = True
def __str__(self):
return self._name + str(self._order)
@classmethod
def preset(cls, version):
return (cls(o) for o in range(6))
def parameters(self):
return (self._order,)
def __init__(self, order=0):
self._order = order
rtype = float
class Ag(InformationContentBase):
__slots__ = ("_order",)
@classmethod
def preset(cls, version):
return ()
_name = "Ag"
def dependencies(self):
return {"D": DistanceMatrix(self.explicit_hydrogens)}
def calculate(self, D):
if self._order == 0:
atoms = [a.GetAtomicNum() for a in self.mol.GetAtoms()]
else:
tree = BFSTree(self.mol)
atoms = [
tree.get_code(i, self._order) for i in range(self.mol.GetNumAtoms())
]
ad = {a: i for i, a in enumerate(atoms)}
Ags = [(k, sum(1 for _ in g)) for k, g in groupby(sorted(atoms))]
Nags = len(Ags)
return (
np.fromiter((ad[k] for k, _ in Ags), "int", Nags),
np.fromiter((ag for _, ag in Ags), "float", Nags),
)
rtype = None
def _shannon_entropy_term(a):
return a * np.log2(a)
shannon_entropy_term = np.vectorize(_shannon_entropy_term)
def shannon_entropy(a, w=1):
N = np.sum(a)
return -np.sum(w * shannon_entropy_term(a / N))
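# Worked example (hypothetical class sizes): for equivalence classes of sizes
# [2, 2] among 4 atoms, each class has probability 0.5, so
# shannon_entropy(np.array([2.0, 2.0])) == -2 * 0.5 * np.log2(0.5) == 1.0 bit.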
class InformationContent(InformationContentBase):
r"""neighborhood information content descriptor.
:type order: int
:param order: order(number of edge) of subgraph
"""
since = "1.0.0"
__slots__ = ()
def description(self):
return "{}-ordered neighborhood information content".format(self._order)
_name = "IC"
def dependencies(self):
return {"iAgs": Ag(self._order)}
def calculate(self, iAgs):
_, Ags = iAgs
return shannon_entropy(Ags)
class TotalIC(InformationContentBase):
r"""neighborhood total information content descriptor.
.. math::
{\rm TIC}_m = A \cdot {\rm IC}_m
:type order: int
:param order: order(number of edge) of subgraph
"""
since = "1.0.0"
__slots__ = ()
def description(self):
return "{}-ordered neighborhood total information content".format(self._order)
_name = "TIC"
def dependencies(self):
return {"ICm": InformationContent(self._order)}
def calculate(self, ICm):
A = self.mol.GetNumAtoms()
return A * ICm
class StructuralIC(TotalIC):
r"""structural information content descriptor.
.. math::
{\rm SIC}_m = \frac{{\rm IC}_m}{\log_2 A}
:type order: int
:param order: order(number of edge) of subgraph
"""
since = "1.0.0"
__slots__ = ()
def description(self):
return "{}-ordered structural information content".format(self._order)
_name = "SIC"
def calculate(self, ICm):
d = np.log2(self.mol.GetNumAtoms())
with self.rethrow_zerodiv():
return ICm / d
class BondingIC(TotalIC):
r"""bonding information content descriptor.
.. math::
{\rm BIC}_m = \frac{{\rm IC}_m}{\log_2 \sum^B_{b=1} \pi^{*}_b}
:type order: int
:param order: order(number of edge) of subgraph
:returns: NaN when :math:`\sum^B_{b=1} \pi^{*}_b <= 0`
"""
since = "1.0.0"
__slots__ = ()
def description(self):
return "{}-ordered bonding information content".format(self._order)
_name = "BIC"
def calculate(self, ICm):
B = sum(b.GetBondTypeAsDouble() for b in self.mol.GetBonds())
with self.rethrow_zerodiv():
log2B = np.log2(B)
return ICm / log2B
class ComplementaryIC(TotalIC):
r"""complementary information content descriptor.
.. math::
{\rm CIC}_m = \log_2 A - {\rm IC}_m
:type order: int
:param order: order(number of edge) of subgraph
"""
since = "1.0.0"
__slots__ = ()
def description(self):
return "{}-ordered complementary information content".format(self._order)
_name = "CIC"
def calculate(self, ICm):
A = self.mol.GetNumAtoms()
return np.log2(A) - ICm
class ModifiedIC(InformationContent):
r"""modified information content index descriptor.
:type order: int
:param order: order(number of edge) of subgraph
"""
since = "1.0.0"
__slots__ = ()
def description(self):
return "{}-ordered modified information content".format(self._order)
_name = "MIC"
def calculate(self, iAgs):
ids, Ags = iAgs
w = np.vectorize(lambda i: self.mol.GetAtomWithIdx(int(i)).GetMass())(ids)
return shannon_entropy(Ags, w)
class ZModifiedIC(InformationContent):
r"""Z-modified information content index descriptor.
:type order: int
:param order: order(number of edge) of subgraph
"""
since = "1.0.0"
__slots__ = ()
def description(self):
return "{}-ordered Z-modified information content".format(self._order)
_name = "ZMIC"
def calculate(self, iAgs):
ids, Ags = iAgs
w = Ags * np.vectorize(
lambda i: self.mol.GetAtomWithIdx(int(i)).GetAtomicNum()
)(ids)
return shannon_entropy(Ags, w)
|
{
"content_hash": "eb04e519c85da53cd68fffc8cfd2b1e2",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 87,
"avg_line_length": 22.850931677018632,
"alnum_prop": 0.5501494971459636,
"repo_name": "mordred-descriptor/mordred",
"id": "0aca0af5559ae99568535eac8672af22c90224ce",
"size": "7358",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "mordred/InformationContent.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "252306"
},
{
"name": "Shell",
"bytes": "4077"
}
],
"symlink_target": ""
}
|
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='splutter',
packages=['splutter'],
version='0.1.0',
description='A modern curses UI framework for Python.',
author='John Carlyle',
url='https://github.com/elegantbadger/splutter',
long_description=long_description,
keywords=['curses', 'terminal'],
license='MIT',
classifiers=[],
)
|
{
"content_hash": "0f6c9a671996e6d2ab31e19c996bb075",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 64,
"avg_line_length": 23.041666666666668,
"alnum_prop": 0.6690777576853526,
"repo_name": "ElegantBadger/splutter",
"id": "c131b17d15f503fbc5ea2ba33bb9534e7421a95c",
"size": "553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25789"
}
],
"symlink_target": ""
}
|
import sys
import unittest
import threading
import time
import Gaffer
import GafferTest
class OutputRedirectionTest( GafferTest.TestCase ) :
def testRedirection( self ) :
out = []
err = []
with Gaffer.OutputRedirection( stdOut = out.append, stdErr = err.append ) :
sys.stdout.write( "OUT" )
print( "PRINT" )
sys.stderr.write( "ERR" )
self.assertEqual( out, [ "OUT", "PRINT", "\n" ] )
self.assertEqual( err, [ "ERR" ] )
sys.stdout.write( "" )
sys.stderr.write( "" )
self.assertEqual( out, [ "OUT", "PRINT", "\n" ] )
self.assertEqual( err, [ "ERR" ] )
def testThreading( self ) :
perThreadOuts = []
perThreadErrs = []
threads = []
def f( threadIndex ) :
with Gaffer.OutputRedirection( stdOut = perThreadOuts[threadIndex].append, stdErr = perThreadErrs[threadIndex].append ) :
for i in range( 0, 100 ) :
sys.stdout.write( "OUT %d %d" % ( threadIndex, i ) )
sys.stderr.write( "ERR %d %d" % ( threadIndex, i ) )
time.sleep( 0.001 )
for i in range( 0, 100 ) :
perThreadOuts.append( [] )
perThreadErrs.append( [] )
t = threading.Thread( target = f, args = ( i, ) )
threads.append( t )
t.start()
for t in threads :
t.join()
for i in range( 0, 100 ) :
self.assertEqual( len( perThreadOuts[i] ), 100 )
self.assertEqual( len( perThreadErrs[i] ), 100 )
for j in range( 0, 100 ) :
self.assertEqual( perThreadOuts[i][j], "OUT %d %d" % ( i, j ) )
self.assertEqual( perThreadErrs[i][j], "ERR %d %d" % ( i, j ) )
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "ec8fa7dda87ac62c04891e7e1db2dbe3",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 124,
"avg_line_length": 25.06451612903226,
"alnum_prop": 0.6081081081081081,
"repo_name": "hradec/gaffer",
"id": "fc2f459d997f679cfc22d8e1fd324ea61e8041a1",
"size": "3357",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "python/GafferTest/OutputRedirectionTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "54696"
},
{
"name": "C++",
"bytes": "8682649"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6208"
},
{
"name": "Python",
"bytes": "9458935"
},
{
"name": "Ruby",
"bytes": "419"
},
{
"name": "Shell",
"bytes": "14299"
}
],
"symlink_target": ""
}
|
"""
Functional tests for ``flocker.node._deploy``.
"""
from subprocess import check_call
from twisted.trial.unittest import TestCase
from .. import Deployer, Deployment, Application, DockerImage, Node
from ..gear import GearClient
from ..testtools import wait_for_unit_state, if_gear_configured
from ...testtools import random_name
from ...volume.testtools import create_volume_service
from ...route import make_memory_network
class DeployerTests(TestCase):
"""
Functional tests for ``Deployer``.
"""
@if_gear_configured
def test_restart(self):
"""
Stopped applications that are supposed to be running are restarted
when ``Deployer.change_node_state`` is run.
"""
name = random_name()
gear_client = GearClient("127.0.0.1")
deployer = Deployer(create_volume_service(self), gear_client,
make_memory_network())
self.addCleanup(gear_client.remove, name)
desired_state = Deployment(nodes=frozenset([
Node(hostname=u"localhost",
applications=frozenset([Application(
name=name,
image=DockerImage.from_string(
u"openshift/busybox-http-app"))]))]))
d = deployer.change_node_state(desired_state,
Deployment(nodes=frozenset()),
u"localhost")
d.addCallback(lambda _: wait_for_unit_state(gear_client, name,
[u'active']))
def started(_):
# Now that it's running, stop it behind our back:
check_call([b"gear", b"stop", name])
return wait_for_unit_state(gear_client, name,
[u'inactive', u'failed'])
d.addCallback(started)
def stopped(_):
# Redeploy, which should restart it:
return deployer.change_node_state(desired_state, desired_state,
u"localhost")
d.addCallback(stopped)
d.addCallback(lambda _: wait_for_unit_state(gear_client, name,
[u'active']))
# Test will timeout if unit was not restarted:
return d
|
{
"content_hash": "46ae66a28955b6a0f0754d7e80fa55cb",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 75,
"avg_line_length": 37.38709677419355,
"alnum_prop": 0.5530629853321829,
"repo_name": "beni55/flocker",
"id": "1aa3bf9ceaf5daf0a53413cb7f825d31aabf9392",
"size": "2380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flocker/node/functional/test_deploy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "540895"
},
{
"name": "Ruby",
"bytes": "797"
},
{
"name": "Shell",
"bytes": "3744"
}
],
"symlink_target": ""
}
|
import ckeditor_uploader.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('inventory', '0005_item_views'),
]
operations = [
migrations.AlterField(
model_name='item',
name='description',
field=ckeditor_uploader.fields.RichTextUploadingField(verbose_name='Beskrivelse'),
),
]
|
{
"content_hash": "0e69d3a85617ac4fcabcadc1ec19fae1",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 94,
"avg_line_length": 23.647058823529413,
"alnum_prop": 0.6268656716417911,
"repo_name": "hackerspace-ntnu/website",
"id": "d4489ada6001160467cfda472e06a22185f714ba",
"size": "451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inventory/migrations/0006_auto_20201021_0135.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16771"
},
{
"name": "HTML",
"bytes": "235369"
},
{
"name": "JavaScript",
"bytes": "43249"
},
{
"name": "Python",
"bytes": "323186"
}
],
"symlink_target": ""
}
|
import re
import functools
import distutils.core
import distutils.errors
import distutils.extension
if "__PEX_UNVENDORED__" in __import__("os").environ:
from setuptools.extern.six.moves import map # vendor:skip
else:
from pex.third_party.setuptools.extern.six.moves import map
from .monkey import get_unpatched
def _have_cython():
"""
Return True if Cython can be imported.
"""
cython_impl = 'Cython.Distutils.build_ext'
try:
# from (cython_impl) import build_ext
__import__(cython_impl, fromlist=['build_ext']).build_ext
return True
except Exception:
pass
return False
# for compatibility
have_pyrex = _have_cython
_Extension = get_unpatched(distutils.core.Extension)
class Extension(_Extension):
"""Extension that uses '.c' files in place of '.pyx' files"""
def __init__(self, name, sources, *args, **kw):
# The *args is needed for compatibility as calls may use positional
# arguments. py_limited_api may be set only via keyword.
self.py_limited_api = kw.pop("py_limited_api", False)
_Extension.__init__(self, name, sources, *args, **kw)
def _convert_pyx_sources_to_lang(self):
"""
Replace sources with .pyx extensions to sources with the target
language extension. This mechanism allows language authors to supply
pre-converted sources but to prefer the .pyx sources.
"""
if _have_cython():
# the build has Cython, so allow it to compile the .pyx files
return
lang = self.language or ''
target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
        sub = functools.partial(re.sub, r'\.pyx$', target_ext)
self.sources = list(map(sub, self.sources))
class Library(Extension):
"""Just like a regular Extension, but built as a library instead"""
|
{
"content_hash": "b7e919a78bed52fcc622baa37f908413",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 76,
"avg_line_length": 30.62295081967213,
"alnum_prop": 0.6472162740899358,
"repo_name": "pantsbuild/pex",
"id": "8553481363ec8eda34e670fb8854d1ce2d7639dd",
"size": "1868",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pex/vendor/_vendored/setuptools/setuptools/extension.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1379"
},
{
"name": "Python",
"bytes": "2190044"
},
{
"name": "Shell",
"bytes": "1472"
}
],
"symlink_target": ""
}
|
from setuptools import setup
from setuptools import find_packages
from xmm import __author__, __email__, __url__, __license__, __version__, __summary__, __keywords__
with open('requirements.in') as f:
install_requires = f.read().splitlines()
try:
import pypandoc
readme_contents = pypandoc.convert('README.md', 'rst')
changelog_contents = pypandoc.convert('CHANGELOG.md', 'rst')
except(IOError, ImportError):
with open('README.md') as f:
readme_contents = f.read()
with open('CHANGELOG.md') as f:
changelog_contents = f.read()
long_description = '{}\n{}'.format(readme_contents, changelog_contents)
setup(
name='xmm',
version=__version__,
description=__summary__,
long_description=long_description,
author=__author__,
author_email=__email__,
url=__url__,
license=__license__,
packages=find_packages(exclude=('tests', 'docs')),
package_data={'': ['LICENSE', 'README.md', 'CHANGELOG.md', 'docs/*', 'config/*', 'bin/*', 'pkg/*', 'resources/*']},
include_package_data=True,
install_requires=install_requires,
setup_requires=['pytest-runner'],
tests_require=['pytest'],
scripts=['pkg/xmm.bash', 'pkg/xmm.zsh'],
entry_points={
'console_scripts': [
'xmm = xmm.cli:main'
]
},
keywords=__keywords__,
classifiers=[
'Intended Audience :: End Users/Desktop',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Topic :: Games/Entertainment',
'Topic :: System :: Archiving :: Packaging',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
{
"content_hash": "98448e9d6a8f55053fdab633f62176dd",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 119,
"avg_line_length": 30.75,
"alnum_prop": 0.6043360433604336,
"repo_name": "z/xonotic-map-manager",
"id": "432bff96d480242489a31c1b574a5df4c7aa567d",
"size": "1845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "840"
},
{
"name": "Python",
"bytes": "97980"
},
{
"name": "Shell",
"bytes": "2053"
}
],
"symlink_target": ""
}
|
from .async_client import RegistryAsyncClient
from .client import RegistryClient
__all__ = (
"RegistryClient",
"RegistryAsyncClient",
)
|
{
"content_hash": "b2ccaa932509492141ce233d840b873e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 45,
"avg_line_length": 20.714285714285715,
"alnum_prop": 0.7310344827586207,
"repo_name": "googleapis/python-apigee-registry",
"id": "2e8903ad24908b4f2c338d86994fbe259517ba39",
"size": "745",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/apigee_registry_v1/services/registry/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2223877"
},
{
"name": "Shell",
"bytes": "30687"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/creature/npc/theme_park/shared_event_transport.iff"
result.attribute_template_id = 9
result.stfName("theme_park_name","transport")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "8afb78da39d9c8151527cbdfc9bfceb5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 24.076923076923077,
"alnum_prop": 0.7028753993610224,
"repo_name": "obi-two/Rebelion",
"id": "eee056dea72ac638a9e22cd6f6b6855fea0ecb79",
"size": "458",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/creature/npc/theme_park/shared_event_transport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('csyllabusapi', '0004_auto_20171030_0200'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now_add=True)),
],
),
migrations.RemoveField(
model_name='course',
name='program',
),
migrations.RemoveField(
model_name='faculty',
name='university',
),
migrations.RemoveField(
model_name='program',
name='faculty',
),
migrations.RemoveField(
model_name='university',
name='country',
),
migrations.DeleteModel(
name='Country',
),
migrations.DeleteModel(
name='Course',
),
migrations.DeleteModel(
name='Faculty',
),
migrations.DeleteModel(
name='Program',
),
migrations.DeleteModel(
name='University',
),
]
|
{
"content_hash": "7ecd4bf7c67a92d3bea0ccd3f8150a7f",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 114,
"avg_line_length": 27,
"alnum_prop": 0.5101327742837177,
"repo_name": "CSyllabus/webapp",
"id": "77a5a3fdb61f20945801b11876abf3a3ecf2a523",
"size": "1503",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "backend/apps/csyllabusapi/migrations/0005_auto_20171030_0208.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "946"
},
{
"name": "CSS",
"bytes": "231277"
},
{
"name": "HTML",
"bytes": "95429"
},
{
"name": "JavaScript",
"bytes": "913374"
},
{
"name": "PHP",
"bytes": "2280"
},
{
"name": "Python",
"bytes": "313702"
},
{
"name": "Shell",
"bytes": "1341"
},
{
"name": "TypeScript",
"bytes": "235488"
}
],
"symlink_target": ""
}
|
from math import sqrt
# Function for computing a distance-based similarity score (Euclidean distance) for person1 and person2
def similarity_distance(input_recs, person1, person2):
# Gather list of shared_items
shared_items = {}
for item_key in input_recs[person1]:
if item_key in input_recs[person2]:
shared_items[item_key] = 1
# Return 0 if no common ratings
if len(shared_items) == 0:
return 0
# Add the squares of the differences
sum_of_squares = sum(pow(input_recs[person1][item_key]- input_recs[person2][item_key], 2)
for item_key in input_recs[person1]
if item_key in input_recs[person2])
# Return the Euclidean Distance Score
return 1/(1 + sqrt(sum_of_squares))
# Function for computing the Pearson correlation coefficient for p1 and p2.
# The returned value lies between -1 and 1;
# 1 means that the two critics have exactly the same ratings.
def similarity_pearson(input_recs, p1, p2):
# Gather list of shared_items
shared_items = {}
for each_critic_movie_rating_key in input_recs[p1]:
if each_critic_movie_rating_key in input_recs[p2]:
shared_items[each_critic_movie_rating_key] = 1
# Were any matches found?
match_length = len(shared_items)
if match_length == 0:
return 0
# Add up all of the ratings for each person
sum_p1 = sum([input_recs[p1][each_critic_movie_rating_key]
for each_critic_movie_rating_key in shared_items])
sum_p2 = sum([input_recs[p2][each_critic_movie_rating_key]
for each_critic_movie_rating_key in shared_items])
# Sum up the squares
sqr_sum_p1 = sum([pow(input_recs[p1][each_critic_movie_rating_key], 2)
for each_critic_movie_rating_key in shared_items])
sqr_sum_p2 = sum([pow(input_recs[p2][each_critic_movie_rating_key], 2)
for each_critic_movie_rating_key in shared_items])
# Multiply the ratings for p1 and p2 together and get the Sum total
sum_multiply_p1_and_p2 = sum([input_recs[p1][each_critic_movie_rating_key]*
input_recs[p2][each_critic_movie_rating_key]
for each_critic_movie_rating_key in shared_items])
# Calculate the Pearson score
numerator = sum_multiply_p1_and_p2 - (sum_p1*sum_p2/match_length)
denominator = sqrt( (sqr_sum_p1-pow(sum_p1,2)/match_length) * (sqr_sum_p2-pow(sum_p2,2)/match_length) )
if denominator == 0:
return 0
return numerator/denominator
# Dictionary of movie critics and their ratings of a small set of movies
critics={'Lisa Rose': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.5,
'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5,
'The Night Listener': 3.0},
'Gene Seymour': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5,
'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 3.5},
'Michael Phillips': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0,
'Superman Returns': 3.5, 'The Night Listener': 4.0},
'Claudia Puig': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0,
'The Night Listener': 4.5, 'Superman Returns': 4.0,
'You, Me and Dupree': 2.5},
'Mick LaSalle': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0,
'You, Me and Dupree': 2.0},
'Jack Matthews': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0,
'The Night Listener': 3.0, 'Superman Returns': 5.0, 'You, Me and Dupree': 3.5},
'Toby': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0,'Superman Returns':4.0}}
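
# A quick sanity check (values follow from the critics data above; run this
# module directly to verify):
if __name__ == '__main__':
    # Lisa Rose and Gene Seymour have rated all six movies in common.
    print(similarity_distance(critics, 'Lisa Rose', 'Gene Seymour'))
    # -> ~0.294 (Euclidean distance-based score)
    print(similarity_pearson(critics, 'Lisa Rose', 'Gene Seymour'))
    # -> ~0.396 (Pearson correlation)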
|
{
"content_hash": "ae92ab6e85f63f700809b37c4b1f9af4",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 109,
"avg_line_length": 44.925925925925924,
"alnum_prop": 0.638636988183567,
"repo_name": "jpbinary/Programming-Collective-Intelligence",
"id": "5c7d7d9464a86d050e7a68b83b93bd985bb1f7c0",
"size": "3700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recommendations.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6350"
}
],
"symlink_target": ""
}
|
import asyncio
import collections
import concurrent.futures
import errno
import functools
import gc
import inspect
import itertools
import os
import signal
import socket
import subprocess
import ssl
import stat
import sys
import threading
import traceback
import time
import warnings
import weakref
|
{
"content_hash": "9dc84a1266fd63e55297eba544e83693",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 25,
"avg_line_length": 14.95,
"alnum_prop": 0.862876254180602,
"repo_name": "MagicStack/uvloop",
"id": "2ccf9cae31016b266b5a8a656b3aea0c3e571fc9",
"size": "361",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "uvloop/includes/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2588"
},
{
"name": "Cython",
"bytes": "372938"
},
{
"name": "Makefile",
"bytes": "1130"
},
{
"name": "Python",
"bytes": "344183"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, absolute_import
from .syntax import * # noqa
def exists(value):
"Query to test if a value exists."
if not isinstance(value, Token):
raise TypeError('value must be a token')
if not hasattr(value, 'identifier'):
raise TypeError('value must support an identifier')
if not value.identifier:
value = value.__class__(**value.__dict__)
value.identifier = 'v'
ident = Identifier(value.identifier)
return Query([
OptionalMatch(value),
Return(Predicate(ident, 'IS NOT NULL')),
Limit(1),
])
def get(value):
"Query to get the value."
if not isinstance(value, Token):
raise TypeError('value must be a token')
if not hasattr(value, 'identifier'):
raise TypeError('value must support an identifier')
if not value.identifier:
value = value.__class__(**value.__dict__)
value.identifier = 'v'
ident = Identifier(value.identifier)
return Query([
Match(value),
Return(ident)
])
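
# A hedged usage sketch: assuming the syntax tokens render in the usual way,
# exists(some_node_token) yields a query shaped like
#   OPTIONAL MATCH (v) RETURN v IS NOT NULL LIMIT 1
# and get(some_node_token) yields
#   MATCH (v) RETURN v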
|
{
"content_hash": "157c694f54541d6f3faea37ab70d1dba",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 59,
"avg_line_length": 24.930232558139537,
"alnum_prop": 0.6147388059701493,
"repo_name": "bruth/cypher",
"id": "b8dfebb8947567a90400cf68db74b4bfb6ec6f14",
"size": "1072",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cypher/shortcuts.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "20233"
}
],
"symlink_target": ""
}
|
import logging
import platform
import re
import sys
from functools import reduce
from google.protobuf import text_format
from google.protobuf.descriptor import FieldDescriptor
from google.protobuf.message import Message
from pathlib import Path
from typing import NewType, Any, Optional, List, Iterable
UniqueId = NewType("UniqueId", str)
HashCode = NewType("HashCode", int)
# Configure logging with timestamp, log level, filename, and line number.
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s:%(levelname)s:%(filename)s(%(lineno)d)] %(message)s")
logger = logging.getLogger(__name__)
def import_compiled_proto(build_path) -> Any:
"""Global import from function. |self.build_path| is needed to perform
this import, hence why it's not a top-level import.
The compiled proto is located ${build_path}/pyproto/ and generated as a part
of compiling Chrome."""
# Use the build path to import the compiled traffic annotation proto.
proto_path = build_path / "pyproto" / "chrome" / "browser" / "privacy"
sys.path.insert(0, str(proto_path))
try:
global traffic_annotation_pb2
global traffic_annotation
import traffic_annotation_pb2
# Used for accessing enum constants.
from traffic_annotation_pb2 import NetworkTrafficAnnotation as \
traffic_annotation
return traffic_annotation_pb2
except ImportError as e:
    logger.critical(
        "Failed to import the compiled traffic annotation proto. Make sure "
        "Chrome is built in '{}' before running this script.".format(
            build_path))
raise
def get_current_platform(build_path: Optional[Path] = None) -> str:
"""Return the target platform of |build_path| based on heuristics."""
# Use host platform as the source of truth (in most cases).
current_platform: str = platform.system().lower()
if current_platform == "linux" and build_path is not None:
# It could be an Android build directory, being compiled from a Linux host.
# Look for a target_os="android" line in args.gn.
try:
gn_args = (build_path / "args.gn").read_text(encoding="utf-8")
pattern = re.compile(r"^\s*target_os\s*=\s*\"(android|chromeos)\"\s*$",
re.MULTILINE)
match = pattern.search(gn_args)
if match:
current_platform = match.group(1)
except (ValueError, OSError) as e:
logger.info(e)
# Maybe the file's absent, or it can't be decoded as UTF-8, or something.
# It's probably not Android/ChromeOS in that case.
pass
return current_platform
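
# For instance (hypothetical build directory): on a Linux host whose args.gn
# contains the line
#     target_os = "android"
# get_current_platform returns "android"; absent such a line, it returns
# "linux".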
def twos_complement_8bit(b: int) -> int:
"""Interprets b like a signed 8-bit integer, possibly changing its sign.
For instance, twos_complement_8bit(204) returns -52."""
if b >= 256:
raise ValueError("b must fit inside 8 bits")
if b & (1 << 7):
# Negative number, calculate its value using two's-complement.
return b - (1 << 8)
else:
# Positive number, do not touch.
return b
def iterative_hash(s: str) -> HashCode:
"""Compute the has code of the given string as in:
net/traffic_annotation/network_traffic_annotation.h
Args:
s: str
The seed, e.g. unique id of traffic annotation.
Returns: int
A hash code.
"""
return HashCode(
reduce(lambda acc, b: (acc * 31 + twos_complement_8bit(b)) % 138003713,
s.encode("utf-8"), 0))
def compute_hash_value(text: str) -> HashCode:
"""Same as iterative_hash, but returns -1 for empty strings."""
return iterative_hash(text) if text else HashCode(-1)
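
# For example (all bytes ASCII, so the two's-complement step is a no-op):
#   compute_hash_value("test") == 3556498
#   compute_hash_value("") == -1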
def merge_string_field(src: Message, dst: Message, field: str):
"""Merges the content of one string field into an annotation."""
if getattr(src, field):
if getattr(dst, field):
setattr(dst, field, "{}\n{}".format(getattr(src, field),
getattr(dst, field)))
else:
setattr(dst, field, getattr(src, field))
def fill_proto_with_bogus(proto: Message, field_numbers: List[int]):
"""Fill proto with bogus values for the fields identified by field_numbers.
Uses reflection to fill the proto with the right types."""
descriptor = proto.DESCRIPTOR
for field_number in field_numbers:
field_number = abs(field_number)
if field_number not in descriptor.fields_by_number:
raise ValueError("{} is not a valid {} field".format(
field_number, descriptor.name))
field = descriptor.fields_by_number[field_number]
repeated = field.label == FieldDescriptor.LABEL_REPEATED
if field.type == FieldDescriptor.TYPE_STRING and not repeated:
setattr(proto, field.name, "[Archived]")
elif field.type == FieldDescriptor.TYPE_ENUM and not repeated:
# Assume the 2nd value in the enum is reasonable, since the 1st is
# UNSPECIFIED.
setattr(proto, field.name, field.enum_type.values[1].number)
elif field.type == FieldDescriptor.TYPE_MESSAGE and repeated:
getattr(proto, field.name).add()
else:
raise NotImplementedError("Unimplemented proto field type {} ({})".format(
field.type, "repeated" if repeated else "non-repeated"))
def extract_annotation_id(line: str) -> Optional[UniqueId]:
"""Returns the annotation id given an '<item id=...' line"""
m = re.search('id="([^"]+)"', line)
return UniqueId(m.group(1)) if m else None
def escape_for_tsv(text: str) -> str:
"""Changes double-quotes to single-quotes, and adds double-quotes around the
text if it has newlines/tabs."""
  text = text.replace("\"", "'")
if "\n" in text or "\t" in text:
return "\"{}\"".format(text)
return text
def policy_to_text(chrome_policy: Iterable[Message]) -> str:
"""Unnests the policy name/values from chrome_policy, producing a
human-readable string.
For example, this:
chrome_policy {
SyncDisabled {
policy_options {
mode: MANDATORY
}
SyncDisabled: true
}
}
becomes this:
SyncDisabled: true"""
items = []
# Use the protobuf serializer library to print the fields, 2 levels deep.
for policy in chrome_policy:
for field, value in policy.ListFields():
for subfield, subvalue in value.ListFields():
if subfield.name == "policy_options":
# Skip the policy_options field.
continue
writer = text_format.TextWriter(as_utf8=True)
text_format.PrintField(subfield,
subvalue,
writer,
as_one_line=True,
use_short_repeated_primitives=True)
items.append(writer.getvalue().strip())
  # Join the collected name/value pairs into a single comma-separated line.
  return ", ".join(items)
def write_annotations_tsv_file(file_path: Path, annotations: List["Annotation"],
missing_ids: List[UniqueId]):
"""Writes a TSV file of all annotations and their contents in file_path."""
logger.info("Saving annotations to TSV file: {}.".format(file_path))
Destination = traffic_annotation.TrafficSemantics.Destination
CookiesAllowed = traffic_annotation.TrafficPolicy.CookiesAllowed
lines = []
title = "Unique ID\tLast Update\tSender\tDescription\tTrigger\tData\t" + \
"Destination\tCookies Allowed\tCookies Store\tSetting\tChrome Policy\t" + \
"Comments\tSource File"
column_count = title.count("\t")
for missing_id in missing_ids:
lines.append(missing_id + "\t" * column_count)
for annotation in annotations:
if annotation.type.value != "definition":
continue
# TODO(nicolaso): Use StringIO for faster concatenation.
line = annotation.proto.unique_id
# Placeholder for Last Update Date, will be updated in the scripts.
line += "\t"
# Semantics.
semantics = annotation.proto.semantics
semantics_list = [
semantics.sender,
escape_for_tsv(semantics.description),
escape_for_tsv(semantics.trigger),
escape_for_tsv(semantics.data),
]
for semantic_info in semantics_list:
line += "\t{}".format(semantic_info)
destination_names = {
Destination.WEBSITE: "Website",
Destination.GOOGLE_OWNED_SERVICE: "Google",
Destination.LOCAL: "Local",
Destination.OTHER: "Other",
}
if (semantics.destination == Destination.OTHER
and semantics.destination_other):
line += "\tOther: {}".format(semantics.destination_other)
elif semantics.destination in destination_names:
line += "\t{}".format(destination_names[semantics.destination])
else:
raise ValueError("Invalid value for the semantics.destination field")
# Policy.
policy = annotation.proto.policy
if annotation.proto.policy.cookies_allowed == CookiesAllowed.YES:
line += "\tYes"
else:
line += "\tNo"
line += "\t{}".format(escape_for_tsv(policy.cookies_store))
line += "\t{}".format(escape_for_tsv(policy.setting))
# Chrome policies.
if policy.chrome_policy:
policies_text = policy_to_text(policy.chrome_policy)
else:
policies_text = policy.policy_exception_justification
line += "\t{}".format(escape_for_tsv(policies_text))
# Comments.
line += "\t{}".format(escape_for_tsv(annotation.proto.comments))
# Source.
source = annotation.proto.source
code_search_link = "https://cs.chromium.org/chromium/src/"
line += "\t{}{}?l={}".format(code_search_link, source.file, source.line)
lines.append(line)
lines.sort()
lines.insert(0, title)
report = "\n".join(lines) + "\n"
file_path.write_text(report, encoding="utf-8")
|
{
"content_hash": "f5642bce3e4ddd6251f22b6c268e503c",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 80,
"avg_line_length": 34.75812274368231,
"alnum_prop": 0.6615081013710012,
"repo_name": "scheib/chromium",
"id": "3bbd97a5f4cb60d4e8077111d8f4b4f1238b56ce",
"size": "9791",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/traffic_annotation/scripts/auditor/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from agate.rows import Row
from agate import utils
@utils.allow_tableset_proxy
def select(self, key):
"""
Create a new table with only the specified columns.
:param key:
Either the name of a single column to include or a sequence of such
names.
:returns:
A new :class:`.Table`.
"""
if not utils.issequence(key):
key = [key]
indexes = tuple(self._column_names.index(k) for k in key)
column_types = tuple(self._column_types[i] for i in indexes)
new_rows = []
for row in self._rows:
new_rows.append(Row(tuple(row[i] for i in indexes), key))
return self._fork(new_rows, key, column_types)
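
# A minimal usage sketch (assumes the agate package is installed; agate binds
# this function onto Table as Table.select):
if __name__ == '__main__':
    import agate
    table = agate.Table([(1, 'a'), (2, 'b')], ['num', 'letter'])
    print(table.select('letter').column_names)  # ('letter',)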
|
{
"content_hash": "923804618aa1f0fdde39b3ec1011e221",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 75,
"avg_line_length": 25.96153846153846,
"alnum_prop": 0.6311111111111111,
"repo_name": "flother/agate",
"id": "f806f6640db792c6f3058ca4d3510677ed1f6e28",
"size": "722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agate/table/select.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "165242"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
from random_music.random_music import __version__
with open(u'README') as fh:
long_description = fh.read()
setup(
name=u"random_music",
version=__version__,
author=u"Caoilte Guiry",
author_email=u"",
license=u'BSD License',
description=u"Plays a pseudo-random sequence of songs",
long_description=long_description,
packages=find_packages(),
entry_points={
'console_scripts': ['music = random_music.random_music:main']
},
test_suite='random_music.tests'
)
|
{
"content_hash": "4329fcfb50da694038828d3e71fbf619",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 69,
"avg_line_length": 26.761904761904763,
"alnum_prop": 0.6690391459074733,
"repo_name": "caoilteguiry/random_music",
"id": "9f520be5031dd7b90b1981461a97199332c4cf7d",
"size": "609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "23063"
}
],
"symlink_target": ""
}
|
"""
Init
Functions:
None
Classes:
None
Exceptions:
None
"""
import os
import platform
import sys
import ec2rlcore.logutil
__all__ = ["awshelpers", "backup", "console_out",
"constraint", "logutil", "main",
"menu", "menu_config", "menu_item",
"menu_textpad_mod", "module", "moduledir",
"options", "paralleldiagnostics", "prediag",
"programversion", "s3upload"]
if sys.hexversion < 0x2070000:
print("ec2rl requires Python 2.7+, but running version is {0}.".format(
platform.python_version()))
sys.exit(201)
dual_log = ec2rlcore.logutil.LogUtil.dual_log_info
# Add vendored library directories to sys.path
_callp = sys.argv[0]
if not os.path.isabs(_callp):
_callp = os.path.abspath(_callp)
# Modules whose implementation is the same for Python2/3
sys.path.insert(0, "{}/lib".format(os.path.split(_callp)[0]))
# Modules whose implementation differs between Python2/3
if sys.hexversion >= 0x3000000:
sys.path.insert(0, "{}/lib/python3".format(os.path.split(_callp)[0]))
else:
sys.path.insert(0, "{}/lib/python2".format(os.path.split(_callp)[0]))
|
{
"content_hash": "dd10e3e1b090cd45647237bfbbf059b2",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 75,
"avg_line_length": 26,
"alnum_prop": 0.6555944055944056,
"repo_name": "gregbdunn/aws-ec2rescue-linux",
"id": "e6c3bc7fc85b88b58af259daa5c913fc9251ba0a",
"size": "1713",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "ec2rlcore/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "701"
},
{
"name": "Makefile",
"bytes": "5044"
},
{
"name": "Python",
"bytes": "4595518"
},
{
"name": "Shell",
"bytes": "5229"
}
],
"symlink_target": ""
}
|