from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin
from django.views.generic.edit import ModelFormMixin, ProcessFormView, BaseDeleteView
from django.utils.translation import get_language
from hvad.forms import translatable_modelform_factory
from hvad.utils import collect_context_modifiers
import warnings
class _TransitionObjectMixin(SingleObjectMixin):
# Remove in 1.5
def get_object(self, queryset=None):
assert not callable(getattr(self, '_get_object', None)), (
'Method \'_get_object()\' was removed. Please update view %s to use '
'\'get_object()\' instead.' % self.__class__.__name__)
assert not callable(getattr(self, 'filter_kwargs', None)), (
'Method \'filter_kwargs()\' was removed. Please update view %s to use '
'\'get_queryset()\' or \'get_object()\'.' % self.__class__.__name__)
assert not (self.pk_url_kwarg == 'pk' and 'object_id' in self.kwargs and 'pk' not in self.kwargs), (
'Default view argument for pk has changed from \'object_id\' '
'to \'pk\'. Please update view %s.' % self.__class__.__name__)
return super(_TransitionObjectMixin, self).get_object(queryset)
class TranslatableModelFormMixin(ModelFormMixin, _TransitionObjectMixin):
''' ModelFormMixin that works with a TranslatableModelForm in **enforce** mode '''
query_language_key = 'language'
def get_language(self):
# Remove in 1.5
assert not callable(getattr(self, '_language', None)), (
'Method \'_language\' has been renamed to \'get_language()\'. '
'Please update view %s.' % self.__class__.__name__)
return self.request.GET.get(self.query_language_key) or get_language()
def get_form_class(self):
if self.model is not None:
model = self.model
elif getattr(self, 'object', None) is not None:
model = self.object.__class__
else:
qs = self.get_queryset()
model = getattr(qs, 'shared_model', qs.model)
kwargs = {}
if self.form_class is not None:
kwargs['form'] = self.form_class
return translatable_modelform_factory(self.get_language(), model, **kwargs)
def get_context_data(self, **kwargs):
# Deprecation warning is triggered inside collect_context_modifiers
# remove this in 1.5
context = super(TranslatableModelFormMixin, self).get_context_data(**kwargs)
context.update(collect_context_modifiers(self, extra_kwargs=kwargs))
return context
#=============================================================================
class TranslatableBaseCreateView(TranslatableModelFormMixin, ProcessFormView):
def get(self, request, *args, **kwargs):
self.object = None
return super(TranslatableBaseCreateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = None
return super(TranslatableBaseCreateView, self).post(request, *args, **kwargs)
class TranslatableCreateView(SingleObjectTemplateResponseMixin, TranslatableBaseCreateView):
template_name_suffix = '_form'
#-------------------------------------------------------------------------
class TranslatableBaseUpdateView(TranslatableModelFormMixin, ProcessFormView):
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(TranslatableBaseUpdateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super(TranslatableBaseUpdateView, self).post(request, *args, **kwargs)
class TranslatableUpdateView(SingleObjectTemplateResponseMixin, TranslatableBaseUpdateView):
template_name_suffix = '_form'
#-------------------------------------------------------------------------
class TranslatableBaseDeleteView(BaseDeleteView, _TransitionObjectMixin):
pass
class TranslatableDeleteView(SingleObjectTemplateResponseMixin, TranslatableBaseDeleteView):
template_name_suffix = '_confirm_delete'
#=============================================================================
#=============================================================================
#=============================================================================
from django.views.generic.edit import UpdateView
from hvad.admin import TranslatableModelAdminMixin
from hvad.forms import TranslatableModelForm
class TranslatableBaseView(UpdateView, TranslatableModelAdminMixin): #pragma: no cover
# Remove in 1.5
form_class = TranslatableModelForm
def __init__(self, *args, **kwargs):
raise AssertionError(
'TranslatableBaseView has been removed. Please update view %s to use '
'new Django-compliant view instead.' % self.__class__.__name__
)
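# Hedged usage sketch (not part of the original module): wiring the class-based
# views above into a urls.py. ``Book`` and the URL names are hypothetical, not
# part of django-hvad itself.
#
#   from django.conf.urls import url
#   from hvad.views import TranslatableCreateView, TranslatableUpdateView
#   from myapp.models import Book
#
#   urlpatterns = [
#       url(r'^books/add/$', TranslatableCreateView.as_view(model=Book), name='book-add'),
#       url(r'^books/(?P<pk>\d+)/edit/$', TranslatableUpdateView.as_view(model=Book), name='book-edit'),
#   ]
#
# The translation being edited is selected via the ``language`` query parameter
# (see ``query_language_key``) and falls back to the active Django language.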
|
{
"content_hash": "e590cd448b0b9dc7aef01c70c097213e",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 108,
"avg_line_length": 44.64545454545455,
"alnum_prop": 0.6196294033801669,
"repo_name": "promil23/django-hvad",
"id": "5d0175ef684cc8d6ee39563070f45870fe7e0828",
"size": "4911",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "hvad/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "13766"
},
{
"name": "Python",
"bytes": "452451"
}
],
"symlink_target": ""
}
|
from pykalman import KalmanFilter
def moving_average(values, transition_covariance=.01):
kf = KalmanFilter(transition_matrices = [1],
observation_matrices = [1],
initial_state_mean = 0,
initial_state_covariance = 1,
observation_covariance=1,
transition_covariance=transition_covariance)
means, covs = kf.filter(values)
return means, covs
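# Hedged usage sketch (not part of the original module): smoothing a short
# price series with the Kalman moving average above. The sample values are
# illustrative only.
if __name__ == '__main__':
    sample_prices = [10.0, 10.2, 9.9, 10.4, 10.6, 10.3]
    means, covs = moving_average(sample_prices, transition_covariance=.01)
    # means[i] is the filtered estimate after observation i; covs[i] is the
    # corresponding state covariance.
    print(means.ravel())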
|
{
"content_hash": "44f0433983ec1fe31d51ea456f3d2ed9",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 66,
"avg_line_length": 38.5,
"alnum_prop": 0.577922077922078,
"repo_name": "gpostelnicu/fin_data",
"id": "45a39bd04e797a34b84a0a51f1a3fcb07130a77c",
"size": "462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fin_data/signals/kalman.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2331"
},
{
"name": "Python",
"bytes": "29985"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from logging import getLogger
Logger = getLogger('chime.error_functions')
from flask import current_app, request
from urllib import quote
from urlparse import urlparse
from os.path import join, exists
from .view_functions import get_repo, strip_index_file, path_display_type, get_value_from_front_matter, FOLDER_FILE_TYPE
from .repo_functions import TASK_METADATA_FILENAME
EMAIL_SUBJECT_TEXT = u'Chime Error Report'
EMAIL_BODY_PREFIX = u'\n\n----- Please add any relevant details above this line -----\n\n'
def common_error_template_args(app_config):
''' Return dictionary of template arguments common to error pages.
'''
return {
"activities_path": u'/',
"support_email": app_config.get('SUPPORT_EMAIL_ADDRESS'),
"support_phone_number": app_config.get('SUPPORT_PHONE_NUMBER')
}
def make_email_params(message, path=None, uuid=None):
''' Construct email params to send to the template.
'''
email_subject = EMAIL_SUBJECT_TEXT
email_message = EMAIL_BODY_PREFIX + message
if path:
email_message = u'\n'.join([email_message, u'path: {}'.format(path)])
if uuid:
email_subject = u'{} ({})'.format(email_subject, uuid)
return u'?subject={}&body={}'.format(quote(email_subject), quote(email_message))
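# Hedged usage sketch (not part of the original module): the returned string is
# meant to be appended to a mailto: link. The values below are illustrative only.
#
#   params = make_email_params(u'Merge conflict', path=u'/tree/abc123/edit/', uuid=u'1234')
#   # params is u'?subject=...&body=...' with both parts URL-encoded by urllib.quote;
#   # the uuid ends up in the subject and the path on its own line in the body.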
def extract_branch_name_from_path(path):
''' If the passed path contains the name of a branch that exists in the repo, return it
'''
repo = get_repo(flask_app=current_app)
for branch_name_candidate in path.split('/'):
if branch_name_candidate in repo.branches:
return branch_name_candidate
return None
def summarize_conflict_details(error):
''' Make an object that summarizes the files affected by a merge conflict.
The object looks like this:
[
{'edit_path': u'', 'display_type': u'Article', 'actions': u'Deleted', 'title': u'How to Find Us'},
{'edit_path': u'/tree/34246e3/edit/contact/hours-of-operation/', 'display_type': u'Article', 'actions': u'Edited', 'title': u'Hours of Operation'},
{'edit_path': u'/tree/34246e3/edit/contact/driving-directions/', 'display_type': u'Article', 'actions': u'Edited', 'title': u'Driving Directions'},
{'edit_path': u'/tree/34246e3/edit/contact/', 'display_type': u'Category', 'actions': u'Created', 'title': u'Contact'}
]
'''
repo = get_repo(flask_app=current_app)
path = urlparse(request.url).path
# get the branch name (unless it's the default branch)
branch_name = repo.active_branch.name
if branch_name == current_app.config['default_branch']:
branch_name = extract_branch_name_from_path(path)
conflict_files = error.files()
summary = []
for id_file in conflict_files:
# skip the task metadata file
if TASK_METADATA_FILENAME in id_file['path']:
continue
file_description = {'actions': id_file['actions'].title()}
edit_path = u''
display_type = u''
title = id_file['path'].split('/')[-1]
# construct location info if the file's there
file_loc = join(repo.working_dir, id_file['path'])
if exists(file_loc):
dir_path = strip_index_file(id_file['path'])
dir_loc = join(repo.working_dir, dir_path)
display_type = path_display_type(dir_loc)
# if it's not a category or article, it's just a file
if display_type == FOLDER_FILE_TYPE:
display_type = path_display_type(file_loc)
title = get_value_from_front_matter('title', file_loc) or title
edit_path = join(u'/tree/{}/edit/'.format(branch_name), dir_path)
else:
# the file's not there, so just dump the whole path into the title
title = id_file['path']
display_type = u'Unknown'
file_description['edit_path'] = edit_path
file_description['display_type'] = display_type.title()
file_description['title'] = title
summary.append(file_description)
return summary
|
{
"content_hash": "d2922b7afae35156b28e11d406cc7847",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 159,
"avg_line_length": 41.86734693877551,
"alnum_prop": 0.6361199122593224,
"repo_name": "darvelo/chime",
"id": "9c96d515a3590d5e3e098d2a31f5455607353897",
"size": "4103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chime/error_functions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "475678"
},
{
"name": "HTML",
"bytes": "1357068"
},
{
"name": "JavaScript",
"bytes": "58766"
},
{
"name": "Python",
"bytes": "615145"
},
{
"name": "Ruby",
"bytes": "16142"
},
{
"name": "Shell",
"bytes": "2988"
}
],
"symlink_target": ""
}
|
from typing import Sequence
from google.api_core.client_options import ClientOptions
from google.cloud import documentai
# TODO(developer): Uncomment these variables before running the sample.
# project_id = 'YOUR_PROJECT_ID'
# location = 'YOUR_PROCESSOR_LOCATION' # Format is 'us' or 'eu'
# processor_id = 'YOUR_PROCESSOR_ID' # Create processor before running sample
# file_path = '/path/to/local/pdf'
# mime_type = 'application/pdf' # Refer to https://cloud.google.com/document-ai/docs/file-types for supported file types
def process_document_splitter_sample(
project_id: str, location: str, processor_id: str, file_path: str, mime_type: str
):
# Online processing request to Document AI
document = process_document(
project_id, location, processor_id, file_path, mime_type
)
# Read the splitter output from a document splitter/classifier processor:
# e.g. https://cloud.google.com/document-ai/docs/processors-list#processor_procurement-document-splitter
# This processor only provides text for the document and information on how
# to split the document on logical boundaries. To identify and extract text,
# form elements, and entities, please see other processors like the OCR, form,
# and specialized processors.
print(f"Found {len(document.entities)} subdocuments:")
for entity in document.entities:
conf_percent = f"{entity.confidence:.1%}"
pages_range = page_refs_to_string(entity.page_anchor.page_refs)
# Print subdocument type information, if available
if entity.type_:
print(
f"{conf_percent} confident that {pages_range} a '{entity.type_}' subdocument."
)
else:
print(f"{conf_percent} confident that {pages_range} a subdocument.")
def process_document(
project_id: str, location: str, processor_id: str, file_path: str, mime_type: str
) -> documentai.Document:
# You must set the api_endpoint if you use a location other than 'us', e.g.:
opts = ClientOptions(api_endpoint=f"{location}-documentai.googleapis.com")
client = documentai.DocumentProcessorServiceClient(client_options=opts)
# The full resource name of the processor, e.g.:
# projects/project_id/locations/location/processors/processor_id
name = client.processor_path(project_id, location, processor_id)
# Read the file into memory
with open(file_path, "rb") as image:
image_content = image.read()
# Load Binary Data into Document AI RawDocument Object
raw_document = documentai.RawDocument(content=image_content, mime_type=mime_type)
# Configure the process request
request = documentai.ProcessRequest(name=name, raw_document=raw_document)
result = client.process_document(request=request)
return result.document
def page_refs_to_string(
page_refs: Sequence[documentai.Document.PageAnchor.PageRef],
) -> str:
"""Converts a page ref to a string describing the page or page range."""
if len(page_refs) == 1:
num = str(int(page_refs[0].page) + 1)
return f"page {num} is"
nums = ""
for page_ref in page_refs:
nums += f"{int(page_ref.page) + 1}, "
return f"pages {nums[:-2]} are"
# [END documentai_process_splitter_document]
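# Hedged usage sketch (not part of the original sample): calling the splitter
# helper with placeholder values. All identifiers below are assumptions.
#
#   process_document_splitter_sample(
#       project_id="my-project",
#       location="us",
#       processor_id="1234567890abcdef",
#       file_path="multi_document.pdf",
#       mime_type="application/pdf",
#   )
#
# This performs one online process_document() call and prints, per detected
# subdocument, its confidence, page range and (if present) its type.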
|
{
"content_hash": "247194c3d8a85fcebd1cbb0c4b9184bd",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 120,
"avg_line_length": 39,
"alnum_prop": 0.6974969474969475,
"repo_name": "googleapis/python-documentai",
"id": "e49e8ff1895af411581feca08fc47dd47861a389",
"size": "3901",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/process_document_splitter_sample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1819136"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
}
|
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import pylab as pl
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = pl.figure(figsize=(8, 5))
pl.subplot(1, 2, 1)
pl.spy(coef_lasso_)
pl.xlabel('Feature')
pl.ylabel('Time (or Task)')
pl.text(10, 5, 'Lasso')
pl.subplot(1, 2, 2)
pl.spy(coef_multi_task_lasso_)
pl.xlabel('Feature')
pl.ylabel('Time (or Task)')
pl.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
pl.figure()
pl.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
pl.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
pl.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
pl.legend(loc='upper center')
pl.axis('tight')
pl.ylim([-1.1, 1.1])
pl.show()
|
{
"content_hash": "7c4e6fb958481c95e6fef112e780475c",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 33.16923076923077,
"alnum_prop": 0.6572356215213359,
"repo_name": "Eric89GXL/scikit-learn",
"id": "699b2e00d7f03baa8f91657f275e9b8a5152377b",
"size": "2178",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "examples/linear_model/plot_multi_task_lasso_support.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import os
import unittest
from conans.test.utils.tools import TestClient
class UserChannelTestPackage(unittest.TestCase):
def test(self):
# https://github.com/conan-io/conan/issues/2501
client = TestClient()
conanfile = """from conans import ConanFile
class SayConan(ConanFile):
pass
"""
test = """from conans import ConanFile
class SayConan(ConanFile):
def requirements(self):
self.output.info("USER: %s!!" % self.user)
self.output.info("CHANNEL: %s!!" % self.channel)
def test(self):
pass
"""
client.save({"conanfile.py": conanfile,
"test_package/conanfile.py": test})
client.run("create . Pkg/0.1@conan/testing")
self.assertIn("Pkg/0.1@conan/testing (test package): USER: conan!!", client.out)
self.assertIn("Pkg/0.1@conan/testing (test package): CHANNEL: testing!!", client.out)
class SameUserChannelTest(unittest.TestCase):
def setUp(self):
self.client = TestClient()
conanfile = """
from conans import ConanFile
class SayConan(ConanFile):
name = "Say"
version = "0.1"
build_policy = "missing"
def build(self):
self.output.info("Building %s")
"""
for channel in ("lasote/stable", "other/testing"):
self.client.save({"conanfile.py": conanfile % channel})
self.client.run("export . %s" % channel)
self.conanfile = """
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
build_policy = "missing"
def requirements(self):
self.requires("Say/0.1@%s/%s" % (self.user, self.channel))
def build(self):
self.output.info("Building %s/%s" % (self.user, self.channel) )
"""
self.test_conanfile = """
from conans import ConanFile, CMake
import os
class HelloReuseConan(ConanFile):
requires = "Hello/0.1@lasote/stable"
def test(self):
pass
"""
self.client.save({"conanfile.py": self.conanfile,
"test/conanfile.py": self.test_conanfile})
def test_create(self):
self.client.run("create . lasote/stable")
self.assertIn("Say/0.1@lasote/stable: Building lasote/stable", self.client.user_io.out)
self.assertIn("Hello/0.1@lasote/stable: Building lasote/stable", self.client.user_io.out)
self.assertNotIn("other/testing", self.client.user_io.out)
self.client.save({"conanfile.py": self.conanfile,
"test/conanfile.py": self.test_conanfile.replace("lasote/stable",
"other/testing")})
self.client.run("create . other/testing")
self.assertIn("Say/0.1@other/testing: Building other/testing", self.client.user_io.out)
self.assertIn("Hello/0.1@other/testing: Building other/testing", self.client.user_io.out)
self.assertNotIn("lasote/stable", self.client.user_io.out)
def test_local_commands(self):
self.client.run("install .", assert_error=True)
self.assertIn("ERROR: conanfile.py (Hello/0.1@None/None): "
"Error in requirements() method, line 10", self.client.out)
self.assertIn("ConanException: CONAN_USERNAME environment "
"variable not defined, but self.user is used", self.client.out)
os.environ["CONAN_USERNAME"] = "lasote"
self.client.run("install .", assert_error=True)
self.assertIn("ERROR: conanfile.py (Hello/0.1@None/None): "
"Error in requirements() method, line 10", self.client.out)
self.assertIn("ConanException: CONAN_CHANNEL environment "
"variable not defined, but self.channel is used", self.client.out)
os.environ["CONAN_CHANNEL"] = "stable"
self.client.run("install .")
self.assertIn("Say/0.1@lasote/stable: Building lasote/stable", self.client.user_io.out)
self.assertNotIn("other/testing", self.client.user_io.out)
os.environ["CONAN_USERNAME"] = "other"
os.environ["CONAN_CHANNEL"] = "testing"
self.client.run("install .")
self.assertIn("Say/0.1@other/testing: Building other/testing", self.client.user_io.out)
self.assertNotIn("lasote/stable", self.client.user_io.out)
del os.environ["CONAN_USERNAME"]
del os.environ["CONAN_CHANNEL"]
# Now use the default_ methods to declare user and channel
self.client = TestClient()
conanfile = """
from conans import ConanFile
class SayConan(ConanFile):
name = "Say"
version = "0.1"
build_policy = "missing"
default_user = "userfoo"
def build(self):
self.output.info("Building %s/%s" % (self.user, self.channel) )
@property
def default_channel(self):
return "channelbar"
"""
self.client.save({"conanfile.py": conanfile})
self.client.run("install .")
self.client.run("build .")
self.assertIn("Building userfoo/channelbar", self.client.out)
class BuildRequireUserChannelTest(unittest.TestCase):
def test(self):
# https://github.com/conan-io/conan/issues/2254
client = TestClient()
conanfile = """
from conans import ConanFile
class SayConan(ConanFile):
def build_requirements(self):
self.output.info("MYUSER: %s" % self.user)
self.output.info("MYCHANNEL: %s" % self.channel)
"""
client.save({"conanfile.py": conanfile})
client.run("install . -e CONAN_USERNAME=myuser -e CONAN_CHANNEL=mychannel")
self.assertIn("MYUSER: myuser", client.out)
self.assertIn("MYCHANNEL: mychannel", client.out)
def test_profile(self):
# https://github.com/conan-io/conan/issues/2254
client = TestClient()
conanfile = """
from conans import ConanFile
class SayConan(ConanFile):
def build_requirements(self):
self.output.info("MYUSER: %s" % self.user)
self.output.info("MYCHANNEL: %s" % self.channel)
"""
myprofile = """[env]
CONAN_USERNAME=myuser
CONAN_CHANNEL=mychannel
"""
client.save({"conanfile.py": conanfile,
"myprofile": myprofile})
client.run("install . -pr=myprofile")
self.assertIn("MYUSER: myuser", client.out)
self.assertIn("MYCHANNEL: mychannel", client.out)
|
{
"content_hash": "30976fa22b720554b2891349d53dbf32",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 97,
"avg_line_length": 34.98351648351648,
"alnum_prop": 0.6210146065651013,
"repo_name": "memsharded/conan",
"id": "a5a5655121cd9e2f6676781f678da3fa72aa7af8",
"size": "6367",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/integration/same_userchannel_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1100"
},
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Groovy",
"bytes": "12586"
},
{
"name": "Python",
"bytes": "4334185"
},
{
"name": "Shell",
"bytes": "1864"
}
],
"symlink_target": ""
}
|
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
"""
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty()
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.StringProperty(repeated=True)
sessionWishlist = ndb.StringProperty(repeated=True)
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
sessionWishlist = messages.StringField(5, repeated=True)
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class Conference(ndb.Model):
"""Conference -- Conference object"""
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty()
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty()
month = ndb.IntegerProperty()
endDate = ndb.DateProperty()
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6)
month = messages.IntegerField(7)
maxAttendees = messages.IntegerField(8)
seatsAvailable = messages.IntegerField(9)
endDate = messages.StringField(10)
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class ConferenceForms(messages.Message):
"""ConferenceForms--multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class Speaker(ndb.Model):
"""Speaker -- Speaker object"""
name = ndb.StringProperty(required=True)
class SpeakerForm(messages.Message):
"""SpeakerForm - Speaker outbound form message"""
speaker = messages.StringField(1)
sessionNames = messages.StringField(2)
class Session(Conference):
"""Session -- Session object -- child of the Conference"""
name = ndb.StringProperty(required=True)
organizerUserId = ndb.StringProperty()
highlights = ndb.StringProperty()
speaker = ndb.KeyProperty(kind='Speaker', required=True)
location = ndb.StringProperty()
duration = ndb.StringProperty()
typeOfSession = ndb.StringProperty()
date = ndb.DateProperty()
startTime = ndb.TimeProperty()
class SessionForm(messages.Message):
"""SessionForm -- Session outbound from message"""
name = messages.StringField(1)
highlights = messages.StringField(2)
speaker = messages.StringField(3)
location = messages.StringField(4)
duration = messages.StringField(5)
typeOfSession = messages.StringField(6)
date = messages.StringField(7)
startTime = messages.StringField(8)
websafeKey = messages.StringField(9)
conferenceName = messages.StringField(10)
organizerUserId = messages.StringField(12)
class SessionForms(messages.Message):
"""SessionForms -- multiple Session outbound from message"""
items = messages.MessageField(SessionForm, 1, repeated=True)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms
multiple ConferenceQueryForm inbound form message
"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True)
class SessionQueryForm(messages.Message):
"""SessionQueryForm -- Session query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class SessionQueryForms(messages.Message):
"""SessionQueryForms -- multiple SessionQueryForm inbound from message"""
filters = messages.MessageField(SessionQueryForm, 1, repeated=True)
class SessionGetRequest(messages.Message):
speaker = messages.StringField(1)
|
{
"content_hash": "905608268da20076df95a93c3dd287c1",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 77,
"avg_line_length": 30.88268156424581,
"alnum_prop": 0.7212373371924746,
"repo_name": "YH-Zhou/ConferenceCentral",
"id": "ce38da4c5cbe58126e89c2d10042957b4c26bc1f",
"size": "5551",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "23913"
},
{
"name": "JavaScript",
"bytes": "32837"
},
{
"name": "Python",
"bytes": "47445"
}
],
"symlink_target": ""
}
|
"""DatabaseExtractors query upstream databases and save data on S3 before writing manifests."""
from contextlib import closing
from typing import Dict, List, Optional
import etl.db
from etl.config.dw import DataWarehouseSchema
from etl.extract.extractor import Extractor
from etl.relation import RelationDescription
class DatabaseExtractor(Extractor):
"""
Parent class for database extractors.
This class pulls out parameters and helps with partitioning and sampling.
"""
def __init__(
self,
name: str,
schemas: Dict[str, DataWarehouseSchema],
relations: List[RelationDescription],
max_partitions: int,
use_sampling: bool,
keep_going: bool,
dry_run: bool,
) -> None:
super().__init__(name, schemas, relations, keep_going, needs_to_wait=True, dry_run=dry_run)
self.max_partitions = max_partitions
self.use_sampling = use_sampling
def options_info(self) -> List[str]:
info = super().options_info()
info.append("max-partitions={}".format(self.max_partitions))
info.append("use-sampling={}".format(self.use_sampling))
return info
def use_sampling_with_table(self, size: int) -> bool:
"""Return True iff option `--use-sampling` appeared and table is large enough (> 100MB)."""
return self.use_sampling and (size > 100 * 1024**2)
def select_min_partition_size(self, size: int) -> int:
"""
Return min partition size to stay above when calculating the number of partitions.
Redshift documentation suggests to stay above 1MB for data files. Assuming that the CSV
files can be compressed 1:10 and that sampling will reduce that 1:10, then we have:
* with sampling: 100MB
* w/o sampling: 10MB
"""
if self.use_sampling_with_table(size):
return 100 * 1024**2
return 10 * 1024**2
def maximize_partitions(self, table_size: int) -> int:
"""
Return the largest "legal" number of partitions for this table.
Determine the maximum number of row-wise partitions a table can be divided into while
respecting a minimum partition size, and a limit on the number of partitions.
The number of partitions will (1) stay at or below the maximum defined as the default or in the
table design file, (2) stay small enough that each partition remains above the
minimum size, and (3) be a multiple of 4 where possible. (Rule 1 wins over rule 2.)
>>> extractor = DatabaseExtractor(
... "test", {}, [], 64, use_sampling=False, keep_going=False, dry_run=True
... )
>>> extractor.maximize_partitions(1)
1
>>> extractor.maximize_partitions(10485750)
1
>>> extractor.maximize_partitions(10485760)
1
>>> extractor.maximize_partitions(10485770)
1
>>> extractor.maximize_partitions(20971510)
1
>>> extractor.maximize_partitions(20971520)
2
>>> extractor.maximize_partitions(30971520)
2
>>> extractor.maximize_partitions(41943040)
4
>>> extractor.maximize_partitions(671088630)
60
>>> extractor.maximize_partitions(671088640)
64
>>> extractor.maximize_partitions(671088650)
64
>>> extractor.maximize_partitions(470958407680)
64
>>> extractor.maximize_partitions(0)
1
"""
min_partition_size = self.select_min_partition_size(table_size)
# Find largest value at or below max_partitions which is also a multiple of 4.
# (Using a multiple of 4 here since that's likely the min number of slices.)
partitions = max(range(0, self.max_partitions + 1, 4))
partition_size = table_size / partitions
while partition_size < min_partition_size and partitions > 1:
if partitions > 4:
partitions -= 4
elif partitions == 4:
partitions = 2
else:
partitions = 1
partition_size = table_size / partitions
self.logger.debug(
"Number of partitions: %d (max: %d), partition size: %d (table size: %d, min size: %d)",
partitions,
self.max_partitions,
int(partition_size),
table_size,
min_partition_size,
)
return partitions
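# Illustrative arithmetic for the rules above (not in the original module):
# with max_partitions=64 and sampling enabled, a 2 GiB table gets a 100 MiB
# minimum partition size, so the largest multiple of 4 whose partitions stay
# above that minimum is 20 (2 GiB / 24 would already fall below 100 MiB).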
def select_statement(
self, relation: RelationDescription, add_sampling_on_column: Optional[str]
) -> str:
"""
Create "SELECT statement with quoted identifiers and base WHERE clause.
Return something like
"SELECT id, name FROM table WHERE TRUE" or
"SELECT id, name FROM table WHERE ((id % 10) = 1)"
where the actual statement uses delimited identifiers.
Note the existence of the WHERE clause which allows appending more conditions.
"""
selected_columns = relation.get_columns_with_casts()
statement = """SELECT {} FROM {}""".format(
", ".join(selected_columns), relation.source_table_name
)
condition = relation.table_design.get("extract_settings", {}).get("condition", "TRUE")
if add_sampling_on_column is None:
statement += """ WHERE ({})""".format(condition)
else:
self.logger.info(
"Adding sampling on column '%s' while extracting '%s.%s'",
add_sampling_on_column,
relation.source_name,
relation.source_table_name.identifier,
)
statement += """ WHERE (({}) AND ("{}" % 10) = 1)""".format(
condition, add_sampling_on_column
)
return statement
def fetch_source_table_size(self, dsn_dict: Dict[str, str], relation: RelationDescription) -> int:
"""
Return size or estimated size of source table for this relation in bytes.
For source tables in a postgres database, fetch the actual size from pg_catalog tables.
Otherwise, pessimistically estimate a large fixed size.
"""
stmt = """
SELECT pg_catalog.pg_table_size(%s) AS "bytes"
, pg_catalog.pg_size_pretty(pg_catalog.pg_table_size(%s)) AS pretty_size
"""
table = relation.source_table_name
subprotocol = dsn_dict["subprotocol"]
if subprotocol.startswith("postgres"):
with closing(etl.db.connection(dsn_dict, readonly=True)) as conn:
rows = etl.db.query(conn, stmt, (str(table), str(table)))
bytes_size, pretty_size = rows[0]["bytes"], rows[0]["pretty_size"]
self.logger.info(
"Size of table '%s.%s': %s (%s)",
relation.source_name,
table.identifier,
bytes_size,
pretty_size,
)
else:
bytes_size, pretty_size = 671088640, "671 Mb"
self.logger.info(
"Pessimistic size estimate for non-postgres table '%s.%s': %s (%s)",
relation.source_name,
table.identifier,
bytes_size,
pretty_size,
)
return bytes_size
|
{
"content_hash": "ebdec7d3761e9302ba57bfe9753935a8",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 102,
"avg_line_length": 38.4,
"alnum_prop": 0.5911458333333334,
"repo_name": "harrystech/arthur-redshift-etl",
"id": "ba2b39de5b69e08d66e9a1d81828598d9484cfd6",
"size": "7296",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "python/etl/extract/database_extractor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1894"
},
{
"name": "Dockerfile",
"bytes": "3430"
},
{
"name": "HTML",
"bytes": "1551"
},
{
"name": "JavaScript",
"bytes": "5280"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Python",
"bytes": "578354"
},
{
"name": "Ruby",
"bytes": "82"
},
{
"name": "Shell",
"bytes": "87818"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import tf2
from tensorflow.contrib.solvers.python.ops import lanczos
from tensorflow.contrib.solvers.python.ops import util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test as test_lib
def _add_test(test, test_name, fn):
test_name = "_".join(["test", test_name])
if hasattr(test, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(test, test_name, fn)
class LanczosBidiagTest(test_lib.TestCase):
pass # Filled in below.
def _get_lanczos_tests(dtype_, use_static_shape_, shape_, orthogonalize_,
steps_):
def test_lanczos_bidiag(self):
np.random.seed(1)
a_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
tol = 1e-12 if dtype_ == np.float64 else 1e-5
with self.cached_session() as sess:
if use_static_shape_:
a = constant_op.constant(a_np)
else:
a = array_ops.placeholder(dtype_)
operator = util.create_operator(a)
lbd = lanczos.lanczos_bidiag(
operator, steps_, orthogonalize=orthogonalize_)
# The computed factorization should satisfy the equations
# A * V = U * B
# A' * U[:, :-1] = V * B[:-1, :]'
av = math_ops.matmul(a, lbd.v)
ub = lanczos.bidiag_matmul(lbd.u, lbd.alpha, lbd.beta, adjoint_b=False)
atu = math_ops.matmul(a, lbd.u[:, :-1], adjoint_a=True)
vbt = lanczos.bidiag_matmul(lbd.v, lbd.alpha, lbd.beta, adjoint_b=True)
if use_static_shape_:
av_val, ub_val, atu_val, vbt_val = sess.run([av, ub, atu, vbt])
else:
av_val, ub_val, atu_val, vbt_val = sess.run([av, ub, atu, vbt],
feed_dict={a: a_np})
self.assertAllClose(av_val, ub_val, atol=tol, rtol=tol)
self.assertAllClose(atu_val, vbt_val, atol=tol, rtol=tol)
return [test_lanczos_bidiag]
if __name__ == "__main__":
for dtype in np.float32, np.float64:
for shape in [[4, 4], [7, 4], [5, 8]]:
for orthogonalize in True, False:
for steps in range(1, min(shape) + 1):
# TF2 does not support placeholders so we skip it
for use_static_shape in set([True, tf2.enabled()]):
arg_string = "%s_%s_%s_%s_staticshape_%s" % (
dtype.__name__, "_".join(map(str, shape)), orthogonalize, steps,
use_static_shape)
for test_fn in _get_lanczos_tests(dtype, use_static_shape, shape,
orthogonalize, steps):
name = "_".join(["Lanczos", test_fn.__name__, arg_string])
_add_test(LanczosBidiagTest, name, test_fn)
test_lib.main()
|
{
"content_hash": "61e1f5a1e6b15173db27c58da14729fc",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 80,
"avg_line_length": 37.59493670886076,
"alnum_prop": 0.6080808080808081,
"repo_name": "theflofly/tensorflow",
"id": "f31bdbd399c9de4f2f5d557b75b1ece6d64a765e",
"size": "3660",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/solvers/python/kernel_tests/lanczos_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "644154"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59546729"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1507157"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908330"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94633"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15108"
},
{
"name": "Pascal",
"bytes": "770"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46310564"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "481712"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
}
|
"""
Display uptime for workers in running Storm topologies.
"""
from __future__ import absolute_import, print_function
from pkg_resources import parse_version
from prettytable import PrettyTable
from .common import add_environment
from ..util import get_ui_json, storm_lib_version
def subparser_hook(subparsers):
""" Hook to add subparser for this command. """
subparser = subparsers.add_parser('worker_uptime',
description=__doc__,
help=main.__doc__)
subparser.set_defaults(func=main)
add_environment(subparser)
def display_worker_uptime(env_name):
topology_summary = '/api/v1/topology/summary'
topology_detail = '/api/v1/topology/{topology}'
component = '/api/v1/topology/{topology}/component/{component}'
topo_summary_json = get_ui_json(env_name, topology_summary)
topology_ids = [x['id'] for x in topo_summary_json['topologies']]
worker_stats = []
for topology in topology_ids:
topology_detail_json = get_ui_json(env_name,
topology_detail.format(topology=topology))
spouts = [x['spoutId'] for x in topology_detail_json['spouts']]
bolts = [x['boltId'] for x in topology_detail_json['bolts']]
for comp in spouts + bolts:
comp_detail = get_ui_json(env_name,
component.format(topology=topology,
component=comp))
worker_stats += [(worker['host'], worker['id'], worker['uptime'],
worker['workerLogLink']) for worker in
comp_detail['executorStats']]
worker_stats = sorted(set(worker_stats))
print("# Worker Stats")
table = PrettyTable(["Host", "Worker ID", "Uptime", "Log URL"])
table.align = 'l'
table.align['Uptime'] = 'r'
for row in worker_stats:
table.add_row(row)
print(table)
print()
def main(args):
""" Display uptime for Storm workers. """
storm_version = storm_lib_version()
if storm_version >= parse_version('0.9.2-incubating'):
display_worker_uptime(args.environment)
else:
print("ERROR: Storm {0} does not support this command."
.format(storm_version))
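# Hedged usage note (not part of the original module): this subcommand is meant
# to be invoked through the streamparse CLI, roughly as
#
#   sparse worker_uptime -e <environment>
#
# which looks up the Storm UI for <environment>, walks every topology's spouts
# and bolts, and prints the host / worker id / uptime / log URL table built in
# display_worker_uptime(). The exact flag spelling comes from add_environment()
# and is an assumption here.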
|
{
"content_hash": "cccd0e8b71fed36591d118c6d4d9527d",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 85,
"avg_line_length": 37.58064516129032,
"alnum_prop": 0.5901287553648069,
"repo_name": "crohling/streamparse",
"id": "3d8f539b4020265eaa01bd428b69b4381fd7293c",
"size": "2330",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "streamparse/cli/worker_uptime.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "36956"
},
{
"name": "Python",
"bytes": "172161"
},
{
"name": "Shell",
"bytes": "1381"
}
],
"symlink_target": ""
}
|
__author__ = 'Shamal Faily'
from . import ObjectCreationParameters
class TemplateObstacleParameters(ObjectCreationParameters.ObjectCreationParameters):
def __init__(self,obsName,obsCat,obsDef,obsConcerns,obsResp,obsProb,obsProbRat):
ObjectCreationParameters.ObjectCreationParameters.__init__(self)
self.theName = obsName
self.theCategory = obsCat
self.theDefinition = obsDef
self.theConcerns = obsConcerns
self.theResponsibilities = obsResp
self.theProbability = obsProb
self.theProbabilityRationale = obsProbRat
def name(self): return self.theName
def category(self): return self.theCategory
def definition(self): return self.theDefinition
def concerns(self): return self.theConcerns
def responsibilities(self): return self.theResponsibilities
def probability(self): return self.theProbability
def probabilityRationale(self): return self.theProbabilityRationale
|
{
"content_hash": "9f601f706a4128b8b19704b755ffba84",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 84,
"avg_line_length": 41.54545454545455,
"alnum_prop": 0.7932166301969366,
"repo_name": "nathanbjenx/cairis",
"id": "dd6ad8c3e8c2351b3464509195afd601d5e88470",
"size": "1712",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cairis/core/TemplateObstacleParameters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "588306"
},
{
"name": "Dockerfile",
"bytes": "829"
},
{
"name": "Gherkin",
"bytes": "1615"
},
{
"name": "HTML",
"bytes": "1664076"
},
{
"name": "JavaScript",
"bytes": "416319"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "PLpgSQL",
"bytes": "1494775"
},
{
"name": "Python",
"bytes": "4006311"
},
{
"name": "Shell",
"bytes": "7035"
}
],
"symlink_target": ""
}
|
from bitcoin.main import *
from bitcoin.transaction import *
from bitcoin.bci import *
from bitcoin.deterministic import *
from bitcoin.blocks import *
# Takes privkey, address, value (satoshis), fee (satoshis)
def send(frm, to, value, fee=10000, **kwargs):
return sendmultitx(frm, to + ":" + str(value), fee, **kwargs)
# Takes privkey, "address1:value1, address2:value2" (satoshis), fee (satoshis)
def sendmultitx(frm, *args, **kwargs): # def sendmultitx(frm, tovalues, fee=10000, **kwargs)
tv, fee = args[:-1], int(args[-1])
network = kwargs.get('network', set_network(tv[0].split(':')[0]))
outs = []
outvalue = 0
for a in tv:
outs.append(a)
outvalue += int(a.split(":")[1])
u = unspent(privtoaddr(frm, (111 if network == 'testnet' else 0)), **kwargs)
u2 = select(u, int(outvalue)+int(fee))
argz = u2 + outs + [privtoaddr(frm, (111 if network == 'testnet' else 0)), fee]
tx = mksend(*argz)
tx2 = signall(tx, frm)
return pushtx(tx2, network, **kwargs)
# Takes address, address, value (satoshis), fee(satoshis)
def preparetx(frm, to, value, fee=10000, **kwargs):
"""Composite from fromAddr, toAddr, value & fee"""
tovalues = to + ":" + str(value)
return preparemultitx(frm, tovalues, fee, **kwargs)
# Takes address, address:value, address:value ... (satoshis), fee(satoshis)
def preparemultitx(frm, *args, **kwargs):
tv, fee = args[:-1], int(args[-1])
network = kwargs.get('network', set_network(tv[0].split(':')[0]))
outs = []
outvalue = 0
for a in tv:
outs.append(a)
outvalue += int(a.split(":")[1])
u = unspent(frm, **kwargs)
u2 = select(u, int(outvalue)+int(fee))
argz = u2 + outs + [frm, fee]
return mksend(*argz)
# BIP32 hierarchical deterministic multisig script
def bip32_hdm_script(*args):
if len(args) == 3:
keys, req, path = args
else:
i, keys, path = 0, [], []
while len(args[i]) > 40:
keys.append(args[i])
i += 1
req = int(args[i])
path = map(int, args[i+1:])
pubs = sorted(map(lambda x: bip32_descend(x, path), keys))
return mk_multisig_script(pubs, req)
# BIP32 hierarchical deterministic multisig address
def bip32_hdm_addr(*args):
return scriptaddr(bip32_hdm_script(*args))
# Setup a coinvault transaction
def setup_coinvault_tx(tx, script):
txobj = deserialize(tx)
N = deserialize_script(script)[-2]
for inp in txobj["ins"]:
inp["script"] = serialize_script([None] * (N+1) + [script])
return serialize(txobj)
# Sign a coinvault transaction
def sign_coinvault_tx(tx, priv):
pub = privtopub(priv)
txobj = deserialize(tx)
subscript = deserialize_script(txobj['ins'][0]['script'])
oscript = deserialize_script(subscript[-1])
k, pubs = oscript[0], oscript[1:-2]
for j in range(len(txobj['ins'])):
scr = deserialize_script(txobj['ins'][j]['script'])
for i, p in enumerate(pubs):
if p == pub:
scr[i+1] = multisign(tx, j, subscript[-1], priv)
if len(filter(lambda x: x, scr[1:-1])) >= k:
scr = [None] + filter(lambda x: x, scr[1:-1])[:k] + [scr[-1]]
txobj['ins'][j]['script'] = serialize_script(scr)
return serialize(txobj)
# Inspects a transaction
def inspect(tx, **kwargs):
d = deserialize(tx)
isum = 0
ins = {}
for _in in d['ins']:
h = _in['outpoint']['hash']
i = _in['outpoint']['index']
prevout = deserialize(fetchtx(h, **kwargs))['outs'][i]
isum += prevout['value']
a = script_to_address(prevout['script'])
ins[a] = ins.get(a, 0) + prevout['value']
outs = []
osum = 0
for _out in d['outs']:
outs.append({'address': script_to_address(_out['script']),
'value': _out['value']})
osum += _out['value']
return {
'fee': isum - osum,
'outs': outs,
'ins': ins
}
def merkle_prove(txhash):
blocknum = str(get_block_height(txhash))
header = get_block_header_data(blocknum)
hashes = get_txs_in_block(blocknum)
i = hashes.index(txhash)
return mk_merkle_proof(header, hashes, i)
def tx_size(txobj, unit="bytes"):
"""Get Tx size in bytes"""
if isinstance(txobj, dict):
return tx_size(serialize(txobj))
assert unit in ("bytes", "kilobytes")
if unit=='bytes':
return len(txobj)
elif unit=='kilobytes':
return len(txobj) / 1024.0
def realtime_tx_fee(txobj, priority='medium'):
"""Get realtime Tx Fee (in Satoshis) for txobj"""
assert priority in ('low', 'medium', 'high')
if isinstance(txobj, dict):
return realtime_tx_fee(serialize(txobj), priority)
tx_size_kbytes = tx_size(txobj, unit='kilobytes')
tx_fee_api = get_fee_estimate(priority)
return int(tx_size_kbytes * tx_fee_api)
#def estimate_tx_size(txobj):
# """Estimate Tx size in bytes"""
# if not isinstance(txobj, dict):
# txobj = deserialize(txobj)
# ins = txobj.get('ins', [])
# outs = txobj.get('outs', [])
# nins = len(ins) if ins else 1
# nouts = len(outs) if outs else 1
# return (nouts * 34) + (148 * nins) + 10
#
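# Hedged usage sketch (not part of the original module): preparing an unsigned
# transaction and asking for a realtime fee estimate. Addresses and amounts are
# placeholders, and both calls hit external block explorer / fee APIs.
#
#   unsigned_tx = preparetx('1FromAddrPlaceholder', '1ToAddrPlaceholder',
#                           50000, fee=10000)      # values in satoshis
#   suggested_fee = realtime_tx_fee(unsigned_tx, priority='medium')
#   print(tx_size(unsigned_tx, unit='kilobytes'), suggested_fee)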
|
{
"content_hash": "fcc2951536252edcb985e3a865ff67a1",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 94,
"avg_line_length": 31.721212121212123,
"alnum_prop": 0.5909438288116163,
"repo_name": "wizardofozzie/pybitcointools",
"id": "b7d7c8e11302a9ce7c4bce17532becd698a5a747",
"size": "5234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bitcoin/composite.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "697550"
}
],
"symlink_target": ""
}
|
extensions = ['hieroglyph']
templates_path = ['_templates']
exclude_patterns = ['_build']
source_suffix = '.rst'
master_doc = 'index'
project = u'Slides'
copyright = u'2017, Markus Zoeller'
author = u'Markus Zoeller'
# TODO: Use ``git describe`` here.
version = u'2017.12.28'
release = version # it's always the same
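# Hedged sketch for the TODO above (an assumption, not current project behaviour):
# derive the version from ``git describe`` and keep the hard-coded value as a
# fallback when git metadata is unavailable.
#
#   import subprocess
#   try:
#       version = subprocess.check_output(
#           ['git', 'describe', '--tags', '--always']).decode().strip()
#   except (OSError, subprocess.CalledProcessError):
#       pass  # keep the hard-coded version defined above
#   release = version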
language = "en"
pygments_style = 'sphinx'
todo_include_todos = True
# -- hieroglyph options ------------------------------------------------------
slide_theme = "single-level" # slides, slides2, single-level
|
{
"content_hash": "1a202b18cad4630f003e12dc41130163",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 78,
"avg_line_length": 25.285714285714285,
"alnum_prop": 0.623352165725047,
"repo_name": "markuszoeller/edu",
"id": "649a536f2c2aebd0686d54e0aed60404879acd43",
"size": "632",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "build/slides-conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2686"
},
{
"name": "Python",
"bytes": "7294"
},
{
"name": "Shell",
"bytes": "626"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import json
import netrc
from optparse import SUPPRESS_HELP
import os
import re
import shutil
import socket
import subprocess
import sys
import time
from pyversion import is_python3
if is_python3():
import urllib.parse
import xmlrpc.client
else:
import imp
import urlparse
import xmlrpclib
urllib = imp.new_module('urllib')
urllib.parse = urlparse
xmlrpc = imp.new_module('xmlrpc')
xmlrpc.client = xmlrpclib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
from git_command import GIT, git_require
from git_refs import R_HEADS, HEAD
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand
from error import RepoChangedException, GitError, ManifestParseError
from project import SyncBuffer
from progress import Progress
from wrapper import Wrapper
_ONE_DAY_S = 24 * 60 * 60
class _FetchError(Exception):
"""Internal error thrown in _FetchHelper() when we don't want stack trace."""
pass
class Sync(Command, MirrorSafeCommand):
jobs = 1
common = True
helpSummary = "Update working tree to the latest revision"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.
'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.
The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.
The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.
The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.
If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.
'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.
The -f/--force-broken option can be used to proceed with syncing
other projects if a project sync fails.
The --force-sync option can be used to overwrite existing git
directories if they have previously been linked to a different
object directory. WARNING: This may cause data to be lost since
refs may be removed when overwriting.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
The --fetch-submodules option enables fetching Git submodules
of a project from server.
The -c/--current-branch option can be used to only fetch objects that
are on the branch specified by a project's revision.
The --optimized-fetch option can be used to only fetch projects that
are fixed to a sha1 revision if the sha1 revision does not already
exist locally.
SSH Connections
---------------
If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.
To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:
export GIT_SSH=ssh
%prog
Compatibility
~~~~~~~~~~~~~
This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.
This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.
If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
def _Options(self, p, show_smart=True):
try:
self.jobs = self.manifest.default.sync_j
except ManifestParseError:
self.jobs = 1
p.add_option('-f', '--force-broken',
dest='force_broken', action='store_true',
help="continue sync even if a project fails to sync")
p.add_option('--force-sync',
dest='force_sync', action='store_true',
help="overwrite an existing git directory if it needs to "
"point to a different object directory. WARNING: this "
"may cause loss of data")
p.add_option('-l', '--local-only',
dest='local_only', action='store_true',
help="only update working tree, don't fetch")
p.add_option('-n', '--network-only',
dest='network_only', action='store_true',
help="fetch only, don't update working tree")
p.add_option('-d', '--detach',
dest='detach_head', action='store_true',
help='detach projects back to manifest revision')
p.add_option('-c', '--current-branch',
dest='current_branch_only', action='store_true',
help='fetch only current branch from server')
p.add_option('-q', '--quiet',
dest='quiet', action='store_true',
help='be more quiet')
p.add_option('-j', '--jobs',
dest='jobs', action='store', type='int',
help="projects to fetch simultaneously (default %d)" % self.jobs)
p.add_option('-m', '--manifest-name',
dest='manifest_name',
help='temporary manifest to use for this sync', metavar='NAME.xml')
p.add_option('--no-clone-bundle',
dest='no_clone_bundle', action='store_true',
help='disable use of /clone.bundle on HTTP/HTTPS')
p.add_option('-u', '--manifest-server-username', action='store',
dest='manifest_server_username',
help='username to authenticate with the manifest server')
p.add_option('-p', '--manifest-server-password', action='store',
dest='manifest_server_password',
help='password to authenticate with the manifest server')
p.add_option('--fetch-submodules',
dest='fetch_submodules', action='store_true',
help='fetch submodules from server')
p.add_option('--no-tags',
dest='no_tags', action='store_true',
help="don't fetch tags")
p.add_option('--optimized-fetch',
dest='optimized_fetch', action='store_true',
help='only fetch projects fixed to sha1 if revision does not exist locally')
if show_smart:
p.add_option('-s', '--smart-sync',
dest='smart_sync', action='store_true',
help='smart sync using manifest from a known good build')
p.add_option('-t', '--smart-tag',
dest='smart_tag', action='store',
help='smart sync using manifest from a known tag')
g = p.add_option_group('repo Version options')
g.add_option('--no-repo-verify',
dest='no_repo_verify', action='store_true',
help='do not verify repo source code')
g.add_option('--repo-upgraded',
dest='repo_upgraded', action='store_true',
help=SUPPRESS_HELP)
def _FetchProjectList(self, opt, projects, *args, **kwargs):
"""Main function of the fetch threads when jobs are > 1.
Delegates most of the work to _FetchHelper.
Args:
opt: Program options returned from optparse. See _Options().
projects: Projects to fetch.
*args, **kwargs: Remaining arguments to pass to _FetchHelper. See the
_FetchHelper docstring for details.
"""
for project in projects:
success = self._FetchHelper(opt, project, *args, **kwargs)
if not success and not opt.force_broken:
break
def _FetchHelper(self, opt, project, lock, fetched, pm, sem, err_event):
"""Fetch git objects for a single project.
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to fetch.
lock: Lock for accessing objects that are shared amongst multiple
_FetchHelper() threads.
fetched: set object that we will add project.gitdir to when we're done
(with our lock held).
pm: Instance of a Progress object. We will call pm.update() (with our
lock held).
sem: We'll release() this semaphore when we exit so that another thread
can be started up.
err_event: We'll set this event in the case of an error (after printing
out info about the error).
Returns:
Whether the fetch was successful.
"""
# Set to True once we've acquired the lock.
did_lock = False
if not opt.quiet:
print('Fetching project %s' % project.name)
# Encapsulate everything in a try/except/finally so that:
# - We always set err_event in the case of an exception.
# - We always make sure we call sem.release().
# - We always make sure we unlock the lock if we locked it.
try:
try:
start = time.time()
success = project.Sync_NetworkHalf(
quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
force_sync=opt.force_sync,
clone_bundle=not opt.no_clone_bundle,
no_tags=opt.no_tags, archive=self.manifest.IsArchive,
optimized_fetch=opt.optimized_fetch)
self._fetch_times.Set(project, time.time() - start)
# Lock around all the rest of the code, since printing, updating a set
# and Progress.update() are not thread safe.
lock.acquire()
did_lock = True
if not success:
print('error: Cannot fetch %s' % project.name, file=sys.stderr)
if opt.force_broken:
print('warn: --force-broken, continuing to sync',
file=sys.stderr)
else:
raise _FetchError()
fetched.add(project.gitdir)
pm.update()
except _FetchError:
err_event.set()
except Exception as e:
print('error: Cannot fetch %s (%s: %s)' \
% (project.name, type(e).__name__, str(e)), file=sys.stderr)
err_event.set()
raise
finally:
if did_lock:
lock.release()
sem.release()
return success
def _Fetch(self, projects, opt):
fetched = set()
lock = _threading.Lock()
pm = Progress('Fetching projects', len(projects))
objdir_project_map = dict()
for project in projects:
objdir_project_map.setdefault(project.objdir, []).append(project)
threads = set()
sem = _threading.Semaphore(self.jobs)
err_event = _threading.Event()
for project_list in objdir_project_map.values():
# Check for any errors before running any more tasks.
# ...we'll let existing threads finish, though.
if err_event.isSet() and not opt.force_broken:
break
sem.acquire()
kwargs = dict(opt=opt,
projects=project_list,
lock=lock,
fetched=fetched,
pm=pm,
sem=sem,
err_event=err_event)
if self.jobs > 1:
t = _threading.Thread(target = self._FetchProjectList,
kwargs = kwargs)
# Ensure that Ctrl-C will not freeze the repo process.
t.daemon = True
threads.add(t)
t.start()
else:
self._FetchProjectList(**kwargs)
for t in threads:
t.join()
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.isSet():
print('\nerror: Exited sync due to fetch errors', file=sys.stderr)
sys.exit(1)
pm.end()
self._fetch_times.Save()
if not self.manifest.IsArchive:
self._GCProjects(projects)
return fetched
def _GCProjects(self, projects):
gitdirs = {}
for project in projects:
gitdirs[project.gitdir] = project.bare_git
has_dash_c = git_require((1, 7, 2))
if multiprocessing and has_dash_c:
cpu_count = multiprocessing.cpu_count()
else:
cpu_count = 1
jobs = min(self.jobs, cpu_count)
if jobs < 2:
for bare_git in gitdirs.values():
bare_git.gc('--auto')
return
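# Split the available CPUs across the concurrent 'git gc' invocations so the
# combined pack threads roughly match the machine's core count.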
config = {'pack.threads': cpu_count // jobs if cpu_count > jobs else 1}
threads = set()
sem = _threading.Semaphore(jobs)
err_event = _threading.Event()
def GC(bare_git):
try:
try:
bare_git.gc('--auto', config=config)
except GitError:
err_event.set()
except:
err_event.set()
raise
finally:
sem.release()
for bare_git in gitdirs.values():
if err_event.isSet():
break
sem.acquire()
t = _threading.Thread(target=GC, args=(bare_git,))
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
if err_event.isSet():
print('\nerror: Exited sync due to gc errors', file=sys.stderr)
sys.exit(1)
def _ReloadManifest(self, manifest_name=None):
if manifest_name:
# Override calls _Unload already
self.manifest.Override(manifest_name)
else:
self.manifest._Unload()
def UpdateProjectList(self):
new_project_paths = []
for project in self.GetProjects(None, missing_ok=True):
if project.relpath:
new_project_paths.append(project.relpath)
file_name = 'project.list'
file_path = os.path.join(self.manifest.repodir, file_name)
old_project_paths = []
if os.path.exists(file_path):
fd = open(file_path, 'r')
try:
old_project_paths = fd.read().split('\n')
finally:
fd.close()
for path in old_project_paths:
if not path:
continue
if path not in new_project_paths:
# If the path has already been deleted, we don't need to do it
if os.path.exists(self.manifest.topdir + '/' + path):
gitdir = os.path.join(self.manifest.topdir, path, '.git')
project = Project(
manifest = self.manifest,
name = path,
remote = RemoteSpec('origin'),
gitdir = gitdir,
objdir = gitdir,
worktree = os.path.join(self.manifest.topdir, path),
relpath = path,
revisionExpr = 'HEAD',
revisionId = None,
groups = None)
if project.IsDirty():
print('error: Cannot remove project "%s": uncommitted changes '
'are present' % project.relpath, file=sys.stderr)
print(' commit changes, then run sync again',
file=sys.stderr)
return -1
else:
print('Deleting obsolete path %s' % project.worktree,
file=sys.stderr)
shutil.rmtree(project.worktree)
# Try deleting parent subdirs if they are empty
project_dir = os.path.dirname(project.worktree)
while project_dir != self.manifest.topdir:
try:
os.rmdir(project_dir)
except OSError:
break
project_dir = os.path.dirname(project_dir)
new_project_paths.sort()
fd = open(file_path, 'w')
try:
fd.write('\n'.join(new_project_paths))
fd.write('\n')
finally:
fd.close()
return 0
def Execute(self, opt, args):
if opt.jobs:
self.jobs = opt.jobs
if self.jobs > 1:
soft_limit, _ = _rlimit_nofile()
self.jobs = min(self.jobs, (soft_limit - 5) // 3)
if opt.network_only and opt.detach_head:
print('error: cannot combine -n and -d', file=sys.stderr)
sys.exit(1)
if opt.network_only and opt.local_only:
print('error: cannot combine -n and -l', file=sys.stderr)
sys.exit(1)
if opt.manifest_name and opt.smart_sync:
print('error: cannot combine -m and -s', file=sys.stderr)
sys.exit(1)
if opt.manifest_name and opt.smart_tag:
print('error: cannot combine -m and -t', file=sys.stderr)
sys.exit(1)
if opt.manifest_server_username or opt.manifest_server_password:
if not (opt.smart_sync or opt.smart_tag):
print('error: -u and -p may only be combined with -s or -t',
file=sys.stderr)
sys.exit(1)
if None in [opt.manifest_server_username, opt.manifest_server_password]:
print('error: both -u and -p must be given', file=sys.stderr)
sys.exit(1)
if opt.manifest_name:
self.manifest.Override(opt.manifest_name)
manifest_name = opt.manifest_name
smart_sync_manifest_name = "smart_sync_override.xml"
smart_sync_manifest_path = os.path.join(
self.manifest.manifestProject.worktree, smart_sync_manifest_name)
if opt.smart_sync or opt.smart_tag:
if not self.manifest.manifest_server:
print('error: cannot smart sync: no manifest server defined in '
'manifest', file=sys.stderr)
sys.exit(1)
manifest_server = self.manifest.manifest_server
if not opt.quiet:
print('Using manifest server %s' % manifest_server)
if '@' not in manifest_server:
username = None
password = None
if opt.manifest_server_username and opt.manifest_server_password:
username = opt.manifest_server_username
password = opt.manifest_server_password
else:
try:
info = netrc.netrc()
except IOError:
print('.netrc file does not exist or could not be opened',
file=sys.stderr)
else:
try:
parse_result = urllib.parse.urlparse(manifest_server)
if parse_result.hostname:
username, _account, password = \
info.authenticators(parse_result.hostname)
except TypeError:
# TypeError is raised when the given hostname is not present
# in the .netrc file.
print('No credentials found for %s in .netrc'
% parse_result.hostname, file=sys.stderr)
except netrc.NetrcParseError as e:
print('Error parsing .netrc file: %s' % e, file=sys.stderr)
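# Embed the credentials directly in the URL, turning e.g.
# 'https://example.com/manifest' into 'https://user:pass@example.com/manifest'.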
if (username and password):
manifest_server = manifest_server.replace('://', '://%s:%s@' %
(username, password),
1)
try:
server = xmlrpc.client.Server(manifest_server)
if opt.smart_sync:
p = self.manifest.manifestProject
b = p.GetBranch(p.CurrentBranch)
branch = b.merge
if branch.startswith(R_HEADS):
branch = branch[len(R_HEADS):]
env = os.environ.copy()
if 'SYNC_TARGET' in env:
target = env['SYNC_TARGET']
[success, manifest_str] = server.GetApprovedManifest(branch, target)
elif 'TARGET_PRODUCT' in env and 'TARGET_BUILD_VARIANT' in env:
target = '%s-%s' % (env['TARGET_PRODUCT'],
env['TARGET_BUILD_VARIANT'])
[success, manifest_str] = server.GetApprovedManifest(branch, target)
else:
[success, manifest_str] = server.GetApprovedManifest(branch)
else:
assert(opt.smart_tag)
[success, manifest_str] = server.GetManifest(opt.smart_tag)
if success:
manifest_name = smart_sync_manifest_name
try:
f = open(smart_sync_manifest_path, 'w')
try:
f.write(manifest_str)
finally:
f.close()
except IOError as e:
print('error: cannot write manifest to %s:\n%s'
% (smart_sync_manifest_path, e),
file=sys.stderr)
sys.exit(1)
self._ReloadManifest(manifest_name)
else:
print('error: manifest server RPC call failed: %s' %
manifest_str, file=sys.stderr)
sys.exit(1)
except (socket.error, IOError, xmlrpc.client.Fault) as e:
print('error: cannot connect to manifest server %s:\n%s'
% (self.manifest.manifest_server, e), file=sys.stderr)
sys.exit(1)
except xmlrpc.client.ProtocolError as e:
print('error: cannot connect to manifest server %s:\n%d %s'
% (self.manifest.manifest_server, e.errcode, e.errmsg),
file=sys.stderr)
sys.exit(1)
else: # Not smart sync or smart tag mode
if os.path.isfile(smart_sync_manifest_path):
try:
os.remove(smart_sync_manifest_path)
except OSError as e:
print('error: failed to remove existing smart sync override manifest: %s' %
e, file=sys.stderr)
rp = self.manifest.repoProject
rp.PreSync()
mp = self.manifest.manifestProject
mp.PreSync()
if opt.repo_upgraded:
_PostRepoUpgrade(self.manifest, quiet=opt.quiet)
if not opt.local_only:
mp.Sync_NetworkHalf(quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
no_tags=opt.no_tags,
optimized_fetch=opt.optimized_fetch)
if mp.HasChanges:
syncbuf = SyncBuffer(mp.config)
mp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
self._ReloadManifest(manifest_name)
if opt.jobs is None:
self.jobs = self.manifest.default.sync_j
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
self._fetch_times = _FetchTimes(self.manifest)
if not opt.local_only:
to_fetch = []
now = time.time()
if _ONE_DAY_S <= (now - rp.LastFetch):
to_fetch.append(rp)
to_fetch.extend(all_projects)
to_fetch.sort(key=self._fetch_times.Get, reverse=True)
fetched = self._Fetch(to_fetch, opt)
_PostRepoFetch(rp, opt.no_repo_verify)
if opt.network_only:
# bail out now; the rest touches the working tree
return
# Iteratively fetch missing and/or nested unregistered submodules
previously_missing_set = set()
while True:
self._ReloadManifest(manifest_name)
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
missing = []
for project in all_projects:
if project.gitdir not in fetched:
missing.append(project)
if not missing:
break
# Stop fetching repos that are genuinely missing: if the set of missing
# repos has not changed since the last fetch, break out of the loop.
missing_set = set(p.name for p in missing)
if previously_missing_set == missing_set:
break
previously_missing_set = missing_set
fetched.update(self._Fetch(missing, opt))
if self.manifest.IsMirror or self.manifest.IsArchive:
# bail out now, we have no working tree
return
if self.UpdateProjectList():
sys.exit(1)
syncbuf = SyncBuffer(mp.config,
detach_head = opt.detach_head)
pm = Progress('Syncing work tree', len(all_projects))
for project in all_projects:
pm.update()
if project.worktree:
project.Sync_LocalHalf(syncbuf, force_sync=opt.force_sync)
pm.end()
print(file=sys.stderr)
if not syncbuf.Finish():
sys.exit(1)
# If there's a notice that's supposed to print at the end of the sync, print
# it now...
if self.manifest.notice:
print(self.manifest.notice)
def _PostRepoUpgrade(manifest, quiet=False):
wrapper = Wrapper()
if wrapper.NeedSetupGnuPG():
wrapper.SetupGnuPG(quiet)
for project in manifest.projects:
if project.Exists:
project.PostRepoUpgrade()
def _PostRepoFetch(rp, no_repo_verify=False, verbose=False):
if rp.HasChanges:
print('info: A new version of repo is available', file=sys.stderr)
print(file=sys.stderr)
if no_repo_verify or _VerifyTag(rp):
syncbuf = SyncBuffer(rp.config)
rp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
print('info: Restarting repo with latest version', file=sys.stderr)
raise RepoChangedException(['--repo-upgraded'])
else:
print('warning: Skipped upgrade to unverified version', file=sys.stderr)
else:
if verbose:
print('repo version %s is current' % rp.work_git.describe(HEAD),
file=sys.stderr)
def _VerifyTag(project):
gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
if not os.path.exists(gpg_dir):
print('warning: GnuPG was not available during last "repo init"\n'
'warning: Cannot automatically authenticate repo.',
file=sys.stderr)
return True
try:
cur = project.bare_git.describe(project.GetRevisionId())
except GitError:
cur = None
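# A describe string such as 'v1.12.4-5-g1a2b3c' means the current revision
# is some commits past the nearest tag, so there is no signed tag pointing
# at it and the branch is reported as unsigned below.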
if not cur \
or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
rev = project.revisionExpr
if rev.startswith(R_HEADS):
rev = rev[len(R_HEADS):]
print(file=sys.stderr)
print("warning: project '%s' branch '%s' is not signed"
% (project.name, rev), file=sys.stderr)
return False
env = os.environ.copy()
env['GIT_DIR'] = project.gitdir.encode()
env['GNUPGHOME'] = gpg_dir.encode()
cmd = [GIT, 'tag', '-v', cur]
proc = subprocess.Popen(cmd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = env)
out = proc.stdout.read()
proc.stdout.close()
err = proc.stderr.read()
proc.stderr.close()
if proc.wait() != 0:
print(file=sys.stderr)
print(out, file=sys.stderr)
print(err, file=sys.stderr)
print(file=sys.stderr)
return False
return True
class _FetchTimes(object):
_ALPHA = 0.5
def __init__(self, manifest):
self._path = os.path.join(manifest.repodir, '.repo_fetchtimes.json')
self._times = None
self._seen = set()
def Get(self, project):
self._Load()
return self._times.get(project.name, _ONE_DAY_S)
def Set(self, project, t):
self._Load()
name = project.name
old = self._times.get(name, t)
self._seen.add(name)
a = self._ALPHA
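# Exponentially-weighted moving average: blend the new timing into the
# previous estimate so a single unusually slow fetch does not dominate the
# ordering. With _ALPHA = 0.5, an old estimate of 30s and a new fetch of
# 10s store 20.0s.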
self._times[name] = (a*t) + ((1-a) * old)
def _Load(self):
if self._times is None:
try:
f = open(self._path)
try:
self._times = json.load(f)
finally:
f.close()
except (IOError, ValueError):
try:
os.remove(self._path)
except OSError:
pass
self._times = {}
def Save(self):
if self._times is None:
return
to_delete = []
for name in self._times:
if name not in self._seen:
to_delete.append(name)
for name in to_delete:
del self._times[name]
try:
f = open(self._path, 'w')
try:
json.dump(self._times, f, indent=2)
finally:
f.close()
except (IOError, TypeError):
try:
os.remove(self._path)
except OSError:
pass
|
{
"content_hash": "5d8c72176c51afe026225a328464da7b",
"timestamp": "",
"source": "github",
"line_count": 837,
"max_line_length": 93,
"avg_line_length": 34.432497013142175,
"alnum_prop": 0.6080152671755725,
"repo_name": "4455jkjh/repo",
"id": "43d450be5b8b6a739fff3b70c0f866cb12967b73",
"size": "29423",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "subcmds/sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "362956"
},
{
"name": "Shell",
"bytes": "6043"
}
],
"symlink_target": ""
}
|
from django import template
from django.template.loader import render_to_string
register = template.Library()
@register.tag
def pagemenu(parser, token):
"""
Output pagemenu.
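Usage sketch, assuming a ``pagemenu`` object is exposed in the template
context and the template is rendered with ``request`` available::

    {% load pagemenu_inclusion_tags %}
    {% pagemenu pagemenu %}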
"""
try:
tag_name, pagemenu = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError('pagemenu tag requires 1 argument (pagemenu), %s given' % (len(token.split_contents()) - 1))
return PageMenuNode(pagemenu)
class PageMenuNode(template.Node):
def __init__(self, pagemenu):
self.pagemenu = template.Variable(pagemenu)
def render(self, context):
pagemenu = self.pagemenu.resolve(context)
context = {
'request': context['request'],
'pagemenu': pagemenu,
}
return render_to_string('pagemenu/inclusion_tags/pagemenu.html', context)
|
{
"content_hash": "08a3a4a6e354c2d8754f6e5ed7dc4808",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 135,
"avg_line_length": 31.22222222222222,
"alnum_prop": 0.6571767497034401,
"repo_name": "praekelt/django-pagemenu",
"id": "604ac3fb828273a8661e66ce5d14c1405cbff0a7",
"size": "843",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pagemenu/templatetags/pagemenu_inclusion_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10681"
}
],
"symlink_target": ""
}
|
# -*- coding: utf-8 -*-
# Copyright 2008-2013 Alex Zaddach (mrzmanwiki@gmail.com), bjweeks
# This file is part of wikitools.
# wikitools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# wikitools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with wikitools. If not, see <http://www.gnu.org/licenses/>.
import wiki
import page
import api
import socket
import re
class User:
"""A user on the wiki"""
def __init__(self, site, name, check=True):
"""
wiki - A wiki object
name - The username, as a string
check - Checks for existence, normalizes name
"""
self.site = site
self.name = name.strip()
if not isinstance(self.name, unicode):
self.name = unicode(self.name, 'utf8')
self.exists = True # If we're not going to check, assume it does
self.blocked = None # So we can tell the difference between blocked/not blocked/haven't checked
self.editcount = -1
self.groups = []
self.id = 0
if check:
self.setUserInfo()
self.isIP = False
self.IPcheck()
self.page = page.Page(self.site, ':'.join([self.site.namespaces[2]['*'], self.name]), check=check, followRedir=False)
def IPcheck(self):
try: #IPv4 check
s = socket.inet_aton(self.name.replace(' ', '_'))
if socket.inet_ntoa(s) == self.name:
self.isIP = True
self.exists = False
return
except:
pass
try:
s = socket.inet_pton(socket.AF_INET6, self.name.replace(' ', '_'))
if self.IPnorm(socket.inet_ntop(socket.AF_INET6, s)) == self.IPnorm(self.name):
self.isIP = True
self.exists = False
self.name = self.IPnorm(self.name)
return
except:
pass
def IPnorm(self, ip):
"""This is basically a port of MediaWiki's IP::sanitizeIP but assuming no CIDR ranges"""
ip = ip.upper()
# Expand zero abbreviations
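# e.g. '1::5:6' expands to '1:0:0:0:0:0:5:6'; the '::' stands in for the
# missing zero groups.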
abbrevPos = ip.find('::')
if abbrevPos != -1:
addressEnd = len(ip) - 1
# If the '::' is at the beginning...
if abbrevPos == 0:
repeat = '0:'
extra = '0' if ip == '::' else ''
pad = 9
elif abbrevPos == addressEnd - 1:
repeat = ':0'
extra = ''
pad = 9
else:
repeat = ':0'
extra = ':'
pad = 8
ip = ip.replace( '::', repeat*(pad-ip.count(':'))+extra)
# Remove leading zeros from each block as needed
ip = re.sub(r'(^|:)0+([0-9A-Fa-f]{1,4})', r'\1\2', ip)
return ip
def setUserInfo(self):
"""Sets basic user info"""
params = {
'action': 'query',
'list': 'users',
'ususers':self.name,
'usprop':'blockinfo|groups|editcount'
}
req = api.APIRequest(self.site, params)
response = req.query()
user = response['query']['users'][0]
self.name = user['name']
if 'missing' in user or 'invalid' in user:
self.exists = False
return
self.id = int(user['userid'])
self.editcount = int(user['editcount'])
if 'groups' in user:
self.groups = user['groups']
if 'blockedby' in user:
self.blocked = True
else:
self.blocked = False
return self
def getTalkPage(self, check=True, followRedir=False):
"""Convenience function to get an object for the user's talk page"""
return page.Page(self.site, ':'.join([self.site.namespaces[3]['*'], self.name]), check=check, followRedir=False)
def isBlocked(self, force=False):
"""Determine if a user is blocked"""
if self.blocked is not None and not force:
return self.blocked
params = {'action':'query',
'list':'blocks',
'bkusers':self.name,
'bkprop':'id'
}
req = api.APIRequest(self.site, params)
res = req.query(False)
if len(res['query']['blocks']) > 0:
self.blocked = True
else:
self.blocked = False
return self.blocked
def block(self, reason=False, expiry=False, anononly=False, nocreate=False, autoblock=False, noemail=False, hidename=False, allowusertalk=False, reblock=False):
"""Block the user
Params are the same as the API
reason - block reason
expiry - block expiration
anononly - block anonymous users only
nocreate - disable account creation
autoblock - block IP addresses used by the user
noemail - block user from sending email through the site
hidename - hide the username from the log (requires hideuser right)
allowusertalk - allow the user to edit their talk page
reblock - overwrite existing block
"""
token = self.site.getToken('csrf')
params = {'action':'block',
'user':self.name,
'token':token
}
if reason:
params['reason'] = reason
if expiry:
params['expiry'] = expiry
if anononly:
params['anononly'] = ''
if nocreate:
params['nocreate'] = ''
if autoblock:
params['autoblock'] = ''
if noemail:
params['noemail'] = ''
if hidename:
params['hidename'] = ''
if allowusertalk:
params['allowusertalk'] = ''
if reblock:
params['reblock'] = ''
req = api.APIRequest(self.site, params, write=False)
res = req.query()
if 'block' in res:
self.blocked = True
return res
def unblock(self, reason=False):
"""Unblock the user
reason - reason for the log
"""
token = self.site.getToken('csrf')
params = {
'action': 'unblock',
'user': self.name,
'token': token
}
if reason:
params['reason'] = reason
req = api.APIRequest(self.site, params, write=False)
res = req.query()
if 'unblock' in res:
self.blocked = False
return res
def __hash__(self):
return hash(self.name) ^ hash(self.site.apibase)
def __eq__(self, other):
if not isinstance(other, User):
return False
if self.name == other.name and self.site == other.site:
return True
return False
def __ne__(self, other):
if not isinstance(other, User):
return True
if self.name == other.name and self.site == other.site:
return False
return True
def __str__(self):
return self.__class__.__name__ + ' ' + repr(self.name) + " on " + repr(self.site.domain)
def __repr__(self):
return "<"+self.__module__+'.'+self.__class__.__name__+" "+repr(self.name)+" on "+repr(self.site.apibase)+">"
|
{
"content_hash": "23557f5375965e604cbcfd838e3191bb",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 161,
"avg_line_length": 29.08108108108108,
"alnum_prop": 0.6356877323420075,
"repo_name": "rrallo/GenomeBrowser",
"id": "85ebbe69c1d57d8005f03b2205eff240d018a20d",
"size": "6458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GenomeBrowser/wikitools/user.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Objective-C",
"bytes": "15494"
},
{
"name": "Python",
"bytes": "79362"
}
],
"symlink_target": ""
}
|
import logging
import time
import zlib
import sys
import socket
import os
from ..packages import six
from ..packages import requests
from .. import version as agent_version
from .addresses import platform_url, proxy_details
from .exceptions import (NetworkInterfaceException, ForceAgentRestart,
ForceAgentDisconnect, DiscardDataForRequest, RetryDataForRequest,
ServerIsUnavailable)
from ..common.encoding_utils import json_encode, json_decode
_logger = logging.getLogger(__name__)
# User agent string that must be used in all requests. The data collector
# does not rely on this, but it is used to identify specific agents if there
# is a problem with how the data collector handles requests.
USER_AGENT = 'NewRelic-PythonAgent/%s (Python %s %s)' % (
agent_version, sys.version.split()[0], sys.platform)
# Platform agent collector interface.
class PlatformInterface(object):
def __init__(self, license_key, host='platform-api.newrelic.com',
port=None, ssl=True, timeout=30.0, proxy_host=None,
proxy_port=None, proxy_user=None, proxy_pass=None):
self.license_key = license_key
self.host = host
self.port = port
self.ssl = ssl
self.timeout = timeout
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
def send_request(self, url, proxies, session, payload=()):
"""Constructs and sends a request to the data collector."""
headers = {}
config = {}
start = time.time()
# Validate that the license key was actually set and if not replace
# it with a string which makes it more obvious it was not set.
license_key = self.license_key
if not self.license_key:
license_key = 'NO LICENSE KEY WAS SET IN AGENT CONFIGURATION'
headers['User-Agent'] = USER_AGENT
headers['Content-Encoding'] = 'identity'
headers['X-License-Key'] = license_key
# At this time we use JSON content encoding for the data being
# sent. If an error does occur when encoding the JSON, then it
# isn't likely going to work later on in a subsequent request
# with same data, even if aggregated with other data, so we need
# to log the details and then flag that data should be thrown
# away. Don't mind being noisy in the log in this situation
# as it would indicate a problem with the implementation of the
# agent.
try:
data = json_encode(payload)
except Exception as exc:
_logger.error('Error encoding data for JSON payload '
'with payload of %r. Exception which occurred was %r. '
'Please report this problem to New Relic support.',
payload, exc)
raise DiscardDataForRequest(str(exc))
# Log details of call and/or payload for debugging. Use the JSON
# encoded value so we know that what is encoded is correct.
_logger.debug('Calling data collector to report custom metrics '
'with payload=%r.', data)
# Compress the serialized JSON being sent as content if over 64KiB
# in size. If less than 2MB in size compress for speed. If over
# 2MB then compress for smallest size. This parallels what the Ruby
# agent does.
if len(data) > 64*1024:
headers['Content-Encoding'] = 'deflate'
level = 1 if len(data) < 2000000 else 9
data = zlib.compress(six.b(data), level)
# If there is no requests session object provided for making
# requests create one now. We want to close this as soon as we
# are done with it.
auto_close_session = False
if not session:
session = requests.session()
auto_close_session = True
# The 'requests' library can raise a number of exceptions derived
# from 'RequestException' before we even manage to get a connection
# to the data collector. The data collector can then generate a
# number of different types of HTTP errors for requests.
try:
r = session.post(url, headers=headers, proxies=proxies,
timeout=self.timeout, data=data)
# Read the content now so we can force close the socket
# connection if this is a transient session as quickly
# as possible.
content = r.content
except requests.RequestException as exc:
if not self.proxy_host or not self.proxy_port:
_logger.warning('Data collector is not contactable. This can '
'be because of a network issue or because of the data '
'collector being restarted. In the event that contact '
'cannot be made after a period of time then please '
'report this problem to New Relic support for further '
'investigation. The error raised was %r.', exc)
else:
_logger.warning('Data collector is not contactable via the '
'proxy host %r on port %r with proxy user of %r. This '
'can be because of a network issue or because of the '
'data collector being restarted. In the event that '
'contact cannot be made after a period of time then '
'please report this problem to New Relic support for '
'further investigation. The error raised was %r.',
self.proxy_host, self.proxy_port, self.proxy_user, exc)
raise RetryDataForRequest(str(exc))
finally:
if auto_close_session:
session.close()
session = None
if r.status_code != 200:
_logger.debug('Received a non 200 HTTP response from the data '
'collector where url=%r, license_key=%r, headers=%r, '
'status_code=%r and content=%r.', url, license_key,
headers, r.status_code, content)
if r.status_code == 400:
if headers['Content-Encoding'] == 'deflate':
data = zlib.decompress(data)
_logger.error('Data collector is indicating that a bad '
'request has been submitted for url %r, headers of %r '
'and payload of %r with response of %r. Please report '
'this problem to New Relic support.', url, headers, data,
content)
raise DiscardDataForRequest()
elif r.status_code == 403:
_logger.error('Data collector is indicating that the license '
'key %r is not valid.', license_key)
raise DiscardDataForRequest()
elif r.status_code == 413:
_logger.warning('Data collector is indicating that a request '
'was received where the request content size '
'was over the maximum allowed size limit. The length of '
'the request content was %d. If this keeps occurring on a '
'regular basis, please report this problem to New Relic '
'support for further investigation.', len(data))
raise DiscardDataForRequest()
elif r.status_code in (503, 504):
_logger.warning('Data collector is unavailable. This can be a '
'transient issue because of the data collector or our '
'core application being restarted. If the issue persists '
'it can also be indicative of a problem with our servers. '
'In the event that availability of our servers is not '
'restored after a period of time then please report this '
'problem to New Relic support for further investigation.')
raise ServerIsUnavailable()
elif r.status_code != 200:
if not self.proxy_host or not self.proxy_port:
_logger.warning('An unexpected HTTP response was received '
'from the data collector of %r. The payload for '
'the request was %r. If this issue persists then '
'please report this problem to New Relic support '
'for further investigation.', r.status_code, payload)
else:
_logger.warning('An unexpected HTTP response was received '
'from the data collector of %r while connecting '
'via proxy host %r on port %r with proxy user of %r. '
'The payload for the request was %r. If this issue '
'persists then please report this problem to New '
'Relic support for further investigation.',
r.status_code, self.proxy_host, self.proxy_port,
self.proxy_user, payload)
raise DiscardDataForRequest()
# Log details of response payload for debugging. Use the JSON
# encoded value so we know what the original encoded value was.
duration = time.time() - start
_logger.debug('Valid response from data collector after %.2f '
'seconds with content=%r.', duration, content)
# If we got this far we should have a legitimate response from the
# data collector. The response is JSON so we need to decode it.
# Everything will come back as Unicode.
try:
if six.PY3:
content = content.decode('UTF-8')
result = json_decode(content)
except Exception as exc:
_logger.error('Error decoding data for JSON payload '
'with payload of %r. Exception which occurred was %r. '
'Please report this problem to New Relic support.',
content, exc)
raise DiscardDataForRequest(str(exc))
# The decoded JSON can be either for a successful response or an
# error. A successful response carries a 'status' element and an
# error an 'error' element.
if 'status' in result:
return result['status']
error_message = result['error']
# Now we need to check for server side exceptions. The following
# exceptions can occur for abnormal events.
_logger.debug('Received an exception from the data collector where '
'url=%r, license_key=%r, headers=%r and error_message=%r. ',
url, license_key, headers, error_message)
raise DiscardDataForRequest(error_message)
def create_session(self):
url = platform_url(self.host, self.port, self.ssl)
proxies = proxy_details(None, self.proxy_host, self.proxy_port,
self.proxy_user, self.proxy_pass)
return PlatformSession(self, url, proxies)
class PlatformSession(object):
def __init__(self, interface, platform_url, http_proxies):
self._interface = interface
self._platform_url = platform_url
self._http_proxies = http_proxies
self._requests_session_object = None
@property
def _requests_session(self):
if self._requests_session_object is None:
self._requests_session_object = requests.session()
return self._requests_session_object
def close_connection(self):
if self._requests_session_object:
self._requests_session_object.close()
self._requests_session_object = None
def send_metric_data(self, name, guid, version, duration, metrics):
agent = {}
agent['host'] = socket.gethostname()
agent['pid'] = os.getpid()
agent['version'] = version or '0.0.0'
component = {}
component['name'] = name
component['guid'] = guid
component['duration'] = duration
component['metrics'] = metrics
payload = {}
payload['agent'] = agent
payload['components'] = [component]
return self._interface.send_request(self._platform_url,
self._http_proxies, self._requests_session, payload)
|
{
"content_hash": "199f93bae4f48acc03949655d2ac7b05",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 79,
"avg_line_length": 40.16828478964401,
"alnum_prop": 0.5939413470834676,
"repo_name": "Arable/evepod",
"id": "09bf115e4e6ed27810c3e44c16939ce299522f1f",
"size": "12412",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/newrelic-2.12.0.10/newrelic/network/platform_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "6111"
},
{
"name": "JavaScript",
"bytes": "6345"
},
{
"name": "Perl",
"bytes": "84"
},
{
"name": "Python",
"bytes": "6111061"
},
{
"name": "Shell",
"bytes": "4078"
}
],
"symlink_target": ""
}
|
"""
PaaSTA service list (instances) etc.
"""
from pyramid.view import view_config
from paasta_tools import __version__
@view_config(route_name="version", request_method="GET", renderer="json")
def version(request):
return __version__
|
{
"content_hash": "0c03b219dadcf954e41e9949509821bb",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 73,
"avg_line_length": 21.90909090909091,
"alnum_prop": 0.7136929460580913,
"repo_name": "Yelp/paasta",
"id": "2fffa66c3264b7e74a7277cd73749d1b1592af43",
"size": "841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paasta_tools/api/views/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "19456"
},
{
"name": "Gherkin",
"bytes": "4399"
},
{
"name": "Makefile",
"bytes": "12710"
},
{
"name": "Python",
"bytes": "4745271"
},
{
"name": "Shell",
"bytes": "98025"
}
],
"symlink_target": ""
}
|
import subprocess
import argparse
import json
import pickle
import ast
import datetime
import os
from pprint import pprint
import requests
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
def unix_time_millis(dt):
return unix_time(dt) * 1000.0
def epoch():
return unix_time(datetime.datetime.utcnow())
def get_homedir():
p = subprocess.Popen('echo $HOME', shell=True, stdout=subprocess.PIPE)
out = p.stdout.read().strip()
return out
HOMEDIR = get_homedir()
def get_instances():
if manage_cache('get', 'instances') is not None:
print 'Getting instances from cache'
out = manage_cache('get', 'instances')
else:
print 'Getting instances from CLI'
instancelist = subprocess.Popen('aws ec2 describe-instances', shell=True, stdout=subprocess.PIPE)
out = instancelist.stdout.read().strip()#.replace('\n', '')
out = json.loads(out)
manage_cache('set', 'instances', json.dumps(out))
return out
def manage_cache(action='valid', field=None, jsondata=None):
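# Simple pickle-backed cache stored in $HOME/.aws-<field>: 'set' writes the
# data with a timestamp, 'get' returns it while it is less than two hours
# old, and 'valid' only checks the timestamp.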
if field is not None:
cachefile = '%s/.aws-%s' % (HOMEDIR, field)
if action == 'set' and jsondata is not None:
data = {
'data': json.loads(jsondata),
'date': epoch()
}
print 'Writing to cache file %s' % cachefile
with open(cachefile, 'wb') as f:
pickle.dump(data, f)
return data
elif action == 'get':
if os.path.isfile(cachefile):
with open(cachefile, 'rb') as f:
data = pickle.load(f)
if manage_cache('valid', field, json.dumps(data)):
return data['data']
else:
return None
else:
return None
elif action == 'valid' and jsondata is not None:
data = json.loads(jsondata)
if data['date'] < epoch() - (2 * 60 * 60):
#print 'The data is invalid'
return False
else:
#print 'The data is still valid'
return True
def controller():
parser = argparse.ArgumentParser(description='Send AWS EC2 instances description to GAE',
prog='send-aws-ec2.py',
version='%(prog)s 0.5'
)
parser.add_argument('-a', '--app', action='store', dest='app', help='The Google App Engine app name', default='tom-schneider')
parser.add_argument('-u', '--url', action='store', dest='app_url', help='The URL regex for the GAE app', default='https://<app>.appspot.com/events')
args = parser.parse_args()
print args
instances = get_instances()
#pprint(instances)
event = {
'subscriber': args.app_url.replace('<app>', args.app),
'type': 'aws-instance',
'data': {
'PublicDnsName': instances['Reservations'][0]['Instances'][0]['PublicDnsName'],
'State': instances['Reservations'][0]['Instances'][0]['State'],
'InstanceId': instances['Reservations'][0]['Instances'][0]['InstanceId'],
},
}
print event
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.post(event['subscriber'], data=json.dumps(event), headers=headers)
print(r.status_code, r.reason)
pprint(json.loads(r.content))
def main():
controller()
#This idiom means the below code only runs when executed from command line
if __name__ == '__main__':
main()
|
{
"content_hash": "9abdd47e07e9a2f93549a4582fe8043e",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 152,
"avg_line_length": 31.11320754716981,
"alnum_prop": 0.6303820497271073,
"repo_name": "ThomasMarcel/tom-schneider-flask",
"id": "22c16df2343104b8eb7fa8d20b48e877f44596a4",
"size": "3321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/send-aws-ec2.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "50819"
},
{
"name": "HTML",
"bytes": "19610"
},
{
"name": "JavaScript",
"bytes": "2749"
},
{
"name": "Python",
"bytes": "77915"
},
{
"name": "Shell",
"bytes": "4402"
}
],
"symlink_target": ""
}
|
"""
lychee upload v1.3
(C) 2014-2015 Roman Sirokov
Imports images from a location on hard drive to the Lychee installation on a remote server via SSH.
Based on lycheesync by Gustave Paté
https://github.com/GustavePate/lycheesync
"""
import argparse
import os
import sys
import re
import logging
import sources.directory as directory
import upload
from conf import *
logging.basicConfig(level=logging.WARNING, format='%(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
def main():
try:
if conf.source == "directory":
albums = directory.get_photos()
elif conf.source == "iPhoto" or conf.source == "Aperture":
albums = iphoto.get_photos()
u = upload.Upload()
u.upload(albums)
except Exception as e:
logger.error(e, exc_info=True)
sys.exit(1)
def parse_arguments():
"""
Specify and parse command line arguments common to all the platforms
"""
parser = argparse.ArgumentParser(description=("Upload all the photos in the local photo directory and its "
"sub-directories to Lychee. Directories are converted to albums."))
parser.add_argument('server', metavar='username@hostname:path', type=str, nargs=1,
help='Server connection string with a full path to the directory where Lychee is installed.')
source_group = parser.add_mutually_exclusive_group()
source_group.add_argument('-d', '--dir', help="path to the photo directory where to export photos from.", type=str)
parser.add_argument('-r', '--replace', help="replace albums in Lychee with local ones", action='store_true')
parser.add_argument('-P', '--port', help="alternative SSH port", type=int)
parser.add_argument('-p', '--public', help="make uploaded photos public", action='store_true')
parser.add_argument('-v', '--verbose', help='print verbose messages', action='store_true')
parser.add_argument('-q', '--quality', help='JPEG quality 0-99 for resized pictures', type=int)
parser.add_argument('--medium', help='Maximum size for medium sized pictures. 1920px by default', type=int)
parser.add_argument('--big', help='Maximum size for big sized pictures. By default pictures are untouched ',
type=int)
parser.add_argument('--originals',
help='Upload original untouched files. To be used with the --big option, otherwise ignored.',
action='store_true')
if conf.osx:
add_mac_arguments(parser, source_group)
args = parser.parse_args()
conf.replace = args.replace
conf.public = args.public
if args.verbose:
conf.verbose = logging.DEBUG
if args.server:
if not parse_server_string(args.server[0]):
logger.error("Server string must be in the username@hostname:path format")
return False
if args.dir:
if not os.path.exists(args.dir):
logger.error("Photo directory does not exist:" + args.dir)
return False
conf.dir = args.dir
conf.source = "directory"
elif not conf.osx:
logger.error("Please specify a directory to export photos from")
return False
if args.port:
conf.port = args.port
else:
conf.port = 22
if args.quality:
conf.quality = args.quality
else:
conf.quality = 70
if args.medium:
conf.medium_size = args.medium
else:
conf.medium_size = 1920
if args.big:
conf.big_size = args.big
if args.originals and args.big:
conf.upload_originals = True
if conf.osx:
if not parse_mac_arguments(args):
return False
return True
def parse_server_string(server_string):
"""
Parse a server string and store values in the global configuration
:param server_string: Server string in the form of user@host:path
:return: True if successful, False if parsing fails
"""
match = re.match(r"(.+)@([\w\d\-\.]+):(.+)", server_string)
if match:
conf.username = match.group(1)
conf.server = match.group(2)
conf.path = match.group(3)
return True
else:
return False
def parse_mac_arguments(args):
"""
Parse command line arguments specific to OSX (iPhoto / Aperture stuff)
:param args:
:return:
"""
conf.originals = args.originals
library_dir = None
if args.iphoto:
conf.source = "iPhoto"
library_dir = args.iphoto
library_file = "AlbumData.xml"
library_suffix = ".photolibrary" # iPhoto library directory extension
if args.aperture:
conf.source = "Aperture"
library_dir = args.aperture
library_file = "ApertureData.xml"
library_suffix = ".aplibrary" # Aperture library directory extension
if library_dir: # Check that the library exists
library_dir = os.path.expanduser(library_dir)
if not os.path.exists(library_dir):
# Aperture / iPhoto Library directory has an extension, which is hidden in Finder. So we check for that
# as well
if library_dir.endswith("/"):
library_dir = library_dir[:-1]
library_dir += library_suffix
if not os.path.exists(library_dir):
# Strip the .photolibrary extension for the error message
logger.error("{} library is not found in {}".format(conf.source, library_dir[:-13]))
return False
conf.xmlsource = os.path.join(library_dir, library_file)
# Check one more time that the actual library file exists
if not os.path.exists(conf.xmlsource):
logger.error("{} library file does not exist: {}".format(conf.source, conf.xmlsource))
if args.events: conf.events = args.events
if args.albums: conf.albums = args.albums
if args.smarts: conf.smarts = args.smarts
if args.exclude:
conf.exclude = args.exclude
else:
conf.exclude = ""
return True
def add_mac_arguments(parser, group):
"""
Add command line arguments specific to OSX
:param parser: the command line parser object
:param group: the mutually exclusive photos source group (directory, iphoto, aperture)
:return:
"""
group.add_argument('--iphoto', metavar="path",
help='Import from iPhoto. If path is not provided, then default location is used.',
nargs="?", const=conf.IPHOTO_DEFAULT_PATH)
group.add_argument('--aperture', metavar="path",
help='Import from Aperture. If path is not provided, then default location is used.',
nargs='?', const=conf.APERTURE_DEFAULT_PATH, type=str, action="store")
parser.add_argument('-e', '--events', const=".", type=str, nargs="?", metavar="pattern",
help="Export matching events. The argument is a regular expression. "
"If the argument is omitted, then all events are exported.")
parser.add_argument('-a', '--albums', const=".", type=str, nargs="?", metavar="pattern",
help="Export matching regular albums. The argument is a regular expression. "
"If the argument is omitted, then all events are exported.")
parser.add_argument('-s', '--smarts', const=".", type=str, nargs="?", metavar="pattern",
help="Export matching smart albums. The argument is a regular expression. "
"If the argument is omitted, then all events are exported.")
parser.add_argument('-x', '--exclude', metavar="pattern", type=str,
help="Don't export matching albums or events. The pattern is a regular expression.")
if __name__ == '__main__':
conf.osx = (sys.platform == 'darwin')
if conf.osx:
import sources.iphoto as iphoto
if parse_arguments():
main()
else:
sys.exit(1)
|
{
"content_hash": "0b47c70fd94ae715e90f145aa51a8fee",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 119,
"avg_line_length": 34.84848484848485,
"alnum_prop": 0.617888198757764,
"repo_name": "r0x0r/lycheeupload",
"id": "ba3a28031595e94725c5feb4e68f4ed658600de7",
"size": "8089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lycheeupload.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "76671"
}
],
"symlink_target": ""
}
|
import pytest
from blitzdb import Document
from ..conftest import test_mongo
if test_mongo:
def test_non_existing_key(mongodb_backend):
attributes = {'foo': 'bar', 'baz': 1243, 'd': {1: 3, 4: 5}, 'l': [1, 2, 3, 4]}
doc = Document(attributes)
mongodb_backend.save(doc)
mongodb_backend.commit()
mongodb_backend.update(doc,['non_existing_key'])
mongodb_backend.commit()
assert mongodb_backend.get(Document,{'pk' : doc.pk}) == doc
def test_basics(mongodb_backend):
attributes = {'foo': 'bar', 'baz': 1243, 'd': {1: 3, 4: 5}, 'l': [1, 2, 3, 4]}
doc = Document(attributes)
mongodb_backend.save(doc)
mongodb_backend.commit()
doc.foobar = 'baz'
mongodb_backend.update(doc,['foobar'])
mongodb_backend.commit()
assert mongodb_backend.get(Document,{'foobar' : 'baz'}) == doc
def test_update_non_existing_document(mongodb_backend):
attributes = {'foo': 'bar', 'baz': 1243, 'd': {1: 3, 4: 5}, 'l': [1, 2, 3, 4]}
doc = Document(attributes)
doc.foobar = 'baz'
with pytest.raises(Document.DoesNotExist):
mongodb_backend.update(doc,['foobar'])
mongodb_backend.commit()
with pytest.raises(Document.DoesNotExist):
assert mongodb_backend.get(Document,{'foobar' : 'baz'})
def test_deep_update(mongodb_backend):
attributes = {'foo': {'bar' : 'baz'}, 'baz': 1243, 'd': {1: 3, 4: 5}, 'l': [1, 2, 3, 4]}
doc = Document(attributes)
mongodb_backend.save(doc)
mongodb_backend.commit()
mongodb_backend.update(doc,{'foo.bar' : 'bam'})
mongodb_backend.commit()
assert mongodb_backend.get(Document,{'foo.bar' : 'bam'}) == doc
doc.foo['bar'] = 'squirrel'
#we update using a list rather than a dict
mongodb_backend.update(doc,['foo.bar'])
mongodb_backend.commit()
assert mongodb_backend.get(Document,{'foo.bar' : 'squirrel'}) == doc
|
{
"content_hash": "6d3ea27f76fdf8ad694dd90f43979d7c",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 96,
"avg_line_length": 30.313432835820894,
"alnum_prop": 0.5775480059084195,
"repo_name": "adewes/blitzdb",
"id": "cd80294e04658b5a0b1b3fda98b36708087a56c4",
"size": "2031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/mongo/test_update.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "313886"
}
],
"symlink_target": ""
}
|
import numpy as np
from .base import HomogFamilyAlignment
from .affine import DiscreteAffine
from .similarity import Similarity
class Translation(DiscreteAffine, Similarity):
r"""
An ``n_dims``-dimensional translation transform.
Parameters
----------
translation : ``(n_dims,)`` `ndarray`
The translation in each axis.
skip_checks : `bool`, optional
If ``True`` avoid sanity checks on ``h_matrix`` for performance.
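Examples
--------
A minimal sketch, assuming menpo's standard ``Transform.apply`` interface::

    t = Translation([1.0, 2.0])
    shifted = t.apply(np.zeros((1, 2)))           # -> [[1., 2.]]
    restored = t.pseudoinverse().apply(shifted)   # back to [[0., 0.]]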
"""
def __init__(self, translation, skip_checks=False):
translation = np.asarray(translation)
h_matrix = np.eye(translation.shape[0] + 1)
h_matrix[:-1, -1] = translation
Similarity.__init__(self, h_matrix, copy=False,
skip_checks=skip_checks)
@classmethod
def init_identity(cls, n_dims):
r"""
Creates an identity transform.
Parameters
----------
n_dims : `int`
The number of dimensions.
Returns
-------
identity : :class:`Translation`
The identity matrix transform.
"""
return Translation(np.zeros(n_dims))
def _transform_str(self):
message = 'Translation by {}'.format(self.translation_component)
return message
@property
def n_parameters(self):
r"""
The number of parameters: ``n_dims``
:type: `int`
"""
return self.n_dims
def _as_vector(self):
r"""
Return the parameters of the transform as a 1D array. These parameters
are parametrised as deltas from the identity warp. The parameters
are output in the order ``[t0, t1, ...]``.
+-----------+------------------------------------+
| parameter | definition                         |
+===========+====================================+
| t0        | The translation in the first axis  |
| t1        | The translation in the second axis |
| ...       | ...                                |
| tn        | The translation in the nth axis    |
+-----------+------------------------------------+
Returns
-------
ts : ``(n_dims,)`` `ndarray`
The translation in each axis.
"""
return self.h_matrix[:-1, -1]
def from_vector_inplace(self, p):
r"""
Updates the :class:`Translation` inplace.
Parameters
----------
p : ``(n_dims,)`` `ndarray`
The array of parameters.
"""
self.h_matrix[:-1, -1] = p
def pseudoinverse(self):
r"""
The inverse translation (negated).
:type: :class:`Translation`
"""
return Translation(-self.translation_component, skip_checks=True)
class AlignmentTranslation(HomogFamilyAlignment, Translation):
r"""
Constructs a :class:`Translation` by finding the optimal translation
transform to align `source` to `target`.
Parameters
----------
source : :map:`PointCloud`
The source pointcloud instance used in the alignment
target : :map:`PointCloud`
The target pointcloud instance used in the alignment
"""
def __init__(self, source, target):
HomogFamilyAlignment.__init__(self, source, target)
Translation.__init__(self, target.centre() - source.centre())
def from_vector_inplace(self, p):
r"""
Updates the :class:`Translation` inplace.
Parameters
----------
p : ``(n_dims,)`` `ndarray`
The array of parameters.
"""
Translation.from_vector_inplace(self, p)
self._sync_target_from_state()
def _sync_state_from_target(self):
translation = self.target.centre() - self.source.centre()
self.h_matrix[:-1, -1] = translation
def as_non_alignment(self):
r"""
Returns a copy of this translation without its alignment nature.
Returns
-------
transform : :map:`Translation`
A version of this transform with the same transform behavior but
without the alignment logic.
"""
return Translation(self.translation_component)
|
{
"content_hash": "476c79ae75ae8f35a3b94b6f5dcdf035",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 78,
"avg_line_length": 30.204225352112676,
"alnum_prop": 0.5283282816507344,
"repo_name": "mozata/menpo",
"id": "77b0e48634023ff5dd2b9ea9238060cbbd081957",
"size": "4289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "menpo/transform/homogeneous/translation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "155"
},
{
"name": "C",
"bytes": "70100"
},
{
"name": "C++",
"bytes": "44577"
},
{
"name": "Makefile",
"bytes": "263"
},
{
"name": "Python",
"bytes": "1728478"
},
{
"name": "Shell",
"bytes": "280"
}
],
"symlink_target": ""
}
|
from socialauth.models import UserProfile
import logging
logger = logging.getLogger(__name__)
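# Wiring sketch (an assumption, not defined here): python-social-auth runs this
# step when its dotted path is listed in SOCIAL_AUTH_PIPELINE, e.g.
#   SOCIAL_AUTH_PIPELINE = (
#       # ... the default pipeline entries ...
#       'socialauth.pipeline.get_profile',
#   )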
def get_profile(
backend,
user,
response,
details,
is_new=False,
*args,
**kwargs
):
img_url = None
#Get photo
if backend.name == 'facebook':
img_url = 'http://graph.facebook.com/%s/picture?type=large' \
% response['id']
elif backend.name == 'twitter':
img_url = response.get('profile_image_url', '').replace('_normal', '')
if backend.name == 'google-oauth2':
if response.get('image') and response['image'].get('url'):
img_url = response['image'].get('url')
logger.debug("Image url:%s" % img_url)
profile = UserProfile.objects.get_or_create(user = user)[0]
logger.debug("details:%s" % details)
#logger.debug("response:%s" % response)
profile.username = details['username']
profile.fullname = details['fullname']
profile.first_name = details['first_name']
profile.last_name = details['last_name']
profile.email = details['email']
profile.photo = img_url
profile.key = backend.strategy.session_get('key')
profile.backend= backend.name
profile.save()
# profile1 = user.profile
# attrs = vars(profile1)
# logger.debug("profile1:%s" % attrs)
|
{
"content_hash": "7b9d0086cc6a033e973bff6b573e437a",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 27.12,
"alnum_prop": 0.5958702064896755,
"repo_name": "latuannetnam/django-socialauth",
"id": "3824d9b9c0882d1091ca685b5496e2eac5db9a19",
"size": "1356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myproject/socialauth/pipeline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "529"
},
{
"name": "HTML",
"bytes": "1886"
},
{
"name": "Python",
"bytes": "10686"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys, os
md = os.path.abspath(os.path.split(__file__)[0])
sys.path = [os.path.join(md, '..', '..', 'util')] + sys.path
dataFile = "../../atlas/CochlearNucleus/images/cochlear_nucleus.ma"
labelFile = "../../atlas/CochlearNucleus/images/cochlear_nucleus_label.ma"
from acq4.util import Qt
import acq4.pyqtgraph as pg
#import acq4.pyqtgraph.ColorButton as ColorButton
#import acq4.pyqtgraph.ProgressDialog as ProgressDialog
import numpy as np
import builderTemplate
import acq4.util.metaarray as metaarray
import acq4.util.debug as debug
import user
Qt.QApplication.setGraphicsSystem('raster')
app = Qt.QApplication([])
win = Qt.QMainWindow()
cw = Qt.QWidget()
win.setCentralWidget(cw)
ui = builderTemplate.Ui_Form()
ui.setupUi(cw)
win.show()
win.resize(800,600)
ui.labelTree.header().setResizeMode(Qt.QHeaderView.ResizeToContents)
data = metaarray.MetaArray(file=dataFile, mmap=True)
## data must have axes (anterior, dorsal, right)
if not os.path.exists(labelFile):
label = metaarray.MetaArray(np.zeros(data.shape[:-1], dtype=np.uint16), info=data.infoCopy()[:3] + [{'labels': {}}])
label.write(labelFile, mappable=True)
label = metaarray.MetaArray(file=labelFile, mmap=True, writable=True)
labelCache = None
labelInfo = {}
#ui.view.enableMouse()
#ui.view.setAspectLocked(True)
vb = pg.ViewBox()
ui.view.setCentralItem(vb)
vb.setAspectLocked(True)
vb.invertY(False)
dataImg = pg.ImageItem()
labelImg = pg.ImageItem() # mode=Qt.QPainter.CompositionMode_Plus)
#labelImg.setCompositionMode(Qt.QPainter.CompositionMode_Overlay)
labelImg.setZValue(10)
labelImg.setOpacity(1)
vb.addItem(dataImg)
vb.addItem(labelImg)
def connectSignals():
for r in [ui.rightRadio, ui.dorsalRadio, ui.rostralRadio]:
r.toggled.connect(imageChanged)
ui.zSlider.valueChanged.connect(updateImage)
ui.radiusSpin.valueChanged.connect(updateKernel)
ui.greyCheck.toggled.connect(updateImage)
ui.labelSlider.valueChanged.connect(imageChanged)
ui.labelTree.itemChanged.connect(itemChanged)
ui.labelTree.currentItemChanged.connect(itemSelected)
ui.overlayCheck.toggled.connect(overlayToggled)
def init():
connectSignals()
updateKernel()
labelData = label._info[-1]['labels']
d = dict([(x['id'], x) for x in labelData])
keys = list(d.keys())
keys.sort()
for k in keys:
addLabel(d[k])
def keyPressEvent(ev):
k = ev.key()
mod = ev.modifiers()
if k == Qt.Qt.Key_Right:
if mod & Qt.Qt.ControlModifier:
copyLabel(1)
ui.zSlider.setValue(ui.zSlider.value()+1)
elif k == Qt.Qt.Key_Left:
if mod & Qt.Qt.ControlModifier:
copyLabel(-1)
ui.zSlider.setValue(ui.zSlider.value()-1)
elif k == Qt.Qt.Key_Equal:
ui.radiusSpin.setValue(ui.radiusSpin.value()+1)
elif k == Qt.Qt.Key_Minus:
ui.radiusSpin.setValue(ui.radiusSpin.value()-1)
elif k == Qt.Qt.Key_Space:
if labelImg.isVisible():
labelImg.setVisible(False)
else:
updateLabelImage()
labelImg.setVisible(True)
elif k == Qt.Qt.Key_G:
ui.greyCheck.toggle()
else:
ev.ignore()
cw.keyPressEvent = keyPressEvent
currentPos = [0,0,0]
zAxis = 0
def draw(src, dst, mask, srcSlice, dstSlice, ev):
addLabel()
#p = debug.Profiler('draw', disabled=True)
l = displayLabel.view(np.ndarray)[ui.zSlider.value()]
#p.mark('1')
mod = ev.modifiers()
mask = mask[srcSlice]
src = src[srcSlice].astype(l.dtype)
if mod & Qt.Qt.ShiftModifier:
#src = 1-src
l[dstSlice] &= ~(src * 2**ui.labelSpin.value())
#l[dstSlice] = l[dstSlice] * (1-mask) + src * mask
#p.mark('2')
else:
l[dstSlice] |= src * 2**ui.labelSpin.value()
#p.mark('3')
updateLabelImage(dstSlice)
#p.mark('4')
#p.finish()
def addLabel(info=None):
global labelInfo
create = False
if info is None:
create = True
l = ui.labelSpin.value()
if l in labelInfo:
return
info = {
'visible': True,
'name': 'label',
'color': pg.intColor(len(labelInfo), 16),
'id': l
}
else:
info = info.copy()
info['color'] = pg.mkColor(info['color'])
l = info['id']
item = Qt.QTreeWidgetItem([str(l), info['name'], ''])
item.setFlags(item.flags() | Qt.Qt.ItemIsEditable | Qt.Qt.ItemIsUserCheckable)
if info['visible']:
item.setCheckState(0, Qt.Qt.Checked)
else:
item.setCheckState(0, Qt.Qt.Unchecked)
btn = pg.ColorButton(color=info['color'])
ui.labelTree.addTopLevelItem(item)
ui.labelTree.setItemWidget(item, 2, btn)
labelInfo[l] = {'item': item, 'btn': btn}
btn.sigColorChanged.connect(itemChanged)
btn.sigColorChanging.connect(imageChanged)
if create:
writeMeta()
def overlayToggled(b):
if b:
labelImg.setCompositionMode(Qt.QPainter.CompositionMode_Overlay)
else:
labelImg.setCompositionMode(Qt.QPainter.CompositionMode_SourceOver)
updateImage()
def itemChanged(*args):
imageChanged()
writeMeta()
def writeMeta():
meta = []
for k, v in labelInfo.items():
meta.append( {
'id': k,
'name': str(v['item'].text(1)),
'color': pg.colorStr(v['btn'].color()),
'visible': v['item'].checkState(0) == Qt.Qt.Checked
} )
label._info[-1]['labels'] = meta
label.writeMeta(labelFile)
def itemSelected(item):
ui.labelTree.editItem(item, 1)
def copyLabel(n):
i1 = ui.zSlider.value()
i2 = i1 + n
    if i2 < 0 or i2 >= displayLabel.shape[0]:
return
#displayLabel[i2] &= ~mask
#displayLabel[i2] |= displayLabel[i1] & mask
mask = np.uint16(2**ui.labelSpin.value())
displayLabel[i2] = (displayLabel[i1] & mask) | (displayLabel[i2] & ~mask)
def updateImage():
currentPos[zAxis] = ui.zSlider.value()
if ui.greyCheck.isChecked():
img = displayData.view(np.ndarray)[ui.zSlider.value()].mean(axis=2)
else:
img = displayData.view(np.ndarray)[ui.zSlider.value()]
dataImg.setImage(img, levels=None)
#labelImg.updateImage(displayLabel.view(np.ndarray)[ui.zSlider.value()], copy=False, white=10, black=0)
if labelImg.isVisible():
updateLabelImage()
def renderLabels(z, sl=None, overlay=False):
#p = debug.Profiler('updateLabelImage', disabled=True)
if sl is None:
sl = (slice(None), slice(None))
l = displayLabel.view(np.ndarray)[z]
#p.mark('1')
lsl = l[sl]
img = np.empty(lsl.shape+(4,), dtype=np.uint16)
#img.fill(128)
img.fill(0)
val = ui.labelSlider.value()/128.
for k, v in labelInfo.items():
if not v['item'].checkState(0) == Qt.Qt.Checked:
continue
c = pg.colorTuple(v['btn'].color())
mask = (lsl&(2**k) > 0)
alpha = c[3]/255. * val
img[mask] *= 1.0 - alpha
img[...,0] += mask * int(c[0] * alpha)
img[...,1] += mask * int(c[1] * alpha)
img[...,2] += mask * int(c[2] * alpha)
#img[...,0] += mask * int(c[0] * val)
#img[...,1] += mask * int(c[1] * val)
#img[...,2] += mask * int(c[2] * val)
img[...,3] += mask * (alpha * 255)
if overlay:
img += 128
img = img.clip(0,255).astype(np.ubyte)
return img
def renderStack(overlay=True):
"""
Export label data as a 3D, RGB image
if overlay is True, multiply in the original data image
"""
stack = np.zeros(displayLabel.shape + (4,), dtype=np.ubyte)
with pg.ProgressDialog("Rendering label stack...", maximum=displayLabel.shape[0]) as dlg:
for z in range(displayLabel.shape[0]):
stack[z] = renderLabels(z)
if overlay: ## multiply colors, not alpha.
stack[z][..., :3] *= displayData[z].mean(axis=2)[..., np.newaxis].astype(float)/256.
print(z)
dlg += 1
if dlg.wasCanceled():
raise Exception("Stack render canceled.")
return stack
def renderVolume(stack, alpha=0.3, loss=0.01):
im = np.zeros(stack.shape[1:3]+(3,), dtype=float)
for z in range(stack.shape[0]):
sz = stack[z].astype(float) # -128
mask = sz.max(axis=2) > 0
szm = sz[mask]
alphaChan = szm[...,3:4]*alpha/256.
im *= (1.0-loss)
im[mask] *= 1.0-alphaChan
im[mask] += szm[...,:3] * alphaChan
#im[mask] *= (1.0-alpha)
#im[mask] += sz[mask] * alpha
print(z)
return im
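# A minimal usage sketch (assumes imageChanged() has already populated
# displayData and displayLabel): render the full label stack, then flatten
# it into a single 2D projection for a quick preview.
#   stack = renderStack(overlay=True)
#   preview = renderVolume(stack)
#   pg.image(preview)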
def updateLabelImage(sl=None):
global labelCache
if labelCache is None: ## if we haven't cached a full frame, then the full frame must be rendered.
sl = (slice(None), slice(None))
img = renderLabels(ui.zSlider.value(), sl, overlay=ui.overlayCheck.isChecked())
if labelCache is None:
labelCache = img
labelImg.setImage(labelCache, levels=None)
else:
labelCache[sl] = img
labelImg.updateImage()
def imageChanged():
global zAxis, displayData, displayLabel, labelCache
labelCache = None
if ui.rightRadio.isChecked():
axes = ('right', 'anterior', 'dorsal')
zAxis = 0
elif ui.dorsalRadio.isChecked():
axes = ('dorsal', 'right', 'anterior')
zAxis = 1
else:
axes = ('anterior', 'right', 'dorsal')
zAxis = 2
displayData = data.transpose(axes)
displayLabel = label.transpose(axes).view(np.ndarray)
ui.zSlider.setMaximum(displayData.shape[0]-1)
ui.zSlider.setValue(currentPos[zAxis])
updateImage()
#vb.setRange(dataImg.boundingRect())
vb.autoRange()
def updateKernel():
global drawKernel
r = ui.radiusSpin.value()+1
d = (r*2) - 1
x = np.array([range(d)])
y = x.transpose()
drawKernel = (np.sqrt((x-r+1)**2 + (y-r+1)**2) < r-1).astype(np.ubyte)
labelImg.setDrawKernel(drawKernel, mask=drawKernel, center=(r-1,r-1), mode=draw)
init()
imageChanged()
|
{
"content_hash": "32e3f62e8070ea22f68a2a90e1a1f3d4",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 149,
"avg_line_length": 30.580357142857142,
"alnum_prop": 0.6005839416058394,
"repo_name": "meganbkratz/acq4",
"id": "6c04a3a5e9b098c686a9aa5428d1ccdbd7cb02e9",
"size": "10275",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "acq4/analysis/scripts/builder/builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "3037"
},
{
"name": "Arduino",
"bytes": "18651"
},
{
"name": "Batchfile",
"bytes": "64"
},
{
"name": "C",
"bytes": "705091"
},
{
"name": "C++",
"bytes": "321384"
},
{
"name": "CSS",
"bytes": "716"
},
{
"name": "MATLAB",
"bytes": "1752"
},
{
"name": "Objective-C",
"bytes": "596020"
},
{
"name": "Processing",
"bytes": "13403"
},
{
"name": "Python",
"bytes": "5922488"
}
],
"symlink_target": ""
}
|
import traceback
from cassandra.metadata import cql_keywords_reserved
from . import pylexotron, util
Hint = pylexotron.Hint
class CqlParsingRuleSet(pylexotron.ParsingRuleSet):
available_compression_classes = (
'DeflateCompressor',
'SnappyCompressor',
'LZ4Compressor',
)
available_compaction_classes = (
'LeveledCompactionStrategy',
'SizeTieredCompactionStrategy',
'DateTieredCompactionStrategy'
)
replication_strategies = (
'SimpleStrategy',
'OldNetworkTopologyStrategy',
'NetworkTopologyStrategy'
)
replication_factor_strategies = (
'SimpleStrategy',
'org.apache.cassandra.locator.SimpleStrategy',
'OldNetworkTopologyStrategy',
'org.apache.cassandra.locator.OldNetworkTopologyStrategy'
)
def __init__(self, *args, **kwargs):
pylexotron.ParsingRuleSet.__init__(self, *args, **kwargs)
# note: commands_end_with_newline may be extended by callers.
self.commands_end_with_newline = set()
self.set_reserved_keywords(cql_keywords_reserved)
def set_reserved_keywords(self, keywords):
"""
        We cannot let reserved CQL keywords be simple 'identifier' tokens, since that
        caused problems with completion; see CASSANDRA-10415.
        """
syntax = '<reserved_identifier> ::= /(' + '|'.join(r'\b%s\b' % (k,) for k in keywords) + ')/ ;'
self.append_rules(syntax)
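    # A minimal sketch of the rule appended above (the keyword list here is
    # hypothetical): for keywords ('select', 'from') the generated syntax is
    #   <reserved_identifier> ::= /(\bselect\b|\bfrom\b)/ ;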
def completer_for(self, rulename, symname):
def registrator(f):
def completerwrapper(ctxt):
cass = ctxt.get_binding('cassandra_conn', None)
if cass is None:
return ()
return f(ctxt, cass)
completerwrapper.func_name = 'completerwrapper_on_' + f.func_name
self.register_completer(completerwrapper, rulename, symname)
return completerwrapper
return registrator
def explain_completion(self, rulename, symname, explanation=None):
if explanation is None:
explanation = '<%s>' % (symname,)
@self.completer_for(rulename, symname)
def explainer(ctxt, cass):
return [Hint(explanation)]
return explainer
def cql_massage_tokens(self, toklist):
curstmt = []
output = []
term_on_nl = False
for t in toklist:
if t[0] == 'endline':
if term_on_nl:
t = ('endtoken',) + t[1:]
else:
# don't put any 'endline' tokens in output
continue
# Convert all unicode tokens to ascii, where possible. This
# helps avoid problems with performing unicode-incompatible
# operations on tokens (like .lower()). See CASSANDRA-9083
# for one example of this.
str_token = t[1]
if isinstance(str_token, unicode):
try:
str_token = str_token.encode('ascii')
t = (t[0], str_token) + t[2:]
except UnicodeEncodeError:
pass
curstmt.append(t)
if t[0] == 'endtoken':
term_on_nl = False
output.extend(curstmt)
curstmt = []
else:
if len(curstmt) == 1:
# first token in statement; command word
cmd = t[1].lower()
term_on_nl = bool(cmd in self.commands_end_with_newline)
output.extend(curstmt)
return output
def cql_parse(self, text, startsymbol='Start'):
tokens = self.lex(text)
tokens = self.cql_massage_tokens(tokens)
return self.parse(startsymbol, tokens, init_bindings={'*SRC*': text})
def cql_whole_parse_tokens(self, toklist, srcstr=None, startsymbol='Start'):
return self.whole_match(startsymbol, toklist, srcstr=srcstr)
def cql_split_statements(self, text):
tokens = self.lex(text)
tokens = self.cql_massage_tokens(tokens)
stmts = util.split_list(tokens, lambda t: t[0] == 'endtoken')
output = []
in_batch = False
for stmt in stmts:
if in_batch:
output[-1].extend(stmt)
else:
output.append(stmt)
if len(stmt) > 2:
if stmt[-3][1].upper() == 'APPLY':
in_batch = False
elif stmt[0][1].upper() == 'BEGIN':
in_batch = True
return output, in_batch
def cql_complete_single(self, text, partial, init_bindings={}, ignore_case=True,
startsymbol='Start'):
tokens = (self.cql_split_statements(text)[0] or [[]])[-1]
bindings = init_bindings.copy()
        # handle some different completion scenarios - in particular, completing
        # inside a string literal
prefix = None
dequoter = util.identity
lasttype = None
if tokens:
lasttype = tokens[-1][0]
if lasttype == 'unclosedString':
prefix = self.token_dequote(tokens[-1])
tokens = tokens[:-1]
partial = prefix + partial
dequoter = self.dequote_value
requoter = self.escape_value
elif lasttype == 'unclosedName':
prefix = self.token_dequote(tokens[-1])
tokens = tokens[:-1]
partial = prefix + partial
dequoter = self.dequote_name
requoter = self.escape_name
elif lasttype == 'unclosedComment':
return []
bindings['partial'] = partial
bindings['*LASTTYPE*'] = lasttype
bindings['*SRC*'] = text
# find completions for the position
completions = self.complete(startsymbol, tokens, bindings)
hints, strcompletes = util.list_bifilter(pylexotron.is_hint, completions)
# it's possible to get a newline token from completion; of course, we
# don't want to actually have that be a candidate, we just want to hint
if '\n' in strcompletes:
strcompletes.remove('\n')
if partial == '':
hints.append(Hint('<enter>'))
# find matches with the partial word under completion
if ignore_case:
partial = partial.lower()
f = lambda s: s and dequoter(s).lower().startswith(partial)
else:
f = lambda s: s and dequoter(s).startswith(partial)
candidates = filter(f, strcompletes)
if prefix is not None:
# dequote, re-escape, strip quotes: gets us the right quoted text
# for completion. the opening quote is already there on the command
# line and not part of the word under completion, and readline
# fills in the closing quote for us.
candidates = [requoter(dequoter(c))[len(prefix) + 1:-1] for c in candidates]
# the above process can result in an empty string; this doesn't help for
# completions
candidates = filter(None, candidates)
# prefix a space when desirable for pleasant cql formatting
if tokens:
newcandidates = []
for c in candidates:
if self.want_space_between(tokens[-1], c) \
and prefix is None \
and not text[-1].isspace() \
and not c[0].isspace():
c = ' ' + c
newcandidates.append(c)
candidates = newcandidates
# append a space for single, complete identifiers
if len(candidates) == 1 and candidates[0][-1].isalnum() \
and lasttype != 'unclosedString' \
and lasttype != 'unclosedName':
candidates[0] += ' '
return candidates, hints
@staticmethod
def want_space_between(tok, following):
if following in (',', ')', ':'):
return False
if tok[0] == 'op' and tok[1] in (',', ')', '='):
return True
if tok[0] == 'stringLiteral' and following[0] != ';':
return True
if tok[0] == 'star' and following[0] != ')':
return True
if tok[0] == 'endtoken':
return True
if tok[1][-1].isalnum() and following[0] != ',':
return True
return False
def cql_complete(self, text, partial, cassandra_conn=None, ignore_case=True, debug=False,
startsymbol='Start'):
init_bindings = {'cassandra_conn': cassandra_conn}
if debug:
init_bindings['*DEBUG*'] = True
print "cql_complete(%r, partial=%r)" % (text, partial)
completions, hints = self.cql_complete_single(text, partial, init_bindings,
startsymbol=startsymbol)
if hints:
hints = [h.text for h in hints]
hints.append('')
if len(completions) == 1 and len(hints) == 0:
c = completions[0]
if debug:
print "** Got one completion: %r. Checking for further matches...\n" % (c,)
if not c.isspace():
new_c = self.cql_complete_multiple(text, c, init_bindings, startsymbol=startsymbol)
completions = [new_c]
if debug:
print "** New list of completions: %r" % (completions,)
return hints + completions
def cql_complete_multiple(self, text, first, init_bindings, startsymbol='Start'):
debug = init_bindings.get('*DEBUG*', False)
try:
completions, hints = self.cql_complete_single(text + first, '', init_bindings,
startsymbol=startsymbol)
except Exception:
if debug:
print "** completion expansion had a problem:"
traceback.print_exc()
return first
if hints:
if not first[-1].isspace():
first += ' '
if debug:
print "** completion expansion found hints: %r" % (hints,)
return first
if len(completions) == 1 and completions[0] != '':
if debug:
print "** Got another completion: %r." % (completions[0],)
if completions[0][0] in (',', ')', ':') and first[-1] == ' ':
first = first[:-1]
first += completions[0]
else:
common_prefix = util.find_common_prefix(completions)
if common_prefix == '':
return first
if common_prefix[0] in (',', ')', ':') and first[-1] == ' ':
first = first[:-1]
if debug:
print "** Got a partial completion: %r." % (common_prefix,)
first += common_prefix
if debug:
print "** New total completion: %r. Checking for further matches...\n" % (first,)
return self.cql_complete_multiple(text, first, init_bindings, startsymbol=startsymbol)
@staticmethod
def cql_extract_orig(toklist, srcstr):
# low end of span for first token, to high end of span for last token
return srcstr[toklist[0][2][0]:toklist[-1][2][1]]
@staticmethod
def token_dequote(tok):
if tok[0] == 'unclosedName':
# strip one quote
return tok[1][1:].replace('""', '"')
if tok[0] == 'stringLiteral':
# strip quotes
return tok[1][1:-1].replace("''", "'")
if tok[0] == 'unclosedString':
# strip one quote
return tok[1][1:].replace("''", "'")
if tok[0] == 'unclosedComment':
return ''
return tok[1]
@staticmethod
def token_is_word(tok):
return tok[0] == 'identifier'
|
{
"content_hash": "737789dd9e71ded0347c9be240782dd8",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 103,
"avg_line_length": 37.67192429022082,
"alnum_prop": 0.535672416680623,
"repo_name": "iburmistrov/Cassandra",
"id": "6ee3cf57db9c978e24c648d6b1d031a8f6f4494d",
"size": "12840",
"binary": false,
"copies": "1",
"ref": "refs/heads/cassandra-2.1",
"path": "pylib/cqlshlib/cqlhandling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "801"
},
{
"name": "Batchfile",
"bytes": "21303"
},
{
"name": "GAP",
"bytes": "90980"
},
{
"name": "Java",
"bytes": "9831270"
},
{
"name": "PowerShell",
"bytes": "38303"
},
{
"name": "Python",
"bytes": "327612"
},
{
"name": "Shell",
"bytes": "45316"
},
{
"name": "Thrift",
"bytes": "40240"
}
],
"symlink_target": ""
}
|
from calvin.utilities.calvin_callback import CalvinCB
from calvin.utilities import calvinlogger
from calvin.utilities import calvinuuid
from calvin.runtime.south.plugins.transports import base_transport
_log = calvinlogger.get_logger(__name__)
_join_request_reply = {'cmd': 'JOIN_REPLY', 'id': None, 'sid': None, 'serializer': None}
_join_request = {'cmd': 'JOIN_REQUEST', 'id': None, 'sid': None, 'serializers': []}
class CalvinTransport(base_transport.BaseTransport):
def __init__(self, rt_id, remote_uri, callbacks, transport, proto=None):
"""docstring for __init__"""
super(CalvinTransport, self).__init__(rt_id, remote_uri, callbacks=callbacks)
self._rt_id = rt_id
self._remote_rt_id = None
self._coder = None
self._transport = transport(self._uri.hostname, self._uri.port, callbacks, proto=proto)
        self._rtt = 2000  # Initial RTT in ms
# TODO: This should be incoming param
self._verify_client = lambda x: True
self._incoming = proto is not None
if self._incoming:
# TODO: Set timeout
            # Incoming connection timeout if no join
self._transport.callback_register("disconnected", CalvinCB(self._disconnected))
self._transport.callback_register("data", CalvinCB(self._data_received))
def connect(self, timeout=10):
if self._transport.is_connected():
raise Exception("Transport already connected")
self._transport.callback_register("connected", CalvinCB(self._send_join))
self._transport.callback_register("disconnected", CalvinCB(self._disconnected))
self._transport.callback_register("data", CalvinCB(self._data_received))
# TODO: set timeout
self._transport.join()
def disconnect(self, timeout=10):
        # TODO: Set timeout
if self._transport.is_connected():
self._transport.disconnect()
def is_connected(self):
return self._transport.is_connected()
def send(self, payload, timeout=None, coder=None):
tcoder = coder or self._coder
try:
_log.debug('send_message %s => %s "%s"' % (self._rt_id, self._remote_rt_id, payload))
self._callback_execute('send_message', self, payload)
# Send
raw_payload = tcoder.encode(payload)
_log.debug('raw_send_message %s => %s "%s"' % (self._rt_id, self._remote_rt_id, raw_payload))
self._callback_execute('raw_send_message', self, raw_payload)
self._transport.send(raw_payload)
# TODO: Set timeout of send
return True
except:
_log.exception("Send message failed!!")
_log.error("Payload = '%s'" % repr(payload))
return False
def _get_join_coder(self):
return self.get_coders()['json']
def _get_msg_uuid(self):
return calvinuuid.uuid("MSGID")
def _send_join(self):
self._callback_execute('peer_connected', self, self.get_uri())
msg = _join_request
msg['id'] = self._rt_id
msg['sid'] = self._get_msg_uuid()
msg['serializers'] = self.get_coders().keys()
self.send(msg, coder=self._get_join_coder())
def _send_join_reply(self, _id, serializer, sid):
msg = _join_request_reply
msg['id'] = self._rt_id
msg['sid'] = sid
msg['serializer'] = serializer
self.send(msg, coder=self._get_join_coder())
def _handle_join(self, data):
try:
data_obj = self._get_join_coder().decode(data)
coder_name = None
# Verify package
if 'cmd' not in data_obj or data_obj['cmd'] != 'JOIN_REQUEST' or \
'serializers' not in data_obj or 'id' not in data_obj or 'sid' not in data_obj:
raise Exception('Not a valid package "%s"' % data_obj)
sid = data_obj['sid']
for coder in self.get_coders():
if coder in data_obj['serializers']:
self._coder = self.get_coders()[coder]
coder_name = coder
break
# Verify remote
valid = self._verify_client(data_obj)
# TODO: Callback or use join_finished
if valid:
self._remote_rt_id = data_obj['id']
except:
_log.exception("_handle_join: Failed!!")
# TODO: disconnect ?
return
self._send_join_reply(not valid or self._rt_id, coder_name, sid)
self._joined(False)
def _joined(self, is_orginator):
self._callback_execute('join_finished', self, self._remote_rt_id, self.get_uri(), is_orginator)
def _handle_join_reply(self, data):
try:
data_obj = self.get_coders()['json'].decode(data)
# Verify package and set local data
if 'cmd' not in data_obj or data_obj['cmd'] != 'request_reply' or \
'serializer' not in data_obj or 'id' not in data_obj or 'sid' not in data_obj:
pass
if data_obj['serializer'] in self.get_coders():
self._coder = self.get_coders()[data_obj['serializer']]
if data_obj['id'] is not None:
# Request denied
self._remote_rt_id = data_obj['id']
except:
_log.exception("_handle_join: Failed!!")
# TODO: disconnect ?
return
self._joined(True)
def _disconnected(self, reason):
# TODO: unify reason
self._callback_execute('peer_disconnected', self, self._remote_rt_id, reason)
def _data_received(self, data):
self._callback_execute('raw_data_received', self, data)
if self._remote_rt_id is None:
if self._incoming:
self._handle_join(data)
else:
# We have not joined yet
self._handle_join_reply(data)
return
# TODO: How to error this
data_obj = None
# decode
try:
data_obj = self._coder.decode(data)
except:
_log.exception("Message decode failed")
self._callback_execute('data_received', self, data_obj)
class CalvinServer(base_transport.BaseServer):
def __init__(self, rt_id, listen_uri, callbacks, server_transport, client_transport):
super(CalvinServer, self).__init__(rt_id, listen_uri, callbacks=callbacks)
self._rt_id = rt_id
self._port = None
self._peers = {}
self._callbacks = callbacks
# TODO: Get iface from addr and lookup host
iface = ''
self._transport = server_transport(iface=iface, port=self._listen_uri.port or 0)
self._client_transport = client_transport
def _started(self, port):
self._port = port
self._callback_execute('server_started', self, self._port)
def _stopped(self):
self._port = None
self._callback_execute('server_stopped', self)
# TODO: remove this ?
# self._transport.callback_register('peer_connected', CalvinCB(self._peer_connected))
def _client_connected(self, uri, protocol):
"""
        Callback for when a client connects; a join still needs to be finished
        before we can call back to the upper layers
"""
tp = CalvinTransport(self._rt_id, uri, self._callbacks,
self._client_transport, proto=protocol)
self._callback_execute('peer_connected', tp, tp.get_uri())
self._peers[uri] = tp
def start(self):
# These should come from us
self._transport.callback_register('server_started', CalvinCB(self._started))
self._transport.callback_register('server_stopped', CalvinCB(self._stopped))
self._transport.callback_register('client_connected', CalvinCB(self._client_connected))
# Start the server
self._port = self._transport.start()
def stop(self):
return self._transport.stop()
def is_listening(self):
return self._transport.is_listening()
|
{
"content_hash": "9dd5aeea14c9ec8c700ea0ae27317c31",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 105,
"avg_line_length": 36.82272727272727,
"alnum_prop": 0.5830144426613998,
"repo_name": "les69/calvin-base",
"id": "5a856baaa7e95c0cd2535284f2c64b0503da9d74",
"size": "8706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calvin/runtime/south/plugins/transports/lib/twisted/twisted_transport.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1727"
},
{
"name": "HTML",
"bytes": "7958"
},
{
"name": "JavaScript",
"bytes": "59355"
},
{
"name": "Python",
"bytes": "1614514"
},
{
"name": "Shell",
"bytes": "18513"
}
],
"symlink_target": ""
}
|
"""Runs an infinite loop to find race conditions in context get_tasklet."""
from google.appengine.ext import testbed
from ndb import model
from ndb import tasklets
def cause_problem():
tb = testbed.Testbed()
tb.activate()
tb.init_datastore_v3_stub()
tb.init_memcache_stub()
ctx = tasklets.make_default_context()
tasklets.set_context(ctx)
ctx.set_datastore_policy(True)
ctx.set_cache_policy(False)
ctx.set_memcache_policy(True)
@tasklets.tasklet
def problem_tasklet():
class Foo(model.Model):
pass
key = yield ctx.put(Foo())
yield ctx.get(key) # Trigger get_tasklet that does not complete...
yield ctx.delete(key) # ... by the time this delete_tasklet starts.
a = yield ctx.get(key)
assert a is None, '%r is not None' % a
problem_tasklet().check_success()
print 'No problem yet...'
tb.deactivate()
if __name__ == '__main__':
while True:
cause_problem()
|
{
"content_hash": "a35917eae746629910f07b2cab51cc8c",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 75,
"avg_line_length": 25.027027027027028,
"alnum_prop": 0.6771058315334774,
"repo_name": "GoogleCloudPlatform/datastore-ndb-python",
"id": "b256d05c1524405221ead5f87f5f697f16b78289",
"size": "1532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/gettaskletrace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1336"
},
{
"name": "Python",
"bytes": "899696"
},
{
"name": "Shell",
"bytes": "2303"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("core", "0031_projector_default_height"),
("agenda", "0008_default_ordering_item"),
]
operations = [
migrations.AddField(
model_name="item",
name="tags",
field=models.ManyToManyField(blank=True, to="core.Tag"),
),
]
|
{
"content_hash": "63db4b21feeadb37e764c7fb3cd7e4c6",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 68,
"avg_line_length": 23.647058823529413,
"alnum_prop": 0.5746268656716418,
"repo_name": "jwinzer/OpenSlides",
"id": "96384a03258847e4f3f9ee3ea7dd9ddc9b1fdb8a",
"size": "452",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "server/openslides/agenda/migrations/0009_item_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2553"
},
{
"name": "HTML",
"bytes": "504880"
},
{
"name": "JavaScript",
"bytes": "74835"
},
{
"name": "M4",
"bytes": "18419"
},
{
"name": "Python",
"bytes": "1533060"
},
{
"name": "SCSS",
"bytes": "137122"
},
{
"name": "Shell",
"bytes": "9338"
},
{
"name": "Smarty",
"bytes": "7554"
},
{
"name": "TypeScript",
"bytes": "2637192"
}
],
"symlink_target": ""
}
|
__author__ = 'hs634'
from collections import defaultdict
class TrieNode(object):
def __init__(self, value=None):
self.val = value
self.children = {}
self.is_end = False
class Trie(object):
def __init__(self, root_value='0'):
self.root = TrieNode(root_value)
def insert(self, word):
temp = self.root
for idx, ch in enumerate(word):
if ch in temp.children:
temp = temp.children.get(ch)
else:
new_node = TrieNode(ch)
temp.children[ch] = new_node
temp = new_node
temp.is_end = True
def insert_list(self, word_list):
for word in word_list:
self.insert(word)
def get_longest_prefix(self, word):
temp = self.root
prefix, prev_match = '', ''
for ch in word:
if ch in temp.children:
temp = temp.children.get(ch)
prefix += ch
if temp.is_end:
prev_match = prefix
else:
break
if not temp.is_end:
return prev_match
return prefix
def main():
trie = Trie()
trie.insert_list(['are', 'area', 'base', 'cat', 'cater', 'children',
'basement'])
#print trie.get_longest_prefix('caterer')
print trie.get_longest_prefix('are')
if __name__ == "__main__":
main()
|
{
"content_hash": "8a4fc45b23b922cbf99c10db92399d77",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 72,
"avg_line_length": 21.279411764705884,
"alnum_prop": 0.497581202487906,
"repo_name": "hs634/algorithms",
"id": "b866f0e99c4a020856ddc96770d9ff352c74d83b",
"size": "1447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/graphs/trie_find_longest_prefix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "173234"
},
{
"name": "Python",
"bytes": "195068"
}
],
"symlink_target": ""
}
|
from couchdbkit import Document, StringProperty, SchemaListProperty
import bcrypt
__all__ = ['Permission', 'Group', 'User', 'init_model']
def hashpw(password, salt=None):
"""
Hash a password using the optional salt. If salt is not specified one
will be generated. The hash, which includes the salt, is returned.
:param password: The password to hash.
:param salt: The optional salt to use.
:return: The hashed password.
"""
if salt is None:
salt = bcrypt.gensalt()
return unicode(bcrypt.hashpw(password, salt))
def hashcmp(hash, password):
"""
    Compare a hash to an un-hashed password. Returns True if they match
    or False otherwise.
    :param hash: A password hash generated by hashpw().
    :param password: An unhashed password to compare against.
:return: True if the password matches the hash, False if it does not.
"""
salt = hash[:29]
return hash == hashpw(password, salt)
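# A minimal usage sketch (the password values are hypothetical): hashpw()
# embeds the generated salt in the returned hash, so hashcmp() can recover
# that salt, re-hash the candidate password and compare.
#   hashed = hashpw('s3cret')
#   assert hashcmp(hashed, 's3cret')
#   assert not hashcmp(hashed, 'wrong')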
class Permission(Document):
"""
Permission document. Permissions belong to groups in a many-to-many relationship.
"""
name = StringProperty(required=True)
class Group(Document):
"""
Group document. Groups are assigned to users in a many-to-many relationship.
"""
name = StringProperty(required=True)
permissions = SchemaListProperty(Permission)
class User(Document):
"""
User document.
"""
username = StringProperty(required=True)
password = StringProperty()
groups = SchemaListProperty(Group)
@staticmethod
def create(username, password, groups=[]):
"""
Convenience method for creating a new user.
:param username: The username of the new user.
:param password: The password of the new user.
:param groups: The groups to assign to the new user.
:return: The new user document.
"""
hash = hashpw(password)
return User(username=username, password=hash, groups=groups)
def authenticate(self, password):
"""
Authenticate the user against a plaintext password.
:param password: The plaintext password to authenticate the user with.
:return: True if authentication is successful, False otherwise.
"""
return hashcmp(self.password, password)
def set_password(self, password):
"""
        Set the password. Hashes the password before storing it.
:param password: The password to set in plaintext.
"""
self.password = hashpw(password)
def init_model(database):
"""
Initialize the model. Associates the given database with each of the documents.
:param database: The database to initialize the model with.
"""
User.set_db(database)
Group.set_db(database)
Permission.set_db(database)
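# A minimal end-to-end sketch (the database handle and credentials are
# hypothetical): bind the documents to a database, create a user, then
# authenticate against the stored hash.
#   init_model(database)
#   user = User.create('alice', 'hunter2')
#   user.save()
#   assert user.authenticate('hunter2')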
|
{
"content_hash": "0dd0efc84e648228d3eb2a3c6a2430bf",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 86,
"avg_line_length": 32.870588235294115,
"alnum_prop": 0.6664280601288475,
"repo_name": "BlueDragonX/whatcouch",
"id": "a19966f368a6517d20ecff595237bcfb5d028195",
"size": "3284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "whatcouch/model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "672"
},
{
"name": "Python",
"bytes": "74485"
}
],
"symlink_target": ""
}
|
from .health_evaluation import HealthEvaluation
class ReplicaHealthEvaluation(HealthEvaluation):
"""Represents health evaluation for a replica, containing information about
the data and the algorithm used by health store to evaluate health. The
evaluation is returned only when the aggregated health state is either
Error or Warning.
:param aggregated_health_state: Possible values include: 'Invalid', 'Ok',
'Warning', 'Error', 'Unknown'
:type aggregated_health_state: str
:param description: Description of the health evaluation, which
represents a summary of the evaluation process.
:type description: str
:param Kind: Polymorphic Discriminator
:type Kind: str
:param partition_id: Id of the partition to which the replica belongs.
:type partition_id: str
:param replica_or_instance_id: Id of the stateful service replica or the
stateless service instance.
:type replica_or_instance_id: str
:param unhealthy_evaluations: List of unhealthy evaluations that led to
the current aggregated health state of the replica. The types of the
unhealthy evaluations can be EventHealthEvaluation.
:type unhealthy_evaluations: list of :class:`HealthEvaluationWrapper
<azure.servicefabric.models.HealthEvaluationWrapper>`
"""
_validation = {
'Kind': {'required': True},
}
_attribute_map = {
'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
'description': {'key': 'Description', 'type': 'str'},
'Kind': {'key': 'Kind', 'type': 'str'},
'partition_id': {'key': 'PartitionId', 'type': 'str'},
'replica_or_instance_id': {'key': 'ReplicaOrInstanceId', 'type': 'str'},
'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
}
def __init__(self, aggregated_health_state=None, description=None, partition_id=None, replica_or_instance_id=None, unhealthy_evaluations=None):
super(ReplicaHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description)
self.partition_id = partition_id
self.replica_or_instance_id = replica_or_instance_id
self.unhealthy_evaluations = unhealthy_evaluations
self.Kind = 'Replica'
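# A minimal instantiation sketch (all values below are hypothetical):
#   evaluation = ReplicaHealthEvaluation(
#       aggregated_health_state='Warning',
#       description='Replica is reporting errors.',
#       partition_id='5f0d7b3a-2d9e-4f34-9c1a-0123456789ab',
#       replica_or_instance_id='1',
#       unhealthy_evaluations=[])
#   assert evaluation.Kind == 'Replica'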
|
{
"content_hash": "335113c15c1e770c37335c4f43bc2617",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 147,
"avg_line_length": 48.270833333333336,
"alnum_prop": 0.6983167889512301,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "db1c4349529985b7c2cf37a57c55dfd372da326c",
"size": "2791",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-servicefabric/azure/servicefabric/models/replica_health_evaluation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
}
|
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import unittest
import datetime
import broker_backtesting_test
from pyalgotrade import broker
from pyalgotrade.broker import fillstrategy
from pyalgotrade.broker import backtesting
from pyalgotrade import bar
class BaseTestCase(unittest.TestCase):
TestInstrument = "orcl"
class FreeFunctionsTestCase(BaseTestCase):
def testStopOrderTriggerBuy(self):
barsBuilder = broker_backtesting_test.BarsBuilder(BaseTestCase.TestInstrument, bar.Frequency.MINUTE)
# Bar is below
self.assertEqual(fillstrategy.get_stop_price_trigger(broker.Order.Action.BUY, 10, False, barsBuilder.nextBar(5, 5, 5, 5)), None)
self.assertEqual(fillstrategy.get_stop_price_trigger(broker.Order.Action.BUY, 10, False, barsBuilder.nextBar(5, 6, 4, 5)), None)
# High touches
self.assertEqual(fillstrategy.get_stop_price_trigger(broker.Order.Action.BUY, 10, False, barsBuilder.nextBar(5, 10, 4, 9)), 10)
# High penetrates
self.assertEqual(fillstrategy.get_stop_price_trigger(broker.Order.Action.BUY, 10, False, barsBuilder.nextBar(5, 11, 4, 9)), 10)
# Open touches
self.assertEqual(fillstrategy.get_stop_price_trigger(broker.Order.Action.BUY, 10, False, barsBuilder.nextBar(10, 10, 10, 10)), 10)
# Open is above
self.assertEqual(fillstrategy.get_stop_price_trigger(broker.Order.Action.BUY, 10, False, barsBuilder.nextBar(11, 12, 4, 9)), 11)
# Bar gaps above
self.assertEqual(fillstrategy.get_stop_price_trigger(broker.Order.Action.BUY, 10, False, barsBuilder.nextBar(12, 13, 11, 12)), 12)
def testStopOrderTriggerSell(self):
barsBuilder = broker_backtesting_test.BarsBuilder(BaseTestCase.TestInstrument, bar.Frequency.MINUTE)
# Bar is above
self.assertEqual(fillstrategy.get_stop_price_trigger(broker.Order.Action.SELL, 10, False, barsBuilder.nextBar(15, 15, 15, 15)), None)
self.assertEqual(fillstrategy.get_stop_price_trigger(broker.Order.Action.SELL, 10, False, barsBuilder.nextBar(15, 16, 11, 15)), None)
# Low touches
self.assertEqual(fillstrategy.get_stop_price_trigger(broker.Order.Action.SELL, 10, False, barsBuilder.nextBar(15, 16, 10, 11)), 10)
# Low penetrates
self.assertEqual(fillstrategy.get_stop_price_trigger(broker.Order.Action.SELL, 10, False, barsBuilder.nextBar(15, 16, 9, 11)), 10)
# Open touches
self.assertEqual(fillstrategy.get_stop_price_trigger(broker.Order.Action.SELL, 10, False, barsBuilder.nextBar(10, 10, 10, 10)), 10)
# Open is below
self.assertEqual(fillstrategy.get_stop_price_trigger(broker.Order.Action.SELL, 10, False, barsBuilder.nextBar(9, 12, 4, 9)), 9)
# Bar gaps below
self.assertEqual(fillstrategy.get_stop_price_trigger(broker.Order.Action.SELL, 10, False, barsBuilder.nextBar(8, 9, 6, 9)), 8)
def testLimitOrderTriggerBuy(self):
barsBuilder = broker_backtesting_test.BarsBuilder(BaseTestCase.TestInstrument, bar.Frequency.MINUTE)
# Bar is above
self.assertEqual(fillstrategy.get_limit_price_trigger(broker.Order.Action.BUY, 10, False, barsBuilder.nextBar(15, 15, 15, 15)), None)
self.assertEqual(fillstrategy.get_limit_price_trigger(broker.Order.Action.BUY, 10, False, barsBuilder.nextBar(15, 16, 11, 15)), None)
# Low touches
self.assertEqual(fillstrategy.get_limit_price_trigger(broker.Order.Action.BUY, 10, False, barsBuilder.nextBar(15, 16, 10, 11)), 10)
# Low penetrates
self.assertEqual(fillstrategy.get_limit_price_trigger(broker.Order.Action.BUY, 10, False, barsBuilder.nextBar(15, 16, 9, 11)), 10)
# Open touches
self.assertEqual(fillstrategy.get_limit_price_trigger(broker.Order.Action.BUY, 10, False, barsBuilder.nextBar(10, 10, 10, 10)), 10)
# Open is below
self.assertEqual(fillstrategy.get_limit_price_trigger(broker.Order.Action.BUY, 10, False, barsBuilder.nextBar(9, 12, 4, 9)), 9)
# Bar gaps below
self.assertEqual(fillstrategy.get_limit_price_trigger(broker.Order.Action.BUY, 10, False, barsBuilder.nextBar(8, 9, 6, 9)), 8)
def testLimitOrderTriggerSell(self):
barsBuilder = broker_backtesting_test.BarsBuilder(BaseTestCase.TestInstrument, bar.Frequency.MINUTE)
# Bar is below
self.assertEqual(fillstrategy.get_limit_price_trigger(broker.Order.Action.SELL, 10, False, barsBuilder.nextBar(5, 5, 5, 5)), None)
self.assertEqual(fillstrategy.get_limit_price_trigger(broker.Order.Action.SELL, 10, False, barsBuilder.nextBar(5, 6, 4, 5)), None)
# High touches
self.assertEqual(fillstrategy.get_limit_price_trigger(broker.Order.Action.SELL, 10, False, barsBuilder.nextBar(5, 10, 4, 9)), 10)
# High penetrates
self.assertEqual(fillstrategy.get_limit_price_trigger(broker.Order.Action.SELL, 10, False, barsBuilder.nextBar(5, 11, 4, 9)), 10)
# Open touches
self.assertEqual(fillstrategy.get_limit_price_trigger(broker.Order.Action.SELL, 10, False, barsBuilder.nextBar(10, 10, 10, 10)), 10)
# Open is above
self.assertEqual(fillstrategy.get_limit_price_trigger(broker.Order.Action.SELL, 10, False, barsBuilder.nextBar(11, 12, 4, 9)), 11)
# Bar gaps above
self.assertEqual(fillstrategy.get_limit_price_trigger(broker.Order.Action.SELL, 10, False, barsBuilder.nextBar(12, 13, 11, 12)), 12)
class DefaultStrategyTestCase(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.barsBuilder = broker_backtesting_test.BarsBuilder(BaseTestCase.TestInstrument, bar.Frequency.MINUTE)
self.strategy = fillstrategy.DefaultStrategy()
def __getFilledMarketOrder(self, quantity, price):
order = backtesting.MarketOrder(
broker.Order.Action.BUY,
BaseTestCase.TestInstrument,
quantity,
False,
broker.IntegerTraits()
)
order.setState(broker.Order.State.ACCEPTED)
order.addExecutionInfo(broker.OrderExecutionInfo(price, quantity, 0, datetime.datetime.now()))
return order
def testVolumeLimitPerBar(self):
volume = 100
self.strategy.onBars(None, self.barsBuilder.nextBars(11, 12, 4, 9, volume))
self.assertEquals(self.strategy.getVolumeLeft()[BaseTestCase.TestInstrument], 25)
self.assertEquals(self.strategy.getVolumeUsed()[BaseTestCase.TestInstrument], 0)
self.strategy.onOrderFilled(None, self.__getFilledMarketOrder(24, 11))
self.assertEquals(self.strategy.getVolumeLeft()[BaseTestCase.TestInstrument], 1)
self.assertEquals(self.strategy.getVolumeUsed()[BaseTestCase.TestInstrument], 24)
with self.assertRaisesRegexp(Exception, "Invalid fill quantity. Not enough volume left 1"):
self.strategy.onOrderFilled(None, self.__getFilledMarketOrder(25, 11))
self.assertEquals(self.strategy.getVolumeLeft()[BaseTestCase.TestInstrument], 1)
self.assertEquals(self.strategy.getVolumeUsed()[BaseTestCase.TestInstrument], 24)
|
{
"content_hash": "a172c02036ab9f59ed650c20b55d65c1",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 141,
"avg_line_length": 60.85470085470085,
"alnum_prop": 0.7116573033707865,
"repo_name": "cgqyh/pyalgotrade-mod",
"id": "21d6f1921311a29cfc43cdff8ae431a074640caa",
"size": "7734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testcases/fill_strategy_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1066824"
},
{
"name": "Shell",
"bytes": "504"
}
],
"symlink_target": ""
}
|
import sys
import os
import unittest
from copow.lib import powlib
class TestVersion(unittest.TestCase):
def setUp(self):
"""Setup everything needed for the test (should be as few as possible)"""
return
def testFirst(self):
"""test 1st says: 1st specify and fail, then implement and pass."""
assert False
return
def tearDown(self):
"""cleanup everything """
return
|
{
"content_hash": "02fc8a4f6ac38e09dd496f75d2dd151d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 81,
"avg_line_length": 21.363636363636363,
"alnum_prop": 0.5914893617021276,
"repo_name": "pythononwheels/copow",
"id": "3a8a5733f25ba5a3003a4d810b0797252fac469e",
"size": "706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/models/testVersion.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4906"
},
{
"name": "JavaScript",
"bytes": "1487"
},
{
"name": "Python",
"bytes": "196068"
}
],
"symlink_target": ""
}
|
""" Domain expression processing
The main duty of this module is to compile a domain expression into a
SQL query. A lot of things should be documented here, but as a first
step in the right direction, some tests in test_osv_expression.yml
might give you some additional information.
For legacy reasons, a domain uses an inconsistent two-level abstract
syntax (domains are regular Python data structures). At the first
level, a domain is an expression made of terms (sometimes called
leaves) and (domain) operators used in prefix notation. The available
operators at this level are '!', '&', and '|'. '!' is a unary 'not',
'&' is a binary 'and', and '|' is a binary 'or'. For instance, here
is a possible domain. (<term> stands for an arbitrary term, more on
this later.)::
['&', '!', <term1>, '|', <term2>, <term3>]
It is equivalent to this pseudo code using infix notation::
(not <term1>) and (<term2> or <term3>)
The second level of syntax deals with the term representation. A term
is a triple of the form (left, operator, right). That is, a term uses
an infix notation, and the available operators, and possible left and
right operands differ with those of the previous level. Here is a
possible term::
('company_id.name', '=', 'OpenERP')
The left and right operand don't have the same possible values. The
left operand is a field name (related to the model for which the domain
applies). Actually, the field name can use the dot-notation to
traverse relationships. The right operand is a Python value whose
type should match the used operator and field type. In the above
example, a string is used because the name field of a company has type
string, and because we use the '=' operator. When appropriate, a 'in'
operator can be used, and thus the right operand should be a list.
Note: the non-uniform syntax could have been more uniform, but this
would hide an important limitation of the domain syntax. Say that the
term representation was ['=', 'company_id.name', 'OpenERP']. Used in a
complete domain, this would look like::
['!', ['=', 'company_id.name', 'OpenERP']]
and you would be tempted to believe something like this would be
possible::
['!', ['=', 'company_id.name', ['&', ..., ...]]]
That is, a domain could be a valid operand. But this is not the
case. A domain is really limited to a two-level nature, and cannot
take a recursive form: a domain is not a valid second-level operand.
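As a complete illustration (the field names and values here are hypothetical),
the pseudo code ``(not <term1>) and (<term2> or <term3>)`` shown above could be
written as the following domain::
    ['&', '!', ('state', '=', 'draft'),
     '|', ('user_id', '=', 4), ('partner_id', 'in', [1, 2])]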
Unaccent - Accent-insensitive search
OpenERP will use the SQL function 'unaccent' when available for the
'ilike' and 'not ilike' operators, and enabled in the configuration.
Normally the 'unaccent' function is obtained from `the PostgreSQL
'unaccent' contrib module
<http://developer.postgresql.org/pgdocs/postgres/unaccent.html>`_.
.. todo: The following explanation should be moved in some external
installation guide
The steps to install the module might differ on specific PostgreSQL
versions. We give here some instruction for PostgreSQL 9.x on a
Ubuntu system.
Ubuntu doesn't come yet with PostgreSQL 9.x, so an alternative package
source is used. We use Martin Pitt's PPA available at
`ppa:pitti/postgresql
<https://launchpad.net/~pitti/+archive/postgresql>`_.
.. code-block:: sh
> sudo add-apt-repository ppa:pitti/postgresql
> sudo apt-get update
Once the package list is up-to-date, you have to install PostgreSQL
9.0 and its contrib modules.
.. code-block:: sh
> sudo apt-get install postgresql-9.0 postgresql-contrib-9.0
When you want to enable unaccent on some database:
.. code-block:: sh
> psql9 <database> -f /usr/share/postgresql/9.0/contrib/unaccent.sql
Here :program:`psql9` is an alias for the newly installed PostgreSQL
9.0 tool, together with the correct port if necessary (for instance if
PostgreSQL 8.4 is running on 5432). (Other aliases can be used for
createdb and dropdb.)
.. code-block:: sh
> alias psql9='/usr/lib/postgresql/9.0/bin/psql -p 5433'
You can check unaccent is working:
.. code-block:: sh
> psql9 <database> -c"select unaccent('hélène')"
Finally, to instruct OpenERP to really use the unaccent function, you have to
start the server specifying the ``--unaccent`` flag.
"""
import logging
import traceback
import openerp.modules
from openerp.osv import fields
from openerp.osv.orm import MAGIC_COLUMNS
import openerp.tools as tools
#.apidoc title: Domain Expressions
# Domain operators.
NOT_OPERATOR = '!'
OR_OPERATOR = '|'
AND_OPERATOR = '&'
DOMAIN_OPERATORS = (NOT_OPERATOR, OR_OPERATOR, AND_OPERATOR)
# List of available term operators. It is also possible to use the '<>'
# operator, which is strictly the same as '!='; the latter should be preferred
# for consistency. This list doesn't contain '<>' as it is simplified to '!='
# by the normalize_leaf() function (so later parts of the code deal with
# only one representation).
# Internals (i.e. not available to the user) 'inselect' and 'not inselect'
# operators are also used. In this case its right operand has the form (subselect, params).
TERM_OPERATORS = ('=', '!=', '<=', '<', '>', '>=', '=?', '=like', '=ilike',
'like', 'not like', 'ilike', 'not ilike', 'in', 'not in',
'child_of')
# A subset of the above operators, with a 'negative' semantic. When the
# expressions 'in NEGATIVE_TERM_OPERATORS' or 'not in NEGATIVE_TERM_OPERATORS' are used in the code
# below, this doesn't necessarily mean that any of those NEGATIVE_TERM_OPERATORS is
# legal in the processed term.
NEGATIVE_TERM_OPERATORS = ('!=', 'not like', 'not ilike', 'not in')
TRUE_LEAF = (1, '=', 1)
FALSE_LEAF = (0, '=', 1)
TRUE_DOMAIN = [TRUE_LEAF]
FALSE_DOMAIN = [FALSE_LEAF]
_logger = logging.getLogger(__name__)
# --------------------------------------------------
# Generic domain manipulation
# --------------------------------------------------
def normalize_domain(domain):
"""Returns a normalized version of ``domain_expr``, where all implicit '&' operators
have been made explicit. One property of normalized domain expressions is that they
can be easily combined together as if they were single domain components.
"""
assert isinstance(domain, (list, tuple)), "Domains to normalize must have a 'domain' form: a list or tuple of domain components"
if not domain:
return TRUE_DOMAIN
result = []
expected = 1 # expected number of expressions
op_arity = {NOT_OPERATOR: 1, AND_OPERATOR: 2, OR_OPERATOR: 2}
for token in domain:
if expected == 0: # more than expected, like in [A, B]
result[0:0] = [AND_OPERATOR] # put an extra '&' in front
expected = 1
result.append(token)
if isinstance(token, (list, tuple)): # domain term
expected -= 1
else:
expected += op_arity.get(token, 0) - 1
assert expected == 0, 'This domain is syntactically not correct: %s' % (domain)
return result
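# A minimal sketch of normalize_domain() behaviour (field names are
# hypothetical): the implicit AND between consecutive terms is made explicit.
#   normalize_domain([('name', '=', 'foo'), ('lang', '=', 'en_US')])
#   -> ['&', ('name', '=', 'foo'), ('lang', '=', 'en_US')]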
def combine(operator, unit, zero, domains):
"""Returns a new domain expression where all domain components from ``domains``
have been added together using the binary operator ``operator``. The given
domains must be normalized.
:param unit: the identity element of the domains "set" with regard to the operation
performed by ``operator``, i.e the domain component ``i`` which, when
combined with any domain ``x`` via ``operator``, yields ``x``.
E.g. [(1,'=',1)] is the typical unit for AND_OPERATOR: adding it
to any domain component gives the same domain.
:param zero: the absorbing element of the domains "set" with regard to the operation
performed by ``operator``, i.e the domain component ``z`` which, when
combined with any domain ``x`` via ``operator``, yields ``z``.
E.g. [(1,'=',1)] is the typical zero for OR_OPERATOR: as soon as
you see it in a domain component the resulting domain is the zero.
:param domains: a list of normalized domains.
"""
result = []
count = 0
for domain in domains:
if domain == unit:
continue
if domain == zero:
return zero
if domain:
result += domain
count += 1
result = [operator] * (count - 1) + result
return result
def AND(domains):
"""AND([D1,D2,...]) returns a domain representing D1 and D2 and ... """
return combine(AND_OPERATOR, TRUE_DOMAIN, FALSE_DOMAIN, domains)
def OR(domains):
"""OR([D1,D2,...]) returns a domain representing D1 or D2 or ... """
return combine(OR_OPERATOR, FALSE_DOMAIN, TRUE_DOMAIN, domains)
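# A minimal sketch of OR() (field names are hypothetical); the operands must
# already be normalized domains:
#   OR([[('state', '=', 'draft')], [('state', '=', 'open')]])
#   -> ['|', ('state', '=', 'draft'), ('state', '=', 'open')]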
def distribute_not(domain):
""" Distribute any '!' domain operators found inside a normalized domain.
    Because we don't use SQL semantics for processing a 'left not in right'
    query (i.e. our 'not in' is not simply translated to a SQL 'not in'),
    a '! left in right' cannot simply be processed
    by __leaf_to_sql by first emitting code for 'left in right' and then wrapping
the result with 'not (...)', as it would result in a 'not in' at the SQL
level.
This function is thus responsible for pushing any '!' domain operators
inside the terms themselves. For example::
['!','&',('user_id','=',4),('partner_id','in',[1,2])]
will be turned into:
['|',('user_id','!=',4),('partner_id','not in',[1,2])]
"""
def negate(leaf):
"""Negates and returns a single domain leaf term,
using the opposite operator if possible"""
left, operator, right = leaf
mapping = {
'<': '>=',
'>': '<=',
'<=': '>',
'>=': '<',
'=': '!=',
'!=': '=',
}
if operator in ('in', 'like', 'ilike'):
operator = 'not ' + operator
return [(left, operator, right)]
if operator in ('not in', 'not like', 'not ilike'):
operator = operator[4:]
return [(left, operator, right)]
if operator in mapping:
operator = mapping[operator]
return [(left, operator, right)]
return [NOT_OPERATOR, (left, operator, right)]
def distribute_negate(domain):
"""Negate the domain ``subtree`` rooted at domain[0],
leaving the rest of the domain intact, and return
(negated_subtree, untouched_domain_rest)
"""
if is_leaf(domain[0]):
return negate(domain[0]), domain[1:]
if domain[0] == AND_OPERATOR:
done1, todo1 = distribute_negate(domain[1:])
done2, todo2 = distribute_negate(todo1)
return [OR_OPERATOR] + done1 + done2, todo2
if domain[0] == OR_OPERATOR:
done1, todo1 = distribute_negate(domain[1:])
done2, todo2 = distribute_negate(todo1)
return [AND_OPERATOR] + done1 + done2, todo2
if not domain:
return []
if domain[0] != NOT_OPERATOR:
return [domain[0]] + distribute_not(domain[1:])
if domain[0] == NOT_OPERATOR:
done, todo = distribute_negate(domain[1:])
return done + distribute_not(todo)
# --------------------------------------------------
# Generic leaf manipulation
# --------------------------------------------------
def _quote(to_quote):
if '"' not in to_quote:
return '"%s"' % to_quote
return to_quote
def generate_table_alias(src_table_alias, joined_tables=[]):
""" Generate a standard table alias name. An alias is generated as following:
- the base is the source table name (that can already be an alias)
- then, each joined table is added in the alias using a 'link field name'
that is used to render unique aliases for a given path
- returns a tuple composed of the alias, and the full table alias to be
added in a from condition with quoting done
Examples:
        - src_table_alias='res_users', joined_tables=[]:
            alias = ('res_users', '"res_users"')
        - src_table_alias='res_users', joined_tables=[('res_partner', 'parent_id')]:
            alias = ('res_users__parent_id', '"res_partner" as "res_users__parent_id"')
        :param string src_table_alias: source table name (that can already be an alias)
:param list joined_tables: list of tuples
(dst_model, link_field)
:return tuple: (table_alias, alias statement for from clause with quotes added)
"""
alias = src_table_alias
if not joined_tables:
return '%s' % alias, '%s' % _quote(alias)
for link in joined_tables:
alias += '__' + link[1]
assert len(alias) < 64, 'Table alias name %s is longer than the 64 characters size accepted by default in postgresql.' % alias
return '%s' % alias, '%s as %s' % (_quote(joined_tables[-1][0]), _quote(alias))
def get_alias_from_query(from_query):
""" :param string from_query: is something like :
- '"res_partner"' OR
- '"res_partner" as "res_users__partner_id"''
"""
from_splitted = from_query.split(' as ')
if len(from_splitted) > 1:
return from_splitted[0].replace('"', ''), from_splitted[1].replace('"', '')
else:
return from_splitted[0].replace('"', ''), from_splitted[0].replace('"', '')
def normalize_leaf(element):
""" Change a term's operator to some canonical form, simplifying later
processing. """
if not is_leaf(element):
return element
left, operator, right = element
original = operator
operator = operator.lower()
if operator == '<>':
operator = '!='
if isinstance(right, bool) and operator in ('in', 'not in'):
_logger.warning("The domain term '%s' should use the '=' or '!=' operator." % ((left, original, right),))
operator = '=' if operator == 'in' else '!='
if isinstance(right, (list, tuple)) and operator in ('=', '!='):
_logger.warning("The domain term '%s' should use the 'in' or 'not in' operator." % ((left, original, right),))
operator = 'in' if operator == '=' else 'not in'
return left, operator, right
def is_operator(element):
""" Test whether an object is a valid domain operator. """
return isinstance(element, basestring) and element in DOMAIN_OPERATORS
def is_leaf(element, internal=False):
""" Test whether an object is a valid domain term:
- is a list or tuple
- with 3 elements
        - second element is a valid operator
:param tuple element: a leaf in form (left, operator, right)
:param boolean internal: allow or not the 'inselect' internal operator
in the term. This should be always left to False.
Note: OLD TODO change the share wizard to use this function.
"""
INTERNAL_OPS = TERM_OPERATORS + ('<>',)
if internal:
INTERNAL_OPS += ('inselect', 'not inselect')
return (isinstance(element, tuple) or isinstance(element, list)) \
and len(element) == 3 \
and element[1] in INTERNAL_OPS \
and ((isinstance(element[0], basestring) and element[0])
or element in (TRUE_LEAF, FALSE_LEAF))
# --------------------------------------------------
# SQL utils
# --------------------------------------------------
def select_from_where(cr, select_field, from_table, where_field, where_ids, where_operator):
# todo: merge into parent query as sub-query
res = []
if where_ids:
if where_operator in ['<', '>', '>=', '<=']:
cr.execute('SELECT "%s" FROM "%s" WHERE "%s" %s %%s' % \
(select_field, from_table, where_field, where_operator),
(where_ids[0],)) # TODO shouldn't this be min/max(where_ids) ?
res = [r[0] for r in cr.fetchall()]
else: # TODO where_operator is supposed to be 'in'? It is called with child_of...
for i in range(0, len(where_ids), cr.IN_MAX):
subids = where_ids[i:i + cr.IN_MAX]
cr.execute('SELECT "%s" FROM "%s" WHERE "%s" IN %%s' % \
(select_field, from_table, where_field), (tuple(subids),))
res.extend([r[0] for r in cr.fetchall()])
return res
def select_distinct_from_where_not_null(cr, select_field, from_table):
cr.execute('SELECT distinct("%s") FROM "%s" where "%s" is not null' % (select_field, from_table, select_field))
return [r[0] for r in cr.fetchall()]
# --------------------------------------------------
# ExtendedLeaf class for managing leafs and contexts
# -------------------------------------------------
class ExtendedLeaf(object):
""" Class wrapping a domain leaf, and giving some services and management
    features on it. In particular it manages join contexts to be able to
construct queries through multiple models.
"""
# --------------------------------------------------
# Join / Context manipulation
# running examples:
# - res_users.name, like, foo: name is on res_partner, not on res_users
# - res_partner.bank_ids.name, like, foo: bank_ids is a one2many with _auto_join
# - res_partner.state_id.name, like, foo: state_id is a many2one with _auto_join
# A join:
# - link between src_table and dst_table, using src_field and dst_field
# i.e.: inherits: res_users.partner_id = res_partner.id
# i.e.: one2many: res_partner.id = res_partner_bank.partner_id
# i.e.: many2one: res_partner.state_id = res_country_state.id
# - done in the context of a field
# i.e.: inherits: 'partner_id'
# i.e.: one2many: 'bank_ids'
# i.e.: many2one: 'state_id'
# - table names use aliases: initial table followed by the context field
# names, joined using a '__'
# i.e.: inherits: res_partner as res_users__partner_id
# i.e.: one2many: res_partner_bank as res_partner__bank_ids
# i.e.: many2one: res_country_state as res_partner__state_id
# - join condition use aliases
# i.e.: inherits: res_users.partner_id = res_users__partner_id.id
# i.e.: one2many: res_partner.id = res_partner__bank_ids.partner_id
# i.e.: many2one: res_partner.state_id = res_partner__state_id.id
# Variables explanation:
# - src_table: working table before the join
# -> res_users, res_partner, res_partner
# - dst_table: working table after the join
# -> res_partner, res_partner_bank, res_country_state
# - src_table_link_name: field name used to link the src table, not
# necessarily a field (because 'id' is not a field instance)
# i.e.: inherits: 'partner_id', found in the inherits of the current table
# i.e.: one2many: 'id', not a field
# i.e.: many2one: 'state_id', the current field name
# - dst_table_link_name: field name used to link the dst table, not
# necessarily a field (because 'id' is not a field instance)
# i.e.: inherits: 'id', not a field
# i.e.: one2many: 'partner_id', _fields_id of the current field
# i.e.: many2one: 'id', not a field
# - context_field_name: field name used as a context to make the alias
# i.e.: inherits: 'partner_id': found in the inherits of the current table
# i.e.: one2many: 'bank_ids': current field name
# i.e.: many2one: 'state_id': current field name
# --------------------------------------------------
def __init__(self, leaf, model, join_context=None):
""" Initialize the ExtendedLeaf
:attr [string, tuple] leaf: operator or tuple-formatted domain
expression
:attr obj model: current working model
:attr list _models: list of chained models, updated when
adding joins
:attr list join_context: list of join contexts. This is a list of
tuples like ``(lhs, table, lhs_col, col, link)``
where
lhs
source (left hand) model
model
destination (right hand) model
lhs_col
source model column for join condition
col
destination model column for join condition
link
link column between source and destination model
that is not necessarily (but generally) a real column used
in the condition (i.e. in many2one); this link is used to
compute aliases
"""
assert model, 'Invalid leaf creation without table'
self.join_context = join_context or []
self.leaf = leaf
# normalize the leaf's operator
self.normalize_leaf()
# set working variables; handle the context stack and previous tables
self.model = model
self._models = []
for item in self.join_context:
self._models.append(item[0])
self._models.append(model)
# check validity
self.check_leaf()
def __str__(self):
return '<osv.ExtendedLeaf: %s on %s (ctx: %s)>' % (str(self.leaf), self.model._table, ','.join(self._get_context_debug()))
def generate_alias(self):
links = [(context[1]._table, context[4]) for context in self.join_context]
alias, alias_statement = generate_table_alias(self._models[0]._table, links)
return alias
def add_join_context(self, model, lhs_col, table_col, link):
""" See above comments for more details. A join context is a tuple like:
``(lhs, model, lhs_col, col, link)``
After adding the join, the model of the current leaf is updated.
"""
self.join_context.append((self.model, model, lhs_col, table_col, link))
self._models.append(model)
self.model = model
def get_join_conditions(self):
conditions = []
alias = self._models[0]._table
for context in self.join_context:
previous_alias = alias
alias += '__' + context[4]
conditions.append('"%s"."%s"="%s"."%s"' % (previous_alias, context[2], alias, context[3]))
return conditions
def get_tables(self):
tables = set()
links = []
for context in self.join_context:
links.append((context[1]._table, context[4]))
alias, alias_statement = generate_table_alias(self._models[0]._table, links)
tables.add(alias_statement)
return tables
def _get_context_debug(self):
names = ['"%s"."%s"="%s"."%s" (%s)' % (item[0]._table, item[2], item[1]._table, item[3], item[4]) for item in self.join_context]
return names
# --------------------------------------------------
# Leaf manipulation
# --------------------------------------------------
def check_leaf(self):
""" Leaf validity rules:
- a valid leaf is an operator or a leaf
- a valid leaf has a field object unless
- it is not a tuple
- it is an inherited field
- left is id, operator is 'child_of'
- left is in MAGIC_COLUMNS
"""
if not is_operator(self.leaf) and not is_leaf(self.leaf, True):
raise ValueError("Invalid leaf %s" % str(self.leaf))
def is_operator(self):
return is_operator(self.leaf)
def is_true_leaf(self):
return self.leaf == TRUE_LEAF
def is_false_leaf(self):
return self.leaf == FALSE_LEAF
def is_leaf(self, internal=False):
return is_leaf(self.leaf, internal=internal)
def normalize_leaf(self):
self.leaf = normalize_leaf(self.leaf)
return True
def create_substitution_leaf(leaf, new_elements, new_model=None):
""" From a leaf, create a new leaf (based on the new_elements tuple
and new_model), that will have the same join context. Used to
insert equivalent leafs in the processing stack. """
if new_model is None:
new_model = leaf.model
new_join_context = [tuple(context) for context in leaf.join_context]
new_leaf = ExtendedLeaf(new_elements, new_model, join_context=new_join_context)
return new_leaf
class expression(object):
""" Parse a domain expression
Uses real Polish (prefix) notation
Leafs are still in a ('foo', '=', 'bar') format
For more info: http://christophe-simonis-at-tiny.blogspot.com/2008/08/new-new-domain-notation.html
"""
def __init__(self, cr, uid, exp, table, context):
""" Initialize expression object and automatically parse the expression
right after initialization.
:param exp: expression (using the domain format, e.g. ('foo', '=', 'bar'))
:param table: root model
:attr list result: list that will hold the result of the parsing
as a list of ExtendedLeaf
:attr list joins: list of join conditions, such as
(res_country_state."id" = res_partner."state_id")
:attr root_model: base model for the query
:attr list expression: the domain expression, that will be normalized
and prepared
"""
self.has_unaccent = openerp.modules.registry.RegistryManager.get(cr.dbname).has_unaccent
self.joins = []
self.root_model = table
# normalize and prepare the expression for parsing
self.expression = distribute_not(normalize_domain(exp))
# parse the domain expression
self.parse(cr, uid, context=context)
# ----------------------------------------
# Leafs management
# ----------------------------------------
def get_tables(self):
""" Returns the list of tables for SQL queries, like select from ... """
tables = []
for leaf in self.result:
for table in leaf.get_tables():
if table not in tables:
tables.append(table)
table_name = _quote(self.root_model._table)
if table_name not in tables:
tables.append(table_name)
return tables
# ----------------------------------------
# Parsing
# ----------------------------------------
def parse(self, cr, uid, context):
""" Transform the leaves of the expression
The principle is to pop elements from a leaf stack one at a time.
Each leaf is processed. The processing is a if/elif list of various
cases that appear in the leafs (many2one, function fields, ...).
Two things can happen as a processing result:
- the leaf has been modified and/or new leafs have to be introduced
in the expression; they are pushed into the leaf stack, to be
processed right after
- the leaf is added to the result
Some internal var explanation:
:var obj working_model: model object, model containing the field
(the name provided in the left operand)
:var list field_path: left operand seen as a path (foo.bar -> [foo, bar])
:var obj relational_model: relational model of a field (field._obj)
ex: res_partner.bank_ids -> res.partner.bank
"""
def to_ids(value, relational_model, context=None, limit=None):
""" Normalize a single id or name, or a list of those, into a list of ids
:param {int,long,basestring,list,tuple} value:
if int, long -> return [value]
if basestring, convert it into a list of basestrings, then
if list of basestring ->
perform a name_search on relational_model for each name
return the list of related ids
"""
names = []
if isinstance(value, basestring):
names = [value]
elif value and isinstance(value, (tuple, list)) and all(isinstance(item, basestring) for item in value):
names = value
elif isinstance(value, (int, long)):
return [value]
if names:
name_get_list = [name_get[0] for name in names for name_get in relational_model.name_search(cr, uid, name, [], 'ilike', context=context, limit=limit)]
return list(set(name_get_list))
return list(value)
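# Illustrative sketch (added; not part of the original source). Assuming a
# hypothetical partner "Agrolait" whose name_search() returns id 7:
# >>> to_ids(7, res_partner_model, context)
# [7]
# >>> to_ids('Agrolait', res_partner_model, context)
# [7]
# >>> to_ids([1, 2, 3], res_partner_model, context)
# [1, 2, 3]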
def child_of_domain(left, ids, left_model, parent=None, prefix='', context=None):
""" Return a domain implementing the child_of operator for [(left,child_of,ids)],
either as a range using the parent_left/right tree lookup fields
(when available), or as an expanded [(left,in,child_ids)] """
if left_model._parent_store and (not left_model.pool._init):
# TODO: Improve where joins are implemented for many with '.', replace by:
# doms += ['&',(prefix+'.parent_left','<',o.parent_right),(prefix+'.parent_left','>=',o.parent_left)]
doms = []
for o in left_model.browse(cr, uid, ids, context=context):
if doms:
doms.insert(0, OR_OPERATOR)
doms += [AND_OPERATOR, ('parent_left', '<', o.parent_right), ('parent_left', '>=', o.parent_left)]
if prefix:
return [(left, 'in', left_model.search(cr, uid, doms, context=context))]
return doms
else:
def recursive_children(ids, model, parent_field):
if not ids:
return []
ids2 = model.search(cr, uid, [(parent_field, 'in', ids)], context=context)
return ids + recursive_children(ids2, model, parent_field)
return [(left, 'in', recursive_children(ids, left_model, parent or left_model._parent_name))]
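# Illustrative sketch (added; not part of the original source): for a
# hypothetical category tree where record 1 has children 2 and 3,
# child_of_domain('parent_id', [1], categ_model) returns
#     [('parent_id', 'in', [1, 2, 3])]
# when no parent_left/parent_right index is available, and an equivalent
# ['&', ('parent_left', '<', ...), ('parent_left', '>=', ...)] range domain
# when the model uses _parent_store.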
def pop():
""" Pop a leaf to process. """
return self.stack.pop()
def push(leaf):
""" Push a leaf to be processed right after. """
self.stack.append(leaf)
def push_result(leaf):
""" Push a leaf to the results. This leaf has been fully processed
and validated. """
self.result.append(leaf)
self.result = []
self.stack = [ExtendedLeaf(leaf, self.root_model) for leaf in self.expression]
# process from right to left; expression is from left to right
self.stack.reverse()
while self.stack:
# Get the next leaf to process
leaf = pop()
# Get working variables
working_model = leaf.model
if leaf.is_operator():
left, operator, right = leaf.leaf, None, None
elif leaf.is_true_leaf() or leaf.is_false_leaf():
# because we consider left as a string
left, operator, right = ('%s' % leaf.leaf[0], leaf.leaf[1], leaf.leaf[2])
else:
left, operator, right = leaf.leaf
field_path = left.split('.', 1)
field = working_model._columns.get(field_path[0])
if field and field._obj:
relational_model = working_model.pool.get(field._obj)
else:
relational_model = None
# ----------------------------------------
# SIMPLE CASE
# 1. leaf is an operator
# 2. leaf is a true/false leaf
# -> add directly to result
# ----------------------------------------
if leaf.is_operator() or leaf.is_true_leaf() or leaf.is_false_leaf():
push_result(leaf)
# ----------------------------------------
# FIELD NOT FOUND
# -> from inherits'd fields -> work on the related model, and add
# a join condition
# -> ('id', 'child_of', '..') -> use a 'to_ids'
# -> but if it is one of the _log_access special fields, add directly to
# result
# TODO: make these fields explicitly available in self.columns instead!
# -> else: crash
# ----------------------------------------
elif not field and field_path[0] in working_model._inherit_fields:
# comments about inherits'd fields
# { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
# field_column_obj, original_parent_model), ... }
next_model = working_model.pool.get(working_model._inherit_fields[field_path[0]][0])
leaf.add_join_context(next_model, working_model._inherits[next_model._name], 'id', working_model._inherits[next_model._name])
push(leaf)
elif left == 'id' and operator == 'child_of':
ids2 = to_ids(right, working_model, context)
dom = child_of_domain(left, ids2, working_model)
for dom_leaf in reversed(dom):
new_leaf = create_substitution_leaf(leaf, dom_leaf, working_model)
push(new_leaf)
elif not field and field_path[0] in MAGIC_COLUMNS:
push_result(leaf)
elif not field:
raise ValueError("Invalid field %r in leaf %r" % (left, str(leaf)))
# ----------------------------------------
# PATH SPOTTED
# -> many2one or one2many with _auto_join:
# - add a join, then jump into linked field: field.remaining on
# src_table is replaced by remaining on dst_table, and set for re-evaluation
# - if a domain is defined on the field, add it into evaluation
# on the relational table
# -> many2one, many2many, one2many: replace by an equivalent computed
# domain, given by recursively searching on the remaining of the path
# -> note: hack about fields.property should not be necessary anymore
# as after transforming the field, it will go through this loop once again
# ----------------------------------------
elif len(field_path) > 1 and field._type == 'many2one' and field._auto_join:
# res_partner.state_id = res_partner__state_id.id
leaf.add_join_context(relational_model, field_path[0], 'id', field_path[0])
push(create_substitution_leaf(leaf, (field_path[1], operator, right), relational_model))
elif len(field_path) > 1 and field._type == 'one2many' and field._auto_join:
# res_partner.id = res_partner__bank_ids.partner_id
leaf.add_join_context(relational_model, 'id', field._fields_id, field_path[0])
domain = field._domain(working_model) if callable(field._domain) else field._domain
push(create_substitution_leaf(leaf, (field_path[1], operator, right), relational_model))
if domain:
domain = normalize_domain(domain)
for elem in reversed(domain):
push(create_substitution_leaf(leaf, elem, relational_model))
push(create_substitution_leaf(leaf, AND_OPERATOR, relational_model))
elif len(field_path) > 1 and field._auto_join:
raise NotImplementedError('_auto_join attribute not supported on many2many field %s' % left)
elif len(field_path) > 1 and field._type == 'many2one':
right_ids = relational_model.search(cr, uid, [(field_path[1], operator, right)], context=context)
leaf.leaf = (field_path[0], 'in', right_ids)
push(leaf)
# Handle the case where the left operand is a path through an o2m or m2m field
elif len(field_path) > 1 and field._type in ['many2many', 'one2many']:
right_ids = relational_model.search(cr, uid, [(field_path[1], operator, right)], context=context)
table_ids = working_model.search(cr, uid, [(field_path[0], 'in', right_ids)], context=dict(context, active_test=False))
leaf.leaf = ('id', 'in', table_ids)
push(leaf)
# -------------------------------------------------
# FUNCTION FIELD
# -> not stored: error if no _fnct_search, otherwise handle the result domain
# -> stored: management done in the remaining of parsing
# -------------------------------------------------
elif isinstance(field, fields.function) and not field.store and not field._fnct_search:
# this is a function field that is not stored
# the function field doesn't provide a search function and doesn't store
# values in the database, so we must ignore it: we generate a dummy leaf
leaf.leaf = TRUE_LEAF
_logger.error(
"The field '%s' (%s) can not be searched: non-stored "
"function field without fnct_search",
field.string, left)
# avoid compiling stack trace if not needed
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(''.join(traceback.format_stack()))
push(leaf)
elif isinstance(field, fields.function) and not field.store:
# this is a function field that is not stored
fct_domain = field.search(cr, uid, working_model, left, [leaf.leaf], context=context)
if not fct_domain:
leaf.leaf = TRUE_LEAF
push(leaf)
else:
# we assume that the expression is valid
# we create a dummy leaf for forcing the parsing of the resulting expression
for domain_element in reversed(fct_domain):
push(create_substitution_leaf(leaf, domain_element, working_model))
# self.push(create_substitution_leaf(leaf, TRUE_LEAF, working_model))
# self.push(create_substitution_leaf(leaf, AND_OPERATOR, working_model))
# -------------------------------------------------
# RELATIONAL FIELDS
# -------------------------------------------------
# Applying recursivity on field(one2many)
elif field._type == 'one2many' and operator == 'child_of':
ids2 = to_ids(right, relational_model, context)
if field._obj != working_model._name:
dom = child_of_domain(left, ids2, relational_model, prefix=field._obj)
else:
dom = child_of_domain('id', ids2, working_model, parent=left)
for dom_leaf in reversed(dom):
push(create_substitution_leaf(leaf, dom_leaf, working_model))
elif field._type == 'one2many':
call_null = True
if right is not False:
if isinstance(right, basestring):
ids2 = [x[0] for x in relational_model.name_search(cr, uid, right, [], operator, context=context, limit=None)]
if ids2:
operator = 'in'
else:
if not isinstance(right, list):
ids2 = [right]
else:
ids2 = right
if not ids2:
if operator in ['like', 'ilike', 'in', '=']:
#no result found with given search criteria
call_null = False
push(create_substitution_leaf(leaf, FALSE_LEAF, working_model))
else:
ids2 = select_from_where(cr, field._fields_id, relational_model._table, 'id', ids2, operator)
if ids2:
call_null = False
o2m_op = 'not in' if operator in NEGATIVE_TERM_OPERATORS else 'in'
push(create_substitution_leaf(leaf, ('id', o2m_op, ids2), working_model))
if call_null:
o2m_op = 'in' if operator in NEGATIVE_TERM_OPERATORS else 'not in'
push(create_substitution_leaf(leaf, ('id', o2m_op, select_distinct_from_where_not_null(cr, field._fields_id, relational_model._table)), working_model))
elif field._type == 'many2many':
rel_table, rel_id1, rel_id2 = field._sql_names(working_model)
#FIXME
if operator == 'child_of':
def _rec_convert(ids):
if relational_model == working_model:
return ids
return select_from_where(cr, rel_id1, rel_table, rel_id2, ids, operator)
ids2 = to_ids(right, relational_model, context)
dom = child_of_domain('id', ids2, relational_model)
ids2 = relational_model.search(cr, uid, dom, context=context)
push(create_substitution_leaf(leaf, ('id', 'in', _rec_convert(ids2)), working_model))
else:
call_null_m2m = True
if right is not False:
if isinstance(right, basestring):
res_ids = [x[0] for x in relational_model.name_search(cr, uid, right, [], operator, context=context)]
if res_ids:
operator = 'in'
else:
if not isinstance(right, list):
res_ids = [right]
else:
res_ids = right
if not res_ids:
if operator in ['like', 'ilike', 'in', '=']:
#no result found with given search criteria
call_null_m2m = False
push(create_substitution_leaf(leaf, FALSE_LEAF, working_model))
else:
operator = 'in' # operator changed because ids are directly related to main object
else:
call_null_m2m = False
m2m_op = 'not in' if operator in NEGATIVE_TERM_OPERATORS else 'in'
push(create_substitution_leaf(leaf, ('id', m2m_op, select_from_where(cr, rel_id1, rel_table, rel_id2, res_ids, operator) or [0]), working_model))
if call_null_m2m:
m2m_op = 'in' if operator in NEGATIVE_TERM_OPERATORS else 'not in'
push(create_substitution_leaf(leaf, ('id', m2m_op, select_distinct_from_where_not_null(cr, rel_id1, rel_table)), working_model))
elif field._type == 'many2one':
if operator == 'child_of':
ids2 = to_ids(right, relational_model, context)
if field._obj != working_model._name:
dom = child_of_domain(left, ids2, relational_model, prefix=field._obj)
else:
dom = child_of_domain('id', ids2, working_model, parent=left)
for dom_leaf in reversed(dom):
push(create_substitution_leaf(leaf, dom_leaf, working_model))
else:
def _get_expression(relational_model, cr, uid, left, right, operator, context=None):
if context is None:
context = {}
c = context.copy()
c['active_test'] = False
# Special treatment for ill-formed domains
operator = (operator in ['<', '>', '<=', '>=']) and 'in' or operator
dict_op = {'not in': '!=', 'in': '=', '=': 'in', '!=': 'not in'}
if isinstance(right, tuple):
right = list(right)
if (not isinstance(right, list)) and operator in ['not in', 'in']:
operator = dict_op[operator]
elif isinstance(right, list) and operator in ['!=', '=']: # for domain (FIELD,'=',['value1','value2'])
operator = dict_op[operator]
res_ids = [x[0] for x in relational_model.name_search(cr, uid, right, [], operator, limit=None, context=c)]
if operator in NEGATIVE_TERM_OPERATORS:
res_ids.append(False) # TODO this should not be appended if False was in 'right'
return left, 'in', res_ids
# resolve string-based m2o criterion into IDs
if isinstance(right, basestring) or \
right and isinstance(right, (tuple, list)) and all(isinstance(item, basestring) for item in right):
push(create_substitution_leaf(leaf, _get_expression(relational_model, cr, uid, left, right, operator, context=context), working_model))
else:
# right == [] or right == False and all other cases are handled by __leaf_to_sql()
push_result(leaf)
# -------------------------------------------------
# OTHER FIELDS
# -> datetime fields: manage time part of the datetime
# field when it is not there
# -> manage translatable fields
# -------------------------------------------------
else:
if field._type == 'datetime' and right and len(right) == 10:
if operator in ('>', '>='):
right += ' 00:00:00'
elif operator in ('<', '<='):
right += ' 23:59:59'
push(create_substitution_leaf(leaf, (left, operator, right), working_model))
elif field.translate:
need_wildcard = operator in ('like', 'ilike', 'not like', 'not ilike')
sql_operator = {'=like': 'like', '=ilike': 'ilike'}.get(operator, operator)
if need_wildcard:
right = '%%%s%%' % right
inselect_operator = 'inselect'
if sql_operator in NEGATIVE_TERM_OPERATORS:
# negate operator (fix lp:1071710)
sql_operator = sql_operator[4:] if sql_operator[:3] == 'not' else '='
inselect_operator = 'not inselect'
subselect = '( SELECT res_id' \
' FROM ir_translation' \
' WHERE name = %s' \
' AND lang = %s' \
' AND type = %s'
instr = ' %s'
# Cover 'in'/'not in' operators with operand placeholders (%s,%s), etc.
if sql_operator == 'in':
instr = ','.join(['%s'] * len(right))
subselect += ' AND value ' + sql_operator + ' ' + " (" + instr + ")" \
') UNION (' \
' SELECT id' \
' FROM "' + working_model._table + '"' \
' WHERE "' + left + '" ' + sql_operator + ' ' + " (" + instr + "))"
else:
subselect += ' AND value ' + sql_operator + instr + \
') UNION (' \
' SELECT id' \
' FROM "' + working_model._table + '"' \
' WHERE "' + left + '" ' + sql_operator + instr + ")"
params = [working_model._name + ',' + left,
context.get('lang', False) or 'en_US',
'model',
right,
right,
]
push(create_substitution_leaf(leaf, ('id', inselect_operator, (subselect, params)), working_model))
else:
push_result(leaf)
# ----------------------------------------
# END OF PARSING FULL DOMAIN
# -> generate joins
# ----------------------------------------
joins = set()
for leaf in self.result:
joins |= set(leaf.get_join_conditions())
self.joins = list(joins)
def __leaf_to_sql(self, eleaf):
model = eleaf.model
leaf = eleaf.leaf
left, operator, right = leaf
# final sanity checks - should never fail
assert operator in (TERM_OPERATORS + ('inselect', 'not inselect')), \
"Invalid operator %r in domain term %r" % (operator, leaf)
assert leaf in (TRUE_LEAF, FALSE_LEAF) or left in model._all_columns \
or left in MAGIC_COLUMNS, "Invalid field %r in domain term %r" % (left, leaf)
table_alias = '"%s"' % (eleaf.generate_alias())
if leaf == TRUE_LEAF:
query = 'TRUE'
params = []
elif leaf == FALSE_LEAF:
query = 'FALSE'
params = []
elif operator == 'inselect':
query = '(%s."%s" in (%s))' % (table_alias, left, right[0])
params = right[1]
elif operator == 'not inselect':
query = '(%s."%s" not in (%s))' % (table_alias, left, right[0])
params = right[1]
elif operator in ['in', 'not in']:
# Two cases: right is a boolean or a list. The boolean case is an
# abuse and handled for backward compatibility.
if isinstance(right, bool):
_logger.warning("The domain term '%s' should use the '=' or '!=' operator." % (leaf,))
if operator == 'in':
r = 'NOT NULL' if right else 'NULL'
else:
r = 'NULL' if right else 'NOT NULL'
query = '(%s."%s" IS %s)' % (table_alias, left, r)
params = []
elif isinstance(right, (list, tuple)):
params = list(right)
check_nulls = False
for i in range(len(params))[::-1]:
if params[i] == False:
check_nulls = True
del params[i]
if params:
if left == 'id':
instr = ','.join(['%s'] * len(params))
else:
instr = ','.join([model._columns[left]._symbol_set[0]] * len(params))
query = '(%s."%s" %s (%s))' % (table_alias, left, operator, instr)
else:
# The case for (left, 'in', []) or (left, 'not in', []).
query = 'FALSE' if operator == 'in' else 'TRUE'
if check_nulls and operator == 'in':
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
elif not check_nulls and operator == 'not in':
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
elif check_nulls and operator == 'not in':
query = '(%s AND %s."%s" IS NOT NULL)' % (query, table_alias, left) # needed only for TRUE.
else: # Must not happen
raise ValueError("Invalid domain term %r" % (leaf,))
elif right == False and (left in model._columns) and model._columns[left]._type == "boolean" and (operator == '='):
query = '(%s."%s" IS NULL or %s."%s" = false )' % (table_alias, left, table_alias, left)
params = []
elif (right is False or right is None) and (operator == '='):
query = '%s."%s" IS NULL ' % (table_alias, left)
params = []
elif right == False and (left in model._columns) and model._columns[left]._type == "boolean" and (operator == '!='):
query = '(%s."%s" IS NOT NULL and %s."%s" != false)' % (table_alias, left, table_alias, left)
params = []
elif (right is False or right is None) and (operator == '!='):
query = '%s."%s" IS NOT NULL' % (table_alias, left)
params = []
elif operator == '=?':
if right is False or right is None:
# '=?' is a short-circuit that makes the term TRUE if right is None or False
query = 'TRUE'
params = []
else:
# '=?' behaves like '=' in other cases
query, params = self.__leaf_to_sql(
create_substitution_leaf(eleaf, (left, '=', right), model))
elif left == 'id':
query = '%s.id %s %%s' % (table_alias, operator)
params = right
else:
need_wildcard = operator in ('like', 'ilike', 'not like', 'not ilike')
sql_operator = {'=like': 'like', '=ilike': 'ilike'}.get(operator, operator)
if left in model._columns:
format = need_wildcard and '%s' or model._columns[left]._symbol_set[0]
if self.has_unaccent and sql_operator in ('ilike', 'not ilike'):
query = '(unaccent(%s."%s") %s unaccent(%s))' % (table_alias, left, sql_operator, format)
else:
query = '(%s."%s" %s %s)' % (table_alias, left, sql_operator, format)
elif left in MAGIC_COLUMNS:
query = "(%s.\"%s\" %s %%s)" % (table_alias, left, sql_operator)
params = right
else: # Must not happen
raise ValueError("Invalid field %r in domain term %r" % (left, leaf))
add_null = False
if need_wildcard:
if isinstance(right, str):
str_utf8 = right
elif isinstance(right, unicode):
str_utf8 = right.encode('utf-8')
else:
str_utf8 = str(right)
params = '%%%s%%' % str_utf8
add_null = not str_utf8
elif left in model._columns:
params = model._columns[left]._symbol_set[1](right)
if add_null:
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
if isinstance(params, basestring):
params = [params]
return query, params
def to_sql(self):
stack = []
params = []
# Process the domain from right to left, using a stack, to generate a SQL expression.
self.result.reverse()
for leaf in self.result:
if leaf.is_leaf(internal=True):
q, p = self.__leaf_to_sql(leaf)
params.insert(0, p)
stack.append(q)
elif leaf.leaf == NOT_OPERATOR:
stack.append('(NOT (%s))' % (stack.pop(),))
else:
ops = {AND_OPERATOR: ' AND ', OR_OPERATOR: ' OR '}
q1 = stack.pop()
q2 = stack.pop()
stack.append('(%s %s %s)' % (q1, ops[leaf.leaf], q2,))
assert len(stack) == 1
query = stack[0]
joins = ' AND '.join(self.joins)
if joins:
query = '(%s) AND %s' % (joins, query)
return query, tools.flatten(params)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "59c907c5650bc0c06315720a14b2e3c7",
"timestamp": "",
"source": "github",
"line_count": 1214,
"max_line_length": 173,
"avg_line_length": 46.183690280065896,
"alnum_prop": 0.5344141830310165,
"repo_name": "chjw8016/GreenOdoo7-haibao",
"id": "b1717e91a5893b7c6ce362366e17cea5dee89ce7",
"size": "57048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openerp/osv/expression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "90846"
},
{
"name": "CSS",
"bytes": "384369"
},
{
"name": "JavaScript",
"bytes": "1730589"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9394626"
},
{
"name": "Shell",
"bytes": "5172"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
}
|
import datetime
from hashlib import sha1
import logging
from pymongo import MongoClient
from pymongo.mongo_replica_set_client import MongoReplicaSetClient
import pymongo.uri_parser
import pymongo.errors
from saml2.saml import NAMEID_FORMAT_PERSISTENT
from saml2.eptid import Eptid
from saml2.mdstore import InMemoryMetaData
from saml2.mdstore import metadata_modules
from saml2.mdstore import load_metadata_modules
from saml2.s_utils import PolicyError
from saml2.ident import code_binary
from saml2.ident import IdentDB
from saml2.ident import Unknown
from saml2.mdie import to_dict
from saml2.mdie import from_dict
import six
__author__ = 'rolandh'
logger = logging.getLogger(__name__)
ONTS = load_metadata_modules()
MMODS = metadata_modules()
class CorruptDatabase(Exception):
pass
def context_match(cfilter, cntx):
# TODO
return True
class SessionStorageMDB(object):
""" Session information is stored in a MongoDB database"""
def __init__(self, database="", collection="assertion", **kwargs):
db = _mdb_get_database(database, **kwargs)
self.assertion = db[collection]
def store_assertion(self, assertion, to_sign):
name_id = assertion.subject.name_id
nkey = sha1(code_binary(name_id)).hexdigest()
doc = {
"name_id_key": nkey,
"assertion_id": assertion.id,
"assertion": to_dict(assertion, MMODS, True),
"to_sign": to_sign,
}
_ = self.assertion.insert_one(doc)
def get_assertion(self, cid):
res = []
for item in self.assertion.find({"assertion_id": cid}):
res.append({"assertion": from_dict(item["assertion"], ONTS, True),
"to_sign": item["to_sign"]})
if len(res) == 1:
return res[0]
elif not res:
return None
else:
raise SystemError("More than one assertion with the same ID")
def get_assertions_by_subject(self, name_id=None, session_index=None,
requested_context=None):
"""
:param name_id: One of name_id or key can be used to get the authn
statement
:param session_index: If match against a session index should be done
:param requested_context: Authn statements should match a specific
authn context
:return:
"""
result = []
key = sha1(code_binary(name_id)).hexdigest()
for item in self.assertion.find({"name_id_key": key}):
assertion = from_dict(item["assertion"], ONTS, True)
if session_index or requested_context:
for statement in assertion.authn_statement:
if session_index:
if statement.session_index == session_index:
result.append(assertion)
break
if requested_context:
if context_match(requested_context,
statement.authn_context):
result.append(assertion)
break
else:
result.append(assertion)
return result
def remove_authn_statements(self, name_id):
logger.debug("remove authn about: %s", name_id)
key = sha1(code_binary(name_id)).hexdigest()
for item in self.assertion.find({"name_id_key": key}):
self.assertion.remove(item["_id"])
def get_authn_statements(self, name_id, session_index=None,
requested_context=None):
"""
:param name_id:
:param session_index:
:param requested_context:
:return:
"""
return [k.authn_statement for k in self.get_assertions_by_subject(
name_id, session_index, requested_context)]
class IdentMDB(IdentDB):
def __init__(self, database="", collection="ident", domain="",
name_qualifier=""):
IdentDB.__init__(self, None, domain, name_qualifier)
self.mdb = MDB(database=database, collection=collection)
self.mdb.primary_key = "user_id"
def in_store(self, _id):
if [x for x in self.mdb.get(ident_id=_id)]:
return True
else:
return False
def create_id(self, nformat, name_qualifier="", sp_name_qualifier=""):
_id = self._create_id(nformat, name_qualifier, sp_name_qualifier)
while self.in_store(_id):
_id = self._create_id(nformat, name_qualifier, sp_name_qualifier)
return _id
def store(self, ident, name_id):
self.mdb.store(ident, name_id=to_dict(name_id, MMODS, True))
def find_nameid(self, userid, nformat=None, sp_name_qualifier=None,
name_qualifier=None, sp_provided_id=None, **kwargs):
# discard the compatibility kwargs that were passed in and rebuild them below
kwargs = {}
if nformat:
kwargs["name_format"] = nformat
if sp_name_qualifier:
kwargs["sp_name_qualifier"] = sp_name_qualifier
if name_qualifier:
kwargs["name_qualifier"] = name_qualifier
if sp_provided_id:
kwargs["sp_provided_id"] = sp_provided_id
res = []
for item in self.mdb.get(userid, **kwargs):
res.append(from_dict(item["name_id"], ONTS, True))
return res
def find_local_id(self, name_id):
cnid = to_dict(name_id, MMODS, True)
for item in self.mdb.get(name_id=cnid):
return item[self.mdb.primary_key]
return None
def match_local_id(self, userid, sp_name_qualifier, name_qualifier):
"""
Match a local persistent identifier.
Look for an existing persistent NameID matching userid,
sp_name_qualifier and name_qualifier.
"""
filter = {
"name_id.sp_name_qualifier": sp_name_qualifier,
"name_id.name_qualifier": name_qualifier,
"name_id.format": NAMEID_FORMAT_PERSISTENT,
}
res = self.mdb.get(value=userid, **filter)
if not res:
return None
return from_dict(res[0]["name_id"], ONTS, True)
def remove_remote(self, name_id):
cnid = to_dict(name_id, MMODS, True)
self.mdb.remove(name_id=cnid)
def handle_name_id_mapping_request(self, name_id, name_id_policy):
_id = self.find_local_id(name_id)
if not _id:
raise Unknown("Unknown entity")
if name_id_policy.allow_create == "false":
raise PolicyError("Not allowed to create new identifier")
# else create and return a new one
return self.construct_nameid(_id, name_id_policy=name_id_policy)
class MDB(object):
primary_key = "mdb"
def __init__(self, database, collection, **kwargs):
_db = _mdb_get_database(database, **kwargs)
self.db = _db[collection]
def store(self, value, **kwargs):
if value:
doc = {self.primary_key: value}
else:
doc = {}
doc.update(kwargs)
# Add timestamp to all documents to allow external garbage collecting
if "created_at" not in doc:
doc["created_at"] = datetime.datetime.utcnow()
_ = self.db.insert_one(doc)
def get(self, value=None, **kwargs):
if value is not None:
doc = {self.primary_key: value}
doc.update(kwargs)
return [item for item in self.db.find(doc)]
elif kwargs:
return [item for item in self.db.find(kwargs)]
def remove(self, key=None, **kwargs):
if key is None:
if kwargs:
for item in self.db.find(kwargs):
self.db.remove(item["_id"])
else:
doc = {self.primary_key: key}
doc.update(kwargs)
for item in self.db.find(doc):
self.db.remove(item["_id"])
def keys(self):
for item in self.db.find():
yield item[self.primary_key]
def items(self):
for item in self.db.find():
_key = item[self.primary_key]
del item[self.primary_key]
del item["_id"]
yield _key, item
def __contains__(self, key):
doc = {self.primary_key: key}
res = [item for item in self.db.find(doc)]
if not res:
return False
else:
return True
def reset(self):
self.db.drop()
def _mdb_get_database(uri, **kwargs):
"""
Helper-function to connect to MongoDB and return a database object.
The `uri' argument should be either a full MongoDB connection URI string,
or just a database name in which case a connection to the default mongo
instance at mongodb://localhost:27017 will be made.
Performs explicit authentication if a username is provided in a connection
string URI, since PyMongo does not always seem to do that as promised.
:param uri: full MongoDB connection URI or plain database name as string
:returns: pymongo database object
"""
if "tz_aware" not in kwargs:
# default, but not forced
kwargs["tz_aware"] = True
connection_factory = MongoClient
_parsed_uri = {}
try:
_parsed_uri = pymongo.uri_parser.parse_uri(uri)
except pymongo.errors.InvalidURI:
# assume URI to be just the database name
db_name = uri
_conn = MongoClient()
pass
else:
if "replicaset" in _parsed_uri["options"]:
connection_factory = MongoReplicaSetClient
db_name = _parsed_uri.get("database", "pysaml2")
_conn = connection_factory(uri, **kwargs)
_db = _conn[db_name]
if "username" in _parsed_uri:
_db.authenticate(
_parsed_uri.get("username", None),
_parsed_uri.get("password", None)
)
return _db
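# Illustrative usage sketch (added; not part of the original module). The
# host, credentials and database names below are made up:
#
#   db = _mdb_get_database("pysaml2")  # local default instance, db "pysaml2"
#   db = _mdb_get_database("mongodb://user:pw@db.example.com:27017/idp")
#
# Both calls return a pymongo Database object, created with tz_aware=True
# unless overridden through **kwargs.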
#------------------------------------------------------------------------------
class EptidMDB(Eptid):
def __init__(self, secret, database="", collection="eptid"):
Eptid.__init__(self, secret)
self.mdb = MDB(database, collection)
self.mdb.primary_key = "eptid_key"
def __getitem__(self, key):
res = self.mdb.get(key)
if not res:
raise KeyError(key)
elif len(res) == 1:
return res[0]["eptid"]
else:
raise CorruptDatabase("Found more than one EPTID document")
def __setitem__(self, key, value):
_ = self.mdb.store(key, **{"eptid": value})
#------------------------------------------------------------------------------
def protect(dic):
res = {}
for key, val in dic.items():
key = key.replace(".", "__")
if isinstance(val, six.string_types):
pass
elif isinstance(val, dict):
val = protect(val)
elif isinstance(val, list):
li = []
for va in val:
if isinstance(va, six.string_types):
pass
elif isinstance(va, dict):
va = protect(va)
# lists of lists are not expected to appear here
li.append(va)
val = li
res[key] = val
return res
def unprotect(dic):
res = {}
for key, val in dic.items():
if key == "__class__":
pass
else:
key = key.replace("__", ".")
if isinstance(val, six.string_types):
pass
elif isinstance(val, dict):
val = unprotect(val)
elif isinstance(val, list):
li = []
for va in val:
if isinstance(va, six.string_types):
pass
elif isinstance(va, dict):
va = unprotect(va)
li.append(va)
val = li
res[key] = val
return res
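# Illustrative sketch (added; not part of the original module): MongoDB does
# not allow dots in document keys, so protect()/unprotect() round-trip them
# through '__':
# >>> protect({"urn:oid:2.5.4.4": "Hansen", "nested": {"a.b": "c"}})
# {'urn:oid:2__5__4__4': 'Hansen', 'nested': {'a__b': 'c'}}
# >>> unprotect(protect({"a.b": "c"}))
# {'a.b': 'c'}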
def export_mdstore_to_mongo_db(mds, database, collection, sub_collection=""):
mdb = MDB(database, collection, sub_collection=sub_collection)
mdb.reset()
mdb.primary_key = "entity_id"
for key, desc in mds.items():
kwargs = {
"entity_description": protect(desc),
}
mdb.store(key, **kwargs)
class MetadataMDB(InMemoryMetaData):
def __init__(self, attrc, database="", collection=""):
super(MetadataMDB, self).__init__(attrc)
self.mdb = MDB(database, collection)
self.mdb.primary_key = "entity_id"
def _ext_service(self, entity_id, typ, service, binding):
try:
srvs = self[entity_id][typ]
except KeyError:
return None
if not srvs:
return srvs
res = []
for srv in srvs:
if "extensions" in srv:
for elem in srv["extensions"]["extension_elements"]:
if elem["__class__"] == service:
if elem["binding"] == binding:
res.append(elem)
return res
def load(self):
pass
def items(self):
for key, item in self.mdb.items():
yield key, unprotect(item["entity_description"])
def keys(self):
return self.mdb.keys()
def values(self):
for key, item in self.mdb.items():
yield unprotect(item["entity_description"])
def __contains__(self, item):
return item in self.mdb
def __getitem__(self, item):
res = self.mdb.get(item)
if not res:
raise KeyError(item)
elif len(res) == 1:
return unprotect(res[0]["entity_description"])
else:
raise CorruptDatabase("More than one document with key %s" % item)
def bindings(self, entity_id, typ, service):
pass
|
{
"content_hash": "a2345ce3ab998e6574b6e3c073b7feaf",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 79,
"avg_line_length": 31.463470319634702,
"alnum_prop": 0.5564182570205355,
"repo_name": "cloudera/hue",
"id": "0cc33619072a61d656a43ccac5cff89ccb15691f",
"size": "13781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py3/pysaml2-5.0.0/src/saml2/mongo_store.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
import sys, imp, os
try:
import electrum
except:
sys.path.append(os.environ['HOME'] + '/sourcecode/electrum')
imp.load_module('electrum', *imp.find_module('lib'))
import electrum
script_dir = os.path.dirname(os.path.realpath(__file__))
class ElectrumClient:
def __init__(self, server = None, proxy = None):
options={}
# options['electrum_path'] = os.environ['HOME'] + '/.electrum'
options['electrum_path'] = script_dir
self.conf = electrum.SimpleConfig(options)
if server is not None:
self.conf.set_key('server', server, False)
if proxy is not None:
self.conf.set_key('proxy', proxy, False)
if server is not None and proxy is not None:
self.conf.set_key('auto_cycle', False, False)
else:
self.conf.set_key('auto_cycle', True, False)
print 'server: ', self.conf.get('server')
print 'proxy: ', self.conf.get('proxy')
# self.sock = electrum.daemon.get_daemon(self.conf, True)
# self.netw = electrum.NetworkProxy(self.sock, self.conf)
self.netw = electrum.Network(self.conf)
self.netw.start()
def get_history(self, addr):
hist = self.netw.synchronous_get(('blockchain.address.get_history', [addr]))
return hist
def get_transaction_details(self, tx_hash):
raw = self.netw.synchronous_get(('blockchain.transaction.get', [tx_hash]))
return electrum.Transaction.deserialize(raw)
def get_balance(self, address):
hist = self.get_history(address)
if hist == ['*']: return 0
bal = 0
received_coins = [] # list of coins received at address
for elem in hist:
tx = self.get_transaction_details(elem['tx_hash'])
if not tx: continue
for i, (addr, value) in enumerate(tx.get_outputs()):
if addr == address:
key = elem['tx_hash'] + ':%d'%i
received_coins.append(key)
for elem in hist:
tx = self.get_transaction_details(elem['tx_hash'])
if not tx: continue
for i, (addr, value) in enumerate(tx.get_outputs()):
key = elem['tx_hash'] + ':%d'%i
if addr == address:
bal += value
return bal
# test code
if __name__ == "__main__":
cli = ElectrumClient('ulrichard.ch:50002:s', '')
# cli = ElectrumClient()
addr = '1JuArrY4wpG9v6bgDbuugPPvbZTTn5Vxou'
hist = cli.get_history(addr)
print 'transaction history for: ', addr
print 'transaction count: ', len(hist)
# electrum.print_json(hist)
for elem in hist:
tx = cli.get_transaction_details(elem['tx_hash'])
print 'inputs count: ', len(tx.inputs), ' outputs count: ', len(tx.outputs)
print 'balance: ', cli.get_balance(addr) / 100000000.0
|
{
"content_hash": "fee1e6b806e820c2197f9f437b703aa6",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 84,
"avg_line_length": 33.811764705882354,
"alnum_prop": 0.5768963117606124,
"repo_name": "ulrichard/bitcoinutilities",
"id": "4a3e42b4fbb9c6fed3d340d7f4280f447b607d64",
"size": "2897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum_get_history.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "30762"
},
{
"name": "Shell",
"bytes": "989"
}
],
"symlink_target": ""
}
|
arduino = Runtime.createAndStart("Arduino","Arduino")
arduino.connect("/dev/ttyACM0")
leftneckPin = 14
rightneckPin = 15
# Function to keep the servo movements in sync
# If both servos should rotate in the same direction, change from "- delta" to "+ delta"
def neckMoveTo(restPos,delta):
leftneckServo.moveTo(restPos + delta)
rightneckServo.moveTo(restPos - delta)
#
leftneckServo = Runtime.createAndStart("leftNeck","Servo")
rightneckServo = Runtime.createAndStart("rightNeck","Servo")
leftneckServo.attach(arduino,leftneckPin)
rightneckServo.attach(arduino,rightneckPin)
restPos = 90
delta = 0
neckMoveTo(restPos,delta)
sleep(1)
delta = 45
neckMoveTo(restPos,delta)
sleep(2)
delta = -45
neckMoveTo(restPos,delta)
sleep(2)
delta = 0
neckMoveTo(restPos,delta)
sleep(2)
def neckMoveTo(restPos,delta):
leftneckServo.moveTo(restPos + delta)
rightneckServo.moveTo(restPos - delta)
|
{
"content_hash": "433ee095612b2dc25c7b4f91c598c5d8",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 88,
"avg_line_length": 29.5,
"alnum_prop": 0.7785310734463277,
"repo_name": "MyRobotLab/pyrobotlab",
"id": "8926d3e73538edddfb3d85df1780116ccda5d266",
"size": "905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "home/pedrosenarego/scripts/BobNeck.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1827"
},
{
"name": "C",
"bytes": "126258"
},
{
"name": "C++",
"bytes": "373018"
},
{
"name": "Java",
"bytes": "156911"
},
{
"name": "Processing",
"bytes": "17022"
},
{
"name": "Python",
"bytes": "3309101"
},
{
"name": "Shell",
"bytes": "4635"
},
{
"name": "VBA",
"bytes": "11115"
}
],
"symlink_target": ""
}
|
from pyoembed.providers import BaseProvider
class GeographOrgUkProvider(BaseProvider):
priority = 10
oembed_schemas = ['http://*.geograph.org.uk/*',
'http://*.geograph.co.uk/*',
'http://*.geograph.ie/*',
# not tested
'http://*.wikimedia.org/*_geograph.org.uk_*']
oembed_endpoint = 'http://api.geograph.org.uk/api/oembed'
|
{
"content_hash": "911ca9ec8cfc5d2e1ed0e10eb8a3ec4b",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 67,
"avg_line_length": 32.69230769230769,
"alnum_prop": 0.5364705882352941,
"repo_name": "rafaelmartins/pyoembed",
"id": "1319c0bfaf4d6366afb2987e3a34d0a4b0a67d30",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyoembed/providers/geographorguk.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "778"
},
{
"name": "Python",
"bytes": "39939"
}
],
"symlink_target": ""
}
|
"""
Code PyQt4
In this example, we create a simple
calculator window in PyQt4.
"""
import sys, os
from PyQt4 import QtGui, QtCore
class Button(QtGui.QToolButton):
def __init__(self, text, parent=None):
super(Button, self).__init__(parent)
self.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Preferred)
self.setText(text)
def sizeHint(self):
size = super(Button, self).sizeHint()
size.setHeight(size.height() + 20)
size.setWidth(max(size.width(), size.height()))
return size
class Calculator(QtGui.QDialog):
NumDigitButtons = 10
def __init__(self, parent=None):
super(Calculator, self).__init__(parent)
self.pendingAdditiveOperator = ''
self.pendingMultiplicativeOperator = ''
self.sumInMemory = 0.0
self.sumSoFar = 0.0
self.factorSoFar = 0.0
self.waitingForOperand = True
self.display = QtGui.QLineEdit('0')
self.display.setReadOnly(True)
self.display.setAlignment(QtCore.Qt.AlignRight)
self.display.setMaxLength(15)
font = self.display.font()
font.setPointSize(font.pointSize() + 8)
self.display.setFont(font)
self.digitButtons = []
for i in range(Calculator.NumDigitButtons):
self.digitButtons.append(self.createButton(str(i),
self.digitClicked))
self.divisionButton = self.createButton("\367",
self.multiplicativeOperatorClicked)
self.multiplicatButton = self.createButton("\327",
self.multiplicativeOperatorClicked)
self.minusButton = self.createButton("-", self.additiveOperatorClicked)
self.plusButton = self.createButton("+", self.additiveOperatorClicked)
mainLayout = QtGui.QGridLayout()
mainLayout.setSizeConstraint(QtGui.QLayout.SetFixedSize)
mainLayout.addWidget(self.display, 0, 0, 1, 6)
for i in range(1, Calculator.NumDigitButtons):
row = ((9 - i) / 3) + 2
column = ((i - 1) % 3) + 1
mainLayout.addWidget(self.digitButtons[i], row, column)
mainLayout.addWidget(self.divisionButton, 2, 4)
mainLayout.addWidget(self.multiplicatButton, 3, 4)
mainLayout.addWidget(self.minusButton, 4, 4)
mainLayout.addWidget(self.plusButton, 5, 4)
mainLayout.addWidget(self.digitButtons[0], 5, 1)
self.setLayout(mainLayout)
self.setWindowTitle("Calculator")
def createButton(self, text, member):
button = Button(text)
button.clicked.connect(member)
return button
def digitClicked(self):
clickedButton = self.sender()
digitValue = int(clickedButton.text())
if self.display.text() == '0' and digitValue == 0.0:
return
if self.waitingForOperand:
self.display.clear()
self.waitingForOperand = False
self.display.setText(self.display.text() + str(digitValue))
def multiplicativeOperatorClicked(self):
pass
def additiveOperatorClicked(self):
pass
def main():
app = QtGui.QApplication(sys.argv)
calc = Calculator()
sys.exit(calc.exec_())
if __name__ == '__main__':
main()
|
{
"content_hash": "021ddef37e7425db7dab94f47fceaf0f",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 28.652173913043477,
"alnum_prop": 0.6215477996965099,
"repo_name": "janusnic/21v-python",
"id": "4f21906ef705dad2b9c78c86768f65ddd0ffbce3",
"size": "3338",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "unit_10/17.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "369"
},
{
"name": "Python",
"bytes": "990972"
},
{
"name": "SQLPL",
"bytes": "147"
}
],
"symlink_target": ""
}
|
"""
/shell.py will allow you to get a console and enter commands within your flask environment.
"""
import os
import sys
import readline
from pprint import pprint
from flask import *
sys.path.insert(0, '/home/lucas/www/reddit.lucasou.com/reddit-env/flask_reddit')
from flask_reddit import *
from flask_reddit.users.models import *
from flask_reddit.threads.models import *
from flask_reddit.subreddits.models import *
from flask_reddit.threads.models import thread_upvotes, comment_upvotes
os.environ['PYTHONINSPECT'] = 'True'
|
{
"content_hash": "0986a9ab2ff53b533ea04a0dd2ea193a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 91,
"avg_line_length": 29.444444444444443,
"alnum_prop": 0.7830188679245284,
"repo_name": "codelucas/flask_reddit",
"id": "61e36a1634516564bb3d1cadd2941633d709a629",
"size": "555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/shell.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5745"
},
{
"name": "HTML",
"bytes": "25867"
},
{
"name": "Python",
"bytes": "45704"
},
{
"name": "Shell",
"bytes": "637"
}
],
"symlink_target": ""
}
|
import logging
import os
import uuid
from keystoneauth1.identity import generic
from keystoneauth1 import session as keystone_session
from designateclient.v2 import client
logging.basicConfig(level='DEBUG')
auth = generic.Password(
auth_url=os.environ.get('OS_AUTH_URL'),
username=os.environ.get('OS_USERNAME'),
password=os.environ.get('OS_PASSWORD'),
project_name=os.environ.get('OS_PROJECT_NAME'),
project_domain_id='default',
user_domain_id='default')
session = keystone_session.Session(auth=auth)
client = client.Client(session=session)
# Primary Zone
primary = client.zones.create(
'primary-%s.io.' % str(uuid.uuid4()),
'PRIMARY',
'root@x.com')
# Secondary Zone
slave = client.zones.create(
'secondary-%s.io.' % str(uuid.uuid4()),
'SECONDARY',
masters=["127.0.1.1"])
# Try updating Masters for the Secondary
new_slave = client.zones.update(
slave['id'],
{"masters": ["10.0.0.1", "10.0.0.10"]}
)
# List all Zones
zones = client.zones.list()
|
{
"content_hash": "47114b223e3b60573b6b2e1d35fb9355",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 53,
"avg_line_length": 23,
"alnum_prop": 0.6897233201581028,
"repo_name": "openstack/python-designateclient",
"id": "febbe9068495097f7413538a958998566d9a45e2",
"size": "1012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/examples/zone_create_secondary.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "238282"
}
],
"symlink_target": ""
}
|
"""Tests for the Cast config flow."""
from unittest.mock import ANY, patch
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import cast
from tests.common import MockConfigEntry
async def test_creating_entry_sets_up_media_player(hass):
"""Test setting up Cast loads the media player."""
with patch(
"homeassistant.components.cast.media_player.async_setup_entry",
return_value=True,
) as mock_setup, patch(
"pychromecast.discovery.discover_chromecasts", return_value=(True, None)
), patch(
"pychromecast.discovery.stop_discovery"
):
result = await hass.config_entries.flow.async_init(
cast.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
# Confirmation form
assert result["type"] == data_entry_flow.FlowResultType.FORM
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
@pytest.mark.parametrize(
"source",
[
config_entries.SOURCE_IMPORT,
config_entries.SOURCE_USER,
config_entries.SOURCE_ZEROCONF,
],
)
async def test_single_instance(hass, source):
"""Test we only allow a single config flow."""
MockConfigEntry(domain="cast").add_to_hass(hass)
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_init(
"cast", context={"source": source}
)
assert result["type"] == "abort"
assert result["reason"] == "single_instance_allowed"
async def test_user_setup(hass):
"""Test we can finish a config flow."""
result = await hass.config_entries.flow.async_init(
"cast", context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
users = await hass.auth.async_get_users()
assert len(users) == 1
assert result["type"] == "create_entry"
assert result["result"].data == {
"ignore_cec": [],
"known_hosts": [],
"uuid": [],
"user_id": users[0].id, # Home Assistant cast user
}
async def test_user_setup_options(hass):
"""Test we can finish a config flow."""
result = await hass.config_entries.flow.async_init(
"cast", context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"known_hosts": "192.168.0.1, , 192.168.0.2 "}
)
users = await hass.auth.async_get_users()
assert len(users) == 1
assert result["type"] == "create_entry"
assert result["result"].data == {
"ignore_cec": [],
"known_hosts": ["192.168.0.1", "192.168.0.2"],
"uuid": [],
"user_id": users[0].id, # Home Assistant cast user
}
async def test_zeroconf_setup(hass):
"""Test we can finish a config flow through zeroconf."""
result = await hass.config_entries.flow.async_init(
"cast", context={"source": config_entries.SOURCE_ZEROCONF}
)
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
users = await hass.auth.async_get_users()
assert len(users) == 1
assert result["type"] == "create_entry"
assert result["result"].data == {
"ignore_cec": [],
"known_hosts": [],
"uuid": [],
"user_id": users[0].id, # Home Assistant cast user
}
async def test_zeroconf_setup_onboarding(hass):
"""Test we automatically finish a config flow through zeroconf during onboarding."""
with patch(
"homeassistant.components.onboarding.async_is_onboarded", return_value=False
):
result = await hass.config_entries.flow.async_init(
"cast", context={"source": config_entries.SOURCE_ZEROCONF}
)
users = await hass.auth.async_get_users()
assert len(users) == 1
assert result["type"] == "create_entry"
assert result["result"].data == {
"ignore_cec": [],
"known_hosts": [],
"uuid": [],
"user_id": users[0].id, # Home Assistant cast user
}
def get_suggested(schema, key):
"""Get suggested value for key in voluptuous schema."""
for k in schema.keys():
if k == key:
if k.description is None or "suggested_value" not in k.description:
return None
return k.description["suggested_value"]
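# Minimal sketch of the schema shape get_suggested() expects (an illustration,
# not taken from the options flow itself; vol is voluptuous):
#   schema = vol.Schema(
#       {vol.Optional("known_hosts", description={"suggested_value": "10.0.0.1"}): str}
#   ).schema
#   get_suggested(schema, "known_hosts")  # -> "10.0.0.1"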
@pytest.mark.parametrize(
"parameter_data",
[
(
"known_hosts",
["192.168.0.10", "192.168.0.11"],
"192.168.0.10,192.168.0.11",
"192.168.0.1, , 192.168.0.2 ",
["192.168.0.1", "192.168.0.2"],
),
(
"uuid",
["bla", "blu"],
"bla,blu",
"foo, , bar ",
["foo", "bar"],
),
(
"ignore_cec",
["cast1", "cast2"],
"cast1,cast2",
"other_cast, , some_cast ",
["other_cast", "some_cast"],
),
],
)
async def test_option_flow(hass, parameter_data):
"""Test config flow options."""
basic_parameters = ["known_hosts"]
advanced_parameters = ["ignore_cec", "uuid"]
parameter, initial, suggested, user_input, updated = parameter_data
data = {
"ignore_cec": [],
"known_hosts": [],
"uuid": [],
}
data[parameter] = initial
config_entry = MockConfigEntry(domain="cast", data=data)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
# Test ignore_cec and uuid options are hidden if advanced options are disabled
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.FlowResultType.FORM
assert result["step_id"] == "basic_options"
data_schema = result["data_schema"].schema
assert set(data_schema) == {"known_hosts"}
orig_data = dict(config_entry.data)
# Reconfigure known_hosts
context = {"source": config_entries.SOURCE_USER, "show_advanced_options": True}
result = await hass.config_entries.options.async_init(
config_entry.entry_id, context=context
)
assert result["type"] == data_entry_flow.FlowResultType.FORM
assert result["step_id"] == "basic_options"
data_schema = result["data_schema"].schema
for other_param in basic_parameters:
if other_param == parameter:
continue
assert get_suggested(data_schema, other_param) == ""
if parameter in basic_parameters:
assert get_suggested(data_schema, parameter) == suggested
user_input_dict = {}
if parameter in basic_parameters:
user_input_dict[parameter] = user_input
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input=user_input_dict,
)
assert result["type"] == data_entry_flow.FlowResultType.FORM
assert result["step_id"] == "advanced_options"
for other_param in basic_parameters:
if other_param == parameter:
continue
assert config_entry.data[other_param] == []
# No update yet
assert config_entry.data[parameter] == initial
# Reconfigure ignore_cec, uuid
data_schema = result["data_schema"].schema
for other_param in advanced_parameters:
if other_param == parameter:
continue
assert get_suggested(data_schema, other_param) == ""
if parameter in advanced_parameters:
assert get_suggested(data_schema, parameter) == suggested
user_input_dict = {}
if parameter in advanced_parameters:
user_input_dict[parameter] = user_input
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input=user_input_dict,
)
assert result["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY
assert result["data"] is None
for other_param in advanced_parameters:
if other_param == parameter:
continue
assert config_entry.data[other_param] == []
assert config_entry.data[parameter] == updated
# Clear known_hosts
result = await hass.config_entries.options.async_init(config_entry.entry_id)
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={"known_hosts": ""},
)
assert result["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY
assert result["data"] is None
expected_data = {**orig_data, "known_hosts": []}
if parameter in advanced_parameters:
expected_data[parameter] = updated
assert dict(config_entry.data) == expected_data
async def test_known_hosts(hass, castbrowser_mock):
"""Test known hosts is passed to pychromecasts."""
result = await hass.config_entries.flow.async_init(
"cast", context={"source": config_entries.SOURCE_USER}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"known_hosts": "192.168.0.1, 192.168.0.2"}
)
assert result["type"] == "create_entry"
await hass.async_block_till_done()
config_entry = hass.config_entries.async_entries("cast")[0]
assert castbrowser_mock.return_value.start_discovery.call_count == 1
castbrowser_mock.assert_called_once_with(ANY, ANY, ["192.168.0.1", "192.168.0.2"])
castbrowser_mock.reset_mock()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={"known_hosts": "192.168.0.11, 192.168.0.12"},
)
await hass.async_block_till_done()
castbrowser_mock.return_value.start_discovery.assert_not_called()
castbrowser_mock.assert_not_called()
castbrowser_mock.return_value.host_browser.update_hosts.assert_called_once_with(
["192.168.0.11", "192.168.0.12"]
)
|
{
"content_hash": "64f13921393193c3a2e28dd50f7f8df4",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 88,
"avg_line_length": 34.46621621621622,
"alnum_prop": 0.6237012350519506,
"repo_name": "w1ll1am23/home-assistant",
"id": "97218a396dd3cfc3b2816c62b9ed4fb64d1c7c24",
"size": "10202",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/cast/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from csv import DictReader
from math import exp, log, sqrt
# parameters #################################################################
train = 'train.csv' # path to training file
test = 'test.csv' # path to testing file
D = 2 ** 20 # number of weights use for learning
alpha = .1 # learning rate for sgd optimization
# function definitions #######################################################
# A. Bounded logloss
# INPUT:
# p: our prediction
# y: real answer
# OUTPUT
# logarithmic loss of p given y
def logloss(p, y):
p = max(min(p, 1. - 10e-12), 10e-12)
return -log(p) if y == 1. else -log(1. - p)
# B. Apply hash trick of the original csv row
# for simplicity, we treat both integer and categorical features as categorical
# INPUT:
#  csv_row: a csv dictionary, ex: {'Label': '1', 'I1': '357', 'I2': '', ...}
# D: the max index that we can hash to
# OUTPUT:
# x: a list of indices that its value is 1
def get_x(csv_row, D):
x = [0] # 0 is the index of the bias term
for key, value in csv_row.items():
index = int(value + key[1:], 16) % D # weakest hash ever ;)
x.append(index)
return x # x contains indices of features that have a value of 1
# C. Get probability estimation on x
# INPUT:
# x: features
# w: weights
# OUTPUT:
# probability of p(y = 1 | x; w)
def get_p(x, w):
wTx = 0.
for i in x: # do wTx
wTx += w[i] * 1. # w[i] * x[i], but if i in x we got x[i] = 1.
return 1. / (1. + exp(-max(min(wTx, 20.), -20.))) # bounded sigmoid
# D. Update given model
# INPUT:
# w: weights
# n: a counter that counts the number of times we encounter a feature
# this is used for adaptive learning rate
# x: feature
# p: prediction of our model
# y: answer
# OUTPUT:
# w: updated model
# n: updated count
def update_w(w, n, x, p, y):
for i in x:
# alpha / (sqrt(n) + 1) is the adaptive learning rate heuristic
# (p - y) * x[i] is the current gradient
# note that in our case, if i in x then x[i] = 1
w[i] -= (p - y) * alpha / (sqrt(n[i]) + 1.)
n[i] += 1.
return w, n
# training and testing #######################################################
# initialize our model
w = [0.] * D # weights
n = [0.] * D # number of times we've encountered a feature
# start training a logistic regression model using one-pass sgd
loss = 0.
for t, row in enumerate(DictReader(open(train))):
y = 1. if row['Label'] == '1' else 0.
del row['Label'] # can't let the model peek the answer
del row['Id'] # we don't need the Id
# main training procedure
# step 1, get the hashed features
x = get_x(row, D)
# step 2, get prediction
p = get_p(x, w)
# for progress validation, useless for learning our model
loss += logloss(p, y)
if t % 1000000 == 0 and t > 1:
print('%s\tencountered: %d\tcurrent logloss: %f' % (
datetime.now(), t, loss/t))
# step 3, update model with answer
w, n = update_w(w, n, x, p, y)
# testing (build kaggle's submission file)
with open('fast_submission.csv', 'w') as submission:
submission.write('Id,Predicted\n')
for t, row in enumerate(DictReader(open(test))):
Id = row['Id']
del row['Id']
x = get_x(row, D)
p = get_p(x, w)
submission.write('%s,%f\n' % (Id, p))
# build the submission archive (the original line used R-style zip()/c() calls,
# which are not valid Python)
from zipfile import ZipFile, ZIP_DEFLATED
with ZipFile('fast_submission.zip', 'w', ZIP_DEFLATED) as archive:
    archive.write('fast_submission.csv')
|
{
"content_hash": "7c3009984c67cfcfe47128aaffbe0667",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 79,
"avg_line_length": 29.584745762711865,
"alnum_prop": 0.5585792036665712,
"repo_name": "ryanswanstrom/Sense.io-Projects",
"id": "673cf909ab02fa0696165d5cbfb5990a16d956b2",
"size": "3491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kaggle-display-ad/fast.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Julia",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "90972"
},
{
"name": "R",
"bytes": "86310"
}
],
"symlink_target": ""
}
|
ONBOARDING_FLOW = ['/onboarding/follow', '/invite', '/onboarding/welcome']
ONBOARDING_START = ONBOARDING_FLOW[0] + '?onboarding'
ONBOARDING_FINISH = '/'
def is_onboarding(request):
return (request.user.is_authenticated()
and 'onboarding' in request.GET
and request.path in ONBOARDING_FLOW)
def get_next(request):
try:
return ONBOARDING_FLOW[ONBOARDING_FLOW.index(request.path) + 1] + '?onboarding'
except IndexError:
return '/onboarding/finish'
def current_step(request):
return ONBOARDING_FLOW.index(request.path) + 1
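# Hypothetical view-side usage of the helpers above (illustrative only; the
# real onboarding views live elsewhere in the project):
#   from django.shortcuts import redirect
#   def advance(request):
#       if is_onboarding(request):
#           return redirect(get_next(request))
#       return redirect(ONBOARDING_FINISH)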
|
{
"content_hash": "ab57738399422172b4289604aaed3819",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 87,
"avg_line_length": 30.526315789473685,
"alnum_prop": 0.6810344827586207,
"repo_name": "canvasnetworks/canvas",
"id": "1c4b6fc1b3c92f0196ea639623af0b8dc24000c0",
"size": "580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/apps/onboarding/flow.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "57"
},
{
"name": "C",
"bytes": "547"
},
{
"name": "CSS",
"bytes": "537625"
},
{
"name": "HTML",
"bytes": "689709"
},
{
"name": "JavaScript",
"bytes": "1313262"
},
{
"name": "Makefile",
"bytes": "258"
},
{
"name": "PHP",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "6659685"
},
{
"name": "Ruby",
"bytes": "876"
},
{
"name": "Shell",
"bytes": "5326"
}
],
"symlink_target": ""
}
|
"""Pathname and path-related operations for the Macintosh."""
import os
from stat import *
# Normalize the case of a pathname. Dummy in Posix, but <s>.lower() here.
def normcase(path):
return path.lower()
def isabs(s):
"""Return true if a path is absolute.
On the Mac, relative paths begin with a colon,
but as a special case, paths with no colons at all are also relative.
Anything else is absolute (the string up to the first colon is the
volume name)."""
return ':' in s and s[0] <> ':'
def join(s, *p):
path = s
for t in p:
if (not s) or isabs(t):
path = t
continue
if t[:1] == ':':
t = t[1:]
if ':' not in path:
path = ':' + path
if path[-1:] <> ':':
path = path + ':'
path = path + t
return path
def split(s):
"""Split a pathname into two parts: the directory leading up to the final
bit, and the basename (the filename, without colons, in that directory).
The result (s, t) is such that join(s, t) yields the original argument."""
if ':' not in s: return '', s
colon = 0
for i in range(len(s)):
if s[i] == ':': colon = i + 1
path, file = s[:colon-1], s[colon:]
if path and not ':' in path:
path = path + ':'
return path, file
def splitext(p):
"""Split a path into root and extension.
The extension is everything starting at the last dot in the last
pathname component; the root is everything before that.
It is always true that root + ext == p."""
root, ext = '', ''
for c in p:
if c == ':':
root, ext = root + ext + c, ''
elif c == '.':
if ext:
root, ext = root + ext, c
else:
ext = c
elif ext:
ext = ext + c
else:
root = root + c
return root, ext
def splitdrive(p):
"""Split a pathname into a drive specification and the rest of the
path. Useful on DOS/Windows/NT; on the Mac, the drive is always
empty (don't use the volume name -- it doesn't have the same
syntactic and semantic oddities as DOS drive letters, such as there
being a separate current directory per drive)."""
return '', p
# Short interfaces to split()
def dirname(s): return split(s)[0]
def basename(s): return split(s)[1]
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except os.error:
return 0
return S_ISDIR(st[ST_MODE])
# Get size, mtime, atime of files.
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_SIZE]
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_MTIME]
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_ATIME]
def islink(s):
"""Return true if the pathname refers to a symbolic link.
Always false on the Mac, until we understand Aliases.)"""
return 0
def isfile(s):
"""Return true if the pathname refers to an existing regular file."""
try:
st = os.stat(s)
except os.error:
return 0
return S_ISREG(st[ST_MODE])
def exists(s):
"""Return true if the pathname refers to an existing file or directory."""
try:
st = os.stat(s)
except os.error:
return 0
return 1
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
prefix = m[0]
for item in m:
for i in range(len(prefix)):
if prefix[:i+1] <> item[:i+1]:
prefix = prefix[:i]
if i == 0: return ''
break
return prefix
def expandvars(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
def expanduser(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
norm_error = 'macpath.norm_error: path cannot be normalized'
def normpath(s):
"""Normalize a pathname. Will return the same result for
equivalent paths."""
if ":" not in s:
return ":"+s
comps = s.split(":")
i = 1
while i < len(comps)-1:
if comps[i] == "" and comps[i-1] != "":
if i > 1:
del comps[i-1:i+1]
i = i - 1
else:
# best way to handle this is to raise an exception
                raise norm_error, 'Cannot use :: immediately after volume name'
else:
i = i + 1
s = ":".join(comps)
# remove trailing ":" except for ":" and "Volume:"
if s[-1] == ":" and len(comps) > 2 and s != ":"*len(s):
s = s[:-1]
return s
def walk(top, func, arg):
"""Directory tree walk.
For each directory under top (including top itself),
func(arg, dirname, filenames) is called, where
dirname is the name of the directory and filenames is the list
of files (and subdirectories etc.) in the directory.
The func may modify the filenames list, to implement a filter,
or to impose a different order of visiting."""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)
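# Worked examples of the colon-based path rules implemented above (values
# traced through the functions in this module, shown for illustration):
#   join('Macintosh HD:Documents', 'notes.txt')  -> 'Macintosh HD:Documents:notes.txt'
#   split('Macintosh HD:Documents:notes.txt')    -> ('Macintosh HD:Documents', 'notes.txt')
#   isabs(':notes.txt')                          -> falsy (a leading colon means relative)
#   normcase('HD:Foo')                           -> 'hd:foo'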
|
{
"content_hash": "b280e7c6336e62acec78f12fcd5e4f99",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 79,
"avg_line_length": 25.896860986547086,
"alnum_prop": 0.578008658008658,
"repo_name": "MalloyPower/parsing-python",
"id": "12eec0c97ea5c81db2c10116a4cd89c316e4b68b",
"size": "5775",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.0/Lib/macpath.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
"""
CNAME Lookup Middleware
Middleware that translates an unknown domain in the host header to
something that ends with the configured storage_domain by looking up
the given domain's CNAME record in DNS.
This middleware will continue to follow a CNAME chain in DNS until it finds
a record ending in the configured storage domain or it reaches the configured
maximum lookup depth. If a match is found, the environment's Host header is
rewritten and the request is passed further down the WSGI chain.
"""
import socket
from swift import gettext_ as _
try:
import dns.resolver
from dns.exception import DNSException
from dns.resolver import NXDOMAIN, NoAnswer
except ImportError:
# catch this to allow docs to be built without the dependency
MODULE_DEPENDENCY_MET = False
else: # executed if the try block finishes with no errors
MODULE_DEPENDENCY_MET = True
from swift.common.swob import Request, HTTPBadRequest
from swift.common.utils import cache_from_env, get_logger, list_from_csv
def lookup_cname(domain): # pragma: no cover
"""
Given a domain, returns its DNS CNAME mapping and DNS ttl.
:param domain: domain to query on
:returns: (ttl, result)
"""
try:
answer = dns.resolver.query(domain, 'CNAME').rrset
ttl = answer.ttl
result = answer.items[0].to_text()
result = result.rstrip('.')
return ttl, result
except (DNSException, NXDOMAIN, NoAnswer):
return 0, None
def is_ip(domain):
try:
socket.inet_pton(socket.AF_INET, domain)
return True
except socket.error:
try:
socket.inet_pton(socket.AF_INET6, domain)
return True
except socket.error:
return False
class CNAMELookupMiddleware(object):
"""
CNAME Lookup Middleware
See above for a full description.
:param app: The next WSGI filter or app in the paste.deploy
chain.
:param conf: The configuration dict for the middleware.
"""
def __init__(self, app, conf):
if not MODULE_DEPENDENCY_MET:
# reraise the exception if the dependency wasn't met
raise ImportError('dnspython is required for this module')
self.app = app
storage_domain = conf.get('storage_domain', 'example.com')
self.storage_domain = ['.' + s for s in
list_from_csv(storage_domain)
if not s.startswith('.')]
self.storage_domain += [s for s in list_from_csv(storage_domain)
if s.startswith('.')]
self.lookup_depth = int(conf.get('lookup_depth', '1'))
self.memcache = None
self.logger = get_logger(conf, log_route='cname-lookup')
def _domain_endswith_in_storage_domain(self, a_domain):
for domain in self.storage_domain:
if a_domain.endswith(domain):
return True
return False
def __call__(self, env, start_response):
if not self.storage_domain:
return self.app(env, start_response)
if 'HTTP_HOST' in env:
given_domain = env['HTTP_HOST']
else:
given_domain = env['SERVER_NAME']
port = ''
if ':' in given_domain:
given_domain, port = given_domain.rsplit(':', 1)
if given_domain == self.storage_domain[1:]: # strip initial '.'
return self.app(env, start_response)
if is_ip(given_domain):
return self.app(env, start_response)
a_domain = given_domain
if not self._domain_endswith_in_storage_domain(a_domain):
if self.memcache is None:
self.memcache = cache_from_env(env)
error = True
for tries in xrange(self.lookup_depth):
found_domain = None
if self.memcache:
memcache_key = ''.join(['cname-', a_domain])
found_domain = self.memcache.get(memcache_key)
if not found_domain:
ttl, found_domain = lookup_cname(a_domain)
if self.memcache:
memcache_key = ''.join(['cname-', given_domain])
self.memcache.set(memcache_key, found_domain,
time=ttl)
if found_domain is None or found_domain == a_domain:
# no CNAME records or we're at the last lookup
error = True
found_domain = None
break
elif self._domain_endswith_in_storage_domain(found_domain):
# Found it!
self.logger.info(
_('Mapped %(given_domain)s to %(found_domain)s') %
{'given_domain': given_domain,
'found_domain': found_domain})
if port:
env['HTTP_HOST'] = ':'.join([found_domain, port])
else:
env['HTTP_HOST'] = found_domain
error = False
break
else:
# try one more deep in the chain
self.logger.debug(
_('Following CNAME chain for '
'%(given_domain)s to %(found_domain)s') %
{'given_domain': given_domain,
'found_domain': found_domain})
a_domain = found_domain
if error:
if found_domain:
msg = 'CNAME lookup failed after %d tries' % \
self.lookup_depth
else:
msg = 'CNAME lookup failed to resolve to a valid domain'
resp = HTTPBadRequest(request=Request(env), body=msg,
content_type='text/plain')
return resp(env, start_response)
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf): # pragma: no cover
conf = global_conf.copy()
conf.update(local_conf)
def cname_filter(app):
return CNAMELookupMiddleware(app, conf)
return cname_filter
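# Illustrative proxy-server.conf snippet for wiring this filter into the WSGI
# pipeline (the egg entry point name is an assumption; check the Swift release
# you deploy against):
#
#   [filter:cname_lookup]
#   use = egg:swift#cname_lookup
#   storage_domain = example.com
#   lookup_depth = 1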
|
{
"content_hash": "dbaae7cca5dacaed616f375cba79deec",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 77,
"avg_line_length": 37.96363636363636,
"alnum_prop": 0.5539591315453385,
"repo_name": "gotostack/swift",
"id": "bd3697144b7dbcd240a234c5c3562759c9ca3081",
"size": "6860",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "swift/common/middleware/cname_lookup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import unittest
from telemetry import decorators
from telemetry.core import browser_options
from telemetry.core.backends.chrome import ios_browser_finder
class IosBrowserFinderUnitTest(unittest.TestCase):
# TODO(baxley): Currently the tests require a device with Chrome running.
# This should be stubbed out so it runs on any system, with no device
# dependencies.
@decorators.Enabled('ios')
def testFindIosChrome(self):
finder_options = browser_options.BrowserFinderOptions()
browsers = ios_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertTrue(browsers)
for browser in browsers:
self.assertEqual('ios-chrome', browser.browser_type)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "188521226c1dd245af8410ae81e47669",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 75,
"avg_line_length": 34.904761904761905,
"alnum_prop": 0.762619372442019,
"repo_name": "krieger-od/nwjs_chromium.src",
"id": "e9e9042943183b3e9a7e1ae81b596e6666916b14",
"size": "895",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/core/backends/chrome/ios_browser_finder_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "23945"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "4123085"
},
{
"name": "C++",
"bytes": "225911506"
},
{
"name": "CSS",
"bytes": "875874"
},
{
"name": "Dart",
"bytes": "74976"
},
{
"name": "Go",
"bytes": "18155"
},
{
"name": "HTML",
"bytes": "27190037"
},
{
"name": "Java",
"bytes": "7645280"
},
{
"name": "JavaScript",
"bytes": "18828195"
},
{
"name": "Makefile",
"bytes": "96270"
},
{
"name": "Objective-C",
"bytes": "1228317"
},
{
"name": "Objective-C++",
"bytes": "7573158"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "248854"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "418340"
},
{
"name": "Python",
"bytes": "8032628"
},
{
"name": "Shell",
"bytes": "464218"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18335"
}
],
"symlink_target": ""
}
|
import argparse
from fp import similarity, fingerprint, score, read_file
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Rank pairs of files by similarity.')
parser.add_argument('files', nargs='+', type=str, help='files to rank')
parser.add_argument('--k', type=int, default=2, help='n-gram length (default: 2)')
parser.add_argument('--t', type=int, default=5, help='min length of the guaranteed match (default: 5)')
args = parser.parse_args()
k, t = args.k, args.t
fps = dict()
for filename in args.files:
text = read_file(filename)
fp, _ = fingerprint(text, k=k, t=t)
fps[filename] = fp
res = []
for i in range(len(args.files)):
for j in range(i + 1, len(args.files)):
file1, file2 = args.files[i], args.files[j]
res.append((score(fps[file1], fps[file2]), file1, file2))
res = sorted(res, key=lambda t: t[0], reverse=True)
for s, file1, file2 in res[:50]:
print(s, '\t', file1, '\t', file2)
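# Example invocation (illustrative; the file names are placeholders):
#   python rank.py --k 3 --t 8 essay1.txt essay2.txt essay3.txt
# This prints up to 50 pairs, most similar first, one "<score>\t<file1>\t<file2>" per line.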
|
{
"content_hash": "f54b849fd55f43c520d81900dc9ee99e",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 107,
"avg_line_length": 43,
"alnum_prop": 0.6056201550387597,
"repo_name": "lionell/labs",
"id": "0c1b249d23f4e2871f3c4edec5b3b3e74f58e092",
"size": "1056",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nlp/rank.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2742"
},
{
"name": "C",
"bytes": "7333"
},
{
"name": "C++",
"bytes": "456253"
},
{
"name": "CMake",
"bytes": "4245"
},
{
"name": "CSS",
"bytes": "229"
},
{
"name": "Common Lisp",
"bytes": "5522"
},
{
"name": "Dockerfile",
"bytes": "204"
},
{
"name": "Go",
"bytes": "2762"
},
{
"name": "HTML",
"bytes": "24328"
},
{
"name": "Hack",
"bytes": "1087"
},
{
"name": "Java",
"bytes": "291066"
},
{
"name": "JavaScript",
"bytes": "45777"
},
{
"name": "Jupyter Notebook",
"bytes": "8619419"
},
{
"name": "Lex",
"bytes": "4701"
},
{
"name": "Limbo",
"bytes": "1733"
},
{
"name": "M",
"bytes": "655"
},
{
"name": "MATLAB",
"bytes": "4130"
},
{
"name": "Makefile",
"bytes": "2866"
},
{
"name": "PHP",
"bytes": "34890"
},
{
"name": "Perl",
"bytes": "22604"
},
{
"name": "Prolog",
"bytes": "32203"
},
{
"name": "Python",
"bytes": "15075"
},
{
"name": "Yacc",
"bytes": "15806"
}
],
"symlink_target": ""
}
|
from decimal import Decimal as D
from unittest import mock
from django.test import TestCase
from oscar.apps.partner import strategy
class TestNoTaxMixin(TestCase):
def setUp(self):
self.mixin = strategy.NoTax()
self.product = mock.Mock()
self.stockrecord = mock.Mock()
self.stockrecord.price_excl_tax = D('12.00')
def test_returns_no_prices_without_stockrecord(self):
policy = self.mixin.pricing_policy(
self.product, None)
self.assertFalse(policy.exists)
def test_returns_zero_tax(self):
policy = self.mixin.pricing_policy(
self.product, self.stockrecord)
self.assertEqual(D('0.00'), policy.tax)
def test_doesnt_add_tax_to_net_price(self):
policy = self.mixin.pricing_policy(
self.product, self.stockrecord)
self.assertEqual(D('12.00'), policy.incl_tax)
class TestFixedRateTaxMixin(TestCase):
def setUp(self):
self.mixin = strategy.FixedRateTax()
self.mixin.rate = D('0.10')
self.product = mock.Mock()
self.stockrecord = mock.Mock()
self.stockrecord.price_excl_tax = D('12.00')
def test_returns_no_prices_without_stockrecord(self):
policy = self.mixin.pricing_policy(
self.product, None)
self.assertFalse(policy.exists)
def test_returns_correct_tax(self):
policy = self.mixin.pricing_policy(
self.product, self.stockrecord)
expected_tax = self.stockrecord.price_excl_tax * self.mixin.get_rate(
self.product, self.stockrecord)
self.assertEqual(expected_tax, policy.tax)
def test_adds_tax_to_net_price(self):
policy = self.mixin.pricing_policy(
self.product, self.stockrecord)
self.assertEqual(D('13.20'), policy.incl_tax)
|
{
"content_hash": "2bbafa653a0e179a63a109c88b821d6c",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 77,
"avg_line_length": 32.07017543859649,
"alnum_prop": 0.6449671772428884,
"repo_name": "sasha0/django-oscar",
"id": "46bec65c8c59720455caff64e9a3360257b9b02e",
"size": "1828",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/integration/partner/test_tax_mixin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "387941"
},
{
"name": "Dockerfile",
"bytes": "544"
},
{
"name": "HTML",
"bytes": "518624"
},
{
"name": "JavaScript",
"bytes": "344864"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "1957797"
},
{
"name": "Shell",
"bytes": "1643"
}
],
"symlink_target": ""
}
|
"""
@summary:
Demonstrates the basic usage.
@author: Ruben Reifenberg
"""
# First, create a pure database-writing server:
# (this is independent from the connection type, like XMLRPC)
from rrlog.server import dbwriter_sa
engineStr = "mysql://logtester@localhost/logtest"
logServer = dbwriter_sa.createRotatingServer(
engineStr = engineStr,
tableNamePattern = "logtable_%s", # "pattern" because %s (or %d) is required for the rotate-number
rotateCount=3,
rotateLineMin=10,
tsFormat="std1", # Timestamp format: std1 is shorthand for the strftime-format "%H:%M.%S;%3N"
)
# Start the server as an XMLRPC server:
from rrlog.server import xmlrpc
xmlrpc.startServer(
logServer,
ports=(9804,9805,9806,), # try in this order, use the first port available
)
# The server waits for requests now.
|
{
"content_hash": "e265265fa94e42ef5e85c21b18f84c7f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 100,
"avg_line_length": 23.142857142857142,
"alnum_prop": 0.7358024691358025,
"repo_name": "shful/python-rrlog",
"id": "c8c781e4782cf1c695e38997d2ed1194b8e1d91f",
"size": "1938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/demo/demo_xmlrpcserverdatabase.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "162448"
}
],
"symlink_target": ""
}
|
import mock
from rally import exceptions
from rally.plugins.openstack import types
from tests.unit import fakes
from tests.unit import test
class FlavorTestCase(test.TestCase):
def setUp(self):
super(FlavorTestCase, self).setUp()
self.clients = fakes.FakeClients()
self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.tiny",
id="1"))
self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.nano",
id="42"))
self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.large",
id="44"))
self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.large",
id="45"))
def test_transform_by_id(self):
resource_config = {"id": "42"}
flavor_id = types.Flavor.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(flavor_id, "42")
def test_transform_by_name(self):
resource_config = {"name": "m1.nano"}
flavor_id = types.Flavor.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(flavor_id, "42")
def test_transform_by_name_no_match(self):
resource_config = {"name": "m1.medium"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.Flavor.transform, self.clients,
resource_config)
def test_transform_by_name_multiple_match(self):
resource_config = {"name": "m1.large"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.Flavor.transform, self.clients,
resource_config)
def test_transform_by_regex(self):
resource_config = {"regex": "m(1|2)\.nano"}
flavor_id = types.Flavor.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(flavor_id, "42")
def test_transform_by_regex_multiple_match(self):
resource_config = {"regex": "^m1"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.Flavor.transform, self.clients,
resource_config)
def test_transform_by_regex_no_match(self):
resource_config = {}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.Flavor.transform, self.clients,
resource_config)
class EC2FlavorTestCase(test.TestCase):
def setUp(self):
super(EC2FlavorTestCase, self).setUp()
self.clients = fakes.FakeClients()
self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.tiny",
id="1"))
self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.nano",
id="2"))
self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.large",
id="3"))
self.clients.nova().flavors._cache(fakes.FakeResource(name="m1.xlarge",
id="3"))
def test_transform_by_name(self):
resource_config = {"name": "m1.nano"}
flavor_name = types.EC2Flavor.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(flavor_name, "m1.nano")
def test_transform_by_id(self):
resource_config = {"id": "2"}
flavor_name = types.EC2Flavor.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(flavor_name, "m1.nano")
def test_transform_by_id_no_match(self):
resource_config = {"id": "4"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.EC2Flavor.transform, self.clients,
resource_config)
def test_transform_by_id_multiple_match(self):
resource_config = {"id": "3"}
self.assertRaises(exceptions.MultipleMatchesFound,
types.EC2Flavor.transform, self.clients,
resource_config)
class GlanceImageTestCase(test.TestCase):
def setUp(self):
super(GlanceImageTestCase, self).setUp()
self.clients = fakes.FakeClients()
image1 = fakes.FakeResource(name="cirros-0.3.4-uec", id="100")
self.clients.glance().images._cache(image1)
image2 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk", id="101")
self.clients.glance().images._cache(image2)
image3 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk-copy",
id="102")
self.clients.glance().images._cache(image3)
image4 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk-copy",
id="103")
self.clients.glance().images._cache(image4)
def test_transform_by_id(self):
resource_config = {"id": "100"}
image_id = types.GlanceImage.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(image_id, "100")
def test_transform_by_name(self):
resource_config = {"name": "^cirros-0.3.4-uec$"}
image_id = types.GlanceImage.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(image_id, "100")
def test_transform_by_name_no_match(self):
resource_config = {"name": "cirros-0.3.4-uec-boot"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.GlanceImage.transform, self.clients,
resource_config)
def test_transform_by_name_match_multiple(self):
resource_config = {"name": "cirros-0.3.4-uec-ramdisk-copy"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.GlanceImage.transform, self.clients,
resource_config)
def test_transform_by_regex(self):
resource_config = {"regex": "-uec$"}
image_id = types.GlanceImage.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(image_id, "100")
def test_transform_by_regex_match_multiple(self):
resource_config = {"regex": "^cirros"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.GlanceImage.transform, self.clients,
resource_config)
def test_transform_by_regex_no_match(self):
resource_config = {"regex": "-boot$"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.GlanceImage.transform, self.clients,
resource_config)
class EC2ImageTestCase(test.TestCase):
def setUp(self):
super(EC2ImageTestCase, self).setUp()
self.clients = fakes.FakeClients()
image1 = fakes.FakeResource(name="cirros-0.3.4-uec", id="100")
self.clients.glance().images._cache(image1)
image2 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk", id="102")
self.clients.glance().images._cache(image2)
image3 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk-copy",
id="102")
self.clients.glance().images._cache(image3)
image4 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk-copy",
id="103")
self.clients.glance().images._cache(image4)
ec2_image1 = fakes.FakeResource(name="cirros-0.3.4-uec", id="200")
ec2_image2 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk",
id="201")
ec2_image3 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk-copy",
id="202")
ec2_image4 = fakes.FakeResource(name="cirros-0.3.4-uec-ramdisk-copy",
id="203")
self.clients.ec2().get_all_images = mock.Mock(
return_value=[ec2_image1, ec2_image2, ec2_image3, ec2_image4])
def test_transform_by_name(self):
resource_config = {"name": "^cirros-0.3.4-uec$"}
ec2_image_id = types.EC2Image.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(ec2_image_id, "200")
def test_transform_by_id(self):
resource_config = {"id": "100"}
ec2_image_id = types.EC2Image.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(ec2_image_id, "200")
def test_transform_by_id_no_match(self):
resource_config = {"id": "101"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.EC2Image.transform, self.clients,
resource_config)
def test_transform_by_id_match_multiple(self):
resource_config = {"id": "102"}
self.assertRaises(exceptions.MultipleMatchesFound,
types.EC2Image.transform, self.clients,
resource_config)
def test_transform_by_name_no_match(self):
resource_config = {"name": "cirros-0.3.4-uec-boot"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.EC2Image.transform, self.clients,
resource_config)
def test_transform_by_name_match_multiple(self):
resource_config = {"name": "cirros-0.3.4-uec-ramdisk-copy"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.EC2Image.transform, self.clients,
resource_config)
def test_transform_by_regex(self):
resource_config = {"regex": "-uec$"}
ec2_image_id = types.EC2Image.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(ec2_image_id, "200")
def test_transform_by_regex_match_multiple(self):
resource_config = {"regex": "^cirros"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.EC2Image.transform, self.clients,
resource_config)
def test_transform_by_regex_no_match(self):
resource_config = {"regex": "-boot$"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.EC2Image.transform, self.clients,
resource_config)
class VolumeTypeTestCase(test.TestCase):
def setUp(self):
super(VolumeTypeTestCase, self).setUp()
self.clients = fakes.FakeClients()
volume_type1 = fakes.FakeResource(name="lvmdriver-1", id=100)
self.clients.cinder().volume_types._cache(volume_type1)
def test_transform_by_id(self):
resource_config = {"id": 100}
volumetype_id = types.VolumeType.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(volumetype_id, 100)
def test_transform_by_name(self):
resource_config = {"name": "lvmdriver-1"}
volumetype_id = types.VolumeType.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(volumetype_id, 100)
def test_transform_by_name_no_match(self):
resource_config = {"name": "nomatch-1"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.VolumeType.transform,
self.clients, resource_config)
def test_transform_by_regex(self):
resource_config = {"regex": "^lvm.*-1"}
volumetype_id = types.VolumeType.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(volumetype_id, 100)
def test_transform_by_regex_no_match(self):
resource_config = {"regex": "dd"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.VolumeType.transform,
self.clients, resource_config)
class NeutronNetworkTestCase(test.TestCase):
def setUp(self):
super(NeutronNetworkTestCase, self).setUp()
self.clients = fakes.FakeClients()
net1_data = {"network": {
"name": "net1"
}}
network1 = self.clients.neutron().create_network(net1_data)
self.net1_id = network1["network"]["id"]
def test_transform_by_id(self):
resource_config = {"id": self.net1_id}
network_id = types.NeutronNetwork.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(network_id, self.net1_id)
def test_transform_by_name(self):
resource_config = {"name": "net1"}
network_id = types.NeutronNetwork.transform(
clients=self.clients, resource_config=resource_config)
self.assertEqual(network_id, self.net1_id)
def test_transform_by_name_no_match(self):
resource_config = {"name": "nomatch-1"}
self.assertRaises(exceptions.InvalidScenarioArgument,
types.NeutronNetwork.transform,
self.clients, resource_config)
|
{
"content_hash": "f8e19d2c0ca53de6335f4a3f87b885da",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 79,
"avg_line_length": 43.032051282051285,
"alnum_prop": 0.5852822880977209,
"repo_name": "eayunstack/rally",
"id": "40c59c45d5b6206f849e18f66e5e7ddc3585d28e",
"size": "14078",
"binary": false,
"copies": "2",
"ref": "refs/heads/product",
"path": "tests/unit/plugins/openstack/test_types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "36716"
},
{
"name": "Mako",
"bytes": "17389"
},
{
"name": "Python",
"bytes": "2988245"
},
{
"name": "Shell",
"bytes": "41128"
}
],
"symlink_target": ""
}
|
import unreal_engine as ue
class FacesDetector:
def __init__(self):
self.texture_to_draw = None
def draw_hud(self):
# exit if we do not have enough data
if not self.texture_to_draw:
return
# draw what the player pawn is seeing
self.uobject.hud_draw_texture(self.texture_to_draw, 0, 0, 256, 256)
|
{
"content_hash": "fb16064337059d37d5cc3c97e46a3b8c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 75,
"avg_line_length": 26.642857142857142,
"alnum_prop": 0.5898123324396782,
"repo_name": "getnamo/UnrealEnginePython",
"id": "56f5081cc39672c67275909f76f41bba45d9afb4",
"size": "373",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tutorials/FaceRecognitionWithOpenCVAndUnrealEnginePython_Assets/hud_first.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1177094"
},
{
"name": "C#",
"bytes": "23839"
},
{
"name": "C++",
"bytes": "2133454"
},
{
"name": "Python",
"bytes": "109035"
},
{
"name": "Shell",
"bytes": "232"
}
],
"symlink_target": ""
}
|
import json
import os
import sys
import logging
from subprocess import Popen, PIPE
def mkdir(directory):
if not os.path.isdir(directory):
cmd = 'mkdir -p %s' % directory
output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
logging.info(cmd)
logging.info(output)
def mount(entry_point, mount_point):
# Check if the mount point exists. If not
# go ahead and create it.
# mount -t glusterfs entry_point mount_point
cmd = 'mount -t glusterfs %s %s' % (entry_point,
mount_point)
output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
logging.info(cmd)
logging.info(output)
print output
def umount(mount_point):
cmd = 'umount %s' % mount_point
output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
logging.info(cmd)
logging.info(output)
cmd = sys.argv[1]
if cmd == "mount":
entry = sys.argv[2]
mkdir('/service/data')
mount(entry, '/service/data')
elif cmd == "umount":
umount('/service/data')
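# Example invocations (illustrative; the GlusterFS entry point is a placeholder):
#   python mounthelper.py mount gluster-node:/ferry-volume
#   python mounthelper.py umount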
|
{
"content_hash": "09d02d26371aab60ff9ab969b56b7d8a",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 66,
"avg_line_length": 28.2972972972973,
"alnum_prop": 0.6255969436485196,
"repo_name": "jhorey/ferry",
"id": "665e12ff46e8bfca8f20979e0be88c3142a8e35a",
"size": "1047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ferry/data/dockerfiles/spark/mounthelper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "527"
},
{
"name": "Groovy",
"bytes": "645"
},
{
"name": "JavaScript",
"bytes": "284"
},
{
"name": "PLpgSQL",
"bytes": "214"
},
{
"name": "PigLatin",
"bytes": "279"
},
{
"name": "Python",
"bytes": "493950"
},
{
"name": "Shell",
"bytes": "44954"
}
],
"symlink_target": ""
}
|
"""Defines a git hook to allow pre-commit warnings and errors about import order.
usage:
exit_code = git_hook(strict=True|False, modify=True|False)
"""
import os
import subprocess # nosec - Needed for hook
from pathlib import Path
from typing import List
from isort import Config, api, exceptions
def get_output(command: List[str]) -> str:
"""Run a command and return raw output
:param str command: the command to run
:returns: the stdout output of the command
"""
result = subprocess.run(command, stdout=subprocess.PIPE, check=True) # nosec - trusted input
return result.stdout.decode()
def get_lines(command: List[str]) -> List[str]:
"""Run a command and return lines of output
:param str command: the command to run
:returns: list of whitespace-stripped lines output by command
"""
stdout = get_output(command)
return [line.strip() for line in stdout.splitlines()]
def git_hook(
strict: bool = False, modify: bool = False, lazy: bool = False, settings_file: str = ""
) -> int:
"""Git pre-commit hook to check staged files for isort errors
:param bool strict - if True, return number of errors on exit,
causing the hook to fail. If False, return zero so it will
just act as a warning.
:param bool modify - if True, fix the sources if they are not
sorted properly. If False, only report result without
modifying anything.
:param bool lazy - if True, also check/fix unstaged files.
This is useful if you frequently use ``git commit -a`` for example.
If False, only check/fix the staged files for isort errors.
:param str settings_file - A path to a file to be used as
the configuration file for this run.
When settings_file is the empty string, the configuration file
will be searched starting at the directory containing the first
staged file, if any, and going upward in the directory structure.
:return number of errors if in strict mode, 0 otherwise.
"""
# Get list of files modified and staged
diff_cmd = ["git", "diff-index", "--cached", "--name-only", "--diff-filter=ACMRTUXB", "HEAD"]
if lazy:
diff_cmd.remove("--cached")
files_modified = get_lines(diff_cmd)
if not files_modified:
return 0
errors = 0
config = Config(
settings_file=settings_file,
settings_path=os.path.dirname(os.path.abspath(files_modified[0])),
)
for filename in files_modified:
if filename.endswith(".py"):
# Get the staged contents of the file
staged_cmd = ["git", "show", f":{filename}"]
staged_contents = get_output(staged_cmd)
try:
if not api.check_code_string(
staged_contents, file_path=Path(filename), config=config
):
errors += 1
if modify:
api.sort_file(filename, config=config)
except exceptions.FileSkipped: # pragma: no cover
pass
return errors if strict else 0
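# Minimal sketch of a .git/hooks/pre-commit script that calls git_hook()
# (an assumption about how the hook is wired up, not part of this module):
#
#   #!/usr/bin/env python
#   import sys
#   from isort.hooks import git_hook
#   sys.exit(git_hook(strict=True, modify=True))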
|
{
"content_hash": "503da681f9e39a515725cb177b5c2ada",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 97,
"avg_line_length": 36.406976744186046,
"alnum_prop": 0.6339827531140211,
"repo_name": "PyCQA/isort",
"id": "135886fcbcbb28f8ec8144e27937bd86693b1540",
"size": "3131",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "isort/hooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "158"
},
{
"name": "Dockerfile",
"bytes": "682"
},
{
"name": "Python",
"bytes": "848358"
},
{
"name": "Shell",
"bytes": "1260"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
from react import VERSION
setup(
name='PyReact',
version=VERSION,
author='Kunal Mehta',
author_email='kunalm@fb.com',
url='https://github.com/reactjs/react-python/',
license='Apache-2.0',
description='Python bridge to JSX & the React JavaScript library.',
long_description=open('DESCRIPTION').read(),
classifiers =[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Code Generators',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=find_packages(),
include_package_data=True,
package_data={'js': [
'js/react/react.js',
'js/react/react.min.js',
'js/react/react-with-addons.js',
'js/react/react-with-addons.min.js',
'js/react/JSXTransformer.js',
]},
install_requires=[
'PyExecJS >= 1.0.5',
]
)
|
{
"content_hash": "c59273f28e361a02ea7b51b80b1841f1",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 71,
"avg_line_length": 31.885714285714286,
"alnum_prop": 0.6129032258064516,
"repo_name": "frankier/react-python",
"id": "59ab67b7d3d36de20dd14ff87226af716dc44b1a",
"size": "1116",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "387"
},
{
"name": "Python",
"bytes": "10258"
}
],
"symlink_target": ""
}
|
"""
This module is used to find key words and phrases in the articles index and map the
text to sentiment values in the googles index
"""
import json
import re
import pickle
import math
import os
import numpy as np
import nltk
import elastic
class ElasticSentimentSelection(object):
"""
Conducts a search on an index and then finds a matching document
in another index where sentiment is kept by id match
"""
def __init__(self, rebuild=False):
# declare variables for sentiment searcher
self.relevant_documents = {}
# create sentiment model for objectivity
self.word_features = []
self.classifier = None
if os.path.exists('models/sentiment/label_probdist.p') and \
os.path.exists('models/sentiment/feature_probdist.p') and \
os.path.exists('models/sentiment/word_feature_list.p') and not rebuild:
print 'loading sentiment model'
# load in model files
with open('models/sentiment/label_probdist.p', 'rb') as label_probdist_file:
label_probdist = pickle.load(label_probdist_file)
with open('models/sentiment/feature_probdist.p', 'rb') as feature_probdist_file:
feature_probdist = pickle.load(feature_probdist_file)
with open('models/sentiment/word_feature_list.p', 'rb') as word_feature_list_file:
self.word_features = pickle.load(word_feature_list_file)
# instantiate classifier
self.classifier = nltk.NaiveBayesClassifier(label_probdist, feature_probdist)
else:
print 'generating sentiment model'
# get training data
subjective_sents = nltk.corpus.subjectivity.sents(categories='subj')
objective_sents = nltk.corpus.subjectivity.sents(categories='obj')
subjective_docs = [(sent, 'subj') for sent in subjective_sents]
objective_docs = [(sent, 'obj') for sent in objective_sents]
# train model
sentiment_training_data = subjective_docs + objective_docs
self.create_word_features(self.extract_words(sentiment_training_data))
self.classifier = self.train_sentiment_classifier(sentiment_training_data)
# save out model so it will not need to be regenerated
with open('models/sentiment/label_probdist.p', 'wb') as label_probdist_file:
pickle.dump(self.classifier._label_probdist, label_probdist_file)
with open('models/sentiment/feature_probdist.p', 'wb') as feature_probdist_file:
pickle.dump(self.classifier._feature_probdist, feature_probdist_file)
with open('models/sentiment/word_feature_list.p', 'wb') as word_feature_list_file:
pickle.dump(self.word_features, word_feature_list_file)
def extract_words(self, text_tuples):
"""
        Extracts all words from a training corpus where each entry is a tuple ([tokens], sentiment).
Inputs:
text_tuples (list of tuples): list of tuples in form ([tokens], sentiment)
Output:
(list): all words in training corpus
"""
all_words = []
for (words, _) in text_tuples:
all_words.extend(words)
return all_words
def create_word_features(self, wordlist):
"""
        Returns the unique set of words from a wordlist
Inputs:
wordlist (list): list of string words
Output:
(list): unique words in wordlist
"""
wordlist = nltk.FreqDist(wordlist)
self.word_features = wordlist.keys()
return self.word_features
def extract_features(self, document):
"""
Extracts features from a document given a wordlist
Input:
document (list): list of words in document
Output:
(list): features found in document
"""
document_words = set(document)
features = {}
for word in self.word_features:
features['contains(%s)' % word] = (word in document_words)
return features
def train_sentiment_classifier(self, training_data):
"""
Trains a Naive Bayes Classifier on a set of training data
Inputs:
training_data (list of tuples): list of tuples in format ([tokens], sentiment)
Output:
(NaiveBayesClassifier): instance of a trained naive bayes classifier
"""
# generate list of word features for training data
self.create_word_features(self.extract_words(training_data))
# extract features for training data
training_set = nltk.classify.apply_features(self.extract_features, training_data)
# create and return classifier
return nltk.NaiveBayesClassifier.train(training_set)
def get_avg_sentiment(self, search_phrase):
"""
Computes average sentiment from all relevant documents returned from search.
inputs:
search_phrase (string): string to search for in ES
output:
(float): average polarity from all related documents
"""
# get relevant documents
self.relevant_documents = self.get_relevant_documents(search_phrase)
# return average polarity for phrase
average_polarity = 0
for i in self.relevant_documents['hits']['hits']:
average_polarity += float(i['_source']['documentSentiment']['polarity'])
return average_polarity / (len(self.relevant_documents['hits']['hits']) + 0.0000001)
def get_best_sentence(self, search_phrase):
"""
        Return the best sentence, along with the associated article title and full text
Inputs:
search_phrase (string): string phrase to search for in elastic search
Outputs:
(string): top sentence found in best doc
(article_title): title of article for context
"""
# get sentiment for phrase
average_sentiment = self.get_avg_sentiment(search_phrase)
# find closest document
closest_doc = self.get_closest_document(average_sentiment)
# find the best (most subjective) sentence
top_sentence = self.get_most_subjective_sentence(closest_doc)
# get article title
article_title = closest_doc['_source']['ProQ:']
# get article full text
article_full_text = closest_doc['_source']['Full text:']
return top_sentence, article_title, article_full_text
def get_closest_document(self, sentiment):
"""
Returns closest document by sentiment to average sentiment.
Inputs:
sentiment (float): average sentiment of all relevant documents
Outputs:
(dictionary): closest document
"""
# variables to hold closest document
closest = {}
closest_value = 10000000
# find closest difference
for i in self.relevant_documents['hits']['hits']:
current_diff = math.sqrt(math.pow(i['_source']['documentSentiment']['polarity'] - \
sentiment, 2))
if current_diff < closest_value:
closest = i
closest_value = current_diff
return closest
def get_most_subjective_sentence(self, closest_doc):
"""
        Compute subjectivity for each sentence and pick the most subjective one
Inputs:
closest_doc (dictionary): dictionary fetched from ES
        Outputs:
(string): top sentence by subjectivity
"""
top_sentence = ''
top_sentence_subjectivity = 0
for i in closest_doc['_source']['sentences']:
curr_sentence_tokens = [token.lower() for token in i['content'].split()]
curr_sentence_features = self.extract_features(curr_sentence_tokens)
curr_subjectivity = self.classifier.prob_classify(curr_sentence_features).prob('subj')
if curr_subjectivity > top_sentence_subjectivity:
top_sentence = i['content']
top_sentence_subjectivity = curr_subjectivity
return top_sentence
def get_relevant_documents(self, search_phrase):
"""
Fetches relevant documents from elastic search based on query.
Get only the documents that have a score greater than the average score.
input:
search_phrase (string): string to search for in ES
output:
(dict): dictionary of fetched documents
"""
        # get relevance scores for the top documents returned by the query (up to 500)
index = 'flattened-articles/_search'
score_payload = {'from': 0, 'size': 500, \
'fields': '_score', \
'query': {'query_string': { \
'query': search_phrase.encode('utf-8'), \
'fields': ['Full text:']}}}
score_response = json.loads(elastic.search(elastic.ES_URL, index, score_payload))
# create list of scores with 0 excluded
scores = []
for i in score_response['hits']['hits']:
float_score = float(i['_score'])
if float_score > 0:
scores.append(float_score)
quantile = np.percentile(scores, 50)
# get responses where min_score >= quantile
payload = {'_source': ['ProQ:', 'sentences', 'documentSentiment', 'Full text:'],
'min_score': quantile, \
'from': 0, 'size': 500, \
'query': {'query_string': {'query': search_phrase.encode('utf-8'), \
'fields': ['Full text:']}}}
response = json.loads(elastic.search(elastic.ES_URL, index, payload))
return response
def main():
"""
Called when module is called from command line
"""
ess = ElasticSentimentSelection()
print ess.get_best_sentence('Aladdin')
print ess.get_best_sentence('Goodman Theater')
print ess.get_best_sentence('John Malkovich')
print ess.get_best_sentence('Romeo and Juliet')
print ess.get_best_sentence('Hamlet')
if __name__ == '__main__':
main()
|
{
"content_hash": "fe2eb621776155ee2c991da535931b3d",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 100,
"avg_line_length": 36.17314487632509,
"alnum_prop": 0.6030086939533066,
"repo_name": "kapil1garg/eecs338-chris-jones",
"id": "62fb02ef6c7c3a8ef047156d00040c5c973437a4",
"size": "10237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "es_sentiment_selection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "8825249"
},
{
"name": "Python",
"bytes": "77808"
},
{
"name": "Shell",
"bytes": "88"
}
],
"symlink_target": ""
}
|
import sys
import os
import torch
sys.path += ['../']
import gzip
import pickle
from utils.util import pad_input_ids, multi_file_process, numbered_byte_file_generator, EmbeddingCache
import csv
from model.models import MSMarcoConfigDict, ALL_MODELS
from torch.utils.data import DataLoader, Dataset, TensorDataset, IterableDataset, get_worker_info
import numpy as np
from os import listdir
from os.path import isfile, join
import argparse
import json
def write_query_rel(args, pid2offset, query_file, positive_id_file, out_query_file, out_id_file):
print(
"Writing query files " +
str(out_query_file) +
" and " +
str(out_id_file))
query_positive_id = set()
query_positive_id_path = os.path.join(
args.data_dir,
positive_id_file,
)
print("Loading query_2_pos_docid")
with gzip.open(query_positive_id_path, 'rt', encoding='utf8') if positive_id_file[-2:] == "gz" else open(query_positive_id_path, 'r', encoding='utf8') as f:
if args.data_type == 0:
tsvreader = csv.reader(f, delimiter=" ")
else:
tsvreader = csv.reader(f, delimiter="\t")
for [topicid, _, docid, rel] in tsvreader:
query_positive_id.add(int(topicid))
query_collection_path = os.path.join(
args.data_dir,
query_file,
)
out_query_path = os.path.join(
args.out_data_dir,
out_query_file,
)
qid2offset = {}
print('start query file split processing')
multi_file_process(
args,
32,
query_collection_path,
out_query_path,
QueryPreprocessingFn)
print('start merging splits')
idx = 0
with open(out_query_path, 'wb') as f:
for record in numbered_byte_file_generator(
out_query_path, 32, 8 + 4 + args.max_query_length * 4):
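            # each record is: 8-byte big-endian query id, followed by a fixed-size
            # payload of a 4-byte length and max_query_length int32 token ids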
q_id = int.from_bytes(record[:8], 'big')
if q_id not in query_positive_id:
# exclude the query as it is not in label set
continue
f.write(record[8:])
qid2offset[q_id] = idx
idx += 1
if idx < 3:
print(str(idx) + " " + str(q_id))
qid2offset_path = os.path.join(
args.out_data_dir,
"qid2offset.pickle",
)
with open(qid2offset_path, 'wb') as handle:
pickle.dump(qid2offset, handle, protocol=4)
print("done saving qid2offset")
print("Total lines written: " + str(idx))
meta = {'type': 'int32', 'total_number': idx,
'embedding_size': args.max_query_length}
with open(out_query_path + "_meta", 'w') as f:
json.dump(meta, f)
embedding_cache = EmbeddingCache(out_query_path)
print("First line")
with embedding_cache as emb:
print(emb[0])
out_id_path = os.path.join(
args.out_data_dir,
out_id_file,
)
print("Writing qrels")
with gzip.open(query_positive_id_path, 'rt', encoding='utf8') if positive_id_file[-2:] == "gz" else open(query_positive_id_path, 'r', encoding='utf8') as f, \
open(out_id_path, "w", encoding='utf-8') as out_id:
if args.data_type == 0:
tsvreader = csv.reader(f, delimiter=" ")
else:
tsvreader = csv.reader(f, delimiter="\t")
out_line_count = 0
for [topicid, _, docid, rel] in tsvreader:
topicid = int(topicid)
if args.data_type == 0:
docid = int(docid[1:])
else:
docid = int(docid)
out_id.write(str(qid2offset[topicid]) +
"\t" +
str(pid2offset[docid]) +
"\t" +
rel +
"\n")
out_line_count += 1
print("Total lines written: " + str(out_line_count))
def preprocess(args):
pid2offset = {}
if args.data_type == 0:
in_passage_path = os.path.join(
args.data_dir,
"msmarco-docs.tsv",
)
else:
in_passage_path = os.path.join(
args.data_dir,
"collection.tsv",
)
out_passage_path = os.path.join(
args.out_data_dir,
"passages",
)
if os.path.exists(out_passage_path):
print("preprocessed data already exist, exit preprocessing")
return
out_line_count = 0
print('start passage file split processing')
multi_file_process(
args,
32,
in_passage_path,
out_passage_path,
PassagePreprocessingFn)
print('start merging splits')
with open(out_passage_path, 'wb') as f:
for idx, record in enumerate(numbered_byte_file_generator(
out_passage_path, 32, 8 + 4 + args.max_seq_length * 4)):
p_id = int.from_bytes(record[:8], 'big')
f.write(record[8:])
pid2offset[p_id] = idx
if idx < 3:
print(str(idx) + " " + str(p_id))
out_line_count += 1
print("Total lines written: " + str(out_line_count))
meta = {
'type': 'int32',
'total_number': out_line_count,
'embedding_size': args.max_seq_length}
with open(out_passage_path + "_meta", 'w') as f:
json.dump(meta, f)
embedding_cache = EmbeddingCache(out_passage_path)
print("First line")
with embedding_cache as emb:
print(emb[0])
pid2offset_path = os.path.join(
args.out_data_dir,
"pid2offset.pickle",
)
with open(pid2offset_path, 'wb') as handle:
pickle.dump(pid2offset, handle, protocol=4)
print("done saving pid2offset")
if args.data_type == 0:
write_query_rel(
args,
pid2offset,
"msmarco-doctrain-queries.tsv",
"msmarco-doctrain-qrels.tsv",
"train-query",
"train-qrel.tsv")
write_query_rel(
args,
pid2offset,
"msmarco-test2019-queries.tsv",
"2019qrels-docs.txt",
"dev-query",
"dev-qrel.tsv")
else:
write_query_rel(
args,
pid2offset,
"queries.train.tsv",
"qrels.train.tsv",
"train-query",
"train-qrel.tsv")
write_query_rel(
args,
pid2offset,
"queries.dev.small.tsv",
"qrels.dev.small.tsv",
"dev-query",
"dev-qrel.tsv")
def PassagePreprocessingFn(args, line, tokenizer):
if args.data_type == 0:
line_arr = line.split('\t')
p_id = int(line_arr[0][1:]) # remove "D"
url = line_arr[1].rstrip()
title = line_arr[2].rstrip()
p_text = line_arr[3].rstrip()
full_text = url + "<sep>" + title + "<sep>" + p_text
# keep only first 10000 characters, should be sufficient for any
# experiment that uses less than 500 - 1k tokens
full_text = full_text[:args.max_doc_character]
else:
line = line.strip()
line_arr = line.split('\t')
p_id = int(line_arr[0])
p_text = line_arr[1].rstrip()
# keep only first 10000 characters, should be sufficient for any
# experiment that uses less than 500 - 1k tokens
full_text = p_text[:args.max_doc_character]
passage = tokenizer.encode(
full_text,
add_special_tokens=True,
max_length=args.max_seq_length,
)
passage_len = min(len(passage), args.max_seq_length)
input_id_b = pad_input_ids(passage, args.max_seq_length)
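    # serialize as: 8-byte big-endian id + 4-byte passage length + max_seq_length int32 token ids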
return p_id.to_bytes(8,'big') + passage_len.to_bytes(4,'big') + np.array(input_id_b,np.int32).tobytes()
def QueryPreprocessingFn(args, line, tokenizer):
line_arr = line.split('\t')
q_id = int(line_arr[0])
passage = tokenizer.encode(
line_arr[1].rstrip(),
add_special_tokens=True,
max_length=args.max_query_length)
passage_len = min(len(passage), args.max_query_length)
input_id_b = pad_input_ids(passage, args.max_query_length)
return q_id.to_bytes(8,'big') + passage_len.to_bytes(4,'big') + np.array(input_id_b,np.int32).tobytes()
def GetProcessingFn(args, query=False):
def fn(vals, i):
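        # vals is the (length, padded token ids) pair written out by the preprocessing step above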
passage_len, passage = vals
max_len = args.max_query_length if query else args.max_seq_length
pad_len = max(0, max_len - passage_len)
token_type_ids = ([0] if query else [1]) * passage_len + [0] * pad_len
attention_mask = [1] * passage_len + [0] * pad_len
passage_collection = [(i, passage, attention_mask, token_type_ids)]
query2id_tensor = torch.tensor(
[f[0] for f in passage_collection], dtype=torch.long)
all_input_ids_a = torch.tensor(
[f[1] for f in passage_collection], dtype=torch.int)
all_attention_mask_a = torch.tensor(
[f[2] for f in passage_collection], dtype=torch.bool)
all_token_type_ids_a = torch.tensor(
[f[3] for f in passage_collection], dtype=torch.uint8)
dataset = TensorDataset(
all_input_ids_a,
all_attention_mask_a,
all_token_type_ids_a,
query2id_tensor)
return [ts for ts in dataset]
return fn
def GetTrainingDataProcessingFn(args, query_cache, passage_cache):
def fn(line, i):
line_arr = line.split('\t')
qid = int(line_arr[0])
pos_pid = int(line_arr[1])
neg_pids = line_arr[2].split(',')
neg_pids = [int(neg_pid) for neg_pid in neg_pids]
all_input_ids_a = []
all_attention_mask_a = []
query_data = GetProcessingFn(
args, query=True)(
query_cache[qid], qid)[0]
pos_data = GetProcessingFn(
args, query=False)(
passage_cache[pos_pid], pos_pid)[0]
pos_label = torch.tensor(1, dtype=torch.long)
neg_label = torch.tensor(0, dtype=torch.long)
for neg_pid in neg_pids:
neg_data = GetProcessingFn(
args, query=False)(
passage_cache[neg_pid], neg_pid)[0]
yield (query_data[0], query_data[1], query_data[2], pos_data[0], pos_data[1], pos_data[2], pos_label)
yield (query_data[0], query_data[1], query_data[2], neg_data[0], neg_data[1], neg_data[2], neg_label)
return fn
def GetTripletTrainingDataProcessingFn(args, query_cache, passage_cache):
def fn(line, i):
line_arr = line.split('\t')
qid = int(line_arr[0])
pos_pid = int(line_arr[1])
neg_pids = line_arr[2].split(',')
neg_pids = [int(neg_pid) for neg_pid in neg_pids]
all_input_ids_a = []
all_attention_mask_a = []
query_data = GetProcessingFn(
args, query=True)(
query_cache[qid], qid)[0]
pos_data = GetProcessingFn(
args, query=False)(
passage_cache[pos_pid], pos_pid)[0]
for neg_pid in neg_pids:
neg_data = GetProcessingFn(
args, query=False)(
passage_cache[neg_pid], neg_pid)[0]
yield (query_data[0], query_data[1], query_data[2], pos_data[0], pos_data[1], pos_data[2],
neg_data[0], neg_data[1], neg_data[2])
return fn
def get_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir",
)
parser.add_argument(
"--out_data_dir",
default=None,
type=str,
required=True,
help="The output data dir",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " +
", ".join(
MSMarcoConfigDict.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " +
", ".join(ALL_MODELS),
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_query_length",
default=64,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--max_doc_character",
default=10000,
type=int,
help="used before tokenizer to save tokenizer latency",
)
parser.add_argument(
"--data_type",
default=0,
type=int,
help="0 for doc, 1 for passage",
)
args = parser.parse_args()
return args
def main():
args = get_arguments()
if not os.path.exists(args.out_data_dir):
os.makedirs(args.out_data_dir)
preprocess(args)
if __name__ == '__main__':
main()
|
{
"content_hash": "5a0de6531243671fe8b08d0d4875cfa6",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 162,
"avg_line_length": 31.236238532110093,
"alnum_prop": 0.5371906894779352,
"repo_name": "microsoft/ANCE",
"id": "a01a84f3b47964381956fa2bc90fbf22a363fce3",
"size": "13619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/msmarco_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "15576"
},
{
"name": "Python",
"bytes": "209886"
},
{
"name": "Shell",
"bytes": "13826"
}
],
"symlink_target": ""
}
|
import urllib.parse
from django.test import Client, TestCase
from django.urls import reverse
from circuits.models import Circuit, CircuitType, Provider
from utilities.testing import create_test_user
class ProviderTestCase(TestCase):
def setUp(self):
user = create_test_user(permissions=['circuits.view_provider'])
self.client = Client()
self.client.force_login(user)
Provider.objects.bulk_create([
Provider(name='Provider 1', slug='provider-1', asn=65001),
Provider(name='Provider 2', slug='provider-2', asn=65002),
Provider(name='Provider 3', slug='provider-3', asn=65003),
])
def test_provider_list(self):
url = reverse('circuits:provider_list')
params = {
"q": "test",
}
response = self.client.get('{}?{}'.format(url, urllib.parse.urlencode(params)))
self.assertEqual(response.status_code, 200)
def test_provider(self):
provider = Provider.objects.first()
response = self.client.get(provider.get_absolute_url())
self.assertEqual(response.status_code, 200)
class CircuitTypeTestCase(TestCase):
def setUp(self):
user = create_test_user(permissions=['circuits.view_circuittype'])
self.client = Client()
self.client.force_login(user)
CircuitType.objects.bulk_create([
CircuitType(name='Circuit Type 1', slug='circuit-type-1'),
CircuitType(name='Circuit Type 2', slug='circuit-type-2'),
CircuitType(name='Circuit Type 3', slug='circuit-type-3'),
])
def test_circuittype_list(self):
url = reverse('circuits:circuittype_list')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
class CircuitTestCase(TestCase):
def setUp(self):
user = create_test_user(permissions=['circuits.view_circuit'])
self.client = Client()
self.client.force_login(user)
provider = Provider(name='Provider 1', slug='provider-1', asn=65001)
provider.save()
circuittype = CircuitType(name='Circuit Type 1', slug='circuit-type-1')
circuittype.save()
Circuit.objects.bulk_create([
Circuit(cid='Circuit 1', provider=provider, type=circuittype),
Circuit(cid='Circuit 2', provider=provider, type=circuittype),
Circuit(cid='Circuit 3', provider=provider, type=circuittype),
])
def test_circuit_list(self):
url = reverse('circuits:circuit_list')
params = {
"provider": Provider.objects.first().slug,
"type": CircuitType.objects.first().slug,
}
response = self.client.get('{}?{}'.format(url, urllib.parse.urlencode(params)))
self.assertEqual(response.status_code, 200)
def test_circuit(self):
circuit = Circuit.objects.first()
response = self.client.get(circuit.get_absolute_url())
self.assertEqual(response.status_code, 200)
|
{
"content_hash": "e598edbe201239e5498f08d362fc6903",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 87,
"avg_line_length": 31.778947368421054,
"alnum_prop": 0.6316661146074859,
"repo_name": "lampwins/netbox",
"id": "cb0ea0a320f7647da67d5d26beecef61832ad9db",
"size": "3019",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/circuits/tests/test_views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189339"
},
{
"name": "HTML",
"bytes": "570800"
},
{
"name": "JavaScript",
"bytes": "326125"
},
{
"name": "Python",
"bytes": "1815169"
},
{
"name": "Shell",
"bytes": "2786"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, unicode_literals
import jinja2
from lxml import etree, html
from lxml.html import clean
import logging
import sys
import zipfile
from preprocess import preprocess
import json
from cgi import escape
__author__ = 'bluec0re'
log = logging.getLogger(__name__)
def transform_html(root, init=False, default_style=None):
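    # Recursively converts a cleaned HTML tree into WordprocessingML markup,
    # emitting <w:p>/<w:r>/<w:t> elements for paragraphs, runs and text.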
bold = root.tag == 'strong' or 'bold' in root.attrib.get('style', '')
italic = root.tag == 'i' or 'italic' in root.attrib.get('style', '')
if root.tag == 'li':
default_style = root.getparent().get('class', 'ListParagraph')
if root.tag == 'pre':
default_style = 'poc'
style = root.attrib.get('class', default_style)
new_paragraph = root.tag in ('p', 'h1', 'h2', 'h3', 'h4', 'li', 'pre', 'br') and not init or style
new_r = not init or new_paragraph or bold or italic
result = ''
if new_r:
result += '</w:t></w:r>'
if new_paragraph:
result += '</w:p>'
result += '<w:p><w:pPr>'
if style:
result += '<w:pStyle w:val="%s"/>' % escape(style)
if root.tag == 'li':
result += '<w:numPr><w:ilvl w:val="0"/><w:numId w:val="5"/></w:numPr>'
result += '<w:rPr></w:rPr>'
result += '</w:pPr>'
if new_r:
result += '<w:r><w:rPr>'
if bold:
result += '<w:b />'
result += '</w:rPr><w:t>'
if root.text is not None:
result += escape(root.text).strip()
for child in root.getchildren():
result += transform_html(child, default_style=style)
if root.tail is not None:
result += escape(root.tail).strip()
return result
def preprocess_html(context):
if isinstance(context, dict):
for key, value in context.items():
context[key] = preprocess_html(value)
return context
elif isinstance(context, list):
return [preprocess_html(v) for v in context]
elif isinstance(context, tuple):
return (preprocess_html(v) for v in context)
elif isinstance(context, (str, unicode)):
# clean html first
cleaner = clean.Cleaner()
cleaner.safe_attrs_only = True
cleaner.safe_attrs = ('style', 'class')
cleaner.allow_tags = ('p', 'a', 'br', 'span', 'strong', 'h1', 'h2', 'h3', 'h4', 'i', 'ul', 'li', 'br', 'pre')
cleaner.remove_unknown_tags = False
h = cleaner.clean_html(context)
h = html.fromstring(h)
# transform to docx code
if h.find('p') is not None or h.find('span') is not None or\
h.find('strong') is not None or h.find('a') is not None:
value = transform_html(h, True)
else:
# remove enclosing tag
roottag = h.tag
value = etree.tostring(h)
value = value[len(roottag) + 2:-(len(roottag)+3)]
return value
else:
return context
def render(doc, context, debug=False):
if isinstance(doc, etree._Element):
doc = etree.tostring(doc,
encoding='utf-8',
xml_declaration=True,
standalone=True).decode('utf-8')
template = jinja2.Template(doc)
context = preprocess_html(context)
if debug:
doc = template.render(**context).encode('utf-8')
with open('templated.xml', 'w') as fp:
fp.write(doc)
doc = etree.XML(doc)
else:
doc = etree.XML(template.render(**context).encode('utf-8'))
# cleanup control nodes
for el in doc.xpath('//*[@is_control="true"]'):
par = el.getparent()
par.remove(el)
if par.find('w:r/w:t', par.nsmap) is None:
par.getparent().remove(par)
return etree.tostring(doc,
encoding='utf-8',
xml_declaration=True,
standalone=True)
def main(preproc=True):
zipin = zipfile.ZipFile(sys.argv[1])
if preproc:
doc = preprocess(zipin.open('word/document.xml'), debug=True)
else:
doc = zipin.read('word/document.xml').decode('utf-8')
processed_doc = render(doc, json.load(sys.stdin))
print(processed_doc)
target = 'Processed_' + sys.argv[1]
outzip = zipfile.ZipFile(target, "w")
for fileinfo in zipin.infolist():
if fileinfo.filename != 'word/document.xml':
outzip.writestr(fileinfo, zipin.read(fileinfo))
else:
outzip.writestr('word/document.xml', processed_doc)
if __name__ == '__main__':
logging.basicConfig(level='DEBUG')
main()
|
{
"content_hash": "1dd895c0411564d944d47f278099eb52",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 117,
"avg_line_length": 31.27027027027027,
"alnum_prop": 0.56028522039758,
"repo_name": "bluec0re/python-docx_templating",
"id": "6de19a31a119127c824a0acc0f54df99245b103c",
"size": "4628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51641"
}
],
"symlink_target": ""
}
|
from os import path
from django.template import Library
register = Library()
@register.filter
def shortenfilepath(path, num_dirs=2, pathsep='/'):
    # keep only the last `num_dirs` components of the path
    return pathsep.join(path.split(pathsep)[-num_dirs:])
|
{
"content_hash": "0604bb079ee4ee98501e77efc9815a92",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 56,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.7291666666666666,
"repo_name": "reincubate/django-templatesadmin",
"id": "dfe213727448ebcf33fb97eafcab64b14d7f9782",
"size": "240",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "templatesadmin/templatetags/templatesadmin_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4042"
},
{
"name": "Python",
"bytes": "16646"
}
],
"symlink_target": ""
}
|
from geopy.geocoders.bing import Bing
from geopy.geocoders.google import Google
from geopy.geocoders.googlev3 import GoogleV3
from geopy.geocoders.dot_us import GeocoderDotUS
from geopy.geocoders.geonames import GeoNames
from geopy.geocoders.wiki_gis import MediaWiki
from geopy.geocoders.wiki_semantic import SemanticMediaWiki
from geopy.geocoders.yahoo import Yahoo
from geopy.geocoders.openmapquest import OpenMapQuest
class Place(object):
"""This represents a place
    This is a generic container to hold a "place". It's intended to be used by overriding
    the XXX method for a geocoder. This will then give you access to more attributes of a
    geo-coded object in a more consistent manner than simply (location, (lat, long))."""
def __init__(self, *args, **kwargs):
self.street_line1 = kwargs.get('street_line1', None)
self.street_line2 = kwargs.get('street_line2', None)
self.suite = kwargs.get('suite', None)
self.street_number = kwargs.get('street_number', None)
self.route = kwargs.get('route', None)
self.city = kwargs.get('city', None)
self.state = kwargs.get('state', None)
self.zipcode = kwargs.get('zipcode', None)
self.county = kwargs.get('county', None)
self.neighborhood = kwargs.get('neighborhood', None)
self.latitude = kwargs.get('latitude', None)
self.longitude = kwargs.get('longitude', None)
self.is_confirmed = kwargs.get('is_confirmed', False)
self.formatted_address = kwargs.get('formatted_address', None)
self.subdivision_id = kwargs.get('subdivision_id', None)
self.errors = []
self.warnings = []
def __unicode__(self):
return self.formatted_address
def __repr__(self):
results = []
if not self.formatted_address:
return 'Unknown'
for i in self.formatted_address:
try: results.append(i.encode('ascii'))
except UnicodeEncodeError: results.append("?")
return "".join(results)
def get_home_object(self):
"""This return the dictionary needed for a home"""
subdivision, city, county = None, None, None
if self.county:
try:
county = County.objects.get(name__iexact=self.county,
state = self.state)
except ObjectDoesNotExist:
self.errors.append("%s county does not exist in %s" % (self.county,
self.state))
else:
self.errors.append("County not given")
if self.subdivision_id:
from apps.subdivision.models import Subdivision
try:
subdivision = Subdivision.objects.get(id=self.subdivision_id)
except ObjectDoesNotExist:
error = "Unable to find subdivision id %s" % self.subdivision_id
                log.error(error)
self.errors.append(error)
try:
if hasattr(county, 'name') and self.city:
city = City.objects.get(name__iexact=self.city, county=county)
except ObjectDoesNotExist:
if self.is_confirmed:
warning = ("%s (%s) does not exist - adding it" % (self.city,
self.county))
city = get_or_create_unregistered_city(name=self.city,
county=county,
latitude=self.latitude,
longitude=self.longitude)
else:
error = "The city %s does not exist within %s county" % (self.city ,
self.county)
self.errors.append(error)
log.error(error)
errors = None
if len(self.errors):
errors = "<ul>"
for item in self.errors: errors += "<li>%s</li>" % item
errors += "</ul>"
return dict(street_line1=self.street_line1, lot_number=self.lot_number,
street_line2=self.street_line2, city=city,
state=self.state, zipcode=self.zipcode,
confirmed_address=self.is_confirmed, latitude=self.latitude,
longitude = self.longitude, subdivision=subdivision,
errors = errors)
|
{
"content_hash": "91ba1a3cd94b908b6ddc605e4a713609",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 92,
"avg_line_length": 45.83838383838384,
"alnum_prop": 0.5579550462758924,
"repo_name": "pivotal-energy-solutions/geopy",
"id": "84544065bf1eb63f8a53e7288112f8dc1aa727ed",
"size": "4538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geopy/geocoders/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132200"
}
],
"symlink_target": ""
}
|
"""Package contenant la commande 'escalader'."""
from primaires.interpreteur.commande.commande import Commande
from primaires.interpreteur.masque.exceptions.erreur_interpretation import \
ErreurInterpretation
class CmdEscalader(Commande):
"""Commande 'escalader'"""
def __init__(self):
"""Constructeur de la commande"""
Commande.__init__(self, "escalader", "climb")
self.nom_categorie = "bouger"
self.schema = "<nom_sortie>"
self.aide_courte = "escalade une paroi"
self.aide_longue = \
"Cette commande permet d'escalader une paroi. Il vous faut " \
"préciser le nom de la sortie à escalader."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
sortie = dic_masques["nom_sortie"].sortie
salle = personnage.salle
nom_complet = sortie.nom_complet.capitalize()
if not sortie.direction in ("bas", "haut") or not sortie.diff_escalade:
raise ErreurInterpretation(
"Vous n'avez pas besoin d'escalader dans cette direction.")
personnage.deplacer_vers(sortie.nom, escalade=True)
|
{
"content_hash": "eef957220a3d627688bea75504184e46",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.6380165289256199,
"repo_name": "stormi/tsunami",
"id": "fe99ddca4155e4d11a4fbb7daf3c1d345cc0c851",
"size": "2779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/salle/commandes/escalader/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, with_statement
import logging
import re
import socket
import sys
import traceback
from tornado.concurrent import Future, return_future, ReturnValueIgnoredError, run_on_executor
from tornado.escape import utf8, to_unicode
from tornado import gen
from tornado.iostream import IOStream
from tornado import stack_context
from tornado.tcpserver import TCPServer
from tornado.testing import AsyncTestCase, LogTrapTestCase, bind_unused_port, gen_test
from tornado.test.util import unittest
try:
from concurrent import futures
except ImportError:
futures = None
class ReturnFutureTest(AsyncTestCase):
@return_future
def sync_future(self, callback):
callback(42)
@return_future
def async_future(self, callback):
self.io_loop.add_callback(callback, 42)
@return_future
def immediate_failure(self, callback):
1 / 0
@return_future
def delayed_failure(self, callback):
self.io_loop.add_callback(lambda: 1 / 0)
@return_future
def return_value(self, callback):
# Note that the result of both running the callback and returning
# a value (or raising an exception) is unspecified; with current
# implementations the last event prior to callback resolution wins.
return 42
@return_future
def no_result_future(self, callback):
callback()
def test_immediate_failure(self):
with self.assertRaises(ZeroDivisionError):
# The caller sees the error just like a normal function.
self.immediate_failure(callback=self.stop)
# The callback is not run because the function failed synchronously.
self.io_loop.add_timeout(self.io_loop.time() + 0.05, self.stop)
result = self.wait()
self.assertIs(result, None)
def test_return_value(self):
with self.assertRaises(ReturnValueIgnoredError):
self.return_value(callback=self.stop)
def test_callback_kw(self):
future = self.sync_future(callback=self.stop)
result = self.wait()
self.assertEqual(result, 42)
self.assertEqual(future.result(), 42)
def test_callback_positional(self):
# When the callback is passed in positionally, future_wrap shouldn't
# add another callback in the kwargs.
future = self.sync_future(self.stop)
result = self.wait()
self.assertEqual(result, 42)
self.assertEqual(future.result(), 42)
def test_no_callback(self):
future = self.sync_future()
self.assertEqual(future.result(), 42)
def test_none_callback_kw(self):
# explicitly pass None as callback
future = self.sync_future(callback=None)
self.assertEqual(future.result(), 42)
def test_none_callback_pos(self):
future = self.sync_future(None)
self.assertEqual(future.result(), 42)
def test_async_future(self):
future = self.async_future()
self.assertFalse(future.done())
self.io_loop.add_future(future, self.stop)
future2 = self.wait()
self.assertIs(future, future2)
self.assertEqual(future.result(), 42)
@gen_test
def test_async_future_gen(self):
result = yield self.async_future()
self.assertEqual(result, 42)
def test_delayed_failure(self):
future = self.delayed_failure()
self.io_loop.add_future(future, self.stop)
future2 = self.wait()
self.assertIs(future, future2)
with self.assertRaises(ZeroDivisionError):
future.result()
def test_kw_only_callback(self):
@return_future
def f(**kwargs):
kwargs['callback'](42)
future = f()
self.assertEqual(future.result(), 42)
def test_error_in_callback(self):
self.sync_future(callback=lambda future: 1 / 0)
# The exception gets caught by our StackContext and will be re-raised
# when we wait.
self.assertRaises(ZeroDivisionError, self.wait)
def test_no_result_future(self):
future = self.no_result_future(self.stop)
result = self.wait()
self.assertIs(result, None)
# result of this future is undefined, but not an error
future.result()
def test_no_result_future_callback(self):
future = self.no_result_future(callback=lambda: self.stop())
result = self.wait()
self.assertIs(result, None)
future.result()
@gen_test
def test_future_traceback(self):
@return_future
@gen.engine
def f(callback):
yield gen.Task(self.io_loop.add_callback)
try:
1 / 0
except ZeroDivisionError:
self.expected_frame = traceback.extract_tb(
sys.exc_info()[2], limit=1)[0]
raise
try:
yield f()
self.fail("didn't get expected exception")
except ZeroDivisionError:
tb = traceback.extract_tb(sys.exc_info()[2])
self.assertIn(self.expected_frame, tb)
# The following series of classes demonstrate and test various styles
# of use, with and without generators and futures.
class CapServer(TCPServer):
def handle_stream(self, stream, address):
logging.info("handle_stream")
self.stream = stream
self.stream.read_until(b"\n", self.handle_read)
def handle_read(self, data):
logging.info("handle_read")
data = to_unicode(data)
if data == data.upper():
self.stream.write(b"error\talready capitalized\n")
else:
# data already has \n
self.stream.write(utf8("ok\t%s" % data.upper()))
self.stream.close()
class CapError(Exception):
pass
class BaseCapClient(object):
def __init__(self, port, io_loop):
self.port = port
self.io_loop = io_loop
def process_response(self, data):
status, message = re.match('(.*)\t(.*)\n', to_unicode(data)).groups()
if status == 'ok':
return message
else:
raise CapError(message)
class ManualCapClient(BaseCapClient):
def capitalize(self, request_data, callback=None):
logging.info("capitalize")
self.request_data = request_data
self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
self.stream.connect(('127.0.0.1', self.port),
callback=self.handle_connect)
self.future = Future()
if callback is not None:
self.future.add_done_callback(
stack_context.wrap(lambda future: callback(future.result())))
return self.future
def handle_connect(self):
logging.info("handle_connect")
self.stream.write(utf8(self.request_data + "\n"))
self.stream.read_until(b'\n', callback=self.handle_read)
def handle_read(self, data):
logging.info("handle_read")
self.stream.close()
try:
self.future.set_result(self.process_response(data))
except CapError as e:
self.future.set_exception(e)
class DecoratorCapClient(BaseCapClient):
@return_future
def capitalize(self, request_data, callback):
logging.info("capitalize")
self.request_data = request_data
self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
self.stream.connect(('127.0.0.1', self.port),
callback=self.handle_connect)
self.callback = callback
def handle_connect(self):
logging.info("handle_connect")
self.stream.write(utf8(self.request_data + "\n"))
self.stream.read_until(b'\n', callback=self.handle_read)
def handle_read(self, data):
logging.info("handle_read")
self.stream.close()
self.callback(self.process_response(data))
class GeneratorCapClient(BaseCapClient):
@return_future
@gen.engine
def capitalize(self, request_data, callback):
logging.info('capitalize')
stream = IOStream(socket.socket(), io_loop=self.io_loop)
logging.info('connecting')
yield gen.Task(stream.connect, ('127.0.0.1', self.port))
stream.write(utf8(request_data + '\n'))
logging.info('reading')
data = yield gen.Task(stream.read_until, b'\n')
logging.info('returning')
stream.close()
callback(self.process_response(data))
class ClientTestMixin(object):
def setUp(self):
super(ClientTestMixin, self).setUp() # type: ignore
self.server = CapServer(io_loop=self.io_loop)
sock, port = bind_unused_port()
self.server.add_sockets([sock])
self.client = self.client_class(io_loop=self.io_loop, port=port)
def tearDown(self):
self.server.stop()
super(ClientTestMixin, self).tearDown() # type: ignore
def test_callback(self):
self.client.capitalize("hello", callback=self.stop)
result = self.wait()
self.assertEqual(result, "HELLO")
def test_callback_error(self):
self.client.capitalize("HELLO", callback=self.stop)
self.assertRaisesRegexp(CapError, "already capitalized", self.wait)
def test_future(self):
future = self.client.capitalize("hello")
self.io_loop.add_future(future, self.stop)
self.wait()
self.assertEqual(future.result(), "HELLO")
def test_future_error(self):
future = self.client.capitalize("HELLO")
self.io_loop.add_future(future, self.stop)
self.wait()
self.assertRaisesRegexp(CapError, "already capitalized", future.result)
def test_generator(self):
@gen.engine
def f():
result = yield self.client.capitalize("hello")
self.assertEqual(result, "HELLO")
self.stop()
f()
self.wait()
def test_generator_error(self):
@gen.engine
def f():
with self.assertRaisesRegexp(CapError, "already capitalized"):
yield self.client.capitalize("HELLO")
self.stop()
f()
self.wait()
class ManualClientTest(ClientTestMixin, AsyncTestCase, LogTrapTestCase):
client_class = ManualCapClient
class DecoratorClientTest(ClientTestMixin, AsyncTestCase, LogTrapTestCase):
client_class = DecoratorCapClient
class GeneratorClientTest(ClientTestMixin, AsyncTestCase, LogTrapTestCase):
client_class = GeneratorCapClient
@unittest.skipIf(futures is None, "concurrent.futures module not present")
class RunOnExecutorTest(AsyncTestCase):
@gen_test
def test_no_calling(self):
class Object(object):
def __init__(self, io_loop):
self.io_loop = io_loop
self.executor = futures.thread.ThreadPoolExecutor(1)
@run_on_executor
def f(self):
return 42
o = Object(io_loop=self.io_loop)
answer = yield o.f()
self.assertEqual(answer, 42)
@gen_test
def test_call_with_no_args(self):
class Object(object):
def __init__(self, io_loop):
self.io_loop = io_loop
self.executor = futures.thread.ThreadPoolExecutor(1)
@run_on_executor()
def f(self):
return 42
o = Object(io_loop=self.io_loop)
answer = yield o.f()
self.assertEqual(answer, 42)
@gen_test
def test_call_with_io_loop(self):
class Object(object):
def __init__(self, io_loop):
self._io_loop = io_loop
self.executor = futures.thread.ThreadPoolExecutor(1)
@run_on_executor(io_loop='_io_loop')
def f(self):
return 42
o = Object(io_loop=self.io_loop)
answer = yield o.f()
self.assertEqual(answer, 42)
@gen_test
def test_call_with_executor(self):
class Object(object):
def __init__(self, io_loop):
self.io_loop = io_loop
self.__executor = futures.thread.ThreadPoolExecutor(1)
@run_on_executor(executor='_Object__executor')
def f(self):
return 42
o = Object(io_loop=self.io_loop)
answer = yield o.f()
self.assertEqual(answer, 42)
@gen_test
def test_call_with_both(self):
class Object(object):
def __init__(self, io_loop):
self._io_loop = io_loop
self.__executor = futures.thread.ThreadPoolExecutor(1)
@run_on_executor(io_loop='_io_loop', executor='_Object__executor')
def f(self):
return 42
o = Object(io_loop=self.io_loop)
answer = yield o.f()
self.assertEqual(answer, 42)
|
{
"content_hash": "7b5de44bb8f152b9d5f9e87a2371da39",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 94,
"avg_line_length": 32.065,
"alnum_prop": 0.6135973803212225,
"repo_name": "lancezlin/ml_template_py",
"id": "8ce095ec1b3118e2d49a99b168c844d9399d139e",
"size": "13422",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/tornado/test/concurrent_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "326933"
},
{
"name": "C++",
"bytes": "14430"
},
{
"name": "CSS",
"bytes": "7806"
},
{
"name": "FORTRAN",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "596861"
},
{
"name": "JavaScript",
"bytes": "4020233"
},
{
"name": "Jupyter Notebook",
"bytes": "517957"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "41191064"
},
{
"name": "Shell",
"bytes": "3373"
},
{
"name": "Smarty",
"bytes": "26298"
}
],
"symlink_target": ""
}
|
import unittest
from rx import Observable
from rx.testing import TestScheduler, ReactiveTest, is_prime, MockDisposable
from rx.disposables import Disposable, SerialDisposable
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class TestFind(unittest.TestCase):
def test_find_never(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(150, 1)
)
def create():
return xs.find(lambda x,i,s: True)
res = scheduler.start(create)
res.messages.assert_equal(
)
def test_find_empty(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(150, 1),
on_completed(210)
)
def create():
return xs.find(lambda x,i,s: True)
res = scheduler.start(create)
res.messages.assert_equal(
on_next(210, None),
on_completed(210)
)
def test_find_single(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(150, 1),
on_next(210, 2),
on_completed(220)
)
def create():
return xs.find(lambda x,i,s: x==2)
res = scheduler.start(create)
res.messages.assert_equal(
on_next(210, 2),
on_completed(210)
)
def test_find_notfound(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(150, 1),
on_next(210, 2),
on_completed(220)
)
def create():
return xs.find(lambda x,i,s: x==3)
res = scheduler.start(create)
res.messages.assert_equal(
on_next(220, None),
on_completed(220)
)
def test_find_Error(self):
ex = Exception('error')
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(150, 1),
on_next(210, 2),
on_error(220, ex)
)
def create():
return xs.find(lambda x,i,s: x==3)
res = scheduler.start(create)
res.messages.assert_equal(
on_error(220, ex)
)
def test_find_throws(self):
ex = 'error'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(150, 1),
on_next(210, 2),
on_completed(220)
)
def create():
def predicate(x, i, source):
raise Exception(ex)
return xs.find(predicate)
res = scheduler.start(create)
res.messages.assert_equal(
on_error(210, ex)
)
|
{
"content_hash": "1d0a453aa81a67e0d6a442d14667b19a",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 76,
"avg_line_length": 25.413793103448278,
"alnum_prop": 0.5457937584803256,
"repo_name": "dbrattli/RxPY",
"id": "ab60adb47852acb5148ff8bb811655ded229d4bc",
"size": "2948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_observable/test_find.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1334787"
}
],
"symlink_target": ""
}
|
from django.template import Template, Context, TemplateSyntaxError
from django.test import TestCase
from django.test.utils import override_settings
from ..serializers import <%= classifiedName %>Serializer
from .. import models
class Test<%= classifiedName %>Serializer(TestCase):
pass
|
{
"content_hash": "8254d2535fb16b93e899f499af089f0d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 66,
"avg_line_length": 29.3,
"alnum_prop": 0.7952218430034129,
"repo_name": "thePortus/generator-djangular-gift",
"id": "da8134cb0433bf896abe3950f820401256a37030",
"size": "317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django-serializer/templates/_.tests.serializer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "550"
},
{
"name": "HTML",
"bytes": "499"
},
{
"name": "JavaScript",
"bytes": "60995"
},
{
"name": "Python",
"bytes": "9064"
}
],
"symlink_target": ""
}
|
from parser import Parser
import logging
import os
import sys
class Cleanup(Parser):
nodes = None
def __init__(self, logger=None):
Parser.__init__(self, logger)
def run_cleanup(self):
if self.nodes is None:
logging.debug("Node list is None")
sys.exit(-1)
# Should launch shell commands
for i in range(0, len(self.nodes)):
node = self.nodes[i]
command = "ssh {0}@{1} \"killall -u {0}\" &".format(
self.net_id, node.get_host_name())
logging.debug(command)
if os.system(command) != 0:
logging.debug("Failed to execute %s", command)
sys.exit( -1 )
if __name__ == "__main__":
log_file_path = os.path.join(os.getcwd(), "cleanup.log")
if os.path.isfile(log_file_path):
os.remove(log_file_path)
logging.basicConfig(
filename="cleanup.log",
format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%m-%d-%Y %I:%M %p',
level=logging.DEBUG
)
clean = Cleanup(logging)
logging.debug("Cleanup.py launched in " + os.getcwd())
if os.path.isfile(os.path.join(os.getcwd(), clean.CONFIG_FILE)):
logging.debug("Opened \'{0}\'".format(clean.CONFIG_FILE))
clean.nodes = clean.parse_config_file()
clean.run_cleanup()
else:
logging.warning("\'%s\' not found.", clean.CONFIG_FILE)
print "Could not find \'{}\' in \' {}". \
format(clean.CONFIG_FILE, os.getcwd())
logging.debug("Cleanup.py completed successfully. ")
|
{
"content_hash": "f9040ca2b6955f0a0c2a0efb93cf1638",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 68,
"avg_line_length": 28.526315789473685,
"alnum_prop": 0.550430504305043,
"repo_name": "Sriee/Mutex",
"id": "e6b7770b0958a5b6637a08c04fd168b8e20fc878",
"size": "1626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cleanup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "60591"
},
{
"name": "Python",
"bytes": "10999"
},
{
"name": "Shell",
"bytes": "71"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
import os
import re
from typing import (
Dict,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.oauth2 import service_account # type: ignore
import pkg_resources
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.cloud.artifactregistry_v1.services.artifact_registry import pagers
from google.cloud.artifactregistry_v1.types import apt_artifact, artifact, file, package
from google.cloud.artifactregistry_v1.types import repository
from google.cloud.artifactregistry_v1.types import repository as gda_repository
from google.cloud.artifactregistry_v1.types import service, settings
from google.cloud.artifactregistry_v1.types import tag
from google.cloud.artifactregistry_v1.types import tag as gda_tag
from google.cloud.artifactregistry_v1.types import version, yum_artifact
from .transports.base import DEFAULT_CLIENT_INFO, ArtifactRegistryTransport
from .transports.grpc import ArtifactRegistryGrpcTransport
from .transports.grpc_asyncio import ArtifactRegistryGrpcAsyncIOTransport
class ArtifactRegistryClientMeta(type):
"""Metaclass for the ArtifactRegistry client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ArtifactRegistryTransport]]
_transport_registry["grpc"] = ArtifactRegistryGrpcTransport
_transport_registry["grpc_asyncio"] = ArtifactRegistryGrpcAsyncIOTransport
def get_transport_class(
cls,
label: Optional[str] = None,
) -> Type[ArtifactRegistryTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ArtifactRegistryClient(metaclass=ArtifactRegistryClientMeta):
"""The Artifact Registry API service.
Artifact Registry is an artifact management system for storing
artifacts from different package management systems.
The resources managed by this API are:
- Repositories, which group packages and their data.
- Packages, which group versions and their tags.
- Versions, which are specific forms of a package.
- Tags, which represent alternative names for versions.
- Files, which contain content and are optionally associated with a
Package or Version.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "artifactregistry.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ArtifactRegistryClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ArtifactRegistryClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ArtifactRegistryTransport:
"""Returns the transport used by the client instance.
Returns:
ArtifactRegistryTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def apt_artifact_path(
project: str,
location: str,
repository: str,
apt_artifact: str,
) -> str:
"""Returns a fully-qualified apt_artifact string."""
return "projects/{project}/locations/{location}/repositories/{repository}/aptArtifacts/{apt_artifact}".format(
project=project,
location=location,
repository=repository,
apt_artifact=apt_artifact,
)
@staticmethod
def parse_apt_artifact_path(path: str) -> Dict[str, str]:
"""Parses a apt_artifact path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/repositories/(?P<repository>.+?)/aptArtifacts/(?P<apt_artifact>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def docker_image_path(
project: str,
location: str,
repository: str,
docker_image: str,
) -> str:
"""Returns a fully-qualified docker_image string."""
return "projects/{project}/locations/{location}/repositories/{repository}/dockerImages/{docker_image}".format(
project=project,
location=location,
repository=repository,
docker_image=docker_image,
)
@staticmethod
def parse_docker_image_path(path: str) -> Dict[str, str]:
"""Parses a docker_image path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/repositories/(?P<repository>.+?)/dockerImages/(?P<docker_image>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def file_path(
project: str,
location: str,
repository: str,
file: str,
) -> str:
"""Returns a fully-qualified file string."""
return "projects/{project}/locations/{location}/repositories/{repository}/files/{file}".format(
project=project,
location=location,
repository=repository,
file=file,
)
@staticmethod
def parse_file_path(path: str) -> Dict[str, str]:
"""Parses a file path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/repositories/(?P<repository>.+?)/files/(?P<file>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def project_settings_path(
project: str,
) -> str:
"""Returns a fully-qualified project_settings string."""
return "projects/{project}/projectSettings".format(
project=project,
)
@staticmethod
def parse_project_settings_path(path: str) -> Dict[str, str]:
"""Parses a project_settings path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/projectSettings$", path)
return m.groupdict() if m else {}
@staticmethod
def repository_path(
project: str,
location: str,
repository: str,
) -> str:
"""Returns a fully-qualified repository string."""
return (
"projects/{project}/locations/{location}/repositories/{repository}".format(
project=project,
location=location,
repository=repository,
)
)
@staticmethod
def parse_repository_path(path: str) -> Dict[str, str]:
"""Parses a repository path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/repositories/(?P<repository>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def tag_path(
project: str,
location: str,
repository: str,
package: str,
tag: str,
) -> str:
"""Returns a fully-qualified tag string."""
return "projects/{project}/locations/{location}/repositories/{repository}/packages/{package}/tags/{tag}".format(
project=project,
location=location,
repository=repository,
package=package,
tag=tag,
)
@staticmethod
def parse_tag_path(path: str) -> Dict[str, str]:
"""Parses a tag path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/repositories/(?P<repository>.+?)/packages/(?P<package>.+?)/tags/(?P<tag>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def version_path(
project: str,
location: str,
repository: str,
package: str,
version: str,
) -> str:
"""Returns a fully-qualified version string."""
return "projects/{project}/locations/{location}/repositories/{repository}/packages/{package}/versions/{version}".format(
project=project,
location=location,
repository=repository,
package=package,
version=version,
)
@staticmethod
def parse_version_path(path: str) -> Dict[str, str]:
"""Parses a version path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/repositories/(?P<repository>.+?)/packages/(?P<package>.+?)/versions/(?P<version>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def yum_artifact_path(
project: str,
location: str,
repository: str,
yum_artifact: str,
) -> str:
"""Returns a fully-qualified yum_artifact string."""
return "projects/{project}/locations/{location}/repositories/{repository}/yumArtifacts/{yum_artifact}".format(
project=project,
location=location,
repository=repository,
yum_artifact=yum_artifact,
)
@staticmethod
def parse_yum_artifact_path(path: str) -> Dict[str, str]:
"""Parses a yum_artifact path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/repositories/(?P<repository>.+?)/yumArtifacts/(?P<yum_artifact>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(
billing_account: str,
) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(
folder: str,
) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(
folder=folder,
)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(
organization: str,
) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(
organization=organization,
)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(
project: str,
) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(
project=project,
)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(
project: str,
location: str,
) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
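    # Usage sketch (illustrative; the resource IDs below are placeholders): the
    # *_path helpers build fully-qualified resource names and the parse_*_path
    # helpers invert them, returning an empty dict when the path does not match.
    #
    #     name = ArtifactRegistryClient.version_path(
    #         "my-project", "us-central1", "my-repo", "my-pkg", "1.0.0")
    #     parts = ArtifactRegistryClient.parse_version_path(name)
    #     assert parts["version"] == "1.0.0"
    #     assert ArtifactRegistryClient.parse_version_path("not-a-path") == {}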
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise, if a client cert source exists, use the default mTLS endpoint,
        otherwise use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
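    # Usage sketch (illustrative): this classmethod is mostly used internally by
    # __init__, but it can be called directly to inspect which endpoint and client
    # cert source a given configuration would select.
    #
    #     options = client_options_lib.ClientOptions()
    #     endpoint, cert_source = (
    #         ArtifactRegistryClient.get_mtls_endpoint_and_cert_source(options))
    #     # endpoint is DEFAULT_ENDPOINT unless mTLS is enabled via the
    #     # GOOGLE_API_USE_CLIENT_CERTIFICATE / GOOGLE_API_USE_MTLS_ENDPOINT
    #     # environment variables or an explicit api_endpoint.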
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Optional[Union[str, ArtifactRegistryTransport]] = None,
client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the artifact registry client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ArtifactRegistryTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
client_options = cast(client_options_lib.ClientOptions, client_options)
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ArtifactRegistryTransport):
            # transport is an ArtifactRegistryTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
api_audience=client_options.api_audience,
)
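    # Construction sketch (illustrative; the endpoint shown is the service default
    # and only an example value): the client can be built with ambient credentials
    # and optional client options.
    #
    #     from google.api_core.client_options import ClientOptions
    #     client = ArtifactRegistryClient(
    #         client_options=ClientOptions(
    #             api_endpoint="artifactregistry.googleapis.com"))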
def list_docker_images(
self,
request: Optional[Union[artifact.ListDockerImagesRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDockerImagesPager:
r"""Lists docker images.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_list_docker_images():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.ListDockerImagesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_docker_images(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.ListDockerImagesRequest, dict]):
The request object. The request to list docker images.
parent (str):
Required. The name of the parent
resource whose docker images will be
listed.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.services.artifact_registry.pagers.ListDockerImagesPager:
The response from listing docker
images.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
        # in an artifact.ListDockerImagesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, artifact.ListDockerImagesRequest):
request = artifact.ListDockerImagesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_docker_images]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDockerImagesPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
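    # Usage sketch (illustrative; resource IDs are placeholders): the flattened
    # `parent` argument may be passed instead of a full request object, and the
    # returned pager fetches additional pages lazily while iterating.
    #
    #     parent = "projects/my-project/locations/us-central1/repositories/my-repo"
    #     for image in client.list_docker_images(parent=parent):
    #         print(image.uri)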
def get_docker_image(
self,
request: Optional[Union[artifact.GetDockerImageRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> artifact.DockerImage:
r"""Gets a docker image.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_get_docker_image():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.GetDockerImageRequest(
name="name_value",
)
# Make the request
response = client.get_docker_image(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.GetDockerImageRequest, dict]):
                The request object. The request to get a docker image.
            name (str):
                Required. The name of the docker
                image.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.types.DockerImage:
DockerImage represents a docker artifact.
The following fields are returned as untyped metadata
in the Version resource, using camelcase keys (i.e.
metadata.imageSizeBytes): \* imageSizeBytes \*
mediaType \* buildTime
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
        # in an artifact.GetDockerImageRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, artifact.GetDockerImageRequest):
request = artifact.GetDockerImageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_docker_image]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
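    # Usage sketch (illustrative; the resource name is a placeholder): per-call
    # `timeout`, `retry` and `metadata` overrides can accompany the flattened
    # `name` argument.
    #
    #     name = ("projects/my-project/locations/us-central1/repositories/my-repo/"
    #             "dockerImages/my-image@sha256:0123abcd")
    #     image = client.get_docker_image(name=name, timeout=30.0)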
def import_apt_artifacts(
self,
request: Optional[Union[apt_artifact.ImportAptArtifactsRequest, dict]] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Imports Apt artifacts. The returned Operation will
complete once the resources are imported. Package,
Version, and File resources are created based on the
imported artifacts. Imported artifacts that conflict
with existing resources are ignored.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_import_apt_artifacts():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.ImportAptArtifactsRequest(
)
# Make the request
operation = client.import_apt_artifacts(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.ImportAptArtifactsRequest, dict]):
The request object. The request to import new apt
artifacts.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.artifactregistry_v1.types.ImportAptArtifactsResponse`
The response message from importing APT artifacts.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
        # in an apt_artifact.ImportAptArtifactsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, apt_artifact.ImportAptArtifactsRequest):
request = apt_artifact.ImportAptArtifactsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.import_apt_artifacts]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
apt_artifact.ImportAptArtifactsResponse,
metadata_type=apt_artifact.ImportAptArtifactsMetadata,
)
# Done; return the response.
return response
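    # Usage sketch (illustrative; `gcs_source` and `uris` are the request fields as
    # understood here, and the bucket path is a placeholder): import calls return a
    # long-running operation that can be blocked on for the final response.
    #
    #     op = client.import_apt_artifacts(request={
    #         "parent": "projects/my-project/locations/us-central1/repositories/my-repo",
    #         "gcs_source": {"uris": ["gs://my-bucket/packages/pkg_1.0_amd64.deb"]},
    #     })
    #     result = op.result(timeout=300)  # ImportAptArtifactsResponse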
def import_yum_artifacts(
self,
request: Optional[Union[yum_artifact.ImportYumArtifactsRequest, dict]] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Imports Yum (RPM) artifacts. The returned Operation
will complete once the resources are imported. Package,
Version, and File resources are created based on the
imported artifacts. Imported artifacts that conflict
with existing resources are ignored.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_import_yum_artifacts():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.ImportYumArtifactsRequest(
)
# Make the request
operation = client.import_yum_artifacts(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.ImportYumArtifactsRequest, dict]):
The request object. The request to import new yum
artifacts.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.artifactregistry_v1.types.ImportYumArtifactsResponse`
The response message from importing YUM artifacts.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a yum_artifact.ImportYumArtifactsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, yum_artifact.ImportYumArtifactsRequest):
request = yum_artifact.ImportYumArtifactsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.import_yum_artifacts]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
yum_artifact.ImportYumArtifactsResponse,
metadata_type=yum_artifact.ImportYumArtifactsMetadata,
)
# Done; return the response.
return response
def list_repositories(
self,
request: Optional[Union[repository.ListRepositoriesRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListRepositoriesPager:
r"""Lists repositories.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_list_repositories():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.ListRepositoriesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_repositories(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.ListRepositoriesRequest, dict]):
The request object. The request to list repositories.
parent (str):
Required. The name of the parent
resource whose repositories will be
listed.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.services.artifact_registry.pagers.ListRepositoriesPager:
The response from listing
repositories.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a repository.ListRepositoriesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, repository.ListRepositoriesRequest):
request = repository.ListRepositoriesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_repositories]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListRepositoriesPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
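    # Usage sketch (illustrative): besides flat iteration, the pager exposes the
    # underlying pages, which is useful when per-page bookkeeping is needed.
    #
    #     pager = client.list_repositories(
    #         parent="projects/my-project/locations/us-central1")
    #     for page in pager.pages:
    #         for repo in page.repositories:
    #             print(repo.name, repo.description)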
def get_repository(
self,
request: Optional[Union[repository.GetRepositoryRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> repository.Repository:
r"""Gets a repository.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_get_repository():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.GetRepositoryRequest(
name="name_value",
)
# Make the request
response = client.get_repository(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.GetRepositoryRequest, dict]):
The request object. The request to retrieve a
repository.
name (str):
Required. The name of the repository
to retrieve.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.types.Repository:
A Repository for storing artifacts
with a specific format.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a repository.GetRepositoryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, repository.GetRepositoryRequest):
request = repository.GetRepositoryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_repository]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def create_repository(
self,
request: Optional[Union[gda_repository.CreateRepositoryRequest, dict]] = None,
*,
parent: Optional[str] = None,
repository: Optional[gda_repository.Repository] = None,
repository_id: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates a repository. The returned Operation will
finish once the repository has been created. Its
response will be the created Repository.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_create_repository():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.CreateRepositoryRequest(
parent="parent_value",
)
# Make the request
operation = client.create_repository(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.CreateRepositoryRequest, dict]):
The request object. The request to create a new
repository.
parent (str):
Required. The name of the parent
resource where the repository will be
created.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
repository (google.cloud.artifactregistry_v1.types.Repository):
The repository to be created.
This corresponds to the ``repository`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
repository_id (str):
The repository id to use for this
repository.
This corresponds to the ``repository_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.artifactregistry_v1.types.Repository`
A Repository for storing artifacts with a specific
format.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, repository, repository_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gda_repository.CreateRepositoryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gda_repository.CreateRepositoryRequest):
request = gda_repository.CreateRepositoryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if repository is not None:
request.repository = repository
if repository_id is not None:
request.repository_id = repository_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_repository]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
gda_repository.Repository,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
return response
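    # Usage sketch (illustrative; resource IDs are placeholders, and `format_` is
    # assumed to be the generated field name because `format` collides with the
    # Python builtin): the flattened arguments mirror the CreateRepositoryRequest
    # fields, and the created Repository is returned once the operation completes.
    #
    #     repo = artifactregistry_v1.Repository(
    #         format_=artifactregistry_v1.Repository.Format.DOCKER)
    #     op = client.create_repository(
    #         parent="projects/my-project/locations/us-central1",
    #         repository=repo,
    #         repository_id="my-repo",
    #     )
    #     created = op.result()  # the created Repository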
def update_repository(
self,
request: Optional[Union[gda_repository.UpdateRepositoryRequest, dict]] = None,
*,
repository: Optional[gda_repository.Repository] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gda_repository.Repository:
r"""Updates a repository.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_update_repository():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.UpdateRepositoryRequest(
)
# Make the request
response = client.update_repository(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.UpdateRepositoryRequest, dict]):
The request object. The request to update a repository.
repository (google.cloud.artifactregistry_v1.types.Repository):
The repository that replaces the
resource on the server.
This corresponds to the ``repository`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
The update mask applies to the resource. For the
``FieldMask`` definition, see
https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.types.Repository:
A Repository for storing artifacts
with a specific format.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([repository, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gda_repository.UpdateRepositoryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gda_repository.UpdateRepositoryRequest):
request = gda_repository.UpdateRepositoryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if repository is not None:
request.repository = repository
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_repository]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("repository.name", request.repository.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
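    # Usage sketch (illustrative; the resource name is a placeholder): the
    # `update_mask` limits the update to the listed fields, leaving other fields
    # on the server untouched.
    #
    #     updated = client.update_repository(
    #         repository={
    #             "name": "projects/my-project/locations/us-central1/repositories/my-repo",
    #             "description": "new description",
    #         },
    #         update_mask=field_mask_pb2.FieldMask(paths=["description"]),
    #     )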
def delete_repository(
self,
request: Optional[Union[repository.DeleteRepositoryRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Deletes a repository and all of its contents. The
returned Operation will finish once the repository has
been deleted. It will not have any Operation metadata
and will return a google.protobuf.Empty response.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_delete_repository():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.DeleteRepositoryRequest(
name="name_value",
)
# Make the request
operation = client.delete_repository(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.DeleteRepositoryRequest, dict]):
The request object. The request to delete a repository.
name (str):
Required. The name of the repository
to delete.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a repository.DeleteRepositoryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, repository.DeleteRepositoryRequest):
request = repository.DeleteRepositoryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_repository]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
return response
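    # Usage sketch (illustrative; the resource name is a placeholder): deletion
    # returns a long-running operation whose result is google.protobuf.Empty, so
    # blocking on it simply confirms completion.
    #
    #     op = client.delete_repository(
    #         name="projects/my-project/locations/us-central1/repositories/my-repo")
    #     op.result()  # Empty once the repository is gone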
def list_packages(
self,
request: Optional[Union[package.ListPackagesRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListPackagesPager:
r"""Lists packages.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_list_packages():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.ListPackagesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_packages(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.ListPackagesRequest, dict]):
The request object. The request to list packages.
parent (str):
Required. The name of the parent
resource whose packages will be listed.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.services.artifact_registry.pagers.ListPackagesPager:
The response from listing packages.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a package.ListPackagesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, package.ListPackagesRequest):
request = package.ListPackagesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_packages]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListPackagesPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def get_package(
self,
request: Optional[Union[package.GetPackageRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> package.Package:
r"""Gets a package.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_get_package():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.GetPackageRequest(
name="name_value",
)
# Make the request
response = client.get_package(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.GetPackageRequest, dict]):
The request object. The request to retrieve a package.
name (str):
Required. The name of the package to
retrieve.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.types.Package:
Packages are named collections of
versions.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a package.GetPackageRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, package.GetPackageRequest):
request = package.GetPackageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_package]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_package(
self,
request: Optional[Union[package.DeletePackageRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Deletes a package and all of its versions and tags.
The returned operation will complete once the package
has been deleted.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_delete_package():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.DeletePackageRequest(
name="name_value",
)
# Make the request
operation = client.delete_package(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.DeletePackageRequest, dict]):
The request object. The request to delete a package.
name (str):
Required. The name of the package to
delete.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a package.DeletePackageRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, package.DeletePackageRequest):
request = package.DeletePackageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_package]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
return response
def list_versions(
self,
request: Optional[Union[version.ListVersionsRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListVersionsPager:
r"""Lists versions.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_list_versions():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.ListVersionsRequest(
)
# Make the request
page_result = client.list_versions(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.ListVersionsRequest, dict]):
The request object. The request to list versions.
parent (str):
The name of the parent resource whose
versions will be listed.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.services.artifact_registry.pagers.ListVersionsPager:
The response from listing versions.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a version.ListVersionsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, version.ListVersionsRequest):
request = version.ListVersionsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_versions]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListVersionsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
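    # Usage sketch (illustrative; `page_size` is the standard List* paging field
    # and the resource name is a placeholder): a dict may be passed in place of a
    # ListVersionsRequest, and the pager hides page boundaries during iteration.
    #
    #     versions = client.list_versions(request={
    #         "parent": ("projects/my-project/locations/us-central1/"
    #                    "repositories/my-repo/packages/my-pkg"),
    #         "page_size": 100,
    #     })
    #     names = [v.name for v in versions]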
def get_version(
self,
request: Optional[Union[version.GetVersionRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> version.Version:
r"""Gets a version
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_get_version():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.GetVersionRequest(
)
# Make the request
response = client.get_version(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.GetVersionRequest, dict]):
The request object. The request to retrieve a version.
name (str):
The name of the version to retrieve.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.types.Version:
The body of a version resource. A
version resource represents a collection
of components, such as files and other
data. This may correspond to a version
in many package management schemes.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a version.GetVersionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, version.GetVersionRequest):
request = version.GetVersionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_version]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_version(
self,
request: Optional[Union[version.DeleteVersionRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Deletes a version and all of its content. The
returned operation will complete once the version has
been deleted.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_delete_version():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.DeleteVersionRequest(
)
# Make the request
operation = client.delete_version(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.DeleteVersionRequest, dict]):
The request object. The request to delete a version.
name (str):
The name of the version to delete.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a version.DeleteVersionRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, version.DeleteVersionRequest):
request = version.DeleteVersionRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_version]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=service.OperationMetadata,
)
# Done; return the response.
return response
def list_files(
self,
request: Optional[Union[file.ListFilesRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListFilesPager:
r"""Lists files.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_list_files():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.ListFilesRequest(
)
# Make the request
page_result = client.list_files(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.ListFilesRequest, dict]):
The request object. The request to list files.
parent (str):
The name of the repository whose
files will be listed. For example:
"projects/p1/locations/us-central1/repositories/repo1
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.services.artifact_registry.pagers.ListFilesPager:
The response from listing files.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a file.ListFilesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, file.ListFilesRequest):
request = file.ListFilesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_files]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListFilesPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def get_file(
self,
request: Optional[Union[file.GetFileRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> file.File:
r"""Gets a file.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_get_file():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.GetFileRequest(
)
# Make the request
response = client.get_file(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.GetFileRequest, dict]):
The request object. The request to retrieve a file.
name (str):
The name of the file to retrieve.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.types.File:
Files store content that is
potentially associated with Packages or
Versions.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a file.GetFileRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, file.GetFileRequest):
request = file.GetFileRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_file]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def list_tags(
self,
request: Optional[Union[tag.ListTagsRequest, dict]] = None,
*,
parent: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTagsPager:
r"""Lists tags.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_list_tags():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.ListTagsRequest(
)
# Make the request
page_result = client.list_tags(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.ListTagsRequest, dict]):
The request object. The request to list tags.
parent (str):
The name of the parent resource whose
tags will be listed.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.services.artifact_registry.pagers.ListTagsPager:
The response from listing tags.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tag.ListTagsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tag.ListTagsRequest):
request = tag.ListTagsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_tags]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListTagsPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
def get_tag(
self,
request: Optional[Union[tag.GetTagRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> tag.Tag:
r"""Gets a tag.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_get_tag():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.GetTagRequest(
)
# Make the request
response = client.get_tag(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.GetTagRequest, dict]):
The request object. The request to retrieve a tag.
name (str):
The name of the tag to retrieve.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.types.Tag:
Tags point to a version and represent
an alternative name that can be used to
access the version.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tag.GetTagRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tag.GetTagRequest):
request = tag.GetTagRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_tag]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def create_tag(
self,
request: Optional[Union[gda_tag.CreateTagRequest, dict]] = None,
*,
parent: Optional[str] = None,
tag: Optional[gda_tag.Tag] = None,
tag_id: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gda_tag.Tag:
r"""Creates a tag.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_create_tag():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.CreateTagRequest(
)
# Make the request
response = client.create_tag(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.CreateTagRequest, dict]):
The request object. The request to create a new tag.
parent (str):
The name of the parent resource where
the tag will be created.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
tag (google.cloud.artifactregistry_v1.types.Tag):
The tag to be created.
This corresponds to the ``tag`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
tag_id (str):
The tag id to use for this
repository.
This corresponds to the ``tag_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.types.Tag:
Tags point to a version and represent
an alternative name that can be used to
access the version.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, tag, tag_id])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gda_tag.CreateTagRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gda_tag.CreateTagRequest):
request = gda_tag.CreateTagRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if tag is not None:
request.tag = tag
if tag_id is not None:
request.tag_id = tag_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_tag]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_tag(
self,
request: Optional[Union[gda_tag.UpdateTagRequest, dict]] = None,
*,
tag: Optional[gda_tag.Tag] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gda_tag.Tag:
r"""Updates a tag.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_update_tag():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.UpdateTagRequest(
)
# Make the request
response = client.update_tag(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.UpdateTagRequest, dict]):
The request object. The request to create or update a
tag.
tag (google.cloud.artifactregistry_v1.types.Tag):
The tag that replaces the resource on
the server.
This corresponds to the ``tag`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
The update mask applies to the resource. For the
``FieldMask`` definition, see
https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.types.Tag:
Tags point to a version and represent
an alternative name that can be used to
access the version.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([tag, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gda_tag.UpdateTagRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gda_tag.UpdateTagRequest):
request = gda_tag.UpdateTagRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if tag is not None:
request.tag = tag
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_tag]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("tag.name", request.tag.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def delete_tag(
self,
request: Optional[Union[tag.DeleteTagRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes a tag.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_delete_tag():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.DeleteTagRequest(
)
# Make the request
client.delete_tag(request=request)
Args:
request (Union[google.cloud.artifactregistry_v1.types.DeleteTagRequest, dict]):
The request object. The request to delete a tag.
name (str):
The name of the tag to delete.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a tag.DeleteTagRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, tag.DeleteTagRequest):
request = tag.DeleteTagRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_tag]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
def set_iam_policy(
self,
request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Updates the IAM policy for a given resource.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
from google.iam.v1 import iam_policy_pb2 # type: ignore
def sample_set_iam_policy():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = iam_policy_pb2.SetIamPolicyRequest(
resource="resource_value",
)
# Make the request
response = client.set_iam_policy(request=request)
# Handle the response
print(response)
Args:
request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]):
The request object. Request message for `SetIamPolicy`
method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.policy_pb2.Policy:
An Identity and Access Management (IAM) policy, which specifies access
controls for Google Cloud resources.
A Policy is a collection of bindings. A binding binds
one or more members, or principals, to a single role.
Principals can be user accounts, service accounts,
Google groups, and domains (such as G Suite). A role
is a named list of permissions; each role can be an
IAM predefined role or a user-created custom role.
For some types of Google Cloud resources, a binding
can also specify a condition, which is a logical
expression that allows access to a resource only if
the expression evaluates to true. A condition can add
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
{
"bindings": [
{
"role":
"roles/resourcemanager.organizationAdmin",
"members": [ "user:mike@example.com",
"group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
]
}, { "role":
"roles/resourcemanager.organizationViewer",
"members": [ "user:eve@example.com" ],
"condition": { "title": "expirable access",
"description": "Does not grant access after
Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } }
], "etag": "BwWWja0YfJA=", "version": 3
}
**YAML example:**
bindings: - members: - user:\ mike@example.com -
group:\ admins@example.com - domain:google.com -
serviceAccount:\ my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin -
members: - user:\ eve@example.com role:
roles/resourcemanager.organizationViewer
condition: title: expirable access description:
Does not grant access after Sep 2020 expression:
request.time <
timestamp('2020-10-01T00:00:00.000Z') etag:
BwWWja0YfJA= version: 3
For a description of IAM and its features, see the
[IAM
documentation](\ https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
if isinstance(request, dict):
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
request = iam_policy_pb2.SetIamPolicyRequest(**request)
elif not request:
# Null request, just make one.
request = iam_policy_pb2.SetIamPolicyRequest()
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_iam_policy]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_iam_policy(
self,
request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> policy_pb2.Policy:
r"""Gets the IAM policy for a given resource.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
from google.iam.v1 import iam_policy_pb2 # type: ignore
def sample_get_iam_policy():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = iam_policy_pb2.GetIamPolicyRequest(
resource="resource_value",
)
# Make the request
response = client.get_iam_policy(request=request)
# Handle the response
print(response)
Args:
request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]):
The request object. Request message for `GetIamPolicy`
method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.policy_pb2.Policy:
An Identity and Access Management (IAM) policy, which specifies access
controls for Google Cloud resources.
A Policy is a collection of bindings. A binding binds
one or more members, or principals, to a single role.
Principals can be user accounts, service accounts,
Google groups, and domains (such as G Suite). A role
is a named list of permissions; each role can be an
IAM predefined role or a user-created custom role.
For some types of Google Cloud resources, a binding
can also specify a condition, which is a logical
expression that allows access to a resource only if
the expression evaluates to true. A condition can add
constraints based on attributes of the request, the
resource, or both. To learn which resources support
conditions in their IAM policies, see the [IAM
documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
**JSON example:**
{
"bindings": [
{
"role":
"roles/resourcemanager.organizationAdmin",
"members": [ "user:mike@example.com",
"group:admins@example.com",
"domain:google.com",
"serviceAccount:my-project-id@appspot.gserviceaccount.com"
]
}, { "role":
"roles/resourcemanager.organizationViewer",
"members": [ "user:eve@example.com" ],
"condition": { "title": "expirable access",
"description": "Does not grant access after
Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } }
], "etag": "BwWWja0YfJA=", "version": 3
}
**YAML example:**
bindings: - members: - user:\ mike@example.com -
group:\ admins@example.com - domain:google.com -
serviceAccount:\ my-project-id@appspot.gserviceaccount.com
role: roles/resourcemanager.organizationAdmin -
members: - user:\ eve@example.com role:
roles/resourcemanager.organizationViewer
condition: title: expirable access description:
Does not grant access after Sep 2020 expression:
request.time <
timestamp('2020-10-01T00:00:00.000Z') etag:
BwWWja0YfJA= version: 3
For a description of IAM and its features, see the
[IAM
documentation](\ https://cloud.google.com/iam/docs/).
"""
# Create or coerce a protobuf request object.
if isinstance(request, dict):
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
request = iam_policy_pb2.GetIamPolicyRequest(**request)
elif not request:
# Null request, just make one.
request = iam_policy_pb2.GetIamPolicyRequest()
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_iam_policy]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def test_iam_permissions(
self,
request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> iam_policy_pb2.TestIamPermissionsResponse:
r"""Tests if the caller has a list of permissions on a
resource.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
from google.iam.v1 import iam_policy_pb2 # type: ignore
def sample_test_iam_permissions():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = iam_policy_pb2.TestIamPermissionsRequest(
resource="resource_value",
permissions=['permissions_value1', 'permissions_value2'],
)
# Make the request
response = client.test_iam_permissions(request=request)
# Handle the response
print(response)
Args:
request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]):
The request object. Request message for
`TestIamPermissions` method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
Response message for TestIamPermissions method.
"""
# Create or coerce a protobuf request object.
if isinstance(request, dict):
# The request isn't a proto-plus wrapped type,
# so it must be constructed via keyword expansion.
request = iam_policy_pb2.TestIamPermissionsRequest(**request)
elif not request:
# Null request, just make one.
request = iam_policy_pb2.TestIamPermissionsRequest()
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def get_project_settings(
self,
request: Optional[Union[settings.GetProjectSettingsRequest, dict]] = None,
*,
name: Optional[str] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> settings.ProjectSettings:
r"""Retrieves the Settings for the Project.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_get_project_settings():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.GetProjectSettingsRequest(
name="name_value",
)
# Make the request
response = client.get_project_settings(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.GetProjectSettingsRequest, dict]):
The request object. Gets the redirection status for a
project.
name (str):
Required. The name of the
projectSettings resource.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.types.ProjectSettings:
The Artifact Registry settings that
apply to a Project.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a settings.GetProjectSettingsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, settings.GetProjectSettingsRequest):
request = settings.GetProjectSettingsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_project_settings]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def update_project_settings(
self,
request: Optional[Union[settings.UpdateProjectSettingsRequest, dict]] = None,
*,
project_settings: Optional[settings.ProjectSettings] = None,
update_mask: Optional[field_mask_pb2.FieldMask] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> settings.ProjectSettings:
r"""Updates the Settings for the Project.
.. code-block:: python
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
def sample_update_project_settings():
# Create a client
client = artifactregistry_v1.ArtifactRegistryClient()
# Initialize request argument(s)
request = artifactregistry_v1.UpdateProjectSettingsRequest(
)
# Make the request
response = client.update_project_settings(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.artifactregistry_v1.types.UpdateProjectSettingsRequest, dict]):
The request object. Sets the settings of the project.
project_settings (google.cloud.artifactregistry_v1.types.ProjectSettings):
The project settings.
This corresponds to the ``project_settings`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Field mask to support partial
updates.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.artifactregistry_v1.types.ProjectSettings:
The Artifact Registry settings that
apply to a Project.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([project_settings, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a settings.UpdateProjectSettingsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, settings.UpdateProjectSettingsRequest):
request = settings.UpdateProjectSettingsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if project_settings is not None:
request.project_settings = project_settings
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_project_settings]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("project_settings.name", request.project_settings.name),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
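# Illustrative usage only (not part of the generated client): the resource name
# below is a placeholder, but the context-manager pattern matches the
# __enter__/__exit__ methods defined above and closes the transport on exit.
#
#   with ArtifactRegistryClient() as client:
#       tag = client.get_tag(
#           name="projects/my-project/locations/us/repositories/my-repo"
#                "/packages/my-pkg/tags/my-tag"
#       )
#       print(tag.version)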
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-artifact-registry",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("ArtifactRegistryClient",)
"""
Script to help deploy a Fluo or Accumulo cluster (optionally to AWS EC2)
"""
import os
import sys
from sys import exit
import shutil
from botocore.exceptions import ClientError
from config import DeployConfig, HOST_VAR_DEFAULTS, PLAY_VAR_DEFAULTS
from util import parse_args, AMI_HELP_MSG, get_block_device_map
from os.path import isfile, join
import time
import subprocess
import boto3
class MuchosCluster:
def __init__(self, config):
self.config = config
def launch_node(self, hostname, services, sg_id):
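# Build a run_instances request from the [ec2] config section (networking,
# instance type, AMI, shutdown behavior, block devices, tags, optional user
# data) and launch a single EC2 instance for this hostname.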
associate_public_ip = True
if self.config.has_option('ec2', 'associate_public_ip'):
associate_public_ip = self.config.get('ec2', 'associate_public_ip').strip().lower() == 'true'
request = {'MinCount': 1, 'MaxCount': 1,
'NetworkInterfaces': [{'DeviceIndex': 0, 'AssociatePublicIpAddress': associate_public_ip,
'Groups': [sg_id]}]}
if self.config.has_option('ec2', 'subnet_id'):
request['NetworkInterfaces'][0]['SubnetId'] = self.config.get('ec2', 'subnet_id')
if 'worker' in services:
instance_type = self.config.get('ec2', 'worker_instance_type')
else:
instance_type = self.config.get('ec2', 'default_instance_type')
request['InstanceType'] = instance_type
request['InstanceInitiatedShutdownBehavior'] = self.config.get('ec2', 'shutdown_behavior')
if not self.config.has_option('ec2', 'aws_ami'):
exit('aws_ami property must be set!')
image_id = self.config.get('ec2', 'aws_ami')
if not image_id:
exit('aws_ami property was not properly set!')
request['ImageId'] = image_id
request['BlockDeviceMappings'] = get_block_device_map(instance_type)
if self.config.has_option('ec2', 'key_name'):
request['KeyName'] = self.config.get('ec2', 'key_name')
tags = [{'Key': 'Name', 'Value': self.config.cluster_name + '-' + hostname},
{'Key': 'Muchos', 'Value': self.config.cluster_name}]
for key, val in self.config.instance_tags().iteritems():
tags.append({'Key': key, 'Value': val})
request['TagSpecifications'] = [{'ResourceType': 'instance', 'Tags': tags}]
if self.config.has_option('ec2', 'user_data_path'):
user_data_path = self.config.get('ec2', 'user_data_path')
with open(user_data_path, 'r') as user_data_file:
user_data = user_data_file.read()
request['UserData'] = user_data
ec2 = boto3.client('ec2')
response = None
try:
response = ec2.run_instances(**request)
except ClientError as e:
exit("ERROR - Failed to launch EC2 instance due to exception:\n\n{0}\n\n{1}".format(e, AMI_HELP_MSG))
if response is None or len(response['Instances']) != 1:
exit('ERROR - Failed to start {0} node'.format(hostname))
print 'Launching {0} node using {1}'.format(hostname, image_id)
return response['Instances'][0]
def create_security_group(self):
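# Reuse an existing security group matching config.sg_name if one exists;
# otherwise create it and allow SSH from anywhere plus all traffic between
# members of the group. Returns the group id.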
ec2 = boto3.client('ec2')
sg = self.config.sg_name
create_group = True
group_id = None
try:
response = ec2.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': [sg]}])
if len(response['SecurityGroups']) > 0:
group_id = response['SecurityGroups'][0]['GroupId']
create_group = False
except ClientError:
pass
if create_group:
print "Creating security group " + sg
request = {'Description': "Security group created by Muchos", 'GroupName': sg}
if self.config.has_option('ec2', 'vpc_id'):
request['VpcId'] = self.config.get('ec2', 'vpc_id')
response = ec2.create_security_group(**request)
group_id = response['GroupId']
ec2.authorize_security_group_ingress(GroupName=sg, SourceSecurityGroupName=sg)
ec2.authorize_security_group_ingress(GroupName=sg, IpProtocol='tcp', FromPort=22, ToPort=22,
CidrIp='0.0.0.0/0')
return group_id
def delete_security_group(self):
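# Look up the cluster's security group by name and delete it, retrying every
# 10 seconds until AWS allows the deletion (e.g. once instances are gone).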
sg_id = None
ec2 = boto3.client('ec2')
try:
response = ec2.describe_security_groups(Filters=[{'Name': 'group-name', 'Values': [self.config.sg_name]}])
if len(response['SecurityGroups']) > 0:
sg_id = response['SecurityGroups'][0]['GroupId']
except ClientError:
pass
if not sg_id:
print "Could not find security group '{0}'".format(self.config.sg_name)
return
print "Attempting to delete security group '{0}' with id '{1}'...".format(self.config.sg_name, sg_id)
sg_exists = True
while sg_exists:
try:
request = {'GroupId': sg_id}
ec2.delete_security_group(**request)
sg_exists = False
except ClientError as e:
print "Failed to delete security group '{0}' due exception below:\n{1}\nRetrying in 10 sec..."\
.format(self.config.sg_name, e)
time.sleep(10)
print "Deleted security group"
def launch(self):
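# Launch one EC2 instance per configured node, wait until all of them report
# a 'running' state, then write the hosts file mapping each hostname to its
# private and public IPs.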
if self.active_nodes():
exit('ERROR - There are already instances running for {0} cluster'.format(self.config.cluster_name))
if isfile(self.config.hosts_path):
exit("ERROR - A hosts file already exists at {0}. Please delete before running launch again"
.format(self.config.hosts_path))
self.config.verify_launch()
print "Launching {0} cluster".format(self.config.cluster_name)
if self.config.has_option('ec2', 'security_group_id'):
sg_id = self.config.get('ec2', 'security_group_id')
else:
sg_id = self.create_security_group()
instance_d = {}
for (hostname, services) in self.config.nodes().items():
instance = self.launch_node(hostname, services, sg_id)
instance_d[instance['InstanceId']] = hostname
num_running = len(self.status(['running']))
num_expected = len(self.config.nodes())
while num_running != num_expected:
print "{0} of {1} nodes have started. Waiting another 5 sec..".format(num_running, num_expected)
time.sleep(5)
num_running = len(self.status(['running']))
with open(self.config.hosts_path, 'w') as hosts_file:
for instance in self.status(['running']):
public_ip = ''
if 'PublicIpAddress' in instance:
public_ip = instance['PublicIpAddress']
private_ip = instance['PrivateIpAddress']
hostname = instance_d[instance['InstanceId']]
print >>hosts_file, hostname, private_ip, public_ip
print "All {0} nodes have started. Created hosts file at {1}".format(num_expected, self.config.hosts_path)
def sync(self):
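# Render the Ansible inventory and variable files from the cluster config,
# then rsync the ansible/ directory to the proxy node and install Ansible
# there.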
config = self.config
print 'Syncing ansible directory on {0} cluster proxy node'.format(config.cluster_name)
host_vars = HOST_VAR_DEFAULTS
play_vars = PLAY_VAR_DEFAULTS
for section in ("general", "ansible-vars", config.get('performance', 'profile')):
for (name, value) in config.items(section):
if name not in ('proxy_hostname', 'proxy_socks_port'):
if name in host_vars:
host_vars[name] = value
if name in play_vars:
play_vars[name] = value
play_vars['accumulo_sha256'] = config.checksum('accumulo')
play_vars['fluo_sha256'] = config.checksum('fluo')
play_vars['fluo_yarn_sha256'] = config.checksum('fluo_yarn')
play_vars['hadoop_sha256'] = config.checksum('hadoop')
play_vars['spark_sha256'] = config.checksum('spark')
play_vars['zookeeper_sha256'] = config.checksum('zookeeper')
cloud_provider = host_vars.get('cloud_provider', 'ec2')
node_type_map = {}
if cloud_provider == 'ec2':
node_type_map = config.node_type_map()
play_vars["mount_root"] = config.mount_root
play_vars["metrics_drive_ids"] = config.metrics_drive_ids()
play_vars["fstype"] = config.fstype()
play_vars["force_format"] = config.force_format()
play_vars["shutdown_delay_minutes"] = config.get("ec2", "shutdown_delay_minutes")
if cloud_provider == 'baremetal':
play_vars["mount_root"] = config.get("baremetal", "mount_root")
play_vars["metrics_drive_ids"] = config.get("baremetal", "metrics_drives_ids").split(",")
mounts = config.get("baremetal", "mounts").split(",")
devices = config.get("baremetal", "devices").split(",")
for node_type in 'default', 'worker':
node_type_map[node_type] = {'mounts': mounts, 'devices': devices}
play_vars["node_type_map"] = node_type_map
host_vars['worker_data_dirs'] = str(node_type_map['worker']['mounts'])
host_vars['default_data_dirs'] = str(node_type_map['default']['mounts'])
with open(join(config.deploy_path, "ansible/site.yml"), 'w') as site_file:
print >>site_file, "- import_playbook: common.yml"
if config.has_service("spark"):
print >>site_file, "- import_playbook: spark.yml"
print >>site_file, "- import_playbook: hadoop.yml"
print >>site_file, "- import_playbook: zookeeper.yml"
if config.has_service("metrics"):
print >>site_file, "- import_playbook: metrics.yml"
print >>site_file, "- import_playbook: accumulo.yml"
if config.has_service('fluo'):
print >>site_file, "- import_playbook: fluo.yml"
if config.has_service('fluo_yarn'):
print >>site_file, "- import_playbook: fluo_yarn.yml"
if config.has_service("mesosmaster"):
print >>site_file, "- import_playbook: mesos.yml"
ansible_conf = join(config.deploy_path, "ansible/conf")
with open(join(ansible_conf, "hosts"), 'w') as hosts_file:
print >>hosts_file, "[proxy]\n{0}".format(config.proxy_hostname())
print >>hosts_file, "\n[accumulomaster]\n{0}".format(config.get_service_hostnames("accumulomaster")[0])
print >>hosts_file, "\n[namenode]\n{0}".format(config.get_service_hostnames("namenode")[0])
print >>hosts_file, "\n[resourcemanager]\n{0}".format(config.get_service_hostnames("resourcemanager")[0])
if config.has_service("spark"):
print >>hosts_file, "\n[spark]\n{0}".format(config.get_service_hostnames("spark")[0])
if config.has_service("mesosmaster"):
print >>hosts_file, "\n[mesosmaster]\n{0}".format(config.get_service_hostnames("mesosmaster")[0])
if config.has_service("metrics"):
print >>hosts_file, "\n[metrics]\n{0}".format(config.get_service_hostnames("metrics")[0])
print >>hosts_file, "\n[zookeepers]"
for (index, zk_host) in enumerate(config.get_service_hostnames("zookeeper"), start=1):
print >>hosts_file, "{0} id={1}".format(zk_host, index)
if config.has_service('fluo'):
print >>hosts_file, "\n[fluo]"
for host in config.get_service_hostnames("fluo"):
print >>hosts_file, host
if config.has_service('fluo_yarn'):
print >>hosts_file, "\n[fluo_yarn]"
for host in config.get_service_hostnames("fluo_yarn"):
print >>hosts_file, host
print >>hosts_file, "\n[workers]"
for worker_host in config.get_service_hostnames("worker"):
print >>hosts_file, worker_host
print >>hosts_file, "\n[accumulo:children]\naccumulomaster\nworkers"
print >>hosts_file, "\n[hadoop:children]\nnamenode\nresourcemanager\nworkers"
print >>hosts_file, "\n[nodes]"
for (private_ip, hostname) in config.get_private_ip_hostnames():
print >>hosts_file, "{0} ansible_ssh_host={1} node_type={2}".format(hostname, private_ip,
config.node_type(hostname))
print >>hosts_file, "\n[all:vars]"
for (name, value) in sorted(host_vars.items()):
print >>hosts_file, "{0} = {1}".format(name, value)
with open(join(config.deploy_path, "ansible/group_vars/all"), 'w') as play_vars_file:
for (name, value) in sorted(play_vars.items()):
print >>play_vars_file, "{0}: {1}".format(name, value)
# copy keys file to ansible/conf (if it exists)
conf_keys = join(config.deploy_path, "conf/keys")
ansible_keys = join(ansible_conf, "keys")
if isfile(conf_keys):
shutil.copyfile(conf_keys, ansible_keys)
else:
open(ansible_keys, 'w').close()
basedir = config.get('general', 'cluster_basedir')
cmd = "rsync -az --delete -e \"ssh -o 'StrictHostKeyChecking no'\""
subprocess.call("{cmd} {src} {usr}@{ldr}:{tdir}".format(cmd=cmd, src=join(config.deploy_path, "ansible"),
usr=config.get('general', 'cluster_user'), ldr=config.get_proxy_ip(), tdir=basedir),
shell=True)
self.exec_on_proxy_verified("{0}/ansible/scripts/install_ansible.sh".format(basedir), opts='-t')
def setup(self):
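# Sync the Ansible config, upload any local Accumulo/Fluo tarballs to the
# proxy node, and run the site.yml playbook to set up the cluster.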
config = self.config
print 'Setting up {0} cluster'.format(config.cluster_name)
self.sync()
conf_upload = join(config.deploy_path, "conf/upload")
accumulo_tarball = join(conf_upload, "accumulo-{0}-bin.tar.gz".format(config.version("accumulo")))
fluo_tarball = join(conf_upload, "fluo-{0}-bin.tar.gz".format(config.version("fluo")))
fluo_yarn_tarball = join(conf_upload, "fluo-yarn-{0}-bin.tar.gz".format(config.version("fluo_yarn")))
basedir = config.get('general', 'cluster_basedir')
cluster_tarballs = "{0}/tarballs".format(basedir)
self.exec_on_proxy_verified("mkdir -p {0}".format(cluster_tarballs))
if isfile(accumulo_tarball):
self.send_to_proxy(accumulo_tarball, cluster_tarballs)
if isfile(fluo_tarball) and config.has_service('fluo'):
self.send_to_proxy(fluo_tarball, cluster_tarballs)
if isfile(fluo_yarn_tarball) and config.has_service('fluo_yarn'):
self.send_to_proxy(fluo_yarn_tarball, cluster_tarballs)
self.execute_playbook("site.yml")
def status(self, states):
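# Return the EC2 instances tagged for this cluster that are in one of the
# given states.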
ec2 = boto3.client('ec2')
response = ec2.describe_instances(Filters=[{'Name': 'tag:Muchos', 'Values': [self.config.cluster_name]}])
nodes = []
for res in response['Reservations']:
for inst in res['Instances']:
if inst['State']['Name'] in states:
nodes.append(inst)
return nodes
def active_nodes(self):
return self.status(['pending', 'running', 'stopping', 'stopped'])
@staticmethod
def print_nodes(nodes):
for node in nodes:
name = 'Unknown'
for tag in node['Tags']:
if tag['Key'] == 'Name':
name = tag['Value']
print " ", name, node['InstanceId'], node['PrivateIpAddress'], node.get('PublicIpAddress', '')
def terminate(self, hosts_path):
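# Print the nodes that will be terminated, ask for confirmation, terminate
# them, delete the Muchos-created security group (unless a user-supplied
# security_group_id was configured), and remove the hosts file.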
nodes = self.active_nodes()
print "The following {0} nodes in {1} cluster will be terminated:".format(len(nodes), self.config.cluster_name)
self.print_nodes(nodes)
response = raw_input("Do you want to continue? (y/n) ")
if response == "y":
ec2 = boto3.client('ec2')
for node in nodes:
ec2.terminate_instances(InstanceIds=[node['InstanceId']])
print "Terminated nodes."
if not self.config.has_option('ec2', 'security_group_id'):
self.delete_security_group()
if isfile(hosts_path):
os.remove(hosts_path)
print "Removed hosts file at ", hosts_path
else:
print "Aborted termination"
def ssh(self):
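# Open an interactive SSH session to the proxy node, optionally setting up a
# SOCKS forward if proxy_socks_port is configured.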
self.wait_until_proxy_ready()
fwd = ''
if self.config.has_option('general', 'proxy_socks_port'):
fwd = "-D " + self.config.get('general', 'proxy_socks_port')
ssh_command = "ssh -C -A -o 'StrictHostKeyChecking no' {fwd} {usr}@{ldr}".format(
usr=self.config.get('general', 'cluster_user'), ldr=self.config.get_proxy_ip(), fwd=fwd)
print "Logging into proxy using: {0}".format(ssh_command)
retcode = subprocess.call(ssh_command, shell=True)
if retcode != 0:
exit("ERROR - Command failed with return code of {0}: {1}".format(retcode, ssh_command))
def exec_on_proxy(self, command, opts=''):
ssh_command = "ssh -A -o 'StrictHostKeyChecking no' {opts} {usr}@{ldr} '{cmd}'".format(
usr=self.config.get('general', 'cluster_user'),
ldr=self.config.get_proxy_ip(), cmd=command, opts=opts)
return subprocess.call(ssh_command, shell=True), ssh_command
def exec_on_proxy_verified(self, command, opts=''):
(retcode, ssh_command) = self.exec_on_proxy(command, opts)
if retcode != 0:
exit("ERROR - Command failed with return code of {0}: {1}".format(retcode, ssh_command))
def wait_until_proxy_ready(self):
cluster_user = self.config.get('general', 'cluster_user')
print "Checking if '{0}' proxy can be reached using: ssh {1}@{2}"\
.format(self.config.proxy_hostname(), cluster_user, self.config.get_proxy_ip())
while True:
(retcode, ssh_command) = self.exec_on_proxy('pwd > /dev/null')
if retcode == 0:
print "Connected to proxy using SSH!"
time.sleep(1)
break
print "Proxy could not be accessed using SSH. Will retry in 5 sec..."
time.sleep(5)
def execute_playbook(self, playbook):
print "Executing '{0}' playbook".format(playbook)
basedir = self.config.get('general', 'cluster_basedir')
self.exec_on_proxy_verified("time -p ansible-playbook {base}/ansible/{playbook}"
.format(base=basedir, playbook=playbook), opts='-t')
def send_to_proxy(self, path, target, skip_if_exists=True):
print "Copying to proxy: ", path
cmd = "scp -o 'StrictHostKeyChecking no'"
if skip_if_exists:
cmd = "rsync --update --progress -e \"ssh -o 'StrictHostKeyChecking no'\""
subprocess.call("{cmd} {src} {usr}@{ldr}:{tdir}".format(
cmd=cmd, src=path, usr=self.config.get('general', 'cluster_user'), ldr=self.config.get_proxy_ip(),
tdir=target), shell=True)
def main():
deploy_path = os.environ.get('MUCHOS_HOME')
if not deploy_path:
exit('ERROR - MUCHOS_HOME env variable must be set!')
if not os.path.isdir(deploy_path):
exit('ERROR - Directory set by MUCHOS_HOME does not exist: '+deploy_path)
config_path = join(deploy_path, "conf/muchos.props")
if not isfile(config_path):
exit('ERROR - A config file does not exist at '+config_path)
checksums_path = join(deploy_path, "conf/checksums")
if not isfile(checksums_path):
exit('ERROR - A checksums file does not exist at '+checksums_path)
hosts_dir = join(deploy_path, "conf/hosts/")
# parse command line args
retval = parse_args(hosts_dir)
if not retval:
print "Invalid command line arguments. For help, use 'muchos -h'"
sys.exit(1)
(opts, action, args) = retval
hosts_path = join(hosts_dir, opts.cluster)
config = DeployConfig(deploy_path, config_path, hosts_path, checksums_path, opts.cluster)
config.verify_config(action)
cluster = MuchosCluster(config)
if action == 'launch':
cluster.launch()
elif action == 'status':
nodes = cluster.status(['running'])
print "Found {0} nodes in {1} cluster".format(len(nodes), config.cluster_name)
cluster.print_nodes(nodes)
elif action == 'sync':
cluster.sync()
elif action == 'setup':
cluster.setup()
elif action == 'config':
if opts.property == 'all':
config.print_all()
else:
config.print_property(opts.property)
elif action == 'ssh':
cluster.ssh()
elif action in ('wipe', 'kill', 'cancel_shutdown'):
if not isfile(hosts_path):
exit("Hosts file does not exist for cluster: "+hosts_path)
if action == 'wipe':
print "Killing all processes started by Muchos and wiping Muchos data from {0} cluster"\
.format(config.cluster_name)
elif action == 'kill':
print "Killing all processes started by Muchos on {0} cluster".format(config.cluster_name)
elif action == 'cancel_shutdown':
print "Cancelling automatic shutdown of {0} cluster".format(config.cluster_name)
cluster.execute_playbook(action + ".yml")
elif action == 'terminate':
cluster.terminate(hosts_path)
else:
print 'ERROR - Unknown action:', action
if __name__ == '__main__':
    main()
|
{
"content_hash": "4bcbd45d6f9cac5d0cd860e0b5035aed",
"timestamp": "",
"source": "github",
"line_count": 475,
"max_line_length": 119,
"avg_line_length": 45.614736842105266,
"alnum_prop": 0.5828218027414963,
"repo_name": "mikewalch/zetten",
"id": "a7824f5b417de98d00a7687644f2fce6f914a168",
"size": "22279",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/muchos/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "40401"
},
{
"name": "Shell",
"bytes": "6583"
},
{
"name": "VimL",
"bytes": "55"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name='richenum',
version='1.3.0',
description='Enum library for python.',
long_description=(
open('README.rst').read() + '\n\n' +
open('CHANGELOG.rst').read() + '\n\n' +
open('AUTHORS.rst').read()),
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='python enum richenum',
url='https://github.com/hearsaycorp/richenum',
author='Hearsay Social',
author_email='opensource@hearsaysocial.com',
license='MIT',
package_dir={'': 'src'},
packages=find_packages('src'),
tests_require=['pytest'],
setup_requires=["pytest-runner"],
install_requires=['six'],
test_suite='tests'
)
|
{
"content_hash": "e7f0fdb12b88eadc54cb2c0d6d5fef70",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 71,
"avg_line_length": 36.13513513513514,
"alnum_prop": 0.5991024682124159,
"repo_name": "hearsaycorp/richenum",
"id": "aeafa4f0525964291443bb2f11b03ecb71870279",
"size": "1360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25943"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('body', models.TextField(verbose_name='body')),
('sent_at', models.DateTimeField(auto_now_add=True, verbose_name='sent at')),
('sender_deleted_at', models.DateTimeField(null=True, verbose_name='sender deleted at', blank=True)),
],
options={
'ordering': ['-sent_at'],
'verbose_name': 'message',
'verbose_name_plural': 'messages',
},
),
migrations.CreateModel(
name='MessageContact',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('latest_message', models.ForeignKey(verbose_name='latest message', to='umessages.Message', on_delete=models.CASCADE)),
('um_from_user', models.ForeignKey(related_name='um_from_users', verbose_name='from user', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
('um_to_user', models.ForeignKey(related_name='um_to_users', verbose_name='to user', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
options={
'ordering': ['latest_message'],
'verbose_name': 'contact',
'verbose_name_plural': 'contacts',
},
),
migrations.CreateModel(
name='MessageRecipient',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('read_at', models.DateTimeField(null=True, verbose_name='read at', blank=True)),
('deleted_at', models.DateTimeField(null=True, verbose_name='recipient deleted at', blank=True)),
('message', models.ForeignKey(verbose_name='message', to='umessages.Message', on_delete=models.CASCADE)),
('user', models.ForeignKey(verbose_name='recipient', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
],
options={
'verbose_name': 'recipient',
'verbose_name_plural': 'recipients',
},
),
migrations.AddField(
model_name='message',
name='recipients',
field=models.ManyToManyField(related_name='received_messages', verbose_name='recipients', through='umessages.MessageRecipient', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='message',
name='sender',
field=models.ForeignKey(related_name='sent_messages', verbose_name='sender', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
),
migrations.AlterUniqueTogether(
name='messagecontact',
unique_together=set([('um_from_user', 'um_to_user')]),
),
]
|
{
"content_hash": "eca9597cdbf2a42a89ae3486a9ec7312",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 169,
"avg_line_length": 47.357142857142854,
"alnum_prop": 0.5779788838612367,
"repo_name": "bioinformatics-ua/django-userena",
"id": "b8606a1790119d4d77a576ef93549141ba83d758",
"size": "3339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userena/contrib/umessages/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "21556"
},
{
"name": "Python",
"bytes": "246254"
},
{
"name": "Shell",
"bytes": "25"
}
],
"symlink_target": ""
}
|
from emigrer.settings.base import *
|
{
"content_hash": "6524dff011c54a8ab2ca4303e94c912b",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 35,
"avg_line_length": 36,
"alnum_prop": 0.8055555555555556,
"repo_name": "henocdz/evernote-migrator",
"id": "3bd52878e5473e08d3097fa6d27f55ea77197917",
"size": "36",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "emigrer/settings/dev.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4227"
}
],
"symlink_target": ""
}
|
"""
Harvester for the SHAREOK Repository for the SHARE project
Example API call: https://shareok.org/oai/request?verb=ListRecords&metadataPrefix=oai_dc
"""
from __future__ import unicode_literals
import logging
from lxml import etree
from scrapi.base import OAIHarvester
logger = logging.getLogger(__name__)
class ShareOKHarvester(OAIHarvester):
short_name = 'shareok'
long_name = 'SHAREOK Repository'
url = 'https://shareok.org'
timezone_granularity = True
verify = False
base_url = 'https://shareok.org/oai/request'
# TODO - add date back in once we fix elasticsearch mapping
property_list = [
'type', 'source', 'format',
'description', 'setSpec'
]
approved_sets = [
'com_11244_14447',
'com_11244_1',
'col_11244_14248',
'com_11244_6231',
'col_11244_7929',
'col_11244_7920',
'col_11244_10476',
'com_11244_10465',
'com_11244_10460',
'col_11244_10466',
'col_11244_10464',
'col_11244_10462'
]
def normalize(self, raw_doc):
str_result = raw_doc.get('doc')
result = etree.XML(str_result)
set_spec = result.xpath(
'ns0:header/ns0:setSpec/node()',
namespaces=self.namespaces
)
# check if all of the sets in the record are in the approved set list.
# If all of them aren't, don't normalize.
actual = {x.replace('publication:', '') for x in set_spec}
if not len(set(self.approved_sets).intersection(actual)) == len(actual):
logger.info('Series {} not in approved list'.format(set_spec))
return None
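        # Added illustrative note (not in the original source): with a hypothetical
        # set_spec of ['publication:com_11244_1', 'publication:col_99999'], actual is
        # {'com_11244_1', 'col_99999'}; only the first value is approved, so the
        # intersection size (1) differs from len(actual) (2) and the record is skipped above.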
status = result.xpath('ns0:header/@status', namespaces=self.namespaces)
if status and status[0] == 'deleted':
logger.info('Deleted record, not normalizing {}'.format(raw_doc['docID']))
return None
        return super(ShareOKHarvester, self).normalize(raw_doc)
|
{
"content_hash": "26dd3241714f0f7de5dee66ab03e7b79",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 88,
"avg_line_length": 30.50769230769231,
"alnum_prop": 0.6116994452849218,
"repo_name": "ostwald/scrapi",
"id": "52daf47eea5620755057b301fe209a00c14919fb",
"size": "1983",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scrapi/harvesters/shareok.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "255919"
}
],
"symlink_target": ""
}
|
"""Provides some useful tools for working with Cyclopts, including reporting
output.
:author: Matthew Gidden <matthew.gidden _at_ gmail.com>
"""
from __future__ import print_function
import os
import io
import uuid
import shutil
import operator
import tables as t
import numpy as np
from functools import reduce
from collections import defaultdict, Iterable, Sequence, Mapping
import paramiko as pm
from os import kill
from signal import alarm, signal, SIGALRM, SIGKILL
from subprocess import PIPE, Popen
import getpass
import importlib
import itertools as itools
import gc
import resource
import cyclopts
from cyclopts.params import PARAM_CTOR_ARGS, Param, BoolParam, SupConstrParam, \
CoeffParam
FILTERS = t.Filters(complevel=4)
cyclopts_remote_run_dir = 'cyclopts-runs'
class Incrementer(object):
"""A simple helper class to increment a value"""
def __init__(self, start = 0):
"""Parameters
----------
start : int, optional
an initial value
"""
self._val = start - 1
def next(self):
"""Returns an incremented value"""
self._val += 1
return self._val
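# Usage sketch (illustrative, not part of the original module):
#   counter = Incrementer(start=5)
#   counter.next()  # -> 5
#   counter.next()  # -> 6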
class NotSpecified(object):
"""A helper class singleton for run control meaning that a 'real' value
has not been given."""
def __repr__(self):
return "NotSpecified"
NotSpecified = NotSpecified()
#
# Run Control
#
# Code basis taken from xdress' run control in xdress/utils.py.
#
class RunControl(object):
"""A composable configuration class for cyclopts. Unlike argparse.Namespace,
this keeps the object dictionary (__dict__) separate from the run control
attributes dictionary (_dict). Modified from xdress' run control in
xdress/utils.py"""
def __init__(self, **kwargs):
"""Parameters
-------------
kwargs : optional
Items to place into run control.
"""
self._dict = {}
for k, v in kwargs.items():
setattr(self, k, v)
self._updaters = {}
def __getattr__(self, key):
if key in self._dict:
return self._dict[key]
elif key in self.__dict__:
return self.__dict__[key]
elif key in self.__class__.__dict__:
return self.__class__.__dict__[key]
else:
msg = "RunControl object has no attribute {0!r}.".format(key)
raise AttributeError(msg)
def __setattr__(self, key, value):
if key.startswith('_'):
self.__dict__[key] = value
else:
if value is NotSpecified and key in self:
return
self._dict[key] = value
def __delattr__(self, key):
if key in self._dict:
del self._dict[key]
elif key in self.__dict__:
del self.__dict__[key]
elif key in self.__class__.__dict__:
del self.__class__.__dict__[key]
else:
msg = "RunControl object has no attribute {0!r}.".format(key)
raise AttributeError(msg)
def __iter__(self):
return iter(self._dict)
def __repr__(self):
keys = sorted(self._dict.keys())
s = ", ".join(["{0!s}={1!r}".format(k, self._dict[k]) for k in keys])
return "{0}({1})".format(self.__class__.__name__, s)
def _pformat(self):
keys = sorted(self._dict.keys())
f = lambda k: "{0!s}={1}".format(k, pformat(self._dict[k], indent=2))
s = ",\n ".join(map(f, keys))
return "{0}({1})".format(self.__class__.__name__, s)
def __contains__(self, key):
return key in self._dict or key in self.__dict__ or \
key in self.__class__.__dict__
def __eq__(self, other):
if hasattr(other, '_dict'):
return self._dict == other._dict
elif isinstance(other, Mapping):
return self._dict == other
else:
return NotImplemented
def __ne__(self, other):
if hasattr(other, '_dict'):
return self._dict != other._dict
elif isinstance(other, Mapping):
return self._dict != other
else:
return NotImplemented
def _update(self, other):
"""Updates the rc with values from another mapping. If this rc has
if a key is in self, other, and self._updaters, then the updaters
value is called to perform the update. This function should return
a copy to be safe and not update in-place.
"""
if hasattr(other, '_dict'):
other = other._dict
elif not hasattr(other, 'items'):
other = dict(other)
for k, v in other.items():
if v is NotSpecified:
pass
elif k in self._updaters and k in self:
v = self._updaters[k](getattr(self, k), v)
setattr(self, k, v)
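# Usage sketch (illustrative only; the keys shown are hypothetical):
#   rc = RunControl(solver='cbc', verbose=True)
#   rc._update({'verbose': NotSpecified, 'n_runs': 3})
#   # NotSpecified never overwrites an existing value, so rc.verbose stays True
#   # while rc.n_runs becomes 3.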
def parse_rc(files):
"""Parse a list of rc files.
Parameters
----------
files : list or str
the files to parse
Returns
-------
rc : RunControl
"""
files = [files] if isinstance(files, basestring) else files
rc = RunControl()
for rcfile in files:
if not os.path.isfile(rcfile):
continue
rcdict = {}
exec_file(rcfile, rcdict, rcdict)
rc._update(rcdict)
return rc
def exec_file(filename, glb=None, loc=None):
"""A function equivalent to the Python 2.x execfile statement. Taken from
xdress/utils.py"""
with io.open(filename, 'r') as f:
src = f.read()
exec(compile(src, filename, "exec"), glb, loc)
def _merge_leaf(node, dest_file):
src = node
dest = dest_file.get_node(node._v_pathname)
if isinstance(node, t.Table):
dtypes = src.dtype.names
# this is a hack because appending rows throws an error
# see http://stackoverflow.com/questions/17847587/pytables-appending-recarray
# dest.append([row for row in src.iterrows()])
for src_row in src.iterrows():
dest_row = dest.row
for j in range(len(dtypes)):
dest_row[dtypes[j]] = src_row[j]
dest_row.append()
dest.flush()
def _copy_node(node, dest_file, recursive=False):
if node._v_depth == 0: # base recursion level, don't copy root
return
if dest_file.__contains__(node._v_pathname):
return
parent = node._v_parent
if not dest_file.__contains__(parent._v_pathname):
_copy_node(parent, dest_file) # parent doesn't exist, copy it
# copy node
node._v_file.copy_node(
node._v_pathname,
newparent=dest_file.get_node(node._v_parent._v_pathname),
recursive=recursive)
dest_file.flush()
def _merge_node(node, dest_file):
if not dest_file.__contains__(node._v_pathname):
_copy_node(node, dest_file, recursive=True)
return
if isinstance(node, t.Leaf):
_merge_leaf(node, dest_file)
else:
for child in node._v_children:
_merge_node(node._v_file.get_node(node._v_pathname + '/' + child),
dest_file)
def combine(files, new_file=None, clean=False, verbose=False):
"""Combines two or more databases with identical layout, writing their
output into a new file or appending to the first in the list.
Parameters
----------
files : iterator
An iterator listing all databases to combine
new_file : str, optional
The new database to write to. If None, all databases are appended to the
end of the first database in the list.
clean : bool, optional
Whether to remove original files after combining them
verbose : bool, optional
Whether to print output
"""
if new_file is not None and os.path.exists(new_file):
raise ValueError('Cannot write combined hdf5 files to an existing location.')
first = files.next()
if new_file is not None:
if verbose:
print('Starting with base file {0}'.format(first))
shutil.copyfile(first, new_file)
fname = new_file
if clean:
os.remove(first)
else:
fname = first
aggdb = t.open_file(fname, 'a')
for f in files:
if verbose:
print('Merging {0}'.format(f))
db = t.open_file(f, 'r')
_merge_node(db.root, aggdb)
aggdb.flush()
db.close()
if clean:
os.remove(f)
aggdb.close()
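# Usage sketch (hypothetical file names): combine two runs into a fresh database.
#   combine(iter(['run0.h5', 'run1.h5']), new_file='all_runs.h5', verbose=True)
# Note that `files` must be an iterator (files.next() is called above), not a plain list.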
def get_process_children(pid):
"""Return
------
children : list of ints
        all of a process's children
"""
p = Popen('ps --no-headers -o pid --ppid %d' % pid, shell = True,
stdout = PIPE, stderr = PIPE)
stdout, stderr = p.communicate()
return [int(p) for p in stdout.split()]
def ssh_test_connect(client, host, user, keyfile=None, auth=True):
"""Tests an ssh connection and returns success or failure thereof.
Parameters
----------
client : paramiko SSH client
host : str
user : str
keyfile : str, optional
auth : bool, optional
whether to prompt for a password authorization on failure
"""
keyfile = keyfile if keyfile is not None else \
[os.path.join(os.environ['HOME'], '.ssh','id_rsa'),
os.path.join(os.environ['HOME'], '.ssh','chtckey')]
try:
client.connect(host, username=user, key_filename=keyfile)
client.close()
can_connect = True
except pm.AuthenticationException:
can_connect = False
except pm.BadHostKeyException:
import pdb; pdb.set_trace()
can_connect = False
password = None
if not can_connect and auth:
password = False
while not password:
password = getpass.getpass("{0}@{1} password: ".format(user, host))
# pub = pm.ssh_pub_key(keyfile)
# cmds = ["mkdir -p ~/.ssh",
# 'echo "{0}" >> ~/.ssh/authorized_keys'.format(pub),
# 'chmod og-rw ~/.ssh/authorized_keys',
# 'chmod a-x ~/.ssh/authorized_keys',
# 'chmod 700 ~/.ssh',
# ]
client.connect(host, username=user, password=password)
# for cmd in cmds:
# stdin, stdout, stderr = client.exec_command(cmd)
# client.close()
    # # verify that this key works
# client.connect(host, username=user, key_filename=keyfile)
# client.close()
can_connect = True
print("finished connecting")
return can_connect, keyfile, password
def str_to_uuid(x):
"""return a uuid from a stored value, allows strings of len == 15 which is
missing a null-padded value"""
if len(x) < 16:
while len(x) < 16:
x += '\0'
return uuid.UUID(bytes=x)
def uuid_to_str(x):
"""return a string of a uuid, needed in case the uuid is missing a null-
padded value"""
ret = x.hex
if len(ret) < 16:
while len(ret) < 16:
ret += '\0'
return ret
def uuidhex(bytes=bytes):
return uuid.UUID(bytes=bytes).hex
def memusg(pid):
"""in kb"""
# could also use
# resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
fname = os.path.join(os.path.sep, 'proc', str(pid), 'status')
with io.open(fname) as f:
lines = f.readlines()
return float(next(l for l in lines if l.startswith('VmSize')).split()[1])
def attr_from_sources(attr, sources):
"""Returns an attribute from a priority queue of sources, returns None if no
source has that attribute"""
for source in sources:
if hasattr(source, attr) and getattr(source, attr) is not None:
return getattr(source, attr)
return None
def all_obj_rcs(rc=None, args=None):
"""Return all run control files in priority order known to cyclopts"""
ret = []
if rc is not None:
ret.append(rc)
if args is not None and hasattr(args, 'cycrc') and os.path.exists(args.cycrc):
ret.append(parse_rc(args.cycrc))
check = os.path.expanduser('~/.cycloptsrc.py')
if os.path.exists(check):
ret.append(parse_rc(check))
return ret
def obj_info(kind=None, rcs=None, args=None):
"""Get information about an importable object
Parameters
----------
kind : str
the kind of object
rcs : list of RunControl objects or single object, optional
rcs are checked in order
args : argparse args, optional
CLI args
Return
------
info : tuple of package, module, and class names
"""
mod, cname, pack = None, None, None
rcs = [rcs] if not isinstance(rcs, list) else rcs
sources = [args] + rcs # try CLI first, then rcs in order
attrs = ['{0}_package'.format(kind), '{0}_module'.format(kind),
'{0}_class'.format(kind)]
return tuple(attr_from_sources(x, sources) for x in attrs)
def get_obj(kind=None, rcs=None, args=None):
"""Get an object of certain kind, e.g. species or family. Both the rc and
args argument will be searched for attributes named <kind>_package,
<kind>_module, and <kind>_cname. The package/module is then imported and an
instance of the cname is returned. The CLI is searched before the rcs.
Parameters
----------
kind : str
the kind of object
rcs : list of RunControl objects or single object, optional
rcs are checked in order
args : argparse args, optional
CLI args
Return
------
inst : an object instance
"""
pack, mod, cname = obj_info(kind=kind, rcs=rcs, args=args)
try:
mod = importlib.import_module(mod, package=pack)
except AttributeError:
raise RuntimeError('Could not find {0} module {1}. Make sure to add '
'a {0}_module entry to a run control file or the '
'CLI.'.format(kind, mod))
if cname is None or not hasattr(mod, cname):
raise RuntimeError('Could not find {0} class {1}. Make sure to add '
'a {0}_class entry to a run control file or the '
'CLI.'.format(kind, cname))
inst = getattr(mod, cname)()
return inst
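# Illustrative run-control entries that obj_info()/get_obj() look for (names hypothetical):
#   species_package = 'mypkg'
#   species_module = 'mypkg.species'
#   species_class = 'MySpecies'
# get_obj(kind='species', rcs=rc) would then import mypkg.species and return MySpecies().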
def collect_instids(h5file, path, rc=None, instids=None, colname='instid'):
"""Collects all instids as specified.
If rc and instids is None, all ids found in the h5file's path are collected.
Otherwise, instids provided by the instid listing and the paramater space
defined by the run control `inst_queries` parameter are collected.
Parameters
----------
h5file : PyTables File object
the file to collect ids from
path : str
the path to a property table node
rc : RunControl object, optional
a run control object specifying a subset of instids to collect
instids : collection of uuids
explicit instids to collect
colname : str
the instance id column name
Return
------
instids : set of uuids
"""
instids = set(instids) if instids is not None else set()
rc = rc if rc is not None else RunControl()
instids |= set(uuid.UUID(x) for x in rc.inst_ids) \
if 'inst_ids' in rc else set()
# inst queries are a mapping from instance table names to queryable
# conditions, the result of which is a collection of instids that meet those
# conditions
h5node = h5file.get_node(path)
conds = rc.inst_queries if 'inst_queries' in rc else []
if isinstance(conds, basestring):
conds = [conds]
if len(conds) > 0:
ops = conds[1::2]
conds = ['({0})'.format(c) if \
not c.lstrip().startswith('(') and \
not c.rstrip().endswith(')') else c for c in conds[::2]]
cond = ' '.join(
[' '.join(i) for i in \
itools.izip_longest(conds, ops, fillvalue='')]).strip()
vals = [x[colname] for x in h5node.where(cond)]
vals = [str_to_uuid(x) for x in vals]
instids |= set(vals)
# if no ids, then run everything
if len(instids) == 0:
for row in h5node.iterrows():
instids.add(str_to_uuid(row[colname]))
return instids
"""simple utility for determining if something is a sequence (and not a
string)"""
seq_not_str = lambda obj: isinstance(obj, Sequence) \
and not isinstance(obj, basestring)
def n_permutations(x, iter_keys=[], recurse=True):
"""Parameters
----------
x : dict, list, or other
iter_keys : a list of keys atomic values should be iterables, optional
recurse : bool, whether to recurse at the lowest level
Returns
-------
n : int
the total number of permutations of values in x, if x has
container values, those are recusively interrogated as well
"""
n = 1
if seq_not_str(x):
if seq_not_str(x[0]):
if recurse:
for y in x:
n *= n_permutations(y, recurse=recurse)
else:
n *= len(x)
else:
factor = len(x) if recurse else 1
n *= factor
elif isinstance(x, Mapping):
for k, v in x.items():
flag = False if k in iter_keys else True # in blacklist
n *= n_permutations(v, recurse=flag)
return n
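# Worked example (illustrative): for x = {'a': [1, 2], 'b': [3, 4, 5]} and no iter_keys,
# n_permutations(x) == 2 * 3 == 6; with iter_keys=['b'] the 'b' list is treated as a
# single atomic value, giving 2.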
def expand_args(x):
"""Parameters
----------
x : list of lists of arguments
Returns
-------
args : generator
a generator that returns a collection of single arguments
"""
for y in itools.product(*x):
yield y
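# Illustrative example: expand_args([[1, 2], ['x']]) yields (1, 'x') then (2, 'x'),
# i.e. the Cartesian product of the per-argument lists.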
def conv_insts(fam, fam_io_manager, sp, sp_io_manager,
ninst=1, update_freq=100, verbose=False):
n = 0
for point in sp.points():
param_uuid = uuid.uuid4()
sp.record_point(point, param_uuid, sp_io_manager)
for i in range(ninst):
inst_uuid = uuid.uuid4()
inst = sp.gen_inst(point, inst_uuid, sp_io_manager)
fam.record_inst(inst, inst_uuid, param_uuid, sp.name,
fam_io_manager)
if n % update_freq == 0:
if verbose:
# print('Total writes: {0}'.format(
# sum([tbl.n_writes for tbl in fam_tables.values() + sp_tables.values()])))
print('Memusg before collect: {0}'.format(
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
gc.collect()
if verbose:
print('Memusg after collect: {0}'.format(
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
print('{0} instances have been converted'.format(n))
n += 1
if verbose:
print('{0} instances have been converted'.format(n))
def cyc_members(obj):
"""return a list of persistable members per the Cyclopts style guide."""
members = obj.__class__.__dict__.keys()
cycfilter = lambda x: x.startswith('_') or x.endswith('_') or x[0].isupper()
return [x for x in members if not cycfilter(x)]
def fam_and_sp(args):
rc = parse_rc(args.rc) if hasattr(args, 'rc') else None
if hasattr(args, 'cycrc'):
obj_rcs = [rc, parse_rc(args.cycrc)] \
if os.path.exists(args.cycrc) else [rc]
sp = get_obj(kind='species', rcs=obj_rcs, args=args)
fam = sp.family
return fam, sp
def drive_post_process(res_tbl,
fam=None, fam_io_managers=None,
sp=None, sp_io_managers=None,
verbose_freq=None, limit=None):
iid_to_sids = res_tbl.value_mapping('instid', 'solnid', uuids=True)
niids = len(iid_to_sids.keys())
count = 0
verbose = verbose_freq is not None
for iid, sids in iid_to_sids.items():
if verbose:
if count % verbose_freq == 0:
print('{0}/{1} instances have been post processed.'.format(
count, niids))
if count == limit:
return # early exit
count += 1
props = None
if fam is not None:
props = fam.post_process(iid, sids, fam_io_managers)
if sp is not None:
sp.post_process(iid, sids, props, sp_io_managers)
def col2grp(in_old, out_old, in_new, out_new):
"""Make old input/output files using a columnar id-based schema into a group
id-based schema. Currently only works for ExchangeFamily and
StructuredSpecies."""
in_old = t.open_file(in_old, mode='r')
out_old = t.open_file(out_old, mode='r')
in_new = t.open_file(in_new, mode='w')
out_new = t.open_file(out_new, mode='w')
# setup tables
arctbl = in_old.root.Species.StructuredRequest.Arcs \
if in_old.__contains__('/Species/StructuredRequest') else \
in_old.root.Species.StructuredSupply.Arcs
all_tbls = {in_new: [in_old.root.Family.ResourceExchange.ExchangeArcs,
arctbl,],
out_new: [out_old.root.Family.ResourceExchange.ExchangeInstSolutions,]}
old_files = {in_new: in_old,
out_new: out_old}
for h5f, tbls in all_tbls.items():
pths = [x._v_pathname for x in tbls]
oldf = old_files[h5f]
for node in oldf.walk_nodes(classname='Leaf'):
pth = node._v_pathname
if pth not in pths:
_copy_node(oldf.get_node(pth), h5f)
for h5f, tbls in all_tbls.items():
for tbl in tbls:
dtype = np.dtype(tbl.dtype.descr[1:])
path = tbl._v_pathname
colid = ''
ntbl = None
_copy_node(tbl._v_parent, h5f)
h5f.create_group(tbl._v_parent._v_pathname, tbl._v_name,
filters=FILTERS)
for x in tbl.iterrows():
y = str_to_uuid(x[0]).hex
if y != colid:
if ntbl is not None:
ntbl.flush()
colid = y
ntbl = cyclopts.cyclopts_io.Table(h5f, '/'.join([path, 'id_'+colid]), dt=dtype)
ntbl.create()
# either do [x[1:]] or append
ntbl.append_data([x[1:]])
ntbl.flush()
in_old.close()
out_old.close()
in_new.close()
out_new.close()
def masked_filter(c, mask, unmask=False):
"""Return a subset of a collection with a mask applied"""
if not unmask:
return [c[i] for i in range(len(c)) if mask[i]]
else:
return [c[i] for i in range(len(c)) if not mask[i]]
# def run_insts_mp():
# q = mp.Queue()
# pool = mp.Pool(4, multi_proc_gen, (q,))
# lock = mp.Lock()
# for point in sp.points():
# param_uuid = uuid.uuid4()
# if lock is not None:
# lock.acquire()
# sp.record_point(point, param_uuid, sp_manager.tables)
# if lock is not None:
# lock.release()
# for i in range(ninst):
# inst_uuid = uuid.uuid4()
# # q.put((inst_uuid, param_uuid, point, sp, fam,
# # fam_manager.tables, lock))
# q.put((inst_uuid, param_uuid, lock))
# while not q.empty():
# if verbose and q.qsize() % update_freq == 0:
# print('{0} instances have been converted'.format(n))
# time.sleep(1)
|
{
"content_hash": "6db49a236d8375211985dd4de5907fce",
"timestamp": "",
"source": "github",
"line_count": 695,
"max_line_length": 103,
"avg_line_length": 33.771223021582735,
"alnum_prop": 0.5668271483958928,
"repo_name": "gidden/cyclopts",
"id": "452fd6028b2ebc1e75591bee96890bd04964dc58",
"size": "23471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyclopts/tools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3245"
},
{
"name": "C++",
"bytes": "10533"
},
{
"name": "Python",
"bytes": "530544"
},
{
"name": "Shell",
"bytes": "295"
}
],
"symlink_target": ""
}
|
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def __init__(self):
self.pre = None
def isValidBST(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
if root:
if not self.isValidBST(root.left):
return False
if self.pre and root.val <= self.pre.val:
return False
self.pre = root
return self.isValidBST(root.right)
return True
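# Usage sketch (illustrative, not part of the original solution):
#   root = TreeNode(2); root.left = TreeNode(1); root.right = TreeNode(3)
#   Solution().isValidBST(root)  # -> True, in-order sequence 1, 2, 3 is strictly increasing
# Use a fresh Solution per call, since self.pre persists between calls on the same instance.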
|
{
"content_hash": "2b91e4d4f9dd549d5e9678ec0120e1f4",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 53,
"avg_line_length": 23.958333333333332,
"alnum_prop": 0.4956521739130435,
"repo_name": "Lanceolata/code-problems",
"id": "939c6591d495c72e238141a8f72ab7322730aa7e",
"size": "647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/leetcode/Question_0098_Validate_Binary_Search_Tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "127"
},
{
"name": "C++",
"bytes": "130299"
},
{
"name": "Java",
"bytes": "149575"
},
{
"name": "Python",
"bytes": "106289"
}
],
"symlink_target": ""
}
|
"""A class for managing IPython extensions."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
from shutil import copyfile
import sys
from traitlets.config.configurable import Configurable
from IPython.utils.path import ensure_dir_exists
from traitlets import Instance
try:
from importlib import reload
except ImportError:
## deprecated since 3.4
from imp import reload
#-----------------------------------------------------------------------------
# Main class
#-----------------------------------------------------------------------------
class ExtensionManager(Configurable):
"""A class to manage IPython extensions.
An IPython extension is an importable Python module that has
a function with the signature::
def load_ipython_extension(ipython):
# Do things with ipython
This function is called after your extension is imported and the
currently active :class:`InteractiveShell` instance is passed as
the only argument. You can do anything you want with IPython at
that point, including defining new magic and aliases, adding new
components, etc.
You can also optionally define an :func:`unload_ipython_extension(ipython)`
function, which will be called if the user unloads or reloads the extension.
The extension manager will only call :func:`load_ipython_extension` again
if the extension is reloaded.
You can put your extension modules anywhere you want, as long as
they can be imported by Python's standard import mechanism. However,
to make it easy to write extensions, you can also put your extensions
in ``os.path.join(self.ipython_dir, 'extensions')``. This directory
is added to ``sys.path`` automatically.
"""
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
def __init__(self, shell=None, **kwargs):
super(ExtensionManager, self).__init__(shell=shell, **kwargs)
self.shell.observe(
self._on_ipython_dir_changed, names=('ipython_dir',)
)
self.loaded = set()
@property
def ipython_extension_dir(self):
return os.path.join(self.shell.ipython_dir, u'extensions')
def _on_ipython_dir_changed(self, change):
ensure_dir_exists(self.ipython_extension_dir)
def load_extension(self, module_str):
"""Load an IPython extension by its module name.
Returns the string "already loaded" if the extension is already loaded,
"no load function" if the module doesn't have a load_ipython_extension
function, or None if it succeeded.
"""
if module_str in self.loaded:
return "already loaded"
from IPython.utils.syspathcontext import prepended_to_syspath
with self.shell.builtin_trap:
if module_str not in sys.modules:
with prepended_to_syspath(self.ipython_extension_dir):
__import__(module_str)
mod = sys.modules[module_str]
if self._call_load_ipython_extension(mod):
self.loaded.add(module_str)
else:
return "no load function"
def unload_extension(self, module_str):
"""Unload an IPython extension by its module name.
This function looks up the extension's name in ``sys.modules`` and
simply calls ``mod.unload_ipython_extension(self)``.
Returns the string "no unload function" if the extension doesn't define
a function to unload itself, "not loaded" if the extension isn't loaded,
otherwise None.
"""
if module_str not in self.loaded:
return "not loaded"
if module_str in sys.modules:
mod = sys.modules[module_str]
if self._call_unload_ipython_extension(mod):
self.loaded.discard(module_str)
else:
return "no unload function"
def reload_extension(self, module_str):
"""Reload an IPython extension by calling reload.
If the module has not been loaded before,
:meth:`InteractiveShell.load_extension` is called. Otherwise
:func:`reload` is called and then the :func:`load_ipython_extension`
function of the module, if it exists is called.
"""
from IPython.utils.syspathcontext import prepended_to_syspath
if (module_str in self.loaded) and (module_str in sys.modules):
self.unload_extension(module_str)
mod = sys.modules[module_str]
with prepended_to_syspath(self.ipython_extension_dir):
reload(mod)
if self._call_load_ipython_extension(mod):
self.loaded.add(module_str)
else:
self.load_extension(module_str)
def _call_load_ipython_extension(self, mod):
if hasattr(mod, 'load_ipython_extension'):
mod.load_ipython_extension(self.shell)
return True
def _call_unload_ipython_extension(self, mod):
if hasattr(mod, 'unload_ipython_extension'):
mod.unload_ipython_extension(self.shell)
return True
def install_extension(self, url, filename=None):
"""Download and install an IPython extension.
If filename is given, the file will be so named (inside the extension
directory). Otherwise, the name from the URL will be used. The file must
have a .py or .zip extension; otherwise, a ValueError will be raised.
Returns the full path to the installed file.
"""
# Ensure the extension directory exists
ensure_dir_exists(self.ipython_extension_dir)
if os.path.isfile(url):
src_filename = os.path.basename(url)
copy = copyfile
else:
# Deferred imports
try:
from urllib.parse import urlparse # Py3
from urllib.request import urlretrieve
except ImportError:
from urlparse import urlparse
from urllib import urlretrieve
src_filename = urlparse(url).path.split('/')[-1]
copy = urlretrieve
if filename is None:
filename = src_filename
if os.path.splitext(filename)[1] not in ('.py', '.zip'):
raise ValueError("The file must have a .py or .zip extension", filename)
filename = os.path.join(self.ipython_extension_dir, filename)
copy(url, filename)
return filename
|
{
"content_hash": "a0f50a20f4bc54d4f4f25a814f5161c5",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 90,
"avg_line_length": 38.30232558139535,
"alnum_prop": 0.6258348512446873,
"repo_name": "Edu-Glez/Bank_sentiment_analysis",
"id": "87c671130b84e8751820e890adb0127b64b9cb95",
"size": "6606",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "env/lib/python3.6/site-packages/IPython/core/extensions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lex",
"bytes": "101463"
},
{
"name": "Python",
"bytes": "29876"
},
{
"name": "Shell",
"bytes": "1509"
}
],
"symlink_target": ""
}
|
"""
talkoohakemisto
~~~~~~~~~~~~~~~
This module contains the Flask application core.
"""
import os
import warnings
import colander
from flask import Flask, jsonify, _request_ctx_stack
import itsdangerous
import jsonpatch
import jsonpointer
from sqlalchemy.exc import SAWarning
from sqlalchemy.orm.exc import NoResultFound
from .extensions import db, mail, sentry
warnings.simplefilter('error', SAWarning)
class Application(Flask):
def __init__(self, environment=None):
super(Application, self).__init__(__name__)
self._init_settings(environment)
self._init_extensions()
self._init_blueprints()
self._init_errorhandlers()
self._init_request_hooks()
def _init_settings(self, environment=None):
"""
Initialize application configuration.
This method loads the configuration from the given environment
(production, development, test). If no environment is given as an
argument, the environment is read from ``FLASK_ENV`` environmental
variable. If ``FLASK_ENV`` is not defined, the environment defaults to
development.
The environment specific configuration is loaded from the module
corresponding to the environment in :module:`.settings`.
:param environment: the application environment
"""
if environment is None:
environment = os.environ.get('FLASK_ENV', 'development')
settings_module = 'talkoohakemisto.settings.' + environment
self.config.from_object(settings_module)
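        # e.g. (illustrative) FLASK_ENV=production loads talkoohakemisto.settings.production,
        # while an unset FLASK_ENV falls back to talkoohakemisto.settings.development.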
def _init_blueprints(self):
from .views.municipality import municipality
from .views.type import type
from .views.voluntary_work import voluntary_work
self.register_blueprint(municipality)
self.register_blueprint(type)
self.register_blueprint(voluntary_work)
def _init_extensions(self):
"""Initialize and configure Flask extensions with this application."""
db.init_app(self)
mail.init_app(self)
self._init_raven()
def _init_raven(self):
from raven.conf import EXCLUDE_LOGGER_DEFAULTS, setup_logging
from raven.handlers.logging import SentryHandler
# Initialize Raven only if SENTRY_DSN setting is defined.
if not self.config.get('SENTRY_DSN'):
return
sentry.init_app(self)
handler = SentryHandler(sentry.client)
setup_logging(handler, exclude=EXCLUDE_LOGGER_DEFAULTS + (
'celery',
'newrelic',
'requests',
))
def _init_errorhandlers(self):
@self.errorhandler(400)
def bad_request(error):
return jsonify(message=u'Bad request'), 400
@self.errorhandler(jsonpatch.JsonPatchException)
@self.errorhandler(jsonpointer.JsonPointerException)
def json_patch_exception(error):
return jsonify(message=unicode(error)), 400
@self.errorhandler(403)
@self.errorhandler(itsdangerous.BadData)
def forbidden(error):
return jsonify(message=u'Forbidden'), 403
@self.errorhandler(404)
@self.errorhandler(NoResultFound)
def object_not_found(error):
return jsonify(message=u'Not found'), 404
@self.errorhandler(405)
def method_not_allowed(error):
return jsonify(
message=u'The method is not allowed for the requested URL.'
            ), 405
@self.errorhandler(colander.Invalid)
def invalid_data(error):
errors = [
{
'path': '/' + key.replace('.', '/'),
'reason': value
}
for key, value in error.asdict().iteritems()
]
return jsonify(message=u'Validation failed', errors=errors), 400
@self.errorhandler(500)
def internal_error(error):
return jsonify(message=u'Internal server error'), 500
def _init_request_hooks(self):
self.after_request(self._add_cors_headers)
self.after_request(self._ensure_response_has_proper_content_type)
def _add_cors_headers(self, response):
url_adapter = _request_ctx_stack.top.url_adapter
# url_adapter.allowed_methods()
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': ', '.join(
['HEAD', 'OPTIONS', 'GET', 'POST', 'PATCH', 'PUT']
),
'Access-Control-Allow-Headers': ', '.join([
'Accept',
'Content-Type',
'Origin',
'X-Requested-With',
])
}
print headers
response.headers.extend(headers)
return response
def _ensure_response_has_proper_content_type(self, response):
if response.mimetype == 'application/json':
response.mimetype = 'application/vnd.api+json'
return response
|
{
"content_hash": "f16c1681f4345124cce107562889b5fb",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 79,
"avg_line_length": 31.97452229299363,
"alnum_prop": 0.6107569721115538,
"repo_name": "talkoopaiva/talkoohakemisto-api",
"id": "18a0b35dd38efd13c24c7e33211ea315f86b80ef",
"size": "5044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "talkoohakemisto/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99171"
}
],
"symlink_target": ""
}
|
import weakref
from django.conf import settings as django_settings
from django.http import JsonResponse
from django.db import transaction
from rest_framework.decorators import api_view, throttle_classes
from rest_framework.response import Response
from rest_framework import generics
from rest_framework import status
from rest_framework import permissions as drf_permissions
from rest_framework.exceptions import ValidationError, NotFound
from framework.auth.oauth_scopes import CoreScopes
from rest_framework.mixins import ListModelMixin
from api.base import permissions as base_permissions
from api.base.exceptions import RelationshipPostMakesNoChanges
from api.base.filters import ListFilterMixin
from api.users.serializers import UserSerializer
from api.base.parsers import JSONAPIRelationshipParser
from api.base.parsers import JSONAPIRelationshipParserForRegularJSON
from api.base.requests import EmbeddedRequest
from api.base.serializers import LinkedNodesRelationshipSerializer
from api.base.serializers import LinkedRegistrationsRelationshipSerializer
from api.base.throttling import RootAnonThrottle, UserRateThrottle
from api.base import utils
from api.nodes.permissions import ReadOnlyIfRegistration
from api.nodes.permissions import ContributorOrPublic
from api.nodes.permissions import ContributorOrPublicForRelationshipPointers
from api.base.utils import is_bulk_request, get_user_auth
from website.models import Pointer
from osf.models.contributor import Contributor, get_contributor_permissions
CACHE = weakref.WeakKeyDictionary()
class JSONAPIBaseView(generics.GenericAPIView):
def __init__(self, **kwargs):
assert getattr(self, 'view_name', None), 'Must specify view_name on view.'
assert getattr(self, 'view_category', None), 'Must specify view_category on view.'
self.view_fqn = ':'.join([self.view_category, self.view_name])
super(JSONAPIBaseView, self).__init__(**kwargs)
def _get_embed_partial(self, field_name, field):
"""Create a partial function to fetch the values of an embedded field. A basic
example is to include a Node's children in a single response.
:param str field_name: Name of field of the view's serializer_class to load
results for
:return function object -> dict:
"""
if getattr(field, 'field', None):
field = field.field
def partial(item):
# resolve must be implemented on the field
v, view_args, view_kwargs = field.resolve(item, field_name, self.request)
if not v:
return None
if isinstance(self.request._request, EmbeddedRequest):
request = self.request._request
else:
request = EmbeddedRequest(self.request)
view_kwargs.update({
'request': request,
'is_embedded': True
})
# Setup a view ourselves to avoid all the junk DRF throws in
# v is a function that hides everything v.cls is the actual view class
view = v.cls()
view.args = view_args
view.kwargs = view_kwargs
view.request = request
view.request.parser_context['kwargs'] = view_kwargs
view.format_kwarg = view.get_format_suffix(**view_kwargs)
_cache_key = (v.cls, field_name, view.get_serializer_class(), item)
if _cache_key in CACHE.setdefault(self.request._request, {}):
# We already have the result for this embed, return it
return CACHE[self.request._request][_cache_key]
            # Cache serializers. to_representation of a serializer should NOT augment its fields so resetting the context
# should be sufficient for reuse
if not view.get_serializer_class() in CACHE.setdefault(self.request._request, {}):
CACHE[self.request._request][view.get_serializer_class()] = view.get_serializer_class()(many=isinstance(view, ListModelMixin))
ser = CACHE[self.request._request][view.get_serializer_class()]
try:
ser._context = view.get_serializer_context()
if not isinstance(view, ListModelMixin):
ret = ser.to_representation(view.get_object())
else:
queryset = view.filter_queryset(view.get_queryset())
page = view.paginate_queryset(queryset)
ret = ser.to_representation(page or queryset)
if page is not None:
request.parser_context['view'] = view
request.parser_context['kwargs'].pop('request')
view.paginator.request = request
ret = view.paginator.get_paginated_response(ret).data
except Exception as e:
with transaction.atomic():
ret = view.handle_exception(e).data
# Allow request to be gc'd
ser._context = None
# Cache our final result
CACHE[self.request._request][_cache_key] = ret
return ret
return partial
def get_serializer_context(self):
"""Inject request into the serializer context. Additionally, inject partial functions
(request, object -> embed items) if the query string contains embeds. Allows
multiple levels of nesting.
"""
context = super(JSONAPIBaseView, self).get_serializer_context()
if self.kwargs.get('is_embedded'):
embeds = []
else:
embeds = self.request.query_params.getlist('embed')
fields_check = self.serializer_class._declared_fields.copy()
for field in fields_check:
if getattr(fields_check[field], 'field', None):
fields_check[field] = fields_check[field].field
for field in fields_check:
if getattr(fields_check[field], 'always_embed', False) and field not in embeds:
embeds.append(unicode(field))
if getattr(fields_check[field], 'never_embed', False) and field in embeds:
embeds.remove(field)
embeds_partials = {}
for embed in embeds:
embed_field = fields_check.get(embed)
embeds_partials[embed] = self._get_embed_partial(embed, embed_field)
context.update({
'enable_esi': (
utils.is_truthy(self.request.query_params.get('esi', django_settings.ENABLE_ESI)) and
self.request.accepted_renderer.media_type in django_settings.ESI_MEDIA_TYPES
),
'embed': embeds_partials,
'envelope': self.request.query_params.get('envelope', 'data'),
})
return context
class LinkedNodesRelationship(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, generics.CreateAPIView):
""" Relationship Endpoint for Linked Node relationships
Used to set, remove, update and retrieve the ids of the linked nodes attached to this collection. For each id, there
exists a node link that contains that node.
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 201
This requires both edit permission on the collection, and for the user that is
making the request to be able to read the nodes requested. Data can contain any number of
node identifiers. This will create a node_link for all node_ids in the request that
do not currently have a corresponding node_link in this collection.
###Update
Method: PUT || PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 200
This requires both edit permission on the collection and for the user that is
making the request to be able to read the nodes requested. Data can contain any number of
node identifiers. This will replace the contents of the node_links for this collection with
the contents of the request. It will delete all node links that don't have a node_id in the data
array, create node links for the node_ids that don't currently have a node id, and do nothing
    for node_ids that already have a corresponding node_link. This means an update request with
{"data": []} will remove all node_links in this collection
###Destroy
Method: DELETE
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_nodes", # required
"id": <node_id> # required
}]
}
Success: 204
This requires edit permission on the node. This will delete any node_links that have a
corresponding node_id in the request.
"""
permission_classes = (
ContributorOrPublicForRelationshipPointers,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ReadOnlyIfRegistration,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_LINKS_WRITE]
serializer_class = LinkedNodesRelationshipSerializer
parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON, )
def get_object(self):
object = self.get_node(check_object_permissions=False)
auth = utils.get_user_auth(self.request)
obj = {'data': [
pointer for pointer in
object.linked_nodes.filter(is_deleted=False, type='osf.node')
if pointer.can_view(auth)
], 'self': object}
self.check_object_permissions(self.request, obj)
return obj
def perform_destroy(self, instance):
data = self.request.data['data']
auth = utils.get_user_auth(self.request)
current_pointers = {pointer._id: pointer for pointer in instance['data']}
collection = instance['self']
for val in data:
if val['id'] in current_pointers:
collection.rm_pointer(current_pointers[val['id']], auth)
def create(self, *args, **kwargs):
try:
ret = super(LinkedNodesRelationship, self).create(*args, **kwargs)
except RelationshipPostMakesNoChanges:
return Response(status=status.HTTP_204_NO_CONTENT)
return ret
class LinkedRegistrationsRelationship(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView, generics.CreateAPIView):
""" Relationship Endpoint for Linked Registrations relationships
Used to set, remove, update and retrieve the ids of the linked registrations attached to this collection. For each id, there
exists a node link that contains that registration.
##Actions
###Create
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_registrations", # required
"id": <node_id> # required
}]
}
Success: 201
This requires both edit permission on the collection, and for the user that is
making the request to be able to read the registrations requested. Data can contain any number of
node identifiers. This will create a node_link for all node_ids in the request that
do not currently have a corresponding node_link in this collection.
###Update
Method: PUT || PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_registrations", # required
"id": <node_id> # required
}]
}
Success: 200
This requires both edit permission on the collection and for the user that is
making the request to be able to read the registrations requested. Data can contain any number of
node identifiers. This will replace the contents of the node_links for this collection with
the contents of the request. It will delete all node links that don't have a node_id in the data
array, create node links for the node_ids that don't currently have a node id, and do nothing
    for node_ids that already have a corresponding node_link. This means an update request with
{"data": []} will remove all node_links in this collection
###Destroy
Method: DELETE
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": [{
"type": "linked_registrations", # required
"id": <node_id> # required
}]
}
Success: 204
This requires edit permission on the node. This will delete any node_links that have a
corresponding node_id in the request.
"""
permission_classes = (
ContributorOrPublicForRelationshipPointers,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ReadOnlyIfRegistration,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NODE_LINKS_WRITE]
serializer_class = LinkedRegistrationsRelationshipSerializer
parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON, )
def get_object(self):
object = self.get_node(check_object_permissions=False)
auth = utils.get_user_auth(self.request)
obj = {'data': [
pointer for pointer in
object.linked_nodes.filter(is_deleted=False, type='osf.registration')
if pointer.can_view(auth)
], 'self': object}
self.check_object_permissions(self.request, obj)
return obj
def perform_destroy(self, instance):
data = self.request.data['data']
auth = utils.get_user_auth(self.request)
current_pointers = {pointer.node._id: pointer for pointer in instance['data']}
collection = instance['self']
for val in data:
if val['id'] in current_pointers:
collection.rm_pointer(current_pointers[val['id']], auth)
def create(self, *args, **kwargs):
try:
ret = super(LinkedRegistrationsRelationship, self).create(*args, **kwargs)
except RelationshipPostMakesNoChanges:
return Response(status=status.HTTP_204_NO_CONTENT)
return ret
@api_view(('GET',))
@throttle_classes([RootAnonThrottle, UserRateThrottle])
def root(request, format=None, **kwargs):
"""Welcome to the V2 Open Science Framework API. With this API you can access users, projects, components, logs, and files
from the [Open Science Framework](https://osf.io/). The Open Science Framework (OSF) is a free, open-source service
maintained by the [Center for Open Science](http://cos.io/).
The OSF serves as a repository and archive for study designs, materials, data, manuscripts, or anything else
associated with your research during the research process. Every project and file on the OSF has a permanent unique
identifier, and every registration (a permanent, time-stamped version of your projects and files) can be assigned a
DOI/ARK. You can use the OSF to measure your impact by monitoring the traffic to projects and files you make
public. With the OSF you have full control of what parts of your research are public and what remains private.
Beta notice: This API is currently a beta service. You are encouraged to use the API and will receive support
when doing so, however, while the API remains in beta status, it may change without notice as a result of
product updates. The temporary beta status of the API will remain in place while it matures. In a future
release, the beta status will be removed, at which point we will provide details on how long we will support
the API V2 and under what circumstances it might change.
#General API Usage
The OSF API generally conforms to the [JSON-API v1.0 spec](http://jsonapi.org/format/1.0/). Where exceptions
exist, they will be noted. Each endpoint will have its own documentation, but there are some general principles.
Assume undocumented routes/features/fields are unstable.
##Requests
###Canonical URLs
All canonical URLs have trailing slashes. A request to an endpoint without a trailing slash will result in a 301
redirect to the canonical URL. There are some exceptions when working with the Files API, so if a URL in a response
does not have a slash, do not append one.
###Plurals
Endpoints are always pluralized. `/users/`, not `/user/`, `/nodes/`, not `/node/`.
###Common Actions
Every endpoint in the OSF API responds to `GET`, `HEAD`, and `OPTIONS` requests. You must have adequate permissions
to interact with the endpoint. Unauthorized use will result in 401 Unauthorized or 403 Forbidden responses. Use
`HEAD` to probe an endpoint and make sure your headers are well-formed. `GET` will return a representation of the
entity or entity collection referenced by the endpoint. An `OPTIONS` request will return a JSON object that describes the
endpoint, including the name, a description, the acceptable request formats, the allowed response formats, and any
actions available via the endpoint.
###Versioning
Versioning can be specified in three different ways:
1. URL Path Versioning, e.g. `/v2/` or `/v3/`
+ A version specified via the URL path is a **required** part of the URL.
+ Only a major version can be specified via the URL path; paths such as `/v2.0.6/` or `/v2.0/` are invalid.
+ If the default version of the API is within the major version specified in the URL path,
the default version will be applied (i.e. if the default version is `2.3` and the URL path is `/v2/`,
then the version returned will be `2.3`).
+ If the default version of the API is not within the major version specified in the URL path,
the URL path version will be applied (i.e. if the default version is `3.0` and the URL path is `/v2/`,
then the version returned will be `2.0`).
2. Query Parameter Versioning, e.g. `/v2/nodes/?version=2.1.6`
+ Pinning to a specific version via a query parameter is **optional**.
+ A specific version (major, minor, or patch) for a single request can be specified via the `version`
query parameter, as long as it is an allowed version.
+ If the version specified in the query parameter does not fall within the same major version
specified in the URL path, e.g. `/v2/nodes/?version=3.1.4`, a `409 Conflict` response will be returned.
3. Header Versioning, e.g. `Accept: application/vnd.api+json;version=3.0.1`
+ Pinning to a specific version via request header is **optional**.
+ A specific version (major, minor, or patch) for a single request can be specified
via the `Accept` header of the request, as long as it is an allowed version.
+ If the version specified in the header does not fall within the same major version specified
in the URL path a `409 Conflict` response will be returned.
+ If both a header version and query parameter version are specified, the versions must match exactly
or a `409 Conflict` response will be returned (i.e. one does not take precedence over the other).
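As a minimal sketch (using the `requests` library; the base URL and version number are only
illustrative), the same pinned version can be requested either way:

import requests

base = 'https://api.osf.io/v2/nodes/'

# Pin a version with a query parameter ...
r1 = requests.get(base, params={'version': '2.1'})

# ... or with the Accept header. If both are supplied, they must match.
r2 = requests.get(
    base,
    headers={'Accept': 'application/vnd.api+json;version=2.1'},
)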
###Filtering
Entity collections can be filtered by adding a query parameter in the form:
filter[<fieldname>]=<matching information>
String queries are filtered using substring matching. For example, if you were trying to find [Lise
Meitner](http://en.wikipedia.org/wiki/Lise_Meitner):
/users/?filter[full_name]=meitn
You can filter on multiple fields, or the same field in different ways, by &-ing the query parameters together.
/users/?filter[full_name]=lise&filter[family_name]=mei
Boolean fields should be queried with `true` or `false`.
/nodes/?filter[registered]=true
You can request multiple resources by filtering on id and placing comma-separated values in your query parameter.
/nodes/?filter[id]=aegu6,me23a
You can filter with case-sensitivity or case-insensitivity by using `contains` and `icontains`, respectively.
/nodes/?filter[tags][icontains]=help
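The same filters can be built programmatically; a small sketch with the `requests` library
(the query values are only examples):

import requests

# Substring matches on two different name fields, &-ed together.
params = {
    'filter[full_name]': 'lise',
    'filter[family_name]': 'mei',
}
response = requests.get('https://api.osf.io/v2/users/', params=params)
users = response.json()['data']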
###Embedding
All related resources that appear in the `relationships` attribute are embeddable, meaning that
by adding a query parameter like:
/nodes/?embed=contributors
it is possible to fetch a Node and its contributors in a single request. The embedded results will have the following
structure:
{relationship_name}: {full_embedded_response}
Where `full_embedded_response` means the full API response resulting from a GET request to the `href` link of the
corresponding related resource. This means if there are no errors in processing the embedded request the response will have
the format:
data: {response}
And if there are errors processing the embedded request the response will have the format:
errors: {errors}
Multiple embeds can be achieved with multiple query parameters separated by "&".
/nodes/?embed=contributors&embed=comments
Some endpoints are automatically embedded.
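For example, a node and its contributors can be fetched in one request like this (a sketch with
the `requests` library; the node id is a placeholder):

import requests

url = 'https://api.osf.io/v2/nodes/<node_id>/'
response = requests.get(url, params={'embed': 'contributors'})
node = response.json()['data']
# Following the structure described above, the embedded contributor response
# is nested under the `embeds` key.
contributors = node['embeds']['contributors']['data']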
###Pagination
All entity collection endpoints respond to the `page` query parameter behavior as described in the [JSON-API
pagination spec](http://jsonapi.org/format/1.0/#crud). However, pagination links are provided in the response, and
you are encouraged to use that rather than adding query parameters by hand.
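In practice that means a client can walk an entire collection by following the `next` link until
it is null, for example (a sketch using the `requests` library):

import requests

url = 'https://api.osf.io/v2/nodes/'
while url:
    payload = requests.get(url).json()
    for node in payload['data']:
        print(node['id'])
    # `next` is null on the last page, which ends the loop.
    url = payload['links']['next']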
###Formatting POST/PUT/PATCH request bodies
The OSF API follows the JSON-API spec for [create and update requests](http://jsonapi.org/format/1.0/#crud). This means
all request bodies must be wrapped with some metadata. Each request body must be an object with a `data` key
containing at least a `type` member. The value of the `type` member must agree with the `type` of the entities
represented by the endpoint. If not, a 409 Conflict will be returned. The request should also contain an
`attributes` member with an object containing the key-value pairs to be created/updated. PUT/PATCH requests must
also have an `id` key that matches the id part of the endpoint. If the `id` key does not match the id path part, a
409 Conflict error will be returned.
####Example 1: Creating a Node via POST
POST /v2/nodes/
{
"data": {
"type": "nodes",
"attributes": {
"title" : "A Phylogenetic Tree of Famous Internet Cats",
"category" : "project",
"description" : "How closely related are Grumpy Cat and C.H. Cheezburger? Is memefulness inheritable?"
}
}
}
####Example 2: Updating a User via PUT
PUT /v2/users/me/
{
"data": {
"id": "3rqxc",
"type": "users",
"attributes": {
"full_name" : "Henrietta Swan Leavitt",
"given_name" : "Henrietta",
"middle_names" : "Swan",
"family_name" : "Leavitt"
}
}
}
**NB:** If you PUT/PATCH to the `/users/me/` endpoint, you must still provide your full user id in the `id` field of
the request. We do not support using the `me` alias in request bodies at this time.
###PUT vs. PATCH
For most endpoints that support updates via PUT requests, we also allow PATCH updates. The only difference is that
PUT requests require all mandatory attributes to be set, even if their value is unchanged. PATCH requests may
omit mandatory attributes, whose values will be left unchanged.
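For instance, the name change from Example 2 could be sent as a PATCH that supplies only the
attribute being changed (a sketch with the `requests` library; the token is a placeholder):

import requests

url = 'https://api.osf.io/v2/users/3rqxc/'
body = {
    'data': {
        'id': '3rqxc',
        'type': 'users',
        'attributes': {
            # Other mandatory attributes may be omitted in a PATCH.
            'full_name': 'Henrietta Swan Leavitt',
        },
    },
}
response = requests.patch(
    url,
    json=body,
    headers={'Authorization': 'Bearer <personal-access-token>'},
)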
###Attribute Validation
Endpoints that allow creation or modification of entities generally limit updates to certain attributes of the
entity. If you attempt to set an attribute that does not permit updates (such as a `date_created` timestamp), the
API will silently ignore that attribute. This will not affect the response from the API: if the request would have
succeeded without the updated attribute, it will still report as successful. Likewise, if the request would have
failed without the attribute update, the API will still report a failure.
Misspelled or non-existent attributes behave the same as non-updatable attributes and are silently ignored. If a
request is not working the way you expect, double-check your spelling.
##Responses
###Entities
An entity is a single resource that has been retrieved from the API, usually from an endpoint with the entity's id
as the final path part. A successful response from an entity request will be a JSON object with a top level `data`
key pointing to a sub-object with the following members:
+ `id`
The identifier for the entity. This MUST be included with [PUT and PATCH
requests](#formatting-postputpatch-request-bodies).
+ `type`
The type identifier of this entity. This MUST be included with [all create/update
requests](#formatting-postputpatch-request-bodies).
+ `attributes`
The properties of the entity. Names, descriptions, etc.
+ `relationships`
Relationships are urls to other entities or entity collections that have a relationship to the entity. For example,
the node entity provides a `contributors` relationship that points to the endpoint to retrieve all contributors to
that node. It is recommended to use these links rather than to id-filter general entity collection endpoints.
They'll be faster, easier, and less error-prone. Generally a relationship will have the following structure:
{relationship_name}: {
"links": {
"related": {
"href": {url_to_related_entity_or_entity_collection},
"meta": {}
}
}
}
If there are no related entities, `href` will be null.
+ `embeds`
Please see `Embedding` documentation under `Requests`.
+ `links`
Links are urls to alternative representations of the entity or actions that may be performed on the entity. Most
entities will provide a `self` link that is the canonical endpoint for the entity where update and delete requests
should be sent. In-depth documentation of actions is available by navigating to the `self` link in the Browsable
API. Most entities will also provide an `html` link that directs to the entity's page on the [OSF](http://osf.io/).
###Entity Collections
Entity collection endpoints return a list of entities and an additional data structure with pagination links, such as
"next", "prev", "first", and "last". The OSF API limits all entity collection responses to a maximum of 10 entities.
The response object has two keys:
+ `data`
`data` is an array of entities that match the query. Each entity in the array is the same representation that is
returned from that entity's `self` link, meaning that refetching the entity is unnecessary.
+ `links`
`links` contains pagination information, including links to the previous, next, first, and last pages of results.
The meta key contains the total number of entities available, as well as the current number of results displayed per
page. If there are only enough results to fill one page, the `first`, `last`, `prev`, and `next` values will be
null.
###Errors
When a request fails for whatever reason, the OSF API will return an appropriate HTTP error code and include a
descriptive error in the body of the response. The response body will be an object with a key, `errors`, pointing
to an array of error objects. Generally, these error objects will consist of a `detail` key with a detailed error
message and a `source` object that may contain a field `pointer` that is a [JSON
Pointer](https://tools.ietf.org/html/rfc6901) to the error-causing attribute. The `error` objects may include
additional information in accordance with the [JSON-API error spec](http://jsonapi.org/format/1.0/#error-objects).
####Example: Error response from an incorrect create node request
{
"errors": [
{
"source": {
"pointer": "/data/attributes/category"
},
"detail": "This field is required."
},
{
"source": {
"pointer": "/data/type"
},
"detail": "This field may not be null."
},
{
"source": {
"pointer": "/data/attributes/title"
},
"detail": "This field is required."
}
]
}
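A client can surface these messages directly, for example (a sketch with the `requests` library;
the token is a placeholder and the deliberately incomplete body will produce errors like those above):

import requests

# Missing title and category, so the API responds with an errors array.
response = requests.post(
    'https://api.osf.io/v2/nodes/',
    json={'data': {'type': 'nodes', 'attributes': {}}},
    headers={'Authorization': 'Bearer <personal-access-token>'},
)
if response.status_code >= 400:
    for error in response.json()['errors']:
        print(error.get('source', {}).get('pointer'), error.get('detail'))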
##OSF Enum Fields
Some entities in the OSF API have fields that only take a restricted set of values. Those fields are listed here
for reference. Fuller descriptions are available on the relevant entity pages.
###OSF Node Categories
value description
==========================================
project Project
hypothesis Hypothesis
methods and measures Methods and Measures
procedure Procedure
instrumentation Instrumentation
data Data
analysis Analysis
communication Communication
other Other
###OSF Node Permission keys
value description
==========================================
read Read-only access
write Write access (make changes, cannot delete)
admin Admin access (full write, create, delete, contributor add)
###Storage Providers
Valid storage providers are:
value description
==========================================
box Box.com
dataverse Dataverse
dropbox Dropbox
figshare figshare
github GitHub
googledrive Google Drive
osfstorage OSF Storage
s3 Amazon S3
"""
if request.user and not request.user.is_anonymous():
user = request.user
current_user = UserSerializer(user, context={'request': request}).data
else:
current_user = None
kwargs = request.parser_context['kwargs']
return_val = {
'meta': {
'message': 'Welcome to the OSF API.',
'version': request.version,
'current_user': current_user,
},
'links': {
'nodes': utils.absolute_reverse('nodes:node-list', kwargs=kwargs),
'users': utils.absolute_reverse('users:user-list', kwargs=kwargs),
'collections': utils.absolute_reverse('collections:collection-list', kwargs=kwargs),
'registrations': utils.absolute_reverse('registrations:registration-list', kwargs=kwargs),
'institutions': utils.absolute_reverse('institutions:institution-list', kwargs=kwargs),
'licenses': utils.absolute_reverse('licenses:license-list', kwargs=kwargs),
'metaschemas': utils.absolute_reverse('metaschemas:metaschema-list', kwargs=kwargs),
'addons': utils.absolute_reverse('addons:addon-list', kwargs=kwargs),
}
}
if utils.has_admin_scope(request):
return_val['meta']['admin'] = True
return Response(return_val)
def error_404(request, format=None, *args, **kwargs):
return JsonResponse(
{'errors': [{'detail': 'Not found.'}]},
status=404,
content_type='application/vnd.api+json; application/json'
)
class BaseContributorDetail(JSONAPIBaseView, generics.RetrieveAPIView):
# overrides RetrieveAPIView
def get_object(self):
node = self.get_node()
user = self.get_user()
# May raise a permission denied
self.check_object_permissions(self.request, user)
try:
contributor = node.contributor_set.get(user=user)
except Contributor.DoesNotExist:
raise NotFound('{} cannot be found in the list of contributors.'.format(user))
user.permission = get_contributor_permissions(contributor, as_list=False)
user.bibliographic = contributor.visible
user.node_id = node._id
user.index = list(node.get_contributor_order()).index(contributor.id)
return user
class BaseContributorList(JSONAPIBaseView, generics.ListAPIView, ListFilterMixin):
def get_default_queryset(self):
node = self.get_node()
visible_contributors = set(node.visible_contributor_ids)
contributors = []
index = 0
for contributor in node.contributors:
contributor.index = index
contributor.bibliographic = contributor._id in visible_contributors
contributor.permission = node.get_permissions(contributor)[-1]
contributor.node_id = node._id
contributors.append(contributor)
index += 1
return contributors
def get_queryset(self):
queryset = self.get_queryset_from_request()
# If bulk request, queryset only contains contributors in request
if is_bulk_request(self.request):
contrib_ids = []
for item in self.request.data:
try:
contrib_ids.append(item['id'].split('-')[1])
except AttributeError:
raise ValidationError('Contributor identifier not provided.')
except IndexError:
raise ValidationError('Contributor identifier incorrectly formatted.')
queryset[:] = [contrib for contrib in queryset if contrib._id in contrib_ids]
return queryset
class BaseNodeLinksDetail(JSONAPIBaseView, generics.RetrieveAPIView):
pass
class BaseNodeLinksList(JSONAPIBaseView, generics.ListAPIView):
def get_queryset(self):
auth = get_user_auth(self.request)
query = self.get_node()\
.node_relations.select_related('child')\
.filter(is_node_link=True, child__is_deleted=False)\
.exclude(child__type='osf.collection')
return sorted([
node_link for node_link in query
if node_link.child.can_view(auth) and not node_link.child.is_retracted
], key=lambda node_link: node_link.child.date_modified, reverse=True)
class BaseLinkedList(JSONAPIBaseView, generics.ListAPIView):
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NULL]
# subclass must set
serializer_class = None
view_category = None
view_name = None
model_class = Pointer
def get_queryset(self):
auth = get_user_auth(self.request)
linked_node_ids = [
each.id for each in self.get_node().linked_nodes
.filter(is_deleted=False)
.exclude(type='osf.collection')
.order_by('-date_modified')
if each.can_view(auth)
]
return self.get_node().linked_nodes.filter(id__in=linked_node_ids)
|
{
"content_hash": "8135b6db11b2f9658338ca2d1e2cf12c",
"timestamp": "",
"source": "github",
"line_count": 859,
"max_line_length": 142,
"avg_line_length": 42.37019790454016,
"alnum_prop": 0.6433948785580833,
"repo_name": "mluo613/osf.io",
"id": "930d70c6047678843606247c387dca775477ebbf",
"size": "36396",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api/base/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "176516"
},
{
"name": "HTML",
"bytes": "181210"
},
{
"name": "JavaScript",
"bytes": "2015658"
},
{
"name": "Jupyter Notebook",
"bytes": "19626"
},
{
"name": "Mako",
"bytes": "748050"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "8492180"
},
{
"name": "Shell",
"bytes": "379"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Test the filesystem related flows."""
import io
import os
import platform
from unittest import mock
from absl import app
from grr_response_core.lib import utils
from grr_response_core.lib.parsers import windows_registry_parser as winreg_parser
from grr_response_core.lib.parsers import wmi_parser
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import file_finder as rdf_file_finder
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.util import compatibility
from grr_response_core.lib.util import temp
from grr_response_server import data_store
from grr_response_server import file_store
from grr_response_server import notification
from grr_response_server.databases import db
# TODO(user): break the dependency cycle described in filesystem.py and
# remove this import.
# pylint: disable=unused-import
from grr_response_server.flows.general import collectors
# pylint: enable=unused-import
from grr_response_server.flows.general import file_finder
from grr_response_server.flows.general import filesystem
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import acl_test_lib
from grr.test_lib import action_mocks
from grr.test_lib import flow_test_lib
from grr.test_lib import parser_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
class TestFilesystem(flow_test_lib.FlowTestsBaseclass):
"""Test the interrogate flow."""
def setUp(self):
super().setUp()
self.client_id = self.SetupClient(0)
def testListDirectoryOnFile(self):
"""OS ListDirectory on a file will raise."""
client_mock = action_mocks.ListDirectoryClientMock()
pb = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test_img.dd"),
pathtype=rdf_paths.PathSpec.PathType.OS)
# Make sure the flow raises.
with self.assertRaises(RuntimeError):
with test_lib.SuppressLogs():
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.ListDirectory),
client_mock,
client_id=self.client_id,
pathspec=pb,
creator=self.test_username)
def testListDirectory(self):
"""Test that the ListDirectory flow works."""
client_mock = action_mocks.ListDirectoryClientMock()
pb = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test_img.dd"),
pathtype=rdf_paths.PathSpec.PathType.OS)
pb.Append(path="test directory", pathtype=rdf_paths.PathSpec.PathType.TSK)
with mock.patch.object(notification, "Notify") as mock_notify:
# Change the username so we get a notification about the flow termination.
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.ListDirectory),
client_mock,
client_id=self.client_id,
pathspec=pb,
creator="User")
self.assertEqual(mock_notify.call_count, 1)
args = list(mock_notify.mock_calls[0])[1]
self.assertEqual(args[0], "User")
com = rdf_objects.UserNotification.Type.TYPE_VFS_LIST_DIRECTORY_COMPLETED
self.assertEqual(args[1], com)
self.assertIn(pb.path, args[2])
children = self._ListTestChildPathInfos(["test_img.dd"])
self.assertLen(children, 1)
self.assertEqual(children[0].components[-1], "Test Directory")
def testListDirectoryOnNonexistentDir(self):
"""Test that the ListDirectory flow works."""
client_mock = action_mocks.ListDirectoryClientMock()
pb = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test_img.dd"),
pathtype=rdf_paths.PathSpec.PathType.OS)
pb.Append(path="doesnotexist", pathtype=rdf_paths.PathSpec.PathType.TSK)
with self.assertRaises(RuntimeError):
with test_lib.SuppressLogs():
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.ListDirectory),
client_mock,
client_id=self.client_id,
pathspec=pb,
creator=self.test_username)
def _ListTestChildPathInfos(self,
path_components,
path_type=rdf_objects.PathInfo.PathType.TSK):
components = self.base_path.strip("/").split("/") + path_components
return data_store.REL_DB.ListChildPathInfos(self.client_id, path_type,
components)
def testUnicodeListDirectory(self):
"""Test that the ListDirectory flow works on unicode directories."""
client_mock = action_mocks.ListDirectoryClientMock()
pb = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test_img.dd"),
pathtype=rdf_paths.PathSpec.PathType.OS)
filename = "入乡随俗 海外春节别样过法"
pb.Append(path=filename, pathtype=rdf_paths.PathSpec.PathType.TSK)
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.ListDirectory),
client_mock,
client_id=self.client_id,
pathspec=pb,
creator=self.test_username)
# Check the output file is created
components = ["test_img.dd", filename]
children = self._ListTestChildPathInfos(components)
self.assertLen(children, 1)
filename = children[0].components[-1]
self.assertEqual(filename, "入乡随俗.txt")
def testRecursiveListDirectory(self):
"""Test that RecursiveListDirectory lists files only up to max depth."""
client_mock = action_mocks.ListDirectoryClientMock()
dir_components = ["dir1", "dir2", "dir3", "dir4"]
with temp.AutoTempDirPath(remove_non_empty=True) as temp_dirpath:
os.makedirs(os.path.join(temp_dirpath, *dir_components))
pathspec = rdf_paths.PathSpec(
path=temp_dirpath, pathtype=rdf_paths.PathSpec.PathType.OS)
flow_id = flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.RecursiveListDirectory),
client_mock,
client_id=self.client_id,
pathspec=pathspec,
max_depth=2,
creator=self.test_username)
results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
self.assertLen(results, 2)
dirs = [_.pathspec.Basename() for _ in results]
self.assertCountEqual(dirs, ["dir1", "dir2"])
def testRecursiveListDirectoryTrivial(self):
"""Test that RecursiveListDirectory lists files only up to max depth."""
client_mock = action_mocks.ListDirectoryClientMock()
dir_components = ["dir1", "dir2"]
with temp.AutoTempDirPath(remove_non_empty=True) as temp_dirpath:
os.makedirs(os.path.join(temp_dirpath, *dir_components))
pathspec = rdf_paths.PathSpec(
path=temp_dirpath, pathtype=rdf_paths.PathSpec.PathType.OS)
flow_id = flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.RecursiveListDirectory),
client_mock,
client_id=self.client_id,
pathspec=pathspec,
max_depth=1,
creator=self.test_username)
results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
self.assertLen(results, 1)
self.assertEqual(results[0].pathspec.Basename(), "dir1")
def testRecursiveListDirectoryDeep(self):
"""Test that RecursiveListDirectory lists files only up to max depth."""
client_mock = action_mocks.ListDirectoryClientMock()
dir_components = ["dir1", "dir2", "dir3", "dir4", "dir5"]
with temp.AutoTempDirPath(remove_non_empty=True) as temp_dirpath:
os.makedirs(os.path.join(temp_dirpath, *dir_components))
pathspec = rdf_paths.PathSpec(
path=temp_dirpath, pathtype=rdf_paths.PathSpec.PathType.OS)
flow_id = flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.RecursiveListDirectory),
client_mock,
client_id=self.client_id,
pathspec=pathspec,
max_depth=3,
creator=self.test_username)
results = flow_test_lib.GetFlowResults(self.client_id, flow_id)
self.assertLen(results, 3)
dirs = [_.pathspec.Basename() for _ in results]
self.assertCountEqual(dirs, ["dir1", "dir2", "dir3"])
def testGlob(self):
"""Test that glob works properly."""
users = [
rdf_client.User(username="test"),
rdf_client.User(username="syslog")
]
client_id = self.SetupClient(0, users=users)
client_mock = action_mocks.GlobClientMock()
# This glob selects all files which start with the username on this system.
paths = [
os.path.join(self.base_path, "%%Users.username%%*"),
os.path.join(self.base_path, "VFSFixture/var/*/wtmp")
]
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.Glob),
client_mock,
client_id=client_id,
paths=paths,
pathtype=rdf_paths.PathSpec.PathType.OS,
creator=self.test_username,
check_flow_errors=False)
expected_files = [
filename for filename in os.listdir(self.base_path)
if filename.startswith("test") or filename.startswith("syslog")
]
expected_files.append("VFSFixture")
children = self._ListTestChildPathInfos(
[], path_type=rdf_objects.PathInfo.PathType.OS)
found_files = [child.components[-1] for child in children]
self.assertCountEqual(expected_files, found_files)
children = self._ListTestChildPathInfos(
["VFSFixture", "var", "log"],
path_type=rdf_objects.PathInfo.PathType.OS)
self.assertLen(children, 1)
self.assertEqual(children[0].components[-1], "wtmp")
def _RunGlob(self, paths):
client_mock = action_mocks.GlobClientMock()
session_id = flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.Glob),
client_mock,
client_id=self.client_id,
paths=paths,
pathtype=rdf_paths.PathSpec.PathType.OS,
creator=self.test_username)
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
return [st.pathspec.path for st in results]
def testGlobWithStarStarRootPath(self):
"""Test ** expressions with root_path."""
users = [
rdf_client.User(username="test"),
rdf_client.User(username="syslog")
]
self.client_id = self.SetupClient(0, users=users)
client_mock = action_mocks.GlobClientMock()
# Glob for foo at a depth of 4.
path = os.path.join("foo**4")
root_path = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test_img.dd"),
pathtype=rdf_paths.PathSpec.PathType.OS)
root_path.Append(path="/", pathtype=rdf_paths.PathSpec.PathType.TSK)
# Run the flow.
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.Glob),
client_mock,
client_id=self.client_id,
paths=[path],
root_path=root_path,
pathtype=rdf_paths.PathSpec.PathType.OS,
creator=self.test_username)
children = self._ListTestChildPathInfos(
["test_img.dd", "glob_test", "a", "b"])
self.assertLen(children, 1)
self.assertEqual(children[0].components[-1], "foo")
def _MakeTestDirs(self):
fourth_level_dir = utils.JoinPath(self.temp_dir, "1/2/3/4")
os.makedirs(fourth_level_dir)
top_level_path = self.temp_dir
io.open(utils.JoinPath(top_level_path, "bar"), "wb").close()
for level in range(1, 5):
top_level_path = utils.JoinPath(top_level_path, str(level))
for filename in ("foo", "fOo", "bar"):
file_path = utils.JoinPath(top_level_path, filename + str(level))
io.open(file_path, "wb").close()
self.assertTrue(os.path.exists(file_path))
def testGlobWithStarStar(self):
"""Test that ** expressions mean recursion."""
self._MakeTestDirs()
# Test filename and directory with spaces
os.makedirs(utils.JoinPath(self.temp_dir, "1/2 space"))
path_spaces = utils.JoinPath(self.temp_dir, "1/2 space/foo something")
io.open(path_spaces, "wb").close()
self.assertTrue(os.path.exists(path_spaces))
# Get the foos using default of 3 directory levels.
paths = [os.path.join(self.temp_dir, "1/**/foo*")]
# Handle filesystem case insensitivity
expected_results = [
"1/2/3/4/foo4", "/1/2/3/foo3", "1/2/foo2", "1/2 space/foo something"
]
if platform.system() == "Linux":
expected_results = [
"1/2/3/4/fOo4", "1/2/3/4/foo4", "/1/2/3/fOo3", "/1/2/3/foo3",
"1/2/fOo2", "1/2/foo2", "1/2 space/foo something"
]
expected_results = [
utils.JoinPath(self.temp_dir, x) for x in expected_results
]
results = self._RunGlob(paths)
self.assertCountEqual(results, expected_results)
# Get the files 2 levels down only.
paths = [os.path.join(self.temp_dir, "1/", "**2/foo*")]
# Handle filesystem case insensitivity
expected_results = ["1/2/3/foo3", "/1/2/foo2", "1/2 space/foo something"]
if platform.system() == "Linux":
expected_results = [
"1/2/3/foo3", "1/2/3/fOo3", "/1/2/fOo2", "/1/2/foo2",
"1/2 space/foo something"
]
expected_results = [
utils.JoinPath(self.temp_dir, x) for x in expected_results
]
results = self._RunGlob(paths)
self.assertCountEqual(results, expected_results)
# Get all of the bars.
paths = [os.path.join(self.temp_dir, "**10bar*")]
expected_results = [
"bar", "1/bar1", "/1/2/bar2", "/1/2/3/bar3", "/1/2/3/4/bar4"
]
expected_results = [
utils.JoinPath(self.temp_dir, x) for x in expected_results
]
results = self._RunGlob(paths)
self.assertCountEqual(results, expected_results)
def testGlobWithTwoStars(self):
self._MakeTestDirs()
paths = [os.path.join(self.temp_dir, "1/", "*/*/foo*")]
# Handle filesystem case insensitivity
expected_results = ["1/2/3/foo3"]
if platform.system() == "Linux":
expected_results = ["1/2/3/foo3", "1/2/3/fOo3"]
expected_results = [
utils.JoinPath(self.temp_dir, x) for x in expected_results
]
results = self._RunGlob(paths)
self.assertCountEqual(results, expected_results)
def testGlobWithMultiplePaths(self):
self._MakeTestDirs()
paths = [
os.path.join(self.temp_dir, "*/*/foo*"),
os.path.join(self.temp_dir, "notthere"),
os.path.join(self.temp_dir, "*/notthere"),
os.path.join(self.temp_dir, "*/foo*")
]
# Handle filesystem case sensitivity
expected_results = ["1/foo1", "/1/2/foo2"]
if platform.system() == "Linux":
expected_results = ["1/foo1", "1/fOo1", "/1/2/fOo2", "/1/2/foo2"]
results = self._RunGlob(paths)
self.assertCountEqual(
results, [utils.JoinPath(self.temp_dir, x) for x in expected_results])
def testGlobWithInvalidStarStar(self):
client_mock = action_mocks.GlobClientMock()
# This glob is invalid since it uses two ** expressions.
paths = [os.path.join(self.base_path, "test_img.dd", "**", "**", "foo")]
# Make sure the flow raises.
with self.assertRaises(ValueError):
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.Glob),
client_mock,
client_id=self.client_id,
paths=paths,
pathtype=rdf_paths.PathSpec.PathType.OS,
creator=self.test_username)
def testGlobWithWildcardsInsideTSKFile(self):
client_mock = action_mocks.GlobClientMock()
# This glob should find this file in test data: glob_test/a/b/foo.
path = os.path.join("*", "a", "b", "*")
root_path = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test_img.dd"),
pathtype=rdf_paths.PathSpec.PathType.OS)
root_path.Append(path="/", pathtype=rdf_paths.PathSpec.PathType.TSK)
# Run the flow.
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.Glob),
client_mock,
client_id=self.client_id,
paths=[path],
root_path=root_path,
pathtype=rdf_paths.PathSpec.PathType.OS,
creator=self.test_username)
children = self._ListTestChildPathInfos(
["test_img.dd", "glob_test", "a", "b"])
self.assertLen(children, 1)
self.assertEqual(children[0].components[-1], "foo")
def testGlobWithWildcardsInsideTSKFileCaseInsensitive(self):
client_mock = action_mocks.GlobClientMock()
# This glob should find this file in test data: glob_test/a/b/foo.
path = os.path.join("*", "a", "b", "FOO*")
root_path = rdf_paths.PathSpec(
path=os.path.join(self.base_path, "test_IMG.dd"),
pathtype=rdf_paths.PathSpec.PathType.OS)
root_path.Append(path="/", pathtype=rdf_paths.PathSpec.PathType.TSK)
# Run the flow.
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.Glob),
client_mock,
client_id=self.client_id,
paths=[path],
root_path=root_path,
pathtype=rdf_paths.PathSpec.PathType.OS,
creator=self.test_username)
children = self._ListTestChildPathInfos(
["test_img.dd", "glob_test", "a", "b"])
self.assertLen(children, 1)
self.assertEqual(children[0].components[-1], "foo")
def testGlobWildcardsAndTSK(self):
client_mock = action_mocks.GlobClientMock()
# This glob should find this file in test data: glob_test/a/b/foo.
path = os.path.join(self.base_path, "test_IMG.dd", "glob_test", "a", "b",
"FOO*")
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.Glob),
client_mock,
client_id=self.client_id,
paths=[path],
pathtype=rdf_paths.PathSpec.PathType.OS,
creator=self.test_username)
children = self._ListTestChildPathInfos(
["test_img.dd", "glob_test", "a", "b"])
self.assertLen(children, 1)
self.assertEqual(children[0].components[-1], "foo")
def testGlobWildcardOnImage(self):
client_mock = action_mocks.GlobClientMock()
# Specifying a wildcard for the image will not open it.
path = os.path.join(self.base_path, "*.dd", "glob_test", "a", "b", "FOO*")
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.Glob),
client_mock,
client_id=self.client_id,
paths=[path],
pathtype=rdf_paths.PathSpec.PathType.OS,
creator=self.test_username)
with self.assertRaises(db.UnknownPathError):
self._ListTestChildPathInfos(["test_img.dd", "glob_test", "a", "b"])
def testGlobDirectory(self):
"""Test that glob expands directories."""
users = [
rdf_client.User(username="test", appdata="test_data/index.dat"),
rdf_client.User(username="test2", appdata="test_data/History"),
rdf_client.User(username="test3", appdata="%%PATH%%"),
]
self.client_id = self.SetupClient(0, users=users)
client_mock = action_mocks.GlobClientMock()
# This glob expands each user's appdata path on this system.
path = os.path.join(os.path.dirname(self.base_path), "%%users.appdata%%")
# Run the flow.
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.Glob),
client_mock,
client_id=self.client_id,
paths=[path],
creator=self.test_username)
children = self._ListTestChildPathInfos(
[], path_type=rdf_objects.PathInfo.PathType.OS)
self.assertLen(children, 1)
self.assertEqual(children[0].components[-1], "index.dat")
def testGlobGrouping(self):
"""Tests the glob grouping functionality."""
pattern = "test_data/{ntfs_img.dd,*log,*.exe}"
client_mock = action_mocks.GlobClientMock()
path = os.path.join(os.path.dirname(self.base_path), pattern)
# Run the flow.
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.Glob),
client_mock,
client_id=self.client_id,
paths=[path],
creator=self.test_username)
children = self._ListTestChildPathInfos(
[], path_type=rdf_objects.PathInfo.PathType.OS)
files_found = [child.components[-1] for child in children]
self.assertCountEqual(files_found, [
"ntfs_img.dd",
"apache_false_log",
"apache_log",
"syslog",
"win_hello.exe",
])
def testIllegalGlob(self):
"""Test that illegal globs raise."""
paths = ["Test/%%Weird_illegal_attribute%%"]
# Run the flow - we expect it to fail because Weird_illegal_attribute is
# not a valid client knowledgebase attribute.
flow_id = flow_test_lib.StartFlow(
filesystem.Glob, paths=paths, client_id=self.client_id)
flow_obj = data_store.REL_DB.ReadFlowObject(self.client_id, flow_id)
self.assertEqual(
flow_obj.error_message,
"Some attributes are not part of the knowledgebase: "
"weird_illegal_attribute")
self.assertIn("KbInterpolationUnknownAttributesError", flow_obj.backtrace)
def testGlobRoundtrips(self):
"""Tests that glob doesn't use too many client round trips."""
for pattern, num_find, num_stat, duplicated_ok in [
("test_data/test_artifact.json", 0, 1, False),
("test_data/test_*", 1, 0, False),
("test_da*/test_artifact.json", 1, 1, False),
("test_da*/test_*", 2, 0, False),
("test_da*/test_{artifact,artifacts}.json", 1, 2, True),
("test_data/test_{artifact,artifacts}.json", 0, 2, False),
("test_data/{ntfs_img.dd,*.log,*.raw}", 1, 1, False),
("test_data/{*.log,*.raw}", 1, 0, False),
("test_data/a/**/helloc.txt", 1, None, False),
("test_data/a/**/hello{c,d}.txt", 1, None, True),
("test_data/a/**/hello*.txt", 4, None, False),
("test_data/a/**.txt", 1, None, False),
("test_data/a/**5*.txt", 1, None, False),
("test_data/a/**{.json,.txt}", 1, 0, False),
]:
path = os.path.join(os.path.dirname(self.base_path), pattern)
client_mock = action_mocks.GlobClientMock()
# Run the flow.
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.Glob),
client_mock,
client_id=self.client_id,
paths=[path],
creator=self.test_username)
if num_find is not None:
self.assertEqual(client_mock.action_counts.get("Find", 0), num_find)
if num_stat is not None:
self.assertEqual(
client_mock.action_counts.get("GetFileStat", 0), num_stat)
if not duplicated_ok:
# Check for duplicate client calls. There might be duplicates that are
# very cheap when we look for a wildcard (* or **) first and later in
# the pattern for a group of files ({}).
for method in "StatFile", "Find":
stat_args = client_mock.recorded_args.get(method, [])
stat_paths = [c.pathspec.CollapsePath() for c in stat_args]
self.assertCountEqual(stat_paths, set(stat_paths))
def _CheckCasing(self, path, filename):
path_infos = self._ListTestChildPathInfos(
path.split("/"), path_type=rdf_objects.PathInfo.PathType.OS)
filenames = [path_info.components[-1] for path_info in path_infos]
self.assertIn(filename, filenames)
def testGlobCaseCorrection(self):
# This should get corrected to "a/b/c/helloc.txt"
test_path = "a/B/c/helloC.txt"
self._RunGlob([os.path.join(self.base_path, test_path)])
self._CheckCasing("a", "b")
self._CheckCasing("a/b/c", "helloc.txt")
def testGlobCaseCorrectionUsingWildcards(self):
# Make sure this also works with *s in the glob.
# This should also get corrected to "a/b/c/helloc.txt"
test_path = "a/*/C/*.txt"
self._RunGlob([os.path.join(self.base_path, test_path)])
self._CheckCasing("a", "b")
self._CheckCasing("a/b", "c")
def testDownloadDirectoryUnicode(self):
"""Test a FileFinder flow with depth=1."""
with vfs_test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.OS,
vfs_test_lib.ClientVFSHandlerFixture):
# Mock the client actions FileFinder uses.
client_mock = action_mocks.FileFinderClientMock()
flow_test_lib.TestFlowHelper(
compatibility.GetName(file_finder.FileFinder),
client_mock,
client_id=self.client_id,
paths=["/c/Downloads/*"],
action=rdf_file_finder.FileFinderAction.Download(),
creator=self.test_username)
# There should be 6 children:
expected_filenames = [
"a.txt", "b.txt", "c.txt", "d.txt", "sub1", "中国新闻网新闻中.txt"
]
children = data_store.REL_DB.ListChildPathInfos(
self.client_id, rdf_objects.PathInfo.PathType.OS, ["c", "Downloads"])
filenames = [child.components[-1] for child in children]
self.assertCountEqual(filenames, expected_filenames)
def _SetupTestDir(self, directory):
base = utils.JoinPath(self.temp_dir, directory)
os.makedirs(base)
with io.open(utils.JoinPath(base, "a.txt"), "wb") as fd:
fd.write(b"Hello World!\n")
with io.open(utils.JoinPath(base, "b.txt"), "wb") as fd:
pass
with io.open(utils.JoinPath(base, "c.txt"), "wb") as fd:
pass
with io.open(utils.JoinPath(base, "d.txt"), "wb") as fd:
pass
sub = utils.JoinPath(base, "sub1")
os.makedirs(sub)
with io.open(utils.JoinPath(sub, "a.txt"), "wb") as fd:
fd.write(b"Hello World!\n")
with io.open(utils.JoinPath(sub, "b.txt"), "wb") as fd:
pass
with io.open(utils.JoinPath(sub, "c.txt"), "wb") as fd:
pass
return base
def testDownloadDirectory(self):
"""Test a FileFinder flow with depth=1."""
# Mock the client actions FileFinder uses.
client_mock = action_mocks.FileFinderClientMock()
test_dir = self._SetupTestDir("testDownloadDirectory")
flow_test_lib.TestFlowHelper(
compatibility.GetName(file_finder.FileFinder),
client_mock,
client_id=self.client_id,
paths=[test_dir + "/*"],
action=rdf_file_finder.FileFinderAction.Download(),
creator=self.test_username)
# There should be 5 children:
expected_filenames = ["a.txt", "b.txt", "c.txt", "d.txt", "sub1"]
children = data_store.REL_DB.ListChildPathInfos(
self.client_id, rdf_objects.PathInfo.PathType.OS,
test_dir.strip("/").split("/"))
filenames = [child.components[-1] for child in children]
self.assertCountEqual(filenames, expected_filenames)
fd = file_store.OpenFile(
db.ClientPath.FromPathInfo(self.client_id, children[0]))
self.assertEqual(fd.read(), b"Hello World!\n")
def testDownloadDirectorySub(self):
"""Test a FileFinder flow with depth=5."""
# Mock the client actions FileFinder uses.
client_mock = action_mocks.FileFinderClientMock()
test_dir = self._SetupTestDir("testDownloadDirectorySub")
flow_test_lib.TestFlowHelper(
compatibility.GetName(file_finder.FileFinder),
client_mock,
client_id=self.client_id,
paths=[test_dir + "/**5"],
action=rdf_file_finder.FileFinderAction.Download(),
creator=self.test_username)
expected_filenames = ["a.txt", "b.txt", "c.txt", "d.txt", "sub1"]
expected_filenames_sub = ["a.txt", "b.txt", "c.txt"]
components = test_dir.strip("/").split("/")
children = data_store.REL_DB.ListChildPathInfos(
self.client_id, rdf_objects.PathInfo.PathType.OS, components)
filenames = [child.components[-1] for child in children]
self.assertCountEqual(filenames, expected_filenames)
children = data_store.REL_DB.ListChildPathInfos(
self.client_id, rdf_objects.PathInfo.PathType.OS, components + ["sub1"])
filenames = [child.components[-1] for child in children]
self.assertCountEqual(filenames, expected_filenames_sub)
def testDiskVolumeInfoOSXLinux(self):
client_mock = action_mocks.UnixVolumeClientMock()
session_id = flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.DiskVolumeInfo),
client_mock,
client_id=self.client_id,
creator=self.test_username,
path_list=["/usr/local", "/home"])
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
self.assertCountEqual([x.unixvolume.mount_point for x in results],
["/", "/usr"])
@parser_test_lib.WithParser("WmiDisk", wmi_parser.WMILogicalDisksParser)
@parser_test_lib.WithParser("WinReg", winreg_parser.WinSystemRootParser)
def testDiskVolumeInfoWindows(self):
self.client_id = self.SetupClient(0, system="Windows")
with vfs_test_lib.VFSOverrider(rdf_paths.PathSpec.PathType.REGISTRY,
vfs_test_lib.FakeRegistryVFSHandler):
client_mock = action_mocks.WindowsVolumeClientMock()
session_id = flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.DiskVolumeInfo),
client_mock,
client_id=self.client_id,
creator=self.test_username,
path_list=[r"D:\temp\something", r"/var/tmp"])
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
# We asked for D and we guessed systemroot (C) for "/var/tmp", but only
# C and Z are present, so we should just get C.
self.assertCountEqual([x.windowsvolume.drive_letter for x in results],
["C:"])
session_id = flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.DiskVolumeInfo),
client_mock,
client_id=self.client_id,
creator=self.test_username,
path_list=[r"Z:\blah"])
results = flow_test_lib.GetFlowResults(self.client_id, session_id)
self.assertCountEqual([x.windowsvolume.drive_letter for x in results],
["Z:"])
def testGlobBackslashHandlingNoRegex(self):
self._Touch("foo.txt")
self._Touch("foo.txt~")
paths = [
utils.JoinPath(self.temp_dir, "foo.txt"),
# The backslash in the path will be replaced by a forward-slash when
# building a tree representing all the paths (this behavior isn't
# particularly correct). Note that the tilde does not need to be
# escaped.
utils.JoinPath(self.temp_dir, r"foo.txt\~"),
]
expected_paths = [utils.JoinPath(self.temp_dir, "foo.txt")]
results = self._RunGlob(paths)
self.assertCountEqual(expected_paths, results)
def testGlobBackslashHandlingWithRegex(self):
os.mkdir(utils.JoinPath(self.temp_dir, "1"))
self._Touch("1/foo.txt")
self._Touch("1/foo.txt~")
paths = [
utils.JoinPath(self.temp_dir, "*/foo.txt"),
# The backslash in the path will be replaced by a forward-slash when
# building a tree representing all the paths (this behavior isn't
# particularly correct). Note that the tilde does not need to be
# escaped.
utils.JoinPath(self.temp_dir, r"*/foo.txt\~"),
]
expected_paths = [utils.JoinPath(self.temp_dir, "1/foo.txt")]
results = self._RunGlob(paths)
self.assertCountEqual(expected_paths, results)
def testGlobBackslashHandlingWithRecursion(self):
os.makedirs(utils.JoinPath(self.temp_dir, "1/2"))
self._Touch("1/foo.txt")
self._Touch("1/foo.txt~")
self._Touch("1/2/foo.txt")
self._Touch("1/2/foo.txt~")
paths = [
utils.JoinPath(self.temp_dir, "**2/foo.txt"),
# The backslash in the path will be replaced by a forward-slash when
# building a tree representing all the paths (this behavior isn't
# particularly correct). Note that the tilde does not need to be
# escaped.
utils.JoinPath(self.temp_dir, r"**2/foo.txt\~"),
]
expected_paths = [
utils.JoinPath(self.temp_dir, "1/foo.txt"),
utils.JoinPath(self.temp_dir, "1/2/foo.txt"),
]
results = self._RunGlob(paths)
self.assertCountEqual(expected_paths, results)
def _Touch(self, relative_path):
io.open(utils.JoinPath(self.temp_dir, relative_path), "wb").close()
def testListingRegistryDirectoryDoesNotYieldMtimes(self):
with vfs_test_lib.RegistryVFSStubber():
client_id = self.SetupClient(0)
pb = rdf_paths.PathSpec(
path="/HKEY_LOCAL_MACHINE/SOFTWARE/ListingTest",
pathtype=rdf_paths.PathSpec.PathType.REGISTRY)
client_mock = action_mocks.ListDirectoryClientMock()
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.ListDirectory),
client_mock,
client_id=client_id,
pathspec=pb,
creator=self.test_username)
children = data_store.REL_DB.ListChildPathInfos(
self.client_id, rdf_objects.PathInfo.PathType.REGISTRY,
["HKEY_LOCAL_MACHINE", "SOFTWARE", "ListingTest"])
self.assertLen(children, 2)
for child in children:
self.assertIsNone(child.stat_entry.st_mtime)
def testNotificationWhenListingRegistry(self):
# Change the username so notifications get written.
username = "notification_test"
acl_test_lib.CreateUser(username)
with vfs_test_lib.RegistryVFSStubber():
client_id = self.SetupClient(0)
pb = rdf_paths.PathSpec(
path="/HKEY_LOCAL_MACHINE/SOFTWARE/ListingTest",
pathtype=rdf_paths.PathSpec.PathType.REGISTRY)
client_mock = action_mocks.ListDirectoryClientMock()
flow_test_lib.TestFlowHelper(
compatibility.GetName(filesystem.ListDirectory),
client_mock,
client_id=client_id,
pathspec=pb,
creator=username)
notifications = data_store.REL_DB.ReadUserNotifications(username)
self.assertLen(notifications, 1)
n = notifications[0]
self.assertEqual(n.reference.vfs_file.path_type,
rdf_objects.PathInfo.PathType.REGISTRY)
self.assertEqual(n.reference.vfs_file.path_components,
["HKEY_LOCAL_MACHINE", "SOFTWARE", "ListingTest"])
def main(argv):
# Run the full test suite
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
{
"content_hash": "1a4694afa9168ff32490961f6b6723a7",
"timestamp": "",
"source": "github",
"line_count": 923,
"max_line_length": 82,
"avg_line_length": 36.683640303358615,
"alnum_prop": 0.6504031424436634,
"repo_name": "google/grr",
"id": "0c16383405d21f1f58baa743b01522ad06eef390",
"size": "33907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/server/grr_response_server/flows/general/filesystem_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12697"
},
{
"name": "C++",
"bytes": "54814"
},
{
"name": "Dockerfile",
"bytes": "1822"
},
{
"name": "HCL",
"bytes": "8451"
},
{
"name": "HTML",
"bytes": "366783"
},
{
"name": "JavaScript",
"bytes": "13088"
},
{
"name": "Jupyter Notebook",
"bytes": "199216"
},
{
"name": "Makefile",
"bytes": "3244"
},
{
"name": "PowerShell",
"bytes": "531"
},
{
"name": "Python",
"bytes": "8844725"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "SCSS",
"bytes": "105120"
},
{
"name": "Shell",
"bytes": "48663"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TypeScript",
"bytes": "2139377"
}
],
"symlink_target": ""
}
|
from sqlalchemy import *
from sqlalchemy import sql
from sqlalchemy.databases import sybase
from test.lib import *
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = sybase.dialect()
def test_extract(self):
t = sql.table('t', sql.column('col1'))
mapping = {
'day': 'day',
'doy': 'dayofyear',
'dow': 'weekday',
'milliseconds': 'millisecond',
'millisecond': 'millisecond',
'year': 'year',
}
for field, subst in mapping.items():
self.assert_compile(
select([extract(field, t.c.col1)]),
'SELECT DATEPART("%s", t.col1) AS anon_1 FROM t' % subst)
|
{
"content_hash": "781a81f60462315ad8061184d7feb056",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 73,
"avg_line_length": 26.071428571428573,
"alnum_prop": 0.5465753424657535,
"repo_name": "ioram7/keystone-federado-pgid2013",
"id": "0a7cbf6b6dd72e61377b9ceea284491899332f7a",
"size": "730",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build/sqlalchemy/test/dialect/test_sybase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1841"
},
{
"name": "C",
"bytes": "10584735"
},
{
"name": "C++",
"bytes": "19231"
},
{
"name": "CSS",
"bytes": "172341"
},
{
"name": "JavaScript",
"bytes": "530938"
},
{
"name": "Python",
"bytes": "26306359"
},
{
"name": "Shell",
"bytes": "38138"
},
{
"name": "XSLT",
"bytes": "306125"
}
],
"symlink_target": ""
}
|
import numpy as np
from data.image import image
from spec.mamba import *
with description('image'):
with it('constructs without error'):
expect(calling(image.Image, np.zeros((3, 3)))).not_to(raise_error)
with it('tracks mutations'):
i = image.Image(np.zeros((3, 3), dtype=np.uint8))
i.invert()
expect(i.has_mutation('invert')).to(be_true)
with it('tracks mutations from parents'):
i = image.Image(np.zeros((3, 3), dtype=np.uint8))
i2 = i.invert().fork().fork()
expect(i2.has_mutation('invert')).to(be_true)
with description('computation'):
with before.each:
self.img = image.Image(np.zeros((3, 3), dtype=np.uint8))
self.bincount_patch = mock.patch(
'data.image.image.np.bincount', return_value='patched')
self.mock_bincount = self.bincount_patch.start()
with after.each:
self.bincount_patch.stop()
with it('caches computations'):
expect(self.mock_bincount).not_to(have_been_called)
expect(self.img.bincount).to(equal('patched'))
expect(self.mock_bincount).to(have_been_called_once)
expect(self.img.bincount).to(equal('patched'))
expect(self.mock_bincount).to(have_been_called_once)
with it('invalidates computations'):
expect(self.img.bincount).to(equal('patched'))
expect(self.mock_bincount).to(have_been_called_once)
self.img.invert()
expect(self.img.bincount).to(equal('patched'))
expect(self.mock_bincount).to(have_been_called_times(2))
with description('crop'):
with it('requires normalize first'):
i = image.Image(np.ones((3, 3), dtype=np.uint8))
expect(calling(i.crop, 0)).to(raise_error(
ValueError, 'normalize() must occur before crop(0)'))
with description('extract_rect') as self:
with before.each:
self.image = image.Image(np.arange(16, dtype=np.uint8).reshape(4, 4))
with it('returns a slice'):
expect(self.image.extract_rect(0, 1, 2, 3).tolist()).to(equal([
# Skips [0, 1, 2, 3] because of y = 1.
[4, 5], # 6, 7 (width = 2).
[8, 9], # 8, 9.
[12, 13], # 14, 15.
]))
with it('prevents mutation'):
def mutate() -> None:
rect = self.image.extract_rect(0, 1, 2, 3)
rect[0][0] = 1
expect(calling(mutate)).to(raise_error(ValueError))
with description('__str__'):
with it('handles empty mutations'):
i = image.Image(np.ones((3, 3), dtype=np.uint8))
expect(str(i)).to(look_like("""
Image()
"""))
with it('handles simple mutations'):
i = image.Image(np.ones((3, 3), dtype=np.uint8))
i.normalize().invert()
expect(str(i)).to(look_like("""
Image()
.normalize()
.invert()
"""))
with it('handles mutations across generations'):
i = image.Image(np.ones((3, 3), dtype=np.uint8))
i = i.normalize().fork().invert()
expect(str(i)).to(look_like("""
Image()
.normalize()
.fork()
.invert()
"""))
with it('handles just generations'):
i = image.Image(np.ones((3, 3), dtype=np.uint8))
i = i.fork().fork().fork()
expect(str(i)).to(look_like("""
Image()
.fork()
.fork()
.fork()
"""))
with it('produces reasonable args for nparray'):
i = image.Image(np.ones((3, 3), dtype=np.uint8))
i.mask(np.ones((3, 3)))
expect(str(i)).to(look_like("""
Image()
.mask(np.ones((3, 3)))
"""))
with description('get_debug_data'):
with it('returns basic debug data'):
data = np.ones((3, 3), dtype=np.uint8)
expect(image.Image(data).get_debug_data().tolist()).to(
equal(data.tolist()))
with it('is read only'):
result = image.Image(np.ones((3, 3), dtype=np.uint8)).get_debug_data()
def mutate() -> None:
result[0][0] = 1
expect(calling(mutate)).to(raise_error(ValueError))
with it('returns a history of changes'):
data = np.ones((3, 3), dtype=np.uint8)
img = image.Image(data).fork().normalize().invert()
expect(img.get_debug_data(replay_mutations=True)).to(have_len(3))
|
{
"content_hash": "478b923d84e5c9568d59857828f636c9",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 76,
"avg_line_length": 32.92913385826772,
"alnum_prop": 0.5839311334289814,
"repo_name": "PhilHarnish/forge",
"id": "50dc51a7c9ba92966c3bafe864a744635a88fcf6",
"size": "4182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spec/data/image/image_spec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "254508"
},
{
"name": "HTML",
"bytes": "12062"
},
{
"name": "JavaScript",
"bytes": "178383"
},
{
"name": "Jupyter Notebook",
"bytes": "1755506"
},
{
"name": "Python",
"bytes": "1033953"
},
{
"name": "Ruby",
"bytes": "800"
},
{
"name": "Shell",
"bytes": "3181"
}
],
"symlink_target": ""
}
|
from optparse import OptionParser
import sys
from urlparse import urlsplit
import mirrormeta
parser = OptionParser(usage='usage: %prog [options] sf.net URL [sf.net URL] [...]')
parser.add_option("-o", action="store", type="string", metavar="FILE",
dest="filename", help="output file (default: stdout)")
parser.add_option("-m", action="store", type="string", metavar="FILE",
dest="mirrorlist", default="thirdpartymirrors",
help="mirror list to use (default: thirdpartymirrors)")
(options, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(1)
if options.filename:
output = open(options.filename, 'w')
else:
output = sys.stdout
mirrorlist = open(options.mirrorlist)
meta = mirrormeta.metalink(mirrorlist, True)
for arg in args:
url = urlsplit(arg)
file = mirrormeta.metalink_file(meta, 'sourceforge', url.path[1:])
output.write(meta.toxml())
output.write('\n')
|
{
"content_hash": "b8e1291e73aea0d2e153e792ce1fa189",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 83,
"avg_line_length": 31.225806451612904,
"alnum_prop": 0.6683884297520661,
"repo_name": "d0k/mirrormeta",
"id": "fa58d198752b1382aaf6a02c593e1550d1057d04",
"size": "1118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sfmeta.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8366"
}
],
"symlink_target": ""
}
|
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_display_units01.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [69572096, 93549312]
data = [
[10000000, 20000000, 30000000, 20000000, 10000000],
]
worksheet.write_column(0, 0, data[0])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
{
"content_hash": "3e83578aa188fed7c900fefe07376778",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 68,
"avg_line_length": 24.243243243243242,
"alnum_prop": 0.6243032329988851,
"repo_name": "jmcnamara/XlsxWriter",
"id": "cf0dc708fe4d07d371a1f2e241ab5256f026d2b2",
"size": "1110",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xlsxwriter/test/comparison/test_chart_display_units01.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
}
|
import __builtin__
import system
import unittest
import riemann
import logging
from marathon import models
class TestingRiemannFunctions(unittest.TestCase):
def setUp(self):
logging.basicConfig()
__builtin__.__pillar__ = {}
__builtin__.__grains__ = {}
__builtin__.__salt__ = {'system.wait_for': system.wait_for, 'marathon_client.apps': lambda: {
'chronos': [models.MarathonTask(app_id='chronos', host='host1', ports=[11]),
models.MarathonTask(app_id='chronos', host='host2', ports=[12])],
'kafka-mesos': [models.MarathonTask(app_id='kafka-mesos', host='host1', ports=[12])]}}
def test_jmx_checks_empty(self):
__builtin__.__pillar__ = {'riemann_checks': {}}
self.assertEqual(riemann.jmx_checks('host1'), [])
def test_jmx_checks_with_kafka_only(self):
__builtin__.__pillar__ = {'riemann_checks': {'jmx': {'kafka-mesos': [{'obj': 'x'}]}}}
self.assertEqual(riemann.jmx_checks('host1'), [])
def test_jmx_checks(self):
__builtin__.__pillar__ = {'riemann_checks': {'jmx': {'kafka-mesos': [{'obj': 'x'}], 'chronos': [{'obj': 'x'}]}},
'chronos': {'check_port_index': 0}}
self.assertEqual(riemann.jmx_checks('host1'),
[{'app_id': 'chronos', 'name': 'chronos-11', 'port': 11, 'queries': [{'obj': 'x'}]}])
|
{
"content_hash": "154dcacc8ac79f9aecc66af6b82f7b2c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 120,
"avg_line_length": 43.84375,
"alnum_prop": 0.556664290805417,
"repo_name": "elyast/saltstack-formulas",
"id": "df8e38c10721e9fa5ea8f053f27d06ee448c7e25",
"size": "1403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_modules_tests/unit/riemann_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "688"
},
{
"name": "HTML",
"bytes": "10373"
},
{
"name": "PLpgSQL",
"bytes": "378"
},
{
"name": "Python",
"bytes": "50773"
},
{
"name": "Ruby",
"bytes": "91"
},
{
"name": "SQLPL",
"bytes": "716"
},
{
"name": "SaltStack",
"bytes": "87890"
},
{
"name": "Scheme",
"bytes": "171"
},
{
"name": "Shell",
"bytes": "19506"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals, division
import re
def dasherize(value):
"""Dasherizes the passed value."""
value = value.strip()
value = re.sub(r'([A-Z])', r'-\1', value)
value = re.sub(r'[-_\s]+', r'-', value)
value = re.sub(r'^-', r'', value)
value = value.lower()
return value
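# Editor's note: a doctest-style usage sketch (hypothetical inputs, not part of
# the original armet module):
# >>> dasherize('SomeCamelCase value')
# 'some-camel-case-value'
# >>> dasherize('already_snake_case')
# 'already-snake-case'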
|
{
"content_hash": "4d81e7863e24991c9dc2cdc322ec49d4",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 66,
"avg_line_length": 28.083333333333332,
"alnum_prop": 0.5875370919881305,
"repo_name": "armet/python-armet",
"id": "a46a9d0b081a89a1bcdacc3f9f77373589bb5dd4",
"size": "361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "armet/utils/string.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "285382"
}
],
"symlink_target": ""
}
|
"""
Django settings for deploy project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@s8+x9jn97o77_76@tr@59^cvxz6tj76!q0x%^rvq7in7co8w@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'application',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'deploy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'deploy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'deploy',
'USER': 'deploy',
'PASSWORD': 'deploy',
'HOST': 'localhost',
'PORT': '3306'
}
}
LOGIN_URL = '/application/login/'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
import logging
logging.basicConfig(
level = logging.DEBUG,
format = '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
filename = '/tmp/deploy.log',
)
|
{
"content_hash": "2b41565341178f1288c95ea9c8c25491",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 83,
"avg_line_length": 25.559322033898304,
"alnum_prop": 0.6813660477453581,
"repo_name": "dntdevops/cloudops",
"id": "78f312ce510b40652bc7aff068334492a5f52b8c",
"size": "3016",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "deploy/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45954"
},
{
"name": "HTML",
"bytes": "61443"
},
{
"name": "JavaScript",
"bytes": "2590"
},
{
"name": "Python",
"bytes": "61568"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('opendebates', '0021_submission_local_votes'),
]
operations = [
migrations.AddField(
model_name='sitemode',
name='announcement_body',
field=models.TextField(null=True, blank=True),
),
migrations.AddField(
model_name='sitemode',
name='announcement_headline',
field=models.CharField(max_length=255, null=True, blank=True),
),
migrations.AddField(
model_name='sitemode',
name='announcement_link',
field=models.URLField(null=True, blank=True),
),
]
|
{
"content_hash": "117690c8d837afc902b5fac88792d4bd",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 74,
"avg_line_length": 27.428571428571427,
"alnum_prop": 0.5807291666666666,
"repo_name": "ejucovy/django-opendebates",
"id": "977c17a3025a215065eac5d26a2a823c541a2395",
"size": "792",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "opendebates/migrations/0022_auto_20160420_1423.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "35435"
},
{
"name": "HTML",
"bytes": "53283"
},
{
"name": "JavaScript",
"bytes": "18710"
},
{
"name": "Python",
"bytes": "206230"
},
{
"name": "Shell",
"bytes": "2040"
}
],
"symlink_target": ""
}
|
entityName = ''
offset = 0
cur_batch = sdc.createBatch()
hasNext = True
while hasNext:
offset = offset + 1
record = sdc.createRecord('record souce id')
record.value = "record value"
cur_batch.addError(record, "error msg")
# if the batch is full, process it and start a new one
if cur_batch.errorCount() >= sdc.batchSize:
# blocks until all records are written to all destinations
# (or failure) and updates offset
# in accordance with delivery guarantee
cur_batch.process(entityName, str(offset))
cur_batch = sdc.createBatch()
# if the pipeline has been stopped, we should end the script
if sdc.isStopped():
hasNext = False
|
{
"content_hash": "328eff18329c424f7bf5fb749848d5c6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 68,
"avg_line_length": 30.041666666666668,
"alnum_prop": 0.6490984743411928,
"repo_name": "streamsets/datacollector",
"id": "95252effd82e38d1415b622367be151c2de4693d",
"size": "1303",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jython-protolib/src/test/resources/com/streamsets/pipeline/stage/origin/jython/GeneratorOriginScriptErrors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "101291"
},
{
"name": "CSS",
"bytes": "125357"
},
{
"name": "Groovy",
"bytes": "27033"
},
{
"name": "HTML",
"bytes": "558399"
},
{
"name": "Java",
"bytes": "23349126"
},
{
"name": "JavaScript",
"bytes": "1126994"
},
{
"name": "Python",
"bytes": "26996"
},
{
"name": "Scala",
"bytes": "6646"
},
{
"name": "Shell",
"bytes": "30118"
},
{
"name": "TSQL",
"bytes": "3632"
}
],
"symlink_target": ""
}
|
import sys
import os
from os import path
import re
from itertools import imap, repeat, chain
import globals
import process
import parboilfile as pbf
from futures import Future
from error import ErrorType
class Benchmark(object):
"""A benchmark.
If the benchmark is malformed or otherwise invalid, only the 'name' and
'invalid' fields will be set. Otherwise all fields will be set.
Fields:
name The name of the benchmark. This is also the benchmark
directory name.
invalid None if the benchmark is valid; otherwise, an exception
describing why the benchmark is invalid.
path Full path of the benchmark directory.
descr A description of the benchmark.
impls A dictionary of benchmark source implementations.
datas A dictionary of data sets used to run the benchmark."""
def __init__(self, name, path = None, impls = [], datasets = [],
description=None, invalid=None):
self.name = name
self.invalid = invalid
if invalid is None:
self.path = path
self.impls = dict(imap(lambda i: (i.name, i), impls))
self.datas = dict(imap(lambda i: (i.name, i), datasets))
self.descr = description
def createFromName(name):
"""Scan the benchmark directory for the benchmark named 'name'
and create a benchmark object for it."""
bmkdir = globals.benchdir.getChildByName(name)
datadir = globals.datadir.getChildByName(name)
descr = process.read_description_file(bmkdir)
try:
# Scan implementations of the benchmark
impls = [BenchImpl.createFromDir(impl)
for impl in process.scan_for_benchmark_versions(bmkdir)]
# Scan data sets of the benchmark
datas = [BenchDataset.createFromDir(data)
for data in process.scan_for_benchmark_datasets(datadir)]
# If no exception occurred, the benchmark is valid
return Benchmark(name, bmkdir.getPath(), impls, datas, descr)
finally:
pass
#except Exception, e:
# return Benchmark(name, invalid=e)
createFromName = staticmethod(createFromName)
def describe(self):
"""Return a string describing this benchmark."""
if self.invalid:
return "Error in benchmark:\n" + str(self.invalid)
if self.descr is None:
header = "Benchmark '" + self.name + "'"
else:
header = self.descr
impls = " ".join([impl.name for impl in self.impls.itervalues()])
datas = " ".join([data.name for data in self.datas.itervalues()])
return header + "\nVersions: " + impls + "\nData sets: " + datas
def instance_check(x):
if not isinstance(x, Benchmark):
raise TypeError, "argument must be an instance of Benchmark"
instance_check = staticmethod(instance_check)
class BenchImpl(object):
"""An implementation of a benchmark."""
def __init__(self, dir, description=None):
if not isinstance(dir, pbf.Directory):
            raise TypeError, "dir must be a directory"
self.name = dir.getName()
self.dir = dir
self.descr = description
def createFromDir(dir):
"""Scan the directory containing a benchmark implementation
and create a BenchImpl object from it."""
# Get the description from a file, if provided
descr = process.read_description_file(dir)
return BenchImpl(dir, descr)
createFromDir = staticmethod(createFromDir)
def makefile(self, benchmark, target=None, action=None, platform=None, opt={}):
"""Run this implementation's makefile."""
self.platform = platform
Benchmark.instance_check(benchmark)
def perform():
srcdir = path.join('src', self.name)
builddir = path.join('build', self.name)
if self.platform == None: platform = 'default'
else: platform = self.platform
env={'SRCDIR':srcdir,
'BUILDDIR':builddir + '_' + platform,
'BIN':path.join(builddir+'_'+platform,benchmark.name),
'PARBOIL_ROOT':globals.root,
'PLATFORM':platform,
'BUILD':self.name}
env.update(opt)
mkfile = globals.root + os.sep + 'common' + os.sep + 'mk'
# Run the makefile to build the benchmark
ret = process.makefile(target=target,
action=action,
filepath=path.join(mkfile, "Makefile"),
env=env)
if ret == True:
return ErrorType.Success
else:
return ErrorType.CompileError
# Go to the benchmark directory before building
return process.with_path(benchmark.path, perform)
def build(self, benchmark, platform):
"""Build an executable of this benchmark implementation."""
return self.makefile(benchmark, action='build', platform=platform)
def isBuilt(self, benchmark, platform):
"""Determine whether the executable is up to date."""
return self.makefile(benchmark, action='q', platform=platform) == ErrorType.Success
def clean(self, benchmark, platform):
"""Remove build files for this benchmark implementation."""
return self.makefile(benchmark, action='clean', platform=platform)
def run(self, benchmark, dataset, do_output=True, extra_opts=[], platform=None):
"""Run this benchmark implementation.
Return True if the benchmark terminated normally or False
if there was an error."""
if platform == None:
self.platform = 'default'
else:
self.platform = platform
# Ensure that the benchmark has been built
if not self.isBuilt(benchmark, platform):
rc = self.build(benchmark, platform)
# Stop if 'make' failed
if rc != ErrorType.Success: return rc
def perform():
if self.platform == None:
platform = 'default'
else:
platform = self.platform
# Run the program
#exename = path.join('build', self.name+'_'+platform, benchmark.name)
#args = [exename] + extra_opts + dataset.getCommandLineArguments(benchmark, do_output)
#rc = process.spawnwaitv(exename, args)
args = extra_opts + dataset.getCommandLineArguments(benchmark, do_output)
#zhangfeng zf
print args
args = reduce(lambda x, y: x + ' ' + y, args)
#zhangfeng zf
print args
###
try:
rc = self.makefile(benchmark, action='run', platform=platform, opt={"ARGS":args})
except KeyboardInterrupt:
rc = ErrorType.Killed
# Program exited with error?
# if rc != 0: return ErrorType.RunFailed
# return ErrorType.Success
return rc
return process.with_path(benchmark.path, perform)
def debug(self, benchmark, dataset, do_output=True, extra_opts=[], platform=None):
"""Debug this benchmark implementation."""
if platform == None:
self.platform = 'default'
else:
self.platform = platform
# Ensure that the benchmark has been built
if not self.isBuilt(benchmark, platform):
rc = self.build(benchmark, platform)
# Stop if 'make' failed
if rc != ErrorType.Success: return rc
def perform():
if self.platform == None:
platform = 'default'
else:
platform = self.platform
# Run the program
args = extra_opts + dataset.getCommandLineArguments(benchmark, do_output)
args = reduce(lambda x, y: x + ' ' + y, args)
###
rc = self.makefile(benchmark, action='debug', platform=platform, opt={"ARGS":args})
# Program exited with error?
if rc != 0: return ErrorType.RunFailed
return ErrorType.Success
return process.with_path(benchmark.path, perform)
def check(self, benchmark, dataset):
"""Check the output from the last run of this benchmark
implementation.
Return True if the output checks successfully or False
otherwise."""
def perform():
output_file = dataset.getTemporaryOutputFile(benchmark).getPath()
reference_file = dataset.getReferenceOutputPath()
#zhangfeng zf
print benchmark.path
print output_file
print reference_file
compare = os.path.join('tools', 'compare-output')
print compare
rc = process.spawnwaitl(compare,
compare, reference_file, output_file)
# Program exited with error, or mismatch in output?
if rc != 0: return False
return True
return process.with_path(benchmark.path, perform)
def __str__(self):
return "<BenchImpl '" + self.name + "'>"
class BenchDataset(object):
"""Data sets for running a benchmark."""
def __init__(self, dir, in_files=[], out_files=[], parameters=[],
description=None):
if not isinstance(dir, pbf.Directory):
raise TypeError, "dir must be a pbf.Directory"
self.name = dir.getName()
self.dir = dir
self.inFiles = in_files
self.outFiles = out_files
self.parameters = parameters
self.descr = description
def createFromDir(dir):
"""Scan the directory containing a dataset
and create a BenchDataset object from it."""
# Identify the paths where files may be found
input_dir = dir.getChildByName('input')
output_dir = dir.getChildByName('output')
#benchmark_path = path.join(globals.root, 'benchmarks', name)
def check_default_input_files():
# This function is called to see if the input file set
# guessed by scanning the input directory can be used
if invalid_default_input_files:
raise ValueError, "Cannot infer command line when there are multiple input files in a data set\n(Fix by adding an input DESCRIPTION file)"
if input_dir.exists():
input_descr = process.read_description_file(input_dir)
input_files = input_dir.scanAndReturnNames()
# If more than one input file was found, cannot use the default
# input file list produced by scanning the directory
invalid_default_input_files = len(input_files) > 1
else:
# If there's no input directory, assume the benchmark
# takes no input
input_descr = None
input_files = []
invalid_default_input_files = False
# Read the text of the input description file
if input_descr is not None:
(parameters, input_files1, input_descr) = \
unpack_dataset_description(input_descr, input_files=None)
if input_files1 is None:
# No override value given; use the default
check_default_input_files()
else:
input_files = input_files1
else:
check_default_input_files()
parameters = []
# Look for output files
output_descr = process.read_description_file(output_dir)
output_files = output_dir.scanAndReturnNames()
if len(output_files) > 1:
raise ValueError, "Multiple output files not supported"
# Concatenate input and output descriptions
if input_descr and output_descr:
descr = input_descr + "\n\n" + output_descr
else:
descr = input_descr or output_descr
return BenchDataset(dir, input_files, output_files, parameters, descr)
createFromDir = staticmethod(createFromDir)
def getName(self):
"""Get the name of this dataset."""
return self.name
def getTemporaryOutputDir(self, benchmark):
"""Get the pbf.Directory for the output of a benchmark run.
This function should always return the same pbf.Directory if its parameters
are the same. The output path is not the path where the reference
output is stored."""
rundir = globals.benchdir.getChildByName(benchmark.name).getChildByName('run')
if rundir.getChildByName(self.name) is None:
datasetpath = path.join(rundir.getPath(), self.name)
filepath = path.join(datasetpath, self.outFiles[0])
rundir.addChild(pbf.Directory(datasetpath, [pbf.File(filepath, False)]))
return rundir.getChildByName(self.name)
def getTemporaryOutputFile(self, benchmark):
"""Get the pbf.File for the output of a benchmark run.
This function should always return the same pbf.File if its parameters
        are the same. The output path is not where the reference output
is stored."""
return self.getTemporaryOutputDir(benchmark).getChildByName(self.outFiles[0])
def getReferenceOutputPath(self):
"""Get the name of the reference file, to which the output of a
benchmark run should be compared."""
return path.join(self.dir.getPath(), 'output', self.outFiles[0])
def getCommandLineArguments(self, benchmark, do_output=True):
"""Get the command line arguments that should be passed to the
executable to run this data set. If 'output' is True, then
the executable will be passed flags to save its output to a file.
        Directories to hold output files are created if they do not exist."""
args = []
# Add arguments to pass input files to the benchmark
if self.inFiles:
in_files = ",".join([path.join(self.dir.getPath(),'input', x)
for x in self.inFiles])
args.append("-i")
args.append(in_files)
# Add arguments to store the output somewhere, if output is
# desired
if do_output and self.outFiles:
if len(self.outFiles) != 1:
raise ValueError, "only one output file is supported"
out_file = self.getTemporaryOutputFile(benchmark)
args.append("-o")
args.append(out_file.getPath())
# Ensure that a directory exists for the output
self.getTemporaryOutputDir(benchmark).touch()
args += self.parameters
return args
def __str__(self):
return "<BenchData '" + self.name + "'>"
def unpack_dataset_description(descr, parameters=[], input_files=[]):
"""Read information from the raw contents of a data set description
file. Optional 'parameters' and 'input_files' arguments may be
given, which will be retained unless overridden by the description
file."""
leftover = []
split_at_colon = re.compile(r"^\s*([a-zA-Z]+)\s*:(.*)$")
# Initialize these to default empty strings
parameter_text = None
input_file_text = None
# Scan the description line by line
for line in descr.split('\n'):
m = split_at_colon.match(line)
if m is None: continue
# This line appears to declare something that should be
# interpreted
keyword = m.group(1)
if keyword == "Parameters":
parameter_text = m.group(2)
elif keyword == "Inputs":
input_file_text = m.group(2)
# else, ignore the line
# Split the strings into (possibly) multiple arguments, discarding
# whitespace
if parameter_text is not None: parameters = parameter_text.split()
if input_file_text is not None: input_files = input_file_text.split()
return (parameters, input_files, descr)
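# Editor's note: a doctest-style sketch of the parsing behaviour above
# (hypothetical description text, not taken from any real data set):
# >>> unpack_dataset_description("Parameters: -x 10\nInputs: a.bin b.bin")[:2]
# (['-x', '10'], ['a.bin', 'b.bin'])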
def version_scanner():
"""version_scanner() -> (path -> pbf.Directory)
Return a function to find benchmark versions in the src
directory for the benchmark."""
return lambda x: pbf.scan_file(x, True, lambda y: pbf.Directory(y), ['.svn'])
def find_benchmarks():
"""Find benchmarks in the repository. The benchmarks are
identified, but their contents are not scanned immediately. A
dictionary is returned mapping benchmark names to futures
containing the benchmarks."""
if not globals.root:
raise ValueError, "root directory has not been set"
# Scan all benchmarks in the 'benchmarks' directory and
# lazily create benchmark objects.
db = {}
try:
globals.benchdir.scan()
globals.datadir.scan()
for bmkdir in globals.benchdir.getScannedChildren():
bmk = Future(lambda bmkdir=bmkdir: Benchmark.createFromName(bmkdir.getName()))
db[bmkdir.getName()] = bmk
except OSError, e:
sys.stdout.write("Benchmark directory not found!\n\n")
return {}
return db
def _desc_file(dpath):
"""_desc_file(dpath)
Returns a pbf.File for an optional description file in the directory dpath."""
return pbf.File(path.join(dpath,'DESCRIPTION'), False)
def benchmark_scanner():
"""benchmark_scanner -> (path -> pbf.Directory)
Returns a function which will scan a filename and create a pbf.Directory
for a benchmark represented by that name."""
def create_benchmark_dir(dpath):
expected = [pbf.Directory(path.join(dpath,'src'), [], version_scanner()),
pbf.Directory(path.join(dpath,'tools'),
[pbf.File(path.join(dpath,'compare-output'))]),
pbf.Directory(path.join(dpath,'build'), must_exist=False),
pbf.Directory(path.join(dpath,'run'), must_exist=False),
_desc_file(dpath)]
return pbf.Directory(dpath, expected)
return lambda x: pbf.scan_file(x, True, create_benchmark_dir,['_darcs','.svn'])
def dataset_scanner():
"""dataset_scanner -> (path -> pbf.Directory)
Returns a function which will scan a filename and create a pbf.Directory
for a folder containing datasets for the benchmark of the same name."""
def create_dataset_dir(dpath):
simple_scan = lambda x: pbf.scan_file(x)
expected = [pbf.Directory(path.join(dpath,'input'),
[_desc_file(path.join(dpath,'input'))], simple_scan),
pbf.Directory(path.join(dpath,'output'), [], simple_scan),
_desc_file(dpath)]
return pbf.Directory(dpath, expected)
return lambda x: pbf.scan_file(x, True, create_dataset_dir, ['.svn', '_darcs'])
def dataset_repo_scanner():
"""dataset_repo_scanner -> (path -> pbf.Directory)
Returns a function which will scan a filename and create a pbf.Directory
for a folder containing a dataset repository for parboil benchmarks."""
benchmark_dsets_scanner = lambda x: pbf.Directory(x, [], dataset_scanner())
return lambda x: pbf.scan_file(x, True, benchmark_dsets_scanner)
|
{
"content_hash": "3f1ba290335469afa404b6b717fc650d",
"timestamp": "",
"source": "github",
"line_count": 528,
"max_line_length": 154,
"avg_line_length": 36.640151515151516,
"alnum_prop": 0.6058616768324201,
"repo_name": "zhangfengthu/CoRunBench",
"id": "2436deca9757fbabee0302e7c6d36f353ef7f449",
"size": "19411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parboil/driver/benchmark.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3274601"
},
{
"name": "C++",
"bytes": "956781"
},
{
"name": "Cuda",
"bytes": "256576"
},
{
"name": "Groff",
"bytes": "2788958"
},
{
"name": "Makefile",
"bytes": "141232"
},
{
"name": "Objective-C",
"bytes": "58855"
},
{
"name": "Python",
"bytes": "77402"
},
{
"name": "Shell",
"bytes": "77270"
}
],
"symlink_target": ""
}
|
import logging
import urlparse
import click
import termcolor
import mutagen.id3
import mutagen.mp3
import requests
import soundcloud
client = soundcloud.Client(client_id='<Your CLIENT_ID here>')
log = logging.getLogger('SoundDrizzle')
def _bold(text):
return termcolor.colored(text, attrs=['bold'])
@click.command()
@click.argument('track_url')
@click.argument('destination', required=False)
def pour(track_url, destination=None):
try:
track = resolve(track_url)
destination = destination or filename_for_track(track)
track_details = client.get(track.stream_url, allow_redirects=False)
click.echo('Resolved link to {} by {}'.format(_bold(track.title), _bold(track.user['username'])))
stream_url = track_details.location
download(stream_url, destination)
click.echo('Successfully downloaded track to {}'.format(_bold(destination)))
add_metadata(destination, track)
click.echo('Done! Enjoy listening offline!')
except Exception as e:
click.echo('Problem downloading track: {}'.format(e))
def resolve(track_url):
"""
Resolves the URL to an actual track from the SoundCloud API.
If the track resolves to more than one possible track, it takes the first search result.
:returns: The track dictionary from the SoundCloud API
"""
try:
path = urlparse.urlparse(track_url).path
tracks = client.get('/tracks', q=path)
if tracks:
return tracks[0]
else:
raise ValueError('Track not found for URL {}'.format(track_url))
except Exception as e:
raise ValueError('Error obtaining track by url: {}'.format(str(e)))
def filename_for_track(track):
"""
:return: A safe filename for the given track
"""
artist = track.user['permalink']
title = track.title
return '{}-{}.mp3'.format(artist, title).lower().replace(' ', '_').replace('/', '_')
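# Editor's note: a doctest-style sketch with a hypothetical stand-in for a
# SoundCloud track resource (the real object comes from the soundcloud client):
# >>> class _FakeTrack(object):
# ...     user = {'permalink': 'some_artist'}
# ...     title = 'My Song/Remix'
# >>> filename_for_track(_FakeTrack())
# 'some_artist-my_song_remix.mp3'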
def download(url, target_file, chunk_size=4096):
"""
Simple requests downloader
"""
r = requests.get(url, stream=True)
with open(target_file, 'w+') as out:
# And this is why I love Armin Ronacher:
with click.progressbar(r.iter_content(chunk_size=chunk_size),
int(r.headers['Content-Length'])/chunk_size,
label='Downloading...') as chunks:
for chunk in chunks:
out.write(chunk)
def add_metadata(track_file, track_data):
"""
Adds artist and title from the track data, and downloads the cover and embeds it in the MP3 tags.
"""
# This needs some exception handling!
# We don't always know what type the cover is!
mp3 = mutagen.mp3.MP3(track_file)
mp3['TPE1'] = mutagen.id3.TPE1(encoding=3, text=track_data.user['username'])
mp3['TIT2'] = mutagen.id3.TIT2(encoding=3, text=track_data.title)
cover_bytes = requests.get(track_data.artwork_url, stream=True).raw.read()
mp3.tags.add(mutagen.id3.APIC(encoding=3, mime='image/jpeg', type=3, desc='Front cover', data=cover_bytes))
mp3.save()
|
{
"content_hash": "bed43f4e7c4c5f6f0dd1cddcda3c9122",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 111,
"avg_line_length": 31.97938144329897,
"alnum_prop": 0.6470019342359767,
"repo_name": "wonderb0lt/sounddrizzle",
"id": "2429f2f18a9e007e69274928acace3919efdad3c",
"size": "3102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sounddrizzle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3917"
}
],
"symlink_target": ""
}
|
"""Utilities used in the Kadenze Academy Course on Deep Learning w/ Tensorflow.
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Parag K. Mital
Copyright Parag K. Mital, June 2016.
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import urllib
import numpy as np
import zipfile
import os
from scipy.io import wavfile
def download(path):
"""Use urllib to download a file.
Parameters
----------
path : str
Url to download
Returns
-------
path : str
Location of downloaded file.
"""
import os
from six.moves import urllib
fname = path.split('/')[-1]
if os.path.exists(fname):
return fname
print('Downloading ' + path)
def progress(count, block_size, total_size):
if count % 20 == 0:
print('Downloaded %02.02f/%02.02f MB' % (
count * block_size / 1024.0 / 1024.0,
total_size / 1024.0 / 1024.0), end='\r')
filepath, _ = urllib.request.urlretrieve(
path, filename=fname, reporthook=progress)
return filepath
def download_and_extract_tar(path, dst):
"""Download and extract a tar file.
Parameters
----------
path : str
Url to tar file to download.
dst : str
Location to save tar file contents.
"""
import tarfile
filepath = download(path)
if not os.path.exists(dst):
os.makedirs(dst)
tarfile.open(filepath, 'r:gz').extractall(dst)
def download_and_extract_zip(path, dst):
"""Download and extract a zip file.
Parameters
----------
path : str
Url to zip file to download.
dst : str
Location to save zip file contents.
"""
import zipfile
filepath = download(path)
if not os.path.exists(dst):
os.makedirs(dst)
zf = zipfile.ZipFile(file=filepath)
zf.extractall(dst)
def load_audio(filename, b_normalize=True):
"""Load the audiofile at the provided filename using scipy.io.wavfile.
Optionally normalizes the audio to the maximum value.
Parameters
----------
filename : str
File to load.
b_normalize : bool, optional
Normalize to the maximum value.
"""
sr, s = wavfile.read(filename)
if b_normalize:
s = s.astype(np.float32)
s = (s / np.max(np.abs(s)))
s -= np.mean(s)
return s
def corrupt(x):
"""Take an input tensor and add uniform masking.
Parameters
----------
x : Tensor/Placeholder
Input to corrupt.
Returns
-------
x_corrupted : Tensor
50 pct of values corrupted.
"""
return tf.multiply(x, tf.cast(tf.random_uniform(shape=tf.shape(x),
minval=0,
maxval=2,
dtype=tf.int32), tf.float32))
def interp(l, r, n_samples):
"""Intepolate between the arrays l and r, n_samples times.
Parameters
----------
l : np.ndarray
Left edge
r : np.ndarray
Right edge
n_samples : int
Number of samples
Returns
-------
arr : np.ndarray
        Interpolated array
"""
return np.array([
l + step_i / (n_samples - 1) * (r - l)
for step_i in range(n_samples)])
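# Editor's note: a doctest-style sketch (hypothetical values): interpolating between
# two 1-element arrays in 3 steps gives the rows [0.0], [0.5], [1.0].
# >>> interp(np.array([0.0]), np.array([1.0]), 3).ravel().tolist()
# [0.0, 0.5, 1.0]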
def make_latent_manifold(corners, n_samples):
"""Create a 2d manifold out of the provided corners: n_samples * n_samples.
Parameters
----------
corners : list of np.ndarray
        The four corners to interpolate.
n_samples : int
Number of samples to use in interpolation.
Returns
-------
arr : np.ndarray
Stacked array of all 2D interpolated samples
"""
left = interp(corners[0], corners[1], n_samples)
right = interp(corners[2], corners[3], n_samples)
embedding = []
for row_i in range(n_samples):
embedding.append(interp(left[row_i], right[row_i], n_samples))
return np.vstack(embedding)
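# Editor's note: a sketch of the expected output shape (hypothetical corners):
# four 2-D corner vectors with n_samples=3 give a (3 * 3, 2) stack of latent points.
# >>> corners = [np.zeros(2), np.ones(2), np.full(2, 2.0), np.full(2, 3.0)]
# >>> make_latent_manifold(corners, 3).shape
# (9, 2)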
def imcrop_tosquare(img):
"""Make any image a square image.
Parameters
----------
img : np.ndarray
Input image to crop, assumed at least 2d.
Returns
-------
crop : np.ndarray
Cropped image.
"""
size = np.min(img.shape[:2])
extra = img.shape[:2] - size
crop = img
for i in np.flatnonzero(extra):
crop = np.take(crop, extra[i] // 2 + np.r_[:size], axis=i)
return crop
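# Editor's note: a doctest-style sketch (hypothetical image size): the longer
# dimension is center-cropped down to the shorter one.
# >>> imcrop_tosquare(np.zeros((100, 60, 3))).shape
# (60, 60, 3)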
def slice_montage(montage, img_h, img_w, n_imgs):
"""Slice a montage image into n_img h x w images.
Performs the opposite of the montage function. Takes a montage image and
    slices it back into an N x H x W x C image.
Parameters
----------
montage : np.ndarray
Montage image to slice.
img_h : int
Height of sliced image
img_w : int
Width of sliced image
n_imgs : int
Number of images to slice
Returns
-------
sliced : np.ndarray
Sliced images as 4d array.
"""
sliced_ds = []
for i in range(int(np.sqrt(n_imgs))):
for j in range(int(np.sqrt(n_imgs))):
sliced_ds.append(montage[
1 + i + i * img_h:1 + i + (i + 1) * img_h,
1 + j + j * img_w:1 + j + (j + 1) * img_w])
return np.array(sliced_ds)
def montage(images, saveto='montage.png'):
"""Draw all images as a montage separated by 1 pixel borders.
Also saves the file to the destination specified by `saveto`.
Parameters
----------
images : numpy.ndarray
Input array to create montage of. Array should be:
batch x height x width x channels.
saveto : str
Location to save the resulting montage image.
Returns
-------
m : numpy.ndarray
Montage image.
"""
if isinstance(images, list):
images = np.array(images)
img_h = images.shape[1]
img_w = images.shape[2]
n_plots = int(np.ceil(np.sqrt(images.shape[0])))
if len(images.shape) == 4 and images.shape[3] == 3:
m = np.ones(
(images.shape[1] * n_plots + n_plots + 1,
images.shape[2] * n_plots + n_plots + 1, 3)) * 0.5
else:
m = np.ones(
(images.shape[1] * n_plots + n_plots + 1,
images.shape[2] * n_plots + n_plots + 1)) * 0.5
for i in range(n_plots):
for j in range(n_plots):
this_filter = i * n_plots + j
if this_filter < images.shape[0]:
this_img = images[this_filter]
m[1 + i + i * img_h:1 + i + (i + 1) * img_h,
1 + j + j * img_w:1 + j + (j + 1) * img_w] = this_img
plt.imsave(arr=m, fname=saveto)
return m
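# Editor's note: a sketch of the layout (hypothetical images and output path): four
# 8x8 RGB images tile into a 2x2 grid with 1-pixel borders, i.e. 2*8 + 3 = 19 per side.
# >>> montage(np.zeros((4, 8, 8, 3)), saveto='/tmp/montage_example.png').shape
# (19, 19, 3)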
def montage_filters(W):
"""Draws all filters (n_input * n_output filters) as a
montage image separated by 1 pixel borders.
Parameters
----------
W : Tensor
Input tensor to create montage of.
Returns
-------
m : numpy.ndarray
Montage image.
"""
W = np.reshape(W, [W.shape[0], W.shape[1], 1, W.shape[2] * W.shape[3]])
n_plots = int(np.ceil(np.sqrt(W.shape[-1])))
m = np.ones(
(W.shape[0] * n_plots + n_plots + 1,
W.shape[1] * n_plots + n_plots + 1)) * 0.5
for i in range(n_plots):
for j in range(n_plots):
this_filter = i * n_plots + j
if this_filter < W.shape[-1]:
m[1 + i + i * W.shape[0]:1 + i + (i + 1) * W.shape[0],
1 + j + j * W.shape[1]:1 + j + (j + 1) * W.shape[1]] = (
np.squeeze(W[:, :, :, this_filter]))
return m
def get_celeb_files(dst='img_align_celeba', max_images=100):
"""Download the first 100 images of the celeb dataset.
Files will be placed in a directory 'img_align_celeba' if one
doesn't exist.
Returns
-------
files : list of strings
Locations to the first 100 images of the celeb net dataset.
"""
# Create a directory
if not os.path.exists(dst):
os.mkdir(dst)
# Now perform the following 100 times:
for img_i in range(1, max_images + 1):
# create a string using the current loop counter
f = '000%03d.jpg' % img_i
if not os.path.exists(os.path.join(dst, f)):
# and get the url with that string appended the end
url = 'https://s3.amazonaws.com/cadl/celeb-align/' + f
# We'll print this out to the console so we can see how far we've gone
print(url, end='\r')
# And now download the url to a location inside our new directory
urllib.request.urlretrieve(url, os.path.join(dst, f))
files = [os.path.join(dst, file_i)
for file_i in os.listdir(dst)
if '.jpg' in file_i][:max_images]
return files
def get_celeb_imgs(max_images=100):
"""Load the first `max_images` images of the celeb dataset.
Returns
-------
imgs : list of np.ndarray
List of the first 100 images from the celeb dataset
"""
return [plt.imread(f_i) for f_i in get_celeb_files(max_images=max_images)]
def gauss(mean, stddev, ksize):
"""Use Tensorflow to compute a Gaussian Kernel.
Parameters
----------
mean : float
Mean of the Gaussian (e.g. 0.0).
stddev : float
Standard Deviation of the Gaussian (e.g. 1.0).
ksize : int
Size of kernel (e.g. 16).
Returns
-------
kernel : np.ndarray
Computed Gaussian Kernel using Tensorflow.
"""
g = tf.Graph()
with tf.Session(graph=g):
x = tf.linspace(-3.0, 3.0, ksize)
z = (tf.exp(tf.negative(tf.pow(x - mean, 2.0) /
(2.0 * tf.pow(stddev, 2.0)))) *
(1.0 / (stddev * tf.sqrt(2.0 * 3.1415))))
return z.eval()
def gauss2d(mean, stddev, ksize):
"""Use Tensorflow to compute a 2D Gaussian Kernel.
Parameters
----------
mean : float
Mean of the Gaussian (e.g. 0.0).
stddev : float
Standard Deviation of the Gaussian (e.g. 1.0).
ksize : int
Size of kernel (e.g. 16).
Returns
-------
kernel : np.ndarray
Computed 2D Gaussian Kernel using Tensorflow.
"""
z = gauss(mean, stddev, ksize)
g = tf.Graph()
with tf.Session(graph=g):
z_2d = tf.matmul(tf.reshape(z, [ksize, 1]), tf.reshape(z, [1, ksize]))
return z_2d.eval()
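# Editor's note: a sketch of the expected kernel shape (TF1-style graph/session API
# as used above; hypothetical parameters):
# >>> gauss2d(mean=0.0, stddev=1.0, ksize=16).shape
# (16, 16)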
def convolve(img, kernel):
"""Use Tensorflow to convolve a 4D image with a 4D kernel.
Parameters
----------
img : np.ndarray
4-dimensional image shaped N x H x W x C
kernel : np.ndarray
4-dimensional image shape K_H, K_W, C_I, C_O corresponding to the
kernel's height and width, the number of input channels, and the
number of output channels. Note that C_I should = C.
Returns
-------
result : np.ndarray
Convolved result.
"""
g = tf.Graph()
with tf.Session(graph=g):
convolved = tf.nn.conv2d(img, kernel, strides=[1, 1, 1, 1], padding='SAME')
res = convolved.eval()
return res
def gabor(ksize=32):
"""Use Tensorflow to compute a 2D Gabor Kernel.
Parameters
----------
ksize : int, optional
Size of kernel.
Returns
-------
gabor : np.ndarray
Gabor kernel with ksize x ksize dimensions.
"""
g = tf.Graph()
with tf.Session(graph=g):
z_2d = gauss2d(0.0, 1.0, ksize)
ones = tf.ones((1, ksize))
ys = tf.sin(tf.linspace(-3.0, 3.0, ksize))
ys = tf.reshape(ys, [ksize, 1])
wave = tf.matmul(ys, ones)
gabor = tf.multiply(wave, z_2d)
return gabor.eval()
def build_submission(filename, file_list, optional_file_list=()):
"""Helper utility to check homework assignment submissions and package them.
Parameters
----------
filename : str
Output zip file name
file_list : tuple
Tuple of files to include
"""
# check each file exists
for part_i, file_i in enumerate(file_list):
if not os.path.exists(file_i):
print('\nYou are missing the file {}. '.format(file_i) +
'It does not look like you have completed Part {}.'.format(
part_i + 1))
def zipdir(path, zf):
for root, dirs, files in os.walk(path):
for file in files:
# make sure the files are part of the necessary file list
if file.endswith(file_list) or file.endswith(optional_file_list):
zf.write(os.path.join(root, file))
# create a zip file with the necessary files
zipf = zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED)
zipdir('.', zipf)
zipf.close()
print('Your assignment zip file has been created!')
print('Now submit the file:\n{}\nto Kadenze for grading!'.format(
os.path.abspath(filename)))
def normalize(a, s=0.1):
'''Normalize the image range for visualization'''
return np.uint8(np.clip(
(a - a.mean()) / max(a.std(), 1e-4) * s + 0.5,
0, 1) * 255)
# %%
def weight_variable(shape, **kwargs):
'''Helper function to create a weight variable initialized with
a normal distribution
Parameters
----------
shape : list
Size of weight variable
'''
if isinstance(shape, list):
initial = tf.random_normal(tf.stack(shape), mean=0.0, stddev=0.01)
initial.set_shape(shape)
else:
initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
return tf.Variable(initial, **kwargs)
# %%
def bias_variable(shape, **kwargs):
'''Helper function to create a bias variable initialized with
a constant value.
Parameters
----------
shape : list
Size of weight variable
'''
if isinstance(shape, list):
initial = tf.random_normal(tf.stack(shape), mean=0.0, stddev=0.01)
initial.set_shape(shape)
else:
initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
return tf.Variable(initial, **kwargs)
def binary_cross_entropy(z, x, name=None):
"""Binary Cross Entropy measures cross entropy of a binary variable.
loss(x, z) = - sum_i (x[i] * log(z[i]) + (1 - x[i]) * log(1 - z[i]))
Parameters
----------
z : tf.Tensor
A `Tensor` of the same type and shape as `x`.
x : tf.Tensor
A `Tensor` of type `float32` or `float64`.
"""
with tf.variable_scope(name or 'bce'):
eps = 1e-12
return (-(x * tf.log(z + eps) +
(1. - x) * tf.log(1. - z + eps)))
def conv2d(x, n_output,
k_h=5, k_w=5, d_h=2, d_w=2,
padding='SAME', name='conv2d', reuse=None):
"""Helper for creating a 2d convolution operation.
Parameters
----------
x : tf.Tensor
Input tensor to convolve.
n_output : int
Number of filters.
k_h : int, optional
Kernel height
k_w : int, optional
Kernel width
d_h : int, optional
Height stride
d_w : int, optional
Width stride
padding : str, optional
Padding type: "SAME" or "VALID"
name : str, optional
Variable scope
Returns
-------
op : tf.Tensor
Output of convolution
"""
with tf.variable_scope(name or 'conv2d', reuse=reuse):
W = tf.get_variable(
name='W',
shape=[k_h, k_w, x.get_shape()[-1], n_output],
initializer=tf.contrib.layers.xavier_initializer_conv2d())
conv = tf.nn.conv2d(
name='conv',
input=x,
filter=W,
strides=[1, d_h, d_w, 1],
padding=padding)
b = tf.get_variable(
name='b',
shape=[n_output],
initializer=tf.constant_initializer(0.0))
h = tf.nn.bias_add(
name='h',
value=conv,
bias=b)
return h, W
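# Editor's note: a usage sketch under TF1-style static graphs (hypothetical shapes):
# 5x5 kernels with stride 2 and 'SAME' padding halve each spatial dimension.
# >>> x = tf.placeholder(tf.float32, [None, 32, 32, 3])
# >>> h, W = conv2d(x, n_output=16, name='conv_example')
# >>> h.get_shape().as_list()
# [None, 16, 16, 16]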
def deconv2d(x, n_output_h, n_output_w, n_output_ch, n_input_ch=None,
k_h=5, k_w=5, d_h=2, d_w=2,
padding='SAME', name='deconv2d', reuse=None):
"""Deconvolution helper.
Parameters
----------
x : tf.Tensor
Input tensor to convolve.
n_output_h : int
Height of output
n_output_w : int
Width of output
n_output_ch : int
Number of filters.
k_h : int, optional
Kernel height
k_w : int, optional
Kernel width
d_h : int, optional
Height stride
d_w : int, optional
Width stride
padding : str, optional
Padding type: "SAME" or "VALID"
name : str, optional
Variable scope
Returns
-------
op : tf.Tensor
Output of deconvolution
"""
with tf.variable_scope(name or 'deconv2d', reuse=reuse):
W = tf.get_variable(
name='W',
shape=[k_h, k_w, n_output_ch, n_input_ch or x.get_shape()[-1]],
initializer=tf.contrib.layers.xavier_initializer_conv2d())
conv = tf.nn.conv2d_transpose(
name='conv_t',
value=x,
filter=W,
output_shape=tf.stack(
[tf.shape(x)[0], n_output_h, n_output_w, n_output_ch]),
strides=[1, d_h, d_w, 1],
padding=padding)
conv.set_shape([None, n_output_h, n_output_w, n_output_ch])
b = tf.get_variable(
name='b',
shape=[n_output_ch],
initializer=tf.constant_initializer(0.0))
h = tf.nn.bias_add(name='h', value=conv, bias=b)
return h, W
def lrelu(features, leak=0.2):
"""Leaky rectifier.
Parameters
----------
features : tf.Tensor
Input to apply leaky rectifier to.
leak : float, optional
Percentage of leak.
Returns
-------
op : tf.Tensor
Resulting output of applying leaky rectifier activation.
"""
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * features + f2 * abs(features)
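# Editor's note: a doctest-style sketch (plain floats work here since only *, + and
# abs() are used): with the default leak of 0.2, negative inputs are scaled by 0.2.
# >>> round(lrelu(-1.0), 6), round(lrelu(2.0), 6)
# (-0.2, 2.0)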
def linear(x, n_output, name=None, activation=None, reuse=None):
"""Fully connected layer.
Parameters
----------
x : tf.Tensor
Input tensor to connect
n_output : int
Number of output neurons
name : None, optional
Scope to apply
Returns
-------
h, W : tf.Tensor, tf.Tensor
Output of fully connected layer and the weight matrix
"""
if len(x.get_shape()) != 2:
x = flatten(x, reuse=reuse)
n_input = x.get_shape().as_list()[1]
with tf.variable_scope(name or "fc", reuse=reuse):
W = tf.get_variable(
name='W',
shape=[n_input, n_output],
dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable(
name='b',
shape=[n_output],
dtype=tf.float32,
initializer=tf.constant_initializer(0.0))
h = tf.nn.bias_add(
name='h',
value=tf.matmul(x, W),
bias=b)
if activation:
h = activation(h)
return h, W
def flatten(x, name=None, reuse=None):
"""Flatten Tensor to 2-dimensions.
Parameters
----------
x : tf.Tensor
Input tensor to flatten.
name : None, optional
Variable scope for flatten operations
Returns
-------
flattened : tf.Tensor
Flattened tensor.
"""
with tf.variable_scope('flatten'):
dims = x.get_shape().as_list()
if len(dims) == 4:
flattened = tf.reshape(
x,
shape=[-1, dims[1] * dims[2] * dims[3]])
elif len(dims) == 2 or len(dims) == 1:
flattened = x
else:
raise ValueError('Expected n dimensions of 1, 2 or 4. Found:',
len(dims))
return flattened
def to_tensor(x):
"""Convert 2 dim Tensor to a 4 dim Tensor ready for convolution.
Performs the opposite of flatten(x). If the tensor is already 4-D, this
returns the same as the input, leaving it unchanged.
Parameters
----------
    x : tf.Tensor
Input 2-D tensor. If 4-D already, left unchanged.
Returns
-------
x : tf.Tensor
4-D representation of the input.
Raises
------
ValueError
        If the tensor is neither 2-D nor 4-D.
"""
if len(x.get_shape()) == 2:
n_input = x.get_shape().as_list()[1]
x_dim = np.sqrt(n_input)
if x_dim == int(x_dim):
x_dim = int(x_dim)
x_tensor = tf.reshape(
x, [-1, x_dim, x_dim, 1], name='reshape')
elif np.sqrt(n_input / 3) == int(np.sqrt(n_input / 3)):
x_dim = int(np.sqrt(n_input / 3))
x_tensor = tf.reshape(
x, [-1, x_dim, x_dim, 3], name='reshape')
else:
x_tensor = tf.reshape(
x, [-1, 1, 1, n_input], name='reshape')
elif len(x.get_shape()) == 4:
x_tensor = x
else:
raise ValueError('Unsupported input dimensions')
return x_tensor
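# Editor's note: a sketch of the 2-D -> 4-D reshaping (hypothetical sizes, TF1-style
# placeholder): a batch of flattened 784-dim vectors becomes 28x28x1 images.
# >>> x = tf.placeholder(tf.float32, [None, 784])
# >>> to_tensor(x).get_shape().as_list()
# [None, 28, 28, 1]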
|
{
"content_hash": "814f446bb937935399cb1042dcf69662",
"timestamp": "",
"source": "github",
"line_count": 782,
"max_line_length": 83,
"avg_line_length": 26.888746803069054,
"alnum_prop": 0.5469634279735578,
"repo_name": "niazangels/CADL",
"id": "f6efedd2a426da7b409bf88327231b6192e0d8c4",
"size": "21027",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "session-5/libs/utils.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "70654230"
},
{
"name": "Python",
"bytes": "365894"
},
{
"name": "Shell",
"bytes": "429"
}
],
"symlink_target": ""
}
|
import time
import gc
import sys
import mxnet as mx
from mxnet.gluon import nn
from mxnet.contrib import quantization
#shape, num_hidden:
sizes = [
(( 1, 224), 512),
(( 1, 224), 4096),
(( 16, 1024), 1024),
(( 32, 4096), 1024),
(( 32, 4096), 4096),
((512, 512), 4096)]
rounds = 1000
warmup = 10
test_header = "--no_test_header" not in sys.argv
table_header = "--no_table_header" not in sys.argv
table_left_colums = "--no_size_column" not in sys.argv
dump_graph = "--dump_graph" in sys.argv
def dump_graph_fn(net, postfix):
if dump_graph:
net.export("/tmp/fc_add_" + postfix)
def operator_string(elemwise_add):
return 'elemwise_add' if elemwise_add else 'npi_add'
def print_header(header):
print("\n")
print(header if test_header else "", "\n")
if table_header:
if table_left_colums:
print("| Shape | Hidden | Mean [ms] |" )
print("|------------:|-------:|----------:|" )
else:
print(" Mean [ms] |" )
print("----------:|" )
def print_value(shape, hidden, mean):
if table_left_colums:
print("| ({:4},{:4}) | {:6} | {:9.3f} |".format(shape[0], shape[1], hidden, mean))
else:
print(" {:9.3f} |".format(mean))
def measure(net, data0, data1, data2, shape, nhid):
mx.nd.waitall()
gc.collect()
gc.disable()
for i in range(rounds + warmup):
if i == warmup:
start_time = time.time()
o = net(data0, data1, data2)
o.wait_to_read()
end_time = time.time()
run_time = (end_time - start_time)
print_value(shape, nhid, 1000 * run_time / rounds)
gc.enable()
class FCWithSum(nn.HybridBlock):
def __init__(self, num_in, num_hidden, elemwise_add, **kwargs):
super(FCWithSum, self).__init__(**kwargs)
self.fc0 = nn.Dense(units=num_hidden, in_units=num_in)
self.fc1 = nn.Dense(units=num_hidden)
self.elemwise_add = elemwise_add
def forward(self, data0, data1, data2):
_fc0 = self.fc0(data0)
_fc1 = self.fc1(data1)
if self.elemwise_add:
_sum0 = mx.nd.elemwise_add(data2.as_nd_ndarray(), _fc0.as_nd_ndarray()).as_np_ndarray()
_sum1 = mx.nd.elemwise_add(_fc1.as_nd_ndarray(), _sum0.as_nd_ndarray()).as_np_ndarray()
else:
_sum0 = data2 + _fc0
_sum1 = _fc1 + _sum0
return _sum1
def benchmark_float(elemwise_add, broadcast=False):
header = operator_string(elemwise_add) + ', float' + (' , broadcast' if broadcast else "")
print_header(header)
for shape, nhid in sizes:
net = FCWithSum(shape[1], nhid, elemwise_add)
net.initialize()
net.hybridize(static_alloc=True, static_shape=True)
data0 = mx.np.random.uniform(size=shape, low=-1.0, high=1.0)
data1 = mx.np.random.uniform(size=shape, low=-1.0, high=1.0)
shape2 = (shape[0], nhid)
if broadcast and not elemwise_add:
# broadcast is allowed only for npi_add version
shape2 = (1, 1)
data2 = mx.np.random.uniform(size=shape2, low=-1.0, high=1.0)
net.optimize_for(data0, data1, data2, backend='ONEDNN')
measure(net, data0, data1, data2, shape, nhid)
dump_graph_fn(net, operator_string(elemwise_add) + '_float')
class CalibIter(mx.io.DataIter):
def __init__(self, batch, data_shape, batch_size):
super(CalibIter, self).__init__(batch_size)
self.label_shape = (batch_size,)
self.data_shape = data_shape
if isinstance(data_shape, tuple):
self.provide_data = [('data', data_shape)]
else:
self.provide_data = data_shape
self.provide_label = []
self.batch = batch
def __iter__(self):
yield self.batch
def benchmark_int8(quantize_mode, quantize_granularity, elemwise_add, broadcast = False):
header = operator_string(elemwise_add) + ', mode = ' + quantize_mode + \
', granularity = ' + quantize_granularity + (' , broadcast' if broadcast else "")
print_header(header)
for shape, nhid in sizes:
net = FCWithSum(shape[1], nhid, elemwise_add)
net.initialize()
net.hybridize(static_alloc=True, static_shape=True)
data0 = mx.np.random.uniform(size=shape, low=-1.0, high=1.0)
data1 = mx.np.random.uniform(size=shape, low=-1.0, high=1.0)
shape2 = (shape[0], nhid)
if broadcast and not elemwise_add:
# broadcast is allowed only for npi_add
shape2 = (shape[0], 1)
data2 = mx.np.random.uniform(size=shape2, low=-1.0, high=1.0)
data = mx.gluon.data.ArrayDataset(data0, data1, data2)
calib_data = mx.gluon.data.DataLoader(data, batch_size=1)
net = quantization.quantize_net(net,
device=mx.cpu(),
exclude_layers=None,
exclude_operators=None,
calib_mode='naive',
calib_data=calib_data,
num_calib_batches=1,
quantize_mode=quantize_mode,
quantize_granularity=quantize_granularity
)
net.hybridize(static_alloc=True, static_shape=True)
measure(net, data0, data1, data2, shape, nhid)
dump_graph_fn(net, operator_string(elemwise_add) + \
'_' + str(quantize_mode) + '_' + str(quantize_granularity))
for elemwise_add in [True, False]:
benchmark_float(elemwise_add)
for quantize_mode in ['smart', 'full']:
for quantize_granularity in ['tensor-wise', 'channel-wise']:
for elemwise_add in [True, False]:
benchmark_int8(quantize_mode, quantize_granularity, elemwise_add)
# Benchmark FC + npi_add with broadcasted input
benchmark_float(False, True)
# Benchmark quantized FC + npi_add with broadcasted input
for quantize_mode in ['smart', 'full']:
for quantize_granularity in ['tensor-wise', 'channel-wise']:
benchmark_int8(quantize_mode, quantize_granularity, False, True)
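# Editor's note: an illustrative invocation (uses only the flags parsed at the top of
# this script; with --dump_graph the fused graphs are exported under /tmp/fc_add_*):
#   python fc_add.py --no_size_column --dump_graph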
|
{
"content_hash": "ab7a0dd55870ea0542d499cbce82b656",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 99,
"avg_line_length": 38.608695652173914,
"alnum_prop": 0.5701415701415702,
"repo_name": "DickJC123/mxnet",
"id": "6cf2f929ecedd9b9f4f0a6fe3ed5d989542e3ea4",
"size": "7002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchmark/python/dnnl/fc_add.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "151356"
},
{
"name": "C++",
"bytes": "12029257"
},
{
"name": "CMake",
"bytes": "213440"
},
{
"name": "Cuda",
"bytes": "1528224"
},
{
"name": "Cython",
"bytes": "26285"
},
{
"name": "Dockerfile",
"bytes": "54893"
},
{
"name": "Groovy",
"bytes": "132682"
},
{
"name": "Jupyter Notebook",
"bytes": "1889643"
},
{
"name": "Makefile",
"bytes": "8991"
},
{
"name": "PowerShell",
"bytes": "6699"
},
{
"name": "Python",
"bytes": "8615578"
},
{
"name": "Shell",
"bytes": "172547"
}
],
"symlink_target": ""
}
|
"""Functional tests for common/users.py."""
__author__ = [
'johncox@google.com (John Cox)',
]
import logging
import webapp2
from common import users
from tests.functional import actions
from google.appengine.api import users as gae_users
class TestHandler(webapp2.RequestHandler):
def get(self):
logging.warning('In get')
class TestRequestContext(webapp2.RequestContext):
def __enter__(self):
logging.warning('In __enter__')
return super(TestRequestContext, self).__enter__()
def __exit__(self, exc_type, exc_value, traceback):
logging.warning('In __exit__')
return super(TestRequestContext, self).__exit__(
exc_type, exc_value, traceback)
class TestService(users.AppEnginePassthroughUsersService):
@classmethod
def get_request_context_class(cls):
return TestRequestContext
class TestBase(actions.TestBase):
def setUp(self):
super(TestBase, self).setUp()
self.old_users_service = users.UsersServiceManager.get()
def tearDown(self):
users.UsersServiceManager.set(self.old_users_service)
super(TestBase, self).tearDown()
class AppEnginePassthroughUsersServiceTest(TestBase):
def setUp(self):
super(AppEnginePassthroughUsersServiceTest, self).setUp()
self.destination_url = 'http://destination'
self.email = 'user@example.com'
users.UsersServiceManager.set(
users.AppEnginePassthroughUsersService)
def assert_service_results_equal_and_not_none(
self, users_result, gae_users_result):
self.assertIsNotNone(users_result)
self.assertIsNotNone(gae_users_result)
self.assertEqual(users_result, gae_users_result)
def test_create_login_url_delegates_to_gae_users_service(self):
users_result = users.create_login_url(
dest_url=self.destination_url, _auth_domain='is_ignored',
federated_identity='federated_identity')
gae_users_result = gae_users.create_login_url(
dest_url=self.destination_url, _auth_domain='is_ignored',
federated_identity='federated_identity')
self.assert_service_results_equal_and_not_none(
users_result, gae_users_result)
def test_create_logout_url_delegates_to_gae_users_service(self):
users_result = users.create_logout_url('destination')
gae_users_result = gae_users.create_logout_url('destination')
self.assert_service_results_equal_and_not_none(
users_result, gae_users_result)
def test_federated_email_resolver_returns_none(self):
service = users.UsersServiceManager.get()
self.assertIsNone(
service.get_federated_email_resolver_class().get('any_user_id'))
def test_get_current_user_delegates_to_gae_users_service(self):
actions.login(self.email)
users_result = users.get_current_user()
gae_users_result = gae_users.get_current_user()
self.assert_service_results_equal_and_not_none(
users_result, gae_users_result)
def test_get_email_update_policy_class_returns_noop_impl(self):
service = users.UsersServiceManager.get()
email_update_policy = service.get_email_update_policy_class()
self.assertIsNone(email_update_policy.apply('unused'))
self.assertIs(users.EmailUpdatePolicy, email_update_policy)
def test_get_federated_email_resolver_class_returns_noop_impl(self):
service = users.UsersServiceManager.get()
email_resolver = service.get_federated_email_resolver_class()
self.assertIsNone(email_resolver.get('unused'))
self.assertIs(users.FederatedEmailResolver, email_resolver)
def test_get_mailer_returns_noop_impl(self):
service = users.UsersServiceManager.get()
mailer = service.get_mailer_class()
self.assertEquals((None, None), mailer.send_async('unused', 'unused'))
self.assertIs(users.Mailer, mailer)
def test_get_template_resolver_class_returns_noop_impl(self):
service = users.UsersServiceManager.get()
template_resolver = service.get_template_resolver_class()
self.assertIsNone(
template_resolver.get('unused', unused_locale='unused'))
self.assertEquals(
(None, None, None),
template_resolver.get_email_templates(
'unused', unused_locale='unused'))
def test_get_service_name(self):
self.assertEqual(
'common.users.AppEnginePassthroughUsersService',
users.AppEnginePassthroughUsersService.get_service_name())
def test_is_current_user_admin_delegates_to_gae_users_service(self):
actions.login(self.email, is_admin=True)
users_result = users.is_current_user_admin()
        gae_users_result = gae_users.is_current_user_admin()
self.assertTrue(users_result)
self.assertTrue(gae_users_result)
class AuthInterceptorAndRequestHooksTest(TestBase):
LOG_LEVEL = logging.WARNING
def getApp(self):
return users.AuthInterceptorWSGIApplication([('/', TestHandler)])
def setUp(self):
super(AuthInterceptorAndRequestHooksTest, self).setUp()
users.UsersServiceManager.set(TestService)
def test_request_context_hooks_bracket_request_methods(self):
self.testapp.get('/')
self.assertLogContains(
'WARNING: In __enter__\n'
'WARNING: In get\n'
'WARNING: In __exit__\n')
def test_requests_raise_descriptive_exception_if_users_service_unset(self):
users.UsersServiceManager.set(None)
with self.assertRaisesRegexp(Exception, 'Users service not set.'):
response = self.testapp.get('/')
class PublicExceptionsAndClassesIdentityTests(TestBase):
def assert_all_is(self, expected_list, actual):
for expected in expected_list:
self.assertIs(expected, actual)
def test_users_classes_are_app_engine_users_classes(self):
self.assert_all_is([users.User, users._User], gae_users.User)
def test_users_exceptions_are_app_engine_users_exceptions(self):
self.assert_all_is([users.Error, users._Error], gae_users.Error)
self.assert_all_is(
[users.NotAllowedError, users._NotAllowedError],
gae_users.NotAllowedError)
self.assert_all_is(
[users.RedirectTooLongError, users._RedirectTooLongError],
gae_users.RedirectTooLongError)
self.assert_all_is(
[users.UserNotFoundError, users._UserNotFoundError],
gae_users.UserNotFoundError)
|
{
"content_hash": "d5f459efeabe662932bd744b9666203e",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 79,
"avg_line_length": 34.675392670157066,
"alnum_prop": 0.6717499622527555,
"repo_name": "wijnandb/CodeCult-Scratch",
"id": "a2f5de029543e9f6aa6fbff5c7df79e1d74c929f",
"size": "7221",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/functional/common_users.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "84710"
},
{
"name": "HTML",
"bytes": "278836"
},
{
"name": "JavaScript",
"bytes": "545967"
},
{
"name": "Python",
"bytes": "4349997"
},
{
"name": "Shell",
"bytes": "29051"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/corellia/shared_corl_imprv_wall_4x16_s01.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "ed9f2139d0567717c48363860db3db47",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 24.46153846153846,
"alnum_prop": 0.6981132075471698,
"repo_name": "anhstudios/swganh",
"id": "b5f325df9132e8457168299584077e450ba82f06",
"size": "463",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/static/structure/corellia/shared_corl_imprv_wall_4x16_s01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
import random as rd
import rospy
from std_msgs.msg import *
if __name__ == '__main__':
rospy.init_node('std_msgs')
pub_bool = rospy.Publisher('Bool', Bool, queue_size=10)
pub_colorrgba = rospy.Publisher('ColorRGBA', ColorRGBA, queue_size=10)
pub_duration = rospy.Publisher('Duration', Duration, queue_size=10)
pub_empty = rospy.Publisher('Empty', Empty, queue_size=10)
pub_float32 = rospy.Publisher('Float32', Float32, queue_size=10)
pub_float64 = rospy.Publisher('Float64', Float64, queue_size=10)
pub_int16 = rospy.Publisher('Int16', Int16, queue_size=10)
pub_int32 = rospy.Publisher('Int32', Int32, queue_size=10)
pub_int64 = rospy.Publisher('Int64', Int64, queue_size=10)
pub_int8 = rospy.Publisher('Int8', Int8, queue_size=10)
pub_string = rospy.Publisher('String', String, queue_size=10)
pub_time = rospy.Publisher('Time', Time, queue_size=10)
pub_uint16 = rospy.Publisher('UInt16', UInt16, queue_size=10)
pub_uint32 = rospy.Publisher('UInt32', UInt32, queue_size=10)
pub_uint64 = rospy.Publisher('UInt64', UInt64, queue_size=10)
pub_uint8 = rospy.Publisher('UInt8', UInt8, queue_size=10)
r = rospy.Rate(2) # [Hz]
while not rospy.is_shutdown():
pub_bool.publish(Bool(data=rd.choice([True, False])))
pub_colorrgba.publish(ColorRGBA(r=255, g=255, b=255, a=0))
pub_empty.publish(Empty())
pub_float32.publish(Float32(data=rd.uniform(-10.0, 10.0)))
pub_float64.publish(Float64(data=rd.uniform(-10.0, 10.0)))
pub_string.publish(String(data=rd.choice(['hello', 'world'])))
pub_uint16.publish(UInt16(data=rd.choice([0, 1, 2])))
pub_uint32.publish(UInt32(data=rd.choice([0, 1, 2])))
pub_uint64.publish(UInt64(data=rd.choice([0, 1, 2])))
pub_uint8.publish(UInt8(data=rd.choice([0, 1, 2])))
r.sleep()
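# Hedged companion sketch (not part of the original script): a minimal
# subscriber for one of the topics published above could look like this,
# assuming the same topic name and message type:
#
#     def on_bool(msg):
#         rospy.loginfo('Bool: %s', msg.data)
#     rospy.Subscriber('Bool', Bool, on_bool)
#     rospy.spin()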
|
{
"content_hash": "7ecaa5800bd91d74d32700cd99b916de",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 72,
"avg_line_length": 44.625,
"alnum_prop": 0.6834733893557423,
"repo_name": "gaug-cns/ros-control-center",
"id": "aa09b4495b6ea9e4c6d8dd272ccc8f1174b31922",
"size": "1804",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ros_test_project/scripts/node_std_msgs.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "6213"
},
{
"name": "CSS",
"bytes": "790"
},
{
"name": "HTML",
"bytes": "11909"
},
{
"name": "Python",
"bytes": "6241"
},
{
"name": "TypeScript",
"bytes": "21191"
}
],
"symlink_target": ""
}
|
"This module provides various low-level inter-Actor transport implementations."
from datetime import timedelta
from thespian.system.timing import ExpirationTimer
from thespian.system.utilis import thesplog
import logging
DEFAULT_MAX_TRANSMIT_PERIOD = timedelta(minutes=5)
TRANSMIT_RETRY_PERIOD = timedelta(seconds=35)
MAX_TRANSMIT_RETRIES = 20
MAX_SHOWLEN = 150
MAX_BACKOFF_DELAY = timedelta(seconds=7, milliseconds=329)
MIN_BACKOFF_DELAY = timedelta(milliseconds=20)
BACKOFF_FACTOR = 1.7
class Thespian__UpdateWork(object):
"""Returned from the transmit run() method to cause the transmit send
to be called with this same object. This object is not
actually transmitted, but this send causes the transmit queues
       to be checked in the context of the main thread, which has a
chance of seeing alternative work (like a signal-driven exit
request).
"""
pass
class Thespian__Run__Result(object):
"""Base class for values returned from the transport run() method. In
general, a truthy value means continue and a false-ish value
(the default) means halt.
"""
def __nonzero__(self): return False
def __bool__(self): return False
class Thespian__Run_Expired(Thespian__Run__Result):
"""Returned from the transport run() method if the run time has expired."""
pass
class Thespian__Run_Terminated(Thespian__Run__Result):
"""Returned from the transport run() method if the transport has been
shutdown and terminated and is no longer functional."""
pass
class Thespian__Run_Errored(Thespian__Run_Terminated):
"""Returned from the transport run() method if an internal error has
occurred. Usually terminal"""
def __init__(self, err):
self.error = err
class Thespian__Run_HandlerResult(Thespian__Run__Result):
"""Returned handler result (false-ish). Individual handlers should
return a simple value that the transport's run method wraps in
this object.
"""
def __init__(self, val):
self.return_value = val
def __nonzero__(self): return self.return_value != 0
def __bool__(self): return bool(self.return_value)
# ----------------------------------------------------------------------
class TransportInit__Base(object): pass
class ExternalInterfaceTransportInit(TransportInit__Base):
"""Used as first argument to Transport __init__ to indicate that this
is an external process interfacing to the ActorSystem via the
Transport.
"""
pass
class TransmitOnly(object):
"""Passed *as a class* to transport.run as the "handler" to indicate
that no incoming message processing should occur and as soon as
a transmit completes, returning the number of remaining
transmits queued in the transport layer.
If there are no queued transmits in the transport layer, the run()
call returns immediately with a value of 0.
Note that the transport layer may handle multiple transmits in
parallel; calling run() with this argument may allow several
transmits to progress through transmit stages---possibly even
to completion. The run() return indicates only that a single
transmit has completed and should be called soon if there are
still transmits pending to complete their transmit progress.
Also note that the timeout argument to the run() method can
cause it to return without actually completing any transmits.
"""
pass
# ----------------------------------------------------------------------
class ReceiveEnvelope(Thespian__Run__Result):
"Represents the message received along with the sender's address"
def __init__(self, sender, msg):
self._sender = sender
self._message = msg
@property
def sender(self): return self._sender
@property
def message(self): return self._message
def identify(self):
smsg = str(self.message)
if len(smsg) > MAX_SHOWLEN:
smsg = smsg[:MAX_SHOWLEN] + '...'
msgt = str(type(self.message))
if smsg == msgt:
return 'ReceiveEnvelope(from: %s, msg: %s)'%(self.sender, smsg)
return 'ReceiveEnvelope(from: %s, %s msg: %s)'%(self.sender, msgt, smsg)
def __str__(self): return self.identify()
# As a Thespian__Run__Result, this is false-ish because the caller
# supplied no receive handler, so the run should stop looping and
# return this value to the caller.
def __nonzero__(self): return False
def __bool__(self): return False
# ----------------------------------------------------------------------
class ResultCallback(object):
def __init__(self, onSuccess=None, onFailure=None, nextCallback=None):
self._successTo = onSuccess
self._failureTo = onFailure
self._thenTo = nextCallback
self._called = False
def resultCallback(self, withResult, withValue):
"""This is called by the transport to perform the success or failure
callback operation. Exceptions are swallowed and do not
escape. All callbacks in the chain are called in sequence.
"""
if not self._called:
self._called = True
try:
((self._successTo
if withResult else
self._failureTo) or (lambda r, m: None))(withResult, withValue)
except Exception as ex:
thesplog('Exception in callback: %s', ex, exc_info=True, level=logging.ERROR)
            # Ensure additional callbacks are still called even if a callback gets an exception.
if self._thenTo:
self._thenTo.resultCallback(withResult, withValue)
# ----------------------------------------------------------------------
def backoffDelay(curDelay=0):
adjtime = curDelay or MIN_BACKOFF_DELAY
if not isinstance(adjtime, timedelta): adjtime = timedelta(seconds=adjtime)
return min(MAX_BACKOFF_DELAY,
timedelta(days = adjtime.days * BACKOFF_FACTOR,
seconds = (adjtime.seconds * BACKOFF_FACTOR),
microseconds = (adjtime.microseconds * BACKOFF_FACTOR)))
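# Illustrative note (not part of the original source): each timedelta
# component is scaled separately by BACKOFF_FACTOR, so feeding each result
# back in (as PauseWithBackoff does) grows the delay roughly geometrically:
# about 34ms, 58ms, 98ms, 167ms, 284ms, 483ms, 0.82s, 1.4s, 2.4s, 4.0s, 6.9s,
# after which it is capped at MAX_BACKOFF_DELAY (about 7.33s).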
class PauseWithBackoff(object):
def backoffPause(self, startPausing=False):
if startPausing:
self._lastPauseLength = backoffDelay(getattr(self, '_lastPauseLength', 0))
self._pauseUntil = ExpirationTimer(self._lastPauseLength)
return self._lastPauseLength
elif hasattr(self, '_pauseUntil'):
if not self._pauseUntil.expired():
return self._pauseUntil.remaining()
delattr(self, '_pauseUntil')
return timedelta(0)
# ----------------------------------------------------------------------
class TransmitIntent(PauseWithBackoff):
"""An individual transmission of data can be encapsulated by a
"transmit intent", which identifies the message and the target
address, and which has a callback for eventual success or
failure indication. Transmit intents may be chained together
to represent a series of outbound transmits. Adding a transmit
intent to the chain may block when the chain reaches an upper
threshold, and remain blocked until enough transmits have
       occurred (successful or failed) to reduce the size of the chain
below a minimum threshold. This acts to implement server-side
flow control in the system as a whole (although it can
introduce a deadlock scenario if multiple actors form a
transmit loop that is blocked at any point in the loop, so a
transmit intent will fail if it reaches a maximum number of
retries without success).
The TransmitIntent is constructed with a target address, the
message to send, and optional onSuccess and onError callbacks
(both defaulting to None). The callbacks are passed the
TransmitIntent when the transport is finished with it,
selecting the appropriate callback based on the completion
status (the `result' property will reveal the SendStatus actual
result of the attempt). A callback of None will simply discard
the TransmitIntent without passing it to a callback.
The TransmitIntent is passed to the transport that should
perform the intent; the transport may attach its own additional
data to the intent during that processing.
"""
def __init__(self, targetAddr, msg, onSuccess=None, onError=None, maxPeriod=None,
retryPeriod=TRANSMIT_RETRY_PERIOD):
super(TransmitIntent, self).__init__()
self._targetAddr = targetAddr
self._message = msg
self._callbackTo = ResultCallback(onSuccess, onError)
self._resultsts = None
self._quitTime = ExpirationTimer(maxPeriod or DEFAULT_MAX_TRANSMIT_PERIOD)
self._attempts = 0
self.transmit_retry_period = retryPeriod
@property
def targetAddr(self): return self._targetAddr
@property
def message(self): return self._message
def changeTargetAddr(self, newAddr): self._targetAddr = newAddr
def changeMessage(self, newMessage): self._message = newMessage
@property
def result(self): return self._resultsts
@result.setter
def result(self, setResult):
if not isinstance(setResult, SendStatus.BASE):
raise TypeError('TransmitIntent result must be a SendStatus (got %s)'%type(setResult))
self._resultsts = setResult
def completionCallback(self):
"This is called by the transport to perform the success or failure callback operation."
if not self.result:
if self.result == SendStatus.DeadTarget:
# Do not perform logging in case admin or logdirector
# is dead (this will recurse infinitely).
# logging.getLogger('Thespian').warning('Dead target: %s', self.targetAddr)
pass
else:
thesplog('completion error: %s', str(self), level=logging.INFO)
self._callbackTo.resultCallback(self.result, self)
def addCallback(self, onSuccess=None, onFailure=None):
self._callbackTo = ResultCallback(onSuccess, onFailure, self._callbackTo)
def tx_done(self, status):
self.result = status
self.completionCallback()
def awaitingTXSlot(self):
self._awaitingTXSlot = True
def retry(self, immediately=False):
if self._attempts > MAX_TRANSMIT_RETRIES:
return False
if self._quitTime.expired():
return False
self._attempts += 1
if immediately:
self._retryTime = ExpirationTimer(0)
else:
self._retryTime = ExpirationTimer(self._attempts * self.transmit_retry_period)
return True
def timeToRetry(self, socketAvail=False):
if socketAvail and hasattr(self, '_awaitingTXSlot'):
delattr(self, '_awaitingTXSlot')
if hasattr(self, '_retryTime'):
delattr(self, '_retryTime')
return True
if hasattr(self, '_retryTime'):
retryNow = self._retryTime.expired()
if retryNow:
delattr(self, '_retryTime')
return retryNow
return socketAvail
def delay(self):
if getattr(self, '_awaitingTXSlot', False):
if self._quitTime.expired():
return timedelta(seconds=0)
return max(timedelta(milliseconds=10), (self._quitTime.remaining()) / 2)
return max(timedelta(seconds=0),
min(self._quitTime.remaining(),
getattr(self, '_retryTime', self._quitTime).remaining(),
getattr(self, '_pauseUntil', self._quitTime).remaining()))
def expired(self):
return self._quitTime.expired()
def expiration(self):
return self._quitTime
def __str__(self):
return '************* %s' % self.identify()
def identify(self):
try:
smsg = str(self.message)
except Exception:
smsg = '<msg-cannot-convert-to-ascii>'
if len(smsg) > MAX_SHOWLEN:
smsg = smsg[:MAX_SHOWLEN] + '...'
return 'TransportIntent(' + '-'.join(filter(None, [
str(self.targetAddr),
'pending' if self.result is None else '='+str(self.result),
'' if self.result is not None else 'ExpiresIn_' + str(self.delay()),
'WAITSLOT' if getattr(self, '_awaitingTXSlot', False) else None,
'retry#%d'%self._attempts if self._attempts else '',
str(type(self.message)), smsg,
'quit_%s'%str(self._quitTime.remaining()),
'retry_%s'%str(self._retryTime.remaining()) if getattr(self, '_retryTime', None) else None,
'pause_%s'%str(self._pauseUntil.remaining()) if getattr(self, '_pauseUntil', None) else None,
])) + ')'
class SendStatus(object):
class BASE(object):
_isGood = True
def __bool__(self): return self._isGood # Python3
def __nonzero__(self): return self._isGood # Python2
def __str__(self): return '-+'[bool(self)]+self.__class__.__name__
class SENDSTS_SENT(BASE): pass
class SENDSTS_NOTSENT(BASE):
"Has not been sent, has not been actively rejected; still pending usually"
_isGood = False
class BadPacketError(BASE, Exception):
"Remote rejected transmit, (a return value or an exception)"
_isGood = False
class SENDSTS_EXPIRED(BASE):
"Transmit intent expired before send completed."
_isGood = False
class SENDSTS_FAILED(BASE): _isGood = False
class SENDSTS_DEADTARGET(BASE): _isGood = False
Sent = SENDSTS_SENT()
NotSent = SENDSTS_NOTSENT()
BadPacket = BadPacketError('BadPacket SendStatus')
Failed = SENDSTS_FAILED()
Expired = SENDSTS_EXPIRED()
DeadTarget = SENDSTS_DEADTARGET()
class ForwardMessage(object):
"Used as a wrapper when forwarding messages via intermediaries"
    # n.b. ForwardMessage is not based on the ActorSystemMessage base class
# because it only exists at the transport layer.
def __init__(self, fwdMessage, fwdTo, fwdFrom, fwdChain=None):
self.fwdMessage = fwdMessage
self.fwdTo = fwdTo # final destination
self.fwdFrom = fwdFrom # original sender
self.fwdTargets = (fwdChain or []) + [fwdTo] # list of targets; last is fwdTo
def __str__(self):
return 'FWD(%s)%s->%s->%s'%(str(self.fwdMessage),
str(self.fwdFrom),
'->'.join(list(map(str, self.fwdTargets))),
str(self.fwdTo))
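# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal
# illustration of constructing a TransmitIntent and driving it to completion
# the way a transport would. The target address string and message below are
# hypothetical placeholders; real transports use ActorAddress objects.
if __name__ == '__main__':
    def _on_success(result, intent):
        print('sent ok: %s' % intent.identify())
    def _on_failure(result, intent):
        print('send failed: %s' % intent.identify())
    demo = TransmitIntent('hypothetical-actor-address', 'hello world',
                          onSuccess=_on_success, onError=_on_failure)
    # A transport would normally set the result after attempting the send;
    # here the send is simulated as successful.
    demo.tx_done(SendStatus.Sent)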
|
{
"content_hash": "a0a5e8337e83fe85fa36a49296a47c62",
"timestamp": "",
"source": "github",
"line_count": 369,
"max_line_length": 105,
"avg_line_length": 40.235772357723576,
"alnum_prop": 0.6263218158550549,
"repo_name": "godaddy/Thespian",
"id": "8e8a6d4b03265a591da9fa9c0a7d737af9579b19",
"size": "14847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thespian/system/transport/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1111138"
},
{
"name": "Shell",
"bytes": "48"
}
],
"symlink_target": ""
}
|
import logging
import pkgutil
from functools import lru_cache
from socket import AF_INET, AF_INET6
from typing import Any, Dict, Iterator, Optional, Tuple, Type
# noinspection PyPackageRequirements
import urllib3.util.connection as urllib3_util_connection
# noinspection PyPackageRequirements
import urllib3.util.ssl_ as urllib3_util_ssl
from streamlink import __version__, plugins
from streamlink.exceptions import NoPluginError, PluginError
from streamlink.logger import StreamlinkLogger
from streamlink.options import Options
from streamlink.plugin.api.http_session import HTTPSession
from streamlink.plugin.plugin import Matcher, NORMAL_PRIORITY, NO_PRIORITY, Plugin
from streamlink.utils.l10n import Localization
from streamlink.utils.module import load_module
from streamlink.utils.url import update_scheme
# Ensure that the Logger class returned is Streamlink's when using the API (for backwards compatibility)
logging.setLoggerClass(StreamlinkLogger)
log = logging.getLogger(__name__)
# noinspection PyUnresolvedReferences
_original_allowed_gai_family = urllib3_util_connection.allowed_gai_family # type: ignore[attr-defined]
# options which support `key1=value1;key2=value2;...` strings as value
_OPTIONS_HTTP_KEYEQUALSVALUE = {"http-cookies": "cookies", "http-headers": "headers", "http-query-params": "params"}
def _parse_keyvalue_string(value: str) -> Iterator[Tuple[str, str]]:
for keyval in value.split(";"):
try:
key, val = keyval.split("=", 1)
yield key.strip(), val.strip()
except ValueError:
continue
class PythonDeprecatedWarning(UserWarning):
pass
class Streamlink:
"""
The Streamlink session is used to load and resolve plugins, and to store options used by plugins and stream implementations.
"""
http: HTTPSession
"""
An instance of Streamlink's :class:`requests.Session` subclass.
Used for any kind of HTTP request made by plugin and stream implementations.
"""
def __init__(
self,
options: Optional[Dict[str, Any]] = None
):
"""
:param options: Custom options
"""
self.http = HTTPSession()
self.options = Options({
"interface": None,
"ipv4": False,
"ipv6": False,
"hls-live-edge": 3,
"hls-segment-ignore-names": [],
"hls-segment-stream-data": False,
"hls-playlist-reload-attempts": 3,
"hls-playlist-reload-time": "default",
"hls-start-offset": 0,
"hls-duration": None,
"ringbuffer-size": 1024 * 1024 * 16, # 16 MB
"stream-segment-attempts": 3,
"stream-segment-threads": 1,
"stream-segment-timeout": 10.0,
"stream-timeout": 60.0,
"ffmpeg-ffmpeg": None,
"ffmpeg-no-validation": False,
"ffmpeg-fout": None,
"ffmpeg-video-transcode": None,
"ffmpeg-audio-transcode": None,
"ffmpeg-copyts": False,
"ffmpeg-start-at-zero": False,
"mux-subtitles": False,
"locale": None,
"user-input-requester": None,
})
if options:
self.options.update(options)
self.plugins: Dict[str, Type[Plugin]] = {}
self.load_builtin_plugins()
def set_option(self, key: str, value: Any):
"""
Sets general options used by plugins and streams originating from this session object.
:param key: key of the option
:param value: value to set the option to
**Available options**:
======================== =========================================
interface (str) Set the network interface,
default: ``None``
ipv4 (bool) Resolve address names to IPv4 only.
This option overrides ipv6, default: ``False``
ipv6 (bool) Resolve address names to IPv6 only.
This option overrides ipv4, default: ``False``
hls-live-edge (int) How many segments from the end
to start live streams on, default: ``3``
hls-segment-ignore-names (str[]) List of segment names without
file endings which should get filtered out,
default: ``[]``
hls-segment-stream-data (bool) Stream HLS segment downloads,
default: ``False``
http-proxy (str) Specify an HTTP proxy to use for
all HTTP requests
https-proxy (str) Specify an HTTPS proxy to use for
all HTTPS requests
http-cookies (dict or str) A dict or a semicolon ``;``
delimited str of cookies to add to each
HTTP request, e.g. ``foo=bar;baz=qux``
http-headers (dict or str) A dict or semicolon ``;``
delimited str of headers to add to each
HTTP request, e.g. ``foo=bar;baz=qux``
http-query-params (dict or str) A dict or an ampersand ``&``
delimited string of query parameters to
add to each HTTP request,
e.g. ``foo=bar&baz=qux``
http-trust-env (bool) Trust HTTP settings set in the
environment, such as environment
variables (HTTP_PROXY, etc.) and
~/.netrc authentication
http-ssl-verify (bool) Verify SSL certificates,
default: ``True``
http-disable-dh (bool) Disable SSL Diffie-Hellman key exchange
http-ssl-cert (str or tuple) SSL certificate to use,
can be either a .pem file (str) or a
.crt/.key pair (tuple)
http-timeout (float) General timeout used by all HTTP
requests except the ones covered by
other options, default: ``20.0``
ringbuffer-size (int) The size of the internal ring
buffer used by most stream types,
default: ``16777216`` (16MB)
ffmpeg-ffmpeg (str) Specify the location of the
                                 ffmpeg executable used by muxing streams
e.g. ``/usr/local/bin/ffmpeg``
ffmpeg-no-validation (bool) Disable FFmpeg validation and version logging.
default: ``False``
ffmpeg-verbose (bool) Log stderr from ffmpeg to the
console
ffmpeg-verbose-path (str) Specify the location of the
ffmpeg stderr log file
ffmpeg-fout (str) The output file format
when muxing with ffmpeg
e.g. ``matroska``
ffmpeg-video-transcode (str) The codec to use if transcoding
video when muxing with ffmpeg
e.g. ``h264``
ffmpeg-audio-transcode (str) The codec to use if transcoding
audio when muxing with ffmpeg
e.g. ``aac``
ffmpeg-copyts (bool) When used with ffmpeg, do not shift input timestamps.
ffmpeg-start-at-zero (bool) When used with ffmpeg and copyts,
shift input timestamps, so they start at zero
default: ``False``
mux-subtitles (bool) Mux available subtitles into the
output stream.
stream-segment-attempts (int) How many attempts should be done
to download each segment, default: ``3``.
stream-segment-threads (int) The size of the thread pool used
to download segments, default: ``1``.
stream-segment-timeout (float) Segment connect and read
timeout, default: ``10.0``.
stream-timeout (float) Timeout for reading data from
stream, default: ``60.0``.
locale (str) Locale setting, in the RFC 1766 format
e.g. en_US or es_ES
default: ``system locale``.
user-input-requester (UserInputRequester) instance of UserInputRequester
to collect input from the user at runtime.
default: ``None``.
======================== =========================================
"""
if key == "interface":
for scheme, adapter in self.http.adapters.items():
if scheme not in ("http://", "https://"):
continue
if not value:
adapter.poolmanager.connection_pool_kw.pop("source_address")
else:
adapter.poolmanager.connection_pool_kw.update(
# https://docs.python.org/3/library/socket.html#socket.create_connection
source_address=(value, 0)
)
self.options.set(key, None if not value else value)
elif key == "ipv4" or key == "ipv6":
self.options.set(key, value)
if not value:
urllib3_util_connection.allowed_gai_family = _original_allowed_gai_family # type: ignore[attr-defined]
elif key == "ipv4":
self.options.set("ipv6", False)
urllib3_util_connection.allowed_gai_family = (lambda: AF_INET) # type: ignore[attr-defined]
else:
self.options.set("ipv4", False)
urllib3_util_connection.allowed_gai_family = (lambda: AF_INET6) # type: ignore[attr-defined]
elif key in ("http-proxy", "https-proxy"):
self.http.proxies["http"] = update_scheme("https://", value, force=False)
self.http.proxies["https"] = self.http.proxies["http"]
if key == "https-proxy":
log.warning("The https-proxy option has been deprecated in favor of a single http-proxy option")
elif key in _OPTIONS_HTTP_KEYEQUALSVALUE:
getattr(self.http, _OPTIONS_HTTP_KEYEQUALSVALUE[key]).update(
value if isinstance(value, dict) else dict(_parse_keyvalue_string(value))
)
elif key == "http-trust-env":
self.http.trust_env = value
elif key == "http-ssl-verify":
self.http.verify = value
elif key == "http-disable-dh":
default_ciphers = [
item
for item in urllib3_util_ssl.DEFAULT_CIPHERS.split(":") # type: ignore[attr-defined]
if item != "!DH"
]
if value:
default_ciphers.append("!DH")
urllib3_util_ssl.DEFAULT_CIPHERS = ":".join(default_ciphers) # type: ignore[attr-defined]
elif key == "http-ssl-cert":
self.http.cert = value
elif key == "http-timeout":
self.http.timeout = value
# deprecated: {dash,hls}-segment-attempts
elif key in ("dash-segment-attempts", "hls-segment-attempts"):
self.options.set("stream-segment-attempts", int(value))
# deprecated: {dash,hls}-segment-threads
elif key in ("dash-segment-threads", "hls-segment-threads"):
self.options.set("stream-segment-threads", int(value))
# deprecated: {dash,hls}-segment-timeout
elif key in ("dash-segment-timeout", "hls-segment-timeout"):
self.options.set("stream-segment-timeout", float(value))
# deprecated: {hls,dash,http-stream}-timeout
elif key in ("dash-timeout", "hls-timeout", "http-stream-timeout"):
self.options.set("stream-timeout", float(value))
else:
self.options.set(key, value)
def get_option(self, key: str):
"""
Returns the current value of the specified option.
:param key: key of the option
"""
if key == "http-proxy":
return self.http.proxies.get("http")
elif key == "https-proxy":
return self.http.proxies.get("https")
elif key == "http-cookies":
return self.http.cookies
elif key == "http-headers":
return self.http.headers
elif key == "http-query-params":
return self.http.params
elif key == "http-trust-env":
return self.http.trust_env
elif key == "http-ssl-verify":
return self.http.verify
elif key == "http-ssl-cert":
return self.http.cert
elif key == "http-timeout":
return self.http.timeout
else:
return self.options.get(key)
def set_plugin_option(self, plugin: str, key: str, value: Any) -> None:
"""
Sets plugin specific options used by plugins originating from this session object.
:param plugin: name of the plugin
:param key: key of the option
:param value: value to set the option to
"""
if plugin in self.plugins:
plugincls = self.plugins[plugin]
plugincls.set_option(key, value)
def get_plugin_option(self, plugin: str, key: str) -> Optional[Any]:
"""
Returns the current value of the plugin specific option.
:param plugin: name of the plugin
:param key: key of the option
"""
if plugin in self.plugins:
plugincls = self.plugins[plugin]
return plugincls.get_option(key)
@lru_cache(maxsize=128)
def resolve_url(
self,
url: str,
follow_redirect: bool = True,
) -> Tuple[str, Type[Plugin], str]:
"""
Attempts to find a plugin that can use this URL.
The default protocol (https) will be prefixed to the URL if not specified.
Return values of this method are cached via :meth:`functools.lru_cache`.
:param url: a URL to match against loaded plugins
:param follow_redirect: follow redirects
:raises NoPluginError: on plugin resolve failure
"""
url = update_scheme("https://", url, force=False)
matcher: Matcher
candidate: Optional[Tuple[str, Type[Plugin]]] = None
priority = NO_PRIORITY
for name, plugin in self.plugins.items():
if plugin.matchers:
for matcher in plugin.matchers:
if matcher.priority > priority and matcher.pattern.match(url) is not None:
candidate = name, plugin
priority = matcher.priority
# TODO: remove deprecated plugin resolver
elif hasattr(plugin, "can_handle_url") and callable(plugin.can_handle_url) and plugin.can_handle_url(url):
prio = plugin.priority(url) if hasattr(plugin, "priority") and callable(plugin.priority) else NORMAL_PRIORITY
if prio > priority:
log.warning(f"Resolved plugin {name} with deprecated can_handle_url API")
candidate = name, plugin
priority = prio
if candidate:
return candidate[0], candidate[1], url
if follow_redirect:
# Attempt to handle a redirect URL
try:
res = self.http.head(url, allow_redirects=True, acceptable_status=[501]) # type: ignore[call-arg]
# Fall back to GET request if server doesn't handle HEAD.
if res.status_code == 501:
res = self.http.get(url, stream=True)
if res.url != url:
return self.resolve_url(res.url, follow_redirect=follow_redirect)
except PluginError:
pass
raise NoPluginError
def resolve_url_no_redirect(self, url: str) -> Tuple[str, Type[Plugin], str]:
"""
Attempts to find a plugin that can use this URL.
The default protocol (https) will be prefixed to the URL if not specified.
:param url: a URL to match against loaded plugins
:raises NoPluginError: on plugin resolve failure
"""
return self.resolve_url(url, follow_redirect=False)
def streams(self, url: str, **params):
"""
Attempts to find a plugin and extracts streams from the *url* if a plugin was found.
:param url: a URL to match against loaded plugins
:param params: Additional keyword arguments passed to :meth:`streamlink.plugin.Plugin.streams`
:raises NoPluginError: on plugin resolve failure
:return: A :class:`dict` of stream names and :class:`streamlink.stream.Stream` instances
"""
pluginname, pluginclass, resolved_url = self.resolve_url(url)
plugin = pluginclass(self, resolved_url)
return plugin.streams(**params)
def get_plugins(self):
"""Returns the loaded plugins for the session."""
return self.plugins
def load_builtin_plugins(self):
self.load_plugins(plugins.__path__[0])
def load_plugins(self, path: str) -> bool:
"""
Attempt to load plugins from the path specified.
:param path: full path to a directory where to look for plugins
:return: success
"""
success = False
for loader, name, ispkg in pkgutil.iter_modules([path]):
# set the full plugin module name
# use the "streamlink.plugins." prefix even for sideloaded plugins
module_name = f"streamlink.plugins.{name}"
try:
mod = load_module(module_name, path)
except ImportError:
log.exception(f"Failed to load plugin {name} from {path}\n")
continue
if not hasattr(mod, "__plugin__") or not issubclass(mod.__plugin__, Plugin):
continue
success = True
plugin = mod.__plugin__
if name in self.plugins:
log.debug(f"Plugin {name} is being overridden by {mod.__file__}")
self.plugins[name] = plugin
return success
@property
def version(self):
return __version__
@property
def localization(self):
return Localization(self.get_option("locale"))
__all__ = ["Streamlink"]
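# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a minimal
# illustration of the session API defined above. The URL below is a
# hypothetical placeholder; resolve_url_no_redirect() raises NoPluginError
# when no loaded plugin matches it, so no network access is attempted here.
if __name__ == "__main__":
    session = Streamlink()
    session.set_option("http-timeout", 30.0)
    session.set_option("hls-live-edge", 2)
    try:
        pluginname, pluginclass, resolved_url = session.resolve_url_no_redirect(
            "https://example.invalid/stream")
        log.info(f"resolved {resolved_url} to plugin {pluginname}")
    except NoPluginError:
        log.info("no plugin matched the example URL")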
|
{
"content_hash": "02b869fc8055d7a64d3f7fb17b1437fd",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 128,
"avg_line_length": 39.51975051975052,
"alnum_prop": 0.5473196906728391,
"repo_name": "bastimeyer/streamlink",
"id": "c4f6235f585451205fbda10b19538d389745fc71",
"size": "19009",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/streamlink/session.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1511755"
},
{
"name": "Shell",
"bytes": "6427"
}
],
"symlink_target": ""
}
|
from setuptools import setup
if __name__ == "__main__":
setup()
|
{
"content_hash": "04e38f3fac518a6424a2a6a5f7e8d218",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 28,
"avg_line_length": 17.25,
"alnum_prop": 0.5797101449275363,
"repo_name": "PyO3/setuptools-rust",
"id": "25536050b29963bbfbb07ad400439bb16afbb82a",
"size": "92",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3062"
},
{
"name": "Python",
"bytes": "71505"
}
],
"symlink_target": ""
}
|
import requests
from bs4 import BeautifulSoup
from json import loads
import re
from ..config import config
def seek(chk_key, epi, params):
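    """Look up a Bilibili bangumi episode (description inferred from the code
    below, not from original documentation): chk_key is either a numeric
    season id or a search keyword, epi is the episode index, and params is
    currently unused. Returns 0 on failure, otherwise a one-element list of
    {'link': ..., 'title': ...} dicts.
    """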
tepi = epi
chk_key = str(chk_key)
try:
int(chk_key)
except ValueError:
query_url = ("http://search.bilibili.com/bangumi"
"?keyword=%s") % (chk_key, )
html_content = requests.get(query_url, timeout=2, proxies=config['seekerProxies']).text
bs = BeautifulSoup(html_content, "html.parser")
s_bgmlist = bs.find('div', class_="ajax-render")
try:
season_id = s_bgmlist.find('a', class_="title").get('href')
season_id = re.findall(r'\d+', season_id)
if len(season_id):
season_id = season_id[0]
else:
raise AttributeError
except AttributeError:
return 0
else:
season_id = chk_key
api_url = ("http://app.bilibili.com/bangumi/seasoninfo/%s.ver"
"?callback=seasonListCallback") % (season_id,)
apiRes = requests.get(api_url, timeout=2).text
apiRes = re.sub(r"^.+?\(", '', apiRes)
apiRes = re.sub(r"\);", '', apiRes)
apiRes = loads(apiRes)
epi_list = apiRes['result']['episodes']
av_name = apiRes['result']['title']
try:
for epi in epi_list:
if epi['index'] == str(tepi):
av_id = epi['av_id']
av_page = epi['page']
break
else:
raise IndexError
except IndexError:
return 0
link = ("http://www.bilibili.com/video/"
"av%s/index_%s.html") % (av_id, av_page)
title = "%s - %d from Bilibili" % (av_name, tepi)
return [{'link': link, 'title': title}]
if __name__ == '__main__':
test_r = seek("食戟之灵 贰之皿", 10, ())
print(test_r)
|
{
"content_hash": "ae8133796df175de6aafdfaacf790b1d",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 95,
"avg_line_length": 29.516129032258064,
"alnum_prop": 0.533879781420765,
"repo_name": "chienius/anicolle",
"id": "a67539c5a5f74efcce4ac165bfd7584afaf945e0",
"size": "1867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anicolle/seeker/bilibili.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16612"
}
],
"symlink_target": ""
}
|
from torch._six import container_abcs
from itertools import repeat
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
_quadruple = _ntuple(4)
def _list_with_default(out_size, defaults):
if isinstance(out_size, int):
return out_size
if len(defaults) <= len(out_size):
raise ValueError('Input dimension should be at least {}'.format(len(out_size) + 1))
return [v if v is not None else d for v, d in zip(out_size, defaults[-len(out_size):])]
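# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): _pair and friends
# normalize a scalar argument into an n-tuple while passing iterables through
# unchanged, which is how conv/pool modules accept both kernel_size=3 and
# kernel_size=(3, 5).
if __name__ == "__main__":
    assert _pair(3) == (3, 3)
    assert _pair((3, 5)) == (3, 5)
    assert _triple(1) == (1, 1, 1)
    # _list_with_default fills None entries from the trailing defaults.
    assert _list_with_default([7, None], [2, 4, 4]) == [7, 4]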
|
{
"content_hash": "a361c9526c57922a9d8db3ea3ae21678",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 91,
"avg_line_length": 27.82608695652174,
"alnum_prop": 0.640625,
"repo_name": "ryfeus/lambda-packs",
"id": "2b8ebd642b000a6d05606db16f3e921d16b51338",
"size": "640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytorch/source/torch/nn/modules/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
import pytest
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas import (timedelta_range, date_range, Series, Timedelta,
DatetimeIndex, TimedeltaIndex, Index, DataFrame,
Int64Index, _np_version_under1p8)
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_index_equal)
from ..datetimelike import DatetimeLike
randn = np.random.randn
class TestTimedeltaIndex(DatetimeLike):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def setup_method(self, method):
self.indices = dict(index=tm.makeTimedeltaIndex(10))
self.setup_indices()
def create_index(self):
return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
def test_shift(self):
# test shift for TimedeltaIndex
# err8083
drange = self.create_index()
result = drange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
tm.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
tm.assert_index_equal(result, expected)
def test_get_loc(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pytimedelta(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
assert idx.get_loc(idx[1], 'pad',
tolerance=pd.Timedelta(0)) == 1
assert idx.get_loc(idx[1], 'pad',
tolerance=np.timedelta64(0, 's')) == 1
assert idx.get_loc(idx[1], 'pad',
tolerance=timedelta(0)) == 1
with tm.assert_raises_regex(ValueError, 'must be convertible'):
idx.get_loc(idx[1], method='nearest', tolerance='foo')
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
assert idx.get_loc('1 day 1 hour', method) == loc
def test_get_loc_nat(self):
tidx = TimedeltaIndex(['1 days 01:00:00', 'NaT', '2 days 01:00:00'])
assert tidx.get_loc(pd.NaT) == 1
assert tidx.get_loc(None) == 1
assert tidx.get_loc(float('nan')) == 1
assert tidx.get_loc(np.nan) == 1
def test_get_indexer(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
res = idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour'))
tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp))
def test_numeric_compat(self):
idx = self._holder(np.arange(5, dtype='int64'))
didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5, dtype='int64')
tm.assert_index_equal(result,
self._holder(np.arange(5, dtype='int64') * 5))
result = idx * np.arange(5, dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='float64') + 0.1)
tm.assert_index_equal(result, self._holder(np.arange(
5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1)))
# invalid
pytest.raises(TypeError, lambda: idx * idx)
pytest.raises(ValueError, lambda: idx * self._holder(np.arange(3)))
pytest.raises(ValueError, lambda: idx * np.array([1, 2]))
def test_pickle_compat_construction(self):
pass
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_fillna_timedelta(self):
# GH 11343
idx = pd.TimedeltaIndex(['1 day', pd.NaT, '3 day'])
exp = pd.TimedeltaIndex(['1 day', '2 day', '3 day'])
tm.assert_index_equal(idx.fillna(pd.Timedelta('2 day')), exp)
exp = pd.TimedeltaIndex(['1 day', '3 hour', '3 day'])
        tm.assert_index_equal(idx.fillna(pd.Timedelta('3 hour')), exp)
exp = pd.Index(
[pd.Timedelta('1 day'), 'x', pd.Timedelta('3 day')], dtype=object)
tm.assert_index_equal(idx.fillna('x'), exp)
def test_difference_freq(self):
# GH14323: Difference of TimedeltaIndex should not preserve frequency
index = timedelta_range("0 days", "5 days", freq="D")
other = timedelta_range("1 days", "4 days", freq="D")
expected = TimedeltaIndex(["0 days", "5 days"], freq=None)
idx_diff = index.difference(other)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
other = timedelta_range("2 days", "5 days", freq="D")
idx_diff = index.difference(other)
expected = TimedeltaIndex(["0 days", "1 days"], freq=None)
tm.assert_index_equal(idx_diff, expected)
tm.assert_attr_equal('freq', idx_diff, expected)
def test_take(self):
tds = ['1day 02:00:00', '1 day 04:00:00', '1 day 10:00:00']
idx = TimedeltaIndex(start='1d', end='2d', freq='H', name='idx')
expected = TimedeltaIndex(tds, freq=None, name='idx')
taken1 = idx.take([2, 4, 10])
taken2 = idx[[2, 4, 10]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, TimedeltaIndex)
assert taken.freq is None
assert taken.name == expected.name
def test_take_fill_value(self):
# GH 12631
idx = pd.TimedeltaIndex(['1 days', '2 days', '3 days'],
name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'],
name='xxx')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'],
name='xxx')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.TimedeltaIndex(['2 days', '1 days', '3 days'],
name='xxx')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
def test_isin(self):
index = tm.makeTimedeltaIndex(4)
result = index.isin(index)
assert result.all()
result = index.isin(list(index))
assert result.all()
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
def test_factorize(self):
idx1 = TimedeltaIndex(['1 day', '1 day', '2 day', '2 day', '3 day',
'3 day'])
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = TimedeltaIndex(['1 day', '2 day', '3 day'])
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# freq must be preserved
idx3 = timedelta_range('1 day', periods=4, freq='s')
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
def test_join_self(self):
index = timedelta_range('1 day', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
tm.assert_index_equal(index, joined)
def test_slice_keeps_name(self):
# GH4226
dr = pd.timedelta_range('1d', '5d', freq='H', name='timebucket')
assert dr[1:].name == dr.name
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10,
data_gen_f=lambda *args, **kwargs: randn(),
r_idx_type='i', c_idx_type='td')
str(df)
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
assert cols.dtype == np.dtype('O')
assert cols.dtype == joined.dtype
tm.assert_index_equal(cols, joined)
def test_sort_values(self):
idx = TimedeltaIndex(['4d', '1d', '2d'])
ordered = idx.sort_values()
assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0]),
check_dtype=False)
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1]),
check_dtype=False)
def test_get_duplicates(self):
idx = TimedeltaIndex(['1 day', '2 day', '2 day', '3 day', '3day',
'4day'])
result = idx.get_duplicates()
ex = TimedeltaIndex(['2 day', '3day'])
tm.assert_index_equal(result, ex)
def test_argmin_argmax(self):
idx = TimedeltaIndex(['1 day 00:00:05', '1 day 00:00:01',
'1 day 00:00:02'])
assert idx.argmin() == 1
assert idx.argmax() == 0
def test_misc_coverage(self):
rng = timedelta_range('1 day', periods=5)
result = rng.groupby(rng.days)
assert isinstance(list(result.values())[0][0], Timedelta)
idx = TimedeltaIndex(['3d', '1d', '2d'])
assert not idx.equals(list(idx))
non_td = Index(list('abc'))
assert not idx.equals(list(non_td))
def test_map(self):
rng = timedelta_range('1 day', periods=10)
f = lambda x: x.days
result = rng.map(f)
exp = Int64Index([f(x) for x in rng])
tm.assert_index_equal(result, exp)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
if _np_version_under1p8:
# cannot test array because np.datetime('nat') returns today's date
cases = [(tdidx1, tdidx2)]
else:
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
        # Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
exp = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, exp)
# raise TypeError for now
pytest.raises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
def test_total_seconds(self):
# GH 10939
# test index
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9,
1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9]
tm.assert_almost_equal(rng.total_seconds(), Index(expt))
# test Series
s = Series(rng)
s_expt = Series(expt, index=[0, 1])
tm.assert_series_equal(s.dt.total_seconds(), s_expt)
# with nat
s[1] = np.nan
s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 +
12 + 100123456. / 1e9, np.nan], index=[0, 1])
tm.assert_series_equal(s.dt.total_seconds(), s_expt)
# with both nat
s = Series([np.nan, np.nan], dtype='timedelta64[ns]')
tm.assert_series_equal(s.dt.total_seconds(),
Series([np.nan, np.nan], index=[0, 1]))
def test_pass_TimedeltaIndex_to_index(self):
rng = timedelta_range('1 days', '10 days')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pytimedelta(), dtype=object)
tm.assert_numpy_array_equal(idx.values, expected.values)
def test_pickle(self):
rng = timedelta_range('1 days', periods=10)
rng_p = tm.round_trip_pickle(rng)
tm.assert_index_equal(rng, rng_p)
def test_hash_error(self):
index = timedelta_range('1 days', periods=10)
with tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__):
hash(index)
def test_append_join_nondatetimeindex(self):
rng = timedelta_range('1 days', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
assert isinstance(result[0], Timedelta)
# it works
rng.join(idx, how='outer')
def test_append_numpy_bug_1681(self):
td = timedelta_range('1 days', '10 days', freq='2D')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': td}, index=td)
str(c)
result = a.append(c)
assert (result['B'] == td).all()
def test_fields(self):
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
tm.assert_index_equal(rng.days, Index([1, 1], dtype='int64'))
tm.assert_index_equal(
rng.seconds,
Index([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13],
dtype='int64'))
tm.assert_index_equal(
rng.microseconds,
Index([100 * 1000 + 123, 100 * 1000 + 123], dtype='int64'))
tm.assert_index_equal(rng.nanoseconds,
Index([456, 456], dtype='int64'))
pytest.raises(AttributeError, lambda: rng.hours)
pytest.raises(AttributeError, lambda: rng.minutes)
pytest.raises(AttributeError, lambda: rng.milliseconds)
# with nat
s = Series(rng)
s[1] = np.nan
tm.assert_series_equal(s.dt.days, Series([1, np.nan], index=[0, 1]))
tm.assert_series_equal(s.dt.seconds, Series(
[10 * 3600 + 11 * 60 + 12, np.nan], index=[0, 1]))
# preserve name (GH15589)
rng.name = 'name'
assert rng.days.name == 'name'
def test_freq_conversion(self):
# doc example
# series
td = Series(date_range('20130101', periods=4)) - \
Series(date_range('20121201', periods=4))
td[2] += timedelta(minutes=5, seconds=3)
td[3] = np.nan
result = td / np.timedelta64(1, 'D')
expected = Series([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan
])
assert_series_equal(result, expected)
result = td.astype('timedelta64[D]')
expected = Series([31, 31, 31, np.nan])
assert_series_equal(result, expected)
result = td / np.timedelta64(1, 's')
expected = Series([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,
np.nan])
assert_series_equal(result, expected)
result = td.astype('timedelta64[s]')
assert_series_equal(result, expected)
# tdi
td = TimedeltaIndex(td)
result = td / np.timedelta64(1, 'D')
expected = Index([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])
assert_index_equal(result, expected)
result = td.astype('timedelta64[D]')
expected = Index([31, 31, 31, np.nan])
assert_index_equal(result, expected)
result = td / np.timedelta64(1, 's')
expected = Index([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3,
np.nan])
assert_index_equal(result, expected)
result = td.astype('timedelta64[s]')
assert_index_equal(result, expected)
class TestSlicing(object):
def test_timedelta(self):
# this is valid too
index = date_range('1/1/2000', periods=50, freq='B')
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
assert tm.equalContents(index, back)
assert shifted.freq == index.freq
assert shifted.freq == back.freq
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH4134, buggy with timedeltas
rng = date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestTimeSeries(object):
_multiprocess_can_split_ = True
def test_series_box_timedelta(self):
rng = timedelta_range('1 day 1 s', periods=5, freq='h')
s = Series(rng)
assert isinstance(s[1], Timedelta)
assert isinstance(s.iat[2], Timedelta)
|
{
"content_hash": "8fe789763ff62b18de2fa598488114f6",
"timestamp": "",
"source": "github",
"line_count": 599,
"max_line_length": 79,
"avg_line_length": 37.010016694490815,
"alnum_prop": 0.5344850917948487,
"repo_name": "linebp/pandas",
"id": "79fe0a864f24689b2da019af05f0a170025b10cf",
"size": "22169",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pandas/tests/indexes/timedeltas/test_timedelta.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6920"
},
{
"name": "C",
"bytes": "492693"
},
{
"name": "C++",
"bytes": "17353"
},
{
"name": "HTML",
"bytes": "551706"
},
{
"name": "Makefile",
"bytes": "907"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "11946251"
},
{
"name": "R",
"bytes": "9964"
},
{
"name": "Shell",
"bytes": "22404"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
}
|