text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from abc import abstractmethod
__author__ = 'Casey Bajema'
class _ProcessingScript():
    """
    Abstract processing script that should be sub-classed to provide datasets
    with custom processing.

    NOTE(review): @abstractmethod is only enforced when the class uses
    abc.ABCMeta (e.g. inherits from abc.ABC); as written, instantiating this
    class or a subclass without overriding process_it will NOT raise.
    """
    @abstractmethod
    def process_it(self, data_entry):
        """
        Process the data_entry in some way and return the result(s).

        :param data_entry: The received data_entry object.
        :return: None, a data_entry object or an array of data_entry objects
            that are ready to re-ingest or store.
        """
        pass
class _SamplingScript():
    """
    Abstract sampling script that should be sub-classed to provide custom
    data sampling times.  The base implementation returns None (falsy),
    i.e. "do not sample", until overridden.
    """
    def is_sample(self, datetime):
        """
        Test if the ingester should use the data at the given time.

        :param datetime: Datetime representing the date and time to the
            millisecond.
        :return: True or False if the ingester should sample at the time
            given.
        """
        pass
|
{
"content_hash": "bb65bfc6d128e718f8f7dd00fc759b59",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 114,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.6578680203045685,
"repo_name": "jcu-eresearch/jcu.dc24.ingesterapi",
"id": "45e67c44d3fd1da6456c886fba5d6dfa617eb203",
"size": "985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jcudc24ingesterapi/ingester_scripts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "114873"
}
],
"symlink_target": ""
}
|
from .timediff import time_slice_diffs
from .tsdiffplot import plot_tsdiffs, plot_tsdiffs_image
from .screens import screen
from ..utils import pca
|
{
"content_hash": "ef72b8ef5d3aab847ba725b8054947f4",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 56,
"avg_line_length": 37,
"alnum_prop": 0.8108108108108109,
"repo_name": "arokem/nipy",
"id": "676711ecdd54d904104a05ccb26c91f7252616f4",
"size": "304",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "nipy/algorithms/diagnostics/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1601255"
},
{
"name": "C++",
"bytes": "999"
},
{
"name": "Makefile",
"bytes": "2786"
},
{
"name": "Matlab",
"bytes": "5508"
},
{
"name": "Python",
"bytes": "3047221"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from traits.api import on_trait_change, Dict
from pychron.pipeline.plot.models.figure_model import FigureModel
# from pychron.processing.plot.panels.series_panel import SeriesPanel, DashboardSeriesPanel
from pychron.pipeline.plot.panels.regression_series_panel import RegressionSeriesPanel
from pychron.pipeline.plot.panels.series_panel import SeriesPanel, DashboardSeriesPanel
from six.moves import zip
class RegressionSeriesModel(FigureModel):
    """Figure model that builds one regression-series panel per analysis."""

    _panel_klass = RegressionSeriesPanel

    def _make_panels(self):
        """Create, render and title one panel for each analysis."""
        # Each analysis gets its own panel sharing the model's plot options.
        panels = [
            self._panel_klass(analyses=[analysis], plot_options=self.plot_options)
            for analysis in self.analyses
        ]
        for panel in panels:
            panel.make_figures()

        options = self.plot_options
        if options.auto_generate_title:
            # Derive each title from the panel's analyses and its position.
            for index, panel in enumerate(panels):
                panel.title = options.generate_title(panel.analyses, index)
        elif self.titles:
            # Otherwise pair explicit titles with panels positionally.
            for title, panel in zip(self.titles, panels):
                panel.title = title
        return panels
# ============= EOF =============================================
|
{
"content_hash": "81b99bd0157c646d7528cd44c3ea0724",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 101,
"avg_line_length": 36.89655172413793,
"alnum_prop": 0.6635514018691588,
"repo_name": "UManPychron/pychron",
"id": "8abbe975269318e95bbe067668b0577bf3c2a269",
"size": "2006",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pychron/pipeline/plot/models/regression_series_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "279"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "40346"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10234954"
},
{
"name": "Shell",
"bytes": "10753"
}
],
"symlink_target": ""
}
|
class Solution(object):
    """LeetCode 465 -- Optimal Account Balancing.

    Given transactions [lender, borrower, amount], compute the minimum
    number of transfers needed to settle all debts.
    """

    def minTransfers(self, transactions):
        """
        :type transactions: List[List[int]]
        :rtype: int
        """
        import collections  # local import: this module has no import section
        table = collections.Counter()
        for lender, borrower, amt in transactions:
            table[lender] -= amt
            table[borrower] += amt
        # Only people with a non-zero net balance need to participate.
        debts = [x for x in table.values() if x != 0]
        return self.dfs(debts, 0, 0)

    def dfs(self, debts, start, count):
        """Backtracking: settle debts[start] against each opposite-signed
        balance and recurse; return the minimum transfer count found.
        """
        # Skip balances that are already settled.
        while start < len(debts) and debts[start] == 0:
            start += 1
        # Bug fix: sys.maxint does not exist on Python 3; float('inf') is a
        # version-agnostic sentinel.
        best = float('inf')
        prev = 0
        for i in range(start + 1, len(debts)):
            # Pair only opposite-signed balances; skip a candidate equal to
            # the previous one to prune duplicate branches.
            if debts[i] != prev and debts[i] * debts[start] < 0:
                prev = debts[i]
                debts[i] += debts[start]
                best = min(best, self.dfs(debts, start + 1, count + 1))
                debts[i] -= debts[start]
        return best if best < float('inf') else count
|
{
"content_hash": "f461c117680cef00a531cc196bb635f6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 66,
"avg_line_length": 37.541666666666664,
"alnum_prop": 0.5138734739178691,
"repo_name": "Mlieou/lXXtcode",
"id": "64469508f69f9fd5d58ecabe31d4883b9d148d7a",
"size": "901",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "leetcode/python/ex_465.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "44654"
},
{
"name": "Java",
"bytes": "46838"
},
{
"name": "Python",
"bytes": "186767"
},
{
"name": "Shell",
"bytes": "127"
}
],
"symlink_target": ""
}
|
"""Create a dictionary containing useful information for the ingestion
process.
The ``file_dict`` contains various information that can be used by
``ingest.py`` (e.g. filesystem paths, observational metadata) and can
be used as a data container that can be easily passed around to various
functions.
Authors
-------
Matthew Bourque
Use
---
This module and its functionars are intended to be imported and
used by ``acsql.ingest.ingest.py`` as such:
::
from ascql.ingest.make_file_dict import get_detector
from ascql.ingest.make_file_dict import get_metadata_from_test_files
from ascql.ingest.make_file_dict import get_proposid
from acsql.ingest.make_file_dict import make_file_dict
make_file_dict(filename)
get_detector(filename)
get_metadata_from_test_files(rootname_path, keyword)
get_proposid(filename)
Dependencies
------------
External library dependencies include:
- ``astropy``
"""
import glob
import logging
import os
from astropy.io import fits
from acsql.utils import utils
from acsql.utils.utils import SETTINGS
def get_detector(filename):
    """Return the lower-cased detector name recorded in ``filename``.

    For jitter (``jit``) files the detector is derived from the ``config``
    keyword (``None`` for FGS ``S/C`` observations); otherwise the
    ``detector`` keyword of the primary header is read directly.

    Parameters
    ----------
    filename : str
        The path to the file to attempt to get the ``detector`` header
        keyword from.

    Returns
    -------
    detector : str or None
        The lower-cased detector (e.g. ``wfc``), or ``None`` for FGS data.
    """
    if 'jit' not in filename:
        return fits.getval(filename, 'detector', 0).lower()

    config = fits.getval(filename, 'config', 0)
    if config == 'S/C':  # FGS observation
        return None
    return config.lower().split('/')[1]
def get_metadata_from_test_files(rootname_path, keyword):
    """Return the value of the given ``keyword`` for ``rootname_path``.

    The given ``rootname_path`` is checked for various filetypes that
    are believed to have the ``keyword`` that is sought, in order of
    most likeliness: ``raw``, ``flt``, ``spt``, ``drz``, and ``jit``.
    If a candidate file is found, it is used to determine the value of
    the ``keyword`` in the primary header.  If no candidate file exists,
    or the ``keyword`` value cannot be determined from the primary
    header, a ``value`` of ``None`` is returned, essentially ending the
    ingestion process for the given rootname.

    Parameters
    ----------
    rootname_path : str
        The path to the rootname in the MAST cache.
    keyword : str
        The header keyword to determine the value of.  Only
        ``detector`` and ``proposid`` are handled; any other keyword
        yields ``None``.

    Returns
    -------
    value : str or None
        The header keyword value.
    """
    # Bug fix: initialise value up front.  Previously an unsupported
    # keyword reached the break with value unassigned, raising
    # UnboundLocalError at the "if not value" check below.
    value = None
    raw = glob.glob(os.path.join(rootname_path, '*raw.fits'))
    flt = glob.glob(os.path.join(rootname_path, '*flt.fits'))
    spt = glob.glob(os.path.join(rootname_path, '*spt.fits'))
    drz = glob.glob(os.path.join(rootname_path, '*drz.fits'))
    jit = glob.glob(os.path.join(rootname_path, '*jit.fits'))
    for test_files in [raw, flt, spt, drz, jit]:
        try:
            test_file = test_files[0]
            if keyword == 'detector':
                value = get_detector(test_file)
            elif keyword == 'proposid':
                value = get_proposid(test_file)
            break
        except (IndexError, KeyError):
            value = None
    if not value:
        logging.warning('Cannot determine {} for {}'
                        .format(keyword, rootname_path))
    return value
def get_proposid(filename):
    """Return the proposal ID from the primary header of ``filename``.

    Parameters
    ----------
    filename : str
        The path to the file to get the ``proposid`` from.

    Returns
    -------
    proposid : str
        The proposal ID, stringified (e.g. ``'12345'``).
    """
    header_value = fits.getval(filename, 'proposid', 0)
    return str(header_value)
def make_file_dict(filename):
    """Create a dictionary that holds information that is useful for
    the ingestion process. This dictionary can then be passed around
    the various functions of the module.

    Parameters
    ----------
    filename : str
        The path to the file.

    Returns
    -------
    file_dict : dict
        A dictionary containing various data useful for the ingestion
        process.
    """
    file_dict = {}
    # Filename related keywords
    file_dict['filename'] = os.path.abspath(filename)
    file_dict['dirname'] = os.path.dirname(filename)
    file_dict['basename'] = os.path.basename(filename)
    # rootname is the part before the first '_' with its last character
    # dropped; full_rootname keeps that character.
    file_dict['rootname'] = file_dict['basename'].split('_')[0][:-1]
    file_dict['full_rootname'] = file_dict['basename'].split('_')[0]
    file_dict['filetype'] = file_dict['basename'].split('.fits')[0].split('_')[-1]
    # NOTE(review): 'proposid' is the first four characters of the basename,
    # while 'proposid_int' below comes from the file header -- confirm the
    # two are intentionally different.
    file_dict['proposid'] = file_dict['basename'][0:4]
    file_dict['proposid_int'] = get_metadata_from_test_files(file_dict['dirname'], 'proposid')
    # Metadata keywords
    file_dict['detector'] = get_metadata_from_test_files(file_dict['dirname'], 'detector')
    if file_dict['detector']:
        file_dict['file_exts'] = getattr(utils, '{}_FILE_EXTS'.format(file_dict['detector'].upper()))[file_dict['filetype']]
    # JPEG related keywords
    if file_dict['filetype'] in ['raw', 'flt', 'flc']:
        # NOTE(review): os.path.join would raise if proposid_int is None
        # (header lookup failed) -- confirm callers guard against that.
        file_dict['jpg_filename'] = file_dict['basename'].replace('.fits', '.jpg')
        file_dict['jpg_dst'] = os.path.join(SETTINGS['jpeg_dir'], file_dict['proposid_int'], file_dict['jpg_filename'])
        file_dict['thumbnail_filename'] = file_dict['basename'].replace('.fits', '.thumb')
        file_dict['thumbnail_dst'] = os.path.join(SETTINGS['thumbnail_dir'], file_dict['proposid_int'], file_dict['thumbnail_filename'])
    else:
        file_dict['jpg_filename'] = None
        file_dict['jpg_dst'] = None
        file_dict['thumbnail_filename'] = None
        file_dict['thumbnail_dst'] = None
    return file_dict
|
{
"content_hash": "512f7be36826d125fb1efb150b63c83e",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 136,
"avg_line_length": 31.230366492146597,
"alnum_prop": 0.6233025984911986,
"repo_name": "bourque/acsql",
"id": "4eda3d5e3f1761375bd8ee9b6cb875190896d647",
"size": "5965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acsql/ingest/make_file_dict.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "64277"
}
],
"symlink_target": ""
}
|
from .models import FollowOrganization
from rest_framework import serializers
class FollowOrganizationSerializer(serializers.ModelSerializer):
    """Serialize FollowOrganization rows, exposing only the voter id,
    organization id, and following status fields."""
    class Meta:
        model = FollowOrganization
        fields = ('voter_we_vote_id', 'organization_we_vote_id', 'following_status')
|
{
"content_hash": "94109665ad0a1cc90f2e0c750d58dec1",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 84,
"avg_line_length": 35.125,
"alnum_prop": 0.7580071174377224,
"repo_name": "wevote/WebAppPublic",
"id": "5acc9ecc15a409ec232c74ed00251dd4867b852c",
"size": "368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "follow/serializers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8022"
},
{
"name": "HTML",
"bytes": "131153"
},
{
"name": "JavaScript",
"bytes": "296860"
},
{
"name": "Python",
"bytes": "1700558"
},
{
"name": "Shell",
"bytes": "252"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import uuid
import pytest
@pytest.fixture
def cochrane_review(conn):
    """Insert a single Cochrane review row into the warehouse and return
    the id reported by the insert."""
    risk_of_bias = {
        'result': 'YES',
        'rob_id': 'QIT-02',
        'group_id': '',
        'modified': '',
        'rob_name': 'Allocation concealment?',
        'study_id': 'STD-Soni-1990',
        'group_name': '',
        'rob_description': 'Was allocation adequately concealed?',
        'result_description': 'A - Adequate',
    }
    reference = {
        'no': '',
        'pg': '233-8',
        'vl': '5',
        'type': 'JOURNAL_ARTICLE',
        'year': '1990',
        'title': 'Sulpiride in negative schizophrenia - a placebo-controlled double-blind assessment',
        'source': 'Human Psychopharmacology Clinical and Experimental',
        'authors': 'Soni SD, Mallik A, Schiff AA',
        'country': '',
        'identifiers': [],
    }
    review = {
        'id': uuid.uuid1().hex,
        'study_type': 'MIX',
        'file_name': 'Sulpiride for schizophrenia [v9.0-For publication].rm5',
        'meta_source': 'http://datastore.opentrials.net/uploads/d7823d80-6f81-11e6-87af-931e370d0cf8/cochrane_schizophrenia_reviews.zip',
        'doi_id': '10.1002/14651858.CD001162',
        'study_id': 'STD-Soni-1990',
        'robs': [risk_of_bias],
        'refs': [reference],
    }
    return conn['warehouse']['cochrane_reviews'].insert(review)
|
{
"content_hash": "0f95faf2b097deecf6aaedd5f28dd402",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 137,
"avg_line_length": 35.744680851063826,
"alnum_prop": 0.5047619047619047,
"repo_name": "arthurSena/processors",
"id": "9afeb946ab801365b97b7cd0fc86f8ea23b3f254",
"size": "1704",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/fixtures/warehouse/cochrane_reviews.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "414"
},
{
"name": "Python",
"bytes": "253590"
}
],
"symlink_target": ""
}
|
import unittest
from robotide.controller.stepcontrollers import StepController
class FakeStep(StepController):
    """StepController stand-in for tests."""
    def __init__(self):
        # Deliberately bypass StepController.__init__ so tests can call
        # the controller's methods without a real step/parent.
        pass
class UpdatingArgumentsTest(unittest.TestCase):
    """Tests for _change_last_empty_to_empty_var: a trailing empty cell is
    converted to ${EMPTY} unless a comment follows it."""
    def test_converting_last_empty_cell_without_args(self):
        self.assertEqual(FakeStep()._change_last_empty_to_empty_var([], None), [])
    def test_converting_last_empty_cell_with_single_value(self):
        self.assertEqual(FakeStep()._change_last_empty_to_empty_var([''], None),
                         ['${EMPTY}'])
    def test_converting_last_empty_cell_with_multiple_values(self):
        # Only the LAST empty cell is converted; earlier empties stay.
        self.assertEqual(FakeStep()._change_last_empty_to_empty_var(['Foo', '', ''], None),
                         ['Foo', '', '${EMPTY}'])
    def test_converting_last_empty_cell_with_comment(self):
        # With a trailing comment the empty cell is left as-is.
        self.assertEqual(FakeStep()._change_last_empty_to_empty_var([''], 'comment'),
                         [''])
class StepContainsKeywordTest(unittest.TestCase, FakeStep):
    """Tests for contains_keyword, mixing in FakeStep so the test case
    itself acts as the step (keyword/args come from _keyword/_args)."""
    @property
    def keyword(self):
        return self._keyword
    @property
    def args(self):
        return self._args
    def setUp(self):
        self._keyword = 'Foo'
        self._args = ['Bar']
    def _verify_contains(self, keyword):
        self.assertTrue(self.contains_keyword(keyword))
    def _verify_does_not_contain(self, keyword):
        self.assertFalse(self.contains_keyword(keyword))
    def test_contains_keyword_in_keyword_position(self):
        self._verify_contains('Foo')
    def test_contains_keyword_in_argument_position(self):
        self._verify_contains('Bar')
    def test_does_not_contain_keyword(self):
        self._verify_does_not_contain('FooBar')
    # BDD prefixes (Given/When/Then/And) are stripped before matching,
    # and matching is case-insensitive.
    def test_contains_keyword_with_given_prefix(self):
        self._args += ['Given Keyword']
        self._verify_contains('Keyword')
    def test_contains_keyword_with_when_prefix(self):
        self._keyword = 'When Something'
        self._verify_contains('SomeThing')
    def test_contains_keyword_with_then_prefix(self):
        self._args = ['Then anything']
        self._verify_contains('anythinG')
    def test_contains_keyword_with_and_prefix(self):
        self._keyword = 'and Nothing Else'
        self._verify_contains('nothingelse')
    def test_does_not_remove_too_many_prefixes(self):
        # Only one prefix is stripped, not every leading prefix word.
        self._keyword = 'Then And Nothing'
        self._verify_contains('And Nothing')
        self._verify_does_not_contain('Nothing')
    def test_matches_to_keyword_with_prefix_word(self):
        self._keyword = 'Then came John'
        self._verify_contains('Then came John')
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "79939922bad08dab5daccbd0ae65aaaa",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 91,
"avg_line_length": 31.202380952380953,
"alnum_prop": 0.6344906524227394,
"repo_name": "fingeronthebutton/RIDE",
"id": "28bd3ee424f2c1fc78b127e43290c485847411a3",
"size": "2621",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "utest/controller/test_stepcontrollers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21370"
},
{
"name": "HTML",
"bytes": "110675"
},
{
"name": "JavaScript",
"bytes": "41401"
},
{
"name": "Python",
"bytes": "2902622"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.db.models.signals import post_save
from toucan.issues.signals import issue_created_signal
from .handlers import issue_created
from .models import Profile
def create_user_profile(**kwargs):
    """post_save handler: ensure a Profile row exists for the saved user."""
    saved_user = kwargs['instance']
    Profile.objects.get_or_create(user=saved_user)
# Create a Profile whenever a user model instance is saved.
post_save.connect(create_user_profile, sender=settings.AUTH_USER_MODEL)
# Route new-issue events to the issue_created handler.
issue_created_signal.connect(issue_created)
|
{
"content_hash": "73beb5ea44e89ce44599335f1eb0d9ca",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 71,
"avg_line_length": 26.875,
"alnum_prop": 0.7906976744186046,
"repo_name": "mcallistersean/b2-issue-tracker",
"id": "f6f32d2bbed37768f0217c601fa4e469aafd2f57",
"size": "430",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "toucan/user_profile/signals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39873"
},
{
"name": "HTML",
"bytes": "55461"
},
{
"name": "JavaScript",
"bytes": "90880"
},
{
"name": "Python",
"bytes": "133899"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url, include
from account.forms import LoginUsernameForm, SignupForm, ChangePasswordForm, SettingsForm, PasswordResetForm, PasswordResetTokenForm
from .base import ViewConfig
# Theme-tester configuration for the django-user-accounts (dua) views.
patch = "http://pinaxproject.com/pinax-design/patches/django-user-accounts.svg"
label = "dua"
title = "Django User Accounts"
# One ViewConfig per account view; pattern_kwargs supply sample URL args.
views = [
    ViewConfig(pattern=r"^account/signup/$", template="account/signup.html", name="account_signup", pattern_kwargs={}, form=SignupForm()),
    ViewConfig(pattern=r"^account/login/$", template="account/login.html", name="account_login", pattern_kwargs={}, form=LoginUsernameForm(), ACCOUNT_OPEN_SIGNUP=True),
    ViewConfig(pattern=r"^account/logout/$", template="account/logout.html", name="account_logout", pattern_kwargs={}),
    ViewConfig(pattern=r"^account/confirm_email/(?P<key>\w+)/$", template="account/email_confirm.html", name="account_confirm_email", pattern_kwargs={"key": "abc"}, confirmation={"key": "foo", "email_address": {"email": "example@sample.com"}}),
    ViewConfig(pattern=r"^account/password/$", template="account/password_change.html", name="account_password", pattern_kwargs={}, form=ChangePasswordForm(user=None)),
    ViewConfig(pattern=r"^account/password/reset/$", template="account/password_reset.html", name="account_password_reset", pattern_kwargs={}, form=PasswordResetForm()),
    ViewConfig(pattern=r"^account/password/reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$", template="account/password_reset_token.html", name="account_password_reset_token", pattern_kwargs={"uidb36": "aaa", "token": "123"}, form=PasswordResetTokenForm()),
    ViewConfig(pattern=r"^account/settings/$", template="account/settings.html", name="account_settings", pattern_kwargs={}, form=SettingsForm()),
    ViewConfig(pattern=r"^account/delete/$", template="account/delete.html", name="account_delete", pattern_kwargs={}),
]
urlpatterns = [
    view.url()
    for view in views
]
# NOTE(review): this rebinding shadows the imported django url() function
# with a URL pattern object -- confirm consumers expect the pattern here.
url = url("", include("pinax_theme_tester.configs.dua"))
|
{
"content_hash": "fad1c4a9dec7ef0fed578afca435e8cf",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 256,
"avg_line_length": 79.68,
"alnum_prop": 0.7289156626506024,
"repo_name": "pinax/pinax_theme_tester",
"id": "968158da7515462de94d8a85a894a5f12d91ca40",
"size": "1992",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pinax_theme_tester/configs/dua.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33235"
},
{
"name": "HTML",
"bytes": "14763"
},
{
"name": "JavaScript",
"bytes": "5760"
},
{
"name": "Python",
"bytes": "44942"
}
],
"symlink_target": ""
}
|
from deap import base
from deap import tools
from copy import deepcopy
import random
from . .import parameterOperations
from .import Creator
def getPromoterFromMap(promoter_map):
    """Return the promoter values of *promoter_map* in key order.

    Equivalent to the former ``lambda x: [x[z] for z in list(x.keys())]``;
    PEP 8 recommends ``def`` over assigning a lambda to a name.

    :param promoter_map: dict mapping parameter names to promoter values.
    :return: list of promoter values.
    """
    return list(promoter_map.values())
def constructPhenotype(stratSettings, chrconf, Individue):
    """Decode an individual's chromosomes into a settings dict.

    Scans every base pair; when a promoter value is found, the next
    GeneSize bases (ints < 33) are summed and linearly rescaled into the
    parameter's [low, high] range taken from stratSettings.

    :param stratSettings: dict of parameter name -> (low, high) range.
    :param chrconf: chromosome config; 'GeneSize' is used for scaling.
    :param Individue: individual (list of chromosomes) with a PromoterMap.
    :return: nested settings dict (expanded by parameterOperations).
    """
    Settings = {}
    # NOTE(review): the read window uses this hard-coded GeneSize of 2,
    # while the rescaling denominator uses chrconf['GeneSize'] -- confirm
    # the two are meant to differ.
    GeneSize = 2
    # Rescale a gene sum V into the parameter range lim = (low, high).
    R = lambda V, lim: (lim[1] - lim[0]) * V / (33 * chrconf['GeneSize']) + lim[0]
    # Invert the map: promoter value -> parameter name.
    PromotersPath = {v: k for k, v in Individue.PromoterMap.items()}
    # print(PromotersPath)
    #print(Individue[:])
    Promoters = list(PromotersPath.keys())
    for C in Individue:
        for BP in range(len(C)):
            if C[BP] in Promoters:
                read_window = C[BP + 1: BP + 1 + GeneSize]
                # Only plain base values (< 33) count toward the gene sum.
                read_window = [V for V in read_window if type(V) == int and V < 33]
                Value = sum(read_window)
                ParameterName = PromotersPath[C[BP]]
                Value = R(Value, stratSettings[ParameterName])
                Settings[ParameterName] = Value
    _Settings = parameterOperations.expandNestedParameters(Settings)
    return _Settings
def getToolbox(Strategy, genconf, Attributes):
    """Build a DEAP toolbox wired with this module's GA operators.

    Registers pachytene as mate, mutate as mutate, an individual factory
    backed by a fresh promoter map, a population initializer, and the
    phenotype constructor.

    :param Strategy: strategy object stored on the Creator individual class.
    :param genconf: genetic config; genconf.chromosome configures layout.
    :param Attributes: dict of parameter name -> range, used for promoters.
    :return: configured deap.base.Toolbox.
    """
    toolbox = base.Toolbox()
    creator = Creator.init(base.Fitness, {'promoterMap': None, 'Strategy': Strategy})
    # creator.create("FitnessMax", base.Fitness, weights=(1.0, 3))
    toolbox.register("mate", pachytene)
    toolbox.register("mutate", mutate)
    PromoterMap = initPromoterMap(Attributes)
    toolbox.register(
        "newind", initInd, creator.Individual, PromoterMap, genconf.chromosome
    )
    toolbox.register("population", tools.initRepeat, list, toolbox.newind)
    toolbox.register(
        "constructPhenotype", constructPhenotype, Attributes, genconf.chromosome
    )
    return toolbox
def initPromoterMap(ParameterRanges):
    """Assign a distinct promoter integer in [120, 240) to each parameter.

    :param ParameterRanges: dict whose keys are parameter names (values
        are ignored here).
    :return: dict mapping parameter name -> unique promoter value.
    """
    names = list(ParameterRanges.keys())
    # random.sample draws len(names) distinct values in one call,
    # replacing the previous shuffle-and-pop loop (and making the old
    # uniqueness assert redundant).
    values = random.sample(range(120, 240), len(names))
    return dict(zip(names, values))
def initChromosomes(PromoterMap, chrconf):
    """Randomly scatter every promoter (each followed by GeneSize random
    base values) across a density-derived number of chromosomes."""
    promoter_pool = deepcopy(getPromoterFromMap(PromoterMap))
    chromosome_count = round(len(promoter_pool) / chrconf['Density']) + 1
    chromosomes = [[] for _ in range(chromosome_count)]
    while promoter_pool:
        for chromosome in chromosomes:
            # Same draw order as before: the 0.3 roll happens first, then
            # the pool is checked, so the RNG stream is unchanged.
            if random.random() < 0.3 and promoter_pool:
                picked = promoter_pool.pop(random.randrange(0, len(promoter_pool)))
                chromosome.append(picked)
                for _ in range(chrconf['GeneSize']):
                    chromosome.append(random.randrange(0, 33))
    return chromosomes
def initInd(Individual, PromoterMap, chrconf):
    """Instantiate an Individual, fill it with fresh chromosomes, and
    attach the promoter map it was built from."""
    individual = Individual()
    individual[:] = initChromosomes(PromoterMap, chrconf)
    individual.PromoterMap = PromoterMap
    return individual
def generateUID():
    """Return a random 6-character ID of uppercase letters and digits.

    Bug fixes: ``string`` was never imported in this module, and
    ``k=6`` was passed to ``str.join`` instead of ``random.choices``
    (a TypeError at call time).
    """
    import string  # local import: module has no top-level string import
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choices(alphabet, k=6))
def chromossomeCrossover(chr1, chr2):
    """Swap a random tail segment between two chromosome lists, in place.

    A cut point is drawn on the shorter list; elements from the cut to
    its end are exchanged with a window of the longer list shifted by a
    random offset in [0, length difference].

    Cleanup: the old ``top_bottom`` coin flip was computed but never
    used, so it has been removed.
    """
    len_diff = abs(len(chr1) - len(chr2))
    offset = random.randrange(0, len_diff + 1)
    minor, major = (chr1, chr2) if len(chr1) < len(chr2) else (chr2, chr1)
    cut_point = random.randrange(0, len(minor))
    for k in range(cut_point, len(minor)):
        # k + offset < len(minor) + len_diff == len(major), so no overrun.
        minor[k], major[k + offset] = major[k + offset], minor[k]
def pachytene(ind1, ind2):
    """Mate two individuals by crossing over corresponding chromosomes.

    Both parents are deep-copied, their chromosome lists sorted by
    length so similarly-sized chromosomes pair up, and each pair is
    recombined in place via chromossomeCrossover.

    :param ind1: first parent (list of chromosomes).
    :param ind2: second parent.
    :return: the two recombined copies, or None when the parents have a
        different number of chromosomes.
    """
    # NOTE(review): the bare return yields None on mismatched lengths;
    # confirm the mate machinery tolerates that instead of a pair.
    if len(ind1) != len(ind2):
        return
    ind1 = deepcopy(ind1)
    ind2 = deepcopy(ind2)
    ind1[:] = sorted(ind1, key=len)
    ind2[:] = sorted(ind2, key=len)
    childChr = []
    for W in range(len(ind1)):
        chromossomeCrossover(ind1[W], ind2[W])
        # NOTE(review): childChr is never used, but random.choice still
        # advances the RNG stream -- removing it would change downstream
        # random draws.
        childChr.append(random.choice([ind1[W], ind2[W]]))
    return ind1, ind2
def mutate(ind, mutpb=0.001, mutagg=12):
    """Point-mutate the base values of an individual, in place.

    Each base-pair VALUE below 100 (plain bases are drawn from
    ``randrange(0, 33)``; promoter markers live in [120, 240)) is shifted
    by a random amount in [-mutagg, mutagg) with probability *mutpb*.
    Promoter values are left untouched.

    :param ind: individual (list of chromosome lists of ints).
    :param mutpb: per-base mutation probability.
    :param mutagg: mutation aggressiveness (max shift magnitude).
    :return: one-tuple containing *ind* (DEAP mutation convention).
    """
    for chromosome in ind:
        for bp in range(len(chromosome)):
            # Bug fix: the original tested the INDEX (``BP < 100``), so
            # promoters at positions < 100 were mutated and corrupted.
            # Compare the value instead.
            if chromosome[bp] < 100:
                if random.random() < mutpb:
                    chromosome[bp] += random.choice(range(-mutagg, mutagg))
    return ind,
def clone(Chr):
    """Append a random prefix or suffix copy of *Chr* to itself, in place.

    A non-zero cut point is drawn from [-len(Chr), len(Chr)); positive
    cuts duplicate a prefix, negative cuts a suffix.

    Bug fixes: the original body sliced the ``chr`` builtin instead of
    the ``Chr`` parameter and referenced an undefined name ``new``.
    """
    cut_point = random.randrange(-len(Chr), len(Chr))
    if not cut_point:
        cut_point = 1
    duplicated = Chr[:cut_point] if cut_point > 0 else Chr[cut_point:]
    Chr += duplicated
|
{
"content_hash": "fff30e7d624b3f8e2b3270d2cb318aad",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 85,
"avg_line_length": 32.4375,
"alnum_prop": 0.6184971098265896,
"repo_name": "Gab0/gekkoJaponicus",
"id": "0ab9b73f3e5d125048f808e1cc7706421eba951b",
"size": "4685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "promoterz/representation/chromosome.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1589"
},
{
"name": "Python",
"bytes": "103754"
}
],
"symlink_target": ""
}
|
import flask
import auth
import config
import model
import util
from main import app
# OAuth2 client settings for Microsoft Live sign-in; the wl.emails scope
# is requested so the fetched profile includes email addresses.
microsoft_config = dict(
    access_token_method='POST',
    access_token_url='https://login.live.com/oauth20_token.srf',
    authorize_url='https://login.live.com/oauth20_authorize.srf',
    base_url='https://apis.live.net/v5.0/',
    consumer_key=config.CONFIG_DB.microsoft_client_id,
    consumer_secret=config.CONFIG_DB.microsoft_client_secret,
    request_token_params={'scope': 'wl.emails'},
)
microsoft = auth.create_oauth_app(microsoft_config, 'microsoft')
@app.route('/api/auth/callback/microsoft/')
def microsoft_authorized():
    """OAuth2 callback: store the token, fetch the profile, sign the user in.

    Returns a redirect when the user denied access, an error string when
    the profile fetch reports an error, otherwise the signed-in response.
    """
    response = microsoft.authorized_response()
    if response is None:
        flask.flash('You denied the request to sign in.')
        return flask.redirect(util.get_next_url())
    flask.session['oauth_token'] = (response['access_token'], '')
    me = microsoft.get('me')
    if me.data.get('error', {}):
        # Bug fix: the error payload lives on me.data (as the check above
        # shows), not on the response object itself -- indexing me['error']
        # would fail before the message could be built.
        return 'Unknown error: error:%s error_description:%s' % (
            me.data['error']['code'],
            me.data['error']['message'],
        )
    user_db = retrieve_user_from_microsoft(me.data)
    return auth.signin_user_db(user_db)
@microsoft.tokengetter
def get_microsoft_oauth_token():
    """Return the OAuth token stored in the session by the callback."""
    return flask.session.get('oauth_token')
@app.route('/signin/microsoft/')
def signin_microsoft():
    """Kick off the Microsoft OAuth sign-in flow."""
    return auth.signin_oauth(microsoft)
def retrieve_user_from_microsoft(response):
    """Find the User matching a Microsoft profile, creating one if absent."""
    auth_id = 'microsoft_%s' % response['id']
    existing_user = model.User.get_by('auth_ids', auth_id)
    if existing_user:
        return existing_user
    # Prefer the user's preferred address, falling back to the account one.
    emails = response['emails']
    email = emails['preferred'] or emails['account']
    return auth.create_user_db(
        auth_id=auth_id,
        name=response.get('name', ''),
        username=email,
        email=email,
        verified=bool(email),
    )
|
{
"content_hash": "1a2879713d49c5158a777efff0c8bac4",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 74,
"avg_line_length": 27.693548387096776,
"alnum_prop": 0.6924868957483984,
"repo_name": "lipis/electron-crash-reporter",
"id": "89e16d7a7f130e8a5a0c1720e6590aace47da9b1",
"size": "1734",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "main/auth/microsoft.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5399"
},
{
"name": "CoffeeScript",
"bytes": "16008"
},
{
"name": "HTML",
"bytes": "82075"
},
{
"name": "JavaScript",
"bytes": "65"
},
{
"name": "Python",
"bytes": "129250"
}
],
"symlink_target": ""
}
|
# Development/test defaults for the Flask application.
DEBUG = True  # never enable in production
SECRET_KEY = 'secret'  # NOTE(review): placeholder -- override in deployment
SQLALCHEMY_DATABASE_URI = 'sqlite://'  # in-memory SQLite database
DATABASE_CONNECT_OPTIONS = {}
THREADS_PER_PAGE = 8
WTF_CSRF_ENABLED = False  # CSRF checks disabled (test-friendly default)
CSRF_SESSION_KEY = 'secret'  # NOTE(review): placeholder -- override in deployment
|
{
"content_hash": "c92f2ac5aa53a5a85206c0812c9a40b5",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 37,
"avg_line_length": 19.88888888888889,
"alnum_prop": 0.7094972067039106,
"repo_name": "jstutters/flask-application-template",
"id": "c0591070ba1dbeceaa4378d3aa209c950cc1e911",
"size": "179",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/default_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "669"
},
{
"name": "Python",
"bytes": "1771"
}
],
"symlink_target": ""
}
|
import warnings
from django.conf import settings
from django.core import urlresolvers
from django.core.exceptions import ImproperlyConfigured
from django.contrib.comments.models import Comment
from django.contrib.comments.forms import CommentForm
from django.utils.importlib import import_module
warnings.warn("django.contrib.comments is deprecated and will be removed before Django 1.8.", DeprecationWarning)
DEFAULT_COMMENTS_APP = 'django.contrib.comments'
def get_comment_app():
    """
    Get the comment app (i.e. "django.contrib.comments") as defined in the
    settings, verifying it is installed and importable.
    """
    # Make sure the app's in INSTALLED_APPS
    comments_app = get_comment_app_name()
    if comments_app not in settings.INSTALLED_APPS:
        # Bug fix: report the resolved app name.  settings.COMMENTS_APP may
        # be unset (the default app name is then in use), in which case
        # formatting with settings.COMMENTS_APP would raise AttributeError
        # instead of the intended ImproperlyConfigured.
        raise ImproperlyConfigured("The COMMENTS_APP (%r) "
                                   "must be in INSTALLED_APPS" % comments_app)
    # Try to import the package
    try:
        package = import_module(comments_app)
    except ImportError as e:
        raise ImproperlyConfigured("The COMMENTS_APP setting refers to "
                                   "a non-existing package. (%s)" % e)
    return package
def get_comment_app_name():
    """
    Returns the name of the comment app: the COMMENTS_APP setting value if
    present, otherwise DEFAULT_COMMENTS_APP.
    """
    return getattr(settings, 'COMMENTS_APP', DEFAULT_COMMENTS_APP)
def get_model():
    """
    Returns the comment model class; a custom COMMENTS_APP may override it
    by exposing its own get_model().
    """
    app_is_custom = get_comment_app_name() != DEFAULT_COMMENTS_APP
    if app_is_custom and hasattr(get_comment_app(), "get_model"):
        return get_comment_app().get_model()
    return Comment
def get_form():
    """
    Returns the comment ModelForm class; a custom COMMENTS_APP may override
    it by exposing its own get_form().
    """
    app_is_custom = get_comment_app_name() != DEFAULT_COMMENTS_APP
    if app_is_custom and hasattr(get_comment_app(), "get_form"):
        return get_comment_app().get_form()
    return CommentForm
def get_form_target():
    """
    Returns the target URL for the comment form submission view; a custom
    COMMENTS_APP may override it by exposing get_form_target().
    """
    app_is_custom = get_comment_app_name() != DEFAULT_COMMENTS_APP
    if app_is_custom and hasattr(get_comment_app(), "get_form_target"):
        return get_comment_app().get_form_target()
    return urlresolvers.reverse("django.contrib.comments.views.comments.post_comment")
def get_flag_url(comment):
    """
    Get the URL for the "flag this comment" view; a custom COMMENTS_APP may
    override it by exposing get_flag_url().
    """
    app_is_custom = get_comment_app_name() != DEFAULT_COMMENTS_APP
    if app_is_custom and hasattr(get_comment_app(), "get_flag_url"):
        return get_comment_app().get_flag_url(comment)
    return urlresolvers.reverse("django.contrib.comments.views.moderation.flag",
                                args=(comment.id,))
def get_delete_url(comment):
    """
    Get the URL for the "delete this comment" view; a custom COMMENTS_APP
    may override it by exposing get_delete_url().
    """
    app_is_custom = get_comment_app_name() != DEFAULT_COMMENTS_APP
    if app_is_custom and hasattr(get_comment_app(), "get_delete_url"):
        return get_comment_app().get_delete_url(comment)
    return urlresolvers.reverse("django.contrib.comments.views.moderation.delete",
                                args=(comment.id,))
def get_approve_url(comment):
    """
    Get the URL for the "approve this comment from moderation" view; a
    custom COMMENTS_APP may override it by exposing get_approve_url().
    """
    app_is_custom = get_comment_app_name() != DEFAULT_COMMENTS_APP
    if app_is_custom and hasattr(get_comment_app(), "get_approve_url"):
        return get_comment_app().get_approve_url(comment)
    return urlresolvers.reverse("django.contrib.comments.views.moderation.approve",
                                args=(comment.id,))
|
{
"content_hash": "a413a409f5f2768510c5ea1ab65a2757",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 113,
"avg_line_length": 37,
"alnum_prop": 0.6503737780333525,
"repo_name": "makinacorpus/django",
"id": "0b3fcebc51f8b592260d4b60857214961945c79c",
"size": "3478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/comments/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "98175"
},
{
"name": "Python",
"bytes": "8391980"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
}
|
from amocrm import BaseContact, BaseLead, fields, LeadsManager
class Contact(BaseContact):
    """amoCRM contact with custom fields (labels are the Russian CRM
    field names: position = "Должность", site = "Сайт")."""
    position = fields.CustomField(u'Должность')
    site = fields.CustomField(u'Сайт')
    # Work-typed enum fields for contact details.
    email = fields.EnumCustomField(u'Email', enum='WORK')
    phone = fields.EnumCustomField(u'Телефон', enum='WORK')
class Lead(BaseLead):
    """amoCRM lead with custom fields mirroring Contact."""
    position = fields.CustomField(u'Должность')
    site = fields.CustomField(u'Сайт')
    #email = fields.CustomField(u'Email')
    phone = fields.EnumCustomField(u'Телефон', enum='WORK')
    # NOTE(review): _StatusTypeField is a private amocrm API -- confirm it
    # is stable across library versions.
    status = fields._StatusTypeField()
|
{
"content_hash": "aba55e9b345f632332514f02d2e59cae",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 62,
"avg_line_length": 39.142857142857146,
"alnum_prop": 0.718978102189781,
"repo_name": "mcmaxwell/idea_digital_agency",
"id": "206757285e9ab68f74d62b19fa9595122a1235aa",
"size": "613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "idea/info/crm_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "625335"
},
{
"name": "Dockerfile",
"bytes": "1579"
},
{
"name": "HTML",
"bytes": "795052"
},
{
"name": "JavaScript",
"bytes": "1941111"
},
{
"name": "PHP",
"bytes": "14721"
},
{
"name": "Python",
"bytes": "422853"
},
{
"name": "Shell",
"bytes": "718"
}
],
"symlink_target": ""
}
|
"""Command for adding instances to target pools."""
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.compute.lib import base_classes
from googlecloudsdk.compute.lib import utils
class AddInstances(base_classes.NoOutputAsyncMutator):
  """Add instances to a target pool."""

  @staticmethod
  def Args(parser):
    """Register the command-line flags for this subcommand."""
    parser.add_argument(
        '--instances',
        type=arg_parsers.ArgList(min_length=1),
        action=arg_parsers.FloatingListValuesCatcher(),
        help='Specifies a list of instances to add to the target pool.',
        metavar='INSTANCE',
        required=True)
    utils.AddZoneFlag(
        parser,
        resource_type='instances',
        operation_type='add to the target pool')
    parser.add_argument(
        'name',
        help='The name of the target pool to which to add the instances.')

  @property
  def service(self):
    return self.compute.targetPools

  @property
  def method(self):
    return 'AddInstance'

  @property
  def resource_type(self):
    return 'targetPools'

  def CreateRequests(self, args):
    """Build the single AddInstance request for the parsed arguments."""
    refs = self.CreateZonalReferences(
        args.instances, args.zone, resource_type='instances')

    # Collect the instance references and the set of regions they span.
    instance_msgs = []
    regions = set()
    for ref in refs:
      instance_msgs.append(
          self.messages.InstanceReference(instance=ref.SelfLink()))
      regions.add(utils.ZoneNameToRegionName(ref.zone))

    # A target pool is regional, so every instance must share one region.
    if len(regions) > 1:
      raise calliope_exceptions.ToolException(
          'Instances must all be in the same region as the target pool.')

    pool_ref = self.CreateRegionalReference(
        args.name, regions.pop(),
        resource_type='targetPools')

    add_request = self.messages.ComputeTargetPoolsAddInstanceRequest(
        region=pool_ref.region,
        project=self.project,
        targetPool=pool_ref.Name(),
        targetPoolsAddInstanceRequest=(
            self.messages.TargetPoolsAddInstanceRequest(
                instances=instance_msgs)))
    return [add_request]


AddInstances.detailed_help = {
    'brief': 'Add instances to a target pool',
    'DESCRIPTION': """\
        *{command}* is used to add one or more instances to a target pool.
        For more information on health checks and load balancing, see
        link:https://developers.google.com/compute/docs/load-balancing/[].
        """,
}
|
{
"content_hash": "d4bb7fc7da65a6c6d39e8ab533b5112d",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 78,
"avg_line_length": 32.17948717948718,
"alnum_prop": 0.6844621513944223,
"repo_name": "wemanuel/smry",
"id": "7c0b33dfab9c7e52fede58d5972bc20b170bf009",
"size": "2560",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/target_pools/add_instances.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3990"
},
{
"name": "Groff",
"bytes": "1221174"
},
{
"name": "HTML",
"bytes": "1873470"
},
{
"name": "JavaScript",
"bytes": "2192"
},
{
"name": "Makefile",
"bytes": "6032"
},
{
"name": "PHP",
"bytes": "16660"
},
{
"name": "Python",
"bytes": "47139164"
},
{
"name": "Shell",
"bytes": "37102"
},
{
"name": "SourcePawn",
"bytes": "1160"
}
],
"symlink_target": ""
}
|
import os.path
from setuptools import setup, find_packages
def read_file_contents(pathname):
    """
    Return the raw bytes of ``pathname``, resolved relative to the
    directory containing this file.

    :param pathname:
        The file to open and read contents from, relative to this
        module's directory.
    :return: the file contents as ``bytes``.
    """
    full_path = os.path.join(os.path.dirname(__file__), pathname)
    # Use a context manager so the handle is always closed; the original
    # version leaked the open file object.
    with open(full_path, 'rb') as f:
        return f.read()
setup(
name="Pycco",
version="0.2.0",
description="""A Python port of Docco: the original quick-and-dirty,
hundred-line-long, literate-programming-style documentation generator.
""",
long_description=read_file_contents('README'),
author="Nick Fitzgerald",
author_email="fitzgen@gmail.com",
license="MIT License",
url="http://fitzgen.github.com/pycco",
keywords=' '.join(['python',
'literate',
'programming',
'documentation',
'generator',
]),
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Documentation',
'Topic :: Utilities',
],
packages=find_packages(),
entry_points={
'console_scripts': [
'pycco = pycco:main',
]
},
install_requires=[
'markdown',
'pygments',
'pystache',
'smartypants',
'cssmin',
],
)
|
{
"content_hash": "523c9647ce4f3b2e0cc41a0645e834db",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 31.24561403508772,
"alnum_prop": 0.5373385738349242,
"repo_name": "mhils/pycco",
"id": "5644552f6a0c4199fcf2a2411f81535cc1846ada",
"size": "1806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23434"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images.images import views
from openstack_dashboard.dashboards.admin.images import forms
from openstack_dashboard.dashboards.admin.images \
import tables as project_tables
class IndexView(tables.DataTableView):
    table_class = project_tables.AdminImagesTable
    template_name = 'admin/images/index.html'

    def has_prev_data(self, table):
        """Whether an earlier page of images exists."""
        return self._prev

    def has_more_data(self, table):
        """Whether a later page of images exists."""
        return self._more

    def get_data(self):
        """Fetch one page of images (public and private) for the table."""
        meta = project_tables.AdminImagesTable._meta
        page_back_marker = self.request.GET.get(meta.prev_pagination_param,
                                                None)
        if page_back_marker is None:
            # Normal forward pagination.
            sort_dir = 'desc'
            marker = self.request.GET.get(meta.pagination_param, None)
        else:
            # Paging backwards: fetch ascending from the previous marker.
            sort_dir = 'asc'
            marker = page_back_marker

        images = []
        try:
            images, self._more, self._prev = api.glance.image_list_detailed(
                self.request,
                marker=marker,
                paginate=True,
                filters={'is_public': None},
                sort_dir=sort_dir)
            if page_back_marker is not None:
                # Backward pages arrive oldest-first; restore the
                # newest-first display order.
                images = sorted(images,
                                key=lambda image: getattr(image,
                                                          'created_at'),
                                reverse=True)
        except Exception:
            self._prev = False
            self._more = False
            exceptions.handle(self.request,
                              _('Unable to retrieve image list.'))
        return images
class CreateView(views.CreateView):
    # Admin variant of the project image-create view: only the template,
    # form class, and success redirect differ from the parent.
    template_name = 'admin/images/create.html'
    form_class = forms.AdminCreateImageForm
    success_url = reverse_lazy('horizon:admin:images:index')
class UpdateView(views.UpdateView):
    # Admin variant of the project image-update view: only the template,
    # form class, and success redirect differ from the parent.
    template_name = 'admin/images/update.html'
    form_class = forms.AdminUpdateImageForm
    success_url = reverse_lazy('horizon:admin:images:index')
class DetailView(views.DetailView):
    """Admin placeholder for the image detail view; inherits all behavior
    from the project-dashboard DetailView unchanged."""
    pass
|
{
"content_hash": "c45d2cd3bf806c5cead3765e864c213b",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 78,
"avg_line_length": 31.15068493150685,
"alnum_prop": 0.63896218117854,
"repo_name": "aaronorosen/horizon-congress",
"id": "6fdf04f56cb48c586f5c139c116180e4b1272e96",
"size": "3038",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/images/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "282571"
},
{
"name": "JavaScript",
"bytes": "697632"
},
{
"name": "Python",
"bytes": "3559404"
},
{
"name": "Shell",
"bytes": "15387"
}
],
"symlink_target": ""
}
|
"""
`ftfy.badness` contains a heuristic that detects likely mojibake.
This heuristic signals to ftfy which segments of text need to be fixed, and
also indicates when the text can stop being fixed.
The design of this heuristic is that we categorize the approximately 400
Unicode characters that occur in UTF-8 mojibake, specifically the characters
that come from mixing up UTF-8 with the other encodings we support. We
identify sequences and contexts of these characters that are much more likely
to be mojibake than intended strings, such as lowercase accented letters
followed immediately by currency symbols.
"""
import warnings
import re
# There are only 403 characters that occur in known UTF-8 mojibake, and we can
# characterize them:
# Each value is a character-class fragment (characters and ranges) that is
# substituted by name into the verbose BADNESS_RE pattern below.
MOJIBAKE_CATEGORIES = {
    # Characters that appear in many different contexts. Sequences that contain
    # them are not inherently mojibake
    "common": (
        "\N{NO-BREAK SPACE}"
        "\N{SOFT HYPHEN}"
        "\N{MIDDLE DOT}"
        "\N{ACUTE ACCENT}"
        "\N{EN DASH}"
        "\N{EM DASH}"
        "\N{HORIZONTAL BAR}"
        "\N{HORIZONTAL ELLIPSIS}"
        "\N{RIGHT SINGLE QUOTATION MARK}"
    ),
    # the C1 control character range, which have no uses outside of mojibake anymore
    "c1": "\x80-\x9f",
    # Characters that are nearly 100% used in mojibake
    "bad": (
        "\N{BROKEN BAR}"
        "\N{CURRENCY SIGN}"
        "\N{DIAERESIS}"
        "\N{NOT SIGN}"
        "\N{MACRON}"
        "\N{PILCROW SIGN}"
        "\N{SECTION SIGN}"
        "\N{CEDILLA}"
        "\N{LATIN SMALL LETTER F WITH HOOK}"
        "\N{MODIFIER LETTER CIRCUMFLEX ACCENT}"  # it's not a modifier
        "\N{CARON}"
        "\N{BREVE}"
        "\N{OGONEK}"
        "\N{SMALL TILDE}"
        "\N{DAGGER}"
        "\N{DOUBLE DAGGER}"
        "\N{PER MILLE SIGN}"
        "\N{REVERSED NOT SIGN}"
        "\N{LOZENGE}"
        "\ufffd"
        # Theoretically these would appear in 'numeric' contexts, but when they
        # co-occur with other mojibake characters, it's not really ambiguous
        "\N{FEMININE ORDINAL INDICATOR}"
        "\N{MASCULINE ORDINAL INDICATOR}"
    ),
    # Currency symbols (drawn from several legacy encodings)
    "currency": (
        "\N{CENT SIGN}"
        "\N{POUND SIGN}"
        "\N{YEN SIGN}"
        "\N{PESETA SIGN}"
        "\N{EURO SIGN}"
    ),
    # Punctuation that normally opens a clause or quotation
    "start_punctuation": (
        "\N{INVERTED EXCLAMATION MARK}"
        "\N{LEFT-POINTING DOUBLE ANGLE QUOTATION MARK}"
        "\N{INVERTED QUESTION MARK}"
        "\N{COPYRIGHT SIGN}"
        "\N{GREEK TONOS}"
        "\N{GREEK DIALYTIKA TONOS}"
        "\N{LEFT SINGLE QUOTATION MARK}"
        "\N{SINGLE LOW-9 QUOTATION MARK}"
        "\N{LEFT DOUBLE QUOTATION MARK}"
        "\N{DOUBLE LOW-9 QUOTATION MARK}"
        "\N{BULLET}"
        "\N{SINGLE LEFT-POINTING ANGLE QUOTATION MARK}"
        "\uf8ff"  # OS-specific symbol, usually the Apple logo
    ),
    # Punctuation that normally closes a clause or quotation
    "end_punctuation": (
        "\N{REGISTERED SIGN}"
        "\N{RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK}"
        "\N{DOUBLE ACUTE ACCENT}"
        "\N{RIGHT DOUBLE QUOTATION MARK}"
        "\N{SINGLE RIGHT-POINTING ANGLE QUOTATION MARK}"
        "\N{TRADE MARK SIGN}"
    ),
    # Symbols that normally appear adjacent to digits
    "numeric": (
        "\N{SUPERSCRIPT TWO}"
        "\N{SUPERSCRIPT THREE}"
        "\N{SUPERSCRIPT ONE}"
        "\N{PLUS-MINUS SIGN}"
        "\N{VULGAR FRACTION ONE QUARTER}"
        "\N{VULGAR FRACTION ONE HALF}"
        "\N{VULGAR FRACTION THREE QUARTERS}"
        "\N{MULTIPLICATION SIGN}"
        "\N{MICRO SIGN}"
        "\N{DIVISION SIGN}"
        "\N{FRACTION SLASH}"
        "\N{PARTIAL DIFFERENTIAL}"
        "\N{INCREMENT}"
        "\N{N-ARY PRODUCT}"
        "\N{N-ARY SUMMATION}"
        "\N{SQUARE ROOT}"
        "\N{INFINITY}"
        "\N{INTERSECTION}"
        "\N{INTEGRAL}"
        "\N{ALMOST EQUAL TO}"
        "\N{NOT EQUAL TO}"
        "\N{IDENTICAL TO}"
        "\N{LESS-THAN OR EQUAL TO}"
        "\N{GREATER-THAN OR EQUAL TO}"
        "\N{NUMERO SIGN}"
    ),
    # Letters that might be used to make emoticon faces (kaomoji), and
    # therefore might need to appear in more improbable-looking contexts.
    #
    # These are concatenated character ranges for use in a regex. I know
    # they look like faces themselves. I think expressing the ranges like
    # this helps to illustrate why we need to be careful with these
    # characters.
    "kaomoji": (
        "Ò-Ö"
        "Ù-Ü"
        "ò-ö"
        "ø-ü"
        "\N{LATIN CAPITAL LETTER O WITH DOUBLE ACUTE}"
        "\N{DEGREE SIGN}"
    ),
    # Uppercase accented letters (excluding the kaomoji-capable ones above)
    "upper_accented": (
        # LATIN CAPITAL LETTER A WITH GRAVE - LATIN CAPITAL LETTER N WITH TILDE
        "\xc0-\xd1"
        # skip capital O's and U's that could be used in kaomoji, but
        # include Ø because it's very common in Arabic mojibake:
        "\N{LATIN CAPITAL LETTER O WITH STROKE}"
        "\N{LATIN CAPITAL LETTER U WITH DIAERESIS}"
        "\N{LATIN CAPITAL LETTER Y WITH ACUTE}"
        "\N{LATIN CAPITAL LETTER A WITH BREVE}"
        "\N{LATIN CAPITAL LETTER A WITH OGONEK}"
        "\N{LATIN CAPITAL LETTER C WITH ACUTE}"
        "\N{LATIN CAPITAL LETTER C WITH CARON}"
        "\N{LATIN CAPITAL LETTER D WITH CARON}"
        "\N{LATIN CAPITAL LETTER D WITH STROKE}"
        "\N{LATIN CAPITAL LETTER E WITH OGONEK}"
        "\N{LATIN CAPITAL LETTER E WITH CARON}"
        "\N{LATIN CAPITAL LETTER G WITH BREVE}"
        "\N{LATIN CAPITAL LETTER I WITH DOT ABOVE}"
        "\N{LATIN CAPITAL LETTER L WITH ACUTE}"
        "\N{LATIN CAPITAL LETTER L WITH CARON}"
        "\N{LATIN CAPITAL LETTER L WITH STROKE}"
        "\N{LATIN CAPITAL LETTER N WITH ACUTE}"
        "\N{LATIN CAPITAL LETTER N WITH CARON}"
        "\N{LATIN CAPITAL LIGATURE OE}"
        "\N{LATIN CAPITAL LETTER R WITH CARON}"
        "\N{LATIN CAPITAL LETTER S WITH ACUTE}"
        "\N{LATIN CAPITAL LETTER S WITH CEDILLA}"
        "\N{LATIN CAPITAL LETTER S WITH CARON}"
        "\N{LATIN CAPITAL LETTER T WITH CEDILLA}"
        "\N{LATIN CAPITAL LETTER T WITH CARON}"
        "\N{LATIN CAPITAL LETTER U WITH RING ABOVE}"
        "\N{LATIN CAPITAL LETTER U WITH DOUBLE ACUTE}"
        "\N{LATIN CAPITAL LETTER Y WITH DIAERESIS}"
        "\N{LATIN CAPITAL LETTER Z WITH ACUTE}"
        "\N{LATIN CAPITAL LETTER Z WITH DOT ABOVE}"
        "\N{LATIN CAPITAL LETTER Z WITH CARON}"
        "\N{CYRILLIC CAPITAL LETTER GHE WITH UPTURN}"
    ),
    # Lowercase accented letters (excluding the kaomoji-capable ones above)
    "lower_accented": (
        "\N{LATIN SMALL LETTER SHARP S}"
        # LATIN SMALL LETTER A WITH GRAVE - LATIN SMALL LETTER N WITH TILDE
        "\xe0-\xf1"
        # skip o's and u's that could be used in kaomoji
        "\N{LATIN SMALL LETTER A WITH BREVE}"
        "\N{LATIN SMALL LETTER A WITH OGONEK}"
        "\N{LATIN SMALL LETTER C WITH ACUTE}"
        "\N{LATIN SMALL LETTER C WITH CARON}"
        "\N{LATIN SMALL LETTER D WITH CARON}"
        "\N{LATIN SMALL LETTER D WITH STROKE}"
        "\N{LATIN SMALL LETTER E WITH OGONEK}"
        "\N{LATIN SMALL LETTER E WITH CARON}"
        "\N{LATIN SMALL LETTER G WITH BREVE}"
        "\N{LATIN SMALL LETTER L WITH ACUTE}"
        "\N{LATIN SMALL LETTER L WITH CARON}"
        "\N{LATIN SMALL LETTER L WITH STROKE}"
        "\N{LATIN SMALL LIGATURE OE}"
        "\N{LATIN SMALL LETTER R WITH ACUTE}"
        "\N{LATIN SMALL LETTER S WITH ACUTE}"
        "\N{LATIN SMALL LETTER S WITH CEDILLA}"
        "\N{LATIN SMALL LETTER S WITH CARON}"
        "\N{LATIN SMALL LETTER T WITH CARON}"
        "\N{LATIN SMALL LETTER U WITH DIAERESIS}"
        "\N{LATIN SMALL LETTER Z WITH ACUTE}"
        "\N{LATIN SMALL LETTER Z WITH DOT ABOVE}"
        "\N{LATIN SMALL LETTER Z WITH CARON}"
        "\N{CYRILLIC SMALL LETTER GHE WITH UPTURN}"
        "\N{LATIN SMALL LIGATURE FI}"
        "\N{LATIN SMALL LIGATURE FL}"
    ),
    "upper_common": (
        "\N{LATIN CAPITAL LETTER THORN}"
        "\N{GREEK CAPITAL LETTER ALPHA}-\N{GREEK CAPITAL LETTER OMEGA}"
        # not included under 'accented' because these can commonly
        # occur at ends of words, in positions where they'd be detected
        # as mojibake
        "\N{GREEK CAPITAL LETTER ALPHA WITH TONOS}"
        "\N{GREEK CAPITAL LETTER EPSILON WITH TONOS}"
        "\N{GREEK CAPITAL LETTER ETA WITH TONOS}"
        "\N{GREEK CAPITAL LETTER IOTA WITH TONOS}"
        "\N{GREEK CAPITAL LETTER OMICRON WITH TONOS}"
        "\N{GREEK CAPITAL LETTER UPSILON WITH TONOS}"
        "\N{GREEK CAPITAL LETTER OMEGA WITH TONOS}"
        "\N{GREEK CAPITAL LETTER IOTA WITH DIALYTIKA}"
        "\N{GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA}"
        "\N{CYRILLIC CAPITAL LETTER IO}-\N{CYRILLIC CAPITAL LETTER YA}"
    ),
    "lower_common": (
        # lowercase thorn does not appear in mojibake
        "\N{GREEK SMALL LETTER ALPHA}-\N{GREEK SMALL LETTER OMEGA}"
        "\N{GREEK SMALL LETTER ALPHA WITH TONOS}"
        "\N{GREEK SMALL LETTER EPSILON WITH TONOS}"
        "\N{GREEK SMALL LETTER ETA WITH TONOS}"
        "\N{GREEK SMALL LETTER IOTA WITH TONOS}"
        "\N{GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS}"
        "\N{CYRILLIC SMALL LETTER A}-\N{CYRILLIC SMALL LETTER DZHE}"
    ),
    "box": (
        # omit the single horizontal line, might be used in kaomoji
        "│┌┐┘├┤┬┼"
        "\N{BOX DRAWINGS DOUBLE HORIZONTAL}-\N{BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL}"
        "▀▄█▌▐░▒▓"
    ),
}
# We can now build a regular expression that detects unlikely juxtapositions
# of characters, mostly based on their categories.
#
# Another regular expression, which detects sequences that look more specifically
# like UTF-8 mojibake, appears in chardata.py.
#
# This is a verbose regular expression, with whitespace added for somewhat more
# readability. Remember that the only spaces that count as literal spaces in this
# expression are ones inside character classes (square brackets).
BADNESS_RE = re.compile(
r"""
[{c1}]
|
[{bad}{lower_accented}{upper_accented}{box}{start_punctuation}{end_punctuation}{currency}{numeric}] [{bad}]
|
[a-zA-Z] [{lower_common}{upper_common}] [{bad}]
|
[{bad}] [{lower_accented}{upper_accented}{box}{start_punctuation}{end_punctuation}{currency}{numeric}]
|
[{lower_accented}{lower_common}{box}{end_punctuation}{currency}{numeric}] [{upper_accented}]
|
[{box}{end_punctuation}{currency}{numeric}] [{lower_accented}]
|
# leave out [upper_accented][currency] without further info, because it's used in some
# fancy leetspeak-esque writing
[{lower_accented}{box}{end_punctuation}] [{currency}]
|
\s [{upper_accented}] [{currency}]
|
[{upper_accented}{box}] [{numeric}]
|
[{lower_accented}{upper_accented}{box}{currency}{end_punctuation}] [{start_punctuation}] [{numeric}]
|
[{lower_accented}{upper_accented}{currency}{numeric}{box}] [{end_punctuation}] [{start_punctuation}]
|
[{currency}{numeric}{box}] [{start_punctuation}]
|
[a-z] [{upper_accented}] [{start_punctuation}{currency}]
|
[{box}] [{kaomoji}]
|
[{lower_accented}{upper_accented}{currency}{numeric}{start_punctuation}{end_punctuation}] [{box}]
|
[{box}] [{end_punctuation}]
|
[{lower_accented}{upper_accented}] [{end_punctuation}] \w
|
# The ligature œ when not followed by an unaccented Latin letter
[Œœ][^A-Za-z]
|
# Common Windows-1252 2-character mojibake that isn't covered by the cases above
[ÂÃÎÐ][€Šš¢£Ÿž\xa0\xad®©°·»{start_punctuation}{end_punctuation}–—´]
|
× [²³]
|
# Windows-1252 mojibake of Arabic words needs to include the 'common' characters.
# To compensate, we require four characters to be matched.
[ØÙ] [{common}{currency}{bad}{numeric}{start_punctuation}ŸŠ®°µ»]
[ØÙ] [{common}{currency}{bad}{numeric}{start_punctuation}ŸŠ®°µ»]
|
# Windows-1252 mojibake that starts 3-character sequences for some South Asian
# alphabets
à[²µ¹¼½¾]
|
# MacRoman mojibake that isn't covered by the cases above
√[±∂†≠®™´≤≥¥µø]
|
≈[°¢]
|
‚Ä[ìîïòôúùû†°¢π]
|
‚[âó][àä°ê]
|
# Windows-1251 mojibake of characters in the U+2000 range
вЂ
|
# Windows-1251 mojibake of Latin-1 characters and/or the Cyrillic alphabet.
# Because the 2-character sequences involved here may be common, we require
# seeing a 3-character sequence.
[ВГРС][{c1}{bad}{start_punctuation}{end_punctuation}{currency}°µ][ВГРС]
|
# A distinctive five-character sequence of Cyrillic letters, which can be
# Windows-1251 mojibake on top of Latin-1 mojibake of Windows-1252 characters.
# Require a Latin letter nearby.
ГўВЂВ.[A-Za-z ]
|
# Windows-1252 encodings of 'à' and 'á', as well as \xa0 itself
Ã[\xa0¡]
|
[a-z]\s?[ÃÂ][ ]
|
^[ÃÂ][ ]
|
# Cases where  precedes a character as an encoding of exactly the same
# character, and the character is common enough
[a-z.,?!{end_punctuation}] Â [ {start_punctuation}{end_punctuation}]
|
# Windows-1253 mojibake of characters in the U+2000 range
β€[™\xa0Ά\xad®°]
|
# Windows-1253 mojibake of Latin-1 characters and/or the Greek alphabet
[ΒΓΞΟ][{c1}{bad}{start_punctuation}{end_punctuation}{currency}°][ΒΓΞΟ]
""".format(
**MOJIBAKE_CATEGORIES
),
re.VERBOSE,
)
def sequence_weirdness(text: str) -> int:
    """
    Deprecated alias for :func:`badness`, kept so external code that called
    the ftfy 2.x-5.x heuristic by this name keeps working.
    """
    deprecation_message = (
        "`sequence_weirdness()` is an old heuristic, and the current "
        "closest equivalent is `ftfy.badness.badness()`"
    )
    warnings.warn(deprecation_message)
    return badness(text)
def badness(text: str) -> int:
    """
    Count the unlikely character sequences in *text*.

    A result greater than 0 indicates that some of the text seems to be
    mojibake.
    """
    # Count matches lazily instead of materializing them with findall().
    return sum(1 for _ in BADNESS_RE.finditer(text))
def is_bad(text: str) -> bool:
    """
    True iff *text* looks like it contains mojibake.

    Short-circuits on the first regex match, so it can be faster than
    :func:`badness`; note that longer strings have more chances to match.
    """
    return BADNESS_RE.search(text) is not None
|
{
"content_hash": "f02bc37c7cadf6ef50847482017a4247",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 111,
"avg_line_length": 36.505102040816325,
"alnum_prop": 0.6175401816911251,
"repo_name": "rspeer/python-ftfy",
"id": "7b6cce546f1ffd0065321a5efed0226cfc01c41c",
"size": "14494",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "ftfy/badness.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "69499"
},
{
"name": "Python",
"bytes": "116586"
}
],
"symlink_target": ""
}
|
from h2o import H2OFrame
from tests import pyunit_utils
def pubdev_6393():
    """Categorical levels must survive an ascharacter().asfactor() round
    trip, even when the data contains non-ASCII / replacement characters."""
    raw_rows = [['location'],
                ['�X県 A市 '],  # first row holds U+FFFD, the replacement char
                ['X県 B市']]
    expected_rows = len(raw_rows) - 1

    frame = H2OFrame(raw_rows, header=True, column_types=['enum'])
    assert frame.ncols == 1
    assert frame.nrows == expected_rows
    original_levels = frame['location'].categories()
    print(original_levels)

    round_tripped = frame['location'].ascharacter().asfactor()
    assert round_tripped.ncols == 1
    assert round_tripped.nrows == expected_rows
    round_tripped_levels = round_tripped.categories()
    print(round_tripped_levels)

    # Categorical representations must match exactly, element by element;
    # Categorical and asFactor are expected to behave identically.
    for idx, converted_level in enumerate(round_tripped_levels):
        assert original_levels[idx] == converted_level
# Run under the pyunit harness when executed directly; run inline when
# imported by the test runner.
if __name__ == "__main__":
    pyunit_utils.standalone_test(pubdev_6393)
else:
    pubdev_6393()
|
{
"content_hash": "9578738b64588f622e6f1d0aab392245",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 110,
"avg_line_length": 35.4375,
"alnum_prop": 0.6790123456790124,
"repo_name": "h2oai/h2o-3",
"id": "7c6065eb5e12c8d4dae7cad85bc6a3ccab709bf5",
"size": "1199",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_jira/pyunit_pubdev_6393.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12803"
},
{
"name": "CSS",
"bytes": "882321"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "DIGITAL Command Language",
"bytes": "106"
},
{
"name": "Dockerfile",
"bytes": "10459"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "205646"
},
{
"name": "HCL",
"bytes": "36232"
},
{
"name": "HTML",
"bytes": "8018117"
},
{
"name": "HiveQL",
"bytes": "3985"
},
{
"name": "Java",
"bytes": "15981357"
},
{
"name": "JavaScript",
"bytes": "148426"
},
{
"name": "Jupyter Notebook",
"bytes": "20638329"
},
{
"name": "Makefile",
"bytes": "46043"
},
{
"name": "PHP",
"bytes": "800"
},
{
"name": "Python",
"bytes": "8188608"
},
{
"name": "R",
"bytes": "4149977"
},
{
"name": "Ruby",
"bytes": "64"
},
{
"name": "Sass",
"bytes": "23790"
},
{
"name": "Scala",
"bytes": "4845"
},
{
"name": "Shell",
"bytes": "214495"
},
{
"name": "Smarty",
"bytes": "1792"
},
{
"name": "TeX",
"bytes": "554940"
}
],
"symlink_target": ""
}
|
from django import template
from oscar.core.loading import get_class
# Resolve the menu builder through Oscar's class loader so projects can
# override 'dashboard.menu' with their own implementation.
get_nodes = get_class('dashboard.menu', 'get_nodes')
register = template.Library()
@register.simple_tag
def dashboard_navigation(user):
    # Template tag: returns the dashboard menu nodes for the given user.
    return get_nodes(user)
|
{
"content_hash": "7741ddf883ce2c36cb6c40da8234eb13",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 52,
"avg_line_length": 21.454545454545453,
"alnum_prop": 0.7584745762711864,
"repo_name": "solarissmoke/django-oscar",
"id": "e7ef3386d7669338e19d1fef1b0580373b3127ee",
"size": "236",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/oscar/templatetags/dashboard_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "529"
},
{
"name": "HTML",
"bytes": "562906"
},
{
"name": "JavaScript",
"bytes": "40879"
},
{
"name": "Makefile",
"bytes": "4234"
},
{
"name": "Python",
"bytes": "2199293"
},
{
"name": "SCSS",
"bytes": "21362"
},
{
"name": "Shell",
"bytes": "308"
}
],
"symlink_target": ""
}
|
import sys
import io
from math import ceil
import xdg.BaseDirectory
from .gui_server import start_qml_gui
from mycroft.tts import TTS
import os
import os.path
import time
import curses
import textwrap
import json
import mycroft.version
from threading import Thread, Lock
from mycroft.messagebus.client import MessageBusClient
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG
from mycroft.configuration import Configuration
import locale
# Curses uses LC_ALL to determine how to display chars set it to system
# default
locale.setlocale(locale.LC_ALL, "")  # Set LC_ALL to user default
preferred_encoding = locale.getpreferredencoding()
bSimple = False  # True: print log lines to stdout instead of the curses UI
bus = None  # Mycroft messagebus connection
config = {}  # Will be populated by the Mycroft configuration
event_thread = None
history = []
chat = []   # chat history, oldest at the lowest index
line = ""
scr = None  # curses screen handle; None while the UI is not running
log_line_offset = 0  # num lines back in logs to show
log_line_lr_scroll = 0  # amount to scroll left/right for long lines
longest_visible_line = 0  # for HOME key
auto_scroll = True
# for debugging odd terminals
last_key = ""
show_last_key = False
show_gui = None  # None = not initialized, else True/False
gui_text = []
log_lock = Lock()  # guards mergedLog/filteredLog mutation across threads
max_log_lines = 5000
mergedLog = []  # every captured log line (filtered or not)
filteredLog = []  # log lines that passed the active filters
default_log_filters = ["mouth.viseme", "mouth.display", "mouth.icon"]
log_filters = list(default_log_filters)
log_files = []
find_str = None  # when set, only log lines containing this string are shown
cy_chat_area = 7  # default chat history height (in lines)
size_log_area = 0  # max number of visible log lines, calculated during draw
# Values used to display the audio meter
show_meter = True
meter_peak = 20
meter_cur = -1
meter_thresh = -1
SCR_MAIN = 0
SCR_HELP = 1
SCR_SKILLS = 2
screen_mode = SCR_MAIN
subscreen = 0     # for help pages, etc.
REDRAW_FREQUENCY = 10    # seconds between full redraws
last_redraw = time.time() - (REDRAW_FREQUENCY - 1)  # seed for 1s redraw
screen_lock = Lock()
is_screen_dirty = True
# Curses color codes (reassigned at runtime)
CLR_HEADING = 0
CLR_FIND = 0
CLR_CHAT_RESP = 0
CLR_CHAT_QUERY = 0
CLR_CMDLINE = 0
CLR_INPUT = 0
CLR_LOG1 = 0
CLR_LOG2 = 0
CLR_LOG_DEBUG = 0
CLR_LOG_ERROR = 0
CLR_LOG_CMDMESSAGE = 0
CLR_METER_CUR = 0
CLR_METER = 0
# Allow Ctrl+C catching...
ctrl_c_was_pressed = False
def ctrl_c_handler(signum, frame):
    # Signal handler: only record that Ctrl+C happened; the main loop polls
    # ctrl_c_pressed() so curses isn't torn down from inside a handler.
    global ctrl_c_was_pressed
    ctrl_c_was_pressed = True
def ctrl_c_pressed():
    """Return True once per Ctrl+C press, consuming the pending flag."""
    global ctrl_c_was_pressed
    was_pressed = ctrl_c_was_pressed
    ctrl_c_was_pressed = False
    return was_pressed
##############################################################################
# Helper functions
def clamp(n, smallest, largest):
    """Force n to be between smallest and largest, inclusive."""
    capped = n if n < largest else largest      # apply the upper bound
    return capped if capped > smallest else smallest   # then the lower
def handleNonAscii(text):
    """Make *text* displayable under the current locale.

    If the preferred encoding is plain ASCII, replace every non-ASCII
    character with a space; otherwise re-encode with that encoding.
    """
    if preferred_encoding != 'ASCII':
        return text.encode(preferred_encoding)
    return ''.join(ch if ord(ch) < 128 else ' ' for ch in text)
##############################################################################
# Settings
filename = "mycroft_cli.conf"  # settings file name, searched in several dirs
def load_mycroft_config(bus):
    """ Load the mycroft config and connect it to updates over the messagebus.

    Arguments:
        bus: connected messagebus client, used to receive config updates.
    Returns:
        the current Configuration object.
    """
    Configuration.set_config_update_handlers(bus)
    return Configuration.get()
def connect_to_mycroft():
    """ Connect to the mycroft messagebus and load and register config
        on the bus.

        Sets the bus and config global variables as a side effect.
    """
    global bus
    global config
    bus = connect_to_messagebus()
    config = load_mycroft_config(bus)
def load_settings():
    """Load CLI display settings from mycroft_cli.conf.

    Searches, in order: the deprecated ~/.mycroft_cli.conf, the XDG config
    directories, and finally /etc/mycroft.  Loading is best-effort: a
    missing or malformed file leaves the module defaults untouched.
    """
    global log_filters
    global cy_chat_area
    global show_last_key
    global max_log_lines
    global show_meter

    config_file = None

    # Old location (deprecated but still honored, with a loud warning)
    path = os.path.join(os.path.expanduser("~"), ".mycroft_cli.conf")
    if os.path.isfile(path):
        LOG.warning(" ===============================================")
        LOG.warning(" ==   DEPRECATION WARNING   ==")
        LOG.warning(" ===============================================")
        LOG.warning(" You still have a config file at " +
                    path)
        LOG.warning(" Note that this location is deprecated and will" +
                    " not be used in the future")
        LOG.warning(" Please move it to " +
                    os.path.join(xdg.BaseDirectory.xdg_config_home, 'mycroft',
                                 filename))
        config_file = path

    # Check XDG_CONFIG_DIR
    if config_file is None:
        for conf_dir in xdg.BaseDirectory.load_config_paths('mycroft'):
            xdg_file = os.path.join(conf_dir, filename)
            if os.path.isfile(xdg_file):
                config_file = xdg_file
                break

    # Check /etc/mycroft
    if config_file is None:
        config_file = os.path.join("/etc/mycroft", filename)

    try:
        with open(config_file, 'r') as f:
            config = json.load(f)
        if "filters" in config:
            # Disregard the filtering of DEBUG messages
            log_filters = [flt for flt in config["filters"] if flt != "DEBUG"]
        if "cy_chat_area" in config:
            cy_chat_area = config["cy_chat_area"]
        if "show_last_key" in config:
            show_last_key = config["show_last_key"]
        if "max_log_lines" in config:
            max_log_lines = config["max_log_lines"]
        if "show_meter" in config:
            show_meter = config["show_meter"]
    except Exception:
        # Deliberately broad: any failure (missing file, bad JSON, wrong
        # types) simply keeps the defaults.
        LOG.info("Ignoring failed load of settings file")
def save_settings():
    """Persist the current CLI display settings to the XDG config dir.

    Writes JSON (non-ASCII allowed) to <xdg-config>/mycroft/mycroft_cli.conf.
    """
    config = {
        "filters": log_filters,
        "cy_chat_area": cy_chat_area,
        "show_last_key": show_last_key,
        "max_log_lines": max_log_lines,
        "show_meter": show_meter,
    }
    config_file = os.path.join(
        xdg.BaseDirectory.save_config_path("mycroft"), filename)
    with open(config_file, 'w') as f:
        # json.dump replaces the old str(json.dumps(...)) round trip.
        json.dump(config, f, ensure_ascii=False)
##############################################################################
# Log file monitoring
class LogMonitorThread(Thread):
    """Tails one log file, feeding new lines into the shared log buffers.

    Each new line is filtered (by find_str or log_filters) and appended,
    prefixed with this monitor's id, to mergedLog and/or filteredLog.
    """
    def __init__(self, filename, logid):
        # filename: path of the log file to tail
        # logid: index used as a one-character prefix identifying the source
        global log_files
        Thread.__init__(self)
        self.filename = filename
        self.st_results = os.stat(filename)
        self.logid = str(logid)
        log_files.append(filename)
    def run(self):
        # Poll the file's mtime forever; on change, read the appended bytes.
        while True:
            try:
                st_results = os.stat(self.filename)
                # Check if file has been modified since last read
                if not st_results.st_mtime == self.st_results.st_mtime:
                    self.read_file_from(self.st_results.st_size)
                    self.st_results = st_results
                    set_screen_dirty()
            except OSError:
                # ignore any file IO exceptions, just try again
                pass
            time.sleep(0.1)
    def read_file_from(self, bytefrom):
        # Read and process everything appended after byte offset `bytefrom`.
        global meter_cur
        global meter_thresh
        global filteredLog
        global mergedLog
        global log_line_offset
        global log_lock
        with io.open(self.filename) as fh:
            fh.seek(bytefrom)
            while True:
                line = fh.readline()
                if line == "":
                    break
                # Allow user to filter log output
                ignore = False
                if find_str:
                    # Search mode: keep only lines containing find_str.
                    if find_str not in line:
                        ignore = True
                else:
                    # Filter mode: drop lines matching any active filter.
                    for filtered_text in log_filters:
                        if filtered_text in line:
                            ignore = True
                            break
                with log_lock:
                    if ignore:
                        mergedLog.append(self.logid + line.rstrip())
                    else:
                        if bSimple:
                            print(line.rstrip())
                        else:
                            filteredLog.append(self.logid + line.rstrip())
                            mergedLog.append(self.logid + line.rstrip())
                            if not auto_scroll:
                                log_line_offset += 1
        # Limit log to max_log_lines
        if len(mergedLog) >= max_log_lines:
            with log_lock:
                cToDel = len(mergedLog) - max_log_lines
                if len(filteredLog) == len(mergedLog):
                    del filteredLog[:cToDel]
                del mergedLog[:cToDel]
            # release log_lock before calling to prevent deadlock
            if len(filteredLog) != len(mergedLog):
                rebuild_filtered_log()
def start_log_monitor(filename):
    """Start tailing *filename* in a background daemon thread.

    Silently does nothing if the file does not exist.
    """
    if os.path.isfile(filename):
        thread = LogMonitorThread(filename, len(log_files))
        # Daemon thread: won't prevent the program from exiting.
        # (.daemon replaces the deprecated setDaemon()).
        thread.daemon = True
        thread.start()
class MicMonitorThread(Thread):
    """Watches the mic-level file and updates the meter globals on change."""
    def __init__(self, filename):
        # filename: path of the file that mirrors the microphone energy
        Thread.__init__(self)
        self.filename = filename
        self.st_results = None
    def run(self):
        # Poll the file's ctime/mtime; re-read the level whenever it changes.
        while True:
            try:
                st_results = os.stat(self.filename)
                if (not self.st_results or
                        not st_results.st_ctime == self.st_results.st_ctime or
                        not st_results.st_mtime == self.st_results.st_mtime):
                    self.read_mic_level()
                    self.st_results = st_results
                    set_screen_dirty()
            except Exception:
                # Ignore whatever failure happened and just try again later
                pass
            time.sleep(0.2)
    def read_mic_level(self):
        # Parse the first line of the file into meter_cur / meter_thresh.
        global meter_cur
        global meter_thresh
        with io.open(self.filename, 'r') as fh:
            line = fh.readline()
            # Just adjust meter settings
            # Ex:Energy: cur=4 thresh=1.5 muted=0
            cur_text, thresh_text, _ = line.split(' ')[-3:]
            meter_thresh = float(thresh_text.split('=')[-1])
            meter_cur = float(cur_text.split('=')[-1])
class ScreenDrawThread(Thread):
    """Background thread that redraws the curses screen when marked dirty."""

    def __init__(self):
        Thread.__init__(self)

    def run(self):
        """Loop until the screen is torn down, redrawing on the dirty flag."""
        global scr
        global screen_lock
        global is_screen_dirty
        global log_lock

        while scr:
            try:
                if is_screen_dirty:
                    # Use a lock to prevent screen corruption when drawing
                    # from multiple threads
                    with screen_lock:
                        is_screen_dirty = False

                        if screen_mode == SCR_MAIN:
                            # Log buffers are read during the draw, so hold
                            # log_lock to keep them stable
                            with log_lock:
                                do_draw_main(scr)
                        elif screen_mode == SCR_HELP:
                            do_draw_help(scr)
            finally:
                time.sleep(0.01)
def start_mic_monitor(filename):
    """Spawn a daemon MicMonitorThread for ``filename`` if the file exists."""
    if not os.path.isfile(filename):
        return
    monitor = MicMonitorThread(filename)
    monitor.setDaemon(True)  # don't let this thread block program exit
    monitor.start()
def add_log_message(message):
    """ Show a message for the user (mixed in the logs) """
    global filteredLog
    global mergedLog
    global log_line_offset
    global log_lock

    with log_lock:
        message = "@" + message  # the first byte is a code ('@' = CLI message)
        filteredLog.append(message)
        mergedLog.append(message)

        if log_line_offset != 0:
            log_line_offset = 0  # scroll so the user can see the message
    set_screen_dirty()
def clear_log():
    """Flush both log buffers and reset the scroll position."""
    global filteredLog
    global mergedLog
    global log_line_offset
    global log_lock

    with log_lock:
        mergedLog = []
        filteredLog = []
        log_line_offset = 0
def rebuild_filtered_log():
    """Recompute filteredLog from mergedLog using the active search/filters."""
    global filteredLog
    global mergedLog
    global log_lock

    with log_lock:
        if find_str:
            # Search mode: keep only lines containing the search string
            filteredLog = [entry for entry in mergedLog if find_str in entry]
        else:
            # Filter mode: drop lines matching any non-empty filter string
            filteredLog = [
                entry for entry in mergedLog
                if not any(flt and flt in entry for flt in log_filters)
            ]
##############################################################################
# Capturing output from Mycroft
def handle_speak(event):
    """Display Mycroft's spoken response in the chat area (or on stdout)."""
    global chat
    utterance = event.data.get('utterance')
    # Strip SSML markup so only plain text is shown
    utterance = TTS.remove_ssml(utterance)
    if bSimple:
        print(">> " + utterance)
    else:
        chat.append(">> " + utterance)
    set_screen_dirty()
def handle_utterance(event):
    """Record a recognized user utterance in the chat log and input history."""
    global chat
    global history
    utterance = event.data.get('utterances')[0]
    history.append(utterance)
    chat.append(utterance)
    set_screen_dirty()
def connect(bus):
    """ Run the mycroft messagebus referenced by bus.

    Blocks until the connection is closed; intended to run in its own thread.

    Args:
        bus: Mycroft messagebus instance
    """
    bus.run_forever()
##############################################################################
# Capturing the messagebus
def handle_message(msg):
    """Placeholder messagebus hook; raw bus logging is currently disabled."""
    # TODO: Think this thru a little bit -- remove this logging within core?
    # add_log_message(msg)
    pass
##############################################################################
# "Graphic primitives"
def draw(x, y, msg, pad=None, pad_chr=None, clr=None):
    """Draw a text to the screen

    Args:
        x (int): X coordinate (col), 0-based from upper-left
        y (int): Y coordinate (row), 0-based from upper-left
        msg (str): string to render to screen
        pad (bool or int, optional): if int, pads/clips to given length, if
                                     True use right edge of the screen.
        pad_chr (char, optional): pad character, default is space
        clr (int, optional): curses color, Defaults to CLR_LOG1.
    """
    if y < 0 or y > curses.LINES or x < 0 or x > curses.COLS:
        # Off-screen request; silently ignore
        return

    if x + len(msg) > curses.COLS:
        # Clip the message to fit the remaining screen width
        s = msg[:curses.COLS - x]
    else:
        s = msg
        if pad:
            ch = pad_chr or " "
            if pad is True:
                pad = curses.COLS  # pad to edge of screen
                s += ch * (pad - x - len(msg))
            else:
                # pad to given length (or screen width)
                if x + pad > curses.COLS:
                    pad = curses.COLS - x
                s += ch * (pad - len(msg))

    if not clr:
        clr = CLR_LOG1

    scr.addstr(y, x, s, clr)
##############################################################################
# Screen handling
def init_screen():
    """Initialize curses color pairs and the global color constants.

    Must be called after curses has set up the terminal; when the terminal
    has no color support the CLR_* globals keep their defaults.
    """
    global CLR_HEADING
    global CLR_FIND
    global CLR_CHAT_RESP
    global CLR_CHAT_QUERY
    global CLR_CMDLINE
    global CLR_INPUT
    global CLR_LOG1
    global CLR_LOG2
    global CLR_LOG_DEBUG
    global CLR_LOG_ERROR
    global CLR_LOG_CMDMESSAGE
    global CLR_METER_CUR
    global CLR_METER

    if curses.has_colors():
        curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
        bg = curses.COLOR_BLACK
        for i in range(1, curses.COLORS):
            curses.init_pair(i + 1, i, bg)

        # Colors (on black backgound):
        # 1 = white 5 = dk blue
        # 2 = dk red 6 = dk purple
        # 3 = dk green 7 = dk cyan
        # 4 = dk yellow 8 = lt gray
        CLR_HEADING = curses.color_pair(1)
        CLR_CHAT_RESP = curses.color_pair(4)
        CLR_CHAT_QUERY = curses.color_pair(7)
        CLR_FIND = curses.color_pair(4)
        CLR_CMDLINE = curses.color_pair(7)
        CLR_INPUT = curses.color_pair(7)
        CLR_LOG1 = curses.color_pair(3)
        CLR_LOG2 = curses.color_pair(6)
        CLR_LOG_DEBUG = curses.color_pair(4)
        CLR_LOG_ERROR = curses.color_pair(2)
        CLR_LOG_CMDMESSAGE = curses.color_pair(2)
        CLR_METER_CUR = curses.color_pair(2)
        CLR_METER = curses.color_pair(4)
def scroll_log(up, num_lines=None):
    """Scroll the log view by ``num_lines`` (default: half the log area)."""
    global log_line_offset

    # default to a half-page
    if not num_lines:
        num_lines = size_log_area // 2

    with log_lock:
        if up:
            log_line_offset -= num_lines
        else:
            log_line_offset += num_lines
        # Clamp so we can't page past either end of the buffer
        if log_line_offset > len(filteredLog):
            log_line_offset = len(filteredLog) - 10
        if log_line_offset < 0:
            log_line_offset = 0
    set_screen_dirty()
def _do_meter(height):
    """Render the microphone level meter on the right edge of the screen."""
    if not show_meter or meter_cur == -1:
        return

    # The meter will look something like this:
    #
    # 8.4 *
    # *
    # -*- 2.4
    # *
    # *
    # *
    # Where the left side is the current level and the right side is
    # the threshold level for 'silence'.
    # NOTE(review): whitespace inside the literals below may have been
    # collapsed by a formatting pass — verify alignment against upstream.
    global scr
    global meter_peak

    if meter_cur > meter_peak:
        meter_peak = meter_cur + 1

    # Cap the scale so the threshold marker stays in a useful position
    scale = meter_peak
    if meter_peak > meter_thresh * 3:
        scale = meter_thresh * 3
    h_cur = clamp(int((float(meter_cur) / scale) * height), 0, height - 1)
    h_thresh = clamp(
        int((float(meter_thresh) / scale) * height), 0, height - 1)
    clr = curses.color_pair(4)  # dark yellow

    str_level = "{0:3} ".format(int(meter_cur))  # e.g. ' 4'
    str_thresh = "{0:4.2f}".format(meter_thresh)  # e.g. '3.24'
    meter_width = len(str_level) + len(str_thresh) + 4
    for i in range(0, height):
        meter = ""
        if i == h_cur:
            # current energy level
            meter = str_level
        else:
            meter = " " * len(str_level)

        if i == h_thresh:
            # add threshold indicator
            meter += "--- "
        else:
            meter += " "

        if i == h_thresh:
            # 'silence' threshold energy level
            meter += str_thresh

        # draw the line
        meter += " " * (meter_width - len(meter))
        scr.addstr(curses.LINES - 1 - i, curses.COLS -
                   len(meter) - 1, meter, clr)

        # draw an asterisk if the audio energy is at this level
        if i <= h_cur:
            if meter_cur > meter_thresh:
                clr_bar = curses.color_pair(3)  # dark green for loud
            else:
                clr_bar = curses.color_pair(5)  # dark blue for 'silent'
            scr.addstr(curses.LINES - 1 - i, curses.COLS - len(str_thresh) - 4,
                       "*", clr_bar)
def _do_gui(gui_width):
    """Render the text-mode GUI panel in a box on the right of the screen."""
    clr = curses.color_pair(2)  # dark red
    x = curses.COLS - gui_width
    y = 3
    draw(x, y, " " + make_titlebar("= GUI", gui_width - 1) + " ",
         clr=CLR_HEADING)
    cnt = len(gui_text) + 1
    # Leave room for the rest of the main screen below the panel
    if cnt > curses.LINES - 15:
        cnt = curses.LINES - 15
    for i in range(0, cnt):
        draw(x, y + 1 + i, " !", clr=CLR_HEADING)
        if i < len(gui_text):
            draw(x + 2, y + 1 + i, gui_text[i], pad=gui_width - 3)
        else:
            # Filler row below the last GUI line
            draw(x + 2, y + 1 + i, "*" * (gui_width - 3))
        draw(x + (gui_width - 1), y + 1 + i, "!", clr=CLR_HEADING)
    # Bottom border of the panel
    draw(x, y + cnt, " " + "-" * (gui_width - 2) + " ", clr=CLR_HEADING)
def set_screen_dirty():
    """Flag the screen for redraw by the ScreenDrawThread."""
    global is_screen_dirty
    global screen_lock

    with screen_lock:
        is_screen_dirty = True
def do_draw_main(scr):
    """Draw the main screen: log area, legend, meter, chat history, prompt.

    Reads the shared log/chat buffers, so callers hold log_lock while
    invoking this (see ScreenDrawThread.run).
    """
    global log_line_offset
    global longest_visible_line
    global last_redraw
    global auto_scroll
    global size_log_area

    if time.time() - last_redraw > REDRAW_FREQUENCY:
        # Do a full-screen redraw periodically to clear and
        # noise from non-curses text that get output to the
        # screen (e.g. modules that do a 'print')
        scr.clear()
        last_redraw = time.time()
    else:
        scr.erase()

    # Display log output at the top
    cLogs = len(filteredLog) + 1  # +1 for the '--end--'
    size_log_area = curses.LINES - (cy_chat_area + 5)
    start = clamp(cLogs - size_log_area, 0, cLogs - 1) - log_line_offset
    end = cLogs - log_line_offset
    if start < 0:
        end -= start
        start = 0
    if end > cLogs:
        end = cLogs

    # Auto-scroll only while the newest line is visible
    auto_scroll = (end == cLogs)

    # adjust the line offset (prevents paging up too far)
    log_line_offset = cLogs - end

    # Top header and line counts
    if find_str:
        scr.addstr(0, 0, "Search Results: ", CLR_HEADING)
        scr.addstr(0, 16, find_str, CLR_FIND)
        scr.addstr(0, 16 + len(find_str), " ctrl+X to end" +
                   " " * (curses.COLS - 31 - 12 - len(find_str)) +
                   str(start) + "-" + str(end) + " of " + str(cLogs),
                   CLR_HEADING)
    else:
        scr.addstr(0, 0, "Log Output:" + " " * (curses.COLS - 31) +
                   str(start) + "-" + str(end) + " of " + str(cLogs),
                   CLR_HEADING)
    ver = " mycroft-core " + mycroft.version.CORE_VERSION_STR + " ==="
    scr.addstr(1, 0, "=" * (curses.COLS - 1 - len(ver)), CLR_HEADING)
    scr.addstr(1, curses.COLS - 1 - len(ver), ver, CLR_HEADING)

    y = 2
    for i in range(start, end):
        if i >= cLogs - 1:
            # Synthetic marker row after the newest real line
            log = ' ^--- NEWEST ---^ '
        else:
            log = filteredLog[i]
        logid = log[0]
        if len(log) > 25 and log[5] == '-' and log[8] == '-':
            log = log[11:]  # skip logid & date at the front of log line
        else:
            log = log[1:]  # just skip the logid

        # Categorize log line
        if "| DEBUG |" in log:
            log = log.replace("Skills ", "")
            clr = CLR_LOG_DEBUG
        elif "| ERROR |" in log:
            clr = CLR_LOG_ERROR
        else:
            if logid == "1":
                clr = CLR_LOG1
            elif logid == "@":
                clr = CLR_LOG_CMDMESSAGE
            else:
                clr = CLR_LOG2

        # limit output line to screen width
        len_line = len(log)
        if len(log) > curses.COLS:
            start = len_line - (curses.COLS - 4) - log_line_lr_scroll
            if start < 0:
                start = 0
            end = start + (curses.COLS - 4)
            if start == 0:
                log = log[start:end] + "~~~~"  # start....
            elif end >= len_line - 1:
                log = "~~~~" + log[start:end]  # ....end
            else:
                log = "~~" + log[start:end] + "~~"  # ..middle..

            # Remember the longest line for HOME-key scrolling
            if len_line > longest_visible_line:
                longest_visible_line = len_line
        scr.addstr(y, 0, handleNonAscii(log), clr)
        y += 1

    # Log legend in the lower-right
    y_log_legend = curses.LINES - (3 + cy_chat_area)
    scr.addstr(y_log_legend, curses.COLS // 2 + 2,
               make_titlebar("Log Output Legend", curses.COLS // 2 - 2),
               CLR_HEADING)
    scr.addstr(y_log_legend + 1, curses.COLS // 2 + 2,
               "DEBUG output",
               CLR_LOG_DEBUG)
    if len(log_files) > 0:
        scr.addstr(y_log_legend + 2, curses.COLS // 2 + 2,
                   os.path.basename(log_files[0]) + ", other",
                   CLR_LOG2)
    if len(log_files) > 1:
        scr.addstr(y_log_legend + 3, curses.COLS // 2 + 2,
                   os.path.basename(log_files[1]), CLR_LOG1)

    # Meter
    y_meter = y_log_legend
    if show_meter:
        scr.addstr(y_meter, curses.COLS - 14, " Mic Level ",
                   CLR_HEADING)

    # History log in the middle
    y_chat_history = curses.LINES - (3 + cy_chat_area)
    chat_width = curses.COLS // 2 - 2
    chat_out = []
    scr.addstr(y_chat_history, 0, make_titlebar("History", chat_width),
               CLR_HEADING)

    # Build a nicely wrapped version of the chat log
    idx_chat = len(chat) - 1
    while len(chat_out) < cy_chat_area and idx_chat >= 0:
        if chat[idx_chat][0] == '>':
            # Mycroft responses get a hanging indent on wrapped lines
            wrapper = textwrap.TextWrapper(initial_indent="",
                                           subsequent_indent=" ",
                                           width=chat_width)
        else:
            wrapper = textwrap.TextWrapper(width=chat_width)

        chatlines = wrapper.wrap(chat[idx_chat])
        for txt in reversed(chatlines):
            if len(chat_out) >= cy_chat_area:
                break
            chat_out.insert(0, txt)

        idx_chat -= 1

    # Output the chat
    y = curses.LINES - (2 + cy_chat_area)
    for txt in chat_out:
        if txt.startswith(">> ") or txt.startswith(" "):
            clr = CLR_CHAT_RESP
        else:
            clr = CLR_CHAT_QUERY
        scr.addstr(y, 1, handleNonAscii(txt), clr)
        y += 1

    if show_gui and curses.COLS > 20 and curses.LINES > 20:
        _do_gui(curses.COLS - 20)

    # Command line at the bottom
    ln = line
    if len(line) > 0 and line[0] == ":":
        scr.addstr(curses.LINES - 2, 0, "Command ('help' for options):",
                   CLR_CMDLINE)
        scr.addstr(curses.LINES - 1, 0, ":", CLR_CMDLINE)
        ln = line[1:]
    else:
        prompt = "Input (':' for command, Ctrl+C to quit)"
        if show_last_key:
            prompt += " === keycode: " + last_key
        scr.addstr(curses.LINES - 2, 0,
                   make_titlebar(prompt,
                                 curses.COLS - 1),
                   CLR_HEADING)
        scr.addstr(curses.LINES - 1, 0, ">", CLR_HEADING)

    _do_meter(cy_chat_area + 2)
    scr.addstr(curses.LINES - 1, 2, ln[-(curses.COLS - 3):], CLR_INPUT)

    # Curses doesn't actually update the display until refresh() is called
    scr.refresh()
def make_titlebar(title, bar_length):
    """Return ``title`` followed by '=' padding out to ``bar_length`` columns."""
    return (title + " ").ljust(bar_length, "=")
##############################################################################
# Help system
# (section_title, [(shortcut_or_command, description), ...]) tuples rendered
# page-by-page by do_draw_help.
help_struct = [
    ('Log Scrolling shortcuts',
     [("Up / Down / PgUp / PgDn", "scroll thru history"),
      ("Ctrl+T / Ctrl+PgUp", "scroll to top of logs (jump to oldest)"),
      ("Ctrl+B / Ctrl+PgDn", "scroll to bottom of logs" + "(jump to newest)"),
      ("Left / Right", "scroll long lines left/right"),
      ("Home / End", "scroll to start/end of long lines")]),
    ("Query History shortcuts",
     [("Ctrl+N / Ctrl+Left", "previous query"),
      ("Ctrl+P / Ctrl+Right", "next query")]),
    ("General Commands (type ':' to enter command mode)",
     [(":quit or :exit", "exit the program"),
      (":meter (show|hide)", "display the microphone level"),
      (":keycode (show|hide)", "display typed key codes (mainly debugging)"),
      (":history (# lines)", "set size of visible history buffer"),
      (":clear", "flush the logs")]),
    ("Log Manipulation Commands",
     [(":filter 'STR'", "adds a log filter (optional quotes)"),
      (":filter remove 'STR'", "removes a log filter"),
      (":filter (clear|reset)", "reset filters"),
      (":filter (show|list)", "display current filters"),
      (":find 'STR'", "show logs containing 'str'"),
      (":log level (DEBUG|INFO|ERROR)", "set logging level"),
      (":log bus (on|off)", "control logging of messagebus messages")]),
    ("Skill Debugging Commands",
     [(":skills", "list installed Skills"),
      (":api SKILL", "show Skill's public API"),
      (":activate SKILL", "activate Skill, e.g. 'activate skill-wiki'"),
      (":deactivate SKILL", "deactivate Skill"),
      (":keep SKILL", "deactivate all Skills except the indicated Skill")])]

# Widest command/shortcut column, used to align help descriptions
help_longest = 0
for s in help_struct:
    for ent in s[1]:
        help_longest = max(help_longest, len(ent[0]))

# Rows consumed by the help screen's header, and header plus footer
HEADER_SIZE = 2
HEADER_FOOTER_SIZE = 4
def num_help_pages():
    """Number of pages needed to show all of help_struct at the current size."""
    # Each section costs its entry count plus 3 rows (title, rule, spacer)
    total_lines = sum(3 + len(entries) for _, entries in help_struct)
    return ceil(total_lines / (curses.LINES - HEADER_FOOTER_SIZE))
def do_draw_help(scr):
    """Render the current help page (selected by the global ``subscreen``)."""

    def render_header():
        scr.addstr(0, 0, center(25) + "Mycroft Command Line Help", CLR_HEADING)
        scr.addstr(1, 0, "=" * (curses.COLS - 1), CLR_HEADING)

    def render_help(txt, y_pos, i, first_line, last_line, clr):
        # Only draw rows that fall on the current page; always advance
        # the virtual line counter
        if i >= first_line and i < last_line:
            scr.addstr(y_pos, 0, txt, clr)
            y_pos += 1
        return y_pos

    def render_footer(page, total):
        text = "Page {} of {} [ Any key to continue ]".format(page, total)
        scr.addstr(curses.LINES - 1, 0, center(len(text)) + text, CLR_HEADING)

    scr.erase()
    render_header()
    y = HEADER_SIZE
    page = subscreen + 1

    # Find first and last taking into account the header and footer
    first = subscreen * (curses.LINES - HEADER_FOOTER_SIZE)
    last = first + (curses.LINES - HEADER_FOOTER_SIZE)
    i = 0
    for section in help_struct:
        y = render_help(section[0], y, i, first, last, CLR_HEADING)
        i += 1
        y = render_help("=" * (curses.COLS - 1), y, i, first, last,
                        CLR_HEADING)
        i += 1

        for line in section[1]:
            # Word-wrap the description into the column next to the command
            words = line[1].split()
            ln = line[0].ljust(help_longest + 1)
            for w in words:
                if len(ln) + 1 + len(w) < curses.COLS:
                    ln += " " + w
                else:
                    y = render_help(ln, y, i, first, last, CLR_CMDLINE)
                    ln = " ".ljust(help_longest + 2) + w
            y = render_help(ln, y, i, first, last, CLR_CMDLINE)
            i += 1

        y = render_help(" ", y, i, first, last, CLR_CMDLINE)
        i += 1

        if i > last:
            break

    render_footer(page, num_help_pages())

    # Curses doesn't actually update the display until refresh() is called
    scr.refresh()
def show_help():
    """Switch to the help screen, starting from its first page."""
    global screen_mode
    global subscreen

    if screen_mode == SCR_HELP:
        return
    screen_mode = SCR_HELP
    subscreen = 0
    set_screen_dirty()
def show_next_help():
    """Advance to the next help page; leave help after the last page."""
    global screen_mode
    global subscreen

    if screen_mode == SCR_HELP:
        subscreen += 1
        if subscreen >= num_help_pages():
            screen_mode = SCR_MAIN
        set_screen_dirty()
##############################################################################
# Skill debugging
def show_skills(skills):
    """Show list of loaded Skills in as many column as necessary."""
    global scr
    global screen_mode

    if not scr:
        return

    screen_mode = SCR_SKILLS

    row = 2
    column = 0

    def prepare_page():
        # Reset the screen with the list header and restart at the top-left
        global scr
        nonlocal row
        nonlocal column
        scr.erase()
        scr.addstr(0, 0, center(25) + "Loaded Skills", CLR_CMDLINE)
        scr.addstr(1, 1, "=" * (curses.COLS - 2), CLR_CMDLINE)
        row = 2
        column = 0

    prepare_page()
    col_width = 0
    skill_names = sorted(skills.keys())
    for skill in skill_names:
        # Active skills in yellow, deactivated ones in red
        if skills[skill]['active']:
            color = curses.color_pair(4)
        else:
            color = curses.color_pair(2)

        scr.addstr(row, column, " {}".format(skill), color)
        row += 1
        col_width = max(col_width, len(skill))
        if row == curses.LINES - 2 and column > 0 and skill != skill_names[-1]:
            # Screen is full: pause for a key press, then start a new page
            column = 0
            scr.addstr(curses.LINES - 1, 0,
                       center(23) + "Press any key to continue", CLR_HEADING)
            scr.refresh()
            wait_for_any_key()
            prepare_page()
        elif row == curses.LINES - 2:
            # Reached bottom of screen, start at top and move output to a
            # new column
            row = 2
            column += col_width + 2
            col_width = 0
        if column > curses.COLS - 20:
            # End of screen
            break

    scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to return",
               CLR_HEADING)
    scr.refresh()
def show_skill_api(skill, data):
    """Show available help on Skill's API."""
    global scr
    global screen_mode

    if not scr:
        return

    screen_mode = SCR_SKILLS

    row = 2
    column = 0

    def prepare_page():
        # Reset the screen with the API header and restart near the top
        global scr
        nonlocal row
        nonlocal column
        scr.erase()
        scr.addstr(0, 0, center(25) + "Skill-API for {}".format(skill),
                   CLR_CMDLINE)
        scr.addstr(1, 1, "=" * (curses.COLS - 2), CLR_CMDLINE)
        row = 2
        column = 4

    prepare_page()
    for key in data:
        color = curses.color_pair(4)
        scr.addstr(row, column, "{} ({})".format(key, data[key]['type']),
                   CLR_HEADING)
        row += 2
        if 'help' in data[key]:
            # Show the method's docstring, line by line
            help_text = data[key]['help'].split('\n')
            for line in help_text:
                scr.addstr(row, column + 2, line, color)
                row += 1
            row += 2
        else:
            row += 1

        if row == curses.LINES - 5:
            # Screen is full: pause for a key press, then start a new page
            scr.addstr(curses.LINES - 1, 0,
                       center(23) + "Press any key to continue", CLR_HEADING)
            scr.refresh()
            wait_for_any_key()
            prepare_page()
        elif row == curses.LINES - 5:
            # NOTE(review): this elif repeats the preceding condition and can
            # never fire — likely a latent bug; intended condition unclear.
            # Reached bottom of screen, start at top and move output to a
            # new column
            row = 2

    scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to return",
               CLR_HEADING)
    scr.refresh()
def center(str_len):
    """Return the leading spaces that horizontally center text of ``str_len``
    columns on the current screen width."""
    pad = (curses.COLS - str_len) // 2
    return " " * pad
##############################################################################
# Main UI loop
def _get_cmd_param(cmd, keyword):
# Returns parameter to a command. Will de-quote.
# Ex: find 'abc def' returns: abc def
# find abc def returns: abc def
if isinstance(keyword, list):
for w in keyword:
cmd = cmd.replace(w, "").strip()
else:
cmd = cmd.replace(keyword, "").strip()
if not cmd:
return None
last_char = cmd[-1]
if last_char == '"' or last_char == "'":
parts = cmd.split(last_char)
return parts[-2]
else:
parts = cmd.split(" ")
return parts[-1]
def wait_for_any_key():
    """Block until key is pressed.

    This works around curses.error that can occur on old versions of ncurses.
    """
    while True:
        try:
            scr.get_wch()  # blocks
        except curses.error:
            # Loop if get_wch throws error
            time.sleep(0.05)
        else:
            break
def handle_cmd(cmd):
    """Dispatch a ':'-prefixed CLI command (without the leading colon).

    Returns 1 when the CLI should exit, otherwise 0 (or None for the
    early-returning ':filter show' / ':api' branches).

    NOTE: dispatch is substring-based and order-dependent — e.g. "log" must
    be tested after "keycode"/"filter" branches that may also contain it.
    """
    global show_meter
    global screen_mode
    global log_filters
    global cy_chat_area
    global find_str
    global show_last_key

    if "show" in cmd and "log" in cmd:
        pass
    elif "help" in cmd:
        show_help()
    elif "exit" in cmd or "quit" in cmd:
        return 1
    elif "keycode" in cmd:
        # debugging keyboard
        if "hide" in cmd or "off" in cmd:
            show_last_key = False
        elif "show" in cmd or "on" in cmd:
            show_last_key = True
    elif "meter" in cmd:
        # microphone level meter
        if "hide" in cmd or "off" in cmd:
            show_meter = False
        elif "show" in cmd or "on" in cmd:
            show_meter = True
    elif "find" in cmd:
        find_str = _get_cmd_param(cmd, "find")
        rebuild_filtered_log()
    elif "filter" in cmd:
        if "show" in cmd or "list" in cmd:
            # display active filters
            add_log_message("Filters: " + str(log_filters))
            return

        if "reset" in cmd or "clear" in cmd:
            log_filters = list(default_log_filters)
        else:
            # extract last word(s)
            param = _get_cmd_param(cmd, "filter")
            if param:
                if "remove" in cmd and param in log_filters:
                    log_filters.remove(param)
                else:
                    log_filters.append(param)

        rebuild_filtered_log()
        add_log_message("Filters: " + str(log_filters))
    elif "clear" in cmd:
        clear_log()
    elif "log" in cmd:
        # Control logging behavior in all Mycroft processes
        if "level" in cmd:
            level = _get_cmd_param(cmd, ["log", "level"])
            bus.emit(Message("mycroft.debug.log", data={'level': level}))
        elif "bus" in cmd:
            state = _get_cmd_param(cmd, ["log", "bus"]).lower()
            if state in ["on", "true", "yes"]:
                bus.emit(Message("mycroft.debug.log", data={'bus': True}))
            elif state in ["off", "false", "no"]:
                bus.emit(Message("mycroft.debug.log", data={'bus': False}))
    elif "history" in cmd:
        # extract last word(s)
        lines = int(_get_cmd_param(cmd, "history"))
        # Clamp the visible chat history between 1 and the available rows
        if not lines or lines < 1:
            lines = 1
        max_chat_area = curses.LINES - 7
        if lines > max_chat_area:
            lines = max_chat_area
        cy_chat_area = lines
    elif "skills" in cmd:
        # List loaded skill
        message = bus.wait_for_response(
            Message('skillmanager.list'), reply_type='mycroft.skills.list')

        if message:
            show_skills(message.data)
            wait_for_any_key()

            screen_mode = SCR_MAIN
            set_screen_dirty()
    elif "deactivate" in cmd:
        skills = cmd.split()[1:]
        if len(skills) > 0:
            for s in skills:
                bus.emit(Message("skillmanager.deactivate", data={'skill': s}))
        else:
            add_log_message('Usage :deactivate SKILL [SKILL2] [...]')
    elif "keep" in cmd:
        s = cmd.split()
        if len(s) > 1:
            bus.emit(Message("skillmanager.keep", data={'skill': s[1]}))
        else:
            add_log_message('Usage :keep SKILL')
    elif "activate" in cmd:
        skills = cmd.split()[1:]
        if len(skills) > 0:
            for s in skills:
                bus.emit(Message("skillmanager.activate", data={'skill': s}))
        else:
            add_log_message('Usage :activate SKILL [SKILL2] [...]')
    elif "api" in cmd:
        parts = cmd.split()
        if len(parts) < 2:
            return
        skill = parts[1]
        message = bus.wait_for_response(Message('{}.public_api'.format(skill)))
        if message:
            show_skill_api(skill, message.data)
            scr.get_wch()  # blocks
            screen_mode = SCR_MAIN
            set_screen_dirty()

    # TODO: More commands
    return 0  # do nothing upon return
def handle_is_connected(msg):
    """Messagebus 'connected' handler: announce the connection in the log."""
    add_log_message("Connected to Messagebus!")
    # start_qml_gui(bus, gui_text)
def handle_reconnecting():
    """Messagebus 'reconnecting' handler: note the reconnect attempt."""
    add_log_message("Looking for Messagebus websocket...")
def gui_main(stdscr):
    """Curses main loop: wire up bus handlers, then read and dispatch keys.

    Maintains the input line, query history, log scrolling and screen mode
    until the user exits (Ctrl+C / Ctrl+X / :exit).  Intended to be invoked
    via curses.wrapper so the terminal is restored on exit.
    """
    global scr
    global bus
    global line
    global log_line_lr_scroll
    global longest_visible_line
    global find_str
    global last_key
    global history
    global screen_lock
    global show_gui
    global config

    scr = stdscr
    init_screen()
    scr.keypad(1)
    scr.notimeout(True)

    bus.on('speak', handle_speak)
    bus.on('message', handle_message)
    bus.on('recognizer_loop:utterance', handle_utterance)
    bus.on('connected', handle_is_connected)
    bus.on('reconnecting', handle_reconnecting)

    add_log_message("Establishing Mycroft Messagebus connection...")

    gui_thread = ScreenDrawThread()
    gui_thread.setDaemon(True)  # this thread won't prevent prog from exiting
    gui_thread.start()

    hist_idx = -1  # index, from the bottom
    c = 0
    try:
        while True:
            set_screen_dirty()
            c = 0
            code = 0

            try:
                if ctrl_c_pressed():
                    # User hit Ctrl+C. treat same as Ctrl+X
                    c = 24
                else:
                    # Don't block, this allows us to refresh the screen while
                    # waiting on initial messagebus connection, etc
                    scr.timeout(1)
                    c = scr.get_wch()  # unicode char or int for special keys
                    if c == -1:
                        continue
            except curses.error:
                # This happens in odd cases, such as when you Ctrl+Z
                # the CLI and then resume. Curses fails on get_wch().
                continue

            if isinstance(c, int):
                code = c
            else:
                code = ord(c)

            # Convert VT100 ESC codes generated by some terminals
            if code == 27:
                # NOTE: Not sure exactly why, but the screen can get corrupted
                # if we draw to the screen while doing a scr.getch(). So
                # lock screen updates until the VT100 sequence has been
                # completely read.
                with screen_lock:
                    scr.timeout(0)
                    c1 = -1
                    start = time.time()
                    while c1 == -1:
                        c1 = scr.getch()
                        if time.time() - start > 1:
                            break  # 1 second timeout waiting for ESC code

                    c2 = -1
                    while c2 == -1:
                        c2 = scr.getch()
                        if time.time() - start > 1:  # 1 second timeout
                            break  # 1 second timeout waiting for ESC code

                # Map the two-byte VT100 sequence onto the curses key codes
                if c1 == 79 and c2 == 120:
                    c = curses.KEY_UP
                elif c1 == 79 and c2 == 116:
                    c = curses.KEY_LEFT
                elif c1 == 79 and c2 == 114:
                    c = curses.KEY_DOWN
                elif c1 == 79 and c2 == 118:
                    c = curses.KEY_RIGHT
                elif c1 == 79 and c2 == 121:
                    c = curses.KEY_PPAGE  # aka PgUp
                elif c1 == 79 and c2 == 115:
                    c = curses.KEY_NPAGE  # aka PgDn
                elif c1 == 79 and c2 == 119:
                    c = curses.KEY_HOME
                elif c1 == 79 and c2 == 113:
                    c = curses.KEY_END
                else:
                    c = c1

                if c1 != -1:
                    last_key = str(c) + ",ESC+" + str(c1) + "+" + str(c2)
                    code = c
                else:
                    last_key = "ESC"
            else:
                if code < 33:
                    last_key = str(code)
                else:
                    last_key = str(code)

            scr.timeout(-1)  # resume blocking
            if code == 27:  # Hitting ESC twice clears the entry line
                hist_idx = -1
                line = ""
            elif c == curses.KEY_RESIZE:
                # Generated by Curses when window/screen has been resized
                y, x = scr.getmaxyx()
                curses.resizeterm(y, x)

                # resizeterm() causes another curses.KEY_RESIZE, so
                # we need to capture that to prevent a loop of resizes
                c = scr.get_wch()
            elif screen_mode == SCR_HELP:
                # in Help mode, any key goes to next page
                show_next_help()
                continue
            elif c == '\n' or code == 10 or code == 13 or code == 343:
                # ENTER sends the typed line to be processed by Mycroft
                if line == "":
                    continue

                if line[:1] == ":":
                    # Lines typed like ":help" are 'commands'
                    if handle_cmd(line[1:]) == 1:
                        break
                else:
                    # Treat this as an utterance
                    bus.emit(Message("recognizer_loop:utterance",
                                     {'utterances': [line.strip()],
                                      'lang': config.get('lang', 'en-us')},
                                     {'client_name': 'mycroft_cli',
                                      'source': 'debug_cli',
                                      'destination': ["skills"]}
                                     ))
                hist_idx = -1
                line = ""
            elif code == 16 or code == 545:  # Ctrl+P or Ctrl+Left (Previous)
                # Move up the history stack
                hist_idx = clamp(hist_idx + 1, -1, len(history) - 1)
                if hist_idx >= 0:
                    line = history[len(history) - hist_idx - 1]
                else:
                    line = ""
            elif code == 14 or code == 560:  # Ctrl+N or Ctrl+Right (Next)
                # Move down the history stack
                hist_idx = clamp(hist_idx - 1, -1, len(history) - 1)
                if hist_idx >= 0:
                    line = history[len(history) - hist_idx - 1]
                else:
                    line = ""
            elif c == curses.KEY_LEFT:
                # scroll long log lines left
                log_line_lr_scroll += curses.COLS // 4
            elif c == curses.KEY_RIGHT:
                # scroll long log lines right
                log_line_lr_scroll -= curses.COLS // 4
                if log_line_lr_scroll < 0:
                    log_line_lr_scroll = 0
            elif c == curses.KEY_HOME:
                # HOME scrolls log lines all the way to the start
                log_line_lr_scroll = longest_visible_line
            elif c == curses.KEY_END:
                # END scrolls log lines all the way to the end
                log_line_lr_scroll = 0
            elif c == curses.KEY_UP:
                scroll_log(False, 1)
            elif c == curses.KEY_DOWN:
                scroll_log(True, 1)
            elif c == curses.KEY_NPAGE:  # aka PgDn
                # PgDn to go down a page in the logs
                scroll_log(True)
            elif c == curses.KEY_PPAGE:  # aka PgUp
                # PgUp to go up a page in the logs
                scroll_log(False)
            elif code == 2 or code == 550:  # Ctrl+B or Ctrl+PgDn
                scroll_log(True, max_log_lines)
            elif code == 20 or code == 555:  # Ctrl+T or Ctrl+PgUp
                scroll_log(False, max_log_lines)
            elif code == curses.KEY_BACKSPACE or code == 127:
                # Backspace to erase a character in the utterance
                line = line[:-1]
            elif code == 6:  # Ctrl+F (Find)
                line = ":find "
            elif code == 7:  # Ctrl+G (start GUI)
                if show_gui is None:
                    start_qml_gui(bus, gui_text)
                show_gui = not show_gui
            elif code == 18:  # Ctrl+R (Redraw)
                scr.erase()
            elif code == 24:  # Ctrl+X (Exit)
                if find_str:
                    # End the find session
                    find_str = None
                    rebuild_filtered_log()
                elif line.startswith(":"):
                    # cancel command mode
                    line = ""
                else:
                    # exit CLI
                    break
            elif code > 31 and isinstance(c, str):
                # Accept typed character in the utterance
                line += c
    finally:
        scr.erase()
        scr.refresh()
        scr = None
def simple_cli():
    """Run a minimal stdin/stdout interaction loop (no curses UI).

    Reads lines from stdin and emits each as a Mycroft utterance on the
    messagebus; spoken responses are printed by handle_speak (bSimple mode).
    Ctrl+C exits.
    """
    global bSimple
    bSimple = True

    bus.on('speak', handle_speak)
    try:
        while True:
            # Sleep for a while so all the output that results
            # from the previous command finishes before we print.
            time.sleep(1.5)
            print("Input (Ctrl+C to quit):")
            line = sys.stdin.readline()
            bus.emit(Message("recognizer_loop:utterance",
                             {'utterances': [line.strip()]},
                             {'client_name': 'mycroft_simple_cli',
                              'source': 'debug_cli',
                              'destination': ["skills"]}))
    except KeyboardInterrupt:
        # User hit Ctrl+C to quit
        print("")
    # NOTE: a second, duplicate `except KeyboardInterrupt` clause was removed
    # here — it was unreachable (same exception type as the clause above) and
    # referenced `event_thread`, which is not defined in this scope.
def connect_to_messagebus():
    """ Connect to the mycroft messagebus and launch a thread handling the
    connection.

    Returns: WebsocketClient
    """
    client = MessageBusClient()  # Mycroft messagebus connection

    # Run the bus loop on a daemon thread so it never blocks program exit
    listener = Thread(target=connect, args=[client])
    listener.setDaemon(True)
    listener.start()
    return client
|
{
"content_hash": "12ac80ba76e5c8dcfccc6aa576d5b6d6",
"timestamp": "",
"source": "github",
"line_count": 1551,
"max_line_length": 79,
"avg_line_length": 31.711154094132816,
"alnum_prop": 0.5015045543266102,
"repo_name": "forslund/mycroft-core",
"id": "9b5b263e6637e37c817640439f9fb45591bd93a4",
"size": "49764",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "mycroft/client/text/text_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3791"
},
{
"name": "Python",
"bytes": "1371642"
},
{
"name": "QML",
"bytes": "18805"
},
{
"name": "Shell",
"bytes": "83796"
}
],
"symlink_target": ""
}
|
import unittest
import vodka.app
@vodka.app.register("app_a")
class AppA(vodka.app.Application):
    """Minimal application registered under the handle "app_a"."""
    pass
@vodka.app.register("app_b")
class AppB(vodka.app.Application):
    """Minimal application registered under the handle "app_b"."""
    pass
class TestApp(unittest.TestCase):
    """Exercises vodka's application registry."""

    def test_register(self):
        """A handle can be registered once; re-registering raises KeyError."""
        vodka.app.register(AppA)
        assert vodka.app.applications.get("app_a") == AppA

        with self.assertRaises(KeyError):
            @vodka.app.register("app_a")
            class AppC(vodka.app.Application):
                pass

    def test_get_application(self):
        """get_application returns the class; unknown handles raise KeyError."""
        vodka.app.register(AppB)
        assert vodka.app.get_application("app_b") == AppB

        with self.assertRaises(KeyError):
            vodka.app.get_application("does_not_exist")
|
{
"content_hash": "a14c1f6d89dff9a58020e04972743204",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 58,
"avg_line_length": 24.75862068965517,
"alnum_prop": 0.6392757660167131,
"repo_name": "20c/vodka",
"id": "f00ff8507d1ebe194fe2cc94b4043bd2f31b42a2",
"size": "718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "260"
},
{
"name": "Python",
"bytes": "118007"
}
],
"symlink_target": ""
}
|
"""
Filename: lae.py
Authors: Thomas J. Sargent, John Stachurski
Computes a sequence of marginal densities for a continuous state space
Markov chain :math:`X_t` where the transition probabilities can be represented
as densities. The estimate of the marginal density of :math:`X_t` is
.. math::
\frac{1}{n} \sum_{i=0}^n p(X_{t-1}^i, y)
This is a density in y.
References
----------
http://quant-econ.net/py/stationary_densities.html
"""
from textwrap import dedent
import numpy as np
class LAE(object):
    """
    An instance is a representation of a look ahead estimator associated
    with a given stochastic kernel p and a vector of observations X.

    Parameters
    ----------
    p : function
        The stochastic kernel. A function p(x, y) that is vectorized in
        both x and y
    X : array_like(float)
        A vector containing observations

    Attributes
    ----------
    p, X : see Parameters

    Examples
    --------
    >>> psi = LAE(p, X)
    >>> y = np.linspace(0, 1, 100)
    >>> psi(y) # Evaluate look ahead estimate at grid of points y

    """

    def __init__(self, p, X):
        flat = X.flatten()  # normalize observations to a known 1-D layout
        self.p = p
        self.X = flat.reshape((len(flat), 1))  # column vector, one row per obs

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        m = """\
        Look ahead estimator
          - number of observations : {n}
        """
        return dedent(m.format(n=self.X.size))

    def __call__(self, y):
        """
        A vectorized function that returns the value of the look ahead
        estimate at the values in the array y.

        Parameters
        ----------
        y : array_like(float)
            A vector of points at which we wish to evaluate the look-
            ahead estimator

        Returns
        -------
        psi_vals : array_like(float)
            The values of the density estimate at the points in y

        """
        # Broadcast the kernel over all (observation, evaluation-point) pairs
        eval_row = y.reshape((1, len(y)))
        kernel_vals = self.p(self.X, eval_row)
        # Average down the observation axis to get the estimate at each point
        return np.mean(kernel_vals, axis=0).flatten()
|
{
"content_hash": "e5255cfa635360319961a248fdf6583e",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 78,
"avg_line_length": 24.367816091954023,
"alnum_prop": 0.5712264150943396,
"repo_name": "jviada/QuantEcon.py",
"id": "c8d9204f05bc23565825ea67bbdbb1e11070eea0",
"size": "2120",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "quantecon/lae.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "Python",
"bytes": "413983"
},
{
"name": "Shell",
"bytes": "219"
}
],
"symlink_target": ""
}
|
import numpy as np
from ._base import osd
from skbio.diversity._util import _validate_counts_vector
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def chao1(counts, bias_corrected=True):
    r"""Compute the chao1 richness estimator for a vector of counts.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.
    bias_corrected : bool, optional
        When ``False`` *and* the sample contains both singletons and
        doubletons, the classic (uncorrected) form is used; in every other
        case the bias-corrected form is applied.

    Returns
    -------
    double
        Computed chao1 richness estimator.

    See Also
    --------
    chao1_ci

    Notes
    -----
    The uncorrected form is Equation 6 in [1]_:

    .. math::

       chao1=S_{obs}+\frac{F_1^2}{2F_2}

    and the bias-corrected form is

    .. math::

       chao1=S_{obs}+\frac{F_1(F_1-1)}{2(F_2+1)}

    where :math:`F_1` and :math:`F_2` are the counts of singletons and
    doubletons, respectively.

    References
    ----------
    .. [1] Chao, A. 1984. Non-parametric estimation of the number of classes
       in a population. Scandinavian Journal of Statistics 11, 265-270.

    """
    counts = _validate_counts_vector(counts)
    observed, singles, doubles = osd(counts)
    if singles and doubles and not bias_corrected:
        return observed + singles ** 2 / (2 * doubles)
    return observed + singles * (singles - 1) / (2 * (doubles + 1))
@experimental(as_of="0.4.0")
def chao1_ci(counts, bias_corrected=True, zscore=1.96):
    """Compute a confidence interval for the chao1 estimator.

    Parameters
    ----------
    counts : 1-D array_like, int
        Vector of counts.
    bias_corrected : bool, optional
        Forwarded to :func:`chao1` (and the variance helpers); see
        :func:`chao1` for the decision rule.
    zscore : scalar, optional
        z-score determining the confidence level. The default of 1.96
        corresponds to a 95% confidence interval.

    Returns
    -------
    tuple
        chao1 confidence interval as ``(lower_bound, upper_bound)``.

    See Also
    --------
    chao1

    Notes
    -----
    Follows the equations of the EstimateS manual [1]_. The variance and
    interval formulas are selected from Equations 5-8 and 13-14 depending on
    ``bias_corrected`` and the presence/absence of singletons and doubletons.

    References
    ----------
    .. [1] http://viceroy.eeb.uconn.edu/estimates/

    """
    counts = _validate_counts_vector(counts)
    observed, singles, _ = osd(counts)
    if not singles:
        # No singletons: the variance-based interval is not applicable;
        # fall back to EstimateS Equation 14.
        return _chao_confidence_no_singletons(counts.sum(), observed, zscore)
    estimate = chao1(counts, bias_corrected)
    variance = _chao1_var(counts, bias_corrected)
    return _chao_confidence_with_singletons(estimate, observed, variance,
                                            zscore)
def _chao1_var(counts, bias_corrected=True):
    """Dispatch to the chao1 variance formula matching EstimateS' rules."""
    observed, singles, doubles = osd(counts)
    if singles and doubles:
        if bias_corrected:
            return _chao1_var_bias_corrected(singles, doubles)
        return _chao1_var_uncorrected(singles, doubles)
    if not doubles:
        # Covers both "singletons only" and "no singletons, no doubletons".
        return _chao1_var_no_doubletons(singles, chao1(counts, bias_corrected))
    return _chao1_var_no_singletons(counts.sum(), observed)
def _chao1_var_uncorrected(singles, doubles):
"""Calculates chao1, uncorrected.
From EstimateS manual, equation 5.
"""
r = singles / doubles
return doubles * (.5 * r ** 2 + r ** 3 + .24 * r ** 4)
def _chao1_var_bias_corrected(s, d):
"""Calculates chao1 variance, bias-corrected.
`s` is the number of singletons and `d` is the number of doubletons.
From EstimateS manual, equation 6.
"""
return (s * (s - 1) / (2 * (d + 1)) + (s * (2 * s - 1) ** 2) /
(4 * (d + 1) ** 2) + (s ** 2 * d * (s - 1) ** 2) /
(4 * (d + 1) ** 4))
def _chao1_var_no_doubletons(s, chao1):
"""Calculates chao1 variance in absence of doubletons.
From EstimateS manual, equation 7.
`s` is the number of singletons, and `chao1` is the estimate of the mean of
Chao1 from the same dataset.
"""
return s * (s - 1) / 2 + s * (2 * s - 1) ** 2 / 4 - s ** 4 / (4 * chao1)
def _chao1_var_no_singletons(n, o):
"""Calculates chao1 variance in absence of singletons.
`n` is the number of individuals and `o` is the number of observed OTUs.
From EstimateS manual, equation 8.
"""
return o * np.exp(-n / o) * (1 - np.exp(-n / o))
def _chao_confidence_with_singletons(chao, observed, var_chao, zscore=1.96):
"""Calculates confidence bounds for chao1 or chao2.
Uses Eq. 13 of EstimateS manual.
`zscore` is the score to use for confidence. The default of 1.96 is for 95%
confidence.
"""
T = chao - observed
# if no diff betweeh chao and observed, CI is just point estimate of
# observed
if T == 0:
return observed, observed
K = np.exp(abs(zscore) * np.sqrt(np.log(1 + (var_chao / T ** 2))))
return observed + T / K, observed + T * K
def _chao_confidence_no_singletons(n, s, zscore=1.96):
"""Calculates confidence bounds for chao1/chao2 in absence of singletons.
Uses Eq. 14 of EstimateS manual.
`n` is the number of individuals and `s` is the number of OTUs.
"""
P = np.exp(-n / s)
return (max(s, s / (1 - P) - zscore * np.sqrt((s * P / (1 - P)))),
s / (1 - P) + zscore * np.sqrt(s * P / (1 - P)))
|
{
"content_hash": "6050c2ee5e163e0a89d456dd300328eb",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 79,
"avg_line_length": 28.85308056872038,
"alnum_prop": 0.6125164257555847,
"repo_name": "anderspitman/scikit-bio",
"id": "0df695ea95f5044a165f06c7a9a3981084c47697",
"size": "6442",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "skbio/diversity/alpha/_chao1.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39087"
},
{
"name": "CSS",
"bytes": "4379"
},
{
"name": "Groff",
"bytes": "377"
},
{
"name": "Jupyter Notebook",
"bytes": "210926"
},
{
"name": "Makefile",
"bytes": "1054"
},
{
"name": "Python",
"bytes": "2502133"
}
],
"symlink_target": ""
}
|
import server.dbconnect as dbconnect
import server.dbreader as dbreader
import argparse
import codecs
import random
parser = argparse.ArgumentParser(description="Write a whole shitload of poems")
parser.add_argument('count', type=int, help="number of poems to write")
parser.add_argument('--output', action='store', type=str, default='poems.txt', help="Number of separate processes to run")
args = parser.parse_args()
if __name__ == '__main__':
conn = dbconnect.MySQLDatabaseConnection.connectionWithConfiguration('digitalocean')
cursor = conn.connection.cursor(dictionary=True)
query = """SELECT COUNT(*) FROM cached_poems WHERE complete=1;"""
cursor.execute(query);
res = cursor.fetchall()
if not res:
print "Could not count cached poems"
exit(1)
poem_count = res[0]['COUNT(*)']
with codecs.open(args.output, "w", "utf-8") as outf:
for _ in range(args.count):
query = """SELECT * FROM cached_poems WHERE complete=1 LIMIT 1 OFFSET %s;"""
values = (random.randint(0, poem_count), )
cursor.execute(query, values)
res = cursor.fetchall()[0]
title = dbreader.pageTitleForPageID(conn, res['page_id'])
outf.write("{} -- {}:\n\n".format(res['id'], title));
for i in range(14):
col = "line_{}".format(i)
lineID = res[col]
line = dbreader.textForLineID(conn, lineID)
outf.write(line)
outf.write("\n")
outf.write('\n\n\n')
conn.close()
|
{
"content_hash": "9ea88c38f812380b37f6e8c3921f768d",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 122,
"avg_line_length": 39.2,
"alnum_prop": 0.6116071428571429,
"repo_name": "starakaj/wikisonnet",
"id": "3fe85b0a0ad7a5518b66be1f8a6e77b1a972d58f",
"size": "1568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/print_cached_poems.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "959"
},
{
"name": "Python",
"bytes": "135990"
}
],
"symlink_target": ""
}
|
from django.db import models
from articles.models import Article
class BadPerson(models.Model):
    """An Instagram account recorded as a "Bad Instagrammer" (see Meta).

    Presumably used to exclude these accounts' posts from the site --
    TODO(review): confirm against the code that queries this model.
    """
    # Both identifiers are optional; an entry may carry a username, a
    # numeric IG user ID, or both.
    username = models.CharField("IG username", max_length=100, blank=True, null=True)
    user_id = models.CharField("IG user ID", max_length=100, blank=True, null=True)
    # Row creation / last-modification timestamps, maintained by Django.
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)

    class Meta:
        verbose_name = 'Bad Instagrammer'
        verbose_name_plural = 'Bad People'
        ordering = ['-timestamp',]

    def __unicode__(self):
        # NOTE(review): username is nullable, so this can return None and
        # break unicode() -- consider falling back to user_id.
        return self.username
class IGTag(models.Model):
    """An Instagram hashtag tracked by the site, linked to the articles
    it is shown in."""
    tag = models.CharField("IG tag", max_length=200, blank=True)
    ig_id = models.CharField("IG ID", max_length=200, blank=True, null=True, default=None)
    # null=True was removed from this ManyToManyField: it has no effect on
    # M2M relations (the relation lives in a separate join table) and Django
    # flags it with warning fields.W340. No migration results from this.
    articles = models.ManyToManyField(Article, blank=True)
    # Row creation / last-modification timestamps, maintained by Django.
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)

    class Meta:
        verbose_name = 'Instagram Tag'
        verbose_name_plural = 'Instagram Tags'
        ordering = ['-timestamp',]

    def __unicode__(self):
        return self.tag

    def shown_in_articles(self):
        """Return the titles of all linked articles, newline-joined
        (e.g. for display in the admin)."""
        return "\n".join([p.title for p in self.articles.all()])
# Create your models here.
class InstaPost(models.Model):
    """A cached Instagram post, stored with its media URLs and caption."""
    insta_id = models.CharField("IG id", max_length=200, blank=True)
    username = models.CharField("IG username", max_length=100, blank=True)
    link = models.URLField("Link to IG post", max_length=100, blank=True)
    profile_picture = models.URLField("Profile Pic URL", max_length=500, blank=True)
    thumbnail = models.CharField("thumbnail URL", max_length=200, blank=True)
    #tag = models.CharField("IG tag", max_length=200, blank=True)
    # The tag this post was found under; previously a plain CharField
    # (commented-out line above), now a foreign key to IGTag.
    tag = models.ForeignKey(IGTag, null=True, blank=True)
    standard_resolution = models.CharField("image URL", max_length=200, blank=True)
    caption_text = models.TextField("Caption", blank=True)
    # Presumably the post's creation time as reported by Instagram, distinct
    # from `timestamp` below (local row creation) -- TODO(review): confirm.
    created_time = models.DateTimeField(null=True, blank=True)
    # Visibility flag; presumably a soft-delete switch -- confirm in views.
    active = models.BooleanField(default=True)
    # Row creation / last-modification timestamps, maintained by Django.
    timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)

    class Meta:
        verbose_name = 'Instagram Post'
        verbose_name_plural = 'Instagram Posts'
        ordering = ['-created_time', ]

    def __unicode__(self):
        return self.insta_id
|
{
"content_hash": "b3d39e9ebc88b1c64a906c55d6aee298",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 90,
"avg_line_length": 41.50847457627118,
"alnum_prop": 0.6843609636586362,
"repo_name": "davogler/POSTv3",
"id": "4066eda5f51641456b6cf8cb56cc8cadffb28e5c",
"size": "2449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insta/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "144324"
},
{
"name": "HTML",
"bytes": "282463"
},
{
"name": "JavaScript",
"bytes": "244051"
},
{
"name": "Python",
"bytes": "358932"
}
],
"symlink_target": ""
}
|
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import sessionmaker
engine = create_engine('mysql://root:pass@127.0.0.1/test?charset=utf8', echo=True)
Base = declarative_base()
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(40))
fullname = Column(String(40))
password = Column(String(40))
def __repr__(self):
return "<User(name='%s', fullname='%s', password='%s')>" % (
self.name, self.fullname, self.password)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
print 'start query'
session = Session()
q = session.query(User).filter_by(name='Sun').one()
q.name = 'Sun11'
q.password = 'pass'
session.commit()
print 'start filter'
t = session.query(User).filter_by(name='Sun11').one()
print 'start print'
print t
|
{
"content_hash": "d7c0364244a02b0686758debf67435e8",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 82,
"avg_line_length": 27.054054054054053,
"alnum_prop": 0.6873126873126874,
"repo_name": "li-ma/homework",
"id": "f369b931ee74b220d1370532259a61731278bbfb",
"size": "1001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test-sql-2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "20091"
},
{
"name": "Python",
"bytes": "130929"
},
{
"name": "Shell",
"bytes": "6056"
}
],
"symlink_target": ""
}
|
"""A convenience and compatibility wrapper for RawConfigParser."""
import shlex
try:
from configparser import RawConfigParser
except ImportError:
from ConfigParser import RawConfigParser
__all__ = ('ConfigParser',)
class ConfigParser(RawConfigParser):
    """RawConfigParser subclass with default-aware, typed option accessors."""

    def get(self, section, option, default=None, **kwargs):
        """
        Return value of option in given configuration section as a string.

        If option is not set, return default instead (defaults to None).
        """
        if not self.has_option(section, option):
            return default
        return RawConfigParser.get(self, section, option, **kwargs)

    def getint(self, section, option, default=None):
        """
        Return value of option in given configuration section as an integer.

        If option is not set, return default (defaults to None).

        Raises ValueError if the value cannot be converted to an integer.
        """
        raw = self.get(section, option)
        if raw is None:
            return default
        return int(raw)

    def getfloat(self, section, option, default=None):
        """
        Return value of option in given configuration section as a float.

        If option is not set, return default (defaults to None).

        Raises ValueError if the value cannot be converted to a float.
        """
        raw = self.get(section, option)
        if raw is None:
            return default
        return float(raw)

    def getboolean(self, section, option, default=False):
        """
        Return value of option in given configuration section as a boolean.

        A value is considered true when it is one of '1', 'on', 'true' or
        'yes' (case-insensitive); every other value is false.

        If option is not set or empty, return default (defaults to False).
        """
        raw = self.get(section, option)
        if not raw:
            return default
        return raw.lower() in ('1', 'on', 'true', 'yes')

    def getlist(self, section, option, default=None):
        """
        Return value of option in given section as a list of strings.

        If option is not set, return default (defaults to an empty list).

        Two splitting strategies are used:

        * A value containing newlines (written with continuation lines) is
          split at newlines; empty items are discarded.
        * Otherwise the value is split with unix shell rules: whitespace
          separates items, but single or double quotes preserve spaces.

        Example::

            [test]
            option2 =
                one
                two three
                four
                five six
            option1 = one "two three" four 'five six'
        """
        if not self.has_option(section, option):
            return [] if default is None else default
        raw = self.get(section, option)
        if '\n' in raw:
            stripped = (item.strip() for item in raw.splitlines())
            return [item for item in stripped if item]
        return shlex.split(raw)

    def set(self, section, option, value):
        """
        Set option in given configuration section to value.

        The section is created implicitly if it does not exist yet.
        """
        if not self.has_section(section):
            self.add_section(section)
        RawConfigParser.set(self, section, option, value)
|
{
"content_hash": "d5fac8963295f30ebb5edc5d4410d3fe",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 79,
"avg_line_length": 32.810810810810814,
"alnum_prop": 0.6227347611202636,
"repo_name": "yloiseau/Watson",
"id": "a72ee9066b8e7149f9ec02d43654b23ec0b7f503",
"size": "3666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "watson/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "839"
},
{
"name": "Python",
"bytes": "93914"
}
],
"symlink_target": ""
}
|
"""This plugin allows rules to contain regular expression tags."""
import re
import oa.errors
import oa.plugins.base
from oa.regex import Regex
# This splits value in the corresponding tags
SPLIT_TAGS = Regex(r"(<[^<>]+>)")
class ReplaceTags(oa.plugins.base.BasePlugin):
    """Replace configured <TAG> placeholders inside the values of rules
    listed in ``replace_rules``."""
    eval_rules = ()
    options = {
        # Delimiters marking the start and end of a tag in a rule value.
        "replace_start": ("str", "<"),
        "replace_end": ("str", ">"),
        # These options define the tags (each entry is "NAME value").
        "replace_pre": ("append", []),
        "replace_inter": ("append", []),
        "replace_post": ("append", []),
        "replace_tag": ("append", []),
        # This option lists the rules whose values will be
        # inspected for tags.
        "replace_rules": ("append_split", []),
    }

    def prepare_tags(self, which="tag"):
        """Prepare the configured tags for easy replacement.

        Valid options for which are: pre, inter, post, tag.

        This converts the list of defined TAG entries to a dictionary
        and stores it back in the context. The dictionary maps
        the full tag name, including the start and end
        characters (e.g. ``<inter A>`` or ``<NAME>``), to their values.
        Malformed or duplicate entries are reported via ``ctxt.err`` and
        skipped / overwritten respectively.
        """
        # Extra text besides the tag name, for example:
        # <inter A>
        extra = "%s " % which
        if which == "tag":
            extra = ""
        # Template with an escaped %s slot for the tag name, e.g. "<pre %s>".
        template = "%s%s%%s%s" % (
            self["replace_start"],
            extra,
            self["replace_end"]
        )
        result = {}
        for config in self["replace_%s" % which]:
            try:
                # First whitespace-separated token is the tag name; the rest
                # (kept verbatim) is its replacement value.
                tag_name, value = config.split(None, 1)
            except ValueError:
                self.ctxt.err("Invalid replace tag: %r", config)
                continue
            full_name = template % tag_name
            if full_name in result:
                # Later definitions win; the conflict is only reported.
                self.ctxt.err("Redefining replace tag: %r", full_name)
            result[template % tag_name] = value
        # Replace the list with a dictionary in the global
        # context.
        self["replace_%s" % which] = result

    def get_metatags(self, rule_value, which):
        """Extract and strip the meta tags of kind *which* from a rule value.

        Returns a tuple ``(replacement_text, stripped_rule_value)`` where
        ``replacement_text`` is the concatenation of the values of every
        meta tag of that kind found in the rule.

        Example::

            >>> self.get_metatags("/<post P3>(?!tion)/", "post")
            ('{3}', '/(?!tion)/')
        """
        result = []
        for tag, tag_value in self["replace_%s" % which].items():
            if tag in rule_value:
                result.append(tag_value)
                rule_value = rule_value.replace(tag, "")
        return "".join(result), rule_value

    def replace_tags(self, rule_value):
        """Replace a single rule result.

        Meta tags (pre/inter/post) are stripped first; then every plain
        tag occurrence is replaced with PRE + value + POST, and INTER is
        inserted between directly adjacent tags.
        """
        pre_replace, rule_value = self.get_metatags(rule_value, "pre")
        inter_replace, rule_value = self.get_metatags(rule_value, "inter")
        post_replace, rule_value = self.get_metatags(rule_value, "post")
        results = []
        replace_tags = self["replace_tag"]
        # SPLIT_TAGS is a capturing split, so tags appear as their own list
        # items; two adjacent tags are separated by an empty string item.
        splits = SPLIT_TAGS.split(rule_value)
        for i, value in enumerate(splits):
            try:
                replace_value = replace_tags[value]
            except KeyError:
                # This is not a tag just add it to the result
                results.append(value)
                continue
            results.append(pre_replace)
            results.append(replace_value)
            results.append(post_replace)
            # Check the next value in the list to see if
            # it's also a tag. If so then add the INTER.
            try:
                if splits[i + 1] == '' and splits[i + 2] in replace_tags:
                    # The split will actually return a empty string
                    # in these cases.
                    results.append(inter_replace)
            except IndexError:
                pass
        return "".join(results)

    def finish_parsing_start(self, results):
        """All configuration file have been read. Check the existing
        rules and replace any available tags.
        """
        super(ReplaceTags, self).finish_parsing_start(results)
        for which in ("pre", "inter", "post", "tag"):
            self.prepare_tags(which)
        for rule_name in self["replace_rules"]:
            try:
                rule_results = results[rule_name]
            except KeyError:
                self.ctxt.err("No such rule defined: %s", rule_name)
                continue
            rule_value = rule_results["value"]
            new_rule_value = self.replace_tags(rule_value)
            self.ctxt.log.debug("Replaced %r with %r in %s", rule_value,
                                new_rule_value, rule_name)
            rule_results["value"] = new_rule_value
|
{
"content_hash": "4ce46c1632bd7bc81b5c985a1408539c",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 74,
"avg_line_length": 35.91538461538462,
"alnum_prop": 0.5410152066823731,
"repo_name": "SpamExperts/OrangeAssassin",
"id": "9746ed20188c4fdbbcafdb90acf8ad372f642c45",
"size": "4669",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oa/plugins/replace_tags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1485966"
}
],
"symlink_target": ""
}
|
"""Add ``creating_job_id`` to ``DagRun`` table
Revision ID: 364159666cbd
Revises: 52d53670a240
Create Date: 2020-10-10 09:08:07.332456
"""
from __future__ import annotations
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "364159666cbd"
down_revision = "52d53670a240"
branch_labels = None
depends_on = None
airflow_version = "2.0.0"
def upgrade():
    """Apply Add ``creating_job_id`` to ``DagRun`` table"""
    # No NOT NULL constraint is specified, so existing rows get NULL.
    op.add_column("dag_run", sa.Column("creating_job_id", sa.Integer))
def downgrade():
    """Unapply Add ``creating_job_id`` to ``DagRun`` table"""
    op.drop_column("dag_run", "creating_job_id")
|
{
"content_hash": "f3f1135acebc0d694d04125bf2bb0c0b",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 70,
"avg_line_length": 23.178571428571427,
"alnum_prop": 0.6933744221879815,
"repo_name": "apache/airflow",
"id": "f0dd1c4d969a3e19fe8660b4422c8926af3c4695",
"size": "1436",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/migrations/versions/0071_2_0_0_add_job_id_to_dagrun_table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
}
|
class GoalAssociation:
    """Association between a goal and a sub-goal within an environment.

    Plain value object: every constructor argument is stored on a public
    ``the*`` attribute and exposed through a read-only accessor method.
    """

    def __init__(self, associationId, envName, goalName, goalDimName, aType,
                 subGoalName, subGoalDimName, alternativeId, rationale):
        self.theId = associationId
        self.theEnvironmentName = envName
        self.theGoal = goalName
        self.theGoalDimension = goalDimName
        self.theAssociationType = aType
        self.theSubGoal = subGoalName
        self.theSubGoalDimension = subGoalDimName
        self.theAlternativeId = alternativeId
        self.theRationale = rationale

    def id(self):
        return self.theId

    def environment(self):
        return self.theEnvironmentName

    def goal(self):
        return self.theGoal

    def goalDimension(self):
        return self.theGoalDimension

    def type(self):
        return self.theAssociationType

    def subGoal(self):
        return self.theSubGoal

    def subGoalDimension(self):
        return self.theSubGoalDimension

    def alternative(self):
        return self.theAlternativeId

    def rationale(self):
        return self.theRationale

    def __str__(self):
        # Same "a / b / ... / alternative" rendering as the original
        # concatenation; rationale is deliberately not included.
        parts = (self.theEnvironmentName, self.theGoal, self.theGoalDimension,
                 self.theAssociationType, self.theSubGoal,
                 self.theSubGoalDimension, str(self.theAlternativeId))
        return ' / '.join(parts)
|
{
"content_hash": "698269e83f45d8ce7b6071e1fed4be10",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 239,
"avg_line_length": 50.30434782608695,
"alnum_prop": 0.7536732929991357,
"repo_name": "RobinQuetin/CAIRIS-web",
"id": "a4069f307d2054c8210678de5273de68234066db",
"size": "1956",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cairis/cairis/GoalAssociation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11265"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "Python",
"bytes": "3313365"
},
{
"name": "Shell",
"bytes": "19461"
},
{
"name": "XSLT",
"bytes": "35522"
}
],
"symlink_target": ""
}
|
import asyncio
import sys
import warnings
from io import BytesIO
from itertools import islice
from typing import AsyncIterator
from azure.core.exceptions import HttpResponseError, ResourceModifiedError
from .._download import _ChunkDownloader
from .._shared.request_handlers import validate_and_format_range_headers
from .._shared.response_handlers import process_storage_error, parse_length_from_content_range
async def process_content(data):
    """Return the raw body bytes of a completed download response.

    :param data: Pipeline response wrapper exposing ``.response``; must not
        be None.
    :raises ValueError: If *data* is None.
    :raises HttpResponseError: If reading the response body fails; the
        original exception is attached as ``error``.
    """
    if data is None:
        raise ValueError("Response cannot be None.")
    try:
        return data.response.body()
    except Exception as error:
        raise HttpResponseError(message="Download stream interrupted.", response=data.response, error=error)
class _AsyncChunkDownloader(_ChunkDownloader):
    """Async variant of _ChunkDownloader.

    When constructed with ``parallel`` truthy, guards the shared output
    stream and the progress counter with asyncio locks; otherwise no
    locking is performed.
    """

    def __init__(self, **kwargs):
        super(_AsyncChunkDownloader, self).__init__(**kwargs)
        # Locks are only needed when multiple chunks download concurrently.
        self.stream_lock = asyncio.Lock() if kwargs.get('parallel') else None
        self.progress_lock = asyncio.Lock() if kwargs.get('parallel') else None

    async def process_chunk(self, chunk_start):
        """Download one chunk, write it to the stream and update progress."""
        chunk_start, chunk_end = self._calculate_range(chunk_start)
        chunk_data = await self._download_chunk(chunk_start, chunk_end - 1)
        length = chunk_end - chunk_start
        if length > 0:
            await self._write_to_stream(chunk_data, chunk_start)
            await self._update_progress(length)

    async def yield_chunk(self, chunk_start):
        """Download one chunk and return its bytes (no stream write)."""
        chunk_start, chunk_end = self._calculate_range(chunk_start)
        return await self._download_chunk(chunk_start, chunk_end - 1)

    async def _update_progress(self, length):
        # Bump the shared progress counter (under the lock when parallel)
        # and notify the optional progress hook.
        if self.progress_lock:
            async with self.progress_lock:  # pylint: disable=not-async-context-manager
                self.progress_total += length
        else:
            self.progress_total += length

        if self.progress_hook:
            await self.progress_hook(self.progress_total, self.total_size)

    async def _write_to_stream(self, chunk_data, chunk_start):
        # Seek to this chunk's position before writing when parallel chunks
        # share the stream; sequential mode writes in order without seeking.
        if self.stream_lock:
            async with self.stream_lock:  # pylint: disable=not-async-context-manager
                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
                self.stream.write(chunk_data)
        else:
            self.stream.write(chunk_data)

    async def _download_chunk(self, chunk_start, chunk_end):
        """Issue a ranged GET for [chunk_start, chunk_end] and return bytes.

        Raises ResourceModifiedError when the file's etag changed since the
        initial request.
        """
        range_header, range_validation = validate_and_format_range_headers(
            chunk_start,
            chunk_end,
            check_content_md5=self.validate_content
        )
        try:
            _, response = await self.client.download(
                range=range_header,
                range_get_content_md5=range_validation,
                validate_content=self.validate_content,
                data_stream_total=self.total_size,
                download_stream_current=self.progress_total,
                **self.request_options
            )
            if response.properties.etag != self.etag:
                raise ResourceModifiedError(message="The file has been modified while downloading.")
        except HttpResponseError as error:
            # NOTE(review): process_storage_error is expected to re-raise;
            # if it ever returned normally, `response` would be unbound
            # below -- confirm its contract.
            process_storage_error(error)

        chunk_data = await process_content(response)
        return chunk_data
class _AsyncChunkIterator(object):
    """Async iterator over chunks of a download stream.

    Serves the bytes from the initial GET first (sliced into
    ``chunk_size`` pieces), then downloads and yields the remaining
    chunks via the supplied downloader.
    """

    def __init__(self, size, content, downloader, chunk_size):
        # size: total bytes in the stream; content: bytes already fetched by
        # the initial GET; downloader: fetches the remainder (falsy when the
        # initial GET covered everything); chunk_size: max bytes per yield.
        self.size = size
        self._chunk_size = chunk_size
        self._current_content = content
        self._iter_downloader = downloader
        self._iter_chunks = None
        self._complete = (size == 0)

    def __len__(self):
        return self.size

    def __iter__(self):
        raise TypeError("Async stream must be iterated asynchronously.")

    def __aiter__(self):
        return self

    async def __anext__(self):
        """Return the next chunk of bytes, or raise StopAsyncIteration."""
        if self._complete:
            raise StopAsyncIteration("Download complete")
        if not self._iter_downloader:
            # cut the data obtained from initial GET into chunks
            if len(self._current_content) > self._chunk_size:
                return self._get_chunk_data()
            self._complete = True
            return self._current_content
        if not self._iter_chunks:
            self._iter_chunks = self._iter_downloader.get_chunk_offsets()

        # initial GET result still has more than _chunk_size bytes of data
        if len(self._current_content) >= self._chunk_size:
            return self._get_chunk_data()

        try:
            chunk = next(self._iter_chunks)
            self._current_content += await self._iter_downloader.yield_chunk(chunk)
        except StopIteration:
            self._complete = True
            # there is likely some data left in self._current_content
            if self._current_content:
                return self._current_content
            raise StopAsyncIteration("Download complete")

        return self._get_chunk_data()

    def _get_chunk_data(self):
        # Pop the first _chunk_size bytes off the buffered content.
        chunk_data = self._current_content[: self._chunk_size]
        self._current_content = self._current_content[self._chunk_size:]
        return chunk_data
class StorageStreamDownloader(object): # pylint: disable=too-many-instance-attributes
"""A streaming object to download from Azure Storage.
:ivar str name:
The name of the file being downloaded.
:ivar: str path:
The full path of the file.
:ivar str share:
The name of the share where the file is.
:ivar ~azure.storage.fileshare.FileProperties properties:
The properties of the file being downloaded. If only a range of the data is being
downloaded, this will be reflected in the properties.
:ivar int size:
The size of the total data in the stream. This will be the byte range if specified,
otherwise the total size of the file.
"""
    def __init__(
        self,
        client=None,
        config=None,
        start_range=None,
        end_range=None,
        validate_content=None,
        max_concurrency=1,
        name=None,
        path=None,
        share=None,
        encoding=None,
        **kwargs
    ):
        """Build the downloader state and precompute the initial byte range.

        start_range/end_range are absolute byte offsets (both optional);
        remaining kwargs are forwarded to every service request.
        Note: the actual initial GET happens later in ``_setup``.
        """
        self.name = name
        self.path = path
        self.share = share
        self.properties = None
        self.size = None

        self._client = client
        self._config = config
        self._start_range = start_range
        self._end_range = end_range
        self._max_concurrency = max_concurrency
        self._encoding = encoding
        self._validate_content = validate_content
        self._progress_hook = kwargs.pop('progress_hook', None)
        self._request_options = kwargs
        self._location_mode = None
        self._download_complete = False
        self._current_content = None
        self._file_size = None
        self._response = None
        self._etag = None

        # The service only provides transactional MD5s for chunks under 4MB.
        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
        # chunk so a transactional MD5 can be retrieved.
        self._first_get_size = self._config.max_single_get_size if not self._validate_content \
            else self._config.max_chunk_get_size
        initial_request_start = self._start_range if self._start_range is not None else 0
        if self._end_range is not None and self._end_range - self._start_range < self._first_get_size:
            initial_request_end = self._end_range
        else:
            initial_request_end = initial_request_start + self._first_get_size - 1

        # Inclusive (start, end) byte range for the first GET.
        self._initial_range = (initial_request_start, initial_request_end)
    def __len__(self):
        """Total number of bytes this download will produce (``self.size``)."""
        return self.size
    async def _setup(self):
        """Issue the initial GET and populate size, properties and the
        first batch of content bytes."""
        self._response = await self._initial_request()
        self.properties = self._response.properties
        self.properties.name = self.name
        self.properties.path = self.path
        self.properties.share = self.share

        # Set the content length to the download size instead of the size of
        # the last range
        self.properties.size = self.size

        # Overwrite the content range to the user requested range
        # NOTE(review): when no range was requested, _start_range/_end_range
        # are None, so this renders as 'bytes None-None/<size>' -- confirm
        # this is intended.
        self.properties.content_range = 'bytes {0}-{1}/{2}'.format(
            self._start_range,
            self._end_range,
            self._file_size
        )

        # Overwrite the content MD5 as it is the MD5 for the last range instead
        # of the stored MD5
        # TODO: Set to the stored MD5 when the service returns this
        self.properties.content_md5 = None

        if self.size == 0:
            self._current_content = b""
        else:
            self._current_content = await process_content(self._response)
    async def _initial_request(self):
        """Issue the first ranged GET for this download.

        Determines the total file size from the Content-Range header, sets
        ``self.size`` to the number of bytes that will actually be
        downloaded, records the location mode and ETag for subsequent chunk
        requests, and returns the raw service response.
        """
        range_header, range_validation = validate_and_format_range_headers(
            self._initial_range[0],
            self._initial_range[1],
            start_range_required=False,
            end_range_required=False,
            check_content_md5=self._validate_content)

        try:
            location_mode, response = await self._client.download(
                range=range_header,
                range_get_content_md5=range_validation,
                validate_content=self._validate_content,
                data_stream_total=None,
                download_stream_current=0,
                **self._request_options)

            # Check the location we read from to ensure we use the same one
            # for subsequent requests.
            self._location_mode = location_mode

            # Parse the total file size and adjust the download size if ranges
            # were specified
            self._file_size = parse_length_from_content_range(response.properties.content_range)
            if self._file_size is None:
                raise ValueError("Required Content-Range response header is missing or malformed.")

            if self._end_range is not None:
                # Use the length unless it is over the end of the file
                self.size = min(self._file_size, self._end_range - self._start_range + 1)
            elif self._start_range is not None:
                self.size = self._file_size - self._start_range
            else:
                self.size = self._file_size

        except HttpResponseError as error:
            if self._start_range is None and error.response and error.response.status_code == 416:
                # Get range will fail on an empty file. If the user did not
                # request a range, do a regular get request in order to get
                # any properties.
                try:
                    _, response = await self._client.download(
                        validate_content=self._validate_content,
                        data_stream_total=0,
                        download_stream_current=0,
                        **self._request_options)
                # NOTE(review): this rebinds the outer `error`; harmless here
                # since the outer value is no longer used on this path.
                except HttpResponseError as error:
                    process_storage_error(error)

                # Set the download size to empty
                self.size = 0
                self._file_size = 0
            else:
                process_storage_error(error)

        # If the file is small, the download is complete at this point.
        # If file size is large, download the rest of the file in chunks.
        if response.properties.size == self.size:
            self._download_complete = True
        self._etag = response.properties.etag
        return response
    def chunks(self):
        # type: () -> AsyncIterator[bytes]
        """Iterate over chunks in the download stream.

        The first chunk is the content already fetched by the initial
        request; subsequent chunks (if any) are downloaded lazily by the
        returned iterator.

        :rtype: AsyncIterator[bytes]
        """
        if self.size == 0 or self._download_complete:
            # Everything was covered by the initial request; no chunk
            # downloader is needed.
            iter_downloader = None
        else:
            data_end = self._file_size
            if self._end_range is not None:
                # Use the length unless it is over the end of the file
                data_end = min(self._file_size, self._end_range + 1)
            iter_downloader = _AsyncChunkDownloader(
                client=self._client,
                total_size=self.size,
                chunk_size=self._config.max_chunk_get_size,
                current_progress=self._first_get_size,
                start_range=self._initial_range[1] + 1,  # Start where the first download ended
                end_range=data_end,
                stream=None,
                parallel=False,
                validate_content=self._validate_content,
                use_location=self._location_mode,
                etag=self._etag,
                **self._request_options)
        return _AsyncChunkIterator(
            size=self.size,
            content=self._current_content,
            downloader=iter_downloader,
            chunk_size=self._config.max_chunk_get_size
        )
async def readall(self):
# type: () -> bytes
"""Download the contents of this file.
This operation is blocking until all data is downloaded.
:rtype: bytes
"""
stream = BytesIO()
await self.readinto(stream)
data = stream.getvalue()
if self._encoding:
return data.decode(self._encoding)
return data
async def content_as_bytes(self, max_concurrency=1):
"""Download the contents of this file.
This operation is blocking until all data is downloaded.
:keyword int max_concurrency:
The number of parallel connections with which to download.
:rtype: bytes
"""
warnings.warn(
"content_as_bytes is deprecated, use readall instead",
DeprecationWarning
)
self._max_concurrency = max_concurrency
return await self.readall()
async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
"""Download the contents of this file, and decode as text.
This operation is blocking until all data is downloaded.
:keyword int max_concurrency:
The number of parallel connections with which to download.
:param str encoding:
Test encoding to decode the downloaded bytes. Default is UTF-8.
:rtype: str
"""
warnings.warn(
"content_as_text is deprecated, use readall instead",
DeprecationWarning
)
self._max_concurrency = max_concurrency
self._encoding = encoding
return await self.readall()
    async def readinto(self, stream):
        """Download the contents of this file to a stream.

        Writes the already-fetched initial content first, then (unless the
        download completed in the initial request) fans out the remaining
        ranges across up to ``self._max_concurrency`` concurrent chunk
        downloads.

        :param stream:
            The stream to download to. This can be an open file-handle,
            or any writable stream. The stream must be seekable if the download
            uses more than one parallel connection.
        :returns: The number of bytes read.
        :rtype: int
        """
        # the stream must be seekable if parallel download is required
        parallel = self._max_concurrency > 1
        if parallel:
            error_message = "Target stream handle must be seekable."
            if sys.version_info >= (3,) and not stream.seekable():
                raise ValueError(error_message)

            # Fallback seekability probe for streams without seekable().
            try:
                stream.seek(stream.tell())
            except (NotImplementedError, AttributeError):
                raise ValueError(error_message)

        # Write the content to the user stream
        stream.write(self._current_content)
        if self._progress_hook:
            await self._progress_hook(len(self._current_content), self.size)

        if self._download_complete:
            # Initial request covered the whole download.
            return self.size

        data_end = self._file_size
        if self._end_range is not None:
            # Use the length unless it is over the end of the file
            data_end = min(self._file_size, self._end_range + 1)

        downloader = _AsyncChunkDownloader(
            client=self._client,
            total_size=self.size,
            chunk_size=self._config.max_chunk_get_size,
            current_progress=self._first_get_size,
            start_range=self._initial_range[1] + 1,  # start where the first download ended
            end_range=data_end,
            stream=stream,
            parallel=parallel,
            validate_content=self._validate_content,
            use_location=self._location_mode,
            progress_hook=self._progress_hook,
            etag=self._etag,
            **self._request_options)

        # Seed the pool with up to max_concurrency chunk tasks, then keep it
        # topped up: each time one finishes, schedule the next chunk offset.
        dl_tasks = downloader.get_chunk_offsets()
        running_futures = [
            asyncio.ensure_future(downloader.process_chunk(d))
            for d in islice(dl_tasks, 0, self._max_concurrency)
        ]
        while running_futures:
            # Wait for some download to finish before adding a new one
            # (asyncio.wait returns the still-pending futures as a set).
            _done, running_futures = await asyncio.wait(
                running_futures, return_when=asyncio.FIRST_COMPLETED)
            try:
                next_chunk = next(dl_tasks)
            except StopIteration:
                break
            else:
                running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk)))

        if running_futures:
            # Wait for the remaining downloads to finish
            await asyncio.wait(running_futures)
        return self.size
async def download_to_stream(self, stream, max_concurrency=1):
"""Download the contents of this file to a stream.
:param stream:
The stream to download to. This can be an open file-handle,
or any writable stream. The stream must be seekable if the download
uses more than one parallel connection.
:returns: The properties of the downloaded file.
:rtype: Any
"""
warnings.warn(
"download_to_stream is deprecated, use readinto instead",
DeprecationWarning
)
self._max_concurrency = max_concurrency
await self.readinto(stream)
return self.properties
|
{
"content_hash": "e2bcd66580647445a36cde7699df7f3f",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 108,
"avg_line_length": 38.574786324786324,
"alnum_prop": 0.5978507727247548,
"repo_name": "Azure/azure-sdk-for-python",
"id": "ffcddb2e78431a46e05eb8c11cfbfdcee6e4f278",
"size": "18407",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/storage/azure-storage-file-share/azure/storage/fileshare/aio/_download_async.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import PIL.Image
from wagtail.wagtailimages.backends.base import BaseImageBackend
class PillowBackend(BaseImageBackend):
    """Image backend implemented on top of Pillow (PIL)."""

    def __init__(self, params):
        super(PillowBackend, self).__init__(params)

    def open_image(self, input_file):
        """Open *input_file* and return a PIL image object."""
        return PIL.Image.open(input_file)

    def save_image(self, image, output, format):
        """Write *image* to *output* in *format* at the configured quality."""
        image.save(output, format, quality=self.quality)

    def resize(self, image, size):
        """Return *image* resized to *size*.

        Bitmap ('1') and palette ('P') images cannot be antialias-resized
        directly, so they are converted to RGB first.
        """
        if image.mode in ('1', 'P'):
            image = image.convert('RGB')
        return image.resize(size, PIL.Image.ANTIALIAS)

    def crop(self, image, crop_box):
        """Return the region of *image* bounded by *crop_box*."""
        return image.crop(crop_box)

    def image_data_as_rgb(self, image):
        """Return ``(mode, raw_bytes)``, converting to RGB/RGBA if needed."""
        # https://github.com/thumbor/thumbor/blob/f52360dc96eedd9fc914fcf19eaf2358f7e2480c/thumbor/engines/pil.py#L206-L215
        if image.mode in ('RGB', 'RGBA'):
            return image.mode, image.tostring()
        target_mode = 'RGBA' if 'A' in image.mode else 'RGB'
        converted = image.convert(target_mode)
        return converted.mode, converted.tostring()
|
{
"content_hash": "8edefeec634c1c77242f0bb4510ddaad",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 123,
"avg_line_length": 31.428571428571427,
"alnum_prop": 0.6309090909090909,
"repo_name": "benemery/wagtail",
"id": "a1963f9317137360c07f724769b36d11e67f24f5",
"size": "1100",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "wagtail/wagtailimages/backends/pillow.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "134640"
},
{
"name": "D",
"bytes": "2012"
},
{
"name": "JavaScript",
"bytes": "52648"
},
{
"name": "Python",
"bytes": "1033196"
},
{
"name": "Ruby",
"bytes": "1275"
},
{
"name": "Shell",
"bytes": "9525"
}
],
"symlink_target": ""
}
|
"""
usage: zerows [-h]
"""
__author__ = "Sebastian Łach"
__copyright__ = "Copyright 2015, Sebastian Łach"
__credits__ = ["Sebastian Łach", ]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Sebastian Łach"
__email__ = "root@slach.eu"
from json import loads
from zmq import Context as ZMQContext, REQ
from zmq.eventloop.zmqstream import ZMQStream
from zmq.eventloop.ioloop import install as zmq_ioloop_install
zmq_ioloop_install()
import tornado
import tornado.web
import tornado.websocket
from tornado.log import app_log
from tornado.options import define, parse_command_line, options
from tornado.web import Application
from tornado.ioloop import IOLoop
# define application options
define('port', type=int, default=8080, help='application port number')
define('router', type=str, default='tcp://localhost:5559', help='router url')
ERROR_INVALID_REQUEST = b'{"error": "invalid request"}'
def load_message(message):
    """Parse *message* as JSON; return None (after debug-logging) if malformed."""
    try:
        parsed = loads(message)
    except ValueError as exc:
        app_log.debug(exc)
        return None
    return parsed
class ZeroMQHandler(tornado.websocket.WebSocketHandler):
    """WebSocket handler that proxies JSON requests to a ZeroMQ REQ socket."""

    def __init__(self, *args, **kwargs):
        super(ZeroMQHandler, self).__init__(*args, **kwargs)
        # Created lazily in open(): one REQ socket + stream per connection.
        self.socket = None
        self.stream = None

    def open(self):
        """Connect a fresh REQ socket to the configured router and route its
        replies back to this websocket via :meth:`on_dispatch`."""
        settings = self.application.settings
        self.socket = settings['zeromq']['context'].socket(REQ)
        self.socket.connect(settings['zeromq']['url'])
        self.stream = ZMQStream(self.socket, settings['ioloop'])
        self.stream.on_recv(self.on_dispatch)

    def on_message(self, message):
        """Forward a valid JSON request to the router, else report an error.

        NOTE(review): truthiness check means falsy-but-valid JSON payloads
        ('{}', '[]', '0', 'null') are rejected too — confirm intended.
        """
        request = load_message(message)
        if request:
            data = message.encode('utf8')
            self.stream.send(data)
        else:
            self.write_message(ERROR_INVALID_REQUEST)

    def on_dispatch(self, messages):
        """Relay each frame received from ZeroMQ back to the websocket client.

        NOTE(review): .encode assumes str frames; ZMQ frames may arrive as
        bytes — confirm against the stream configuration.
        """
        for message in messages:
            data = message.encode('utf8')
            self.write_message(data)

    def on_close(self):
        # Tear down the per-connection stream and socket.
        self.stream.close()
        self.socket.close()

    def check_origin(self, origin):
        # Allow cross-origin websocket connections.
        return True

    def data_received(self, chunk):
        pass
def main():
    """Main entry-point: parse options, build the app, run the IOLoop forever."""
    parse_command_line()
    application = Application(
        [
            (r'/', ZeroMQHandler),
        ],
        # Shared across all connections: the current IOLoop and one ZMQ
        # context pointing at the configured router URL.
        ioloop=IOLoop.current(),
        zeromq=dict(
            context=ZMQContext(),
            url=options.router,
        )
    )
    app_log.info(application.settings)
    application.listen(options.port)
    # Blocks until the loop is stopped.
    application.settings['ioloop'].start()
if __name__ == '__main__':
main()
|
{
"content_hash": "ef63a784ec90ee1d574af3ec281031a4",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 77,
"avg_line_length": 25.116504854368934,
"alnum_prop": 0.6331658291457286,
"repo_name": "sebastianlach/zerows",
"id": "4029f66b9043f776d57b1021e4857d228a6d12f7",
"size": "2638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerows/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3651"
}
],
"symlink_target": ""
}
|
"""User friendly container for Google Cloud Bigtable Column Family."""
import datetime
from google.protobuf import duration_pb2
from gcloud._helpers import _total_seconds
from gcloud.bigtable._generated import bigtable_table_data_pb2 as data_pb2
from gcloud.bigtable._generated import (
bigtable_table_service_messages_pb2 as messages_pb2)
def _timedelta_to_duration_pb(timedelta_val):
    """Convert a Python timedelta object to a duration protobuf.

    .. note::

        The Python timedelta has a granularity of microseconds while
        the protobuf duration type has a duration of nanoseconds.

    :type timedelta_val: :class:`datetime.timedelta`
    :param timedelta_val: A timedelta object.

    :rtype: :class:`google.protobuf.duration_pb2.Duration`
    :returns: A duration object equivalent to the time delta.
    """
    seconds_decimal = _total_seconds(timedelta_val)
    # Truncate the parts other than the integer.
    seconds = int(seconds_decimal)
    if seconds_decimal < 0:
        # timedelta always stores microseconds as a non-negative value;
        # re-sign the fractional part so seconds + micros equals the total.
        signed_micros = timedelta_val.microseconds - 10**6
    else:
        signed_micros = timedelta_val.microseconds
    # Convert microseconds to nanoseconds.
    nanos = 1000 * signed_micros
    return duration_pb2.Duration(seconds=seconds, nanos=nanos)
def _duration_pb_to_timedelta(duration_pb):
"""Convert a duration protobuf to a Python timedelta object.
.. note::
The Python timedelta has a granularity of microseconds while
the protobuf duration type has a duration of nanoseconds.
:type duration_pb: :class:`google.protobuf.duration_pb2.Duration`
:param duration_pb: A protobuf duration object.
:rtype: :class:`datetime.timedelta`
:returns: The converted timedelta object.
"""
return datetime.timedelta(
seconds=duration_pb.seconds,
microseconds=(duration_pb.nanos / 1000.0),
)
class GarbageCollectionRule(object):
    """Base class for garbage collection rules on a table's column family.

    Cells in the column family (within a table) matching the rule are
    deleted during garbage collection.

    .. note::
        This class is a do-nothing base class for all GC rules.

    .. note::
        A string ``gc_expression`` can also be used with API requests, but
        that value would be superceded by a ``gc_rule``. As a result, we
        don't support that feature and instead support via native classes.
    """

    def __ne__(self, other):
        # Inequality is derived from each subclass's __eq__.
        return not self.__eq__(other)
class MaxVersionsGCRule(GarbageCollectionRule):
    """Garbage collection rule capping how many versions of a cell are kept.

    :type max_num_versions: int
    :param max_num_versions: The maximum number of versions
    """

    def __init__(self, max_num_versions):
        self.max_num_versions = max_num_versions

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.max_num_versions == other.max_num_versions
        return False

    def to_pb(self):
        """Convert this rule to its ``GcRule`` protobuf representation.

        :rtype: :class:`.data_pb2.GcRule`
        :returns: The converted current object.
        """
        return data_pb2.GcRule(max_num_versions=self.max_num_versions)
class MaxAgeGCRule(GarbageCollectionRule):
    """Garbage collection rule expiring cells older than ``max_age``.

    :type max_age: :class:`datetime.timedelta`
    :param max_age: The maximum age allowed for a cell in the table.
    """

    def __init__(self, max_age):
        self.max_age = max_age

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.max_age == other.max_age
        return False

    def to_pb(self):
        """Convert this rule to its ``GcRule`` protobuf representation.

        :rtype: :class:`.data_pb2.GcRule`
        :returns: The converted current object.
        """
        return data_pb2.GcRule(max_age=_timedelta_to_duration_pb(self.max_age))
class GCRuleUnion(GarbageCollectionRule):
    """Union (logical OR) of several garbage collection rules.

    :type rules: list
    :param rules: List of :class:`GarbageCollectionRule`.
    """

    def __init__(self, rules):
        self.rules = rules

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.rules == other.rules
        return False

    def to_pb(self):
        """Collapse the union into a single GC rule as a protobuf.

        :rtype: :class:`.data_pb2.GcRule`
        :returns: The converted current object.
        """
        member_pbs = [member.to_pb() for member in self.rules]
        return data_pb2.GcRule(union=data_pb2.GcRule.Union(rules=member_pbs))
class GCRuleIntersection(GarbageCollectionRule):
    """Intersection (logical AND) of several garbage collection rules.

    :type rules: list
    :param rules: List of :class:`GarbageCollectionRule`.
    """

    def __init__(self, rules):
        self.rules = rules

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.rules == other.rules
        return False

    def to_pb(self):
        """Collapse the intersection into a single GC rule as a protobuf.

        :rtype: :class:`.data_pb2.GcRule`
        :returns: The converted current object.
        """
        member_pbs = [member.to_pb() for member in self.rules]
        return data_pb2.GcRule(
            intersection=data_pb2.GcRule.Intersection(rules=member_pbs))
class ColumnFamily(object):
    """Representation of a Google Cloud Bigtable Column Family.

    We can use a :class:`ColumnFamily` to:

    * :meth:`create` itself
    * :meth:`update` itself
    * :meth:`delete` itself

    :type column_family_id: str
    :param column_family_id: The ID of the column family. Must be of the
                             form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.

    :type table: :class:`Table <gcloud.bigtable.table.Table>`
    :param table: The table that owns the column family.

    :type gc_rule: :class:`GarbageCollectionRule`
    :param gc_rule: (Optional) The garbage collection settings for this
                    column family.
    """

    def __init__(self, column_family_id, table, gc_rule=None):
        self.column_family_id = column_family_id
        self._table = table
        self.gc_rule = gc_rule

    @property
    def name(self):
        """Column family name used in requests.

        .. note::

          This property will not change if ``column_family_id`` does not, but
          the return value is not cached.

        The table name is of the form

            ``"projects/../zones/../clusters/../tables/../columnFamilies/.."``

        :rtype: str
        :returns: The column family name.
        """
        return '/'.join([self._table.name, 'columnFamilies', self.column_family_id])

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return (self.column_family_id == other.column_family_id and
                self._table == other._table and
                self.gc_rule == other.gc_rule)

    def __ne__(self, other):
        return not self.__eq__(other)

    def create(self):
        """Create this column family."""
        cf_kwargs = {}
        if self.gc_rule is not None:
            cf_kwargs['gc_rule'] = self.gc_rule.to_pb()
        request_pb = messages_pb2.CreateColumnFamilyRequest(
            name=self._table.name,
            column_family_id=self.column_family_id,
            column_family=data_pb2.ColumnFamily(**cf_kwargs),
        )
        client = self._table._cluster._client
        # The returned `.data_pb2.ColumnFamily` is ignored: it only carries
        # the GC rule and the column family ID, both already stored here.
        client._table_stub.CreateColumnFamily(request_pb,
                                              client.timeout_seconds)

    def update(self):
        """Update this column family.

        .. note::

            Only the GC rule can be updated. By changing the column family ID,
            you will simply be referring to a different column family.
        """
        pb_kwargs = {'name': self.name}
        if self.gc_rule is not None:
            pb_kwargs['gc_rule'] = self.gc_rule.to_pb()
        request_pb = data_pb2.ColumnFamily(**pb_kwargs)
        client = self._table._cluster._client
        # The returned `.data_pb2.ColumnFamily` is ignored: it only carries
        # the GC rule and the column family ID, both already stored here.
        client._table_stub.UpdateColumnFamily(request_pb,
                                              client.timeout_seconds)

    def delete(self):
        """Delete this column family."""
        request_pb = messages_pb2.DeleteColumnFamilyRequest(name=self.name)
        client = self._table._cluster._client
        # We expect a `google.protobuf.empty_pb2.Empty`
        client._table_stub.DeleteColumnFamily(request_pb,
                                              client.timeout_seconds)
def _gc_rule_from_pb(gc_rule_pb):
"""Convert a protobuf GC rule to a native object.
:type gc_rule_pb: :class:`.data_pb2.GcRule`
:param gc_rule_pb: The GC rule to convert.
:rtype: :class:`GarbageCollectionRule` or :data:`NoneType <types.NoneType>`
:returns: An instance of one of the native rules defined
in :module:`column_family` or :data:`None` if no values were
set on the protobuf passed in.
:raises: :class:`ValueError <exceptions.ValueError>` if the rule name
is unexpected.
"""
rule_name = gc_rule_pb.WhichOneof('rule')
if rule_name is None:
return None
if rule_name == 'max_num_versions':
return MaxVersionsGCRule(gc_rule_pb.max_num_versions)
elif rule_name == 'max_age':
max_age = _duration_pb_to_timedelta(gc_rule_pb.max_age)
return MaxAgeGCRule(max_age)
elif rule_name == 'union':
return GCRuleUnion([_gc_rule_from_pb(rule)
for rule in gc_rule_pb.union.rules])
elif rule_name == 'intersection':
rules = [_gc_rule_from_pb(rule)
for rule in gc_rule_pb.intersection.rules]
return GCRuleIntersection(rules)
else:
raise ValueError('Unexpected rule name', rule_name)
|
{
"content_hash": "f4757ab97cb10dba0f1ca39abcc1ae4f",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 79,
"avg_line_length": 33.25961538461539,
"alnum_prop": 0.6255179724390479,
"repo_name": "huangkuan/hack",
"id": "c0d9060316a40a13d63d6b9fee11b4eaac766662",
"size": "10974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/gcloud/bigtable/column_family.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2573"
},
{
"name": "Makefile",
"bytes": "888"
},
{
"name": "Protocol Buffer",
"bytes": "90433"
},
{
"name": "Python",
"bytes": "5447434"
}
],
"symlink_target": ""
}
|
from SenseCells.tts import tts
import random
import json, requests
def who_are_you():
    """Speak a random self-introduction."""
    responses = [
        "Hi! I'm Scarlett, your own personal assistant! I'm self learning, and you to-do task list will keep on increasing!",
        "Scarlett! Didn't I tell you before?",
        "Scarlett! Didn't I tell you before? You are very forgetting!",
        "You ask that so many times! I'm Scarlett!",
    ]
    tts(random.choice(responses))
def undefined(message):
    """Log the unrecognized text and tell the user it was not understood."""
    print("The text is {}".format(message))
    tts("I dont know what that means!")
def how_am_i():
    """Speak a random compliment about the user."""
    compliments = [
        "You are very handsome!",
        "You look very good and smart",
        "My kneews go weak when I see you.",
        "Damn, I cant believe you are real, for you are that sexy!",
    ]
    tts(random.choice(compliments))
def tell_joke():
    """Fetch a random joke from the ICNDB API and speak it.

    Falls back to one of a few canned jokes when the service is
    unreachable or returns an unexpected payload.
    """
    fallbacks = [
        "What happends to a frogs car when it breaks down? It gets toad away!",
        "No, I always forget the punch line!",
        "What do I look to you? A Joker! I'm no joker! Fuck off and don't ask for a joke! Haha just kidding, shoot right away another one!",
    ]
    url = "http://api.icndb.com/jokes/random"
    try:
        # Timeout keeps the assistant responsive if the API hangs.
        resp = requests.get(url, timeout=10)
        # ICNDB wraps the joke as {"type": ..., "value": {"id": ..., "joke": ...}};
        # the original code passed the whole parsed dict to tts().
        joke = resp.json()['value']['joke']
    except (requests.RequestException, ValueError, KeyError, TypeError):
        # Network failure or malformed response: speak a canned joke instead.
        joke = random.choice(fallbacks)
    tts(joke)
def who_am_i(name):
    """Tell the user who they are, addressing them by *name*.

    :param name: the user's name, interpolated into the spoken message.
    """
    # Bug fix: the '{}' placeholder was never filled in — format with the name.
    message = "You are {}, my boss, and Scarlett is your own assistant!".format(name)
    tts(message)
def where_born():
    """Speak Scarlett's origin story."""
    tts("I was created by Debasish, a magician who brought me to life, in India! While Smoking and chilling in Himalayas!")
def how_are_you():
    """Greet the user back and ask how they are doing."""
    tts("I'm fine. Thank you! And you?")
def how_am_myself_if_fine():
    """Respond to the user saying they are doing well."""
    tts("That's great to hear you are doing fine! How can I help you today?")
def how_am_myself_not_fine():
    """Respond sympathetically when the user says they are not doing well."""
    tts("Aww! Thats so sad. Whats bothering you? Is there anything I can fulfill to make you day good?")
|
{
"content_hash": "c85db4e47ef7132422d736039b0b3264",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 148,
"avg_line_length": 28.025974025974026,
"alnum_prop": 0.6195551436515292,
"repo_name": "debasishdebs/Scarlett-PA",
"id": "b88f3ce762988387acd38ce8e8a3e03bed5274dd",
"size": "2158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GreyMatter/general_conversations.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54211"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2014,掌阅科技
All rights reserved.
摘 要: common_base.py
创 建 者: warship
创建日期: 2015-10-09
"""
from handler.bases import RestHandler
class CommonBaseHandler(RestHandler):
    """Business-logic helpers shared by the handlers of every module."""

    def parse_query(self, model):
        '''Parse the request's query arguments into peewee filter clauses.

        The request carries three parallel argument lists (optionally with
        a ``[]`` suffix):

            column:   column name on *model*
            operator: comparison operator (=, >, >=, <, <=; anything else
                      means a prefix match)
            query:    value to compare against; empty values are skipped

        Soft-deleted/disabled rows are filtered out when the model tracks
        them, and an always-true ``id >= 0`` clause is appended so the
        returned list is never empty.

        :param model: peewee model class whose columns are being queried
        :returns: list of peewee clause expressions
        '''
        column = self.get_arguments('column') or self.get_arguments('column[]')
        operator = self.get_arguments('operator') or self.get_arguments('operator[]')
        query = self.get_arguments('query') or self.get_arguments('query[]')
        clauses = []
        for i, q in enumerate(query):
            if not q:
                continue
            model_column = getattr(model, column[i])
            op = operator[i]
            if op == '=':
                clause = (model_column == q)
            elif op == '>':
                clause = (model_column > q)
            elif op == '>=':
                clause = (model_column >= q)
            elif op == '<':
                clause = (model_column < q)
            elif op == '<=':
                clause = (model_column <= q)
            else:
                if column[i] == 'id':
                    # Special-case id: LIKE is not supported, use equality.
                    clause = (model_column == q)
                else:
                    # Prefix match ("q%"); peewee's ** operator is ILIKE.
                    clause = (model_column ** "{}%".format(q))
            clauses.append(clause)
        # Exclude soft-deleted/disabled rows when the model has such a flag.
        if hasattr(model, 'deleted'):
            clauses.append(getattr(model, 'deleted') == '0')
        elif hasattr(model, 'disable'):
            clauses.append(getattr(model, 'disable') == '0')
        # Always-true clause so callers can AND the list unconditionally.
        clauses.append(getattr(model, 'id') >= 0)
        return clauses

    @staticmethod
    def ajax_ok(forward='', forward_confirm='', close_current=True):
        '''Build a BJUI "OK" (status 200) response payload.'''
        res = {
            'statusCode': '200',
            'message': '成功',
            'tabid': '_' + forward.replace('/', '_'),
            'closeCurrent': close_current,
            'forward': forward,
            'forwardConfirm': forward_confirm
        }
        return res

    @staticmethod
    def ajax_timeout(forward='', forward_confirm='', close_current=False):
        '''Build a BJUI session-timeout (status 301) response payload.'''
        res = {
            "statusCode": "301",
            "message": "会话超时",
            "closeCurrent": close_current,
            "forward": forward,
            "forwardConfirm": forward_confirm
        }
        return res

    @staticmethod
    def ajax_dialog_ok(forward='', forward_confirm='', close_current=False):
        '''Build a BJUI dialog "OK" response payload.'''
        res = {
            "statusCode": "200",
            "message": "成功",
            "tabid": "",
            "dialogid": "dialog-mask",
            "closeCurrent": close_current,
            "forward": forward,
            "forwardConfirm": forward_confirm
        }
        return res

    @staticmethod
    def ajax_popup(forward='', forward_confirm='', close_current=False, code='', msg=''):
        '''Build a BJUI popup payload with caller-supplied status code and message.'''
        res = {
            'statusCode': code,
            'message': msg,
            'closeCurrent': close_current,
            'forward': forward,
            'forwardConfirm': forward_confirm
        }
        return res
|
{
"content_hash": "9b7db59c96d8f70d5ed91d171a069632",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 89,
"avg_line_length": 29.536363636363635,
"alnum_prop": 0.47768544167436133,
"repo_name": "yanjf2015/zkdash",
"id": "6e2d84c1915294d4bc4223813e093bea482aebb4",
"size": "3492",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "handler/bases/common_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "202395"
},
{
"name": "HTML",
"bytes": "84108"
},
{
"name": "JavaScript",
"bytes": "1737634"
},
{
"name": "PHP",
"bytes": "44496"
},
{
"name": "Python",
"bytes": "116260"
},
{
"name": "Shell",
"bytes": "404"
}
],
"symlink_target": ""
}
|
from ducktape.mark import matrix
from ducktape.utils.util import wait_until
from ducktape.mark.resource import cluster
from kafkatest.tests.verifiable_consumer_test import VerifiableConsumerTest
from kafkatest.services.kafka import TopicPartition
import signal
class OffsetValidationTest(VerifiableConsumerTest):
TOPIC = "test_topic"
NUM_PARTITIONS = 1
    def __init__(self, test_context):
        # One topic with a single partition replicated across both brokers;
        # three consumers in the group and one producer feeding it.
        super(OffsetValidationTest, self).__init__(test_context, num_consumers=3, num_producers=1,
                                                   num_zk=1, num_brokers=2, topics={
            self.TOPIC : { 'partitions': self.NUM_PARTITIONS, 'replication-factor': 2 }
        })
    def rolling_bounce_consumers(self, consumer, keep_alive=0, num_bounces=5, clean_shutdown=True):
        """Bounce the consumers one at a time for ``num_bounces`` rounds.

        The first ``keep_alive`` consumer nodes are never restarted. After
        each single-node bounce the group must re-form and consume messages
        before the next node is bounced.
        """
        for _ in range(num_bounces):
            for node in consumer.nodes[keep_alive:]:
                consumer.stop_node(node, clean_shutdown)

                # Exactly one node is down at a time in a rolling bounce.
                wait_until(lambda: len(consumer.dead_nodes()) == 1,
                           timeout_sec=self.session_timeout_sec+5,
                           err_msg="Timed out waiting for the consumer to shutdown")

                consumer.start_node(node)

                self.await_all_members(consumer)
                self.await_consumed_messages(consumer)
    def bounce_all_consumers(self, consumer, keep_alive=0, num_bounces=5, clean_shutdown=True):
        """Stop and restart all non-kept-alive consumers together, repeatedly.

        After each round, waits for the group to re-form and make progress.
        """
        for _ in range(num_bounces):
            for node in consumer.nodes[keep_alive:]:
                consumer.stop_node(node, clean_shutdown)

            # All bounced members must be observed dead before restarting.
            wait_until(lambda: len(consumer.dead_nodes()) == self.num_consumers - keep_alive, timeout_sec=10,
                       err_msg="Timed out waiting for the consumers to shutdown")

            for node in consumer.nodes[keep_alive:]:
                consumer.start_node(node)

            self.await_all_members(consumer)
            self.await_consumed_messages(consumer)
    def rolling_bounce_brokers(self, consumer, num_bounces=5, clean_shutdown=True):
        """Restart every broker in turn for ``num_bounces`` rounds, waiting
        for the consumer group to stabilize and make progress after each one.

        NOTE(review): the ``clean_shutdown`` parameter is ignored — the
        restart below hardcodes clean_shutdown=True. Confirm intended.
        """
        for _ in range(num_bounces):
            for node in self.kafka.nodes:
                self.kafka.restart_node(node, clean_shutdown=True)
                self.await_all_members(consumer)
                self.await_consumed_messages(consumer)
    def setup_consumer(self, topic, **kwargs):
        """Set up a verifiable consumer whose stdout is collected for debugging."""
        # collect verifiable consumer events since this makes debugging much easier
        consumer = super(OffsetValidationTest, self).setup_consumer(topic, **kwargs)
        self.mark_for_collect(consumer, 'verifiable_consumer_stdout')
        return consumer
    @cluster(num_nodes=7)
    def test_broker_rolling_bounce(self):
        """
        Verify correct consumer behavior when the brokers are consecutively restarted.

        Setup: single Kafka cluster with one producer writing messages to a single topic with one
        partition, an a set of consumers in the same group reading from the same topic.

        - Start a producer which continues producing new messages throughout the test.
        - Start up the consumers and wait until they've joined the group.
        - In a loop, restart each broker consecutively, waiting for the group to stabilize between
          each broker restart.
        - Verify delivery semantics according to the failure type and that the broker bounces
          did not cause unexpected group rebalances.
        """
        partition = TopicPartition(self.TOPIC, 0)

        producer = self.setup_producer(self.TOPIC)
        consumer = self.setup_consumer(self.TOPIC)

        producer.start()
        self.await_produced_messages(producer)

        consumer.start()
        self.await_all_members(consumer)

        # Snapshot the rebalance count so only bounce-induced rebalances are counted.
        num_rebalances = consumer.num_rebalances()
        # TODO: make this test work with hard shutdowns, which probably requires
        #       pausing before the node is restarted to ensure that any ephemeral
        #       nodes have time to expire
        self.rolling_bounce_brokers(consumer, clean_shutdown=True)

        unexpected_rebalances = consumer.num_rebalances() - num_rebalances
        assert unexpected_rebalances == 0, \
            "Broker rolling bounce caused %d unexpected group rebalances" % unexpected_rebalances

        consumer.stop_all()

        # With no rebalances, position and total consumed must agree exactly.
        assert consumer.current_position(partition) == consumer.total_consumed(), \
            "Total consumed records %d did not match consumed position %d" % \
            (consumer.total_consumed(), consumer.current_position(partition))
    @cluster(num_nodes=7)
    @matrix(clean_shutdown=[True], bounce_mode=["all", "rolling"])
    def test_consumer_bounce(self, clean_shutdown, bounce_mode):
        """
        Verify correct consumer behavior when the consumers in the group are consecutively restarted.

        Setup: single Kafka cluster with one producer and a set of consumers in one group.

        - Start a producer which continues producing new messages throughout the test.
        - Start up the consumers and wait until they've joined the group.
        - In a loop, restart each consumer, waiting for each one to rejoin the group before
          restarting the rest.
        - Verify delivery semantics according to the failure type.
        """
        partition = TopicPartition(self.TOPIC, 0)

        producer = self.setup_producer(self.TOPIC)
        consumer = self.setup_consumer(self.TOPIC)

        producer.start()
        self.await_produced_messages(producer)

        consumer.start()
        self.await_all_members(consumer)

        # "all" bounces every member at once; "rolling" bounces one at a time.
        if bounce_mode == "all":
            self.bounce_all_consumers(consumer, clean_shutdown=clean_shutdown)
        else:
            self.rolling_bounce_consumers(consumer, clean_shutdown=clean_shutdown)

        consumer.stop_all()
        if clean_shutdown:
            # if the total records consumed matches the current position, we haven't seen any duplicates
            # this can only be guaranteed with a clean shutdown
            assert consumer.current_position(partition) == consumer.total_consumed(), \
                "Total consumed records %d did not match consumed position %d" % \
                (consumer.total_consumed(), consumer.current_position(partition))
        else:
            # we may have duplicates in a hard failure
            assert consumer.current_position(partition) <= consumer.total_consumed(), \
                "Current position %d greater than the total number of consumed records %d" % \
                (consumer.current_position(partition), consumer.total_consumed())
@cluster(num_nodes=7)
@matrix(clean_shutdown=[True], static_membership=[True, False], bounce_mode=["all", "rolling"], num_bounces=[5])
def test_static_consumer_bounce(self, clean_shutdown, static_membership, bounce_mode, num_bounces):
"""
Verify correct static consumer behavior when the consumers in the group are restarted. In order to make
sure the behavior of static members are different from dynamic ones, we take both static and dynamic
membership into this test suite.
Setup: single Kafka cluster with one producer and a set of consumers in one group.
- Start a producer which continues producing new messages throughout the test.
- Start up the consumers as static/dynamic members and wait until they've joined the group.
- In a loop, restart each consumer except the first member (note: may not be the leader), and expect no rebalance triggered
during this process if the group is in static membership.
"""
partition = TopicPartition(self.TOPIC, 0)
producer = self.setup_producer(self.TOPIC)
producer.start()
self.await_produced_messages(producer)
self.session_timeout_sec = 60
consumer = self.setup_consumer(self.TOPIC, static_membership=static_membership)
consumer.start()
self.await_all_members(consumer)
num_revokes_before_bounce = consumer.num_revokes_for_alive()
num_keep_alive = 1
if bounce_mode == "all":
self.bounce_all_consumers(consumer, keep_alive=num_keep_alive, num_bounces=num_bounces)
else:
self.rolling_bounce_consumers(consumer, keep_alive=num_keep_alive, num_bounces=num_bounces)
num_revokes_after_bounce = consumer.num_revokes_for_alive() - num_revokes_before_bounce
check_condition = num_revokes_after_bounce != 0
# under static membership, the live consumer shall not revoke any current running partitions,
# since there is no global rebalance being triggered.
if static_membership:
check_condition = num_revokes_after_bounce == 0
assert check_condition, \
"Total revoked count %d does not match the expectation of having 0 revokes as %d" % \
(num_revokes_after_bounce, check_condition)
consumer.stop_all()
if clean_shutdown:
# if the total records consumed matches the current position, we haven't seen any duplicates
# this can only be guaranteed with a clean shutdown
assert consumer.current_position(partition) == consumer.total_consumed(), \
"Total consumed records %d did not match consumed position %d" % \
(consumer.total_consumed(), consumer.current_position(partition))
else:
# we may have duplicates in a hard failure
assert consumer.current_position(partition) <= consumer.total_consumed(), \
"Current position %d greater than the total number of consumed records %d" % \
(consumer.current_position(partition), consumer.total_consumed())
@cluster(num_nodes=10)
@matrix(num_conflict_consumers=[1, 2], fencing_stage=["stable", "all"])
def test_fencing_static_consumer(self, num_conflict_consumers, fencing_stage):
"""
Verify correct static consumer behavior when there are conflicting consumers with same group.instance.id.
- Start a producer which continues producing new messages throughout the test.
- Start up the consumers as static members and wait until they've joined the group. Some conflict consumers will be configured with
- the same group.instance.id.
- Let normal consumers and fencing consumers start at the same time, and expect only unique consumers left.
"""
partition = TopicPartition(self.TOPIC, 0)
producer = self.setup_producer(self.TOPIC)
producer.start()
self.await_produced_messages(producer)
self.session_timeout_sec = 60
consumer = self.setup_consumer(self.TOPIC, static_membership=True)
self.num_consumers = num_conflict_consumers
conflict_consumer = self.setup_consumer(self.TOPIC, static_membership=True)
# wait original set of consumer to stable stage before starting conflict members.
if fencing_stage == "stable":
consumer.start()
self.await_members(consumer, len(consumer.nodes))
conflict_consumer.start()
self.await_members(conflict_consumer, num_conflict_consumers)
self.await_members(consumer, len(consumer.nodes) - num_conflict_consumers)
assert len(consumer.dead_nodes()) == num_conflict_consumers
else:
consumer.start()
conflict_consumer.start()
wait_until(lambda: len(consumer.joined_nodes()) + len(conflict_consumer.joined_nodes()) == len(consumer.nodes),
timeout_sec=self.session_timeout_sec,
err_msg="Timed out waiting for consumers to join, expected total %d joined, but only see %d joined from"
"normal consumer group and %d from conflict consumer group" % \
(len(consumer.nodes), len(consumer.joined_nodes()), len(conflict_consumer.joined_nodes()))
)
wait_until(lambda: len(consumer.dead_nodes()) + len(conflict_consumer.dead_nodes()) == len(conflict_consumer.nodes),
timeout_sec=self.session_timeout_sec,
err_msg="Timed out waiting for fenced consumers to die, expected total %d dead, but only see %d dead in"
"normal consumer group and %d dead in conflict consumer group" % \
(len(conflict_consumer.nodes), len(consumer.dead_nodes()), len(conflict_consumer.dead_nodes()))
)
@cluster(num_nodes=7)
@matrix(clean_shutdown=[True], enable_autocommit=[True, False])
def test_consumer_failure(self, clean_shutdown, enable_autocommit):
partition = TopicPartition(self.TOPIC, 0)
consumer = self.setup_consumer(self.TOPIC, enable_autocommit=enable_autocommit)
producer = self.setup_producer(self.TOPIC)
consumer.start()
self.await_all_members(consumer)
partition_owner = consumer.owner(partition)
assert partition_owner is not None
# startup the producer and ensure that some records have been written
producer.start()
self.await_produced_messages(producer)
# stop the partition owner and await its shutdown
consumer.kill_node(partition_owner, clean_shutdown=clean_shutdown)
wait_until(lambda: len(consumer.joined_nodes()) == (self.num_consumers - 1) and consumer.owner(partition) != None,
timeout_sec=self.session_timeout_sec*2+5,
err_msg="Timed out waiting for consumer to close")
# ensure that the remaining consumer does some work after rebalancing
self.await_consumed_messages(consumer, min_messages=1000)
consumer.stop_all()
if clean_shutdown:
# if the total records consumed matches the current position, we haven't seen any duplicates
# this can only be guaranteed with a clean shutdown
assert consumer.current_position(partition) == consumer.total_consumed(), \
"Total consumed records %d did not match consumed position %d" % \
(consumer.total_consumed(), consumer.current_position(partition))
else:
# we may have duplicates in a hard failure
assert consumer.current_position(partition) <= consumer.total_consumed(), \
"Current position %d greater than the total number of consumed records %d" % \
(consumer.current_position(partition), consumer.total_consumed())
# if autocommit is not turned on, we can also verify the last committed offset
if not enable_autocommit:
assert consumer.last_commit(partition) == consumer.current_position(partition), \
"Last committed offset %d did not match last consumed position %d" % \
(consumer.last_commit(partition), consumer.current_position(partition))
    @cluster(num_nodes=7)
    @matrix(clean_shutdown=[True, False], enable_autocommit=[True, False])
    def test_broker_failure(self, clean_shutdown, enable_autocommit):
        """
        Verify that killing a single broker neither triggers a consumer group
        rebalance nor breaks delivery: consumption continues, and the consumed
        position (and the committed offset, when autocommit is off) stay
        consistent with the number of records consumed.
        """
        partition = TopicPartition(self.TOPIC, 0)
        consumer = self.setup_consumer(self.TOPIC, enable_autocommit=enable_autocommit)
        producer = self.setup_producer(self.TOPIC)
        producer.start()
        consumer.start()
        self.await_all_members(consumer)
        num_rebalances = consumer.num_rebalances()
        # shutdown one of the brokers
        # TODO: we need a way to target the coordinator instead of picking arbitrarily
        self.kafka.signal_node(self.kafka.nodes[0], signal.SIGTERM if clean_shutdown else signal.SIGKILL)
        # ensure that the consumers do some work after the broker failure
        self.await_consumed_messages(consumer, min_messages=1000)
        # verify that there were no rebalances on failover
        assert num_rebalances == consumer.num_rebalances(), "Broker failure should not cause a rebalance"
        consumer.stop_all()
        # if the total records consumed matches the current position, we haven't seen any duplicates
        assert consumer.current_position(partition) == consumer.total_consumed(), \
            "Total consumed records %d did not match consumed position %d" % \
            (consumer.total_consumed(), consumer.current_position(partition))
        # if autocommit is not turned on, we can also verify the last committed offset
        if not enable_autocommit:
            assert consumer.last_commit(partition) == consumer.current_position(partition), \
                "Last committed offset %d did not match last consumed position %d" % \
                (consumer.last_commit(partition), consumer.current_position(partition))
    @cluster(num_nodes=7)
    def test_group_consumption(self):
        """
        Verifies correct group rebalance behavior as consumers are started and stopped.
        In particular, this test verifies that the partition is readable after every
        expected rebalance.
        Setup: single Kafka cluster with a group of consumers reading from one topic
        with one partition while the verifiable producer writes to it.
        - Start the consumers one by one, verifying consumption after each rebalance
        - Shutdown the consumers one by one, verifying consumption after each rebalance
        """
        consumer = self.setup_consumer(self.TOPIC)
        producer = self.setup_producer(self.TOPIC)
        partition = TopicPartition(self.TOPIC, 0)
        producer.start()
        # Ramp members up one at a time; every join triggers a rebalance.
        for num_started, node in enumerate(consumer.nodes, 1):
            consumer.start_node(node)
            self.await_members(consumer, num_started)
            self.await_consumed_messages(consumer)
        # Ramp members down; skip the consumption check once the group is empty.
        for num_stopped, node in enumerate(consumer.nodes, 1):
            consumer.stop_node(node)
            if num_stopped < self.num_consumers:
                self.await_members(consumer, self.num_consumers - num_stopped)
                self.await_consumed_messages(consumer)
        assert consumer.current_position(partition) == consumer.total_consumed(), \
            "Total consumed records %d did not match consumed position %d" % \
            (consumer.total_consumed(), consumer.current_position(partition))
        assert consumer.last_commit(partition) == consumer.current_position(partition), \
            "Last committed offset %d did not match last consumed position %d" % \
            (consumer.last_commit(partition), consumer.current_position(partition))
class AssignmentValidationTest(VerifiableConsumerTest):
    """Checks that each supported partition-assignment strategy produces a
    valid assignment (every partition owned by exactly one group member)
    after each expected rebalance.
    """
    TOPIC = "test_topic"
    NUM_PARTITIONS = 6
    def __init__(self, test_context):
        # No producer needed: only the assignment itself is validated.
        super(AssignmentValidationTest, self).__init__(test_context, num_consumers=3, num_producers=0,
                                                num_zk=1, num_brokers=2, topics={
            self.TOPIC : { 'partitions': self.NUM_PARTITIONS, 'replication-factor': 1 },
        })
    @cluster(num_nodes=6)
    @matrix(assignment_strategy=["org.apache.kafka.clients.consumer.RangeAssignor",
                                 "org.apache.kafka.clients.consumer.RoundRobinAssignor",
                                 "org.apache.kafka.clients.consumer.StickyAssignor"])
    def test_valid_assignment(self, assignment_strategy):
        """
        Verify assignment strategy correctness: each partition is assigned to exactly
        one consumer instance.
        Setup: single Kafka cluster with a set of consumers in the same group.
        - Start the consumers one by one
        - Validate assignment after every expected rebalance
        """
        consumer = self.setup_consumer(self.TOPIC, assignment_strategy=assignment_strategy)
        for num_started, node in enumerate(consumer.nodes, 1):
            consumer.start_node(node)
            self.await_members(consumer, num_started)
            assert self.valid_assignment(self.TOPIC, self.NUM_PARTITIONS, consumer.current_assignment()), \
                "expected valid assignments of %d partitions when num_started %d: %s" % \
                (self.NUM_PARTITIONS, num_started, \
                 [(str(node.account), a) for node, a in consumer.current_assignment().items()])
|
{
"content_hash": "778ab060ff2c972cd2f23f8c66adc2cd",
"timestamp": "",
"source": "github",
"line_count": 415,
"max_line_length": 139,
"avg_line_length": 48.70120481927711,
"alnum_prop": 0.6539013408539904,
"repo_name": "noslowerdna/kafka",
"id": "131123f55ff3ce43591daa0e1cd6d95a0e12a30a",
"size": "20992",
"binary": false,
"copies": "3",
"ref": "refs/heads/trunk",
"path": "tests/kafkatest/tests/client/consumer_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "30250"
},
{
"name": "Dockerfile",
"bytes": "6334"
},
{
"name": "HTML",
"bytes": "3739"
},
{
"name": "Java",
"bytes": "19769473"
},
{
"name": "Python",
"bytes": "909872"
},
{
"name": "Scala",
"bytes": "6861865"
},
{
"name": "Shell",
"bytes": "96905"
},
{
"name": "XSLT",
"bytes": "7116"
}
],
"symlink_target": ""
}
|
"""Logging utilities."""
# pylint: disable=unused-import
# pylint: disable=g-bad-import-order
# pylint: disable=invalid-name
import logging as _logging
import os as _os
import sys as _sys
import _thread
import time as _time
import traceback as _traceback
from logging import DEBUG
from logging import ERROR
from logging import FATAL
from logging import INFO
from logging import WARN
import threading
from tensorflow.python.util.tf_export import tf_export
# Don't use this directly. Use get_logger() instead.
_logger = None  # lazily-created singleton `logging.Logger`; see get_logger()
_logger_lock = threading.Lock()  # guards first-time creation of _logger
def _get_caller(offset=3):
"""Returns a code and frame object for the lowest non-logging stack frame."""
# Use sys._getframe(). This avoids creating a traceback object.
# pylint: disable=protected-access
f = _sys._getframe(offset)
# pylint: enable=protected-access
our_file = f.f_code.co_filename
f = f.f_back
while f:
code = f.f_code
if code.co_filename != our_file:
return code, f
f = f.f_back
return None, None
# The definition of `findCaller` changed in Python 3.2,
# and further changed in Python 3.8
if _sys.version_info.major >= 3 and _sys.version_info.minor >= 8:
def _logger_find_caller(stack_info=False, stacklevel=1): # pylint: disable=g-wrong-blank-lines
code, frame = _get_caller(4)
sinfo = None
if stack_info:
sinfo = '\n'.join(_traceback.format_stack())
if code:
return (code.co_filename, frame.f_lineno, code.co_name, sinfo)
else:
return '(unknown file)', 0, '(unknown function)', sinfo
elif _sys.version_info.major >= 3 and _sys.version_info.minor >= 2:
def _logger_find_caller(stack_info=False): # pylint: disable=g-wrong-blank-lines
code, frame = _get_caller(4)
sinfo = None
if stack_info:
sinfo = '\n'.join(_traceback.format_stack())
if code:
return (code.co_filename, frame.f_lineno, code.co_name, sinfo)
else:
return '(unknown file)', 0, '(unknown function)', sinfo
else:
def _logger_find_caller(): # pylint: disable=g-wrong-blank-lines
code, frame = _get_caller(4)
if code:
return (code.co_filename, frame.f_lineno, code.co_name)
else:
return '(unknown file)', 0, '(unknown function)'
@tf_export('get_logger')
def get_logger():
  """Return TF logger instance.

  Lazily creates and caches a `logging.Logger` named 'tensorflow' on first
  use; all later calls return the same cached instance.
  """
  global _logger
  # Use double-checked locking to avoid taking lock unnecessarily.
  if _logger:
    return _logger
  _logger_lock.acquire()
  try:
    # Re-check under the lock: another thread may have created it already.
    if _logger:
      return _logger
    # Scope the TensorFlow logger to not conflict with users' loggers.
    logger = _logging.getLogger('tensorflow')
    # Override findCaller on the logger to skip internal helper functions
    logger.findCaller = _logger_find_caller
    # Don't further configure the TensorFlow logger if the root logger is
    # already configured. This prevents double logging in those cases.
    if not _logging.getLogger().handlers:
      # Determine whether we are in an interactive environment
      _interactive = False
      try:
        # This is only defined in interactive shells.
        if _sys.ps1: _interactive = True
      except AttributeError:
        # Even now, we may be in an interactive shell with `python -i`.
        _interactive = _sys.flags.interactive
      # If we are in an interactive environment (like Jupyter), set loglevel
      # to INFO and pipe the output to stdout.
      if _interactive:
        logger.setLevel(INFO)
        _logging_target = _sys.stdout
      else:
        _logging_target = _sys.stderr
      # Add the output handler.
      _handler = _logging.StreamHandler(_logging_target)
      _handler.setFormatter(_logging.Formatter(_logging.BASIC_FORMAT, None))
      logger.addHandler(_handler)
    _logger = logger
    return _logger
  finally:
    _logger_lock.release()
@tf_export(v1=['logging.log'])
def log(level, msg, *args, **kwargs):
  """Logs `msg % args` at the given integer `level` on the TF logger."""
  get_logger().log(level, msg, *args, **kwargs)
@tf_export(v1=['logging.debug'])
def debug(msg, *args, **kwargs):
  """Logs `msg % args` at DEBUG level."""
  get_logger().debug(msg, *args, **kwargs)
@tf_export(v1=['logging.error'])
def error(msg, *args, **kwargs):
  """Logs `msg % args` at ERROR level."""
  get_logger().error(msg, *args, **kwargs)
@tf_export(v1=['logging.fatal'])
def fatal(msg, *args, **kwargs):
  """Logs `msg % args` via the logger's `fatal` method."""
  get_logger().fatal(msg, *args, **kwargs)
@tf_export(v1=['logging.info'])
def info(msg, *args, **kwargs):
  """Logs `msg % args` at INFO level."""
  get_logger().info(msg, *args, **kwargs)
@tf_export(v1=['logging.warn'])
def warn(msg, *args, **kwargs):
  """Alias for `warning`; delegates to `Logger.warning`."""
  get_logger().warning(msg, *args, **kwargs)
@tf_export(v1=['logging.warning'])
def warning(msg, *args, **kwargs):
  """Logs `msg % args` at WARNING level."""
  get_logger().warning(msg, *args, **kwargs)
# Human-readable names for each severity level; the first letter is used as
# the severity column by google2_log_prefix.
_level_names = {
    FATAL: 'FATAL',
    ERROR: 'ERROR',
    WARN: 'WARN',
    INFO: 'INFO',
    DEBUG: 'DEBUG',
}
# Mask to convert integer thread ids to unsigned quantities for logging
# purposes
_THREAD_ID_MASK = 2 * _sys.maxsize + 1
_log_prefix = None  # later set to google2_log_prefix
# Counter to keep track of number of log entries per token.
# Maps a (filename, line) token to the number of log calls seen for it;
# consumed by log_every_n / log_first_n via _GetNextLogCountPerToken.
_log_counter_per_token = {}
@tf_export(v1=['logging.TaskLevelStatusMessage'])
def TaskLevelStatusMessage(msg):
  """Logs `msg` at ERROR level."""
  error(msg)
@tf_export(v1=['logging.flush'])
def flush():
  """Unsupported in this implementation; always raises NotImplementedError."""
  raise NotImplementedError()
# Code below is taken from pyglib/logging
@tf_export(v1=['logging.vlog'])
def vlog(level, msg, *args, **kwargs):
  """Logs `msg % args` at the given verbosity `level` (same as `log`)."""
  get_logger().log(level, msg, *args, **kwargs)
def _GetNextLogCountPerToken(token):
  """Wrapper for _log_counter_per_token.

  Args:
    token: The token for which to look up the count.

  Returns:
    The number of times this function has been called with
    *token* as an argument (starting at 0)
  """
  global _log_counter_per_token  # pylint: disable=global-variable-not-assigned
  # Missing tokens start at -1 so the first call for a token returns 0.
  next_count = _log_counter_per_token.get(token, -1) + 1
  _log_counter_per_token[token] = next_count
  return next_count
@tf_export(v1=['logging.log_every_n'])
def log_every_n(level, msg, n, *args):
  """Log 'msg % args' at level 'level' once per 'n' times.
  Logs the 1st call, (N+1)st call, (2N+1)st call,  etc.
  Not threadsafe.
  Args:
    level: The level at which to log.
    msg: The message to be logged.
    n: The number of times this should be called before it is logged.
    *args: The args to be substituted into the msg.
  """
  count = _GetNextLogCountPerToken(_GetFileAndLine())
  # `not (count % n)` is true on calls 0, n, 2n, ... for this call site.
  log_if(level, msg, not (count % n), *args)
@tf_export(v1=['logging.log_first_n'])
def log_first_n(level, msg, n, *args):  # pylint: disable=g-bad-name
  """Log 'msg % args' at level 'level' only first 'n' times.
  Not threadsafe.
  Args:
    level: The level at which to log.
    msg: The message to be logged.
    n: The number of times this should be called before it is logged.
    *args: The args to be substituted into the msg.
  """
  # The counter is keyed by (filename, line), so each call site has its
  # own independent budget of n messages.
  count = _GetNextLogCountPerToken(_GetFileAndLine())
  log_if(level, msg, count < n, *args)
@tf_export(v1=['logging.log_if'])
def log_if(level, msg, condition, *args):
  """Log 'msg % args' at level 'level' only if condition is fulfilled."""
  if condition:
    vlog(level, msg, *args)
def _GetFileAndLine():
  """Returns (filename, linenumber) for the stack frame."""
  code, frame = _get_caller()
  if code is None:
    # No non-logging frame was found on the stack.
    return ('<unknown>', 0)
  return (code.co_filename, frame.f_lineno)
def google2_log_prefix(level, timestamp=None, file_and_line=None):
  """Assemble a logline prefix using the google2 format.
  Args:
    level: Logging level; its name's first letter becomes the severity column.
    timestamp: Optional epoch seconds; defaults to the current time.
    file_and_line: Optional (filename, line) pair; defaults to the caller's.
  Returns:
    A prefix string of the form 'Smmdd hh:mm:ss.uuuuuu thread file:line] '.
  """
  # pylint: disable=global-variable-not-assigned
  global _level_names
  # pylint: enable=global-variable-not-assigned
  # Record current time
  now = timestamp or _time.time()
  now_tuple = _time.localtime(now)
  now_microsecond = int(1e6 * (now % 1.0))
  (filename, line) = file_and_line or _GetFileAndLine()
  basename = _os.path.basename(filename)
  # Severity string
  severity = 'I'
  if level in _level_names:
    severity = _level_names[level][0]
  s = '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] ' % (
      severity,
      now_tuple[1],  # month
      now_tuple[2],  # day
      now_tuple[3],  # hour
      now_tuple[4],  # min
      now_tuple[5],  # sec
      now_microsecond,
      _get_thread_id(),
      basename,
      line)
  return s
@tf_export(v1=['logging.get_verbosity'])
def get_verbosity():
  """Return how much logging output will be produced."""
  # Effective level accounts for levels inherited from ancestor loggers.
  return get_logger().getEffectiveLevel()
@tf_export(v1=['logging.set_verbosity'])
def set_verbosity(v):
  """Sets the threshold for what messages will be logged."""
  get_logger().setLevel(v)
def _get_thread_id():
  """Get id of current thread, suitable for logging as an unsigned quantity."""
  # Masking keeps the id within the platform word size so it never renders
  # as a negative number.
  return _thread.get_ident() & _THREAD_ID_MASK
# Install the google2-format prefix builder defined above.
_log_prefix = google2_log_prefix
# Re-export the standard logging level constants under the v1 logging API.
tf_export(v1=['logging.DEBUG']).export_constant(__name__, 'DEBUG')
tf_export(v1=['logging.ERROR']).export_constant(__name__, 'ERROR')
tf_export(v1=['logging.FATAL']).export_constant(__name__, 'FATAL')
tf_export(v1=['logging.INFO']).export_constant(__name__, 'INFO')
tf_export(v1=['logging.WARN']).export_constant(__name__, 'WARN')
|
{
"content_hash": "0602e7fb4247932a903979bae739c705",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 97,
"avg_line_length": 28.21518987341772,
"alnum_prop": 0.6654329295648272,
"repo_name": "yongtang/tensorflow",
"id": "168dd34aa227e31eb5c61e31df866f60ca56d8da",
"size": "9606",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/platform/tf_logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1368342"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "125162438"
},
{
"name": "CMake",
"bytes": "179878"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2118448"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792868"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11205807"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300198"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42642473"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621427"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14607"
},
{
"name": "Starlark",
"bytes": "7577804"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
__author__ = 'ad'
import os.path
from collections import OrderedDict
from pyapi.libraries.pyraml_parser_master import pyraml
from pyapi.libraries.pyraml_parser_master.pyraml import parser
from pyapi.libraries.pyraml_parser_master.pyraml.entities import RamlResource, RamlMethod, RamlQueryParameter
fixtures_dir = os.path.join(os.path.dirname(__file__), '../', 'samples')
def test_resource_nested():
    """Exercise nested resource parsing: /media with /search and /tags
    sub-resources, plus a nested /tags/search resource, verifying display
    names, descriptions, methods and parentResource back-links.
    """
    p = pyraml.parser.load(os.path.join(fixtures_dir, '../samples/resource-nested.yaml'))
    assert isinstance(p.resources, OrderedDict), p.resources
    assert len(p.resources) == 1, p.resources
    # Validate root resource
    assert "/media" in p.resources, p.resources
    root_resource = p.resources["/media"]
    assert isinstance(root_resource, RamlResource), p.resources
    assert root_resource.parentResource is None, p.resources
    assert root_resource.methods is not None, p.resources
    assert root_resource.description == "Media Description", root_resource
    assert "get" in root_resource.methods, p.resources
    assert isinstance(root_resource.methods["get"], RamlMethod), p.resources
    assert root_resource.methods["get"].notNull, p.resources
    # validate sub-resources
    assert root_resource.resources is not None, root_resource
    # Plain membership checks below: the previous chained form
    # `"/search" in root_resource.resources is not None` also compared the
    # dict to None, which always held and obscured the intent.
    assert "/search" in root_resource.resources, root_resource
    assert root_resource.resources["/search"].displayName == "Media Search", root_resource
    assert root_resource.resources["/search"].description == "Media Search Description", root_resource
    assert "get" in root_resource.resources["/search"].methods, root_resource
    assert root_resource.resources["/search"].methods["get"].notNull, root_resource
    assert "/tags" in root_resource.resources, root_resource
    assert root_resource.resources["/tags"].displayName == "Tags", root_resource
    assert root_resource.resources["/tags"].description == "Tags Description", root_resource
    assert "get" in root_resource.resources["/tags"].methods, root_resource
    assert root_resource.resources["/tags"].methods["get"].notNull, root_resource
    # /media/tags has their own resource /search
    tag_resource = root_resource.resources["/tags"]
    assert tag_resource.resources is not None, tag_resource
    assert "/search" in tag_resource.resources, tag_resource
    assert tag_resource.resources["/search"].displayName == "Tag Search", tag_resource
    assert tag_resource.resources["/search"].description == "Tag Search Description", tag_resource
    assert tag_resource.resources["/search"].methods["get"].notNull, root_resource
    # Ensure than every sub-resource have correct parentResource
    assert root_resource.resources["/search"].parentResource is root_resource
    assert root_resource.resources["/tags"].parentResource is root_resource
    assert tag_resource.resources["/search"].parentResource is tag_resource
def test_resource_with_responses():
    """GET /leagues must declare a 200 response with both JSON and XML bodies."""
    p = pyraml.parser.load(os.path.join(fixtures_dir, '../samples/null-elements.yaml'))
    assert isinstance(p.resources, OrderedDict), p.resources
    assert "/leagues" in p.resources, p
    leagues_resource = p.resources["/leagues"]
    assert leagues_resource.displayName == "Leagues", leagues_resource
    assert leagues_resource.description is None, leagues_resource
    assert leagues_resource.methods, leagues_resource
    assert leagues_resource.methods["get"], leagues_resource
    leagues_resource_get = leagues_resource.methods["get"]
    assert leagues_resource_get.responses, leagues_resource_get
    assert leagues_resource_get.responses[200], leagues_resource_get
    assert leagues_resource_get.responses[200].body, leagues_resource_get
    # Both content types must be present on the 200 response body.
    assert "application/json" in leagues_resource_get.responses[200].body, leagues_resource_get
    assert "text/xml" in leagues_resource_get.responses[200].body, leagues_resource_get
def test_resource_with_params():
    """Verify query-parameter metadata (type, bounds, enum, default, flags)
    is parsed for GET /simple.
    """
    p = pyraml.parser.load(os.path.join(fixtures_dir, '../samples/params', 'param-types.yaml'))
    assert isinstance(p.resources, OrderedDict), p.resources
    assert "/simple" in p.resources, p
    simple_res = p.resources["/simple"]
    assert "get" in simple_res.methods, simple_res
    queryParameters = simple_res.methods["get"].queryParameters
    assert "name" in queryParameters, queryParameters
    assert "age" in queryParameters, queryParameters
    assert "price" in queryParameters, queryParameters
    assert "time" in queryParameters, queryParameters
    assert "alive" in queryParameters, queryParameters
    assert "default-enum" in queryParameters, queryParameters
    queryParam1 = queryParameters["name"]
    assert isinstance(queryParam1, RamlQueryParameter), queryParam1
    assert queryParam1.example == "two", queryParam1
    assert queryParam1.enum == ["one", "two", "three"], queryParam1
    assert queryParam1.displayName == "name name", queryParam1
    assert queryParam1.description == "name description"
    assert queryParam1.default == "three", queryParam1
    assert queryParam1.minLength == 3, queryParam1
    assert queryParam1.type == "string", queryParam1
    assert queryParam1.maxLength == 5, queryParam1
    assert queryParam1.pattern == '[a-z]{3,5}', queryParam1
    # Identity checks assert real booleans; `== False` (PEP 8 E712) would
    # also accept any falsy value such as 0.
    assert queryParam1.required is False, queryParam1
    assert queryParam1.repeat is False, queryParam1
|
{
"content_hash": "0dbe41f1f7f04bf3e2053454e7b25788",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 109,
"avg_line_length": 47.95495495495496,
"alnum_prop": 0.742250610557956,
"repo_name": "mpetyx/pyapi",
"id": "c081296dd7fc3ee4d8665f48c7999049123b44a8",
"size": "5323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/raml/tests/test_resources.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Cucumber",
"bytes": "443"
},
{
"name": "Python",
"bytes": "147307"
}
],
"symlink_target": ""
}
|
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """`python setup.py test` command that delegates to pytest.

    NOTE(review): `setuptools.command.test` is deprecated/removed in recent
    setuptools releases -- confirm the targeted setuptools version.
    """
    def finalize_options(self):
        # Forward no extra CLI arguments to pytest.
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # Imported here because pytest may not be installed at setup time.
        import pytest
        errcode = pytest.main(self.test_args)
        # Propagate pytest's exit status to the calling process.
        sys.exit(errcode)
# Long-description sources.  Context managers close the file handles
# deterministically; the previous `open(...).read()` form left both files
# open until garbage collection.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    # Strip the changelog marker comment from the rendered history.
    history = history_file.read().replace('.. :changelog:', '')

# Runtime dependencies installed with the package.
requirements = [
    "pycrypto",
    "coloredlogs"
]

# Extra dependencies needed only to run the test suite.
test_requirements = [
    'pytest',
    'lettuce'
]
# Package metadata and distribution configuration.
setup(
    name='flypwd',
    version='1.1.5',
    description='Library to store and retrieve passwords',
    long_description=readme + '\n\n' + history,
    author='Giuseppe Acito',
    author_email='giuseppe.acito@gmail.com',
    url='https://github.com/giupo/flypwd',
    packages=[
        'flypwd',
    ],
    package_dir={'flypwd':
                 'flypwd'},
    include_package_data=True,
    install_requires=requirements,
    license="BSD",
    zip_safe=False,
    keywords='flypwd',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
    ],
    # Wire `python setup.py test` to the pytest-based command above.
    cmdclass={'test': PyTest},
    test_suite='tests',
    tests_require=test_requirements,
    # Installs the `flypwd` console command pointing at flypwd:main.
    entry_points={
        'console_scripts':['flypwd=flypwd:main']
    }
)
|
{
"content_hash": "dab2d214c425e102f41007af1b6f2370",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 66,
"avg_line_length": 25.35820895522388,
"alnum_prop": 0.6038846380223661,
"repo_name": "giupo/flypwd",
"id": "d389c9ea89fb335331679aaae85b29319e19dfe9",
"size": "1746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "310"
},
{
"name": "Gherkin",
"bytes": "540"
},
{
"name": "Makefile",
"bytes": "1300"
},
{
"name": "Python",
"bytes": "14122"
}
],
"symlink_target": ""
}
|
__author__ = 'shelman'
'''
JSON API definition.
'''
import json, logging, inspect, functools
class APIError(Exception):
    """Base API error.

    Attributes:
        error:   required machine-readable error code.
        data:    optional payload describing the error (e.g. a field name).
        message: optional human-readable text; also passed to `Exception`,
                 so `str(e)` returns it.
    """

    def __init__(self, error, data='', message=''):
        # Record the structured fields, then let Exception handle the
        # message so args/str() behave like a normal exception.
        self.error = error
        self.data = data
        self.message = message
        super(APIError, self).__init__(message)
class APIValueError(APIError):
    """Raised when an input value is missing or invalid.

    ``data`` carries the name of the offending input-form field.
    """

    def __init__(self, field, message=''):
        error_code = 'value:invalid'
        super(APIValueError, self).__init__(error_code, field, message)
class APIResourceNotFoundError(APIError):
    """Raised when a requested resource does not exist.

    ``data`` carries the name of the missing resource.
    """

    def __init__(self, field, message=''):
        error_code = 'value:notfound'
        super(APIResourceNotFoundError, self).__init__(error_code, field, message)
class APIPermissionError(APIError):
    """Raised when the caller lacks permission for the requested API."""

    def __init__(self, message=''):
        error_code = 'permission:forbidden'
        super(APIPermissionError, self).__init__(error_code, 'permission', message)
|
{
"content_hash": "b5992b0d0d6bb0ed3fb4772ab87bca6e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 100,
"avg_line_length": 30.973684210526315,
"alnum_prop": 0.6457094307561597,
"repo_name": "Shelmanxie/blogAppWeb",
"id": "211e45c373f64e744047062e97611c58b0feb170",
"size": "1225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blogWebApp/www/apis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "191"
},
{
"name": "HTML",
"bytes": "6426"
},
{
"name": "JavaScript",
"bytes": "14140"
},
{
"name": "Python",
"bytes": "136302"
}
],
"symlink_target": ""
}
|
import threading
from .local import Proxy
# NOTE: these double-underscore names are module-private by convention only;
# name mangling applies inside class bodies, not at module level.
__bus = None  # process-wide current bus, set via set_current_bus()
__local = threading.local()  # per-thread storage for the transport message
def get_current_bus():
    """Gets the current bus (``None`` until ``set_current_bus`` is called)."""
    return __bus
def set_current_bus(bus):
    """Sets the current bus."""
    global __bus
    __bus = bus
def get_transport_message():
    """Gets the transport message received by the current thread.

    Returns:
        The message previously stored via ``set_transport_message`` on this
        thread, or ``None`` if none is set (storage is thread-local).
    """
    # getattr with a default replaces the hasattr/return-None dance.
    return getattr(__local, 'transport_message', None)
def set_transport_message(message):
    """Sets (or clears) the transport message for the current thread.

    A truthy ``message`` is stored in thread-local storage; a falsy value
    clears it. Clearing when nothing is stored is a no-op instead of
    raising AttributeError (the original ``del`` raised if the attribute
    had never been set on this thread).
    """
    if message:
        __local.transport_message = message
    elif hasattr(__local, 'transport_message'):
        del __local.transport_message
# Lazy module-level proxies: each attribute access resolves to the value
# returned by the wrapped getter at that moment.
current_bus = Proxy(get_current_bus)
transport_message = Proxy(get_transport_message)
|
{
"content_hash": "6594147e0833265dd0b8287f119d62cf",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 48,
"avg_line_length": 18.842105263157894,
"alnum_prop": 0.6522346368715084,
"repo_name": "viniciuschiele/simplebus",
"id": "44b4c06beff05bca2b0aba6bfc9bd744ecbdb96a",
"size": "1318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simplebus/state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "71433"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import unittest
import spotify
import tests
from tests import mock
@mock.patch('spotify.playlist_unseen_tracks.lib', spec=spotify.lib)
class PlaylistUnseenTracksTest(unittest.TestCase):
    """Tests for spotify.PlaylistUnseenTracks.

    The class decorator patches the libspotify FFI wrapper module-wide, so
    every test method receives ``lib_mock`` as its last positional argument.
    Methods additionally decorated with ``mock.patch('spotify.track.lib')``
    receive ``track_lib_mock`` before it (decorators apply bottom-up).
    """
    # TODO Test that the collection releases sp_playlistcontainer and
    # sp_playlist when no longer referenced.
    def setUp(self):
        self.session = tests.create_session_mock()
    @mock.patch('spotify.track.lib', spec=spotify.lib)
    def test_normal_usage(self, track_lib_mock, lib_mock):
        """Tracks are retrieved lazily and cached after first access."""
        sp_playlistcontainer = spotify.ffi.cast('sp_playlistcontainer *', 42)
        sp_playlist = spotify.ffi.cast('sp_playlist *', 43)
        total_num_tracks = 3
        sp_tracks = [
            spotify.ffi.cast('sp_track *', 44 + i)
            for i in range(total_num_tracks)
        ]
        def func(sp_pc, sp_p, sp_t, num_t):
            # Fake sp_playlistcontainer_get_unseen_tracks: fill the output
            # array with at most num_t tracks; always return the total count.
            for i in range(min(total_num_tracks, num_t)):
                sp_t[i] = sp_tracks[i]
            return total_num_tracks
        lib_mock.sp_playlistcontainer_get_unseen_tracks.side_effect = func
        tracks = spotify.PlaylistUnseenTracks(
            self.session, sp_playlistcontainer, sp_playlist
        )
        # Collection keeps references to container and playlist:
        lib_mock.sp_playlistcontainer_add_ref.assert_called_with(
            sp_playlistcontainer
        )
        lib_mock.sp_playlist_add_ref.assert_called_with(sp_playlist)
        # Getting collection and length causes no tracks to be retrieved:
        self.assertEqual(len(tracks), total_num_tracks)
        self.assertEqual(
            lib_mock.sp_playlistcontainer_get_unseen_tracks.call_count, 1
        )
        lib_mock.sp_playlistcontainer_get_unseen_tracks.assert_called_with(
            sp_playlistcontainer, sp_playlist, mock.ANY, 0
        )
        # Getting items causes more tracks to be retrieved:
        track0 = tracks[0]
        self.assertEqual(
            lib_mock.sp_playlistcontainer_get_unseen_tracks.call_count, 2
        )
        lib_mock.sp_playlistcontainer_get_unseen_tracks.assert_called_with(
            sp_playlistcontainer, sp_playlist, mock.ANY, total_num_tracks
        )
        self.assertIsInstance(track0, spotify.Track)
        self.assertEqual(track0._sp_track, sp_tracks[0])
        # Getting already retrieved tracks causes no new retrieval:
        track1 = tracks[1]
        self.assertEqual(
            lib_mock.sp_playlistcontainer_get_unseen_tracks.call_count, 2
        )
        self.assertIsInstance(track1, spotify.Track)
        self.assertEqual(track1._sp_track, sp_tracks[1])
        # Getting item with negative index
        track2 = tracks[-3]
        self.assertEqual(track2._sp_track, track0._sp_track)
        self.assertEqual(
            lib_mock.sp_playlistcontainer_get_unseen_tracks.call_count, 2
        )
    def test_raises_error_on_failure(self, lib_mock):
        """A negative libspotify return code raises spotify.Error."""
        sp_playlistcontainer = spotify.ffi.cast('sp_playlistcontainer *', 42)
        sp_playlist = spotify.ffi.cast('sp_playlist *', 43)
        lib_mock.sp_playlistcontainer_get_unseen_tracks.return_value = -3
        with self.assertRaises(spotify.Error):
            spotify.PlaylistUnseenTracks(
                self.session, sp_playlistcontainer, sp_playlist
            )
    @mock.patch('spotify.track.lib', spec=spotify.lib)
    def test_getitem_with_slice(self, track_lib_mock, lib_mock):
        """Slicing returns a plain list of Track objects."""
        sp_playlistcontainer = spotify.ffi.cast('sp_playlistcontainer *', 42)
        sp_playlist = spotify.ffi.cast('sp_playlist *', 43)
        total_num_tracks = 3
        sp_tracks = [
            spotify.ffi.cast('sp_track *', 44 + i)
            for i in range(total_num_tracks)
        ]
        def func(sp_pc, sp_p, sp_t, num_t):
            # Same fake retrieval helper as in test_normal_usage.
            for i in range(min(total_num_tracks, num_t)):
                sp_t[i] = sp_tracks[i]
            return total_num_tracks
        lib_mock.sp_playlistcontainer_get_unseen_tracks.side_effect = func
        tracks = spotify.PlaylistUnseenTracks(
            self.session, sp_playlistcontainer, sp_playlist
        )
        result = tracks[0:2]
        # Only a subslice of length 2 is returned
        self.assertIsInstance(result, list)
        self.assertEqual(len(result), 2)
        self.assertIsInstance(result[0], spotify.Track)
        self.assertEqual(result[0]._sp_track, sp_tracks[0])
        self.assertIsInstance(result[1], spotify.Track)
        self.assertEqual(result[1]._sp_track, sp_tracks[1])
    def test_getitem_raises_index_error_on_too_low_index(self, lib_mock):
        """Negative index beyond the collection length raises IndexError."""
        sp_playlistcontainer = spotify.ffi.cast('sp_playlistcontainer *', 42)
        sp_playlist = spotify.ffi.cast('sp_playlist *', 43)
        lib_mock.sp_playlistcontainer_get_unseen_tracks.return_value = 0
        tracks = spotify.PlaylistUnseenTracks(
            self.session, sp_playlistcontainer, sp_playlist
        )
        with self.assertRaises(IndexError) as ctx:
            tracks[-1]
        self.assertEqual(str(ctx.exception), 'list index out of range')
    def test_getitem_raises_index_error_on_too_high_index(self, lib_mock):
        """Index beyond the collection length raises IndexError."""
        sp_playlistcontainer = spotify.ffi.cast('sp_playlistcontainer *', 42)
        sp_playlist = spotify.ffi.cast('sp_playlist *', 43)
        lib_mock.sp_playlistcontainer_get_unseen_tracks.return_value = 0
        tracks = spotify.PlaylistUnseenTracks(
            self.session, sp_playlistcontainer, sp_playlist
        )
        with self.assertRaises(IndexError) as ctx:
            tracks[1]
        self.assertEqual(str(ctx.exception), 'list index out of range')
    def test_getitem_raises_type_error_on_non_integral_index(self, lib_mock):
        """Non-integer, non-slice subscripts raise TypeError."""
        sp_playlistcontainer = spotify.ffi.cast('sp_playlistcontainer *', 42)
        sp_playlist = spotify.ffi.cast('sp_playlist *', 43)
        lib_mock.sp_playlistcontainer_get_unseen_tracks.return_value = 0
        tracks = spotify.PlaylistUnseenTracks(
            self.session, sp_playlistcontainer, sp_playlist
        )
        with self.assertRaises(TypeError):
            tracks['abc']
    def test_repr(self, lib_mock):
        """repr() lists the (empty) track collection."""
        sp_playlistcontainer = spotify.ffi.cast('sp_playlistcontainer *', 42)
        sp_playlist = spotify.ffi.cast('sp_playlist *', 43)
        lib_mock.sp_playlistcontainer_get_unseen_tracks.return_value = 0
        tracks = spotify.PlaylistUnseenTracks(
            self.session, sp_playlistcontainer, sp_playlist
        )
        self.assertEqual(repr(tracks), 'PlaylistUnseenTracks([])')
|
{
"content_hash": "e171c218e32a19cd00c5dfb07a613f74",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 77,
"avg_line_length": 38.194117647058825,
"alnum_prop": 0.6396118897273987,
"repo_name": "mopidy/pyspotify",
"id": "0cbd8e5aebcf70d9ece94a7ed03486d65f4f5c66",
"size": "6493",
"binary": false,
"copies": "1",
"ref": "refs/heads/v2.x/master",
"path": "tests/test_playlist_unseen_tracks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "171987"
},
{
"name": "Python",
"bytes": "642108"
}
],
"symlink_target": ""
}
|
"""
Classes that generate content for ImgDataset.
These classes deal with the distributin of labour. The work may be done
online, in worker threads or in owrker processes.
"""
__authors__ = "Nicu Tofan"
__copyright__ = "Copyright 2015, Nicu Tofan"
__credits__ = ["Nicu Tofan"]
__license__ = "3-clause BSD"
__maintainer__ = "Nicu Tofan"
__email__ = "nicu.tofan@gmail.com"
import cProfile
from datetime import datetime
import dill
import functools
import logging
import multiprocessing
import numpy
import os
import Queue
import threading
import time
import zmq
#from pyl2extra.datasets.img_dataset.dataset import ImgDataset
from pyl2extra.utils import slice_count
class Generator(object):
    """
    Abstract base class for content generators.

    Subclasses decide where the processing work happens (inline, worker
    threads or worker processes) and implement is_inline() and get().
    """
    def __init__(self):
        #: associated dataset - the bond is created in setup() method
        self.dataset = None
        super(Generator, self).__init__()
    def is_inline(self):
        """
        Tell if this generator works on the same thread as the requester.
        Returns
        -------
        inline : bool
            True if the thread will block waiting for the result, False if
            the result is generated in parallel.
        """
        raise NotImplementedError()
    def setup(self, dataset):
        """
        Called by the dataset once it initialized itself.
        """
        self.dataset = dataset
        #assert isinstance(dataset, ImgDataset)
    def tear_down(self):
        """
        Called by the dataset from its tear_down() method.
        """
        pass
    def __hash__(self):
        """
        Called by built-in function hash() and for operations on members
        of hashed collections including set, frozenset, and dict.
        """
        # NOTE: all instances of the same class hash identically.
        return hash(self.__class__.__name__)
    def get(self, source, next_index):
        """
        The get method used by the dataset to retrieve batches of data.
        Parameters
        ----------
        source : tuple of str
            A tuple of source identifiers (strings) to indicate the
            source for the data to retrieve. The iterator will receive
            a ``data_specs`` argument consisting of ``(space, source)``.
        next_index : list or slice object
            The indexes of the examples to retrieve specified either as a
            list or as a slice.
        Returns
        -------
        next_batch : tuple
            The result is a tuple of batches, one for each ``source`` that
            was requested. Each batch in the tuple should follow the
            dataspecs for the dataset.
        """
        raise NotImplementedError()
    def _prep_get(self, source, next_index):
        """
        Common operations for a get() call.

        Allocates a zero-filled output array for each requested source
        ('features' or 'targets') and returns (count, result list,
        features index, targets index); an index of -1 means the source
        was not requested.
        """
        count = slice_count(next_index)
        # prepare for iteration
        idx_features = -1
        idx_targets = -1
        result = []
        for i, src in enumerate(source):
            if src == 'features':
                idx_features = i
                result.append(numpy.zeros(shape=(count,
                                                 self.dataset.shape[0],
                                                 self.dataset.shape[1],
                                                 3)))
            elif src == 'targets':
                idx_targets = i
                result.append(numpy.zeros(shape=(count, 1), dtype='int32'))
            else:
                raise ValueError('%s implements <features> and <targets>; '
                                 '<%s> is not among these.' %
                                 (str(self.__class__.__name__), src))
        return count, result, idx_features, idx_targets
    def __getstate__(self):
        """
        Help pickle this instance.
        """
        return {'dataset': self.dataset}
    def __setstate__(self, state):
        """
        Help un-pickle this instance.
        """
        self.dataset = state['dataset']
class Basket(object):
    """
    Container for a batch of processed images and their class labels.
    """

    #: class-wide counter used to hand out unique basket identifiers
    id_factory = 1

    def __init__(self, batch=None, classes=None):
        #: processed images as a numpy.ndarray (None while empty)
        self.batch = batch
        #: class labels matching the processed images
        self.classes = classes
        #: unique identifier for this instance
        self.ident = None
        self.assign_id()

    def __len__(self):
        """Number of processed images held by this basket."""
        return 0 if self.batch is None else self.batch.shape[0]

    def assign_id(self):
        """Give this basket a fresh, unique identifier."""
        self.ident = Basket.id_factory
        Basket.id_factory += 1
class InlineGen(Generator):
    """
    Generates the content on the caller's thread (the requester blocks).

    Parameters
    ----------
    profile : bool, optional
        If True, every get() call is profiled with cProfile and the
        statistics are dumped to a timestamped file.
    """
    def __init__(self, profile=False):
        #: whether to profile each get() call
        self.profile = profile
        if profile:
            #: base path for the per-call profile dumps
            self.profile_file = '/dev/shm/pyl2x-adj-' + datetime.now().strftime("%Y%m%d-%H%M%S")
            #: counter used to generate unique profile file names
            self.profile_cnt = 1
        super(InlineGen, self).__init__()
    @functools.wraps(Generator.is_inline)
    def is_inline(self):
        return True
    # BUG FIX: this previously wrapped Generator.is_inline, which clobbered
    # get.__name__ and get.__doc__ with those of is_inline.
    @functools.wraps(Generator.get)
    def get(self, source, next_index):
        if self.profile:
            profiler = cProfile.Profile()
            profiler.enable()
        count, result, idx_features, idx_targets = self._prep_get(source,
                                                                  next_index)
        # iterate to collect data
        for i in range(count):
            fpath = self.dataset.data_provider.cnext()
            trg, categ = self.dataset.data_provider.read(fpath)
            categ = self.dataset.data_provider.categ2int(categ)
            # add a leading batch axis of size 1 so process() sees a batch
            trg = numpy.reshape(trg,
                                (1, trg.shape[0], trg.shape[1], trg.shape[2]))
            if idx_features > -1:
                trg = self.dataset.process(trg)
                result[idx_features][i, :, :, :] = trg[0, :, :, 0:3]
            if idx_targets > -1:
                result[idx_targets][i][0] = categ
        if self.profile:
            profiler.disable()
            profiler.dump_stats('%s.%d.profile' % (self.profile_file,
                                                   self.profile_cnt))
            self.profile_cnt = self.profile_cnt + 1
        return tuple(result)
    def __getstate__(self):
        """
        Help pickle this instance.
        """
        return super(InlineGen, self).__getstate__()
    def __setstate__(self, state):
        """
        Help un-pickle this instance.
        """
        super(InlineGen, self).__setstate__(state)
class AsyncMixin(object):
    """
    Functionality that is common to threaded and process-based generators.

    Maintains a cache of Basket instances filled by workers, an optional
    backup list of already-used baskets (served again when the workers are
    too slow), and an optional "bootstrap" batch saved to / loaded from
    disk so that the first batch of a run is reproducible.

    Parameters
    ----------
    cache_refill_count : int, optional
        Number of images requested from workers per refill request.
    keep_baskets : int, optional
        Number of consumed baskets to keep around for re-serving; 0
        disables the backup mechanism.
    """
    def __init__(self, cache_refill_count=64, keep_baskets=0):
        #: list of cached batches; each list entry is a basket
        self.baskets = []
        #: number of cached images (in all baskets)
        self.cached_images = 0
        #: if the cache has fewer than this number of images request refill
        self.cache_refill_treshold = 5256
        #: number of images to retrieve by each worker request
        self.cache_refill_count = cache_refill_count
        #: on termination counts the workers that exited
        self.finish = 0
        #: one time trigger for the workers to exit
        self._should_terminate = False
        #: number of workers to use
        self.workers_count = 0
        #: number of baskets to keep around if we don't have enough data
        self.keep_baskets = keep_baskets
        #: the list of baskets kept around
        self.baskets_backup = []
        #: number of unique examples
        self.uniq_ex = 0
    def _get(self, source, next_index):
        """
        Get method common implementation.

        Drains cached baskets into the output arrays allocated by
        _prep_get(), re-queuing any leftover part of a basket, then tops
        the cache back up to the refill threshold.
        """
        count, result, idx_features, idx_targets = self._prep_get(source,
                                                                  next_index)
        assert count > 0
        logging.debug('get a batch of %d images (%d cached)',
                      count, self.cached_images)
        # deals with both bootstraping and saving for future
        self.cache_first_batch()
        # where inside result array we're placing the data
        offset = 0
        while count > 0:
            self._new_or_backup(count)
            # get a basket from our list
            basket = self.get_basket()
            if basket is None:
                continue
            # copy the things in place
            to_copy = min(count, len(basket))
            if idx_features > -1:
                btc = basket.batch[0:to_copy, :, :, 0:3]
                result[idx_features][offset:offset+to_copy, :, :, :] = btc
            if idx_targets > -1:
                btc = basket.classes[0:to_copy]
                result[idx_targets][offset:offset+to_copy, 0] = btc
            count = count - to_copy
            # BUG FIX: advance the write position; previously ``offset``
            # stayed at 0, so a request spanning several baskets kept
            # overwriting the first slots and left the tail zero-filled.
            offset = offset + to_copy
            # the basket was larger so we have to put it back
            if len(basket) > to_copy:
                logging.debug("Inefficient use of baskets: %d needed, %d in basket",
                              to_copy, len(basket))
                leftover = Basket()
                leftover.ident = basket.ident
                leftover.batch = basket.batch[to_copy:, :, :, :]
                leftover.classes = basket.classes[to_copy:]
                self.add_basket(leftover, False)
            self.basked_done(basket)
        # make sure we're ready for next round
        refill = self.cache_refill_treshold - self.cached_images
        assert self.cache_refill_count > 0
        while refill > 0:
            self.push_request(self.cache_refill_count)
            refill = refill - self.cache_refill_count
        return tuple(result)
    def basked_done(self, basket):
        """
        A basket was received and it was extracted from queue.
        After the baskets are used they are normally discarded. If we're
        unable to provide examples fast enough the network will block
        waiting (sometimes for tens of seconds). To alleviate that, we keep
        around the old examples and we serve them when there are no
        new baskets.
        """
        if self.keep_baskets == 0:
            return
        assert self.keep_baskets > 0
        lkb = len(self.baskets_backup)
        if lkb >= self.keep_baskets:
            # make room for the new basket
            lkb = lkb - self.keep_baskets + 1
            self.baskets_backup = self.baskets_backup[lkb:]
        self.baskets_backup.append(basket)
        logging.debug('basket of %d images cached; %d baskets in cache',
                      len(basket), len(self.baskets_backup))
    def _new_or_backup(self, count):
        """
        Replacement for `_wait_for_data()` that either gets examples from
        queue or from the backup list.
        """
        if len(self.baskets) > 0:
            return
        if len(self.baskets_backup) == 0:
            self._wait_for_data(count)
        else:
            if self._starving():
                # workers are idle: queue fresh requests before serving
                # the stale backup baskets
                refill = max(self.cache_refill_count, count)
                while refill > 0:
                    self.push_request(self.cache_refill_count)
                    refill = refill - self.cache_refill_count
            self.add_basket(self.baskets_backup, False)
            self.baskets_backup = []
    def __getstate__(self):
        """
        Help pickle this instance.
        """
        return super(AsyncMixin, self).__getstate__()
    def __setstate__(self, state):
        """
        Help un-pickle this instance.
        """
        super(AsyncMixin, self).__setstate__(state)
    def _setup(self):
        """
        Common setup for asynchronous providers.
        """
        # delayed initialization state (see cache_first_batch())
        self.bootstrap_state = 3
        # (should be a customizable param.) - number of examples in 1st batch
        self.bootstrap_count = 1024
    def cache_first_batch(self):
        """
        If conditions are optimal and the user wants first batch saved do that now.
        We check if the batch was already saved and we do nothing in that case.

        State machine: 3 = not initialized yet, 0 = caching disabled,
        4 = need to save a batch, 1 = batch saved/loaded (done).
        """
        if self.bootstrap_state == 0:
            # caching is disabled, so nothing to do
            return
        elif self.bootstrap_state == 1:
            # there is already a cache entry saved (either by this run or loaded)
            return
        elif self.bootstrap_state == 3:
            # delayed initialization from first get call
            self.cached_first_batch = self.dataset.get_cache_loc()
            if self.cached_first_batch is None:
                logging.debug('Bootstrapping is disables')
                self.bootstrap_state = 0
                return
            self.categ_file = os.path.join(self.cached_first_batch,
                                           'bootstrap.categs.npy')
            self.cached_first_batch = os.path.join(self.cached_first_batch,
                                                   'bootstrap.npy')
            logging.debug('Bootstrapping location: %s', self.cached_first_batch)
            if os.path.isfile(self.cached_first_batch):
                # we have a file, so load it
                array = numpy.load(self.cached_first_batch)
                self.set_continous(array, numpy.load(self.categ_file))
                self.bootstrap_state = 1
                logging.debug('A bootstrap batch of %d examples was loaded from %s',
                              array.shape[0], self.cached_first_batch)
                return
            else:
                logging.debug('Bootstrapping file missing')
                self.bootstrap_state = 4
                return
        elif self.bootstrap_state == 4:
            # we need to save a consistent batch for future runs
            if self.uniq_ex < self.bootstrap_count:
                return
            numpy_cache, categs = self.get_continous(self.bootstrap_count)
            if numpy_cache is None:
                return
            numpy.save(self.cached_first_batch, numpy_cache)
            numpy.save(self.categ_file, categs)
            del numpy_cache
            del categs
            self.bootstrap_state = 1
            logging.debug('A bootstrap batch of %d examples was saved at %s',
                          self.bootstrap_count, self.cached_first_batch)
    def get_continous(self, count):
        """
        Get a numpy array with specified number of examples.
        This should only be called in main thread.

        Collects examples from distinct baskets (by ident) without
        removing them from the cache; returns (None, None) on a
        bookkeeping mismatch.
        """
        assert self.uniq_ex >= count
        result = None
        offset = 0
        categs = None
        ident_seen = []
        uniq_baskets = 0
        for bask in self.baskets:
            if bask.ident in ident_seen:
                continue
            ident_seen.append(bask.ident)
            uniq_baskets = uniq_baskets + 1
            if result is None:
                # allocate the output on first basket (shape/dtype known now)
                shape = list(bask.batch.shape)
                shape[0] = count
                result = numpy.empty(shape=shape, dtype=bask.batch.dtype)
                categs = numpy.empty(shape=(count), dtype=bask.classes.dtype)
            this_run = min(count, len(bask))
            result[offset:offset+this_run, :, :, :] = bask.batch[0:this_run, :, :, :]
            categs[offset:offset+this_run] = bask.classes[0:this_run]
            count = count - this_run
            offset = offset + this_run
            if count == 0:
                break
        if count != 0:
            logging.debug('LOGIC ERROR! uniq_ex (%d) should '
                          'reflect the number of unique baskets (%d) '
                          'among all batches (%d)',
                          self.uniq_ex, uniq_baskets, len(self.baskets))
            return None, None
        return result, categs
    def set_continous(self, array, categs, brand_new=True):
        """
        Initialize the basket with examples created in a previous run.
        """
        basket = Basket()
        basket.batch = array
        basket.classes = categs
        self.add_basket(basket, brand_new)
def _process_image(dataset, trg, categ, i, basket, basket_sz):
    """
    Process image and append it to the basket (in place).

    Parameters
    ----------
    dataset : ImgDataset
        Provides the process() transform applied to the image batch.
    trg : numpy.ndarray
        Image data; assumed (height, width, channels) — TODO confirm.
    categ : int
        Class label for the image.
    i : int
        Slot index inside the basket to write to.
    basket : Basket
        Output basket; its batch/classes arrays are lazily allocated on
        the first call for a given basket.
    basket_sz : int
        Total number of slots to allocate for the basket arrays.
    """
    # process this image
    trg = numpy.reshape(trg,
                        (1, trg.shape[0],
                         trg.shape[1],
                         trg.shape[2]))
    trg = dataset.process(trg)
    # and append it to our batch
    if basket.batch is None:
        # lazy allocation: shape/dtype only known after process()
        basket.batch = numpy.empty(shape=(basket_sz,
                                          trg.shape[1],
                                          trg.shape[2],
                                          trg.shape[3]),
                                   dtype=trg.dtype)
        basket.classes = numpy.empty(shape=(basket_sz),
                                     dtype='int32')
    # and we're done with this image
    basket.batch[i, :, :, :] = trg
    basket.classes[i] = categ
class ThreadedGen(Generator, AsyncMixin):
    """
    Generates the content using separate threads in same process.

    A pool of worker threads pulls image-count requests from a queue,
    reads and processes the images, and delivers them back as baskets.

    Parameters
    ----------
    count : int, optional
        The number of worker threads to use. If None, same number of threads
        as the number of cores minus one are used.
    """
    def __init__(self, count=None):
        if count is None:
            count = multiprocessing.cpu_count()
            count = count - 1 if count > 1 else 1
        elif count < 0:
            raise ValueError("Number of processes must be a positive integer")
        #: the list of active threads
        self.threads = []
        #: the queue used to pass refill requests to the workers
        self.queue = Queue.Queue()
        #: semaphore guarding shared state (baskets, counters, provider)
        self.gen_semaphore = threading.BoundedSemaphore(count)
        super(ThreadedGen, self).__init__()
        self.workers_count = count
    @functools.wraps(Generator.is_inline)
    def is_inline(self):
        return False
    @functools.wraps(Generator.setup)
    def setup(self, dataset):
        """
        Starts the threads and waits for orders.
        """
        self.dataset = dataset
        assert self.workers_count > 0
        # start threads
        for i in range(self.workers_count):
            thr = threading.Thread(target=ThreadedGen.worker,
                                   args=(self, i),
                                   name='ThreadedGen[%d]' % i)
            #thr.daemon = True
            self.threads.append(thr)
            thr.start()
        self._setup()
    @functools.wraps(Generator.get)
    def get(self, source, next_index):
        return self._get(source, next_index)
    def _wait_for_data(self, count):
        """
        Waits for some worker to deliver its data; raises RuntimeError
        after ~10 seconds without any basket arriving.
        """
        timeout_count = 100
        while len(self.baskets) == 0:
            if self._starving():
                refill = max(self.cache_refill_count, count)
                while refill > 0:
                    self.push_request(self.cache_refill_count)
                    refill = refill - self.cache_refill_count
            logging.debug('main threads sleeps waiting for data (%d)',
                          timeout_count)
            # see if, instead of waiting useless here we can process some
            # images online ourselves.
            time.sleep(0.1)
            timeout_count = timeout_count - 1
            if timeout_count <= 0:
                raise RuntimeError('Timeout waiting for a thread to provide '
                                   'processed images in ThreadedGen.')
    def push_request(self, count):
        """
        Adds a request for a specified number of images to the queue.
        """
        self.queue.put((count))
    def _starving(self):
        """
        Tell if the request queue is empty (no pending work for workers).
        """
        # BUG FIX: previously this returned the bound method
        # ``self.queue.empty`` (always truthy) instead of calling it.
        return self.queue.empty()
    def pop_request(self):
        """
        Gets a request for a specified number of images from the queue.
        The method asks the data provider for file paths.
        """
        count = self.queue.get()
        result = []
        for i in range(count):
            # the provider cursor is shared; serialize access to it
            self.gen_semaphore.acquire()
            fpath = self.dataset.data_provider.cnext()
            self.gen_semaphore.release()
            result.append(fpath)
        return result
    def add_basket(self, basket, brand_new):
        """
        Appends a basket (or list of baskets) to the list.
        Also, keeps `cached_images` and `uniq_ex` synchronized.
        """
        if isinstance(basket, Basket):
            basket = [basket]
        self.gen_semaphore.acquire()
        for bsk in reversed(basket):
            self.cached_images = self.cached_images + len(bsk)
            if brand_new:
                self.uniq_ex = self.uniq_ex + len(bsk)
                bsk.assign_id()
            self.baskets.append(bsk)
        self.gen_semaphore.release()
    def get_basket(self):
        """
        Extracts a basket from the list (or None when empty).
        Also, keeps `cached_images` synchronized.
        """
        self.gen_semaphore.acquire()
        if len(self.baskets) == 0:
            result = None
        else:
            result = self.baskets.pop()
            self.cached_images = self.cached_images - len(result)
        self.gen_semaphore.release()
        return result
    def done_request(self, thid, basket):
        """
        A thread reports that it is done with a basket.
        """
        count = len(basket)
        logging.debug('thread %d done with a request of %d images',
                      thid, count)
        self.add_basket(basket, True)
        self.queue.task_done()
    def thread_ended(self, thid):
        """
        A worker thread reports its exit; counted in ``finish``.
        """
        logging.debug("thread %d is done", thid)
        self.gen_semaphore.acquire()
        self.finish = self.finish + 1
        self.gen_semaphore.release()
    @functools.wraps(Generator.tear_down)
    def tear_down(self):
        """
        Terminates all threads and drains the request queue.
        """
        logging.debug('ThreadedGen is being terminated; '
                      '%d items in queue '
                      '%d running threads.',
                      self.queue.qsize(), self.workers_count - self.finish)
        self._should_terminate = True
        while not self.queue.empty():
            self.queue.get()
            self.queue.task_done()
        self.queue.join()
        self.queue = None
        self.gen_semaphore = None
        self.threads = None
        logging.debug('ThreadedGen was being terminated')
    @staticmethod
    def worker(myself, thid):
        """
        Thread entry point: keeps pulling requests, reading and
        processing images, and delivering filled baskets until the
        one-time termination trigger fires.
        """
        logging.debug("thread %d starts", thid)
        while not myself._should_terminate:
            # get next request from queue
            req = myself.pop_request()
            # nothing to do so take a nap
            if req is None:
                time.sleep(0.2)
                continue
            basket = Basket()
            basket_sz = len(req)
            logging.debug("thread %d will process %d images", thid, basket_sz)
            for i, fpath in enumerate(req):
                # read the file using data provider
                myself.gen_semaphore.acquire()
                b_ok = False
                try:
                    trg, categ = myself.dataset.data_provider.read(fpath)
                    categ = myself.dataset.data_provider.categ2int(categ)
                    b_ok = True
                except IOError as exc:
                    # skip unreadable files but keep the worker alive
                    logging.error('Exception in worker loop: %s', str(exc))
                myself.gen_semaphore.release()
                if b_ok:
                    _process_image(myself.dataset, trg, categ,
                                   i, basket, basket_sz)
            # and we're done with this batch
            myself.done_request(thid, basket)
        myself.thread_ended(thid)
class ProcessGen(Generator, AsyncMixin):
    """
    Generates the content using separate processes.

    Work requests are pushed over a 0MQ PUSH socket to worker processes;
    filled baskets come back over a PULL socket and are collected by a
    dedicated receiver thread; a PUB socket broadcasts control commands.

    Parameters
    ----------
    count : int, optional
        The number of worker processes to use. If None, same number of
        processes as the number of cores minus one are used.

    Notes
    -----
    The 0MQ part of the class was heavily inspired by
    ``Python Multiprocessing with ZeroMQ`` TAO_ post.
    Some parts were copied straight from provided code_.
    _code: https://github.com/taotetek/blog_examples/blob/master/python_multiprocessing_with_zeromq/workqueue_example.py
    _TAO: http://taotetek.net/2011/02/02/python-multiprocessing-with-zeromq/
    """
    # debug toggle: flip to 1 to use TCP endpoints instead of IPC sockets
    if 0:
        RESULTS_ADDRESS = 'tcp://127.0.0.1:12460'
        CONTROL_ADDRESS = 'tcp://127.0.0.1:12461'
        VENTILATOR_ADDRESS = 'tcp://127.0.0.1:12462'
    else:
        RESULTS_ADDRESS = 'ipc:///tmp/pyl2x-procgen-results.ipc'
        CONTROL_ADDRESS = 'ipc:///tmp/pyl2x-procgen-control.ipc'
        VENTILATOR_ADDRESS = 'ipc:///tmp/pyl2x-procgen-ventilator.ipc'
    #: control message telling the workers to shut down
    CTRL_FINISH = 'FINISHED'
    def __init__(self, count=None):
        if count is None:
            count = multiprocessing.cpu_count()
            count = count - 1 if count > 1 else 1
        elif count < 0:
            raise ValueError("Number of processes must be a positive integer")
        super(ProcessGen, self).__init__()
        self.workers_count = count
        #: number of requests send that were not fulfilled, yet
        self.outstanding_requests = 0
        #: keep various processes from returning same files
        self.provider_offset = 0
        #: maximum number of outstanding requests
        self.max_outstanding = 64
        #: number of seconds to wait before declaring timeout
        self.wait_timeout = 660
        #: number of extra images to request
        self.xcount = 16
        self.xcountcrt = 0
        #: used by receiver
        self.gen_semaphore = threading.BoundedSemaphore(count)
    @functools.wraps(Generator.is_inline)
    def is_inline(self):
        return False
    @functools.wraps(Generator.setup)
    def setup(self, dataset):
        """
        Starts the receiver thread, worker processes and 0MQ channels.
        """
        self.dataset = dataset
        self.outstanding_requests = 0
        self.dataset_provided = False
        # the thread used for receiving data
        self.receiverth = threading.Thread(target=ProcessGen.receiver_worker,
                                           args=(self,),
                                           name='ProcessGenReceiver')
        #thr.daemon = True
        self.receiverth.start()
        # Create a pool of workers to distribute work to
        assert self.workers_count > 0
        self.worker_pool = range(self.workers_count)
        for wrk_num in range(len(self.worker_pool)):
            multiprocessing.Process(target=worker, args=(wrk_num,)).start()
        # Initialize a zeromq context
        self.context = zmq.Context()
        # Set up a channel to receive results
        self.results_rcv = self.context.socket(zmq.PULL)
        self.results_rcv.bind(ProcessGen.RESULTS_ADDRESS)
        # Set up a channel to send control commands
        self.control_sender = self.context.socket(zmq.PUB)
        self.control_sender.bind(ProcessGen.CONTROL_ADDRESS)
        # Set up a channel to send work
        self.ventilator_send = self.context.socket(zmq.PUSH)
        self.ventilator_send.bind(ProcessGen.VENTILATOR_ADDRESS)
        self._setup()
        # Give everything a second to spin up and connect
        time.sleep(0.5)
    @functools.wraps(Generator.tear_down)
    def tear_down(self):
        """
        Terminates all components.
        """
        logging.debug('ProcessGen is being terminated; ')
        self._should_terminate = True
        # Signal to all workers that we are finsihed
        self.control_sender.send(dill.dumps(ProcessGen.CTRL_FINISH))
        logging.debug('ProcessGen was being terminated')
    @functools.wraps(Generator.get)
    def get(self, source, next_index):
        if not self.dataset_provided:
            # send workers a copy of the dataset (serialized with dill)
            self.control_sender.send(dill.dumps(self.dataset))
            self.dataset_provided = True
            time.sleep(0.5)
            # prime the cache with enough requests to reach the threshold
            refill = self.cache_refill_treshold
            assert self.cache_refill_count > 0
            while refill > 0:
                self.push_request(self.cache_refill_count)
                refill = refill - self.cache_refill_count
        return self._get(source, next_index)
    def _starving(self):
        """
        Tell if there is no work in flight.
        """
        return self.outstanding_requests == 0
    def _wait_for_data(self, count):
        """
        Waits for some worker process to deliver its data; raises
        RuntimeError after ``wait_timeout`` seconds without a basket.
        """
        timeout_count = self.wait_timeout * 10
        while len(self.baskets) == 0:
            if self._starving():
                refill = max(self.cache_refill_count, count)
                while refill > 0:
                    self.push_request(self.cache_refill_count)
                    refill = refill - self.cache_refill_count
            #else:
            #    self.receive_all_messages()
            #    if len(self.baskets) != 0:
            #        break
            # see if, instead of waiting useless here we can process some
            # images online ourselves.
            time.sleep(0.1)
            timeout_count = timeout_count - 1
            if timeout_count <= 0:
                raise RuntimeError('Timeout waiting for a process to provide '
                                   'processed images in ProcessGen.')
    def push_request(self, count):
        """
        Adds a request for a specified number of images.
        Sends a request for a specified number of images down a zeromq "PUSH"
        connection to be processed by listening workers, in a round robin
        load balanced fashion.
        Parameters
        ----------
        count : int
            Number of images to retrieve.
        """
        if self.outstanding_requests >= self.max_outstanding:
            # back-pressure: silently drop the request
            # logging.debug('The number of outstanding requests is too '
            #              'high (%d); request for %d images ignored',
            #              self.outstanding_requests, count)
            return
        # self.xcount = 16
        # if self.xcountcrt >= self.xcount:
        #     self.xcountcrt = 0
        # count = count + self.xcountcrt
        # self.xcountcrt = self.xcountcrt + 1
        self.outstanding_requests = self.outstanding_requests + 1
        work_message = {'offset': self.provider_offset, 'count' : count}
        self.provider_offset = self.provider_offset + count
        self.ventilator_send.send_json(work_message)
    def receive_all_messages(self, no_block=True):
        """
        The "results_manager" function receives each result
        from multiple workers.

        Drains the PULL socket of available baskets and appends them to
        the cache; with ``no_block`` False it first polls up to 1 second.
        """
        b_done = False
        baskets = []
        while not b_done:
            try:
                if no_block:
                    flags = zmq.NOBLOCK
                else:
                    # BUG FIX: this was ``self.results_rcv.pool(...)``,
                    # a nonexistent attribute; the 0MQ method is poll().
                    self.results_rcv.poll(timeout=1*1000)
                    flags = 0
                basket = self.results_rcv.recv_pyobj(flags=flags)
                self.outstanding_requests = self.outstanding_requests - 1
                if len(basket) > 0:
                    logging.debug('A basket of %d examples has been '
                                  'received; %d outstanding requests, '
                                  '%d cached images',
                                  len(basket),
                                  self.outstanding_requests,
                                  self.cached_images)
                    baskets.append(basket)
                else:
                    logging.error("Empty basket received")
                #assert self.outstanding_requests >= 0
            except zmq.ZMQError as exc:
                if exc.errno == zmq.EAGAIN:
                    # no more pending messages
                    b_done = True
                else:
                    raise
        if len(baskets) > 0:
            self.add_basket(baskets, True)
        #logging.debug("Received all messages; %d outstanding requests",
        #              self.outstanding_requests)
    def add_basket(self, basket, brand_new):
        """
        Appends a basket (or list of baskets) to the list.
        Also, keeps `cached_images` and `uniq_ex` synchronized.
        """
        if isinstance(basket, Basket):
            basket = [basket]
        self.gen_semaphore.acquire()
        for bsk in reversed(basket):
            self.cached_images = self.cached_images + len(bsk)
            self.baskets.append(bsk)
            if brand_new:
                self.uniq_ex = self.uniq_ex + len(bsk)
                bsk.assign_id()
        self.gen_semaphore.release()
    def get_basket(self):
        """
        Extracts a basket from the list, skipping unfilled ones.
        Also, keeps `cached_images` synchronized.
        """
        while True:
            if len(self.baskets) == 0:
                return None
            else:
                self.gen_semaphore.acquire()
                result = self.baskets.pop()
                self.gen_semaphore.release()
                if result.batch is None:
                    # discard baskets that were never filled by a worker
                    continue
                self.cached_images = self.cached_images - len(result)
                return result
    # The receiver thread below keeps draining the results channel so
    # worker processes never block on a full socket.
    @staticmethod
    def receiver_worker(myself):
        """
        Thread entry point: polls for incoming baskets until the
        one-time termination trigger fires.
        """
        logging.debug("worker thread starts")
        time.sleep(0.5)
        while not myself._should_terminate:
            myself.receive_all_messages(no_block=True)
            time.sleep(0.01)
# The "worker" functions listen on a zeromq PULL connection for "work"
# (numbers to be processed) from the ventilator, square those numbers,
# and send the results down another zeromq PUSH connection to the
# results manager.
def worker(wrk_num):
"""
Worker process for `ProcessGen`.
"""
logging.debug("worker process %d starts", wrk_num)
# Initialize a zeromq context
context = zmq.Context()
# Set up a channel to receive work from the ventilator
work_rcv = context.socket(zmq.PULL)
work_rcv.connect(ProcessGen.VENTILATOR_ADDRESS)
# Set up a channel to send result of work to the results reporter
results_sender = context.socket(zmq.PUSH)
results_sender.connect(ProcessGen.RESULTS_ADDRESS)
# Set up a channel to receive control messages over
control_rcv = context.socket(zmq.SUB)
control_rcv.connect(ProcessGen.CONTROL_ADDRESS)
control_rcv.setsockopt(zmq.SUBSCRIBE, "")
# Set up a poller to multiplex the work receiver and control receiver channels
poller = zmq.Poller()
poller.register(work_rcv, zmq.POLLIN)
poller.register(control_rcv, zmq.POLLIN)
dataset = None
# def pop_request(offset, count):
# """
# Gets a list of files to process
# """
# result = []
# count = count * (wrk_num+1)
# for i in range(count):
# fpath = dataset.data_provider.get(offset, count)
# result.append(fpath)
# return result
# Loop and accept messages from both channels, acting accordingly
while True:
socks = dict(poller.poll())
# If the message came from work_rcv channel, square the number
# and send the answer to the results reporter
if socks.get(work_rcv) == zmq.POLLIN and not dataset is None:
work_message = work_rcv.recv_json()
files = dataset.data_provider.get(work_message['offset'],
work_message['count'])
basket = Basket()
basket_sz = len(files)
logging.debug("worker process %d will process %d images",
wrk_num, basket_sz)
for i, fpath in enumerate(files):
b_ok = False
try:
trg, categ = dataset.data_provider.read(fpath)
categ = dataset.data_provider.categ2int(categ)
b_ok = True
except IOError, exc:
logging.error('Exception in worker loop: %s', str(exc))
if b_ok:
_process_image(dataset, trg, categ,
i, basket, basket_sz)
if len(basket) == 0:
logging.error("Worker %d sending empty basket", wrk_num)
results_sender.send_pyobj(basket)
# If the message came over the control channel, shut down the worker.
if socks.get(control_rcv) == zmq.POLLIN:
control_message = dill.loads(control_rcv.recv())
if isinstance(control_message, basestring):
if control_message == ProcessGen.CTRL_FINISH:
logging.info("Worker %i received FINSHED, quitting!",
wrk_num)
break
elif 'ImgDataset' in str(control_message.__class__):
dataset = control_message
def gen_from_string(gen_name):
    """
    Factory that maps a string key to a freshly constructed Generator.

    Parameters
    ----------
    gen_name : str
        One of 'inline', 'threads' or 'process'.

    Returns
    -------
    adj : Generator
        A new instance of the requested generator class.

    Raises
    ------
    ValueError
        If `gen_name` does not name a known generator type.
    """
    # Guard-clause dispatch: return as soon as the key matches.
    if gen_name == 'inline':
        return InlineGen()
    if gen_name == 'threads':
        return ThreadedGen()
    if gen_name == 'process':
        return ProcessGen()
    raise ValueError('%s is not a known Generator name' % gen_name)
|
{
"content_hash": "3e94296518c6a9caf7346982c7637e5d",
"timestamp": "",
"source": "github",
"line_count": 1087,
"max_line_length": 120,
"avg_line_length": 34.7442502299908,
"alnum_prop": 0.546747160219239,
"repo_name": "TNick/pyl2extra",
"id": "8643fe0efe6daa6e4aec3de6513be6d0bd2da045",
"size": "37767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyl2extra/datasets/img_dataset/generators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "11767"
},
{
"name": "Python",
"bytes": "831896"
},
{
"name": "Shell",
"bytes": "4624"
}
],
"symlink_target": ""
}
|
from decimal import Decimal
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from oscar.core.compat import AUTH_USER_MODEL
from oscar.core.utils import get_default_currency
from oscar.models.fields import AutoSlugField
from oscar.templatetags.currency_filters import currency
from . import bankcards
@python_2_unicode_compatible
class AbstractTransaction(models.Model):
    """
    A transaction for a particular payment source.
    These are similar to the payment events within the order app but model a
    slightly different aspect of payment. Crucially, payment sources and
    transactions have nothing to do with the lines of the order while payment
    events do.
    For example:
    * A 'pre-auth' with a bankcard gateway
    * A 'settle' with a credit provider (see django-oscar-accounts)
    """
    # The payment source this transaction was made against.
    source = models.ForeignKey(
        'payment.Source',
        on_delete=models.CASCADE,
        related_name='transactions',
        verbose_name=_("Source"))
    # We define some sample types but don't constrain txn_type to be one of
    # these as there will be domain-specific ones that we can't anticipate
    # here.
    AUTHORISE, DEBIT, REFUND = 'Authorise', 'Debit', 'Refund'
    txn_type = models.CharField(_("Type"), max_length=128, blank=True)
    amount = models.DecimalField(_("Amount"), decimal_places=2, max_digits=12)
    # Free-form reference/status as reported by the payment partner.
    reference = models.CharField(_("Reference"), max_length=128, blank=True)
    status = models.CharField(_("Status"), max_length=128, blank=True)
    date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
    def __str__(self):
        # E.g. "Debit of 9.99"
        return _(u"%(type)s of %(amount).2f") % {
            'type': self.txn_type,
            'amount': self.amount}
    class Meta:
        abstract = True
        app_label = 'payment'
        # Newest transactions first.
        ordering = ['-date_created']
        verbose_name = _("Transaction")
        verbose_name_plural = _("Transactions")
@python_2_unicode_compatible
class AbstractSource(models.Model):
    """
    A source of payment for an order.
    This is normally a credit card which has been pre-authed for the order
    amount, but some applications will allow orders to be paid for using
    multiple sources such as cheque, credit accounts, gift cards. Each payment
    source will have its own entry.
    This source object tracks how much money has been authorised, debited and
    refunded, which is useful when payment takes place in multiple stages.
    """
    order = models.ForeignKey(
        'order.Order',
        on_delete=models.CASCADE,
        related_name='sources',
        verbose_name=_("Order"))
    source_type = models.ForeignKey(
        'payment.SourceType',
        on_delete=models.CASCADE,
        related_name="sources",
        verbose_name=_("Source Type"))
    currency = models.CharField(
        _("Currency"), max_length=12, default=get_default_currency)
    # Track the various amounts associated with this source
    amount_allocated = models.DecimalField(
        _("Amount Allocated"), decimal_places=2, max_digits=12,
        default=Decimal('0.00'))
    amount_debited = models.DecimalField(
        _("Amount Debited"), decimal_places=2, max_digits=12,
        default=Decimal('0.00'))
    amount_refunded = models.DecimalField(
        _("Amount Refunded"), decimal_places=2, max_digits=12,
        default=Decimal('0.00'))
    # Reference number for this payment source. This is often used to look up
    # a transaction model for a particular payment partner.
    reference = models.CharField(_("Reference"), max_length=255, blank=True)
    # A customer-friendly label for the source, eg XXXX-XXXX-XXXX-1234
    label = models.CharField(_("Label"), max_length=128, blank=True)
    # A dictionary of submission data that is stored as part of the
    # checkout process, where we need to pass an instance of this class around
    submission_data = None
    # We keep a list of deferred transactions that are only actually saved when
    # the source is saved for the first time
    deferred_txns = None
    class Meta:
        abstract = True
        app_label = 'payment'
        verbose_name = _("Source")
        verbose_name_plural = _("Sources")
    def __str__(self):
        description = _("Allocation of %(amount)s from type %(type)s") % {
            'amount': currency(self.amount_allocated, self.currency),
            'type': self.source_type}
        if self.reference:
            description += _(" (reference: %s)") % self.reference
        return description
    def save(self, *args, **kwargs):
        """
        Save the source, then flush any transactions that were deferred
        while the related order did not yet exist.
        """
        super(AbstractSource, self).save(*args, **kwargs)
        if self.deferred_txns:
            for txn in self.deferred_txns:
                self._create_transaction(*txn)
    def create_deferred_transaction(self, txn_type, amount, reference=None,
                                    status=None):
        """
        Register the data for a transaction that can't be created yet due to FK
        constraints. This happens at checkout where create an payment source
        and a transaction but can't save them until the order model exists.

        NOTE(review): defaults here are None while _create_transaction
        defaults to '' — confirm whether None values are intended to be
        written to the CharFields when the deferred txns are flushed.
        """
        if self.deferred_txns is None:
            self.deferred_txns = []
        self.deferred_txns.append((txn_type, amount, reference, status))
    def _create_transaction(self, txn_type, amount, reference='',
                            status=''):
        # Create a child Transaction row recording this movement of money.
        self.transactions.create(
            txn_type=txn_type, amount=amount,
            reference=reference, status=status)
    # =======
    # Actions
    # =======
    def allocate(self, amount, reference='', status=''):
        """
        Convenience method for ring-fencing money against this source
        """
        self.amount_allocated += amount
        self.save()
        self._create_transaction(
            AbstractTransaction.AUTHORISE, amount, reference, status)
    # Tell Django templates not to call this method during rendering.
    allocate.alters_data = True
    def debit(self, amount=None, reference='', status=''):
        """
        Convenience method for recording debits against this source.

        If `amount` is omitted, the current outstanding balance is debited.
        """
        if amount is None:
            amount = self.balance
        self.amount_debited += amount
        self.save()
        self._create_transaction(
            AbstractTransaction.DEBIT, amount, reference, status)
    debit.alters_data = True
    def refund(self, amount, reference='', status=''):
        """
        Convenience method for recording refunds against this source
        """
        self.amount_refunded += amount
        self.save()
        self._create_transaction(
            AbstractTransaction.REFUND, amount, reference, status)
    refund.alters_data = True
    # ==========
    # Properties
    # ==========
    @property
    def balance(self):
        """
        Return the balance of this source
        """
        return (self.amount_allocated - self.amount_debited +
                self.amount_refunded)
    @property
    def amount_available_for_refund(self):
        """
        Return the amount available to be refunded
        """
        return self.amount_debited - self.amount_refunded
@python_2_unicode_compatible
class AbstractSourceType(models.Model):
    """
    A type of payment source.
    This could be an external partner like PayPal or DataCash,
    or an internal source such as a managed account.
    """
    name = models.CharField(_("Name"), max_length=128)
    # Slug derived automatically from the name; unique per type.
    code = AutoSlugField(
        _("Code"), max_length=128, populate_from='name', unique=True,
        help_text=_("This is used within forms to identify this source type"))
    class Meta:
        abstract = True
        app_label = 'payment'
        verbose_name = _("Source Type")
        verbose_name_plural = _("Source Types")
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class AbstractBankcard(models.Model):
    """
    Model representing a user's bankcard. This is used for two purposes:
    1. The bankcard form will return an instance of this model that can be
       used with payment gateways. In this scenario, the instance will
       have additional attributes (start_date, issue_number, ccv) that
       payment gateways need but that we don't save.
    2. To keep a record of a user's bankcards and allow them to be
       re-used. This is normally done using the 'partner reference'.
    .. warning::
        Some of the fields of this model (name, expiry_date) are considered
        "cardholder data" under PCI DSS v2. Hence, if you use this model and
        store those fields then the requirements for PCI compliance will be
        more stringent.
    """
    user = models.ForeignKey(
        AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name='bankcards',
        verbose_name=_("User"))
    card_type = models.CharField(_("Card Type"), max_length=128)
    # Often you don't actually need the name on the bankcard
    name = models.CharField(_("Name"), max_length=255, blank=True)
    # We store an obfuscated version of the card number, just showing the last
    # 4 digits.
    number = models.CharField(_("Number"), max_length=32)
    # We store a date even though only the month is visible. Bankcards are
    # valid until the last day of the month.
    expiry_date = models.DateField(_("Expiry Date"))
    # For payment partners who are storing the full card details for us
    partner_reference = models.CharField(
        _("Partner Reference"), max_length=255, blank=True)
    # Temporary data not persisted to the DB (kept only on in-memory
    # instances for passing to payment gateways).
    start_date = None
    issue_number = None
    ccv = None
    def __str__(self):
        return _(u"%(card_type)s %(number)s (Expires: %(expiry)s)") % {
            'card_type': self.card_type,
            'number': self.number,
            'expiry': self.expiry_month()}
    def __init__(self, *args, **kwargs):
        # Pop off the temporary data before the model's own __init__
        # sees the kwargs, as these are not model fields.
        self.start_date = kwargs.pop('start_date', None)
        self.issue_number = kwargs.pop('issue_number', None)
        self.ccv = kwargs.pop('ccv', None)
        super(AbstractBankcard, self).__init__(*args, **kwargs)
        # Initialise the card-type: only auto-detect for unsaved instances
        # (saved ones already had their number obfuscated).
        if self.id is None:
            self.card_type = bankcards.bankcard_type(self.number)
            if self.card_type is None:
                self.card_type = 'Unknown card type'
    class Meta:
        abstract = True
        app_label = 'payment'
        verbose_name = _("Bankcard")
        verbose_name_plural = _("Bankcards")
    def save(self, *args, **kwargs):
        """
        Save the bankcard, obfuscating sensitive data first if the number
        has not already been obfuscated (obfuscated numbers start with 'X').
        """
        if not self.number.startswith('X'):
            self.prepare_for_save()
        super(AbstractBankcard, self).save(*args, **kwargs)
    def prepare_for_save(self):
        # This is the first time this card instance is being saved. We
        # remove all sensitive data
        self.number = u"XXXX-XXXX-XXXX-%s" % self.number[-4:]
        self.start_date = self.issue_number = self.ccv = None
    @property
    def cvv(self):
        # Alias for the (non-persisted) card verification value.
        return self.ccv
    @property
    def obfuscated_number(self):
        # Same masking scheme as prepare_for_save, usable before saving.
        return u'XXXX-XXXX-XXXX-%s' % self.number[-4:]
    def start_month(self, format='%m/%y'):
        """Return the card start date formatted as a month string."""
        return self.start_date.strftime(format)
    def expiry_month(self, format='%m/%y'):
        """Return the card expiry date formatted as a month string."""
        return self.expiry_date.strftime(format)
|
{
"content_hash": "8c5ef1bb46afacd18151a2a4e250e228",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 79,
"avg_line_length": 35.31987577639752,
"alnum_prop": 0.6316715026817902,
"repo_name": "sonofatailor/django-oscar",
"id": "38d8fef7236b6c5d11d443a25c1a87f11970ff6e",
"size": "11373",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/oscar/apps/payment/abstract_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "939276"
},
{
"name": "HTML",
"bytes": "522590"
},
{
"name": "JavaScript",
"bytes": "271655"
},
{
"name": "Makefile",
"bytes": "2322"
},
{
"name": "Python",
"bytes": "1887022"
},
{
"name": "Shell",
"bytes": "1642"
}
],
"symlink_target": ""
}
|
from .. import BITOID, VARBITOID
from ..bitwise import Varbit, Bit
from . import lib
def varbit_pack(x, pack=lib.varbit_pack):
    """Serialize a bit-string object into its wire form via `lib`."""
    bits_and_data = (x.bits, x.data)
    return pack(bits_and_data)
def varbit_unpack(x, unpack=lib.varbit_unpack):
    """Deserialize wire data into a Varbit via `lib`."""
    fields = unpack(x)
    return Varbit.from_bits(*fields)
# Map Postgres type oid -> (pack, unpack, Python type) triples used by the
# type-io machinery; BIT and VARBIT share the same (de)serializers.
oid_to_io = {
	BITOID : (varbit_pack, varbit_unpack, Bit),
	VARBITOID : (varbit_pack, varbit_unpack, Varbit),
}
# Map Postgres type oid -> corresponding Python type.
oid_to_type = {
	BITOID : Bit,
	VARBITOID : Varbit,
}
|
{
"content_hash": "92bafd84ccc386ae5dcb89b58be12112",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 50,
"avg_line_length": 21.94736842105263,
"alnum_prop": 0.6882494004796164,
"repo_name": "python-postgres/fe",
"id": "2eb36e965b8a7ef486a2f5e2a0bbcd7b67c1e52c",
"size": "417",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "postgresql/types/io/pg_bitwise.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "57620"
},
{
"name": "Python",
"bytes": "711458"
}
],
"symlink_target": ""
}
|
# Use the function form of print: the py2 statement form is a syntax error
# under Python 3, while print('hello,esc') emits the same output on both.
print('hello,esc')
|
{
"content_hash": "082972cc2ebd4005ea2055b893503777",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 17,
"avg_line_length": 9.5,
"alnum_prop": 0.6842105263157895,
"repo_name": "HW-X/XCopter",
"id": "7501deb5f0e02cd67f7492a4137082a683b81316",
"size": "62",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/driver/esc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "71"
},
{
"name": "C++",
"bytes": "7288"
},
{
"name": "Python",
"bytes": "1523"
}
],
"symlink_target": ""
}
|
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__all__=["hinfinity_filter"]
from .hinfinity_filter import *
|
{
"content_hash": "88077e988b9eb25eb7d3f02c83e081b1",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 66,
"avg_line_length": 23.238095238095237,
"alnum_prop": 0.7254098360655737,
"repo_name": "barney-NG/pyCAMTracker",
"id": "67fb1c3fdee4c7b0a89bbad2359fccee0cca6e0b",
"size": "512",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/filterpy/hinfinity/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "538994"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django import forms
from django.db.models.functions import Lower
from django.utils.translation import ugettext_lazy as _
from tracpro.charts import filters
from tracpro.contacts.models import Contact
from tracpro.polls.models import Poll, Question
from .models import BaselineTerm
class BaselineTermForm(forms.ModelForm):
    """Create/edit form for a BaselineTerm.

    Restricts poll choices to the requesting user's org and validates the
    date range and the baseline/follow-up question pairing.
    """
    class Meta:
        model = BaselineTerm
        fields = ('name', 'org', 'start_date', 'end_date',
                  'baseline_poll', 'baseline_question',
                  'follow_up_poll', 'follow_up_question',
                  'y_axis_title')
        widgets = {
            'start_date': forms.widgets.DateInput(attrs={'class': 'datepicker'}),
            'end_date': forms.widgets.DateInput(attrs={'class': 'datepicker'}),
            'org': forms.HiddenInput()
        }
    def __init__(self, *args, **kwargs):
        self.user = kwargs.pop('user')
        org = self.user.get_org()
        super(BaselineTermForm, self).__init__(*args, **kwargs)
        if org:
            # Both poll selectors offer the same active polls for this org.
            polls = Poll.objects.active().by_org(org).order_by(Lower('name'))
            for poll_field in ('baseline_poll', 'follow_up_poll'):
                self.fields[poll_field].queryset = polls
    def clean(self, *args, **kwargs):
        cleaned_data = super(BaselineTermForm, self).clean()
        start = cleaned_data.get("start_date")
        end = cleaned_data.get("end_date")
        if start and end and start > end:
            raise forms.ValidationError(
                _("Start date should be before end date."))
        baseline = cleaned_data.get("baseline_question")
        follow_up = cleaned_data.get("follow_up_question")
        if baseline and follow_up and baseline == follow_up:
            raise forms.ValidationError(
                _("Baseline and follow up questions should be different."))
        return cleaned_data
class QuestionModelChoiceField(forms.ModelChoiceField):
    """Choice field whose option labels include the owning poll's name."""
    def label_from_instance(self, obj):
        # Prefix each question with its poll so identically-named questions
        # from different polls can be told apart in the dropdown.
        return "{0}: {1}".format(obj.poll.name, obj.name)
class SpoofDataForm(forms.Form):
    """ Form to create spoofed poll data.

    Validates that the date range is ordered, the two questions differ, and
    each minimum does not exceed its maximum.
    """
    contacts = forms.ModelMultipleChoiceField(
        queryset=Contact.objects.order_by(Lower('name')),
        help_text=_("Select contacts for this set of spoofed data."),
        widget=forms.widgets.SelectMultiple(attrs={'size': '20'}),
    )
    start_date = forms.DateField(
        help_text=_("Baseline poll data will be submitted on this date. "
                    "Follow up data will start on this date."))
    end_date = forms.DateField(
        help_text=_("Follow up data will end on this date. "))
    baseline_question = QuestionModelChoiceField(
        queryset=Question.objects.order_by(Lower('poll__name'), 'text'),
        help_text=_("Select a baseline question which will have numeric "
                    "answers only."))
    follow_up_question = QuestionModelChoiceField(
        label=_("Follow Up Question"),
        queryset=Question.objects.order_by(Lower('poll__name'), 'text'),
        help_text=_("Select a follow up question which will have "
                    "numeric answers only."))
    baseline_minimum = forms.IntegerField(
        help_text=_("A baseline answer will be created for each contact "
                    "within the minimum/maximum range."))
    baseline_maximum = forms.IntegerField(
        help_text=_("A baseline answer will be created for each contact "
                    "within the minimum/maximum range."))
    follow_up_minimum = forms.IntegerField(
        label=_("Follow Up Minimum"),
        help_text=_("Follow up answers will be created for each contact "
                    "within the minimum/maximum range."))
    follow_up_maximum = forms.IntegerField(
        label=_("Follow Up Maximum"),
        help_text=_("Follow up answers will be created for each contact "
                    "within the minimum/maximum range."))
    def __init__(self, *args, **kwargs):
        org = kwargs.pop('org')
        super(SpoofDataForm, self).__init__(*args, **kwargs)
        if org:
            # Limit contact and question choices to the requesting org.
            contacts = Contact.objects.active().by_org(org).order_by(Lower('name'))
            self.fields['contacts'].queryset = contacts
            questions = Question.objects.filter(poll__in=Poll.objects.active().by_org(org))
            self.fields['baseline_question'].queryset = questions
            self.fields['follow_up_question'].queryset = questions
    def clean(self, *args, **kwargs):
        cleaned_data = super(SpoofDataForm, self).clean()
        start_date = cleaned_data.get("start_date")
        end_date = cleaned_data.get("end_date")
        if start_date and end_date and start_date > end_date:
            raise forms.ValidationError(
                _("Start date should be before end date."))
        baseline_question = cleaned_data.get("baseline_question")
        follow_up_question = cleaned_data.get("follow_up_question")
        if baseline_question and follow_up_question and baseline_question == follow_up_question:
            raise forms.ValidationError(
                _("Baseline and follow up questions should be different."))
        baseline_minimum = cleaned_data.get("baseline_minimum")
        baseline_maximum = cleaned_data.get("baseline_maximum")
        # Use explicit None checks: a legitimate bound of 0 is falsy, and a
        # plain truthiness test would silently skip this validation.
        if (baseline_minimum is not None and baseline_maximum is not None
                and baseline_minimum > baseline_maximum):
            raise forms.ValidationError(
                _("Baseline maximum should exceed or equal minimum."))
        follow_up_minimum = cleaned_data.get("follow_up_minimum")
        follow_up_maximum = cleaned_data.get("follow_up_maximum")
        if (follow_up_minimum is not None and follow_up_maximum is not None
                and follow_up_minimum > follow_up_maximum):
            raise forms.ValidationError(
                _("Follow up maximum should exceed or equal minimum."))
        return cleaned_data
class BaselineTermFilterForm(filters.DateRangeFilter, filters.DataFieldFilter,
                             filters.FilterForm):
    """Filter form for viewing a BaselineTerm's chart data.

    Always bound: if no data is submitted, the term's own start/end dates
    are used as the default custom date range.
    """
    goal = forms.FloatField(
        required=False,
        label=_("Goal"),
        help_text=_("If specified, this value will be used instead of "
                    "baseline data."))
    region = forms.ModelChoiceField(
        required=False,
        label=_("Contact panel"),
        queryset=None,
        empty_label=_("All panels"),
        help_text=_("If specified, only responses from contacts in this "
                    "panel will be shown."))
    def __init__(self, baseline_term, data_regions, *args, **kwargs):
        if not kwargs.get('data'):
            # Set valid data if None (or {}) was provided.
            # Form will always be considered bound.
            kwargs['data'] = {
                'date_range': 'custom',
                'start_date': baseline_term.start_date,
                'end_date': baseline_term.end_date,
            }
        super(BaselineTermFilterForm, self).__init__(*args, **kwargs)
        # Dates are optional in the parent filter but mandatory here.
        self.fields['start_date'].required = True
        self.fields['end_date'].required = True
        # data_regions is None means "no restriction": offer every active
        # region of the org; otherwise offer only the user's data regions.
        if data_regions is None:
            queryset = self.org.regions.filter(is_active=True)
        else:
            queryset = data_regions
        # Either way, keep only regions the baseline term actually covers.
        queryset = queryset.filter(pk__in=baseline_term.get_regions()).order_by(Lower('name'))
        self.fields['region'].queryset = queryset
    def filter_contacts(self, queryset=None):
        """Narrow the parent filter's contacts to the selected region."""
        contacts = super(BaselineTermFilterForm, self).filter_contacts(queryset)
        region = self.cleaned_data.get('region')
        if region:
            contacts = contacts.filter(region=region)
        return contacts
|
{
"content_hash": "d2bfeb51e84b472036eb449a5dbac6c5",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 96,
"avg_line_length": 42.45054945054945,
"alnum_prop": 0.6172663732850117,
"repo_name": "rapidpro/tracpro",
"id": "125d9c84dfd14386ff339c44f20341f4cea23db7",
"size": "7726",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tracpro/baseline/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "43233"
},
{
"name": "CoffeeScript",
"bytes": "6712"
},
{
"name": "HTML",
"bytes": "120223"
},
{
"name": "JavaScript",
"bytes": "284327"
},
{
"name": "Makefile",
"bytes": "2587"
},
{
"name": "Python",
"bytes": "646831"
}
],
"symlink_target": ""
}
|
"""Unit test for treadmill.runtime.
"""
import errno
import socket
import unittest
import mock
import treadmill
import treadmill.rulefile
import treadmill.runtime
from treadmill import exc
class RuntimeTest(unittest.TestCase):
    """Tests for treadmill.runtime."""
    @mock.patch('socket.socket.bind', mock.Mock())
    def test__allocate_sockets(self):
        """Test allocating sockets.
        """
        # access protected module _allocate_sockets
        # pylint: disable=w0212
        # First bind fails with EADDRINUSE, so the allocator must retry on
        # another port; three more binds succeed, yielding three sockets.
        socket.socket.bind.side_effect = [
            socket.error(errno.EADDRINUSE, 'In use'),
            mock.DEFAULT,
            mock.DEFAULT,
            mock.DEFAULT
        ]
        sockets = treadmill.runtime._allocate_sockets(
            'prod', '0.0.0.0', socket.SOCK_STREAM, 3
        )
        self.assertEqual(3, len(sockets))
    @mock.patch('socket.socket.bind', mock.Mock())
    def test__allocate_sockets_fail(self):
        """Test allocating sockets when all are taken.
        """
        # access protected module _allocate_sockets
        # pylint: disable=w0212
        # Every bind attempt fails: the allocator must eventually give up
        # with a ContainerSetupError rather than loop forever.
        socket.socket.bind.side_effect = socket.error(errno.EADDRINUSE,
                                                      'In use')
        with self.assertRaises(exc.ContainerSetupError):
            treadmill.runtime._allocate_sockets(
                'prod', '0.0.0.0', socket.SOCK_STREAM, 3
            )
    @mock.patch('socket.socket', mock.Mock(autospec=True))
    @mock.patch('treadmill.runtime._allocate_sockets', mock.Mock())
    def test_allocate_network_ports(self):
        """Test network port allocation.
        """
        # access protected module _allocate_network_ports
        # pylint: disable=w0212
        # Every request for N sockets returns N copies of the same mock.
        treadmill.runtime._allocate_sockets.side_effect = \
            lambda _x, _y, _z, count: [socket.socket()] * count
        mock_socket = socket.socket.return_value
        # Port numbers handed back by the (mocked) kernel, consumed in the
        # order the runtime queries getsockname().
        mock_socket.getsockname.side_effect = [
            ('unused', 50001),
            ('unused', 60001),
            ('unused', 10000),
            ('unused', 10001),
            ('unused', 10002),
            ('unused', 12345),
            ('unused', 54321),
        ]
        manifest = {
            'type': 'native',
            'environment': 'dev',
            'endpoints': [
                {
                    'name': 'http',
                    'port': 8000,
                    'proto': 'tcp',
                }, {
                    'name': 'ssh',
                    'port': 0,
                    'proto': 'tcp',
                }, {
                    'name': 'dns',
                    'port': 5353,
                    'proto': 'udp',
                }, {
                    'name': 'port0',
                    'port': 0,
                    'proto': 'udp',
                }
            ],
            'ephemeral_ports': {'tcp': 3, 'udp': 0},
        }
        treadmill.runtime.allocate_network_ports(
            '1.2.3.4',
            manifest
        )
        # in the updated manifest, make sure that real_port is specified from
        # the ephemeral range as returned by getsockname; endpoints that
        # requested port 0 get the allocated port as their visible port too.
        self.assertEqual(
            8000,
            manifest['endpoints'][0]['port']
        )
        self.assertEqual(
            50001,
            manifest['endpoints'][0]['real_port']
        )
        self.assertEqual(
            60001,
            manifest['endpoints'][1]['port']
        )
        self.assertEqual(
            60001,
            manifest['endpoints'][1]['real_port']
        )
        self.assertEqual(
            5353,
            manifest['endpoints'][2]['port']
        )
        self.assertEqual(
            12345,
            manifest['endpoints'][2]['real_port']
        )
        self.assertEqual(
            54321,
            manifest['endpoints'][3]['port']
        )
        self.assertEqual(
            54321,
            manifest['endpoints'][3]['real_port']
        )
        self.assertEqual(
            [10000, 10001, 10002],
            manifest['ephemeral_ports']['tcp']
        )
if __name__ == '__main__':
    # Allow running this test module directly: python runtime_test.py
    unittest.main()
|
{
"content_hash": "738d67723a90e2e1f54eab7cccd31958",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 78,
"avg_line_length": 28.255172413793105,
"alnum_prop": 0.48132780082987553,
"repo_name": "ThoughtWorksInc/treadmill",
"id": "dd9af715b1d27f785ce535bb70d2f9ceb53ef4a7",
"size": "4097",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/runtime_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "63"
},
{
"name": "Python",
"bytes": "2244673"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "56861"
}
],
"symlink_target": ""
}
|
from django.db import models
class LocalStorage(models.Model):
    """A named local directory where torrent content is stored."""
    name = models.CharField(max_length=50, unique=True, null=False)
    path = models.CharField(max_length=255, unique=True, null=False)
    def __unicode__(self):
        # E.g. "movies (/mnt/movies)"
        return "%s (%s)" % (self.name, self.path)
class RemoteStorage(models.Model):
    """A remote location, identified only by its path."""
    path = models.CharField(max_length=255, unique=True, null=False)
    def __unicode__(self):
        return "%s" % self.path
class StorageMap(models.Model):
    """Pairing of a local and a remote storage with a minimum ratio."""
    local_ptr = models.ForeignKey(LocalStorage, verbose_name="Local")
    remote_ptr = models.ForeignKey(RemoteStorage, verbose_name="Remote")
    # Minimum ratio threshold for this mapping (defaults to 2.0).
    min_ratio = models.FloatField(null=False, default=2.0)
    class Meta:
        # Each local/remote pair may be mapped only once.
        unique_together = (("local_ptr", "remote_ptr"),)
    def __unicode__(self):
        # NOTE(review): message says "rating" while the field is min_ratio —
        # looks like a typo in the display string; confirm before changing.
        msg = "%s <==> %s: min rating: %f"
        return msg % (self.local_ptr,
                      self.remote_ptr,
                      self.min_ratio)
class Torrent(models.Model):
    """A torrent tracked against a storage mapping."""
    storage_map_ptr = models.ForeignKey(StorageMap,
                                        verbose_name="Storage")
    name = models.TextField(unique=False)
    # Info-hash (40 hex chars); indexed as it is the natural lookup key.
    idhash = models.CharField("Hash", max_length=40, unique=True, db_index=True)
    def __unicode__(self):
        return "%s: %s(%s)" % (self.storage_map_ptr, self.name, self.idhash)
class TorrentFile(models.Model):
    """A single file belonging to a torrent."""
    # NOTE(review): field name "torent_ptr" is misspelled but renaming would
    # change the DB column — keep as-is.
    torent_ptr = models.ForeignKey(Torrent, verbose_name="Torrent")
    path = models.TextField(unique=False)
    def __unicode__(self):
        return "%s: %s" % (self.torent_ptr.name, self.path)
class Setting(models.Model):
    """Simple key/value application setting stored in the DB."""
    name = models.CharField(max_length=50, unique=True, null=False)
    value = models.CharField(max_length=255, unique=False, null=False)
    def __unicode__(self):
        return "%s = %s" % (self.name, self.value)
class LoadLog(models.Model):
    """Log entry recording the progress/outcome of a torrent load."""
    # Result states; RES_PROGRESS is the initial/default state.
    RES_PROGRESS = 0
    RES_SUCCESS = 1
    RES_FAILED = 2
    RES_NOT_FOUND = 3
    RESULT_CHOICES = (
        (RES_PROGRESS, 'Progress'),
        (RES_SUCCESS, 'Success'),
        (RES_FAILED, 'Failed'),
        (RES_NOT_FOUND, 'Not found'),
    )
    result = models.IntegerField(default=RES_PROGRESS, choices=RESULT_CHOICES)
    # Optional link to the torrent being loaded (may be unknown/not found).
    torent_ptr = models.ForeignKey(Torrent, unique=False, null=True,
                                   verbose_name="Torrent")
    text = models.TextField(unique=False)
    created = models.DateTimeField(auto_now_add=True)
    # auto_now=True: updated on every save, so this tracks the last update.
    finished = models.DateTimeField(auto_now=True)
    def __unicode__(self):
        # NOTE(review): indexes RESULT_CHOICES positionally by result value;
        # this only works because values 0..3 match tuple positions.
        return "%s: %s" % (self.text,
                           LoadLog.RESULT_CHOICES[self.result][1])
|
{
"content_hash": "fbb1e8aae4ab2a5a436f620c64d575f0",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 80,
"avg_line_length": 32.1,
"alnum_prop": 0.6152647975077882,
"repo_name": "ReanGD/web-home-manage",
"id": "248062871329c9b41b8c9a731bbcb9bdb340e159",
"size": "2568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "85370"
},
{
"name": "HTML",
"bytes": "79565"
},
{
"name": "JavaScript",
"bytes": "35100"
},
{
"name": "Python",
"bytes": "40034"
},
{
"name": "Shell",
"bytes": "153"
},
{
"name": "TypeScript",
"bytes": "84436"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.views import *
# URL routes for the Spirit user app: authentication, registration,
# password/email management and profile pages.  String view names are
# resolved relative to the 'spirit.views.user' prefix passed to patterns();
# bare callables (logout, password_reset_*) come from
# django.contrib.auth.views via the star import above.
urlpatterns = patterns('spirit.views.user',
    url(r'^login/$', 'custom_login', {'template_name': 'spirit/user/login.html'}, name='user-login'),
    url(r'^logout/$', logout, {'next_page': '/', }, name='user-logout'),
    url(r'^register/$', 'register', name='user-register'),
    url(r'^resend-activation/$', 'resend_activation_email', name='resend-activation'),
    url(r'^activation/(?P<pk>\d+)/(?P<token>[0-9A-Za-z_\-\.]+)/$', 'registration_activation',
        name='registration-activation'),
    url(r'^email-change/(?P<token>[0-9A-Za-z_\-\.]+)/$', 'email_change_confirm',
        name='email-change-confirm'),
    # Password-reset flow delegates to Django's built-in views with
    # Spirit-specific templates.
    url(r'^password-reset/$', 'custom_reset_password', {'template_name': 'spirit/user/password_reset_form.html',
                                                        'email_template_name': 'spirit/user/password_reset_email.html',
                                                        'subject_template_name': 'spirit/user/'
                                                                                 'password_reset_subject.txt',
                                                        'post_reset_redirect': reverse_lazy('spirit:password-reset-done')},
        name='password-reset'),
    url(r'^password-reset/done/$', password_reset_done, {'template_name': 'spirit/user/password_reset_done.html', },
        name='password-reset-done'),
    url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[\w\-]+)/$', password_reset_confirm,
        {'template_name': 'spirit/user/password_reset_confirm.html',
         'post_reset_redirect': reverse_lazy('spirit:password-reset-complete')},
        name='password-reset-confirm'),
    url(r'^reset/done/$', password_reset_complete, {'template_name': 'spirit/user/password_reset_complete.html', },
        name='password-reset-complete'),
    url(r'^$', 'profile_update', name='profile-update'),
    # NOTE(review): 'passwrod-change' looks like a typo for 'password-change';
    # fixing it would change a public URL, so it is flagged rather than fixed.
    url(r'^passwrod-change/$', 'profile_password_change', name='profile-password-change'),
    url(r'^email-change/$', 'profile_email_change', name='profile-email-change'),
    url(r'^(?P<pk>\d+)/$', 'profile_comments', kwargs={'slug': "", }, name='profile-detail'),
    url(r'^(?P<pk>\d+)/(?P<slug>[\w-]+)/$', 'profile_comments', name='profile-detail'),
    url(r'^topics/(?P<pk>\d+)/$', 'profile_topics', kwargs={'slug': "", }, name='profile-topics'),
    url(r'^topics/(?P<pk>\d+)/(?P<slug>[\w-]+)/$', 'profile_topics', name='profile-topics'),
    url(r'^likes/(?P<pk>\d+)/$', 'profile_likes', kwargs={'slug': "", }, name='profile-likes'),
    url(r'^likes/(?P<pk>\d+)/(?P<slug>[\w-]+)/$', 'profile_likes', name='profile-likes'),
    url(r'^menu/$', 'user_menu', name='user-menu'),
)
|
{
"content_hash": "23f07b18f374238b5de9fa8ef1866403",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 123,
"avg_line_length": 59.46808510638298,
"alnum_prop": 0.5713774597495528,
"repo_name": "Si-elegans/Web-based_GUI_Tools",
"id": "0ba88595e0d87c43ab3f3dee3c8867c29a50cc95",
"size": "2819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spirit/urls/user.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "183432"
},
{
"name": "HTML",
"bytes": "821815"
},
{
"name": "JavaScript",
"bytes": "5240621"
},
{
"name": "Python",
"bytes": "2130547"
}
],
"symlink_target": ""
}
|
import sys
import random
import math
from sets import Set
# adds node attribute of which shard node should be placed on
# Number of shards to partition the graph across.
num_shards = 8
# How many streaming passes to make over the node set.
num_runs = 1
# Per-shard capacity bound used by the LDG penalty term.
# NOTE(review): 84000 is presumably the expected node count -- confirm.
capacity = 84000/num_shards
# node id -> shard index, for nodes placed so far.
assignments = dict()
# Current number of nodes assigned to each shard.
shard_sizes = [0] * num_shards
# True: plain neighbour-intersection LDG; False: clustering-weighted variant.
LDG = True
# Adjacency: node id -> Set of successor node ids (edges stored directed).
G = {}
def load(argv):
    """Populate the global adjacency map G from the edge-list file argv[1].

    Expects one 'src dst' pair per line; lines starting with '#' are
    skipped as comments.  Only the src -> dst direction is recorded, but
    both endpoints get an entry in G.
    """
    assert(len(argv) == 2)
    print 'loading graph from file'
    inputfile = open(argv[1], 'r')
    for line in inputfile:
        if line[0] == '#': # ignore comments
            continue
        edge = line.split()
        assert(len(edge) == 2)
        n0 = int(edge[0])
        n1 = int(edge[1])
        if n0 not in G:
            G[n0] = Set([])
        if n1 not in G:
            G[n1] = Set([])
        G[n0].add(n1)
    inputfile.close()
def get_balanced_assignment(tied_shards):
    """Among the tied candidate shards, pick one with the fewest nodes.

    Ties on the minimum size are broken uniformly at random.
    """
    best_size = shard_sizes[tied_shards[0]]
    candidates = []
    for shard in tied_shards:
        size = shard_sizes[shard]
        if size < best_size:
            best_size = size
            candidates = [shard]
        elif size == best_size:
            candidates.append(shard)
    assert(len(candidates) > 0)
    return random.choice(candidates)
def penalty(shard):
    """Linear load discount in [0, 1]: 1.0 for an empty shard, 0.0 at capacity."""
    used_fraction = float(shard_sizes[shard]) / float(capacity)
    return 1.0 - used_fraction
def get_intersection_scores(node):
    """Count, per shard, how many already-placed neighbours of node live there."""
    scores = [0] * num_shards
    for neighbour in G[node]:
        shard = assignments.get(neighbour)
        if shard is not None:
            scores[shard] += 1
    return scores
def clustering_multiplier(num_mutual_friends):
    """Logarithmic weight that grows slowly with the shared-neighbour count."""
    return math.log(num_mutual_friends + 2)
def calc_mutual_friends(n1, n2):
    """Number of neighbours shared by n1 and n2 (set-intersection size)."""
    shared = G[n1].intersection(G[n2])
    return len(shared)
def get_clustering_scores(node):
    """Per-shard score weighting each placed neighbour by mutual-friend count."""
    scores = [0] * num_shards
    for neighbour in G[node]:
        if neighbour not in assignments:
            continue
        weight = clustering_multiplier(calc_mutual_friends(node, neighbour))
        scores[assignments[neighbour]] += weight
    return scores
def get_ldg_assignment(node):
    """Pick a shard for node via Linear Deterministic Greedy (LDG).

    Scores each shard by its (optionally clustering-weighted) overlap with
    node's already-placed neighbours, discounted by how full the shard is,
    and breaks ties by sending the node to the least-loaded tied shard.
    """
    if LDG:
        shard_scores = get_intersection_scores(node)
    else:
        shard_scores = get_clustering_scores(node)
    arg_max = 0.0
    max_indices = []
    for i in range(num_shards):
        val = (float(shard_scores[i])*penalty(i))
        if arg_max < val:
            arg_max = val
            max_indices = [i]
        elif arg_max == val:
            max_indices.append(i)
    assert(len(max_indices) > 0)
    # BUG FIX: the original used 'len(max_indices) is 1', which compares
    # object identity against an int literal.  It only worked because of
    # CPython's small-int caching and raises a SyntaxWarning on Python 3.8+.
    if len(max_indices) == 1:
        return max_indices[0]
    else:
        return get_balanced_assignment(max_indices)
def get_hash_assignment(node):
    """Baseline strategy: place a node purely by its id modulo the shard count."""
    target_shard = node % num_shards
    return target_shard
# --- driver: stream nodes through the partitioner, then dump the results ---
print 'partitioning graph onto ' + str(num_shards) + ' shards using LDG with a capacity constant of ' + str(capacity)
load(sys.argv)
for run in range(num_runs):
    moved = 0
    for n in G:
        orig_loc = -1
        # On repeat passes, temporarily remove the node from its shard so
        # its own placement does not bias the scores.
        if n in assignments:
            shard_sizes[assignments[n]] -= 1
            orig_loc = assignments[n]
        put_on_shard = get_ldg_assignment(n)
        #put_on_shard = get_hash_assignment(n)
        assignments[n] = put_on_shard
        shard_sizes[put_on_shard] += 1
        # Track migrations between passes to watch for convergence.
        if orig_loc != -1 and orig_loc != put_on_shard:
            moved += 1
    print 'Completed run ' + str(run) + ', moved node count = ' + str(moved)
print shard_sizes
'''
colors = [float(assignments[n])/float(num_shards) for n in G.nodes()]
print 'trying to draw graph...'
nx.draw_circular(G, node_color=colors)
plt.show()
'''
# Write "<input>-partitioned.<ext>": a '#<count>' header, one 'node shard'
# line per assignment, then the edge list with ~10% of edges randomly
# tagged 'color blue'.
fname = sys.argv[1].rsplit('.',1)
if len(fname) == 1:
    fileout = open(fname[0] + '-partitioned.', 'w')
else:
    fileout = open(fname[0] + '-partitioned.' + fname[1], 'w')
fileout.write('#' + str(len(assignments)) + '\n')
for (k,v) in assignments.iteritems():
    fileout.write(str(k) + ' ' + str(v) + '\n')
for n in G:
    for nbr in G[n]:
        line = str(n) + ' ' + str(nbr)
        if random.random() > 0.9:
            line += ' color blue\n'
        else:
            line += '\n'
        fileout.write(line)
fileout.close()
# NOTE(review): 'finshed' is a typo inside a runtime message; left unchanged.
print 'finshed writing assignments'
|
{
"content_hash": "5aaa8dd910411fdffcb0a4aaab55373b",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 117,
"avg_line_length": 28.26086956521739,
"alnum_prop": 0.5848717948717949,
"repo_name": "dubey/weaver",
"id": "53d379bf3a79e1557b8982343583203c86dca009",
"size": "3924",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/static_partitioning/stream_partition.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "28104"
},
{
"name": "C++",
"bytes": "1167195"
},
{
"name": "M4",
"bytes": "2567"
},
{
"name": "Makefile",
"bytes": "15173"
},
{
"name": "Objective-C",
"bytes": "583"
},
{
"name": "Python",
"bytes": "164225"
},
{
"name": "Shell",
"bytes": "28932"
}
],
"symlink_target": ""
}
|
"""Test RasterAttributeTables.
Rewrite of
http://trac.osgeo.org/gdal/browser/trunk/autotest/gcore/rat.py
"""
import os
import unittest
from osgeo import gdal
import unittest
from autotest2.gcore import gcore_util
from autotest2.gdrivers import gdrivers_util
class RatTest(unittest.TestCase):
  """Exercises gdal.RasterAttributeTable creation, cloning and PAM round-trip."""

  def setUp(self):
    # Build a 3-row RAT with a 'Value' (min/max) and 'Count' (pixel-count)
    # column, shared as the fixture for all tests.
    rat = gdal.RasterAttributeTable()
    rat.CreateColumn('Value', gdal.GFT_Integer, gdal.GFU_MinMax)
    rat.CreateColumn('Count', gdal.GFT_Integer, gdal.GFU_PixelCount)
    rat.SetRowCount(3)
    rat.SetValueAsInt(0, 0, 10)
    rat.SetValueAsInt(0, 1, 100)
    rat.SetValueAsInt(1, 0, 11)
    rat.SetValueAsInt(1, 1, 200)
    rat.SetValueAsInt(2, 0, 12)
    rat.SetValueAsInt(2, 1, 90)
    self.rat = rat

  def CheckRat(self, rat):
    # Assert the table matches the fixture built in setUp.
    self.assertEqual(rat.GetColumnCount(), 2)
    self.assertEqual(rat.GetRowCount(), 3)
    self.assertEqual(rat.GetNameOfCol(0), 'Value')
    self.assertEqual(rat.GetNameOfCol(1), 'Count')
    self.assertEqual(rat.GetUsageOfCol(1), gdal.GFU_PixelCount)
    self.assertEqual(rat.GetTypeOfCol(1), gdal.GFT_Integer)
    self.assertEqual(rat.GetRowOfValue(11.0), 1)
    self.assertEqual(rat.GetValueAsInt(1, 1), 200)

  def testRat01Clone(self):
    # A cloned table must be equivalent to the original fixture.
    rat = self.rat.Clone()
    self.CheckRat(rat)

  @unittest.skip('Needs gdal > 1.10.0')
  @gdrivers_util.SkipIfDriverMissing(gdrivers_util.PNM_DRIVER)
  def testRat02PnmPlusAuxXml(self):
    # Round-trip the RAT through a PNM file's PAM (.aux.xml) sidecar:
    # write it, read it back, clear it, and confirm persistence behaviour.
    with gdrivers_util.ConfigOption('GDAL_PAM_ENABLED', 'ON'):
      with gcore_util.TestTemporaryDirectory() as tmpdir:
        tmp_filepath = os.path.join(tmpdir, 'rat2.pnm')
        drv = gdal.GetDriverByName('PNM')
        # TODO(schwehr): Use context manager.
        dst = drv.Create(tmp_filepath, 10, 10, 1, gdal.GDT_Byte)
        self.CheckRat(self.rat)
        dst.GetRasterBand(1).SetDefaultRAT(self.rat)
        dst = None
        src = gdal.Open(tmp_filepath, gdal.GA_Update)
        rat = src.GetRasterBand(1).GetDefaultRAT()
        self.CheckRat(rat)
        src.GetRasterBand(1).SetDefaultRAT(None)
        src = None
        src = gdal.Open(tmp_filepath)
        self.assertIsNotNone(src.GetRasterBand(1).GetDefaultRAT())
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
{
"content_hash": "96a32f3559410039296edd7c05388773",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 68,
"avg_line_length": 28.710526315789473,
"alnum_prop": 0.6828597616865261,
"repo_name": "schwehr/gdal-autotest2",
"id": "40b7351e560ff4bccdc0ec2c6a221969924d8c72",
"size": "3982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/gcore/rat_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "725676"
},
{
"name": "Python",
"bytes": "1073608"
}
],
"symlink_target": ""
}
|
"""Support for Logi Circle sensors."""
from __future__ import annotations
import logging
from typing import Any
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_BATTERY_CHARGING,
CONF_MONITORED_CONDITIONS,
CONF_SENSORS,
STATE_OFF,
STATE_ON,
)
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.util.dt import as_local
from .const import ATTRIBUTION, DEVICE_BRAND, DOMAIN as LOGI_CIRCLE_DOMAIN, SENSOR_TYPES
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up a sensor for a Logi Circle device. Obsolete."""
    # Legacy YAML platform setup is no longer supported; this stub only
    # logs a warning and sets nothing up (entries go through
    # async_setup_entry instead).
    _LOGGER.warning("Logi Circle no longer works with sensor platform configuration")
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up a Logi Circle sensor based on a config entry."""
    devices = await hass.data[LOGI_CIRCLE_DOMAIN].cameras
    time_zone = str(hass.config.time_zone)
    monitored_conditions = entry.data.get(CONF_SENSORS).get(CONF_MONITORED_CONDITIONS)
    # Create one entity per (monitored sensor type, camera supporting it).
    entities = []
    for description in SENSOR_TYPES:
        if description.key not in monitored_conditions:
            continue
        for device in devices:
            if device.supports_feature(description.key):
                entities.append(LogiSensor(device, time_zone, description))
    async_add_entities(entities, True)
class LogiSensor(SensorEntity):
    """A sensor implementation for a Logi Circle camera."""

    def __init__(self, camera, time_zone, description: SensorEntityDescription):
        """Initialize a sensor for Logi Circle camera."""
        self.entity_description = description
        self._camera = camera
        # Unique per camera + sensor type, so entities survive restarts.
        self._attr_unique_id = f"{camera.mac_address}-{description.key}"
        self._attr_name = f"{camera.name} {description.name}"
        self._activity: dict[Any, Any] = {}
        self._tz = time_zone

    @property
    def device_info(self):
        """Return information about the device."""
        return {
            "name": self._camera.name,
            "identifiers": {(LOGI_CIRCLE_DOMAIN, self._camera.id)},
            "model": self._camera.model_name,
            "sw_version": self._camera.firmware,
            "manufacturer": DEVICE_BRAND,
        }

    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        state = {
            ATTR_ATTRIBUTION: ATTRIBUTION,
            "battery_saving_mode": (
                STATE_ON if self._camera.battery_saving else STATE_OFF
            ),
            "microphone_gain": self._camera.microphone_gain,
        }
        # Charging info is only meaningful for the battery sensor.
        if self.entity_description.key == "battery_level":
            state[ATTR_BATTERY_CHARGING] = self._camera.charging
        return state

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        # Pick a state-dependent icon for battery/recording/streaming
        # sensors; fall back to the static icon from the description.
        sensor_type = self.entity_description.key
        if sensor_type == "battery_level" and self._attr_native_value is not None:
            return icon_for_battery_level(
                battery_level=int(self._attr_native_value), charging=False
            )
        if sensor_type == "recording_mode" and self._attr_native_value is not None:
            return "mdi:eye" if self._attr_native_value == STATE_ON else "mdi:eye-off"
        if sensor_type == "streaming_mode" and self._attr_native_value is not None:
            return (
                "mdi:camera"
                if self._attr_native_value == STATE_ON
                else "mdi:camera-off"
            )
        return self.entity_description.icon

    async def async_update(self):
        """Get the latest data and updates the state."""
        _LOGGER.debug("Pulling data from %s sensor", self.name)
        await self._camera.update()
        if self.entity_description.key == "last_activity_time":
            # Last-activity sensor: render the most recent activity's end
            # time as a local HH:MM string.
            last_activity = await self._camera.get_last_activity(force_refresh=True)
            if last_activity is not None:
                last_activity_time = as_local(last_activity.end_time_utc)
                self._attr_native_value = (
                    f"{last_activity_time.hour:0>2}:{last_activity_time.minute:0>2}"
                )
        else:
            # Other sensors mirror a camera attribute; booleans become
            # STATE_ON/STATE_OFF, everything else is passed through.
            state = getattr(self._camera, self.entity_description.key, None)
            if isinstance(state, bool):
                self._attr_native_value = STATE_ON if state is True else STATE_OFF
            else:
                self._attr_native_value = state
|
{
"content_hash": "515398e4249e458c5043e8ddd33b28bf",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 88,
"avg_line_length": 37.541666666666664,
"alnum_prop": 0.6275249722530521,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "506711525879f0d94deff52e36a118b2052972d4",
"size": "4505",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/logi_circle/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
'''OpenGL extension HP.image_transform
This module customises the behaviour of the
OpenGL.raw.GL.HP.image_transform to provide a more
Python-friendly API
Overview (from the spec)
This extension provides support for scaling, rotation, and translation
of two-dimensional pixel rectangles at a fixed location in the pixel
transfer process. The 2D image transformation attributes are specified
as individual values so that that implementations may easily detect
scaling and rotation values that lend themselves to optimization. 2D
image transformation occurs immediately after the post-convolution color
table stage of the pixel pipeline. This extension also defines a color
table that is applied immediately after the image transformation operation.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/HP/image_transform.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.HP.image_transform import *
### END AUTOGENERATED SECTION
|
{
"content_hash": "e0e7d9b3de3462d5913714e4e137c8a3",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 76,
"avg_line_length": 42.46153846153846,
"alnum_prop": 0.8152173913043478,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "35bb3647ae100f85d0a037e9e72c6b3de8c307ca",
"size": "1104",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/GL/HP/image_transform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
DOCUMENTATION = '''
---
module: panos_lic
short_description: apply authcode to a device/instance
description:
- Apply an authcode to a device.
- The authcode should have been previously registered on the Palo Alto Networks support portal.
- The device should have Internet access.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
deprecated:
alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
removed_in: "2.12"
why: Consolidating code base.
options:
auth_code:
description:
- authcode to be applied
required: true
force:
description:
- whether to apply authcode even if device is already licensed
required: false
default: "false"
type: bool
extends_documentation_fragment: panos
'''
EXAMPLES = '''
- hosts: localhost
connection: local
tasks:
- name: fetch license
panos_lic:
ip_address: "192.168.1.1"
password: "paloalto"
auth_code: "IBADCODE"
register: result
- name: Display serialnumber (if already registered)
debug:
var: "{{result.serialnumber}}"
'''
RETURN = '''
serialnumber:
description: serialnumber of the device in case that it has been already registered
returned: success
type: str
sample: 007200004214
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_serial(xapi, module):
    """Return the device serial number from 'show system info'.

    Fails the Ansible module if the response lacks a <serial> element.
    """
    xapi.op(cmd="show system info", cmd_xml=True)
    root = xapi.element_root
    serial_node = root.find('.//serial')
    if serial_node is None:
        module.fail_json(msg="No <serial> tag in show system info")
    return serial_node.text
def apply_authcode(xapi, module, auth_code):
    """Fetch licenses for the given auth code.

    The XML API reports success via an exception whose payload contains
    'Successfully', so that case is treated as a normal return; an invalid
    code fails the module, anything else is re-raised.
    """
    try:
        xapi.op(cmd='request license fetch auth-code "%s"' % auth_code,
                cmd_xml=True)
    except pan.xapi.PanXapiError:
        if hasattr(xapi, 'xml_document'):
            document = xapi.xml_document
            if 'Successfully' in document:
                return
            if 'Invalid Auth Code' in document:
                module.fail_json(msg="Invalid Auth Code")
        raise
def fetch_authcode(xapi, module):
    """Fetch licenses already registered for this device on the portal.

    Mirrors apply_authcode: success is signalled through an exception
    payload containing 'Successfully'; an invalid code fails the module.
    """
    try:
        xapi.op(cmd='request license fetch', cmd_xml=True)
    except pan.xapi.PanXapiError:
        if hasattr(xapi, 'xml_document'):
            document = xapi.xml_document
            if 'Successfully' in document:
                return
            if 'Invalid Auth Code' in document:
                module.fail_json(msg="Invalid Auth Code")
        raise
def main():
    # Ansible argument schema; 'force' re-applies the auth code even if the
    # device already reports a serial number (i.e. is already licensed).
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(required=True, no_log=True),
        auth_code=dict(),
        username=dict(default='admin'),
        force=dict(type='bool', default=False)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_LIB:
        module.fail_json(msg='pan-python is required for this module')
    ip_address = module.params["ip_address"]
    password = module.params["password"]
    auth_code = module.params["auth_code"]
    force = module.params['force']
    username = module.params['username']
    # XML API session against the PAN-OS device.
    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=username,
        api_password=password
    )
    if not force:
        # A device reporting a real serial number is already licensed;
        # report "no change" instead of re-applying.
        serialnumber = get_serial(xapi, module)
        if serialnumber != 'unknown':
            return module.exit_json(changed=False, serialnumber=serialnumber)
    if auth_code:
        apply_authcode(xapi, module, auth_code)
    else:
        # No explicit code: fetch whatever is registered on the portal.
        fetch_authcode(xapi, module)
    module.exit_json(changed=True, msg="okey dokey")
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
{
"content_hash": "ea39a6561452211ed9467beca1c432ba",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 99,
"avg_line_length": 26.39072847682119,
"alnum_prop": 0.6220828105395232,
"repo_name": "thaim/ansible",
"id": "c7427f6f7b1a07c5aa68696caebef4da66b3d5a2",
"size": "4794",
"binary": false,
"copies": "40",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/network/panos/_panos_lic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
'''
In this module, we implement binary search in Python both
recursively and iteratively
Assumption: Array is sorted
Time complexity: O(log n)
'''
def binary_search_recursive(arr, left, right, value):
    '''
    Recursive implementation of binary search of a sorted array.

    arr: sorted sequence to search
    left/right: inclusive index bounds of the search window
    value: item to locate
    Return index of the value if found, else return None.
    '''
    if arr and left <= right:
        # BUG FIX: use floor division so the midpoint stays an int; the
        # original '/' produces a float index under Python 3 and raises
        # TypeError when used to subscript the list.
        middle = left + (right - left) // 2
        if arr[middle] == value:
            return middle
        if arr[middle] > value:
            return binary_search_recursive(arr, left, middle - 1, value)
        return binary_search_recursive(arr, middle + 1, right, value)
    return None
def binary_search_iterative(arr, left, right, value):
    '''
    Iterative implementation of binary search of a sorted array.

    arr: sorted sequence to search
    left/right: inclusive index bounds of the search window
    value: item to locate
    Return index of the value if found, else return None.
    '''
    if arr:
        while left <= right:
            # BUG FIX: floor division keeps the midpoint an int; '/' is
            # true division on Python 3 and yields a float index.
            middle = left + (right - left) // 2
            if arr[middle] == value:
                return middle
            elif arr[middle] > value:
                right = middle - 1
            else:
                left = middle + 1
    return None
|
{
"content_hash": "dc20e0e437e370ff37b79be3ca290fe3",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 72,
"avg_line_length": 27.523809523809526,
"alnum_prop": 0.5934256055363322,
"repo_name": "ueg1990/aids",
"id": "5b9c65650a7d03ab53094f02dab6e79bf43127d1",
"size": "1156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aids/sorting_and_searching/binary_search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28251"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class VirtualMachineIdentity(Model):
    """Identity configuration attached to a virtual machine.

    ``principal_id`` and ``tenant_id`` are populated by the server and are
    read-only; they are never serialized into request payloads.

    :ivar principal_id: The principal id of virtual machine identity
     (provided only for a system assigned identity).
    :vartype principal_id: str
    :ivar tenant_id: The tenant id associated with the virtual machine
     (provided only for a system assigned identity).
    :vartype tenant_id: str
    :param type: The type of identity used for the virtual machine.
     'SystemAssigned, UserAssigned' combines an implicitly created identity
     with user assigned identities; 'None' removes all identities. Possible
     values include: 'SystemAssigned', 'UserAssigned',
     'SystemAssigned, UserAssigned', 'None'
    :type type: str or
     ~azure.mgmt.compute.v2017_12_01.models.ResourceIdentityType
    :param identity_ids: ARM resource ids of the user identities associated
     with the virtual machine, in the form:
     '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/identities/{identityName}'.
    :type identity_ids: list[str]
    """

    # Read-only markers keep server-populated fields out of request bodies.
    _validation = {
        'principal_id': {'readonly': True},
        'tenant_id': {'readonly': True},
    }

    # Python attribute -> wire key / msrest type mapping.
    _attribute_map = {
        'principal_id': {'key': 'principalId', 'type': 'str'},
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'type': {'key': 'type', 'type': 'ResourceIdentityType'},
        'identity_ids': {'key': 'identityIds', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(VirtualMachineIdentity, self).__init__(**kwargs)
        # Server-populated fields always start out unset.
        self.principal_id = None
        self.tenant_id = None
        self.type = kwargs.get('type')
        self.identity_ids = kwargs.get('identity_ids')
|
{
"content_hash": "d4bd42669ea859bd30d2a2d842e6b714",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 136,
"avg_line_length": 43.319148936170215,
"alnum_prop": 0.6768172888015717,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "52a97e8b7172b68ac11edfa47a71dd0ef79d735b",
"size": "2510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/v2017_12_01/models/virtual_machine_identity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
"""Test the new API for making and checking interface declarations
"""
import unittest
from zope.interface._compat import _skip_under_py3k, _u
class _Py3ClassAdvice(object):
    # Helper mixin for tests that exec() generated class-advice code: under
    # Python 2 the code must run cleanly (and no longer warn); under Python 3
    # it is expected to raise TypeError unless fails_under_py3k is False.

    def _run_generated_code(self, code, globs, locs,
                            fails_under_py3k=True,
                           ):
        # Returns True when the code executed (py2), False when it raised
        # the expected TypeError (py3); fails the test otherwise.
        import warnings
        from zope.interface._compat import PYTHON3
        with warnings.catch_warnings(record=True) as log:
            warnings.resetwarnings()
            if not PYTHON3:
                exec(code, globs, locs)
                self.assertEqual(len(log), 0) # no longer warn
                return True
            else:
                try:
                    exec(code, globs, locs)
                except TypeError:
                    return False
                else:
                    if fails_under_py3k:
                        self.fail("Didn't raise TypeError")
class NamedTests(unittest.TestCase):
    """Tests for the ``named`` component-name decorator."""

    def test_class(self):
        from zope.interface.declarations import named

        @named(_u('foo'))
        class Decorated(object):
            pass

        self.assertEqual(Decorated.__component_name__, _u('foo'))

    def test_function(self):
        from zope.interface.declarations import named

        @named(_u('foo'))
        def decorated(object):
            pass

        self.assertEqual(decorated.__component_name__, _u('foo'))

    def test_instance(self):
        from zope.interface.declarations import named

        class Plain(object):
            pass

        target = Plain()
        named(_u('foo'))(target)
        self.assertEqual(target.__component_name__, _u('foo'))
class DeclarationTests(unittest.TestCase):
    """Tests for zope.interface.declarations.Declaration: construction,
    change notification, containment, iteration, flattening and the
    +/- declaration arithmetic."""

    def _getTargetClass(self):
        from zope.interface.declarations import Declaration
        return Declaration
    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)
    # --- constructor ---
    def test_ctor_no_bases(self):
        decl = self._makeOne()
        self.assertEqual(list(decl.__bases__), [])
    def test_ctor_w_interface_in_bases(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        decl = self._makeOne(IFoo)
        self.assertEqual(list(decl.__bases__), [IFoo])
    def test_ctor_w_implements_in_bases(self):
        from zope.interface.declarations import Implements
        impl = Implements()
        decl = self._makeOne(impl)
        self.assertEqual(list(decl.__bases__), [impl])
    # --- changed(): must drop the cached _v_attrs in either case ---
    def test_changed_wo_existing__v_attrs(self):
        decl = self._makeOne()
        decl.changed(decl) # doesn't raise
        self.assertFalse('_v_attrs' in decl.__dict__)
    def test_changed_w_existing__v_attrs(self):
        decl = self._makeOne()
        decl._v_attrs = object()
        decl.changed(decl)
        self.assertFalse('_v_attrs' in decl.__dict__)
    # --- __contains__ ---
    def test___contains__w_self(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        decl = self._makeOne()
        self.assertFalse(decl in decl)
    def test___contains__w_unrelated_iface(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        decl = self._makeOne()
        self.assertFalse(IFoo in decl)
    def test___contains__w_base_interface(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        decl = self._makeOne(IFoo)
        self.assertTrue(IFoo in decl)
    # --- __iter__: yields declared interfaces, not inherited bases ---
    def test___iter___empty(self):
        decl = self._makeOne()
        self.assertEqual(list(decl), [])
    def test___iter___single_base(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        decl = self._makeOne(IFoo)
        self.assertEqual(list(decl), [IFoo])
    def test___iter___multiple_bases(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        decl = self._makeOne(IFoo, IBar)
        self.assertEqual(list(decl), [IFoo, IBar])
    def test___iter___inheritance(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar', (IFoo,))
        decl = self._makeOne(IBar)
        self.assertEqual(list(decl), [IBar]) #IBar.interfaces() omits bases
    def test___iter___w_nested_sequence_overlap(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        decl = self._makeOne(IBar, (IFoo, IBar))
        self.assertEqual(list(decl), [IBar, IFoo])
    # --- flattened(): full resolution order, always ending in Interface ---
    def test_flattened_empty(self):
        from zope.interface.interface import Interface
        decl = self._makeOne()
        self.assertEqual(list(decl.flattened()), [Interface])
    def test_flattened_single_base(self):
        from zope.interface.interface import Interface
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        decl = self._makeOne(IFoo)
        self.assertEqual(list(decl.flattened()), [IFoo, Interface])
    def test_flattened_multiple_bases(self):
        from zope.interface.interface import Interface
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        decl = self._makeOne(IFoo, IBar)
        self.assertEqual(list(decl.flattened()), [IFoo, IBar, Interface])
    def test_flattened_inheritance(self):
        from zope.interface.interface import Interface
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar', (IFoo,))
        decl = self._makeOne(IBar)
        self.assertEqual(list(decl.flattened()), [IBar, IFoo, Interface])
    def test_flattened_w_nested_sequence_overlap(self):
        from zope.interface.interface import Interface
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        decl = self._makeOne(IBar, (IFoo, IBar))
        # Note that decl.__iro__ has IFoo first.
        self.assertEqual(list(decl.flattened()), [IFoo, IBar, Interface])
    # --- declaration arithmetic: '-' removes, '+' unions ---
    def test___sub___unrelated_interface(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        before = self._makeOne(IFoo)
        after = before - IBar
        self.assertTrue(isinstance(after, self._getTargetClass()))
        self.assertEqual(list(after), [IFoo])
    def test___sub___related_interface(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        before = self._makeOne(IFoo)
        after = before - IFoo
        self.assertEqual(list(after), [])
    def test___sub___related_interface_by_inheritance(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar', (IFoo,))
        before = self._makeOne(IBar)
        after = before - IBar
        self.assertEqual(list(after), [])
    def test___add___unrelated_interface(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        before = self._makeOne(IFoo)
        after = before + IBar
        self.assertTrue(isinstance(after, self._getTargetClass()))
        self.assertEqual(list(after), [IFoo, IBar])
    def test___add___related_interface(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        IBaz = InterfaceClass('IBaz')
        before = self._makeOne(IFoo, IBar)
        other = self._makeOne(IBar, IBaz)
        after = before + other
        self.assertEqual(list(after), [IFoo, IBar, IBaz])
class ImplementsTests(unittest.TestCase):
    """Tests for zope.interface.declarations.Implements."""

    def _getTargetClass(self):
        from zope.interface.declarations import Implements
        return Implements

    def _makeOne(self, *args, **kw):
        klass = self._getTargetClass()
        return klass(*args, **kw)

    def test_ctor_no_bases(self):
        spec = self._makeOne()
        self.assertEqual(spec.inherit, None)
        self.assertEqual(spec.declared, ())
        self.assertEqual(spec.__name__, '?')
        self.assertEqual(list(spec.__bases__), [])

    def test___repr__(self):
        spec = self._makeOne()
        spec.__name__ = 'Testing'
        self.assertEqual(repr(spec), '<implementedBy Testing>')

    def test___reduce__(self):
        from zope.interface.declarations import implementedBy
        spec = self._makeOne()
        self.assertEqual(spec.__reduce__(), (implementedBy, (None,)))
class Test_implementedByFallback(unittest.TestCase):
    """Tests for the pure-Python ``implementedByFallback``.

    Exercises the corner cases of looking up (and lazily creating) the
    implementation specification of arbitrary objects: ``__slots__``-only
    classes, read-only ``__implemented__`` attributes, the builtin-types
    cache, and factories/classes without an existing spec.
    """

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import implementedByFallback
        return implementedByFallback(*args, **kw)

    def test_dictless_wo_existing_Implements_wo_registrations(self):
        # No instance __dict__, __implemented__ is None, no builtin
        # registration: the computed spec is empty.
        class Foo(object):
            __slots__ = ('__implemented__',)
        foo = Foo()
        foo.__implemented__ = None
        self.assertEqual(list(self._callFUT(foo)), [])

    def test_dictless_wo_existing_Implements_cant_assign___implemented__(self):
        # If the object refuses assignment to __implemented__, the lookup
        # cannot cache a new spec and the TypeError propagates.
        class Foo(object):
            def _get_impl(self): return None
            def _set_impl(self, val): raise TypeError
            __implemented__ = property(_get_impl, _set_impl)
            def __call__(self): pass #act like a factory
        foo = Foo()
        self.assertRaises(TypeError, self._callFUT, foo)

    def test_dictless_wo_existing_Implements_w_registrations(self):
        # A registration in BuiltinImplementationSpecifications wins over
        # computing a new spec.
        from zope.interface import declarations
        class Foo(object):
            __slots__ = ('__implemented__',)
        foo = Foo()
        foo.__implemented__ = None
        reg = object()
        with _MonkeyDict(declarations,
                         'BuiltinImplementationSpecifications') as specs:
            specs[foo] = reg
            self.assertTrue(self._callFUT(foo) is reg)

    def test_dictless_w_existing_Implements(self):
        # An existing Implements instance is returned unchanged.
        from zope.interface.declarations import Implements
        impl = Implements()
        class Foo(object):
            __slots__ = ('__implemented__',)
        foo = Foo()
        foo.__implemented__ = impl
        self.assertTrue(self._callFUT(foo) is impl)

    def test_dictless_w_existing_not_Implements(self):
        # A plain tuple of interfaces is normalized into a spec.
        from zope.interface.interface import InterfaceClass
        class Foo(object):
            __slots__ = ('__implemented__',)
        foo = Foo()
        IFoo = InterfaceClass('IFoo')
        foo.__implemented__ = (IFoo,)
        self.assertEqual(list(self._callFUT(foo)), [IFoo])

    def test_w_existing_attr_as_Implements(self):
        from zope.interface.declarations import Implements
        impl = Implements()
        class Foo(object):
            __implemented__ = impl
        self.assertTrue(self._callFUT(Foo) is impl)

    def test_builtins_added_to_cache(self):
        # Builtin types get a spec created on demand and cached in
        # BuiltinImplementationSpecifications (they can't be written to).
        from zope.interface import declarations
        from zope.interface.declarations import Implements
        from zope.interface._compat import _BUILTINS
        with _MonkeyDict(declarations,
                         'BuiltinImplementationSpecifications') as specs:
            self.assertEqual(list(self._callFUT(tuple)), [])
            self.assertEqual(list(self._callFUT(list)), [])
            self.assertEqual(list(self._callFUT(dict)), [])
            for typ in (tuple, list, dict):
                spec = specs[typ]
                self.assertTrue(isinstance(spec, Implements))
                self.assertEqual(repr(spec),
                                 '<implementedBy %s.%s>'
                                 % (_BUILTINS, typ.__name__))

    def test_builtins_w_existing_cache(self):
        # Pre-seeded cache entries are returned as-is.
        from zope.interface import declarations
        t_spec, l_spec, d_spec = object(), object(), object()
        with _MonkeyDict(declarations,
                         'BuiltinImplementationSpecifications') as specs:
            specs[tuple] = t_spec
            specs[list] = l_spec
            specs[dict] = d_spec
            self.assertTrue(self._callFUT(tuple) is t_spec)
            self.assertTrue(self._callFUT(list) is l_spec)
            self.assertTrue(self._callFUT(dict) is d_spec)

    def test_oldstyle_class_no_assertions(self):
        # TODO: Figure out P3 story
        class Foo:
            pass
        self.assertEqual(list(self._callFUT(Foo)), [])

    def test_no_assertions(self):
        # TODO: Figure out P3 story
        class Foo(object):
            pass
        self.assertEqual(list(self._callFUT(Foo)), [])

    def test_w_None_no_bases_not_factory(self):
        # A non-callable instance with __implemented__ = None cannot get
        # a spec synthesized for it.
        class Foo(object):
            __implemented__ = None
        foo = Foo()
        self.assertRaises(TypeError, self._callFUT, foo)

    def test_w_None_no_bases_w_factory(self):
        # Callables (factories) get a spec named after module + __name__,
        # and the spec is cached on the object.
        from zope.interface.declarations import objectSpecificationDescriptor
        class Foo(object):
            __implemented__ = None
            def __call__(self):
                pass
        foo = Foo()
        foo.__name__ = 'foo'
        spec = self._callFUT(foo)
        self.assertEqual(spec.__name__,
                         'zope.interface.tests.test_declarations.foo')
        self.assertTrue(spec.inherit is foo)
        self.assertTrue(foo.__implemented__ is spec)
        self.assertTrue(foo.__providedBy__ is objectSpecificationDescriptor)
        # Factories do not get a __provides__ descriptor, unlike classes.
        self.assertFalse('__provides__' in foo.__dict__)

    def test_w_None_no_bases_w_class(self):
        # Classes additionally get ClassProvides descriptors installed.
        from zope.interface.declarations import ClassProvides
        class Foo(object):
            __implemented__ = None
        spec = self._callFUT(Foo)
        self.assertEqual(spec.__name__,
                         'zope.interface.tests.test_declarations.Foo')
        self.assertTrue(spec.inherit is Foo)
        self.assertTrue(Foo.__implemented__ is spec)
        self.assertTrue(isinstance(Foo.__providedBy__, ClassProvides))
        self.assertTrue(isinstance(Foo.__provides__, ClassProvides))
        self.assertEqual(Foo.__provides__, Foo.__providedBy__)

    def test_w_existing_Implements(self):
        from zope.interface.declarations import Implements
        impl = Implements()
        class Foo(object):
            __implemented__ = impl
        self.assertTrue(self._callFUT(Foo) is impl)
class Test_implementedBy(Test_implementedByFallback):
    """Re-run the fallback suite against the (possibly C-optimized)
    ``implementedBy``; only the function under test differs."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import implementedBy as fut
        return fut(*args, **kw)
class Test_classImplementsOnly(unittest.TestCase):
    """Tests for ``zope.interface.declarations.classImplementsOnly``."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import classImplementsOnly
        return classImplementsOnly(*args, **kw)

    def test_no_existing(self):
        # On a class with no prior declaration a fresh spec is created;
        # 'Only' means nothing is inherited (spec.inherit is None).
        from zope.interface.declarations import ClassProvides
        from zope.interface.interface import InterfaceClass
        class Foo(object):
            pass
        ifoo = InterfaceClass('IFoo')
        self._callFUT(Foo, ifoo)
        spec = Foo.__implemented__
        self.assertEqual(spec.__name__,
                         'zope.interface.tests.test_declarations.Foo')
        self.assertTrue(spec.inherit is None)
        self.assertTrue(Foo.__implemented__ is spec)
        self.assertTrue(isinstance(Foo.__providedBy__, ClassProvides))
        self.assertTrue(isinstance(Foo.__provides__, ClassProvides))
        self.assertEqual(Foo.__provides__, Foo.__providedBy__)

    def test_w_existing_Implements(self):
        # An existing spec object is reused in place, but its inherit and
        # declared values are replaced (not merged).
        from zope.interface.declarations import Implements
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        impl = Implements(IFoo)
        impl.declared = (IFoo,)
        class Foo(object):
            __implemented__ = impl
        impl.inherit = Foo
        self._callFUT(Foo, IBar)
        # Same spec, now different values
        self.assertTrue(Foo.__implemented__ is impl)
        self.assertEqual(impl.inherit, None)
        self.assertEqual(impl.declared, (IBar,))
class Test_classImplements(unittest.TestCase):
    """Tests for ``zope.interface.declarations.classImplements``."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import classImplements
        return classImplements(*args, **kw)

    def test_no_existing(self):
        # Unlike classImplementsOnly, the new spec inherits from the class.
        from zope.interface.declarations import ClassProvides
        from zope.interface.interface import InterfaceClass
        class Foo(object):
            pass
        IFoo = InterfaceClass('IFoo')
        self._callFUT(Foo, IFoo)
        spec = Foo.__implemented__
        self.assertEqual(spec.__name__,
                         'zope.interface.tests.test_declarations.Foo')
        self.assertTrue(spec.inherit is Foo)
        self.assertTrue(Foo.__implemented__ is spec)
        self.assertTrue(isinstance(Foo.__providedBy__, ClassProvides))
        self.assertTrue(isinstance(Foo.__provides__, ClassProvides))
        self.assertEqual(Foo.__provides__, Foo.__providedBy__)

    def test_w_existing_Implements(self):
        # Declarations are appended to the existing spec rather than
        # replacing it; inherit is preserved.
        from zope.interface.declarations import Implements
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        impl = Implements(IFoo)
        impl.declared = (IFoo,)
        class Foo(object):
            __implemented__ = impl
        impl.inherit = Foo
        self._callFUT(Foo, IBar)
        # Same spec, now different values
        self.assertTrue(Foo.__implemented__ is impl)
        self.assertEqual(impl.inherit, Foo)
        self.assertEqual(impl.declared, (IFoo, IBar,))

    def test_w_existing_Implements_w_bases(self):
        # Base classes' specs become bases of the updated spec, after the
        # directly-declared interfaces.
        from zope.interface.declarations import Implements
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        IBaz = InterfaceClass('IBaz', IFoo)
        b_impl = Implements(IBaz)
        impl = Implements(IFoo)
        impl.declared = (IFoo,)
        class Base1(object):
            __implemented__ = b_impl
        class Base2(object):
            __implemented__ = b_impl
        class Foo(Base1, Base2):
            __implemented__ = impl
        impl.inherit = Foo
        self._callFUT(Foo, IBar)
        # Same spec, now different values
        self.assertTrue(Foo.__implemented__ is impl)
        self.assertEqual(impl.inherit, Foo)
        self.assertEqual(impl.declared, (IFoo, IBar,))
        # b_impl is shared by both bases but appears only once.
        self.assertEqual(impl.__bases__, (IFoo, IBar, b_impl))
class Test__implements_advice(unittest.TestCase):
    """Tests for the class-advice hook ``_implements_advice``.

    The hook consumes ``__implements_advice_data__`` (interfaces plus the
    classImplements-style function to apply) and removes it from the class.
    """

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import _implements_advice
        return _implements_advice(*args, **kw)

    def test_no_existing_implements(self):
        from zope.interface.declarations import classImplements
        from zope.interface.declarations import Implements
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        class Foo(object):
            __implements_advice_data__ = ((IFoo,), classImplements)
        self._callFUT(Foo)
        # The advice payload must be consumed, not left on the class.
        self.assertFalse('__implements_advice_data__' in Foo.__dict__)
        self.assertTrue(isinstance(Foo.__implemented__, Implements))
        self.assertEqual(list(Foo.__implemented__), [IFoo])
class Test_implementer(unittest.TestCase):
    """Tests for the ``@implementer`` class/object decorator."""

    def _getTargetClass(self):
        from zope.interface.declarations import implementer
        return implementer

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def test_oldstyle_class(self):
        # TODO Py3 story
        from zope.interface.declarations import ClassProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        class Foo:
            pass
        decorator = self._makeOne(IFoo)
        returned = decorator(Foo)
        # The decorator mutates and returns the same class.
        self.assertTrue(returned is Foo)
        spec = Foo.__implemented__
        self.assertEqual(spec.__name__,
                         'zope.interface.tests.test_declarations.Foo')
        self.assertTrue(spec.inherit is Foo)
        self.assertTrue(Foo.__implemented__ is spec)
        self.assertTrue(isinstance(Foo.__providedBy__, ClassProvides))
        self.assertTrue(isinstance(Foo.__provides__, ClassProvides))
        self.assertEqual(Foo.__provides__, Foo.__providedBy__)

    def test_newstyle_class(self):
        from zope.interface.declarations import ClassProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        class Foo(object):
            pass
        decorator = self._makeOne(IFoo)
        returned = decorator(Foo)
        self.assertTrue(returned is Foo)
        spec = Foo.__implemented__
        self.assertEqual(spec.__name__,
                         'zope.interface.tests.test_declarations.Foo')
        self.assertTrue(spec.inherit is Foo)
        self.assertTrue(Foo.__implemented__ is spec)
        self.assertTrue(isinstance(Foo.__providedBy__, ClassProvides))
        self.assertTrue(isinstance(Foo.__provides__, ClassProvides))
        self.assertEqual(Foo.__provides__, Foo.__providedBy__)

    def test_nonclass_cannot_assign_attr(self):
        # Plain object() has no __dict__, so the decorator cannot attach
        # __implemented__ and must raise.
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        decorator = self._makeOne(IFoo)
        self.assertRaises(TypeError, decorator, object())

    def test_nonclass_can_assign_attr(self):
        # A non-class instance that CAN hold attributes gets a bare spec
        # with no inheritance and the placeholder name '?'.
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        class Foo(object):
            pass
        foo = Foo()
        decorator = self._makeOne(IFoo)
        returned = decorator(foo)
        self.assertTrue(returned is foo)
        spec = foo.__implemented__
        self.assertEqual(spec.__name__, '?')
        self.assertTrue(spec.inherit is None)
        self.assertTrue(foo.__implemented__ is spec)
class Test_implementer_only(unittest.TestCase):
    """Tests for the ``@implementer_only`` decorator.

    It replaces (rather than extends) any existing declaration, and is
    only valid on classes — not functions or methods.
    """

    def _getTargetClass(self):
        from zope.interface.declarations import implementer_only
        return implementer_only

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def test_function(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        decorator = self._makeOne(IFoo)
        def _function(): pass
        self.assertRaises(ValueError, decorator, _function)

    def test_method(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        decorator = self._makeOne(IFoo)
        class Bar:
            def _method(): pass
        self.assertRaises(ValueError, decorator, Bar._method)

    def test_oldstyle_class(self):
        # TODO Py3 story
        from zope.interface.declarations import Implements
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        old_spec = Implements(IBar)
        class Foo:
            __implemented__ = old_spec
        decorator = self._makeOne(IFoo)
        returned = decorator(Foo)
        self.assertTrue(returned is Foo)
        # The prior declaration (IBar) is discarded; a fresh spec with no
        # inheritance replaces it.
        spec = Foo.__implemented__
        self.assertEqual(spec.__name__, '?')
        self.assertTrue(spec.inherit is None)
        self.assertTrue(Foo.__implemented__ is spec)

    def test_newstyle_class(self):
        from zope.interface.declarations import Implements
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        old_spec = Implements(IBar)
        class Foo(object):
            __implemented__ = old_spec
        decorator = self._makeOne(IFoo)
        returned = decorator(Foo)
        self.assertTrue(returned is Foo)
        spec = Foo.__implemented__
        self.assertEqual(spec.__name__, '?')
        self.assertTrue(spec.inherit is None)
        self.assertTrue(Foo.__implemented__ is spec)
# Test '_implements' by way of 'implements{,Only}', its only callers.
class Test_implementsOnly(unittest.TestCase, _Py3ClassAdvice):
    """Tests for the (Python-2 only) ``implementsOnly`` class advice.

    The code under test is exec'd from a string because the advice only
    works when called from a class body; under Python 3 class advice is
    unsupported and raises TypeError instead.
    """

    def _getFUT(self):
        from zope.interface.declarations import implementsOnly
        return implementsOnly

    def test_simple(self):
        import warnings
        from zope.interface.declarations import implementsOnly
        from zope.interface._compat import PYTHON3
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'implementsOnly': implementsOnly,
                 'IFoo': IFoo,
                }
        locs = {}
        # BUG FIX: the original list was missing a comma after
        # 'class Foo(object):', fusing the two literals into a single
        # source line.  The intent (matching every other generated CODE
        # in this module) is a two-line class body.
        CODE = "\n".join([
            'class Foo(object):',
            '    implementsOnly(IFoo)',
            ])
        with warnings.catch_warnings(record=True) as log:
            warnings.resetwarnings()
            try:
                exec(CODE, globs, locs)
            except TypeError:
                if not PYTHON3:
                    raise
            else:
                if PYTHON3:
                    self.fail("Didn't raise TypeError")
                Foo = locs['Foo']
                spec = Foo.__implemented__
                self.assertEqual(list(spec), [IFoo])
                self.assertEqual(len(log), 0) # no longer warn

    def test_called_once_from_class_w_bases(self):
        from zope.interface.declarations import implements
        from zope.interface.declarations import implementsOnly
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        globs = {'implements': implements,
                 'implementsOnly': implementsOnly,
                 'IFoo': IFoo,
                 'IBar': IBar,
                }
        locs = {}
        # BUG FIX: same missing comma as above, after 'class Bar(Foo):'.
        CODE = "\n".join([
            'class Foo(object):',
            '    implements(IFoo)',
            'class Bar(Foo):',
            '    implementsOnly(IBar)',
            ])
        if self._run_generated_code(CODE, globs, locs):
            Bar = locs['Bar']
            spec = Bar.__implemented__
            # 'Only' replaces the inherited declaration of IFoo.
            self.assertEqual(list(spec), [IBar])
class Test_implements(unittest.TestCase, _Py3ClassAdvice):
    """Tests for the (Python-2 only) ``implements`` class advice."""

    def _getFUT(self):
        from zope.interface.declarations import implements
        return implements

    def test_called_from_function(self):
        # 'implements' may only be used inside a class body; calling it
        # from a function raises TypeError at call time.
        import warnings
        from zope.interface.declarations import implements
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'implements': implements, 'IFoo': IFoo}
        locs = {}
        CODE = "\n".join([
            'def foo():',
            '    implements(IFoo)'
            ])
        if self._run_generated_code(CODE, globs, locs, False):
            foo = locs['foo']
            with warnings.catch_warnings(record=True) as log:
                warnings.resetwarnings()
                self.assertRaises(TypeError, foo)
                self.assertEqual(len(log), 0) # no longer warn

    def test_called_twice_from_class(self):
        # A second call within one class body is an error on Python 2;
        # on Python 3 the first call already raises.
        import warnings
        from zope.interface.declarations import implements
        from zope.interface.interface import InterfaceClass
        from zope.interface._compat import PYTHON3
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        globs = {'implements': implements, 'IFoo': IFoo, 'IBar': IBar}
        locs = {}
        CODE = "\n".join([
            'class Foo(object):',
            '    implements(IFoo)',
            '    implements(IBar)',
            ])
        with warnings.catch_warnings(record=True) as log:
            warnings.resetwarnings()
            try:
                exec(CODE, globs, locs)
            except TypeError:
                if not PYTHON3:
                    self.assertEqual(len(log), 0) # no longer warn
            else:
                self.fail("Didn't raise TypeError")

    def test_called_once_from_class(self):
        from zope.interface.declarations import implements
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'implements': implements, 'IFoo': IFoo}
        locs = {}
        CODE = "\n".join([
            'class Foo(object):',
            '    implements(IFoo)',
            ])
        if self._run_generated_code(CODE, globs, locs):
            Foo = locs['Foo']
            spec = Foo.__implemented__
            self.assertEqual(list(spec), [IFoo])
class ProvidesClassTests(unittest.TestCase):
    """Tests for ``zope.interface.declarations.ProvidesClass``."""

    def _getTargetClass(self):
        from zope.interface.declarations import ProvidesClass
        return ProvidesClass

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def test_simple_class_one_interface(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        spec = self._makeOne(Foo, IFoo)
        self.assertEqual(list(spec), [IFoo])

    def test___reduce__(self):
        # Pickling round-trips through the caching Provides() factory,
        # not the class itself.
        from zope.interface.declarations import Provides # the function
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        spec = self._makeOne(Foo, IFoo)
        klass, args = spec.__reduce__()
        self.assertTrue(klass is Provides)
        self.assertEqual(args, (Foo, IFoo))

    def test___get___class(self):
        # As a descriptor, class access returns the spec itself.
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        spec = self._makeOne(Foo, IFoo)
        Foo.__provides__ = spec
        self.assertTrue(Foo.__provides__ is spec)

    def test___get___instance(self):
        # Instance access raises AttributeError: the declaration applies
        # to the class object, not its instances.
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        spec = self._makeOne(Foo, IFoo)
        Foo.__provides__ = spec
        def _test():
            foo = Foo()
            return foo.__provides__
        self.assertRaises(AttributeError, _test)
class Test_Provides(unittest.TestCase):
    """Tests for the caching ``Provides`` factory function.

    The factory memoizes specs in ``InstanceDeclarations`` keyed on the
    full argument tuple, so identical declarations share one object.
    """

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import Provides
        return Provides(*args, **kw)

    def test_no_cached_spec(self):
        # Cache miss: a new spec is built and stored under (cls, iface).
        from zope.interface import declarations
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        cache = {}
        class Foo(object):
            pass
        with _Monkey(declarations, InstanceDeclarations=cache):
            spec = self._callFUT(Foo, IFoo)
        self.assertEqual(list(spec), [IFoo])
        self.assertTrue(cache[(Foo, IFoo)] is spec)

    def test_w_cached_spec(self):
        # Cache hit: the previously stored object is returned untouched.
        from zope.interface import declarations
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        prior = object()
        class Foo(object):
            pass
        cache = {(Foo, IFoo): prior}
        with _Monkey(declarations, InstanceDeclarations=cache):
            spec = self._callFUT(Foo, IFoo)
        self.assertTrue(spec is prior)
class Test_directlyProvides(unittest.TestCase):
    """Tests for ``zope.interface.declarations.directlyProvides``."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import directlyProvides
        return directlyProvides(*args, **kw)

    def test_w_normal_object(self):
        from zope.interface.declarations import ProvidesClass
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        obj = Foo()
        self._callFUT(obj, IFoo)
        self.assertTrue(isinstance(obj.__provides__, ProvidesClass))
        self.assertEqual(list(obj.__provides__), [IFoo])

    def test_w_class(self):
        # Declaring on a class installs a ClassProvides, not a plain
        # ProvidesClass, so instances are unaffected.
        from zope.interface.declarations import ClassProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        self._callFUT(Foo, IFoo)
        self.assertTrue(isinstance(Foo.__provides__, ClassProvides))
        self.assertEqual(list(Foo.__provides__), [IFoo])

    @_skip_under_py3k
    def test_w_non_descriptor_aware_metaclass(self):
        # There are no non-descriptor-aware types in Py3k
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class MetaClass(type):
            def __getattribute__(self, name):
                # Emulate metaclass whose base is not the type object.
                if name == '__class__':
                    return self
                return type.__getattribute__(self, name)
        class Foo(object):
            __metaclass__ = MetaClass
        obj = Foo()
        self.assertRaises(TypeError, self._callFUT, obj, IFoo)

    def test_w_classless_object(self):
        # Objects claiming to have no __class__ still get a declaration
        # stashed on them via __setattr__.
        from zope.interface.declarations import ProvidesClass
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        the_dict = {}
        class Foo(object):
            def __getattribute__(self, name):
                # Emulate object w/o any class
                if name == '__class__':
                    return None
                try:
                    return the_dict[name]
                except KeyError:
                    raise AttributeError(name)
            def __setattr__(self, name, value):
                the_dict[name] = value
        obj = Foo()
        self._callFUT(obj, IFoo)
        self.assertTrue(isinstance(the_dict['__provides__'], ProvidesClass))
        self.assertEqual(list(the_dict['__provides__']), [IFoo])
class Test_alsoProvides(unittest.TestCase):
    """Tests for ``alsoProvides``, which extends an object's direct
    declaration instead of replacing it."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import alsoProvides
        return alsoProvides(*args, **kw)

    def test_wo_existing_provides(self):
        # With no prior declaration it behaves like directlyProvides.
        from zope.interface.declarations import ProvidesClass
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        obj = Foo()
        self._callFUT(obj, IFoo)
        self.assertTrue(isinstance(obj.__provides__, ProvidesClass))
        self.assertEqual(list(obj.__provides__), [IFoo])

    def test_w_existing_provides(self):
        # Existing interfaces are kept and the new one is appended.
        from zope.interface.declarations import directlyProvides
        from zope.interface.declarations import ProvidesClass
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        class Foo(object):
            pass
        obj = Foo()
        directlyProvides(obj, IFoo)
        self._callFUT(obj, IBar)
        self.assertTrue(isinstance(obj.__provides__, ProvidesClass))
        self.assertEqual(list(obj.__provides__), [IFoo, IBar])
class Test_noLongerProvides(unittest.TestCase):
    """Tests for ``noLongerProvides``, which removes an interface from an
    object's direct declaration."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import noLongerProvides
        return noLongerProvides(*args, **kw)

    def test_wo_existing_provides(self):
        # Removing from an object with no declaration is a no-op that
        # leaves an empty declaration behind.
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        obj = Foo()
        self._callFUT(obj, IFoo)
        self.assertEqual(list(obj.__provides__), [])

    def test_w_existing_provides_hit(self):
        from zope.interface.declarations import directlyProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        obj = Foo()
        directlyProvides(obj, IFoo)
        self._callFUT(obj, IFoo)
        self.assertEqual(list(obj.__provides__), [])

    def test_w_existing_provides_miss(self):
        # Removing an interface not directly provided leaves the others.
        from zope.interface.declarations import directlyProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        class Foo(object):
            pass
        obj = Foo()
        directlyProvides(obj, IFoo)
        self._callFUT(obj, IBar)
        self.assertEqual(list(obj.__provides__), [IFoo])

    def test_w_iface_implemented_by_class(self):
        # An interface implemented by the class (rather than directly
        # provided by the instance) cannot be removed per-instance.
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        @implementer(IFoo)
        class Foo(object):
            pass
        obj = Foo()
        self.assertRaises(ValueError, self._callFUT, obj, IFoo)
class ClassProvidesBaseFallbackTests(unittest.TestCase):
    """Tests for the ``ClassProvidesBaseFallback`` descriptor."""

    def _getTargetClass(self):
        from zope.interface.declarations import ClassProvidesBaseFallback
        return ClassProvidesBaseFallback

    def _makeOne(self, klass, implements):
        # Don't instantiate directly:  the C version can't have attributes
        # assigned.
        class Derived(self._getTargetClass()):
            def __init__(self, k, i):
                self._cls = k
                self._implements = i
        return Derived(klass, implements)

    def test_w_same_class_via_class(self):
        # Accessed through the exact class it was created for, the
        # descriptor returns itself.
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        cpbp = Foo.__provides__ = self._makeOne(Foo, IFoo)
        self.assertTrue(Foo.__provides__ is cpbp)

    def test_w_same_class_via_instance(self):
        # Accessed through an instance, it returns the stored implements
        # value instead.
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        foo = Foo()
        cpbp = Foo.__provides__ = self._makeOne(Foo, IFoo)
        self.assertTrue(foo.__provides__ is IFoo)

    def test_w_different_class(self):
        # A subclass (or its instances) does not inherit the declaration;
        # access raises AttributeError.
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        class Bar(Foo):
            pass
        bar = Bar()
        cpbp = Foo.__provides__ = self._makeOne(Foo, IFoo)
        self.assertRaises(AttributeError, getattr, Bar, '__provides__')
        self.assertRaises(AttributeError, getattr, bar, '__provides__')
class ClassProvidesBaseTests(ClassProvidesBaseFallbackTests):
    """Re-run the fallback suite against the (possibly C-optimized)
    ``ClassProvidesBase``; only the target class differs."""

    def _getTargetClass(self):
        from zope.interface.declarations import ClassProvidesBase as target
        return target
class ClassProvidesTests(unittest.TestCase):
    """Tests for ``zope.interface.declarations.ClassProvides``."""

    def _getTargetClass(self):
        from zope.interface.declarations import ClassProvides
        return ClassProvides

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def test_w_simple_metaclass(self):
        # Class access yields the ClassProvides itself; instance access
        # falls back to what the class implements (IFoo), not IBar.
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        @implementer(IFoo)
        class Foo(object):
            pass
        cp = Foo.__provides__ = self._makeOne(Foo, type(Foo), IBar)
        self.assertTrue(Foo.__provides__ is cp)
        self.assertEqual(list(Foo().__provides__), [IFoo])

    def test___reduce__(self):
        # Pickling reconstructs via (class, (cls, metaclass, *ifaces)).
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        @implementer(IFoo)
        class Foo(object):
            pass
        cp = Foo.__provides__ = self._makeOne(Foo, type(Foo), IBar)
        self.assertEqual(cp.__reduce__(),
                         (self._getTargetClass(), (Foo, type(Foo), IBar)))
class Test_directlyProvidedBy(unittest.TestCase):
    """Tests for ``directlyProvidedBy``: only the instance's own direct
    declaration counts, never what the class implements."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import directlyProvidedBy
        return directlyProvidedBy(*args, **kw)

    def test_wo_declarations_in_class_or_instance(self):
        class Foo(object):
            pass
        foo = Foo()
        self.assertEqual(list(self._callFUT(foo)), [])

    def test_w_declarations_in_class_but_not_instance(self):
        # Class-level @implementer does NOT show up as directly provided.
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        @implementer(IFoo)
        class Foo(object):
            pass
        foo = Foo()
        self.assertEqual(list(self._callFUT(foo)), [])

    def test_w_declarations_in_instance_but_not_class(self):
        from zope.interface.declarations import directlyProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        foo = Foo()
        directlyProvides(foo, IFoo)
        self.assertEqual(list(self._callFUT(foo)), [IFoo])

    def test_w_declarations_in_instance_and_class(self):
        # Only the instance's declaration (IBar) is reported.
        from zope.interface.declarations import directlyProvides
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        @implementer(IFoo)
        class Foo(object):
            pass
        foo = Foo()
        directlyProvides(foo, IBar)
        self.assertEqual(list(self._callFUT(foo)), [IBar])
class Test_classProvides(unittest.TestCase, _Py3ClassAdvice):
    """Tests for the (Python-2 only) ``classProvides`` class advice."""

    def _getFUT(self):
        from zope.interface.declarations import classProvides
        return classProvides

    def test_called_from_function(self):
        # Calling the advice from a function body raises TypeError when
        # the function is invoked.
        import warnings
        from zope.interface.declarations import classProvides
        from zope.interface.interface import InterfaceClass
        from zope.interface._compat import PYTHON3
        IFoo = InterfaceClass("IFoo")
        globs = {'classProvides': classProvides, 'IFoo': IFoo}
        locs = {}
        CODE = "\n".join([
            'def foo():',
            '    classProvides(IFoo)'
            ])
        exec(CODE, globs, locs)
        foo = locs['foo']
        with warnings.catch_warnings(record=True) as log:
            warnings.resetwarnings()
            self.assertRaises(TypeError, foo)
            if not PYTHON3:
                self.assertEqual(len(log), 0) # no longer warn

    def test_called_twice_from_class(self):
        # A second call within one class body is an error on Python 2;
        # on Python 3 the first call already raises.
        import warnings
        from zope.interface.declarations import classProvides
        from zope.interface.interface import InterfaceClass
        from zope.interface._compat import PYTHON3
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        globs = {'classProvides': classProvides, 'IFoo': IFoo, 'IBar': IBar}
        locs = {}
        CODE = "\n".join([
            'class Foo(object):',
            '    classProvides(IFoo)',
            '    classProvides(IBar)',
            ])
        with warnings.catch_warnings(record=True) as log:
            warnings.resetwarnings()
            try:
                exec(CODE, globs, locs)
            except TypeError:
                if not PYTHON3:
                    self.assertEqual(len(log), 0) # no longer warn
            else:
                self.fail("Didn't raise TypeError")

    def test_called_once_from_class(self):
        from zope.interface.declarations import classProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'classProvides': classProvides, 'IFoo': IFoo}
        locs = {}
        CODE = "\n".join([
            'class Foo(object):',
            '    classProvides(IFoo)',
            ])
        if self._run_generated_code(CODE, globs, locs):
            Foo = locs['Foo']
            spec = Foo.__providedBy__
            self.assertEqual(list(spec), [IFoo])
# Test _classProvides_advice through classProvides, its only caller.
class Test_provider(unittest.TestCase):
    """Tests for the ``@provider`` class decorator (the Python-3-friendly
    replacement for the ``classProvides`` advice)."""

    def _getTargetClass(self):
        from zope.interface.declarations import provider
        return provider

    def _makeOne(self, *args, **kw):
        return self._getTargetClass()(*args, **kw)

    def test_w_class(self):
        from zope.interface.declarations import ClassProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        @self._makeOne(IFoo)
        class Foo(object):
            pass
        self.assertTrue(isinstance(Foo.__provides__, ClassProvides))
        self.assertEqual(list(Foo.__provides__), [IFoo])
class Test_moduleProvides(unittest.TestCase):
    """Tests for ``moduleProvides``, which may only be called once, from
    module (top-level) scope."""

    def _getFUT(self):
        from zope.interface.declarations import moduleProvides
        return moduleProvides

    def test_called_from_function(self):
        # Calling from a function body raises TypeError when invoked.
        from zope.interface.declarations import moduleProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'__name__': 'zope.interface.tests.foo',
                 'moduleProvides': moduleProvides, 'IFoo': IFoo}
        locs = {}
        CODE = "\n".join([
            'def foo():',
            '    moduleProvides(IFoo)'
            ])
        exec(CODE, globs, locs)
        foo = locs['foo']
        self.assertRaises(TypeError, foo)

    def test_called_from_class(self):
        # Calling from a class body raises TypeError immediately.
        from zope.interface.declarations import moduleProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'__name__': 'zope.interface.tests.foo',
                 'moduleProvides': moduleProvides, 'IFoo': IFoo}
        locs = {}
        try:
            exec("\n".join([
                'class Foo(object):',
                '    moduleProvides(IFoo)',
                ]), globs, locs)
        except TypeError:
            pass
        else:
            # FIX: was a bare 'assert False', which is stripped under -O;
            # self.fail always reports.
            self.fail('TypeError not raised')

    def test_called_once_from_module_scope(self):
        # The happy path: a single top-level call records __provides__
        # in the module globals.
        from zope.interface.declarations import moduleProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'__name__': 'zope.interface.tests.foo',
                 'moduleProvides': moduleProvides, 'IFoo': IFoo}
        exec('moduleProvides(IFoo)', globs)
        spec = globs['__provides__']
        self.assertEqual(list(spec), [IFoo])

    def test_called_twice_from_module_scope(self):
        # A second top-level call is rejected.
        from zope.interface.declarations import moduleProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        globs = {'__name__': 'zope.interface.tests.foo',
                 'moduleProvides': moduleProvides, 'IFoo': IFoo}
        # FIX: removed unused 'locs = {}' local (exec used globs only).
        try:
            exec("\n".join([
                'moduleProvides(IFoo)',
                'moduleProvides(IFoo)',
                ]), globs)
        except TypeError:
            pass
        else:
            # FIX: was a bare 'assert False', which is stripped under -O.
            self.fail('TypeError not raised')
class Test_getObjectSpecificationFallback(unittest.TestCase):
    """Tests for the pure-Python ``getObjectSpecificationFallback``."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import getObjectSpecificationFallback
        return getObjectSpecificationFallback(*args, **kw)

    def test_wo_existing_provides_classless(self):
        # An object pretending to have no __class__ yields an empty spec.
        the_dict = {}
        class Foo(object):
            def __getattribute__(self, name):
                # Emulate object w/o any class
                if name == '__class__':
                    raise AttributeError(name)
                try:
                    return the_dict[name]
                except KeyError:
                    raise AttributeError(name)
            def __setattr__(self, name, value):
                the_dict[name] = value
        foo = Foo()
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [])

    def test_existing_provides_is_spec(self):
        # A valid spec already stored on the object is returned as-is.
        from zope.interface.declarations import directlyProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        def foo():
            pass
        directlyProvides(foo, IFoo)
        spec = self._callFUT(foo)
        self.assertTrue(spec is foo.__provides__)

    def test_existing_provides_is_not_spec(self):
        # A bogus __provides__ value is ignored, not propagated.
        def foo():
            pass
        foo.__provides__ = object() # not a valid spec
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [])

    def test_existing_provides(self):
        from zope.interface.declarations import directlyProvides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        foo = Foo()
        directlyProvides(foo, IFoo)
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [IFoo])

    def test_wo_provides_on_class_w_implements(self):
        # Without an instance declaration, the class's implemented
        # interfaces make up the spec.
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        @implementer(IFoo)
        class Foo(object):
            pass
        foo = Foo()
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [IFoo])

    def test_wo_provides_on_class_wo_implements(self):
        class Foo(object):
            pass
        foo = Foo()
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [])
class Test_getObjectSpecification(Test_getObjectSpecificationFallback):
    """Re-run the fallback suite against the (possibly C-optimized)
    ``getObjectSpecification``; only the function under test differs."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import getObjectSpecification as fut
        return fut(*args, **kw)
class Test_providedByFallback(unittest.TestCase):
    """Tests for the pure-Python ``providedByFallback``.

    Covers the precedence chain: a valid __providedBy__ wins; an invalid
    one falls back to __provides__ / the class's implemented interfaces.
    """

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import providedByFallback
        return providedByFallback(*args, **kw)

    def test_wo_providedBy_on_class_wo_implements(self):
        class Foo(object):
            pass
        foo = Foo()
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [])

    def test_w_providedBy_valid_spec(self):
        from zope.interface.declarations import Provides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        foo = Foo()
        foo.__providedBy__ = Provides(Foo, IFoo)
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [IFoo])

    def test_w_providedBy_invalid_spec(self):
        # A bogus __providedBy__ is ignored; with nothing else declared
        # the spec is empty.
        class Foo(object):
            pass
        foo = Foo()
        foo.__providedBy__ = object()
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [])

    def test_w_providedBy_invalid_spec_class_w_implements(self):
        # Fallback to the class's implemented interfaces.
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        @implementer(IFoo)
        class Foo(object):
            pass
        foo = Foo()
        foo.__providedBy__ = object()
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [IFoo])

    def test_w_providedBy_invalid_spec_w_provides_no_provides_on_class(self):
        # An instance-level __provides__ is trusted when the class has
        # no __provides__ of its own.
        class Foo(object):
            pass
        foo = Foo()
        foo.__providedBy__ = object()
        expected = foo.__provides__ = object()
        spec = self._callFUT(foo)
        self.assertTrue(spec is expected)

    def test_w_providedBy_invalid_spec_w_provides_diff_provides_on_class(self):
        # The instance __provides__ wins when it differs from the class's.
        class Foo(object):
            pass
        foo = Foo()
        foo.__providedBy__ = object()
        expected = foo.__provides__ = object()
        Foo.__provides__ = object()
        spec = self._callFUT(foo)
        self.assertTrue(spec is expected)

    def test_w_providedBy_invalid_spec_w_provides_same_provides_on_class(self):
        # If instance and class share the same (invalid) __provides__,
        # fall back to the class's implemented interfaces.
        from zope.interface.declarations import implementer
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        @implementer(IFoo)
        class Foo(object):
            pass
        foo = Foo()
        foo.__providedBy__ = object()
        foo.__provides__ = Foo.__provides__ = object()
        spec = self._callFUT(foo)
        self.assertEqual(list(spec), [IFoo])
class Test_providedBy(Test_providedByFallback):
    """Re-run the fallback tests against the C-optimized implementation."""

    def _callFUT(self, *args, **kw):
        from zope.interface.declarations import providedBy
        return providedBy(*args, **kw)
class ObjectSpecificationDescriptorFallbackTests(unittest.TestCase):
    """Tests for the pure-Python ``ObjectSpecificationDescriptorFallback``."""

    def _getTargetClass(self):
        from zope.interface.declarations \
            import ObjectSpecificationDescriptorFallback
        return ObjectSpecificationDescriptorFallback

    def _makeOne(self, *args, **kw):
        # Instantiate the descriptor class under test.
        return self._getTargetClass()(*args, **kw)

    def test_accessed_via_class(self):
        # Accessed on the class itself, the descriptor returns the
        # class-level ``__provides__`` declaration.
        from zope.interface.declarations import Provides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        class Foo(object):
            pass
        Foo.__provides__ = Provides(Foo, IFoo)
        Foo.__providedBy__ = self._makeOne()
        self.assertEqual(list(Foo.__providedBy__), [IFoo])

    def test_accessed_via_inst_wo_provides(self):
        # Accessed on an instance without its own ``__provides__``, the
        # descriptor reports what the class implements (IFoo), not the
        # class-level ``__provides__`` (IBar).
        from zope.interface.declarations import implementer
        from zope.interface.declarations import Provides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        @implementer(IFoo)
        class Foo(object):
            pass
        Foo.__provides__ = Provides(Foo, IBar)
        Foo.__providedBy__ = self._makeOne()
        foo = Foo()
        self.assertEqual(list(foo.__providedBy__), [IFoo])

    def test_accessed_via_inst_w_provides(self):
        # Direct declarations on the instance (IBaz) come before those
        # implemented by the class (IFoo).
        from zope.interface.declarations import directlyProvides
        from zope.interface.declarations import implementer
        from zope.interface.declarations import Provides
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass("IFoo")
        IBar = InterfaceClass("IBar")
        IBaz = InterfaceClass("IBaz")
        @implementer(IFoo)
        class Foo(object):
            pass
        Foo.__provides__ = Provides(Foo, IBar)
        Foo.__providedBy__ = self._makeOne()
        foo = Foo()
        directlyProvides(foo, IBaz)
        self.assertEqual(list(foo.__providedBy__), [IBaz, IFoo])
class ObjectSpecificationDescriptorTests(
        ObjectSpecificationDescriptorFallbackTests):
    """Re-run the fallback tests against the C-optimized descriptor."""

    def _getTargetClass(self):
        from zope.interface.declarations import ObjectSpecificationDescriptor
        return ObjectSpecificationDescriptor
# Test _normalizeargs through its callers.
class _Monkey(object):
# context-manager for replacing module names in the scope of a test.
def __init__(self, module, **kw):
self.module = module
self.to_restore = dict([(key, getattr(module, key)) for key in kw])
for key, value in kw.items():
setattr(module, key, value)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for key, value in self.to_restore.items():
setattr(self.module, key, value)
class _MonkeyDict(object):
# context-manager for restoring a dict w/in a module in the scope of a test.
def __init__(self, module, attrname, **kw):
self.module = module
self.target = getattr(module, attrname)
self.to_restore = self.target.copy()
self.target.clear()
self.target.update(kw)
def __enter__(self):
return self.target
def __exit__(self, exc_type, exc_val, exc_tb):
self.target.clear()
self.target.update(self.to_restore)
def test_suite():
    """Collect one sub-suite per TestCase class defined in this module."""
    test_cases = (
        DeclarationTests,
        ImplementsTests,
        Test_implementedByFallback,
        Test_implementedBy,
        Test_classImplementsOnly,
        Test_classImplements,
        Test__implements_advice,
        Test_implementer,
        Test_implementer_only,
        Test_implements,
        Test_implementsOnly,
        ProvidesClassTests,
        Test_Provides,
        Test_directlyProvides,
        Test_alsoProvides,
        Test_noLongerProvides,
        ClassProvidesBaseFallbackTests,
        ClassProvidesTests,
        Test_directlyProvidedBy,
        Test_classProvides,
        Test_provider,
        Test_moduleProvides,
        Test_getObjectSpecificationFallback,
        Test_getObjectSpecification,
        Test_providedByFallback,
        Test_providedBy,
        ObjectSpecificationDescriptorFallbackTests,
        ObjectSpecificationDescriptorTests,
    )
    return unittest.TestSuite(
        [unittest.makeSuite(case) for case in test_cases])
|
{
"content_hash": "60239cf6ac30459a4f0b732f2d191580",
"timestamp": "",
"source": "github",
"line_count": 1584,
"max_line_length": 80,
"avg_line_length": 36.19507575757576,
"alnum_prop": 0.6114628573421939,
"repo_name": "nzavagli/UnrealPy",
"id": "1b9533c831e58cadc8741cd861ab88b2d09f3e84",
"size": "57969",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/zope.interface-4.1.2/src/zope/interface/tests/test_declarations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "2753"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "94225"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "ApacheConf",
"bytes": "12482"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "1093261"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "63276"
},
{
"name": "Batchfile",
"bytes": "147828"
},
{
"name": "BlitzBasic",
"bytes": "185102"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108397183"
},
{
"name": "C#",
"bytes": "156749"
},
{
"name": "C++",
"bytes": "13535833"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CMake",
"bytes": "12441"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "430375"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "9679"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cucumber",
"bytes": "390"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "7556"
},
{
"name": "DIGITAL Command Language",
"bytes": "425938"
},
{
"name": "DTrace",
"bytes": "6706"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "18303"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "38458"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "29880"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gnuplot",
"bytes": "11501"
},
{
"name": "Go",
"bytes": "5444"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groff",
"bytes": "3458639"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "HTML",
"bytes": "92126540"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "4040623"
},
{
"name": "JavaScript",
"bytes": "223927"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "6921"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "19509"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "2053844"
},
{
"name": "Mask",
"bytes": "815"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Modelica",
"bytes": "6213"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Module Management System",
"bytes": "14798"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "104883"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "49943"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "68611"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "349743"
},
{
"name": "Perl",
"bytes": "5931502"
},
{
"name": "Perl6",
"bytes": "113623"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PostScript",
"bytes": "18216"
},
{
"name": "PowerShell",
"bytes": "14236"
},
{
"name": "Prolog",
"bytes": "43750"
},
{
"name": "Protocol Buffer",
"bytes": "3401"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "122886156"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "49247"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "17708"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "SAS",
"bytes": "15603"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "50346"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "2925097"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "158636"
},
{
"name": "Smarty",
"bytes": "523"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "Tcl",
"bytes": "6077233"
},
{
"name": "TeX",
"bytes": "487999"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "32053"
},
{
"name": "Visual Basic",
"bytes": "19441"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XS",
"bytes": "178055"
},
{
"name": "XSLT",
"bytes": "1995174"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Yacc",
"bytes": "25665"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "31545"
},
{
"name": "mupad",
"bytes": "2442"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
}
|
from collections.abc import Iterable
import subprocess
from numbers import Integral
import openmc
from openmc import VolumeCalculation
def _run(args, output, cwd):
# Launch a subprocess
p = subprocess.Popen(args, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, universal_newlines=True)
# Capture and re-print OpenMC output in real-time
lines = []
while True:
# If OpenMC is finished, break loop
line = p.stdout.readline()
if not line and p.poll() is not None:
break
lines.append(line)
if output:
# If user requested output, print to screen
print(line, end='')
# Raise an exception if return status is non-zero
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, ' '.join(args),
''.join(lines))
def plot_geometry(output=True, openmc_exec='openmc', cwd='.'):
    """Run OpenMC in geometry plotting mode.

    Parameters
    ----------
    output : bool, optional
        Capture OpenMC output from standard out
    openmc_exec : str, optional
        Path to OpenMC executable
    cwd : str, optional
        Path to working directory to run in

    Raises
    ------
    subprocess.CalledProcessError
        If the `openmc` executable returns a non-zero status

    """
    # '-p' switches the executable into plotting-only mode.
    command = [openmc_exec, '-p']
    _run(command, output, cwd)
def plot_inline(plots, openmc_exec='openmc', cwd='.', convert_exec='convert'):
    """Display plots inline in a Jupyter notebook.

    This function requires that you have a program installed to convert PPM
    files to PNG files. Typically, that would be `ImageMagick
    <https://www.imagemagick.org>`_ which includes a `convert` command.

    Parameters
    ----------
    plots : Iterable of openmc.Plot
        Plots to display
    openmc_exec : str
        Path to OpenMC executable
    cwd : str, optional
        Path to working directory to run in
    convert_exec : str, optional
        Command that can convert PPM files into PNG files

    Raises
    ------
    subprocess.CalledProcessError
        If the `openmc` executable returns a non-zero status

    """
    from IPython.display import Image, display

    # Accept a single plot as well as an iterable of plots.
    if not isinstance(plots, Iterable):
        plots = [plots]

    # Create plots.xml
    openmc.Plots(plots).export_to_xml()

    # Run OpenMC in geometry plotting mode (output suppressed).
    plot_geometry(False, openmc_exec, cwd)

    # Note: `plots` is always a list at this point, so the previous
    # `if plots is not None:` guard was unreachable-as-False and removed.
    images = []
    for p in plots:
        # OpenMC names the image after the plot's filename if set,
        # otherwise after the plot's numeric ID.
        if p.filename is not None:
            ppm_file = '{}.ppm'.format(p.filename)
        else:
            ppm_file = 'plot_{}.ppm'.format(p.id)
        png_file = ppm_file.replace('.ppm', '.png')
        subprocess.check_call([convert_exec, ppm_file, png_file])
        images.append(Image(png_file))

    display(*images)
def calculate_volumes(threads=None, output=True, cwd='.',
                      openmc_exec='openmc', mpi_args=None):
    """Run stochastic volume calculations in OpenMC.

    This function runs OpenMC in stochastic volume calculation mode. To
    specify the parameters of a volume calculation, one must first create a
    :class:`openmc.VolumeCalculation` instance and assign it to
    :attr:`openmc.Settings.volume_calculations`. For example:

    >>> vol = openmc.VolumeCalculation(domains=[cell1, cell2], samples=100000)
    >>> settings = openmc.Settings()
    >>> settings.volume_calculations = [vol]
    >>> settings.export_to_xml()
    >>> openmc.calculate_volumes()

    Parameters
    ----------
    threads : int, optional
        Number of OpenMP threads. If OpenMC is compiled with OpenMP threading
        enabled, the default is implementation-dependent but is usually equal
        to the number of hardware threads available (or a value set by the
        :envvar:`OMP_NUM_THREADS` environment variable).
    output : bool, optional
        Capture OpenMC output from standard out
    cwd : str, optional
        Path to working directory to run in. Defaults to the current working
        directory.
    openmc_exec : str, optional
        Path to OpenMC executable. Defaults to 'openmc'.
    mpi_args : list of str, optional
        MPI execute command and any additional MPI arguments to pass,
        e.g. ['mpiexec', '-n', '8'].

    Raises
    ------
    subprocess.CalledProcessError
        If the `openmc` executable returns a non-zero status

    See Also
    --------
    openmc.VolumeCalculation

    """
    command = [openmc_exec, '--volume']

    # Only forward a thread count when it is a positive integer.
    if isinstance(threads, Integral) and threads > 0:
        command.extend(['-s', str(threads)])

    # Prepend the MPI launcher when one is requested.
    if mpi_args is not None:
        command = mpi_args + command

    _run(command, output, cwd)
def run(particles=None, threads=None, geometry_debug=False,
        restart_file=None, tracks=False, output=True, cwd='.',
        openmc_exec='openmc', mpi_args=None):
    """Run an OpenMC simulation.

    Parameters
    ----------
    particles : int, optional
        Number of particles to simulate per generation.
    threads : int, optional
        Number of OpenMP threads. If OpenMC is compiled with OpenMP threading
        enabled, the default is implementation-dependent but is usually equal
        to the number of hardware threads available (or a value set by the
        :envvar:`OMP_NUM_THREADS` environment variable).
    geometry_debug : bool, optional
        Turn on geometry debugging during simulation. Defaults to False.
    restart_file : str, optional
        Path to restart file to use
    tracks : bool, optional
        Write tracks for all particles. Defaults to False.
    output : bool
        Capture OpenMC output from standard out
    cwd : str, optional
        Path to working directory to run in. Defaults to the current working
        directory.
    openmc_exec : str, optional
        Path to OpenMC executable. Defaults to 'openmc'.
    mpi_args : list of str, optional
        MPI execute command and any additional MPI arguments to pass,
        e.g. ['mpiexec', '-n', '8'].

    Raises
    ------
    subprocess.CalledProcessError
        If the `openmc` executable returns a non-zero status

    """
    # Translate each keyword argument into its command-line flag.
    command = [openmc_exec]
    if isinstance(particles, Integral) and particles > 0:
        command.extend(['-n', str(particles)])
    if isinstance(threads, Integral) and threads > 0:
        command.extend(['-s', str(threads)])
    if geometry_debug:
        command.append('-g')
    if isinstance(restart_file, str):
        command.extend(['-r', restart_file])
    if tracks:
        command.append('-t')

    # The MPI launcher, when given, wraps the whole OpenMC invocation.
    if mpi_args is not None:
        command = mpi_args + command

    _run(command, output, cwd)
|
{
"content_hash": "cd4e44f2f5c69b37e3a2b49ead48656c",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 80,
"avg_line_length": 31.443396226415093,
"alnum_prop": 0.6299129912991299,
"repo_name": "johnnyliu27/openmc",
"id": "8ad2bd8960a5851403a6e1f35cefa85a506c70fc",
"size": "6666",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "openmc/executor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7619"
},
{
"name": "C++",
"bytes": "825710"
},
{
"name": "CMake",
"bytes": "33163"
},
{
"name": "Dockerfile",
"bytes": "1427"
},
{
"name": "Fortran",
"bytes": "1089808"
},
{
"name": "Python",
"bytes": "2433489"
},
{
"name": "Shell",
"bytes": "2986"
}
],
"symlink_target": ""
}
|
import time
import datetime
import logging
from . import daemon
from . import db
from . import common as c
from . import strings as s
from zhihu import ZhihuClient
def calc_message(pattern, me, you, new_follower_num):
    """Fill *pattern* with greeting data for a new follower.

    Supported placeholders: ``{now}`` (UTC+8 wall-clock time),
    ``{my_name}``, ``{follower_num}`` (my follower count minus the
    followers picked up in this pass), and ``{your_name}``.
    """
    # UTC+8 gives China Standard Time.
    beijing_now = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
    return pattern.format(
        now=beijing_now,
        my_name=me.name,
        follower_num=me.follower_num - new_follower_num,
        your_name=you.name,
    )
class BackendCode(daemon.DaemonProcess):
    """Daemon that watches a Zhihu account's followers and greets new ones.

    Each pass rebuilds the authenticated user, walks the follower list
    (newest first — presumably; TODO confirm API ordering), sends a
    templated message to unseen followers, and records them in the
    SQLite database so they are not greeted twice.
    """

    def at_exit(self):
        # No cleanup is needed when the daemon stops.
        pass

    def run(self, database, msg, interval, log_file, max_old=10):
        """Main daemon loop.

        :param database: path to the SQLite database file (must exist)
        :param msg: message template passed to ``calc_message``
        :param interval: seconds to sleep between passes
        :param log_file: path of the log file to append to
        :param max_old: stop scanning after this many consecutive
            already-known followers in a row
        """
        c.check_type(database, 'database', str)

        # Log to both the given file and the console at DEBUG level.
        L = logging.getLogger('qqqfome-backend')
        formatter = logging.Formatter(
            '%(asctime)s - %(levelname)s - %(message)s')
        fh = logging.FileHandler(log_file)
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        sh = logging.StreamHandler()
        sh.setLevel(logging.DEBUG)
        sh.setFormatter(formatter)
        L.setLevel(logging.DEBUG)
        L.addHandler(fh)
        L.addHandler(sh)

        # Open the database; a missing file is fatal for the daemon.
        try:
            L.info(s.log_connected_to_db.format(database))
            conn = db.connect_db(database)
            L.info(s.success)
        except FileNotFoundError:
            L.exception(s.log_file_not_exist.format(database))
            L.info(s.exit)
            return

        # get cookies from database
        cookies = db.get_cookies(conn)
        if not cookies:
            L.exception(s.log_no_cookies_in_database)
            L.info(s.exit)
            return

        L.info(s.log_get_cookies_from_database)
        L.debug(cookies)

        # Authenticate against Zhihu with the stored cookies.
        try:
            client = ZhihuClient(cookies)
            L.info(s.log_build_zhihu_client)
        except Exception as e:
            L.exception(e)
            return

        while True:
            L.info(s.log_start_a_pass)

            # Retry building the "me" object up to 5 times; the while/else
            # branch runs only if every attempt failed (no break).
            i = 0
            while i < 5:
                try:
                    L.info(s.log_build_me)
                    me = client.me()
                    break
                except Exception as e:
                    L.exception(e)
                    i += 1
            else:
                L.error(s.log_fail_to_build_me)
                L.info(s.exit)
                return

            # A failure here skips the whole pass rather than killing the
            # daemon.
            try:
                follower_num = me.follower_num
            except Exception as e:
                L.exception(e)
                L.info(s.log_get_follower_num_failed)
                L.info(s.log_finish_a_pass)
                time.sleep(interval)
                continue

            L.info(s.log_get_follower_num.format(follower_num))
            db.log_to_db(conn, follower_num, s.log_start_a_pass)

            # continue_in_db counts consecutive already-known followers;
            # new_follower_num counts greetings sent this pass.
            continue_in_db = 0
            new_follower_num = 0

            try:
                for follower in me.followers:
                    L.info(s.log_check_follower.format(
                        follower.name, follower.id))
                    if db.is_in_db(conn, follower.id):
                        L.info(s.log_follower_in_db.format(follower.id))
                        continue_in_db += 1
                    else:
                        L.info(s.log_follower_not_in_db.format(follower.name))
                        # A new follower resets the consecutive-known count.
                        continue_in_db = 0

                        L.info(s.log_send_message.format(follower.name))
                        # Fall back to the raw template if formatting fails.
                        try:
                            message = calc_message(msg, me, follower,
                                                   new_follower_num)
                            new_follower_num += 1
                        except Exception as e:
                            L.exception(e)
                            message = msg
                        L.debug(message)

                        # Retry sending up to 5 times; on total failure the
                        # while/else skips the DB insert so the follower is
                        # retried on a later pass.
                        i = 0
                        while i < 5:
                            try:
                                me.send_message(follower, message)
                                break
                            except Exception as e:
                                L.exception(e)
                                L.debug(s.log_send_failed)
                                i += 1
                        else:
                            L.info(s.log_send_pass)
                            continue

                        L.info(s.success)
                        L.info(s.log_add_user_to_db.format(
                            follower.name))
                        db.add_user_to_db(conn, follower)

                    # Enough consecutive known followers means we have hit
                    # the already-processed part of the list.
                    if continue_in_db == max_old:
                        L.info(s.log_continue_reach_max.format(max_old))
                        break
            except Exception as e:
                L.exception(e)

            L.info(s.log_finish_a_pass)
            time.sleep(interval)
|
{
"content_hash": "9ff83fe011df83e7858a726be713fd97",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 78,
"avg_line_length": 32.13907284768212,
"alnum_prop": 0.4609519884607459,
"repo_name": "7sDream/qqqfome",
"id": "baf3f76e149c7ff1ee8fa24cfc1f8844ba271fc2",
"size": "4853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qqqfome/backend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27130"
}
],
"symlink_target": ""
}
|
def application(environ, start_response):
    """Minimal WSGI application that always responds with the body ``v1``."""
    payload = 'v1'.encode()
    headers = [('Content-Length', str(len(payload)))]
    start_response('200', headers)
    return [payload]
|
{
"content_hash": "90773236c7b45537b4b7e22bf1d3c864",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 63,
"avg_line_length": 30,
"alnum_prop": 0.6333333333333333,
"repo_name": "nginx/unit",
"id": "08f7dd64c878616872aa5ecb3a1a0375bad73b92",
"size": "150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/python/restart/v1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2948852"
},
{
"name": "C++",
"bytes": "49444"
},
{
"name": "Dockerfile",
"bytes": "3409"
},
{
"name": "Go",
"bytes": "19213"
},
{
"name": "HTML",
"bytes": "847"
},
{
"name": "Java",
"bytes": "780057"
},
{
"name": "JavaScript",
"bytes": "87653"
},
{
"name": "Makefile",
"bytes": "32267"
},
{
"name": "PHP",
"bytes": "7022"
},
{
"name": "Perl",
"bytes": "9359"
},
{
"name": "Python",
"bytes": "706893"
},
{
"name": "Raku",
"bytes": "1497"
},
{
"name": "Roff",
"bytes": "6774"
},
{
"name": "Ruby",
"bytes": "9880"
},
{
"name": "Shell",
"bytes": "14683"
}
],
"symlink_target": ""
}
|
"""Tests for `tf.data.experimental.prefetch_to_device()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.experimental.ops import prefetching_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import structure
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
# TODO(b/117581999): add eager coverage when supported.
class PrefetchToDeviceTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests for `tf.data.experimental.prefetch_to_device()`.

  Each test builds a host dataset, applies `prefetch_to_device`, and checks
  that element structure is preserved and values round-trip unchanged.
  """

  @combinations.generate(test_base.graph_only_combinations())
  def testPrefetchToDevice(self):
    # Prefetch scalar int64 elements to a second CPU device.
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.prefetch_to_device("/cpu:1"))

    with ops.device("/cpu:1"):
      iterator = dataset_ops.make_one_shot_iterator(device_dataset)
      next_element = iterator.get_next()

    # Prefetching must not change the dataset's element structure.
    self.assertTrue(structure.are_compatible(
        dataset_ops.get_structure(host_dataset),
        dataset_ops.get_structure(device_dataset)))
    self.assertEqual(dtypes.int64, next_element.dtype)
    self.assertEqual([], next_element.shape)

    # Two CPU devices are needed so that "/cpu:1" exists in the session.
    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=worker_config):
      for i in range(10):
        self.assertEqual(i, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)

  @combinations.generate(test_base.graph_only_combinations())
  def testPrefetchToSameDevice(self):
    # "Prefetching" to the device the data already lives on must still work.
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.prefetch_to_device(
            "/job:localhost/replica:0/task:0/device:CPU:0"))

    with ops.device("/cpu:1"):
      iterator = dataset_ops.make_one_shot_iterator(device_dataset)
      next_element = iterator.get_next()

    self.assertTrue(structure.are_compatible(
        dataset_ops.get_structure(host_dataset),
        dataset_ops.get_structure(device_dataset)))
    self.assertEqual(dtypes.int64, next_element.dtype)
    self.assertEqual([], next_element.shape)

    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=worker_config):
      for i in range(10):
        self.assertEqual(i, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)

  @combinations.generate(test_base.graph_only_combinations())
  def testPrefetchDictToDevice(self):
    # Structured (dict) elements must survive prefetching intact.
    host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
    device_dataset = host_dataset.apply(
        prefetching_ops.prefetch_to_device("/cpu:1"))

    with ops.device("/cpu:1"):
      iterator = dataset_ops.make_one_shot_iterator(device_dataset)
      next_element = iterator.get_next()

    self.assertTrue(structure.are_compatible(
        dataset_ops.get_structure(host_dataset),
        dataset_ops.get_structure(device_dataset)))
    self.assertEqual(dtypes.int64, next_element["a"].dtype)
    self.assertEqual([], next_element["a"].shape)

    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=worker_config):
      for i in range(10):
        self.assertEqual({"a": i}, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)

  @combinations.generate(test_base.graph_only_combinations())
  def testPrefetchSparseTensorsToDevice(self):
    # Sparse elements must also survive prefetching intact.
    def make_tensor(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0, 0]], values=(i*[1]), dense_shape=[2, 2])
    host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)

    device_dataset = host_dataset.apply(
        prefetching_ops.prefetch_to_device("/cpu:1"))

    with ops.device("/cpu:1"):
      iterator = dataset_ops.make_one_shot_iterator(device_dataset)
      next_element = iterator.get_next()

    self.assertTrue(structure.are_compatible(
        dataset_ops.get_structure(host_dataset),
        dataset_ops.get_structure(device_dataset)))
    self.assertEqual(dtypes.int64, next_element.dtype)

    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=worker_config):
      for i in range(10):
        actual = self.evaluate(next_element)
        self.assertAllEqual([i], actual.values)
        self.assertAllEqual([[0, 0]], actual.indices)
        self.assertAllEqual([2, 2], actual.dense_shape)
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)

  @combinations.generate(test_base.default_test_combinations())
  def testPrefetchToDeviceGpu(self):
    # GPU-only end-to-end value check; skipped on CPU-only machines.
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")

    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.prefetch_to_device("/gpu:0"))

    self.assertDatasetProduces(device_dataset, list(range(10)))

  @combinations.generate(test_base.graph_only_combinations())
  def testPrefetchToDeviceWithReInit(self):
    # Re-initializing the iterator mid-stream must restart from the
    # beginning of the dataset.
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.prefetch_to_device("/cpu:1"))

    with ops.device("/cpu:1"):
      iterator = dataset_ops.make_initializable_iterator(device_dataset)
      next_element = iterator.get_next()

    self.assertTrue(structure.are_compatible(
        dataset_ops.get_structure(host_dataset),
        dataset_ops.get_structure(device_dataset)))
    self.assertEqual(dtypes.int64, next_element.dtype)
    self.assertEqual([], next_element.shape)

    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=worker_config):
      self.evaluate(iterator.initializer)
      for i in range(5):
        self.assertEqual(i, self.evaluate(next_element))
      # Restart and consume the full range this time.
      self.evaluate(iterator.initializer)
      for i in range(10):
        self.assertEqual(i, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)

  @combinations.generate(test_base.graph_only_combinations())
  def testPrefetchToDeviceGpuWithReInit(self):
    # Same re-initialization scenario, but targeting a GPU device.
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")

    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.prefetch_to_device("/gpu:0"))

    iterator = dataset_ops.make_initializable_iterator(device_dataset)
    next_element = iterator.get_next()

    # allow_soft_placement=False ensures ops really run on the GPU.
    with self.cached_session(
        config=config_pb2.ConfigProto(allow_soft_placement=False)):
      self.evaluate(iterator.initializer)
      for i in range(5):
        self.assertEqual(i, self.evaluate(next_element))
      self.evaluate(iterator.initializer)
      for i in range(10):
        self.assertEqual(i, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)

  @combinations.generate(test_base.eager_only_combinations())
  def testPrefetchToDevicePlacement(self):
    # The resulting dataset's variant tensor must be placed on the GPU.
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")

    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.prefetch_to_device("/gpu:0"))

    self.assertEqual(device_dataset._variant_tensor.device,
                     "/job:localhost/replica:0/task:0/device:GPU:0")
# Standard TensorFlow test entry point: discover and run the tests above.
if __name__ == "__main__":
  test.main()
|
{
"content_hash": "2e341ea9ca0dc5e69f99f9250a57fc2d",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 78,
"avg_line_length": 38.759803921568626,
"alnum_prop": 0.7088655621601113,
"repo_name": "aldian/tensorflow",
"id": "611fbab4b8b14fe2101ddd07529fc1da4f436196",
"size": "8596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/experimental/kernel_tests/prefetch_to_device_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29667924"
},
{
"name": "CMake",
"bytes": "647100"
},
{
"name": "Go",
"bytes": "976514"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "275733"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26424665"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373109"
}
],
"symlink_target": ""
}
|
import structlog
from enum import Enum
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from voltha.protos.common_pb2 import OperStatus, AdminState
class AdtnPort(object):
    """
    A class similar to the 'Port' class in the VOLTHA

    Base class holding the common state machine, admin/oper status
    bookkeeping and statistics counters for Adtran OLT ports.  Derived
    classes supply the hardware-specific behaviour by implementing
    get_port(), _update_adapter_agent() and sync_hardware(), and may
    override finish_startup()/finish_stop().
    """
    class State(Enum):
        INITIAL = 0   # Created and initialization in progress
        RUNNING = 1   # PON port contacted, ONU discovery active
        STOPPED = 2   # Disabled
        DELETING = 3  # Cleanup
    def __init__(self, parent, **kwargs):
        # :param parent: owning device handler (must expose device_id and
        #                adapter_agent)
        # :param kwargs: must contain 'port_no', the port number for this port
        assert parent, 'parent is None'
        assert 'port_no' in kwargs, 'Port number not found'
        self.log = structlog.get_logger(device_id=parent.device_id)
        self._parent = parent
        self._port_no = kwargs.get('port_no')
        # Set the following in your derived class. These names are used in
        # various ways. Typically, the physical port name will be used during
        # device handler conversations with the hardware (REST, NETCONF, ...)
        # while the logical port name is what the outside world (ONOS, SEBA, ...)
        # uses. All ports have a physical port name, but only ports exposed through
        # VOLTHA as a logical port will have a logical port name
        self._physical_port_name = None
        self._logical_port_name = None
        self._label = None
        self._port = None
        self.sync_tick = 20.0     # Seconds between hardware sync attempts
        self.sync_deferred = None  # For sync of PON config to hardware
        # TODO: Deprecate 'enabled' and use admin_state instead may want initial to always be
        # disabled and then in derived classes, set it in the 'reset' method called on startup.
        self._enabled = True
        self._admin_state = AdminState.ENABLED
        self._oper_status = OperStatus.DISCOVERED
        self._state = AdtnPort.State.INITIAL
        self.deferred = None  # General purpose
        # Statistics
        self.rx_packets = 0
        self.rx_bytes = 0
        self.tx_packets = 0
        self.tx_bytes = 0
        self.timestamp = 0  # UTC when KPI items last updated
    def __del__(self):
        # Best-effort shutdown on garbage collection.
        # NOTE(review): stop() is an @inlineCallbacks generator; the Deferred
        # it returns is discarded here, so the cleanup may not run to
        # completion before the object disappears.
        self.stop()
    def get_port(self):
        """
        Get the VOLTHA PORT object for this port
        :return: VOLTHA Port object
        """
        raise NotImplementedError('Add to your derived class')
    @property
    def port_no(self):
        # Port number supplied at construction time (read-only)
        return self._port_no
    @property
    def intf_id(self):
        # Interface ID; currently an alias for the port number
        return self.port_no
    @property
    def physical_port_name(self):
        # Name used when talking to the hardware (REST, NETCONF, ...)
        return self._physical_port_name
    @property
    def logical_port_name(self):
        # Name exposed through VOLTHA to the outside world (ONOS, SEBA, ...)
        return self._logical_port_name
    @property  # For backwards compatibility
    def name(self):
        return self._logical_port_name
    @property
    def state(self):
        # Current AdtnPort.State of the port state machine
        return self._state
    @state.setter
    def state(self, value):
        self._state = value
    @property
    def olt(self):
        # The parent OLT device handler
        return self._parent
    @property
    def admin_state(self):
        return self._admin_state
    @admin_state.setter
    def admin_state(self, value):
        # Changing the admin state starts or stops the port as a side effect
        if self._admin_state != value:
            self._admin_state = value
            if self._admin_state == AdminState.ENABLED:
                self.start()
            else:
                self.stop()
    @property
    def enabled(self):
        return self._admin_state == AdminState.ENABLED
    @enabled.setter
    def enabled(self, value):
        # Boolean facade over admin_state (True -> ENABLED, False -> DISABLED)
        assert isinstance(value, bool), 'enabled is a boolean'
        self.admin_state = AdminState.ENABLED if value else AdminState.DISABLED
    @property
    def oper_status(self):
        return self._oper_status
    @property
    def adapter_agent(self):
        return self.olt.adapter_agent
    def get_logical_port(self):
        """
        Get the VOLTHA logical port for this port. For PON ports, a logical port
        is not currently created, so always return None
        :return: VOLTHA logical port or None if not supported
        """
        return None
    def cancel_deferred(self):
        # Cancel any outstanding general-purpose and hardware-sync deferreds.
        # Cancellation failures are deliberately ignored (best-effort cleanup).
        d1, self.deferred = self.deferred, None
        d2, self.sync_deferred = self.sync_deferred, None
        for d in [d1, d2]:
            try:
                if d is not None and not d.called:
                    d.cancel()
            except Exception:
                pass
    def _update_adapter_agent(self):
        # Push the current port state/status up to the VOLTHA adapter agent
        raise NotImplementedError('Add to your derived class')
    def start(self):
        """
        Start/enable this PON and start ONU discovery
        """
        if self.state == AdtnPort.State.RUNNING:
            return succeed('Running')
        self.log.info('start-port')
        self.cancel_deferred()
        self.state = AdtnPort.State.INITIAL
        self._oper_status = OperStatus.ACTIVATING
        self._enabled = True
        # Do the rest of the startup in an async method
        self.deferred = reactor.callLater(0.5, self.finish_startup)
        self._update_adapter_agent()
        return succeed('Scheduled')
    def finish_startup(self):
        # Second phase of start(): scheduled 0.5s later via the reactor.
        # Only completes the transition if still in INITIAL state.
        if self.state == AdtnPort.State.INITIAL:
            self.log.debug('final-startup')
            # If here, initial settings were successfully written to hardware
            self._enabled = True
            self._admin_state = AdminState.ENABLED
            self._oper_status = OperStatus.ACTIVE  # TODO: is this correct, how do we tell GRPC
            self.state = AdtnPort.State.RUNNING
            # Kick off the periodic hardware-sync loop
            self.sync_deferred = reactor.callLater(self.sync_tick,
                                                   self.sync_hardware)
            self._update_adapter_agent()
    @inlineCallbacks
    def stop(self):
        # Disable the port: cancel timers, mark it disabled and run the
        # derived-class finish_stop() hook.  Always resolves to 'Stopped'.
        if self.state == AdtnPort.State.STOPPED:
            self.log.debug('already stopped')
            returnValue('Stopped')
        self.log.info('stopping')
        try:
            self.cancel_deferred()
            self._enabled = False
            self._admin_state = AdminState.DISABLED
            self._oper_status = OperStatus.UNKNOWN
            self._update_adapter_agent()
            self.state = AdtnPort.State.STOPPED
            self.deferred = self.finish_stop()
            yield self.deferred
        except Exception as e:
            self.log.exception('stop-failed', e=e)
        returnValue('Stopped')
    @inlineCallbacks
    def finish_stop(self):
        pass  # Add to your derived class if needed
        returnValue(None)
    def restart(self):
        # Force the state machine back to INITIAL, then re-run start() if the
        # port was running, or stop() if it was stopped; no-op otherwise.
        if self.state == AdtnPort.State.RUNNING or self.state == AdtnPort.State.STOPPED:
            start_it = (self.state == AdtnPort.State.RUNNING)
            self.state = AdtnPort.State.INITIAL
            return self.start() if start_it else self.stop()
        return succeed('nop')
    def delete(self):
        """
        Parent device is being deleted. Do not change any config but
        stop all polling
        """
        self.log.info('Deleting')
        self.state = AdtnPort.State.DELETING
        self.cancel_deferred()
    def sync_hardware(self):
        # Periodic task (every sync_tick seconds, scheduled by
        # finish_startup) that re-syncs port config to hardware; must be
        # supplied by the derived class.
        raise NotImplementedError('Add to your derived class')
# TODO: Continue to consolidate port functionality
|
{
"content_hash": "e6988ee8b85fc2dafd7f420862924e78",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 95,
"avg_line_length": 30.29957805907173,
"alnum_prop": 0.6004734716613285,
"repo_name": "opencord/voltha",
"id": "5dbb543195c5058a2adee1b3e4130fca2269cad1",
"size": "7762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voltha/adapters/adtran_olt/port.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "30265"
},
{
"name": "Dockerfile",
"bytes": "2881"
},
{
"name": "Go",
"bytes": "181529"
},
{
"name": "Jinja",
"bytes": "25855"
},
{
"name": "Makefile",
"bytes": "76329"
},
{
"name": "Python",
"bytes": "9758796"
},
{
"name": "RobotFramework",
"bytes": "10188"
},
{
"name": "Ruby",
"bytes": "1126"
},
{
"name": "Shell",
"bytes": "758475"
},
{
"name": "XSLT",
"bytes": "175917"
}
],
"symlink_target": ""
}
|
# Apply a binary threshold to an 8-bit 2-D image, driven by the command line:
#   <input> <output> <lower> <upper> <outside-value> <inside-value>
# Pixels whose value lies in [lower, upper] become inside-value; all other
# pixels become outside-value.
import sys
import itk
itk.auto_progress(2)
input_path = sys.argv[1]
output_path = sys.argv[2]
lower_threshold = int(sys.argv[3])
upper_threshold = int(sys.argv[4])
outside_value = int(sys.argv[5])
inside_value = int(sys.argv[6])
# unsigned char pixels, 2-D image
image_type = itk.Image[itk.UC, 2]
reader = itk.ImageFileReader[image_type].New()
reader.SetFileName(input_path)
threshold_filter = itk.BinaryThresholdImageFilter[image_type, image_type].New()
threshold_filter.SetInput(reader.GetOutput())
threshold_filter.SetLowerThreshold(lower_threshold)
threshold_filter.SetUpperThreshold(upper_threshold)
threshold_filter.SetOutsideValue(outside_value)
threshold_filter.SetInsideValue(inside_value)
writer = itk.ImageFileWriter[image_type].New()
writer.SetFileName(output_path)
writer.SetInput(threshold_filter.GetOutput())
writer.Update()
|
{
"content_hash": "b324b00966cd696f893bb53dddac2ecb",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 65,
"avg_line_length": 24.64864864864865,
"alnum_prop": 0.8103070175438597,
"repo_name": "LucasGandel/ITK",
"id": "d626fa29dbf0cfdbab7c37758cf620a2e1c55635",
"size": "1707",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Modules/Filtering/Thresholding/wrapping/test/BinaryThresholdImageFilterTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "306"
},
{
"name": "C",
"bytes": "29710189"
},
{
"name": "C++",
"bytes": "46556252"
},
{
"name": "CMake",
"bytes": "1998251"
},
{
"name": "CSS",
"bytes": "24960"
},
{
"name": "DIGITAL Command Language",
"bytes": "709"
},
{
"name": "FORTRAN",
"bytes": "2241251"
},
{
"name": "HTML",
"bytes": "208088"
},
{
"name": "Io",
"bytes": "1833"
},
{
"name": "Java",
"bytes": "28598"
},
{
"name": "Lex",
"bytes": "6877"
},
{
"name": "Makefile",
"bytes": "13192"
},
{
"name": "Objective-C",
"bytes": "49378"
},
{
"name": "Objective-C++",
"bytes": "6591"
},
{
"name": "OpenEdge ABL",
"bytes": "85139"
},
{
"name": "Perl",
"bytes": "19692"
},
{
"name": "Prolog",
"bytes": "4406"
},
{
"name": "Python",
"bytes": "808900"
},
{
"name": "Ruby",
"bytes": "296"
},
{
"name": "Shell",
"bytes": "130535"
},
{
"name": "Tcl",
"bytes": "74786"
},
{
"name": "XSLT",
"bytes": "195448"
},
{
"name": "Yacc",
"bytes": "20428"
}
],
"symlink_target": ""
}
|
from robot.model import SuiteVisitor
class ResultVisitor(SuiteVisitor):
    """Visitor that walks a result object: suite, statistics and errors.

    Each ``visit_x`` method calls the matching ``start_x`` hook first and
    skips the element entirely when that hook returns ``False``; otherwise
    it descends into the children and finishes with the ``end_x`` hook.
    Subclasses override only the hooks they care about.
    """

    def visit_result(self, result):
        if self.start_result(result) is False:
            return
        result.suite.visit(self)
        result.statistics.visit(self)
        result.errors.visit(self)
        self.end_result(result)

    def start_result(self, result):
        """Called before a result is visited. Return False to skip it."""

    def end_result(self, result):
        """Called after a result has been visited."""

    def visit_statistics(self, stats):
        if self.start_statistics(stats) is False:
            return
        stats.total.visit(self)
        stats.tags.visit(self)
        stats.suite.visit(self)
        self.end_statistics(stats)

    def start_statistics(self, stats):
        """Called before statistics are visited. Return False to skip them."""

    def end_statistics(self, stats):
        """Called after statistics have been visited."""

    def visit_total_statistics(self, stats):
        if self.start_total_statistics(stats) is False:
            return
        for stat in stats:
            stat.visit(self)
        self.end_total_statistics(stats)

    def start_total_statistics(self, stats):
        """Called before total statistics. Return False to skip them."""

    def end_total_statistics(self, stats):
        """Called after total statistics have been visited."""

    def visit_tag_statistics(self, stats):
        if self.start_tag_statistics(stats) is False:
            return
        for stat in stats:
            stat.visit(self)
        self.end_tag_statistics(stats)

    def start_tag_statistics(self, stats):
        """Called before tag statistics. Return False to skip them."""

    def end_tag_statistics(self, stats):
        """Called after tag statistics have been visited."""

    def visit_suite_statistics(self, stats):
        if self.start_suite_statistics(stats) is False:
            return
        for stat in stats:
            stat.visit(self)
        self.end_suite_statistics(stats)

    def start_suite_statistics(self, stats):
        """Called before suite statistics. Return False to skip them."""

    def end_suite_statistics(self, suite_stats):
        """Called after suite statistics have been visited."""

    def visit_stat(self, stat):
        # A stat has no children: just the start/end hook pair.
        if self.start_stat(stat) is False:
            return
        self.end_stat(stat)

    def start_stat(self, stat):
        """Called before a single statistic. Return False to skip it."""

    def end_stat(self, stat):
        """Called after a single statistic has been visited."""

    def visit_errors(self, errors):
        # Unlike the other visit methods, errors are always traversed;
        # start_errors cannot veto the visit.
        self.start_errors(errors)
        for message in errors:
            message.visit(self)
        self.end_errors(errors)

    def start_errors(self, errors):
        """Called before execution errors are visited."""

    def end_errors(self, errors):
        """Called after execution errors have been visited."""
|
{
"content_hash": "4219c17db20f06af2cf48c755482ac9c",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 59,
"avg_line_length": 25.045454545454547,
"alnum_prop": 0.5893829401088929,
"repo_name": "Senseg/robotframework",
"id": "5e0882649653293aa5ec984208321ee5e68a7823",
"size": "2810",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/robot/result/visitor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "716"
},
{
"name": "Java",
"bytes": "48873"
},
{
"name": "JavaScript",
"bytes": "149654"
},
{
"name": "Python",
"bytes": "1637427"
},
{
"name": "Shell",
"bytes": "1323"
}
],
"symlink_target": ""
}
|
import collections
import uuid
from oslo_config import cfg
from rally.common import broker
from rally.common.i18n import _
from rally.common import logging
from rally.common import objects
from rally.common import utils as rutils
from rally.common import validation
from rally import consts
from rally import exceptions
from rally import osclients
from rally.plugins.openstack import credential
from rally.plugins.openstack.services.identity import identity
from rally.plugins.openstack.wrappers import network
from rally.task import context
from rally.common import opts
# Register rally's config options (users_context group) before CONF is read.
opts.register()
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Reusable description strings referenced from CONFIG_SCHEMA below.
RESOURCE_MANAGEMENT_WORKERS_DESCR = ("The number of concurrent threads to use "
                                     "for serving users context.")
PROJECT_DOMAIN_DESCR = "ID of domain in which projects will be created."
USER_DOMAIN_DESCR = "ID of domain in which users will be created."
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="users", platform="openstack", order=100)
class UserGenerator(context.Context):
    """Creates specified amount of keystone users and tenants."""
    # Two mutually exclusive config shapes: knobs for creating temporary
    # users/tenants, or just 'user_choice_method' when reusing existing users.
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "oneOf": [
            {"description": "Create new temporary users and tenants.",
             "properties": {
                 "tenants": {
                     "type": "integer",
                     "minimum": 1,
                     "description": "The number of tenants to create."
                 },
                 "users_per_tenant": {
                     "type": "integer",
                     "minimum": 1,
                     "description": "The number of users to create per one "
                                    "tenant."},
                 "resource_management_workers": {
                     "type": "integer",
                     "minimum": 1,
                     "description": RESOURCE_MANAGEMENT_WORKERS_DESCR},
                 "project_domain": {
                     "type": "string",
                     "description": PROJECT_DOMAIN_DESCR},
                 "user_domain": {
                     "type": "string",
                     "description": USER_DOMAIN_DESCR},
                 "user_choice_method": {
                     "$ref": "#/definitions/user_choice_method"}},
             "additionalProperties": False},
            # TODO(andreykurilin): add ability to specify users here.
            {"description": "Use existing users and tenants.",
             "properties": {
                 "user_choice_method": {
                     "$ref": "#/definitions/user_choice_method"}
             },
             "additionalProperties": False}
        ],
        "definitions": {
            "user_choice_method": {
                "enum": ["random", "round_robin"],
                "description": "The mode of balancing usage of users between "
                               "scenario iterations."}
        }
    }
    # Applies to both modes.
    DEFAULT_CONFIG = {"user_choice_method": "random"}
    # Applied only when new users are being created (see __init__).
    DEFAULT_FOR_NEW_USERS = {
        "tenants": 1,
        "users_per_tenant": 1,
        "resource_management_workers":
            cfg.CONF.users_context.resource_management_workers,
    }
    def __init__(self, context):
        """Decide between reusing deployment users and creating new ones.

        Existing deployment users are reused only when the deployment
        provides some AND the context config contains nothing besides
        'user_choice_method'; otherwise new-user defaults (including the
        project/user domains taken from the admin credential or config)
        are merged into self.config.
        """
        super(UserGenerator, self).__init__(context)
        deployment = objects.Deployment.get(context["task"]["deployment_uuid"])
        existing_users = deployment.get_credentials_for("openstack")["users"]
        if existing_users and not (set(self.config) - {"user_choice_method"}):
            self.existing_users = existing_users
        else:
            self.existing_users = []
            self.credential = context["admin"]["credential"]
            project_domain = (self.credential.project_domain_name or
                              cfg.CONF.users_context.project_domain)
            user_domain = (self.credential.user_domain_name or
                           cfg.CONF.users_context.user_domain)
            self.DEFAULT_FOR_NEW_USERS["project_domain"] = project_domain
            self.DEFAULT_FOR_NEW_USERS["user_domain"] = user_domain
            with self.config.unlocked():
                for key, value in self.DEFAULT_FOR_NEW_USERS.items():
                    self.config.setdefault(key, value)
    def _remove_default_security_group(self):
        """Delete default security group for tenants."""
        clients = osclients.Clients(self.credential)
        if consts.Service.NEUTRON not in clients.services().values():
            return
        use_sg, msg = network.wrap(clients, self).supports_extension(
            "security-group")
        if not use_sg:
            LOG.debug("Security group context is disabled: %s", msg)
            return
        for user, tenant_id in rutils.iterate_per_tenants(
                self.context["users"]):
            with logging.ExceptionLogger(
                    LOG, _("Unable to delete default security group")):
                uclients = osclients.Clients(user["credential"])
                security_groups = uclients.neutron().list_security_groups()
                default = [sg for sg in security_groups["security_groups"]
                           if sg["name"] == "default"]
                if default:
                    # Deletion is done with the admin client.
                    clients.neutron().delete_security_group(default[0]["id"])
    def _create_tenants(self):
        """Concurrently create self.config["tenants"] keystone projects.

        :returns: dict mapping tenant id -> {"id", "name", "users"}
        """
        threads = self.config["resource_management_workers"]
        tenants = collections.deque()
        def publish(queue):
            for i in range(self.config["tenants"]):
                args = (self.config["project_domain"], self.task["uuid"], i)
                queue.append(args)
        def consume(cache, args):
            domain, task_id, i = args
            # One identity client per worker, cached across items.
            if "client" not in cache:
                clients = osclients.Clients(self.credential)
                cache["client"] = identity.Identity(
                    clients, name_generator=self.generate_random_name)
            tenant = cache["client"].create_project(domain_name=domain)
            tenant_dict = {"id": tenant.id, "name": tenant.name, "users": []}
            tenants.append(tenant_dict)
        # NOTE(msdubov): consume() will fill the tenants list in the closure.
        broker.run(publish, consume, threads)
        tenants_dict = {}
        for t in tenants:
            tenants_dict[t["id"]] = t
        return tenants_dict
    def _create_users(self):
        # NOTE(msdubov): This should be called after _create_tenants().
        # Concurrently creates users_per_tenant users in every tenant and
        # returns a list of {"id", "credential", "tenant_id"} dicts.
        threads = self.config["resource_management_workers"]
        users_per_tenant = self.config["users_per_tenant"]
        default_role = cfg.CONF.users_context.keystone_default_role
        users = collections.deque()
        def publish(queue):
            for tenant_id in self.context["tenants"]:
                for user_id in range(users_per_tenant):
                    username = self.generate_random_name()
                    password = str(uuid.uuid4())
                    args = (username, password, self.config["project_domain"],
                            self.config["user_domain"], tenant_id)
                    queue.append(args)
        def consume(cache, args):
            username, password, project_dom, user_dom, tenant_id = args
            # One identity client per worker, cached across items.
            if "client" not in cache:
                clients = osclients.Clients(self.credential)
                cache["client"] = identity.Identity(
                    clients, name_generator=self.generate_random_name)
            client = cache["client"]
            user = client.create_user(username, password=password,
                                      project_id=tenant_id,
                                      domain_name=user_dom,
                                      default_role=default_role)
            user_credential = credential.OpenStackCredential(
                auth_url=self.credential.auth_url,
                username=user.name,
                password=password,
                tenant_name=self.context["tenants"][tenant_id]["name"],
                permission=consts.EndpointPermission.USER,
                project_domain_name=project_dom,
                user_domain_name=user_dom,
                endpoint_type=self.credential.endpoint_type,
                https_insecure=self.credential.https_insecure,
                https_cacert=self.credential.https_cacert,
                region_name=self.credential.region_name,
                profiler_hmac_key=self.credential.profiler_hmac_key)
            users.append({"id": user.id,
                          "credential": user_credential,
                          "tenant_id": tenant_id})
        # NOTE(msdubov): consume() will fill the users list in the closure.
        broker.run(publish, consume, threads)
        return list(users)
    def _get_consumer_for_deletion(self, func_name):
        """Build a broker consumer calling identity.<func_name>(resource_id)."""
        def consume(cache, resource_id):
            if "client" not in cache:
                clients = osclients.Clients(self.credential)
                cache["client"] = identity.Identity(clients)
            getattr(cache["client"], func_name)(resource_id)
        return consume
    def _delete_tenants(self):
        """Concurrently delete all tenants recorded in the context."""
        threads = self.config["resource_management_workers"]
        def publish(queue):
            for tenant_id in self.context["tenants"]:
                queue.append(tenant_id)
        broker.run(publish, self._get_consumer_for_deletion("delete_project"),
                   threads)
        self.context["tenants"] = {}
    def _delete_users(self):
        """Concurrently delete all users recorded in the context."""
        threads = self.config["resource_management_workers"]
        def publish(queue):
            for user in self.context["users"]:
                queue.append(user["id"])
        broker.run(publish, self._get_consumer_for_deletion("delete_user"),
                   threads)
        self.context["users"] = []
    def create_users(self):
        """Create tenants and users, using the broker pattern."""
        threads = self.config["resource_management_workers"]
        LOG.debug("Creating %(tenants)d tenants using %(threads)s threads",
                  {"tenants": self.config["tenants"], "threads": threads})
        self.context["tenants"] = self._create_tenants()
        if len(self.context["tenants"]) < self.config["tenants"]:
            raise exceptions.ContextSetupFailure(
                ctx_name=self.get_name(),
                msg=_("Failed to create the requested number of tenants."))
        users_num = self.config["users_per_tenant"] * self.config["tenants"]
        LOG.debug("Creating %(users)d users using %(threads)s threads",
                  {"users": users_num, "threads": threads})
        self.context["users"] = self._create_users()
        # Cross-link each user into its tenant's "users" list.
        for user in self.context["users"]:
            self.context["tenants"][user["tenant_id"]]["users"].append(user)
        if len(self.context["users"]) < users_num:
            raise exceptions.ContextSetupFailure(
                ctx_name=self.get_name(),
                msg=_("Failed to create the requested number of users."))
    def use_existing_users(self):
        """Populate the context from the deployment's pre-existing users."""
        LOG.debug("Using existing users")
        for user_credential in self.existing_users:
            # Resolve user/project ids by authenticating with the credential.
            user_clients = user_credential.clients()
            user_id = user_clients.keystone.auth_ref.user_id
            tenant_id = user_clients.keystone.auth_ref.project_id
            if tenant_id not in self.context["tenants"]:
                self.context["tenants"][tenant_id] = {
                    "id": tenant_id,
                    "name": user_credential.tenant_name
                }
            self.context["users"].append({
                "credential": user_credential,
                "id": user_id,
                "tenant_id": tenant_id
            })
    @logging.log_task_wrapper(LOG.info, _("Enter context: `users`"))
    def setup(self):
        # Populate context["users"]/["tenants"], either from the deployment's
        # existing users or by creating new temporary ones.
        self.context["users"] = []
        self.context["tenants"] = {}
        self.context["user_choice_method"] = self.config["user_choice_method"]
        if self.existing_users:
            self.use_existing_users()
        else:
            self.create_users()
    @logging.log_task_wrapper(LOG.info, _("Exit context: `users`"))
    def cleanup(self):
        """Delete tenants and users, using the broker pattern."""
        if self.existing_users:
            # nothing to do here.
            return
        else:
            self._remove_default_security_group()
            self._delete_users()
            self._delete_tenants()
|
{
"content_hash": "047130862ac9b3dd6509d8f984786fc8",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 79,
"avg_line_length": 40.530744336569576,
"alnum_prop": 0.5606834877036091,
"repo_name": "yeming233/rally",
"id": "0e45cee5972965ec06607d524ab3efb97804a02a",
"size": "13154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rally/plugins/openstack/context/keystone/users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "46940"
},
{
"name": "Python",
"bytes": "2561223"
},
{
"name": "Shell",
"bytes": "43366"
}
],
"symlink_target": ""
}
|
from lxml import etree
from healthvaultlib.utils.xmlutils import XmlUtils
from healthvaultlib.itemtypes.healthrecorditem import HealthRecordItem
class Pregnancy(HealthRecordItem):
    """HealthVault 'pregnancy' thing type.

    Its data-xml payload carries an empty <pregnancy> element, and
    parse_thing() extracts no type-specific fields.
    """

    def __init__(self, thing_xml=None):
        super(Pregnancy, self).__init__()
        self.type_id = '46d485cf-2b84-429d-9159-83152ba801f4'
        if thing_xml is None:
            return
        self.thing_xml = thing_xml
        self.parse_thing()

    def __str__(self):
        return 'Pregnancy'

    def parse_thing(self):
        # Parse the common thing fields; this type has no payload of its own.
        super(Pregnancy, self).parse_thing()
        xml_helper = XmlUtils(self.thing_xml)

    def write_xml(self):
        thing = super(Pregnancy, self).write_xml()
        payload = etree.Element('data-xml')
        payload.append(etree.Element('pregnancy'))
        thing.append(payload)
        return thing
|
{
"content_hash": "b32dcc41c5349a7e075e1555bbc5975d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 70,
"avg_line_length": 28.566666666666666,
"alnum_prop": 0.6417736289381564,
"repo_name": "rajeevs1992/pyhealthvault",
"id": "ade62a24891ae7c2d883cc714bcc14c5cd540121",
"size": "857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/healthvaultlib/itemtypes/pregnancy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "198247"
}
],
"symlink_target": ""
}
|
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import DeserializationError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class NetworkInterfacesOperations(object):
"""NetworkInterfacesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # client: msrest service client used to build and send HTTP requests.
        # config: client configuration (subscription id, language, timeouts).
        # serializer / deserializer: msrest object-model (de)serializers.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.config = config
    def _delete_initial(
            self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
        # Issue the single HTTP DELETE that starts the long-running delete
        # operation; polling to completion is handled by the public delete().
        api_version = "2017-08-01"
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # Accepted status codes: 200/202 (in progress), 204 (already gone).
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # DELETE has no response body; only wrap the raw response on request.
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
        """Deletes the specified network interface.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: An instance of AzureOperationPoller that returns None or
         ClientRawResponse if raw=true
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            network_interface_name=network_interface_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result
        # Construct and send request
        def long_running_send():
            # The initial request was already sent above; replay its response.
            return raw_result.response
        def get_long_running_status(status_link, headers=None):
            # Poll the operation status URL, propagating the client request id.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)
        def get_long_running_output(response):
            # Final response handler: delete produces no body.
            if response.status_code not in [200, 202, 204]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def get(
            self, resource_group_name, network_interface_name, expand=None, custom_headers=None, raw=False, **operation_config):
        """Gets information about the specified network interface.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: NetworkInterface or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2017_08_01.models.NetworkInterface or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        api_version = "2017-08-01"
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        # Deserialize the body into the NetworkInterface model on success.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkInterface', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def _create_or_update_initial(
            self, resource_group_name, network_interface_name, parameters, custom_headers=None, raw=False, **operation_config):
        # Issue the single HTTP PUT that starts the long-running
        # create-or-update operation; polling is done by create_or_update().
        api_version = "2017-08-01"
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'NetworkInterface')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        # 200 = updated, 201 = created; both carry a NetworkInterface body.
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkInterface', response)
        if response.status_code == 201:
            deserialized = self._deserialize('NetworkInterface', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, resource_group_name, network_interface_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a network interface.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :param parameters: Parameters supplied to the create or update network
         interface operation.
        :type parameters:
         ~azure.mgmt.network.v2017_08_01.models.NetworkInterface
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: An instance of AzureOperationPoller that returns
         NetworkInterface or ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_08_01.models.NetworkInterface]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            network_interface_name=network_interface_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        if raw:
            return raw_result
        # Construct and send request
        def long_running_send():
            # The initial request was already sent above; replay its response.
            return raw_result.response
        def get_long_running_status(status_link, headers=None):
            # Poll the operation status URL, propagating the client request id.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            header_parameters = {}
            header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
            return self._client.send(
                request, header_parameters, stream=False, **operation_config)
        def get_long_running_output(response):
            # Final response handler: deserialize the NetworkInterface body.
            if response.status_code not in [200, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = self._deserialize('NetworkInterface', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
def list_all(
        self, custom_headers=None, raw=False, **operation_config):
    """Gets all network interfaces in a subscription.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of NetworkInterface
    :rtype:
     ~azure.mgmt.network.v2017_08_01.models.NetworkInterfacePaged[~azure.mgmt.network.v2017_08_01.models.NetworkInterface]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2017-08-01"

    def internal_paging(next_link=None, raw=False):
        # Fetch one page: the first call builds the collection URL, later
        # calls follow the service-supplied next_link verbatim (it already
        # embeds the query string, hence the empty query_parameters).
        if not next_link:
            # Construct URL
            url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response: the Paged object invokes internal_paging
    # lazily as the caller iterates.
    deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
def list(
        self, resource_group_name, custom_headers=None, raw=False, **operation_config):
    """Gets all network interfaces in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of NetworkInterface
    :rtype:
     ~azure.mgmt.network.v2017_08_01.models.NetworkInterfacePaged[~azure.mgmt.network.v2017_08_01.models.NetworkInterface]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2017-08-01"

    def internal_paging(next_link=None, raw=False):
        # Fetch one page; subsequent pages follow next_link verbatim.
        if not next_link:
            # Construct URL
            url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response (lazily, page by page, as the caller iterates)
    deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
def _get_effective_route_table_initial(
        self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
    """Send the initial POST of the effective-route-table long-running
    operation (internal helper for :meth:`get_effective_route_table`).

    A 200 response carries the result inline and is deserialized; a 202
    response means the operation is still running and must be polled, in
    which case ``None`` is returned as the deserialized body.
    """
    api_version = "2017-08-01"

    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('EffectiveRouteListResult', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def get_effective_route_table(
        self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
    """Gets all route tables applied to a network interface.

    Long-running operation: the initial POST is sent synchronously, then
    an ``AzureOperationPoller`` polls until the result is available.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_interface_name: The name of the network interface.
    :type network_interface_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :return: An instance of AzureOperationPoller that returns
     EffectiveRouteListResult or ClientRawResponse if raw=true
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_08_01.models.EffectiveRouteListResult]
     or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # raw=True keeps the transport response for the poller callbacks.
    raw_result = self._get_effective_route_table_initial(
        resource_group_name=resource_group_name,
        network_interface_name=network_interface_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )
    if raw:
        return raw_result

    # Construct and send request
    def long_running_send():
        # The initial request was already sent above; replay its response.
        return raw_result.response

    def get_long_running_status(status_link, headers=None):
        # Poll the service-provided status URL, re-using the original
        # client request id for server-side correlation.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        header_parameters = {}
        header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
        return self._client.send(
            request, header_parameters, stream=False, **operation_config)

    def get_long_running_output(response):
        # POST LROs terminate with 200 (result) or 202 (accepted).
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = self._deserialize('EffectiveRouteListResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def _list_effective_network_security_groups_initial(
        self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
    """Send the initial POST of the effective-NSG long-running operation
    (internal helper for :meth:`list_effective_network_security_groups`).

    200 carries the result inline; 202 means the operation is still
    running and ``None`` is returned as the deserialized body.
    """
    api_version = "2017-08-01"

    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def list_effective_network_security_groups(
        self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
    """Gets all network security groups applied to a network interface.

    Long-running operation: the initial POST is sent synchronously, then
    an ``AzureOperationPoller`` polls until the result is available.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_interface_name: The name of the network interface.
    :type network_interface_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :return: An instance of AzureOperationPoller that returns
     EffectiveNetworkSecurityGroupListResult or ClientRawResponse if
     raw=true
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_08_01.models.EffectiveNetworkSecurityGroupListResult]
     or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # raw=True keeps the transport response for the poller callbacks.
    raw_result = self._list_effective_network_security_groups_initial(
        resource_group_name=resource_group_name,
        network_interface_name=network_interface_name,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )
    if raw:
        return raw_result

    # Construct and send request
    def long_running_send():
        # The initial request was already sent above; replay its response.
        return raw_result.response

    def get_long_running_status(status_link, headers=None):
        # Poll the service-provided status URL, re-using the original
        # client request id for server-side correlation.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        header_parameters = {}
        header_parameters['x-ms-client-request-id'] = raw_result.response.request.headers['x-ms-client-request-id']
        return self._client.send(
            request, header_parameters, stream=False, **operation_config)

    def get_long_running_output(response):
        # POST LROs terminate with 200 (result) or 202 (accepted).
        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def list_virtual_machine_scale_set_vm_network_interfaces(
        self, resource_group_name, virtual_machine_scale_set_name, virtualmachine_index, custom_headers=None, raw=False, **operation_config):
    """Gets information about all network interfaces in a virtual machine in a
    virtual machine scale set.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_machine_scale_set_name: The name of the virtual machine
     scale set.
    :type virtual_machine_scale_set_name: str
    :param virtualmachine_index: The virtual machine index.
    :type virtualmachine_index: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of NetworkInterface
    :rtype:
     ~azure.mgmt.network.v2017_08_01.models.NetworkInterfacePaged[~azure.mgmt.network.v2017_08_01.models.NetworkInterface]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Differs from the rest of this client: the URL is under the
    # microsoft.Compute provider, hence the Compute api-version.
    api_version = "2017-03-30"

    def internal_paging(next_link=None, raw=False):
        # Fetch one page; subsequent pages follow next_link verbatim.
        if not next_link:
            # Construct URL
            url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response (lazily, page by page, as the caller iterates)
    deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
def list_virtual_machine_scale_set_network_interfaces(
        self, resource_group_name, virtual_machine_scale_set_name, custom_headers=None, raw=False, **operation_config):
    """Gets all network interfaces in a virtual machine scale set.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_machine_scale_set_name: The name of the virtual machine
     scale set.
    :type virtual_machine_scale_set_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of NetworkInterface
    :rtype:
     ~azure.mgmt.network.v2017_08_01.models.NetworkInterfacePaged[~azure.mgmt.network.v2017_08_01.models.NetworkInterface]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # microsoft.Compute endpoint, hence the Compute api-version.
    api_version = "2017-03-30"

    def internal_paging(next_link=None, raw=False):
        # Fetch one page; subsequent pages follow next_link verbatim.
        if not next_link:
            # Construct URL
            url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response (lazily, page by page, as the caller iterates)
    deserialized = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        client_raw_response = models.NetworkInterfacePaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
def get_virtual_machine_scale_set_network_interface(
        self, resource_group_name, virtual_machine_scale_set_name, virtualmachine_index, network_interface_name, expand=None, custom_headers=None, raw=False, **operation_config):
    """Get the specified network interface in a virtual machine scale set.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_machine_scale_set_name: The name of the virtual machine
     scale set.
    :type virtual_machine_scale_set_name: str
    :param virtualmachine_index: The virtual machine index.
    :type virtualmachine_index: str
    :param network_interface_name: The name of the network interface.
    :type network_interface_name: str
    :param expand: Expands referenced resources.
    :type expand: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: NetworkInterface or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.network.v2017_08_01.models.NetworkInterface or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # microsoft.Compute endpoint, hence the Compute api-version.
    api_version = "2017-03-30"

    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
        'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
        'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('NetworkInterface', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
|
{
"content_hash": "372a4446205bef4403fd91a0a87fdede",
"timestamp": "",
"source": "github",
"line_count": 901,
"max_line_length": 242,
"avg_line_length": 46.244173140954494,
"alnum_prop": 0.6427542840685451,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "36ff50f1851d0dff257a1989459747137a7ebdb8",
"size": "42140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_08_01/operations/network_interfaces_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
"""
Manages addition of Amber output information to segments.
"""
################################### MODULES ###################################
from __future__ import absolute_import,division,print_function,unicode_literals
from .TrajOutput import TrajOutput
################################### CLASSES ###################################
class AmberTrajOutput(TrajOutput):
    """
    Manages addition of Amber output information to segments.
    """

    def receive_segment(self, **kwargs):
        """
        Receives a trajectory segment and sends to each target.

        Implemented as a coroutine: runs forever, handling one segment per
        ``send()`` until the caller closes it. For each segment, an Amber
        crd output entry is appended unless the file already exists (and
        ``self.force`` is off); the segment is then forwarded to every
        downstream target.

        Arguments:
          kwargs (dict): Additional keyword arguments
        """
        import os
        while True:
            segment = yield
            # Per-segment output path: <outpath>/NNNN/NNNN<suffix>.crd
            segment_crd = "{0}/{1:04d}/{1:04d}{2}.crd".format(self.outpath,
              int(segment.number), self.suffix)
            if not os.path.isfile(segment_crd) or self.force:
                segment.outputs.append(
                  dict(
                    filename  = segment_crd,
                    format    = "crdbox",
                    selection = self.selection))
            for target in self.targets:
                target.send(segment)

    @staticmethod
    def add_subparser(level1_subparser, level2_subparsers, level3_classes):
        """
        Adds subparser for this input format to nascent parser.

        Arguments:
          level1_subparser (Subparser): Level 1 subparser to which level
            2 subparser will be added
          level2_subparsers (Subparsers): Nascent collection of level 2
            subparsers to which level 2 subparser will be added
          level3_classes (list): Classes for which level 3 subparsers
            will be added

        Returns:
          (*Subparser*, *Subparsers*): New level 2 subparser and
          associated collection of level 3 subparsers
        """
        level2_subparser = level2_subparsers.add_parser(
          name  = "amber",
          usage = "convert.py {0} amber".format(level1_subparser.name),
          help  = "Amber crd text output")
        setattr(level2_subparser, "name", "amber")
        level3_subparsers = level2_subparser.add_subparsers(
          title = "Converter")
        for level3_class in level3_classes:
            level3_subparser = level3_class.add_subparser(level1_subparser,
              level2_subparser, level3_subparsers)
            # NOTE(review): arg_groups is built but never used in this
            # method -- presumably a leftover or hook for subclasses; confirm.
            arg_groups = {ag.title: ag
              for ag in level3_subparser._action_groups}
            AmberTrajOutput.add_shared_args(level3_subparser)
            level3_subparser.set_defaults(output_coroutine=AmberTrajOutput)
        return level2_subparser, level3_subparsers
|
{
"content_hash": "793eddce91b9101fb1cd37cf27a59cb9",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 38.4,
"alnum_prop": 0.5751488095238095,
"repo_name": "KarlTDebiec/md_format_converter",
"id": "d48425205e4dfdfa299d71afe10580f63814b9cb",
"size": "2950",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "AmberTrajOutput.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "45195"
},
{
"name": "Shell",
"bytes": "660"
},
{
"name": "Tcl",
"bytes": "8081"
}
],
"symlink_target": ""
}
|
from knesset_data.dataservice.votes import Vote as DataserviceVote, VoteMember as DataserviceVoteMember
from knesset_data.html_scrapers.votes import HtmlVote
from laws.models import Vote, VoteAction
from simple.scrapers import hebrew_strftime
from simple.scrapers.management import BaseKnessetDataserviceCollectionCommand
from mks.models import Member
from simple.management.commands.syncdata import Command as SyncdataCommand
from links.models import Link
from django.contrib.contenttypes.models import ContentType
from optparse import make_option
from sys import stdout
import csv
class VoteScraperException(Exception):
    """Raised when scraped vote data cannot be reconciled with local records
    (e.g. a vote page references a member id that does not exist in the DB).

    The previous ``__init__`` only forwarded its arguments verbatim to
    ``Exception.__init__`` and has been removed as redundant; behavior is
    unchanged.
    """
    pass
class Command(BaseKnessetDataserviceCollectionCommand):
# Dataservice collection this command scrapes from.
DATASERVICE_CLASS = DataserviceVote

# Command-line flags added on top of the base collection command's options.
option_list = BaseKnessetDataserviceCollectionCommand.option_list + (
    make_option('--validate-votes-pages', dest='validatevotepages',
                help="validate votes between (and including) given page range\npages in this case are based on vote id ascending, so you'll have the same page number each time"),
    make_option('--validate-skip-to', dest='validateskipto',
                help="skip to the given vote id (for use with --validate-votes-pages)"),
    make_option('--create-vote-src-id', dest='createvotesrcid',
                help="create the given vote/s from the comma-separated src ids (assuming they don't already exist in DB)"),
    make_option('--validate-output-file', dest='validateoutputfile',
                help="where to write the validation results to (defaults to stdout)"),
    make_option('--validate-fix', dest='validatefix', action='store_true',
                help="try to fix some problems directly in DB which are safe to automatically fix")
)

help = "Scrape votes data from the knesset"

# Maps Vote model attribute names to either a dataservice attribute name
# (str) or a callable taking the dataservice vote and returning the value.
dataservice_model_map = {
    # model attribute name | dataservice attribute name, or lambda to get the value
    'src_id': 'id',
    'title': lambda vote: u'{vote} - {sess}'.format(vote=vote.item_dscr, sess=vote.sess_item_dscr),
    'time_string': lambda vote: u'יום %s'%hebrew_strftime(vote.datetime),
    'importance': lambda vote: 1,
    'time': 'datetime',
    'meeting_number': "session_num",
    'vote_number': 'nbr_in_sess',
    'src_url': lambda vote: "http://www.knesset.gov.il/vote/heb/Vote_Res_Map.asp?vote_id_t=%s"%vote.id
}
def _get_dataservice_model_kwargs(self, dataservice_vote):
    """Build the kwargs for the local Vote model from a dataservice vote.

    Each entry in ``dataservice_model_map`` is either a source attribute
    name (looked up on the dataservice vote) or a callable applied to it.
    """
    kwargs = {}
    for field_name, source in self.dataservice_model_map.iteritems():
        if isinstance(source, str):
            kwargs[field_name] = getattr(dataservice_vote, source)
        else:
            kwargs[field_name] = source(dataservice_vote)
    return kwargs
def _update_or_create_vote(self, dataservice_vote, oknesset_vote=None):
    """Create or refresh an Open Knesset Vote from a dataservice vote.

    If ``oknesset_vote`` is given it is updated in place; otherwise a new
    Vote row is created. In both cases vote actions are attached, derived
    vote properties are recalculated, the matching protocol is located,
    and a link back to the Knesset site is stored.

    Returns the saved Vote instance.
    """
    vote_kwargs = self._get_dataservice_model_kwargs(dataservice_vote)
    if oknesset_vote:
        # Plain loop instead of the previous side-effect list
        # comprehension ([setattr(...) for ...]) -- same effect.
        for attr_name, value in vote_kwargs.iteritems():
            setattr(oknesset_vote, attr_name, value)
        oknesset_vote.save()
    else:
        oknesset_vote = Vote.objects.create(**vote_kwargs)
    self._add_vote_actions(dataservice_vote, oknesset_vote)
    oknesset_vote.update_vote_properties()
    SyncdataCommand().find_synced_protocol(oknesset_vote)
    Link.objects.create(
        title=u'ההצבעה באתר הכנסת',
        url='http://www.knesset.gov.il/vote/heb/Vote_Res_Map.asp?vote_id_t=%s' % oknesset_vote.src_id,
        content_type=ContentType.objects.get_for_model(oknesset_vote), object_pk=str(oknesset_vote.id)
    )
    return oknesset_vote
    # if v.full_text_url != None:
    #     l = Link(title=u'מסמך הצעת החוק באתר הכנסת', url=v.full_text_url, content_type=ContentType.objects.get_for_model(v), object_pk=str(v.id))
    #     l.save()
def _add_vote_actions(self, dataservice_vote, oknesset_vote):
    """Attach a VoteAction for every MK listed on the Knesset vote page.

    Raises VoteScraperException if a member id from the page has no
    matching Member row.
    """
    html_vote = HtmlVote.get_from_vote_id(dataservice_vote.id)
    for member_id, vote_result_code in html_vote.member_votes:
        member_qs = Member.objects.filter(pk=member_id)
        if not member_qs.exists():
            raise VoteScraperException('vote %s: could not find member id %s' % (dataservice_vote.id, member_id))
        member = member_qs.first()
        vote_type = self._resolve_vote_type(vote_result_code)
        vote_action, created = VoteAction.objects.get_or_create(
            vote=oknesset_vote,
            member=member,
            defaults={'type': vote_type, 'party': member.current_party})
        if created:
            vote_action.save()
def _has_existing_object(self, dataservice_vote):
    """Return True if a Vote with this dataservice source id is already stored."""
    return Vote.objects.filter(src_id=dataservice_vote.id).exists()
def _create_new_object(self, dataservice_vote):
    # Hook required by the base collection command: build a fresh Vote
    # row (delegates to the shared create/update path with no existing vote).
    return self._update_or_create_vote(dataservice_vote)
@classmethod
def _resolve_vote_type(cls, vote_result_code):
    """Map a scraped vote-result code to the VoteAction type constant.

    Previously the first parameter was named ``cls`` without a
    ``@classmethod`` decorator, so instance calls bound the instance to
    ``cls`` by accident; the decorator makes the intent explicit while
    keeping ``self._resolve_vote_type(code)`` call sites working.

    Raises KeyError for an unrecognized result code.
    """
    return {
        'voted for': u'for',
        'voted against': u'against',
        'abstain': u'abstain',
        'did not vote': u'no-vote',
    }[vote_result_code]
def recreate_objects(self, vote_ids):
recreated_votes = []
for vote_id in vote_ids:
oknesset_vote = Vote.objects.get(id=int(vote_id))
vote_src_id = oknesset_vote.src_id
dataservice_vote = self.DATASERVICE_CLASS.get(vote_src_id)
VoteAction.objects.filter(vote=oknesset_vote).delete()
Link.objects.filter(content_type=ContentType.objects.get_for_model(oknesset_vote), object_pk=oknesset_vote.id).delete()
recreated_votes.append(self._update_or_create_vote(dataservice_vote, oknesset_vote))
return recreated_votes
    def _validate_vote(self, dataservice_vote, csv_writer, fix=False):
        """Compare a dataservice vote against the stored Vote and report mismatches.

        Discrepancies are logged and written as rows to ``csv_writer``.
        When ``fix`` is True, missing votes are created and mismatched
        'title'/'src_url' attributes are overwritten from the dataservice.
        """
        # check the basic metadata
        qs = Vote.objects.filter(src_id=dataservice_vote.id)
        if qs.count() != 1:
            if fix:
                self._log_info('could not find corresponding vote in DB, creating it now')
                self._create_new_object(dataservice_vote)
            else:
                error = 'could not find corresponding vote in DB (qs.count=%s)'%(qs.count(),)
                self._log_warn(error)
                csv_writer.writerow([dataservice_vote.id, '', error.encode('utf-8')])
        else:
            oknesset_vote = qs.first()
            # field-by-field comparison of the scraped attributes vs. the DB row
            for attr_name, expected_value in self._get_dataservice_model_kwargs(dataservice_vote).iteritems():
                actual_value = getattr(oknesset_vote, attr_name)
                if attr_name == 'time_string':
                    # remove some unprintable artifacts which for some reason are in the old scraper's votes
                    actual_value = actual_value.replace(u"\u200f", "").replace(u"\xa0"," ")
                if attr_name == 'title' and actual_value != expected_value:
                    # try a slightly different format which exists in DB in some cases
                    actual_value = actual_value.replace(u" - הצעת חוק", u" - חוק")
                if actual_value != expected_value:
                    if fix and attr_name in ['title', 'src_url']:
                        self._log_info('fixing mismatch in %s attribute'%(attr_name,))
                        setattr(oknesset_vote, attr_name, expected_value)
                        oknesset_vote.save()
                    else:
                        error = 'value mismatch for %s (expected="%s", actual="%s")'%(attr_name, expected_value, actual_value)
                        self._log_warn(error)
                        csv_writer.writerow([dataservice_vote.id, oknesset_vote.id, error.encode('utf-8')])
            # validate the vote counts
            for type_title, oknesset_count, dataservice_count in zip(
                    ('for', 'against', 'abstain'),
                    [oknesset_vote.actions.filter(type=t).count() for t in 'for', 'against', 'abstain'],
                    [int(getattr(dataservice_vote, t)) for t in 'total_for', 'total_against', 'total_abstain']
            ):
                if oknesset_count != dataservice_count:
                    error = 'mismatch in %s count (expected=%s, actual=%s)'%(type_title, dataservice_count, oknesset_count)
                    self._log_warn(error)
                    csv_writer.writerow([dataservice_vote.id, oknesset_vote.id, error.encode('utf-8')])
    def _validate_vote_pages(self, out, pages, skip_to_vote_id, try_to_fix):
        """Validate all votes on the given dataservice pages, writing a CSV report to *out*.

        ``skip_to_vote_id`` (optional) skips votes with a lower source id;
        ``try_to_fix`` is forwarded to _validate_vote as its ``fix`` flag.
        """
        writer = csv.writer(out)
        writer.writerow(['knesset vote id', 'open knesset vote id', 'error'])
        for page in pages:
            self._log_info('downloading page %s'%page)
            votes = DataserviceVote.get_page(order_by=('id', 'asc'), page_num=page)
            self._log_info('downloaded %s votes'%len(votes))
            if len(votes) < 1:
                self._log_warn('no votes in the page')
            else:
                self._log_info(' first vote date: %s'%votes[0].datetime)
            for vote in votes:
                # skip forward until the requested starting vote id (if any)
                if not skip_to_vote_id or int(vote.id) >= int(skip_to_vote_id):
                    self._log_info('validating vote %s'%vote.id)
                    self._validate_vote(vote, writer, fix=try_to_fix)
    def _handle_noargs(self, **options):
        """Management-command entry point.

        Supports two modes:
        * ``createvotesrcid`` -- comma-separated Knesset source ids; downloads
          each vote and creates it (fails if one already exists in the DB).
        * ``validatevotepages`` -- a "FROM-TO" page range; validates every vote
          on those pages, optionally fixing and/or writing a CSV report.
        Anything else is delegated to the parent command.
        """
        if options.get('createvotesrcid'):
            src_ids = [int(i) for i in options['createvotesrcid'].split(',')]
            self._log_info('downloading %s votes'%len(src_ids))
            dataservice_votes = []
            for src_id in src_ids:
                self._log_info('downloading vote %s'%src_id)
                dataservice_vote = DataserviceVote.get(src_id)
                dataservice_votes.append(dataservice_vote)
            self._log_info('downloaded all votes data, will create them now')
            oknesset_votes = []
            for dataservice_vote in dataservice_votes:
                if self._has_existing_object(dataservice_vote):
                    raise VoteScraperException('vote already exists in DB: %s'%dataservice_vote.id)
                else:
                    oknesset_vote = self._create_new_object(dataservice_vote)
                    oknesset_votes.append(oknesset_vote)
                    self._log_info('created vote %s (%s)'%(oknesset_vote, oknesset_vote.pk))
            self._log_info('done, created %s votes'%len(oknesset_votes))
        elif options.get('validatevotepages'):
            from_page, to_page = [int(p) for p in options['validatevotepages'].split('-')]
            skip_to_vote_id = options.get('validateskipto', None)
            output_file_name = options.get('validateoutputfile', None)
            try_to_fix = options.get('validatefix', False);
            if from_page > to_page:
                # we support reverse pages as well!
                pages = reversed(range(to_page, from_page+1))
            else:
                pages = range(from_page, to_page+1)
            # write the CSV report to a file if requested, otherwise to stdout
            if output_file_name:
                out = open(output_file_name, 'wb')
            else:
                out = stdout
            self._validate_vote_pages(out, pages, skip_to_vote_id, try_to_fix)
            if output_file_name:
                out.close()
            self._log_info('done')
        else:
            return super(Command, self)._handle_noargs(**options)
|
{
"content_hash": "3a53bd5e5fcc80a819870c9ab4aa4e5b",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 182,
"avg_line_length": 54.199052132701425,
"alnum_prop": 0.5926897516614201,
"repo_name": "noamelf/Open-Knesset",
"id": "64f4e4470f6362eb008a97e11eabfdb52ffa43ea",
"size": "11503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "laws/management/commands/scrape_votes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "347567"
},
{
"name": "HTML",
"bytes": "699567"
},
{
"name": "JavaScript",
"bytes": "216606"
},
{
"name": "Python",
"bytes": "4353274"
},
{
"name": "Shell",
"bytes": "245"
}
],
"symlink_target": ""
}
|
"""
WSGI config for ask project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ask.settings")
application = get_wsgi_application()
|
{
"content_hash": "82cdce6242a15c64e8a426ff3109dcf6",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 23.9375,
"alnum_prop": 0.7650130548302873,
"repo_name": "bbb1991/stepic_course_web",
"id": "acca1d7ec4c46afd2424533640a4733476bf3c22",
"size": "383",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "ask/ask/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Nginx",
"bytes": "1465"
},
{
"name": "Python",
"bytes": "6482"
},
{
"name": "Shell",
"bytes": "1436"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create the Similarity and Recommendation tables
    (with their composite unique constraints) for the djangoorm storage backend."""

    def forwards(self, orm):
        # Adding model 'Similarity'
        db.create_table(u'djangoorm_similarity', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('object_ctype', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('object_site', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('score', self.gf('django.db.models.fields.FloatField')(default=None, null=True, blank=True)),
            ('related_object_ctype', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('related_object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('related_object_site', self.gf('django.db.models.fields.PositiveIntegerField')()),
        ))
        db.send_create_signal(u'djangoorm', ['Similarity'])
        # Adding unique constraint on 'Similarity', fields ['object_ctype', 'object_id', 'object_site', 'related_object_ctype', 'related_object_id', 'related_object_site']
        db.create_unique(u'djangoorm_similarity', ['object_ctype', 'object_id', 'object_site', 'related_object_ctype', 'related_object_id', 'related_object_site'])
        # Adding model 'Recommendation'
        db.create_table(u'djangoorm_recommendation', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('object_ctype', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('object_site', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('user', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('score', self.gf('django.db.models.fields.FloatField')(default=None, null=True, blank=True)),
        ))
        db.send_create_signal(u'djangoorm', ['Recommendation'])
        # Adding unique constraint on 'Recommendation', fields ['object_ctype', 'object_id', 'user']
        db.create_unique(u'djangoorm_recommendation', ['object_ctype', 'object_id', 'user'])

    def backwards(self, orm):
        # Constraints must be dropped before their tables, in reverse creation order.
        # Removing unique constraint on 'Recommendation', fields ['object_ctype', 'object_id', 'user']
        db.delete_unique(u'djangoorm_recommendation', ['object_ctype', 'object_id', 'user'])
        # Removing unique constraint on 'Similarity', fields ['object_ctype', 'object_id', 'object_site', 'related_object_ctype', 'related_object_id', 'related_object_site']
        db.delete_unique(u'djangoorm_similarity', ['object_ctype', 'object_id', 'object_site', 'related_object_ctype', 'related_object_id', 'related_object_site'])
        # Deleting model 'Similarity'
        db.delete_table(u'djangoorm_similarity')
        # Deleting model 'Recommendation'
        db.delete_table(u'djangoorm_recommendation')

    # Frozen ORM description used by South when running this migration.
    models = {
        u'djangoorm.recommendation': {
            'Meta': {'ordering': "[u'-score']", 'unique_together': "((u'object_ctype', u'object_id', u'user'),)", 'object_name': 'Recommendation'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_ctype': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'object_site': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'score': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        u'djangoorm.similarity': {
            'Meta': {'ordering': "[u'-score']", 'unique_together': "((u'object_ctype', u'object_id', u'object_site', u'related_object_ctype', u'related_object_id', u'related_object_site'),)", 'object_name': 'Similarity'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_ctype': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'object_site': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'related_object_ctype': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'related_object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'related_object_site': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'score': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['djangoorm']
|
{
"content_hash": "0eee8fe5ddb9b9ea50c03ea915b83990",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 221,
"avg_line_length": 62.73076923076923,
"alnum_prop": 0.6249744533006335,
"repo_name": "python-recsys/django-recommends",
"id": "fef860d0d7f0b04581e27dc8053b1f5327e5b68c",
"size": "4917",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "recommends/storages/djangoorm/south_migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1583"
},
{
"name": "Python",
"bytes": "99141"
}
],
"symlink_target": ""
}
|
import re
def dollars_to_math(source):
    r"""
    Replace dollar signs with backticks, rewriting ``$...$`` as ``:math:`...```.

    More precisely, do a regular expression search.  Replace a plain
    dollar sign ($) by a backtick (`).  Replace an escaped dollar sign
    (\$) by a dollar sign ($).  Don't change a dollar sign preceded or
    followed by a backtick (`$ or $`), because of strings like
    "``$HOME``".  Don't make any changes on lines starting with
    spaces, because those are indented and hence part of a block of
    code or examples.

    This also doesn't replace dollar signs enclosed in curly braces,
    to avoid nested math environments, such as ::

      $f(n) = 0 \text{ if $n$ is prime}$

    Thus the above line would get changed to

      `f(n) = 0 \text{ if $n$ is prime}`

    ``source`` is a one-element list of strings (the Sphinx source-read
    convention); it is modified in place.
    """
    s = "\n".join(source)
    if s.find("$") == -1:
        return
    # "$blah$" inside a pair of curly braces is probably a nested math
    # environment -- protect each such span behind a unique placeholder
    # and substitute the original text back at the end.
    # (Fixed: the placeholder map used to be a module-level global, which
    # leaked state between calls and was not reentrant; it is now local.)
    protected = {}

    def repl(matchobj):
        matched_text = matchobj.group(0)
        placeholder = "___XXX_REPL_%d___" % len(protected)
        protected[placeholder] = matched_text
        return placeholder

    s = re.sub(r"({[^{}$]*\$[^{}$]*\$[^{}]*})", repl, s)
    # matches $...$ (not preceded by another $ or a backslash)
    dollars = re.compile(r"(?<!\$)(?<!\\)\$([^\$]+?)\$")
    # regular expression for \$
    slashdollar = re.compile(r"\\\$")
    s = dollars.sub(r":math:`\1`", s)
    s = slashdollar.sub(r"$", s)
    # restore the protected {...$...$...} spans
    for placeholder, original in protected.items():
        s = s.replace(placeholder, original)
    # now save results in "source"
    source[:] = [s]
def process_dollars(app, docname, source):
    """Sphinx ``source-read`` handler: convert $...$ math in *source* in place."""
    dollars_to_math(source)
def mathdollar_docstrings(app, what, name, obj, options, lines):
    """Sphinx ``autodoc-process-docstring`` handler: convert $...$ math in docstring *lines*."""
    dollars_to_math(lines)
def setup(app):
    """Register the dollar-to-math converters with the Sphinx application."""
    app.connect("source-read", process_dollars)
    app.connect('autodoc-process-docstring', mathdollar_docstrings)
|
{
"content_hash": "d7faf02a9fb8e14177eccd116adf3b6a",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 74,
"avg_line_length": 32.42857142857143,
"alnum_prop": 0.5952031326480666,
"repo_name": "stefanv/selective-inference",
"id": "c2c055fadb7ca7b14ba67901f4b42ad3c04197b6",
"size": "2429",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "doc/source/sphinxext/math_dollar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "269"
},
{
"name": "C++",
"bytes": "13148"
},
{
"name": "Python",
"bytes": "566054"
},
{
"name": "R",
"bytes": "11134"
},
{
"name": "TeX",
"bytes": "3355"
}
],
"symlink_target": ""
}
|
import Image
import select
import v4l2capture
import cv2
import numpy
import time
import grip
from flask import Flask, render_template, Response
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the main page from templates/index.html."""
    return render_template('index.html')
def gen(camera):
    """Yield an endless multipart/x-mixed-replace stream of JPEG frames.

    ``camera`` must expose a ``get_frame()`` method returning raw JPEG bytes.
    Each yielded chunk is one MJPEG part delimited by the ``--frame`` boundary.
    """
    part_header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    while True:
        yield part_header + camera.get_frame() + b'\r\n\r\n'
@app.route('/video_feed')
def video_feed():
    """Stream MJPEG frames as a multipart HTTP response.

    NOTE(review): ``VideoCamera`` is not defined or imported anywhere in this
    file, so hitting this route raises NameError -- confirm where the camera
    class was meant to come from.
    """
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
# --- Main capture loop (Python 2 script; note the print statements below) ---
# NOTE(review): the Flask app defined above is never started (no app.run()),
# so only this capture loop actually executes when the script is run.
# Open the video device.
video = v4l2capture.Video_device("/dev/video0")
# Suggest an image size to the device. The device may choose and
# return another size if it doesn't support the suggested one.
size_x, size_y = video.set_format(640, 480)
# Create a buffer to store image data in. This must be done before
# calling 'start' if v4l2capture is compiled with libv4l2. Otherwise
# raises IOError.
video.create_buffers(2)
# Send the buffer to the device. Some devices require this to be done
# before calling 'start'.
video.queue_all_buffers()
# Start the device. This lights the LED if it's a camera that has one.
video.start()
gp = grip.GripPipeline()
print "starting"
while True:
    # Wait for the device to fill the buffer.
    select.select((video,), (), ())
    # The rest is easy :-)
    image_data = video.read_and_queue()
    image = Image.fromstring("RGB", (size_x, size_y), image_data, "raw", "BGR")
    im_array = numpy.array(image)
    # show the GRIP-processed frame in a preview window
    cv2.imshow('image',gp.process(im_array))
    ret, jpeg = cv2.imencode('.jpg', im_array)
    # NOTE(review): jpeg_bytes is computed every frame but never used --
    # presumably intended for the Flask /video_feed stream; confirm.
    jpeg_bytes = jpeg.tobytes()
    # press 'q' in the preview window to stop capturing
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.close()
# save the last captured frame to disk
image.save("image.jpg")
print "Saved image.jpg (Size: " + str(size_x) + " x " + str(size_y) + ")"
|
{
"content_hash": "d48606547baf7818370c62a6177b3b51",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 76,
"avg_line_length": 25.257142857142856,
"alnum_prop": 0.6838235294117647,
"repo_name": "frc1769/vision2017",
"id": "83ee12b38facbdc0dcf54b3df92a7f4e1379a7f9",
"size": "2206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/capture_picture.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "18657"
},
{
"name": "Shell",
"bytes": "626"
}
],
"symlink_target": ""
}
|
import rclpy
from dynamic_stack_decider.abstract_action_element import AbstractActionElement
from bitbots_msgs.action import Dynup
from action_msgs.msg import GoalStatus
class GetWalkready(AbstractActionElement):
    """Action element that drives the robot into the walkready pose.

    Sends a 'walkready' goal to the Dynup action server on the first
    perform() and pops itself from the behavior stack once the motion
    has finished (or immediately if the goal could not be sent).
    """

    def __init__(self, blackboard, dsd, parameters=None):
        # Bugfix: forward the caller's parameters to the base class instead of
        # hard-coding None, which silently discarded any configured parameters.
        super().__init__(blackboard, dsd, parameters)
        self.direction = 'walkready'
        self.first_perform = True

    def perform(self, reevaluate=False):
        """Start the dynup motion once, then wait for it to finish."""
        # deactivate falling since it will be wrongly detected
        self.do_not_reevaluate()
        if self.first_perform:
            # try to start the walkready animation (direction set in __init__)
            success = self.start_animation()
            # if we fail, we need to abort this action
            if not success:
                self.blackboard.node.get_logger().error("Could not start animation. Will abort play animation action!")
                return self.pop()
            self.first_perform = False
            return
        if self.animation_finished():
            # we are finished playing this animation
            return self.pop()

    def start_animation(self):
        """Send the dynup goal asynchronously.

        This will NOT wait by itself. You have to check
        animation_finished() by yourself.

        :return: True if the goal was sent, False if the server is unavailable.
        """
        first_try = self.blackboard.dynup_action_client.wait_for_server(timeout_sec=1.0)
        if not first_try:
            server_running = False
            # block (while the node is alive) until the dynup server appears
            while not server_running and rclpy.ok():
                self.blackboard.node.get_logger().warn(
                    "Dynup Action Server not running! Dynup cannot work without dynup server!"
                    "Will now wait until server is accessible!",
                    throttle_duration_sec=10.0)
                server_running = self.blackboard.dynup_action_client.wait_for_server(timeout_sec=1)
                if server_running:
                    self.blackboard.node.get_logger().warn("Dynup server now running, hcm will go on.")
                else:
                    self.blackboard.node.get_logger().warn("Dynup server did not start.")
                    return False
        goal = Dynup.Goal()
        goal.direction = self.direction
        self.dynup_action_current_goal = self.blackboard.dynup_action_client.send_goal_async(goal)
        return True

    def animation_finished(self):
        """Return True once the dynup goal succeeded or was cancelled."""
        return (self.dynup_action_current_goal.done() and self.dynup_action_current_goal.result().status == GoalStatus.STATUS_SUCCEEDED) \
               or self.dynup_action_current_goal.cancelled()
|
{
"content_hash": "237289919f61d6a05791a402d2ea6218",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 138,
"avg_line_length": 43.193548387096776,
"alnum_prop": 0.6086631814787155,
"repo_name": "bit-bots/bitbots_behaviour",
"id": "38989d8512c4335610950199c31b1cb1a74cd22c",
"size": "2678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bitbots_body_behavior/bitbots_body_behavior/actions/get_walkready.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "12619"
},
{
"name": "Python",
"bytes": "92540"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Add the nullable ``cover_pic`` FK (to catalog.Image) on Makey."""
        # Adding field 'Makey.cover_pic'
        db.add_column(u'catalog_makey', 'cover_pic',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalog.Image'], null=True, blank=True),
                      keep_default=False)
def backwards(self, orm):
    """Reverse the migration: drop the 'cover_pic' column from catalog_makey."""
    # The FK is stored on disk with the conventional '_id' suffix.
    table_name = u'catalog_makey'
    db.delete_column(table_name, 'cover_pic_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalog.cfistoreitem': {
'Meta': {'object_name': 'CfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.Product']", 'unique': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cfi_store_item_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeCfiStoreItem']", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['auth.User']"})
},
'catalog.like': {
'Meta': {'object_name': 'Like'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecfistoreitem': {
'Meta': {'unique_together': "(('user', 'cfi_store_item'),)", 'object_name': 'LikeCfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'cfi_store_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.CfiStoreItem']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecomment': {
'Meta': {'unique_together': "(('user', 'comment'),)", 'object_name': 'LikeComment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Comment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'LikeMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likenote': {
'Meta': {'unique_together': "(('user', 'note'),)", 'object_name': 'LikeNote'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproduct': {
'Meta': {'unique_together': "(('user', 'product'),)", 'object_name': 'LikeProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductdescription': {
'Meta': {'unique_together': "(('user', 'product_description'),)", 'object_name': 'LikeProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproducttutorial': {
'Meta': {'unique_together': "(('user', 'tutorial', 'product'),)", 'object_name': 'LikeProductTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeshop': {
'Meta': {'unique_together': "(('user', 'shop'),)", 'object_name': 'LikeShop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likevideo': {
'Meta': {'unique_together': "(('user', 'video'),)", 'object_name': 'LikeVideo'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Video']"})
},
'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.location': {
'Meta': {'object_name': 'Location'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.makey': {
'Meta': {'object_name': 'Makey'},
'about': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}),
'cover_pic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_parts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_parts'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyvideos'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Video']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'catalog.makeyimage': {
'Meta': {'object_name': 'MakeyImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey_id': ('django.db.models.fields.IntegerField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.newproduct': {
'Meta': {'object_name': 'NewProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.newuser': {
'Meta': {'object_name': 'NewUser'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.note': {
'Meta': {'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'product_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeProduct']", 'to': u"orm['auth.User']"}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'makeys_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'tools_used'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'products'", 'blank': 'True', 'to': "orm['catalog.Tutorial']"})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.productreview': {
'Meta': {'object_name': 'ProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_reviews'", 'to': "orm['catalog.Product']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shop_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeShop']", 'to': u"orm['auth.User']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.shopreview': {
'Meta': {'object_name': 'ShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shop_reviews'", 'to': "orm['catalog.Shop']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topshops': {
'Meta': {'object_name': 'TopShops'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.topusers': {
'Meta': {'object_name': 'TopUsers'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.userflags': {
'Meta': {'object_name': 'UserFlags'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_maker_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_makey_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userinteraction': {
'Meta': {'object_name': 'UserInteraction'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'event': ('django.db.models.fields.IntegerField', [], {}),
'event_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'following': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'followers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.UserProfile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
'catalog.video': {
'Meta': {'object_name': 'Video'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'embed_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.IntegerField', [], {}),
'thumb_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.votemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'VoteMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteproductreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteshopreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ShopReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.votetutorial': {
'Meta': {'unique_together': "(('user', 'tutorial'),)", 'object_name': 'VoteTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['catalog']
|
{
"content_hash": "3cbed7c34ede17679b66283513c9c98a",
"timestamp": "",
"source": "github",
"line_count": 566,
"max_line_length": 224,
"avg_line_length": 79.07773851590106,
"alnum_prop": 0.5429420438804237,
"repo_name": "Makeystreet/makeystreet",
"id": "305a01b476d9745b14ab3c0da04a0da7c4d19d54",
"size": "44782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "woot/apps/catalog/migrations/0084_auto__add_field_makey_cover_pic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1893401"
},
{
"name": "HTML",
"bytes": "2253311"
},
{
"name": "JavaScript",
"bytes": "1698946"
},
{
"name": "Python",
"bytes": "9010343"
}
],
"symlink_target": ""
}
|
from setuptools import setup


def _read_requirements(path='requirements.txt'):
    """Return requirement specifiers from *path*, skipping blanks and comments.

    Replaces ``pip.req.parse_requirements``: that is a private pip API that
    was removed in pip 10, which made this setup.py fail to import.
    NOTE(review): plain line parsing covers simple specifier files; confirm
    requirements.txt contains no ``-r``/``-e`` directives.
    """
    with open(path) as req_file:
        return [line.strip() for line in req_file
                if line.strip() and not line.strip().startswith('#')]


install_requires = _read_requirements()

setup(
    name='mediafire',
    version='0.6.0',
    author='Roman Yepishev',
    author_email='rye@keypressure.com',
    packages=['mediafire', 'mediafire.media'],
    url='https://github.com/MediaFire/mediafire-python-open-sdk',
    license='BSD',
    description='Python MediaFire client library',
    long_description=open('README.rst').read(),
    install_requires=install_requires,
    keywords="mediafire cloud files sdk storage api upload",
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'License :: OSI Approved :: BSD License'
    ]
)
|
{
"content_hash": "c21ef7f7fcfc47f4ed91e10547c5f047",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 75,
"avg_line_length": 35,
"alnum_prop": 0.6630541871921182,
"repo_name": "roman-yepishev/mediafire-python-open-sdk",
"id": "4a246152ef337f7f307cb48d83f8b1f50977e3f5",
"size": "1015",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "122947"
}
],
"symlink_target": ""
}
|
import os
import re
import pathFile
class searchFile:
    """Collect localisation tokens (getText(...) string literals) from
    ActionScript (.as) and MXML (.mxml) source files."""

    # File extensions that are scanned for getText() calls.
    _EXTENSIONS = ('.as', '.mxml')

    def searchFiles(self, wordList, dirPath):
        """Walk *dirPath* recursively and append every token found in
        .as/.mxml files to *wordList* (mutated in place)."""
        for dirpath, dirnames, filenames in os.walk(dirPath):
            for filename in filenames:
                ext = os.path.splitext(filename)[-1]
                # Membership test replaces the original
                # `ext and ext == '.as' or ext == '.mxml'`, whose
                # and/or precedence was hard to read (same result).
                if ext in self._EXTENSIONS:
                    self.searchInFile(dirpath, filename, wordList)

    def searchInFile(self, dirpath, file, wordList):
        """Append every literal passed to getText('...') or getText("...")
        in the given file to *wordList*."""
        # os.path.join keeps this portable; the original hard-coded '\\',
        # which only worked on Windows.
        file_path = os.path.join(dirpath, file)
        # `with` guarantees the handle is closed even if reading fails.
        with open(file_path, 'r') as source:
            text = source.read()
        # Single-quoted calls: getText('token')
        for match in re.findall(r"getText\('.*?'", text):
            wordList.append(match.split("'")[1])
        # Double-quoted calls: getText("token")
        for match in re.findall(r'getText\(".*?"', text):
            wordList.append(match.split('"')[1])

    def makeTokenList(self):
        """Return the combined token list for all configured source trees.

        NOTE(review): pathFile is a project module supplying the root
        directories — confirm all five roots are still current.
        """
        wordList = []
        for root in (pathFile.dirPathFundation,
                     pathFile.dirPathNavBarAlone,
                     pathFile.dirPathRaVis,
                     pathFile.dirPathPearltreesAssets,
                     pathFile.dirPathSettings):
            self.searchFiles(wordList, root)
        return wordList
|
{
"content_hash": "1f927a972d8c0720928b12aa1c7e5186",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 72,
"avg_line_length": 31.41269841269841,
"alnum_prop": 0.48155634158665994,
"repo_name": "pearltrees/tree-shape-visualization",
"id": "d17b33c641927c30296988aad20a979675139350",
"size": "1979",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/languageSynchronizer/searchFiles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "2392527"
},
{
"name": "CSS",
"bytes": "4827"
},
{
"name": "JavaScript",
"bytes": "198933"
}
],
"symlink_target": ""
}
|
""" Main test module for all utils. TODO: Add more tests here"""
from utils import is_playstyle_valid
def test_is_playstyle_valid():
    """Check that is_playstyle_valid accepts known playstyles regardless of
    case and rejects unknown, misspelled, or missing values.

    Fixed: the original asserted is_playstyle_valid("SUCcesSION") twice in a
    row — an apparent copy-paste duplicate; the redundant check is removed.
    """
    # Unknown style is rejected.
    assert is_playstyle_valid("awakened") is False
    # Valid styles are matched case-insensitively.
    assert is_playstyle_valid("SUCcesSION") is True
    assert is_playstyle_valid("awakenING") is True
    # Misspelled style is rejected.
    assert is_playstyle_valid("aakenING") is False
    # None input is handled gracefully.
    assert is_playstyle_valid(None) is False
|
{
"content_hash": "d66fa6a779533c20a0dce63a028a972d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 64,
"avg_line_length": 26.869565217391305,
"alnum_prop": 0.6925566343042071,
"repo_name": "pachev/gsbot",
"id": "05c36d778fb04435433ecb660fcd75f4bdabac36",
"size": "618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51097"
}
],
"symlink_target": ""
}
|
import datetime
from oslo_utils import timeutils
from tempest.api.identity import base
from tempest import config
from tempest.lib import decorators
CONF = config.CONF
class ApplicationCredentialsV3Test(base.BaseApplicationCredentialsV3Test):
    """Test application credentials"""
    def _list_app_creds(self, name=None):
        # Helper: list the current user's application credentials,
        # optionally filtered by name.
        kwargs = dict(user_id=self.user_id)
        if name:
            kwargs.update(name=name)
        return self.non_admin_app_creds_client.list_application_credentials(
            **kwargs)['application_credentials']
    @decorators.idempotent_id('8080c75c-eddc-4786-941a-c2da7039ae61')
    def test_create_application_credential(self):
        """Test creating application credential"""
        app_cred = self.create_application_credential()
        # Check that the secret appears in the create response
        secret = app_cred['secret']
        # Check that the secret is not retrievable after initial create
        app_cred = self.non_admin_app_creds_client.show_application_credential(
            user_id=self.user_id,
            application_credential_id=app_cred['id']
        )['application_credential']
        self.assertNotIn('secret', app_cred)
        # Check that the application credential is functional: a token
        # obtained with it is scoped to the expected project.
        _, resp = self.non_admin_token.get_token(
            app_cred_id=app_cred['id'],
            app_cred_secret=secret,
            auth_data=True
        )
        self.assertEqual(resp['project']['id'], self.project_id)
    @decorators.idempotent_id('852daf0c-42b5-4239-8466-d193d0543ed3')
    def test_create_application_credential_expires(self):
        """Test creating application credential with expire time"""
        # Request an expiry one hour out and verify it is echoed back
        # in ISO-8601 form.
        expires_at = timeutils.utcnow() + datetime.timedelta(hours=1)
        app_cred = self.create_application_credential(expires_at=expires_at)
        expires_str = expires_at.isoformat()
        self.assertEqual(expires_str, app_cred['expires_at'])
    @decorators.idempotent_id('529936eb-aa5d-463d-9f79-01c113d3b88f')
    def test_create_application_credential_access_rules(self):
        """Test creating application credential with access rules"""
        if not CONF.identity_feature_enabled.access_rules:
            raise self.skipException("Application credential access rules are "
                                     "not available in this environment")
        access_rules = [
            {
                "path": "/v2.1/servers/*/ips",
                "method": "GET",
                "service": "compute"
            }
        ]
        app_cred = self.create_application_credential(
            access_rules=access_rules)
        access_rule_resp = app_cred['access_rules'][0]
        # Drop the server-generated id before comparing against the request.
        access_rule_resp.pop('id')
        self.assertDictEqual(access_rules[0], access_rule_resp)
    @decorators.idempotent_id('ff0cd457-6224-46e7-b79e-0ada4964a8a6')
    def test_list_application_credentials(self):
        """Test listing application credentials"""
        self.create_application_credential()
        self.create_application_credential()
        app_creds = self._list_app_creds()
        self.assertEqual(2, len(app_creds))
    @decorators.idempotent_id('9bb5e5cc-5250-493a-8869-8b665f6aa5f6')
    def test_query_application_credentials(self):
        """Test listing application credentials filtered by name"""
        self.create_application_credential()
        app_cred_two = self.create_application_credential()
        app_cred_two_name = app_cred_two['name']
        # Filtering by the second credential's name must return exactly it.
        app_creds = self._list_app_creds(name=app_cred_two_name)
        self.assertEqual(1, len(app_creds))
        self.assertEqual(app_cred_two_name, app_creds[0]['name'])
|
{
"content_hash": "a1b85ed867dfd8014f109be403c69964",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 39.45054945054945,
"alnum_prop": 0.6523676880222842,
"repo_name": "openstack/tempest",
"id": "06734aa63896b4750642e8c6ca6c932125cf9d6b",
"size": "4223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/identity/v3/test_application_credentials.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5364077"
},
{
"name": "Shell",
"bytes": "8684"
}
],
"symlink_target": ""
}
|
import os
import sys
import numpy as np
sys.path.append(os.getcwd())
def noise_sampler(bs):
    """Draw a (bs, 25)-shaped batch of standard-normal noise vectors."""
    return np.random.normal(loc=0.0, scale=1.0, size=[bs, 25])
if __name__ == '__main__':
    # Project-local imports are deferred to runtime so importing this module
    # (e.g. for noise_sampler) does not require the full a_nice_mc package.
    from a_nice_mc.objectives.bayes_logistic_regression.german import German
    from a_nice_mc.models.discriminator import MLPDiscriminator
    from a_nice_mc.models.generator import create_nice_network
    from a_nice_mc.train.wgan_nll import Trainer
    # Pin the run to the first GPU.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    # Target distribution: Bayesian logistic regression on the German dataset.
    energy_fn = German(batch_size=32)
    discriminator = MLPDiscriminator([800, 800, 800])
    # NICE network over 25-dim samples with 50-dim auxiliary variable,
    # alternating v- and x-updates.
    generator = create_nice_network(
        25, 50,
        [
            ([400], 'v1', False),
            ([400, 400], 'x1', True),
            ([400], 'v2', False),
        ]
    )
    trainer = Trainer(generator, energy_fn, discriminator, noise_sampler, b=16, m=2)
    trainer.train(bootstrap_steps=5000, bootstrap_burn_in=1000, bootstrap_discard_ratio=0.5)
|
{
"content_hash": "e3ff0564845a5b34f18fe0c9cad70148",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 92,
"avg_line_length": 29,
"alnum_prop": 0.6390086206896551,
"repo_name": "ermongroup/a-nice-mc",
"id": "7c85b0ffa551313478dddaea42de42528b67645e",
"size": "928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/nice_german.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "91929"
},
{
"name": "Python",
"bytes": "49991"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from .models import Book, Author, BookInstance, Genre
def index(request):
    """
    View function for the home page of the site.

    Renders index.html with counts of the main catalog objects and a
    per-session visit counter.
    """
    # Generate counts of some of the main objects. `.count()` is used
    # directly — the original's `.all().count()` was redundant, as its own
    # comment on num_authors noted ("The 'all()' is implied by default").
    num_books = Book.objects.count()
    num_instances = BookInstance.objects.count()
    # Available copies (status 'a').
    num_instances_available = BookInstance.objects.filter(status__exact='a').count()
    num_authors = Author.objects.count()
    num_genres = Genre.objects.count()
    # Copies whose book title contains the word 'The'.
    num_the = BookInstance.objects.filter(book__title__contains='The').count()
    # Number of visits to this view, counted in the session
    # (defaults to 0 on the first visit).
    num_visits = request.session.get('num_visits', 0)
    request.session['num_visits'] = num_visits + 1
    # Render the HTML template index.html with the data in the context variable.
    return render(
        request,
        'index.html',
        context = {
            'num_books':num_books,
            'num_instances':num_instances,
            'num_instances_available':num_instances_available,
            'num_authors':num_authors,
            'num_genres':num_genres,
            'num_the':num_the,
            'num_visits':num_visits,
        },
    )
# Import a generic list View that we can use to list our Books
from django.views import generic
class BookListView(generic.ListView):
    """Paginated list view of all Books."""
    model = Book
    # Because the model is called Book, ListView looks for a template
    # named book_list by default.
    paginate_by = 10  # Display 10 books at a time if more than 10
class BookDetailView(generic.DetailView):
    """Detail view for a single Book."""
    model = Book
    # DetailView looks for a template called book_detail by default.
class AuthorListView(generic.ListView):
    """Paginated list view of all Authors."""
    model = Author
    paginate_by = 10  # show 10 authors per page
class AuthorDetailView(generic.DetailView):
    """Detail view for a single Author."""
    model = Author
from django.contrib.auth.mixins import LoginRequiredMixin
class LoanedBooksByUserListView(LoginRequiredMixin,generic.ListView):
    """List view of the books currently on loan to the logged-in user."""
    model = BookInstance
    template_name = 'catalog/bookinstance_list_borrowed_user.html'
    paginate_by = 10

    def get_queryset(self):
        """Return this user's on-loan ('o') copies, ordered by due date."""
        borrowed = BookInstance.objects.filter(borrower=self.request.user)
        on_loan = borrowed.filter(status__exact='o')
        return on_loan.order_by('due_back')
from django.contrib.auth.mixins import PermissionRequiredMixin
class LoanedBooksByUserListViewLibrarian(PermissionRequiredMixin, generic.ListView):
    """List view of every book currently on loan, for librarians holding
    the 'catalog.can_mark_returned' permission."""
    model = BookInstance
    permission_required = 'catalog.can_mark_returned'
    template_name = 'catalog/bookinstance_list_borrowed_librarian.html'
    paginate_by = 10

    def get_queryset(self):
        """Return all on-loan ('o') copies, soonest due first."""
        loaned = BookInstance.objects.filter(status__exact='o')
        return loaned.order_by('due_back')
from django.contrib.auth.decorators import permission_required
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
import datetime
from .forms import RenewBookForm
@permission_required('catalog.can_mark_returned')
def renew_book_librarian(request, pk):
    """
    View function for renewing a specific BookInstance by a librarian.

    GET shows a form pre-filled with a date three weeks out; a valid POST
    saves the new due date and redirects to the all-borrowed listing.
    """
    instance = get_object_or_404(BookInstance, pk=pk)

    if request.method == 'POST':
        # Bind the submitted data to the form.
        form = RenewBookForm(request.POST)
        if form.is_valid():
            # Persist the cleaned renewal date and redirect on success.
            instance.due_back = form.cleaned_data['renewal_date']
            instance.save()
            return HttpResponseRedirect(reverse('all-borrowed'))
    else:
        # GET (or any other method): build a default unbound form
        # proposing a renewal three weeks from today.
        default_date = datetime.date.today() + datetime.timedelta(weeks=3)
        form = RenewBookForm(initial={'renewal_date': default_date,})

    # Reached for GET and for invalid POST (form re-shown with errors).
    return render(request, 'catalog/book_renew_librarian.html',
                  {'form': form, 'bookinst': instance})
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
class AuthorCreate(CreateView):
    """Form view for creating an Author; exposes every model field.

    NOTE(review): no permission mixin — any user can reach this view;
    confirm whether it should require a librarian permission.
    """
    model = Author
    fields = '__all__'
    # Pre-fills the blank form's date_of_death field.
    # NOTE(review): pre-filling a death date looks odd — confirm intended.
    initial={'date_of_death':'12/10/2016',}
class AuthorUpdate(UpdateView):
    """Form view for editing an Author's name and life dates."""
    model = Author
    fields = ['first_name','last_name','date_of_birth','date_of_death']
class AuthorDelete(DeleteView):
    """Confirmation view for deleting an Author."""
    model = Author
    # Redirect to the author list after delete; reverse_lazy is required
    # because URLs are not loaded when this module is imported.
    success_url = reverse_lazy('authors')
class BookCreate(CreateView):
    """Form view for creating a Book; exposes every model field."""
    model = Book
    fields = '__all__'
class BookUpdate(UpdateView):
    """Form view for editing a Book; exposes every model field."""
    model = Book
    fields = '__all__'
class BookDelete(DeleteView):
    """Confirmation view for deleting a Book; redirects to the book list."""
    model = Book
    success_url = reverse_lazy('books')
|
{
"content_hash": "6ab3b443f8ebb169eea05c564e8a4029",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 117,
"avg_line_length": 33.753246753246756,
"alnum_prop": 0.6902654867256637,
"repo_name": "CaptainNoble/django_local_library",
"id": "e9bbf7fdcbb9db3d11aeb8a9928b4340365d3730",
"size": "5198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catalog/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "71"
},
{
"name": "HTML",
"bytes": "11419"
},
{
"name": "Python",
"bytes": "44425"
}
],
"symlink_target": ""
}
|
import socket
import pygame
from pygame.locals import *
class RCTest(object):
    """
    Testing communication from Controller, to RiPi, to Arduino, to Motor
    Removing the need when controlling by keyboard to have both Wifi and
    Serial connected to Controller
    Only required connection is wifi from Controller to RiPi, making the car
    completely remote controlled

    Command codes sent over the socket (as string digits):
    1=forward, 2=reverse, 3=right, 4=left, 0=exit, 5=key released.
    NOTE(review): this is Python 2 code (statement-form `print` below).
    """
    def __init__(self):
        # Open a listening socket and block until the Pi connects,
        # then immediately enter the steering loop.
        # NOTE(review): bind address/port are hard-coded — confirm network.
        pygame.init()
        self.send_inst = True
        self.server_socket = socket.socket()
        self.server_socket.bind(('192.168.43.178', 8001))
        self.server_socket.listen(0)
        self.connection, self.client_address = self.server_socket.accept()
        self.steer()
    def steer(self):
        """
        Decision maker based on button press from Controller
        No switch statement in python so elif is used
        """
        try:
            print "Connection from: ", self.client_address
            while self.send_inst:
                for event in pygame.event.get():
                    if event.type == KEYDOWN:
                        # Returns state of keys in keyboard of controller
                        key_input = pygame.key.get_pressed()
                        # complex orders: map arrow keys to drive commands
                        if key_input[pygame.K_UP]:
                            print("Forward")
                            self.server_socket.send(str(1))
                        elif key_input[pygame.K_DOWN]:
                            print("Reverse")
                            self.server_socket.send(str(2))
                        elif key_input[pygame.K_RIGHT]:
                            print("Right")
                            self.server_socket.send(str(3))
                        elif key_input[pygame.K_LEFT]:
                            print("Left")
                            self.server_socket.send(str(4))
                        # exit: x or q stops the loop and signals the Pi
                        elif key_input[pygame.K_x] or key_input[pygame.K_q]:
                            print 'Exit'
                            self.send_inst = False
                            self.server_socket.send(str(0))
                            break
                    # Key release: tell the Pi to stop the current action.
                    elif event.type == pygame.KEYUP:
                        self.server_socket.send(str(5))
        finally:
            # Always release both sockets, even on error or exit.
            self.connection.close()
            self.server_socket.close()
if __name__ == '__main__':
    # Constructing RCTest opens the server socket, blocks on accept(),
    # and then runs the steering loop until exit.
    RCTest()
|
{
"content_hash": "8f06d7dc62fa96b575aed609f290f8b5",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 76,
"avg_line_length": 36.35820895522388,
"alnum_prop": 0.4852216748768473,
"repo_name": "bmcivor/lego_autono",
"id": "abe267b3a20e40b863550e63c1f22eb6afcbee4b",
"size": "2436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/final_socket_serial_drive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "8405"
},
{
"name": "C",
"bytes": "1839"
},
{
"name": "C++",
"bytes": "98069"
},
{
"name": "Python",
"bytes": "34401"
},
{
"name": "Shell",
"bytes": "3170"
}
],
"symlink_target": ""
}
|
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1DeploymentCause(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    operations = [
    ]

    # The key is attribute name
    # and the value is attribute type.
    swagger_types = {
        'type': 'str',
        'image_trigger': 'V1DeploymentCauseImageTrigger'
    }

    # The key is attribute name
    # and the value is json key in definition.
    attribute_map = {
        'type': 'type',
        'image_trigger': 'imageTrigger'
    }

    def __init__(self, type=None, image_trigger=None):
        """
        V1DeploymentCause - a model defined in Swagger
        """
        self._type = type
        self._image_trigger = image_trigger

    @property
    def type(self):
        """
        Gets the type of this V1DeploymentCause.
        Type of the trigger that resulted in the creation of a new deployment

        :return: The type of this V1DeploymentCause.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """
        Sets the type of this V1DeploymentCause.
        Type of the trigger that resulted in the creation of a new deployment

        :param type: The type of this V1DeploymentCause.
        :type: str
        """
        self._type = type

    @property
    def image_trigger(self):
        """
        Gets the image_trigger of this V1DeploymentCause.
        ImageTrigger contains the image trigger details, if this trigger was fired based on an image change

        :return: The image_trigger of this V1DeploymentCause.
        :rtype: V1DeploymentCauseImageTrigger
        """
        return self._image_trigger

    @image_trigger.setter
    def image_trigger(self, image_trigger):
        """
        Sets the image_trigger of this V1DeploymentCause.
        ImageTrigger contains the image trigger details, if this trigger was fired based on an image change

        :param image_trigger: The image_trigger of this V1DeploymentCause.
        :type: V1DeploymentCauseImageTrigger
        """
        self._image_trigger = image_trigger

    def to_dict(self):
        """
        Returns the model properties as a dict.
        """
        result = {}
        # Plain iteration over the declared attribute names replaces
        # six.iteritems(), dropping the third-party `six` dependency;
        # dict iteration behaves identically on Python 2 and 3 here.
        for attr in V1DeploymentCause.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x
                    for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model.
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`.
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Bug fix: the original accessed ``other.__dict__`` unconditionally,
        which raised AttributeError for operands without a ``__dict__``
        (e.g. strings); non-model operands now compare unequal instead.
        """
        if not isinstance(other, V1DeploymentCause):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal.
        """
        return not self == other
|
{
"content_hash": "eab6a79deca6e491bec483ddd27757b6",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 107,
"avg_line_length": 27.772727272727273,
"alnum_prop": 0.5826513911620295,
"repo_name": "detiber/lib_openshift",
"id": "acf35f17e5b47eb6c18511b30f63cd60be5b5d67",
"size": "4294",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib_openshift/models/v1_deployment_cause.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "61305"
},
{
"name": "Python",
"bytes": "6202851"
},
{
"name": "Shell",
"bytes": "2825"
}
],
"symlink_target": ""
}
|
'''
@summary: Handles all notifications for api, including alerts from metron.
Note: Ensure username and password needs hidden / parsed from encrypted file.
@author: devopsec
'''
import sys
import traceback
from api import app, time_funcs
from api.parse_json import *
from api.decorators import async
from api.sql.models import user_data
from api.company.endpoint import companyUtils
from flask import jsonify, request, json, render_template
from flask_mail import Mail, Message
from flask_restful import Resource, reqparse
from starbase import Connection
import requests, base64, subprocess, os
# email server config #
# SECURITY(review): credentials are hard-coded below; the module docstring
# itself notes the username/password need to be hidden / read from an
# encrypted file — do not ship these in source.
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465  # implicit-SSL SMTP (MAIL_USE_SSL=True below)
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
app.config['MAIL_USERNAME'] = 'threatdetectionservice@gmail.com'
app.config['MAIL_PASSWORD'] = 'flyball2011'
app.config['MAIL_ASCII_ATTACHMENTS'] = False
app.config['MAIL_DEBUG'] = True
app.config['MAIL_DEFAULT_SENDER'] = 'threatdetectionservice@gmail.com'
app.config['MAIL_DEFAULT_ADMIN'] = ['threatdetectionservice@gmail.com']
# create mail object #
mail = Mail(app)
# define hbase vars #
# Metron HBase REST endpoint plus the enrichment table / column family.
metronHBaseRestURL = "http://10.10.10.154"
metronHbaseRestPort = 9082
metronHBaseTable = "enrichment"
metronHBaseCF = "assets"
# Base URL of this API and the path of its company endpoint.
apiBaseRestUrl = "http://0.0.0.0:7777"
apiCompanyEndpoint = "/api/company/"
# Full URL used for asset queries against the enrichment table.
assetQueryURL = metronHBaseRestURL + ":" + str(metronHbaseRestPort) + "/" + metronHBaseTable
# abstraction of email and sms functions
@async
def send_async_email(app, msg):
    ''' sends mail asynchronously '''
    # The @async decorator (api.decorators) presumably dispatches this call
    # off the request path — confirm its execution model.
    # Flask-Mail requires an application context to send.
    with app.app_context():
        mail.send(msg)
def send_email(recipients, text_body, html_body=None, subject="Threat Notification Service",
               sender="threatdetectionservice@gmail.com", threat_data=None):
    """Queue a notification email for asynchronous delivery.

    :param recipients: list of destination addresses (required)
    :param text_body: plain-text message body (required)
    :param html_body: optional HTML body
    :param subject: message subject line
    :param sender: envelope sender address
    :param threat_data: optional extra detail appended to the text body
    """
    # Append threat details, if provided, to the plain-text body.
    if threat_data is not None:
        text_body += threat_data
    # Bug fix: the original built text_body but never attached it to the
    # Message (no body= argument), so plain-text content was silently
    # dropped. The dead `if html_body ... pass / else pass` branches and
    # the commented-out template rendering are removed.
    msg = Message(recipients=recipients, subject=subject, body=text_body,
                  html=html_body, sender=sender)
    send_async_email(app, msg)
@async
def send_sms(to, msg, frm="12485042987", threat_data=None):
''' to, frm, and msg params are required '''
CWD = os.getcwd()
script_path = os.path.join(CWD, 'notification', 'flowroute', 'send_sms.py')
# DEBUG
print("path " + script_path)
# TODO make path cross-platform compatible
python2_env = {"PYTHONPATH": "/usr/bin/python2.7"}
if not threat_data == None:
msg += threat_data
# convert to arg into string representation for cmd line
# toStrList = []
# for number in to:
# toStrList.append(str(number))
cmd = "python2.7 {0} -t {1} -f {2} -m '{3}'".format(script_path, to, frm, msg)
print(cmd)
subprocess.run(cmd, env=python2_env, shell=True)
#out_str = subprocess.check_output(cmd, shell=True)
# DEBUG
# print(cmd)
# print(out_str)
class manageNotifications(Resource):
    ''' Handles processing of notifications with following functions:
        Post threat notification into an email or sms message and alert users
        Process threat-intel and conditionally alert user as threat notification '''

    threat_data = None    # class variable: summary of the most recently parsed threat
    contact_info = []     # class variable: contacts resolved for the current alert

    @staticmethod
    def _collect_contacts(poc_names):
        ''' Look up each POC username in user_data and append their contact
            details and notification preferences to contact_info. '''
        for poc in poc_names:
            user = user_data.query.filter_by(username=poc).first()
            manageNotifications.contact_info.append({
                "name": user.firstname,
                "phone": user.phone_number,
                "email": user.email,
                "alert_type": user.notification['alert_type'],
                "notification_type": user.notification['notification_type']
            })

    def post(self):
        ''' Process a notification.  Dispatches on the request URL:
            api/notifications/alert -- parse Metron threat intel and notify POCs
            api/notifications/email -- send an ad-hoc email
            api/notifications/sms   -- send an ad-hoc SMS
        '''
        MASS_ALERT_FLAG = False
        URL = request.url
        # process alert #
        if URL.find("api/notifications/alert") > 0:
            try:
                # BUG FIX: reset the shared contact list for this request; as a
                # mutable class attribute it accumulated contacts across alerts,
                # so each new alert re-notified contacts from earlier alerts.
                manageNotifications.contact_info = []
                parser = reqparse.RequestParser()
                parser.add_argument('threat_intel', type=dict, location='json')
                args = parser.parse_args()
                if args['threat_intel']['_source']['is_alert'] == "true":
                    # gather necessary info from threat_intel
                    manageNotifications.threat_data = {
                        "index": args['threat_intel']['_index'],
                        "score": args['threat_intel']['_score'],
                        "threat_level": args['threat_intel']['_source']['threat.triage.level'],
                        "source": args['threat_intel']['_source']['source.type'],
                        "ip_src_addr": args['threat_intel']['_source']['ip_src_addr'],
                        "ip_dst_addr": args['threat_intel']['_source']['ip_dst_addr'],
                        "url": args['threat_intel']['_source']['url'],
                        "time": time_funcs.convert_epoch_ts(args['threat_intel']['_source']['timestamp'])
                    }
                    # TODO enrich threat-intel in metron with source company name
                    # TODO check threat_data to find where alert is from (what company)
                    company = "ALL"
                    # NOTE(review): hard-coded override below looks like leftover
                    # debug code; it disables the mass-alert path entirely.
                    company = "Flyball-Labs"
                    if company == "ALL":  # for alerting all companies, in event of a data breach
                        MASS_ALERT_FLAG = True
                        response = companyUtils.get_all_poc_list()
                    else:  # alert a single company
                        response = companyUtils.get_company_poc_list(company)
                # NOTE(review): as in the original, `response` is undefined when
                # is_alert != "true"; the resulting NameError is swallowed by
                # the except block below.
                if response['response'] != 200:  # could not get poc list
                    return jsonify(
                        response = 400,
                        message = "Could not obtain POC list"
                    )
                # gather contact info from company & get notification settings for each poc
                if MASS_ALERT_FLAG == True:
                    for co in response['all_company_poc']:
                        manageNotifications._collect_contacts(co['poc'])
                else:
                    manageNotifications._collect_contacts(response['poc'])
                # iterate through contact info and send message if score >= user setting
                for contact in manageNotifications.contact_info:
                    if manageNotifications.threat_data['score'] >= contact['alert_type']:
                        body = ("Hello " + contact['name'] + ",\n\nThere was a threat detected on your network at " +
                                manageNotifications.threat_data['time'] + "\nA summary of the details are provided below.\n" +
                                "For more information, login to your account, and view the ThreatDetection Service Dashboard.\n")
                        if contact['notification_type'] == "email":
                            send_email(recipients=[contact['email']], text_body=body,
                                       threat_data=json_encode(manageNotifications.threat_data))
                        elif contact['notification_type'] == "sms":
                            send_sms(to=contact['phone'], msg=body,
                                     threat_data=json_encode(manageNotifications.threat_data))
                return jsonify(
                    response = 200,
                    message = "Alert parsing successful"
                )
            except Exception as e:
                # DEBUG only (security risk : TMI)
                print("Unexpected error:", sys.exc_info()[0])  # sys info
                print(type(e))  # the exception instance
                print(e.args)  # arguments stored in .args
                print(e)  # the actual error
                traceback.print_tb(e.__traceback__)  # print stack trace
        # send email #
        if URL.find("api/notifications/email") > 0:
            try:
                parser = reqparse.RequestParser()
                parser.add_argument('recipients', type=list, location='json')
                parser.add_argument('subject', type=str, location='json')
                parser.add_argument('text_body', type=str, location='json')
                parser.add_argument('html_body', type=str, location='json')
                parser.add_argument('sender', type=str, location='json')
                args = parser.parse_args()
                # BUG FIX: arguments were previously passed positionally in the
                # wrong order (subject landed in recipients, recipients in
                # text_body, ...).  Use keywords, and only override subject and
                # sender when the caller supplied them so send_email's defaults
                # still apply.
                email_kwargs = {
                    'recipients': args['recipients'],
                    'text_body': args['text_body'],
                    'html_body': args['html_body'],
                }
                if args['subject'] is not None:
                    email_kwargs['subject'] = args['subject']
                if args['sender'] is not None:
                    email_kwargs['sender'] = args['sender']
                send_email(**email_kwargs)
                return jsonify(
                    response = 200,
                    message = 'Email delivery success'
                )
            except Exception as e:
                return {'error' : str(e)}  # DEBUG only (security risk : TMI)
        # send sms #
        elif URL.find("api/notifications/sms") > 0:
            try:
                parser = reqparse.RequestParser()
                parser.add_argument('to', type=int, location='json')
                parser.add_argument('frm', type=int, location='json')
                parser.add_argument('msg', type=str, location='json')
                args = parser.parse_args()
                send_sms(to=args['to'], frm=args['frm'], msg=args['msg'])
                return jsonify(
                    response = 200,
                    message = 'SMS delivery success'
                )
            except Exception as e:
                return {'error' : str(e)}  # DEBUG only (security risk : TMI)

    def get(self, threat_id):
        ''' get threat-intel data for threat notification '''
        # Build the HBase REST row query for this threat id.
        assetFullQueryURL = assetQueryURL + "/" + threat_id
        print(assetFullQueryURL)
        try:
            response = requests.get(assetFullQueryURL, headers={"Accept" : "application/json"})
            jData = response.json()
        except Exception:  # narrowed from a bare except; still best-effort
            return "Server Down"
        # HBase REST returns base64-encoded cells; decode each row into a dict.
        decodedList = []
        for row in jData['Row']:
            dColumn = {}
            for cell in row['Cell']:
                columnname = base64.b64decode(cell['column']).decode('ascii')
                value = base64.b64decode(cell['$']).decode('ascii')
                dColumn[columnname] = value
            decodedList.append(dColumn)
        return jsonify(threat_intel=decodedList)
|
{
"content_hash": "86ff3c679d5fb06d2ae2beb434d15a68",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 150,
"avg_line_length": 45.68771929824561,
"alnum_prop": 0.5206205360571384,
"repo_name": "devopsec/threatdetectionservice",
"id": "cbf4af6021bbb775bee88c2d4d0175cfd090348d",
"size": "13021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/notification/endpoint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "73"
},
{
"name": "CSS",
"bytes": "60463"
},
{
"name": "HTML",
"bytes": "73698"
},
{
"name": "JavaScript",
"bytes": "6500"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "166187"
},
{
"name": "Shell",
"bytes": "24573"
}
],
"symlink_target": ""
}
|
import sys
import csv
import re
# Define input template filename to resulting output filename mapping
files = { 'template_sysctl_static' : 'sysctl_static_',
'template_sysctl_runtime' : 'sysctl_runtime_',
'template_sysctl' : 'sysctl_' }
def output_checkfile(serviceinfo):
    """Generate OVAL check files for one sysctl variable/value pair.

    For each template listed in the module-level ``files`` mapping the
    placeholders SYSCTLID, SYSCTLVAR and SYSCTLVAL are substituted and the
    result is written to ./output/<prefix><var_id>.xml.

    :param serviceinfo: two-item sequence (sysctl_var, sysctl_val)
    """
    # get the items out of the list
    sysctl_var, sysctl_val = serviceinfo
    # convert variable name to a format suitable for 'id' tags
    # (raw string so the character class isn't mangled by string escapes)
    sysctl_var_id = re.sub(r'[-.]', '_', sysctl_var)
    # open the template files and perform the conversions
    for templatename, outprefix in files.items():
        with open(templatename, 'r') as templatefile:
            filestring = templatefile.read()
        filestring = filestring.replace("SYSCTLID", sysctl_var_id)
        filestring = filestring.replace("SYSCTLVAR", sysctl_var)
        filestring = filestring.replace("SYSCTLVAL", sysctl_val)
        # write the check; the with-block closes the file (the old explicit
        # close() inside the with was redundant)
        with open("./output/" + outprefix + sysctl_var_id +
                  ".xml", 'w+') as outputfile:
            outputfile.write(filestring)
def main():
    """Entry point: read sysctl variable,value pairs from the CSV in argv[1]."""
    if len(sys.argv) < 2:
        print("Provide a CSV file containing lines of the format: "
              "sysctlvariable,sysctlvalue")
        sys.exit(1)
    with open(sys.argv[1], 'r') as infile:
        # each CSV row is a [variable, value] list
        for row in csv.reader(infile):
            output_checkfile(row)
    sys.exit(0)

if __name__ == "__main__":
    main()
|
{
"content_hash": "0b94e06640ebe9f757652a608f997b10",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 71,
"avg_line_length": 36.18604651162791,
"alnum_prop": 0.6041131105398457,
"repo_name": "rprevette/clip",
"id": "fad51423d5ee5f61e86dcdd4ec31958243137ad2",
"size": "1575",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "packages/scap-security-guide/scap-security-guide-0.1.25/shared/oval/templates/create_sysctl_checks.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "209"
},
{
"name": "C",
"bytes": "13809"
},
{
"name": "Groff",
"bytes": "294054"
},
{
"name": "HTML",
"bytes": "1333"
},
{
"name": "Makefile",
"bytes": "88510"
},
{
"name": "Python",
"bytes": "95048"
},
{
"name": "Shell",
"bytes": "17539"
}
],
"symlink_target": ""
}
|
"""Test HomematicIP Cloud setup process."""
from asynctest import CoroutineMock, Mock, patch
from homeassistant.components.homematicip_cloud.const import (
CONF_ACCESSPOINT,
CONF_AUTHTOKEN,
DOMAIN as HMIPC_DOMAIN,
HMIPC_AUTHTOKEN,
HMIPC_HAPID,
HMIPC_NAME,
)
from homeassistant.components.homematicip_cloud.hap import HomematicipHAP
from homeassistant.config_entries import ENTRY_STATE_LOADED, ENTRY_STATE_NOT_LOADED
from homeassistant.const import CONF_NAME
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
async def test_config_with_accesspoint_passed_to_config_entry(hass):
    """Test that config for a accesspoint are loaded via config entry."""
    entry_config = {
        CONF_ACCESSPOINT: "ABC123",
        CONF_AUTHTOKEN: "123",
        CONF_NAME: "name",
    }
    # neither a config entry nor an access point exists yet
    assert len(hass.config_entries.async_entries(HMIPC_DOMAIN)) == 0
    assert not hass.data.get(HMIPC_DOMAIN)

    assert (
        await async_setup_component(hass, HMIPC_DOMAIN, {HMIPC_DOMAIN: entry_config})
        is True
    )

    # exactly one config entry was created for the access point
    entries = hass.config_entries.async_entries(HMIPC_DOMAIN)
    assert len(entries) == 1
    assert entries[0].data == {"authtoken": "123", "hapid": "ABC123", "name": "name"}
    # and the matching access-point object was registered under its id
    assert isinstance(hass.data[HMIPC_DOMAIN]["ABC123"], HomematicipHAP)
async def test_config_already_registered_not_passed_to_config_entry(hass):
    """Test that an already registered accesspoint does not get imported."""
    expected_data = {"authtoken": "123", "hapid": "ABC123", "name": "name"}
    mock_config = {HMIPC_AUTHTOKEN: "123", HMIPC_HAPID: "ABC123", HMIPC_NAME: "name"}
    MockConfigEntry(domain=HMIPC_DOMAIN, data=mock_config).add_to_hass(hass)

    # exactly one entry exists, still without a unique_id
    entries = hass.config_entries.async_entries(HMIPC_DOMAIN)
    assert len(entries) == 1
    assert entries[0].data == expected_data
    assert not entries[0].unique_id

    entry_config = {
        CONF_ACCESSPOINT: "ABC123",
        CONF_AUTHTOKEN: "123",
        CONF_NAME: "name",
    }
    assert (
        await async_setup_component(hass, HMIPC_DOMAIN, {HMIPC_DOMAIN: entry_config})
        is True
    )

    # still a single entry with unchanged data ...
    entries = hass.config_entries.async_entries(HMIPC_DOMAIN)
    assert len(entries) == 1
    assert entries[0].data == expected_data
    # ... but the import flow stamped the unique_id
    assert entries[0].unique_id == "ABC123"
async def test_unload_entry(hass):
    """Test being able to unload an entry."""
    mock_config = {HMIPC_AUTHTOKEN: "123", HMIPC_HAPID: "ABC123", HMIPC_NAME: "name"}
    MockConfigEntry(domain=HMIPC_DOMAIN, data=mock_config).add_to_hass(hass)
    # Replace the HAP class so setup succeeds without a real cloud connection.
    with patch("homeassistant.components.homematicip_cloud.HomematicipHAP") as mock_hap:
        instance = mock_hap.return_value
        instance.async_setup = CoroutineMock(return_value=True)
        instance.home.id = "1"
        instance.home.modelType = "mock-type"
        instance.home.name = "mock-name"
        instance.home.currentAPVersion = "mock-ap-version"
        instance.async_reset = CoroutineMock(return_value=True)
        assert await async_setup_component(hass, HMIPC_DOMAIN, {}) is True
    # mock_calls[i][0] is the name of the i-th call made on the HAP mock:
    # setup must have been the first call made during component setup.
    assert mock_hap.return_value.mock_calls[0][0] == "async_setup"
    assert hass.data[HMIPC_DOMAIN]["ABC123"]
    config_entries = hass.config_entries.async_entries(HMIPC_DOMAIN)
    assert len(config_entries) == 1
    assert config_entries[0].state == ENTRY_STATE_LOADED
    await hass.config_entries.async_unload(config_entries[0].entry_id)
    assert config_entries[0].state == ENTRY_STATE_NOT_LOADED
    # unloading the entry must have called async_reset on the HAP mock
    assert mock_hap.return_value.mock_calls[3][0] == "async_reset"
    # entry is unloaded
    assert hass.data[HMIPC_DOMAIN] == {}
async def test_hmip_dump_hap_config_services(hass, mock_hap_with_service):
    """Test dump configuration services."""
    # Intercept the file write so nothing lands on disk.
    with patch("pathlib.Path.write_text", return_value=Mock()) as write_mock:
        await hass.services.async_call(
            "homematicip_cloud", "dump_hap_config", {"anonymize": True}, blocking=True
        )

    home = mock_hap_with_service.home
    assert home.mock_calls
    # the final call on the mocked home must be the configuration download
    assert home.mock_calls[-1][0] == "download_configuration"
    assert write_mock.mock_calls
|
{
"content_hash": "6c02443533eb148c99fff092476f0feb",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 88,
"avg_line_length": 35.883720930232556,
"alnum_prop": 0.6731475480665371,
"repo_name": "Teagan42/home-assistant",
"id": "ee63dba3c97b89cf7c43add542acee7eecf7958b",
"size": "4629",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/homematicip_cloud/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # This flag is used to mark that a migration shouldn't be automatically run in
    # production. We set this to True for operations that we think are risky and want
    # someone from ops to run manually and monitor.
    # General advice is that if in doubt, mark your migration as `is_dangerous`.
    # Some things you should always mark as dangerous:
    # - Adding indexes to large tables. These indexes should be created concurrently,
    #   unfortunately we can't run migrations outside of a transaction until Django
    #   1.10. So until then these should be run manually.
    # - Large data migrations. Typically we want these to be run manually by ops so that
    #   they can be monitored. Since data migrations will now hold a transaction open
    #   this is even more important.
    # - Adding columns to highly active tables, even ones that are NULL.
    is_dangerous = False

    dependencies = [
        ('sentry', '0009_auto_20191101_1608'),
    ]

    operations = [
        # Drop the unique_together constraint on PagerDutyService by setting
        # it to the empty set.
        migrations.AlterUniqueTogether(
            name='pagerdutyservice',
            unique_together=set([]),
        ),
    ]
|
{
"content_hash": "34982a953d94a2da1c353beffe65b481",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 88,
"avg_line_length": 40.16129032258065,
"alnum_prop": 0.6907630522088354,
"repo_name": "beeftornado/sentry",
"id": "a678cc905e5e33fde68264ba854031ad19695506",
"size": "1269",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/migrations/0010_auto_20191104_1641.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function
import os
import sys
# Select the f2py backend from a "--<mode>" command-line flag; a matched
# flag is stripped from sys.argv.  If no flag matches, mode keeps the last
# list value ("2e-numpy"), which is therefore the default backend.
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
    try:
        i = sys.argv.index("--" + mode)
        del sys.argv[i]
        break
    except ValueError:
        pass
# Prevent scipy from being pulled in while importing f2py.
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
    sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
    sys.exit(1)
elif mode == "2e-numeric":
    from f2py2e import main
elif mode == "2e-numarray":
    # f2py2e handles numarray when -DNUMARRAY is passed through
    sys.argv.append("-DNUMARRAY")
    from f2py2e import main
elif mode == "2e-numpy":
    from numpy.f2py import main
else:
    sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
    sys.exit(1)
# Hand off to the selected backend's main().
main()
|
{
"content_hash": "c6014f46ff78cea65ad6a65f585ecf1e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 67,
"avg_line_length": 26.807692307692307,
"alnum_prop": 0.6183644189383071,
"repo_name": "happyx2/asspy",
"id": "03baeaf5583178ff12686af7b72f430d7a8070ff",
"size": "786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asspy/venv/Scripts/f2py.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "C",
"bytes": "226973"
},
{
"name": "C++",
"bytes": "225122"
},
{
"name": "CSS",
"bytes": "17"
},
{
"name": "HTML",
"bytes": "2802"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "101812"
}
],
"symlink_target": ""
}
|
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
from core import unpackerjs
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    ''' Resolve a vidbux.com page URL into playable video URLs.

    Returns a list of [label, url] pairs, or "" when the packed/obfuscated
    player block cannot be found.  premium/user/password/video_password are
    unused here but kept for the common server-connector signature.
    '''
    logger.info("[vidbux.py] url="+page_url)
    # If the URL lacks the ".html" suffix, fetch the page and rebuild the
    # full URL from the hidden "fname" form field.
    if ".html" not in page_url:
        logger.info("[vidbux.py] URL incompleta")
        data = scrapertools.cache_page(page_url)
        patron = '<input name="fname" type="hidden" value="([^"]+)">'
        matches = re.compile(patron,re.DOTALL).findall(data)
        page_url = page_url+"/"+matches[0]+".html"
    # Request the page once
    scrapertools.cache_page( page_url , headers=[['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14']] )
    # Request it a second time, as if the banner had been clicked
    patron = 'http\:\/\/www\.vidbux\.com/([^\/]+)/(.*?)\.html'
    matches = re.compile(patron,re.DOTALL).findall(page_url)
    logger.info("[vidbux.py] fragmentos de la URL")
    scrapertools.printMatches(matches)
    codigo = ""
    nombre = ""
    if len(matches)>0:
        codigo = matches[0][0]
        nombre = matches[0][1]
    # POST the "Free Stream" form with the video id and name extracted above
    post = "op=download1&usr_login=&id="+codigo+"&fname="+nombre+"&referer=&method_free=Free+Stream"
    data = scrapertools.cache_page( page_url , post=post, headers=[['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'],['Referer',page_url]] )
    # Extract the obfuscated (packed) block
    patron = '<div id="embedcontmvshre"[^>]+>(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    data = ""
    if len(matches)>0:
        data = matches[0]
        logger.info("[vidbux.py] bloque packed="+data)
    else:
        logger.info("[vidbux.py] no encuentra bloque packed="+data)
        return ""
    # Unpack the packed JavaScript
    descifrado = unpackerjs.unpackjs(data)
    # Extract the video URL from the unpacked source
    logger.info("descifrado="+descifrado)
    # Extract the URL
    patron = '<param name="src"value="([^"]+)"/>'
    matches = re.compile(patron,re.DOTALL).findall(descifrado)
    scrapertools.printMatches(matches)
    video_urls = []
    if len(matches)>0:
        video_urls.append( ["[vidbux]",matches[0]])
    for video_url in video_urls:
        logger.info("[vidbux.py] %s - %s" % (video_url[0],video_url[1]))
    return video_urls
# Find videos hosted on this server within the given text
def find_videos(text):
    seen = set()
    results = []
    # Full links like:
    # http://www.vidbux.com/3360qika02mo/whale.wars.s04e10.hdtv.xvid-momentum.avi.html
    patronvideos = '(http://www.vidbux.com/[A-Z0-9a-z]+/.*?html)'
    logger.info("[vidbux.py] find_videos #"+patronvideos+"#")
    for url in re.compile(patronvideos,re.DOTALL).findall(text):
        if url not in seen:
            logger.info("    url="+url)
            results.append( [ "[vidbux]" , url , 'vidbux' ] )
            seen.add(url)
        else:
            logger.info("    url duplicada="+url)
    # Bare video ids like: http://www.vidbux.com/qya0qmf3k502
    patronvideos = 'http://www.vidbux.com/([\w]+)'
    logger.info("[vidbux.py] find_videos #"+patronvideos+"#")
    for video_id in re.compile(patronvideos,re.DOTALL).findall(text):
        url = "http://www.vidbux.com/"+video_id
        if url not in seen:
            logger.info("    url="+url)
            results.append( [ "[vidbux]" , url , 'vidbux' ] )
            seen.add(url)
        else:
            logger.info("    url duplicada="+url)
    return results
|
{
"content_hash": "a6e6e3cea45d377830550a0b7cd960c3",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 200,
"avg_line_length": 35.72380952380952,
"alnum_prop": 0.6070381231671554,
"repo_name": "jose36/plugin.video.Jmdl1",
"id": "573b69f2534099dfa35bf96d1d057c53921d36ea",
"size": "4015",
"binary": false,
"copies": "35",
"ref": "refs/heads/master",
"path": "servers/vidbux.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "602821"
}
],
"symlink_target": ""
}
|
"""Add annotation queries
Revision ID: 1d808cef0787
Revises: 1c15bafd311a
Create Date: 2015-08-17 14:31:52.751784
"""
# revision identifiers, used by Alembic.
revision = '1d808cef0787'
down_revision = '1c15bafd311a'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Schema upgrade: add saved-query support.

    Creates the ``query`` table and the ``annotation_query`` association
    table, then drops the superseded ``sample_frequency`` table and the
    ``annotation.global_frequency`` column.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('query',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=200), nullable=True),
    sa.Column('expression', sa.Text(), nullable=True),
    sa.Column('require_active', sa.Boolean(), nullable=True),
    sa.Column('require_coverage_profile', sa.Boolean(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    mysql_charset='utf8',
    mysql_engine='InnoDB'
    )
    op.create_table('annotation_query',
    sa.Column('annotation_id', sa.Integer(), nullable=False),
    sa.Column('query_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['annotation_id'], ['annotation.id'], ondelete='CASCADE'),
    sa.ForeignKeyConstraint(['query_id'], ['query.id'], ondelete='CASCADE')
    )
    op.drop_table('sample_frequency')
    op.drop_column('annotation', 'global_frequency')
    ### end Alembic commands ###
def downgrade():
    """Schema downgrade: undo the saved-query migration.

    Restores ``annotation.global_frequency`` and ``sample_frequency``, and
    drops the ``query`` and ``annotation_query`` tables.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('annotation', sa.Column('global_frequency', sa.BOOLEAN(), nullable=True))
    op.create_table('sample_frequency',
    sa.Column('annotation_id', sa.INTEGER(), autoincrement=False, nullable=False),
    sa.Column('sample_id', sa.INTEGER(), autoincrement=False, nullable=False),
    sa.ForeignKeyConstraint(['annotation_id'], [u'annotation.id'], name=u'sample_frequency_annotation_id_fkey', ondelete=u'CASCADE'),
    sa.ForeignKeyConstraint(['sample_id'], [u'sample.id'], name=u'sample_frequency_sample_id_fkey', ondelete=u'CASCADE')
    )
    op.drop_table('query')
    op.drop_table('annotation_query')
    ### end Alembic commands ###
|
{
"content_hash": "ebec1a0d78d44285ba841468e44f3704",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 133,
"avg_line_length": 38.411764705882355,
"alnum_prop": 0.6860643185298622,
"repo_name": "varda/varda",
"id": "e79d8664eb0e27257fb79e51c2299013e9b02d8a",
"size": "1959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alembic/versions/1d808cef0787_add_annotation_queries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "353449"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import sys
if __name__ == "__main__":
    # Write one shasum line per release asset for the given version into
    # builds/vale_<version>_checksums.txt.
    os.chdir("builds")
    version = sys.argv[1]
    with open("vale_{0}_checksums.txt".format(version), "w+") as f:
        for asset in [e for e in os.listdir(".") if not e.endswith(".txt")]:
            if version in asset:
                checksum = subprocess.check_output([
                    "shasum",
                    "-a",
                    "256",
                    asset
                ])
                # BUG FIX: check_output returns bytes on Python 3, so
                # "{0}".format(checksum) wrote a b'...' repr into the file.
                # Decode to get the plain "HASH  NAME\n" line.
                f.write(checksum.decode("utf-8"))
{
"content_hash": "02a13c403f0e96fc7488f7e72149b5d1",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 76,
"avg_line_length": 29.11111111111111,
"alnum_prop": 0.45229007633587787,
"repo_name": "ValeLint/vale",
"id": "e8d1c04993a02f4235d847128a4a0ac6032bab67",
"size": "524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixtures/scripts/shasum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "286"
},
{
"name": "C++",
"bytes": "5169"
},
{
"name": "CSS",
"bytes": "3777"
},
{
"name": "Gherkin",
"bytes": "46887"
},
{
"name": "Go",
"bytes": "83057"
},
{
"name": "HTML",
"bytes": "5160"
},
{
"name": "Haskell",
"bytes": "680"
},
{
"name": "Java",
"bytes": "343"
},
{
"name": "Lua",
"bytes": "560"
},
{
"name": "Makefile",
"bytes": "1829"
},
{
"name": "PHP",
"bytes": "241"
},
{
"name": "Python",
"bytes": "677"
},
{
"name": "R",
"bytes": "419"
},
{
"name": "Ruby",
"bytes": "2484"
},
{
"name": "Rust",
"bytes": "832"
},
{
"name": "Scala",
"bytes": "333"
},
{
"name": "Shell",
"bytes": "191"
},
{
"name": "Swift",
"bytes": "374"
},
{
"name": "TeX",
"bytes": "1336"
}
],
"symlink_target": ""
}
|
poem = '''a narrow fellow in the grass
occasionally rides;
you may have met him, did you not,
his notice sudden is.
the grass divides as with a comb,
a spotted shaft is seen;
and then it closes at your feet
and opens further on.
he likes a boggy acre,
a floor too cool for corn.
yet when a child, and barefoot,
i more than once, at morn,
have passed, i thought, a whip-lash
unbraiding in the sun,
when, stooping to secure it,
it wrinkled, and was gone.
several of nature's people
i know, and they know me;
i feel for them a transport
of cordiality;
but never met this fellow,
attended or alone,
without a tighter breathing,
and zero at the bone.'''
# print(poem)
# Tally how many times each character (letters, spaces, punctuation,
# newlines) appears in the poem.
freq = {}
for ch in poem:
    freq[ch] = freq.get(ch, 0) + 1

# Reverse mapping: count -> character.  When several characters share a
# count, the last one encountered wins (same as the original comprehension).
inv_freq = {count: ch for ch, count in freq.items()}
# say([56,38,44,56,29])
def say(numbers_list):
    """Decode a list of frequency counts back into a string via inv_freq."""
    return "".join(inv_freq[count] for count in numbers_list)
# print(say([56,38,44,56,29]))
# Every character that occurs in the poem (including '\n' between lines).
letter_set = set(freq.keys())
# Find the longest dictionary word whose characters all occur in the poem.
with open("linuxwords", "r") as fp:
    longest_word = ""
    for line in fp:
        # NOTE: line keeps its trailing newline; since '\n' is in letter_set
        # the subset test still succeeds for qualifying words.
        line_set = set(line)
        if line_set <= letter_set:
            if len(line) > len(longest_word):
                longest_word = line
print(letter_set)
print(longest_word)
|
{
"content_hash": "441887b41bf78780fcec43a776fa44a1",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 45,
"avg_line_length": 20.492537313432837,
"alnum_prop": 0.6227239621267298,
"repo_name": "johnobrien/puzzles.bostonpython.com",
"id": "645712efdd708599e6c54772342496a05abe146a",
"size": "1373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_poetry/poetry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14840"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):

    dependencies = [("elections", "0045_auto_20181001_1437")]

    operations = [
        # Data migration only (no schema change): assume all elections that
        # already exist are approved
        migrations.RunSQL(
            """
        UPDATE elections_election SET suggested_status='approved'
        """,
            reverse_sql="""
        UPDATE elections_election SET suggested_status='suggested'
        """,
        )
    ]
|
{
"content_hash": "0129ea144c7dde13f9c9ed64966cf458",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 70,
"avg_line_length": 26.15,
"alnum_prop": 0.5984703632887189,
"repo_name": "DemocracyClub/EveryElection",
"id": "6e3f48a0403e64834451cb3bf66f4549d864a02c",
"size": "547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "every_election/apps/elections/migrations/0046_update_status.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "37294"
},
{
"name": "JavaScript",
"bytes": "3930"
},
{
"name": "Python",
"bytes": "548734"
},
{
"name": "SCSS",
"bytes": "3314"
}
],
"symlink_target": ""
}
|
import os, sys, glob
import argparse
import yaml
import multiprocessing as MP
import itertools as IT
import healpy as HP
import numpy as NP
import astropy.cosmology as cosmology
import astropy.constants as FCNST
import progressbar as PGB
import time, warnings
from astroutils import cosmotile
from astroutils import mathops as OPS
from astroutils import constants as CNST
from astroutils import catalog as SM
import astroutils
import ipdb as PDB
# NOTE(review): this script uses Python 2 constructs (`print` statement,
# `xrange`, `IT.izip`, list-returning `map`) and therefore requires Python 2.
# Path of the installed astroutils package; used to locate the bundled
# default parameter file.
astroutils_path = astroutils.__path__[0]+'/'
if __name__ == '__main__':
    ## Parse input arguments
    parser = argparse.ArgumentParser(description='Program to tile cosmological coeval cubes to lightcone healpix cube')
    input_group = parser.add_argument_group('Input parameters', 'Input specifications')
    input_group.add_argument('-p', '--parmsfile', dest='parmsfile', required=False, type=str, default=astroutils_path+'examples/cosmotile/cosmotile_parms.yaml', help='Config file for processing cosmological coeval cubes')
    args = vars(parser.parse_args())
    parmsfile = args['parmsfile']
    # All processing options come from a single YAML configuration file.
    with open(parmsfile, 'r') as pfile:
        parms = yaml.safe_load(pfile)
    # Input/output directory structure and file naming
    indir = parms['dirstruct']['indir']
    infile_prefix = parms['dirstruct']['infile_prefix']
    if infile_prefix is None:
        infile_prefix = ''
    infile_suffix = parms['dirstruct']['infile_suffix']
    if infile_suffix is None:
        infile_suffix = ''
    save = parms['dirstruct']['write']
    outdir = parms['dirstruct']['outdir']
    outfile_prefix = parms['dirstruct']['outfile_prefix']
    if outfile_prefix is None:
        outfile = outdir + 'light_cone_surfaces'
    elif isinstance(outfile_prefix, str):
        outfile = outdir + outfile_prefix + '_light_cone_surfaces'
    else:
        raise TypeError('Output filename prefix must be set to None or a string')
    cube_source = parms['sim']['source']
    rest_freq = parms['output']['rest_frequency']
    nside = parms['output']['nside']
    theta_range = parms['output']['theta_range']
    phi_range = parms['output']['phi_range']
    angres = parms['output']['angres']
    # Cosmology used for redshift <-> comoving-distance conversions:
    # None, a fully custom wCDM, or the WMAP9 preset.
    cosmoparms = parms['sim']['cosmo']
    if cosmoparms['name'] is None:
        cosmo = None
    elif cosmoparms['name'].lower() == 'custom':
        h = cosmoparms['h']
        H0 = 100.0 * h
        Om0 = cosmoparms['Om0']
        Ode0 = cosmoparms['Ode0']
        if Ode0 is None:
            Ode0 = 1.0 - Om0
        Ob0 = cosmoparms['Ob0'] / h**2
        w0 = cosmoparms['w0']
        cosmo = cosmology.wCDM(H0, Om0, Ode0, w0=w0, Ob0=Ob0)
    elif cosmoparms['name'].lower() == 'wmap9':
        cosmo = cosmology.WMAP9
    else:
        raise ValueError('{0} preset not currently accepted for cosmology'.format(cosmoparms['name'].lower()))
    process_stage = parms['sim']['process_stage']
    units = parms['sim']['units']
    if not isinstance(units, str):
        raise TypeError('Input units must be a string')
    if units not in ['mK', 'K']:
        raise ValueError('Supported units are "mK" and "K"')
    # Normalize everything downstream to Kelvin.
    if units == 'mK':
        conv_factor = 1e-3
        units = 'K'
    else:
        conv_factor = 1.0
    # Output sky sampling: either full-sky healpix (nside given) or a
    # rectangular theta/phi patch sampled at approximately `angres` degrees.
    is_healpix = False
    if nside is not None:
        if HP.isnsideok(nside):
            is_healpix = True
        else:
            raise ValueError('Invalid nside presented')
        theta_phi = None
    else:
        theta_range = NP.asarray(theta_range)
        phi_range = NP.asarray(phi_range)
        theta_range = NP.sort(theta_range)
        phi_range = NP.sort(phi_range)
        # Grow nside until the healpix resolution is finer than requested.
        nside_patch = 1
        angres_patch = HP.nside2resol(nside_patch)
        while angres_patch > NP.radians(angres):
            nside_patch *= 2
            angres_patch = HP.nside2resol(nside_patch)
        pixarea_patch = HP.nside2pixarea(nside_patch)
        theta, phi = HP.pix2ang(nside_patch, NP.arange(HP.nside2npix(nside_patch)))
        # Keep only pixels falling inside the requested theta/phi patch.
        select_ind = NP.logical_and(NP.logical_and(theta >= NP.radians(theta_range[0]), theta <= NP.radians(theta_range[1])), NP.logical_and(phi >= NP.radians(phi_range[0]), phi <= NP.radians(phi_range[1])))
        theta = theta[select_ind]
        phi = phi[select_ind]
        theta_phi = NP.degrees(NP.hstack((theta.reshape(-1,1), phi.reshape(-1,1))))
    # Output spectral axis: explicit redshifts, explicit frequencies, or a
    # regular channelization built from (nchan, f0, freq_resolution).
    zout = parms['output']['redshifts']
    ofreqs = parms['output']['frequencies']
    save_as_skymodel = parms['output']['skymodel']
    if zout is None:
        if ofreqs is None:
            nchan = parms['output']['nchan']
            f0 = parms['output']['f0']
            df = parms['output']['freq_resolution']
            ofreqs = (f0 + (NP.arange(nchan) - 0.5 * nchan) * df) # in Hz
            zout = rest_freq / ofreqs - 1
        else:
            ofreqs = NP.asarray(ofreqs)
            zout = rest_freq / ofreqs - 1
    else:
        zout = NP.asarray(zout).reshape(-1)
        ofreqs = rest_freq / (1+zout)
    if NP.any(zout < 0.0):
        raise ValueError('redshifts must not be negative')
    if NP.any(ofreqs < 0.0):
        raise ValueError('Output frequencies must not be negative')
    write_mode = parms['processing']['write_mode']
    if write_mode not in [None, 'append']:
        raise ValueError('Input write_mode is invalid')
    parallel = parms['processing']['parallel']
    prll_type = parms['processing']['prll_type']
    nproc = parms['processing']['nproc']
    wait_after_run = parms['processing']['wait_after_run']
    # Rules for extracting the redshift token out of the input file names.
    fname_delimiter = parms['format']['delimiter']
    z_placeholder = parms['format']['z_placeholder']
    z_identifier = parms['format']['z_identifier']
    z_identifier_place = parms['format']['z_identifier_place']
    if z_identifier is not None:
        if z_identifier_place.lower() not in ['before', 'after']:
            raise ValueError('z_identifier_place must be set to "before" or "after"')
        elif z_identifier_place.lower() == 'before':
            redshift_value_place = 1
        else:
            redshift_value_place = 0
    if cube_source.lower() not in ['21cmfast']:
        raise ValueError('{0} cubes currently not supported'.format(cube_source))
    # Collect the coeval-cube files and parse dimension, box size and
    # redshift from their file names (21cmFAST naming convention assumed).
    fullfnames = glob.glob(indir + infile_prefix + '*' + infile_suffix)
    fullfnames = NP.asarray(fullfnames)
    fnames = [fname.split('/')[-1] for fname in fullfnames]
    fnames = NP.asarray(fnames)
    if fnames[0].split(fname_delimiter)[-1] == 'lighttravel':
        dim = int(fnames[0].split(fname_delimiter)[-3])
        boxsize = float(fnames[0].split(fname_delimiter)[-2][:-3])
    else:
        dim = int(fnames[0].split(fname_delimiter)[-2])
        boxsize = float(fnames[0].split(fname_delimiter)[-1][:-3])
    cuberes = boxsize / dim # in Mpc
    if z_identifier is not None:
        zstr = [fname.split(fname_delimiter)[z_placeholder].split(z_identifier)[redshift_value_place] for fname in fnames]
    else:
        zstr = [fname.split(fname_delimiter)[z_placeholder] for fname in fnames]
    zin = NP.asarray(map(float, zstr))
    infreqs = rest_freq / (1+zin)
    # ind = NP.logical_and(infreqs >= freq_min, infreqs <= freq_max)
    # fnames = fnames[ind]
    # zin = zin[ind]
    # infreqs = infreqs[ind]
    # Sort input cubes by increasing frequency (decreasing redshift).
    sortind = NP.argsort(infreqs)
    fnames = fnames[sortind]
    zin = zin[sortind]
    infreqs = infreqs[sortind]
    sortind_z_asc = NP.argsort(zin)
    # Build one (interpolation, tiling) job description per unit of work.
    # prll_type == 1: one job per output redshift, bracketed by the two
    # nearest input cubes (clamped at the ends of the input range).
    # Otherwise: group output frequencies into bins between consecutive
    # input cubes so each cube pair is loaded once.
    interpdicts = []
    tiledicts = []
    if prll_type == 1:
        for zind,redshift in enumerate(zout):
            idict = {'outvals': NP.asarray(redshift).reshape(-1), 'inpcubes': None, 'cubedims': None, 'cube_source': cube_source, 'process_stage': process_stage, 'interp_method': 'linear', 'outfiles': None, 'returncubes': True}
            tdict = {'inpres': cuberes, 'nside': nside, 'theta_phi': theta_phi, 'redshift': redshift, 'freq': None, 'method': 'linear', 'rest_freq': rest_freq, 'cosmo': cosmo}
            if redshift <= zin.min():
                idict['invals'] = [zin.min()]
                idict['cubefiles'] = [indir+fnames[-1]]
            elif redshift >= zin.max():
                idict['invals'] = [zin.max()]
                idict['cubefiles'] = [indir+fnames[0]]
            else:
                insert_ind = NP.searchsorted(infreqs, ofreqs[zind])
                idict['invals'] = [zin[insert_ind], zin[insert_ind-1]]
                idict['cubefiles'] = [indir+fnames[insert_ind], indir+fnames[insert_ind-1]]
            interpdicts += [idict]
            tiledicts += [tdict]
    else:
        bincount, binedges, binnum, ri = OPS.binned_statistic(ofreqs, values=None, statistic='count', bins=infreqs, range=None)
        for binind in range(bincount.size):
            if bincount[binind] > 0:
                select_ind = ri[ri[binind]:ri[binind+1]]
                ofreqs_in_bin = ofreqs[select_ind]
                sorted_ofreqs_in_bin = NP.sort(ofreqs_in_bin)
                idict = {'invals': NP.asarray([infreqs[binind], infreqs[binind+1]]), 'outvals': sorted_ofreqs_in_bin.reshape(-1), 'inpcubes': None, 'cubedims': None, 'cube_source': cube_source, 'cubefiles': [indir+fnames[binind], indir+fnames[binind+1]], 'interp_method':'linear', 'outfiles': None, 'returncubes': True}
                tdict = {'inpres': cuberes, 'nside': nside, 'theta_phi': theta_phi, 'redshift': None, 'freq': sorted_ofreqs_in_bin.reshape(-1), 'method': 'linear', 'rest_freq': rest_freq, 'cosmo': cosmo}
                interpdicts += [idict]
                tiledicts += [tdict]
    # Precompute sky coordinates and K -> Jy conversion needed by the
    # SkyModel output path.
    if save_as_skymodel:
        if nside is not None:
            angres_patch = NP.degrees(HP.nside2resol(nside))
            pixarea_patch = HP.nside2pixarea(nside)
            theta, phi = NP.degrees(HP.pix2ang(nside, NP.arange(HP.nside2npix(nside))))
        else:
            theta = NP.degrees(theta)
            phi = NP.degrees(phi)
        wl = FCNST.c.to('m/s').value / ofreqs
        dJy_dK = 2 * FCNST.k_B.to('J/K').value * pixarea_patch / wl**2 / CNST.Jy # nchan (in Jy/K)
        radec = NP.hstack((phi.reshape(-1,1), 90.0 - theta.reshape(-1,1)))
    sphsurfaces = []
    # Parallel path: farm the jobs out over a multiprocessing pool; on
    # MemoryError fall back to the serial path below.
    if parallel:
        ts = time.time()
        if nproc is None:
            nproc = MP.cpu_count()
        assert isinstance(nproc, int), 'Number of parallel processes must be an integer'
        nproc = min([nproc, len(interpdicts)])
        try:
            pool = MP.Pool(processes=nproc)
            sphsurfaces = pool.imap(cosmotile.coeval_interp_cube_to_sphere_surface_wrapper_arg_splitter, IT.izip(interpdicts, tiledicts), chunksize=zout.size/nproc)
            # progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} frequencies'.format(zout.size), PGB.ETA()], maxval=zout.size).start()
            # for i,_ in enumerate(sphsurfaces):
            #     print '{0:0d}/{1:0d} completed'.format(i, len(interpdicts))
            #     progress.update(i+1)
            # progress.finish()
            pool.close()
            pool.join()
            te = time.time()
            print 'Time consumed: {0:.1f} seconds'.format(te-ts)
        except MemoryError:
            parallel = False
            pool.close()
            pool.join()
            del pool
            sphsurfaces = []
            warnings.warn('Memory requirements too high. Downgrading to serial processing.')
    # Serial path: process one job at a time; in 'append' write mode each
    # surface is flushed to disk immediately to bound memory use.
    if not parallel:
        progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} Frequency channels '.format(ofreqs.size), PGB.ETA()], maxval=ofreqs.size).start()
        for ind in xrange(len(interpdicts)):
            # sphsurfaces += [cosmotile.coeval_interp_cube_to_sphere_surface_wrapper(interpdicts[ind], tiledicts[ind])]
            if write_mode == 'append':
                sphsurface = cosmotile.coeval_interp_cube_to_sphere_surface_wrapper(interpdicts[ind], tiledicts[ind])
                if save_as_skymodel:
                    init_parms = {'name': cube_source, 'frequency': ofreqs[ind], 'location': radec, 'spec_type': 'spectrum', 'spectrum': dJy_dK[ind]*sphsurface.reshape(-1,1), 'src_shape': NP.hstack((angres_patch+NP.zeros(phi.size).reshape(-1,1), angres_patch+NP.zeros(phi.size).reshape(-1,1), NP.zeros(phi.size).reshape(-1,1))), 'epoch': 'J2000', 'coords': 'radec', 'src_shape_units': ('degree', 'degree', 'degree')}
                    skymod = SM.SkyModel(init_file=None, init_parms=init_parms)
                    if ind == 0:
                        skymod.save(outfile, fileformat='hdf5')
                        # cosmotile.write_lightcone_catalog(init_parms, outfile=outfile, action='store')
                    else:
                        SM.append_SkyModel_file(outfile, skymod, 'freq', filemode='a')
                else:
                    # cosmotile.write_lightcone_surfaces(sphpatches, units, outfile, ofreqs, cosmo=cosmo, is_healpix=is_healpix)
                    if ind == 0:
                        cosmotile.write_lightcone_surfaces(sphsurface.reshape(1,-1), units, outfile, NP.asarray(ofreqs[ind]).reshape(-1), cosmo=cosmo, is_healpix=is_healpix)
                    else:
                        cosmotile.append_lightcone_surfaces(sphsurface.reshape(1,-1), outfile, 'freq', units=units, freqs=NP.asarray(ofreqs[ind]).reshape(-1))
            else:
                sphsurfaces += [cosmotile.coeval_interp_cube_to_sphere_surface_wrapper(interpdicts[ind], tiledicts[ind])]
            progress.update(ind+1)
        progress.finish()
    # Collect in-memory results (parallel run, or serial run without
    # incremental writes), apply the mK->K factor, and write everything out.
    if parallel or (write_mode is None):
        sphpatches = NP.asarray([sphsurf for sphsurf in sphsurfaces])
        sphpatches = conv_factor * NP.asarray(sphpatches)
        if save:
            if save_as_skymodel:
                if nside is not None:
                    angres_patch = NP.degrees(HP.nside2resol(nside))
                    pixarea_patch = HP.nside2pixarea(nside)
                    theta, phi = NP.degrees(HP.pix2ang(nside, NP.arange(HP.nside2npix(nside))))
                else:
                    theta = NP.degrees(theta)
                    phi = NP.degrees(phi)
                wl = FCNST.c.to('m/s').value / ofreqs
                dJy_dK = 2 * FCNST.k_B.to('J/K').value * pixarea_patch / wl**2 / CNST.Jy # nchan (in Jy/K)
                radec = NP.hstack((phi.reshape(-1,1), 90.0 - theta.reshape(-1,1)))
                init_parms = {'name': cube_source, 'frequency': ofreqs, 'location': radec, 'spec_type': 'spectrum', 'spectrum': dJy_dK.reshape(1,-1)*sphpatches.T, 'src_shape': NP.hstack((angres_patch+NP.zeros(phi.size).reshape(-1,1), angres_patch+NP.zeros(phi.size).reshape(-1,1), NP.zeros(phi.size).reshape(-1,1))), 'epoch': 'J2000', 'coords': 'radec', 'src_shape_units': ('degree', 'degree', 'degree')}
                cosmotile.write_lightcone_catalog(init_parms, outfile=outfile, action='store')
            else:
                theta = NP.degrees(theta)
                phi = NP.degrees(phi)
                radec = NP.hstack((phi.reshape(-1,1), 90.0 - theta.reshape(-1,1)))
                init_parms = {'name': cube_source, 'frequency': ofreqs, 'location': radec, 'spec_type': 'spectrum', 'spectrum': pixarea_patch*sphpatches.T, 'src_shape': NP.hstack((angres_patch+NP.zeros(phi.size).reshape(-1,1), angres_patch+NP.zeros(phi.size).reshape(-1,1), NP.zeros(phi.size).reshape(-1,1))), 'epoch': 'J2000', 'coords': 'radec', 'src_shape_units': ('degree', 'degree', 'degree')}
                cosmotile.write_lightcone_catalog(init_parms, outfile=outfile, action='store')
        else:
            cosmotile.write_lightcone_surfaces(sphpatches, units, outfile, ofreqs, cosmo=cosmo, is_healpix=is_healpix)
    # Optionally drop into a debugger so results can be inspected
    # interactively before the interpreter exits.
    if wait_after_run:
        PDB.set_trace()
|
{
"content_hash": "4c4840fe928bc77af976c75531233877",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 416,
"avg_line_length": 50.18122977346278,
"alnum_prop": 0.5986069908422547,
"repo_name": "nithyanandan/general",
"id": "9ae38c64e7656ac2387563c09a452623e71cf3b9",
"size": "15516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/run_cosmotile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "432012"
}
],
"symlink_target": ""
}
|
"""Support for Xiaomi Smart WiFi Socket and Smart Power Strip."""
import asyncio
from functools import partial
import logging
import voluptuous as vol
from homeassistant.components.switch import DOMAIN, PLATFORM_SCHEMA, SwitchDevice
from homeassistant.const import ATTR_ENTITY_ID, CONF_HOST, CONF_NAME, CONF_TOKEN
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Miio Switch"
DATA_KEY = "switch.xiaomi_miio"
CONF_MODEL = "model"
# Models with special handling in the entity classes below.
MODEL_POWER_STRIP_V2 = "zimi.powerstrip.v2"
MODEL_PLUG_V3 = "chuangmi.plug.v3"
# Platform configuration: host/token required, model optional (auto-detected
# from the device when omitted).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_MODEL): vol.In(
            [
                "chuangmi.plug.v1",
                "qmi.powerstrip.v1",
                "zimi.powerstrip.v2",
                "chuangmi.plug.m1",
                "chuangmi.plug.m3",
                "chuangmi.plug.v2",
                "chuangmi.plug.v3",
                "chuangmi.plug.hmi205",
                "lumi.acpartner.v3",
            ]
        ),
    }
)
# Names of the extra state attributes exposed by the entities.
ATTR_POWER = "power"
ATTR_TEMPERATURE = "temperature"
ATTR_LOAD_POWER = "load_power"
ATTR_MODEL = "model"
ATTR_MODE = "mode"
ATTR_POWER_MODE = "power_mode"
ATTR_WIFI_LED = "wifi_led"
ATTR_POWER_PRICE = "power_price"
ATTR_PRICE = "price"
# python-miio reports command success as the list ["ok"].
SUCCESS = ["ok"]
# Bit flags describing optional capabilities; combined per model below.
FEATURE_SET_POWER_MODE = 1
FEATURE_SET_WIFI_LED = 2
FEATURE_SET_POWER_PRICE = 4
FEATURE_FLAGS_GENERIC = 0
FEATURE_FLAGS_POWER_STRIP_V1 = (
    FEATURE_SET_POWER_MODE | FEATURE_SET_WIFI_LED | FEATURE_SET_POWER_PRICE
)
FEATURE_FLAGS_POWER_STRIP_V2 = FEATURE_SET_WIFI_LED | FEATURE_SET_POWER_PRICE
FEATURE_FLAGS_PLUG_V3 = FEATURE_SET_WIFI_LED
# Platform services and their call schemas, registered in
# async_setup_platform and dispatched by method name.
SERVICE_SET_WIFI_LED_ON = "xiaomi_miio_set_wifi_led_on"
SERVICE_SET_WIFI_LED_OFF = "xiaomi_miio_set_wifi_led_off"
SERVICE_SET_POWER_MODE = "xiaomi_miio_set_power_mode"
SERVICE_SET_POWER_PRICE = "xiaomi_miio_set_power_price"
SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
SERVICE_SCHEMA_POWER_MODE = SERVICE_SCHEMA.extend(
    {vol.Required(ATTR_MODE): vol.All(vol.In(["green", "normal"]))}
)
SERVICE_SCHEMA_POWER_PRICE = SERVICE_SCHEMA.extend(
    {vol.Required(ATTR_PRICE): vol.All(vol.Coerce(float), vol.Range(min=0))}
)
SERVICE_TO_METHOD = {
    SERVICE_SET_WIFI_LED_ON: {"method": "async_set_wifi_led_on"},
    SERVICE_SET_WIFI_LED_OFF: {"method": "async_set_wifi_led_off"},
    SERVICE_SET_POWER_MODE: {
        "method": "async_set_power_mode",
        "schema": SERVICE_SCHEMA_POWER_MODE,
    },
    SERVICE_SET_POWER_PRICE: {
        "method": "async_set_power_price",
        "schema": SERVICE_SCHEMA_POWER_PRICE,
    },
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the switch from config.

    Detects the device model (querying the device over the network when not
    configured), instantiates the matching entity class and registers the
    platform-specific services.
    """
    from miio import Device, DeviceException
    if DATA_KEY not in hass.data:
        hass.data[DATA_KEY] = {}
    host = config.get(CONF_HOST)
    name = config.get(CONF_NAME)
    token = config.get(CONF_TOKEN)
    model = config.get(CONF_MODEL)
    # Only the first characters of the token are logged, to avoid leaking it.
    _LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
    devices = []
    unique_id = None
    # Auto-detect the model by querying the device; retry platform setup
    # later if the device is unreachable.
    if model is None:
        try:
            miio_device = Device(host, token)
            device_info = miio_device.info()
            model = device_info.model
            unique_id = "{}-{}".format(model, device_info.mac_address)
            _LOGGER.info(
                "%s %s %s detected",
                model,
                device_info.firmware_version,
                device_info.hardware_version,
            )
        except DeviceException:
            raise PlatformNotReady
    if model in ["chuangmi.plug.v1", "chuangmi.plug.v3"]:
        from miio import ChuangmiPlug
        plug = ChuangmiPlug(host, token, model=model)
        # The device has two switchable channels (mains and a USB port).
        # A switch device per channel will be created.
        for channel_usb in [True, False]:
            device = ChuangMiPlugSwitch(name, plug, model, unique_id, channel_usb)
            devices.append(device)
            hass.data[DATA_KEY][host] = device
    elif model in ["qmi.powerstrip.v1", "zimi.powerstrip.v2"]:
        from miio import PowerStrip
        plug = PowerStrip(host, token, model=model)
        device = XiaomiPowerStripSwitch(name, plug, model, unique_id)
        devices.append(device)
        hass.data[DATA_KEY][host] = device
    elif model in [
        "chuangmi.plug.m1",
        "chuangmi.plug.m3",
        "chuangmi.plug.v2",
        "chuangmi.plug.hmi205",
    ]:
        from miio import ChuangmiPlug
        plug = ChuangmiPlug(host, token, model=model)
        device = XiaomiPlugGenericSwitch(name, plug, model, unique_id)
        devices.append(device)
        hass.data[DATA_KEY][host] = device
    elif model in ["lumi.acpartner.v3"]:
        from miio import AirConditioningCompanionV3
        plug = AirConditioningCompanionV3(host, token)
        device = XiaomiAirConditioningCompanionSwitch(name, plug, model, unique_id)
        devices.append(device)
        hass.data[DATA_KEY][host] = device
    else:
        _LOGGER.error(
            "Unsupported device found! Please create an issue at "
            "https://github.com/rytilahti/python-miio/issues "
            "and provide the following data: %s",
            model,
        )
        return False
    async_add_entities(devices, update_before_add=True)
    async def async_service_handler(service):
        """Map services to methods on XiaomiPlugGenericSwitch."""
        method = SERVICE_TO_METHOD.get(service.service)
        params = {
            key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID
        }
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        # Without an entity_id the service targets every known device.
        if entity_ids:
            devices = [
                device
                for device in hass.data[DATA_KEY].values()
                if device.entity_id in entity_ids
            ]
        else:
            devices = hass.data[DATA_KEY].values()
        update_tasks = []
        for device in devices:
            # Skip devices that don't implement the requested feature method.
            if not hasattr(device, method["method"]):
                continue
            await getattr(device, method["method"])(**params)
            update_tasks.append(device.async_update_ha_state(True))
        if update_tasks:
            await asyncio.wait(update_tasks)
    for plug_service in SERVICE_TO_METHOD:
        schema = SERVICE_TO_METHOD[plug_service].get("schema", SERVICE_SCHEMA)
        hass.services.async_register(
            DOMAIN, plug_service, async_service_handler, schema=schema
        )
class XiaomiPlugGenericSwitch(SwitchDevice):
    """Representation of a Xiaomi Plug Generic.

    Base class for all xiaomi_miio switch entities; subclasses override the
    turn on/off and update methods where the python-miio device differs.
    """

    def __init__(self, name, plug, model, unique_id):
        """Initialize the plug switch."""
        self._name = name
        self._plug = plug
        self._model = model
        self._unique_id = unique_id
        self._icon = "mdi:power-socket"
        self._available = False
        self._state = None
        self._state_attrs = {ATTR_TEMPERATURE: None, ATTR_MODEL: self._model}
        self._device_features = FEATURE_FLAGS_GENERIC
        # Set after a successful command so the next poll doesn't clobber
        # the optimistically-updated state before the device reports it.
        self._skip_update = False

    @property
    def should_poll(self):
        """Poll the plug."""
        return True

    @property
    def unique_id(self):
        """Return an unique ID."""
        return self._unique_id

    @property
    def name(self):
        """Return the name of the device if any."""
        return self._name

    @property
    def icon(self):
        """Return the icon to use for device if any."""
        return self._icon

    @property
    def available(self):
        """Return true when state is known."""
        return self._available

    @property
    def device_state_attributes(self):
        """Return the state attributes of the device."""
        return self._state_attrs

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._state

    async def _try_command(self, mask_error, func, *args, **kwargs):
        """Call a plug command handling error messages.

        Returns True on success and False on failure; a DeviceException
        additionally marks the entity unavailable.
        """
        from miio import DeviceException
        try:
            result = await self.hass.async_add_executor_job(
                partial(func, *args, **kwargs)
            )
            _LOGGER.debug("Response received from plug: %s", result)
            # The Chuangmi Plug V3 returns 0 on success on usb_on/usb_off.
            # BUGFIX: `func` is a bound method, not a string, so the original
            # `func in ["usb_on", "usb_off"]` check was always False and the
            # V3 USB channel was reported as failed.  Compare the callable's
            # name instead.
            if getattr(func, "__name__", "") in ("usb_on", "usb_off") and result == 0:
                return True
            return result == SUCCESS
        except DeviceException as exc:
            _LOGGER.error(mask_error, exc)
            self._available = False
            return False

    async def async_turn_on(self, **kwargs):
        """Turn the plug on."""
        result = await self._try_command("Turning the plug on failed.", self._plug.on)
        if result:
            self._state = True
            self._skip_update = True

    async def async_turn_off(self, **kwargs):
        """Turn the plug off."""
        result = await self._try_command("Turning the plug off failed.", self._plug.off)
        if result:
            self._state = False
            self._skip_update = True

    async def async_update(self):
        """Fetch state from the device."""
        from miio import DeviceException
        # On state change the device doesn't provide the new state immediately.
        if self._skip_update:
            self._skip_update = False
            return
        try:
            state = await self.hass.async_add_executor_job(self._plug.status)
            _LOGGER.debug("Got new state: %s", state)
            self._available = True
            self._state = state.is_on
            self._state_attrs[ATTR_TEMPERATURE] = state.temperature
        except DeviceException as ex:
            self._available = False
            _LOGGER.error("Got exception while fetching the state: %s", ex)

    async def async_set_wifi_led_on(self):
        """Turn the wifi led on."""
        if not self._device_features & FEATURE_SET_WIFI_LED:
            return
        await self._try_command(
            "Turning the wifi led on failed.", self._plug.set_wifi_led, True
        )

    async def async_set_wifi_led_off(self):
        """Turn the wifi led off."""
        # BUGFIX: docstring previously said "on" (copy-paste error).
        if not self._device_features & FEATURE_SET_WIFI_LED:
            return
        await self._try_command(
            "Turning the wifi led off failed.", self._plug.set_wifi_led, False
        )

    async def async_set_power_price(self, price: int):
        """Set the power price."""
        if not self._device_features & FEATURE_SET_POWER_PRICE:
            return
        await self._try_command(
            "Setting the power price of the power strip failed.",
            self._plug.set_power_price,
            price,
        )
class XiaomiPowerStripSwitch(XiaomiPlugGenericSwitch):
    """Representation of a Xiaomi Power Strip."""

    def __init__(self, name, plug, model, unique_id):
        """Initialize the plug switch."""
        super().__init__(name, plug, model, unique_id)
        if self._model == MODEL_POWER_STRIP_V2:
            self._device_features = FEATURE_FLAGS_POWER_STRIP_V2
        else:
            self._device_features = FEATURE_FLAGS_POWER_STRIP_V1
        self._state_attrs[ATTR_LOAD_POWER] = None
        # BUGFIX: the original tested `features & FLAG == 1`, i.e.
        # `(features & FLAG) == 1`, which is only ever true for
        # FEATURE_SET_POWER_MODE (value 1).  For the wifi-led (2) and
        # power-price (4) bits the comparison was always False, so those
        # attributes were never exposed.  Test the masked bit for truthiness.
        if self._device_features & FEATURE_SET_POWER_MODE:
            self._state_attrs[ATTR_POWER_MODE] = None
        if self._device_features & FEATURE_SET_WIFI_LED:
            self._state_attrs[ATTR_WIFI_LED] = None
        if self._device_features & FEATURE_SET_POWER_PRICE:
            self._state_attrs[ATTR_POWER_PRICE] = None

    async def async_update(self):
        """Fetch state from the device."""
        from miio import DeviceException
        # On state change the device doesn't provide the new state immediately.
        if self._skip_update:
            self._skip_update = False
            return
        try:
            state = await self.hass.async_add_executor_job(self._plug.status)
            _LOGGER.debug("Got new state: %s", state)
            self._available = True
            self._state = state.is_on
            self._state_attrs.update(
                {ATTR_TEMPERATURE: state.temperature, ATTR_LOAD_POWER: state.load_power}
            )
            # Same `& FLAG == 1` bit-test fix as in __init__.
            if self._device_features & FEATURE_SET_POWER_MODE and state.mode:
                self._state_attrs[ATTR_POWER_MODE] = state.mode.value
            if self._device_features & FEATURE_SET_WIFI_LED and state.wifi_led:
                self._state_attrs[ATTR_WIFI_LED] = state.wifi_led
            if (
                self._device_features & FEATURE_SET_POWER_PRICE
                and state.power_price
            ):
                self._state_attrs[ATTR_POWER_PRICE] = state.power_price
        except DeviceException as ex:
            self._available = False
            _LOGGER.error("Got exception while fetching the state: %s", ex)

    async def async_set_power_mode(self, mode: str):
        """Set the power mode ("green" or "normal")."""
        if not self._device_features & FEATURE_SET_POWER_MODE:
            return
        from miio.powerstrip import PowerMode
        await self._try_command(
            "Setting the power mode of the power strip failed.",
            self._plug.set_power_mode,
            PowerMode(mode),
        )
class ChuangMiPlugSwitch(XiaomiPlugGenericSwitch):
    """Representation of a Chuang Mi Plug V1 and V3.

    One instance is created per channel: the mains socket and, when
    ``channel_usb`` is True, the USB port.
    """

    def __init__(self, name, plug, model, unique_id, channel_usb):
        """Initialize the plug switch."""
        name = "{} USB".format(name) if channel_usb else name
        # Keep the two channel entities distinguishable by unique_id.
        if unique_id is not None and channel_usb:
            unique_id = "{}-{}".format(unique_id, "usb")
        super().__init__(name, plug, model, unique_id)
        self._channel_usb = channel_usb
        if self._model == MODEL_PLUG_V3:
            self._device_features = FEATURE_FLAGS_PLUG_V3
            self._state_attrs[ATTR_WIFI_LED] = None
            # Load power is only reported for the mains channel.
            if self._channel_usb is False:
                self._state_attrs[ATTR_LOAD_POWER] = None

    async def async_turn_on(self, **kwargs):
        """Turn a channel on."""
        if self._channel_usb:
            result = await self._try_command(
                "Turning the plug on failed.", self._plug.usb_on
            )
        else:
            result = await self._try_command(
                "Turning the plug on failed.", self._plug.on
            )
        if result:
            self._state = True
            self._skip_update = True

    async def async_turn_off(self, **kwargs):
        """Turn a channel off."""
        # BUGFIX: the error messages said "on" (copy-paste from async_turn_on).
        if self._channel_usb:
            result = await self._try_command(
                "Turning the plug off failed.", self._plug.usb_off
            )
        else:
            result = await self._try_command(
                "Turning the plug off failed.", self._plug.off
            )
        if result:
            self._state = False
            self._skip_update = True

    async def async_update(self):
        """Fetch state from the device."""
        from miio import DeviceException
        # On state change the device doesn't provide the new state immediately.
        if self._skip_update:
            self._skip_update = False
            return
        try:
            state = await self.hass.async_add_executor_job(self._plug.status)
            _LOGGER.debug("Got new state: %s", state)
            self._available = True
            # Each entity tracks only its own channel's state.
            if self._channel_usb:
                self._state = state.usb_power
            else:
                self._state = state.is_on
            self._state_attrs[ATTR_TEMPERATURE] = state.temperature
            if state.wifi_led:
                self._state_attrs[ATTR_WIFI_LED] = state.wifi_led
            if self._channel_usb is False and state.load_power:
                self._state_attrs[ATTR_LOAD_POWER] = state.load_power
        except DeviceException as ex:
            self._available = False
            _LOGGER.error("Got exception while fetching the state: %s", ex)
class XiaomiAirConditioningCompanionSwitch(XiaomiPlugGenericSwitch):
    """Representation of a Xiaomi AirConditioning Companion.

    Exposes the companion's pass-through power socket as a switch.
    """

    def __init__(self, name, plug, model, unique_id):
        """Initialize the acpartner switch."""
        super().__init__(name, plug, model, unique_id)
        self._state_attrs.update({ATTR_TEMPERATURE: None, ATTR_LOAD_POWER: None})

    async def async_turn_on(self, **kwargs):
        """Turn the socket on."""
        ok = await self._try_command(
            "Turning the socket on failed.", self._plug.socket_on
        )
        if not ok:
            return
        self._state = True
        self._skip_update = True

    async def async_turn_off(self, **kwargs):
        """Turn the socket off."""
        ok = await self._try_command(
            "Turning the socket off failed.", self._plug.socket_off
        )
        if not ok:
            return
        self._state = False
        self._skip_update = True

    async def async_update(self):
        """Fetch state from the device."""
        from miio import DeviceException
        # Skip one poll right after a state change: the device briefly keeps
        # reporting the old state.
        if self._skip_update:
            self._skip_update = False
            return
        try:
            status = await self.hass.async_add_executor_job(self._plug.status)
            _LOGGER.debug("Got new state: %s", status)
        except DeviceException as ex:
            self._available = False
            _LOGGER.error("Got exception while fetching the state: %s", ex)
        else:
            self._available = True
            self._state = status.power_socket == "on"
            self._state_attrs[ATTR_LOAD_POWER] = status.load_power
|
{
"content_hash": "c780b42ae0a22d0559a692582f5ac6d2",
"timestamp": "",
"source": "github",
"line_count": 553,
"max_line_length": 88,
"avg_line_length": 32.63110307414105,
"alnum_prop": 0.5873649210307564,
"repo_name": "fbradyirl/home-assistant",
"id": "8188d7911889ca2a28ccfc64c335c1308a3afad0",
"size": "18045",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/xiaomi_miio/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.