commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
e7cdc06db9fb1d8b132a339d50f5c22f0b81254b | Fix shape error in test_iterables | yarikoptic/NiPy-OLD,yarikoptic/NiPy-OLD | neuroimaging/modalities/fmri/fmristat/tests/test_iterables.py | neuroimaging/modalities/fmri/fmristat/tests/test_iterables.py | import numpy as np
from numpy.random import standard_normal as noise
from neuroimaging.testing import funcfile, anatfile
from neuroimaging.core.api import load_image
from neuroimaging.modalities.fmri.api import fromimage, fmri_generator
from neuroimaging.core.image.generators import *
from neuroimaging.fixes.scipy.stats.models.regression import OLSModel as ols_model
fd = np.asarray(load_image(funcfile))
fi = fromimage(load_image(funcfile)) # I think it makes more
# sense to use fd instead of fi
# for GLM purposes -- reduces some
# noticeable overhead in creating
# the array from FmriImage.list
# create a design matrix, model and contrast matrix
design = noise((fd.shape[0],3))
model = ols_model(design)
cmatrix = np.array([[1,0,0],[0,1,0]])
# two prototypical functions in a GLM analysis
def fit(input):
return model.fit(input).resid
def contrast(results):
return results.Fcontrast(cmatrix)
def result_generator(datag):
for i, fdata in datag:
yield i, model.fit(fdata)
def flatten_generator(ing):
for i, r in ing:
r.shape = (r.shape[0], np.product(r.shape[1:]))
yield i, r
def unflatten_generator(ing):
for i, r in ing:
r.shape = (2,20)
yield i, r
def contrast_generator(resultg):
for i, r in resultg:
yield i, np.asarray(contrast(r))
"""
Fit a model, iterating over the slices of an array
associated to an FmriImage.
"""
c = np.zeros(fd.shape[1:]) + 0.5
write_data(c, unflatten_generator(contrast_generator(result_generator(flatten_generator(fmri_generator(fd))))))
"""
Fit a model, iterating over the array associated to an FmriImage,
iterating over a list of ROIs defined by binary regions
of the same shape as a frame of FmriImage
"""
a = np.asarray(fd[0]) # this might really be an anatomical image or
# AR(1) coefficients
p = np.greater(a, a.mean())
d = np.ones(fd.shape[1:]) * 2.
write_data(d, contrast_generator(result_generator(flatten_generator(fmri_generator(fd, parcels(p))))))
assert np.allclose(d, c)
e = np.zeros(fd.shape[1:]) + 3.
write_data(e, f_generator(contrast, result_generator(flatten_generator(fmri_generator(fd, parcels(p))))))
assert np.allclose(d, e)
| import numpy as np
from numpy.random import standard_normal as noise
from neuroimaging.testing import funcfile, anatfile
from neuroimaging.core.api import load_image
from neuroimaging.modalities.fmri.api import fromimage, fmri_generator
from neuroimaging.core.image.generators import *
from neuroimaging.fixes.scipy.stats.models.regression import OLSModel as ols_model
fd = np.asarray(load_image(funcfile))
fi = fromimage(load_image(funcfile)) # I think it makes more
# sense to use fd instead of fi
# for GLM purposes -- reduces some
# noticeable overhead in creating
# the array from FmriImage.list
# create a design matrix, model and contrast matrix
design = noise((fd.shape[0],3))
model = ols_model(design)
cmatrix = np.array([[1,0,0],[0,1,0]])
# two prototypical functions in a GLM analysis
def fit(input):
return model.fit(input).resid
def contrast(results):
return results.Fcontrast(cmatrix)
def result_generator(datag):
for i, fdata in datag:
yield i, model.fit(fdata)
def flatten_generator(ing):
for i, r in ing:
r.shape = (r.shape[0], np.product(r.shape[1:]))
yield i, r
def unflatten_generator(ing):
for i, r in ing:
r.shape = (20,20)
yield i, r
def contrast_generator(resultg):
for i, r in resultg:
print np.asarray(contrast(r)).shape
yield i, np.asarray(contrast(r))
"""
Fit a model, iterating over the slices of an array
associated to an FmriImage.
"""
c = np.zeros(fd.shape[1:]) + 0.5
write_data(c, unflatten_generator(contrast_generator(result_generator(flatten_generator(fmri_generator(fd))))))
"""
Fit a model, iterating over the array associated to an FmriImage,
iterating over a list of ROIs defined by binary regions
of the same shape as a frame of FmriImage
"""
a = np.asarray(fd[0]) # this might really be an anatomical image or
# AR(1) coefficients
p = np.greater(a, a.mean())
d = np.ones(fd.shape[1:]) * 2.
write_data(d, contrast_generator(result_generator(flatten_generator(fmri_generator(fd, parcels(p))))))
assert np.allclose(d, c)
e = np.zeros(fd.shape[1:]) + 3.
write_data(e, f_generator(contrast, result_generator(flatten_generator(fmri_generator(fd, parcels(p))))))
assert np.allclose(d, e)
| bsd-3-clause | Python |
fe40b07ee55550399f7dd14253a161b16711bdfc | Fix pre-commit issues in test_simple_model. | Z2PackDev/TBmodels,Z2PackDev/TBmodels | tests/test_simple_model.py | tests/test_simple_model.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
"""Tests for creating a simple tight-binding model."""
import pytest
from parameters import T_VALUES, KPT
@pytest.mark.parametrize('t1', T_VALUES)
@pytest.mark.parametrize('k', KPT)
def test_simple(t1, get_model, k, compare_data, models_equal, compare_isclose):
"""Regression test for a simple manually created tight-binding model."""
model = get_model(*t1)
compare_isclose(model.hamilton(k), tag='hamilton')
compare_isclose(model.eigenval(k), tag='eigenval')
compare_data(models_equal, model)
@pytest.mark.parametrize('t1', T_VALUES)
def test_invalid_dim(t1, get_model):
"""
Check that an error is raised when the dimension does not match
the hopping matrix keys.
"""
with pytest.raises(ValueError):
get_model(*t1, dim=2)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2015-2018, ETH Zurich, Institut fuer Theoretische Physik
# Author: Dominik Gresch <greschd@gmx.ch>
import pytest
from parameters import T_VALUES, KPT
@pytest.mark.parametrize('t1', T_VALUES)
@pytest.mark.parametrize('k', KPT)
def test_simple(t1, get_model, k, compare_data, models_equal, compare_isclose):
m = get_model(*t1)
compare_isclose(m.hamilton(k), tag='hamilton')
compare_isclose(m.eigenval(k), tag='eigenval')
compare_data(models_equal, m)
@pytest.mark.parametrize('t1', T_VALUES)
def test_invalid_R(t1, get_model):
with pytest.raises(ValueError):
m = get_model(*t1, dim=2)
| apache-2.0 | Python |
08ca2d9c19311bab3911ef0094c77c6c85c6f43e | Replace yaml.dump with yaml.safe_dump | kevin-zhaoshuai/zun,kevin-zhaoshuai/zun,kevin-zhaoshuai/zun | zun/common/yamlutils.py | zun/common/yamlutils.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
def load(s):
try:
yml_dict = yaml.safe_load(s)
except yaml.YAMLError as exc:
msg = 'An error occurred during YAML parsing.'
if hasattr(exc, 'problem_mark'):
msg += ' Error position: (%s:%s)' % (exc.problem_mark.line + 1,
exc.problem_mark.column + 1)
raise ValueError(msg)
if not isinstance(yml_dict, dict) and not isinstance(yml_dict, list):
raise ValueError('The source is not a YAML mapping or list.')
if isinstance(yml_dict, dict) and len(yml_dict) < 1:
raise ValueError('Could not find any element in your YAML mapping.')
return yml_dict
def dump(s):
return yaml.safe_dump(s)
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
if hasattr(yaml, 'CSafeDumper'):
yaml_dumper = yaml.CSafeDumper
else:
yaml_dumper = yaml.SafeDumper
def load(s):
try:
yml_dict = yaml.safe_load(s)
except yaml.YAMLError as exc:
msg = 'An error occurred during YAML parsing.'
if hasattr(exc, 'problem_mark'):
msg += ' Error position: (%s:%s)' % (exc.problem_mark.line + 1,
exc.problem_mark.column + 1)
raise ValueError(msg)
if not isinstance(yml_dict, dict) and not isinstance(yml_dict, list):
raise ValueError('The source is not a YAML mapping or list.')
if isinstance(yml_dict, dict) and len(yml_dict) < 1:
raise ValueError('Could not find any element in your YAML mapping.')
return yml_dict
def dump(s):
return yaml.dump(s, Dumper=yaml_dumper)
| apache-2.0 | Python |
7716f279724d0866ec9488478b187bc34031a77b | Add docstrings to schema.py | Hardtack/formencode_jsonschema | formencode_jsonschema/schema.py | formencode_jsonschema/schema.py | from marshmallow import Schema, fields
from formencode.api import Validator as FormencodeValidator
from formencode.schema import Schema as FormencodeSchema
from .converters import DEFAULT_CONVERTERS, SchemaDelegate
class JSONSchema(Schema, SchemaDelegate):
"""
Marshmallow schema for convert Formencode's schema to JSON schema.
You can add more converters by overriding ``__validator_converters__``
field.
"""
type = fields.Constant('object')
properties = fields.Method('get_properties')
required = fields.Method('get_required')
__validator_converters__ = DEFAULT_CONVERTERS
def get_required(self, schema: FormencodeSchema):
fields = schema.fields
required = []
for name, validator in fields.items():
if self.is_required(validator):
required.append(name)
return required
def get_properties(self, schema: FormencodeSchema):
fields = schema.fields
properties = {}
for name, validator in fields.items():
properties[name] = self.convert_validator(validator)
return properties
def handle_unknown_validator(self, validator: FormencodeValidator):
"""When schema found unknown validator, handle that here."""
raise ValueError(
"Can not convert a validator {validator!r}"
.format(validator=validator))
# Delegate implementations
def can_convert(self, validator: FormencodeValidator):
for converter in self.__validator_converters__:
if converter.can_convert(validator, self):
return True
return False
def is_required(self, validator: FormencodeValidator):
for converter in self.__validator_converters__:
if converter.can_convert(validator, self) and \
converter.is_required(validator, self):
return True
return False
def convert_validator(self, validator: FormencodeValidator):
for converter in self.__validator_converters__:
if converter.can_convert(validator, self):
return converter.convert(validator, self)
return self.handle_unknown_validator(validator)
| from marshmallow import Schema, fields
from formencode.api import Validator as FormencodeValidator
from formencode.schema import Schema as FormencodeSchema
from .converters import DEFAULT_CONVERTERS, SchemaDelegate
class JSONSchema(Schema, SchemaDelegate):
type = fields.Constant('object')
properties = fields.Method('get_properties')
required = fields.Method('get_required')
__validator_converters__ = DEFAULT_CONVERTERS
def get_required(self, schema: FormencodeSchema):
fields = schema.fields
required = []
for name, validator in fields.items():
if self.is_required(validator):
required.append(name)
return required
def get_properties(self, schema: FormencodeSchema):
fields = schema.fields
properties = {}
for name, validator in fields.items():
properties[name] = self.convert_validator(validator)
return properties
def handle_unknown_validator(self, validator: FormencodeValidator):
raise ValueError(
"Can not convert a validator {validator!r}"
.format(validator=validator))
def can_convert(self, validator: FormencodeValidator):
for converter in self.__validator_converters__:
if converter.can_convert(validator, self):
return True
return False
def is_required(self, validator: FormencodeValidator):
for converter in self.__validator_converters__:
if converter.can_convert(validator, self) and \
converter.is_required(validator, self):
return True
return False
def convert_validator(self, validator: FormencodeValidator):
for converter in self.__validator_converters__:
if converter.can_convert(validator, self):
return converter.convert(validator, self)
return self.handle_unknown_validator(validator)
| mit | Python |
a571e5539c89e7f5b815df40cc51520c38eeb670 | Correct user return value | drcapulet/sentry,BuildingLink/sentry,BayanGroup/sentry,SilentCircle/sentry,JackDanger/sentry,ewdurbin/sentry,JamesMura/sentry,beni55/sentry,TedaLIEz/sentry,looker/sentry,drcapulet/sentry,ewdurbin/sentry,ngonzalvez/sentry,mvaled/sentry,looker/sentry,songyi199111/sentry,mvaled/sentry,gencer/sentry,BayanGroup/sentry,felixbuenemann/sentry,Natim/sentry,fuziontech/sentry,korealerts1/sentry,zenefits/sentry,mitsuhiko/sentry,rdio/sentry,boneyao/sentry,TedaLIEz/sentry,JamesMura/sentry,pauloschilling/sentry,BuildingLink/sentry,camilonova/sentry,korealerts1/sentry,1tush/sentry,1tush/sentry,kevinastone/sentry,argonemyth/sentry,ifduyue/sentry,songyi199111/sentry,Kryz/sentry,fotinakis/sentry,felixbuenemann/sentry,mvaled/sentry,wujuguang/sentry,kevinastone/sentry,ifduyue/sentry,wong2/sentry,ewdurbin/sentry,argonemyth/sentry,daevaorn/sentry,llonchj/sentry,gencer/sentry,ifduyue/sentry,mvaled/sentry,mvaled/sentry,zenefits/sentry,Natim/sentry,JackDanger/sentry,wong2/sentry,wujuguang/sentry,nicholasserra/sentry,jokey2k/sentry,daevaorn/sentry,gencer/sentry,JamesMura/sentry,JTCunning/sentry,fotinakis/sentry,jean/sentry,hongliang5623/sentry,ifduyue/sentry,felixbuenemann/sentry,Kryz/sentry,wong2/sentry,fuziontech/sentry,fuziontech/sentry,mvaled/sentry,JTCunning/sentry,looker/sentry,zenefits/sentry,imankulov/sentry,wujuguang/sentry,songyi199111/sentry,imankulov/sentry,1tush/sentry,kevinastone/sentry,gg7/sentry,ifduyue/sentry,boneyao/sentry,fotinakis/sentry,Kryz/sentry,Natim/sentry,drcapulet/sentry,jean/sentry,gg7/sentry,kevinlondon/sentry,imankulov/sentry,gg7/sentry,boneyao/sentry,hongliang5623/sentry,vperron/sentry,looker/sentry,TedaLIEz/sentry,BuildingLink/sentry,llonchj/sentry,JamesMura/sentry,alexm92/sentry,vperron/sentry,looker/sentry,gencer/sentry,rdio/sentry,jean/sentry,rdio/sentry,BuildingLink/sentry,kevinlondon/sentry,ngonzalvez/sentry,nicholasserra/sentry,camilonova/sentry,mitsuhiko/sentry,jean/sentry,nicholasse
rra/sentry,vperron/sentry,jokey2k/sentry,JackDanger/sentry,BuildingLink/sentry,zenefits/sentry,alexm92/sentry,pauloschilling/sentry,gencer/sentry,camilonova/sentry,JamesMura/sentry,ngonzalvez/sentry,pauloschilling/sentry,zenefits/sentry,jokey2k/sentry,BayanGroup/sentry,korealerts1/sentry,JTCunning/sentry,argonemyth/sentry,alexm92/sentry,beni55/sentry,hongliang5623/sentry,fotinakis/sentry,beeftornado/sentry,beeftornado/sentry,SilentCircle/sentry,beni55/sentry,jean/sentry,daevaorn/sentry,SilentCircle/sentry,llonchj/sentry,daevaorn/sentry,beeftornado/sentry,SilentCircle/sentry,rdio/sentry,kevinlondon/sentry | src/sentry/utils/social_auth.py | src/sentry/utils/social_auth.py | """
sentry.utils.social_auth
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.conf import settings
from social_auth.backends.pipeline.user import create_user
from social_auth.exceptions import SocialAuthBaseException
class AuthNotAllowed(SocialAuthBaseException):
pass
def create_user_if_enabled(*args, **kwargs):
"""
A pipeline step for django-social-auth
Create user. Depends on get_username pipeline.
"""
if not settings.SOCIAL_AUTH_CREATE_USERS:
user = kwargs.get('user')
if user:
return {'user': user}
raise AuthNotAllowed('You must create an account before associating an identity.')
return create_user(*args, **kwargs)
| """
sentry.utils.social_auth
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.conf import settings
from social_auth.backends.pipeline.user import create_user
from social_auth.exceptions import SocialAuthBaseException
class AuthNotAllowed(SocialAuthBaseException):
pass
def create_user_if_enabled(*args, **kwargs):
"""
A pipeline step for django-social-auth
Create user. Depends on get_username pipeline.
"""
if not settings.SOCIAL_AUTH_CREATE_USERS:
user = kwargs.get('user')
if not user:
raise AuthNotAllowed('You must create an account before associating an identity.')
return user
return create_user(*args, **kwargs)
| bsd-3-clause | Python |
96cb0b0633e37f6a71d3fed755c0bb3bba79274f | fix issue caused by the removal of launcher.py | eavatar/eavatar-me,eavatar/eavatar-me,eavatar/eavatar-me,eavatar/eavatar-me | tests/unit/test_runtime.py | tests/unit/test_runtime.py | # -*- coding: utf-8 -*-
import os
import unittest
from ava.runtime import environ
class TestRuntimeConfig(unittest.TestCase):
def setUp(self):
from ava import wrapper
wrapper.init_app_dir()
def test_should_have_dir_settings(self):
from ava.runtime.config import settings
self.assertIsNotNone(settings.get('conf_dir'))
self.assertIsNotNone(settings.get('data_dir'))
self.assertIsNotNone(settings.get('logs_dir'))
def test_should_have_logging_settings(self):
from ava.runtime.config import settings
handlers = settings['LOGGING']['handlers']
self.assertIsNotNone(handlers)
log_file = handlers['file_handler']['filename']
#print(log_file)
self.assertIsNotNone(log_file)
class TestRuntimeEnviron(unittest.TestCase):
def setUp(self):
from ava import wrapper
wrapper.init_app_dir()
def test_work_home(self):
env = environ.Environment()
self.assertTrue(os.path.isdir(env.pod_dir))
print(env.conf_dir)
self.assertTrue(os.path.isdir(env.conf_dir))
self.assertTrue(os.path.isdir(env.data_dir))
self.assertTrue(os.path.isdir(env.logs_dir))
| # -*- coding: utf-8 -*-
import os
import unittest
from ava.runtime import environ
class TestRuntimeConfig(unittest.TestCase):
def setUp(self):
from ava import launcher
launcher.init_app_dir()
def test_should_have_dir_settings(self):
from ava.runtime.config import settings
self.assertIsNotNone(settings.get('conf_dir'))
self.assertIsNotNone(settings.get('data_dir'))
self.assertIsNotNone(settings.get('logs_dir'))
def test_should_have_logging_settings(self):
from ava.runtime.config import settings
handlers = settings['LOGGING']['handlers']
self.assertIsNotNone(handlers)
log_file = handlers['file_handler']['filename']
#print(log_file)
self.assertIsNotNone(log_file)
class TestRuntimeEnviron(unittest.TestCase):
def setUp(self):
from ava import launcher
launcher.init_app_dir()
def test_work_home(self):
env = environ.Environment()
self.assertTrue(os.path.isdir(env.pod_dir))
print(env.conf_dir)
self.assertTrue(os.path.isdir(env.conf_dir))
self.assertTrue(os.path.isdir(env.data_dir))
self.assertTrue(os.path.isdir(env.logs_dir))
| apache-2.0 | Python |
113cafc12d17ddcd8dd0064139e29267cc82ff4f | implement Animes match, parse and format methods | caedus75/Renamer | src/renamer/filename/animes.py | src/renamer/filename/animes.py | # Copyright (c) 2021, Carlos Millett
# All rights reserved.
# This software may be modified and distributed under the terms
# of the Simplified BSD License. See the LICENSE file for details.
import re
from pathlib import Path
from .base import Media
from .types import Types
class Animes(Media):
_rRule = re.compile(r'\[.+\] ([\w\.\-\ ]+?)(?: S(\d))? - (\d{2,})')
def __init__(self, path: Path) -> None:
super().__init__(Types.ANIMES, path)
@classmethod
def match(cls, filename: str) -> bool:
if cls._rRule.search(filename):
return True
return False
@classmethod
def parse(cls, filename: str) -> str:
if cls._rRule.search(filename):
return cls._rRule.split(filename)[1]
return ''
@classmethod
def format(cls, filename: str) -> str:
info = cls._rRule.search(filename)
if not info:
return ''
title, season, ep = info.groups()
if season:
fmt = '{0} - S{1:0>2}E{2}'
else:
fmt = '{0} - S01E{2}'
return fmt.format(title, season, ep)
| # Copyright (c) 2021, Carlos Millett
# All rights reserved.
# This software may be modified and distributed under the terms
# of the Simplified BSD License. See the LICENSE file for details.
import re
from pathlib import Path
from .base import Media
from .types import Types
class Animes(Media):
_rRule = re.compile('')
def __init__(self, path: Path) -> None:
super().__init__(Types.ANIMES, path)
@classmethod
def match(cls, filename: str) -> bool:
return False
@classmethod
def parse(cls, filename: str) -> str:
return ''
@classmethod
def format(cls, filename: str) -> str:
return ''
| bsd-2-clause | Python |
d5e9113fbbf03fa5fadb7014d0460c02052ecbf8 | Correct the woefully incorrect distmappings table. | Fale/fedora-packages,Fale/fedora-packages,fedora-infra/fedora-packages,fedora-infra/fedora-packages,fedora-infra/fedora-packages,fedora-infra/fedora-packages,Fale/fedora-packages | fedoracommunity/search/distmappings.py | fedoracommunity/search/distmappings.py | # Global list of koji tags we care about
tags = ({'name': 'Rawhide', 'tag': 'f20'},
{'name': 'Fedora 19', 'tag': 'f19-updates'},
{'name': 'Fedora 19', 'tag': 'f19'},
{'name': 'Fedora 19 Testing', 'tag': 'f19-updates-testing'},
{'name': 'Fedora 18', 'tag': 'f18-updates'},
{'name': 'Fedora 18', 'tag': 'f18'},
{'name': 'Fedora 18 Testing', 'tag': 'f18-updates-testing'},
{'name': 'Fedora 17', 'tag': 'f17-updates'},
{'name': 'Fedora 17', 'tag': 'f17'},
{'name': 'Fedora 17 Testing', 'tag': 'f17-updates-testing'},
{'name': 'EPEL 6', 'tag': 'dist-6E-epel'},
{'name': 'EPEL 6', 'tag': 'dist-6E-epel-testing'},
{'name': 'EPEL 5', 'tag': 'dist-5E-epel'},
{'name': 'EPEL 5', 'tag': 'dist-5E-epel-testing'},
)
tags_to_name_map = {}
for t in tags:
tags_to_name_map[t['tag']] = t['name']
| # Global list of koji tags we care about
tags = ({'name': 'Rawhide', 'tag': 'f20'},
{'name': 'Fedora 19', 'tag': 'f16-updates'},
{'name': 'Fedora 19', 'tag': 'f16'},
{'name': 'Fedora 19 Testing', 'tag': 'f16-updates-testing'},
{'name': 'Fedora 18', 'tag': 'f16-updates'},
{'name': 'Fedora 18', 'tag': 'f16'},
{'name': 'Fedora 18 Testing', 'tag': 'f16-updates-testing'},
{'name': 'Fedora 17', 'tag': 'f16-updates'},
{'name': 'Fedora 17', 'tag': 'f16'},
{'name': 'Fedora 17 Testing', 'tag': 'f16-updates-testing'},
{'name': 'EPEL 6', 'tag': 'dist-6E-epel'},
{'name': 'EPEL 6', 'tag': 'dist-6E-epel-testing'},
{'name': 'EPEL 5', 'tag': 'dist-5E-epel'},
{'name': 'EPEL 5', 'tag': 'dist-5E-epel-testing'},
)
tags_to_name_map = {}
for t in tags:
tags_to_name_map[t['tag']] = t['name']
| agpl-3.0 | Python |
9a40ea36a902d3cc81bb2d4617e5905acc4c3bc2 | print the time on logs. | camswords/raspberry-pi-instagram-printer,camswords/raspberry-pi-instagram-printer | src/lib/support_team.py | src/lib/support_team.py | import sys
import datetime
class SupportTeam:
@staticmethod
def notify(message):
print "%s: %s" % (datetime.datetime.now(), message)
sys.stdout.flush()
| import sys
class SupportTeam:
@staticmethod
def notify(message):
print message
sys.stdout.flush()
| mit | Python |
435a8a09a933075336eeedcb61bfa059ebc628b7 | Add preferred_display_name to fields list for agency | gadventures/gapipy | gapipy/resources/booking/agency.py | gapipy/resources/booking/agency.py | # Python 2 and 3
from __future__ import unicode_literals
from ...models import Address
from ...models import AgencyDocument
from ...models.base import BaseModel
from .agency_chain import AgencyChain
from ..base import Resource
from ..tour import Promotion
class AgencyEmail(BaseModel):
_as_is_fields = ['type', 'address']
class Agency(Resource):
_resource_name = 'agencies'
_is_listable = False
_is_parent_resource = True
_as_is_fields = [
'id',
'href',
'name',
'booking_currencies',
'latitude',
'longitude',
'transactional_email',
'communication_preferences',
'override_agency_secondary',
'passenger_notifications',
'agent_notifications',
'preferred_display_name',
]
_date_time_fields_local = ['date_created']
_model_fields = [('address', Address)]
_resource_fields = [('agency_chain', AgencyChain)]
_model_collection_fields = [
('documents', AgencyDocument),
('emails', AgencyEmail),
('agency_chains', 'AgencyChain'),
]
_resource_collection_fields = [
('bookings', 'Booking'),
('agents', 'Agent'),
('promotions', Promotion),
]
| # Python 2 and 3
from __future__ import unicode_literals
from ...models import Address
from ...models import AgencyDocument
from ...models.base import BaseModel
from .agency_chain import AgencyChain
from ..base import Resource
from ..tour import Promotion
class AgencyEmail(BaseModel):
_as_is_fields = ['type', 'address']
class Agency(Resource):
_resource_name = 'agencies'
_is_listable = False
_is_parent_resource = True
_as_is_fields = [
'id',
'href',
'name',
'booking_currencies',
'latitude',
'longitude',
'transactional_email',
'communication_preferences',
'override_agency_secondary',
'passenger_notifications',
'agent_notifications',
]
_date_time_fields_local = ['date_created']
_model_fields = [('address', Address)]
_resource_fields = [('agency_chain', AgencyChain)]
_model_collection_fields = [
('documents', AgencyDocument),
('emails', AgencyEmail),
('agency_chains', 'AgencyChain'),
]
_resource_collection_fields = [
('bookings', 'Booking'),
('agents', 'Agent'),
('promotions', Promotion),
]
| mit | Python |
b93ca1e230043d5c91898ada1ea31e20e74116e2 | return js compatible version in check_version | cartologic/cartoview,cartologic/cartoview,cartologic/cartoview,cartologic/cartoview | cartoview/views.py | cartoview/views.py | # -*- coding: utf-8 -*-
import requests
from django.shortcuts import render
from pkg_resources import parse_version
from .log_handler import get_logger
from .version import get_current_version
logger = get_logger(__name__)
def index(request):
context = {}
return render(request, 'site_index.html', context)
def check_version(request):
r = requests.get("https://pypi.org/pypi/cartoview/json")
_version = parse_version(get_current_version())._version
release = _version.release
version = [str(x) for x in release]
current_version = ".".join(version)
context = dict(
latest_version=r.json()["info"]["version"],
current_version=current_version)
return render(
request,
"cartoview/check_version.js",
context=context,
content_type="text/javascript")
| # -*- coding: utf-8 -*-
import requests
from django.shortcuts import render
from .log_handler import get_logger
from .version import get_current_version
logger = get_logger(__name__)
def index(request):
context = {}
return render(request, 'site_index.html', context)
def check_version(request):
r = requests.get("https://pypi.org/pypi/cartoview/json")
context = dict(
latest_version=r.json()["info"]["version"],
current_version=get_current_version())
return render(
request,
"cartoview/check_version.js",
context=context,
content_type="text/javascript")
| bsd-2-clause | Python |
0cb34d55730e5d6f3f5aef10db4b1e3e1e28bd98 | change usage table to usage_events | zhujzhuo/openstack-trove,changsimon/trove,redhat-openstack/trove,changsimon/trove,denismakogon/trove-guestagent,changsimon/trove,cp16net/trove,hplustree/trove,fabian4/trove,mmasaki/trove,zhujzhuo/openstack-trove,zhangg/trove,zhujzhuo/openstack-trove,fabian4/trove,citrix-openstack-build/trove,citrix-openstack/build-trove,redhat-openstack/trove,openstack/trove,mmasaki/trove,redhat-openstack/trove,zhangg/trove,hplustree/trove,cp16net/trove,citrix-openstack/build-trove,openstack/trove,denismakogon/trove-guestagent,fabian4/trove,cp16net/trove,mmasaki/trove,citrix-openstack/build-trove,citrix-openstack-build/trove,citrix-openstack-build/trove | reddwarf/db/sqlalchemy/migrate_repo/versions/010_add_usage.py | reddwarf/db/sqlalchemy/migrate_repo/versions/010_add_usage.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from sqlalchemy.schema import UniqueConstraint
from reddwarf.db.sqlalchemy.migrate_repo.schema import create_tables
from reddwarf.db.sqlalchemy.migrate_repo.schema import DateTime
from reddwarf.db.sqlalchemy.migrate_repo.schema import drop_tables
from reddwarf.db.sqlalchemy.migrate_repo.schema import Integer
from reddwarf.db.sqlalchemy.migrate_repo.schema import String
from reddwarf.db.sqlalchemy.migrate_repo.schema import Table
meta = MetaData()
usage_events = Table('usage_events', meta,
Column('id', String(36), primary_key=True, nullable=False),
Column('instance_name', String(36)),
Column('tenant_id', String(36)),
Column('nova_instance_id', String(36)),
Column('instance_size', Integer()),
Column('nova_volume_id', String(36)),
Column('volume_size', Integer()),
Column('end_time', DateTime()),
Column('updated', DateTime()))
def upgrade(migrate_engine):
meta.bind = migrate_engine
create_tables([usage_events])
def downgrade(migrate_engine):
meta.bind = migrate_engine
drop_tables([usage_events])
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from sqlalchemy.schema import UniqueConstraint
from reddwarf.db.sqlalchemy.migrate_repo.schema import create_tables
from reddwarf.db.sqlalchemy.migrate_repo.schema import DateTime
from reddwarf.db.sqlalchemy.migrate_repo.schema import drop_tables
from reddwarf.db.sqlalchemy.migrate_repo.schema import Integer
from reddwarf.db.sqlalchemy.migrate_repo.schema import String
from reddwarf.db.sqlalchemy.migrate_repo.schema import Table
# Metadata container shared by upgrade() and downgrade(); it is bound to a
# concrete engine only when sqlalchemy-migrate invokes one of them.
meta = MetaData()

# Schema of the ``usage`` table introduced by this migration.
usage = Table(
    'usage', meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column('instance_name', String(36)),
    Column('tenant_id', String(36)),
    Column('nova_instance_id', String(36)),
    Column('instance_size', Integer()),
    Column('nova_volume_id', String(36)),
    Column('volume_size', Integer()),
    Column('end_time', DateTime()),
    Column('updated', DateTime()))


def upgrade(migrate_engine):
    """Create the ``usage`` table on *migrate_engine*."""
    meta.bind = migrate_engine
    create_tables([usage])


def downgrade(migrate_engine):
    """Drop the ``usage`` table created by :func:`upgrade`."""
    meta.bind = migrate_engine
    drop_tables([usage])
| apache-2.0 | Python |
0439458617d43fd20eaf604674e991114525d8e3 | Update purify characters | farakavco/lutino | src/lutino/persian.py | src/lutino/persian.py | # -*- coding: utf-8 -*-
_character_map = str.maketrans({
'ي': 'ی',
'ي': 'ی',
'ى': 'ی',
'ك': 'ک',
'ة': 'ه',
'ۀ': 'ه',
'ؤ': 'و',
'إ': 'ا',
'أ': 'ا',
'ء': '',
'ّ': '',
'ِ': '',
'ُ': '',
'َ': '',
'ْ': '',
'ٍ': '',
'ً': '',
'ٌ': '',
'؛': '',
'،': '',
'«': '',
'»': '',
# Eastern Arabic-Indic digits (Persian and Urdu) U+06Fn: ۰۱۲۳۴۵۶۷۸۹
'۰': '0',
'۱': '1',
'۲': '2',
'۳': '3',
'۴': '4',
'۵': '5',
'۶': '6',
'۷': '7',
'۸': '8',
'۹': '9',
# Arabic-Indic digits: U+066n: ٠١٢٣٤٥٦٧٨٩
'٠': '0',
'١': '1',
'٢': '2',
'٣': '3',
'٤': '4',
'٥': '5',
'٦': '6',
'٧': '7',
'٨': '8',
'٩': '9',
})
def purify(s):
    """Strip surrounding whitespace and normalize Persian/Arabic text.

    Applies the module-level translation table, which unifies letter
    variants and converts Eastern digits to ASCII.
    """
    stripped = s.strip()
    return stripped.translate(_character_map)
if __name__ == '__main__':
    # Smoke test: letter variants are unified and Eastern digits become ASCII.
    sample_input = ' يكةۀ ۱۲۳۴'
    expected_output = 'یکهه 1234'
    assert purify(sample_input) == expected_output
    print('success')
| # -*- coding: utf-8 -*-
_character_map = str.maketrans({
'ي': 'ی',
'ي': 'ی',
'ى': 'ی',
'ك': 'ک',
'ة': 'ه',
'ۀ': 'ه',
'ؤ': 'و',
'إ': 'ا',
'أ': 'ا',
'ء': '',
'ّ': '',
'ِ': '',
'ُ': '',
'َ': '',
'ْ': '',
'ٍ': '',
'ً': '',
'ٌ': '',
'؛': '',
# Eastern Arabic-Indic digits (Persian and Urdu) U+06Fn: ۰۱۲۳۴۵۶۷۸۹
'۰': '0',
'۱': '1',
'۲': '2',
'۳': '3',
'۴': '4',
'۵': '5',
'۶': '6',
'۷': '7',
'۸': '8',
'۹': '9',
# Arabic-Indic digits: U+066n: ٠١٢٣٤٥٦٧٨٩
'٠': '0',
'١': '1',
'٢': '2',
'٣': '3',
'٤': '4',
'٥': '5',
'٦': '6',
'٧': '7',
'٨': '8',
'٩': '9',
})
def purify(s):
    """Strip surrounding whitespace, then normalize via the module-level
    translation table (letter unification, digit conversion)."""
    return s.strip().translate(_character_map)
if __name__ == '__main__':
    # Smoke test: letter variants are unified and Eastern digits become ASCII.
    sample_input = ' يكةۀ ۱۲۳۴'
    expected_output = 'یکهه 1234'
    assert purify(sample_input) == expected_output
    print('success')
| apache-2.0 | Python |
590bb4cba586b5d4ae490000fbbedb3d877d90e1 | Use ascending order | geary/claslite,geary/claslite,geary/claslite,geary/claslite | web/app/rpc/handlers.py | web/app/rpc/handlers.py | # -*- coding: utf-8 -*-
"""
rpc.handlers
~~~~~~~~~~~~~~~~~~~~
CLASlite JSON-RPC handlers
:By Michael Geary - http://mg.to/
:See UNLICENSE or http://unlicense.org/ for public domain notice.
"""
from google.appengine.api import users
from tipfy import RequestHandler
from tipfy.ext.jsonrpc import JSONRPCMixin
from models import Project
class JsonService( object ):
    """JSON-RPC methods exposed to the CLASlite web client.

    NOTE(review): project_delete references ``Key`` and ``db`` which are
    not imported in this module (google.appengine.ext.db?) -- confirm.
    """
    def project_delete( self, keytext ):
        # Only the owning user may delete a project.
        owner = users.get_current_user()
        key = Key( keytext )
        project = db.get( key )
        if owner != project.owner:
            return {
                'error': 'Wrong owner'
            }
        db.delete( project )
        return {
            'deleted': keytext
        }
    def project_list( self ):
        """Return the current user's projects, ordered by name (ascending)."""
        owner = users.get_current_user()
        projects = Project.gql(
            'WHERE owner = :1 ORDER BY name',
            owner
        )
        # NOTE(review): ``list`` shadows the builtin; consider renaming.
        list = []
        for project in projects:
            list.append({
                'key': str( project.key() ),
                'name': project.name
            })
        return {
            'projects': list
        }
    def project_new( self, name ):
        """Create and persist a new project owned by the current user."""
        owner = users.get_current_user()
        project = Project( name=name, owner=owner )
        project.put()
        return {
            'key': str( project.key() )
        }
class JsonHandler( RequestHandler, JSONRPCMixin ):
    """Tipfy request handler wiring JsonService into the JSON-RPC endpoint."""
    jsonrpc_service = JsonService()
    # NOTE(review): the trailing comma makes jsonrpc_name a 1-tuple, unlike
    # jsonrpc_summary which is a plain string -- confirm intent.
    jsonrpc_name = 'CLASlite JSON-RPC Service',
    jsonrpc_summary = 'RPC services for CLASlite web client.'
| # -*- coding: utf-8 -*-
"""
rpc.handlers
~~~~~~~~~~~~~~~~~~~~
CLASlite JSON-RPC handlers
:By Michael Geary - http://mg.to/
:See UNLICENSE or http://unlicense.org/ for public domain notice.
"""
from google.appengine.api import users
from tipfy import RequestHandler
from tipfy.ext.jsonrpc import JSONRPCMixin
from models import Project
class JsonService( object ):
    """JSON-RPC methods exposed to the CLASlite web client.

    NOTE(review): project_delete references ``Key`` and ``db`` which are
    not imported in this module (google.appengine.ext.db?) -- confirm.
    """
    def project_delete( self, keytext ):
        # Only the owning user may delete a project.
        owner = users.get_current_user()
        key = Key( keytext )
        project = db.get( key )
        if owner != project.owner:
            return {
                'error': 'Wrong owner'
            }
        db.delete( project )
        return {
            'deleted': keytext
        }
    def project_list( self ):
        """Return the current user's projects, ordered by name descending."""
        owner = users.get_current_user()
        projects = Project.gql(
            'WHERE owner = :1 ORDER BY name DESC',
            owner
        )
        # NOTE(review): ``list`` shadows the builtin; consider renaming.
        list = []
        for project in projects:
            list.append({
                'key': str( project.key() ),
                'name': project.name
            })
        return {
            'projects': list
        }
    def project_new( self, name ):
        """Create and persist a new project owned by the current user."""
        owner = users.get_current_user()
        project = Project( name=name, owner=owner )
        project.put()
        return {
            'key': str( project.key() )
        }
class JsonHandler( RequestHandler, JSONRPCMixin ):
    """Tipfy request handler wiring JsonService into the JSON-RPC endpoint."""
    jsonrpc_service = JsonService()
    # NOTE(review): the trailing comma makes jsonrpc_name a 1-tuple, unlike
    # jsonrpc_summary which is a plain string -- confirm intent.
    jsonrpc_name = 'CLASlite JSON-RPC Service',
    jsonrpc_summary = 'RPC services for CLASlite web client.'
| unlicense | Python |
7bf93097498399cecdcd5d175c0525fa649ed182 | add tests for sloccount stats | zacchiro/debsources,vivekanand1101/debsources,Debian/debsources,vivekanand1101/debsources,sophiejjj/debsources,oorestisime/debsources,matthieucan/debsources,oorestisime/debsources,clemux/debsources,devoxel/debsources,vivekanand1101/debsources,sophiejjj/debsources,clemux/debsources,devoxel/debsources,Debian/debsources,zacchiro/debsources,zacchiro/debsources,oorestisime/debsources,devoxel/debsources,vivekanand1101/debsources,matthieucan/debsources,nonas/debian-qa,nonas/debian-qa,zacchiro/debsources,oorestisime/debsources,nonas/debian-qa,oorestisime/debsources,sophiejjj/debsources,matthieucan/debsources,devoxel/debsources,clemux/debsources,sophiejjj/debsources,clemux/debsources,clemux/debsources,devoxel/debsources,sophiejjj/debsources,nonas/debian-qa,matthieucan/debsources,Debian/debsources,Debian/debsources,Debian/debsources,vivekanand1101/debsources,zacchiro/debsources,matthieucan/debsources | web/tests/test_stats.py | web/tests/test_stats.py | # Copyright (C) 2013 Stefano Zacchiroli <zack@upsilon.cc>
#
# This file is part of Debsources.
#
# Debsources is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from app import views # XXX work around while we fix circular import
import unittest
from nose.tools import istest
from nose.plugins.attrib import attr
import stats
from dbhelpers import DbTestFixture, pg_dump, TEST_DB_NAME, TEST_DB_DUMP
@attr('infra')
class Stats(unittest.TestCase, DbTestFixture):
    """Checks that figures computed by the ``stats`` module match the known
    totals of the reference test database."""
    def setUp(self):
        # Load the reference dump into a scratch database for each test.
        self.db_setup(TEST_DB_NAME, TEST_DB_DUMP)
    def tearDown(self):
        self.db_teardown()
    @istest
    def sizeTotalsMatchReferenceDb(self):
        # Expected per-suite sizes, taken from the reference dump.
        sizes = {
            'squeeze': 44316,
            'wheezy': 39688,
            'jessie': 43032,
            'sid': 43032,
            'experimental': 6520,
        }
        total_size = 122628
        for suite, size in sizes.iteritems():
            self.assertEqual(size, stats.size(self.session, suite=suite))
        self.assertEqual(total_size, stats.size(self.session))
    @istest
    def slocTotalsMatchReferenceDb(self):
        # Expected sloccount breakdown for the jessie suite.
        slocs_jessie = {
            'ansic': 140353,
            'asm': 65,
            'awk': 25,
            'cpp': 41458,
            'cs': 1213,
            'java': 916,
            'lex': 223,
            'lisp': 2167,
            'ml': 5044,
            'objc': 836,
            'perl': 64,
            'python': 2916,
            'ruby': 193,
            'sh': 25022,
            'xml': 14932,
            'yacc': 312,
        }
        # Cross-suite totals for single languages.
        slocs_python = 7760
        slocs_cpp_exp = 36755
        self.assertEqual(slocs_jessie,
                         stats.sloccount_summary(self.session, suite='jessie'))
        self.assertEqual(slocs_python,
                         stats.sloccount_lang(self.session, 'python'))
        self.assertEqual(slocs_cpp_exp,
                         stats.sloccount_lang(self.session, 'cpp',
                                              suite='experimental'))
| # Copyright (C) 2013 Stefano Zacchiroli <zack@upsilon.cc>
#
# This file is part of Debsources.
#
# Debsources is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from app import views # XXX work around while we fix circular import
import unittest
from nose.tools import istest
from nose.plugins.attrib import attr
import stats
from dbhelpers import DbTestFixture, pg_dump, TEST_DB_NAME, TEST_DB_DUMP
@attr('infra')
class Stats(unittest.TestCase, DbTestFixture):
    """Checks that figures computed by the ``stats`` module match the known
    totals of the reference test database."""
    def setUp(self):
        # Load the reference dump into a scratch database for each test.
        self.db_setup(TEST_DB_NAME, TEST_DB_DUMP)
    def tearDown(self):
        self.db_teardown()
    @istest
    def sizeTotalsMatchReferenceDb(self):
        # Expected per-suite sizes, taken from the reference dump.
        sizes = {
            'squeeze': 44316,
            'wheezy': 39688,
            'jessie': 43032,
            'sid': 43032,
            'experimental': 6520,
        }
        total_size = 122628
        for suite, size in sizes.iteritems():
            self.assertEqual(size, stats.size(self.session, suite=suite))
        self.assertEqual(total_size, stats.size(self.session))
    @istest
    def slocTotalsMatchReferenceDb(self):
        pass # TODO
| agpl-3.0 | Python |
ef617d4e98d9c029225a32098cc81aeab1d6deb5 | Add link to gnu coreutils | hatchery/Genepool2,hatchery/genepool | genes/gnu_coreutils/commands.py | genes/gnu_coreutils/commands.py | #!/usr/bin/env python
from genes.posix.traits import only_posix
from genes.process.commands import run
@only_posix()
def chgrp(path, group):
    """Change the group of *path* to *group*."""
    run(['chgrp', group, path])
@only_posix()
def chown(path, user):
    """Change the owner of *path* to *user*."""
    run(['chown', user, path])
@only_posix()
def groupadd(*args):
    """Create a group, forwarding arguments to groupadd.

    NOTE(review): ``list(*args)`` unpacks the varargs tuple into ``list()``;
    it only works for a single iterable argument and splits a lone string
    into characters -- confirm whether ``list(args)`` was intended.
    """
    run(['groupadd'] + list(*args))
@only_posix()
def ln(*args):
    """Create a link (same ``list(*args)`` caveat as groupadd)."""
    run(['ln'] + list(*args))
@only_posix()
def mkdir(path, mode=None):
    """Create directory *path*, optionally with permission *mode*."""
    if mode:
        run(['mkdir', '-m', mode, path])
    else:
        run(['mkdir', path])
@only_posix()
def useradd(*args):
    """Create a user account (same ``list(*args)`` caveat as groupadd)."""
    # FIXME: this is a bad way to do things
    # FIXME: sigh. this is going to be a pain to make it idempotent
    run(['useradd'] + list(*args))
@only_posix()
def usermod(*args):
    """Modify a user account (same ``list(*args)`` caveat as groupadd)."""
    # FIXME: this is a bad way to do things
    run(['usermod'] + list(*args))
| #!/usr/bin/env python
from genes.posix.traits import only_posix
from genes.process.commands import run
@only_posix()
def chgrp(path, group):
    """Change the group of *path* to *group*."""
    run(['chgrp', group, path])
@only_posix()
def chown(path, user):
    """Change the owner of *path* to *user*."""
    run(['chown', user, path])
@only_posix()
def groupadd(*args):
    """Create a group, forwarding arguments to groupadd.

    NOTE(review): ``list(*args)`` unpacks the varargs tuple into ``list()``;
    it only works for a single iterable argument and splits a lone string
    into characters -- confirm whether ``list(args)`` was intended.
    """
    run(['groupadd'] + list(*args))
@only_posix()
def mkdir(path, mode=None):
    """Create directory *path*, optionally with permission *mode*."""
    if mode:
        run(['mkdir', '-m', mode, path])
    else:
        run(['mkdir', path])
@only_posix()
def useradd(*args):
    """Create a user account (same ``list(*args)`` caveat as groupadd)."""
    # FIXME: this is a bad way to do things
    # FIXME: sigh. this is going to be a pain to make it idempotent
    run(['useradd'] + list(*args))
@only_posix()
def usermod(*args):
    """Modify a user account (same ``list(*args)`` caveat as groupadd)."""
    # FIXME: this is a bad way to do things
    run(['usermod'] + list(*args))
| mit | Python |
e8e7a113d1d9cdbd0e1859ac5f0d1c5e372ba940 | bump version | radhermit/pychroot | chroot/_version.py | chroot/_version.py | __version__ = '0.9.12'
| __version__ = '0.9.11'
| bsd-3-clause | Python |
16e52597c0a2c4f4b0e261c71bb634d615c56334 | add scope choice list in clients admin | lafranceinsoumise/api-django,lafranceinsoumise/api-django,lafranceinsoumise/api-django,lafranceinsoumise/api-django | src/clients/admin.py | src/clients/admin.py | from django.contrib import admin
from django import forms
from django.shortcuts import reverse
from django.utils.translation import ugettext_lazy as _
from api.admin import admin_site
from . import models
from .scopes import scopes
class ClientForm(forms.ModelForm):
    """Admin form exposing the client's scopes as a multiple-choice field,
    one choice per scope declared in the scopes module."""
    scopes = forms.MultipleChoiceField(choices=[(scope.name, scope.description) for scope in scopes])
@admin.register(models.Client, site=admin_site)
class ClientAdmin(admin.ModelAdmin):
    """Admin configuration for OAuth clients."""
    form = ClientForm
    fieldsets = (
        (None, {
            'fields': ('label', 'name', 'description')
        }),
        ('OAuth', {
            'fields': ('oauth_enabled', 'trusted', 'uris', 'scopes')
        }),
        (_('Role correspondant'), {
            'fields': ('role_link',)
        })
    )
    list_display = ('label', 'name', 'role_link')
    readonly_fields = ('created', 'modified', 'role_link')
    def role_link(self, obj):
        """Render an HTML link to the change page of the client's role."""
        return '<a href="%s">%s</a>' % (
            reverse('admin:authentication_role_change', args=[obj.role_id]),
            _('Voir le rôle')
        )
    # allow_tags lets the admin render the returned HTML unescaped.
    # NOTE(review): allow_tags was removed in Django 2.0; format_html would
    # be required when upgrading.
    role_link.allow_tags = True
    role_link.short_description = _('Lien vers le rôle')
| from django.contrib import admin
from django.shortcuts import reverse
from django.utils.translation import ugettext_lazy as _
from api.admin import admin_site
from . import models
@admin.register(models.Client, site=admin_site)
class ClientAdmin(admin.ModelAdmin):
    """Admin configuration for OAuth clients."""
    fieldsets = (
        (None, {
            'fields': ('label', 'name', 'description')
        }),
        ('OAuth', {
            'fields': ('oauth_enabled', 'trusted', 'uris', 'scopes')
        }),
        (_('Role correspondant'), {
            'fields': ('role_link',)
        })
    )
    list_display = ('label', 'name', 'role_link')
    readonly_fields = ('created', 'modified', 'role_link')
    def role_link(self, obj):
        """Render an HTML link to the change page of the client's role."""
        return '<a href="%s">%s</a>' % (
            reverse('admin:authentication_role_change', args=[obj.role_id]),
            _('Voir le rôle')
        )
    # allow_tags lets the admin render the returned HTML unescaped.
    # NOTE(review): allow_tags was removed in Django 2.0; format_html would
    # be required when upgrading.
    role_link.allow_tags = True
    role_link.short_description = _('Lien vers le rôle')
| agpl-3.0 | Python |
49c20c9217f57eb5d853613d325a4572051ee499 | use correct field | xaedes/canopen_301_402 | src/canopen_301_402/canopen_msgs/msg.py | src/canopen_301_402/canopen_msgs/msg.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import can
from canopen_301_402.constants import *
from canopen_301_402.canopen_301.cob import CanOpenId
class CanOpenMessage(object):
    def __init__(self, function_code, node_id, service, data, request=False):
        '''
        @summary: Represents a higher level interpretation of a regular can message
        @param function_code: 4 bit opencan function code
        @param node_id: 7 bit node_id
        @param service: CanOpenService
        @param data: byte array [maximum length 8]
        @param request: True when this is a remote transmission request
        '''
        super(CanOpenMessage, self).__init__()
        self._function_code = function_code
        self._node_id = node_id
        self._service = service
        # NMT and SYNC are broadcast services: they are addressed to all
        # nodes (node id 0 on the wire, see to_can_msg).
        if self._service in [CanOpenService.nmt,CanOpenService.sync]:
            self._broadcast = True
        else:
            self._broadcast = False
        self._data = data
        self._request = request
    @property
    def node_id(self):
        # 7 bit node id.
        return self._node_id
    @property
    def request(self):
        # True when this message is a remote transmission request.
        return self._request
    @property
    def function_code(self):
        # 4 bit CANopen function code.
        return self._function_code
    @property
    def broadcast(self):
        # True for services addressed to every node (NMT, SYNC).
        return self._broadcast
    @property
    def service(self):
        # CanOpenService this message belongs to.
        return self._service
    @property
    def data(self):
        # Payload bytes (at most 8).
        return self._data
    def to_can_msg(self):
        '''Encode this message as a raw ``can.Message`` (11 bit identifier).'''
        if self.broadcast:
            arbitration_id = CanOpenId.encode(self.function_code, 0)
        else:
            arbitration_id = CanOpenId.encode(self.function_code, self.node_id)
        return can.Message(arbitration_id=arbitration_id,data=self.data,extended_id=False,is_remote_frame=self.request)
    @classmethod
    def from_can_msg(cls, msg, canopen):
        '''Decode a raw ``can.Message``; the service is resolved through the
        node's connection set.'''
        assert not hasattr(msg,"extended_id") or (msg.extended_id == False)
        function_code, node_id = CanOpenId.decode(msg.arbitration_id)
        connection_set = canopen.get_connection_set(node_id)
        service = connection_set.determine_service(function_code, node_id)
        return CanOpenMessage(function_code,node_id,service,msg.data,request=msg.is_remote_frame)
| #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import can
from canopen_301_402.constants import *
from canopen_301_402.canopen_301.cob import CanOpenId
class CanOpenMessage(object):
    def __init__(self, function_code, node_id, service, data, request=False):
        '''
        @summary: Represents a higher level interpretation of a regular can message
        @param function_code: 4 bit opencan function code
        @param node_id: 7 bit node_id
        @param service: CanOpenService
        @param data: byte array [maximum length 8]
        @param request: True when this is a remote transmission request
        '''
        super(CanOpenMessage, self).__init__()
        self._function_code = function_code
        self._node_id = node_id
        self._service = service
        # NMT and SYNC are broadcast services: they are addressed to all
        # nodes (node id 0 on the wire, see to_can_msg).
        if self._service in [CanOpenService.nmt,CanOpenService.sync]:
            self._broadcast = True
        else:
            self._broadcast = False
        self._data = data
        self._request = request
    @property
    def node_id(self):
        # 7 bit node id.
        return self._node_id
    @property
    def request(self):
        # True when this message is a remote transmission request.
        return self._request
    @property
    def function_code(self):
        # 4 bit CANopen function code.
        return self._function_code
    @property
    def broadcast(self):
        # True for services addressed to every node (NMT, SYNC).
        return self._broadcast
    @property
    def service(self):
        # CanOpenService this message belongs to.
        return self._service
    @property
    def data(self):
        # Payload bytes (at most 8).
        return self._data
    def to_can_msg(self):
        '''Encode this message as a raw ``can.Message`` (11 bit identifier).'''
        if self.broadcast:
            arbitration_id = CanOpenId.encode(self.function_code, 0)
        else:
            arbitration_id = CanOpenId.encode(self.function_code, self.node_id)
        # Bug fix: this previously read ``self.is_remote_frame``, an attribute
        # never defined on this class (only the ``request`` property exists),
        # so every call raised AttributeError.
        return can.Message(arbitration_id=arbitration_id,data=self.data,extended_id=False,is_remote_frame=self.request)
    @classmethod
    def from_can_msg(cls, msg, canopen):
        '''Decode a raw ``can.Message``; the service is resolved through the
        node's connection set.'''
        assert not hasattr(msg,"extended_id") or (msg.extended_id == False)
        function_code, node_id = CanOpenId.decode(msg.arbitration_id)
        connection_set = canopen.get_connection_set(node_id)
        service = connection_set.determine_service(function_code, node_id)
        return CanOpenMessage(function_code,node_id,service,msg.data,request=msg.is_remote_frame)
| mit | Python |
9ac36bb7462e26b5da9834a42af4ecd1be127d62 | Update __init__.py | selvakarthik21/newspaper,selvakarthik21/newspaper | newspaperdemo/__init__.py | newspaperdemo/__init__.py | from flask import Flask, request, render_template, redirect, url_for,jsonify
from newspaper import Article
from xml.etree import ElementTree
app = Flask(__name__)
# Debug logging
import logging
import sys
# Defaults to stdout
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
# Bug fix: the bare ``except:`` also trapped SystemExit/KeyboardInterrupt,
# and ``ex.message`` does not exist on Python 3 exceptions; catch Exception
# and log str(ex) instead.
try:
    log.info('Logging to console')
except Exception:
    _, ex, _ = sys.exc_info()
    log.error(str(ex))
@app.route('/')
def index():
    """Serve the landing page with the URL-submission form."""
    return render_template('index.html')
@app.route('/article')
def show_article():
    """Fetch, parse and summarize the article at the ``url`` query parameter.

    Returns a JSON object with empty fields when no URL was supplied.
    """
    url_to_clean = request.args.get('url')
    if not url_to_clean:
        return jsonify({
            'authors': '',
            'title': '',
            'text': '',
            'keywords': '',
            'summary': ''
        })
    article = Article(url_to_clean)
    article.download()
    article.parse()
    # Bug fix: the bare ``except:`` clauses also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception instead.
    try:
        html_string = ElementTree.tostring(article.clean_top_node)
    except Exception:
        html_string = "Error converting html to string."
    try:
        article.nlp()  # keyword/summary extraction is best-effort
    except Exception:
        log.error("Couldn't process with NLP")
    # NOTE(review): html_string is computed but never returned -- confirm
    # whether it should be part of the response.
    a = {
        'authors': str(', '.join(article.authors)),
        'title': article.title,
        'text': article.text,
        'keywords': str(', '.join(article.keywords)),
        'summary': article.summary
    }
    return jsonify(a)
| from flask import Flask, request, render_template, redirect, url_for,jsonify
from newspaper import Article
from xml.etree import ElementTree
app = Flask(__name__)
# Debug logging
import logging
import sys
# Defaults to stdout
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
try:
    log.info('Logging to console')
except:
    # NOTE(review): bare except also traps SystemExit/KeyboardInterrupt,
    # and ex.message does not exist on Python 3 exceptions.
    _, ex, _ = sys.exc_info()
    log.error(ex.message)
# NOTE(review): '/articles/show' serves the form page while '/' performs
# the scraping -- confirm the route mapping is not swapped.
@app.route('/articles/show')
def index():
    """Serve the landing page with the URL-submission form."""
    return render_template('index.html')
@app.route('/')
def show_article():
    """Fetch, parse and summarize the article at the ``url`` query
    parameter; returns empty JSON fields when no URL is supplied."""
    url_to_clean = request.args.get('url')
    if not url_to_clean:
        return jsonify({
            'authors': '',
            'title': '',
            'text': '',
            'keywords': '',
            'summary': ''
        })
    article = Article(url_to_clean)
    article.download()
    article.parse()
    try:
        html_string = ElementTree.tostring(article.clean_top_node)
    except:
        html_string = "Error converting html to string."
    try:
        article.nlp()
    except:
        log.error("Couldn't process with NLP")
    # NOTE(review): html_string is computed but never used in the response.
    a = {
        'authors': str(', '.join(article.authors)),
        'title': article.title,
        'text': article.text,
        'keywords': str(', '.join(article.keywords)),
        'summary': article.summary
    }
    return jsonify(a)
| mit | Python |
8ff9f79cc3705fb55c692eca811db97feb7b55e9 | Replace all TLSSocket implementations with the new OpenSSL one | freevo/kaa-base,freevo/kaa-base | src/net/tls/__init__.py | src/net/tls/__init__.py | # -* -coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# tls/__init__.py - TLS support for the Kaa Framework
# -----------------------------------------------------------------------------
# Copyright 2008-2012 Dirk Meyer, Jason Tackaberry
#
# Please see the file AUTHORS for a complete list of authors.
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version
# 2.1 as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
#
# -----------------------------------------------------------------------------
from __future__ import absolute_import
from .openssl import TLSSocket
| # -* -coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# tls/__init__.py - TLS support for the Kaa Framework
# -----------------------------------------------------------------------------
# $Id$
#
# This module wraps TLS for client and server based on tlslite. See
# http://trevp.net/tlslite/docs/public/tlslite.TLSConnection.TLSConnection-class.html
# for more information about optional paramater.
#
# -----------------------------------------------------------------------------
# Copyright 2008-2012 Dirk Meyer, Jason Tackaberry
#
# Please see the file AUTHORS for a complete list of authors.
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version
# 2.1 as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
#
# -----------------------------------------------------------------------------
from __future__ import absolute_import
from .common import *
# Optional backends: each import is attempted independently and the name is
# bound to None when the backend's dependencies are missing.
try:
    from .m2 import M2TLSSocket
except ImportError:
    M2TLSSocket = None
try:
    from .tlslite import TLSAuthenticationError, TLSKey, TLSLiteConnection, TLSLiteSocket
except ImportError:
    TLSLiteSocket = None
try:
    from .gnutls import GNUTLSSocket
except ImportError:
    GNUTLSSocket = None
# FIXME: for now, keep TLSLiteSocket as general TLSSocket object. This
# must change since tlslite is not maintained anymore. IMHO the best
# solution would be to use gnutls but the python-gnutls bindings have
# no SRP support.
TLSSocket = TLSLiteSocket or M2TLSSocket or GNUTLSSocket
# Bug fix: the previous check (TLSLiteSocket == M2TLSSocket == None) ignored
# GNUTLSSocket and raised ImportError even when the gnutls backend imported
# fine, contradicting both the fallback chain above and the error message.
# Testing the selected backend covers all three.
if TLSSocket is None:
    raise ImportError('No suitable TLS backend found: tried tlslite, M2Crypto and gnutls')
| lgpl-2.1 | Python |
2df4c5e812f89e8194691415f75434f608577daa | Make redis_process.py point at the correct directory | prophile/compd,prophile/compd | src/redis_process.py | src/redis_process.py | from twisted.internet import reactor, protocol
import sys
import config
class RedisDataReceiver(protocol.ProcessProtocol):
    """Process protocol that mirrors the Redis server's output to stderr and
    fires *on_started* once the server reports readiness."""
    def __init__(self, on_started):
        # Callback invoked once on the server's readiness message.
        self.on_started = on_started
    def outReceived(self, data):
        print >>sys.stderr, data,
        # Redis prints this line once it is accepting connections.
        if 'now ready to accept connections' in data:
            self.on_started()
    def errReceived(self, data):
        print >>sys.stderr, data,
def run_redis_server(on_started):
    """Spawn a local Redis server process via Twisted's reactor.

    *on_started* is invoked once the server reports readiness on stdout.
    """
    print >>sys.stderr, "Starting Redis server..."
    server = config.redis['server']
    receiver = RedisDataReceiver(on_started)
    reactor.spawnProcess(
        receiver,
        server,
        args = [server, 'redis.conf'],
        path = '../database/')
| from twisted.internet import reactor, protocol
import sys
import config
class RedisDataReceiver(protocol.ProcessProtocol):
    """Process protocol that mirrors the Redis server's output to stderr and
    fires *on_started* once the server reports readiness."""
    def __init__(self, on_started):
        # Callback invoked once on the server's readiness message.
        self.on_started = on_started
    def outReceived(self, data):
        print >>sys.stderr, data,
        # Redis prints this line once it is accepting connections.
        if 'now ready to accept connections' in data:
            self.on_started()
    def errReceived(self, data):
        print >>sys.stderr, data,
def run_redis_server(on_started):
    """Spawn a local Redis server process via Twisted's reactor.

    *on_started* is invoked once the server reports readiness on stdout.
    """
    print >>sys.stderr, "Starting Redis server..."
    server = config.redis['server']
    receiver = RedisDataReceiver(on_started)
    reactor.spawnProcess(
        receiver,
        server,
        args = [server, 'redis.conf'],
        path = '../redis/')
| mit | Python |
8ac142af2afc577a47197fe9bc821cb796883f38 | Check for autoincrement before executing the instruction | darbaga/simple_compiler | virtual_machine.py | virtual_machine.py | class VirtualMachine:
def __init__(self, bytecodes, ram_size=256, executing=True):
self.bytecodes = bytecodes
self.data = [None]*ram_size
self.stack = []
self.executing = executing
self.pc = 0
def push(self, value):
"""Push something onto the stack."""
self.stack += [value]
def pop(self):
"""Pop something from the stack. Crash if empty."""
return self.stack.pop()
    def read_memory(self, index):
        """Read from memory, crashing (IndexError) if index is out of bounds."""
        return self.data[index]
    def write_memory(self, index, value):
        """Write to memory. Crash (IndexError) if index is out of bounds."""
        self.data[index] = value
def run(self):
while self.executing:
increment = self.bytecodes[self.pc].autoincrement
self.bytecodes[self.pc].execute(self)
if increment:
self.pc += 1
| class VirtualMachine:
    def __init__(self, bytecodes, ram_size=256, executing=True):
        """Create a machine for *bytecodes* with *ram_size* empty RAM cells."""
        self.bytecodes = bytecodes
        self.data = [None]*ram_size
        self.stack = []
        self.executing = executing
        self.pc = 0
    def push(self, value):
        """Push *value* onto the evaluation stack."""
        self.stack += [value]
    def pop(self):
        """Pop and return the top stack value. Crash (IndexError) if empty."""
        return self.stack.pop()
    def read_memory(self, index):
        """Read from memory, crashing (IndexError) if index is out of bounds."""
        return self.data[index]
    def write_memory(self, index, value):
        """Write to memory. Crash (IndexError) if index is out of bounds."""
        self.data[index] = value
def run(self):
while self.executing:
self.bytecodes[self.pc].execute(self)
if self.bytecodes[self.pc].autoincrement:
self.pc += 1
| bsd-3-clause | Python |
b944e86ed4a4d854a4a084dd1af52e4da2fa8090 | Update logging_conf.py added attributes to docstring | JoseALermaIII/clashcallerbot-reddit,JoseALermaIII/clashcallerbot-reddit | logging_conf.py | logging_conf.py | #! python3
# -*- coding: utf-8 -*-
"""Defines logging dictionary.
Module defines dictionary for logging.config.dictConfig()
Attributes:
LOGGING (dict): Dictionary containing definitions for the loggers, handlers,
and formatters.
"""
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        # Verbose format used for the log file.
        'detailed': {
            'format': 'F1 %(asctime)s %(name)-15s %(levelname)-8s %(message)s',
            'class': 'logging.Formatter'
        },
        # Compact format used for console output.
        'brief': {
            'format': 'F2 %(levelname)-8s: %(name)-15s: %(message)s',
            'class': 'logging.Formatter'
        }
    },
    'handlers': {
        # INFO and above to stdout.
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
            'stream': 'ext://sys.stdout',
            'formatter': 'brief'
        },
        # DEBUG and above to a 100 MiB rotating log file with 3 backups.
        'file': {
            'class': 'logging.handlers.RotatingFileHandler',
            'level': 'DEBUG',
            'filename': 'clashcaller.log',
            'maxBytes': 104857600,
            'backupCount': 3,
            'formatter': 'detailed'
        }
    },
    'loggers': {
        # NOTE(review): dictConfig() treats this as a logger literally named
        # "root"; the actual root logger is configured via a top-level 'root'
        # key -- confirm which was intended.
        'root': {
            'level': 'NOTSET',
            'handlers': ['console', 'file']
        },
        # NOTE(review): 'qualname' is a fileConfig() concept; dictConfig()
        # ignores it.
        'reply': {
            'level': 'DEBUG',
            'handlers': ['console', 'file'],
            'propagate': 0,
            'qualname': 'reply'
        },
        'search': {
            'level': 'DEBUG',
            'handlers': ['console', 'file'],
            'propagate': 0,
            'qualname': 'search'
        },
        'database': {
            'level': 'DEBUG',
            'handlers': ['console', 'file'],
            'propagate': 0,
            'qualname': 'database'
        }
    }
}
| #! python3
# -*- coding: utf-8 -*-
"""Defines logging dictionary.
Module defines dictionary for logging.config.dictConfig()
"""
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        # Verbose format used for the log file.
        'detailed': {
            'format': 'F1 %(asctime)s %(name)-15s %(levelname)-8s %(message)s',
            'class': 'logging.Formatter'
        },
        # Compact format used for console output.
        'brief': {
            'format': 'F2 %(levelname)-8s: %(name)-15s: %(message)s',
            'class': 'logging.Formatter'
        }
    },
    'handlers': {
        # INFO and above to stdout.
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
            'stream': 'ext://sys.stdout',
            'formatter': 'brief'
        },
        # DEBUG and above to a 100 MiB rotating log file with 3 backups.
        'file': {
            'class': 'logging.handlers.RotatingFileHandler',
            'level': 'DEBUG',
            'filename': 'clashcaller.log',
            'maxBytes': 104857600,
            'backupCount': 3,
            'formatter': 'detailed'
        }
    },
    'loggers': {
        # NOTE(review): dictConfig() treats this as a logger literally named
        # "root"; the actual root logger is configured via a top-level 'root'
        # key -- confirm which was intended.
        'root': {
            'level': 'NOTSET',
            'handlers': ['console', 'file']
        },
        # NOTE(review): 'qualname' is a fileConfig() concept; dictConfig()
        # ignores it.
        'reply': {
            'level': 'DEBUG',
            'handlers': ['console', 'file'],
            'propagate': 0,
            'qualname': 'reply'
        },
        'search': {
            'level': 'DEBUG',
            'handlers': ['console', 'file'],
            'propagate': 0,
            'qualname': 'search'
        },
        'database': {
            'level': 'DEBUG',
            'handlers': ['console', 'file'],
            'propagate': 0,
            'qualname': 'database'
        }
    }
}
| mit | Python |
450b72dd97534c7317a6256ec2b2cd30bc0a0e59 | Change API for parsing httplib cookies | p/ocookie | ocookie/httplibadapter.py | ocookie/httplibadapter.py | from . import CookieParser, CookieDict
def parse_cookies(httplib_set_cookie_headers):
    """Convert raw ``Set-Cookie`` header lines into cookie objects.

    Each entry is expected to look like ``"Set-Cookie: <value>"``; the text
    after the first space is handed to CookieParser.
    """
    parsed = []
    for raw_header in httplib_set_cookie_headers:
        _, raw_value = raw_header.strip().split(' ', 1)
        parsed.append(CookieParser.parse_set_cookie_value(raw_value.strip()))
    return parsed
def parse_response_cookies(httplib_response):
    """Return all cookies set by *httplib_response* (Set-Cookie and Set-Cookie2)."""
    # RFC 2616 specifies that multiple headers can be combined into
    # a single header by joining their values with commas
    # (http://stackoverflow.com/questions/2454494/urllib2-multiple-set-cookie-headers-in-response).
    # However, Set-Cookie headers simply cannot be combined in that way
    # as they have commas in dates and also there is no standard
    # way of quoting cookie values, which may also have commas.
    # Therefore the correct way of accessing Set-Cookie headers
    # is via httplib_response.msg.getallmatchingheaders() call.
    # http://mail.python.org/pipermail/python-bugs-list/2007-March/037618.html
    # Also:
    # http://stackoverflow.com/questions/1649401/how-to-handle-multiple-set-cookie-header-in-http-response
    headers = httplib_response.msg.getallmatchingheaders('set-cookie')
    headers.extend(httplib_response.msg.getallmatchingheaders('set-cookie2'))
    return parse_cookies(headers)
| from . import CookieParser, CookieDict
def parse_cookies(httplib_headers):
    """Extract cookies from httplib (name, value) header pairs.

    Only headers whose name is ``Set-Cookie`` (case-insensitive) are parsed.
    """
    result = []
    for header in httplib_headers:
        if header[0].lower() == 'set-cookie':
            result.append(CookieParser.parse_set_cookie_value(header[1]))
    return result
| bsd-2-clause | Python |
fb6a7a8684977af49bab96580a5c07c254ba164f | Clarify SubDagOperator exception | sekikn/incubator-airflow,rishibarve/incubator-airflow,saguziel/incubator-airflow,caseyching/incubator-airflow,zack3241/incubator-airflow,gilt/incubator-airflow,Tagar/incubator-airflow,wangtuanjie/airflow,andrewmchen/incubator-airflow,jfantom/incubator-airflow,wndhydrnt/airflow,wndhydrnt/airflow,owlabs/incubator-airflow,brandsoulmates/incubator-airflow,b-cuts/airflow,jbhsieh/incubator-airflow,andyxhadji/incubator-airflow,LilithWittmann/airflow,sdiazb/airflow,dgies/incubator-airflow,holygits/incubator-airflow,briceburg/airflow,cswaroop/airflow,edgarRd/incubator-airflow,criccomini/airflow,ty707/airflow,nathanielvarona/airflow,asnir/airflow,wwwslinger/airflow,jgao54/airflow,danielvdende/incubator-airflow,b-cuts/airflow,jesusfcr/airflow,artwr/airflow,dud225/incubator-airflow,apache/airflow,apache/airflow,dud225/incubator-airflow,mistercrunch/airflow,bellhops/airflow,apache/incubator-airflow,nathanielvarona/airflow,edgarRd/incubator-airflow,followyourheart/airflow,jbhsieh/incubator-airflow,NielsZeilemaker/incubator-airflow,preete-dixit-ck/incubator-airflow,moritzpein/airflow,wileeam/airflow,r39132/airflow,aminghadersohi/airflow,wooga/airflow,ProstoMaxim/incubator-airflow,N3da/incubator-airflow,patrickleotardif/airflow,sid88in/incubator-airflow,plypaul/airflow,adrpar/incubator-airflow,kerzhner/airflow,ty707/airflow,Twistbioscience/incubator-airflow,saadatqadri/airflow,juvoinc/airflow,akosel/incubator-airflow,cfei18/incubator-airflow,CloverHealth/airflow,owlabs/incubator-airflow,MetrodataTeam/incubator-airflow,mtagle/airflow,dud225/incubator-airflow,gritlogic/incubator-airflow,mtdewulf/incubator-airflow,silky/airflow,modsy/incubator-airflow,juvoinc/airflow,gtoonstra/airflow,mtagle/airflow,jiwang576/incubator-airflow,gilt/incubator-airflow,xiaowenhuman/airflow,saguziel/incubator-airflow,plypaul/airflow,juvoinc/airflow,plypaul/airflow,mtustin-handy/airflow,criccomini/airflow,mylons/incubator-airflow
,xiaowenhuman/airflow,gritlogic/incubator-airflow,preete-dixit-ck/incubator-airflow,sid88in/incubator-airflow,jeremyclover/airflow,spektom/incubator-airflow,adamhaney/airflow,wangtuanjie/airflow,Chedi/airflow,cfei18/incubator-airflow,lyft/incubator-airflow,mrkm4ntr/incubator-airflow,malmiron/incubator-airflow,adamhaney/airflow,DinoCow/airflow,apache/airflow,kerzhner/airflow,jgao54/airflow,zoyahav/incubator-airflow,zodiac/incubator-airflow,jfantom/incubator-airflow,jason-z-hang/airflow,DEVELByte/incubator-airflow,plypaul/airflow,Acehaidrey/incubator-airflow,LilithWittmann/airflow,adrpar/incubator-airflow,preete-dixit-ck/incubator-airflow,juvoinc/airflow,malmiron/incubator-airflow,AllisonWang/incubator-airflow,apache/airflow,apache/incubator-airflow,MortalViews/incubator-airflow,nathanielvarona/airflow,yoziru-desu/airflow,mcanthony/airflow,artwr/airflow,john5223/airflow,briceburg/airflow,kapil-malik/airflow,johnw424/airflow,ty707/airflow,d-lee/airflow,RealImpactAnalytics/airflow,DEVELByte/incubator-airflow,mrares/incubator-airflow,wileeam/airflow,lyft/incubator-airflow,btallman/incubator-airflow,dgies/incubator-airflow,cjqian/incubator-airflow,jhsenjaliya/incubator-airflow,vineet-rh/incubator-airflow,AllisonWang/incubator-airflow,jiwang576/incubator-airflow,CloverHealth/airflow,mtustin-handy/airflow,hgrif/incubator-airflow,zoyahav/incubator-airflow,skudriashev/incubator-airflow,MortalViews/incubator-airflow,CloverHealth/airflow,r39132/airflow,jiwang576/incubator-airflow,r39132/airflow,yati-sagade/incubator-airflow,Twistbioscience/incubator-airflow,holygits/incubator-airflow,NielsZeilemaker/incubator-airflow,yati-sagade/incubator-airflow,danielvdende/incubator-airflow,Acehaidrey/incubator-airflow,storpipfugl/airflow,varantz/airflow,patrickleotardif/airflow,sid88in/incubator-airflow,stverhae/incubator-airflow,patrickleotardif/airflow,KL-WLCR/incubator-airflow,holygits/incubator-airflow,jlowin/airflow,rishibarve/incubator-airflow,wwwslinger/airflow,jwi078/incubator-airfl
ow,easytaxibr/airflow,alexvanboxel/airflow,jason-z-hang/airflow,aminghadersohi/airflow,xiaowenhuman/airflow,mcanthony/airflow,mrares/incubator-airflow,dgies/incubator-airflow,zodiac/incubator-airflow,jhsenjaliya/incubator-airflow,danielvdende/incubator-airflow,opensignal/airflow,jlowin/airflow,spektom/incubator-airflow,cfei18/incubator-airflow,yk5/incubator-airflow,mtustin-handy/airflow,saguziel/incubator-airflow,fenglu-g/incubator-airflow,zoyahav/incubator-airflow,bolkedebruin/airflow,dmitry-r/incubator-airflow,hamedhsn/incubator-airflow,gritlogic/incubator-airflow,mistercrunch/airflow,sekikn/incubator-airflow,airbnb/airflow,jhsenjaliya/incubator-airflow,asnir/airflow,ledsusop/airflow,Acehaidrey/incubator-airflow,ledsusop/airflow,kerzhner/airflow,saadatqadri/airflow,biln/airflow,bolkedebruin/airflow,wndhydrnt/airflow,jwi078/incubator-airflow,apache/airflow,saadatqadri/airflow,mrares/incubator-airflow,cswaroop/airflow,skudriashev/incubator-airflow,nkhuyu/airflow,skudriashev/incubator-airflow,moritzpein/airflow,wxiang7/airflow,forevernull/incubator-airflow,mattuuh7/incubator-airflow,b-cuts/airflow,jampp/airflow,wolfier/incubator-airflow,caseyching/incubator-airflow,ronfung/incubator-airflow,cjqian/incubator-airflow,N3da/incubator-airflow,alexvanboxel/airflow,andyxhadji/incubator-airflow,zodiac/incubator-airflow,sergiohgz/incubator-airflow,Twistbioscience/incubator-airflow,storpipfugl/airflow,wangtuanjie/airflow,opensignal/airflow,jesusfcr/airflow,adamhaney/airflow,yiqingj/airflow,sergiohgz/incubator-airflow,ronfung/incubator-airflow,xiaowenhuman/airflow,danielvdende/incubator-airflow,brandsoulmates/incubator-airflow,lancezlin/airflow,apache/airflow,cjqian/incubator-airflow,bellhops/airflow,edgarRd/incubator-airflow,lxneng/incubator-airflow,yk5/incubator-airflow,mtagle/airflow,jfantom/incubator-airflow,biln/airflow,wxiang7/airflow,AllisonWang/incubator-airflow,mrkm4ntr/incubator-airflow,wolfier/incubator-airflow,griffinqiu/airflow,neovintage/airflow,cjqian/incubator-a
irflow,mtustin-handy/airflow,jiwang576/incubator-airflow,nkhuyu/airflow,silky/airflow,subodhchhabra/airflow,yoziru-desu/airflow,d-lee/airflow,dhuang/incubator-airflow,yiqingj/airflow,ProstoMaxim/incubator-airflow,ty707/airflow,sergiohgz/incubator-airflow,Fokko/incubator-airflow,CloverHealth/airflow,wwwslinger/airflow,akosel/incubator-airflow,sdiazb/airflow,forevernull/incubator-airflow,wtmmac/airflow,neovintage/airflow,briceburg/airflow,vijaysbhat/incubator-airflow,janczak10/incubator-airflow,wolfier/incubator-airflow,OpringaoDoTurno/airflow,btallman/incubator-airflow,airbnb/airflow,aminghadersohi/airflow,janczak10/incubator-airflow,bolkedebruin/airflow,apache/incubator-airflow,hamedhsn/incubator-airflow,cfei18/incubator-airflow,zack3241/incubator-airflow,NielsZeilemaker/incubator-airflow,sid88in/incubator-airflow,griffinqiu/airflow,dhuang/incubator-airflow,Chedi/airflow,skudriashev/incubator-airflow,wtmmac/airflow,opensignal/airflow,wileeam/airflow,malmiron/incubator-airflow,ronfung/incubator-airflow,cswaroop/airflow,gilt/incubator-airflow,artwr/airflow,kapil-malik/airflow,Acehaidrey/incubator-airflow,caseyching/incubator-airflow,jeremyclover/airflow,jason-z-hang/airflow,jampp/airflow,griffinqiu/airflow,mylons/incubator-airflow,sergiohgz/incubator-airflow,griffinqiu/airflow,modsy/incubator-airflow,mtdewulf/incubator-airflow,LilithWittmann/airflow,yk5/incubator-airflow,fenglu-g/incubator-airflow,followyourheart/airflow,lyft/incubator-airflow,wtmmac/airflow,MetrodataTeam/incubator-airflow,AllisonWang/incubator-airflow,dhuang/incubator-airflow,holygits/incubator-airflow,owlabs/incubator-airflow,cademarkegard/airflow,mcanthony/airflow,biln/airflow,cfei18/incubator-airflow,silky/airflow,alexvanboxel/airflow,dgies/incubator-airflow,bolkedebruin/airflow,aminghadersohi/airflow,followyourheart/airflow,neovintage/airflow,fenglu-g/incubator-airflow,wooga/airflow,jgao54/airflow,sdiazb/airflow,nathanielvarona/airflow,dud225/incubator-airflow,johnw424/airflow,danielvdende/incuba
tor-airflow,jbhsieh/incubator-airflow,caseyching/incubator-airflow,ProstoMaxim/incubator-airflow,biln/airflow,bolkedebruin/airflow,RealImpactAnalytics/airflow,Tagar/incubator-airflow,kapil-malik/airflow,OpringaoDoTurno/airflow,ledsusop/airflow,zodiac/incubator-airflow,mrares/incubator-airflow,forevernull/incubator-airflow,artwr/airflow,Acehaidrey/incubator-airflow,adrpar/incubator-airflow,rishibarve/incubator-airflow,mcanthony/airflow,yati-sagade/incubator-airflow,ronfung/incubator-airflow,dmitry-r/incubator-airflow,d-lee/airflow,hgrif/incubator-airflow,lancezlin/airflow,followyourheart/airflow,mattuuh7/incubator-airflow,yiqingj/airflow,wxiang7/airflow,apache/incubator-airflow,DinoCow/airflow,b-cuts/airflow,jwi078/incubator-airflow,MortalViews/incubator-airflow,LilithWittmann/airflow,kerzhner/airflow,mrkm4ntr/incubator-airflow,brandsoulmates/incubator-airflow,vineet-rh/incubator-airflow,janczak10/incubator-airflow,vineet-rh/incubator-airflow,redengineer/airflow,wangtuanjie/airflow,akosel/incubator-airflow,Tagar/incubator-airflow,mylons/incubator-airflow,hgrif/incubator-airflow,subodhchhabra/airflow,owlabs/incubator-airflow,mistercrunch/airflow,Acehaidrey/incubator-airflow,cfei18/incubator-airflow,gtoonstra/airflow,d-lee/airflow,mylons/incubator-airflow,airbnb/airflow,silky/airflow,varantz/airflow,jampp/airflow,john5223/airflow,NielsZeilemaker/incubator-airflow,neovintage/airflow,moritzpein/airflow,bellhops/airflow,KL-WLCR/incubator-airflow,wolfier/incubator-airflow,Tagar/incubator-airflow,spektom/incubator-airflow,dmitry-r/incubator-airflow,briceburg/airflow,kapil-malik/airflow,jlowin/airflow,mattuuh7/incubator-airflow,johnw424/airflow,opensignal/airflow,edgarRd/incubator-airflow,john5223/airflow,modsy/incubator-airflow,jlowin/airflow,wileeam/airflow,jesusfcr/airflow,varantz/airflow,janczak10/incubator-airflow,wndhydrnt/airflow,fenglu-g/incubator-airflow,DinoCow/airflow,johnw424/airflow,lancezlin/airflow,akosel/incubator-airflow,nathanielvarona/airflow,bellhops/airf
low,storpipfugl/airflow,preete-dixit-ck/incubator-airflow,nkhuyu/airflow,KL-WLCR/incubator-airflow,forevernull/incubator-airflow,MetrodataTeam/incubator-airflow,lxneng/incubator-airflow,N3da/incubator-airflow,redengineer/airflow,DinoCow/airflow,subodhchhabra/airflow,jason-z-hang/airflow,modsy/incubator-airflow,lyft/incubator-airflow,redengineer/airflow,btallman/incubator-airflow,hgrif/incubator-airflow,jesusfcr/airflow,OpringaoDoTurno/airflow,Fokko/incubator-airflow,dhuang/incubator-airflow,jampp/airflow,stverhae/incubator-airflow,moritzpein/airflow,vineet-rh/incubator-airflow,zack3241/incubator-airflow,airbnb/airflow,asnir/airflow,stverhae/incubator-airflow,jeremyclover/airflow,jgao54/airflow,mrkm4ntr/incubator-airflow,saadatqadri/airflow,asnir/airflow,easytaxibr/airflow,mtdewulf/incubator-airflow,lxneng/incubator-airflow,btallman/incubator-airflow,yati-sagade/incubator-airflow,cademarkegard/airflow,saguziel/incubator-airflow,redengineer/airflow,wtmmac/airflow,easytaxibr/airflow,sekikn/incubator-airflow,mistercrunch/airflow,gtoonstra/airflow,wooga/airflow,wooga/airflow,andrewmchen/incubator-airflow,spektom/incubator-airflow,jeremyclover/airflow,mtdewulf/incubator-airflow,andyxhadji/incubator-airflow,andrewmchen/incubator-airflow,patrickleotardif/airflow,cademarkegard/airflow,wxiang7/airflow,Chedi/airflow,wwwslinger/airflow,ProstoMaxim/incubator-airflow,criccomini/airflow,DEVELByte/incubator-airflow,jhsenjaliya/incubator-airflow,mattuuh7/incubator-airflow,hamedhsn/incubator-airflow,gilt/incubator-airflow,sdiazb/airflow,Fokko/incubator-airflow,yoziru-desu/airflow,KL-WLCR/incubator-airflow,cademarkegard/airflow,danielvdende/incubator-airflow,subodhchhabra/airflow,sekikn/incubator-airflow,MortalViews/incubator-airflow,ledsusop/airflow,zack3241/incubator-airflow,r39132/airflow,Fokko/incubator-airflow,varantz/airflow,Chedi/airflow,malmiron/incubator-airflow,vijaysbhat/incubator-airflow,vijaysbhat/incubator-airflow,yk5/incubator-airflow,gtoonstra/airflow,RealImpactAnalyti
cs/airflow,yiqingj/airflow,vijaysbhat/incubator-airflow,andyxhadji/incubator-airflow,brandsoulmates/incubator-airflow,jfantom/incubator-airflow,hamedhsn/incubator-airflow,dmitry-r/incubator-airflow,john5223/airflow,jwi078/incubator-airflow,adrpar/incubator-airflow,Twistbioscience/incubator-airflow,lancezlin/airflow,lxneng/incubator-airflow,zoyahav/incubator-airflow,stverhae/incubator-airflow,nathanielvarona/airflow,DEVELByte/incubator-airflow,MetrodataTeam/incubator-airflow,andrewmchen/incubator-airflow,rishibarve/incubator-airflow,cswaroop/airflow,nkhuyu/airflow,N3da/incubator-airflow,mtagle/airflow,yoziru-desu/airflow,OpringaoDoTurno/airflow,storpipfugl/airflow,adamhaney/airflow,criccomini/airflow,gritlogic/incubator-airflow,RealImpactAnalytics/airflow,alexvanboxel/airflow,easytaxibr/airflow,jbhsieh/incubator-airflow | airflow/operators/subdag_operator.py | airflow/operators/subdag_operator.py | from airflow.utils import AirflowException
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
from airflow.executors import DEFAULT_EXECUTOR
class SubDagOperator(BaseOperator):
template_fields = tuple()
ui_color = '#555'
ui_fgcolor = '#fff'
@apply_defaults
def __init__(
self,
subdag,
executor=DEFAULT_EXECUTOR,
*args, **kwargs):
"""
Yo dawg. This runs a sub dag. By convention, a sub dag's dag_id
should be prefixed by its parent and a dot. As in `parent.child`.
:param subdag: the DAG object to run as a subdag of the current DAG.
:type subdag: airflow.DAG
:param dag: the parent DAG
:type subdag: airflow.DAG
"""
if 'dag' not in kwargs:
raise AirflowException("Please pass in the `dag` param")
dag = kwargs['dag']
super(SubDagOperator, self).__init__(*args, **kwargs)
if dag.dag_id + '.' + kwargs['task_id'] != subdag.dag_id:
raise AirflowException(
"The subdag's dag_id should have the form "
"'{{parent_dag_id}}.{{this_task_id}}'. Expected "
"'{d}.{t}'; received '{rcvd}'.".format(
d=dag.dag_id, t=kwargs['task_id'], rcvd=subdag.dag_id))
self.subdag = subdag
self.executor = executor
def execute(self, context):
ed = context['execution_date']
self.subdag.run(
start_date=ed, end_date=ed, donot_pickle=True,
executor=self.executor)
| from airflow.utils import AirflowException
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
from airflow.executors import DEFAULT_EXECUTOR
class SubDagOperator(BaseOperator):
template_fields = tuple()
ui_color = '#555'
ui_fgcolor = '#fff'
@apply_defaults
def __init__(
self,
subdag,
executor=DEFAULT_EXECUTOR,
*args, **kwargs):
"""
Yo dawg. This runs a sub dag. By convention, a sub dag's dag_id
should be prefixed by its parent and a dot. As in `parent.child`.
:param subdag: the DAG object to run as a subdag of the current DAG.
:type subdag: airflow.DAG
:param dag: the parent DAG
:type subdag: airflow.DAG
"""
if 'dag' not in kwargs:
raise AirflowException("Please pass in the `dag` param")
dag = kwargs['dag']
super(SubDagOperator, self).__init__(*args, **kwargs)
if dag.dag_id + '.' + kwargs['task_id'] != subdag.dag_id:
raise AirflowException(
"The subdag's dag_id should correspond to the parent's "
"'dag_id.task_id'")
self.subdag = subdag
self.executor = executor
def execute(self, context):
ed = context['execution_date']
self.subdag.run(
start_date=ed, end_date=ed, donot_pickle=True,
executor=self.executor)
| apache-2.0 | Python |
6af60de30657619398cd0890e4efaf7eb4692bc9 | Fix issue with mod_time | konrad/annotation_helper_web_app | annotate.py | annotate.py | from flask import Flask, url_for, render_template, redirect
import json
app = Flask(__name__)
data_file = "list_annotation.json"
@app.route("/")
def hello():
return "Welcome!\n"
@app.route("/show/<entity_id>")
def show(entity_id):
features = _get_features(entity_id)
return render_template(
'show.html',
entity_id=entity_id,
status=features["status"],
confirm_url=url_for("confirm", entity_id = entity_id),
reject_url=url_for("reject", entity_id = entity_id))
@app.route("/confirm/<entity_id>")
def confirm(entity_id):
_save_annotation(entity_id, "confirmed")
return "%s confirmed<br/><a href='%s'>back</a>" % (
entity_id, url_for("show", entity_id = entity_id))
@app.route("/reject/<entity_id>")
def reject(entity_id):
_save_annotation(entity_id, "reject")
return "%s rejected<br/><a href='%s'>back</a>" % (
entity_id, url_for("show", entity_id = entity_id))
def _get_features(entity_id):
entities = _entities()
return entities.get(entity_id, {"status" : "Undefined"})
def _entities():
try:
with open(data_file) as fh:
return(json.load(fh))
except IOError:
with open(data_file, "w") as fh:
fh.write("{}")
return {}
def _save_annotation(entity_id, status):
entities = _entities()
entities[entity_id] = {"status" : status, "mod_time" : ""}
with open(data_file, "w") as fh:
json.dump(entities, fh)
if __name__ == "__main__":
app.debug = True
app.run()
| from flask import Flask, url_for, render_template, redirect
import json
app = Flask(__name__)
data_file = "list_annotation.json"
@app.route("/")
def hello():
return "Welcome!\n"
@app.route("/show/<entity_id>")
def show(entity_id):
features = _get_features(entity_id)
return render_template(
'show.html',
entity_id=entity_id,
status=features["status"],
confirm_url=url_for("confirm", entity_id = entity_id),
reject_url=url_for("reject", entity_id = entity_id))
@app.route("/confirm/<entity_id>")
def confirm(entity_id):
_save_annotation(entity_id, "confirmed")
return "%s confirmed<br/><a href='%s'>back</a>" % (
entity_id, url_for("show", entity_id = entity_id))
@app.route("/reject/<entity_id>")
def reject(entity_id):
_save_annotation(entity_id, "reject")
return "%s rejected<br/><a href='%s'>back</a>" % (
entity_id, url_for("show", entity_id = entity_id))
def _get_features(entity_id):
entities = _entities()
return entities.get(entity_id, {"status" : "Undefined"})
def _entities():
try:
with open(data_file) as fh:
return(json.load(fh))
except IOError:
with open(data_file, "w") as fh:
fh.write("{}")
return {}
def _save_annotation(entity_id, status):
entities = _entities()
entities[entity_id] = {"status" : status, mod_time}
with open(data_file, "w") as fh:
json.dump(entities, fh)
if __name__ == "__main__":
app.debug = True
app.run()
| isc | Python |
e301384ecec88a5277aef87b74a16100d1f42be4 | Bump to version 0.11.0 | reubano/ckanny,reubano/ckanny | ckanny/__init__.py | ckanny/__init__.py | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
ckanny
~~~~~~
Miscellaneous CKAN utility scripts
Examples:
literal blocks::
python example_google.py
Attributes:
module_level_variable1 (int): Module level variables may be documented in
"""
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
from manager import Manager
from . import datastorer, filestorer, hdx
__title__ = 'ckanny'
__author__ = 'Reuben Cummings'
__description__ = 'Miscellaneous CKAN utility scripts'
__email__ = 'reubano@gmail.com'
__version__ = '0.11.0'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'
manager = Manager()
manager.merge(datastorer.manager, namespace='ds')
manager.merge(filestorer.manager, namespace='fs')
manager.merge(hdx.manager, namespace='hdx')
@manager.command
def ver():
"""Show ckanny version"""
from . import __version__ as version
print('v%s' % version)
if __name__ == '__main__':
manager.main()
| # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
ckanny
~~~~~~
Miscellaneous CKAN utility scripts
Examples:
literal blocks::
python example_google.py
Attributes:
module_level_variable1 (int): Module level variables may be documented in
"""
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
from manager import Manager
from . import datastorer, filestorer, hdx
__title__ = 'ckanny'
__author__ = 'Reuben Cummings'
__description__ = 'Miscellaneous CKAN utility scripts'
__email__ = 'reubano@gmail.com'
__version__ = '0.10.0'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'
manager = Manager()
manager.merge(datastorer.manager, namespace='ds')
manager.merge(filestorer.manager, namespace='fs')
manager.merge(hdx.manager, namespace='hdx')
@manager.command
def ver():
"""Show ckanny version"""
from . import __version__ as version
print('v%s' % version)
if __name__ == '__main__':
manager.main()
| mit | Python |
de60844c82c9b569228aa830d36235b5a377859d | Fix descriptor module to support assimilation | sassoftware/rpath-tools,sassoftware/rpath-tools | rpath_tools/client/sysdisco/descriptors.py | rpath_tools/client/sysdisco/descriptors.py | #!/usr/bin/python
from xml.etree import cElementTree as etree
from conary import conarycfg
from conary import conaryclient
from rpath_tools.client.utils.config_descriptor_cache import ConfigDescriptorCache
class Descriptors(object):
def __init__(self):
self.cfg = conarycfg.ConaryConfiguration(True)
self.client = conaryclient.ConaryClient(self.cfg)
def gather(self):
desc = None
groups = [ x for x in self.client.getUpdateItemList()
if x[0].startswith('group-') and
x[0].endswith('-appliance') ]
if len(group):
group = groups[0]
desc = ConfigDescriptorCache(self.client.getDatabase()).getDescriptor(group)
if desc:
desc.setDisplayName('ConfigurationDescriptor')
desc.addDescription('ConfigurationDescriptor')
return desc
def toxml(self, validate=False):
desc = self.gather()
if desc:
return desc.toxml(validate=validate)
return desc
if __name__ == '__main__':
import sys
from conary.lib import util
sys.excepthook = util.genExcepthook()
descriptors = Descriptors()
xml = etree.fromstring(descriptors.toxml())
print etree.tostring(xml)
| #!/usr/bin/python
from xml.etree import cElementTree as etree
from conary import conarycfg
from conary import conaryclient
from rpath_tools.client.utils.config_descriptor_cache import ConfigDescriptorCache
class Descriptors(object):
def __init__(self):
self.cfg = conarycfg.ConaryConfiguration(True)
self.client = conaryclient.ConaryClient(self.cfg)
def gather(self):
group = [ x for x in self.client.getUpdateItemList()
if x[0].startswith('group-') and
x[0].endswith('-appliance') ][0]
desc = ConfigDescriptorCache(self.client.getDatabase()).getDescriptor(group)
if desc:
desc.setDisplayName('ConfigurationDescriptor')
desc.addDescription('ConfigurationDescriptor')
return desc
def toxml(self, validate=False):
desc = self.gather()
if desc:
return desc.toxml(validate=validate)
return desc
if __name__ == '__main__':
import sys
from conary.lib import util
sys.excepthook = util.genExcepthook()
descriptors = Descriptors()
xml = etree.fromstring(descriptors.toxml())
print etree.tostring(xml)
| apache-2.0 | Python |
a0868ffe9c4ec18768626538dd687dfdb3536b23 | Deal with empty config values properly | sassoftware/rpath-tools,sassoftware/rpath-tools | rpath_tools/client/sysdisco/parsevalues.py | rpath_tools/client/sysdisco/parsevalues.py | #!/usr/conary/bin/python2.6
#
# Copyright (C) 2010 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.rpath.com/permanent/licenses/CPL-1.0.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
import xml.etree.cElementTree as etree
class ValuesParserError(Exception):
"Raised when unable to read values.xml"
class ValuesParser(object):
def __init__(self, values_xml):
self.xml = self._root(values_xml)
self.values = {}
def _root(self, values_xml):
try:
tree = etree.ElementTree(file=values_xml)
except Exception, e:
raise ValuesParserError, e
root = tree.getroot()
return root
def parse(self):
self.values = {}
self._parse(self.xml, prefix=None)
return self.values
def _parse(self, node, prefix):
for element in node:
name = element.tag.upper()
if prefix:
name = prefix + '__' + name
if element.attrib and element.attrib["list"] == "true":
self.values[name] = etree.tostring(element)
elif element.getchildren():
self._parse(element, prefix=name)
else:
self.values[name] = element.text or ''
| #!/usr/conary/bin/python2.6
#
# Copyright (C) 2010 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.rpath.com/permanent/licenses/CPL-1.0.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
import xml.etree.cElementTree as etree
class ValuesParserError(Exception):
"Raised when unable to read values.xml"
class ValuesParser(object):
def __init__(self, values_xml):
self.xml = self._root(values_xml)
self.values = {}
def _root(self, values_xml):
try:
tree = etree.ElementTree(file=values_xml)
except Exception, e:
raise ValuesParserError, e
root = tree.getroot()
return root
def parse(self):
self.values = {}
self._parse(self.xml, prefix=None)
return self.values
def _parse(self, node, prefix):
for element in node:
name = element.tag.upper()
if prefix:
name = prefix + '__' + name
if element.attrib and element.attrib["list"] == "true":
self.values[name] = etree.tostring(element)
elif element.getchildren():
self._parse(element, prefix=name)
else:
self.values[name] = element.text
| apache-2.0 | Python |
45ad104ce72748b7ca580be79fad89261068ebb2 | Use ImportError instead of ModuleNotFoundError to support python3.5 (#110) | mjpost/sacreBLEU,mjpost/sacreBLEU | sacrebleu/tokenizers/tokenizer_ja_mecab.py | sacrebleu/tokenizers/tokenizer_ja_mecab.py | # -*- coding: utf-8 -*-
try:
import MeCab
import ipadic
except ImportError:
# Don't fail until the tokenizer is actually used
MeCab = None
from .tokenizer_none import NoneTokenizer
FAIL_MESSAGE = """
Japanese tokenization requires extra dependencies, but you do not have them installed.
Please install them like so.
pip install sacrebleu[ja]
"""
class TokenizerJaMecab(NoneTokenizer):
def __init__(self):
if MeCab is None:
raise RuntimeError(FAIL_MESSAGE)
self.tagger = MeCab.Tagger(ipadic.MECAB_ARGS + " -Owakati")
# make sure the dictionary is IPA
d = self.tagger.dictionary_info()
assert d.size == 392126, \
"Please make sure to use the IPA dictionary for MeCab"
# This asserts that no user dictionary has been loaded
assert d.next is None
def __call__(self, line):
"""
Tokenizes an Japanese input line using MeCab morphological analyzer.
:param line: a segment to tokenize
:return: the tokenized line
"""
line = line.strip()
sentence = self.tagger.parse(line).strip()
return sentence
def signature(self):
"""
Returns the MeCab parameters.
:return: signature string
"""
signature = self.tagger.version() + "-IPA"
return 'ja-mecab-' + signature
| # -*- coding: utf-8 -*-
try:
import MeCab
import ipadic
except ModuleNotFoundError:
# Don't fail until the tokenizer is actually used
MeCab = None
from .tokenizer_none import NoneTokenizer
FAIL_MESSAGE = """
Japanese tokenization requires extra dependencies, but you do not have them installed.
Please install them like so.
pip install sacrebleu[ja]
"""
class TokenizerJaMecab(NoneTokenizer):
def __init__(self):
if MeCab is None:
raise RuntimeError(FAIL_MESSAGE)
self.tagger = MeCab.Tagger(ipadic.MECAB_ARGS + " -Owakati")
# make sure the dictionary is IPA
d = self.tagger.dictionary_info()
assert d.size == 392126, \
"Please make sure to use the IPA dictionary for MeCab"
# This asserts that no user dictionary has been loaded
assert d.next is None
def __call__(self, line):
"""
Tokenizes an Japanese input line using MeCab morphological analyzer.
:param line: a segment to tokenize
:return: the tokenized line
"""
line = line.strip()
sentence = self.tagger.parse(line).strip()
return sentence
def signature(self):
"""
Returns the MeCab parameters.
:return: signature string
"""
signature = self.tagger.version() + "-IPA"
return 'ja-mecab-' + signature
| apache-2.0 | Python |
7efcdb0ab12e733451e16f4ae24009e39fcd1c0c | add column api | syaning/zhihuapi-py | zhihuapi/column.py | zhihuapi/column.py | from .request import req
from .urls import column_url
class Column:
def __init__(self, slug):
self.slug = slug
def info(self):
"""Get column information"""
url = '%s/api/columns/%s' % (column_url, self.slug)
return req.get(url)
def pins(self):
"""Pin top posts."""
url = '%s/api/columns/%s/pins' % (column_url, self.slug)
return req.get(url)
def posts(self, offset=0):
"""Posts in this column.
Args:
offset: An integer.
Returns:
A list of posts.
"""
url = '%s/api/columns/%s/posts' % (column_url, self.slug)
params = {
'offset': offset,
'limit': 20
}
return req.get(url, params)
def authors(self):
"""Authors in this column."""
url = '%s/api/columns/%s/authors' % (column_url, self.slug)
return req.get(url)
| from .request import req
class Column:
def __init__(self, column_id):
self.id = column_id
| mit | Python |
3fc596db32e1c39e223c35e6a80a2f6c23321201 | Bump version to v1.12.3 | Cal-CS-61A-Staff/ok-client | client/__init__.py | client/__init__.py | __version__ = 'v1.12.3'
FILE_NAME = 'ok'
import os
import sys
sys.path.insert(0, '')
# Add directory in which the ok.zip is stored to sys.path.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
| __version__ = 'v1.12.2'
FILE_NAME = 'ok'
import os
import sys
sys.path.insert(0, '')
# Add directory in which the ok.zip is stored to sys.path.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
| apache-2.0 | Python |
971e723f0b5b62a6170784e0d812f4566e1301a9 | Update __init__.py | mitodl/django-server-status,mitodl/django-server-status | server_status/__init__.py | server_status/__init__.py | # -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
__version__ = '0.5.0'
| # -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
__version__ = '0.6.0'
| agpl-3.0 | Python |
de8f0c504fd49eef93c7b3c356fefefec23ea7f8 | Remove usage of private '_url' property from OSC | openstack/python-mistralclient,StackStorm/python-mistralclient,openstack/python-mistralclient,StackStorm/python-mistralclient | mistralclient/osc/plugin.py | mistralclient/osc/plugin.py | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from osc_lib import utils
LOG = logging.getLogger(__name__)
DEFAULT_WORKFLOW_API_VERSION = '2'
API_VERSION_OPTION = 'os_workflow_api_version'
API_NAME = 'workflow_engine'
API_VERSIONS = {
'2': 'mistralclient.api.v2.client.Client',
}
def make_client(instance):
"""Returns a workflow_engine service client."""
version = instance._api_version[API_NAME]
workflow_client = utils.get_client_class(
API_NAME,
version,
API_VERSIONS)
LOG.debug('Instantiating workflow engine client: %s', workflow_client)
mistral_url = instance.get_endpoint_for_service_type(
'workflowv2',
interface='publicURL'
)
client = workflow_client(
mistral_url=mistral_url,
auth_token=instance.auth_ref.auth_token,
project_id=instance.auth_ref.project_id,
user_id=instance.auth_ref.user_id,
)
return client
def build_option_parser(parser):
"""Hook to add global options."""
parser.add_argument(
'--os-workflow-api-version',
metavar='<workflow-api-version>',
default=utils.env(
'OS_WORKFLOW_API_VERSION',
default=DEFAULT_WORKFLOW_API_VERSION),
help='Workflow API version, default=' +
DEFAULT_WORKFLOW_API_VERSION +
' (Env: OS_WORKFLOW_API_VERSION)')
return parser
| #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from osc_lib import utils
LOG = logging.getLogger(__name__)
DEFAULT_WORKFLOW_API_VERSION = '2'
API_VERSION_OPTION = 'os_workflow_api_version'
API_NAME = 'workflow_engine'
API_VERSIONS = {
'2': 'mistralclient.api.v2.client.Client',
}
def make_client(instance):
"""Returns a workflow_engine service client."""
version = instance._api_version[API_NAME]
workflow_client = utils.get_client_class(
API_NAME,
version,
API_VERSIONS)
LOG.debug('Instantiating workflow engine client: %s', workflow_client)
if not instance._url:
instance._url = instance.get_endpoint_for_service_type(
'workflowv2',
interface='publicURL'
)
client = workflow_client(
mistral_url=instance._url,
auth_token=instance.auth_ref.auth_token,
project_id=instance.auth_ref.project_id,
user_id=instance.auth_ref.user_id,
)
return client
def build_option_parser(parser):
"""Hook to add global options."""
parser.add_argument(
'--os-workflow-api-version',
metavar='<workflow-api-version>',
default=utils.env(
'OS_WORKFLOW_API_VERSION',
default=DEFAULT_WORKFLOW_API_VERSION),
help='Workflow API version, default=' +
DEFAULT_WORKFLOW_API_VERSION +
' (Env: OS_WORKFLOW_API_VERSION)')
return parser
| apache-2.0 | Python |
b72b001ff79fe12370b70fe99b3c344a09a03110 | Fix #19: ResourceWarning on test_encoding (test_big5.txt unclosed) | ordbogen/html5lib-python,gsnedders/html5lib-python,mindw/html5lib-python,alex/html5lib-python,mgilson/html5lib-python,mgilson/html5lib-python,mindw/html5lib-python,gsnedders/html5lib-python,dstufft/html5lib-python,alex/html5lib-python,ordbogen/html5lib-python,dstufft/html5lib-python,html5lib/html5lib-python,mgilson/html5lib-python,ordbogen/html5lib-python,alex/html5lib-python,dstufft/html5lib-python,html5lib/html5lib-python,mindw/html5lib-python,html5lib/html5lib-python | html5lib/tests/test_encoding.py | html5lib/tests/test_encoding.py | from __future__ import absolute_import, division, unicode_literals
import re
import os
import unittest
try:
unittest.TestCase.assertEqual
except AttributeError:
unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
from .support import get_data_files, TestData, test_dir, errorMessage
from html5lib import HTMLParser, inputstream
class Html5EncodingTestCase(unittest.TestCase):
def test_codec_name_a(self):
self.assertEqual(inputstream.codecName("utf-8"), "utf-8")
def test_codec_name_b(self):
self.assertEqual(inputstream.codecName("utf8"), "utf-8")
def test_codec_name_c(self):
self.assertEqual(inputstream.codecName(" utf8 "), "utf-8")
def test_codec_name_d(self):
self.assertEqual(inputstream.codecName("ISO_8859--1"), "windows-1252")
def runParserEncodingTest(data, encoding):
p = HTMLParser()
t = p.parse(data, useChardet=False)
encoding = encoding.lower().decode("ascii")
assert encoding == p.tokenizer.stream.charEncoding[0], errorMessage(data, encoding, p.tokenizer.stream.charEncoding[0])
def runPreScanEncodingTest(data, encoding):
stream = inputstream.HTMLBinaryInputStream(data, chardet=False)
encoding = encoding.lower().decode("ascii")
# Very crude way to ignore irrelevant tests
if len(data) > stream.numBytesMeta:
return
assert encoding == stream.charEncoding[0], errorMessage(data, encoding, stream.charEncoding[0])
def test_encoding():
for filename in get_data_files("encoding"):
test_name = os.path.basename(filename).replace('.dat',''). \
replace('-','')
tests = TestData(filename, b"data", encoding=None)
for idx, test in enumerate(tests):
yield (runParserEncodingTest, test[b'data'], test[b'encoding'])
yield (runPreScanEncodingTest, test[b'data'], test[b'encoding'])
try:
    import chardet
    def test_chardet():
        # Context manager closes the file promptly (avoids a ResourceWarning).
        with open(os.path.join(test_dir, "encoding" , "chardet", "test_big5.txt"), "rb") as fp:
            encoding = inputstream.HTMLInputStream(fp.read()).charEncoding
        assert encoding[0].lower() == "big5"
except ImportError:
    # chardet is optional; skip its tests when it is not installed.
    print("chardet not found, skipping chardet tests")
| from __future__ import absolute_import, division, unicode_literals
import re
import os
import unittest
try:
unittest.TestCase.assertEqual
except AttributeError:
unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
from .support import get_data_files, TestData, test_dir, errorMessage
from html5lib import HTMLParser, inputstream
class Html5EncodingTestCase(unittest.TestCase):
def test_codec_name_a(self):
self.assertEqual(inputstream.codecName("utf-8"), "utf-8")
def test_codec_name_b(self):
self.assertEqual(inputstream.codecName("utf8"), "utf-8")
def test_codec_name_c(self):
self.assertEqual(inputstream.codecName(" utf8 "), "utf-8")
def test_codec_name_d(self):
self.assertEqual(inputstream.codecName("ISO_8859--1"), "windows-1252")
def runParserEncodingTest(data, encoding):
p = HTMLParser()
t = p.parse(data, useChardet=False)
encoding = encoding.lower().decode("ascii")
assert encoding == p.tokenizer.stream.charEncoding[0], errorMessage(data, encoding, p.tokenizer.stream.charEncoding[0])
def runPreScanEncodingTest(data, encoding):
stream = inputstream.HTMLBinaryInputStream(data, chardet=False)
encoding = encoding.lower().decode("ascii")
# Very crude way to ignore irrelevant tests
if len(data) > stream.numBytesMeta:
return
assert encoding == stream.charEncoding[0], errorMessage(data, encoding, stream.charEncoding[0])
def test_encoding():
for filename in get_data_files("encoding"):
test_name = os.path.basename(filename).replace('.dat',''). \
replace('-','')
tests = TestData(filename, b"data", encoding=None)
for idx, test in enumerate(tests):
yield (runParserEncodingTest, test[b'data'], test[b'encoding'])
yield (runPreScanEncodingTest, test[b'data'], test[b'encoding'])
try:
import chardet
def test_chardet():
data = open(os.path.join(test_dir, "encoding" , "chardet", "test_big5.txt"), "rb").read()
encoding = inputstream.HTMLInputStream(data).charEncoding
assert encoding[0].lower() == "big5"
except ImportError:
print("chardet not found, skipping chardet tests")
| mit | Python |
6969d33e85d95db917a07c57a3ec1d4c547a5ed4 | update python producer POC | gtfierro/cs262-project,gtfierro/cs262-project,gtfierro/cs262-project | client/producer.py | client/producer.py | import sys
import msgpack
import socket
import uuid as uuidlib
class Client:
    """Minimal client that publishes msgpack-encoded messages over TCP."""
    def __init__(self, host, port, uuid=None):
        # Use the supplied uuid or generate a random one; kept as a string.
        self.uuid = uuid if uuid is not None else uuidlib.uuid4()
        self.uuid = str(self.uuid) # coerce to string
        self.host = str(host)
        self.port = int(port)
        # All metadata seen so far, plus the not-yet-published ("dirty") subset.
        self.metadata = {}
        self._dirty_metadata = {}
        # Connect eagerly in the constructor; no reconnect handling.
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((self.host, self.port))
    def subscribe(self, query):
        # The query is sent as a bare msgpack frame with no type prefix.
        self.s.send(msgpack.packb(query))
    def add_metadata(self, d):
        # Stringify keys/values and mark them dirty so the next publish()
        # carries only the delta.
        strd = {str(k): str(v) for k,v in d.items()}
        self.metadata.update(strd)
        self._dirty_metadata = strd
    def publish(self, value):
        # Frame payload: UUID, metadata delta, and the published value.
        message = {"UUID": self.uuid, "Metadata": self._dirty_metadata, "Value": value}
        print map(hex, map(ord, msgpack.packb(message)))  # debug dump of frame bytes
        # NOTE(review): the 0x00 byte presumably tags this as a publish
        # frame -- confirm against the broker's wire protocol.
        self.s.send(chr(0x00)+msgpack.packb(message))
        self._dirty_metadata = {}
if __name__ == '__main__':
    # Optional CLI argument selects the room. Keep it as a string: the
    # previous int(sys.argv[1]) crashed on non-numeric arguments and was
    # inconsistent with the string default '410' -- the value is
    # stringified for the metadata dict either way.
    room = sys.argv[1] if len(sys.argv) > 1 else '410'
    c = Client("localhost", "4444", uuid="4600a1f2-ef35-11e5-9fe7-271a9f80bc76")
    c.add_metadata({"Room": str(room), "Building": "Soda", "Device": "Temperature Sensor"})
    import time
    i = 0
    # Publish an incrementing counter once per second, forever.
    while True:
        i += 1
        c.publish(i)
        time.sleep(1)
| import msgpack
import socket
import uuid as uuidlib
class Client:
def __init__(self, host, port, uuid=None):
self.uuid = uuid if uuid is not None else uuidlib.uuid4()
self.uuid = str(self.uuid) # coerce to string
self.host = str(host)
self.port = int(port)
self.metadata = {}
self._dirty_metadata = {}
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((self.host, self.port))
def subscribe(self, query):
self.s.send(msgpack.packb(query))
def add_metadata(self, d):
strd = {str(k): str(v) for k,v in d.items()}
self.metadata.update(strd)
self._dirty_metadata = strd
def publish(self, value):
message = [self.uuid, self._dirty_metadata, value]
print map(hex, map(ord, msgpack.packb(message)))
self.s.send(chr(0x00)+msgpack.packb(message))
self._dirty_metadata = {}
if __name__ == '__main__':
c = Client("localhost", "4444", uuid="4600a1f2-ef35-11e5-9fe7-271a9f80bc76")
c.add_metadata({"Room": "410", "Building": "Soda", "Device": "Temperature Sensor"})
import time
i = 0
while True:
i += 1
c.publish(i)
time.sleep(1)
| bsd-3-clause | Python |
67050468e1c2801e0aa0c7896cd8e5ffb5046f8f | optimize code test=develop | chengduoZH/Paddle,tensor-tang/Paddle,luotao1/Paddle,tensor-tang/Paddle,tensor-tang/Paddle,chengduoZH/Paddle,tensor-tang/Paddle,luotao1/Paddle,luotao1/Paddle,PaddlePaddle/Paddle,tensor-tang/Paddle,PaddlePaddle/Paddle,luotao1/Paddle,luotao1/Paddle,chengduoZH/Paddle,baidu/Paddle,chengduoZH/Paddle,PaddlePaddle/Paddle,PaddlePaddle/Paddle,baidu/Paddle,baidu/Paddle,luotao1/Paddle,PaddlePaddle/Paddle,luotao1/Paddle,chengduoZH/Paddle,baidu/Paddle,PaddlePaddle/Paddle,baidu/Paddle,PaddlePaddle/Paddle | python/paddle/fluid/transpiler/details/distribute_lookuptable_utils.py | python/paddle/fluid/transpiler/details/distribute_lookuptable_utils.py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
LOOKUP_TABLE_TYPE = "lookup_table"


def find_distributed_lookup_table(program):
    """Locate the single distributed lookup table used by *program*.

    Scans every op in the global block; only one distributed table is
    supported, so a second distinct table raises RuntimeError.

    :param program: program whose global block is scanned
    :return: the table (W input) name, or None if no distributed
        lookup_table op exists
    """
    table_name = None
    for op in program.global_block().ops:
        # Only lookup_table ops are of interest.
        if op.type != LOOKUP_TABLE_TYPE:
            continue
        w_name = op.input("W")[0]
        if op.attr('is_distributed') is True:
            if table_name is None:
                table_name = w_name
            if table_name != w_name:
                raise RuntimeError("all distributed lookup_table_ops"
                                   " should have only one table")
        elif table_name is not None:
            # A non-distributed lookup_table must not reuse the
            # distributed table.
            assert w_name != table_name
    return table_name
| # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
LOOKUP_TABLE_TYPE = "lookup_table"
def find_distributed_lookup_table(program):
# process lookup_table_op
# 1. check all lookup_table_op is distributed
# 2. check all lookup_table_op share the same table.
distributed_lookup_table_ops = []
# support only one distributed_lookup_table now
table_name = None
for op in program.global_block().ops:
if op.type == LOOKUP_TABLE_TYPE:
if op.attr('is_distributed') is True:
if table_name is None:
table_name = op.input("W")[0]
if table_name != op.input("W")[0]:
raise RuntimeError("all distributed lookup_table_ops"
" should have only one table")
distributed_lookup_table_ops.append(op)
else:
if table_name is not None:
assert op.input("W")[0] != table_name
return table_name
| apache-2.0 | Python |
3c218dff3e00ece3a84de727fa217d2d7d01b82d | Add 'gym' prefix to URL in email app | wger-project/wger,rolandgeider/wger,wger-project/wger,petervanderdoes/wger,DeveloperMal/wger,DeveloperMal/wger,rolandgeider/wger,kjagoo/wger_stark,petervanderdoes/wger,wger-project/wger,rolandgeider/wger,kjagoo/wger_stark,kjagoo/wger_stark,petervanderdoes/wger,DeveloperMal/wger,wger-project/wger,DeveloperMal/wger,rolandgeider/wger,petervanderdoes/wger,kjagoo/wger_stark | wger/email/urls.py | wger/email/urls.py | # -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
from django.conf.urls import url, include
from wger.email.forms import EmailListForm
from wger.email.views import email_lists
# sub patterns for email lists
patterns_email = [
    # Email-log overview for one gym (gym_pk in the path).
    url(r'^overview/gym/(?P<gym_pk>\d+)$',
        email_lists.EmailLogListView.as_view(),
        name='overview'),
    # Form preview flow for composing a new list email for one gym.
    url(r'^add/gym/(?P<gym_pk>\d+)$',
        email_lists.EmailListFormPreview(EmailListForm),
        name='add'),
]
# All email URLs live under the /email/ prefix in the "email" namespace.
urlpatterns = [
    url(r'^email/', include(patterns_email, namespace="email")),
]
| # -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
from django.conf.urls import url, include
from wger.email.forms import EmailListForm
from wger.email.views import email_lists
# sub patterns for email lists
patterns_email = [
url(r'^overview/(?P<gym_pk>\d+)$',
email_lists.EmailLogListView.as_view(),
name='overview'),
url(r'^add/(?P<gym_pk>\d+)$',
email_lists.EmailListFormPreview(EmailListForm),
name='add'),
]
urlpatterns = [
url(r'^email/', include(patterns_email, namespace="email")),
]
| agpl-3.0 | Python |
8c191e8c985323f29ff6081b31cb4721f7841383 | fix lint | pvizeli/hassio,pvizeli/hassio | hassio/api/supervisor.py | hassio/api/supervisor.py | """Init file for HassIO supervisor rest api."""
import logging
import voluptuous as vol
from .util import api_process, api_process_hostcontroll, api_validate
from ..const import ATTR_VERSION, ATTR_CURRENT, ATTR_BETA, HASSIO_VERSION
_LOGGER = logging.getLogger(__name__)
SCHEMA_OPTIONS = vol.Schema({
# pylint: disable=no-value-for-parameter
vol.Optional(ATTR_BETA): vol.Boolean(),
})
SCHEMA_VERSION = vol.Schema({
vol.Optional(ATTR_VERSION): vol.Coerce(str),
})
class APISupervisor(object):
    """Handle rest api for supervisor functions."""

    def __init__(self, config, loop, host_controll):
        """Initialize supervisor rest api part."""
        self.config = config
        self.loop = loop
        self.host_controll = host_controll

    @api_process
    async def ping(self, request):
        """Return ok for signal that the api is ready."""
        return True

    @api_process
    async def info(self, request):
        """Return supervisor information (version, current, beta channel)."""
        return {
            ATTR_VERSION: HASSIO_VERSION,
            ATTR_CURRENT: self.config.current_hassio,
            ATTR_BETA: self.config.upstream_beta,
        }

    @api_process
    async def options(self, request):
        """Set supervisor options from the validated request body."""
        body = await api_validate(SCHEMA_OPTIONS, request)
        if ATTR_BETA in body:
            self.config.upstream_beta = body[ATTR_BETA]
        return self.config.save()

    @api_process_hostcontroll
    async def update(self, request):
        """Update the supervisor to the requested (or current) version."""
        body = await api_validate(SCHEMA_VERSION, request)
        version = body.get(ATTR_VERSION, self.config.current_hassio)
        if version == HASSIO_VERSION:
            # Fix: RuntimeError does not %-format its arguments, so the old
            # ("%s ...", version) form produced a tuple message.
            raise RuntimeError("{} is already in use.".format(version))
        return await self.host_controll.supervisor_update(version=version)
| """Init file for HassIO supervisor rest api."""
import logging
import voluptuous as vol
from .util import api_process, api_process_hostcontroll, api_validate
from ..const import ATTR_VERSION, ATTR_CURRENT, ATTR_BETA, HASSIO_VERSION
_LOGGER = logging.getLogger(__name__)
SCHEMA_OPTIONS = vol.Schema({
vol.Optional(ATTR_BETA): vol.Boolean(),
})
SCHEMA_VERSION = vol.Schema({
vol.Optional(ATTR_VERSION): vol.Coerce(str),
})
class APISupervisor(object):
"""Handle rest api for supervisor functions."""
def __init__(self, config, loop, host_controll):
"""Initialize supervisor rest api part."""
self.config = config
self.loop = loop
self.host_controll = host_controll
@api_process
async def ping(self, request):
"""Return ok for signal that the api is ready."""
return True
@api_process
async def info(self, request):
"""Return host information."""
info = {
ATTR_VERSION: HASSIO_VERSION,
ATTR_CURRENT: self.config.current_hassio,
ATTR_BETA: self.config.upstream_beta,
}
return info
@api_process
async def options(self, request):
"""Set supervisor options."""
body = await api_validate(SCHEMA_OPTIONS, request)
if ATTR_BETA in body:
self.config.upstream_beta = body[ATTR_BETA]
return self.config.save()
@api_process_hostcontroll
async def update(self, request):
"""Update host OS."""
body = await api_validate(SCHEMA_VERSION, request)
version = body.get(ATTR_VERSION, self.config.current_hassio)
if version == HASSIO_VERSION:
raise RuntimeError("%s is already in use.", version)
return await self.host_controll.supervisor_update(version=version)
| bsd-3-clause | Python |
853744e82f2740a47a3f36e003ea8d2784bafff6 | Fix bare datetime.now() in factory | masschallenge/django-accelerator,masschallenge/django-accelerator | accelerator/tests/factories/user_deferrable_modal_factory.py | accelerator/tests/factories/user_deferrable_modal_factory.py | import swapper
from datetime import (
datetime,
timedelta,
)
from factory import SubFactory
from factory.django import DjangoModelFactory
from pytz import utc
from simpleuser.tests.factories.user_factory import UserFactory
from .deferrable_modal_factory import DeferrableModalFactory
UserDeferrableModal = swapper.load_model('accelerator', 'UserDeferrableModal')
class UserDeferrableModalFactory(DjangoModelFactory):
    """Factory producing UserDeferrableModal instances for tests."""
    class Meta:
        # get_or_create keyed on the (deferrable_modal, user) pair.
        django_get_or_create = ('deferrable_modal', 'user',)
        model = UserDeferrableModal
    user = SubFactory(UserFactory)
    deferrable_modal = SubFactory(DeferrableModalFactory)
    is_deferred = False
    # NOTE(review): this expression is evaluated once at import time, so
    # every instance shares the same deferred_to value; a factory lazy
    # attribute would re-evaluate per instance -- confirm whether the
    # tests depend on either behavior.
    deferred_to = utc.localize(datetime.now()) + timedelta(days=1)
| import swapper
from datetime import (
datetime,
timedelta,
)
from factory import SubFactory
from factory.django import DjangoModelFactory
from simpleuser.tests.factories.user_factory import UserFactory
from .deferrable_modal_factory import DeferrableModalFactory
UserDeferrableModal = swapper.load_model('accelerator', 'UserDeferrableModal')
class UserDeferrableModalFactory(DjangoModelFactory):
class Meta:
django_get_or_create = ('deferrable_modal', 'user',)
model = UserDeferrableModal
user = SubFactory(UserFactory)
deferrable_modal = SubFactory(DeferrableModalFactory)
is_deferred = False
deferred_to = datetime.now() + timedelta(days=1)
| mit | Python |
aac38613719ea514823848095e7df9fd24fac353 | Align output_regex | coala-analyzer/coala-bears,Asnelchristian/coala-bears,incorrectusername/coala-bears,yash-nisar/coala-bears,seblat/coala-bears,naveentata/coala-bears,ku3o/coala-bears,chriscoyfish/coala-bears,Asnelchristian/coala-bears,gs0510/coala-bears,mr-karan/coala-bears,refeed/coala-bears,srisankethu/coala-bears,horczech/coala-bears,coala-analyzer/coala-bears,kaustubhhiware/coala-bears,LWJensen/coala-bears,ku3o/coala-bears,aptrishu/coala-bears,coala-analyzer/coala-bears,meetmangukiya/coala-bears,arjunsinghy96/coala-bears,seblat/coala-bears,naveentata/coala-bears,dosarudaniel/coala-bears,coala-analyzer/coala-bears,arjunsinghy96/coala-bears,vijeth-aradhya/coala-bears,srisankethu/coala-bears,sounak98/coala-bears,incorrectusername/coala-bears,Asnelchristian/coala-bears,seblat/coala-bears,coala/coala-bears,mr-karan/coala-bears,Vamshi99/coala-bears,madhukar01/coala-bears,Vamshi99/coala-bears,yashtrivedi96/coala-bears,madhukar01/coala-bears,damngamerz/coala-bears,vijeth-aradhya/coala-bears,Vamshi99/coala-bears,kaustubhhiware/coala-bears,shreyans800755/coala-bears,naveentata/coala-bears,coala/coala-bears,ankit01ojha/coala-bears,madhukar01/coala-bears,horczech/coala-bears,naveentata/coala-bears,madhukar01/coala-bears,vijeth-aradhya/coala-bears,damngamerz/coala-bears,Shade5/coala-bears,srisankethu/coala-bears,gs0510/coala-bears,damngamerz/coala-bears,meetmangukiya/coala-bears,Vamshi99/coala-bears,ankit01ojha/coala-bears,LWJensen/coala-bears,incorrectusername/coala-bears,Shade5/coala-bears,SanketDG/coala-bears,yash-nisar/coala-bears,LWJensen/coala-bears,damngamerz/coala-bears,Shade5/coala-bears,meetmangukiya/coala-bears,meetmangukiya/coala-bears,kaustubhhiware/coala-bears,sounak98/coala-bears,damngamerz/coala-bears,kaustubhhiware/coala-bears,seblat/coala-bears,yash-nisar/coala-bears,Shade5/coala-bears,shreyans800755/coala-bears,Asnelchristian/coala-bears,sounak98/coala-bears,horczech/coala-bears,refeed/coala-bears,coala-analyz
er/coala-bears,yash-nisar/coala-bears,chriscoyfish/coala-bears,dosarudaniel/coala-bears,madhukar01/coala-bears,srisankethu/coala-bears,yashtrivedi96/coala-bears,Vamshi99/coala-bears,aptrishu/coala-bears,coala/coala-bears,meetmangukiya/coala-bears,seblat/coala-bears,refeed/coala-bears,coala/coala-bears,SanketDG/coala-bears,Vamshi99/coala-bears,Vamshi99/coala-bears,shreyans800755/coala-bears,SanketDG/coala-bears,Asnelchristian/coala-bears,mr-karan/coala-bears,gs0510/coala-bears,aptrishu/coala-bears,sounak98/coala-bears,shreyans800755/coala-bears,refeed/coala-bears,gs0510/coala-bears,aptrishu/coala-bears,yash-nisar/coala-bears,damngamerz/coala-bears,vijeth-aradhya/coala-bears,yash-nisar/coala-bears,madhukar01/coala-bears,Vamshi99/coala-bears,horczech/coala-bears,ankit01ojha/coala-bears,seblat/coala-bears,arjunsinghy96/coala-bears,ankit01ojha/coala-bears,coala/coala-bears,incorrectusername/coala-bears,horczech/coala-bears,ku3o/coala-bears,meetmangukiya/coala-bears,gs0510/coala-bears,Asnelchristian/coala-bears,madhukar01/coala-bears,yash-nisar/coala-bears,coala/coala-bears,dosarudaniel/coala-bears,srisankethu/coala-bears,gs0510/coala-bears,arjunsinghy96/coala-bears,yashtrivedi96/coala-bears,gs0510/coala-bears,refeed/coala-bears,yash-nisar/coala-bears,gs0510/coala-bears,SanketDG/coala-bears,meetmangukiya/coala-bears,Vamshi99/coala-bears,SanketDG/coala-bears,coala/coala-bears,yashtrivedi96/coala-bears,chriscoyfish/coala-bears,aptrishu/coala-bears,damngamerz/coala-bears,refeed/coala-bears,dosarudaniel/coala-bears,chriscoyfish/coala-bears,kaustubhhiware/coala-bears,sounak98/coala-bears,meetmangukiya/coala-bears,LWJensen/coala-bears,srisankethu/coala-bears,ku3o/coala-bears,ku3o/coala-bears,chriscoyfish/coala-bears,dosarudaniel/coala-bears,ankit01ojha/coala-bears,ku3o/coala-bears,shreyans800755/coala-bears,LWJensen/coala-bears,Asnelchristian/coala-bears,srisankethu/coala-bears,refeed/coala-bears,ankit01ojha/coala-bears,mr-karan/coala-bears,SanketDG/coala-bears,shreyans800755/c
oala-bears,vijeth-aradhya/coala-bears,horczech/coala-bears,vijeth-aradhya/coala-bears,ankit01ojha/coala-bears,naveentata/coala-bears,seblat/coala-bears,shreyans800755/coala-bears,horczech/coala-bears,LWJensen/coala-bears,refeed/coala-bears,coala/coala-bears,horczech/coala-bears,incorrectusername/coala-bears,mr-karan/coala-bears,LWJensen/coala-bears,aptrishu/coala-bears,coala-analyzer/coala-bears,yashtrivedi96/coala-bears,incorrectusername/coala-bears,Vamshi99/coala-bears,naveentata/coala-bears,SanketDG/coala-bears,yashtrivedi96/coala-bears,yash-nisar/coala-bears,dosarudaniel/coala-bears,arjunsinghy96/coala-bears,dosarudaniel/coala-bears,coala/coala-bears,naveentata/coala-bears,madhukar01/coala-bears,incorrectusername/coala-bears,dosarudaniel/coala-bears,aptrishu/coala-bears,chriscoyfish/coala-bears,vijeth-aradhya/coala-bears,ankit01ojha/coala-bears,aptrishu/coala-bears,yashtrivedi96/coala-bears,meetmangukiya/coala-bears,yashtrivedi96/coala-bears,coala-analyzer/coala-bears,arjunsinghy96/coala-bears,LWJensen/coala-bears,coala/coala-bears,aptrishu/coala-bears,SanketDG/coala-bears,kaustubhhiware/coala-bears,kaustubhhiware/coala-bears,coala-analyzer/coala-bears,ankit01ojha/coala-bears,ku3o/coala-bears,incorrectusername/coala-bears,refeed/coala-bears,chriscoyfish/coala-bears,sounak98/coala-bears,Asnelchristian/coala-bears,Shade5/coala-bears,madhukar01/coala-bears,gs0510/coala-bears,aptrishu/coala-bears,chriscoyfish/coala-bears,sounak98/coala-bears,shreyans800755/coala-bears,damngamerz/coala-bears,coala-analyzer/coala-bears,LWJensen/coala-bears,yash-nisar/coala-bears,coala/coala-bears,Shade5/coala-bears,naveentata/coala-bears,ku3o/coala-bears,srisankethu/coala-bears,damngamerz/coala-bears,mr-karan/coala-bears,coala/coala-bears,refeed/coala-bears,srisankethu/coala-bears,vijeth-aradhya/coala-bears,horczech/coala-bears,sounak98/coala-bears,kaustubhhiware/coala-bears,ankit01ojha/coala-bears,mr-karan/coala-bears,shreyans800755/coala-bears,aptrishu/coala-bears,ankit01ojha/coala-
bears,shreyans800755/coala-bears,Vamshi99/coala-bears,ku3o/coala-bears,shreyans800755/coala-bears,yash-nisar/coala-bears,refeed/coala-bears,arjunsinghy96/coala-bears,yashtrivedi96/coala-bears,dosarudaniel/coala-bears,kaustubhhiware/coala-bears,horczech/coala-bears,Shade5/coala-bears,arjunsinghy96/coala-bears,seblat/coala-bears,Asnelchristian/coala-bears,arjunsinghy96/coala-bears,sounak98/coala-bears,Shade5/coala-bears,mr-karan/coala-bears,horczech/coala-bears,srisankethu/coala-bears,damngamerz/coala-bears,vijeth-aradhya/coala-bears,Shade5/coala-bears,incorrectusername/coala-bears,damngamerz/coala-bears,SanketDG/coala-bears,srisankethu/coala-bears,naveentata/coala-bears | bears/verilog/VerilogLintBear.py | bears/verilog/VerilogLintBear.py | from coalib.bearlib.abstractions.Linter import linter
@linter(executable='verilator',
        output_format='regex',
        use_stderr=True,
        # verilator reports on stderr as "%Error: file:line: message" /
        # "%Warning-...: file:line: message"; capture severity, line, message.
        output_regex=r'\%(?:(?P<severity>Error|Warning.*?).*?):'
                     r'.+?:(?P<line>.+?): (?P<message>.+)')
class VerilogLintBear:
    """
    Analyze Verilog code using ``verilator`` and checks for all lint
    related and code style related warning messages. It supports the
    synthesis subset of Verilog, plus initial statements, proper
    blocking/non-blocking assignments, functions, tasks.
    It also warns about unused code when a specified signal is never sinked,
    and unoptimized code due to some construct, with which the
    optimization of the specified signal or block is disabled.
    This is done using the ``--lint-only`` command. For more information visit
    <http://www.veripool.org/projects/verilator/wiki/Manual-verilator>.
    """
    LANGUAGES = "Verilog"
    @staticmethod
    def create_arguments(filename, file, config_file):
        # --lint-only: check the file without generating any output code.
        return '--lint-only', filename
| from coalib.bearlib.abstractions.Linter import linter
@linter(executable='verilator',
output_format='regex',
use_stderr=True,
output_regex=r'\%(?:(?P<severity>Error|Warning.*?).*?):'
r'.+?:(?P<line>.+?): '
r'(?P<message>.+)')
class VerilogLintBear:
"""
Analyze Verilog code using ``verilator`` and checks for all lint
related and code style related warning messages. It supports the
synthesis subset of Verilog, plus initial statements, proper
blocking/non-blocking assignments, functions, tasks.
It also warns about unused code when a specified signal is never sinked,
and unoptimized code due to some construct, with which the
optimization of the specified signal or block is disabled.
This is done using the ``--lint-only`` command. For more information visit
<http://www.veripool.org/projects/verilator/wiki/Manual-verilator>.
"""
LANGUAGES = "Verilog"
@staticmethod
def create_arguments(filename, file, config_file):
return '--lint-only', filename
| agpl-3.0 | Python |
24892e1242469a43585f4efc8caa60d1cc50c1a0 | rework the logic here | akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem | scripts/dbutil/unknown_hads.py | scripts/dbutil/unknown_hads.py | """
I look at the unknown HADS table and see if any of these stations exist
in the mesosite database, if so, then I set online to true!
"""
import iemdb
HADS = iemdb.connect('hads')
MESOSITE = iemdb.connect('mesosite')
hcursor = HADS.cursor()
hcursor2 = HADS.cursor()
mcursor = MESOSITE.cursor()
# look for unknown
hcursor.execute("""SELECT distinct nwsli, network from unknown
WHERE network != '' and network is not null""")
for row in hcursor:
id = row[0]
network = row[1]
mcursor.execute("""
SELECT online from stations where network = '%s' and id = '%s'
""" % (network, id))
row = mcursor.fetchone()
if row is None:
print 'Site %s [%s] is unknown!' % (id, network)
elif row[0] == False:
mcursor.execute("""
update stations SET online = 't' where network = '%s' and id = '%s'
""" % (network, id))
hcursor2.execute("""DELETE from unknown where nwsli = '%s' and network = '%s'""" % (id, network))
else:
print 'Site %s [%s] was unknown, but online in DB?' % (id, network)
hcursor2.close()
HADS.commit()
mcursor.close()
MESOSITE.commit()
| """
I look at the unknown HADS table and see if any of these stations exist
in the mesosite database, if so, then I set online to true!
"""
import iemdb
HADS = iemdb.connect('hads')
MESOSITE = iemdb.connect('mesosite')
hcursor = HADS.cursor()
hcursor2 = HADS.cursor()
mcursor = MESOSITE.cursor()
# look for unknown
hcursor.execute("""SELECT distinct nwsli, network from unknown
WHERE network != '' and network is not null""")
for row in hcursor:
id = row[0]
network = row[1]
mcursor.execute("""
SELECT * from stations where network = '%s' and id = '%s' and online = 'f'
""" % (network, id))
row = mcursor.fetchone()
if row:
mcursor.execute("""
update stations SET online = 't' where network = '%s' and id = '%s'
""" % (network, id))
hcursor2.execute("""DELETE from unknown where nwsli = '%s' and network = '%s'""" % (id, network))
else:
print 'Site %s [%s] was unknown, but online in DB?' % (id, network)
hcursor2.close()
HADS.commit()
mcursor.close()
MESOSITE.commit()
| mit | Python |
000039812308a4352e652d9423b8fd20defb1f7f | Update version | MissiaL/hikvision-client | hikvisionapi/__init__.py | hikvisionapi/__init__.py | from .hikvisionapi import Client
__title__ = 'hikvisionapi'
__version__ = '0.1.8'
__author__ = 'Petr Alekseev'
__license__ = 'MIT'
__copyright__ = 'Copyright 2017 Petr Alekseev' | from .hikvisionapi import Client
__title__ = 'hikvisionapi'
__version__ = '0.1.7'
__author__ = 'Petr Alekseev'
__license__ = 'MIT'
__copyright__ = 'Copyright 2017 Petr Alekseev' | mit | Python |
11a329ee1b35621c77a36b9f742403e7194d0418 | Fix for invalid timestamps in rate limit headers | housecanary/hc-api-python | housecanary/utilities.py | housecanary/utilities.py | """Utility functions for hc-api-python"""
from datetime import datetime
def get_readable_time_string(seconds):
    """Return a human readable string such as "1 Day 2 Hours" from seconds.

    Returns an empty string for 0 seconds. Units with a zero count are
    omitted; singular/plural unit names are chosen per count.
    """
    seconds = int(seconds)
    # divmod keeps the arithmetic in integers on Python 3 as well ("/"
    # would yield floats there) and is clearer than paired / and %.
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)

    result = ""
    if days > 0:
        result += "%d %s " % (days, "Day" if (days == 1) else "Days")
    if hours > 0:
        result += "%d %s " % (hours, "Hour" if (hours == 1) else "Hours")
    if minutes > 0:
        result += "%d %s " % (minutes, "Minute" if (minutes == 1) else "Minutes")
    if seconds > 0:
        result += "%d %s " % (seconds, "Second" if (seconds == 1) else "Seconds")
    return result.strip()
def get_datetime_from_timestamp(timestamp):
    """Return a local datetime from a unix timestamp, or None if invalid.

    Accepts ints or numeric strings. None, non-numeric input and
    out-of-range values yield None instead of raising.
    """
    try:
        return datetime.fromtimestamp(int(timestamp))
    except (TypeError, ValueError, OverflowError, OSError):
        # Narrowed from a bare "except:", which would also have swallowed
        # KeyboardInterrupt and SystemExit.
        return None
def get_rate_limits(response):
    """Returns a list of rate limit information from a given response's headers.

    Each X-RateLimit-* header may contain several comma separated values,
    one per rate-limit period; one dict is produced per period.
    """
    rate_limits = []
    periods = response.headers['X-RateLimit-Period'].split(',')
    limits = response.headers['X-RateLimit-Limit'].split(',')
    remaining = response.headers['X-RateLimit-Remaining'].split(',')
    reset = response.headers['X-RateLimit-Reset'].split(',')
    for idx, period in enumerate(periods):
        rate_limit = {}
        limit_period = get_readable_time_string(period)
        rate_limit["period"] = limit_period
        rate_limit["period_seconds"] = period
        rate_limit["request_limit"] = limits[idx]
        rate_limit["requests_remaining"] = remaining[idx]
        # None when the reset value is missing or not a valid timestamp.
        reset_datetime = get_datetime_from_timestamp(reset[idx])
        rate_limit["reset"] = reset_datetime
        if reset_datetime is not None:
            # NOTE(review): timedelta.seconds ignores whole days and is
            # misleading for resets in the past (negative deltas) --
            # total_seconds() with clamping may be intended; confirm that
            # resets are always in the near future.
            seconds_remaining = (reset_datetime - datetime.now()).seconds
        else:
            seconds_remaining = 0
        rate_limit["reset_in_seconds"] = seconds_remaining
        rate_limit["time_to_reset"] = get_readable_time_string(seconds_remaining)
        rate_limits.append(rate_limit)
    return rate_limits
| """Utility functions for hc-api-python"""
from datetime import datetime
def get_readable_time_string(seconds):
"""Returns human readable string from number of seconds"""
seconds = int(seconds)
minutes = seconds / 60
seconds = seconds % 60
hours = minutes / 60
minutes = minutes % 60
days = hours / 24
hours = hours % 24
result = ""
if days > 0:
result += "%d %s " % (days, "Day" if (days == 1) else "Days")
if hours > 0:
result += "%d %s " % (hours, "Hour" if (hours == 1) else "Hours")
if minutes > 0:
result += "%d %s " % (minutes, "Minute" if (minutes == 1) else "Minutes")
if seconds > 0:
result += "%d %s " % (seconds, "Second" if (seconds == 1) else "Seconds")
return result.strip()
def get_datetime_from_timestamp(timestamp):
"""Return datetime from unix timestamp"""
if timestamp is None:
return None
return datetime.fromtimestamp(int(timestamp))
def get_rate_limits(response):
"""Returns a list of rate limit information from a given response's headers."""
rate_limits = []
periods = response.headers['X-RateLimit-Period'].split(',')
limits = response.headers['X-RateLimit-Limit'].split(',')
remaining = response.headers['X-RateLimit-Remaining'].split(',')
reset = response.headers['X-RateLimit-Reset'].split(',')
for idx, period in enumerate(periods):
rate_limit = {}
limit_period = get_readable_time_string(period)
rate_limit["period"] = limit_period
rate_limit["period_seconds"] = period
rate_limit["request_limit"] = limits[idx]
rate_limit["requests_remaining"] = remaining[idx]
reset_datetime = get_datetime_from_timestamp(reset[idx])
rate_limit["reset"] = reset_datetime
if reset_datetime is not None:
seconds_remaining = (reset_datetime - datetime.now()).seconds
else:
seconds_remaining = 0
rate_limit["reset_in_seconds"] = seconds_remaining
rate_limit["time_to_reset"] = get_readable_time_string(seconds_remaining)
rate_limits.append(rate_limit)
return rate_limits
| mit | Python |
e3f3fd8ff029f3a4b35e986014fac7875ebd0d9e | fix django deprecation warning | unbit/davvy | davvy/admin.py | davvy/admin.py | from django.contrib import admin
from davvy.models import *
from django import forms
class PropInline(admin.TabularInline):
    """Edit Prop rows inline on the Resource change page."""
    fields = ['resource', 'name']
    model = Prop
class ResourceAdminForm(forms.ModelForm):
    """Admin form for Resource with a wider text box for the file field."""
    class Meta:
        model = Resource
        exclude = []
        widgets = {
            'file': forms.TextInput(attrs={'size': '64'})
        }
class ResourceAdmin(admin.ModelAdmin):
    """Admin for Resource: custom form plus inline Prop editing."""
    list_display = ('__unicode__', 'displayname', 'user')
    form = ResourceAdminForm
    inlines = [PropInline]
admin.site.register(Resource, ResourceAdmin)
admin.site.register(Prop)
| from django.contrib import admin
from davvy.models import *
from django import forms
class PropInline(admin.TabularInline):
model = Prop
class ResourceAdminForm(forms.ModelForm):
class Meta:
model = Resource
widgets = {
'file': forms.TextInput(attrs={'size': '64'})
}
class ResourceAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'displayname', 'user')
form = ResourceAdminForm
inlines = [PropInline]
admin.site.register(Resource, ResourceAdmin)
admin.site.register(Prop)
| mit | Python |
5ed29c78837620eb8f2b41db7f4d35c8893a114e | Update version.py | VUIIS/dax,VUIIS/dax | dax/version.py | dax/version.py | VERSION = '2.9.4'
| VERSION = '2.9.3-dev0'
| mit | Python |
385442560251d47fc09a9115b4ccd49b92c94926 | Update version.py | VUIIS/dax,VUIIS/dax | dax/version.py | dax/version.py | VERSION = '0.9.1'
| VERSION = '0.9.dev'
| mit | Python |
42ab1b76c065b2886f475b1aace6580ed0860da3 | update version to 0.7.1 | fsantini/python-e3dc | e3dc/__init__.py | e3dc/__init__.py | """E3DC Library for Python.
Python class to connect to an E3/DC system.
Copyright 2017 Francesco Santini <francesco.santini@gmail.com>.
Licensed under a MIT license. See LICENSE for details.
"""
from ._e3dc import E3DC, AuthenticationError, PollError
from ._e3dc_rscp_local import CommunicationError, RSCPAuthenticationError
from ._e3dc_rscp_web import RequestTimeoutError, SocketNotReady
from ._rscpLib import FrameError
__all__ = [
"E3DC",
"AuthenticationError",
"PollError",
"CommunicationError",
"RSCPAuthenticationError",
"RequestTimeoutError",
"SocketNotReady",
"FrameError",
]
__version__ = "0.7.1"
| """E3DC Library for Python.
Python class to connect to an E3/DC system.
Copyright 2017 Francesco Santini <francesco.santini@gmail.com>.
Licensed under a MIT license. See LICENSE for details.
"""
from ._e3dc import E3DC, AuthenticationError, PollError
from ._e3dc_rscp_local import CommunicationError, RSCPAuthenticationError
from ._e3dc_rscp_web import RequestTimeoutError, SocketNotReady
from ._rscpLib import FrameError
__all__ = [
"E3DC",
"AuthenticationError",
"PollError",
"CommunicationError",
"RSCPAuthenticationError",
"RequestTimeoutError",
"SocketNotReady",
"FrameError",
]
__version__ = "0.7.0"
| mit | Python |
592eb945d4f3f46f70457609da2c94d50b7e5dcd | Fix test_consolidate_tarballs_job to pass a dictionary | jpfeil/toil-scripts,jpfeil/toil-scripts,BD2KGenomics/toil-scripts,BD2KGenomics/toil-scripts | src/toil_scripts/lib/test/test_files.py | src/toil_scripts/lib/test/test_files.py | import os
import tarfile
from toil.job import Job
def test_mkdir_p(tmpdir):
import os
from toil_scripts.lib.files import mkdir_p
work_dir = str(tmpdir)
dir_path = os.path.join(work_dir, 'test')
assert os.path.exists(work_dir)
mkdir_p(work_dir)
mkdir_p(dir_path)
assert os.path.isdir(dir_path)
def test_tarball_files(tmpdir):
from toil_scripts.lib.files import tarball_files
work_dir = str(tmpdir)
fpath = os.path.join(work_dir, 'output_file')
with open(fpath, 'wb') as fout:
fout.write(os.urandom(1024))
tarball_files(output_dir=work_dir, tar_name='test.tar', file_paths=[fpath])
assert os.path.exists(os.path.join(work_dir, 'test.tar'))
def test_move_files(tmpdir):
from toil_scripts.lib.files import move_files
work_dir = str(tmpdir)
os.mkdir(os.path.join(work_dir, 'test'))
fpath = os.path.join(work_dir, 'output_file')
with open(fpath, 'wb') as fout:
fout.write(os.urandom(1024))
move_files([fpath], os.path.join(work_dir, 'test'))
assert os.path.exists(os.path.join(work_dir, 'test', 'output_file'))
def test_consolidate_tarballs_job(tmpdir):
options = Job.Runner.getDefaultOptions(os.path.join(str(tmpdir), 'test_store'))
Job.Runner.startToil(Job.wrapJobFn(_consolidate_tarball_job_setup), options)
def _consolidate_tarball_job_setup(job):
from toil_scripts.lib.files import consolidate_tarballs_job
# Create test file
work_dir = job.fileStore.getLocalTempDir()
fpath = os.path.join(work_dir, 'output_file')
with open(fpath, 'wb') as fout:
fout.write(os.urandom(1024))
# Create test tarballs
fpath1 = os.path.join(work_dir, 'test1.tar.gz')
fpath2 = os.path.join(work_dir, 'test2.tar.gz')
with tarfile.open(fpath1, 'w:gz') as f_out:
f_out.add(fpath)
with tarfile.open(fpath2, 'w:gz') as f_out:
f_out.add(fpath)
id1 = job.fileStore.writeGlobalFile(fpath1)
id2 = job.fileStore.writeGlobalFile(fpath2)
job.addChildJobFn(consolidate_tarballs_job, dict(test1=id1, test2=id2))
| import os
import tarfile
from toil.job import Job
def test_mkdir_p(tmpdir):
import os
from toil_scripts.lib.files import mkdir_p
work_dir = str(tmpdir)
dir_path = os.path.join(work_dir, 'test')
assert os.path.exists(work_dir)
mkdir_p(work_dir)
mkdir_p(dir_path)
assert os.path.isdir(dir_path)
def test_tarball_files(tmpdir):
from toil_scripts.lib.files import tarball_files
work_dir = str(tmpdir)
fpath = os.path.join(work_dir, 'output_file')
with open(fpath, 'wb') as fout:
fout.write(os.urandom(1024))
tarball_files(output_dir=work_dir, tar_name='test.tar', file_paths=[fpath])
assert os.path.exists(os.path.join(work_dir, 'test.tar'))
def test_move_files(tmpdir):
from toil_scripts.lib.files import move_files
work_dir = str(tmpdir)
os.mkdir(os.path.join(work_dir, 'test'))
fpath = os.path.join(work_dir, 'output_file')
with open(fpath, 'wb') as fout:
fout.write(os.urandom(1024))
move_files([fpath], os.path.join(work_dir, 'test'))
assert os.path.exists(os.path.join(work_dir, 'test', 'output_file'))
def test_consolidate_tarballs_job(tmpdir):
options = Job.Runner.getDefaultOptions(os.path.join(str(tmpdir), 'test_store'))
Job.Runner.startToil(Job.wrapJobFn(_consolidate_tarball_job_setup), options)
def _consolidate_tarball_job_setup(job):
from toil_scripts.lib.files import consolidate_tarballs_job
# Create test file
work_dir = job.fileStore.getLocalTempDir()
fpath = os.path.join(work_dir, 'output_file')
with open(fpath, 'wb') as fout:
fout.write(os.urandom(1024))
# Create test tarballs
fpath1 = os.path.join(work_dir, 'test1.tar.gz')
fpath2 = os.path.join(work_dir, 'test2.tar.gz')
with tarfile.open(fpath1, 'w:gz') as f_out:
f_out.add(fpath)
with tarfile.open(fpath2, 'w:gz') as f_out:
f_out.add(fpath)
id1 = job.fileStore.writeGlobalFile(fpath1)
id2 = job.fileStore.writeGlobalFile(fpath2)
job.addChildJobFn(consolidate_tarballs_job, test1=id1, test2=id2)
| apache-2.0 | Python |
f1b00e67df3ffe6a14ae5e0584547770c780715a | Remove unused import | fontify/fontify,fontify/fontify,fontify/fontify,fontify/fontify | hello.py | hello.py | import os
import subprocess
from time import sleep
from flask import Flask
from flask import request
from flask import url_for
from flask import send_from_directory
from flask import make_response
from flask import render_template
from flask import jsonify
from pdfkit import from_string
from data import get_chars
from data import get_sample_chars
from data import TMPL_OPTIONS
from werkzeug import secure_filename
UPLOAD_FOLDER = '/tmp'
ALLOWED_EXTENSIONS = set(['jpg', 'png'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route("/")
def index():
return render_template('index.html')
@app.route("/finish")
def finish():
return render_template('finish.html')
@app.route("/template")
def template():
html = render_template(
'template.html',
chars=get_chars(),
sample=get_sample_chars()
)
pdf = from_string(
html,
False,
options=TMPL_OPTIONS,
css='static/template.css'
)
response = make_response(pdf)
response.headers['Content-Disposition'] = "filename=template.pdf"
response.mimetype = 'application/pdf'
return response
def allowed_file(filename):
    """Return True when *filename* carries an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS
@app.route("/upload-file", methods=['GET', 'POST'])
def upload_file():
    """Accept an image upload via POST and return its metadata as JSON.

    Expects a multipart 'file' field plus a 'font-name' form field.
    On GET, or when the file is missing/has a disallowed extension,
    an empty string is returned instead.
    """
    if request.method == 'POST':
        file = request.files['file']
        if file and allowed_file(file.filename):
            # Sanitize the client-supplied name before using it as a path
            # component, then store the upload in the configured folder.
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            font_name = request.form['font-name']
            return_url = url_for('uploaded_file', filename=filename)
            # subprocess.call(["python", "scripts/fontify.py"])
            # NOTE(review): the sleep appears to stand in for the commented
            # out font-generation call above -- confirm before removing.
            sleep(5)
            return jsonify(
                rul=return_url,
                font_name=font_name,
                filename=filename
            )
    return ''
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
if __name__ == "__main__":
app.run(debug=True)
| import os
import subprocess
from time import sleep
from flask import Flask
from flask import request
from flask import redirect
from flask import url_for
from flask import send_from_directory
from flask import make_response
from flask import render_template
from flask import jsonify
from pdfkit import from_string
from data import get_chars
from data import get_sample_chars
from data import TMPL_OPTIONS
from werkzeug import secure_filename
UPLOAD_FOLDER = '/tmp'
ALLOWED_EXTENSIONS = set(['jpg', 'png'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
@app.route("/")
def index():
return render_template('index.html')
@app.route("/finish")
def finish():
return render_template('finish.html')
@app.route("/template")
def template():
html = render_template(
'template.html',
chars=get_chars(),
sample=get_sample_chars()
)
pdf = from_string(
html,
False,
options=TMPL_OPTIONS,
css='static/template.css'
)
response = make_response(pdf)
response.headers['Content-Disposition'] = "filename=template.pdf"
response.mimetype = 'application/pdf'
return response
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.route("/upload-file", methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
file = request.files['file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
font_name = request.form['font-name']
return_url = url_for('uploaded_file', filename=filename)
# subprocess.call(["python", "scripts/fontify.py"])
sleep(5)
return jsonify(rul=return_url, font_name=font_name, filename=filename)
return ''
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
if __name__ == "__main__":
app.run(debug=True)
| mit | Python |
0bf0a2d06cca9440e3ddc5df796a7b8464816007 | Add trac.nci | ScottWales/mosrs-setup | mosrs/access.py | mosrs/access.py | #!/usr/bin/env python
"""
Copyright 2016 ARC Centre of Excellence for Climate Systems Science
author: Scott Wales <scott.wales@unimelb.edu.au>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . import gpg
from getpass import getpass
from hashlib import md5
import requests
import os
def main():
    """Prompt for the current user's password and cache it in gpg-agent.

    The password is first validated against the ACCESS Subversion server;
    on success it is preset in gpg-agent under the MD5 digests of three
    HTTP auth realms (AccessCollab, nemo, and NCI Projects on trac) so
    clients can authenticate without re-prompting.
    """
    passwd = getpass('Please enter your password for user %s: '%os.environ['USER'])
    # Test the password
    # NOTE(review): verify=False disables TLS certificate verification for
    # this request -- confirm whether the server certificate can be
    # validated instead.
    r = requests.get('https://access-svn.nci.org.au/svn/um',
            auth=(os.environ['USER'], passwd), verify=False)
    if r.status_code == 401:
        print('ERROR: Bad password for user %s'%os.environ['USER'])
        return
    # Any other HTTP error (server down, etc.) propagates to the caller.
    r.raise_for_status()
    # gpg-agent keys preset passphrases by the MD5 hash of the auth realm.
    realm = '<https://access-svn.nci.org.au:443> AccessCollab'
    key = md5(realm).hexdigest()
    gpg.preset_passphrase(key, passwd)
    nemo_realm = '<https://access-svn.nci.org.au:443> nemo'
    nemo_key = md5(nemo_realm).hexdigest()
    gpg.preset_passphrase(nemo_key, passwd)
    realm = '<https://trac.nci.org.au:443> NCI Projects'
    key = md5(realm).hexdigest()
    gpg.preset_passphrase(key, passwd)
    print('SUCCESS: Password saved in gpg-agent for user %s'%os.environ['USER'])
if __name__ == '__main__':
main()
| #!/usr/bin/env python
"""
Copyright 2016 ARC Centre of Excellence for Climate Systems Science
author: Scott Wales <scott.wales@unimelb.edu.au>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . import gpg
from getpass import getpass
from hashlib import md5
import requests
import os
def main():
realm = '<https://access-svn.nci.org.au:443> AccessCollab'
key = md5(realm).hexdigest()
passwd = getpass('Please enter your password for user %s: '%os.environ['USER'])
# Test the password
r = requests.get('https://access-svn.nci.org.au/svn/um',
auth=(os.environ['USER'], passwd), verify=False)
if r.status_code == 401:
print('ERROR: Bad password for user %s'%os.environ['USER'])
return
r.raise_for_status()
gpg.preset_passphrase(key, passwd)
print('SUCCESS: Password saved in gpg-agent for user %s'%os.environ['USER'])
nemo_realm = '<https://access-svn.nci.org.au:443> nemo'
nemo_key = md5(nemo_realm).hexdigest()
gpg.preset_passphrase(nemo_key, passwd)
if __name__ == '__main__':
main()
| apache-2.0 | Python |
5bc8a73a7a080109abc642c3fc1908e0709a6a47 | refactor urls | dubirajara/django_my_ideas_wall,dubirajara/django_my_ideas_wall,dubirajara/django_my_ideas_wall,dubirajara/django_my_ideas_wall | myideas/urls.py | myideas/urls.py | """myideas URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from myideas.core import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.home, name='home'),
url(r'^ideas/(?P<slug>[\w-]+)/$', views.idea_details, name='idea_details'),
url(r'^update/(?P<slug>[\w-]+)/$', views.idea_update, name='update'),
url(r'^(?P<slug>[\w-]+)/delete/$', views.idea_delete, name='delete'),
url(r'^profile/(\w+)/$', views.profile, name='profile'),
url(r'^by_tags/(?P<tags>[\w-]+)/$', views.by_tags, name='by_tags'),
url(r'^ideas_form/', views.idea_create, name='ideas_form'),
url(r'^accounts/', include('registration.backends.default.urls')),
url(r'^oauth/', include('social.apps.django_app.urls', namespace='social')),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
admin.site.site_header = 'Ideas Webapp Admin' | """myideas URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from myideas.core import views
#from myideas.core.views import IdeaUpdateView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', views.home, name='home'),
url(r'^ideas/(?P<slug>[\w-]+)/$', views.idea_details, name='idea_details'),
url(r'^update/(?P<slug>[\w-]+)/$', views.idea_update, name='update'),
url(r'^(?P<slug>[\w-]+)/delete/$', views.idea_delete, name='delete'),
url(r'^profile/(\w+)/$', views.profile, name='profile'),
url(r'^by_tags/(?P<tags>[\w-]+)/$', views.by_tags, name='by_tags'),
url(r'^ideas_form/', views.idea_create, name='ideas_form'),
url(r'^accounts/', include('registration.backends.default.urls')),
url(r'^oauth/', include('social.apps.django_app.urls', namespace='social')),
#url(r'^ideas/(?P<slug>[-\w]+)/update/$', IdeaUpdateView.as_view(), name='update'),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
admin.site.site_header = 'Ideas Webapp Admin' | agpl-3.0 | Python |
e0b7217caaf4b94c879f43f2ee95584c469687db | Fix o8d importer to read card IDs to make the sanitized cards | mrroach/CentralServer,mrroach/CentralServer,mrroach/CentralServer | csrv/model/read_o8d.py | csrv/model/read_o8d.py | # Read an OCTGN deck
from xml.etree import ElementTree
def read_file(filename):
    """Read an OCTGN deck file at *filename* and parse it (see read())."""
    # Use a context manager so the file handle is closed deterministically
    # instead of relying on garbage collection.
    with open(filename) as f:
        return read(f.read())
def read(filedata):
    """Parse OCTGN deck XML into ``(identity_card, [card, ...])``.

    A section containing exactly one card is treated as the identity
    section; every other section contributes to the deck list.  Each card
    is emitted once per its ``qty`` attribute, sanitized to "Card" plus
    the last five characters of its OCTGN id.
    """
    root = ElementTree.fromstring(filedata)
    identity = []
    cards = []
    # Element.getchildren() was deprecated and removed in Python 3.9;
    # iterating the element directly is the supported equivalent.
    for section in root:
        dest = identity if len(section) == 1 else cards
        for card in section:
            for _ in range(int(card.get('qty'))):
                # Read the last 5 digits of card#id
                dest.append("Card{}".format(card.get('id')[-5:]))
    return (identity[0], cards)
| # Read an OCTGN deck
from xml.etree import ElementTree
def read_file(filename):
    """Read an OCTGN deck file at *filename* and parse it (see read())."""
    # Use a context manager so the file handle is closed deterministically
    # instead of relying on garbage collection.
    with open(filename) as f:
        return read(f.read())
def read(filedata):
root = ElementTree.fromstring(filedata)
identity = []
cards = []
for section in root.getchildren():
if len(section) == 1:
dest = identity
else:
dest = cards
for card in section.getchildren():
for i in range(int(card.get('qty'))):
dest.append(card.text)
return (identity[0], cards)
| apache-2.0 | Python |
bc2aee65bf486691f25081e2ca2c319a4e18af33 | simplify `examples.pn_diode` | scott-maddox/obpds | src/examples/pn_diode.py | src/examples/pn_diode.py | #
# Copyright (c) 2015, Scott J Maddox
#
# This file is part of Open Band Parameters Device Simulator (OBPDS).
#
# OBPDS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OBPDS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OBPDS. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
# Make sure we import the local obpds version
import os
import sys
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from obpds import *
# Layers
p = Layer(1*um, Material(GaAs, 1e17/cm3))
n = Layer(1*um, Material(GaAs, -1e17/cm3))
# Device
d = Device(layers=[p,n])
# Simulate and show the equilibrium band profile using the default method.
d.show_equilibrium() | #
# Copyright (c) 2015, Scott J Maddox
#
# This file is part of Open Band Parameters Device Simulator (OBPDS).
#
# OBPDS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OBPDS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OBPDS. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
# Make sure we import the local obpds version
import os
import sys
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from obpds import GaAs, cm3, um, Material, Layer, OhmicContact, LayerStructure
# Layers
p = Layer(1*um, Material(GaAs, 1e17/cm3))
n = Layer(1*um, Material(GaAs, -1e17/cm3))
# Contacts
top = OhmicContact()
bottom = OhmicContact()
# Layer Structure
ls = LayerStructure([top, p, n, bottom])
# ls.show_composition() # show the composition vs. depth
# ls.show_doping() # show the doping vs. depth
# ls.show_flatband() # show the flatband profile vs. depth
# Simulate and show the equilibrium band profile using the default method.
ls.show_equilibrium() | agpl-3.0 | Python |
365d0b9ed1b9669964b4dbaead82098af793a8ad | Return empty list for get_service_tasks() rather than None | dcos/shakedown | shakedown/dcos/service.py | shakedown/dcos/service.py | from dcos import mesos
def get_service(service_name, inactive=False, completed=False):
    """Look up a framework by name and return its dict, or None if absent."""
    frameworks = mesos.get_master().frameworks(inactive=inactive,
                                               completed=completed)
    for framework in frameworks:
        if framework['name'] == service_name:
            return framework
    return None
def get_service_framework_id(service_name, inactive=False, completed=False):
    """Return the framework ID for a service, or None if it has none."""
    service = get_service(service_name, inactive, completed)
    if service is None:
        return None
    return service['id'] or None
def get_service_tasks(service_name, inactive=False, completed=False):
    """Return the task dicts for a service, or an empty list."""
    service = get_service(service_name, inactive, completed)
    if service is None or not service['tasks']:
        return []
    return service['tasks']
def get_service_ips(service_name, task_name=None, inactive=False, completed=False):
    """Return all IPs associated with a service, or an empty set.

    When *task_name* is given, only tasks with that name contribute IPs.
    """
    service_tasks = get_service_tasks(service_name, inactive, completed)
    ips = set()
    for task in service_tasks:
        # Skip tasks that don't match the requested task name (if given);
        # this collapses the previously duplicated if/else branch bodies.
        if task_name is not None and task['name'] != task_name:
            continue
        ip = task['statuses'][0]['container_status']['network_infos'][0]['ip_address']
        if ip:
            ips.add(ip)
    return ips
| from dcos import mesos
def get_service(service_name, inactive=False, completed=False):
""" Returns a dictionary describing a service, or None """
services = mesos.get_master().frameworks(inactive=inactive, completed=completed)
for service in services:
if service['name'] == service_name:
return service
return None
def get_service_framework_id(service_name, inactive=False, completed=False):
""" Returns the framework ID for a service, or None """
service = get_service(service_name, inactive, completed)
if service is not None and service['id']:
return service['id']
return None
def get_service_tasks(service_name, inactive=False, completed=False):
""" Returns all the task IDs associated with a service, or None """
service = get_service(service_name, inactive, completed)
if service is not None and service['tasks']:
return service['tasks']
return None
def get_service_ips(service_name, task_name=None, inactive=False, completed=False):
""" Returns all the IPS associated with a service, or None """
service_tasks = get_service_tasks(service_name, inactive, completed)
ips = set([])
for task in service_tasks:
if task_name is not None:
if task['name'] == task_name:
if task['statuses'][0]['container_status']['network_infos'][0]['ip_address']:
ips.add(task['statuses'][0]['container_status']['network_infos'][0]['ip_address'])
else:
if task['statuses'][0]['container_status']['network_infos'][0]['ip_address']:
ips.add(task['statuses'][0]['container_status']['network_infos'][0]['ip_address'])
return ips
| apache-2.0 | Python |
fe84b70fc8d61e59ba76aa0fffe6fba018c34272 | Add CRUD functionnal tests to service | stackforge/solum,devdattakulkarni/test-solum,ed-/solum,ed-/solum,gilbertpilz/solum,openstack/solum,gilbertpilz/solum,ed-/solum,openstack/solum,stackforge/solum,gilbertpilz/solum,devdattakulkarni/test-solum,gilbertpilz/solum,ed-/solum | functionaltests/api/v1/test_service.py | functionaltests/api/v1/test_service.py | # -*- coding: utf-8 -*-
#
# Copyright 2013 - Noorul Islam K M
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from functionaltests.api import base
sample_data = {"name": "test_service",
"description": "A test to create service",
"project_id": "project_id",
"user_id": "user_id",
"service_type": "mysql",
"read_only": True}
class TestServiceController(base.TestCase):
def _assert_output_expected(self, body_data, data):
self.assertEqual(body_data['user_id'], data['user_id'])
self.assertEqual(body_data['project_id'], data['project_id'])
self.assertEqual(body_data['description'], data['description'])
self.assertEqual(body_data['name'], data['name'])
self.assertEqual(body_data['service_type'], data['service_type'])
self.assertEqual(body_data['read_only'], data['read_only'])
self.assertIsNotNone(body_data['uuid'])
def _delete_service(self, uuid):
resp, _ = self.client.delete('v1/services/%s' % uuid)
self.assertEqual(resp.status, 204)
def _create_service(self):
jsondata = json.dumps(sample_data)
resp, body = self.client.post('v1/services', jsondata)
self.assertEqual(resp.status, 201)
out_data = json.loads(body)
uuid = out_data['uuid']
self.assertIsNotNone(uuid)
return uuid
def test_services_get_all(self):
resp, body = self.client.get('v1/services')
data = json.loads(body)
self.assertEqual(resp.status, 200)
self.assertEqual(data, [])
def test_services_create(self):
sample_json = json.dumps(sample_data)
resp, body = self.client.post('v1/services', sample_json)
self.assertEqual(resp.status, 201)
json_data = json.loads(body)
self._assert_output_expected(json_data, sample_data)
self._delete_service(json_data['uuid'])
def test_services_get(self):
uuid = self._create_service()
resp, body = self.client.get('v1/services/%s' % uuid)
self.assertEqual(resp.status, 200)
json_data = json.loads(body)
self._assert_output_expected(json_data, sample_data)
self._delete_service(uuid)
def test_services_put(self):
uuid = self._create_service()
updated_data = {"name": "test_service updated",
"description": "A test to create service updated",
"project_id": "project_id updated",
"user_id": "user_id updated",
"service_type": "mysql updated",
"read_only": False}
updated_json = json.dumps(updated_data)
resp, body = self.client.put('v1/services/%s' % uuid, updated_json)
self.assertEqual(resp.status, 200)
json_data = json.loads(body)
self._assert_output_expected(json_data, updated_data)
self._delete_service(uuid)
def test_services_delete(self):
uuid = self._create_service()
resp, body = self.client.delete('v1/services/%s' % uuid)
self.assertEqual(resp.status, 204)
self.assertEqual(body, '')
| # -*- coding: utf-8 -*-
#
# Copyright 2013 - Noorul Islam K M
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from functionaltests.api import base
class TestServiceController(base.TestCase):
def test_services_get_all(self):
resp, body = self.client.get('v1/services')
data = json.loads(body)
self.assertEqual(resp.status, 200)
self.assertEqual(data, [])
| apache-2.0 | Python |
5d054a20205d03bbc4e7c3acd9bc97bc25b989e8 | change migration dependency | praekelt/molo-gem,praekelt/molo-gem,praekelt/molo-gem | gem/migrations/0020_profiledatarule.py | gem/migrations/0020_profiledatarule.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-08-03 14:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtail_personalisation', '0013_auto_20170803_1654'),
('gem', '0019_change_setting_bbm_ga_account_to_bbm_ga_tracking_code'),
]
operations = [
migrations.CreateModel(
name='ProfileDataRule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('field', models.CharField(max_length=255)),
('operator', models.CharField(choices=[(b'lt', 'Less than'), (b'lte', 'Less than or equal'), (b'gt', 'Greater than'), (b'gte', 'Greater than or equal'), (b'eq', 'Equal'), (b'neq', 'Not equal'), (b'ol', 'Older than'), (b'ole', 'Older than or equal'), (b'yg', 'Younger than'), (b'yge', 'Younger than or equal'), (b'eqa', 'Of age'), (b'reg', 'Regex')], default=b'eq', help_text='Age operators work only on dates, please input the age you want to compare in "value". When using greater/less than on text field, it would compare it by alphabetical order, where dates are compared to the specified date by chronological order.', max_length=3)),
('value', models.CharField(help_text='If the selected field is a text field you can just input text. In case of dates, please use format "YYYY-MM-DD" and "YYYY-MM-DD HH:MM" for date-times. For regex please refer to the usage docs. If it is a choice field, please input anything, save and the error message displayed below this field should guide you with possible values.', max_length=255)),
('segment', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gem_profiledatarule_related', related_query_name='%(app_label)s_%(class)ss', to='wagtail_personalisation.Segment')),
],
options={
'verbose_name': 'Profile Data Rule',
},
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-08-03 14:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtail_personalisation', '0013_auto_20170803_1654'),
('gem', '0019_auto_20170815_1639'),
]
operations = [
migrations.CreateModel(
name='ProfileDataRule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('field', models.CharField(max_length=255)),
('operator', models.CharField(choices=[(b'lt', 'Less than'), (b'lte', 'Less than or equal'), (b'gt', 'Greater than'), (b'gte', 'Greater than or equal'), (b'eq', 'Equal'), (b'neq', 'Not equal'), (b'ol', 'Older than'), (b'ole', 'Older than or equal'), (b'yg', 'Younger than'), (b'yge', 'Younger than or equal'), (b'eqa', 'Of age'), (b'reg', 'Regex')], default=b'eq', help_text='Age operators work only on dates, please input the age you want to compare in "value". When using greater/less than on text field, it would compare it by alphabetical order, where dates are compared to the specified date by chronological order.', max_length=3)),
('value', models.CharField(help_text='If the selected field is a text field you can just input text. In case of dates, please use format "YYYY-MM-DD" and "YYYY-MM-DD HH:MM" for date-times. For regex please refer to the usage docs. If it is a choice field, please input anything, save and the error message displayed below this field should guide you with possible values.', max_length=255)),
('segment', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='gem_profiledatarule_related', related_query_name='%(app_label)s_%(class)ss', to='wagtail_personalisation.Segment')),
],
options={
'verbose_name': 'Profile Data Rule',
},
),
]
| bsd-2-clause | Python |
1c982d906d28b817e6dd1567f11f8f88ccbdf90c | handle post query API calls | biothings/biothings_explorer,biothings/biothings_explorer | biothings_explorer/dispatcher.py | biothings_explorer/dispatcher.py | # -*- coding: utf-8 -*-
"""
biothings_explorer.dispatcher
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains code that biothings_explorer use to communicate to and receive from APIs. It serves as a glue between "apicall" module and "api_output_parser" module.
"""
from collections import defaultdict
from .registry import Registry
from .apicall import BioThingsCaller
from .api_output_parser import OutputParser
class Dispatcher():
def __init__(self, edges, values, batch_mode=False):
self.edges = edges
self.registry = Registry().registry
self.batch_mode = batch_mode
self.values = self.preprocess_input_values(values)
self.caller = BioThingsCaller(batch_mode=batch_mode)
def preprocess_input_values(self, values):
if not self.batch_mode:
return values
else:
if type(values) == str:
return values
elif type(values) == list:
return ','.join(values)
else:
raise ValueError('{} should be str or list'.format(values))
def fetch_schema_mapping_file(self, api):
"""Fetch schema mapping file from the registry"""
return self.registry[api]['mapping']
def subset_mapping_file(self, edge, mapping_file):
"""Only maintain a subset of mapping file based on edge label"""
return {k:v for (k,v) in mapping_file.items() if k in ["@context", "@type", edge["label"]]}
def dispatch(self):
"""send request to and parse response from API"""
results = defaultdict(list)
for _edge in self.edges.values():
mapping = self.fetch_schema_mapping_file(_edge['api'])
subset_mapping = self.subset_mapping_file(_edge, mapping)
response = self.caller.call_api(_edge['api'],
_edge['input_field'],
_edge['output_field'],
self.values)
_res = OutputParser(response, subset_mapping,
_edge['label'],
self.batch_mode,
_edge['api']).parse()
if not self.batch_mode:
results[_edge['label']] += _res
else:
results[_edge['label']].append(_res)
return dict(results)
| # -*- coding: utf-8 -*-
"""
biothings_explorer.dispatcher
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains code that biothings_explorer use to communicate to and receive from APIs. It serves as a glue between "apicall" module and "api_output_parser" module.
"""
from collections import defaultdict
from .registry import Registry
from .apicall import BioThingsCaller
from .api_output_parser import OutputParser
class Dispatcher():
def __init__(self, edges, values):
self.edges = edges
self.registry = Registry().registry
self.values = values
self.caller = BioThingsCaller()
def fetch_schema_mapping_file(self, api):
"""Fetch schema mapping file from the registry"""
return self.registry[api]['mapping']
def subset_mapping_file(self, edge, mapping_file):
"""Only maintain a subset of mapping file based on edge label"""
return {k:v for (k,v) in mapping_file.items() if k in ["@context", "@type", edge["label"]]}
def dispatch(self):
"""send request to and parse response from API"""
results = defaultdict(list)
for _edge in self.edges.values():
mapping = self.fetch_schema_mapping_file(_edge['api'])
subset_mapping = self.subset_mapping_file(_edge, mapping)
response = self.caller.call_api(_edge['api'],
_edge['input_field'],
_edge['output_field'],
self.values)
_res = OutputParser(response, subset_mapping,
_edge['label'], _edge['api']).parse()
results[_edge['label']] += _res
return dict(results)
| apache-2.0 | Python |
b257c33d114faf719c3a5d6fa78c6f1654d7bcc3 | Change GAM overlapped knots unit test assert | michalkurka/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,h2oai/h2o-3,h2oai/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,michalkurka/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,h2oai/h2o-3 | h2o-py/tests/testdir_algos/gam/pyunit_PUBDEV_7798_overlapped_knots.py | h2o-py/tests/testdir_algos/gam/pyunit_PUBDEV_7798_overlapped_knots.py | import h2o
import numpy as np
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
from tests import pyunit_utils
def knots_error():
# load and prepare California housing dataset
np.random.seed(1234)
data = h2o.H2OFrame(
python_obj={'C1': list(np.random.randint(0, 9, size=1000)),
'target': list(np.random.randint(0, 2, size=1000))
})
# use only 3 features and transform into classification problem
feature_names = ['C1']
data['target'] = data['target'].asfactor()
# split into train and validation sets
train, test = data.split_frame([0.8], seed=1234)
# build the GAM model
h2o_model = H2OGeneralizedAdditiveEstimator(family='binomial',
gam_columns=feature_names,
scale=[1],
num_knots=[10],
)
try:
h2o_model.train(x=feature_names, y='target', training_frame=train)
assert False, "Number of knots validation should have failed"
except Exception as ex:
exception = str(ex)
assert ("H2OModelBuilderIllegalArgumentException" in exception) and \
(("has cardinality lower than the number of knots" in exception) or \
("does not have enough values to generate well-defined knots" in exception))
print("Error correctly raised when cardinality < num_knots")
if __name__ == "__main__":
h2o.init(ip='192.168.1.163', port=54321, strict_version_check=False)
pyunit_utils.standalone_test(knots_error())
else:
h2o.init(ip='192.168.1.163', port=54321, strict_version_check=False)
knots_error()
| import h2o
import numpy as np
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
from tests import pyunit_utils
def knots_error():
# load and prepare California housing dataset
np.random.seed(1234)
data = h2o.H2OFrame(
python_obj={'C1': list(np.random.randint(0, 9, size=1000)),
'target': list(np.random.randint(0, 2, size=1000))
})
# use only 3 features and transform into classification problem
feature_names = ['C1']
data['target'] = data['target'].asfactor()
# split into train and validation sets
train, test = data.split_frame([0.8], seed=1234)
# build the GAM model
h2o_model = H2OGeneralizedAdditiveEstimator(family='binomial',
gam_columns=feature_names,
scale=[1],
num_knots=[10],
)
try:
h2o_model.train(x=feature_names, y='target', training_frame=train)
assert False, "Number of knots validation should have failed"
except Exception as ex:
exception = str(ex)
assert ("H2OModelBuilderIllegalArgumentException" in exception) or \
("has cardinality lower than the number of knots" in exception) or \
("does not have enough values to generate well-defined knots" in exception)
print("Error correctly raised when cardinality < num_knots")
if __name__ == "__main__":
pyunit_utils.standalone_test(knots_error())
else:
knots_error()
| apache-2.0 | Python |
d9b283647cfb4909faf161090012d21699b313e8 | Improve mail sending logic. | cjluo/money-monkey | email_sender.py | email_sender.py | import os
import smtplib
import logging
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
class EmailSender:
def __init__(self, address):
self._address = address
def send_email(self, title, image_pairs, delete=True):
from_address = 'monkey@luckyserver.com'
to_address = self._address
msg = MIMEMultipart('related')
msg['Subject'] = title
msg['From'] = from_address
msg['To'] = to_address
msg_alt = MIMEMultipart('alternative')
msg.attach(msg_alt)
i = 0
text = ''
for symbol in image_pairs:
text += '<img src="cid:image' + str(i) + '"><br>'
image_file = open(image_pairs[symbol], 'rb').read()
image = MIMEImage(image_file, name=symbol)
image.add_header('Content-ID', '<image' + str(i) + '>')
msg.attach(image)
i += 1
text = MIMEText(text, 'html')
msg_alt.attach(text)
logger = logging.getLogger()
s = smtplib.SMTP('localhost')
try:
s.sendmail(from_address, to_address, msg.as_string())
logger.info("mail sent, subject %s" % title)
except Exception as exception:
logger.error("mail failed %s" % str(exception))
finally:
s.quit()
if delete:
for symbol in image_pairs:
os.remove(image_pairs[symbol])
| import os
import smtplib
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
class EmailSender:
def __init__(self, address):
self._address = address
def send_email(self, title, image_pairs, delete=True):
from_address = 'monkey@luckyserver.com'
to_address = self._address
msg = MIMEMultipart('related')
msg['Subject'] = title
msg['From'] = from_address
msg['To'] = to_address
msg_alt = MIMEMultipart('alternative')
msg.attach(msg_alt)
i = 0
text = ''
for symbol in image_pairs:
text += '<img src="cid:image' + str(i) + '"><br>'
image_file = open(image_pairs[symbol], 'rb').read()
image = MIMEImage(image_file, name=symbol)
image.add_header('Content-ID', '<image' + str(i) + '>')
msg.attach(image)
i += 1
text = MIMEText(text, 'html')
msg_alt.attach(text)
s = smtplib.SMTP('localhost')
s.sendmail(from_address, to_address, msg.as_string())
s.quit()
if delete:
for symbol in image_pairs:
os.remove(image_pairs[symbol])
| mit | Python |
62bff21a5931fb55a41de893ad59c90202dcb84f | Hide dimagi users | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | auditcare/management/commands/generate_request_report.py | auditcare/management/commands/generate_request_report.py | import csv
import re
from optparse import make_option
from django.contrib.auth.models import User
from django.core.management.base import LabelCommand
from corehq.apps.users.models import WebUser
from dimagi.utils.couch.database import iter_docs
from auditcare.models import NavigationEventAudit
def navigation_event_ids_by_user(user):
database = NavigationEventAudit.get_db()
return {row['id'] for row in database.view('auditcare/urlpath_by_user_date',
startkey=[user ],
endkey=[user, {}],
reduce=False,
include_docs=False,
)}
def request_was_made_to_domain(domain, request_path):
return request_path.startswith('/a/' + domain + '/')
def log_events(writer, domain, user, override_user=""):
for event in iter_docs(NavigationEventAudit.get_db(), navigation_event_ids_by_user(user)):
doc = NavigationEventAudit.wrap(event)
if request_was_made_to_domain(domain, doc.request_path):
log_event(writer, doc, override_user)
def log_event(writer, event, override_user=""):
if override_user:
event.user = override_user
writer.writerow([event.user, event.event_date, event.ip_address, event.request_path])
class Command(LabelCommand):
args = 'domain filename'
help = """Generate request report"""
option_list = LabelCommand.option_list +\
(make_option('--display-superuser', action='store_true',
dest='display_superuser', default=False,
help="Include superusers in report, otherwise 'Dimagi User'"),)
def handle(self, *args, **options):
domain, filename = args
display_superuser = options["display_superuser"]
dimagi_username = ""
if not display_superuser:
dimagi_username = "Dimagi Support"
users = {u.username for u in WebUser.by_domain(domain)}
super_users = {u['username'] for u in User.objects.filter(is_superuser=True).values('username')}
super_users = super_users - users
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
for user in users:
log_events(writer, domain, user)
for user in super_users:
log_events(writer, domain, user, dimagi_username)
| import csv
import re
from optparse import make_option
from django.contrib.auth.models import User
from django.core.management.base import LabelCommand
from corehq.apps.users.models import WebUser
from dimagi.utils.couch.database import iter_docs
from auditcare.models import NavigationEventAudit
def navigation_event_ids_by_user(user):
database = NavigationEventAudit.get_db()
return {row['id'] for row in database.view('auditcare/urlpath_by_user_date',
startkey=[user ],
endkey=[user, {}],
reduce=False,
include_docs=False,
)}
def request_was_made_to_domain(domain, request_path):
return request_path.startswith('/a/' + domain + '/')
def get_users(domain, no_superuser=False):
users = [u.username for u in WebUser.by_domain(domain)]
if not no_superuser:
super_users = [u['username'] for u in User.objects.filter(is_superuser=True).values('username')]
return set(users + super_users)
class Command(LabelCommand):
args = 'domain filename'
help = """Generate request report"""
option_list = LabelCommand.option_list +\
(make_option('--no-superuser', action='store_true', dest='no_superuser', default=False,
help="Include superusers in report"),)
def handle(self, *args, **options):
domain, filename = args
no_superuser = options["no_superuser"]
users = get_users(domain, no_superuser)
with open(filename, 'wb') as csvfile:
writer = csv.writer(csvfile)
for user in users:
for event in iter_docs(NavigationEventAudit.get_db(), navigation_event_ids_by_user(user)):
doc = NavigationEventAudit.wrap(event)
if request_was_made_to_domain(domain, doc.request_path):
writer.writerow([doc.user, doc.event_date, doc.ip_address, doc.request_path])
| bsd-3-clause | Python |
5e96f80047205112f54c9798584d42a07560086d | Handle invalid hash | django-blog-zinnia/zinnia-url-shortener-hashids | zinnia_hashids/views.py | zinnia_hashids/views.py | """Views for Zinnia Hashids"""
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.views.generic.base import RedirectView
from zinnia.models.entry import Entry
from zinnia_hashids.factory import hashids
class EntryHashids(RedirectView):
"""
View for handling the hashids of an Entry,
simply do a redirection.
"""
permanent = True
def get_redirect_url(self, **kwargs):
"""
Get entry corresponding to 'pk' encoded by hashids
in the 'token' variable and return the get_absolute_url
of the entry.
"""
try:
unhashed_pk = hashids.decode(kwargs['token'])[0]
except IndexError:
raise Http404('Invalid hash')
entry = get_object_or_404(Entry.published, pk=unhashed_pk)
return entry.get_absolute_url()
| """Views for Zinnia Hashids"""
from django.shortcuts import get_object_or_404
from django.views.generic.base import RedirectView
from zinnia.models.entry import Entry
from zinnia_hashids.factory import hashids
class EntryHashids(RedirectView):
"""
View for handling the hashids of an Entry,
simply do a redirection.
"""
permanent = True
def get_redirect_url(self, **kwargs):
"""
Get entry corresponding to 'pk' encoded by hashids
in the 'token' variable and return the get_absolute_url
of the entry.
"""
unhashed_pk = hashids.decode(kwargs['token'])[0]
entry = get_object_or_404(Entry.published, pk=unhashed_pk)
return entry.get_absolute_url()
| bsd-3-clause | Python |
b81cf1def5027f6a905ba5f47ca137bed1cf83e4 | Add service tests | infOpen/ansible-role-elasticsearch | tests/testinfra/test_installation.py | tests/testinfra/test_installation.py | """
Role tests
"""
import pytest
# pytestmark = pytest.mark.docker_images(
pytestmark = pytest.mark.docker_images('infopen/ubuntu-xenial-ssh-py27:0.2.0')
def test_packages(Package):
"""
Tests about packages installed on all systems
"""
packages = [
'python-apt-common', 'python-apt', 'elasticsearch', 'openjdk-8-jre'
]
for package in packages:
assert Package(package).is_installed is True
def test_group(Group):
"""
Test about elasticsearch group
"""
assert Group('elasticsearch').exists
def test_user(User):
"""
Test about elasticsearch user
"""
user = User('elasticsearch')
assert user.exists
assert user.group == 'elasticsearch'
assert user.shell == '/bin/false'
assert user.home == '/home/elasticsearch'
def test_data_folder(File):
"""
Test about elasticsearch folders
"""
folder = File('/var/lib/elasticsearch')
assert folder.exists
assert folder.is_directory
assert folder.user == 'elasticsearch'
assert folder.group == 'elasticsearch'
def test_config_files(File):
"""
Test about all configuration files
"""
files = [
'/etc/elasticsearch/elasticsearch.yml',
'/etc/elasticsearch/logging.yml',
'/etc/default/elasticsearch',
]
for cur_file in files:
cfg_file = File(cur_file)
assert cfg_file.exists
assert cfg_file.is_file
assert cfg_file.user == 'elasticsearch'
assert cfg_file.group == 'elasticsearch'
logrotate_file = File('/etc/logrotate.d/elasticsearch')
assert logrotate_file.exists
assert logrotate_file.is_file
assert logrotate_file.user == 'root'
assert logrotate_file.group == 'root'
def test_process(Process):
"""
Test about elasticsearch processus
"""
assert len(Process.filter(user='elastic+')) == 1
def test_service(Command, Service, Socket):
"""
Test about elasticsearch service
"""
assert Service('ssh').is_enabled
assert Service('ssh').is_running
assert Command('systemctl status sshd').rc == 0
assert Socket("tcp://0.0.0.0:22").is_listening
| """
Role tests
"""
import pytest
# pytestmark = pytest.mark.docker_images(
pytestmark = pytest.mark.docker_images('infopen/ubuntu-xenial-ssh-py27:0.2.0')
def test_packages(Package):
"""
Tests about packages installed on all systems
"""
packages = [
'python-apt-common', 'python-apt', 'elasticsearch', 'openjdk-8-jre'
]
for package in packages:
assert Package(package).is_installed is True
def test_group(Group):
"""
Test about elasticsearch group
"""
assert Group('elasticsearch').exists
def test_user(User):
"""
Test about elasticsearch user
"""
user = User('elasticsearch')
assert user.exists
assert user.group == 'elasticsearch'
assert user.shell == '/bin/false'
assert user.home == '/home/elasticsearch'
def test_data_folder(File):
"""
Test about elasticsearch folders
"""
folder = File('/var/lib/elasticsearch')
assert folder.exists
assert folder.is_directory
assert folder.user == 'elasticsearch'
assert folder.group == 'elasticsearch'
def test_config_files(File):
"""
Test about all configuration files
"""
files = [
'/etc/elasticsearch/elasticsearch.yml',
'/etc/elasticsearch/logging.yml',
'/etc/default/elasticsearch',
]
for cur_file in files:
cfg_file = File(cur_file)
assert cfg_file.exists
assert cfg_file.is_file
assert cfg_file.user == 'elasticsearch'
assert cfg_file.group == 'elasticsearch'
logrotate_file = File('/etc/logrotate.d/elasticsearch')
assert logrotate_file.exists
assert logrotate_file.is_file
assert logrotate_file.user == 'root'
assert logrotate_file.group == 'root'
def test_process(Process):
"""
Test about elasticsearch processus
"""
assert len(Process.filter(user='elastic+')) == 1
| mit | Python |
ffd530c147c0ab5aa503c273a25438335ee39474 | Add a basic raid model (copying off of Firebase). | bryanveloso/avalonstar-tv,bryanveloso/avalonstar-tv,bryanveloso/avalonstar-tv | avalonstar/apps/broadcasts/models.py | avalonstar/apps/broadcasts/models.py | # -*- coding: utf-8 -*-
from django.db import models
from apps.games.models import Game
class Series(models.Model):
name = models.CharField(max_length=200)
class Meta:
ordering = ['name']
verbose_name_plural = u'series'
def __unicode__(self):
return u'%s' % self.name
@staticmethod
def autocomplete_search_fields():
return ('name__exact', 'name__icontains')
class Broadcast(models.Model):
# Metadata.
number = models.IntegerField(blank=True, null=True)
airdate = models.DateField()
status = models.CharField(blank=True, max_length=200,
help_text=u'Loosely related to Twitch\'s status field. Does not need to match. Will display on overlays.')
notes = models.TextField(blank=True)
# Connections.
games = models.ManyToManyField(Game, related_name='appears_on')
series = models.ForeignKey(Series, blank=True, null=True, related_name='broadcasts',
help_text=u'Is this episode part of an ongoing series (i.e., "Whatever Wednesdays", etc.)?')
# Statuses.
is_charity = models.BooleanField('is for charity?', default=False,
help_text=u'Is a charity fundraiser involved in this episode?')
is_marathon = models.BooleanField('is a marathon?', default=False,
help_text=u'Is this a marathon episode (longer than 12 hours)?')
class Meta:
get_latest_by = 'airdate'
ordering = ['-airdate']
def __unicode__(self):
return u'Episode %s' % self.number
class Raid(models.Model):
broadcast = models.ForeignKey(Broadcast, related_name='raids')
raider = models.CharField(blank=True, max_length=200)
timestamp = models.CharField(blank=True, max_length=13,
help_text=u'Entered as a weird ass UNIX timestamp for legacy Firebase reasons.')
class Meta:
order_with_respect_to = u'broadcast'
| # -*- coding: utf-8 -*-
from django.db import models
from apps.games.models import Game
class Series(models.Model):
name = models.CharField(max_length=200)
class Meta:
ordering = ['name']
verbose_name_plural = u'series'
def __unicode__(self):
return u'%s' % self.name
@staticmethod
def autocomplete_search_fields():
return ('name__exact', 'name__icontains')
class Broadcast(models.Model):
# Metadata.
number = models.IntegerField(blank=True, null=True)
airdate = models.DateField()
status = models.CharField(blank=True, max_length=200,
help_text=u'Loosely related to Twitch\'s status field. Does not need to match. Will display on overlays.')
notes = models.TextField(blank=True)
# Connections.
games = models.ManyToManyField(Game, related_name='appears_on')
series = models.ForeignKey(Series, blank=True, null=True, related_name='broadcasts',
help_text=u'Is this episode part of an ongoing series (i.e., "Whatever Wednesdays", etc.)?')
# Statuses.
is_charity = models.BooleanField('is for charity?', default=False,
help_text=u'Is a charity fundraiser involved in this episode?')
is_marathon = models.BooleanField('is a marathon?', default=False,
help_text=u'Is this a marathon episode (longer than 12 hours)?')
class Meta:
get_latest_by = 'airdate'
ordering = ['-airdate']
def __unicode__(self):
return u'Episode %s' % self.number
| apache-2.0 | Python |
a1f5585ac9b50d4ae61278d5c66bc141c7929fa9 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/01e5021627fcdfd0fcae3456606df43764dae69b. | Intel-tensorflow/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,karllessard/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensor
flow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "01e5021627fcdfd0fcae3456606df43764dae69b"
TFRT_SHA256 = "06053bb593f95542bdb0ce5d8cd59247479e2149d7f86c762ef09259e35b456a"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "28bd500e25ee9535bf35aba5913761331654246e"
TFRT_SHA256 = "6c27234b671fdb97635e443c90136e0c9fd470de0c5e28aae4e556fc76b394e5"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| apache-2.0 | Python |
e49ca700f94991d9e8f55ba0326f03f58662db87 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/75fd5a749fc9a17b438ff73143f3270069341730. | karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,yongtang/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflo
w-pywrap_tf_optimizer,yongtang/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "75fd5a749fc9a17b438ff73143f3270069341730"
TFRT_SHA256 = "d3a51600e7dcb992f0bdccabc5f0d6d4f9a4eaaac0a792676382785676862dc6"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "efb0edfc9451adb9b6db659866c36226163ead2f"
TFRT_SHA256 = "3fa412c5a202c0008f0b358366fd4d6a37ea3619fb7a7ad69fe1b3ecfddc037e"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| apache-2.0 | Python |
1ba3ce3410f6b15195adf1512b06f4be42b80624 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/98379ff657e2c7c14d7008c750de6c6cd7b19224. | paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,karllessard/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,karllessard/tensorflow,tensorflow/tensorfl
ow-pywrap_saved_model,Intel-tensorflow/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,karllessard/tensorflow | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "98379ff657e2c7c14d7008c750de6c6cd7b19224"
TFRT_SHA256 = "24e9d0f2af0d7c1ab4b939cb155112496950b97bf47d917635972f97d39bdbbb"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "4bac11449d3f91c1a63620538ec22ff26ba626b2"
TFRT_SHA256 = "fd2366a9cfce4ec4a495196b911105d90f84e5e7b4725fdc75dc28b49005d6f7"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| apache-2.0 | Python |
1d836a832bc4f5263be8918f903d3e5d446c9c8f | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/5bc3e29b4c5a01fdad04bd973a82dc0c1400edec. | tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,sarvex/tensorflow,karllessard/tensorflow,frreiss/tensorflow-fred,karllessard/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,yongtang/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,gautam1858/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,sarvex/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,yongtang/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,karllessa
rd/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,frreiss/tensorflow-fred,karllessard/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,paolodedios/tensorflow,sarvex/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,frreiss/tensorflow-fred,yongtang/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,karllessard/tensorflow,karllessard/tensorflow,sarvex/tensorflow,frreiss/tensorflow-fred,yongtang/tensorflow,gautam1858/tensorflow,sarvex/tensorflow,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,tensorflow/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,sarvex/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,karllessard/tensorflow | 
third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "5bc3e29b4c5a01fdad04bd973a82dc0c1400edec"
TFRT_SHA256 = "d373392efc65bf624149d2cd8f5a0895b68c63025416f62074519720640dcbcf"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "64633c22f096f63117104a5ccf88c2ec2ec1022a"
TFRT_SHA256 = "9fb3bd426daf66c0f8f8cd81c4392548bd971d460f36c5c013b851007bd1c569"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| apache-2.0 | Python |
7eb5dbd50a3790e8bfd821ac06d74427619e09f4 | generalize detection of a C compiler for use as the preprocessor of the legion C header file for python bindings | StanfordLegion/legion,StanfordLegion/legion,StanfordLegion/legion,StanfordLegion/legion,StanfordLegion/legion,StanfordLegion/legion,StanfordLegion/legion,StanfordLegion/legion | bindings/python/legion_cffi_build.py | bindings/python/legion_cffi_build.py | #!/usr/bin/env python3
# Copyright 2021 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import subprocess
def find_legion_header():
def try_prefix(prefix_dir):
legion_h_path = os.path.join(prefix_dir, 'legion.h')
if os.path.exists(legion_h_path):
return prefix_dir, legion_h_path
# We should always be in an in-source build, so just find the file
# relative to the source directory.
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
runtime_dir = os.path.join(root_dir, 'runtime')
result = try_prefix(runtime_dir)
if result:
return result
raise Exception('Unable to locate legion.h header file')
def build(defines_dir, output_dir):
prefix_dir, legion_h_path = find_legion_header()
if defines_dir is not None:
# For CMake, need to be told where the defines directory is:
build_flags = ['-I', defines_dir]
else:
# For Make, legion_defines.h is in the source directory:
build_flags = ['-I', os.path.dirname(os.path.realpath(__file__))]
# Check to see if the user specified a C compiler with the CC environment variable, if not assume there is a built-in C compiler
compiler = os.getenv('CC', 'cc')
header = subprocess.check_output([compiler, '-I', prefix_dir] + build_flags + ['-DLEGION_USE_PYTHON_CFFI', '-E', '-P', legion_h_path]).decode('utf-8')
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'legion_cffi.py.in')) as f:
content = f.read()
content = content.format(header=repr(header))
if output_dir is None:
output_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(output_dir, 'legion_cffi.py'), 'wb') as f:
f.write(content.encode('utf-8'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--defines-dir', required=False)
parser.add_argument('--output-dir', required=False)
args = parser.parse_args()
build(args.defines_dir, args.output_dir)
| #!/usr/bin/env python3
# Copyright 2021 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import os
import subprocess
def find_legion_header():
def try_prefix(prefix_dir):
legion_h_path = os.path.join(prefix_dir, 'legion.h')
if os.path.exists(legion_h_path):
return prefix_dir, legion_h_path
# We should always be in an in-source build, so just find the file
# relative to the source directory.
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
runtime_dir = os.path.join(root_dir, 'runtime')
result = try_prefix(runtime_dir)
if result:
return result
raise Exception('Unable to locate legion.h header file')
def build(defines_dir, output_dir):
prefix_dir, legion_h_path = find_legion_header()
if defines_dir is not None:
# For CMake, need to be told where the defines directory is:
build_flags = ['-I', defines_dir]
else:
# For Make, legion_defines.h is in the source directory:
build_flags = ['-I', os.path.dirname(os.path.realpath(__file__))]
header = subprocess.check_output(['gcc', '-I', prefix_dir] + build_flags + ['-DLEGION_USE_PYTHON_CFFI', '-E', '-P', legion_h_path]).decode('utf-8')
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'legion_cffi.py.in')) as f:
content = f.read()
content = content.format(header=repr(header))
if output_dir is None:
output_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(output_dir, 'legion_cffi.py'), 'wb') as f:
f.write(content.encode('utf-8'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--defines-dir', required=False)
parser.add_argument('--output-dir', required=False)
args = parser.parse_args()
build(args.defines_dir, args.output_dir)
| apache-2.0 | Python |
1df54f205b6f88a5760ea57476527d25826d932d | Revert of Revert "[Telemetry] Remove power metric from jsgamebench benchmark" (https://codereview.chromium.org/199333006/) | Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,Jonekee/chromium.src,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,Just-D/chromium-1,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,Chilledheart/chromium,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,Just-D/chromium-1,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,dushu1203/chromium.src,dednal/chromium.src,Just-D/chromium-1,M4sse/chromium.src,ltilve/chromium,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,markYoungH/chromium.src,jaruba/chromium.src,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,ltilve/chromium,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,Just-D/chromium-1,hgl888/chromium-crosswalk-efl,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,krieger-od/nwjs_chromium.src,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ltilve/chromium,ondra-novak/chromium.src,krieger-od/nwjs_
chromium.src,jaruba/chromium.src,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,fujunwei/chromium-crosswalk,M4sse/chromium.src,Jonekee/chromium.src,Just-D/chromium-1,M4sse/chromium.src,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,axinging/chromium-crosswalk,axinging/chromium-crosswalk,ltilve/chromium,dednal/chromium.src,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,Jonekee/chromium.src,hgl888/chromium-crosswalk,dushu1203/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,dednal/chromium.src,Jonekee/chromium.src,ondra-novak/chromium.src,Chilledheart/chromium,patrickm/chromium.src,chuan9/chromium-crosswalk,dednal/chromium.src,ondra-novak/chromium.src,ltilve/chromium,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,markYoungH/chromium.src,chuan9/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,littlstar/chromium.src,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,M4sse/chromium.src,Chilledheart/chromium,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,littlstar/chromium.src,chuan9/chromium-crosswalk,markYoungH/chromium.src,dushu1203/chromium.src,patrickm/chromium.src,Fireblend/chromium-crosswalk,ltilve/chromium,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,littlstar/chromium.src,Just-D/chromium-1,axinging/chromium-crosswalk,hgl888
/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,patrickm/chromium.src,Just-D/chromium-1,Fireblend/chromium-crosswalk,Jonekee/chromium.src,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,dushu1203/chromium.src,markYoungH/chromium.src,ondra-novak/chromium.src,hgl888/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,Chilledheart/chromium,patrickm/chromium.src,littlstar/chromium.src,Chilledheart/chromium,fujunwei/chromium-crosswalk,dednal/chromium.src,bright-sparks/chromium-spacewalk,Chilledheart/chromium,Just-D/chromium-1,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,patrickm/chromium.src,jaruba/chromium.src,jaruba/chromium.src,fujunwei/chromium-crosswalk,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,patrickm/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,dushu1203/chromium.src,Jonekee/chromium.src,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,jaruba/chromium.src,patrickm/chromium.src,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,dednal/chromium.src,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,M4sse/chromium.src,axinging/chromium-crosswalk,M4sse/chromium.src,krieger-od/nwjs_chromium.src,Chilledheart/chromium,patrickm/chromium.src,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,krieger-od/nwjs_chromium.src,jaruba/chromium.src,dednal/chromium.src,ma
rkYoungH/chromium.src,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,dednal/chromium.src,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,hgl888/chromium-crosswalk,patrickm/chromium.src,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,littlstar/chromium.src,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src | tools/perf/benchmarks/jsgamebench.py | tools/perf/benchmarks/jsgamebench.py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Facebook's JSGameBench benchmark."""
import os
from telemetry import test
from telemetry.page import page_measurement
from telemetry.page import page_set
class _JsgamebenchMeasurement(page_measurement.PageMeasurement):
def __init__(self):
super(_JsgamebenchMeasurement, self).__init__()
def MeasurePage(self, page, tab, results):
tab.ExecuteJavaScript('UI.call({}, "perftest")')
tab.WaitForJavaScriptExpression(
'document.getElementById("perfscore0") != null', 1800)
js_get_results = 'document.getElementById("perfscore0").innerHTML'
result = int(tab.EvaluateJavaScript(js_get_results))
results.Add('Score', 'score (bigger is better)', result)
class Jsgamebench(test.Test):
"""Counts how many animating sprites can move around on the screen at once."""
test = _JsgamebenchMeasurement
def CreatePageSet(self, options):
return page_set.PageSet.FromDict({
'archive_data_file': '../page_sets/data/jsgamebench.json',
'pages': [
{ 'url': 'http://localhost/' }
]
}, os.path.dirname(__file__))
| # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Facebook's JSGameBench benchmark."""
import os
from metrics import power
from telemetry import test
from telemetry.page import page_measurement
from telemetry.page import page_set
class _JsgamebenchMeasurement(page_measurement.PageMeasurement):
def __init__(self):
super(_JsgamebenchMeasurement, self).__init__()
self._power_metric = power.PowerMetric()
def CustomizeBrowserOptions(self, options):
power.PowerMetric.CustomizeBrowserOptions(options)
def DidNavigateToPage(self, page, tab):
self._power_metric.Start(page, tab)
def MeasurePage(self, page, tab, results):
tab.ExecuteJavaScript('UI.call({}, "perftest")')
tab.WaitForJavaScriptExpression(
'document.getElementById("perfscore0") != null', 1800)
self._power_metric.Stop(page, tab)
self._power_metric.AddResults(tab, results)
js_get_results = 'document.getElementById("perfscore0").innerHTML'
result = int(tab.EvaluateJavaScript(js_get_results))
results.Add('Score', 'score (bigger is better)', result)
class Jsgamebench(test.Test):
    """Counts how many animating sprites can move around on the screen at once."""
    # Measurement class instantiated by the Telemetry test harness.
    test = _JsgamebenchMeasurement

    def CreatePageSet(self, options):
        """Build a one-page page set pointing at a locally served copy of the
        benchmark; the archive_data_file allows replay from a recording."""
        return page_set.PageSet.FromDict({
            'archive_data_file': '../page_sets/data/jsgamebench.json',
            'pages': [
                {'url': 'http://localhost/'}
            ]
        }, os.path.dirname(__file__))
| bsd-3-clause | Python |
d4e44429c49a0aa3523ec27a20484a71d92b4ddf | Update tests to use sparse tensor | nolanliou/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,manipopopo/tensorflow,hsaputra/tensorflow,ZhangXinNan/tensorflow,gunan/tensorflow,gojira/tensorflow,jbedorf/tensorflow,snnn/tensorflow,lakshayg/tensorflow,gautam1858/tensorflow,ravindrapanda/tensorflow,davidzchen/tensorflow,Bismarrck/tensorflow,davidzchen/tensorflow,jwlawson/tensorflow,ageron/tensorflow,adit-chandra/tensorflow,girving/tensorflow,zasdfgbnm/tensorflow,benoitsteiner/tensorflow-xsmm,AnishShah/tensorflow,nburn42/tensorflow,caisq/tensorflow,renyi533/tensorflow,benoitsteiner/tensorflow-xsmm,tensorflow/tensorflow,Xeralux/tensorflow,tensorflow/tensorflow-pywrap_saved_model,jwlawson/tensorflow,JingJunYin/tensorflow,drpngx/tensorflow,freedomtan/tensorflow,arborh/tensorflow,Intel-Corporation/tensorflow,karllessard/tensorflow,xodus7/tensorflow,aselle/tensorflow,xzturn/tensorflow,ghchinoy/tensorflow,gunan/tensorflow,jendap/tensorflow,snnn/tensorflow,ghchinoy/tensorflow,frreiss/tensorflow-fred,asimshankar/tensorflow,AnishShah/tensorflow,caisq/tensorflow,ghchinoy/tensorflow,rabipanda/tensorflow,drpngx/tensorflow,dendisuhubdy/tensorflow,jart/tensorflow,Intel-tensorflow/tensorflow,alsrgv/tensorflow,xzturn/tensorflow,drpngx/tensorflow,allenlavoie/tensorflow,xodus7/tensorflow,cxxgtxy/tensorflow,paolodedios/tensorflow,eaplatanios/tensorflow,lukeiwanski/tensorflow,AnishShah/tensorflow,ageron/tensorflow,ZhangXinNan/tensorflow,jbedorf/tensorflow,ppwwyyxx/tensorflow,ppwwyyxx/tensorflow,jwlawson/tensorflow,nolanliou/tensorflow,paolodedios/tensorflow,allenlavoie/tensorflow,Bismarrck/tensorflow,aldian/tensorflow,jbedorf/tensorflow,Mistobaan/tensorflow,JingJunYin/tensorflow,gunan/tensorflow,aselle/tensorflow,renyi533/tensorflow,brchiu/tensorflow,chemelnucfin/tensorflow,nburn42/tensorflow,freedomtan/tensorflow,xodus7/tensorflow,zasdfgbnm/tensorflow,Xeralux/tensorflow,ghchinoy/tensorflow,petewarden/tensorflow,caisq/tensorflow,tensorflow/t
ensorflow-experimental_link_static_libraries_once,renyi533/tensorflow,apark263/tensorflow,Intel-Corporation/tensorflow,jalexvig/tensorflow,dendisuhubdy/tensorflow,seanli9jan/tensorflow,xodus7/tensorflow,nolanliou/tensorflow,nburn42/tensorflow,ppwwyyxx/tensorflow,meteorcloudy/tensorflow,av8ramit/tensorflow,xzturn/tensorflow,Intel-Corporation/tensorflow,freedomtan/tensorflow,kobejean/tensorflow,apark263/tensorflow,xzturn/tensorflow,caisq/tensorflow,gautam1858/tensorflow,chemelnucfin/tensorflow,yongtang/tensorflow,dongjoon-hyun/tensorflow,rabipanda/tensorflow,seanli9jan/tensorflow,jhseu/tensorflow,ZhangXinNan/tensorflow,lukeiwanski/tensorflow,Intel-tensorflow/tensorflow,hfp/tensorflow-xsmm,theflofly/tensorflow,Intel-tensorflow/tensorflow,arborh/tensorflow,brchiu/tensorflow,snnn/tensorflow,aselle/tensorflow,jbedorf/tensorflow,ZhangXinNan/tensorflow,ravindrapanda/tensorflow,renyi533/tensorflow,arborh/tensorflow,gojira/tensorflow,davidzchen/tensorflow,Mistobaan/tensorflow,gautam1858/tensorflow,zasdfgbnm/tensorflow,brchiu/tensorflow,dongjoon-hyun/tensorflow,jendap/tensorflow,benoitsteiner/tensorflow-xsmm,frreiss/tensorflow-fred,dongjoon-hyun/tensorflow,kevin-coder/tensorflow-fork,tensorflow/tensorflow-pywrap_saved_model,gunan/tensorflow,girving/tensorflow,dendisuhubdy/tensorflow,zasdfgbnm/tensorflow,petewarden/tensorflow,benoitsteiner/tensorflow-xsmm,cxxgtxy/tensorflow,ageron/tensorflow,meteorcloudy/tensorflow,chemelnucfin/tensorflow,dancingdan/tensorflow,chemelnucfin/tensorflow,aldian/tensorflow,drpngx/tensorflow,alshedivat/tensorflow,nburn42/tensorflow,alsrgv/tensorflow,yanchen036/tensorflow,paolodedios/tensorflow,ageron/tensorflow,dendisuhubdy/tensorflow,alsrgv/tensorflow,DavidNorman/tensorflow,hfp/tensorflow-xsmm,jbedorf/tensorflow,av8ramit/tensorflow,aldian/tensorflow,nolanliou/tensorflow,kobejean/tensorflow,alsrgv/tensorflow,aam-at/tensorflow,snnn/tensorflow,allenlavoie/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,annarev/tensorflow,gojira
/tensorflow,jendap/tensorflow,renyi533/tensorflow,jhseu/tensorflow,codrut3/tensorflow,hfp/tensorflow-xsmm,gunan/tensorflow,ghchinoy/tensorflow,renyi533/tensorflow,freedomtan/tensorflow,paolodedios/tensorflow,dongjoon-hyun/tensorflow,asimshankar/tensorflow,asimshankar/tensorflow,snnn/tensorflow,lakshayg/tensorflow,kevin-coder/tensorflow-fork,apark263/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,DavidNorman/tensorflow,chemelnucfin/tensorflow,benoitsteiner/tensorflow-xsmm,aldian/tensorflow,chemelnucfin/tensorflow,aam-at/tensorflow,AnishShah/tensorflow,Intel-tensorflow/tensorflow,kevin-coder/tensorflow-fork,girving/tensorflow,benoitsteiner/tensorflow-xsmm,annarev/tensorflow,jwlawson/tensorflow,Bismarrck/tensorflow,gojira/tensorflow,av8ramit/tensorflow,dongjoon-hyun/tensorflow,gautam1858/tensorflow,seanli9jan/tensorflow,eadgarchen/tensorflow,eaplatanios/tensorflow,nolanliou/tensorflow,alshedivat/tensorflow,Bismarrck/tensorflow,adit-chandra/tensorflow,av8ramit/tensorflow,theflofly/tensorflow,zasdfgbnm/tensorflow,jart/tensorflow,ZhangXinNan/tensorflow,manipopopo/tensorflow,apark263/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Bismarrck/tensorflow,Intel-tensorflow/tensorflow,zasdfgbnm/tensorflow,sarvex/tensorflow,brchiu/tensorflow,adit-chandra/tensorflow,Mistobaan/tensorflow,Intel-Corporation/tensorflow,alshedivat/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,av8ramit/tensorflow,aselle/tensorflow,DavidNorman/tensorflow,karllessard/tensorflow,arborh/tensorflow,annarev/tensorflow,av8ramit/tensorflow,JingJunYin/tensorflow,ppwwyyxx/tensorflow,xzturn/tensorflow,alsrgv/tensorflow,snnn/tensorflow,alshedivat/tensorflow,jalexvig/tensorflow,aselle/tensorflow,kobejean/tensorflow,jbedorf/tensorflow,gunan/tensorflow,eaplatanios/tensorflow,adit-chandra/tensorflow,drpngx/tensorflow,theflofly/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,hehongliang/tensorflow,brchiu/tensorflow,frreiss/tensorflow-f
red,drpngx/tensorflow,jwlawson/tensorflow,xodus7/tensorflow,ravindrapanda/tensorflow,manipopopo/tensorflow,hehongliang/tensorflow,yanchen036/tensorflow,jhseu/tensorflow,petewarden/tensorflow,eadgarchen/tensorflow,caisq/tensorflow,aldian/tensorflow,DavidNorman/tensorflow,jendap/tensorflow,yanchen036/tensorflow,freedomtan/tensorflow,yongtang/tensorflow,JingJunYin/tensorflow,hsaputra/tensorflow,snnn/tensorflow,hsaputra/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,dongjoon-hyun/tensorflow,zasdfgbnm/tensorflow,rabipanda/tensorflow,dancingdan/tensorflow,gojira/tensorflow,asimshankar/tensorflow,AnishShah/tensorflow,yongtang/tensorflow,hehongliang/tensorflow,eaplatanios/tensorflow,DavidNorman/tensorflow,aam-at/tensorflow,aselle/tensorflow,manipopopo/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Xeralux/tensorflow,kobejean/tensorflow,dancingdan/tensorflow,Mistobaan/tensorflow,ageron/tensorflow,zasdfgbnm/tensorflow,meteorcloudy/tensorflow,adit-chandra/tensorflow,jbedorf/tensorflow,gautam1858/tensorflow,rabipanda/tensorflow,jalexvig/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,dongjoon-hyun/tensorflow,kevin-coder/tensorflow-fork,AnishShah/tensorflow,jwlawson/tensorflow,hsaputra/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,caisq/tensorflow,theflofly/tensorflow,asimshankar/tensorflow,annarev/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,kevin-coder/tensorflow-fork,davidzchen/tensorflow,freedomtan/tensorflow,allenlavoie/tensorflow,seanli9jan/tensorflow,brchiu/tensorflow,Xeralux/tensorflow,Bismarrck/tensorflow,gojira/tensorflow,hehongliang/tensorflow,petewarden/tensorflow,snnn/tensorflow,DavidNorman/tensorflow,davidzchen/tensorflow,adit-chandra/tensorflow,asimshankar/tensorflow,paolodedios/tensorflow,aam-at/tensorflow,yongtang/tensorflow,dongjoon-hyun/tensorflow,lakshayg/tensorflow,meteorcloudy/tensorflow,xzturn/tensorflow,kobejean/tensorflow,theflofly/tensorflow,xodus7/tensorflow,tensorflow/tensorflow
-pywrap_tf_optimizer,aam-at/tensorflow,ZhangXinNan/tensorflow,frreiss/tensorflow-fred,kevin-coder/tensorflow-fork,girving/tensorflow,jalexvig/tensorflow,chemelnucfin/tensorflow,cxxgtxy/tensorflow,eadgarchen/tensorflow,meteorcloudy/tensorflow,ageron/tensorflow,aselle/tensorflow,AnishShah/tensorflow,dendisuhubdy/tensorflow,gautam1858/tensorflow,Bismarrck/tensorflow,gautam1858/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,alshedivat/tensorflow,benoitsteiner/tensorflow-xsmm,Intel-Corporation/tensorflow,hfp/tensorflow-xsmm,ageron/tensorflow,ZhangXinNan/tensorflow,jart/tensorflow,renyi533/tensorflow,kevin-coder/tensorflow-fork,av8ramit/tensorflow,dendisuhubdy/tensorflow,jalexvig/tensorflow,gunan/tensorflow,hsaputra/tensorflow,davidzchen/tensorflow,jart/tensorflow,dendisuhubdy/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,codrut3/tensorflow,apark263/tensorflow,alshedivat/tensorflow,zasdfgbnm/tensorflow,hsaputra/tensorflow,ZhangXinNan/tensorflow,jendap/tensorflow,tensorflow/tensorflow,meteorcloudy/tensorflow,ppwwyyxx/tensorflow,jart/tensorflow,jbedorf/tensorflow,hehongliang/tensorflow,paolodedios/tensorflow,apark263/tensorflow,jwlawson/tensorflow,brchiu/tensorflow,chemelnucfin/tensorflow,dancingdan/tensorflow,theflofly/tensorflow,ravindrapanda/tensorflow,apark263/tensorflow,girving/tensorflow,yanchen036/tensorflow,eaplatanios/tensorflow,jhseu/tensorflow,yongtang/tensorflow,hfp/tensorflow-xsmm,freedomtan/tensorflow,jhseu/tensorflow,JingJunYin/tensorflow,nburn42/tensorflow,alsrgv/tensorflow,gunan/tensorflow,jbedorf/tensorflow,DavidNorman/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,hsaputra/tensorflow,asimshankar/tensorflow,lukeiwanski/tensorflow,nolanliou/tensorflow,Xeralux/tensorflow,kobejean/tensorflow,ppwwyyxx/tensorflow,brchiu/tensorflow,dongjoon-hyun/tensorflow,sarvex/tensorflow,Bismarrck/tensorflow,meteorcloudy/tensorflow,freedomtan/tensorflow,aam-at/tensorflow,girving/tensorflow,the
flofly/tensorflow,jart/tensorflow,dendisuhubdy/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,cxxgtxy/tensorflow,codrut3/tensorflow,jbedorf/tensorflow,av8ramit/tensorflow,ravindrapanda/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,kevin-coder/tensorflow-fork,rabipanda/tensorflow,theflofly/tensorflow,frreiss/tensorflow-fred,eadgarchen/tensorflow,tensorflow/tensorflow,jalexvig/tensorflow,Mistobaan/tensorflow,av8ramit/tensorflow,petewarden/tensorflow,petewarden/tensorflow,karllessard/tensorflow,aam-at/tensorflow,girving/tensorflow,JingJunYin/tensorflow,dancingdan/tensorflow,benoitsteiner/tensorflow-xsmm,freedomtan/tensorflow,aam-at/tensorflow,alshedivat/tensorflow,nburn42/tensorflow,manipopopo/tensorflow,petewarden/tensorflow,kobejean/tensorflow,arborh/tensorflow,ghchinoy/tensorflow,dendisuhubdy/tensorflow,asimshankar/tensorflow,jwlawson/tensorflow,tensorflow/tensorflow-pywrap_saved_model,lukeiwanski/tensorflow,seanli9jan/tensorflow,ZhangXinNan/tensorflow,alsrgv/tensorflow,gojira/tensorflow,snnn/tensorflow,ageron/tensorflow,eaplatanios/tensorflow,yongtang/tensorflow,arborh/tensorflow,annarev/tensorflow,eadgarchen/tensorflow,caisq/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,ppwwyyxx/tensorflow,annarev/tensorflow,JingJunYin/tensorflow,aselle/tensorflow,drpngx/tensorflow,rabipanda/tensorflow,alsrgv/tensorflow,nolanliou/tensorflow,Intel-Corporation/tensorflow,cxxgtxy/tensorflow,freedomtan/tensorflow,ppwwyyxx/tensorflow,dendisuhubdy/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,codrut3/tensorflow,tensorflow/tensorflow,Mistobaan/tensorflow,dongjoon-hyun/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,arborh/tensorflow,ravindrapanda/tensorflow,hfp/tensorflow-xsmm,jendap/tensorflow,karllessard/tensorflow,sarvex/tensorflow,davidzchen/tensorflow,ravindrapanda/tensorflow,DavidNorman/tensorflow,caisq/tensorflow,adit-chandra/tensorflow,nburn42/tensorflow,Bisma
rrck/tensorflow,nburn42/tensorflow,petewarden/tensorflow,sarvex/tensorflow,ghchinoy/tensorflow,DavidNorman/tensorflow,manipopopo/tensorflow,girving/tensorflow,xzturn/tensorflow,ghchinoy/tensorflow,frreiss/tensorflow-fred,codrut3/tensorflow,AnishShah/tensorflow,jendap/tensorflow,apark263/tensorflow,alsrgv/tensorflow,xodus7/tensorflow,manipopopo/tensorflow,aam-at/tensorflow,zasdfgbnm/tensorflow,benoitsteiner/tensorflow-xsmm,paolodedios/tensorflow,cxxgtxy/tensorflow,jalexvig/tensorflow,alshedivat/tensorflow,ageron/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,lukeiwanski/tensorflow,apark263/tensorflow,lakshayg/tensorflow,karllessard/tensorflow,eaplatanios/tensorflow,rabipanda/tensorflow,gojira/tensorflow,AnishShah/tensorflow,xzturn/tensorflow,kevin-coder/tensorflow-fork,hfp/tensorflow-xsmm,arborh/tensorflow,kobejean/tensorflow,renyi533/tensorflow,ghchinoy/tensorflow,Intel-tensorflow/tensorflow,ageron/tensorflow,kobejean/tensorflow,zasdfgbnm/tensorflow,asimshankar/tensorflow,gunan/tensorflow,theflofly/tensorflow,girving/tensorflow,kobejean/tensorflow,freedomtan/tensorflow,renyi533/tensorflow,adit-chandra/tensorflow,jhseu/tensorflow,alshedivat/tensorflow,manipopopo/tensorflow,AnishShah/tensorflow,jart/tensorflow,codrut3/tensorflow,davidzchen/tensorflow,theflofly/tensorflow,ghchinoy/tensorflow,xzturn/tensorflow,Intel-Corporation/tensorflow,gunan/tensorflow,gojira/tensorflow,jwlawson/tensorflow,lukeiwanski/tensorflow,paolodedios/tensorflow,lakshayg/tensorflow,drpngx/tensorflow,annarev/tensorflow,gojira/tensorflow,lakshayg/tensorflow,allenlavoie/tensorflow,nburn42/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,kevin-coder/tensorflow-fork,jwlawson/tensorflow,cxxgtxy/tensorflow,caisq/tensorflow,kobejean/tensorflow,annarev/tensorflow,codrut3/tensorflow,xodus7/tensorflow,Intel-tensorflow/tensorflow,lukeiwanski/tensorflow,dancingdan/tensorflow,apark263/tensorflow,gautam1858/tensorflow,cxxgtxy/tensorflow,chemelnucfin/tensorflow,DavidNorman/tensorflow,dancingdan/t
ensorflow,brchiu/tensorflow,dancingdan/tensorflow,seanli9jan/tensorflow,jhseu/tensorflow,eaplatanios/tensorflow,gojira/tensorflow,jendap/tensorflow,davidzchen/tensorflow,hsaputra/tensorflow,manipopopo/tensorflow,jhseu/tensorflow,DavidNorman/tensorflow,lukeiwanski/tensorflow,drpngx/tensorflow,Mistobaan/tensorflow,ghchinoy/tensorflow,seanli9jan/tensorflow,aam-at/tensorflow,adit-chandra/tensorflow,Bismarrck/tensorflow,gautam1858/tensorflow,dongjoon-hyun/tensorflow,davidzchen/tensorflow,eadgarchen/tensorflow,xzturn/tensorflow,ppwwyyxx/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,snnn/tensorflow,eadgarchen/tensorflow,tensorflow/tensorflow-pywrap_saved_model,eadgarchen/tensorflow,seanli9jan/tensorflow,annarev/tensorflow,sarvex/tensorflow,tensorflow/tensorflow,jendap/tensorflow,Xeralux/tensorflow,chemelnucfin/tensorflow,benoitsteiner/tensorflow-xsmm,codrut3/tensorflow,hfp/tensorflow-xsmm,ghchinoy/tensorflow,seanli9jan/tensorflow,nburn42/tensorflow,tensorflow/tensorflow,jalexvig/tensorflow,jhseu/tensorflow,seanli9jan/tensorflow,aldian/tensorflow,yongtang/tensorflow,alsrgv/tensorflow,tensorflow/tensorflow-pywrap_saved_model,jalexvig/tensorflow,xodus7/tensorflow,eadgarchen/tensorflow,Intel-tensorflow/tensorflow,Mistobaan/tensorflow,petewarden/tensorflow,theflofly/tensorflow,ZhangXinNan/tensorflow,freedomtan/tensorflow,hfp/tensorflow-xsmm,yanchen036/tensorflow,Xeralux/tensorflow,eaplatanios/tensorflow,gunan/tensorflow,annarev/tensorflow,caisq/tensorflow,arborh/tensorflow,ravindrapanda/tensorflow,jendap/tensorflow,rabipanda/tensorflow,alshedivat/tensorflow,Mistobaan/tensorflow,eaplatanios/tensorflow,jart/tensorflow,Xeralux/tensorflow,Xeralux/tensorflow,ppwwyyxx/tensorflow,xzturn/tensorflow,chemelnucfin/tensorflow,ppwwyyxx/tensorflow,lukeiwanski/tensorflow,gunan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,drpngx/tensorflow,nburn42/tensorflow,tensorflow/tensorflow,frreiss/tensorflow-fred,dancingdan/tensorflow,adit-chandra/te
nsorflow,Xeralux/tensorflow,jendap/tensorflow,snnn/tensorflow,petewarden/tensorflow,karllessard/tensorflow,girving/tensorflow,dancingdan/tensorflow,allenlavoie/tensorflow,meteorcloudy/tensorflow,aselle/tensorflow,jhseu/tensorflow,jwlawson/tensorflow,renyi533/tensorflow,karllessard/tensorflow,chemelnucfin/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,jbedorf/tensorflow,jalexvig/tensorflow,lukeiwanski/tensorflow,girving/tensorflow,rabipanda/tensorflow,allenlavoie/tensorflow,jart/tensorflow,aam-at/tensorflow,eaplatanios/tensorflow,av8ramit/tensorflow,petewarden/tensorflow,petewarden/tensorflow,hehongliang/tensorflow,allenlavoie/tensorflow,allenlavoie/tensorflow,AnishShah/tensorflow,aldian/tensorflow,sarvex/tensorflow,theflofly/tensorflow,renyi533/tensorflow,hfp/tensorflow-xsmm,jalexvig/tensorflow,ravindrapanda/tensorflow,arborh/tensorflow,alshedivat/tensorflow,davidzchen/tensorflow,jhseu/tensorflow,aselle/tensorflow,nolanliou/tensorflow,xzturn/tensorflow,benoitsteiner/tensorflow-xsmm,nolanliou/tensorflow,manipopopo/tensorflow,hfp/tensorflow-xsmm,ageron/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,ppwwyyxx/tensorflow,alsrgv/tensorflow,av8ramit/tensorflow,brchiu/tensorflow,aldian/tensorflow,jhseu/tensorflow,gautam1858/tensorflow,Xeralux/tensorflow,Bismarrck/tensorflow,manipopopo/tensorflow,nolanliou/tensorflow,arborh/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,arborh/tensorflow,eadgarchen/tensorflow,tensorflow/tensorflow-pywrap_saved_model,kevin-coder/tensorflow-fork,lakshayg/tensorflow,hsaputra/tensorflow,JingJunYin/tensorflow,lakshayg/tensorflow,yongtang/tensorflow,dancingdan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,yanchen036/tensorflow,karllessard/tensorflow,DavidNorman/tensorflow,adit-chandra/tensorflow,Intel-tensorflow/tensorflow,aam-at/tensorflow,Intel-tensorflow/tensorflow,hsaputra/tensorflow,codrut3/tensorflow,annarev/tenso
rflow,Mistobaan/tensorflow,hehongliang/tensorflow,xodus7/tensorflow,allenlavoie/tensorflow,davidzchen/tensorflow,meteorcloudy/tensorflow,Intel-Corporation/tensorflow,rabipanda/tensorflow,frreiss/tensorflow-fred,alsrgv/tensorflow,JingJunYin/tensorflow,adit-chandra/tensorflow,tensorflow/tensorflow-pywrap_saved_model,ZhangXinNan/tensorflow,yanchen036/tensorflow,JingJunYin/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,renyi533/tensorflow,yanchen036/tensorflow,aselle/tensorflow,apark263/tensorflow,asimshankar/tensorflow,xodus7/tensorflow,meteorcloudy/tensorflow,seanli9jan/tensorflow,codrut3/tensorflow,Mistobaan/tensorflow,ageron/tensorflow,karllessard/tensorflow,jbedorf/tensorflow,asimshankar/tensorflow,allenlavoie/tensorflow,brchiu/tensorflow,jart/tensorflow,frreiss/tensorflow-fred,rabipanda/tensorflow | tensorflow/contrib/libsvm/python/kernel_tests/decode_libsvm_op_test.py | tensorflow/contrib/libsvm/python/kernel_tests/decode_libsvm_op_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeLibsvm op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.libsvm.python.ops import libsvm_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class DecodeLibsvmOpTest(test.TestCase):
    """Tests for the contrib DecodeLibsvm op (sparse-output variant)."""

    def testBasic(self):
        with self.test_session() as sess:
            # Three libsvm-formatted records: "<label> <index>:<value> ...".
            content = ["1 1:3.4 2:0.5 4:0.231",
                       "1 2:2.5 3:0.1 5:0.503",
                       "2 3:2.5 2:0.1 1:0.105"]
            # decode_libsvm returns the label vector plus the components of a
            # sparse feature tensor (indices, values, dense shape).
            label, indices, values, shape = libsvm_ops.decode_libsvm(
                content, num_features=6)
            # Densify the sparse features so they can be compared elementwise.
            # validate_indices=False because the per-record feature indices
            # are not guaranteed to be in lexicographic order (see record 3).
            feature = sparse_ops.sparse_to_dense(indices, shape, values,
                                                 validate_indices=False)
            # Static shape inference check before running the graph.
            self.assertAllEqual(label.get_shape().as_list(), [3])

            label, feature = sess.run([label, feature])
            self.assertAllEqual(label, [1, 1, 2])
            self.assertAllClose(feature, [[0, 3.4, 0.5, 0, 0.231, 0],
                                          [0, 0, 2.5, 0.1, 0, 0.503],
                                          [0, 0.105, 0.1, 2.5, 0, 0]])
if __name__ == "__main__":
test.main()
| # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DecodeLibsvm op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.libsvm.python.ops import libsvm_ops
from tensorflow.python.platform import test
class DecodeLibsvmOpTest(test.TestCase):
    """Tests for the contrib DecodeLibsvm op (dense-output variant)."""

    def testBasic(self):
        with self.test_session() as sess:
            # Three libsvm-formatted records: "<label> <index>:<value> ...".
            content = ["1 1:3.4 2:0.5 4:0.231",
                       "1 2:2.5 3:0.1 5:0.503",
                       "2 3:2.5 2:0.1 1:0.105"]
            # decode_libsvm returns a label vector and a dense [3, 6]
            # feature matrix directly.
            label, feature = libsvm_ops.decode_libsvm(content, num_features=6)
            # shape inference
            self.assertAllEqual(label.get_shape().as_list(), [3])
            self.assertAllEqual(feature.get_shape().as_list(), [3, 6])
            # sess.run()
            label, feature = sess.run([label, feature])
            self.assertAllEqual(label, [1, 1, 2])
            self.assertAllClose(feature, [[0, 3.4, 0.5, 0, 0.231, 0],
                                          [0, 0, 2.5, 0.1, 0, 0.503],
                                          [0, 0.105, 0.1, 2.5, 0, 0]])
if __name__ == "__main__":
test.main()
| apache-2.0 | Python |
d058c6dd08e2cd740094c5ceff265d215c9a0d7f | Bump version number to 0.3 | ftobia/ham,ftobia/ham | ham/_version.py | ham/_version.py | __version__ = '0.3'
| __version__ = '0.2'
| mit | Python |
537eb2d0b067cf4c87b334ee6a3b9c883a37b25c | Bump version to 1.0.1-machtfit-58 | machtfit/django-oscar,machtfit/django-oscar,machtfit/django-oscar | src/oscar/__init__.py | src/oscar/__init__.py | import os
# Use 'dev', 'beta', or 'final' as the 4th element to indicate release type.
VERSION = (1, 0, 1, 'machtfit', 58)


def get_short_version():
    """Return the 'major.minor' portion of VERSION, e.g. '1.0'."""
    major, minor = VERSION[:2]
    return '%s.%s' % (major, minor)


def get_version():
    """Return the full dotted/dashed version string, e.g. '1.0.1-machtfit-58'."""
    return '{0}.{1}.{2}-{3}-{4}'.format(*VERSION)
# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'oscar/templates/oscar/base.html' can be accessed via both
# 'base.html' and 'oscar/base.html'. This allows Oscar's templates to be
# extended by templates with the same filename
OSCAR_MAIN_TEMPLATE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'templates/oscar')
OSCAR_CORE_APPS = [
    'oscar',
    'oscar.apps.checkout',
    'oscar.apps.address',
    'oscar.apps.shipping',
    'oscar.apps.catalogue',
    'oscar.apps.partner',
    'oscar.apps.basket',
    'oscar.apps.payment',
    'oscar.apps.offer',
    'oscar.apps.order',
    'oscar.apps.customer',
    'oscar.apps.dashboard',
    'oscar.apps.dashboard.orders',
    'oscar.apps.dashboard.offers',
    # 3rd-party apps that oscar depends on
    'treebeard',
    'sorl.thumbnail',
    'django_tables2',
]


def get_core_apps(overrides=None):
    """
    Return a list of oscar's apps amended with any passed overrides
    """
    if not overrides:
        return OSCAR_CORE_APPS

    def _resolve(app_label):
        # An override replaces the core app whose 'oscar.apps.'-stripped
        # suffix it ends with; dashboard apps only match dashboard overrides.
        pattern = app_label.replace('oscar.apps.', '')
        for override in overrides:
            if not override.endswith(pattern):
                continue
            if 'dashboard' in override and 'dashboard' not in pattern:
                continue
            return override
        return app_label

    return [_resolve(app_label) for app_label in OSCAR_CORE_APPS]
| import os
# Use 'dev', 'beta', or 'final' as the 4th element to indicate release type.
VERSION = (1, 0, 1, 'machtfit', 57)


def get_short_version():
    """Return the 'major.minor' portion of VERSION, e.g. '1.0'."""
    major, minor = VERSION[:2]
    return '%s.%s' % (major, minor)


def get_version():
    """Return the full dotted/dashed version string, e.g. '1.0.1-machtfit-57'."""
    return '{0}.{1}.{2}-{3}-{4}'.format(*VERSION)
# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'oscar/templates/oscar/base.html' can be accessed via both
# 'base.html' and 'oscar/base.html'. This allows Oscar's templates to be
# extended by templates with the same filename
OSCAR_MAIN_TEMPLATE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'templates/oscar')
OSCAR_CORE_APPS = [
    'oscar',
    'oscar.apps.checkout',
    'oscar.apps.address',
    'oscar.apps.shipping',
    'oscar.apps.catalogue',
    'oscar.apps.partner',
    'oscar.apps.basket',
    'oscar.apps.payment',
    'oscar.apps.offer',
    'oscar.apps.order',
    'oscar.apps.customer',
    'oscar.apps.dashboard',
    'oscar.apps.dashboard.orders',
    'oscar.apps.dashboard.offers',
    # 3rd-party apps that oscar depends on
    'treebeard',
    'sorl.thumbnail',
    'django_tables2',
]


def get_core_apps(overrides=None):
    """
    Return a list of oscar's apps amended with any passed overrides
    """
    if not overrides:
        return OSCAR_CORE_APPS

    def _resolve(app_label):
        # An override replaces the core app whose 'oscar.apps.'-stripped
        # suffix it ends with; dashboard apps only match dashboard overrides.
        pattern = app_label.replace('oscar.apps.', '')
        for override in overrides:
            if not override.endswith(pattern):
                continue
            if 'dashboard' in override and 'dashboard' not in pattern:
                continue
            return override
        return app_label

    return [_resolve(app_label) for app_label in OSCAR_CORE_APPS]
| bsd-3-clause | Python |
c2b0d54aebb795f37541936c73f7b82aebe46763 | Add --strace option to tests. | grandquista/rethinkdb,AntouanK/rethinkdb,jfriedly/rethinkdb,ayumilong/rethinkdb,bpradipt/rethinkdb,victorbriz/rethinkdb,wkennington/rethinkdb,4talesa/rethinkdb,marshall007/rethinkdb,alash3al/rethinkdb,mbroadst/rethinkdb,urandu/rethinkdb,KSanthanam/rethinkdb,ayumilong/rethinkdb,dparnell/rethinkdb,rrampage/rethinkdb,yakovenkodenis/rethinkdb,marshall007/rethinkdb,marshall007/rethinkdb,JackieXie168/rethinkdb,pap/rethinkdb,gdi2290/rethinkdb,victorbriz/rethinkdb,marshall007/rethinkdb,gavioto/rethinkdb,matthaywardwebdesign/rethinkdb,urandu/rethinkdb,dparnell/rethinkdb,pap/rethinkdb,scripni/rethinkdb,marshall007/rethinkdb,KSanthanam/rethinkdb,4talesa/rethinkdb,captainpete/rethinkdb,elkingtonmcb/rethinkdb,JackieXie168/rethinkdb,sontek/rethinkdb,bchavez/rethinkdb,AntouanK/rethinkdb,wojons/rethinkdb,eliangidoni/rethinkdb,captainpete/rethinkdb,tempbottle/rethinkdb,catroot/rethinkdb,nviennot/rethinkdb,lenstr/rethinkdb,mquandalle/rethinkdb,KSanthanam/rethinkdb,urandu/rethinkdb,sebadiaz/rethinkdb,bchavez/rethinkdb,spblightadv/rethinkdb,RubenKelevra/rethinkdb,gavioto/rethinkdb,rrampage/rethinkdb,dparnell/rethinkdb,spblightadv/rethinkdb,yaolinz/rethinkdb,JackieXie168/rethinkdb,scripni/rethinkdb,wojons/rethinkdb,RubenKelevra/rethinkdb,mcanthony/rethinkdb,scripni/rethinkdb,eliangidoni/rethinkdb,scripni/rethinkdb,RubenKelevra/rethinkdb,sebadiaz/rethinkdb,mbroadst/rethinkdb,Qinusty/rethinkdb,robertjpayne/rethinkdb,sontek/rethinkdb,alash3al/rethinkdb,pap/rethinkdb,mbroadst/rethinkdb,victorbriz/rethinkdb,sontek/rethinkdb,gavioto/rethinkdb,lenstr/rethinkdb,bchavez/rethinkdb,grandquista/rethinkdb,elkingtonmcb/rethinkdb,dparnell/rethinkdb,elkingtonmcb/rethinkdb,niieani/rethinkdb,Qinusty/rethinkdb,tempbottle/rethinkdb,jmptrader/rethinkdb,bpradipt/rethinkdb,wujf/rethinkdb,wkennington/rethinkdb,alash3al/rethinkdb,wkennington/rethinkdb,KSanthanam/rethinkdb,gdi2290/rethinkdb,wujf/rethinkdb,yaolinz/rethinkdb,bchavez/rethink
db,sbusso/rethinkdb,sebadiaz/rethinkdb,greyhwndz/rethinkdb,Qinusty/rethinkdb,jesseditson/rethinkdb,wojons/rethinkdb,AtnNn/rethinkdb,greyhwndz/rethinkdb,elkingtonmcb/rethinkdb,4talesa/rethinkdb,losywee/rethinkdb,urandu/rethinkdb,AtnNn/rethinkdb,gdi2290/rethinkdb,dparnell/rethinkdb,wojons/rethinkdb,4talesa/rethinkdb,AtnNn/rethinkdb,spblightadv/rethinkdb,lenstr/rethinkdb,rrampage/rethinkdb,matthaywardwebdesign/rethinkdb,RubenKelevra/rethinkdb,sebadiaz/rethinkdb,lenstr/rethinkdb,catroot/rethinkdb,elkingtonmcb/rethinkdb,KSanthanam/rethinkdb,matthaywardwebdesign/rethinkdb,yaolinz/rethinkdb,tempbottle/rethinkdb,losywee/rethinkdb,yakovenkodenis/rethinkdb,robertjpayne/rethinkdb,jmptrader/rethinkdb,victorbriz/rethinkdb,yaolinz/rethinkdb,yakovenkodenis/rethinkdb,catroot/rethinkdb,gavioto/rethinkdb,mbroadst/rethinkdb,jesseditson/rethinkdb,jesseditson/rethinkdb,gavioto/rethinkdb,mquandalle/rethinkdb,ajose01/rethinkdb,yakovenkodenis/rethinkdb,eliangidoni/rethinkdb,yaolinz/rethinkdb,pap/rethinkdb,ajose01/rethinkdb,ajose01/rethinkdb,yakovenkodenis/rethinkdb,mbroadst/rethinkdb,pap/rethinkdb,niieani/rethinkdb,jmptrader/rethinkdb,sebadiaz/rethinkdb,Wilbeibi/rethinkdb,lenstr/rethinkdb,gdi2290/rethinkdb,RubenKelevra/rethinkdb,AntouanK/rethinkdb,mquandalle/rethinkdb,jmptrader/rethinkdb,ayumilong/rethinkdb,ajose01/rethinkdb,wkennington/rethinkdb,ajose01/rethinkdb,gdi2290/rethinkdb,JackieXie168/rethinkdb,ayumilong/rethinkdb,catroot/rethinkdb,RubenKelevra/rethinkdb,spblightadv/rethinkdb,losywee/rethinkdb,dparnell/rethinkdb,Wilbeibi/rethinkdb,mbroadst/rethinkdb,urandu/rethinkdb,bpradipt/rethinkdb,mcanthony/rethinkdb,sbusso/rethinkdb,eliangidoni/rethinkdb,scripni/rethinkdb,dparnell/rethinkdb,marshall007/rethinkdb,niieani/rethinkdb,bpradipt/rethinkdb,captainpete/rethinkdb,jfriedly/rethinkdb,urandu/rethinkdb,KSanthanam/rethinkdb,elkingtonmcb/rethinkdb,urandu/rethinkdb,robertjpayne/rethinkdb,nviennot/rethinkdb,sebadiaz/rethinkdb,Wilbeibi/rethinkdb,jfriedly/rethinkdb,lenstr/rethinkdb,jmptrader/re
thinkdb,robertjpayne/rethinkdb,spblightadv/rethinkdb,sebadiaz/rethinkdb,AtnNn/rethinkdb,jesseditson/rethinkdb,yaolinz/rethinkdb,mcanthony/rethinkdb,mcanthony/rethinkdb,losywee/rethinkdb,KSanthanam/rethinkdb,nviennot/rethinkdb,eliangidoni/rethinkdb,ayumilong/rethinkdb,sontek/rethinkdb,pap/rethinkdb,marshall007/rethinkdb,losywee/rethinkdb,bchavez/rethinkdb,mbroadst/rethinkdb,Qinusty/rethinkdb,tempbottle/rethinkdb,sbusso/rethinkdb,greyhwndz/rethinkdb,tempbottle/rethinkdb,robertjpayne/rethinkdb,RubenKelevra/rethinkdb,marshall007/rethinkdb,eliangidoni/rethinkdb,nviennot/rethinkdb,matthaywardwebdesign/rethinkdb,jesseditson/rethinkdb,Qinusty/rethinkdb,wojons/rethinkdb,4talesa/rethinkdb,bpradipt/rethinkdb,sbusso/rethinkdb,mbroadst/rethinkdb,AntouanK/rethinkdb,scripni/rethinkdb,yakovenkodenis/rethinkdb,niieani/rethinkdb,JackieXie168/rethinkdb,alash3al/rethinkdb,ajose01/rethinkdb,sontek/rethinkdb,wujf/rethinkdb,jesseditson/rethinkdb,nviennot/rethinkdb,victorbriz/rethinkdb,niieani/rethinkdb,Wilbeibi/rethinkdb,greyhwndz/rethinkdb,ayumilong/rethinkdb,wujf/rethinkdb,mcanthony/rethinkdb,tempbottle/rethinkdb,rrampage/rethinkdb,wujf/rethinkdb,gavioto/rethinkdb,ajose01/rethinkdb,ajose01/rethinkdb,catroot/rethinkdb,greyhwndz/rethinkdb,wkennington/rethinkdb,scripni/rethinkdb,jfriedly/rethinkdb,pap/rethinkdb,niieani/rethinkdb,alash3al/rethinkdb,yaolinz/rethinkdb,eliangidoni/rethinkdb,alash3al/rethinkdb,grandquista/rethinkdb,captainpete/rethinkdb,grandquista/rethinkdb,sbusso/rethinkdb,lenstr/rethinkdb,jesseditson/rethinkdb,rrampage/rethinkdb,niieani/rethinkdb,pap/rethinkdb,dparnell/rethinkdb,captainpete/rethinkdb,robertjpayne/rethinkdb,Qinusty/rethinkdb,Qinusty/rethinkdb,wujf/rethinkdb,gavioto/rethinkdb,sontek/rethinkdb,rrampage/rethinkdb,mquandalle/rethinkdb,grandquista/rethinkdb,wojons/rethinkdb,bpradipt/rethinkdb,sontek/rethinkdb,KSanthanam/rethinkdb,RubenKelevra/rethinkdb,JackieXie168/rethinkdb,AtnNn/rethinkdb,AntouanK/rethinkdb,catroot/rethinkdb,Wilbeibi/rethinkdb,yakovenkodenis/ret
hinkdb,rrampage/rethinkdb,4talesa/rethinkdb,wkennington/rethinkdb,tempbottle/rethinkdb,elkingtonmcb/rethinkdb,AtnNn/rethinkdb,wojons/rethinkdb,victorbriz/rethinkdb,sontek/rethinkdb,bpradipt/rethinkdb,captainpete/rethinkdb,mcanthony/rethinkdb,ayumilong/rethinkdb,alash3al/rethinkdb,matthaywardwebdesign/rethinkdb,JackieXie168/rethinkdb,mquandalle/rethinkdb,lenstr/rethinkdb,tempbottle/rethinkdb,catroot/rethinkdb,jfriedly/rethinkdb,spblightadv/rethinkdb,victorbriz/rethinkdb,bpradipt/rethinkdb,spblightadv/rethinkdb,greyhwndz/rethinkdb,jfriedly/rethinkdb,matthaywardwebdesign/rethinkdb,4talesa/rethinkdb,mquandalle/rethinkdb,jfriedly/rethinkdb,Qinusty/rethinkdb,mcanthony/rethinkdb,greyhwndz/rethinkdb,gdi2290/rethinkdb,bchavez/rethinkdb,spblightadv/rethinkdb,AntouanK/rethinkdb,scripni/rethinkdb,captainpete/rethinkdb,Wilbeibi/rethinkdb,sbusso/rethinkdb,bchavez/rethinkdb,gavioto/rethinkdb,wkennington/rethinkdb,nviennot/rethinkdb,bchavez/rethinkdb,niieani/rethinkdb,robertjpayne/rethinkdb,catroot/rethinkdb,dparnell/rethinkdb,grandquista/rethinkdb,sbusso/rethinkdb,urandu/rethinkdb,AntouanK/rethinkdb,mbroadst/rethinkdb,yakovenkodenis/rethinkdb,jfriedly/rethinkdb,robertjpayne/rethinkdb,grandquista/rethinkdb,bchavez/rethinkdb,nviennot/rethinkdb,sbusso/rethinkdb,JackieXie168/rethinkdb,elkingtonmcb/rethinkdb,wojons/rethinkdb,Qinusty/rethinkdb,matthaywardwebdesign/rethinkdb,yaolinz/rethinkdb,grandquista/rethinkdb,gdi2290/rethinkdb,alash3al/rethinkdb,losywee/rethinkdb,losywee/rethinkdb,AtnNn/rethinkdb,victorbriz/rethinkdb,jmptrader/rethinkdb,AntouanK/rethinkdb,eliangidoni/rethinkdb,sebadiaz/rethinkdb,mquandalle/rethinkdb,wkennington/rethinkdb,mcanthony/rethinkdb,grandquista/rethinkdb,robertjpayne/rethinkdb,jmptrader/rethinkdb,mquandalle/rethinkdb,matthaywardwebdesign/rethinkdb,Wilbeibi/rethinkdb,nviennot/rethinkdb,JackieXie168/rethinkdb,greyhwndz/rethinkdb,rrampage/rethinkdb,ayumilong/rethinkdb,4talesa/rethinkdb,wujf/rethinkdb,bpradipt/rethinkdb,losywee/rethinkdb,jmptrader/rethinkdb,elia
ngidoni/rethinkdb,jesseditson/rethinkdb,captainpete/rethinkdb,Wilbeibi/rethinkdb,AtnNn/rethinkdb | test/common/scenario_common.py | test/common/scenario_common.py | import shlex, random
from vcoptparse import *
import driver
import workload_runner
def prepare_option_parser_mode_flags(opt_parser):
opt_parser["wrapper"] = ChoiceFlags(["--valgrind", "--strace"], None)
opt_parser["valgrind-options"] = StringFlag("--valgrind-options", "--leak-check=full --track-origins=yes --child-silent-after-fork=yes")
opt_parser["mode"] = StringFlag("--mode", "debug")
opt_parser["serve-flags"] = StringFlag("--serve-flags", "")
def parse_mode_flags(parsed_opts):
mode = parsed_opts["mode"]
command_prefix = [ ]
if parsed_opts["wrapper"] == "valgrind":
command_prefix.append("valgrind")
for valgrind_option in shlex.split(parsed_opts["valgrind-options"]):
command_prefix.append(valgrind_option)
# Make sure we use the valgrind build
# this assumes that the 'valgrind' substring goes at the end of the specific build string
if "valgrind" not in mode:
mode = mode + "-valgrind"
elif parsed_opts["wrapper"] == "strace":
command_prefix.extend(["strace", "-f"])
return driver.find_rethinkdb_executable(mode), command_prefix, shlex.split(parsed_opts["serve-flags"])
def get_workload_ports(namespace_port, processes):
for process in processes:
assert isinstance(process, (driver.Process, driver.ProxyProcess))
process = random.choice(processes)
return workload_runner.Ports(
host = "localhost",
http_port = process.http_port,
memcached_port = namespace_port + process.port_offset
) | import shlex, random
from vcoptparse import *
import driver
import workload_runner
def prepare_option_parser_mode_flags(opt_parser):
opt_parser["valgrind"] = BoolFlag("--valgrind")
opt_parser["valgrind-options"] = StringFlag("--valgrind-options", "--leak-check=full --track-origins=yes --child-silent-after-fork=yes")
opt_parser["mode"] = StringFlag("--mode", "debug")
opt_parser["serve-flags"] = StringFlag("--serve-flags", "")
def parse_mode_flags(parsed_opts):
mode = parsed_opts["mode"]
command_prefix = [ ]
if parsed_opts["valgrind"]:
command_prefix.append("valgrind")
for valgrind_option in shlex.split(parsed_opts["valgrind-options"]):
command_prefix.append(valgrind_option)
# Make sure we use the valgrind build
# this assumes that the 'valgrind' substring goes at the end of the specific build string
if "valgrind" not in mode:
mode = mode + "-valgrind"
return driver.find_rethinkdb_executable(mode), command_prefix, shlex.split(parsed_opts["serve-flags"])
def get_workload_ports(namespace_port, processes):
for process in processes:
assert isinstance(process, (driver.Process, driver.ProxyProcess))
process = random.choice(processes)
return workload_runner.Ports(
host = "localhost",
http_port = process.http_port,
memcached_port = namespace_port + process.port_offset
) | apache-2.0 | Python |
b56badbe1923b6aa08a0c10590643fb829e882a7 | Add regimen_changed -> Switched to CAT4 to treatment outcomes | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | custom/enikshay/integrations/nikshay/field_mappings.py | custom/enikshay/integrations/nikshay/field_mappings.py | gender_mapping = {
'male': 'M',
'female': 'F',
'transgender': 'T',
}
disease_classification = {
'pulmonary': 'P',
'extra_pulmonary': 'EP',
}
patient_type_choice = {
'new': '1',
'recurrent': '2',
'other_previously_treated': '3',
'treatment_after_failure': '4',
'other_previously_treated': '5',
'treatment_after_lfu': '6',
'transfer_in': '7',
}
treatment_support_designation = {
'health_worker': '1',
'tbhv': '2',
'asha_or_other_phi_hw': '3',
'aww': '4',
'ngo_volunteer': '5',
'private_medical_pracitioner': '6',
'other_community_volunteer': '7',
}
occupation = {
'air_force': 7,
'business_person': 7,
'charity_social_work': 7,
'chartered_accountant': 7,
'college_university_teacher': 6,
'diplomat': 1,
'doctor_': 5,
'engineer': 4,
'government_service': 1,
'house_maker': 7,
'journalist': 7,
'labour': 27,
'lawyer': 11,
'media': 7,
'military': 7,
'navy': 7,
'news_broadcaster': 7,
'other': 7,
'police': 7,
'private_service': 7,
'publisher': 7,
'reporter': 7,
'researcher': 6,
'retired': 30,
'self-employed_freelancer': 7,
'student': 6,
'trader': 21,
'unemployed': 28,
'worker': 29,
'writer': 7,
}
episode_site = {
'lymph_node': 1,
'pleural_effusion': 2,
'abdominal': 3,
'others': 10,
}
dcpulmonory = {
'pulmonary': 'Y',
'extra_pulmonary': 'N',
}
dcexpulmonory = {
'pulmonary': 'N',
'extra_pulmonary': 'Y',
}
treatment_outcome = {
'cured': '1',
'treatment_complete': '2',
'died': '3',
'failure': '4',
'regimen_changed': '7',
}
| gender_mapping = {
'male': 'M',
'female': 'F',
'transgender': 'T',
}
disease_classification = {
'pulmonary': 'P',
'extra_pulmonary': 'EP',
}
patient_type_choice = {
'new': '1',
'recurrent': '2',
'other_previously_treated': '3',
'treatment_after_failure': '4',
'other_previously_treated': '5',
'treatment_after_lfu': '6',
'transfer_in': '7',
}
treatment_support_designation = {
'health_worker': '1',
'tbhv': '2',
'asha_or_other_phi_hw': '3',
'aww': '4',
'ngo_volunteer': '5',
'private_medical_pracitioner': '6',
'other_community_volunteer': '7',
}
occupation = {
'air_force': 7,
'business_person': 7,
'charity_social_work': 7,
'chartered_accountant': 7,
'college_university_teacher': 6,
'diplomat': 1,
'doctor_': 5,
'engineer': 4,
'government_service': 1,
'house_maker': 7,
'journalist': 7,
'labour': 27,
'lawyer': 11,
'media': 7,
'military': 7,
'navy': 7,
'news_broadcaster': 7,
'other': 7,
'police': 7,
'private_service': 7,
'publisher': 7,
'reporter': 7,
'researcher': 6,
'retired': 30,
'self-employed_freelancer': 7,
'student': 6,
'trader': 21,
'unemployed': 28,
'worker': 29,
'writer': 7,
}
episode_site = {
'lymph_node': 1,
'pleural_effusion': 2,
'abdominal': 3,
'others': 10,
}
dcpulmonory = {
'pulmonary': 'Y',
'extra_pulmonary': 'N',
}
dcexpulmonory = {
'pulmonary': 'N',
'extra_pulmonary': 'Y',
}
treatment_outcome = {
'cured': '1',
'treatment_complete': '2',
'died': '3',
'failure': '4',
}
| bsd-3-clause | Python |
a0af301105226053a0543e055cc1e01cee33a820 | Fix scrub_query_jobs | aipescience/django-daiquiri,aipescience/django-daiquiri,aipescience/django-daiquiri | daiquiri/query/management/commands/scrub_query_jobs.py | daiquiri/query/management/commands/scrub_query_jobs.py | from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.db.utils import ProgrammingError
from rest_framework.exceptions import ValidationError
from daiquiri.core.adapter import DatabaseAdapter
from daiquiri.query.models import QueryJob
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--user', help='Only scrub jobs for this user.')
parser.add_argument('--archive', action='store_true', help='Archive stale jobs.')
def handle(self, *args, **options):
if options['user']:
if options['user'] == 'anonymous':
owners = [None]
else:
owners = [User.objects.get(username=options['user'])]
else:
owners = [None] + list(User.objects.all())
adapter = DatabaseAdapter()
stale_jobs = []
for owner in owners:
jobs = QueryJob.objects.filter(owner=owner)
for job in jobs:
if job.phase == job.PHASE_COMPLETED:
if not adapter.fetch_table(job.schema_name, job.table_name):
stale_jobs.append(job)
if stale_jobs:
print('The following QueryJobs have no associated database table:')
for job in stale_jobs:
username = job.owner.username if job.owner else 'anonymous'
print('%s by %s -> %s.%s' % (job.id, username, job.schema_name, job.table_name))
if options['archive']:
for job in stale_jobs:
job.archive()
print('The jobs have been archived.')
else:
print('No QueryJobs without associated associated database table have been found.')
| from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.db.utils import ProgrammingError
from rest_framework.exceptions import ValidationError
from daiquiri.core.adapter import DatabaseAdapter
from daiquiri.query.models import QueryJob
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--user', help='Only scrub jobs for this user.')
parser.add_argument('--archive', action='store_true', help='Archive stale jobs.')
def handle(self, *args, **options):
if options['user']:
if options['user'] == 'anonymous':
owners = [None]
else:
owners = [User.objects.get(username=options['user'])]
else:
owners = [None] + list(User.objects.all())
adapter = DatabaseAdapter()
stale_jobs = []
for owner in owners:
jobs = QueryJob.objects.filter(owner=owner)
for job in jobs:
if job.phase == job.PHASE_COMPLETED:
if not adapter.fetch_table(job.schema_name, job.table_name):
stale_jobs.append(job)
if stale_jobs:
print('The following QueryJobs have no associated database table:')
for job in stale_jobs:
username = owner.username if owner else 'anonymous'
print('%s by %s -> %s.%s' % (job.id, username, job.schema_name, job.table_name))
if options['archive']:
for job in stale_jobs:
job.archive()
print('The jobs have been archived.')
else:
print('No QueryJobs without associated associated database table have been found.')
| apache-2.0 | Python |
19ab98023470fe0320d28afb8d246d2eac096d12 | Use `assertRaisesRegex` | gs0510/coala-bears,coala-analyzer/coala-bears,vijeth-aradhya/coala-bears,refeed/coala-bears,kaustubhhiware/coala-bears,shreyans800755/coala-bears,incorrectusername/coala-bears,refeed/coala-bears,coala/coala-bears,coala/coala-bears,kaustubhhiware/coala-bears,srisankethu/coala-bears,Shade5/coala-bears,Asnelchristian/coala-bears,horczech/coala-bears,incorrectusername/coala-bears,seblat/coala-bears,ankit01ojha/coala-bears,srisankethu/coala-bears,horczech/coala-bears,meetmangukiya/coala-bears,naveentata/coala-bears,aptrishu/coala-bears,shreyans800755/coala-bears,madhukar01/coala-bears,shreyans800755/coala-bears,yash-nisar/coala-bears,shreyans800755/coala-bears,incorrectusername/coala-bears,naveentata/coala-bears,seblat/coala-bears,Shade5/coala-bears,Shade5/coala-bears,coala/coala-bears,gs0510/coala-bears,madhukar01/coala-bears,srisankethu/coala-bears,ankit01ojha/coala-bears,Asnelchristian/coala-bears,seblat/coala-bears,kaustubhhiware/coala-bears,meetmangukiya/coala-bears,madhukar01/coala-bears,Vamshi99/coala-bears,Shade5/coala-bears,horczech/coala-bears,aptrishu/coala-bears,coala-analyzer/coala-bears,kaustubhhiware/coala-bears,coala-analyzer/coala-bears,horczech/coala-bears,Asnelchristian/coala-bears,naveentata/coala-bears,damngamerz/coala-bears,meetmangukiya/coala-bears,Shade5/coala-bears,horczech/coala-bears,damngamerz/coala-bears,arjunsinghy96/coala-bears,gs0510/coala-bears,aptrishu/coala-bears,ankit01ojha/coala-bears,arjunsinghy96/coala-bears,Vamshi99/coala-bears,yashtrivedi96/coala-bears,madhukar01/coala-bears,gs0510/coala-bears,Vamshi99/coala-bears,ankit01ojha/coala-bears,coala-analyzer/coala-bears,incorrectusername/coala-bears,aptrishu/coala-bears,coala-analyzer/coala-bears,coala-analyzer/coala-bears,kaustubhhiware/coala-bears,Asnelchristian/coala-bears,yashtrivedi96/coala-bears,aptrishu/coala-bears,refeed/coala-bears,coala/coala-bears,coala/coala-bears,horczech/coala-bears,shreyans800755/coala-b
ears,arjunsinghy96/coala-bears,ankit01ojha/coala-bears,ankit01ojha/coala-bears,damngamerz/coala-bears,Shade5/coala-bears,naveentata/coala-bears,yashtrivedi96/coala-bears,yash-nisar/coala-bears,yashtrivedi96/coala-bears,damngamerz/coala-bears,coala-analyzer/coala-bears,arjunsinghy96/coala-bears,Shade5/coala-bears,incorrectusername/coala-bears,shreyans800755/coala-bears,seblat/coala-bears,refeed/coala-bears,Shade5/coala-bears,arjunsinghy96/coala-bears,horczech/coala-bears,yashtrivedi96/coala-bears,coala/coala-bears,gs0510/coala-bears,ankit01ojha/coala-bears,seblat/coala-bears,srisankethu/coala-bears,shreyans800755/coala-bears,Asnelchristian/coala-bears,Asnelchristian/coala-bears,aptrishu/coala-bears,damngamerz/coala-bears,Vamshi99/coala-bears,kaustubhhiware/coala-bears,damngamerz/coala-bears,Vamshi99/coala-bears,vijeth-aradhya/coala-bears,madhukar01/coala-bears,ankit01ojha/coala-bears,Asnelchristian/coala-bears,aptrishu/coala-bears,gs0510/coala-bears,meetmangukiya/coala-bears,Vamshi99/coala-bears,yash-nisar/coala-bears,meetmangukiya/coala-bears,arjunsinghy96/coala-bears,yash-nisar/coala-bears,Vamshi99/coala-bears,yash-nisar/coala-bears,yashtrivedi96/coala-bears,ankit01ojha/coala-bears,horczech/coala-bears,coala-analyzer/coala-bears,refeed/coala-bears,gs0510/coala-bears,gs0510/coala-bears,meetmangukiya/coala-bears,naveentata/coala-bears,srisankethu/coala-bears,damngamerz/coala-bears,damngamerz/coala-bears,Vamshi99/coala-bears,yash-nisar/coala-bears,refeed/coala-bears,ankit01ojha/coala-bears,madhukar01/coala-bears,yash-nisar/coala-bears,ankit01ojha/coala-bears,seblat/coala-bears,Asnelchristian/coala-bears,meetmangukiya/coala-bears,aptrishu/coala-bears,gs0510/coala-bears,refeed/coala-bears,arjunsinghy96/coala-bears,damngamerz/coala-bears,yash-nisar/coala-bears,vijeth-aradhya/coala-bears,madhukar01/coala-bears,shreyans800755/coala-bears,naveentata/coala-bears,Asnelchristian/coala-bears,damngamerz/coala-bears,Shade5/coala-bears,yash-nisar/coala-bears,vijeth-aradhya/coala-b
ears,kaustubhhiware/coala-bears,kaustubhhiware/coala-bears,arjunsinghy96/coala-bears,coala/coala-bears,Vamshi99/coala-bears,incorrectusername/coala-bears,vijeth-aradhya/coala-bears,vijeth-aradhya/coala-bears,kaustubhhiware/coala-bears,seblat/coala-bears,madhukar01/coala-bears,naveentata/coala-bears,srisankethu/coala-bears,coala/coala-bears,shreyans800755/coala-bears,incorrectusername/coala-bears,coala/coala-bears,shreyans800755/coala-bears,aptrishu/coala-bears,coala-analyzer/coala-bears,vijeth-aradhya/coala-bears,srisankethu/coala-bears,srisankethu/coala-bears,madhukar01/coala-bears,coala/coala-bears,meetmangukiya/coala-bears,refeed/coala-bears,aptrishu/coala-bears,yash-nisar/coala-bears,naveentata/coala-bears,coala/coala-bears,horczech/coala-bears,shreyans800755/coala-bears,yashtrivedi96/coala-bears,Vamshi99/coala-bears,incorrectusername/coala-bears,incorrectusername/coala-bears,horczech/coala-bears,vijeth-aradhya/coala-bears,horczech/coala-bears,aptrishu/coala-bears,refeed/coala-bears,srisankethu/coala-bears,seblat/coala-bears,vijeth-aradhya/coala-bears,refeed/coala-bears,srisankethu/coala-bears,coala/coala-bears,yashtrivedi96/coala-bears,yash-nisar/coala-bears,srisankethu/coala-bears,arjunsinghy96/coala-bears,yashtrivedi96/coala-bears,refeed/coala-bears,naveentata/coala-bears,damngamerz/coala-bears,meetmangukiya/coala-bears,Vamshi99/coala-bears | tests/dart/DartLintBearTest.py | tests/dart/DartLintBearTest.py | from queue import Queue
from bears.dart.DartLintBear import DartLintBear
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
from coalib.testing.LocalBearTestHelper import verify_local_bear
from coalib.testing.LocalBearTestHelper import LocalBearTestHelper
from coalib.testing.BearTestHelper import generate_skip_decorator
good_file = """
printNumber(num aNumber) {
print('The number is $aNumber.');
}
main() {
var answer = 42; // The meaning of life.
printNumber(answer);
}
"""
bad_file = """
printNumber(num aNumber) {
print('The number is $aNumber.')
}
main() {
var answer = 42; // The meaning of life.
printNumber(answer)
}
"""
DartLintBearTest = verify_local_bear(DartLintBear,
valid_files=(good_file,),
invalid_files=(bad_file,),
tempfile_kwargs={'suffix': '.dart'})
@generate_skip_decorator(DartLintBear)
class DartLintBearConfigTest(LocalBearTestHelper):
DART_VALUE_ERROR_RE = ('ValueError: DartLintBear only supports '
'`use_spaces=True` and `indent_size=2`')
def test_config_failure_use_spaces(self):
section = Section('name')
section.append(Setting('use_spaces', False))
bear = DartLintBear(section, Queue())
with self.assertRaisesRegex(AssertionError, self.DART_VALUE_ERROR_RE):
self.check_validity(bear, [], good_file)
def test_config_failure_wrong_indent_size(self):
section = Section('name')
section.append(Setting('indent_size', 3))
bear = DartLintBear(section, Queue())
with self.assertRaisesRegex(AssertionError, self.DART_VALUE_ERROR_RE):
self.check_validity(bear, [], good_file)
| from queue import Queue
from bears.dart.DartLintBear import DartLintBear
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
from coalib.testing.LocalBearTestHelper import verify_local_bear
from coalib.testing.LocalBearTestHelper import LocalBearTestHelper
from coalib.testing.BearTestHelper import generate_skip_decorator
good_file = """
printNumber(num aNumber) {
print('The number is $aNumber.');
}
main() {
var answer = 42; // The meaning of life.
printNumber(answer);
}
"""
bad_file = """
printNumber(num aNumber) {
print('The number is $aNumber.')
}
main() {
var answer = 42; // The meaning of life.
printNumber(answer)
}
"""
DartLintBearTest = verify_local_bear(DartLintBear,
valid_files=(good_file,),
invalid_files=(bad_file,),
tempfile_kwargs={'suffix': '.dart'})
@generate_skip_decorator(DartLintBear)
class DartLintBearConfigTest(LocalBearTestHelper):
def test_config_failure_use_spaces(self):
section = Section('name')
section.append(Setting('use_spaces', False))
bear = DartLintBear(section, Queue())
with self.assertRaises(AssertionError):
self.check_validity(bear, [], good_file)
def test_config_failure_wrong_indent_size(self):
section = Section('name')
section.append(Setting('indent_size', 3))
bear = DartLintBear(section, Queue())
with self.assertRaises(AssertionError):
self.check_validity(bear, [], good_file)
| agpl-3.0 | Python |
2f0681a33cebe5bb676959a5a7fd47296b0199e6 | check for lack of 'version' key for initial full table | singer-io/tap-mysql | tap_mysql/sync_strategies/full_table.py | tap_mysql/sync_strategies/full_table.py | #!/usr/bin/env python3
# pylint: disable=duplicate-code
import copy
import singer
import tap_mysql.sync_strategies.common as common
LOGGER = singer.get_logger()
def sync_table(connection, catalog_entry, state, columns):
bookmark = state.get('bookmarks', {}).get(catalog_entry.tap_stream_id, {})
version_exists = True if 'version' in bookmark else False
stream_version = common.get_stream_version(catalog_entry.tap_stream_id, state)
state = singer.write_bookmark(state,
catalog_entry.tap_stream_id,
'version',
stream_version)
activate_version_message = singer.ActivateVersionMessage(
stream=catalog_entry.stream,
version=stream_version
)
# If there is no prior version for this stream, assume it is the
# very first replication. Emity an ACTIVATE_VERSION message at the
# beginning so the recors show up right away.
if not version_exists:
yield activate_version_message
with connection.cursor() as cursor:
select_sql = common.generate_select_sql(catalog_entry, columns)
params = {}
for message in common.sync_query(cursor,
catalog_entry,
state,
select_sql,
columns,
stream_version,
params):
yield message
yield activate_version_message
| #!/usr/bin/env python3
# pylint: disable=duplicate-code
import copy
import singer
import tap_mysql.sync_strategies.common as common
LOGGER = singer.get_logger()
def sync_table(connection, catalog_entry, state, columns):
bookmark_is_empty = state.get('bookmarks', {}).get(catalog_entry.tap_stream_id) is None
stream_version = common.get_stream_version(catalog_entry.tap_stream_id, state)
state = singer.write_bookmark(state,
catalog_entry.tap_stream_id,
'version',
stream_version)
activate_version_message = singer.ActivateVersionMessage(
stream=catalog_entry.stream,
version=stream_version
)
# If there is no bookmark at all for this stream, assume it is the
# very first replication. Emity an ACTIVATE_VERSION message at the
# beginning so the recors show up right away.
if bookmark_is_empty:
yield activate_version_message
with connection.cursor() as cursor:
select_sql = common.generate_select_sql(catalog_entry, columns)
params = {}
for message in common.sync_query(cursor,
catalog_entry,
state,
select_sql,
columns,
stream_version,
params):
yield message
yield activate_version_message
| agpl-3.0 | Python |
0ed7107cd791dc32edbe70cdd847be44b07cb883 | remove unused import | muhammad-ammar/python-social-auth,chandolia/python-social-auth,alrusdi/python-social-auth,duoduo369/python-social-auth,ononeor12/python-social-auth,lneoe/python-social-auth,MSOpenTech/python-social-auth,clef/python-social-auth,JerzySpendel/python-social-auth,lamby/python-social-auth,wildtetris/python-social-auth,robbiet480/python-social-auth,python-social-auth/social-app-cherrypy,jameslittle/python-social-auth,fearlessspider/python-social-auth,python-social-auth/social-app-django,rsteca/python-social-auth,robbiet480/python-social-auth,mark-adams/python-social-auth,falcon1kr/python-social-auth,tkajtoch/python-social-auth,mathspace/python-social-auth,cmichal/python-social-auth,mrwags/python-social-auth,noodle-learns-programming/python-social-auth,lamby/python-social-auth,SeanHayes/python-social-auth,robbiet480/python-social-auth,rsteca/python-social-auth,Andygmb/python-social-auth,ononeor12/python-social-auth,degs098/python-social-auth,MSOpenTech/python-social-auth,tutumcloud/python-social-auth,mark-adams/python-social-auth,tkajtoch/python-social-auth,jneves/python-social-auth,MSOpenTech/python-social-auth,python-social-auth/social-core,drxos/python-social-auth,ByteInternet/python-social-auth,firstjob/python-social-auth,jameslittle/python-social-auth,jneves/python-social-auth,clef/python-social-auth,jeyraof/python-social-auth,frankier/python-social-auth,drxos/python-social-auth,VishvajitP/python-social-auth,JJediny/python-social-auth,tutumcloud/python-social-auth,frankier/python-social-auth,falcon1kr/python-social-auth,san-mate/python-social-auth,SeanHayes/python-social-auth,ariestiyansyah/python-social-auth,mathspace/python-social-auth,lawrence34/python-social-auth,DhiaEddineSaidi/python-social-auth,nirmalvp/python-social-auth,lawrence34/python-social-auth,mchdks/python-social-auth,daniula/python-social-auth,san-mate/python-social-auth,lneoe/python-social-auth,clef/python-social-auth,firstjob/python-so
cial-auth,iruga090/python-social-auth,mathspace/python-social-auth,bjorand/python-social-auth,S01780/python-social-auth,henocdz/python-social-auth,hsr-ba-fs15-dat/python-social-auth,webjunkie/python-social-auth,daniula/python-social-auth,JerzySpendel/python-social-auth,merutak/python-social-auth,falcon1kr/python-social-auth,barseghyanartur/python-social-auth,DhiaEddineSaidi/python-social-auth,san-mate/python-social-auth,wildtetris/python-social-auth,contracode/python-social-auth,joelstanner/python-social-auth,yprez/python-social-auth,drxos/python-social-auth,contracode/python-social-auth,lneoe/python-social-auth,michael-borisov/python-social-auth,lawrence34/python-social-auth,ariestiyansyah/python-social-auth,S01780/python-social-auth,ononeor12/python-social-auth,duoduo369/python-social-auth,daniula/python-social-auth,msampathkumar/python-social-auth,python-social-auth/social-storage-sqlalchemy,contracode/python-social-auth,degs098/python-social-auth,cjltsod/python-social-auth,chandolia/python-social-auth,nirmalvp/python-social-auth,nirmalvp/python-social-auth,jneves/python-social-auth,chandolia/python-social-auth,merutak/python-social-auth,henocdz/python-social-auth,tkajtoch/python-social-auth,muhammad-ammar/python-social-auth,ByteInternet/python-social-auth,python-social-auth/social-app-django,JJediny/python-social-auth,hsr-ba-fs15-dat/python-social-auth,msampathkumar/python-social-auth,alrusdi/python-social-auth,yprez/python-social-auth,henocdz/python-social-auth,joelstanner/python-social-auth,python-social-auth/social-app-django,garrett-schlesinger/python-social-auth,iruga090/python-social-auth,DhiaEddineSaidi/python-social-auth,cmichal/python-social-auth,mrwags/python-social-auth,JerzySpendel/python-social-auth,webjunkie/python-social-auth,jameslittle/python-social-auth,degs098/python-social-auth,wildtetris/python-social-auth,iruga090/python-social-auth,cmichal/python-social-auth,rsteca/python-social-auth,merutak/python-social-auth,mchdks/python-social-auth,lam
by/python-social-auth,fearlessspider/python-social-auth,rsalmaso/python-social-auth,S01780/python-social-auth,garrett-schlesinger/python-social-auth,muhammad-ammar/python-social-auth,mark-adams/python-social-auth,webjunkie/python-social-auth,jeyraof/python-social-auth,VishvajitP/python-social-auth,JJediny/python-social-auth,mchdks/python-social-auth,noodle-learns-programming/python-social-auth,joelstanner/python-social-auth,msampathkumar/python-social-auth,michael-borisov/python-social-auth,michael-borisov/python-social-auth,VishvajitP/python-social-auth,alrusdi/python-social-auth,python-social-auth/social-docs,Andygmb/python-social-auth,yprez/python-social-auth,mrwags/python-social-auth,bjorand/python-social-auth,barseghyanartur/python-social-auth,rsalmaso/python-social-auth,ByteInternet/python-social-auth,noodle-learns-programming/python-social-auth,python-social-auth/social-core,hsr-ba-fs15-dat/python-social-auth,bjorand/python-social-auth,cjltsod/python-social-auth,jeyraof/python-social-auth,Andygmb/python-social-auth,fearlessspider/python-social-auth,firstjob/python-social-auth,tobias47n9e/social-core,ariestiyansyah/python-social-auth,barseghyanartur/python-social-auth | social/backends/taobao.py | social/backends/taobao.py | import json
from social.exceptions import AuthFailed
from social.backends.oauth import BaseOAuth2
# taobao OAuth base configuration
TAOBAO_OAUTH_HOST = 'oauth.taobao.com'
# TAOBAO_OAUTH_ROOT = 'authorize'
#Always use secure connection
TAOBAO_OAUTH_AUTHORIZATION_URL = 'https://%s/authorize' % (TAOBAO_OAUTH_HOST)
TAOBAO_OAUTH_ACCESS_TOKEN_URL = 'https://%s/token' % (TAOBAO_OAUTH_HOST)
TAOBAO_CHECK_AUTH = 'https://eco.taobao.com/router/rest'
class TAOBAOAuth(BaseOAuth2):
"""Taobao OAuth authentication mechanism"""
name="taobao"
ID_KEY='taobao_user_id'
AUTHORIZATION_URL = TAOBAO_OAUTH_AUTHORIZATION_URL
ACCESS_TOKEN_URL = TAOBAO_OAUTH_ACCESS_TOKEN_URL
def user_data(self, access_token, *args, **kwargs):
"""Return user data provided"""
params = {'method':'taobao.user.get',
'fomate':'json',
'v':'2.0',
'access_token': access_token}
try:
return self.get_json(TAOBAO_CHECK_AUTH, params=params)
except ValueError:
return None
def get_user_details(self, response):
"""Return user details from Taobao account"""
username = response.get('taobao_user_nick')
return {'username': username}
# Backend definition
BACKENDS = {
'taobao': TAOBAOAuth,
}
| import urllib2,urllib
from urllib2 import Request, urlopen, HTTPError
from urllib import urlencode
from urlparse import urlsplit
import json
from social.exceptions import AuthFailed
from social.backends.oauth import BaseOAuth2
# taobao OAuth base configuration
TAOBAO_OAUTH_HOST = 'oauth.taobao.com'
# TAOBAO_OAUTH_ROOT = 'authorize'
#Always use secure connection
TAOBAO_OAUTH_AUTHORIZATION_URL = 'https://%s/authorize' % (TAOBAO_OAUTH_HOST)
TAOBAO_OAUTH_ACCESS_TOKEN_URL = 'https://%s/token' % (TAOBAO_OAUTH_HOST)
TAOBAO_CHECK_AUTH = 'https://eco.taobao.com/router/rest'
class TAOBAOAuth(BaseOAuth2):
"""Taobao OAuth authentication mechanism"""
name="taobao"
ID_KEY='taobao_user_id'
AUTHORIZATION_URL = TAOBAO_OAUTH_AUTHORIZATION_URL
ACCESS_TOKEN_URL = TAOBAO_OAUTH_ACCESS_TOKEN_URL
def user_data(self, access_token, *args, **kwargs):
"""Return user data provided"""
params = {'method':'taobao.user.get',
'fomate':'json',
'v':'2.0',
'access_token': access_token}
try:
return self.get_json(TAOBAO_CHECK_AUTH, params=params)
except ValueError:
return None
def get_user_details(self, response):
"""Return user details from Taobao account"""
username = response.get('taobao_user_nick')
return {'username': username}
# Backend definition
BACKENDS = {
'taobao': TAOBAOAuth,
}
| bsd-3-clause | Python |
d24fb97341a2c87f743a98a5c771f81b6436f563 | Add an announcement command | MaT1g3R/YasenBaka | cogs/owner_only.py | cogs/owner_only.py | from discord import Forbidden, TextChannel
from discord.embeds import Embed
from discord.ext.commands import Context, command
from bot import Yasen
from scripts.checks import is_owner
class OnwerOnly:
__slots__ = ('bot',)
def __init__(self, bot: Yasen):
self.bot = bot
async def __local_check(self, ctx: Context):
return await is_owner(ctx)
@command()
async def announce(self, ctx: Context, *msg):
"""
Send an announcement to every guild.
This is hidden in the help message
"""
res = Embed(
colour=self.bot.config.colour,
title='This is an announcement from my developer.',
description=msg
)
await send_anncoucements(self.bot, res)
async def send_anncoucements(bot: Yasen, embed: Embed):
for guild in bot.guilds:
for ch in sorted(guild.channels, key=announce_key):
if not isinstance(ch, TextChannel):
continue
try:
await ch.send(embed=embed)
break
except Forbidden:
continue
def announce_key(channel):
if not isinstance(channel, TextChannel):
return 5
if channel.is_nsfw():
return 4
name = channel.name.lower()
if 'announce' in name:
return 0
if 'general' in name:
return 1
if 'bot' in name:
return 2
return 3
| from discord.ext.commands import Context
from bot import Yasen
from scripts.checks import is_owner
class OnwerOnly:
__slots__ = ('bot',)
def __init__(self, bot: Yasen):
self.bot = bot
async def __local_check(self, ctx: Context):
return await is_owner(ctx)
| apache-2.0 | Python |
ef05da1272413645c5b1b37b82aefd5f143645fd | Add cdict converter. | nens/colormaps | colormaps/utils.py | colormaps/utils.py | # -*- coding: utf-8 -*-
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import numpy as np
def cdict2config(cdict):
"""
Return dictionary suitable to use as kwargs on a GradientColormap.
Read the floats
Determine the stops (it is a set)
Determine interpolators for all components
That involves duplicating intermediate stops
Determine rgba values at missing stops using interpolation
Return stops and colors
"""
# stops
values = sorted(set(d[0] for v in cdict.values() for d in v))
# interpolators per color
interps = {}
for color, data in cdict.items():
x = []
y = []
for i in range(len(data) - 1):
a = data[i]
b = data[i + 1]
x.extend([a[0], b[0]])
y.extend([a[2], b[1]])
interps[color] = np.array(x), np.array(y)
# create data
index = {'red': 0, 'green': 1, 'blue': 2, 'alpha': 3}
floats = np.ones((len(values), 4))
for color, interp in interps.items():
floats[:, index[color]] = np.interp(values, *interp)
integers = (255 * floats).astype('u1')
return {'type': 'GradientColormap',
'data': zip(values, integers.tolist())}
def save(cdict, name):
""" Save a raster-server specific colormap file. """
data = cdict2config(cdict)['data']
template = ' [{v:.3f}, [{r:3}, {g:3}, {b:3}, {a}]]{c}\n'
with open('{name}.json'.format(name=name), 'w') as target:
target.write('{\n "type": "GradientColormap",\n "data": [\n')
for v, (r, g, b, a) in data[:-1]:
target.write(template.format(v=v, r=r, g=g, b=b, a=a, c=','))
v, (r, g, b, a) = data[-1]
target.write(template.format(v=v, r=r, g=g, b=b, a=a, c=''))
target.write(' ]\n}')
| # -*- coding: utf-8 -*-
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
# import numpy as np
def cdict2kwargs(cdict):
"""
Return dictionary suitable to use as kwargs on a GradientColormap.
Read the floats
Determine the stops (it is a set)
Determine interpolators for all components
That involves duplicating intermediate stops
Determine rgba values at missing stops using interpolation
Return stops and colors
"""
# r = cdict['red']
# g = cdict['green']
# b = cdict['blue'],
# a = cdict.get('alpha', [(0, None, 1), (1, 1, None)])
# result = [r, g, b, a, np]
# return result
| mit | Python |
eac2211956d49d9da957492bbac1bcdc85b1e40d | Add extra development data so the All England page loads | ebmdatalab/openprescribing,annapowellsmith/openpresc,ebmdatalab/openprescribing,ebmdatalab/openprescribing,annapowellsmith/openpresc,annapowellsmith/openpresc,annapowellsmith/openpresc,ebmdatalab/openprescribing | openprescribing/frontend/management/commands/load_development_data.py | openprescribing/frontend/management/commands/load_development_data.py | from django.core.management import call_command
from django.core.management.base import BaseCommand
from frontend.models import ImportLog, PPUSaving
from frontend.tests.test_api_spending import ApiTestBase, TestAPISpendingViewsPPUTable
class Command(BaseCommand):
help = 'Loads sample data intended for use in local development'
def handle(self, *args, **options):
# For now we just piggyback off the set of test fixtures used by the
# API tests
fixtures = TestAPISpendingViewsPPUTable.fixtures
call_command('loaddata', *fixtures)
ApiTestBase.setUpTestData()
max_ppu_date = PPUSaving.objects.order_by('-date')[0].date
ImportLog.objects.create(current_at=max_ppu_date, category='ppu')
| from django.core.management import call_command
from django.core.management.base import BaseCommand
from frontend.tests.test_api_spending import TestAPISpendingViewsPPUTable
class Command(BaseCommand):
help = 'Loads sample data intended for use in local development'
def handle(self, *args, **options):
# For now we just piggyback off the set of test fixtures used by the
# API tests
fixtures = TestAPISpendingViewsPPUTable.fixtures
call_command('loaddata', *fixtures)
| mit | Python |
0f7b9a9e3cead1bf5695c6fb601c2b9a7aff974f | remove whitespace | OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft | src/syft/core/common/object.py | src/syft/core/common/object.py | # external class/method imports (sorted by length)
from ...proto.core.common.common_object_pb2 import ObjectWithID as ObjectWithID_PB
# syft imports (sorted by length)
from .serializable import Serializable
from .uid import UID
class AbstractObjectWithID(Serializable):
"""This exists to allow us to typecheck on the ObjectWithId object
because we need a type which has already been initialized in
order to add it as a type hint on the ObjectWithId object.
"""
class ObjectWithID(AbstractObjectWithID):
"""This object is the superclass for nearly all Syft objects. Subclassing
from this object will cause an object to be initialized with a unique id
using the process specified in the UID class.
.. note::
At the time of writing, the only class in Syft which doesn't have an ID
of some kind is the Client class because it's job is to point to another
object (which has an ID).
"""
def __init__(self, id: UID = None, as_wrapper: bool = False):
"""This initializer only exists to set the id attribute, which is the
primary purpose of this class. It also sets the 'as_wrapper' flag
for the 'Serializable' superclass.
:param id: an override which can be used to set an ID for this object
manually. This is probably only used for deserialization.
:type id: UID
:param as_wrapper: this flag determines whether the subclass can also
be used as a wrapper class around a non-syft object. For details on
why, see :py:mod:`syft.core.common.serializable.Serializable`.
"""
# while this class is never used as a simple wrapper,
# it's possible that sub-classes of this class will be.
super().__init__(as_wrapper=as_wrapper)
if id is None:
id = UID()
self.id = id
def serialize(self):
return ObjectWithID_PB(id=self.id.serialize())
@staticmethod
def deserialize(proto_obj: ObjectWithID_PB) -> "ObjectWithID":
return ObjectWithID(id=UID.deserialize(proto_obj.id))
| # external class/method imports (sorted by length)
from ...proto.core.common.common_object_pb2 import ObjectWithID as ObjectWithID_PB
# syft imports (sorted by length)
from .serializable import Serializable
from .uid import UID
class AbstractObjectWithID(Serializable):
"""This exists to allow us to typecheck on the ObjectWithId object
because we need a type which has already been initialized in
order to add it as a type hint on the ObjectWithId object.
"""
class ObjectWithID(AbstractObjectWithID):
"""This object is the superclass for nearly all Syft objects. Subclassing
from this object will cause an object to be initialized with a unique id
using the process specified in the UID class.
.. note::
At the time of writing, the only class in Syft which doesn't have an ID
of some kind is the Client class because it's job is to point to another
object (which has an ID).
"""
def __init__(self, id: UID = None, as_wrapper: bool = False):
"""This initializer only exists to set the id attribute, which is the
primary purpose of this class. It also sets the 'as_wrapper' flag
for the 'Serializable' superclass.
:param id: an override which can be used to set an ID for this object
manually. This is probably only used for deserialization.
:type id: UID
:param as_wrapper: this flag determines whether the subclass can also
be used as a wrapper class around a non-syft object. For details on
why, see :py:mod:`syft.core.common.serializable.Serializable`.
"""
# while this class is never used as a simple wrapper,
# it's possible that sub-classes of this class will be.
super().__init__(as_wrapper=as_wrapper)
if id is None:
id = UID()
self.id = id
def serialize(self):
return ObjectWithID_PB(id=self.id.serialize())
@staticmethod
def deserialize(proto_obj: ObjectWithID_PB) -> "ObjectWithID":
return ObjectWithID(id=UID.deserialize(proto_obj.id))
| apache-2.0 | Python |
accebd9321bcbd2404fa19ba4b584b622cc4d95b | Fix test name | titipata/pubmed_parser | tests/test_pubmed_oa_parser.py | tests/test_pubmed_oa_parser.py | import os
import pytest
import pubmed_parser as pp
def test_parse_pubmed_xml():
"""
Test parsing metadata from a PubMed XML file
"""
parsed_xml = pp.parse_pubmed_xml(os.path.join("data", "pone.0046493.nxml"))
assert isinstance(parsed_xml, dict)
assert len(parsed_xml.get("abstract")) > 0
assert len(parsed_xml.get("full_title")) > 0
assert parsed_xml.get("pmc") == "3460867"
assert parsed_xml.get("doi") == "10.1371/journal.pone.0046493"
def test_parse_pubmed_paragraph():
"""
Test parsing captions and figure ID from a PubMed XML file
"""
paragraphs = pp.parse_pubmed_paragraph(os.path.join("data", "pone.0046493.nxml"))
assert isinstance(paragraphs, list)
assert isinstance(paragraphs[0], dict)
assert len(paragraphs) == 29, "Expected number of paragraphs to be 29"
assert (
len(paragraphs[0]["reference_ids"]) == 11
), "Expected number of references in the first paragraph to be 11"
def test_parse_pubmed_references():
"""
Test parsing references from a PubMed XML file
"""
references = pp.parse_pubmed_references(os.path.join("data", "pone.0046493.nxml"))
assert isinstance(references, list)
assert isinstance(references[0], dict)
assert len(references) == 58, "Expected references to have length of 29"
def test_parse_pubmed_caption():
"""
Test parsing captions and figure ID from a PubMed XML file
"""
captions = pp.parse_pubmed_caption(os.path.join("data", "pone.0046493.nxml"))
assert isinstance(captions, list)
assert isinstance(captions[0], dict)
assert (
len(captions) == 4
), "Expected number of figures/captions to have a length of 4"
| import os
import pytest
import pubmed_parser as pp
def test_parse_pubmed_xml():
"""
Test parse captions and figure ID from an XML file
"""
parsed_xml = pp.parse_pubmed_xml(os.path.join("data", "pone.0046493.nxml"))
assert isinstance(parsed_xml, dict)
assert len(parsed_xml.get("abstract")) > 0
assert len(parsed_xml.get("full_title")) > 0
assert parsed_xml.get("pmc") == "3460867"
assert parsed_xml.get("doi") == "10.1371/journal.pone.0046493"
def test_parse_pubmed_caption():
"""
Test parsing captions and figure ID from a PubMed XML file
"""
paragraphs = pp.parse_pubmed_paragraph(os.path.join("data", "pone.0046493.nxml"))
assert isinstance(paragraphs, list)
assert isinstance(paragraphs[0], dict)
assert len(paragraphs) == 29, "Expected number of paragraphs to be 29"
assert (
len(paragraphs[0]["reference_ids"]) == 11
), "Expected number of references in the first paragraph to be 11"
def test_parse_pubmed_references():
"""
Test parsing references from a PubMed XML file
"""
references = pp.parse_pubmed_references(os.path.join("data", "pone.0046493.nxml"))
assert isinstance(references, list)
assert isinstance(references[0], dict)
assert len(references) == 58, "Expected references to have length of 29"
def test_parse_pubmed_():
"""
Test parsing figures and captions from a PubMed XML file
"""
captions = pp.parse_pubmed_caption(os.path.join("data", "pone.0046493.nxml"))
assert isinstance(captions, list)
assert isinstance(captions[0], dict)
assert (
len(captions) == 4
), "Expected number of figures/captions to have a length of 4"
| mit | Python |
4043c7d72cd15c50763c40e4e3669c7967bc8a3d | Split test into two | zhouyuan/teuthology,michaelsevilla/teuthology,SUSE/teuthology,dreamhost/teuthology,t-miyamae/teuthology,zhouyuan/teuthology,ceph/teuthology,michaelsevilla/teuthology,yghannam/teuthology,dmick/teuthology,ivotron/teuthology,dmick/teuthology,ktdreyer/teuthology,robbat2/teuthology,t-miyamae/teuthology,yghannam/teuthology,tchaikov/teuthology,robbat2/teuthology,caibo2014/teuthology,ivotron/teuthology,SUSE/teuthology,dreamhost/teuthology,SUSE/teuthology,dmick/teuthology,ceph/teuthology,ktdreyer/teuthology,caibo2014/teuthology,tchaikov/teuthology | teuthology/test/test_config.py | teuthology/test/test_config.py | from .. import config
class TestConfig(object):
def test_get_ceph_git_base_default(self):
conf_obj = config.Config()
conf_obj.teuthology_yaml = ''
conf_obj.load_files()
assert conf_obj.ceph_git_base_url == "https://github.com/ceph/"
def test_set_ceph_git_base_via_private(self):
conf_obj = config.Config()
conf_obj._Config__conf['ceph_git_base_url'] = "git://ceph.com/"
assert conf_obj.ceph_git_base_url == "git://ceph.com/"
| from .. import config
class TestConfig(object):
def setup(self):
pass
def teardown(self):
pass
def test_get_and_set(self):
conf_obj = config.Config()
conf_obj.teuthology_yaml = ''
conf_obj.load_files()
assert conf_obj.ceph_git_base_url == "https://github.com/ceph/"
conf_obj._Config__conf['ceph_git_base_url'] = "git://ceph.com/"
assert conf_obj.ceph_git_base_url == "git://ceph.com/"
| mit | Python |
3e8e18354dde1681e10d8926f1902688464f071e | mark tg_pos_enhanced hidden | it-projects-llc/pos-addons,it-projects-llc/pos-addons,it-projects-llc/pos-addons | tg_pos_enhanced/__openerp__.py | tg_pos_enhanced/__openerp__.py | # -*- coding: utf-8 -*-
{
'name': 'TG POS enhanced',
'version': '1.0.0',
'category': 'Hidden',
'author': 'Thierry Godin',
'summary': 'POS modifications',
'description': """
It's a fork of TG_POS_ENHANCED module. It was not publish on github by author
Related links:
* http://thierry-godin.developpez.com/openerp/openerp-module-pos-enhanced-en/
* https://www.odoo.com/forum/Help-1/question/POS-Enhanced---with-screenshots--dl-link-40364
Numerous modifications of the Point Of Sale :
=============================================
- Customer pannel
- Editing / adding customers / link to sponsor
- Intuitive browsing for customers (right pan) - by letter
- Browse customer sales history : see its sales and all its products
- Cashier pannel at bottom-left
- POS internal messaging (instant, delayed, recurrent / text or image)
- Special discount : shop manager can allow special discount by entering a password
- Auto print option
""",
'depends': ["base", "account", "point_of_sale", "tg_partner_firstname", "tg_pos_message"],
'data': [
'tg_partner_view.xml',
'security/tg_cashiers_security.xml',
'security/ir.model.access.csv',
'tg_cashiers_view.xml',
'tg_order_view.xml',
'tg_users_view.xml',
'tg_pos_config_view.xml',
'tg_data.xml',
],
'qweb': [
'static/src/xml/tg_pos.xml',
],
#'js': [
# 'static/src/js/tg_pos.js',
#],
#'css':[
# 'static/src/css/tg_pos.css',
#],
'installable': True,
'application': False,
'auto_install': False,
}
| # -*- coding: utf-8 -*-
{
'name': 'TG POS enhanced',
'version': '1.0.0',
'category': 'Point Of Sale',
'author': 'Thierry Godin',
'summary': 'POS modifications',
'description': """
It's a fork of TG_POS_ENHANCED module. It was not publish on github by author
Related links:
* http://thierry-godin.developpez.com/openerp/openerp-module-pos-enhanced-en/
* https://www.odoo.com/forum/Help-1/question/POS-Enhanced---with-screenshots--dl-link-40364
Numerous modifications of the Point Of Sale :
=============================================
- Customer pannel
- Editing / adding customers / link to sponsor
- Intuitive browsing for customers (right pan) - by letter
- Browse customer sales history : see its sales and all its products
- Cashier pannel at bottom-left
- POS internal messaging (instant, delayed, recurrent / text or image)
- Special discount : shop manager can allow special discount by entering a password
- Auto print option
""",
'depends': ["base", "account", "point_of_sale", "tg_partner_firstname", "tg_pos_message"],
'data': [
'tg_partner_view.xml',
'security/tg_cashiers_security.xml',
'security/ir.model.access.csv',
'tg_cashiers_view.xml',
'tg_order_view.xml',
'tg_users_view.xml',
'tg_pos_config_view.xml',
'tg_data.xml',
],
'qweb': [
'static/src/xml/tg_pos.xml',
],
#'js': [
# 'static/src/js/tg_pos.js',
#],
#'css':[
# 'static/src/css/tg_pos.css',
#],
'installable': True,
'application': False,
'auto_install': False,
}
| mit | Python |
2efcd8dad6f3008660b47c0f216fc2fdcbdd80a6 | Integrate LLVM at llvm/llvm-project@9c8f888f5fca | tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "9c8f888f5fcafe8a9b69e7d652dc7328612f8ec6"
LLVM_SHA256 = "979bfd8651c02ef211059f59e54773ca1d1f25961f4932f1bdc11f8c8e6ecccf"
tfrt_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:build_defs.bzl": "mlir/build_defs.bzl",
"//third_party/mlir:linalggen.bzl": "mlir/linalggen.bzl",
"//third_party/mlir:tblgen.bzl": "mlir/tblgen.bzl",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "970129a2ddbef1f4f21977fa364248797576fa5a"
LLVM_SHA256 = "c98ac72ae99cdd7cbd09f1ce87407a3d1229f01a1e698f1682ab4cc802fd4251"
tfrt_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:build_defs.bzl": "mlir/build_defs.bzl",
"//third_party/mlir:linalggen.bzl": "mlir/linalggen.bzl",
"//third_party/mlir:tblgen.bzl": "mlir/tblgen.bzl",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
| apache-2.0 | Python |
c63d1fad13dcbfd9d75e4dc80fced716ce784934 | Integrate LLVM at llvm/llvm-project@4bb60c285cb3 | tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "4bb60c285cb3090e5fa91f585714a63618e08b4f"
LLVM_SHA256 = "b0cb96c73cf7186e69069a773dc789a05b3f647a3e84d55c70991b9f4be6a141"
tfrt_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "9430efa18b02e7a3f453793e48c96d5c954ed751"
LLVM_SHA256 = "9000f71135ac5c520c269823b493f05a45098dbbe2230b3562e826bdcca766f7"
tfrt_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
| apache-2.0 | Python |
0e02b9f622c5d6d1dd5ef006093e43b337c936da | Integrate LLVM at llvm/llvm-project@dbed14d215fe | tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "dbed14d215fed740e0e26784e7b8b00b68f5e680"
LLVM_SHA256 = "0cbcfb2c0fa145f1cf54d3c53752a9cb22521fad34c304d3e1d8e59d33384118"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "ffe8720aa060d66297500f30bb8ad02114e40326"
LLVM_SHA256 = "40cd1f69f314f1ce5ab520333b58993181524cd580b58f3ceb68c4a81ffa7ebe"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| apache-2.0 | Python |
78369d47265d78af9e171e85825343006311a054 | Integrate LLVM at llvm/llvm-project@ff1374785f82 | tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "ff1374785f82c2c184df7ca09a923acf62e4c277"
LLVM_SHA256 = "01d45f9a6ab6dc44ff1dad953ebace1b9819867749a7767c777485bb3199e844"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "0ecbb683a2faf0ec5bcb9eb472ebd9921cbe683a"
LLVM_SHA256 = "a4a9d16986411836e09ab6d6cf8b41ea2956fbce7000392feb6187c08f6725ae"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| apache-2.0 | Python |
713d0a113371744219e4f10dbd1575aa224e54c8 | Bump development version | lpomfrey/django-taggit-machinetags | taggit_machinetags/__init__.py | taggit_machinetags/__init__.py | # -*- coding: utf-8 -*-
from distutils import version
__version__ = '0.7.1'
version_info = version.StrictVersion(__version__).version
| # -*- coding: utf-8 -*-
from distutils import version
__version__ = '0.7.0'
version_info = version.StrictVersion(__version__).version
| bsd-2-clause | Python |
501fa785e5bbcb98c476a81c07acb9ee6ea9d386 | add filter functionality and move loging to process_change | qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq | corehq/apps/change_feed/consumer/pillow.py | corehq/apps/change_feed/consumer/pillow.py | from casexml.apps.case.models import CommCareCase
from corehq.apps.change_feed import topics
from corehq.apps.change_feed.consumer.feed import KafkaChangeFeed
from pillowtop.checkpoints.manager import PillowCheckpoint, PillowCheckpointEventHandler, \
get_django_checkpoint_store
from pillowtop.couchdb import CachedCouchDB
from pillowtop.listener import PythonPillow
from pillowtop.logger import pillow_logging
from pillowtop.pillow.interface import ConstructedPillow
from pillowtop.processor import NoopProcessor
# this number intentionally left high to avoid many redundant saves while this pillow is still
# in experimental stage
KAFKA_CHECKPOINT_FREQUENCY = 1000
class LoggingPythonPillow(PythonPillow):
def __init__(self, couch_db, checkpoint, change_feed, python_filter=None):
super(LoggingPythonPillow, self).__init__(
couch_db=couch_db, checkpoint=checkpoint, change_feed=change_feed, preload_docs=False
)
self._python_filter = python_filter
self._changes_processed = 0
def python_filter(self, change):
if self._python_filter is not None:
return self._python_filter(change)
def process_change(self, change, is_retry_attempt=False):
# do nothing
if self._changes_processed % KAFKA_CHECKPOINT_FREQUENCY == 0:
# only log a small amount to avoid clogging up supervisor
pillow_logging.info('Processed change {}: {}'.format(self._changes_processed, change))
self._changes_processed += 1
def get_demo_case_consumer_pillow():
checkpoint = PillowCheckpoint(
get_django_checkpoint_store(),
'kafka-demo-case-pillow-checkpoint',
)
return ConstructedPillow(
name='KafkaCaseConsumerPillow',
document_store=None,
checkpoint=checkpoint,
change_feed=KafkaChangeFeed(topic=topics.CASE, group_id='demo-case-group'),
processor=NoopProcessor(),
change_processed_event_handler=PillowCheckpointEventHandler(
checkpoint=checkpoint, checkpoint_frequency=KAFKA_CHECKPOINT_FREQUENCY,
),
)
def get_demo_python_pillow_consumer():
checkpoint = PillowCheckpoint(
get_django_checkpoint_store(),
'kafka-demo-python-pillow-checkpoint',
)
def arbitrary_filter(change):
# just to prove that filters work - only process data from domains starting with
# letters between "b" and "f"
return 'b' < change.metadata.domain < 'f'
return LoggingPythonPillow(
couch_db=CachedCouchDB(CommCareCase.get_db().uri, readonly=False),
checkpoint=checkpoint,
change_feed=KafkaChangeFeed(topic=topics.CASE, group_id='demo-python-pillow-group'),
python_filter=arbitrary_filter,
)
| from casexml.apps.case.models import CommCareCase
from corehq.apps.change_feed import topics
from corehq.apps.change_feed.consumer.feed import KafkaChangeFeed
from pillowtop.checkpoints.manager import PillowCheckpoint, PillowCheckpointEventHandler, \
get_django_checkpoint_store
from pillowtop.couchdb import CachedCouchDB
from pillowtop.listener import PythonPillow
from pillowtop.logger import pillow_logging
from pillowtop.pillow.interface import ConstructedPillow
from pillowtop.processor import NoopProcessor
# this number intentionally left high to avoid many redundant saves while this pillow is still
# in experimental stage
KAFKA_CHECKPOINT_FREQUENCY = 1000
class LoggingPythonPillow(PythonPillow):
def process_change(self, change, is_retry_attempt=False):
# do nothing
pass
def fire_change_processed_event(self, change, context):
# todo: when PythonPillow moves checkpoints to this function this class will
# need to use some other mechanism for this or reimplement checkpointing
if context.changes_seen % KAFKA_CHECKPOINT_FREQUENCY == 0:
# only log a small amount to avoid clogging up supervisor
pillow_logging.info('Processed change {}: {}'.format(context.changes_seen, change))
def get_demo_case_consumer_pillow():
checkpoint = PillowCheckpoint(
get_django_checkpoint_store(),
'kafka-demo-case-pillow-checkpoint',
)
return ConstructedPillow(
name='KafkaCaseConsumerPillow',
document_store=None,
checkpoint=checkpoint,
change_feed=KafkaChangeFeed(topic=topics.CASE, group_id='demo-case-group'),
processor=NoopProcessor(),
change_processed_event_handler=PillowCheckpointEventHandler(
checkpoint=checkpoint, checkpoint_frequency=KAFKA_CHECKPOINT_FREQUENCY,
),
)
def get_demo_python_pillow_consumer():
checkpoint = PillowCheckpoint(
get_django_checkpoint_store(),
'kafka-demo-python-pillow-checkpoint',
)
return LoggingPythonPillow(
couch_db=CachedCouchDB(CommCareCase.get_db().uri, readonly=False),
checkpoint=checkpoint,
change_feed=KafkaChangeFeed(topic=topics.CASE, group_id='demo-python-pillow-group'),
preload_docs=False,
)
| bsd-3-clause | Python |
54062a3abdc1516fe854a7b3b8ab86e74364f8cf | update test for min_zoom | mapzen/vector-datasource,mapzen/vector-datasource,mapzen/vector-datasource | integration-test/1695-turning-circles-turning-loops.py | integration-test/1695-turning-circles-turning-loops.py | # -*- encoding: utf-8 -*-
import dsl
from . import FixtureTest
# test turning circles and loops
class TurningCirclesAndLoops(FixtureTest):
def turning_circle(self):
self.generate_fixtures(
dsl.point(106186562, (-0.3544854, 51.5785667),
{u'source': u'openstreetmap.org',
u'highway': u'turning_circle'}))
self.assert_has_feature(
16, 32703, 21771, 'pois',
{'id': 106186562, 'kind': 'turning_circle', 'min_zoom': 17})
def turning_loop(self):
self.generate_fixtures(
dsl.point(4260010359, (8.43452, 49.4596352),
{u'source': u'openstreetmap.org',
u'highway': u'turning_loop'})
self.assert_has_feature(
16, 34303, 22378, 'pois',
{'id': 4260010359, 'kind': 'turning_loop', 'min_zoom': 17})
| # -*- encoding: utf-8 -*-
import dsl
from . import FixtureTest
# test turning circles and loops
class TurningCirclesAndLoops(FixtureTest):
def turning_circle(self):
self.generate_fixtures(
dsl.point(106186562, (-0.3544854, 51.5785667),
{u'source': u'openstreetmap.org',
u'highway': u'turning_circle'}))
self.assert_has_feature(
16, 32703, 21771, 'pois',
{'id': 106186562, 'kind': 'turning_circle'})
def turning_loop(self):
self.generate_fixtures(
dsl.point(4260010359, (8.43452, 49.4596352),
{u'source': u'openstreetmap.org',
u'highway': u'turning_loop'}))
self.assert_has_feature(
16, 34303, 22378, 'pois',
{'id': 4260010359, 'kind': 'turning_loop'})
| mit | Python |
0a2cdcfc4f9409e586290aff06d27d848fd46fe7 | Integrate LLVM at llvm/llvm-project@9b81d2fae8c0 | tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "9b81d2fae8c05ea611578137101e46a7b61fbaed"
LLVM_SHA256 = "68cbb89c244c4db566181d37b465e84dd1676f5f391a5049ab3a12907857b077"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "64b918852c09db6461d4c4c97df288c7b7fee865"
LLVM_SHA256 = "a0bca89fbbdfd08c258d98f02ba685690938a46b9a6568017904a4a606abad47"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
| apache-2.0 | Python |
d79d726a0e7748f7e63f18592c5de776d4bb0982 | Integrate LLVM at llvm/llvm-project@0277a24f4bba | tensorflow/runtime,tensorflow/runtime,tensorflow/runtime,tensorflow/runtime | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "0277a24f4bbac284ba7a2ace7eeefdf6305e7f69"
LLVM_SHA256 = "f14a9666c78a7829e06ad8d1b6dc86f1f4a9a092eb0b9708f14a5e9e8d4f566f"
tfrt_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "bfb9c749c02402ba082e81bfdadb15fb331c59ae"
LLVM_SHA256 = "ccf8590120b44d2a0d8fb960f4009935ab1cee72bb3c5d1314d75bfd03915f92"
tfrt_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
| apache-2.0 | Python |
15c540dd5934e5e49fbf6d142bfe66415117ad80 | Update raw.py | jlanga/smsk_popoolation,jlanga/smsk_popoolation,jlanga/smsk_popoolation,jlanga/smsk_popoolation,jlanga/smsk_popoolation | src/snakefiles/raw.py | src/snakefiles/raw.py | rule raw_make_links_pe_sample:
"""
Make a link to the original file, with a prettier name than default.
"""
input:
forward= lambda wildcards: config["samples_pe"][wildcards.sample][wildcards.library]["forward"],
reverse= lambda wildcards: config["samples_pe"][wildcards.sample][wildcards.library]["reverse"]
output:
forward= RAW + "{sample}/{library}_1.fq.gz",
reverse= RAW + "{sample}/{library}_2.fq.gz"
shell:
"ln --symbolic $(readlink --canonicalize {input.forward}) {output.forward}; "
"ln --symbolic $(readlink --canonicalize {input.reverse}) {output.reverse}"
rule raw_make_links_se_sample:
"""
Make a link to the original file, with a prettier name than default.
"""
input:
single= lambda wildcards: config["samples_se"][wildcards.sample][wildcards.library]["single"],
output:
single= RAW + "{sample}/{library}_se.fq.gz"
shell:
"ln --symbolic $(readlink --canonicalize {input.single}) {output.single}"
rule raw_extract_genome:
"""
Extract the fasta.gz on config.yaml into genome.fa
"""
input:
fa_gz = config["reference"]["dna"]
output:
fa = RAW + "genome.fa"
log: RAW + "genome.log"
benchmark: RAW + "genome.json"
shell:
"pigz "
"--decompress "
"--stdout "
"{input.fa_gz} "
"> {output.fa} "
"2> {log}"
| rule raw_make_links_pe_sample:
"""
Make a link to the original file, with a prettier name than default.
"""
input:
forward= lambda wildcards: config["samples_pe"][wildcards.sample][wildcards.library]["forward"],
reverse= lambda wildcards: config["samples_pe"][wildcards.sample][wildcards.library]["reverse"]
output:
forward= RAW + "{sample}/{library}_1.fq.gz",
reverse= RAW + "{sample}/{library}_2.fq.gz"
shell:
"ln --symbolic $(readlink --canonicalize {input.forward}) {output.forward}; "
"ln --symbolic $(readlink --canonicalize {input.reverse}) {output.reverse}"
rule raw_make_links_se_sample:
"""
Make a link to the original file, with a prettier name than default.
"""
input:
single= lambda wildcards: config["samples_se"][wildcards.sample][wildcards.library]["single"],
output:
single= RAW + "{sample}/{library}_se.fq.gz"
shell:
"ln --symbolic $(readlink --canonicalize {input.single}) {output.single}"
rule raw_extract_genome:
"""
Extract the fasta.gz on config.yaml into genome.fa
"""
input:
fa_gz = config["reference"]["dna"]
output:
fa = RAW + "genome.fa"
log: RAW + "genome.log"
benchmark: RAW + "genome.json"
shell:
"pigz "
"--decompress "
"--stdout "
"{input.fa_gz} "
"> {output.fa} "
"2> {log}"
| mit | Python |
40f7f49a9ff91966da9157f2305ba04f725106fc | Fix spelling typo | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | custom/enikshay/integrations/bets/const.py | custom/enikshay/integrations/bets/const.py | # BETS event names
SUCCESSFUL_TREATMENT_EVENT = "SUCCESSFUL_TREATMENT_EVENT" # TODO: Get this from BETS
# e-Voucher payout to chemists (reimbursement of drug cost + additional x% top up)
CHEMIST_VOUCHER_EVENT = '101'
# e-Voucher payout to labs (reimbursement of lab test cost - partial or in full)
LAB_VOUCHER_EVENT = '102'
# To provider for diagnosis and notification of TB case
DIAGNOSIS_AND_NOTIFICATION_EVENT = "103"
# 6 months (180 days) of private OR govt. FDCs with "Treatment Outcome" reported
TREATMENT_180_EVENT = "104"
# Registering and referral of a presumptive TB case in UATBC/eNikshay,
# and patient subsequently gets notified
AYUSH_REFERRAL_EVENT = "105"
# Suspect Registration + Validated diagnostic e-Voucher prior to or on date
# of treatment initiation
SUSPECT_REGISTRATION_EVENT = '106'
# To compounder on case notification
COMPOUNDER_NOTIFICATION_EVENT = '107'
# Honorarium to chemists for dispensing GoI - supplied daily drugs
CHEMIST_HONORARIUM_EVENT = '108'
# Cash transfer on subsequent drug refill (~at every drug voucher validation,
# starting after 2nd voucher)
DRUG_REFILL_EVENT = "109"
# Honorarium to public DOT providers
PROVIDER_HONORARIUM = '110'
BETS_EVENT_IDS = [
CHEMIST_VOUCHER_EVENT,
LAB_VOUCHER_EVENT,
DIAGNOSIS_AND_NOTIFICATION_EVENT,
TREATMENT_180_EVENT,
AYUSH_REFERRAL_EVENT,
SUSPECT_REGISTRATION_EVENT,
COMPOUNDER_NOTIFICATION_EVENT,
CHEMIST_HONORARIUM_EVENT,
DRUG_REFILL_EVENT,
PROVIDER_HONORARIUM,
]
LOCATION_TYPE_MAP = {
"pcc": "chemist",
"pcp": "mbbs",
"plc": "lab",
"pac": "ayush_other",
# TODO: ?? -> dots_provider
# TODO: ?? -> compounder
}
| # BETS event names
SUCCESSFUL_TREATMENT_EVENT = "SUCCESSFUL_TREATMENT_EVENT" # TODO: Get this from BETS
# e-Voucher payout to chemists (reimbursement of drug cost + additional x% top up)
CHEMIST_VOUCHER_EVENT = '101'
# e-Voucher payout to labs (reimbursement of lab test cost - partial or in full)
LAB_VOUCHER_EVENT = '102'
# To provider for diagnosis and notification of TB case
DIAGNOSIS_AND_NOTIFICATION_EVENT = "103"
# 6 months (180 days) of private OR govt. FDCs with "Treatment Outcome" reported
TREATMENT_180_EVENT = "104"
# Registering and referral of a presumptive TB case in UATBC/eNikshay,
# and patient subsequently gets notified
AYUSH_REFERRAL_EVENT = "105"
# Suspect Registration + Validated diagnostic e-Voucher prior to or on date
# of treatment initiation
SUSPECT_REGISTRATION_EVENT = '106'
# To compounder on case notification
COMPOUNDER_NOTIFICATION_EVENT = '107'
# Honorarium to chemists for dispensing GoI - supplied daily drugs
CHEMIST_HONORARIUM_EVETN = '108'
# Cash transfer on subsequent drug refill (~at every drug voucher validation,
# starting after 2nd voucher)
DRUG_REFILL_EVENT = "109"
# Honorarium to public DOT providers
PROVIDER_HONORARIUM = '110'
BETS_EVENT_IDS = [
CHEMIST_VOUCHER_EVENT,
LAB_VOUCHER_EVENT,
DIAGNOSIS_AND_NOTIFICATION_EVENT,
TREATMENT_180_EVENT,
AYUSH_REFERRAL_EVENT,
SUSPECT_REGISTRATION_EVENT,
COMPOUNDER_NOTIFICATION_EVENT,
CHEMIST_HONORARIUM_EVETN,
DRUG_REFILL_EVENT,
PROVIDER_HONORARIUM,
]
LOCATION_TYPE_MAP = {
"pcc": "chemist",
"pcp": "mbbs",
"plc": "lab",
"pac": "ayush_other",
# TODO: ?? -> dots_provider
# TODO: ?? -> compounder
}
| bsd-3-clause | Python |
846113236fdcf54339a4ee41e75de743fcb7a8e4 | Rework Heavens-Above scraper to use requests instead of urllib2 | bgottula/track,bgottula/track | track/heavens_above_scraper.py | track/heavens_above_scraper.py | #!/usr/bin/env python
from bs4 import BeautifulSoup
import requests
import re
import track
def urlify(s):
# Remove all non-word characters (everything except numbers and letters)
s = re.sub(r"[^\w\s]", '', s)
# Replace all runs of whitespace with a single underscore
s = re.sub(r"\s+", '_', s)
return s
def main():
parser = track.ArgParser()
parser.add_argument('--lat', required=True, help='latitude of observer (+N)')
parser.add_argument('--lon', required=True, help='longitude of observer (+E)')
parser.add_argument('--elevation', required=True, help='elevation of observer (m)', type=float)
args = parser.parse_args()
base_url = 'http://www.heavens-above.com/'
bright_sats_url = base_url + 'AllSats.aspx?lat={}&lng={}&alt={}'.format(args.lat, args.lon, args.elevation)
bright_sats_page = requests.get(bright_sats_url).text
bright_sats_soup = BeautifulSoup(bright_sats_page, 'lxml')
# find the rows in the table listing the satellite passes
table = bright_sats_soup.find_all('table')[4]
table_body = table.find_all('tbody')[0]
rows = table_body.find_all('tr')
tles = []
for row in rows:
# extract the satellite id and name from this table row
onclick_str = row['onclick']
url_suffix = re.findall(r"'([^']*)'", onclick_str)[0]
satid = re.findall(r"satid=([0-9]*)", url_suffix)[0]
satname = row.find_all('td')[0].string
pass_detail_url = base_url + url_suffix
print('Getting TLE for ' + satname + '...')
# get the TLE from the orbit details page for this satellite
orbit_url = base_url + 'orbit.aspx?satid=' + satid
orbit_page = requests.get(orbit_url).text
orbit_soup = BeautifulSoup(orbit_page, 'lxml')
pre_tag = orbit_soup.find_all('pre')[0]
span_tags = pre_tag.find_all('span')
tle = [satname]
for span_tag in span_tags:
assert span_tag['id'].startswith('ctl00_cph1_lblLine')
tle.append(span_tag.string)
tles.append(tle)
with open('/tmp/tles/' + urlify(satname) + '.tle', 'w') as f:
for line in tle:
f.write(line + '\n')
if len(tles) > 2:
break
if __name__ == "__main__":
main()
| #!/usr/bin/env python
from bs4 import BeautifulSoup
import urllib2
import re
import track
def urlify(s):
# Remove all non-word characters (everything except numbers and letters)
s = re.sub(r"[^\w\s]", '', s)
# Replace all runs of whitespace with a single underscore
s = re.sub(r"\s+", '_', s)
return s
def main():
parser = track.ArgParser()
parser.add_argument('--lat', required=True, help='latitude of observer (+N)')
parser.add_argument('--lon', required=True, help='longitude of observer (+E)')
parser.add_argument('--elevation', required=True, help='elevation of observer (m)', type=float)
args = parser.parse_args()
base_url = 'http://www.heavens-above.com/'
bright_sats_url = base_url + 'AllSats.aspx?lat={}&lng={}&alt={}'.format(args.lat, args.lon, args.elevation)
bright_sats_page = urllib2.urlopen(bright_sats_url).read()
bright_sats_soup = BeautifulSoup(bright_sats_page, 'lxml')
# find the rows in the table listing the satellite passes
table = bright_sats_soup.find_all('table')[4]
table_body = table.find_all('tbody')[0]
rows = table_body.find_all('tr')
tles = []
for row in rows:
# extract the satellite id and name from this table row
onclick_str = row['onclick']
url_suffix = re.findall(r"'([^']*)'", onclick_str)[0]
satid = re.findall(r"satid=([0-9]*)", url_suffix)[0]
satname = row.find_all('td')[0].string
pass_detail_url = base_url + url_suffix
print('Getting TLE for ' + satname + '...')
# get the TLE from the orbit details page for this satellite
orbit_url = base_url + 'orbit.aspx?satid=' + satid
orbit_page = urllib2.urlopen(orbit_url).read()
orbit_soup = BeautifulSoup(orbit_page, 'lxml')
pre_tag = orbit_soup.find_all('pre')[0]
span_tags = pre_tag.find_all('span')
tle = [satname]
for span_tag in span_tags:
assert span_tag['id'].startswith('ctl00_cph1_lblLine')
tle.append(span_tag.string)
tles.append(tle)
with open('/tmp/tles/' + urlify(satname) + '.tle', 'w') as f:
for line in tle:
f.write(line + '\n')
if len(tles) > 2:
break
if __name__ == "__main__":
main()
| mit | Python |
6c6410495a2f64cb769f4e4d6c9981d9fb7d723c | Add a delete rule test | yaybu/touchdown | touchdown/tests/test_aws_waf_rule.py | touchdown/tests/test_aws_waf_rule.py | # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from botocore.stub import Stubber
from touchdown import frontends
from touchdown.core import goals, workspace
from touchdown.core.map import SerialMap
class TestWafRule(unittest.TestCase):
def setUp(self):
self.workspace = workspace.Workspace()
self.aws = self.workspace.add_aws(access_key_id='dummy', secret_access_key='dummy', region='eu-west-1')
self.goal = goals.create(
"apply",
self.workspace,
frontends.ConsoleFrontend(interactive=False),
map=SerialMap
)
def test_annotate_object(self):
rule = self.aws.add_rule(name="myrule")
desc = self.goal.get_service(rule, "describe")
stub = Stubber(desc.client)
stub.add_response(
'get_rule',
{'Rule': {
'RuleId': 'ZzZzZz',
'Predicates': [],
}},
{'RuleId': 'ZzZzZz'},
)
with stub:
obj = desc.annotate_object({
"RuleId": "ZzZzZz"
})
self.assertEqual(obj["RuleId"], "ZzZzZz")
def test_delete_rule(self):
rule = self.aws.add_rule(name="myrule")
dest = self.goal.get_service(rule, "destroy")
dest.object = {
"Predicates": [{
"Foo": 1,
}],
}
stub = Stubber(dest.client)
with stub:
plan = list(dest.destroy_object())
self.assertEqual(len(plan), 2)
| # Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from botocore.stub import Stubber
from touchdown import frontends
from touchdown.core import goals, workspace
from touchdown.core.map import SerialMap
class TestBucketDescribe(unittest.TestCase):
def setUp(self):
self.workspace = workspace.Workspace()
self.aws = self.workspace.add_aws(access_key_id='dummy', secret_access_key='dummy', region='eu-west-1')
self.goal = goals.create(
"apply",
self.workspace,
frontends.ConsoleFrontend(interactive=False),
map=SerialMap
)
def test_annotate_object(self):
rule = self.aws.add_rule(name="myrule")
desc = self.goal.get_service(rule, "describe")
stub = Stubber(desc.client)
stub.add_response(
'get_rule',
{'Rule': {
'RuleId': 'ZzZzZz',
'Predicates': [],
}},
{'RuleId': 'ZzZzZz'},
)
with stub:
obj = desc.annotate_object({
"RuleId": "ZzZzZz"
})
self.assertEqual(obj["RuleId"], "ZzZzZz")
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.