commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
f81e6ea2410a209a5d598a5d384e51fc447d9668 | update __init__.py | kangasbros/django-bitcoin,readevalprint/django-bitcoin | django_bitcoin/__init__.py | django_bitcoin/__init__.py | from django_bitcoin.models import Payment, getNewBitcoinPayment
from django_bitcoin.models import BitcoinWallet
from django_bitcoin.utils import generateuniquehash, int2base64, base642int, bitcoinprice
| from django_bitcoin.models import BitcoinPayment, getNewBitcoinPayment
from django_bitcoin.models import BitcoinWallet
from django_bitcoin.utils import generateuniquehash, int2base64, base642int, bitcoinprice
| mit | Python |
85ac27150218087217756969275249178cc695e2 | Prepare v2.10.73.dev | LynxyssCZ/Flexget,gazpachoking/Flexget,LynxyssCZ/Flexget,ianstalk/Flexget,jawilson/Flexget,Danfocus/Flexget,ianstalk/Flexget,Flexget/Flexget,malkavi/Flexget,jawilson/Flexget,Flexget/Flexget,Danfocus/Flexget,jawilson/Flexget,tobinjt/Flexget,Danfocus/Flexget,jawilson/Flexget,OmgOhnoes/Flexget,malkavi/Flexget,crawln45/Flexget,crawln45/Flexget,crawln45/Flexget,qk4l/Flexget,malkavi/Flexget,tobinjt/Flexget,Flexget/Flexget,JorisDeRieck/Flexget,gazpachoking/Flexget,tobinjt/Flexget,qk4l/Flexget,JorisDeRieck/Flexget,OmgOhnoes/Flexget,crawln45/Flexget,LynxyssCZ/Flexget,Flexget/Flexget,Danfocus/Flexget,ianstalk/Flexget,LynxyssCZ/Flexget,malkavi/Flexget,OmgOhnoes/Flexget,qk4l/Flexget,tobinjt/Flexget,JorisDeRieck/Flexget,JorisDeRieck/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.10.73.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.10.72'
| mit | Python |
316a53249a8da17097e54ffcc4fe983e1106eb85 | enable all plugins and increase duration | missionpinball/mpf,missionpinball/mpf | mpf/tests/MpfMachineTestCase.py | mpf/tests/MpfMachineTestCase.py | from mpf.tests.MpfTestCase import MpfTestCase
class MpfMachineTestCase(MpfTestCase):
def __init__(self, methodName='runTest'):
super().__init__(methodName)
# only disable bcp. everything else should run
self.machine_config_patches = dict()
self.machine_config_patches['bcp'] = []
# increase test expected duration
self.expected_duration = 5.0
def getConfigFile(self):
return "config.yaml"
def getMachinePath(self):
return ""
def getAbsoluteMachinePath(self):
# do not use path relative to MPF folder
return self.getMachinePath() | from mpf.tests.MpfTestCase import MpfTestCase
class MpfMachineTestCase(MpfTestCase):
def getConfigFile(self):
return "config.yaml"
def getMachinePath(self):
return ""
def getAbsoluteMachinePath(self):
# do not use path relative to MPF folder
return self.getMachinePath() | mit | Python |
206bc6360709559a18be2bfa69925a804a7b2350 | add progress bar to build_cids.py | robrem/whoserep | build_cids.py | build_cids.py | """
Retrieves the OpenSecrets.org CIDs (Candidate IDs) for every US legislator and
write them to data/cids.txt.
The cids.txt file is used by TweetText to select a random legislator for each
tweet.
"""
import os
import json
from crpapi import CRP
try:
from config import secrets
except ImportError:
secrets = {
"OPENSECRETS_API_KEY" : os.environ['OPENSECRETS_API_KEY']
}
crp = CRP(secrets['OPENSECRETS_API_KEY'])
states = [
"AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
"HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
"MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
"NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
"SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"
]
def progress_bar(count, total, bar_len, status=''):
"""
Prints a progress bar to the console.
:param count: the current item iteration
:param total: the total number of items to iterate
:param bar_len: the length of the progress bar
:param status: optional message regarding progress bar status
"""
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
progress = '=' * filled_len + '-' * (bar_len - filled_len)
print('[%s] %s%s ...%s\r' % (progress, percents, '%', status))
def build_cids():
"""
Retrieve all CIDs for current congressional members and
write the list to data/cids.txt, replacing the previous file.
"""
cids = []
i = 0
progress_bar(i, len(states), 50, 'Starting...')
for state in states:
i += 1
cands = crp.candidates.get(state)
if len(cands) > 1:
for cand in cands:
cids.append(cand['@attributes']['cid'])
else:
cids.append(cands['@attributes']['cid'])
progress_bar(i, len(states), 50, 'Retrieving legislators from %s' % state)
f = open('data/cids.txt', 'wb')
json.dump(cids, f)
f.close()
result = 'Retrieved {num} CIDs from OpenSecrets.org.'.format(num=len(cids))
print(result)
if __name__ == "__main__":
build_cids()
| import json
from crpapi import CRP
try:
from config import secrets
except ImportError:
secrets = {
"OPENSECRETS_API_KEY" : os.environ['OPENSECRETS_API_KEY']
}
crp = CRP(secrets['OPENSECRETS_API_KEY'])
states = [
"AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
"HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
"MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
"NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
"SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"
]
def build_cids():
"""
Retrieve all CIDs for current congressional members and
write the list to data/cids.txt, replacing the previous file.
"""
cids = []
for state in states:
cands = crp.candidates.get(state)
if len(cands) > 1:
for cand in cands:
cids.append(cand['@attributes']['cid'])
else:
cids.append(cands['@attributes']['cid'])
f = open('data/cids.txt','wb')
json.dump(cids, f)
f.close()
result = 'Retrieved {num} CIDs from OpenSecrets.org.'.format(num=len(cids))
print(result)
if __name__ == "__main__":
build_cids() | mit | Python |
ce4f2216fa804f066187023d9950eb9da78461ae | fix PEP8 issue in example | amiryal/XKCD-password-generator,amiryal/XKCD-password-generator | examples/example_json.py | examples/example_json.py | from xkcdpass import xkcd_password as xp
from django.http import JsonResponse
def json_password_generator(request):
# Example Django view to generate passphrase suggestions via xkcd library
# Called with optional params e.g.
# /json_password_generator/?tc=true&separator=|&acrostic=face
if request.method == 'GET':
acrostic = request.GET.get("acrostic", None)
titlecase = request.GET.get("tc", None)
wordfile = xp.locate_wordfile()
words = xp.generate_wordlist(
wordfile=wordfile,
min_length=3,
max_length=8)
suggestion = xp.generate_xkcdpassword(words, acrostic=acrostic)
if titlecase:
# Convert "foo bar" to "Foo Bar"
suggestion = suggestion.title()
return JsonResponse({
'suggestion': suggestion}
)
| from xkcdpass import xkcd_password as xp
from django.http import JsonResponse
def json_password_generator(request):
# Example Django view to generate passphrase suggestions via xkcd library
# Called with optional params e.g.
# /json_password_generator/?tc=true&separator=|&acrostic=face
if request.method == 'GET':
acrostic = request.GET.get("acrostic", None)
titlecase = request.GET.get("tc", None)
wordfile = xp.locate_wordfile()
words = xp.generate_wordlist(
wordfile=wordfile,
min_length=3,
max_length=8)
suggestion = xp.generate_xkcdpassword(words, acrostic=acrostic)
if titlecase:
# Convert "foo bar" to "Foo Bar"
suggestion = suggestion.title()
return JsonResponse({
'suggestion': suggestion}
)
| bsd-3-clause | Python |
0d78ac36f49b8492702a8bc0c49a1508af7f86c2 | Remove explicit app_name | maikhoepfel/djangocms-oscar | djangocms_oscar/cms_app.py | djangocms_oscar/cms_app.py | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.conf.urls import patterns
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from oscar.app import application
from .menu import CategoriesMenu
class OscarApp(CMSApp):
"""
Allows "mounting" the oscar shop and all of its urls to a specific CMS page.
e.g at "/shop/"
"""
name = _("Oscar")
urls = [
patterns('', *application.urls[0])
]
menus = [CategoriesMenu]
apphook_pool.register(OscarApp)
| # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.conf.urls import patterns
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from oscar.app import application
from .menu import CategoriesMenu
class OscarApp(CMSApp):
"""
Allows "mounting" the oscar shop and all of its urls to a specific CMS page.
e.g at "/shop/"
"""
name = _("Oscar")
urls = [
patterns('', *application.urls[0])
]
menus = [CategoriesMenu]
app_name = "oscar"
apphook_pool.register(OscarApp)
| mit | Python |
a8b2fddcd910606b6bb18ffe91fccdd2c505f390 | fix satella's import | piotrmaslanka/satella,piotrmaslanka/satella | satella/coding/concurrent/__init__.py | satella/coding/concurrent/__init__.py | from .atomic import AtomicNumber
from .callablegroup import CallableGroup, CallNoOftenThan, CancellableCallback
from .functions import parallel_execute, run_as_future
from .futures import Future, WrappingFuture, InvalidStateError
from .id_allocator import IDAllocator, SequentialIssuer
from .locked_dataset import LockedDataset
from .locked_structure import LockedStructure
from .monitor import MonitorList, Monitor, MonitorDict, RMonitor
from .sync import sync_threadpool
from .thread import TerminableThread, Condition, SingleStartThread, call_in_separate_thread, \
BogusTerminableThread, IntervalTerminableThread
from .timer import Timer
from .queue import PeekableQueue
__all__ = ['LockedDataset', 'Monitor', 'RMonitor', 'CallableGroup', 'TerminableThread',
'MonitorDict', 'MonitorList', 'Condition', 'LockedStructure', 'AtomicNumber',
'CallNoOftenThan', 'SingleStartThread', 'IDAllocator', 'call_in_separate_thread',
'BogusTerminableThread', 'Timer', 'parallel_execute', 'run_as_future',
'sync_threadpool', 'IntervalTerminableThread', 'Future',
'WrappingFuture', 'InvalidStateError', 'PeekableQueue',
'CancellableCallback',
'SequentialIssuer']
| from .atomic import AtomicNumber
from .callablegroup import CallableGroup, CallNoOftenThan, CancellableCallback
from .functions import parallel_execute, run_as_future
from .futures import Future, WrappingFuture, InvalidStateError
from .id_allocator import IDAllocator, SequentialIssuer
from .locked_dataset import LockedDataset
from .locked_structure import LockedStructure
from .monitor import MonitorList, Monitor, MonitorDict, RMonitor
from .sync import sync_threadpool
from .thread import TerminableThread, Condition, SingleStartThread, call_in_separate_thread, \
BogusTerminableThread, IntervalTerminableThread, Terminating
from .timer import Timer
from .queue import PeekableQueue
__all__ = ['LockedDataset', 'Monitor', 'RMonitor', 'CallableGroup', 'TerminableThread',
'MonitorDict', 'MonitorList', 'Condition', 'LockedStructure', 'AtomicNumber',
'CallNoOftenThan', 'SingleStartThread', 'IDAllocator', 'call_in_separate_thread',
'BogusTerminableThread', 'Timer', 'parallel_execute', 'run_as_future',
'sync_threadpool', 'IntervalTerminableThread', 'Future',
'WrappingFuture', 'InvalidStateError', 'PeekableQueue',
'CancellableCallback',
'SequentialIssuer',
'Terminating']
| mit | Python |
7a994fbb091854d4558de3336b4db6a3a6cb4eaf | Update json_field to use stdlib json module | mozilla/nuggets | json_field.py | json_field.py | from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
try:
import json # json module added in Python 2.6.
except ImportError:
from django.utils import simplejson as json
# https://bitbucket.org/offline/django-annoying
class JSONField(models.TextField):
"""
JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly.
Django snippet #1478
example:
class Page(models.Model):
data = JSONField(blank=True, null=True)
page = Page.objects.get(pk=5)
page.data = {'title': 'test', 'type': 3}
page.save()
"""
__metaclass__ = models.SubfieldBase
def to_python(self, value):
if value == '':
return None
try:
if isinstance(value, basestring):
return json.loads(value)
except ValueError:
pass
return value
def get_db_prep_save(self, value, *args, **kwargs):
if value == '':
return None
value = json.dumps(value, cls=DjangoJSONEncoder, separators=(',', ':'))
return super(JSONField, self).get_db_prep_save(value, *args, **kwargs)
# South support.
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], ['^json_field\.JSONField'])
| from django.core.serializers.json import DjangoJSONEncoder
from django.db import models
from django.utils import simplejson as json
# https://bitbucket.org/offline/django-annoying
class JSONField(models.TextField):
"""
JSONField is a generic textfield that neatly serializes/unserializes
JSON objects seamlessly.
Django snippet #1478
example:
class Page(models.Model):
data = JSONField(blank=True, null=True)
page = Page.objects.get(pk=5)
page.data = {'title': 'test', 'type': 3}
page.save()
"""
__metaclass__ = models.SubfieldBase
def to_python(self, value):
if value == '':
return None
try:
if isinstance(value, basestring):
return json.loads(value)
except ValueError:
pass
return value
def get_db_prep_save(self, value, *args, **kwargs):
if value == '':
return None
value = json.dumps(value, cls=DjangoJSONEncoder, separators=(',', ':'))
return super(JSONField, self).get_db_prep_save(value, *args, **kwargs)
# South support.
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], ['^json_field\.JSONField'])
| bsd-3-clause | Python |
8078371a2fdb71f2f1ea57be2513ef7d78d8117c | fix for unix paths | 159356-1702-Extramural/capstone,159356-1702-Extramural/capstone,159356-1702-Extramural/capstone,159356-1702-Extramural/capstone | tests/seleniumPythonTest/run_all_tests.py | tests/seleniumPythonTest/run_all_tests.py | __author__ = 'QSG'
import unittest
import HTMLTestRunner
import os,sys, time
# Directory for locating test cases
test_dir=os.path.split(os.path.realpath(sys.argv[0]))[0]
test_case_dir= os.path.join(test_dir ,'test_case')
test_reports_dir= os.path.join(test_dir, 'selenium_test_reports')
print test_reports_dir
def creatsuite():
testunit=unittest.TestSuite()
#Define discover selenium py test files to find py test cases.
discover=unittest.defaultTestLoader.discover(test_dir,
pattern ='slnm_test*.py',
top_level_dir=None)
#Add test cases to the test suits
for test_suite in discover:
# print test_suite,'\n'
for test_case in test_suite:
testunit.addTests(test_case)
print testunit
return testunit
alltestnames = creatsuite()
now = time.strftime('%m-%d-%Y-%H_%M_%S',time.localtime(time.time()))
#If selenium test reports dir is not exists, then create it.
if not os.path.isdir(test_reports_dir):
os.mkdir(test_reports_dir)
filename = os.path.join(test_reports_dir,now+'_result.html')
fp = file(filename, 'wb')
runner =HTMLTestRunner.HTMLTestRunner(
stream=fp,
title=u'Settler of Massey Selenium Test Report',
description=u'Results of the tests:')
# runner=unittest.TextTestRunner(); #Run this line when only want to see result on screen.
#Running the test cases.
runner.run(alltestnames)
print "Selenium test finished."
| __author__ = 'QSG'
import unittest
import HTMLTestRunner
import os,sys, time
# Directory for locating test cases
test_dir=os.path.split(os.path.realpath(sys.argv[0]))[0]
test_case_dir=test_dir+'\\test_case\\'
test_reports_dir=test_dir+'\\selenium_test_reports\\'
print test_reports_dir
def creatsuite():
testunit=unittest.TestSuite()
#Define discover selenium py test files to find py test cases.
discover=unittest.defaultTestLoader.discover(test_dir,
pattern ='slnm_test*.py',
top_level_dir=None)
#Add test cases to the test suits
for test_suite in discover:
# print test_suite,'\n'
for test_case in test_suite:
testunit.addTests(test_case)
print testunit
return testunit
alltestnames = creatsuite()
now = time.strftime('%m-%d-%Y-%H_%M_%S',time.localtime(time.time()))
#If selenium test reports dir is not exists, then create it.
if not os.path.isdir(test_reports_dir):
os.mkdir(test_reports_dir)
filename = test_reports_dir+now+'result.html'
fp = file(filename, 'wb')
runner =HTMLTestRunner.HTMLTestRunner(
stream=fp,
title=u'Settler of Massey Selenium Test Report',
description=u'Results of the tests:')
# runner=unittest.TextTestRunner(); #Run this line when only want to see result on screen.
#Running the test cases.
runner.run(alltestnames)
print "Selenium test finished."
| mpl-2.0 | Python |
4eda64c78401cc9fc66e176ff475860e457c3008 | Simplify a method slightly. | gwax/mtg_ssm,gwax/mtgcdb | mtg_ssm/serialization/mtgdict.py | mtg_ssm/serialization/mtgdict.py | """Methods for managing data in the form of dicts."""
from mtg_ssm.mtg import models
def get_printing(coll, card_dict):
"""Given a card dict and various indexes, get the matching CardPrinting."""
mtgjsid = card_dict.get('id')
set_code = card_dict.get('set')
name = card_dict.get('name')
mvid = card_dict.get('multiverseid')
number = card_dict.get('number')
if mtgjsid is not None and mtgjsid in coll.id_to_printing:
return coll.id_to_printing[mtgjsid]
print('Could not find card by id, trying (set, name, num, mvid).')
set_name_num_mv = (set_code, name, number, mvid)
if set_name_num_mv in coll.set_name_num_mv_to_printings:
prints = coll.set_name_num_mv_to_printings[set_name_num_mv]
if len(prints) == 1:
return prints[0]
print('trying (set, name, mv)')
set_name_mv = (set_code, name, mvid)
if set_name_mv in coll.set_name_mv_to_printings:
prints = coll.set_name_mv_to_printings[set_name_mv]
if len(prints) == 1:
return prints[0]
print('trying (set, name, number)')
set_name_num = (set_code, name, number)
if set_name_num in coll.set_name_num_to_printings:
prints = coll.set_name_num_to_printings[set_name_num]
if len(prints) == 1:
return prints[0]
print('trying (set, name)')
set_and_name = (set_code, name)
if set_and_name in coll.set_and_name_to_printings:
prints = coll.set_and_name_to_printings[set_and_name]
if len(prints) == 1:
return prints[0]
print('Warning: Could not find printing for {}'.format(card_dict))
return None
def load_counts(collection, card_dicts):
"""Load counts from dicts of card info/counts into a Collection."""
for card_dict in card_dicts:
printing = get_printing(collection, card_dict)
for counttype in models.CountTypes:
count = card_dict.get(counttype.name)
if count is not None:
existing = printing.counts.setdefault(counttype, 0)
printing.counts[counttype] = existing + count
| """Methods for managing data in the form of dicts."""
from mtg_ssm.mtg import models
def get_printing(coll, card_dict):
"""Given a card dict and various indexes, get the matching CardPrinting."""
mtgjsid = card_dict.get('id')
set_code = card_dict.get('set')
name = card_dict.get('name')
mvid = card_dict.get('multiverseid')
number = card_dict.get('number')
if mtgjsid is not None and mtgjsid in coll.id_to_printing:
return coll.id_to_printing[mtgjsid]
if set_code is None or name is None:
print('Card has no set or card has no name and cannot be found.')
return None
print('Could not find card by id, trying (set, name, num, mvid).')
set_name_num_mv = (set_code, name, number, mvid)
if set_name_num_mv in coll.set_name_num_mv_to_printings:
prints = coll.set_name_num_mv_to_printings[set_name_num_mv]
if len(prints) == 1:
return prints[0]
else:
print('{} entries found.'.format(len(prints)))
print('trying (set, name, mv)')
set_name_mv = (set_code, name, mvid)
if set_name_mv in coll.set_name_mv_to_printings:
prints = coll.set_name_mv_to_printings[set_name_mv]
if len(prints) == 1:
return prints[0]
else:
print('{} entries found.'.format(len(prints)))
print('trying (set, name, number)')
set_name_num = (set_code, name, number)
if set_name_num in coll.set_name_num_to_printings:
prints = coll.set_name_num_to_printings[set_name_num]
if len(prints) == 1:
return prints[0]
else:
print('{} entries found.'.format(len(prints)))
print('trying (set, name)')
set_and_name = (set_code, name)
if set_and_name in coll.set_and_name_to_printings:
prints = coll.set_and_name_to_printings[set_and_name]
if len(prints) == 1:
return prints[0]
else:
print('{} entries found.'.format(len(prints)))
print('Warning: Could not find printing for {}'.format(card_dict))
return None
def load_counts(collection, card_dicts):
"""Load counts from dicts of card info/counts into a Collection."""
for card_dict in card_dicts:
printing = get_printing(collection, card_dict)
for counttype in models.CountTypes:
count = card_dict.get(counttype.name)
if count is not None:
existing = printing.counts.setdefault(counttype, 0)
printing.counts[counttype] = existing + count
| mit | Python |
86b22c351d70ee8a84df5735020949dd02bdfb49 | Include object type only if a number (of objects) is present. | EUDAT-DPMT/eudat.accounting.client | src/eudat/accounting/client/utils.py | src/eudat/accounting/client/utils.py | # utilties module from eudat.accounting.client
# collects methods that could be useful for client code as well
USERKEY = "ACCOUNTING_USER"
PWKEY = "ACCOUNTING_PW"
URL_PATTERN = "%s/%s/%s/addRecord?"
import os
import sys
import requests
from eudat.accounting.client import LOG
def getCredentials(args):
"""Extracts and returns (username, password) from args.
Looks into environment varaibles ACCOUNTING_USER and
ACCOUNTING_PW respectively if not found in args.
Terminates if not found"""
user = args.user
if not user:
user = os.getenv(USERKEY)
pw = args.password
if not pw:
pw = os.getenv(PWKEY)
if not user:
msg = "No user id provided"
LOG.error(msg)
sys.exit(msg)
if not pw:
msg = "No password provided"
LOG.error(msg)
sys.exit(msg)
LOG.info("Credentials found")
return (user, pw)
def getUrl(args):
"""Constructs the URL to call based on the parameters provided"""
url = URL_PATTERN % (args.base_url, args.domain, args.account)
LOG.info("URL: " + url)
return url
def getData(args):
"""builds a query string including the data"""
core_pattern = "core.%s:record=%s"
meta_pattern = "meta.%s:record=%s"
vars = []
vars.append('account=%s' % args.account)
if args.key:
vars.append('key=%s' % args.key)
for k in ['type', 'value', 'unit']:
vars.append(core_pattern % (k, getattr(args, k)))
hasNumber = False
for k in ['service', 'number', 'object_type', 'measure_time', 'comment']:
value = getattr(args, k)
if k=='number' and value:
hasNumber = True
if value:
if k=='object_type' and not hasNumber:
continue
vars.append(meta_pattern % (k, value))
qstring = '&'.join(vars)
LOG.info("query string: " + qstring)
return qstring
def call(cred, url, data):
call_url = url+data
r = requests.post(call_url, auth=cred)
# TODO: add error handling
return r
| # utilties module from eudat.accounting.client
# collects methods that could be useful for client code as well
USERKEY = "ACCOUNTING_USER"
PWKEY = "ACCOUNTING_PW"
URL_PATTERN = "%s/%s/%s/addRecord?"
import os
import sys
import requests
from eudat.accounting.client import LOG
def getCredentials(args):
"""Extracts and returns (username, password) from args.
Looks into environment varaibles ACCOUNTING_USER and
ACCOUNTING_PW respectively if not found in args.
Terminates if not found"""
user = args.user
if not user:
user = os.getenv(USERKEY)
pw = args.password
if not pw:
pw = os.getenv(PWKEY)
if not user:
msg = "No user id provided"
LOG.error(msg)
sys.exit(msg)
if not pw:
msg = "No password provided"
LOG.error(msg)
sys.exit(msg)
LOG.info("Credentials found")
return (user, pw)
def getUrl(args):
"""Constructs the URL to call based on the parameters provided"""
url = URL_PATTERN % (args.base_url, args.domain, args.account)
LOG.info("URL: " + url)
return url
def getData(args):
"""builds a query string including the data"""
core_pattern = "core.%s:record=%s"
meta_pattern = "meta.%s:record=%s"
vars = []
vars.append('account=%s' % args.account)
if args.key:
vars.append('key=%s' % args.key)
for k in ['type', 'value', 'unit']:
vars.append(core_pattern % (k, getattr(args, k)))
for k in ['service', 'number', 'object_type', 'measure_time', 'comment']:
value = getattr(args, k)
if value:
vars.append(meta_pattern % (k, value))
qstring = '&'.join(vars)
LOG.info("query string: " + qstring)
return qstring
def call(cred, url, data):
call_url = url+data
r = requests.post(call_url, auth=cred)
# TODO: add error handling
return r
| bsd-2-clause | Python |
452452a8cb6f60250b2896fead64a10e41228c15 | change some pring to log | navotsil/Open-Knesset,DanaOshri/Open-Knesset,noamelf/Open-Knesset,ofri/Open-Knesset,daonb/Open-Knesset,habeanf/Open-Knesset,noamelf/Open-Knesset,DanaOshri/Open-Knesset,navotsil/Open-Knesset,otadmor/Open-Knesset,daonb/Open-Knesset,DanaOshri/Open-Knesset,ofri/Open-Knesset,MeirKriheli/Open-Knesset,alonisser/Open-Knesset,Shrulik/Open-Knesset,OriHoch/Open-Knesset,OriHoch/Open-Knesset,daonb/Open-Knesset,noamelf/Open-Knesset,MeirKriheli/Open-Knesset,Shrulik/Open-Knesset,navotsil/Open-Knesset,ofri/Open-Knesset,otadmor/Open-Knesset,noamelf/Open-Knesset,Shrulik/Open-Knesset,alonisser/Open-Knesset,jspan/Open-Knesset,habeanf/Open-Knesset,habeanf/Open-Knesset,navotsil/Open-Knesset,otadmor/Open-Knesset,MeirKriheli/Open-Knesset,otadmor/Open-Knesset,alonisser/Open-Knesset,DanaOshri/Open-Knesset,daonb/Open-Knesset,OriHoch/Open-Knesset,jspan/Open-Knesset,MeirKriheli/Open-Knesset,jspan/Open-Knesset,alonisser/Open-Knesset,habeanf/Open-Knesset,Shrulik/Open-Knesset,OriHoch/Open-Knesset,jspan/Open-Knesset,ofri/Open-Knesset | src/knesset/mks/listeners.py | src/knesset/mks/listeners.py | #encoding: utf-8
from django.db.models.signals import post_save
from planet.models import Feed, Post
from actstream import action
from knesset.utils import cannonize, disable_for_loaddata
from knesset.laws.models import VoteAction
from knesset.links.models import Link, LinkType
from knesset.mks.models import Member
import logging
logger = logging.getLogger("open-knesset.mks.listeners")
@disable_for_loaddata
def connect_feed(sender, created, instance, **kwargs):
if created:
t = cannonize(instance.title)
rss_type, _ = LinkType.objects.get_or_create(title='רסס')
for m in Member.objects.all():
if t.rfind(cannonize(m.name)) != -1:
Link.objects.create(url=instance.url, title = instance.title,
content_object=m, link_type=rss_type)
logger.info(u'connected feed to %s' % m)
return
post_save.connect(connect_feed, sender=Feed)
@disable_for_loaddata
def record_post_action(sender, created, instance, **kwargs):
if created:
try:
link, _ = Link.objects.get_or_create(url=instance.feed.url)
except Link.MultipleObjectsReturned,e:
logger.warn('Multiple feeds: %s' % e)
link = Link.objects.filter(url=instance.feed.url)[0]
member = link.content_object
action.send(member, verb='posted',
target = instance,
timestamp=instance.date_modified or instance.date_created)
post_save.connect(record_post_action, sender=Post)
| #encoding: utf-8
from django.db.models.signals import post_save
from planet.models import Feed, Post
from actstream import action
from knesset.utils import cannonize, disable_for_loaddata
from knesset.laws.models import VoteAction
from knesset.links.models import Link, LinkType
from knesset.mks.models import Member
import logging
logger = logging.getLogger("open-knesset.mks.listeners")
@disable_for_loaddata
def connect_feed(sender, created, instance, **kwargs):
if created:
t = cannonize(instance.title)
rss_type, _ = LinkType.objects.get_or_create(title='רסס')
for m in Member.objects.all():
if t.rfind(cannonize(m.name)) != -1:
Link.objects.create(url=instance.url, title = instance.title,
content_object=m, link_type=rss_type)
print u'connected feed to %s' % m
return
post_save.connect(connect_feed, sender=Feed)
@disable_for_loaddata
def record_post_action(sender, created, instance, **kwargs):
if created:
try:
link, _ = Link.objects.get_or_create(url=instance.feed.url)
except Link.MultipleObjectsReturned,e:
logger.warn('Multiple feeds: %s' % e)
link = Link.objects.filter(url=instance.feed.url)[0]
member = link.content_object
action.send(member, verb='posted',
target = instance,
timestamp=instance.date_modified or instance.date_created)
post_save.connect(record_post_action, sender=Post)
| bsd-3-clause | Python |
9f41f4feae76b56235598e4f45c76b4670e7eec8 | Add long description | bright-tools/varints | varints/setup.py | varints/setup.py | #!/usr/bin/python
# Copyright 2017 John Bailey
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import unittest
def varints_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tests', pattern='test_*.py')
return test_suite
setup(name='varints',
version='0.1.3',
description='Variable-length encoding of integers',
url='http://github.com/bright-tools/varints',
author='John Bailey',
author_email='dev@brightsilence.com',
license='Apache',
long_description=open('README', 'rt').read(),
packages=['varints'],
test_suite='setup.varints_test_suite',
keywords = ['varint', 'integer', 'variable', 'length' ],
zip_safe=False)
| #!/usr/bin/python
# Copyright 2017 John Bailey
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import unittest
def varints_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tests', pattern='test_*.py')
return test_suite
setup(name='varints',
version='0.1',
description='Variable-length encoding of integers',
url='http://github.com/bright-tools/varints',
author='John Bailey',
author_email='dev@brightsilence.com',
license='Apache',
packages=['varints'],
test_suite='setup.varints_test_suite',
zip_safe=False)
| apache-2.0 | Python |
51d06cc32f5077cca462739204fa89ceebf76a81 | update tests | dssg/wikienergy,dssg/wikienergy,dssg/wikienergy,dssg/wikienergy,dssg/wikienergy | tests/test_pecanstreet_dataset_adapter.py | tests/test_pecanstreet_dataset_adapter.py | import sys
sys.path.append('../')
from disaggregator import PecanStreetDatasetAdapter
import unittest
class PecanStreetDatasetAdapterTestCase(unittest.TestCase):
def setUp(self):
db_url = "postgresql://USERNAME:PASSWORD@db.wiki-energy.org:5432/postgres"
self.psda = PecanStreetDatasetAdapter(db_url)
def test_get_table_names(self):
s_tables = self.psda.get_table_names('shared')
c_tables = self.psda.get_table_names('curated')
r_tables = self.psda.get_table_names('raw')
self.assertIn('group1_disaggregated_2012_12', c_tables,
'curated schema has correct tables')
self.assertIn('egauge_15min_2013', r_tables,
'raw schema has correct tables')
self.assertIn('validated_01_2014', s_tables,
'shared schema has correct tables')
def test_table_metadata(self):
ids,cols = self.psda.get_table_metadata('shared','validated_01_2014')
self.assertIn(744,ids,'shared table 01 2014 has dataid 744')
self.assertIn('use',cols,'shared table 01 2014 has column "use"')
self.assertIn('air1',cols,'shared table 01 2014 has column "air1"')
pass
def test_get_month_traces(self):
# traces = self.pdsa.get_month_traces('shared','validated_01_2014')
# trace = p.get_month_traces_wo_time_align('shared',str(tables[0]),i[0])
pass
if __name__ == '__main__':
unittest.main()
| import sys
sys.path.append('../')
from disaggregator import PecanStreetDatasetAdapter
import unittest
class PecanStreetDatasetAdapterTestCase(unittest.TestCase):
def setUp(self):
db_url = "postgresql://USERNAME:PASSWORD@db.wiki-energy.org:5432/postgres"
self.psda = PecanStreetDatasetAdapter(db_url)
def test_get_table_names(self):
s_tables = self.psda.get_table_names('shared')
c_tables = self.psda.get_table_names('curated')
r_tables = self.psda.get_table_names('raw')
self.assertIn('group1_disaggregated_2012_12', c_tables,
'curated schema has correct tables')
self.assertIn('egauge_15min_2013', r_tables,
'raw schema has correct tables')
self.assertIn('validated_01_2014', s_tables,
'shared schema has correct tables')
def test_table_metadata(self):
ids,cols = self.psda.get_table_metadata('shared','validated_01_2014')
self.assertIn(744,ids,'shared table 01 2014 has dataid 744')
self.assertIn('use',cols,'shared table 01 2014 has column "use"')
self.assertIn('air1',cols,'shared table 01 2014 has column "air1"')
pass
def test_get_month_traces(self):
# traces = self.pdsa.get_month_traces('shared','validated_01_2014')
# trace = p.get_month_traces_wo_time_align('shared',str(tables[0]),i[0])
pass
fast = TestSuite()
if __name__ == '__main__':
unittest.main()
| mit | Python |
e53491b8135d2edd338b57a6c39285fd4f1ef9ab | Raise an exception when input not real. | cournape/talkbox,cournape/talkbox | scikits/talkbox/tools/correlations.py | scikits/talkbox/tools/correlations.py | import numpy as np
from scipy.fftpack import fft, ifft
__all__ = ['nextpow2', 'acorr']
def nextpow2(n):
"""Return the next power of 2 such as 2^p >= n.
Note
----
Infinite and nan are left untouched, negative values are not allowed."""
if np.any(n < 0):
raise ValueError("n should be > 0")
if np.isscalar(n):
f, p = np.frexp(n)
if f == 0.5:
return p-1
elif np.isfinite(f):
return p
else:
return f
else:
f, p = np.frexp(n)
res = f
bet = np.isfinite(f)
exa = (f == 0.5)
res[bet] = p[bet]
res[exa] = p[exa] - 1
return res
def _acorr_last_axis(x, nfft, maxlag, onesided=False):
a = np.real(ifft(np.abs(fft(x, n=nfft) ** 2)))
if onesided:
return a[..., :maxlag]
else:
return np.concatenate([a[..., nfft-maxlag+1:nfft],
a[..., :maxlag]], axis=-1)
def acorr(x, axis=-1, onesided=False):
"""Compute autocorrelation of x along given axis.
Arguments
---------
x : array-like
signal to correlate.
axis : int
axis along which autocorrelation is computed.
onesided: bool, optional
if True, only returns the right side of the autocorrelation.
Notes
-----
No scaling is done (yet).
Use fft for computation: is more efficient than direct computation for
relatively large n.
"""
if not np.isrealobj(x):
raise ValueError("Complex input not supported yet")
maxlag = x.shape[axis]
nfft = 2 ** nextpow2(2 * maxlag - 1)
if axis != -1:
x = np.swapaxes(x, -1, axis)
a = _acorr_last_axis(x, nfft, maxlag, onesided)
if axis != -1:
a = np.swapaxes(a, -1, axis)
return a
| import numpy as np
from scipy.fftpack import fft, ifft
__all__ = ['nextpow2', 'acorr']
def nextpow2(n):
"""Return the next power of 2 such as 2^p >= n.
Note
----
Infinite and nan are left untouched, negative values are not allowed."""
if np.any(n < 0):
raise ValueError("n should be > 0")
if np.isscalar(n):
f, p = np.frexp(n)
if f == 0.5:
return p-1
elif np.isfinite(f):
return p
else:
return f
else:
f, p = np.frexp(n)
res = f
bet = np.isfinite(f)
exa = (f == 0.5)
res[bet] = p[bet]
res[exa] = p[exa] - 1
return res
def _acorr_last_axis(x, nfft, maxlag, onesided=False):
a = np.real(ifft(np.abs(fft(x, n=nfft) ** 2)))
if onesided:
return a[..., :maxlag]
else:
return np.concatenate([a[..., nfft-maxlag+1:nfft],
a[..., :maxlag]], axis=-1)
def acorr(x, axis=-1, onesided=False):
"""Compute autocorrelation of x along given axis.
Arguments
---------
x : array-like
signal to correlate.
axis : int
axis along which autocorrelation is computed.
onesided: bool, optional
if True, only returns the right side of the autocorrelation.
Notes
-----
No scaling is done (yet).
Use fft for computation: is more efficient than direct computation for
relatively large n.
"""
maxlag = x.shape[axis]
nfft = 2 ** nextpow2(2 * maxlag - 1)
if axis != -1:
x = np.swapaxes(x, -1, axis)
a = _acorr_last_axis(x, nfft, maxlag, onesided)
if axis != -1:
a = np.swapaxes(a, -1, axis)
return a
| mit | Python |
756013fdc63eae95c9a0f510ea27357777114807 | remove dead code | armijnhemel/binaryanalysis | src/bat/piecharts.py | src/bat/piecharts.py | #!/usr/bin/python
## Binary Analysis Tool
## Copyright 2012-2013 Armijn Hemel for Tjaldur Software Governance Solutions
## Licensed under Apache 2.0, see LICENSE file for details
'''
This is a plugin for the Binary Analysis Tool. It generates images of results
of the ranking scan, like piecharts and version charts.
It is used by generateimages.py
'''
import os, os.path, sys, cPickle
import matplotlib
matplotlib.use('cairo')
import pylab
def generateImages(picklefile, pickledir, filehash, imagedir, pietype):
leaf_file = open(os.path.join(pickledir, picklefile), 'rb')
(piedata, pielabels) = cPickle.load(leaf_file)
leaf_file.close()
pylab.figure(1, figsize=(6.5,6.5))
ax = pylab.axes([0.2, 0.15, 0.6, 0.6])
pylab.pie(piedata, labels=pielabels)
pylab.savefig('%s/%s-%s.png' % (imagedir, filehash, pietype))
pylab.gcf().clear()
os.unlink(os.path.join(pickledir, picklefile))
| #!/usr/bin/python
## Binary Analysis Tool
## Copyright 2012-2013 Armijn Hemel for Tjaldur Software Governance Solutions
## Licensed under Apache 2.0, see LICENSE file for details
'''
This is a plugin for the Binary Analysis Tool. It generates images of results
of the ranking scan, like piecharts and version charts.
It is used by generateimages.py
'''
import os, os.path, sys, cPickle
import matplotlib
matplotlib.use('cairo')
import pylab
def generateImages(picklefile, pickledir, filehash, imagedir, pietype):
leaf_file = open(os.path.join(pickledir, picklefile), 'rb')
(piedata, pielabels) = cPickle.load(leaf_file)
leaf_file.close()
pylab.figure(1, figsize=(6.5,6.5))
ax = pylab.axes([0.2, 0.15, 0.6, 0.6])
pylab.pie(piedata, labels=pielabels)
pylab.savefig('%s/%s-%s.png' % (imagedir, filehash, pietype))
pylab.gcf().clear()
os.unlink(os.path.join(pickledir, picklefile))
'''
### This is just some dead code that was an attempt at generating better piecharts. It might be used in the future, so keep it here for reference.
j_sorted = sorted(j[4], key=lambda x: j[4][x])
max_y = j[4][j_sorted[-1]]
xvalues = []
yvalues = []
for v in j_sorted:
xvalues.append(v)
yvalues.append(j[4][v])
print >>sys.stderr, v, j[4][v], j[1], xvalues, yvalues, "max", max_y
figsize = len(xvalues) * 1.0
pylab.gcf().set_size_inches(figsize, 7)
pylab.xlabel('version')
pylab.ylabel('matches')
pylab.title("Unique matches for %s" % j[1])
## leave some space at the top
pylab.gca().set_ylim(top=max_y + 1)
x = pylab.arange(len(xvalues))
b = pylab.bar(x, yvalues, width=0.6)
for bb in b:
print >>sys.stderr, bb.get_width(), bb.get_height()
## center the text
pylab.xticks(x+0.3, xvalues, rotation=270)
pylab.savefig('%s/%s-%s-version.png' % (imagedir, unpackreport['sha256'], j[1]))
pylab.gcf().clear()
'''
| apache-2.0 | Python |
2b3e1498b6a346be779e0857b24540b958e41073 | Use new API version and fix an error if no price is supplied. | LemonTVnz/lemontv-scrapers | nzfilm.py | nzfilm.py | import json
import util
import urllib
# SHIFT API Documentation
# http://indiereign.github.io/shift72-docs/
DATA_URL = 'https://ondemand.nzfilm.co.nz/services/meta/v4/featured/8'
FILM_DETAILS_TEMPLATE = "https://ondemand.nzfilm.co.nz/services/meta/v2{0}/show_multiple"
PRICE_TEMPLATE = "https://ondemand.nzfilm.co.nz/services/pricing/v2/prices/show_multiple?items={0}&location=nz"
URI_TEMPLATE = "https://ondemand.nzfilm.co.nz/#!/browse{0}/{1}"
TV_SEASON_TEMPLATE = "https://ondemand.nzfilm.co.nz/services/meta/v2/tv/season/show_multiple?items={0}"
def get_movie(slug):
# find movie
data = util.get_url_json(FILM_DETAILS_TEMPLATE.format(slug))[0]
price_data = util.get_url_json(PRICE_TEMPLATE.format(slug))
show = {}
show["title"] = data["title"]
show["type"] = "movie"
show["image"] = data["image_urls"]["portrait"]
show["year"] = data["release_date"][:4]
# lowest price
price = ""
if price_data["prices"]:
price = price_data["prices"][0]["rent"]["hd"]
# every show has an episode even movies
show["episodes"] = [{"show" : data["title"], "uri" : URI_TEMPLATE.format(slug, urllib.parse.quote_plus(data["title"])), "s" : 0, "e" : 0, "price" : price}]
return show
def get_tv(slug):
# find seasons
data = util.get_url_json(TV_SEASON_TEMPLATE.format(slug))["seasons"][0]
price_data = util.get_url_json(PRICE_TEMPLATE.format(slug))
show = {}
show["title"] = data["show_info"]["title"]
show["type"] = "tv"
show["image"] = data["image_urls"]["portrait"]
show["year"] = data["show_info"]["release_date"][:4]
# Price is per season. No prices per episode
# lowest price
price = ""
if price_data["prices"]:
price = price_data["prices"][0]["rent"]["hd"]
print ("TV Price: " + price)
show["episodes"] = []
for e in data["episodes"]:
episode = {}
episode["title"] = e["title"]
episode["uri"] = URI_TEMPLATE.format(slug, urllib.parse.quote_plus(data["show_info"]["title"]))
episode["s"] = data["season_num"]
episode["e"] = e["episode_number"]
show["episodes"].append(episode)
return show
def get_listings():
shows = []
data = util.get_url_json(DATA_URL)
all = []
for slug in data["items"]:
if "/film/" in slug:
shows.append(get_movie(slug))
else:
shows.append(get_tv(slug))
return shows
if __name__ == "__main__":
# Test works
f = open("nzfilm.js", "w")
f.write(json.dumps(get_listings()))
f.close()
| import json
import util
import urllib
# SHIFT API Documentation
# http://indiereign.github.io/shift72-docs/
DATA_URL = 'https://ondemand.nzfilm.co.nz/services/meta/v3/featured/show/home_page'
FILM_DETAILS_TEMPLATE = "https://ondemand.nzfilm.co.nz/services/meta/v2{0}/show_multiple"
PRICE_TEMPLATE = "https://ondemand.nzfilm.co.nz/services/pricing/v2/prices/show_multiple?items={0}&location=nz"
URI_TEMPLATE = "https://ondemand.nzfilm.co.nz/#!/browse{0}/{1}"
TV_SEASON_TEMPLATE = "https://ondemand.nzfilm.co.nz/services/meta/v2/tv/season/show_multiple?items={0}"
def get_movie(slug):
# find movie
data = util.get_url_json(FILM_DETAILS_TEMPLATE.format(slug))[0]
price_data = util.get_url_json(PRICE_TEMPLATE.format(slug))
show = {}
show["title"] = data["title"]
show["type"] = "movie"
show["image"] = data["image_urls"]["portrait"]
show["year"] = data["release_date"][:4]
# lowest price
price = price_data["prices"][0]["rent"]["hd"]
# every show has an episode even movies
show["episodes"] = [{"show" : data["title"], "uri" : URI_TEMPLATE.format(slug, urllib.parse.quote_plus(data["title"])), "s" : 0, "e" : 0, "price" : price}]
return show
def get_tv(slug):
# find seasons
data = util.get_url_json(TV_SEASON_TEMPLATE.format(slug))["seasons"][0]
price_data = util.get_url_json(PRICE_TEMPLATE.format(slug))
show = {}
show["title"] = data["show_info"]["title"]
show["type"] = "tv"
show["image"] = data["image_urls"]["portrait"]
show["year"] = data["show_info"]["release_date"][:4]
# Price is per season. No prices per episode
# lowest price
price = price_data["prices"][0]["rent"]["hd"]
print ("TV Price: " + price)
show["episodes"] = []
for e in data["episodes"]:
episode = {}
episode["title"] = e["title"]
episode["uri"] = URI_TEMPLATE.format(slug, urllib.parse.quote_plus(data["show_info"]["title"]))
episode["s"] = data["season_num"]
episode["e"] = e["episode_number"]
show["episodes"].append(episode)
return show
def get_listings():
shows = []
data = util.get_url_json(DATA_URL)
all = []
for feature in data["features"]:
if "/film/" in feature["item"]:
shows.append(get_movie(feature["item"]))
else:
shows.append(get_tv(feature["item"]))
return shows
if __name__ == "__main__":
# Test works
f = open("nzfilm.js", "w")
f.write(json.dumps(get_listings()))
f.close()
| mit | Python |
eb3b82cb356382bb4f48c917e0bf141818f78b90 | Fix test when creating duplicate campaign | wiki-ai/wikilabels,wiki-ai/wikilabels,wiki-ai/wikilabels | wikilabels/tests/utilities/test_new_campaign.py | wikilabels/tests/utilities/test_new_campaign.py | from wikilabels.utilities.new_campaign import main
item = {'wiki': "cawiki", 'name': "newiki", 'form': "chan",
'view': "bivicyt", 'labels_per_task': "1",
'task_per_task': "50", 'active': True,
'info_url':
"--info-url=https://www.mediawiki.org/wiki/ORES#Edit_quality"}
repeated = {'wiki': "cawiki", 'name': "repeated", 'form': "chan",
'view': "bivicyt", 'labels_per_task': "1",
'task_per_task': "50", 'active': True,
'info_url':
"--info-url=https://www.mediawiki.org/wiki/ORES#Edit_quality"}
optional = {'wiki': "cawiki", 'name': "optional", 'form': "chan",
'view': "bivicyt", 'labels_per_task': "1",
'task_per_task': "50", 'active': True}
def test_create_campaign():
main([item["wiki"], item['name'],
item['form'], item['view'],
item['labels_per_task'],
item['task_per_task'],
item['info_url']])
def insert_repeated():
main([repeated["wiki"], repeated['name'],
repeated['form'], repeated['view'],
repeated['labels_per_task'],
repeated['task_per_task'],
repeated['info_url']])
def test_create_campaign_fail(capsys):
insert_repeated()
insert_repeated()
out, err = capsys.readouterr()
assert (err == "Duplicate campaign: repeated already exists for cawiki. "
"Use --force if this is expected.\n")
def test_create_campaign_optional_infourl():
main([optional["wiki"], optional['name'],
optional['form'], optional['view'],
optional['labels_per_task'],
optional['task_per_task']])
| from wikilabels.utilities.new_campaign import main
import pytest
item = {'wiki': "cawiki", 'name': "newiki", 'form': "chan",
'view': "bivicyt", 'labels_per_task': "1",
'task_per_task': "50", 'active': True,
'info_url':
"--info-url=https://www.mediawiki.org/wiki/ORES#Edit_quality"}
repeated = {'wiki': "cawiki", 'name': "repeated", 'form': "chan",
'view': "bivicyt", 'labels_per_task': "1",
'task_per_task': "50", 'active': True,
'info_url':
"--info-url=https://www.mediawiki.org/wiki/ORES#Edit_quality"}
optional = {'wiki': "cawiki", 'name': "optional", 'form': "chan",
'view': "bivicyt", 'labels_per_task': "1",
'task_per_task': "50", 'active': True}
def test_create_campaign():
main([item["wiki"], item['name'],
item['form'], item['view'],
item['labels_per_task'],
item['task_per_task'],
item['info_url']])
def insert_repeated():
main([repeated["wiki"], repeated['name'],
repeated['form'], repeated['view'],
repeated['labels_per_task'],
repeated['task_per_task'],
repeated['info_url']])
def test_create_campaign_fail():
insert_repeated()
with pytest.raises(Exception):
insert_repeated()
def test_create_campaign_optional_infourl():
main([optional["wiki"], optional['name'],
optional['form'], optional['view'],
optional['labels_per_task'],
optional['task_per_task']])
| mit | Python |
2a2c9dc43a7d096dd5601f51c0407c36433e73e1 | Make coordinate 'frames' be imported | kelle/astropy,lpsinger/astropy,dhomeier/astropy,StuartLittlefair/astropy,DougBurke/astropy,astropy/astropy,stargaser/astropy,AustereCuriosity/astropy,lpsinger/astropy,stargaser/astropy,pllim/astropy,astropy/astropy,funbaker/astropy,joergdietrich/astropy,joergdietrich/astropy,kelle/astropy,aleksandr-bakanov/astropy,kelle/astropy,funbaker/astropy,dhomeier/astropy,mhvk/astropy,bsipocz/astropy,bsipocz/astropy,joergdietrich/astropy,MSeifert04/astropy,AustereCuriosity/astropy,mhvk/astropy,dhomeier/astropy,bsipocz/astropy,tbabej/astropy,aleksandr-bakanov/astropy,StuartLittlefair/astropy,aleksandr-bakanov/astropy,larrybradley/astropy,MSeifert04/astropy,StuartLittlefair/astropy,kelle/astropy,astropy/astropy,tbabej/astropy,funbaker/astropy,saimn/astropy,joergdietrich/astropy,pllim/astropy,AustereCuriosity/astropy,AustereCuriosity/astropy,astropy/astropy,mhvk/astropy,dhomeier/astropy,StuartLittlefair/astropy,DougBurke/astropy,pllim/astropy,DougBurke/astropy,stargaser/astropy,tbabej/astropy,larrybradley/astropy,lpsinger/astropy,saimn/astropy,mhvk/astropy,saimn/astropy,MSeifert04/astropy,StuartLittlefair/astropy,dhomeier/astropy,kelle/astropy,tbabej/astropy,bsipocz/astropy,funbaker/astropy,aleksandr-bakanov/astropy,larrybradley/astropy,pllim/astropy,saimn/astropy,lpsinger/astropy,DougBurke/astropy,saimn/astropy,astropy/astropy,joergdietrich/astropy,stargaser/astropy,pllim/astropy,larrybradley/astropy,AustereCuriosity/astropy,tbabej/astropy,larrybradley/astropy,lpsinger/astropy,MSeifert04/astropy,mhvk/astropy | astropy/coordinates/__init__.py | astropy/coordinates/__init__.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains classes and functions for celestial coordinates
of astronomical objects. It also contains a framework for conversions
between coordinate systems.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .errors import *
from .angles import *
from .baseframe import *
from .distances import *
from .earth import *
from .transformations import *
from .builtin_frames import *
from .name_resolve import *
from .matching import *
from .old_builtin_systems_names import * # TODO: remove this in next version, along with module file
from .representation import *
__doc__ += builtin_systems._transform_graph_docs
| # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains classes and functions for celestial coordinates
of astronomical objects. It also contains a framework for conversions
between coordinate systems.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .errors import *
from .angles import *
from .coordsystems import *
from .distances import *
from .earth import *
from .transformations import *
from .builtin_systems import *
from .name_resolve import *
from .matching import *
from .old_builtin_systems_names import * # TODO: remove this in next version, along with module file
from .representation import *
__doc__ += builtin_systems._transform_graph_docs
| bsd-3-clause | Python |
3cf81c31cad0ad55b0288aa70b67d03484a626b9 | Remove unused ngram references, add new thresholds for small matches | michaelrup/scancode-toolkit,yashdsaraf/scancode-toolkit,michaelrup/scancode-toolkit,michaelrup/scancode-toolkit,yashdsaraf/scancode-toolkit,marcreyesph/scancode-toolkit,michaelrup/scancode-toolkit,yashdsaraf/scancode-toolkit,michaelrup/scancode-toolkit,yashdsaraf/scancode-toolkit,michaelrup/scancode-toolkit,yashdsaraf/scancode-toolkit,marcreyesph/scancode-toolkit,yasharmaster/scancode-toolkit,michaelrup/scancode-toolkit,marcreyesph/scancode-toolkit,yashdsaraf/scancode-toolkit,yashdsaraf/scancode-toolkit,yasharmaster/scancode-toolkit,marcreyesph/scancode-toolkit,yasharmaster/scancode-toolkit,marcreyesph/scancode-toolkit,yashdsaraf/scancode-toolkit,yasharmaster/scancode-toolkit,yasharmaster/scancode-toolkit,yasharmaster/scancode-toolkit,yashdsaraf/scancode-toolkit,michaelrup/scancode-toolkit,marcreyesph/scancode-toolkit,yasharmaster/scancode-toolkit,marcreyesph/scancode-toolkit,marcreyesph/scancode-toolkit,michaelrup/scancode-toolkit,yasharmaster/scancode-toolkit,yasharmaster/scancode-toolkit,yashdsaraf/scancode-toolkit,michaelrup/scancode-toolkit,marcreyesph/scancode-toolkit,yashdsaraf/scancode-toolkit,yashdsaraf/scancode-toolkit,michaelrup/scancode-toolkit,yasharmaster/scancode-toolkit,yasharmaster/scancode-toolkit,marcreyesph/scancode-toolkit,marcreyesph/scancode-toolkit,michaelrup/scancode-toolkit,marcreyesph/scancode-toolkit,yasharmaster/scancode-toolkit | src/licensedcode/__init__.py | src/licensedcode/__init__.py | #
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from os.path import dirname
from os.path import abspath
from os.path import getsize
from os.path import getmtime
from os.path import join
from os.path import exists
from commoncode import fileutils
lic_src_dir = abspath(dirname(__file__))
src_dir = dirname(lic_src_dir)
data_dir = join(lic_src_dir, 'data')
licenses_data_dir = join(data_dir, 'licenses')
rules_data_dir = join(data_dir, 'rules')
root_dir = dirname(src_dir)
cache_dir = join(root_dir, '.cache')
license_index_cache_dir = join(cache_dir, 'license_index')
license_matches_cache_dir = join(cache_dir, 'license_matches')
if not exists(license_index_cache_dir):
fileutils.create_dir(license_index_cache_dir)
# minimum number of tokens a match should have to be considered as worthy
MIN_MATCH_LENGTH = 4
MIN_MATCH_HIGH_LENGTH = 3
# maximum number of non-matching tokens that can be skipped
MAX_GAP_SKIP = 15
# maximum distance between two matches to merge
MAX_DIST = 10
# length below which a match is considered as "small"
SMALL_MATCH = 50
# minimal score for matches to small rules
SMALL_MATCH_MIN_SCORE = 80
| #
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from os.path import dirname
from os.path import abspath
from os.path import getsize
from os.path import getmtime
from os.path import join
from os.path import exists
from commoncode import fileutils
lic_src_dir = abspath(dirname(__file__))
src_dir = dirname(lic_src_dir)
data_dir = join(lic_src_dir, 'data')
licenses_data_dir = join(data_dir, 'licenses')
rules_data_dir = join(data_dir, 'rules')
root_dir = dirname(src_dir)
cache_dir = join(root_dir, '.cache')
license_index_cache_dir = join(cache_dir, 'license_index')
license_matches_cache_dir = join(cache_dir, 'license_matches')
if not exists(license_index_cache_dir):
fileutils.create_dir(license_index_cache_dir)
# length of ngrams
NGRAM_LENGTH = 5
# minimum number of tokens a match should have to be considered as worthy
MIN_MATCH_LENGTH = 4
MIN_MATCH_HIGH_LENGTH = 3
# maximum number of non-matching tokens that can be skipped
MAX_GAP_SKIP = 15
# maximum distance between two matches to merge
MAX_DIST = 10
| apache-2.0 | Python |
11a5c32396d7b97793bd7e1f0bf2baf11f737ef6 | fix localhost bug | quanted/ubertool_ecorest,puruckertom/ubertool_ecorest,puruckertom/ubertool_ecorest,puruckertom/ubertool_ecorest,puruckertom/ubertool_ecorest,quanted/ubertool_ecorest,quanted/ubertool_ecorest,quanted/ubertool_ecorest | celery_cgi.py | celery_cgi.py | from celery import Celery
celery_tasks = [
'hms_flask.modules.hms_controller',
'pram_flask.tasks'
]
# celery = Celery('flask_qed', broker='redis://localhost:6379/0', backend='redis://localhost:6379/0', include=celery_tasks)
celery = Celery('flask_qed', broker='redis://redis:6379/0', backend='redis://redis:6379/0', include=celery_tasks)
celery.conf.update(
CELERY_ACCEPT_CONTENT=['json'],
CELERY_TASK_SERIALIZER='json',
CELERY_RESULT_SERIALIZER='json',
CELERY_IGNORE_RESULT=False,
CELERY_TRACK_STARTED=True,
)
| from celery import Celery
celery_tasks = [
'hms_flask.modules.hms_controller',
'pram_flask.tasks'
]
celery = Celery('flask_qed', broker='redis://localhost:6379/0', backend='redis://localhost:6379/0', include=celery_tasks)
# celery = Celery('flask_qed', broker='redis://redis:6379/0', backend='redis://redis:6379/0', include=celery_tasks)
celery.conf.update(
CELERY_ACCEPT_CONTENT=['json'],
CELERY_TASK_SERIALIZER='json',
CELERY_RESULT_SERIALIZER='json',
CELERY_IGNORE_RESULT=False,
CELERY_TRACK_STARTED=True,
)
| unlicense | Python |
1e16e58f37a9e3b98638f66a66830e242aa76b77 | Fix to upload script | vishesh92/redash,ninneko/redash,getredash/redash,useabode/redash,jmvasquez/redashtest,useabode/redash,amino-data/redash,44px/redash,jmvasquez/redashtest,stefanseifert/redash,akariv/redash,44px/redash,moritz9/redash,getredash/redash,imsally/redash,alexanderlz/redash,hudl/redash,pubnative/redash,pubnative/redash,moritz9/redash,alexanderlz/redash,M32Media/redash,hudl/redash,pubnative/redash,akariv/redash,chriszs/redash,getredash/redash,EverlyWell/redash,ninneko/redash,akariv/redash,akariv/redash,ninneko/redash,useabode/redash,easytaxibr/redash,rockwotj/redash,rockwotj/redash,amino-data/redash,pubnative/redash,stefanseifert/redash,jmvasquez/redashtest,guaguadev/redash,rockwotj/redash,denisov-vlad/redash,amino-data/redash,hudl/redash,easytaxibr/redash,imsally/redash,vishesh92/redash,44px/redash,44px/redash,getredash/redash,alexanderlz/redash,moritz9/redash,guaguadev/redash,denisov-vlad/redash,EverlyWell/redash,easytaxibr/redash,akariv/redash,jmvasquez/redashtest,ninneko/redash,hudl/redash,chriszs/redash,easytaxibr/redash,crowdworks/redash,M32Media/redash,pubnative/redash,getredash/redash,ninneko/redash,crowdworks/redash,crowdworks/redash,alexanderlz/redash,imsally/redash,rockwotj/redash,EverlyWell/redash,vishesh92/redash,imsally/redash,guaguadev/redash,stefanseifert/redash,denisov-vlad/redash,denisov-vlad/redash,denisov-vlad/redash,guaguadev/redash,stefanseifert/redash,chriszs/redash,useabode/redash,jmvasquez/redashtest,guaguadev/redash,EverlyWell/redash,M32Media/redash,chriszs/redash,crowdworks/redash,stefanseifert/redash,easytaxibr/redash,M32Media/redash,vishesh92/redash,moritz9/redash,amino-data/redash | bin/upload_version.py | bin/upload_version.py | #!python
import os
import sys
import json
import requests
if __name__ == '__main__':
version = sys.argv[1]
filepath = sys.argv[2]
filename = filepath.split('/')[-1]
github_token = os.environ['GITHUB_TOKEN']
auth = (github_token, 'x-oauth-basic')
commit_sha = os.environ['CIRCLE_SHA1']
params = json.dumps({
'tag_name': 'v{0}'.format(version),
'name': 're:dash v{0}'.format(version),
'target_commitish': commit_sha
})
response = requests.post('https://api.github.com/repos/everythingme/redash/releases',
data=params,
auth=auth)
upload_url = response.json()['upload_url']
upload_url = upload_url.replace('{?name}', '')
with open(filepath) as file_content:
headers = {'Content-Type': 'application/gzip'}
response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth, headers=headers, verify=False)
| #!python
import os
import sys
import json
import requests
if __name__ == '__main__':
version = sys.argv[1]
filename = sys.argv[2].split('/')[-1]
github_token = os.environ['GITHUB_TOKEN']
auth = (github_token, 'x-oauth-basic')
commit_sha = os.environ['CIRCLE_SHA1']
params = json.dumps({
'tag_name': 'v{0}'.format(version),
'name': 're:dash v{0}'.format(version),
'target_commitish': commit_sha
})
response = requests.post('https://api.github.com/repos/everythingme/redash/releases',
data=params,
auth=auth)
upload_url = response.json()['upload_url']
upload_url = upload_url.replace('{?name}', '')
with open(filename) as file_content:
headers = {'Content-Type': 'application/gzip'}
response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth, headers=headers, verify=False)
| bsd-2-clause | Python |
9938b524bb5779d5c5ce132563b3100969c8c5d6 | Drop carreralib.fw --force flag. | tkem/carreralib | src/carreralib/fw.py | src/carreralib/fw.py | import argparse
import contextlib
import logging
import time
from . import ControlUnit
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog="python -m carreralib.fw")
parser.add_argument(
"device",
metavar="DEVICE",
help="the Control Unit device, e.g. a serial port or MAC address",
)
parser.add_argument(
"file",
metavar="FILE",
nargs="?",
default=None,
help="a Control Unit firmware update file",
)
parser.add_argument(
"-l", "--logfile", default="carreralib.log", help="where to write log messages"
)
parser.add_argument(
"-t",
"--timeout",
default=5.0,
type=float,
help="maximum time in seconds to wait for Control Unit",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="write more log messages"
)
args = parser.parse_args()
logging.basicConfig(
level=logging.DEBUG if args.verbose else logging.INFO,
filename=args.logfile,
format="%(asctime)s: %(message)s",
)
with contextlib.closing(ControlUnit(args.device, timeout=args.timeout)) as cu:
if args.file:
with open(args.file) as f:
# remove double quotes wrapping each line
lines = [line.rstrip().replace('"', "") for line in f.readlines()]
# skip empty lines
lines = [line for line in lines if len(line)]
if lines:
print("Starting firmware update")
cu.fwu_start()
time.sleep(2.0) # wait for two seconds...
for n, line in enumerate(lines):
# encode to bytes and write to CU
cu.fwu_write(line.encode())
# print progress
print("Writing firmware update block %d/%d" % (n + 1, len(lines)))
print("Firmware update done")
else:
print("CU version %s" % cu.version())
| import argparse
import contextlib
import logging
import time
from . import ControlUnit
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog="python -m carreralib.fw")
parser.add_argument(
"device",
metavar="DEVICE",
help="the Control Unit device, e.g. a serial port or MAC address",
)
parser.add_argument(
"file",
metavar="FILE",
nargs="?",
default=None,
help="a Control Unit firmware update file",
)
parser.add_argument(
"-f", "--force", action="store_true", help="force firmware update"
)
parser.add_argument(
"-l", "--logfile", default="carreralib.log", help="where to write log messages"
)
parser.add_argument(
"-t",
"--timeout",
default=5.0,
type=float,
help="maximum time in seconds to wait for Control Unit",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="write more log messages"
)
args = parser.parse_args()
logging.basicConfig(
level=logging.DEBUG if args.verbose else logging.INFO,
filename=args.logfile,
format="%(asctime)s: %(message)s",
)
with contextlib.closing(ControlUnit(args.device, timeout=args.timeout)) as cu:
if not args.file or not args.force:
print("CU version %s" % cu.version())
if args.file:
with open(args.file) as f:
# remove double quotes wrapping each line
lines = [line.rstrip().replace('"', "") for line in f.readlines()]
# skip empty lines
lines = [line for line in lines if len(line)]
if lines:
print("Starting firmware update")
cu.fwu_start()
time.sleep(2.0) # wait for two seconds...
for n, line in enumerate(lines):
# encode to bytes and write to CU
cu.fwu_write(line.encode())
# print progress
print("Writing firmware update block %d/%d" % (n + 1, len(lines)))
print("Firmware update done")
| mit | Python |
a00dcd9c8ced0aa7280ed01ff061c32b552eae3c | Update common.py | sony/nnabla,sony/nnabla,sony/nnabla | build-tools/code_generator/utils/common.py | build-tools/code_generator/utils/common.py | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
import io
import os
import re
import six
import subprocess
def check_update(filename, generated, force=False):
    """Write *generated* to *filename* when the content has changed.

    The file is (re)written only when the generated text differs from
    what is on disk AND either *force* is true or the file does not
    exist yet.  Line endings are normalised to LF before writing.
    """
    file_exists = os.path.exists(filename)
    original = ''
    if file_exists:
        with io.open(filename, 'rt', encoding='utf_8_sig') as current:
            original = six.text_type(current.read())
    matcher = difflib.SequenceMatcher(None, original, generated)
    content_changed = matcher.ratio() < 1.0
    if (force or not file_exists) and content_changed:
        with open(filename, 'wb') as out:
            print('Updating {}.'.format(filename))
            payload = generated.encode('utf_8')
            # Normalise CRLF first, then any stray CR, to plain LF.
            for eol in (b'\r\n', b'\r'):
                payload = payload.replace(eol, b'\n')
            out.write(payload)
def get_version(dir):
    """Return a ``(version, short_version)`` pair for the build.

    When the NNABLA_VERSION environment variable is set (non-empty) and
    NNABLA_SHORT_VERSION is also present, those values are returned
    verbatim (with an optional NNABLA_VERSION_SUFFIX appended to the
    long version).  Otherwise a hard-coded default is used, normalised
    by replacing '/' with '_' and lower-casing.

    Side effect: changes the current working directory to *dir*.
    """
    os.chdir(dir)
    env = os.environ
    suffix = env.get("NNABLA_VERSION_SUFFIX", "")
    if env.get("NNABLA_VERSION", "") != "":
        if "NNABLA_SHORT_VERSION" in env:
            return env["NNABLA_VERSION"] + suffix, env["NNABLA_SHORT_VERSION"]
    # Fallback: no usable environment override; use the pinned default.
    nearest_tag = '1.0.10.dev1'
    version = nearest_tag + suffix
    return version.replace('/', '_').lower(), nearest_tag.replace('/', '_').lower()
| # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
import io
import os
import re
import six
import subprocess
def check_update(filename, generated, force=False):
original = ''
if os.path.exists(filename):
with io.open(filename, 'rt', encoding='utf_8_sig') as f:
original = six.text_type(f.read())
s = difflib.SequenceMatcher(None, original, generated)
if(force or not os.path.exists(filename)) and s.ratio() < 1.0:
with open(filename, 'wb') as f:
print('Updating {}.'.format(filename))
write_content = generated.encode('utf_8')
write_content = write_content.replace(b'\r\n', b'\n')
write_content = write_content.replace(b'\r', b'\n')
f.write(write_content)
def get_version(dir):
os.chdir(dir)
if "NNABLA_VERSION" in os.environ and os.environ["NNABLA_VERSION"] != "":
version = os.environ["NNABLA_VERSION"]
if "NNABLA_VERSION_SUFFIX" in os.environ:
version += os.environ["NNABLA_VERSION_SUFFIX"]
if "NNABLA_SHORT_VERSION" in os.environ:
return version, os.environ["NNABLA_SHORT_VERSION"]
version = default_version = '1.0.9'
nearest_tag = default_version
if "NNABLA_VERSION_SUFFIX" in os.environ:
version += os.environ["NNABLA_VERSION_SUFFIX"]
return version.replace('/', '_').lower(), nearest_tag.replace('/', '_').lower()
| apache-2.0 | Python |
a4d5db323910852ad701f9cd9953c3bedb63ae1d | Bump base version (#5842) | DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core,DataDog/integrations-core | vsphere/setup.py | vsphere/setup.py | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open
from os import path
from setuptools import setup
HERE = path.abspath(path.dirname(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "vsphere", "__about__.py")) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Parse requirements
def get_requirements(fpath):
    """Return the lines of the requirements file *fpath*, relative to HERE."""
    with open(path.join(HERE, fpath), encoding='utf-8') as req_file:
        return list(req_file)
CHECKS_BASE_REQ = 'datadog_checks_base>=11.0.0'
setup(
name='datadog-vsphere',
version=ABOUT["__version__"],
description='The vSphere check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent vSphere check',
url='https://github.com/DataDog/integrations-core',
author='Datadog',
author_email='packages@datadoghq.com',
license='New BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
packages=['datadog_checks.vsphere'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
# Extra files to ship with the wheel package
include_package_data=True,
)
| # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open
from os import path
from setuptools import setup
HERE = path.abspath(path.dirname(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "vsphere", "__about__.py")) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Parse requirements
def get_requirements(fpath):
with open(path.join(HERE, fpath), encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog_checks_base>=6.4.0'
setup(
name='datadog-vsphere',
version=ABOUT["__version__"],
description='The vSphere check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent vSphere check',
url='https://github.com/DataDog/integrations-core',
author='Datadog',
author_email='packages@datadoghq.com',
license='New BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
packages=['datadog_checks.vsphere'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
# Extra files to ship with the wheel package
include_package_data=True,
)
| bsd-3-clause | Python |
626606992854967c569296a35e641198839e5170 | use a model for test that is not influenced by the mail module | OCA/server-tools,OCA/server-tools,YannickB/server-tools,YannickB/server-tools,YannickB/server-tools,OCA/server-tools | auditlog/tests/test_auditlog.py | auditlog/tests/test_auditlog.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests.common import TransactionCase
class TestAuditlog(TransactionCase):
    """Integration test: CRUD operations on an audited model create logs.

    ``res.groups`` is used as the audited model so the test is not
    influenced by models the mail module extends.
    """

    def test_LogCreation(self):
        """create/write/unlink on res.groups must each produce a log entry."""
        auditlog_log = self.env['auditlog.log']
        groups_model_id = self.env.ref('base.model_res_groups').id
        # Subscribe an audit rule covering all three CRUD operations.
        self.env['auditlog.rule'].create({
            'name': 'testrule for groups',
            'model_id': groups_model_id,
            'log_create': True,
            'log_write': True,
            'log_unlink': True,
            'state': 'subscribed',
        })
        group = self.env['res.groups'].create({
            'name': 'testgroup',
        })
        # A 'create' log entry must reference the new record.
        self.assertTrue(auditlog_log.search([
            ('model_id', '=', groups_model_id),
            ('method', '=', 'create'),
            ('res_id', '=', group.id),
        ]))
        group.write({'name': 'Testgroup'})
        # A 'write' log entry must be recorded for the update.
        self.assertTrue(auditlog_log.search([
            ('model_id', '=', groups_model_id),
            ('method', '=', 'write'),
            ('res_id', '=', group.id),
        ]))
        group.unlink()
        # An 'unlink' log entry must remain after the record is deleted.
        self.assertTrue(auditlog_log.search([
            ('model_id', '=', groups_model_id),
            ('method', '=', 'unlink'),
            ('res_id', '=', group.id),
        ]))
| # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests.common import TransactionCase
class TestAuditlog(TransactionCase):
def test_LogCreation(self):
auditlog_log = self.env['auditlog.log']
user_model_id = self.env.ref('base.model_res_users').id
self.env['auditlog.rule'].create({
'name': 'testrule for users',
'model_id': user_model_id,
'log_create': True,
'log_write': True,
'log_unlink': True,
'state': 'subscribed',
})
user = self.env['res.users'].create({
'login': 'testuser',
'name': 'testuser',
})
self.assertTrue(auditlog_log.search([
('model_id', '=', user_model_id),
('method', '=', 'create'),
('res_id', '=', user.id),
]))
user.write({'name': 'Test User'})
self.assertTrue(auditlog_log.search([
('model_id', '=', user_model_id),
('method', '=', 'write'),
('res_id', '=', user.id),
]))
user.unlink()
self.assertTrue(auditlog_log.search([
('model_id', '=', user_model_id),
('method', '=', 'unlink'),
('res_id', '=', user.id),
]))
| agpl-3.0 | Python |
186e78667f87351a50943691a281c06f9e00b533 | Update background.py | strattadb/dotfiles,strattadb/dotfiles | autostart/scripts/background.py | autostart/scripts/background.py | #!/usr/bin/env python3
"""
Bing's wallpaper of the day background setter
Hits Bing's wallpaper API and compares the wallpaper of the day
with the OS's current one.
If they are the same, the script does nothing.
If they are different, it downloads the new wallpaper and sets it,
then remove the old one.
Usage:
======
(1) Put this script in your startup applications.
"""
import os
import requests
# Perform a GET to Bing's wallpapers API
url = 'https://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1&mkt=en-US'
response = requests.get(url)
data = response.json()
image_url_pathname = data['images'][0]['url']
image_filename = image_url_pathname.split('/')[-1]
local_images_dir = '{}/Pictures/wallpaper/'.format(os.path.expanduser('~'))
local_images = os.listdir(local_images_dir)
# Check if the current background is not the same as Bing's
if not local_images or local_images[0] != image_filename:
image_url = 'https://www.bing.com{}'.format(image_url_pathname)
# Perform a GET to the image URL
image_content = requests.get(image_url).content
with open(local_images_dir + image_filename, 'wb') as image_file:
image_file.write(image_content)
gsettings_command_str = (
'/usr/bin/gsettings set org.gnome.desktop.background picture-uri ' +
'"file://' + local_images_dir + image_filename + '"'
)
os.system(gsettings_command_str)
print('Success!\n')
# Remove old image if it exists
if local_images:
os.remove(local_images[0])
else:
print('You already have the latest Bing wallpaper!\n')
| #!/usr/bin/env python3
"""
Bing's wallpaper of the day background setter
Hits Bing's wallpaper API and compares the wallpaper of the day
with the OS's current one.
If they are the same, the script does nothing.
If they are different, it downloads the new wallpaper and sets it,
then remove the old one.
Usage:
======
(1) Put this script in your startup applications.
"""
import os
import requests
import json
# Perform a GET to Bing's wallpapers API
url = 'https://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1&mkt=en-US'
response = requests.get(url)
data = json.loads(response.text)
image_url_pathname = data['images'][0]['url']
image_filename = image_url_pathname.split('/')[-1]
local_images_dir = '{}/Pictures/wallpaper/'.format(os.path.expanduser('~'))
local_images = os.listdir(local_images_dir)
# Check if the current background is not the same as Bing's
if not local_images or local_images[0] != image_filename:
image_url = 'https://www.bing.com{}'.format(image_url_pathname)
# Perform a GET to the image URL
image = requests.get(image_url).content
with open(local_images_dir + image_filename, 'wb') as handler:
handler.write(image)
gsettings_command_str = (
'/usr/bin/gsettings set org.gnome.desktop.background picture-uri ' +
'"file://' + local_images_dir + image_filename + '"'
)
os.system(gsettings_command_str)
# Remove old image if it exists
if local_images:
os.remove(local_images[0])
| mit | Python |
2fa9bad60d1ea5e4af2e07b6291083be95834fc5 | fix cyclic depndencies | ufal/neuralmonkey,ufal/neuralmonkey,ufal/neuralmonkey,ufal/neuralmonkey,ufal/neuralmonkey | neuralmonkey/runners/__init__.py | neuralmonkey/runners/__init__.py | from .beamsearch_runner import BeamSearchRunner
from .beamsearch_runner import beam_search_runner_range
from .label_runner import LabelRunner
from .logits_runner import LogitsRunner
from .perplexity_runner import PerplexityRunner
from .plain_runner import PlainRunner
from .regression_runner import RegressionRunner
from .runner import GreedyRunner
from .word_alignment_runner import WordAlignmentRunner
| from .beamsearch_runner import BeamSearchRunner
from .beamsearch_runner import beam_search_runner_range
from .label_runner import LabelRunner
from .logits_runner import LogitsRunner
from .perplexity_runner import PerplexityRunner
from .plain_runner import PlainRunner
from .regression_runner import RegressionRunner
from .runner import GreedyRunner
from .tensor_runner import TensorRunner, RepresentationRunner
from .word_alignment_runner import WordAlignmentRunner
| bsd-3-clause | Python |
2a34d8cd07542228b15ed3f9b46fbe491a2c2a2c | fix MinMax | Larhard/backgammon,Larhard/backgammon | backgammon/bots/utils/minmax.py | backgammon/bots/utils/minmax.py | # Copyright (c) 2015, Bartlomiej Puget <larhard@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Bartlomiej Puget nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL BARTLOMIEJ PUGET BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import itertools as it
from backgammon.model.utils import player_from_number
from backgammon.model.utils import available_moves
class MinMax:
    """Fixed-depth game-tree search over backgammon dice outcomes.

    Depth levels alternate between the two players (even levels move
    player +1, odd levels player -1), and every level averages over all
    21 distinct dice rolls weighted by their probability -- i.e. an
    expectation-over-dice flavour of minimax.

    NOTE(review): the 1/36 and 1/18 weights rely on true division;
    under Python 2 without ``from __future__ import division`` they
    would evaluate to 0 -- confirm the project targets Python 3.
    """

    def __init__(self, evaluate, levels):
        # evaluate: board -> numeric score callable; presumably scores
        #   from player +1's point of view -- TODO confirm vs. callers.
        # levels: depth at which boards are scored with `evaluate`.
        self.evaluate = evaluate
        self.levels = levels

    def maximize(self, board, level=0, previous_value=None):
        """Return the searched score of *board* down to self.levels.

        previous_value is the value already secured one level up; it
        enables the early cutoff below (alpha-beta-style pruning).
        """
        # +1 on maximizing (even) levels, -1 on minimizing (odd) levels.
        modifier = 1 if level % 2 == 0 else -1
        if level == self.levels:
            return self.evaluate(board)
        result = 0
        for dices in it.combinations_with_replacement(range(1, 7), 2):
            # Doubles occur with probability 1/36; an unordered distinct
            # pair covers two ordered rolls, hence 2/36 == 1/18.
            multiplier = 1/36 if dices[0] == dices[1] else 1/18
            # Sign-scaled so this starts as the worst value for the
            # player to move.
            max_value = modifier * -2**31
            for _, possible_board in available_moves(board, dices,
                                                     player_from_number(modifier)):
                possible_board_value = self.maximize(possible_board, level+1,
                                                     max_value)
                # Keep the move that is best for the current player.
                if modifier * max_value < modifier * possible_board_value:
                    max_value = possible_board_value
                # Cutoff: the parent already holds a value at least this
                # good, so this branch cannot change the final choice.
                if previous_value is not None \
                        and modifier * max_value <= modifier * previous_value:
                    return max_value
            # Accumulate this dice outcome weighted by its probability.
            result += modifier * multiplier * max_value
        return result
| # Copyright (c) 2015, Bartlomiej Puget <larhard@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Bartlomiej Puget nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL BARTLOMIEJ PUGET BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import itertools as it
from backgammon.model.utils import player_from_number
from backgammon.model.utils import available_moves
import sys
class MinMax:
def __init__(self, evaluate, levels):
self.evaluate = evaluate
self.levels = levels
def maximize(self, board, level=0, previous_value=None):
modifier = 1 if level % 2 == 0 else -1
if level == self.levels:
return modifier * self.evaluate(board)
result = 0
for dices in it.combinations_with_replacement(range(1, 7), 2):
multiplier = 1/36 if dices[0] == dices[1] else 1/18
max_value = -2**30
for _, possible_board in available_moves(board, dices,
player_from_number(modifier)):
max_value = max(max_value,
modifier * self.maximize(possible_board, level+1,
max_value))
if previous_value is not None \
and modifier * max_value <= modifier * previous_value:
return max_value
result += modifier * multiplier * max_value
return result
| bsd-3-clause | Python |
ac6b38351264b6b7b301542793b7a9d164f3d5ec | fix get_global | phil65/script.module.kodi65 | lib/kodi65/addon.py | lib/kodi65/addon.py | # -*- coding: utf8 -*-
# Copyright (C) 2016 - Philipp Temminghoff <phil65@kodi.tv>
# This program is Free Software see LICENSE file for details
import xbmcaddon
import xbmc
import os
import xbmcgui
ADDON = xbmcaddon.Addon()
ID = ADDON.getAddonInfo('id')
ICON = ADDON.getAddonInfo('icon')
NAME = ADDON.getAddonInfo('name')
PATH = ADDON.getAddonInfo('path').decode("utf-8")
MEDIA_PATH = os.path.join(PATH, "resources", "skins", "Default", "media")
VERSION = ADDON.getAddonInfo('version')
DATA_PATH = xbmc.translatePath("special://profile/addon_data/%s" % ID).decode("utf-8")
HOME = xbmcgui.Window(10000)
def setting(setting_name):
return ADDON.getSetting(setting_name)
def set_setting(setting_name, string):
ADDON.setSetting(setting_name, string)
def bool_setting(setting_name):
return ADDON.getSetting(setting_name) == "true"
def reload_addon():
global ADDON
ADDON = xbmcaddon.Addon()
def LANG(label_id):
    """Return the localized string for *label_id*.

    IDs in the 31000..33000 range are resolved against this addon's own
    string table; all other IDs fall back to Kodi's global strings.
    """
    if 31000 <= label_id <= 33000:
        return ADDON.getLocalizedString(label_id)
    else:
        return xbmc.getLocalizedString(label_id)
def set_global(setting_name, setting_value):
HOME.setProperty(setting_name, setting_value)
def get_global(setting_name):
return HOME.getProperty(setting_name)
def clear_global(setting_name):
HOME.clearProperty(setting_name)
def clear_globals():
HOME.clearProperties()
| # -*- coding: utf8 -*-
# Copyright (C) 2016 - Philipp Temminghoff <phil65@kodi.tv>
# This program is Free Software see LICENSE file for details
import xbmcaddon
import xbmc
import os
import xbmcgui
ADDON = xbmcaddon.Addon()
ID = ADDON.getAddonInfo('id')
ICON = ADDON.getAddonInfo('icon')
NAME = ADDON.getAddonInfo('name')
PATH = ADDON.getAddonInfo('path').decode("utf-8")
MEDIA_PATH = os.path.join(PATH, "resources", "skins", "Default", "media")
VERSION = ADDON.getAddonInfo('version')
DATA_PATH = xbmc.translatePath("special://profile/addon_data/%s" % ID).decode("utf-8")
HOME = xbmcgui.Window(10000)
def setting(setting_name):
return ADDON.getSetting(setting_name)
def set_setting(setting_name, string):
ADDON.setSetting(setting_name, string)
def bool_setting(setting_name):
return ADDON.getSetting(setting_name) == "true"
def reload_addon():
global ADDON
ADDON = xbmcaddon.Addon()
def LANG(label_id):
if 31000 <= label_id <= 33000:
return ADDON.getLocalizedString(label_id)
else:
return xbmc.getLocalizedString(label_id)
def set_global(setting_name, setting_value):
HOME.setProperty(setting_name, setting_value)
def get_global(setting_name):
    """Return the value of the home-window property *setting_name*."""
    # The looked-up value must be returned; previously it was computed
    # and discarded, so callers always received None.
    return HOME.getProperty(setting_name)
def clear_global(setting_name):
HOME.clearProperty(setting_name)
def clear_globals():
HOME.clearProperties()
| lgpl-2.1 | Python |
662de028b7b521a122b8ce2c6d7d1a4e4b399811 | replace getitem calls with [] | jcrist/blaze,ChinaQuants/blaze,aterrel/blaze,ChinaQuants/blaze,maxalbert/blaze,aterrel/blaze,cpcloud/blaze,mrocklin/blaze,alexmojaki/blaze,dwillmer/blaze,ContinuumIO/blaze,alexmojaki/blaze,caseyclements/blaze,jdmcbr/blaze,jdmcbr/blaze,nkhuyu/blaze,LiaoPan/blaze,cowlicks/blaze,scls19fr/blaze,cowlicks/blaze,caseyclements/blaze,LiaoPan/blaze,ContinuumIO/blaze,cpcloud/blaze,jcrist/blaze,maxalbert/blaze,xlhtc007/blaze,nkhuyu/blaze,aterrel/blaze,scls19fr/blaze,mrocklin/blaze,dwillmer/blaze,xlhtc007/blaze | blaze/serve/server.py | blaze/serve/server.py | from __future__ import absolute_import, division, print_function
from collections import Iterator
from flask import Flask, request, jsonify, json
from functools import partial, wraps
from .index import parse_index
class Server(object):
    """Flask application that serves the given datasets over a JSON API."""

    # Restrict instances to exactly these two attributes.
    __slots__ = 'app', 'datasets'

    def __init__(self, name='Blaze-Server', datasets=None):
        """Create the Flask app and register all collected routes.

        :param name: Flask application name.
        :param datasets: optional mapping of dataset name -> dataset.
        """
        app = self.app = Flask(name)
        self.datasets = datasets or dict()
        # Bind every collected view to this server's dataset mapping:
        # ``partial`` pre-applies the datasets dict, and ``wraps`` keeps
        # the original function's name for Flask's endpoint naming.
        for args, kwargs, func in routes:
            func2 = wraps(func)(partial(func, self.datasets))
            app.route(*args, **kwargs)(func2)

    def __getitem__(self, key):
        # Dictionary-style read access: server[name] -> dataset.
        return self.datasets[key]

    def __setitem__(self, key, value):
        # Dictionary-style assignment: server[name] = dataset.
        self.datasets[key] = value
        # (The item-assignment protocol ignores this return value.)
        return value
# Module-level table of pending route registrations, consumed by Server.
routes = []


def route(*args, **kwargs):
    """Record a Flask route registration to be applied later.

    Returns a decorator that stores ``(args, kwargs, func)`` in the
    module-level ``routes`` list and hands ``func`` back unchanged.
    """
    def decorator(func):
        routes.append((args, kwargs, func))
        return func
    return decorator
@route('/datasets.json')
def dataset(datasets):
    """Serve the name -> datashape mapping of all hosted datasets as JSON."""
    shapes = dict((name, str(dset.dshape)) for name, dset in datasets.items())
    return jsonify(shapes)
@route('/data/<name>.json', methods=['POST'])
def data(datasets, name):
    """ Basic indexing API

    Allows remote indexing of datasets.  Takes indexing data as JSON.

    Example
    -------

    For the following array:

        [['Alice', 100],
         ['Bob', 200],
         ['Charlie', 300]]

        schema = '{name: string, amount: int32}'

    a request such as

        url: /data/table-name.json
        POST-data: {'index': [{'start': 0, 'step': 3}, 'name']}

    returns a response like

        {"name": "table-name",
         "index": [0, "name"],
         "datashape": "3 * string",
         "data": ["Alice", "Bob", "Charlie"]}
    """
    # Only JSON request bodies are accepted.
    if request.headers['content-type'] != 'application/json':
        return ("Expected JSON data", 404)
    try:
        data = json.loads(request.data)
    except ValueError:
        return ("Bad JSON. Got %s " % request.data, 404)
    # Look up the requested dataset by name.
    try:
        dset = datasets[name]
    except KeyError:
        return ("Dataset %s not found" % name, 404)
    # Convert the JSON index description into a real index object.
    try:
        index = parse_index(data['index'])
    except ValueError:
        return ("Bad index", 404)
    rv = dset.py[index]
    # Iterators are not JSON-serialisable; materialise them first.
    if isinstance(rv, Iterator):
        rv = list(rv)
    return jsonify({'name': name,
                    'index': data['index'],
                    'datashape': str(dset.dshape.subshape[index]),
                    'data': rv})
| from __future__ import absolute_import, division, print_function
from collections import Iterator
from flask import Flask, request, jsonify, json
from functools import partial, wraps
from .index import parse_index
class Server(object):
__slots__ = 'app', 'datasets'
def __init__(self, name='Blaze-Server', datasets=None):
app = self.app = Flask(name)
self.datasets = datasets or dict()
for args, kwargs, func in routes:
func2 = wraps(func)(partial(func, self.datasets))
app.route(*args, **kwargs)(func2)
def __getitem__(self, key):
return self.datasets[key]
def __setitem__(self, key, value):
self.datasets[key] = value
return value
routes = list()
def route(*args, **kwargs):
def f(func):
routes.append((args, kwargs, func))
return func
return f
@route('/datasets.json')
def dataset(datasets):
return jsonify(dict((k, str(v.dshape)) for k, v in datasets.items()))
@route('/data/<name>.json', methods=['POST'])
def data(datasets, name):
""" Basic indexing API
Allows remote indexing of datasets. Takes indexing data as JSON
Takes requests like
Example
-------
For the following array:
[['Alice', 100],
['Bob', 200],
['Charlie', 300]]
schema = '{name: string, amount: int32}'
And the following
url: /data/table-name.json
POST-data: {'index': [{'start': 0, 'step': 3}, 'name']}
and returns responses like
{"name": "table-name",
"index": [0, "name"],
"datashape": "3 * string",
"data": ["Alice", "Bob", "Charlie"]}
"""
if request.headers['content-type'] != 'application/json':
return ("Expected JSON data", 404)
try:
data = json.loads(request.data)
except ValueError:
return ("Bad JSON. Got %s " % request.data, 404)
try:
dset = datasets[name]
except KeyError:
return ("Dataset %s not found" % name, 404)
try:
index = parse_index(data['index'])
except ValueError:
return ("Bad index", 404)
rv = dset.py.__getitem__(index)
if isinstance(rv, Iterator):
rv = list(rv)
return jsonify({'name': name,
'index': data['index'],
'datashape': str(dset.dshape.subshape.__getitem__(index)),
'data': rv})
| bsd-3-clause | Python |
d3966eca1aff39640ae8a781d64e4e0525275110 | Add linhomy.candidate and linhomy.cdr_matrices to linhomy.selftest. | jfine2358/py-linhomy | py/linhomy/selftest.py | py/linhomy/selftest.py | '''Run self-test on all modules
Run this as a module and it will perform the self-test.
'''
# For Python2 compatibility
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
if __name__ == '__main__':
import doctest
import linhomy
import linhomy.candidate
import linhomy.cdr_matrices
import linhomy.classtools
import linhomy.data
import linhomy.fibonacci
import linhomy.matrices
import linhomy.matrices2
import linhomy.product
import linhomy.rank
import linhomy.rankmatrices
import linhomy.rules
import linhomy.tools
import linhomy.work
modules = [
linhomy,
linhomy.candidate,
linhomy.cdr_matrices,
linhomy.classtools,
linhomy.data,
linhomy.fibonacci,
linhomy.matrices,
linhomy.matrices2,
linhomy.product,
linhomy.rankmatrices,
linhomy.rules,
linhomy.tools,
linhomy.work,
]
# TODO: Pick up 'verbose' from the command line?
for mod in modules:
prefix = mod.__name__.ljust(20)
print(prefix, doctest.testmod(mod))
| '''Run self-test on all modules
Run this as a module and it will perform the self-test.
'''
# For Python2 compatibility
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
if __name__ == '__main__':
import doctest
import linhomy
import linhomy.classtools
import linhomy.data
import linhomy.fibonacci
import linhomy.matrices
import linhomy.matrices2
import linhomy.product
import linhomy.rank
import linhomy.rankmatrices
import linhomy.rules
import linhomy.tools
import linhomy.work
modules = [
linhomy,
linhomy.classtools,
linhomy.data,
linhomy.fibonacci,
linhomy.matrices,
linhomy.matrices2,
linhomy.product,
linhomy.rankmatrices,
linhomy.rules,
linhomy.tools,
linhomy.work,
]
# TODO: Pick up 'verbose' from the command line?
for mod in modules:
prefix = mod.__name__.ljust(20)
print(prefix, doctest.testmod(mod))
| mit | Python |
505214b9146f31244d60b13a15b86142a11abe4b | add print_duration_context() | aevri/phabricator-tools,bloomberg/phabricator-tools,bloomberg/phabricator-tools,kjedruczyk/phabricator-tools,cs-shadow/phabricator-tools,bloomberg/phabricator-tools,cs-shadow/phabricator-tools,cs-shadow/phabricator-tools,kjedruczyk/phabricator-tools,cs-shadow/phabricator-tools,bloomberg/phabricator-tools,aevri/phabricator-tools,aevri/phabricator-tools,kjedruczyk/phabricator-tools,aevri/phabricator-tools,kjedruczyk/phabricator-tools,cs-shadow/phabricator-tools,aevri/phabricator-tools,bloomberg/phabricator-tools,kjedruczyk/phabricator-tools | py/phl/phlsys_timer.py | py/phl/phlsys_timer.py | """Determine the wall clock duration of events."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# phlsys_timer
#
# Public Classes:
# Timer
# .start
# .stop
# .restart
# .duration
#
# Public Functions:
# print_duration_context
# timer_context
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
import contextlib
import datetime
@contextlib.contextmanager
def print_duration_context(name):
with timer_context() as t:
yield
print "{} took {} secs".format(name, t.duration)
@contextlib.contextmanager
def timer_context():
timer = Timer()
timer.start()
try:
yield timer
finally:
timer.stop()
class Timer(object):
def __init__(self):
self._start = None
self._stop = None
self._duration = None
def start(self):
self._start = datetime.datetime.utcnow()
self._stop = None
self._duration = None
def stop(self):
assert self._start is not None
self._stop = datetime.datetime.utcnow()
def restart(self):
"""Start the timer, return the previous duration.
If there is no previous duration then return 0.0.
Usage example:
>>> timer = Timer()
>>> timer.restart() >= 0.0
True
>>> timer.restart() >= 0.0
True
:returns: a floating point number of seconds
"""
duration = 0.0
if self._start is not None:
duration = self.duration
self.start()
return duration
@property
def duration(self):
if self._duration is None:
assert self._start is not None
if self._stop is None:
duration = datetime.datetime.utcnow() - self._start
duration = duration.total_seconds()
return duration
self._duration = self._stop - self._start
self._duration = self._duration.total_seconds()
return self._duration
# -----------------------------------------------------------------------------
# Copyright (C) 2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| """Determine the wall clock duration of events."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# phlsys_timer
#
# Public Classes:
# Timer
# .start
# .stop
# .restart
# .duration
#
# Public Functions:
# timer_context
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
import contextlib
import datetime
@contextlib.contextmanager
def timer_context():
timer = Timer()
timer.start()
try:
yield timer
finally:
timer.stop()
class Timer(object):
def __init__(self):
self._start = None
self._stop = None
self._duration = None
def start(self):
self._start = datetime.datetime.utcnow()
self._stop = None
self._duration = None
def stop(self):
assert self._start is not None
self._stop = datetime.datetime.utcnow()
def restart(self):
"""Start the timer, return the previous duration.
If there is no previous duration then return 0.0.
Usage example:
>>> timer = Timer()
>>> timer.restart() >= 0.0
True
>>> timer.restart() >= 0.0
True
:returns: a floating point number of seconds
"""
duration = 0.0
if self._start is not None:
duration = self.duration
self.start()
return duration
@property
def duration(self):
if self._duration is None:
assert self._start is not None
if self._stop is None:
duration = datetime.datetime.utcnow() - self._start
duration = duration.total_seconds()
return duration
self._duration = self._stop - self._start
self._duration = self._duration.total_seconds()
return self._duration
# -----------------------------------------------------------------------------
# Copyright (C) 2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| apache-2.0 | Python |
6195a0665ed467f5a86f6e4ee5aab54bd1262ff7 | add data to api requests | SEL-Columbia/pybamboo,SEL-Columbia/pybamboo | pybamboo/connection.py | pybamboo/connection.py | import simplejson as json
import requests
from pybamboo.exceptions import ErrorParsingBambooData,\
ErrorRetrievingBambooData
DEFAULT_BAMBOO_URL = 'http://bamboo.io'
OK_STATUS_CODES = (200, 201, 202)
class Connection(object):
"""
Object that defines a connection to a bamboo instance.
"""
def __init__(self, url=DEFAULT_BAMBOO_URL):
self._url = url
@property
def url(self):
return self._url
@url.setter
def url(self, url):
self._url = url
def make_api_request(self, http_method, url, data=None, files=None):
http_function = {
'GET': requests.get,
'POST': requests.post,
'PUT': requests.put,
'DELETE': requests.delete,
}
response = http_function[http_method](
self.url + url, data=data, files=files)
self._check_response(response)
return self._safe_json_loads(response)
def _check_response(self, response):
if not response.status_code in OK_STATUS_CODES:
raise ErrorRetrievingBambooData(u'%d Status Code received.'
% response.status_code)
def _safe_json_loads(self, response):
try:
return json.loads(response.text)
except json.JSONDecodeError:
raise ErrorParsingBambooData
| import simplejson as json
import requests
from pybamboo.exceptions import ErrorParsingBambooData,\
ErrorRetrievingBambooData
DEFAULT_BAMBOO_URL = 'http://bamboo.io'
OK_STATUS_CODES = (200, 201, 202)
class Connection(object):
"""
Object that defines a connection to a bamboo instance.
"""
def __init__(self, url=DEFAULT_BAMBOO_URL):
self._url = url
@property
def url(self):
return self._url
@url.setter
def url(self, url):
self._url = url
def make_api_request(self, http_method, url, data=None, files=None):
http_function = {
'GET': requests.get,
'POST': requests.post,
'PUT': requests.put,
'DELETE': requests.delete,
}
response = http_function[http_method](self.url + url, files=files)
self._check_response(response)
return self._safe_json_loads(response)
def _check_response(self, response):
if not response.status_code in OK_STATUS_CODES:
raise ErrorRetrievingBambooData(u'%d Status Code received.'
% response.status_code)
def _safe_json_loads(self, response):
try:
return json.loads(response.text)
except json.JSONDecodeError:
raise ErrorParsingBambooData
| bsd-3-clause | Python |
7a99a4d03fb00d25384135fd06c7fa8464c76539 | Fix __init__.py | yagays/pybitflyer | pybitflyer/__init__.py | pybitflyer/__init__.py | # -*- coding: utf-8 -*-
from .pybitflyer import API
| mit | Python | |
e1ec65c9ba1b1c6750df33d6669595a4be27d515 | add test case for prefixes | byteweaver/django-coupons,byteweaver/django-coupons | coupons/tests/test_models.py | coupons/tests/test_models.py | from datetime import timedelta
import re
from django.utils import timezone
from django.test import TestCase
from coupons.models import Coupon
from coupons.settings import (
CODE_LENGTH,
CODE_CHARS,
SEGMENT_LENGTH,
SEGMENT_SEPARATOR,
)
class CouponTestCase(TestCase):
def test_generate_code(self):
self.assertIsNotNone(re.match("^[%s]{%d}" % (CODE_CHARS, CODE_LENGTH,), Coupon.generate_code()))
def test_generate_code_segmented(self):
num_segments = CODE_LENGTH // SEGMENT_LENGTH # full ones
num_rest = CODE_LENGTH - num_segments * SEGMENT_LENGTH
self.assertIsNotNone(
re.match(
"^([{chars}]{{{sl}}}{sep}){{{ns}}}[{chars}]{{{nr}}}$".format(
chars=CODE_CHARS,
sep=SEGMENT_SEPARATOR,
sl=SEGMENT_LENGTH,
ns=num_segments,
nr=num_rest),
Coupon.generate_code("", True)
)
)
def test_save(self):
coupon = Coupon(type='monetary', value=100)
coupon.save()
self.assertTrue(coupon.pk)
def test_create_coupon(self):
coupon = Coupon.objects.create_coupon('monetary', 100)
self.assertTrue(coupon.pk)
def test_create_coupons(self):
coupons = Coupon.objects.create_coupons(50, 'monetary', 100)
for coupon in coupons:
self.assertTrue(coupon.pk)
def test_redeem(self):
coupon = Coupon.objects.create_coupon('monetary', 100)
coupon.redeem()
self.assertIsNotNone(coupon.redeemed_at)
def test_expired(self):
coupon = Coupon.objects.create_coupon('monetary', 100)
self.assertFalse(coupon.expired())
coupon.valid_until = timezone.now()-timedelta(1)
self.assertTrue(coupon.expired())
def test_str(self):
coupon = Coupon.objects.create_coupon('monetary', 100)
self.assertEqual(coupon.code, str(coupon))
def test_prefix(self):
coupon = Coupon.objects.create_coupon('monetary', 100, None, None, "prefix-")
print coupon.code
self.assertTrue(coupon.code.startswith("prefix-"))
| from datetime import timedelta
import re
from django.utils import timezone
from django.test import TestCase
from coupons.models import Coupon
from coupons.settings import (
CODE_LENGTH,
CODE_CHARS,
SEGMENT_LENGTH,
SEGMENT_SEPARATOR,
)
class CouponTestCase(TestCase):
def test_generate_code(self):
self.assertIsNotNone(re.match("^[%s]{%d}" % (CODE_CHARS, CODE_LENGTH,), Coupon.generate_code()))
def test_generate_code_segmented(self):
num_segments = CODE_LENGTH // SEGMENT_LENGTH # full ones
num_rest = CODE_LENGTH - num_segments * SEGMENT_LENGTH
self.assertIsNotNone(
re.match(
"^([{chars}]{{{sl}}}{sep}){{{ns}}}[{chars}]{{{nr}}}$".format(
chars=CODE_CHARS,
sep=SEGMENT_SEPARATOR,
sl=SEGMENT_LENGTH,
ns=num_segments,
nr=num_rest),
Coupon.generate_code(True)
)
)
def test_save(self):
coupon = Coupon(type='monetary', value=100)
coupon.save()
self.assertTrue(coupon.pk)
def test_create_coupon(self):
coupon = Coupon.objects.create_coupon('monetary', 100)
self.assertTrue(coupon.pk)
def test_create_coupons(self):
coupons = Coupon.objects.create_coupons(50, 'monetary', 100)
for coupon in coupons:
self.assertTrue(coupon.pk)
def test_redeem(self):
coupon = Coupon.objects.create_coupon('monetary', 100)
coupon.redeem()
self.assertIsNotNone(coupon.redeemed_at)
def test_expired(self):
coupon = Coupon.objects.create_coupon('monetary', 100)
self.assertFalse(coupon.expired())
coupon.valid_until = timezone.now()-timedelta(1)
self.assertTrue(coupon.expired())
def test_str(self):
coupon = Coupon.objects.create_coupon('monetary', 100)
self.assertEqual(coupon.code, str(coupon))
| bsd-3-clause | Python |
dbe999de4798a4eed6f96073664715a181a12539 | fix FlaskMixin.handle_user_exception (closes #2) | popravich/cantal_tools | cantal_tools/flask.py | cantal_tools/flask.py | """This module provides Cantal Flask integration.
"""
from werkzeug.exceptions import HTTPException
from .metrics import appflow, web
web.ensure_branches('handle_request', 'handle_exception', 'render_template')
class FlaskMixin:
"""Flask App mixin."""
def wsgi_app(self, environ, start_response):
with web.context(), appflow.context():
web.handle_request.enter()
return super().wsgi_app(environ, start_response)
def handle_user_exception(self, e):
if not isinstance(e, HTTPException):
web.handle_exception.enter()
return super().handle_user_exception(e)
def update_template_context(self, context):
web.render_template.enter()
return super().update_template_context(context)
| """This module provides Cantal Flask integration.
"""
from .metrics import appflow, web
web.ensure_branches('handle_request', 'handle_exception', 'render_template')
class FlaskMixin:
"""Flask App mixin.
Accepts extra keyword-only argument ``metrics``.
"""
def wsgi_app(self, environ, start_response):
with web.context(), appflow.context():
web.handle_request.enter()
return super().wsgi_app(environ, start_response)
def handle_user_exception(self, e):
web.handle_exception.enter()
return super().handle_user_exception(e)
def update_template_context(self, context):
web.render_template.enter()
return super().update_template_context(context)
| mit | Python |
16d50a2b971ddfd4014c89e1bbe8bf52188eb205 | add col | 38elements/feedhoos,38elements/feedhoos,38elements/feedhoos,38elements/feedhoos | feedhoos/worker/admin.py | feedhoos/worker/admin.py | from django.contrib import admin
from feedhoos.worker.models.entry import EntryModel
from feedhoos.finder.models.feed import FeedModel
from feedhoos.reader.models.bookmark import BookmarkModel
class EntryModelAdmin(admin.ModelAdmin):
list_display = ('id', "feed_id", "updated", 'url', 'title')
admin.site.register(EntryModel, EntryModelAdmin)
class FeedModelAdmin(admin.ModelAdmin):
list_display = ('id', "title", "etag", "modified", "url", "last_access")
admin.site.register(FeedModel, FeedModelAdmin)
class BookmarkModelAdmin(admin.ModelAdmin):
list_display = ('id', "last_updated")
admin.site.register(BookmarkModel, BookmarkModelAdmin)
| from django.contrib import admin
from feedhoos.worker.models.entry import EntryModel
from feedhoos.finder.models.feed import FeedModel
from feedhoos.reader.models.bookmark import BookmarkModel
admin.site.register(EntryModel)
admin.site.register(FeedModel)
admin.site.register(BookmarkModel)
| mit | Python |
c1e97f5a2a53f5886be06c43a0bd5e66b6fb76cb | fix import | peterkshultz/lightning-python,peterkshultz/lightning-python,lightning-viz/lightning-python,peterkshultz/lightning-python,garretstuber/lightning-python,garretstuber/lightning-python,lightning-viz/lightning-python,garretstuber/lightning-python | lightning/types/three.py | lightning/types/three.py | from lightning.types.base import Base
from lightning.types.decorators import viztype
from lightning.types.utils import vecs_to_points_three, add_property
from numpy import ndarray, asarray
from lightning.types.utils import array_to_im
@viztype
class Scatter3(Base):
_name = 'scatter3'
@staticmethod
def clean(x, y, z, color=None, label=None, alpha=None, size=None):
"""
Plot three-dimensional data as points.
.. image:: particles.png
Parameters
----------
x, y, z : array-like, each (n,)
Input data
color : array-like, optional, singleton or (n,3)
Single rgb value or array to set colors
label : array-like, optional, singleton or (n,)
Single integer or array to set colors via groups
size : array-like, optional, singleton or (n,)
Single size or array to set point sizes
alpha : array-like, optional, singleton or (n,)
Single alpha value or array to set fill and stroke opacity
"""
points = vecs_to_points_three(x, y, z)
outdict = {'points': points}
outdict = add_property(outdict, color, 'color')
outdict = add_property(outdict, label, 'label')
outdict = add_property(outdict, size, 'size')
outdict = add_property(outdict, alpha, 'alpha')
return outdict
@viztype
class Volume(Base):
_name = 'volume'
@staticmethod
def clean(imagedata):
"""
Display a collection of arrays as browsable images with thumbnails.
.. image:: volume.png
Parameters
----------
imagedata : array-like, or list of array-like
Image or list of images as two dimensional (grayscale) or three dimensional (RGB) arrays.
"""
if isinstance(imagedata, ndarray):
imagedata = [imagedata]
outdict = [array_to_im(im) for im in imagedata]
return {'images': outdict}
| from lightning.types.base import Base
from lightning.types.decorators import viztype
from lightning.types.utils import vecs_to_points_three, add_property
@viztype
class Scatter3(Base):
_name = 'scatter3'
@staticmethod
def clean(x, y, z, color=None, label=None, alpha=None, size=None):
"""
Plot three-dimensional data as points.
.. image:: particles.png
Parameters
----------
x, y, z : array-like, each (n,)
Input data
color : array-like, optional, singleton or (n,3)
Single rgb value or array to set colors
label : array-like, optional, singleton or (n,)
Single integer or array to set colors via groups
size : array-like, optional, singleton or (n,)
Single size or array to set point sizes
alpha : array-like, optional, singleton or (n,)
Single alpha value or array to set fill and stroke opacity
"""
points = vecs_to_points_three(x, y, z)
outdict = {'points': points}
outdict = add_property(outdict, color, 'color')
outdict = add_property(outdict, label, 'label')
outdict = add_property(outdict, size, 'size')
outdict = add_property(outdict, alpha, 'alpha')
return outdict
@viztype
class Volume(Base):
_name = 'volume'
@staticmethod
def clean(imagedata):
"""
Display a collection of arrays as browsable images with thumbnails.
.. image:: volume.png
Parameters
----------
imagedata : array-like, or list of array-like
Image or list of images as two dimensional (grayscale) or three dimensional (RGB) arrays.
"""
if isinstance(imagedata, ndarray):
imagedata = [imagedata]
outdict = [array_to_im(im) for im in imagedata]
return {'images': outdict}
| mit | Python |
ab1028ccd73ea39d14f1eee57b91b303b31e2d63 | Bump version for v0.2.0 release. | pybee/briefcase | briefcase/__init__.py | briefcase/__init__.py | from __future__ import print_function, unicode_literals, absolute_import, division
__all__ = [
'__version__',
]
# Examples of valid version strings
# __version__ = '1.2.3.dev1' # Development release 1
# __version__ = '1.2.3a1' # Alpha Release 1
# __version__ = '1.2.3b1' # Beta Release 1
# __version__ = '1.2.3rc1' # RC Release 1
# __version__ = '1.2.3' # Final Release
# __version__ = '1.2.3.post1' # Post Release 1
__version__ = '0.2.0'
| from __future__ import print_function, unicode_literals, absolute_import, division
__all__ = [
'__version__',
]
# Examples of valid version strings
# __version__ = '1.2.3.dev1' # Development release 1
# __version__ = '1.2.3a1' # Alpha Release 1
# __version__ = '1.2.3b1' # Beta Release 1
# __version__ = '1.2.3rc1' # RC Release 1
# __version__ = '1.2.3' # Final Release
# __version__ = '1.2.3.post1' # Post Release 1
__version__ = '0.1.9'
| bsd-3-clause | Python |
08433e45da17ff74185d3588a0d5a2240e2b40a9 | Fix super call in config. | riga/law,riga/law | law/config.py | law/config.py | # -*- coding: utf-8 -*-
"""
law Config file interface.
"""
__all__ = ["Config"]
import os
try:
# python 3
from configparser import ConfigParser
except ImportError:
# python 2
from ConfigParser import ConfigParser
_no_default = object()
class Parser(ConfigParser):
def optionxform(self, option):
return option
def update(self, data, overwrite=True):
for section, _data in data.items():
if not self.has_section(section):
self.add_section(section)
for option, value in _data.items():
if overwrite or not self.has_option(section, option):
self.set(section, option, value)
def get_default(self, section, option, default=_no_default, cast=None):
if default != _no_default and not self.has_option(section, option):
return default
value = self.get(section, option)
if cast is not None:
value = cast(value)
return value
class Config(object):
_instance = None
_defaults = {
"core": {
"dbfile": os.environ.get("LAW_DB_FILE", os.path.expandvars("$HOME/.law/db"))
},
"paths": {}
}
_no_value = object()
@classmethod
def instance(cls, config_file=None):
if cls._instance is None:
cls._instance = cls(config_file=config_file)
return cls._instance
def __init__(self, config_file=None):
super(Config, self).__init__()
if config_file is None:
config_file = os.environ.get("LAW_CONFIG_FILE", None)
if config_file is None:
config_file = os.path.expandvars("$HOME/.law/config")
if not os.path.isfile(config_file):
config_file = None
if config_file is None:
config_file = "etc/law/config"
self._parser = Parser(allow_no_value=True)
self._parser.read(config_file)
self._parser.update(self._defaults, overwrite=False)
def get(self, section, option, default=_no_default, cast=None):
return self._parser.get_default(section, option, default=_no_default, cast=None)
def items(self, section):
return self._parser.items(section)
def keys(self, section):
return [key for key, _ in self.items(section)]
| # -*- coding: utf-8 -*-
"""
law Config file interface.
"""
__all__ = ["Config"]
import os
try:
# python 3
from configparser import ConfigParser
except ImportError:
# python 2
from ConfigParser import ConfigParser
_no_default = object()
class Parser(ConfigParser):
def optionxform(self, option):
return option
def update(self, data, overwrite=True):
for section, _data in data.items():
if not self.has_section(section):
self.add_section(section)
for option, value in _data.items():
if overwrite or not self.has_option(section, option):
self.set(section, option, value)
def get_default(self, section, option, default=_no_default, cast=None):
if default != _no_default and not self.has_option(section, option):
return default
value = super(Parser, self).get(section, option)
if cast is not None:
value = cast(value)
return value
class Config(object):
_instance = None
_defaults = {
"core": {
"dbfile": os.environ.get("LAW_DB_FILE", os.path.expandvars("$HOME/.law/db"))
},
"paths": {}
}
_no_value = object()
@classmethod
def instance(cls, config_file=None):
if cls._instance is None:
cls._instance = cls(config_file=config_file)
return cls._instance
def __init__(self, config_file=None):
super(Config, self).__init__()
if config_file is None:
config_file = os.environ.get("LAW_CONFIG_FILE", None)
if config_file is None:
config_file = os.path.expandvars("$HOME/.law/config")
if not os.path.isfile(config_file):
config_file = None
if config_file is None:
config_file = "etc/law/config"
self._parser = Parser(allow_no_value=True)
self._parser.read(config_file)
self._parser.update(self._defaults, overwrite=False)
def get(self, section, option, default=_no_default, cast=None):
return self._parser.get_default(section, option, default=_no_default, cast=None)
def items(self, section):
return self._parser.items(section)
def keys(self, section):
return [key for key, _ in self.items(section)]
| bsd-3-clause | Python |
dbc9b9b1bac2a05f8561be56ff15d528426a4740 | make references readonly in admin (too slow to low otherwise) | sunlightlabs/coleslaw,sunlightlabs/coleslaw,sunlightlabs/coleslaw,sunlightlabs/coleslaw | laws/admin.py | laws/admin.py | from django.contrib import admin
from laws.models import Law
class LawAdmin(admin.ModelAdmin):
list_filter = ["title"]
list_display = ["title", "section", "psection", "num_references", "level", "order", "text", "source"]
readonly_fields = ('references',)
admin.site.register(Law, LawAdmin)
| from django.contrib import admin
from laws.models import Law
class LawAdmin(admin.ModelAdmin):
list_filter = ["title"]
list_display = ["title", "section", "psection", "num_references", "level", "order", "text", "source"]
admin.site.register(Law, LawAdmin)
| bsd-3-clause | Python |
2b2da6988465b335ebc04ff8b955bc5f1c2e84e5 | Enhance crawl command with argv parameters | pyjobs/web,pyjobs/web,pyjobs/web | pyjobs_web/pyjobsweb/commands/crawl.py | pyjobs_web/pyjobsweb/commands/crawl.py | # -*- coding: utf-8 -*-
import os
from pyjobs_crawlers.run import start_crawlers
from crawlers import PyJobsWebConnector
from pyjobsweb.commands import AppContextCommand
class CrawlCommand(AppContextCommand):
def get_parser(self, prog_name):
parser = super(CrawlCommand, self).get_parser(prog_name)
parser.add_argument("-p", "--processes",
help='Number of processes used (set 0 to use main process)',
dest='processes', default=0)
parser.add_argument("-d", "--debug",
help='Enable debug',
dest='debug', default=0)
return parser
def take_action(self, parsed_args):
super(CrawlCommand, self).take_action(parsed_args)
start_crawlers(connector_class=PyJobsWebConnector,
processes=parsed_args.processes,
debug=parsed_args.debug)
| # -*- coding: utf-8 -*-
import os
from pyjobs_crawlers.run import start_crawlers
from crawlers import PyJobsWebConnector
from pyjobsweb.commands import AppContextCommand
class CrawlCommand(AppContextCommand):
def take_action(self, parsed_args):
super(CrawlCommand, self).take_action(parsed_args)
os.environ['SCRAPY_SETTINGS_MODULE'] = 'pyjobs_crawlers.settings'
start_crawlers(connector_class=PyJobsWebConnector, processes=1, debug=False)
| mit | Python |
ec7e1123a96c0521f5cb4500c49406b9464f6227 | Make migration idempotent | alphagov/backdrop,alphagov/backdrop,alphagov/backdrop | migrations/003_add_capped_collections.py | migrations/003_add_capped_collections.py | """
Add capped collections for real time data
"""
import logging
log = logging.getLogger(__name__)
def up(db):
capped_collections = [
"fco_pay_legalisation_post_realtime",
"fco_pay_legalisation_drop_off_realtime",
"fco_pay_register_birth_abroad_realtime",
"fco_pay_register_death_abroad_realtime",
"fco_pay_foreign_marriage_certificates_realtime",
"fco_deposit_foreign_marriage_realtime",
"govuk_realtime",
"licensing_realtime",
]
existing_collections = db.collection_names()
for collection_name in capped_collections:
if not collection_name in existing_collections:
db.create_collection(name=collection_name, capped=True, size=5040)
log.info("created capped collection: %s" % collection_name)
| """
Add capped collections for real time data
"""
import logging
log = logging.getLogger(__name__)
def up(db):
capped_collections = [
"fco_pay_legalisation_post_realtime",
"fco_pay_legalisation_drop_off_realtime",
"fco_pay_register_birth_abroad_realtime",
"fco_pay_register_death_abroad_realtime",
"fco_pay_foreign_marriage_certificates_realtime",
"fco_deposit_foreign_marriage_realtime",
"govuk_realtime",
"licensing_realtime",
]
for collection_name in capped_collections:
db.create_collection(name=collection_name, capped=True, size=5040)
log.info("created capped collection: %s" % collection_name)
| mit | Python |
6b70a5c824d544348503b37edb52ff2e35b2f1ce | Update player.py | triested/logs-search | player.py | player.py | '''
Defines a class Player
Data Members:
data # a dictionary of the high-level log info. contains:
logs
date
id
title
results # number of results (capped at 1000 games)
success # false if retrieval url was formatted bad
Methods:
__init__(self, id64)
load_data(self, id64)
Tools:
Sample urls:
url1 = 'http://logs.tf/json_search?&player=76561198055233348'
url2 = 'http://logs.tf/json_search?&player=76561198064873161'
'''
import urllib.request
import json
class Player:
def __init__(self, id64):
self.load_data(id64)
def load_data(self, id64):
url = "http://logs.tf/json_search?&player=" + id64
json_obj = urllib.request.urlopen(url)
output_string = json_obj.read().decode('utf-8')
self.data = json.loads(output_string)
| '''
Defines a class Player
Data Members:
data # a dictionary of the high-level log info. contains:
logs
date
id
title
results # number of results (capped at 1000 games)
success # false if retrieval url was formatted bad
Methods:
__init__(self, id64)
load_data(self, id64)
Tools:
Sample urls:
url1 = 'http://logs.tf/json_search?&player=76561198055233348'
url2 = 'http://logs.tf/json_search?&player=76561198064873161'
'''
import urllib.request
import json
class Player:
def __init__(self, id64):
load_data(self,id64)
def load_data(self, id64):
url = "http://logs.tf/json_search?&player=" + steam_id_64
json = urllib.request.urlopen(url1)
output_string = json.read().decode('utf-8')
self.data = json.loads(output_string)
| agpl-3.0 | Python |
87d1b24d8ee806c5aa6cf73d83472b129b0f87fe | Add random GT to a given vcf | sbg/Mitty,sbg/Mitty | mitty/simulation/genome/sampledgenome.py | mitty/simulation/genome/sampledgenome.py | import pysam
from numpy.random import choice
def assign_random_gt(input_vcf, outname, sample_name="HG", default_af=0.01):
vcf_pointer = pysam.VariantFile(filename=input_vcf)
new_header = vcf_pointer.header.copy()
if "GT" not in new_header.formats:
new_header.formats.add("GT", "1", "String", "Consensus Genotype across all datasets with called genotype")
new_header.samples.add(sample_name)
default_probs = [1 - default_af * (1 + default_af), default_af, default_af * default_af]
with open(outname, 'w') as out_vcf:
out_vcf.write(str(new_header))
for rec in vcf_pointer.fetch():
rec_copy = rec.copy()
if "GT" not in rec_copy.format.keys():
if "AF" not in rec_copy.info.keys():
gt_probs = default_probs
else:
af = rec_copy.info["AF"]
gt_probs = [1 - af * (1 + af), af, af * af]
c = choice(["0/0", "0/1", "1/1"], p=gt_probs)
out_vcf.write("\t".join([str(rec_copy)[:-1], "GT", c]) + "\n")
vcf_pointer.close()
| import pysam
from numpy.random import choice
import math
def assign_random_gt(input_vcf, outname, sample_name="HG", default_af=0.01):
vcf_pointer = pysam.VariantFile(filename=input_vcf)
new_header = vcf_pointer.header.copy()
if "GT" not in new_header.formats:
new_header.formats.add("GT", "1", "String", "Consensus Genotype across all datasets with called genotype")
new_header.samples.add(sample_name)
default_probs = [1 - default_af - math.pow(default_af, 2), default_af, math.pow(default_af, 2)]
with open(outname, 'w') as out_vcf:
out_vcf.write(str(new_header))
for rec in vcf_pointer.fetch():
rec_copy = rec.copy()
if "GT" not in rec_copy.format.keys():
if "AF" not in rec_copy.info.keys():
gt_probs = default_probs
else:
af = rec_copy.info["AF"]
gt_probs = [1 - af - math.pow(af, 2), af, math.pow(af, 2)]
c = choice(["0/0", "0/1", "1/1"], p=gt_probs)
out_vcf.write("\t".join([str(rec_copy)[:-1], "GT", c]) + "\n")
vcf_pointer.close()
| apache-2.0 | Python |
4260babfc83f2d9d5ad5a1122885330554fb841d | add pybloom folder | sujitmhj/document_similarity_based_on_bloom_filter,sujitmhj/document_similarity_based_on_bloom_filter,sujitmhj/document_similarity_based_on_bloom_filter,sujitmhj/document_similarity_based_on_bloom_filter | config/wsgi.py | config/wsgi.py | """
WSGI config for document_similarity project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
os.environ['DJANGO_SETTINGS_MODULE'] = 'config.settings.production'
os.environ['DJANGO_SECRET_KEY'] = 'dg_*bcao4v-yq6f90-f2hn+ysl9&)y=0d_b=g3qgu2ij)9x5hh>'
os.environ['DATABASE_URL'] = 'sqlite:///data.db'
os.environ['DJANGO_ADMIN_URL'] = 'admin'
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| """
WSGI config for document_similarity project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit | Python |
45d6622f2f3b2383dd312101c8cd95367396152e | Update transformation command | rsk-mind/rsk-mind-framework | rsk_mind/core/commands.py | rsk_mind/core/commands.py | import argparse, os
def update_engine(setting):
pass
def transformation(setting):
DATASOURCE = setting.DATASOURCE
datasource = DATASOURCE['IN']['class'](*DATASOURCE['IN']['params'])
dataset = datasource.read()
dataset.setTransformer(setting.TRANSFORMER)
dataset.applyTransformations()
datasource = DATASOURCE['OUT']['class'](*DATASOURCE['OUT']['params'])
datasource.write(dataset)
def execute_from_command_line(argv, setting):
argv = argv[1:]
parser = argparse.ArgumentParser(description='')
parser.add_argument('command', help='Command to execute!')
params = parser.parse_args(argv)
if params.command == 'transformation':
transformation(setting)
elif params.command == 'update-engine':
update_engine(setting)
| import argparse, os
def update_engine(setting):
pass
def transformation(setting):
DATASOURCE = setting.DATASOURCE
datasource = DATASOURCE['IN']['class'](*DATASOURCE['IN']['params'])
dataset = datasource.read()
datasource = DATASOURCE['OUT']['class'](*DATASOURCE['OUT']['params'])
datasource.write(dataset)
def execute_from_command_line(argv, setting):
argv = argv[1:]
parser = argparse.ArgumentParser(description='')
parser.add_argument('command', help='Command to execute!')
params = parser.parse_args(argv)
if params.command == 'transformation':
transformation(setting)
elif params.command == 'update-engine':
update_engine(setting)
| mit | Python |
09dfa61ce07c64320a545374f02c13c63f4a1600 | Add display of visitor counts to admin | michaelmior/pylinks,michaelmior/pylinks,michaelmior/pylinks | pylinks/links/admin.py | pylinks/links/admin.py | from django.contrib import admin
from pylinks.main.admin import ModelAdmin
from pylinks.links import models
class CategoryAdmin(ModelAdmin):
prepopulated_fields = {
'slug': ('title',)
}
search_fields = ('title',)
admin.site.register(models.Category, CategoryAdmin)
class LinkAdmin(ModelAdmin):
list_display = ('title', 'url')
list_filter = ('category__title',)
readonly_fields = ('visits',)
search_fields = ('title',)
admin.site.register(models.Link, LinkAdmin)
| from django.contrib import admin
from pylinks.main.admin import ModelAdmin
from pylinks.links import models
class CategoryAdmin(ModelAdmin):
prepopulated_fields = {
'slug': ('title',)
}
search_fields = ('title',)
admin.site.register(models.Category, CategoryAdmin)
class LinkAdmin(ModelAdmin):
list_display = ('title', 'url')
list_filter = ('category__title',)
search_fields = ('title',)
admin.site.register(models.Link, LinkAdmin)
| mit | Python |
1f8b54d22cee5653254514bf07c1b4cb1eb147cb | Remove test item from config grabbing script | weloxux/dotfiles,weloxux/dotfiles | _grabconfig.py | _grabconfig.py | #!/usr/bin/env python2
import os, shutil
files = ["/etc/crontab",
"/usr/local/bin/ssu", "/usr/local/bin/xyzzy",
"/home/dagon/.bashrc","/home/dagon/.i3status.conf", "/home/dagon/.profile", "/home/dagon/.vimrc", "/home/dagon/.i3/config", "/home/dagon/.vim",
"/home/dagon/.config/bless", "/home/dagon/.config/terminator"]
for item in files:
dest = os.getcwd() + item
if os.path.isdir(item):
try:
shutil.rmtree(dest)
except: pass
shutil.copytree(item, dest)
else:
try:
os.remove(dest)
except: pass
shutil.copyfile(item, dest)
| #!/usr/bin/env python2
import os, shutil
files = ["/etc/crontab",
"/usr/local/bin/ssu", "/usr/local/bin/xyzzy",
"/home/dagon/.bashrc","/home/dagon/.i3status.conf", "/home/dagon/.profile", "/home/dagon/.vimrc", "/home/dagon/.i3/config", "/home/dagon/.vim",
"/home/dagon/.config/bless", "/home/dagon/.config/terminator", "/bla.txt"]
for item in files:
dest = os.getcwd() + item
if os.path.isdir(item):
try:
shutil.rmtree(dest)
except: pass
shutil.copytree(item, dest)
else:
try:
os.remove(dest)
except: pass
shutil.copyfile(item, dest)
| unlicense | Python |
de31d92a07d85e83088c04a542b5fdf927a05797 | Remove redundant code | nathanbjenx/cairis,failys/CAIRIS,nathanbjenx/cairis,failys/CAIRIS,nathanbjenx/cairis,nathanbjenx/cairis,failys/CAIRIS | cairis/core/Target.py | cairis/core/Target.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__author__ = 'Shamal Faily'
class Target:
def __init__(self,tName,tEffectiveness,tRat):
self.theName = tName
self.theEffectiveness = tEffectiveness
self.theRationale = tRat
def name(self): return self.theName
def effectiveness(self): return self.theEffectiveness
def rationale(self): return self.theRationale
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__author__ = 'Shamal Faily'
class Target:
def __init__(self,tName,tEffectiveness,tRat):
self.theName = tName
self.theEffectiveness = tEffectiveness
self.theRationale = tRat
def name(self): return self.theName
def effectiveness(self): return self.theEffectiveness
def rationale(self): return self.theRationale
def __getitem__(self,idx):
if (idx == 0):
return self.theName
elif (idx == 1):
return self.theEffectiveness
else:
return self.theRationale
| apache-2.0 | Python |
ee28e56c403389ac5baf1810fced232327931bdd | remove unused import from service command base | genome/flow-core,genome/flow-core,genome/flow-core | flow/commands/service.py | flow/commands/service.py | from flow.commands.base import CommandBase
from injector import inject, Injector
from twisted.internet import defer
import flow.interfaces
import logging
LOG = logging.getLogger(__name__)
@inject(storage=flow.interfaces.IStorage, broker=flow.interfaces.IBroker,
injector=Injector)
class ServiceCommand(CommandBase):
# XXX This should be removed
@staticmethod
def annotate_parser(parser):
pass
def _setup(self, parsed_arguments):
for handler in self.handlers:
self.broker.register_handler(handler)
self.broker.connect()
def _execute(self, parsed_arguments):
"""
Returns a deferred that will never fire.
"""
deferred = defer.Deferred()
return deferred
| from flow.commands.base import CommandBase
from flow.configuration.inject.redis_conf import RedisConfiguration
from injector import inject, Injector
from twisted.internet import defer
import flow.interfaces
import logging
LOG = logging.getLogger(__name__)
@inject(storage=flow.interfaces.IStorage, broker=flow.interfaces.IBroker,
injector=Injector)
class ServiceCommand(CommandBase):
# XXX This should be removed
@staticmethod
def annotate_parser(parser):
pass
def _setup(self, parsed_arguments):
for handler in self.handlers:
self.broker.register_handler(handler)
self.broker.connect()
def _execute(self, parsed_arguments):
"""
Returns a deferred that will never fire.
"""
deferred = defer.Deferred()
return deferred
| agpl-3.0 | Python |
7ca2b660a316cc6a52dc111cd738e0b7779a55e5 | set new version number for development | Gustavosdo/django-salmonella,lincolnloop/django-salmonella,lincolnloop/django-salmonella,Gustavosdo/django-salmonella,Gustavosdo/django-salmonella,lincolnloop/django-salmonella,lincolnloop/django-salmonella | salmonella/__init__.py | salmonella/__init__.py | VERSION = (0, 5, 2, "a", "1") # following PEP 386
DEV_N = 1 # for PyPi releases, set this to None
def get_version(short=False):
version = "%s.%s" % (VERSION[0], VERSION[1])
if short:
return version
if VERSION[2]:
version = "%s.%s" % (version, VERSION[2])
if VERSION[3] != "f":
version = "%s%s%s" % (version, VERSION[3], VERSION[4])
if DEV_N:
version = "%s.dev%s" % (version, DEV_N)
return version
__version__ = get_version()
| VERSION = (0, 5, 1, "f") # following PEP 386
DEV_N = None # for PyPi releases, set this to None
def get_version(short=False):
version = "%s.%s" % (VERSION[0], VERSION[1])
if short:
return version
if VERSION[2]:
version = "%s.%s" % (version, VERSION[2])
if VERSION[3] != "f":
version = "%s%s%s" % (version, VERSION[3], VERSION[4])
if DEV_N:
version = "%s.dev%s" % (version, DEV_N)
return version
__version__ = get_version()
| mit | Python |
dd1bcf71c6548f99e6bc133bf890c87440e13535 | Add a running state which can be used to know when a workflow is running. | openstack/taskflow,jessicalucci/TaskManagement,jimbobhickville/taskflow,openstack/taskflow,citrix-openstack-build/taskflow,jimbobhickville/taskflow,junneyang/taskflow,varunarya10/taskflow,pombredanne/taskflow-1,jessicalucci/TaskManagement,citrix-openstack-build/taskflow,junneyang/taskflow,pombredanne/taskflow-1,varunarya10/taskflow | taskflow/states.py | taskflow/states.py | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Job states.
CLAIMED = 'CLAIMED'
FAILURE = 'FAILURE'
PENDING = 'PENDING'
REVERTING = 'REVERTING'
SUCCESS = 'SUCCESS'
UNCLAIMED = 'UNCLAIMED'
RESUMING = 'RESUMING'
# Flow states.
FAILURE = FAILURE
PENDING = PENDING
REVERTING = REVERTING
STARTED = 'STARTED'
SUCCESS = SUCCESS
RESUMING = RESUMING
RUNNING = 'RUNNING'
# Task states.
FAILURE = FAILURE
STARTED = STARTED
SUCCESS = SUCCESS
| # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Job states.
CLAIMED = 'CLAIMED'
FAILURE = 'FAILURE'
PENDING = 'PENDING'
REVERTING = 'REVERTING'
SUCCESS = 'SUCCESS'
UNCLAIMED = 'UNCLAIMED'
RESUMING = 'RESUMING'
# Flow states.
FAILURE = FAILURE
PENDING = PENDING
REVERTING = REVERTING
STARTED = 'STARTED'
SUCCESS = SUCCESS
RESUMING = RESUMING
# Task states.
FAILURE = FAILURE
STARTED = STARTED
SUCCESS = SUCCESS
| apache-2.0 | Python |
5cf39c69994245c0a6d743f06d4d81a05f88b0e5 | Fix flake8 | bulv1ne/django-utils,bulv1ne/django-utils | utils/tests/test_templatetags_markdown.py | utils/tests/test_templatetags_markdown.py | from django.template import Context, Template
from django.test import TestCase
template = '''
{% load md %}
{% markdown %}
# Foo
## Bar
Baz
{% endmarkdown %}
'''
template_html = '''
<h1>Foo</h1>
<h2>Bar</h2>
<p>Baz</p>
'''
class MarkdownTestCase(TestCase):
def test_markdown(self):
html = Template(template).render(Context({}))
self.assertHTMLEqual(html, template_html)
| from django.template import Context, Template
from django.test import TestCase
template = '''
{% load md %}
{% markdown %}
# Foo
## Bar
Baz
{% endmarkdown %}
'''
template_html = '''
<h1>Foo</h1>
<h2>Bar</h2>
<p>Baz</p>
'''
class MarkdownTestCase(TestCase):
def test_markdown(self):
html = Template(template).render(Context({}))
self.assertHTMLEqual(html, template_html)
| mit | Python |
545a9a0c5f7a7fac5eb4a1552969301f553d150e | Add some comments. | jdhp-docs/notebooks,jdhp-docs/notebook_skeleton,jdhp-docs/python-notebooks,jdhp-docs/notebooks | clean_ipynb.py | clean_ipynb.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This script strips the output of an Jupyter notebook.
It can be used as a preprocessing before commiting notebooks on Version Systems
like Git.
Here is a different way to do this: http://stackoverflow.com/questions/25178118/opening-ipython-notebook-without-output.
"""
import argparse
import json
def main():
"""Main function"""
# PARSE OPTIONS ###########################################################
# TODO: improve the description
parser = argparse.ArgumentParser(description='Strip the output of Jupyter Notebooks (.ipynb files).')
parser.add_argument("fileargs", nargs="+", metavar="FILE", help="Jupyter Notebook files to clean")
args = parser.parse_args()
file_paths = args.fileargs
# CLEAN FILES #############################################################
output_files = {}
for file_path in file_paths:
with open(file_path, 'r') as fd:
notebook = json.load(fd)
for cell in notebook['cells']:
# Init execution_count items
if "execution_count" in cell:
cell["execution_count"] = None
# Remove outputs
if "outputs" in cell:
cell["outputs"] = []
output_files[file_path] = notebook
# SAVE THE RESULT #########################################################
for file_path, data in output_files.items():
with open(file_path, 'w') as fd:
json.dump(data, fd, sort_keys=True, indent=1) # pretty print format
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import json
def main():
"""Main function"""
# PARSE OPTIONS ###########################################################
# TODO: improve the description
parser = argparse.ArgumentParser(description='Clean Jupyter Notebook files (.ipynb files) for Version Systems like Git.')
parser.add_argument("fileargs", nargs="+", metavar="FILE", help="Jupyter Notebook files to clean")
args = parser.parse_args()
file_paths = args.fileargs
# CLEAN FILES #############################################################
output_files = {}
for file_path in file_paths:
with open(file_path, 'r') as fd:
notebook = json.load(fd)
for cell in notebook['cells']:
# Init execution_count items
if "execution_count" in cell:
cell["execution_count"] = None
# Remove outputs
if "outputs" in cell:
cell["outputs"] = []
output_files[file_path] = notebook
# SAVE THE RESULT #########################################################
for file_path, data in output_files.items():
with open(file_path, 'w') as fd:
json.dump(data, fd, sort_keys=True, indent=1) # pretty print format
if __name__ == '__main__':
main()
| mit | Python |
cfeca089dd10a6853d2b969d2d248a0f7d506d1a | Handle the weird field names from the new version of the API | e-mission/e-mission-server,sunil07t/e-mission-server,shankari/e-mission-server,shankari/e-mission-server,yw374cornell/e-mission-server,e-mission/e-mission-server,joshzarrabi/e-mission-server,sunil07t/e-mission-server,e-mission/e-mission-server,joshzarrabi/e-mission-server,joshzarrabi/e-mission-server,yw374cornell/e-mission-server,sunil07t/e-mission-server,sunil07t/e-mission-server,yw374cornell/e-mission-server,shankari/e-mission-server,joshzarrabi/e-mission-server,yw374cornell/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server | emission/net/usercache/formatters/android/motion_activity.py | emission/net/usercache/formatters/android/motion_activity.py | import logging
import emission.core.wrapper.motionactivity as ecwa
import emission.net.usercache.formatters.common as fc
import attrdict as ad
def format(entry):
formatted_entry = ad.AttrDict()
formatted_entry["_id"] = entry["_id"]
formatted_entry.user_id = entry.user_id
metadata = entry.metadata
if "time_zone" not in metadata:
metadata.time_zone = "America/Los_Angeles"
fc.expand_metadata_times(metadata)
formatted_entry.metadata = metadata
data = ad.AttrDict()
if 'agb' in entry.data:
data.type = ecwa.MotionTypes(entry.data.agb).value
else:
data.type = ecwa.MotionTypes(entry.data.zzaEg).value
if 'agc' in entry.data:
data.confidence = entry.data.agc
else:
data.confidence = entry.data.zzaEh
data.ts = formatted_entry.metadata.write_ts
data.local_dt = formatted_entry.metadata.write_local_dt
data.fmt_time = formatted_entry.metadata.write_fmt_time
formatted_entry.data = data
return formatted_entry
| import logging
import emission.core.wrapper.motionactivity as ecwa
import emission.net.usercache.formatters.common as fc
import attrdict as ad
def format(entry):
formatted_entry = ad.AttrDict()
formatted_entry["_id"] = entry["_id"]
formatted_entry.user_id = entry.user_id
metadata = entry.metadata
if "time_zone" not in metadata:
metadata.time_zone = "America/Los_Angeles"
fc.expand_metadata_times(metadata)
formatted_entry.metadata = metadata
data = ad.AttrDict()
data.type = ecwa.MotionTypes(entry.data.agb).value
data.confidence = entry.data.agc
data.ts = formatted_entry.metadata.write_ts
data.local_dt = formatted_entry.metadata.write_local_dt
data.fmt_time = formatted_entry.metadata.write_fmt_time
formatted_entry.data = data
return formatted_entry
| bsd-3-clause | Python |
4a2cb70e08d3762955ca0933f239114481a553b5 | add a python script for checking results | bremond/siconos,fperignon/siconos,radarsat1/siconos,bremond/siconos,siconos/siconos,fperignon/siconos,siconos/siconos,radarsat1/siconos,fperignon/siconos,bremond/siconos,bremond/siconos,radarsat1/siconos,fperignon/siconos,radarsat1/siconos,siconos/siconos,bremond/siconos,fperignon/siconos,siconos/siconos,radarsat1/siconos | examples/Mechanics/Mechanisms/SliderCrank/check_reference.py | examples/Mechanics/Mechanisms/SliderCrank/check_reference.py | import numpy
#results = numpy.loadtxt('simulation_results.dat')
results = []
results_ref = []
with open ("simulation_results.dat", "r") as myfile:
data=myfile.readlines()
#print data[1:]
for i in data[1:]:
data_l_i = i.split('\t')
data_l_i_f = []
#for val in i:
# print val
for val in data_l_i :
try:
float(val)
data_l_i_f.append(float(val))
except:
print (' -- ')
results = numpy.array(data_l_i_f)
#print i,float(i.split('\t'))
#print ' '
with open ("simulation_results.ref", "r") as myfile:
data=myfile.readlines()
#print data[1:]
for i in data[1:]:
data_l_i = i.split('\t')
data_l_i_f = []
#for val in i:
# print val
for val in data_l_i :
try:
float(val)
data_l_i_f.append(float(val))
except:
print (' -- ')
results_ref = numpy.array(data_l_i_f)
#print i,float(i.split('\t'))
#print ' '
error_= numpy.linalg.norm(results-results_ref)
print 'Error w.r.t reference file = ', error_
assert (error_< 1e-14)
| import numpy
#results = numpy.loadtxt('simulation_results.dat')
results = []
results_ref = []
with open ("simulation_results.dat", "r") as myfile:
data=myfile.readlines()
#print data[1:]
for i in data[1:]:
data_l_i = i.split('\t')
data_l_i_f = []
#for val in i:
# print val
for val in data_l_i :
try:
float(val)
data_l_i_f.append(float(val))
except:
print (' -- ')
results = numpy.array(data_l_i_f)
#print i,float(i.split('\t'))
#print ' '
with open ("simulation_results.ref", "r") as myfile:
data=myfile.readlines()
#print data[1:]
for i in data[1:]:
data_l_i = i.split('\t')
data_l_i_f = []
#for val in i:
# print val
for val in data_l_i :
try:
float(val)
data_l_i_f.append(float(val))
except:
print (' -- ')
results_ref = numpy.array(data_l_i_f)
#print i,float(i.split('\t'))
#print ' '
error_= numpy.linalg.norm(results-results_ref)
print 'Error w.r.t reference file = ', error_
assert (error_< 1e-14)
| apache-2.0 | Python |
f716e8bbf1f20e280d5f973664fd3b4fc91686de | Fix concatentation error on no args to `./mach rustc` | Adenilson/prototype-viewing-distance,AnthonyBroadCrawford/servo,anthgur/servo,ruud-v-a/servo,s142857/servo,echochamber/servo,SimonSapin/servo,jimberlage/servo,mbrubeck/servo,DominoTree/servo,GyrosOfWar/servo,pyecs/servo,rixrix/servo,g-k/servo,steveklabnik/servo,DominoTree/servo,tschneidereit/servo,pyfisch/servo,dhananjay92/servo,s142857/servo,meh/servo,dvberkel/servo,saratang/servo,dmarcos/servo,pyecs/servo,splav/servo,steveklabnik/servo,bjwbell/servo,fiji-flo/servo,s142857/servo,deokjinkim/servo,evilpie/servo,rnestler/servo,thiagopnts/servo,zhangjunlei26/servo,rixrix/servo,brendandahl/servo,szeged/servo,mattnenterprise/servo,fiji-flo/servo,peterjoel/servo,GreenRecycleBin/servo,pyecs/servo,kindersung/servo,splav/servo,runarberg/servo,aweinstock314/servo,kindersung/servo,SimonSapin/servo,jimberlage/servo,huonw/servo,dmarcos/servo,canaltinova/servo,zentner-kyle/servo,nnethercote/servo,aweinstock314/servo,WriterOfAlicrow/servo,jgraham/servo,dsandeephegde/servo,notriddle/servo,pyfisch/servo,dsandeephegde/servo,canaltinova/servo,mdibaiee/servo,canaltinova/servo,codemac/servo,saratang/servo,hyowon/servo,nnethercote/servo,dati91/servo,sadmansk/servo,larsbergstrom/servo,KiChjang/servo,KiChjang/servo,walac/servo,pyecs/servo,jimberlage/servo,sadmansk/servo,canaltinova/servo,j3parker/servo,samfoo/servo,kindersung/servo,srbhklkrn/SERVOENGINE,paulrouget/servo,KiChjang/servo,A-deLuna/servo,wpgallih/servo,szeged/servo,mattnenterprise/servo,CJ8664/servo,boghison/servo,dmarcos/servo,tafia/servo,jlegendary/servo,mattnenterprise/servo,zentner-kyle/servo,bjwbell/servo,boghison/servo,dhananjay92/servo,eddyb/servo,tafia/servo,eddyb/servo,CJ8664/servo,SimonSapin/servo,GreenRecycleBin/servo,Adenilson/prototype-viewing-distance,notriddle/servo,A-deLuna/servo,walac/servo,pyecs/servo,paulrouget/servo,pyfisch/servo,caldwell/servo,Adenilson/prototype-viewing-distance,jimberlage/servo,saratang/serv
o,anthgur/servo,evilpie/servo,vks/servo,srbhklkrn/SERVOENGINE,rnestler/servo,WriterOfAlicrow/servo,srbhklkrn/SERVOENGINE,youprofit/servo,nerith/servo,GreenRecycleBin/servo,peterjoel/servo,canaltinova/servo,paulrouget/servo,jgraham/servo,dagnir/servo,saneyuki/servo,CJ8664/servo,saneyuki/servo,ryancanhelpyou/servo,mbrubeck/servo,notriddle/servo,nrc/servo,mt2d2/servo,indykish/servo,GyrosOfWar/servo,wpgallih/servo,GyrosOfWar/servo,g-k/servo,dagnir/servo,runarberg/servo,larsbergstrom/servo,saneyuki/servo,jgraham/servo,AnthonyBroadCrawford/servo,ryancanhelpyou/servo,dmarcos/servo,vks/servo,emilio/servo,paulrouget/servo,meh/servo,KiChjang/servo,bjwbell/servo,ruud-v-a/servo,g-k/servo,deokjinkim/servo,echochamber/servo,caldwell/servo,jdramani/servo,cbrewster/servo,sadmansk/servo,nrc/servo,peterjoel/servo,nrc/servo,larsbergstrom/servo,hyowon/servo,snf/servo,caldwell/servo,A-deLuna/servo,notriddle/servo,samfoo/servo,AnthonyBroadCrawford/servo,dhananjay92/servo,peterjoel/servo,dsandeephegde/servo,tafia/servo,karlito40/servo,nrc/servo,aidanhs/servo,akosel/servo,ConnorGBrewster/servo,emilio/servo,kindersung/servo,cbrewster/servo,nrc/servo,RenaudParis/servo,indykish/servo,thiagopnts/servo,larsbergstrom/servo,wartman4404/servo,ConnorGBrewster/servo,s142857/servo,samfoo/servo,caldwell/servo,anthgur/servo,RenaudParis/servo,runarberg/servo,rixrix/servo,saneyuki/servo,vks/servo,GreenRecycleBin/servo,upsuper/servo,karlito40/servo,RenaudParis/servo,meh/servo,thiagopnts/servo,RenaudParis/servo,dati91/servo,evilpie/servo,dagnir/servo,aweinstock314/servo,anthgur/servo,cbrewster/servo,nrc/servo,meh/servo,ryancanhelpyou/servo,mbrubeck/servo,akosel/servo,rixrix/servo,tempbottle/servo,wpgallih/servo,hyowon/servo,ryancanhelpyou/servo,tempbottle/servo,larsbergstrom/servo,aidanhs/servo,indykish/servo,mbrubeck/servo,echochamber/servo,tempbottle/servo,huonw/servo,CJ8664/servo,sadmansk/servo,rnestler/servo,mattnenterprise/servo,ruud-v-a/servo,Adenilson/prototype-viewing-distance,aweinstock314/servo,r
yancanhelpyou/servo,j3parker/servo,ruud-v-a/servo,tafia/servo,nick-thompson/servo,splav/servo,youprofit/servo,anthgur/servo,mdibaiee/servo,mdibaiee/servo,nick-thompson/servo,indykish/servo,larsbergstrom/servo,anthgur/servo,tschneidereit/servo,dhananjay92/servo,indykish/servo,upsuper/servo,boghison/servo,wpgallih/servo,brendandahl/servo,saratang/servo,snf/servo,dvberkel/servo,jgraham/servo,dvberkel/servo,peterjoel/servo,jimberlage/servo,karlito40/servo,CJ8664/servo,emilio/servo,mdibaiee/servo,evilpie/servo,splav/servo,zhangjunlei26/servo,jgraham/servo,kindersung/servo,szeged/servo,samfoo/servo,nnethercote/servo,saneyuki/servo,nnethercote/servo,youprofit/servo,tschneidereit/servo,dmarcos/servo,chotchki/servo,KiChjang/servo,thiagopnts/servo,mdibaiee/servo,pgonda/servo,CJ8664/servo,ryancanhelpyou/servo,jgraham/servo,walac/servo,echochamber/servo,zentner-kyle/servo,pyfisch/servo,eddyb/servo,larsbergstrom/servo,dvberkel/servo,SimonSapin/servo,eddyb/servo,fiji-flo/servo,saratang/servo,szeged/servo,avadacatavra/servo,rentongzhang/servo,ConnorGBrewster/servo,caldwell/servo,fiji-flo/servo,szeged/servo,wartman4404/servo,zhangjunlei26/servo,wartman4404/servo,j3parker/servo,saneyuki/servo,WriterOfAlicrow/servo,rentongzhang/servo,fiji-flo/servo,wpgallih/servo,thiagopnts/servo,mattnenterprise/servo,boghison/servo,bjwbell/servo,KiChjang/servo,Adenilson/prototype-viewing-distance,paulrouget/servo,pyfisch/servo,A-deLuna/servo,tempbottle/servo,jlegendary/servo,srbhklkrn/SERVOENGINE,mt2d2/servo,walac/servo,mbrubeck/servo,chotchki/servo,upsuper/servo,snf/servo,peterjoel/servo,echochamber/servo,szeged/servo,AnthonyBroadCrawford/servo,j3parker/servo,jdramani/servo,paulrouget/servo,youprofit/servo,karlito40/servo,nick-thompson/servo,zentner-kyle/servo,cbrewster/servo,dati91/servo,thiagopnts/servo,saneyuki/servo,zhangjunlei26/servo,GyrosOfWar/servo,zhangjunlei26/servo,DominoTree/servo,Adenilson/prototype-viewing-distance,deokjinkim/servo,mdibaiee/servo,emilio/servo,jdramani/servo,szeged/ser
vo,karlito40/servo,nick-thompson/servo,larsbergstrom/servo,peterjoel/servo,s142857/servo,brendandahl/servo,splav/servo,akosel/servo,tafia/servo,cbrewster/servo,upsuper/servo,dvberkel/servo,youprofit/servo,nerith/servo,CJ8664/servo,jlegendary/servo,samfoo/servo,ConnorGBrewster/servo,wartman4404/servo,vks/servo,SimonSapin/servo,rixrix/servo,hyowon/servo,nerith/servo,notriddle/servo,dsandeephegde/servo,pyfisch/servo,nnethercote/servo,youprofit/servo,DominoTree/servo,aweinstock314/servo,rentongzhang/servo,szeged/servo,nnethercote/servo,indykish/servo,walac/servo,GreenRecycleBin/servo,tschneidereit/servo,WriterOfAlicrow/servo,luniv/servo,vks/servo,karlito40/servo,luniv/servo,RenaudParis/servo,chotchki/servo,j3parker/servo,dmarcos/servo,mt2d2/servo,runarberg/servo,huonw/servo,codemac/servo,dagnir/servo,bjwbell/servo,wartman4404/servo,jlegendary/servo,jimberlage/servo,srbhklkrn/SERVOENGINE,luniv/servo,huonw/servo,rentongzhang/servo,runarberg/servo,s142857/servo,nerith/servo,aidanhs/servo,emilio/servo,pgonda/servo,avadacatavra/servo,codemac/servo,pyecs/servo,KiChjang/servo,nick-thompson/servo,nick-thompson/servo,snf/servo,pyecs/servo,saneyuki/servo,vks/servo,zhangjunlei26/servo,ruud-v-a/servo,aidanhs/servo,SimonSapin/servo,dhananjay92/servo,RenaudParis/servo,mbrubeck/servo,boghison/servo,splav/servo,boghison/servo,GyrosOfWar/servo,wpgallih/servo,upsuper/servo,ryancanhelpyou/servo,meh/servo,luniv/servo,brendandahl/servo,KiChjang/servo,g-k/servo,deokjinkim/servo,dagnir/servo,karlito40/servo,dsandeephegde/servo,codemac/servo,evilpie/servo,ruud-v-a/servo,caldwell/servo,rnestler/servo,cbrewster/servo,dsandeephegde/servo,paulrouget/servo,dati91/servo,sadmansk/servo,emilio/servo,akosel/servo,eddyb/servo,AnthonyBroadCrawford/servo,caldwell/servo,sadmansk/servo,pgonda/servo,mattnenterprise/servo,youprofit/servo,dati91/servo,tschneidereit/servo,DominoTree/servo,canaltinova/servo,snf/servo,rixrix/servo,j3parker/servo,pgonda/servo,srbhklkrn/SERVOENGINE,rentongzhang/servo,splav/servo,sa
ratang/servo,srbhklkrn/SERVOENGINE,bjwbell/servo,runarberg/servo,deokjinkim/servo,zentner-kyle/servo,g-k/servo,meh/servo,GreenRecycleBin/servo,akosel/servo,mt2d2/servo,cbrewster/servo,pyfisch/servo,fiji-flo/servo,notriddle/servo,paulrouget/servo,aweinstock314/servo,steveklabnik/servo,rixrix/servo,A-deLuna/servo,jimberlage/servo,DominoTree/servo,hyowon/servo,echochamber/servo,s142857/servo,wpgallih/servo,nerith/servo,WriterOfAlicrow/servo,splav/servo,zentner-kyle/servo,huonw/servo,ConnorGBrewster/servo,paulrouget/servo,ConnorGBrewster/servo,snf/servo,szeged/servo,luniv/servo,steveklabnik/servo,DominoTree/servo,dati91/servo,larsbergstrom/servo,jlegendary/servo,dvberkel/servo,evilpie/servo,CJ8664/servo,akosel/servo,wartman4404/servo,emilio/servo,codemac/servo,tempbottle/servo,wpgallih/servo,tschneidereit/servo,nerith/servo,runarberg/servo,jimberlage/servo,walac/servo,thiagopnts/servo,rnestler/servo,jdramani/servo,wartman4404/servo,DominoTree/servo,indykish/servo,larsbergstrom/servo,chotchki/servo,pyfisch/servo,notriddle/servo,evilpie/servo,saneyuki/servo,echochamber/servo,zhangjunlei26/servo,sadmansk/servo,GreenRecycleBin/servo,pgonda/servo,codemac/servo,avadacatavra/servo,deokjinkim/servo,luniv/servo,tempbottle/servo,GreenRecycleBin/servo,thiagopnts/servo,dati91/servo,zhangjunlei26/servo,avadacatavra/servo,A-deLuna/servo,jgraham/servo,dhananjay92/servo,szeged/servo,samfoo/servo,zentner-kyle/servo,rnestler/servo,eddyb/servo,paulrouget/servo,dhananjay92/servo,vks/servo,wpgallih/servo,cbrewster/servo,evilpie/servo,avadacatavra/servo,rentongzhang/servo,deokjinkim/servo,avadacatavra/servo,AnthonyBroadCrawford/servo,nnethercote/servo,dagnir/servo,hyowon/servo,tafia/servo,j3parker/servo,indykish/servo,tempbottle/servo,canaltinova/servo,dagnir/servo,canaltinova/servo,mbrubeck/servo,bjwbell/servo,g-k/servo,GyrosOfWar/servo,tschneidereit/servo,tafia/servo,eddyb/servo,rixrix/servo,jlegendary/servo,jdramani/servo,nick-thompson/servo,mattnenterprise/servo,GreenRecycleBin/servo,bre
ndandahl/servo,sadmansk/servo,chotchki/servo,steveklabnik/servo,nnethercote/servo,akosel/servo,chotchki/servo,dmarcos/servo,dvberkel/servo,peterjoel/servo,dsandeephegde/servo,nnethercote/servo,rixrix/servo,A-deLuna/servo,jimberlage/servo,WriterOfAlicrow/servo,samfoo/servo,mattnenterprise/servo,nerith/servo,anthgur/servo,AnthonyBroadCrawford/servo,emilio/servo,anthgur/servo,mt2d2/servo,brendandahl/servo,dati91/servo,huonw/servo,notriddle/servo,notriddle/servo,DominoTree/servo,aidanhs/servo,rnestler/servo,mdibaiee/servo,emilio/servo,snf/servo,peterjoel/servo,avadacatavra/servo,mt2d2/servo,saneyuki/servo,g-k/servo,pyfisch/servo,steveklabnik/servo,upsuper/servo,zhangjunlei26/servo,mbrubeck/servo,emilio/servo,fiji-flo/servo,kindersung/servo,jlegendary/servo,codemac/servo,mt2d2/servo,indykish/servo,eddyb/servo,KiChjang/servo,RenaudParis/servo,GyrosOfWar/servo,brendandahl/servo,saratang/servo,walac/servo,boghison/servo,fiji-flo/servo,notriddle/servo,ConnorGBrewster/servo,luniv/servo,splav/servo,dsandeephegde/servo,aidanhs/servo,ConnorGBrewster/servo,upsuper/servo,Adenilson/prototype-viewing-distance,WriterOfAlicrow/servo,splav/servo,aweinstock314/servo,pgonda/servo,evilpie/servo,pyfisch/servo,peterjoel/servo,hyowon/servo,SimonSapin/servo,steveklabnik/servo,nrc/servo,DominoTree/servo,meh/servo,aidanhs/servo,jdramani/servo,KiChjang/servo,upsuper/servo,kindersung/servo,avadacatavra/servo,nnethercote/servo,rentongzhang/servo,jdramani/servo,huonw/servo,SimonSapin/servo,pgonda/servo,rnestler/servo | python/servo/devenv_commands.py | python/servo/devenv_commands.py | from __future__ import print_function, unicode_literals
from os import path
import subprocess
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from servo.command_base import CommandBase, cd
@CommandProvider
class MachCommands(CommandBase):
    """Mach commands that wrap the Rust toolchain (cargo / rustc)."""

    @Command('cargo',
             description='Run Cargo',
             category='devenv')
    @CommandArgument(
        'params', default=None, nargs='...',
        help="Command-line arguments to be passed through to Cargo")
    def cargo(self, params):
        # Fix: `params` defaults to None when no extra arguments are given,
        # which made the list concatenation below raise TypeError. Apply the
        # same guard already used by `rustc`.
        if params is None:
            params = []
        return subprocess.call(["cargo"] + params,
                               env=self.build_env())

    @Command('update-cargo',
             description='Update Cargo dependencies',
             category='devenv')
    @CommandArgument(
        'params', default=None, nargs='...',
        help='Command-line arguments to be passed through to cargo update')
    def update_cargo(self, params):
        # Same None guard as `cargo`, for consistency with `rustc`.
        if params is None:
            params = []
        # Update the root crate plus every embedded port crate.
        cargo_paths = [path.join('.'),
                       path.join('ports', 'cef'),
                       path.join('ports', 'android', 'glut_app')]
        for cargo_path in cargo_paths:
            with cd(cargo_path):
                print(cargo_path)
                subprocess.call(["cargo", "update"] + params,
                                env=self.build_env())

    @Command('rustc',
             description='Run the Rust compiler',
             category='devenv')
    @CommandArgument(
        'params', default=None, nargs='...',
        help="Command-line arguments to be passed through to rustc")
    def rustc(self, params):
        if params is None:
            params = []
        return subprocess.call(["rustc"] + params, env=self.build_env())

    @Command('rust-root',
             description='Print the path to the root of the Rust compiler',
             category='devenv')
    def rust_root(self):
        print(self.config["tools"]["rust-root"])
| from __future__ import print_function, unicode_literals
from os import path
import subprocess
from mach.decorators import (
CommandArgument,
CommandProvider,
Command,
)
from servo.command_base import CommandBase, cd
@CommandProvider
class MachCommands(CommandBase):
    """Mach commands that wrap the Rust toolchain (cargo / rustc)."""

    @Command('cargo',
             description='Run Cargo',
             category='devenv')
    @CommandArgument(
        'params', default=None, nargs='...',
        help="Command-line arguments to be passed through to Cargo")
    def cargo(self, params):
        # Fix: `params` defaults to None when no extra arguments are given,
        # which made `["cargo"] + params` raise TypeError.
        if params is None:
            params = []
        return subprocess.call(["cargo"] + params,
                               env=self.build_env())

    @Command('update-cargo',
             description='Update Cargo dependencies',
             category='devenv')
    @CommandArgument(
        'params', default=None, nargs='...',
        help='Command-line arguments to be passed through to cargo update')
    def update_cargo(self, params):
        # Same None guard as `cargo`.
        if params is None:
            params = []
        # Update the root crate plus every embedded port crate.
        cargo_paths = [path.join('.'),
                       path.join('ports', 'cef'),
                       path.join('ports', 'android', 'glut_app')]
        for cargo_path in cargo_paths:
            with cd(cargo_path):
                print(cargo_path)
                subprocess.call(["cargo", "update"] + params,
                                env=self.build_env())

    @Command('rustc',
             description='Run the Rust compiler',
             category='devenv')
    @CommandArgument(
        'params', default=None, nargs='...',
        help="Command-line arguments to be passed through to rustc")
    def rustc(self, params):
        # Fix: guard against None before concatenating with a list.
        if params is None:
            params = []
        return subprocess.call(["rustc"] + params, env=self.build_env())

    @Command('rust-root',
             description='Print the path to the root of the Rust compiler',
             category='devenv')
    def rust_root(self):
        print(self.config["tools"]["rust-root"])
| mpl-2.0 | Python |
23febe89d3868d91f9869517256b4422fde82dcf | update Merthyr import script for 2017 | DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_merthyr.py | polling_stations/apps/data_collection/management/commands/import_merthyr.py | from data_collection.management.commands import BaseHalaroseCsvImporter
class Command(BaseHalaroseCsvImporter):
    """Import polling station data for Merthyr Tydfil (W06000024)."""

    # GSS code of the council covered by this importer.
    council_id = 'W06000024'
    # The Halarose export carries both addresses and stations.
    addresses_name = 'Merthyr_polling_station_export-2017-04-13.csv'
    stations_name = 'Merthyr_polling_station_export-2017-04-13.csv'
    # Elections this data set applies to.
    elections = ['local.merthyr-tydfil.2017-05-04']
    # The export is not UTF-8.
    csv_encoding = 'latin-1'
| """
Import Merthyr Tydfil
note: this script takes quite a long time to run
"""
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseCsvStationsCsvAddressesImporter
from data_finder.helpers import geocode_point_only, PostcodeError
from data_collection.google_geocoding_api_wrapper import (
GoogleGeocodingApiWrapper,
PostcodeNotFoundException
)
class Command(BaseCsvStationsCsvAddressesImporter):
    """
    Imports the Polling Station data from Merthyr Tydfil
    """
    council_id = 'W06000024'
    addresses_name = 'Merthyr Tydfil UPRN Numbers.csv'
    stations_name = 'Merthyr Tydfil UPRN Numbers.csv'
    elections = [
        'pcc.2016-05-05',
        'naw.c.2016-05-05',
        'naw.r.2016-05-05',
        'ref.2016-06-23'
    ]

    def __init__(self, *args, **kwargs):
        super(Command, self).__init__(*args, **kwargs)
        # Fix: this was previously a mutable *class* attribute, so the set of
        # already-seen stations leaked between importer instances created in
        # the same process. Track it per instance instead.
        self.known_stations = set()

    def station_record_to_dict(self, record):
        """Build a station dict from a CSV row; return None for duplicates."""
        unique_station_id = "-".join((
            record.polling_district,
            record.polling_station,
            record.polling_station_postcode,
        ))
        if unique_station_id in self.known_stations:
            return None

        address = record.polling_station
        postcode = record.polling_station_postcode
        if not postcode:
            # No postcode in the source data: try to recover one from the
            # address via the Google geocoding wrapper.
            gwrapper = GoogleGeocodingApiWrapper(address, self.council_id, 'UTA')
            try:
                postcode = gwrapper.address_to_postcode()
            except PostcodeNotFoundException:
                postcode = ''

        # No grid references were supplied, so attempt to derive a grid ref
        # from the postcode if we have one.
        if postcode:
            try:
                gridref = geocode_point_only(postcode)
                location = Point(
                    gridref['wgs84_lon'],
                    gridref['wgs84_lat'],
                    srid=4326
                )
            except PostcodeError:
                location = None
        else:
            location = None

        self.known_stations.add(unique_station_id)
        return {
            'internal_council_id': record.polling_district,
            'postcode': postcode,
            'address': address,
            'location': location
        }

    def address_record_to_dict(self, record):
        """Split a 'property' string into an address and trailing postcode."""
        address = record.property
        postcode = address.split(',')[-1].strip()
        address = address.replace(postcode, '').strip(', ')
        return {
            'address': address,
            'postcode': postcode,
            'polling_station_id': record.polling_district
        }
| bsd-3-clause | Python |
7628b46141efee1f0a267862d4da04fe020a48d4 | update for Telford & Wrekin | DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_telford.py | polling_stations/apps/data_collection/management/commands/import_telford.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    """Import polling station data for Telford & Wrekin (E06000020)."""

    council_id = "E06000020"
    addresses_name = (
        "local.2019-05-02/Version 2/Democracy_Club__02May2019 revised data.tsv"
    )
    stations_name = (
        "local.2019-05-02/Version 2/Democracy_Club__02May2019 revised data.tsv"
    )
    elections = ["local.2019-05-02"]
    csv_delimiter = "\t"

    # UPRNs whose suggested postcode correction should be accepted.
    _ACCEPT_UPRNS = {
        "452071472",  # TF43JZ -> TF43JX : The White Horse, Finger Road, Dawley, Telford, Shropshire
        "452041200",  # TF109HN -> TF109HW : Abbotts Way, Abbey Road, Lilleshall, Newport, Shropshire
        "10091738061",  # TF107HD -> TF107HF : Ground Floor Flat, 48 Wellington Road, Newport, Shropshire
        "10091738062",  # TF107HD -> TF107HF : First Floor Flat, 48 Wellington Road, Newport, Shropshire
        "452063592",  # SY44RF -> SY44RQ : Rose Cottage, Somerwood, Rodington, Shrewsbury, Shropshire
        "10024238558",  # TF26DN -> TF26DW : Flat Dominos Pizza, Holyhead Road, Oakengates, Telford, Shropshire
    }
    # UPRNs whose suggested correction is known to be wrong.
    _REJECT_UPRNS = {
        "200000670794",  # TF16RB -> TF15GH : 27A Castle Houses, Castle Street, Hadley, Telford, Shropshire
        "10024238030",  # TF87BJ -> TF87DN : Annexe At Greenacres, Buildwas Road, Ironbridge, Telford, Shropshire
    }

    def address_record_to_dict(self, record):
        """Tag the base record with accept/reject decisions for known UPRNs."""
        rec = super().address_record_to_dict(record)
        uprn = record.property_urn.strip().lstrip("0")
        if uprn in self._ACCEPT_UPRNS:
            rec["accept_suggestion"] = True
        if uprn in self._REJECT_UPRNS:
            rec["accept_suggestion"] = False
        return rec
| from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    """Import polling station data for Telford & Wrekin (E06000020)."""

    council_id = "E06000020"
    addresses_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019Telford.tsv"
    stations_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019Telford.tsv"
    elections = ["local.2019-05-02"]
    csv_delimiter = "\t"

    # UPRNs whose suggested postcode correction should be accepted.
    _ACCEPT_UPRNS = {
        "452071472",  # TF43JZ -> TF43JX : The White Horse, Finger Road, Dawley, Telford, Shropshire
        "452041200",  # TF109HN -> TF109HW : Abbotts Way, Abbey Road, Lilleshall, Newport, Shropshire
        "10091738061",  # TF107HD -> TF107HF : Ground Floor Flat, 48 Wellington Road, Newport, Shropshire
        "10091738062",  # TF107HD -> TF107HF : First Floor Flat, 48 Wellington Road, Newport, Shropshire
        "452063592",  # SY44RF -> SY44RQ : Rose Cottage, Somerwood, Rodington, Shrewsbury, Shropshire
        "10024238558",  # TF26DN -> TF26DW : Flat Dominos Pizza, Holyhead Road, Oakengates, Telford, Shropshire
    }
    # UPRNs whose suggested correction is known to be wrong.
    _REJECT_UPRNS = {
        "200000670794",  # TF16RB -> TF15GH : 27A Castle Houses, Castle Street, Hadley, Telford, Shropshire
        "10024238030",  # TF87BJ -> TF87DN : Annexe At Greenacres, Buildwas Road, Ironbridge, Telford, Shropshire
    }

    def address_record_to_dict(self, record):
        """Tag the base record with accept/reject decisions for known UPRNs."""
        rec = super().address_record_to_dict(record)
        uprn = record.property_urn.strip().lstrip("0")
        if uprn in self._ACCEPT_UPRNS:
            rec["accept_suggestion"] = True
        if uprn in self._REJECT_UPRNS:
            rec["accept_suggestion"] = False
        return rec
| bsd-3-clause | Python |
18beb88e495f53673f6256266c273c6db27cb03a | Remove uneeded check | aaxelb/osf.io,zkraime/osf.io,kch8qx/osf.io,danielneis/osf.io,ZobairAlijan/osf.io,lyndsysimon/osf.io,njantrania/osf.io,jeffreyliu3230/osf.io,wearpants/osf.io,jinluyuan/osf.io,billyhunt/osf.io,erinspace/osf.io,barbour-em/osf.io,chrisseto/osf.io,doublebits/osf.io,sbt9uc/osf.io,bdyetton/prettychart,fabianvf/osf.io,samchrisinger/osf.io,haoyuchen1992/osf.io,crcresearch/osf.io,reinaH/osf.io,lamdnhan/osf.io,jmcarp/osf.io,brandonPurvis/osf.io,cldershem/osf.io,GageGaskins/osf.io,leb2dg/osf.io,petermalcolm/osf.io,bdyetton/prettychart,mluke93/osf.io,reinaH/osf.io,jinluyuan/osf.io,felliott/osf.io,haoyuchen1992/osf.io,mfraezz/osf.io,kushG/osf.io,laurenrevere/osf.io,GaryKriebel/osf.io,CenterForOpenScience/osf.io,mluo613/osf.io,jnayak1/osf.io,danielneis/osf.io,abought/osf.io,erinspace/osf.io,arpitar/osf.io,ZobairAlijan/osf.io,GaryKriebel/osf.io,cosenal/osf.io,mluo613/osf.io,caseyrygt/osf.io,felliott/osf.io,emetsger/osf.io,KAsante95/osf.io,leb2dg/osf.io,TomBaxter/osf.io,Nesiehr/osf.io,asanfilippo7/osf.io,kushG/osf.io,doublebits/osf.io,GageGaskins/osf.io,zkraime/osf.io,hmoco/osf.io,zachjanicki/osf.io,Nesiehr/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,fabianvf/osf.io,jeffreyliu3230/osf.io,reinaH/osf.io,DanielSBrown/osf.io,emetsger/osf.io,mluke93/osf.io,asanfilippo7/osf.io,sbt9uc/osf.io,lyndsysimon/osf.io,billyhunt/osf.io,samanehsan/osf.io,cwisecarver/osf.io,MerlinZhang/osf.io,monikagrabowska/osf.io,HarryRybacki/osf.io,acshi/osf.io,acshi/osf.io,Ghalko/osf.io,hmoco/osf.io,Johnetordoff/osf.io,rdhyee/osf.io,zkraime/osf.io,kch8qx/osf.io,lyndsysimon/osf.io,caseyrygt/osf.io,wearpants/osf.io,acshi/osf.io,RomanZWang/osf.io,sbt9uc/osf.io,GageGaskins/osf.io,ticklemepierce/osf.io,pattisdr/osf.io,amyshi188/osf.io,emetsger/osf.io,CenterForOpenScience/osf.io,abought/osf.io,crcresearch/osf.io,arpitar/osf.io,cwisecarver/osf.io,njantrania/osf.io,dplorimer/osf,wearpants/osf.io,zamattiac/osf.io,HarryRybacki/osf.io,hmoco/osf.io,cwise
carver/osf.io,cosenal/osf.io,brandonPurvis/osf.io,Ghalko/osf.io,zachjanicki/osf.io,DanielSBrown/osf.io,emetsger/osf.io,jmcarp/osf.io,GaryKriebel/osf.io,icereval/osf.io,barbour-em/osf.io,mattclark/osf.io,samanehsan/osf.io,doublebits/osf.io,jolene-esposito/osf.io,laurenrevere/osf.io,reinaH/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,cslzchen/osf.io,TomBaxter/osf.io,dplorimer/osf,monikagrabowska/osf.io,kwierman/osf.io,jmcarp/osf.io,brianjgeiger/osf.io,wearpants/osf.io,cldershem/osf.io,petermalcolm/osf.io,MerlinZhang/osf.io,baylee-d/osf.io,GageGaskins/osf.io,lyndsysimon/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,jinluyuan/osf.io,RomanZWang/osf.io,TomHeatwole/osf.io,bdyetton/prettychart,jnayak1/osf.io,zamattiac/osf.io,jolene-esposito/osf.io,brandonPurvis/osf.io,HalcyonChimera/osf.io,sloria/osf.io,mfraezz/osf.io,TomBaxter/osf.io,icereval/osf.io,Nesiehr/osf.io,abought/osf.io,sbt9uc/osf.io,mluo613/osf.io,chennan47/osf.io,alexschiller/osf.io,jinluyuan/osf.io,zamattiac/osf.io,caneruguz/osf.io,barbour-em/osf.io,cslzchen/osf.io,danielneis/osf.io,erinspace/osf.io,adlius/osf.io,binoculars/osf.io,billyhunt/osf.io,kwierman/osf.io,HalcyonChimera/osf.io,SSJohns/osf.io,icereval/osf.io,ZobairAlijan/osf.io,kwierman/osf.io,lamdnhan/osf.io,saradbowman/osf.io,mluke93/osf.io,chrisseto/osf.io,felliott/osf.io,leb2dg/osf.io,billyhunt/osf.io,acshi/osf.io,amyshi188/osf.io,binoculars/osf.io,sloria/osf.io,ticklemepierce/osf.io,monikagrabowska/osf.io,danielneis/osf.io,samchrisinger/osf.io,himanshuo/osf.io,mattclark/osf.io,asanfilippo7/osf.io,arpitar/osf.io,ZobairAlijan/osf.io,cosenal/osf.io,brianjgeiger/osf.io,fabianvf/osf.io,monikagrabowska/osf.io,doublebits/osf.io,lamdnhan/osf.io,zamattiac/osf.io,revanthkolli/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,cosenal/osf.io,adlius/osf.io,SSJohns/osf.io,hmoco/osf.io,ckc6cz/osf.io,ckc6cz/osf.io,samanehsan/osf.io,KAsante95/osf.io,amyshi188/osf.io,binoculars/osf.io,caseyrollins/osf.io,aaxelb/osf.io,MerlinZhang/osf.io,cslzchen/osf.io,amyshi188/osf.io
,revanthkolli/osf.io,samchrisinger/osf.io,abought/osf.io,jnayak1/osf.io,revanthkolli/osf.io,caseyrollins/osf.io,himanshuo/osf.io,caseyrygt/osf.io,kushG/osf.io,chrisseto/osf.io,jolene-esposito/osf.io,adlius/osf.io,sloria/osf.io,MerlinZhang/osf.io,cslzchen/osf.io,KAsante95/osf.io,alexschiller/osf.io,SSJohns/osf.io,leb2dg/osf.io,jeffreyliu3230/osf.io,dplorimer/osf,himanshuo/osf.io,jnayak1/osf.io,zachjanicki/osf.io,mluo613/osf.io,himanshuo/osf.io,kwierman/osf.io,Ghalko/osf.io,pattisdr/osf.io,KAsante95/osf.io,chennan47/osf.io,kushG/osf.io,adlius/osf.io,RomanZWang/osf.io,rdhyee/osf.io,rdhyee/osf.io,alexschiller/osf.io,GaryKriebel/osf.io,haoyuchen1992/osf.io,caneruguz/osf.io,HarryRybacki/osf.io,DanielSBrown/osf.io,HarryRybacki/osf.io,jolene-esposito/osf.io,bdyetton/prettychart,saradbowman/osf.io,ticklemepierce/osf.io,ckc6cz/osf.io,revanthkolli/osf.io,jeffreyliu3230/osf.io,aaxelb/osf.io,brandonPurvis/osf.io,chrisseto/osf.io,chennan47/osf.io,alexschiller/osf.io,kch8qx/osf.io,haoyuchen1992/osf.io,cwisecarver/osf.io,ckc6cz/osf.io,mluke93/osf.io,doublebits/osf.io,cldershem/osf.io,jmcarp/osf.io,njantrania/osf.io,TomHeatwole/osf.io,SSJohns/osf.io,crcresearch/osf.io,baylee-d/osf.io,kch8qx/osf.io,RomanZWang/osf.io,RomanZWang/osf.io,mluo613/osf.io,njantrania/osf.io,KAsante95/osf.io,petermalcolm/osf.io,CenterForOpenScience/osf.io,mattclark/osf.io,cldershem/osf.io,samchrisinger/osf.io,HalcyonChimera/osf.io,brandonPurvis/osf.io,zachjanicki/osf.io,TomHeatwole/osf.io,samanehsan/osf.io,caneruguz/osf.io,aaxelb/osf.io,asanfilippo7/osf.io,ticklemepierce/osf.io,Ghalko/osf.io,rdhyee/osf.io,caneruguz/osf.io,fabianvf/osf.io,GageGaskins/osf.io,dplorimer/osf,zkraime/osf.io,acshi/osf.io,mfraezz/osf.io,barbour-em/osf.io,caseyrollins/osf.io,laurenrevere/osf.io,Nesiehr/osf.io,DanielSBrown/osf.io,baylee-d/osf.io,kch8qx/osf.io,arpitar/osf.io,billyhunt/osf.io,caseyrygt/osf.io,TomHeatwole/osf.io,alexschiller/osf.io,petermalcolm/osf.io,monikagrabowska/osf.io,felliott/osf.io,lamdnhan/osf.io,brianjgeiger/osf
.io | framework/render/core.py | framework/render/core.py | # -*- coding: utf-8 -*-
import os
import time
import mfr
from mfr.ext import ALL_HANDLERS
import requests
from website import settings
from framework.render.exceptions import error_message_or_exception
def init_mfr(app):
"""Register all available FileHandlers and collect each
plugin's static assets to the app's static path.
"""
# Available file handlers
mfr.register_filehandlers(ALL_HANDLERS)
# Update mfr config with static path and url
mfr.config.update({
# Base URL for static files
'ASSETS_URL': os.path.join(app.static_url_path, 'mfr'),
# Where to save static files
'ASSETS_FOLDER': os.path.join(app.static_folder, 'mfr'),
})
mfr.collect_static(dest=mfr.config['ASSETS_FOLDER'])
def render_is_done_or_happening(cache_path, temp_path):
# if the cached file exists do nothing
if os.path.isfile(cache_path):
return True
if os.path.isfile(temp_path):
if time.time() - os.path.getmtime(temp_path) > settings.MFR_TIMEOUT:
# If the temp path has not been modified since the timeout
# seconds assume the task failed, remove the file and
# start over
os.remove(temp_path)
return False
# Otherwise the task is happening somewhere else
return True
# If none of the above go ahead and start
return False
def save_to_file_or_error(download_url, dest_path):
with open(dest_path, 'wb') as temp_file:
response = requests.get(download_url, stream=True)
if response.ok:
for block in response.iter_content(1024): # 1kb
temp_file.write(block)
else:
temp_file.write(
error_message_or_exception(
response.status_code,
dest_path=dest_path,
download_url=download_url,
)
)
| # -*- coding: utf-8 -*-
import os
import time
import mfr
from mfr.ext import ALL_HANDLERS
import requests
from website import settings
from framework.render.exceptions import error_message_or_exception
def init_mfr(app):
    """Register all available FileHandlers and collect each
    plugin's static assets to the app's static path.
    """
    # Available file handlers
    mfr.register_filehandlers(ALL_HANDLERS)
    # Update mfr config with static path and url
    mfr.config.update({
        # Base URL for static files
        'ASSETS_URL': os.path.join(app.static_url_path, 'mfr'),
        # Where to save static files
        'ASSETS_FOLDER': os.path.join(app.static_folder, 'mfr'),
    })
    mfr.collect_static(dest=mfr.config['ASSETS_FOLDER'])


def render_is_done_or_happening(cache_path, temp_path):
    """Return True when a render is already cached or currently in progress."""
    # if the cached file exists do nothing
    if os.path.isfile(cache_path):
        return True
    if os.path.isfile(temp_path):
        if time.time() - os.path.getmtime(temp_path) > settings.MFR_TIMEOUT:
            # If the temp path has not been modified since the timeout
            # seconds assume the task failed, remove the file and
            # start over
            os.remove(temp_path)
            return False
        # Otherwise the task is happening somewhere else
        return True
    # If none of the above go ahead and start
    return False


def save_to_file_or_error(download_url, dest_path):
    """Stream `download_url` into `dest_path`; on an HTTP error status,
    write a rendered error message into the file instead."""
    with open(dest_path, 'wb') as temp_file:
        response = requests.get(download_url, stream=True)
        if response.ok:
            # Fix: removed the redundant `if not block: break` guard.
            # iter_content() simply stops yielding at end of stream, and
            # breaking on a falsy chunk risks truncating the download.
            for block in response.iter_content(1024):  # 1kb
                temp_file.write(block)
        else:
            temp_file.write(
                error_message_or_exception(
                    response.status_code,
                    dest_path=dest_path,
                    download_url=download_url,
                )
            )
| apache-2.0 | Python |
b1a06b069232d9347ecf2d74fbe010948e019e4f | Add AD to secrets. | owainkenwayucl/stats-plus-plus,owainkenwayucl/stats-plus-plus,owainkenwayucl/stats-plus-plus,owainkenwayucl/stats-plus-plus | python/auth/secrets.py | python/auth/secrets.py | '''
This class lets us keep the user ids and passwords outside of the git repo.
This is basically a port of the equivalent I added to Bruno's old stats code.
Owain Kenway
'''
class Secrets:
    """Load database and Active Directory credentials from outside the repo.

    Always sets ``dbuser``/``dbpasswd``; when an AD/LDAP credential source is
    found, also sets ``aduser``/``adpasswd`` and flips ``ad`` to True.
    """

    def __init__(self):
        import configparser
        import os.path

        self.filename = os.path.join(os.path.expanduser("~"), ".stats_secrets", "accounts")
        secretconfig = configparser.ConfigParser()
        secretconfig.read(self.filename)
        # Strip optional surrounding single quotes from the stored values.
        self.dbuser = secretconfig.get("database", "user").strip("'")
        # Fix: was .strip("") (a no-op); strip quotes like dbuser above.
        self.dbpasswd = secretconfig.get("database", "pass").strip("'")

        # See if we have an AD password available to us anywhere:
        self.ad = False

        # First check our config file for "ad" or "ldap" sections.
        if secretconfig.has_section("ad"):
            self.aduser = secretconfig.get("ad", "user").strip("'")
            self.adpasswd = secretconfig.get("ad", "pass").strip("'")
            self.ad = True
        elif secretconfig.has_section("ldap"):
            self.aduser = secretconfig.get("ldap", "user").strip("'")
            self.adpasswd = secretconfig.get("ldap", "pass").strip("'")
            self.ad = True
        # Then check to see if we have a ~/.adpw as for the ClusterStats repo.
        elif os.path.exists(os.path.expanduser("~/.adpw")):
            # Raw string: the value is the literal AD\sa-ritsldap01 account.
            self.aduser = r"AD\sa-ritsldap01"
            with open(os.path.expanduser("~/.adpw")) as f:
                # Fix: was stored only as self.passwd, which the config-file
                # branches above never set; expose it as adpasswd like them.
                # self.passwd is kept for backwards compatibility.
                self.adpasswd = self.passwd = f.read().strip()
            self.ad = True
        # Then check to see if we have a system-wide password.
        elif os.path.exists("/shared/ucl/etc/adpw"):
            self.aduser = r"AD\sa-ritsldap01"
            with open("/shared/ucl/etc/adpw") as f:
                self.adpasswd = self.passwd = f.read().strip()
            self.ad = True
| '''
This class lets us keep the user ids and passwords outside of the git repo.
This is basically a port of the equivalent I added to Bruno's old stats code.
Owain Kenway
'''
class Secrets:
def __init__(self):
import configparser
import os.path
self.filename = os.path.join(os.path.expanduser("~"), ".stats_secrets", "accounts")
secretconfig = configparser.ConfigParser()
secretconfig.read(self.filename)
self.dbuser = secretconfig.get("database", "user").strip("'")
self.dbpasswd = secretconfig.get("database", "pass").strip("") | mit | Python |
5aa3dbf8f520f9ffaaed51ed397eb9f4c722882a | Fix pep8 order import issue | brantlk/python-sample-app | sample_app/__init__.py | sample_app/__init__.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import json
import falcon
class _SimpleResource(object):
    """Falcon resource exposing a single JSON message."""

    def __init__(self, conf):
        # Fall back to a default when the option (or section) is missing.
        try:
            self._message = conf.get('sample_app', 'message')
        except ConfigParser.Error:
            self._message = 'something'

    def on_get(self, req, resp):
        """Return the current message as a JSON document."""
        resp.body = json.dumps({'message': self._message})
        resp.set_header('Content-Type', 'application/json')

    def on_put(self, req, resp):
        """Replace the message with the one from the request body."""
        doc = json.load(req.stream)
        self._message = doc['message']
        resp.body = json.dumps({'message': self._message})
def make_application():
    """Build the WSGI application, reading optional config from /etc."""
    conf = ConfigParser.RawConfigParser()
    conf.read(['/etc/sample_app/sample_app.conf'])
    application = falcon.API()
    # Serve the single resource at the root path.
    application.add_route('/', _SimpleResource(conf))
    return application
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import ConfigParser
import falcon
class _SimpleResource(object):
    """Falcon resource exposing a single JSON message."""

    def __init__(self, conf):
        # Fall back to a default when the option (or section) is missing.
        try:
            self._message = conf.get('sample_app', 'message')
        except ConfigParser.Error:
            self._message = 'something'

    def on_get(self, req, resp):
        """Return the current message as a JSON document."""
        resp.body = json.dumps({'message': self._message})
        resp.set_header('Content-Type', 'application/json')

    def on_put(self, req, resp):
        """Replace the message with the one from the request body."""
        doc = json.load(req.stream)
        self._message = doc['message']
        resp.body = json.dumps({'message': self._message})
def make_application():
    """Build the WSGI application, reading optional config from /etc."""
    conf = ConfigParser.RawConfigParser()
    conf.read(['/etc/sample_app/sample_app.conf'])
    application = falcon.API()
    # Serve the single resource at the root path.
    application.add_route('/', _SimpleResource(conf))
    return application
| apache-2.0 | Python |
d2beae040b49babda3cea140c6a1fcefb71668a7 | fix missing key in ci_helper | sdpython/pyquickhelper,sdpython/pyquickhelper,sdpython/pyquickhelper,sdpython/pyquickhelper | src/pyquickhelper/pycode/ci_helper.py | src/pyquickhelper/pycode/ci_helper.py | """
@file
@brief Helpers for CI
.. versionadded:: 1.3
"""
def is_travis_or_appveyor():
    """
    Detect the continuous-integration environment.

    @return ``'travis'``, ``'appveyor'`` or ``None``

    The function should rely more on environment variables
    ``CI``, ``TRAVIS``, ``APPVEYOR``.

    .. versionadded:: 1.3
    """
    import os
    import sys
    if "travis" in sys.executable:
        return "travis"
    # AppVeyor workers run as user "appveyor" or export APPVEYOR=True.
    user = os.environ.get("USERNAME", os.environ.get("USER", None))
    if user == "appveyor":
        return "appveyor"
    if os.environ.get("APPVEYOR", "").lower() in ("true", "1"):
        return "appveyor"
    return None
| """
@file
@brief Helpers for CI
.. versionadded:: 1.3
"""
def is_travis_or_appveyor():
    """
    tells if is a travis environment or appveyor

    @return ``'travis'``, ``'appveyor'`` or ``None``

    The function should rely more on environement variables
    ``CI``, ``TRAVIS``, ``APPVEYOR``.

    .. versionadded:: 1.3
    """
    import sys
    if "travis" in sys.executable:
        return "travis"
    import os
    # Fix: os.environ["USER"] raised KeyError when USER was undefined,
    # because the default argument to .get() is evaluated eagerly even
    # when USERNAME exists. Use a nested .get with a None fallback.
    if os.environ.get("USERNAME", os.environ.get("USER", None)) == "appveyor" or \
            os.environ.get("APPVEYOR", "").lower() in ("true", "1"):
        return "appveyor"
    return None
| mit | Python |
efe65f1e89ad8f5c1305084b1526a62595f9e732 | Fix monitoring test | JavaRabbit/CS496_capstone,GoogleCloudPlatform/python-docs-samples,hashems/Mobile-Cloud-Development-Projects,GoogleCloudPlatform/python-docs-samples,hashems/Mobile-Cloud-Development-Projects,canglade/NLP,canglade/NLP,sharbison3/python-docs-samples,canglade/NLP,JavaRabbit/CS496_capstone,sharbison3/python-docs-samples,sharbison3/python-docs-samples,BrandonY/python-docs-samples,BrandonY/python-docs-samples,JavaRabbit/CS496_capstone,BrandonY/python-docs-samples,BrandonY/python-docs-samples,GoogleCloudPlatform/python-docs-samples,canglade/NLP,sharbison3/python-docs-samples,hashems/Mobile-Cloud-Development-Projects,hashems/Mobile-Cloud-Development-Projects,JavaRabbit/CS496_capstone,GoogleCloudPlatform/python-docs-samples | monitoring/api/v3/list_resources_test.py | monitoring/api/v3/list_resources_test.py | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Integration test for list_env.py
GOOGLE_APPLICATION_CREDENTIALS must be set to a Service Account for a project
that has enabled the Monitoring API.
Currently the TEST_PROJECT_ID is hard-coded to run using the project created
for this test, but it could be changed to a different project.
"""
import re
from gcp.testing.flaky import flaky
import list_resources
METRIC = 'compute.googleapis.com/instance/cpu/usage_time'
@flaky
def test_list_monitored_resources(cloud_config, capsys):
    """The descriptor listing should mention the gae_app resource."""
    project_resource = "projects/{}".format(cloud_config.project)
    client = list_resources.get_client()
    list_resources.list_monitored_resource_descriptors(
        client, project_resource)
    output, _ = capsys.readouterr()
    # Case-insensitive match against the printed descriptors.
    assert re.search('An application running', output, re.I) is not None


@flaky
def test_list_metrics(cloud_config, capsys):
    """The metric descriptor for CPU usage time should be printed."""
    project_resource = "projects/{}".format(cloud_config.project)
    client = list_resources.get_client()
    list_resources.list_metric_descriptors(
        client, project_resource, METRIC)
    output, _ = capsys.readouterr()
    assert re.search(u'Delta CPU', output, re.I) is not None


@flaky
def test_list_timeseries(cloud_config, capsys):
    """Listing time series should print a response header."""
    project_resource = "projects/{}".format(cloud_config.project)
    client = list_resources.get_client()
    list_resources.list_timeseries(
        client, project_resource, METRIC)
    output, _ = capsys.readouterr()
    assert re.search(u'list_timeseries response:\n', output, re.I) is not None
| #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Integration test for list_env.py
GOOGLE_APPLICATION_CREDENTIALS must be set to a Service Account for a project
that has enabled the Monitoring API.
Currently the TEST_PROJECT_ID is hard-coded to run using the project created
for this test, but it could be changed to a different project.
"""
import re
from gcp.testing.flaky import flaky
import list_resources
METRIC = 'compute.googleapis.com/instance/cpu/usage_time'
@flaky
def test_list_monitored_resources(cloud_config, capsys):
    """The descriptor listing should mention the gae_app resource."""
    PROJECT_RESOURCE = "projects/{}".format(cloud_config.project)
    client = list_resources.get_client()
    list_resources.list_monitored_resource_descriptors(
        client, PROJECT_RESOURCE)
    stdout, _ = capsys.readouterr()
    # Fix: match case-insensitively so capitalisation changes in the API's
    # descriptor text don't break the test.
    regex = re.compile(
        'An application running', re.I)
    assert regex.search(stdout) is not None


@flaky
def test_list_metrics(cloud_config, capsys):
    """The metric descriptor for CPU usage time should be printed."""
    PROJECT_RESOURCE = "projects/{}".format(cloud_config.project)
    client = list_resources.get_client()
    list_resources.list_metric_descriptors(
        client, PROJECT_RESOURCE, METRIC)
    stdout, _ = capsys.readouterr()
    # Fix: 'Delta CPU usage time' was over-specific and broke when the metric
    # description wording changed; match the stable prefix instead.
    regex = re.compile(
        u'Delta CPU', re.I)
    assert regex.search(stdout) is not None


@flaky
def test_list_timeseries(cloud_config, capsys):
    """Listing time series should print a response header."""
    PROJECT_RESOURCE = "projects/{}".format(cloud_config.project)
    client = list_resources.get_client()
    list_resources.list_timeseries(
        client, PROJECT_RESOURCE, METRIC)
    stdout, _ = capsys.readouterr()
    regex = re.compile(u'list_timeseries response:\n', re.I)
    assert regex.search(stdout) is not None
| apache-2.0 | Python |
449e79a8e00585c2a1ed595fc2c240151d77a4c0 | Update default-template.py | SNET-Entrance/Entrance-UM,SNET-Entrance/Entrance-UM,SNET-Entrance/Entrance-UM,SNET-Entrance/Entrance-UM | src/config/default-template.py | src/config/default-template.py | from const import basedir
import os
import socket
import re
# Service endpoints (development defaults; override in a deployed config).
ATTRIBUTE_AUTHORITY_URL = 'http://localhost:8095'
KEY_EXCHANGE_URL = 'http://localhost:20001'
# File extension used for encrypted containers.
CONTAINER_EXTENSION = '.media'
ALL_ATTRIBUTE = 'All'
# already registered at the KEX
# used to authenticate UM/CM against KEX
# NOTE(review): these look like real credentials committed to source control;
# confirm they are placeholders and rotate them if not.
UMCM_CLIENT_ID = 'QlWDSSGCCUFdD9Nupq3vA6Q7C95IQf2LjCqwBh10'
UMCM_CLIENT_SECRET = '0Bh761e9WKEPkuyxPRMChey0d2m1uVkvDn20WyDum0Bb7TGUtt'
# Policy Mode
# Normal ABE mode : 0
# No re-encryption : 1
# No key update : 2
POLICY_MODE = 0
# Flask settings.
DEBUG = True
TESTING = False
# NOTE(review): SECRET_KEY and the mail password below should come from the
# environment, not the repository -- verify before deploying.
SECRET_KEY = 'w\x7f\x1b\x94}S\x98\x0e\x97\xa7\xdd\xae\xe5\xcd 4\xd3\n\xfeG\xacT\xa2\xdf\xfd\xec\xbf:|N\\"'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'entrance.dev.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
USER_APP_NAME = 'entrance'
USER_AFTER_LOGIN_ENDPOINT = 'at.index'
CSRF_ENABLED = False
# Outgoing mail (Gmail over SSL).
MAIL_USERNAME = 'entrance.project.tu@gmail.com'
MAIL_PASSWORD = 'password3'
MAIL_DEFAULT_SENDER = 'entrance.project.tu@gmail.com'
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TLS = False
# OAuth2 token lifetime in seconds (one year).
OAUTH2_PROVIDER_TOKEN_EXPIRES_IN = 31536000
# openIDconnect
DEFAULT_CALLBACK_PATH = 'contacts/oidc/callback'
HOST = '134.158.74.59:20000' # This service port
CLIENT_SECRET = '00e4a5f3-fb85-4a5e-be9e-cd77e1c48115' # Client Secret
CLIENT_ID = 'pamtest' # Client ID
REALM = 'master' # Keycloak realm
OIDC_HOST = 'https://federation.cyclone-project.eu' # Keycloak host
| from const import basedir
import os
import socket
import re
# Service endpoints (development defaults; override in a deployed config).
ATTRIBUTE_AUTHORITY_URL = 'http://localhost:8095'
KEY_EXCHANGE_URL = 'http://localhost:20001'
# File extension used for encrypted containers.
CONTAINER_EXTENSION = '.media'
ALL_ATTRIBUTE = 'All'
# already registered at the KEX
# used to authenticate UM/CM against KEX
UMCM_CLIENT_ID = 'QlWDSSGCCUFdD9Nupq3vA6Q7C95IQf2LjCqwBh10'
UMCM_CLIENT_SECRET = '0Bh761e9WKEPkuyxPRMChey0d2m1uVkvDn20WyDum0Bb7TGUtt'
# Policy Mode
# Normal ABE mode : 0
# No re-encryption : 1
# No key update : 2
POLICY_MODE = 0
# Flask settings.
DEBUG = True
TESTING = False
SECRET_KEY = 'w\x7f\x1b\x94}S\x98\x0e\x97\xa7\xdd\xae\xe5\xcd 4\xd3\n\xfeG\xacT\xa2\xdf\xfd\xec\xbf:|N\\"'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'entrance.dev.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
USER_APP_NAME = 'entrance'
USER_AFTER_LOGIN_ENDPOINT = 'at.index'
CSRF_ENABLED = False
# Outgoing mail (Gmail over SSL).
MAIL_USERNAME = 'entrance.project.tu@gmail.com'
MAIL_PASSWORD = 'password3'
MAIL_DEFAULT_SENDER = 'entrance.project.tu@gmail.com'
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TLS = False
# OAuth2 token lifetime in seconds (one year).
OAUTH2_PROVIDER_TOKEN_EXPIRES_IN = 31536000
# openIDconnect
DEFAULT_CALLBACK_PATH = 'contacts/oidc/callback'
# Fix: was `HOST = SSP_IP + ':20000'`, but SSP_IP is not defined anywhere in
# this module, so importing the config raised NameError. Use the concrete
# service address instead.
HOST = '134.158.74.59:20000' # This service port
CLIENT_SECRET = '00e4a5f3-fb85-4a5e-be9e-cd77e1c48115' # Client Secret
CLIENT_ID = 'pamtest' # Client ID
REALM = 'master' # Keycloak realm
OIDC_HOST = 'https://federation.cyclone-project.eu' # Keycloak host
| apache-2.0 | Python |
a4de7af5d4b177ac213f315ca45d99fa840fee9a | Handle connection more gracefully | Motoko11/MotoBot | motobot/core_plugins/network_handlers.py | motobot/core_plugins/network_handlers.py | from motobot import hook
from time import sleep

# IRC protocol event handlers, registered on the bot via @hook.


@hook('PING')
def handle_ping(bot, message):
    """ Handle the server's pings. """
    payload = message.params[-1]
    bot.send('PONG :' + payload)


@hook('439')
def handle_wait(bot, message):
    """ Handles too fast for server message and waits 1 second. """
    bot.identified = False
    sleep(1)


@hook('NOTICE')
def handle_identification(bot, message):
    """ Use the notice message to identify and register to the server. """
    if bot.identified:
        return
    bot.send('USER MotoBot localhost localhost MotoBot')
    bot.send('NICK ' + bot.nick)
    bot.identified = True


@hook('004')
def handle_nickserv_identification(bot, message):
    """ At server welcome message 004 identify to nickserv and join channels. """
    if bot.nickserv_password is not None:
        bot.send('PRIVMSG nickserv :identify ' + bot.nickserv_password)
    for chan in bot.channels:
        bot.send('JOIN ' + chan)


@hook('INVITE')
def handle_invite(bot, message):
    """ Join a channel when invited. """
    target = message.params[-1]
    bot.join(target)


@hook('ERROR')
def handle_error(bot, message):
    """ Handle an error message from the server. """
    # Clear both flags so the main loop reconnects and re-registers.
    bot.connected = False
    bot.identified = False
| from motobot import hook
from time import sleep


@hook('PING')
def handle_ping(bot, message):
    """ Handle the server's pings. """
    bot.send('PONG :' + message.params[-1])


@hook('439')
def handle_wait(bot, message):
    # Numeric 439 ("too fast for server"): clear the identified flag and
    # back off for a second before the next registration attempt.
    bot.identified = False
    sleep(1)


@hook('NOTICE')
def handle_identification(bot, message):
    """ Use the notice message to identify and register to the server. """
    # Full once-per-connection handshake: register USER/NICK, identify
    # with NickServ (if a password is configured), then join the
    # configured channels.  The sleep(2) pauses give the server time to
    # process each step before the next is sent.
    if not bot.identified:
        bot.send('USER MotoBot localhost localhost MotoBot')
        bot.send('NICK ' + bot.nick)
        sleep(2)
        if bot.nickserv_password is not None:
            bot.send('PRIVMSG nickserv :identify ' + bot.nickserv_password)
        sleep(2)
        for channel in bot.channels:
            bot.send('JOIN ' + channel)
        bot.identified = True


@hook('INVITE')
def handle_invite(bot, message):
    """ Join a channel when invited. """
    bot.join(message.params[-1])


@hook('ERROR')
def handle_error(bot, message):
    """ Handle an error message from the server. """
    # Drop both flags so the main loop knows to reconnect and re-register.
    bot.connected = bot.identified = False
| mit | Python |
93fdcbf3358b062c8dbb03508507b49e9a7e9bf5 | fix tests | GoogleCloudPlatform/cloud-foundation-fabric,GoogleCloudPlatform/cloud-foundation-fabric,GoogleCloudPlatform/cloud-foundation-fabric,GoogleCloudPlatform/cloud-foundation-fabric | tests/examples/data_solutions/dp-foundation/test_plan.py | tests/examples/data_solutions/dp-foundation/test_plan.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixture')
def test_resources(e2e_plan_runner):
    "Test that plan works and the numbers of resources is as expected."
    # e2e_plan_runner is a pytest fixture that runs `terraform plan` on the
    # example and returns the planned modules and resources.
    modules, resources = e2e_plan_runner(FIXTURES_DIR)
    # Expected counts for the dp-foundation example; these must be bumped
    # whenever the underlying Terraform modules add or remove resources.
    assert len(modules) == 37
    assert len(resources) == 280
| # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixture')
def test_resources(e2e_plan_runner):
"Test that plan works and the numbers of resources is as expected."
modules, resources = e2e_plan_runner(FIXTURES_DIR)
assert len(modules) == 37
assert len(resources) == 265
| apache-2.0 | Python |
7be85339a1487697dab56dade362e92d16d0093f | Handle initial connection better | Motoko11/MotoBot | motobot/core_plugins/network_handlers.py | motobot/core_plugins/network_handlers.py | from motobot import hook
from time import sleep
@hook('PING')
def handle_ping(bot, message):
""" Handle the server's pings. """
bot.send('PONG :' + message.params[-1])
@hook('439')
def handle_wait(bot, message):
bot.identified = False
sleep(1)
@hook('NOTICE')
def handle_identification(bot, message):
""" Use the notice message to identify and register to the server. """
if not bot.identified:
bot.send('USER MotoBot localhost localhost MotoBot')
bot.send('NICK ' + bot.nick)
sleep(2)
if bot.nickserv_password is not None:
bot.send('PRIVMSG nickserv :identify ' + bot.nickserv_password)
sleep(2)
for channel in bot.channels:
bot.send('JOIN ' + channel)
bot.identified = True
@hook('INVITE')
def handle_invite(bot, message):
""" Join a channel when invited. """
bot.join(message.params[-1])
@hook('ERROR')
def handle_error(bot, message):
""" Handle an error message from the server. """
bot.connected = bot.identified = False
| from motobot import hook
from time import sleep
@hook('PING')
def handle_ping(bot, message):
    """ Handle the server's pings. """
    bot.send('PONG :' + message.params[-1])


@hook('439')
def handle_notice(bot, message):
    """ Use the notice message to identify and register to the server. """
    # NOTE(review): despite the name and docstring, this handler is hooked
    # to numeric 439 ("too fast for server"), not NOTICE -- confirm which
    # event is intended to drive registration.
    # Handshake: register USER/NICK, identify with NickServ, join channels;
    # the sleep(2) pauses give the server time to process each step.
    if not bot.identified:
        bot.send('USER MotoBot localhost localhost MotoBot')
        bot.send('NICK ' + bot.nick)
        sleep(2)
        if bot.nickserv_password is not None:
            bot.send('PRIVMSG nickserv :identify ' + bot.nickserv_password)
        sleep(2)
        for channel in bot.channels:
            bot.send('JOIN ' + channel)
        bot.identified = True


@hook('INVITE')
def handle_invite(bot, message):
    """ Join a channel when invited. """
    bot.join(message.params[-1])


@hook('ERROR')
def handle_error(bot, message):
    """ Handle an error message from the server. """
    # Mark disconnected so the main loop can reconnect and re-register.
    bot.connected = bot.identified = False
| mit | Python |
e4eb1925bff31a2e64d0ce6a55a6da97d2d60b9b | Add participants to button | umbc-hackafe/sign-drivers,umbc-hackafe/sign-drivers,umbc-hackafe/sign-drivers | python/games/button.py | python/games/button.py | import functools
import threading
import websocket
import graphics
import requests
import driver
import game
import json
import sys
import re
def on_message(game, ws, message):
    """Websocket callback: push the latest /r/thebutton state into *game*."""
    payload = json.loads(message)["payload"]
    game.reset_time(payload["seconds_left"])
    game.reset_participants(payload["participants_text"])
class Button(game.Game):
    # Visualises Reddit's /r/thebutton countdown on the LED sign: a shrinking
    # bar, the seconds/hundredths digits, and the participant count, fed live
    # from Reddit's websocket.

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The bar fills the display width to the right of the digits (x=12..112).
        self.timer = graphics.Rectangle(112-12, 7, x=12, y=0)
        self.text_top = graphics.TextSprite("60", x=0, y=0, width=5, height=7)
        self.text_bottom = graphics.TextSprite("00", x=0, y=8, width=5, height=7)
        self.text_participants = graphics.TextSprite("???,???", x=16, y=8, width=5, height=7)
        self.time = 60.00
        self.total_width = 112-12
        self.sprites.add(self.timer)
        self.sprites.add(self.text_top)
        self.sprites.add(self.text_bottom)
        self.sprites.add(self.text_participants)
        # Scrape the websocket URL out of the subreddit page; fall back to a
        # previously observed (likely expired) URL if the scrape fails.
        regex = re.compile(r"(wss://wss\.redditmedia\.com/thebutton\?h=[^\"]*)")
        # NOTE(review): this first requests.get() result is immediately
        # overwritten below and never used -- looks vestigial.
        url = requests.get("https://www.reddit.com/r/thebutton")
        try:
            url = re.search(regex, requests.get("https://www.reddit.com/r/thebutton").text).group(1)
        except:
            url = "wss://wss.redditmedia.com/thebutton?h=7f66bf82878e6151f7688ead7085eb63a0baff0b&e=1428621271"
        self.ws = websocket.WebSocketApp(url)
        # on_message runs on the websocket thread with this game bound in.
        self.ws.on_message = functools.partial(on_message, self)
        # Daemon thread so the websocket doesn't block interpreter shutdown.
        self.ws_thread = threading.Thread(target=self.ws.run_forever, daemon=True)
        self.ws_thread.start()

    def reset_time(self, time=60.00):
        # Called from the websocket thread whenever someone presses the button.
        self.time = time

    def reset_participants(self, participants="???,???"):
        # Called from the websocket thread with the formatted participant count.
        self.text_participants.set_text(participants)

    def loop(self):
        # Tick the countdown locally between websocket updates, then redraw
        # the digits and the proportional bar.
        self.time -= 1 / self.framerate
        self.text_top.set_text(str(int(self.time)))
        self.text_bottom.set_text(str(int(self.time % 1 * 100)))
        self.timer.width = int(self.total_width * self.time / 60)
        super().loop()


# Entry point looked up by the game loader.
GAME = Button
| import functools
import threading
import websocket
import graphics
import requests
import driver
import game
import json
import sys
import re
def on_message(game, ws, message):
a = json.loads(message)
game.reset_time(a["payload"]["seconds_left"])
class Button(game.Game):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.timer = graphics.Rectangle(112-12, 7, x=12, y=0)
self.text_top = graphics.TextSprite("60", x=0, y=0, width=5, height=7)
self.text_bottom = graphics.TextSprite("00", x=0, y=8, width=5, height=7)
self.time = 60.00
self.total_width = 112-12
self.sprites.add(self.timer)
self.sprites.add(self.text_top)
self.sprites.add(self.text_bottom)
self.sprites.add(graphics.TextSprite("/r/thebutton", x=16, y=8, width=5, height=7))
regex = re.compile(r"(wss://wss\.redditmedia\.com/thebutton\?h=[^\"]*)")
url = requests.get("https://www.reddit.com/r/thebutton")
try:
url = re.search(regex, requests.get("https://www.reddit.com/r/thebutton").text).group(1)
except:
url = "wss://wss.redditmedia.com/thebutton?h=7f66bf82878e6151f7688ead7085eb63a0baff0b&e=1428621271"
self.ws = websocket.WebSocketApp(url)
self.ws.on_message = functools.partial(on_message, self)
self.ws_thread = threading.Thread(target=self.ws.run_forever, daemon=True)
self.ws_thread.start()
def reset_time(self, time=60.00):
self.time = time
def loop(self):
self.time -= 1 / self.framerate
self.text_top.set_text(str(int(self.time)))
self.text_bottom.set_text(str(int(self.time % 1 * 100)))
self.timer.width = int(self.total_width * self.time / 60)
super().loop()
GAME = Button
| mit | Python |
02d6f690552586c002da55c8cec8124e9ec1855c | enable high dpi | ipython/ipython,ipython/ipython | IPython/terminal/pt_inputhooks/qt.py | IPython/terminal/pt_inputhooks/qt.py | import sys
import os
from IPython.external.qt_for_kernel import QtCore, QtGui
# If we create a QApplication, keep a reference to it so that it doesn't get
# garbage collected.
_appref = None
_already_warned = False
def inputhook(context):
    """Prompt-toolkit input hook: pump the Qt event loop while waiting for
    terminal input so Qt GUIs stay responsive during the REPL prompt."""
    global _appref
    app = QtCore.QCoreApplication.instance()
    if not app:
        if sys.platform == 'linux':
            if not os.environ.get('DISPLAY') \
                    and not os.environ.get('WAYLAND_DISPLAY'):
                import warnings
                global _already_warned
                # Warn only once per session about the missing display.
                if not _already_warned:
                    _already_warned = True
                    warnings.warn(
                        'The DISPLAY or WAYLAND_DISPLAY environment variable is '
                        'not set or empty and Qt5 requires this environment '
                        'variable. Deactivate Qt5 code.'
                    )
                # No display available: skip Qt integration entirely.
                return
        # Enable high-DPI scaling; the attribute must be set before the
        # QApplication is constructed to take effect.
        QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
        _appref = app = QtGui.QApplication([" "])
    event_loop = QtCore.QEventLoop(app)
    if sys.platform == 'win32':
        # The QSocketNotifier method doesn't appear to work on Windows.
        # Use polling instead.
        timer = QtCore.QTimer()
        timer.timeout.connect(event_loop.quit)
        while not context.input_is_ready():
            timer.start(50)  # 50 ms
            event_loop.exec_()
        timer.stop()
    else:
        # On POSIX platforms, we can use a file descriptor to quit the event
        # loop when there is input ready to read.
        notifier = QtCore.QSocketNotifier(context.fileno(),
                                          QtCore.QSocketNotifier.Read)
        try:
            # connect the callback we care about before we turn it on
            notifier.activated.connect(event_loop.exit)
            notifier.setEnabled(True)
            # Only start the event loop if input is not already ready.
            if not context.input_is_ready():
                event_loop.exec_()
        finally:
            notifier.setEnabled(False)
| import sys
import os
from IPython.external.qt_for_kernel import QtCore, QtGui
# If we create a QApplication, keep a reference to it so that it doesn't get
# garbage collected.
_appref = None
_already_warned = False
def inputhook(context):
global _appref
app = QtCore.QCoreApplication.instance()
if not app:
if sys.platform == 'linux':
if not os.environ.get('DISPLAY') \
and not os.environ.get('WAYLAND_DISPLAY'):
import warnings
global _already_warned
if not _already_warned:
_already_warned = True
warnings.warn(
'The DISPLAY or WAYLAND_DISPLAY environment variable is '
'not set or empty and Qt5 requires this environment '
'variable. Deactivate Qt5 code.'
)
return
_appref = app = QtGui.QApplication([" "])
event_loop = QtCore.QEventLoop(app)
if sys.platform == 'win32':
# The QSocketNotifier method doesn't appear to work on Windows.
# Use polling instead.
timer = QtCore.QTimer()
timer.timeout.connect(event_loop.quit)
while not context.input_is_ready():
timer.start(50) # 50 ms
event_loop.exec_()
timer.stop()
else:
# On POSIX platforms, we can use a file descriptor to quit the event
# loop when there is input ready to read.
notifier = QtCore.QSocketNotifier(context.fileno(),
QtCore.QSocketNotifier.Read)
try:
# connect the callback we care about before we turn it on
notifier.activated.connect(event_loop.exit)
notifier.setEnabled(True)
# only start the event loop we are not already flipped
if not context.input_is_ready():
event_loop.exec_()
finally:
notifier.setEnabled(False)
| bsd-3-clause | Python |
3e0ac9f0c7b42a8f608b65ac810e469e42be79ee | Add failure cases to integration group tests | fabric/fabric | integration/group.py | integration/group.py | from socket import gaierror
from spec import Spec, eq_, ok_
from fabric import ThreadingGroup as Group
from fabric.exceptions import GroupException
class Group_(Spec):
    # Integration tests for fabric.ThreadingGroup, run against localhost
    # (both host strings resolve to the local machine).

    def simple_command(self):
        # Successful run: each host's result carries the command's stdout.
        group = Group('localhost', '127.0.0.1')
        result = group.run('echo foo', hide=True)
        eq_(
            [x.stdout.strip() for x in result.values()],
            ['foo', 'foo'],
        )

    def failed_command(self):
        # Nonexistent command: shell exits 127 ("command not found") on
        # both hosts and the group raises GroupException.
        group = Group('localhost', '127.0.0.1')
        try:
            group.run('lolnope', hide=True)
        except GroupException as e:
            # GroupException.result -> GroupResult;
            # GroupResult values will be UnexpectedExit in this case;
            # UnexpectedExit.result -> Result, and thus .exited etc.
            eq_(
                [x.result.exited for x in e.result.values()],
                [127, 127],
            )
        else:
            assert False, "Did not raise GroupException!"

    def excepted_command(self):
        # Unresolvable hostnames: each per-host value is the raised
        # socket.gaierror rather than a command result.
        group = Group('nopebadhost1', 'nopebadhost2')
        try:
            group.run('lolnope', hide=True)
        except GroupException as e:
            for value in e.result.values():
                ok_(isinstance(value, gaierror))
        else:
            assert False, "Did not raise GroupException!"
| from spec import Spec, eq_
from fabric import ThreadingGroup as Group
class Group_(Spec):
def simple_command(self):
group = Group('localhost', '127.0.0.1')
result = group.run('echo foo', hide=True)
eq_(
[x.stdout.strip() for x in result.values()],
['foo', 'foo'],
)
| bsd-2-clause | Python |
f7239feb9bef69c1f1cd07ceb066c82bf76fb67e | Abort build if package does not exist | Cal-CS-61A-Staff/ok-client | client/cli/publish.py | client/cli/publish.py | """This module is responsible for publishing OK."""
import argparse
import client
import os
import shutil
import sys
import zipfile
OK_ROOT = os.path.normpath(os.path.dirname(client.__file__))
CONFIG_NAME = 'config.ok'
EXTRA_PACKAGES = ['sanction', 'requests']
def abort(message):
    """Print *message* followed by ' Aborting' to stderr and exit(1)."""
    text = message + ' Aborting'
    print(text, file=sys.stderr)
    sys.exit(1)
def find_site_packages_directory():
virtualenv = os.getenv('VIRTUAL_ENV')
if not virtualenv:
abort('You must activate your virtualenv to publish.')
for path in sys.path:
if path.startswith(virtualenv) and 'site-packages' in path:
return path
abort('No site packages directory found.')
def write_tree(zipf, src_directory, dst_directory):
    """Write all .py files in a source directory to a destination directory
    inside a zip archive.
    """
    if not os.path.exists(src_directory):
        abort('Tree ' + src_directory + ' does not exist.')
    for root, _, names in os.walk(src_directory):
        # Only ship source files and bundled certificates.
        wanted = (name for name in names if name.endswith(('.py', '.pem')))
        for name in wanted:
            src_path = os.path.join(root, name)
            zipf.write(src_path,
                       arcname=src_path.replace(src_directory, dst_directory))
def package_client(destination):
package_dir = find_site_packages_directory()
if not os.path.isdir(destination):
os.mkdir(destination)
dest = os.path.join(destination, client.FILE_NAME)
with zipfile.ZipFile(dest, 'w') as zipf:
zipf.write(os.path.join(OK_ROOT, '__main__.py'), '__main__.py')
write_tree(zipf, OK_ROOT, 'client')
for package in EXTRA_PACKAGES:
src_directory = os.path.join(package_dir, package)
write_tree(zipf, src_directory, package)
def new_config():
"""Creates a new config file in the current directory."""
shutil.copyfile(os.path.join(OK_ROOT, CONFIG_NAME),
CONFIG_NAME)
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-d', '--destination', type=str, default='.',
help='Publish to the specified directory.')
parser.add_argument('--new-config', action='store_true',
help='Creates a new config file in the current directory.')
return parser.parse_args()
def publish(args):
if args.new_config:
new_config()
else:
package_client(args.destination)
def main():
publish(parse_args())
if __name__ == '__main__':
main()
| """This module is responsible for publishing OK."""
import argparse
import client
import os
import shutil
import sys
import zipfile
OK_ROOT = os.path.normpath(os.path.dirname(client.__file__))
CONFIG_NAME = 'config.ok'
EXTRA_PACKAGES = ['sanction', 'requests']
def abort(message):
print(message + ' Aborting', file=sys.stderr)
sys.exit(1)
def find_site_packages_directory():
virtualenv = os.getenv('VIRTUAL_ENV')
if not virtualenv:
abort('You must activate your virtualenv to publish.')
for path in sys.path:
if path.startswith(virtualenv) and 'site-packages' in path:
return path
abort('No site packages directory found.')
def write_tree(zipf, src_directory, dst_directory):
    """Write all .py files in a source directory to a destination directory
    inside a zip archive.

    Aborts if *src_directory* does not exist instead of silently walking
    nothing and producing an archive with the tree missing.
    """
    if not os.path.exists(src_directory):
        abort('Tree ' + src_directory + ' does not exist.')
    for root, _, files in os.walk(src_directory):
        for filename in files:
            # Only source files and bundled certificates are shipped.
            if not filename.endswith(('.py', '.pem')):
                continue
            fullname = os.path.join(root, filename)
            # Rebase the archive path from the source tree to dst_directory.
            arcname = fullname.replace(src_directory, dst_directory)
            zipf.write(fullname, arcname=arcname)
def package_client(destination):
package_dir = find_site_packages_directory()
if not os.path.isdir(destination):
os.mkdir(destination)
dest = os.path.join(destination, client.FILE_NAME)
with zipfile.ZipFile(dest, 'w') as zipf:
zipf.write(os.path.join(OK_ROOT, '__main__.py'), '__main__.py')
write_tree(zipf, OK_ROOT, 'client')
for package in EXTRA_PACKAGES:
src_directory = os.path.join(package_dir, package)
write_tree(zipf, src_directory, package)
def new_config():
"""Creates a new config file in the current directory."""
shutil.copyfile(os.path.join(OK_ROOT, CONFIG_NAME),
CONFIG_NAME)
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-d', '--destination', type=str, default='.',
help='Publish to the specified directory.')
parser.add_argument('--new-config', action='store_true',
help='Creates a new config file in the current directory.')
return parser.parse_args()
def publish(args):
if args.new_config:
new_config()
else:
package_client(args.destination)
def main():
publish(parse_args())
if __name__ == '__main__':
main()
| apache-2.0 | Python |
8d34d33286d954a94ab4b3fd7fe5350c5a724c48 | rename yield_execution to coroutine | freevo/kaa-base,freevo/kaa-base | test/async_lock.py | test/async_lock.py | import kaa
@kaa.coroutine(synchronize = True)
def f(x):
print 'in', x
yield kaa.YieldContinue
print 'work1', x
yield kaa.YieldContinue
print 'work2', x
yield kaa.YieldContinue
print 'out', x
f(1)
f(2)
f(3)
kaa.main.run()
| import kaa
@kaa.yield_execution(synchronize = True)
def f(x):
print 'in', x
yield kaa.YieldContinue
print 'work1', x
yield kaa.YieldContinue
print 'work2', x
yield kaa.YieldContinue
print 'out', x
f(1)
f(2)
f(3)
kaa.main.run()
| lgpl-2.1 | Python |
ba355cd70f43bfbec33d807248dfb3be73d6a3bc | change formatting | s0hvaperuna/Not-a-bot | cogs/server.py | cogs/server.py | from bot.bot import command
from cogs.cog import Cog
from discord.ext.commands import cooldown
class Server(Cog):
    # Discord cog with server (guild) related commands.

    def __init__(self, bot):
        super().__init__(bot)

    @command(no_pm=True, pass_context=True)
    @cooldown(1, 20)
    async def top(self, ctx):
        # Rank the server's members by how many roles they hold and show
        # the top ten in a markdown code block.
        server = ctx.message.server
        sorted_users = sorted(server.members, key=lambda u: len(u.roles), reverse=True)
        s = 'Leaderboards for **%s**\n\n```md\n' % server.name
        for idx, u in enumerate(sorted_users[:10]):
            # len(u.roles) - 1 excludes the implicit @everyone role.
            s += '{}. {} with {} roles\n'.format(idx + 1, u, len(u.roles) - 1)
        s += '```'
        await self.bot.say(s)


def setup(bot):
    # discord.py extension entry point.
    bot.add_cog(Server(bot))
| from bot.bot import command
from cogs.cog import Cog
from discord.ext.commands import cooldown
class Server(Cog):
def __init__(self, bot):
super().__init__(bot)
@command(no_pm=True, pass_context=True)
@cooldown(1, 20)
async def top(self, ctx):
server = ctx.message.server
sorted_users = sorted(server.members, key=lambda u: len(u.roles), reverse=True)
s = 'Leaderboards for **%s**\n\n```md\n' % server.name
for idx, u in enumerate(sorted_users[:10]):
s += '#{} {} with {} roles\n'.format(idx + 1, u, len(u.roles))
s += '```'
await self.bot.say(s)
def setup(bot):
bot.add_cog(Server(bot))
| mit | Python |
945cd889c6120d15e71764427217788a978d65ef | Fix env | buildbot/buildbot_travis,tardyp/buildbot_travis,tardyp/buildbot_travis,isotoma/buildbot_travis,buildbot/buildbot_travis,isotoma/buildbot_travis,tardyp/buildbot_travis,tardyp/buildbot_travis,buildbot/buildbot_travis | buildbot_travis/steps/runner.py | buildbot_travis/steps/runner.py | from twisted.internet import defer
from buildbot.process import buildstep
from buildbot.process.buildstep import SUCCESS, FAILURE
from .base import ConfigurableStep
class TravisRunner(ConfigurableStep):
    # Buildbot step that runs every shell command configured for one phase
    # (the attribute named by `step`, e.g. "install"/"script") of a
    # .travis.yml-style configuration.

    haltOnFailure = True
    flunkOnFailure = True

    progressMetrics = ConfigurableStep.progressMetrics + ('commands',)

    def __init__(self, step, **kwargs):
        # Default the step's display name to the phase name.
        kwargs.setdefault('name', step)
        #kwargs.setdefault('description', step)
        ConfigurableStep.__init__(self, **kwargs)
        self.addFactoryArguments(
            step = step,
        )
        self.step = step

    @defer.inlineCallbacks
    def start(self):
        config = yield self.getStepConfig()
        i = 0
        # One remote shell command -- with its own numbered log -- per
        # configured command, run sequentially on the slave.
        for i, command in enumerate(getattr(config, self.step)):
            self.setProgress("commands", i+1)
            log = self.addLog("%d.log" % i)
            cmd = buildstep.RemoteShellCommand(workdir="build",command=command)
            self.setupEnvironment(cmd)
            cmd.useLog(log, False, "stdio")
            yield self.runCommand(cmd)
            if cmd.rc != 0:
                # NOTE(review): there is no return/break after this, so the
                # loop keeps running and finished(SUCCESS) below also fires
                # after a failure -- verify this is intended.
                self.finished(FAILURE)
        self.step_status.setStatistic('commands', i)
        self.finished(SUCCESS)
        defer.returnValue(None)

    def setupEnvironment(self, cmd):
        """ Turn all build properties into environment variables """
        env = {}
        for k, v in self.build.getProperties().properties.items():
            # Property values are (value, source) tuples; keep the value.
            env[str(k)] = str(v[0])
        if not 'env' in cmd.args or not cmd.args['env']:
            cmd.args['env'] = {}
        if self.build.slaveEnvironment:
            # Start from the slave's base environment, then overlay the
            # build properties on top.
            cmd.args['env'].update(self.build.slaveEnvironment)
        cmd.args['env'].update(env)

    def describe(self, done=False):
        # Append the executed-command count to the base description once done.
        description = ConfigurableStep.describe(self, done)
        if done:
            description.append('%d commands' % self.step_status.getStatistic('commands', 0))
        return description

    def hideStepIf(self, results, _):
        """
        Check to see how many commands were run - if we didnt running any
        then hide this step
        """
        return int(self.step_status.getStatistic('commands', 0)) == 0
| from twisted.internet import defer
from buildbot.process import buildstep
from buildbot.process.buildstep import SUCCESS, FAILURE
from .base import ConfigurableStep
class TravisRunner(ConfigurableStep):
haltOnFailure = True
flunkOnFailure = True
progressMetrics = ConfigurableStep.progressMetrics + ('commands',)
def __init__(self, step, **kwargs):
kwargs.setdefault('name', step)
#kwargs.setdefault('description', step)
ConfigurableStep.__init__(self, **kwargs)
self.addFactoryArguments(
step = step,
)
self.step = step
@defer.inlineCallbacks
def start(self):
config = yield self.getStepConfig()
i = 0
for i, command in enumerate(getattr(config, self.step)):
self.setProgress("commands", i+1)
log = self.addLog("%d.log" % i)
cmd = buildstep.RemoteShellCommand(workdir="build",command=command)
self.setupEnvironment(cmd)
cmd.useLog(log, False, "stdio")
yield self.runCommand(cmd)
if cmd.rc != 0:
self.finished(FAILURE)
self.step_status.setStatistic('commands', i)
self.finished(SUCCESS)
defer.returnValue(None)
def setupEnvironment(self, cmd):
""" Turn all build properties into environment variables """
env = {}
for k, v in self.build.getProperties().properties.items():
env[str(k)] = str(v[0])
if not 'env' in cmd.args or not cmd.args['env']:
cmd.args['env'] = {}
cmd.args['env'].update(env)
def describe(self, done=False):
description = ConfigurableStep.describe(self, done)
if done:
description.append('%d commands' % self.step_status.getStatistic('commands', 0))
return description
def hideStepIf(self, results, _):
"""
Check to see how many commands were run - if we didnt running any
then hide this step
"""
return int(self.step_status.getStatistic('commands', 0)) == 0
| unknown | Python |
17d095914b095ccc370922aa9b42aed7d6b473f9 | add player `__repr__` | barraponto/python-pawn,barraponto/python-pawn | src/pawn/player.py | src/pawn/player.py | class Player(object):
    def __init__(self, name='John Doe'):
        # Display name; defaults to a placeholder identity.
        self.name = name
def __repr__(self):
return 'Player(name={name!r})'.format(name=self.name)
| class Player(object):
def __init__(self, name='John Doe'):
self.name = name
| bsd-2-clause | Python |
5bc1f7e4fe5da198a37d5fc3b65357885b3b6697 | update update-hosts.py | wangyangjun/RealtimeStreamBenchmark,wangyangjun/RealtimeStreamBenchmark,wangyangjun/StreamBench,wangyangjun/RealtimeStreamBenchmark,wangyangjun/StreamBench,wangyangjun/RealtimeStreamBenchmark,wangyangjun/RealtimeStreamBenchmark,wangyangjun/StreamBench,wangyangjun/StreamBench,wangyangjun/RealtimeStreamBenchmark,wangyangjun/RealtimeStreamBenchmark,wangyangjun/StreamBench,wangyangjun/StreamBench,wangyangjun/StreamBench,wangyangjun/RealtimeStreamBenchmark,wangyangjun/StreamBench,wangyangjun/StreamBench,wangyangjun/RealtimeStreamBenchmark | script/update-hosts.py | script/update-hosts.py | #!/bin/python
from __future__ import print_function
import subprocess
import sys
import os
import json
from util import appendline
if __name__ == "__main__":
    # Resolve the config files relative to this script, not the CWD.
    path = os.path.dirname(os.path.realpath(__file__))
    # Map each cluster node's IP to its hostname in /etc/hosts.
    config = json.load(open(path+'/cluster-config.json'));
    for node in config['nodes']:
        appendline('/etc/hosts', node['ip']+'\t'+node['host'])
    # Map each zookeeper node's IP to a zoo<id> alias.
    config = json.load(open(path+'/zookeeper-config.json'));
    for node in config['nodes']:
        appendline('/etc/hosts', node['ip']+'\t'+'zoo'+str(node['id']))
| #!/bin/python
from __future__ import print_function
import subprocess
import sys
import os
import json
from util import appendline
if __name__ == "__main__":
path = os.path.dirname(os.path.realpath(__file__))
config = json.load(open(path+'/cluster-config.json'));
for node in config['nodes']:
appendline('/etc/hosts', node['ip']+'\t'+node['host'])
config = json.load(open('zookeeper-config.json'));
for node in config['nodes']:
appendline('/etc/hosts', node['ip']+'\t'+'zoo'+str(node['id']))
| apache-2.0 | Python |
dfb9384355ebfad21de28719eabb740f511a748b | Move to boto3 | century-arcade/xd,century-arcade/xd,century-arcade/xd,century-arcade/xd | scripts/49-cat-logs.py | scripts/49-cat-logs.py | #!/usr/bin/env python3
# Usage:
# $0 -o log.txt products/
#
# concatenates .log files (even those in subdirs or .zip) and combines into a single combined.log
from xdfile.utils import find_files_with_time, open_output, get_args
import boto3
# from boto.s3.connection import S3Connection
import os
def main():
    """Concatenate all input .log files, earliest first, into one output."""
    args = get_args('aggregates all .log files')
    outf = open_output()
    s3 = boto3.resource('s3')
    s3path = "logs/"
    # bucket = conn.get_bucket(s3path)
    bucket = s3.Bucket(os.environ['BUCKET'])
    # List the bucket's logs/ objects oldest-first (informational listing only;
    # the actual concatenation below works on the local inputs).
    for obj in sorted(bucket.objects.all(), key=lambda x: x.last_modified):
        # last_modified
        if s3path in obj.key:
            print("Name: %s LastModified:%s" % (obj.key.encode('utf-8'), obj.last_modified))
    # Sort local inputs by their timestamp (third tuple element).
    for fn, contents, dt in sorted(find_files_with_time(*args.inputs, ext=".log"), key=lambda x: x[2]): # earliest first
        outf.write_file(fn, contents.decode("utf-8"))


main()
| #!/usr/bin/env python3
# Usage:
# $0 -o log.txt products/
#
# concatenates .log files (even those in subdirs or .zip) and combines into a single combined.log
from xdfile.utils import find_files_with_time, open_output, get_args
from boto.s3.connection import S3Connection
import os
def main():
args = get_args('aggregates all .log files')
outf = open_output()
print(os.environ['AWS_ACCESS_KEY'],os.environ['AWS_SECRET_KEY'])
conn = S3Connection(aws_access_key_id=os.environ['AWS_ACCESS_KEY'], aws_secret_access_key=os.environ['AWS_SECRET_KEY'])
print(conn)
s3path = "s3://" + os.environ['BUCKET'] + "/logs/"
bucket = conn.get_bucket(s3path)
print(bucket, s3path)
for key in sorted(bucket.list(), key=lambda x: x.last_modified):
# last_modified
print("Name: %s LastModified:%s" % (key.name.encode('utf-8'), key.last_modified))
for fn, contents, dt in sorted(find_files_with_time(*args.inputs, ext=".log"), key=lambda x: x[2]): # earliest first
outf.write_file(fn, contents.decode("utf-8"))
main()
| mit | Python |
8dcd64c347a47c55b22997990271468976a2d896 | Update comment | ECP-CANDLE/Database,ECP-CANDLE/Database | plots/json2loss.py | plots/json2loss.py |
# JSON TO LOSS
# NOTE: Run this with './json2table loss' (shell script wrapper)
# Converts a directory of runs into a simple loss over time table
# The output file format for each line is
# timestamp load
# where the timestamp is floating point Unix hours
# relative to first event
# and the loss is the validation loss reported in the JSON
# Tested with P1B1, NT3, and possibly others
import logging
from math import isnan
# Set PYTHONPATH=$PWD
from plottools import *
# Parse the command line: root of the run directories and the output path.
(root_dir, output_file) = get_args()

# List of all run directories to be processed
rundirs = get_rundirs(root_dir)
logging.info("Found %i directories." % len(rundirs))

# Initially unordered list of events, each a tuple: (timestamp, val_loss)
events = []

for rundir in rundirs:
    for J in get_jsons(rundir):
        # The second-to-last record carries the final validation loss;
        # the last record carries the run's end time.
        record_penult = J[-2]
        record_stop = J[-1]
        val_loss = record_penult["validation_loss"]["set"]
        time_str_stop = record_stop["end_time"]["set"]
        secs_stop = date2secs(time_str_stop)
        # Store the event tuple
        events.append((secs_stop, val_loss))

logging.info("Found %i events." % len(events))
if len(events) == 0:
    abort("No events!")

# Sort by timestamp and make times relative to the first event.
events.sort()
first = events[0][0]

# Run through the events in timestamp order, writing "hours val_loss"
# per line.  (A vestigial "load = 0" accumulator left over from the
# load-plot sibling script has been removed.)
with open(output_file, "w") as fp_out:
    for secs, val_loss in events:
        t = (secs - first) / 3600
        if isnan(val_loss):
            # NaN losses come from diverged runs: skip but note them.
            logging.debug("found NaN at timestamp: %0.6f" % t)
            continue
        fp_out.write("%0.6f %f\n" % (t, val_loss))

logging.info("Wrote %s ." % output_file)
|
# JSON TO LOSS
# NOTE: Run this with './json2table loss' (shell script wrapper)
# Converts a directory of runs into a simple loss over time table
# The output file format for each line is
# timestamp load
# where the timestamp is floating point Unix hours
# relative to first event
# and the loss is the validation loss reported in the JSON
# Tested with P1B1, NT3, and possibly others
import logging
from math import isnan
# Set PYTHONPATH=$PWD
from plottools import *
(root_dir, output_file) = get_args()
# List of all run directories to be processed
rundirs = get_rundirs(root_dir)
logging.info("Found %i directories." % len(rundirs))
# List of events, each a tuple: (timestamp, val_loss)
events = []
for rundir in rundirs:
    Js = get_jsons(rundir)
    for J in Js:
        # Penultimate JSON record holds the final validation loss,
        # the last record holds the stop timestamp.
        record_count = len(J)
        record_penult = J[record_count-2]
        record_stop = J[record_count-1]
        val_loss = record_penult["validation_loss"]["set"]
        time_str_stop = record_stop ["end_time"]["set"]
        secs_stop = date2secs(time_str_stop)
        # Store the event tuple
        events.append((secs_stop, val_loss))
logging.info("Found %i events." % len(events))
if len(events) == 0:
    abort("No events!")
# Sort by timestamp
events.sort()
# Get first event time
first = events[0][0]
fp_out = open(output_file, "w")
# Run through the events in timestamp order,
# record val_loss at each timestamp
# NOTE(review): 'load' below is never used -- dead debug leftover, and the
# header comment says "timestamp load" where the value written is val_loss.
load = 0
for event in events:
    t = (event[0] - first)/3600
    if isnan(event[1]):
        logging.debug("found NaN at timestamp: %0.6f" % t)
        continue
    fp_out.write("%0.6f %f\n" % (t, event[1]))
fp_out.close()
logging.info("Wrote %s ." % output_file)
| mit | Python |
37984a616fcd05f159c52d70e658a1268e4f5904 | Update expressions.py | ratnania/pyccel,ratnania/pyccel | scripts/expressions.py | scripts/expressions.py | # coding: utf-8
1
+1
-1
a + 2
3*a + 1
3*(4+a) + 1
3/(4+a) + 1
1.
+1.
-1.
a + 2.
3.*a + 1.
3.*(4.+a) + 1.
3./(4.+a) + 1.
True
False
not a
a and b
a or b
a == b
a != b
a < b
a > b
a <= b
a >= b
| # ... integers
# Expression samples (parser test input), grouped by operator family.
1
+1
-1
a + 2
3*a + 1
3*(4+a) + 1
3/(4+a) + 1
# ...
# ... floats
1.
+1.
-1.
a + 2.
3.*a + 1.
3.*(4.+a) + 1.
3./(4.+a) + 1.
# ...
# ... booleans
True
False
not a
a and b
a or b
# ...
# ... relationals
a == b
a != b
a < b
a > b
a <= b
a >= b
# ...
| mit | Python |
8ce7a90218788174234176113defb602c9ebb87d | Format compose file | zero323/spark-in-a-box | sparkinabox/compose/__init__.py | sparkinabox/compose/__init__.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import yaml
def _image(context, role):
return "{0}/{1}-{2}".format(context["DOCKER_PREFIX"], context["DOCKER_NAME"], role)
def _interactive(context):
return context["CLIENT_ENTRYPOINT"] != "spark-submit"
def make_client(context):
    """Compose service definition for the driver/client container."""
    service = {
        "build": "./client",
        "image": _image(context, "client"),
        "stdin_open": _interactive(context),
        "tty": _interactive(context),
    }
    # A local profile runs without a separate master container to link to.
    service["links"] = ["master"] if context["PROFILE"] != "local" else []
    return service
def make_master(context):
    """Compose service definition for the Spark master (UI on host port 8080)."""
    return dict(
        build="./master",
        image=_image(context, "master"),
        ports=["8080:8080"],
    )
def make_worker(context):
    """Compose service definition for a Spark worker linked to the master."""
    service = dict(build="./worker")
    service["image"] = _image(context, "worker")
    service["links"] = ["master"]
    return service
def make_compose(context, targets):
    """Render a docker-compose v2 YAML document for the requested *targets*."""
    factories = {
        "master": make_master,
        "worker": make_worker,
        "client": make_client,
    }
    services = {name: build(context)
                for name, build in factories.items() if name in targets}
    document = {"version": "2", "services": services}
    return yaml.dump(document, default_flow_style=False)
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import yaml
def _image(context, role):
    # "<DOCKER_PREFIX>/<DOCKER_NAME>-<role>"
    return "{0}/{1}-{2}".format(context["DOCKER_PREFIX"], context["DOCKER_NAME"], role)
def _interactive(context):
    # Interactive TTY for everything except plain spark-submit.
    return context["CLIENT_ENTRYPOINT"] != "spark-submit"
def make_client(context):
    # Compose service for the driver/client container.
    return {
        "build": "./client",
        "image": _image(context, "client"),
        "links": ["master"] if context["PROFILE"] != "local" else [],
        "stdin_open": _interactive(context),
        "tty": _interactive(context),
    }
def make_master(context):
    # Compose service for the Spark master.
    # NOTE: "8080" exposes the container port without binding a host port.
    return {
        "build": "./master",
        "image": _image(context, "master"),
        "ports": ["8080"],
    }
def make_worker(context):
    # Compose service for a worker linked to the master.
    return {
        "build": "./worker",
        "image": _image(context, "worker"),
        "links": ["master"]
    }
def make_compose(context, targets):
    # Render the docker-compose v2 YAML for the requested targets.
    return yaml.dump({
        "version": "2",
        "services": {target: make(context) for target, make in {
            "master": make_master,
            "worker": make_worker,
            "client": make_client,
        }.items() if target in targets}
    })
| mit | Python |
01ebc8fe45dbcbfd665a586c6a3f2aa56ea35f9c | add a test to demonstrate recursive behavior | alfredodeza/merfi | merfi/tests/test_repocollector.py | merfi/tests/test_repocollector.py | from merfi.collector import RepoCollector
from os.path import join, dirname
class TestRepoCollector(object):
    """Tests for RepoCollector; `repotree` is a pytest fixture directory."""
    def setup(self):
        # _eager=False skips walking the filesystem at construction time.
        self.paths = RepoCollector(path='/', _eager=False)
    def test_simple_tree(self, repotree):
        paths = RepoCollector(path=repotree)
        # The root of the repotree fixture is itself a repository.
        expected = [ repotree ]
        assert set(paths) == set(expected)
    def test_path_is_absolute(self):
        assert self.paths._abspath('/') == '/'
    def test_path_is_not_absolute(self):
        # Relative input must be resolved against the cwd to an absolute path.
        assert self.paths._abspath('directory').startswith('/')
    def test_debian_release_files(self, repotree):
        paths = RepoCollector.debian_release_files(repotree)
        # The root of the repotree fixture is itself a repository.
        expected = [
            join(repotree, 'dists', 'trusty', 'Release'),
            join(repotree, 'dists', 'precise', 'Release'),
        ]
        assert set(paths) == set(expected)
    def test_debian_nested_release_files(self, repotree):
        # Collect from the PARENT directory to exercise recursive discovery.
        path = dirname(repotree)
        paths = RepoCollector(path)
        release_files = paths.debian_release_files
        expected = [
            join(repotree, 'dists', 'trusty', 'Release'),
            join(repotree, 'dists', 'precise', 'Release'),
        ]
        assert set(release_files) == set(expected)
| from merfi.collector import RepoCollector
from os.path import join
class TestRepoCollector(object):
    """Tests for RepoCollector; `repotree` is a pytest fixture directory."""
    def setup(self):
        # _eager=False skips walking the filesystem at construction time.
        self.paths = RepoCollector(path='/', _eager=False)
    def test_simple_tree(self, repotree):
        paths = RepoCollector(path=repotree)
        # The root of the repotree fixture is itself a repository.
        expected = [ repotree ]
        assert set(paths) == set(expected)
    def test_path_is_absolute(self):
        assert self.paths._abspath('/') == '/'
    def test_path_is_not_absolute(self):
        assert self.paths._abspath('directory').startswith('/')
    def test_debian_release_files(self, repotree):
        paths = RepoCollector.debian_release_files(repotree)
        # The root of the repotree fixture is itself a repository.
        expected = [
            join(repotree, 'dists', 'trusty', 'Release'),
            join(repotree, 'dists', 'precise', 'Release'),
        ]
        assert set(paths) == set(expected)
| mit | Python |
977a13ec9078aacba2156a74c1adce2f32fd00a6 | Remove auto doc string. | access-missouri/am-django-project,access-missouri/am-django-project,access-missouri/am-django-project,access-missouri/am-django-project | am/am/urls.py | am/am/urls.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
am URL Configuration.
"""
from django.conf.urls import url
from django.contrib import admin
# Route table: only the Django admin site is exposed for now.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
]
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
am URL Configuration.
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
# Route table: only the Django admin site is exposed for now.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
]
| bsd-2-clause | Python |
d825b11868c37877660149ebc4b21d7429d6eaf0 | remove unnecessary python2 fallback | cigroup-ol/metaopt,cigroup-ol/metaopt,cigroup-ol/metaopt | metaopt/core/arg/util/modifier.py | metaopt/core/arg/util/modifier.py | # Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
class ArgsModifier(object):
    '''
    Modifies args in useful ways.

    Args can be combined pairwise with each other and randomized with a
    per-arg strength; both operations return a new list of args.
    '''

    @staticmethod
    def combine(args1, args2):
        """Returns the combination of the elements of the two given args."""
        combined = []
        for left, right in zip(args1, args2):
            combined.append(left.combine(right))
        return combined

    @staticmethod
    def randomize(args, strengths):
        """Randomizes all of the given args with the given strength."""
        randomized = []
        for arg, strength in zip(args, strengths):
            randomized.append(arg.randomize(strength))
        return randomized
| # Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# Standard Library
import sys
if sys.version_info[0] == 2:
# python 3 has an iterative zip built in
from itertools import izip as zip
class ArgsModifier(object):
    '''
    Modifies args in useful ways.
    This module provides ways to modify given args. Specifically, args can be
    combined to each other and randomized with a certain strength.
    '''
    @staticmethod
    def combine(args1, args2):
        """Returns the combination of the elements of the two given args."""
        # Pairwise: each result is arg1.combine(arg2); extras are dropped by zip.
        return [arg1.combine(arg2) for arg1, arg2 in zip(args1, args2)]
    @staticmethod
    def randomize(args, strengths):
        """Randomizes all of the given args with the given strength."""
        # One strength value per arg, paired positionally.
        return [arg.randomize(strength) for arg, strength in
                zip(args, strengths)]
| bsd-3-clause | Python |
c17e0f804a446ed0135730045ca96954c8967a48 | Bump version to 0.5.3 | botify-labs/simpleflow,botify-labs/simpleflow | simpleflow/__init__.py | simpleflow/__init__.py | # -*- coding: utf-8 -*-
__version__ = '0.5.3'
__author__ = 'Greg Leclercq'
__license__ = "MIT"
from .activity import Activity
from .workflow import Workflow
| # -*- coding: utf-8 -*-
__version__ = '0.5.2'
__author__ = 'Greg Leclercq'
__license__ = "MIT"
from .activity import Activity
from .workflow import Workflow
| mit | Python |
4146551d191152ae486f079911f53086f3a60a07 | Rename version field in Choice to question | holycattle/pysqueak-api,holycattle/pysqueak-api | api/models.py | api/models.py | from django.db import models
from rest_framework import serializers
class Question(models.Model):
    # Keyed by an app-assigned version string rather than an auto id.
    version = models.CharField(primary_key=True, max_length=8)
    text = models.TextField()
    created_on = models.DateTimeField(auto_now_add=True)
    updated_on = models.DateTimeField(auto_now=True)
class Choice(models.Model):
    text = models.TextField()
    # One of the selectable answers for a Question.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    created_on = models.DateTimeField(auto_now_add=True)
    updated_on = models.DateTimeField(auto_now=True)
class Answer(models.Model):
    # Records that a user picked a particular choice.
    choice = models.ForeignKey(Choice, on_delete=models.CASCADE)
    user_id = models.TextField()
    created_on = models.DateTimeField(auto_now_add=True)
class ChoiceSerializer(serializers.ModelSerializer):
    """Serializes a Choice with its text and parent question reference."""
    class Meta:
        model = Choice
        # BUG fix: Choice's FK field is named 'question' (renamed from the old
        # 'version'), so listing 'version' here referenced a nonexistent field.
        fields = ('id', 'text', 'question', 'created_on', 'updated_on',)
class QuestionSerializer(serializers.ModelSerializer):
    """Flat serialization of a Question (no nested choices yet)."""
    # TODO: create a serializer that returns list of choices for the question
    class Meta:
        model = Question
        fields = ('text', 'version', 'created_on', 'updated_on',)
class AnswerSerializer(serializers.ModelSerializer):
    """Serializes an Answer submission."""
    class Meta:
        model = Answer
        # NOTE(review): 'choice_id' relies on the implicit FK column name;
        # confirm DRF resolves it (the model field itself is 'choice').
        fields = ('id', 'choice_id', 'user_id', 'created_on',)
| from django.db import models
from rest_framework import serializers
class Question(models.Model):
    # Keyed by an app-assigned version string rather than an auto id.
    version = models.CharField(primary_key=True, max_length=8)
    text = models.TextField()
    created_on = models.DateTimeField(auto_now_add=True)
    updated_on = models.DateTimeField(auto_now=True)
class Choice(models.Model):
    text = models.TextField()
    # FK to the owning Question; field is (confusingly) named 'version'.
    version = models.ForeignKey(Question, on_delete=models.CASCADE)
    created_on = models.DateTimeField(auto_now_add=True)
    updated_on = models.DateTimeField(auto_now=True)
class Answer(models.Model):
    # Records that a user picked a particular choice.
    choice = models.ForeignKey(Choice, on_delete=models.CASCADE)
    user_id = models.TextField()
    created_on = models.DateTimeField(auto_now_add=True)
class ChoiceSerializer(serializers.ModelSerializer):
    class Meta:
        model = Choice
        fields = ('id', 'text', 'version', 'created_on', 'updated_on',)
class QuestionSerializer(serializers.ModelSerializer):
    # TODO: create a serializer that returns list of choices for the question
    class Meta:
        model = Question
        fields = ('text', 'version', 'created_on', 'updated_on',)
class AnswerSerializer(serializers.ModelSerializer):
    class Meta:
        model = Answer
        fields = ('id', 'choice_id', 'user_id', 'created_on',)
| mit | Python |
b9d54da73e5c4d859aa5ad8e9d8b96bb7527ae6d | Add position table and add field mid in box tables | g82411/s_square | api/models.py | api/models.py | from django.db import models
# Create your models here.
class User(models.Model):
    uid = models.AutoField(primary_key=True)
    uname = models.CharField(max_length=128)
    # Session token; 35 chars, unique per user.
    session = models.CharField(max_length=35,unique=True)
class Box(models.Model):
    """A monster instance owned by a user."""
    bid = models.AutoField(primary_key=True)
    level = models.IntegerField()
    # NOTE(review): FK fields named 'mid'/'owner' produce 'mid_id'/'owner_id'
    # DB columns -- confirm that naming is intended.
    mid = models.ForeignKey("Monster")
    owner = models.ForeignKey("User")
class Party(models.Model):
    """A user's active team of four boxes."""
    uid = models.ForeignKey("User")
    fir = models.ForeignKey("Box",related_name="first")
    sec = models.ForeignKey("Box",related_name="second")
    thi = models.ForeignKey("Box",related_name="third")
    fou = models.ForeignKey("Box",related_name="fourth")
class Position(models.Model):
    """Last reported geolocation for a user."""
    uid = models.ForeignKey("User")
    latitude = models.FloatField()
    longitude = models.FloatField()
    # auto_now: refreshed on every save.
    time = models.DateField(auto_now=True)
class Monster(models.Model):
    """Monster species with base stats and per-level growth factors."""
    mid = models.AutoField(primary_key=True)
    mname = models.CharField(max_length=128)
    initHP = models.IntegerField()
    initAtk = models.IntegerField()
    groHP = models.FloatField()
    groAtk = models.FloatField()
class Skill(models.Model):
    sid = models.AutoField(primary_key=True)
    target = models.CharField(max_length=40)
    # Free-form skill behaviour description/definition.
    function = models.TextField()
| from django.db import models
# Create your models here.
class User(models.Model):
    uid = models.AutoField(primary_key=True)
    uname = models.CharField(max_length=128)
    # Session token; 35 chars, unique per user.
    session = models.CharField(max_length=35,unique=True)
class Box(models.Model):
    """A monster instance owned by a user (no species link in this version)."""
    bid = models.AutoField(primary_key=True)
    level = models.IntegerField()
    owner = models.ForeignKey("User")
class Party(models.Model):
    """A user's active team of four boxes."""
    uid = models.ForeignKey("User")
    fir = models.ForeignKey("Box",related_name="first")
    sec = models.ForeignKey("Box",related_name="second")
    thi = models.ForeignKey("Box",related_name="third")
    fou = models.ForeignKey("Box",related_name="fourth")
class Monster(models.Model):
    """Monster species with base stats and per-level growth factors."""
    mid = models.AutoField(primary_key=True)
    mname = models.CharField(max_length=128)
    initHP = models.IntegerField()
    initAtk = models.IntegerField()
    groHP = models.FloatField()
    groAtk = models.FloatField()
class Skill(models.Model):
    sid = models.AutoField(primary_key=True)
    target = models.CharField(max_length=40)
    function = models.TextField()
| apache-2.0 | Python |
5822fd465c69834774dabdc6d417e9712136f0d6 | Use count as option | dgengtek/scripts,dgengtek/scripts | admin/pwgen.py | admin/pwgen.py | #!/bin/env python3
# generate passwords
# TODO: generate passwords first then build aligned output table format
import sys
import random
import click
import string
@click.command("pwgen.py")
@click.argument("length", default=16)
@click.option("character_filter", "-f","--filter", help="Filter characters from the given string")
@click.option("-c","--count", default=30, help="Password count")
@click.option("-i","--inverse", is_flag=True, help="Inverse filter. Only show characters from the given filter")
@click.option('-b', '--bits', is_flag=True, help="Use bits instead of length to determine password length")
@click.option('-g', '--graph', 'mode', flag_value='graph', default=True, help="mode: [default] All printable characters except whitespace")
@click.option('-a', '--alnum', 'mode', flag_value='alnum', default=True, help="mode: letters + digits")
@click.option('-d', '--digits', 'mode', flag_value='digits', help="mode: digits")
@click.option('-p', '--printable', 'mode', flag_value='printable', help="mode: All printable characters")
@click.option('-l', '--letters', 'mode', flag_value='letters', help="mode: letters")
def main(length, character_filter, count, inverse, bits, mode):
    """Generate COUNT random passwords of LENGTH characters from the
    character pool selected by the mode flags, minus (or, with -i,
    restricted to) the characters in --filter."""
    set_alpha = set(string.ascii_letters)
    set_digits = set(string.digits)
    set_printable = set(string.printable)
    set_whitespace = set(string.whitespace)
    if bits:
        # Fixed 256-bit length expressed in bytes/characters.
        length = 256 // 8
    character_pool = None
    if mode == "graph":
        character_pool = set_printable - set_whitespace
    if mode == "alnum":
        character_pool = set_alpha.union(set_digits)
    if mode == "letters":
        character_pool = set_alpha
    if mode == "digits":
        character_pool = set_digits
    if mode == "printable":
        character_pool = set_printable
    if not character_filter:
        character_filter = set()
    else:
        character_filter = set(character_filter)
    # BUG fix: --inverse was declared but never applied. With -i the filter
    # becomes a whitelist (keep only filtered characters); without it the
    # filter remains a blacklist, as before.
    if inverse:
        if character_filter:
            character_pool = character_pool.intersection(character_filter)
        # No --filter given: nothing to invert against, keep the pool as-is.
    else:
        character_pool = character_pool.difference(character_filter)
    character_pool = "".join(character_pool)
    passwords = [ generate_password(character_pool, length) for _ in range(1, count+1)]
    print_passwords(passwords)
def print_passwords(passwords):
    """Print one password bare, or many as tab-separated rows of three."""
    if len(passwords) == 1:
        # Single password: no trailing whitespace at all.
        print(passwords[0], end="")
        return
    for index, password in enumerate(passwords, start=1):
        print(password, end="\t")
        if index % 3 == 0:
            # Break the row after every third password.
            print()
def generate_password(pool, size):
    """Return a random string of *size* characters drawn from *pool*."""
    chars = [random.choice(pool) for _ in range(size)]
    return "".join(chars)
if __name__ == "__main__":
main()
| #!/bin/env python3
# generate passwords
# TODO: generate passwords first then build aligned output table format
import sys
import random
import click
import string
# CLI: COUNT and LENGTH are positional click arguments; mode flags pick the
# character pool. NOTE(review): --inverse is declared but never applied below.
@click.command("pwgen.py")
@click.argument("count", default=30)
@click.argument("length", default=12)
@click.option("character_filter", "-f","--filter", help="Filter characters from the given string")
@click.option("-i","--inverse", is_flag=True, help="Inverse filter. Only show characters from the given filter")
@click.option('-b', '--bits', is_flag=True, help="Use bits instead of length to determine password length")
@click.option('-g', '--graph', 'mode', flag_value='graph', default=True, help="mode: [default] All printable characters except whitespace")
@click.option('-a', '--alnum', 'mode', flag_value='alnum', default=True, help="mode: letters + digits")
@click.option('-d', '--digits', 'mode', flag_value='digits', help="mode: digits")
@click.option('-p', '--printable', 'mode', flag_value='printable', help="mode: All printable characters")
@click.option('-l', '--letters', 'mode', flag_value='letters', help="mode: letters")
def main(length, count, character_filter, inverse, bits, mode):
    # Candidate character classes.
    set_alpha = set(string.ascii_letters)
    set_digits = set(string.digits)
    set_printable = set(string.printable)
    set_whitespace = set(string.whitespace)
    if bits:
        # Fixed 256-bit length expressed in characters.
        length = 256 // 8
    character_pool = None
    if mode == "graph":
        character_pool = set_printable - set_whitespace
    if mode == "alnum":
        character_pool = set_alpha.union(set_digits)
    if mode == "letters":
        character_pool = set_alpha
    if mode == "digits":
        character_pool = set_digits
    if mode == "printable":
        character_pool = set_printable
    if not character_filter:
        character_filter = set()
    else:
        character_filter = set(character_filter)
    # Filter acts as a blacklist on the selected pool.
    character_pool = character_pool.difference(character_filter)
    character_pool = "".join(character_pool)
    passwords = [ generate_password(character_pool, length) for i in range(1, count+1)]
    print_passwords(passwords)
def print_passwords(passwords):
    # One password: print bare; many: tab-separated, newline every 3rd.
    if len(passwords) == 1:
        password = passwords[0]
        print(password, end="")
        return
    for i,password in enumerate(passwords, 1):
        print(password, end="\t")
        if i % 3 == 0:
            print()
def generate_password(pool, size):
    # size independent random draws from pool.
    return "".join([ random.choice(pool) for x in range(size) ])
if __name__ == "__main__":
    main()
| mit | Python |
d1655e583c5012e09269a300d9d49bcab7266119 | Create prueba.py | aescoda/TFG | prueba.py | prueba.py | from flask import Flask
from flask import request
import os
import xml.etree.ElementTree as ET
from threading import Thread
import email_lib
app = Flask(__name__)
xml = ""
def send_email(xml):
    # Worker run on a background thread: sends the notification email.
    # (Python 2 source -- print statements throughout.)
    print "2"
    email_lib.prueba()
    print xml
    return None
@app.route('/webhook', methods=['POST','GET'])
def webhook():
    print "webhook"
    # Publish the payload to module scope so /response can read it too.
    global xml
    xml = "hola"
    t = Thread(target=send_email, args=(xml,))
    t.start()
    print "acabando"
    #Jasper resend the notification unless it receives a status 200 confirming the reception
    return '',200
@app.route('/response', methods=['POST','GET'])
def response():
    print xml #Check how the variable is being shared.
    return "Acabamos de procesar su peticion, en breve recibira un email con los detalles"
if __name__ == '__main__':
    port = int(os.getenv('PORT', 5000))
    app.run(debug=True, port=port, host='0.0.0.0', threaded=True)
| from flask import Flask
from flask import request
import os
import xml.etree.ElementTree as ET
from threading import Thread
import email_lib
app = Flask(__name__)
global xml = ""
def send_email(xml):
print "2"
email_lib.prueba()
print xml
return None
@app.route('/webhook', methods=['POST','GET'])
def webhook():
print "webhook"
global xml = "hola"
t = Thread(target=send_email, args=(xml,))
t.start()
print "acabando"
#Jasper resend the notification unless it receives a status 200 confirming the reception
return '',200
@app.route('/response', methods=['POST','GET'])
def response():
print xml #Comprobar como comparto la variable.
return "Acabamos de procesar su peticion, en breve recibira un email con los detalles"
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
app.run(debug=True, port=port, host='0.0.0.0', threaded=True)
| apache-2.0 | Python |
e7cef2f5ab71f1841a1649f8838a48f5ba2130ae | Create prueba.py | aescoda/TFG | prueba.py | prueba.py | from flask import Flask
from flask import request
import os
import xml.etree.ElementTree as ET
from threading import Thread
import email_lib
app = Flask(__name__)
xml = None
def send_email(xml):
print "2"
email_lib.prueba()
print xml
return None
@app.route('/webhook', methods=['POST','GET'])
def webhook():
print "webhook"
xml = "hola"
t = Thread(target=send_email, args=(xml,))
t.start()
print "acabando"
#Jasper resend the notification unless it receives a status 200 confirming the reception
return '',200
@app.route('/response', methods=['POST','GET'])
def response():
print xml #Comprobar como comparto la variable.
return "Acabamos de procesar su peticion, en breve recibira un email con los detalles"
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
app.run(debug=True, port=port, host='0.0.0.0', threaded=True)
| from flask import Flask
from flask import request
import os
import xml.etree.ElementTree as ET
from threading import Thread
import email_lib
app = Flask(__name__)
xml =""
def send_email(xml):
    # Worker run on a background thread: sends the notification email.
    print "2"
    email_lib.prueba()
    print xml
    return None
@app.route('/webhook', methods=['POST','GET'])
def webhook():
    print "webhook"
    # NOTE(review): no 'global' declaration, so this binds a local 'xml';
    # the module-level value read by /response is never updated.
    xml = "hola"
    t = Thread(target=send_email, args=(xml,))
    t.start()
    print "acabando"
    #Jasper resend the notification unless it receives a status 200 confirming the reception
    return '',200
@app.route('/response', methods=['POST','GET'])
def response():
    print xml #Check how the variable is being shared.
    return "Acabamos de procesar su peticion, en breve recibira un email con los detalles"
if __name__ == '__main__':
    port = int(os.getenv('PORT', 5000))
    app.run(debug=True, port=port, host='0.0.0.0', threaded=True)
| apache-2.0 | Python |
857d660f8908128aa320a6f303f34bced224c651 | Switch to python3 | jcommelin/vimnb,jcommelin/vimnb | pywrap.py | pywrap.py | #!/usr/bin/python3
import sys, time, traceback, pprint
# File-based eval loop: each stdin line names a job; read "<name>.in",
# exec it line by line, and write stdout (or the traceback) to "<name>.out".
sys.stdout.close()
while True:
    line = sys.stdin.readline()
    if line:
        # line[:-1] strips the trailing newline from the job name.
        i = open(line[:-1] + ".in", "r")
        sys.stdout = open(line[:-1] + ".out", "w")
        try:
            for l in i:
                exec(l)
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
        except:
            traceback.print_exc(file=sys.stdout)
        i.close()
        sys.stdout.close()
    else:
        # EOF / no input yet: poll again shortly.
        time.sleep(0.1)
| #!/usr/bin/python
import sys, time, traceback, pprint
# File-based eval loop: each stdin line names a job; read "<name>.in",
# exec it line by line, and write stdout (or the traceback) to "<name>.out".
sys.stdout.close()
while True:
    line = sys.stdin.readline()
    if line:
        # line[:-1] strips the trailing newline from the job name.
        i = open(line[:-1] + ".in", "r")
        sys.stdout = open(line[:-1] + ".out", "w")
        try:
            for l in i:
                exec(l)
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
        except:
            traceback.print_exc(file=sys.stdout)
        i.close()
        sys.stdout.close()
    else:
        # EOF / no input yet: poll again shortly.
        time.sleep(0.1)
| unlicense | Python |
ecf68c5d095a4f85856e012409e8dde4567376e2 | Fix negative values | kyleconroy/popfly | reaper.py | reaper.py | #!/usr/bin/env python
import boto
import base64
import json
from datetime import datetime
def seconds_from_hours(hours):
    """Convert a duration in hours to seconds."""
    seconds_per_hour = 60 * 60
    return hours * seconds_per_hour
def should_kill(instance):
    # An instance is reaped when it opted in (userData JSON with "popfly"),
    # is running, and has outlived its declared "duration" (hours).
    attributes = instance.get_attribute("userData")
    if attributes['userData'] == None:
        print "Instance has no userdata"
        return False
    # EC2 user data is base64-encoded.
    user_data = base64.b64decode(attributes['userData'])
    try:
        data = json.loads(user_data)
    except ValueError:
        print "User Data isn't json"
        return False
    if not data.get("popfly"):
        print "No key popfly"
        return False
    if instance.state != "running":
        print "Instance isn't running"
        return False
    # launch_time is an ISO-8601 UTC string from the EC2 API.
    start = datetime.strptime(instance.launch_time, "%Y-%m-%dT%H:%M:%S.%fZ")
    end = datetime.utcnow()
    life = end - start
    print "Current {}".format(life.total_seconds())
    print "Machine {}".format(seconds_from_hours(data['duration']))
    return life.total_seconds() >= seconds_from_hours(data['duration'])
def kill(instance):
    # Terminate (not just stop) the instance.
    instance.terminate()
    print "BOOM"
# Scan every instance in every reservation and reap the expired ones.
conn = boto.connect_ec2()
for reservation in conn.get_all_instances():
    for instance in reservation.instances:
        print "Checking {}".format(instance.id)
        if should_kill(instance):
            kill(instance)
            print "Killing {}".format(instance.id)
        else:
            print "Sparing {}".format(instance.id)
| #!/usr/bin/env python
import boto
import base64
import json
from datetime import datetime
def seconds_from_hours(hours):
return hours * 60 * 60
def should_kill(instance):
attributes = instance.get_attribute("userData")
if attributes['userData'] == None:
print "Instance has no userdata"
return False
user_data = base64.b64decode(attributes['userData'])
try:
data = json.loads(user_data)
except ValueError:
print "User Data isn't json"
return False
if not data.get("popfly"):
print "No key popfly"
return False
if instance.state != "running":
print "Instance isn't running"
return False
start = datetime.strptime(instance.launch_time, "%Y-%m-%dT%H:%M:%S.%fZ")
end = datetime.utcnow()
life = start - end
print "Current {}".format(life.total_seconds())
print "Machine {}".format(seconds_from_hours(data['duration']))
return life.total_seconds() >= seconds_from_hours(data['duration'])
def kill(instance):
    # Terminate (not just stop) the instance.
    instance.terminate()
    print "BOOM"
# Scan every instance in every reservation and reap the expired ones.
conn = boto.connect_ec2()
for reservation in conn.get_all_instances():
    for instance in reservation.instances:
        if should_kill(instance):
            kill(instance)
            print "Killing {}".format(instance.id)
        else:
            print "Sparing {}".format(instance.id)
| mit | Python |
9c4f927f9565fc158dea8a7840842839b986ca64 | Improve rest api | njbbaer/unicorn-remote,njbbaer/unicorn-remote,njbbaer/unicorn-remote | remote.py | remote.py | from flask import Flask, render_template, request
from flask_restful import Resource, Api
import unicornhat as unicorn
import argparse
import json
import subprocess
import sys
app = Flask(__name__)
api = Api(app)
class State:
    ''' Handle Unicorn HAT state'''
    def __init__(self, brightness, program_name):
        # Handle of the currently running program subprocess, if any.
        self.process = None
        self.set_brightness(brightness)
        self.set_program(program_name)
    def set_brightness(self, brightness):
        # brightness is a 0-100 percentage; the HAT API takes 0.0-1.0.
        self.brightness = brightness
        unicorn.brightness(self.brightness/100)
    def set_program(self, program_name):
        # Returns True when a program was started, False when cleared/none.
        self.program_name = program_name
        # Terminate running program
        if self.process is not None:
            self.process.terminate()
        # Start new program
        if self.program_name is not None:
            path = 'programs/' + self.program_name + '.py'
            self.process = subprocess.Popen([sys.executable, path])
            return True
        else:
            return False
# Initialize unicorn state
state = State(brightness=50, program_name=None)
# Web UI: shows current state and accepts form posts to change it.
@app.route('/', methods=['POST', 'GET'])
def index(brightness=state.brightness,
          program=state.program_name):
    if request.method == 'POST':
        if 'brightness' in request.form:
            state.set_brightness(int(request.form['brightness']))
        if 'program' in request.form:
            state.set_program(request.form['program'])
    return render_template('index.html', brightness=state.brightness,
                           program_name=state.program_name)
# REST resource: brightness level passed in the URL.
class Brightness(Resource):
    def get(self):
        return {'brightness': state.brightness}
    def put(self, level):
        state.set_brightness(int(level))
        return {'brightness': state.brightness}
# REST resource: program name passed in the URL.
class Program(Resource):
    def get(self):
        return {'program': state.program_name}
    def put(self, name):
        if state.set_program(name):
            return {'program': state.program_name}
        else:
            return {'program': 'ERROR program not found'}
api.add_resource(Brightness, '/api/brightness/<level>')
api.add_resource(Program, '/api/program/<name>')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--debug', action='store_true', dest='debug', default=False)
    parser.add_argument('-p', '--port', action='store', dest='port', default=5000, type=int)
    params = parser.parse_args()
    app.run(debug=params.debug, port=params.port, host='0.0.0.0')
from flask_restful import Resource, Api
import unicornhat as unicorn
import argparse
import json
import subprocess
import sys
app = Flask(__name__)
api = Api(app)
class State:
    ''' Handle Unicorn HAT state'''
    def __init__(self, brightness, program_name):
        # Handle of the currently running program subprocess, if any.
        self.process = None
        self.set_brightness(brightness)
        self.set_program(program_name)
    def set_brightness(self, brightness):
        # brightness is a 0-100 percentage; the HAT API takes 0.0-1.0.
        self.brightness = brightness
        unicorn.brightness(self.brightness/100)
    def set_program(self, program_name):
        self.program_name = program_name
        # Terminate running program
        if self.process is not None:
            self.process.terminate()
        # Start new program
        if self.program_name is not None:
            path = 'programs/' + self.program_name + '.py'
            self.process = subprocess.Popen([sys.executable, path])
# Initialize unicorn state
state = State(brightness=50, program_name=None)
# Web UI: shows current state and accepts form posts to change it.
@app.route('/', methods=['POST', 'GET'])
def index(brightness=state.brightness,
          program=state.program_name):
    if request.method == 'POST':
        if 'brightness' in request.form:
            state.set_brightness(int(request.form['brightness']))
        if 'program' in request.form:
            state.set_program(request.form['program'])
    return render_template('index.html', brightness=state.brightness,
                           program_name=state.program_name)
# REST resource: brightness taken from the POSTed form body.
class Brightness(Resource):
    def get(self):
        return {'brightness': state.brightness}
    def post(self):
        state.set_brightness(int(request.form['brightness']))
        return {'brightness': state.brightness}
class Program(Resource):
    """REST resource for selecting the running Unicorn HAT program."""
    def get(self):
        return {'program': state.program_name}
    def post(self):
        # BUG fix: this called set_brightness(int(form['program'])) -- a
        # program name is a string and must be routed to set_program().
        state.set_program(request.form['program'])
        return {'program': state.program_name}
# Mount the REST resources and start the development server.
api.add_resource(Brightness, '/api/brightness')
api.add_resource(Program, '/api/program')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--debug', action='store_true', dest='debug', default=False)
    parser.add_argument('-p', '--port', action='store', dest='port', default=5000, type=int)
    params = parser.parse_args()
    app.run(debug=params.debug, port=params.port, host='0.0.0.0')
97140ae5183c82c0da7817d09e6aaa5d3c74cb4a | Fix indentation in routes | AlexMathew/csipy-home | routes.py | routes.py | from flask import Flask, render_template, redirect, request
import psycopg2
from functools import wraps
import os
import urlparse
app = Flask(__name__)
def connectDB(wrapped):
    """Decorator: open a Postgres connection from $DATABASE_URL, pass a live
    cursor as the wrapped view's first positional argument, commit on
    success, and always close the connection."""
    @wraps(wrapped)
    def inner(*args, **kwargs):
        urlparse.uses_netloc.append("postgres")
        url = urlparse.urlparse(os.environ["DATABASE_URL"])
        conn = psycopg2.connect(
            database=url.path[1:],
            user=url.username,
            password=url.password,
            host=url.hostname,
            port=url.port
        )
        cur = conn.cursor()
        # BUG fix: a leftover 'cur = None' discarded the cursor, so every
        # wrapped view received None and crashed on cur.execute().
        try:
            result = wrapped(cur, *args, **kwargs)
            # psycopg2 does not autocommit; persist any writes the view made.
            conn.commit()
            return result
        finally:
            conn.close()
    return inner
@app.route('/')
def home():
    return render_template('home.html')
# List everyone currently flagged as registered.
@app.route('/participants')
@connectDB
def participants(*args):
    # connectDB prepends the DB cursor as the first positional argument.
    cur = args[0]
    cur.execute('SELECT REG, NAME FROM PARTICIPANTS WHERE REGISTERED IS true ORDER BY REG')
    parts = cur.fetchall()
    return render_template('participants.html', participants = parts)
@app.route('/setup')
def setup():
    return render_template('setup.html')
@app.route('/register')
def register():
    return render_template('register.html')
@app.route('/complete', methods=['POST'])
@connectDB
def complete(*args):
    """Handle the registration form POST: validate the register number and
    mark that participant as registered in the database."""
    cur = args[0]
    try:
        regno = int(request.form['regno'])
        print(regno)
    except (KeyError, ValueError):
        return render_template('failure.html', error_msg="Please enter a valid register number.")
    # fetchall() returns rows as 1-tuples; flatten them so the membership
    # tests below compare ints against ints.  Previously `regno in complete`
    # compared an int against tuples and could never be true, so every
    # valid register number was rejected.  (The local was also shadowing
    # the view function's own name.)
    cur.execute('SELECT REG FROM PARTICIPANTS')
    all_regs = [row[0] for row in cur.fetchall()]
    cur.execute('SELECT REG FROM PARTICIPANTS WHERE REGISTERED IS true')
    registered = [row[0] for row in cur.fetchall()]
    if regno not in all_regs:
        return render_template('failure.html', error_msg="We're sorry, but the workshop is currently \
            open only to members registed to CSI. Try getting links to the resources \
            from your friends who are attending the workshop. We'll keep you \
            posted if we plan an open Python workshop. Thanks for your interest !")
    elif regno in registered:
        return render_template('failure.html', error_msg="You're already registered.")
    else:
        # Registering must set the flag to true (it was erroneously being
        # set to false, so the participant never showed up as registered).
        cur.execute('UPDATE PARTICIPANTS SET REGISTERED=true WHERE REG=(%s)', (regno,))
        # psycopg2 connections are not autocommit by default; commit so the
        # update survives the request.
        cur.connection.commit()
        return render_template('success.html')
| from flask import Flask, render_template, redirect, request
import psycopg2
from functools import wraps
import os
import urlparse
app = Flask(__name__)
def connectDB(wrapped):
@wraps(wrapped)
def inner(*args, **kwargs):
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
cur = conn.cursor()
cur = None
return wrapped(cur, *args, **kwargs)
return inner
@app.route('/')
def home():
return render_template('home.html')
@app.route('/participants')
@connectDB
def participants(*args):
cur = args[0]
cur.execute('SELECT REG, NAME FROM PARTICIPANTS WHERE REGISTERED IS true ORDER BY REG')
parts = cur.fetchall()
return render_template('participants.html', participants = parts)
@app.route('/setup')
def setup():
return render_template('setup.html')
@app.route('/register')
def register():
return render_template('register.html')
@app.route('/complete', methods=['POST'])
@connectDB
def complete(*args):
cur = args[0]
try:
regno = int(request.form['regno'])
print regno
except Exception:
return render_template('failure.html', error_msg="Please enter a valid register number.")
cur.execute('SELECT REG FROM PARTICIPANTS')
complete = cur.fetchall()
cur.execute('SELECT REG FROM PARTICIPANTS WHERE REGISTERED IS true')
registered = cur.fetchall()
if regno not in complete:
return render_template('failure.html', error_msg="We're sorry, but the workshop is currently \
open only to members registed to CSI. Try getting links to the resources \
from your friends who are attending the workshop. We'll keep you \
posted if we plan an open Python workshop. Thanks for your interest !")
elif regno in registered:
return render_template('failure.html', error_msg="You're already registered.")
else:
cur.execute('UPDATE PARTICIPANTS SET REGISTERED=false WHERE REG=(%s)', (regno,))
return render_template('success.html')
| mit | Python |
6517f260eece4dd828dde5f74109b42b7e91e4bb | sort is realizing anyway | nathants/py-util,nathants/s | s/iter.py | s/iter.py | import itertools
import six
def groupby(val, key):
val = sorted(val, key=key)
return [(k, list(v)) for k, v in itertools.groupby(val, key=key)]
def nwise(val, n):
return six.moves.zip(*(itertools.islice(val, i, None) for i, val in enumerate(itertools.tee(val, n))))
def chunk(val, n, drop_extra=False):
res = ()
for x in val:
res += (x,)
if len(res) == n:
yield res
res = ()
if res and not drop_extra:
yield res
| import itertools
import six
def groupby(val, key, realize=True):
val = sorted(val, key=key)
if realize:
return [(k, list(v)) for k, v in itertools.groupby(val, key=key)]
else:
return itertools.groupby(val, key=key)
def nwise(val, n):
return six.moves.zip(*(itertools.islice(val, i, None) for i, val in enumerate(itertools.tee(val, n))))
def chunk(val, n, drop_extra=False):
res = ()
for x in val:
res += (x,)
if len(res) == n:
yield res
res = ()
if res and not drop_extra:
yield res
| mit | Python |
dc3a848998567ddfe15df5a6ee3aaa50e2d6b9ce | Add samples | aidiary/python-recaius | sample.py | sample.py | # -*- coding: utf-8 -*-
from recaius.RecaiusTTS import RecaiusTTS
# 登録時に発行された自分のIDとパスワードを設定
rec = RecaiusTTS('YOUR_ID', 'YOUR_PASSWORD')
# デフォルトの話者はsakura
rec.speak('あらゆる現実をすべて自分のほうへねじ曲げたのだ。')
rec.speaker('moe').speak('嬉しいはずがゆっくり寝てもいられない。')
rec.speaker('hiroto').speak('テレビゲームやパソコンでゲームをして遊ぶ。')
rec.speaker('itaru').speak('救急車が十分に動けず救助作業が遅れている。')
# sakuraは感情音声が使える
rec.speaker('sakura')
rec.emotion('happy', 100).speak('とってもうれしい。')
rec.emotion('angry', 100).speak('ふざけないでください。')
rec.emotion('sad', 100).speak('悲しいことがありました。')
rec.emotion('fear', 100).speak('怖いこと言わないでよ。')
rec.emotion('tender', 100).speak('どうしましたか?')
# 設定した感情をリセット
rec.reset_emotions()
rec.speaker('moe')
rec.speed(-10).speak('ゆっくりです。')
rec.speed(10).speak('早口です。')
# 設定したパラメータをリセット
rec.reset_parameters()
rec.speaker('sakura')
rec.pitch(-10).speak('低い声です。')
rec.pitch(10).speak('高い声です。')
rec.reset_parameters()
rec.speaker('itaru')
rec.depth(-4).speak('細い声です。')
rec.depth(4).speak('太い声です。')
rec.reset_parameters()
rec.speaker('hiroto')
rec.volume(-40).speak('小さい声です。')
rec.volume(50).speak('大きい声です。')
rec.reset_parameters()
# ファイルに保存する
rec.speaker('sakura')
rec.speed(-3).pitch(8).depth(-2).volume(30).save_wav('ファイルに保存します。', 'output.wav')
rec.reset_parameters()
| mit | Python | |
c39ed35ce4c21c6a7341ff81ab8cf186692c1845 | Read Guardian API key from positional argument. | mattpatey/news-to-epub | scrape.py | scrape.py | #!/usr/bin/env python
import argparse
from datetime import datetime
from json import loads
from bs4 import BeautifulSoup
from ebooklib import epub
import requests
def get_todays_news(api_key, from_date='2015-03-22', section='world'):
    """Fetch headlines from the Guardian content API.

    The start date and section were previously hard-coded; they are now
    keyword parameters with the same values as defaults, so existing
    callers are unaffected.

    Returns a list of (title, url) tuples.
    """
    payload = {'api-key': api_key,
               'section': section,
               'from-date': from_date}
    r = requests.get('http://content.guardianapis.com/search', params=payload)
    json = loads(r.text)
    articles = [(x['webTitle'], x['webUrl']) for x in json['response']['results']]
    return articles
def scrape(uri):
    """Download an article page and return its body paragraphs as one HTML string."""
    page = requests.get(uri)
    soup = BeautifulSoup(page.text)
    body = soup.find('div', class_='content__article-body')
    paragraphs = body.find_all('p')
    return u''.join(unicode(p) for p in paragraphs)
def make_chapter(title, content):
    """Build an EpubHtml chapter whose file name is derived from *title*."""
    kept_chars = [ch for ch in title if ch.isalpha() or ch.isspace()]
    safe_title = u''.join(kept_chars).replace(u' ', u'-')
    chapter = epub.EpubHtml(title=title,
                            file_name=u'chapter-{}.xhtml'.format(safe_title),
                            lang='en')
    chapter.content = u'<h1>{}</h1>{}'.format(title, content)
    return chapter
def make_ebook(title, chapters):
    """Assemble the EpubHtml chapters into an .epub file on disk.

    The output file name is derived from *title*: non-alphanumerics are
    dropped, spaces become dashes, and the result is lower-cased.
    """
    book = epub.EpubBook()
    book.set_title(title)
    book.set_language('en')
    date = datetime.now().strftime(u'%A %d %B %Y')
    section_name = u'Headlines for {}'.format(date)
    # NOTE(review): the first element of this toc tuple is a generator, not
    # a tuple of Links -- confirm this is the shape ebooklib expects.
    book.toc = ((epub.Link(c.file_name, c.title, c.title) for c in chapters),
                (epub.Section(section_name), chapters))
    for c in chapters:
        book.add_item(c)
    # 'nav' first so readers get a navigation document before the chapters.
    book.spine = ['nav'] + chapters
    book.add_item(epub.EpubNcx())
    safe_filename = u''.join([x for x in title if x.isalpha() or x.isspace() or x.isdigit()]).replace(u' ', u'-')
    filename = u'{}.epub'.format(safe_filename.lower())
    epub.write_epub(filename, book, {})
def main():
    """Parse CLI arguments, fetch today's headlines and write them to an epub."""
    parser = argparse.ArgumentParser("Transform news from The Guardian's website into an epub file.")
    parser.add_argument('api_key', type=str)
    args = parser.parse_args()
    # Each article is a (title, url) pair.
    articles = get_todays_news(args.api_key)
    chapters = []
    for title, url in articles:
        chapters.append(make_chapter(title, scrape(url)))
    date_label = datetime.now().strftime(u'%A %d %B %Y')
    make_ebook(u'News for {}'.format(date_label), chapters)
# Script entry point.
if __name__ == '__main__':
    main()
| #!/usr/bin/env python
from datetime import datetime
from json import loads
from bs4 import BeautifulSoup
from ebooklib import epub
import requests
def get_todays_news():
api_key = ''
payload = {'api-key': api_key,
'section': 'world',
'from-date': '2015-03-22'}
r = requests.get('http://content.guardianapis.com/search', params=payload)
json = loads(r.text)
articles = [(x['webTitle'], x['webUrl']) for x in json['response']['results']]
return articles
def scrape(uri):
response = requests.get(uri)
soup = BeautifulSoup(response.text)
content = soup.find('div', class_='content__article-body')
filtered_content = content.find_all('p')
processed_content = u''.join([unicode(x) for x in filtered_content])
return processed_content
def make_chapter(title, content):
safe_title = u''.join([x for x in title if x.isalpha() or x.isspace()]).replace(u' ', u'-')
file_name = u'chapter-{}.xhtml'.format(safe_title)
chapter = epub.EpubHtml(title=title, file_name=file_name, lang='en')
chapter.content = u'<h1>{}</h1>{}'.format(title, content)
return chapter
def make_ebook(title, chapters):
book = epub.EpubBook()
book.set_title(title)
book.set_language('en')
date = datetime.now().strftime(u'%A %d %B %Y')
section_name = u'Headlines for {}'.format(date)
book.toc = ((epub.Link(c.file_name, c.title, c.title) for c in chapters),
(epub.Section(section_name), chapters))
for c in chapters:
book.add_item(c)
book.spine = ['nav'] + chapters
book.add_item(epub.EpubNcx())
safe_filename = u''.join([x for x in title if x.isalpha() or x.isspace() or x.isdigit()]).replace(u' ', u'-')
filename = u'{}.epub'.format(safe_filename.lower())
epub.write_epub(filename, book, {})
def main():
uris = get_todays_news()
chapters = []
for title, raw_content in uris:
processed_content = scrape(raw_content)
chapter = make_chapter(title, processed_content)
chapters.append(chapter)
date = datetime.now().strftime(u'%A %d %B %Y')
book_title = u'News for {}'.format(date)
make_ebook(book_title, chapters)
if __name__ == '__main__':
main()
| mit | Python |
5d51479eaf6b03d6b86faa4fe4ac79dda609f44d | Add code to clean_code function to clean quotes and emoticons. | LWprogramming/Hack-Brown2017,liuisaiah/Hack-Brown2017 | script.py | script.py | '''
Dataset source: https://nlds.soe.ucsc.edu/sarcasm2
By Shereen Oraby, Vrindavan Harrison, Lena Reed, Ernesto Hernandez, Ellen Riloff and Marilyn Walker. "Creating and Characterizing a Diverse Corpus of Sarcasm in Dialogue." In The 17th Annual SIGdial Meeting on Discourse and Dialogue (SIGDIAL), Los Angeles, California, USA, 2016.
'''
import numpy as np
import pandas
import re
def main():
    """Entry point: load and clean the sarcasm dataset."""
    data = clean_data()
def clean_data():
    """Load the sarcasm corpus and return it as a cleaned numpy matrix.

    Steps:
    1. Load data from csv file
    2. Encode the category and sarcasm-label columns as ints
    3. Remove unnecessary quotes from both text columns
    4. Remove emoticons from both text columns
    (Fixing typos via Microsoft's Bing Spell Check API is still TODO:
    https://www.microsoft.com/cognitive-services/en-us/bing-spell-check-api)
    """
    # step 1
    data = pandas.read_csv('sarcasm_v2.csv').as_matrix()
    # step 2
    data[:, 0] = np.array([find_category(x) for x in data[:, 0]])
    data[:, 1] = np.array([sarcasm(x) for x in data[:, 1]])
    # step 3
    data[:, 3] = np.array([clean_quotes(x) for x in data[:, 3]])
    data[:, 4] = np.array([clean_quotes(x) for x in data[:, 4]])
    # step 4 -- `remove_emoticons` was referenced but never defined anywhere
    # in the module, which raised a NameError at runtime; use the private
    # helper below instead.
    data[:, 3] = np.array([_remove_emoticons(x) for x in data[:, 3]])
    data[:, 4] = np.array([_remove_emoticons(x) for x in data[:, 4]])
    return data

def _remove_emoticons(text):
    """Strip common ASCII emoticons (e.g. ":)", ";-P", "=D") from *text*."""
    return re.sub(r'[:;=8][\-^o]?[)(\[\]DPpO3/\\|]', '', text)
def find_category(category):
    """Map a category code ('GEN', 'HYP' or 'RQ') to its integer label."""
    codes = {
        'GEN': 0,  # general
        'HYP': 1,  # hyperbole
        'RQ': 2,   # rhetorical question
    }
    return codes[category]
def sarcasm(sarcasm_value):
    """Map a sarcasm label ('sarc'/'notsarc') to 1/0."""
    labels = {'sarc': 1, 'notsarc': 0}
    return labels[sarcasm_value]
def get_data_index(ID):
    '''Find the index of the data point. Corresponds to 1234 in GEN_sarc_1234 under ID in data.

    Returns the trailing integer in *ID*, or None if *ID* does not end in
    digits.  (Implements the former TODO via a regex on the suffix.)
    '''
    match = re.search(r'(\d+)$', ID)
    return int(match.group(1)) if match else None
# Test
def clean_quotes(string):
    '''
    Remove one leading and one trailing double-quote from *string*, then
    collapse every doubled pair of double quotes into a single one,
    e.g. '""hello""' becomes '"hello"'.
    '''
    # Guard against empty input: the original indexed string[0]/string[-1]
    # unconditionally and raised IndexError on "".
    if string.startswith('"'):
        string = string[1:]
    if string.endswith('"'):
        string = string[:-1]
    # Triple quotes cannot survive the edge-stripping above; collapsing
    # doubled quotes also handles back-to-back cases like ""a""""b"".
    return re.sub('\"\"', "\"", string)
# Script entry point.
if __name__ == '__main__':
    main()
| '''
Dataset source: https://nlds.soe.ucsc.edu/sarcasm2
By Shereen Oraby, Vrindavan Harrison, Lena Reed, Ernesto Hernandez, Ellen Riloff and Marilyn Walker. "Creating and Characterizing a Diverse Corpus of Sarcasm in Dialogue." In The 17th Annual SIGdial Meeting on Discourse and Dialogue (SIGDIAL), Los Angeles, California, USA, 2016.
'''
import numpy as np
import pandas
import re
def main():
data = clean_data()
def clean_data():
'''
Steps:
1. Load data from csv file
2. Remove unnecessary quotes
3. Fix all typos using Microsoft's Bing Spell Check API: https://www.microsoft.com/cognitive-services/en-us/bing-spell-check-api
'''
# step 1
data = pandas.read_csv('sarcasm_v2.csv').as_matrix()
# print(data.shape)
data[:, 0] = np.array([find_category(x) for x in data[:, 0]])
data[:, 1] = np.array([sarcasm(x) for x in data[:, 1]])
# print(data[0,1]) # should be 1 for sarcasm
# step 2
return data
def find_category(category):
return {
'GEN': 0, # general
'HYP': 1, # hyperbole
'RQ': 2 # rhetorical question
}[category]
def sarcasm(sarcasm_value):
return {
'sarc': 1, # true for sarcasm
'notsarc': 0 # false for sarcasm
}[sarcasm_value]
def get_data_index(ID):
'''find the index of the data point. Corresponds to 1234 in GEN_sarc_1234 under ID in data.
'''
# TODO: given a string as shown in the comment, extract the number in it, possibly with regex.
# Test
def clean_quotes(string):
'''
clean_quotes a string and removes beginning and closing quotation marks. Also replaces all duplicated double quotation marks with just one set of double quotation marks, e.g. ""hello"" with "hello"
'''
if string[0] == '"':
string = string[1:] # first character
if string[-1] == '"':
string = string[:-1] # last character
string = re.sub('\"\"', "\"", string)
# triple quotes should not exist after removing first and last quotes from beginning and end, respectively, of the original string.
# quadruple quotes will also be removed by replacing all duplicated double quotes "" with a single double quote ". This way, back-to-back duplicated double quotes (e.g. ""hello""""world"" will become "hello""world") will be taken care of as well.
return string
if __name__ == '__main__':
main()
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.