commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
c358123651df5eb900ddeff113512462c842d2a6 | Expand as_dict on Link | LINKIWI/linkr,LINKIWI/linkr,LINKIWI/linkr | models/link.py | models/link.py | import time
import config.options
import util.cryptography
from linkr import db
class Link(db.Model):
__tablename__ = 'link'
link_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
user_id = db.Column(db.Integer, index=True, default=None)
submit_time = db.Column(db.Integer)
password_hash = db.Column(db.Text, default=None)
alias = db.Column(db.String(32), index=True, unique=True)
outgoing_url = db.Column(db.Text)
def __init__(
self,
alias,
outgoing_url,
password=None,
user_id=None,
):
self.submit_time = int(time.time())
self.alias = alias
self.outgoing_url = outgoing_url
self.password_hash = util.cryptography.secure_hash(password) if password else None
self.user_id = user_id
def validate_password(self, password):
return not self.password_hash or \
util.cryptography.secure_hash(password) == self.password_hash
def as_dict(self):
return {
'link_id': self.link_id,
'user_id': self.user_id,
'submit_time': self.submit_time,
'alias': self.alias,
'full_alias': '{base}/{alias}'.format(base=config.options.LINKR_URL, alias=self.alias),
'outgoing_url': self.outgoing_url,
'is_password_protected': bool(self.password_hash),
}
| import time
import util.cryptography
from linkr import db
class Link(db.Model):
__tablename__ = 'link'
link_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
user_id = db.Column(db.Integer, index=True, default=None)
submit_time = db.Column(db.Integer)
password_hash = db.Column(db.Text, default=None)
alias = db.Column(db.String(32), index=True, unique=True)
outgoing_url = db.Column(db.Text)
def __init__(
self,
alias,
outgoing_url,
password=None,
user_id=None,
):
self.submit_time = int(time.time())
self.alias = alias
self.outgoing_url = outgoing_url
self.password_hash = util.cryptography.secure_hash(password) if password else None
self.user_id = user_id
def validate_password(self, password):
return not self.password_hash or \
util.cryptography.secure_hash(password) == self.password_hash
def as_dict(self):
return {
'link_id': self.link_id,
'submit_time': self.submit_time,
'hits': self.hits,
'alias': self.alias,
'outgoing_url': self.outgoing_url,
}
| mit | Python |
0ea9172e253d36dffab40b3df64ee08cbfc908aa | remove only from the user and not from the group | moranmo29/ShareLink,moranmo29/ShareLink,moranmo29/ShareLink | models/link.py | models/link.py | #this model keeps the link
from google.appengine.ext import ndb
from user import User
import datetime
import time
class Link(ndb.Model):
user = ndb.KeyProperty()
description = ndb.StringProperty()
url_link = ndb.StringProperty() #error when write linkproperty
from_link = ndb.StringProperty()
time_of_enter_the_link=ndb.DateTimeProperty(auto_now_add=True)
ifInTheGroup=ndb.BooleanProperty(default=False)
@staticmethod
def getLink(user,link_url,des,from_link):
if not link_url:
self.error(403)
self.response.write('Empty url link submitted')
return
link=Link.query(Link.user == user , Link.description == des , Link.url_link == link_url , Link.from_link==from_link).get()
if link:
return link
return None
@staticmethod
def getAllLinksPerUser(user):
links=[]
qur=Link.query(Link.user == user.key ).order(-Link.time_of_enter_the_link)
if qur:
for url_link in qur:
links.append(url_link)
return links
return None
@staticmethod
def remove(user,link_url,des,from_link):
if from_link == "None" :
link=Link.getLink(user,link_url,des,None)
link=Link.getLink(user,link_url,des,from_link)
if link is not None:
if link.ifInTheGroup is False:
link.key.delete();
return
@staticmethod
def addlinkfronuser(user,link_url,des,fromuser):
link=Link()
link.description=des
link.url_link=link_url
link.user=user.key
link.from_link=fromuser.email
link.put()
return
| #this model keeps the link
from google.appengine.ext import ndb
from user import User
import datetime
import time
class Link(ndb.Model):
user = ndb.KeyProperty()
description = ndb.StringProperty()
url_link = ndb.StringProperty() #error when write linkproperty
from_link = ndb.StringProperty()
time_of_enter_the_link=ndb.DateTimeProperty(auto_now_add=True)
ifInTheGroup=ndb.BooleanProperty(default=False)
@staticmethod
def getLink(user,link_url,des,from_link):
if not link_url:
self.error(403)
self.response.write('Empty url link submitted')
return
link=Link.query(Link.user == user , Link.description == des , Link.url_link == link_url , Link.from_link==from_link).get()
if link:
return link
return None
@staticmethod
def getAllLinksPerUser(user):
links=[]
qur=Link.query(Link.user == user.key ).order(-Link.time_of_enter_the_link)
if qur:
for url_link in qur:
links.append(url_link)
return links
return None
@staticmethod
def remove(user,link_url,des,from_link):
if from_link == "None" :
link=Link.getLink(user,link_url,des,None)
link=Link.getLink(user,link_url,des,from_link)
#link=Link.query(Link.user == user , Link.description == des , Link.url_link == link_url , Link.from_link==None).get()
if link is not None:
link.key.delete();
return
@staticmethod
def addlinkfronuser(user,link_url,des,fromuser):
link=Link()
link.description=des
link.url_link=link_url
link.user=user.key
link.from_link=fromuser.email
link.put()
return
| mit | Python |
18736f7ef302ee75453c41a2f08bb69ca96e4d9f | Bump version | hypothesis/bouncer,hypothesis/bouncer,hypothesis/bouncer | bouncer/__about__.py | bouncer/__about__.py | """Metadata about bouncer shared between setup.py and bouncer code."""
__all__ = ["__version__"]
__version__ = "0.0.7" # PEP440-compliant version number.
| """Metadata about bouncer shared between setup.py and bouncer code."""
__all__ = ["__version__"]
__version__ = "0.0.6" # PEP440-compliant version number.
| bsd-2-clause | Python |
e46c6c85027dcb596392056972350b4e4073b628 | delete grappelli | undocume/undocume,undocume/undocume | undocume/settings/base.py | undocume/settings/base.py | """
Django settings for undocume project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + "../../")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY WARNING: don't run with debug turned on in production!
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'south',
'home',
'storages',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'undocume.urls'
WSGI_APPLICATION = 'undocume.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or
# "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
| """
Django settings for undocume project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + "../../")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
GRAPPELLI_ADMIN_TITLE="Undocume"
# SECURITY WARNING: don't run with debug turned on in production!
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'south',
'home',
'storages',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'undocume.urls'
WSGI_APPLICATION = 'undocume.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or
# "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
| mit | Python |
7d30c3e74d2400bb4c976a02f671880560c65b43 | Update merge sort | kirnap/algorithms-in-python | merge_sort.py | merge_sort.py | # Implementation of merge-sort algorithm is purely for educational purposes.
# This example is an intermediate for python learners and will be really helpful to better understand the programming
# and algorithm paradigm and highly recommended for python learners.
# Input is an list of any length and any entries and the output is a sorted version of a list
def merge(first, second):
"""
This function returns the merged version of first and second list object.
Input lists has to be sorted in itself for example list1 = [3,5,7,64] where the integers listed in increasing order
:param first:
:param second:
:return: ret
"""
i = 0
j = 0
ret = []
# while loop is made for operation to be done until the total length of output is equal to sum of input lengths
while len(ret) != len(first) + len(second):
if i == len(first): # this means that all the entries of first are appended to ret list and no need for comparison
ret += second[j:]
elif j == len(second): # similarly all the entries of second list appended to the result list
ret += first[i:]
else: # else statement compares two lists and then appends the smallest entry.
if first[i] < second[j]:
ret.append(first[i])
i += 1
else:
ret.append(second[j])
j += 1
return ret
def merge_sort(iterable):
"""
Recursive call reduces until the base case which is the case of 1 entry in a list and then combines the two arrays
and this operation is made until it reaches one single list
Note that there is a python slice expression which partitions a list into two lists
:param iterable:
:return: Sorted version of list
"""
if len(iterable) == 1:
return iterable
else:
left_part = iterable[:(len(iterable) / 2)]
right_part = iterable[len(iterable) / 2:]
result = merge(merge_sort(right_part), merge_sort(left_part))
return result
if __name__ == '__main__':
print merge_sort([1,14,7,5,11,3,19,16])
| # Implementation of merge-sort algorithm is purely for educational purposes.
# This example is an intermediate for python learners and will be really helpful to better understand the programming
# and algorithm paradigm and highly recommended for python learners.
# Input is an list of any length and any entries and the output is a sorted version of a list
def merge(first, second):
"""
This function returns the merged version of first and second list object.
Input lists has to be sorted in itself for example list1 = [3,5,7,64] where the integers listed in increasing order
:param first:
:param second:
:return: ret
"""
i = 0
j = 0
ret = []
# while loop is made for operation to be done until the total length of output is equal to sum of input lengths
while len(ret) != len(first) + len(second):
if i == len(first): # this means that all the entries of first are appended to ret list and no need for comparison
ret += second[j:]
elif j == len(second): # similarly all the entries of second list appended to the result list
ret += first[i:]
else: # else statement compares two lists and then appends the smallest entry.
if first[i] < second[j]:
ret.append(first[i])
i += 1
else:
ret.append(second[j])
j += 1
return ret
def merge_sort(iterable):
"""
Recursive call reduces until the base case which is the case of 1 entry in a list and then combines the two arrays
and this operation is made until it reaches one single list
Note that there is a python slice expression which partitions a list into two lists
:param iterable:
:return: Sorted version of list
"""
if len(iterable) == 1:
return iterable
else:
left_part = iterable[:(len(iterable) / 2)]
right_part = iterable[len(iterable) / 2:]
result = merge(merge_sort(right_part), merge_sort(left_part))
return result
if __name__ == '__main__':
print merge_sort([1,14,7,5,11,3,19,16])
| mit | Python |
4732cfb10dc4a7126166b340c3bc2a6023de924e | Save work on performance demo | jonathanstallings/data-structures | merge_sort.py | merge_sort.py | """
Placeholder for Jonathan's ridiculously long docstring
"""
def merge_srt(un_list):
if len(un_list) > 1:
mid = len(un_list) // 2
left_half = un_list[:mid]
right_half = un_list[mid:]
merge_srt(left_half)
merge_srt(right_half)
x = y = z = 0
while x < len(left_half) and y < len(right_half):
if left_half[x] < right_half[y]:
un_list[z] = left_half[x]
x += 1
else:
un_list[z] = right_half[y]
y += 1
z += 1
while x < len(left_half):
un_list[z] = left_half[x]
x += 1
z += 1
while y < len(right_half):
un_list[z] = right_half[y]
y += 1
z += 1
if __name__ == '__main__':
even_half = range(0, 1001, 2)
odd_half = range(1, 1000, 2)
BEST_CASE = range(0, 1001)
WORST_CASE = even_half + odd_half
from timeit import Timer
SETUP = """from __main__ import BEST_CASE, WORST_CASE, merge_srt"""
best = Timer('merge_srt({})'.format(BEST_CASE), SETUP).timeit(1000)
worst = Timer('merge_srt({})'.format(WORST_CASE), SETUP).timeit(1000)
print("""Best case represented as a list that is already sorted\n
Worst case represented as a list that is absolute reverse of sorted""")
print('Best Case: {}'.format(best))
print('Worst Case: {}'.format(worst))
| """
Placeholder for Jonathan's ridiculously long docstring
"""
def merge_srt(un_list):
if len(un_list) > 1:
mid = len(un_list) // 2
left_half = un_list[:mid]
right_half = un_list[mid:]
merge_srt(left_half)
merge_srt(right_half)
x = y = z = 0
while x < len(left_half) and y < len(right_half):
if left_half[x] < right_half[y]:
un_list[z] = left_half[x]
x += 1
else:
un_list[z] = right_half[y]
y += 1
z += 1
while x < len(left_half):
un_list[z] = left_half[x]
x += 1
z += 1
while y < len(right_half):
un_list[z] = right_half[y]
y += 1
z += 1
if __name__ == '__main__':
even_half = range(0, 1001, 2)
odd_half = range(1, 1000, 2)
BEST_CASE = range(0, 1001)
WORST_CASE = even_half + odd_half
from timeit import Timer
setup = """
'merge_srt({})'.format(BEST_CASE),
'from __main__ import BEST_CASE'
"""
best = Timer(
).timeit(1000)
worst = Timer(
'insertion_sort({})'.format(WORST_CASE),
'from __main__ import WORST_CASE, insertion_sort').timeit(1000)
print("""Best case represented as a list that is already sorted\n
Worst case represented as a list that is absolute reverse of sorted""")
print('Best Case: {}'.format(best))
print('Worst Case: {}'.format(worst))
| mit | Python |
788913436e9924d6fde9e5c573f71a2f270f2bc4 | Correct import | RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline | luigi/tasks/rgd/organism.py | luigi/tasks/rgd/organism.py | # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import luigi
from tasks.config import rgd
from tasks.config import output
from tasks.utils.entry_writers import Output
from tasks.utils.fetch import FetchTask
from databases.rgd import parsers
from databases.rgd import helpers
class RgdOrganism(luigi.Task):
organism = luigi.Parameter()
def requires(self):
conf = rgd()
summary = helpers.RgdInfo.from_name(self.organism)
local_genes = conf.raw(self.organism + '-genes.txt')
local_sequences = conf.raw(self.organism + 'sequences.fasta')
return [
FetchTask(
remote_path=summary.sequence_uri(conf),
local_path=local_sequences,
),
FetchTask(
remote_path=summary.gene_uri(conf),
local_path=local_genes,
),
]
def output(self):
prefix = os.path.basename(self.organism)
return Output.build(output().base, 'rgd', prefix)
def run(self):
extract, fetch = self.requires()
genes_file = fetch.output().fn
seqs_file = extract.output().fn
with self.output().writer() as writer:
with helpers.indexed(seqs_file) as indexed, \
open(genes_file, 'r') as handle:
writer.write_all(parsers.parse(handle, indexed))
| # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import luigi
from tasks.config import rgd
from tasks.config import output
from tasks.utils.entry_writers import Output
from tasks.utils.fetch import Fetch as FetchTask
from databases.rgd import parsers
from databases.rgd import helpers
class RgdOrganism(luigi.Task):
organism = luigi.Parameter()
def requires(self):
conf = rgd()
summary = helpers.RgdInfo.from_name(self.organism)
local_genes = conf.raw(self.organism + '-genes.txt')
local_sequences = conf.raw(self.organism + 'sequences.fasta')
return [
FetchTask(
remote_path=summary.sequence_uri(conf),
local_path=local_sequences,
),
FetchTask(
remote_path=summary.gene_uri(conf),
local_path=local_genes,
),
]
def output(self):
prefix = os.path.basename(self.organism)
return Output.build(output().base, 'rgd', prefix)
def run(self):
extract, fetch = self.requires()
genes_file = fetch.output().fn
seqs_file = extract.output().fn
with self.output().writer() as writer:
with helpers.indexed(seqs_file) as indexed, \
open(genes_file, 'r') as handle:
writer.write_all(parsers.parse(handle, indexed))
| apache-2.0 | Python |
0307c6bae5eaf6859ac157d53a50035cde4d6313 | Update utils.py | huyouare/SnapchatBot,huyouare/SnapchatBot,Gendreau/SnapchatBot,agermanidis/SnapchatBot,N07070/SnapchatBot,N07070/SnapchatBot,agermanidis/SnapchatBot,Gendreau/SnapchatBot | snapchat_bots/utils.py | snapchat_bots/utils.py | import tempfile, mimetypes, datetime, subprocess, re, math
from PIL import Image
from constants import MEDIA_TYPE_IMAGE, MEDIA_TYPE_VIDEO, MEDIA_TYPE_VIDEO_WITHOUT_AUDIO, SNAP_IMAGE_DIMENSIONS
def file_extension_for_type(media_type):
if media_type is MEDIA_TYPE_IMAGE:
return ".jpg"
else:
return ".mp4"
def create_temporary_file(suffix):
return tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
def is_video_file(path):
return mimetypes.guess_type(path)[0].startswith("video")
def is_image_file(path):
return mimetypes.guess_type(path)[0].startswith("image")
def guess_type(path):
if is_video_file(path): return MEDIA_TYPE_VIDEO
if is_image_file(path): return MEDIA_TYPE_IMAGE
return MEDIA_TYPE_UNKNOWN
def resize_image(im, output_path):
im.thumbnail(SNAP_IMAGE_DIMENSIONS, Image.ANTIALIAS)
im.save(output_path)
def duration_string_to_timedelta(s):
[hours, minutes, seconds] = map(int, s.split(':'))
seconds = seconds + minutes * 60 + hours * 3600
return datetime.timedelta(seconds=seconds)
def get_video_duration(path):
result = subprocess.Popen(["ffprobe", path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
matches = [x for x in result.stdout.readlines() if "Duration" in x]
duration_string = re.findall(r'Duration: ([0-9:]*)', matches[0])[0]
return math.ceil(duration_string_to_timedelta(duration_string).seconds)
| import tempfile, mimetypes, datetime, subprocess, re, math
from PIL import Image
from constants import MEDIA_TYPE_IMAGE, MEDIA_TYPE_VIDEO, MEDIA_TYPE_VIDEO_WITHOUT_AUDIO, SNAP_IMAGE_DIMENSIONS
def file_extension_for_type(media_type):
print media_type
if media_type is MEDIA_TYPE_IMAGE:
return ".jpg"
else:
return ".mp4"
def create_temporary_file(suffix):
return tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
def is_video_file(path):
return mimetypes.guess_type(path)[0].startswith("video")
def is_image_file(path):
return mimetypes.guess_type(path)[0].startswith("image")
def guess_type(path):
if is_video_file(path): return MEDIA_TYPE_VIDEO
if is_image_file(path): return MEDIA_TYPE_IMAGE
return MEDIA_TYPE_UNKNOWN
def resize_image(im, output_path):
im.thumbnail(SNAP_IMAGE_DIMENSIONS, Image.ANTIALIAS)
im.save(output_path)
def duration_string_to_timedelta(s):
[hours, minutes, seconds] = map(int, s.split(':'))
seconds = seconds + minutes * 60 + hours * 3600
return datetime.timedelta(seconds=seconds)
def get_video_duration(path):
result = subprocess.Popen(["ffprobe", path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
matches = [x for x in result.stdout.readlines() if "Duration" in x]
duration_string = re.findall(r'Duration: ([0-9:]*)', matches[0])[0]
return math.ceil(duration_string_to_timedelta(duration_string).seconds)
| mit | Python |
d3574ab8e83fa1ca8bb86cc30b6bc29f36c78eac | Update piece.py | Niceboy5275/PythonChess,Niceboy5275/PythonChess | classes/piece.py | classes/piece.py | class piece(object):
_players = {'BLANC' : -1, 'NOIR' : 1}
_color = 0
_moved = False
def __init__(self, color):
self._color = color
def move(self, pox_x, pos_y, tableau, isPossible):
raise NotImplementedException()
def getLetter(self):
raise NotImplementedException()
def getColor(self):
return self._color
def setMoved(self):
self._moved=True
def hasMoved(self):
return self._moved
| class piece(object):
_players = {'BLANC' : -1, 'NOIR' : 1}
_color = 0
_moved = False
def __init__(self, color):
self._color = color
def move(self, pox_x, pos_y, tableau, isPossible):
raise NotImplementedException()
def getLetter(self):
raise NotImplementedException()
def getColor(self):
return self._color
def getImage(self):
raise NotImplementedException()
def setMoved(self):
self._moved=True
def hasMoved(self):
return self._moved
| mit | Python |
8ba94b216531f249a7097f10eb74f363af6151e2 | Rename internal variables to start with a _ | dstufft/xmlrpc2 | xmlrpc2/client.py | xmlrpc2/client.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import urllib.parse
class BaseTransport(object):
@property
def scheme(self):
raise NotImplementedError("Transports must have a scheme")
class HTTPTransport(BaseTransport):
scheme = "http"
class Client(object):
def __init__(self, uri, transports=None):
if transports is None:
transports = [HTTPTransport]
# Initialize transports
self._transports = {}
for transport in transports:
t = transport()
self._transports[t.scheme] = t
parsed = urllib.parse.urlparse(uri)
if parsed.scheme not in self._transports:
raise ValueError("Invalid uri scheme {scheme}. Must be one of {available}.".format(scheme=parsed.scheme, available=",".join(self._transports)))
self._transport = self._transports[parsed.scheme]
# Default to /RPC2 for path as it is a common endpoint
if not parsed.path:
parsed = parsed[:2] + ("/RPC2",) + parsed[3:]
self._uri = urllib.parse.urlunparse(parsed)
| from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import urllib.parse
class BaseTransport(object):
@property
def scheme(self):
raise NotImplementedError("Transports must have a scheme")
class HTTPTransport(BaseTransport):
scheme = "http"
class Client(object):
def __init__(self, uri, transports=None):
if transports is None:
transports = [HTTPTransport]
# Initialize transports
self.transports = {}
for transport in transports:
t = transport()
self.transports[t.scheme] = t
parsed = urllib.parse.urlparse(uri)
if parsed.scheme not in self.transports:
raise ValueError("Invalid uri scheme {scheme}. Must be one of {available}.".format(scheme=parsed.scheme, available=",".join(self.transports)))
self.transport = self.transports[parsed.scheme]
# Default to /RPC2 for path as it is a common endpoint
if not parsed.path:
parsed = parsed[:2] + ("/RPC2",) + parsed[3:]
self.uri = urllib.parse.urlunparse(parsed)
| bsd-2-clause | Python |
8265ac5a97e487ba690bc832e3c398365f76975f | Fix for Python 3.6 | healthchecks/healthchecks,iphoting/healthchecks,healthchecks/healthchecks,iphoting/healthchecks,iphoting/healthchecks,healthchecks/healthchecks,healthchecks/healthchecks,iphoting/healthchecks | hc/api/management/commands/smtpd.py | hc/api/management/commands/smtpd.py | import asyncore
import re
from smtpd import SMTPServer
from django.core.management.base import BaseCommand
from hc.api.models import Check
RE_UUID = re.compile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$")
class Listener(SMTPServer):
def __init__(self, localaddr, stdout):
self.stdout = stdout
super(Listener, self).__init__(localaddr, None)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None, rcpt_options=None):
to_parts = rcpttos[0].split("@")
code = to_parts[0]
if not RE_UUID.match(code):
self.stdout.write("Not an UUID: %s" % code)
return
try:
check = Check.objects.get(code=code)
except Check.DoesNotExist:
self.stdout.write("Check not found: %s" % code)
return
ua = "Email from %s" % mailfrom
check.ping(peer[0], "email", "", ua, data)
self.stdout.write("Processed ping for %s" % code)
class Command(BaseCommand):
help = "Listen for ping emails"
def add_arguments(self, parser):
parser.add_argument("--host",
help="ip address to listen on, default 0.0.0.0",
default="0.0.0.0")
parser.add_argument('--port',
help="port to listen on, default 25",
type=int,
default=25)
def handle(self, host, port, *args, **options):
listener = Listener((host, port), self.stdout)
print("Starting SMTP listener on %s:%d ..." % (host, port))
asyncore.loop()
| import asyncore
import re
from smtpd import SMTPServer
from django.core.management.base import BaseCommand
from hc.api.models import Check
RE_UUID = re.compile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$")
class Listener(SMTPServer):
def __init__(self, localaddr, stdout):
self.stdout = stdout
super(Listener, self).__init__(localaddr, None)
def process_message(self, peer, mailfrom, rcpttos, data):
to_parts = rcpttos[0].split("@")
code = to_parts[0]
if not RE_UUID.match(code):
self.stdout.write("Not an UUID: %s" % code)
return
try:
check = Check.objects.get(code=code)
except Check.DoesNotExist:
self.stdout.write("Check not found: %s" % code)
return
ua = "Email from %s" % mailfrom
check.ping(peer[0], "email", "", ua, data)
self.stdout.write("Processed ping for %s" % code)
class Command(BaseCommand):
help = "Listen for ping emails"
def add_arguments(self, parser):
parser.add_argument("--host",
help="ip address to listen on, default 0.0.0.0",
default="0.0.0.0")
parser.add_argument('--port',
help="port to listen on, default 25",
type=int,
default=25)
def handle(self, host, port, *args, **options):
listener = Listener((host, port), self.stdout)
print("Starting SMTP listener on %s:%d ..." % (host, port))
asyncore.loop()
| bsd-3-clause | Python |
9548ab5b95d59b4fd55653e163ea5c9f1f53dd6e | Bump the version. | delfick/bespin,realestate-com-au/bespin,realestate-com-au/bespin,delfick/bespin | bespin/__init__.py | bespin/__init__.py | VERSION="0.5.5.8"
| VERSION="0.5.5.7"
| mit | Python |
0cfb5f53f3cea5ec9cb42f205d79cd25b3034d90 | clean up the admin for task log | crateio/crate.io | crate_project/apps/pypi/admin.py | crate_project/apps/pypi/admin.py | from django.contrib import admin
from pypi.models import ChangeLog, Log, PackageModified, TaskLog
class ChangeLogAdmin(admin.ModelAdmin):
list_display = ["package", "version", "timestamp", "action", "handled"]
list_filter = ["timestamp", "handled"]
search_fields = ["package", "action"]
class LogAdmin(admin.ModelAdmin):
list_display = ["type", "created", "message"]
list_filter = ["type", "created"]
class PackageModifiedAdmin(admin.ModelAdmin):
list_display = ["url", "md5", "last_modified", "created", "modified"]
list_filter = ["created", "modified"]
search_fields = ["url", "md5"]
raw_id_fields = ["release_file"]
class TaskLogAdmin(admin.ModelAdmin):
list_display = ["task_id", "status", "name", "created", "modified"]
list_filter = ["status", "name", "created", "modified"]
search_fields = ["task_id"]
admin.site.register(ChangeLog, ChangeLogAdmin)
admin.site.register(Log, LogAdmin)
admin.site.register(PackageModified, PackageModifiedAdmin)
admin.site.register(TaskLog, TaskLogAdmin)
| from django.contrib import admin
from pypi.models import ChangeLog, Log, PackageModified, TaskLog
class ChangeLogAdmin(admin.ModelAdmin):
list_display = ["package", "version", "timestamp", "action", "handled"]
list_filter = ["timestamp", "handled"]
search_fields = ["package", "action"]
class LogAdmin(admin.ModelAdmin):
list_display = ["type", "created", "message"]
list_filter = ["type", "created"]
class PackageModifiedAdmin(admin.ModelAdmin):
list_display = ["url", "md5", "last_modified", "created", "modified"]
list_filter = ["created", "modified"]
search_fields = ["url", "md5"]
raw_id_fields = ["release_file"]
class TaskLogAdmin(admin.ModelAdmin):
pass
admin.site.register(ChangeLog, ChangeLogAdmin)
admin.site.register(Log, LogAdmin)
admin.site.register(PackageModified, PackageModifiedAdmin)
admin.site.register(TaskLog, TaskLogAdmin)
| bsd-2-clause | Python |
d01528228b647fb55b9e60f0279349adf0438ac1 | Solve Module2/assignment2 | kHarshit/DAT210x_Microsoft | Module2/assignment2.py | Module2/assignment2.py | import pandas as pd
import os
os.chdir("../Module1/")
cwd = os.getcwd()
print(cwd)
# TODO: Load up the 'tutorial.csv' dataset
csv_df = pd.read_csv('tutorial.csv', sep=',')
# TODO: Print the results of the .describe() method
print(csv_df)
print(csv_df.describe())
print(csv_df.info())
print(csv_df.head(3)) # csv_df.iloc[:3, :] OR tail(); default no is 5
print(csv_df.columns) # display the name of the columns
print(csv_df.index)
print(csv_df.dtypes) # what data type Pandas assigned each column
# TODO: Figure out which indexing method you need to use in order to index your dataframe with: [2:4,'col3']:
print(csv_df.loc[2:4, 'col3']) # .ix uses hybrid approach of selecting by column label and column index
| import pandas as pd
# TODO: Load up the 'tutorial.csv' dataset
#
# .. your code here ..
# TODO: Print the results of the .describe() method
#
# .. your code here ..
# TODO: Figure out which indexing method you need to
# use in order to index your dataframe with: [2:4,'col3']
# And print the results
#
# .. your code here ..
| mit | Python |
d9d2e8ce6ef1365eaa7e722128529ad5142ca31c | Add sleep to prevent 100% CPU usage in some cases | 5225225/bar,5225225/bar | modules/mpd.py | modules/mpd.py | import socket
import signal
import linelib
import time
import json
ID = "mpd"
def handler(x, y):
pass
signal.signal(signal.SIGUSR1, handler)
signal.signal(signal.SIGALRM, handler)
def mpd2dict(output):
x = output.split("\n")
d = dict()
for item in x[:-2]:
# MPD returns OK at the end, and there's a newline. This skips both of
# them.
key, val = item.split(":", maxsplit=1)
val = val.lstrip()
d[key] = val
return d
def sendline():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", 6600))
except ConnectionRefusedError:
time.sleep(1)
return
except OSError:
time.sleep(1)
return
version = sock.recv(2048)
assert version == b"OK MPD 0.19.0\n"
sock.send(b"currentsong\n")
currsong = mpd2dict(sock.recv(2048).decode("UTF-8"))
if currsong == {}:
return
sock.send(b"status\n")
status = mpd2dict(sock.recv(2048).decode("UTF-8"))
infodict = currsong.copy()
infodict.update(status)
titlecolour = "#ac4142"
albumcolour = "#6a9fb5"
dark_titlecolour = "#542020"
dark_albumcolour = "#3c5a66"
TC = str()
AC = str()
BC = str()
if infodict["state"] == "pause":
TC = dark_titlecolour
AC = dark_albumcolour
else:
TC = titlecolour
AC = albumcolour
# TODO make this code not ugly
formatcodes = "<span foreground='{}'>{}</span> - <span "\
"foreground='{}'>{}</span>".format(TC, infodict["Title"], AC,
infodict["Album"])
formatcodes = formatcodes.replace("&", "&")
linelib.sendblock(ID, {"full_text": formatcodes})
linelib.sendPID(ID)
linelib.waitsig(1)
click = linelib.getclick(ID).decode("UTF-8")
if click != "":
x = json.loads(click)
if x["button"] == 1:
sock.send(b"pause\n")
while True:
sendline()
| import socket
import signal
import linelib
import time
import json
ID = "mpd"
def handler(x, y):
pass
signal.signal(signal.SIGUSR1, handler)
signal.signal(signal.SIGALRM, handler)
def mpd2dict(output):
x = output.split("\n")
d = dict()
for item in x[:-2]:
# MPD returns OK at the end, and there's a newline. This skips both of
# them.
key, val = item.split(":", maxsplit=1)
val = val.lstrip()
d[key] = val
return d
def sendline():
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", 6600))
except ConnectionRefusedError:
return
except OSError:
return
version = sock.recv(2048)
assert version == b"OK MPD 0.19.0\n"
sock.send(b"currentsong\n")
currsong = mpd2dict(sock.recv(2048).decode("UTF-8"))
if currsong == {}:
return
sock.send(b"status\n")
status = mpd2dict(sock.recv(2048).decode("UTF-8"))
infodict = currsong.copy()
infodict.update(status)
titlecolour = "#ac4142"
albumcolour = "#6a9fb5"
dark_titlecolour = "#542020"
dark_albumcolour = "#3c5a66"
TC = str()
AC = str()
BC = str()
if infodict["state"] == "pause":
TC = dark_titlecolour
AC = dark_albumcolour
else:
TC = titlecolour
AC = albumcolour
# TODO make this code not ugly
formatcodes = "<span foreground='{}'>{}</span> - <span "\
"foreground='{}'>{}</span>".format(TC, infodict["Title"], AC,
infodict["Album"])
formatcodes = formatcodes.replace("&", "&")
linelib.sendblock(ID, {"full_text": formatcodes})
linelib.sendPID(ID)
linelib.waitsig(1)
click = linelib.getclick(ID).decode("UTF-8")
if click != "":
x = json.loads(click)
if x["button"] == 1:
sock.send(b"pause\n")
while True:
sendline()
| mit | Python |
e64195a005be583f32754e49e870b198ee7bc396 | Increase case search limit to 100 results | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/pillows/mappings/case_search_mapping.py | corehq/pillows/mappings/case_search_mapping.py | from corehq.pillows.mappings.case_mapping import CASE_ES_TYPE
from corehq.pillows.mappings.utils import mapping_from_json
from corehq.util.elastic import es_index
from pillowtop.es_utils import ElasticsearchIndexInfo
CASE_SEARCH_INDEX = es_index("case_search_2016-03-15")
CASE_SEARCH_ALIAS = "case_search"
CASE_SEARCH_MAX_RESULTS = 100
CASE_SEARCH_MAPPING = mapping_from_json('case_search_mapping.json')
CASE_SEARCH_INDEX_INFO = ElasticsearchIndexInfo(
index=CASE_SEARCH_INDEX,
alias=CASE_SEARCH_ALIAS,
type=CASE_ES_TYPE,
mapping=CASE_SEARCH_MAPPING,
)
| from corehq.pillows.mappings.case_mapping import CASE_ES_TYPE
from corehq.pillows.mappings.utils import mapping_from_json
from corehq.util.elastic import es_index
from pillowtop.es_utils import ElasticsearchIndexInfo
CASE_SEARCH_INDEX = es_index("case_search_2016-03-15")
CASE_SEARCH_ALIAS = "case_search"
CASE_SEARCH_MAX_RESULTS = 10
CASE_SEARCH_MAPPING = mapping_from_json('case_search_mapping.json')
CASE_SEARCH_INDEX_INFO = ElasticsearchIndexInfo(
index=CASE_SEARCH_INDEX,
alias=CASE_SEARCH_ALIAS,
type=CASE_ES_TYPE,
mapping=CASE_SEARCH_MAPPING,
)
| bsd-3-clause | Python |
cd395e2f183ec01fd7ce8245e2de2c5bd802eac4 | handle no groups on partner in admin | Socialsquare/verellen.biz,Socialsquare/verellen.biz,Socialsquare/verellen.biz,Socialsquare/verellen.biz | verellen/partner/admin.py | verellen/partner/admin.py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from partner.models import Partner, PartnerGroup, PriceList, SalesTool
class UserInline(admin.TabularInline):
model = User
class PartnerGroupAdmin(admin.ModelAdmin):
fields = [ 'name', 'email' ]
class PartnerInline(admin.TabularInline):
model = Partner
can_delete = False
class UserAdmin(UserAdmin):
inlines = (PartnerInline,)
list_display = UserAdmin.list_display + ('list_display_partner',)
def list_display_partner(self, user):
if user.partner and user.partner.group:
return user.partner.group.name
else:
return None
list_display_partner.short_description = "Partner group"
class PriceListAdmin(admin.ModelAdmin):
fields = [ 'name', 'file', 'partner_group' ]
list_display = [ 'name', 'partner_group' ]
class SalesToolAdmin(admin.ModelAdmin):
fields = [ 'name', 'file' ]
list_display = [ 'name', 'file' ]
list_editable = [ 'file' ]
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(PartnerGroup, PartnerGroupAdmin)
# admin.site.register(TearSheet, TearSheetAdmin)
admin.site.register(PriceList, PriceListAdmin)
admin.site.register(SalesTool, SalesToolAdmin)
| from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from partner.models import Partner, PartnerGroup, PriceList, SalesTool
class UserInline(admin.TabularInline):
model = User
class PartnerGroupAdmin(admin.ModelAdmin):
fields = [ 'name', 'email' ]
class PartnerInline(admin.TabularInline):
model = Partner
can_delete = False
class UserAdmin(UserAdmin):
inlines = (PartnerInline,)
list_display = UserAdmin.list_display + ('list_display_partner',)
def list_display_partner(self, user):
if user.partner:
return user.partner.group.name
else:
return None
list_display_partner.short_description = "Partner group"
class PriceListAdmin(admin.ModelAdmin):
fields = [ 'name', 'file', 'partner_group' ]
list_display = [ 'name', 'partner_group' ]
class SalesToolAdmin(admin.ModelAdmin):
fields = [ 'name', 'file' ]
list_display = [ 'name', 'file' ]
list_editable = [ 'file' ]
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(PartnerGroup, PartnerGroupAdmin)
# admin.site.register(TearSheet, TearSheetAdmin)
admin.site.register(PriceList, PriceListAdmin)
admin.site.register(SalesTool, SalesToolAdmin)
| mit | Python |
11580c007cda3e43fe38dfe10ecc36b75eaa7c56 | Add missing coma ',' | SerpentCS/purchase-workflow,SerpentCS/purchase-workflow,SerpentCS/purchase-workflow | purchase_order_variant_mgmt/__openerp__.py | purchase_order_variant_mgmt/__openerp__.py | # -*- coding: utf-8 -*-
# Copyright 2016 Pedro M. Baeza <pedro.baeza@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': 'Handle easily multiple variants on Purchase Orders',
'summary': 'Handle the addition/removal of multiple variants from '
'product template into the purchase order',
'version': '9.0.1.0.0',
'author': 'Tecnativa,'
'Odoo Community Association (OCA)',
'category': 'Purchases',
'license': 'AGPL-3',
'website': 'https://www.tecnativa.com',
'depends': [
'purchase',
'web_widget_x2many_2d_matrix',
],
'demo': [],
'data': [
'wizard/purchase_manage_variant_view.xml',
'views/purchase_order_view.xml',
],
'installable': True,
}
| # -*- coding: utf-8 -*-
# Copyright 2016 Pedro M. Baeza <pedro.baeza@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': 'Handle easily multiple variants on Purchase Orders',
'summary': 'Handle the addition/removal of multiple variants from '
'product template into the purchase order',
'version': '9.0.1.0.0',
'author': 'Tecnativa'
'Odoo Community Association (OCA)',
'category': 'Purchases',
'license': 'AGPL-3',
'website': 'https://www.tecnativa.com',
'depends': [
'purchase',
'web_widget_x2many_2d_matrix',
],
'demo': [],
'data': [
'wizard/purchase_manage_variant_view.xml',
'views/purchase_order_view.xml',
],
'installable': True,
}
| agpl-3.0 | Python |
37671781813c5dca3c8d8eaf65c0fa9f91a6117f | Set up default request timeout to 15 seconds #18081 | business-factory/gold-digger | gold_digger/data_providers/_provider.py | gold_digger/data_providers/_provider.py | # -*- coding: utf-8 -*-
import requests
import requests.exceptions
from abc import ABCMeta, abstractmethod
from decimal import Decimal, InvalidOperation
class Provider(metaclass=ABCMeta):
DEFAULT_REQUEST_TIMEOUT = 15 # 15 seconds for both connect & read timeouts
def __init__(self, logger):
self.logger = logger
@property
@abstractmethod
def name(self):
pass
@abstractmethod
def get_supported_currencies(self, date_of_exchange):
pass
@abstractmethod
def get_by_date(self, date_of_exchange, currency):
pass
@abstractmethod
def get_all_by_date(self, date_of_exchange, currencies):
pass
@abstractmethod
def get_historical(self, origin_date, currencies):
pass
def _get(self, url, params=None):
try:
response = requests.get(url, params=params, timeout=self.DEFAULT_REQUEST_TIMEOUT)
if response.status_code == 200:
return response
else:
self.logger.error("%s - status code: %s, URL: %s, Params: %s", self, response.status_code, url, params)
except requests.exceptions.RequestException as e:
self.logger.error("%s - exception: %s, URL: %s, Params: %s", self, e, url, params)
def _post(self, url, params=None):
try:
response = requests.post(url, params=params, timeout=self.DEFAULT_REQUEST_TIMEOUT)
if response.status_code == 200:
return response
else:
self.logger.error("%s - status code: %s, URL: %s, Params: %s", self, response.status_code, url, params)
except requests.exceptions.RequestException as e:
self.logger.error("%s - exception: %s, URL: %s, Params: %s", self, e, url, params)
def _to_decimal(self, value, currency=""):
try:
return Decimal(value)
except InvalidOperation:
self.logger.error("%s - Invalid operation: value %s is not a number (currency %s)" % (self, value, currency))
| # -*- coding: utf-8 -*-
import requests
import requests.exceptions
from abc import ABCMeta, abstractmethod
from decimal import Decimal, InvalidOperation
class Provider(metaclass=ABCMeta):
def __init__(self, logger):
self.logger = logger
@property
@abstractmethod
def name(self):
pass
@abstractmethod
def get_supported_currencies(self, date_of_exchange):
pass
@abstractmethod
def get_by_date(self, date_of_exchange, currency):
pass
@abstractmethod
def get_all_by_date(self, date_of_exchange, currencies):
pass
@abstractmethod
def get_historical(self, origin_date, currencies):
pass
def _get(self, url, params=None):
try:
response = requests.get(url, params=params)
if response.status_code == 200:
return response
else:
self.logger.error("%s get %s status code on %s. Params: %s." % (self, response.status_code, url, params))
except requests.exceptions.RequestException as e:
self.logger.error("%s get %s exception on %s. Params: %s." % (self, e, url, params))
def _post(self, url, **kwargs):
try:
response = requests.post(url, **kwargs)
if response.status_code == 200:
return response
else:
self.logger.error("%s get %s status code on %s" % (self, response.status_code, url))
except requests.exceptions.RequestException as e:
self.logger.error("%s get %s exception on %s" % (self, e, url))
def _to_decimal(self, value, currency=""):
try:
return Decimal(value)
except InvalidOperation:
self.logger.error("%s - Invalid operation: value %s is not a number (currency %s)" % (self, value, currency))
| apache-2.0 | Python |
eb2e4e8c69ed722363aa9d93ecc7c4c0c24d12b3 | increase version to 0.4.1 | arneb/django-campaign,arneb/django-campaign | campaign/__init__.py | campaign/__init__.py | __version__ = '0.4.1'
default_app_config = 'campaign.apps.CampaignConfig'
| __version__ = '0.4.0'
default_app_config = 'campaign.apps.CampaignConfig'
| bsd-3-clause | Python |
68711ec4c5ef45730b6bbde252468a05fdaf184f | FIX drop pdb | gisce/primestg | spec/Order_B07_spec.py | spec/Order_B07_spec.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from primestg.order.orders import Order
from expects import expect, equal
with description('Order B07 IP FTP Generation'):
with it('generates expected B07 xml'):
expected_result = '<Order IdPet="1234" IdReq="B07" Version="3.1.c">\n ' \
'<Cnc Id="CIR000000000">\n ' \
'<B07 IPftp="10.1.5.206"/>\n ' \
'</Cnc>\n</Order>\n'
generic_values = {
'id_pet': '1234',
'id_req': 'B07',
'cnc': 'CIR000000000',
}
payload = {
'IPftp': '10.1.5.206',
}
order = Order('B07_ipftp')
order = order.create(generic_values, payload)
expect(order).to(equal(expected_result))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from primestg.order.orders import Order
from expects import expect, equal
with description('Order B07 IP FTP Generation'):
with it('generates expected B07 xml'):
import pdb; pdb.set_trace()
expected_result = '<Order IdPet="1234" IdReq="B07" Version="3.1.c">\n ' \
'<Cnc Id="CIR000000000">\n ' \
'<B07 IPftp="10.1.5.206"/>\n ' \
'</Cnc>\n</Order>\n'
generic_values = {
'id_pet': '1234',
'id_req': 'B07',
'cnc': 'CIR000000000',
}
payload = {
'IPftp': '10.1.5.206',
}
order = Order('B07_ipftp')
order = order.create(generic_values, payload)
expect(order).to(equal(expected_result))
| agpl-3.0 | Python |
f202efa24934a197a45b261a403edf4fdcd4673e | fix login required decorator | bulletshot60/primer-web,bulletshot60/primer-web,bulletshot60/primer-web,bulletshot60/primer-web | blockly/views.py | blockly/views.py | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
import logging
import json
from .models import BlocklyBot
# Get an instance of a logger
logger = logging.getLogger(__name__)
# Create your views here.
def index_view(request):
return render(request, 'index.html')
@login_required(login_url='/')
def program_view(request):
if len(BlocklyBot.objects.all()) == 0:
bot = BlocklyBot.objects.create(running = False)
bot.save()
return render(request, 'program.html', {'bot': bot})
else:
return render(request, 'program.html', {'bot': BlocklyBot.objects.all()[0]})
@login_required(login_url='/')
def blockly_programmer_view(request):
if len(BlocklyBot.objects.all()) == 0:
bot = BlocklyBot.objects.create(running = False)
bot.save()
return render(request, 'programmer.html', {'bot': bot})
else:
return render(request, 'programmer.html', {'bot': BlocklyBot.objects.all()[0]})
def login_view(request):
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
logger.debug(user)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect("/program")
else:
return HttpResponseRedirect("/", {"message": "account not active"})
else:
return HttpResponseRedirect("/", {"message": "username / password incorrect"})
@login_required(login_url='/')
def logout_view(request):
logout(request)
return HttpResponseRedirect("/", {"message": "username / password incorrect"})
def get_program_view(request):
bot = BlocklyBot.objects.all()[0]
return HttpResponse(json.dumps({"running": bot.running, "program": bot.program}), content_type='application/json')
@login_required(login_url='/')
def start_program_view(request):
bot = BlocklyBot.objects.all()[0]
bot.running = True
bot.save()
return HttpResponse(json.dumps({"success": True}), content_type='application/json')
@login_required(login_url='/')
def stop_program_view(request):
bot = BlocklyBot.objects.all()[0]
bot.running = False
bot.save()
return HttpResponse(json.dumps({"success": True}), content_type='application/json')
@login_required(login_url='/')
def submit_program_view(request):
bot = BlocklyBot.objects.all()[0]
bot.program = request.POST['program']
bot.save()
return HttpResponse(json.dumps({"success": True}), content_type='application/json') | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
import logging
import json
from .models import BlocklyBot
# Get an instance of a logger
logger = logging.getLogger(__name__)
# Create your views here.
def index_view(request):
return render(request, 'index.html')
@login_required
def program_view(request):
if len(BlocklyBot.objects.all()) == 0:
bot = BlocklyBot.objects.create(running = False)
bot.save()
return render(request, 'program.html', {'bot': bot})
else:
return render(request, 'program.html', {'bot': BlocklyBot.objects.all()[0]})
@login_required
def blockly_programmer_view(request):
if len(BlocklyBot.objects.all()) == 0:
bot = BlocklyBot.objects.create(running = False)
bot.save()
return render(request, 'programmer.html', {'bot': bot})
else:
return render(request, 'programmer.html', {'bot': BlocklyBot.objects.all()[0]})
def login_view(request):
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
logger.debug(user)
if user is not None:
if user.is_active:
login(request, user)
return HttpResponseRedirect("/program")
else:
return HttpResponseRedirect("/", {"message": "account not active"})
else:
return HttpResponseRedirect("/", {"message": "username / password incorrect"})
@login_required
def logout_view(request):
logout(request)
return HttpResponseRedirect("/", {"message": "username / password incorrect"})
def get_program_view(request):
bot = BlocklyBot.objects.all()[0]
return HttpResponse(json.dumps({"running": bot.running, "program": bot.program}), content_type='application/json')
@login_required
def start_program_view(request):
bot = BlocklyBot.objects.all()[0]
bot.running = True
bot.save()
return HttpResponse(json.dumps({"success": True}), content_type='application/json')
@login_required
def stop_program_view(request):
bot = BlocklyBot.objects.all()[0]
bot.running = False
bot.save()
return HttpResponse(json.dumps({"success": True}), content_type='application/json')
@login_required
def submit_program_view(request):
bot = BlocklyBot.objects.all()[0]
bot.program = request.POST['program']
bot.save()
return HttpResponse(json.dumps({"success": True}), content_type='application/json') | mit | Python |
a19d09a65af1370e6c35946c720d1258072113aa | add csv as allowed format | ExCiteS/geokey-export,ExCiteS/geokey-export,ExCiteS/geokey-export | geokey_export/urls.py | geokey_export/urls.py | from django.conf.urls import include, url
from rest_framework.urlpatterns import format_suffix_patterns
from views import (
IndexPage,
ExportOverview,
ExportCreate,
ExportDelete,
ExportToRenderer,
ExportGetExportContributions,
ExportGetProjectCategories,
ExportGetProjectCategoryContributions
)
datapatterns = [
url(
r'^admin/export/(?P<urlhash>[\w-]+)$',
ExportToRenderer.as_view(),
name='export_to_renderer')
]
datapatterns = format_suffix_patterns(datapatterns, allowed=['json', 'kml','csv'])
urlpatterns = [
url(
r'^admin/export/$',
IndexPage.as_view(),
name='index'),
url(
r'^admin/export/(?P<export_id>[0-9]+)/$',
ExportOverview.as_view(),
name='export_overview'),
url(
r'^admin/export/(?P<export_id>[0-9]+)/contributions/$',
ExportGetExportContributions.as_view(),
name='export_get_export_contributions'),
url(
r'^admin/export/(?P<export_id>[0-9]+)/delete/$',
ExportDelete.as_view(),
name='export_delete'),
url(
r'^admin/export/create/$',
ExportCreate.as_view(),
name='export_create'),
url(
r'^admin/export/projects/(?P<project_id>[0-9]+)/categories/$',
ExportGetProjectCategories.as_view(),
name='export_get_project_categories'),
url(
r'^admin/export/projects/(?P<project_id>[0-9]+)/categories/(?P<category_id>[0-9]+)/contributions/$',
ExportGetProjectCategoryContributions.as_view(),
name='export_get_project_category_contributions'),
url(
r'^', include(datapatterns))
]
| from django.conf.urls import include, url
from rest_framework.urlpatterns import format_suffix_patterns
from views import (
IndexPage,
ExportOverview,
ExportCreate,
ExportDelete,
ExportToRenderer,
ExportGetExportContributions,
ExportGetProjectCategories,
ExportGetProjectCategoryContributions
)
datapatterns = [
url(
r'^admin/export/(?P<urlhash>[\w-]+)$',
ExportToRenderer.as_view(),
name='export_to_renderer')
]
datapatterns = format_suffix_patterns(datapatterns, allowed=['json', 'kml'])
urlpatterns = [
url(
r'^admin/export/$',
IndexPage.as_view(),
name='index'),
url(
r'^admin/export/(?P<export_id>[0-9]+)/$',
ExportOverview.as_view(),
name='export_overview'),
url(
r'^admin/export/(?P<export_id>[0-9]+)/contributions/$',
ExportGetExportContributions.as_view(),
name='export_get_export_contributions'),
url(
r'^admin/export/(?P<export_id>[0-9]+)/delete/$',
ExportDelete.as_view(),
name='export_delete'),
url(
r'^admin/export/create/$',
ExportCreate.as_view(),
name='export_create'),
url(
r'^admin/export/projects/(?P<project_id>[0-9]+)/categories/$',
ExportGetProjectCategories.as_view(),
name='export_get_project_categories'),
url(
r'^admin/export/projects/(?P<project_id>[0-9]+)/categories/(?P<category_id>[0-9]+)/contributions/$',
ExportGetProjectCategoryContributions.as_view(),
name='export_get_project_category_contributions'),
url(
r'^', include(datapatterns))
]
| mit | Python |
c3f6b4ddf56b8844f2ddf91c566e233270c42f74 | Add `SampleArtifactCache` and `SampleReadsFileCache` SQL models | igboyes/virtool,igboyes/virtool,virtool/virtool,virtool/virtool | virtool/samples/models.py | virtool/samples/models.py | from sqlalchemy import Column, DateTime, Enum, Integer, String
from virtool.pg.utils import Base, SQLEnum
class ArtifactType(str, SQLEnum):
"""
Enumerated type for possible artifact types
"""
sam = "sam"
bam = "bam"
fasta = "fasta"
fastq = "fastq"
csv = "csv"
tsv = "tsv"
json = "json"
class SampleArtifact(Base):
"""
SQL model to store a sample artifact
"""
__tablename__ = "sample_artifact"
id = Column(Integer, primary_key=True)
sample = Column(String, nullable=False)
name = Column(String, nullable=False)
name_on_disk = Column(String)
size = Column(Integer)
type = Column(Enum(ArtifactType), nullable=False)
uploaded_at = Column(DateTime)
def __repr__(self):
return f"<SampleArtifact(id={self.id}, sample={self.sample}, name={self.name}, " \
f"name_on_disk={self.name_on_disk}, size={self.size}, type={self.type}, " \
f"uploaded_at={self.uploaded_at}"
class SampleReadsFile(Base):
"""
SQL model to store new sample reads files
"""
__tablename__ = "sample_reads_files"
id = Column(Integer, primary_key=True)
sample = Column(String, nullable=False)
name = Column(String(length=13), nullable=False)
name_on_disk = Column(String, nullable=False)
size = Column(Integer)
uploaded_at = Column(DateTime)
def __repr__(self):
return f"<SampleReadsFile(id={self.id}, sample={self.sample}, name={self.name}, " \
f"name_on_disk={self.name_on_disk}, size={self.size}, uploaded_at={self.uploaded_at})>"
class SampleArtifactCache(Base):
"""
SQL model to store a cached sample artifact
"""
__tablename__ = "sample_artifact_cache"
id = Column(Integer, primary_key=True)
sample = Column(String, nullable=False)
name = Column(String, nullable=False)
name_on_disk = Column(String)
size = Column(Integer)
type = Column(Enum(ArtifactType), nullable=False)
uploaded_at = Column(DateTime)
def __repr__(self):
return f"<SampleArtifactCache(id={self.id}, sample={self.sample}, name={self.name}, " \
f"name_on_disk={self.name_on_disk}, size={self.size}, type={self.type}, " \
f"uploaded_at={self.uploaded_at}"
class SampleReadsFileCache(Base):
"""
SQL model to store cached sample reads files
"""
__tablename__ = "sample_reads_files_cache"
id = Column(Integer, primary_key=True)
sample = Column(String, nullable=False)
name = Column(String(length=13), nullable=False)
name_on_disk = Column(String, nullable=False)
size = Column(Integer)
uploaded_at = Column(DateTime)
def __repr__(self):
return f"<SampleReadsFileCache(id={self.id}, sample={self.sample}, name={self.name}, " \
f"name_on_disk={self.name_on_disk}, size={self.size}, uploaded_at={self.uploaded_at})>"
| from sqlalchemy import Column, DateTime, Enum, Integer, String
from virtool.pg.utils import Base, SQLEnum
class ArtifactType(str, SQLEnum):
"""
Enumerated type for possible artifact types
"""
sam = "sam"
bam = "bam"
fasta = "fasta"
fastq = "fastq"
csv = "csv"
tsv = "tsv"
json = "json"
class SampleArtifact(Base):
"""
SQL model to store sample artifacts
"""
__tablename__ = "sample_artifacts"
id = Column(Integer, primary_key=True)
sample = Column(String, nullable=False)
name = Column(String, nullable=False)
name_on_disk = Column(String)
size = Column(Integer)
type = Column(Enum(ArtifactType), nullable=False)
uploaded_at = Column(DateTime)
class SampleReadsFile(Base):
"""
SQL model to store new sample reads files
"""
__tablename__ = "sample_reads_files"
id = Column(Integer, primary_key=True)
sample = Column(String, nullable=False)
name = Column(String(length=13), nullable=False)
name_on_disk = Column(String, nullable=False)
size = Column(Integer)
uploaded_at = Column(DateTime)
def __repr__(self):
return f"<SampleReadsFile(id={self.id}, sample={self.sample}, name={self.name}, " \
f"name_on_disk={self.name_on_disk}, size={self.size}, uploaded_at={self.uploaded_at})>"
| mit | Python |
474e79bfd64aeeb4e0ef0f24b614f3d19a72120e | Fix doctests | Ceasar/trees | trees/heap.py | trees/heap.py | """
Convenience wrapper for the functional heapq library.
"""
import heapq
# TODO: add a __contains__ method
class heap(object):
'''A tree-based data structure that satisfies the heap property.
A heap can be used as priority queue by pushing tuples onto the heap.
>>> import trees
>>> h = trees.heap.heap()
>>> h.push(1)
>>> h.push(3)
>>> h.push(2)
>>> h.top
1
>>> h.pop()
1
>>> h.top
2
>>> h.pop()
2
>>> h.pop()
3
>>> h.empty
True
'''
def __init__(self, items=None):
if items is None:
items = []
self._items = list(items)
heapq.heapify(self._items)
@property
def top(self):
return self._items[0]
@property
def empty(self):
return len(self) == 0
def pop(self):
'''Pop and return the smallest item from the heap, maintaining the heap
invariant. If the heap is empty, IndexError is raised.
'''
try:
return heapq.heappop(self._items)
except IndexError as e:
raise e
def push(self, item):
'''Push the value item onto the heap, maintaining the heap invariant.
If the item is not hashable, a TypeError is raised.
'''
try:
hash(item)
except TypeError as e:
raise e
else:
heapq.heappush(self._items, item)
def __len__(self):
return len(self._items)
def __iter__(self):
while len(self) > 0:
yield self.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| """
Convenience wrapper for the functional heapq library.
"""
import heapq
# TODO: add a __contains__ method
class heap(object):
'''A tree-based data structure that satisfies the heap property.
A heap can be used as priority queue by pushing tuples onto the heap.
>>> import trees
>>> h = trees.heap()
>>> h.push(1)
>>> h.push(3)
>>> h.push(2)
>>> h.top
1
>>> h.pop()
1
>>> h.top
2
>>> h.pop()
2
>>> h.pop()
3
>>> h.empty
True
'''
def __init__(self, items=None):
if items is None:
items = []
self._items = list(items)
heapq.heapify(self._items)
@property
def top(self):
return self._items[0]
@property
def empty(self):
return len(self) == 0
def pop(self):
'''Pop and return the smallest item from the heap, maintaining the heap
invariant. If the heap is empty, IndexError is raised.
'''
try:
return heapq.heappop(self._items)
except IndexError as e:
raise e
def push(self, item):
'''Push the value item onto the heap, maintaining the heap invariant.
If the item is not hashable, a TypeError is raised.
'''
try:
hash(item)
except TypeError as e:
raise e
else:
heapq.heappush(self._items, item)
def __len__(self):
return len(self._items)
def __iter__(self):
while len(self) > 0:
yield self.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| mit | Python |
00d9483d405972352786b164881031a2b11cb5e4 | Fix previous commit. | eriklovlie/scaling-octo-bear,eriklovlie/scaling-octo-bear | get-long-functions.py | get-long-functions.py | #!/usr/bin/env python
import os.path
import json
import glob
def get_long(json_files):
threshold = 100
longfun = {}
for path in json_files:
print "Reading {}".format(path)
with open(path) as f:
doc = json.load(f)
for func in doc:
length = func['line_end'] - func['line_start']
if length > threshold:
fun = "{}:{}".format(os.path.basename(func['file_name']), func['fun_name'])
if exempt(func):
print "Skipping exempt function: {}".format(fun)
else:
longfun[fun] = length
return longfun
def exempt(func):
# Check if a function has been annotated as exempt from the style check.
with open(func['file_name']) as f:
lines = f.readlines()
line_start = func['line_start']
# NOTE: line numbers start at 1, so line 2 is actually 1, prev being 0.
line_prev = line_start - 2
if line_prev >= 0:
line = lines[line_prev]
return "[STYLE-CHECK IGNORE LONG FUNCTION]" in line
return False
longfun = get_long(glob.glob("*_metrics.json"))
with open('long_functions.json', 'w') as f:
json.dump(longfun, f)
| #!/usr/bin/env python
import os.path
import json
import glob
def get_long(json_files):
threshold = 100
longfun = {}
for path in json_files:
print "Reading {}".format(path)
with open(path) as f:
doc = json.load(f)
for func in doc:
length = func['line_end'] - func['line_start']
if length > threshold:
fun = "{}:{}".format(os.path.basename(func['file_name']), func['fun_name'])
if exempt(func):
print "Skipping exempt function: {}".format(fun)
else:
longfun[fun] = length
return longfun
def exempt(func):
# Check if a function has been annotated as exempt from the style check.
with open(func['file_name']) as f:
lines = f.readlines()
line_start = func['line_start']
if line_start > 0:
prev_line = lines[line_start - 1]
return "[STYLE-CHECK IGNORE LONG FUNCTION]" in prev_line
return False
longfun = get_long(glob.glob("*_metrics.json"))
with open('long_functions.json', 'w') as f:
json.dump(longfun, f)
| mit | Python |
ba75e76df78b2b09f8b94584360187b719a39b19 | Fix init value | daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various | project_euler/021.amicable_numbers.py | project_euler/021.amicable_numbers.py | '''
Problem 021
Let d(n) be defined as the sum of proper divisors of n (numbers less than n
which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and
each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55
and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71
and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
Solution: Copyright 2017 Dave Cuthbert, MIT License
'''
import math
from collections import defaultdict
class DivisorSums():
sum_cache = defaultdict(lambda:1)
def __init__(self):
self.sum_cache
def add_value(self, n1, n2):
self.sum_cache[n1] = n2
def get_value(self, n1):
return self.sum_cache[n1]
def find_divisors(n):
divisors = {1}
for i in range(2, int(math.sqrt(n)) + 1):
if n % i == 0:
divisors.add(int(n / i))
divisors.add(i)
return divisors
def check_if_amicable(n1, n2):
if cached.get_value(n1) == 1:
cached.add_value(n1, sum(find_divisors(n1)))
if cached.get_value(n1) == n2:
return True
return False
def solve_problem(end_number):
amicable_numbers = set()
for n in range(1, end_number):
divisor_list = find_divisors(n)
list_sum = sum(divisor_list)
cached.add_value(n, list_sum)
if (list_sum != n):
if check_if_amicable(list_sum, n):
amicable_numbers.update([list_sum, n])
return(amicable_numbers)
if __name__ == "__main__":
ending_number = 10000
cached = DivisorSums()
print(sum(solve_problem(ending_number)))
| '''
Problem 021
Let d(n) be defined as the sum of proper divisors of n (numbers less than n
which divide evenly into n).
If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and
each of a and b are called amicable numbers.
For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55
and 110; therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71
and 142; so d(284) = 220.
Evaluate the sum of all the amicable numbers under 10000.
Solution: Copyright 2017 Dave Cuthbert, MIT License
'''
import math
from collections import defaultdict
class DivisorSums():
sum_cache = defaultdict(lambda:1)
def _init_(self):
self.sum_cache
def add_value(self, n1, n2):
self.sum_cache[n1] = n2
def get_value(self, n1):
return self.sum_cache[n1]
def find_divisors(n):
divisors = {1}
for i in range(2, int(math.sqrt(n)) + 1):
if n % i == 0:
divisors.add(int(n / i))
divisors.add(i)
return divisors
def check_if_amicable(n1, n2):
if cached.get_value(n1) == 1:
cached.add_value(n1, sum(find_divisors(n1)))
if cached.get_value(n1) == n2:
return True
return False
def solve_problem(end_number):
amicable_numbers = set()
for n in range(1, end_number):
divisor_list = find_divisors(n)
list_sum = sum(divisor_list)
cached.add_value(n, list_sum)
if (list_sum != n):
if check_if_amicable(list_sum, n):
amicable_numbers.update([list_sum, n])
return(amicable_numbers)
if __name__ == "__main__":
ending_number = 10000
cached = DivisorSums()
print(sum(solve_problem(ending_number)))
| mit | Python |
8ba35ff373fea95278034a3d50d0dc95db5c6e20 | test properties app name modified | buildbuild/buildbuild,buildbuild/buildbuild,buildbuild/buildbuild | buildbuild/properties/tests/test_available_language.py | buildbuild/properties/tests/test_available_language.py | from properties.models import AvailableLanguage
from django.test import TestCase
from django.core.exceptions import ObjectDoesNotExist
class TestLanguage(TestCase):
fixtures = ['properties_data.yaml']
def setUp(self):
pass
def test_get_all_available_language(self):
self.assertIsNotNone(Language.objects.all())
def test_get_python(self):
self.assertIsNotNone(Language.objects.get(lang="python"))
def test_get_python_value_must_be_equal_to_python(self):
language_object = Language.objects.get(lang="python")
self.assertEqual("python", language_object.lang)
def test_get_ruby(self):
self.assertIsNotNone(Language.objects.get(lang="ruby"))
def test_get_ruby_value_must_be_equal_to_ruby(self):
language_object = Language.objects.get(lang="ruby")
self.assertEqual("ruby", language_object.lang)
def test_get_non_exsit_language_must_be_fail(self):
self.assertRaises(
ObjectDoesNotExist,
Language.objects.get,
lang="never_exist_lang"
)
| from properties.models import Language
from django.test import TestCase
from django.core.exceptions import ObjectDoesNotExist
class TestLanguage(TestCase):
fixtures = ['properties_data.yaml']
def setUp(self):
pass
def test_get_all_available_language(self):
self.assertIsNotNone(Language.objects.all())
def test_get_python(self):
self.assertIsNotNone(Language.objects.get(lang="python"))
def test_get_python_value_must_be_equal_to_python(self):
language_object = Language.objects.get(lang="python")
self.assertEqual("python", language_object.lang)
def test_get_ruby(self):
self.assertIsNotNone(Language.objects.get(lang="ruby"))
def test_get_ruby_value_must_be_equal_to_ruby(self):
language_object = Language.objects.get(lang="ruby")
self.assertEqual("ruby", language_object.lang)
def test_get_non_exsit_language_must_be_fail(self):
self.assertRaises(
ObjectDoesNotExist,
Language.objects.get,
lang="never_exist_lang"
)
| bsd-3-clause | Python |
a8252d8e8323ffeeb3e222d03aa1caabd9fa10db | expand ambiguities test | nkhuyu/blaze,alexmojaki/blaze,jcrist/blaze,jdmcbr/blaze,LiaoPan/blaze,dwillmer/blaze,scls19fr/blaze,ChinaQuants/blaze,ContinuumIO/blaze,ChinaQuants/blaze,alexmojaki/blaze,cpcloud/blaze,caseyclements/blaze,xlhtc007/blaze,cowlicks/blaze,jcrist/blaze,jdmcbr/blaze,caseyclements/blaze,cpcloud/blaze,maxalbert/blaze,nkhuyu/blaze,LiaoPan/blaze,scls19fr/blaze,cowlicks/blaze,xlhtc007/blaze,maxalbert/blaze,mrocklin/blaze,mrocklin/blaze,dwillmer/blaze,ContinuumIO/blaze | blaze/tests/test_core.py | blaze/tests/test_core.py | from blaze import into, compute_up, compute_down, drop, create_index
from multipledispatch.conflict import ambiguities
def test_no_dispatch_ambiguities():
for func in [into, compute_up, compute_down, drop, create_index]:
assert not ambiguities(func.funcs)
| from blaze import into, compute_up
from multipledispatch.conflict import ambiguities
def test_into_non_ambiguous():
assert not ambiguities(into.funcs)
def test_compute_up_non_ambiguous():
assert not ambiguities(compute_up.funcs)
| bsd-3-clause | Python |
9f3cdd657a6fb1916cb82a0423f3da7d2738bf49 | change api name | zeluspudding/googlefinance,hongtaocai/googlefinance | googlefinance/__init__.py | googlefinance/__init__.py | '''
MIT License
'''
from urllib2 import Request, urlopen
import json
import sys
__author__ = 'hongtaocai@gmail.com'
googleFinanceKeyToFullName = {
u'id' : u'ID',
u't' : u'StockSymbol',
u'e' : u'Index',
u'l' : u'LastTradePrice',
u'l_cur' : u'LastTradeWithCurrency',
u'ltt' : u'LastTradeTime',
u'lt_dts' : u'LastTradeDateTime',
u'lt' : u'LastTradeDateTimeLong',
u'div' : u'Dividend',
u'yld' : u'Yield'
}
def buildUrl(symbols):
symbol_list = ','.join([symbol for symbol in symbols])
# a deprecated but still active & correct api
return 'http://finance.google.com/finance/info?client=ig&q=' \
+ symbol_list
def request(symbols):
url = buildUrl(symbols)
req = Request(url)
resp = urlopen(req)
content = resp.read().decode().strip()
content = content[3:]
return content
def replaceKeys(quotes):
global googleFinanceKeyToFullName
quotesWithReadableKey = []
for q in quotes:
qReadableKey = {}
for k in googleFinanceKeyToFullName:
if k in q:
qReadableKey[googleFinanceKeyToFullName[k]] = q[k]
quotesWithReadableKey.append(qReadableKey)
return quotesWithReadableKey
def getQuotes(symbols):
'''
get real-time quotes (index, last trade price, last trade time, etc) for stocks, using google api:
http://finance.google.com/finance/info?client=ig&q=symbols
Unlike python package 'yahoo-finance', There is no delay for NYSE and NASDAQ stocks in googlefinance.
:param symbols: a list of stock symbols
:return: real-time quotes
'''
content = json.loads(request(symbols))
return replaceKeys(content);
if __name__ == '__main__':
try:
symbols = sys.argv[1]
except:
symbols = "GOOG,AAPL"
symbols = symbols.split(',')
try:
print json.dumps(getQuotes(symbols), indent=2)
except:
print "Fail"
| '''
MIT License
'''
from urllib2 import Request, urlopen
import json
import sys
__author__ = 'hongtaocai@gmail.com'
googleFinanceKeyToFullName = {
u'id' : u'ID',
u't' : u'StockSymbol',
u'e' : u'Index',
u'l' : u'LastTradePrice',
u'l_cur' : u'LastTradeWithCurrency',
u'ltt' : u'LastTradeTime',
u'lt_dts' : u'LastTradeDateTime',
u'lt' : u'LastTradeDateTimeLong',
u'div' : u'Dividend',
u'yld' : u'Yield'
}
def buildUrl(symbols):
symbol_list = ','.join([symbol for symbol in symbols])
# a deprecated but still active & correct api
return 'http://finance.google.com/finance/info?client=ig&q=' \
+ symbol_list
def request(symbols):
url = buildUrl(symbols)
req = Request(url)
resp = urlopen(req)
content = resp.read().decode().strip()
content = content[3:]
return content
def replaceKeys(quotes):
global googleFinanceKeyToFullName
quotesWithReadableKey = []
for q in quotes:
qReadableKey = {}
for k in googleFinanceKeyToFullName:
if k in q:
qReadableKey[googleFinanceKeyToFullName[k]] = q[k]
quotesWithReadableKey.append(qReadableKey)
return quotesWithReadableKey
def getRealtimeQuotes(symbols):
'''
get real-time quotes (index, last trade price, last trade time, etc) for stocks, using google api:
http://finance.google.com/finance/info?client=ig&q=symbols
Unlike python package 'yahoo-finance', There is no delay for NYSE and NASDAQ stocks in googlefinance.
:param symbols: a list of stock symbols
:return: real-time quote
'''
content = json.loads(request(symbols))
return replaceKeys(content);
if __name__ == '__main__':
try:
symbols = sys.argv[1]
except:
symbols = "GOOG,AAPL"
symbols = symbols.split(',')
try:
print json.dumps(getRealtimeQuotes(symbols), indent=2)
except:
print "Fail"
| mit | Python |
0789b9afc84757b7cef1d4cf6d433e90c7cb78d3 | Make stream return headers immediately | grengojbo/st2,jtopjian/st2,pinterb/st2,alfasin/st2,pixelrebel/st2,nzlosh/st2,alfasin/st2,Plexxi/st2,armab/st2,nzlosh/st2,punalpatel/st2,emedvedev/st2,tonybaloney/st2,StackStorm/st2,peak6/st2,StackStorm/st2,jtopjian/st2,tonybaloney/st2,peak6/st2,armab/st2,alfasin/st2,pinterb/st2,nzlosh/st2,jtopjian/st2,dennybaa/st2,grengojbo/st2,pixelrebel/st2,armab/st2,pinterb/st2,peak6/st2,Plexxi/st2,StackStorm/st2,lakshmi-kannan/st2,lakshmi-kannan/st2,dennybaa/st2,emedvedev/st2,tonybaloney/st2,pixelrebel/st2,grengojbo/st2,Itxaka/st2,Itxaka/st2,Itxaka/st2,Plexxi/st2,lakshmi-kannan/st2,punalpatel/st2,emedvedev/st2,StackStorm/st2,punalpatel/st2,nzlosh/st2,dennybaa/st2,Plexxi/st2 | st2api/st2api/controllers/v1/stream.py | st2api/st2api/controllers/v1/stream.py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pecan
from pecan import Response
from pecan.rest import RestController
from st2common import log as logging
from st2common.models.base import jsexpose
from st2common.util.jsonify import json_encode
from st2api.listener import get_listener
LOG = logging.getLogger(__name__)
def format(gen):
# Yield initial state so client would receive the headers the moment it connects to the stream
yield '\n'
message = '''event: %s\ndata: %s\n\n'''
for pack in gen:
if not pack:
yield '\n'
else:
(event, body) = pack
yield message % (event, json_encode(body, indent=None))
class StreamController(RestController):
@jsexpose(content_type='text/event-stream')
def get_all(self):
def make_response():
res = Response(content_type='text/event-stream',
app_iter=format(get_listener().generator()))
return res
# Prohibit buffering response by eventlet
pecan.request.environ['eventlet.minimum_write_chunk_size'] = 0
stream = make_response()
return stream
| # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pecan
from pecan import Response
from pecan.rest import RestController
from st2common import log as logging
from st2common.models.base import jsexpose
from st2common.util.jsonify import json_encode
from st2api.listener import get_listener
LOG = logging.getLogger(__name__)
def format(gen):
message = '''event: %s\ndata: %s\n\n'''
for pack in gen:
if not pack:
yield '\n'
else:
(event, body) = pack
yield message % (event, json_encode(body, indent=None))
class StreamController(RestController):
@jsexpose(content_type='text/event-stream')
def get_all(self):
def make_response():
res = Response(content_type='text/event-stream',
app_iter=format(get_listener().generator()))
return res
# Prohibit buffering response by eventlet
pecan.request.environ['eventlet.minimum_write_chunk_size'] = 0
stream = make_response()
return stream
| apache-2.0 | Python |
db20fc6b7a21efbd7de0f5b0d1aa754c19c1a21f | Remove all scores before populating the sorted set. | theju/f1oracle,theju/f1oracle | race/management/commands/update_leaderboard.py | race/management/commands/update_leaderboard.py | from django.core.management.base import BaseCommand
from django.conf import settings
from ...models import OverallDriverPrediction, OverallConstructorPrediction
class Command(BaseCommand):
can_import_settings = True
def handle(self, *args, **kwargs):
conn = settings.REDIS_CONN
num_ranks = conn.zcard("ranks")
conn.zremrangebyscore("ranks", 0, num_ranks + 1)
for driver_prediction in OverallDriverPrediction.objects.all():
conn.zadd("ranks",
driver_prediction.user.username,
driver_prediction.score)
for constructor_prediction in OverallConstructorPrediction.objects.all():
score = conn.zscore("ranks", constructor_prediction.user.username)
if not score:
score = 0
conn.zadd("ranks",
constructor_prediction.user.username,
constructor_prediction.score + score)
| from django.core.management.base import BaseCommand
from django.conf import settings
from ...models import OverallDriverPrediction, OverallConstructorPrediction
class Command(BaseCommand):
can_import_settings = True
def handle(self, *args, **kwargs):
conn = settings.REDIS_CONN
num_ranks = conn.zcard("ranks")
conn.zremrangebyscore("ranks", 0, num_ranks)
for driver_prediction in OverallDriverPrediction.objects.all():
conn.zadd("ranks",
driver_prediction.user.username,
driver_prediction.score)
for constructor_prediction in OverallConstructorPrediction.objects.all():
score = conn.zscore("ranks", constructor_prediction.user.username)
if not score:
score = 0
conn.zadd("ranks",
constructor_prediction.user.username,
constructor_prediction.score + score)
| bsd-3-clause | Python |
0f0e0e91db679f18ad9dc7568047b76e447ac589 | Change of the module version | kmee/stock-logistics-warehouse,acsone/stock-logistics-warehouse,open-synergy/stock-logistics-warehouse | stock_inventory_chatter/__openerp__.py | stock_inventory_chatter/__openerp__.py | # -*- coding: utf-8 -*-
# Copyright 2017 Eficent Business and IT Consulting Services S.L.
# Copyright 2018 initOS GmbH
# (http://www.eficent.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
{
'name': 'Stock Inventory Chatter',
'version': '8.0.1.0.0',
'author': "Eficent, "
"initOS GmbH, "
"Odoo Community Association (OCA)",
"website": "https://github.com/OCA/stock-logistics-warehouse",
'category': 'Warehouse',
'summary': "Log changes being done in Inventory Adjustments",
'depends': ['stock'],
"data": [
'data/stock_data.xml',
'views/stock_inventory_view.xml',
],
'license': 'AGPL-3',
'installable': True,
'application': False,
}
| # -*- coding: utf-8 -*-
# Copyright 2017 Eficent Business and IT Consulting Services S.L.
# (http://www.eficent.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
{
'name': 'Stock Inventory Chatter',
'version': '9.0.1.0.0',
'author': "Eficent, "
"Odoo Community Association (OCA)",
"website": "https://github.com/OCA/stock-logistics-warehouse",
'category': 'Warehouse',
'summary': "Log changes being done in Inventory Adjustments",
'depends': ['stock'],
"data": [
'data/stock_data.xml',
'views/stock_inventory_view.xml',
],
'license': 'AGPL-3',
'installable': True,
'application': False,
}
| agpl-3.0 | Python |
137904809733720d24f8715545652820c0a93cd6 | change tag to type for all records | stacybird/CS510CouchDB,stacybird/CS510CouchDB,stacybird/CS510CouchDB | scripts/csv_to_json_file_tag.py | scripts/csv_to_json_file_tag.py | #!/usr/bin/env python
import sys
import csv
import json
import re
if len(sys.argv) != 3:
print 'Incorrect number of arguments.'
print 'Usage: csv_to_json.py path_to_csv path_to_json'
exit()
print 'Argument List:', str(sys.argv)
csvFileName = sys.argv[1]
jsonFileArray = sys.argv[2].split(".")
csvFile = open (csvFileName, 'rU')
myReader = csv.reader(csvFile)
header = myReader.next()
print "Header fields:", header
myReader = csv.DictReader( csvFile, fieldnames = header)
# far too fancy regex for my tastes.
# grabs words, makes the tag the last word prior to ".csv"
fileTag = re.findall(r"[\w']+", csvFileName)[-2:-1][0]
jsonFileCount = 0
def writeNRecords(n):
jsonFileName = jsonFileArray[0] + "_" + str(jsonFileCount) + "." + jsonFileArray[1]
jsonFile = open( jsonFileName, 'w')
count = 0
jsonFile.write("{\"docs\": [")
for row in myReader:
if count != 0:
jsonFile.write(", ")
row['type'] = fileTag
for key in row:
try:
row[key] = int(row[key])
except:
pass
parsedJson = json.dumps( row )
jsonFile.write(parsedJson)
count += 1
if 0 == (count % n):
break
jsonFile.write("] }")
print "up to 10,000 JSON records saved to: ", jsonFileName
return (jsonFileCount + 1)
for x in range(0,1792):
jsonFileCount = writeNRecords(10000)
| #!/usr/bin/env python
import sys
import csv
import json
import re
if len(sys.argv) != 3:
print 'Incorrect number of arguments.'
print 'Usage: csv_to_json.py path_to_csv path_to_json'
exit()
print 'Argument List:', str(sys.argv)
csvFileName = sys.argv[1]
jsonFileArray = sys.argv[2].split(".")
csvFile = open (csvFileName, 'rU')
myReader = csv.reader(csvFile)
header = myReader.next()
print "Header fields:", header
myReader = csv.DictReader( csvFile, fieldnames = header)
# far too fancy regex for my tastes.
# grabs words, makes the tag the last word prior to ".csv"
fileTag = re.findall(r"[\w']+", csvFileName)[-2:-1][0]
jsonFileCount = 0
def writeNRecords(n):
jsonFileName = jsonFileArray[0] + "_" + str(jsonFileCount) + "." + jsonFileArray[1]
jsonFile = open( jsonFileName, 'w')
count = 0
jsonFile.write("{\"docs\": [")
for row in myReader:
if count != 0:
jsonFile.write(", ")
row['tag'] = fileTag
for key in row:
try:
row[key] = int(row[key])
except:
pass
parsedJson = json.dumps( row )
jsonFile.write(parsedJson)
count += 1
if 0 == (count % n):
break
jsonFile.write("] }")
print "up to 10,000 JSON records saved to: ", jsonFileName
return (jsonFileCount + 1)
for x in range(0,1790):
jsonFileCount = writeNRecords(10000)
| apache-2.0 | Python |
51cc1df39a53ef26d36ff8d65aa690f08b57dd99 | Add tests for SentryLogObserver. | harrissoerja/vumi,harrissoerja/vumi,vishwaprakashmishra/xmatrix,vishwaprakashmishra/xmatrix,TouK/vumi,TouK/vumi,harrissoerja/vumi,TouK/vumi,vishwaprakashmishra/xmatrix | vumi/tests/test_sentry.py | vumi/tests/test_sentry.py | """Tests for vumi.sentry."""
import logging
from twisted.trial.unittest import TestCase
from twisted.internet.defer import inlineCallbacks
from twisted.web import http
from twisted.python.failure import Failure
from vumi.tests.utils import MockHttpServer, LogCatcher
from vumi.sentry import quiet_get_page, SentryLogObserver
class TestQuietGetPage(TestCase):
@inlineCallbacks
def setUp(self):
self.mock_http = MockHttpServer(self._handle_request)
yield self.mock_http.start()
@inlineCallbacks
def tearDown(self):
yield self.mock_http.stop()
def _handle_request(self, request):
request.setResponseCode(http.OK)
request.do_not_log = True
return "Hello"
@inlineCallbacks
def test_request(self):
with LogCatcher() as lc:
result = yield quiet_get_page(self.mock_http.url)
self.assertEqual(lc.logs, [])
self.assertEqual(result, "Hello")
class DummySentryClient(object):
def __init__(self):
self.exceptions = []
self.messages = []
def captureMessage(self, *args, **kwargs):
self.messages.append((args, kwargs))
def captureException(self, *args, **kwargs):
self.exceptions.append((args, kwargs))
class TestSentryLogObserver(TestCase):
def setUp(self):
self.client = DummySentryClient()
self.obs = SentryLogObserver(self.client)
def test_level_for_event(self):
for expected_level, event in [
(logging.WARN, {'logLevel': logging.WARN}),
(logging.ERROR, {'isError': 1}),
(logging.INFO, {}),
]:
self.assertEqual(self.obs.level_for_event(event), expected_level)
def test_logger_for_event(self):
self.assertEqual(self.obs.logger_for_event({'system': 'foo,bar'}),
'foo,bar')
self.assertEqual(self.obs.logger_for_event({}), 'unknown')
def test_log_failure(self):
e = ValueError("foo error")
f = Failure(e)
self.obs({'failure': f, 'system': 'test.log'})
self.assertEqual(self.client.exceptions, [
(((type(e), e, None),),
{'data': {'level': 20, 'logger': 'test.log'}}),
])
def test_log_message(self):
self.obs({'message': ["a"], 'system': 'test.log'})
self.assertEqual(self.client.messages, [
(('a',),
{'data': {'level': 20, 'logger': 'test.log'}})
])
class TestRavenUtilityFunctions(TestCase):
pass
| """Tests for vumi.sentry."""
from twisted.trial.unittest import TestCase
from twisted.internet.defer import inlineCallbacks
from twisted.web import http
from vumi.tests.utils import MockHttpServer, LogCatcher
from vumi.sentry import quiet_get_page
class TestQuietGetPage(TestCase):
@inlineCallbacks
def setUp(self):
self.mock_http = MockHttpServer(self._handle_request)
yield self.mock_http.start()
@inlineCallbacks
def tearDown(self):
yield self.mock_http.stop()
def _handle_request(self, request):
request.setResponseCode(http.OK)
request.do_not_log = True
return "Hello"
@inlineCallbacks
def test_request(self):
with LogCatcher() as lc:
result = yield quiet_get_page(self.mock_http.url)
self.assertEqual(lc.logs, [])
self.assertEqual(result, "Hello")
class TestSentryLogObserver(TestCase):
pass
class TestRavenUtilityFunctions(TestCase):
pass
| bsd-3-clause | Python |
0599acdaa610324de36805503aff133f5d6aff08 | set notification logger to warning instead of error | thp44/delphin_6_automation | delphin_6_automation/logging/ribuild_logger.py | delphin_6_automation/logging/ribuild_logger.py | __author__ = 'Christian Kongsgaard'
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules:
import logging
import os
from notifiers.logging import NotificationHandler
import platform
# RiBuild Modules:
try:
from delphin_6_automation.database_interactions.auth import gmail
except ModuleNotFoundError:
gmail = {'mail': None, 'password': None}
# -------------------------------------------------------------------------------------------------------------------- #
# LOGGERS
def ribuild_logger(name):
source_folder = os.environ.get("_MEIPASS2", os.path.abspath("."))
# create logger
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# create console handler and set level to debug
if os.path.exists(f'{source_folder}/{name}.log'):
try:
os.remove(f'{source_folder}/{name}.log')
except PermissionError:
pass
# File Handler
fh = logging.FileHandler(f'{source_folder}/{name}.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
# Notification Handler
message_dict = {'to': 'ocni@dtu.dk',
'subject': f'Fatal Error on {platform.node()}',
'username': gmail['mail'],
'password': gmail['password']}
nh = NotificationHandler('gmail', defaults=message_dict)
nh.setLevel(logging.WARNING)
nh.setFormatter(formatter)
logger.addHandler(nh)
# Stream Handler
sh = logging.StreamHandler()
stream_formatter = logging.Formatter('%(message)s')
sh.setFormatter(stream_formatter)
sh.setLevel(logging.INFO)
logger.addHandler(sh)
return logger
| __author__ = 'Christian Kongsgaard'
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules:
import logging
import os
from notifiers.logging import NotificationHandler
import platform
# RiBuild Modules:
try:
from delphin_6_automation.database_interactions.auth import gmail
except ModuleNotFoundError:
gmail = {'mail': None, 'password': None}
# -------------------------------------------------------------------------------------------------------------------- #
# LOGGERS
def ribuild_logger(name):
source_folder = os.environ.get("_MEIPASS2", os.path.abspath("."))
# create logger
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# create console handler and set level to debug
if os.path.exists(f'{source_folder}/{name}.log'):
try:
os.remove(f'{source_folder}/{name}.log')
except PermissionError:
pass
# File Handler
fh = logging.FileHandler(f'{source_folder}/{name}.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
# Notification Handler
message_dict = {'to': 'ocni@dtu.dk',
'subject': f'Fatal Error on {platform.node()}',
'username': gmail['mail'],
'password': gmail['password']}
nh = NotificationHandler('gmail', defaults=message_dict)
nh.setLevel(logging.ERROR)
nh.setFormatter(formatter)
logger.addHandler(nh)
# Stream Handler
sh = logging.StreamHandler()
stream_formatter = logging.Formatter('%(message)s')
sh.setFormatter(stream_formatter)
sh.setLevel(logging.INFO)
logger.addHandler(sh)
return logger
| mit | Python |
517cc83aff398b62073abbcd2d23bbaae556d3ae | fix Python 3 check | Kamekameha/vapoursynth,vapoursynth/vapoursynth,vapoursynth/vapoursynth,Kamekameha/vapoursynth,vapoursynth/vapoursynth,Kamekameha/vapoursynth,Kamekameha/vapoursynth,vapoursynth/vapoursynth | waftools/checks/custom.py | waftools/checks/custom.py | from waftools.inflectors import DependencyInflector
from waftools.checks.generic import *
from waflib import Utils, Errors
import os
__all__ = ["check_python", "check_cpu_x86", "check_cpu_x86_64"]
def check_python(ctx, dependency_identifier):
ctx.find_program(['python3', 'python'], var = 'PYTHON')
ctx.load('python')
try:
ctx.check_python_version((3, 0, 0))
ctx.check_python_headers()
return True
except Errors.WafError:
return False
def check_cpu_x86(ctx, dependency_identifier):
return ctx.env.DEST_CPU in ['x86', 'x86_64', 'x64', 'amd64', 'x86_amd64']
def check_cpu_x86_64(ctx, dependency_identifier):
return ctx.env.DEST_CPU in ['x86_64', 'x64', 'amd64', 'x86_amd64']
| from waftools.inflectors import DependencyInflector
from waftools.checks.generic import *
from waflib import Utils
import os
__all__ = ["check_python", "check_cpu_x86", "check_cpu_x86_64"]
def check_python(ctx, dependency_identifier):
ctx.find_program(['python3', 'python'], var = 'PYTHON')
ctx.load('python')
ctx.check_python_version()
ver = int(ctx.env.PYTHON_VERSION.split('.')[0])
if (ver == 3):
ctx.check_python_headers()
return True
return False
def check_cpu_x86(ctx, dependency_identifier):
return ctx.env.DEST_CPU in ['x86', 'x86_64', 'x64', 'amd64', 'x86_amd64']
def check_cpu_x86_64(ctx, dependency_identifier):
return ctx.env.DEST_CPU in ['x86_64', 'x64', 'amd64', 'x86_amd64']
| lgpl-2.1 | Python |
87010af869f58e23f89c7d47e5aa173127114a54 | Update eidos_reader.py for new Eidos version | johnbachman/belpy,johnbachman/indra,johnbachman/indra,pvtodorov/indra,bgyori/indra,sorgerlab/indra,pvtodorov/indra,sorgerlab/belpy,sorgerlab/indra,johnbachman/belpy,bgyori/indra,pvtodorov/indra,sorgerlab/belpy,pvtodorov/indra,johnbachman/belpy,bgyori/indra,johnbachman/indra,sorgerlab/indra,sorgerlab/belpy | indra/sources/eidos/eidos_reader.py | indra/sources/eidos/eidos_reader.py | import json
from indra.java_vm import autoclass, JavaException
class EidosReader(object):
"""Reader object keeping an instance of the Eidos reader as a singleton.
This allows the Eidos reader to need initialization when the first piece of
text is read, the subsequent readings are done with the same
instance of the reader and are therefore faster.
Attributes
----------
eidos_reader : org.clulab.wm.EidosSystem
A Scala object, an instance of the Eidos reading system. It is
instantiated only when first processing text.
"""
def __init__(self):
self.eidos_reader = None
def process_text(self, text):
"""Return a mentions JSON object given text.
Parameters
----------
text : str
Text to be processed.
Returns
-------
json_dict : dict
A JSON object of mentions extracted from text.
"""
if self.eidos_reader is None:
eidos = autoclass('org.clulab.wm.EidosSystem')
self.eidos_reader = eidos(autoclass('java.lang.Object')())
mentions = self.eidos_reader.extractFrom(text, False).mentions()
ser = autoclass('org.clulab.wm.serialization.json.WMJSONSerializer')
mentions_json = ser.toJsonStr(mentions)
json_dict = json.loads(mentions_json)
return json_dict
| import json
from indra.java_vm import autoclass, JavaException
class EidosReader(object):
"""Reader object keeping an instance of the Eidos reader as a singleton.
This allows the Eidos reader to need initialization when the first piece of
text is read, the subsequent readings are done with the same
instance of the reader and are therefore faster.
Attributes
----------
eidos_reader : org.clulab.wm.AgroSystem
A Scala object, an instance of the Eidos reading system. It is
instantiated only when first processing text.
"""
def __init__(self):
self.eidos_reader = None
def process_text(self, text):
"""Return a mentions JSON object given text.
Parameters
----------
text : str
Text to be processed.
Returns
-------
json_dict : dict
A JSON object of mentions extracted from text.
"""
if self.eidos_reader is None:
eidos = autoclass('org.clulab.wm.EidosSystem')
self.eidos_reader = eidos(autoclass('java.lang.Object')())
mentions = self.eidos_reader.extractFrom(text)
ser = autoclass('org.clulab.wm.serialization.json.WMJSONSerializer')
mentions_json = ser.toJsonStr(mentions)
json_dict = json.loads(mentions_json)
return json_dict
| bsd-2-clause | Python |
2366b4fa6ca940c4c774a2f3b3c5e5be14edb0af | Refactor dice rolling algorithm in DiceRollerSuite | johnmarcampbell/twircBot | src/DiceRollerSuite.py | src/DiceRollerSuite.py | import random
import re
from src.CommandSuite import CommandSuite
class DiceRollerSuite(CommandSuite):
"""Suite for rolling dice"""
def __init__(self, name):
"""Initialize some variables"""
CommandSuite.__init__(self, name)
self.config = self.config_manager.parse_file('config/defaultLogSuite.config')
random.seed()
self.dice_roll_string = '\!([0-9]+)d([0-9]+)([+]|[-])?([0-9]*)'
self.invoke_coin_match = '\!flip'
def parse(self, data):
"""Parse chat data and log it"""
self.chat_tuple = self.parse_chat(data, self.config['nick'])
message = self.chat_tuple[1]
dice_roll_match = re.search(self.dice_roll_string, data)
coin_flip_match = re.search(self.invoke_coin_match, data)
number_of_dice = 0
die_size = 0
plus_or_minus = ''
modifier = 0
if dice_roll_match:
total = self.roll_dice(dice_roll_match)
print(str(total))
# print(str(total))
if coin_flip_match:
heads_or_tails = self.flip_coin()
print(heads_or_tails)
def roll_dice(self, dice_roll_match):
"""Function to roll a dice pool"""
(pool, die, plus_or_minus, modifier) = dice_roll_match.groups()
pool = int(pool)
die = int(die)
if modifier == '':
modifier = 0
else:
modifier = int(modifier)
total = 0
for i in range(0, pool):
total += random.randint(1, die)
if plus_or_minus == '+':
total += modifier
elif plus_or_minus == '-':
total -= modifier
return total
# return 0
def flip_coin(self):
"""Function to flip a coin"""
if random.randint(0,1):
heads_or_tails = 'heads'
else:
heads_or_tails = 'tails'
return heads_or_tails
| import random
import re
from src.CommandSuite import CommandSuite
class DiceRollerSuite(CommandSuite):
"""Suite for rolling dice"""
def __init__(self, name):
"""Initialize some variables"""
CommandSuite.__init__(self, name)
self.config = self.config_manager.parse_file('config/defaultLogSuite.config')
random.seed()
self.invoke_match_string = '\!([0-9]+)d([0-9]+)'
self.invoke_modified_match_string = '\![0-9]+d[0-9]+([+]|[-])([0-9]+)'
self.invoke_coin_match = '\!flip'
def parse(self, data):
"""Parse chat data and log it"""
self.chat_tuple = self.parse_chat(data, self.config['nick'])
message = self.chat_tuple[1]
base_match = re.search(self.invoke_match_string, data)
modifier_match = re.search(self.invoke_modified_match_string, data)
coin_flip_match = re.search(self.invoke_coin_match, data)
number_of_dice = 0
die_size = 0
plus_or_minus = ''
modifier = 0
if base_match:
number_of_dice = int(base_match.group(1))
die_size = int(base_match.group(2))
if modifier_match:
plus_or_minus = modifier_match.group(1)
modifier = int(modifier_match.group(2))
total = self.roll_dice(number_of_dice, die_size, plus_or_minus, modifier)
print(str(total))
if coin_flip_match:
heads_or_tails = self.flip_coin()
print(heads_or_tails)
def roll_dice(self, number_of_dice, die_size, plus_or_minus, modifier):
"""Function to roll a dice pool"""
total = 0
for i in range(0,number_of_dice):
total += random.randint(1,die_size)
if plus_or_minus == '+':
total += modifier
elif plus_or_minus == '-':
total -= modifier
return total
def flip_coin(self):
"""Function to flip a coin"""
if random.randint(0,1):
heads_or_tails = 'heads'
else:
heads_or_tails = 'tails'
return heads_or_tails
| mit | Python |
2228084849ce3e2e17e91402b6ae6e7e3a5cb7a4 | Use key().name() instead of link_id | MatthewWilkes/mw4068-packaging,MatthewWilkes/mw4068-packaging,MatthewWilkes/mw4068-packaging,MatthewWilkes/mw4068-packaging | app/soc/views/helper/redirects.py | app/soc/views/helper/redirects.py | #!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Redirect related methods.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
def getCreateRequestRedirect(entity, params):
"""Returns the create request redirect for the specified entity.
"""
result ='/request/create/%s/%s/%s' % (
params['url_name'], params['group_scope'], entity.key.name())
return result
def getInviteRedirect(entity, params):
"""Returns the invitation redirect for the specified entity.
"""
result ='/%s/invite/%s' % (
params['url_name'], entity.key().name())
return result
def getCreateRedirect(entity, params):
"""Returns the create program redirect for the specified entity.
"""
result ='/%s/create/%s' % (
params['url_name'], entity.key().name())
return result
def getEditRedirect(entity, params):
"""Returns the edit redirect for the specified entity.
"""
url_name = params['url_name']
return '/%s/edit/%s' % (url_name, entity.key().name())
def inviteAcceptedRedirect(entity, _):
"""Returns the redirect for accepting an invite.
"""
return '/%s/create/%s/%s' % (
entity.role, entity.scope_path, entity.link_id)
| #!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Redirect related methods.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
def getCreateRequestRedirect(entity, params):
"""Returns the create request redirect for the specified entity.
"""
result ='/request/create/%s/%s/%s' % (
params['url_name'], params['group_scope'], entity.link_id)
return result
def getInviteRedirect(entity, params):
"""Returns the invitation redirect for the specified entity.
"""
result ='/%s/invite/%s' % (
params['url_name'], entity.link_id)
return result
def getCreateRedirect(entity, params):
"""Returns the create program redirect for the specified entity.
"""
result ='/%s/create/%s' % (
params['url_name'], entity.link_id)
return result
def getEditRedirect(entity, params):
"""Returns the edit redirect for the specified entity.
"""
suffix = params['logic'].getKeySuffix(entity)
url_name = params['url_name']
return '/%s/edit/%s' % (url_name, suffix)
def inviteAcceptedRedirect(entity, _):
"""Returns the redirect for accepting an invite.
"""
return '/%s/create/%s/%s' % (
entity.role, entity.scope_path, entity.link_id)
| apache-2.0 | Python |
4593bedda981bff49a3ddb54f20e2f17b55f4c0b | Fix for web interface CLI backup. | lukacu/manus,lukacu/manus,lukacu/manus,lukacu/manus,lukacu/manus | python/manus_webshell/cli/__main__.py | python/manus_webshell/cli/__main__.py | import sys
import os
import json
import errno
import glob
import urllib2
import argparse
import mimetypes
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def do_backup(args):
url = "http://%s/api/storage" % args.host
response = urllib2.urlopen(url)
data = response.read()
keys = json.loads(data)
mkdir_p(args.storage)
for key in keys:
response = urllib2.urlopen("%s?key=%s" % (url, key))
data = response.read()
type = mimetypes.guess_extension(response.info().type)
print "Saving %s" % key
with open(os.path.join(args.storage, "%s%s" % (key, type)), "w") as file:
file.write(data)
def do_clear(args):
url = "http://%s/api/storage" % args.host
response = urllib2.urlopen(url)
data = response.read()
keys = json.loads(data)
for key in keys:
response = urllib2.urlopen("%s?key=%s" % (url, key), "")
data = response.read()
print "Deleting %s" % key
def do_restore(args):
url = "http://%s/api/storage" % args.host
for element in glob.glob(os.path.join(args.storage, '*')):
(key, ext) = os.path.splitext(os.path.basename(element))
(ctype, encoding) = mimetypes.guess_type(element)
with open(element, "r") as file:
data = file.read()
opener = urllib2.build_opener()
request = urllib2.Request("%s?key=%s" % (url, key), data=data,
headers={'Content-Type': ctype})
response = opener.open(request)
print "Restoring %s" % key
if len(sys.argv) < 2:
sys.exit(1)
parser = argparse.ArgumentParser(description='Manus API CLI utility', prog=sys.argv[0])
parser.add_argument('--host', dest='host', help='API server address and port', default="localhost:8080")
if sys.argv[1] == 'backup':
parser.add_argument('-s', dest='storage', help='Local storage directory', default=os.path.join(os.getcwd(), 'manus_storage'))
operation = do_backup
if sys.argv[1] == 'restore':
parser.add_argument('-s', dest='storage', help='Local storage directory', default=os.path.join(os.getcwd(), 'manus_storage'))
operation = do_restore
if sys.argv[1] == 'clear':
operation = do_clear
args = parser.parse_args(sys.argv[2:])
operation(args)
| import sys
import os
import json
import errno
import glob
import urllib2
import argparse
import mimetypes
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def do_backup(args):
url = "http://%s/api/storage" % args.host
response = urllib2.urlopen(url)
data = response.read()
keys = json.loads(data)
mkdir_p(args.storage)
for key in keys:
response = urllib2.urlopen("%s?key=%s" % (url, key))
data = response.read()
type = mimetypes.guess_extension(response.info().type)
print "Saving %s" % key
with open(os.path.join(args.storage, "%s.%s" % (key, type)), "w") as file:
file.write(data)
def do_clear(args):
url = "http://%s/api/storage" % args.host
response = urllib2.urlopen(url)
data = response.read()
keys = json.loads(data)
for key in keys:
response = urllib2.urlopen("%s?key=%s" % (url, key), "")
data = response.read()
print "Deleting %s" % key
def do_restore(args):
url = "http://%s/api/storage" % args.host
for element in glob.glob(os.path.join(args.storage, '*')):
(key, ext) = os.path.splitext(os.path.basename(element))
(ctype, encoding) = mimetypes.guess_type(element)
with open(element, "r") as file:
data = file.read()
opener = urllib2.build_opener()
request = urllib2.Request("%s?key=%s" % (url, key), data=data,
headers={'Content-Type': ctype})
response = opener.open(request)
print "Restoring %s" % key
if len(sys.argv) < 2:
sys.exit(1)
parser = argparse.ArgumentParser(description='Manus API CLI utility', prog=sys.argv[0])
parser.add_argument('--host', dest='host', help='API server address and port', default="localhost:8080")
if sys.argv[1] == 'backup':
parser.add_argument('-s', dest='storage', help='Local storage directory', default=os.path.join(os.getcwd(), 'manus_storage'))
operation = do_backup
if sys.argv[1] == 'restore':
parser.add_argument('-s', dest='storage', help='Local storage directory', default=os.path.join(os.getcwd(), 'manus_storage'))
operation = do_restore
if sys.argv[1] == 'clear':
operation = do_clear
args = parser.parse_args(sys.argv[2:])
operation(args)
| mit | Python |
020564bdcbcb6586e8d9ed622624db47e0a122d8 | fix incorrect type for empty tags | ProgVal/irctest | irctest/irc_utils/message_parser.py | irctest/irc_utils/message_parser.py | import re
import collections
import supybot.utils
# http://ircv3.net/specs/core/message-tags-3.2.html#escaping-values
TAG_ESCAPE = [
('\\', '\\\\'), # \ -> \\
(' ', r'\s'),
(';', r'\:'),
('\r', r'\r'),
('\n', r'\n'),
]
unescape_tag_value = supybot.utils.str.MultipleReplacer(
dict(map(lambda x:(x[1],x[0]), TAG_ESCAPE)))
# TODO: validate host
tag_key_validator = re.compile(r'\+?(\S+/)?[a-zA-Z0-9-]+')
def parse_tags(s):
tags = {}
for tag in s.split(';'):
if '=' not in tag:
tags[tag] = None
else:
(key, value) = tag.split('=', 1)
assert tag_key_validator.match(key), \
'Invalid tag key: {}'.format(key)
tags[key] = unescape_tag_value(value)
return tags
Message = collections.namedtuple('Message',
'tags prefix command params')
def parse_message(s):
"""Parse a message according to
http://tools.ietf.org/html/rfc1459#section-2.3.1
and
http://ircv3.net/specs/core/message-tags-3.2.html"""
assert s.endswith('\r\n'), 'Message does not end with CR LF: {!r}'.format(s)
s = s[0:-2]
if s.startswith('@'):
(tags, s) = s.split(' ', 1)
tags = parse_tags(tags[1:])
else:
tags = {}
if ' :' in s:
(other_tokens, trailing_param) = s.split(' :', 1)
tokens = list(filter(bool, other_tokens.split(' '))) + [trailing_param]
else:
tokens = list(filter(bool, s.split(' ')))
if tokens[0].startswith(':'):
prefix = tokens.pop(0)[1:]
else:
prefix = None
command = tokens.pop(0)
params = tokens
return Message(
tags=tags,
prefix=prefix,
command=command,
params=params,
)
| import re
import collections
import supybot.utils
# http://ircv3.net/specs/core/message-tags-3.2.html#escaping-values
TAG_ESCAPE = [
('\\', '\\\\'), # \ -> \\
(' ', r'\s'),
(';', r'\:'),
('\r', r'\r'),
('\n', r'\n'),
]
unescape_tag_value = supybot.utils.str.MultipleReplacer(
dict(map(lambda x:(x[1],x[0]), TAG_ESCAPE)))
# TODO: validate host
tag_key_validator = re.compile(r'\+?(\S+/)?[a-zA-Z0-9-]+')
def parse_tags(s):
tags = {}
for tag in s.split(';'):
if '=' not in tag:
tags[tag] = None
else:
(key, value) = tag.split('=', 1)
assert tag_key_validator.match(key), \
'Invalid tag key: {}'.format(key)
tags[key] = unescape_tag_value(value)
return tags
Message = collections.namedtuple('Message',
'tags prefix command params')
def parse_message(s):
"""Parse a message according to
http://tools.ietf.org/html/rfc1459#section-2.3.1
and
http://ircv3.net/specs/core/message-tags-3.2.html"""
assert s.endswith('\r\n'), 'Message does not end with CR LF: {!r}'.format(s)
s = s[0:-2]
if s.startswith('@'):
(tags, s) = s.split(' ', 1)
tags = parse_tags(tags[1:])
else:
tags = []
if ' :' in s:
(other_tokens, trailing_param) = s.split(' :', 1)
tokens = list(filter(bool, other_tokens.split(' '))) + [trailing_param]
else:
tokens = list(filter(bool, s.split(' ')))
if tokens[0].startswith(':'):
prefix = tokens.pop(0)[1:]
else:
prefix = None
command = tokens.pop(0)
params = tokens
return Message(
tags=tags,
prefix=prefix,
command=command,
params=params,
)
| mit | Python |
7381f2367bc155434c155e2116cd0d046b3d66ae | add alarm message | dgu-dna/DNA-Bot | apps/alarm.py | apps/alarm.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from apps.decorators import on_command
from apps.slackutils import cat_token, get_nickname, send_msg
import time
@on_command(['!알람', '!ㅇㄹ'])
def run(robot, channel, tokens, user, command):
'''일정시간 이후에 알람 울려줌'''
msg = '사용법 오류'
if len(tokens) > 1:
user_name = get_nickname(user)
sec = eval(cat_token(tokens, 1))
noti_msg = user_name + ', ' + str(sec) + '초 후에 알려주겠음.'
send_msg(robot, channel, noti_msg)
time.sleep(sec)
msg = user_name + ', ' + cat_token(tokens, 1)
return channel, msg
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from apps.decorators import on_command
from apps.slackutils import cat_token, get_nickname
import time
@on_command(['!알람', '!ㅇㄹ'])
def run(robot, channel, tokens, user, command):
'''일정시간 이후에 알람 울려줌'''
msg = '사용법 오류'
if len(tokens) > 1:
user_name = get_nickname(user)
time.sleep(int(tokens[0]))
msg = user_name + ', ' + cat_token(tokens, 1)
return channel, msg
| mit | Python |
0eb0608eeecd287ce5d286fc244013781c29214f | Split admin.globalConfig into two endpoints | luci/luci-py,luci/luci-py,luci/luci-py,luci/luci-py | appengine/config_service/admin.py | appengine/config_service/admin.py | # Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Administration API accessible only by service admins.
Defined as Endpoints API mostly to abuse API Explorer UI and not to write our
own admin UI. Note that all methods are publicly visible (though the source code
itself is also publicly visible, so not a big deal).
Callers have to be in 'administrators' group.
"""
import logging
from google.appengine.ext import ndb
from google.appengine.ext.ndb import msgprop
from protorpc import message_types
from protorpc import messages
from protorpc import remote
from components import auth
from components.datastore_utils import config
import acl
# This is used by endpoints indirectly.
package = 'luci-config'
class ServiceConfigStorageType(messages.Enum):
"""Type of repository where service configs are stored."""
GITILES = 0
class GlobalConfig(config.GlobalConfig):
"""Server-wide static configuration stored in datastore.
Typically it is set once during service setup and is never changed.
"""
# Type of repository where service configs are stored.
services_config_storage_type = msgprop.EnumProperty(ServiceConfigStorageType)
# If config storage type is Gitiles, URL to the root of service configs
# directory.
services_config_location = ndb.StringProperty()
class GlobalConfigMessage(messages.Message):
"""GlobalConfig as a RPC message."""
services_config_storage_type = messages.EnumField(ServiceConfigStorageType, 1)
services_config_location = messages.StringField(2)
@auth.endpoints_api(name='admin', version='v1', title='Administration API')
class AdminApi(remote.Service):
"""Administration API accessible only by the service admins."""
@auth.endpoints_method(
message_types.VoidMessage, GlobalConfigMessage, name='readGlobalConfig')
@auth.require(acl.is_admin)
def read_global_config(self, request):
"""Reads global configuration."""
conf = GlobalConfig.fetch()
if not conf:
conf = GlobalConfig()
return GlobalConfigMessage(
services_config_location=conf.services_config_location,
services_config_storage_type=conf.services_config_storage_type)
@auth.endpoints_method(
GlobalConfigMessage, GlobalConfigMessage, name='writeGlobalConfig')
@auth.require(acl.is_admin)
def write_global_config(self, request):
"""Writes global configuration."""
conf = GlobalConfig.fetch()
if not conf:
conf = GlobalConfig()
changed = conf.modify(
updated_by=auth.get_current_identity().to_bytes(),
services_config_storage_type=request.services_config_storage_type,
services_config_location=request.services_config_location)
if changed:
logging.warning('Updated global configuration')
return GlobalConfigMessage(
services_config_location=conf.services_config_location,
services_config_storage_type=conf.services_config_storage_type)
| # Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Administration API accessible only by service admins.
Defined as Endpoints API mostly to abuse API Explorer UI and not to write our
own admin UI. Note that all methods are publicly visible (though the source code
itself is also publicly visible, so not a big deal).
Callers have to be in 'administrators' group.
"""
import logging
from google.appengine.ext import ndb
from google.appengine.ext.ndb import msgprop
from protorpc import messages
from protorpc import remote
from components import auth
from components.datastore_utils import config
import acl
# This is used by endpoints indirectly.
package = 'luci-config'
class ServiceConfigStorageType(messages.Enum):
"""Type of repository where service configs are stored."""
GITILES = 0
class GlobalConfig(config.GlobalConfig):
"""Server-wide static configuration stored in datastore.
Typically it is set once during service setup and is never changed.
"""
# Type of repository where service configs are stored.
services_config_storage_type = msgprop.EnumProperty(ServiceConfigStorageType)
# If config storage type is Gitiles, URL to the root of service configs
# directory.
services_config_location = ndb.StringProperty()
class GlobalConfigMessage(messages.Message):
"""GlobalConfig as a RPC message."""
services_config_storage_type = messages.EnumField(ServiceConfigStorageType, 1)
services_config_location = messages.StringField(2)
@auth.endpoints_api(name='admin', version='v1', title='Administration API')
class AdminApi(remote.Service):
"""Administration API accessible only by the service admins."""
@auth.endpoints_method(
GlobalConfigMessage, GlobalConfigMessage, name='globalConfig')
@auth.require(acl.is_admin)
def global_config(self, request):
"""Reads/writes global configuration."""
conf = GlobalConfig.fetch()
if not conf:
conf = GlobalConfig()
changed = conf.modify(
updated_by=auth.get_current_identity().to_bytes(),
services_config_storage_type=request.services_config_storage_type,
services_config_location=request.services_config_location)
if changed:
logging.warning('Updated global configuration')
return GlobalConfigMessage(
services_config_location=conf.services_config_location,
services_config_storage_type=conf.services_config_storage_type)
| apache-2.0 | Python |
da10771e21c2dee4eee0f4fb046b3135d51aa3a9 | Fix file path issue while trying to save the file on Windows. | kerma/Sublime-AdvancedNewFile,yogiben/Sublime-AdvancedNewFile,skuroda/Sublime-AdvancedNewFile,kerma/Sublime-AdvancedNewFile,yogiben/Sublime-AdvancedNewFile | AdvancedNewFile.py | AdvancedNewFile.py | import os
import sublime, sublime_plugin
class AdvancedNewFileCommand(sublime_plugin.TextCommand):
def run(self, edit, is_python=False):
self.count = 0
self.window = self.view.window()
self.root = self.get_root()
self.is_python = is_python
self.show_filename_input()
def get_root(self):
try:
root = self.window.folders()[0]
except IndexError:
root = os.path.abspath(os.path.dirname(self.view.file_name()))
return root
def show_filename_input(self, initial=''):
caption = 'Enter a path for a new file'
if self.is_python:
caption = '%s (creates __init__.py in new dirs)' % caption
self.window.show_input_panel(
caption, initial,
self.entered_filename, self.update_filename_input, None
)
def update_filename_input(self, path):
# TODO: Autocomplete feature
pass
def entered_filename(self, filename):
file_path = os.path.join(self.root, filename)
if not os.path.exists(file_path):
self.create(file_path)
self.window.open_file(file_path)
def create(self, filename):
base, filename = os.path.split(filename)
self.create_folder(base)
def create_folder(self, base):
if not os.path.exists(base):
parent = os.path.split(base)[0]
if not os.path.exists(parent):
self.create_folder(parent)
os.mkdir(base)
if self.is_python:
open(os.path.join(base, '__init__.py'), 'w').close()
| import os
import sublime, sublime_plugin
class AdvancedNewFileCommand(sublime_plugin.TextCommand):
def run(self, edit, is_python=False):
self.count = 0
self.window = self.view.window()
self.root = self.get_root()
self.is_python = is_python
self.show_filename_input()
def get_root(self):
try:
root = self.window.folders()[0]
except IndexError:
root = os.path.abspath(os.path.dirname(self.view.file_name()))
return root
def show_filename_input(self, initial=''):
caption = 'Enter a path for a new file'
if self.is_python:
caption = '%s (creates __init__.py in new dirs)' % caption
self.window.show_input_panel(
caption, initial,
self.entered_filename, self.update_filename_input, None
)
def update_filename_input(self, path):
# TODO: Autocomplete feature
pass
def entered_filename(self, filename):
file_path = os.path.join(self.root, filename)
if not os.path.exists(file_path):
self.create(file_path)
self.window.run_command('open_file', {"file": file_path})
def create(self, filename):
base, filename = os.path.split(filename)
self.create_folder(base)
def create_folder(self, base):
if not os.path.exists(base):
parent = os.path.split(base)[0]
if not os.path.exists(parent):
self.create_folder(parent)
os.mkdir(base)
if self.is_python:
open(os.path.join(base, '__init__.py'), 'w').close()
| mit | Python |
88bdc56cad7c0dba165de26940fd19997e4d9862 | Complete solution | CubicComet/exercism-python-solutions | atbash-cipher/atbash_cipher.py | atbash-cipher/atbash_cipher.py | import re
from string import ascii_lowercase, digits
ATBASH = {k: v for k, v in zip(ascii_lowercase + digits,
ascii_lowercase[::-1] + digits)}
def encode(s):
return " ".join(re.findall(r'.{1,5}', atbash(s)))
def decode(s):
return atbash(s)
def atbash(s):
return "".join(ATBASH.get(ch, "") for ch in s.lower())
| from string import ascii_lowercase, digits
ATBASH = {k: v for k, v in zip(ascii_lowercase + digits,
ascii_lowercase[::-1] + digits)}
def encode(s):
encoded = atbash(s)
def decode(s):
return atbash(s)
def atbash(s):
return "".join(ATBASH.get(ch, "") for ch in s.lower())
| agpl-3.0 | Python |
e0f92f43200d290d657dbbb09dd1d66451393f3d | Fix appcast script | qvacua/vimr,qvacua/vimr,qvacua/vimr,qvacua/vimr,qvacua/vimr | bin/set_appcast.py | bin/set_appcast.py | #!/usr/bin/env python
# pip install requests
# pip install Markdown
import os
import io
import sys
import subprocess
import requests
import json
import markdown
from datetime import datetime
from string import Template
SIGN_UPDATE = './bin/sign_update'
PRIVATE_KEY_PATH = os.path.expanduser('~/Projects/sparkle_priv.pem')
GITHUB_TOKEN_PATH = os.path.expanduser('~/.config/github.qvacua.release.token')
file_path = sys.argv[1]
bundle_version = sys.argv[2]
marketing_version = sys.argv[3]
tag_name = sys.argv[4]
is_snapshot = True if sys.argv[5] == "true" else False
file_size = os.stat(file_path).st_size
file_signature = subprocess.check_output([SIGN_UPDATE, file_path, PRIVATE_KEY_PATH]).strip()
appcast_template_file = open('resources/appcast_template.xml', 'r')
appcast_template = Template(appcast_template_file.read())
appcast_template_file.close()
token_file = open(GITHUB_TOKEN_PATH, 'r')
token = token_file.read().strip()
token_file.close()
release_response = requests.get('https://api.github.com/repos/qvacua/vimr/releases/tags/{0}'.format(tag_name),
params={'access_token': token})
release_json = json.loads(release_response.content)
title = release_json['name']
download_url = release_json['assets'][0]['browser_download_url']
release_notes_url = release_json['html_url']
release_notes = release_json['body']
appcast = appcast_template.substitute(
title=title,
release_notes=markdown.markdown(release_notes),
release_notes_link=release_notes_url,
publication_date=datetime.now().isoformat(),
file_url=download_url,
bundle_version=bundle_version,
marketing_version=marketing_version,
file_length=file_size,
signature=file_signature
)
appcast_file_name = 'appcast_snapshot.xml' if is_snapshot else 'appcast.xml'
with io.open('build/Release/{0}'.format(appcast_file_name), 'w+') as appcast_file:
appcast_file.write(appcast)
print('--------------------------------')
print(appcast.strip())
print('--------------------------------')
| #!/usr/bin/env python
# pip install requests
# pip install Markdown
import os
import sys
import subprocess
import requests
import json
import markdown
from datetime import datetime
from string import Template
SIGN_UPDATE = './bin/sign_update'
PRIVATE_KEY_PATH = os.path.expanduser('~/Projects/sparkle_priv.pem')
GITHUB_TOKEN_PATH = os.path.expanduser('~/.config/github.qvacua.release.token')
file_path = sys.argv[1]
bundle_version = sys.argv[2]
marketing_version = sys.argv[3]
tag_name = sys.argv[4]
is_snapshot = True if sys.argv[5] == "true" else False
file_size = os.stat(file_path).st_size
file_signature = subprocess.check_output([SIGN_UPDATE, file_path, PRIVATE_KEY_PATH]).strip()
appcast_template_file = open('resources/appcast_template.xml', 'r')
appcast_template = Template(appcast_template_file.read())
appcast_template_file.close()
token_file = open(GITHUB_TOKEN_PATH, 'r')
token = token_file.read().strip()
token_file.close()
release_response = requests.get('https://api.github.com/repos/qvacua/vimr/releases/tags/{0}'.format(tag_name),
params={'access_token': token})
release_json = json.loads(release_response.content)
title = release_json['name']
download_url = release_json['assets'][0]['browser_download_url']
release_notes_url = release_json['html_url']
release_notes = release_json['body']
appcast = appcast_template.substitute(
title=title,
release_notes=markdown.markdown(release_notes),
release_notes_link=release_notes_url,
publication_date=datetime.now().isoformat(),
file_url=download_url,
bundle_version=bundle_version,
marketing_version=marketing_version,
file_length=file_size,
signature=file_signature
)
appcast_file_name = 'appcast_snapshot.xml' if is_snapshot else 'appcast.xml'
appcast_file = open('build/Release/{0}'.format(appcast_file_name), 'w+')
appcast_file.write(appcast)
appcast_file.close()
print('--------------------------------')
print(appcast.strip())
print('--------------------------------')
| mit | Python |
0c506e9e29096c4feb118694b00020d631d67082 | add two drawNetworkGraph functions | oeg8168/PTT-ID-correlator | src/PTTpushAnalyser.py | src/PTTpushAnalyser.py | import collections
import networkx as nx
import matplotlib.pyplot as plt
from src.DBmanage import DBmanage
class PTTpushAnalyser:
def __init__(self):
db = DBmanage()
def analyse(self):
pass
def getAllAuthorPusherPairs(self, crawlArticles):
allAuthorPusherPairs = []
for artical in crawlArticles:
authorID = artical['authorID']
for push in artical['pushMessages']:
pushUserID = push['pushUserID']
pushTag = push['pushTag']
authorPusherPair = (authorID, pushUserID, pushTag)
allAuthorPusherPairs.append(authorPusherPair)
return allAuthorPusherPairs
def filterAuthorPusherPair(self, authorPusherPair):
minDegree = 2
pairSummary = collections.Counter(authorPusherPair)
return [x for x in pairSummary if pairSummary[x] >= minDegree]
def createNetworkGraph(self, authorPusherPair):
graph = nx.DiGraph()
for pair in authorPusherPair:
author = pair[0]
pusher = pair[1]
graph.add_edge(pusher, author)
return graph
def drawNetworkGraphThenShow(self, graph):
nx.draw(graph, with_labels=True, font_color='green')
plt.show()
def drawNetworkGraphThenSave(self, graph):
nx.draw(graph, with_labels=True, font_color='green')
plt.savefig('networkGraph.png')
| import collections
import networkx as nx
from src.DBmanage import DBmanage
class PTTpushAnalyser:
def __init__(self):
db = DBmanage()
def analyse(self):
pass
def getAllAuthorPusherPairs(self, crawlArticles):
allAuthorPusherPairs = []
for artical in crawlArticles:
authorID = artical['authorID']
for push in artical['pushMessages']:
pushUserID = push['pushUserID']
pushTag = push['pushTag']
authorPusherPair = (authorID, pushUserID, pushTag)
allAuthorPusherPairs.append(authorPusherPair)
return allAuthorPusherPairs
def filterAuthorPusherPair(self, authorPusherPair):
minDegree = 2
pairSummary = collections.Counter(authorPusherPair)
return [x for x in pairSummary if pairSummary[x] >= minDegree]
def createNetworkGraph(self, authorPusherPair):
graph = nx.DiGraph()
for pair in authorPusherPair:
author = pair[0]
pusher = pair[1]
graph.add_edge(pusher, author)
nx.draw(graph, with_labels=True, font_color='green')
| mit | Python |
794113ebfd3ea480ac745640b592b893359a62e0 | Use existing service function to look up sessio token for tests | m-ober/byceps,m-ober/byceps,m-ober/byceps,homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps | tests/base.py | tests/base.py | """
tests.base
~~~~~~~~~~
Base classes for test cases
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from contextlib import contextmanager
import os
from pathlib import Path
from unittest import TestCase
from unittest.mock import patch
from byceps.application import create_app
from byceps.database import db
from byceps.services.authentication.session.service \
import find_session_token_for_user
from tests import mocks
_CONFIG_PATH = Path('../config')
CONFIG_FILENAME_TEST_PARTY = _CONFIG_PATH / 'test_party.py'
CONFIG_FILENAME_TEST_ADMIN = _CONFIG_PATH / 'test_admin.py'
class AbstractAppTestCase(TestCase):
@patch('redis.StrictRedis.from_url', mocks.strict_redis_client_from_url)
def setUp(self, config_filename=CONFIG_FILENAME_TEST_PARTY):
self.app = create_app(config_filename)
# Allow overriding of database URI from the environment.
db_uri_override = os.environ.get('DATABASE_URI')
if db_uri_override:
self.app.config['SQLALCHEMY_DATABASE_URI'] = db_uri_override
self.db = db
db.app = self.app
db.reflect()
db.drop_all()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
@contextmanager
def client(self, *, user_id=None):
"""Provide an HTTP client.
If a user ID is given, the client authenticates with the user's
credentials.
"""
client = self.app.test_client()
if user_id is not None:
add_user_credentials_to_session(client, user_id)
yield client
def add_user_credentials_to_session(client, user_id):
session_token = find_session_token_for_user(user_id)
with client.session_transaction() as session:
session['user_id'] = str(user_id)
session['user_auth_token'] = str(session_token.token)
| """
tests.base
~~~~~~~~~~
Base classes for test cases
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from contextlib import contextmanager
import os
from pathlib import Path
from unittest import TestCase
from unittest.mock import patch
from byceps.application import create_app
from byceps.database import db
from byceps.services.authentication.session.models.session_token \
import SessionToken
from tests import mocks
_CONFIG_PATH = Path('../config')
CONFIG_FILENAME_TEST_PARTY = _CONFIG_PATH / 'test_party.py'
CONFIG_FILENAME_TEST_ADMIN = _CONFIG_PATH / 'test_admin.py'
class AbstractAppTestCase(TestCase):
@patch('redis.StrictRedis.from_url', mocks.strict_redis_client_from_url)
def setUp(self, config_filename=CONFIG_FILENAME_TEST_PARTY):
self.app = create_app(config_filename)
# Allow overriding of database URI from the environment.
db_uri_override = os.environ.get('DATABASE_URI')
if db_uri_override:
self.app.config['SQLALCHEMY_DATABASE_URI'] = db_uri_override
self.db = db
db.app = self.app
db.reflect()
db.drop_all()
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
@contextmanager
def client(self, *, user_id=None):
"""Provide an HTTP client.
If a user ID is given, the client authenticates with the user's
credentials.
"""
client = self.app.test_client()
if user_id is not None:
add_user_credentials_to_session(client, user_id)
yield client
def add_user_credentials_to_session(client, user_id):
session_token = SessionToken.query.filter_by(user_id=user_id).one_or_none()
with client.session_transaction() as session:
session['user_id'] = str(user_id)
session['user_auth_token'] = str(session_token.token)
| bsd-3-clause | Python |
8ef3a5841f35af7348581cdd49224c007c9e8729 | Add open assets testcase | flux3dp/fluxghost,flux3dp/fluxghost,flux3dp/fluxghost,flux3dp/fluxghost | tests/main.py | tests/main.py |
from importlib import import_module
import sys
def try_import(module_name):
try:
sys.stdout.write("Import %s ... " % module_name)
sys.stdout.flush()
import_module(module_name)
sys.stdout.write("OK\n")
sys.stdout.flush()
except ImportError as e:
sys.stdout.write("ERROR: %s" % e)
sys.stdout.flush()
def main():
try_import("scipy")
try_import("scipy.interpolate.rbf")
try_import("Crypto")
try_import("serial")
try_import("PIL")
try_import("numpy")
try_import("zipimport")
try_import("fluxclient")
try_import("fluxclient.fcode")
try_import("fluxclient.hw_profile")
try_import("fluxclient.laser")
try_import("fluxclient.printer")
try_import("fluxclient.printer._printer")
try_import("fluxclient.robot")
try_import("fluxclient.scanner")
try_import("fluxclient.scanner._scanner")
try_import("fluxclient.upnp")
sys.stdout.write("Open resource fluxclient::assets/flux3dp-icon.png ... ")
sys.stdout.flush()
try:
import pkg_resources
pkg_resources.resource_stream("fluxclient", "assets/flux3dp-icon.png")
sys.stdout.write("OK\n")
except Exception as e:
sys.stdout.write("ERROR: %s" % e)
sys.stdout.flush()
|
from importlib import import_module
import sys
def try_import(module_name):
try:
sys.stdout.write("Import %s ... " % module_name)
sys.stdout.flush()
m = import_module(module_name)
sys.stdout.write("OK\n")
sys.stdout.flush()
except ImportError as e:
sys.stdout.write("ERROR: %s" % e)
sys.stdout.flush()
def main():
try_import("scipy")
try_import("scipy.interpolate.rbf")
try_import("Crypto")
try_import("serial")
try_import("PIL")
try_import("numpy")
try_import("zipimport")
try_import("fluxclient")
try_import("fluxclient.fcode")
try_import("fluxclient.hw_profile")
try_import("fluxclient.laser")
try_import("fluxclient.printer")
try_import("fluxclient.printer._printer")
try_import("fluxclient.robot")
try_import("fluxclient.scanner")
try_import("fluxclient.scanner._scanner")
try_import("fluxclient.upnp")
| agpl-3.0 | Python |
82f9c36998a526d302be1b7e10e61983d7dbd182 | adjust view for the renaming of log keys | SuperCowPowers/workbench,djtotten/workbench,SuperCowPowers/workbench,djtotten/workbench,djtotten/workbench,SuperCowPowers/workbench | server/workers/view_pcap_bro.py | server/workers/view_pcap_bro.py | ''' view_pcap_bro worker '''
import zerorpc
import itertools
def plugin_info():
return {'name':'view_pcap_bro', 'class':'ViewPcapBro', 'dependencies': ['pcap_bro', 'pcap_meta'],
'description': 'This worker generates a pcap view for the sample. Output keys: [bro_output_log_names...]'}
class ViewPcapBro():
''' ViewPcapBro: Generates a view for bro output on a pcap sample '''
def __init__(self):
self.c = zerorpc.Client()
self.c.connect("tcp://127.0.0.1:4242")
def execute(self, input_data):
# Loop around the output keys for pcap_meta and pcap_bro output
view = {key: input_data['pcap_bro'][key] for key in input_data['pcap_bro'].keys()}
view.update({key: input_data['pcap_meta'][key] for key in input_data['pcap_meta'].keys()})
# Okay this view is going to also take a peek at the bro output logs
for name, md5 in input_data['pcap_bro'].iteritems():
if '_log' in name:
view[name] = []
stream = self.c.stream_sample(md5, 20)
for row in itertools.islice(stream, 0, 1):
view[name].append(row)
return view
def __del__(self):
''' Class Cleanup '''
# Close zeroRPC client
self.c.close()
# Unit test: Create the class, the proper input and run the execute() method for a test
def test():
''' view_pcap_bro.py: Unit test'''
# This worker test requires a local server as it relies on the recursive dependencies
import zerorpc
c = zerorpc.Client(timeout=300)
c.connect("tcp://127.0.0.1:4242")
md5 = c.store_sample('http.pcap', open('../../test_files/pcap/http.pcap', 'rb').read(), 'pcap')
output = c.work_request('view_pcap_bro', md5)
print 'ViewPcapBro: '
import pprint
pprint.pprint(output)
if __name__ == "__main__":
test() | ''' view_pcap_bro worker '''
import zerorpc
import itertools
def plugin_info():
return {'name':'view_pcap_bro', 'class':'ViewPcapBro', 'dependencies': ['pcap_bro', 'pcap_meta'],
'description': 'This worker generates a pcap view for the sample. Output keys: [bro_output_log_names...]'}
class ViewPcapBro():
''' ViewPcapBro: Generates a view for bro output on a pcap sample '''
def __init__(self):
self.c = zerorpc.Client()
self.c.connect("tcp://127.0.0.1:4242")
def execute(self, input_data):
# Loop around the output keys for pcap_meta and pcap_bro output
view = {key: input_data['pcap_bro'][key] for key in input_data['pcap_bro'].keys()}
view.update({key: input_data['pcap_meta'][key] for key in input_data['pcap_meta'].keys()})
# Okay this view is going to also take a peek at the bro output logs
for name, md5 in input_data['pcap_bro'].iteritems():
if 'bro_log' in name:
view[name] = []
stream = self.c.stream_sample(md5, 20)
for row in itertools.islice(stream, 0, 1):
view[name].append(row)
return view
def __del__(self):
''' Class Cleanup '''
# Close zeroRPC client
self.c.close()
# Unit test: Create the class, the proper input and run the execute() method for a test
def test():
''' view_pcap_bro.py: Unit test'''
# This worker test requires a local server as it relies on the recursive dependencies
import zerorpc
c = zerorpc.Client(timeout=300)
c.connect("tcp://127.0.0.1:4242")
md5 = c.store_sample('http.pcap', open('../../test_files/pcap/http.pcap', 'rb').read(), 'pcap')
output = c.work_request('view_pcap_bro', md5)
print 'ViewPcapBro: '
import pprint
pprint.pprint(output)
if __name__ == "__main__":
test() | mit | Python |
021dc3e1fa90bad39ce92ea08f3233dd87236d8e | Bump version | markstory/lint-review,markstory/lint-review,markstory/lint-review | lintreview/__init__.py | lintreview/__init__.py | __version__ = '2.9.0'
| __version__ = '2.8.0'
| mit | Python |
eba09997f1208b729eac4a3c8cf37a92dbc1e6ed | fix raising Exception | wenh123/news-diff,g0v/news-diff | lib/util/net.py | lib/util/net.py | # -*- coding: utf-8 -*-
#
import urllib
portal_ptrn_list = {
'feedsportal': "\.feedsportal\.com",
}
def get_portal(url):
import re
for portal in portal_ptrn_list:
ptrn = re.compile(portal_ptrn_list[portal])
if (ptrn.search(url)): return portal
return False
def break_portal(portal, payload, uo):
try:
if 'feedsportal' == portal:
return _break_portal_feedsportal(payload, uo)
except Exception:
import traceback
print('\n***\nFailed breaking portal (%s, %s) ***' % (portal, payload['url']))
traceback.print_exc()
def _break_portal_feedsportal(payload, uo):
from lxml.html import fromstring
text = uo.read()
try:
html = fromstring(text)
payload['url_read'] = html.cssselect('a')[0].attrib['href']
payload['src'] = urllib.urlopen(payload['url_read'])
except:
payload['url_read'] = uo.url
payload['src'] = text
def fetch(payload, dbi = None):
"""抓取 payload['url'] 的檔案
並將最終讀取到的 url 寫入 payload['url_read'], response 寫入 payload['src']
"""
import re
from lxml.html import fromstring
from lib import db, DB, logger
from lib.util.text import to_unicode
extra = {'classname': 'util.net.fetch()'}
try:
uo = urllib.urlopen(payload['url'])
if (uo.code != 200):
raise IOError("HTTP response code=%d from %s" % (uo.code, uo.url))
portal = get_portal(uo.url)
if portal:
break_portal(portal, payload, uo)
else:
payload['src'] = uo.read()
payload['url_read'] = uo.url
except Exception as e:
# 抓取出錯,留待記錄 (save_fetch)
payload['src'] = 'error ' + unicode(e)
payload['category'] = 'error'
payload['exception'] = e
if 'url_read' not in payload:
payload['url_read'] = payload['url']
if dbi is None: _dbi = DB()
else: _dbi = dbi
try:
db.save_fetch(payload['url'], to_unicode(payload['src']), payload['category'], dbi = _dbi)
except Exception as e:
logger.warning('DB save_fetch failed for url %s' % payload['url'], extra=extra)
logger.debug(e)
if dbi is None: _dbi.disconnect()
if 'error' == payload['category']:
# raise the exception to skip the parsing process
logger.warning("failed fetching %s" % payload['url'], extra=extra)
raise payload['exception']
return payload
def normalize_url(url):
import re
url = url.rstrip('/')
return re.sub('^https?://', '', url)
| # -*- coding: utf-8 -*-
#
import urllib
portal_ptrn_list = {
'feedsportal': "\.feedsportal\.com",
}
def get_portal(url):
import re
for portal in portal_ptrn_list:
ptrn = re.compile(portal_ptrn_list[portal])
if (ptrn.search(url)): return portal
return False
def break_portal(portal, payload, uo):
try:
if 'feedsportal' == portal:
return _break_portal_feedsportal(payload, uo)
except Exception:
import traceback
print('\n***\nBreak Portal Failed (%s, %s) ***' % (portal, payload['url']))
traceback.print_exc()
def _break_portal_feedsportal(payload, uo):
from lxml.html import fromstring
text = uo.read()
try:
html = fromstring(text)
payload['url_read'] = html.cssselect('a')[0].attrib['href']
payload['src'] = urllib.urlopen(payload['url_read'])
except:
payload['url_read'] = uo.url
payload['src'] = text
def fetch(payload, dbi = None):
"""抓取 payload['url'] 的檔案
並將最終讀取到的 url 寫入 payload['url_read'], response 寫入 payload['src']
"""
import re
from lxml.html import fromstring
from lib import db, DB, logger
from lib.util.text import to_unicode
extra = {'classname': 'util.net.fetch()'}
try:
uo = urllib.urlopen(payload['url'])
if (uo.code != 200):
raise("HTTP response code=%d from %s" % (uo.code, uo.url))
portal = get_portal(uo.url)
if portal:
break_portal(portal, payload, uo)
else:
payload['src'] = uo.read()
payload['url_read'] = uo.url
except Exception as e:
# 抓取出錯,留待記錄 (save_fetch)
payload['src'] = 'error ' + unicode(e)
payload['category'] = 'error'
payload['exception'] = e
if 'url_read' not in payload:
payload['url_read'] = payload['url']
if dbi is None: _dbi = DB()
else: _dbi = dbi
try:
db.save_fetch(payload['url'], to_unicode(payload['src']), payload['category'], dbi = _dbi)
except Exception as e:
logger.warning('DB save_fetch failed for url %s' % payload['url'], extra=extra)
logger.debug(e)
if dbi is None: _dbi.disconnect()
if 'error' == payload['category']:
# raise the exception to skip the parsing process
logger.warning("failed fetching %s" % payload['url'], extra=extra)
raise payload['exception']
return payload
def normalize_url(url):
import re
url = url.rstrip('/')
return re.sub('^https?://', '', url)
| mit | Python |
665eb8182e57e729790e83d2bf925e67ab864e6e | Update discord backend | python-social-auth/social-core,python-social-auth/social-core | social_core/backends/discord.py | social_core/backends/discord.py | """
Discord Auth OAuth2 backend, docs at:
https://discordapp.com/developers/docs/topics/oauth2
"""
from social_core.backends.oauth import BaseOAuth2
class DiscordOAuth2(BaseOAuth2):
name = 'discord'
AUTHORIZATION_URL = 'https://discordapp.com/api/oauth2/authorize'
ACCESS_TOKEN_URL = 'https://discordapp.com/api/oauth2/token'
ACCESS_TOKEN_METHOD = 'POST'
REVOKE_TOKEN_URL = 'https://discordapp.com/api/oauth2/token/revoke'
REVOKE_TOKEN_METHOD = 'GET'
DEFAULT_SCOPE = ['identify']
SCOPE_SEPARATOR = '+'
REDIRECT_STATE = False
EXTRA_DATA = [
('expires_in', 'expires'),
('refresh_token', 'refresh_token')
]
def get_user_details(self, response):
return {'username': response.get('username'),
'email': response.get('email') or ''}
def user_data(self, access_token, *args, **kwargs):
url = 'https://discordapp.com/api/users/@me'
auth_header = {"Authorization": "Bearer %s" % access_token}
return self.get_json(url, headers=auth_header) | """
Discord Auth OAuth2 backend, docs at:
https://discordapp.com/developers/docs/topics/oauth2
"""
from social_core.backends.oauth import BaseOAuth2
class DiscordOAuth2(BaseOAuth2):
name = 'discord'
AUTHORIZATION_URL = 'https://discordapp.com/api/oauth2/authorize'
ACCESS_TOKEN_URL = 'https://discordapp.com/api/oauth2/token'
ACCESS_TOKEN_METHOD = 'POST'
DEFAULT_SCOPE = ['identify']
SCOPE_SEPARATOR = '+'
REDIRECT_STATE = False
EXTRA_DATA = [
('expires_in', 'expires'),
('refresh_token', 'refresh_token', True)
]
def get_user_details(self, response):
return {'username': response.get('username'),
'email': response.get('email') or ''}
def user_data(self, access_token, *args, **kwargs):
url = 'https://discordapp.com/api/users/@me'
auth_header = {"Authorization": "Bearer %s" % access_token}
return self.get_json(url, headers=auth_header) | bsd-3-clause | Python |
d3227e87b658b4ee634dd273a97d1a8fba4c96c9 | Revise docstring and add space line | bowen0701/algorithms_data_structures | lc461_hamming_distance.py | lc461_hamming_distance.py | """Leetcode 461. Hamming Distance
Medium
URL: https://leetcode.com/problems/hamming-distance/description/
The Hamming distance between two integers is the number of positions at which
the corresponding bits are different.
Given two integers x and y, calculate the Hamming distance.
Note:
0 ≤ x, y < 231.
Example:
Input: x = 1, y = 4
Output: 2
Explanation:
1 (0 0 0 1)
4 (0 1 0 0)
↑ ↑
The above arrows point to positions where the corresponding bits
are different.
"""
class Solution(object):
def hammingDistance(self, x, y):
"""
:type x: int
:type y: int
:rtype: int
Time complexity: O(1).
Space complexity: O(1).
"""
return bin(x ^ y).count('1')
def main():
print Solution().hammingDistance(1, 4)
if __name__ == '__main__':
main()
| """Leetcode 461. Hamming Distance
Medium
URL: https://leetcode.com/problems/hamming-distance/description/
The Hamming distance between two integers is the number of positions at which
the corresponding bits are different.
Given two integers x and y, calculate the Hamming distance.
Note:
0 ≤ x, y < 231.
Example:
Input: x = 1, y = 4
Output: 2
Explanation:
1 (0 0 0 1)
4 (0 1 0 0)
↑ ↑
The above arrows point to positions where the corresponding bits are different.
"""
class Solution(object):
def hammingDistance(self, x, y):
"""
:type x: int
:type y: int
:rtype: int
Time complexity: O(1).
Space complexity: O(1).
"""
return bin(x ^ y).count('1')
def main():
print Solution().hammingDistance(1, 4)
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
5d33de3868df4549621763db07267ef59fb94eb8 | Fix ohe type in ce | analysiscenter/dataset | dataset/models/tf/losses/core.py | dataset/models/tf/losses/core.py | """ Contains base tf losses """
import tensorflow as tf
def softmax_cross_entropy(labels, logits, *args, **kwargs):
""" Multi-class CE which takes plain or one-hot labels
Parameters
----------
labels : tf.Tensor
logits : tf.Tensor
args
other positional parameters from `tf.losses.softmax_cross_entropy`
kwargs
other named parameters from `tf.losses.softmax_cross_entropy`
Returns
-------
tf.Tensor
"""
labels_shape = tf.shape(labels)
logits_shape = tf.shape(logits)
c = tf.cast(tf.equal(labels_shape, logits_shape), tf.int32)
e = tf.equal(tf.reduce_sum(c, axis=-1), logits_shape.shape[-1])
labels = tf.cond(e, lambda: tf.cast(labels, dtype=logits.dtype),
lambda: tf.one_hot(tf.cast(labels, tf.int32), logits_shape[-1], dtype=logits.dtype))
return tf.losses.softmax_cross_entropy(labels, logits, *args, **kwargs)
| """ Contains base tf losses """
import tensorflow as tf
def softmax_cross_entropy(labels, logits, *args, **kwargs):
""" Multi-class CE which takes plain or one-hot labels
Parameters
----------
labels : tf.Tensor
logits : tf.Tensor
args
other positional parameters from `tf.losses.softmax_cross_entropy`
kwargs
other named parameters from `tf.losses.softmax_cross_entropy`
Returns
-------
tf.Tensor
"""
labels_shape = tf.shape(labels)
logits_shape = tf.shape(logits)
c = tf.cast(tf.equal(labels_shape, logits_shape), tf.int32)
e = tf.equal(tf.reduce_sum(c, axis=-1), logits_shape.shape[-1])
labels = tf.cond(e, lambda: tf.cast(labels, dtype=logits.dtype),
lambda: tf.one_hot(labels, logits_shape[-1], dtype=logits.dtype))
return tf.losses.softmax_cross_entropy(labels, logits, *args, **kwargs)
| apache-2.0 | Python |
33e581931859eb23d541332f9f31ca2fe8be6630 | Update device_credentials to work with new RestClient | auth0/auth0-python,auth0/auth0-python | auth0/v2/device_credentials.py | auth0/v2/device_credentials.py | from .rest import RestClient
class DeviceCredentials(object):
def __init__(self, domain, jwt_token):
self.domain = domain
self.client = RestClient(jwt=jwt_token)
def _url(self, id=None):
url = 'https://%s/api/v2/device-credentials' % self.domain
if id is not None:
return url + '/' + id
return url
def get(self, user_id=None, client_id=None, type=None,
fields=[], include_fields=True):
params = {
'fields': ','.join(fields) or None,
'include_fields': str(include_fields).lower(),
'user_id': user_id,
'client_id': client_id,
'type': type,
}
return self.client.get(self._url(), params=params)
def create(self, body):
return self.client.post(self._url(), data=body)
def delete(self, id):
return self.client.delete(self._url(id))
| from .rest import RestClient
class DeviceCredentials(object):
def __init__(self, domain, jwt_token):
url = 'https://%s/api/v2/device-credentials' % domain
self.client = RestClient(endpoint=url, jwt=jwt_token)
def get(self, user_id=None, client_id=None, type=None,
fields=[], include_fields=True):
params = {
'fields': ','.join(fields) or None,
'include_fields': str(include_fields).lower(),
'user_id': user_id,
'client_id': client_id,
'type': type,
}
return self.client.get(params=params)
def create(self, body):
return self.client.post(data=body)
def delete(self, id):
return self.client.delete(id=id)
| mit | Python |
ef4d1de9c30df4c2d75f09e1d23ab306a9762f71 | call super init in IncludeHandler | github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql | misc/scripts/check-qhelp.py | misc/scripts/check-qhelp.py | #!/bin/env python3
"""cross platform wrapper around codeql generate query-help to check .qhelp files
This takes care of:
* providing a temporary directory to --output
* finding usages of .inc.qhelp arguments
"""
import pathlib
import subprocess
import sys
import tempfile
import xml.sax
include_cache = {}
class IncludeHandler(xml.sax.ContentHandler):
def __init__(self, xml):
super().__init__()
self.__xml = xml
def startElement(self, name, attrs):
if name == "include":
src = (self.__xml.parent / attrs["src"]).resolve()
include_cache.setdefault(src, set()).add(self.__xml)
class IgnoreErrorsHandler(xml.sax.ErrorHandler):
def error(self, exc):
pass
def fatalError(self, exc):
pass
def warning(self, exc):
pass
def init_include_cache():
if not include_cache:
for qhelp in pathlib.Path().rglob("*.qhelp"):
xml.sax.parse(qhelp, IncludeHandler(qhelp), IgnoreErrorsHandler())
def find_inc_qhelp_usages(arg):
init_include_cache()
return include_cache.get(arg.resolve(), ())
def transform_inputs(args):
for arg in args:
arg = pathlib.Path(arg)
if arg.suffixes == ['.inc', '.qhelp']:
for qhelp in find_inc_qhelp_usages(arg):
yield str(qhelp)
else:
yield str(arg)
affected_qhelp_files = list(transform_inputs(sys.argv[1:]))
if not affected_qhelp_files:
# can happen with changes on an unused .inc.qhelp file
print("nothing to do!")
sys.exit(0)
cmd = ["codeql", "generate", "query-help", "--format=markdown"]
with tempfile.TemporaryDirectory() as tmp:
cmd += [f"--output={tmp}", "--"]
cmd += affected_qhelp_files
res = subprocess.run(cmd)
sys.exit(res.returncode)
| #!/bin/env python3
"""cross platform wrapper around codeql generate query-help to check .qhelp files
This takes care of:
* providing a temporary directory to --output
* finding usages of .inc.qhelp arguments
"""
import pathlib
import tempfile
import sys
import subprocess
import xml.sax
include_cache = {}
class IncludeHandler(xml.sax.ContentHandler):
def __init__(self, xml):
self.__xml = xml
def startElement(self, name, attrs):
if name == "include":
src = (self.__xml.parent / attrs["src"]).resolve()
include_cache.setdefault(src, set()).add(self.__xml)
class IgnoreErrorsHandler(xml.sax.ErrorHandler):
def error(self, exc):
pass
def fatalError(self, exc):
pass
def warning(self, exc):
pass
def init_include_cache():
if not include_cache:
for qhelp in pathlib.Path().rglob("*.qhelp"):
xml.sax.parse(qhelp, IncludeHandler(qhelp), IgnoreErrorsHandler())
def find_inc_qhelp_usages(arg):
init_include_cache()
return include_cache.get(arg.resolve(), ())
def transform_inputs(args):
for arg in args:
arg = pathlib.Path(arg)
if arg.suffixes == ['.inc', '.qhelp']:
for qhelp in find_inc_qhelp_usages(arg):
yield str(qhelp)
else:
yield str(arg)
affected_qhelp_files = list(transform_inputs(sys.argv[1:]))
if not affected_qhelp_files:
# can happen with changes on an unused .inc.qhelp file
print("nothing to do!")
sys.exit(0)
cmd = ["codeql", "generate", "query-help", "--format=markdown"]
with tempfile.TemporaryDirectory() as tmp:
cmd += [f"--output={tmp}", "--"]
cmd += affected_qhelp_files
res = subprocess.run(cmd)
sys.exit(res.returncode)
| mit | Python |
cd45585233acfe7db1f757b244e2edba8dbb6f6b | Use environment variable REDIS_SERVER. | ooda/cloudly,ooda/cloudly | cloudly/cache.py | cloudly/cache.py | import os
import redis as pyredis
from cloudly.aws import ec2
from cloudly.memoized import Memoized
@Memoized
def get_conn():
ip_addresses = (os.environ.get("REDIS_SERVER") or
ec2.find_service_ip('redis-server') or
["127.0.0.1"])
redis_url = os.getenv('REDISTOGO_URL', # Set when on Heroku.
'redis://{}:6379'.format(ip_addresses[0]))
return pyredis.from_url(redis_url)
redis = get_conn()
| import os
import redis as pyredis
from cloudly.aws import ec2
from cloudly.memoized import Memoized
@Memoized
def get_conn():
ip_addresses = ec2.find_service_ip('redis-server') or ["127.0.0.1"]
redis_url = os.getenv('REDISTOGO_URL', # Set when on Heroku.
'redis://{}:6379'.format(ip_addresses[0]))
return pyredis.from_url(redis_url)
redis = get_conn()
| mit | Python |
15aeb761e07ce3a1f6aef696abf3c2a0b6c6e394 | change status codes | hmisty/bson-rpc | bson_rpc/status.py | bson_rpc/status.py | # MIT License
#
# Copyright (c) 2017 Evan Liu (hmisty)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
ok = {'error_code': 0, 'result': None}
unknown_message = {'error_code': 401, 'error_msg': 'unknown message'}
invoke_error = {'error_code': 402, 'error_msg': 'failed to call function'}
function_not_found = {'error_code': 404, 'error_msg': 'function not found'}
function_not_callable = {'error_code': 405, 'error_msg': 'function not callable'}
#network error
connection_error: {'error_code': 501, 'error_msg': 'connection error'},
| # MIT License
#
# Copyright (c) 2017 Evan Liu (hmisty)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
ok = {'error_code': 0, 'result': None}
unknown_message = {'error_code': -1, 'error_msg': 'unknown message'}
invoke_error = {'error_code': -2, 'error_msg': 'failed to call function'}
function_not_found = {'error_code': -3, 'error_msg': 'function not found'}
function_not_callable = {'error_code': -4, 'error_msg': 'function not callable'}
| mit | Python |
aef238386c71d52def424c8f47a103bd25f12e26 | Make fix_updated migration (sort of) reversible | cityofsomerville/citydash,cityofsomerville/citydash,codeforboston/cornerwise,codeforboston/cornerwise,codeforboston/cornerwise,codeforboston/cornerwise,cityofsomerville/citydash,cityofsomerville/cornerwise,cityofsomerville/citydash,cityofsomerville/cornerwise,cityofsomerville/cornerwise,cityofsomerville/cornerwise | server/proposal/migrations/0034_fix_updated.py | server/proposal/migrations/0034_fix_updated.py | import django.contrib.gis.db.models.fields
from django.db import migrations
from django.contrib.gis.db.models import Max
def fix_updated(apps, _):
Proposal = apps.get_model("proposal", "Proposal")
proposals = Proposal.objects.annotate(published=Max("documents__published"))
for proposal in proposals:
if proposal.published:
proposal.updated = proposal.published
proposal.save()
def do_nothing(apps, _):
pass
class Migration(migrations.Migration):
dependencies = [
('proposal', '0033_non_null_started'),
]
operations = [
migrations.RunPython(fix_updated, do_nothing),
]
| import django.contrib.gis.db.models.fields
from django.db import migrations
from django.contrib.gis.db.models import Max
def fix_updated(apps, _):
Proposal = apps.get_model("proposal", "Proposal")
proposals = Proposal.objects.annotate(published=Max("documents__published"))
for proposal in proposals:
if proposal.published:
proposal.updated = proposal.published
proposal.save()
class Migration(migrations.Migration):
dependencies = [
('proposal', '0033_non_null_started'),
]
operations = [
migrations.RunPython(fix_updated),
]
| mit | Python |
8ca32b33db506ba9083b98dd3ecf740cbee89ab1 | Update authentication.JWTAuthentication | davesque/django-rest-framework-simplejwt,davesque/django-rest-framework-simplejwt | rest_framework_simplejwt/authentication.py | rest_framework_simplejwt/authentication.py | from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from django.utils.six import text_type
from jose import jwt
from rest_framework import HTTP_HEADER_ENCODING
from rest_framework.authentication import BaseAuthentication
from rest_framework.exceptions import AuthenticationFailed
AUTH_HEADER_TYPE = 'Bearer'
AUTH_HEADER_TYPE_BYTES = AUTH_HEADER_TYPE.encode('utf-8')
USER_ID_FIELD = 'pk'
PAYLOAD_ID_FIELD = 'user_pk'
SECRET_KEY = 'blah'
User = get_user_model()
class JWTAuthentication(BaseAuthentication):
www_authenticate_realm = 'api'
def authenticate(self, request):
header = self.get_header(request)
if header is None:
return None
token = self.get_token(header)
if token is None:
return None
payload = self.get_payload(token)
user_id = self.get_user_id(payload)
return (self.get_user(user_id), None)
def authenticate_header(self, request):
return '{0} realm="{1}"'.format(
AUTH_HEADER_TYPE,
self.www_authenticate_realm,
)
def get_header(self, request):
header = request.META.get('HTTP_AUTHORIZATION')
if isinstance(header, text_type):
# Work around django test client oddness
header = header.encode(HTTP_HEADER_ENCODING)
return header
def get_token(self, header):
parts = header.split()
if parts[0] != AUTH_HEADER_TYPE_BYTES:
return None
if len(parts) != 2:
raise AuthenticationFailed(_('Authorization header is invalid.'))
return parts[1]
def get_payload(self, token):
try:
return jwt.decode(token, SECRET_KEY, algorithms=['HS256'])
except TypeError:
raise AuthenticationFailed(_('Token is invalid.'))
def get_user_id(self, payload):
try:
return payload[PAYLOAD_ID_FIELD]
except KeyError:
raise AuthenticationFailed(_('Token contained no user identification.'))
def get_user(self, user_id):
try:
user = User.objects.get(**{USER_ID_FIELD: user_id})
except User.DoesNotExist:
raise AuthenticationFailed(_('User not found.'))
if not user.is_active:
raise AuthenticationFailed(_('User is inactive.'))
return user
| from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from jose import jwt
from rest_framework.authentication import BaseAuthentication, get_authorization_header
from rest_framework.exceptions import AuthenticationFailed
AUTH_HEADER_TYPE = 'Bearer'
AUTH_HEADER_TYPE_BYTES = AUTH_HEADER_TYPE.encode('utf-8')
USER_ID_FIELD = 'pk'
PAYLOAD_ID_FIELD = 'user_pk'
SECRET_KEY = 'blah'
User = get_user_model()
class JWTAuthentication(BaseAuthentication):
www_authenticate_realm = 'api'
def authenticate(self, request):
header = self.get_header(request)
token = self.get_token(header)
payload = self.get_payload(token)
user_id = self.get_user_id(payload)
return (self.get_user(user_id), None)
def authenticate_header(self, request):
return '{0} realm="{1}"'.format(
AUTH_HEADER_TYPE,
self.www_authenticate_realm,
)
def get_header(self, request):
return get_authorization_header(request)
def get_token(self, header):
parts = header.split()
if not parts or parts[0] != AUTH_HEADER_TYPE_BYTES:
return None
if len(parts) == 1:
raise AuthenticationFailed(_('No token provided.'))
elif len(parts) > 2:
raise AuthenticationFailed(_('Token should not contain spaces.'))
return parts[1]
def get_payload(self, token):
try:
return jwt.decode(token, SECRET_KEY, algorithms=['HS256'])
except TypeError:
raise AuthenticationFailed(_('Token was invalid.'))
def get_user_id(self, payload):
try:
return payload[PAYLOAD_ID_FIELD]
except KeyError:
raise AuthenticationFailed(_('Token contained no user identification.'))
def get_user(self, user_id):
try:
user = User.objects.get(**{USER_ID_FIELD: user_id})
except User.DoesNotExist:
raise AuthenticationFailed(_('User not found.'))
if not user.is_active:
raise AuthenticationFailed(_('User inactive.'))
return user
| mit | Python |
f29e177ff039990463ce4af3e08b9df014d4542c | put output files in a separate dir and tar from there | sassoftware/jobslave,sassoftware/jobslave,sassoftware/jobslave | jobslave/generators/raw_fs_image.py | jobslave/generators/raw_fs_image.py | #
# Copyright (c) 2004-2007 rPath, Inc.
#
# All Rights Reserved
#
import os
import tempfile
from jobslave.generators import bootable_image, constants
from jobslave.filesystems import sortMountPoints
from conary.lib import util, log
class RawFsImage(bootable_image.BootableImage):
def makeBlankFS(self, image, fsType, size, fsLabel = None):
if os.path.exists(image):
util.rmtree(image)
util.mkdirChain(os.path.split(image)[0])
util.execute('dd if=/dev/zero of=%s count=1 seek=%d bs=4096' % \
(image, (size / 4096) - 1))
fs = bootable_image.Filesystem(image, fsType, size, fsLabel = fsLabel)
fs.format()
return fs
def makeFSImage(self, sizes):
root = self.workDir + "/root"
try:
# create an image file per mount point
imgFiles = {}
for mountPoint in self.mountDict.keys():
requestedSize, minFreeSpace, fsType = self.mountDict[mountPoint]
if requestedSize - sizes[mountPoint] < minFreeSpace:
requestedSize += sizes[mountPoint] + minFreeSpace
tag = mountPoint.replace("/", "")
tag = tag and tag or "root"
imgFiles[mountPoint] = os.path.join(self.workDir, "output", "%s-%s.%s" % (self.basefilename, tag, fsType))
log.info("creating mount point %s as %s size of %d" % (mountPoint, imgFiles[mountPoint], requestedSize))
fs = self.makeBlankFS(imgFiles[mountPoint], fsType, requestedSize, fsLabel = mountPoint)
self.addFilesystem(mountPoint, fs)
self.mountAll()
self.installFileTree(root)
finally:
self.umountAll()
util.rmtree(root, ignore_errors = True)
return imgFiles
def write(self):
totalSize, sizes = self.getImageSize(realign = 0, partitionOffset = 0)
finalImage = os.path.join(self.outputDir, self.basefilename + '.fs.tgz')
images = self.makeFSImage(sizes)
self.gzip(os.path.join(self.workDir, "output"), finalImage)
self.postOutput(((finalImage, 'Raw Filesystem Image'),))
| #
# Copyright (c) 2004-2007 rPath, Inc.
#
# All Rights Reserved
#
import os
import tempfile
from jobslave.generators import bootable_image, constants
from jobslave.filesystems import sortMountPoints
from conary.lib import util, log
class RawFsImage(bootable_image.BootableImage):
def makeBlankFS(self, image, fsType, size, fsLabel = None):
if os.path.exists(image):
util.rmtree(image)
util.mkdirChain(os.path.split(image)[0])
util.execute('dd if=/dev/zero of=%s count=1 seek=%d bs=4096' % \
(image, (size / 4096) - 1))
fs = bootable_image.Filesystem(image, fsType, size, fsLabel = fsLabel)
fs.format()
return fs
def makeFSImage(self, sizes):
root = self.workDir + "/root"
try:
# create an image file per mount point
imgFiles = {}
for mountPoint in self.mountDict.keys():
requestedSize, minFreeSpace, fsType = self.mountDict[mountPoint]
if requestedSize - sizes[mountPoint] < minFreeSpace:
requestedSize += sizes[mountPoint] + minFreeSpace
tag = mountPoint.replace("/", "")
tag = tag and tag or "root"
imgFiles[mountPoint] = os.path.join(self.outputDir, "%s-%s.%s" % (self.basefilename, tag, fsType))
log.info("creating mount point %s as %s size of %d" % (mountPoint, imgFiles[mountPoint], requestedSize))
fs = self.makeBlankFS(imgFiles[mountPoint], fsType, requestedSize, fsLabel = mountPoint)
self.addFilesystem(mountPoint, fs)
self.mountAll()
self.installFileTree(root)
finally:
self.umountAll()
util.rmtree(root, ignore_errors = True)
return imgFiles
def write(self):
totalSize, sizes = self.getImageSize(realign = 0, partitionOffset = 0)
finalImage = os.path.join(self.outputDir, self.basefilename + '.tar.gz')
images = self.makeFSImage(sizes)
self.gzip(self.outputDir, finalImage)
self.postOutput(((finalImage, 'Raw Filesystem Image'),))
| apache-2.0 | Python |
8e20d2e6fde371fcc85979f0ee0b10a38a19d00b | Remove unused environment variable | laughingman7743/PyAthena | tests/util.py | tests/util.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import codecs
import contextlib
import functools
import os
class Env(object):
def __init__(self):
self.region_name = os.getenv('AWS_DEFAULT_REGION', None)
assert self.region_name, \
'Required environment variable `AWS_DEFAULT_REGION` not found.'
self.s3_staging_dir = os.getenv('AWS_ATHENA_S3_STAGING_DIR', None)
assert self.s3_staging_dir, \
'Required environment variable `AWS_ATHENA_S3_STAGING_DIR` not found.'
def with_cursor(fn):
@functools.wraps(fn)
def wrapped_fn(self, *args, **kwargs):
with contextlib.closing(self.connect()) as conn:
with conn.cursor() as cursor:
fn(self, cursor, *args, **kwargs)
return wrapped_fn
def with_engine(fn):
@functools.wraps(fn)
def wrapped_fn(self, *args, **kwargs):
engine = self.create_engine()
try:
with contextlib.closing(engine.connect()) as conn:
fn(self, engine, conn, *args, **kwargs)
finally:
engine.dispose()
return wrapped_fn
def read_query(path):
with codecs.open(path, 'rb', 'utf-8') as f:
query = f.read()
return [q.strip() for q in query.split(';') if q and q.strip()]
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import codecs
import contextlib
import functools
import os
class Env(object):
def __init__(self):
# self.user = os.getenv('AWS_ACCESS_KEY_ID', None)
# assert self.user, \
# 'Required environment variable `AWS_ACCESS_KEY_ID` not found.'
# self.password = os.getenv('AWS_SECRET_ACCESS_KEY', None)
# assert self.password, \
# 'Required environment variable `AWS_SECRET_ACCESS_KEY` not found.'
self.region_name = os.getenv('AWS_DEFAULT_REGION', None)
assert self.region_name, \
'Required environment variable `AWS_DEFAULT_REGION` not found.'
self.s3_staging_dir = os.getenv('AWS_ATHENA_S3_STAGING_DIR', None)
assert self.s3_staging_dir, \
'Required environment variable `AWS_ATHENA_S3_STAGING_DIR` not found.'
def with_cursor(fn):
@functools.wraps(fn)
def wrapped_fn(self, *args, **kwargs):
with contextlib.closing(self.connect()) as conn:
with conn.cursor() as cursor:
fn(self, cursor, *args, **kwargs)
return wrapped_fn
def with_engine(fn):
@functools.wraps(fn)
def wrapped_fn(self, *args, **kwargs):
engine = self.create_engine()
try:
with contextlib.closing(engine.connect()) as conn:
fn(self, engine, conn, *args, **kwargs)
finally:
engine.dispose()
return wrapped_fn
def read_query(path):
with codecs.open(path, 'rb', 'utf-8') as f:
query = f.read()
return [q.strip() for q in query.split(';') if q and q.strip()]
| mit | Python |
b468faf2bc291b668ea3f32eeabdfa8933cfacac | use rffi.sizeof(rffi.INTPTR_T) instead of checking r_uint.BITS | topazproject/topaz,babelsberg/babelsberg-r,topazproject/topaz,topazproject/topaz,babelsberg/babelsberg-r,kachick/topaz,babelsberg/babelsberg-r,kachick/topaz,babelsberg/babelsberg-r,kachick/topaz,babelsberg/babelsberg-r,topazproject/topaz | rupypy/utils/packing/stringpacking.py | rupypy/utils/packing/stringpacking.py | from pypy.rpython.lltypesystem import rffi
pointerlen = rffi.sizeof(rffi.INTPTR_T)
def make_string_packer(padding=" ", nullterminated=False):
def pack_string(packer, width):
space = packer.space
try:
string = space.str_w(
space.convert_type(packer.args_w[packer.args_index], space.w_string, "to_str")
)
except IndexError:
raise space.error(space.w_ArgumentError, "too few arguments")
if nullterminated:
packer.result += string
packer.result.append("\0")
else:
assert width >= 0
string = string[:width]
packer.result += string
packer.result.extend([padding] * (width - len(string)))
packer.args_index += 1
return pack_string
def pack_pointer(packer, repetitions):
# Should return a C pointer string to a char* or struct*, but we
# fake it to return just the right length, just as Rubinius does
if repetitions > len(packer.args_w) - packer.args_index:
raise packer.space.error(packer.space.w_ArgumentError, "too few arguments")
for i in xrange(repetitions):
for i in xrange(packer.args_index, repetitions + packer.args_index):
packer.result.extend(["\0"] * pointerlen)
packer.args_index += repetitions
| from pypy.rlib.rarithmetic import r_uint
pointerlen = 8 if r_uint.BITS > 32 else 4
def make_string_packer(padding=" ", nullterminated=False):
def pack_string(packer, width):
space = packer.space
try:
string = space.str_w(
space.convert_type(packer.args_w[packer.args_index], space.w_string, "to_str")
)
except IndexError:
raise space.error(space.w_ArgumentError, "too few arguments")
if nullterminated:
packer.result += string
packer.result.append("\0")
else:
assert width >= 0
string = string[:width]
packer.result += string
packer.result.extend([padding] * (width - len(string)))
packer.args_index += 1
return pack_string
def pack_pointer(packer, repetitions):
# Should return a C pointer string to a char* or struct*, but we
# fake it to return just the right length, just as Rubinius does
if repetitions > len(packer.args_w) - packer.args_index:
raise packer.space.error(packer.space.w_ArgumentError, "too few arguments")
for i in xrange(repetitions):
for i in xrange(packer.args_index, repetitions + packer.args_index):
packer.result.extend(["\0"] * pointerlen)
packer.args_index += repetitions
| bsd-3-clause | Python |
206fe7c34f3f15f52c27f0b40d419e84b5a28644 | Fix fabfile typo. | nexiles/nexiles.fabric.tasks,nexiles/nexiles.fabric.tasks | fabfile.py | fabfile.py | import os
from fabric.api import env
from nexiles.fabric.tasks import docs
from nexiles.fabric.tasks import utils
from nexiles.fabric.tasks import release
from nexiles.fabric.tasks import environment
PACKAGE_NAME = "nexiles.fabric.tasks"
VERSION = utils.get_version_from_setup_py()
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
BUILD_DIR = "{ROOT_DIR}/dist".format(**globals())
DIST_DIR = "~/develop/nexiles/dist/{PACKAGE_NAME}/{PACKAGE_NAME}-{VERSION}".format(**globals())
SRC_PACKAGE = "{BUILD_DIR}/{PACKAGE_NAME}-{VERSION}.tar.gz".format(**globals())
DOC_PACKAGE = "{BUILD_DIR}/{PACKAGE_NAME}-doc-{VERSION}.tar.gz".format(**globals())
PUBLIC_DIR = "/Volumes/skynet-wt-10-2/ptc/Windchill_10.2/HTTPServer/htdocs/docs/{PACKAGE_NAME}".format(**globals())
env.nexiles.update(
public_source=True,
package_name=PACKAGE_NAME,
version=VERSION,
root_dir=ROOT_DIR,
doc_package=DOC_PACKAGE,
doc_public_dir=PUBLIC_DIR
) | import os
from fabric.api import env
from nexiles.fabric.tasks import docs
from nexiles.fabric.tasks import utils
from nexiles.fabric.tasks import release
PACKAGE_NAME = "nexiles.fabric.tasks"
VERSION = utils.get_version_from_setup_py()
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
BUILD_DIR = "{ROOT_DIR}/dist".format(**globals())
DIST_DIR = "~/develop/nexiles/dist/{PACKAGE_NAME}/{PACKAGE_NAME}-{VERSION}".format(**globals())
SRC_PACKAGE = "{BUILD_DIR}/{PACKAGE_NAME}-{VERSION}.tar.gz".format(**globals())
DOC_PACKAGE = "{BUILD_DIR}/{PACKAGE_NAME}-doc-{VERSION}.tar.gz".format(**globals())
PUBLIC_DIR = "/Volumes/skynet-wt-10-2/ptc/Windchill_10.2/HTTPServer/htdocs/docs/{PACKAGE_NAME}".format(**globals()),
env.nexiles.update(
public_source=True,
package_name=PACKAGE_NAME,
version=VERSION,
root_dir=ROOT_DIR,
doc_package=DOC_PACKAGE,
doc_public_dir=PUBLIC_DIR
) | bsd-3-clause | Python |
482c3952213fdaef8336d3fe4abbc7ca0f78156c | Use `magick` instead of `imgconvert` | kms70847/Animation | lib/animation/__init__.py | lib/animation/__init__.py | #prerequisites: ImageMagick (http://www.imagemagick.org/script/index.php)
import itertools
import os
import os.path
import subprocess
import shutil
import math
def generate_unused_folder_name():
base = "temp_{}"
for i in itertools.count():
name = base.format(i)
if not os.path.exists(name):
return name
def make_gif(imgs, **kwargs):
"""creates a gif in the current directory, composed of the given images.
parameters:
- imgs: a list of PIL image objects.
optional parameters:
- name: the name of the gif file.
- default: "output.gif"
- delay: the number of 'ticks' between frames. 1 tick == 10 ms == 0.01 seconds. anything smaller than 2 will cause display problems.
- default: 2
- delete_temp_files: True if the temporary directory containing each individual frame should be deleted, False otherwise.
- default: True
"""
name = kwargs.get("name", "output.gif")
delay = kwargs.get("delay", 2)
dir_name = generate_unused_folder_name()
#create directory and move into it
os.mkdir(dir_name)
os.chdir(dir_name)
#create images. Use leading zeroes to ensure lexicographic order.
num_digits = max(1, int(math.log(len(imgs))))
for i, img in enumerate(imgs):
img.save("img_{:0{padding}}.png".format(i, padding=num_digits))
#create gif
cmd = ["magick", "convert", "-delay", str(delay), "img_*.png", "-layers", "optimize", "output.gif"]
subprocess.call(cmd)
#move gif out of temp directory
shutil.copyfile("output.gif", "../{}".format(name))
#return to original directory, and delete temp
os.chdir("..")
if kwargs.get("delete_temp_files", True):
shutil.rmtree(dir_name)
| #prerequisites: ImageMagick (http://www.imagemagick.org/script/index.php)
import itertools
import os
import os.path
import subprocess
import shutil
import math
def generate_unused_folder_name():
base = "temp_{}"
for i in itertools.count():
name = base.format(i)
if not os.path.exists(name):
return name
def make_gif(imgs, **kwargs):
"""creates a gif in the current directory, composed of the given images.
parameters:
- imgs: a list of PIL image objects.
optional parameters:
- name: the name of the gif file.
- default: "output.gif"
- delay: the number of 'ticks' between frames. 1 tick == 10 ms == 0.01 seconds. anything smaller than 2 will cause display problems.
- default: 2
- delete_temp_files: True if the temporary directory containing each individual frame should be deleted, False otherwise.
- default: True
"""
name = kwargs.get("name", "output.gif")
delay = kwargs.get("delay", 2)
dir_name = generate_unused_folder_name()
#create directory and move into it
os.mkdir(dir_name)
os.chdir(dir_name)
#create images. Use leading zeroes to ensure lexicographic order.
num_digits = max(1, int(math.log(len(imgs))))
for i, img in enumerate(imgs):
img.save("img_{:0{padding}}.png".format(i, padding=num_digits))
#create gif
#cmd = "imgconvert -delay {} img_*.png -layers optimize output.gif".format(delay)
cmd = ["imgconvert", "-delay", str(delay), "img_*.png", "-layers", "optimize", "output.gif"]
subprocess.call(cmd)
#move gif out of temp directory
shutil.copyfile("output.gif", "../{}".format(name))
#return to original directory, and delete temp
os.chdir("..")
if kwargs.get("delete_temp_files", True):
shutil.rmtree(dir_name) | mit | Python |
b43e59a5389ebbd1e57d4fcf62b0958937b504df | Add OCA as author of OCA addons | Eficent/manufacture-reporting,Endika/manufacture-reporting | __unported__/mrp_webkit/__openerp__.py | __unported__/mrp_webkit/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-2013 Serpent Consulting Services Pvt. Ltd.(<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
{
'name': 'MRP',
'version': '1.0',
'author': "Serpent Consulting Services Pvt. Ltd.,Odoo Community Association (OCA)",
'website': 'http://www.serpentcs.com',
'license': 'AGPL-3',
'category': 'Manufacturing',
'sequence': 18,
'summary': 'Manufacturing Orders, Bill of Materials, Routing',
'depends': ['mrp','report_webkit'],
'description': """
Manage the Manufacturing process in OpenERP
===========================================
It is conversion of rml report to Webkit Report.
""",
'data': [
'mrp_report.xml',
],
'installable': False,
'application': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-2013 Serpent Consulting Services Pvt. Ltd.(<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
{
'name': 'MRP',
'version': '1.0',
'author': 'Serpent Consulting Services Pvt. Ltd.',
'website': 'http://www.serpentcs.com',
'license': 'AGPL-3',
'category': 'Manufacturing',
'sequence': 18,
'summary': 'Manufacturing Orders, Bill of Materials, Routing',
'depends': ['mrp','report_webkit'],
'description': """
Manage the Manufacturing process in OpenERP
===========================================
It is conversion of rml report to Webkit Report.
""",
'data': [
'mrp_report.xml',
],
'installable': False,
'application': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | Python |
a7b5041f42a7ea621f5b484e24cc36e751776474 | Add unit test for reader reconnect | mre/kafka-influxdb,mre/kafka-influxdb | kafka_influxdb/tests/test_worker.py | kafka_influxdb/tests/test_worker.py | import unittest
from mock import Mock
import random
from kafka_influxdb.worker import Worker
from kafka_influxdb.encoder import echo_encoder
class Config:
def __init__(self, buffer_size):
self.buffer_size = buffer_size
self.kafka_topic = "test"
self.influxdb_dbname = "mydb"
class DummyReader(object):
"""
A reader that yields dummy messages
"""
def __init__(self, messages, num_messages):
self.messages = messages
self.num_messages = num_messages
def read(self):
for i in range(self.num_messages):
yield random.choice(self.messages)
class FlakyReader(object):
"""
A fake reader that throws exceptions to simulate
connection errors
"""
def __init__(self, message, num_messages):
self.message = message
self.num_messages = num_messages
def read(self):
# Yield the first half of messages
for i in range(int(self.num_messages/2)):
yield self.message
# Simulate a connection error while reading
try:
raise Exception
except Exception:
# Continue like you don't care.
# Yield the second half of messages
for i in range(int(self.num_messages/2)):
yield self.message
class DummyWriter(object):
"""
A fake writer that does nothing with the input data
"""
def __init__(self):
pass
def write(self):
pass
class TestKafkaInfluxDB(unittest.TestCase):
def setUp(self):
self.config = Config(100)
self.encoder = echo_encoder.Encoder()
self.writer = DummyWriter()
self.writer = Mock()
self.writer.write.return_value = True
def test_buffering(self):
self.reader = DummyReader(["foo"], self.config.buffer_size - 1)
self.client = Worker(self.reader, self.encoder, self.writer, self.config)
self.client.consume()
self.assertFalse(self.writer.write.called)
def test_flush(self):
self.reader = DummyReader(["bar"], self.config.buffer_size)
self.client = Worker(self.reader, self.encoder, self.writer, self.config)
self.client.consume()
self.assertTrue(self.writer.write.called)
def test_reconnect(self):
self.reader = FlakyReader(["baz"], self.config.buffer_size)
self.client = Worker(self.reader, self.encoder, self.writer, self.config)
self.client.consume()
self.assertTrue(self.writer.write.called)
self.writer.write.assert_called_once_with(["baz"] * self.config.buffer_size)
| import unittest
from mock import Mock
import random
from kafka_influxdb.worker import Worker
from kafka_influxdb.encoder import echo_encoder
class Config:
def __init__(self, buffer_size):
self.buffer_size = buffer_size
self.kafka_topic = "test"
self.influxdb_dbname = "mydb"
class DummyReader(object):
"""
A reader that yields dummy messages
"""
def __init__(self, messages, num_messages):
self.messages = messages
self.num_messages = num_messages
def read(self):
for i in range(self.num_messages):
yield random.choice(self.messages)
class DummyWriter(object):
"""
A fake writer that does nothing with the input data
"""
def __init__(self):
pass
def write(self):
pass
class TestKafkaInfluxDB(unittest.TestCase):
def setUp(self):
self.config = Config(100)
self.encoder = echo_encoder.Encoder()
self.writer = DummyWriter()
self.writer = Mock()
self.writer.write.return_value = True
def test_buffering(self):
self.reader = DummyReader(["foo"], self.config.buffer_size - 1)
self.client = Worker(self.reader, self.encoder, self.writer, self.config)
self.client.consume()
self.assertFalse(self.writer.write.called)
def test_flush(self):
self.reader = DummyReader(["bar"], self.config.buffer_size)
self.client = Worker(self.reader, self.encoder, self.writer, self.config)
self.client.consume()
self.assertTrue(self.writer.write.called)
self.client = Worker(self.reader, self.encoder, self.writer, self.config)
self.client.consume()
self.assertTrue(self.writer.write.called)
| apache-2.0 | Python |
0ac7b18c846fe8df134a2241bb0163e9fd4b7633 | Initialize empty MonadicDict by default. | genenetwork/genenetwork2,genenetwork/genenetwork2,genenetwork/genenetwork2,genenetwork/genenetwork2 | wqflask/utility/monads.py | wqflask/utility/monads.py | """Monadic utilities
This module is a collection of monadic utilities for use in
GeneNetwork. It includes:
* MonadicDict - monadic version of the built-in dictionary
* MonadicDictCursor - monadic version of MySQLdb.cursors.DictCursor
that returns a MonadicDict instead of the built-in dictionary
"""
from collections import UserDict
from functools import partial
from MySQLdb.cursors import DictCursor
from pymonad.maybe import Just, Nothing
class MonadicDict(UserDict):
"""
Monadic version of the built-in dictionary.
Keys in this dictionary can be any python object, but values must
be monadic values.
"""
def __init__(self, d={}, convert=True):
"""
Initialize monadic dictionary.
If convert is False, values in dictionary d must be
monadic. If convert is True, values in dictionary d are
converted to monadic values.
"""
if convert:
super().__init__({key:(Nothing if value is None else Just(value))
for key, value in d.items()})
else:
super().__init__(d)
def __getitem__(self, key):
"""
Get key from dictionary.
If key exists in the dictionary, return a Just value. Else,
return Nothing.
"""
try:
return Just(self.data[key])
except KeyError:
return Nothing
def __setitem__(self, key, value):
"""
Set key in dictionary.
value must be a monadic value---either Nothing or a Just
value. If value is a Just value, set it in the dictionary. If
value is Nothing, do nothing.
"""
value.bind(partial(super().__setitem__, key))
def __delitem__(self, key):
"""
Delete key from dictionary.
If key exists in the dictionary, delete it. Else, do nothing.
"""
try:
super().__delitem__(key)
except KeyError:
pass
class MonadicDictCursor(DictCursor):
"""
Monadic version of MySQLdb.cursors.DictCursor.
Monadic version of MySQLdb.cursors.DictCursor that returns a
MonadicDict instead of the built-in dictionary.
"""
def fetchone(self):
return MonadicDict(super().fetchone())
def fetchmany(self, size=None):
return [MonadicDict(row) for row in super().fetchmany(size=size)]
def fetchall(self):
return [MonadicDict(row) for row in super().fetchall()]
| """Monadic utilities
This module is a collection of monadic utilities for use in
GeneNetwork. It includes:
* MonadicDict - monadic version of the built-in dictionary
* MonadicDictCursor - monadic version of MySQLdb.cursors.DictCursor
that returns a MonadicDict instead of the built-in dictionary
"""
from collections import UserDict
from functools import partial
from MySQLdb.cursors import DictCursor
from pymonad.maybe import Just, Nothing
class MonadicDict(UserDict):
"""
Monadic version of the built-in dictionary.
Keys in this dictionary can be any python object, but values must
be monadic values.
"""
def __init__(self, d, convert=True):
"""
Initialize monadic dictionary.
If convert is False, values in dictionary d must be
monadic. If convert is True, values in dictionary d are
converted to monadic values.
"""
if convert:
super().__init__({key:(Nothing if value is None else Just(value))
for key, value in d.items()})
else:
super().__init__(d)
def __getitem__(self, key):
"""
Get key from dictionary.
If key exists in the dictionary, return a Just value. Else,
return Nothing.
"""
try:
return Just(self.data[key])
except KeyError:
return Nothing
def __setitem__(self, key, value):
"""
Set key in dictionary.
value must be a monadic value---either Nothing or a Just
value. If value is a Just value, set it in the dictionary. If
value is Nothing, do nothing.
"""
value.bind(partial(super().__setitem__, key))
def __delitem__(self, key):
"""
Delete key from dictionary.
If key exists in the dictionary, delete it. Else, do nothing.
"""
try:
super().__delitem__(key)
except KeyError:
pass
class MonadicDictCursor(DictCursor):
"""
Monadic version of MySQLdb.cursors.DictCursor.
Monadic version of MySQLdb.cursors.DictCursor that returns a
MonadicDict instead of the built-in dictionary.
"""
def fetchone(self):
return MonadicDict(super().fetchone())
def fetchmany(self, size=None):
return [MonadicDict(row) for row in super().fetchmany(size=size)]
def fetchall(self):
return [MonadicDict(row) for row in super().fetchall()]
| agpl-3.0 | Python |
06c860b317471221fb27285aa9f42ac088c96867 | update Trim | naturalis/HTS-barcode-checker,naturalis/HTS-barcode-checker,naturalis/HTS-barcode-checker | bin/Trim/Trim.py | bin/Trim/Trim.py | #!/usr/bin/python2.7
'''
Created on 22 Nov. 2012
Author: Alex Hoogkamer
E-mail: aqhoogkamer@outlook.com / s1047388@student.hsleiden.nl
this script will trim fastq files based on the phred quality scores.
'''
from Bio import SeqIO
import os
'''
this block counts the number of reads and filters reads that have more
than 5% low quality bases.
'''
file1 = "data/test/s_2_1_sequence"
os.system("rm {file1}-trimmed".format(file1 = file1))
count = 0
for rec in SeqIO.parse(file1, "fastq"):
count += 1
qual = rec.letter_annotations["phred_quality"]
qualcount = 0
for i in qual:
if i < 46:
qualcount = qualcount + 1
else:
pass
if qualcount < (len(rec.seq)*0.05):
out_handle = open("{file1}-trimmed".format(file1=file1), "a")
SeqIO.write(rec, out_handle, "fastq")
out_handle.close
else:
pass
#print("{count} reads".format(count = count))
'''
this block will translate the phred
records = SeqIO.parse(open("data/test/s_2_1_sequence.txt"), "fastq")
out_handle = open("data/test/s_2_1_sequence.txt.qual", "w")
SeqIO.write(records, out_handle, "qual")
out_handle.close()
''' | #!/usr/bin/python2.7
'''
Created on 22 Nov. 2012
Author: Alex Hoogkamer
E-mail: aqhoogkamer@outlook.com / s1047388@student.hsleiden.nl
this script will trim fastq files based on the phred quality scores.
'''
import Bio
| bsd-3-clause | Python |
d35b9c4973cba355b1924072c992731191124722 | Make image title editable in the list | jodal/comics,jodal/comics,datagutten/comics,jodal/comics,datagutten/comics,datagutten/comics,jodal/comics,datagutten/comics | comics/core/admin.py | comics/core/admin.py | from django.contrib import admin
from comics.core import models
class ComicAdmin(admin.ModelAdmin):
list_display = ('slug', 'name', 'language', 'url', 'rights', 'start_date',
'end_date', 'active')
list_filter = ['active', 'language']
readonly_fields = ('name', 'slug', 'language', 'url', 'rights',
'start_date', 'end_date', 'active')
def has_add_permission(self, request):
return False
class ReleaseAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'comic', 'pub_date', 'fetched')
list_filter = ['pub_date', 'fetched', 'comic']
date_hierarchy = 'pub_date'
exclude = ('images',)
readonly_fields = ('comic', 'pub_date', 'fetched')
def has_add_permission(self, request):
return False
def text_preview(obj):
MAX_LENGTH = 60
if len(obj.text) < MAX_LENGTH:
return obj.text
else:
return obj.text[:MAX_LENGTH] + '...'
class ImageAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'file', 'height', 'width', 'fetched',
'title', text_preview)
list_editable = ('title',)
list_filter = ['fetched', 'comic']
date_hierarchy = 'fetched'
readonly_fields = ('comic', 'file', 'checksum', 'height', 'width',
'fetched')
def has_add_permission(self, request):
return False
admin.site.register(models.Comic, ComicAdmin)
admin.site.register(models.Release, ReleaseAdmin)
admin.site.register(models.Image, ImageAdmin)
| from django.contrib import admin
from comics.core import models
class ComicAdmin(admin.ModelAdmin):
list_display = ('slug', 'name', 'language', 'url', 'rights', 'start_date',
'end_date', 'active')
list_filter = ['active', 'language']
readonly_fields = ('name', 'slug', 'language', 'url', 'rights',
'start_date', 'end_date', 'active')
def has_add_permission(self, request):
return False
class ReleaseAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'comic', 'pub_date', 'fetched')
list_filter = ['pub_date', 'fetched', 'comic']
date_hierarchy = 'pub_date'
exclude = ('images',)
readonly_fields = ('comic', 'pub_date', 'fetched')
def has_add_permission(self, request):
return False
def text_preview(obj):
MAX_LENGTH = 60
if len(obj.text) < MAX_LENGTH:
return obj.text
else:
return obj.text[:MAX_LENGTH] + '...'
class ImageAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'file', 'height', 'width', 'fetched',
'title', text_preview)
list_filter = ['fetched', 'comic']
date_hierarchy = 'fetched'
readonly_fields = ('comic', 'file', 'checksum', 'height', 'width',
'fetched')
def has_add_permission(self, request):
return False
admin.site.register(models.Comic, ComicAdmin)
admin.site.register(models.Release, ReleaseAdmin)
admin.site.register(models.Image, ImageAdmin)
| agpl-3.0 | Python |
3dcba234b5c29e393d611adf88bfdca2a081f8db | fix dump_syms.py to work with python 3 (#28910) | electron/electron,electron/electron,bpasero/electron,electron/electron,bpasero/electron,electron/electron,gerhardberger/electron,bpasero/electron,electron/electron,gerhardberger/electron,electron/electron,bpasero/electron,bpasero/electron,gerhardberger/electron,gerhardberger/electron,gerhardberger/electron,electron/electron,bpasero/electron,bpasero/electron,gerhardberger/electron,gerhardberger/electron | build/dump_syms.py | build/dump_syms.py | from __future__ import print_function
import collections
import os
import subprocess
import sys
import errno
# The BINARY_INFO tuple describes a binary as dump_syms identifies it.
BINARY_INFO = collections.namedtuple('BINARY_INFO',
['platform', 'arch', 'hash', 'name'])
def get_module_info(header_info):
# header info is of the form "MODULE $PLATFORM $ARCH $HASH $BINARY"
info_split = header_info.strip().split(' ', 4)
if len(info_split) != 5 or info_split[0] != 'MODULE':
return None
return BINARY_INFO(*info_split[1:])
def get_symbol_path(symbol_data):
module_info = get_module_info(symbol_data[:symbol_data.index('\n')])
if not module_info:
raise Exception("Couldn't get module info for binary '{}'".format(binary))
exe_name = module_info.name.replace('.pdb', '')
return os.path.join(module_info.name, module_info.hash, exe_name + ".sym")
def mkdir_p(path):
    """Create *path* and any missing parents, like ``mkdir -p``.

    An already-existing directory is not an error; any other OSError
    (permissions, existing non-directory, ...) propagates.
    """
    try:
        os.makedirs(path)
    except OSError as err:
        already_a_dir = err.errno == errno.EEXIST and os.path.isdir(path)
        if not already_a_dir:
            raise
def main(dump_syms, binary, out_dir, stamp_file, dsym_file=None):
    """Run the dump_syms tool on *binary* and store the .sym file.

    dump_syms:  path to the dump_syms executable.
    binary:     binary (or PDB) to extract symbols from.
    out_dir:    root of the Breakpad symbol tree to write into.
    stamp_file: empty file touched on success, for build-system
                dependency tracking.
    dsym_file:  optional dSYM bundle passed to dump_syms via -g.
    """
    args = [dump_syms]
    if dsym_file:
        args += ["-g", dsym_file]
    args += [binary]
    # sys.stdout.encoding is None when stdout is piped/redirected, which
    # would make decode() raise TypeError; fall back to UTF-8 then.
    encoding = sys.stdout.encoding or 'utf-8'
    symbol_data = subprocess.check_output(args).decode(encoding)
    symbol_path = os.path.join(out_dir, get_symbol_path(symbol_data))
    mkdir_p(os.path.dirname(symbol_path))
    with open(symbol_path, 'w') as out:
        out.write(symbol_data)
    # Touch the stamp file so the build system knows this step succeeded.
    with open(stamp_file, 'w'):
        pass


if __name__ == '__main__':
    main(*sys.argv[1:])
| from __future__ import print_function
import collections
import os
import subprocess
import sys
import errno

# The BINARY_INFO tuple describes a binary as dump_syms identifies it.
BINARY_INFO = collections.namedtuple('BINARY_INFO',
                                     ['platform', 'arch', 'hash', 'name'])


def get_module_info(header_info):
    """Parse a dump_syms MODULE header into a BINARY_INFO, or None."""
    # header info is of the form "MODULE $PLATFORM $ARCH $HASH $BINARY"
    info_split = header_info.strip().split(' ', 4)
    if len(info_split) != 5 or info_split[0] != 'MODULE':
        return None
    return BINARY_INFO(*info_split[1:])


def get_symbol_path(symbol_data):
    """Return the Breakpad layout path <name>/<hash>/<name>.sym."""
    header = symbol_data[:symbol_data.index('\n')]
    module_info = get_module_info(header)
    if not module_info:
        # Report the offending header; the previous message referenced the
        # undefined name 'binary'.
        raise Exception("Couldn't get module info for header '{}'".format(header))
    exe_name = module_info.name.replace('.pdb', '')
    return os.path.join(module_info.name, module_info.hash, exe_name + ".sym")


def mkdir_p(path):
    """Simulates mkdir -p: an already-existing directory is not an error."""
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise


def main(dump_syms, binary, out_dir, stamp_file, dsym_file=None):
    """Run dump_syms on *binary*, writing the .sym file plus a stamp file."""
    args = [dump_syms]
    if dsym_file:
        args += ["-g", dsym_file]
    args += [binary]
    # check_output() returns bytes; on Python 3 the bytes/str mix broke
    # both the '\n' search and the text-mode write below, so decode here.
    # sys.stdout.encoding can be None when stdout is redirected.
    symbol_data = subprocess.check_output(args).decode(sys.stdout.encoding
                                                       or 'utf-8')
    symbol_path = os.path.join(out_dir, get_symbol_path(symbol_data))
    mkdir_p(os.path.dirname(symbol_path))
    with open(symbol_path, 'w') as out:
        out.write(symbol_data)
    with open(stamp_file, 'w'):
        pass


if __name__ == '__main__':
    main(*sys.argv[1:])
| mit | Python |
c9568f90c3605d5a8b647f01c68362f71669fa1a | Use six.moves.reduce for compatibility | utgwkk/py-timeparser,utgwkk/py-timeparser | timeparser.py | timeparser.py | # coding: utf-8
from __future__ import print_function
import re
from six.moves import reduce
def parse(s):
    """Parse a duration string like '1h30m20s' into seconds.

    Each of the hour/minute/second components is optional, units may be
    spelled out ('1hour30minute20second'), and a trailing bare number is
    taken as seconds ('8minute10' -> 490).  Returns None for None input;
    raises Exception for strings that do not parse.
    """
    RE_HOUR = r'([0-9]+)h(our)?'
    RE_MINUTE = r'([0-9]+)m(in(ute)?)?'
    RE_SECOND = r'([0-9]+)(s(ec(ond)?)?)?'

    def _parse_time_with_unit(part):
        # 'h'/'m' prefixes are unambiguous; anything else is seconds.
        mh = re.match(RE_HOUR, part)
        if mh:
            return 3600 * int(mh.group(1))
        mm = re.match(RE_MINUTE, part)
        if mm:
            return 60 * int(mm.group(1))
        ms = re.match(RE_SECOND, part)
        if ms:
            return int(ms.group(1))
        return 0

    if s is None:
        return None
    if not s:
        # An empty string used to crash with IndexError on s[-1];
        # report it like any other unparsable input instead.
        raise Exception('invalid string: "%s"' % s)
    # A trailing bare number is treated as seconds.
    if s[-1] in '0123456789':
        s += 's'
    m = re.match(r'^(%s)?(%s)?(%s)?$' % (RE_HOUR, RE_MINUTE, RE_SECOND), s)
    if not m:
        raise Exception('invalid string: "%s"' % s)
    # Keep only the groups that captured a whole "<digits><unit>" component.
    times = [x for x in m.groups() if isinstance(x, str) and
             re.match(r'[0-9]+[a-z]+', x)]
    # sum() replaces the former six.moves.reduce() and needs no import.
    return sum(_parse_time_with_unit(t) for t in times)


if __name__ == '__main__':
    print(parse('8minute10'))
| # coding: utf-8
from __future__ import print_function
import re
def parse(s):
    """Parse a duration string like '1h30m20s' into seconds.

    Each of the hour/minute/second components is optional, units may be
    spelled out ('1hour30minute20second'), and a trailing bare number is
    taken as seconds ('8minute10' -> 490).  Returns None for None input;
    raises Exception for strings that do not parse.
    """
    RE_HOUR = r'([0-9]+)h(our)?'
    RE_MINUTE = r'([0-9]+)m(in(ute)?)?'
    RE_SECOND = r'([0-9]+)(s(ec(ond)?)?)?'

    def _parse_time_with_unit(part):
        # 'h'/'m' prefixes are unambiguous; anything else is seconds.
        mh = re.match(RE_HOUR, part)
        if mh:
            return 3600 * int(mh.group(1))
        mm = re.match(RE_MINUTE, part)
        if mm:
            return 60 * int(mm.group(1))
        ms = re.match(RE_SECOND, part)
        if ms:
            return int(ms.group(1))
        return 0

    if s is None:
        return None
    if not s:
        # An empty string used to crash with IndexError on s[-1];
        # report it like any other unparsable input instead.
        raise Exception('invalid string: "%s"' % s)
    # A trailing bare number is treated as seconds.
    if s[-1] in '0123456789':
        s += 's'
    m = re.match(r'^(%s)?(%s)?(%s)?$' % (RE_HOUR, RE_MINUTE, RE_SECOND), s)
    if not m:
        raise Exception('invalid string: "%s"' % s)
    # Keep only the groups that captured a whole "<digits><unit>" component.
    times = [x for x in m.groups() if isinstance(x, str) and
             re.match(r'[0-9]+[a-z]+', x)]
    # sum() replaces the bare reduce(), which is not a builtin on Python 3
    # and made this function raise NameError there.
    return sum(_parse_time_with_unit(t) for t in times)


if __name__ == '__main__':
    print(parse('8minute10'))
| mit | Python |
9669fb2b733d1a4051ccd744307f358cab2737e4 | Rename private fields | arthurlockman/p.haul,jne100/p.haul,aburluka/p.haul,xemul/p.haul,xemul/p.haul,aburluka/p.haul,jne100/p.haul,biddyweb/phaul,marcosnils/p.haul,arthurlockman/p.haul,jne100/p.haul,aburluka/p.haul,marcosnils/p.haul,jne100/p.haul,biddyweb/phaul,aburluka/p.haul,xemul/p.haul,biddyweb/phaul,arthurlockman/p.haul,jne100/p.haul,aburluka/p.haul,xemul/p.haul,arthurlockman/p.haul,arthurlockman/p.haul,marcosnils/p.haul,xemul/p.haul | p_haul_criu.py | p_haul_criu.py | #
# CRIU API
# Includes class to work with CRIU service and helpers
#
import socket
import struct
import os
import subprocess
import rpc_pb2 as cr_rpc
import stats_pb2 as crs
# Path to the CRIU binary launched in "swrk" (service worker) mode.
criu_binary = "/root/criu/criu"

# Short names for CRIU RPC request types, used to build per-request log
# file names in criu_conn.send_req().
req_types = {
    cr_rpc.DUMP: "dump",
    cr_rpc.PRE_DUMP: "pre_dump",
    cr_rpc.PAGE_SERVER: "page_server",
    cr_rpc.RESTORE: "restore"
}

# Default CRIU log verbosity level.
def_verb = 2
#
# Connection to CRIU service
#
class criu_conn:
    """Context-managed connection to a CRIU "swrk" service worker.

    On entry a CRIU process is spawned with one end of a seqpacket socket
    pair; protobuf requests/responses are exchanged over the other end.
    """

    def __init__(self, iteration = 0):
        # Iteration counter used to give each request its own log file.
        self._iter = iteration
        # Default verbosity; send_req() reads self.verb, which previously
        # raised AttributeError unless verbose() had been called first.
        self.verb = def_verb

    def __enter__(self):
        css = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET)
        self.__swrk = subprocess.Popen([criu_binary, "swrk", "%d" % css[0].fileno()])
        # The worker owns css[0] now; keep only our end of the pair.
        css[0].close()
        self.__cs = css[1]
        return self

    def __exit__(self, type, value, traceback):
        self.__cs.close()
        self.__swrk.wait()

    def verbose(self, level):
        # CRIU log verbosity used for subsequent requests.
        self.verb = level

    def send_req(self, req, with_resp = True):
        """Send a request; optionally wait for and return the response."""
        req.opts.log_level = self.verb
        req.opts.log_file = "criu_%s.%d.log" % (req_types[req.type], self._iter)
        self.__cs.send(req.SerializeToString())
        self._iter += 1
        if with_resp:
            return self.recv_resp()

    def recv_resp(self):
        """Receive and parse one criu_resp message."""
        resp = cr_rpc.criu_resp()
        resp.ParseFromString(self.__cs.recv(1024))
        return resp

    def ack_notify(self, success = True):
        """Acknowledge a CRIU notification with the given status."""
        req = cr_rpc.criu_req()
        req.type = cr_rpc.NOTIFY
        # Was hard-coded to True, silently ignoring the parameter.
        req.notify_success = success
        self.__cs.send(req.SerializeToString())
#
# Helper to read CRIU-generated statistics
#
CRIU_STATS_MAGIC = 0x57093306


def criu_get_stats(img, file_name):
    """Read and parse a CRIU statistics image from img's work dir.

    The file layout is: 4 bytes of magic, 4 bytes of payload size,
    followed by a stats_entry protobuf of that size.
    """
    s = struct.Struct("I I")
    # 'with' closes the file even on a magic-number mismatch (it used to
    # leak the handle).
    with open(os.path.join(img.work_dir(), file_name)) as f:
        v = s.unpack(f.read(s.size))
        if v[0] != CRIU_STATS_MAGIC:
            raise Exception("Magic is %x, expect %x" % (v[0], CRIU_STATS_MAGIC))
        stats = crs.stats_entry()
        stats.ParseFromString(f.read(v[1]))
    return stats


def criu_get_dstats(img):
    """Return the dump-side statistics entry."""
    return criu_get_stats(img, "stats-dump").dump


def criu_get_rstats(img):
    """Return the restore-side statistics entry."""
    return criu_get_stats(img, "stats-restore").restore
| #
# CRIU API
# Includes class to work with CRIU service and helpers
#
import socket
import struct
import os
import subprocess
import rpc_pb2 as cr_rpc
import stats_pb2 as crs
# Path to the CRIU binary launched in "swrk" (service worker) mode.
criu_binary = "/root/criu/criu"

# Short names for CRIU RPC request types, used to build per-request log
# file names in criu_conn.send_req().
req_types = {
    cr_rpc.DUMP: "dump",
    cr_rpc.PRE_DUMP: "pre_dump",
    cr_rpc.PAGE_SERVER: "page_server",
    cr_rpc.RESTORE: "restore"
}

# Default CRIU log verbosity level.
def_verb = 2
#
# Connection to CRIU service
#
class criu_conn:
    """Context-managed connection to a CRIU "swrk" service worker.

    On entry a CRIU process is spawned with one end of a seqpacket socket
    pair; protobuf requests/responses are exchanged over the other end.
    """

    def __init__(self, iteration = 0):
        # Iteration counter used to give each request its own log file.
        self._iter = iteration
        # Default verbosity; send_req() reads self.verb, which previously
        # raised AttributeError unless verbose() had been called first.
        self.verb = def_verb

    def __enter__(self):
        css = socket.socketpair(socket.AF_UNIX, socket.SOCK_SEQPACKET)
        self.swrk = subprocess.Popen([criu_binary, "swrk", "%d" % css[0].fileno()])
        # The worker owns css[0] now; keep only our end of the pair.
        css[0].close()
        self.cs = css[1]
        return self

    def __exit__(self, type, value, traceback):
        self.cs.close()
        self.swrk.wait()

    def verbose(self, level):
        # CRIU log verbosity used for subsequent requests.
        self.verb = level

    def send_req(self, req, with_resp = True):
        """Send a request; optionally wait for and return the response."""
        req.opts.log_level = self.verb
        req.opts.log_file = "criu_%s.%d.log" % (req_types[req.type], self._iter)
        self.cs.send(req.SerializeToString())
        self._iter += 1
        if with_resp:
            return self.recv_resp()

    def recv_resp(self):
        """Receive and parse one criu_resp message."""
        resp = cr_rpc.criu_resp()
        resp.ParseFromString(self.cs.recv(1024))
        return resp

    def ack_notify(self, success = True):
        """Acknowledge a CRIU notification with the given status."""
        req = cr_rpc.criu_req()
        req.type = cr_rpc.NOTIFY
        # Was hard-coded to True, silently ignoring the parameter.
        req.notify_success = success
        self.cs.send(req.SerializeToString())
#
# Helper to read CRIU-generated statistics
#
CRIU_STATS_MAGIC = 0x57093306


def criu_get_stats(img, file_name):
    """Read and parse a CRIU statistics image from img's work dir.

    The file layout is: 4 bytes of magic, 4 bytes of payload size,
    followed by a stats_entry protobuf of that size.
    """
    s = struct.Struct("I I")
    # 'with' closes the file even on a magic-number mismatch (it used to
    # leak the handle).
    with open(os.path.join(img.work_dir(), file_name)) as f:
        v = s.unpack(f.read(s.size))
        if v[0] != CRIU_STATS_MAGIC:
            raise Exception("Magic is %x, expect %x" % (v[0], CRIU_STATS_MAGIC))
        stats = crs.stats_entry()
        stats.ParseFromString(f.read(v[1]))
    return stats


def criu_get_dstats(img):
    """Return the dump-side statistics entry."""
    return criu_get_stats(img, "stats-dump").dump


def criu_get_rstats(img):
    """Return the restore-side statistics entry."""
    return criu_get_stats(img, "stats-restore").restore
| lgpl-2.1 | Python |
0ca8edc752a104dcb7faaa6b5c303a1a8fdbc6aa | Mark functions `{get,set}_extension_value` as internal | homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps | byceps/config.py | byceps/config.py | """
byceps.config
~~~~~~~~~~~~~
:Copyright: 2006-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from enum import Enum
from typing import Any, Optional
from flask import current_app, Flask
from .services.site.transfer.models import SiteID
# Key under which this module keeps its data in app.extensions.
EXTENSION_KEY = 'byceps_config'
# Keys within that extension namespace.
KEY_APP_MODE = 'app_mode'
KEY_SITE_ID = 'site_id'
class AppMode(Enum):
    """The mode a BYCEPS application runs in."""

    admin = object()
    base = object()
    site = object()

    def is_admin(self) -> bool:
        """Return True if this is the admin mode."""
        return self is AppMode.admin

    def is_base(self) -> bool:
        """Return True if this is the base mode."""
        return self is AppMode.base

    def is_site(self) -> bool:
        """Return True if this is the site mode."""
        return self is AppMode.site
class ConfigurationError(Exception):
    """Raised when the application configuration is invalid or incomplete."""
    pass


def init_app(app: Flask) -> None:
    """Initialize this module's per-application state.

    Determines the app mode (and, for site applications, the site ID)
    from the Flask config and caches both in the app's extension
    namespace.
    """
    app.extensions[EXTENSION_KEY] = {}

    app_mode = _determine_app_mode(app)
    _set_extension_value(KEY_APP_MODE, app_mode, app)

    if app_mode.is_site():
        # Only site applications are bound to a specific site.
        site_id = _determine_site_id(app)
        _set_extension_value(KEY_SITE_ID, site_id, app)


def _get_extension_value(key: str, app: Optional[Flask] = None) -> Any:
    """Return the value for the key in this application's own extension
    namespace.

    It is expected that the value has already been set. An exception is
    raised if that is not the case.
    """
    if app is None:
        # Fall back to the application bound to the current request context.
        app = current_app

    extension = app.extensions[EXTENSION_KEY]
    return extension[key]


def _set_extension_value(key: str, value: Any, app: Flask) -> None:
    """Set/replace the value for the key in this application's own
    extension namespace.
    """
    extension = app.extensions[EXTENSION_KEY]
    extension[key] = value
# -------------------------------------------------------------------- #
# app mode
def _determine_app_mode(app: Flask) -> AppMode:
    """Read the app mode from the Flask config, defaulting to base mode."""
    value = app.config.get('APP_MODE')
    if value is None:
        # Unconfigured applications run in base mode.
        return AppMode.base

    try:
        return AppMode[value]
    except KeyError:
        raise ConfigurationError(f'Invalid app mode "{value}" configured.')


def get_app_mode(app: Optional[Flask] = None) -> AppMode:
    """Return the mode the site should run in."""
    return _get_extension_value(KEY_APP_MODE, app)


# -------------------------------------------------------------------- #
# site ID


def _determine_site_id(app: Flask) -> SiteID:
    """Read the site ID from the Flask config; mandatory for site apps."""
    site_id = app.config.get('SITE_ID')
    if site_id is None:
        raise ConfigurationError('No site ID configured.')

    return site_id


def get_current_site_id(app: Optional[Flask] = None) -> SiteID:
    """Return the id of the current site."""
    return _get_extension_value(KEY_SITE_ID, app)
| """
byceps.config
~~~~~~~~~~~~~
:Copyright: 2006-2022 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from enum import Enum
from typing import Any, Optional
from flask import current_app, Flask
from .services.site.transfer.models import SiteID
# Key under which this module keeps its data in app.extensions.
EXTENSION_KEY = 'byceps_config'
# Keys within that extension namespace.
KEY_APP_MODE = 'app_mode'
KEY_SITE_ID = 'site_id'


class AppMode(Enum):
    """The mode a BYCEPS application runs in."""

    admin = object()
    base = object()
    site = object()

    def is_admin(self) -> bool:
        return self == AppMode.admin

    def is_base(self) -> bool:
        return self == AppMode.base

    def is_site(self) -> bool:
        return self == AppMode.site


class ConfigurationError(Exception):
    """Raised when the application configuration is invalid or incomplete."""
    pass


def init_app(app: Flask) -> None:
    """Initialize this module's per-application state.

    Determines the app mode (and, for site applications, the site ID)
    from the Flask config and caches both in the app's extension
    namespace.
    """
    app.extensions[EXTENSION_KEY] = {}

    app_mode = _determine_app_mode(app)
    set_extension_value(KEY_APP_MODE, app_mode, app)

    if app_mode.is_site():
        # Only site applications are bound to a specific site.
        site_id = _determine_site_id(app)
        set_extension_value(KEY_SITE_ID, site_id, app)


def get_extension_value(key: str, app: Optional[Flask] = None) -> Any:
    """Return the value for the key in this application's own extension
    namespace.

    It is expected that the value has already been set. An exception is
    raised if that is not the case.
    """
    if app is None:
        # Fall back to the application bound to the current request context.
        app = current_app

    extension = app.extensions[EXTENSION_KEY]
    return extension[key]


def set_extension_value(key: str, value: Any, app: Flask) -> None:
    """Set/replace the value for the key in this application's own
    extension namespace.
    """
    extension = app.extensions[EXTENSION_KEY]
    extension[key] = value


# -------------------------------------------------------------------- #
# app mode


def _determine_app_mode(app: Flask) -> AppMode:
    """Read the app mode from the Flask config, defaulting to base mode."""
    value = app.config.get('APP_MODE')
    if value is None:
        # Unconfigured applications run in base mode.
        return AppMode.base

    try:
        return AppMode[value]
    except KeyError:
        raise ConfigurationError(f'Invalid app mode "{value}" configured.')


def get_app_mode(app: Optional[Flask] = None) -> AppMode:
    """Return the mode the site should run in."""
    return get_extension_value(KEY_APP_MODE, app)


# -------------------------------------------------------------------- #
# site ID


def _determine_site_id(app: Flask) -> SiteID:
    """Read the site ID from the Flask config; mandatory for site apps."""
    site_id = app.config.get('SITE_ID')
    if site_id is None:
        raise ConfigurationError('No site ID configured.')

    return site_id


def get_current_site_id(app: Optional[Flask] = None) -> SiteID:
    """Return the id of the current site."""
    return get_extension_value(KEY_SITE_ID, app)
| bsd-3-clause | Python |
9be2bf94043dbf49e5510c34d0bdff98e71b8022 | add read_assembly as the main entry point for this module; use redesigned logging class | svm-zhang/AGOUTI | src/agouti_sequence.py | src/agouti_sequence.py | import os
import sys
from lib import agouti_log as agLOG
#def set_module_name(name):
# global moduleName
# moduleName = name
def get_contigs(assemblyFile, agSeqProgress):
    """Read a FASTA assembly file.

    Returns (contigs, contigDict) where contigs is the list of sequence
    names in file order and contigDict maps each sequence's index to its
    sequence string.  Progress and per-contig lengths are reported
    through agSeqProgress.logger.
    """
    agSeqProgress.logger.info("[BEGIN] Reading the initial assembly")
    seq = ""
    contigs = []
    seqLens = []
    contigDict = {}
    contigIndex = 0
    contig = None
    with open(assemblyFile, 'r') as fCONTIG:
        for line in fCONTIG:
            if line.startswith('>'):
                # A new header: flush the sequence collected so far.
                if seq != "":
                    contigDict[contigIndex] = seq
                    agSeqProgress.logger.debug("%s\t%d" % (contig, len(seq)))
                    seqLens.append(len(seq))
                    contigIndex += 1
                    seq = ""
                contig = line.strip()[1:]
                contigs.append(contig)
            else:
                seq += line.strip()
    # Flush the last sequence; guard against a completely empty file,
    # which previously raised NameError on 'contig' here.
    if contig is not None:
        agSeqProgress.logger.debug("%s\t%d" % (contig, len(seq)))
        contigDict[contigIndex] = seq
        seqLens.append(len(seq))
    n50 = get_assembly_NXX(seqLens)
    agSeqProgress.logger.info("%d sequences parsed" % (len(contigDict)))
    agSeqProgress.logger.info("The given assembly N50: %d" % (n50))
    agSeqProgress.logger.info("[DONE]")
    return contigs, contigDict


def read_assembly(assemblyFile, outDir, prefix):
    """Main entry point: set up per-module logging and read the assembly.

    Creates <outDir>/agouti_seq and a "<prefix>.agouti_seq.progressMeter"
    log file there, then returns get_contigs()'s result.
    """
    moduleName = os.path.basename(__file__).split('.')[0].upper()
    moduleOutDir = os.path.join(outDir, "agouti_seq")
    if not os.path.exists(moduleOutDir):
        os.makedirs(moduleOutDir)
    progressLogFile = os.path.join(moduleOutDir, "%s.agouti_seq.progressMeter" % (prefix))
    agSeqProgress = agLOG.PROGRESS_METER(moduleName)
    agSeqProgress.add_file_handler(progressLogFile)
    contigs, contigDict = get_contigs(assemblyFile, agSeqProgress)
    return contigs, contigDict


def get_assembly_NXX(seqLens, nXX=50):
    """Return the NXX length (N50 by default) for a list of lengths.

    NXX is the length L of the sequence at which the cumulative length,
    counting from the longest sequence down, first exceeds XX% of the
    total assembly length.  Returns 0 for an empty input.
    """
    nXXThreshold = sum(seqLens) * (nXX / 100.0)
    cumuSeqLen = 0
    # Iterate longest-first; sorted() avoids mutating the caller's list,
    # which the previous in-place sort() did.
    for seqLen in sorted(seqLens, reverse=True):
        cumuSeqLen += seqLen
        if cumuSeqLen > nXXThreshold:
            return seqLen
    return 0
| import os
import sys
import logging
from lib import agouti_log as agLOG
def set_module_name(name):
    """Store the module name in a module-level global used by the loggers."""
    global moduleName
    moduleName = name


def get_contigs(contigFasta, moduleOutDir, prefix, logLevel):
    """Read a FASTA assembly, returning (contigs, contigDict).

    contigs is the list of sequence names in file order; contigDict maps
    each sequence's index to its sequence string.
    """
    moduleLogFile = os.path.join(moduleOutDir, "%s.agouti_seq.progressMeter" %(prefix))
    moduleProgressLogger = agLOG.AGOUTI_LOG(moduleName).create_logger(moduleLogFile)
    try:
        fCONTIG = open(contigFasta, 'r')
    except IOError:
        moduleProgressLogger.error("Error opening contig file: %s" %(contigFasta), exc_info=True)
        sys.exit()
    # NOTE(review): the handle opened above is never used or closed; the
    # file is re-opened by the 'with' statement below.
    moduleProgressLogger.info("[BEGIN] Reading the initial assembly")
    seq = ""
    contigs = []
    seqLens = []
    contigDict = {}
    contigIndex = 0
    with open(contigFasta, 'r') as fCONTIG:
        for line in fCONTIG:
            if line.startswith('>'):
                # A new header: flush the sequence collected so far.
                if seq != "":
                    contigDict[contigIndex] = seq
                    moduleProgressLogger.debug("%s\t%d" %(contig, len(seq)))
                    seqLens.append(len(seq))
                    contigIndex += 1
                    seq = ""
                contig = line.strip()[1:]
                contigs.append(contig)
            else:
                seq += line.strip()
    # read one last sequence
    # NOTE(review): assumes at least one sequence is present; an empty
    # file raises NameError on 'contig' here.
    moduleProgressLogger.debug("%s\t%d" %(contig, len(seq)))
    contigDict[contigIndex] = seq
    seqLens.append(len(seq))
    n50 = get_assembly_NXX(seqLens)
    moduleProgressLogger.info("%d sequences parsed" %(len(contigDict)))
    moduleProgressLogger.info("The given assembly N50: %d" %(n50))
    moduleProgressLogger.info("[DONE]")
    return contigs, contigDict


def get_scaffolds(scaffoldFasta):
    """Placeholder for scaffold processing (not implemented yet)."""
    # NOTE(review): 'logLevel' is not defined in this scope, so calling
    # this function raises NameError -- confirm the intended signature.
    moduleProgressLogger = agLOG.AGOUTI_LOG(moduleName, logLevel, None).create_logger()
    moduleProgressLogger.info("Processing scaffolds ... ")
    moduleProgressLogger.info("TO BE CONTINUED\n")


def get_assembly_NXX(seqLens, nXX=50):
    """Return the NXX (N50 by default) length of the given lengths.

    NOTE: sorts seqLens in place.
    """
    seqLenSum = sum(seqLens)
    nXXThreshold = seqLenSum * (nXX/100.0)
    seqLens.sort()
    cumuSeqLen = 0
    nXXLen = 0
    for i in range(len(seqLens)-1, -1, -1):
        cumuSeqLen += seqLens[i]
        if cumuSeqLen > nXXThreshold:
            nXXLen = seqLens[i]
            break
    return nXXLen
| mit | Python |
339415048005a9eba957357a02459a977a2e3007 | Update bazel deps to hopefully get CI happy again. | google/google-toolbox-for-mac,thomasvl/google-toolbox-for-mac,google/google-toolbox-for-mac,thomasvl/google-toolbox-for-mac | bazel_support/repositories.bzl | bazel_support/repositories.bzl | """Definitions for handling Bazel repositories for GoogleToolboxForMac. """
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def _maybe(repo_rule, name, **kwargs):
    """Executes the given repository rule if it hasn't been executed already.

    Args:
      repo_rule: The repository rule to be executed (e.g., `http_archive`.)
      name: The name of the repository to be defined by the rule.
      **kwargs: Additional arguments passed directly to the repository rule.
    """
    if native.existing_rule(name):
        # Respect a repository of the same name the user already defined.
        return
    repo_rule(name = name, **kwargs)
def google_toolbox_for_mac_rules_dependencies():
    """Fetches repositories that are dependencies of GoogleToolboxForMac.

    Users should call this macro in their `WORKSPACE` to ensure that all of the
    dependencies are downloaded and that they are isolated
    from changes to those dependencies.
    """
    # Both dependencies are pinned release archives verified by sha256.
    _maybe(
        http_archive,
        name = "rules_cc",
        urls = ["https://github.com/bazelbuild/rules_cc/releases/download/0.0.1/rules_cc-0.0.1.tar.gz"],
        sha256 = "4dccbfd22c0def164c8f47458bd50e0c7148f3d92002cdb459c2a96a68498241",
    )
    _maybe(
        http_archive,
        name = "build_bazel_rules_apple",
        sha256 = "a5f00fd89eff67291f6cd3efdc8fad30f4727e6ebb90718f3f05bbf3c3dd5ed7",
        url = "https://github.com/bazelbuild/rules_apple/releases/download/0.33.0/rules_apple.0.33.0.tar.gz",
    )
| """Definitions for handling Bazel repositories for GoogleToolboxForMac. """
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def _maybe(repo_rule, name, **kwargs):
    """Executes the given repository rule if it hasn't been executed already.

    Args:
      repo_rule: The repository rule to be executed (e.g., `http_archive`.)
      name: The name of the repository to be defined by the rule.
      **kwargs: Additional arguments passed directly to the repository rule.
    """
    if not native.existing_rule(name):
        repo_rule(name = name, **kwargs)


def google_toolbox_for_mac_rules_dependencies():
    """Fetches repositories that are dependencies of GoogleToolboxForMac.

    Users should call this macro in their `WORKSPACE` to ensure that all of the
    dependencies are downloaded and that they are isolated
    from changes to those dependencies.
    """
    # rules_cc is pinned to a commit archive and verified by sha256.
    _maybe(
        http_archive,
        name = "rules_cc",
        # Latest 08-10-20
        urls = ["https://github.com/bazelbuild/rules_cc/archive/1477dbab59b401daa94acedbeaefe79bf9112167.tar.gz"],
        sha256 = "b87996d308549fc3933f57a786004ef65b44b83fd63f1b0303a4bbc3fd26bbaf",
        strip_prefix = "rules_cc-1477dbab59b401daa94acedbeaefe79bf9112167/",
    )
    # NOTE(review): this archive is fetched without a sha256 checksum, so
    # the download is not integrity-checked -- consider pinning one.
    _maybe(
        http_archive,
        name = "build_bazel_rules_apple",
        # Latest 2-11-21
        urls = ["https://github.com/bazelbuild/rules_apple/archive/c909dd759627f40e0fbd17112ba5e7b753755906.tar.gz"],
        strip_prefix = "rules_apple-c909dd759627f40e0fbd17112ba5e7b753755906/",
    )
| apache-2.0 | Python |
835cc8cbc939b582eea0357f11045633df531779 | Use ipyparallel instead of IPython.parallel | alexandrucoman/bcbio-nextgen-vm,alexandrucoman/bcbio-nextgen-vm | bcbiovm/docker/ipythontasks.py | bcbiovm/docker/ipythontasks.py | """IPython interface to run bcbio distributed functions inside
a docker container.
Exports processing of a specific function and arguments within docker using
bcbio_nextgen.py runfn.
"""
from ipyparallel import require
from bcbio.distributed import ipython
from bcbio.distributed.ipythontasks import _setup_logging
from bcbiovm.docker import run
@require(run)
def runfn(*args):
    """Entry point executed on an IPython parallel engine.

    Unpacks a single zipped argument tuple of the form
    (fn_name, dockerconf, cmd_args, parallel, *fn_args) and dispatches it
    to bcbio's runfn inside the docker container, re-zipping the result
    for transport back to the controller.
    """
    args = ipython.unzip_args(args)
    # Exactly one packed argument set is expected per engine call.
    assert len(args) == 1
    fn_args = args[0][4:]
    with _setup_logging(fn_args):
        fn_name = args[0][0]
        dockerconf = args[0][1]
        cmd_args = args[0][2]
        parallel = args[0][3]
        return ipython.zip_args(run.do_runfn(fn_name, fn_args, cmd_args,
                                             parallel, dockerconf))
| """IPython interface to run bcbio distributed functions inside
a docker container.
Exports processing of a specific function and arguments within docker using
bcbio_nextgen.py runfn.
"""
from IPython.parallel import require
from bcbio.distributed import ipython
from bcbio.distributed.ipythontasks import _setup_logging
from bcbiovm.docker import run
@require(run)
def runfn(*args):
    """Entry point executed on an IPython parallel engine.

    Unpacks a single zipped argument tuple of the form
    (fn_name, dockerconf, cmd_args, parallel, *fn_args) and dispatches it
    to bcbio's runfn inside the docker container, re-zipping the result
    for transport back to the controller.
    """
    args = ipython.unzip_args(args)
    # Exactly one packed argument set is expected per engine call.
    assert len(args) == 1
    fn_args = args[0][4:]
    with _setup_logging(fn_args):
        fn_name = args[0][0]
        dockerconf = args[0][1]
        cmd_args = args[0][2]
        parallel = args[0][3]
        return ipython.zip_args(run.do_runfn(fn_name, fn_args, cmd_args,
                                             parallel, dockerconf))
| mit | Python |
57db2a901afe8a5b0c80c9dab7d2370c18e656a2 | Expand AUTHORS | Eficent/manufacture,Endika/manufacture,credativUK/manufacture,raycarnes/manufacture | mrp_bom_note/__openerp__.py | mrp_bom_note/__openerp__.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
    # Odoo addon manifest for the "MRP BoM Notes" module.
    "name": "MRP BoM Notes",
    "version": "1.0",
    # Multiple authors joined into one comma-separated string via implicit
    # string literal concatenation.
    "author": "OdooMRP team,"
              "AvanzOSC,"
              "Serv. Tecnol. Avanzados - Pedro M. Baeza",
    "website": "http://www.odoomrp.com",
    "contributors": [
        "Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>",
        "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
        "Ana Juaristi <ajuaristio@gmail.com>"
    ],
    "category": "Tools",
    # Requires the core manufacturing (mrp) module.
    "depends": [
        "mrp",
    ],
    # View files loaded when the module is installed.
    "data": [
        "views/mrp_bom_view.xml",
    ],
    "installable": True
}
| # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
    # Odoo addon manifest for the "MRP BoM Notes" module.
    "name": "MRP BoM Notes",
    "version": "1.0",
    "author": "OdooMRP team",
    "website": "http://www.odoomrp.com",
    "contributors": [
        "Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>",
        "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
        "Ana Juaristi <ajuaristio@gmail.com>"
    ],
    "category": "Tools",
    # Requires the core manufacturing (mrp) module.
    "depends": [
        "mrp",
    ],
    # View files loaded when the module is installed.
    "data": [
        "views/mrp_bom_view.xml",
    ],
    "installable": True
}
| agpl-3.0 | Python |
d6866c6bc7c0c6851912f299d0160080d162c4b3 | Add a lot more resiliancy/output to better_webbrowser.py. We now use webbrowser, but register a WindowsHttpDefault class | protron/namebench,rogers0/namebench,google/namebench,google/namebench,google/namebench | libnamebench/better_webbrowser.py | libnamebench/better_webbrowser.py | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for webbrowser library, to invoke the http handler on win32."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import os.path
import subprocess
import sys
import traceback
import webbrowser
def create_win32_http_cmd(url):
"""Create a command-line tuple to launch a web browser for a given URL.
At the moment, this ignores all default arguments to the browser.
TODO(tstromberg): Properly parse the command-line arguments.
"""
key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
'Software\Classes\http\shell\open\command')
cmd = _winreg.EnumValue(key, 0)[1]
# "C:\blah blah\iexplore.exe" -nohome
# "C:\blah blah\firefox.exe" -requestPending -osint -url "%1"
if '"' in cmd:
executable = cmd.split('"')[1]
else:
executable = cmd.split(' ')[0]
if not os.path.exists(executable):
print "Default HTTP browser does not exist: %s" % executable
return False
else:
print "HTTP handler: %s" % executable
return (executable, url)
if sys.platform[:3] == 'win':
import _winreg
class WindowsHttpDefault(webbrowser.BaseBrowser):
def open(self, url, new=0, autoraise=1):
command_args = create_win32_http_cmd(url)
if not command_args:
return False
print command_args
try:
browser = subprocess.Popen(command_args)
except:
traceback.print_exc()
print "* Failed to run HTTP handler, trying next browser."
return False
webbrowser.register("windows-http", WindowsHttpDefault, update_tryorder=-1)
def open(url):
  """Open *url* in the user's default web browser.

  NOTE: shadows the builtin open() within this module.
  """
  if hasattr(webbrowser, '_tryorder'):
    # _tryorder is a private webbrowser attribute; guard with hasattr in
    # case a future Python version removes it.
    print "Browsers: %s" % webbrowser._tryorder
  print "Opening: %s" % url
  webbrowser.open(url)
| #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for webbrowser library, to invoke the http handler on win32."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import os.path
import subprocess
import sys
import webbrowser
# True only on win32 when the _winreg module is importable.
use_win32 = False
if sys.platform == "win32":
  try:
    import _winreg
    use_win32 = True
  except ImportError:
    pass


def open(url):
  """Open a URL in the users web browser."""
  # NOTE: shadows the builtin open() within this module.
  if not use_win32:
    return webbrowser.open(url)
  else:
    return win32_open(url)


def win32_open(url):
  """Open a URL with the program handler for the http protocol on win32."""
  command_args = create_win32_http_cmd(url)
  browser = subprocess.Popen(command_args)


def get_win32_http_handler():
  """Return the registered win32 shell command string for http URLs."""
  # NOTE(review): the registry key opened here is never closed.
  key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
                        'Software\Classes\http\shell\open\command')
  return _winreg.EnumValue(key, 0)[1]


def create_win32_http_cmd(url):
  """Create a command-line tuple to launch a web browser for a given URL.

  At the moment, this ignores all default arguments to the browser.

  TODO(tstromberg): Properly parse the command-line arguments.
  """
  cmd = get_win32_http_handler()
  # "C:\blah blah\iexplore.exe" -nohome
  # "C:\blah blah\firefox.exe" -requestPending -osint -url "%1"
  if '"' in cmd:
    executable = cmd.split('"')[1]
  else:
    executable = cmd.split(' ')[0]
  return (executable, url)
| apache-2.0 | Python |
45d21d4a6c8dd27e904b154366c96069072ecacd | Add register_as() to simplify replacing formatter implementations (if necessary). | jenisys/behave,vrutkovs/behave,Abdoctor/behave,kymbert/behave,metaperl/behave,Gimpneek/behave,KevinOrtman/behave,allanlewis/behave,mzcity123/behave,vrutkovs/behave,charleswhchan/behave,spacediver/behave,kymbert/behave,Gimpneek/behave,joshal/behave,Abdoctor/behave,charleswhchan/behave,connorsml/behave,jenisys/behave,metaperl/behave,mzcity123/behave,connorsml/behave,Gimpneek/behave,hugeinc/behave-parallel,allanlewis/behave,KevinOrtman/behave,benthomasson/behave,joshal/behave,spacediver/behave,benthomasson/behave | behave/formatter/formatters.py | behave/formatter/formatters.py | # -*- coding: utf-8 -*-
import sys
import codecs
# -----------------------------------------------------------------------------
# FORMATTER REGISTRY:
# -----------------------------------------------------------------------------
formatters = {}


def register_as(formatter_class, name):
    """
    Register formatter class with given name.

    :param formatter_class:  Formatter class to register.
    :param name:  Name for this formatter (as identifier).
    """
    formatters[name] = formatter_class


def register(formatter_class):
    """Register a formatter class under its own ``name`` attribute."""
    register_as(formatter_class, formatter_class.name)


def list_formatters(stream):
    """Write one "name: description" line per registered formatter."""
    for formatter_name in sorted(formatters):
        formatter_class = formatters[formatter_name]
        stream.write(u'%s: %s\n' % (formatter_name, formatter_class.description))
def get_formatter(config, streams):
    """Build the list of formatter instances for a test run.

    Pairs each name in config.format with the stream at the same position
    in *streams*; sys.stdout is used when there are more names than
    streams.  NOTE: wraps entries of *streams* in codecs writers in place.
    """
    # -- ONLY ONCE (issue #159):
    # the stream may already handle encoding (py3k sys.stdout)
    # if it doesn't (py2k sys.stdout) then make it do so.
    default_encoding = 'UTF-8'
    for i, stream in enumerate(streams):
        if hasattr(stream, 'stream'):
            continue # Already wrapped with a codecs.StreamWriter
        if sys.version_info[0] < 3:
            # py2 does, however, sometimes declare an encoding on sys.stdout,
            # even if it doesn't use it (or it might be explicitly None)
            encoding = getattr(stream, 'encoding', None) or default_encoding
            streams[i] = codecs.getwriter(encoding)(stream)
        elif not getattr(stream, 'encoding', None):
            # ok, so the stream doesn't have an encoding at all so add one
            streams[i] = codecs.getwriter(default_encoding)(stream)

    # -- BUILD: Formatter list
    default_stream = sys.stdout
    formatter_list = []
    for i, name in enumerate(config.format):
        stream = default_stream
        if i < len(streams):
            stream = streams[i]
        formatter_list.append(formatters[name](stream, config))
    return formatter_list
# -----------------------------------------------------------------------------
# REGISTER KNOWN FORMATTER:
# -----------------------------------------------------------------------------
# Formatter modules are imported here, at the bottom, so that register()
# is already defined when the registrations run.
from behave.formatter import plain
register(plain.PlainFormatter)

from behave.formatter import pretty
register(pretty.PrettyFormatter)

from behave.formatter import json
register(json.JSONFormatter)
register(json.PrettyJSONFormatter)

from behave.formatter import null
register(null.NullFormatter)

from behave.formatter import progress
register(progress.ScenarioProgressFormatter)
register(progress.StepProgressFormatter)
| # -*- coding: utf-8 -*-
import sys
import codecs
# -----------------------------------------------------------------------------
# FORMATTER REGISTRY:
# -----------------------------------------------------------------------------
formatters = {}
def register(formatter):
formatters[formatter.name] = formatter
def list_formatters(stream):
for name in sorted(formatters):
stream.write(u'%s: %s\n' % (name, formatters[name].description))
def get_formatter(config, streams):
# -- ONLY ONCE (issue #159):
# the stream may already handle encoding (py3k sys.stdout)
# if it doesn't (py2k sys.stdout) then make it do so.
default_encoding = 'UTF-8'
for i, stream in enumerate(streams):
if hasattr(stream, 'stream'):
continue # Already wrapped with a codecs.StreamWriter
if sys.version_info[0] < 3:
# py2 does, however, sometimes declare an encoding on sys.stdout,
# even if it doesn't use it (or it might be explicitly None)
encoding = getattr(stream, 'encoding', None) or default_encoding
streams[i] = codecs.getwriter(encoding)(stream)
elif not getattr(stream, 'encoding', None):
# ok, so the stream doesn't have an encoding at all so add one
streams[i] = codecs.getwriter(default_encoding)(stream)
# -- BUILD: Formatter list
default_stream = sys.stdout
formatter_list = []
for i, name in enumerate(config.format):
stream = default_stream
if i < len(streams):
stream = streams[i]
formatter_list.append(formatters[name](stream, config))
return formatter_list
# -----------------------------------------------------------------------------
# REGISTER KNOWN FORMATTERS:
# -----------------------------------------------------------------------------
from behave.formatter import plain
register(plain.PlainFormatter)
from behave.formatter import pretty
register(pretty.PrettyFormatter)
from behave.formatter import json
register(json.JSONFormatter)
register(json.PrettyJSONFormatter)
from behave.formatter import null
register(null.NullFormatter)
from behave.formatter import progress
register(progress.ScenarioProgressFormatter)
register(progress.StepProgressFormatter)
| bsd-2-clause | Python |
3710295bd34c23c8a2fc1f37c3523c49cf22ecf9 | Bump the version. | c4fcm/MediaCloud-API-Client | mediacloud/__init__.py | mediacloud/__init__.py |
from api import MediaCloud
from storage import *
__version__ = '2.23.0'
|
from api import MediaCloud
from storage import *
__version__ = '2.22.2'
| mit | Python |
98d6b11c05dad95bdece060094baa81c4eabcfe6 | Update enums.py | sammchardy/python-binance | binance/enums.py | binance/enums.py | # coding=utf-8
# -- Symbol types.
SYMBOL_TYPE_SPOT = 'SPOT'
# -- Order lifecycle statuses reported by the exchange.
ORDER_STATUS_NEW = 'NEW'
ORDER_STATUS_PARTIALLY_FILLED = 'PARTIALLY_FILLED'
ORDER_STATUS_FILLED = 'FILLED'
ORDER_STATUS_CANCELED = 'CANCELED'
ORDER_STATUS_PENDING_CANCEL = 'PENDING_CANCEL'
ORDER_STATUS_REJECTED = 'REJECTED'
ORDER_STATUS_EXPIRED = 'EXPIRED'
# -- Candlestick (kline) interval codes.
KLINE_INTERVAL_1MINUTE = '1m'
KLINE_INTERVAL_3MINUTE = '3m'
KLINE_INTERVAL_5MINUTE = '5m'
KLINE_INTERVAL_15MINUTE = '15m'
KLINE_INTERVAL_30MINUTE = '30m'
KLINE_INTERVAL_1HOUR = '1h'
KLINE_INTERVAL_2HOUR = '2h'
KLINE_INTERVAL_4HOUR = '4h'
KLINE_INTERVAL_6HOUR = '6h'
KLINE_INTERVAL_8HOUR = '8h'
KLINE_INTERVAL_12HOUR = '12h'
KLINE_INTERVAL_1DAY = '1d'
KLINE_INTERVAL_3DAY = '3d'
KLINE_INTERVAL_1WEEK = '1w'
KLINE_INTERVAL_1MONTH = '1M'
# -- Order sides.
SIDE_BUY = 'BUY'
SIDE_SELL = 'SELL'
# -- Order types (includes the *_MARKET stop/take-profit variants).
ORDER_TYPE_LIMIT = 'LIMIT'
ORDER_TYPE_MARKET = 'MARKET'
ORDER_TYPE_STOP = 'STOP'
ORDER_TYPE_STOP_MARKET = 'STOP_MARKET'
ORDER_TYPE_TAKE_PROFIT = 'TAKE_PROFIT'
ORDER_TYPE_TAKE_PROFIT_MARKET = 'TAKE_PROFIT_MARKET'
ORDER_TYPE_LIMIT_MAKER = 'LIMIT_MAKER'
# -- Time-in-force policies.
TIME_IN_FORCE_GTC = 'GTC'  # Good till cancelled
TIME_IN_FORCE_IOC = 'IOC'  # Immediate or cancel
TIME_IN_FORCE_FOK = 'FOK'  # Fill or kill
# -- Order response types.
ORDER_RESP_TYPE_ACK = 'ACK'
ORDER_RESP_TYPE_RESULT = 'RESULT'
ORDER_RESP_TYPE_FULL = 'FULL'
# -- Depth levels for partial-book websocket streams.
WEBSOCKET_DEPTH_5 = '5'
WEBSOCKET_DEPTH_10 = '10'
WEBSOCKET_DEPTH_20 = '20'
| # coding=utf-8
SYMBOL_TYPE_SPOT = 'SPOT'
ORDER_STATUS_NEW = 'NEW'
ORDER_STATUS_PARTIALLY_FILLED = 'PARTIALLY_FILLED'
ORDER_STATUS_FILLED = 'FILLED'
ORDER_STATUS_CANCELED = 'CANCELED'
ORDER_STATUS_PENDING_CANCEL = 'PENDING_CANCEL'
ORDER_STATUS_REJECTED = 'REJECTED'
ORDER_STATUS_EXPIRED = 'EXPIRED'
KLINE_INTERVAL_1MINUTE = '1m'
KLINE_INTERVAL_3MINUTE = '3m'
KLINE_INTERVAL_5MINUTE = '5m'
KLINE_INTERVAL_15MINUTE = '15m'
KLINE_INTERVAL_30MINUTE = '30m'
KLINE_INTERVAL_1HOUR = '1h'
KLINE_INTERVAL_2HOUR = '2h'
KLINE_INTERVAL_4HOUR = '4h'
KLINE_INTERVAL_6HOUR = '6h'
KLINE_INTERVAL_8HOUR = '8h'
KLINE_INTERVAL_12HOUR = '12h'
KLINE_INTERVAL_1DAY = '1d'
KLINE_INTERVAL_3DAY = '3d'
KLINE_INTERVAL_1WEEK = '1w'
KLINE_INTERVAL_1MONTH = '1M'
SIDE_BUY = 'BUY'
SIDE_SELL = 'SELL'
ORDER_TYPE_LIMIT = 'LIMIT'
ORDER_TYPE_MARKET = 'MARKET'
ORDER_TYPE_STOP_LOSS = 'STOP_LOSS'
ORDER_TYPE_STOP_LOSS_LIMIT = 'STOP_LOSS_LIMIT'
ORDER_TYPE_TAKE_PROFIT = 'TAKE_PROFIT'
ORDER_TYPE_TAKE_PROFIT_LIMIT = 'TAKE_PROFIT_LIMIT'
ORDER_TYPE_LIMIT_MAKER = 'LIMIT_MAKER'
TIME_IN_FORCE_GTC = 'GTC' # Good till cancelled
TIME_IN_FORCE_IOC = 'IOC' # Immediate or cancel
TIME_IN_FORCE_FOK = 'FOK' # Fill or kill
ORDER_RESP_TYPE_ACK = 'ACK'
ORDER_RESP_TYPE_RESULT = 'RESULT'
ORDER_RESP_TYPE_FULL = 'FULL'
WEBSOCKET_DEPTH_5 = '5'
WEBSOCKET_DEPTH_10 = '10'
WEBSOCKET_DEPTH_20 = '20'
| mit | Python |
3b271300b5921753691a65236652b5c46060e882 | Fix issue 36 | pombreda/django-page-cms,pombreda/django-page-cms,Alwnikrotikz/django-page-cms,Alwnikrotikz/django-page-cms,google-code-export/django-page-cms,PiRSquared17/django-page-cms,odyaka341/django-page-cms,Alwnikrotikz/django-page-cms,PiRSquared17/django-page-cms,odyaka341/django-page-cms,google-code-export/django-page-cms,google-code-export/django-page-cms,google-code-export/django-page-cms,PiRSquared17/django-page-cms,odyaka341/django-page-cms,PiRSquared17/django-page-cms,pombreda/django-page-cms,pombreda/django-page-cms,odyaka341/django-page-cms,Alwnikrotikz/django-page-cms | pages/views.py | pages/views.py | from django.http import Http404
from django.shortcuts import get_object_or_404
from django.contrib.sites.models import SITE_CACHE
from pages import settings
from pages.models import Page, Content
from pages.utils import auto_render, get_language_from_request
def details(request, page_id=None, slug=None,
        template_name=settings.DEFAULT_PAGE_TEMPLATE):
    """
    Render a CMS page.

    Selection order: by primary key when *page_id* is given, by slug when
    *slug* is given, otherwise the first page of the site's navigation.
    Raises ``Http404`` when the site has no navigable pages, or when the
    slug does not resolve to a published/hidden page.

    Returns ``(template_name, context)`` for the ``auto_render`` decorator;
    the context is ``locals()``, so every local variable here is exposed
    to the template.
    """
    lang = get_language_from_request(request)
    site = request.site
    pages = Page.objects.navigation(site).order_by("tree_id")
    if pages:
        if page_id:
            current_page = get_object_or_404(
                Page.objects.published(site), pk=page_id)
        elif slug:
            slug_content = Content.objects.get_page_slug(slug, site)
            # Only published or hidden pages may be served by slug.
            if slug_content and \
                    slug_content.page.calculated_status in (
                        Page.PUBLISHED, Page.HIDDEN):
                current_page = slug_content.page
            else:
                raise Http404
        else:
            current_page = pages[0]
        # The page's own template overrides the default passed in.
        template_name = current_page.get_template()
    else:
        raise Http404
    return template_name, locals()
# auto_render turns the (template, context) tuple into an HttpResponse.
details = auto_render(details)
| from django.http import Http404
from django.shortcuts import get_object_or_404
from django.contrib.sites.models import SITE_CACHE
from pages import settings
from pages.models import Page, Content
from pages.utils import auto_render, get_language_from_request
def details(request, page_id=None, slug=None,
template_name=settings.DEFAULT_PAGE_TEMPLATE):
lang = get_language_from_request(request)
site = request.site
pages = Page.objects.navigation(site).order_by("tree_id")
if pages:
if page_id:
current_page = get_object_or_404(
Page.objects.published(site), pk=page_id)
elif slug:
slug_content = Content.objects.get_page_slug(slug, site)
if slug_content and \
slug_content.page.calculated_status in (
Page.PUBLISHED, Page.HIDDEN):
current_page = slug_content.page
else:
raise Http404
else:
current_page = pages[0]
template_name = current_page.get_template()
else:
current_page = None
return template_name, locals()
details = auto_render(details)
| bsd-3-clause | Python |
184d6984b9c1a21dea35500edb56de2f94d66371 | Split up logout and authorization checks. | bueda/django-comrade | comrade/test/base.py | comrade/test/base.py | from django import test
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.cache import cache
from nose.tools import eq_, ok_
import json
import mockito
class BaseTest(test.TestCase):
    """
    Base test case: loads the 'dev' fixture, logs the test client in
    during setUp, and clears mockito stubs and the cache after each test.
    """
    fixtures = ['dev']
    def setUp(self):
        super(BaseTest, self).setUp()
        self.login()
    def tearDown(self):
        super(BaseTest, self).tearDown()
        # Undo stubbing and drop cached state so tests stay isolated.
        mockito.unstub()
        cache.clear()
    def login(self, password=None):
        """Log the client in as ``self.user`` (or the default 'test' account)."""
        return self.client.login(username=(
            getattr(self, 'user', None) and self.user.username) or 'test',
            password=password or 'test')
    def _test_unauthenticated(self, method, url, next_url=None, allowed=False):
        """Log out, then check *url* is protected (or open when *allowed*)."""
        self.client.logout()
        self._test_unauthorized(method, url, next_url, allowed)
    def _test_unauthorized(self, method, url, next_url=None, allowed=False):
        """Expect a redirect to login (with ?next=url) unless *allowed*."""
        response = method(url)
        if not allowed:
            self.assertRedirects(response,
                (next_url or reverse('account:login')) +
                '?' + REDIRECT_FIELD_NAME + '=' + url)
        else:
            eq_(response.status_code, 200)
    def _test_method_not_allowed(self, method, url):
        """Expect HTTP 405 for a disallowed HTTP method on *url*."""
        response = method(url)
        eq_(response.status_code, 405)
    def assertJsonContains(self, response, key, value=None, status_code=200,
            msg_prefix=''):
        """Assert the JSON body has *key* (and that it equals *value*, if given)."""
        if msg_prefix:
            msg_prefix += ": "
        self.assertEqual(response.status_code, status_code,
            msg_prefix + "Couldn't retrieve page: Response code was %d"
            " (expected %d)" % (response.status_code, status_code))
        data = json.loads(response.content)
        assert key in data
        if value:
            eq_(data[key], value)
class BaseModelTest(BaseTest):
    """Base for model tests; subclasses pass their model instance to setUp."""
    def setUp(self, instance=None):
        super(BaseModelTest, self).setUp()
        self.instance = instance
    def test_unicode(self):
        # ``unicode`` is the Python 2 text type; this suite targets Python 2.
        if self.instance:
            ok_(isinstance(self.instance.__unicode__(), unicode))
| from django import test
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.cache import cache
from nose.tools import eq_, ok_
import json
import mockito
class BaseTest(test.TestCase):
fixtures = ['dev']
def setUp(self):
super(BaseTest, self).setUp()
self.login()
def tearDown(self):
super(BaseTest, self).tearDown()
mockito.unstub()
cache.clear()
def login(self, password=None):
return self.client.login(username=(
getattr(self, 'user', None) and self.user.username) or 'test',
password=password or 'test')
def _test_unauthenticated(self, method, url, next_url=None, allowed=False):
self.client.logout()
response = method(url)
if not allowed:
self.assertRedirects(response,
(next_url or reverse('account:login')) +
'?' + REDIRECT_FIELD_NAME + '=' + url)
else:
eq_(response.status_code, 200)
def _test_method_not_allowed(self, method, url):
response = method(url)
eq_(response.status_code, 405)
def assertJsonContains(self, response, key, value=None, status_code=200,
msg_prefix=''):
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve page: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
data = json.loads(response.content)
assert key in data
if value:
eq_(data[key], value)
class BaseModelTest(BaseTest):
def setUp(self, instance=None):
super(BaseModelTest, self).setUp()
self.instance = instance
def test_unicode(self):
if self.instance:
ok_(isinstance(self.instance.__unicode__(), unicode))
| mit | Python |
9baa355e1224115a707c0dc507372b20d76cc322 | Clear cache after each test run. | bueda/django-comrade | comrade/test/base.py | comrade/test/base.py | from django import test
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.cache import cache
from nose.tools import eq_, ok_
import json
import mockito
class BaseTest(test.TestCase):
fixtures = ['dev']
def setUp(self):
super(BaseTest, self).setUp()
self.user = User.objects.get(username='test')
self.login()
def tearDown(self):
super(BaseTest, self).tearDown()
mockito.unstub()
cache.clear()
def login(self):
return self.client.login(username='test', password='test')
def _test_unauthenticated(self, method, url, allowed=False):
self.client.logout()
response = method(url)
if not allowed:
self.assertRedirects(response, reverse('account:login') +
'?' + REDIRECT_FIELD_NAME + '=' + url)
else:
eq_(response.status_code, 200)
def _test_method_not_allowed(self, method, url):
response = method(url)
eq_(response.status_code, 405)
def assertJsonContains(self, response, key, value, status_code=200,
msg_prefix=''):
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve page: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
json = json.loads(response.content)
assert key in json
eq_(json[key], value)
class BaseModelTest(BaseTest):
def check_unicode(self):
ok_(isinstance(self.instance.__unicode__(), unicode))
| from django import test
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import REDIRECT_FIELD_NAME
from nose.tools import eq_, ok_
import json
import mockito
class BaseTest(test.TestCase):
fixtures = ['dev']
def setUp(self):
super(BaseTest, self).setUp()
self.user = User.objects.get(username='test')
self.login()
def tearDown(self):
super(BaseTest, self).tearDown()
mockito.unstub()
def login(self):
return self.client.login(username='test', password='test')
def _test_unauthenticated(self, method, url, allowed=False):
self.client.logout()
response = method(url)
if not allowed:
self.assertRedirects(response, reverse('account:login') +
'?' + REDIRECT_FIELD_NAME + '=' + url)
else:
eq_(response.status_code, 200)
def _test_method_not_allowed(self, method, url):
response = method(url)
eq_(response.status_code, 405)
def assertJsonContains(self, response, key, value, status_code=200,
msg_prefix=''):
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve page: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
json = json.loads(response.content)
assert key in json
eq_(json[key], value)
class BaseModelTest(BaseTest):
def check_unicode(self):
ok_(isinstance(self.instance.__unicode__(), unicode))
| mit | Python |
154878c635d1fbb820a3a3ff89b8711b6cc0a6c5 | Bring 1st cut of the CLI | bendtherules/godaddycli,bendtherules/godaddycli | godaddycli/godaddycli/godaddycli.py | godaddycli/godaddycli/godaddycli.py | #!/usr/bin/env python
# Copyright 2015 by Wojciech A. Koszek <wojciech@koszek.com>
# -*- coding: utf-8 -*-
import os
import sys
import json
import argparse
import getpass
from pygodaddy import GoDaddyClient
g_dns_record_types = [ "A", "CNAME", "MX", "TXT", "SRV", "NS", "AAAA" ]
g_debug = False
def dbg(s):
    """Print *s* as a debug line when --debug was given.  (Python 2 print.)"""
    if g_debug != True:
        return
    print "# debug: " + str(s)
def parse_args(args):
    """
    Parse command-line *args*.

    :param args: Argument list (e.g. ``sys.argv[1:]``).
    :return: argparse namespace with ``user``, ``password`` and ``debug``.
    """
    cli = argparse.ArgumentParser(description="GoDaddy.com CLI")
    for flag in ("--user", "--password"):
        cli.add_argument(flag)
    cli.add_argument("--debug", action="store_true", default=False)
    return cli.parse_args(args)
def godaddycli(username, password):
    """
    Log in to GoDaddy and dump every DNS record of every domain.

    Prints one "domain type hostname value" line per record for each of
    the record types in ``g_dns_record_types``; exits the process with
    status 1 when the login fails.  (Python 2 print syntax.)
    """
    client = GoDaddyClient()
    c = client.login(username, password)
    if not c:
        print "couldn't login"
        sys.exit(1)
    for domain_name in client.find_domains():
        for record_type in g_dns_record_types:
            domain_data_all = client.find_dns_records(domain_name, record_type)
            for domain_data in domain_data_all:
                print domain_name, record_type, domain_data.hostname, domain_data.value
def doit(cfg):
global g_debug
g_debug = cfg.debug
home_dir = os.environ["HOME"]
user = password = cfg_data = None
cfg_filename = home_dir + "/.godaddyclirc"
if os.path.isfile(cfg_filename):
with open(cfg_filename, "r") as f:
cfg_data = json.load(f)
f.close()
valid_fields_count = 0
if "user" in cfg_data.keys():
user = cfg_data["user"]
if "password" in cfg_data.keys():
password = cfg_data["password"]
maybe_save = False
if cfg.user:
user = cfg.user
if user is None:
sys.stdout.write("Enter GoDaddy user : ")
user = sys.stdin.readline().strip("\n")
maybe_save = True
if cfg.password:
password = cfg.password
if password is None:
password = getpass.getpass("Enter GoDaddy password: ")
maybe_save = True
dbg("user: " + user)
dbg("pass: " + password)
dbg("home: " + home_dir)
will_save = False
if maybe_save:
sys.stdout.write("Do you want to save your password in " +
cfg_filename + "? Enter 'yes' or 'no': ")
while True:
yes_or_no = sys.stdin.readline().strip("\n")
if yes_or_no != "yes" and yes_or_no != "no":
print "Only 'yes' or 'no' supported"
continue
if yes_or_no == "yes":
will_save = True
break
if will_save:
data_to_save = {
"user" : user,
"password" : password
};
with open(cfg_filename, "w") as f:
js = json.dump(data_to_save, f)
f.close()
godaddycli(user, password)
def main():
    """CLI entry point: parse argv and run."""
    cfg = parse_args(sys.argv[1:])
    doit(cfg)
if __name__ == "__main__":
    # main() returns None, so sys.exit() reports success (status 0).
    sys.exit(main())
| # -*- coding: utf-8 -*-
| bsd-2-clause | Python |
91f995c87463c82badc79d2515a769b2dd178a6d | Fix get_members.py to use correct environment variables | tiegz/ThreatExchange,wxsBSD/ThreatExchange,theCatWisel/ThreatExchange,tiegz/ThreatExchange,wxsBSD/ThreatExchange,tiegz/ThreatExchange,tiegz/ThreatExchange,wxsBSD/ThreatExchange,theCatWisel/ThreatExchange,wxsBSD/ThreatExchange,theCatWisel/ThreatExchange,theCatWisel/ThreatExchange,RyPeck/ThreatExchange,wxsBSD/ThreatExchange,RyPeck/ThreatExchange,mgoffin/ThreatExchange,RyPeck/ThreatExchange,RyPeck/ThreatExchange,arirubinstein/ThreatExchange,tiegz/ThreatExchange,theCatWisel/ThreatExchange,mgoffin/ThreatExchange,RyPeck/ThreatExchange,wxsBSD/ThreatExchange,tiegz/ThreatExchange,mgoffin/ThreatExchange,mgoffin/ThreatExchange,RyPeck/ThreatExchange,mgoffin/ThreatExchange,theCatWisel/ThreatExchange,mgoffin/ThreatExchange | members/get_members.py | members/get_members.py | #!/usr/local/bin/python
'''
Search for and retrieve threat indicators from ThreatExchange
'''
from __future__ import print_function
import json
import os
import re
from urllib import urlencode
from urllib2 import urlopen
FB_APP_ID = os.environ['TX_APP_ID']
FB_ACCESS_TOKEN = os.environ['TX_APP_SECRET']
SERVER = 'https://graph.facebook.com/'
def clean_url(url):
    '''
    Return *url* with any access-token value masked for on-screen display.

    Replaces ``access_token=<value>`` with ``access_token=xxx|xxxx`` so
    credentials never appear in console output.

    :param url: URL string that may contain an ``access_token`` parameter.
    :returns: The URL with the token value replaced.
    '''
    # Raw string: the old pattern used invalid escape sequences (\=, \%,
    # \|) that Python 3 warns about; '=', '%' and '|' need no escaping
    # inside a character class.
    return re.sub(
        r'access_token=[A-Za-z0-9%|]+',
        'access_token=xxx|xxxx',
        url
    )
def get_query():
    '''
    Build the ThreatExchange members URL, authenticating with the
    ``app_id|app_secret`` access token assembled from the environment
    variables read at module load.
    '''
    fields = ({
        'access_token': FB_APP_ID + '|' + FB_ACCESS_TOKEN,
    })
    return SERVER + 'threat_exchange_members?' + urlencode(fields)
def process_results(data):
    '''
    Print each ThreatExchange member as a CSV line: "name","email","id".

    NOTE(review): quoting is naive -- a name or email containing '"'
    would produce a malformed row; the csv module would be safer.
    '''
    for row in data:
        if 'email'in row:
            email = row['email']
        else:
            email = ''
        print ('"' + row['name'] + '","' + email + '","' + row['id'] + '"')
def run_query(url):
    '''
    Fetch *url* and print the member list it returns.

    Returns True on error and False on success (note the inverted
    truthiness).  Python 2: ``urlopen`` here is ``urllib2.urlopen``, whose
    HTTPError exposes the response headers via ``e.info()``.
    '''
    try:
        response = urlopen(url).read()
    except Exception as e:
        lines = str(e.info()).split('\r\n')
        msg = str(e)
        for line in lines:
            # Hacky way to get the exact error from the server
            result = re.search('^WWW-Authenticate: .*\) (.*)\"$', line)
            if result:
                msg = result.groups()[0]
        print ('ERROR: %s\n' % (msg))
        return True
    try:
        data = json.loads(response)
        if 'data' in data:
            process_results(data['data'])
            return False
        # NOTE(review): when the payload has no 'data' key this falls
        # through and implicitly returns None (falsy, i.e. "success").
    except Exception as e:
        print (str(e))
        return True
if __name__ == '__main__':
run_query(get_query())
| #!/usr/local/bin/python
'''
Search for and retrieve threat indicators from ThreatExchange
'''
from __future__ import print_function
import json
import os
import re
from urllib import urlencode
from urllib2 import urlopen
FB_APP_ID = os.environ['FB_THREATEXCHANGE_APP_ID']
FB_ACCESS_TOKEN = os.environ['FB_THREATEXCHANGE_APP_SECRET']
SERVER = 'https://graph.facebook.com/'
def clean_url(url):
'''
Removes the access token from the URL to display onscreen
'''
return re.sub(
'access_token\=[A-Za-z0-9\%\|]+',
'access_token=xxx|xxxx',
url
)
def get_query():
'''
Builds a query string based on the specified options
'''
fields = ({
'access_token': FB_APP_ID + '|' + FB_ACCESS_TOKEN,
})
return SERVER + 'threat_exchange_members?' + urlencode(fields)
def process_results(data):
'''
Process the threat indicators received from the server.
'''
for row in data:
if 'email'in row:
email = row['email']
else:
email = ''
print ('"' + row['name'] + '","' + email + '","' + row['id'] + '"')
def run_query(url):
try:
response = urlopen(url).read()
except Exception as e:
lines = str(e.info()).split('\r\n')
msg = str(e)
for line in lines:
# Hacky way to get the exact error from the server
result = re.search('^WWW-Authenticate: .*\) (.*)\"$', line)
if result:
msg = result.groups()[0]
print ('ERROR: %s\n' % (msg))
return True
try:
data = json.loads(response)
if 'data' in data:
process_results(data['data'])
return False
except Exception as e:
print (str(e))
return True
if __name__ == '__main__':
run_query(get_query())
| bsd-3-clause | Python |
6e526a173de970f2cc8f7cd62823a257786e348e | Add the missing category-detail urlconf to not to break bookmarked users | PARINetwork/pari,PARINetwork/pari,PARINetwork/pari,PARINetwork/pari | category/urls.py | category/urls.py | from django.conf.urls import patterns, url
from .views import CategoriesList, GalleryDetail, StoryDetail
urlpatterns = patterns('category.views',
    url(r'^categories/$', CategoriesList.as_view(), name='category-list'),
    # 'category-detail' serves the same view as 'story-detail' so old
    # bookmarked /categories/<slug>/ URLs keep working.
    url(r'^categories/(?P<slug>.+)/$', StoryDetail.as_view(), name='category-detail'),
    url(r'^stories/categories/(?P<slug>.+)/$', StoryDetail.as_view(), name='story-detail'),
    url(r'^gallery/categories/(?P<slug>.+)/$', GalleryDetail.as_view(), name='gallery-detail'),
    url(r'^gallery/$', 'gallery_home_page', name='gallery-home-page')
)
| from django.conf.urls import patterns, url
from .views import CategoriesList, GalleryDetail, StoryDetail
urlpatterns = patterns('category.views',
url(r'^gallery/categories/(?P<slug>.+)/$', GalleryDetail.as_view(), name='gallery-detail'),
url(r'^stories/categories/(?P<slug>.+)/$', StoryDetail.as_view(), name='story-detail'),
url(r'^categories/$', CategoriesList.as_view(), name='category-list'),
url(r'^gallery/$', 'gallery_home_page', name='gallery-home-page')
)
| bsd-3-clause | Python |
f41a4e00ac2def76d1ea6f149d08f16be6760b71 | bump version to 0.1.2 | dcoker/cfpp,dcoker/cfpp | cfpp/_version.py | cfpp/_version.py | """0.1.2"""
VERSION = __doc__
| """0.0.1"""
VERSION = __doc__
| apache-2.0 | Python |
4c97b40ff15b3eb41a30d43518a8d922f7e30581 | Update compat.py | mocketize/python-mocket,mindflayer/python-mocket | mocket/compat.py | mocket/compat.py | import sys
import shlex
import six
PY2 = sys.version_info[0] == 2
if PY2:
from BaseHTTPServer import BaseHTTPRequestHandler
from urlparse import urlsplit, parse_qs
else:
from http.server import BaseHTTPRequestHandler
from urllib.parse import urlsplit, parse_qs
text_type = six.text_type
byte_type = six.binary_type
basestring = six.string_types
encoding = 'utf-8'
def encode_utf8(s):
    """Coerce *s* to the byte type, UTF-8-encoding text input first."""
    if isinstance(s, text_type):
        s = s.encode(encoding)
    return byte_type(s)
def decode_utf8(s):
    """Coerce *s* to the text type, UTF-8-decoding byte input first."""
    if isinstance(s, byte_type):
        s = s.decode(encoding)
    return text_type(s)
def shsplit(s):
    """Shell-split *s*; ``shlex.split`` wants bytes on Py2 and text on Py3."""
    if PY2:
        s = encode_utf8(s)
    else:
        s = decode_utf8(s)
    return shlex.split(s)
| import sys
import shlex
import six
PY2 = sys.version_info[0] == 2
if PY2:
from BaseHTTPServer import BaseHTTPRequestHandler
from urlparse import urlsplit, parse_qs
else:
from http.server import BaseHTTPRequestHandler
from urllib.parse import urlsplit, parse_qs
text_type = six.text_type
byte_type = six.binary_type
basestring = six.string_types
def encode_utf8(s):
if isinstance(s, text_type):
s = s.encode('utf-8')
return byte_type(s)
def decode_utf8(s):
if isinstance(s, byte_type):
s = s.decode("utf-8")
return text_type(s)
def shsplit(s):
if PY2:
s = encode_utf8(s)
else:
s = decode_utf8(s)
return shlex.split(s)
| bsd-3-clause | Python |
3fb5b55637f6835da578605d3229fb6833415420 | refactor __repr__ | deseret-tech/litecoin-python,doged/dogecoindark-python,deseret-tech/litecoin-python,laanwj/bitcoin-python,laanwj/bitcoin-python,doged/dogecoindark-python | src/bitcoinrpc/util.py | src/bitcoinrpc/util.py | # Copyright (c) 2010 Witchspace <witchspace81@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Generic utilities used by bitcoin client library."""
from copy import copy
class DStruct(object):
    """
    Simple dynamic structure, like :const:`collections.namedtuple` but more flexible
    (and less memory-efficient).

    Subclasses declare ``_fields`` (positional attribute names) and
    ``_defaults`` (default values); extra keyword arguments become
    attributes as well.  (Python 2 code: uses ``dict.iteritems()``.)
    """
    # Default arguments. Defaults are *shallow copied*, to allow defaults such as [].
    _fields = []
    _defaults = {}
    def __init__(self, *args_t, **args_d):
        # Reject more positional arguments than declared fields.
        if len(args_t) > len(self._fields):
            raise TypeError("Number of arguments is larger than of predefined fields")
        # Copy default values (shallow copy, so a [] default is per-instance).
        for (k, v) in self._defaults.iteritems():
            self.__dict__[k] = copy(v)
        # Bind positional arguments to their field names, in order.
        self.__dict__.update(zip(self._fields, args_t))
        # Keyword arguments override defaults and positionals.
        self.__dict__.update(args_d)
    def __repr__(self):
        # e.g. package.module.ClassName(a=1, b='x'); attribute order
        # follows the instance __dict__ iteration order.
        return '{module}.{classname}({slots})'.format(
            module=self.__class__.__module__, classname=self.__class__.__name__,
            slots=", ".join('{k}={v!r}'.format(k=k, v=v) for k, v in
                            self.__dict__.iteritems()))
| # Copyright (c) 2010 Witchspace <witchspace81@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Generic utilities used by bitcoin client library."""
from copy import copy
class DStruct(object):
"""
Simple dynamic structure, like :const:`collections.namedtuple` but more flexible
(and less memory-efficient)
"""
# Default arguments. Defaults are *shallow copied*, to allow defaults such as [].
_fields = []
_defaults = {}
def __init__(self, *args_t, **args_d):
# order
if len(args_t) > len(self._fields):
raise TypeError("Number of arguments is larger than of predefined fields")
# Copy default values
for (k, v) in self._defaults.iteritems():
self.__dict__[k] = copy(v)
# Set pass by value arguments
self.__dict__.update(zip(self._fields, args_t))
# dict
self.__dict__.update(args_d)
def __repr__(self):
rv = []
for (k, v) in self.__dict__.iteritems():
rv.append(k+"="+v.__repr__())
return self.__class__.__module__+"."+self.__class__.__name__+"("+(",".join(rv))+")"
| mit | Python |
077ad3e5227c3ad9831a6c94c14cd640f7e933d9 | Use reverse function for urls in carusele app | SarFootball/backend,SarFootball/backend,SarFootball/backend | carusele/models.py | carusele/models.py | from django.core.urlresolvers import reverse
from django.db import models
class News (models.Model):
    """
    Detail page (description and content) backing one carousel element.
    """
    title = models.CharField(max_length=400)
    description = models.TextField(default="")
    content = models.TextField()
    pubdate = models.DateTimeField()
    image = models.ImageField(upload_to="media")
    def __unicode__(self):
        # Python 2 text representation used by the admin and templates.
        return unicode(self.title)
    def get_absolute_url(self):
        # Resolve the named 'article' URL for this article's detail page.
        return reverse("article", args=(self.id,))
class Element (models.Model):
    """
    Teaser (picture and short description) of a News item shown in the
    carousel JavaScript widget on the main page.
    """
    description = models.CharField(max_length=400)
    image = models.ImageField(upload_to="media")
    # One carousel element per News article.
    news = models.OneToOneField("News")
    def __unicode__(self):
        return unicode(self.description)
| from django.db import models
class News (models.Model):
"""
News model represent detail description and
content of each carusele element.
"""
title = models.CharField(max_length=400)
description = models.TextField(default="")
content = models.TextField()
pubdate = models.DateTimeField()
image = models.ImageField(upload_to="media")
def __unicode__(self):
return unicode(self.title)
def get_absolute_url(self):
return "/carusele/art/%i/" % self.id
class Element (models.Model):
"""
This model presents picture and short description
of news in carusele javascript element on main page.
"""
description = models.CharField(max_length=400)
image = models.ImageField(upload_to="media")
news = models.OneToOneField("News")
def __unicode__(self):
return unicode(self.description)
| apache-2.0 | Python |
e046b9e47d4ce26403f7bd2054460f6f671bffaf | Fix Typo | iGene/igene_bot,aver803bath5/igene_bot | models/google.py | models/google.py | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import logging
import re
import requests
import urllib.parse
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def google(bot, update):
    """
    Reply to a "google <query>" chat message with the first search
    result's URL.

    Scrapes the Google results page; when the first hit is a Google
    redirect (``/url?q=<target>&sa=...``) the real target URL is
    extracted before replying.
    """
    # (?i) must come first: a global inline flag after '^' is rejected
    # by Python 3.11+.
    search = re.sub(r'(?i)^google ', '', update.message.text)
    logger.info("Google %s" % search)
    headers = {'User-Agent': 'Mozilla/5.0'}
    # BUG FIX: requests.get()'s second positional parameter is `params`,
    # not `headers` -- pass by keyword so the User-Agent is actually sent.
    r = requests.get('https://www.google.com/search?q=' + search, headers=headers)
    soup = BeautifulSoup(r.text, "html.parser")
    # NOTE(review): assumes the results page always has an <h3 class="r">
    # with a link; find() returning None would raise AttributeError.
    result = soup.find('h3', {'class': 'r'}).find('a').attrs['href']
    result = urllib.parse.unquote(result)
    if not result.startswith('http'):
        # Unwrap Google's /url?q=<target>&sa=... redirect form.
        result = re.sub(r'^/url\?q=', '', result)
        result = re.sub(r'&sa.+', '', result)
    update.message.reply_text(result)
def images(bot, update):
    """
    Reply to an "image <query>" chat message with a list of Google image
    thumbnail URLs (gstatic.com-hosted <img> sources).
    """
    # BUG FIX: the old pattern began with a literal '%' (typo for '^'),
    # so the "image " prefix was never stripped from the query; anchor at
    # the start with the flag first ('^(?i)' is rejected by Python 3.11+).
    search = re.sub(r'(?i)^image ', '', update.message.text)
    logger.info("Google image search %s" % search)
    headers = {'User-Agent': 'Mozilla/5.0'}
    # BUG FIX: pass headers by keyword -- the second positional parameter
    # of requests.get() is `params`, not `headers`.
    r = requests.get('https://www.google.com/search?tbm=isch&q=' + search, headers=headers)
    soup = BeautifulSoup(r.text, "html.parser")
    # Avoid shadowing this function's own name with the local list.
    thumbnails = [img['src'] for img in soup.find_all("img", {"src": re.compile("gstatic.com")})]
    update.message.reply_text(thumbnails)
| # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import logging
import re
import requests
import urllib.parse
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def google(bot, update):
    """Reply to a Telegram message with the first Google search result.

    Strips the leading 'google ' command word from the message text, scrapes
    the Google results page, and replies with the first organic result URL.
    """
    search = update.message.text
    # Apply case-insensitivity via flags=: an inline (?i) that is not at the
    # very start of the pattern is deprecated and an error on Python 3.11+.
    search = re.sub(r'^google ', '', search, flags=re.IGNORECASE)
    logger.info("Google %s" % search)
    headers = {'User-Agent': 'Mozilla/5.0'}
    # Pass headers as a keyword argument: the second positional parameter of
    # requests.get() is `params`, so the original call never sent the
    # User-Agent header. Also percent-encode the query so spaces and '&'
    # survive in the URL.
    r = requests.get('https://www.google.com/search?q=' + urllib.parse.quote_plus(search),
                     headers=headers)
    soup = BeautifulSoup(r.text, "html.parser")
    # First organic result link. NOTE(review): raises AttributeError when the
    # page contains no result markup -- confirm desired behaviour upstream.
    result = soup.find('h3', {'class': 'r'}).find('a').attrs['href']
    result = urllib.parse.unquote(result)
    if re.match(r'^http', result) is None:
        # Relative result links look like /url?q=<target>&sa=...; unwrap them.
        result = re.sub(r'^/url\?q=', '', result)
        result = re.sub(r'&sa.+', '', result)
    update.message.reply_text(result)
def images(bot, update):
    """Reply to a Telegram message with Google image-search result URLs.

    Strips the leading 'image ' command word and replies with the list of
    thumbnail src URLs scraped from the image-search results page.
    """
    search = update.message.text
    # '^' anchors the command word; the original pattern started with a
    # literal '%', which never matched, leaving 'image ' inside the query.
    # The (?i) flag is also moved to flags= (mid-pattern inline flags are an
    # error on Python 3.11+).
    search = re.sub(r'^image ', '', search, flags=re.IGNORECASE)
    # Fixed typo: the original called the nonexistent logger.ingo(), which
    # raised AttributeError on every invocation.
    logger.info("Google image search %s" % search)
    headers = {'User-Agent': 'Mozilla/5.0'}
    # headers must be a keyword argument; positionally it would be `params`.
    r = requests.get('https://www.google.com/search?tbm=isch&q=' + urllib.parse.quote_plus(search),
                     headers=headers)
    soup = BeautifulSoup(r.text, "html.parser")
    # Thumbnails are hosted on gstatic.com; collect their src attributes.
    images = [a['src'] for a in soup.find_all("img", {"src": re.compile("gstatic.com")})]
    update.message.reply_text(images)
b3c1eecf617cf9278cbd000537f9fac1f9303c4c | Disable django-registration tests in Jenkins. | onepercentclub/onepercentclub-site,onepercentclub/onepercentclub-site,onepercentclub/onepercentclub-site,onepercentclub/onepercentclub-site,onepercentclub/onepercentclub-site | bluebottle/settings/jenkins.py | bluebottle/settings/jenkins.py |
# Django settings overlay for the Jenkins CI test run. An in-memory SQLite
# database keeps the run self-contained; the rest is inherited from .base.
# SECRET_KEY and DATABASES needs to be defined before the base settings is imported.
SECRET_KEY = 'hbqnTEq+m7Tk61bvRV/TLANr3i0WZ6hgBXDh3aYpSU8m+E1iCtlU3Q=='
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
}
# Star-import of the shared base settings; the values defined above
# intentionally pre-empt anything the base module would derive from them.
from .base import *
#
# Put jenkins environment specific overrides below.
#
INSTALLED_APPS += ('django_jenkins',)
DEBUG = False
TEMPLATE_DEBUG = False
# Test all INSTALLED_APPS by default
PROJECT_APPS = list(INSTALLED_APPS)
# Each .remove() below excludes an app whose test suite is known to fail in
# this environment; order matters only in that each app must be present.
# Some of these tests fail, and it's not our fault
# https://code.djangoproject.com/ticket/17966
PROJECT_APPS.remove('django.contrib.auth')
# This app fails with a strange error:
# DatabaseError: no such table: django_comments
# Not sure what's going on so it's disabled for now.
PROJECT_APPS.remove('django.contrib.sites')
# https://github.com/django-extensions/django-extensions/issues/154
PROJECT_APPS.remove('django_extensions')
PROJECT_APPS.remove('django_extensions.tests')
# FIXME: We need to fix the django_polymorphic tests
PROJECT_APPS.remove('polymorphic')
# Social auth tests require firefox webdriver which we don't want to install right now.
PROJECT_APPS.remove('social_auth')
# django-salesforce tests don't pass when it's not setup.
PROJECT_APPS.remove('salesforce')
# django-registration tests don't pass with our Django 1.5 custom user model / manager.
PROJECT_APPS.remove('registration')
# django_fluent_contents 0.8.5 tests don't pass with a Django 1.5 custom user model.
PROJECT_APPS.remove('fluent_contents')
PROJECT_APPS.remove('fluent_contents.plugins.text')
PROJECT_APPS.remove('fluent_contents.plugins.oembeditem')
PROJECT_APPS.remove('fluent_contents.plugins.rawhtml')
# Disable pylint because it seems to be causing problems
JENKINS_TASKS = (
# 'django_jenkins.tasks.run_pylint',
'django_jenkins.tasks.with_coverage',
'django_jenkins.tasks.django_tests',
)
|
# Django settings overlay for the Jenkins CI test run. An in-memory SQLite
# database keeps the run self-contained; the rest is inherited from .base.
# SECRET_KEY and DATABASES needs to be defined before the base settings is imported.
SECRET_KEY = 'hbqnTEq+m7Tk61bvRV/TLANr3i0WZ6hgBXDh3aYpSU8m+E1iCtlU3Q=='
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
}
# Star-import of the shared base settings; the values defined above
# intentionally pre-empt anything the base module would derive from them.
from .base import *
#
# Put jenkins environment specific overrides below.
#
INSTALLED_APPS += ('django_jenkins',)
DEBUG = False
TEMPLATE_DEBUG = False
# Test all INSTALLED_APPS by default
PROJECT_APPS = list(INSTALLED_APPS)
# Each .remove() below excludes an app whose test suite is known to fail in
# this environment; order matters only in that each app must be present.
# Some of these tests fail, and it's not our fault
# https://code.djangoproject.com/ticket/17966
PROJECT_APPS.remove('django.contrib.auth')
# This app fails with a strange error:
# DatabaseError: no such table: django_comments
# Not sure what's going on so it's disabled for now.
PROJECT_APPS.remove('django.contrib.sites')
# https://github.com/django-extensions/django-extensions/issues/154
PROJECT_APPS.remove('django_extensions')
PROJECT_APPS.remove('django_extensions.tests')
# FIXME: We need to fix the django_polymorphic tests
PROJECT_APPS.remove('polymorphic')
# Social auth tests require firefox webdriver which we don't want to install right now.
PROJECT_APPS.remove('social_auth')
# django-salesforce tests don't pass when it's not setup.
PROJECT_APPS.remove('salesforce')
# django_fluent_contents 0.8.5 tests don't pass with a Django 1.5 custom user model.
PROJECT_APPS.remove('fluent_contents')
PROJECT_APPS.remove('fluent_contents.plugins.text')
PROJECT_APPS.remove('fluent_contents.plugins.oembeditem')
PROJECT_APPS.remove('fluent_contents.plugins.rawhtml')
# Disable pylint because it seems to be causing problems
JENKINS_TASKS = (
# 'django_jenkins.tasks.run_pylint',
'django_jenkins.tasks.with_coverage',
'django_jenkins.tasks.django_tests',
)
| bsd-3-clause | Python |
1f6448b0d67d28dcca77bef1ac6bdb45d6c3f5d2 | swap emphasis | MUNComputerScienceSociety/Frisket | modules/fetch.py | modules/fetch.py | import willie.module
import re
from random import randint
from willie.formatting import bold
@willie.module.commands('throw')
@willie.module.commands('fetch')
def fetch(bot, trigger):
    """Play fetch: throw an object and see whether the bot brings it back."""
    roll = randint(0, 10)  # both bounds inclusive
    # Everything after the command word is the thrown object.
    item = ' '.join(re.split(' ', trigger)[1:])
    if roll < 5:
        bot.action('fetches the ' + bold(item) + ' for ' + trigger.nick)
    elif 5 <= roll < 10:
        bot.action('brings the ' + bold(item) + ' back to ' + trigger.nick)
    elif roll == 10:
        bot.action('ignores ' + trigger.nick + ' and chases a squirrel instead')
| import willie.module
import re
from random import randint
from willie.formatting import bold
@willie.module.commands('throw')
@willie.module.commands('fetch')
def fetch(bot, trigger):
    """Play fetch: throw an object and see whether the bot brings it back."""
    roll = randint(0, 10)  # both bounds inclusive
    # Everything after the command word is the thrown object.
    item = ' '.join(re.split(' ', trigger)[1:])
    if roll < 5:
        bot.action('fetches the ' + item + ' for ' + bold(trigger.nick))
    elif 5 <= roll < 10:
        bot.action('brings the ' + item + ' back to ' + bold(trigger.nick))
    elif roll == 10:
        bot.action('ignores ' + bold(trigger.nick) + ' and chases a squirrel instead')
| mit | Python |
4aba708916984c61cc7f5fd205d66e8f64634589 | Make compatible with python 3.4 | Davidyuk/witcoin,Davidyuk/witcoin | main/widgets.py | main/widgets.py | from django_filters.widgets import RangeWidget
class CustomRangeWidget(RangeWidget):
    """Range widget rendering two sub-widgets with 'От'/'До' placeholders."""

    def __init__(self, widget, attrs=None):
        # Use None instead of a mutable {} default argument: the shared dict
        # default would leak state between instances if ever mutated.
        attrs = attrs or {}
        attrs_start = {'placeholder': 'От'}
        attrs_start.update(attrs)
        attrs_stop = {'placeholder': 'До'}
        attrs_stop.update(attrs)
        # Deliberately skips RangeWidget.__init__ (super(RangeWidget, ...))
        # so the two sub-widgets can be supplied explicitly.
        super(RangeWidget, self).__init__((widget(attrs_start), widget(attrs_stop)), attrs)

    def format_output(self, rendered_widgets):
        """Join the two rendered inputs with an em-dash inside a table row."""
        rendered_widgets.insert(1, ' — ')
        return '<table class="range-widget"><tr><td>' + '</td><td>'.join(rendered_widgets) + '</td></tr></table>'
| from django_filters.widgets import RangeWidget
class CustomRangeWidget(RangeWidget):
    """Range widget rendering two sub-widgets with 'От'/'До' placeholders."""

    def __init__(self, widget, attrs=None):
        # Caller-supplied attrs override the placeholder defaults.
        extra = attrs or {}
        attrs_start = {'placeholder': 'От'}
        attrs_start.update(extra)
        attrs_stop = {'placeholder': 'До'}
        attrs_stop.update(extra)
        sub_widgets = (widget(attrs_start), widget(attrs_stop))
        # Deliberately skips RangeWidget.__init__ (super(RangeWidget, ...)).
        super(RangeWidget, self).__init__(sub_widgets, attrs)

    def format_output(self, rendered_widgets):
        """Join the two rendered inputs with an em-dash inside a table row."""
        rendered_widgets.insert(1, ' — ')
        cells = '</td><td>'.join(rendered_widgets)
        return '<table class="range-widget"><tr><td>' + cells + '</td></tr></table>'
| agpl-3.0 | Python |
312962b85e3f59117efbdb7d7faf7c1c4ff17f5d | Adjust messages_path to support having the locale files in /usr/share/locale | Xender/wtforms,subyraman/wtforms,Aaron1992/wtforms,Aaron1992/wtforms,hsum/wtforms,skytreader/wtforms,cklein/wtforms,pawl/wtforms,pawl/wtforms,jmagnusson/wtforms,wtforms/wtforms,crast/wtforms | wtforms/i18n.py | wtforms/i18n.py | import os
def messages_path():
    """
    Determine the path to the 'messages' directory as best possible.

    Prefers the 'locale' directory shipped next to this module; falls back
    to the system-wide /usr/share/locale when it does not exist.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    bundled = os.path.join(here, 'locale')
    return bundled if os.path.exists(bundled) else '/usr/share/locale'
def get_builtin_gnu_translations(languages=None):
    """
    Get a gettext.GNUTranslations object pointing at the
    included translation files.

    :param languages:
        A list of languages to try, in order. If omitted or None, then
        gettext will try to use locale information from the environment.
    """
    # Local import keeps gettext out of module import time.
    import gettext
    catalog_dir = messages_path()
    return gettext.translation('wtforms', catalog_dir, languages)
def get_translations(languages=None, getter=get_builtin_gnu_translations):
    """
    Get a WTForms translation object which wraps a low-level translations object.

    :param languages:
        A sequence of languages to try, in order.
    :param getter:
        A single-argument callable which returns a low-level translations object.
    """
    low_level = getter(languages)
    # Python 2 gettext objects expose ugettext/ungettext and must be wrapped;
    # Python 3 objects already have the gettext/ngettext interface.
    if not hasattr(low_level, 'ugettext'):
        return low_level
    return DefaultTranslations(low_level)
class DefaultTranslations(object):
    """
    Adapts a low-level translations object that exposes ugettext/ungettext
    (the Python 2 gettext API) to the WTForms gettext/ngettext interface.
    """

    def __init__(self, translations):
        # Keep a reference to the wrapped low-level translations object.
        self.translations = translations

    def gettext(self, string):
        """Translate *string* via the wrapped ugettext."""
        return self.translations.ugettext(string)

    def ngettext(self, singular, plural, n):
        """Translate a singular/plural pair via the wrapped ungettext."""
        return self.translations.ungettext(singular, plural, n)
class DummyTranslations(object):
    """
    No-op translations provider: every string is returned unmodified.

    Typically used when translations are disabled or no valid translations
    provider can be found.
    """

    def gettext(self, string):
        return string

    def ngettext(self, singular, plural, n):
        return singular if n == 1 else plural
| import os
def messages_path():
    """
    Determine the path to the 'messages' directory as best possible.

    Prefers the 'locale' directory shipped next to this module; falls back
    to the system-wide /usr/share/locale when the bundled directory is
    missing (e.g. distribution packages that install catalogs system-wide).
    """
    module_path = os.path.abspath(__file__)
    locale_path = os.path.join(os.path.dirname(module_path), 'locale')
    if not os.path.exists(locale_path):
        locale_path = '/usr/share/locale'
    return locale_path
def get_builtin_gnu_translations(languages=None):
    """
    Get a gettext.GNUTranslations object pointing at the
    included translation files.

    :param languages:
        A list of languages to try, in order. If omitted or None, then
        gettext will try to use locale information from the environment.
    """
    # Local import keeps gettext out of module import time.
    import gettext
    catalog_dir = messages_path()
    return gettext.translation('wtforms', catalog_dir, languages)
def get_translations(languages=None, getter=get_builtin_gnu_translations):
    """
    Get a WTForms translation object which wraps a low-level translations object.

    :param languages:
        A sequence of languages to try, in order.
    :param getter:
        A single-argument callable which returns a low-level translations object.
    """
    low_level = getter(languages)
    # Python 2 gettext objects expose ugettext/ungettext and must be wrapped;
    # Python 3 objects already have the gettext/ngettext interface.
    if not hasattr(low_level, 'ugettext'):
        return low_level
    return DefaultTranslations(low_level)
class DefaultTranslations(object):
    """
    Adapts a low-level translations object that exposes ugettext/ungettext
    (the Python 2 gettext API) to the WTForms gettext/ngettext interface.
    """

    def __init__(self, translations):
        # Keep a reference to the wrapped low-level translations object.
        self.translations = translations

    def gettext(self, string):
        """Translate *string* via the wrapped ugettext."""
        return self.translations.ugettext(string)

    def ngettext(self, singular, plural, n):
        """Translate a singular/plural pair via the wrapped ungettext."""
        return self.translations.ungettext(singular, plural, n)
class DummyTranslations(object):
    """
    No-op translations provider: every string is returned unmodified.

    Typically used when translations are disabled or no valid translations
    provider can be found.
    """

    def gettext(self, string):
        return string

    def ngettext(self, singular, plural, n):
        return singular if n == 1 else plural
| bsd-3-clause | Python |
94d66121368906b52fa8a9f214813b7b798c2b5b | Add constant for settings schema file path | MarquisLP/Sidewalk-Champion | lib/custom_data/settings_manager.py | lib/custom_data/settings_manager.py | """This module provides functions for saving to and loading data from
the settings XML file.
Attributes:
SETTINGS_PATH (String): The file path for the settings file.
SETTINGS_SCHEMA_PATH (String): The file path for the settings'
XML Schema.
"""
# NOTE(review): both paths are relative, so they resolve against the current
# working directory at runtime -- confirm callers run from the project root.
SETTINGS_PATH = 'settings.xml'
SETTINGS_SCHEMA_PATH = 'settings.xsd'
| """This module provides functions for saving to and loading data from
the settings XML file.
Attributes:
SETTINGS_PATH Filepath for the settings file.
"""
# NOTE(review): relative path, resolved against the current working directory
# at runtime -- confirm callers run from the project root.
SETTINGS_PATH = 'settings.xml'
| unlicense | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.