commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
206991a376c0d7ee9d431c82323b1da72593708d | Revert "Improve urls formatting." | Imaginashion/cloud-vision,Imaginashion/cloud-vision,extremoburo/django-jquery-file-upload,Imaginashion/cloud-vision,sigurdga/django-jquery-file-upload,indrajithi/mgc-django,Imaginashion/cloud-vision,extremoburo/django-jquery-file-upload,extremoburo/django-jquery-file-upload,indrajithi/mgc-django,Imaginashion/cloud-vision,minhlongdo/django-jquery-file-upload,sigurdga/django-jquery-file-upload,sigurdga/django-jquery-file-upload,minhlongdo/django-jquery-file-upload,vaniakov/django-jquery-file-upload,vaniakov/django-jquery-file-upload,vaniakov/django-jquery-file-upload,Imaginashion/cloud-vision,minhlongdo/django-jquery-file-upload | fileupload/urls.py | fileupload/urls.py | # encoding: utf-8
from django.conf.urls import patterns, url
from fileupload.views import BasicVersionCreateView, BasicPlusVersionCreateView, PictureCreateView, AngularVersionCreateView, jQueryVersionCreateView, PictureDeleteView
urlpatterns = patterns('',
(r'^basic/$', BasicVersionCreateView.as_view(), {}, 'upload-basic'),
(r'^basic/plus/$', BasicPlusVersionCreateView.as_view(), {}, 'upload-basic-plus'),
(r'^new/$', PictureCreateView.as_view(), {}, 'upload-new'),
(r'^angular/$', AngularVersionCreateView.as_view(), {}, 'upload-angular'),
(r'^jquery-ui/$', jQueryVersionCreateView.as_view(), {}, 'upload-jquery'),
(r'^delete/(?P<pk>\d+)$', PictureDeleteView.as_view(), {}, 'upload-delete'),
url(r'^view/$', 'fileupload.views.PictureListView', name='upload-view'),
)
| # encoding: utf-8
from django.conf.urls import patterns, url
from .views import (BasicVersionCreateView, BasicPlusVersionCreateView,
PictureCreateView, AngularVersionCreateView, jQueryVersionCreateView,
PictureDeleteView, PictureListView)
urlpatterns = patterns('',
url(
r'^basic/$',
BasicVersionCreateView.as_view(),
name='upload-basic'
),
url(
r'^basic/plus/$',
BasicPlusVersionCreateView.as_view(),
name='upload-basic-plus'
),
url(
r'^new/$',
PictureCreateView.as_view(),
name='upload-new'
),
url(
r'^angular/$',
AngularVersionCreateView.as_view(),
name='upload-angular'
),
url(
r'^jquery-ui/$',
jQueryVersionCreateView.as_view(),
name='upload-jquery'
),
url(
r'^delete/(?P<pk>\d+)$',
PictureDeleteView.as_view(),
name='upload-delete'
),
url(
r'^view/$',
PictureListView.as_view(),
name='upload-view'
),
)
| mit | Python |
cf972237100aa556a748cc84732b2e268a341dc9 | Remove demo string | kentmacdonald2/Sas-Problem-Fixer | find_duplicates.py | find_duplicates.py | def find_problems(given_list = ["12345678a", "12345678b", "12345678c", "abcdefghi1","123"],print_me=True):
"""
Scans a list of strings to find any cases where the first 8 characters are matching
:param given_list: A string or list of strings to scan (optional, will demo otherwise)
:param print_me: Boolean to choose if matches should be printed (Optional, defaults to true)
:return: List of all matches
"""
#If given a string, convert to a list of strings
if isinstance(given_list,str):
#If it contains commas, split via commas
if "," in given_list:
given_list = given_list.split(",")
#If is doesnt have commas, use spaces
else:
given_list= given_list.split()
trim_first_spaces(given_list)
#Populate a list of keys
keys = []
for curr in given_list:
keys.append(curr[:8])
matching_strings = []
index1 = 0
#Check for matches
for curr3 in keys:
index2 = 0
for curr4 in given_list:
#Ignore matching with itself
if curr3 == curr4[0:8] and index1 != index2:
#If not already found
if not curr4 in matching_strings:
matching_strings.append(curr4)
index2 += 1
index1 += 1
#Pass in False to make this be quiet
if print_me:
print "Given List: "
print given_list
print "Problematic values: "
print matching_strings.__str__()
return matching_strings
def trim_first_spaces(given_list):
"""
Fixes case in which the infile was delimited with commas, but had a starting space
:param given_list: List supplied
:return: List without leading spaces
"""
p = 0
for current_item in given_list:
if given_list[p].startswith(" "):
given_list[p] = given_list[p][1:]
p += 1
if __name__ == '__main__':
find_problems()
| def find_problems(given_list = ["12345678a", "12345678b", "12345678c", "abcdefghi1","123"],print_me=True):
"""
Scans a list of strings to find any cases where the first 8 characters are matching
:param given_list: A string or list of strings to scan (optional, will demo otherwise)
:param print_me: Boolean to choose if matches should be printed (Optional, defaults to true)
:return: List of all matches
"""
#If given a string, convert to a list of strings
if isinstance(given_list,str):
#If it contains commas, split via commas
if "," in given_list:
given_list = given_list.split(",")
#If is doesnt have commas, use spaces
else:
given_list= given_list.split()
trim_first_spaces(given_list)
#Populate a list of keys
keys = []
for curr in given_list:
keys.append(curr[:8])
matching_strings = []
index1 = 0
#Check for matches
for curr3 in keys:
index2 = 0
for curr4 in given_list:
#Ignore matching with itself
if curr3 == curr4[0:8] and index1 != index2:
#If not already found
if not curr4 in matching_strings:
matching_strings.append(curr4)
index2 += 1
index1 += 1
#Pass in False to make this be quiet
if print_me:
print "Given List: "
print given_list
print "Problematic values: "
print matching_strings.__str__()
return matching_strings
def trim_first_spaces(given_list):
"""
Fixes case in which the infile was delimited with commas, but had a starting space
:param given_list: List supplied
:return: List without leading spaces
"""
p = 0
for current_item in given_list:
if given_list[p].startswith(" "):
given_list[p] = given_list[p][1:]
p += 1
if __name__ == '__main__':
find_problems("This,is, a, demo")
| mit | Python |
6f24dbf7b9da9cfe3565c697e8615949bd39cfcc | Fix celery tasks logging and add shortcut for app context | Infinidat/lanister,vmalloc/mailboxer,getslash/mailboxer,vmalloc/mailboxer,vmalloc/mailboxer,getslash/mailboxer,Infinidat/lanister,getslash/mailboxer | flask_app/tasks.py | flask_app/tasks.py | from __future__ import absolute_import
import functools
import os
import sys
import logbook
from celery import Celery
from celery.signals import after_setup_logger, after_setup_task_logger
from .app import create_app
_logger = logbook.Logger(__name__)
queue = Celery('tasks', broker='redis://localhost')
queue.conf.update(
CELERY_TASK_SERIALIZER='json',
CELERY_ACCEPT_CONTENT=['json'], # Ignore other content
CELERY_RESULT_SERIALIZER='json',
CELERY_ENABLE_UTC=True,
)
def setup_log(**args):
logbook.SyslogHandler().push_application()
logbook.StreamHandler(sys.stderr, bubble=True).push_application()
APP = None
def needs_app_context(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
global APP
if APP is None:
APP = create_app()
with APP.app_context():
return f(*args, **kwargs)
return wrapper
after_setup_logger.connect(setup_log)
after_setup_task_logger.connect(setup_log)
| from __future__ import absolute_import
from logging import Formatter
from logbook.compat import LoggingHandler
from logging.handlers import SysLogHandler
from celery import Celery
from celery.signals import after_setup_task_logger, after_setup_logger
import os
queue = Celery('tasks', broker='redis://localhost')
queue.conf.update(
CELERY_TASK_SERIALIZER='json',
CELERY_ACCEPT_CONTENT=['json'], # Ignore other content
CELERY_RESULT_SERIALIZER='json',
CELERY_ENABLE_UTC=True,
)
def setup_log(**args):
if os.path.exists("/dev/log"):
hl = SysLogHandler('/dev/log')
hl.setLevel(args['loglevel'])
formatter = Formatter("%(message)s")
hl.setFormatter(formatter)
args['logger'].addHandler(hl)
LoggingHandler().push_application()
after_setup_logger.connect(setup_log)
after_setup_task_logger.connect(setup_log)
| bsd-3-clause | Python |
571fc0ace1712e772baf08d60a25a33b6c231f76 | test sublime | pouyana/teireader,pouyana/teireader,pouyana/teireader,pouyana/teireader,pouyana/teireader,pouyana/teireader | parser.py | parser.py | #!#/usr/bin/env python
# -*- coding: utf-8 -*- | #!#/usr/bin/env python
# -*- coding: utf-8 -*-
hi | mit | Python |
38357d8d6043561084c8f7469ee11d373878cf3b | Fix spacing in jnp tile | e3nn/e3nn-jax,e3nn/e3nn-jax | e3nn_jax/_dropout.py | e3nn_jax/_dropout.py | import jax
import jax.numpy as jnp
import haiku as hk
from e3nn_jax import Irreps
class Dropout(hk.Module):
"""Equivariant Dropout
:math:`A_{zai}` is the input and :math:`B_{zai}` is the output where
- ``z`` is the batch index
- ``a`` any non-batch and non-irrep index
- ``i`` is the irrep index, for instance if ``irreps="0e + 2x1e"`` then ``i=2`` select the *second vector*
.. math::
B_{zai} = \frac{x_{zi}}{1-p} A_{zai}
where :math:`p` is the dropout probability and :math:`x` is a Bernoulli random variable with parameter :math:`1-p`.
Parameters
----------
irreps : `Irreps`
representation
p : float
probability to drop
"""
def __init__(self, irreps, p):
super().__init__()
self.irreps = Irreps(irreps)
self.p = p
def __repr__(self):
return f"{self.__class__.__name__} ({self.irreps}, p={self.p})"
def __call__(self, rng, x, is_training=True):
"""evaluate
Parameters
----------
input : `DeviceArray`
tensor of shape ``(batch, ..., irreps.dim)``
Returns
-------
`DeviceArray`
tensor of shape ``(batch, ..., irreps.dim)``
"""
if not is_training:
return x
batch = x.shape[0]
noises = []
for mul, ir in self.irreps:
dim = ir.dim
if self.p >= 1:
noise = jnp.zeros((batch, mul, 1), dtype=x.dtype)
elif self.p <= 0:
noise = jnp.ones((batch, mul, 1), dtype=x.dtype)
else:
noise = jax.random.bernoulli(rng, p=1 - self.p, shape=(batch, mul, 1)) / (1 - self.p)
noise = jnp.tile(noise, (1, 1, dim)).reshape(batch, mul * dim)
noises.append(noise)
noise = jnp.concatenate(noises, axis=-1)
while len(noise.shape) < len(x.shape):
noise = noise[:, None]
return x * noise
| import jax
import jax.numpy as jnp
import haiku as hk
from e3nn_jax import Irreps
class Dropout(hk.Module):
"""Equivariant Dropout
:math:`A_{zai}` is the input and :math:`B_{zai}` is the output where
- ``z`` is the batch index
- ``a`` any non-batch and non-irrep index
- ``i`` is the irrep index, for instance if ``irreps="0e + 2x1e"`` then ``i=2`` select the *second vector*
.. math::
B_{zai} = \frac{x_{zi}}{1-p} A_{zai}
where :math:`p` is the dropout probability and :math:`x` is a Bernoulli random variable with parameter :math:`1-p`.
Parameters
----------
irreps : `Irreps`
representation
p : float
probability to drop
"""
def __init__(self, irreps, p):
super().__init__()
self.irreps = Irreps(irreps)
self.p = p
def __repr__(self):
return f"{self.__class__.__name__} ({self.irreps}, p={self.p})"
def __call__(self, rng, x, is_training=True):
"""evaluate
Parameters
----------
input : `DeviceArray`
tensor of shape ``(batch, ..., irreps.dim)``
Returns
-------
`DeviceArray`
tensor of shape ``(batch, ..., irreps.dim)``
"""
if not is_training:
return x
batch = x.shape[0]
noises = []
for mul, ir in self.irreps:
dim = ir.dim
if self.p >= 1:
noise = jnp.zeros((batch, mul, 1), dtype=x.dtype)
elif self.p <= 0:
noise = jnp.ones((batch, mul, 1), dtype=x.dtype)
else:
noise = jax.random.bernoulli(rng, p=1 - self.p, shape=(batch, mul, 1)) / (1 - self.p)
noise = jnp.tile(noise,(1,1,dim)).reshape(batch, mul * dim)
noises.append(noise)
noise = jnp.concatenate(noises, axis=-1)
while len(noise.shape) < len(x.shape):
noise = noise[:, None]
return x * noise
| apache-2.0 | Python |
e48e0654e1bbed6c3e49835c5a40356fa6127fe5 | Make the REPL loop. | darikg/pgcli,j-bennet/pgcli,w4ngyi/pgcli,bitmonk/pgcli,bitmonk/pgcli,lk1ngaa7/pgcli,dbcli/vcli,stuartquin/pgcli,joewalnes/pgcli,bitemyapp/pgcli,koljonen/pgcli,d33tah/pgcli,n-someya/pgcli,dbcli/pgcli,MattOates/pgcli,janusnic/pgcli,lk1ngaa7/pgcli,janusnic/pgcli,bitemyapp/pgcli,suzukaze/pgcli,nosun/pgcli,TamasNo1/pgcli,w4ngyi/pgcli,nosun/pgcli,johshoff/pgcli,suzukaze/pgcli,darikg/pgcli,dbcli/pgcli,MattOates/pgcli,joewalnes/pgcli,zhiyuanshi/pgcli,koljonen/pgcli,zhiyuanshi/pgcli,thedrow/pgcli,thedrow/pgcli,yx91490/pgcli,yx91490/pgcli,TamasNo1/pgcli,dbcli/vcli,n-someya/pgcli,johshoff/pgcli,d33tah/pgcli,j-bennet/pgcli | pg-cli.py | pg-cli.py | #!/usr/bin/env python
from __future__ import unicode_literals
from prompt_toolkit import CommandLineInterface, AbortAction, Exit
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.line import Line
from prompt_toolkit.layout import Layout
from prompt_toolkit.layout.prompt import DefaultPrompt
from prompt_toolkit.layout.menus import CompletionMenu
from pygments.token import Token
from pygments.style import Style
from pygments.lexers.sql import SqlLexer
from pygments.styles.default import DefaultStyle
class SqlCompleter(Completer):
keywords = [
'SELECT',
'INSERT',
'ALTER',
'DROP',
'DELETE',
'FROM',
'WHERE',
]
def get_completions(self, document):
word_before_cursor = document.get_word_before_cursor()
for keyword in self.keywords:
if (keyword.startswith(word_before_cursor) or
keyword.startswith(word_before_cursor.upper())):
yield Completion(keyword, -len(word_before_cursor))
class DocumentStyle(Style):
styles = {
Token.Menu.Completions.Completion.Current: 'bg:#00aaaa #000000',
Token.Menu.Completions.Completion: 'bg:#008888 #ffffff',
Token.Menu.Completions.ProgressButton: 'bg:#003333',
Token.Menu.Completions.ProgressBar: 'bg:#00aaaa',
Token.SelectedText: '#ffffff bg:#6666aa',
Token.IncrementalSearchMatch: '#ffffff bg:#4444aa',
Token.IncrementalSearchMatch.Current: '#ffffff bg:#44aa44',
}
styles.update(DefaultStyle.styles)
def main():
layout = Layout(before_input=DefaultPrompt('> '),
menus=[CompletionMenu()],
lexer=SqlLexer)
line = Line(completer=SqlCompleter())
cli = CommandLineInterface(style=DocumentStyle, layout=layout, line=line)
try:
while True:
document = cli.read_input(on_exit=AbortAction.RAISE_EXCEPTION)
print 'You entered:', document.text
except Exit:
print 'GoodBye!'
if __name__ == '__main__':
main()
| #!/usr/bin/env python
from __future__ import unicode_literals
from prompt_toolkit import CommandLineInterface
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.line import Line
from prompt_toolkit.layout import Layout
from prompt_toolkit.layout.prompt import DefaultPrompt
from prompt_toolkit.layout.menus import CompletionMenu
from pygments.token import Token
from pygments.style import Style
from pygments.lexers.sql import SqlLexer
from pygments.styles.default import DefaultStyle
class SqlCompleter(Completer):
keywords = [
'SELECT',
'INSERT',
'ALTER',
'DROP',
'DELETE',
'FROM',
'WHERE',
]
def get_completions(self, document):
word_before_cursor = document.get_word_before_cursor()
for keyword in self.keywords:
if (keyword.startswith(word_before_cursor) or
keyword.startswith(word_before_cursor.upper())):
yield Completion(keyword, -len(word_before_cursor))
class DocumentStyle(Style):
styles = {
Token.Menu.Completions.Completion.Current: 'bg:#00aaaa #000000',
Token.Menu.Completions.Completion: 'bg:#008888 #ffffff',
Token.Menu.Completions.ProgressButton: 'bg:#003333',
Token.Menu.Completions.ProgressBar: 'bg:#00aaaa',
Token.SelectedText: '#ffffff bg:#6666aa',
Token.IncrementalSearchMatch: '#ffffff bg:#4444aa',
Token.IncrementalSearchMatch.Current: '#ffffff bg:#44aa44',
}
styles.update(DefaultStyle.styles)
def main():
cli = CommandLineInterface(style=DocumentStyle,
layout=Layout(before_input=DefaultPrompt('> '),
menus=[CompletionMenu()],
lexer=SqlLexer),
line=Line(completer=SqlCompleter())
)
print('Press tab to complete')
code_obj = cli.read_input()
print('You said: ' + code_obj.text)
if __name__ == '__main__':
main()
| bsd-3-clause | Python |
67c110e7e7d64727de66cc992fb9b21bf75d4dfc | fix url to image because in expression like this: for x in Model.objects[:10]: print(x.image) then x.image get incorrect url without THUMBOR_HOST | gerasim13/libthumbor | libthumbor/flask/field.py | libthumbor/flask/field.py | from werkzeug.datastructures import FileStorage
from urllib.parse import urlparse, urljoin
from flask import current_app
from mongoengine.base import BaseField
from libthumbor.crypto import CryptoURL
from mongoengine import *
import requests
crypto_url = None
class ThumborData(str):
def __new__(self, content = None, data = None):
with current_app.app_context():
if isinstance(data, FileStorage):
files = { 'media': data }
response = requests.post(current_app.config['THUMBOR_IMAGE_ENDPOINT'], files=files)
content = response.headers['location']
return str.__new__(self, content)
def delete(self, **kwargs):
with current_app.app_context():
url = urljoin(current_app.config['THUMBOR_IMAGE_ENDPOINT'], self)
requests.delete(url)
def image(self, **kwargs):
with current_app.app_context():
global crypto_url
if crypto_url == None:
crypto_url = CryptoURL(key=current_app.config['THUMBOR_SECURITY_KEY'])
if len(self) > 0:
_url = urljoin('{u.scheme}://{u.netloc}'.format(u=urlparse(current_app.config['THUMBOR_HOST'])), crypto_url.generate(image_url='/'.join(self.split('/')[2:]), **kwargs))
return _url
return ''
def endpoint(self):
with current_app.app_context():
return urljoin(current_app.config['THUMBOR_HOST'], self)
return ''
def __str__(self):
return self.endpoint()
def __repr__(self):
return self.endpoint()
class ThumborField(BaseField):
def validate(self, value):
if not isinstance(value, (type(None), ThumborData, str, list)):
self.error('{0} is not a valid Thumbor data'.format(value))
return
def to_python(self, value):
return ThumborData(value)
| from werkzeug.datastructures import FileStorage
from urllib.parse import urlparse, urljoin
from flask import current_app
from mongoengine.base import BaseField
from libthumbor.crypto import CryptoURL
from mongoengine import *
import requests
crypto_url = None
class ThumborData(str):
def __new__(self, content = None, data = None):
with current_app.app_context():
if isinstance(data, FileStorage):
files = { 'media': data }
response = requests.post(current_app.config['THUMBOR_IMAGE_ENDPOINT'], files=files)
content = response.headers['location']
return str.__new__(self, content)
def delete(self, **kwargs):
with current_app.app_context():
url = urljoin(current_app.config['THUMBOR_IMAGE_ENDPOINT'], self)
requests.delete(url)
def image(self, **kwargs):
with current_app.app_context():
global crypto_url
if crypto_url == None:
crypto_url = CryptoURL(key=current_app.config['THUMBOR_SECURITY_KEY'])
if len(self) > 0:
_url = urljoin('{u.scheme}://{u.netloc}'.format(u=urlparse(current_app.config['THUMBOR_HOST'])), crypto_url.generate(image_url='/'.join(self.split('/')[2:]), **kwargs))
return _url
return ''
def endpoint(self):
with current_app.app_context():
return urljoin(current_app.config['THUMBOR_HOST'], self)
return ''
def __repr__(self):
return self.endpoint()
class ThumborField(BaseField):
def validate(self, value):
if not isinstance(value, (type(None), ThumborData, str, list)):
self.error('{0} is not a valid Thumbor data'.format(value))
return
def to_python(self, value):
return ThumborData(value)
| mit | Python |
d092c8de1bc018968eb87137c9003aa4b5a43e39 | delete useless if..elif.. | ccagg/xunlei,windygu/xunlei-lixian,davies/xunlei-lixian,liujianpc/xunlei-lixian,xieyanhao/xunlei-lixian,sndnvaps/xunlei-lixian,iambus/xunlei-lixian,myself659/xunlei-lixian,GeassDB/xunlei-lixian,wangjun/xunlei-lixian,wogong/xunlei-lixian,sdgdsffdsfff/xunlei-lixian | lixian_commands/delete.py | lixian_commands/delete.py |
from lixian import XunleiClient
from lixian_commands.util import *
from lixian_cli_parser import *
from lixian_encoding import default_encoding
from lixian_colors import colors
import lixian_help
import lixian_query
@command_line_parser(help=lixian_help.delete)
@with_parser(parse_login)
@with_parser(parse_colors)
@command_line_option('i')
@command_line_option('all')
def delete_task(args):
client = XunleiClient(args.username, args.password, args.cookies)
to_delete = lixian_query.search_tasks(client, args)
if not to_delete:
print 'Nothing to delete'
return
with colors(args.colors).red.bold():
print "Below files are going to be deleted:"
for x in to_delete:
print x['name'].encode(default_encoding)
if args.i:
yes_or_no = raw_input('Are your sure to delete below files from Xunlei cloud? ')
while yes_or_no.lower() not in ('y', 'yes', 'n', 'no'):
yes_or_no = raw_input('yes or no? ')
if yes_or_no.lower() in ('y', 'yes'):
pass
elif yes_or_no.lower() in ('n', 'no'):
raise RuntimeError('Deletion abort per user request.')
client.delete_tasks(to_delete)
|
from lixian import XunleiClient
from lixian_commands.util import *
from lixian_cli_parser import *
from lixian_encoding import default_encoding
from lixian_colors import colors
import lixian_help
import lixian_query
@command_line_parser(help=lixian_help.delete)
@with_parser(parse_login)
@with_parser(parse_colors)
@command_line_option('i')
@command_line_option('all')
def delete_task(args):
client = XunleiClient(args.username, args.password, args.cookies)
if len(args):
to_delete = lixian_query.search_tasks(client, args)
elif args.all:
to_delete = client.read_all_tasks()
if not to_delete:
print 'Nothing to delete'
return
with colors(args.colors).red.bold():
print "Below files are going to be deleted:"
for x in to_delete:
print x['name'].encode(default_encoding)
if args.i:
yes_or_no = raw_input('Are your sure to delete below files from Xunlei cloud? ')
while yes_or_no.lower() not in ('y', 'yes', 'n', 'no'):
yes_or_no = raw_input('yes or no? ')
if yes_or_no.lower() in ('y', 'yes'):
pass
elif yes_or_no.lower() in ('n', 'no'):
raise RuntimeError('Deletion abort per user request.')
client.delete_tasks(to_delete)
| mit | Python |
a5eb5450a91eb43a2227bdc5f3d25555faaf8544 | Add hypen as delimiter | suclearnub/discordgrapher | scrape.py | scrape.py | import discord
import asyncio
from tqdm import tqdm
import argparse
parser = argparse.ArgumentParser(description='Discord channel scraper')
requiredNamed = parser.add_argument_group('Required arguments:')
requiredNamed.add_argument('-c', '--channel', type=str, help='Channel to scrape. Requires the channel ID.', required=True)
requiredNamed.add_argument('-o', '--output', type=str, help='Output file in form *.txt. Will be stored in the same directory.', required=True)
args = parser.parse_args()
print(args.channel)
print(args.output)
client = discord.Client()
@client.event
async def on_ready():
print('Connection successful.')
print('Your ID: ' + client.user.id)
target = open(args.output, 'w')
print(args.output, 'has been opened.')
messageCount = 0
channel = discord.Object(id=args.channel)
print("Scraping messages... Don't send any messages while scraping!")
with tqdm(leave=True,unit=' messages') as scraped:
async for msg in client.logs_from(channel, 10000000000):
line = "{} - {} - {}".format(msg.timestamp,msg.author.name, msg.content)
line = line.encode('utf-8')
toWrite = "{}".format(line)
target.write(toWrite)
target.write("\n")
messageCount += 1
scraped.update(1)
print('-----')
print('Scraping complete.')
#----------------------------
client.run('email', 'password')
| import discord
import asyncio
from tqdm import tqdm
import argparse
parser = argparse.ArgumentParser(description='Discord channel scraper')
requiredNamed = parser.add_argument_group('Required arguments:')
requiredNamed.add_argument('-c', '--channel', type=str, help='Channel to scrape. Requires the channel ID.', required=True)
requiredNamed.add_argument('-o', '--output', type=str, help='Output file in form *.txt. Will be stored in the same directory.', required=True)
args = parser.parse_args()
print(args.channel)
print(args.output)
client = discord.Client()
@client.event
async def on_ready():
print('Connection successful.')
print('Your ID: ' + client.user.id)
target = open(args.output, 'w')
print(args.output, 'has been opened.')
messageCount = 0
channel = discord.Object(id=args.channel)
print("Scraping messages... Don't send any messages while scraping!")
with tqdm(leave=True,unit=' messages') as scraped:
async for msg in client.logs_from(channel, 10000000000):
line = "{} {} {}".format(msg.timestamp,msg.author.name, msg.content)
line = line.encode('utf-8')
toWrite = "{}".format(line)
target.write(toWrite)
target.write("\n")
messageCount += 1
scraped.update(1)
print('-----')
print('Scraping complete.')
#----------------------------
client.run('email', 'password')
| mit | Python |
0af3258cd3a6ad4a75aad8c967d72f7c1b755624 | Refactor to use with-block | kentoj/python-fundamentals | series.py | series.py | """Read and print an integer series."""
import sys
def read_series(filename):
with open(filename, mode='rt', encoding='utf-8') as f:
return [int(line.strip()) for line in f]
def main(filename):
print(read_series(filename))
if __name__ == '__main__':
main(sys.argv[1])
| """Read and print an integer series."""
import sys
def read_series(filename):
try:
f = open(filename, mode='rt', encoding='utf-8')
return [int(line.strip()) for line in f]
finally:
f.close()
def main(filename):
print(read_series(filename))
if __name__ == '__main__':
main(sys.argv[1])
| mit | Python |
fb02617b29cab97a70a1a11b0d3b7b62b834aa3b | Structure for sending dummy files | rotemh/soteria | server.py | server.py | from flask import Flask
from flask import request
import flask
import hashlib
import json
import gzip
app = Flask(__name__)
stored_files = {}
@app.route('/profile/<type>', methods=['GET'])
def get_dummy_files(type):
if type == 'lawyer':
gzip_address = './zipfiles/doc.tar.gz'
elif type == 'doctor:':
gzip_address = './zipfiles/doc.tar.gz'
elif type == 'female':
gzip_address = './zipfiles/doc.tar.gz'
elif type == 'male':
gzip_address = './zipfiles/doc.tar.gz'
else:
return "No files here\n"
gzip_file = open(gzip_address).read()
return bytearray(gzip_file)
@app.route('/<int:id>', methods=['GET'])
def get_file(id):
if id in stored_files:
return stored_files[id]
else:
return "No such file\n"
@app.route('/', methods=['POST'])
def upload_file():
data = json.loads(request.data)
uploaded_file = data['uploaded_file']
salt = data['salt']
id = hashlib.sha256(uploaded_file.encode()).hexdigest()
stored_files[id] = (uploaded_file, salt)
return "File stored\n"
if __name__ == "__main__":
app.run()
| from flask import Flask
from flask import request
import flask
import hashlib
import json
import gzip
app = Flask(__name__)
stored_files = {}
@app.route('/profile/<type>', methods=['GET'])
def get_dummy_files(type):
if type == 'lawyer':
pass
elif type == 'doctor:':
pass
elif type == 'female':
pass
elif type == 'male':
pass
else:
return "No files here\n"
return "Sent files\n"
@app.route('/<int:id>', methods=['GET'])
def get_file(id):
if id in stored_files:
return stored_files[id]
else:
return "No such file\n"
@app.route('/', methods=['POST'])
def upload_file():
data = json.loads(request.data)
uploaded_file = data['uploaded_file']
salt = data['salt']
id = hashlib.sha256(uploaded_file.encode()).hexdigest()
stored_files[id] = (uploaded_file, salt)
return "File stored\n"
if __name__ == "__main__":
app.run()
| mit | Python |
82424fa6ce90f384e418c54bd9bdc61216e550d8 | Remove shadowing of file builtin in server.py. PEP8 formatting | Capocaccia/reacting,Capocaccia/reacting | server.py | server.py | # This file provided by Facebook is for non-commercial testing and evaluation
# purposes only. Facebook reserves all rights not expressly granted.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# FACEBOOK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import os
import time
from flask import Flask, Response, request
app = Flask(__name__, static_url_path='', static_folder='public')
app.add_url_rule('/', 'root', lambda: app.send_static_file('index.html'))
@app.route('/api/comments', methods=['GET', 'POST'])
def comments_handler():
with open('comments.json', 'r') as f:
comments = json.loads(f.read())
if request.method == 'POST':
new_comment = request.form.to_dict()
new_comment['id'] = int(time.time() * 1000)
comments.append(new_comment)
with open('comments.json', 'w') as f:
f.write(json.dumps(comments, indent=4, separators=(',', ': ')))
return Response(
json.dumps(comments),
mimetype='application/json',
headers={
'Cache-Control': 'no-cache',
'Access-Control-Allow-Origin': '*'
}
)
if __name__ == '__main__':
app.run(port=int(os.environ.get("PORT", 3000)))
| # This file provided by Facebook is for non-commercial testing and evaluation
# purposes only. Facebook reserves all rights not expressly granted.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# FACEBOOK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import os
import time
from flask import Flask, Response, request
app = Flask(__name__, static_url_path='', static_folder='public')
app.add_url_rule('/', 'root', lambda: app.send_static_file('index.html'))
@app.route('/api/comments', methods=['GET', 'POST'])
def comments_handler():
with open('comments.json', 'r') as file:
comments = json.loads(file.read())
if request.method == 'POST':
newComment = request.form.to_dict()
newComment['id'] = int(time.time() * 1000)
comments.append(newComment)
with open('comments.json', 'w') as file:
file.write(json.dumps(comments, indent=4, separators=(',', ': ')))
return Response(json.dumps(comments), mimetype='application/json', headers={'Cache-Control': 'no-cache', 'Access-Control-Allow-Origin': '*'})
if __name__ == '__main__':
app.run(port=int(os.environ.get("PORT",3000)))
| mit | Python |
0fbea689d8debebea36a96d960946bc4d49fe004 | Fix a bug that would only show the first letter of the announcing message of a new client on the server to other clients. | nvanheuverzwijn/naughty-chat | server.py | server.py | import socket
import os
import select
import sys
import commands
import parsers
import clients
import string
import protocols
class Server(object):
"""The chat server. It relays communication between client."""
_port = 0
_bind = ""
_server_socket = None
_clients = []
@property
def port(self):
return self._port
@port.setter
def port(self, value):
self._port = value
@property
def bind(self):
return self._bind
@bind.setter
def bind(self, value):
self._bind = value
@property
def server_socket(self):
return self._server_socket
@property
def clients(self):
return self._clients
def __init__(self, port=9999, bind="0.0.0.0", parser=None):
self.port = port
self.bind = bind
try:
self.parser = parsers.getParser(parser)
except NameError, e:
print e.message
print "Now using default parser"
self.parser = parsers.getParser("Parser")
def disconnect_client(self, client):
self.clients.remove(client)
client.socket.close()
def listen(self):
self._server_socket = socket.socket(socket.AF_INET)
self._server_socket.bind((self.bind, self.port))
self._server_socket.listen(10)
while True:
inputready, outputready, exceptready = select.select(self.clients + [self.server_socket],[],[])
for sock in inputready:
if sock == self.server_socket:
self.__handle_new_connection()
else:
self.__handle_request(sock)
def __handle_new_connection(self):
"""This is called whenever a new connection is initiated"""
socket, address = self.server_socket.accept()
client = clients.Client(ip=address[0], name=address[0], protocol=protocols.Raw(), socket=socket, server=self)
self.clients.append(client)
cmd = commands.Broadcast(self, client, ["HERE COMES DADDY!"])
cmd.execute()
def __handle_request(self, caller):
"""This is called whenever data is received from one of the client."""
try:
data = caller.receive()
result = self.parser.parse(data)
cmd = commands.getCommand(result[0], self, caller, result[1])
try:
cmd.execute()
except clients.CouldNotSendRequestError, e:
#Tell the client that the command could not be executed properly.
pass
except clients.SocketError, e:
self.disconnect_client(caller)
except clients.ClientIsNotFinishedSendingError, e:
pass
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='kronos-chat server')
    parser.add_argument("--port", metavar="PORT", type=int, help="the port to listen to")
    parser.add_argument("--bind", metavar="IP", type=str, help="the ip to listen on")
    args = parser.parse_args()
    # Previously the parsed --port/--bind values were silently ignored;
    # forward them to the server when supplied.
    kwargs = {'parser': "Parser"}
    if args.port is not None:
        kwargs['port'] = args.port
    if args.bind is not None:
        kwargs['bind'] = args.bind
    s = Server(**kwargs)
    s.listen()
| import socket
import os
import select
import sys
import commands
import parsers
import clients
import string
import protocols
class Server(object):
"""The chat server. It relays communication between client."""
_port = 0
_bind = ""
_server_socket = None
_clients = []
@property
def port(self):
return self._port
@port.setter
def port(self, value):
self._port = value
@property
def bind(self):
return self._bind
@bind.setter
def bind(self, value):
self._bind = value
@property
def server_socket(self):
return self._server_socket
@property
def clients(self):
return self._clients
def __init__(self, port=9999, bind="0.0.0.0", parser=None):
self.port = port
self.bind = bind
try:
self.parser = parsers.getParser(parser)
except NameError, e:
print e.message
print "Now using default parser"
self.parser = parsers.getParser("Parser")
def disconnect_client(self, client):
self.clients.remove(client)
client.socket.close()
def listen(self):
self._server_socket = socket.socket(socket.AF_INET)
self._server_socket.bind((self.bind, self.port))
self._server_socket.listen(10)
while True:
inputready, outputready, exceptready = select.select(self.clients + [self.server_socket],[],[])
for sock in inputready:
if sock == self.server_socket:
self.__handle_new_connection()
else:
self.__handle_request(sock)
def __handle_new_connection(self):
"""This is called whenever a new connection is initiated"""
socket, address = self.server_socket.accept()
client = clients.Client(ip=address[0], name=address[0], protocol=protocols.Raw(), socket=socket, server=self)
self.clients.append(client)
cmd = commands.Broadcast(self, client, "HERE COMES DADDY!")
cmd.execute()
def __handle_request(self, caller):
"""This is called whenever data is received from one of the client."""
try:
data = caller.receive()
result = self.parser.parse(data)
cmd = commands.getCommand(result[0], self, caller, result[1])
try:
cmd.execute()
except clients.CouldNotSendRequestError, e:
#Tell the client that the command could not be executed properly.
pass
except clients.SocketError, e:
self.disconnect_client(caller)
except clients.ClientIsNotFinishedSendingError, e:
pass
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='kronos-chat server')
parser.add_argument("--port", metavar="PORT", type=int, help="the port to listen to")
parser.add_argument("--bind", metavar="IP", type=str, help="the ip to listen on")
args = parser.parse_args()
s = Server(parser = "Parser")
s.listen()
| mit | Python |
cc697b677bc5b97b033cd24444d43d8b94bc85a1 | Fix mock connection | russss/python-emv | emv/test/test_transmission.py | emv/test/test_transmission.py | from unittest2 import TestCase
from emv.util import unformat_bytes
from emv.protocol.command import SelectCommand
from emv.protocol.response import SuccessResponse
from emv.transmission import TransmissionProtocol
class MockConnection(object):
    """Canned smartcard connection: records requests, replays responses."""

    T0_protocol = 1

    def __init__(self, responses):
        # Responses are consumed front-to-back by transmit().
        self.responses = responses
        self.requests = []

    def connect(self, protocol=None):
        # Nothing to establish for the fake connection.
        pass

    def getProtocol(self):
        return self.T0_protocol

    def transmit(self, request):
        self.requests.append(request)
        reply = self.responses[0]
        del self.responses[0]
        return reply
class TestTransmission(TestCase):
    """Exercise TransmissionProtocol against a canned MockConnection."""

    def test_simple(self):
        # A bare success status word (0x90 0x00) should yield a SuccessResponse.
        responses = [
            ([], 0x90, 0x00)
        ]
        conn = MockConnection(responses)
        tp = TransmissionProtocol(conn)
        res = tp.exchange(SelectCommand('test'))
        self.assertIs(type(res), SuccessResponse)

    def test_retry(self):
        # First reply is status 0x61 0x1F ("response pending" — TODO confirm
        # against the protocol implementation); the protocol should fetch the
        # pending data and surface it as a SuccessResponse.
        r_data = unformat_bytes('''6F 1D 84 07 A0 00 00 00 03 80 02 A5 12 50 08 42 41
                                   52 43 4C 41 59 53 87 01 00 5F 2D 02 65 6E''')
        responses = [
            ([], 0x61, 0x1F),
            (r_data, 0x90, 0x00)
        ]
        conn = MockConnection(responses)
        tp = TransmissionProtocol(conn)
        res = tp.exchange(SelectCommand([0xA0, 0x00, 0x00, 0x00, 0x03, 0x80, 0x02]))
        self.assertIs(type(res), SuccessResponse)
| from unittest2 import TestCase
from emv.util import unformat_bytes
from emv.protocol.command import SelectCommand
from emv.protocol.response import SuccessResponse
from emv.transmission import TransmissionProtocol
class MockConnection(object):
    """Canned smartcard connection: records requests, replays responses."""

    T0_protocol = 1

    def __init__(self, responses):
        # Responses are consumed front-to-back by transmit().
        self.responses = responses
        self.requests = []

    def connect(self, protocol=None):
        # Accept the optional protocol argument that real connections take;
        # the previous zero-argument signature broke callers that passed one.
        pass

    def getProtocol(self):
        return self.T0_protocol

    def transmit(self, request):
        self.requests.append(request)
        return self.responses.pop(0)
class TestTransmission(TestCase):
    """Exercise TransmissionProtocol against a canned MockConnection."""

    def test_simple(self):
        # A bare success status word (0x90 0x00) should yield a SuccessResponse.
        responses = [
            ([], 0x90, 0x00)
        ]
        conn = MockConnection(responses)
        tp = TransmissionProtocol(conn)
        res = tp.exchange(SelectCommand('test'))
        self.assertIs(type(res), SuccessResponse)

    def test_retry(self):
        # First reply is status 0x61 0x1F ("response pending" — TODO confirm
        # against the protocol implementation); the protocol should fetch the
        # pending data and surface it as a SuccessResponse.
        r_data = unformat_bytes('''6F 1D 84 07 A0 00 00 00 03 80 02 A5 12 50 08 42 41
                                   52 43 4C 41 59 53 87 01 00 5F 2D 02 65 6E''')
        responses = [
            ([], 0x61, 0x1F),
            (r_data, 0x90, 0x00)
        ]
        conn = MockConnection(responses)
        tp = TransmissionProtocol(conn)
        res = tp.exchange(SelectCommand([0xA0, 0x00, 0x00, 0x00, 0x03, 0x80, 0x02]))
        self.assertIs(type(res), SuccessResponse)
| mit | Python |
24fc06d17303868ef4ea057cd001ec6cb49ab18f | Fix utf-8 problem with åäö and friends. | sknippen/refreeze,sknippen/refreeze,sknippen/refreeze | flask_app.py | flask_app.py | import os
from flask import Flask, render_template
from jinja2 import Template
app = Flask(__name__, template_folder='.', static_url_path='', static_folder='..')
app.config.from_pyfile('settings.py')

# URL prefix under which the deck is served (GitHub-Pages-style repo path).
BASE = '/%s' % app.config['REPO_NAME']


@app.route('/')
def home():
    """Render the slide deck from talk.md plus an optional talk.js."""
    import io  # local import keeps the module-level import block untouched
    with io.open('talk.md', 'r', encoding='utf-8') as f:
        template = Template(f.read())
    markdown = template.render(base=BASE)
    js_file = 'talk.js'
    if os.path.isfile(js_file):
        # Decode the helper script as UTF-8 too; previously only talk.md was
        # decoded while talk.js was read as raw bytes — inconsistent handling.
        with io.open(js_file, 'r', encoding='utf-8') as f_js:
            js = f_js.read()
    else:
        js = ''
    return render_template('slides.html', markdown=markdown, js=js)


if __name__ == '__main__':
    # Local development server; serve from the site root with no repo prefix.
    BASE = ''
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
| import os
from flask import Flask, render_template
from jinja2 import Template
app = Flask(__name__, template_folder='.', static_url_path='', static_folder='..')
app.config.from_pyfile('settings.py')

# URL prefix under which the deck is served (GitHub-Pages-style repo path).
BASE = '/%s' % app.config['REPO_NAME']


@app.route('/')
def home():
    """Render the slide deck from talk.md plus an optional talk.js."""
    import io  # local import keeps the module-level import block untouched
    # Read both files as UTF-8: the previous plain open() broke non-ASCII
    # characters such as å, ä and ö in the slide source.
    with io.open('talk.md', 'r', encoding='utf-8') as f:
        template = Template(f.read())
    markdown = template.render(base=BASE)
    js_file = 'talk.js'
    if os.path.isfile(js_file):
        with io.open(js_file, 'r', encoding='utf-8') as f_js:
            js = f_js.read()
    else:
        js = ''
    return render_template('slides.html', markdown=markdown, js=js)


if __name__ == '__main__':
    # Local development server; serve from the site root with no repo prefix.
    BASE = ''
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
| bsd-3-clause | Python |
0cbc06890eb131fb5014a88c8f5a111d3a6abb0d | Update mockito-core to 2.24.5 | GerritCodeReview/plugins_delete-project,GerritCodeReview/plugins_delete-project,GerritCodeReview/plugins_delete-project,GerritCodeReview/plugins_delete-project | external_plugin_deps.bzl | external_plugin_deps.bzl | load("//tools/bzl:maven_jar.bzl", "maven_jar")
def external_plugin_deps():
    """Register the Maven jars this plugin needs for building and testing."""
    maven_jar(
        name = "mockito",
        artifact = "org.mockito:mockito-core:2.24.5",
        sha1 = "599509fe319bd9e39559b8f987bee5d4b77167e4",
        deps = [
            "@byte-buddy//jar",
            "@byte-buddy-agent//jar",
            "@objenesis//jar",
        ],
    )

    # Runtime dependencies of mockito-core; keep both jars on one version.
    BYTE_BUDDY_VERSION = "1.9.7"

    maven_jar(
        name = "byte-buddy",
        artifact = "net.bytebuddy:byte-buddy:" + BYTE_BUDDY_VERSION,
        sha1 = "8fea78fea6449e1738b675cb155ce8422661e237",
    )

    maven_jar(
        name = "byte-buddy-agent",
        artifact = "net.bytebuddy:byte-buddy-agent:" + BYTE_BUDDY_VERSION,
        sha1 = "8e7d1b599f4943851ffea125fd9780e572727fc0",
    )

    maven_jar(
        name = "objenesis",
        artifact = "org.objenesis:objenesis:2.6",
        sha1 = "639033469776fd37c08358c6b92a4761feb2af4b",
    )

    maven_jar(
        name = "commons-io",
        artifact = "commons-io:commons-io:2.6",
        sha1 = "815893df5f31da2ece4040fe0a12fd44b577afaf",
    )
| load("//tools/bzl:maven_jar.bzl", "maven_jar")
def external_plugin_deps():
    """Register the Maven jars this plugin needs for building and testing."""
    maven_jar(
        name = "mockito",
        artifact = "org.mockito:mockito-core:2.24.0",
        sha1 = "969a7bcb6f16e076904336ebc7ca171d412cc1f9",
        deps = [
            "@byte-buddy//jar",
            "@byte-buddy-agent//jar",
            "@objenesis//jar",
        ],
    )

    # Runtime dependencies of mockito-core; keep both jars on one version.
    BYTE_BUDDY_VERSION = "1.9.7"

    maven_jar(
        name = "byte-buddy",
        artifact = "net.bytebuddy:byte-buddy:" + BYTE_BUDDY_VERSION,
        sha1 = "8fea78fea6449e1738b675cb155ce8422661e237",
    )

    maven_jar(
        name = "byte-buddy-agent",
        artifact = "net.bytebuddy:byte-buddy-agent:" + BYTE_BUDDY_VERSION,
        sha1 = "8e7d1b599f4943851ffea125fd9780e572727fc0",
    )

    maven_jar(
        name = "objenesis",
        artifact = "org.objenesis:objenesis:2.6",
        sha1 = "639033469776fd37c08358c6b92a4761feb2af4b",
    )

    maven_jar(
        name = "commons-io",
        artifact = "commons-io:commons-io:2.6",
        sha1 = "815893df5f31da2ece4040fe0a12fd44b577afaf",
    )
56dabb4f34385c007ee9bdcebcd35953fe5b9085 | Upgrade mockito-core to 2.9.0 | GerritCodeReview/plugins_webhooks | external_plugin_deps.bzl | external_plugin_deps.bzl | load("//tools/bzl:maven_jar.bzl", "maven_jar")
def external_plugin_deps():
    """Register the Maven jars this plugin needs for building and testing."""
    maven_jar(
        name = "mockito",
        artifact = "org.mockito:mockito-core:2.9.0",
        sha1 = "f28b9606eca8da77e10df30a7e301f589733143e",
        deps = [
            '@byte-buddy//jar',
            '@objenesis//jar',
        ],
    )

    # Runtime dependencies of mockito-core.
    maven_jar(
        name = "byte-buddy",
        artifact = "net.bytebuddy:byte-buddy:1.7.0",
        sha1 = "48481d20ed4334ee0abfe8212ecb44e0233a97b5",
    )

    maven_jar(
        name = "objenesis",
        artifact = "org.objenesis:objenesis:2.6",
        sha1 = "639033469776fd37c08358c6b92a4761feb2af4b",
    )
| load("//tools/bzl:maven_jar.bzl", "maven_jar")
def external_plugin_deps():
    """Register the Maven jars this plugin needs for building and testing."""
    maven_jar(
        name = "mockito",
        artifact = "org.mockito:mockito-core:2.7.21",
        sha1 = "23e9f7bfb9717e849a05b84c29ee3ac723f1a653",
        deps = [
            '@byte-buddy//jar',
            '@objenesis//jar',
        ],
    )

    # Runtime dependencies of mockito-core.
    maven_jar(
        name = "byte-buddy",
        artifact = "net.bytebuddy:byte-buddy:1.6.11",
        sha1 = "8a8f9409e27f1d62c909c7eef2aa7b3a580b4901",
    )

    maven_jar(
        name = "objenesis",
        artifact = "org.objenesis:objenesis:2.5",
        sha1 = "612ecb799912ccf77cba9b3ed8c813da086076e9",
    )
| apache-2.0 | Python |
a827ee08e73ac7474908a06f871fba4dfcceee0b | drop todo comment | drmonkeysee/ecs-scheduler,drmonkeysee/ecs-scheduler | ecs_scheduler/env.py | ecs_scheduler/env.py | """ECS scheduler initialization helper methods."""
import os
import logging
import logging.handlers
import setuptools_scm
from . import triggers, __version__
# Module-level logger for package diagnostics.
_logger = logging.getLogger(__name__)


def init():
    """Initialize global application state."""
    # Configure logging first so trigger initialization can log.
    _init_logging()
    triggers.init()
def get_var(name, required=False, default=None):
    """Return the value of environment variable ``ECSS_<name>``.

    :param name: Name of the env variable (sans ECSS_ prefix)
    :param required: Raise KeyError if env variable not found
    :param default: Default value if env variable not found
    """
    key = 'ECSS_' + name
    if required:
        value = os.environ[key]
    else:
        value = os.getenv(key, default)
    if value:
        # Values may interpolate other environment variables, e.g. "{HOME}/x".
        value = value.format(**os.environ)
    return value
def get_version():
    """
    Get the current application version.
    """
    try:
        return setuptools_scm.get_version()
    except LookupError:
        # No SCM metadata available (not a source checkout); fall back to the
        # version baked into the package.
        return __version__


def _init_logging():
    # An unset or invalid ECSS_LOG_LEVEL yields None, which leaves logging's
    # default level in effect.
    log_level = getattr(logging, get_var('LOG_LEVEL', default=''), None)
    log_handlers = [logging.StreamHandler()]
    log_folder = get_var('LOG_FOLDER')
    if log_folder:
        os.makedirs(os.path.abspath(log_folder), exist_ok=True)
        log_file = os.path.join(log_folder, 'app.log')
        # Rotate at 5 MiB, keeping a single backup file.
        log_handlers.append(logging.handlers.RotatingFileHandler(log_file, maxBytes=5*1024*1024, backupCount=1))
    logging.basicConfig(level=log_level, handlers=log_handlers, format='%(levelname)s:%(name)s:%(asctime)s %(message)s')
| """ECS scheduler initialization helper methods."""
import os
import logging
import logging.handlers
import setuptools_scm
from . import triggers, __version__
# Module-level logger for package diagnostics.
_logger = logging.getLogger(__name__)


def init():
    """Initialize global application state."""
    # Configure logging first so trigger initialization can log.
    _init_logging()
    triggers.init()


def get_var(name, required=False, default=None):
    """
    Get environment variable value.

    :param name: Name of the env variable (sans ECSS_ prefix)
    :param required: Raise KeyError if env variable not found
    :param default: Default value if env variable not found
    """
    name = f'ECSS_{name}'
    val = os.environ[name] if required else os.getenv(name, default)
    # Values may interpolate other environment variables, e.g. "{HOME}/x".
    return val.format(**os.environ) if val else val


def get_version():
    """
    Get the current application version.
    """
    # No SCM metadata (e.g. installed package or container image) raises
    # LookupError; fall back to the version baked into the package.
    try:
        return setuptools_scm.get_version()
    except LookupError:
        return __version__


def _init_logging():
    # An unset or invalid ECSS_LOG_LEVEL yields None, which leaves logging's
    # default level in effect.
    log_level = getattr(logging, get_var('LOG_LEVEL', default=''), None)
    log_handlers = [logging.StreamHandler()]
    log_folder = get_var('LOG_FOLDER')
    if log_folder:
        os.makedirs(os.path.abspath(log_folder), exist_ok=True)
        log_file = os.path.join(log_folder, 'app.log')
        # Rotate at 5 MiB, keeping a single backup file.
        log_handlers.append(logging.handlers.RotatingFileHandler(log_file, maxBytes=5*1024*1024, backupCount=1))
    logging.basicConfig(level=log_level, handlers=log_handlers, format='%(levelname)s:%(name)s:%(asctime)s %(message)s')
| mit | Python |
6c35c54cd9561c9f829e8ba9500acdcd09642a32 | rename global to make usage clear | Connexions/cnx-archive,Connexions/cnx-archive | cnxarchive/utils/safe.py | cnxarchive/utils/safe.py | import subprocess
import threading
from subprocess import PIPE
from logging import getLogger
logger = getLogger('safestat')

# Handle to the most recent stat subprocess; module global so the worker
# thread and the caller can both reach it.
safe_stat_process = None


def safe_stat(path, timeout=1, cmd=None):
    "Use threads and a subproc to bodge a timeout on top of filesystem access"
    global safe_stat_process
    # Reset so a stale handle from a previous call is never consulted.
    safe_stat_process = None
    if cmd is None:
        cmd = ['/usr/bin/stat']
    # Copy instead of append: don't mutate the caller's list.
    cmd = cmd + [path]

    def target():
        global safe_stat_process
        logger.debug('Stat thread started')
        safe_stat_process = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE)
        _results = safe_stat_process.communicate()  # noqa
        logger.debug('Stat thread finished')

    thread = threading.Thread(target=target)
    thread.start()
    thread.join(timeout)
    if thread.is_alive():  # stat took longer than timeout
        # Guard: the thread may time out before Popen even ran, in which
        # case there is no process to terminate (previously an AttributeError).
        if safe_stat_process is not None:
            safe_stat_process.terminate()
        thread.join()
    return safe_stat_process is not None and safe_stat_process.returncode == 0
| import subprocess
import threading
from subprocess import PIPE
from logging import getLogger
logger = getLogger('safestat')

# Handle to the most recent stat subprocess; module global so the worker
# thread and the caller can both reach it.  NOTE(review): the generic name
# ``process`` risks clashing with other module globals.
process = None


def safe_stat(path, timeout=1, cmd=None):
    "Use threads and a subproc to bodge a timeout on top of filesystem access"
    global process
    if cmd is None:
        cmd = ['/usr/bin/stat']
    cmd.append(path)

    def target():
        global process
        logger.debug('Stat thread started')
        process = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE)
        _results = process.communicate()  # noqa
        logger.debug('Stat thread finished')

    thread = threading.Thread(target=target)
    thread.start()
    thread.join(timeout)
    if thread.is_alive():  # stat took longer than timeout
        # NOTE(review): if the thread timed out before Popen ran, ``process``
        # is still None here and terminate() would raise — verify.
        process.terminate()
        thread.join()
    return process.returncode == 0
| agpl-3.0 | Python |
a1e92f9fcb8364eee991f0a7f976b4e1b83dcddd | Use consistent data in "colour.models.cie_uvw" module doc tests. | colour-science/colour | colour/models/cie_uvw.py | colour/models/cie_uvw.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CIE UVW Colourspace
===================
Defines the *CIE UVW* colourspace transformations:
- :func:`XYZ_to_UVW`
See Also
--------
`CIE UVW Colourspace IPython Notebook
<http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/models/cie_uvw.ipynb>`_ # noqa
References
----------
.. [1] http://en.wikipedia.org/wiki/CIE_1964_color_space
(Last accessed 10 June 2014)
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.colorimetry import ILLUMINANTS
from colour.models import UCS_to_uv, XYZ_to_UCS, XYZ_to_xyY, xy_to_XYZ
# Package metadata and the module's public API.
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'

__all__ = ['XYZ_to_UVW']
def XYZ_to_UVW(XYZ,
               illuminant=ILLUMINANTS.get(
                   'CIE 1931 2 Degree Standard Observer').get('D50')):
    """
    Converts from *CIE XYZ* colourspace to *CIE 1964 U\*V*\W\** colourspace.

    Parameters
    ----------
    XYZ : array_like, (3,)
        *CIE XYZ* colourspace matrix.
    illuminant : array_like, optional
        Reference *illuminant* chromaticity coordinates.

    Returns
    -------
    ndarray, (3,)
        *CIE 1964 U\*V*\W\** colourspace matrix.

    Notes
    -----
    -   Input *CIE XYZ* colourspace matrix is in domain [0, 100].
    -   Output *CIE UVW* colourspace matrix is in domain [0, 100].

    Warning
    -------
    The input / output domains of that definition are non standard!

    Examples
    --------
    >>> XYZ = np.array([0.07049534, 0.1008, 0.09558313]) * 100
    >>> XYZ_to_UVW(XYZ)  # doctest: +ELLIPSIS
    array([-28.0483277...,  -0.8805242...,  37.0041149...])
    """
    # Only the luminance component of the xyY intermediate is used
    # (the chromaticity coordinates x and y were previously unpacked unused).
    _, _, Y = np.ravel(XYZ_to_xyY(XYZ, illuminant))
    u, v = np.ravel(UCS_to_uv(XYZ_to_UCS(XYZ)))
    u0, v0 = np.ravel(UCS_to_uv(XYZ_to_UCS(xy_to_XYZ(illuminant))))

    W = 25 * Y ** (1 / 3) - 17
    U = 13 * W * (u - u0)
    V = 13 * W * (v - v0)

    return np.array([U, V, W])
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CIE UVW Colourspace
===================
Defines the *CIE UVW* colourspace transformations:
- :func:`XYZ_to_UVW`
See Also
--------
`CIE UVW Colourspace IPython Notebook
<http://nbviewer.ipython.org/github/colour-science/colour-ipython/blob/master/notebooks/models/cie_uvw.ipynb>`_ # noqa
References
----------
.. [1] http://en.wikipedia.org/wiki/CIE_1964_color_space
(Last accessed 10 June 2014)
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.colorimetry import ILLUMINANTS
from colour.models import UCS_to_uv, XYZ_to_UCS, XYZ_to_xyY, xy_to_XYZ
# Package metadata and the module's public API.
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013 - 2014 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'

__all__ = ['XYZ_to_UVW']


def XYZ_to_UVW(XYZ,
               illuminant=ILLUMINANTS.get(
                   'CIE 1931 2 Degree Standard Observer').get('D50')):
    """
    Converts from *CIE XYZ* colourspace to *CIE 1964 U\*V*\W\** colourspace.

    Parameters
    ----------
    XYZ : array_like, (3,)
        *CIE XYZ* colourspace matrix.
    illuminant : array_like, optional
        Reference *illuminant* chromaticity coordinates.

    Returns
    -------
    ndarray, (3,)
        *CIE 1964 U\*V*\W\** colourspace matrix.

    Notes
    -----
    -   Input *CIE XYZ* colourspace matrix is in domain [0, 100].
    -   Output *CIE UVW* colourspace matrix is in domain [0, 100].

    Warning
    -------
    The input / output domains of that definition are non standard!

    Examples
    --------
    >>> XYZ = np.array([11.80583421, 10.34, 5.15089229])
    >>> XYZ_to_UVW(XYZ)  # doctest: +ELLIPSIS
    array([ 24.2543371...,   7.2205484...,  37.4645000...])
    """
    # NOTE(review): x and y are unpacked but unused; only Y feeds W below.
    x, y, Y = np.ravel(XYZ_to_xyY(XYZ, illuminant))
    u, v = np.ravel(UCS_to_uv(XYZ_to_UCS(XYZ)))
    u0, v0 = np.ravel(UCS_to_uv(XYZ_to_UCS(xy_to_XYZ(illuminant))))

    W = 25 * Y ** (1 / 3) - 17
    U = 13 * W * (u - u0)
    V = 13 * W * (v - v0)

    return np.array([U, V, W])
| bsd-3-clause | Python |
2ea688e90d6924bbad8fb59aa0706e9328fbba6d | Add initial FlatContainer class. | nodev-io/nodev.specs | nodev/specs/generic.py | nodev/specs/generic.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015-2016 Alessandro Amici.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# python 2 support via python-future
from __future__ import absolute_import, unicode_literals
from builtins import super
import inspect
try:
# from python version >= 3.0
from collections import abc
except ImportError:
import collections as abc
try:
# from python version >= 3.5
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
@singledispatch
def contains(container, item):
    """Extends ``operator.contains`` by trying very hard to find ``item`` inside container."""
    contained = False
    try:
        contained = instance_contains(container, item)
    except Exception:
        # Attribute inspection can fail on exotic objects; treat that as
        # "not found".  Narrowed from a bare ``except`` that also swallowed
        # SystemExit and KeyboardInterrupt.
        pass
    return contained


@contains.register(abc.Container)
@contains.register(abc.Iterator)
def container_contains(container, item):
    return item in container


@contains.register(abc.Mapping)
def mapping_contains(container, item):
    # Mappings are searched by value, not by key.
    return item in container.values()


@contains.register(str)
def str_contains(container, item):
    # Membership is tested against the set of characters, so only
    # single-character items can match.
    return item in set(container)


def instance_contains(container, item):
    """Search into instance attributes and properties and class attributes."""
    return item in (p for _, p in inspect.getmembers(container))
class Container(object):
    """Wrapper whose ``in`` operator delegates to the deep :func:`contains` search."""

    def __init__(self, container):
        self.container = container

    def __contains__(self, item):
        return contains(self.container, item)
def generate_items(object):
    """Yield *object*'s immediate contents, then its public attributes.

    Mappings yield alternating keys and values; other iterables yield their
    elements.  In every case the object's non-underscore attributes follow.
    """
    if isinstance(object, abc.Mapping):
        for key, value in object.items():
            yield key
            yield value
    elif isinstance(object, abc.Iterable):
        for item in object:
            yield item
    for name, attr in inspect.getmembers(object):
        if not name.startswith('_'):
            yield attr


def generate_flat_items(object):
    """Yield the items of *object* and, one level deep, their sub-items."""
    for item in generate_items(object):
        yield item
        try:
            for subitem in generate_items(item):
                yield subitem
        except Exception:
            # Some items cannot be introspected or iterated; skip their
            # sub-items.  Narrowed from a bare ``except`` that also swallowed
            # SystemExit and KeyboardInterrupt.
            pass
class FlatContainer(tuple):
    """Immutable, flattened snapshot of an object's items and attributes."""

    def __new__(cls, object):
        # tuple.__new__ needs the class as its first argument, and the new
        # instance must be returned: the previous code passed only the
        # generator and discarded the result, so construction always failed.
        return super().__new__(cls, generate_flat_items(object))
| # -*- coding: utf-8 -*-
#
# Copyright (c) 2015-2016 Alessandro Amici.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# python 2 support via python-future
from __future__ import absolute_import, unicode_literals
import inspect
try:
# from python version >= 3.0
from collections import abc
except ImportError:
import collections as abc
try:
# from python version >= 3.5
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
@singledispatch
def contains(container, item):
    """Extends ``operator.contains`` by trying very hard to find ``item`` inside container."""
    contained = False
    try:
        contained = instance_contains(container, item)
    except:
        # NOTE(review): bare ``except`` also swallows SystemExit and
        # KeyboardInterrupt; consider narrowing to ``except Exception``.
        pass
    return contained


@contains.register(abc.Container)
def container_contains(container, item):
    return item in container


@contains.register(abc.Iterator)
def iterator_contains(container, item):
    # Membership testing consumes the iterator up to the first match.
    return item in container


@contains.register(abc.Mapping)
def mapping_contains(container, item):
    # Mappings are searched by value, not by key.
    return item in container.values()


@contains.register(str)
def str_contains(container, item):
    # Membership is tested against the set of characters, so only
    # single-character items can match.
    return item in set(container)


def instance_contains(container, item):
    """Search into instance attributes and properties and class attributes."""
    return item in (p for _, p in inspect.getmembers(container))


class Container(object):
    """Wrapper whose ``in`` operator delegates to the deep :func:`contains` search."""

    def __init__(self, container):
        self.container = container

    def __contains__(self, item):
        return contains(self.container, item)
| mit | Python |
37d8fadd25ebf06207e046007097b06ecb9f33ac | Use Numpy dtype when creating Numpy array | sklam/numba,pombredanne/numba,IntelLabs/numba,GaZ3ll3/numba,stonebig/numba,stuartarchibald/numba,IntelLabs/numba,jriehl/numba,IntelLabs/numba,ssarangi/numba,stonebig/numba,stonebig/numba,seibert/numba,seibert/numba,jriehl/numba,GaZ3ll3/numba,jriehl/numba,sklam/numba,stonebig/numba,gmarkall/numba,numba/numba,gmarkall/numba,stefanseefeld/numba,IntelLabs/numba,pombredanne/numba,cpcloud/numba,pombredanne/numba,seibert/numba,GaZ3ll3/numba,stuartarchibald/numba,stuartarchibald/numba,gdementen/numba,stefanseefeld/numba,stefanseefeld/numba,cpcloud/numba,stuartarchibald/numba,numba/numba,ssarangi/numba,sklam/numba,cpcloud/numba,gmarkall/numba,stuartarchibald/numba,ssarangi/numba,gdementen/numba,cpcloud/numba,ssarangi/numba,ssarangi/numba,pitrou/numba,pitrou/numba,GaZ3ll3/numba,gmarkall/numba,seibert/numba,numba/numba,numba/numba,GaZ3ll3/numba,gdementen/numba,sklam/numba,gdementen/numba,pitrou/numba,pombredanne/numba,IntelLabs/numba,gmarkall/numba,jriehl/numba,numba/numba,jriehl/numba,stonebig/numba,gdementen/numba,pitrou/numba,sklam/numba,pitrou/numba,stefanseefeld/numba,seibert/numba,pombredanne/numba,cpcloud/numba,stefanseefeld/numba | numba/cuda/tests/cudapy/test_alignment.py | numba/cuda/tests/cudapy/test_alignment.py | import numpy as np
from numba import from_dtype, cuda
from numba import unittest_support as unittest
class TestAlignment(unittest.TestCase):
    """Record dtype alignment behaviour of CUDA kernels."""

    def test_record_alignment(self):
        # An aligned record dtype should compile and run.
        rec_dtype = np.dtype([('a', 'int32'), ('b', 'float64')], align=True)
        rec = from_dtype(rec_dtype)

        @cuda.jit((rec[:],))
        def foo(a):
            i = cuda.grid(1)
            a[i].a = a[i].b

        # The recarray must be built from the NumPy dtype, not the Numba type.
        a_recarray = np.recarray(3, dtype=rec_dtype)
        for i in range(a_recarray.size):
            a_rec = a_recarray[i]
            a_rec.a = 0
            a_rec.b = (i + 1) * 123

        foo[1, 3](a_recarray)

        self.assertTrue(np.all(a_recarray.a == a_recarray.b))

    def test_record_alignment_error(self):
        # Without align=True the float64 field is misaligned and compilation
        # is expected to fail.
        rec_dtype = np.dtype([('a', 'int32'), ('b', 'float64')])
        rec = from_dtype(rec_dtype)
        with self.assertRaises(Exception) as raises:
            @cuda.jit((rec[:],))
            def foo(a):
                i = cuda.grid(1)
                a[i].a = a[i].b
        self.assertTrue('type float64 is not aligned' in str(raises.exception))


if __name__ == '__main__':
    unittest.main()
| import numpy as np
from numba import from_dtype, cuda
from numba import unittest_support as unittest
class TestAlignment(unittest.TestCase):
    """Record dtype alignment behaviour of CUDA kernels."""

    def test_record_alignment(self):
        # An aligned record dtype should compile and run.
        rec_dtype = np.dtype([('a', 'int32'), ('b', 'float64')], align=True)
        rec = from_dtype(rec_dtype)

        @cuda.jit((rec[:],))
        def foo(a):
            i = cuda.grid(1)
            a[i].a = a[i].b

        # np.recarray expects the NumPy dtype; ``rec`` is the Numba record
        # type derived from it, which NumPy cannot interpret.
        a_recarray = np.recarray(3, dtype=rec_dtype)
        for i in range(a_recarray.size):
            a_rec = a_recarray[i]
            a_rec.a = 0
            a_rec.b = (i + 1) * 123

        foo[1, 3](a_recarray)

        self.assertTrue(np.all(a_recarray.a == a_recarray.b))

    def test_record_alignment_error(self):
        # Without align=True the float64 field is misaligned and compilation
        # is expected to fail.
        rec_dtype = np.dtype([('a', 'int32'), ('b', 'float64')])
        rec = from_dtype(rec_dtype)
        with self.assertRaises(Exception) as raises:
            @cuda.jit((rec[:],))
            def foo(a):
                i = cuda.grid(1)
                a[i].a = a[i].b
        self.assertTrue('type float64 is not aligned' in str(raises.exception))


if __name__ == '__main__':
    unittest.main()
| bsd-2-clause | Python |
ad7b71212a9c2356227048a28ee94c52193f6156 | clean up | awemulya/fieldsight-kobocat,awemulya/fieldsight-kobocat,awemulya/fieldsight-kobocat,awemulya/fieldsight-kobocat | onadata/apps/fsforms/line_data_project.py | onadata/apps/fsforms/line_data_project.py | import datetime
from collections import OrderedDict
from django.db.models import Count
from .models import FInstance
def date_range(start, end, intv):
    """Yield ``intv + 1`` evenly spaced datetimes from *start* to *end*.

    *start* and *end* are ``YYYYMMDD`` strings; both endpoints are included.
    """
    begin = datetime.datetime.strptime(start, "%Y%m%d")
    finish = datetime.datetime.strptime(end, "%Y%m%d")
    step = (finish - begin) / intv
    for index in range(intv):
        yield begin + step * index
    yield finish
class LineChartGenerator(object):
    """Cumulative submission counts for a project, sampled at six dates."""

    def __init__(self, project):
        self.project = project
        self.date_list = list(date_range(project.date_created.strftime("%Y%m%d"), datetime.datetime.today().strftime("%Y%m%d"), 6))

    def get_count(self, date):
        # Advance one day so submissions made anywhere within *date* count.
        date = date + datetime.timedelta(days=1)
        return self.project.project_instances.filter(date__lte=date.date()).count()

    def data(self):
        # Ordered mapping of 'YYYY-MM-DD' -> cumulative submission count.
        d = OrderedDict()
        dt = self.date_list
        for date in dt:
            count = self.get_count(date)
            d[date.strftime('%Y-%m-%d')] = count
        return d


class LineChartGeneratorOrganization(object):
    """Cumulative submission counts for an organization, sampled at six dates."""

    def __init__(self, organization):
        self.organization = organization
        self.date_list = list(date_range(organization.date_created.strftime("%Y%m%d"), datetime.datetime.today().strftime("%Y%m%d"), 6))

    def get_count(self, date):
        # Advance one day so submissions made anywhere within *date* count.
        date = date + datetime.timedelta(days=1)
        return FInstance.objects.filter(project__organization=self.organization, date__lte=date.date()).count()

    def data(self):
        # Ordered mapping of 'YYYY-MM-DD' -> cumulative submission count.
        d = OrderedDict()
        dt = self.date_list
        for date in dt:
            count = self.get_count(date)
            d[date.strftime('%Y-%m-%d')] = count
        return d


class LineChartGeneratorSite(object):
    """Cumulative submission counts for a site, sampled at six dates."""

    def __init__(self, site):
        self.site = site
        self.date_list = list(date_range(site.date_created.strftime("%Y%m%d"), datetime.datetime.today().strftime("%Y%m%d"), 6))

    def get_count(self, date):
        # Advance one day so submissions made anywhere within *date* count.
        date = date + datetime.timedelta(days=1)
        return self.site.site_instances.filter(date__lte=date.date()).count()

    def data(self):
        # Ordered mapping of 'YYYY-MM-DD' -> cumulative submission count.
        d = OrderedDict()
        dt = self.date_list
        for date in dt:
            count = self.get_count(date)
            d[date.strftime('%Y-%m-%d')] = count
        return d
| import datetime
from collections import OrderedDict
from django.db.models import Count
from .models import FInstance
def date_range(start, end, intv):
    """Yield ``intv + 1`` evenly spaced datetimes from *start* to *end*.

    *start* and *end* are ``YYYYMMDD`` strings; both endpoints are included.
    """
    start = datetime.datetime.strptime(start,"%Y%m%d")
    end = datetime.datetime.strptime(end,"%Y%m%d")
    diff = (end - start ) / intv
    for i in range(intv):
        yield (start + diff * i)
    yield end


class LineChartGenerator(object):
    """Cumulative submission counts for a project, sampled at six dates."""

    def __init__(self, project):
        self.project = project
        self.date_list = list(date_range(project.date_created.strftime("%Y%m%d"), datetime.datetime.today().strftime("%Y%m%d"), 6))

    def get_count(self, date):
        # Advance one day so submissions made anywhere within *date* count,
        # consistent with LineChartGeneratorOrganization below (previously
        # only the organization variant included the whole day).
        date = date + datetime.timedelta(days=1)
        return self.project.project_instances.filter(date__lte=date.date()).count()

    def data(self):
        # Ordered mapping of 'YYYY-MM-DD' -> cumulative submission count.
        d = OrderedDict()
        dt = self.date_list
        for date in dt:
            count = self.get_count(date)
            d[date.strftime('%Y-%m-%d')] = count
        return d


class LineChartGeneratorOrganization(object):
    """Cumulative submission counts for an organization, sampled at six dates."""

    def __init__(self, organization):
        self.organization = organization
        self.date_list = list(date_range(organization.date_created.strftime("%Y%m%d"), datetime.datetime.today().strftime("%Y%m%d"), 6))

    def get_count(self, date):
        # Advance one day so submissions made anywhere within *date* count.
        date = date + datetime.timedelta(days=1)
        return FInstance.objects.filter(project__organization=self.organization, date__lte=date.date()).count()

    def data(self):
        # Ordered mapping of 'YYYY-MM-DD' -> cumulative submission count.
        d = OrderedDict()
        dt = self.date_list
        for date in dt:
            count = self.get_count(date)
            d[date.strftime('%Y-%m-%d')] = count
        return d


class LineChartGeneratorSite(object):
    """Cumulative submission counts for a site, sampled at six dates."""

    def __init__(self, site):
        self.site = site
        self.date_list = list(date_range(site.date_created.strftime("%Y%m%d"), datetime.datetime.today().strftime("%Y%m%d"), 6))

    def get_count(self, date):
        # Advance one day so submissions made anywhere within *date* count,
        # consistent with the other generators.
        date = date + datetime.timedelta(days=1)
        return self.site.site_instances.filter(date__lte=date.date()).count()

    def data(self):
        # Ordered mapping of 'YYYY-MM-DD' -> cumulative submission count.
        d = OrderedDict()
        dt = self.date_list
        for date in dt:
            count = self.get_count(date)
            d[date.strftime('%Y-%m-%d')] = count
        return d
| bsd-2-clause | Python |
dbd3320bba832d031781f02d8199628d9acd3fac | Fix except syntax | hastexo/edx-shopify,fghaas/edx-shopify | edx_shopify/tasks.py | edx_shopify/tasks.py | from celery import Task
from celery.utils.log import get_task_logger
from .models import Order, OrderItem
from .utils import auto_enroll_email
logger = get_task_logger(__name__)
class ProcessOrder(Task):
"""
Process order creation event.
"""
def run(self, data):
order = Order.objects.get(id=data['id'])
# If the order is anything but UNPROCESSED, abandon the attempt.
if order.status != Order.UNPROCESSED:
return
# Mark the order as being processed.
order.status = Order.PROCESSING
order.save()
# Process line items
order_error = False
for item in data['line_items']:
try:
sku = item['sku']
email = next(
p['value'] for p in item['properties']
if p['name'] == 'email'
)
except (KeyError, StopIteration):
order_error = True
continue
# Store line item
order_item, created = OrderItem.objects.get_or_create(
order = order,
sku = sku,
email = email
)
if order_item.status == OrderItem.UNPROCESSED:
try:
# Enroll the email in the course
auto_enroll_email(sku, email)
except:
order_error = True
order_item.status = OrderItem.ERROR
order_item.save()
continue
# Mark the item as processed
order_item.status = OrderItem.PROCESSED
order_item.save()
elif order_item.status == OrderItem.ERROR:
order_error = True
# Mark the order status
if order_error:
order.status = Order.ERROR
else:
order.status = Order.PROCESSED
order.save()
| from celery import Task
from celery.utils.log import get_task_logger
from .models import Order, OrderItem
from .utils import auto_enroll_email
logger = get_task_logger(__name__)
class ProcessOrder(Task):
"""
Process order creation event.
"""
def run(self, data):
order = Order.objects.get(id=data['id'])
# If the order is anything but UNPROCESSED, abandon the attempt.
if order.status != Order.UNPROCESSED:
return
# Mark the order as being processed.
order.status = Order.PROCESSING
order.save()
# Process line items
order_error = False
for item in data['line_items']:
try:
sku = item['sku']
email = next(
p['value'] for p in item['properties']
if p['name'] == 'email'
)
except KeyError, StopIteration:
order_error = True
continue
# Store line item
order_item, created = OrderItem.objects.get_or_create(
order = order,
sku = sku,
email = email
)
if order_item.status == OrderItem.UNPROCESSED:
try:
# Enroll the email in the course
auto_enroll_email(sku, email)
except:
order_error = True
order_item.status = OrderItem.ERROR
order_item.save()
continue
# Mark the item as processed
order_item.status = OrderItem.PROCESSED
order_item.save()
elif order_item.status == OrderItem.ERROR:
order_error = True
# Mark the order status
if order_error:
order.status = Order.ERROR
else:
order.status = Order.PROCESSED
order.save()
| agpl-3.0 | Python |
4b2f91c2fe3629e0dac6d95cb2736a01984ca63b | Update koth example to match new Entity behavior | BHSPitMonkey/vmflib | examples/koth_vmflib_example.py | examples/koth_vmflib_example.py | #!/usr/bin/python3
"""Example map generator: King of the Hill Example
This script demonstrates vmflib by generating a basic "king of the hill" style
map. "King of the hill" is a game mode in Team Fortress 2 where each team tries
to maintain control of a central "control point" for some total defined amount
of time (before the other team does).
After this script executes, the map will be written to: koth_vmflib_example.vmf
This example highlights the use of TF2 game mechanics (in this case the use of
a control point and a goal timer). A simple implementation of team
spawn/resupply areas is also included.
https://developer.valvesoftware.com/wiki/Creating_a_Capture_Point
https://developer.valvesoftware.com/wiki/TF2/King_of_the_Hill
"""
from vmf import *
from vmf.types import Vertex
from vmf.tools import Block
m = vmf.ValveMap()
# Environment and lighting (these values come from Sky List on Valve dev wiki)
# Sun angle S Pitch Brightness Ambience
# 0 300 0 -20 238 218 181 250 224 188 122 250
m.world.skyname = 'sky_harvest_01'
light = vmf.Entity('light_environment')
light.origin = "0 0 0"
light.properties['pitch'] = -20
light.properties['angles'] = "0 300 0"
light.properties['_lightscaleHDR'] = "238 218 181 450"
light.properties['_lightHDR'] = "238 218 181 450"
light.properties['_light'] = "238 218 181 450"
light.properties['_AmbientScaleHDR'] = "1"
light.properties['_ambientHDR'] = "224 188 122 250"
light.properties['_ambient'] = "224 188 122 250"
# Ground
ground = Block(Vertex(0, 0, -32), (2048, 2048, 64), 'nature/dirtground004')
m.world.children.append(ground)
# Skybox
skybox = [
Block(Vertex(0, 0, 2048), (2048, 2048, 64)), # Ceiling
Block(Vertex(-1024, 0, 1024), (64, 2048, 2048)), # Left wall
Block(Vertex(1024, 0, 1024), (64, 2048, 2048)), # Right wall
Block(Vertex(0, 1024, 1024), (2048, 64, 2048)), # Forward wall
Block(Vertex(0, -1024, 1024), (2048, 64, 2048)) # Rear wall
]
for wall in skybox:
wall.set_material('tools/toolsskybox2d')
m.world.children.extend(skybox)
# Control point prop
cp_prop = vmf.Entity('prop_dynamic')
cp_prop.origin = "0 0 0"
cp_prop.properties['targetname'] = "prop_cap_1"
cp_prop.properties['model'] = "models/props_gameplay/cap_point_base.mdl"
m.children.append(cp_prop)
# TODO
# Player spawn areas
# Define RED spawn
spawn_red = vmf.Entity('info_player_teamspawn')
spawn_red.origin = "900 900 10"
spawn_red.properties['TeamNum'] = "2" # RED
spawn_red.properties['angles'] = "0 -135 0"
# Define BLU spawn
spawn_blu = vmf.Entity('info_player_teamspawn')
spawn_blu.origin = "-900 -900 10"
spawn_blu.properties['TeamNum'] = "3" # BLU
spawn_blu.properties['angles'] = "0 45 0"
# Write the map to a file
m.write_vmf('koth_vmflib_example.vmf')
| #!/usr/bin/python3
"""Example map generator: King of the Hill Example
This script demonstrates vmflib by generating a basic "king of the hill" style
map. "King of the hill" is a game mode in Team Fortress 2 where each team tries
to maintain control of a central "control point" for some total defined amount
of time (before the other team does).
After this script executes, the map will be written to: koth_vmflib_example.vmf
This example highlights the use of TF2 game mechanics (in this case the use of
a control point and a goal timer). A simple implementation of team
spawn/resupply areas is also included.
https://developer.valvesoftware.com/wiki/Creating_a_Capture_Point
https://developer.valvesoftware.com/wiki/TF2/King_of_the_Hill
"""
from vmf import *
from vmf.types import Vertex
from vmf.tools import Block
m = vmf.ValveMap()
# Environment and lighting (these values come from Sky List on Valve dev wiki)
# Sun angle S Pitch Brightness Ambience
# 0 300 0 -20 238 218 181 250 224 188 122 250
m.world.skyname = 'sky_harvest_01'
light = vmf.Entity('light_environment')
light.origin = "0 0 0"
light.properties['pitch'] = -20
light.properties['angles'] = "0 300 0"
light.properties['_lightscaleHDR'] = "238 218 181 450"
light.properties['_lightHDR'] = "238 218 181 450"
light.properties['_light'] = "238 218 181 450"
light.properties['_AmbientScaleHDR'] = "1"
light.properties['_ambientHDR'] = "224 188 122 250"
light.properties['_ambient'] = "224 188 122 250"
m.children.append(light)
# Ground
ground = Block(Vertex(0, 0, -32), (2048, 2048, 64), 'nature/dirtground004')
m.world.children.append(ground)
# Skybox
skybox = [
Block(Vertex(0, 0, 2048), (2048, 2048, 64)), # Ceiling
Block(Vertex(-1024, 0, 1024), (64, 2048, 2048)), # Left wall
Block(Vertex(1024, 0, 1024), (64, 2048, 2048)), # Right wall
Block(Vertex(0, 1024, 1024), (2048, 64, 2048)), # Forward wall
Block(Vertex(0, -1024, 1024), (2048, 64, 2048)) # Rear wall
]
for wall in skybox:
wall.set_material('tools/toolsskybox2d')
m.world.children.extend(skybox)
# Control point prop
cp_prop = vmf.Entity('prop_dynamic')
cp_prop.origin = "0 0 0"
cp_prop.properties['targetname'] = "prop_cap_1"
cp_prop.properties['model'] = "models/props_gameplay/cap_point_base.mdl"
m.children.append(cp_prop)
# TODO
# Player spawn areas
# Define RED spawn
spawn_red = vmf.Entity('info_player_teamspawn')
spawn_red.origin = "900 900 10"
spawn_red.properties['TeamNum'] = "2" # RED
spawn_red.properties['angles'] = "0 -135 0"
m.children.append(spawn_red)
# Define BLU spawn
spawn_blu = vmf.Entity('info_player_teamspawn')
spawn_blu.origin = "-900 -900 10"
spawn_blu.properties['TeamNum'] = "3" # BLU
spawn_blu.properties['angles'] = "0 45 0"
m.children.append(spawn_blu)
# Write the map to a file
m.write_vmf('koth_vmflib_example.vmf')
| bsd-2-clause | Python |
2bc0cf211a15a6bd20f9a851ba92a12d8bb8479c | fix float instead of int | instagrambot/instabot,ohld/instabot,rasperepodvipodvert/instabot,vkgrd/instabot,misisnik/testinsta,AlexBGoode/instabot,instagrambot/instabot,Diapostrofo/instabot,sudoguy/instabot,misisnik/testinsta,instagrambot/instapro | examples/ultimate_schedule/ultimate.py | examples/ultimate_schedule/ultimate.py | import schedule
import time
import sys
import os
import random
from tqdm import tqdm
sys.path.append(os.path.join(sys.path[0],'../../'))
from instabot import Bot
bot = Bot()
bot.login()
bot.logger.info("ULTIMATE script. 24hours save")
comments_file_name = "comments.txt"
random_user_file = bot.read_list_from_file("username_database.txt")
random_user=random.choice(random_user_file)
random_hashtag_file = bot.read_list_from_file("hashtag_database.txt")
random_hashtag=random.choice(random_hashtag_file)
def job1(): bot.like_hashtag(random_hashtag, amount=int(700/24))
def job2(): bot.like_timeline(amount=int(300/24))
def job3(): bot.like_followers(random_user, nlikes=3)
def job4(): bot.follow_followers(random_list)
def job5(): bot.comment_medias(bot.get_timeline_medias())
def job6(): bot.unfollow_non_followers()
schedule.every(1).hours.do(job1)
schedule.every(1).hours.do(job2)
schedule.every(1).days.at("16:00").do(job3)
schedule.every(1).days.at("11:00").do(job4)
schedule.every(2).hours.do(job5)
schedule.every(1).days.at("08:00").do(job6)
while True:
schedule.run_pending()
time.sleep(1)
| import schedule
import time
import sys
import os
import random
from tqdm import tqdm
sys.path.append(os.path.join(sys.path[0],'../../'))
from instabot import Bot
bot = Bot()
bot.login()
bot.logger.info("ULTIMATE script. 24hours save")
comments_file_name = "comments.txt"
random_user_file = bot.read_list_from_file("username_database.txt")
random_user=random.choice(random_user_file)
random_hashtag_file = bot.read_list_from_file("hashtag_database.txt")
random_hashtag=random.choice(random_hashtag_file)
def job1(): bot.like_hashtag(random_hashtag, amount=(700/24))
def job2(): bot.like_timeline(amount=300/24)
def job3(): bot.like_followers(random_user, nlikes=3)
def job4(): bot.follow_followers(random_list)
def job5(): bot.comment_medias(bot.get_timeline_medias())
def job6(): bot.unfollow_non_followers()
schedule.every(1).hours.do(job1)
schedule.every(1).hours.do(job2)
schedule.every(1).days.at("16:00").do(job3)
schedule.every(1).days.at("11:00").do(job4)
schedule.every(2).hours.do(job5)
schedule.every(1).days.at("08:00").do(job6)
while True:
schedule.run_pending()
time.sleep(1)
| apache-2.0 | Python |
911c8ebd648fb0e14d5b7d713ebdb2b0fc42b63e | add documenation for restore_signals | datalib/proclib | proclib/helpers.py | proclib/helpers.py | """
proclib.helpers
~~~~~~~~~~~~~~~
Helper utility functions.
"""
import shlex
import signal
TO_RESTORE = tuple(
getattr(signal, sig) for sig in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ')
if hasattr(signal, sig)
)
def restore_signals(signals=TO_RESTORE):
"""
Restores signals before the process is
executed so that they can be terminated
with SIGPIPE.
:param signals: Defaults to SIGPIPE,
SIGXFZ, and SIGXFSZ (if available).
"""
for sig in signals:
signal.signal(sig, signal.SIG_DFL)
def str_parse(cmds):
"""
Given a string of commands *cmds* yield the
command in chunks, separated by the pipe
operator '|'.
:param cmds: String of commands.
"""
buff = []
for item in shlex.split(cmds):
if item == '|':
yield buff
buff = []
continue
buff.append(item)
if buff:
yield buff
def list_parse(cmds):
"""
Given a list of commands, if they are a
string then parse them, else yield them
as if they were already correctly formatted.
:param cmds: List of commands.
"""
for item in cmds:
if isinstance(item, str):
for item in str_parse(item):
yield item
continue
yield list(item)
class cached_property(object):
"""
Property that is computed only once during
the lifetime of an object, i.e. the second
time the attribute is looked up there is
zero cost overhead.
:param func: Function to wrap over.
"""
def __init__(self, func):
self.func = func
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __get__(self, obj, objtype=None):
if obj is None:
return self
res = obj.__dict__[self.__name__] = self.func(obj)
return res
| """
proclib.helpers
~~~~~~~~~~~~~~~
Helper utility functions.
"""
import shlex
import signal
TO_RESTORE = tuple(
getattr(signal, sig) for sig in ('SIGPIPE', 'SIGXFZ', 'SIGXFSZ')
if hasattr(signal, sig)
)
def restore_signals(signals=TO_RESTORE):
"""
Function for restoring the signals
"""
for sig in signals:
signal.signal(sig, signal.SIG_DFL)
def str_parse(cmds):
"""
Given a string of commands *cmds* yield the
command in chunks, separated by the pipe
operator '|'.
:param cmds: String of commands.
"""
buff = []
for item in shlex.split(cmds):
if item == '|':
yield buff
buff = []
continue
buff.append(item)
if buff:
yield buff
def list_parse(cmds):
"""
Given a list of commands, if they are a
string then parse them, else yield them
as if they were already correctly formatted.
:param cmds: List of commands.
"""
for item in cmds:
if isinstance(item, str):
for item in str_parse(item):
yield item
continue
yield list(item)
class cached_property(object):
"""
Property that is computed only once during
the lifetime of an object, i.e. the second
time the attribute is looked up there is
zero cost overhead.
:param func: Function to wrap over.
"""
def __init__(self, func):
self.func = func
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __get__(self, obj, objtype=None):
if obj is None:
return self
res = obj.__dict__[self.__name__] = self.func(obj)
return res
| mit | Python |
f5dbe1df012241d680cd05c2a96cf4c5d3ccb463 | add logger | legnaleurc/acddl,legnaleurc/acddl,legnaleurc/acddl | acddl/util.py | acddl/util.py | import argparse
import signal
import sys
from tornado import ioloop, web, httpserver
from wcpan.logger import setup as setup_logger, INFO
from . import api
from .controller import RootController
def parse_args(args):
parser = argparse.ArgumentParser('acddl')
parser.add_argument('-l', '--listen', required=True, type=int)
parser.add_argument('-r', '--root', required=True, type=str)
args = parser.parse_args(args)
return args
def main(args=None):
if args is None:
args = sys.argv
args = parse_args(args[1:])
loggers = setup_logger('/tmp/acddl.log', (
'tornado.access',
'tornado.application',
'tornado.general',
'requests.packages.urllib3.connectionpool',
'wcpan.worker',
'acddl',
))
main_loop = ioloop.IOLoop.instance()
controller = RootController(args.root)
signal.signal(signal.SIGINT, controller.close)
application = web.Application([
(r'/nodes', api.NodesHandler),
(r'/nodes/([a-zA-Z0-9\-_]{22})', api.NodesHandler),
(r'/cache', api.CacheHandler),
(r'/cache/([a-zA-Z0-9\-_]{22})', api.CacheHandler),
], controller=controller)
server = httpserver.HTTPServer(application)
server.listen(args.listen)
INFO('acddl') << 'ready'
main_loop.start()
main_loop.close()
return 0
| import argparse
import signal
import sys
from tornado import ioloop, web, httpserver
from wcpan.logger import setup as setup_logger, INFO
from . import api
from .controller import RootController
def parse_args(args):
parser = argparse.ArgumentParser('acddl')
parser.add_argument('-l', '--listen', required=True, type=int)
parser.add_argument('-r', '--root', required=True, type=str)
args = parser.parse_args(args)
return args
def main(args=None):
if args is None:
args = sys.argv
args = parse_args(args[1:])
loggers = setup_logger('/tmp/acddl.log', (
'tornado.access',
'tornado.application',
'tornado.general',
'requests.packages.urllib3.connectionpool',
'acddl',
))
main_loop = ioloop.IOLoop.instance()
controller = RootController(args.root)
signal.signal(signal.SIGINT, controller.close)
application = web.Application([
(r'/nodes', api.NodesHandler),
(r'/nodes/([a-zA-Z0-9\-_]{22})', api.NodesHandler),
(r'/cache', api.CacheHandler),
(r'/cache/([a-zA-Z0-9\-_]{22})', api.CacheHandler),
], controller=controller)
server = httpserver.HTTPServer(application)
server.listen(args.listen)
INFO('acddl') << 'ready'
main_loop.start()
main_loop.close()
return 0
| mit | Python |
995f2f032fd37273d976bc94a2b5b28b2e2abbbd | Fix public room pagination for client_reader app | TribeMedia/synapse,matrix-org/synapse,TribeMedia/synapse,TribeMedia/synapse,matrix-org/synapse,matrix-org/synapse,matrix-org/synapse,matrix-org/synapse,TribeMedia/synapse,matrix-org/synapse,TribeMedia/synapse | synapse/replication/slave/storage/room.py | synapse/replication/slave/storage/room.py | # -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import BaseSlavedStore
from synapse.storage import DataStore
from ._slaved_id_tracker import SlavedIdTracker
class RoomStore(BaseSlavedStore):
def __init__(self, db_conn, hs):
super(RoomStore, self).__init__(db_conn, hs)
self._public_room_id_gen = SlavedIdTracker(
db_conn, "public_room_list_stream", "stream_id"
)
get_public_room_ids = DataStore.get_public_room_ids.__func__
get_current_public_room_stream_id = (
DataStore.get_current_public_room_stream_id.__func__
)
get_public_room_ids_at_stream_id = (
DataStore.get_public_room_ids_at_stream_id.__func__
)
get_public_room_ids_at_stream_id_txn = (
DataStore.get_public_room_ids_at_stream_id_txn.__func__
)
get_published_at_stream_id_txn = (
DataStore.get_published_at_stream_id_txn.__func__
)
get_public_room_changes = DataStore.get_public_room_changes.__func__
def stream_positions(self):
result = super(RoomStore, self).stream_positions()
result["public_rooms"] = self._public_room_id_gen.get_current_token()
return result
def process_replication(self, result):
stream = result.get("public_rooms")
if stream:
self._public_room_id_gen.advance(int(stream["position"]))
return super(RoomStore, self).process_replication(result)
| # -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import BaseSlavedStore
from synapse.storage import DataStore
from ._slaved_id_tracker import SlavedIdTracker
class RoomStore(BaseSlavedStore):
def __init__(self, db_conn, hs):
super(RoomStore, self).__init__(db_conn, hs)
self._public_room_id_gen = SlavedIdTracker(
db_conn, "public_room_list_stream", "stream_id"
)
get_public_room_ids = DataStore.get_public_room_ids.__func__
get_current_public_room_stream_id = (
DataStore.get_current_public_room_stream_id.__func__
)
get_public_room_ids_at_stream_id = (
DataStore.get_public_room_ids_at_stream_id.__func__
)
get_public_room_ids_at_stream_id_txn = (
DataStore.get_public_room_ids_at_stream_id_txn.__func__
)
get_published_at_stream_id_txn = (
DataStore.get_published_at_stream_id_txn.__func__
)
def stream_positions(self):
result = super(RoomStore, self).stream_positions()
result["public_rooms"] = self._public_room_id_gen.get_current_token()
return result
def process_replication(self, result):
stream = result.get("public_rooms")
if stream:
self._public_room_id_gen.advance(int(stream["position"]))
return super(RoomStore, self).process_replication(result)
| apache-2.0 | Python |
ed91a2a5caa296c35bf806cac4b92ba3ceaa5441 | Remove unused imports (#10) | Hackathonners/vania | examples/simple_distribution.py | examples/simple_distribution.py | from vania import FairDistributor
def main():
# User input for the number of targets and objects.
users = ['user1', 'user2']
tasks = ['task1', 'task2']
preferences = [
[1, 2],
[2, 1],
]
# Run solver
distributor = FairDistributor(users, tasks, preferences)
output = distributor.distribute(output='problem.lp')
# Output
print(output)
if __name__ == '__main__':
main()
| import sys
import time
from random import shuffle
from vania.fair_distributor import FairDistributor
def main():
# User input for the number of targets and objects.
users = ['user1', 'user2']
tasks = ['task1', 'task2']
preferences = [
[1, 2],
[2, 1],
]
# Run solver
distributor = FairDistributor(users, tasks, preferences)
output = distributor.distribute(output='problem.lp')
# Output
print(output)
if __name__ == '__main__':
main()
| mit | Python |
0dfa43733b9cf5be5722520f2949da77bf8d9dc6 | add safe_format for None | guiniol/py3status,ultrabug/py3status,valdur55/py3status,vvoland/py3status,tobes/py3status,docwalter/py3status,guiniol/py3status,Andrwe/py3status,Andrwe/py3status,tobes/py3status,ultrabug/py3status,alexoneill/py3status,ultrabug/py3status,valdur55/py3status,valdur55/py3status | py3status/modules/getjson.py | py3status/modules/getjson.py | # -*- coding: utf-8 -*-
"""
Display JSON data fetched from a URL.
This module gets the given `url` configuration parameter and assumes the
response is a JSON object. The keys of the JSON object are used as the format
placeholders. The format placeholders are replaced by the value. Objects that
are nested can be accessed by using the `delimiter` configuration parameter
in between.
Examples:
```
# Straightforward key replacement
url = 'http://ip-api.com/json'
format = '{lat}, {lon}'
# Access child objects
url = 'http://api.icndb.com/jokes/random'
format = '{value-joke}'
# Access title from 0th element of articles list
url = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey={KEY}'
format = '{articles-0-title}'
# Access if top-level object is a list
url = 'https://jsonplaceholder.typicode.com/posts/1/comments'
format = '{0-name}'
```
Configuration parameters:
cache_timeout: refresh interval for this module (default 30)
delimiter: the delimiter between parent and child objects (default '-')
format: display format for this module (default None)
timeout: time to wait for a response, in seconds (default 5)
url: specify URL to fetch JSON from (default None)
Format placeholders:
Placeholders will be replaced by the JSON keys.
Placeholders for objects with sub-objects are flattened using 'delimiter'
in between (eg. {'parent': {'child': 'value'}} will use placeholder
{parent-child}).
Placeholders for list elements have 'delimiter' followed by the index
(eg. {'parent': ['this', 'that']) will use placeholders {parent-0}
for 'this' and {parent-1} for 'that'.
@author vicyap
SAMPLE OUTPUT
{'full_text': 'Github: Everything operating normally'}
"""
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 30
delimiter = '-'
format = None
timeout = 5
url = None
def getjson(self):
"""
"""
try:
json_data = self.py3.request(self.url, timeout=self.timeout).json()
json_data = self.py3.flatten_dict(json_data, self.delimiter, True)
except self.py3.RequestException:
json_data = None
if json_data:
full_text = self.py3.safe_format(self.format, json_data)
else:
full_text = ''
return {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': full_text
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| # -*- coding: utf-8 -*-
"""
Display JSON response from a URL.
This module gets the given `url` configuration parameter and assumes the
response is a JSON object. The keys of the JSON object are used as the format
placeholders. The format placeholders are replaced by the value. Objects that
are nested can be accessed by using the `delimiter` configuration parameter
in between.
Examples:
```
# Straightforward key replacement
url = 'http://ip-api.com/json'
format = '{lat}, {lon}'
# Access child objects
url = 'http://api.icndb.com/jokes/random'
format = '{value-joke}'
# Access title from 0th element of articles list
url = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey={KEY}'
format = '{articles-0-title}'
# Access if top-level object is a list
url = 'https://jsonplaceholder.typicode.com/posts/1/comments'
format = '{0-name}'
```
Configuration parameters:
cache_timeout: refresh interval for this module (default 30)
delimiter: the delimiter between parent and child objects (default '-')
format: display format for this module (default None)
timeout: time to wait for a response, in seconds (default 5)
url: specify URL to fetch JSON from (default None)
Format placeholders:
Placeholders will be replaced by the JSON keys.
Placeholders for objects with sub-objects are flattened using 'delimiter'
in between (eg. {'parent': {'child': 'value'}} will use placeholder
{parent-child}).
Placeholders for list elements have 'delimiter' followed by the index
(eg. {'parent': ['this', 'that']) will use placeholders {parent-0}
for 'this' and {parent-1} for 'that'.
@author vicyap
SAMPLE OUTPUT
{'full_text': 'Github: Everything operating normally'}
"""
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 30
delimiter = '-'
format = None
timeout = 5
url = None
def getjson(self):
"""
"""
response = {
'cached_until': self.py3.time_in(self.cache_timeout),
}
try:
resp = self.py3.request(self.url, timeout=self.timeout)
status = resp.status_code == 200
resp = resp.json()
except self.py3.RequestException:
resp = None
status = False
if status:
response['full_text'] = self.py3.safe_format(
self.format,
self.py3.flatten_dict(
resp, delimiter=self.delimiter, intermediates=True
)
)
else:
response['full_text'] = ''
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| bsd-3-clause | Python |
933308ea388f46273648af588dfb54cd78e71d12 | return on same line | tijko/Project-Euler,tijko/Project-Euler,tijko/Project-Euler,tijko/Project-Euler,tijko/Project-Euler,tijko/Project-Euler,tijko/Project-Euler,tijko/Project-Euler | py_solutions_1-10/Euler_2.py | py_solutions_1-10/Euler_2.py | # sum of even fibonacci numbers below 4 million?
import timeit
start = timeit.default_timer()
#def euler_2():
# a1 = 1
# b1 = 1
# b2 = 0
# limit = 4000000
# while b2 <= limit:
# b2 = a1 + b1
# a1 = b1
# b1 = b2
# if b2 % 2 == 0:
# yield b2
def euler_2(t=0, n=2, ln=1):
if n >= 4000000: return t
elif n % 2 == 0: return euler_2(t + n, n + ln, n)
return euler_2(t, n + ln, n)
print "Answer: %s" % euler_2()
#print "Answer: %s" % sum([i for i in euler_2()])
stop = timeit.default_timer()
print "Time: %f" % (stop - start)
| # sum of even fibonacci numbers below 4 million?
import timeit
start = timeit.default_timer()
#def euler_2():
# a1 = 1
# b1 = 1
# b2 = 0
# limit = 4000000
# while b2 <= limit:
# b2 = a1 + b1
# a1 = b1
# b1 = b2
# if b2 % 2 == 0:
# yield b2
def euler_2(t=0, n=2, ln=1):
if n >= 4000000:
return t
if n % 2 == 0:
return euler_2(t + n, n + ln, n)
return euler_2(t, n + ln, n)
print "Answer: %s" % euler_2()
#print "Answer: %s" % sum([i for i in euler_2()])
stop = timeit.default_timer()
print "Time: %f" % (stop - start)
| mit | Python |
70043341fef4942b0486074a5f2ae50ea537de72 | Update version.py | uezo/minette-python | minette/version.py | minette/version.py | __version__ = "0.4.dev4"
| __version__ = "0.4.dev3"
| apache-2.0 | Python |
a3413ab2932dfcf5fce1110b87ea2333ff908fac | fix typo | balanced-ops/infra-zookeeper | formation/zookeeper.py | formation/zookeeper.py | #!/usr/bin/python
from confu import atlas
from troposphere import ( Template, FindInMap, GetAtt, Ref, Parameter, Join, Base64, Select, Output, ec2 as ec2 )
template = Template()
template.add_description('ZooKeeper')
atlas.infra_params(template) ## ssh_key, Env, Silo
atlas.conf_params(template) ## Conf Name, Conf Version, Conf tarball bucket
atlas.instance_params(
template,
roles_default=['zookeeper',],
iam_default='zookeeper',
)
atlas.scaling_params(template)
atlas.mappings(
template,
accounts=[atlas.poundpay],
)
kafka_secgrp = atlas.instance_secgrp(
template,
name="Kafka",
## TODO: add Kafka SG roles later
)
zk_secgrp = atlas.instance_secgrp(
template,
name="ZooKeeper",
SecurityGroupIngress=[
## not sure if it will work properly or not!
ec2.SecurityGroupIngress(
"ZKFollowers",
GroupId=Ref(zk_secgrp),
SourceSecurityGroupId=Ref(zk_secgrp),
FromPort="2888",
ToPort="2888",
IpProtocol="tcp",
),
ec2.SecurityGroupIngress(
"ZKServers",
GroupId=Ref(zk_secgrp),
SourceSecurityGroupId=Ref(zk_secgrp),
FromPort="3888",
ToPort="3888",
IpProtocol="tcp",
),
ec2.SecurityGroupIngress(
"ZKClients",
GroupId=Ref(zk_secgrp),
SourceSecurityGroupId=Ref(kafka_secgrp),
FromPort="2181",
ToPort="2181",
IpProtocol="tcp",
),
]
)
i_meta_data = {}
atlas.cfn_auth_metadata(i_meta_data)
atlas.cfn_init_metadata(i_meta_data)
i_launchconf = atlas.instance_launchconf(
template,
"ZK",
Metadata=i_meta_data,
SecurityGroups=[Ref(zk_secgrp)],
)
scaling_group = atlas.instance_scalegrp(
template,
'ZK',
LaunchConfigurationName=Ref(i_launchconf),
MinSize=Ref('MinSize'),
MaxSize=Ref('MaxSize'),
DesiredCapacity=Ref('DesiredCapacity'),
)
if __name__ == '__main__':
print template.to_json(indent=4, sort_keys=True)
| #!/usr/bin/python
from confu import atlas
from troposphere import ( Template, FindInMap, GetAtt, Ref, Parameter, Join, Base64, Select, Output, ec2 as ec2 )
template = Template()
template.add_description('ZooKeeper')
atlas.infra_params(template) ## ssh_key, Env, Silo
atlas.conf_params(template) ## Conf Name, Conf Version, Conf tarball bucket
atlas.instance_params(
template,
roles_default=['zookeeper',],
iam_default='zookeeper',
)
atlas.scaling_params(template)
atlas.mappings(
template,
accounts=[atlas.poundpay],
)
kafka_secgrp = atlas.instance_secgrp(
template,
name="Kafka",
## TODO: add Kafka SG roles later
)
zk_secgrp = atlas.instance_secgrp(
template,
name="ZooKeeper",
SecurityGroupIngress=[
## not sure if it will work properly or not!
ec2.SecurityGroupIngress(
"ZKFollowers",
GroupId=Ref(zk_secgrp),
SourceSecurityGroupId=Ref(zk_secgrp),
FromPort="2888",
ToPort="2888",
IpProtocol="tcp",
),
ec2.SecurityGroupIngress(
"ZKServers",
GroupId=Ref(zk_secgrp),
SourceSecurityGroupId=Ref(zk_secgrp),
FromPort="3888",
ToPort="3888",
IpProtocol="tcp",
),
ec2.SecurityGroupIngress(
"ZKClients",
GroupId=Ref(zk_secgrp),
SourceSecurityGroupId=Ref(kafka_secgrp),
FromPort="2181",
ToPort="2181",
IpProtocol="tcp",
),
]
)
i_meta_data = {}
atlas.cfn_auth_metadata(i_meta_data)
atlas.cfn_init_metadata(i_meta_data)
i_launchconf = atlas.instance_launchconf(
template,
"ZK",
Metadata=i_meta_data,
SecurityGroups=[Ref(zk_secgrp)],
)
scaling_group = atlas.instance_scalegrp(
template,
'ZK',
LaunchConfigurationName=Ref(launchconf),
MinSize=Ref('MinSize'),
MaxSize=Ref('MaxSize'),
DesiredCapacity=Ref('DesiredCapacity'),
)
if __name__ == '__main__':
print template.to_json(indent=4, sort_keys=True)
| mit | Python |
9d7db8f4ba86e8e5e526efacd878b31403b75e90 | fix test_examples | bjodah/pycodeexport,bjodah/pycodeexport | examples/tests/test_examples.py | examples/tests/test_examples.py | # -*- coding: utf-8 -*-
import glob
import os
import subprocess
import sys
import pytest
tests = glob.glob(os.path.join(os.path.dirname(__file__), '../*_main.py'))


@pytest.mark.parametrize('pypath', tests)
def test_examples(pypath):
    """Run one example's ``clean`` target and require a zero exit status."""
    examples_dir = os.path.join(os.path.dirname(__file__), '..')
    proc = subprocess.Popen(
        [sys.executable, pypath, 'clean'],
        cwd=examples_dir,
    )
    assert proc.wait() == 0  # SUCCESS==0
| # -*- coding: utf-8 -*-
import glob
import os
import subprocess
import sys
import pytest
tests = glob.glob(os.path.join(os.path.dirname(__file__), '../*_main.py'))


@pytest.mark.parametrize('pypath', tests)
def test_examples(pypath):
    """Run each example's ``clean`` target and require a zero exit status."""
    # Use the interpreter that is running the tests: hard-coding
    # 'python3'/'python' can silently pick a different installation from
    # PATH (wrong virtualenv, wrong minor version).
    p = subprocess.Popen(
        [sys.executable, pypath, 'clean'],
        cwd=os.path.join(os.path.dirname(__file__), '..'))
    assert p.wait() == 0  # SUCCESS==0
| bsd-2-clause | Python |
ce15f3a9143e6d9b640cd49f695c3179957c211e | comment quanmin | ieiayaobb/lushi8,ieiayaobb/lushi8,ieiayaobb/lushi8 | web/management/commands/fetch.py | web/management/commands/fetch.py | # -*- coding: utf-8 -*-
import sys
from web.fetch import Fetcher
from django.core.management.base import BaseCommand
import leancloud
from settings import LEAN_CLOUD_ID, LEAN_CLOUD_SECRET
reload(sys)
sys.setdefaultencoding("utf-8")
class Command(BaseCommand):
    """Management command: wipe all stored ``Chairman`` objects, then
    re-fetch live-stream channel data from each platform and save it.
    """
    def handle(self, *args, **options):
        """Entry point invoked by ``manage.py``."""
        leancloud.init(LEAN_CLOUD_ID, LEAN_CLOUD_SECRET)
        query = leancloud.Query('Chairman')
        # Page through all existing Chairman rows and delete them in batches
        # of ``limit``.
        allDataCompleted = False
        batch = 0
        limit = 1000
        while not allDataCompleted:
            query.limit(limit)
            # NOTE(review): skipping ``batch * limit`` after destroying the
            # previous pages appears to skip over surviving rows -- confirm
            # against LeanCloud's delete/pagination semantics.
            query.skip(batch * limit)
            query.add_ascending('createdAt')
            resultList = query.find()
            if len(resultList) < limit:
                # A short page means this was the last batch.
                allDataCompleted = True
            leancloud.Object.destroy_all(resultList)
            batch += 1
        # Re-fetch channel data from every supported platform
        # (quanmin is intentionally disabled -- see commit message).
        fetcher = Fetcher()
        fetcher.fetch_cc()
        fetcher.fetch_douyu()
        fetcher.fetch_longzhu()
        # fetcher.fetch_quanmin()
        fetcher.fetch_xiongmao()
        fetcher.fetch_zhanqi()
        fetcher.fetch_huya()
        for chairman in fetcher.chairmans:
            try:
                chairman.save()
            except Exception, e:
                # Best-effort: report the failure and keep saving the rest.
                print e
| # -*- coding: utf-8 -*-
import sys
from web.fetch import Fetcher
from django.core.management.base import BaseCommand
import leancloud
from settings import LEAN_CLOUD_ID, LEAN_CLOUD_SECRET
reload(sys)
sys.setdefaultencoding("utf-8")
class Command(BaseCommand):
    """Management command: wipe all stored ``Chairman`` objects, then
    re-fetch live-stream channel data from each platform and save it.
    """
    def handle(self, *args, **options):
        """Entry point invoked by ``manage.py``."""
        leancloud.init(LEAN_CLOUD_ID, LEAN_CLOUD_SECRET)
        query = leancloud.Query('Chairman')
        # Page through all existing Chairman rows and delete them in batches
        # of ``limit``.
        allDataCompleted = False
        batch = 0
        limit = 1000
        while not allDataCompleted:
            query.limit(limit)
            # NOTE(review): skipping ``batch * limit`` after destroying the
            # previous pages appears to skip over surviving rows -- confirm
            # against LeanCloud's delete/pagination semantics.
            query.skip(batch * limit)
            query.add_ascending('createdAt')
            resultList = query.find()
            if len(resultList) < limit:
                # A short page means this was the last batch.
                allDataCompleted = True
            leancloud.Object.destroy_all(resultList)
            batch += 1
        # Re-fetch channel data from every supported platform.
        fetcher = Fetcher()
        fetcher.fetch_cc()
        fetcher.fetch_douyu()
        fetcher.fetch_longzhu()
        fetcher.fetch_quanmin()
        fetcher.fetch_xiongmao()
        fetcher.fetch_zhanqi()
        fetcher.fetch_huya()
        for chairman in fetcher.chairmans:
            try:
                chairman.save()
            except Exception, e:
                # Best-effort: report the failure and keep saving the rest.
                print e
| mit | Python |
915098188119a930600a2e202137ad043f18a666 | Bump version 2.0.3 | arteria/django-hijack,arteria/django-hijack,arteria/django-hijack | hijack/__init__.py | hijack/__init__.py | # -*- coding: utf-8 -*-
__version__ = '2.0.3' # pragma: no cover
default_app_config = 'hijack.apps.HijackConfig'
| # -*- coding: utf-8 -*-
__version__ = '2.0.2' # pragma: no cover
default_app_config = 'hijack.apps.HijackConfig'
| mit | Python |
7dc3ca92b33951ef712e463d650aaeb5c3ef7403 | Bump to version 0.1.5 | posterior/treecat,posterior/treecat | treecat/__init__.py | treecat/__init__.py | __version__ = '0.1.5'
| __version__ = '0.1.4'
| apache-2.0 | Python |
8d519a811b05ffa6b63444ccbf85fb7c4e07f8ef | rename utils to math | ojengwa/algpy | algpy/math.py | algpy/math.py | """Summary."""
from __future__ import absolute_import
class Fraction(object):
    """An integer fraction with an optional whole-number part.

    The value represented is ``whole_number + numerator / denominator``.
    ``self.top`` stores the improper-fraction numerator
    (``whole_number * denominator + numerator``).
    """

    def __init__(self, numerator, denominator=1, whole_number=0):
        """
        Constructor.

        Args:
            numerator (int): The numerator or top part of the fraction
            denominator (int): The denominator or bottom part of the fraction
            whole_number (int): Optional whole part of a mixed number

        Raises:
            ValueError: If ``denominator`` is zero.
            TypeError: If any argument is not an integer.
        """
        if denominator == 0:
            raise ValueError("Denominator cannot be zero.")
        if not isinstance(numerator, int) or not isinstance(denominator, int)\
                or not isinstance(whole_number, int):
            raise TypeError(
                'Invalid argument type. All argument must be integrers')
        # Improper-fraction numerator: folds the whole part into the top.
        self.top = (whole_number * denominator) + numerator
        self.denominator = denominator
        self.whole = whole_number
        self.numerator = numerator

    @staticmethod
    def _gcd(a, b):
        """Return the greatest common divisor of two non-negative ints."""
        while b:
            a, b = b, a % b
        return a

    def __str__(self):
        """Return the fraction formatted as ``whole numerator/denominator``."""
        # Use the fractional part only: the previous code interpolated
        # ``self.top``, which double-counts the whole part (e.g.
        # Fraction(1, 2, 3) printed "3 7/2" instead of "3 1/2").
        return "{0} {1}/{2}".format(self.whole, self.numerator,
                                    self.denominator)

    def __unicode__(self):
        """Return the same ``whole numerator/denominator`` text as __str__."""
        return "{0} {1}/{2}".format(self.whole, self.numerator,
                                    self.denominator)

    def __add__(self, other):
        """
        Add two fractions, returning a normalized result.

        Args:
            other (Fraction): The other fraction to add

        Returns:
            Fraction: The sum, reduced to lowest terms with the whole-number
            part extracted.
        """
        denominator = self.denominator * other.denominator
        numerator = (self.top * other.denominator) + \
            (other.top * self.denominator)
        whole, numerator = divmod(numerator, denominator)
        if numerator:
            # Reduce by the gcd. The previous code divided only the
            # denominator (producing wrong values such as 1/4 + 1/4 -> 8/2)
            # and raised ZeroDivisionError whenever the fractional part
            # summed to a whole number.
            common = self._gcd(numerator, denominator)
            numerator //= common
            denominator //= common
        else:
            denominator = 1
        return Fraction(numerator, denominator, whole)
| """Summary."""
from __future__ import absolute_import
class Fraction(object):
    """A simple integer fraction (``numerator / denominator``)."""
    def __init__(self, numerator, denominator):
        """
        Constructor.
        Args:
            numerator (int): The numerator or top part of the fraction
            denominator (int): The denominator or bottom part of the fraction
        """
        self.numerator = numerator
        self.denominator = denominator
    def __str__(self):
        """
        Return the fraction formatted as ``numerator/denominator``.

        The previous implementation interpolated ``self`` (infinite
        recursion through ``__str__``) and read nonexistent ``top`` /
        ``bottom`` attributes, so it could never return.
        """
        return "{0}/{1}".format(self.numerator, self.denominator)
    def __unicode__(self):
        """Return the same ``numerator/denominator`` text as ``__str__``."""
        return "{0}/{1}".format(self.numerator, self.denominator)
| mit | Python |
bcc9c398eafeaf2b1ae4b02c67e1f6b4260f9355 | Enable OrderedEnqueuer from keras in tf.keras. (#19183) | ageron/tensorflow,nburn42/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,benoitsteiner/tensorflow-xsmm,aselle/tensorflow,jbedorf/tensorflow,frreiss/tensorflow-fred,xodus7/tensorflow,ageron/tensorflow,snnn/tensorflow,dancingdan/tensorflow,alsrgv/tensorflow,nburn42/tensorflow,xodus7/tensorflow,ZhangXinNan/tensorflow,gunan/tensorflow,kobejean/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Bismarrck/tensorflow,benoitsteiner/tensorflow-xsmm,apark263/tensorflow,manipopopo/tensorflow,seanli9jan/tensorflow,snnn/tensorflow,nburn42/tensorflow,snnn/tensorflow,caisq/tensorflow,petewarden/tensorflow,xzturn/tensorflow,petewarden/tensorflow,xzturn/tensorflow,kevin-coder/tensorflow-fork,hehongliang/tensorflow,karllessard/tensorflow,yongtang/tensorflow,dendisuhubdy/tensorflow,benoitsteiner/tensorflow-xsmm,dancingdan/tensorflow,caisq/tensorflow,dancingdan/tensorflow,chemelnucfin/tensorflow,gunan/tensorflow,snnn/tensorflow,gojira/tensorflow,arborh/tensorflow,AnishShah/tensorflow,seanli9jan/tensorflow,apark263/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,meteorcloudy/tensorflow,girving/tensorflow,AnishShah/tensorflow,gautam1858/tensorflow,ghchinoy/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,xodus7/tensorflow,ghchinoy/tensorflow,dendisuhubdy/tensorflow,apark263/tensorflow,petewarden/tensorflow,renyi533/tensorflow,petewarden/tensorflow,Bismarrck/tensorflow,seanli9jan/tensorflow,jalexvig/tensorflow,frreiss/tensorflow-fred,lukeiwanski/tensorflow,ZhangXinNan/tensorflow,paolodedios/tensorflow,jhseu/tensorflow,adit-chandra/tensorflow,manipopopo/tensorflow,davidzchen/tensorflow,drpngx/tensorflow,seanli9jan/tensorflow,yongtang/tensorflow,kevin-coder/tensorflow-fork,jalexvig/tensorflow,yongtang/tensorflow,ZhangXinNan/tensorflow,frreiss/tensorflow-fred,dongjoon-hyun/tenso
rflow,renyi533/tensorflow,meteorcloudy/tensorflow,seanli9jan/tensorflow,frreiss/tensorflow-fred,gunan/tensorflow,gunan/tensorflow,adit-chandra/tensorflow,yanchen036/tensorflow,gunan/tensorflow,aselle/tensorflow,aselle/tensorflow,Intel-tensorflow/tensorflow,chemelnucfin/tensorflow,arborh/tensorflow,gautam1858/tensorflow,brchiu/tensorflow,arborh/tensorflow,xodus7/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,alsrgv/tensorflow,snnn/tensorflow,chemelnucfin/tensorflow,jart/tensorflow,AnishShah/tensorflow,adit-chandra/tensorflow,Bismarrck/tensorflow,dongjoon-hyun/tensorflow,jbedorf/tensorflow,brchiu/tensorflow,aam-at/tensorflow,theflofly/tensorflow,kobejean/tensorflow,hfp/tensorflow-xsmm,dongjoon-hyun/tensorflow,hfp/tensorflow-xsmm,brchiu/tensorflow,gautam1858/tensorflow,Intel-Corporation/tensorflow,jalexvig/tensorflow,chemelnucfin/tensorflow,gojira/tensorflow,xzturn/tensorflow,ghchinoy/tensorflow,hehongliang/tensorflow,renyi533/tensorflow,annarev/tensorflow,yanchen036/tensorflow,paolodedios/tensorflow,Bismarrck/tensorflow,xodus7/tensorflow,girving/tensorflow,caisq/tensorflow,xodus7/tensorflow,gojira/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,ppwwyyxx/tensorflow,dendisuhubdy/tensorflow,kobejean/tensorflow,jendap/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,xodus7/tensorflow,DavidNorman/tensorflow,alshedivat/tensorflow,aam-at/tensorflow,ZhangXinNan/tensorflow,aldian/tensorflow,kobejean/tensorflow,gojira/tensorflow,Intel-Corporation/tensorflow,alsrgv/tensorflow,alshedivat/tensorflow,jart/tensorflow,dongjoon-hyun/tensorflow,AnishShah/tensorflow,hfp/tensorflow-xsmm,frreiss/tensorflow-fred,nburn42/tensorflow,theflofly/tensorflow,jhseu/tensorflow,ghchinoy/tensorflow,jbedorf/tensorflow,jendap/tensorflow,gunan/tensorflow,jhseu/tensorflow,aselle/tensorflow,ghchinoy/tensorflow,gojira/tensorflow,xodus7/tensorflow,brchiu/tensorflow,asimshankar/tensorflow,aam-at/tensorflow,hfp/tensorflow-xsmm,nburn42/tensorflow,alsrgv/tensorflow,girving/t
ensorflow,jendap/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,jart/tensorflow,chemelnucfin/tensorflow,apark263/tensorflow,jart/tensorflow,seanli9jan/tensorflow,seanli9jan/tensorflow,freedomtan/tensorflow,sarvex/tensorflow,kevin-coder/tensorflow-fork,tensorflow/tensorflow-pywrap_tf_optimizer,adit-chandra/tensorflow,meteorcloudy/tensorflow,ppwwyyxx/tensorflow,paolodedios/tensorflow,chemelnucfin/tensorflow,jendap/tensorflow,ghchinoy/tensorflow,adit-chandra/tensorflow,petewarden/tensorflow,jbedorf/tensorflow,aldian/tensorflow,theflofly/tensorflow,kevin-coder/tensorflow-fork,adit-chandra/tensorflow,ghchinoy/tensorflow,gojira/tensorflow,gautam1858/tensorflow,sarvex/tensorflow,paolodedios/tensorflow,brchiu/tensorflow,paolodedios/tensorflow,jalexvig/tensorflow,karllessard/tensorflow,arborh/tensorflow,dendisuhubdy/tensorflow,frreiss/tensorflow-fred,hehongliang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,girving/tensorflow,ghchinoy/tensorflow,kevin-coder/tensorflow-fork,ZhangXinNan/tensorflow,kobejean/tensorflow,ZhangXinNan/tensorflow,Intel-tensorflow/tensorflow,xzturn/tensorflow,kobejean/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,renyi533/tensorflow,aldian/tensorflow,theflofly/tensorflow,aselle/tensorflow,hfp/tensorflow-xsmm,alshedivat/tensorflow,gautam1858/tensorflow,annarev/tensorflow,jendap/tensorflow,tensorflow/tensorflow-pywrap_saved_model,girving/tensorflow,meteorcloudy/tensorflow,dongjoon-hyun/tensorflow,aam-at/tensorflow,aldian/tensorflow,Intel-Corporation/tensorflow,AnishShah/tensorflow,aselle/tensorflow,ppwwyyxx/tensorflow,caisq/tensorflow,davidzchen/tensorflow,dongjoon-hyun/tensorflow,frreiss/tensorflow-fred,yanchen036/tensorflow,gunan/tensorflow,gautam1858/tensorflow,drpngx/tensorflow,hfp/tensorflow-xsmm,jbedorf/tensorflow,karllessard/tensorflow,alsrgv/tensorflow,chemelnucfin/tensorflow,xodus7/tensorflow,apark263/tensorflow,aselle/tensorflow,yanchen036/tensorflow,chemelnucfin/tensor
flow,dancingdan/tensorflow,ageron/tensorflow,benoitsteiner/tensorflow-xsmm,lukeiwanski/tensorflow,aldian/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,brchiu/tensorflow,karllessard/tensorflow,sarvex/tensorflow,yanchen036/tensorflow,cxxgtxy/tensorflow,dendisuhubdy/tensorflow,drpngx/tensorflow,ageron/tensorflow,drpngx/tensorflow,tensorflow/tensorflow,dendisuhubdy/tensorflow,tensorflow/tensorflow,brchiu/tensorflow,davidzchen/tensorflow,asimshankar/tensorflow,paolodedios/tensorflow,dongjoon-hyun/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,aldian/tensorflow,freedomtan/tensorflow,lukeiwanski/tensorflow,yongtang/tensorflow,annarev/tensorflow,manipopopo/tensorflow,karllessard/tensorflow,karllessard/tensorflow,alshedivat/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,kevin-coder/tensorflow-fork,ppwwyyxx/tensorflow,theflofly/tensorflow,jbedorf/tensorflow,renyi533/tensorflow,jalexvig/tensorflow,paolodedios/tensorflow,xzturn/tensorflow,brchiu/tensorflow,ageron/tensorflow,yongtang/tensorflow,theflofly/tensorflow,ppwwyyxx/tensorflow,dendisuhubdy/tensorflow,davidzchen/tensorflow,renyi533/tensorflow,xzturn/tensorflow,alsrgv/tensorflow,asimshankar/tensorflow,freedomtan/tensorflow,jhseu/tensorflow,dongjoon-hyun/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,alsrgv/tensorflow,hfp/tensorflow-xsmm,petewarden/tensorflow,tensorflow/tensorflow,benoitsteiner/tensorflow-xsmm,Intel-tensorflow/tensorflow,kevin-coder/tensorflow-fork,adit-chandra/tensorflow,benoitsteiner/tensorflow-xsmm,DavidNorman/tensorflow,meteorcloudy/tensorflow,xzturn/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,jhseu/tensorflow,hehongliang/tensorflow,brchiu/tensorflow,alsrgv/tensorflow,AnishShah/tensorflow,gojira/tensorflow,theflofly/tensorflow,AnishShah/tensorflow,Bismarrck/tensorflow,asimshankar/tensorflow,tensorflow/tensorflow,jhseu/tensorflow,benoitsteiner/tensorflow-xsmm,jart/tensorflow,manipopopo/tensorflow,tensorf
low/tensorflow-pywrap_saved_model,jhseu/tensorflow,paolodedios/tensorflow,ZhangXinNan/tensorflow,gunan/tensorflow,nburn42/tensorflow,ZhangXinNan/tensorflow,gojira/tensorflow,benoitsteiner/tensorflow-xsmm,lukeiwanski/tensorflow,renyi533/tensorflow,jalexvig/tensorflow,karllessard/tensorflow,freedomtan/tensorflow,apark263/tensorflow,tensorflow/tensorflow-pywrap_saved_model,renyi533/tensorflow,aam-at/tensorflow,aam-at/tensorflow,meteorcloudy/tensorflow,petewarden/tensorflow,dancingdan/tensorflow,petewarden/tensorflow,manipopopo/tensorflow,jhseu/tensorflow,DavidNorman/tensorflow,theflofly/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,aam-at/tensorflow,jalexvig/tensorflow,renyi533/tensorflow,annarev/tensorflow,alsrgv/tensorflow,sarvex/tensorflow,hehongliang/tensorflow,meteorcloudy/tensorflow,alshedivat/tensorflow,DavidNorman/tensorflow,manipopopo/tensorflow,adit-chandra/tensorflow,jalexvig/tensorflow,petewarden/tensorflow,nburn42/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_saved_model,ppwwyyxx/tensorflow,tensorflow/tensorflow,DavidNorman/tensorflow,adit-chandra/tensorflow,arborh/tensorflow,petewarden/tensorflow,frreiss/tensorflow-fred,aam-at/tensorflow,Intel-Corporation/tensorflow,jendap/tensorflow,lukeiwanski/tensorflow,gunan/tensorflow,yanchen036/tensorflow,jbedorf/tensorflow,manipopopo/tensorflow,aldian/tensorflow,ageron/tensorflow,aam-at/tensorflow,kobejean/tensorflow,annarev/tensorflow,theflofly/tensorflow,gautam1858/tensorflow,hfp/tensorflow-xsmm,apark263/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,girving/tensorflow,ghchinoy/tensorflow,kobejean/tensorflow,benoitsteiner/tensorflow-xsmm,Intel-Corporation/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,arborh/tensorflow,jbedorf/tensorflow,gojira/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,aselle/tensorflow,gunan/tensorflow,ghchinoy/tensorflow,annarev/tensorflow,cxxgtxy/tensorflow,arborh/tensorflow,cxxgtx
y/tensorflow,yongtang/tensorflow,yongtang/tensorflow,kobejean/tensorflow,xzturn/tensorflow,caisq/tensorflow,snnn/tensorflow,freedomtan/tensorflow,jbedorf/tensorflow,alshedivat/tensorflow,tensorflow/tensorflow,xzturn/tensorflow,ageron/tensorflow,dongjoon-hyun/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,ZhangXinNan/tensorflow,girving/tensorflow,arborh/tensorflow,Bismarrck/tensorflow,aselle/tensorflow,DavidNorman/tensorflow,freedomtan/tensorflow,ageron/tensorflow,aam-at/tensorflow,cxxgtxy/tensorflow,alshedivat/tensorflow,Bismarrck/tensorflow,nburn42/tensorflow,adit-chandra/tensorflow,theflofly/tensorflow,aselle/tensorflow,chemelnucfin/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,lukeiwanski/tensorflow,sarvex/tensorflow,renyi533/tensorflow,tensorflow/tensorflow,snnn/tensorflow,DavidNorman/tensorflow,gautam1858/tensorflow,brchiu/tensorflow,yongtang/tensorflow,freedomtan/tensorflow,Bismarrck/tensorflow,alsrgv/tensorflow,benoitsteiner/tensorflow-xsmm,petewarden/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,xodus7/tensorflow,alshedivat/tensorflow,jart/tensorflow,Intel-Corporation/tensorflow,cxxgtxy/tensorflow,cxxgtxy/tensorflow,arborh/tensorflow,sarvex/tensorflow,Intel-tensorflow/tensorflow,nburn42/tensorflow,dancingdan/tensorflow,dendisuhubdy/tensorflow,ageron/tensorflow,caisq/tensorflow,jalexvig/tensorflow,tensorflow/tensorflow-pywrap_saved_model,davidzchen/tensorflow,seanli9jan/tensorflow,jendap/tensorflow,nburn42/tensorflow,dongjoon-hyun/tensorflow,jhseu/tensorflow,karllessard/tensorflow,renyi533/tensorflow,dancingdan/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,seanli9jan/tensorflow,freedomtan/tensorflow,annarev/tensorflow,manipopopo/tensorflow,yongtang/tensorflow,girving/tensorflow,AnishShah/tensorflow,alsrgv/tensorflow,AnishShah/tensorflow,xzturn/tensorflow,hfp/tensorflow-xsmm,ZhangXinNan/tensorflow,renyi533/tensorflow,alshedivat/tensorflow,freedomtan/tensorflow,kevin-coder/
tensorflow-fork,meteorcloudy/tensorflow,jart/tensorflow,frreiss/tensorflow-fred,yongtang/tensorflow,karllessard/tensorflow,drpngx/tensorflow,ppwwyyxx/tensorflow,kevin-coder/tensorflow-fork,lukeiwanski/tensorflow,jendap/tensorflow,asimshankar/tensorflow,jhseu/tensorflow,benoitsteiner/tensorflow-xsmm,lukeiwanski/tensorflow,davidzchen/tensorflow,lukeiwanski/tensorflow,gojira/tensorflow,gunan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,chemelnucfin/tensorflow,jbedorf/tensorflow,aam-at/tensorflow,kobejean/tensorflow,davidzchen/tensorflow,drpngx/tensorflow,ppwwyyxx/tensorflow,theflofly/tensorflow,Bismarrck/tensorflow,apark263/tensorflow,apark263/tensorflow,jendap/tensorflow,snnn/tensorflow,annarev/tensorflow,meteorcloudy/tensorflow,drpngx/tensorflow,yanchen036/tensorflow,DavidNorman/tensorflow,davidzchen/tensorflow,freedomtan/tensorflow,DavidNorman/tensorflow,dendisuhubdy/tensorflow,adit-chandra/tensorflow,jalexvig/tensorflow,aldian/tensorflow,manipopopo/tensorflow,DavidNorman/tensorflow,freedomtan/tensorflow,asimshankar/tensorflow,annarev/tensorflow,apark263/tensorflow,dancingdan/tensorflow,jhseu/tensorflow,asimshankar/tensorflow,gunan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,sarvex/tensorflow,Intel-Corporation/tensorflow,yanchen036/tensorflow,alshedivat/tensorflow,jendap/tensorflow,alsrgv/tensorflow,asimshankar/tensorflow,arborh/tensorflow,jhseu/tensorflow,Intel-Corporation/tensorflow,asimshankar/tensorflow,snnn/tensorflow,jbedorf/tensorflow,arborh/tensorflow,ppwwyyxx/tensorflow,hfp/tensorflow-xsmm,apark263/tensorflow,ageron/tensorflow,frreiss/tensorflow-fred,theflofly/tensorflow,Intel-tensorflow/tensorflow,dancingdan/tensorflow,hehongliang/tensorflow,ghchinoy/tensorflow,jbedorf/tensorflow,seanli9jan/tensorflow,ghchinoy/tensorflow,Bismarrck/tensorflow,caisq/tensorflow,petewarden/tensorflow,frreiss/tensorflow-fred,girving/tensorflow,Bismarrck/tensorflow,jalexvig/tensorflow,davidzchen/tensorflow,annarev/tensorflow,annarev/te
nsorflow,adit-chandra/tensorflow,gojira/tensorflow,ppwwyyxx/tensorflow,chemelnucfin/tensorflow,davidzchen/tensorflow,manipopopo/tensorflow,ZhangXinNan/tensorflow,xzturn/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,aam-at/tensorflow,davidzchen/tensorflow,DavidNorman/tensorflow,cxxgtxy/tensorflow,snnn/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,xzturn/tensorflow,Intel-tensorflow/tensorflow,caisq/tensorflow,jart/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,kobejean/tensorflow,gautam1858/tensorflow,AnishShah/tensorflow,ageron/tensorflow,ppwwyyxx/tensorflow,kevin-coder/tensorflow-fork,asimshankar/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,chemelnucfin/tensorflow,jart/tensorflow,drpngx/tensorflow,ageron/tensorflow,caisq/tensorflow,girving/tensorflow,brchiu/tensorflow,paolodedios/tensorflow,seanli9jan/tensorflow,DavidNorman/tensorflow,hfp/tensorflow-xsmm,xodus7/tensorflow,arborh/tensorflow,snnn/tensorflow,dendisuhubdy/tensorflow,girving/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,aselle/tensorflow,asimshankar/tensorflow,drpngx/tensorflow,kevin-coder/tensorflow-fork,dancingdan/tensorflow,tensorflow/tensorflow,jendap/tensorflow,ppwwyyxx/tensorflow,drpngx/tensorflow,AnishShah/tensorflow,manipopopo/tensorflow,nburn42/tensorflow,meteorcloudy/tensorflow,hehongliang/tensorflow,dancingdan/tensorflow,alshedivat/tensorflow,caisq/tensorflow,jart/tensorflow,dongjoon-hyun/tensorflow,sarvex/tensorflow,lukeiwanski/tensorflow | tensorflow/python/keras/utils/__init__.py | tensorflow/python/keras/utils/__init__.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.utils.data_utils import GeneratorEnqueuer
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
from tensorflow.python.keras._impl.keras.utils.data_utils import OrderedEnqueuer
from tensorflow.python.keras._impl.keras.utils.data_utils import Sequence
from tensorflow.python.keras._impl.keras.utils.data_utils import SequenceEnqueuer
from tensorflow.python.keras._impl.keras.utils.generic_utils import custom_object_scope
from tensorflow.python.keras._impl.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.keras._impl.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras._impl.keras.utils.generic_utils import get_custom_objects
from tensorflow.python.keras._impl.keras.utils.generic_utils import Progbar
from tensorflow.python.keras._impl.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.keras._impl.keras.utils.io_utils import HDF5Matrix
from tensorflow.python.keras._impl.keras.utils.layer_utils import convert_all_kernels_in_model
from tensorflow.python.keras._impl.keras.utils.multi_gpu_utils import multi_gpu_model
from tensorflow.python.keras._impl.keras.utils.np_utils import normalize
from tensorflow.python.keras._impl.keras.utils.np_utils import to_categorical
from tensorflow.python.keras._impl.keras.utils.vis_utils import plot_model
del absolute_import
del division
del print_function
| # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.utils.data_utils import GeneratorEnqueuer
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
from tensorflow.python.keras._impl.keras.utils.data_utils import Sequence
from tensorflow.python.keras._impl.keras.utils.data_utils import SequenceEnqueuer
from tensorflow.python.keras._impl.keras.utils.generic_utils import custom_object_scope
from tensorflow.python.keras._impl.keras.utils.generic_utils import CustomObjectScope
from tensorflow.python.keras._impl.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras._impl.keras.utils.generic_utils import get_custom_objects
from tensorflow.python.keras._impl.keras.utils.generic_utils import Progbar
from tensorflow.python.keras._impl.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.keras._impl.keras.utils.io_utils import HDF5Matrix
from tensorflow.python.keras._impl.keras.utils.layer_utils import convert_all_kernels_in_model
from tensorflow.python.keras._impl.keras.utils.multi_gpu_utils import multi_gpu_model
from tensorflow.python.keras._impl.keras.utils.np_utils import normalize
from tensorflow.python.keras._impl.keras.utils.np_utils import to_categorical
from tensorflow.python.keras._impl.keras.utils.vis_utils import plot_model
del absolute_import
del division
del print_function
| apache-2.0 | Python |
4963856156516689b0ae1a38c681d92f2c15ec6e | remove warnings by renaming variables/methods | rtcTo/rtc2git,WtfJoke/rtc2git,cwill747/rtc2git,ohumbel/rtc2git,akchinSTC/rtc2git,jacobilsoe/rtc2git | sorter.py | sorter.py | def getfirstentryfromeachkeyasmap(changeentrymap):
    """Map each key to the first entry of its (non-empty) list."""
    firstentries = {}
    for key in changeentrymap.keys():
        changeentries = changeentrymap.get(key)
        if changeentries:
            # Only keys whose list is non-empty contribute a head entry.
            firstentries[key] = changeentries[0]
    return firstentries
def deleteentry(changeentrymap, changeentrytodelete):
    """Remove ``changeentrytodelete`` from the first list whose head entry
    carries the same revision, then stop.

    Args:
        changeentrymap: dict mapping a key to a list of change entries.
        changeentrytodelete: entry whose ``revision`` identifies the list
            head to remove.
    """
    for key in changeentrymap.keys():
        changeentries = changeentrymap.get(key)
        # Compare revisions by value: the original used ``is``, which tests
        # object identity and only matched by accident for interned/cached
        # objects.
        if changeentries and changeentrytodelete.revision == changeentries[0].revision:
            changeentries.remove(changeentrytodelete)
            break
def tosortedlist(changeentrymap):
    """Drain the map into a single list of entries ordered by date
    (earliest first)."""
    result = []
    total = len(aslist(changeentrymap))
    while len(result) < total:
        heads = getfirstentryfromeachkeyasmap(changeentrymap)
        earliest = getchangeentrywithearliestdate(heads)
        deleteentry(changeentrymap, earliest)
        result.append(earliest)
    return result
def getchangeentrywithearliestdate(changeentries):
    """Return the entry with the smallest ``date`` among the dict's values,
    or None when the dict is empty."""
    earliest = None
    for candidate in changeentries.values():
        if not earliest or candidate.date < earliest.date:
            earliest = candidate
    return earliest
def aslist(anymap):
    """Flatten a dict of change-entry lists into one list."""
    resultlist = []
    for key in anymap.keys():
        for changeentry in anymap.get(key):
            resultlist.append(changeentry)
return resultlist | def getfirstentryfromeachkeyasmap(changeentrymap):
    """Map each key to the first entry of its (non-empty) list."""
    firstentries = {}
    for key in changeentrymap.keys():
        changeentries = changeentrymap.get(key)
        if changeentries:
            # Only keys whose list is non-empty contribute a head entry.
            firstentries[key] = changeentries[0]
    return firstentries
def deleteentry(changeentrymap, changeentrytodelete):
    """Remove ``changeentrytodelete`` from the first list whose head entry
    carries the same revision, then stop.

    Args:
        changeentrymap: dict mapping a key to a list of change entries.
        changeentrytodelete: entry whose ``revision`` identifies the list
            head to remove.
    """
    for key in changeentrymap.keys():
        changeentries = changeentrymap.get(key)
        # Compare revisions by value: the original used ``is``, which tests
        # object identity and only matched by accident for interned/cached
        # objects.
        if changeentries and changeentrytodelete.revision == changeentries[0].revision:
            changeentries.remove(changeentrytodelete)
            break
def tosortedlist(changeentrymap):
    """Drain the map into one list of entries ordered by date
    (earliest first)."""
    merged = []
    expected = len(aslist(changeentrymap))
    while len(merged) < expected:
        currentheads = getfirstentryfromeachkeyasmap(changeentrymap)
        nextentry = getchangeentrywithearliestdate(currentheads)
        deleteentry(changeentrymap, nextentry)
        merged.append(nextentry)
    return merged
def getchangeentrywithearliestdate(changeentries):
    """Pick the dict value with the smallest ``date`` attribute, or None
    for an empty dict."""
    winner = None
    for entry in changeentries.values():
        if not winner or entry.date < winner.date:
            winner = entry
    return winner
def aslist(anymap):
    """Flatten a dict of change-entry lists into one list."""
    resultlist = []
    for key in anymap.keys():
        for changeentry in anymap.get(key):
            resultlist.append(changeentry)
return resultlist | mit | Python |
62d385e2bbcd8ce36a4f91adc021cd8ec6be41d3 | Convert functions into class | neoliberal/css-updater | source/update.py | source/update.py | """updates subreddit css with compiled sass"""
from os import path
from typing import List, Dict, Any, Tuple
import praw
import sass
# leave my typedefs alone, pylint: disable=C0103
WebhookResponse = Dict[str, Any]
class SubredditUploader(object):
    """Uploads changed repository assets and stylesheets to a subreddit.

    Forward references (``WebhookResponse``, ``praw.Reddit``) are written as
    string annotations: the original evaluated ``self: SubredditUploader`` in
    the method signatures, which raises NameError at class-creation time
    because the class name is not bound until the ``class`` statement ends.
    """
    def __init__(
            self, data: "WebhookResponse",
            absolute_path: str, reddit: "praw.Reddit", subreddit: str
    ) -> None:
        """Store the webhook payload, checkout path, client and target sub."""
        self.webhook: "WebhookResponse" = data
        self.reddit: "praw.Reddit" = reddit
        self.subreddit: str = subreddit
        self.path: str = absolute_path
    def changed_assets(self) -> "Tuple[List[str], List[str]]":
        """
        identifies changed assets to upload or remove by checking if any
        changed files are images.
        returns a tuple of (modified / new files, removed asset names
        without extension)
        """
        # os.path.splitext keeps the leading dot, so the extensions must
        # include it; the original compared against bare "png"/"jpg" and
        # therefore never matched any file.
        endings: "List[str]" = [".png", ".jpg"]
        head_commit: "Dict[str, Any]" = self.webhook["head_commit"]
        uploading_files: "List[str]" = [
            file for file in (head_commit["modified"] + head_commit["added"])
            if path.splitext(file)[1] in endings
        ]
        # removed_files require a name, not file extension
        removed_files: "List[str]" = [
            path.splitext(file)[0] for file in head_commit["removed"]
            if path.splitext(file)[1] in endings
        ]
        return (uploading_files, removed_files)
    def upload_stylesheet(self) -> bool:
        """compiles and uploads the stylesheet; returns False on a compile
        error"""
        style: str = ""
        try:
            # NOTE(review): assumes ``self.path`` ends with a separator --
            # confirm callers pass e.g. "repo/" rather than "repo".
            style = sass.compile(
                filename=(self.path + "index.scss"), output_style="compressed")
        except sass.CompileError as sass_error:
            print(sass_error)
            return False
        self.reddit.subreddit(self.subreddit).stylesheet.update(style)
        return True
    def changed_stylesheet(self) -> bool:
        """checks if any sass files have been changed"""
        # Keep the dot for the same splitext reason as in changed_assets.
        ending: str = ".scss"
        head_commit: "Dict[str, Any]" = self.webhook["head_commit"]
        return any(
            path.splitext(file)[1] == ending
            for file in (head_commit["modified"] + head_commit["added"])
        )
| """updates subreddit css with compiled sass"""
from os import path
import time
from typing import List, Dict, Any, Tuple
import praw
import sass
# leave my typedefs alone, pylint: disable=C0103
WebhookResponse = Dict[str, Any]
def css() -> str:
    """Compile ``index.scss`` and return the minified CSS text."""
    compiled = sass.compile(filename="index.scss", output_style="compressed")
    return compiled
def uid() -> str:
    """Return a human-readable upload reason stamped with the current time."""
    timestamp = time.strftime("%c")
    return "Subreddit upload on {}".format(timestamp)
def changed_assets(data: "WebhookResponse") -> Tuple[List[str], List[str]]:
    """
    identifies changed image assets to upload or remove.

    Args:
        data: GitHub push-webhook payload (must contain ``head_commit``).

    Returns:
        tuple of (modified / new image files, removed asset names without
        extension)
    """
    # os.path.splitext keeps the leading dot, so match against ".png"/".jpg";
    # the original compared against bare "png"/"jpg" and never matched.
    endings: List[str] = [".png", ".jpg"]
    head_commit: Dict[str, Any] = data["head_commit"]
    uploading_files: List[str] = [
        file for file in (head_commit["modified"] + head_commit["added"])
        if path.splitext(file)[1] in endings
    ]
    # removed_files require a name, not file extension
    removed_files: List[str] = [
        path.splitext(file)[0] for file in head_commit["removed"]
        if path.splitext(file)[1] in endings
    ]
    return (uploading_files, removed_files)
def changed_spreedsheet(data: "WebhookResponse") -> bool:
    """Return True if any SCSS file was modified or added in the head commit.

    Bug fix: ``os.path.splitext`` returns ".scss" (dot included), so the
    original comparison against "scss" was always False.
    (The historical "spreedsheet" typo in the name is kept for callers.)
    """
    ending: str = ".scss"
    head_commit: Dict[str, Any] = data["head_commit"]
    return any(
        path.splitext(file)[1] == ending
        for file in (head_commit["modified"] + head_commit["added"])
    )
def update(data: WebhookResponse) -> None:
    """Entry point: recompile the stylesheet and push it to r/neoliberal.

    The webhook payload is currently unused; every invocation recompiles
    ``index.scss`` and uploads it with a timestamped reason.
    """
    # PRAW reads credentials from its standard praw.ini / environment config.
    reddit: praw.Reddit = praw.Reddit()
    reddit.subreddit("neoliberal").stylesheet.update(css(), reason=uid())
    return
| mit | Python |
2502447e05daedd0fc44443b92e67ce3bf40ff37 | fix pep8 on tamplate tag article (box), E302 expected ./opps/article/templatetags/article_tags.py:9:1: E302 expected 2 blank lines, found 1 | jeanmask/opps,williamroot/opps,YACOWS/opps,williamroot/opps,YACOWS/opps,opps/opps,williamroot/opps,opps/opps,YACOWS/opps,jeanmask/opps,williamroot/opps,jeanmask/opps,jeanmask/opps,opps/opps,opps/opps,YACOWS/opps | opps/article/templatetags/article_tags.py | opps/article/templatetags/article_tags.py | # -*- coding: utf-8 -*-
from django import template
from django.conf import settings
from opps.article.models import ArticleBox
register = template.Library()
@register.inclusion_tag('article/articlebox_detail.html')
def get_articlebox(slug, channel_slug=None):
    """Template tag: render the ArticleBox matching *slug* for this site.

    When *channel_slug* is given the lookup slug becomes
    "<slug>-<channel_slug>". A missing box renders as None instead of
    raising inside the template.
    """
    if channel_slug:
        slug = slug + '-' + channel_slug
    try:
        box = ArticleBox.objects.get(site=settings.SITE_ID, slug=slug)
    except ArticleBox.DoesNotExist:
        box = None
    return {'articlebox': box}
@register.inclusion_tag('article/articlebox_list.html')
def get_all_articlebox(channel_slug):
    """Template tag: render every ArticleBox of *channel_slug* on this site."""
    queryset = ArticleBox.objects.filter(
        site=settings.SITE_ID,
        channel__slug=channel_slug,
    )
    return {'articleboxes': queryset}
| # -*- coding: utf-8 -*-
from django import template
from django.conf import settings
from opps.article.models import ArticleBox
register = template.Library()
@register.inclusion_tag('article/articlebox_detail.html')
def get_articlebox(slug, channel_slug=None):
    """Render the ArticleBox for *slug*, optionally scoped by channel slug."""
    if channel_slug:
        slug = slug + '-' + channel_slug
    try:
        box = ArticleBox.objects.get(site=settings.SITE_ID, slug=slug)
    except ArticleBox.DoesNotExist:
        # Missing boxes render as None rather than raising in the template.
        box = None
    return {'articlebox': box}
@register.inclusion_tag('article/articlebox_list.html')
def get_all_articlebox(channel_slug):
    """Render all ArticleBoxes belonging to *channel_slug* on the current site."""
    boxes = ArticleBox.objects.filter(site=settings.SITE_ID, channel__slug=channel_slug)
    return {'articleboxes': boxes}
| mit | Python |
7ec3c34be0c57163840a8df0f7e2c174a7f0dd67 | Add batch command for sqlite files | matslindh/kimochi,matslindh/kimochi | alembic/env.py | alembic/env.py | # from https://github.com/virajkanwade/alembic-templates-pyramid/blob/master/pyramid/env.py
from alembic import context
from sqlalchemy import engine_from_config, pool
from paste.deploy import loadapp
from pyramid.paster import get_appsettings, setup_logging
#from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
pyramid_config_file = config.get_main_option('pyramid_config_file')
# Interpret the config file for Python logging.
# This line sets up loggers basically.
#fileConfig(config.config_file_name)
#setup_logging(pyramid_config_file)
print(pyramid_config_file)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
app = loadapp('config:%s' % pyramid_config_file, relative_to='.')
settings = get_appsettings(pyramid_config_file)
target_metadata = __import__(app.registry.__name__).models.Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    # The database URL comes from the Pyramid app settings, not alembic.ini.
    context.configure(url=settings.get('sqlalchemy.url'))

    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    # Build the engine from the Pyramid settings ("sqlalchemy." prefix).
    engine = engine_from_config(settings, 'sqlalchemy.')
    #engine = engine_from_config(
    #    config.get_section(config.config_ini_section),
    #    prefix='sqlalchemy.',
    #    poolclass=pool.NullPool)

    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        # Emit ALTER TABLE as batch ("move and copy") operations so SQLite,
        # which cannot alter columns in place, can apply the migrations.
        render_as_batch=True
    )

    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        # Always release the connection, even when a migration fails.
        connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| # from https://github.com/virajkanwade/alembic-templates-pyramid/blob/master/pyramid/env.py
from alembic import context
from sqlalchemy import engine_from_config, pool
from paste.deploy import loadapp
from pyramid.paster import get_appsettings, setup_logging
#from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
pyramid_config_file = config.get_main_option('pyramid_config_file')
# Interpret the config file for Python logging.
# This line sets up loggers basically.
#fileConfig(config.config_file_name)
#setup_logging(pyramid_config_file)
print(pyramid_config_file)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
app = loadapp('config:%s' % pyramid_config_file, relative_to='.')
settings = get_appsettings(pyramid_config_file)
target_metadata = __import__(app.registry.__name__).models.Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=settings.get('sqlalchemy.url'))
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(settings, 'sqlalchemy.')
#engine = engine_from_config(
# config.get_section(config.config_ini_section),
# prefix='sqlalchemy.',
# poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| mit | Python |
d3f82c3ed1e4879dfc0a0885e35c848b6cf311fb | use dns cache for improved adress resolving | Lispython/pycurl,Lispython/pycurl,Lispython/pycurl | pycurl/tests/test.py | pycurl/tests/test.py | # $Id$
## System modules
import sys
import threading
import time
## PycURL module
import pycurl
class Test(threading.Thread):
    """Worker thread that downloads a single URL with pycurl.

    The transfer output is written to *ofile*; the shared global DNS cache
    is enabled so address lookups are reused across threads.
    (Python 2 era code — note the print statements in the enclosing script.)
    """

    def __init__(self, url, ofile):
        threading.Thread.__init__(self)
        self.curl = pycurl.init()
        self.curl.setopt(pycurl.URL, url)
        self.curl.setopt(pycurl.FILE, ofile)
        self.curl.setopt(pycurl.NOPROGRESS, 1)
        self.curl.setopt(pycurl.FOLLOWLOCATION, 1)
        self.curl.setopt(pycurl.MAXREDIRS, 5)
        # Share resolved addresses across handles and cache them for 10s.
        self.curl.setopt(pycurl.DNS_USE_GLOBAL_CACHE, 1)
        self.curl.setopt(pycurl.DNS_CACHE_TIMEOUT, 10)

    def run(self):
        # Perform the transfer, free the handle, then print a progress dot.
        self.curl.perform()
        self.curl.cleanup()
        sys.stdout.write('.')
        sys.stdout.flush()
# Read list of URIs from file specified on commandline
try:
urls = open(sys.argv[1]).readlines()
except IndexError:
# No file was specified, show usage string
print "Usage: %s <file with uris to fetch>" % sys.argv[0]
raise SystemExit
# Initialize thread array and the file number
threads = []
fileno = 0
# Start one thread per URI in parallel
t1 = time.time()
for url in urls:
f = open(str(fileno), 'w')
t = Test(url, f)
t.start()
threads.append((t, f))
fileno = fileno + 1
# Wait for all threads to finish
for thread, file in threads:
thread.join()
file.close()
t2 = time.time()
print '\n** Multithreading, %d seconds elapsed for %d uris' % (int(t2-t1), len(urls))
# Start one thread per URI in sequence
fileno = 0
t1 = time.time()
for url in urls:
f = open(str(fileno), 'w')
t = Test(url, f)
t.start()
fileno = fileno + 1
t.join()
f.close()
t2 = time.time()
print '\n** Singlethreading, %d seconds elapsed for %d uris' % (int(t2-t1), len(urls))
| # $Id$
## System modules
import sys
import threading
import time
## PycURL module
import pycurl
class Test(threading.Thread):
def __init__(self, url, ofile):
threading.Thread.__init__(self)
self.curl = pycurl.init()
self.curl.setopt(pycurl.URL, url)
self.curl.setopt(pycurl.FILE, ofile)
self.curl.setopt(pycurl.NOPROGRESS, 1)
self.curl.setopt(pycurl.FOLLOWLOCATION, 1)
self.curl.setopt(pycurl.MAXREDIRS, 5)
def run(self):
self.curl.perform()
self.curl.cleanup()
sys.stdout.write('.')
sys.stdout.flush()
# Read list of URIs from file specified on commandline
try:
urls = open(sys.argv[1]).readlines()
except IndexError:
# No file was specified, show usage string
print "Usage: %s <file with uris to fetch>" % sys.argv[0]
raise SystemExit
# Initialize thread array and the file number
threads = []
fileno = 0
# Start one thread per URI in parallel
t1 = time.time()
for url in urls:
f = open(str(fileno), 'w')
t = Test(url, f)
t.start()
threads.append((t, f))
fileno = fileno + 1
# Wait for all threads to finish
for thread, file in threads:
thread.join()
file.close()
t2 = time.time()
print '\n** Multithreading, %d seconds elapsed for %d uris' % (int(t2-t1), len(urls))
# Start one thread per URI in sequence
fileno = 0
t1 = time.time()
for url in urls:
f = open(str(fileno), 'w')
t = Test(url, f)
t.start()
fileno = fileno + 1
t.join()
f.close()
t2 = time.time()
print '\n** Singlethreading, %d seconds elapsed for %d uris' % (int(t2-t1), len(urls))
| lgpl-2.1 | Python |
ba408df025136563c0eafe00551f23e44e9c2731 | Change version to 1.0.1 unstable | xcgd/account_credit_transfer | __openerp__.py | __openerp__.py | # -*- coding: utf-8 -*-
{
"name": "Account Credit Transfer",
"version": "1.0.1",
"author": "XCG Consulting",
"website": "http://www.openerp-experts.com",
"category": 'Accounting',
"description": """Account Voucher Credit Transfer Payment.
You need to set up some things before using it.
A credit transfer config link a bank with a parser
A credit transfer parser link a parser with a template that you can upload
""",
"depends": [
'base',
'account_streamline',
],
"data": [
"security/ir.model.access.csv",
"views/config.xml",
"views/parser.xml",
"views/res.bank.xml",
],
'demo_xml': [],
'test': [],
'installable': True,
'active': False,
'external_dependencies': {
'python': ['genshi']
}
}
| # -*- coding: utf-8 -*-
{
"name": "Account Credit Transfer",
"version": "1.0",
"author": "XCG Consulting",
"website": "http://www.openerp-experts.com",
"category": 'Accounting',
"description": """Account Voucher Credit Transfer Payment.
You need to set up some things before using it.
A credit transfer config link a bank with a parser
A credit transfer parser link a parser with a template that you can upload
""",
"depends": [
'base',
'account_streamline',
],
"data": [
"security/ir.model.access.csv",
"views/config.xml",
"views/parser.xml",
"views/res.bank.xml",
],
'demo_xml': [],
'test': [],
'installable': True,
'active': False,
'external_dependencies': {
'python': ['genshi']
}
}
| agpl-3.0 | Python |
f27fc83bf7a9c14b90cba1052d54397a211f7a24 | add a forward slash to the allowed regex pattern when making the repo slug | rca/issuebranch | src/issuebranch/console_scripts.py | src/issuebranch/console_scripts.py | #!/usr/bin/env python
"""
create a new branch for the given redmine issue
"""
import argparse
import importlib
import os
import sh
import sys
from slugify import slugify
MAX_SLUG_LENGTH = 50
def make_branch(name):
    """Create branch *name* off master and check it out (shells out to git)."""
    command = 'git checkout -b {} master'.format(name).split()
    runner = getattr(sh, command[0])
    runner(*command[1:])
def issuebranch():
    """Console entry point: create a git branch named after a tracker issue.

    The issue backend (e.g. github, redmine) is selected via the
    ISSUE_BACKEND environment variable and must expose a ``Backend`` class.
    The branch is named "<prefix>/<number>-<subject>" and slugified before
    checkout.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--prefix', help='branch prefix, e.g. feature, bugfix, etc.')
    parser.add_argument('--subject', help='provide subject text instead of fetching')
    parser.add_argument('issue_number', type=int, help='the issue tracker\'s issue number')

    args = parser.parse_args()
    issue_number = args.issue_number

    # Resolve the backend module dynamically from the environment.
    backend_name = os.environ['ISSUE_BACKEND']
    backend_module = importlib.import_module('issuebranch.backends.{}'.format(backend_name))
    issue = getattr(backend_module, 'Backend')(issue_number)

    # Command-line flags override values fetched from the issue tracker.
    prefix = args.prefix
    if not prefix:
        prefix = issue.prefix

    subject = args.subject
    if not subject:
        subject = issue.subject

    branch_name = '{}/{}-{}'.format(prefix, issue_number, subject)

    # add the forward slash to the allowed regex
    # default is: r'[^-a-z0-9]+'
    regex_pattern = r'[^/\-a-z0-9_]+'
    slug = slugify(branch_name, max_length=MAX_SLUG_LENGTH, regex_pattern=regex_pattern)

    make_branch(slug)
| #!/usr/bin/env python
"""
create a new branch for the given redmine issue
"""
import argparse
import importlib
import os
import sh
import sys
from slugify import slugify
MAX_SLUG_LENGTH = 50
def make_branch(name):
command_l = 'git checkout -b {} master'.format(name).split()
getattr(sh, command_l[0])(*command_l[1:])
def issuebranch():
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', help='branch prefix, e.g. feature, bugfix, etc.')
parser.add_argument('--subject', help='provide subject text instead of fetching')
parser.add_argument('issue_number', type=int, help='the issue tracker\'s issue number')
args = parser.parse_args()
issue_number = args.issue_number
backend_name = os.environ['ISSUE_BACKEND']
backend_module = importlib.import_module('issuebranch.backends.{}'.format(backend_name))
issue = getattr(backend_module, 'Backend')(issue_number)
prefix = args.prefix
if not prefix:
prefix = issue.prefix
subject = args.subject
if not subject:
subject = issue.subject
branch_name = '{}/{}-{}'.format(prefix, issue_number, subject)
regex_pattern = r'[^/-a-z0-9_]+'
slug = slugify(branch_name, max_length=MAX_SLUG_LENGTH, regex_pattern=regex_pattern)
print('SLUG {}'.format(slug))
make_branch(slug)
| apache-2.0 | Python |
c1809cf217f268a5325b58513e4872a8ad44e231 | Add cross domain request support for server, incomplete. | mgunyho/kiltiskahvi | webserver.py | webserver.py | """
This module is responsible for handling web requests using Flask.
Requests are of the form (start, end) in unix time and are passed on to the db
manager, which then returns the appropriate data to be sent back as JSON.
"""
#TODO: turn this into a daemon
from functools import update_wrapper

from flask import Flask, current_app, jsonify, make_response, request

import db
import config
app = Flask(__name__)
cfg = config.get_config_dict()
dbm = db.DatabaseManager(cfg)
# Allow cross domain requests, see http://flask.pocoo.org/snippets/56/
# Simplified version.
#TODO: configure allowed origins etc. in this via config file...
def crossdomain(origin=None):
    """Decorator adding CORS headers to a Flask view.

    Simplified from http://flask.pocoo.org/snippets/56/.

    Args:
        origin: allowed origin(s) — a string or an iterable of strings.
            None falls back to "*" (allow any origin).

    Fixes over the original draft:
    - ``origin=None`` no longer crashes on ``", ".join(None)``.
    - Non-OPTIONS requests now invoke the wrapped view; previously every
      request got Flask's empty default OPTIONS response, so GET handlers
      were never called.
    """
    if origin is None:
        origin = "*"
    elif not isinstance(origin, str):
        origin = ", ".join(origin)

    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if request.method == "OPTIONS":
                # CORS preflight: answer with the default OPTIONS response.
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            h = resp.headers
            h["Access-Control-Allow-Origin"] = origin
            h["Access-Control-Allow-Methods"] = "GET"  # TODO make this configurable
            return resp
        return update_wrapper(wrapped_function, f)
    return decorator
@app.route("/data", methods=["GET", "OPTIONS"])
@crossdomain(origin="*")
def get_data():
    """Return datapoints for the unix-time range given by ?s=<start>&e=<end>.

    Responds with JSON: [index, value] pairs under the "items" key.
    NOTE(review): a missing or non-integer query parameter raises
    TypeError/ValueError here, which is not caught — Flask answers 500.
    """
    print("getting data: {}".format(request))
    try:
        data_range = (int(request.args.get("s")), int(request.args.get("e")))
        datapoints = dbm.query_range(data_range)
        #query_
        #return str(data_range)
        #return str(query_result)
        #return json.dumps([[i, x] for i, x in enumerate(datapoints)])
        return jsonify(items = [[i, x] for i, x in enumerate(datapoints)])
    #TODO
    except db.DBException:
        raise
def main():
pass
# Testing
#TODO: move these to main function...
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("--public",
dest="public", action = "store_true",
help = "make the flask app publicly visible to the network.")
args = ap.parse_args()
# initialize a dummy database, which returns random values.
#TODO
dbm = db.DatabaseManager("dummy")
app.run('0.0.0.0' if args.public else None)
| """
This module is responsible for handling web requests using Flask.
Requests are of the form (start, end) in unix time and are passed on to the db
manager, which then returns the appropriate data to be sent back as JSON.
"""
#TODO: turn this into a daemon
from flask import Flask, request
import json
import db
import config
app = Flask(__name__)
cfg = config.get_config_dict()
dbm = db.DatabaseManager(cfg)
@app.route("/data")
def get_data():
print("getting data: {}".format(request))
try:
data_range = (int(request.args.get("s")), int(request.args.get("e")))
datapoints = dbm.query_range(data_range)
#query_
#return str(data_range)
#return str(query_result)
return json.dumps([[i, x] for i, x in enumerate(datapoints)])
#TODO
except db.DBException:
raise
def main():
pass
# Testing
#TODO: move these to main function...
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("--public",
dest="public", action = "store_true",
help = "make the flask app publicly visible to the network.")
args = ap.parse_args()
# initialize a dummy database, which returns random values.
#TODO
dbm = db.DatabaseManager("dummy")
app.run('0.0.0.0' if args.public else None)
| mit | Python |
c4187e804d3a0f2b9b0053acdc34d9868abd2b65 | Test git | gras/SensorWorkshop | python/02-HelloWorld/main.py | python/02-HelloWorld/main.py | # Making Sense of Sensors Workshop
# Educators Edition 2015
#
# 00-SampleIncludeFiles
# main.py
'''
@author: Dead Robot Society
'''
import actions as act
##################################
# main routine
##################################
'''
This routine controls everything that happens.
Remember to start with act.init("program name")
and end with act.shutdown()
'''
def main() :
    """Top-level robot routine: init, leave the start box, act, shut down."""
    act.init("00-YourProgramNameHere") # change this!
    act.getOutOfStartBox()
    act.doSomething()
    act.shutdown()
##################################
# initialization/shutdown routines
##################################
'''
This chunk of code sets print output to unbuffered
(so that print statements are displayed in real-time)
then calls main() to start the program
'''
if __name__ == '__main__':
import os
import sys
sys.stdout = os.fdopen(sys.stdout.fileno(),'w',0)
main()
| # Making Sense of Sensors Workshop
# Educators Edition 2015
#
# 00-SampleIncludeFiles
# main.py
'''
@author: Dead Robot Society
'''
import actions as act
##################################
# main routine
##################################
'''
This routine controls everything that happens.
Remember to start with act.init("program name")
and end with act.shutdown()
'''
def main() :
    """Top-level robot routine: init, leave the start box, act, shut down."""
    act.init("00-YourProgramName") # change this!
    act.getOutOfStartBox()
    act.doSomething()
    act.shutdown()
##################################
# initialization/shutdown routines
##################################
'''
This chunk of code sets print output to unbuffered
(so that print statements are displayed in real-time)
then calls main() to start the program
'''
if __name__ == '__main__':
import os
import sys
sys.stdout = os.fdopen(sys.stdout.fileno(),'w',0)
main()
| mit | Python |
3ca66d7fe4c325ee6b607522b0fb544e1bdc78ec | Update pybind11_bazel from 26973c0ff320cb4b39e45bc3e4297b82bc3a6c09 to 72cbbf1fbc830e487e3012862b7b720001b70672. | tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,karllessard/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,karllessard/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorfl
ow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer | third_party/pybind11_abseil/workspace.bzl | third_party/pybind11_abseil/workspace.bzl | """Provides the repo macro to import pybind11_abseil.
pybind11_abseil requires pybind11 (which is loaded in another rule) and pybind11_bazel.
See https://github.com/pybind/pybind11_abseil#installation.
"""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports pybind11_abseil and its pybind11_bazel dependency."""
    PA_COMMIT = "d9614e4ea46b411d02674305245cba75cd91c1c6"
    PA_SHA256 = "a2b5509dc1c344954fc2f1ba1d277afae84167691c0daad59b6da71886d1f276"

    tf_http_archive(
        name = "pybind11_abseil",
        sha256 = PA_SHA256,
        strip_prefix = "pybind11_abseil-{commit}".format(commit = PA_COMMIT),
        urls = tf_mirror_urls("https://github.com/pybind/pybind11_abseil/archive/{commit}.tar.gz".format(commit = PA_COMMIT)),
        build_file = "//third_party/pybind11_abseil:BUILD",
    )

    # pybind11_bazel is a dependency of pybind11_abseil.
    PB_COMMIT = "72cbbf1fbc830e487e3012862b7b720001b70672"
    PB_SHA256 = "516c1b3a10d87740d2b7de6f121f8e19dde2c372ecbfe59aef44cd1872c10395"

    tf_http_archive(
        name = "pybind11_bazel",
        strip_prefix = "pybind11_bazel-{commit}".format(commit = PB_COMMIT),
        sha256 = PB_SHA256,
        # Build the URL from PB_COMMIT (like the pybind11_abseil rule above)
        # instead of hard-coding the hash, so the pin cannot silently
        # diverge from strip_prefix on the next version bump.
        urls = tf_mirror_urls("https://github.com/pybind/pybind11_bazel/archive/{commit}.tar.gz".format(commit = PB_COMMIT)),
    )
| """Provides the repo macro to import pybind11_abseil.
pybind11_abseil requires pybind11 (which is loaded in another rule) and pybind11_bazel.
See https://github.com/pybind/pybind11_abseil#installation.
"""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports pybind11_abseil."""
PA_COMMIT = "d9614e4ea46b411d02674305245cba75cd91c1c6"
PA_SHA256 = "a2b5509dc1c344954fc2f1ba1d277afae84167691c0daad59b6da71886d1f276"
tf_http_archive(
name = "pybind11_abseil",
sha256 = PA_SHA256,
strip_prefix = "pybind11_abseil-{commit}".format(commit = PA_COMMIT),
urls = tf_mirror_urls("https://github.com/pybind/pybind11_abseil/archive/{commit}.tar.gz".format(commit = PA_COMMIT)),
build_file = "//third_party/pybind11_abseil:BUILD",
)
# pybind11_bazel is a dependency of pybind11_abseil.
PB_COMMIT = "72cbbf1fbc830e487e3012862b7b720001b70672"
PB_SHA256 = "516c1b3a10d87740d2b7de6f121f8e19dde2c372ecbfe59aef44cd1872c10395"
tf_http_archive(
name = "pybind11_bazel",
strip_prefix = "pybind11_bazel-{commit}".format(commit = PB_COMMIT),
sha256 = PB_SHA256,
urls = tf_mirror_urls("https://github.com/pybind/pybind11_bazel/archive/72cbbf1fbc830e487e3012862b7b720001b70672.tar.gz"),
)
| apache-2.0 | Python |
c9c4ac9cad24b691edd6848595edc85c608873fe | Patch mockito params matcher | drslump/pyshould | pyshould/__init__.py | pyshould/__init__.py | """
pyshould - a should style wrapper for pyhamcrest
"""
from pyshould.dsl import *
__author__ = "Ivan -DrSlump- Montes"
__email__ = "drslump@pollinimini.net"
__license__ = "MIT"
# Override the list public symbols for a wildcard import
__all__ = [
'should',
'should_not',
'should_any',
'should_all',
'should_none',
'should_either',
'it',
'all_of',
'any_of',
'none_of',
]
# Patch mockito's param matcher so pyshould expectations can be used as
# argument matchers in stubs/verifications. Skipped silently when mockito
# is not installed.
try:
    import mockito
    from pyshould.expectation import Expectation

    # Keep a reference to the stock comparison so non-Expectation params
    # still use mockito's own matching.
    original_method = mockito.invocation.MatchingInvocation.compare

    @staticmethod
    def pyshould_compare(p1, p2):
        """Resolve a pyshould Expectation against the actual argument."""
        if isinstance(p1, Expectation):
            try:
                # Clone so resolving doesn't mutate the user's expectation.
                expectation = p1.clone()
                expectation.resolve(p2)
                return True
            except AssertionError:
                return False
        return original_method(p1, p2)

    # Bug fix: the replacement was defined but never installed, so mockito
    # kept using its stock comparison and the patch was a no-op.
    mockito.invocation.MatchingInvocation.compare = pyshould_compare
except ImportError:
    pass
| """
pyshould - a should style wrapper for pyhamcrest
"""
from pyshould.dsl import *
__author__ = "Ivan -DrSlump- Montes"
__email__ = "drslump@pollinimini.net"
__license__ = "MIT"
# Override the list public symbols for a wildcard import
__all__ = [
'should',
'should_not',
'should_any',
'should_all',
'should_none',
'should_either',
'it',
'all_of',
'any_of',
'none_of',
]
| mit | Python |
5b0421a10497f89ab62e9c0f75e760be38eb5d27 | remove plot_curve in v2.__init__ to avoid importing matplotlib when test | baidu/Paddle,chengduoZH/Paddle,emailweixu/Paddle,lispc/Paddle,reyoung/Paddle,livc/Paddle,lispc/Paddle,jacquesqiao/Paddle,emailweixu/Paddle,lcy-seso/Paddle,hedaoyuan/Paddle,emailweixu/Paddle,pengli09/Paddle,pkuyym/Paddle,jacquesqiao/Paddle,cxysteven/Paddle,reyoung/Paddle,yu239/Paddle,luotao1/Paddle,pkuyym/Paddle,pengli09/Paddle,lcy-seso/Paddle,cxysteven/Paddle,cxysteven/Paddle,Canpio/Paddle,QiJune/Paddle,chengduoZH/Paddle,lispc/Paddle,emailweixu/Paddle,PaddlePaddle/Paddle,QiJune/Paddle,pkuyym/Paddle,livc/Paddle,PaddlePaddle/Paddle,hedaoyuan/Paddle,emailweixu/Paddle,cxysteven/Paddle,chengduoZH/Paddle,luotao1/Paddle,baidu/Paddle,PaddlePaddle/Paddle,pengli09/Paddle,lcy-seso/Paddle,PaddlePaddle/Paddle,Canpio/Paddle,hedaoyuan/Paddle,luotao1/Paddle,luotao1/Paddle,reyoung/Paddle,tensor-tang/Paddle,yu239/Paddle,jacquesqiao/Paddle,reyoung/Paddle,yu239/Paddle,pkuyym/Paddle,hedaoyuan/Paddle,jacquesqiao/Paddle,lispc/Paddle,yu239/Paddle,putcn/Paddle,pkuyym/Paddle,putcn/Paddle,chengduoZH/Paddle,livc/Paddle,reyoung/Paddle,hedaoyuan/Paddle,livc/Paddle,putcn/Paddle,lcy-seso/Paddle,luotao1/Paddle,reyoung/Paddle,livc/Paddle,cxysteven/Paddle,lispc/Paddle,chengduoZH/Paddle,pengli09/Paddle,pengli09/Paddle,hedaoyuan/Paddle,baidu/Paddle,tensor-tang/Paddle,putcn/Paddle,pengli09/Paddle,Canpio/Paddle,lcy-seso/Paddle,yu239/Paddle,hedaoyuan/Paddle,QiJune/Paddle,pengli09/Paddle,putcn/Paddle,luotao1/Paddle,tensor-tang/Paddle,luotao1/Paddle,Canpio/Paddle,lcy-seso/Paddle,emailweixu/Paddle,QiJune/Paddle,PaddlePaddle/Paddle,Canpio/Paddle,yu239/Paddle,tensor-tang/Paddle,Canpio/Paddle,yu239/Paddle,pengli09/Paddle,QiJune/Paddle,lispc/Paddle,Canpio/Paddle,Canpio/Paddle,yu239/Paddle,baidu/Paddle,putcn/Paddle,baidu/Paddle,livc/Paddle,QiJune/Paddle,tensor-tang/Paddle,pkuyym/Paddle,livc/Paddle,emailweixu/Paddle,lispc/Paddle,PaddlePaddle/Paddle,hedaoyuan/Paddle,jacquesqiao/Paddle,cxysteven/Padd
le,cxysteven/Paddle,jacquesqiao/Paddle,PaddlePaddle/Paddle,lispc/Paddle | python/paddle/v2/__init__.py | python/paddle/v2/__init__.py | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import optimizer
import layer
import activation
import parameters
import trainer
import event
import data_type
import topology
import data_feeder
import networks
from . import dataset
from . import reader
import attr
import pooling
import inference
import networks
import py_paddle.swig_paddle as api
import minibatch
__all__ = [
'optimizer', 'layer', 'activation', 'parameters', 'init', 'trainer',
'event', 'data_type', 'attr', 'pooling', 'data_feeder', 'dataset', 'reader',
'topology', 'networks', 'infer'
]
def init(**kwargs):
    """Initialise PaddlePaddle, forwarding keyword args as --key=value flags."""
    flags = ['--%s=%s' % (name, str(value)) for name, value in kwargs.items()]
    api.initPaddle(*flags)
infer = inference.infer
batch = minibatch.batch
| # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import optimizer
import layer
import activation
import parameters
import trainer
import event
import data_type
import topology
import data_feeder
import networks
from . import dataset
from . import reader
import attr
import pooling
import inference
import networks
import py_paddle.swig_paddle as api
import minibatch
import plot_curve
__all__ = [
'optimizer', 'layer', 'activation', 'parameters', 'init', 'trainer',
'event', 'data_type', 'attr', 'pooling', 'data_feeder', 'dataset', 'reader',
'topology', 'networks', 'infer', 'plot_curve'
]
def init(**kwargs):
    """Initialise PaddlePaddle, forwarding keyword args as --key=value flags."""
    flags = ['--%s=%s' % (name, str(value)) for name, value in kwargs.items()]
    api.initPaddle(*flags)
infer = inference.infer
batch = minibatch.batch
| apache-2.0 | Python |
4a878bc4cca45e3258a70dcd5e43d28d9eef9d96 | Implement slots for refreshed and edit_patch | vadmium/python-quilt,bjoernricks/python-quilt | quilt/cli/refresh.py | quilt/cli/refresh.py | # vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
from quilt.cli.meta import Command
from quilt.refresh import Refresh
from quilt.utils import SubprocessError, Process
class RefreshCommand(Command):
    """``quilt refresh`` -- regenerate the topmost (or a named) patch.

    With -e/--edit the patch is opened in $EDITOR (default: vi) before
    the refresh is performed.
    """

    usage = "%prog refresh [patch]"
    name = "refresh"

    def run(self, options, args):
        # Wire up the Refresh signal handlers before starting the operation.
        refresh = Refresh(os.getcwd(), self.get_pc_dir(),
                          self.get_patches_dir())
        refresh.refreshed.connect(self.refreshed)
        if options.edit:
            # Only hook the editor callback when -e/--edit was given.
            refresh.edit_patch.connect(self.edit_patch)
        # An optional patch name may be given as the first argument;
        # otherwise the topmost applied patch is refreshed.
        patch_name = None
        if len(args) > 0:
            patch_name = args[0]
        refresh.refresh(patch_name, options.edit)

    def add_args(self, parser):
        # Register the command-specific -e/--edit option.
        parser.add_option("-e", "--edit", help="open patch in editor before " \
                          "refreshing", dest="edit", action="store_true",
                          default=False)

    def edit_patch(self, tmpfile):
        # Signal handler: open the temporary patch file in the user's
        # editor; a failing editor process aborts with its exit code.
        editor = os.environ.get("EDITOR", "vi")
        try:
            cmd = [editor]
            cmd.append(tmpfile.get_name())
            Process(cmd).run(cwd=os.getcwd())
        except SubprocessError, e:
            self.exit_error(e, value=e.returncode)

    def refreshed(self, patch):
        # Signal handler: announce the refreshed patch on stdout.
        print "Patch %s refreshed" % patch.get_name()
| # vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
from quilt.cli.meta import Command
from quilt.refresh import Refresh
class RefreshCommand(Command):
    """``quilt refresh`` -- regenerate the topmost (or a named) patch."""

    usage = "%prog refresh [patch]"
    name = "refresh"

    def run(self, options, args):
        # An optional patch name may be given as the first argument;
        # otherwise the topmost applied patch is refreshed.
        patch_name = args[0] if args else None
        refresh = Refresh(os.getcwd(), self.get_pc_dir(),
                          self.get_patches_dir())
        refresh.refresh(patch_name)
| mit | Python |
6b5dd8c23bd3b9c17ce672b67da3c36e7d63981a | fix issue #61 (#2) | SandstoneHPC/sandstone-spawner | sandstone_spawner/spawner.py | sandstone_spawner/spawner.py | from jupyterhub.spawner import LocalProcessSpawner
from jupyterhub.utils import random_port
from subprocess import Popen
from tornado import gen
import pipes
import shutil
import os
# This is the path to the sandstone-jupyterhub script
APP_PATH = os.environ.get('SANDSTONE_APP_PATH')
SANDSTONE_SETTINGS = os.environ.get('SANDSTONE_SETTINGS')
class SandstoneSpawner(LocalProcessSpawner):
    """JupyterHub spawner that launches Sandstone as the single-user app.

    Reuses LocalProcessSpawner's process management but runs the
    sandstone-jupyterhub wrapper (SANDSTONE_APP_PATH) instead of the
    notebook server, forwarding SANDSTONE_SETTINGS via the environment.
    """

    @gen.coroutine
    def start(self):
        """Start the single-user server; returns (ip, port)."""
        self.port = random_port()
        cmd = [APP_PATH]
        env = self.get_env()
        env['SANDSTONE_SETTINGS'] = SANDSTONE_SETTINGS
        args = self.get_args()
        cmd.extend(args)
        self.log.info("Spawning %s", ' '.join(pipes.quote(s) for s in cmd))
        try:
            self.proc = Popen(cmd, env=env,
                              preexec_fn=self.make_preexec_fn(self.user.name),
                              start_new_session=True,  # don't forward signals
                              )
        except PermissionError:
            # use which to get abspath
            script = shutil.which(cmd[0]) or cmd[0]
            self.log.error("Permission denied trying to run %r. Does %s have access to this file?",
                           script, self.user.name,
                           )
            raise
        self.pid = self.proc.pid
        if self.__class__ is not LocalProcessSpawner:
            # subclasses may not pass through return value of super().start,
            # relying on deprecated 0.6 way of setting ip, port,
            # so keep a redundant copy here for now.
            # A deprecation warning will be shown if the subclass
            # does not return ip, port.
            if self.ip:
                self.user.server.ip = self.ip
            self.user.server.port = self.port
        return (self.ip or '127.0.0.1', self.port)

    @gen.coroutine
    def _signal(self, sig):
        """Send *sig* to the whole spawned process group.

        Returns False if the process group is already gone, True otherwise.
        """
        # Bug fix: this module never imports errno at top level, so the
        # exception handler below raised NameError instead of reporting
        # a vanished process.  Import locally to keep the fix contained.
        import errno
        try:
            os.killpg(os.getpgid(self.pid), sig)
        except OSError as e:
            if e.errno == errno.ESRCH:
                # Process group no longer exists.
                return False
            else:
                raise
        return True
| from jupyterhub.spawner import LocalProcessSpawner
from jupyterhub.utils import random_port
from subprocess import Popen
from tornado import gen
import pipes
import shutil
import os
# This is the path to the sandstone-jupyterhub script
APP_PATH = os.environ.get('SANDSTONE_APP_PATH')
SANDSTONE_SETTINGS = os.environ.get('SANDSTONE_SETTINGS')
class SandstoneSpawner(LocalProcessSpawner):
@gen.coroutine
def start(self):
"""Start the single-user server."""
self.port = random_port()
cmd = [APP_PATH]
env = self.get_env()
env['SANDSTONE_SETTINGS'] = SANDSTONE_SETTINGS
args = self.get_args()
# print(args)
cmd.extend(args)
self.log.info("Spawning %s", ' '.join(pipes.quote(s) for s in cmd))
try:
self.proc = Popen(cmd, env=env,
preexec_fn=self.make_preexec_fn(self.user.name),
start_new_session=True, # don't forward signals
)
except PermissionError:
# use which to get abspath
script = shutil.which(cmd[0]) or cmd[0]
self.log.error("Permission denied trying to run %r. Does %s have access to this file?",
script, self.user.name,
)
raise
self.pid = self.proc.pid
if self.__class__ is not LocalProcessSpawner:
# subclasses may not pass through return value of super().start,
# relying on deprecated 0.6 way of setting ip, port,
# so keep a redundant copy here for now.
# A deprecation warning will be shown if the subclass
# does not return ip, port.
if self.ip:
self.user.server.ip = self.ip
self.user.server.port = self.port
return (self.ip or '127.0.0.1', self.port)
| mit | Python |
89b64ae2c83a0af7d103d1117efa89242dd669da | add rgb2ycc, ycc2rgb | piraaa/VideoDigitalWatermarking | src/image.py | src/image.py | #
# image.py
# Created by pira on 2017/07/28.
#
#coding: utf-8
import numpy as np
import cv2
def readImage(filename):
    """Read an image file and return it as a BGR ndarray.

    Returns None if the file cannot be read (cv2.imread behaviour).
    """
    # cv2.imread flags: >0 (cv2.IMREAD_COLOR) = 3-channel colour,
    # 0 (cv2.IMREAD_GRAYSCALE) = grayscale, <0 (cv2.IMREAD_UNCHANGED) = as stored.
    # Flag 1 forces 3 channels even for black-and-white images.
    img = cv2.imread(filename, 1)
    print('Read "' + filename + '".')
    return img
def writeImage(filename, img):
    """Write *img* to *filename* and report the path written."""
    cv2.imwrite(filename, img)
    print('Write "'+filename+'".')
def printImage(img):
    """Display *img* in a window and block until any key is pressed."""
    cv2.imshow('Image', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def rgb2ycc(img):
    u"""Convert a BGR image (OpenCV channel order) to YCbCr.

    Despite the historical parameter name, channel 0 is treated as Blue
    and channel 2 as Red, matching cv2.imread output.

    @param img      : 3-d array [Height][Width][B, G, R]
    @return ycc_data: 3-d float array [Height][Width][Y, Cb, Cr]
    """
    # Vectorised replacement for the former per-pixel Python loops:
    # identical coefficients and evaluation order, but one C-level pass.
    img = np.asarray(img, dtype=np.float64)
    b, g, r = img[..., 0], img[..., 1], img[..., 2]
    ycc_data = np.empty(img.shape[:2] + (3,))
    ycc_data[..., 0] = 0.299*r + 0.587*g + 0.114*b    # Y
    ycc_data[..., 1] = -0.169*r - 0.331*g + 0.500*b   # Cb
    ycc_data[..., 2] = 0.500*r - 0.419*g - 0.081*b    # Cr
    return ycc_data
def ycc2rgb(img):
    u"""Convert a YCbCr image back to BGR (OpenCV channel order).

    @param img      : 3-d array [Height][Width][Y, Cb, Cr]
    @return rgb_data: 3-d float array [Height][Width][B, G, R]
    """
    # Vectorised replacement for the former per-pixel Python loops:
    # identical coefficients and evaluation order, but one C-level pass.
    img = np.asarray(img, dtype=np.float64)
    y, cb, cr = img[..., 0], img[..., 1], img[..., 2]
    rgb_data = np.empty(img.shape[:2] + (3,))
    rgb_data[..., 0] = y + 1.772*cb                # B
    rgb_data[..., 1] = y - 0.344*cb - 0.714*cr     # G
    rgb_data[..., 2] = y + 1.402*cr                # R
    return rgb_data
# image.py
# Created by pira on 2017/07/28.
#
#coding: utf-8
import numpy as np
import cv2
def readImage(filename):
#imreadのflags flags>0(cv2.IMREAD_COLOR):3ChannelColors,flags=0(cv2.IMREAD_GRAYSCALE):GrayScale,flags<0(cv2.IMREAD_UNCHANGED):Original
#白黒画像でも強制的にRGBで扱う.
img = cv2.imread(filename, 1)
print('Read "' + filename + '".')
return img
def writeImage(filename, img):
cv2.imwrite(filename, img)
print('Write "'+filename+'".')
def printImage(img):
cv2.imshow('Image', img)
cv2.waitKey(0)
cv2.destroyAllWindows() | mit | Python |
38a9f75bc87dbfb698b852145b7d62e9913602b4 | Stop trying to be overly clever | tolysz/tcldis,tolysz/tcldis,tolysz/tcldis,tolysz/tcldis | tcldis.py | tcldis.py | from __future__ import print_function
import _tcldis
printbc = _tcldis.printbc
getbc = _tcldis.getbc
inst_table = _tcldis.inst_table
| from __future__ import print_function
def _tcldis_init():
import sys
import _tcldis
mod = sys.modules[__name__]
for key, value in _tcldis.__dict__.iteritems():
if not callable(value):
continue
mod.__dict__[key] = value
_tcldis_init()
| bsd-3-clause | Python |
63eba95bccaaecc497fecc9fb60766dc11c37c47 | update setup.py for upload to pypi | aepyornis/nyc-db,aepyornis/nyc-db | src/setup.py | src/setup.py | import setuptools
setuptools.setup(
name="nycdb",
version="0.1.0",
url="https://github.com/aepyornis/nyc-db",
author="ziggy",
author_email="nycdb@riseup.net",
license='GPL',
description="database of nyc housing data",
long_description=open('README.md').read(),
packages=['nycdb'],
python_requires='>=3',
install_requires=[
'Cython>=0.27',
'PyYAML>=3',
'requests>=2.18',
'xlrd>=1.1.0',
'pyproj>=1.9.5',
'psycopg2>=2.7'
],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
)
| import setuptools
setuptools.setup(
name="nycdb",
version="0.1.0",
url="https://github.com/aepyornis/nyc-db",
author="ziggy",
author_email="ziggy@elephant-bird.net",
description="nyc housing database",
long_description=open('README.md').read(),
packages=setuptools.find_packages(exclude=('tests', 'docs', 'data', 'venv')),
install_requires=[],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| agpl-3.0 | Python |
4294c286afda9126aed5497e2a1048fc9bbbb491 | Update test.py | 1313e/e13Tools | test/test.py | test/test.py | # -*- coding: utf-8 -*-
"""
Testing script for travis
"""
from __future__ import division, absolute_import, print_function
import pytest
def tests():
# Check if all modules can be imported
import e13tools as e13
import e13tools.pyplot as e13plt
import e13tools.sampling as e13spl
assert True
if(__name__ == '__main__'):
tests()
| # -*- coding: utf-8 -*-
"""
Testing script for travis
"""
from __future__ import division, absolute_import, print_function
import pytest
def tests():
# Check if all modules can be imported
import e13tools as e13
import e13tools.pyplot as e13plt
import e13tools.sampling as e13spl
if(__name__ == '__main__'):
tests()
| bsd-3-clause | Python |
0c8739457150e4ae6e47ffb42d43a560f607a141 | Add test re: hide kwarg | frol/invoke,sophacles/invoke,mkusz/invoke,alex/invoke,mkusz/invoke,frol/invoke,pyinvoke/invoke,kejbaly2/invoke,mattrobenolt/invoke,mattrobenolt/invoke,pfmoore/invoke,tyewang/invoke,kejbaly2/invoke,pfmoore/invoke,singingwolfboy/invoke,pyinvoke/invoke | tests/run.py | tests/run.py | from spec import eq_, skip, Spec, raises, ok_, trap
from invoke.run import run
from invoke.exceptions import Failure
class Run(Spec):
    """run()"""

    def return_code_in_result(self):
        r = run("echo 'foo'")
        eq_(r.stdout, "foo\n")
        eq_(r.return_code, 0)
        eq_(r.exited, 0)

    def nonzero_return_code_for_failures(self):
        result = run("false", warn=True)
        eq_(result.exited, 1)
        # 127 is the shell's "command not found" exit status.
        result = run("goobypls", warn=True)
        eq_(result.exited, 127)

    @raises(Failure)
    def fast_failures(self):
        run("false")

    def run_acts_as_success_boolean(self):
        ok_(not run("false", warn=True))
        ok_(run("true"))

    def non_one_return_codes_still_act_as_False(self):
        ok_(not run("goobypls", warn=True, hide=True))

    def warn_kwarg_allows_continuing_past_failures(self):
        eq_(run("false", warn=True).exited, 1)

    @trap
    def hide_kwarg_allows_hiding_output(self):
        run("echo 'foo'", hide=True)
        # NOTE(review): ``sys`` is never imported in this file, and
        # ``sys.stdall`` is presumably injected by spec's @trap decorator --
        # confirm this test can actually run without a NameError.
        eq_(sys.stdall.getvalue(), "")
| from spec import eq_, skip, Spec, raises, ok_
from invoke.run import run
from invoke.exceptions import Failure
class Run(Spec):
"""run()"""
def return_code_in_result(self):
r = run("echo 'foo'")
eq_(r.stdout, "foo\n")
eq_(r.return_code, 0)
eq_(r.exited, 0)
def nonzero_return_code_for_failures(self):
result = run("false", warn=True)
eq_(result.exited, 1)
result = run("goobypls", warn=True)
eq_(result.exited, 127)
@raises(Failure)
def fast_failures(self):
run("false")
def run_acts_as_success_boolean(self):
ok_(not run("false", warn=True))
ok_(run("true"))
def non_one_return_codes_still_act_as_False(self):
ok_(not run("goobypls", warn=True))
def warn_kwarg_allows_continuing_past_failures(self):
eq_(run("false", warn=True).exited, 1)
| bsd-2-clause | Python |
133c48e759a30135c4cd5fdf92b75a83f58205c9 | Update at 2017-07-22 21-38-49 | amoshyc/tthl-code | train_vgg.py | train_vgg.py | import json
from pathlib import Path
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
from keras.models import Sequential, Model
from keras.preprocessing import image
from keras.layers import *
from keras.optimizers import *
from keras.applications.vgg16 import VGG16
from utils import get_callbacks
def main():
with tf.device('/gpu:2'):
vgg = VGG16(weights='imagenet', include_top=False, pooling='max')
x = vgg.output
x = Dense(16)(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(inputs=vgg.input, outputs=x)
model_arg = {
'loss': 'binary_crossentropy',
'optimizer': 'sgd',
'metrics': ['binary_accuracy']
}
model.compile(**model_arg)
model.summary()
train = np.load('npz/image_train.npz')
x_train, y_train = train['xs'], train['ys']
val = np.load('npz/image_val.npz')
x_val, y_val = val['xs'], val['ys']
fit_arg = {
'x': x_train,
'y': y_train,
'batch_size': 50,
'epochs': 100,
'shuffle': True,
'validation_data': (x_val, y_val),
'callbacks': get_callbacks('vgg'),
}
model.fit(**fit_arg)
# fit_gen_arg = {
# 'generator': image_train_gen,
# 'steps_per_epoch': N_IMAGE_TRAIN // IMAGE_BATCH_SIZE,
# 'epochs': 30,
# 'validation_data': image_val_gen,
# 'validation_steps': N_IMAGE_VAL // IMAGE_BATCH_SIZE,
# 'callbacks': get_callbacks('cnn')
# }
# model.fit_generator(**fit_gen_arg)
if __name__ == '__main__':
main() | import json
from pathlib import Path
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
from keras.models import Sequential, Model
from keras.preprocessing import image
from keras.layers import *
from keras.optimizers import *
from keras.applications.vgg16 import VGG16
from utils import get_callbacks
def main():
with tf.device('/gpu:2'):
vgg = VGG16(weights='imagenet', include_top=False, pooling='max')
x = vgg.output
x = Dense(16, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(inputs=vgg.input, outputs=x)
model_arg = {
'loss': 'binary_crossentropy',
'optimizer': 'sgd',
'metrics': ['binary_accuracy']
}
model.compile(**model_arg)
model.summary()
train = np.load('npz/image_train.npz')
x_train, y_train = train['xs'], train['ys']
val = np.load('npz/image_val.npz')
x_val, y_val = val['xs'], val['ys']
fit_arg = {
'x': x_train,
'y': y_train,
'batch_size': 50,
'epochs': 100,
'shuffle': True,
'validation_data': (x_val, y_val),
'callbacks': get_callbacks('vgg'),
}
model.fit(**fit_arg)
# fit_gen_arg = {
# 'generator': image_train_gen,
# 'steps_per_epoch': N_IMAGE_TRAIN // IMAGE_BATCH_SIZE,
# 'epochs': 30,
# 'validation_data': image_val_gen,
# 'validation_steps': N_IMAGE_VAL // IMAGE_BATCH_SIZE,
# 'callbacks': get_callbacks('cnn')
# }
# model.fit_generator(**fit_gen_arg)
if __name__ == '__main__':
main() | apache-2.0 | Python |
6b94d3622759a4851886e0a917551f3e6bb62fdb | Set log level to CRITICAL when not in verbose mode. Reason: We catch the important errors anyway. | buckket/twtxt | twtxt/log.py | twtxt/log.py | """
twtxt.log
~~~~~~~~~
This module configures the logging module for twtxt.
:copyright: (c) 2016 by buckket.
:license: MIT, see LICENSE for more details.
"""
import logging
def init_logging(debug=False):
    """Attach a formatted stream handler to the root logger.

    In debug mode every record is emitted; otherwise the root logger is
    limited to CRITICAL, since the important errors are caught elsewhere.
    """
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter(
        "%(asctime)s %(name)-12s %(levelname)-8s %(message)s"))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(logging.DEBUG if debug else logging.CRITICAL)
| """
twtxt.log
~~~~~~~~~
This module configures the logging module for twtxt.
:copyright: (c) 2016 by buckket.
:license: MIT, see LICENSE for more details.
"""
import logging
def init_logging(debug=False):
logger = logging.getLogger()
formatter = logging.Formatter("%(asctime)s %(name)-12s %(levelname)-8s %(message)s")
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
if debug:
logger.setLevel(logging.DEBUG)
| mit | Python |
cd8815c7d82d7c8b3e0c66622ab99762e54ae330 | Change models to allow for blank URLs | lo-windigo/fragdev,lo-windigo/fragdev | projects/models.py | projects/models.py | from django.db import models
from django.core.urlresolvers import reverse
class Project(models.Model):
    '''
    A open source/development project to be displayed on the Projects page
    '''

    # Visibility states for a project.
    PUBLIC = 'pub'
    HIDDEN = 'hid'
    PROJECT_STATUS = (
        (PUBLIC, 'Public'),
        (HIDDEN, 'Hidden'),
    )

    name = models.CharField(max_length=150)
    desc = models.TextField()
    date = models.DateTimeField(auto_now_add=True)
    status = models.CharField(max_length=3, choices=PROJECT_STATUS)
    # URL fields are optional (blank allowed) since not every project
    # has a presence on every platform.
    github = models.URLField(blank=True)
    gitlab = models.URLField(blank=True)
    website = models.URLField(blank=True)
    slug = models.SlugField(max_length=150)

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Bug fix: the model has no ``project`` attribute, so the previous
        # ``args=[self.project]`` raised AttributeError; the detail URL is
        # keyed by the slug field.
        # NOTE(review): confirm the "project" pattern's argument in urls.py.
        return reverse("project", args=[self.slug])
| from django.db import models
class Project(models.Model):
'''
A open source/development project to be displayed on the Projects page
'''
PUBLIC = 'pub'
HIDDEN = 'hid'
PROJECT_STATUS = (
(PUBLIC, 'Public'),
(HIDDEN, 'Hidden'),
)
name = models.CharField(max_length=150)
desc = models.TextField()
date = models.DateTimeField(auto_now_add=True)
status = models.CharField(max_length=3, choices=PROJECT_STATUS)
github = models.URLField()
gitlab = models.URLField()
website = models.URLField()
slug = models.SlugField(max_length=150)
| agpl-3.0 | Python |
c32b64c3d391f525e65bb43e6e24d6f3ea486e6e | Update urls.py | mcuringa/nyc-el | el/el/urls.py | el/el/urls.py | from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^$', 'el.views.home', ),
url(r'^link1$', 'el.views.link1', ),
url(r'^link2$', 'el.views.link2', ),
url(r'^tokyo$', 'el.views.tokyo', ),
url(r'^link4$', 'el.views.link4', ),
url(r'^home$', 'el.views.link5', ),
)
| from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^$', 'el.views.home', ),
url(r'^link1$', 'el.views.link1', ),
url(r'^link2$', 'el.views.link2', ),
url(r'^link3$', 'el.views.link3', ),
url(r'^link4$', 'el.views.link4', ),
url(r'^home$', 'el.views.link5', ),
)
| agpl-3.0 | Python |
7fe3776a59de7a133c5e396cb43d9b4bcc476f7d | Fix the check if form is submitted | Hackfmi/Diaphanum,Hackfmi/Diaphanum | protocols/views.py | protocols/views.py | from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render
from members.models import User
from .models import Protocol, Topic
from .forms import ProtocolForm, TopicForm, InstitutionForm
@login_required
def add(request):
    """Display and process the "add protocol" form."""
    # Bind the form only when POST data is present; a GET (or empty POST)
    # yields an unbound form, for which is_valid() is False and nothing
    # is saved.
    data = request.POST if request.POST else None
    form = ProtocolForm(data)
    if form.is_valid():
        form.save()
    # locals() exposes ``form`` (and ``data``) to the template context.
    return render(request, 'protocols/add.html', locals())
| from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.shortcuts import render
from members.models import User
from .models import Protocol, Topic
from .forms import ProtocolForm, TopicForm, InstitutionForm
@login_required
def add(request):
data = request.POST if request else None
form = ProtocolForm(data)
#import ipdb; ipdb.set_trace()
if form.is_valid():
form.save()
return render(request, 'protocols/add.html', locals())
| mit | Python |
f53026455803bcd2171531faa48cc5a6bbdfcdd7 | Add note to tokenstorage about Windows permissions (#532) | sirosen/globus-sdk-python,globus/globus-sdk-python,globus/globus-sdk-python | src/globus_sdk/tokenstorage/base.py | src/globus_sdk/tokenstorage/base.py | import abc
import contextlib
import os
from typing import Any, Dict, Iterator, Optional
from globus_sdk.services.auth import OAuthTokenResponse
class StorageAdapter(metaclass=abc.ABCMeta):
@abc.abstractmethod
def store(self, token_response: OAuthTokenResponse) -> None:
"""
Store an `OAuthTokenResponse` in the underlying storage for this adapter.
"""
@abc.abstractmethod
def get_token_data(self, resource_server: str) -> Optional[Dict[str, Any]]:
"""
Lookup token data for a resource server
Either returns a dict with the access token, refresh token (optional), and
expiration time, or returns ``None``, indicating that there was no data for that
resource server.
"""
def on_refresh(self, token_response: OAuthTokenResponse) -> None:
"""
By default, the on_refresh handler for a token storage adapter simply
stores the token response.
"""
self.store(token_response)
class FileAdapter(StorageAdapter, metaclass=abc.ABCMeta):
"""
File adapters are for single-user cases, where we can assume that there's a
simple file-per-user and users are only ever attempting to read their own
files.
"""
filename: str
def file_exists(self) -> bool:
"""
Check if the file used by this file storage adapter exists.
"""
return os.path.exists(self.filename)
@contextlib.contextmanager
def user_only_umask(self) -> Iterator[None]:
"""
A context manager to deny rwx to Group and World, x to User
This does not create a file, but ensures that if a file is created while in the
context manager, its permissions will be correct on unix systems.
.. note::
On Windows, this has no effect. To control the permissions on files used for
token storage, use ``%LOCALAPPDATA%`` or ``%APPDATA%``.
These directories should only be accessible to the current user.
"""
old_umask = os.umask(0o177)
try:
yield
finally:
os.umask(old_umask)
| import abc
import contextlib
import os
from typing import Any, Dict, Iterator, Optional
from globus_sdk.services.auth import OAuthTokenResponse
class StorageAdapter(metaclass=abc.ABCMeta):
@abc.abstractmethod
def store(self, token_response: OAuthTokenResponse) -> None:
"""
Store an `OAuthTokenResponse` in the underlying storage for this adapter.
"""
@abc.abstractmethod
def get_token_data(self, resource_server: str) -> Optional[Dict[str, Any]]:
"""
Lookup token data for a resource server
Either returns a dict with the access token, refresh token (optional), and
expiration time, or returns ``None``, indicating that there was no data for that
resource server.
"""
def on_refresh(self, token_response: OAuthTokenResponse) -> None:
"""
By default, the on_refresh handler for a token storage adapter simply
stores the token response.
"""
self.store(token_response)
class FileAdapter(StorageAdapter, metaclass=abc.ABCMeta):
"""
File adapters are for single-user cases, where we can assume that there's a
simple file-per-user and users are only ever attempting to read their own
files.
"""
filename: str
def file_exists(self) -> bool:
"""
Check if the file used by this file storage adapter exists.
"""
return os.path.exists(self.filename)
@contextlib.contextmanager
def user_only_umask(self) -> Iterator[None]:
"""
a context manager to deny rwx to Group and World, x to User
this does not create a file, but ensures that if a file is created while in the
context manager, its permissions will be correct on unix systems
"""
old_umask = os.umask(0o177)
try:
yield
finally:
os.umask(old_umask)
| apache-2.0 | Python |
d683f56c50587d346fa6891c20d6f68f37e2c9da | update imports | wolverton-research-group/qmpy,wolverton-research-group/qmpy,wolverton-research-group/qmpy,wolverton-research-group/qmpy,wolverton-research-group/qmpy | qmpy/web/views/materials/__init__.py | qmpy/web/views/materials/__init__.py | from entry import *
from structure import *
from composition import *
from discovery import *
from chem_pots import *
from element_groups import *
from deposit import *
def common_materials_view(request):
return render_to_response('materials/index.html', {})
| from entry import *
from structure import *
from composition import *
from discovery import *
from chem_pots import *
from deposit import *
def common_materials_view(request):
return render_to_response('materials/index.html', {})
| mit | Python |
0f48f1e9570fb194a19f0d8faf8c23c879a61d8b | remove any associated .pyc file before loading a module from path; this may fix the config load problem in sasview | SasView/sasmodels,SasView/sasmodels,SasView/sasmodels,SasView/sasmodels | sasmodels/custom/__init__.py | sasmodels/custom/__init__.py | """
Custom Models
-------------
This is a place holder for the custom models namespace. When models are
loaded from a file by :func:`generate.load_kernel_module` they are loaded
as if they exist in *sasmodels.custom*. This package needs to exist for this
to occur without error.
"""
from __future__ import division, print_function
import sys
import os
from os.path import basename, splitext
try:
# Python 3.5 and up
from importlib.util import spec_from_file_location, module_from_spec # type: ignore
def load_module_from_path(fullname, path):
"""load module from *path* as *fullname*"""
spec = spec_from_file_location(fullname, os.path.expanduser(path))
module = module_from_spec(spec)
spec.loader.exec_module(module)
return module
except ImportError:
# CRUFT: python 2
import imp
def load_module_from_path(fullname, path):
"""load module from *path* as *fullname*"""
# Clear out old definitions, if any
if fullname in sys.modules:
del sys.modules[fullname]
if path.endswith(".py") and os.path.exists(path) and os.path.exists(path+"c"):
# remove automatic pyc file before loading a py file
os.unlink(path+"c")
module = imp.load_source(fullname, os.path.expanduser(path))
return module
def load_custom_kernel_module(path):
"""load SAS kernel from *path* as *sasmodels.custom.modelname*"""
# Pull off the last .ext if it exists; there may be others
name = basename(splitext(path)[0])
# Placing the model in the 'sasmodels.custom' name space.
kernel_module = load_module_from_path('sasmodels.custom.'+name,
os.path.expanduser(path))
return kernel_module
| """
Custom Models
-------------
This is a place holder for the custom models namespace. When models are
loaded from a file by :func:`generate.load_kernel_module` they are loaded
as if they exist in *sasmodels.custom*. This package needs to exist for this
to occur without error.
"""
from __future__ import division, print_function
import sys
import os
from os.path import basename, splitext
try:
# Python 3.5 and up
from importlib.util import spec_from_file_location, module_from_spec # type: ignore
def load_module_from_path(fullname, path):
"""load module from *path* as *fullname*"""
spec = spec_from_file_location(fullname, os.path.expanduser(path))
module = module_from_spec(spec)
spec.loader.exec_module(module)
return module
except ImportError:
# CRUFT: python 2
import imp
def load_module_from_path(fullname, path):
"""load module from *path* as *fullname*"""
# Clear out old definitions, if any
if fullname in sys.modules:
del sys.modules[fullname]
module = imp.load_source(fullname, os.path.expanduser(path))
#os.unlink(path+"c") # remove the automatic pyc file
return module
def load_custom_kernel_module(path):
"""load SAS kernel from *path* as *sasmodels.custom.modelname*"""
# Pull off the last .ext if it exists; there may be others
name = basename(splitext(path)[0])
# Placing the model in the 'sasmodels.custom' name space.
kernel_module = load_module_from_path('sasmodels.custom.'+name,
os.path.expanduser(path))
return kernel_module
| bsd-3-clause | Python |
96167530d37f5aab6ae91cc8ff4583a86740e3f4 | Update locustfile.py | joejcollins/WomertonFarm,joejcollins/WomertonFarm,joejcollins/WomertonFarm,joejcollins/WomertonFarm | locust_test/locustfile.py | locust_test/locustfile.py | """Generic locustfile used to load testing sites."""
from locust import HttpLocust, TaskSet, task
class UserBehavior(TaskSet):
@task(2)
def index(self):
self.client.get("/")
@task(1)
def where(self):
self.client.get("/location")
@task(1)
def what(self):
self.client.get("/accommodation")
class LocustDelivery(HttpLocust):
task_set = UserBehavior
min_wait = 5000
max_wait = 15000
| """Generic locustfile used to load testing Zengenti sites."""
from locust import HttpLocust, TaskSet, task
class UserBehavior(TaskSet):
@task(2)
def index(self):
self.client.get("/")
@task(1)
def where(self):
self.client.get("/location")
@task(1)
def what(self):
self.client.get("/accommodation")
class LocustDelivery(HttpLocust):
task_set = UserBehavior
min_wait = 5000
max_wait = 15000
| mit | Python |
7592ca157c74a8ee66cb0ce68956f473469ffc10 | Add areyoumyfriend | bryanforbes/Erasmus | erasmus/cogs/misc.py | erasmus/cogs/misc.py | from __future__ import annotations
from discord.ext import commands
from ..context import Context
from ..erasmus import Erasmus
class Misc(commands.Cog[Context]):
    """Small miscellaneous commands that do not fit another cog."""

    def __init__(self, bot: Erasmus) -> None:
        self.bot = bot

    @commands.command(brief='Get the invite link for Erasmus')
    @commands.cooldown(rate=2, per=30.0, type=commands.BucketType.channel)
    async def invite(self, ctx: Context) -> None:
        # OAuth2 invite URL with the bot scope and required permissions.
        await ctx.send(
            '<https://discordapp.com/oauth2/authorize?client_id='
            '349394562336292876&scope=bot&permissions=388160>'
        )

    @commands.command(hidden=True)
    @commands.cooldown(rate=2, per=30.0, type=commands.BucketType.channel)
    async def areyoumyfriend(self, ctx: Context) -> None:
        # Easter egg: two hard-coded user IDs get the snarky answer.
        if ctx.author.id in {547579430164365313, 139178723235594240}:
            await ctx.send(f'No, I am not your friend, {ctx.author.mention}')
        else:
            await ctx.send(f"Of course I'm your friend, {ctx.author.mention}")
def setup(bot: Erasmus) -> None:
bot.add_cog(Misc(bot))
| from __future__ import annotations
from discord.ext import commands
from ..context import Context
from ..erasmus import Erasmus
class Misc(commands.Cog[Context]):
def __init__(self, bot: Erasmus) -> None:
self.bot = bot
@commands.command(brief='Get the invite link for Erasmus')
@commands.cooldown(rate=2, per=30.0, type=commands.BucketType.channel)
async def invite(self, ctx: Context) -> None:
await ctx.send(
'<https://discordapp.com/oauth2/authorize?client_id='
'349394562336292876&scope=bot&permissions=388160>'
)
def setup(bot: Erasmus) -> None:
bot.add_cog(Misc(bot))
| bsd-3-clause | Python |
9d908b1c59d02d32f1bcf425d6157e330ed0d11c | Fix codepoint-based-lookup (eg. .u 203D) in Python 3 | Uname-a/knife_scraper,Uname-a/knife_scraper,Uname-a/knife_scraper | willie/modules/unicode_info.py | willie/modules/unicode_info.py | #coding: utf8
"""
codepoints.py - Willie Codepoints Module
Copyright 2013, Edward Powell, embolalia.net
Copyright 2008, Sean B. Palmer, inamidst.com
Licensed under the Eiffel Forum License 2.
http://willie.dfbta.net
"""
from __future__ import unicode_literals
import unicodedata
import sys
from willie.module import commands, example, NOLIMIT
if sys.version_info.major >= 3:
unichr = chr
@commands('u')
@example('.u ‽', 'U+203D INTERROBANG (‽)')
@example('.u 203D', 'U+203D INTERROBANG (‽)')
def codepoint(bot, trigger):
    """Look up a Unicode code point, given a character or a hex value."""
    # Bug fix: trigger.group(2) is None when no argument was given, which
    # crashed on .strip() before the empty-argument check below could run.
    arg = (trigger.group(2) or '').strip()
    if len(arg) == 0:
        bot.reply('What code point do you want me to look up?')
        return NOLIMIT
    elif len(arg) > 1:
        # Multi-character input is interpreted as a hex code point value.
        try:
            arg = unichr(int(arg, 16))
        except (ValueError, OverflowError):
            bot.reply("That's not a valid code point.")
            return NOLIMIT
    # Get the hex value for the code point, and drop the 0x from the front
    point = str(hex(ord(u'' + arg)))[2:]
    # Make the hex 4 characters long with preceding 0s, and all upper case
    point = point.rjust(4, str('0')).upper()
    try:
        name = unicodedata.name(arg)
    except ValueError:
        # Bug fix: this previously *returned* the string, which the bot
        # framework never sends; say it explicitly instead.
        bot.say('U+%s (No name found)' % point)
        return NOLIMIT
    if not unicodedata.combining(arg):
        template = 'U+%s %s (%s)'
    else:
        # Show combining characters attached to a dotted circle (U+25CC).
        # Bug fix: the old '\xe2\x97\x8c' literal was UTF-8 bytes pasted
        # into a unicode_literals file, rendering as mojibake ("â—Œ").
        template = 'U+%s %s (\u25cc%s)'
    bot.say(template % (point, name, arg))
if __name__ == "__main__":
    # Run the @example-declared tests when the module is executed directly.
    from willie.test_tools import run_example_tests
    run_example_tests(__file__)
| #coding: utf8
"""
codepoints.py - Willie Codepoints Module
Copyright 2013, Edward Powell, embolalia.net
Copyright 2008, Sean B. Palmer, inamidst.com
Licensed under the Eiffel Forum License 2.
http://willie.dfbta.net
"""
from __future__ import unicode_literals
import unicodedata
from willie.module import commands, example, NOLIMIT
@commands('u')
@example('.u 203D', 'U+203D INTERROBANG (‽)')
def codepoint(bot, trigger):
    """Look up a hex code point (or a single character) and say its name."""
    # unichr() only exists on Python 2; on Python 3 chr() covers the whole
    # range.  Referencing the missing name raises NameError, which we catch.
    try:
        to_char = unichr
    except NameError:
        to_char = chr
    arg = trigger.group(2).strip()
    if len(arg) == 0:
        bot.reply('What code point do you want me to look up?')
        return NOLIMIT
    elif len(arg) > 1:
        # More than one character: treat the input as a hex code point.
        try:
            arg = to_char(int(arg, 16))
        except (ValueError, OverflowError):
            bot.reply("That's not a valid code point.")
            return NOLIMIT
    # U+XXXX form: strip "0x", left-pad to four digits, upper-case.
    point = str(hex(ord(u'' + arg)))[2:]
    point = point.rjust(4, str('0')).upper()
    try:
        name = unicodedata.name(arg)
    except ValueError:
        # Unnamed code point; previously the string was returned instead of
        # said, so nothing reached the channel.
        bot.say('U+%s (No name found)' % point)
        return
    if not unicodedata.combining(arg):
        template = 'U+%s %s (%s)'
    else:
        # U+25CC DOTTED CIRCLE as a base for combining marks; the previous
        # '\xe2\x97\x8c' literal was UTF-8 mojibake under unicode_literals.
        template = 'U+%s %s (\u25cc%s)'
    bot.say(template % (point, name, arg))
if __name__ == "__main__":
    # Run the @example-declared tests when the module is executed directly.
    from willie.test_tools import run_example_tests
    run_example_tests(__file__)
| mit | Python |
7d1a903845db60186318575db11a712cd62d884d | Fix mypy import errors due to removed services | amolenaar/gaphor,amolenaar/gaphor | win-installer/gaphor-script.py | win-installer/gaphor-script.py | if __name__ == "__main__":
import gaphor
from gaphor import core
from gaphor.services.componentregistry import ComponentRegistry
from gaphor.ui.consolewindow import ConsoleWindow
from gaphor.services.copyservice import CopyService
from gaphor.plugins.diagramlayout import DiagramLayout
from gaphor.ui.mainwindow import Diagrams
from gaphor.UML.elementfactory import ElementFactory
from gaphor.ui.elementeditor import ElementEditor
from gaphor.services.eventmanager import EventManager
from gaphor.services.helpservice import HelpService
from gaphor.ui.mainwindow import MainWindow
from gaphor.services.properties import Properties
from gaphor.plugins.pynsource import PyNSource
from gaphor.services.sanitizerservice import SanitizerService
from gaphor.services.undomanager import UndoManager
from gaphor.plugins.xmiexport import XMIExport
gaphor.main()
| if __name__ == "__main__":
import gaphor
from gaphor import core
from gaphor.services.actionmanager import ActionManager
from gaphor.plugins.alignment import Alignment
from gaphor.services.componentregistry import ComponentRegistry
from gaphor.ui.consolewindow import ConsoleWindow
from gaphor.services.copyservice import CopyService
from gaphor.services.diagramexportmanager import DiagramExportManager
from gaphor.plugins.diagramlayout import DiagramLayout
from gaphor.ui.mainwindow import Diagrams
from gaphor.UML.elementfactory import ElementFactory
from gaphor.ui.elementeditor import ElementEditor
from gaphor.services.eventmanager import EventManager
from gaphor.services.filemanager import FileManager
from gaphor.services.helpservice import HelpService
from gaphor.ui.mainwindow import MainWindow
from gaphor.ui.mainwindow import Namespace
from gaphor.services.properties import Properties
from gaphor.plugins.pynsource import PyNSource
from gaphor.services.sanitizerservice import SanitizerService
from gaphor.ui.mainwindow import Toolbox
from gaphor.services.undomanager import UndoManager
from gaphor.plugins.xmiexport import XMIExport
gaphor.main()
| lgpl-2.1 | Python |
c9d9375db1a70e6095d76344773699cb5189fa43 | Update rasa_core/policies/mapping_policy.py | RasaHQ/rasa_nlu,RasaHQ/rasa_core,RasaHQ/rasa_core,RasaHQ/rasa_nlu,RasaHQ/rasa_nlu,RasaHQ/rasa_core | rasa_core/policies/mapping_policy.py | rasa_core/policies/mapping_policy.py | import logging
import os
from typing import Any, List, Text
from rasa_core.actions.action import ACTION_LISTEN_NAME
from rasa_core import utils
from rasa_core.domain import Domain
from rasa_core.policies.policy import Policy
from rasa_core.trackers import DialogueStateTracker
from rasa_core.constants import MAPPING_SCORE
logger = logging.getLogger(__name__)
class MappingPolicy(Policy):
    """Deterministic policy that maps intents straight to actions.

    An intent may declare ``maps_to: <action>`` among its properties in the
    domain file.  Whenever that intent is the latest message, the mapped
    action is predicted with the fixed ``MAPPING_SCORE`` confidence, taking
    precedence over other policies.  Once the mapped action has been
    predicted, ``action_listen`` is predicted instead on the next turn.
    """

    def __init__(self) -> None:
        """Create a new Mapping policy."""
        super(MappingPolicy, self).__init__()
        # Action predicted on the previous turn; used to decide when to
        # switch back to the listen action.
        self.last_action_name = None

    def train(self, *args, **kwargs) -> None:
        """No-op: the policy is fully deterministic and needs no training."""
        pass

    def predict_action_probabilities(self,
                                     tracker: DialogueStateTracker,
                                     domain: Domain) -> List[float]:
        """Return a probability vector favouring the mapped action, if any.

        When the current intent has no ``maps_to`` entry, every action gets
        probability zero.
        """
        intent = tracker.latest_message.intent.get('name')
        mapped_action = domain.intent_properties.get(intent, {}).get('maps_to')

        prediction = [0.0] * domain.num_actions
        if mapped_action is None:
            return prediction

        if self.last_action_name == mapped_action:
            # The mapped action already ran -- hand control back to listening.
            listen_idx = domain.index_for_action(ACTION_LISTEN_NAME)
            prediction[listen_idx] = MAPPING_SCORE
        else:
            action_idx = domain.index_for_action(mapped_action)
            prediction[action_idx] = MAPPING_SCORE
            self.last_action_name = mapped_action
        return prediction

    def persist(self, *args) -> None:
        """No-op: there is no state to save."""
        pass

    @classmethod
    def load(cls, *args) -> 'MappingPolicy':
        """Return a fresh instance; nothing is persisted on disk."""
        return cls()
| import logging
import os
from typing import Any, List, Text
from rasa_core.actions.action import ACTION_LISTEN_NAME
from rasa_core import utils
from rasa_core.domain import Domain
from rasa_core.policies.policy import Policy
from rasa_core.trackers import DialogueStateTracker
from rasa_core.constants import MAPPING_SCORE
logger = logging.getLogger(__name__)
class MappingPolicy(Policy):
    """Policy which maps intents directly to actions.

    Intents can be assigned actions in the domain file which are to be
    executed whenever the intent is detected. This policy takes precedence
    over any other policy."""

    def __init__(self) -> None:
        """Create a new Mapping policy."""
        super(MappingPolicy, self).__init__()
        # action predicted on the previous turn; lets the policy switch to
        # the listen action after the mapped action has been suggested once
        self.last_action_name = None

    def train(self, *args, **kwargs) -> None:
        """Does nothing. This policy is deterministic."""
        pass

    def predict_action_probabilities(self,
                                     tracker: DialogueStateTracker,
                                     domain: Domain) -> List[float]:
        """Predicts the assigned action.

        If the current intent is assigned to an action that action will be
        predicted with the highest probability of all policies. If it is not
        the policy will predict zero for every action."""
        intent = tracker.latest_message.intent.get('name')
        # 'maps_to' is the optional action name attached to the intent
        action_name = domain.intent_properties.get(intent, {}).get('maps_to')

        prediction = [0.0] * domain.num_actions
        if action_name is not None:
            if self.last_action_name == action_name:
                # the mapped action already ran -- predict listening instead
                idx = domain.index_for_action(ACTION_LISTEN_NAME)
                prediction[idx] = MAPPING_SCORE
            else:
                idx = domain.index_for_action(action_name)
                prediction[idx] = MAPPING_SCORE
                self.last_action_name = action_name
        return prediction

    def persist(self, *args) -> None:
        """Does nothing since there is no data to be saved."""
        pass

    @classmethod
    def load(cls, *args) -> 'MappingPolicy':
        """Just returns the class since there is no data to be loaded."""
        return cls()
| apache-2.0 | Python |
cf011d9c8018a0ceba732c5fbb3fc7bdd58a3011 | Upgrade step small fix | veroc/Bika-LIMS,veroc/Bika-LIMS,veroc/Bika-LIMS,labsanmartin/Bika-LIMS,labsanmartin/Bika-LIMS,rockfruit/bika.lims,labsanmartin/Bika-LIMS,rockfruit/bika.lims | bika/lims/upgrade/to318.py | bika/lims/upgrade/to318.py | from Acquisition import aq_inner
from Acquisition import aq_parent
from Products.CMFCore.utils import getToolByName
from bika.lims.permissions import AddMultifile
from Products.Archetypes.BaseContent import BaseContent
from bika.lims.upgrade import stub
from bika.lims import logger
def upgrade(tool):
    """Upgrade step required for Bika LIMS 3.1.8
    """
    portal = aq_parent(aq_inner(tool))
    setup = portal.portal_setup
    qi = portal.portal_quickinstaller
    ufrom = qi.upgradeInfo('bika.lims')['installedVersion']
    logger.info("Upgrading Bika LIMS: %s -> %s" % (ufrom, '318'))

    # Re-import the GenericSetup profile steps that changed in this release
    setup.runImportStepFromProfile('profile-bika.lims:default', 'jsregistry')
    setup.runImportStepFromProfile('profile-bika.lims:default', 'typeinfo')
    setup.runImportStepFromProfile('profile-bika.lims:default', 'workflow-csv')

    # Register the Multifile content type in the setup catalog
    at = getToolByName(portal, 'archetype_tool')
    at.setCatalogsByType('Multifile', ['bika_setup_catalog', ])

    # Adding indexes (guarded so a re-run does not fail on existing indexes)
    bsc = getToolByName(portal, 'bika_setup_catalog', None)
    if 'getMethodID' not in bsc.indexes():
        bsc.addIndex('getMethodID', 'FieldIndex')
    if 'getDocumentID' not in bsc.indexes():
        bsc.addIndex('getDocumentID', 'FieldIndex')

    # Define permissions for Multifile
    mp = portal.manage_permission
    mp(AddMultifile, ['Manager', 'Owner', 'LabManager', 'LabClerk'], 1)

    # Update workflow permissions so the new workflow-csv takes effect
    wf = getToolByName(portal, 'portal_workflow')
    wf.updateRoleMappings()

    # Data migrations for this release
    HEALTH245(portal)

    return True
def HEALTH245(portal):
    """Ensure every ID prefix has a separator.

    Without it, new records would be created without the '-' that has been
    the default separator so far, so fill in '-' wherever the stored value
    is empty or the key is missing entirely.
    """
    for prefix in portal.bika_setup.getPrefixes():
        if not prefix.get('separator'):
            prefix['separator'] = '-'
| from Acquisition import aq_inner
from Acquisition import aq_parent
from Products.CMFCore.utils import getToolByName
from bika.lims.permissions import AddMultifile
from Products.Archetypes.BaseContent import BaseContent
from bika.lims.upgrade import stub
from bika.lims import logger
def upgrade(tool):
    """Upgrade step required for Bika LIMS 3.1.8
    """
    portal = aq_parent(aq_inner(tool))
    setup = portal.portal_setup
    qi = portal.portal_quickinstaller
    ufrom = qi.upgradeInfo('bika.lims')['installedVersion']
    logger.info("Upgrading Bika LIMS: %s -> %s" % (ufrom, '318'))

    # Updated profile steps
    setup.runImportStepFromProfile('profile-bika.lims:default', 'jsregistry')
    setup.runImportStepFromProfile('profile-bika.lims:default', 'typeinfo')
    setup.runImportStepFromProfile('profile-bika.lims:default', 'workflow-csv')

    # Register the Multifile content type in the setup catalog
    at = getToolByName(portal, 'archetype_tool')
    at.setCatalogsByType('Multifile', ['bika_setup_catalog', ])

    # Adding indexes (guarded so a re-run does not fail on existing indexes)
    bsc = getToolByName(portal, 'bika_setup_catalog', None)
    if 'getMethodID' not in bsc.indexes():
        bsc.addIndex('getMethodID', 'FieldIndex')
    if 'getDocumentID' not in bsc.indexes():
        bsc.addIndex('getDocumentID', 'FieldIndex')

    # Define permissions for Multifile
    mp = portal.manage_permission
    mp(AddMultifile, ['Manager', 'Owner', 'LabManager', 'LabClerk'], 1)

    # Update workflow permissions
    wf = getToolByName(portal, 'portal_workflow')
    wf.updateRoleMappings()

    # Migrations
    HEALTH245(portal)

    # NOTE(review): qi is re-fetched here although it is already bound above
    qi = portal.portal_quickinstaller
    setup.setLastVersionForProfile("profile-bika.lims:default", "3.1.8")
    return True
def HEALTH245(portal):
    """ Set the '-' as default separator in all ids. Otherwise, new
        records will be created without '-', which has been used since
        now by default
    """
    for p in portal.bika_setup.getPrefixes():
        # Use .get(): older prefix records may lack the 'separator' key
        # entirely, in which case p['separator'] on the right-hand side
        # raised a KeyError and aborted the upgrade.
        p['separator'] = '-' if not p.get('separator', None) else p['separator']
| agpl-3.0 | Python |
3646aba05566631f07695b2b5e6ad2fe0ece309b | Patch version bump to 29.3.1 | alphagov/notifications-utils | notifications_utils/version.py | notifications_utils/version.py | __version__ = '29.3.1'
| __version__ = '29.3.0'
| mit | Python |
1c2b06829c597d2287b7546b1a28661be44735c6 | Remove unnecessary try catch | adityahase/frappe,almeidapaulopt/frappe,frappe/frappe,saurabh6790/frappe,mhbu50/frappe,mhbu50/frappe,saurabh6790/frappe,frappe/frappe,almeidapaulopt/frappe,mhbu50/frappe,adityahase/frappe,StrellaGroup/frappe,frappe/frappe,saurabh6790/frappe,adityahase/frappe,StrellaGroup/frappe,mhbu50/frappe,StrellaGroup/frappe,yashodhank/frappe,yashodhank/frappe,almeidapaulopt/frappe,saurabh6790/frappe,adityahase/frappe,yashodhank/frappe,yashodhank/frappe,almeidapaulopt/frappe | frappe/desk/form/save.py | frappe/desk/form/save.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.desk.form.load import run_onload
@frappe.whitelist()
def savedocs(doc, action):
    """save / submit / update doclist

    :param doc: JSON-serialized document (including child rows) sent by the
        client
    :param action: one of "Save", "Submit", "Update" or "Cancel"; mapped to
        the target docstatus (0 draft, 1 submitted, 2 cancelled)
    """
    try:
        doc = frappe.get_doc(json.loads(doc))
        # keep the client-side temporary names so the browser can re-map
        # unsaved documents once the server has assigned real names
        set_local_name(doc)

        # action
        doc.docstatus = {"Save": 0, "Submit": 1, "Update": 1, "Cancel": 2}[action]
        if doc.docstatus == 1:
            doc.submit()
        else:
            doc.save()

        # update recent documents
        run_onload(doc)
        send_updated_docs(doc)
    except Exception:
        # surface the traceback to the client console, then re-raise so the
        # request fails loudly
        frappe.errprint(frappe.utils.get_traceback())
        raise
@frappe.whitelist()
def cancel(doctype=None, name=None, workflow_state_fieldname=None, workflow_state=None):
    """cancel a doclist

    :param doctype: doctype of the document to cancel
    :param name: name of the document
    :param workflow_state_fieldname: optional field to set before cancelling
    :param workflow_state: value written into that field
    """
    try:
        doc = frappe.get_doc(doctype, name)
        if workflow_state_fieldname and workflow_state:
            doc.set(workflow_state_fieldname, workflow_state)
        doc.cancel()
        send_updated_docs(doc)
    except Exception:
        frappe.errprint(frappe.utils.get_traceback())
        # tell the user the cancellation did not go through, then re-raise
        frappe.msgprint(frappe._("Did not cancel"))
        raise
def send_updated_docs(doc):
    """Append the saved document, as a dict, to the HTTP response payload."""
    from .load import get_docinfo

    # side effect: presumably attaches the document's docinfo to the
    # response as well (see load.py)
    get_docinfo(doc)

    docdict = doc.as_dict()
    # echo the client-side temporary name, if one was assigned
    if hasattr(doc, 'localname'):
        docdict["localname"] = doc.localname

    frappe.response.docs.append(docdict)
def set_local_name(doc):
    """Move client-side (unsaved) names out of the way before insert.

    For local documents the browser-generated name is stashed in
    ``localname`` and ``name`` is cleared so the server can assign one.
    A parent marked ``__islocal`` makes every child local as well, and an
    explicit ``__newname`` from the client wins at the end.
    """
    parent_is_local = doc.get('__islocal')
    for d in [doc] + list(doc.get_all_children()):
        if parent_is_local or d.get('__islocal'):
            d.localname = d.name
            d.name = None

    new_name = doc.get("__newname")
    if new_name:
        doc.name = new_name
| # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.desk.form.load import run_onload
@frappe.whitelist()
def savedocs(doc, action):
    """save / submit / update doclist

    :param doc: JSON-serialized document (including child rows) sent by the
        client
    :param action: one of "Save", "Submit", "Update" or "Cancel"; mapped to
        the target docstatus (0 draft, 1 submitted, 2 cancelled)
    """
    try:
        doc = frappe.get_doc(json.loads(doc))
        # keep the client-side temporary names so the browser can re-map
        # unsaved documents once the server has assigned real names
        set_local_name(doc)

        # action
        doc.docstatus = {"Save": 0, "Submit": 1, "Update": 1, "Cancel": 2}[action]
        if doc.docstatus == 1:
            doc.submit()
        else:
            # a duplicate-name frappe.NameError raised by the insert is simply
            # propagated (its message is already shown there), so the previous
            # inner ``try/except frappe.NameError: raise`` was a no-op and has
            # been removed
            doc.save()

        # update recent documents
        run_onload(doc)
        send_updated_docs(doc)
    except Exception:
        # surface the traceback to the client console, then re-raise so the
        # request fails loudly
        frappe.errprint(frappe.utils.get_traceback())
        raise
@frappe.whitelist()
def cancel(doctype=None, name=None, workflow_state_fieldname=None, workflow_state=None):
    """cancel a doclist

    :param doctype: doctype of the document to cancel
    :param name: name of the document
    :param workflow_state_fieldname: optional field to set before cancelling
    :param workflow_state: value written into that field
    """
    try:
        doc = frappe.get_doc(doctype, name)
        if workflow_state_fieldname and workflow_state:
            doc.set(workflow_state_fieldname, workflow_state)
        doc.cancel()
        send_updated_docs(doc)
    except Exception:
        frappe.errprint(frappe.utils.get_traceback())
        # tell the user the cancellation did not go through, then re-raise
        frappe.msgprint(frappe._("Did not cancel"))
        raise
def send_updated_docs(doc):
    """Append the saved document, as a dict, to the HTTP response payload."""
    from .load import get_docinfo
    # side effect: presumably attaches the document's docinfo to the
    # response as well (see load.py)
    get_docinfo(doc)

    d = doc.as_dict()
    # echo the client-side temporary name, if one was assigned
    if hasattr(doc, 'localname'):
        d["localname"] = doc.localname

    frappe.response.docs.append(d)
def set_local_name(doc):
    """Move client-side (unsaved) names out of the way before insert.

    For local documents the browser-generated name is stashed in
    ``localname`` and ``name`` is cleared so the server can assign one;
    an explicit ``__newname`` from the client wins at the end.
    """
    def _set_local_name(d):
        # a parent marked __islocal makes every child local as well
        if doc.get('__islocal') or d.get('__islocal'):
            d.localname = d.name
            d.name = None

    _set_local_name(doc)
    for child in doc.get_all_children():
        _set_local_name(child)

    if doc.get("__newname"):
        doc.name = doc.get("__newname")
| mit | Python |
db02b2cc2b8de947511fa0e750697a3b89a88714 | Remove some more unused code. | gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine | openquake/utils/db/__init__.py | openquake/utils/db/__init__.py | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010-2011, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# only, as published by the Free Software Foundation.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License version 3 for more details
# (a copy is included in the LICENSE file that accompanied this code).
#
# You should have received a copy of the GNU Lesser General Public License
# version 3 along with OpenQuake. If not, see
# <http://www.gnu.org/licenses/lgpl-3.0.txt> for a copy of the LGPLv3 License.
"""
This module contains constants and some basic utilities and scaffolding to
assist with database interactions.
"""
| # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010-2011, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# only, as published by the Free Software Foundation.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License version 3 for more details
# (a copy is included in the LICENSE file that accompanied this code).
#
# You should have received a copy of the GNU Lesser General Public License
# version 3 along with OpenQuake. If not, see
# <http://www.gnu.org/licenses/lgpl-3.0.txt> for a copy of the LGPLv3 License.
"""
This module contains constants and some basic utilities and scaffolding to
assist with database interactions.
"""
# Tablespaces
HZRDI_TS = 'hzrdi'
ADMIN_TS = 'admin'
EQCAT_TS = 'eqcat'

# Table/column dicts.
# These can be used as a template for doing db inserts.
# All values start out as None (dict.fromkeys); callers are expected to
# copy and fill them in before insertion.
SIMPLE_FAULT = dict.fromkeys([
    # required:
    'owner_id', 'gid', 'dip', 'upper_depth', 'lower_depth', 'edge',
    # xor:
    # NOTE(review): 'mgf_evd_id' looks like a typo for 'mfd_evd_id' --
    # confirm against the database schema
    'mfd_tgr_id', 'mgf_evd_id',
    # optional:
    'name', 'description', 'outline'])

SOURCE = dict.fromkeys([
    # required:
    'owner_id', 'simple_fault_id', 'gid', 'si_type', 'tectonic_region',
    # optional:
    'name', 'description', 'rake', 'hypocentral_depth', 'r_depth_distr_id',
    'input_id'])

MFD_EVD = dict.fromkeys([
    # required:
    'owner_id', 'magnitude_type', 'min_val', 'max_val', 'bin_size',
    'mfd_values',
    # optional:
    'total_cumulative_rate', 'total_moment_rate'])

MFD_TGR = dict.fromkeys([
    # required:
    'owner_id', 'magnitude_type', 'min_val', 'max_val', 'a_val', 'b_val',
    # optional:
    'total_cumulative_rate', 'total_moment_rate'])
| agpl-3.0 | Python |
5aca39cef15ea4381b30127b8ded31ec37ffd273 | Add image to ifff request | niqdev/packtpub-crawler,niqdev/packtpub-crawler,niqdev/packtpub-crawler | script/notification/ifttt.py | script/notification/ifttt.py | from logs import *
import requests
class Ifttt(object):
    """Sends crawler notifications to an IFTTT maker-channel webhook.
    """

    def __init__(self, config, packpub_info, upload_info):
        # upload_info is accepted for interface parity but not used here
        self.__packpub_info = packpub_info
        event_name = config.get('ifttt', 'ifttt.event_name')
        api_key = config.get('ifttt', 'ifttt.key')
        self.__url = "https://maker.ifttt.com/trigger/{eventName}/with/key/{apiKey}".format(
            eventName=event_name,
            apiKey=api_key
        )

    def send(self):
        # value1..value3 map to the ingredient slots of the IFTTT recipe
        payload = {
            'value1': self.__packpub_info['title'].encode('utf-8'),
            'value2': self.__packpub_info['description'].encode('utf-8'),
            'value3': self.__packpub_info['url_image'],
        }
        requests.post(self.__url, data=payload)
        log_success('[+] notification sent to IFTTT')

    def sendError(self, exception, source):
        title = "packtpub-crawler [{source}]: Could not download ebook".format(source=source)
        payload = {'value1': title, 'value2': repr(exception)}
        requests.post(self.__url, data=payload)
        log_success('[+] error notification sent to IFTTT')
| from logs import *
import requests
class Ifttt(object):
    """Sends crawler notifications to an IFTTT maker-channel webhook.
    """

    def __init__(self, config, packpub_info, upload_info):
        # upload_info is accepted for interface parity but not used here
        self.__packpub_info = packpub_info
        self.__url = "https://maker.ifttt.com/trigger/{eventName}/with/key/{apiKey}".format(
            eventName=config.get('ifttt', 'ifttt.event_name'),
            apiKey=config.get('ifttt', 'ifttt.key')
        )

    def send(self):
        # value1/value2 map to the ingredient slots of the IFTTT recipe
        r = requests.post(self.__url, data = {'value1':self.__packpub_info['title'].encode('utf-8'), 'value2':self.__packpub_info['description'].encode('utf-8')})
        log_success('[+] notification sent to IFTTT')

    def sendError(self, exception, source):
        title = "packtpub-crawler [{source}]: Could not download ebook".format(source=source)
        r = requests.post(self.__url, data = {'value1':title, 'value2':repr(exception)})
        log_success('[+] error notification sent to IFTTT')
| mit | Python |
754af1b3b06c489929c302ee1ec2a17bbf681cd1 | Remove unused import time | daybarr/timelapse,daybarr/timelapse | server/download.py | server/download.py | #!/usr/bin/env python
from __future__ import print_function
import argparse
import logging
import os
import sys
import pysftp
LOG_FORMAT = '%(asctime)-15s %(name)-10s %(levelname)-7s %(message)s'
# max number of files to download per sftp connection - not getting them all at
# once helps avoid lockups/hangs on the android side.
MAX_PER_CONN = 10
DEFAULT_DEST_DIR = os.path.expanduser("~/timelapse")
def parse_args(argv):
    """Parse command-line arguments.

    :param argv: full argument vector (``sys.argv``); argv[0] is skipped
    :returns: the parsed ``argparse.Namespace``
    :raises SystemExit: via ``parser.error`` when the destination directory
        does not exist
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('ip', help="IP address of camera")
    parser.add_argument('port', type=int,
                        help="Port number of ssh server on camera")
    parser.add_argument('username', help="Username for ssh server")
    parser.add_argument('password', help="Password for ssh server")
    parser.add_argument('--dest-dir', default=DEFAULT_DEST_DIR,
                        help="Directory to download photos to")
    args = parser.parse_args(argv[1:])
    if not os.path.isdir(args.dest_dir):
        parser.error("--dest-dir {} must exist".format(args.dest_dir))
    return args
def download_some(args):
    """Download and delete up to MAX_PER_CONN photos over one sftp session.

    Limiting the batch per connection helps avoid lockups/hangs on the
    android side (see MAX_PER_CONN above).

    :returns: True if at least one file was transferred, False when the
        remote directory was empty (the caller's loop stops then)

    NOTE(review): ``logger`` is only bound inside the ``__main__`` guard,
    so importing this module and calling the function directly would raise
    NameError -- confirm whether library use is intended.
    """
    some_downloaded = False
    logger.info("Connecting")
    with pysftp.Connection(args.ip, port=args.port,
                           username=args.username,
                           password=args.password) as sftp:
        logger.info("Listing files")
        file_names = sftp.listdir()
        logger.info("Found %d files", len(file_names))
        for i, file_name in enumerate(file_names[:MAX_PER_CONN]):
            dest_file_name = os.path.join(args.dest_dir, file_name)
            logger.info("Downloading %d/%d: %s => %s", i + 1, len(file_names),
                        file_name, dest_file_name)
            # BUG FIX: dest_file_name was computed and logged but never
            # passed to sftp.get(), so files landed in the current working
            # directory instead of args.dest_dir.
            sftp.get(file_name, dest_file_name)
            logger.info("Removing %s", file_name)
            sftp.unlink(file_name)
            some_downloaded = True
    return some_downloaded
if __name__ == "__main__":
    # module-level name: download_some() relies on this logger existing
    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
    args = parse_args(sys.argv)
    # keep pulling batches until the camera reports no more files
    while download_some(args):
        pass
| #!/usr/bin/env python
from __future__ import print_function
import argparse
import logging
import os
import sys
import time
import pysftp
LOG_FORMAT = '%(asctime)-15s %(name)-10s %(levelname)-7s %(message)s'
# max number of files to download per sftp connection - not getting them all at
# once helps avoid lockups/hangs on the android side.
MAX_PER_CONN = 10
DEFAULT_DEST_DIR = os.path.expanduser("~/timelapse")
def parse_args(argv):
    """Parse command-line arguments.

    :param argv: full argument vector (``sys.argv``); argv[0] is skipped
    :returns: the parsed ``argparse.Namespace``
    :raises SystemExit: via ``parser.error`` when the destination directory
        does not exist
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('ip', help="IP address of camera")
    parser.add_argument('port', type=int,
                        help="Port number of ssh server on camera")
    parser.add_argument('username', help="Username for ssh server")
    parser.add_argument('password', help="Password for ssh server")
    parser.add_argument('--dest-dir', default=DEFAULT_DEST_DIR,
                        help="Directory to download photos to")
    args = parser.parse_args(argv[1:])
    if not os.path.isdir(args.dest_dir):
        parser.error("--dest-dir {} must exist".format(args.dest_dir))
    return args
def download_some(args):
    """Download and delete up to MAX_PER_CONN photos over one sftp session.

    Limiting the batch per connection helps avoid lockups/hangs on the
    android side (see MAX_PER_CONN above).

    :returns: True if at least one file was transferred, False when the
        remote directory was empty (the caller's loop stops then)

    NOTE(review): ``logger`` is only bound inside the ``__main__`` guard,
    so importing this module and calling the function directly would raise
    NameError.  Also, ``dest_file_name`` is logged but never passed to
    ``sftp.get()``, so files land in the current working directory rather
    than ``args.dest_dir`` -- confirm which behaviour is intended.
    """
    some_downloaded = False
    logger.info("Connecting")
    with pysftp.Connection(args.ip, port=args.port,
                           username=args.username,
                           password=args.password) as sftp:
        logger.info("Listing files")
        file_names = sftp.listdir()
        logger.info("Found %d files", len(file_names))
        for i, file_name in enumerate(file_names[:MAX_PER_CONN]):
            dest_file_name = os.path.join(args.dest_dir, file_name)
            logger.info("Downloading %d/%d: %s => %s", i+1, len(file_names),
                        file_name, dest_file_name)
            sftp.get(file_name)
            logger.info("Removing %s", file_name)
            sftp.unlink(file_name)
            some_downloaded = True
    return some_downloaded
if __name__ == "__main__":
    # module-level name: download_some() relies on this logger existing
    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
    args = parse_args(sys.argv)
    # keep pulling batches until the camera reports no more files
    while download_some(args):
        pass
| mit | Python |
392aeb99891ff9949c9e9e205743937d8e9cb632 | Use threading.local() to store a requests.Session object per-thread and use it to perform the requests, allowing connections to be reused, speeding bot replies a lot | alvarogzp/telegram-bot,alvarogzp/telegram-bot | bot/api/telegram.py | bot/api/telegram.py | import threading
import requests
class TelegramBotApi:
    """This is a threading-safe API. Avoid breaking it by adding state."""

    def __init__(self, auth_token, debug: bool):
        self.base_url = "https://api.telegram.org/bot" + auth_token + "/"
        self.debug = debug
        # per-thread storage so each thread reuses its own requests.Session
        self.local = threading.local()

    def __getattr__(self, item):
        # any unknown attribute becomes a callable API method of that name
        return self.__get_request_from_function_name(item)

    def __get_request_from_function_name(self, function_name):
        def api_method(**params):
            return self.__send_request(function_name, params)
        return api_method

    def __send_request(self, command, params):
        request = self.__get_session().get(self.base_url + command,
                                           params=params, timeout=60)
        self.__log_request(request)
        response = request.json()
        self.__log_response(response)
        if not response["ok"]:
            raise TelegramBotApiException(response["description"])
        return response["result"]

    def __get_session(self):
        # lazily create one Session per thread; reusing it keeps the
        # underlying connection alive between requests
        session = getattr(self.local, "session", None)
        if session is None:
            session = requests.session()
            self.local.session = session
        return session

    def __log_request(self, request):
        if self.debug:
            print(">> " + request.url)

    def __log_response(self, response):
        if self.debug:
            print("<< " + str(response))
class TelegramBotApiException(Exception):
    """Raised when the Bot API answers ok=False; carries the server-supplied
    description."""
    pass
| import requests
class TelegramBotApi:
    """This is a threading-safe API. Avoid breaking it by adding state."""

    def __init__(self, auth_token, debug: bool):
        self.base_url = "https://api.telegram.org/bot" + auth_token + "/"
        self.debug = debug

    def __getattr__(self, item):
        # any unknown attribute becomes a callable API method of that name
        return self.__get_request_from_function_name(item)

    def __get_request_from_function_name(self, function_name):
        return lambda **params: self.__send_request(function_name, params)

    def __send_request(self, command, params):
        # NOTE(review): a fresh connection is opened for every request; a
        # per-thread requests.Session would allow connection reuse
        request = requests.get(self.base_url + command, params=params, timeout=60)
        self.__log_request(request)
        response = request.json()
        self.__log_response(response)
        if not response["ok"]:
            raise TelegramBotApiException(response["description"])
        return response["result"]

    def __log_request(self, request):
        if self.debug:
            print(">> " + request.url)

    def __log_response(self, response):
        if self.debug:
            print("<< " + str(response))
class TelegramBotApiException(Exception):
    """Raised when the Bot API answers ok=False; carries the server-supplied
    description."""
    pass
| agpl-3.0 | Python |
56cea385b0f16056d3a164676bd8d135ecf370cb | Allow access from rawgit.com | alvarogzp/telegram-games,alvarogzp/telegram-games,alvarogzp/telegram-games,alvarogzp/telegram-games | bot/game/api/api.py | bot/game/api/api.py | import http.server
import json
import socketserver
import ssl
from tools import config
LISTEN_ADDRESS = ("", 4343)
API_PATH = "/cgbapi/"
RESPONSE_ENCODING = "utf-8"
class ApiServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
    """Threaded HTTPS server for the game API."""

    def server_bind(self):
        super().server_bind()
        # wrap the listening socket in TLS right after binding, before any
        # connection is accepted
        self.socket = ssl.wrap_socket(self.socket, keyfile=config.Key.SSL_API_KEY.path, certfile=config.Key.SSL_API_CERT.path, server_side=True)
class ApiRequestHandler(http.server.BaseHTTPRequestHandler):
    """Routes GET requests below API_PATH to the api_call_* methods."""

    def do_GET(self):
        if self.path.startswith(API_PATH):
            self._handle_api_request()
        else:
            self.send_error(404)

    def _handle_api_request(self):
        handler_name = self._get_api_call_from_path()
        handler = getattr(self, handler_name, None)
        if handler is None:
            status, payload = 404, self._api_call_not_found()
        else:
            status, payload = 200, handler()
        self._send_api_response(status, self._encode_response(payload))

    def _get_api_call_from_path(self):
        # "/cgbapi/foo" -> "api_call_foo"
        return "api_call_" + self.path[len(API_PATH):]

    def _encode_response(self, response):
        return json.dumps(response).encode(RESPONSE_ENCODING)

    def _send_api_response(self, response_code, encoded_response):
        self.send_response(response_code)
        # allow browser clients served from rawgit.com to read the response
        self.send_header("Access-Control-Allow-Origin", "https://rawgit.com")
        self.send_header("Content-type", "application/json")
        self.send_header("Content-Length", str(len(encoded_response)))
        self.end_headers()
        self.wfile.write(encoded_response)

    def _api_call_not_found(self):
        return {"error": "Not found"}

    def api_call_set_score(self):
        return {"ok": "ok"}
if __name__ == "__main__":
    # serve forever on LISTEN_ADDRESS; one thread is spawned per request
    server = ApiServer(LISTEN_ADDRESS, ApiRequestHandler)
    server.serve_forever()
| import http.server
import json
import socketserver
import ssl
from tools import config
LISTEN_ADDRESS = ("", 4343)
API_PATH = "/cgbapi/"
RESPONSE_ENCODING = "utf-8"
class ApiServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
    """Threaded HTTPS server for the game API."""

    def server_bind(self):
        super().server_bind()
        # wrap the listening socket in TLS right after binding, before any
        # connection is accepted
        self.socket = ssl.wrap_socket(self.socket, keyfile=config.Key.SSL_API_KEY.path, certfile=config.Key.SSL_API_CERT.path, server_side=True)
class ApiRequestHandler(http.server.BaseHTTPRequestHandler):
    """Routes GET requests below API_PATH to the api_call_* methods."""

    def do_GET(self):
        if not self.path.startswith(API_PATH):
            self.send_error(404)
        else:
            self._handle_api_request()

    def _handle_api_request(self):
        api_call_name = self._get_api_call_from_path()
        if not hasattr(self, api_call_name):
            response_code = 404
            response = self._api_call_not_found()
        else:
            response_code = 200
            api_call = getattr(self, api_call_name)
            response = api_call()
        encoded_response = self._encode_response(response)
        self._send_api_response(response_code, encoded_response)

    def _get_api_call_from_path(self):
        # "/cgbapi/foo" -> "api_call_foo"
        return "api_call_" + self.path[len(API_PATH):]

    def _encode_response(self, response):
        return json.dumps(response).encode(RESPONSE_ENCODING)

    def _send_api_response(self, response_code, encoded_response):
        # NOTE(review): no CORS header is sent, so browser clients served
        # from other origins cannot read these responses
        self.send_response(response_code)
        self.send_header("Content-type", "application/json")
        self.send_header("Content-Length", str(len(encoded_response)))
        self.end_headers()
        self.wfile.write(encoded_response)

    def _api_call_not_found(self):
        return {"error": "Not found"}

    def api_call_set_score(self):
        # NOTE(review): currently returns a fixed payload regardless of input
        return {"ok": "ok"}
if __name__ == "__main__":
    # serve forever on LISTEN_ADDRESS; one thread is spawned per request
    server = ApiServer(LISTEN_ADDRESS, ApiRequestHandler)
    server.serve_forever()
| apache-2.0 | Python |
0bc3cc9bd927540915cd99dfe351da56c37d71d2 | Fix a ResourceWarning in setuptools_build | sigmavirus24/pip,techtonik/pip,zvezdan/pip,rouge8/pip,pfmoore/pip,sigmavirus24/pip,fiber-space/pip,techtonik/pip,atdaemon/pip,zvezdan/pip,xavfernandez/pip,sbidoul/pip,zvezdan/pip,rouge8/pip,rouge8/pip,benesch/pip,pypa/pip,xavfernandez/pip,sbidoul/pip,xavfernandez/pip,sigmavirus24/pip,RonnyPfannschmidt/pip,pypa/pip,benesch/pip,pradyunsg/pip,RonnyPfannschmidt/pip,RonnyPfannschmidt/pip,techtonik/pip,pfmoore/pip,atdaemon/pip,fiber-space/pip,benesch/pip,fiber-space/pip,atdaemon/pip,pradyunsg/pip | pip/utils/setuptools_build.py | pip/utils/setuptools_build.py | # Shim to wrap setup.py invocation with setuptools
# tokenize.open (Python 3) honours the PEP 263 coding cookie in setup.py;
# getattr falls back to the builtin open on Python 2.  The explicit
# f.close() avoids a ResourceWarning about the leaked file object.
SETUPTOOLS_SHIM = (
    "import setuptools, tokenize;__file__=%r;"
    "f=getattr(tokenize, 'open', open)(__file__);"
    "code=f.read().replace('\\r\\n', '\\n');"
    "f.close();"
    "exec(compile(code, __file__, 'exec'))"
)
| # Shim to wrap setup.py invocation with setuptools
# tokenize.open (Python 3) honours the PEP 263 coding cookie in setup.py;
# getattr falls back to the builtin open on Python 2.  The file object is
# now bound and closed explicitly: the previous one-expression form left it
# open, triggering a ResourceWarning.
SETUPTOOLS_SHIM = (
    "import setuptools, tokenize;__file__=%r;"
    "f=getattr(tokenize, 'open', open)(__file__);"
    "code=f.read().replace('\\r\\n', '\\n');"
    "f.close();"
    "exec(compile(code, __file__, 'exec'))"
)
| mit | Python |
5e7cf63d33f000a6022e8ec7830a2166a29e5175 | Fix context if value is None | fin/froide,stefanw/froide,stefanw/froide,fin/froide,stefanw/froide,stefanw/froide,stefanw/froide,fin/froide,fin/froide | froide/helper/widgets.py | froide/helper/widgets.py | from django import forms
from django.conf import settings
from taggit.forms import TagWidget
class BootstrapChoiceMixin(object):
    """Mixin that injects Bootstrap's ``form-check-input`` CSS class.

    Mutates the ``attrs`` dict passed to the widget (creating it when
    absent) before delegating to the next class in the MRO.
    """

    def __init__(self, *args, **kwargs):
        attrs = kwargs.setdefault('attrs', {})
        attrs['class'] = 'form-check-input'
        super(BootstrapChoiceMixin, self).__init__(*args, **kwargs)
class BootstrapCheckboxInput(BootstrapChoiceMixin, forms.CheckboxInput):
    """CheckboxInput rendered with Bootstrap's form-check-input class."""
    pass
class BootstrapRadioSelect(BootstrapChoiceMixin, forms.RadioSelect):
    """RadioSelect with Bootstrap classes and a custom option template."""
    option_template_name = 'helper/forms/widgets/radio_option.html'
class BootstrapFileInput(forms.FileInput):
    """FileInput that always carries Bootstrap's ``form-control`` class."""

    def __init__(self, *args, **kwargs):
        attrs = kwargs.setdefault('attrs', {})
        attrs['class'] = 'form-control'
        super(BootstrapFileInput, self).__init__(*args, **kwargs)
class PriceInput(forms.TextInput):
template_name = "helper/forms/widgets/price_input.html"
def get_context(self, name, value, attrs):
ctx = super(PriceInput, self).get_context(name, value, attrs)
ctx['widget'].setdefault('attrs', {})
ctx['widget']['attrs']['class'] = 'form-control col-3'
ctx['widget']['attrs']['pattern'] = "[\\d\\.,]*"
ctx['currency'] = settings.FROIDE_CONFIG['currency']
return ctx
class TagAutocompleteWidget(TagWidget):
template_name = 'helper/forms/widgets/tag_autocomplete.html'
class Media:
js = (
'js/tagautocomplete.js',
)
css_list = [
'css/tagautocomplete.css'
]
css = {
'screen': css_list
}
def __init__(self, *args, **kwargs):
self.autocomplete_url = kwargs.pop('autocomplete_url', None)
super().__init__(*args, **kwargs)
def get_context(self, name, value, attrs):
ctx = super().get_context(name, value, attrs)
ctx['autocomplete_url'] = self.autocomplete_url
if value is None:
ctx['tags'] = [v.tag.name for v in value]
else:
ctx['tags'] = []
return ctx
| from django import forms
from django.conf import settings
from taggit.forms import TagWidget
class BootstrapChoiceMixin(object):
def __init__(self, *args, **kwargs):
kwargs.setdefault('attrs', {})
kwargs['attrs'].update({'class': 'form-check-input'})
super(BootstrapChoiceMixin, self).__init__(*args, **kwargs)
class BootstrapCheckboxInput(BootstrapChoiceMixin, forms.CheckboxInput):
pass
class BootstrapRadioSelect(BootstrapChoiceMixin, forms.RadioSelect):
option_template_name = 'helper/forms/widgets/radio_option.html'
class BootstrapFileInput(forms.FileInput):
def __init__(self, *args, **kwargs):
kwargs.setdefault('attrs', {})
kwargs['attrs'].update({'class': 'form-control'})
super(BootstrapFileInput, self).__init__(*args, **kwargs)
class PriceInput(forms.TextInput):
template_name = "helper/forms/widgets/price_input.html"
def get_context(self, name, value, attrs):
ctx = super(PriceInput, self).get_context(name, value, attrs)
ctx['widget'].setdefault('attrs', {})
ctx['widget']['attrs']['class'] = 'form-control col-3'
ctx['widget']['attrs']['pattern'] = "[\\d\\.,]*"
ctx['currency'] = settings.FROIDE_CONFIG['currency']
return ctx
class TagAutocompleteWidget(TagWidget):
template_name = 'helper/forms/widgets/tag_autocomplete.html'
class Media:
js = (
'js/tagautocomplete.js',
)
css_list = [
'css/tagautocomplete.css'
]
css = {
'screen': css_list
}
def __init__(self, *args, **kwargs):
self.autocomplete_url = kwargs.pop('autocomplete_url', None)
super().__init__(*args, **kwargs)
def get_context(self, name, value, attrs):
ctx = super().get_context(name, value, attrs)
ctx['autocomplete_url'] = self.autocomplete_url
ctx['tags'] = [v.tag.name for v in value]
return ctx
| mit | Python |
619d934b8742eece8ed8670016994c6133428218 | test fix: merge subtests, expected by jar tests | moreati/pydgin,futurecore/pydgin,cornell-brg/pydgin,futurecore/pydgin,cornell-brg/pydgin,futurecore/pydgin,cornell-brg/pydgin,futurecore/pydgin,moreati/pydgin,moreati/pydgin,cornell-brg/pydgin,moreati/pydgin | interp_asm_test.py | interp_asm_test.py | import sys
import subprocess
sys.path.append('/Users/dmlockhart/vc/git-brg/parc/pymtl')
from inspect import getmembers, ismodule, isfunction
import pisa.pisa_inst_addu_test
import pisa.pisa_inst_subu_test
import pisa.pisa_inst_and_test
import pisa.pisa_inst_or_test
import pisa.pisa_inst_xor_test
import pisa.pisa_inst_nor_test
import pisa.pisa_inst_slt_test
import pisa.pisa_inst_sltu_test
import pisa.pisa_inst_addiu_test
import pisa.pisa_inst_andi_test
import pisa.pisa_inst_ori_test
import pisa.pisa_inst_xori_test
import pisa.pisa_inst_slti_test
import pisa.pisa_inst_sltiu_test
import pisa.pisa_inst_sll_test
import pisa.pisa_inst_srl_test
import pisa.pisa_inst_sra_test
import pisa.pisa_inst_sllv_test
import pisa.pisa_inst_srlv_test
import pisa.pisa_inst_srav_test
import pisa.pisa_inst_lui_test
import pisa.pisa_inst_j_test
import pisa.pisa_inst_jal_test
cmd = './interp_asm_jit-c'
#cmd = 'python interp_asm_jit.py'
asm = 'test.s'
#-----------------------------------------------------------------------
# collect tests
#-----------------------------------------------------------------------
tests = []
test_names = []
for mname, module in getmembers( pisa, ismodule ):
if not (mname.startswith('pisa_inst') and mname.endswith('test')):
continue
for func_name, func in getmembers( module, isfunction ):
if not (func_name.startswith('gen') and func.__module__.endswith( mname )):
continue
test = func()
name = '{}.{}'.format( mname, func_name )
test_names.append( name )
if isinstance( test, str ):
tests.append( test )
else:
tests.append( ''.join( test ) )
#-----------------------------------------------------------------------
# test_asm
#-----------------------------------------------------------------------
import pytest
@pytest.mark.parametrize( 'test_str', tests, ids=test_names )
def test_asm( test_str ):
with open(asm, 'w') as asm_file:
lines = test_str.split('\n')
for line in lines:
asm_file.write( line.lstrip()+'\n' )
try:
subprocess.check_call(
(cmd+' '+asm).split(),
env={'PYTHONPATH': '/Users/dmlockhart/vc/hg-opensource/pypy'}
)
except subprocess.CalledProcessError as e:
print test_str
raise e
print "DONE"
| import sys
import subprocess
sys.path.append('/Users/dmlockhart/vc/git-brg/parc/pymtl')
from inspect import getmembers, ismodule, isfunction
import pisa.pisa_inst_addu_test
import pisa.pisa_inst_subu_test
import pisa.pisa_inst_and_test
import pisa.pisa_inst_or_test
import pisa.pisa_inst_xor_test
import pisa.pisa_inst_nor_test
import pisa.pisa_inst_slt_test
import pisa.pisa_inst_sltu_test
import pisa.pisa_inst_addiu_test
import pisa.pisa_inst_andi_test
import pisa.pisa_inst_ori_test
import pisa.pisa_inst_xori_test
import pisa.pisa_inst_slti_test
import pisa.pisa_inst_sltiu_test
import pisa.pisa_inst_sll_test
import pisa.pisa_inst_srl_test
import pisa.pisa_inst_sra_test
import pisa.pisa_inst_sllv_test
import pisa.pisa_inst_srlv_test
import pisa.pisa_inst_srav_test
import pisa.pisa_inst_lui_test
import pisa.pisa_inst_j_test
import pisa.pisa_inst_jal_test
cmd = './interp_asm_jit-c'
#cmd = 'python interp_asm_jit.py'
asm = 'test.s'
#-----------------------------------------------------------------------
# collect tests
#-----------------------------------------------------------------------
tests = []
test_names = []
for mname, module in getmembers( pisa, ismodule ):
if not (mname.startswith('pisa_inst') and mname.endswith('test')):
continue
for func_name, func in getmembers( module, isfunction ):
if not (func_name.startswith('gen') and func.__module__.endswith( mname )):
continue
test = func()
if isinstance( test, str ):
name = '{}.{}'.format( mname, func_name )
test_names.append( name )
tests.append( test )
else:
names = [ '{}.{}_{}'.format( mname, func_name, x )
for x, _ in enumerate(test) ]
test_names.extend( names )
tests.extend( test )
#-----------------------------------------------------------------------
# test_asm
#-----------------------------------------------------------------------
import pytest
@pytest.mark.parametrize( 'test_str', tests, ids=test_names )
def test_asm( test_str ):
with open(asm, 'w') as asm_file:
lines = test_str.split('\n')
for line in lines:
asm_file.write( line.lstrip()+'\n' )
try:
subprocess.check_call(
(cmd+' '+asm).split(),
env={'PYTHONPATH': '/Users/dmlockhart/vc/hg-opensource/pypy'}
)
except subprocess.CalledProcessError as e:
print test_str
raise e
print "DONE"
| bsd-3-clause | Python |
b99719e71282a3e6a9766932b294a0ed6a7479ff | Bump version | mattrobenolt/invoke,mattrobenolt/invoke,tyewang/invoke,singingwolfboy/invoke,mkusz/invoke,pyinvoke/invoke,pyinvoke/invoke,kejbaly2/invoke,mkusz/invoke,sophacles/invoke,frol/invoke,pfmoore/invoke,pfmoore/invoke,kejbaly2/invoke,frol/invoke | invoke/_version.py | invoke/_version.py | __version_info__ = (0, 4, 0)
__version__ = '.'.join(map(str, __version_info__))
| __version_info__ = (0, 3, 0)
__version__ = '.'.join(map(str, __version_info__))
| bsd-2-clause | Python |
1b3a1d18e7a66c7067ab9d4aacddfabe33d29ca1 | add scheme (if missing) via urlparse.scheme check | mutaku/Stumpy,mutaku/Stumpy | shortener/views.py | shortener/views.py | from django.http import HttpResponse,Http404
from django import forms
from shortener.models import stumps
from django.utils.encoding import smart_str
from django.shortcuts import get_object_or_404,get_list_or_404,render_to_response,redirect
import hashlib
import urlparse
from django.db.models import Sum,Count
from django.contrib.sites.models import Site
from django.contrib.auth.decorators import login_required
import bleach
def index(request):
stumpy_domain = smart_str(Site.objects.get_current().domain)
stump_stats_num = stumps.objects.all().count()
stump_stats_visits = stumps.objects.aggregate(Sum('hits'))['%s__sum' % 'hits']
recent_stumps_list = stumps.objects.all().order_by('-id')[:5]
famous_stumps_list = stumps.objects.all().order_by('-hits')[:5]
return render_to_response('stumpy/index.html', {
'stumpy_domain': stumpy_domain,
'recent_stumps_list': recent_stumps_list,
'famous_stumps_list': famous_stumps_list,
'stump_stats_num': stump_stats_num,
'stump_stats_visits': stump_stats_visits
})
def detail(request,short):
short_clean = bleach.clean(short)
stump = get_object_or_404(stumps,shorturl=short_clean)
stump.hits += 1
stump.save()
return redirect(stump.longurl)
@login_required
def submit(request,stump):
stumpy_domain = smart_str(Site.objects.get_current().domain)
stump_clean = bleach.clean(stump)
this_stump = smart_str(stump_clean)
############################################################
############################################################
# This code portion is temporary hack for // -> /
# it will be removed once I have it fixed legit
stump_split = list(this_stump.partition(":"))
if stump_split[1] and stump_split[2].startswith("/"):
stump_split[2] = "/"+stump_split[2]
this_stump = ''.join(stump_split)
############################################################
############################################################
this_hash = hashlib.sha1(this_stump).hexdigest()
does_exist = stumps.objects.filter(hashurl=this_hash)
if not does_exist:
this_user = smart_str(request.user)
parsed_url = urlparse.urlparse(this_stump)
if not parsed_url.scheme"
this_stump = "http://"+this_stump
if parsed_url.netloc != stumpy_domain:
s = stumps(longurl=this_stump,hashurl=this_hash,cookie=this_user)
s.save()
new_stump = stumps.objects.get(id=s.id)
stumpy_domain = smart_str(Site.objects.get_current().domain)
return render_to_response('stumpy/submit.html', {
'new_stump': new_stump,
'stumpy_domain': stumpy_domain
})
else:
return HttpResponse("Sly fox eats the poisoned rabbit.")
else:
return render_to_response('stumpy/submit.html', {
'exist_stump': does_exist.get(),
'stumpy_domain': stumpy_domain
})
| from django.http import HttpResponse,Http404
from django import forms
from shortener.models import stumps
from django.utils.encoding import smart_str
from django.shortcuts import get_object_or_404,get_list_or_404,render_to_response,redirect
import hashlib
import urlparse
from django.db.models import Sum,Count
from django.contrib.sites.models import Site
from django.contrib.auth.decorators import login_required
import bleach
def index(request):
stumpy_domain = smart_str(Site.objects.get_current().domain)
stump_stats_num = stumps.objects.all().count()
stump_stats_visits = stumps.objects.aggregate(Sum('hits'))['%s__sum' % 'hits']
recent_stumps_list = stumps.objects.all().order_by('-id')[:5]
famous_stumps_list = stumps.objects.all().order_by('-hits')[:5]
return render_to_response('stumpy/index.html', {
'stumpy_domain': stumpy_domain,
'recent_stumps_list': recent_stumps_list,
'famous_stumps_list': famous_stumps_list,
'stump_stats_num': stump_stats_num,
'stump_stats_visits': stump_stats_visits
})
def detail(request,short):
short_clean = bleach.clean(short)
stump = get_object_or_404(stumps,shorturl=short_clean)
stump.hits += 1
stump.save()
return redirect(stump.longurl)
@login_required
def submit(request,stump):
stumpy_domain = smart_str(Site.objects.get_current().domain)
stump_clean = bleach.clean(stump)
this_stump = smart_str(stump_clean)
############################################################
############################################################
# This code portion is temporary hack for // -> /
# it will be removed once I have it fixed legit
stump_split = list(this_stump.partition(":"))
if stump_split[1] and stump_split[2].startswith("/"):
stump_split[2] = "/"+stump_split[2]
this_stump = ''.join(stump_split)
############################################################
# Another hack - urls that don't have http(s)/ftp etc
# are legit and get in but the redirect breaks?!
if not stump_split[1]:
this_stump = "http://"+this_stump
############################################################
############################################################
this_hash = hashlib.sha1(this_stump).hexdigest()
does_exist = stumps.objects.filter(hashurl=this_hash)
if not does_exist:
this_user = smart_str(request.user)
parsed_url = urlparse.urlparse(this_stump)
if parsed_url.netloc != stumpy_domain:
s = stumps(longurl=this_stump,hashurl=this_hash,cookie=this_user)
s.save()
new_stump = stumps.objects.get(id=s.id)
stumpy_domain = smart_str(Site.objects.get_current().domain)
return render_to_response('stumpy/submit.html', {
'new_stump': new_stump,
'stumpy_domain': stumpy_domain
})
else:
return HttpResponse("Sly fox eats the poisoned rabbit.")
else:
return render_to_response('stumpy/submit.html', {
'exist_stump': does_exist.get(),
'stumpy_domain': stumpy_domain
})
| bsd-3-clause | Python |
127860c8feee3b4bdaee1bad6c76ff1c8a8acaf1 | remove remove by try_remove closes #525 | Konubinix/weboob,eirmag/weboob,Boussadia/weboob,willprice/weboob,eirmag/weboob,sputnick-dev/weboob,laurent-george/weboob,nojhan/weboob-devel,nojhan/weboob-devel,yannrouillard/weboob,sputnick-dev/weboob,Boussadia/weboob,frankrousseau/weboob,franek/weboob,nojhan/weboob-devel,Konubinix/weboob,laurent-george/weboob,yannrouillard/weboob,RouxRC/weboob,RouxRC/weboob,franek/weboob,sputnick-dev/weboob,willprice/weboob,yannrouillard/weboob,RouxRC/weboob,Boussadia/weboob,franek/weboob,frankrousseau/weboob,eirmag/weboob,Boussadia/weboob,frankrousseau/weboob,laurent-george/weboob,Konubinix/weboob,willprice/weboob | weboob/backends/lefigaro/pages/article.py | weboob/backends/lefigaro/pages/article.py | "ArticlePage object for inrocks"
# -*- coding: utf-8 -*-
# Copyright(C) 2011 Julien Hebert
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from weboob.tools.parsers.lxmlparser import select
from weboob.tools.genericArticle import GenericNewsPage, try_remove
class ArticlePage(GenericNewsPage):
"ArticlePage object for inrocks"
def on_loaded(self):
self.main_div = self.document.getroot()
self.element_author_selector = "div.name>span"
self.element_title_selector = "h1"
self.element_body_selector = "#article"
def get_body(self):
element_body = self.get_element_body()
h1_title = select(element_body, self.element_title_selector, 1)
div_infos = select(element_body, "div.infos", 1)
el_script = select(element_body, "script", 1)
element_body.remove(h1_title)
element_body.remove(div_infos)
element_body.remove(toolsbar)
try_remove(element_body, "div.photo")
try_remove(element_body, "div.art_bandeau_bottom")
try_remove(element_body, "div.view")
try_remove(element_body, "span.auteur_long")
try_remove(element_body, "#toolsbar")
el_script.drop_tree()
element_body.find_class("texte")[0].drop_tag()
element_body.tag = "div"
return self.browser.parser.tostring(element_body)
| "ArticlePage object for inrocks"
# -*- coding: utf-8 -*-
# Copyright(C) 2011 Julien Hebert
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from weboob.tools.parsers.lxmlparser import select
from weboob.tools.genericArticle import GenericNewsPage, try_remove
class ArticlePage(GenericNewsPage):
"ArticlePage object for inrocks"
def on_loaded(self):
self.main_div = self.document.getroot()
self.element_author_selector = "div.name>span"
self.element_title_selector = "h1"
self.element_body_selector = "#article"
def get_body(self):
element_body = self.get_element_body()
h1_title = select(element_body, self.element_title_selector, 1)
div_infos = select(element_body, "div.infos", 1)
toolsbar = select(element_body, "#toolsbar", 1)
el_script = select(element_body, "script", 1)
element_body.remove(h1_title)
element_body.remove(div_infos)
element_body.remove(toolsbar)
try_remove(element_body, "div.photo")
try_remove(element_body, "div.art_bandeau_bottom")
try_remove(element_body, "div.view")
try_remove(element_body, "span.auteur_long")
el_script.drop_tree()
element_body.find_class("texte")[0].drop_tag()
element_body.tag = "div"
return self.browser.parser.tostring(element_body)
| agpl-3.0 | Python |
2fca0582d6590def19f3788e0b804a2ec85ca42d | Fix logging | opennode/nodeconductor-openstack | src/nodeconductor_openstack/log.py | src/nodeconductor_openstack/log.py | from nodeconductor.logging.loggers import EventLogger, event_logger
class BackupEventLogger(EventLogger):
resource = 'structure.Resource'
class Meta:
event_types = ('resource_backup_creation_scheduled',
'resource_backup_creation_succeeded',
'resource_backup_creation_failed',
'resource_backup_restoration_scheduled',
'resource_backup_restoration_succeeded',
'resource_backup_restoration_failed',
'resource_backup_deletion_scheduled',
'resource_backup_deletion_succeeded',
'resource_backup_deletion_failed',
'resource_backup_schedule_creation_succeeded',
'resource_backup_schedule_update_succeeded',
'resource_backup_schedule_deletion_succeeded',
'resource_backup_schedule_activated',
'resource_backup_schedule_deactivated')
class InstanceFlavorChangeEventLogger(EventLogger):
resource = 'structure.Resource'
flavor = 'openstack.Flavor'
class Meta:
event_types = ('resource_flavor_change_scheduled',
'resource_flavor_change_succeeded',
'resource_flavor_change_failed')
class InstanceVolumeChangeEventLogger(EventLogger):
resource = 'structure.Resource'
volume_size = int
class Meta:
nullable_fields = ['volume_size']
event_types = ('resource_volume_extension_scheduled',
'resource_volume_extension_succeeded',
'resource_volume_extension_failed')
event_logger.register('openstack_backup', BackupEventLogger)
event_logger.register('openstack_flavor', InstanceFlavorChangeEventLogger)
event_logger.register('openstack_volume', InstanceVolumeChangeEventLogger)
| from nodeconductor.logging.loggers import EventLogger, event_logger
class BackupEventLogger(EventLogger):
resource = 'structure.Resource'
class Meta:
event_types = ('resource_backup_creation_scheduled',
'resource_backup_creation_succeeded',
'resource_backup_creation_failed',
'resource_backup_restoration_scheduled',
'resource_backup_restoration_succeeded',
'resource_backup_restoration_failed',
'resource_backup_deletion_scheduled',
'resource_backup_deletion_succeeded',
'resource_backup_deletion_failed',
'resource_backup_schedule_creation_succeeded',
'resource_backup_schedule_update_succeeded',
'resource_backup_schedule_deletion_succeeded',
'resource_backup_schedule_activated',
'resource_backup_schedule_deactivated')
class InstanceFlavorChangeEventLogger(EventLogger):
resource = 'structure.Resource'
flavor = 'nodeconductor_openstack.Flavor'
class Meta:
event_types = ('resource_flavor_change_scheduled',
'resource_flavor_change_succeeded',
'resource_flavor_change_failed')
class InstanceVolumeChangeEventLogger(EventLogger):
resource = 'structure.Resource'
volume_size = int
class Meta:
nullable_fields = ['volume_size']
event_types = ('resource_volume_extension_scheduled',
'resource_volume_extension_succeeded',
'resource_volume_extension_failed')
event_logger.register('openstack_backup', BackupEventLogger)
event_logger.register('openstack_flavor', InstanceFlavorChangeEventLogger)
event_logger.register('openstack_volume', InstanceVolumeChangeEventLogger)
| mit | Python |
f171cd79fbee3cf9cfd378f9b34bf8fef351a292 | return the gravity comp response correctly | HLP-R/hlpr_kinesthetic_teaching | hlpr_kinesthetic_interaction/src/hlpr_kinesthetic_interaction/jaco_arm.py | hlpr_kinesthetic_interaction/src/hlpr_kinesthetic_interaction/jaco_arm.py | #!/usr/bin/env python
import rospy
from hlpr_manipulation_utils.manipulator import Gripper
from wpi_jaco_msgs.srv import GravComp
from kinova_msgs.srv import Start, Stop
"""
jaco_arm.py
Simple wrapper that abstracts out the arm class so that other arms
can use kinesthetic_interaction
"""
class Arm():
GRAVITY_COMP_SERVICE = "/jaco_arm/grav_comp"
ENABLE_7DOF_GRAVITY_COMP_SERVICE = "/j2s7s300/in/start_gravity_comp"
DISABLE_7DOF_GRAVITY_COMP_SERVICE = "/j2s7s300/in/stop_gravity_comp"
def __init__(self, is7DOF = False):
# Setup gravity compensation
rospy.logwarn("Waiting for gravity compensation service")
if (is7DOF):
rospy.wait_for_service(Arm.ENABLE_7DOF_GRAVITY_COMP_SERVICE)
rospy.wait_for_service(Arm.DISABLE_7DOF_GRAVITY_COMP_SERVICE)
self.enableGravComp = rospy.ServiceProxy(Arm.ENABLE_7DOF_GRAVITY_COMP_SERVICE, Start)
self.disableGravComp = rospy.ServiceProxy(Arm.DISABLE_7DOF_GRAVITY_COMP_SERVICE, Stop)
self.gravity_comp = self.setGravityComp
else:
rospy.wait_for_service(Arm.GRAVITY_COMP_SERVICE)
self.gravity_comp = rospy.ServiceProxy(Arm.GRAVITY_COMP_SERVICE, GravComp)
rospy.logwarn("Gravity compenstation service loaded")
# Initialize the gripper
self.gripper = Gripper()
def setGravityComp(self, toggle):
if toggle:
return self.enableGravComp()
else:
return self.disableGravComp()
| #!/usr/bin/env python
import rospy
from hlpr_manipulation_utils.manipulator import Gripper
from wpi_jaco_msgs.srv import GravComp
from kinova_msgs.srv import Start, Stop
"""
jaco_arm.py
Simple wrapper that abstracts out the arm class so that other arms
can use kinesthetic_interaction
"""
class Arm():
GRAVITY_COMP_SERVICE = "/jaco_arm/grav_comp"
ENABLE_7DOF_GRAVITY_COMP_SERVICE = "/j2s7s300/in/start_gravity_comp"
DISABLE_7DOF_GRAVITY_COMP_SERVICE = "/j2s7s300/in/stop_gravity_comp"
def __init__(self, is7DOF = False):
# Setup gravity compensation
rospy.logwarn("Waiting for gravity compensation service")
if (is7DOF):
rospy.wait_for_service(Arm.ENABLE_7DOF_GRAVITY_COMP_SERVICE)
rospy.wait_for_service(Arm.DISABLE_7DOF_GRAVITY_COMP_SERVICE)
self.enableGravComp = rospy.ServiceProxy(Arm.ENABLE_7DOF_GRAVITY_COMP_SERVICE, Start)
self.disableGravComp = rospy.ServiceProxy(Arm.DISABLE_7DOF_GRAVITY_COMP_SERVICE, Stop)
self.gravity_comp = self.setGravityComp
else:
rospy.wait_for_service(Arm.GRAVITY_COMP_SERVICE)
self.gravity_comp = rospy.ServiceProxy(Arm.GRAVITY_COMP_SERVICE, GravComp)
rospy.logwarn("Gravity compenstation service loaded")
# Initialize the gripper
self.gripper = Gripper()
def setGravityComp(self, toggle):
if toggle:
self.enableGravComp()
else:
self.disableGravComp()
| bsd-3-clause | Python |
dfd1aeba24c8373b64a073f5f779a22311db8c42 | add price | it-projects-llc/website-addons,it-projects-llc/website-addons,it-projects-llc/website-addons | website_sale_quantity_hide/__openerp__.py | website_sale_quantity_hide/__openerp__.py | # -*- coding: utf-8 -*-
{
'name': "Hide quantity field in web shop",
'summary': "Allows to sale only 1 item at once",
'author': 'IT-Projects LLC, Ivan Yelizariev',
'license': 'GPL-3',
"price": 20.00,
"currency": "EUR",
'category': 'Website',
'website': 'https://twitter.com/yelizariev',
'version': '1.0.0',
'depends': ['website_sale'],
'data': [
'views/product.xml',
'views/templates.xml'
],
}
| # -*- coding: utf-8 -*-
{
'name': "Hide quantity field in web shop",
'summary': "Allows to sale only 1 item at once",
'author': 'IT-Projects LLC, Ivan Yelizariev',
'license': 'GPL-3',
'category': 'Website',
'website': 'https://twitter.com/yelizariev',
'version': '1.0.0',
'depends': ['website_sale'],
'data': [
'views/product.xml',
'views/templates.xml'
],
}
| mit | Python |
a018da55959b438f803e0ede7855f34a1a0712c4 | Load security data | cubells/l10n-spain,cubells/l10n-spain,cubells/l10n-spain | l10n_es_ticketbai_api/__manifest__.py | l10n_es_ticketbai_api/__manifest__.py | # Copyright 2021 Binovo IT Human Project SL
# Copyright 2021 Landoo Sistemas de Informacion SL
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
{
"name": "TicketBAI - API",
"version": "14.0.2.2.1",
"category": "Accounting & Finance",
"website": "https://github.com/OCA/l10n-spain",
"author": "Binovo," "Odoo Community Association (OCA)",
"license": "AGPL-3",
"application": False,
"installable": True,
"auto_install": False,
"development_status": "Beta",
"maintainers": ["ao-landoo"],
"depends": ["base", "base_setup"],
"external_dependencies": {
"python": [
"cryptography",
"pyOpenSSL",
"qrcode",
"xmlsig",
"xmltodict",
],
},
"data": [
"security/ir.model.access.csv",
"security/l10n_es_ticketbai_security.xml",
"data/tax_agency_data.xml",
"data/ticketbai_invoice.xml",
"views/l10n_es_ticketbai_api_views.xml",
"views/res_company_views.xml",
"views/res_config_settings_views.xml",
"views/ticketbai_certificate_views.xml",
"views/ticketbai_installation_views.xml",
],
"demo": ["demo/res_partner_demo.xml"],
}
| # Copyright 2021 Binovo IT Human Project SL
# Copyright 2021 Landoo Sistemas de Informacion SL
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
{
"name": "TicketBAI - API",
"version": "14.0.2.2.1",
"category": "Accounting & Finance",
"website": "https://github.com/OCA/l10n-spain",
"author": "Binovo," "Odoo Community Association (OCA)",
"license": "AGPL-3",
"application": False,
"installable": True,
"auto_install": False,
"development_status": "Beta",
"maintainers": ["ao-landoo"],
"depends": ["base", "base_setup"],
"external_dependencies": {
"python": [
"cryptography",
"pyOpenSSL",
"qrcode",
"xmlsig",
"xmltodict",
],
},
"data": [
"security/ir.model.access.csv",
"data/tax_agency_data.xml",
"data/ticketbai_invoice.xml",
"views/l10n_es_ticketbai_api_views.xml",
"views/res_company_views.xml",
"views/res_config_settings_views.xml",
"views/ticketbai_certificate_views.xml",
"views/ticketbai_installation_views.xml",
],
"demo": ["demo/res_partner_demo.xml"],
}
| agpl-3.0 | Python |
f832bd54dd293c9b76d46d48e00ba1430eab5566 | Fix usage message | plamere/spotipy | examples/user_playlists_contents.py | examples/user_playlists_contents.py | # shows a user's playlists (need to be authenticated via oauth)
import sys
import os
import spotipy
import spotipy.util as util
def show_tracks(results):
for i, item in enumerate(tracks['items']):
track = item['track']
print(" %d %32.32s %s" % (i, track['artists'][0]['name'], track['name']))
if __name__ == '__main__':
if len(sys.argv) > 1:
username = sys.argv[1]
else:
print("Whoops, need your username!")
print("usage: python user_playlists_contents.py [username]")
sys.exit()
token = util.prompt_for_user_token(username)
if token:
top = 40
sp = spotipy.Spotify(auth=token)
playlists = sp.user_playlists(username)
for playlist in playlists['items']:
if playlist['owner']['id'] == username:
print()
print(playlist['name'])
print(' total tracks', playlist['tracks']['total'])
results = sp.user_playlist(username, playlist['id'], fields="tracks,next")
tracks = results['tracks']
show_tracks(tracks)
while tracks['next']:
tracks = sp.next(tracks)
show_tracks(tracks)
else:
print("Can't get token for", username)
| # shows a user's playlists (need to be authenticated via oauth)
import sys
import os
import spotipy
import spotipy.util as util
def show_tracks(results):
for i, item in enumerate(tracks['items']):
track = item['track']
print(" %d %32.32s %s" % (i, track['artists'][0]['name'], track['name']))
if __name__ == '__main__':
if len(sys.argv) > 1:
username = sys.argv[1]
else:
print("Whoops, need your username!")
print("usage: python user_playlists.py [username]")
sys.exit()
token = util.prompt_for_user_token(username)
if token:
top = 40
sp = spotipy.Spotify(auth=token)
playlists = sp.user_playlists(username)
for playlist in playlists['items']:
if playlist['owner']['id'] == username:
print()
print(playlist['name'])
print(' total tracks', playlist['tracks']['total'])
results = sp.user_playlist(username, playlist['id'], fields="tracks,next")
tracks = results['tracks']
show_tracks(tracks)
while tracks['next']:
tracks = sp.next(tracks)
show_tracks(tracks)
else:
print("Can't get token for", username)
| mit | Python |
78232ed4b075bc66ecd52108df22362c453b4da7 | Update vuln-39.py | mtesauro/gauntlt-demo,gauntlt/gauntlt-demo,mtesauro/gauntlt-demo,gauntlt/gauntlt-demo | examples/webgoat/vuln-39/vuln-39.py | examples/webgoat/vuln-39/vuln-39.py | import requests
import json
loginurl = 'http://127.0.0.1:8080/WebGoat/login.mvc'
authurl = 'http://127.0.0.1:8080/WebGoat/j_spring_security_check'
menuurl = 'http://127.0.0.1:8080/WebGoat/service/lessonmenu.mvc'
attackurl = 'http://127.0.0.1:8080/WebGoat/'
login = {"username":"guest", "password":"guest"}
purchase = {"PRC1":"%240.00", "QTY1":"11", "TOT1":"%240.00", "PRC2":"%240.00", "QTY2":"1", "TOT2":"%240.00", "PRC3":"240.00", "QTY3":"1", "TOT3":"%240.00", "PRC4":"%240.00", "QTY4":"1", "TOT4":"%240.00", "SUBTOT":"%240.00", "GRANDTOT":"%240.00","field2":"4128+3214+0002+1999", "field1":"rgrsehreh", "SUBMIT":"Purchase"}
session = requests.Session()
first = session.get(loginurl)
second = session.post(authurl, login)
third = session.get(menuurl)
for entry in range(len(third.json())):
if third.json()[entry]['name'] == "AJAX Security":
for child in range(len(third.json()[entry]['children'])):
if third.json()[entry]['children'][child]['name'] == "Insecure Client Storage":
attackurl += third.json()[entry]['children'][child]['link']
fourth = session.get(attackurl)
fifth = session.post(attackurl, purchase)
if fifth.status_code == 200:
print "Vulnerable"
else:
print "No Vulnerability"
| import requests
import json
loginurl = 'http://127.0.0.1:8080/WebGoat/login.mvc'
authurl = 'http://127.0.0.1:8080/WebGoat/j_spring_security_check'
menuurl = 'http://127.0.0.1:8080/WebGoat/service/lessonmenu.mvc'
attackurl = 'http://127.0.0.1:8080/WebGoat/'
login = {"username":"guest", "password":"guest"}
purchase = {"PRC1":"%240.00", "QTY1":"11", "TOT1":"%240.00", "PRC2":"%240.00", "QTY2":"1", "TOT2":"%240.00", "PRC3":"240.00", "QTY3":"1", "TOT3":"%240.00", "PRC4":"%240.00", "QTY4":"1", "TOT4":"%240.00", "SUBTOT":"%240.00", "GRANDTOT":"%240.00","field2":"4128+3214+0002+1999", "field1":"rgrsehreh", "SUBMIT":"Purchase"}
session = requests.Session()
first = session.get(loginurl)
second = session.post(authurl, login)
third = session.get(menuurl)
for entry in range(len(third.json())):
if third.json()[entry]['name'] == "AJAX Security":
for child in range(len(third.json()[entry]['children'])):
if third.json()[entry]['children'][child]['name'] == "Insecure Client Storage":
attackurl += third.json()[entry]['children'][child]['link']
fourth = session.get(attackurl)
fifth = session.post(attackurl, purchase)
if fifth.status_code == 200:
print "Vulnerable"
else:
print "No Vulnerability"
| mit | Python |
a760d60ec2b37613c621508001b57dc81b464b5c | add precision | charliezon/deep_stock | experiments/exp_20170722_01/main.py | experiments/exp_20170722_01/main.py | import xgboost as xgb
# Accuracy on test set: 63%
# read in data
dtrain = xgb.DMatrix('../../data/data_20170722_01/train_data.txt')
dtest = xgb.DMatrix('../../data/data_20170722_01/test_data.txt')
# specify parameters via map, definition are same as c++ version
param = {'max_depth':22, 'eta':0.1, 'silent':0, 'objective':'binary:logistic','min_child_weight':3,'gamma':14 }
# specify validations set to watch performance
watchlist = [(dtest,'eval'), (dtrain,'train')]
num_round = 1000
bst = xgb.train(param, dtrain, num_round, watchlist)
# this is prediction
preds = bst.predict(dtest)
labels = dtest.get_label()
print ('error=%f' % ( sum(1 for i in range(len(preds)) if int(preds[i]>0.5)!=labels[i]) /float(len(preds))))
print ('correct=%f' % ( sum(1 for i in range(len(preds)) if int(preds[i]>0.5)==labels[i]) /float(len(preds))))
print ('precision=%f' % ( sum(1 for i in range(len(preds)) if int(preds[i]>0.5)==labels[i] and labels[i]==1) /float(sum(1 for i in range(len(preds)) if preds[i]==1)))) | import xgboost as xgb
# Accuracy on test set: 63%
# read in data
dtrain = xgb.DMatrix('../../data/data_20170722_01/train_data.txt')
dtest = xgb.DMatrix('../../data/data_20170722_01/test_data.txt')
# specify parameters via map, definition are same as c++ version
param = {'max_depth':22, 'eta':0.1, 'silent':0, 'objective':'binary:logistic','min_child_weight':3,'gamma':14 }
# specify validations set to watch performance
watchlist = [(dtest,'eval'), (dtrain,'train')]
num_round = 1000
bst = xgb.train(param, dtrain, num_round, watchlist)
# this is prediction
preds = bst.predict(dtest)
labels = dtest.get_label()
print ('error=%f' % ( sum(1 for i in range(len(preds)) if int(preds[i]>0.5)!=labels[i]) /float(len(preds))))
print ('correct=%f' % ( sum(1 for i in range(len(preds)) if int(preds[i]>0.5)==labels[i]) /float(len(preds)))) | mit | Python |
99baef55bc5e2d2311ba356ccf29bd8c16b99f64 | add watchlist | charliezon/deep_stock | experiments/exp_20170722_01/main.py | experiments/exp_20170722_01/main.py | import xgboost as xgb
# read in data
dtrain = xgb.DMatrix('../../data/data_20170722_01/train_data.txt')
dtest = xgb.DMatrix('../../data/data_20170722_01/test_data.txt')
# specify parameters via map, definition are same as c++ version
param = {'max_depth':22, 'eta':0.1, 'silent':0, 'objective':'binary:logistic','min_child_weight':3,'gamma':14 }
# specify validations set to watch performance
watchlist = [(dtest,'eval'), (dtrain,'train')]
num_round = 33
bst = xgb.train(param, dtrain, num_round, watchlist)
# this is prediction
preds = bst.predict(dtest)
labels = dtest.get_label()
print ('error=%f' % ( sum(1 for i in range(len(preds)) if int(preds[i]>0.5)!=labels[i]) /float(len(preds))))
print ('correct=%f' % ( sum(1 for i in range(len(preds)) if int(preds[i]>0.5)==labels[i]) /float(len(preds)))) | import xgboost as xgb
# read in data
dtrain = xgb.DMatrix('../../data/data_20170722_01/train_data.txt')
dtest = xgb.DMatrix('../../data/data_20170722_01/test_data.txt')
# specify parameters via map
param = {'max_depth':2, 'eta':1, 'silent':1, 'objective':'binary:logistic' }
num_round = 2
bst = xgb.train(param, dtrain, num_round)
# make prediction
preds = bst.predict(dtest) | mit | Python |
0dbcc4c1604cca0e77b0cd3c1a812398c1d4bc56 | Implement read_data and write_data in curate_data_parse.py. translate_to_parse unfinished. | rice-apps/atlas,rice-apps/atlas,rice-apps/atlas | scripts/curate_data_parse.py | scripts/curate_data_parse.py | """
This script curates the Places data in a format compatible with the Place class
in the Parse Rice Maps Data store.
Input File: places_data.json
Output File: places_data_parse.json
"""
import json
def read_data(f_name):
"""
Given an input file name f_name, reads the JSON data inside returns as
Python data object.
"""
f = open(f_name)
json_data = json.loads(f.read())
return json_data
def write_data(f_name, data):
"""
Given an output data object, writes as JSON to the specified output file
name f_name.
"""
json_data = json.dump(data, f_name)
def translate_to_parse(place):
"""
Given a python dictionary place, translates the data format to make it
compatible with the Parse class.
Example Input:
{
"name":"Anderson Biological Laboratories, M.D.",
"type":"building",
"abbreviation":"ABL",
"location":{
"latitude":"29.718644",
"longitude":"-95.402363"
}
Example Output:
{
"name": "Anderson Biological Laboratories, M.D.",
"types": [],
"symbol": "ABL",
"location": {"__type":"GeoPoint","latitude":29.718644,"longitude":-95.402363},
"geometryType": "GeoPoint"
}
"""
out = {}
out["name"] = place["name"]
out["types"] = []
out["symbol"] = place["abbreviation"]
out["location"] = {"_type":"GeoPoint",
"latitude":float(place["location"]["latitue"]),
"longitude":float(place["location"]["longitude"]}
return out
def main():
input_data = read_data('places_data.json')
output_data = [translate_to_parse(place) for place in input_data]
write_data('places_data_parse.json', output_data)
| """
This script curates the Places data in a format compatible with the Place class
in the Parse Rice Maps Data store.
Input File: places_data.json
Output File: places_data_parse.json
"""
def read_data(f_name):
"""
Given an input file name f_name, reads the JSON data inside returns as
Python data object.
"""
pass
def write_data(f_name, data):
"""
Given an output data object, writes as JSON to the specified output file
name f_name.
"""
pass
def translate_to_parse(place):
"""
Given a python dictionary place, translates the data format to make it
compatible with the Parse class.
Example Input:
{
"name":"Anderson Biological Laboratories, M.D.",
"type":"building",
"abbreviation":"ABL",
"location":{
"latitude":"29.718644",
"longitude":"-95.402363"
}
Example Output:
{
"name": "Anderson Biological Laboratories, M.D.",
"types": [],
"symbol": "ABL",
"location": {"__type":"GeoPoint","latitude":29.718644,"longitude":-95.402363},
"geometryType": "GeoPoint"
}
"""
pass
def main():
input_data = read_data('places_data.json')
output_data = [translate_to_parse(place) for place in input_data]
write_data('places_data_parse.json', output_data) | mit | Python |
37c35db753425dfcd14be4e72b8aea95afaf7762 | Fix KeyError of OS_AUTH_URL | opnfv/functest,opnfv/functest,mywulin/functest,mywulin/functest | functest/cli/commands/cli_os.py | functest/cli/commands/cli_os.py | #!/usr/bin/env python
#
# jose.lausuch@ericsson.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import os
import click
from six.moves import urllib
from functest.ci import check_deployment
from functest.utils import constants
class OpenStack(object):
def __init__(self):
self.os_auth_url = os.environ.get('OS_AUTH_URL', None)
self.endpoint_ip = None
self.endpoint_port = None
self.openstack_creds = constants.ENV_FILE
if self.os_auth_url:
self.endpoint_ip = urllib.parse.urlparse(self.os_auth_url).hostname
self.endpoint_port = urllib.parse.urlparse(self.os_auth_url).port
def ping_endpoint(self):
if self.os_auth_url is None:
click.echo("Source the OpenStack credentials first")
exit(0)
response = os.system("ping -c 1 " + self.endpoint_ip + ">/dev/null")
if response == 0:
return 0
else:
click.echo("Cannot talk to the endpoint %s\n" % self.endpoint_ip)
exit(0)
@staticmethod
def show_credentials():
dic_credentials = {}
for key, value in os.environ.items():
if key.startswith('OS_'):
dic_credentials.update({key: value})
return dic_credentials
def check(self):
self.ping_endpoint()
deployment = check_deployment.CheckDeployment()
deployment.check_all()
class CliOpenStack(OpenStack):
@staticmethod
def show_credentials():
dic_credentials = OpenStack.show_credentials()
for key, value in dic_credentials.items():
if key.startswith('OS_'):
click.echo("{}={}".format(key, value))
| #!/usr/bin/env python
#
# jose.lausuch@ericsson.com
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import os
import click
from six.moves import urllib
from functest.ci import check_deployment
from functest.utils import constants
class OpenStack(object):
def __init__(self):
self.os_auth_url = os.environ['OS_AUTH_URL']
self.endpoint_ip = None
self.endpoint_port = None
self.openstack_creds = constants.ENV_FILE
if self.os_auth_url:
self.endpoint_ip = urllib.parse.urlparse(self.os_auth_url).hostname
self.endpoint_port = urllib.parse.urlparse(self.os_auth_url).port
def ping_endpoint(self):
if self.os_auth_url is None:
click.echo("Source the OpenStack credentials first")
exit(0)
response = os.system("ping -c 1 " + self.endpoint_ip + ">/dev/null")
if response == 0:
return 0
else:
click.echo("Cannot talk to the endpoint %s\n" % self.endpoint_ip)
exit(0)
@staticmethod
def show_credentials():
dic_credentials = {}
for key, value in os.environ.items():
if key.startswith('OS_'):
dic_credentials.update({key: value})
return dic_credentials
def check(self):
self.ping_endpoint()
deployment = check_deployment.CheckDeployment()
deployment.check_all()
class CliOpenStack(OpenStack):
@staticmethod
def show_credentials():
dic_credentials = OpenStack.show_credentials()
for key, value in dic_credentials.items():
if key.startswith('OS_'):
click.echo("{}={}".format(key, value))
| apache-2.0 | Python |
92e728a1a1c2f770adeb175a61433da125571198 | fix todo path | SymbiFlow/prjxray,SymbiFlow/prjxray,SymbiFlow/prjxray,SymbiFlow/prjxray,SymbiFlow/prjxray | fuzzers/056-rempips/generate.py | fuzzers/056-rempips/generate.py | #!/usr/bin/env python3
from prjxray.segmaker import Segmaker
verbose = 1
segmk = Segmaker("design.bits")
tiledata = dict()
pipdata = dict()
ignpip = set()
todo = set()
print("Loading todo from ../todo.txt.")
with open("../../todo.txt", "r") as f:
for line in f:
line = tuple(line.strip().split("."))
verbose and print('todo', line)
todo.add(line)
print("Loading tags from design.txt.")
with open("design.txt", "r") as f:
for line in f:
tile, pip, src, dst, pnum, pdir = line.split()
_, pip = pip.split(".")
_, src = src.split("/")
_, dst = dst.split("/")
pnum = int(pnum)
pdir = int(pdir)
if tile not in tiledata:
tiledata[tile] = {"pips": set(), "srcs": set(), "dsts": set()}
if pip in pipdata:
assert pipdata[pip] == (src, dst)
else:
pipdata[pip] = (src, dst)
tiledata[tile]["pips"].add(pip)
tiledata[tile]["srcs"].add(src)
tiledata[tile]["dsts"].add(dst)
if pdir == 0:
tiledata[tile]["srcs"].add(dst)
tiledata[tile]["dsts"].add(src)
if pnum == 1 or pdir == 0:
verbose and print('ignore pnum == 1 or pdir == 0: ', pip)
ignpip.add(pip)
t = ("_".join(tile.split("_")[0:2]), dst, src)
if t not in todo:
verbose and print('ignore not todo: ', t)
ignpip.add(pip)
for tile, pips_srcs_dsts in tiledata.items():
pips = pips_srcs_dsts["pips"]
srcs = pips_srcs_dsts["srcs"]
dsts = pips_srcs_dsts["dsts"]
for pip, src_dst in pipdata.items():
src, dst = src_dst
if pip in ignpip:
pass
elif pip in pips:
segmk.add_tile_tag(tile, "%s.%s" % (dst, src), 1)
elif src_dst[1] not in dsts:
segmk.add_tile_tag(tile, "%s.%s" % (dst, src), 0)
segmk.compile()
segmk.write()
| #!/usr/bin/env python3
from prjxray.segmaker import Segmaker
verbose = 1
segmk = Segmaker("design.bits")
tiledata = dict()
pipdata = dict()
ignpip = set()
todo = set()
print("Loading todo from ../todo.txt.")
with open("../todo.txt", "r") as f:
for line in f:
line = tuple(line.strip().split("."))
verbose and print('todo', line)
todo.add(line)
print("Loading tags from design.txt.")
with open("design.txt", "r") as f:
for line in f:
tile, pip, src, dst, pnum, pdir = line.split()
_, pip = pip.split(".")
_, src = src.split("/")
_, dst = dst.split("/")
pnum = int(pnum)
pdir = int(pdir)
if tile not in tiledata:
tiledata[tile] = {"pips": set(), "srcs": set(), "dsts": set()}
if pip in pipdata:
assert pipdata[pip] == (src, dst)
else:
pipdata[pip] = (src, dst)
tiledata[tile]["pips"].add(pip)
tiledata[tile]["srcs"].add(src)
tiledata[tile]["dsts"].add(dst)
if pdir == 0:
tiledata[tile]["srcs"].add(dst)
tiledata[tile]["dsts"].add(src)
if pnum == 1 or pdir == 0:
verbose and print('ignore pnum == 1 or pdir == 0: ', pip)
ignpip.add(pip)
t = ("_".join(tile.split("_")[0:2]), dst, src)
if t not in todo:
verbose and print('ignore not todo: ', t)
ignpip.add(pip)
for tile, pips_srcs_dsts in tiledata.items():
pips = pips_srcs_dsts["pips"]
srcs = pips_srcs_dsts["srcs"]
dsts = pips_srcs_dsts["dsts"]
for pip, src_dst in pipdata.items():
src, dst = src_dst
if pip in ignpip:
pass
elif pip in pips:
segmk.add_tile_tag(tile, "%s.%s" % (dst, src), 1)
elif src_dst[1] not in dsts:
segmk.add_tile_tag(tile, "%s.%s" % (dst, src), 0)
segmk.compile()
segmk.write()
| isc | Python |
fc4b1e3f03fefd0108ced28b78240650ba453ca6 | update imports in utils.py | jnoortheen/django-utils-plus | utils_plus/utils.py | utils_plus/utils.py | import os
import django.urls
import django.apps
IP_ADDRESS_HEADERS = ('HTTP_X_REAL_IP', 'HTTP_CLIENT_IP', 'HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR')
def get_ip_address(request):
for header in IP_ADDRESS_HEADERS:
addr = request.META.get(header)
if addr:
return addr.split(',')[0].strip()
def reverse_url(urlname, *args, **kwargs):
"""
utility method to wrap arguments & kwargs passed to reverse_lazy for url construction elegantly
Args:
urlname (str):
*args:
**kwargs:
Returns:
str: reverse matched URL path
"""
return django.urls.reverse_lazy(urlname, args=args, kwargs=kwargs)
def app_fixtures(*app_names):
"""
return all fixture names inside app
Args:
*app_name (list):
Returns:
list:
Usage:
>>> app_fixtures('test_app')
['communication.json', 'classcommunication.json', ]
"""
files = []
for app_name in app_names:
config = django.apps.apps.get_app_config(app_name)
path = os.path.abspath(os.path.join(config.path, 'fixtures'))
if os.path.exists(path):
files.extend([i for i in os.listdir(path) if i.endswith('.json')])
return files
| import os
from django.urls import reverse_lazy
from django.apps import apps
IP_ADDRESS_HEADERS = ('HTTP_X_REAL_IP', 'HTTP_CLIENT_IP', 'HTTP_X_FORWARDED_FOR', 'REMOTE_ADDR')
def get_ip_address(request):
for header in IP_ADDRESS_HEADERS:
addr = request.META.get(header)
if addr:
return addr.split(',')[0].strip()
def reverse_url(urlname, *args, **kwargs):
"""
utility method to wrap arguments & kwargs passed to reverse_lazy for url construction elegantly
Args:
urlname (str):
*args:
**kwargs:
Returns:
str: reverse matched URL path
"""
return reverse_lazy(urlname, args=args, kwargs=kwargs)
def app_fixtures(*app_names):
"""
return all fixture names inside app
Args:
*app_name (list):
Returns:
list:
Usage:
>>> app_fixtures('test_app')
['communication.json', 'classcommunication.json', ]
"""
files = []
for app_name in app_names:
config = apps.get_app_config(app_name)
path = os.path.abspath(os.path.join(config.path, 'fixtures'))
if os.path.exists(path):
files.extend([i for i in os.listdir(path) if i.endswith('.json')])
return files
| mit | Python |
5de5bb74a3e329305ba07ffce6551fa99b9b705f | bump rest retries to 3 | UWIT-IAM/iam-resttools | resttools/dao_implementation/live.py | resttools/dao_implementation/live.py | """
Provides access to the http connection pools and
connections for live data from a web service
"""
import logging
import ssl
import time
import socket
from urlparse import urlparse
from urllib3 import connection_from_url
import urllib3
# temporary during testing
urllib3.disable_warnings()
logging.captureWarnings(True)
def get_con_pool(host,
key_file=None,
cert_file=None,
ca_file=None,
socket_timeout=15.0,
max_pool_size=3,
verify_https=True):
"""
Return a ConnectionPool instance of given host
:param socket_timeout:
socket timeout for each connection in seconds
"""
kwargs = {
"timeout": socket_timeout,
"maxsize": max_pool_size,
"block": True,
}
if key_file is not None and cert_file is not None:
kwargs["key_file"] = key_file
kwargs["cert_file"] = cert_file
if urlparse(host).scheme == "https":
kwargs["ssl_version"] = ssl.PROTOCOL_TLSv1
if verify_https:
kwargs["cert_reqs"] = "CERT_REQUIRED"
kwargs["ca_certs"] = ca_file
return connection_from_url(host, **kwargs)
def get_live_url(con_pool,
method,
host,
url,
headers,
retries=3,
body=None,
service_name=None):
"""
Return a connection from the pool and perform an HTTP request.
:param con_pool:
is the http connection pool associated with the service
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param host:
the url of the server host.
:param headers:
headers to include with the request
:param body:
the POST, PUT body of the request
"""
timeout = con_pool.timeout
start_time = time.time()
response = con_pool.urlopen(method, url, body=body, headers=headers, retries=retries, timeout=timeout)
request_time = time.time() - start_time
return response
| """
Provides access to the http connection pools and
connections for live data from a web service
"""
import logging
import ssl
import time
import socket
from urlparse import urlparse
from urllib3 import connection_from_url
import urllib3
# temporary during testing
urllib3.disable_warnings()
logging.captureWarnings(True)
def get_con_pool(host,
key_file=None,
cert_file=None,
ca_file=None,
socket_timeout=15.0,
max_pool_size=3,
verify_https=True):
"""
Return a ConnectionPool instance of given host
:param socket_timeout:
socket timeout for each connection in seconds
"""
kwargs = {
"timeout": socket_timeout,
"maxsize": max_pool_size,
"block": True,
}
if key_file is not None and cert_file is not None:
kwargs["key_file"] = key_file
kwargs["cert_file"] = cert_file
if urlparse(host).scheme == "https":
kwargs["ssl_version"] = ssl.PROTOCOL_TLSv1
if verify_https:
kwargs["cert_reqs"] = "CERT_REQUIRED"
kwargs["ca_certs"] = ca_file
return connection_from_url(host, **kwargs)
def get_live_url(con_pool,
method,
host,
url,
headers,
retries=1,
body=None,
service_name=None):
"""
Return a connection from the pool and perform an HTTP request.
:param con_pool:
is the http connection pool associated with the service
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param host:
the url of the server host.
:param headers:
headers to include with the request
:param body:
the POST, PUT body of the request
"""
timeout = con_pool.timeout
start_time = time.time()
response = con_pool.urlopen(method, url, body=body, headers=headers, retries=retries, timeout=timeout)
request_time = time.time() - start_time
return response
| apache-2.0 | Python |
9b0e221c2a45e71159c782db43b0d371fec0debb | Add methods | Kyria/LazyBlacksmith,Kyria/LazyBlacksmith,Kyria/LazyBlacksmith,Kyria/LazyBlacksmith | lazyblacksmith/models/eve_sde/item.py | lazyblacksmith/models/eve_sde/item.py | from . import db
from .activity import Activity
from flask import url_for
class Item(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=False)
name = db.Column(db.String(100), nullable=True)
max_production_limit = db.Column(db.Integer, nullable=True)
# foreign keys
activities = db.relationship('Activity', backref='blueprint', lazy='dynamic')
activity_products = db.relationship('ActivityProduct', backref='blueprint', lazy='dynamic', foreign_keys='ActivityProduct.item_id')
activity_skills = db.relationship('ActivitySkill', backref='blueprint', lazy='dynamic', foreign_keys='ActivitySkill.item_id')
activity_materials = db.relationship('ActivityMaterial', backref='blueprint', lazy='dynamic', foreign_keys='ActivityMaterial.item_id')
product_for_activities = db.relationship('ActivityProduct', backref='product', lazy='dynamic', foreign_keys='ActivityProduct.product_id')
skill_for_activities = db.relationship('ActivitySkill', backref='skill', lazy='dynamic', foreign_keys='ActivitySkill.skill_id')
material_for_activities = db.relationship('ActivityMaterial', backref='material', lazy='dynamic', foreign_keys='ActivityMaterial.material_id')
def icon_32(self):
static_url = "ccp/Types/%d_32.png" % self.id
return url_for('static', filename=static_url)
def icon_64(self):
static_url = "ccp/Types/%d_64.png" % self.id
return url_for('static', filename=static_url)
def is_manufactured(self):
if self.product_for_activities.filter_by(activity=Activity.ACTIVITY_MANUFACTURING).count() > 0:
return True
return False | from . import db
class Item(db.Model):
id = db.Column(db.Integer, primary_key=True, autoincrement=False)
name = db.Column(db.String(100), nullable=True)
max_production_limit = db.Column(db.Integer, nullable=True)
# foreign keys
activities = db.relationship('Activity', backref='blueprint', lazy='dynamic')
activity_products = db.relationship('ActivityProduct', backref='blueprint', lazy='dynamic', foreign_keys='ActivityProduct.item_id')
activity_skills = db.relationship('ActivitySkill', backref='blueprint', lazy='dynamic', foreign_keys='ActivitySkill.item_id')
activity_materials = db.relationship('ActivityMaterial', backref='blueprint', lazy='dynamic', foreign_keys='ActivityMaterial.item_id')
product_for_activities = db.relationship('ActivityProduct', backref='product', lazy='dynamic', foreign_keys='ActivityProduct.product_id')
skill_for_activities = db.relationship('ActivitySkill', backref='skill', lazy='dynamic', foreign_keys='ActivitySkill.skill_id')
material_for_activities = db.relationship('ActivityMaterial', backref='material', lazy='dynamic', foreign_keys='ActivityMaterial.material_id')
| bsd-3-clause | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.