commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
361be2ee7ee5ca2282b6dd5d7423ec8b7df79cdc | fix dll path | voyagersearch/voyager-py,voyagersearch/voyager-py | extractors/vgextractors/__init__.py | extractors/vgextractors/__init__.py | # -*- coding: utf-8 -*-
# (C) Copyright 2014 Voyager Search
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import glob
import platform
def append_or_set_path(path):
try:
p = os.environ['PATH']
if len(p) > 0 and not p.endswith(os.pathsep):
p += os.pathsep
p += path
os.environ['PATH'] = p
except KeyError:
os.environ['PATH'] = path
# Add Python dependent libraries to the system paths.
arch_dir = 'win32_x86'
if platform.system() == 'Darwin':
arch_dir = 'darwin_x86_64'
elif platform.system() == 'Linux':
arch_dir = 'linux_amd64'
dll_path = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), '..', 'arch', arch_dir))
append_or_set_path(dll_path)
egg_path = os.path.join(dll_path, 'py')
sys.path.append(egg_path)
sys.path.append(os.path.dirname(dll_path))
libs = glob.glob(os.path.join(egg_path, '*.egg'))
for lib in libs:
sys.path.append(lib)
# Add Voyager Extractor Modules
extractors_path = os.path.dirname(__file__)
extractors = []
for ext in glob.glob(os.path.join(extractors_path, '*Extractor.py')):
extractors.append(os.path.basename(ext)[:-3])
del os,sys,glob,extractors_path
__all__ = extractors
| # -*- coding: utf-8 -*-
# (C) Copyright 2014 Voyager Search
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import glob
import platform
def append_or_set_path(path):
try:
p = os.environ['PATH']
if len(p) > 0 and not p.endswith(os.pathsep):
p += os.pathsep
p += path
os.environ['PATH'] = p
except KeyError:
os.environ['PATH'] = path
# Add Python dependent libraries to the system paths.
arch_dir = 'win32_x86'
if platform.system() == 'Darwin':
arch_dir = 'darwin_x86_64'
elif platform.system() == 'Linux':
arch_dir = 'linux_amd64'
dll_path = r"C:\Voyager\server_1.9.9.502\app\arch\win32_x86"
# dll_path = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), '..', 'arch', arch_dir))
append_or_set_path(dll_path)
egg_path = os.path.join(dll_path, 'py')
sys.path.append(egg_path)
sys.path.append(os.path.dirname(dll_path))
libs = glob.glob(os.path.join(egg_path, '*.egg'))
for lib in libs:
sys.path.append(lib)
# Add Voyager Extractor Modules
extractors_path = os.path.dirname(__file__)
extractors = []
for ext in glob.glob(os.path.join(extractors_path, '*Extractor.py')):
extractors.append(os.path.basename(ext)[:-3])
del os,sys,glob,extractors_path
__all__ = extractors
| apache-2.0 | Python |
1ae90e4a1dda2d600cff7a0b5b0d85f6d0fdfc62 | Update docstring for Travis | skearnes/pylearn2,skearnes/pylearn2,skearnes/pylearn2,skearnes/pylearn2 | pylearn2/train_extensions/tests/test_roc_auc.py | pylearn2/train_extensions/tests/test_roc_auc.py | """
Tests for ROC AUC.
"""
import unittest
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
class TestRocAucChannel(unittest.TestCase):
"""Train a simple model and calculate ROC AUC for monitoring datasets."""
def setUp(self):
skip_if_no_sklearn()
def test_roc_auc(self):
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
test_yaml = """
!obj:pylearn2.train.Train {
dataset:
&train !obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState {},
num_examples: 1000,
dim: 15,
num_classes: 2,
},
model: !obj:pylearn2.models.mlp.MLP {
nvis: 15,
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: 'h0',
dim: 15,
sparse_init: 15,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: 'y',
n_classes: 2,
irange: 0.005,
}
],
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
monitoring_dataset: {
'train': *train,
},
batches_per_iter: 1,
monitoring_batches: 1,
termination_criterion: !obj:pylearn2.termination_criteria.And {
criteria: [
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
!obj:pylearn2.termination_criteria.MonitorBased {
channel_name: 'valid_y_roc_auc',
prop_decrease: 0.,
N: 1,
},
],
},
},
extensions: [
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {},
],
}
"""
| import unittest
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
class TestRocAucChannel(unittest.TestCase):
"""Train a simple model and calculate ROC AUC for monitoring datasets."""
def setUp(self):
skip_if_no_sklearn()
def test_roc_auc(self):
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
test_yaml = """
!obj:pylearn2.train.Train {
dataset:
&train !obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState {},
num_examples: 1000,
dim: 15,
num_classes: 2,
},
model: !obj:pylearn2.models.mlp.MLP {
nvis: 15,
layers: [
!obj:pylearn2.models.mlp.Sigmoid {
layer_name: 'h0',
dim: 15,
sparse_init: 15,
},
!obj:pylearn2.models.mlp.Softmax {
layer_name: 'y',
n_classes: 2,
irange: 0.005,
}
],
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
monitoring_dataset: {
'train': *train,
},
batches_per_iter: 1,
monitoring_batches: 1,
termination_criterion: !obj:pylearn2.termination_criteria.And {
criteria: [
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
!obj:pylearn2.termination_criteria.MonitorBased {
channel_name: 'valid_y_roc_auc',
prop_decrease: 0.,
N: 1,
},
],
},
},
extensions: [
!obj:pylearn2.train_extensions.roc_auc.RocAucChannel {},
],
}
"""
| bsd-3-clause | Python |
2f95e0e2dab0bbddf5d7fcde7dbc489bfb95d056 | fix compatibility issue | frankwiles/django-admin-views,frankwiles/django-admin-views | admin_views/templatetags/admin_views.py | admin_views/templatetags/admin_views.py | import sys
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.admin import site
from ..admin import AdminViews
register = template.Library()
if sys.version_info < (3,):
import codecs
def u(x):
return codecs.unicode_escape_decode(x)[0]
else:
def u(x):
return x
@register.simple_tag
def get_admin_views(app, perms):
output = []
STATIC_URL = settings.STATIC_URL
for k, v in site._registry.items():
app_name = app.get('app_label', app['name'].lower())
if app_name not in str(k._meta):
continue
if isinstance(v, AdminViews):
for type, name, link, perm in v.output_urls:
if perm and not perm in perms:
continue
if type == 'url':
img_url = "%sadmin_views/icons/link.png" % STATIC_URL
alt_text = "Link to '%s'" % name
else:
img_url = "%sadmin_views/icons/view.png" % STATIC_URL
alt_text = "Custom admin view '%s'" % name
output.append(
u("""<tr>
<th scope="row">
<img src="%s" alt="%s" />
<a href="%s">%s</a></th>
<td> </td>
<td> </td>
</tr>
""") % (img_url, alt_text, link, name)
)
return "".join(output)
| import sys
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.admin import site
from ..admin import AdminViews
register = template.Library()
if sys.version_info < (3,):
import codecs
def u(x):
return codecs.unicode_escape_decode(x)[0]
else:
def u(x):
return x
@register.simple_tag
def get_admin_views(app, perms):
output = []
STATIC_URL = settings.STATIC_URL
for k, v in site._registry.items():
if app['app_label'] not in str(k._meta):
continue
if isinstance(v, AdminViews):
for type, name, link, perm in v.output_urls:
if perm and not perm in perms:
continue
if type == 'url':
img_url = "%sadmin_views/icons/link.png" % STATIC_URL
alt_text = "Link to '%s'" % name
else:
img_url = "%sadmin_views/icons/view.png" % STATIC_URL
alt_text = "Custom admin view '%s'" % name
output.append(
u("""<tr>
<th scope="row">
<img src="%s" alt="%s" />
<a href="%s">%s</a></th>
<td> </td>
<td> </td>
</tr>
""") % (img_url, alt_text, link, name)
)
return "".join(output)
| bsd-3-clause | Python |
ba6c83bcf0e053e654b018274d41775c4b4a98ed | implement dumpy me command | marioidival/dumpyme | src/commander.py | src/commander.py | import os
import click
from utils.config import DumpyConfig
from utils.reader import DumpyReader
from tasks.executor import DumpyExecutor
from utils.supported_databases import SUPPORTED
@click.group()
def dumpy():
"""Command Line package to get dumps"""
@dumpy.command()
def init():
"""Create dumpfile to user"""
if init:
dumpy_conf = DumpyConfig()
we_file = os.path.join(os.path.dirname(__file__),
'templates/tmp_dumpyme.ini')
result = dumpy_conf.move_config_file(we_file)
if result:
click.echo("dumpyfile in your home directory as ~/.dumpyfile.ini")
else:
click.echo("dumpyfile already exists in home directory")
@dumpy.command()
@click.option("--project", prompt="Project name")
@click.option("--host", prompt="Host of project")
@click.option("--user", prompt="User of host")
@click.option("--db", prompt="Name of db")
@click.option("--db_name", prompt="DB Type (e.g: mongodb, postgresql...)",
type=click.Choice(SUPPORTED))
def add(project, host, user, db, db_name):
"""Add new project in dumpfile"""
dumpy_reader = DumpyReader()
if dumpy_reader.dumpyfile:
result = dumpy_reader.add_section_project(
project=project, host=host, user=user, db=db, db_name=db_name
)
if result:
click.echo("adding in dumpyfile")
click.echo("Project: {}".format(project))
click.echo("Host: {}".format(host))
click.echo("Database: {}".format(db))
click.echo("Database Type: {}".format(db_name))
# Error message
@dumpy.command()
@click.argument("project")
def delete(project):
"""Delete project of dumpfile"""
dumpy_reader = DumpyReader()
if dumpy_reader.dumpyfile:
result = dumpy_reader.remove_section_project(project)
if result:
click.echo("project removed sucessfully")
@dumpy.command()
@click.argument("project")
def me(project):
"""Get dumps of project"""
executor = DumpyExecutor(project)
executor.run()
| import os
import click
from utils.config import DumpyConfig
from utils.reader import DumpyReader
@click.group()
def dumpy():
"""Command Line package to get dumps"""
@dumpy.command()
def init():
"""Create dumpfile to user"""
if init:
dumpy_conf = DumpyConfig()
we_file = os.path.join(os.path.dirname(__file__),
'templates/tmp_dumpyme.ini')
result = dumpy_conf.move_config_file(we_file)
if result:
click.echo("dumpyfile in your home directory as ~/.dumpyfile.ini")
else:
click.echo("dumpyfile already exists in home directory")
@dumpy.command()
@click.option("--project", prompt="Project name")
@click.option("--host", prompt="Host of project")
@click.option("--user", prompt="User of host")
@click.option("--db", prompt="Name of db")
@click.option("--db_name", prompt="DB Type (e.g: mongodb, postgresql...)",
type=click.Choice(['mongodb']))
def add(project, host, user, db, db_name):
"""Add new project in dumpfile"""
dumpy_reader = DumpyReader()
if dumpy_reader.dumpyfile:
result = dumpy_reader.add_section_project(
project=project, host=host, user=user, db=db, db_name=db_name
)
if result:
click.echo("adding in dumpyfile")
click.echo("Project: {}".format(project))
click.echo("Host: {}".format(host))
click.echo("Database: {}".format(db))
click.echo("Database Type: {}".format(db_name))
# Error message
@dumpy.command()
@click.argument("project")
def delete(project):
"""Delete project of dumpfile"""
dumpy_reader = DumpyReader()
if dumpy_reader.dumpyfile:
result = dumpy_reader.remove_section_project(project)
if result:
click.echo("project removed sucessfully")
@dumpy.command()
@click.argument("project")
def me(project):
"""Get dumps of project"""
| mit | Python |
d70f7e838d9dcd3fce5ece228e94a3028e9e5293 | add Category model | free-free/pyblog,free-free/pyblog,free-free/pyblog,free-free/pyblog | app/models.py | app/models.py | #!/usr/bin/env python3.5
from tools.column import Column
from tools.field import String,Int,Float,Text,Boolean
from tools.model import Model
from tools.database import *
from tools.log import *
import time
class User(Model):
__table__='users'
id=Column(Int(4,unsigned=True),primary_key=True,null=False,auto_increment=True)
user_name=Column(String(50),unique_key=True,null=False)
password=Column(String(100),null=False)
email=Column(String(50),unique_key=True,null=False)
user_image=Column(String(300))
last_login=Column(String(20))
create_at=Column(Float(),default=time.time())
gender=Column(Int(1,unsigned=True))
location=Column(String(50))
desc=Column(String(600))
class Article(Model):
__table__='articles'
id=Column(Int(4,unsigned=True),primary_key=True,null=False,auto_increment=True)
uid=Column(Int(4,unsigned=True),null=False)
cate_id=Column(Int(4,unsigned=True),null=False)
content=Column(Text())
post_at=Column(Float(),default=time.time())
modify_at=Column(String(20))
auth_password=Column(String(100),default="")
abstract=Column(String(400))
view_num=Column(Int(4,unsigned=True),default=0)
class Category(Model):
__table__='categorys'
id=Column(Int(4,unsigned=True),primary_key=True,auto_increment=True)
uid=Column(Int(4,unsigned=True))
cate_text=Column(String(100))
cate_image=Column(String(200))
creat_at=Column(Flot(),default=time.time())
article_num=Column(Int(4,unsigned=True),default=0)
cate_desc=Column(String(400),default='')
#class Need(Model):
# __table__='needs'
# id=Column(Int(4,unsigned=True),primary_key=True,null=False,auto_increment=True)
# user_id=Column(Int(4,unsigned=True),null=False)
# content=Column(Text(),null=False)
# create_at=Column(Float(),default=time.time())
# is_solved=Column(Boolean(),default=False)
# solved_user_id=Column(Int(4,unsigned=True),default=0)
if __name__=='__main__':
print(Need().__table__)
print(Need().__columns__)
print(User().__columns__)
print(User().__table__)
| #!/usr/bin/env python3.5
from tools.column import Column
from tools.field import String,Int,Float,Text,Boolean
from tools.model import Model
from tools.database import *
from tools.log import *
import time
class User(Model):
__table__='users'
id=Column(Int(4,unsigned=True),primary_key=True,null=False,auto_increment=True)
user_name=Column(String(50),unique_key=True,null=False)
password=Column(String(100),null=False)
email=Column(String(50),unique_key=True,null=False)
user_image=Column(String(300))
last_login=Column(String(20))
create_at=Column(Float(),default=time.time())
gender=Column(Int(1,unsigned=True))
location=Column(String(50))
desc=Column(String(600))
class Article(Model):
__table__='articles'
id=Column(Int(4,unsigned=True),primary_key=True,null=False,auto_increment=True)
uid=Column(Int(4,unsigned=True),null=False)
cate_id=Column(Int(4,unsigned=True),null=False)
content=Column(Text())
post_at=Column(Float(),default=time.time())
modify_at=Column(String(20))
auth_password=Column(String(100),default="")
abstract=Column(String(400))
view_num=Column(Int(4,unsigned=True),default=0)
class Category(Model):
__table__='categorys'
#class Need(Model):
# __table__='needs'
# id=Column(Int(4,unsigned=True),primary_key=True,null=False,auto_increment=True)
# user_id=Column(Int(4,unsigned=True),null=False)
# content=Column(Text(),null=False)
# create_at=Column(Float(),default=time.time())
# is_solved=Column(Boolean(),default=False)
# solved_user_id=Column(Int(4,unsigned=True),default=0)
if __name__=='__main__':
print(Need().__table__)
print(Need().__columns__)
print(User().__columns__)
print(User().__table__)
| mit | Python |
91141713b672f56a8c45f0250b7e9216a69237f8 | Increase splinter wait time to 15 seconds | alphagov/backdrop,alphagov/backdrop,alphagov/backdrop | features/support/splinter_client.py | features/support/splinter_client.py | import logging
from pymongo import MongoClient
from splinter import Browser
from features.support.http_test_client import HTTPTestClient
from features.support.support import Api
class SplinterClient(object):
def __init__(self, database_name):
self.database_name = database_name
self._write_api = Api.start('write', '5001')
def storage(self):
return MongoClient('localhost', 27017)[self.database_name]
def before_scenario(self):
self.browser = Browser('phantomjs', wait_time=15)
def after_scenario(self):
self.browser.quit()
def spin_down(self):
self._write_api.stop()
def get(self, url, headers=None):
self.browser.visit(self._write_api.url(url))
return SplinterResponse(self.browser)
class SplinterResponse:
def __init__(self, browser):
self.status_code = browser.status_code
self.data = None
self.headers = None
| import logging
from pymongo import MongoClient
from splinter import Browser
from features.support.http_test_client import HTTPTestClient
from features.support.support import Api
class SplinterClient(object):
def __init__(self, database_name):
self.database_name = database_name
self._write_api = Api.start('write', '5001')
def storage(self):
return MongoClient('localhost', 27017)[self.database_name]
def before_scenario(self):
self.browser = Browser('phantomjs')
def after_scenario(self):
self.browser.quit()
def spin_down(self):
self._write_api.stop()
def get(self, url, headers=None):
self.browser.visit(self._write_api.url(url))
return SplinterResponse(self.browser)
class SplinterResponse:
def __init__(self, browser):
self.status_code = browser.status_code
self.data = None
self.headers = None
| mit | Python |
5a36943988088027e13c1499a90be7a0cf9ee8e2 | Enable colors only for supported | d6e/coala,MattAllmendinger/coala,scottbelden/coala,saurabhiiit/coala,lonewolf07/coala,NiklasMM/coala,meetmangukiya/coala,ayushin78/coala,djkonro/coala,SambitAcharya/coala,rimacone/testing2,coala-analyzer/coala,sils1297/coala,rimacone/testing2,refeed/coala,vinc456/coala,impmihai/coala,Asalle/coala,sudheesh001/coala,coala-analyzer/coala,netman92/coala,Tanmay28/coala,scottbelden/coala,nemaniarjun/coala,yashtrivedi96/coala,scriptnull/coala,arjunsinghy96/coala,scriptnull/coala,FeodorFitsner/coala,damngamerz/coala,ayushin78/coala,yashLadha/coala,ManjiriBirajdar/coala,Nosferatul/coala,FeodorFitsner/coala,AbdealiJK/coala,rresol/coala,Balaji2198/coala,arush0311/coala,Uran198/coala,coala/coala,saurabhiiit/coala,karansingh1559/coala,NalinG/coala,JohnS-01/coala,JohnS-01/coala,scriptnull/coala,ManjiriBirajdar/coala,arjunsinghy96/coala,tltuan/coala,mr-karan/coala,aptrishu/coala,MattAllmendinger/coala,RJ722/coala,mr-karan/coala,JohnS-01/coala,refeed/coala,yashtrivedi96/coala,abhiroyg/coala,RJ722/coala,tltuan/coala,stevemontana1980/coala,scriptnull/coala,Asnelchristian/coala,tushar-rishav/coala,kartikeys98/coala,yashtrivedi96/coala,SanketDG/coala,andreimacavei/coala,vinc456/coala,yland/coala,AbdealiJK/coala,SambitAcharya/coala,Tanmay28/coala,CruiseDevice/coala,incorrectusername/coala,NalinG/coala,rresol/coala,Tanmay28/coala,kartikeys98/coala,netman92/coala,meetmangukiya/coala,tltuan/coala,AbdealiJK/coala,incorrectusername/coala,yashLadha/coala,swatilodha/coala,netman92/coala,AdeshAtole/coala,aptrishu/coala,kartikeys98/coala,vinc456/coala,SambitAcharya/coala,tushar-rishav/coala,AdeshAtole/coala,scottbelden/coala,andreimacavei/coala,mr-karan/coala,Balaji2198/coala,coala-analyzer/coala,meetmangukiya/coala,stevemontana1980/coala,CruiseDevice/coala,arafsheikh/coala,arafsheikh/coala,coala/coala,djkonro/coala,arush0311/coala,sils1297/coala,sagark123/coala,andreimacavei/coala,NiklasMM/coala,FeodorFitsner/coala,swatilodha/coala,sagark123/coala,SambitAcharya/coala,yashLadha/coala,svsn2117/coala,nemaniarjun/coala,Tanmay28/coala,Tanmay28/coala,karansingh1559/coala,aptrishu/coala,nemaniarjun/coala,ayushin78/coala,svsn2117/coala,SambitAcharya/coala,NalinG/coala,Asnelchristian/coala,Uran198/coala,sudheesh001/coala,MariosPanag/coala,NalinG/coala,jayvdb/coala,saurabhiiit/coala,yland/coala,stevemontana1980/coala,Shade5/coala,Nosferatul/coala,NalinG/coala,lonewolf07/coala,Tanmay28/coala,shreyans800755/coala,Shade5/coala,shreyans800755/coala,Nosferatul/coala,Balaji2198/coala,RJ722/coala,sophiavanvalkenburg/coala,sagark123/coala,Asalle/coala,NiklasMM/coala,arush0311/coala,sils1297/coala,abhiroyg/coala,refeed/coala,ManjiriBirajdar/coala,rresol/coala,dagdaggo/coala,Tanmay28/coala,coala/coala,impmihai/coala,MariosPanag/coala,SanketDG/coala,damngamerz/coala,impmihai/coala,dagdaggo/coala,djkonro/coala,Asnelchristian/coala,rimacone/testing2,tushar-rishav/coala,SanketDG/coala,jayvdb/coala,Tanmay28/coala,CruiseDevice/coala,sudheesh001/coala,scriptnull/coala,Uran198/coala,NalinG/coala,sophiavanvalkenburg/coala,SambitAcharya/coala,MattAllmendinger/coala,incorrectusername/coala,scriptnull/coala,svsn2117/coala,Shade5/coala,sophiavanvalkenburg/coala,scriptnull/coala,damngamerz/coala,arafsheikh/coala,swatilodha/coala,shreyans800755/coala,NalinG/coala,d6e/coala,dagdaggo/coala,Asalle/coala,yland/coala,d6e/coala,karansingh1559/coala,SambitAcharya/coala,AdeshAtole/coala,lonewolf07/coala,MariosPanag/coala,arjunsinghy96/coala,abhiroyg/coala,jayvdb/coala | coalib/output/printers/ConsolePrinter.py | coalib/output/printers/ConsolePrinter.py | import platform
from coalib.output.printers.ColoredLogPrinter import ColoredLogPrinter
from coalib.output.printers.LOG_LEVEL import LOG_LEVEL
class ConsolePrinter(ColoredLogPrinter):
"""
A simple printer for the console that supports colors and logs.
Note that pickling will not pickle the output member.
"""
def __init__(self,
log_level=LOG_LEVEL.WARNING,
timestamp_format="%X",
print_colored=platform.system() in ("Linux",)):
ColoredLogPrinter.__init__(self,
log_level=log_level,
timestamp_format=timestamp_format,
print_colored=print_colored)
def _print_uncolored(self, output, **kwargs):
print(output, end="")
def _print_colored(self, output, color=None, **kwargs):
color_code_dict = {
'black': '0;30',
'bright gray': '0;37',
'blue': '0;34',
'white': '1;37',
'green': '0;32',
'bright blue': '1;34',
'cyan': '0;36',
'bright green': '1;32',
'red': '0;31',
'bright cyan': '1;36',
'purple': '0;35',
'bright red': '1;31',
'yellow': '0;33',
'bright purple': '1;35',
'dark gray': '1;30',
'bright yellow': '1;33',
'normal': '0'}
color_code = color_code_dict.get(color, None)
if color_code is None:
raise ValueError("Invalid color value")
print('\033[' + color_code + 'm' + output + '\033[0m', end="")
| from coalib.output.printers.ColoredLogPrinter import ColoredLogPrinter
from coalib.output.printers.LOG_LEVEL import LOG_LEVEL
class ConsolePrinter(ColoredLogPrinter):
"""
A simple printer for the console that supports colors and logs.
Note that pickling will not pickle the output member.
"""
def __init__(self,
log_level=LOG_LEVEL.WARNING,
timestamp_format="%X",
print_colored=True):
ColoredLogPrinter.__init__(self,
log_level=log_level,
timestamp_format=timestamp_format,
print_colored=print_colored)
def _print_uncolored(self, output, **kwargs):
print(output, end="")
def _print_colored(self, output, color=None, **kwargs):
color_code_dict = {
'black': '0;30',
'bright gray': '0;37',
'blue': '0;34',
'white': '1;37',
'green': '0;32',
'bright blue': '1;34',
'cyan': '0;36',
'bright green': '1;32',
'red': '0;31',
'bright cyan': '1;36',
'purple': '0;35',
'bright red': '1;31',
'yellow': '0;33',
'bright purple': '1;35',
'dark gray': '1;30',
'bright yellow': '1;33',
'normal': '0'}
color_code = color_code_dict.get(color, None)
if color_code is None:
raise ValueError("Invalid color value")
print('\033[' + color_code + 'm' + output + '\033[0m', end="")
| agpl-3.0 | Python |
b45261c3dc66e7089e016ebad1121f72fe7ffb80 | Change initial debug configuration to: reset/halt, load, init break points | platformio/platformio-core,platformio/platformio-core,platformio/platformio | platformio/commands/debug/initcfgs.py | platformio/commands/debug/initcfgs.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
GDB_DEFAULT_INIT_CONFIG = """
define pio_reset_halt_target
monitor reset halt
end
define pio_reset_target
monitor reset
end
target extended-remote $DEBUG_PORT
monitor init
pio_reset_halt_target
$LOAD_CMDS
$INIT_BREAK
"""
GDB_STUTIL_INIT_CONFIG = """
define pio_reset_halt_target
monitor halt
monitor reset
end
define pio_reset_target
monitor reset
end
target extended-remote $DEBUG_PORT
$INIT_BREAK
pio_reset_halt_target
$LOAD_CMDS
pio_reset_halt_target
"""
GDB_JLINK_INIT_CONFIG = """
define pio_reset_halt_target
monitor halt
monitor reset
end
define pio_reset_target
monitor reset
end
target extended-remote $DEBUG_PORT
pio_reset_halt_target
$LOAD_CMDS
$INIT_BREAK
"""
GDB_BLACKMAGIC_INIT_CONFIG = """
define pio_reset_halt_target
set language c
set *0xE000ED0C = 0x05FA0004
set $busy = (*0xE000ED0C & 0x4)
while ($busy)
set $busy = (*0xE000ED0C & 0x4)
end
set language auto
end
define pio_reset_target
pio_reset_halt_target
end
target extended-remote $DEBUG_PORT
monitor swdp_scan
attach 1
set mem inaccessible-by-default off
$LOAD_CMDS
$INIT_BREAK
set language c
set *0xE000ED0C = 0x05FA0004
set $busy = (*0xE000ED0C & 0x4)
while ($busy)
set $busy = (*0xE000ED0C & 0x4)
end
set language auto
"""
GDB_MSPDEBUG_INIT_CONFIG = """
define pio_reset_halt_target
end
define pio_reset_target
end
target extended-remote $DEBUG_PORT
monitor erase
pio_reset_halt_target
$LOAD_CMDS
$INIT_BREAK
"""
GDB_QEMU_INIT_CONFIG = """
define pio_reset_halt_target
monitor system_reset
end
define pio_reset_target
pio_reset_halt_target
end
target extended-remote $DEBUG_PORT
pio_reset_halt_target
$LOAD_CMDS
$INIT_BREAK
"""
| # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
GDB_DEFAULT_INIT_CONFIG = """
define pio_reset_halt_target
monitor reset halt
end
define pio_reset_target
monitor reset
end
target extended-remote $DEBUG_PORT
$INIT_BREAK
pio_reset_halt_target
$LOAD_CMDS
monitor init
pio_reset_halt_target
"""
GDB_STUTIL_INIT_CONFIG = """
define pio_reset_halt_target
monitor halt
monitor reset
end
define pio_reset_target
monitor reset
end
target extended-remote $DEBUG_PORT
$INIT_BREAK
pio_reset_halt_target
$LOAD_CMDS
pio_reset_halt_target
"""
GDB_JLINK_INIT_CONFIG = """
define pio_reset_halt_target
monitor halt
monitor reset
end
define pio_reset_target
monitor reset
end
target extended-remote $DEBUG_PORT
$INIT_BREAK
pio_reset_halt_target
$LOAD_CMDS
"""
GDB_BLACKMAGIC_INIT_CONFIG = """
define pio_reset_halt_target
set language c
set *0xE000ED0C = 0x05FA0004
set $busy = (*0xE000ED0C & 0x4)
while ($busy)
set $busy = (*0xE000ED0C & 0x4)
end
set language auto
end
define pio_reset_target
pio_reset_halt_target
end
target extended-remote $DEBUG_PORT
monitor swdp_scan
attach 1
set mem inaccessible-by-default off
$INIT_BREAK
$LOAD_CMDS
set language c
set *0xE000ED0C = 0x05FA0004
set $busy = (*0xE000ED0C & 0x4)
while ($busy)
set $busy = (*0xE000ED0C & 0x4)
end
set language auto
"""
GDB_MSPDEBUG_INIT_CONFIG = """
define pio_reset_halt_target
end
define pio_reset_target
end
target extended-remote $DEBUG_PORT
$INIT_BREAK
monitor erase
$LOAD_CMDS
pio_reset_halt_target
"""
GDB_QEMU_INIT_CONFIG = """
define pio_reset_halt_target
monitor system_reset
end
define pio_reset_target
pio_reset_halt_target
end
target extended-remote $DEBUG_PORT
$INIT_BREAK
$LOAD_CMDS
pio_reset_halt_target
"""
| apache-2.0 | Python |
508ee1dcb88e6bd8ee4f96a16b79e8643497c9fd | Add negative expressions | bryantrobbins/baseball,bryantrobbins/baseball,bryantrobbins/baseball,bryantrobbins/baseball,bryantrobbins/baseball | api/btr3baseball/ExpressionValidator.py | api/btr3baseball/ExpressionValidator.py | from pyparsing import Literal,CaselessLiteral,Word,Combine,Group,Optional,\
ZeroOrMore,Forward,nums,alphas,ParseException
point = Literal( "." )
fnumber = Combine( Word( "+-"+nums, nums ) + Optional( point + Optional( Word( nums ) ) ) )
quote = Literal("'").suppress()
comma = Literal(",")
ident = Word(alphas)
plus = Literal( "+" )
minus = Literal( "-" )
mult = Literal( "*" )
div = Literal( "/" )
lpar = Literal( "(" )
rpar = Literal( ")" )
num_const = fnumber
str_const = quote + ident + quote
addop = plus | minus
multop = mult | div
expop = Literal( "^" )
expr = Forward()
func = ident + lpar + Optional( expr + ZeroOrMore( (comma + expr) ) ) + rpar
atom = Optional('-') + ( num_const | func )
factor = Forward()
factor << atom
term = factor + ZeroOrMore( ( multop + factor ))
expr << (term + ZeroOrMore( ( addop + term )) | str_const)
class ExpressionValidator:
def __init__(self):
self.grammar = expr
def parseExpression(self, strEx):
try:
results = self.grammar.parseString(strEx, parseAll=True).dump()
return ExpressionValidatorResult(expression = strEx, tokens = results)
except ParseException as e:
return ExpressionValidatorResult(expression = strEx, exception = e)
class ExpressionValidatorResult:
def __init__(self, expression = None, tokens = None, exception = None):
self.expression = expression
self.tokens = tokens
if exception != None:
self.message = str(exception)
self.location = exception.loc
def __str__(self):
if self.tokens != None:
return self.tokens.__str__()
if self.message != None:
return 'In expression "{}": {}'.format(self.expression, self.message)
# Some quick tests (temp)
vv = ExpressionValidator()
ee = [
"2 * COL('HR')",
"2",
"'BRYAN'",
"hi(2)",
"hi(2,3,4)",
"-hi(2,3,4)",
]
for ex in ee:
result = vv.parseExpression(ex)
print('{} => {}'.format(ex, result))
| from pyparsing import Literal,CaselessLiteral,Word,Combine,Group,Optional,\
ZeroOrMore,Forward,nums,alphas,ParseException
point = Literal( "." )
fnumber = Combine( Word( "+-"+nums, nums ) + Optional( point + Optional( Word( nums ) ) ) )
quote = Literal("'").suppress()
comma = Literal(",")
ident = Word(alphas)
plus = Literal( "+" )
minus = Literal( "-" )
mult = Literal( "*" )
div = Literal( "/" )
lpar = Literal( "(" )
rpar = Literal( ")" )
num_const = fnumber
str_const = quote + ident + quote
addop = plus | minus
multop = mult | div
expop = Literal( "^" )
expr = Forward()
func = ident + lpar + Optional( expr + ZeroOrMore( (comma + expr) ) ) + rpar
atom = num_const | func
factor = Forward()
factor << atom
term = factor + ZeroOrMore( ( multop + factor ))
expr << (term + ZeroOrMore( ( addop + term )) | str_const)
class ExpressionValidator:
def __init__(self):
self.grammar = expr
def parseExpression(self, strEx):
try:
results = self.grammar.parseString(strEx, parseAll=True).dump()
return ExpressionValidatorResult(expression = strEx, tokens = results)
except ParseException as e:
return ExpressionValidatorResult(expression = strEx, exception = e)
class ExpressionValidatorResult:
def __init__(self, expression = None, tokens = None, exception = None):
self.expression = expression
self.tokens = tokens
if exception != None:
self.message = str(exception)
self.location = exception.loc
def __str__(self):
if self.tokens != None:
return self.tokens.__str__()
if self.message != None:
return 'In expression "{}": {}'.format(self.expression, self.message)
# Some quick tests (temp)
vv = ExpressionValidator()
ee = [
"2 * COL('HR')",
"2",
"'BRYAN'",
"hi(2)",
"hi(2,3,4)",
]
for ex in ee:
result = vv.parseExpression(ex)
print('{} => {}'.format(ex, result))
| apache-2.0 | Python |
a8bd3ea91563f087b6aa4b651606f4c38beba913 | Bump pyleus to 0.1.9 | mzbyszynski/pyleus,ecanzonieri/pyleus,Yelp/pyleus,Yelp/pyleus,stallman-cui/pyleus,patricklucas/pyleus,jirafe/pyleus,poros/pyleus,jirafe/pyleus,imcom/pyleus,poros/pyleus,stallman-cui/pyleus,mzbyszynski/pyleus,dapuck/pyleus,patricklucas/pyleus,dapuck/pyleus,imcom/pyleus,ecanzonieri/pyleus,imcom/pyleus | pyleus/__init__.py | pyleus/__init__.py | import os
import sys
__version__ = '0.1.9'
BASE_JAR = "pyleus-base.jar"
BASE_JAR_INSTALL_DIR = "share/pyleus"
BASE_JAR_PATH = os.path.join(sys.prefix, BASE_JAR_INSTALL_DIR, BASE_JAR)
| import os
import sys
__version__ = '0.1.8'
BASE_JAR = "pyleus-base.jar"
BASE_JAR_INSTALL_DIR = "share/pyleus"
BASE_JAR_PATH = os.path.join(sys.prefix, BASE_JAR_INSTALL_DIR, BASE_JAR)
| apache-2.0 | Python |
99beb21b982efae4a9b3c07b23ae8185727f0d3b | tweak code style in autil | civalin/cmdlr,civalin/cmdlr | src/cmdlr/autil.py | src/cmdlr/autil.py | """Analyzer utils."""
import json
import subprocess
from tempfile import NamedTemporaryFile
from shutil import which
from collections import namedtuple
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from .exception import ExternalDependencyNotFound
_JSResult = namedtuple('JSResult', ['eval', 'env'])
def run_in_nodejs(js):
"""Dispatch to external nodejs and get the eval result.
Args:
js(str): javascript code without escaped.
Returns:
JSResult type result, already converted from build-in json module.
"""
cmd = which('node')
if not cmd:
raise ExternalDependencyNotFound('Can not found node js in system.')
full_code = '''const vm = require('vm');
const sandbox = {{}};
vm.createContext(sandbox);
code = {};
evalValue = vm.runInContext(code, sandbox);
console.log(JSON.stringify({{eval: evalValue, env: sandbox}}))
'''.format(json.dumps(js))
with NamedTemporaryFile(mode='wt') as f:
f.write(full_code)
f.flush()
ret_value = subprocess.check_output([
cmd,
f.name,
])
return _JSResult(**json.loads(ret_value.decode()))
_FetchResult = namedtuple('FetchResult', ['soup', 'get_abspath'])
async def fetch(url, request, encoding='utf8', **req_kwargs):
"""Get BeautifulSoup from remote url."""
async with request(url, **req_kwargs) as resp:
binary = await resp.read()
text = binary.decode(encoding, errors='ignore')
soup = BeautifulSoup(text, 'lxml')
base_url = str(resp.url)
def get_abspath(url):
return urljoin(base_url, url)
return _FetchResult(soup=soup,
get_abspath=get_abspath)
| """Analyzer utils."""
import shutil
import json
import tempfile
import subprocess
from collections import namedtuple
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from .exception import ExternalDependencyNotFound
_JSResult = namedtuple('JSResult', ['eval', 'env'])
def run_in_nodejs(js):
"""Dispatch to external nodejs and get the eval result.
Args:
js(str): javascript code without escaped.
Returns:
JSResult type result, already converted from build-in json module.
"""
cmd = shutil.which('node')
if not cmd:
raise ExternalDependencyNotFound('Can not found node js in system.')
full_code = '''const vm = require('vm');
const sandbox = {{}};
vm.createContext(sandbox);
code = {};
evalValue = vm.runInContext(code, sandbox);
console.log(JSON.stringify({{eval: evalValue, env: sandbox}}))
'''.format(json.dumps(js))
with tempfile.NamedTemporaryFile(mode='wt') as f:
f.write(full_code)
f.flush()
ret_value = subprocess.check_output([
cmd,
f.name,
])
return _JSResult(**json.loads(ret_value.decode()))
_FetchResult = namedtuple('FetchResult', ['soup', 'get_abspath'])
async def fetch(url, request, encoding='utf8', **req_kwargs):
"""Get BeautifulSoup from remote url."""
async with request(url, **req_kwargs) as resp:
binary = await resp.read()
text = binary.decode(encoding, errors='ignore')
soup = BeautifulSoup(text, 'lxml')
base_url = str(resp.url)
def get_abspath(url):
return urljoin(base_url, url)
return _FetchResult(soup=soup,
get_abspath=get_abspath)
| mit | Python |
0ae8662557081a55b04c78912e20aa4fe1c58d0d | change search url to filter male people | kgilbert-cmu/flairlog | flairs.py | flairs.py | import Config
import Dictionary
import praw
def main():
r = praw.Reddit(user_agent = Config.user_agent)
r.login(Config.username, Config.password)
subreddit = r.get_subreddit(Config.subreddit)
collect = {}
for flair in subreddit.get_flair_list(limit = None):
(_, user, text) = flair.values()
cleaned = text
# flair pre-processing
if cleaned.count(" ") >= 2:
cleaned = " ".join(cleaned.split(" ")[0:2])
postfix = ["'", "(", "-"]
for char in postfix:
if char in cleaned:
cleaned = cleaned[:cleaned.index(char)]
cleaned = cleaned.strip().lower()
if cleaned in Dictionary.translate:
college = Dictionary.translate[cleaned]
elif cleaned == "":
college = "{ Flair not set }"
else:
college = "{ Flair not recognized }"
user = user + ' (' + (cleaned).encode('ascii', 'replace') + ')'
if college in collect:
collect[college].append('/u/' + user)
else:
collect[college] = ['/u/' + user]
for college in sorted(collect):
print "###", link(college), "\n*", "\n* ".join(collect[college]), "\n"
def die():
sys.exit(1)
def link(college):
if '{' in college:
return college
html = clean(college)
fof = "https://www.facebook.com/search/people/?q=friends%20of%20my%20friends%20who%20are%20men%20and%20go%20to%20{}%20and%20like%20Sigma%20Chi%20Fraternity"
return "[{}]({})".format(college, fof.format(html))
def clean(text):
array = list(text)
cleaned = []
while len(array) > 0:
char = array.pop(0)
if char == " ":
cleaned.append("%20")
elif char == "(" or char == "[":
counter = 1
char = array.pop(0)
while len(array) > 0 and counter >= 1:
if char == "(" or char == "[":
counter += 1
elif char == ")" or char == "]":
counter -= 1
char = array.pop(0)
else:
cleaned.append(char)
return "".join(cleaned)
if __name__ == "__main__":
try:
main()
except SystemError:
print "Bot was killed."
| import Config
import Dictionary
import praw
def main():
r = praw.Reddit(user_agent = Config.user_agent)
r.login(Config.username, Config.password)
subreddit = r.get_subreddit(Config.subreddit)
collect = {}
for flair in subreddit.get_flair_list(limit = None):
(_, user, text) = flair.values()
cleaned = text
# flair pre-processing
if cleaned.count(" ") >= 2:
cleaned = " ".join(cleaned.split(" ")[0:2])
postfix = ["'", "(", "-"]
for char in postfix:
if char in cleaned:
cleaned = cleaned[:cleaned.index(char)]
cleaned = cleaned.strip().lower()
if cleaned in Dictionary.translate:
college = Dictionary.translate[cleaned]
elif cleaned == "":
college = "{ Flair not set }"
else:
college = "{ Flair not recognized }"
user = user + ' (' + (cleaned).encode('ascii', 'replace') + ')'
if college in collect:
collect[college].append('/u/' + user)
else:
collect[college] = ['/u/' + user]
for college in sorted(collect):
print "###", link(college), "\n*", "\n* ".join(collect[college]), "\n"
def die():
sys.exit(1)
def link(college):
if '{' in college:
return college
html = clean(college)
fof = "https://www.facebook.com/search/top/?q=friends%20of%20my%20friends%20who%20go%20to%20{}%20and%20like%20Sigma%20Chi%20Fraternity"
return "[{}]({})".format(college, fof.format(html))
def clean(text):
array = list(text)
cleaned = []
while len(array) > 0:
char = array.pop(0)
if char == " ":
cleaned.append("%20")
elif char == "(" or char == "[":
counter = 1
char = array.pop(0)
while len(array) > 0 and counter >= 1:
if char == "(" or char == "[":
counter += 1
elif char == ")" or char == "]":
counter -= 1
char = array.pop(0)
else:
cleaned.append(char)
return "".join(cleaned)
if __name__ == "__main__":
try:
main()
except SystemError:
print "Bot was killed."
| mit | Python |
035c32a99e6e03b2498db75dc52d9bf6818813c2 | Bump version. | KmolYuan/pyslvs,KmolYuan/pyslvs,KmolYuan/pyslvs,KmolYuan/pyslvs | pyslvs/__init__.py | pyslvs/__init__.py | # -*- coding: utf-8 -*-
"""Kernel of Pyslvs."""
__all__ = [
'__version__',
'Coordinate',
'plap',
'pllp',
'plpp',
'pxy',
'expr_parser',
'expr_solving',
'data_collecting',
'get_vlinks',
'VJoint',
'VPoint',
'VLink',
'SolverSystem',
'norm_path',
'curvature',
'derivative',
'path_signature',
'cross_correlation',
'Planar',
't_config',
'EStack',
'vpoint_dof',
'color_names',
'color_rgb',
'parse_params',
'parse_pos',
'parse_vpoints',
'parse_vlinks',
'edges_view',
'graph2vpoints',
'PointArgs',
'LinkArgs',
'example_list',
'all_examples',
'collection_list',
'all_collections',
'efd_fitting',
'get_include',
]
__version__ = "20.06.0.dev0"
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2020"
__license__ = "AGPL"
__email__ = "pyslvs@gmail.com"
import pywt as _
from .expression import get_vlinks, VJoint, VPoint, VLink, Coordinate
from .bfgs import SolverSystem
from .triangulation import t_config, EStack
from .tinycadlib import (
plap,
pllp,
plpp,
pxy,
vpoint_dof,
expr_parser,
expr_solving,
data_collecting,
)
from .planar_linkage import (Planar, norm_path, curvature, derivative,
path_signature, cross_correlation)
from .expression_parser import (
color_names,
color_rgb,
parse_params,
parse_pos,
parse_vpoints,
parse_vlinks,
edges_view,
graph2vpoints,
PointArgs,
LinkArgs,
)
from .example import example_list, all_examples
from .collection import collection_list, all_collections
from .efd import efd_fitting
def get_include() -> str:
"""Get include directory."""
from os.path import dirname
return dirname(__file__)
| # -*- coding: utf-8 -*-
"""Kernel of Pyslvs."""
__all__ = [
'__version__',
'Coordinate',
'plap',
'pllp',
'plpp',
'pxy',
'expr_parser',
'expr_solving',
'data_collecting',
'get_vlinks',
'VJoint',
'VPoint',
'VLink',
'SolverSystem',
'norm_path',
'curvature',
'derivative',
'path_signature',
'cross_correlation',
'Planar',
't_config',
'EStack',
'vpoint_dof',
'color_names',
'color_rgb',
'parse_params',
'parse_pos',
'parse_vpoints',
'parse_vlinks',
'edges_view',
'graph2vpoints',
'PointArgs',
'LinkArgs',
'example_list',
'all_examples',
'collection_list',
'all_collections',
'efd_fitting',
'get_include',
]
__version__ = "20.05.0"
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2020"
__license__ = "AGPL"
__email__ = "pyslvs@gmail.com"
import pywt as _
from .expression import get_vlinks, VJoint, VPoint, VLink, Coordinate
from .bfgs import SolverSystem
from .triangulation import t_config, EStack
from .tinycadlib import (
plap,
pllp,
plpp,
pxy,
vpoint_dof,
expr_parser,
expr_solving,
data_collecting,
)
from .planar_linkage import (Planar, norm_path, curvature, derivative,
path_signature, cross_correlation)
from .expression_parser import (
color_names,
color_rgb,
parse_params,
parse_pos,
parse_vpoints,
parse_vlinks,
edges_view,
graph2vpoints,
PointArgs,
LinkArgs,
)
from .example import example_list, all_examples
from .collection import collection_list, all_collections
from .efd import efd_fitting
def get_include() -> str:
"""Get include directory."""
from os.path import dirname
return dirname(__file__)
| agpl-3.0 | Python |
9516115f722fb3f95882553d8077bf1ab4a670ef | FIX web_demo upload was not processing grayscale correctly | wangg12/caffe,longjon/caffe,gnina/gnina,gogartom/caffe-textmaps,wangg12/caffe,tackgeun/caffe,longjon/caffe,wangg12/caffe,tackgeun/caffe,tackgeun/caffe,gogartom/caffe-textmaps,gnina/gnina,tackgeun/caffe,gnina/gnina,CZCV/s-dilation-caffe,gnina/gnina,wangg12/caffe,longjon/caffe,gnina/gnina,gogartom/caffe-textmaps,CZCV/s-dilation-caffe,longjon/caffe,CZCV/s-dilation-caffe,CZCV/s-dilation-caffe,gnina/gnina,gogartom/caffe-textmaps | examples/web_demo/exifutil.py | examples/web_demo/exifutil.py | """
This script handles the skimage exif problem.
"""
from PIL import Image
import numpy as np
ORIENTATIONS = { # used in apply_orientation
2: (Image.FLIP_LEFT_RIGHT,),
3: (Image.ROTATE_180,),
4: (Image.FLIP_TOP_BOTTOM,),
5: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_90),
6: (Image.ROTATE_270,),
7: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_270),
8: (Image.ROTATE_90,)
}
def open_oriented_im(im_path):
im = Image.open(im_path)
if hasattr(im, '_getexif'):
exif = im._getexif()
if exif is not None and 274 in exif:
orientation = exif[274]
im = apply_orientation(im, orientation)
img = np.asarray(im).astype(np.float32) / 255.
if img.ndim == 2:
img = img[:, :, np.newaxis]
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def apply_orientation(im, orientation):
if orientation in ORIENTATIONS:
for method in ORIENTATIONS[orientation]:
im = im.transpose(method)
return im
| """
This script handles the skimage exif problem.
"""
from PIL import Image
import numpy as np
ORIENTATIONS = { # used in apply_orientation
2: (Image.FLIP_LEFT_RIGHT,),
3: (Image.ROTATE_180,),
4: (Image.FLIP_TOP_BOTTOM,),
5: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_90),
6: (Image.ROTATE_270,),
7: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_270),
8: (Image.ROTATE_90,)
}
def open_oriented_im(im_path):
im = Image.open(im_path)
if hasattr(im, '_getexif'):
exif = im._getexif()
if exif is not None and 274 in exif:
orientation = exif[274]
im = apply_orientation(im, orientation)
return np.asarray(im).astype(np.float32) / 255.
def apply_orientation(im, orientation):
if orientation in ORIENTATIONS:
for method in ORIENTATIONS[orientation]:
im = im.transpose(method)
return im
| bsd-2-clause | Python |
ccc289a725ac92c8a5acb3ff101c2bf07234e3f6 | Switch docs to github pages | karimbahgat/Pytess,karimbahgat/Pytess | pytess/__init__.py | pytess/__init__.py | """
# Pytess
Pure Python tessellation of points into polygons, including
Delauney/Thiessin, and Voronoi polygons. Built as a
convenient user interface for Bill Simons/Carson Farmer python port of
Steven Fortune C++ version of a Delauney triangulator.
## Platforms
Tested on Python version 2.x and 3.x.
## Dependencies
Pure Python, no dependencies.
## Installing it
Pytess is installed with pip from the commandline:
pip install pytess
## Usage
To triangulate a set of points, simply do:
import pytess
points = [(1,1), (5,5), (3,5), (8,1)]
triangles = pytess.triangulate(points)
And for voronoi diagrams:
import pytess
points = [(1,1), (5,5), (3,5), (8,1)]
voronoipolys = pytess.voronoi(points)
## More Information:
- [Home Page](http://github.com/karimbahgat/Pytess)
- [API Documentation](https://karimbahgat.github.io/Pytess/)
## License:
This code is free to share, use, reuse,
and modify according to the MIT license, see license.txt
## Credits:
I just made it more convenient to use for end-users and uploaded it to PyPi.
The real credit goes to Bill Simons/Carson Farmer and Steven Fortune for
implementing the algorithm in the first place.
- Karim Bahgat
- Michael Currie
"""
__version__ = "1.0.0"
from .main import *
| """
# Pytess
Pure Python tessellation of points into polygons, including
Delauney/Thiessin, and Voronoi polygons. Built as a
convenient user interface for Bill Simons/Carson Farmer python port of
Steven Fortune C++ version of a Delauney triangulator.
## Platforms
Tested on Python version 2.x and 3.x.
## Dependencies
Pure Python, no dependencies.
## Installing it
Pytess is installed with pip from the commandline:
pip install pytess
## Usage
To triangulate a set of points, simply do:
import pytess
points = [(1,1), (5,5), (3,5), (8,1)]
triangles = pytess.triangulate(points)
And for voronoi diagrams:
import pytess
points = [(1,1), (5,5), (3,5), (8,1)]
voronoipolys = pytess.voronoi(points)
## More Information:
- [Home Page](http://github.com/karimbahgat/Pytess)
- [API Documentation](http://pythonhosted.org/Pytess)
## License:
This code is free to share, use, reuse,
and modify according to the MIT license, see license.txt
## Credits:
I just made it more convenient to use for end-users and uploaded it to PyPi.
The real credit goes to Bill Simons/Carson Farmer and Steven Fortune for
implementing the algorithm in the first place.
- Karim Bahgat
- Michael Currie
"""
__version__ = "1.0.0"
from .main import *
| mit | Python |
4ca9594b5208a9d2774048b0519d9c2abe06b6db | allow multiple trees in conversion to numpy array | rootpy/rootpy,ndawe/rootpy,rootpy/rootpy,ndawe/rootpy,kreczko/rootpy,kreczko/rootpy,ndawe/rootpy,rootpy/rootpy,kreczko/rootpy | rootpy/root2array.py | rootpy/root2array.py | """
This module should handle:
* conversion of TTrees into NumPy arrays
* conversion of TTrees into carrays (http://pypi.python.org/pypi/carray)
"""
from .types import Variable, convert
import numpy as np
def to_numpy_array(*trees, branches=None, use_cache=False, cache_size=1000000):
# if branches is None then select only branches with basic types
# i.e. no vectors or other special objects
_branches = []
if branches is None:
for name, value in tree.buffer.items():
if isinstance(value, Variable):
_branches.append((name, value))
else:
for branch in branches:
if branch not in tree.buffer:
raise ValueError("Branch %s does not exist in tree" % branch)
value = tree.buffer[branch]
if not isinstance(value, Variable):
raise TypeError("Branch %s is not a basic type: %s" %
(branch, type(value)))
_branches.append((branch, tree.buffer[branch]))
if not _branches:
return None
dtype = [(name, convert('ROOTCODE', 'NUMPY', value.type)) for name, value in _branches]
total_entries = sum([tree.GetEntries() for tree in trees])
array = np.recarray(shape=(total_entries,), dtype=dtype)
i = 0
for tree in trees:
tree.use_cache(use_cache, cache_size=cache_size, learn_entries=1)
if use_cache:
tree.always_read([name for name, value in _branches])
for entry in tree:
for j, (branch, value) in enumerate(_branches):
array[i][j] = entry[branch].value
i += 1
return array
| """
This module should handle:
* conversion of TTrees into NumPy arrays
* conversion of TTrees into carrays (http://pypi.python.org/pypi/carray)
"""
from .types import Variable, convert
import numpy as np
def to_numpy_array(tree, branches=None, use_cache=False, cache_size=1000000):
# if branches is None then select only branches with basic types
# i.e. no vectors or other special objects
_branches = []
if branches is None:
for name, value in tree.buffer.items():
if isinstance(value, Variable):
_branches.append((name, value))
else:
for branch in branches:
if branch not in tree.buffer:
raise ValueError("Branch %s does not exist in tree" % branch)
value = tree.buffer[branch]
if not isinstance(value, Variable):
raise TypeError("Branch %s is not a basic type: %s" %
(branch, type(value)))
_branches.append((branch, tree.buffer[branch]))
if not _branches:
return None
dtype = [(name, convert('ROOTCODE', 'NUMPY', value.type)) for name, value in _branches]
array = np.recarray(shape=(tree.GetEntries(),), dtype=dtype)
tree.use_cache(use_cache, cache_size=cache_size, learn_entries=1)
if use_cache:
tree.always_read([name for name, value in _branches])
for i, entry in enumerate(tree):
for j, (branch, value) in enumerate(_branches):
array[i][j] = value.value
return array
| bsd-3-clause | Python |
e39409b6376c3d370a3affb166b0bbe8e1769629 | Make an explicit list for Python 3. | eliteraspberries/avena | avena/xcor2.py | avena/xcor2.py | #!/usr/bin/env python
'''Cross-correlation of image arrays'''
from numpy import (
multiply as _multiply,
ones as _ones,
sqrt as _sqrt,
zeros as _zeros,
)
from numpy.fft import (
fftshift as _fftshift,
ifftshift as _ifftshift,
rfft2 as _rfft2,
irfft2 as _irfft2,
)
from . import filter, image, tile
_DETREND_FACTOR = 0.10
def _detrend_filter(array):
m, n = array.shape
r = int(_sqrt(m * n) * _DETREND_FACTOR)
f = filter._high_pass_filter((m, n), r)
_multiply(array, f, out=array)
def _zeropad(array, size):
m, n = array.shape
p, q = size
z = _zeros((p, q), dtype=array.dtype)
z[:m, :n] = array
return z
def _xcor2_shape(shapes):
shape1, shape2 = shapes
a, b = shape1
c, d = shape2
return (a + c, b + d)
def _center(array, shape):
m, n = array.shape
a, b = shape
i, j = (m - a) // 2, (n - b) // 2
return array[i:(i + a), j:(j + b)]
def _xcor2(array1, array2):
x = tile.tile9_periodic(array1)
a, b = x.shape
y = array2[::-1, ::-1]
c, d = y.shape
m, n = _xcor2_shape(((a, b), (c, d)))
x = _zeropad(x, (m, n))
y = _zeropad(y, (m, n))
X = _rfft2(x)
Y = _rfft2(y)
X = _fftshift(X)
Y = _fftshift(Y)
_detrend_filter(X)
_detrend_filter(Y)
_multiply(X, Y, out=X)
X = _ifftshift(X)
x = _irfft2(X, s=(m, n))
z = _center(x, (a // 3 + c, b // 3 + d))
z = _center(z, (a // 3, b // 3))
return z
def xcor2(array1, array2):
'''Compute the cross-correlation of two image arrays.'''
z = _ones(array1.shape[:2])
channel_pairs = list(zip(
image.get_channels(array1),
image.get_channels(array2),
))
for (xi, yi) in channel_pairs:
xcori = _xcor2(xi, yi)
_multiply(z, xcori, out=z)
return z
if __name__ == '__main__':
pass
| #!/usr/bin/env python
'''Cross-correlation of image arrays'''
from numpy import (
multiply as _multiply,
ones as _ones,
sqrt as _sqrt,
zeros as _zeros,
)
from numpy.fft import (
fftshift as _fftshift,
ifftshift as _ifftshift,
rfft2 as _rfft2,
irfft2 as _irfft2,
)
from . import filter, image, tile
_DETREND_FACTOR = 0.10
def _detrend_filter(array):
m, n = array.shape
r = int(_sqrt(m * n) * _DETREND_FACTOR)
f = filter._high_pass_filter((m, n), r)
_multiply(array, f, out=array)
def _zeropad(array, size):
m, n = array.shape
p, q = size
z = _zeros((p, q), dtype=array.dtype)
z[:m, :n] = array
return z
def _xcor2_shape(shapes):
shape1, shape2 = shapes
a, b = shape1
c, d = shape2
return (a + c, b + d)
def _center(array, shape):
m, n = array.shape
a, b = shape
i, j = (m - a) // 2, (n - b) // 2
return array[i:(i + a), j:(j + b)]
def _xcor2(array1, array2):
x = tile.tile9_periodic(array1)
a, b = x.shape
y = array2[::-1, ::-1]
c, d = y.shape
m, n = _xcor2_shape(((a, b), (c, d)))
x = _zeropad(x, (m, n))
y = _zeropad(y, (m, n))
X = _rfft2(x)
Y = _rfft2(y)
X = _fftshift(X)
Y = _fftshift(Y)
_detrend_filter(X)
_detrend_filter(Y)
_multiply(X, Y, out=X)
X = _ifftshift(X)
x = _irfft2(X, s=(m, n))
z = _center(x, (a // 3 + c, b // 3 + d))
z = _center(z, (a // 3, b // 3))
return z
def xcor2(array1, array2):
'''Compute the cross-correlation of two image arrays.'''
z = _ones(array1.shape[:2])
channel_pairs = zip(
image.get_channels(array1),
image.get_channels(array2),
)
for (xi, yi) in channel_pairs:
xcori = _xcor2(xi, yi)
_multiply(z, xcori, out=z)
return z
if __name__ == '__main__':
pass
| isc | Python |
e5ac319a926b6cc81280f8e954badb3a8fb374d2 | Update utils.py | pathakvaidehi2391/WorkSpace,pathakvaidehi2391/WorkSpace | azure/utils.py | azure/utils.py | ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# Built-in Imports
import os
import requests
import json
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.decorators import operation
import constants
@operation
def validate_node_properties(key, ctx_node_properties):
if key not in ctx_node_properties:
raise NonRecoverableError('{0} is a required input. Unable to create.'.format(key))
@operation
def list_all_resource_groups(**_):
subscription_id = ctx.node.properties['subscription_id']
list_resource_groups_url='https://management.azure.com/subscriptions/'+subscription_id+'/resourcegroups?api-version='+constants.api_version
list_rg=requests.get(url=list_resource_groups_url, headers=constants.headers)
print list_rg.text
#rg_list= extract from json file
#return rg_list
@operation
def list_all_storage_accounts(**_):
resource_group_name = ctx.node.properties['vm_name']+'_resource_group'
subscription_id = ctx.node.properties['subscription_id']
list_storage_accounts_url='https://management.azure.com/subscriptions/'+subscription_id+'/resourceGroups/'+resource_group_name+'/providers/Microsoft.Storage/storageAccounts?api-version='+constants.api_version
list_sg = requests.get(url=list_storage_accounts_url, headers = constants.headers)
print list_sg.text
#sg_account_name_list= #extract sg_name
#return sg_account_name_list
@operation
def list_all_vnets(**_):
resource_group_name = ctx.node.properties['vm_name']+'_resource_group'
subscription_id = ctx.node.properties['subscription_id']
list_vnets_url='https://management.azure.com/subscriptions/'+subscription_id+'/resourceGroups/'+resource_group_name+'/providers/microsoft.network/virtualnetworks?api-version='+constants.api_version
list_vnet = requests.get(url=list_vnets_url, headers = constants.headers)
print list_vnet.text
#vnet_list= #extract vnet_name
#return vnet_list
@operation
def list_all_virtual_machines(**_):
resource_group_name = ctx.node.properties['vm_name']+'_resource_group'
subscription_id = ctx.node.properties['subscription_id']
list_virtual_machines_url='https://management.azure.com/subscriptions/'+subscription_id+'/resourceGroups/'+resource_group_name+'/providers/Microsoft.Compute/virtualmachines?api-version='+constants.api_version
list_vms = requests.get(url=list_virtual_machines_url, headers = constants.headers)
print list_vms.text
#vm_list= #extract vnet_name
#return vm_list
| ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# Built-in Imports
import os
import requests
import json
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.decorators import operation
import constants
@operation
def validate_node_properties(key, ctx_node_properties):
if key not in ctx_node_properties:
raise NonRecoverableError('{0} is a required input. Unable to create.'.format(key))
@operation
def list_all_resource_groups(**_):
list_resource_groups_url='https://management.azure.com/subscriptions/'+constants.subscription_id+'/resourcegroups?api-version='+constants.api_version
list_rg=requests.get(url=list_resource_groups_url, headers=constants.headers)
print list_rg.text
#rg_list= extract from json file
#return rg_list
@operation
def list_all_storage_accounts(**_):
resource_group_name = ctx.node.properties['resource_group_name']
list_storage_accounts_url='https://management.azure.com/subscriptions/'+constants.subscription_id+'/resourceGroups/'+resource_group_name+'/providers/Microsoft.Storage/storageAccounts?api-version='+constants.api_version
list_sg = requests.get(url=list_storage_accounts_url, headers = constants.headers)
print list_sg.text
#sg_account_name_list= #extract sg_name
#return sg_account_name_list
@operation
def list_all_vnets(**_):
resource_group_name = ctx.node.properties['resource_group_name']
list_vnets_url='https://management.azure.com/subscriptions/'+constants.subscription_id+'/resourceGroups/'+resource_group_name+'/providers/microsoft.network/virtualnetworks?api-version='+constants.api_version
list_vnet = requests.get(url=list_vnets_url, headers = constants.headers)
print list_vnet.text
#vnet_list= #extract vnet_name
#return vnet_list
@operation
def list_all_virtual_machines(**_):
resource_group_name = ctx.node.properties['resource_group_name']
list_virtual_machines_url='https://management.azure.com/subscriptions/'+constants.subscription_id+'/resourceGroups/'+resource_group_name+'/providers/Microsoft.Compute/virtualmachines?api-version='+constants.api_version
list_vms = requests.get(url=list_virtual_machines_url, headers = constants.headers)
print list_vms.text
#vm_list= #extract vnet_name
#return vm_list
| apache-2.0 | Python |
b62cb22d4186be47924e241a373209d71a9b80ae | use path method from base Storage | masci/django-appengine-toolkit,masci/django-appengine-toolkit,masci/django-appengine-toolkit | appengine_toolkit/storage.py | appengine_toolkit/storage.py | from django.core.files.storage import Storage
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.utils import timezone
import os
import mimetypes
import cloudstorage
from google.appengine.ext import blobstore
from google.appengine.api import images
from .settings import appengine_toolkit_settings
class GoogleCloudStorage(Storage):
"""
"""
def __init__(self):
try:
cloudstorage.validate_bucket_name(appengine_toolkit_settings.BUCKET_NAME)
except ValueError:
raise ImproperlyConfigured("Please specify a valid value for APPENGINE_TOOLKIT['BUCKET_NAME'] setting")
self._bucket = '/' + appengine_toolkit_settings.BUCKET_NAME
def path(self, name):
"""
Returns the full path to the file, including leading '/' and bucket name.
Access to the bucket root are not allowed.
"""
if not name:
raise SuspiciousOperation("Attempted access to '%s' denied." % name)
return os.path.join(self._bucket, name)
def _open(self, name, mode='rb'):
return cloudstorage.open(self.path(name), 'r')
def _save(self, name, content):
realname = self.path(name)
content_t = mimetypes.guess_type(realname)[0]
with cloudstorage.open(realname, 'w', content_type=content_t, options={'x-goog-acl': 'public-read'}) as f:
f.write(content.read())
return os.path.join(self._bucket, realname)
def delete(self, name):
try:
cloudstorage.delete(self.path(name))
except cloudstorage.NotFoundError:
pass
def exists(self, name):
try:
cloudstorage.stat(self.path(name))
return True
except cloudstorage.NotFoundError:
return False
def listdir(self, path):
return [], [obj.filename for obj in cloudstorage.listbucket(path)]
def size(self, name):
filestat = cloudstorage.stat(self.path(name))
return filestat.st_size
def url(self, name):
key = blobstore.create_gs_key('/gs' + name)
return images.get_serving_url(key)
def created_time(self, name):
filestat = cloudstorage.stat(self.path(name))
creation_date = timezone.datetime.fromtimestamp(filestat.st_ctime)
return timezone.make_aware(creation_date, timezone.get_current_timezone())
| from django.core.files.storage import Storage
from django.core.exceptions import ImproperlyConfigured
from django.utils import timezone
import os
import mimetypes
import cloudstorage
from google.appengine.ext import blobstore
from google.appengine.api import images
from .settings import appengine_toolkit_settings
class GoogleCloudStorage(Storage):
"""
"""
def __init__(self):
try:
cloudstorage.validate_bucket_name(appengine_toolkit_settings.BUCKET_NAME)
except ValueError:
raise ImproperlyConfigured("Please specify a valid value for APPENGINE_TOOLKIT['BUCKET_NAME'] setting")
self._bucket = '/' + appengine_toolkit_settings.BUCKET_NAME
def _realpath(self, name):
return os.path.join(self._bucket, name) if name else self._bucket
def _open(self, name, mode='rb'):
return cloudstorage.open(os.path.join(self._bucket, name), 'r')
def _save(self, name, content):
realname = self._realpath(name)
content_t = mimetypes.guess_type(realname)[0]
with cloudstorage.open(realname, 'w', content_type=content_t, options={'x-goog-acl': 'public-read'}) as f:
f.write(content.read())
return os.path.join(self._bucket, realname)
def delete(self, name):
try:
cloudstorage.delete(self._realpath(name))
except cloudstorage.NotFoundError:
pass
def exists(self, name):
try:
cloudstorage.stat(self._realpath(name))
return True
except cloudstorage.NotFoundError:
return False
def listdir(self, path):
return [], [obj.filename for obj in cloudstorage.listbucket(self._realpath(path))]
def size(self, name):
filestat = cloudstorage.stat(self._realpath(name))
return filestat.st_size
def url(self, name):
key = blobstore.create_gs_key('/gs' + name)
return images.get_serving_url(key)
def created_time(self, name):
filestat = cloudstorage.stat(self._realpath(name))
return timezone.datetime.fromtimestamp(filestat.st_ctime)
| bsd-3-clause | Python |
ae38c24ecbd20a5fa6bc8cd0e101bb20c436412a | Fix main __init__ py instruction order (#887) | QISKit/qiskit-sdk-py,QISKit/qiskit-sdk-py,QISKit/qiskit-sdk-py | qiskit/__init__.py | qiskit/__init__.py | # -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=wrong-import-order
# pylint: disable=redefined-builtin
"""Main QISKit public functionality."""
import os
import pkgutil
# First, check for required Python and API version
from . import _util
from ._qiskiterror import QISKitError
from ._classicalregister import ClassicalRegister
from ._quantumregister import QuantumRegister
from ._quantumcircuit import QuantumCircuit
from ._gate import Gate
from ._compositegate import CompositeGate
from ._instruction import Instruction
from ._instructionset import InstructionSet
from ._reset import Reset
from ._measure import Measure
from .result import Result
# The qiskit.extensions.x imports needs to be placed here due to the
# mechanism for adding gates dynamically.
import qiskit.extensions.standard
import qiskit.extensions.quantum_initializer
# Allow extending this namespace. Please note that currently this line needs
# to be placed *before* the wrapper imports or any non-import code.
__path__ = pkgutil.extend_path(__path__, __name__)
# Import circuit drawing methods by default
# This is wrapped in a try because the Travis tests fail due to non-framework
# Python build since using pyenv
try:
from qiskit.tools.visualization import (circuit_drawer, plot_histogram)
except (ImportError, RuntimeError) as expt:
print("Error: {0}".format(expt))
from .wrapper._wrapper import (
available_backends, local_backends, remote_backends,
get_backend, compile, execute, register, unregister,
registered_providers, load_qasm_string, load_qasm_file, least_busy,
store_credentials)
# Import the wrapper, to make it available when doing "import qiskit".
from . import wrapper
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(ROOT_DIR, "VERSION.txt"), "r") as version_file:
__version__ = version_file.read().strip()
| # -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=wrong-import-order
# pylint: disable=redefined-builtin
"""Main QISKit public functionality."""
import os
import pkgutil
# First, check for required Python and API version
from . import _util
from ._qiskiterror import QISKitError
from ._classicalregister import ClassicalRegister
from ._quantumregister import QuantumRegister
from ._quantumcircuit import QuantumCircuit
from ._gate import Gate
from ._compositegate import CompositeGate
from ._instruction import Instruction
from ._instructionset import InstructionSet
from ._reset import Reset
from ._measure import Measure
from .result import Result
# The qiskit.extensions.x imports needs to be placed here due to the
# mechanism for adding gates dynamically.
import qiskit.extensions.standard
import qiskit.extensions.quantum_initializer
# Import circuit drawing methods by default
# This is wrapped in a try because the Travis tests fail due to non-framework
# Python build since using pyenv
try:
from qiskit.tools.visualization import (circuit_drawer, plot_histogram)
except (ImportError, RuntimeError) as expt:
print("Error: {0}".format(expt))
# Allow extending this namespace. Please note that currently this line needs
# to be placed *before* the wrapper imports.
__path__ = pkgutil.extend_path(__path__, __name__)
from .wrapper._wrapper import (
available_backends, local_backends, remote_backends,
get_backend, compile, execute, register, unregister,
registered_providers, load_qasm_string, load_qasm_file, least_busy,
store_credentials)
# Import the wrapper, to make it available when doing "import qiskit".
from . import wrapper
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(ROOT_DIR, "VERSION.txt"), "r") as version_file:
__version__ = version_file.read().strip()
| apache-2.0 | Python |
425adbd5f4dc475f4d3a7ce31f7df9ca1e6e3ba1 | check class format | doc212/theros,doc212/theros,doc212/theros,doc212/theros,doc212/theros | import.py | import.py | #!/usr/bin/env python
"""
A script to import initial data from ProEco into Theros
"""
import logging
import argparse
import sys
import re
parser=argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("worksFile", help="the csv file containing the raw works export from ProEco", metavar="CSV_FILE" , default="travaux.csv", nargs="?")
parser.add_argument("-v","--verbose", action="store_const", dest="logging", const=logging.DEBUG, default=logging.INFO, help="show debug logs")
parser.add_argument("--dsn", help="the dsn to use for db operations", action="store", dest="dsn", default="theros_dev")
parser.add_argument("--is", "--insert-student", help="insert (ignoring duplicates) students into database (requires --dsn)", action="store_true", dest="insertStudents")
args=parser.parse_args()
logging.basicConfig(level=args.logging, stream=sys.stdout)
logger=logging.getLogger()
if args.insertStudents and not args.dsn:
parser.error("--is requires --dsn")
worksFile=args.worksFile
works=[]
classes=set()
students=set()
with open(worksFile) as fh:
header=True
for i,line in enumerate(fh):
if header:
header=False
continue
line=line.decode("utf8")
klass,student, dummy, foo, desc, grp = map(lambda s:s.strip(), line.split("\t"))
klass=klass.replace(" ","").upper()
if not re.search(r"^\d[A-Z]+$", klass):
raise ValueError, "line %i contains bad class: %s"%(i+1, klass)
student=student.replace(" "," ")
classes.add(klass)
students.add(student)
if desc:
works.append((klass, student, desc))
logger.debug("keep %s", works[-1])
else:
logger.debug("discarded line %s", line.strip())
logger.info("got %i works, %i students, %i classes", len(works), len(students), len(classes))
if args.insertStudents:
logging.info("inserting students")
import pyodbc
conn=pyodbc.connect(dsn=args.dsn)
try:
db=conn.cursor()
params=[(s,) for s in sorted(students)]
db.executemany("INSERT IGNORE INTO student(st_name) VALUES (?)", params)
conn.commit()
finally:
conn.close()
| #!/usr/bin/env python
"""
A script to import initial data from ProEco into Theros
"""
import logging
import argparse
import sys
parser=argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("worksFile", help="the csv file containing the raw works export from ProEco", metavar="CSV_FILE" , default="travaux.csv", nargs="?")
parser.add_argument("-v","--verbose", action="store_const", dest="logging", const=logging.DEBUG, default=logging.INFO, help="show debug logs")
parser.add_argument("--dsn", help="the dsn to use for db operations", action="store", dest="dsn", default="theros_dev")
parser.add_argument("--is", "--insert-student", help="insert (ignoring duplicates) students into database (requires --dsn)", action="store_true", dest="insertStudents")
args=parser.parse_args()
logging.basicConfig(level=args.logging, stream=sys.stdout)
logger=logging.getLogger()
if args.insertStudents and not args.dsn:
parser.error("--is requires --dsn")
worksFile=args.worksFile
works=[]
classes=set()
students=set()
with open(worksFile) as fh:
header=True
for line in fh:
if header:
header=False
continue
line=line.decode("utf8")
klass,student, dummy, foo, desc, grp = map(lambda s:s.strip(), line.split("\t"))
klass=klass.replace(" ","")
student=student.replace(" "," ")
classes.add(klass)
students.add(student)
if desc:
works.append((klass, student, desc))
logger.debug("keep %s", works[-1])
else:
logger.debug("discarded line %s", line.strip())
logger.info("got %i works, %i students, %i classes", len(works), len(students), len(classes))
if args.insertStudents:
logging.info("inserting students")
import pyodbc
conn=pyodbc.connect(dsn=args.dsn)
try:
db=conn.cursor()
params=[(s,) for s in sorted(students)]
db.executemany("INSERT IGNORE INTO student(st_name) VALUES (?)", params)
conn.commit()
finally:
conn.close()
| mit | Python |
cb4c0cb2c35d97e0364a4c010715cdf15d261e4c | Add a method to get username if users have valid cookie | lttviet/udacity-final | basehandler.py | basehandler.py | # -*- conding: utf-8 -*-
import os
import jinja2
import webapp2
import utils
JINJA_ENV = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
autoescape=True)
class BaseHandler(webapp2.RequestHandler):
def render(self, template, **kw):
"""Method render takes a template file and key-value pairs.
It substitutes keys found in template with values in pairs.
The resulted page is sent back to user."""
t = JINJA_ENV.get_template(template)
self.response.write(t.render(kw))
def set_cookie(self, user):
"""Set user cookie in headers."""
cookie = utils.make_cookie(user)
self.response.headers.add_header(
'Set-Cookie',
'user={}; Path=/'.format(cookie))
def logout(self):
"""Set user cookie to empty in headers."""
self.response.headers.add_header('Set-Cookie',
'user=;Path=/')
def get_username(self):
"""Check if user has a valid cookie.
Returns username if cookie is valid."""
cookie = self.request.cookies.get('user')
if cookie and utils.valid_cookie(cookie):
username = cookie.split('|')[0]
return username
| # -*- conding: utf-8 -*-
import os
import jinja2
import webapp2
import utils
JINJA_ENV = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
autoescape=True)
class BaseHandler(webapp2.RequestHandler):
def render(self, template, **kw):
"""Method render takes a template file and key-value pairs.
It substitutes keys found in template with values in pairs.
The resulted page is sent back to user."""
t = JINJA_ENV.get_template(template)
self.response.write(t.render(kw))
def set_cookie(self, user):
"""Set user cookie in headers."""
cookie = utils.make_cookie(user)
self.response.headers.add_header(
'Set-Cookie',
'user={}; Path=/'.format(cookie))
def logout(self):
"""Set user cookie to empty in headers."""
self.response.headers.add_header('Set-Cookie',
'user=;Path=/')
| mit | Python |
dd7f7b482bce0a6b9d412458ff24e5b4350cdd50 | fix admin import/export after refactor | batiste/django-page-cms,akaihola/django-page-cms,pombredanne/django-page-cms-1,remik/django-page-cms,batiste/django-page-cms,akaihola/django-page-cms,remik/django-page-cms,pombredanne/django-page-cms-1,akaihola/django-page-cms,remik/django-page-cms,remik/django-page-cms,batiste/django-page-cms,pombredanne/django-page-cms-1 | pages/admin/actions.py | pages/admin/actions.py | from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponse
from django.conf import settings as global_settings
from django.db import transaction
from django.shortcuts import redirect, render_to_response
from django.template import RequestContext
from pages import settings
from pages.http import get_language_from_request
from pages.utils import get_placeholders, pages_to_json, json_to_pages
from pages.models import Page
JSON_PAGE_EXPORT_FILENAME = 'cms_pages.json'
def export_pages_as_json(modeladmin, request, queryset):
response = HttpResponse(mimetype="application/json")
response['Content-Disposition'] = 'attachment; filename=%s' % (
JSON_PAGE_EXPORT_FILENAME,)
response.write(pages_to_json(queryset))
return response
export_pages_as_json.short_description = _("Export pages as JSON")
@transaction.commit_on_success
def import_pages_from_json(request,
template_name='admin/pages/page/import_pages.html'):
try:
j = request.FILES['json']
except KeyError:
return redirect('admin:page-index')
errors, pages_created = json_to_pages(j, request.user,
get_language_from_request(request))
return render_to_response(template_name, {
'errors': errors,
'pages_created': pages_created,
'app_label': 'pages',
'opts': Page._meta,
}, RequestContext(request))
| from django.utils.translation import ugettext_lazy as _
from django.http import HttpResponse
from django.conf import settings as global_settings
from django.db import transaction
from django.shortcuts import redirect, render_to_response
from django.template import RequestContext
from pages import settings
from pages.http import get_language_from_request
from pages.utils import get_placeholders
from pages.models import Page
JSON_PAGE_EXPORT_FILENAME = 'cms_pages.json'
def export_pages_as_json(modeladmin, request, queryset):
response = HttpResponse(mimetype="application/json")
response['Content-Disposition'] = 'attachment; filename=%s' % (
JSON_PAGE_EXPORT_FILENAME,)
response.write(pages_to_json(queryset))
return response
export_pages_as_json.short_description = _("Export pages as JSON")
@transaction.commit_on_success
def import_pages_from_json(request,
template_name='admin/pages/page/import_pages.html'):
try:
j = request.FILES['json']
except KeyError:
return redirect('admin:page-index')
errors, pages_created = json_to_pages(j, request.user,
get_language_from_request(request))
return render_to_response(template_name, {
'errors': errors,
'pages_created': pages_created,
'app_label': 'pages',
'opts': Page._meta,
}, RequestContext(request))
| bsd-3-clause | Python |
d33d7e5bf29d8c135c68eb5f1206d2f7df6f42ed | Add URL and description to search index of FoiRequest | catcosmo/froide,fin/froide,catcosmo/froide,fin/froide,ryankanno/froide,stefanw/froide,catcosmo/froide,okfse/froide,ryankanno/froide,ryankanno/froide,CodeforHawaii/froide,LilithWittmann/froide,okfse/froide,CodeforHawaii/froide,ryankanno/froide,stefanw/froide,LilithWittmann/froide,okfse/froide,fin/froide,okfse/froide,CodeforHawaii/froide,stefanw/froide,catcosmo/froide,CodeforHawaii/froide,LilithWittmann/froide,okfse/froide,LilithWittmann/froide,ryankanno/froide,stefanw/froide,catcosmo/froide,fin/froide,LilithWittmann/froide,stefanw/froide,CodeforHawaii/froide | froide/foirequest/search_indexes.py | froide/foirequest/search_indexes.py | from haystack import indexes
from haystack import site
from foirequest.models import FoiRequest
class FoiRequestIndex(indexes.SearchIndex):
text = indexes.EdgeNgramField(document=True, use_template=True)
title = indexes.CharField(model_attr='title')
description = indexes.CharField(model_attr='description')
status = indexes.CharField(model_attr='status')
first_message = indexes.DateTimeField(model_attr='first_message')
last_message = indexes.DateTimeField(model_attr='last_message')
url = indexes.CharField(model_attr='get_absolute_url')
def get_queryset(self):
"""Used when the entire index for model is updated."""
return FoiRequest.objects.get_for_search_index()
site.register(FoiRequest, FoiRequestIndex)
| from haystack import indexes
from haystack import site
from foirequest.models import FoiRequest
class FoiRequestIndex(indexes.SearchIndex):
text = indexes.EdgeNgramField(document=True, use_template=True)
title = indexes.CharField(model_attr='title')
status = indexes.CharField(model_attr='status')
first_message = indexes.DateTimeField(model_attr='first_message')
last_message = indexes.DateTimeField(model_attr='last_message')
def get_queryset(self):
"""Used when the entire index for model is updated."""
return FoiRequest.objects.get_for_search_index()
site.register(FoiRequest, FoiRequestIndex)
| mit | Python |
3bfe8ec4e10bef79b847d7c5930e0521b817dfb7 | remove SumIfMetric and CountIfMetric from __all__ | juiceinc/recipe | recipe/__init__.py | recipe/__init__.py | # -*- coding: utf-8 -*-
"""
Recipe
~~~~~~~~~~~~~~~~~~~~~
"""
import logging
from flapjack_stack import FlapjackStack
from recipe import default_settings
from recipe.core import Recipe
from recipe.exceptions import BadIngredient, BadRecipe
from recipe.ingredients import (
Dimension, DivideMetric, Filter, Having, IdValueDimension, Ingredient,
LookupDimension, Metric, WtdAvgMetric
)
from recipe.oven import get_oven
from recipe.shelf import AutomaticShelf, Shelf
SETTINGS = FlapjackStack()
SETTINGS.add_layer(default_settings)
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
__version__ = '0.2.0'
__all__ = [
'BadIngredient', 'BadRecipe', 'Ingredient', 'Dimension', 'LookupDimension',
'IdValueDimension', 'Metric', 'DivideMetric', 'WtdAvgMetric',
'Filter', 'Having', 'Recipe', 'Shelf',
'AutomaticShelf', 'SETTINGS', 'get_oven'
]
| # -*- coding: utf-8 -*-
"""
Recipe
~~~~~~~~~~~~~~~~~~~~~
"""
import logging
from flapjack_stack import FlapjackStack
from recipe import default_settings
from recipe.core import Recipe
from recipe.exceptions import BadIngredient, BadRecipe
from recipe.ingredients import (
Dimension, DivideMetric, Filter, Having, IdValueDimension, Ingredient,
LookupDimension, Metric, WtdAvgMetric
)
from recipe.oven import get_oven
from recipe.shelf import AutomaticShelf, Shelf
SETTINGS = FlapjackStack()
SETTINGS.add_layer(default_settings)
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
__version__ = '0.2.0'
__all__ = [
'BadIngredient', 'BadRecipe', 'Ingredient', 'Dimension', 'LookupDimension',
'IdValueDimension', 'Metric', 'DivideMetric', 'WtdAvgMetric',
'CountIfMetric', 'SumIfMetric', 'Filter', 'Having', 'Recipe', 'Shelf',
'AutomaticShelf', 'SETTINGS', 'get_oven'
]
| mit | Python |
2897320deed93f1d119751244abf80360c31b39c | Update __init__.py. | patrickbird/cachupy | cachupy/__init__.py | cachupy/__init__.py | from cachupy.cache import Cache
| mit | Python | |
b66efd2e15d044cd2fd3c7d305424e1b7260aa3a | Update lists.py | KouKariya/tic-tac-toe-py | ref/lists/lists.py | ref/lists/lists.py | #lists.py
#Written by Jesse Gallarzo
#run in Python3 otherwise change input to raw_input for python2
listOfNames = []
#A for loop that runs five times.
#For every iteration, the name variable gets added to the list until the loop stops.
for num in range(5):
name = input('Enter a name: ')
listOfNames.append(name)
print(listOfNames)
#For every value(num) within the list 'listOfNames'
#print out a specific value from the list in the index denoted by 'num'.
for num in range(len(listOfNames)):
print(listOfNames[num])
#Below is a 3 dimentional list.
#The 3x3 list serves as an example to demonstrate how to create lists within lists, how to iterate through a 3 dimentional list
# and how to print out its content.
threeThreeGrid = [[' ' for i in range(3)] for j in range(3)]
for i in range(3):
for j in range(3):
name = input('Enter a name: ')
threeThreeGrid[i][j] = name
print(threeThreeGrid)
| #lists.py
#Written by Jesse Gallarzo
listOfNames = []
#A for loop that runs five times.
#For every iteration, the name variable gets added to the list until the loop stops.
for num in range(5):
name = input('Enter a name: ')
listOfNames.append(name)
print(listOfNames)
#For every value(num) within the list 'listOfNames'
#print out a specific value from the list in the index denoted by 'num'.
for num in range(len(listOfNames)):
print(listOfNames[num])
#Below is a 3 dimentional list.
#The 3x3 list serves as an example to demonstrate how to create lists within lists, how to iterate through a 3 dimentional list
# and how to print out its content.
threeThreeGrid = [[' ' for i in range(3)] for j in range(3)]
for i in range(3):
for j in range(3):
name = input('Enter a name: ')
threeThreeGrid[i][j] = name
print(threeThreeGrid)
| mit | Python |
0d3750128e2c8363d48d68c1ca95fe7b0b7a9086 | add description of how to create custom types | mistermatti/plugz,mistermatti/plugz | plugz/plugintypes/standard_plugin_type.py | plugz/plugintypes/standard_plugin_type.py | from abc import abstractmethod
from plugz import PluginTypeBase
class StandardPluginType(PluginTypeBase):
""" Simple plugin type that requires an activate() method.
Plugins that derive from this type will have to implement
activate() or plugz will refuse to load the plugin.
The plugintype needs to be set as well. Set it to a string
that describes the class. It does not have to be the class
name but it might make things easier for you.
"""
plugintype = __name__
@abstractmethod
def activate(self): pass
| from abc import abstractmethod
from plugz import PluginTypeBase
class StandardPluginType(PluginTypeBase):
""" Simple plugin type that requires an activate() method. """
plugintype = 'StandardPluginType'
def __init__(self, ):
""" """
super(StandardPluginType).__init__(self)
@abstractmethod
def activate(self):
""" """
raise NotImplementedError, self.__class__
| bsd-3-clause | Python |
ffe41990bd0e06c8d2ac2edc640b77fa7229d044 | modify some print out information | imwithye/git-ignore,imwithye/git-ignore | git-ignore/git_ignore.py | git-ignore/git_ignore.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Ciel, http://ciel.im
# Distributed under terms of the MIT license.
import sys
from git_ignore_add import git_ignore_add
from git_ignore_save import git_ignore_save
from git_ignore_list import git_ignore_list
from git_ignore_show import git_ignore_show
from git_ignore_which import git_ignore_which
from git_ignore_delete import git_ignore_delete
# cat ignores >> .gitignore
def add(languages):
git_ignore_add(languages)
# save current .gitignore
def save(filenames):
if len(filenames)<1:
filename = ""
else:
filename = filenames[0]
git_ignore_save(filename)
# list all user ignore files
def list():
git_ignore_list()
# delete user ignore files
def delete(filenames):
git_ignore_delete(filenames)
# cat .gitignore file
def show(languages):
git_ignore_show(languages)
# print which ignore file will be imported
def which(languages):
git_ignore_which(languages)
# print usage
def usage():
print "usage: git ignore <subcommand>"
print
print "Available subcommands are:"
print " add <project type> Add gitignore files. Try use 'git ignore add Python C'"
print " save [project type] Save current .gitignore file as a template"
print " list List all saved ignore templates"
print " delete [ignore file] Delete .gitignore or ignore templates"
print " show [ignore type] Cat .gitignore or ignore templates"
print " which <ignore type> Show which ignore file will be imported"
print " usage Show this help message and exit"
print " version Show version and exit"
print
print "http://github.com/imwithye/git-ignore"
print "git ignore, copyright Ciel <imwithye@gmail.com>"
# print version
def version():
print "git ignore, version 0.2"
print
print "http://github.com/imwithye/git-ignore"
print "git ignore, copyright Ciel <imwithye@gmail.com>"
# subcommand router
def select(argv):
if argv[1] == "add":
add(argv[2:])
exit()
elif argv[1] == "save":
save(argv[2:])
exit()
elif argv[1] == "list":
list()
exit()
elif argv[1] == "delete" or argv[1] == "remove":
delete(argv[2:])
exit()
elif argv[1] == "show":
show(argv[2:])
exit()
elif argv[1] == "which":
which(argv[2:])
exit()
elif argv[1] == "help" or argv[1] == "usage":
usage()
exit()
elif argv[1] == "version":
version()
exit()
else:
print "unknown subcommand"
usage()
exit()
if __name__ == "__main__":
if len(sys.argv)==1:
sys.argv.append("usage")
select(sys.argv)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Ciel, http://ciel.im
# Distributed under terms of the MIT license.
import sys
from git_ignore_add import git_ignore_add
from git_ignore_save import git_ignore_save
from git_ignore_list import git_ignore_list
from git_ignore_show import git_ignore_show
from git_ignore_which import git_ignore_which
from git_ignore_delete import git_ignore_delete
# cat ignores >> .gitignore
def add(languages):
git_ignore_add(languages)
# save current .gitignore
def save(filenames):
if len(filenames)<1:
filename = ""
else:
filename = filenames[0]
git_ignore_save(filename)
# list all user ignore files
def list():
git_ignore_list()
# delete user ignore files
def delete(filenames):
git_ignore_delete(filenames)
# cat .gitignore file
def show(languages):
git_ignore_show(languages)
# print which ignore file will be imported
def which(languages):
git_ignore_which(languages)
# print usage
def usage():
print "usage: git ignore <subcommand>"
print
print "Available subcommands are:"
print " add <project type> Add gitignore files. Try use 'git ignore add Python C'"
print " save [project type] Save current .gitignore file as a template"
print " list List all saved ignore files"
print " delete [ignore file] Delete .gitignore or ignore template"
print " show [ignore type] Cat .gitignore or ignore template"
print " which <ignore type> Show which ignore file will be imported"
print " usage Show this help message and exit"
print " version Show version and exit"
print
print "http://github.com/imwithye/git-ignore"
print "git ignore, copyright Ciel <imwithye@gmail.com>"
# print version
def version():
print "git ignore, version 0.2"
print
print "http://github.com/imwithye/git-ignore"
print "git ignore, copyright Ciel <imwithye@gmail.com>"
# subcommand router
def select(argv):
if argv[1] == "add":
add(argv[2:])
exit()
elif argv[1] == "save":
save(argv[2:])
exit()
elif argv[1] == "list":
list()
exit()
elif argv[1] == "delete" or argv[1] == "remove":
delete(argv[2:])
exit()
elif argv[1] == "show":
show(argv[2:])
exit()
elif argv[1] == "which":
which(argv[2:])
exit()
elif argv[1] == "help" or argv[1] == "usage":
usage()
exit()
elif argv[1] == "version":
version()
exit()
else:
print "unknown subcommand"
usage()
exit()
if __name__ == "__main__":
if len(sys.argv)==1:
sys.argv.append("usage")
select(sys.argv)
| mit | Python |
4c646128cfcb6d59445890c257447f01ed77a706 | Fix python syntax bug in update email fwd's script | tfiers/arenberg-online,tfiers/arenberg-online,tfiers/arenberg-online | core/management/update_email_forwards.py | core/management/update_email_forwards.py | # Virtual alias file syntax:
# email, space, email, (space, email, space, email,) newline, (repeat)
# Example:
# groep@arenbergorkest.be jef@gmail.com jos@hotmail.com
# jef@arenbergokest.be jef@gmail.com
# Catchall alias email = '@arenbergorkest.be'
from email_aliases import aliases
c = '' # New content of postfix virtual aliases file
for alias in aliases:
c += '{} {}\n'.format(alias['email']+'@arenbergorkest.be', ' '.join(alias['destinations']))
from subprocess import call
VIRTUAL_ALIAS_FILE = '/etc/postfix/virtual'
with open(VIRTUAL_ALIAS_FILE, 'w') as f:
f.write(c)
call(['sudo', 'postmap', VIRTUAL_ALIAS_FILE])
| # Virtual alias file syntax:
# email, space, email, (space, email, space, email,) newline, (repeat)
# Example:
# groep@arenbergorkest.be jef@gmail.com jos@hotmail.com
# jef@arenbergokest.be jef@gmail.com
# Catchall alias email = '@arenbergorkest.be'
from email_aliases import aliases
c = '' # New content of postfix virtual aliases file
for alias in aliases:
c += '{} {}\n'.format(alias.email+'@arenbergorkest.be', ' '.join(alias.destinations))
from subprocess import call
VIRTUAL_ALIAS_FILE = '/etc/postfix/virtual'
with open(VIRTUAL_ALIAS_FILE, 'w') as f:
f.write(c)
call(['sudo', 'postmap', VIRTUAL_ALIAS_FILE])
| mit | Python |
3ec923f4052bf7b66e5a694a1377124cc85f399c | Remove another unused test | jamesturk/tot,jamesturk/tot,jamesturk/tot,jamesturk/tot,jamesturk/tot | bills/tests.py | bills/tests.py | from django.test import override_settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from preferences.models import Preferences
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class BillViewTests(StaticLiveServerTestCase):
fixtures = ['fl_testdata.json']
def setUp(self):
u = User.objects.create_user('test')
p = Preferences.objects.create(user=u)
self.apikey = p.apikey
def test_by_topic_view(self):
response = self.client.get(reverse('by_topic'))
self.assertEqual(response.status_code, 200)
def test_latest_actions_page(self):
response = self.client.get(reverse('latest'))
self.assertEqual(response.status_code, 200)
def test_by_legislator_view(self):
response = self.client.get(reverse('by_legislator'))
self.assertEqual(response.status_code, 200)
def test_by_location_view(self):
response = self.client.get(reverse('by_location'))
self.assertEqual(response.status_code, 200)
| from django.test import override_settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from preferences.models import Preferences
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class BillViewTests(StaticLiveServerTestCase):
fixtures = ['fl_testdata.json']
def setUp(self):
u = User.objects.create_user('test')
p = Preferences.objects.create(user=u)
self.apikey = p.apikey
def test_by_topic_view(self):
response = self.client.get(reverse('by_topic'))
self.assertEqual(response.status_code, 200)
def test_latest_actions_page(self):
response = self.client.get(reverse('latest'))
self.assertEqual(response.status_code, 200)
def test_by_legislator_view(self):
response = self.client.get(reverse('by_legislator'))
self.assertEqual(response.status_code, 200)
def test_by_location_view(self):
response = self.client.get(reverse('by_location'))
self.assertEqual(response.status_code, 200)
def test_by_location_view_selected(self):
response = self.client.get(reverse('by_location_selected', args=['dudleyville']))
self.assertEqual(response.status_code, 200)
| mit | Python |
6a5e113cd78a27abb5eb4c249998fde5dfb41a27 | fix visual indentation | CartoDB/carto-python,CartoDB/cartodb-python | carto/exceptions.py | carto/exceptions.py | """
Module for carto-python exceptions definitions
.. module:: carto.exceptions
:platform: Unix, Windows
:synopsis: Module for carto-python exceptions definitions
.. moduleauthor:: Daniel Carrion <daniel@carto.com>
.. moduleauthor:: Alberto Romeu <alrocar@carto.com>
"""
class CartoException(Exception):
"""
Any Exception produced by carto-python should be wrapped around this class
"""
pass
class CartoRateLimitException(CartoException):
def __init__(self, exception, response):
super()
self.limit = response.headers['Carto-Rate-Limit-Limit']
self.remaining = response.headers['Carto-Rate-Limit-Remaining']
self.retryAfter = response.headers['Retry-After']
self.reset = response.headers['Carto-Rate-Limit-Reset']
@staticmethod
def isResponseRateLimited(response):
if (response.status_code == 429 and 'Retry-After' in response.headers and
int(response.headers['Retry-After']) >= 0):
return True
return False
| """
Module for carto-python exceptions definitions
.. module:: carto.exceptions
:platform: Unix, Windows
:synopsis: Module for carto-python exceptions definitions
.. moduleauthor:: Daniel Carrion <daniel@carto.com>
.. moduleauthor:: Alberto Romeu <alrocar@carto.com>
"""
class CartoException(Exception):
"""
Any Exception produced by carto-python should be wrapped around this class
"""
pass
class CartoRateLimitException(CartoException):
def __init__(self, exception, response):
super()
self.limit = response.headers['Carto-Rate-Limit-Limit']
self.remaining = response.headers['Carto-Rate-Limit-Remaining']
self.retryAfter = response.headers['Retry-After']
self.reset = response.headers['Carto-Rate-Limit-Reset']
@staticmethod
def isResponseRateLimited(response):
if (response.status_code == 429 and
'Retry-After' in response.headers and
int(response.headers['Retry-After']) >= 0):
return True
return False
| bsd-3-clause | Python |
48f5f051c9dae382cf535b5067f4df539a5e01a4 | fix buildbucket property parsing | eunchong/build,eunchong/build,eunchong/build,eunchong/build | scripts/master/buildbucket/common.py | scripts/master/buildbucket/common.py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import json
import logging
from twisted.python import log as twistedLog
LOG_PREFIX = '[buildbucket] '
# Buildbot-related constants.
BUILD_PROPERTY = 'build'
BUILDBUCKET_CHANGE_ID_PROPERTY = 'change_id'
CHANGE_CATEGORY = 'buildbucket'
CHANGE_REASON = 'buildbucket'
INFO_PROPERTY = 'buildbucket' # A Buildbot property for buildbucket info.
# UTC datetime corresponding to zero Unix timestamp.
EPOCH = datetime.datetime.utcfromtimestamp(0)
class Error(Exception):
"""Buildbucket-specific error."""
def log(message, level=None):
if level is None:
level = logging.INFO
twistedLog.msg('%s%s' % (LOG_PREFIX, message), loglevel=level)
def log_on_error(deferred, msg_prefix=None):
msg_prefix = msg_prefix or ''
def on_failure(failure):
msg = msg_prefix
if msg:
msg += ': '
msg += '%s' % failure
log(msg, level=logging.ERROR)
deferred.addErrback(on_failure)
# Copied from "utils" appengine component
# https://chromium.googlesource.com/infra/swarming/+/master/appengine/components/components/utils.py
def datetime_to_timestamp(value):
"""Converts UTC datetime to integer timestamp in microseconds since epoch."""
if not isinstance(value, datetime.datetime):
raise ValueError(
'Expecting datetime object, got %s instead' % type(value).__name__)
if value.tzinfo is not None:
raise ValueError('Only UTC datetime is supported')
dt = value - EPOCH
return dt.microseconds + 1000 * 1000 * (dt.seconds + 24 * 3600 * dt.days)
# Copied from "utils" appengine component
# https://chromium.googlesource.com/infra/swarming/+/master/appengine/components/components/utils.py
def timestamp_to_datetime(value):
"""Converts integer timestamp in microseconds since epoch to UTC datetime."""
if not isinstance(value, (int, long, float)):
raise ValueError(
'Expecting a number, got %s instead' % type(value).__name__)
return EPOCH + datetime.timedelta(microseconds=value)
def parse_info_property(value):
"""Parses the buildbot build property containing buildbucket INFO."""
try:
# Be compatible with existing builds that still store a dict.
if isinstance(value, dict):
# Due to a bug introduced in http://crrev.com/1328623003 (reverted), we
# may have old builds that have 'build' value serialized as JSON.
if isinstance(value.get('build'), basestring):
value['build'] = json.loads(value['build'])
return value
parsed = json.loads(value)
if not isinstance(parsed, dict):
raise ValueError('must be a JSON dict')
return parsed
except ValueError as e:
# Include value of the buildbucket property into the
msg = 'failed to parse %s property "%s": %s' % (INFO_PROPERTY, value, e)
log(msg, logging.ERROR)
raise ValueError(msg)
| # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import json
import logging
from twisted.python import log as twistedLog
LOG_PREFIX = '[buildbucket] '
# Buildbot-related constants.
BUILD_PROPERTY = 'build'
BUILDBUCKET_CHANGE_ID_PROPERTY = 'change_id'
CHANGE_CATEGORY = 'buildbucket'
CHANGE_REASON = 'buildbucket'
INFO_PROPERTY = 'buildbucket' # A Buildbot property for buildbucket info.
# UTC datetime corresponding to zero Unix timestamp.
EPOCH = datetime.datetime.utcfromtimestamp(0)
class Error(Exception):
"""Buildbucket-specific error."""
def log(message, level=None):
if level is None:
level = logging.INFO
twistedLog.msg('%s%s' % (LOG_PREFIX, message), loglevel=level)
def log_on_error(deferred, msg_prefix=None):
msg_prefix = msg_prefix or ''
def on_failure(failure):
msg = msg_prefix
if msg:
msg += ': '
msg += '%s' % failure
log(msg, level=logging.ERROR)
deferred.addErrback(on_failure)
# Copied from "utils" appengine component
# https://chromium.googlesource.com/infra/swarming/+/master/appengine/components/components/utils.py
def datetime_to_timestamp(value):
"""Converts UTC datetime to integer timestamp in microseconds since epoch."""
if not isinstance(value, datetime.datetime):
raise ValueError(
'Expecting datetime object, got %s instead' % type(value).__name__)
if value.tzinfo is not None:
raise ValueError('Only UTC datetime is supported')
dt = value - EPOCH
return dt.microseconds + 1000 * 1000 * (dt.seconds + 24 * 3600 * dt.days)
# Copied from "utils" appengine component
# https://chromium.googlesource.com/infra/swarming/+/master/appengine/components/components/utils.py
def timestamp_to_datetime(value):
"""Converts integer timestamp in microseconds since epoch to UTC datetime."""
if not isinstance(value, (int, long, float)):
raise ValueError(
'Expecting a number, got %s instead' % type(value).__name__)
return EPOCH + datetime.timedelta(microseconds=value)
def parse_info_property(value):
"""Parses the buildbot build property containing buildbucket INFO."""
try:
# Be compatible with existing builds that still store a dict.
if isinstance(value, dict):
# Due to a bug introduced in http://crrev.com/1328623003 (reverted), we
# may have old builds that have 'build' value serialized as JSON.
if isinstance(value['build'], basestring):
value['build'] = json.loads(value['build'])
return value
parsed = json.loads(value)
if not isinstance(parsed, dict):
raise ValueError('must be a JSON dict')
return parsed
except ValueError as e:
# Include value of the buildbucket property into the
msg = 'failed to parse %s property "%s": %s' % (INFO_PROPERTY, value, e)
log(msg, logging.ERROR)
raise ValueError(msg)
| bsd-3-clause | Python |
8be03d5a8508c982f6626a5f756bdf267f3dbe6d | Update echo_bot.py | Cretezy/pymessenger2,davidchua/pymessenger,karlinnolabs/pymessenger | examples/echo_bot.py | examples/echo_bot.py | """
This bot listens to port 5002 for incoming connections from Facebook. It takes
in any messages that the bot receives and echos it back.
"""
from flask import Flask, request
from pymessenger.bot import Bot
app = Flask(__name__)
ACCESS_TOKEN = ""
VERIFY_TOKEN = ""
bot = Bot(ACCESS_TOKEN)
@app.route("/", methods=['GET', 'POST'])
def hello():
if request.method == 'GET':
if request.args.get("hub.verify_token") == VERIFY_TOKEN:
return request.args.get("hub.challenge")
else:
return 'Invalid verification token'
if request.method == 'POST':
output = request.get_json()
for event in output['entry']:
messaging = event['messaging']
for x in messaging:
if x.get('message'):
recipient_id = x['sender']['id']
if x['message'].get('text'):
message = x['message']['text']
bot.send_text_message(recipient_id, message)
if x['message'].get('attachments'):
for att in x['message'].get('attachments'):
bot.send_attachment_url(recipient_id, att['type'], att['payload']['url'])
else:
pass
return "Success"
if __name__ == "__main__":
app.run(port=5002, debug=True)
| """
This bot listens to port 5002 for incoming connections from Facebook. It takes
in any messages that the bot receives and echos it back.
"""
from flask import Flask, request
from pymessenger.bot import Bot
app = Flask(__name__)
ACCESS_TOKEN = ""
VERIFY_TOKEN = ""
bot = Bot(ACCESS_TOKEN)
@app.route("/", methods=['GET', 'POST'])
def hello():
if request.method == 'GET':
if request.args.get("hub.verify_token") == VERIFY_TOKEN:
return request.args.get("hub.challenge")
else:
return 'Invalid verification token'
if request.method == 'POST':
output = request.get_json()
for event in output['entry']:
messaging = event['messaging']
for x in messaging:
if x.get('message'):
recipient_id = x['sender']['id']
if x['message'].get('text'):
message = x['message']['text']
bot.send_text_message(recipient_id, message)
if x['message'].get('attachment'):
bot.send_attachment_url(recipient_id, x['message']['attachment']['type'],
x['message']['attachment']['payload']['url'])
else:
pass
return "Success"
if __name__ == "__main__":
app.run(port=5002, debug=True)
| mit | Python |
624b3a9606c27076562ed522c919e480d72b86f9 | Allow passing multiple tokens to write() | edgedb/edgedb,edgedb/edgedb,edgedb/edgedb | edgedb/lang/common/ast/codegen.py | edgedb/lang/common/ast/codegen.py | ##
# Portions Copyright (c) 2008-2010 MagicStack Inc.
# Portions Copyright (c) 2008 Armin Ronacher.
# All rights reserved.
#
# This code is licensed under the PSFL license.
##
import itertools
from .visitor import NodeVisitor
class SourceGenerator(NodeVisitor):
"""This visitor is able to transform a well formed syntax tree into python
sourcecode. For more details have a look at the docstring of the
`node_to_source` function.
"""
def __init__(self, indent_with=' '*4, add_line_information=False,
pretty=True):
self.result = []
self.indent_with = indent_with
self.add_line_information = add_line_information
self.indentation = 0
self.new_lines = 0
self.current_line = 1
self.pretty = pretty
def write(self, *x, delimiter=None):
if self.new_lines:
if self.result and self.pretty:
self.current_line += self.new_lines
self.result.append('\n' * self.new_lines)
if self.pretty:
self.result.append(self.indent_with * self.indentation)
else:
self.result.append(' ')
self.new_lines = 0
if delimiter:
self.result.append(x[0])
chain = itertools.chain.from_iterable
chunks = chain((delimiter, v) for v in x[1:])
else:
chunks = x
for chunk in chunks:
if chunk is None:
raise ValueError('invalid text chunk in codegen')
self.result.append(chunk)
def newline(self, node=None, extra=0):
self.new_lines = max(self.new_lines, 1 + extra)
if node is not None and self.add_line_information:
self.write('# line: %s' % node.lineno)
self.new_lines = 1
@classmethod
def to_source(cls, node, indent_with=' '*4, add_line_information=False,
pretty=True):
generator = cls(indent_with, add_line_information, pretty=pretty)
generator.visit(node)
return ''.join(generator.result)
| ##
# Portions Copyright (c) 2008-2010 MagicStack Inc.
# Portions Copyright (c) 2008 Armin Ronacher.
# All rights reserved.
#
# This code is licensed under the PSFL license.
##
from .visitor import NodeVisitor
class SourceGenerator(NodeVisitor):
"""This visitor is able to transform a well formed syntax tree into python
sourcecode. For more details have a look at the docstring of the
`node_to_source` function.
"""
def __init__(self, indent_with=' '*4, add_line_information=False,
pretty=True):
self.result = []
self.indent_with = indent_with
self.add_line_information = add_line_information
self.indentation = 0
self.new_lines = 0
self.current_line = 1
self.pretty = pretty
def write(self, x):
if self.new_lines:
if self.result and self.pretty:
self.current_line += self.new_lines
self.result.append('\n' * self.new_lines)
if self.pretty:
self.result.append(self.indent_with * self.indentation)
else:
self.result.append(' ')
self.new_lines = 0
self.result.append(x)
def newline(self, node=None, extra=0):
self.new_lines = max(self.new_lines, 1 + extra)
if node is not None and self.add_line_information:
self.write('# line: %s' % node.lineno)
self.new_lines = 1
@classmethod
def to_source(cls, node, indent_with=' '*4, add_line_information=False,
pretty=True):
generator = cls(indent_with, add_line_information, pretty=pretty)
generator.visit(node)
return ''.join(generator.result)
| apache-2.0 | Python |
f7611e37ef1e0dfaa568515be365d50b3edbd11c | Fix plugin import for astropy 2.x | astropy/ccdproc,mwcraig/ccdproc | ccdproc/conftest.py | ccdproc/conftest.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# this contains imports plugins that configure py.test for astropy tests.
# by importing them here in conftest.py they are discoverable by py.test
# no matter how it is invoked within the source tree.
import os
try:
from astropy.tests.plugins.display import (pytest_report_header,
PYTEST_HEADER_MODULES,
TESTED_VERSIONS)
except ImportError:
# When using astropy 2.0
from astropy.tests.pytest_plugins import (pytest_report_header,
PYTEST_HEADER_MODULES,
TESTED_VERSIONS)
try:
# This is the way to get plugins in astropy 2.x
from astropy.tests.pytest_plugins import *
except ImportError:
# Otherwise they are installed as separate packages that pytest
# automagically finds.
pass
from .tests.pytest_fixtures import *
# This is to figure out ccdproc version, rather than using Astropy's
try:
from .version import version
except ImportError:
version = 'dev'
packagename = os.path.basename(os.path.dirname(__file__))
TESTED_VERSIONS[packagename] = version
# Uncomment the following line to treat all DeprecationWarnings as
# exceptions
# enable_deprecations_as_exceptions()
# Add astropy to test header information and remove unused packages.
try:
PYTEST_HEADER_MODULES['Astropy'] = 'astropy'
PYTEST_HEADER_MODULES['astroscrappy'] = 'astroscrappy'
PYTEST_HEADER_MODULES['reproject'] = 'reproject'
del PYTEST_HEADER_MODULES['h5py']
except KeyError:
pass
| # Licensed under a 3-clause BSD style license - see LICENSE.rst
# this contains imports plugins that configure py.test for astropy tests.
# by importing them here in conftest.py they are discoverable by py.test
# no matter how it is invoked within the source tree.
import os
try:
from astropy.tests.plugins.display import (pytest_report_header,
PYTEST_HEADER_MODULES,
TESTED_VERSIONS)
except ImportError:
# When using astropy 2.0
from astropy.tests.pytest_plugins import (pytest_report_header,
PYTEST_HEADER_MODULES,
TESTED_VERSIONS)
from .tests.pytest_fixtures import *
# This is to figure out ccdproc version, rather than using Astropy's
try:
from .version import version
except ImportError:
version = 'dev'
packagename = os.path.basename(os.path.dirname(__file__))
TESTED_VERSIONS[packagename] = version
# Uncomment the following line to treat all DeprecationWarnings as
# exceptions
# enable_deprecations_as_exceptions()
# Add astropy to test header information and remove unused packages.
try:
PYTEST_HEADER_MODULES['Astropy'] = 'astropy'
PYTEST_HEADER_MODULES['astroscrappy'] = 'astroscrappy'
PYTEST_HEADER_MODULES['reproject'] = 'reproject'
del PYTEST_HEADER_MODULES['h5py']
except KeyError:
pass
| bsd-3-clause | Python |
f5daf719d9d358522feec212be5b538399faf4fd | Add missing Coore Actions (#116) | steve-bate/openhab2-jython | Core/automation/lib/python/core/actions.py | Core/automation/lib/python/core/actions.py | import sys
from core import osgi
__all__ = []
oh1_actions = osgi.find_services("org.openhab.core.scriptengine.action.ActionService", None) or []
oh2_actions = osgi.find_services("org.eclipse.smarthome.model.script.engine.action.ActionService", None) or []
_module = sys.modules[__name__]
for s in oh1_actions + oh2_actions:
action_class = s.actionClass
name = action_class.simpleName
setattr(_module, name, action_class)
__all__.append(name)
try:
from org.openhab.core.model.script.actions import Exec
from org.openhab.core.model.script.actions import HTTP
from org.openhab.core.model.script.actions import LogAction
from org.openhab.core.model.script.actions import Ping
except:
from org.eclipse.smarthome.model.script.actions import Exec
from org.eclipse.smarthome.model.script.actions import HTTP
from org.eclipse.smarthome.model.script.actions import LogAction
from org.eclipse.smarthome.model.script.actions import Ping
static_imports = [Exec, HTTP, LogAction, Ping]
for s in static_imports:
name = s.simpleName
setattr(_module, name, s)
__all__.append(name)
| import sys
from core import osgi
__all__ = []
oh1_actions = osgi.find_services("org.openhab.core.scriptengine.action.ActionService", None) or []
oh2_actions = osgi.find_services("org.eclipse.smarthome.model.script.engine.action.ActionService", None) or []
_module = sys.modules[__name__]
for s in oh1_actions + oh2_actions:
action_class = s.actionClass
name = action_class.simpleName
setattr(_module, name, action_class)
__all__.append(name)
| epl-1.0 | Python |
f340dfe4bad3685541050cb91a9cdce6808ad9a1 | remove call log | lite3/adbtool | adbtool/cmd.py | adbtool/cmd.py | #!/usr/bin/env python
# encoding=utf-8
import os
import sys
import shlex
import subprocess
import math
# return (output, isOk)
def call(cmd, printOutput=False):
# print("call %s" % cmd)
output = None
isOk = True
if sys.platform == 'win32':
args = cmd
else:
# linux must split arguments
args = shlex.split(cmd)
try:
if printOutput:
isOk = subprocess.call(args) == 0
else:
output = subprocess.check_output(args)
# python3 output is bytes
output = output.decode("utf-8")
return (output, isOk)
except subprocess.CalledProcessError as e:
print(e.output)
return (e.output, isOk)
def getAdb():
androidHome = os.getenv('ANDROID_HOME')
if androidHome is None:
androidHome = os.getenv('ANDROID_SDK')
if androidHome is None:
# print('can not found ANDROID_HOME/ANDROID_SDK in environment value')
return "adb"
return os.path.join(androidHome, 'platform-tools/adb')
def versionnum(a):
arr = a.split('.')
multiple = 10000
n = 0
for i in range(0, min(len(arr), 3)):
n += int(arr[i]) * multiple
multiple /= 100
return n
def getAapt(vername = None):
androidHome = os.getenv('ANDROID_HOME')
if androidHome is None:
androidHome = os.getenv('ANDROID_SDK')
if androidHome is None:
print('can not found ANDROID_HOME/ANDROID_SDK in environment value')
return "aapt"
aaptname = 'aapt.exe' if sys.platform == 'win32' else'aapt'
buildtools = os.path.join(androidHome, 'build-tools')
if os.path.isdir(buildtools):
dirs = os.listdir(buildtools)
dirs.sort(reverse = True, key=versionnum)
for dir in dirs:
filename = os.path.join(buildtools, dir, aaptname)
if os.path.isfile(filename):
return filename
print('can not found aapt in ANDROID_HOME/ANDROID_SDK')
| #!/usr/bin/env python
# encoding=utf-8
import os
import sys
import shlex
import subprocess
import math
# return (output, isOk)
def call(cmd, printOutput=False):
print("call %s" % cmd)
output = None
isOk = True
if sys.platform == 'win32':
args = cmd
else:
# linux must split arguments
args = shlex.split(cmd)
try:
if printOutput:
isOk = subprocess.call(args) == 0
else:
output = subprocess.check_output(args)
# python3 output is bytes
output = output.decode("utf-8")
return (output, isOk)
except subprocess.CalledProcessError as e:
print(e.output)
return (e.output, isOk)
def getAdb():
androidHome = os.getenv('ANDROID_HOME')
if androidHome is None:
androidHome = os.getenv('ANDROID_SDK')
if androidHome is None:
print('can not found ANDROID_HOME/ANDROID_SDK in environment value')
return "adb"
return os.path.join(androidHome, 'platform-tools/adb')
def versionnum(a):
arr = a.split('.')
multiple = 10000
n = 0
for i in range(0, min(len(arr), 3)):
n += int(arr[i]) * multiple
multiple /= 100
return n
def getAapt(vername = None):
androidHome = os.getenv('ANDROID_HOME')
if androidHome is None:
androidHome = os.getenv('ANDROID_SDK')
if androidHome is None:
print('can not found ANDROID_HOME/ANDROID_SDK in environment value')
return "aapt"
aaptname = 'aapt.exe' if sys.platform == 'win32' else'aapt'
buildtools = os.path.join(androidHome, 'build-tools')
if os.path.isdir(buildtools):
dirs = os.listdir(buildtools)
dirs.sort(reverse = True, key=versionnum)
for dir in dirs:
filename = os.path.join(buildtools, dir, aaptname)
if os.path.isfile(filename):
return filename
print('can not found aapt in ANDROID_HOME/ANDROID_SDK')
| mit | Python |
e410bad282d8129ed3099f1fa1edd09d82aaf77c | bump version [ci skip] | cenkalti/kuyruk,cenkalti/kuyruk | kuyruk/__init__.py | kuyruk/__init__.py | from __future__ import absolute_import
import logging
from kuyruk.kuyruk import Kuyruk
from kuyruk.worker import Worker
from kuyruk.task import Task
from kuyruk.config import Config
__version__ = '0.14.7'
try:
# not available in python 2.6
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Add NullHandler to prevent logging warnings on startup
null_handler = NullHandler()
logging.getLogger('kuyruk').addHandler(null_handler)
logging.getLogger('pika').addHandler(null_handler)
| from __future__ import absolute_import
import logging
from kuyruk.kuyruk import Kuyruk
from kuyruk.worker import Worker
from kuyruk.task import Task
from kuyruk.config import Config
__version__ = '0.14.6'
try:
# not available in python 2.6
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Add NullHandler to prevent logging warnings on startup
null_handler = NullHandler()
logging.getLogger('kuyruk').addHandler(null_handler)
logging.getLogger('pika').addHandler(null_handler)
| mit | Python |
ca82a17a51842e4631ba2e1d30cc76feede8e3eb | add def Overview | kaduuuken/achievementsystem,kaduuuken/achievementsystem | achievements/views.py | achievements/views.py | from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.views.generic import ListView
from models import Category, Achievement, Trophies
import settings
def Overview(request):
category_list = Category.objects.filter(parent_category__isnull=True)
trophies_list = [None]*(settings.SET_PARAMETER+1)
trophies = Trophies.objects.all()
for trophy in trophies:
trophies_list[trophy.position] = trophy.achievement
print trophies_list
return render_to_response('achievements/index.html',{'category_list': category_list, 'trophies_list': trophies_list}) | # Create your views here.
| bsd-2-clause | Python |
e669a86ab18331ec5a5f9d568cf9b3363bae7bb4 | Make dacapo harness time re private. | fhirschmann/penchy,fhirschmann/penchy | penchy/jobs/filters.py | penchy/jobs/filters.py | """
This module provides filters.
"""
import re
from pprint import pprint
from penchy.jobs.elements import Filter, SystemFilter
class Tamiflex(Filter):
pass
class HProf(Filter):
pass
class DacapoHarness(Filter):
"""
Filters output of a DaCapo Harness.
Inputs:
- ``stderr``: List of Paths to stderror output files
- ``exit_code``: List of program exit codes
Outputs:
- ``failures``: failure count per invocation ([int])
- ``times``: execution time per itertion per invocation ([[int]])
- ``valid``: flag that indicates if execution was valid
"""
inputs = [('stderr', list, str),
('exit_code', list, int)]
outputs = [('failures', list, int, int),
('times', list, list, int),
('valid', list, bool)]
_TIME_RE = re.compile(
r"""
(?:completed\ warmup\ \d+| # for iterations
(?P<success>FAILED|PASSED)) # check if run failed or passed
\ in\ (?P<time>\d+)\ msec # time of execution
""", re.VERBOSE)
def _run(self, **kwargs):
exit_codes = kwargs['exit_code']
stderror = kwargs['stderr']
for f, exit_code in zip(stderror, exit_codes):
failures = 0
times = []
with open(f) as fobj:
buf = fobj.read()
for match in DacapoHarness._TIME_RE.finditer(buf):
success, time = match.groups()
if success is not None and success == 'FAILED':
failures += 1
times.append(int(time))
self.out['failures'].append(failures)
self.out['times'].append(times)
self.out['valid'].append(exit_code == 0 and failures == 0)
class Send(SystemFilter):
inputs = [('environment', dict),
('payload', object)]
pass
class Receive(SystemFilter):
inputs = [('environment', dict)]
outputs = [('results', dict, list)]
pass
class Print(Filter):
"""
Prints everything fed to it on stdout.
"""
inputs = None
def run(self, **kwargs): # pragma: no cover
pprint(kwargs)
class Plot(Filter):
pass
class Upload(Filter):
pass
| """
This module provides filters.
"""
import re
from pprint import pprint
from penchy.jobs.elements import Filter, SystemFilter
class Tamiflex(Filter):
pass
class HProf(Filter):
pass
class DacapoHarness(Filter):
"""
Filters output of a DaCapo Harness.
Inputs:
- ``stderr``: List of Paths to stderror output files
- ``exit_code``: List of program exit codes
Outputs:
- ``failures``: failure count per invocation ([int])
- ``times``: execution time per itertion per invocation ([[int]])
- ``valid``: flag that indicates if execution was valid
"""
inputs = [('stderr', list, str),
('exit_code', list, int)]
outputs = [('failures', list, int, int),
('times', list, list, int),
('valid', list, bool)]
TIME_RE = re.compile(
r"""
(?:completed\ warmup\ \d+| # for iterations
(?P<success>FAILED|PASSED)) # check if run failed or passed
\ in\ (?P<time>\d+)\ msec # time of execution
""", re.VERBOSE)
def _run(self, **kwargs):
exit_codes = kwargs['exit_code']
stderror = kwargs['stderr']
for f, exit_code in zip(stderror, exit_codes):
failures = 0
times = []
with open(f) as fobj:
buf = fobj.read()
for match in DacapoHarness.TIME_RE.finditer(buf):
success, time = match.groups()
if success is not None and success == 'FAILED':
failures += 1
times.append(int(time))
self.out['failures'].append(failures)
self.out['times'].append(times)
self.out['valid'].append(exit_code == 0 and failures == 0)
class Send(SystemFilter):
inputs = [('environment', dict),
('payload', object)]
pass
class Receive(SystemFilter):
inputs = [('environment', dict)]
outputs = [('results', dict, list)]
pass
class Print(Filter):
"""
Prints everything fed to it on stdout.
"""
inputs = None
def run(self, **kwargs): # pragma: no cover
pprint(kwargs)
class Plot(Filter):
pass
class Upload(Filter):
pass
| mit | Python |
aa27510776dec590b4acaa8104ae078664c0d96e | Add WrongInputException, signals that filter rcved invalid input. | fhirschmann/penchy,fhirschmann/penchy | penchy/jobs/filters.py | penchy/jobs/filters.py | """
This module provides filters.
"""
import re
from pprint import pprint
from penchy.jobs.elements import Filter, SystemFilter
class WrongInputError(Exception):
"""
Filter received input it was not expecting and cannot process.
"""
pass
class Tamiflex(Filter):
pass
class HProf(Filter):
pass
class DacapoHarness(Filter):
"""
Filters output of a DaCapo Harness.
Inputs:
- ``stderr``: List of Paths to stderror output files
- ``exit_code``: List of program exit codes
Outputs:
- ``failures``: failure count per invocation ([int])
- ``times``: execution time per itertion per invocation ([[int]])
- ``valid``: flag that indicates if execution was valid
"""
inputs = [('stderr', list, str),
('exit_code', list, int)]
outputs = [('failures', list, int, int),
('times', list, list, int),
('valid', list, bool)]
_TIME_RE = re.compile(
r"""
(?:completed\ warmup\ \d+| # for iterations
(?P<success>FAILED|PASSED)) # check if run failed or passed
\ in\ (?P<time>\d+)\ msec # time of execution
""", re.VERBOSE)
def _run(self, **kwargs):
exit_codes = kwargs['exit_code']
stderror = kwargs['stderr']
for f, exit_code in zip(stderror, exit_codes):
failures = 0
times = []
with open(f) as fobj:
buf = fobj.read()
for match in DacapoHarness._TIME_RE.finditer(buf):
success, time = match.groups()
if success is not None and success == 'FAILED':
failures += 1
times.append(int(time))
self.out['failures'].append(failures)
self.out['times'].append(times)
self.out['valid'].append(exit_code == 0 and failures == 0)
class Send(SystemFilter):
inputs = [('environment', dict),
('payload', object)]
pass
class Receive(SystemFilter):
inputs = [('environment', dict)]
outputs = [('results', dict, list)]
pass
class Print(Filter):
"""
Prints everything fed to it on stdout.
"""
inputs = None
def run(self, **kwargs): # pragma: no cover
pprint(kwargs)
class Plot(Filter):
pass
class Upload(Filter):
pass
| """
This module provides filters.
"""
import re
from pprint import pprint
from penchy.jobs.elements import Filter, SystemFilter
class Tamiflex(Filter):
pass
class HProf(Filter):
pass
class DacapoHarness(Filter):
"""
Filters output of a DaCapo Harness.
Inputs:
- ``stderr``: List of Paths to stderror output files
- ``exit_code``: List of program exit codes
Outputs:
- ``failures``: failure count per invocation ([int])
- ``times``: execution time per itertion per invocation ([[int]])
- ``valid``: flag that indicates if execution was valid
"""
inputs = [('stderr', list, str),
('exit_code', list, int)]
outputs = [('failures', list, int, int),
('times', list, list, int),
('valid', list, bool)]
_TIME_RE = re.compile(
r"""
(?:completed\ warmup\ \d+| # for iterations
(?P<success>FAILED|PASSED)) # check if run failed or passed
\ in\ (?P<time>\d+)\ msec # time of execution
""", re.VERBOSE)
def _run(self, **kwargs):
exit_codes = kwargs['exit_code']
stderror = kwargs['stderr']
for f, exit_code in zip(stderror, exit_codes):
failures = 0
times = []
with open(f) as fobj:
buf = fobj.read()
for match in DacapoHarness._TIME_RE.finditer(buf):
success, time = match.groups()
if success is not None and success == 'FAILED':
failures += 1
times.append(int(time))
self.out['failures'].append(failures)
self.out['times'].append(times)
self.out['valid'].append(exit_code == 0 and failures == 0)
class Send(SystemFilter):
inputs = [('environment', dict),
('payload', object)]
pass
class Receive(SystemFilter):
inputs = [('environment', dict)]
outputs = [('results', dict, list)]
pass
class Print(Filter):
"""
Prints everything fed to it on stdout.
"""
inputs = None
def run(self, **kwargs): # pragma: no cover
pprint(kwargs)
class Plot(Filter):
pass
class Upload(Filter):
pass
| mit | Python |
3d283ff2eff893f3dee72cb53199a9a87c5f6e12 | Update SensorClasses.py | purduerov/X9-Core,purduerov/X9-Core,purduerov/X9-Core,purduerov/X9-Core,purduerov/X9-Core,purduerov/X9-Core | sensors/SensorClass/SensorClasses.py | sensors/SensorClass/SensorClasses.py | from Adafruit_BNO055 import BNO055
import logging
import sys
import time
# Create and configure the BNO sensor connection. Make sure only ONE of the
# below 'bno = ...' lines is uncommented:
# Raspberry Pi configuration with serial UART and RST connected to GPIO 18:
# if not bno.begin():
# raise RuntimeError('Failed to initialize BNO055! Check the Sensor DUMBASS')
class IMU(object):
def __init__(self):
bno = BNO055.BNO055(rst=18)
bno.begin()
def imu_get_data(self):
heading, roll, pitch = bno.read_euler()
gyro_x, gyro_y, gyro_z = bno.read_gyroscope()
accel_x, accel_y, accel_z = bno.read_accelerometer()
LinAccel_x, LinAccel_y, LinAccel_z = bno.read_linear_accelerometer()
temp = bno.read_temp()
return {'Heading': heading, 'Roll': roll, 'Pitch': pitch, 'Gyro-X': gyro_x, 'Gyro-Y': gyro_y, 'Gyro-Z': gyro_z,
'Acceleration-X': accel_x, 'Acceleration-Y': accel_y, 'Acceleration-Z': accel_z,
'Linear Acceleration-X': LinAccel_x, 'Linear Acceleration-Y': LinAccel_y, 'Linear Acceleration-Z': LinAccel_z,
'Temp' : temp}
def get_calibration(self):
cal_array = bno.get_calibration()
return cal_array
def reset_calibration(self):
cal_array_original = self.get_calibration()
bno.set_calibration(bno.get_calibration())
return cal_array_original
def set_calibration(self, data):
bno.set_calibration(data)
return
def sitrep (self):
sys, gyro, accel, mag = bno.get_calibration_status()
sys_stat, sys_test, sys_err = bno.get_system_status(True)
good_status = [3,3,3,3,1,0x0F,0]
test_array = [sys,gyro,accel,mag,sys_stat, sys_test, sys_err]
for x in range(0, 4):
if test_array[x] != 3:
return False
if test_array[4] == 1:
return False
if test_array[5] != 0x0F:
return False
if test_array[6] != 0:
return False
return True
| from Adafruit_BNO055 import BNO055
import logging
import sys
import time
# Create and configure the BNO sensor connection. Make sure only ONE of the
# below 'bno = ...' lines is uncommented:
# Raspberry Pi configuration with serial UART and RST connected to GPIO 18:
# BeagleBone Black configuration with default I2C connection (SCL=P9_19, SDA=P9_20),
# and RST connected to pin P9_12:
# bno = BNO055.BNO055(rst='P9_12')
# if not bno.begin():
# raise RuntimeError('Failed to initialize BNO055! Check the Sensor DUMBASS')
class IMU(object):
def __init__(self):
bno.begin()
bno = BNO055.BNO055(rst=18)
def imu_get_data(self):
heading, roll, pitch = bno.read_euler()
gyro_x, gyro_y, gyro_z = bno.read_gyroscope()
accel_x, accel_y, accel_z = bno.read_accelerometer()
LinAccel_x, LinAccel_y, LinAccel_z = bno.read_linear_accelerometer()
temp = bno.read_temp()
return {'Heading': heading, 'Roll': roll, 'Pitch': pitch, 'Gyro-X': gyro_x, 'Gyro-Y': gyro_y, 'Gyro-Z': gyro_z,
'Acceleration-X': accel_x, 'Acceleration-Y': accel_y, 'Acceleration-Z': accel_z,
'Linear Acceleration-X': LinAccel_x, 'Linear Acceleration-Y': LinAccel_y, 'Linear Acceleration-Z': LinAccel_z,
'Temp' : temp}
def get_calibration(self):
cal_array = bno.get_calibration()
return cal_array
def reset_calibration(self):
cal_array_original = self.get_calibration()
bno.set_calibration(bno.get_calibration())
return cal_array_original
def set_calibration(self, data):
bno.set_calibration(data)
return
def sitrep (self):
sys, gyro, accel, mag = bno.get_calibration_status()
sys_stat, sys_test, sys_err = bno.get_system_status(True)
good_status = [3,3,3,3,1,0x0F,0]
test_array = [sys,gyro,accel,mag,sys_stat, sys_test, sys_err]
for x in range(0, 4):
if test_array[x] != 3:
return False
if test_array[4] == 1:
return False
if test_array[5] != 0x0F:
return False
if test_array[6] != 0:
return False
return True
| mit | Python |
64ec4e02fe84a729b6a25bcdbdaf7946c1922b5f | Add manage to admin tasks | RomanZWang/osf.io,mluke93/osf.io,kwierman/osf.io,felliott/osf.io,binoculars/osf.io,chennan47/osf.io,brianjgeiger/osf.io,asanfilippo7/osf.io,mluo613/osf.io,zamattiac/osf.io,emetsger/osf.io,caneruguz/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,RomanZWang/osf.io,abought/osf.io,binoculars/osf.io,HalcyonChimera/osf.io,emetsger/osf.io,adlius/osf.io,brianjgeiger/osf.io,DanielSBrown/osf.io,KAsante95/osf.io,icereval/osf.io,GageGaskins/osf.io,cslzchen/osf.io,rdhyee/osf.io,laurenrevere/osf.io,Ghalko/osf.io,CenterForOpenScience/osf.io,TomHeatwole/osf.io,TomBaxter/osf.io,samchrisinger/osf.io,abought/osf.io,cwisecarver/osf.io,wearpants/osf.io,rdhyee/osf.io,KAsante95/osf.io,acshi/osf.io,crcresearch/osf.io,kwierman/osf.io,brianjgeiger/osf.io,acshi/osf.io,amyshi188/osf.io,adlius/osf.io,aaxelb/osf.io,amyshi188/osf.io,zachjanicki/osf.io,mluo613/osf.io,amyshi188/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,asanfilippo7/osf.io,felliott/osf.io,chrisseto/osf.io,zachjanicki/osf.io,amyshi188/osf.io,cwisecarver/osf.io,kch8qx/osf.io,doublebits/osf.io,KAsante95/osf.io,kch8qx/osf.io,caseyrollins/osf.io,asanfilippo7/osf.io,CenterForOpenScience/osf.io,RomanZWang/osf.io,laurenrevere/osf.io,doublebits/osf.io,mfraezz/osf.io,monikagrabowska/osf.io,HalcyonChimera/osf.io,DanielSBrown/osf.io,icereval/osf.io,CenterForOpenScience/osf.io,kch8qx/osf.io,Ghalko/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,felliott/osf.io,mluo613/osf.io,zamattiac/osf.io,monikagrabowska/osf.io,sloria/osf.io,felliott/osf.io,pattisdr/osf.io,billyhunt/osf.io,icereval/osf.io,acshi/osf.io,TomBaxter/osf.io,SSJohns/osf.io,Nesiehr/osf.io,RomanZWang/osf.io,chrisseto/osf.io,leb2dg/osf.io,rdhyee/osf.io,aaxelb/osf.io,adlius/osf.io,caneruguz/osf.io,KAsante95/osf.io,mluke93/osf.io,zamattiac/osf.io,Nesiehr/osf.io,pattisdr/osf.io,adlius/osf.io,acshi/osf.io,erinspace/osf.io,mluo613/osf.io,pattisdr/osf.io,hmoco/osf.io,SSJohns/osf.io,Johnetordoff/osf.io,brandonPurvis/osf.io,TomBaxter/osf.io,doublebits/osf.io,sloria/osf.io,leb2dg/osf.io,zachjanicki/osf.io,doublebits/osf.io,monikagrabowska/osf.io,mfraezz/osf.io,Nesiehr/osf.io,brandonPurvis/osf.io,crcresearch/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,billyhunt/osf.io,mluke93/osf.io,alexschiller/osf.io,SSJohns/osf.io,samchrisinger/osf.io,hmoco/osf.io,alexschiller/osf.io,caseyrollins/osf.io,crcresearch/osf.io,kwierman/osf.io,jnayak1/osf.io,KAsante95/osf.io,caneruguz/osf.io,Johnetordoff/osf.io,billyhunt/osf.io,saradbowman/osf.io,cslzchen/osf.io,Nesiehr/osf.io,erinspace/osf.io,baylee-d/osf.io,zachjanicki/osf.io,kch8qx/osf.io,wearpants/osf.io,aaxelb/osf.io,hmoco/osf.io,abought/osf.io,alexschiller/osf.io,TomHeatwole/osf.io,DanielSBrown/osf.io,acshi/osf.io,caseyrollins/osf.io,brandonPurvis/osf.io,TomHeatwole/osf.io,leb2dg/osf.io,jnayak1/osf.io,GageGaskins/osf.io,jnayak1/osf.io,cwisecarver/osf.io,laurenrevere/osf.io,GageGaskins/osf.io,cslzchen/osf.io,cwisecarver/osf.io,emetsger/osf.io,rdhyee/osf.io,erinspace/osf.io,mattclark/osf.io,Ghalko/osf.io,mluo613/osf.io,sloria/osf.io,alexschiller/osf.io,chennan47/osf.io,chrisseto/osf.io,billyhunt/osf.io,GageGaskins/osf.io,doublebits/osf.io,brandonPurvis/osf.io,billyhunt/osf.io,Johnetordoff/osf.io,mluke93/osf.io,cslzchen/osf.io,mfraezz/osf.io,monikagrabowska/osf.io,chrisseto/osf.io,jnayak1/osf.io,brandonPurvis/osf.io,chennan47/osf.io,kch8qx/osf.io,SSJohns/osf.io,leb2dg/osf.io,mattclark/osf.io,GageGaskins/osf.io,TomHeatwole/osf.io,saradbowman/osf.io,samchrisinger/osf.io,RomanZWang/osf.io,alexschiller/osf.io,binoculars/osf.io,Ghalko/osf.io,wearpants/osf.io,caneruguz/osf.io,monikagrabowska/osf.io,zamattiac/osf.io,samchrisinger/osf.io,emetsger/osf.io,asanfilippo7/osf.io,hmoco/osf.io,kwierman/osf.io,mattclark/osf.io,baylee-d/osf.io,wearpants/osf.io,DanielSBrown/osf.io,abought/osf.io | admin/tasks.py | admin/tasks.py | import os
from invoke import task, run
from website import settings
HERE = os.path.dirname(os.path.abspath(__file__))
@task()
def manage(*args):
"""Take arguments and run python manage
:param args: ex. runserver, migrate
"""
if os.getcwd() != HERE:
os.chdir(HERE)
env = 'DJANGO_SETTINGS_MODULE="admin.base.settings"'
arg_str = ' '.join(args)
cmd = '{} python ../manage.py {}'.format(env, arg_str)
run(cmd, echo=True, pty=True)
@task()
def assets(dev=False, watch=False):
"""Install and build static assets for admin."""
if os.getcwd() != HERE:
os.chdir(HERE)
npm = 'npm install'
if not dev:
npm += ' --production'
run(npm, echo=True)
bower_install()
# Always set clean=False to prevent possible mistakes
# on prod
webpack(clean=False, watch=watch, dev=dev)
@task(aliases=['pack'])
def webpack(clean=False, watch=False, dev=False):
"""Build static assets with webpack."""
if clean:
clean_assets()
if os.getcwd() != HERE:
os.chdir(HERE)
webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin',
'webpack.js')
args = [webpack_bin]
if settings.DEBUG_MODE and dev:
args += ['--colors']
else:
args += ['--progress']
if watch:
args += ['--watch']
config_file = 'webpack.admin.config.js' if dev else 'webpack.prod.config.js'
args += ['--config {0}'.format(config_file)]
command = ' '.join(args)
run(command, echo=True)
@task
def clean_assets():
"""Remove built JS files."""
public_path = os.path.join(HERE, 'static', 'public')
js_path = os.path.join(public_path, 'js')
run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['bower'])
def bower_install():
if os.getcwd() != HERE:
os.chdir(HERE)
bower_bin = os.path.join(HERE, 'node_modules', 'bower', 'bin', 'bower')
run('{} prune'.format(bower_bin), echo=True)
run('{} install'.format(bower_bin), echo=True)
| import os
from invoke import task, run
from website import settings
HERE = os.path.dirname(os.path.abspath(__file__))
@task()
def assets(dev=False, watch=False):
"""Install and build static assets for admin."""
if os.getcwd() != HERE:
os.chdir(HERE)
npm = 'npm install'
if not dev:
npm += ' --production'
run(npm, echo=True)
bower_install()
# Always set clean=False to prevent possible mistakes
# on prod
webpack(clean=False, watch=watch, dev=dev)
@task(aliases=['pack'])
def webpack(clean=False, watch=False, dev=False):
"""Build static assets with webpack."""
if clean:
clean_assets()
if os.getcwd() != HERE:
os.chdir(HERE)
webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin',
'webpack.js')
args = [webpack_bin]
if settings.DEBUG_MODE and dev:
args += ['--colors']
else:
args += ['--progress']
if watch:
args += ['--watch']
config_file = 'webpack.admin.config.js' if dev else 'webpack.prod.config.js'
args += ['--config {0}'.format(config_file)]
command = ' '.join(args)
run(command, echo=True)
@task
def clean_assets():
"""Remove built JS files."""
public_path = os.path.join(HERE, 'static', 'public')
js_path = os.path.join(public_path, 'js')
run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['bower'])
def bower_install():
if os.getcwd() != HERE:
os.chdir(HERE)
bower_bin = os.path.join(HERE, 'node_modules', 'bower', 'bin', 'bower')
run('{} prune'.format(bower_bin), echo=True)
run('{} install'.format(bower_bin), echo=True)
| apache-2.0 | Python |
a80fe93110b5d0accbfa77d7dac7afe2df5a4688 | update module to v.3 l10n_it | dhp-denero/LibrERP,dhp-denero/LibrERP,odoousers2014/LibrERP,iw3hxn/LibrERP,dhp-denero/LibrERP,iw3hxn/LibrERP,dhp-denero/LibrERP,odoousers2014/LibrERP,iw3hxn/LibrERP,dhp-denero/LibrERP,iw3hxn/LibrERP,odoousers2014/LibrERP,odoousers2014/LibrERP,odoousers2014/LibrERP,iw3hxn/LibrERP | l10n_it/__openerp__.py | l10n_it/__openerp__.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010
# OpenERP Italian Community (<http://www.openerp-italia.org>)
# Servabit srl
# Agile Business Group sagl
# Domsense srl
# Albatos srl
#
# Copyright (C) 2011-2012
# Associazione OpenERP Italia (<http://www.openerp-italia.org>)
#
# Copyright (C) 2013
# Didotech srl
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Italy - Accounting",
"version": "1.2.3",
"depends": [
'base_vat',
'account',
'account_chart',
'base_iban',
'partner_subaccount',
'l10n_it_base',
'l10n_it_account',
'base_ordered',
'base_vat_unique'
],
"author": "OpenERP Italian Community & Didotech SRL",
"description": """
Piano dei conti italiano di un'impresa generica.
Versione personalizzata per utilizzo con Zucchetti G1.
================================================
Italian accounting chart and localization. For Zucchetti G1
Personalized version
""",
"license": "AGPL-3",
"category": "Localization/Account Charts",
'website': 'http://www.openerp-italia.org/',
'data': [
'data/account.account.template.csv',
'data/account.tax.code.template.csv',
'account_chart.xml',
'data/account.tax.template.csv',
'data/account.fiscal.position.template.csv',
'data/account.fiscal.position.tax.template.csv',
'l10n_chart_it_generic.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
# 'certificate': '00926677190009155165',
'images': [
'images/config_chart_l10n_it.jpeg',
'images/l10n_it_chart.jpeg'
],
}
| # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010
# OpenERP Italian Community (<http://www.openerp-italia.org>)
# Servabit srl
# Agile Business Group sagl
# Domsense srl
# Albatos srl
#
# Copyright (C) 2011-2012
# Associazione OpenERP Italia (<http://www.openerp-italia.org>)
#
# Copyright (C) 2013
# Didotech srl
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Italy - Accounting",
"version": "1.2.3",
"depends": [
'base_vat',
'account',
'account_chart',
'base_iban',
'partner_subaccount',
'l10n_it_base',
'l10n_it_account',
'base_ordered',
'base_vat_unique'
],
"author": "OpenERP Italian Community & Didotech SRL",
"description": """
Piano dei conti italiano di un'impresa generica.
Versione personalizzata per utilizzo con Zucchetti G1.
================================================
Italian accounting chart and localization. For Zucchetti G1
Personalized version
""",
"license": "AGPL-3",
"category": "Localization/Account Charts",
'website': 'http://www.openerp-italia.org/',
'init_xml': [],
'update_xml': [
'data/account.account.template.csv',
'data/account.tax.code.template.csv',
'account_chart.xml',
'data/account.tax.template.csv',
'data/account.fiscal.position.template.csv',
'data/account.fiscal.position.tax.template.csv',
'l10n_chart_it_generic.xml',
],
'demo_xml': [],
'installable': True,
'auto_install': False,
# 'certificate': '00926677190009155165',
'images': [
'images/config_chart_l10n_it.jpeg',
'images/l10n_it_chart.jpeg'
],
}
| agpl-3.0 | Python |
5d948ab54bee6ce858b92190f885cbfe1c2203d6 | print tags in the right order | nearai/program_synthesis,nearai/program_synthesis | program_synthesis/algolisp/tools/timer.py | program_synthesis/algolisp/tools/timer.py | import collections
import time
def _print_tags(order, tags, counts={}):
result = ', '.join([
"%s=%.5f (%d)" % (tag, value, counts.get(tag, 0))
for tag, value in tags.items()
])
print(result)
class Timer(object):
def __init__(self):
self.tag_order = []
self.tags = collections.defaultdict(float)
self.counts = collections.defaultdict(int)
self.last_time = time.time()
def reset(self):
self.last_time = time.time()
def display(self):
_print_tags(self.tag_order, self.tags, self.counts)
def tag(self, tag):
if tag not in self.tags:
self.tag_order.append(tag)
self.tags[tag] = time.time() - self.last_time
self.last_time = time.time()
def acc(self, tag):
if tag not in self.tag_order:
self.tag_order.append(tag)
self.tags[tag] += time.time() - self.last_time
self.counts[tag] += 1
self.last_time = time.time()
_TIMERS = {}
def timer(name):
global _TIMERS
if name not in _TIMERS:
_TIMERS[name] = Timer()
return _TIMERS[name]
| import collections
import time
def _print_tags(tags, counts={}):
result = ', '.join([
"%s=%.5f (%d)" % (tag, value, counts.get(tag, 0))
for tag, value in tags.items()
])
print(result)
class Timer(object):
def __init__(self):
self.tags = collections.defaultdict(float)
self.counts = collections.defaultdict(int)
self.last_time = time.time()
def reset(self):
self.last_time = time.time()
def display(self):
_print_tags(self.tags, self.counts)
def tag(self, tag):
self.tags[tag] = time.time() - self.last_time
self.last_time = time.time()
def acc(self, tag):
self.tags[tag] += time.time() - self.last_time
self.counts[tag] += 1
self.last_time = time.time()
_TIMERS = {}
def timer(name):
global _TIMERS
if name not in _TIMERS:
_TIMERS[name] = Timer()
return _TIMERS[name]
| apache-2.0 | Python |
478fa936cd1451405c11fac4ae8f960a4491b5e6 | handle all errors, format exceptions that were not caught | ceph/ceph-installer,ceph/mariner-installer,ceph/ceph-installer,ceph/ceph-installer | ceph_installer/cli/main.py | ceph_installer/cli/main.py | import sys
from tambo import Transport
import ceph_installer
from ceph_installer.cli import log
from ceph_installer.cli import dev, task
from ceph_installer.cli.decorators import catches
class CephInstaller(object):
_help = """
A command line utility to install and configure Ceph using an HTTP API as a REST service
to call Ansible.
Version: %s
Global Options:
-h, --help, help Show this program's help menu
--log, --logging Set the level of logging. Acceptable values:
debug, warning, error, critical
%s
"""
mapper = {'dev': dev.Dev, 'task': task.Task}
def __init__(self, argv=None, parse=True):
self.plugin_help = "No plugins found/loaded"
if argv is None:
argv = sys.argv
if parse:
self.main(argv)
def help(self, subhelp):
version = ceph_installer.__version__
return self._help % (version, subhelp)
@catches(KeyboardInterrupt, handle_all=True, logger=log)
def main(self, argv):
parser = Transport(argv, mapper=self.mapper,
options=[], check_help=False,
check_version=False)
parser.parse_args()
parser.catch_help = self.help(parser.subhelp())
parser.catch_version = ceph_installer.__version__
parser.mapper = self.mapper
if len(argv) <= 1:
return parser.print_help()
parser.dispatch()
parser.catches_help()
parser.catches_version()
| import sys
from tambo import Transport
import ceph_installer
from ceph_installer.cli import log
from ceph_installer.cli import dev, task
from ceph_installer.cli.decorators import catches
class CephInstaller(object):
_help = """
A command line utility to install and configure Ceph using an HTTP API as a REST service
to call Ansible.
Version: %s
Global Options:
-h, --help, help Show this program's help menu
--log, --logging Set the level of logging. Acceptable values:
debug, warning, error, critical
%s
"""
mapper = {'dev': dev.Dev, 'task': task.Task}
def __init__(self, argv=None, parse=True):
self.plugin_help = "No plugins found/loaded"
if argv is None:
argv = sys.argv
if parse:
self.main(argv)
def help(self, subhelp):
version = ceph_installer.__version__
return self._help % (version, subhelp)
@catches(KeyboardInterrupt, logger=log)
def main(self, argv):
parser = Transport(argv, mapper=self.mapper,
options=[], check_help=False,
check_version=False)
parser.parse_args()
parser.catch_help = self.help(parser.subhelp())
parser.catch_version = ceph_installer.__version__
parser.mapper = self.mapper
if len(argv) <= 1:
return parser.print_help()
parser.dispatch()
parser.catches_help()
parser.catches_version()
| mit | Python |
381f8f11fb942ad672844c1828a624bd2638d8ae | Add config getter | viniciuschiele/central | configd/interpolation.py | configd/interpolation.py | """
Interpolator implementations.
"""
import re
from . import abc
from .exceptions import InterpolatorError
from .utils.compat import string_types, text_type
__all__ = [
'StrInterpolator',
]
class StrInterpolator(abc.StrInterpolator):
"""
A `abc.StrInterpolator` implementation that resolves a string
with replaceable variables in such format {variable}
using the provided lookup object to lookup replacement values.
Example usage:
.. code-block:: python
from configd.config import MemoryConfig
from configd.interpolation import StrInterpolator, ConfigStrLookup
config = MemoryConfig(data={'property1': 1})
interpolator = StrInterpolator()
lookup = ConfigStrLookup(config)
value = interpolator.resolve('{property1}', lookup)
print(value)
"""
def __init__(self):
self._pattern = re.compile('\{(.*?)\}')
def resolve(self, value, lookup):
"""
Resolve a string with replaceable variables using the provided
lookup object to lookup replacement values.
An exception is thrown for variables without a replacement value.
:param str value: The value that contains variables to be resolved.
:param abc.StrLookup lookup: The lookup object to lookup replacement values.
:return str: The interpolated string.
"""
if value is None or not isinstance(value, string_types):
raise TypeError('value must be a str')
if lookup is None or not isinstance(lookup, abc.StrLookup):
raise TypeError('lookup must be an abc.StrLookup')
variables = self._pattern.findall(value)
for variable in variables:
replace_value = lookup.lookup(variable)
if replace_value is None:
raise InterpolatorError('Interpolation variable %s not found' % text_type(variable))
value = value.replace('{' + variable + '}', replace_value)
return value
class ConfigStrLookup(abc.StrLookup):
"""
A `StrLookup` implementation that lookup keys in a `abc.Config` object.
:param abc.Config config: The config object to lookup keys.
"""
def __init__(self, config):
self._config = config
@property
def config(self):
"""
Get the config object.
:return abc.Config: The config object.
"""
return self._config
def lookup(self, key):
"""
Lookup the given key in a config object.
:param str key: The key to lookup.
:return str: The value if found, otherwise None.
"""
return self._config.get(key, cast=text_type)
| """
Interpolator implementations.
"""
import re
from . import abc
from .exceptions import InterpolatorError
from .utils.compat import string_types, text_type
__all__ = [
'StrInterpolator',
]
class StrInterpolator(abc.StrInterpolator):
"""
A `abc.StrInterpolator` implementation that resolves a string
with replaceable variables in such format {variable}
using the provided lookup object to lookup replacement values.
Example usage:
.. code-block:: python
from configd.config import MemoryConfig
from configd.interpolation import StrInterpolator, ConfigStrLookup
config = MemoryConfig(data={'property1': 1})
interpolator = StrInterpolator()
lookup = ConfigStrLookup(config)
value = interpolator.resolve('{property1}', lookup)
print(value)
"""
def __init__(self):
self._pattern = re.compile('\{(.*?)\}')
def resolve(self, value, lookup):
"""
Resolve a string with replaceable variables using the provided
lookup object to lookup replacement values.
An exception is thrown for variables without a replacement value.
:param str value: The value that contains variables to be resolved.
:param abc.StrLookup lookup: The lookup object to lookup replacement values.
:return str: The interpolated string.
"""
if value is None or not isinstance(value, string_types):
raise TypeError('value must be a str')
if lookup is None or not isinstance(lookup, abc.StrLookup):
raise TypeError('lookup must be an abc.StrLookup')
variables = self._pattern.findall(value)
for variable in variables:
replace_value = lookup.lookup(variable)
if replace_value is None:
raise InterpolatorError('Interpolation variable %s not found' % text_type(variable))
value = value.replace('{' + variable + '}', replace_value)
return value
class ConfigStrLookup(abc.StrLookup):
"""
A `StrLookup` implementation that lookup keys in a `abc.Config` object.
:param abc.Config config: The config object to lookup keys.
"""
def __init__(self, config):
self._config = config
def lookup(self, key):
"""
Lookup the given key in a config object.
:param str key: The key to lookup.
:return str: The value if found, otherwise None.
"""
return self._config.get(key, cast=text_type)
| mit | Python |
d46a5c47c667a3ba17e943687c40678cb639c45b | update GO obo-xml file date | OpenBEL/resource-generator | changelog_config.py | changelog_config.py | # coding: utf-8
'''
changelog_config.py
Configuration for the change-log script. Provides a
mapping for each dataset to its proper parser. Each
dataset is independant, and can be commented/uncommented
as desired by the user.
'''
from collections import OrderedDict
import parsers
changelog_data = OrderedDict()
changelog_data['entrez_info.gz'] = \
('ftp://ftp.ncbi.nih.gov/gene/DATA/gene_history.gz', parsers.EntrezGeneHistoryParser)
changelog_data['hgnc.tsv'] = \
('http://www.genenames.org/cgi-bin/hgnc_downloads?title=HGNC+output+data&hgnc_dbtag=on&preset=all&status=Approved&status=Entry+Withdrawn&status_opt=2&level=pri&=on&where=&order_by=gd_app_sym_sort&limit=&format=text&submit=submit&.cgifields=&.cgifields=level&.cgifields=chr&.cgifields=status&.cgifields=hgnc_dbtag', parsers.HGNCParser)
changelog_data['mgi.rpt'] = \
('ftp://ftp.informatics.jax.org/pub/reports/MRK_List1.rpt', parsers.MGIParser)
changelog_data['rgd.txt'] = \
('ftp://rgd.mcw.edu/pub/data_release/GENES_RAT.txt', parsers.RGDParser)
changelog_data['delac_sp.txt'] = \
('ftp://ftp.uniprot.org/pub/databases/uniprot/knowledgebase/docs/delac_sp.txt', parsers.SwissWithdrawnParser)
changelog_data['gobp.xml.gz'] = \
('http://archive.geneontology.org/latest-full/go_201309-termdb.obo-xml.gz', parsers.GOBPParser)
changelog_data['gocc.xml.gz'] = \
('http://archive.geneontology.org/latest-full/go_201309-termdb.obo-xml.gz', parsers.GOCCParser)
changelog_data['chebi.owl'] = \
('ftp://ftp.ebi.ac.uk/pub/databases/chebi/ontology/chebi.owl', parsers.CHEBIParser)
changelog_data['replace2013.txt'] = \
('ftp://nlmpubs.nlm.nih.gov/online/mesh/.newterms/replace2013.txt', parsers.MESHChangesParser)
changelog_data['SCHEM_to_CHEBIID.txt'] = \
('SCHEM_to_CHEBIID.txt', parsers.SCHEMtoCHEBIParser)
changelog_data['SDIS_to_DO.txt'] = \
('SDIS_to_DO.txt', parsers.SDIStoDOParser)
changelog_data['doid.owl'] = \
('http://purl.obolibrary.org/obo/doid.owl', parsers.DODeprecatedParser)
| # coding: utf-8
'''
changelog_config.py
Configuration for the change-log script. Provides a
mapping for each dataset to its proper parser. Each
dataset is independant, and can be commented/uncommented
as desired by the user.
'''
from collections import OrderedDict
import parsers
changelog_data = OrderedDict()
changelog_data['entrez_info.gz'] = \
('ftp://ftp.ncbi.nih.gov/gene/DATA/gene_history.gz', parsers.EntrezGeneHistoryParser)
changelog_data['hgnc.tsv'] = \
('http://www.genenames.org/cgi-bin/hgnc_downloads?title=HGNC+output+data&hgnc_dbtag=on&preset=all&status=Approved&status=Entry+Withdrawn&status_opt=2&level=pri&=on&where=&order_by=gd_app_sym_sort&limit=&format=text&submit=submit&.cgifields=&.cgifields=level&.cgifields=chr&.cgifields=status&.cgifields=hgnc_dbtag', parsers.HGNCParser)
changelog_data['mgi.rpt'] = \
('ftp://ftp.informatics.jax.org/pub/reports/MRK_List1.rpt', parsers.MGIParser)
changelog_data['rgd.txt'] = \
('ftp://rgd.mcw.edu/pub/data_release/GENES_RAT.txt', parsers.RGDParser)
changelog_data['delac_sp.txt'] = \
('ftp://ftp.uniprot.org/pub/databases/uniprot/knowledgebase/docs/delac_sp.txt', parsers.SwissWithdrawnParser)
changelog_data['gobp.xml.gz'] = \
('http://archive.geneontology.org/latest-full/go_201307-termdb.obo-xml.gz', parsers.GOBPParser)
changelog_data['gocc.xml.gz'] = \
('http://archive.geneontology.org/latest-full/go_201307-termdb.obo-xml.gz', parsers.GOCCParser)
changelog_data['chebi.owl'] = \
('ftp://ftp.ebi.ac.uk/pub/databases/chebi/ontology/chebi.owl', parsers.CHEBIParser)
changelog_data['replace2013.txt'] = \
('ftp://nlmpubs.nlm.nih.gov/online/mesh/.newterms/replace2013.txt', parsers.MESHChangesParser)
changelog_data['SCHEM_to_CHEBIID.txt'] = \
('SCHEM_to_CHEBIID.txt', parsers.SCHEMtoCHEBIParser)
changelog_data['SDIS_to_DO.txt'] = \
('SDIS_to_DO.txt', parsers.SDIStoDOParser)
changelog_data['doid.owl'] = \
('http://purl.obolibrary.org/obo/doid.owl', parsers.DODeprecatedParser)
| apache-2.0 | Python |
77302f755c6c9065d79048c2ff8e3a06cdb35f42 | resolve issue in "link", fix #174 | robinandeer/chanjo | chanjo/load/link.py | chanjo/load/link.py | # -*- coding: utf-8 -*-
from chanjo.store import Gene, Transcript
from .utils import get_or_build_exon, _exon_kwargs
def rows(session, row_data):
"""Handle rows of sambamba output."""
exons = (row(session, data) for data in row_data)
return exons
def row(session, data):
"""Link transcripts and genes."""
# start with exons object
exon_filters = _exon_kwargs(data)
exon_obj = get_or_build_exon(session, exon_filters)
# store created gene objects
genes = {}
for tx_id, gene_id in data['elements']:
gene_obj = session.query(Gene).filter_by(gene_id=gene_id).first()
if gene_obj is None:
# create new gene and store to avoid conflicts
genes[gene_id] = gene_obj = (genes.get(gene_id) or
Gene(gene_id=gene_id))
tx_filters = {'transcript_id': tx_id}
tx_obj = session.query(Transcript).filter_by(**tx_filters).first()
if tx_obj is None:
# create new transcript and link with gene
tx_obj = Transcript(**tx_filters)
tx_obj.gene = gene_obj
if tx_obj not in exon_obj.transcripts:
# link exon to the transcript
exon_obj.transcripts.append(tx_obj)
return exon_obj
| # -*- coding: utf-8 -*-
from chanjo.store import Gene, Transcript
from .utils import get_or_build_exon
def rows(session, row_data):
"""Handle rows of sambamba output."""
exons = (row(session, data) for data in row_data)
return exons
def row(session, data):
"""Link transcripts and genes."""
# start with exons object
exon_obj = get_or_build_exon(session, data)
# store created gene objects
genes = {}
for tx_id, gene_id in data['elements']:
gene_obj = session.query(Gene).filter_by(gene_id=gene_id).first()
if gene_obj is None:
# create new gene and store to avoid conflicts
genes[gene_id] = gene_obj = (genes.get(gene_id) or
Gene(gene_id=gene_id))
tx_filters = {'transcript_id': tx_id}
tx_obj = session.query(Transcript).filter_by(**tx_filters).first()
if tx_obj is None:
# create new transcript and link with gene
tx_obj = Transcript(**tx_filters)
tx_obj.gene = gene_obj
if tx_obj not in exon_obj.transcripts:
# link exon to the transcript
exon_obj.transcripts.append(tx_obj)
return exon_obj
| mit | Python |
cb8a840e65beea7cfe55fdd98e5ce6881f9e11b3 | Update inline.py | francis-taylor/Timotty-Master | inline.py | inline.py | # -*- coding: utf-8 -*-
import json
def make_url(self):
keyboard = {}
add = []
for i in self:
add.append(i)
keyboard['inline_keyboard'] = add
return json.dumps(keyboard)
| # -*- coding: utf-8 -*-
import json
def make_url(self):
keyboard = {}
ia = []
for i in self:
ia.append(i)
keyboard['inline_keyboard'] = ia
return json.dumps(keyboard) | mit | Python |
ea99bdc94af28f9f8261c819eb131b465cda686f | add disambiguated data to invpat file | nikken1/patentprocessor,nikken1/patentprocessor,yngcan/patentprocessor,yngcan/patentprocessor,nikken1/patentprocessor,yngcan/patentprocessor,funginstitute/patentprocessor,funginstitute/patentprocessor,funginstitute/patentprocessor | get_invpat.py | get_invpat.py | from lib import alchemy
import pandas as pd
session_generator = alchemy.session_generator
session = session_generator()
#res = session.execute('select rawinventor.name_first, rawinventor.name_last, rawlocation.city, rawlocation.state, \
# rawlocation.country, rawinventor.sequence, patent.id, \
# year(application.date), year(patent.date), rawassignee.organization, uspc.mainclass_id, inventor.id \
# from rawinventor left join patent on patent.id = rawinventor.patent_id \
# left join application on application.patent_id = patent.id \
# left join rawlocation on rawlocation.id = rawinventor.rawlocation_id \
# left join rawassignee on rawassignee.patent_id = patent.id \
# left join uspc on uspc.patent_id = patent.id \
# left join inventor on inventor.id = rawinventor.inventor_id \
# where uspc.sequence = 0;')
res = session.execute('select rawinventor.name_first, rawinventor.name_last, location.city, location.state, \
location.country, rawinventor.sequence, patent.id, year(application.date), \
year(patent.date), rawassignee.organization, uspc.mainclass_id, inventor.id \
from rawinventor, rawlocation, patent, application, rawassignee, uspc, inventor,location \
where rawinventor.patent_id = patent.id and \
application.patent_id = patent.id and \
rawlocation.id = rawinventor.rawlocation_id and \
location.id = rawlocation.location_id and \
rawassignee.patent_id = patent.id and \
uspc.patent_id = patent.id and \
inventor.id = rawinventor.inventor_id;')
data = pd.DataFrame.from_records(res.fetchall())
data = data.drop_duplicates((6,11))
data.columns = ['first_name', 'last_name', 'city', 'state', 'country', 'sequence', 'patent', 'app_year', 'grant_year', 'assignee', 'mainclass', 'inventorid']
data.to_csv('invpat.csv',index=False,encoding='utf8')
| from lib import alchemy
import pandas as pd
session_generator = alchemy.session_generator
session = session_generator()
#res = session.execute('select rawinventor.name_first, rawinventor.name_last, rawlocation.city, rawlocation.state, \
# rawlocation.country, rawinventor.sequence, patent.id, \
# year(application.date), year(patent.date), rawassignee.organization, uspc.mainclass_id, inventor.id \
# from rawinventor left join patent on patent.id = rawinventor.patent_id \
# left join application on application.patent_id = patent.id \
# left join rawlocation on rawlocation.id = rawinventor.rawlocation_id \
# left join rawassignee on rawassignee.patent_id = patent.id \
# left join uspc on uspc.patent_id = patent.id \
# left join inventor on inventor.id = rawinventor.inventor_id \
# where uspc.sequence = 0;')
res = session.execute('select rawinventor.name_first, rawinventor.name_last, rawlocation.city, rawlocation.state, \
rawlocation.country, rawinventor.sequence, patent.id, year(application.date), \
year(patent.date), rawassignee.organization, uspc.mainclass_id, inventor.id \
from rawinventor, rawlocation, patent, application, rawassignee, uspc, inventor \
where rawinventor.patent_id = patent.id and \
application.patent_id = patent.id and \
rawlocation.id = rawinventor.rawlocation_id and \
rawassignee.patent_id = patent.id and \
uspc.patent_id = patent.id and \
inventor.id = rawinventor.inventor_id;')
data = pd.DataFrame.from_records(res.fetchall())
data = data.drop_duplicates((6,11))
data.columns = ['first_name', 'last_name', 'city', 'state', 'country', 'sequence', 'patent', 'app_year', 'grant_year', 'assignee', 'mainclass', 'inventorid']
data.to_csv('invpat.csv',index=False,encoding='utf8')
| bsd-2-clause | Python |
cde272222ef3889d6f9e92f9c3be32de0b661dfd | Add DeprecationWarning for tours when fetched | gadventures/gapipy | gapipy/resources/tour/tour.py | gapipy/resources/tour/tour.py | # Python 2 and 3
from __future__ import unicode_literals
import warnings
from ..base import Resource
from .departure import Departure
from .tour_dossier import TourDossier
class Tour(Resource):
_resource_name = 'tours'
_is_parent_resource = True
_as_is_fields = ['id', 'href', 'product_line']
_date_fields = ['departures_start_date', 'departures_end_date']
_resource_fields = [('tour_dossier', TourDossier)]
_resource_collection_fields = [('departures', Departure)]
def __init__(self, *args, **kwargs):
warnings.warn("""
The `tours` resource will be deprecated in the near
future in favour of `tour_dossiers`. Please reference
`tour_dossiers` going forward
""", DeprecationWarning)
super(Tour, self).__init__(*args, **kwargs)
def get_brief_itinerary(self):
return self.tour_dossier.get_brief_itinerary()
def get_detailed_itinerary(self):
return self.tour_dossier.get_detailed_itinerary()
def get_map_url(self):
return self.tour_dossier.get_map_url()
def get_banner_url(self):
return self.tour_dossier.get_banner_url()
def get_visited_countries(self):
return self.tour_dossier.get_visited_countries()
def get_trip_detail(self, label):
return self.tour_dossier.get_trip_detail(label)
| # Python 2 and 3
from __future__ import unicode_literals
from ..base import Resource
from .departure import Departure
from .tour_dossier import TourDossier
class Tour(Resource):
_resource_name = 'tours'
_is_parent_resource = True
_as_is_fields = ['id', 'href', 'product_line']
_date_fields = ['departures_start_date', 'departures_end_date']
_resource_fields = [('tour_dossier', TourDossier)]
_resource_collection_fields = [('departures', Departure)]
def get_brief_itinerary(self):
return self.tour_dossier.get_brief_itinerary()
def get_detailed_itinerary(self):
return self.tour_dossier.get_detailed_itinerary()
def get_map_url(self):
return self.tour_dossier.get_map_url()
def get_banner_url(self):
return self.tour_dossier.get_banner_url()
def get_visited_countries(self):
return self.tour_dossier.get_visited_countries()
def get_trip_detail(self, label):
return self.tour_dossier.get_trip_detail(label)
| mit | Python |
420e5a12daf45841cce581e5359c5214e3e0895b | switch to generic GLSL attributes | laanwj/hw2view | visualize.py | visualize.py | from __future__ import division, print_function
from OpenGL.GL import *
from OpenGL.GL import shaders
from OpenGL.GLUT import *
from OpenGL.GLU import *
from parse_bg import parse_bg, PRIM_TRIANGLE_STRIP, PRIM_TRIANGLES
import time
window = 0
width, height = 500, 400
gl_types = {
PRIM_TRIANGLES: GL_TRIANGLES,
PRIM_TRIANGLE_STRIP: GL_TRIANGLE_STRIP
}
starttime = time.time()
def reshape(w, h):
global width, height
width = w
height = h
def draw():
glViewport(0, 0, width, height)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45.0, width/height, 1.0, 100.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glRotate((time.time()-starttime)*30.0, 0.0, 1.0, 0.0)
shaders.glUseProgram(background_shader)
for numverts,vertsize,vertdata,facelists in bgdata:
glVertexAttribPointer(vertex_loc, 4, GL_FLOAT, False, vertsize, vertdata[0:])
glEnableVertexAttribArray(vertex_loc)
glVertexAttribPointer(color_loc, 4, GL_BYTE, True, vertsize, vertdata[16:])
glEnableVertexAttribArray(color_loc)
for typ, count, facedata in facelists:
glDrawElements(gl_types[typ], count, GL_UNSIGNED_SHORT, facedata)
glDisableVertexAttribArray(vertex_loc)
glDisableVertexAttribArray(color_loc)
shaders.glUseProgram(0)
glutSwapBuffers()
def idle():
glutPostRedisplay()
# fetch data
filename='background/m03/m03.hod'
bgdata = parse_bg(filename)
# initialization
glutInit()
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
glutInitWindowSize(width, height)
glutInitWindowPosition(0, 0)
window = glutCreateWindow("homeworld2 background")
glutDisplayFunc(draw)
glutReshapeFunc(reshape)
glutIdleFunc(idle)
VERTEX_SHADER = shaders.compileShader("""
#version 120
attribute vec4 inVertex;
attribute vec4 inColor;
void main()
{
gl_Position = gl_ModelViewProjectionMatrix * inVertex;
gl_FrontColor = inColor.abgr;
}
""", GL_VERTEX_SHADER)
FRAGMENT_SHADER = shaders.compileShader("""
#version 120
void main()
{
gl_FragColor = gl_Color;
}""", GL_FRAGMENT_SHADER)
background_shader = shaders.compileProgram(VERTEX_SHADER,FRAGMENT_SHADER)
vertex_loc = glGetAttribLocation(background_shader, "inVertex")
color_loc = glGetAttribLocation(background_shader, "inColor")
glutMainLoop()
| from __future__ import division, print_function
from OpenGL.GL import *
from OpenGL.GL import shaders
from OpenGL.GLUT import *
from OpenGL.GLU import *
from parse_bg import parse_bg, PRIM_TRIANGLE_STRIP, PRIM_TRIANGLES
import time
window = 0
width, height = 500, 400
gl_types = {
PRIM_TRIANGLES: GL_TRIANGLES,
PRIM_TRIANGLE_STRIP: GL_TRIANGLE_STRIP
}
starttime = time.time()
def reshape(w, h):
global width, height
width = w
height = h
def draw():
glViewport(0, 0, width, height)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(45.0, width/height, 1.0, 100.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glRotate((time.time()-starttime)*30.0, 0.0, 1.0, 0.0)
shaders.glUseProgram(background_shader)
for numverts,vertsize,vertdata,facelists in bgdata:
glVertexPointer(4, GL_FLOAT, vertsize, vertdata[0:])
glEnableClientState(GL_VERTEX_ARRAY)
glColorPointer(4, GL_BYTE, vertsize, vertdata[16:])
glEnableClientState(GL_COLOR_ARRAY)
for typ, count, facedata in facelists:
glDrawElements(gl_types[typ], count, GL_UNSIGNED_SHORT, facedata)
glDisableClientState(GL_VERTEX_ARRAY)
glDisableClientState(GL_COLOR_ARRAY)
shaders.glUseProgram(0)
glutSwapBuffers()
def idle():
glutPostRedisplay()
# fetch data
filename='background/m03/m03.hod'
bgdata = parse_bg(filename)
# initialization
glutInit()
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
glutInitWindowSize(width, height)
glutInitWindowPosition(0, 0)
window = glutCreateWindow("homeworld2 background")
glutDisplayFunc(draw)
glutReshapeFunc(reshape)
glutIdleFunc(idle)
VERTEX_SHADER = shaders.compileShader("""
#version 120
void main()
{
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
gl_FrontColor = gl_Color.abgr;
}
""", GL_VERTEX_SHADER)
FRAGMENT_SHADER = shaders.compileShader("""
#version 120
void main()
{
gl_FragColor = gl_Color;
}""", GL_FRAGMENT_SHADER)
background_shader = shaders.compileProgram(VERTEX_SHADER,FRAGMENT_SHADER)
glutMainLoop()
| mit | Python |
5366c1c61062ee5c83f82a1b4db36b9da56e15b4 | Fix to updateOrgs.py | yangle/HaliteIO,HaliteChallenge/Halite-II,lanyudhy/Halite-II,lanyudhy/Halite-II,HaliteChallenge/Halite,yangle/HaliteIO,yangle/HaliteIO,lanyudhy/Halite-II,yangle/HaliteIO,HaliteChallenge/Halite-II,HaliteChallenge/Halite,yangle/HaliteIO,lanyudhy/Halite-II,HaliteChallenge/Halite,HaliteChallenge/Halite-II,HaliteChallenge/Halite-II,HaliteChallenge/Halite-II,HaliteChallenge/Halite-II,HaliteChallenge/Halite,HaliteChallenge/Halite-II,yangle/HaliteIO,HaliteChallenge/Halite,lanyudhy/Halite-II,lanyudhy/Halite-II,yangle/HaliteIO,HaliteChallenge/Halite-II,HaliteChallenge/Halite-II,HaliteChallenge/Halite-II,HaliteChallenge/Halite,HaliteChallenge/Halite-II,lanyudhy/Halite-II,yangle/HaliteIO,lanyudhy/Halite-II,yangle/HaliteIO,HaliteChallenge/Halite-II,yangle/HaliteIO,HaliteChallenge/Halite-II,HaliteChallenge/Halite,yangle/HaliteIO,HaliteChallenge/Halite,HaliteChallenge/Halite,HaliteChallenge/Halite-II,lanyudhy/Halite-II,yangle/HaliteIO,HaliteChallenge/Halite,lanyudhy/Halite-II,lanyudhy/Halite-II,HaliteChallenge/Halite,HaliteChallenge/Halite,HaliteChallenge/Halite-II,HaliteChallenge/Halite-II | admin/updateOrgs.py | admin/updateOrgs.py | import configparser
import pymysql
import urllib.request
parser = configparser.ConfigParser()
parser.read("../halite.ini")
DB_CONFIG = parser["database"]
db = pymysql.connect(host=DB_CONFIG["hostname"], user=DB_CONFIG['username'], passwd=DB_CONFIG['password'], db=DB_CONFIG['name'], cursorclass=pymysql.cursors.DictCursor)
cursor = db.cursor()
cursor.execute("select email, userID, organization from User")
users = cursor.fetchall()
orgs = [line.strip().split(" - ") for line in open("../website/organizationWhitelist.txt").readlines()]
for user in users:
if user["email"] == None:
continue
realUserOrg = "Other"
try:
emailDomain = user["email"].split("@")[1]
except:
pass
for org in orgs:
if emailDomain == org[1]:
realUserOrg = org[0]
break
if (realUserOrg != "Other" or user["organization"] == "") and realUserOrg != user["organization"]:
print("%s, %s, %s" % (realUserOrg, user["organization"], user["email"]))
cursor.execute("update User set organization = '"+realUserOrg+"' where userID="+str(user["userID"]))
db.commit()
| import configparser
import pymysql
import urllib.request
parser = configparser.ConfigParser()
parser.read("../halite.ini")
DB_CONFIG = parser["database"]
db = pymysql.connect(host=DB_CONFIG["hostname"], user=DB_CONFIG['username'], passwd=DB_CONFIG['password'], db=DB_CONFIG['name'], cursorclass=pymysql.cursors.DictCursor)
cursor = db.cursor()
cursor.execute("select email, userID, organization from User")
users = cursor.fetchall()
orgs = [line.strip().split(" - ") for line in open("../website/organizationWhitelist.txt").readlines()]
for user in users:
if user["email"] == None:
continue
realUserOrg = "Other"
emailDomain = user["email"].split("@")[1]
for org in orgs:
if emailDomain == org[1]:
realUserOrg = org[0]
break
if (realUserOrg != "Other" or user["organization"] == "") and realUserOrg != user["organization"]:
print("%s, %s, %s" % (realUserOrg, user["organization"], user["email"]))
cursor.execute("update User set organization = '"+realUserOrg+"' where userID="+str(user["userID"]))
db.commit()
| mit | Python |
3c3e7a68b3fd2f8c600995c4fb65a3ebaf7ef2f5 | Allow auth_strategy to be toggled | kickstandproject/ripcord | ripcord/api/app.py | ripcord/api/app.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- encoding: utf-8 -*-
# Copyright © 2012 New Dream Network, LLC (DreamHost)
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pecan
from oslo.config import cfg
from ripcord.api import acl
from ripcord.api import config
from ripcord.api import hooks
from ripcord.api import middleware
auth_opts = [
cfg.StrOpt(
'auth_strategy', default='keystone',
help='The strategy to use for auth: noauth or keystone.'),
]
CONF = cfg.CONF
CONF.register_opts(auth_opts)
def get_pecan_config():
# Set up the pecan configuration
filename = config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(filename)
def setup_app(pecan_config=None):
app_hooks = [
hooks.DBHook(),
]
if not pecan_config:
pecan_config = get_pecan_config()
pecan.configuration.set_config(dict(pecan_config, overwrite=True))
static_root = None
if CONF.debug:
static_root = pecan_config.app.static_root,
app = pecan.make_app(
pecan_config.app.root,
static_root=static_root,
template_path=pecan_config.app.template_path,
debug=CONF.debug,
force_canonical=getattr(pecan_config.app, 'force_canonical', True),
hooks=app_hooks,
wrap_app=middleware.ParsableErrorMiddleware,
)
if pecan_config.app.enable_acl:
return acl.install(app, cfg.CONF)
return app
class VersionSelectorApplication(object):
def __init__(self):
pc = get_pecan_config()
pc.app.enable_acl = (CONF.auth_strategy == 'keystone')
self.v1 = setup_app(pecan_config=pc)
def __call__(self, environ, start_response):
return self.v1(environ, start_response)
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- encoding: utf-8 -*-
# Copyright © 2012 New Dream Network, LLC (DreamHost)
# Copyright (C) 2013 PolyBeacon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pecan
from oslo.config import cfg
from ripcord.api import acl
from ripcord.api import config
from ripcord.api import hooks
from ripcord.api import middleware
auth_opts = [
cfg.StrOpt(
'auth_strategy', default='keystone',
help='The strategy to use for auth: noauth or keystone.'),
]
CONF = cfg.CONF
CONF.register_opts(auth_opts)
def get_pecan_config():
# Set up the pecan configuration
filename = config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(filename)
def setup_app(pecan_config=None):
app_hooks = [
hooks.DBHook(),
]
if not pecan_config:
pecan_config = get_pecan_config()
pecan.configuration.set_config(dict(pecan_config, overwrite=True))
static_root = None
if CONF.debug:
static_root = pecan_config.app.static_root,
app = pecan.make_app(
pecan_config.app.root,
static_root=static_root,
template_path=pecan_config.app.template_path,
debug=CONF.debug,
force_canonical=getattr(pecan_config.app, 'force_canonical', True),
hooks=app_hooks,
wrap_app=middleware.ParsableErrorMiddleware,
)
if pecan_config.app.enable_acl:
return acl.install(app, cfg.CONF)
return app
class VersionSelectorApplication(object):
def __init__(self):
self.v1 = setup_app()
def __call__(self, environ, start_response):
return self.v1(environ, start_response)
| apache-2.0 | Python |
acfffbacde7e09c5d0edf2aef7cdb72b9e39a500 | update init file to recognize myconfig | okkhoy/minecraft-rl | rlglue/__init__.py | rlglue/__init__.py | mit | Python | ||
5625fe9b09dad59b4500c7b3f455db8658d1f5a2 | Add 2016nano. | mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju | agent_paths.py | agent_paths.py | #!/usr/bin/env python3
from argparse import ArgumentParser
import json
import os.path
import re
import sys
from simplestreams.generate_simplestreams import json_dump
def main():
parser = ArgumentParser()
parser.add_argument('input')
parser.add_argument('output')
args = parser.parse_args()
paths_hashes = {}
with open(args.input) as input_file:
stanzas = json.load(input_file)
hashes = {}
old_hash_urls = {}
for stanza in stanzas:
path_hash = stanza['sha256']
old_hash_urls[path_hash] = stanza['item_url']
agent_filename = stanza['path'].split('/')[-1]
path = 'agent/{}/{}'.format(stanza['version'], agent_filename)
path = re.sub('-win(2012(hv)?(r2)?|2016(nano)?|7|8|81|10)-',
'-windows-', path)
paths_hashes.setdefault(path, stanza['sha256'])
if paths_hashes[path] != path_hash:
raise ValueError('Conflicting hash')
stanza['path'] = path
hashes[path] = path_hash
ph_list = {}
for path, path_hash in hashes.items():
ph_list.setdefault(path_hash, set()).add(path)
for path_hash, paths in ph_list.items():
if len(paths) > 1:
print(paths)
json_dump(stanzas, args.output)
agent_downloads = []
for stanza in stanzas:
agent_downloads.append({
'path': stanza['path'],
'sha256': stanza['sha256'],
'url': old_hash_urls[stanza['sha256']],
})
json_dump(agent_downloads, 'downloads-' + args.output)
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/env python3
from argparse import ArgumentParser
import json
import os.path
import re
import sys
from simplestreams.generate_simplestreams import json_dump
def main():
parser = ArgumentParser()
parser.add_argument('input')
parser.add_argument('output')
args = parser.parse_args()
paths_hashes = {}
with open(args.input) as input_file:
stanzas = json.load(input_file)
hashes = {}
old_hash_urls = {}
for stanza in stanzas:
path_hash = stanza['sha256']
old_hash_urls[path_hash] = stanza['item_url']
agent_filename = stanza['path'].split('/')[-1]
path = 'agent/{}/{}'.format(stanza['version'], agent_filename)
path = re.sub('-win(2012(hv)?(r2)?|2016|7|8|81|10)-', '-windows-',
path)
paths_hashes.setdefault(path, stanza['sha256'])
if paths_hashes[path] != path_hash:
raise ValueError('Conflicting hash')
stanza['path'] = path
hashes[path] = path_hash
ph_list = {}
for path, path_hash in hashes.items():
ph_list.setdefault(path_hash, set()).add(path)
for path_hash, paths in ph_list.items():
if len(paths) > 1:
print(paths)
json_dump(stanzas, args.output)
agent_downloads = []
for stanza in stanzas:
agent_downloads.append({
'path': stanza['path'],
'sha256': stanza['sha256'],
'url': old_hash_urls[stanza['sha256']],
})
json_dump(agent_downloads, 'downloads-' + args.output)
if __name__ == '__main__':
sys.exit(main())
| agpl-3.0 | Python |
f380b886b5461a21de73175eedd019c2237d457a | Fix payment config page | pferreir/indico,mvidalgarcia/indico,ThiefMaster/indico,mic4ael/indico,DirkHoffmann/indico,mvidalgarcia/indico,pferreir/indico,OmeGak/indico,mvidalgarcia/indico,indico/indico,indico/indico,ThiefMaster/indico,DirkHoffmann/indico,mic4ael/indico,OmeGak/indico,OmeGak/indico,DirkHoffmann/indico,ThiefMaster/indico,mvidalgarcia/indico,mic4ael/indico,OmeGak/indico,indico/indico,mic4ael/indico,pferreir/indico,ThiefMaster/indico,DirkHoffmann/indico,indico/indico,pferreir/indico | indico/modules/payment/views.py | indico/modules/payment/views.py | # This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from MaKaC.webinterface.pages.admins import WPAdminsBase
from MaKaC.webinterface.pages.base import WPJinjaMixin
from MaKaC.webinterface.pages.conferences import WPConferenceDefaultDisplayBase, WPConferenceModifBase
class WPPaymentJinjaMixin(WPJinjaMixin):
template_prefix = 'payment/'
class WPPaymentAdmin(WPPaymentJinjaMixin, WPAdminsBase):
sidemenu_option = 'payment'
class WPPaymentEventManagement(WPConferenceModifBase, WPPaymentJinjaMixin):
template_prefix = 'payment/'
sidemenu_option = 'payment'
def _getPageContent(self, params):
return WPPaymentJinjaMixin._getPageContent(self, params)
class WPPaymentEvent(WPConferenceDefaultDisplayBase, WPPaymentJinjaMixin):
menu_entry_name = 'registration'
def _getBody(self, params):
return WPPaymentJinjaMixin._getPageContent(self, params)
| # This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from MaKaC.webinterface.pages.admins import WPAdminsBase
from MaKaC.webinterface.pages.base import WPJinjaMixin
from MaKaC.webinterface.pages.conferences import WPConferenceDefaultDisplayBase, WPConferenceModifBase
class WPPaymentJinjaMixin(WPJinjaMixin):
template_prefix = 'payment/'
class WPPaymentAdmin(WPPaymentJinjaMixin, WPAdminsBase):
sidemenu_option = 'payment'
class WPPaymentEventManagement(WPConferenceModifBase, WPPaymentJinjaMixin):
template_prefix = 'payment/'
sidemenu_option = 'payment'
def _getTabContent(self, params):
return WPPaymentJinjaMixin._getPageContent(self, params)
class WPPaymentEvent(WPConferenceDefaultDisplayBase, WPPaymentJinjaMixin):
menu_entry_name = 'registration'
def _getBody(self, params):
return WPPaymentJinjaMixin._getPageContent(self, params)
| mit | Python |
db2ed7a6b2686290e237286b2eeb6ac759cfd204 | fix migration | joehand/DataNews,joehand/DataNews | alembic/env.py | alembic/env.py | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
from data_news import app
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
config.set_main_option('sqlalchemy.url', app.config['SQLALCHEMY_DATABASE_URI'])
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from data_news.user import User, Role
from data_news.frontend import Item, Vote
from data_news.background import Twitter
from data_news import db
target_metadata = db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
from data_news import app
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
config.set_main_option('sqlalchemy.url', app.config['SQLALCHEMY_DATABASE_URI'])
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from data_news.models import *
from data_news import db
target_metadata = db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| bsd-3-clause | Python |
993d6be40e11121609b7e9592956e1060a0cb6f7 | Fix "ImportError: No module named application" | fert89/prueba-3-heroku-flask,Agreste/MobUrbRoteiro,Agreste/MobUrbRoteiro,san-bil/astan,Agreste/MobUrbRoteiro,albertogg/flask-bootstrap-skel,akhilaryan/clickcounter,fert89/prueba-3-heroku-flask,san-bil/astan,san-bil/astan,san-bil/astan | alembic/env.py | alembic/env.py | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import sys
import os.path
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from application import app, db
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Overwrite the sqlalchemy.url in the alembic.ini file.
config.set_main_option('sqlalchemy.url', app.config['SQLALCHEMY_DATABASE_URI'])
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
from application import app, db
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Overwrite the sqlalchemy.url in the alembic.ini file.
config.set_main_option('sqlalchemy.url', app.config['SQLALCHEMY_DATABASE_URI'])
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| bsd-3-clause | Python |
4d0ef33c43b0ef3d23f0c7991a86c1c08d486b43 | remove unused import | crateio/crate.web,crateio/crate.web,dstufft/jutils | crate_project/apps/packages/templatetags/package_tags.py | crate_project/apps/packages/templatetags/package_tags.py | from django import template
from django.db.models import F, Sum
from packages.models import Package, Release, ReleaseFile
register = template.Library()
@register.assignment_tag
def package_download_count(package_name=None):
# @@@ Cache this to cut down on queries
count = 0
if package_name is None:
# Total Download Count
count = ReleaseFile.objects.all().aggregate(total_downloads=Sum("downloads")).get("total_downloads", 0)
else:
count = ReleaseFile.objects.filter(
release__package__name=package_name
).aggregate(total_downloads=Sum("downloads")).get("total_downloads", 0)
return count
@register.assignment_tag
def package_count():
# @@@ Cache this to cut down on queries
return Package.objects.all().count()
@register.assignment_tag
def get_oldest_package():
# @@@ Cache this to cut down on queries
pkgs = Package.objects.all().order_by("created")[:1]
if pkgs:
return pkgs[0]
else:
return None
@register.assignment_tag
def new_releases(num):
return Release.objects.all().order_by("-created").select_related("package")[:num]
@register.assignment_tag
def updated_releases(num):
return [x.release for x in ReleaseFile.objects.exclude(created=F("release__created")).order_by("-created")[:num]]
@register.assignment_tag
def package_versions(package_name, num=5):
return Release.objects.filter(package__name=package_name).order_by("-order")[:num]
@register.assignment_tag
def package_version_count(package_name):
return Release.objects.filter(package__name=package_name).count()
| import datetime
from django import template
from django.db.models import F, Sum
from packages.models import Package, Release, ReleaseFile
register = template.Library()
@register.assignment_tag
def package_download_count(package_name=None):
# @@@ Cache this to cut down on queries
count = 0
if package_name is None:
# Total Download Count
count = ReleaseFile.objects.all().aggregate(total_downloads=Sum("downloads")).get("total_downloads", 0)
else:
count = ReleaseFile.objects.filter(
release__package__name=package_name
).aggregate(total_downloads=Sum("downloads")).get("total_downloads", 0)
return count
@register.assignment_tag
def package_count():
# @@@ Cache this to cut down on queries
return Package.objects.all().count()
@register.assignment_tag
def get_oldest_package():
# @@@ Cache this to cut down on queries
pkgs = Package.objects.all().order_by("created")[:1]
if pkgs:
return pkgs[0]
else:
return None
@register.assignment_tag
def new_releases(num):
return Release.objects.all().order_by("-created").select_related("package")[:num]
@register.assignment_tag
def updated_releases(num):
return [x.release for x in ReleaseFile.objects.exclude(created=F("release__created")).order_by("-created")[:num]]
@register.assignment_tag
def package_versions(package_name, num=5):
return Release.objects.filter(package__name=package_name).order_by("-order")[:num]
@register.assignment_tag
def package_version_count(package_name):
return Release.objects.filter(package__name=package_name).count()
| bsd-2-clause | Python |
6bc3454bb647a4fc49f8be85adf64ec7eaac0a47 | Update acceptance.py | pmutale/www.mutale.nl,pmutale/www.mutale.nl,pmutale/www.mutale.nl | settings/labels/mutale/acceptance.py | settings/labels/mutale/acceptance.py | from settings.labels.mutale.base import *
from themes.secrets import read_mailpass
DEBUG = False
ALLOWED_HOSTS = ['mutale.herokuapp.com', '127.0.0.1', 'localhost', 'mutale-dev-a.herokuapp.com',
'mutale-prd.herokuapp.com', 'stick2uganda.mutale.nl']
DATABASES = {
'default':
read_pgpass('dc8pvi4eohh58t')
}
email_settings = read_mailpass('webmaster@mutale.nl')
DATABASES['default']['CONN_MAX_AGE'] = 500
EMAIL_HOST = email_settings['host']
EMAIL_PORT = email_settings['port']
EMAIL_HOST_USER = email_settings['user']
EMAIL_HOST_PASSWORD = email_settings['password']
EMAIL_USE_SSL = email_settings['ssl']
| from settings.labels.mutale.base import *
import dj_database_url
from themes import secrets
DEBUG = False
ALLOWED_HOSTS = ['mutale.herokuapp.com',
'127.0.0.1', 'localhost',
'mutale-acc.herokuapp.com',
'mutale-dev.herokuapp.com']
DATABASE_URL = 'postgres://oogcsuzgfwhqbc:0da4b0d51b2f508e4c00308e3c583c2dd9999b6b439a5501dcd643602b455167@ec2-54' \
'-247-92-185.eu-west-1.compute.amazonaws.com:5432/dmtkic08buj90'
DATABASES = {
'default':
dj_database_url.config(default=DATABASE_URL)
}
DATABASES['default']['CONN_MAX_AGE'] = 500
EMAIL_HOST = secrets.email_settings['host']
EMAIL_PORT = secrets.email_settings['port']
EMAIL_HOST_USER = secrets.email_settings['user']
EMAIL_HOST_PASSWORD = secrets.email_settings['password']
EMAIL_USE_SSL = secrets.email_settings['ssl']
| unlicense | Python |
b4cb77fb0f3215190779c390218ecbd03ae506e8 | Add docstrings | robbie-c/git-lang-guesser | git-lang-guesser/guess_lang.py | git-lang-guesser/guess_lang.py | from collections import Counter
LANGUAGE_KEY = "language"
def count_languages(repos, filter_none=True):
"""
Count the occurances of each language in a list of repositories
:param repos: A list of repositories, which should be dictionaries with a "language" key
:param filter_none: Whether to ignore repositories with no or None language
:return: A collections.Counter representing the number of occurances
"""
langs = (repo.get(LANGUAGE_KEY, None) for repo in repos)
# filter out None
if filter_none:
langs = filter(lambda x: x is not None, langs)
# a Counter does all the heavy lifting for us
return Counter(langs)
def guess_favourite(repos):
"""
Returns the most common language (except None) in the list of repos
:param repos: A list of repositories, which should be dictionaries with a "language" key
:return: The most common language. In the case of a tie, it is undefined which of the most common is chosen.
"""
counter = count_languages(repos)
if counter:
[(favourite, count)] = counter.most_common(1)
return favourite
else:
return None
if __name__ == "__main__":
import pprint
from . import git_requester
my_repos = git_requester.get_public_repos_for_user("robbie-c")
pprint.pprint(my_repos)
lang = guess_favourite(my_repos)
print(lang) | from collections import Counter
LANGUAGE_KEY = "language"
def count_languages(repos, filter_none=True):
langs = (repo.get(LANGUAGE_KEY, None) for repo in repos)
# filter out None
if filter_none:
langs = filter(lambda x: x is not None, langs)
# a Counter does all the heavy lifting for us
return Counter(langs)
def guess_favourite(repos):
counter = count_languages(repos)
if counter:
[(favourite, count)] = counter.most_common(1)
return favourite
else:
return None
if __name__ == "__main__":
import pprint
from . import git_requester
my_repos = git_requester.get_public_repos_for_user("robbie-c")
pprint.pprint(my_repos)
lang = guess_favourite(my_repos)
print(lang) | mit | Python |
1ff7d8ecc964a99278a5511f45a66b7d55654395 | Fix thanks to Joel for spotting it | bliksemlabs/rrrr | web-uwsgi.py | web-uwsgi.py | import uwsgi
import zmq
import struct
COMMON_HEADERS = [('Content-Type', 'application/json'), ('Access-Control-Allow-Origin', '*'), ('Access-Control-Allow-Headers', 'Requested-With,Content-Type')]
context = zmq.Context()
def light(environ, start_response):
if environ['PATH_INFO'] in ['/favicon.ico']:
start_response('404 NOK', COMMON_HEADERS)
return ''
qstring = environ['QUERY_STRING']
if qstring == '':
start_response('406 NOK', COMMON_HEADERS)
return ''
request_bliksem = context.socket(zmq.REQ)
request_bliksem.connect("tcp://127.0.0.1:9292")
poller = zmq.Poller()
poller.register(request_bliksem, zmq.POLLIN)
request_bliksem.send(qstring)
socks = dict(poller.poll(1000))
if socks.get(request_bliksem) == zmq.POLLIN:
reply = request_bliksem.recv()
start_response('200 OK', COMMON_HEADERS + [('Content-length', str(len(reply)))])
return reply
else:
start_response('500 NOK', COMMON_HEADERS)
return ''
uwsgi.applications = {'': light}
| import uwsgi
import zmq
import struct
COMMON_HEADERS = [('Content-Type', 'application/json'), ('Access-Control-Allow-Origin', '*'), ('Access-Control-Allow-Headers', 'Requested-With,Content-Type')]
context = zmq.Context()
def light(environ, start_response):
if environ['PATH_INFO'] in ['/favicon.ico']:
start_response('404 NOK', COMMON_HEADERS)
return ''
qstring = environ['QUERY_STRING']
if qstring == '':
start_response('406 NOK', COMMON_HEADERS)
return ''
request_bliksem = context.socket(zmq.REQ)
request_bliksem.connect("tcp://127.0.0.1:9292")
poller = zmq.Poller()
poller.register(request_bliksem, zmq.POLLIN)
request_bliksem.send(qstring)
socks = dict(poller.poll(1000))
if socks.get(request_bliksem) == zmq.POLLIN:
reply = request_bliksem.recv()
start_response('500 NOK', COMMON_HEADERS + [('Content-length', str(len(reply)))])
return reply
else:
start_response('500 NOK', COMMON_HEADERS)
return ''
uwsgi.applications = {'': light}
| bsd-2-clause | Python |
bb434f82f0883f2a85992f6b41a042d08ddb6449 | Update __init__.py | anuragpapineni/Hearthbreaker-evolved-agent,anuragpapineni/Hearthbreaker-evolved-agent,jirenz/CS229_Project,slaymaker1907/hearthbreaker,pieiscool/edited-hearthbreaker,noa/hearthbreaker,danielyule/hearthbreaker,kingoflolz/hearthbreaker,anuragpapineni/Hearthbreaker-evolved-agent,pieiscool/edited-hearthbreaker,Ragowit/hearthbreaker,slaymaker1907/hearthbreaker | hsgame/cards/minions/__init__.py | hsgame/cards/minions/__init__.py | __author__ = 'Daniel'
from hsgame.cards.minions.neutral import (
BloodfenRaptor,
IronbeakOwl,
NoviceEngineer,
StonetuskBoar,
WarGolem,
MogushanWarden,
OasisSnapjaw,
FaerieDragon,
KoboldGeomancer,
ElvenArcher,
IronfurGrizzly,
ArgentSquire,
SilvermoonGuardian,
TwilightDrake,
MagmaRager,
DireWolfAlpha,
WorgenInfiltrator,
Archmage,
DalaranMage,
Malygos,
AzureDrake,
OgreMagi,
BloodmageThalnos,
LootHoarder,
MoltenGiant,
SeaGiant,
MountainGiant,
LeperGnome
)
from hsgame.cards.minions.druid import (
KeeperOfTheGrove,
DruidOfTheClaw,
AncientOfLore,
AncientOfWar,
IronbarkProtector,
Cenarius
)
from hsgame.cards.minions.hunter import (
TimberWolf
)
from hsgame.cards.minions.mage import (
ManaWyrm,
SorcerersApprentice,
KirinTorMage,
EtherealArcanist,
WaterElemental,
ArchmageAntonidas
)
from hsgame.cards.minions.paladin import (
AldorPeacekeeper,
ArgentProtector,
GuardianOfKings
)
from hsgame.cards.minions.priest import (
CabalShadowPriest,
Lightspawn,
Lightwell,
NorthshireCleric
)
| __author__ = 'Daniel'
from hsgame.cards.minions.neutral import (
BloodfenRaptor,
IronbeakOwl,
NoviceEngineer,
StonetuskBoar,
WarGolem,
MogushanWarden,
OasisSnapjaw,
FaerieDragon,
KoboldGeomancer,
ElvenArcher,
IronfurGrizzly,
ArgentSquire,
SilvermoonGuardian,
TwilightDrake,
MagmaRager,
DireWolfAlpha,
WorgenInfiltrator,
Archmage,
DalaranMage,
Malygos,
AzureDrake,
OgreMagi
)
from hsgame.cards.minions.druid import (
KeeperOfTheGrove,
DruidOfTheClaw,
AncientOfLore,
AncientOfWar,
IronbarkProtector,
Cenarius
)
from hsgame.cards.minions.hunter import (
TimberWolf
)
from hsgame.cards.minions.mage import (
ManaWyrm,
SorcerersApprentice,
KirinTorMage,
EtherealArcanist,
WaterElemental,
ArchmageAntonidas
)
from hsgame.cards.minions.paladin import (
AldorPeacekeeper,
ArgentProtector,
GuardianOfKings
)
from hsgame.cards.minions.priest import (
CabalShadowPriest,
Lightspawn,
Lightwell,
NorthshireCleric
)
| mit | Python |
03ea65721a2a0979c6b42dfcec8737f9f438d6f0 | Bump to version 0.11.0 (#212) | gaopeiliang/aiodocker,gaopeiliang/aiodocker,gaopeiliang/aiodocker,barrachri/aiodocker,barrachri/aiodocker,paultag/aiodocker,barrachri/aiodocker | aiodocker/__init__.py | aiodocker/__init__.py | from .docker import Docker
__version__ = '0.11.0'
__all__ = ("Docker", )
| from .docker import Docker
__version__ = '0.11.0a0'
__all__ = ("Docker", )
| mit | Python |
85269dda0369f1575ffaddee83cd63c5d468e3b2 | remove debug | sk2/autonetkit | autonetkit/plugins/naming.py | autonetkit/plugins/naming.py | def network_hostname(node):
print "%s_%s" % (node.Network, node.label)
return "%s_%s" % (node.Network, node.label)
| def network_hostname(node):
print node.dump()
print "%s_%s" % (node.Network, node.label)
return "%s_%s" % (node.Network, node.label)
| bsd-3-clause | Python |
620d2b04f6632779cb0015558bcab035a3aefecd | support config_local.py | hades/chukchi,hades/chukchi | chukchi/config/__init__.py | chukchi/config/__init__.py | # This file is part of Chukchi, the free web-based RSS aggregator
#
# Copyright (C) 2013 Edward Toroshchin <chukchi-project@hades.name>
#
# Chukchi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Chukchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# Please see the file COPYING in the root directory of this project.
# If you are unable to locate this file, see <http://www.gnu.org/licenses/>.
from . import defaults
config = defaults
try:
import config_local
for key in config_local.__dict__:
if key.upper() == key:
setattr(config, key, getattr(config_local, key))
except ImportError:
pass
# vi: sw=4:ts=4:et
| # This file is part of Chukchi, the free web-based RSS aggregator
#
# Copyright (C) 2013 Edward Toroshchin <chukchi-project@hades.name>
#
# Chukchi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Chukchi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# Please see the file COPYING in the root directory of this project.
# If you are unable to locate this file, see <http://www.gnu.org/licenses/>.
from . import defaults
config = defaults
# vi: sw=4:ts=4:et
| agpl-3.0 | Python |
0f3ce188fee12c07dbd56aa6a459c04b5ed07778 | update to execute headder | mfasq1Monash/FIT3140 | robotcontroller.py | robotcontroller.py | '''
Author: Michael Asquith, Aaron Gruneklee
Created: 2014.12.12
Last Modified: 2014.12.23
The brain of the robot. Has access to the robot's I/O, interpreter and
instructions
'''
from interpreter import Interpreter
from robotio import RobotIO
from codeblock import CodeBlock
from maze import Maze
class RobotController():
"""studentProgram is the robot's instructions"""
def __init__(self):
self.maze = Maze()
self.robotio = RobotIO(self.maze)
def executeProgram(self, programfile):
"""Executes the robot's program"""
code = open('user_file',r)
inter = Interpreter(self.robotio)
for line in self.program:
inter.interpret(code)
def getRobotLocationAndFacing(self):
return self.robotio.getLocationAndFacing()
def getMaze(self):
return self.maze.getMaze()
| '''
Author: Michael Asquith, Aaron Gruneklee
Created: 2014.12.12
Last Modified: 2014.12.23
The brain of the robot. Has access to the robot's I/O, interpreter and
instructions
'''
from interpreter import Interpreter
from robotio import RobotIO
from codeblock import CodeBlock
from maze import Maze
class RobotController():
"""studentProgram is the robot's instructions"""
def __init__(self):
self.maze = Maze()
self.robotio = RobotIO(self.maze)
def executeProgram(self):
"""Executes the robot's program"""
code = open('user_file',r)
inter = Interpreter(self.robotio)
for line in self.program:
inter.interpret(code)
def getRobotLocationAndFacing(self):
return self.robotio.getLocationAndFacing()
def getMaze(self):
return self.maze.getMaze()
| mit | Python |
dd78f68ed0e6dcecc66f1b87972f73f887edfb3e | Add exception handling | globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service | dbaas/integrations/monitoring/manager.py | dbaas/integrations/monitoring/manager.py | from dbaas_dbmonitor.provider import DBMonitorProvider
from dbaas_zabbix.provider import ZabbixProvider
import logging
LOG = logging.getLogger(__name__)
class MonitoringManager():
@classmethod
def create_monitoring(cls, databaseinfra):
try:
LOG.info("Creating monitoring...")
ZabbixProvider().create_monitoring(dbinfra=databaseinfra)
return DBMonitorProvider().create_dbmonitor_monitoring(databaseinfra)
except Exception, e:
LOG.warn("Exception: %s" % e)
return None
@classmethod
def remove_monitoring(cls, databaseinfra):
try:
LOG.info("Removing monitoring...")
ZabbixProvider().destroy_monitoring(dbinfra=databaseinfra)
return DBMonitorProvider().remove_dbmonitor_monitoring(databaseinfra)
except Exception, e:
LOG.warn("Exception: %s" % e)
return None
| from dbaas_dbmonitor.provider import DBMonitorProvider
from dbaas_zabbix.provider import ZabbixProvider
import logging
LOG = logging.getLogger(__name__)
class MonitoringManager():
@classmethod
def create_monitoring(cls, databaseinfra):
LOG.info("Creating monitoring...")
ZabbixProvider().create_monitoring(dbinfra=databaseinfra)
return DBMonitorProvider().create_dbmonitor_monitoring(databaseinfra)
@classmethod
def remove_monitoring(cls, databaseinfra):
LOG.info("Removing monitoring...")
ZabbixProvider().destroy_monitoring(dbinfra=databaseinfra)
return DBMonitorProvider().remove_dbmonitor_monitoring(databaseinfra) | bsd-3-clause | Python |
32db10f40693d5ace65226c46b0f67d75045e440 | Bump version to 0.2.2 | jreese/aiosqlite | aiosqlite/__init__.py | aiosqlite/__init__.py | # Copyright 2017 John Reese
# Licensed under the MIT license
'''asyncio bridge to sqlite3'''
from sqlite3 import sqlite_version, sqlite_version_info
from .core import connect, Connection, Cursor
__version__ = '0.2.2'
__all__ = [
'__version__',
'sqlite_version',
'sqlite_version_info',
'connect',
'Connection',
'Cursor',
]
| # Copyright 2017 John Reese
# Licensed under the MIT license
'''asyncio bridge to sqlite3'''
from sqlite3 import sqlite_version, sqlite_version_info
from .core import connect, Connection, Cursor
__version__ = '0.2.0'
__all__ = [
'__version__',
'sqlite_version',
'sqlite_version_info',
'connect',
'Connection',
'Cursor',
]
| mit | Python |
6be7a88edf0cff942236799907c177775041a349 | Fix flake8 errors | albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com | albertyw.com/utils.py | albertyw.com/utils.py | import datetime
import os
import dotenv
from getenv import env
import markdown2
import pytz
root_path = os.path.dirname(os.path.realpath(__file__)) + '/../'
dotenv.read_dotenv(os.path.join(root_path, '.env'))
# See https://github.com/trentm/python-markdown2/wiki/Extras
MARKDOWN_EXTRAS = [
'code-friendly',
'fenced-code-blocks',
'smarty-pants',
'tables',
]
def prune_note_files(note_files):
files = [note_file for note_file in note_files if '~' not in note_file]
return files
def get_note_files():
current_directory = os.path.dirname(os.path.realpath(__file__))
notes_directory = os.path.join(current_directory, 'notes')
files = os.listdir(notes_directory)
files.sort(reverse=True)
files = prune_note_files(files)
files = [os.path.join(notes_directory, note_file) for note_file in files]
return files
def get_notes():
note_files = get_note_files()
timezone = pytz.timezone(env('DISPLAY_TIMEZONE'))
notes = []
for note_file in note_files:
with open(note_file) as note_handle:
note = note_handle.readlines()
note = [line.strip() for line in note]
if len(note) < 3 or not note[1].isdigit():
continue
timestamp = int(note[1])
note_parsed = {}
note_parsed['title'] = note[0]
note_parsed['time'] = datetime.datetime.fromtimestamp(
timestamp, timezone)
note_parsed['note'] = markdown2.markdown(
"\n".join(note[2:]),
extras=MARKDOWN_EXTRAS,
)
notes.append(note_parsed)
return notes
| import datetime
import os
import dotenv
from getenv import env
import markdown2
import pytz
root_path = os.path.dirname(os.path.realpath(__file__)) + '/../'
dotenv.read_dotenv(os.path.join(root_path, '.env'))
# See https://github.com/trentm/python-markdown2/wiki/Extras
MARKDOWN_EXTRAS = [
'code-friendly',
'fenced-code-blocks',
'smarty-pants',
'tables',
]
def prune_note_files(note_files):
files = [note_file for note_file in note_files if '~' not in note_file]
return files
def get_note_files():
current_directory = os.path.dirname(os.path.realpath(__file__))
notes_directory = os.path.join(current_directory, 'notes')
files = os.listdir(notes_directory)
files.sort(reverse=True)
files = prune_note_files(files)
files = [os.path.join(notes_directory, note_file) for note_file in files]
return files
def get_notes():
note_files = get_note_files()
timezone = pytz.timezone(env('DISPLAY_TIMEZONE'))
notes = []
for note_file in note_files:
with open(note_file) as note_handle:
note = note_handle.readlines()
note = [line.strip() for line in note]
if len(note) < 3 or not note[1].isdigit():
continue
timestamp = int(note[1])
note_parsed = {}
note_parsed['title'] = note[0]
note_parsed['time'] = datetime.datetime.fromtimestamp(
timestamp, timezone)
note_parsed['note'] = markdown2.markdown("\n".join(note[2:]), extras=MARKDOWN_EXTRAS)
notes.append(note_parsed)
return notes
| mit | Python |
f4a123f1bdc02df483ff0c199625be9d17b1d32a | fix override of logging methods | Nic30/HWToolkit | hwt/simulator/vcdHdlSimConfig.py | hwt/simulator/vcdHdlSimConfig.py | from datetime import datetime
from pprint import pprint
import sys
from hwt.hdlObjects.types.bits import Bits
from hwt.hdlObjects.types.boolean import Boolean
from hwt.simulator.hdlSimConfig import HdlSimConfig
from hwt.simulator.vcdWritter import VcdWritter
from hwt.hdlObjects.types.enum import Enum
class VcdHdlSimConfig(HdlSimConfig):
supported_type_classes = (Boolean, Bits, Enum)
def __init__(self, dumpFile=sys.stdout):
self.vcdWritter = VcdWritter(dumpFile)
self.logPropagation = False
self.logApplyingValues = False
# unit : signal | unit
# signal : None
self.registered = {}
def logApplyingValues(self, simulator, values):
pprint((simulator.now, values))
def logPropagation(self, simulator, signal, process):
print("%d: Signal.simPropagateChanges %s -> %s"
% (simulator.now, signal.name, str(process.name))
)
def vcdRegisterUnit(self, unit):
with self.vcdWritter.module(unit._name) as m:
for se in unit._cntx.signals:
if isinstance(se._dtype, self.supported_type_classes):
m.var(se)
for u in unit._units:
self.vcdRegisterUnit(u)
def _registerSignal(self, sig):
self.registered[sig] = None
def beforeSim(self, simulator, synthesisedUnit):
"""
This method is called before first step of simulation.
"""
self.vcdWritter.date(datetime.now())
self.vcdWritter.timescale(1)
self.vcdRegisterUnit(synthesisedUnit)
self.vcdWritter.enddefinitions()
def logChange(self, nowTime, sig, nextVal):
"""
This method is called for every value change of any signal.
"""
try:
self.vcdWritter.change(nowTime, sig, nextVal)
except KeyError:
# not every signal has to be registered
pass
| from datetime import datetime
from pprint import pprint
import sys
from hwt.hdlObjects.types.bits import Bits
from hwt.hdlObjects.types.boolean import Boolean
from hwt.simulator.hdlSimConfig import HdlSimConfig
from hwt.simulator.vcdWritter import VcdWritter
from hwt.hdlObjects.types.enum import Enum
class VcdHdlSimConfig(HdlSimConfig):
supported_type_classes = (Boolean, Bits, Enum)
def __init__(self, dumpFile=sys.stdout):
super().__init__()
self.vcdWritter = VcdWritter(dumpFile)
self.logPropagation = False
self.logApplyingValues = False
# unit : signal | unit
# signal : None
self.registered = {}
def logApplyingValues(self, simulator, values):
pprint((simulator.now, values))
def logPropagation(self, simulator, signal, process):
print("%d: Signal.simPropagateChanges %s -> %s"
% (simulator.now, signal.name, str(process.name))
)
def vcdRegisterUnit(self, unit):
with self.vcdWritter.module(unit._name) as m:
for se in unit._cntx.signals:
if isinstance(se._dtype, self.supported_type_classes):
m.var(se)
for u in unit._units:
self.vcdRegisterUnit(u)
def _registerSignal(self, sig):
self.registered[sig] = None
def beforeSim(self, simulator, synthesisedUnit):
"""
This method is called before first step of simulation.
"""
self.vcdWritter.date(datetime.now())
self.vcdWritter.timescale(1)
self.vcdRegisterUnit(synthesisedUnit)
self.vcdWritter.enddefinitions()
def logChange(self, nowTime, sig, nextVal):
"""
This method is called for every value change of any signal.
"""
try:
self.vcdWritter.change(nowTime, sig, nextVal)
except KeyError:
# not every signal has to be registered
pass
| mit | Python |
f929d80c5a4363994968248d87a892b1c2ef61d4 | Use actual sha for static:debug image (and update static:latest) (#1804) | bazelbuild/rules_docker,bazelbuild/rules_docker,bazelbuild/rules_docker,bazelbuild/rules_docker | go/static.bzl | go/static.bzl | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Generated file with dependencies for language rule."""
# !!!! THIS IS A GENERATED FILE TO NOT EDIT IT BY HAND !!!!
#
# To regenerate this file, run ./update_deps.sh from the root of the
# git repository.
DIGESTS = {
# "gcr.io/distroless/static:debug" circa 2021-04-07 9:30 -0400
"debug": "sha256:acbec568a18bc35f06f95878bcc4b1aa82991824a60174b2233408bd97e25dab",
# "gcr.io/distroless/static:latest" circa 2021-04-07 9:30 -0400
"latest": "sha256:a7752b29b18bb106938caefd8dcce8a94199022cbd06ea42268b968f35e837a8",
}
| # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Generated file with dependencies for language rule."""
# !!!! THIS IS A GENERATED FILE TO NOT EDIT IT BY HAND !!!!
#
# To regenerate this file, run ./update_deps.sh from the root of the
# git repository.
DIGESTS = {
# "gcr.io/distroless/static:debug" circa 2021-03-14 11:14 -0400
"debug": "sha256:359e0c5c9a1364d82f567db01e1419dead4dfc04d33271248f9c713007d0c22e",
# "gcr.io/distroless/static:latest" circa 2021-03-14 11:14 -0400
"latest": "sha256:359e0c5c9a1364d82f567db01e1419dead4dfc04d33271248f9c713007d0c22e",
}
| apache-2.0 | Python |
f294fe77e30298aa606fe603a8c8851c0120b283 | make integrate work on bulk data | yngcan/patentprocessor,nikken1/patentprocessor,nikken1/patentprocessor,funginstitute/patentprocessor,yngcan/patentprocessor,funginstitute/patentprocessor,nikken1/patentprocessor,funginstitute/patentprocessor,yngcan/patentprocessor | integrate.py | integrate.py | #!/usr/bin/env python
"""
Takes in a CSV file that represents the output of the disambiguation engine:
Patent Number, Firstname, Lastname, Unique_Inventor_ID
Groups by Unique_Inventor_ID and then inserts them into the Inventor table using
lib.alchemy.match
"""
import sys
import lib.alchemy as alchemy
from lib.util.csv_reader import read_file
from lib.handlers.xml_util import normalize_document_identifier
from collections import defaultdict
import cPickle as pickle
import linecache
from datetime import datetime
def integrate(filename, disambiginput):
blocks = defaultdict(list)
print 'Gathering blocks'
for index, line in enumerate(read_file(filename)):
if index % 100000 == 0:
print index, str(datetime.now())
unique_inventor_id = line[0]
oldline = linecache.getline(disambiginput, index+1).split('\t')
patent_number, name_first, name_last = oldline[0], oldline[2], oldline[3]
patent_number = normalize_document_identifier(patent_number)
rawinventors = alchemy.session.query(alchemy.schema.RawInventor).filter_by(
patent_id = patent_number,
name_first = name_first,
name_last = name_last).all()
blocks[unique_inventor_id].extend(rawinventors)
pickle.dump(blocks, open('integrate.db', 'wb'))
print 'Starting commits'
i = 0
for block in blocks.itervalues():
i += 1
if i % 10000 == 0:
print i
alchemy.match(block, alchemy.session, commit=True)
print str(datetime.now())
else:
alchemy.match(block, alchemy.session, commit=False)
alchemy.match(block, alchemy.session)
def main():
if len(sys.argv) <= 1:
print 'USAGE: python integrate.py <path-to-csv-file>'
sys.exit()
filename = sys.argv[1]
disambiginput = sys.argv[2]
integrate(filename, disambiginput)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
"""
Takes in a CSV file that represents the output of the disambiguation engine:
Patent Number, Firstname, Lastname, Unique_Inventor_ID
Groups by Unique_Inventor_ID and then inserts them into the Inventor table using
lib.alchemy.match
"""
import sys
import lib.alchemy as alchemy
from lib.util.csv_reader import read_file
from lib.handlers.xml_util import normalize_document_identifier
from collections import defaultdict
import cPickle as pickle
def integrate(filename):
blocks = defaultdict(list)
for line in read_file(filename):
patent_number, name_first, name_last, unique_inventor_id = line
patent_number = normalize_document_identifier(patent_number)
rawinventors = alchemy.session.query(alchemy.grant.RawInventor).filter_by(
patent_id = patent_number,
name_first = name_first,
name_last = name_last).all()
blocks[unique_inventor_id].extend(rawinventors)
pickle.dump(blocks, open('integrate.db', 'wb'))
for block in blocks.itervalues():
alchemy.match(block, alchemy.session)
def main():
if len(sys.argv) <= 1:
print 'USAGE: python integrate.py <path-to-csv-file>'
sys.exit()
filename = sys.argv[1]
integrate(filename)
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
ec785103f7dcc2df8ee31acb1325c2181f65698c | bump version to 3.1.0 | ndawe/root_numpy,ibab/root_numpy,ibab/root_numpy,ndawe/root_numpy,rootpy/root_numpy,ndawe/root_numpy,ibab/root_numpy,scikit-hep/root_numpy,scikit-hep/root_numpy,ndawe/root_numpy,rootpy/root_numpy,rootpy/root_numpy,scikit-hep/root_numpy,rootpy/root_numpy,scikit-hep/root_numpy,ibab/root_numpy | root_numpy/info.py | root_numpy/info.py | """
_
_ __ ___ ___ | |_ _ __ _ _ _ __ ___ _ __ _ _
| '__/ _ \ / _ \| __| | '_ \| | | | '_ ` _ \| '_ \| | | |
| | | (_) | (_) | |_ | | | | |_| | | | | | | |_) | |_| |
|_| \___/ \___/ \__|___|_| |_|\__,_|_| |_| |_| .__/ \__, | {0}
|_____| |_| |___/
"""
__version__ = '3.1.0.dev'
__doc__ = __doc__.format(__version__)
| """
_
_ __ ___ ___ | |_ _ __ _ _ _ __ ___ _ __ _ _
| '__/ _ \ / _ \| __| | '_ \| | | | '_ ` _ \| '_ \| | | |
| | | (_) | (_) | |_ | | | | |_| | | | | | | |_) | |_| |
|_| \___/ \___/ \__|___|_| |_|\__,_|_| |_| |_| .__/ \__, | {0}
|_____| |_| |___/
"""
__version__ = '3.0.2.dev'
__doc__ = __doc__.format(__version__)
| bsd-3-clause | Python |
ec74d02b09b110df2a4e7be20e9b852a77771c8d | Create form to create Professor | mazulo/simplemooc,mazulo/simplemooc | simplemooc/accounts/forms.py | simplemooc/accounts/forms.py | from django import forms
# from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import get_user_model
from simplemooc.core.utils import generate_hash_key
from simplemooc.core.mail import send_mail_template
from simplemooc.accounts.models import PasswordReset, Professor
User = get_user_model()
class PasswordResetForm(forms.Form):
email = forms.EmailField(label='Email')
def clean_email(self):
email = self.cleaned_data['email']
if User.objects.filter(email=email).exists():
return email
raise forms.ValidationError('Email não cadastrado.')
def save(self):
user = User.objects.get(email=self.cleaned_data['email'])
key = generate_hash_key(user.username)
reset = PasswordReset(key=key, user=user)
reset.save()
template_name = 'accounts/password_reset_mail.html'
subject = 'Criar nova senha no Simple MOOC'
context = {
'reset': reset
}
send_mail_template(subject, template_name, context, [user.email])
class RegisterForm(forms.ModelForm):
is_professor = forms.BooleanField(
label='Você é professor?',
initial=False,
required=False
)
password1 = forms.CharField(
label='Senha',
widget=forms.PasswordInput
)
password2 = forms.CharField(
label='Confirmação de senha',
widget=forms.PasswordInput
)
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2 and password1 != password2:
raise forms.ValidationError('Confirmação de senha incorreta.')
return password2
def save(self, commit=True):
user = super(RegisterForm, self).save(commit=False)
user.set_password(self.cleaned_data['password1'])
if commit:
user.save()
return user
class Meta:
model = User
fields = ['username', 'email', 'is_professor']
class RegisterProfessorForm(RegisterForm):
class Meta(RegisterForm.Meta):
model = Professor
class EditAccountForm(forms.ModelForm):
class Meta:
model = User
fields = ['username', 'email', 'name']
| from django import forms
# from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import get_user_model
from simplemooc.core.utils import generate_hash_key
from simplemooc.core.mail import send_mail_template
from simplemooc.accounts.models import PasswordReset
User = get_user_model()
class PasswordResetForm(forms.Form):
email = forms.EmailField(label='Email')
def clean_email(self):
email = self.cleaned_data['email']
if User.objects.filter(email=email).exists():
return email
raise forms.ValidationError('Email não cadastrado.')
def save(self):
user = User.objects.get(email=self.cleaned_data['email'])
key = generate_hash_key(user.username)
reset = PasswordReset(key=key, user=user)
reset.save()
template_name = 'accounts/password_reset_mail.html'
subject = 'Criar nova senha no Simple MOOC'
context = {
'reset': reset
}
send_mail_template(subject, template_name, context, [user.email])
class RegisterForm(forms.ModelForm):
password1 = forms.CharField(label='Senha', widget=forms.PasswordInput)
password2 = forms.CharField(
label='Confirmação de senha',
widget=forms.PasswordInput
)
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2 and password1 != password2:
raise forms.ValidationError('Confirmação de senha incorreta.')
return password2
def save(self, commit=True):
user = super(RegisterForm, self).save(commit=False)
user.set_password(self.cleaned_data['password1'])
if commit:
user.save()
return user
class Meta:
model = User
fields = ['username', 'email']
class EditAccountForm(forms.ModelForm):
class Meta:
model = User
fields = ['username', 'email', 'name']
| mit | Python |
624baee346bbb24d06b25e3f7c07ef8138dda174 | Remove unused imports from vyconf.types.dummy | vyos-legacy/vyconfd,vyos-legacy/vyconfd | libraries/vyconf/types/dummy/dummy.py | libraries/vyconf/types/dummy/dummy.py | # vyconf.types.dummy: dummy type validators for demonstration and testing
#
# Copyright (C) 2014 VyOS Development Group <maintainers@vyos.net>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
from vyconf.types import TypeValidator, ValidationError, ConstraintFormatError
class AlwaysValid(TypeValidator):
""" Dumb type validator which thinks any value is valid """
name = "alwaysvalid"
def __init__(self):
super(AlwaysValid, self).__init__()
@classmethod
def validate(self, value, constraint=None):
pass
class NeverValid(TypeValidator):
""" Dumb type validator which thinks the value is never valid """
name = "nevervalid"
def __init__(self):
super(NeverValid, self).__init__()
@classmethod
def validate(self, value, constraint=None):
raise ValidationError("Value {0} is not a valid value of type {1}".format(value, self.name))
class BadConstraint(TypeValidator):
""" Dumb type validator, always complains about constraint format """
name = "badconstraint"
def __init__(self):
super(BadConstraint, self).__init__()
@classmethod
def validate(self, value, constraint=None):
raise ConstraintFormatError("Constraint string {0} is not valid for type {1}".format(value, self.name))
| # vyconf.types.dummy: dummy type validators for demonstration and testing
#
# Copyright (C) 2014 VyOS Development Group <maintainers@vyos.net>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import sys
import inspect
from vyconf.types import TypeValidator, ValidationError, ConstraintFormatError
class AlwaysValid(TypeValidator):
""" Dumb type validator which thinks any value is valid """
name = "alwaysvalid"
def __init__(self):
super(AlwaysValid, self).__init__()
@classmethod
def validate(self, value, constraint=None):
pass
class NeverValid(TypeValidator):
""" Dumb type validator which thinks the value is never valid """
name = "nevervalid"
def __init__(self):
super(NeverValid, self).__init__()
@classmethod
def validate(self, value, constraint=None):
raise ValidationError("Value {0} is not a valid value of type {1}".format(value, self.name))
class BadConstraint(TypeValidator):
""" Dumb type validator, always complains about constraint format """
name = "badconstraint"
def __init__(self):
super(BadConstraint, self).__init__()
@classmethod
def validate(self, value, constraint=None):
raise ConstraintFormatError("Constraint string {0} is not valid for type {1}".format(value, self.name))
| lgpl-2.1 | Python |
81e68ca702e4b6ef559bf034033817475a0b4bdc | add StaticApplication to root package | kezabelle/clastic,kezabelle/clastic | clastic/__init__.py | clastic/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
clastic
~~~~~~~
A functional Python web framework that streamlines explicit
development practices while eliminating global state.
:copyright: (c) 2012 by Mahmoud Hashemi
:license: BSD, see LICENSE for more details.
"""
import server
from core import (RESERVED_ARGS,
Application,
SubApplication,
Route)
from middleware import Middleware, DummyMiddleware, GetParamMiddleware
from render import json_response, default_response
from meta import MetaApplication
from static import StaticApplication
from werkzeug.wrappers import Request, Response
from werkzeug.utils import redirect, append_slash_redirect
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
clastic
~~~~~~~
A functional Python web framework that streamlines explicit
development practices while eliminating global state.
:copyright: (c) 2012 by Mahmoud Hashemi
:license: BSD, see LICENSE for more details.
"""
import server
from core import (
RESERVED_ARGS,
Application,
SubApplication,
Route
)
from middleware import Middleware, DummyMiddleware, GetParamMiddleware
from render import json_response, default_response
from meta import MetaApplication
from werkzeug.wrappers import Request, Response
from werkzeug.utils import redirect, append_slash_redirect
| bsd-3-clause | Python |
59e31ecf0e23eadfb064baf1e50269795e1ecd45 | test times | tschaume/pymatgen,vorwerkc/pymatgen,czhengsci/pymatgen,fraricci/pymatgen,gVallverdu/pymatgen,vorwerkc/pymatgen,mbkumar/pymatgen,setten/pymatgen,montoyjh/pymatgen,tallakahath/pymatgen,johnson1228/pymatgen,montoyjh/pymatgen,blondegeek/pymatgen,setten/pymatgen,davidwaroquiers/pymatgen,Bismarrck/pymatgen,johnson1228/pymatgen,xhqu1981/pymatgen,mbkumar/pymatgen,davidwaroquiers/pymatgen,dongsenfo/pymatgen,vorwerkc/pymatgen,richardtran415/pymatgen,gpetretto/pymatgen,nisse3000/pymatgen,matk86/pymatgen,johnson1228/pymatgen,gpetretto/pymatgen,richardtran415/pymatgen,gVallverdu/pymatgen,aykol/pymatgen,aykol/pymatgen,czhengsci/pymatgen,Bismarrck/pymatgen,tschaume/pymatgen,setten/pymatgen,xhqu1981/pymatgen,aykol/pymatgen,Bismarrck/pymatgen,gpetretto/pymatgen,matk86/pymatgen,johnson1228/pymatgen,nisse3000/pymatgen,ndardenne/pymatgen,tallakahath/pymatgen,dongsenfo/pymatgen,gmatteo/pymatgen,gmatteo/pymatgen,davidwaroquiers/pymatgen,richardtran415/pymatgen,blondegeek/pymatgen,fraricci/pymatgen,mbkumar/pymatgen,gpetretto/pymatgen,Bismarrck/pymatgen,fraricci/pymatgen,setten/pymatgen,nisse3000/pymatgen,tschaume/pymatgen,montoyjh/pymatgen,ndardenne/pymatgen,gVallverdu/pymatgen,tschaume/pymatgen,Bismarrck/pymatgen,blondegeek/pymatgen,dongsenfo/pymatgen,dongsenfo/pymatgen,richardtran415/pymatgen,tallakahath/pymatgen,davidwaroquiers/pymatgen,nisse3000/pymatgen,mbkumar/pymatgen,fraricci/pymatgen,gVallverdu/pymatgen,vorwerkc/pymatgen,czhengsci/pymatgen,ndardenne/pymatgen,czhengsci/pymatgen,montoyjh/pymatgen,xhqu1981/pymatgen,matk86/pymatgen,matk86/pymatgen,blondegeek/pymatgen,tschaume/pymatgen | pymatgen/io/gwwrapper/tests/test_times.py | pymatgen/io/gwwrapper/tests/test_times.py | #!/usr/bin/env python
__author__ = 'setten'
import timeit
import os
def test_write():
"""file write test function"""
l = []
for i in range(100000):
l.append(i)
f = open('test', 'w')
f.write(str(l))
f.close()
def test_read():
f = open('test', mode='r')
line = f.read()
f.close()
def test_make_folders():
for i in range(0, 100, 1):
os.mkdir('test_folder_%s' % i)
os.rmdir('test_folder_%s' % i)
def test_cd():
for i in range(0, 100, 1):
os.mkdir('test_folder_%s' % i)
os.chdir('test_folder_%s' % i)
os.chdir('..')
os.rmdir('test_folder_%s' % i)
if __name__ == '__main__':
n = 100
print 'write', timeit.timeit("test_write()", setup="from __main__ import test_write", number=n)
print 'read', timeit.timeit("test_read()", setup="from __main__ import test_read", number=n)
print 'mk folders', timeit.timeit("test_make_folders()", setup="from __main__ import test_make_folders", number=n)
print 'cd folders', timeit.timeit("test_cd()", setup="from __main__ import test_cd", number=n)
| #!/usr/bin/env python
__author__ = 'setten'
import timeit
import os
def test_write():
"""file write test function"""
l = []
for i in range(100000):
l.append(i)
f = open('test', 'w')
f.write(str(l))
f.close()
def test_read():
f = open('test', mode='r')
line = f.read()
f.close()
def test_folders():
for n in range(0, 100, 1):
os.mkdir('test_folder')
os.chdir('test_folder')
os.chdir('..')
os.rmdir('test_folder')
if __name__ == '__main__':
n = 100
print 'write', timeit.timeit("test_write()", setup="from __main__ import test_write", number=n)
print 'read', timeit.timeit("test_read()", setup="from __main__ import test_read", number=n)
print 'folders', timeit.timeit("test_folders()", setup="from __main__ import test_folders", number=n)
| mit | Python |
6c0949c50ab5b7b2ca6e26a48cb6f53866f82d0a | Update gettlds.py helper script | moreati/python3-openid,necaris/python3-openid,moreati/python3-openid,misli/python3-openid,necaris/python3-openid,moreati/python3-openid,isagalaev/sm-openid,misli/python3-openid,misli/python3-openid | admin/gettlds.py | admin/gettlds.py | #!/usr/bin/env python3
"""
Fetch the current TLD list from the IANA Web site, parse it, and print
an expression suitable for direct insertion into each library's trust
root validation module.
Usage:
python gettlds.py (php|python|ruby)
Then cut-n-paste.
"""
import urllib.request
import sys
LANGS = {
'php': (
r"'/\.(", # prefix
"'", # line prefix
"|", # separator
"|' .", # line suffix
r")\.?$/'" # suffix
),
'python': (
"['",
"'",
"', '",
"',",
"']"
),
'ruby': (
"%w'",
"",
" ",
"",
"'"
),
}
if __name__ == '__main__':
lang = sys.argv[1]
prefix, line_prefix, separator, line_suffix, suffix = LANGS[lang]
iana_url = 'http://data.iana.org/TLD/tlds-alpha-by-domain.txt'
with urllib.request.urlopen(iana_url) as iana_resource:
tlds = []
output_line = "" # initialize a line of output
for input_line in iana_resource:
if input_line.startswith(b'#'): # skip comments
continue
tld = input_line.decode("utf-8").strip().lower()
nxt_output_line = output_line + prefix + tld # update current line
if len(nxt_output_line) > 60:
# Long enough -- print it and reinitialize to only hold the
# most recent TLD
print(output_line + line_suffix)
output_line = line_prefix + tld
else:
# Not long enough, so update it to the concatenated version
output_line = nxt_output_line
prefix = separator
# Print the final line of remaining output
print(output_line + suffix)
| """
Fetch the current TLD list from the IANA Web site, parse it, and print
an expression suitable for direct insertion into each library's trust
root validation module
Usage:
python gettlds.py (php|python|ruby)
Then cut-n-paste.
"""
import urllib2
import sys
langs = {
'php': (r"'/\.(",
"'", "|", "|' .",
r")\.?$/'"),
'python': ("['",
"'", "', '", "',",
"']"),
'ruby': ("%w'",
"", " ", "",
"'"),
}
lang = sys.argv[1]
prefix, line_prefix, separator, line_suffix, suffix = langs[lang]
f = urllib2.urlopen('http://data.iana.org/TLD/tlds-alpha-by-domain.txt')
tlds = []
output_line = ""
for input_line in f:
if input_line.startswith('#'):
continue
tld = input_line.strip().lower()
new_output_line = output_line + prefix + tld
if len(new_output_line) > 60:
print output_line + line_suffix
output_line = line_prefix + tld
else:
output_line = new_output_line
prefix = separator
print output_line + suffix
| apache-2.0 | Python |
67ce14f2c3650033714461ef95c38a54c6817a2a | add tests | dswah/pyGAM | pygam/tests/test_GAM_params.py | pygam/tests/test_GAM_params.py | # -*- coding: utf-8 -*-
import numpy as np
import pytest
from pygam import *
def test_lam_non_neg_array_like(cake_X_y):
"""
lambda must be a non-negative float or array of floats
"""
X, y = cake_X_y
try:
gam = LinearGAM(lam=-1).fit(X, y)
except ValueError:
assert(True)
try:
gam = LinearGAM(lam=['hi']).fit(X, y)
except ValueError:
assert(True)
def test_penalties_must_be_or_contain_callable_or_auto(mcycle_X_y):
"""
penalty matrix must be/contain callable or auto, otherwise raise ValueError
"""
X, y = mcycle_X_y
with pytest.raises(ValueError):
gam = LinearGAM(terms=s(0, penalties='continuous'))
# now do iterable
with pytest.raises(ValueError):
gam = LinearGAM(s(0, penalties=['continuous']))
def test_intercept(mcycle_X_y):
"""
should be able to just fit intercept
"""
X, y = mcycle_X_y
gam = LinearGAM(terms=intercept)
gam.fit(X, y)
def test_require_one_term(mcycle_X_y):
"""
need at least one term
"""
X, y = mcycle_X_y
gam = LinearGAM(terms=[])
with pytest.raises(ValueError):
gam.fit(X, y)
def test_linear_regression(mcycle_X_y):
"""
should be able to do linear regression
"""
X, y = mcycle_X_y
gam = LinearGAM(l(0)).fit(X, y)
assert(gam._is_fitted)
def test_compute_stats_even_if_not_enough_iters(default_X_y):
"""
GAM should collect model statistics after optimization ends even if it didnt converge
"""
X, y = default_X_y
gam = LogisticGAM(max_iter=1).fit(X, y)
assert(hasattr(gam, 'statistics_'))
def test_easy_plural_arguments(wage_X_y):
"""
it should easy to set global term arguments
"""
X, y = wage_X_y
gam = LinearGAM(n_splines=10).fit(X, y)
assert gam._is_fitted
assert gam.n_splines == [10] * X.shape[1]
class TestRegressions(object):
def test_no_explicit_terms_custom_lambda(self, wage_X_y):
X, y = wage_X_y
# before easy-pluralization, this command would fail
gam = LinearGAM(lam=0.6).gridsearch(X, y)
assert gam._is_fitted
# same with
gam = LinearGAM()
gam.n_splines = 10
gam.gridsearch(X, y)
assert gam._is_fitted
# TODO categorical dtypes get no fit linear even if fit linear TRUE
# TODO categorical dtypes get their own number of splines
# TODO can force continuous dtypes on categorical vars if wanted
| # -*- coding: utf-8 -*-
import numpy as np
import pytest
from pygam import *
def test_lam_non_neg_array_like(cake_X_y):
"""
lambda must be a non-negative float or array of floats
"""
X, y = cake_X_y
try:
gam = LinearGAM(lam=-1).fit(X, y)
except ValueError:
assert(True)
try:
gam = LinearGAM(lam=['hi']).fit(X, y)
except ValueError:
assert(True)
def test_penalties_must_be_or_contain_callable_or_auto(mcycle_X_y):
"""
penalty matrix must be/contain callable or auto, otherwise raise ValueError
"""
X, y = mcycle_X_y
with pytest.raises(ValueError):
gam = LinearGAM(terms=s(0, penalties='continuous'))
# now do iterable
with pytest.raises(ValueError):
gam = LinearGAM(s(0, penalties=['continuous']))
def test_intercept(mcycle_X_y):
"""
should be able to just fit intercept
"""
X, y = mcycle_X_y
gam = LinearGAM(terms=intercept)
gam.fit(X, y)
def test_require_one_term(mcycle_X_y):
"""
need at least one term
"""
X, y = mcycle_X_y
gam = LinearGAM(terms=[])
with pytest.raises(ValueError):
gam.fit(X, y)
def test_linear_regression(mcycle_X_y):
"""
should be able to do linear regression
"""
X, y = mcycle_X_y
gam = LinearGAM(l(0)).fit(X, y)
assert(gam._is_fitted)
def test_compute_stats_even_if_not_enough_iters(default_X_y):
"""
should be able to do linear regression
"""
X, y = default_X_y
gam = LogisticGAM(max_iter=1).fit(X, y)
assert(hasattr(gam, 'statistics_'))
# TODO categorical dtypes get no fit linear even if fit linear TRUE
# TODO categorical dtypes get their own number of splines
# TODO can force continuous dtypes on categorical vars if wanted
| apache-2.0 | Python |
3b0c721afbe8b8d86174378fea4b81fc2dc24aba | Include subsecond ticking times | cropleyb/pentai,cropleyb/pentai,cropleyb/pentai | gui_player.py | gui_player.py |
from kivy.clock import Clock
import time
import audio as a_m
from defines import *
class GuiPlayer(object):
def __init__(self, colour, widget, game):
self.colour = colour
self.player = game.get_player(colour)
self.widget = widget
self.game = game
total_time = game.get_total_time()
self.total_time = total_time
self.show_remaining()
self.ticking = False
self.last_tick_time = time.time()
def prompt_for_move(self, colour):
if not self.ticking:
self.tick_audio(0, colour)
self.tick_video(0)
self.ticking = True
def make_move(self):
# Stop both timers
elapsed = time.time() - self.last_tick_time
# In case of leap seconds, time changes etc.
if elapsed > 0:
self.game.tick(self.colour, elapsed)
Clock.unschedule(self.tick_audio)
Clock.unschedule(self.tick_video)
self.ticking = False
def tick_audio(self, dt, colour=None):
if colour is None:
colour = self.last_colour
else:
self.last_colour = colour
if dt > 0:
# Make tick sound
a_m.instance.tick(colour)
tt = self.total_time
rem = self.game.remaining_time(self.colour)
if rem > 0:
interval = (.5 * (1 + (rem / tt))) ** 2
Clock.schedule_once(self.tick_audio, interval)
def tick_video(self, dt):
rem = self.game.tick(self.colour, dt)
next_tick_diff = rem % 1.0
self.last_tick_time = time.time()
self.show_remaining()
if rem > 0:
Clock.schedule_once(self.tick_video, next_tick_diff)
else:
other_colour = opposite_colour(self.game.to_move_colour())
def refresh(self):
self.show_remaining()
def show_remaining(self):
rem = round(self.game.remaining_time(self.colour))
self.widget.text = "%2d:%02d" % (rem/60, rem%60)
|
from kivy.clock import Clock
import audio as a_m
from defines import *
class GuiPlayer(object):
def __init__(self, colour, widget, game):
self.colour = colour
self.player = game.get_player(colour)
self.widget = widget
self.game = game
total_time = game.get_total_time()
self.total_time = total_time
self.show_remaining()
self.ticking = False
def prompt_for_move(self, colour):
if not self.ticking:
self.tick_audio(0, colour)
self.tick_video(0)
self.ticking = True
def make_move(self):
# Stop both timers
Clock.unschedule(self.tick_audio)
Clock.unschedule(self.tick_video)
self.ticking = False
def tick_audio(self, dt, colour=None):
if colour is None:
colour = self.last_colour
else:
self.last_colour = colour
if dt > 0:
# Make tick sound
a_m.instance.tick(colour)
tt = self.total_time
rem = self.game.remaining_time(self.colour)
if rem > 0:
interval = (.5 * (1 + (rem / tt))) ** 2
Clock.schedule_once(self.tick_audio, interval)
def tick_video(self, dt):
rem = self.game.tick(self.colour, dt)
next_tick_time = rem % 1.0
self.show_remaining()
if rem > 0:
Clock.schedule_once(self.tick_video, next_tick_time)
else:
other_colour = opposite_colour(self.game.to_move_colour())
def refresh(self):
self.show_remaining()
def show_remaining(self):
rem = round(self.game.remaining_time(self.colour))
self.widget.text = "%2d:%02d" % (rem/60, rem%60)
| mit | Python |
be759ce3ae4843a6b14e6d9883f38c6175ec5691 | Change regex to make it </path> inclusive | JWDebelius/American-Gut,wasade/American-Gut,biocore/American-Gut,EmbrietteH/American-Gut,JWDebelius/American-Gut,mortonjt/American-Gut,EmbrietteH/American-Gut,cuttlefishh/American-Gut,wasade/American-Gut,biocore/American-Gut | americangut/format.py | americangut/format.py | #!/usr/bin/env python
__author__ = "Yoshiki Vazquez Baeza"
__copyright__ = "Copyright 2013, The American Gut Project"
__credits__ = ["Yoshiki Vazquez Baeza", "Adam Robbins-Pianka"]
__license__ = "BSD"
__version__ = "unversioned"
__maintainer__ = "Yoshiki Vazquez Baeza"
__email__ = "yoshiki.vazquezbaeza@colorado.edu"
from re import compile, findall, escape
def format_print_for_magnified_sample(sample_id, per_sample_file_string,
global_file_string, preserve_Z_position=False):
"""Format SVG files as generated by Emperor for per sample figures
Inputs:
sample_id: identifier to look for in both files
per_sample_file_string: file containing a single sample highlighted
global_file_string: file containing multiple elements where the single
sample is highlighted
preserve_Z_position: whether the highlighted object should or should not be
positioned in the correct place depth-wise
Output:
formatted_string: SVG formatted string where the sample_id element is
highlighed in the global_file_string
Raises:
RuntimeError, if there's an inconsistency or there are non-matching sample
identifiers between files
"""
escaped_sample_id = escape(sample_id)
# find the matches of the tags only within the same path
re = compile('<path id="%s".*?</path>' % escaped_sample_id)
big_sphere_contents = findall(re, per_sample_file_string)
small_sphere_contents = findall(re, global_file_string)
# this indicates an internal inconsistency so let the user know
if big_sphere_contents == [] or small_sphere_contents == []:
raise RuntimeError, "There's a problem with the formatting of the SVG"+\
" files"
temp = global_file_string
# this will make the sample to be placed in it's correct Z position
if preserve_Z_position:
# iter over each of the matches
for index in range(0, min([len(small_sphere_contents),
len(big_sphere_contents)])):
temp = temp.replace(small_sphere_contents[index],
big_sphere_contents[index])
# this will make the sample positioned at the very top of the plot
else:
# remove all the occurrences of such object from the plot
for index in range(0, len(small_sphere_contents)):
temp = temp.replace(small_sphere_contents[index], '')
# append at the very end the object to the plot
temp = temp.replace('</svg>', ''.join([element for element
in big_sphere_contents])+'</svg>')
return temp
| #!/usr/bin/env python
__author__ = "Yoshiki Vazquez Baeza"
__copyright__ = "Copyright 2013, The American Gut Project"
__credits__ = ["Yoshiki Vazquez Baeza"]
__license__ = "BSD"
__version__ = "unversioned"
__maintainer__ = "Yoshiki Vazquez Baeza"
__email__ = "yoshiki.vazquezbaeza@colorado.edu"
from re import compile, findall, escape
def format_print_for_magnified_sample(sample_id, per_sample_file_string,
global_file_string, preserve_Z_position=False):
"""Format SVG files as generated by Emperor for per sample figures
Inputs:
sample_id: identifier to look for in both files
per_sample_file_string: file containing a single sample highlighted
global_file_string: file containing multiple elements where the single
sample is highlighted
preserve_Z_position: whether the highlighted object should or should not be
positioned in the correct place depth-wise
Output:
formatted_string: SVG formatted string where the sample_id element is
highlighed in the global_file_string
Raises:
RuntimeError, if there's an inconsistency or there are non-matching sample
identifiers between files
"""
escaped_sample_id = escape(sample_id)
re = compile('<path id="%s".*?>' % escaped_sample_id)
big_sphere_contents = findall(re, per_sample_file_string)
small_sphere_contents = findall(re, global_file_string)
# this indicates an internal inconsistency so let the user know
if big_sphere_contents == [] or small_sphere_contents == []:
raise RuntimeError, "There's a problem with the formatting of the SVG"+\
" files"
temp = global_file_string
# this will make the sample to be placed in it's correct Z position
if preserve_Z_position:
# iter over each of the matches
for index in range(0, min([len(small_sphere_contents),
len(big_sphere_contents)])):
temp = temp.replace(small_sphere_contents[index],
big_sphere_contents[index])
# this will make the sample positioned at the very top of the plot
else:
# remove all the occurrences of such object from the plot
for index in range(0, len(small_sphere_contents)):
temp = temp.replace(small_sphere_contents[index], '')
# append at the very end the object to the plot
temp = temp.replace('</svg>', ''.join([element for element
in big_sphere_contents])+'</svg>')
return temp
| bsd-3-clause | Python |
6303224fe1d61bf60908d80d9c93ca500dcf652e | add pool info | N0stack/tora,N0stack/tora | src/info/pool.py | src/info/pool.py | # coding: UTF-8
import libvirt
from xml.dom import minidom
from kvmconnect.base import BaseReadOnly
class PoolInfo(BaseReadOnly):
"""
Show Storage Pool's information
"""
def __init__(self):
super().__init__()
# Show storage pool's information
def get_pool_info_all(self):
storage_pool = []
pools = self.connection.listAllStoragePools(0)
for pool in pools:
pool_info = {}
info = pool.info()
pool_info.update({'name': pool.name()})
pool_info.update({'uuid': pool.UUIDString()})
pool_info.update({'Autostart': pool.autostart()})
pool_info.update({'state': info[0]})
pool_info.update({'capacity': info[1]})
pool_info.update({'allocation': info[2]})
pool_info.update({'available': info[3]})
pool_info.update({'is_active': pool.isActive()})
pool_info.update({'is_persistent': pool.isPersistent()})
pool_info.update({'volumes': pool.listVolumes()})
storage_pool.append(pool_info)
return {"pools": storage_pool}
def get_pool_info(self, name):
pool = self.connection.storagePoolLookupByName(name)
info = pool.info()
pool_info = {}
pool_info.update({'name': pool.name()})
pool_info.update({'name': pool.UUIDString()})
pool_info.update({'name': pool.autostart()})
pool_info.update({'name': info[0]()})
pool_info.update({'name': info[1]()})
pool_info.update({'name': info[2]()})
pool_info.update({'name': info[3]()})
pool_info.update({'name': pool.isActive()})
pool_info.update({'name': pool.isPersistent()})
pool_info.update({'name': pool.listVolumes()})
return pool_info
| # coding: UTF-8
import libvirt
from xml.dom import minidom
from kvmconnect.base import BaseReadOnly
class PoolInfo(BaseReadOnly):
"""
Show Storage Pool's information
"""
def __init__(self):
super().__init__()
# Show storage pool's information
def get_pool_info_all(self):
storage_pool = []
pools = self.connection.listAllStoragePools(0)
for pool in pools:
pool_info = {}
info = pool.info()
pool_info.update({'name': pool.name()})
pool_info.update({'uuid': pool.UUIDString()})
pool_info.update({'Autostart': pool.autostart()})
pool_info.update({'state': info[0]})
pool_info.update({'capacity': info[1]})
pool_info.update({'allocation': info[2]})
pool_info.update({'available': info[3]})
pool_info.update({'is_active': pool.isActive()})
pool_info.update({'is_persistent': pool.isPersistent()})
pool_info.update({'volumes': pool.listVolumes()})
storage_pool.append(pool_info)
return {"pools": storage_pool}
| mit | Python |
7d3a45afec5a02608e5ec198fcf9d4b0f6cbd437 | Remove CSRF protection from tests | hizardapp/Hizard,hizardapp/Hizard,hizardapp/Hizard | hyrodactil/hyrodactil/settings/test.py | hyrodactil/hyrodactil/settings/test.py | from base import *
########## TEST SETTINGS
TEST_RUNNER = 'discover_runner.DiscoverRunner'
TEST_DISCOVER_TOP_LEVEL = SITE_ROOT
TEST_DISCOVER_ROOT = join(SITE_ROOT, "tests")
# Default is test*.py
TEST_DISCOVER_PATTERN = "*"
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
########## IN-MEMORY TEST DATABASE
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
"USER": "",
"PASSWORD": "",
"HOST": "",
"PORT": "",
},
}
######### FAST HASHING FOR PASSWORDS
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
SOUTH_TESTS_MIGRATE = False
MEDIA_ROOT = normpath(join(SITE_ROOT, 'tests/media'))
| from base import *
########## TEST SETTINGS
TEST_RUNNER = 'discover_runner.DiscoverRunner'
TEST_DISCOVER_TOP_LEVEL = SITE_ROOT
TEST_DISCOVER_ROOT = join(SITE_ROOT, "tests")
# Default is test*.py
TEST_DISCOVER_PATTERN = "*"
########## IN-MEMORY TEST DATABASE
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
"USER": "",
"PASSWORD": "",
"HOST": "",
"PORT": "",
},
}
######### FAST HASHING FOR PASSWORDS
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
SOUTH_TESTS_MIGRATE = False
MEDIA_ROOT = normpath(join(SITE_ROOT, 'tests/media'))
| mit | Python |
84c5d376997f0db55ce1a458c3c1b5d5b0aa8442 | fix bug in webserver | zhemao/lerner,zhemao/lerner | webserver.py | webserver.py | #!/usr/bin/env python
from flask import Flask, request
from gevent import monkey; monkey.patch_socket()
from gevent.wsgi import WSGIServer
import redis
ps = redis.StrictRedis().pubsub()
app = Flask(__name__)
@app.route('/github', methods=['POST'])
def github():
payload = request.form['payload']
owner = payload['repository']['owner']['name']
repo_name = owner + '/' + payload['repository']['name']
repo_url = payload['repository']['url']
headcommit = payload['commits'][0]
commit_message = headcommit['message']
author = headcommit['author']['name']
message = '%s commited to %s: %s' % (author, repo_name, commit_message)
print(message)
ps.publish(repo_url, message)
if __name__ == '__main__':
WSGIServer(('0.0.0.0', 8080), app).serve_forever()
| #!/usr/bin/env python
from flask import Flask, request
from gevent import monkey; monkey.patch_socket()
from gevent.wsgi import WSGIServer
import redis
ps = redis.StrictRedis().pubsub()
app = Flask(__name__)
@app.route('/github', methods=['POST'])
def github():
payload = request.POST['payload']
owner = payload['repository']['owner']['name']
repo_name = owner + '/' + payload['repository']['name']
repo_url = payload['repository']['url']
headcommit = payload['commits'][0]
commit_message = headcommit['message']
author = headcommit['author']['name']
message = '%s commited to %s: %s' % (author, repo_name, commit_message)
ps.publish(repo_url, message)
if __name__ == '__main__':
WSGIServer(('0.0.0.0', 8080), app).serve_forever()
| mit | Python |
e39f2243cd596da8558a7e83364d55a16c332f67 | Fix setting access | dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq | corehq/sql_db/routers.py | corehq/sql_db/routers.py | from django.conf import settings
from .config import PartitionConfig
PROXY_APP = 'sql_proxy_accessors'
FORM_PROCESSOR_APP = 'form_processor'
SQL_ACCESSORS_APP = 'sql_accessors'
ICDS_REPORTS_APP = 'icds_reports'
class PartitionRouter(object):
def db_for_read(self, model, **hints):
return db_for_read_write(model)
def db_for_write(self, model, **hints):
return db_for_read_write(model)
def allow_migrate(self, db, model):
app_label = model._meta.app_label
return allow_migrate(db, app_label)
class MonolithRouter(object):
def allow_migrate(self, db, app_label, model=None, **hints):
return app_label != PROXY_APP
def allow_migrate(db, app_label):
if app_label == ICDS_REPORTS_APP:
return hasattr(settings, "ICDS_UCR_DATABASE_ALIAS") and db == settings.ICDS_UCR_DATABASE_ALIAS
if not settings.USE_PARTITIONED_DATABASE:
return app_label != PROXY_APP
partition_config = PartitionConfig()
if app_label == PROXY_APP:
return db == partition_config.get_proxy_db()
elif app_label == FORM_PROCESSOR_APP:
return (
db == partition_config.get_proxy_db() or
db in partition_config.get_form_processing_dbs()
)
elif app_label == SQL_ACCESSORS_APP:
return db in partition_config.get_form_processing_dbs()
else:
return db == partition_config.get_main_db()
def db_for_read_write(model):
if not settings.USE_PARTITIONED_DATABASE:
return 'default'
app_label = model._meta.app_label
config = PartitionConfig()
if app_label == FORM_PROCESSOR_APP:
return config.get_proxy_db()
else:
return config.get_main_db()
| from django.conf import settings
from .config import PartitionConfig
PROXY_APP = 'sql_proxy_accessors'
FORM_PROCESSOR_APP = 'form_processor'
SQL_ACCESSORS_APP = 'sql_accessors'
ICDS_REPORTS_APP = 'icds_reports'
class PartitionRouter(object):
def db_for_read(self, model, **hints):
return db_for_read_write(model)
def db_for_write(self, model, **hints):
return db_for_read_write(model)
def allow_migrate(self, db, model):
app_label = model._meta.app_label
return allow_migrate(db, app_label)
class MonolithRouter(object):
def allow_migrate(self, db, app_label, model=None, **hints):
return app_label != PROXY_APP
def allow_migrate(db, app_label):
if app_label == ICDS_REPORTS_APP:
return db == settings.ICDS_UCR_DATABASE_ALIAS
if not settings.USE_PARTITIONED_DATABASE:
return app_label != PROXY_APP
partition_config = PartitionConfig()
if app_label == PROXY_APP:
return db == partition_config.get_proxy_db()
elif app_label == FORM_PROCESSOR_APP:
return (
db == partition_config.get_proxy_db() or
db in partition_config.get_form_processing_dbs()
)
elif app_label == SQL_ACCESSORS_APP:
return db in partition_config.get_form_processing_dbs()
else:
return db == partition_config.get_main_db()
def db_for_read_write(model):
if not settings.USE_PARTITIONED_DATABASE:
return 'default'
app_label = model._meta.app_label
config = PartitionConfig()
if app_label == FORM_PROCESSOR_APP:
return config.get_proxy_db()
else:
return config.get_main_db()
| bsd-3-clause | Python |
4fbb9020bcd280f70f9905886cdf2ec3d581f1ed | Update loggers configuration | ONSdigital/eq-survey-runner,ONSdigital/eq-survey-runner,ONSdigital/eq-survey-runner,ONSdigital/eq-survey-runner | application.py | application.py | #!/usr/bin/env python
import os
from app import create_app
from flask.ext.script import Manager, Server
import watchtower
import logging
application = create_app(
os.getenv('EQ_ENVIRONMENT') or 'development'
)
application.debug = True
manager = Manager(application)
port = int(os.environ.get('PORT', 5000))
manager.add_command("runserver", Server(host='0.0.0.0', port=port))
cloud_watch_handler = watchtower.CloudWatchLogHandler()
logging.basicConfig(level=logging.INFO)
logging.getLogger().addHandler(cloud_watch_handler)
logging.getLogger(__name__).addHandler(cloud_watch_handler)
logging.getLogger('werkzeug').addHandler(cloud_watch_handler)
if __name__ == '__main__':
manager.run()
| #!/usr/bin/env python
import os
from app import create_app
from flask.ext.script import Manager, Server
import watchtower
import logging
application = create_app(
os.getenv('EQ_ENVIRONMENT') or 'development'
)
application.debug = True
manager = Manager(application)
port = int(os.environ.get('PORT', 5000))
manager.add_command("runserver", Server(host='0.0.0.0', port=port))
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.addHandler(watchtower.CloudWatchLogHandler())
if __name__ == '__main__':
manager.run()
| mit | Python |
d11f95b356ef8f53d063c4196a84b1de38a8ea00 | fix name of info.pool | N0stack/tora,N0stack/tora | src/info/pool.py | src/info/pool.py | # coding: UTF-8
import libvirt
from xml.dom import minidom
from kvmconnect.base import BaseReadOnly
class PoolInfo(BaseReadOnly):
"""
Show Storage Pool's information
"""
def __init__(self):
super().__init__()
# Show storage pool's information
def get_pool_info_all(self):
storage_pool = []
pools = self.connection.listAllStoragePools(0)
for pool in pools:
pool_info = {}
info = pool.info()
pool_info.update({'name': pool.name()})
pool_info.update({'uuid': pool.UUIDString()})
pool_info.update({'Autostart': pool.autostart()})
pool_info.update({'state': info[0]})
pool_info.update({'capacity': info[1]})
pool_info.update({'allocation': info[2]})
pool_info.update({'available': info[3]})
pool_info.update({'is_active': pool.isActive()})
pool_info.update({'is_persistent': pool.isPersistent()})
pool_info.update({'volumes': pool.listVolumes()})
storage_pool.append(pool_info)
return {"pools": storage_pool}
| # coding: UTF-8
import libvirt
from xml.dom import minidom
from kvmconnect.base import BaseReadOnly
class PoolInfo(BaseReadOnly):
"""
Show Storage Pool's information
"""
def __init__(self):
super().__init__()
# Show storage pool's information
def get_storage_info_all(self):
storage = []
pools = self.connection.listAllStoragePools(0)
for pool in pools:
pool_info = {}
info = pool.info()
pool_info.update({'name': pool.name()})
pool_info.update({'uuid': pool.UUIDString()})
pool_info.update({'Autostart': pool.autostart()})
pool_info.update({'state': info[0]})
pool_info.update({'capacity': info[1]})
pool_info.update({'allocation': info[2]})
pool_info.update({'available': info[3]})
pool_info.update({'is_active': pool.isActive()})
pool_info.update({'is_persistent': pool.isPersistent()})
pool_info.update({'volumes': pool.listVolumes()})
storage.append(pool_info)
return storage
| mit | Python |
abbbc5eb7ea0c49868631af5ce5bb2502bae46ed | Change name of pandas letor converter | jma127/pyltr,jma127/pyltr | pyltr/data/pandas_converter.py | pyltr/data/pandas_converter.py | import pandas as pd
class PandasLetorConverter(object):
'''
Class Converter implements parsing from original letor txt files to
pandas data frame representation.
'''
def __init__(self, path):
'''
Arguments:
path: path to letor txt file
'''
self._path = path
@property
def path(self):
return self._path
@path.setter
def path(self, p):
self._path = p
def _load_file(self):
'''
Loads and parses raw letor txt file.
Return:
letor txt file parsed to csv in raw format
'''
return pd.read_csv(str(self._path), sep=" ", header=None)
def _drop_col(self, df):
'''
Drops last column, which was added in the parsing procedure due to a
trailing white space for each sample in the text file
Arguments:
df: pandas dataframe
Return:
df: original df with last column dropped
'''
return df.drop(df.columns[-1], axis=1)
def _split_colon(self, df):
'''
Splits the data on the colon and transforms it into a tabular format
where columns are features and rows samples. Cells represent feature
values per sample.
Arguments:
df: pandas dataframe object
Return:
df: original df with string pattern ':' removed; columns named appropriately
'''
for col in range(1,len(df.columns)):
df.loc[:,col] = df.loc[:,col].apply(lambda x: str(x).split(':')[1])
df.columns = ['rel', 'qid'] + [str(x) for x in range(1,len(df.columns)-1)] # renaming cols
return df
def convert(self):
'''
Performs final conversion.
Return:
fully converted pandas dataframe
'''
df_raw = self._load_file(self._path)
df_drop = self._drop_col(df_raw)
return self._split_colon(df_drop)
| import pandas as pd
class Pandas_Converter(object):
'''
Class Converter implements parsing from original letor txt files to
pandas data frame representation.
'''
def __init__(self, path):
'''
Arguments:
path: path to letor txt file
'''
self._path = path
@property
def path(self):
return self._path
@path.setter
def path(self, p):
self._path = p
def _load_file(self):
'''
Loads and parses raw letor txt file.
Return:
letor txt file parsed to csv in raw format
'''
return pd.read_csv(str(self._path), sep=" ", header=None)
def _drop_col(self, df):
'''
Drops last column, which was added in the parsing procedure due to a
trailing white space for each sample in the text file
Arguments:
df: pandas dataframe
Return:
df: original df with last column dropped
'''
return df.drop(df.columns[-1], axis=1)
def _split_colon(self, df):
'''
Splits the data on the colon and transforms it into a tabular format
where columns are features and rows samples. Cells represent feature
values per sample.
Arguments:
df: pandas dataframe object
Return:
df: original df with string pattern ':' removed; columns named appropriately
'''
for col in range(1,len(df.columns)):
df.loc[:,col] = df.loc[:,col].apply(lambda x: str(x).split(':')[1])
df.columns = ['rel', 'qid'] + [str(x) for x in range(1,len(df.columns)-1)] # renaming cols
return df
def convert(self):
'''
Performs final conversion.
Return:
fully converted pandas dataframe
'''
df_raw = self._load_file(self._path)
df_drop = self._drop_col(df_raw)
return self._split_colon(df_drop)
| bsd-3-clause | Python |
08aa5214a1b1a5fc6872de76b12cf97f5ceb03c9 | Disable JHU kpoint generationt test. | gmatteo/pymatgen,vorwerkc/pymatgen,gVallverdu/pymatgen,fraricci/pymatgen,gVallverdu/pymatgen,gVallverdu/pymatgen,fraricci/pymatgen,vorwerkc/pymatgen,davidwaroquiers/pymatgen,gmatteo/pymatgen,davidwaroquiers/pymatgen,davidwaroquiers/pymatgen,davidwaroquiers/pymatgen,gVallverdu/pymatgen,vorwerkc/pymatgen,vorwerkc/pymatgen,fraricci/pymatgen,fraricci/pymatgen | pymatgen/ext/tests/test_jhu.py | pymatgen/ext/tests/test_jhu.py | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import requests
from pymatgen.ext.jhu import get_kpoints
from pymatgen.io.vasp.inputs import Incar
from pymatgen.io.vasp.sets import MPRelaxSet
from pymatgen.util.testing import PymatgenTest
__author__ = "Joseph Montoya"
__copyright__ = "Copyright 2017, The Materials Project"
__maintainer__ = "Joseph Montoya"
__email__ = "montoyjh@lbl.gov"
__date__ = "June 22, 2017"
website_is_up = requests.get("http://muellergroup.jhu.edu:8080").status_code == 200
@unittest.skipIf(True, "This code is way too buggy to be tested.")
class JhuTest(PymatgenTest):
_multiprocess_shared_ = True
def test_get_kpoints(self):
si = PymatgenTest.get_structure("Si")
input_set = MPRelaxSet(si)
kpoints = get_kpoints(si, incar=input_set.incar)
if __name__ == "__main__":
unittest.main()
| # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import requests
from pymatgen.ext.jhu import get_kpoints
from pymatgen.io.vasp.inputs import Incar
from pymatgen.io.vasp.sets import MPRelaxSet
from pymatgen.util.testing import PymatgenTest
__author__ = "Joseph Montoya"
__copyright__ = "Copyright 2017, The Materials Project"
__maintainer__ = "Joseph Montoya"
__email__ = "montoyjh@lbl.gov"
__date__ = "June 22, 2017"
website_is_up = requests.get("http://muellergroup.jhu.edu:8080").status_code == 200
@unittest.skipIf(not website_is_up, "http://muellergroup.jhu.edu:8080 is down.")
class JhuTest(PymatgenTest):
_multiprocess_shared_ = True
def test_get_kpoints(self):
si = PymatgenTest.get_structure("Si")
input_set = MPRelaxSet(si)
kpoints = get_kpoints(si, incar=input_set.incar)
if __name__ == "__main__":
unittest.main()
| mit | Python |
89d9787fc5aa595f6d93d49565313212c2f95b6b | Add simple form to flask upload server | stephenbradshaw/pentesting_stuff,stephenbradshaw/pentesting_stuff,stephenbradshaw/pentesting_stuff,stephenbradshaw/pentesting_stuff | helper_servers/flask_upload.py | helper_servers/flask_upload.py | from flask import Flask, render_template, request, redirect, url_for
from werkzeug.utils import secure_filename
import datetime
import os
def timestamp():
return datetime.datetime.now().strftime('%Y%m%d%H%M%S')
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = '/uploads'
@app.route('/', methods=['GET'])
def index():
return '''
<!doctype html>
<title>Hi</title>
Hi
'''
#curl -F file=@"/tmp/test.txt" https://[site]/[app_path]/ul
@app.route('/ul', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
if 'file' not in request.files:
return redirect(request.url)
file = request.files['file']
if file.filename == '':
return redirect(request.url)
if file:
filename = '{}_{}.data'.format(timestamp(), secure_filename(file.filename))
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('upload_file'))
return '''
<!doctype html>
<title>Upload</title>
<form enctype="multipart/form-data" action="/ul" method="POST">
<input type="file" id="file" name="file">
<input type="submit">
</form>
'''
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000)
| from flask import Flask, render_template, request, redirect, url_for
from werkzeug.utils import secure_filename
import datetime
import os
def timestamp():
return datetime.datetime.now().strftime('%Y%m%d%H%M%S')
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = '/uploads'
@app.route('/', methods=['GET'])
def index():
return '''
<!doctype html>
<title>Hi</title>
Hi
'''
#curl -F file=@"/tmp/test.txt" https://[site]/[app_path]/ul
@app.route('/ul', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
if 'file' not in request.files:
return redirect(request.url)
file = request.files['file']
if file.filename == '':
return redirect(request.url)
if file:
filename = '{}_{}.data'.format(timestamp(), secure_filename(file.filename))
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('upload_file'))
return '''
<!doctype html>
<title>Hi</title>
Hi
'''
| bsd-3-clause | Python |
be3f2579907cc21e3320626b29348aaf97cb7986 | Modify error message | thombashi/pytablereader,thombashi/pytablereader,thombashi/pytablereader | pytablereader/csv/formatter.py | pytablereader/csv/formatter.py | # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
import dataproperty
from ..data import TableData
from ..error import InvalidDataError
from ..formatter import TableFormatter
class CsvTableFormatter(TableFormatter):
def to_table_data(self):
if dataproperty.is_empty_sequence(self._loader.header_list):
header_list = self._source_data[0]
if any([
dataproperty.is_empty_string(header) for header in header_list
]):
raise InvalidDataError(
"the first line includes empty string item."
"all of the items should contain header name."
"actual={}".format(header_list))
data_matrix = self._source_data[1:]
else:
header_list = self._loader.header_list
data_matrix = self._source_data
if len(data_matrix) == 0:
raise InvalidDataError(
"data row must be greater or equal than one")
self._loader.inc_table_count()
yield TableData(
self._loader.make_table_name(),
header_list, data_matrix)
| # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
import dataproperty
from ..data import TableData
from ..error import InvalidDataError
from ..formatter import TableFormatter
class CsvTableFormatter(TableFormatter):
def to_table_data(self):
if dataproperty.is_empty_sequence(self._loader.header_list):
header_list = self._source_data[0]
if any([
dataproperty.is_empty_string(header) for header in header_list
]):
raise InvalidDataError(
"the first line includes empty string item: "
"the first line expected to contain header data.")
data_matrix = self._source_data[1:]
else:
header_list = self._loader.header_list
data_matrix = self._source_data
if len(data_matrix) == 0:
raise InvalidDataError(
"data row must be greater or equal than one")
self._loader.inc_table_count()
yield TableData(
self._loader.make_table_name(),
header_list, data_matrix)
| mit | Python |
5c3c90c9c26a2dbe6eba9a1ebb49f1a24cbc251f | Remove unused test setting | webkom/holonet,webkom/holonet,webkom/holonet | holonet/settings/test.py | holonet/settings/test.py | MASTER_DOMAINS = [
'test.holonet.no'
]
TEST_RUNNER = "djcelery.contrib.test_runner.CeleryTestSuiteRunner"
SENDER_WHITELIST_ENABLED = False
DOMAIN_WHITELIST_ENABLED = False
SECRET_KEY = 'test'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': '127.0.0.1',
'NAME': 'holonet'
}
}
| MASTER_DOMAINS = [
'test.holonet.no'
]
TEST_RUNNER = "djcelery.contrib.test_runner.CeleryTestSuiteRunner"
SENDER_WHITELIST_ENABLED = False
DOMAIN_WHITELIST_ENABLED = False
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
SECRET_KEY = 'test'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': '127.0.0.1',
'NAME': 'hansolo'
}
}
| mit | Python |
ce1a286ee6d02cfd89097adacac4991f1477950d | Revert "test add_physical_volume()" | nschloe/python4gmsh | test/examples/swiss_cheese.py | test/examples/swiss_cheese.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygmsh as pg
import numpy as np
def generate():
geom = pg.Geometry()
X0 = np.array([
[0.0, 0.0, 0.0],
[0.5, 0.3, 0.1],
[-0.5, 0.3, 0.1],
[0.5, -0.3, 0.1]
])
R = np.array([0.1, 0.2, 0.1, 0.14])
holes = [
geom.add_ball(x0, r, with_volume=False, lcar=0.2*r).surface_loop
for x0, r in zip(X0, R)
]
# geom.add_box(
# -1, 1,
# -1, 1,
# -1, 1,
# lcar=0.2,
# holes=holes
# )
geom.add_ball([0, 0, 0], 1.0, lcar=0.2, holes=holes)
# Fails on travis for some reason. TODO fix
# geom.add_physical_volume(ball, label='cheese')
return geom, 4.07064892966291
if __name__ == '__main__':
import meshio
out = pg.generate_mesh(generate())
meshio.write('swiss_cheese.vtu', *out)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygmsh as pg
import numpy as np
def generate():
geom = pg.Geometry()
X0 = np.array([
[0.0, 0.0, 0.0],
[0.5, 0.3, 0.1],
[-0.5, 0.3, 0.1],
[0.5, -0.3, 0.1]
])
R = np.array([0.1, 0.2, 0.1, 0.14])
holes = [
geom.add_ball(x0, r, with_volume=False, lcar=0.2*r).surface_loop
for x0, r in zip(X0, R)
]
# geom.add_box(
# -1, 1,
# -1, 1,
# -1, 1,
# lcar=0.2,
# holes=holes
# )
ball = geom.add_ball([0, 0, 0], 1.0, lcar=0.2, holes=holes)
geom.add_physical_volume(ball, label='cheese')
return geom, 4.07064892966291
if __name__ == '__main__':
import meshio
out = pg.generate_mesh(generate())
meshio.write('swiss_cheese.vtu', *out)
| bsd-3-clause | Python |
e674fe96a95422167a270eb76f5d3ea14b03122f | Revert show_cv2 example | toinsson/pyrealsense,toinsson/pyrealsense,toinsson/pyrealsense | examples/show_cv2.py | examples/show_cv2.py | import logging
logging.basicConfig(level=logging.INFO)
import time
import numpy as np
import cv2
import pyrealsense as pyrs
from pyrealsense.constants import rs_option
with pyrs.Service() as serv:
with serv.Device() as dev:
dev.apply_ivcam_preset(0)
try: # set custom gain/exposure values to obtain good depth image
custom_options = [(rs_option.RS_OPTION_R200_LR_EXPOSURE, 30.0),
(rs_option.RS_OPTION_R200_LR_GAIN, 100.0)]
dev.set_device_options(*zip(*custom_options))
except pyrs.RealsenseError:
pass # options are not available on all devices
cnt = 0
last = time.time()
smoothing = 0.9
fps_smooth = 30
while True:
cnt += 1
if (cnt % 10) == 0:
now = time.time()
dt = now - last
fps = 10/dt
fps_smooth = (fps_smooth * smoothing) + (fps * (1.0-smoothing))
last = now
dev.wait_for_frames()
c = dev.color
c = cv2.cvtColor(c, cv2.COLOR_RGB2BGR)
d = dev.depth * dev.depth_scale * 1000
d = cv2.applyColorMap(d.astype(np.uint8), cv2.COLORMAP_RAINBOW)
cd = np.concatenate((c, d), axis=1)
cv2.putText(cd, str(fps_smooth)[:4], (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 0))
cv2.imshow('', cd)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
| import logging
logging.basicConfig(level=logging.INFO)
import time
import numpy as np
import cv2
import pyrealsense as pyrs
from pyrealsense.constants import rs_option
color_stream = pyrs.stream.ColorStream(color_format='bgr')
depth_stream = pyrs.stream.DepthStream()
def convert_z16_to_bgr(frame):
hist = np.histogram(frame, bins=0x10000)[0]
hist = np.cumsum(hist)
hist -= hist[0]
rgb_frame = np.empty(frame.shape[:2] + (3,), dtype=np.uint8)
zeros = frame == 0
non_zeros = frame != 0
f = hist[frame[non_zeros]] * 255 / hist[0xFFFF]
rgb_frame[non_zeros, 0] = 255 - f
rgb_frame[non_zeros, 1] = 0
rgb_frame[non_zeros, 2] = f
rgb_frame[zeros, 0] = 20
rgb_frame[zeros, 1] = 5
rgb_frame[zeros, 2] = 0
return rgb_frame
with pyrs.Service() as serv:
with serv.Device(streams=(color_stream, depth_stream)) as dev:
dev.apply_ivcam_preset(0)
try: # set custom gain/exposure values to obtain good depth image
custom_options = [(rs_option.RS_OPTION_R200_LR_EXPOSURE, 30.0),
(rs_option.RS_OPTION_R200_LR_GAIN, 100.0)]
dev.set_device_options(*zip(*custom_options))
except pyrs.RealsenseError:
pass # options are not available on all devices
cnt = 0
last = time.time()
smoothing = 0.9
fps_smooth = 30
while True:
cnt += 1
if (cnt % 10) == 0:
now = time.time()
dt = now - last
fps = 10/dt
fps_smooth = (fps_smooth * smoothing) + (fps * (1.0-smoothing))
last = now
dev.wait_for_frames()
c = dev.color # set to bgr
d = dev.depth # * dev.depth_scale * 1000
# d = cv2.applyColorMap(d.astype(np.uint8), cv2.COLORMAP_RAINBOW)
d = convert_z16_to_bgr(d)
cd = np.concatenate((c, d), axis=1)
cv2.putText(cd, str(fps_smooth)[:4], (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 0))
cv2.imshow('', cd)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
| apache-2.0 | Python |
6a86f901082dbb069b3364719c800ed6c404ead1 | Update bitbucket link. | jaraco/hgtools | hgtools/managers/subprocess.py | hgtools/managers/subprocess.py | import os
import subprocess
from . import base
from . import cmd
class Subprocess:
env = None
def _invoke(self, *params):
"""
Invoke self.exe as a subprocess
"""
cmd = [self.exe] + list(params)
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.location,
env=self.env,
)
stdout, stderr = proc.communicate()
if not proc.returncode == 0:
raise RuntimeError(stderr.strip() or stdout.strip())
return stdout.decode('utf-8')
class MercurialManager(Subprocess, cmd.Mercurial, base.RepoManager):
"""
A RepoManager implemented by calling into the 'hg' command-line
as a subprocess.
"""
priority = 1
if os.path.isdir('.hg'):
priority += 1
@property
def env(self):
"""
Return an environment safe for calling an `hg` subprocess.
Removes MACOSX_DEPLOYMENT_TARGET from the env, as if there's a
mismatch between the local Python environment and the environment
in which `hg` is installed, it will cause an exception. See
https://github.com/jaraco/hgtools/issues/7 for details.
"""
env = os.environ.copy()
env.pop('MACOSX_DEPLOYMENT_TARGET', None)
return env
class GitManager(Subprocess, cmd.Git, base.RepoManager):
"""
A RepoManager implemented by calling into the 'git' command-line
as a subprocess.
"""
priority = 1
if os.path.isdir('.git'):
priority += 1
| import os
import subprocess
from . import base
from . import cmd
class Subprocess:
env = None
def _invoke(self, *params):
"""
Invoke self.exe as a subprocess
"""
cmd = [self.exe] + list(params)
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.location,
env=self.env,
)
stdout, stderr = proc.communicate()
if not proc.returncode == 0:
raise RuntimeError(stderr.strip() or stdout.strip())
return stdout.decode('utf-8')
class MercurialManager(Subprocess, cmd.Mercurial, base.RepoManager):
"""
A RepoManager implemented by calling into the 'hg' command-line
as a subprocess.
"""
priority = 1
if os.path.isdir('.hg'):
priority += 1
@property
def env(self):
"""
Return an environment safe for calling an `hg` subprocess.
Removes MACOSX_DEPLOYMENT_TARGET from the env, as if there's a
mismatch between the local Python environment and the environment
in which `hg` is installed, it will cause an exception. See
https://bitbucket.org/jaraco/hgtools/issue/7 for details.
"""
env = os.environ.copy()
env.pop('MACOSX_DEPLOYMENT_TARGET', None)
return env
class GitManager(Subprocess, cmd.Git, base.RepoManager):
"""
A RepoManager implemented by calling into the 'git' command-line
as a subprocess.
"""
priority = 1
if os.path.isdir('.git'):
priority += 1
| mit | Python |
e3950a577be91c1ab1c0f9d8ac948f4ed941adb0 | remove the message in HumanAgent about typing pass to pass, since that operation is not supported. | andysalerno/reversi_ai,andysalerno/reversi-ai,andysalerno/reversi_ai | agents/human_agent.py | agents/human_agent.py | from agents.agent import Agent
class HumanAgent(Agent):
"""This agent is controlled by a human, who inputs moves via stdin."""
def __init__(self, reversi, color, **kwargs):
pass
def get_action(self, game_state, legal_moves):
choice = None
while True:
raw_choice = input('Enter a move x,y: ')
if raw_choice == 'pass':
return None
if raw_choice == 'exit' or raw_choice == 'quit':
quit()
if len(raw_choice) != 3:
print('input must be 3 long, formatted x,y')
continue
if raw_choice[1] != ',':
print('comma separator not found.')
continue
if not raw_choice[0].isdigit() or not raw_choice[2].isdigit():
print('couldn\'t determine x,y from your input.')
continue
choice = (int(raw_choice[0]), int(raw_choice[2]))
break
return choice
| from agents.agent import Agent
class HumanAgent(Agent):
"""This agent is controlled by a human, who inputs moves via stdin."""
def __init__(self, reversi, color, **kwargs):
pass
def get_action(self, game_state, legal_moves):
choice = None
while True:
raw_choice = input('Enter a move x,y (or pass to pass): ')
if raw_choice == 'pass':
return None
if raw_choice == 'exit' or raw_choice == 'quit':
quit()
if len(raw_choice) != 3:
print('input must be 3 long, formatted x,y')
continue
if raw_choice[1] != ',':
print('comma separator not found.')
continue
if not raw_choice[0].isdigit() or not raw_choice[2].isdigit():
print('couldn\'t determine x,y from your input.')
continue
choice = (int(raw_choice[0]), int(raw_choice[2]))
break
return choice
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.