commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
7430dabe21fe505e411a1fefc2fb05218af84057 | add iterator to test case | yuyu2172/chainercv,chainer/chainercv,pfnet/chainercv,yuyu2172/chainercv,chainer/chainercv | tests/evaluations_tests/test_eval_semantic_segmentation.py | tests/evaluations_tests/test_eval_semantic_segmentation.py | import unittest
import numpy as np
from chainer import testing
from chainercv.evaluations import calc_confusion_matrix
from chainercv.evaluations import eval_semantic_segmentation
def _pred_iterator():
pred_labels = np.repeat([[[1, 1, 0], [0, 0, 1]]], 2, axis=0)
for pred_label in pred_labels:
yield pred_label
def _gt_iterator():
gt_labels = np.repeat([[[1, 0, 0], [0, -1, 1]]], 2, axis=0)
for gt_label in gt_labels:
yield gt_label
@testing.parameterize(
{'pred_labels': _pred_iterator(),
'gt_labels': _gt_iterator(),
'iou': np.array([4. / 6., 4. / 6.])
},
{'pred_labels': np.repeat([[[1, 1, 0], [0, 0, 1]]], 2, axis=0),
'gt_labels': np.repeat([[[1, 0, 0], [0, -1, 1]]], 2, axis=0),
'iou': np.array([4. / 6., 4. / 6.])
},
{'pred_labels': [np.array([[1, 1, 0], [0, 0, 1]]),
np.array([[1, 1, 0], [0, 0, 1]])],
'gt_labels': [np.array([[1, 0, 0], [0, -1, 1]]),
np.array([[1, 0, 0], [0, -1, 1]])],
'iou': np.array([4. / 6., 4. / 6.])
},
{'pred_labels': np.array([[[0, 0, 0], [0, 0, 0]]]),
'gt_labels': np.array([[[1, 1, 1], [1, 1, 1]]]),
'iou': np.array([0, 0]),
}
)
class TestEvalSemanticSegmentation(unittest.TestCase):
n_class = 2
def test_calc_confusion_matrix(self):
iou = eval_semantic_segmentation(
self.pred_labels, self.gt_labels, self.n_class)
np.testing.assert_equal(iou, self.iou)
@testing.parameterize(
{'pred_label': np.array([[1, 1, 0], [0, 0, 1]]),
'gt_label': np.array([[1, 0, 0], [0, -1, 1]]),
'confusion': np.array([[2, 1], [0, 2]])
},
{'pred_label': np.array([[0, 0, 0], [0, 0, 0]]),
'gt_label': np.array([[1, 1, 1], [1, 1, -1]]),
'confusion': np.array([[0, 0], [5, 0]])
}
)
class TestCalcConfusionMatrix(unittest.TestCase):
n_class = 2
def test_calc_confusion_matrix(self):
confusion = calc_confusion_matrix(
self.pred_label, self.gt_label, self.n_class)
np.testing.assert_equal(confusion, self.confusion)
testing.run_module(__name__, __file__)
| import unittest
import numpy as np
from chainer import testing
from chainercv.evaluations import calc_confusion_matrix
from chainercv.evaluations import eval_semantic_segmentation
@testing.parameterize(
{'pred_labels': np.repeat([[[1, 1, 0], [0, 0, 1]]], 2, axis=0),
'gt_labels': np.repeat([[[1, 0, 0], [0, -1, 1]]], 2, axis=0),
'iou': np.array([4. / 6., 4. / 6.])
},
{'pred_labels': [np.array([[1, 1, 0], [0, 0, 1]]),
np.array([[1, 1, 0], [0, 0, 1]])],
'gt_labels': [np.array([[1, 0, 0], [0, -1, 1]]),
np.array([[1, 0, 0], [0, -1, 1]])],
'iou': np.array([4. / 6., 4. / 6.])
},
{'pred_labels': np.array([[[0, 0, 0], [0, 0, 0]]]),
'gt_labels': np.array([[[1, 1, 1], [1, 1, 1]]]),
'iou': np.array([0, 0]),
}
)
class TestEvalSemanticSegmentation(unittest.TestCase):
n_class = 2
def test_calc_confusion_matrix(self):
iou = eval_semantic_segmentation(
self.pred_labels, self.gt_labels, self.n_class)
np.testing.assert_equal(iou, self.iou)
@testing.parameterize(
{'pred_label': np.array([[1, 1, 0], [0, 0, 1]]),
'gt_label': np.array([[1, 0, 0], [0, -1, 1]]),
'confusion': np.array([[2, 1], [0, 2]])
},
{'pred_label': np.array([[0, 0, 0], [0, 0, 0]]),
'gt_label': np.array([[1, 1, 1], [1, 1, -1]]),
'confusion': np.array([[0, 0], [5, 0]])
}
)
class TestCalcConfusionMatrix(unittest.TestCase):
n_class = 2
def test_calc_confusion_matrix(self):
confusion = calc_confusion_matrix(
self.pred_label, self.gt_label, self.n_class)
np.testing.assert_equal(confusion, self.confusion)
testing.run_module(__name__, __file__)
| mit | Python |
f42276007ea1feb4453780f8f9c7e63043b29a23 | fix "connection lost" spam when server-ip isn't blank. | SupaHam/mark2,frostyfrog/mark2,frostyfrog/mark2,SupaHam/mark2 | services/ping.py | services/ping.py | import re
import struct
from twisted.application.service import Service
from twisted.internet import task, reactor
from twisted.internet.protocol import Protocol, ClientFactory
from events import StatPlayerCount, ServerOutputConsumer, ACCEPTED
class PingProtocol(Protocol):
def connectionMade(self):
self.buff = ""
self.transport.write('\xFE\x01')
def dataReceived(self, data):
self.buff += data
if len(self.buff) >= 3:
l = struct.unpack('>h', self.buff[1:3])[0]
if len(self.buff) >= 3 + l * 2:
data = self.buff[9:].decode('utf-16be').split('\x00')
self.dispatch(StatPlayerCount(source="ping", players_current=data[3], players_max=data[4]))
self.transport.loseConnection()
class PingFactory(ClientFactory):
noisy = False
def __init__(self, interval, host, port, dispatch):
self.host = host
self.port = port
self.dispatch = dispatch
t = task.LoopingCall(self.loop)
t.start(interval, now=False)
def loop(self):
reactor.connectTCP(self.host, self.port, self)
def buildProtocol(self, addr):
pr = PingProtocol()
pr.dispatch = self.dispatch
return pr
class Ping(Service):
name = "ping"
def __init__(self, parent, host, port, interval):
h = host if host else '127.0.0.1'
parent.events.register(self.whine, ServerOutputConsumer, pattern='\/%s\:\d+ lost connection' % re.escape(h))
self.factory = PingFactory(interval, host, port, parent.events.dispatch)
def whine(self, event):
return ACCEPTED
def stopService(self):
self.factory.stopFactory()
Service.stopService(self)
| import struct
from twisted.application.service import Service
from twisted.internet import task, reactor
from twisted.internet.protocol import Protocol, ClientFactory
from events import StatPlayerCount, ServerOutputConsumer, ACCEPTED
class PingProtocol(Protocol):
def connectionMade(self):
self.buff = ""
self.transport.write('\xFE\x01')
def dataReceived(self, data):
self.buff += data
if len(self.buff) >= 3:
l = struct.unpack('>h', self.buff[1:3])[0]
if len(self.buff) >= 3 + l * 2:
data = self.buff[9:].decode('utf-16be').split('\x00')
self.dispatch(StatPlayerCount(source="ping", players_current=data[3], players_max=data[4]))
self.transport.loseConnection()
class PingFactory(ClientFactory):
noisy = False
def __init__(self, interval, host, port, dispatch):
self.host = host
self.port = port
self.dispatch = dispatch
t = task.LoopingCall(self.loop)
t.start(interval, now=False)
def loop(self):
reactor.connectTCP(self.host, self.port, self)
def buildProtocol(self, addr):
pr = PingProtocol()
pr.dispatch = self.dispatch
return pr
class Ping(Service):
name = "ping"
def __init__(self, parent, host, port, interval):
parent.events.register(self.whine, ServerOutputConsumer, pattern='\/127\.0\.0\.1\:\d+ lost connection')
self.factory = PingFactory(interval, host, port, parent.events.dispatch)
def whine(self, event):
return ACCEPTED
def stopService(self):
self.factory.stopFactory()
Service.stopService(self)
| mit | Python |
271a506f9000b5206057de6091f5d5314a5b1e65 | bump versions | licenses/lice | lice/__init__.py | lice/__init__.py | __version__ = "0.5"
def main():
from lice.core import main
main()
| __version__ = "0.4"
def main():
from lice.core import main
main()
| bsd-3-clause | Python |
d5f3031562c5c158a731ca5b93442273725b04ba | Use date/time when determining notices | statgen/encore,statgen/encore,statgen/encore,statgen/encore,statgen/encore,statgen/encore | encore/notice.py | encore/notice.py | import sql_pool
import MySQLdb
class Notice:
def __init__(self, notice_id):
self.notice_id = notice
self.message = None
self.start_date = None
self.end_date = None
def as_object(self):
obj = {"notice_id": self.notice_id,
"message": self.messsage,
"start_date": self.start_date,
"end_date": self.end_date}
return obj
@staticmethod
def get(notice_id, config):
db = sql_pool.get_conn()
cur = db.cursor(MySQLdb.cursors.DictCursor)
sql = """
SELECT id, mesaage
DATE_FORMAT(start_date, '%%Y-%%m-%%d %%H:%%i:%%s') AS creation_date
FROM notices
WHERE id = uuid_to_bin(%s)
"""
cur.execute(sql, (notice_id,))
result = cur.fetchone()
if result is not None:
n = Notice(geno_id, meta)
n.message = result["message"]
n.start_date = result["start_date"]
n.end_date = result["end_date"]
else:
n = None
return n
@staticmethod
def list_current(config = None):
db = sql_pool.get_conn()
cur = db.cursor(MySQLdb.cursors.DictCursor)
sql = """
SELECT id, message,
DATE_FORMAT(start_date, '%Y-%m-%d %H:%i:%s') AS creation_date,
DATE_FORMAT(end_date, '%Y-%m-%d %H:%i:%s') AS end_date
FROM notices
WHERE start_date <= CURRENT_TIMESTAMP() and (end_date is NULL or end_date > CURRENT_TIMESTAMP())
ORDER BY start_date DESC
"""
cur.execute(sql)
results = cur.fetchall()
return results
| import sql_pool
import MySQLdb
class Notice:
def __init__(self, notice_id):
self.notice_id = notice
self.message = None
self.start_date = None
self.end_date = None
def as_object(self):
obj = {"notice_id": self.notice_id,
"message": self.messsage,
"start_date": self.start_date,
"end_date": self.end_date}
return obj
@staticmethod
def get(notice_id, config):
db = sql_pool.get_conn()
cur = db.cursor(MySQLdb.cursors.DictCursor)
sql = """
SELECT id, mesaage
DATE_FORMAT(start_date, '%%Y-%%m-%%d %%H:%%i:%%s') AS creation_date
FROM notices
WHERE id = uuid_to_bin(%s)
"""
cur.execute(sql, (notice_id,))
result = cur.fetchone()
if result is not None:
n = Notice(geno_id, meta)
n.message = result["message"]
n.start_date = result["start_date"]
n.end_date = result["end_date"]
else:
n = None
return n
@staticmethod
def list_current(config = None):
db = sql_pool.get_conn()
cur = db.cursor(MySQLdb.cursors.DictCursor)
sql = """
SELECT id, message,
DATE_FORMAT(start_date, '%Y-%m-%d %H:%i:%s') AS creation_date,
DATE_FORMAT(end_date, '%Y-%m-%d %H:%i:%s') AS end_date
FROM notices
WHERE start_date <= CURDATE() and (end_date is NULL or end_date > CURDATE())
ORDER BY start_date DESC
"""
cur.execute(sql)
results = cur.fetchall()
return results
| agpl-3.0 | Python |
f730b60c961d540f8a6d338cb259d193b30ed7e9 | Add __repr__ and __str__ to models | gmacon/postfix-aliases,gmacon/postfix-aliases | postfix_aliases/models.py | postfix_aliases/models.py | from flask_login import UserMixin
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Domain(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), unique=True)
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, self.name)
class Mailbox(UserMixin, db.Model):
__table_args__ = (
db.UniqueConstraint('localpart', 'domain_id'),
)
id = db.Column(db.Integer, primary_key=True)
localpart = db.Column(db.String(128))
domain_id = db.Column(db.Integer, db.ForeignKey('domain.id'))
domain = db.relationship('Domain', backref='mailboxes')
password = db.Column(db.String(128))
def __str__(self):
return '{}@{}'.format(self.localpart, self.domain.name)
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, self)
class Alias(db.Model):
__table_args__ = (
db.UniqueConstraint('localpart', 'domain_id'),
)
id = db.Column(db.Integer, primary_key=True)
localpart = db.Column(db.String(128))
domain_id = db.Column(db.Integer, db.ForeignKey('domain.id'))
domain = db.relationship('Domain', backref='aliases')
mailbox_id = db.Column(db.Integer, db.ForeignKey('mailbox.id'))
mailbox = db.relationship('Mailbox', backref='aliases')
def __str__(self):
return '{}@{}'.format(self.localpart, self.domain.name)
def __repr__(self):
return '<{}: {} for {}>'.format(self.__class__.__name__, self,
self.mailbox)
| from flask_login import UserMixin
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Domain(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), unique=True)
class Mailbox(UserMixin, db.Model):
__table_args__ = (
db.UniqueConstraint('localpart', 'domain_id'),
)
id = db.Column(db.Integer, primary_key=True)
localpart = db.Column(db.String(128))
domain_id = db.Column(db.Integer, db.ForeignKey('domain.id'))
domain = db.relationship('Domain', backref='mailboxes')
password = db.Column(db.String(128))
class Alias(db.Model):
__table_args__ = (
db.UniqueConstraint('localpart', 'domain_id'),
)
id = db.Column(db.Integer, primary_key=True)
localpart = db.Column(db.String(128))
domain_id = db.Column(db.Integer, db.ForeignKey('domain.id'))
domain = db.relationship('Domain', backref='aliases')
mailbox_id = db.Column(db.Integer, db.ForeignKey('mailbox.id'))
mailbox = db.relationship('Mailbox', backref='aliases')
| mit | Python |
fa02eb8e0bd043293eb96a1be93ee88bea6084b2 | Clean up messy syntax | isaacmg/fb_scraper,isaacmg/fb_scraper | tests/integrations_tests.py | tests/integrations_tests.py | import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
from fb_posts import *
scrape_posts_from_last_scrape("115285708497149")
scrape_posts_from_last_scrape_kafka("319872211700098")
scrape_all_comments("319872211700098")
| import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
from fb_posts import scrape_comments_from_last_scrape, scrape_posts_from_last_scrape, scrape_posts_from_last_scrape_kafka, scrape_all_comments
scrape_posts_from_last_scrape("115285708497149")
scrape_posts_from_last_scrape_kafka("319872211700098")
scrape_all_comments("319872211700098")
| apache-2.0 | Python |
559d516d1f4cea4156522ebd22d6d155c8f38b84 | change "name" to "label" | MBALearnsToCode/CorpFin,MBALearnsToCode/FinSymPy,MBALearnsToCode/FinSymPy,MBALearnsToCode/CorpFin | CorpFin/Security.py | CorpFin/Security.py |
class Security:
def __init__(self, label='', par=0., val=0.):
self.name = label
self.par = par
self.val = val
def __repr__(self):
if self.name:
s = ' "%s"' % self.name
else:
s = ''
return 'Security' + s + ': Par = %.3g, Val = %.3g' % (self.par, self.val)
DOLLAR = Security(label='$', par=1., val=1.)
|
class Security:
def __init__(self, name='', par=0., val=0.):
self.name = name
self.par = par
self.val = val
def __repr__(self):
if self.name:
s = ' "%s"' % self.name
else:
s = ''
return 'Security' + s + ': Par = %.3g, Val = %.3g' % (self.par, self.val)
DOLLAR = Security(name='$', par=1., val=1.)
| mit | Python |
effbc4bf8131cbd355155c1c72a858204311f7be | update how tours are listed | dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq | corehq/apps/tour/tours.py | corehq/apps/tour/tours.py | from django.core.urlresolvers import reverse
from corehq.apps.tour.models import has_user_seen_tour
from corehq.apps.tour.views import EndTourView
class StaticGuidedTour(object):
def __init__(self, slug, template):
self.slug = slug
self.template = template
def get_tour_data(self):
return {
'slug': self.slug,
'template': self.template,
'endUrl': reverse(EndTourView.urlname, args=(self.slug,)),
}
def is_enabled(self, user):
return has_user_seen_tour(user, self.slug)
NEW_BLANK_APP = StaticGuidedTour(
'new_blank_app', 'tour/config/new_blank_app.html'
)
NEW_TEMPLATE_APP = StaticGuidedTour(
'new_template_app', 'tour/config/new_template_app.html'
)
| import collections
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_noop as _
from corehq.apps.tour.utils import tour_is_enabled_for_user
GuidedTourStep = collections.namedtuple('GuidedTourStep', ['element', 'title', 'content'])
class StaticGuidedTour(object):
def __init__(self, slug, steps):
self.slug = slug
if not isinstance(steps, list):
raise ValueError("steps should be a list of GuidedTourStep")
self.steps = steps
def get_tour_data(self):
return {
'steps': map(lambda s: dict(s._asdict()), self.steps),
}
def is_enabled(self, user):
return tour_is_enabled_for_user(self.slug, user)
SIMPLE_NEW_APP = StaticGuidedTour('simple_new_app', [
GuidedTourStep(
'#',
_("Welcome to the App Builder"),
_("Click 'Next' for a quick introduction to this page.")
),
GuidedTourStep(
'#',
_("Edit a form"),
_("Build and edit form questions and logic in our easy Form Builder.")
),
GuidedTourStep(
'#',
_("Preview a form"),
_("Check out what your form looks like in a web preview")
),
GuidedTourStep(
'#',
_("Deploy your App"),
_("Click here to install your app on a mobile device")
),
GuidedTourStep(
'#',
_("Happy App Building!"),
_(mark_safe("""For more advice, tutorials, and answers to your questions,
please checkout our <a href="http://help.commcarehq.org/">Help Site</a>."""))
)
])
| bsd-3-clause | Python |
7039bfdd0e7384bf1ae934a3cea2486becd5a48f | Upgrade mockito to 2.23.0 | GerritCodeReview/plugins_webhooks | external_plugin_deps.bzl | external_plugin_deps.bzl | load("//tools/bzl:maven_jar.bzl", "maven_jar")
def external_plugin_deps():
maven_jar(
name = "mockito",
artifact = "org.mockito:mockito-core:2.23.0",
sha1 = "497ddb32fd5d01f9dbe99a2ec790aeb931dff1b1",
deps = [
"@byte-buddy//jar",
"@byte-buddy-agent//jar",
"@objenesis//jar",
],
)
BYTE_BUDDY_VERSION = "1.9.0"
maven_jar(
name = "byte-buddy",
artifact = "net.bytebuddy:byte-buddy:" + BYTE_BUDDY_VERSION,
sha1 = "8cb0d5baae526c9df46ae17693bbba302640538b",
)
maven_jar(
name = "byte-buddy-agent",
artifact = "net.bytebuddy:byte-buddy-agent:" + BYTE_BUDDY_VERSION,
sha1 = "37b5703b4a6290be3fffc63ae9c6bcaaee0ff856",
)
maven_jar(
name = "objenesis",
artifact = "org.objenesis:objenesis:2.6",
sha1 = "639033469776fd37c08358c6b92a4761feb2af4b",
)
| load("//tools/bzl:maven_jar.bzl", "maven_jar")
def external_plugin_deps():
maven_jar(
name = "mockito",
artifact = "org.mockito:mockito-core:2.9.0",
sha1 = "f28b9606eca8da77e10df30a7e301f589733143e",
deps = [
"@byte-buddy//jar",
"@objenesis//jar",
],
)
maven_jar(
name = "byte-buddy",
artifact = "net.bytebuddy:byte-buddy:1.7.0",
sha1 = "48481d20ed4334ee0abfe8212ecb44e0233a97b5",
)
maven_jar(
name = "objenesis",
artifact = "org.objenesis:objenesis:2.6",
sha1 = "639033469776fd37c08358c6b92a4761feb2af4b",
)
| apache-2.0 | Python |
e98d1fd364f9eff5497306b08b0fe5176b3c7379 | Initialize the ship with active set up to True | nephilahacks/spider-eats-the-kiwi | entities/ship.py | entities/ship.py | from .base import BaseEntity
from kivy.core.window import Window
class Ship(BaseEntity):
def __init__(self, *args, **kwargs):
super(Ship, self).__init__(*args, **kwargs)
self.active = True | from .base import BaseEntity
from kivy.core.window import Window
class Ship(BaseEntity):
pass | mit | Python |
b031c1d6d2a4e6214bf53357dd400c0d54b1c099 | Use StrictRedis's from_url() | nickfrostatx/expenses,nickfrostatx/expenses,nickfrostatx/expenses | expenses/app.py | expenses/app.py | # -*- coding: utf-8 -*-
"""Flask application factory."""
from flask import Flask
from redis import StrictRedis
from . import __name__ as package_name
import os
def create_app():
"""Return an instance of the main Flask application."""
app = Flask(package_name)
app.config.setdefault('REDIS_URL', 'redis://localhost')
from .model import db
db.init_app(app)
@app.before_first_request
def init_db():
app.redis = StrictRedis.from_url(app.config['REDIS_URL'])
from .error import register_error_handler, html_handler
register_error_handler(app, html_handler)
from .session import LazyRedisSessionInterface
app.session_interface = LazyRedisSessionInterface()
from .util import price_filter
app.jinja_env.filters['price'] = price_filter
from .views import views
app.register_blueprint(views)
return app
| # -*- coding: utf-8 -*-
"""Flask application factory."""
from flask import Flask
from redis import from_url
from . import __name__ as package_name
import os
def create_app():
"""Return an instance of the main Flask application."""
app = Flask(package_name)
app.config.setdefault('REDIS_URL', 'redis://localhost')
from .model import db
db.init_app(app)
@app.before_first_request
def init_db():
app.redis = from_url(app.config['REDIS_URL'])
from .error import register_error_handler, html_handler
register_error_handler(app, html_handler)
from .session import LazyRedisSessionInterface
app.session_interface = LazyRedisSessionInterface()
from .util import price_filter
app.jinja_env.filters['price'] = price_filter
from .views import views
app.register_blueprint(views)
return app
| mit | Python |
c481a6266668e747dfaf3a9a86904530d01e6ade | Fix some tests. | hello-base/web,hello-base/web,hello-base/web,hello-base/web | tests/people/test_models.py | tests/people/test_models.py | # -*- coding: utf-8 -*-
import datetime
import pytest
from components.people.models import Group, Idol, Membership, Staff
from components.people.factories import (GroupFactory, IdolFactory,
MembershipFactory, StaffFactory)
pytestmark = pytest.mark.django_db
class TestGroups:
def test_group_factory(self):
factory = GroupFactory()
assert isinstance(factory, Group)
assert 'group' in factory.romanized_name
assert factory.identifier == 'group'
def test_group_get_absolute_url(self, client):
factory = GroupFactory()
response = client.get(factory.get_absolute_url())
assert response.status_code == 200
def test_group_age(self):
active = GroupFactory(started=datetime.date.today() - datetime.timedelta(days=366))
inactive = GroupFactory(started=datetime.date.today() - datetime.timedelta(days=366), ended=datetime.date.today())
assert active.age == 1
assert active.age_in_days == 366
assert inactive.age == 1
assert inactive.age_in_days == 366
class TestIdols:
def test_idol_factory(self):
factory = IdolFactory()
assert isinstance(factory, Idol)
assert factory.identifier == 'idol'
assert 'family' in factory.romanized_family_name
assert 'given' in factory.romanized_given_name
def test_idol_get_absolute_url(self, client):
factory = IdolFactory()
response = client.get(factory.get_absolute_url())
assert response.status_code == 200
def test_idol_name_with_alias(self):
factory = IdolFactory(alias=u'ジュンジュン', romanized_alias='JunJun')
assert factory.name == u'ジュンジュン'
assert factory.romanized_name == 'JunJun'
def test_idol_gaijin(self):
nihonjin = IdolFactory()
assert not nihonjin.is_gaijin()
gaijin = IdolFactory(romanized_family_name='Sandbo', romanized_given_name='Lehua')
assert gaijin.is_gaijin()
assert gaijin.romanized_name == 'Lehua Sandbo'
class TestStaff:
def test_staff_factory(self):
factory = StaffFactory()
assert isinstance(factory, Staff)
assert 'family' in factory.romanized_family_name
assert 'given' in factory.romanized_given_name
class TestMemberships:
def test_membership_factory(self):
factory = MembershipFactory()
assert isinstance(factory, Membership)
assert isinstance(factory.group, Group)
assert isinstance(factory.idol, Idol)
| # -*- coding: utf-8 -*-
import datetime
import pytest
from components.people.models import Group, Idol, Membership, Staff
from components.people.factories import (GroupFactory, IdolFactory,
MembershipFactory, StaffFactory)
pytestmark = pytest.mark.django_db
class TestGroups:
def test_group_factory(self):
factory = GroupFactory()
assert isinstance(factory, Group)
assert 'group' in factory.romanized_name
assert factory.identifier == 'group'
def test_group_get_absolute_url(self, client):
factory = GroupFactory()
response = client.get(factory.get_absolute_url())
assert response.status_code == 200
def test_group_age(self):
active = GroupFactory(started=datetime.date.today() - datetime.timedelta(days=366))
inactive = GroupFactory(started=datetime.date.today() - datetime.timedelta(days=366), ended=datetime.date.today())
assert active.age == 1
assert active.age_in_days == 366
assert inactive.age == 1
assert inactive.age_in_days == 366
class TestIdols:
def test_idol_factory(self):
factory = IdolFactory()
assert isinstance(factory, Idol)
assert factory.identifier == 'idol'
assert 'family' in factory.romanized_family_name
assert 'given' in factory.romanized_given_name
def test_idol_get_absolute_url(self, client):
factory = IdolFactory()
response = client.get(factory.get_absolute_url())
assert response.status_code == 200
def test_idol_name_with_alias(self):
factory = IdolFactory(alias='ジュンジュン', romanized_alias='JunJun')
assert factory.name == 'ジュンジュン'
assert factory.romanized_name == 'JunJun'
def test_idol_gaijin(self):
nihonjin = IdolFactory()
assert not nihonjin.is_gaijin()
gaijin = IdolFactory(family_name='Sandbo', given_name='Lehua')
assert gaijin.is_gaijin()
assert gaijin.romanized_name == 'Lehua Sandbo'
class TestStaff:
def test_staff_factory(self):
factory = StaffFactory()
assert isinstance(factory, Staff)
assert 'family' in factory.romanized_family_name
assert 'given' in factory.romanized_given_name
class TestMemberships:
def test_membership_factory(self):
factory = MembershipFactory()
assert isinstance(factory, Membership)
assert isinstance(factory.group, Group)
assert isinstance(factory.idol, Idol)
| apache-2.0 | Python |
9993853448c593ab11ddf7d7c12bbcd307829901 | remove label from captcha | AccentDesign/wagtailstreamforms,AccentDesign/wagtailstreamforms,AccentDesign/wagtailstreamforms,AccentDesign/wagtailstreamforms | wagtailstreamforms/forms.py | wagtailstreamforms/forms.py | from django import forms
from captcha.fields import ReCaptchaField
from wagtail.wagtailforms.forms import FormBuilder as OrigFormBuilder
from wagtailstreamforms.utils import recaptcha_enabled
class FormBuilder(OrigFormBuilder):
def __init__(self, fields, **kwargs):
self.add_recaptcha = kwargs.pop('add_recaptcha')
super(FormBuilder, self).__init__(fields)
def create_regex_field(self, field, options):
if field.regex_validator:
# there is a selected validator so use it
options.update({
'regex': field.regex_validator.regex,
'error_messages': {'invalid': field.regex_validator.error_message}
})
else:
# otherwise allow anything
options.update({'regex': '(.*?)'})
return forms.RegexField(**options)
# doing this here rather than init as although works test are failing all over the place
OrigFormBuilder.FIELD_TYPES.update({'regexfield': create_regex_field})
@property
def formfields(self):
fields = super(FormBuilder, self).formfields
# add form id to identify the form type
fields['form_id'] = forms.CharField(widget=forms.HiddenInput)
fields['form_reference'] = forms.CharField(widget=forms.HiddenInput)
# if enabled add recaptcha field
if self.add_recaptcha and recaptcha_enabled():
fields['recaptcha'] = ReCaptchaField(label='')
return fields
| from django import forms
from captcha.fields import ReCaptchaField
from wagtail.wagtailforms.forms import FormBuilder as OrigFormBuilder
from wagtailstreamforms.utils import recaptcha_enabled
class FormBuilder(OrigFormBuilder):
def __init__(self, fields, **kwargs):
self.add_recaptcha = kwargs.pop('add_recaptcha')
super(FormBuilder, self).__init__(fields)
def create_regex_field(self, field, options):
if field.regex_validator:
# there is a selected validator so use it
options.update({
'regex': field.regex_validator.regex,
'error_messages': {'invalid': field.regex_validator.error_message}
})
else:
# otherwise allow anything
options.update({'regex': '(.*?)'})
return forms.RegexField(**options)
OrigFormBuilder.FIELD_TYPES.update(
{'regexfield': create_regex_field}
)
@property
def formfields(self):
fields = super(FormBuilder, self).formfields
# add form id to identify the form type
fields['form_id'] = forms.CharField(widget=forms.HiddenInput)
fields['form_reference'] = forms.CharField(widget=forms.HiddenInput)
# if enabled add recaptcha field
if self.add_recaptcha and recaptcha_enabled():
fields['recaptcha'] = ReCaptchaField()
return fields
| mit | Python |
ca862a62f93273efd0561ed1d37087cd8edefaed | bump version | ramusus/django-facebook-api | facebook_api/__init__.py | facebook_api/__init__.py | # -*- coding: utf-8 -*-
#
# Copyright 2011-2015 ramusus
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (0, 6, 6)
__version__ = '.'.join(map(str, VERSION))
| # -*- coding: utf-8 -*-
#
# Copyright 2011-2015 ramusus
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (0, 6, 5)
__version__ = '.'.join(map(str, VERSION))
| bsd-3-clause | Python |
df2d61d1b9dc9dc22e745a6bd06bc8feeb766bab | Fix roles migration script. | eevee/floof,eevee/floof,eevee/floof | migration/versions/006_Add_roles.py | migration/versions/006_Add_roles.py | from sqlalchemy import *
from migrate import *
from sqlalchemy.orm import sessionmaker, relation
from sqlalchemy.ext.declarative import declarative_base
TableBase = declarative_base()
class Role(TableBase):
__tablename__ = 'roles'
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Unicode(127), nullable=False)
description = Column(Unicode, nullable=True)
class Privilege(TableBase):
__tablename__ = 'privileges'
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Unicode(127), nullable=False)
description = Column(Unicode, nullable=True)
class RolePrivilege(TableBase):
__tablename__ = 'role_privileges'
role_id = Column(Integer, ForeignKey('roles.id'), primary_key=True, nullable=False)
priv_id = Column(Integer, ForeignKey('privileges.id'), primary_key=True, nullable=False)
Role.privileges = relation(Privilege, secondary=RolePrivilege.__table__)
def upgrade(migrate_engine):
TableBase.metadata.bind = migrate_engine
# Add tables and columns
Role.__table__.create()
Privilege.__table__.create()
RolePrivilege.__table__.create()
# Add canonical privileges and roles
upload_art = Privilege(name=u'upload_art', description=u'Can upload art')
admin_priv = Privilege(name=u'admin', description=u'Can administrate')
base_user = Role(name=u'user', description=u'Basic user', privileges=[upload_art])
admin_user = Role(name=u'admin', description=u'Administrator', privileges=[admin_priv, upload_art])
Session = sessionmaker(bind=migrate_engine)()
Session.add_all([base_user, admin_user])
Session.commit()
def downgrade(migrate_engine):
TableBase.metadata.bind = migrate_engine
RolePrivilege.__table__.drop()
Role.__table__.drop()
Privilege.__table__.drop()
| from sqlalchemy import *
from migrate import *
from sqlalchemy.orm import sessionmaker, relation
from sqlalchemy.ext.declarative import declarative_base
TableBase = declarative_base()
class Role(TableBase):
__tablename__ = 'roles'
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Unicode(127), nullable=False)
description = Column(Unicode, nullable=True)
class Privilege(TableBase):
__tablename__ = 'privileges'
id = Column(Integer, primary_key=True, nullable=False)
name = Column(Unicode(127), nullable=False)
description = Column(Unicode, nullable=True)
class RolePrivilege(TableBase):
__tablename__ = 'role_privileges'
role_id = Column(Integer, ForeignKey('roles.id'), primary_key=True, nullable=False)
priv_id = Column(Integer, ForeignKey('privileges.id'), primary_key=True, nullable=False)
Role.privileges = relation(Privilege, secondary=RolePrivilege.__table__)
def upgrade(migrate_engine):
TableBase.metadata.bind = migrate_engine
# Add tables and columns
Role.__table__.create()
Privilege.__table__.create()
RolePrivilege.__table__.create()
# Add canonical privileges and roles
upload_art = Privilege(name=u'upload_art')
admin_priv = Privilege(name=u'admin')
base_user = Role(name=u'user', privileges=[upload_art])
admin_user = Role(name=u'admin', privileges=[admin_priv, upload_art])
Session = sessionmaker(bind=migrate_engine)()
Session.add_all([base_user, admin_user])
Session.commit()
# Add canonical privileges and roles
upload_art = Privilege(name=u'upload_art', description=u'Can upload art')
admin_priv = Privilege(name=u'admin', description=u'Can administrate')
base_user = Role(name=u'user', description=u'Basic user', privileges=[upload_art])
admin_user = Role(name=u'admin', description=u'Administrator', privileges=[admin_priv, upload_art])
Session = sessionmaker(bind=migrate_engine)()
Session.add_all([base_user, admin_user])
Session.commit()
def downgrade(migrate_engine):
TableBase.metadata.bind = migrate_engine
RolePrivilege.__table__.drop()
Role.__table__.drop()
Privilege.__table__.drop()
| isc | Python |
ac4ba844aade398ee18c9f11326035ec33e8af6b | Fix print statement in lint.py | PostDispatchInteractive/app-template,PostDispatchInteractive/app-template,PostDispatchInteractive/app-template,PostDispatchInteractive/app-template | fabfile/lint.py | fabfile/lint.py | #!/usr/bin/env python
"""
Commands for linting JavaScript
"""
from glob import glob
import os
from fabric.api import local, task
import app
@task(default=True)
def lint():
"""
Run ESLint on all .js files.
"""
for path in glob('www/js/*.js'):
filename = os.path.split(path)[-1]
name = os.path.splitext(filename)[0]
exceptions = ['app_config.js']
if '.min.js' not in filename and filename not in exceptions:
try:
local( 'node_modules/eslint/bin/eslint.js %s || exit 0' % (path) )
except:
print('It looks like "eslint" isn\'t installed. Try running: "npm install"')
raise
| #!/usr/bin/env python
"""
Commands for linting JavaScript
"""
from glob import glob
import os
from fabric.api import local, task
import app
@task(default=True)
def lint():
"""
Run ESLint on all .js files.
"""
for path in glob('www/js/*.js'):
filename = os.path.split(path)[-1]
name = os.path.splitext(filename)[0]
exceptions = ['app_config.js']
if '.min.js' not in filename and filename not in exceptions:
try:
local( 'node_modules/eslint/bin/eslint.js %s || exit 0' % (path) )
except:
print 'It looks like "eslint" isn\'t installed. Try running: "npm install"'
raise
| mit | Python |
bc8be113b50cc733a9c924d5be7a0488a947e347 | Use apt safe-upgrade. | sociateru/fabtools,wagigi/fabtools-python,bitmonk/fabtools,prologic/fabtools,fabtools/fabtools,badele/fabtools,n0n0x/fabtools-python,pombredanne/fabtools,pahaz/fabtools,davidcaste/fabtools,ronnix/fabtools,ahnjungho/fabtools,AMOSoft/fabtools,hagai26/fabtools | fabtools/deb.py | fabtools/deb.py | """
Fabric tools for managing Debian/Ubuntu packages
"""
from fabric.api import *
def update_index():
"""
Quietly update package index
"""
sudo("aptitude -q -q update")
def upgrade():
"""
Upgrade all packages
"""
sudo("aptitude --assume-yes safe-upgrade")
def is_installed(pkg_name):
"""
Check if .deb package is installed
"""
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = run("dpkg -s %(pkg_name)s" % locals())
for line in res.splitlines():
if line.startswith("Status: "):
status = line[8:]
if "installed" in status.split(' '):
return True
return False
def install(packages, update=False, options=None):
"""
Install .deb package(s)
"""
if update:
update_index()
if options is None:
options = []
if not isinstance(packages, basestring):
packages = " ".join(packages)
options.append("--assume-yes")
options = " ".join(options)
sudo('aptitude install %(options)s %(packages)s' % locals())
def preseed_package(pkg_name, preseed):
"""
Enable unattended package installation by preseeding debconf parameters
"""
for q_name, _ in preseed.items():
q_type, q_answer = _
sudo('echo "%(pkg_name)s %(q_name)s %(q_type)s %(q_answer)s" | debconf-set-selections' % locals())
| """
Fabric tools for managing Debian/Ubuntu packages
"""
from fabric.api import *
def update_index():
"""
Quietly update package index
"""
sudo("aptitude -q -q update")
def upgrade():
"""
Upgrade all packages
"""
sudo("aptitude --assume-yes upgrade")
def is_installed(pkg_name):
"""
Check if .deb package is installed
"""
with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
res = run("dpkg -s %(pkg_name)s" % locals())
for line in res.splitlines():
if line.startswith("Status: "):
status = line[8:]
if "installed" in status.split(' '):
return True
return False
def install(packages, update=False, options=None):
"""
Install .deb package(s)
"""
if update:
update_index()
if options is None:
options = []
if not isinstance(packages, basestring):
packages = " ".join(packages)
options.append("--assume-yes")
options = " ".join(options)
sudo('aptitude install %(options)s %(packages)s' % locals())
def preseed_package(pkg_name, preseed):
"""
Enable unattended package installation by preseeding debconf parameters
"""
for q_name, _ in preseed.items():
q_type, q_answer = _
sudo('echo "%(pkg_name)s %(q_name)s %(q_type)s %(q_answer)s" | debconf-set-selections' % locals())
| bsd-2-clause | Python |
23073881aafcd0e7fdff024d3ef6c2f93a48dad3 | Fix keystone config group name | StackStorm/mistral,openstack/mistral,dennybaa/mistral,dennybaa/mistral,openstack/mistral,StackStorm/mistral,dzimine/mistral,TimurNurlygayanov/mistral | mistral/utils/openstack/keystone.py | mistral/utils/openstack/keystone.py | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneclient.v3 import client as keystone_client
from oslo.config import cfg
from mistral import context
CONF = cfg.CONF
def client():
ctx = context.ctx()
auth_url = CONF.keystone.auth_uri
keystone = keystone_client.Client(username=ctx['user_name'],
token=ctx['auth_token'],
tenant_id=ctx['project_id'],
auth_url=auth_url)
keystone.management_url = auth_url
return keystone
def client_for_trusts(username, password, project_name=None, trust_id=None,
project_id=None):
auth_url = CONF.keystone.auth_uri
client = keystone_client.Client(username=username,
password=password,
tenant_name=project_name,
tenant_id=project_id,
auth_url=auth_url,
trust_id=trust_id)
client.management_url = auth_url
return client
| # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneclient.v3 import client as keystone_client
from oslo.config import cfg
from mistral import context
CONF = cfg.CONF
def client():
ctx = context.ctx()
auth_url = CONF.keystone_authtoken.auth_uri
keystone = keystone_client.Client(username=ctx['user_name'],
token=ctx['auth_token'],
tenant_id=ctx['project_id'],
auth_url=auth_url)
keystone.management_url = auth_url
return keystone
def client_for_trusts(username, password, project_name=None, trust_id=None,
project_id=None):
auth_url = CONF.keystone_authtoken.auth_uri
client = keystone_client.Client(username=username,
password=password,
tenant_name=project_name,
tenant_id=project_id,
auth_url=auth_url,
trust_id=trust_id)
client.management_url = auth_url
return client
| apache-2.0 | Python |
d52cd5285eacacd534663e1bf4b10ddff92037df | Fix error in function call | NLeSC/cptm,NLeSC/cptm | cptm/tabular2cpt_input.py | cptm/tabular2cpt_input.py | """Script that converts a field in a tabular data file to cptm input files
Used for the CAP vragenuurtje data.
Uses frog to pos-tag and lemmatize the data.
Usage: python tabular2cpt_input.py <csv of excel file> <full text field name>
<dir out>
"""
import pandas as pd
import logging
import sys
import argparse
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='excel or csv file containing text data')
parser.add_argument('text_field', help='name of the text field')
parser.add_argument('out_dir', help='directory where output should be stored')
args = parser.parse_args()
frogclient = get_frogclient()
if args.in_file.endswith('.xls') or args.in_file.endswith('.xlsx'):
input_data = pd.read_excel(args.in_file)
else:
input_data = pd.read_csv(args.in_file)
for i, text in enumerate(input_data[args.text_field]):
p = Perspective('', pos_topic_words(), pos_opinion_words())
if i % 25 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(input_data[args.text_field])))
if pd.notnull(text):
for pos, lemma in pos_and_lemmas(text, frogclient):
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
try:
file_name = '{}.txt'.format(input_data['id'][i])
except:
file_name = '{}.txt'.format(i)
p.write2file(args.out_dir, file_name)
| """Script that converts a field in a tabular data file to cptm input files
Used for the CAP vragenuurtje data.
Uses frog to pos-tag and lemmatize the data.
Usage: python tabular2cpt_input.py <csv of excel file> <full text field name>
<dir out>
"""
import pandas as pd
import logging
import sys
import argparse
from cptm.utils.inputgeneration import Perspective, remove_trailing_digits
from cptm.utils.dutchdata import pos_topic_words, pos_opinion_words, word_types
from cptm.utils.frog import get_frogclient, pos_and_lemmas
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('in_file', help='excel or csv file containing text data')
parser.add_argument('text_field', help='name of the text field')
parser.add_argument('out_dir', help='directory where output should be stored')
args = parser.parse_args()
frogclient = get_frogclient()
if args.in_file.endswith('.xls') or args.in_file.endswith('.xlsx'):
input_data = pd.read_excel(args.in_file)
else:
input_data = pd.read_csv(args.in_file)
for i, text in enumerate(input_data[args.text_field]):
p = Perspective('', pos_topic_words(), pos_opinion_words())
if i % 25 == 0:
logger.info('Processing text {} of {}'.format(i + 1,
len(input_data[args.text_field])))
if pd.notnull(text):
for pos, lemma in pos_and_lemmas():
if pos in word_types():
p.add(pos, remove_trailing_digits(lemma))
try:
file_name = '{}.txt'.format(input_data['id'][i])
except:
file_name = '{}.txt'.format(i)
p.write2file(args.out_dir, file_name)
| apache-2.0 | Python |
3ca11cd2ba0bcff8bbc4d01df2ba5b72f5b2e4b0 | Remove the plural from the url | robhudson/warehouse,mattrobenolt/warehouse,techtonik/warehouse,techtonik/warehouse,mattrobenolt/warehouse,mattrobenolt/warehouse,robhudson/warehouse | warehouse/packaging/urls.py | warehouse/packaging/urls.py | # Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from werkzeug.routing import Rule, EndpointPrefix
urls = [
EndpointPrefix("warehouse.packaging.views.", [
Rule(
"/project/<project_name>/",
methods=["GET"],
endpoint="project_detail",
),
Rule(
"/project/<project_name>/<version>/",
methods=["GET"],
endpoint="project_detail",
),
]),
]
| # Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from werkzeug.routing import Rule, EndpointPrefix
urls = [
EndpointPrefix("warehouse.packaging.views.", [
Rule(
"/projects/<project_name>/",
methods=["GET"],
endpoint="project_detail",
),
Rule(
"/projects/<project_name>/<version>/",
methods=["GET"],
endpoint="project_detail",
),
]),
]
| apache-2.0 | Python |
33c82aaaf4e13a8762fc98ea3ad9b6757751cee9 | Update process-schedules.py | JeffreyPowell/pi-heating-hub,JeffreyPowell/pi-heating-hub,JeffreyPowell/pi-heating-hub | cron/process-schedules.py | cron/process-schedules.py | #!/usr/bin/env python
import MySQLdb
#import datetime
#import urllib2
#import os
servername = "localhost"
username = "pi"
password = "password"
dbname = "pi_heating_db"
cnx = MySQLdb.connect(host=servername, user=username, passwd=password, db=dbname)
cursorupdate = cnx.cursor()
query = ("UPDATE `timers` set value = value-1 WHERE value > 0;")
cursorupdate.execute(query)
cursorupdate.close()
cnx.commit()
cnx.close()
| apache-2.0 | Python | |
190546fa029b47adbfc2c008e505169294c9bb67 | Update WebClient.py | VitorHugoAguiar/ProBot,VitorHugoAguiar/ProBot,VitorHugoAguiar/ProBot,VitorHugoAguiar/ProBot | ProBot_BeagleBone/WebClient.py | ProBot_BeagleBone/WebClient.py | #!/usr/bin/python
import sys
import zmq
import SocketCommunication
from twisted.internet import reactor
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.python import log
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
# Initialization of classes from local files
Pub_Sub = SocketCommunication.publisher_and_subscriber()
class EchoClientProtocol(WebSocketClientProtocol):
def onMessage(self, payload, isBinary):
if not isBinary:
publisher=Pub_Sub.publisher(payload.decode('utf8'))
class EchoClientFactory(ReconnectingClientFactory, WebSocketClientFactory):
protocol = EchoClientProtocol
maxDelay = 10
def startedConnecting(self, connector):
print('Started to connect.')
def clientConnectionLost(self, connector, reason):
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionFailed(self, connector, reason):
ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Need the WebSocket server address, i.e. ws://139.162.157.96:9000")
sys.exit(1)
log.startLogging(sys.stdout)
factory = EchoClientFactory(sys.argv[1])
connectWS(factory)
reactor.run()
| #!/usr/bin/python
import sys
import zmq
import SocketCommunication
from twisted.internet import reactor
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.python import log
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
Pub_Sub = SocketCommunication.publisher_and_subscriber()
class EchoClientProtocol(WebSocketClientProtocol):
def onMessage(self, payload, isBinary):
if not isBinary:
publisher=Pub_Sub.publisher(payload.decode('utf8'))
class EchoClientFactory(ReconnectingClientFactory, WebSocketClientFactory):
protocol = EchoClientProtocol
maxDelay = 10
def startedConnecting(self, connector):
print('Started to connect.')
def clientConnectionLost(self, connector, reason):
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionFailed(self, connector, reason):
ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Need the WebSocket server address, i.e. ws://139.162.157.96:9000")
sys.exit(1)
log.startLogging(sys.stdout)
factory = EchoClientFactory(sys.argv[1])
connectWS(factory)
reactor.run()
| agpl-3.0 | Python |
4c230f587b66be4a2acfdf3814fa813914a1ab4e | Add upvotes/downvotes to urbandict plugin (closes #166) | JohnMaguire/Cardinal | plugins/urbandict/plugin.py | plugins/urbandict/plugin.py | import logging
from cardinal.decorators import command, help
import requests
URBANDICT_API_PREFIX = 'http://api.urbandictionary.com/v0/define'
class UrbanDictPlugin:
def __init__(self):
self.logger = logging.getLogger(__name__)
@command(['ud', 'urbandict'])
@help('Returns the top Urban Dictionary definition for a given word.')
@help('Syntax: .ud <word>')
def get_ud(self, cardinal, user, channel, msg):
try:
word = msg.split(' ', 1)[1]
except IndexError:
cardinal.sendMsg(channel, 'Syntax: .ud <word>')
return
try:
url = URBANDICT_API_PREFIX
data = requests.get(url, params={'term': word}).json()
entry = data['list'].pop(0)
definition = entry['definition']
thumbs_up = entry['thumbs_up']
thumbs_down = entry['thumbs_down']
link = entry['permalink']
response = ' UD [%s] - %s [\u25b2%d|\u25bc%d] - %s' % (
word, definition, thumbs_up, thumbs_down, link
)
cardinal.sendMsg(channel, response)
except Exception:
self.logger.exception("Error with definition: %s", word)
cardinal.sendMsg(channel, "Could not retrieve definition for %s" % word)
def setup():
return UrbanDictPlugin()
| from urllib.request import urlopen
import json
from cardinal.decorators import command, help
URBANDICT_API_PREFIX = 'http://api.urbandictionary.com/v0/define?term='
class UrbanDictPlugin:
@command(['ud', 'urbandict'])
@help('Returns the top Urban Dictionary definition for a given word.')
@help('Syntax: .ud <word>')
def get_ud(self, cardinal, user, channel, msg):
try:
word = msg.split(' ', 1)[1]
except IndexError:
cardinal.sendMsg(channel, 'Syntax: .ud <word>')
return
try:
url = URBANDICT_API_PREFIX + word
f = urlopen(url).read()
data = json.loads(f)
word_def = data['list'][0]['definition']
link = data['list'][0]['permalink']
response = 'UD for %s: %s (%s)' % (word, word_def, link)
cardinal.sendMsg(channel, response)
except Exception:
cardinal.sendMsg(channel, "Could not retrieve definition for %s" % word)
def setup():
return UrbanDictPlugin()
| mit | Python |
0ad6845b5ea1c4151143bd8e6902595bcd64845f | Add logging.error to run_cmd | sjktje/sjkscan,sjktje/sjkscan | sjkscan/utils.py | sjkscan/utils.py | import argparse
import datetime
import logging
import os
import shutil
import subprocess
from .config import config
from . import __version__
def run_cmd(args):
"""Run shell command and return its output.
:param args: list or string of shell command and arguments
:returns: output of command
"""
if isinstance(args, list):
args = ' '.join(args)
logging.debug('run_cmd: %s', args)
try:
result = subprocess.run(
args,
stdout=subprocess.PIPE,
shell=True).stdout
except OSError as e:
logging.error('Execution failed: %s', e)
raise
return result
def files(dir, ext=None):
"""Yield regular files in directory, optionally of specific extension.
This function is a generator, and could be used like:
for f in utils.files('/some/directory', 'pnm'):
do_something_to_the_pnm(f)
:param dir: directory to traverse
:param ext: extension of files to list. Leading dot is ignored.
"""
for f in os.scandir(dir):
if not f.is_file():
continue
if ext and not f.name.endswith('.{}'.format(ext.lstrip('.'))):
continue
yield f.name
def move(old, new):
"""Move file
:param old: file to move
:param new: new location/filename
"""
# TODO: This should be a logger statement.
print('Move: {} -> {}'.format(old, new))
shutil.move(old, new)
def remove(file):
"""Remove file.
:param file: file to remove
"""
# TODO: This should be a logger statement.
print('Remove: {}'.format(file))
os.remove(file)
def is_scan_name(name):
"""Determine whether name (probably) is the name of a scan directory.
:param dir: directory name to check
:returns: True if it is a scan directory, False if not.
"""
try:
datetime.datetime.strptime(name, config['Paths']['dir_format'])
except ValueError:
return False
else:
return True
def version():
"""Return sjkscan version.
:returns: version string
"""
return __version__
def parse_args(argv=None):
"""Parse command line arguments.
:param argv: array of command line arguments (sys.argv)
:returns: object with program arguments as attributes
"""
parser = argparse.ArgumentParser()
parser.add_argument('-V', '--version', action='version', version='%(prog)s v{}'.format(version()), help='print version and exit')
return parser.parse_args(argv)
| import argparse
import datetime
import logging
import os
import shutil
import subprocess
from .config import config
from . import __version__
def run_cmd(args):
"""Run shell command and return its output.
:param args: list or string of shell command and arguments
:returns: output of command
"""
if isinstance(args, list):
args = ' '.join(args)
logging.debug('run_cmd: %s', args)
try:
result = subprocess.run(
args,
stdout=subprocess.PIPE,
shell=True).stdout
except OSError as e:
print('Execution failed: {}'.format(e))
return result
def files(dir, ext=None):
"""Yield regular files in directory, optionally of specific extension.
This function is a generator, and could be used like:
for f in utils.files('/some/directory', 'pnm'):
do_something_to_the_pnm(f)
:param dir: directory to traverse
:param ext: extension of files to list. Leading dot is ignored.
"""
for f in os.scandir(dir):
if not f.is_file():
continue
if ext and not f.name.endswith('.{}'.format(ext.lstrip('.'))):
continue
yield f.name
def move(old, new):
"""Move file
:param old: file to move
:param new: new location/filename
"""
# TODO: This should be a logger statement.
print('Move: {} -> {}'.format(old, new))
shutil.move(old, new)
def remove(file):
"""Remove file.
:param file: file to remove
"""
# TODO: This should be a logger statement.
print('Remove: {}'.format(file))
os.remove(file)
def is_scan_name(name):
"""Determine whether name (probably) is the name of a scan directory.
:param dir: directory name to check
:returns: True if it is a scan directory, False if not.
"""
try:
datetime.datetime.strptime(name, config['Paths']['dir_format'])
except ValueError:
return False
else:
return True
def version():
"""Return sjkscan version.
:returns: version string
"""
return __version__
def parse_args(argv=None):
"""Parse command line arguments.
:param argv: array of command line arguments (sys.argv)
:returns: object with program arguments as attributes
"""
parser = argparse.ArgumentParser()
parser.add_argument('-V', '--version', action='version', version='%(prog)s v{}'.format(version()), help='print version and exit')
return parser.parse_args(argv)
| bsd-2-clause | Python |
6b2202d0b7a4ef544b63e1692e40c5fec9c5930a | Extend import statement to support Python 3 | plotly/dash,plotly/dash,plotly/dash,plotly/dash,plotly/dash | dash_renderer/__init__.py | dash_renderer/__init__.py | # For reasons that I don't fully understand,
# unless I include __file__ in here, the packaged version
# of this module will just be a .egg file, not a .egg folder.
# And if it's just a .egg file, it won't include the necessary
# dependencies from MANIFEST.in.
# Found the __file__ clue by inspecting the `python setup.py install`
# command in the dash_html_components package which printed out:
# `dash_html_components.__init__: module references __file__`
# TODO - Understand this better
from .version import __version__
__file__
# Dash renderer's dependencies get loaded in a special order by the server:
# React bundles first, the renderer bundle at the very end.
_js_dist_dependencies = [
{
'external_url': [
'https://unpkg.com/react@15.4.2/dist/react.min.js',
'https://unpkg.com/react-dom@15.4.2/dist/react-dom.min.js'
],
'relative_package_path': [
'react@15.4.2.min.js',
'react-dom@15.4.2.min.js'
],
'namespace': 'dash_renderer'
}
]
_js_dist = [
{
'relative_package_path': 'bundle.js',
"external_url": (
'https://unpkg.com/dash-renderer@{}'
'/dash_renderer/bundle.js'
).format(__version__),
'namespace': 'dash_renderer'
}
]
| # For reasons that I don't fully understand,
# unless I include __file__ in here, the packaged version
# of this module will just be a .egg file, not a .egg folder.
# And if it's just a .egg file, it won't include the necessary
# dependencies from MANIFEST.in.
# Found the __file__ clue by inspecting the `python setup.py install`
# command in the dash_html_components package which printed out:
# `dash_html_components.__init__: module references __file__`
# TODO - Understand this better
from version import __version__
__file__
# Dash renderer's dependencies get loaded in a special order by the server:
# React bundles first, the renderer bundle at the very end.
_js_dist_dependencies = [
{
'external_url': [
'https://unpkg.com/react@15.4.2/dist/react.min.js',
'https://unpkg.com/react-dom@15.4.2/dist/react-dom.min.js'
],
'relative_package_path': [
'react@15.4.2.min.js',
'react-dom@15.4.2.min.js'
],
'namespace': 'dash_renderer'
}
]
_js_dist = [
{
'relative_package_path': 'bundle.js',
"external_url": (
'https://unpkg.com/dash-renderer@{}'
'/dash_renderer/bundle.js'
).format(__version__),
'namespace': 'dash_renderer'
}
]
| mit | Python |
9539275c3d3d0b6dd4ff95c5e0795faea4112b02 | Remove usage of versiontools from dashboard_app/__init__.py | OSSystems/lava-server,Linaro/lava-server,Linaro/lava-server,OSSystems/lava-server,Linaro/lava-server,Linaro/lava-server,OSSystems/lava-server | dashboard_app/__init__.py | dashboard_app/__init__.py | # Copyright (C) 2010 Linaro Limited
#
# Author: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
#
# This file is part of Launch Control.
#
# Launch Control is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License version 3
# as published by the Free Software Foundation
#
# Launch Control is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Launch Control. If not, see <http://www.gnu.org/licenses/>.
"""
Dashboard Application (package)
"""
__version__ = (0, 5, 0, "dev", 0)
| # Copyright (C) 2010 Linaro Limited
#
# Author: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
#
# This file is part of Launch Control.
#
# Launch Control is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License version 3
# as published by the Free Software Foundation
#
# Launch Control is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Launch Control. If not, see <http://www.gnu.org/licenses/>.
"""
Dashboard Application (package)
"""
__version__ = (0, 4, 0, "candidate", 9)
try:
import versiontools
__version__ = versiontools.Version.from_tuple(__version__)
except ImportError:
pass
| agpl-3.0 | Python |
0574b7dad5225444f0f7292374f645a6dc2d305b | test cleanup | mitodl/lore,amir-qayyum-khan/lore,amir-qayyum-khan/lore,amir-qayyum-khan/lore,mitodl/lore,mitodl/lore,mitodl/lore,mitodl/lore,amir-qayyum-khan/lore,amir-qayyum-khan/lore | importer/tests/test_views.py | importer/tests/test_views.py | """
Test the importer views to make sure they work.
"""
from __future__ import unicode_literals
import logging
from learningresources.models import LearningResource, Course
from learningresources.tests.base import LoreTestCase
from .test_import import get_course_zip
HTTP_OK = 200
log = logging.getLogger(__name__)
class TestViews(LoreTestCase):
"""Hit each view."""
def test_status_get(self):
"""Status page."""
resp = self.client.get("/importer/status", follow=True)
self.assertTrue(resp.status_code == HTTP_OK)
body = resp.content.decode("utf-8")
self.assertTrue("Number of learning resources:" in body)
def test_upload_get(self):
"""GET upload page."""
resp = self.client.get("/importer/upload", follow=True)
body = resp.content.decode("utf-8")
self.assertTrue('enctype="multipart/form-data"' in body)
def test_upload_post(self):
"""POST upload page."""
self.assertTrue(LearningResource.objects.count() == 0)
body = self.upload_test_file()
self.assertTrue(LearningResource.objects.count() == 5)
# We should have been redirected to the Listing page.
self.assertTrue('<h1>Listing</h1>' in body)
def test_upload_duplicate(self):
"""Gracefully inform the user."""
self.assertTrue(Course.objects.count() == 0)
self.upload_test_file()
self.assertTrue(Course.objects.count() == 1)
body = self.upload_test_file()
self.assertTrue(Course.objects.count() == 1)
self.assertTrue("Duplicate course" in body)
def upload_test_file(self):
"""Used multiple times in tests"""
with open(get_course_zip(), "rb") as post_file:
resp = self.client.post(
"/importer/upload/",
{"course_file": post_file, "repository": self.repo.id},
follow=True
)
return resp.content.decode("utf-8")
| """
Test the importer views to make sure they work.
"""
from __future__ import unicode_literals
import logging
from learningresources.models import LearningResource, Course
from learningresources.tests.base import LoreTestCase
from .test_import import get_course_zip
HTTP_OK = 200
log = logging.getLogger(__name__)
class TestViews(LoreTestCase):
"""Hit each view."""
def test_status_get(self):
"""Status page."""
resp = self.client.get("/importer/status", follow=True)
self.assertTrue(resp.status_code == HTTP_OK)
body = resp.content.decode("utf-8")
self.assertTrue("Number of learning resources:" in body)
def test_upload_get(self):
"""GET upload page."""
resp = self.client.get("/importer/upload", follow=True)
body = resp.content.decode("utf-8")
self.assertTrue('enctype="multipart/form-data"' in body)
def test_upload_post(self):
"""POST upload page."""
log.debug("in test_upload_post")
log.debug("%s resources before", LearningResource.objects.count())
self.assertTrue(LearningResource.objects.count() == 0)
with open(get_course_zip(), "rb") as post_file:
resp = self.client.post(
"/importer/upload/",
{"course_file": post_file, "repository": self.repo.id},
follow=True
)
log.debug("%s resources after", LearningResource.objects.count())
self.assertTrue(LearningResource.objects.count() == 5)
# We should have been redirected to the Listing page.
body = resp.content.decode("utf-8")
self.assertTrue('<h1>Listing</h1>' in body)
def test_upload_duplicate(self):
"""Gracefully inform the user."""
self.assertTrue(Course.objects.count() == 0)
with open(get_course_zip(), "rb") as post_file:
self.client.post(
"/importer/upload/",
{"course_file": post_file, "repository": self.repo.id},
follow=True
)
self.assertTrue(Course.objects.count() == 1)
with open(get_course_zip(), "rb") as post_file:
resp = self.client.post(
"/importer/upload/",
{"course_file": post_file, "repository": self.repo.id},
follow=True
)
self.assertTrue(Course.objects.count() == 1)
body = resp.content.decode("utf-8")
log.debug(body)
self.assertTrue("Duplicate course" in body)
| agpl-3.0 | Python |
fe65cca3ab2fe3c0f2c05531e761e5a7bd83dbd4 | add cluster info api | InterestingLab/elasticmanager | cluster/models.py | cluster/models.py | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from elasticsearch import Elasticsearch
@python_2_unicode_compatible
class ElasticCluster(models.Model):
class Meta:
db_table = 'cluster_elastic_cluster'
# cluster name
name = models.CharField(max_length=128)
host = models.CharField(max_length=256)
port = models.IntegerField()
def __str__(self):
return '{name} {host}:{port}'.format(name=self.name, host=self.host, port=self.port)
def address(self):
return '{host}:{port}'.format(host=self.host, port=self.port)
def client(self, timeout=30):
return Elasticsearch(self.address(), timeout=timeout)
def info(self):
info = self.client().info()
ret = {
'cluster_name': info['cluster_name'],
'elasticsearch_version': info['version']['number'],
'lucene_version': info['version']['lucene_version'],
}
return ret
def health(self):
es = self.client()
return es.cluster.health()
def pending_tasks(self):
es = self.client()
tasks = es.cluster.pending_tasks()
return len(tasks), tasks
| from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from elasticsearch import Elasticsearch
@python_2_unicode_compatible
class ElasticCluster(models.Model):
class Meta:
db_table = 'cluster_elastic_cluster'
# cluster name
name = models.CharField(max_length=128)
host = models.CharField(max_length=256)
port = models.IntegerField()
def __str__(self):
return '{name} {host}:{port}'.format(name=self.name, host=self.host, port=self.port)
def address(self):
return '{host}:{port}'.format(host=self.host, port=self.port)
def client(self, timeout=30):
return Elasticsearch(self.address(), timeout=timeout)
def health(self):
es = self.client()
return es.cluster.health()
def pending_tasks(self):
es = self.client()
tasks = es.cluster.pending_tasks()
return len(tasks), tasks
| mit | Python |
ad276d549eebe9c6fe99a629a76f02fc04b2bd51 | Simplify pubannotation test to not check exact numbers | jakelever/kindred,jakelever/kindred | tests/test_pubannotation.py | tests/test_pubannotation.py |
import kindred
def test_pubannotation():
corpus = kindred.pubannotation.load('bionlp-st-gro-2013-development')
assert isinstance(corpus,kindred.Corpus)
fileCount = len(corpus.documents)
entityCount = sum([ len(d.entities) for d in corpus.documents ])
relationCount = sum([ len(d.relations) for d in corpus.documents ])
assert fileCount > 0
assert relationCount > 0
assert entityCount > 0
if __name__ == '__main__':
test_pubannotation()
|
import kindred
def test_pubannotation():
corpus = kindred.pubannotation.load('bionlp-st-gro-2013-development')
assert isinstance(corpus,kindred.Corpus)
fileCount = len(corpus.documents)
entityCount = sum([ len(d.entities) for d in corpus.documents ])
relationCount = sum([ len(d.relations) for d in corpus.documents ])
assert fileCount == 50
assert relationCount == 1454
assert entityCount == 2657
if __name__ == '__main__':
test_pubannotation()
| mit | Python |
75d0b325ac16b05bdc22591a7532c5543ffe26d2 | Add wagtail_hooks tests | gasman/wagtaildraftail,springload/wagtaildraftail,springload/wagtaildraftail,springload/wagtaildraftail,gasman/wagtaildraftail,gasman/wagtaildraftail,gasman/wagtaildraftail,springload/wagtaildraftail | tests/test_wagtail_hooks.py | tests/test_wagtail_hooks.py | from __future__ import absolute_import, unicode_literals
import unittest
from wagtail.wagtailcore.hooks import get_hooks
from wagtaildraftail.wagtail_hooks import draftail_editor_css, draftail_editor_js
class TestWagtailHooks(unittest.TestCase):
def test_editor_css(self):
self.assertEqual(
draftail_editor_css(), '<link rel="stylesheet" href="/static/wagtaildraftail/wagtaildraftail.css">')
def test_insert_editor_css_hook(self):
hooks = get_hooks('insert_editor_css')
self.assertIn(draftail_editor_css, hooks, 'Editor CSS should be inserted automatically.')
def test_editor_js(self):
self.assertEqual(
draftail_editor_js(), '<script src="/static/wagtaildraftail/wagtaildraftail.js"></script>')
def test_insert_editor_js(self):
hooks = get_hooks('insert_editor_js')
self.assertIn(draftail_editor_js, hooks, 'Editor JS should be inserted automatically.')
| from __future__ import absolute_import, unicode_literals
import unittest
from wagtaildraftail.wagtail_hooks import draftail_editor_css, draftail_editor_js
class TestWagtailHooks(unittest.TestCase):
def test_editor_css(self):
self.assertEqual(
draftail_editor_css(), '<link rel="stylesheet" href="/static/wagtaildraftail/wagtaildraftail.css">')
def test_editor_js(self):
self.assertEqual(
draftail_editor_js(), '<script src="/static/wagtaildraftail/wagtaildraftail.js"></script>')
| mit | Python |
e7587adfa574f17998168aa09eb924ecd78bd74f | rename for clarity | AntreasAntoniou/DeepClassificationBot,AntreasAntoniou/DeepClassificationBot | tests/test_name_extractor.py | tests/test_name_extractor.py | # -*- coding: utf-8 -*-
import collections
import requests
import name_extractor
def test_top_n_shows(monkeypatch):
for report, expected in [
(two_shows, [['Steins;Gate'], ['Fullmetal Alchemist: Brotherhood']]),
(no_shows, []),
]:
monkeypatch.setattr(requests, 'get', mock_get(report))
shows = name_extractor.get_top_n_shows(100)
assert shows == expected
def mock_get(content):
def _mock_get(*args, **kwargs):
return MockResponse(content)
return _mock_get
MockResponse = collections.namedtuple('Response', 'content')
two_shows = '''
<report>
<item id="11770">
<anime href="/encyclopedia/anime.php?id=11770">Steins;Gate (TV)</anime>
</item>
<item id="10216">
<anime href="/encyclopedia/anime.php?id=10216">Fullmetal Alchemist: Brotherhood (TV)</anime>
</item>
</report>
'''
no_shows = '''
<report>
</report>
'''
| # -*- coding: utf-8 -*-
import collections
import requests
import name_extractor
def test_top_n_shows(monkeypatch):
for report, expected in [
(two_items, ['Steins;Gate', 'Fullmetal Alchemist: Brotherhood']),
(no_items, []),
]:
monkeypatch.setattr(requests, 'get', mock_get(report))
shows = name_extractor.get_top_n_shows(100)
assert shows == expected
def mock_get(content):
def _mock_get(*args, **kwargs):
return MockResponse(content)
return _mock_get
MockResponse = collections.namedtuple('Response', 'content')
two_items = '''
<report>
<item id="11770">
<anime href="/encyclopedia/anime.php?id=11770">Steins;Gate (TV)</anime>
</item>
<item id="10216">
<anime href="/encyclopedia/anime.php?id=10216">Fullmetal Alchemist: Brotherhood (TV)</anime>
</item>
</report>
'''
no_items = '''
<report>
</report>
'''
| mit | Python |
2cff29fa2ec89c4ced09691c455e3aa554be4f9f | Add test for display-cop-names | adrianmoisey/lint-review,zoidbergwill/lint-review,markstory/lint-review,markstory/lint-review,markstory/lint-review,adrianmoisey/lint-review,zoidbergwill/lint-review,zoidbergwill/lint-review | tests/tools/test_rubocop.py | tests/tools/test_rubocop.py | from os.path import abspath
from lintreview.review import Problems
from lintreview.review import Comment
from lintreview.utils import in_path
from lintreview.tools.rubocop import Rubocop
from unittest import TestCase
from unittest import skipIf
from nose.tools import eq_
rubocop_missing = not(in_path('rubocop'))
class TestRubocop(TestCase):
needs_rubocop = skipIf(rubocop_missing, 'Missing rubocop, cannot run')
fixtures = [
'tests/fixtures/rubocop/no_errors.rb',
'tests/fixtures/rubocop/has_errors.rb',
]
def setUp(self):
self.problems = Problems()
self.tool = Rubocop(self.problems)
def test_match_file(self):
self.assertFalse(self.tool.match_file('test.py'))
self.assertFalse(self.tool.match_file('dir/name/test.py'))
self.assertTrue(self.tool.match_file('test.rb'))
self.assertTrue(self.tool.match_file('dir/name/test.rb'))
@needs_rubocop
def test_process_files__one_file_pass(self):
self.tool.process_files([self.fixtures[0]])
eq_([], self.problems.all(self.fixtures[0]))
@needs_rubocop
def test_process_files__one_file_fail(self):
linty_filename = abspath(self.fixtures[1])
self.tool.process_files([linty_filename])
problems = self.problems.all(linty_filename)
expected = Comment(linty_filename, 4, 4,
'C: Trailing whitespace detected.')
eq_(expected, problems[5])
@needs_rubocop
def test_process_files_two_files(self):
self.tool.process_files(self.fixtures)
linty_filename = abspath(self.fixtures[1])
eq_(6, len(self.problems.all(linty_filename)))
freshly_laundered_filename = abspath(self.fixtures[0])
eq_([], self.problems.all(freshly_laundered_filename))
@needs_rubocop
def test_process_files_one_file_fail_display_cop_names(self):
options = {
'display_cop_names': 'True',
}
self.tool = Rubocop(self.problems, options)
linty_filename = abspath(self.fixtures[1])
self.tool.process_files([linty_filename])
problems = self.problems.all(linty_filename)
expected = Comment(linty_filename, 3, 3,
'C: Metrics/LineLength: Line is too long. [82/80]')
eq_(expected, problems[4])
| from os.path import abspath
from lintreview.review import Problems
from lintreview.review import Comment
from lintreview.utils import in_path
from lintreview.tools.rubocop import Rubocop
from unittest import TestCase
from unittest import skipIf
from nose.tools import eq_
rubocop_missing = not(in_path('rubocop'))
class TestRubocop(TestCase):
needs_rubocop = skipIf(rubocop_missing, 'Missing rubocop, cannot run')
fixtures = [
'tests/fixtures/rubocop/no_errors.rb',
'tests/fixtures/rubocop/has_errors.rb',
]
def setUp(self):
self.problems = Problems()
self.tool = Rubocop(self.problems)
def test_match_file(self):
self.assertFalse(self.tool.match_file('test.py'))
self.assertFalse(self.tool.match_file('dir/name/test.py'))
self.assertTrue(self.tool.match_file('test.rb'))
self.assertTrue(self.tool.match_file('dir/name/test.rb'))
@needs_rubocop
def test_process_files__one_file_pass(self):
self.tool.process_files([self.fixtures[0]])
eq_([], self.problems.all(self.fixtures[0]))
@needs_rubocop
def test_process_files__one_file_fail(self):
linty_filename = abspath(self.fixtures[1])
self.tool.process_files([linty_filename])
problems = self.problems.all(linty_filename)
expected = Comment(linty_filename, 4, 4,
'C: Trailing whitespace detected.')
eq_(expected, problems[5])
@needs_rubocop
def test_process_files_two_files(self):
self.tool.process_files(self.fixtures)
linty_filename = abspath(self.fixtures[1])
eq_(6, len(self.problems.all(linty_filename)))
freshly_laundered_filename = abspath(self.fixtures[0])
eq_([], self.problems.all(freshly_laundered_filename))
| mit | Python |
7cd2a9ba465c84c979985791e135025f448d0dcc | add pipeline settings | Crayzero/crawler | first_scrapy/settings.py | first_scrapy/settings.py | # Scrapy settings for first_scrapy project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'first_scrapy'
SPIDER_MODULES = ['first_scrapy.spiders']
NEWSPIDER_MODULE = 'first_scrapy.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'first_scrapy (+http://www.yourdomain.com)'
ITEM_PIPELINES=['first_scrapy.pipelines.StartupNews'] | # Scrapy settings for first_scrapy project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'first_scrapy'
SPIDER_MODULES = ['first_scrapy.spiders']
NEWSPIDER_MODULE = 'first_scrapy.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'first_scrapy (+http://www.yourdomain.com)'
| mit | Python |
be9a7601296d274b858e299435e1c6f903d3c564 | remove init | freevo/kaa-base,freevo/kaa-base | test/glib.py | test/glib.py | import kaa
class Test():
@kaa.threaded(kaa.GOBJECT)
@kaa.synchronized()
def foo(self):
import time
time.sleep(0.4)
return kaa.is_mainthread()
@kaa.coroutine()
def test(self):
r = yield self.foo()
print 'foo', kaa.is_mainthread(), r
@kaa.synchronized()
def has_to_wait(self):
print 'go'
if 1:
kaa.gobject_set_threaded()
else:
kaa.main.select_notifier('gtk')
t = Test()
kaa.OneShotTimer(t.test).start(0.1)
kaa.OneShotTimer(t.has_to_wait).start(0.2)
kaa.main.run()
print 'done'
| import kaa
class Test():
@kaa.threaded(kaa.GOBJECT)
@kaa.synchronized()
def foo(self):
import time
time.sleep(0.4)
return kaa.is_mainthread()
@kaa.coroutine()
def test(self):
r = yield self.foo()
print 'foo', kaa.is_mainthread(), r
@kaa.synchronized()
def has_to_wait(self):
print 'go'
if 1:
kaa.main.select_notifier('generic')
kaa.gobject_set_threaded()
else:
kaa.main.select_notifier('gtk')
t = Test()
kaa.OneShotTimer(t.test).start(0.1)
kaa.OneShotTimer(t.has_to_wait).start(0.2)
kaa.main.run()
print 'done'
| lgpl-2.1 | Python |
f73b04bb17febf7b8eee05e389d071e4a83912c7 | test with json messages | mgax/zechat,mgax/zechat | testsuite/test_transport.py | testsuite/test_transport.py | import pytest
from mock import Mock, call
from flask import json
@pytest.fixture
def node():
from zechat.node import Node
return Node()
def mock_ws(client_id):
ws = Mock(id=client_id)
ws.out = []
ws.send.side_effect = lambda i: ws.out.append(json.loads(i))
return ws
def handle(node, ws, incoming):
ws.receive.side_effect = [json.dumps(i) for i in incoming] + [None]
with node.transport(ws) as transport:
transport.handle()
return ws.out
def msg(recipient, text):
return dict(recipient=recipient, text=text)
def test_roundtrip(node):
out = handle(node, mock_ws('one'), [msg('one', 'foo'), msg('one', 'bar')])
assert out == [msg('one', 'foo'), msg('one', 'bar')]
def test_peer_receives_messages(node):
peer_ws = mock_ws('two')
with node.transport(peer_ws):
handle(node, mock_ws('one'), [msg('two', 'foo'), msg('two', 'bar')])
assert peer_ws.out == [msg('two', 'foo'), msg('two', 'bar')]
| import pytest
from mock import Mock, call
from flask import json
@pytest.fixture
def node():
from zechat.node import Node
return Node()
def mock_ws(client_id):
ws = Mock(id=client_id)
ws.out = []
ws.send.side_effect = lambda i: ws.out.append(json.loads(i))
return ws
def handle(node, ws, incoming):
ws.receive.side_effect = [json.dumps(i) for i in incoming] + [None]
with node.transport(ws) as transport:
transport.handle()
return ws.out
def test_roundtrip(node):
out = handle(node, mock_ws('one'), ['foo', 'bar'])
assert out == ['foo', 'bar']
def test_peer_receives_messages(node):
peer_ws = mock_ws('two')
with node.transport(peer_ws):
handle(node, mock_ws('one'), ['foo', 'bar'])
assert peer_ws.out == ['foo', 'bar']
| mit | Python |
e38a97b9a4252aa5d1c825d564284e4d7ba23d0d | Fix importlib error detection for Python 3.5, compatible with 2.x | django-fluent/django-fluent-comments,edoburu/django-fluent-comments,django-fluent/django-fluent-comments,django-fluent/django-fluent-comments,edoburu/django-fluent-comments,django-fluent/django-fluent-comments,edoburu/django-fluent-comments | fluent_comments/utils.py | fluent_comments/utils.py | """
Internal utils
"""
import sys
import traceback
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured
from fluent_comments import appsettings
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module # Python 2.6 compatibility
def get_comment_template_name(comment):
"""
Internal function for the rendering of comments.
"""
ctype = ContentType.objects.get_for_id(comment.content_type_id)
return [
"comments/%s/%s/comment.html" % (ctype.app_label, ctype.model),
"comments/%s/comment.html" % ctype.app_label,
"comments/comment.html"
]
def get_comment_context_data(comment, action=None):
"""
Internal function for the rendering of comments.
"""
return {
'comment': comment,
'action': action,
'preview': (action == 'preview'),
'USE_THREADEDCOMMENTS': appsettings.USE_THREADEDCOMMENTS,
}
def import_symbol(import_path, setting_name):
"""
Import a class or function by name.
"""
mod_name, class_name = import_path.rsplit('.', 1)
# import module
try:
mod = import_module(mod_name)
cls = getattr(mod, class_name)
except ImportError as e:
__, __, exc_traceback = sys.exc_info()
frames = traceback.extract_tb(exc_traceback)
if len(frames) > 1 and any('importlib' not in f[0] for f in frames[1:]):
raise # import error is a level deeper.
raise ImproperlyConfigured("{0} does not point to an existing class: {1}".format(setting_name, import_path))
except AttributeError:
raise ImproperlyConfigured("{0} does not point to an existing class: {1}".format(setting_name, import_path))
return cls
| """
Internal utils
"""
import sys
import traceback
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured
from fluent_comments import appsettings
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module # Python 2.6 compatibility
def get_comment_template_name(comment):
"""
Internal function for the rendering of comments.
"""
ctype = ContentType.objects.get_for_id(comment.content_type_id)
return [
"comments/%s/%s/comment.html" % (ctype.app_label, ctype.model),
"comments/%s/comment.html" % ctype.app_label,
"comments/comment.html"
]
def get_comment_context_data(comment, action=None):
"""
Internal function for the rendering of comments.
"""
return {
'comment': comment,
'action': action,
'preview': (action == 'preview'),
'USE_THREADEDCOMMENTS': appsettings.USE_THREADEDCOMMENTS,
}
def import_symbol(import_path, setting_name):
"""
Import a class or function by name.
"""
mod_name, class_name = import_path.rsplit('.', 1)
# import module
try:
mod = import_module(mod_name)
cls = getattr(mod, class_name)
except ImportError as e:
__, __, exc_traceback = sys.exc_info()
frames = traceback.extract_tb(exc_traceback)
if len(frames) > 2:
raise # import error is a level deeper.
raise ImproperlyConfigured("{0} does not point to an existing class: {1}".format(setting_name, import_path))
except AttributeError:
raise ImproperlyConfigured("{0} does not point to an existing class: {1}".format(setting_name, import_path))
return cls
| apache-2.0 | Python |
246c4bed63b59763c84e7bc89c1a3b22a36157f1 | Bump version to 1.2 | edoburu/django-fluent-utils | fluent_utils/__init__.py | fluent_utils/__init__.py | # following PEP 386
__version__ = "1.2"
| # following PEP 386
__version__ = "1.1.6"
| apache-2.0 | Python |
4b659b7b2552da033753349e059eee172025e00e | Reorder imports based on isort rules. | adbpy/wire-protocol | adbwp/__init__.py | adbwp/__init__.py | """
adbwp
~~~~~
Android Debug Bridge (ADB) Wire Protocol.
"""
# pylint: disable=wildcard-import
from . import exceptions, header, message
from .exceptions import *
from .header import Header
from .message import Message
__all__ = exceptions.__all__ + ['header', 'message', 'Header', 'Message']
__version__ = '0.0.1'
| """
adbwp
~~~~~
Android Debug Bridge (ADB) Wire Protocol.
"""
# pylint: disable=wildcard-import
from . import exceptions
from .exceptions import *
from . import header
from .header import Header
from . import message
from .message import Message
__all__ = exceptions.__all__ + ['header', 'message', 'Header', 'Message']
__version__ = '0.0.1'
| apache-2.0 | Python |
33641ac7d517f436fd69bd567e21aebcb1e908cd | Update admin | colajam93/aurpackager,colajam93/aurpackager,colajam93/aurpackager,colajam93/aurpackager | manager/admin.py | manager/admin.py | from django.contrib import admin
from manager.models import Package, Build
class PackageAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'source')
admin.site.register(Package, PackageAdmin)
class BuildAdmin(admin.ModelAdmin):
list_display = ('id', 'package', 'version', 'date', 'status')
admin.site.register(Build, BuildAdmin)
| from django.contrib import admin
from manager.models import Package, Build
class PackageAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'source')
admin.site.register(Package, PackageAdmin)
class BuildAdmin(admin.ModelAdmin):
list_display = ('id', 'package', 'version', 'date')
admin.site.register(Build, BuildAdmin)
| mit | Python |
12b1b7a477cc99e1c3ec3405269999c7974677b6 | Move getting the event loop out of try/except | mwfrojdman/aioinotify | aioinotify/cli.py | aioinotify/cli.py | import logging
from argparse import ArgumentParser
import asyncio
from .protocol import connect_inotify
logger = logging.getLogger(__name__)
def main():
parser = ArgumentParser()
parser.add_argument(
'-ll', '--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'], default='WARNING')
parser.add_argument('paths', nargs='+', help='File path(s) to watch for file system events')
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log_level))
loop = asyncio.get_event_loop()
try:
_, inotify = loop.run_until_complete(connect_inotify())
@asyncio.coroutine
def run(inotify):
@asyncio.coroutine
def callback(event):
print(event)
for path in args.paths:
watch = yield from inotify.watch(callback, path, all_events=True)
logger.debug('Added watch %s for all events in %s', watch.watch_descriptor, path)
yield from inotify.close_event.wait()
try:
loop.run_until_complete(run(inotify))
except KeyboardInterrupt:
inotify.close()
loop.run_until_complete(inotify.close_event.wait())
finally:
loop.close()
if __name__ == '__main__':
main()
| import logging
from argparse import ArgumentParser
import asyncio
from .protocol import connect_inotify
logger = logging.getLogger(__name__)
def main():
parser = ArgumentParser()
parser.add_argument(
'-ll', '--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'], default='WARNING')
parser.add_argument('paths', nargs='+', help='File path(s) to watch for file system events')
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log_level))
try:
loop = asyncio.get_event_loop()
_, inotify = loop.run_until_complete(connect_inotify())
@asyncio.coroutine
def run(inotify):
@asyncio.coroutine
def callback(event):
print(event)
for path in args.paths:
watch = yield from inotify.watch(callback, path, all_events=True)
logger.debug('Added watch %s for all events in %s', watch.watch_descriptor, path)
yield from inotify.close_event.wait()
try:
loop.run_until_complete(run(inotify))
except KeyboardInterrupt:
inotify.close()
loop.run_until_complete(inotify.close_event.wait())
finally:
loop.close()
if __name__ == '__main__':
main()
| apache-2.0 | Python |
a6eb9eabc2931cbb647d245332bc3eb9ac77598b | Add a soup.find API test | kovidgoyal/html5-parser,kovidgoyal/html5-parser | test/soup.py | test/soup.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: Apache 2.0 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import absolute_import, division, print_function, unicode_literals
from html5_parser.soup import parse
from . import TestCase
class SoupTest(TestCase):
def test_simple_soup(self):
root = parse('<p>\n<a>y</a>z<x:x>1</x:x>')
self.ae(
type('')(root), '<html><head></head><body><p>\n<a>y</a>z<x:x>1</x:x></p></body></html>')
root = parse('<svg></svg></body></html>')
root = parse('<p><!-- ---->')
self.ae(type('')(root), '<html><head></head><body><p><!-- ----></p></body></html>')
root = parse('<p><i><b>')
self.ae(type('')(root), '<html><head></head><body><p><i><b></b></i></p></body></html>')
def test_attr_soup(self):
root = parse('<p a=1 b=2 ID=3><a a=a>')
self.ae(dict(root.body.p.attrs), {'a': '1', 'b': '2', 'id': '3'})
self.ae(dict(root.body.p.a.attrs), {'a': 'a'})
self.ae(type('')(root.find(name='a', a='a')), '<a a="a"></a>')
root = parse('<p a=1><svg><image xlink:href="h">')
self.ae(
type('')(root),
'<html><head></head><body>'
'<p a="1"><svg><image xlink:href="h"></image></svg></p>'
'</body></html>'
)
root = parse('<html xml:lang="en" lang="fr"><p>')
self.ae(dict(root.attrs), {'xml:lang': 'en', 'lang': 'fr'})
root = parse('<p><x xmlns:a="b">')
self.ae(type('')(root), '<html><head></head><body><p><x xmlns:a="b"></x></p></body></html>')
| #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: Apache 2.0 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import absolute_import, division, print_function, unicode_literals
from html5_parser.soup import parse
from . import TestCase
class SoupTest(TestCase):
def test_simple_soup(self):
root = parse('<p>\n<a>y</a>z<x:x>1</x:x>')
self.ae(
type('')(root), '<html><head></head><body><p>\n<a>y</a>z<x:x>1</x:x></p></body></html>')
root = parse('<svg></svg></body></html>')
root = parse('<p><!-- ---->')
self.ae(type('')(root), '<html><head></head><body><p><!-- ----></p></body></html>')
root = parse('<p><i><b>')
self.ae(type('')(root), '<html><head></head><body><p><i><b></b></i></p></body></html>')
def test_attr_soup(self):
root = parse('<p a=1 b=2 ID=3><a a=a>')
self.ae(dict(root.body.p.attrs), {'a': '1', 'b': '2', 'id': '3'})
self.ae(dict(root.body.p.a.attrs), {'a': 'a'})
root = parse('<p a=1><svg><image xlink:href="h">')
self.ae(
type('')(root),
'<html><head></head><body>'
'<p a="1"><svg><image xlink:href="h"></image></svg></p>'
'</body></html>'
)
root = parse('<html xml:lang="en" lang="fr"><p>')
self.ae(dict(root.attrs), {'xml:lang': 'en', 'lang': 'fr'})
root = parse('<p><x xmlns:a="b">')
self.ae(type('')(root), '<html><head></head><body><p><x xmlns:a="b"></x></p></body></html>')
| apache-2.0 | Python |
a60a840ff938030e2790bdae7d0fd933073cd96c | update West Oxfordshire import script for parl.2017-06-08 (closes #953) | DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_west_oxfordshire.py | polling_stations/apps/data_collection/management/commands/import_west_oxfordshire.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E07000181'
addresses_name = 'parl.2017-06-08/Version 1/West Oxfordshire Democracy_Club__08June2017.tsv'
stations_name = 'parl.2017-06-08/Version 1/West Oxfordshire Democracy_Club__08June2017.tsv'
elections = ['parl.2017-06-08']
csv_delimiter = '\t'
| from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E07000181'
addresses_name = 'May 2017/WestOxfordshire_Democracy_Club__04May2017.tsv'
stations_name = 'May 2017/WestOxfordshire_Democracy_Club__04May2017.tsv'
elections = [
'local.oxfordshire.2017-05-04',
'parl.2017-06-08'
]
csv_delimiter = '\t'
| bsd-3-clause | Python |
ef8dc1b4696b053896c315c54e0bfd375c25b7f5 | Update config.py | A1014280203/Ugly-Distributed-Crawler | master/config.py | master/config.py | headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/'
'537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
'Referer': ''
}
r_server = {
'ip': 'localhost',
'port': '6379',
'passwd': '',
's_proxy': 'proxy_ip',
# the name of set which stores url of posts
's_url': 'url'
}
settings = {
# 使用代理时最大尝试次数
'maxtries': 3,
# 每个版块遍历的页数
'b_pages': 5,
# 合格的回复下限
'reply': 45,
# 合格的阅读下限
'read': 10000,
}
| headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/'
'537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
'Referer': ''
}
r_server = {
'ip': 'localhost',
'port': '6379',
'passwd': '',
's_proxy': 'proxy_ip',
's_url': 'url'
}
settings = {
# 使用代理时最大尝试次数
'maxtries': 3,
# 每个版块遍历的页数
'b_pages': 5,
# 合格的回复下限
'reply': 45,
# 合格的阅读下限
'read': 10000,
}
| mpl-2.0 | Python |
e4885689fc2f9e9f37814326a9eeef07ee4483f9 | Bump version to 1.8alpha3 | rolandgeider/wger,wger-project/wger,petervanderdoes/wger,kjagoo/wger_stark,wger-project/wger,kjagoo/wger_stark,rolandgeider/wger,kjagoo/wger_stark,petervanderdoes/wger,wger-project/wger,petervanderdoes/wger,wger-project/wger,kjagoo/wger_stark,rolandgeider/wger,rolandgeider/wger,petervanderdoes/wger | wger/__init__.py | wger/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:copyright: 2011, 2012 by OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
VERSION = (1, 8, 0, 'alpha', 3)
RELEASE = False
def get_version(version=None, release=None):
"""Derives a PEP386-compliant version number from VERSION."""
if version is None:
version = VERSION
if release is None:
release = RELEASE
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
main_parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:main_parts])
if version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = mapping[version[3]] + str(version[4])
else:
sub = ''
if not release:
sub += '-dev'
return main + sub
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:copyright: 2011, 2012 by OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
VERSION = (1, 8, 0, 'alpha', 2)
RELEASE = False
def get_version(version=None, release=None):
"""Derives a PEP386-compliant version number from VERSION."""
if version is None:
version = VERSION
if release is None:
release = RELEASE
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
main_parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:main_parts])
if version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = mapping[version[3]] + str(version[4])
else:
sub = ''
if not release:
sub += '-dev'
return main + sub
| agpl-3.0 | Python |
0c09c230c6c52ce86fe1a7220aea4e70da34e258 | Bump version | petervanderdoes/wger,petervanderdoes/wger,wger-project/wger,petervanderdoes/wger,wger-project/wger,wger-project/wger,petervanderdoes/wger,wger-project/wger | wger/__init__.py | wger/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:copyright: 2011, 2012 by OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
VERSION = (2, 1, 0, 'alpha', 1)
RELEASE = False
def get_version(version=None, release=None):
"""Derives a PEP386-compliant version number from VERSION."""
if version is None:
version = VERSION
if release is None:
release = RELEASE
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
main_parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:main_parts])
if version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = mapping[version[3]] + str(version[4])
else:
sub = ''
if not release:
sub += '.dev0'
return main + sub
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:copyright: 2011, 2012 by OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
VERSION = (2, 0, 0, 'final', 1)
RELEASE = True
def get_version(version=None, release=None):
"""Derives a PEP386-compliant version number from VERSION."""
if version is None:
version = VERSION
if release is None:
release = RELEASE
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
main_parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:main_parts])
if version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = mapping[version[3]] + str(version[4])
else:
sub = ''
if not release:
sub += '.dev0'
return main + sub
| agpl-3.0 | Python |
9384f76d4ecfe2a822747020ba20771019105aaa | Set threads to daemons so that they exit when the main thread exits | boundary/boundary-plugin-shell,boundary/boundary-plugin-shell,jdgwartney/boundary-plugin-shell,jdgwartney/boundary-plugin-shell | metric_thread.py | metric_thread.py | #!/usr/bin/env python
# Copyright 2014 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread, Lock
import time
from metric_item import MetricItem
from exec_proc import ExecProc
from sys import stdout
stdoutmutex = Lock()
class MetricThread(Thread):
def __init__(self,item,mutex):
Thread.__init__(self,name=item.getName())
self.setDaemon(True)
self.mutex = mutex
self.pollingInterval = item.getPollingInterval()
self.name = item.getName()
self.proc = ExecProc()
self.proc.setCommand(item.getCommand())
self.proc.setDebug(item.getDebug())
def run(self): # run provides thread logic
while True:
output = self.proc.execute()
with self.mutex:
stdout.write(output)
stdout.flush()
time.sleep(self.pollingInterval)
| #!/usr/bin/env python
# Copyright 2014 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread, Lock
import time
from metric_item import MetricItem
from exec_proc import ExecProc
from sys import stdout
stdoutmutex = Lock()
class MetricThread(Thread):
def __init__(self,item,mutex):
Thread.__init__(self,name=item.getName())
self.mutex = mutex
self.pollingInterval = item.getPollingInterval()
self.name = item.getName()
self.proc = ExecProc()
self.proc.setCommand(item.getCommand())
self.proc.setDebug(item.getDebug())
def run(self): # run provides thread logic
while True:
output = self.proc.execute()
with self.mutex:
stdout.write(output)
stdout.flush()
time.sleep(self.pollingInterval)
| apache-2.0 | Python |
43f9f148b7b6d7a483a5c585f3bb9802f74d314f | bump version | tizz98/prosperworks-api | prosperworks/constants.py | prosperworks/constants.py | __version__ = "0.0.5"
# Headers
# values
CONTENT_TYPE = "application/json"
APPLICATION = "developer_api"
# keys
ACCESS_TOKEN_HEADER = "X-PW-AccessToken"
APPLICATION_HEADER = "X-PW-Application"
EMAIL_HEADER = "X-PW-UserEmail"
# Order by most recent first, default will be API_VERSIONS[0]
API_VERSIONS = (
"v1",
)
BASE_URL = "https://api.prosperworks.com/developer_api/{version}/"
| __version__ = "0.0.4"
# Headers
# values
CONTENT_TYPE = "application/json"
APPLICATION = "developer_api"
# keys
ACCESS_TOKEN_HEADER = "X-PW-AccessToken"
APPLICATION_HEADER = "X-PW-Application"
EMAIL_HEADER = "X-PW-UserEmail"
# Order by most recent first, default will be API_VERSIONS[0]
API_VERSIONS = (
"v1",
)
BASE_URL = "https://api.prosperworks.com/developer_api/{version}/"
| mit | Python |
164b07fefdd8db74ccce7ff44c33a6120cd98c86 | Fix xlrd issue Column headers must be strings | mfraezz/modular-file-renderer,rdhyee/modular-file-renderer,AddisonSchiller/modular-file-renderer,mfraezz/modular-file-renderer,TomBaxter/modular-file-renderer,rdhyee/modular-file-renderer,AddisonSchiller/modular-file-renderer,CenterForOpenScience/modular-file-renderer,felliott/modular-file-renderer,AddisonSchiller/modular-file-renderer,icereval/modular-file-renderer,haoyuchen1992/modular-file-renderer,CenterForOpenScience/modular-file-renderer,Johnetordoff/modular-file-renderer,haoyuchen1992/modular-file-renderer,TomBaxter/modular-file-renderer,felliott/modular-file-renderer,rdhyee/modular-file-renderer,felliott/modular-file-renderer,mfraezz/modular-file-renderer,Johnetordoff/modular-file-renderer,CenterForOpenScience/modular-file-renderer,icereval/modular-file-renderer,mfraezz/modular-file-renderer,haoyuchen1992/modular-file-renderer,TomBaxter/modular-file-renderer,Johnetordoff/modular-file-renderer,icereval/modular-file-renderer,felliott/modular-file-renderer,AddisonSchiller/modular-file-renderer,rdhyee/modular-file-renderer,TomBaxter/modular-file-renderer,Johnetordoff/modular-file-renderer,CenterForOpenScience/modular-file-renderer,haoyuchen1992/modular-file-renderer | mfr/ext/tabular/libs/xlrd_tools.py | mfr/ext/tabular/libs/xlrd_tools.py | import xlrd
from ..exceptions import TableTooBigException, EmptyTableException
from ..configuration import config
from ..utilities import header_population
from ..compat import range
def xlsx_xlrd(fp):
"""Read and convert a xlsx file to JSON format using the xlrd library
:param fp: File pointer object
:return: tuple of table headers and data
"""
max_size = config['max_size']
wb = xlrd.open_workbook(fp.name)
# Currently only displays the first sheet if there are more than one.
sheet = wb.sheets()[0]
if sheet.ncols > max_size or sheet.nrows > max_size:
raise TableTooBigException("Table is too large to render.")
if sheet.ncols < 1 or sheet.nrows < 1:
raise EmptyTableException("Table is empty or corrupt.")
fields = sheet.row_values(0) if sheet.nrows else []
fields = [str(value) or 'Unnamed: {0}'.format(index+1) for index, value in enumerate(fields)]
data = [dict(zip(fields, sheet.row_values(row_index)))
for row_index in range(1, sheet.nrows)]
header = header_population(fields)
return header, data
| import xlrd
from ..exceptions import TableTooBigException, EmptyTableException
from ..configuration import config
from ..utilities import header_population
from ..compat import range
def xlsx_xlrd(fp):
"""Read and convert a xlsx file to JSON format using the xlrd library
:param fp: File pointer object
:return: tuple of table headers and data
"""
max_size = config['max_size']
wb = xlrd.open_workbook(fp.name)
# Currently only displays the first sheet if there are more than one.
sheet = wb.sheets()[0]
if sheet.ncols > max_size or sheet.nrows > max_size:
raise TableTooBigException("Table is too large to render.")
if sheet.ncols < 1 or sheet.nrows < 1:
raise EmptyTableException("Table is empty or corrupt.")
fields = sheet.row_values(0) if sheet.nrows else []
fields = [value or 'Unnamed: {0}'.format(index+1) for index, value in enumerate(fields)]
data = [dict(zip(fields, sheet.row_values(row_index)))
for row_index in range(1, sheet.nrows)]
header = header_population(fields)
return header, data
| apache-2.0 | Python |
031aed2929710c2ed80885031ba83858d2f350b9 | fix translate command | appu1232/Selfbot-for-Discord | cogs/translate.py | cogs/translate.py | import requests
import discord
import json
from urllib import parse
from bs4 import BeautifulSoup
from discord.ext import commands
'''Translator cog - Love Archit & Lyric'''
class Translate:
def __init__(self, bot):
self.bot = bot
# Thanks to lyric for helping me in making this possible. You are not so bad afterall :] ~~jk~~
@commands.command(pass_context=True)
async def translate(self, ctx, to_language, *, msg):
"""Translates words from one language to another. Do >help translate for more information.
Usage:
>translate <new language> <words> - Translate words from one language to another. Full language names must be used.
The original language will be assumed automatically.
"""
await self.bot.delete_message(ctx.message)
codes = requests.get("http://lyricly.tk/langs.json").text
lang_codes = json.loads(codes)
real_language = False
to_language = to_language.lower()
for entry in lang_codes:
if to_language in lang_codes[entry]["name"].replace(";", "").replace(",", "").lower().split():
language = lang_codes[entry]["name"].replace(";", "").replace(",", "").split()[0]
to_language = entry
real_language = True
if real_language:
translate = requests.get("https://translate.google.com/m?hl={}&sl=auto&q={}".format(to_language, msg)).text
result = str(translate).split('class="t0">')[1].split("</div>")[0]
result = BeautifulSoup(result, "lxml").text
embed = discord.Embed(color=discord.Color.blue())
embed.add_field(name="Original", value=msg, inline=False)
embed.add_field(name=language, value=result.replace("&", "&"), inline=False)
if result == msg:
embed.add_field(name="Warning", value="This language may not be supported by Google Translate.")
await self.bot.send_message(ctx.message.channel, "", embed=embed)
else:
await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + "That's not a real language.")
def setup(bot):
bot.add_cog(Translate(bot))
| import requests
import discord
from urllib import parse
from bs4 import BeautifulSoup
from discord.ext import commands
'''Translator cog - Love Archit & Lyric'''
class Translate:
def __init__(self, bot):
self.bot = bot
# Thanks to lyric for helping me in making this possible. You are not so bad afterall :] ~~jk~~
@commands.command(pass_context=True)
async def translate(self, ctx, to_language, *, msg):
"""Translates words from one language to another. Do >help translate for more information.
Usage:
>translate <new language> <words> - Translate words from one language to another. Full language names must be used.
The original language will be assumed automatically.
"""
await self.bot.delete_message(ctx.message)
codes = requests.get("http://lyricly.tk/langs.json").text
lang_codes = json.loads(codes)
real_language = False
to_language = to_language.lower()
for entry in lang_codes:
if to_language in lang_codes[entry]["name"].replace(";", "").replace(",", "").lower().split():
language = lang_codes[entry]["name"].replace(";", "").replace(",", "").split()[0]
to_language = entry
real_language = True
if real_language:
translate = requests.get("https://translate.google.com/m?hl={}&sl=auto&q={}".format(to_language, msg)).text
result = str(translate).split('class="t0">')[1].split("</div>")[0]
result = BeautifulSoup(result, "lxml").text
embed = discord.Embed(color=discord.Color.blue())
embed.add_field(name="Original", value=msg, inline=False)
embed.add_field(name=language, value=result.replace("&", "&"), inline=False)
if result == msg:
embed.add_field(name="Warning", value="This language may not be supported by Google Translate.")
await self.bot.send_message(ctx.message.channel, "", embed=embed)
else:
await self.bot.send_message(ctx.message.channel, self.bot.bot_prefix + "That's not a real language.")
def setup(bot):
bot.add_cog(Translate(bot))
| mit | Python |
4d8a6196425f6a113bc4653100b1a183634f8f9b | add /assemblee/ just after france/ in urls | yohanboniface/memopol-core,yohanboniface/memopol-core,yohanboniface/memopol-core | memopol2/urls.py | memopol2/urls.py | import os
from django.conf.urls.defaults import patterns, include, url
from django.views.generic.simple import direct_to_template
from django.conf import settings
from django.contrib import admin
from django.views.static import serve
admin.autodiscover()
urlpatterns = patterns('', # pylint: disable=C0103
url(r'^$', direct_to_template, {'template' : 'home.html'}, name='index'),
url(r'^europe/parliament/', include('meps.urls', namespace='meps', app_name='meps')),
url(r'^france/assemblee/', include('mps.urls', namespace='mps', app_name='mps')),
url(r'^votes/', include('votes.urls', namespace='votes', app_name='votes')),
url(r'^list/', include('queries.urls', namespace='queries', app_name='queries')),
url(r'^trends/', include('trends.urls', namespace='trends', app_name='trends')),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
)
# hack to autodiscover static files location in dev mode
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^static/(.*)$', serve, {'document_root': os.path.join(settings.PROJECT_PATH, 'static')}),
)
# TODO: static files location in production
# should never be served by django, settings.MEDIA_URL is the right way to do
| import os
from django.conf.urls.defaults import patterns, include, url
from django.views.generic.simple import direct_to_template
from django.conf import settings
from django.contrib import admin
from django.views.static import serve
admin.autodiscover()
urlpatterns = patterns('', # pylint: disable=C0103
url(r'^$', direct_to_template, {'template' : 'home.html'}, name='index'),
url(r'^europe/parliament/', include('meps.urls', namespace='meps', app_name='meps')),
url(r'^france/', include('mps.urls', namespace='mps', app_name='mps')),
url(r'^votes/', include('votes.urls', namespace='votes', app_name='votes')),
url(r'^list/', include('queries.urls', namespace='queries', app_name='queries')),
url(r'^trends/', include('trends.urls', namespace='trends', app_name='trends')),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
)
# hack to autodiscover static files location in dev mode
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^static/(.*)$', serve, {'document_root': os.path.join(settings.PROJECT_PATH, 'static')}),
)
# TODO: static files location in production
# should never be served by django, settings.MEDIA_URL is the right way to do
| agpl-3.0 | Python |
b858b4e230222de125b30f9ec3f70b72056322ba | Set version 2.0.1-pre. | jdswinbank/Comet,jdswinbank/Comet | comet/__init__.py | comet/__init__.py | __description__ = "VOEvent Broker"
__url__ = "http://comet.transientskp.org/"
__author__ = "John Swinbank"
__contact__ = "swinbank@princeton.edu"
__version__ = "2.0.1-pre"
| __description__ = "VOEvent Broker"
__url__ = "http://comet.transientskp.org/"
__author__ = "John Swinbank"
__contact__ = "swinbank@princeton.edu"
__version__ = "2.0.0"
| bsd-2-clause | Python |
c15a9622ab31d09fec9a12c34584342df12ae362 | fix login_allowed check | coco-project/coco,coco-project/coco,coco-project/coco,coco-project/coco | ipynbsrv/core/auth/checks.py | ipynbsrv/core/auth/checks.py | # from ipynbsrv.core.models import BackendUser
def login_allowed(user):
"""
@user_passes_test decorator to check whether the user is allowed to access the application or not.
We do not want to allow non-UserBackend users to access the application
(because we need the LDAP entry for the shares etc.) so we check that here.
"""
if user is None or user.get_username() is None:
return False
return hasattr(user, 'backend_user')
| # from ipynbsrv.core.models import BackendUser
def login_allowed(user):
"""
@user_passes_test decorator to check whether the user is allowed to access the application or not.
We do not want to allow non-UserBackend users to access the application
(because we need the LDAP entry for the shares etc.) so we check that here.
"""
return True
# if user is None or user.get_username() is None:
# return False
# return BackendUser.objects.filter(user__id=user.id).exists()
| bsd-3-clause | Python |
3dfa128111027cc77ac1294ccb78f7687f80d069 | add tests for parsing description | tswicegood/maxixe | maxixe/tests/parser.py | maxixe/tests/parser.py | import textwrap
import unittest
from .. import parser
basic = """
Feature: This is a feature
In order to understand the system under test
As a developer and a business user
I want to be able to parse features
""".strip()
lower = """
feature: This is a feature
In order to understand the system under test
As a developer and a business user
I want to be able to parse features
""".strip()
upper = """
FEATURE: This is a feature
In order to understand the system under test
As a developer and a business user
I want to be able to parse features
""".strip()
leet = """
FeaTuRe: This is a feature
In order to understand the system under test
As a developer and a business user
I want to be able to parse features
""".strip()
class FeatureNameParsingTestCase(unittest.TestCase):
expected = "This is a feature"
def assert_expected_name(self, feature):
self.assertEqual(feature.name, self.expected)
def test_can_parse_feature_name(self):
self.assert_expected_name(parser.parse_feature(basic))
def test_can_parse_feature_with_lowercase_name(self):
self.assert_expected_name(parser.parse_feature(lower))
def test_can_parse_feature_with_uppercase_name(self):
self.assert_expected_name(parser.parse_feature(upper))
def test_can_parse_feature_with_weird_caps_name(self):
self.assert_expected_name(parser.parse_feature(leet))
class FeatureDescriptionTestCase(unittest.TestCase):
def to_description(self, s):
return textwrap.dedent(s).strip()
def test_can_parse_feature_description(self):
self.assertEqual(self.to_description("""
In order to understand the system under test
As a developer and a business user
I want to be able to parse features
"""), parser.parse_feature(basic).description)
| import unittest
from .. import parser
basic = """
Feature: This is a feature
In order to understand the system under test
As a developer and a business user
I want to be able to parse features
""".strip()
lower = """
feature: This is a feature
In order to understand the system under test
As a developer and a business user
I want to be able to parse features
""".strip()
upper = """
FEATURE: This is a feature
In order to understand the system under test
As a developer and a business user
I want to be able to parse features
""".strip()
leet = """
FeaTuRe: This is a feature
In order to understand the system under test
As a developer and a business user
I want to be able to parse features
""".strip()
class FeatureNameParsingTestCase(unittest.TestCase):
expected = "This is a feature"
def assert_expected_name(self, feature):
self.assertEqual(feature.name, self.expected)
def test_can_parse_feature_name(self):
self.assert_expected_name(parser.parse_feature(basic))
def test_can_parse_feature_with_lowercase_name(self):
self.assert_expected_name(parser.parse_feature(lower))
def test_can_parse_feature_with_uppercase_name(self):
self.assert_expected_name(parser.parse_feature(upper))
def test_can_parse_feature_with_weird_caps_name(self):
self.assert_expected_name(parser.parse_feature(leet))
| apache-2.0 | Python |
5d1e8e4d4ba9c78c83d9cc82f250c8e6766c0ca8 | Simplify the accuracy implementation | niboshi/chainer,ikasumi/chainer,niboshi/chainer,pfnet/chainer,sou81821/chainer,sinhrks/chainer,benob/chainer,tigerneil/chainer,ktnyt/chainer,AlpacaDB/chainer,cupy/cupy,niboshi/chainer,ysekky/chainer,cupy/cupy,minhpqn/chainer,AlpacaDB/chainer,okuta/chainer,anaruse/chainer,1986ks/chainer,delta2323/chainer,hvy/chainer,kikusu/chainer,chainer/chainer,umitanuki/chainer,chainer/chainer,keisuke-umezawa/chainer,hvy/chainer,keisuke-umezawa/chainer,ktnyt/chainer,wkentaro/chainer,muupan/chainer,niboshi/chainer,jnishi/chainer,masia02/chainer,okuta/chainer,t-abe/chainer,yanweifu/chainer,jnishi/chainer,okuta/chainer,hvy/chainer,aonotas/chainer,truongdq/chainer,muupan/chainer,laysakura/chainer,chainer/chainer,wkentaro/chainer,ytoyama/yans_chainer_hackathon,keisuke-umezawa/chainer,kikusu/chainer,Kaisuke5/chainer,jnishi/chainer,hidenori-t/chainer,cupy/cupy,tscohen/chainer,ktnyt/chainer,kiyukuta/chainer,wkentaro/chainer,wkentaro/chainer,kashif/chainer,truongdq/chainer,okuta/chainer,benob/chainer,ktnyt/chainer,rezoo/chainer,keisuke-umezawa/chainer,chainer/chainer,hvy/chainer,jnishi/chainer,cupy/cupy,cemoody/chainer,sinhrks/chainer,tkerola/chainer,ronekko/chainer,t-abe/chainer | chainer/functions/accuracy.py | chainer/functions/accuracy.py | import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class Accuracy(function.Function):
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, t_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim >= 2,
t_type.dtype == numpy.int32,
t_type.ndim == 1,
t_type.shape[0] == x_type.shape[0],
)
for i in range(2, x_type.ndim.eval()):
type_check.expect(x_type.shape[i] == 1)
def forward(self, inputs):
xpy = cuda.get_array_module(*inputs)
y, t = inputs
y = y.reshape(len(y), -1) # flatten
pred = y.argmax(axis=1)
return xpy.asarray((pred == t).mean(dtype='f')),
def accuracy(y, t):
"""Computes muticlass classification accuracy of the minibatch.
Args:
y (Variable): Variable holding a matrix whose (i, j)-th element
indicates the score of the class j at the i-th example.
t (Variable): Variable holding an int32 vector of groundtruth labels.
Returns:
Variable: A variable holding a scalar array of the accuracy.
.. note:: This function is non-differentiable.
"""
return Accuracy()(y, t)
| import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class Accuracy(function.Function):
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, t_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim >= 2,
t_type.dtype == numpy.int32,
t_type.ndim == 1,
t_type.shape[0] == x_type.shape[0],
)
for i in range(2, x_type.ndim.eval()):
type_check.expect(x_type.shape[i] == 1)
def forward_cpu(self, inputs):
y, t = inputs
y = y.reshape(y.shape[0], y.size / y.shape[0]) # flatten
pred = y.argmax(axis=1)
return numpy.array((pred == t).mean(dtype=numpy.float32)),
def forward_gpu(self, inputs):
x, t = inputs
fragments = cuda.empty((x.shape[0],), dtype=numpy.int8)
cuda.elementwise(
['fragments', 'x', 't', 'c'],
'''
float maxval = x[i * c];
int amax = 0;
for (int j = 1; j < c; ++j) {
if (maxval < x[i * c + j]) {
maxval = x[i * c + j];
amax = j;
}
}
fragments[i] = amax == t[i];
''', 'accuracy_fwd_map')(fragments, x, t, numpy.int32(x.shape[1]))
y = cuda.cupy.sum(fragments, dtype=numpy.float32)
y /= x.shape[0]
return y,
def accuracy(y, t):
"""Computes muticlass classification accuracy of the minibatch.
Args:
y (Variable): Variable holding a matrix whose (i, j)-th element
indicates the score of the class j at the i-th example.
t (Variable): Variable holding an int32 vector of groundtruth labels.
Returns:
Variable: A variable holding a scalar array of the accuracy.
.. note:: This function is non-differentiable.
"""
return Accuracy()(y, t)
| mit | Python |
b27b5eaf56f0d5c739d7422f65e2e7e5cde8ed7f | Fix up the email script | pydotorg/pypi,pydotorg/pypi,pydotorg/pypi,pydotorg/pypi | tools/email_renamed_users.py | tools/email_renamed_users.py | import smtplib
import pickle
import sys
import os
from email.mime.text import MIMEText
# Workaround current bug in docutils:
# http://permalink.gmane.org/gmane.text.docutils.devel/6324
import docutils.utils
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path = [root] + sys.path
import config
import store
config = config.Config("config.ini")
store = store.Store(config)
EMAIL_PLURAL = """
Hello there!
PyPI has begun to enforce restrictions on what a valid Python package name
contains.
These rules are:
* Must contain ONLY ASCII letters, digits, underscores, hyphens, and periods
* Must begin and end with an ASCII letter or digit
You are listed as an owner or maintainer on %(old)s.
Due to the new rules these packages will be renamed to %(new)s.
These new names represent what someone using pip or easy_install would already
have had to use in order to install your packages.
I am sorry for any inconvenience this may have caused you.
"""
EMAIL_SINGLE = """
Hello there!
PyPI has begun to enforce restrictions on what a valid Python package name
contains.
These rules are:
* Must contain ONLY ASCII letters, digits, underscores, hyphens, and periods
* Must begin and end with an ASCII letter or digit
You are listed as an owner or maintainer on "%(old)s".
Due to the new rules this package will be renamed to "%(new)s".
This new name represents what someone using pip or easy_install would
already have had to use in order to install your package.
I am sorry for any inconvenience this may have caused you.
"""
with open("renamed.pkl") as pkl:
renamed = pickle.load(pkl)
# Build up a list of all users to email
users = {}
for old, new in renamed:
for role in store.get_package_roles(new):
user_packages = users.setdefault(role["user_name"], [])
user_packages.append((old, new))
# Email each user
server = smtplib.SMTP(config.mailhost)
for username, packages in users.iteritems():
packages = sorted(set(packages))
user = store.get_user(username)
if not user["email"]:
continue
if len(packages) > 1:
msg = MIMEText(EMAIL_PLURAL % {
"old": ", ".join(['"%s"' % x[0] for x in packages]),
"new": ", ".join(['"%s"' % x[1] for x in packages]),
})
elif packages:
msg = MIMEText(EMAIL_SINGLE % {
"old": packages[0][0],
"new": packages[0][1],
})
msg["Subject"] = "Important notice about your PyPI packages"
msg["From"] = "donald@python.org"
msg["To"] = user["email"]
server.sendmail("donald@python.org", [user["email"]], msg.as_string())
server.quit()
| import smtplib
import pickle
import sys
import os
from email.mime.text import MIMEText
# Workaround current bug in docutils:
# http://permalink.gmane.org/gmane.text.docutils.devel/6324
import docutils.utils
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path = [root] + sys.path
import config
import store
config = config.Config("config.ini")
store = store.Store(config)
EMAIL_PLURAL = """
Hello there!
PyPI has begun to enforce restrictions on what a valid Python package name
contains. These rules are:
* Must contain ONLY ASCII letters, digits, underscores, hyphens, and
periods
* Must begin and end with an ASCII letter or digit
You are listed as an owner or maintainer on %(old)s. Due to
the new rules these packages will be renamed to %(new)s.
These new names represent what someone using pip or easy_install would
already have had to use in order to install your packages.
I am sorry for any inconvenience this may have caused you.
"""
EMAIL_SINGLE = """
Hello there!
PyPI has begun to enforce restrictions on what a valid Python package name
contains. These rules are:
* Must contain ONLY ASCII letters, digits, underscores, hyphens, and
periods
* Must begin and end with an ASCII letter or digit
You are listed as an owner or maintainer on %(old)s. Due to
the new rules this package will be renamed to %(new)s.
These new names represent what someone using pip or easy_install would
already have had to use in order to install your package.
I am sorry for any inconvenience this may have caused you.
"""
with open("renamed.pkl") as pkl:
renamed = pickle.load(pkl)
# Build up a list of all users to email
users = {}
for old, new in renamed:
for role in store.get_package_roles(new):
user_packages = users.setdefault(role["user_name"], [])
user_packages.append((old, new))
# Email each user
server = smtplib.SMTP(config.mailhost)
for username, packages in users.iteritems():
user = store.get_user(username)
if not user["email"]:
continue
if len(packages) > 1:
msg = MIMEText(EMAIL_PLURAL % {
"old": ", ".join([x[0] for x in packages]),
"new": ", ".join([x[1] for x in packages]),
})
elif packages:
msg = MIMEText(EMAIL_SINGLE % {
"old": packages[0][0],
"new": packages[0][1],
})
msg["Subject"] = "Important notice about your PyPI packages"
msg["From"] = "donald@python.org"
msg["To"] = user["email"]
server.sendmail("donald@python.org", [user["email"]], msg.as_string())
server.quit()
| bsd-3-clause | Python |
dfb19163b0538508e7f0a820ff017ad9a8041727 | test data improvement | moto-timo/robotframework,joongh/robotframework,Colorfulstan/robotframework,yahman72/robotframework,SivagnanamCiena/robotframework,un33k/robotframework,moto-timo/robotframework,Colorfulstan/robotframework,stasiek/robotframework,snyderr/robotframework,synsun/robotframework,snyderr/robotframework,dkentw/robotframework,wojciechtanski/robotframework,JackNokia/robotframework,jaloren/robotframework,SivagnanamCiena/robotframework,synsun/robotframework,suvarnaraju/robotframework,joongh/robotframework,joongh/robotframework,alexandrul-ci/robotframework,stasiek/robotframework,un33k/robotframework,SivagnanamCiena/robotframework,jorik041/robotframework,xiaokeng/robotframework,yahman72/robotframework,snyderr/robotframework,rwarren14/robotframework,dkentw/robotframework,JackNokia/robotframework,wojciechtanski/robotframework,edbrannin/robotframework,edbrannin/robotframework,kurtdawg24/robotframework,eric-stanley/robotframework,wojciechtanski/robotframework,moto-timo/robotframework,yonglehou/robotframework,kurtdawg24/robotframework,wojciechtanski/robotframework,Colorfulstan/robotframework,kyle1986/robortframe,xiaokeng/robotframework,joongh/robotframework,ChrisHirsch/robotframework,nmrao/robotframework,fingeronthebutton/robotframework,jorik041/robotframework,wojciechtanski/robotframework,alexandrul-ci/robotframework,yahman72/robotframework,JackNokia/robotframework,userzimmermann/robotframework,yahman72/robotframework,moto-timo/robotframework,synsun/robotframework,alexandrul-ci/robotframework,jaloren/robotframework,HelioGuilherme66/robotframework,suvarnaraju/robotframework,edbrannin/robotframework,rwarren14/robotframework,ashishdeshpande/robotframework,alexandrul-ci/robotframework,alexandrul-ci/robotframework,stasiek/robotframework,kyle1986/robortframe,yonglehou/robotframework,edbrannin/robotframework,ChrisHirsch/robotframework,eric-stanley/robotframework,JackNokia/robotframework,HelioGuilherme66/robotframework,Colorfulstan/robotframework,nmrao/robotframework,HelioGuilherme66/robotframework,nmrao/robotframework,SivagnanamCiena/robotframework,jorik041/robotframework,xiaokeng/robotframework,synsun/robotframework,eric-stanley/robotframework,ashishdeshpande/robotframework,yahman72/robotframework,ashishdeshpande/robotframework,robotframework/robotframework,ChrisHirsch/robotframework,dkentw/robotframework,userzimmermann/robotframework,yonglehou/robotframework,Colorfulstan/robotframework,jorik041/robotframework,nmrao/robotframework,kurtdawg24/robotframework,robotframework/robotframework,suvarnaraju/robotframework,JackNokia/robotframework,snyderr/robotframework,suvarnaraju/robotframework,snyderr/robotframework,SivagnanamCiena/robotframework,yonglehou/robotframework,robotframework/robotframework,edbrannin/robotframework,jaloren/robotframework,un33k/robotframework,yonglehou/robotframework,userzimmermann/robotframework,userzimmermann/robotframework,moto-timo/robotframework,eric-stanley/robotframework,fingeronthebutton/robotframework,rwarren14/robotframework,dkentw/robotframework,rwarren14/robotframework,jaloren/robotframework,xiaokeng/robotframework,kurtdawg24/robotframework,ChrisHirsch/robotframework,nmrao/robotframework,fingeronthebutton/robotframework,kurtdawg24/robotframework,un33k/robotframework,synsun/robotframework,joongh/robotframework,userzimmermann/robotframework,stasiek/robotframework,rwarren14/robotframework,kyle1986/robortframe,un33k/robotframework,jorik041/robotframework,suvarnaraju/robotframework,dkentw/robotframework,stasiek/robotframework,fingeronthebutton/robotframework,jaloren/robotframework,kyle1986/robortframe,ashishdeshpande/robotframework,ashishdeshpande/robotframework,xiaokeng/robotframework,kyle1986/robortframe,ChrisHirsch/robotframework,fingeronthebutton/robotframework | tools/libdoc/test/regular.py | tools/libdoc/test/regular.py | class regular:
"""This is a very regular test library"""
def __init__(self, arg1='hello', arg2='world'):
"""Constructs a new regular test library
See `keyword`
Examples:
| regular | foo | bar |
| regular | | # default values are used |
"""
self.arg1 = arg1
self.arg2 = arg2
def keyword(self):
"""A keyword
See `get hello` for details"""
pass
def get_hello(self):
"""Get the intialization variables
See `initialization` for explanation of arguments
and `introduction` for introduction"""
return self.arg1, self.arg2
| class regular:
"""This is a very regular test library"""
def __init__(self, arg1='hello', arg2='world'):
"""Constructs a new regular test library
Examples:
| regular | foo | bar |
| regular | | # default values are used |
"""
self.arg1 = arg1
self.arg2 = arg2
def keyword(self):
"""A keyword
See `get hello` for details"""
pass
def get_hello(self):
"""Get the intialization variables
See `initialization` for explanation of arguments
and `introduction` for introduction"""
return self.arg1, self.arg2
| apache-2.0 | Python |
ddb87a049adfe0f05f00a33f5cd39ddbd62ebfed | Bump version to dev | pv/mediasnake,pv/mediasnake,pv/mediasnake | mediasnake/__init__.py | mediasnake/__init__.py | __version__ = "0.2.dev"
| __version__ = "0.1"
| bsd-3-clause | Python |
f0a2124d40939d2d42e950ba040200cfd7f5c08d | Set extra fields for Character class | lerrua/merlin-engine | merlin/engine.py | merlin/engine.py | from merlin import configs
from merlin.exceptions import DeadException
SHOW_MESSAGES = getattr(configs, 'SHOW_MESSAGES', False)
class Character(object):
def __init__(self, name, base_attack, base_hp, extra={}):
self.name = name
self.base_attack = base_attack
self.base_hp = base_hp
self.is_dead = False
self.battle = Prepare(self)
self.extra = extra
self.item = Item(self)
@property
def status(self):
return self.__dict__
class Hero(Character):
pass
class Monster(Character):
pass
class Item(object):
def __init__(self, character):
self.char = character
def collect(self, foe):
if not isinstance(foe, Monster):
raise TypeError('foe should be a Monster object')
if not foe.is_dead:
print 'unable to collect item.' if SHOW_MESSAGES else None
raise Exception('unable to collect item')
def use(self):
pass
def drop(self):
pass
def trade(self, character):
pass
class Prepare(object):
"""
Prepare the champions for the battle!
"""
def __init__(self, character):
self.char = character
@property
def status(self):
return self.char.status
def set_damage(self, attack):
return attack
def attack(self, foe):
if not isinstance(foe, Monster):
raise TypeError('foe should be a Monster object')
if foe.is_dead:
raise DeadException('foe is already dead! Stop hit him!')
foe.base_hp = foe.base_hp - self.set_damage(self.char.base_attack)
if foe.base_hp <= 0:
foe.is_dead = True
print 'foe is dead.' if SHOW_MESSAGES else None
return foe.base_hp
| from merlin import configs
from merlin.exceptions import DeadException
SHOW_MESSAGES = getattr(configs, 'SHOW_MESSAGES', False)
class Character(object):
def __init__(self, name, base_attack, base_hp):
self.name = name
self.base_attack = base_attack
self.base_hp = base_hp
self.is_dead = False
self.battle = Prepare(self)
self.item = Item(self)
@property
def status(self):
return self.__dict__
class Hero(Character):
pass
class Monster(Character):
pass
class Item(object):
def __init__(self, character):
self.char = character
def collect(self, foe):
if not isinstance(foe, Monster):
raise TypeError('foe should be a Monster object')
if not foe.is_dead:
print 'unable to collect item.' if SHOW_MESSAGES else None
raise Exception('unable to collect item')
def use(self):
pass
def drop(self):
pass
def trade(self, character):
pass
class Prepare(object):
"""
Prepare the champions for the battle!
"""
def __init__(self, character):
self.char = character
@property
def status(self):
return self.char.status
def set_damage(self, attack):
return attack
def attack(self, foe):
if not isinstance(foe, Monster):
raise TypeError('foe should be a Monster object')
if foe.is_dead:
raise DeadException('foe is already dead! Stop hit him!')
foe.base_hp = foe.base_hp - self.set_damage(self.char.base_attack)
if foe.base_hp <= 0:
foe.is_dead = True
print 'foe is dead.' if SHOW_MESSAGES else None
return foe.base_hp
| mit | Python |
9f01777e4d985aef6a77208f9839bd257e7be995 | Update station.py | elailai94/EasyTicket | Source-Code/Station/station.py | Source-Code/Station/station.py | #==============================================================================
# EasyTicket
#
# @description: Module for providing methods to work with Station objects
# @author: Elisha Lai
# @version: 1.2 16/04/2015
#==============================================================================
# Station module (station.py)
# Object definition
class Station:
'Fields: name, zone, lines'
# A Station is an object in which
# - name is a Str (name of a station)
# - zone is an Int (zone that this station is located in)
# - lines is a (listof Str) (lines that this station is part of)
# Initializes the object.
def __init__(self, name, zone):
self.name = name
self.zone = zone
self.lines = []
# Returns a string representation of the object.
def __repr__(self):
# Stores the index counter
i = 0
# Stores the concatenated string
lines_str = ''
for line in self.lines:
lines_str += line
if (i < (len(self.lines) - 1)): # Last element in the list?
lines_str += ', '
i += 1
return 'Station Name:%s Zone:%d Lines:%s' \
% (self.name, self.zone, lines_str)
# Returns True if self and other represent stations with the same name
# and False otherwise.
def __eq__(self, other):
return isinstance(self, Station) and \
isinstance(other, Station) and \
self.name == other.name
# Returns True if self and other represent stations with different names
# and False otherwise.
def __ne__(self, other):
return self != other
# Adds a line to the lines field.
def add_line(self, line):
isinstance(self, Station)
self.lines.append(line)
| #==============================================================================
# EasyTicket
#
# @description: Module for providing methods to work with Station objects
# @author: Elisha Lai
# @version: 1.2 16/04/2015
#==============================================================================
# Station module (station.py)
# Object definition
class Station:
'Fields: name, zone, lines'
# A Station is an object in which
# - name is a Str (name of a station)
# - zone is an Int (zone that this station is located in)
# - lines is a (listof Str) (lines that this station is part of)
# Initializes the object.
def __init__(self, name, zone):
self.name = name
self.zone = zone
self.lines = []
# Returns a string representation of the object.
def __repr__(self):
i = 0
lines_str = ''
for line in self.lines:
lines_str += line
if (i < (len(self.lines) - 1)):
lines_str += ', '
i += 1
return 'Station Name:%s Zone:%d Lines:%s' \
% (self.name, self.zone, lines_str)
# Returns True if self and other represent stations with the same name
# and False otherwise.
def __eq__(self, other):
return isinstance(self, Station) and \
isinstance(other, Station) and \
self.name == other.name
# Returns True if self and other represent stations with different names
# and False otherwise.
def __ne__(self, other):
return self != other
# Adds a line to the lines field.
def add_line(self, line):
isinstance(self, Station)
self.lines.append(line)
| mit | Python |
eb7281ee406e80870a8fa7f6a9b7a878d8b75cd4 | Rewrite the oa start date script to fix some errors | DOAJ/doaj,DOAJ/doaj,DOAJ/doaj,DOAJ/doaj | portality/migrate/2966_add_oa_start_date_from_backup/add_oa_start_date_from_backup.py | portality/migrate/2966_add_oa_start_date_from_backup/add_oa_start_date_from_backup.py | import csv
from datetime import datetime
import esprit
from portality.core import es_connection
from portality.models import Journal
from portality.util import ipt_prefix
def reinstate_oa_start(csv_reader):
""" Write the OA start date into Journals from the CSV file containing rows of id,oa_start_date """
batch = []
batch_size = 1000
header = next(csv_reader)
if header is not None:
for row in csv_reader:
j = Journal.pull(row[0])
if j is not None and "oa_start" not in j["bibjson"]:
j.bibjson().oa_start = row[1]
batch.append({'doc': j.data})
if len(batch) >= batch_size:
print('{0}, writing {1} to {2}'.format(datetime.now(), len(batch), ipt_prefix('journal')))
r = esprit.raw.bulk(es_connection, batch, idkey="doc.id", type_=ipt_prefix("journal"), bulk_type="update")
assert r.status_code == 200, r.json()
batch = []
if len(batch) > 0:
print('{0}, final result set / writing {1} to {2}'.format(datetime.now(), len(batch), ipt_prefix('journal')))
r = esprit.raw.bulk(es_connection, batch, idkey="doc.id", type_=ipt_prefix("journal"), bulk_type="update")
assert r.status_code == 200, r.json()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--data", help="csv with backup data", required=True)
args = parser.parse_args()
try:
with open(args.data) as f:
reinstate_oa_start(csv.reader(f))
except Exception as e:
print("Could not process file: " + args.data + ". Error: " + str(e))
raise
| import csv
from copy import deepcopy
from datetime import datetime
import esprit
from portality.core import es_connection
from portality.models import Journal
from portality.settings import BASE_FILE_PATH
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--data", help="csv with backup data")
args = parser.parse_args()
if not args.data:
print("Please specify a csv data file path with the -d option")
parser.print_help()
exit()
#args.data = BASE_FILE_PATH + "/migrate/2966_add_oa_start_date_from_backup/oa_start_out.csv"
try:
f = open(args.data)
except:
print("Could not open file: " + args.data + ". Try again.")
batch = []
batch_size = 1000
csv_reader = csv.reader(f)
header = next(csv_reader)
if header is not None:
for row in csv_reader:
print(row[0])
j = Journal.pull(row[0])
if j is not None and "oa_start" not in j["bibjson"]:
data = j.data
data["bibjson"]["oa_start"] = row[1]
batch.append(data)
if len(batch) >= batch_size:
print(datetime.now(), "writing ", len(batch), "to", "journal")
esprit.raw.bulk(es_connection, batch, idkey="id", type_="journal", bulk_type="index")
batch = []
if len(batch) > 0:
print(datetime.now(), "final result set / writing ", len(batch), "to", "journal")
esprit.raw.bulk(es_connection, batch, idkey="id", type_="journal", bulk_type="index")
f.close()
| apache-2.0 | Python |
7dc7fb9772c40fb318aff2047d5411c66c57a8d2 | fix privilege handling in channels with capital letters | anqxyr/jarvis | jarvis/modules/jarvis_irc.py | jarvis/modules/jarvis_irc.py | #!/usr/bin/env python3
"""
Jarvis IRC Wrapper.
This module connects jarvis functions to allow them to be called from irc.
"""
###############################################################################
# Module Imports
###############################################################################
import arrow
import functools
import sopel
import textwrap
import jarvis
###############################################################################
def send(bot, text, private=False, notice=False):
"""Send irc message."""
text = str(text)
tr = bot._trigger
jarvis.db.Message.create(
user=bot.config.core.nick,
channel=tr.sender,
time=arrow.utcnow().timestamp,
text=text)
mode = 'NOTICE' if notice else 'PRIVMSG'
recipient = tr.nick if private or notice else tr.sender
try:
bot.sending.acquire()
text = textwrap.wrap(text, width=420)[0]
bot.write((mode, recipient), text)
finally:
bot.sending.release()
def privileges(bot, nick):
channels = bot.privileges.items()
return {str(k).lower(): v[nick] for k, v in channels if nick in v}
@sopel.module.rule('.*')
def dispatcher(bot, tr):
inp = jarvis.core.Inp(
tr.group(0), tr.nick, tr.sender,
functools.partial(send, bot),
functools.partial(privileges, bot, tr.nick),
bot.write)
jarvis.core.dispatcher(inp)
@sopel.module.interval(3600)
def refresh(bot):
jarvis.core.refresh()
@sopel.module.interval(28800)
def tweet(bot):
jarvis.tools.post_on_twitter()
@sopel.module.event('JOIN')
@sopel.module.rule('.*')
def ban_on_join(bot, tr):
inp = jarvis.core.Inp(
None, tr.nick, tr.sender,
functools.partial(send, bot),
functools.partial(privileges, bot, tr.nick),
bot.write)
inp.send(jarvis.autoban.autoban(inp, tr.nick, tr.host))
| #!/usr/bin/env python3
"""
Jarvis IRC Wrapper.
This module connects jarvis functions to allow them to be called from irc.
"""
###############################################################################
# Module Imports
###############################################################################
import arrow
import functools
import sopel
import textwrap
import jarvis
###############################################################################
def send(bot, text, private=False, notice=False):
"""Send irc message."""
text = str(text)
tr = bot._trigger
jarvis.db.Message.create(
user=bot.config.core.nick,
channel=tr.sender,
time=arrow.utcnow().timestamp,
text=text)
mode = 'NOTICE' if notice else 'PRIVMSG'
recipient = tr.nick if private or notice else tr.sender
try:
bot.sending.acquire()
text = textwrap.wrap(text, width=420)[0]
bot.write((mode, recipient), text)
finally:
bot.sending.release()
def privileges(bot, nick):
channels = bot.privileges.items()
return {str(k): v[nick] for k, v in channels if nick in v}
@sopel.module.rule('.*')
def dispatcher(bot, tr):
inp = jarvis.core.Inp(
tr.group(0), tr.nick, tr.sender,
functools.partial(send, bot),
functools.partial(privileges, bot, tr.nick),
bot.write)
jarvis.core.dispatcher(inp)
@sopel.module.interval(3600)
def refresh(bot):
jarvis.core.refresh()
@sopel.module.interval(28800)
def tweet(bot):
jarvis.tools.post_on_twitter()
@sopel.module.event('JOIN')
@sopel.module.rule('.*')
def ban_on_join(bot, tr):
inp = jarvis.core.Inp(
None, tr.nick, tr.sender,
functools.partial(send, bot),
functools.partial(privileges, bot, tr.nick),
bot.write)
inp.send(jarvis.autoban.autoban(inp, tr.nick, tr.host))
| mit | Python |
0f62dc9ba898db96390658107e9ebe9930f8b90a | Make plugin work in python 3 | EliRibble/mothermayi-isort | mmiisort/main.py | mmiisort/main.py | from isort import SortImports
import mothermayi.colors
import mothermayi.errors
def plugin():
return {
'name' : 'isort',
'pre-commit' : pre_commit,
}
def do_sort(filename):
results = SortImports(filename)
return results.in_lines != results.out_lines
def get_status(had_changes):
return mothermayi.colors.red('unsorted') if had_changes else mothermayi.colors.green('sorted')
def pre_commit(config, staged):
changes = [do_sort(filename) for filename in staged]
messages = [get_status(had_change) for had_change in changes]
lines = [" {0:<30} ... {1:<10}".format(filename, message) for filename, message in zip(staged, messages)]
result = "\n".join(lines)
if any(changes):
raise mothermayi.errors.FailHook(result)
return result
| from isort import SortImports
import itertools
import mothermayi.colors
import mothermayi.errors
def plugin():
return {
'name' : 'isort',
'pre-commit' : pre_commit,
}
def do_sort(filename):
results = SortImports(filename)
return results.in_lines != results.out_lines
def get_status(had_changes):
return mothermayi.colors.red('unsorted') if had_changes else mothermayi.colors.green('sorted')
def pre_commit(config, staged):
changes = [do_sort(filename) for filename in staged]
messages = [get_status(had_change) for had_change in changes]
lines = [" {0:<30} ... {1:<10}".format(filename, message) for filename, message in itertools.izip(staged, messages)]
result = "\n".join(lines)
if any(changes):
raise mothermayi.errors.FailHook(result)
return result
| mit | Python |
0dc8eafd576acfb0ab133b14e2bd1ebb2157da95 | fix typos in help (#4297) | yugangw-msft/azure-cli,yugangw-msft/azure-cli,QingChenmsft/azure-cli,samedder/azure-cli,yugangw-msft/azure-cli,QingChenmsft/azure-cli,samedder/azure-cli,yugangw-msft/azure-cli,QingChenmsft/azure-cli,samedder/azure-cli,QingChenmsft/azure-cli,yugangw-msft/azure-cli,samedder/azure-cli,yugangw-msft/azure-cli | src/command_modules/azure-cli-container/azure/cli/command_modules/container/_help.py | src/command_modules/azure-cli-container/azure/cli/command_modules/container/_help.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.help_files import helps
helps['container'] = """
type: group
short-summary: (Preview) Manage Azure Container Instances.
"""
helps['container create'] = """
type: command
short-summary: Create a container group.
examples:
- name: Create a container group and specify resources required.
text: az container create -g MyResourceGroup --name myalpine --image alpine:latest --cpu 1 --memory 1
- name: Create a container group with OS type.
text: az container create -g MyResourceGroup --name mywinapp --image winappimage:latest --os-type Windows --cpu 2 --memory 3.5
- name: Create a container group with public IP address.
text: az container create -g MyResourceGroup --name myalpine --image alpine:latest --ip-address public
- name: Create a container group with starting command line.
text: az container create -g MyResourceGroup --name myalpine --image alpine:latest --command-line "/bin/sh -c '/path to/myscript.sh'"
- name: Create a container group with environment variables.
text: az container create -g MyResourceGroup --name myalpine --image alpine:latest -e key1=value1 key2=value2
- name: Create a container group using container image from Azure Container Registry.
text: az container create -g MyResourceGroup --name myalpine --image myAcrRegistry.azurecr.io/alpine:latest --registry-password password
- name: Create a container group using container image from other private container image registry.
text: az container create -g MyResourceGroup --name myapp --image myimage:latest --cpu 1 --memory 1.5 --registry-login-server myregistry.com --registry-username username --registry-password password
"""
helps['container delete'] = """
type: command
short-summary: Delete a container group.
"""
helps['container list'] = """
type: command
short-summary: List container groups.
"""
helps['container show'] = """
type: command
short-summary: Show the details of a container group.
"""
helps['container logs'] = """
type: command
short-summary: Tail the log of a container group.
"""
| # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.help_files import helps
helps['container'] = """
type: group
short-summary: (Preview) Manage Azure Container Instances.
"""
helps['container create'] = """
type: command
short-summary: Create a container group.
examples:
- name: Create a container group and specify resources required.
text: az container create -g MyResourceGroup --name myalpine --image alpine:latest --cpu 1 --memory 1
- name: Create a container group with OS type.
text: az container create -g MyResourceGroup --name mywinapp --image winappimage:latest --os-type Windows --cpu 2 --memory 3.5
- name: Create a container group with public IP address.
text: az container create -g MyResourceGroup --name myalpine --image alpine:latest --ip-address public
- name: Create a container group with starting command line.
text: az container create -g MyResourceGroup --name myalpine --image alpine:latest --command-line "/bin/sh -c '/path to/myscript.sh'"
- name: Create a container group with envrionment variables.
text: az contanier create -g MyResourceGroup --name myalpine --image alpine:latest -e key1=value1 key2=value2
- name: Create a container group using container image from Azure Container Registry.
text: az container create -g MyResourceGroup --name myalpine --image myAcrRegistry.azurecr.io/alpine:latest --registry-password password
- name: Create a container group using container image from other private container image registry.
text: az container create -g MyResourceGroup --name myapp --image myimage:latest --cpu 1 --memory 1.5 --registry-login-server myregistry.com --registry-username username --registry-password password
"""
helps['container delete'] = """
type: command
short-summary: Delete a container group.
"""
helps['container list'] = """
type: command
short-summary: List container groups.
"""
helps['container show'] = """
type: command
short-summary: Show the details of a container group.
"""
helps['container logs'] = """
type: command
short-summary: Tail the log of a container group.
"""
| mit | Python |
e08927d69c0ed3682997d7f123f0fd2aec1d11a8 | Tag new release: 2.8.2 | Floobits/floobits-sublime,Floobits/floobits-sublime | floo/version.py | floo/version.py | PLUGIN_VERSION = '2.8.2'
# The line above is auto-generated by tag_release.py. Do not change it manually.
try:
from .common import shared as G
assert G
except ImportError:
from common import shared as G
G.__VERSION__ = '0.11'
G.__PLUGIN_VERSION__ = PLUGIN_VERSION
| PLUGIN_VERSION = '2.8.1'
# The line above is auto-generated by tag_release.py. Do not change it manually.
try:
from .common import shared as G
assert G
except ImportError:
from common import shared as G
G.__VERSION__ = '0.11'
G.__PLUGIN_VERSION__ = PLUGIN_VERSION
| apache-2.0 | Python |
56199ec6848959226422cc8d6f91924e7030c238 | Use newer protocol version. | Floobits/floobits-sublime,Floobits/floobits-sublime | floo/version.py | floo/version.py | PLUGIN_VERSION = '2.5.3'
# The line above is auto-generated by tag_release.py. Do not change it manually.
try:
from .common import shared as G
assert G
except ImportError:
from common import shared as G
G.__VERSION__ = '0.11'
G.__PLUGIN_VERSION__ = PLUGIN_VERSION
| PLUGIN_VERSION = '2.5.3'
# The line above is auto-generated by tag_release.py. Do not change it manually.
try:
from .common import shared as G
assert G
except ImportError:
from common import shared as G
G.__VERSION__ = '0.03'
G.__PLUGIN_VERSION__ = PLUGIN_VERSION
| apache-2.0 | Python |
4197a491224eea724e5334abc4a7dbbf23425c1b | Switch off robots handling in PyPI example to keep it working | python-mechanize/mechanize,python-mechanize/mechanize | examples/pypi.py | examples/pypi.py | #!/usr/bin/env python
# Search PyPI, the Python Package Index, and retrieve latest mechanize
# tarball.
# This is just to demonstrate mechanize: You should use EasyInstall to
# do this, not this silly script.
import sys, os, re
import mechanize
b = mechanize.Browser(
# mechanize's XHTML support needs work, so is currently switched off. If
# we want to get our work done, we have to turn it on by supplying a
# mechanize.Factory (with XHTML support turned on):
factory=mechanize.DefaultFactory(i_want_broken_xhtml_support=True)
)
# Addition 2005-06-13: Be naughty, since robots.txt asks not to
# access /pypi now. We're not madly searching for everything, so
# I don't feel too guilty.
b.set_handle_robots(False)
# search PyPI
b.open("http://www.python.org/pypi")
b.follow_link(text="Search", nr=1)
b.select_form(nr=0)
b["name"] = "mechanize"
b.submit()
# 2005-05-20 no longer necessary, only one version there, so PyPI takes
# us direct to PKG-INFO page
## # find latest release
## VERSION_RE = re.compile(r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<bugfix>\d+)"
## r"(?P<state>[ab])?(?:-pre)?(?P<pre>\d+)?$")
## def parse_version(text):
## m = VERSION_RE.match(text)
## if m is None:
## raise ValueError
## return tuple([m.groupdict()[part] for part in
## ("major", "minor", "bugfix", "state", "pre")])
## MECHANIZE_RE = re.compile(r"mechanize-?(.*)")
## links = b.links(text_regex=MECHANIZE_RE)
## versions = []
## for link in links:
## m = MECHANIZE_RE.search(link.text)
## version_string = m.group(1).strip(' \t\xa0')
## tup = parse_version(version_string)[:3]
## versions.append(tup)
## latest = links[versions.index(max(versions))]
# get tarball
## b.follow_link(latest) # to PKG-INFO page
r = b.follow_link(text_regex=re.compile(r"\.tar\.gz"))
filename = os.path.basename(b.geturl())
if os.path.exists(filename):
sys.exit("%s already exists, not grabbing" % filename)
f = file(filename, "wb")
while 1:
data = r.read(1024)
if not data: break
f.write(data)
f.close()
| #!/usr/bin/env python
# Search PyPI, the Python Package Index, and retrieve latest mechanize
# tarball.
# This is just to demonstrate mechanize: You should use EasyInstall to
# do this, not this silly script.
import sys, os, re
import mechanize
b = mechanize.Browser(
# mechanize's XHTML support needs work, so is currently switched off. If
# we want to get our work done, we have to turn it on by supplying a
# mechanize.Factory (with XHTML support turned on):
factory=mechanize.DefaultFactory(i_want_broken_xhtml_support=True)
)
# search PyPI
b.open("http://www.python.org/pypi")
b.follow_link(text="Search", nr=1)
b.select_form(nr=0)
b["name"] = "mechanize"
b.submit()
# 2005-05-20 no longer necessary, only one version there, so PyPI takes
# us direct to PKG-INFO page
## # find latest release
## VERSION_RE = re.compile(r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<bugfix>\d+)"
## r"(?P<state>[ab])?(?:-pre)?(?P<pre>\d+)?$")
## def parse_version(text):
## m = VERSION_RE.match(text)
## if m is None:
## raise ValueError
## return tuple([m.groupdict()[part] for part in
## ("major", "minor", "bugfix", "state", "pre")])
## MECHANIZE_RE = re.compile(r"mechanize-?(.*)")
## links = b.links(text_regex=MECHANIZE_RE)
## versions = []
## for link in links:
## m = MECHANIZE_RE.search(link.text)
## version_string = m.group(1).strip(' \t\xa0')
## tup = parse_version(version_string)[:3]
## versions.append(tup)
## latest = links[versions.index(max(versions))]
# get tarball
## b.follow_link(latest) # to PKG-INFO page
r = b.follow_link(text_regex=re.compile(r"\.tar\.gz"))
filename = os.path.basename(b.geturl())
if os.path.exists(filename):
sys.exit("%s already exists, not grabbing" % filename)
f = file(filename, "wb")
while 1:
data = r.read(1024)
if not data: break
f.write(data)
f.close()
| bsd-3-clause | Python |
014c8ca68b196c78b9044b194b762cdb3dfe6c78 | Add comment description of methods for gitlab hook | pipex/gitbot,pipex/gitbot,pipex/gitbot | app/hooks/views.py | app/hooks/views.py | from __future__ import absolute_import
from __future__ import unicode_literals
from app import app, webhooks
@webhooks.hook(
app.config.get('GITLAB_HOOK','/hooks/gitlab'),
handler='gitlab')
class Gitlab:
def issue(self, data):
# if the repository belongs to a group check if a channel with the same
# name (lowercased and hyphened) exists
# Check if a channel with the same repository name exists
# If the channel exists post to that channel
# If not post to general or other defined by configuration
# publish the issue to the found channel including the Title, Message
# and the creator and responsible if defined
pass
def push(self, data):
# Read commit list to update commit count for user
pass
def tag_push(self, data):
# Publish news of the new version of the repo in general
pass
def merge_request(self, data):
# Notify in the channel
pass
def commit_comment(self, data):
# Notify comment and receiver in the channel
pass
def issue_comment(self, data):
# Notify comment and receiver in the channel
pass
def merge_request_comment(self, data):
# Notify comment and receiver in the channel
pass
def snippet_comment(self, data):
# Do nothing for now
pass
| from __future__ import absolute_import
from __future__ import unicode_literals
from app import app, webhooks
@webhooks.hook(
app.config.get('GITLAB_HOOK','/hooks/gitlab'),
handler='gitlab')
class Gitlab:
def issue(self, data):
pass
def push(self, data):
pass
def tag_push(self, data):
pass
def merge_request(self, data):
pass
def commit_comment(self, data):
pass
def issue_comment(self, data):
pass
def merge_request_comment(self, data):
pass
def snippet_comment(self, data):
pass
| apache-2.0 | Python |
b3029282329c6e7f36fb65ffaef30b7558e0e06f | Make source and sink more alike. | ajdavis/asyncio,1st1/asyncio,vxgmichel/asyncio,fallen/asyncio,jashandeep-sohi/asyncio,Martiusweb/asyncio,manipopopo/asyncio,manipopopo/asyncio,ajdavis/asyncio,Martiusweb/asyncio,haypo/trollius,gsb-eng/asyncio,vxgmichel/asyncio,gvanrossum/asyncio,gvanrossum/asyncio,fallen/asyncio,vxgmichel/asyncio,haypo/trollius,gsb-eng/asyncio,haypo/trollius,gvanrossum/asyncio,jashandeep-sohi/asyncio,gsb-eng/asyncio,1st1/asyncio,Martiusweb/asyncio,1st1/asyncio,manipopopo/asyncio,ajdavis/asyncio,jashandeep-sohi/asyncio,fallen/asyncio | examples/sink.py | examples/sink.py | """Test service that accepts connections and reads all data off them."""
import sys
from tulip import *
server = None
def dprint(*args):
print('sink:', *args, file=sys.stderr)
class Service(Protocol):
def connection_made(self, tr):
dprint('connection from', tr.get_extra_info('socket').getpeername())
dprint('my socket is', tr.get_extra_info('socket').getsockname())
self.tr = tr
self.total = 0
def data_received(self, data):
if data == b'stop':
dprint('stopping server')
server.close()
self.tr.close()
return
self.total += len(data)
dprint('received', len(data), 'bytes; total', self.total)
if self.total > 1e6:
dprint('closing due to too much data')
self.tr.close()
def connection_lost(self, how):
dprint('closed', repr(how))
@coroutine
def start(loop):
global server
server = yield from loop.create_server(Service, 'localhost', 1111)
dprint('serving', [s.getsockname() for s in server.sockets])
yield from server.wait_closed()
def main():
if '--iocp' in sys.argv:
from tulip.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
set_event_loop(loop)
loop = get_event_loop()
loop.run_until_complete(start(loop))
loop.close()
if __name__ == '__main__':
main()
| """Test service that accepts connections and reads all data off them."""
import sys
from tulip import *
server = None
def dprint(*args):
print('sink:', *args, file=sys.stderr)
class Service(Protocol):
def connection_made(self, tr):
dprint('connection from', tr.get_extra_info('socket').getpeername())
dprint('my socket is', tr.get_extra_info('socket').getsockname())
self.tr = tr
self.total = 0
def data_received(self, data):
if data == b'stop':
dprint('stopping server')
server.close()
self.tr.close()
return
self.total += len(data)
dprint('received', len(data), 'bytes; total', self.total)
if self.total > 1e6:
dprint('closing due to too much data')
self.tr.close()
def connection_lost(self, how):
dprint('closed', repr(how))
@coroutine
def start(loop):
svr = yield from loop.create_server(Service, 'localhost', 1111)
return svr
def main():
if '--iocp' in sys.argv:
from tulip.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
set_event_loop(loop)
loop = get_event_loop()
global server
server = loop.run_until_complete(start(loop))
dprint('serving', [s.getsockname() for s in server.sockets])
loop.run_until_complete(server.wait_closed())
loop.close()
if __name__ == '__main__':
main()
| apache-2.0 | Python |
6ecada90e944ee976197e0ee79baf1d711a20803 | Add honeypot field to feedback form | ministryofjustice/cla_public,ministryofjustice/cla_public,ministryofjustice/cla_public,ministryofjustice/cla_public | cla_public/apps/base/forms.py | cla_public/apps/base/forms.py | # -*- coding: utf-8 -*-
"Base forms"
from flask_wtf import Form
from wtforms import StringField, TextAreaField
from cla_public.apps.base.fields import MultiRadioField
from cla_public.apps.base.constants import FEEL_ABOUT_SERVICE, \
HELP_FILLING_IN_FORM
from cla_public.apps.checker.honeypot import Honeypot
class FeedbackForm(Honeypot, Form):
difficulty = TextAreaField(u'Did you have any difficulty with this service?')
ideas = TextAreaField(u'Do you have any ideas for how it could be improved?')
feel_about_service = MultiRadioField(
u'Overall, how did you feel about the service you received today?',
choices=FEEL_ABOUT_SERVICE)
help_filling_in_form = MultiRadioField(
u'Did you have any help filling in this form?',
choices=HELP_FILLING_IN_FORM)
| # -*- coding: utf-8 -*-
"Base forms"
from flask_wtf import Form
from wtforms import StringField, TextAreaField
from cla_public.apps.base.fields import MultiRadioField
from cla_public.apps.base.constants import FEEL_ABOUT_SERVICE, \
HELP_FILLING_IN_FORM
class FeedbackForm(Form):
difficulty = TextAreaField(u'Did you have any difficulty with this service?')
ideas = TextAreaField(u'Do you have any ideas for how it could be improved?')
feel_about_service = MultiRadioField(
u'Overall, how did you feel about the service you received today?',
choices=FEEL_ABOUT_SERVICE)
help_filling_in_form = MultiRadioField(
u'Did you have any help filling in this form?',
choices=HELP_FILLING_IN_FORM)
| mit | Python |
13110cb0f14b8f13d726389c753cca22b15960e8 | Update product_supplierinfo.py | Daniel-CA/odoomrp-wip-public,alhashash/odoomrp-wip,odoomrp/odoomrp-wip,odoocn/odoomrp-wip,Antiun/odoomrp-wip,esthermm/odoomrp-wip,sergiocorato/odoomrp-wip,Endika/odoomrp-wip,jorsea/odoomrp-wip,factorlibre/odoomrp-wip,InakiZabala/odoomrp-wip,diagramsoftware/odoomrp-wip,agaldona/odoomrp-wip-1,diagramsoftware/odoomrp-wip,StefanRijnhart/odoomrp-wip,ddico/odoomrp-wip,Daniel-CA/odoomrp-wip-public,Eficent/odoomrp-wip,esthermm/odoomrp-wip,Eficent/odoomrp-wip,xpansa/odoomrp-wip,oihane/odoomrp-wip,jobiols/odoomrp-wip,alfredoavanzosc/odoomrp-wip-1,agaldona/odoomrp-wip-1,numerigraphe/odoomrp-wip,factorlibre/odoomrp-wip,invitu/odoomrp-wip,jobiols/odoomrp-wip,raycarnes/odoomrp-wip,dvitme/odoomrp-wip,sergiocorato/odoomrp-wip,michaeljohn32/odoomrp-wip,odoomrp/odoomrp-wip,maljac/odoomrp-wip,slevenhagen/odoomrp-wip-npg,windedge/odoomrp-wip,oihane/odoomrp-wip | product_supplierinfo_for_customer/models/product_supplierinfo.py | product_supplierinfo_for_customer/models/product_supplierinfo.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api
class ProductSupplierinfo(models.Model):
_inherit = 'product.supplierinfo'
type = fields.Selection([('customer', 'Customer'),
('supplier', 'Supplier')], string='Type',
default='supplier')
@api.multi
@api.onchange('type')
def onchange_type(self):
if self.type == 'supplier':
return {'domain': {'name': [('supplier', '=', True)]}}
elif self.type == 'customer':
return {'domain': {'name': [('customer', '=', True)]}}
return {'domain': {'name': []}}
| # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api
class ProductSupplierinfo(models.Model):
_inherit = 'product.supplierinfo'
type = fields.Selection([('customer', 'Customer'),
('supplier', 'Supplier')], string='Type',
default='supplier')
name = fields.Many2one(domain=[])
@api.multi
@api.onchange('type')
def onchange_type(self):
if self.type == 'supplier':
return {'domain': {'name': [('supplier', '=', True)]}}
elif self.type == 'customer':
return {'domain': {'name': [('customer', '=', True)]}}
return {'domain': {'name': []}}
| agpl-3.0 | Python |
bf5d0a9700382835a0e543622f327431b752f539 | Update P8_coinFlip.py added docstring and wrapped in main() function | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | pythontutorials/books/AutomateTheBoringStuff/Ch10/P8_coinFlip.py | pythontutorials/books/AutomateTheBoringStuff/Ch10/P8_coinFlip.py | """Coin flip
This program simulates flipping a coin 1000 times and prints the number
of times it landed on heads.
"""
def main():
import random
heads = 0
for i in range(1, 1001):
if random.randint(0, 1) == 1:
heads += 1
if i == 500:
print("Halfway done!")
print("Heads came up " + str(heads) + " times.")
if __name__ == '__main__':
main()
| # This program simulates flipping a coin 1000 times
import random
heads = 0
for i in range(1, 1001):
if random.randint(0, 1) == 1:
heads += 1
if i == 500:
print("Halfway done!")
print("Heads came up " + str(heads) + " times.")
| mit | Python |
05b7748713187883eb31919b78986b69b0590667 | add coverage | mljar/mljar-api-python | tests/run.py | tests/run.py | '''
MLJAR unit tests.
'''
import os
import unittest
from project_client_test import ProjectClientTest
#from dataset_client_test import DatasetClientTest
#from experiment_client_test import ExperimentClientTest
#from result_client_test import ResultClientTest
#from mljar_test import MljarTest
if __name__ == '__main__':
unittest.main()
| '''
MLJAR unit tests.
'''
import os
import unittest
from project_client_test import ProjectClientTest
from dataset_client_test import DatasetClientTest
from experiment_client_test import ExperimentClientTest
from result_client_test import ResultClientTest
from mljar_test import MljarTest
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python |
29256e658b975eadfaea64a2dc4427849204e6b2 | fix transport tests being affected by env settings | sassoftware/conary,sassoftware/conary,sassoftware/conary,sassoftware/conary,sassoftware/conary | testsuite.py | testsuite.py | #!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import warnings
import sys
from testrunner import suite
from conary import sqlite3
from conary_test import resources
class Suite(suite.TestSuite):
testsuite_module = sys.modules[__name__]
topLevelStrip = 0
def setupPaths(self):
# turn off rpm locking via a DSO override. We have to
# keep a reference to the handle or else dlclose() will be
# called on it. Yes, this is ugly, but for some reason RPM
# has a global variable for the location of the lock file
# that only gets filled in the first time you ask for the rpm
# database lock. Thus you can't use more than one root directory
# during any single execution of rpmlib code.
assert 'rpm._rpm' not in sys.modules
from conary_test import norpmlock
norpmlock.open(resources.get_path('conary_test', '_norpmlock.so'))
if sqlite3.sqlite_version_info() < (3,7,0):
warnings.warn("conary.sqlite3 is linked against a too-old system "
"sqlite that is known to have bugs affecting the "
"repository.")
# Some transport tests are affected by proxy environment settings
for transport in ('http', 'https', 'ftp', 'all', 'no'):
name = '%s_proxy' % transport
os.environ.pop(name, None)
os.environ.pop(name.upper(), None)
def getCoverageExclusions(self, handler, environ):
return ['scripts/.*', 'epdb.py', 'stackutil.py']
def getCoverageDirs(self, handler, environ):
# TODO: policy
return [ resources.get_path('conary') ]
def sortTests(self, tests):
# Filter out e.g. conary.pysqlite3.test
return [x for x in tests if x.startswith('conary_test')]
_s = Suite()
setup = _s.setup
main = _s.main
if __name__ == '__main__':
_s.run()
| #!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
import sys
from testrunner import suite
from conary import sqlite3
from conary_test import resources
class Suite(suite.TestSuite):
testsuite_module = sys.modules[__name__]
topLevelStrip = 0
def setupPaths(self):
# turn off rpm locking via a DSO override. We have to
# keep a reference to the handle or else dlclose() will be
# called on it. Yes, this is ugly, but for some reason RPM
# has a global variable for the location of the lock file
# that only gets filled in the first time you ask for the rpm
# database lock. Thus you can't use more than one root directory
# during any single execution of rpmlib code.
assert 'rpm._rpm' not in sys.modules
from conary_test import norpmlock
norpmlock.open(resources.get_path('conary_test', '_norpmlock.so'))
if sqlite3.sqlite_version_info() < (3,7,0):
warnings.warn("conary.sqlite3 is linked against a too-old system "
"sqlite that is known to have bugs affecting the "
"repository.")
def getCoverageExclusions(self, handler, environ):
return ['scripts/.*', 'epdb.py', 'stackutil.py']
def getCoverageDirs(self, handler, environ):
# TODO: policy
return [ resources.get_path('conary') ]
def sortTests(self, tests):
# Filter out e.g. conary.pysqlite3.test
return [x for x in tests if x.startswith('conary_test')]
_s = Suite()
setup = _s.setup
main = _s.main
if __name__ == '__main__':
_s.run()
| apache-2.0 | Python |
b297ecd987e74a33a5152bc9546f14fdde4bdca2 | Edit Order admin | ballpor98/TheDarkCompany,ballpor98/TheDarkCompany,ballpor98/TheDarkCompany | gamingSpot/shop/admin.py | gamingSpot/shop/admin.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import *
# Register your models here.
'''class ImageInline(admin.StackedInline):
model = Image
extra = 1'''
class ProductAdmin(admin.ModelAdmin):
list_display = ('name','brand','price','lates_update')
list_filter = ['brand','lates_update']
#inlines = [ImageInline]
class OrderAdmin(admin.ModelAdmin):
list_display = ('member_id','status','date','total','product_list')
list_filter = ['member_id','status','date','total']
admin.site.register(Product,ProductAdmin)
'''admin.site.register(Member)'''
admin.site.register(Order,OrderAdmin)
admin.site.register(MyUser)
#admin.site.register(Description)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import *
# Register your models here.
'''class ImageInline(admin.StackedInline):
model = Image
extra = 1'''
class ProductAdmin(admin.ModelAdmin):
list_display = ('name','brand','price','lates_update')
list_filter = ['brand','lates_update']
#inlines = [ImageInline]
admin.site.register(Product,ProductAdmin)
'''admin.site.register(Member)'''
admin.site.register(Order)
admin.site.register(MyUser)
#admin.site.register(Description)
| apache-2.0 | Python |
bd353f9145dfe75657d4bad24b667d864190f397 | bump to 0.4.5 | tsuru/tsuru-unit-agent | tsuru_unit_agent/__init__.py | tsuru_unit_agent/__init__.py | __version__ = "0.4.5"
| __version__ = "0.4.4"
| bsd-3-clause | Python |
f35c32123ce4f1b80e6654163075f40a2660b562 | Bump SDL2 version | mfichman/winbrew | formula/sdl2.py | formula/sdl2.py | import winbrew
import os
import glob
class Sdl2(winbrew.Formula):
url = 'http://libsdl.org/release/SDL2-2.0.3.zip'
homepage = 'http://libsdl.org'
sha1 = '9283f1ce25b8f3155b6960b214cb6a706c285e27'
build_deps = ()
deps = ()
def directx(self):
"""
Find the DirectX SDK and set the INCLUDE/LIBPATH env vars to include
the path to the header/library files.
"""
sdks = glob.glob("C:\\Program Files*\\Microsoft DirectX SDK*")
try:
sdk = sdks[0]
except IndexError, e:
self.error("no DirectX SDK found")
os.environ['LIBPATH'] = ';'.join((
os.environ.get('LIBPATH', ''),
os.path.join(sdk, 'Lib', 'x86'),
))
os.environ['INCLUDE'] = ';'.join((
os.environ.get('INCLUDE', ''),
os.path.join(sdk, 'Include')
))
def install(self):
self.directx()
self.cd('VisualC')
self.msbuild(winbrew.msbuild_args+('/p:VCBuildAdditionalOptions=/useenv', 'SDL_VS2010.sln'))
self.includes('include', 'SDL2')
def test(self):
pass
| import winbrew
import os
import glob
class Sdl2(winbrew.Formula):
url = 'http://libsdl.org/release/SDL2-2.0.1.zip'
homepage = 'http://libsdl.org'
sha1 = '9283f1ce25b8f3155b6960b214cb6a706c285e27'
build_deps = ()
deps = ()
def directx(self):
"""
Find the DirectX SDK and set the INCLUDE/LIBPATH env vars to include
the path to the header/library files.
"""
sdks = glob.glob("C:\\Program Files*\\Microsoft DirectX SDK*")
try:
sdk = sdks[0]
except IndexError, e:
self.error("no DirectX SDK found")
os.environ['LIBPATH'] = ';'.join((
os.environ.get('LIBPATH', ''),
os.path.join(sdk, 'Lib', 'x86'),
))
os.environ['INCLUDE'] = ';'.join((
os.environ.get('INCLUDE', ''),
os.path.join(sdk, 'Include')
))
def install(self):
self.directx()
self.cd('VisualC')
self.msbuild(winbrew.msbuild_args+('/p:VCBuildAdditionalOptions=/useenv', 'SDL_VS2010.sln'))
self.includes('include', 'SDL2')
def test(self):
pass
| mit | Python |
bba4a2f22306e905951379efe74806f164fa1327 | implement dummy post create view | byteweaver/django-forums,byteweaver/django-forums,ckcnik/django-forums,ckcnik/django-forums | forums/views.py | forums/views.py | from django.views.generic import ListView, DetailView, FormView
from forms import TopicCreateForm, PostCreateForm
from models import Category, Topic, Forum
class CategoryListView(ListView):
model = Category
class ForumDetailView(DetailView):
model = Forum
class TopicDetailView(DetailView):
model = Topic
class TopicCreateView(FormView):
template_name = 'forums/topic_create.html'
form_class = TopicCreateForm
class PostCreateView(FormView):
template_name = 'forums/post_create.html'
form_class = PostCreateForm
| from django.views.generic import ListView, DetailView, FormView
from forms import TopicCreateForm
from models import Category, Topic, Forum
class CategoryListView(ListView):
model = Category
class ForumDetailView(DetailView):
model = Forum
class TopicDetailView(DetailView):
model = Topic
class TopicCreateView(FormView):
template_name = 'forums/topic_create.html'
form_class = TopicCreateForm
| bsd-3-clause | Python |
1fc0a9b6a5e9be2530058bd8f704ccb2eba2c758 | Change Auto Correct Message Style | iGene/igene_bot,aver803bath5/igene_bot | models/google.py | models/google.py | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import logging
import re
import requests
import urllib.parse
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def google(bot, update):
search = update.message.text
search = re.sub(r'^(?i)google ','',search)
logger.info("Google %s" %search)
headers = {'User-Agent': 'Mozilla/5.0'}
r = requests.get('https://www.google.com/search?q='+ search, headers)
soup = BeautifulSoup(r.text, "html.parser")
result = soup.find('h3', {'class': 'r'}).find('a').attrs['href']
result = urllib.parse.unquote(result)
if_http_start_regex = re.compile('^http')
if_http_start = if_http_start_regex.match(str(result))
if if_http_start == None:
remove_url_q_re = re.compile('^\/url\?q=')
remove_url_sa_re = re.compile('\&sa.+')
result = re.sub(remove_url_q_re, '', result)
result = re.sub(remove_url_sa_re, '', result)
update.message.reply_text(result)
else:
update.message.reply_text(result)
def images(bot, update):
search = update.message.text
search = re.sub(r'%(?i)image ','',search)
logger.info("Google image search %s" %search)
headers = {'User-Agent': 'Mozilla/5.0'}
r = requests.get('https://www.google.com/search?tbm=isch&q='+ search, headers)
soup = BeautifulSoup(r.text, "html.parser")
images = [a['src'] for a in soup.find_all("img", {"src": re.compile("gstatic.com")})]
update.message.reply_text(images[0])
def correct(bot, update):
search = update.message.text
user = update.message.from_user.username
logger.info("Auto correct")
headers = {'User-Agent': 'Mozilla/5.0'}
r = requests.get('https://www.google.com/search?q='+ search, headers)
soup = BeautifulSoup(r.text, "html.parser")
result = soup.find('a',{'class': 'spell'})
if not result is None:
update.message.reply_text(user+' 的意思也許是\n'++result.text)
| # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import logging
import re
import requests
import urllib.parse
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def google(bot, update):
search = update.message.text
search = re.sub(r'^(?i)google ','',search)
logger.info("Google %s" %search)
headers = {'User-Agent': 'Mozilla/5.0'}
r = requests.get('https://www.google.com/search?q='+ search, headers)
soup = BeautifulSoup(r.text, "html.parser")
result = soup.find('h3', {'class': 'r'}).find('a').attrs['href']
result = urllib.parse.unquote(result)
if_http_start_regex = re.compile('^http')
if_http_start = if_http_start_regex.match(str(result))
if if_http_start == None:
remove_url_q_re = re.compile('^\/url\?q=')
remove_url_sa_re = re.compile('\&sa.+')
result = re.sub(remove_url_q_re, '', result)
result = re.sub(remove_url_sa_re, '', result)
update.message.reply_text(result)
else:
update.message.reply_text(result)
def images(bot, update):
search = update.message.text
search = re.sub(r'%(?i)image ','',search)
logger.info("Google image search %s" %search)
headers = {'User-Agent': 'Mozilla/5.0'}
r = requests.get('https://www.google.com/search?tbm=isch&q='+ search, headers)
soup = BeautifulSoup(r.text, "html.parser")
images = [a['src'] for a in soup.find_all("img", {"src": re.compile("gstatic.com")})]
update.message.reply_text(images[0])
def correct(bot, update):
search = update.message.text
logger.info("Auto correct")
headers = {'User-Agent': 'Mozilla/5.0'}
r = requests.get('https://www.google.com/search?q='+ search, headers)
soup = BeautifulSoup(r.text, "html.parser")
result = soup.find('a',{'class': 'spell'})
if not result is None:
update.message.reply_text(result.text)
| mit | Python |
4f69e2cc8cec816b9c75ba8a58ee801870ee7bd0 | delete print | it-projects-llc/misc-addons,it-projects-llc/misc-addons,it-projects-llc/misc-addons | website_redirect/website_redirect_models.py | website_redirect/website_redirect_models.py | from openerp import api, models, fields, SUPERUSER_ID
from openerp.http import request
import fnmatch
import werkzeug.utils
class website_redirect(models.Model):
_name = 'website.redirect'
_order = 'sequence,id'
name = fields.Char('Name')
active = fields.Boolean('Active', default=True)
sequence = fields.Integer('Sequence')
domain = fields.Char('Domain Name', placeholder='odoo.com', help='keep empty to apply rules for any domain')
rule_ids = fields.One2many('website.redirect.rule', 'redirect_id', string='Rules')
class website_redirect(models.Model):
_name = 'website.redirect.rule'
_order = 'sequence,id'
sequence = fields.Integer('Sequence')
pattern = fields.Char('From', help='Unix shell-style wildcards. Check https://docs.python.org/2/library/fnmatch.html for details')
target = fields.Char('To')
redirect_id = fields.Many2one('website.redirect')
class ir_http(models.AbstractModel):
_inherit = 'ir.http'
def _dispatch(self):
host = request.httprequest.environ.get('HTTP_HOST', '').split(':')[0]
www, _, h = host.partition('.')
if www == 'www':
host = h
path = request.httprequest.path
redirect_ids = self.pool['website.redirect'].search(request.cr, SUPERUSER_ID, [])
for redirect in self.pool['website.redirect'].browse(request.cr, SUPERUSER_ID, redirect_ids):
if redirect.domain and redirect.domain != host:
continue
for rule in redirect.rule_ids:
if fnmatch.fnmatch(path, rule.pattern):
code = 302
return werkzeug.utils.redirect(rule.target, code)
return super(ir_http, self)._dispatch()
| from openerp import api, models, fields, SUPERUSER_ID
from openerp.http import request
import fnmatch
import werkzeug.utils
class website_redirect(models.Model):
_name = 'website.redirect'
_order = 'sequence,id'
name = fields.Char('Name')
active = fields.Boolean('Active', default=True)
sequence = fields.Integer('Sequence')
domain = fields.Char('Domain Name', placeholder='odoo.com', help='keep empty to apply rules for any domain')
rule_ids = fields.One2many('website.redirect.rule', 'redirect_id', string='Rules')
class website_redirect(models.Model):
_name = 'website.redirect.rule'
_order = 'sequence,id'
sequence = fields.Integer('Sequence')
pattern = fields.Char('From', help='Unix shell-style wildcards. Check https://docs.python.org/2/library/fnmatch.html for details')
target = fields.Char('To')
redirect_id = fields.Many2one('website.redirect')
class ir_http(models.AbstractModel):
_inherit = 'ir.http'
def _dispatch(self):
host = request.httprequest.environ.get('HTTP_HOST', '').split(':')[0]
www, _, h = host.partition('.')
if www == 'www':
host = h
path = request.httprequest.path
print 'path', path
redirect_ids = self.pool['website.redirect'].search(request.cr, SUPERUSER_ID, [])
for redirect in self.pool['website.redirect'].browse(request.cr, SUPERUSER_ID, redirect_ids):
if redirect.domain and redirect.domain != host:
continue
for rule in redirect.rule_ids:
if fnmatch.fnmatch(path, rule.pattern):
code = 302
return werkzeug.utils.redirect(rule.target, code)
return super(ir_http, self)._dispatch()
| mit | Python |
09081e9adb4f6a2338a2b7c8812fd816079a4296 | add delete_all() ToDoItem method to delete all items in a given list | davidnjakai/bc-8-todo-console-application | todo_item.py | todo_item.py | from database import conn2, c2
class ToDoItem(object):
"""docstring for ToDoItem"""
def __init__(self, content = '1st item', complete = 0):
self.content = content
self.complete = complete
def save_item(self, listname):
SQL = "INSERT INTO todoitem (CONTENT, COMPLETE, LISTNAME) VALUES ('{0}', '{1}', '{2}')"\
.format(self.content, self.complete, listname)
c2.execute(SQL)
conn2.commit()
def delete_item(self, listname):
SQL = "DELETE FROM todoitem WHERE CONTENT = '{0}' AND LISTNAME = '{1}'".format(self.content, listname)
c2.execute(SQL)
conn2.commit()
def delete_all(self, listname):
SQL = "DELETE FROM todoitem WHERE LISTNAME = '{0}'".format(listname)
c2.execute(SQL)
conn2.commit()
def get_items(self, listname):
SQL = "SELECT CONTENT, COMPLETE FROM todoitem WHERE LISTNAME = '{0}'".format(listname)
return c2.execute(SQL)
def edit_item(self, content):
self.content = content
def finish_item(self):
self.complete = 1
| from database import conn2, c2
class ToDoItem(object):
"""docstring for ToDoItem"""
def __init__(self, content = '1st item', complete = 0):
self.content = content
self.complete = complete
def save_item(self, listname):
SQL = "INSERT INTO todoitem (CONTENT, COMPLETE, LISTNAME) VALUES ('{0}', '{1}', '{2}')"
c2.execute(SQL.format(self.content, self.complete, listname))
conn2.commit()
def delete_item(self, listname):
SQL = "DELETE FROM todoitem WHERE CONTENT = '{0}' AND LISTNAME = '{1}'".format(self.content, listname)
c2.execute(SQL)
conn2.commit()
def get_items(self, listname):
SQL = "SELECT CONTENT, COMPLETE FROM todoitem WHERE LISTNAME = '{0}'".format(listname)
return c2.execute(SQL)
def edit_item(self, content):
self.content = content
def finish_item(self):
self.complete = 1
| mit | Python |
66bc76acf8ffaaf5f6abf8cdcf018f1f3f2d07eb | add delete_list() method in ToDoList to delete records from database | davidnjakai/bc-8-todo-console-application | todo_list.py | todo_list.py | import todo_item
import sqlite3
class ToDoList(object):
def __init__(self, name, description, todo_items):
if type(name) != type(''):
self.name = 'Enter valid name'
else:
self.name = name
if type(description) != type(''):
self.description = 'Enter valid description'
else:
self.description = description
self.todo_items = todo_items
self.save_list()
def add_todo(self, content, complete = False, *args):
if type(complete) != type(True):
self.complete = False
return
if type(content) != type(''):
return 'Enter valid content'
item = todo_item.ToDoItem(content, complete, *args)
self.todo_items.append(item)
def finish_item(self, index):
if index >= len(self.todo_items) or index < 0:
return 'That to do item does not exist'
self.todo_items[index] = True
def edit_item(self, index, content):
self.todo_items[index] = content
def delete_item(self, index):
del self.todo_items[index]
def percentage_completed(self):
completed_items = 0
for item in self.todo_items:
if item.complete:
completed_items += 1
percentage = 100 * (completed_items/len(self.todo_items))
return percentage
def save_list(self):
conn = sqlite3.connect('crollodb.db')
c = conn.cursor()
SQL = "INSERT INTO todolist (NAME, DESCRIPTION) VALUES ('{0}', '{1}')"
c.execute(SQL.format(self.name, self.description))
conn.commit()
conn.close()
def delete_list(self):
conn = sqlite3.connect('crollodb.db')
c = conn.cursor()
if len(self.todo_items) > 0:
for item in self.todo_items:
SQL = "DELETE FROM todoitem WHERE CONTENT = {0}".format(item)
c.execute(SQL)
SQL = "DELETE FROM todolist WHERE NAME = {0}".format(self.name)
conn.commit()
conn.close()
| import todo_item
import sqlite3
class ToDoList(object):
def __init__(self, name, description, todo_items):
if type(name) != type(''):
self.name = 'Enter valid name'
else:
self.name = name
if type(description) != type(''):
self.description = 'Enter valid description'
else:
self.description = description
self.todo_items = todo_items
def add_todo(self, content, complete = False, *args):
if type(complete) != type(True):
self.complete = False
return
if type(content) != type(''):
return 'Enter valid content'
item = todo_item.ToDoItem(content, complete, *args)
self.todo_items.append(item)
def finish_item(self, index):
if index >= len(self.todo_items) or index < 0:
return 'That to do item does not exist'
self.todo_items[index] = True
def edit_item(self, index, content):
self.todo_items[index] = content
def delete_item(self, index):
del self.todo_items[index]
def percentage_completed(self):
completed_items = 0
for item in self.todo_items:
if item.complete:
completed_items += 1
percentage = 100 * (completed_items/len(self.todo_items))
return percentage
def save_list(self):
conn = sqlite3.connect('crollodb.db')
c = conn.cursor()
SQL = "INSERT INTO todolist (NAME, DESCRIPTION) VALUES ('{0}', '{1}')"
c.execute(SQL.format(self.name, self.description))
conn.commit()
conn.close() | mit | Python |
e5de8dc7e964d6938f5ff352bb1b524fe5f7ce8d | Fix calling npm/bower | getslash/mailboxer,vmalloc/mailboxer,vmalloc/mailboxer,vmalloc/mailboxer,getslash/mailboxer,getslash/mailboxer | _lib/frontend.py | _lib/frontend.py | import os
import subprocess
from contextlib import contextmanager
import logbook
import click
from .bootstrapping import from_env, from_project_root
_logger = logbook.Logger(__name__)
@click.group()
def frontend():
pass
@frontend.command()
@click.option("--watch", is_flag=True)
def build(watch):
_bootstrap_npm()
if watch:
_execute("gulp watch")
else:
_execute("gulp")
def _bootstrap_npm():
with _get_timestamp_update_context(
from_env("npm.timestamp"), ["bower.json", "package.json"]) as uptodate:
if not uptodate:
_logger.info("Bootstrapping npm environment...")
_execute("npm install")
_execute("npm install gulp")
_execute("npm install -g gulp")
_execute("npm install -g bower")
_execute("bower install --allow-root -f -V")
@contextmanager
def _get_timestamp_update_context(timestamp_path, paths):
timestamp = _get_timestamp(timestamp_path)
path_timestamps = [_get_timestamp(from_project_root(p))
for p in paths]
uptodate = timestamp != 0 and timestamp > max(path_timestamps)
yield uptodate
with open(timestamp_path, "w"):
pass
def _execute(cmd):
subprocess.check_call(cmd, shell=True, cwd=from_project_root())
def _get_timestamp(path):
try:
return os.stat(path).st_mtime
except OSError:
return 0
| import os
import subprocess
from contextlib import contextmanager
import logbook
import click
from .bootstrapping import from_env, from_project_root
_logger = logbook.Logger(__name__)
@click.group()
def frontend():
pass
@frontend.command()
@click.option("--watch", is_flag=True)
def build(watch):
_bootstrap_npm()
if watch:
_execute("gulp watch")
else:
_execute("gulp")
def _bootstrap_npm():
with _get_timestamp_update_context(
from_env("npm.timestamp"), ["bower.json", "package.json"]) as uptodate:
if not uptodate:
_logger.info("Bootstrapping npm environment...")
_execute("npm install")
_execute("npm install gulp")
_execute("npm install -g gulp")
_execute("npm install -g bower")
_execute("bower install --allow-root -f")
@contextmanager
def _get_timestamp_update_context(timestamp_path, paths):
timestamp = _get_timestamp(timestamp_path)
path_timestamps = [_get_timestamp(from_project_root(p))
for p in paths]
uptodate = timestamp != 0 and timestamp > max(path_timestamps)
yield uptodate
with open(timestamp_path, "w"):
pass
def _execute(cmd):
subprocess.call(cmd, shell=True, cwd=from_project_root())
def _get_timestamp(path):
try:
return os.stat(path).st_mtime
except OSError:
return 0
| mit | Python |
7750b900b6ab6f136aee64fb806c95b0bfef1d8d | set Metwit base url to api.metwit.com | metwit/metwit-python | metwit/metwit.py | metwit/metwit.py | from .rest import Resource, RestApi
class Metwit(RestApi):
base_url = "https://api.metwit.com/"
token_url = 'https://api.metwit.com/token/'
dialog_url = "https://metwit.com/oauth/authorize/"
weather = Resource('/v2/weather/')
metags = Resource('/v2/metags/')
users = Resource('/v2/users/')
| from .rest import Resource, RestApi
class Metwit(RestApi):
#base_url = "https://api.metwit.com/"
#token_url = 'https://api.metwit.com/token/'
dialog_url = "https://metwit.com/oauth/authorize/"
base_url = "http://127.0.0.1:8000/"
token_url = 'http://127.0.0.1:8000/token/'
weather = Resource('/v2/weather/')
metags = Resource('/v2/metags/')
users = Resource('/v2/users/')
| bsd-3-clause | Python |
1f00dd47cc3490e7da68180258c365bf9b8583ba | Bump to version 0.42.5 | reubano/tabutils,reubano/tabutils,reubano/meza,reubano/tabutils,reubano/meza,reubano/meza | meza/__init__.py | meza/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
meza
~~~~
Provides methods for reading and processing data from tabular formatted files
Attributes:
CURRENCIES [tuple(unicode)]: Currency symbols to remove from decimal
strings.
ENCODING (str): Default file encoding.
DEFAULT_DATETIME (obj): Default datetime object
"""
from datetime import datetime as dt
from os import path as p
__version__ = '0.42.5'
__title__ = 'meza'
__package_name__ = 'meza'
__author__ = 'Reuben Cummings'
__description__ = 'A Python toolkit for processing tabular data'
__email__ = 'reubano@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'
CURRENCIES = ('$', '£', '€')
ENCODING = 'utf-8'
DEFAULT_DATETIME = dt(9999, 12, 31, 0, 0, 0)
BOM = '\ufeff'
PARENT_DIR = p.abspath(p.dirname(p.dirname(__file__)))
DATA_DIR = p.join(PARENT_DIR, 'data', 'test')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
meza
~~~~
Provides methods for reading and processing data from tabular formatted files
Attributes:
CURRENCIES [tuple(unicode)]: Currency symbols to remove from decimal
strings.
ENCODING (str): Default file encoding.
DEFAULT_DATETIME (obj): Default datetime object
"""
from datetime import datetime as dt
from os import path as p
__version__ = '0.42.4'
__title__ = 'meza'
__package_name__ = 'meza'
__author__ = 'Reuben Cummings'
__description__ = 'A Python toolkit for processing tabular data'
__email__ = 'reubano@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'
CURRENCIES = ('$', '£', '€')
ENCODING = 'utf-8'
DEFAULT_DATETIME = dt(9999, 12, 31, 0, 0, 0)
BOM = '\ufeff'
PARENT_DIR = p.abspath(p.dirname(p.dirname(__file__)))
DATA_DIR = p.join(PARENT_DIR, 'data', 'test')
| mit | Python |
fee5beb59bfb2a68ba1409e7dfb3cad1521ca486 | Add count employees | DataViva/dataviva-api,jdmmiranda307/dataviva-api,daniel1409/dataviva-api | app/models/rais.py | app/models/rais.py | from sqlalchemy import Column, Integer, String, Numeric, func
from app import db
class Rais(db.Model):
__tablename__ = 'rais'
ocupation = Column(String(6), primary_key=True)
cnae = Column(String(5), primary_key=True)
cnae_section = Column(String(1), primary_key=True)
cnae_division = Column(String(2), primary_key=True)
literacy = Column(String(1), primary_key=True)
establishment = Column(String(14), primary_key=True)
simple = Column(String(1), primary_key=True)
municipality = Column(String(7), primary_key=True)
employee = Column(String(11), primary_key=True)
color = Column(String(2), primary_key=True)
gender = Column(String(1), primary_key=True)
establishment_size = Column(String(1), primary_key=True)
year = Column(Integer, primary_key=True)
age = Column(Integer)
wage_received = Column(Numeric(17,2))
average_monthly_wage = Column(Numeric(17,2))
@classmethod
def dimensions(cls):
return [
'ocupation',
'cnae',
'cnae_section',
'cnae_division',
'literacy',
'establishment',
'simple',
'municipality',
'employee',
'color',
'gender',
'establishment_size',
'year',
]
@classmethod
def agg_values(cls):
return [
func.avg(cls.age),
func.avg(cls.average_monthly_wage),
func.sum(cls.wage_received),
func.count(cls.employee),
]
@classmethod
def value_headers(cls):
return ['age', 'wage_received', 'average_monthly_wage', 'jobs']
| from sqlalchemy import Column, Integer, String, Numeric, func
from app import db
class Rais(db.Model):
__tablename__ = 'rais'
ocupation = Column(String(6), primary_key=True)
cnae = Column(String(5), primary_key=True)
cnae_section = Column(String(1), primary_key=True)
cnae_division = Column(String(2), primary_key=True)
literacy = Column(String(1), primary_key=True)
establishment = Column(String(14), primary_key=True)
simple = Column(String(1), primary_key=True)
municipality = Column(String(7), primary_key=True)
employee = Column(String(11), primary_key=True)
color = Column(String(2), primary_key=True)
gender = Column(String(1), primary_key=True)
establishment_size = Column(String(1), primary_key=True)
year = Column(Integer, primary_key=True)
age = Column(Integer)
wage_received = Column(Numeric(17,2))
average_monthly_wage = Column(Numeric(17,2))
@classmethod
def dimensions(cls):
return [
'ocupation',
'cnae',
'cnae_section',
'cnae_division',
'literacy',
'establishment',
'simple',
'municipality',
'employee',
'color',
'gender',
'establishment_size',
'year',
]
@classmethod
def agg_values(cls):
return [
func.avg(cls.age),
func.avg(cls.average_monthly_wage),
func.sum(cls.wage_received),
]
@classmethod
def value_headers(cls):
return ['age', 'wage_received', 'average_monthly_wage']
| mit | Python |
0eb8c60c647c76ad244073cabc66883000c4faa2 | print sql to debug | YFFY/Eredar | ext/databaser.py | ext/databaser.py | #! /usr/bin/env python
# --*-- coding:utf-8 --*--
import MySQLdb
from ext.util import *
from config.setting import *
class Dber(object):
def __init__(self):
self.host = database.get('host')
self.port = database.get('port')
self.user = database.get('user')
self.password = database.get('password')
self.dbname = database.get('databasename')
self.conn = None
self.setConnection()
def setConnection(self):
try:
self.conn = MySQLdb.Connect(host=self.host,user=self.user,passwd=self.password,db=self.dbname,port=self.port,charset='utf8')
self.cur = self.conn.cursor()
except Exception as ex:
traceback.print_exc()
def syncDruidData(self, druidresult):
if isinstance(druidresult, list):
for result in druidresult:
print result
insertSql = "insert into {0} values {1}".format(database.get('tablename'), unicode2str(result))
print insertSql
self.insertRecord(insertSql)
break
self.setCommit()
def insertRecord(self, sql):
if self.conn:
self.cur.execute(sql)
def setCommit(self):
self.conn.commit()
def getRecord(self, sql):
if self.conn:
executeStatus = self.cur.execute(sql)
return self.cur.fetchall()
| #! /usr/bin/env python
# --*-- coding:utf-8 --*--
import MySQLdb
from ext.util import *
from config.setting import *
class Dber(object):
def __init__(self):
self.host = database.get('host')
self.port = database.get('port')
self.user = database.get('user')
self.password = database.get('password')
self.dbname = database.get('databasename')
self.conn = None
self.setConnection()
def setConnection(self):
try:
self.conn = MySQLdb.Connect(host=self.host,user=self.user,passwd=self.password,db=self.dbname,port=self.port,charset='utf8')
self.cur = self.conn.cursor()
except Exception as ex:
traceback.print_exc()
def syncDruidData(self, druidresult):
if isinstance(druidresult, list):
for result in druidresult:
insertSql = "insert into {0} values {1}".format(database.get('tablename'), unicode2str(result))
print insertSql
self.insertRecord(insertSql)
self.setCommit()
def insertRecord(self, sql):
if self.conn:
self.cur.execute(sql)
def setCommit(self):
self.conn.commit()
def getRecord(self, sql):
if self.conn:
executeStatus = self.cur.execute(sql)
return self.cur.fetchall()
| apache-2.0 | Python |
45ebf89018512f93592083cf0cf0413d4f8c6d4a | Add blogger authentication | jamalmoir/pyblogit | pyblogit/api_interface.py | pyblogit/api_interface.py | """
pyblogit.api_interface
~~~~~~~~~~~~~~~~~~~~~~
This modules acts as an interface between pyblogit and various
blogging platform apis.
"""
import oauth2client.client
import oauth2client.file
import httplib2
class BloggerInterface(object):
"""Connects to blogger api and authorises client."""
def __init__(self):
#TODO
def get_access_code(self):
"""Opens dafualt browser to the google auth page and provides
them with an authorisation code."""
flow = oauth2client.client.OAuth2WebServerFlow(client_id,
client_secret, scope)
auth_uri = flow.step1_get_authorize_url()
webbrowser.open_new_tab(auth_uri)
def get_credentials(self):
"""Gets google api credentials, or generates new credentials
if they don't exist or are invalid."""
storage = oauth2client.file.Storage('credentials.dat')
credentials = storage.get()
if not credentials or credientials.invalid:
credentials = tools.run_flow(flow, storage,
tools.argparser.parse_args())
return credentials
def get_service(self):
"""Returns an authorised blogger api service."""
http = httplib2.Http()
http = credentials.authorize(http)
service = build('blogger', 'v3', http=http)
return service
def get_client():
interface = BloggerInterface()
client = interface.get_client()
return client
def get_blog(url):
client = get_client()
| """
pyblogit.api_interface
~~~~~~~~~~~~~~~~~~~~~~
This modules acts as an interface between pyblogit and various
blogging platform apis.
"""
import gdata.gauth
import gdata.blogger.client
class BloggerInterface(object):
def __init__(self):
self._CLIENT_ID = client_id
self._CLIENT_SECRET = client_secret
self._SCOPE = 'https://www.googleapis.com/auth/blogger'
def get_access_code(self):
"""Opens dafualt browser to the google auth page and provides
them with an access code."""
token = gdata.gauth.OAuth2Token(
client_id = self.CLIENT_ID,
client_secret = self.CLIENT_SECRET,
scope = self.SCOPE,
user_agent = 'pyblogit')
url = token.generate_authorize_url(redirect_uri='urn:ietf:wg:oauth:2.0:oob')
webbrowser.open_new_tab(url)
def generate_token(self, code):
"""Generates new api access token."""
self._token = token.get_access_token(code)
def get_client(self):
"""Returns an authorised blogger api client."""
client = gdata.blogger.client.BloggerClient()
self._token.authorize(client)
return client
| mit | Python |
afd7687c1140612527f0846a7225dd6adc669c2e | update io plugin interface | pyexcel/pyexcel-chart,pyexcel/pyexcel-chart | pyexcel_chart/__init__.py | pyexcel_chart/__init__.py | """
pyexcel_chart
~~~~~~~~~~~~~~~~~~~
chart drawing plugin for pyexcel
:copyright: (c) 2016-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for further details
"""
from pyexcel.internal.common import PyexcelPluginList
PyexcelPluginList(__name__).add_a_renderer(
submodule='chart.ChartRenderer',
file_types=['svg'],
stream_type='binary'
)
| """
pyexcel_chart
~~~~~~~~~~~~~~~~~~~
chart drawing plugin for pyexcel
:copyright: (c) 2016-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for further details
"""
from pyexcel.internal.common import PyexcelPluginList
__pyexcel_plugins__ = PyexcelPluginList(__name__).add_a_renderer(
submodule='chart',
file_types=['svg'],
stream_type='binary'
)
| bsd-3-clause | Python |
7d41495562185eebc72ab6eb05a0f27fd527d70f | Bump version | piotrjakimiak/cmsplugin-text-ng | cmsplugin_text_ng/__init__.py | cmsplugin_text_ng/__init__.py | __version__ = '0.7'
default_app_config = 'cmsplugin_text_ng.apps.CmsPluginTextNgConfig'
| __version__ = '0.6'
default_app_config = 'cmsplugin_text_ng.apps.CmsPluginTextNgConfig'
| bsd-3-clause | Python |
4d8a51f429b8d148444e69ecaf8db7c45a1a3bad | allow text before a line line comment | pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments,pathawks/pygments | pygments/lexers/puppet.py | pygments/lexers/puppet.py | # -*- coding: utf-8 -*-
"""
pygments.lexers.puppet
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Puppet DSL.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import *
__all__ = ['PuppetLexer']
class PuppetLexer(RegexLexer):
name = 'Puppet'
aliases = ['puppet']
filenames = ['*.pp']
tokens = {
'root': [
(r'\s*#.*$', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
]
}
| # -*- coding: utf-8 -*-
"""
pygments.lexers.puppet
~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Puppet DSL.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import *
__all__ = ['PuppetLexer']
class PuppetLexer(RegexLexer):
name = 'Puppet'
aliases = ['puppet']
filenames = ['*.pp']
tokens = {
'root': [
(r'#.*$', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
]
}
| bsd-2-clause | Python |
7468c05ad1a231c36f831a82e02c06804626f827 | update version | vparitskiy/data-importer,vparitskiy/data-importer | data_importer/__init__.py | data_importer/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__doc__ = 'Data Importer'
__version__ = '1.6.2'
__author__ = 'Valder Gallo <valdergallo@gmail.com>'
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
__doc__ = 'Data Importer'
__version__ = '1.4.0'
__author__ = 'Valder Gallo <valdergallo@gmail.com>'
| bsd-2-clause | Python |
aca53b30f310aac506ce2e17cfd5f876f9f186b6 | Add crontab decorator | gaujin/tornado-crontab | tornado_crontab/_crontab.py | tornado_crontab/_crontab.py | import functools
import math
from crontab import CronTab
from tornado.ioloop import PeriodicCallback
class CronTabCallback(PeriodicCallback):
def __init__(self, callback, schedule, io_loop=None):
self.__crontab = CronTab(schedule)
super(CronTabCallback, self).__init__(
callback, self._calc_callbacktime(), io_loop)
def _calc_callbacktime(self, now=None):
return math.ceil(self.__crontab.next(now)) * 1000.0
def _schedule_next(self):
self.callback_time = self._calc_callbacktime()
super(CronTabCallback, self)._schedule_next()
def crontab(schedule, io_loop=None):
def receive_func(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
_func = functools.partial(func, *args, **kwargs)
CronTabCallback(_func, schedule, io_loop).start()
return wrapper
return receive_func
| import math
from crontab import CronTab
from tornado.ioloop import PeriodicCallback
class CronTabCallback(PeriodicCallback):
def __init__(self, callback, crontab, io_loop=None):
self.__crontab = CronTab(crontab)
super(CronTabCallback, self).__init__(
callback, self._calc_callbacktime(), io_loop)
def _calc_callbacktime(self, now=None):
return math.ceil(self.__crontab.next(now)) * 1000.0
def _schedule_next(self):
self.callback_time = self._calc_callbacktime()
super(CronTabCallback, self)._schedule_next()
| mit | Python |
f68ca64c01fda8c6f7a29d4e287cc816bd1e03d0 | bump in __version__ | datascopeanalytics/django-flux,datascopeanalytics/django-flux | flux/__init__.py | flux/__init__.py | __version__ = "0.1.3"
| __version__ = "0.1.2"
| mit | Python |
349565f3b246f94b35c877bfeacc4daa7478e9b4 | Improve urlconf construction (fix bug 690440) | hoosteeno/mozillians,mozilla/mozillians,akarki15/mozillians,fxa90id/mozillians,brian-yang/mozillians,mozilla/mozillians,akatsoulas/mozillians,johngian/mozillians,akarki15/mozillians,ChristineLaMuse/mozillians,safwanrahman/mozillians,justinpotts/mozillians,fxa90id/mozillians,anistark/mozillians,satdav/mozillians,safwanrahman/mozillians,akatsoulas/mozillians,mozilla/mozillians,glogiotatidis/mozillians-new,anistark/mozillians,mozilla/mozillians,anistark/mozillians,akatsoulas/mozillians,safwanrahman/mozillians,chirilo/mozillians,justinpotts/mozillians,brian-yang/mozillians,satdav/mozillians,glogiotatidis/mozillians-new,brian-yang/mozillians,johngian/mozillians,justinpotts/mozillians,glogiotatidis/mozillians-new,hoosteeno/mozillians,hoosteeno/mozillians,justinpotts/mozillians,johngian/mozillians,brian-yang/mozillians,hoosteeno/mozillians,akarki15/mozillians,chirilo/mozillians,akarki15/mozillians,satdav/mozillians,safwanrahman/mozillians,akatsoulas/mozillians,fxa90id/mozillians,anistark/mozillians,ChristineLaMuse/mozillians,glogiotatidis/mozillians-new,ChristineLaMuse/mozillians,chirilo/mozillians,satdav/mozillians,fxa90id/mozillians,johngian/mozillians,chirilo/mozillians | apps/users/urls.py | apps/users/urls.py | from django.conf.urls.defaults import patterns, url
from django.contrib.auth import views as auth_views
from jinjautils import jinja_for_django
from users import forms
from . import views
# So we can use the contrib logic for password resets, etc.
auth_views.render_to_response = jinja_for_django
urlpatterns = patterns('',
url(r'^login$', views.login,
dict(authentication_form=forms.AuthenticationForm), name='login'),
url(r'^logout$', auth_views.logout, dict(redirect_field_name='next'),
name='logout'),
url(r'^register$', views.register, name='register'),
url(r'^confirm$', views.confirm, name='confirm'),
url(r'^send_confirmation$', views.send_confirmation,
name='send_confirmation'),
url(r'^password_change$', views.password_change,
name='password_change'),
url(r'^password_change_done$', auth_views.password_change_done,
name='password_change_done'),
url(r'^password_reset$', views.password_reset,
name='password_reset'),
url(r'^password_reset_check_mail$', views.password_reset_check_mail,
name='password_reset_check_mail'),
url(r'^password_reset_confirm/'
'(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-'
'[0-9A-Za-z]{1,20})/$',
views.password_reset_confirm,
name='password_reset_confirm'),
url(r'^password_reset_complete$', auth_views.password_reset_complete,
name='password_reset_complete'),
)
| from django.conf.urls.defaults import patterns, url
from django.contrib.auth import views as auth_views
from jinjautils import jinja_for_django
from session_csrf import anonymous_csrf
from users import forms
from . import views
# So we can use the contrib logic for password resets, etc.
auth_views.render_to_response = jinja_for_django
urlpatterns = patterns('',
url(r'^login', views.login,
dict(authentication_form=forms.AuthenticationForm), name='login'),
url(r'^logout', auth_views.logout, dict(redirect_field_name='next'),
name='logout'),
url(r'^register', views.register, name='register'),
url(r'^confirm', views.confirm, name='confirm'),
url(r'^send_confirmation', views.send_confirmation,
name='send_confirmation'),
url(r'^password_change', views.password_change,
name='password_change'),
url(r'^password_change_done', auth_views.password_change_done,
name='password_change_done'),
url(r'^password_reset$', views.password_reset,
name='password_reset'),
url(r'^password_reset_check_mail$', views.password_reset_check_mail,
name='password_reset_check_mail'),
url(r'^password_reset_confirm/'
'(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-'
'[0-9A-Za-z]{1,20})/$',
views.password_reset_confirm,
name='password_reset_confirm'),
url(r'^password_reset_complete$', auth_views.password_reset_complete,
name='password_reset_complete'),
)
| bsd-3-clause | Python |
694bec1a07b1ba63ce4294879a09a2521c3ec122 | Fix user admin | mupi/escolamupi,hacklabr/timtec,virgilio/timtec,mupi/timtec,mupi/tecsaladeaula,AllanNozomu/tecsaladeaula,mupi/tecsaladeaula,AllanNozomu/tecsaladeaula,mupi/tecsaladeaula,mupi/timtec,mupi/escolamupi,AllanNozomu/tecsaladeaula,GustavoVS/timtec,hacklabr/timtec,mupi/timtec,GustavoVS/timtec,virgilio/timtec,hacklabr/timtec,mupi/tecsaladeaula,GustavoVS/timtec,mupi/timtec,AllanNozomu/tecsaladeaula,GustavoVS/timtec,hacklabr/timtec,virgilio/timtec,virgilio/timtec | course/admin.py | course/admin.py | from django.contrib import admin
from django.contrib.admin import ModelAdmin
from django.contrib.auth.admin import UserAdmin
from suit.admin import SortableTabularInline
from models import *
admin.site.register(TimtecUser, UserAdmin)
admin.site.register(Video)
admin.site.register(CourseProfessor)
class LessonInline(SortableTabularInline):
model = Lesson
sortable = 'position'
class CourseAdmin(ModelAdmin):
inlines = (LessonInline,)
admin.site.register(Course, CourseAdmin)
admin.site.register(Lesson)
admin.site.register(Activity)
admin.site.register(Unit)
admin.site.register(Answer) | from django.contrib import admin
from django.contrib.admin import ModelAdmin
from suit.admin import SortableTabularInline
from models import *
admin.site.register(TimtecUser)
admin.site.register(Video)
admin.site.register(CourseProfessor)
class LessonInline(SortableTabularInline):
model = Lesson
sortable = 'position'
class CourseAdmin(ModelAdmin):
inlines = (LessonInline,)
admin.site.register(Course, CourseAdmin)
admin.site.register(Lesson)
admin.site.register(Activity)
admin.site.register(Unit)
admin.site.register(Answer) | agpl-3.0 | Python |
6bd3d3a1ef972ce830024c9ccdc8c028eb2718e7 | Remove all refrences to imdb in the code | Uname-a/knife_scraper,Uname-a/knife_scraper,Uname-a/knife_scraper | modules/movie.py | modules/movie.py | #!/usr/bin/env python
# -*- coding: utf8 -*-
"""
imdb.py - Willie Movie Information Module
Copyright © 2012, Elad Alfassa, <elad@fedoraproject.org>
Licensed under the Eiffel Forum License 2.
This module relies on imdbapi.com
"""
import json
import web
def movie(willie, trigger):
"""
Returns some information about a movie, like Title, Year, Rating, Genre and IMDB Link.
"""
if not trigger.group(2):
return
word=trigger.group(2).rstrip()
word=word.replace(" ", "+")
uri="http://www.imdbapi.com/?t="+word
u = web.get_urllib_object(uri, 30)
data = json.load(u) #data is a Dict containing all the information we need
u.close()
if data['Response'] == 'False':
if 'Error' in data:
message = '[MOVIE] %s' % data['Error']
else:
willie.debug('movie', 'Got an error from the imdb api, search phrase was %s' % word, 'warning')
willie.debug('movie', str(data), 'warning')
message = '[MOVIE] Got an error from imdbapi'
else:
message = '[MOVIE] Title: ' +data['Title']+ \
' | Year: ' +data['Year']+ \
' | Rating: ' +data['imdbRating']+ \
' | Genre: ' +data['Genre']+ \
' | IMDB Link: http://imdb.com/title/' + data['imdbID']
willie.say(message)
movie.commands = ['movie', 'imdb']
movie.example = '.movie Movie Title'
if __name__ == '__main__':
print __doc__.strip()
| #!/usr/bin/env python
# -*- coding: utf8 -*-
"""
imdb.py - Willie Movie Information Module
Copyright © 2012, Elad Alfassa, <elad@fedoraproject.org>
Licensed under the Eiffel Forum License 2.
This module relies on imdbapi.com
"""
import json
import web
def imdb(willie, trigger):
"""
Returns some information about a movie, like Title, Year, Rating, Genre and IMDB Link.
"""
if not trigger.group(2):
return
word=trigger.group(2).rstrip()
word=word.replace(" ", "+")
uri="http://www.imdbapi.com/?t="+word
u = web.get_urllib_object(uri, 30)
data = json.load(u) #data is a Dict containing all the information we need
u.close()
if data['Response'] == 'False':
if 'Error' in data:
message = '[MOVIE] %s' % data['Error']
else:
willie.debug('movie', 'Got an error from the imdb api, search phrase was %s' % word, 'warning')
willie.debug('movie', str(data), 'warning')
message = '[MOVIE] Got an error from the IMDB api'
else:
message = '[MOVIE] Title: ' +data['Title']+ \
' | Year: ' +data['Year']+ \
' | Rating: ' +data['imdbRating']+ \
' | Genre: ' +data['Genre']+ \
' | IMDB Link: http://imdb.com/title/' + data['imdbID']
willie.say(message)
imdb.commands = ['movie', 'imdb']
imdb.example = '.movie Movie Title'
if __name__ == '__main__':
print __doc__.strip()
| mit | Python |
76b837fd6d783b9a67740b76af2fd1ec80ea229a | Fix for ddate weekday | Flat/JiyuuBot,Zaexu/JiyuuBot | modules/ddate.py | modules/ddate.py | def ddate(self, msginfo):
import datetime
import calendar
dSeasons = ["Chaos", "Discord", "Confusion", "Beureacracy", "The Aftermath"]
dDays = ["Sweetmorn", "Boomtime", "Pungenday", "Prickle-Prickle", "Setting Orange"]
year = int(datetime.datetime.now().strftime("%Y"))
month = int(datetime.datetime.now().strftime("%m"))
day = int(datetime.datetime.now().strftime("%d"))
today = datetime.date(year, month, day)
boolLeapYear = calendar.isleap(year)
if boolLeapYear and month == 2 and day == 29:
self.conman.gen_send("Today is St. Tib's Day, %s YOLD" % year + 1166, msginfo)
return 0
dayofYear = today.timetuple().tm_yday - 1
if boolLeapYear and dayofYear >=60:
dayofYear -= 1
dSeason, dDay = divmod(dayofYear, 73)
dDayName = (dayofYear)%5
dDay += 1
if 10 <= dDay % 100 < 20:
dDay = str(dDay) + 'th'
else:
dDay = str(dDay) + {1 : 'st', 2 : 'nd', 3 : 'rd'}.get(dDay % 10, "th")
self.conman.gen_send("Today is %s, the %s day of %s in the Year of Our Lady of Discord %d" % (dDays[dDayName], dDay, dSeasons[dSeason], year + 1166), msginfo)
self.commandlist["ddate"] = {
"type": MAPTYPE_COMMAND,
"function": ddate,
"help": "Prints the current Discordian date"
}
| def ddate(self, msginfo):
import datetime
import calendar
dSeasons = ["Chaos", "Discord", "Confusion", "Beureacracy", "The Aftermath"]
dDays = ["Sweetmorn", "Boomtime", "Pungenday", "Prickle-Prickle", "Setting Orange"]
year = int(datetime.datetime.now().strftime("%Y"))
month = int(datetime.datetime.now().strftime("%m"))
day = int(datetime.datetime.now().strftime("%d"))
today = datetime.date(year, month, day)
boolLeapYear = calendar.isleap(year)
if boolLeapYear and month == 2 and day == 29:
self.conman.gen_send("Today is St. Tib's Day, %s YOLD" % year + 1166, msginfo)
return 0
dayofYear = today.timetuple().tm_yday - 1
if boolLeapYear and dayofYear >=60:
dayofYear -= 1
dSeason, dDay = divmod(dayofYear, 73)
dDayName = (day-1)%5
dDay += 1
if 10 <= dDay % 100 < 20:
dDay = str(dDay) + 'th'
else:
dDay = str(dDay) + {1 : 'st', 2 : 'nd', 3 : 'rd'}.get(dDay % 10, "th")
self.conman.gen_send("Today is %s, the %s day of %s in the Year of Our Lady of Discord %d" % (dDays[dDayName], dDay, dSeasons[dSeason], year + 1166), msginfo)
self.commandlist["ddate"] = {
"type": MAPTYPE_COMMAND,
"function": ddate,
"help": "Prints the current Discordian date"
}
| agpl-3.0 | Python |
151e50e41ab7931cbce108817b21a4aefdffc8b8 | Simplify plugin | ropez/pytest-describe | pytest_describe/plugin.py | pytest_describe/plugin.py | import imp
import sys
import types
import pytest
def trace_function(funcobj, *args, **kwargs):
"""Call a function, and return its locals"""
funclocals = {}
def _tracefunc(frame, event, arg):
if event == 'call':
# Activate local trace for first call only
if frame.f_back.f_locals.get('_tracefunc') == _tracefunc:
return _tracefunc
elif event == 'return':
funclocals.update(frame.f_locals)
sys.settrace(_tracefunc)
try:
funcobj(*args, **kwargs)
finally:
sys.settrace(None)
return funclocals
def make_module_from_function(funcobj):
"""Evaluates the local scope of a function, as if it was a module"""
module = imp.new_module(funcobj.__name__)
module.__dict__.update(trace_function(funcobj))
return module
class DescribeBlock(pytest.Module):
"""Module-like object representing the scope of a describe block"""
def __init__(self, funcobj, path, parent):
super(DescribeBlock, self).__init__(path, parent)
self.funcobj = funcobj
def _makeid(self):
"""Magic that makes fixtures local to each scope"""
return self.parent.nodeid + '::' + self.funcobj.__name__
def _importtestmodule(self):
"""Import a describe block as if it was a module"""
return make_module_from_function(self.funcobj)
def funcnamefilter(self, name):
"""Treat all nested functions as tests, without requiring the 'test_' prefix"""
return not name.startswith('_')
def classnamefilter(self, name):
"""Don't allow test classes inside describe"""
return False
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__,
repr(self.funcobj.__name__))
def pytest_pycollect_makeitem(__multicall__, collector, name, obj):
if isinstance(obj, types.FunctionType):
for prefix in collector.config.getini('describe_prefixes'):
if obj.__name__.startswith(prefix):
return DescribeBlock(obj, collector.fspath, collector)
return __multicall__.execute()
def pytest_addoption(parser):
parser.addini("describe_prefixes", type="args", default=("describe",),
help="prefixes for Python describe function discovery")
| import imp
import sys
import types
import pytest
def trace_function(funcobj, *args, **kwargs):
"""Call a function, and return its locals"""
funclocals = {}
def _tracefunc(frame, event, arg):
if event == 'call':
# Activate local trace for first call only
if frame.f_back.f_locals.get('_tracefunc') == _tracefunc:
return _tracefunc
elif event == 'return':
funclocals.update(frame.f_locals)
sys.settrace(_tracefunc)
try:
funcobj(*args, **kwargs)
finally:
sys.settrace(None)
return funclocals
def make_module_from_function(funcobj):
"""Evaluates the local scope of a function, as if it was a module"""
module = imp.new_module(funcobj.__name__)
module.__dict__.update(trace_function(funcobj))
return module
class DescribeBlock(pytest.Module):
"""Module-like object representing the scope of a describe block"""
def __init__(self, funcobj, path, parent):
super(DescribeBlock, self).__init__(path, parent)
self.funcobj = funcobj
def _makeid(self):
"""Magic that makes fixtures local to each scope"""
return self.parent.nodeid + '::' + self.funcobj.__name__
def _importtestmodule(self):
"""Import a describe block as if it was a module"""
return make_module_from_function(self.funcobj)
def funcnamefilter(self, name):
"""Treat all nested functions as tests, without requiring the 'test_' prefix"""
if name.startswith('_'):
return False
for prefix in self.config.getini('describe_prefixes'):
if name.startswith(prefix):
return False
else:
return True
def classnamefilter(self, name):
"""Don't allow test classes inside describe"""
return False
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__,
repr(self.funcobj.__name__))
def pytest_pycollect_makeitem(__multicall__, collector, name, obj):
res = __multicall__.execute()
if res is not None:
return res
if isinstance(obj, types.FunctionType):
for prefix in collector.config.getini('describe_prefixes'):
if obj.__name__.startswith(prefix):
return DescribeBlock(obj, collector.fspath, collector)
def pytest_addoption(parser):
parser.addini("describe_prefixes", type="args", default=("describe",),
help="prefixes for Python describe function discovery")
| mit | Python |
b1168c523ed544f16d2de0979a0e427f414dfc54 | add a generic function to print a dictionary | sillyfellow/rosalind | python_village/rosalib.py | python_village/rosalib.py |
def is_integer(s):
try:
int(s)
return True
except ValueError:
return False
def hyopotenuse(a, b):
return (a * a) + (b * b)
def get_input_integers(count):
numbers = raw_input().split()
if len(numbers) != count or (not reduce(lambda x, y: is_integer(x) and y, numbers, True)):
raise "Input must be %d integers" % count
return [int(x) for x in numbers]
def print_dict(d):
for key, value in d.iteritems():
print key, value
|
def is_integer(s):
try:
int(s)
return True
except ValueError:
return False
def hyopotenuse(a, b):
return (a * a) + (b * b)
def get_input_integers(count):
numbers = raw_input().split()
if len(numbers) != count or (not reduce(lambda x, y: is_integer(x) and y, numbers, True)):
raise "Input must be %d integers" % count
return [int(x) for x in numbers]
| apache-2.0 | Python |
a706a22261af1cdf420eef25ef9b5d5722392226 | Test comment | dariusbakunas/rawdisk | rawdisk/util/rawstruct.py | rawdisk/util/rawstruct.py | """Helper class used as a parent class for most filesystem classes.
"""
import struct
import hexdump
import uuid
class RawStruct(object):
def __init__(self, data = None):
self._data = data
@property
def data(self):
return self._data
@property
def size(self):
return len(self._data)
@data.setter
def data(self, value):
self._data = value
def load_from_source(self, source, offset, length):
source.seek(offset)
self._data = source.read(length)
def get_chunk(self, offset, length):
return self.data[offset:offset+length]
def get_uuid(self, offset):
return uuid.UUID(bytes_le=self.get_string(offset, 16))
def get_field(self, offset, length, format):
return struct.unpack(format, self.data[offset:offset+length])[0]
def get_ubyte(self, offset):
return struct.unpack("B", self.data[offset:offset+1])[0]
def get_ushort(self, offset, big_endian = False):
if (big_endian):
return struct.unpack(">H", self.data[offset:offset+2])[0]
return struct.unpack("<H", self.data[offset:offset+2])[0]
def get_uint(self, offset, big_endian = False):
if (big_endian):
return struct.unpack(">I", self.data[offset:offset+4])[0]
return struct.unpack("<I", self.data[offset:offset+4])[0]
def get_ulong(self, offset, big_endian = False):
if (big_endian):
return struct.unpack(">L", self.data[offset:offset+4])[0]
return struct.unpack("<L", self.data[offset:offset+4])[0]
def get_ulonglong(self, offset, big_endian = False):
if (big_endian):
return struct.unpack(">Q", self.data[offset:offset+8])[0]
return struct.unpack("<Q", self.data[offset:offset+8])[0]
def get_string(self, offset, length):
return struct.unpack(str(length) + "s", self.data[
offset:offset+length
])[0]
def hexdump(self):
hexdump.hexdump(self._data) | import struct
import hexdump
import uuid
class RawStruct(object):
def __init__(self, data = None):
self._data = data
@property
def data(self):
return self._data
@property
def size(self):
return len(self._data)
@data.setter
def data(self, value):
self._data = value
def load_from_source(self, source, offset, length):
source.seek(offset)
self._data = source.read(length)
def get_chunk(self, offset, length):
return self.data[offset:offset+length]
def get_uuid(self, offset):
return uuid.UUID(bytes_le=self.get_string(offset, 16))
def get_field(self, offset, length, format):
return struct.unpack(format, self.data[offset:offset+length])[0]
def get_ubyte(self, offset):
return struct.unpack("B", self.data[offset:offset+1])[0]
def get_ushort(self, offset, big_endian = False):
if (big_endian):
return struct.unpack(">H", self.data[offset:offset+2])[0]
return struct.unpack("<H", self.data[offset:offset+2])[0]
def get_uint(self, offset, big_endian = False):
if (big_endian):
return struct.unpack(">I", self.data[offset:offset+4])[0]
return struct.unpack("<I", self.data[offset:offset+4])[0]
def get_ulong(self, offset, big_endian = False):
if (big_endian):
return struct.unpack(">L", self.data[offset:offset+4])[0]
return struct.unpack("<L", self.data[offset:offset+4])[0]
def get_ulonglong(self, offset, big_endian = False):
if (big_endian):
return struct.unpack(">Q", self.data[offset:offset+8])[0]
return struct.unpack("<Q", self.data[offset:offset+8])[0]
def get_string(self, offset, length):
return struct.unpack(str(length) + "s", self.data[
offset:offset+length
])[0]
def hexdump(self):
hexdump.hexdump(self._data) | bsd-3-clause | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.