| commit (stringlengths 40–40) | subject (stringlengths 1–3.25k) | old_file (stringlengths 4–311) | new_file (stringlengths 4–311) | old_contents (stringlengths 0–26.3k) | lang (stringclasses, 3 values) | proba (float64, 0–1) | diff (stringlengths 0–7.82k) |
|---|---|---|---|---|---|---|---|
49f68d89d28650609e92db515063d3847d614eb8
|
use isotropic sigma
|
tests/mesh/test_cylMeshInnerProducts.py
|
tests/mesh/test_cylMeshInnerProducts.py
|
from SimPEG import Mesh
import numpy as np
import sympy
from sympy.abc import r, t, z
import unittest
TOL = 1e-1
class CylInnerProducts_Test(unittest.TestCase):
def test_FaceInnerProduct(self):
# Here we will make up some j vectors that vary in space
# j = [j_r, j_z] - to test face inner products
j = sympy.Matrix([
r**2 * z,
r * z**2
])
# Create an isotropic sigma vector
Sig = sympy.Matrix([
[540/sympy.pi*(r*z)**2, 0 ],
[ 0 , 540/sympy.pi*(r*z)**4],
])
# Do the inner product! - we are in cyl coordinates!
jTSj = j.T*Sig*j
ans = sympy.integrate(
sympy.integrate(
sympy.integrate(r * jTSj, (r,0,1)), # we are in cyl coordinates
(t,0,2.*sympy.pi)),
            (z,0,1))[0]  # the [0] extracts the scalar from the 1x1 sympy Matrix
def get_vectors(mesh):
""" Get Vectors sig, sr. jx from sympy"""
f_jr = sympy.lambdify((r,z), j[0], 'numpy')
f_jz = sympy.lambdify((r,z), j[1], 'numpy')
f_sigr = sympy.lambdify((r,z), Sig[0], 'numpy')
f_sigz = sympy.lambdify((r,z), Sig[1], 'numpy')
jr = f_jr(mesh.gridFx[:,0], mesh.gridFx[:,2])
jz = f_jz(mesh.gridFz[:,0], mesh.gridFz[:,2])
sigr = f_sigr(mesh.gridCC[:,0], mesh.gridCC[:,2])
return sigr, np.r_[jr, jz]
n = 100.
mesh = Mesh.CylMesh([n, 1, n])
sig, jv = get_vectors(mesh)
MfSig = mesh.getFaceInnerProduct(sig)
numeric_ans = jv.T.dot(MfSig.dot(jv))
print('------ Testing Face Inner Product-----------')
print(' Analytic: {analytic}, Numeric: {numeric}'.format(analytic=ans, numeric=numeric_ans))
assert(np.abs(ans-numeric_ans) < TOL)
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000004
|
@@ -481,18 +481,18 @@
[
-54
+10
0/sympy.
@@ -542,10 +542,10 @@
,
-54
+10
0/sy
@@ -562,9 +562,9 @@
z)**
-4
+2
],\n
@@ -1932,16 +1932,19 @@
< TOL)\n\n
+\n\n\n
if __nam
|
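Note on the diff above: it rescales both diagonal entries of Sig to 100/pi*(r*z)**2, i.e. a single isotropic sigma. A minimal sympy sketch of the resulting analytic value (assumptions: same j as the test, unit cylinder, and the factor r as the cylindrical Jacobian):

import sympy
from sympy.abc import r, z

j = sympy.Matrix([r**2 * z, r * z**2])
sig = 100 / sympy.pi * (r * z)**2            # isotropic: one scalar for both components
jTSj = (j.T * (sig * sympy.eye(2)) * j)[0]
# nothing depends on theta, so that integral contributes a factor of 2*pi
ans = sympy.integrate(2 * sympy.pi * r * jTSj, (r, 0, 1), (z, 0, 1))
print(ans)  # 205/21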
5912b58233a795d5c6e6a377441c609f15634cb0
|
Add 'warning' as an alias to 'warn'
|
oslo/messaging/notify/notifier.py
|
oslo/messaging/notify/notifier.py
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import logging
import uuid
from oslo.config import cfg
import six
from stevedore import named
from oslo.messaging.openstack.common import timeutils
from oslo.messaging import serializer as msg_serializer
_notifier_opts = [
cfg.MultiStrOpt('notification_driver',
default=[],
help='Driver or drivers to handle sending notifications'),
cfg.ListOpt('notification_topics',
default=['notifications', ],
deprecated_name='topics',
deprecated_group='rpc_notifier2',
help='AMQP topic used for OpenStack notifications'),
]
_LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class _Driver(object):
def __init__(self, conf, topics, transport):
self.conf = conf
self.topics = topics
self.transport = transport
@abc.abstractmethod
def notify(self, ctxt, msg, priority):
pass
class Notifier(object):
"""Send notification messages.
The Notifier class is used for sending notification messages over a
messaging transport or other means.
Notification messages follow the following format::
{'message_id': str(uuid.uuid4()),
'publisher_id': 'compute.host1',
'timestamp': timeutils.utcnow(),
'priority': 'WARN',
'event_type': 'compute.create_instance',
'payload': {'instance_id': 12, ... }}
A Notifier object can be instantiated with a transport object and a
publisher ID:
notifier = notifier.Notifier(get_transport(CONF), 'compute')
and notifications are sent via drivers chosen with the notification_driver
    config option and on the topics chosen with the notification_topics config
option.
Alternatively, a Notifier object can be instantiated with a specific
driver or topic::
notifier = notifier.Notifier(RPC_TRANSPORT,
'compute.host',
driver='messaging',
topic='notifications')
Notifier objects are relatively expensive to instantiate (mostly the cost
of loading notification drivers), so it is possible to specialize a given
Notifier object with a different publisher id using the prepare() method::
notifier = notifier.prepare(publisher_id='compute')
notifier.info(ctxt, event_type, payload)
"""
def __init__(self, transport, publisher_id=None,
driver=None, topic=None,
serializer=None):
"""Construct a Notifier object.
:param transport: the transport to use for sending messages
:type transport: oslo.messaging.Transport
:param publisher_id: field in notifications sent, e.g. 'compute.host1'
:type publisher_id: str
:param driver: a driver to lookup from oslo.messaging.notify.drivers
:type driver: str
:param topic: the topic which to send messages on
:type topic: str
:param serializer: an optional entity serializer
:type serializer: Serializer
"""
self.conf = transport.conf
self.conf.register_opts(_notifier_opts)
self.transport = transport
self.publisher_id = publisher_id
self._driver_names = ([driver] if driver is not None
else self.conf.notification_driver)
self._topics = ([topic] if topic is not None
else self.conf.notification_topics)
self._serializer = serializer or msg_serializer.NoOpSerializer()
self._driver_mgr = named.NamedExtensionManager(
'oslo.messaging.notify.drivers',
names=self._driver_names,
invoke_on_load=True,
invoke_args=[self.conf],
invoke_kwds={
'topics': self._topics,
'transport': self.transport,
},
)
_marker = object()
def prepare(self, publisher_id=_marker):
"""Return a specialized Notifier instance.
Returns a new Notifier instance with the supplied publisher_id. Allows
sending notifications from multiple publisher_ids without the overhead
of notification driver loading.
:param publisher_id: field in notifications sent, e.g. 'compute.host1'
:type publisher_id: str
"""
return _SubNotifier._prepare(self, publisher_id)
def _notify(self, ctxt, event_type, payload, priority, publisher_id=None):
payload = self._serializer.serialize_entity(ctxt, payload)
ctxt = self._serializer.serialize_context(ctxt)
msg = dict(message_id=str(uuid.uuid4()),
publisher_id=publisher_id or self.publisher_id,
event_type=event_type,
priority=priority,
payload=payload,
timestamp=str(timeutils.utcnow()))
def do_notify(ext):
try:
ext.obj.notify(ctxt, msg, priority)
except Exception as e:
_LOG.exception("Problem '%(e)s' attempting to send to "
"notification system. Payload=%(payload)s",
dict(e=e, payload=payload))
if self._driver_mgr.extensions:
self._driver_mgr.map(do_notify)
def debug(self, ctxt, event_type, payload):
"""Send a notification at debug level.
:param ctxt: a request context dict
:type ctxt: dict
:param event_type: describes the event, e.g. 'compute.create_instance'
:type event_type: str
:param payload: the notification payload
:type payload: dict
"""
self._notify(ctxt, event_type, payload, 'DEBUG')
def info(self, ctxt, event_type, payload):
"""Send a notification at info level.
:param ctxt: a request context dict
:type ctxt: dict
:param event_type: describes the event, e.g. 'compute.create_instance'
:type event_type: str
:param payload: the notification payload
:type payload: dict
"""
self._notify(ctxt, event_type, payload, 'INFO')
def warn(self, ctxt, event_type, payload):
"""Send a notification at warning level.
:param ctxt: a request context dict
:type ctxt: dict
:param event_type: describes the event, e.g. 'compute.create_instance'
:type event_type: str
:param payload: the notification payload
:type payload: dict
"""
self._notify(ctxt, event_type, payload, 'WARN')
def error(self, ctxt, event_type, payload):
"""Send a notification at error level.
:param ctxt: a request context dict
:type ctxt: dict
:param event_type: describes the event, e.g. 'compute.create_instance'
:type event_type: str
:param payload: the notification payload
:type payload: dict
"""
self._notify(ctxt, event_type, payload, 'ERROR')
def critical(self, ctxt, event_type, payload):
"""Send a notification at critical level.
:param ctxt: a request context dict
:type ctxt: dict
:param event_type: describes the event, e.g. 'compute.create_instance'
:type event_type: str
:param payload: the notification payload
:type payload: dict
"""
self._notify(ctxt, event_type, payload, 'CRITICAL')
class _SubNotifier(Notifier):
_marker = Notifier._marker
def __init__(self, base, publisher_id):
self._base = base
self.conf = base.conf
self.transport = base.transport
self.publisher_id = publisher_id
self._serializer = self._base._serializer
self._driver_mgr = self._base._driver_mgr
def _notify(self, ctxt, event_type, payload, priority):
super(_SubNotifier, self)._notify(ctxt, event_type, payload, priority)
@classmethod
def _prepare(cls, base, publisher_id=_marker):
if publisher_id is cls._marker:
publisher_id = base.publisher_id
return cls(base, publisher_id)
|
Python
| 0.998216
|
@@ -7289,24 +7289,44 @@
d, 'WARN')\n\n
+    warning = warn\n\n
def erro
|
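The one-line diff adds a class-level alias so both spellings dispatch to the same method. A self-contained illustration of the pattern (the class and arguments here are placeholders, not the oslo code):

class Notifier(object):
    def warn(self, ctxt, event_type, payload):
        print('WARN', event_type)

    warning = warn  # alias: both names are bound to the same function object

n = Notifier()
n.warning({}, 'compute.create_instance', {'instance_id': 12})  # same as n.warn(...)
assert Notifier.warning is Notifier.warn  # holds on Python 3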
490230242d51d23650406085a7af92dfbb14c16d
|
Use shop ID from order
|
byceps/blueprints/shop/orders/views.py
|
byceps/blueprints/shop/orders/views.py
|
"""
byceps.blueprints.shop.orders.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort, g
from ....services.party import service as party_service
from ....services.shop.order import service as order_service
from ....services.shop.shop import service as shop_service
from ....services.snippet.transfer.models import Scope
from ....util.framework.blueprint import create_blueprint
from ....util.framework.templating import templated
from ...authentication.decorators import login_required
from ...snippet.templating import render_snippet_as_partial
blueprint = create_blueprint('shop_orders', __name__)
@blueprint.route('')
@login_required
@templated
def index():
"""List orders placed by the current user for the current party."""
current_user = g.current_user
party = party_service.find_party(g.party_id)
shop = shop_service.find_shop_for_party(party.id)
if shop:
orders = order_service.get_orders_placed_by_user_for_shop(
current_user.id, shop.id)
else:
orders = []
return {
'party_title': party.title,
'orders': orders,
}
@blueprint.route('/<uuid:order_id>')
@login_required
@templated
def view(order_id):
"""Show a single order (if it belongs to the current user and party)."""
current_user = g.current_user
order = order_service.find_order_with_details(order_id)
if order is None:
abort(404)
if order.placed_by_id != current_user.id:
# Order was not placed by the current user.
abort(404)
shop = shop_service.get_shop(order.shop_id)
if shop.party_id != g.party_id:
# Order does not belong to the current party.
abort(404)
template_context = {
'order': order,
}
if order.is_open:
template_context['payment_instructions'] \
= _get_payment_instructions(shop.id, order.order_number)
return template_context
def _get_payment_instructions(shop_id, order_number):
scope = Scope('shop', str(shop_id))
context = {'order_number': order_number}
return render_snippet_as_partial('payment_instructions', scope=scope,
context=context)
|
Python
| 0
|
@@ -1963,33 +1963,11 @@
ons(
-shop.id, order.order_numb
+ord
er)\n
@@ -2031,27 +2031,11 @@
ons(
-shop_id, order_numb
+ord
er):
@@ -2065,16 +2065,22 @@
p', str(
+order.
shop_id)
@@ -2108,24 +2108,30 @@
er_number':
+order.
order_number
|
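Reconstructed from the diff fragments above: _get_payment_instructions now takes the order itself and reads shop_id and order_number from it, so view() no longer unpacks them. A standalone sketch (the namedtuple and the tuple-returning stub are assumptions standing in for the byceps Scope and snippet renderer):

from collections import namedtuple

Order = namedtuple('Order', ['shop_id', 'order_number'])

def _get_payment_instructions(order):
    scope = ('shop', str(order.shop_id))           # stands in for Scope('shop', ...)
    context = {'order_number': order.order_number}
    return scope, context                          # stands in for render_snippet_as_partial

print(_get_payment_instructions(Order(shop_id=7, order_number='ORD-2019-001')))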
652711e9a4884a31be74df6ae791e47dcd401871
|
remove deprecated test suite declarations
|
account_partner_required/tests/__init__.py
|
account_partner_required/tests/__init__.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Account partner required module for OpenERP
# Copyright (C) 2014 Acsone (http://acsone.eu).
# @author Stéphane Bidoul <stephane.bidoul@acsone.eu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_account_partner_required
fast_suite = [
]
checks = [
test_account_partner_required,
]
|
Python
| 0
|
@@ -1074,71 +1074,4 @@
red\n
-\nfast_suite = [\n]\n\nchecks = [\n    test_account_partner_required,\n]\n
|
76453755c53aeabb082847b4c9635528490a8830
|
Update states.py
|
weiss/flows/states.py
|
weiss/flows/states.py
|
"""
This file defines the concrete control flow logic
Author: Ming Fang
"""
from weiss.flows.abstractState import State, AbstractState
from weiss.utils.switch import switch
from weiss.dialogue.actions import Action
"""
Definitions of the system states
"""
"""
Definition of the control flow
1. Next Random Comment
2. Next Opposite Comment
3. Next Positive Comment
4. Next Negative Comment
5. Next Random Entity (within current type)
6. Sentiment Stats
7. Entity Selection (base on key and within current type)
8. Type Selection
9. Greeting
10. Unknown Action
"""
"""
System initialization state
the beginning point of the dialog
"""
class SystemInitiative(AbstractState):
_npa = set([Action.EntitySelection,
Action.TypeSelection])
def __init__(self, uid):
AbstractState.__init__(self, uid)
@property
def sid(self):
return State.SystemInitiative
@property
def nextPossibleActions(self):
return self._npa
def transit(self, aid):
for case in switch(aid):
if case(Action.EntitySelection):
return Stete.EntitySelected
if case(Action.TypeSelection):
return State.TypeSelected
if case():
raise KeyError("Invaild action")
"""
Type selected state
The followings should be determined:
curr_tid
"""
class TypeSelected(AbstractState):
_npa = set([Action.NextRandomEntity,
Action.EntitySelection,
Action.TypeSelection])
def __init__(self, uid):
AbstractState.__init__(self, uid)
@property
def sid(self):
return State.TypeSelected
@property
def nextPossibleActions(self):
return self._npa
def transit(self, aid):
for case in switch(aid):
if case(Action.NextRandomEntity):
return State.EntitySelected
if case(Action.EntitySelection):
return State.EntitySelected
if case(Action.TypeSelection):
return State.TypeSelected
if case():
raise KeyError("Invaild action id")
"""
Entity selected state
The followings should be determined:
curr_tid
curr_eid
"""
class EntitySelected(AbstractState):
_npa = set([Action.NextRandomComment,
Action.NextPositiveComment,
Action.NextNegativeComment,
Action.NextRandomEntity,
Action.SentimentStats,
Action.EntitySelection,
Action.TypeSelection])
def __init__(self, uid):
AbstractState.__init__(self, uid)
@property
def sid(self):
return State.EntitySelected
@property
def nextPossibleActions(self):
return self._npa
def transit(self, aid):
for case in switch(aid):
if case(Action.NextRandomComment):
return State.CommentSelected
if case(Action.NextPositiveComment):
return State.CommentSelected
if case(Action.NextNegativeComment):
return State.CommentSelected
if case(Action.NextRandomEntity):
return State.EntitySelected
if case(Action.SentimentStats):
                return State.EntitySelected
if case(Action.EntitySelection):
return State.EntitySelected
if case(Action.TypeSelection):
return State.TypeSelected
if case():
raise KeyError("Invaild action id")
"""
Comment selected state
The followings should be determined:
curr_tid
curr_eid
curr_cid
"""
class CommentSelected(AbstractState):
_npa = set([Action.NextRandomComment,
Action.NextOppositeComment,
Action.NextPositiveComment,
Action.NextNegativeComment,
Action.NextRandomEntity,
Action.SentimentStats,
Action.EntitySelection,
Action.TypeSelection])
def __init__(self, uid):
AbstractState.__init__(self, uid)
@property
def sid(self):
return State.CommentSelected
@property
def nextPossibleActions(self):
return self._npa
def transit(self, aid):
for case in switch(aid):
if case(Action.NextRandomComment):
return State.CommentSelected
if case(Action.NextOppositeComment):
return State.CommentSelected
if case(Action.NextPositiveComment):
return State.CommentSelected
if case(Action.NextNegativeComment):
return State.CommentSelected
if case(Action.NextRandomEntity):
return State.EntitySelected
if case(Action.SentimentStats):
return State.CommentSelected
if case(Action.EntitySelection):
return State.EntitySelected
if case(Action.TypeSelection):
return State.TypeSelected
if case():
raise KeyError("Invaild action")
"""
Range Selected state
"""
class RangeSelected(AbstractState):
def __init__(self, uid):
AbstractState.__init__(self, uid)
@property
def sid(self):
return State.RangeSelected
@property
def nextPossibleActions(self):
raise NotImplementedError()
def transit(self, aid):
raise NotImplementedError()
|
Python
| 0.998823
|
@@ -1102,17 +1102,17 @@
eturn St
-e
+a
te.Entit
|
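The transit() methods above rely on weiss.utils.switch, which is not shown in this row; it presumably follows the well-known for/case recipe. A minimal runnable stand-in (an assumption, not the actual module):

class switch(object):
    """One-shot for/case recipe: iterating yields a single matcher."""
    def __init__(self, value):
        self.value = value

    def __iter__(self):
        yield self.match

    def match(self, *args):
        # case() with no arguments is the default branch
        return not args or self.value in args

aid = 3
for case in switch(aid):
    if case(1):
        print('one')
        break
    if case(3):
        print('three')
        break
    if case():
        print('unknown action')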
7706c282758a1fb6f4894993fad007a99879a554
|
Create score.py
|
score.py
|
score.py
|
"""
@authors: Nishanth and Nikhith !!
"""
from pycricbuzz import Cricbuzz
import sys
""" Writing a CLI for Live score """
try:
cric_obj = Cricbuzz() # cric_obj contains object instance of Cricbuzz Class
matches = cric_obj.matches()
except:
print "Connection dengindi bey!"
sys.exit(0)
# matches func is returning List of dictionaries
""" Key items in match dict : 1) status -- ex) Starts on Jun 15 at 09:30 GMT
2) mnum -- ex) 2nd Semi-Final (A2 VS B1)
3) mchdesc-- ex) BAN vs IND
4) srs -- ex) ICC Champions Trophy, 2017
5) mchstate- ex) preview / abandon / Result / complete
6) type -- ex) ODI
7) id -- ex) 4 / 6 (anything random given)
"""
"""CLI must contain commands for
-- current matches
-- selecting match by match id
-- getCommentary
"""
def upcomingmatches():
"""Prints upcoming matches list
"""
count = 1
for match in matches:
if match['mchstate'] == "preview":
print str(count)+". "+str(match['mchdesc'])+ " - "+ str(match['srs'])+"- - "+str(match['status'])
count = count + 1
def currentlive():
"""Prints Current LIVE MATCHES"""
count = 1
for match in matches:
if match['mchstate'] == "inprogress":
print str(match['mchdesc'])+" -> "+str(match['id'])
count = count + 1
if match['mchstate'] == "delay":
print str(match['mchdesc'])+" -> match has been delayed due to rain..! Enjoy the drizzle..!!"
if count == 1:
print "\nNO LIVE MATCHES RIGHT NOW!\n"
print "UPCOMING MATCHES TODAY!"
upcomingmatches()
else:
id = input("Enter match id : ")
gotolive(id)
def calculate_runrate(runs, overs):
balls = str(overs)
arr = balls.split('.')
if len(arr) == 2:
rr = float(int(arr[0])*6)+int(arr[1])
else:
rr = float(int(arr[0])*6)
return (float(runs)/rr)*6
def gotolive(matchid):
batobj = cric_obj.livescore(matchid)['batting']
bowlobj = cric_obj.livescore(matchid)['bowling']
print " "+str(batobj['team'])+" vs "+str(bowlobj['team'])+"\n"
if (bowlobj['score'] == []):
print "1st INNINGS: "+str(batobj['team'])+" => "+str(batobj['score'][0]['runs'])+"/"+str(batobj['score'][0]['wickets'])+" ("+str(batobj['score'][0]['overs'])+" Overs)"
print "Batting:"
try:
print " " + str(batobj['batsman'][0]['name']) + " : " + str(batobj['batsman'][0]['runs']) + " (" + str(batobj['batsman'][0]['balls']) + ")"
print " " + str(batobj['batsman'][1]['name']) + " : " + str(batobj['batsman'][1]['runs']) + " (" + str(batobj['batsman'][1]['balls']) + ")"
except:
print "Hurray.. Its out..!! "
print "Bowling:"
print " " + str(bowlobj['bowler'][0]['name']) + " : " + str(bowlobj['bowler'][0]['runs']) + " /" + str(bowlobj['bowler'][0]['wickets']) + " (" + str(bowlobj['bowler'][0]['overs']) + ")"
print " " + str(bowlobj['bowler'][1]['name']) + " : " + str(bowlobj['bowler'][1]['runs']) + " /" + str(bowlobj['bowler'][1]['wickets']) + " (" + str(bowlobj['bowler'][1]['overs']) + ")"
print "Runrate:"
print ' {:1.2f}'.format(calculate_runrate(str(batobj['score'][0]['runs']),str(batobj['score'][0]['overs'])))
else:
print "1st INNINGS: "+str(bowlobj['team'])+" => "+str(bowlobj['score'][0]['runs'])+"/"+str(bowlobj['score'][0]['wickets'])+" ("+str(bowlobj['score'][0]['overs'])+" Overs)"
print "2nd INNINGS: "+str(batobj['team'])+" => "+str(batobj['score'][0]['runs'])+"/"+str(batobj['score'][0]['wickets'])+" ("+str(batobj['score'][0]['overs'])+" Overs)"
print "Batting:"
try:
print " "+str(batobj['batsman'][0]['name'])+" : "+str(batobj['batsman'][0]['runs'])+" ("+str(batobj['batsman'][0]['balls'])+")"
print " " + str(batobj['batsman'][1]['name']) + " : " + str(batobj['batsman'][1]['runs']) + " (" + str(batobj['batsman'][1]['balls']) + ")"
except:
print "Hurray.. Its out..!! "
print "Bowling:"
print " " + str(bowlobj['bowler'][0]['name']) + " : " + str(bowlobj['bowler'][0]['runs'])+" /"+str(bowlobj['bowler'][0]['wickets']) + " (" + str(bowlobj['bowler'][0]['overs']) + ")"
print " " + str(bowlobj['bowler'][1]['name']) + " : " + str(bowlobj['bowler'][1]['runs']) + " /" + str(bowlobj['bowler'][1]['wickets']) + " (" + str(bowlobj['bowler'][1]['overs']) + ")"
print "Summary:"
print " " + str(cric_obj.livescore(4)['matchinfo']['status'])
while True:
currentlive()
print '\n' * 3
|
Python
| 0.000008
|
@@ -1526,19 +1526,31 @@
sc'])+"
-->
+ match id :
"+str(ma
|
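calculate_runrate above converts cricket's "overs.balls" notation into balls before dividing; a worked check of that arithmetic (a Python 3 restatement, not the file's exact code):

def calculate_runrate(runs, overs):
    parts = str(overs).split('.')
    # "12.3" means 12 overs and 3 balls: 12 * 6 + 3 = 75 balls
    balls = int(parts[0]) * 6 + (int(parts[1]) if len(parts) == 2 else 0)
    return float(runs) / balls * 6

assert abs(calculate_runrate(90, '12.3') - 7.2) < 1e-9  # 90 runs off 75 balls -> 7.20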
c4dd08d0d98ce28dd0f69270bae398b1b0ea293f
|
Fix mkdocs attr
|
momo/plugins/mkdocs.py
|
momo/plugins/mkdocs.py
|
import os
import shutil
import yaml
from momo.utils import run_cmd, mkdir_p, utf8_encode, txt_type
from momo.plugins.base import Plugin
class Mkdocs(Plugin):
mkdocs_configs = {
'theme': 'readthedocs',
}
momo_configs = {
'page_level': 1,
}
def setup(self):
configs = self.settings.plugins.get('mkdocs', {})
self.mkdocs_configs['site_name'] = self.settings.bucket.name
for k in configs:
if not k.startswith('momo_'):
self.mkdocs_configs[k] = configs[k]
for k in configs:
if k.startswith('momo_'):
self.momo_configs[k] = configs[k]
self.mkdocs_dir = os.path.join(self.settings.settings_dir, 'mkdocs')
self.docs_dir = os.path.join(self.mkdocs_dir, 'docs')
self.site_dir = os.path.join(self.mkdocs_dir, 'site')
shutil.rmtree(self.docs_dir)
mkdir_p(self.docs_dir)
mkdir_p(self.site_dir)
def _get_pages(self, root, level=0):
if level == self.momo_configs['page_level']:
filename = self._make_page(root)
return filename
else:
pages = [
{'Index': self._make_index_page(root, level + 1)}
]
pages += [
{elem.name: self._get_pages(elem, level + 1)}
for elem in root.node_svals
]
return pages
def _make_page(self, elem):
res = '%s.md' % os.path.join(*elem.path)
filename = os.path.join(self.docs_dir, res)
dirname = os.path.dirname(filename)
if dirname:
mkdir_p(dirname)
buf = []
with open(filename, 'w') as f:
buf.append(self._make_title(elem))
buf.append(self._make_attrs(elem))
buf.append(self._make_nodes(elem))
f.write(utf8_encode('\n'.join(buf)))
return res
def _make_index_page(self, elem, level):
base = os.path.join(*elem.path) if elem.path else ''
res = os.path.join(base, 'index.md')
filename = os.path.join(self.docs_dir, res)
dirname = os.path.dirname(filename)
if dirname:
mkdir_p(dirname)
buf = []
with open(filename, 'w') as f:
buf.append(self._make_title(elem))
buf.append(self._make_attrs(elem))
buf.append(self._make_nodes(elem, index=True, level=level))
f.write(utf8_encode('\n'.join(buf)))
return res
def _make_title(self, elem):
return '# %s' % elem.name
def _make_attrs(self, elem):
buf = []
buf.append('### Attributes')
for attr in elem.attr_svals:
buf.append('- %s: %s' % (attr.name,
self._link_attr_content(attr.content)))
return '\n'.join(buf)
def _link_attr_content(self, content):
if isinstance(content, txt_type) and content.startswith('http'):
content = '[%s](%s)' % (content, content)
return content
def _make_nodes(self, elem, index=False, level=None):
buf = []
if not index:
for node in elem.node_svals:
buf.append('## %s' % (node.name))
buf.append(self._make_attrs(node))
else:
buf.append('### Nodes')
for node in elem.node_svals:
if level == self.momo_configs['page_level']:
buf.append('- [%s](%s.md)' % (node.name, node.name))
else:
buf.append('- [%s](%s/index.md)' % (node.name, node.name))
return '\n'.join(buf)
def _make_mkdocs_yml(self):
mkdocs_yml = os.path.join(self.mkdocs_dir, 'mkdocs.yml')
with open(mkdocs_yml, 'w') as f:
yaml.dump(self.mkdocs_configs, f, default_flow_style=False,
allow_unicode=True)
def _make_home_page(self):
res = 'index.md'
filename = os.path.join(self.docs_dir, res)
buf = []
buf.append('# Home')
buf.append('Welcome to momo!')
with open(filename, 'w') as f:
f.write('\n'.join(buf))
return res
def _serve(self, args=None):
os.chdir(self.mkdocs_dir)
cmd = 'mkdocs'
cmd_args = ['serve']
if args is not None:
cmd_args.extend(args)
run_cmd(cmd=cmd, cmd_args=cmd_args)
def run(self, extra_args=None):
pages = self._get_pages(self.settings.bucket.root)
self.mkdocs_configs['pages'] = pages
self._make_mkdocs_yml()
self._serve(extra_args)
plugin = Mkdocs()
|
Python
| 0.000007
|
@@ -29,16 +29,28 @@
rt yaml\n
+import glob\n
from mom
@@ -960,16 +960,228 @@
ite_dir)
+\n        css_files = glob.glob(os.path.join(self.mkdocs_dir, '*.css'))\n        for css in css_files:\n            filename = os.path.basename(css)\n            os.symlink(css, os.path.join(self.docs_dir, filename))
\n\n    de
@@ -2889,32 +2889,61 @@
lem.attr_svals:\n
+            buf.append('\n')\n
buf.
@@ -2955,17 +2955,16 @@
d('- %s:
-
%s' % (a
@@ -2976,70 +2976,413 @@
ame,
-\n                                 self._link_attr_content(
+ self._make_attr_content(attr)))\n        return '\n'.join(buf)\n\n    def _make_attr_content(self, attr):\n        buf = []\n        if attr.has_items:\n            buf.append('\n')\n            for i, item in enumerate(attr.content, start=1):\n                buf.append('    - %s[%d]: %s' % (attr.name, i,\n                           self._link_attr_content(item)))\n        else:\n            buf.append('    %s' %
attr
@@ -3390,18 +3390,16 @@
content)
-))
\n
|
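The largest hunk in the diff extends setup(): every *.css file sitting next to mkdocs.yml is symlinked into docs/ so MkDocs picks it up. The same step in isolation (the directory arguments and the islink guard are assumptions):

import glob
import os

def link_css(mkdocs_dir, docs_dir):
    # mirror of the added loop: symlink each top-level CSS file into docs/
    for css in glob.glob(os.path.join(mkdocs_dir, '*.css')):
        target = os.path.join(docs_dir, os.path.basename(css))
        if not os.path.islink(target):
            os.symlink(css, target)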
a0fcd2a8986573b72871b7c04629c9e947997658
|
Fix docstring
|
tests/mock_vws/test_database_summary.py
|
tests/mock_vws/test_database_summary.py
|
"""
Tests for the mock of the database summary endpoint.
"""
from datetime import datetime, timedelta
import pytest
import requests
from freezegun import freeze_time
from requests import codes
from requests_mock import GET
from tests.conftest import VuforiaServerCredentials
from tests.mock_vws.utils import is_valid_transaction_id
from vws._request_utils import authorization_header, rfc_1123_date
@pytest.mark.usefixtures('verify_mock_vuforia')
class TestSummary:
"""
Tests for the mock of the database summary endpoint at `/summary`.
"""
def test_success(self,
vuforia_server_credentials: VuforiaServerCredentials,
) -> None:
"""It is possible to get a success response from a VWS endpoint which
requires authorization."""
date = rfc_1123_date()
content_type = 'application/json'
signature_string = authorization_header(
access_key=vuforia_server_credentials.access_key,
secret_key=vuforia_server_credentials.secret_key,
method=GET,
content=b'',
content_type=content_type,
date=date,
request_path='/summary',
)
headers = {
"Authorization": signature_string,
"Date": date,
"Content-Type": content_type,
}
response = requests.request(
method=GET,
url='https://vws.vuforia.com/summary',
headers=headers,
data=b'',
)
assert response.status_code == codes.OK
@pytest.mark.usefixtures('verify_mock_vuforia')
class TestDateHeader:
"""
Tests for what happens when the date header isn't as expected.
"""
def test_no_date_header(self,
vuforia_server_credentials:
VuforiaServerCredentials,
) -> None:
"""
A `BAD_REQUEST` response is returned when no date header is given.
"""
date = rfc_1123_date()
content_type = 'application/json'
signature_string = authorization_header(
access_key=vuforia_server_credentials.access_key,
secret_key=vuforia_server_credentials.secret_key,
method=GET,
content=b'',
content_type=content_type,
date=date,
request_path='/summary',
)
headers = {
"Authorization": signature_string,
"Content-Type": content_type,
}
response = requests.request(
method=GET,
url='https://vws.vuforia.com/summary',
headers=headers,
data=b'',
)
assert response.status_code == codes.BAD_REQUEST
assert response.json().keys() == {'transaction_id', 'result_code'}
assert is_valid_transaction_id(response.json()['transaction_id'])
assert response.json()['result_code'] == 'Fail'
@pytest.mark.parametrize(
['time_difference_from_now', 'expected_status', 'expected_result'],
[
(timedelta(minutes=4, seconds=50), codes.OK, 'Success'),
(-timedelta(minutes=4, seconds=50), codes.OK, 'Success'),
(timedelta(minutes=5, seconds=10), codes.FORBIDDEN,
'RequestTimeTooSkewed'),
(-timedelta(minutes=5, seconds=10), codes.FORBIDDEN,
'RequestTimeTooSkewed'),
],
ids=([
'Within Range After',
'Within Range Before',
'Out of Range After',
'Out of Range Before',
])
)
def test_date_within_range(self,
vuforia_server_credentials:
VuforiaServerCredentials,
time_difference_from_now,
expected_status,
expected_result,
) -> None:
"""
If a date header is within five minutes before or after the request
is set, no error is returned.
Because there is a small delay in sending requests and Vuforia isn't
consistent, some leeway is given.
"""
with freeze_time(datetime.now() + time_difference_from_now):
date = rfc_1123_date()
content_type = 'application/json'
signature_string = authorization_header(
access_key=vuforia_server_credentials.access_key,
secret_key=vuforia_server_credentials.secret_key,
method=GET,
content=b'',
content_type=content_type,
date=date,
request_path='/summary',
)
headers = {
"Authorization": signature_string,
"Date": date,
"Content-Type": content_type,
}
response = requests.request(
method=GET,
url='https://vws.vuforia.com/summary',
headers=headers,
data=b'',
)
assert response.status_code == expected_status
assert is_valid_transaction_id(response.json()['transaction_id'])
assert response.json()['result_code'] == expected_result
|
Python
| 0.00003
|
@@ -4064,16 +4064,17 @@
is se
+n
t, no er
@@ -4091,16 +4091,151 @@
urned.\n\n
+        If the date header is more than five minutes before or after the\n        request is sent, a `FORBIDDEN` response is returned.\n\n
|
55983401814bc0e7158d213885ebdfdbc7e02e9b
|
Add dependency on the requests module and refactor
|
DeployUtil/authentication.py
|
DeployUtil/authentication.py
|
import urllib.request
import http.cookiejar
import DeployUtil.toolsession as session
#TODO: give an indicator of success
#TODO: handle errors a bit better.
def do_pair(ip, pin, **_args):
# IF YOU DON'T DO THIS OVER HTTPS YOU WILL GET 308s to goto HTTPS
scheme = 'https://'
port = ''
api = '/api/authorize/pair?pin={pin}&persistent=0'
verb = 'POST'
request_url = scheme + ip + port + api.format_map({'pin':pin})
https_handler = session.create_toolsess_httpsHandler()
request = urllib.request.Request(url=request_url, method=verb)
cookies = urllib.request.HTTPCookieProcessor(http.cookiejar.MozillaCookieJar("deployUtil.cookies"))
opener = urllib.request.build_opener(https_handler, cookies)
resp = opener.open(request)
cookies.cookiejar.save(ignore_discard=True)
|
Python
| 0.000001
|
@@ -4,30 +4,24 @@
ort
-urllib.
request
+s
\nimport
http
@@ -20,61 +20,10 @@
ort
-http.cookiejar\nimport DeployUtil.toolsession as sessi
+js
on\n\n
@@ -192,16 +192,205 @@
o HTTPS\n
+\t# But we cannot verify our HTTPS cert yet because we cannot get it off\n\t# of all devices.\n\t# If the tooling gets smarter about what its talking to, then we can\n\t# make an educated choice.\n
\tscheme
@@ -469,24 +469,8 @@
=0'\n
-\tverb = 'POST'\n\n
\treq
@@ -535,361 +535,283 @@
)\n\n\t
-https_handler = session.create_toolsess_httpsHandler()\n\trequest = urllib.request.Request(url=request_url, method=verb)\n\n\tcookies = urllib.request.HTTPCookieProcessor(http.cookiejar.MozillaCookieJar("deployUtil.cookies"))\n\topener = urllib.request.build_opener(https_handler, cookies)\n\tresp = opener.open(request)\n\tcookies.cookiejar.save(ignore_discard=Tru
+with requests.Session() as session:\n\t\tresponse = session.post(request_url, verify=False)\n\t\tcookie_filename = 'deployUtil.cookies'\n\t\tcookies = requests.utils.dict_from_cookiejar(response.cookies)\n\t\twith open(cookie_filename,'w') as cookie_file:\n\t\t\tjson.dump(cookies, cookie_fil
e)\n
|
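The diff persists the pairing cookies as JSON; the matching load path is not part of this row, but a hedged sketch of what it could look like (the function name and file name are assumptions):

import json
import requests

def load_cookies(session, cookie_filename='deployUtil.cookies'):
    # inverse of the json.dump in the diff: rebuild the session's cookie jar
    with open(cookie_filename) as cookie_file:
        cookies = json.load(cookie_file)
    session.cookies = requests.utils.cookiejar_from_dict(cookies)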
40957fe0b273f92a28e0b5f27cc4a46ba5e1f2b8
|
Add coverage pragma
|
sktracker/trajectories/__init__.py
|
sktracker/trajectories/__init__.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import logging
log = logging.getLogger(__name__)
from .trajectories import Trajectories
try:
from . import draw
__all__ = ['Trajectories', 'draw']
except ImportError:
log.warning('''Matplotlib can't be imported,'''
'''drawing module won't be available ''')
__all__ = ['Trajectories']
|
Python
| 0.000003
|
@@ -262,16 +262,36 @@
es\n\ntry:
+  # pragma: no cover
\n    fro
@@ -364,16 +364,36 @@
rtError:
+  # pragma: no cover
\n    log
|
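'# pragma: no cover' is coverage.py's exclusion marker: the guarded import branches stop counting against line coverage. The same guard pattern standalone (matplotlib here is just an example of an optional dependency):

try:  # pragma: no cover
    import matplotlib
    HAS_MPL = True
except ImportError:  # pragma: no cover
    HAS_MPL = False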
0610cebeccbd8431906558506ba22654e1104ea9
|
fix test
|
corehq/ex-submodules/couchexport/tests/test_writers.py
|
corehq/ex-submodules/couchexport/tests/test_writers.py
|
# coding: utf-8
from codecs import BOM_UTF8
import StringIO
from couchexport.writers import ZippedExportWriter, CsvFileWriter, PythonDictWriter
from django.test import SimpleTestCase
from mock import patch, Mock
class ZippedExportWriterTests(SimpleTestCase):
def setUp(self):
self.zip_file_patch = patch('zipfile.ZipFile')
self.MockZipFile = self.zip_file_patch.start()
self.path_mock = Mock()
self.path_mock.get_path.return_value = 'tmp'
self.writer = ZippedExportWriter()
self.writer.tables = [self.path_mock]
self.writer.file = Mock()
def tearDown(self):
self.zip_file_patch.stop()
del self.writer
def test_zipped_export_writer_unicode(self):
mock_zip_file = self.MockZipFile.return_value
self.writer.table_names = {0: u'ひらがな'}
self.writer._write_final_result()
mock_zip_file.write.assert_called_with('tmp', 'ひらがな.csv')
def test_zipped_export_writer_utf8(self):
mock_zip_file = self.MockZipFile.return_value
self.writer.table_names = {0: '\xe3\x81\xb2\xe3\x82\x89\xe3\x81\x8c\xe3\x81\xaa'}
self.writer._write_final_result()
mock_zip_file.write.assert_called_with('tmp', 'ひらがな.csv')
class CsvFileWriterTests(SimpleTestCase):
def test_csv_file_writer_bom(self):
"""
CsvFileWriter should prepend a byte-order mark to the start of the CSV file for Excel
"""
writer = CsvFileWriter()
headers = ['ham', 'spam', 'eggs']
writer.open('Spam')
writer.write_row(headers)
writer.finish()
file_start = writer.get_file().read(6)
self.assertEqual(file_start, BOM_UTF8 + 'ham')
class HeaderNameTest(SimpleTestCase):
def test_names_matching_case(self):
writer = PythonDictWriter()
stringio = StringIO.StringIO()
table_index_1 = "case_Sensitive"
table_index_2 = "case_sensitive"
table_headers = [[]]
writer.open(
[
(table_index_1, table_headers),
(table_index_2, table_headers)
],
stringio
)
writer.close()
preview = writer.get_preview()
first_sheet_name = preview[0]['table_name']
second_sheet_name = preview[1]['table_name']
self.assertNotEqual(
first_sheet_name.lower(),
second_sheet_name.lower(),
"Sheet names must not be equal. Comparison is NOT case sensitive. Names were '{}' and '{}'".format(
first_sheet_name, second_sheet_name
)
)
def test_max_header_length(self):
writer = PythonDictWriter()
writer.max_table_name_size = 10
stringio = StringIO.StringIO()
table_index = "my_table_index"
table_headers = [("first header", "second header")]
writer.open(
[(table_index, table_headers)],
stringio
)
writer.close()
preview = writer.get_preview()
self.assertGreater(len(table_index), writer.max_table_name_size)
self.assertLessEqual(len(preview[0]['table_name']), writer.max_table_name_size)
|
Python
| 0.000002
|
@@ -510,32 +510,78 @@
dExportWriter()\n
+        self.writer.archive_basepath = 'path'\n
self.wri
|
92ebaf9e1507acf1fc10f4448fc00db43508f23b
|
Allow alternate string key lookup for enums
|
djstripe/enums.py
|
djstripe/enums.py
|
from enum import Enum as _Enum
from django.utils.decorators import classproperty
class Enum(_Enum):
@classproperty
def choices(cls):
return tuple((k, v.value) for k, v in cls.__members__.items())
class CardTokenizationMethod(Enum):
apple_pay = "Apple Pay"
android_pay = "Android Pay"
|
Python
| 0
|
@@ -158,17 +158,34 @@
tuple((
-k
+cls.keys.get(k, k)
, v.valu
@@ -225,16 +225,329 @@
ems())\n\n
+    @classproperty\n    def keys(cls):\n        # Returns a mapping of key overrides.\n        # This allows using syntactically-incorrect values as keys,\n        # such as keywords ("pass") or spaces ("Diners Club").\n        # This cannot be an attribute, otherwise it would show up as a choice.\n        return {}\n\n
\nclass C
|
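The keys hook added by the diff lets a member whose desired choice key is not a valid Python identifier (a keyword, or a name with spaces) still map to the right label. A standalone sketch of the same idea (plain classmethods replace Django's classproperty so it runs without Django):

from enum import Enum as _Enum

class Enum(_Enum):
    @classmethod
    def _keys(cls):
        return {}  # mapping of key overrides, empty by default

    @classmethod
    def choices(cls):
        keys = cls._keys()
        return tuple((keys.get(k, k), v.value) for k, v in cls.__members__.items())

class CardBrand(Enum):
    diners_club = 'Diners Club'

    @classmethod
    def _keys(cls):
        return {'diners_club': 'Diners Club'}

print(CardBrand.choices())  # (('Diners Club', 'Diners Club'),)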
eadfd0c784d077db4b48facb5e7161c76ede598a
|
remove commented out code
|
graphitepager/worker.py
|
graphitepager/worker.py
|
import datetime
import time
import redis
import requests
import requests.exceptions
from graphitepager.config import get_config
from graphitepager.description import get_descriptions
from graphitepager.description import missing_target_descriptions
from graphitepager.graphite_data_record import GraphiteDataRecord
from graphitepager.graphite_target import get_records
from graphitepager.level import Level
from graphitepager.redis_storage import RedisStorage
from graphitepager.utils import parse_args
from notifiers.notifier_proxy import NotifierProxy
from notifiers.hipchat_notifier import HipChatNotifier
from notifiers.pagerduty_notifier import PagerdutyNotifier
from notifiers.pushbullet_notifier import PushBulletNotifier
from notifiers.slack_notifier import SlackNotifier
from notifiers.stdout_notifier import StdoutNotifier
def update_notifiers(notifier_proxy, alert, record, graphite_url):
alert_key = '{} {}'.format(alert.get('name'), record.target)
alert_level, value = alert.check_record(record)
description, html_description = get_descriptions(
graphite_url,
alert,
record,
alert_level,
value
)
notifier_proxy.notify(
alert,
alert_key,
alert_level,
description,
html_description
)
def create_notifier_proxy(config):
redis_url = config.get('REDISTOGO_URL', config.get('REDIS_URL', None))
STORAGE = RedisStorage(redis, redis_url)
klasses = [
HipChatNotifier,
PagerdutyNotifier,
PushBulletNotifier,
StdoutNotifier,
SlackNotifier,
]
notifier_proxy = NotifierProxy()
for klass in klasses:
notifier = klass(STORAGE, config)
if notifier.enabled:
print 'Enabling {0}'.format(notifier._domain)
notifier_proxy.add_notifier(notifier)
return notifier_proxy
def verify(args):
config = get_config(args.config)
config.alerts()
print 'Valid configuration, good job!'
return
def run(args):
config = get_config(args.config)
alerts = config.alerts()
notifier_proxy = create_notifier_proxy(config)
graphite_url = config.get('GRAPHITE_URL')
while True:
start_time = time.time()
seen_alert_targets = set()
for alert in alerts:
target = alert.get('target')
try:
records = get_records(
graphite_url,
requests.get,
GraphiteDataRecord,
target,
from_=alert.get('from'),
)
except requests.exceptions.RequestException:
description, html_description = missing_target_descriptions(
graphite_url,
alert,
target,
Level.NO_DATA,
None
)
notifier_proxy.notify(
alert,
target,
Level.NO_DATA,
description,
html_description
)
records = []
for record in records:
name = alert.get('name')
target = record.target
if (name, target) not in seen_alert_targets:
# print 'Checking', (name, target)
update_notifiers(
notifier_proxy,
alert,
record,
graphite_url
)
seen_alert_targets.add((name, target))
# else:
# print 'Seen', (name, target)
time_diff = time.time() - start_time
sleep_for = 60 - time_diff
if sleep_for > 0:
sleep_for = 60 - time_diff
print 'Sleeping for {0} seconds at {1}'.format(
sleep_for,
datetime.datetime.utcnow()
)
time.sleep(60 - time_diff)
def main():
args = parse_args()
if args.command == 'verify':
return verify(args)
return run(args)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -3326,63 +3326,8 @@
ts:\n
-                    # print 'Checking', (name, target)\n
@@ -3585,83 +3585,8 @@
t))\n
-                # else:\n                #     print 'Seen', (name, target)\n
|
b02323eb57b99ede10549a82fc9c6443095c093d
|
fix max line error
|
accelerator_abstract/models/base_business_proposition.py
|
accelerator_abstract/models/base_business_proposition.py
|
import swapper
from django.db import models
from accelerator_abstract.models.accelerator_model import AcceleratorModel
EXCLUDED_FIELDS = ['id', 'created_at', 'updated_at', 'startup', 'applications']
PAIN_POINT_HELP_TEXT = ('Please describe what '
'problem you are trying to solve?')
SOLUTION_HELP_TEXT = ('What is innovative about '
'your solution, technology, business model etc?')
IMPACT_HELP_TEXT = ('Define the 1 year and 5 '
'year impact that you hope to accomplish'
' including the benefit on society. What measures do you have - for example:'
' Revenue & Profit, and CO2/Emissions removed or lives saved.')
MARKET_HELP_TEXT = ('How would you define your potential market '
'and what is the total addressable market size?')
VALUE_PROPOSITION_HELP_TEXT = ('What will be your messaging to users & '
'customers and how do you plan to spread'
' that message?')
SALES_HELP_TEXT = ('Which channels will you likely reach '
'your customers/users through?')
COMPETITORS_HELP_TEXT = ('Which organizations compete with'
' your current value offering and who might '
'do so in the future?')
PRODUCT_COMPLEMENT_HELP_TEXT = ('Which organizations and/or products'
' complement your offering in the'
' market? Do you know of and/or '
'anticipate any value chain partners?')
PRIMARY_ADVANTAGE_HELP_TEXT = ('What are your primary advantages'
' relative to existing or potential'
' competitors? Why will you win?')
DRIVER_HELP_TEXT = ('What are the key drivers of '
'business economics (price points, margins, etc)?')
INTELLECTUAL_PROPERTY_HELP_TEXT = ('What IP (Intellectual Property) exist'
' for your business or in your industry?')
REGULATORY_HELP_TEXT = ('What regulatory requirements exist '
'for your business or in your industry?')
TEAM_SUMMARY_HELP_TEXT = ('Please share some background information '
'on your team members and tell us what makes'
' your team special.')
INVESTORS_HELP_TEXT = ('Please tell us about current or '
'anticipated advisors and investors.')
VALIDATION_HELP_TEXT = ('What traction have you made to '
'date with market validation?')
class BaseBusinessProposition(AcceleratorModel):
startup = models.ForeignKey(
swapper.get_model_name('accelerator', 'Startup'),
related_name='business_propositions',
on_delete=models.CASCADE)
pain_point = models.TextField(blank=True,
null=True,
help_text=PAIN_POINT_HELP_TEXT,
verbose_name="Customer Pain Point")
solution = models.TextField(blank=True,
null=True,
help_text=SOLUTION_HELP_TEXT,
verbose_name='Solution')
impact = models.TextField(blank=True,
null=True,
help_text=IMPACT_HELP_TEXT,
verbose_name='One-Year / Five-Year Impact')
market = models.TextField(
blank=True,
null=True,
help_text=MARKET_HELP_TEXT,
verbose_name='Potential Market / Addressable Size')
value_proposition = models.TextField(
blank=True,
null=True,
help_text=VALUE_PROPOSITION_HELP_TEXT,
verbose_name='Value Proposition / Marketing Message')
sales = models.TextField(
blank=True,
null=True,
help_text=SALES_HELP_TEXT,
verbose_name='Sales and Distribution / Channels')
competitors = models.TextField(
blank=True,
null=True,
help_text=COMPETITORS_HELP_TEXT,
verbose_name='Current and Future Competitors')
product_complements = models.TextField(
blank=True,
null=True,
help_text=PRODUCT_COMPLEMENT_HELP_TEXT,
verbose_name='Product Complements / Value Chain Partners')
primary_advantages = models.TextField(
blank=True,
null=True,
help_text=PRIMARY_ADVANTAGE_HELP_TEXT,
verbose_name='Primary Advantages vs Competitors')
drivers = models.TextField(
blank=True,
null=True,
help_text=DRIVER_HELP_TEXT,
verbose_name='Key Drivers of Business Economics')
intellectual_property = models.TextField(
blank=True,
null=True,
help_text=INTELLECTUAL_PROPERTY_HELP_TEXT,
verbose_name='Intellectual Property')
regulation = models.TextField(
blank=True,
null=True,
help_text=REGULATORY_HELP_TEXT,
verbose_name='Regulatory Requirements')
team_summary = models.TextField(
blank=True,
null=True,
help_text=TEAM_SUMMARY_HELP_TEXT,
verbose_name='Team (Backgrounds, advantages)')
investors = models.TextField(
blank=True,
null=True,
help_text=INVESTORS_HELP_TEXT,
verbose_name='Current or anticipated advisors or investors')
validation = models.TextField(
blank=True,
null=True,
help_text=VALIDATION_HELP_TEXT,
verbose_name='Traction and Market Validation')
class Meta(AcceleratorModel.Meta):
abstract = True
def __str__(self):
return self.startup.organization.name
def complete(self):
fields = self._meta.get_fields(include_parents=False)
for field in fields:
is_text_field = field.name not in EXCLUDED_FIELDS
value = getattr(self, field.name)
if is_text_field and (not value or len(value) < 20):
return False
return True
|
Python
| 0.000005
|
@@ -589,16 +589,39 @@
society.
+'\n                    '
What me
@@ -712,16 +712,39 @@
missions
+'\n                    '
removed
|
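The fix works because adjacent string literals inside parentheses concatenate at compile time, so splitting a too-long help text changes nothing at runtime. For example:

IMPACT = ('Define the 1 year and 5 '
          'year impact that you hope to accomplish'
          ' including the benefit on society.')
assert IMPACT == ('Define the 1 year and 5 year impact that you hope '
                  'to accomplish including the benefit on society.')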
758a2c8e91651f087c0ebadaa55c1456c59609df
|
remove some logging
|
interpreter.py
|
interpreter.py
|
import parser, compiler, bytecode, objects, errors, prelude
class Interpreter(object):
def __init__(self):
self.last_bc = ''
self.context = compiler.Context()
self.import_prelude()
def import_prelude(self):
index = self.context.register_variable("print")
self.context.variables[index] = objects.Variable("print",objects.ExternalFunction("print",prelude.print_fn,1))
index = self.context.register_variable("readline")
self.context.variables[index] = objects.Variable("readline",objects.ExternalFunction("readline",prelude.readline,1))
def compile_interpret(self, ast, context=None):
if not context:
context = self.context
byte_code = compiler.compile(ast, context)
self.last_bc = ''
return self.interpret(byte_code)
def copy_context(self, code_from, code_to):
for k, v in code_from.variables.iteritems():
code_to.variables[k] = v
def interpret(self, byte_code, args=[]):
pc = 0 # program counter
stack = []
variables = [objects.Null()] * 255
assert(len(args) == len(byte_code.arguments))
#print "(running %s)" % byte_code.name
# copy args into inner context
for i in xrange(0,len(args)):
# TODO: this doesn't make sense, indexes change I think?
# Make sure these aren't getting overwritten
index = byte_code.arguments[i]
print "(arg %s going into %s)" % (args[i].dump(),index)
byte_code.variables[index] = objects.Variable("arg",args[i])
self.last_bc += byte_code.dump(True)
while pc < len(byte_code.instructions):
# the type of instruction and arg (a tuple)
opcode, arg = byte_code.instructions[pc]
#print "(%s %s %s)" % (pc, bytecode.reverse[opcode], arg)
# then increment
pc += 1
if opcode == bytecode.LOAD_CONST:
# grab a value from our constants and add to stack
value = byte_code.constants[arg]
stack.append(value)
elif opcode == bytecode.LOAD_VARIABLE:
var = byte_code.variables[arg]
assert(isinstance(var,objects.Variable))
#print "- appending value %s" % var.value.dump()
stack.append(var.value)
elif opcode == bytecode.STORE_VARIABLE:
value = stack.pop()
oldvar = byte_code.variables.get(arg,None)
if isinstance(oldvar,objects.Variable):
byte_code.variables[arg] = objects.Variable(oldvar.name,value)
else:
byte_code.variables[arg] = objects.Variable("arg",value)
stack.append(value)
elif opcode == bytecode.STORE_ARRAY:
values = []
for i in xrange(arg):
values.append(stack.pop())
stack.append(objects.Array(values))
elif opcode == bytecode.STORE_DICT:
values = objects.r_dict(objects.dict_eq,objects.dict_hash)
for i in xrange(arg):
values[stack.pop()] = stack.pop()
stack.append(objects.Dict(values))
elif opcode == bytecode.PRINT:
value = stack.pop()
print value.to_string()
stack.append(objects.Null())
elif opcode == bytecode.INDEX:
left = stack.pop()
right = stack.pop()
result = left.index(right)
stack.append(result)
elif opcode == bytecode.BINARY_ADD:
right = stack.pop()
left = stack.pop()
result = left.add(right)
stack.append(result)
elif opcode == bytecode.BINARY_SUB:
right = stack.pop()
left = stack.pop()
result = left.sub(right)
stack.append(result)
elif opcode == bytecode.BINARY_MUL:
right = stack.pop()
left = stack.pop()
result = left.mul(right)
stack.append(result)
elif opcode == bytecode.BINARY_DIV:
right = stack.pop()
left = stack.pop()
result = left.div(right)
stack.append(result)
elif opcode == bytecode.BINARY_NEQ:
right = stack.pop()
left = stack.pop()
result = left.equals(right)
result.boolvalue = not result.boolvalue
stack.append(result)
elif opcode == bytecode.BINARY_EQ:
right = stack.pop()
left = stack.pop()
result = left.equals(right)
stack.append(result)
elif opcode == bytecode.BINARY_GT:
right = stack.pop()
left = stack.pop()
result = left.gt(right)
stack.append(result)
elif opcode == bytecode.BINARY_GTE:
right = stack.pop()
left = stack.pop()
result = left.gte(right)
stack.append(result)
elif opcode == bytecode.BINARY_LT:
right = stack.pop()
left = stack.pop()
result = left.lt(right)
stack.append(result)
elif opcode == bytecode.BINARY_LTE:
right = stack.pop()
left = stack.pop()
result = left.lte(right)
stack.append(result)
elif opcode == bytecode.RETURN:
if arg == 1:
if len(stack) > 0:
result = stack.pop()
return result
return objects.Null()
elif opcode == bytecode.JUMP_IF_NOT_ZERO:
val = stack.pop()
assert(isinstance(val,objects.BaseBox))
result = val.equals(objects.Boolean(True))
assert(isinstance(result,objects.Boolean))
if result.value:
pc = arg
elif opcode == bytecode.JUMP_IF_ZERO:
val = stack.pop()
assert(isinstance(val,objects.BaseBox))
result = val.equals(objects.Boolean(True))
assert(isinstance(result,objects.Boolean))
if not result.value:
pc = arg
elif opcode == bytecode.JUMP:
pc = arg
elif opcode == bytecode.CALL:
assert(isinstance(byte_code.variables[arg],objects.Variable))
val = byte_code.variables[arg].value
if isinstance(val,objects.Function):
func = val.code
self.copy_context(byte_code,func)
args = []
if len(func.arguments) > len(stack):
raise Exception("Not enough arguments")
for i in range(0,len(func.arguments)):
args.append(stack.pop())
stack.append(self.interpret(func,args))
elif isinstance(val, objects.ExternalFunction):
# call
func = val.fn
arglen = val.args
args = []
for i in range(0,arglen):
args.append(stack.pop())
result = func(args)
stack.append(result)
else:
raise Exception("Not a function")
return stack[len(stack) - 1]
|
Python
| 0.000002
|
@@ -1538,24 +1538,25 @@
+#
print "(arg
|
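Beyond the silenced debug print, the row shows a textbook stack-machine loop: fetch an (opcode, arg) pair, advance pc, dispatch. A minimal runnable distillation (the opcode numbering is an assumption, not the bytecode module's):

LOAD_CONST, BINARY_ADD, RETURN = range(3)

def interpret(instructions, constants):
    pc, stack = 0, []
    while pc < len(instructions):
        opcode, arg = instructions[pc]
        pc += 1
        if opcode == LOAD_CONST:
            stack.append(constants[arg])
        elif opcode == BINARY_ADD:
            right, left = stack.pop(), stack.pop()
            stack.append(left + right)
        elif opcode == RETURN:
            return stack.pop()

print(interpret([(LOAD_CONST, 0), (LOAD_CONST, 1),
                 (BINARY_ADD, None), (RETURN, 1)], [2, 3]))  # 5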
ec5cc5d30b50e12c2e11f6206c220b4f2731e352
|
implement class
|
pgmapcss/misc/pgcache.py
|
pgmapcss/misc/pgcache.py
|
class PGCache:
def __init__(self, id, read_id=False, read_geo=False):
global PGCaches
try:
PGCaches
except:
PGCaches = {}
PGCaches[id] = self
def add(self, data, id=None, geo=None):
pass
def get(self, id=None):
pass
def query(self, qry):
pass
def get_PGCache(id, read_id=False, read_geo=False):
global PGCaches
try:
PGCaches
except:
PGCaches = {}
return PGCaches[id]
|
Python
| 0.000001
|
@@ -200,147 +200,2144 @@
elf\n
-\n    def add(self, data, id=None, geo=None):\n        pass\n\n    def get(self, id=None):\n        pass\n\n    def query(self, qry):\n        pass
+ self.id = id\n        self.read_id = read_id\n        self.read_geo = read_geo\n        self.cache_id = len(PGCaches)\n\n    def add(self, data, id=None, geo=None):\n        import pickle\n        try:\n            self.plan_add\n        except:\n            self.plan_add = plpy.prepare('insert into _pgmapcss_PGCache values (\'' + str(self.cache_id).replace("'", "''") + '\', $1, $2, $3)', [ 'bytea', 'text', 'geometry' ])\n\n        if id is None and self.read_id and 'id' in data:\n            id = data['id']\n        if geo is None and self.read_geo and 'geo' in data:\n            geo = data['geo']\n\n        plpy.execute(self.plan_add, [ pickle.dumps(data), id, geo ])\n\n    def get(self, id=None):\n        import pickle\n        if id is None:\n            try:\n                self.plan_get\n            except:\n                self.plan_get = plpy.prepare('select * from _pgmapcss_PGCache where cache_id=' + str(self.cache_id).replace("'", "''"), [])\n\n            cursor = plpy.cursor(self.plan_get, [])\n\n        else:\n            try:\n                self.plan_get_id\n            except:\n                self.plan_get_id = plpy.prepare('select * from _pgmapcss_PGCache where id=ANY($1) and cache_id=' + str(self.cache_id).replace("'", "''"), ['text[]'])\n\n            if type(id) == str:\n                id = [ id ]\n\n            cursor = plpy.cursor(self.plan_get_id, [id])\n\n        for r in cursor:\n            yield pickle.loads(r['data'])\n\n    def prepare(self, query, param_type=[]):\n        return plpy.prepare(query.replace('{table}', '(select data, id, geo from _pgmapcss_PGCache where cache_id=' + str(self.cache_id).replace("'", "''") + ') t'), param_type)\n\n    def execute(self, plan, param=[]):\n        import pickle\n        ret = []\n\n        for r in plpy.execute(plan, param):\n            if 'data' in r:\n                r['data'] = pickle.loads(r['data'])\n            ret.append(r)\n\n        return ret\n\n    def cursor(self, plan, param=[]):\n        import pickle\n        ret = []\n\n        for r in plpy.cursor(plan, param):\n            if 'data' in r:\n                r['data'] = pickle.loads(r['data'])\n            yield r
\n\nde
|
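The plpy calls in the diff only exist inside PostgreSQL's PL/Python, so that row cannot run standalone; this sketch reproduces the core idea (pickled payloads in a cache table keyed by cache_id) with sqlite3 as a stand-in, purely for illustration:

import pickle
import sqlite3

db = sqlite3.connect(':memory:')
db.execute('create table pgcache (cache_id integer, data blob, id text)')

def add(cache_id, data, id=None):
    db.execute('insert into pgcache values (?, ?, ?)',
               (cache_id, pickle.dumps(data), id))

def get(cache_id, id):
    rows = db.execute('select data from pgcache where cache_id=? and id=?',
                      (cache_id, id))
    for (blob,) in rows:
        yield pickle.loads(blob)

add(0, {'id': 'n1', 'tags': {'amenity': 'cafe'}}, id='n1')
print(list(get(0, 'n1')))  # [{'id': 'n1', 'tags': {'amenity': 'cafe'}}]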
1f2c7f1123762e2f45b9e9ed707e6839c0246ec4
|
Revert "model is required for collection since now"
|
mongoext/collection.py
|
mongoext/collection.py
|
from __future__ import absolute_import
import pymongo
import mongoext.cursor
import mongoext.scheme
class Collection(object):
CONNECTION = None
DATABASE = None
KEYS_COMPRESSION = None
NAME = None
def __init__(self, model):
self.model = model
self.__pymongo_collection = None
if self.KEYS_COMPRESSION:
self.keys_compression = dict(self.KEYS_COMPRESSION, _id='_id')
self.keys_uncompression = {v: k for k, v in self.keys_compression.iteritems()}
else:
self.keys_compression = self.keys_uncompression = None
@property
def collection(self):
if not self.__pymongo_collection:
self.__pymongo_collection = pymongo.MongoClient(**self.CONNECTION)[self.DATABASE][self.NAME]
return self.__pymongo_collection
@property
def database(self):
return self.collection.database
def clean(self, document):
for field in (f for f, v in document.items() if v is None):
del document[field]
def pack_field(self, key):
if not self.keys_compression:
return key
return self.keys_compression.get(key, key)
def pack_fields(self, document):
if not self.keys_compression:
return document
compressed_document = {}
for key, value in document.iteritems():
if not key.startswith('$'):
key = self.keys_compression[key]
if isinstance(value, dict):
value = self.pack_fields(value)
compressed_document[key] = value
return compressed_document
def unpack_fields(self, document):
if not self.keys_uncompression:
return document
uncompressed_document = {}
for key, value in document.iteritems():
if not key.startswith('$'):
key = self.keys_uncompression[key]
if isinstance(value, dict):
value = self.unpack_fields(value)
uncompressed_document[key] = value
return uncompressed_document
def find(self, filter=None, projection=None, skip=0):
pymongo_cursor = self.collection.find(
filter=filter and self.pack_fields(filter),
projection=projection and self.pack_fields(projection),
skip=skip,
)
return mongoext.cursor.Cursor(self, pymongo_cursor)
def find_one(self, filter_or_id=None, *args, **kw):
if isinstance(filter_or_id, dict):
filter_or_id = self.pack_fields(filter_or_id)
document = self.collection.find_one(filter_or_id, *args, **kw)
if not document:
return
document = self.unpack_fields(document)
if self.model:
return self.model(**document)
else:
return document
def find_one_and_replace(self, filter, replacement, projection=None):
pymongo_cursor = self.collection.find_one_and_replace(
filter=filter and self.pack_fields(filter),
replacement=replacement and self.pack_fields(replacement),
projection=projection and self.pack_fields(projection),
)
return mongoext.cursor.Cursor(self, pymongo_cursor)
def insert(self, documents):
pymongo_documents = map(dict, documents)
pymongo_documents = [self.pack_fields(d) for d in pymongo_documents]
for document in pymongo_documents:
self.clean(document)
return self.collection.insert_many(pymongo_documents).inserted_ids
def insert_one(self, document):
document = dict(document)
self.clean(document)
document = self.pack_fields(document)
return self.collection.insert_one(document).inserted_id
def save(self, origin):
document = dict(origin)
if self.model and isinstance(origin, self.model):
mongoext.scheme.process(origin._scheme, document)
if document.get('_id'):
self.find_one_and_replace(
filter={'_id': document['_id']},
replacement=dict(document),
)
_id = document['_id']
else:
_id = self.insert_one(document)
if self.model and isinstance(origin, self.model):
origin._id = _id
else:
origin['_id'] = _id
return _id
def count(self):
return self.collection.count()
def distinct(self, key):
key = self.pack_field(key)
return self.collection.distinct(key)
def drop(self):
return self.collection.drop()
def remove(self, spec=None, multi=True):
if spec is None:
return self.collection.remove(multi=multi)
spec = self.pack_fields(spec)
return self.collection.remove(spec, multi=multi)
def update(self, spec, document, multi=False):
spec = self.pack_fields(spec)
document = dict(document)
document = self.pack_fields(document)
self.clean(document)
self.collection.update(spec, document, multi=multi)
|
Python
| 0
|
@@ -235,24 +235,29 @@
(self, model
+=None
):%0A s
|
6094b147dccc4abf3ef23d5e54b1e23a955d6ecb
|
remove prints
|
slider/templatetags/slider_tags.py
|
slider/templatetags/slider_tags.py
|
# -*- coding: utf-8 -*-
from django import template
from slider.models import SliderImage
register = template.Library()
@register.assignment_tag
def get_slider_images(limit=False, randomize=True, slider=1):
qs = SliderImage.objects.filter(is_visible=True,slider=slider)
print randomize
if randomize:
qs = qs.order_by('?')
if limit:
qs = qs[0:limit]
return qs
|
Python
| 0.000012
|
@@ -278,29 +278,8 @@
%0A
- print randomize%0A%0A
|
267fafde515cea4663a3c08ca433ee11cc9c2f79
|
Add retry & timeout in pipeline tests. Problem: test failures happen when trying to get the source code provider config before it is created for a new project
|
tests/integration/suite/test_pipeline.py
|
tests/integration/suite/test_pipeline.py
|
import pytest
from .pipeline_common import MockGithub
from .conftest import ProjectContext, rancher
MOCK_GITHUB_PORT = 4016
MOCK_GITHUB_HOST = "localhost:4016"
MOCK_GITHUB_REPO_URL = 'https://github.com/octocat/Hello-World.git'
MOCK_GITHUB_USER = 'octocat'
GITHUB_TYPE = 'github'
@pytest.fixture(scope="module")
def mock_github():
server = MockGithub(port=MOCK_GITHUB_PORT)
server.start()
yield server
server.shutdown_server()
@pytest.mark.nonparallel
def test_pipeline_set_up_github(admin_pc, mock_github):
client = admin_pc.client
set_up_pipeline_github(admin_pc)
configs = client.list_source_code_provider_config()
gh = None
for c in configs:
if c.type == "githubPipelineConfig":
gh = c
assert gh is not None
assert gh.enabled is True
assert gh.disable
providers = client.list_source_code_provider()
assert len(providers) == 1
gh_provider = providers.data[0]
assert gh_provider.type == 'githubProvider'
assert gh_provider.login
creds = client.list_source_code_credential()
assert len(creds) == 1
assert creds.data[0].sourceCodeType == GITHUB_TYPE
assert creds.data[0].loginName == MOCK_GITHUB_USER
repos = client.list_source_code_repository()
assert len(repos) == 1
assert repos.data[0].sourceCodeType == GITHUB_TYPE
assert repos.data[0].url == MOCK_GITHUB_REPO_URL
@pytest.mark.nonparallel
def test_pipeline_set_up_github_with_custom_role(admin_mc,
admin_pc,
mock_github,
user_factory,
remove_resource):
# Create a new user with custom global role
user = user_factory(globalRoleId="user-base")
remove_resource(user)
# Preference creation triggers user ns creation
user.client.create_preference(name="language", value="\"en-us\"")
client = admin_mc.client
project = admin_pc.project
# Add this user as project-owner
prtb_owner = client.create_project_role_template_binding(
projectId=project.id,
roleTemplateId="project-owner",
userId=user.user.id)
remove_resource(prtb_owner)
url = project.links.self + '/schemas'
user_pc = ProjectContext(None, project,
rancher.Client(url=url,
verify=False,
token=user.client.token))
set_up_pipeline_github(user_pc)
user_client = user_pc.client
creds = user_client.list_source_code_credential()
assert len(creds) == 1
assert creds.data[0].sourceCodeType == GITHUB_TYPE
assert creds.data[0].loginName == MOCK_GITHUB_USER
repos = user_client.list_source_code_repository()
assert len(repos) == 1
assert repos.data[0].sourceCodeType == GITHUB_TYPE
assert repos.data[0].url == MOCK_GITHUB_REPO_URL
@pytest.mark.nonparallel
def test_pipeline_disable_github(admin_pc, mock_github):
client = admin_pc.client
set_up_pipeline_github(admin_pc)
configs = client.list_source_code_provider_config()
gh = None
for c in configs:
if c.type == "githubPipelineConfig":
gh = c
assert gh is not None
assert gh.enabled is True
assert gh.disable
gh.disable()
providers = client.list_source_code_provider()
assert len(providers) == 0
@pytest.mark.nonparallel
def test_pipeline_github_log_in_out(admin_pc, mock_github):
client = admin_pc.client
set_up_pipeline_github(admin_pc)
providers = client.list_source_code_provider()
gh_provider = providers.data[0]
creds = client.list_source_code_credential()
creds.data[0].refreshrepos()
repos = client.list_source_code_repository()
assert len(repos) == 1
repos_by_cred = creds.data[0].repos()
assert len(repos_by_cred) == 1
creds.data[0].logout_action()
creds = client.list_source_code_credential()
assert len(creds) == 0
gh_provider.login(code='test_code')
creds = client.list_source_code_credential()
assert len(creds) == 1
def set_up_pipeline_github(user_pc):
client = user_pc.client
configs = client.list_source_code_provider_config()
gh = None
for c in configs:
if c.type == "githubPipelineConfig":
gh = c
assert gh is not None
gh.testAndApply(code="test_code",
hostname=MOCK_GITHUB_HOST,
tls=False,
clientId="test_id",
clientSecret="test_secret")
|
Python
| 0.000002
|
@@ -7,16 +7,28 @@
pytest%0A
+import time%0A
from .pi
@@ -4236,144 +4236,53 @@
-client = user_pc.client%0A configs = client.list_source_code_provider_config()%0A gh = None%0A for c in configs:%0A if c.type ==
+gh = get_source_code_provider_config(user_pc,
%22gi
@@ -4300,36 +4300,17 @@
eConfig%22
-:%0A gh = c
+)
%0A ass
@@ -4485,16 +4485,16 @@
st_id%22,%0A
-
@@ -4533,8 +4533,411 @@
ecret%22)%0A
+%0A%0Adef get_source_code_provider_config(user_pc, config_type):%0A client = user_pc.client%0A start_time = int(time.time())%0A while int(time.time()) - start_time %3C 30:%0A configs = client.list_source_code_provider_config()%0A for c in configs:%0A if c.type == config_type:%0A return c%0A time.sleep(3)%0A raise Exception('Timeout getting %7B0%7D'.format(config_type))%0A
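The helper added in the diff above replaces a one-shot lookup with a poll-until-timeout loop. A minimal standalone sketch of the same pattern, assuming only a callable that lists items (the 30 s timeout and 3 s interval mirror the diff; everything else is illustrative):

import time

def wait_for(fetch, predicate, timeout=30, interval=3):
    # Poll fetch() until predicate() matches an item or the timeout expires.
    start = time.time()
    while time.time() - start < timeout:
        for item in fetch():
            if predicate(item):
                return item
        time.sleep(interval)
    raise Exception('Timeout waiting for a matching item')

# Hypothetical usage against the test's client object:
# config = wait_for(client.list_source_code_provider_config,
#                   lambda c: c.type == 'githubPipelineConfig')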
|
2378a64ab1e106c8f6f455a9023d350eaf627767
|
add manual
|
oa_manual.py
|
oa_manual.py
|
from collections import defaultdict
from time import time
from util import elapsed
# things to set here:
# license, free_metadata_url, free_pdf_url
# free_fulltext_url is set automatically from free_metadata_url and free_pdf_url
def get_overrides_dict():
override_dict = defaultdict(dict)
# cindy wu example
override_dict["10.1038/nature21360"]["free_pdf_url"] = "https://arxiv.org/pdf/1703.01424.pdf"
# example from twitter
override_dict["10.1021/acs.jproteome.5b00852"]["free_pdf_url"] = "http://pubs.acs.org/doi/pdfplus/10.1021/acs.jproteome.5b00852"
# have the unpaywall example go straight to the PDF, not the metadata page
override_dict["10.1098/rspa.1998.0160"]["free_pdf_url"] = "https://arxiv.org/pdf/quant-ph/9706064.pdf"
# missed, not in BASE, from Maha Bali in email
override_dict["10.1080/13562517.2014.867620"]["free_pdf_url"] = "http://dar.aucegypt.edu/bitstream/handle/10526/4363/Final%20Maha%20Bali%20TiHE-PoD-Empowering_Sept30-13.pdf"
# otherwise links to figshare match that only has data, not the article
override_dict["10.1126/science.aaf3777"]["free_pdf_url"] = None
override_dict["10.1126/science.aaf3777"]["free_metadata_url"] = None
#otherwise links to a metadata page that doesn't have the PDF because have to request a copy: https://openresearch-repository.anu.edu.au/handle/1885/103608
override_dict["10.1126/science.aad2622"]["free_pdf_url"] = "https://lra.le.ac.uk/bitstream/2381/38048/6/Waters%20et%20al%20draft_post%20review_v2_clean%20copy.pdf"
# otherwise led to http://www.researchonline.mq.edu.au/vital/access/services/Download/mq:39727/DS01 and authorization error
override_dict["10.1111/j.1461-0248.2008.01185.x"]["free_pdf_url"] = None
# override old-style webpage
override_dict["10.1210/jc.2016-2141"]["free_pdf_url"] = "https://academic.oup.com/jcem/article-lookup/doi/10.1210/jc.2016-2141"
override_dict["10.1210/jc.2016-2141"]["evidence"] = "hybrid manual"
return override_dict
|
Python
| 0
|
@@ -1987,16 +1987,198 @@
anual%22%0A%0A
+ # not indexing this location yet, from @rickypo%0A override_dict%5B%2210.1207/s15327957pspr0203_4%22%5D%5B%22free_pdf_url%22%5D = %22http://www2.psych.ubc.ca/~schaller/528Readings/Kerr1998.pdf%22%0A%0A
retu
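The override table above leans on defaultdict(dict), so a new DOI key can be assigned a field without first creating the inner dict. A short illustration (the DOI and URL are made up):

from collections import defaultdict

overrides = defaultdict(dict)
overrides['10.1234/fake.doi']['free_pdf_url'] = 'https://example.org/paper.pdf'  # no KeyError
assert overrides['10.1234/fake.doi'] == {'free_pdf_url': 'https://example.org/paper.pdf'}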
|
8206ea76804cf08298eeab8673b2326440aa8663
|
check for existing bonds before drawing
|
orbis/gui/sketchpad.py
|
orbis/gui/sketchpad.py
|
import matplotlib
import matplotlib.patches
import numpy
import wx
from plots import Plot
#====================================================================================
class SketchPad(Plot):
"""sketch pad for drawing molecules"""
ATOM_RADIUS = 0.1
PICK_TOLERANCE = 5
#----------------------------------------------------------------------
def __init__(self,*args,**kwargs):
super(SketchPad,self).__init__(*args,**kwargs)
self.axes = self.figure.add_subplot(1,1,1)
self.axes.set_aspect("equal")
#---------------------------------------------------------------------------
def on_button_up(self,event):
super(SketchPad,self).on_button_up(event)
if self.new_atom_requested():
self.add_atom()
#---------------------------------------------------------------------------
def new_atom_requested(self):
return self.was_click() and not self.was_pick()
#---------------------------------------------------------------------------
def add_atom(self):
"""Add a new atom to the sketchpad"""
coords = (self.mouse_up_x,self.mouse_up_y)
circ = matplotlib.patches.Ellipse(coords,self.ATOM_RADIUS,self.ATOM_RADIUS,picker=self.PICK_TOLERANCE)
self.axes.add_patch(circ)
self.figure.canvas.draw()
if __name__ == "__main__":
app = wx.App()
frame = wx.Frame(None)
sp = SketchPad(frame)
frame.Show()
app.MainLoop()
|
Python
| 0
|
@@ -547,32 +547,596 @@
equal%22)%0A
+self.up_atom = None%0A self.down_atom = None%0A #---------------------------------------------------------------------------%0A def on_button_down(self,event):%0A super(SketchPad,self).on_button_down(event)%0A self.down_atom = self.atom_at_event_point(event)%0A #---------------------------------------------------------------------------%0A def atom_at_event_point(self,event):%0A for patch in self.axes.patches:%0A event_in_atom,_ = patch.contains(event)%0A if event_in_atom:%0A return patch
%0A #----------
@@ -1294,32 +1294,96 @@
event)%0A %0A
+ self.up_atom = self.atom_at_event_point(event)%0A %0A
if self.
@@ -1444,194 +1444,1341 @@
-%0A%0A #---------------------------------------------------------------------------%0A def new_atom_requested(self):%0A return self.was_click() and not self.was_pick()
+elif self.new_bond_requested() and not self.bond_exists(self.up_atom,self.down_atom):%0A self.add_bond()%0A %0A #---------------------------------------------------------------------------%0A def on_pick(self,event):%0A super(SketchPad,self).on_pick(event)%0A #---------------------------------------------------------------------------%0A def new_atom_requested(self):%0A return self.was_click() and not self.was_pick() %0A #---------------------------------------------------------------------------%0A def new_bond_requested(self): %0A start_and_finish_atoms = None not in (self.up_atom, self.down_atom)%0A unique_atoms = self.up_atom is not self.down_atom%0A return start_and_finish_atoms and unique_atoms%0A #---------------------------------------------------------------------------%0A def bond_exists(self,atom_1,atom_2): %0A bond_coords = %5Bsorted(bond.get_xydata().tolist()) for bond in self.axes.lines%5D%0A bond_to_check = sorted(%5Blist(atom_1.xy),list(atom_2.xy)%5D)%0A return bond_to_check in bond_coords%0A #---------------------------------------------------------------------------%0A def get_atom_locations(self):%0A %22%22%22returns xy points of all atoms on sketchpad%22%22%22%0A return %5Batom.xy for atom in self.axes.patches%5D
%0A
@@ -3022,15 +3022,21 @@
hes.
-Ellipse
+CirclePolygon
(coo
@@ -3056,33 +3056,16 @@
_RADIUS,
-self.ATOM_RADIUS,
picker=s
@@ -3082,16 +3082,30 @@
OLERANCE
+,resolution=40
)%0A
@@ -3166,28 +3166,323 @@
.draw()%0A
-
+#---------------------------------------------------------------------------%0A def add_bond(self):%0A %22%22%22add a new bond between down_atom and up_atom%22%22%22%0A x1,y1 = self.down_atom.xy%0A x2,y2 = self.up_atom.xy%0A self.axes.plot(%5Bx1,x2%5D,%5By1,y2%5D)%0A self.figure.canvas.draw()
%0A %0Aif
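The bond_exists check introduced above treats a bond as an unordered pair of endpoints. The same idea in self-contained form, independent of matplotlib (coordinates are made up):

def bond_key(p1, p2):
    # Sort the two endpoints so (a, b) and (b, a) produce the same key.
    return tuple(sorted([tuple(p1), tuple(p2)]))

existing = {bond_key((0.0, 0.0), (1.0, 1.0))}

def bond_exists(p1, p2):
    return bond_key(p1, p2) in existing

assert bond_exists((1.0, 1.0), (0.0, 0.0))      # reversed order still matches
assert not bond_exists((0.0, 0.0), (2.0, 2.0))  # unseen pair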
|
f6740a7b2662ce8ad4112757663cf0e4ab184394
|
convert all sample fields to their respective types
|
rowprocsv.py
|
rowprocsv.py
|
"""
Module for reading and exporting csv files exported from Concept2 RowPro
"""
import datetime
import tcx
class RowProCSV:
HEADER_SUMMARY = 'Date,TotalTime,TotalDistance,'
FIELDS_SUMMARY = [
'date', 'total_time', 'total_distance', 'avg_pace', 'unit', 'origin', 'total_cals', 'duty_cycle', 'type',
'format', 'slide', 'session_id', 'rowfile_id', 'avg_hr', 'last_hr', 'offset'
]
HEADER_SAMPLES = 'Time,Distance,Pace,Watts,Cals,SPM,HR,DutyCycle,Rowfile_Id'
FIELDS_SAMPLES = ['time_ms', 'distance', 'pace', 'watts', 'cals', 'spm', 'hr', 'duty_cycle', 'rowfile_id']
date = None
datetime = None
total_time = None
total_distance = None
avg_pace = None
total_cals = None
slide = False
avg_hr = None
last_hr = None
samples = []
def __init__(self, filename):
lines = []
try:
with open(filename, 'r') as fp:
lines = fp.read().split("\r\n")
except IOError as e:
print 'Could not read file {}: {}'.format(filename, e)
summary_found = False
samples_found = False
while len(lines):
line = lines.pop(0)
if not line:
continue
if line.startswith(self.HEADER_SUMMARY):
line = lines.pop(0)
summary_data = line.split(',')
if len(summary_data) != len(self.FIELDS_SUMMARY):
print 'Warning: summary line only has {} fields, {} expected'.format(len(summary_data),
len(self.FIELDS_SUMMARY))
for field in self.FIELDS_SUMMARY:
if len(summary_data):
value = summary_data.pop(0)
if hasattr(self, field) is not None:
setattr(self, field, value)
# parse the date
try:
self.datetime = datetime.datetime.strptime(self.date, '%d/%m/%Y %H:%M:%S')
except Exception as ex:
print 'Error parsing date {}: {}'.format(self.date, ex)
# parse the slide value
self.slide = True if self.slide == 'True' else False
summary_found = True
continue
elif line.startswith(self.HEADER_SAMPLES):
while len(lines):
line = lines.pop(0).strip()
if not line:
break
sample_data = line.split(',')
sample = {}
for field in self.FIELDS_SAMPLES:
sample[field] = sample_data.pop(0) if len(sample_data) else None
# convert time from milliseconds to fractional seconds
try:
sample['time'] = float(sample['time_ms']) / 1000.0
except ValueError:
print 'Error converting "{}" to float'.format(sample['time_ms'])
self.samples.append(sample)
samples_found = True
break
if not summary_found:
print 'Warning: summary section not found in file'
if not samples_found:
print 'Warning: samples section not found in file'
def get_data(self):
return {
'datetime': self.datetime,
'total_time': self.total_time,
'total_distance': self.total_distance,
'avg_pace': self.avg_pace,
'total_cals': self.total_cals,
'slide': self.slide,
'avg_hr': self.avg_hr,
'last_hr': self.last_hr,
'samples': self.samples,
}
|
Python
| 0.002177
|
@@ -507,16 +507,26 @@
PLES = %5B
+%0A (
'time_ms
@@ -532,84 +532,227 @@
s',
-'distance', 'pace', 'watts', 'cals', 'spm', 'hr', 'duty_cycle', 'rowfile_id'
+int),%0A ('distance', float),%0A ('pace', float),%0A ('watts', float),%0A ('cals', float),%0A ('spm', int),%0A ('hr', int),%0A ('duty_cycle', float),%0A ('rowfile_id', None),%0A
%5D%0A%0A
@@ -2804,32 +2804,44 @@
for field
+, field_type
in self.FIELDS_
@@ -2877,21 +2877,11 @@
-sample%5Bfield%5D
+val
= s
@@ -2929,16 +2929,95 @@
e None%0A%0A
+ if field_type is not None and val is not None:%0A
@@ -3095,32 +3095,40 @@
+
try:%0A
@@ -3144,59 +3144,46 @@
-sample%5B'time'%5D = float(sample%5B'time_ms'%5D) / 1000.0%0A
+ val = field_type(val)%0A
@@ -3233,32 +3233,40 @@
+
+
print 'Error con
@@ -3277,30 +3277,229 @@
ing
-%22%7B%7D%22 to float'.format(
+field %7B%7D value %22%7B%7D%22 to %7B%7D'.format(field, val, str(field_type))%0A%0A sample%5Bfield%5D = val%0A%0A # convert time from milliseconds to fractional seconds%0A sample%5B'time'%5D =
samp
@@ -3511,17 +3511,25 @@
ime_ms'%5D
-)
+ / 1000.0
%0A%0A
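The diff turns FIELDS_SAMPLES into (name, type) pairs and coerces each CSV value, keeping the raw string when conversion fails. A hedged sketch of that conversion loop (the field list and sample line are illustrative, not the full RowPro schema):

FIELDS = [('time_ms', int), ('distance', float), ('rowfile_id', None)]

def parse_sample(line):
    values = line.split(',')
    sample = {}
    for (field, field_type), raw in zip(FIELDS, values):
        if field_type is not None and raw is not None:
            try:
                raw = field_type(raw)
            except ValueError:
                pass  # like the diff: report and fall back to the raw string
        sample[field] = raw
    return sample

print(parse_sample('2500,10.5,abc'))
# {'time_ms': 2500, 'distance': 10.5, 'rowfile_id': 'abc'}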
|
d3d9e0f5c0da8408bcdb241509cb7dd1f41fd4bd
|
use hash when title not present
|
src/you_get/extractors/imgur.py
|
src/you_get/extractors/imgur.py
|
#!/usr/bin/env python
from ..common import *
from ..extractor import VideoExtractor
from .universal import *
class Imgur(VideoExtractor):
name = "Imgur"
stream_types = [
{'id': 'original'},
{'id': 'thumbnail'},
]
def prepare(self, **kwargs):
if re.search(r'imgur\.com/a/', self.url):
# album
content = get_content(self.url)
album = match1(content, r'album\s*:\s*({.*}),') or \
match1(content, r'image\s*:\s*({.*}),')
album = json.loads(album)
count = album['album_images']['count']
images = album['album_images']['images']
ext = images[0]['ext']
self.streams = {
'original': {
'src': ['http://i.imgur.com/%s%s' % (i['hash'], ext)
for i in images],
'size': sum([i['size'] for i in images]),
'container': ext[1:]
},
'thumbnail': {
'src': ['http://i.imgur.com/%ss%s' % (i['hash'], '.jpg')
for i in images],
'container': 'jpg'
}
}
self.title = album['title']
elif re.search(r'i\.imgur\.com/', self.url):
# direct image
_, container, size = url_info(self.url)
self.streams = {
'original': {
'src': [self.url],
'size': size,
'container': container
}
}
self.title = r1(r'i\.imgur\.com/([^./]*)', self.url)
else:
# gallery image
content = get_content(self.url)
image = json.loads(match1(content, r'image\s*:\s*({.*}),'))
ext = image['ext']
self.streams = {
'original': {
'src': ['http://i.imgur.com/%s%s' % (image['hash'], ext)],
'size': image['size'],
'container': ext[1:]
},
'thumbnail': {
'src': ['http://i.imgur.com/%ss%s' % (image['hash'], '.jpg')],
'container': 'jpg'
}
}
self.title = image['title']
def extract(self, **kwargs):
if 'stream_id' in kwargs and kwargs['stream_id']:
i = kwargs['stream_id']
if 'size' not in self.streams[i]:
self.streams[i]['size'] = urls_size(self.streams[i]['src'])
site = Imgur()
download = site.download_by_url
download_playlist = site.download_by_url
|
Python
| 0.000083
|
@@ -2308,24 +2308,41 @@
age%5B'title'%5D
+ or image%5B'hash'%5D
%0A%0A def ex
|
c55f21aa4925f6227086dedca2a3f839db98d8e1
|
implement unit tests for debug command
|
tests/lib/cmdline/commands/test_debug.py
|
tests/lib/cmdline/commands/test_debug.py
|
# Copyright 2015 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import skip
import lib.cmdline.commands as commands
from .test_base import BaseCommandTest
class DebugTest(BaseCommandTest):
def setUp(self):
super().setUp()
self.command_class = commands.Debug
self.alias_commands = ['do_dbg']
@skip('NI')
def test_print_state(self):
pass
@skip('NI')
def test_interactive(self):
pass
@skip('NI')
def test_new_state(self):
pass
|
Python
| 0.000005
|
@@ -582,24 +582,29 @@
unittest
+.mock
import
skip%0A%0Aim
@@ -595,20 +595,21 @@
import
-skip
+patch
%0A%0Aimport
@@ -857,183 +857,939 @@
-@skip('NI')%0A def test_print_state(self):%0A pass%0A%0A @skip('NI')%0A def test_interactive(self):%0A pass%0A%0A @skip('NI')%0A def test_new_state(self):%0A pass
+# note: need patch the default action using the mangled named%0A @patch('lib.cmdline.commands.Debug._Debug__print_state')%0A def test_default_command_action(self, mock_print_state):%0A debug_cmd = self.get_instance()%0A debug_cmd.do_debug('')%0A self.assertTrue(mock_print_state.called)%0A%0A @patch('builtins.print')%0A def test_print_state(self, mock_print):%0A debug_cmd = self.get_instance()%0A debug_cmd.do_debug('--print-state')%0A self.assertTrue(mock_print.called)%0A mock_print.assert_called_with(debug_cmd.engine)%0A%0A @patch('code.interact')%0A def test_interactive(self, mock_interact):%0A debug_cmd = self.get_instance()%0A debug_cmd.do_debug('--interact')%0A self.assertTrue(mock_interact.called)%0A%0A def test_new_state(self):%0A debug_cmd = self.get_instance()%0A debug_cmd.do_debug('--new-state')%0A self.assertTrue(self.mock_engine.new_game.called)
%0A
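The rewritten tests patch a double-underscore method, which only works through its name-mangled form _ClassName__method. A minimal self-contained illustration (the class here is invented, not the real commands.Debug):

from unittest import mock

class Debug:
    def __print_state(self):        # mangled to _Debug__print_state
        print('real state')

    def run(self):
        self.__print_state()

with mock.patch.object(Debug, '_Debug__print_state') as fake:
    Debug().run()
    assert fake.called              # the mock intercepted the private call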
|
ecdcfe6d3e5f076f92b38b4a76d2975bce2bc4a2
|
add -b and -sp
|
rna_tools/tools/md/rna_minimize.py
|
rna_tools/tools/md/rna_minimize.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import print_function
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
import argparse
def get_parser():
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-v", "--verbose",
action="store_true", help="be verbose")
parser.add_argument("file", help="", default="") # nargs='+')
parser.add_argument("--pymol",
action="store_true", help="be verbose")
return parser
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
if list != type(args.file):
args.file = [args.file]
for f in args.file:
print(f, '...')
pdbout = f.replace('.pdb','') + '_min.pdb'
pdb = PDBFile(f)
log = f.replace('.pdb','') + '.log'
modeller = Modeller(pdb.topology, pdb.positions)
forcefield = ForceField('amber14-all.xml', 'amber14/tip3pfb.xml')
modeller.addHydrogens(forcefield)
#modeller.addSolvent(forcefield, ionicStrength=0.1*molar)
# modeller.addSolvent(forcefield, model='tip5p')
#modeller.addSolvent(forcefield, padding=0.5*nanometers)
modeller.addSolvent(forcefield, boxSize=Vec3(5.0, 3.5, 3.5)*nanometers)
system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.NoCutoff, #nonbondedMethod=PME,
nonbondedCutoff=1*nanometer, constraints=HBonds)
integrator = LangevinIntegrator(300*kelvin, 1/picosecond, 0.002*picoseconds)
simulation = Simulation(modeller.topology, system, integrator)
simulation.context.setPositions(modeller.positions)
simulation.minimizeEnergy()
# from http://zarbi.chem.yale.edu/ligpargen/openMM_tutorial.html
position = simulation.context.getState(getPositions=True).getPositions()
energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
app.PDBFile.writeFile(simulation.topology, position,
open(pdbout, 'w'))
print('Energy at Minima is %3.3f kcal/mol' % (energy._value * KcalPerKJ))
print('saved ', pdbout)
if args.pymol:
os.system('open %s' % out)
|
Python
| 0.000004
|
@@ -43,16 +43,19 @@
-*-%0A%22%22%22%0A
+ddd
%0A%22%22%22%0Afro
@@ -90,30 +90,24 @@
nction%0Afrom
-simtk.
openmm.app i
@@ -119,22 +119,16 @@
*%0Afrom
-simtk.
openmm i
@@ -144,13 +144,14 @@
rom
-simtk
+openmm
.uni
@@ -200,16 +200,64 @@
rgparse%0A
+from rna_tools.tools.mq.lib.timex import timex%0A%0A
%0Adef get
@@ -561,16 +561,175 @@
gs='+')%0A
+ parser.add_argument(%22-b%22, %22--box-size%22, help=%22%22, default=1, type=float) # nargs='+')%0A parser.add_argument(%22-sp%22, %22--solv-padding%22, action=%22store_true%22)%0A
pars
@@ -1037,16 +1037,62 @@
'...')%0A
+%0A t = timex.Timex()%0A t.start()%0A%0A
@@ -1260,24 +1260,25 @@
itions)%0A
+%0A
forcefie
@@ -1273,49 +1273,249 @@
-forcefield = ForceField('amber14-all.xml'
+#ff = 'charmm36.xml' #ff14SB.xml' #amber14sb.xml' # 'amber14-all.xml'%0A ff = 'amber14-all.xml'%0A #ff = 'amberfb15.xml'%0A #ff = 'amber14/RNA.OL3.xml'%0A %0A #ff = 'amber99sb.xml'%0A%0A forcefield = ForceField(ff
, 'a
@@ -1700,24 +1700,25 @@
tip5p')%0A
+%0A
#modelle
@@ -1701,33 +1701,59 @@
ip5p')%0A%0A
-#
+bs = args.box_size%0A
modeller.addSolv
@@ -1772,40 +1772,206 @@
ld,
-padding=0.5*nanometers)%0A
+boxSize=Vec3(bs, bs, bs)*nanometers)%0A if args.solv_padding:%0A print(1*nanometers)%0A modeller.addSolvent(forcefield, padding=1*nanometers)%0A # 5.0, 3.5, 3.5%0A #
mode
@@ -2011,29 +2011,23 @@
ze=Vec3(
-5.0, 3.5, 3.5
+2, 2, 2
)*nanome
@@ -2024,32 +2024,33 @@
2)*nanometers)%0A
+%0A
system =
@@ -2976,12 +2976,36 @@
%25s' %25 out)%0A
+%0A print(t.end())%0A
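The -b/--box-size and -sp/--solv-padding options added above are a typed value flag plus a boolean switch. Just that argparse part, as a standalone sketch (help strings are guesses at intent):

import argparse

parser = argparse.ArgumentParser(description='minimization options (sketch)')
parser.add_argument('-b', '--box-size', default=1, type=float,
                    help='cubic water box edge length, assumed to be in nm')
parser.add_argument('-sp', '--solv-padding', action='store_true',
                    help='solvate with padding instead of a fixed box')

args = parser.parse_args(['-b', '2.5', '-sp'])
assert args.box_size == 2.5 and args.solv_padding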
|
482f9ffaf1c2998fafc924a91b07656d3c054c91
|
fix string
|
bin/extract_darkmatter.py
|
bin/extract_darkmatter.py
|
#!/usr/bin/env python
import argparse
import leveldb
import os
import shutil
import sys
from Bio import SeqIO
def main(args):
parser = argparse.ArgumentParser(description="Script to extract darkmatter - predicted proteins with no similarities")
parser.add_argument("-i", "--input", dest="input", help="Name of input genecall fasta file.")
parser.add_argument("-o", "--output", dest="output", help="Name of output darkmatter fasta file.")
parser.add_argument("-s", "--sims", dest="sims", help="Name of similarity file")
parser.add_argument("-d", "--db", dest="db", default=".", help="Directory to store LevelDB, default CWD")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Print informational messages")
args = parser.parse_args()
if ('sims' not in args) or (os.stat(args.sims).st_size == 0):
print "Similarity file was omitted or is empty, copying %s to %s ... " % (args.input, args.output)
shutil.copyfile(args.input, args.output)
return 0
db = leveldb.LevelDB(args.db)
shdl = open(args.sims, 'rU')
if args.verbose:
print "Reading file %s ... " % args.sims
for line in shdl:
parts = line.strip().split('\t')
db.Put(parts[0], 1)
shdl.close()
if args.verbose:
print "Done"
print "Reading file %s ... " % args.input
ihdl = open(args.input, 'rU')
ohdl = open(args.output, 'w')
g_num = 0
d_num = 0
for rec in SeqIO.parse(ihdl, 'fasta'):
g_num += 1
try:
val = db.Get(rec.id)
except KeyError:
d_num += 1
ohdl.write("%s\n%s\n"%(rec.id, str(rec.seq).upper()))
ihdl.close()
ohdl.close()
if args.verbose:
print "Done: %d darkmatter genes found out of %d total" %(d_num, g_num)
return 0
if __name__ == "__main__":
sys.exit( main(sys.argv) )
|
Python
| 0.999999
|
@@ -1256,17 +1256,19 @@
rts%5B0%5D,
-1
+%22X%22
)%0A %0A
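The one-character fix above matters because LevelDB keys and values are raw (byte) strings, so db.Put(parts[0], 1) with an int value fails. A sketch of the constraint using a plain dict as a stand-in for leveldb.LevelDB:

db = {}  # stand-in for leveldb.LevelDB(path)

def put(key, value):
    if not isinstance(value, (str, bytes)):
        raise TypeError('LevelDB values must be strings/bytes, got %r' % type(value))
    db[key] = value

put('read_123', 'X')    # marker value, as in the fixed code
try:
    put('read_456', 1)  # the pre-fix bug: an int value
except TypeError as e:
    print(e)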
|
75b56a313dda12299f8283016a584e1d455567ee
|
remove ignored radius_units from glyphs.py example
|
examples/glyphs/glyphs.py
|
examples/glyphs/glyphs.py
|
import numpy as np
from bokeh.models import ColumnDataSource, DataRange1d, Plot, LinearAxis, Grid
from bokeh.models.widgets import VBox, Tabs, Panel
from bokeh.models.glyphs import (AnnularWedge, Annulus, Arc, Bezier, Circle, Line, MultiLine, Oval,
Patch, Patches, Quad, Quadratic, Ray, Rect, Segment, Square, Wedge, CircleX, Triangle,
Cross, Diamond, InvertedTriangle, SquareX, Asterisk, SquareCross, DiamondCross, CircleCross)
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.browserlib import view
N = 9
x = np.linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)
xpts = np.array([-.09, -.12, .0, .12, .09])
ypts = np.array([-.1, .02, .1, .02, -.1])
source = ColumnDataSource(dict(
x = x,
y = y,
sizes = sizes,
xs = [ xpts + xx for xx in x ],
ys = [ ypts + yy for yy in y ],
xp02 = x + 0.2,
xp01 = x + 0.1,
xm01 = x - 0.1,
yp01 = y + 0.1,
ym01 = y - 0.1,
))
xdr = DataRange1d(sources=[source.columns("x")])
ydr = DataRange1d(sources=[source.columns("y")])
def screen(value):
return dict(value=value, units="screen")
glyphs = [
("annular_wedge", AnnularWedge(x="x", y="y", inner_radius=screen(10), outer_radius=screen(20), start_angle=0.6, end_angle=4.1, fill_color="#8888ee")),
("annulus", Annulus(x="x", y="y", inner_radius=screen(10), outer_radius=screen(20), fill_color="#7FC97F")),
("arc", Arc(x="x", y="y", radius=screen(20), start_angle=0.6, end_angle=4.1, line_color="#BEAED4", line_width=3)),
("bezier", Bezier(x0="x", y0="y", x1="xp02", y1="y", cx0="xp01", cy0="yp01", cx1="xm01", cy1="ym01", line_color="#D95F02", line_width=2)),
("line", Line(x="x", y="y", line_color="#F46D43")),
("multi_line", MultiLine(xs="xs", ys="ys", line_color="#8073AC", line_width=2)),
("oval", Oval(x="x", y="y", width=screen(15), height=screen(25), angle=-0.7, fill_color="#1D91C0")),
("patch", Patch(x="x", y="y", fill_color="#A6CEE3")),
("patches", Patches(xs="xs", ys="ys", fill_color="#FB9A99")),
("quad", Quad(left="x", right="xm01", top="y", bottom="ym01", fill_color="#B3DE69")),
("quadratic", Quadratic(x0="x", y0="y", x1="xp02", y1="y", cx="xp01", cy="yp01", line_color="#4DAF4A", line_width=3)),
("ray", Ray(x="x", y="y", length=45, angle=-0.7, line_color="#FB8072", line_width=2)),
("rect", Rect(x="x", y="y", width=screen(10), height=screen(20), angle=-0.7, fill_color="#CAB2D6")),
("segment", Segment(x0="x", y0="y", x1="xm01", y1="ym01", line_color="#F4A582", line_width=3)),
("wedge", Wedge(x="x", y="y", radius=screen(15), start_angle=0.6, end_angle=4.1, fill_color="#B3DE69")),
]
markers = [
("circle", Circle(x="x", y="y", radius=0.1, radius_units="data", fill_color="#3288BD")),
("circle_x", CircleX(x="x", y="y", size="sizes", line_color="#DD1C77", fill_color=None)),
("circle_cross", CircleCross(x="x", y="y", size="sizes", line_color="#FB8072", fill_color=None, line_width=2)),
("square", Square(x="x", y="y", size="sizes", fill_color="#74ADD1")),
("square_x", SquareX(x="x", y="y", size="sizes", line_color="#FDAE6B", fill_color=None, line_width=2)),
("square_cross", SquareCross(x="x", y="y", size="sizes", line_color="#7FC97F", fill_color=None, line_width=2)),
("diamond", Diamond(x="x", y="y", size="sizes", line_color="#1C9099", line_width=2)),
("diamond_cross", DiamondCross(x="x", y="y", size="sizes", line_color="#386CB0", fill_color=None, line_width=2)),
("triangle", Triangle(x="x", y="y", size="sizes", line_color="#99D594", line_width=2)),
("inverted_triangle", InvertedTriangle(x="x", y="y", size="sizes", line_color="#DE2D26", line_width=2)),
("cross", Cross(x="x", y="y", size="sizes", line_color="#E6550D", fill_color=None, line_width=2)),
("asterisk", Asterisk(x="x", y="y", size="sizes", line_color="#F0027F", fill_color=None, line_width=2)),
]
def make_tab(title, glyph):
plot = Plot(title=title, x_range=xdr, y_range=ydr)
plot.add_glyph(source, glyph)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
tab = Panel(child=plot, title=title)
return tab
def make_tabs(objs):
return Tabs(tabs=[ make_tab(title, obj) for title, obj in objs ])
layout = VBox(children=[make_tabs(glyphs), make_tabs(markers)])
doc = Document()
doc.add(layout)
if __name__ == "__main__":
filename = "glyphs.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Glyphs"))
print("Wrote %s" % filename)
view(filename)
|
Python
| 0
|
@@ -2739,29 +2739,8 @@
0.1,
- radius_units=%22data%22,
fil
|
a8ad980a0e06550b4051b67c0834349bad54de5a
|
Remove wrong __all__ variable from api module.
|
pyssh/api.py
|
pyssh/api.py
|
# -*- coding: utf-8 -*-
import ctypes
import ctypes.util
__all__ = ['library']
def load_library():
libpath = ctypes.util.find_library('ssh')
libssh = ctypes.CDLL(libpath)
return libssh
SSH_OK = 0
SSH_ERROR = -1
SSH_AGAIN = -2
SSH_EOF = -127
SSH_OPTIONS_HOST = 0
SSH_OPTIONS_PORT = 1
SSH_OPTIONS_PORT_STR = 2
SSH_OPTIONS_FD = 3
SSH_OPTIONS_USER = 4
SSH_OPTIONS_SSH_DIR = 5
SSH_OPTIONS_IDENTITY = 6
# TODO...
SSH_AUTH_SUCCESS = 0
SSH_AUTH_DENIED = 1
SSH_AUTH_PARTIAL = 2
SSH_AUTH_INFO = 3
SSH_AUTH_AGAIN = 4
SSH_AUTH_ERROR = -1
class SftpAttributes(ctypes.Structure):
_fields_ = [("name", ctypes.c_char_p),
("longname", ctypes.c_char_p),
("flags", ctypes.c_uint32),
("type", ctypes.c_uint8),
("size", ctypes.c_uint64),]
try:
library = load_library()
library.ssh_new.argtypes = []
library.ssh_new.restype = ctypes.c_void_p
library.ssh_free.argtypes = [ctypes.c_void_p]
library.ssh_connect.argtypes = [ctypes.c_void_p]
library.ssh_connect.restype = ctypes.c_int
library.ssh_disconnect.argtypes = [ctypes.c_void_p]
library.ssh_options_set.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
library.ssh_userauth_password.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_char_p]
library.ssh_userauth_password.restype = ctypes.c_int
library.ssh_userauth_autopubkey.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
library.ssh_userauth_autopubkey.restype = ctypes.c_int
library.ssh_channel_new.argtypes = [ctypes.c_void_p]
library.ssh_channel_new.restype = ctypes.c_void_p
library.ssh_channel_open_session.argtypes = [ctypes.c_void_p]
library.ssh_channel_open_session.restype = ctypes.c_int
library.ssh_channel_request_exec.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
library.ssh_channel_request_exec.restype = ctypes.c_int
library.ssh_channel_read.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint, ctypes.c_int]
library.ssh_channel_read.restype = ctypes.c_int
library.ssh_channel_read_nonblocking.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint, ctypes.c_int]
library.ssh_channel_read_nonblocking.restype = ctypes.c_int
library.ssh_channel_write.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_uint]
library.ssh_channel_write.restype = ctypes.c_int
library.ssh_channel_send_eof.argtypes = [ctypes.c_void_p]
library.ssh_channel_send_eof.restype = ctypes.c_int
library.ssh_channel_is_eof.argtypes = [ctypes.c_void_p]
library.ssh_channel_is_eof.restype = ctypes.c_int
library.ssh_channel_is_open.argtypes = [ctypes.c_void_p]
library.ssh_channel_is_open.restype = ctypes.c_int
library.ssh_channel_is_closed.argtypes = [ctypes.c_void_p]
library.ssh_channel_is_closed.restype = ctypes.c_int
library.ssh_channel_close.argtypes = [ctypes.c_void_p]
library.ssh_channel_close.restype = ctypes.c_int
library.ssh_channel_free.argtypes = [ctypes.c_void_p]
library.ssh_channel_get_exit_status.argtypes = [ctypes.c_void_p]
library.ssh_channel_get_exit_status.restype = ctypes.c_int
library.ssh_channel_request_env.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_char_p]
library.ssh_channel_request_env.restype = ctypes.c_int
library.ssh_channel_request_pty.argtypes = [ctypes.c_void_p]
library.ssh_channel_request_pty.restype = ctypes.c_int
library.ssh_channel_request_pty_size.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
library.ssh_channel_request_pty_size.restype = ctypes.c_int
library.ssh_channel_request_shell.argtypes = [ctypes.c_void_p]
library.ssh_channel_request_shell.restype = ctypes.c_int
library.ssh_get_error.argtypes = [ctypes.c_void_p]
library.ssh_get_error.restype = ctypes.c_char_p
# SFTP
library.sftp_new.argtypes = [ctypes.c_void_p]
library.sftp_new.restype = ctypes.c_void_p
library.sftp_init.argtypes = [ctypes.c_void_p]
library.sftp_init.restype = None
library.sftp_free.argtypes = [ctypes.c_void_p]
library.sftp_fstat.argtypes = [ctypes.c_void_p]
library.sftp_fstat.restype = SftpAttributes
library.sftp_fstat.restype = ctypes.c_void_p
library.sftp_open.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int, ctypes.c_int]
library.sftp_open.restype = ctypes.c_void_p
library.sftp_close.argtypes = [ctypes.c_void_p]
library.sftp_write.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_uint]
library.sftp_write.restype = ctypes.c_int
library.sftp_seek64.argtypes = [ctypes.c_void_p, ctypes.c_ulonglong]
library.sftp_seek64.restype = ctypes.c_int
library.sftp_tell64.argtypes = [ctypes.c_void_p]
library.sftp_tell64.restype = ctypes.c_ulonglong
library.sftp_read.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint]
library.sftp_read.restype = ctypes.c_int
except AttributeError:
raise ImportError('ssh shared library not found or incompatible')
except (OSError, IOError):
raise ImportError('ssh shared library not found.\n'
'you probably had not installed libssh library.\n')
|
Python
| 0
|
@@ -56,31 +56,8 @@
il%0A%0A
-__all__ = %5B'library'%5D%0A%0A
%0Adef
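One plausible reading of "wrong" here: that __all__ limited from pyssh.api import * to the single name library, hiding the SSH_* constants the module also defines. A tiny illustration of how __all__ filters star-imports (module and names are hypothetical):

# mini_api.py (hypothetical module)
__all__ = ['library']   # star-import exposes only 'library'
library = object()
SSH_OK = 0              # defined, but hidden from 'from mini_api import *'

# In another module:
# from mini_api import *
# library  -> available
# SSH_OK   -> NameError: filtered out by __all__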
|
929ce25d9097fa29766575bbd354aeb5633b20a4
|
rename some methods that got missed
|
dnstest_checks.py
|
dnstest_checks.py
|
"""
Logic for checking desired DNS configuration against actual configuration
on prod and test servers.
"""
from dnstest_dns import DNStestDNS
class DNStestChecks:
config = None
DNS = None
def __init__(self, config):
"""
init
"""
self.config = config
self.DNS = DNStestDNS()
def check_removed_name(self, n):
"""
Run tests for removed names
"""
name = n
# make sure we have a FQDN
if name.find('.') == -1:
name = name + self.config.default_domain
# resolve with both test and prod
qt = self.DNS.resolve_name(name, self.config.server_test)
qp = self.DNS.resolve_name(name, self.config.server_prod)
if 'status' in qp:
print "NG: %s got status %s from PROD - cannot remove a name that doesn't exist (PROD)" % (n, qp['status'])
return False
# else we got an answer, it's there, just look for removal
if 'status' in qt and qt['status'] == "NXDOMAIN":
print "OK: %s removed, got status NXDOMAIN (TEST)" % (n)
print "\tPROD value was %s (PROD)" % qp['answer']['data']
# check for any leftover reverse lookups
rev = self.DNS.lookup_reverse(qp['answer']['data'], self.config.server_test)
if 'answer' in rev:
print "WARNING: %s appears to still have reverse DNS set to %s (TEST)" % (n, rev['answer']['data'])
elif 'status' in qt:
print "ERROR: %s returned status %s (TEST)" % (n, qt['status'])
else:
print "NG: %s returned valid answer, not removed (TEST)" % n
return True
def check_changed_names(self, n, newn):
"""
Run tests for changed names
"""
name = n
newname = newn
# make sure we have a FQDN
if name.find('.') == -1:
name = name + self.config.default_domain
if newname.find('.') == -1:
newname = newname + self.config.default_domain
# resolve with both test and prod
qt = self.DNS.resolve_name(newname, self.config.server_test)
qp = self.DNS.resolve_name(name, self.config.server_prod)
if 'status' in qp:
print "NG: %s got status %s from PROD - cannot change a name that doesn't exist (PROD)" % (n, qp['status'])
return False
# else we got an answer, it's there, check that it's right
if 'status' in qt:
print "NG: %s got status %s (TEST)" % (newn, qt['status'])
return False
# got valid answers for both, check them
if qt['answer']['data'] != qp['answer']['data']:
print "NG: %s => %s rename is bad, resolves to %s in TEST and %s in PROD" % (n, newn, qt['answer']['data'], qp['answer']['data'])
else:
# data matches, looks good
print "OK: rename %s => %s (TEST)" % (n, newn)
# check for any leftover reverse lookups
if qt['answer']['typename'] == 'A' or qp['answer']['typename'] == 'A':
rev = self.DNS.lookup_reverse(qt['answer']['data'], self.config.server_test)
if 'answer' in rev:
if rev['answer']['data'] == newn or rev['answer']['data'] == newname:
print "\tok, reverse DNS is set correctly for %s (TEST)" % qt['answer']['data']
else:
print "\tWARNING: %s appears to still have reverse DNS set to %s (TEST)" % (n, rev['answer']['data'])
else:
print "\tWARNING: no reverse DNS appears to be set for %s (TEST)" % qt['answer']['data']
return True
def check_added_names(self, n, value):
"""
Run tests for added names
"""
name = n
# make sure we have a FQDN
if name.find('.') == -1:
name = name + self.config.default_domain
target = value
if target.find('.') == -1:
target = target + self.config.default_domain
# resolve with both test and prod
qt = self.DNS.resolve_name(name, self.config.server_test)
qp = self.DNS.resolve_name(name, self.config.server_prod)
# make sure PROD returns NXDOMAIN, since it's a new record
if 'status' in qp:
if qp['status'] != 'NXDOMAIN':
print "NG: prod server returned status %s for name %s (PROD)" % (qp['status'], n)
return False
else:
print "NG: new name %s returned valid result from prod server (PROD)" % n
return False
# check the answer we got back from TEST
if 'answer' in qt:
if qt['answer']['data'] == value or qt['answer']['data'] == target:
print "OK: %s => %s (TEST)" % (n, value)
print "\tPROD server returns NXDOMAIN for %s (PROD)" % n
else:
print "NG: %s resolves to %s instead of %s (TEST)" % (n, qt['answer']['data'], value)
print "\tPROD server returns NXDOMAIN for %s (PROD)" % n
# check reverse DNS if we say to
if self.config.have_reverse_dns and qt['answer']['typename'] == 'A':
rev = self.DNS.lookup_reverse(value, self.config.server_test)
if 'status' in rev:
print "\tREVERSE NG: got status %s for name %s (TEST)" % (rev['status'], value)
elif rev['answer']['data'] == n or rev['answer']['data'] == name:
print "\tREVERSE OK: %s => %s (TEST)" % (value, rev['answer']['data'])
else:
print "REVERSE NG: got answer %s for name %s (TEST)" % (rev['answer']['data'], value)
else:
print "NG: status %s for name %s (TEST)" % (qt['status'], n)
return True
|
Python
| 0.000001
|
@@ -1701,25 +1701,24 @@
changed_name
-s
(self, n, ne
@@ -3728,17 +3728,16 @@
ded_name
-s
(self, n
|
b49f22af96644daa67c8d75881f59edd98b652b0
|
Fix import broken by s/html/serialization
|
kraken/transcrib.py
|
kraken/transcrib.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
# -*- coding: utf-8 -*-
"""
Utility functions for ground truth transcription.
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
from kraken.html import max_bbox
from kraken.lib.exceptions import KrakenInputException
from jinja2 import Environment, PackageLoader
from itertools import izip_longest
from StringIO import StringIO
import regex
import base64
import os
class TranscriptionInterface(object):
def __init__(self, font=None, font_style=None):
env = Environment(loader=PackageLoader('kraken', 'templates'))
self.tmpl = env.get_template('layout.html')
self.pages = []
self.font = {'font': font, 'style': font_style}
self.page_idx = 1
self.line_idx = 1
self.seg_idx = 1
def add_page(self, im, segmentation=None, records=None):
"""
Adds an image to the transcription interface, optionally filling in
information from a list of ocr_record objects.
Args:
im (PIL.Image): Input image
records (list): A list of ocr_record objects.
"""
page = {}
fd = StringIO()
im.save(fd, format='png')
page['index'] = self.page_idx
self.page_idx += 1
page['img'] = 'data:image/png;base64,' + base64.b64encode(fd.getvalue())
page['lines'] = []
if records:
for record in records:
splits = regex.split(u'(\s+)', record.prediction)
bbox = max_bbox(record.cuts)
line_offset = 0
segments = []
for segment, whitespace in izip_longest(splits[0::2], splits[1::2]):
if len(segment):
seg_bbox = max_bbox(record.cuts[line_offset:line_offset + len(segment)])
segments.append({'bbox': '{}, {}. {}, {}'.format(*seg_bbox), 'text': segment, 'index': self.seg_idx})
self.seg_idx += 1
line_offset += len(segment)
if whitespace:
line_offset += len(whitespace)
page['lines'].append({'index': self.line_idx, 'recognition': segments,
'bbox': '{}, {}, {}, {}'.format(int(bbox[0]),
int(bbox[1]),
int(bbox[2]),
int(bbox[3]))})
self.line_idx += 1
elif segmentation:
for bbox in segmentation:
page['lines'].append({'index': self.line_idx, 'bbox': '{}, {}, {}, {}'.format(int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))})
self.line_idx += 1
else:
raise KrakenInputException('Neither segmentations nor records given')
self.pages.append(page)
def write(self, fd):
"""
Writes the HTML file to a file descriptor.
Args:
fd (File): File descriptor to write to.
"""
fd.write(self.tmpl.render(pages=self.pages, font=self.font).encode('utf-8'))
|
Python
| 0.000001
|
@@ -802,20 +802,29 @@
kraken.
-html
+serialization
import
|
19d5d98350c8ef6f8e3d9153a899a6ce466e5e21
|
Rename `UserOwnedModelManager` to just `UserOwnedManager` for consistency with the Django naming convention. Replace the `_for_user` methods with methods that override the base manager methods; this should help enforce a user context for models. Implement an initial set of method overrides in this manner.
|
owned_models/models.py
|
owned_models/models.py
|
from django.conf import settings
from django.db import models
class UserOwnedModelManager(models.Manager):
def filter_for_user(self, user, *args, **kwargs):
return super(UserOwnedModelManager, self).get_queryset().filter(user = user, *args, **kwargs)
def get_for_user(self, user, *args, **kwargs):
if 'user' in kwargs:
kwargs.pop('user')
return super(UserOwnedModelManager, self).get_queryset().get(user = user, *args, **kwargs)
def get_or_create_for_user(self, user, **kwargs):
return super(UserOwnedModelManager, self).get_or_create(user = user, **kwargs)
class UserOwnedModel(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, editable = False)
objects = UserOwnedModelManager()
class Meta:
abstract = True
|
Python
| 0
|
@@ -77,61 +77,519 @@
nedM
+anager(m
odel
+s.
Manager
-(models.Manager):%0A%0A def filter_for_user
+):%0A %22%22%22%0A Wraps standard Manager query methods and adds a required %60user%60 argument, to enforce all calls%0A made through this manager to be made within a user context.%0A %22%22%22%0A%0A def all(self, user):%0A return super(UserOwnedManager, self).filter(user = user)%0A%0A def filter(self, user, **kwargs):%0A return super(UserOwnedManager, self).filter(user = user, **kwargs)%0A%0A def exclude(self, user, **kwargs):%0A return self.filter(user).exclude(**kwargs)%0A%0A def get
(sel
@@ -638,37 +638,32 @@
super(UserOwnedM
-odelM
anager, self).ge
@@ -667,26 +667,8 @@
.get
-_queryset().filter
(use
@@ -674,32 +674,146 @@
er = user, *args
+, **kwargs)%0A%0A def create(self, user, **kwargs):%0A return super(UserOwnedManager, self).create(user = user
, **kwargs)%0A%0A
@@ -821,24 +821,25 @@
def get_
-f
or_
-user
+create
(self, u
@@ -835,37 +835,47 @@
ate(self, user,
-*args
+defaults = None
, **kwargs):%0A
@@ -886,56 +886,83 @@
if
-'user' in kwargs:%0A kwargs.pop('user')
+defaults is None:%0A defaults = %7B%7D%0A defaults%5B'user'%5D = user
%0A
@@ -981,37 +981,32 @@
super(UserOwnedM
-odelM
anager, self).ge
@@ -1011,40 +1011,49 @@
get_
-queryset().get(user = user, *arg
+or_create(user = user, defaults = default
s, *
@@ -1062,35 +1062,38 @@
wargs)%0A%0A def
-get
+update
_or_create_for_u
@@ -1090,17 +1090,8 @@
eate
-_for_user
(sel
@@ -1090,32 +1090,49 @@
eate(self, user,
+ defaults = None,
**kwargs):%0A
@@ -1119,32 +1119,119 @@
one, **kwargs):%0A
+ if defaults is None:%0A defaults = %7B%7D%0A defaults%5B'user'%5D = user%0A
return s
@@ -1241,29 +1241,24 @@
r(UserOwnedM
-odelM
anager, self
@@ -1251,35 +1251,38 @@
dManager, self).
-get
+update
_or_create(user
@@ -1280,32 +1280,53 @@
ate(user = user,
+ defaults = defaults,
**kwargs)%0A%0Aclas
@@ -1321,16 +1321,17 @@
wargs)%0A%0A
+%0A
class Us
@@ -1358,16 +1358,85 @@
Model):%0A
+ %22%22%22%0A Base class for models that are owned by a user.%0A %22%22%22%0A%0A
user
@@ -1525,20 +1525,50 @@
erOwnedM
+anager()%0A all_objects = m
odel
+s.
Manager(
@@ -1609,8 +1609,9 @@
t = True
+%0A
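A condensed stand-in for the post-diff manager pattern: every query helper takes a required user argument, so call sites cannot forget scoping. Plain Python replaces Django querysets here to keep the sketch runnable on its own:

class UserOwnedManager:
    def __init__(self, rows):
        self._rows = rows  # stand-in for a Django queryset

    def all(self, user):
        return [r for r in self._rows if r['user'] == user]

    def filter(self, user, **kwargs):
        return [r for r in self.all(user)
                if all(r.get(k) == v for k, v in kwargs.items())]

rows = [{'user': 'alice', 'name': 'a'}, {'user': 'bob', 'name': 'b'}]
mgr = UserOwnedManager(rows)
assert mgr.all('alice') == [{'user': 'alice', 'name': 'a'}]
assert mgr.filter('bob', name='b') == [{'user': 'bob', 'name': 'b'}]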
|
aa6b1daedbd911c23857033bcc601bdae37627f0
|
Fix the Stream wrapping class. It had moved from elsewhere, but wasn't corrected for its new home in util.py
|
subversion/bindings/swig/python/svn/util.py
|
subversion/bindings/swig/python/svn/util.py
|
#
# svn.util: public Python interface for miscellaneous bindings
#
# Subversion is a tool for revision control.
# See http://subversion.tigris.org for more information.
#
# ====================================================================
# Copyright (c) 2000-2001 CollabNet. All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://subversion.tigris.org/license-1.html.
# If newer versions of this license are posted there, you may use a
# newer version instead, at your option.
#
######################################################################
#
# bring all the symbols up into this module
### in the future, we may want to limit this, rename things, etc
from _util import *
def run_app(func, *args, **kw):
'''Run a function as an "APR application".
APR is initialized, and an application pool is created. Cleanup is
performed as the function exits (normally or via an exception.
'''
apr_initialize()
try:
pool = svn_pool_create(None)
try:
return apply(func, (pool,) + args, kw)
finally:
svn_pool_destroy(pool)
finally:
apr_terminate()
# some minor patchups
svn_pool_destroy = apr_pool_destroy
class Stream:
def __init__(self, stream):
self._stream = stream
def read(self, amt=None):
if amt is None:
# read the rest of the stream
chunks = [ ]
while 1:
data = util.svn_stream_read(self._stream, util.SVN_STREAM_CHUNK_SIZE)
if not data:
break
chunks.append(data)
return string.join(chunks, '')
# read the amount specified
return util.svn_stream_read(self._stream, int(amt))
def write(self, buf):
### what to do with the amount written? (the result value)
util.svn_stream_write(self._stream, buf)
|
Python
| 0.999587
|
@@ -688,16 +688,129 @@
####%0A#%0A%0A
+# to retain backwards Python compat, we don't use 'import foo as bar'%0Aimport string%0A_string = string%0Adel string%0A%0A
# bring
@@ -1600,21 +1600,16 @@
data =
-util.
svn_stre
@@ -1630,21 +1630,16 @@
stream,
-util.
SVN_STRE
@@ -1731,16 +1731,17 @@
return
+_
string.j
@@ -1800,21 +1800,16 @@
return
-util.
svn_stre
@@ -1936,13 +1936,8 @@
-util.
svn_
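The diff's aliasing trick (import string; _string = string; del string) sidesteps the 'import foo as bar' syntax for the sake of very old Python while still freeing the name string for local use. The modern equivalent is the alias form itself:

import string as _string  # same effect as: import string; _string = string; del string

print(_string.ascii_lowercase[:3])  # the module is reachable only via the alias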
|
b41ad87d9cb941abce185d367c973e0a3f2802cb
|
Update flaskext sqlalchemy module import.
|
pystil/db.py
|
pystil/db.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 by Florian Mounier, Kozea
# This file is part of pystil, licensed under a 3-clause BSD license.
from flaskext.sqlalchemy import SQLAlchemy
from datetime import timedelta
from sqlalchemy import func, desc
from sqlalchemy.orm import column_property
from sqlalchemy.sql.expression import case
from . import patchpsycopg
db = SQLAlchemy()
count = func.count
sum_ = func.sum
distinct = func.distinct
date_part = func.date_part
date_trunc = func.date_trunc
split_part = func.split_part
strpos = func.strpos
substr = func.substr
length = func.length
array_agg = func.array_agg
def string(pkey=False):
return db.Column(db.String, primary_key=pkey)
def integer(pkey=False):
return db.Column(db.Integer, primary_key=pkey)
def decimal():
return db.Column(db.Numeric)
def datetime():
return db.Column(db.DateTime)
def date(pkey=False):
return db.Column(db.Date, primary_key=pkey)
def fields(clazz):
return [field
for field in clazz.__dict__
if not field.startswith("_")]
class Visit(db.Model):
"""This mapped class contains the visits"""
id = integer(pkey=True)
uuid = string()
browser_name = string()
hash = string()
host = string()
browser_version = string()
client_tz_offset = integer()
date = datetime()
last_visit = datetime()
ip = string()
language = string()
page = string()
platform = string()
query_string = db.Column('query', db.String)
referrer = string()
pretty_referrer = string()
referrer_domain = string()
site = string()
size = string()
time = db.Column(db.Interval)
country = string()
country_code = string()
city = string()
lat = decimal()
lng = decimal()
browser_name_version = column_property(
browser_name + ' ' + split_part(browser_version, '.', 1) +
case([
(browser_name.in_(['opera', 'safari', 'chrome']), '')],
else_='.' + split_part(browser_version, '.', 2)
))
day = column_property(
date_trunc('day', date))
hour = column_property(
date_part('hour', date))
spent_time = column_property(
case([
(time == None, None),
(time < timedelta(seconds=1), 0),
(time < timedelta(seconds=2), 1),
(time < timedelta(seconds=5), 2),
(time < timedelta(seconds=10), 3),
(time < timedelta(seconds=20), 4),
(time < timedelta(seconds=30), 5),
(time < timedelta(seconds=60), 6),
(time < timedelta(seconds=120), 7),
(time < timedelta(seconds=300), 8),
(time < timedelta(seconds=600), 9)
], else_=10))
subdomain = column_property(
case([
(split_part(host, '.', 3) != '', split_part(host, '.', 1))
], else_=None))
domain = column_property(
case([
(split_part(host, '.', 3) == '', host),
], else_=substr(host,
strpos(host, '.') + 1,
length(host) - strpos(host, '.') + 1)))
class Keys(db.Model):
"""This mapped lass contains the auth keys"""
id = integer(pkey=True)
key = string()
host = string()
|
Python
| 0
|
@@ -170,12 +170,9 @@
lask
-ext.
+_
sqla
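The flaskext.sqlalchemy to flask_sqlalchemy change tracks the retirement of the old flaskext namespace package. Where both spellings must be supported, a guarded import is the usual pattern; a sketch that also degrades when neither package is installed:

try:
    from flask_sqlalchemy import SQLAlchemy         # modern package name
except ImportError:
    try:
        from flaskext.sqlalchemy import SQLAlchemy  # legacy namespace
    except ImportError:
        SQLAlchemy = None                           # sketch only: degrade gracefully

if SQLAlchemy is None:
    print('Flask-SQLAlchemy is not installed')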
|
893e5986fa3c14d28abd729f4dc4cc0d4107a850
|
fix test_get_result
|
tests/test_agent/test_http_api_state.py
|
tests/test_agent/test_http_api_state.py
|
# No shebang line, this module is meant to be imported
#
# Copyright 2014 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from contextlib import nested
from datetime import datetime, timedelta
from json import loads, dumps
try:
from httplib import ACCEPTED, OK, BAD_REQUEST
except ImportError: # pragma: no cover
from http.client import ACCEPTED, OK, BAD_REQUEST
import mock
import psutil
from twisted.web.server import NOT_DONE_YET
from twisted.internet import reactor
from pyfarm.core.enums import AgentState
from pyfarm.agent.config import config
from pyfarm.agent.http.api.state import Stop, Status
from pyfarm.agent.sysinfo import memory
from pyfarm.agent.testutil import BaseAPITestCase
from pyfarm.agent.utility import total_seconds
class TestStop(BaseAPITestCase):
URI = "/stop"
CLASS = Stop
def prepare_config(self):
super(TestStop, self).prepare_config()
config.update(run_control_file="/tmp/pyfarm/agent/should_be_running")
def test_invalid_type_for_data(self):
request = self.post(
data={"foo": 1},)
stop = Stop()
result = stop.render(request)
self.assertEqual(result, NOT_DONE_YET)
self.assertTrue(request.finished)
self.assertEqual(request.responseCode, BAD_REQUEST)
self.assertEqual(len(request.written), 1)
self.assertIn(
"Failed to validate the request data against the schema",
loads(request.written[0])["error"])
def test_stops_agent(self):
self.patch(reactor, 'stop', mock.Mock())
request = self.post(data={})
stop = Stop()
result = stop.render(request)
self.assertEqual(result, NOT_DONE_YET)
self.assertTrue(request.finished)
self.assertEqual(request.responseCode, ACCEPTED)
self.assertTrue(self.agent.stopped)
return self.agent.stopped
def test_stops_and_waits_for_agent(self):
self.patch(reactor, 'stop', mock.Mock())
request = self.post(data={"wait": True})
stop = Stop()
result = stop.render(request)
self.assertEqual(result, NOT_DONE_YET)
self.assertTrue(request.finished)
self.assertEqual(request.responseCode, OK)
self.assertTrue(self.agent.stopped)
return self.agent.stopped
class TestStatus(BaseAPITestCase):
URI = "/status"
CLASS = Status
def setUp(self):
super(TestStatus, self).setUp()
self._config = config.copy()
def tearDown(self):
super(TestStatus, self).tearDown()
config.update(self._config)
def prepare_config(self):
super(TestStatus, self).prepare_config()
config.update(
state=AgentState.ONLINE, pids=[1, 2, 3])
def test_get_requires_no_input(self):
request = self.get()
status = Status()
result = status.render(request)
self.assertEqual(result, NOT_DONE_YET)
self.assertTrue(request.finished)
self.assertEqual(request.responseCode, OK)
self.assertEqual(len(request.written), 1)
self.assertIsInstance(loads(request.written[0]), dict)
def test_get_result(self):
process = psutil.Process()
direct_child_processes = len(process.children(recursive=False))
all_child_processes = len(process.children(recursive=True))
grandchild_processes = all_child_processes - direct_child_processes
# Determine the last time we talked to the master (if ever)
contacted = config.master_contacted(update=False)
if isinstance(contacted, datetime):
contacted = datetime.utcnow() - contacted
# Determine the last time we announced ourselves to the
# master (if ever)
last_announce = config.get("last_announce", None)
if isinstance(last_announce, datetime):
last_announce = datetime.utcnow() - last_announce
future_time = config["start"] + 30
expected_data = {
"state": config["state"],
"agent_hostname": config["agent_hostname"],
"agent_process_ram": memory.process_memory(),
"consumed_ram": memory.total_consumption(),
"child_processes": direct_child_processes,
"grandchild_processes": grandchild_processes,
"pids": config["pids"],
"agent_id": str(config["agent_id"]),
"last_master_contact": contacted,
"last_announce": last_announce,
"agent_lock_file": config["agent_lock_file"],
"free_ram": 4242,
"uptime": total_seconds(
timedelta(seconds=future_time - config["start"])),
"jobs": list(config["jobtypes"].keys())}
request = self.get()
status = Status()
with nested(
mock.patch.object(memory, "free_ram", return_value=4242),
mock.patch.object(time, "time", return_value=future_time)
):
response = status.render(request)
self.assertEqual(response, NOT_DONE_YET)
self.assertTrue(request.finished)
self.assertEqual(request.responseCode, OK)
self.assertEqual(len(request.written), 1)
self.assertEqual(loads(request.written[0]), expected_data)
|
Python
| 0.000026
|
@@ -5718,16 +5718,35 @@
rtEqual(
+%0A dumps(
loads(re
@@ -5764,23 +5764,75 @@
en%5B0%5D),
-expected_data
+sort_keys=True),%0A dumps(expected_data, sort_keys=True)
)%0A
|
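The diff above swaps a direct dict comparison for a comparison of canonical JSON strings. A minimal standalone sketch of that pattern, assuming nothing from the pyfarm suite: serializing both sides with sort_keys=True fixes the key order, so nested payloads compare as plain text.
from json import dumps, loads
def assert_same_json(raw_response, expected):
    # sort_keys=True gives a stable key order on both sides, so the
    # string comparison does not depend on dict iteration order.
    assert (dumps(loads(raw_response), sort_keys=True)
            == dumps(expected, sort_keys=True))
assert_same_json('{"b": 2, "a": 1}', {"a": 1, "b": 2})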
0606c70c29cec7025e322726b1559b7a3f4360f3
|
Remove resolution filter from Easynews search
|
stagehand/searchers/easynews.py
|
stagehand/searchers/easynews.py
|
import os
import urllib
import logging
import re
import asyncio
from bs4 import BeautifulSoup
from ..config import config
from ..toolbox import dateutils
from ..toolbox.net import download
from .base import SearcherBase, SearchResult, SearcherError
from .easynews_config import config as modconfig
__all__ = ['Searcher', 'modconfig']
log = logging.getLogger('stagehand.searchers.easynews')
class Searcher(SearcherBase):
NAME = 'easynews'
PRINTABLE_NAME = 'Easynews Global Search'
TYPE = 'http'
DEFAULT_URL_GLOBAL5 = 'https://secure.members.easynews.com/global5/index.html?gps={keywords}&sbj={subject}&from=&ns=&fil=&fex=&vc=&ac=&s1=nsubject&s1d=%2B&s2=nrfile&s2d=%2B&s3=dsize&s3d=%2B&pby=500&u=1&svL=&d1={date}&d1t=&d2=&d2t=&b1={size}&b1t=&b2=&b2t=&px1={res}&px1t=&px2=&px2t=&fps1=&fps1t=&fps2=&fps2t=&bps1=&bps1t=&bps2=&bps2t=&hz1=&hz1t=&hz2=&hz2t=&rn1=&rn1t=&rn2=&rn2t=&fly=2&pno=1&sS=5'
@asyncio.coroutine
def _search_global5(self, title, codes, size, date, res):
if not modconfig.username or not modconfig.password:
raise ValueError('Configuration lacks username and/or password')
if 0 and os.path.exists('result.rss'):
print('Using cached result.rss')
return open('result.rss').read()
url = modconfig.url or Searcher.DEFAULT_URL_GLOBAL5
url = url.format(keywords=urllib.parse.quote_plus(title), subject=codes,
date=urllib.parse.quote_plus(date), size=size, res=res)
status, rss = yield from download(url, retry=modconfig.retries,
auth=(modconfig.username, modconfig.password))
if status != 200:
# TODO: handle status codes like 401 (unauth)
raise SearcherError('HTTP status not ok (%d)' % status)
#file('result.rss', 'w').write(rss)
return rss
@asyncio.coroutine
def _search(self, series, episodes, date, min_size, quality):
title = series.cfg.search_string or series.name
# Strip problem characters from the title, and substitute alternative apostrophe
title = self.clean_title(title, apostrophe=Searcher.CLEAN_APOSTROPHE_REGEXP)
size = '%dM' % (min_size / 1048576) if min_size else '100M'
res = '1x540' if quality == 'HD' else ''
results = []
for i in range(0, len(episodes), 10):
batch = episodes[i:i+10]
codelist = [code for episode in batch \
for code in self._get_episode_codes_regexp_list([episode])]
codes = '|'.join(codelist)
log.debug('searching for %d episodes, minimum size %s and res %s, keywords=%s subject=%s',
len(batch), size, res or 'any', title, codes)
rss = yield from self._search_global5(title, codes, size, date or '', res)
soup = BeautifulSoup(rss, 'html.parser')
for item in soup.find_all('item'):
result = SearchResult(self)
result.filename = urllib.parse.unquote(os.path.split(item.enclosure['url'])[-1])
result.size = self._parse_hsize(item.enclosure['length'])
result.date = dateutils.from_rfc822(item.pubdate.contents[0])
result.subject = ''.join(item.title.contents)
result.url = item.enclosure['url']
log.debug('result: %s', result)
# TODO: parse out newsgroup
results.append(result)
return {None: results}
@asyncio.coroutine
def _get_retriever_data(self, search_result):
return {
'url': search_result.url,
'username': modconfig.username,
'password': modconfig.password,
'retry': modconfig.retries
}
def _check_results_equal(self, a, b):
try:
# Easynews URLs contain hashes of the file, which is a convenient
# value to compare, because it means that even different URLs can
# end up being the same file.
a_hash = re.search(r'/([0-9a-f]{32,})', a.url).group(1)
b_hash = re.search(r'/([0-9a-f]{32,})', b.url).group(1)
return a_hash == b_hash
except AttributeError:
# Wasn't able to find hash in URL, so compare the URLs directly.
return a.url == b.url
def enable(manager):
"""
Called by the web interface when the plugin is enabled where it was
previously disabled.
"""
# http retriever is always enabled, so no special action is needed
# when the easynews searcher is enabled.
pass
def get_config_template(manager):
return os.path.join(manager.paths.data, 'web', 'settings', 'easynews.tmpl')
|
Python
| 0
|
@@ -2260,24 +2260,103 @@
00M'%0A
+ # XXX: easynews doesn't support HEVC so remove resolution filtering.%0A #
res = '1x54
@@ -2384,16 +2384,33 @@
else ''
+%0A res = ''
%0A%0A
@@ -4826,8 +4826,9 @@
s.tmpl')
+%0A
|
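As a hedged sketch of how the search URL template above is filled in (the template here is a stand-in, not the real Easynews endpoint): quote_plus encodes spaces as '+', and the resolution slot is now always left empty.
import urllib.parse
template = 'https://example.invalid/global5/index.html?gps={keywords}&px1={res}'
url = template.format(
    keywords=urllib.parse.quote_plus('some show s01e01'),  # spaces become '+'
    res='')  # resolution filter left blank, as the change above forces
print(url)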
be10731cab38445a3d1c3a6df3703fba3fecc93f
|
Fix accessing argv
|
examples/move-by-label.py
|
examples/move-by-label.py
|
#!/usr/bin/env python
"""
Example script to move torrents based on their label set in ruTorrent.
./move-by-label.py USERNAME HOSTNAME [PATH]
"""
from __future__ import print_function
from time import sleep
import sys
from xirvik.client import ruTorrentClient
USERNAME = sys.argv[1]
HOST = sys.arg[2]
try:
PATH = sys.argv[3]
except IndexError:
PATH = ''
PREFIX = '/torrents/{}/{}'.format(USERNAME, PATH)
if __name__ == '__main__':
client = ruTorrentClient(HOST)
count = 0
for hash, info in client.list_torrents_dict().iteritems():
name = info['name'].encode('utf-8')
label = info['custom1']
move_to = '{}/{}'.format(PREFIX, label.lower())
# Ignore torrents that are hash checking, not finished hash checking,
# not complete or that are already moved
if (info['is_hash_checking'] or
not info['is_hash_checked'] or
info['left_bytes'] > 0 or
info['base_path'].startswith(move_to)):
continue
print('Moving {} to {}/'.format(name, move_to.encode('utf-8'), name))
client.move_torrent(hash, move_to)
# Sometimes the web server cannot handle so many requests, so only
# send 10 at a time
count += 1
if count and (count % 10) == 0:
sleep(10)
|
Python
| 0.000006
|
@@ -292,16 +292,17 @@
sys.arg
+v
%5B2%5D%0Atry:
|
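A small sketch of the argv handling the fix above restores: required positionals are indexed directly (sys.argv, not the sys.arg typo), and the optional path falls back through IndexError.
def parse_args(argv):
    # argv[0] is the program name; username and host are required.
    username, host = argv[1], argv[2]
    try:
        path = argv[3]  # optional third argument
    except IndexError:
        path = ''
    return username, host, path
print(parse_args(['move-by-label.py', 'alice', 'example.com']))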
b2fd88546c73e4aadb0e697233a7bfd20398e429
|
Check the PBS_NUM_NODES environment variable first when auto_npar = True.
|
sshcustodian/vasp/sshjobs.py
|
sshcustodian/vasp/sshjobs.py
|
# File: sshcustodian/vasp/sshjobs.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division
"""
"""
import os
import shutil
import math
from pymatgen.io.vasp import Incar
from pymatgen.io.smart import read_structure
from pymatgen.io.vasp.sets import MPVaspInputSet
from custodian.vasp.interpreter import VaspModder
from custodian.custodian.vasp import VaspJob
VASP_INPUT_FILES = {"INCAR", "POSCAR", "POTCAR", "KPOINTS"}
VASP_OUTPUT_FILES = ['DOSCAR', 'INCAR', 'KPOINTS', 'POSCAR', 'PROCAR',
'vasprun.xml', 'CHGCAR', 'CHG', 'EIGENVAL', 'OSZICAR',
'WAVECAR', 'CONTCAR', 'IBZKPT', 'OUTCAR']
class SSHVaspJob(VaspJob):
"""
"""
def __init__(self, vasp_cmd, output_file="vasp.out", suffix="",
final=True, backup=True,
default_vasp_input_set=MPVaspInputSet(), auto_npar=True,
auto_gamma=True, settings_override=None,
gamma_vasp_cmd=None, copy_magmom=False):
"""
"""
super(SSHVaspJob, self).__init__(vasp_cmd, output_file, suffix, final,
backup, default_vasp_input_set,
auto_npar, auto_gamma,
settings_override, gamma_vasp_cmd,
copy_magmom)
def setup(self):
"""
"""
files = os.listdir(".")
num_structures = 0
if not set(files).issuperset(VASP_INPUT_FILES):
for f in files:
try:
struct = read_structure(f)
num_structures += 1
except:
pass
if num_structures != 1:
raise RuntimeError("{} structures found. Unable to continue."
.format(num_structures))
else:
self.default_vis.write_input(struct, ".")
if self.backup:
for f in VASP_INPUT_FILES:
shutil.copy(f, "{}.orig".format(f))
if self.auto_npar:
try:
incar = Incar.from_file("INCAR")
#Only optimized NPAR for non-HF and non-RPA calculations.
if not (incar.get("LHFCALC") or incar.get("LRPA") or
incar.get("LEPSILON")):
if incar.get("IBRION") in [5, 6, 7, 8]:
# NPAR should not be set for Hessian matrix
# calculations, whether in DFPT or otherwise.
del incar["NPAR"]
else:
import multiprocessing
# try sge environment variable first
# (since multiprocessing counts cores on the current machine only)
ncores = os.environ.get('NSLOTS') or multiprocessing.cpu_count()
ncores = int(ncores)
for npar in range(int(math.sqrt(ncores)),
ncores):
if ncores % npar == 0:
incar["NPAR"] = npar
break
incar.write_file("INCAR")
except:
pass
if self.settings_override is not None:
VaspModder().apply_actions(self.settings_override)
|
Python
| 0
|
@@ -2710,11 +2710,11 @@
try
-sge
+pbs
env
@@ -2767,133 +2767,532 @@
#
-(since multiprocessing counts cores on the current machine only)%0A ncores = os.environ.get('NSLOTS') or
+try sge environment variable second%0A # Note!%0A # multiprocessing.cpu_count() will include hyperthreads%0A # in the CPU count, which will set NPAR to be too large%0A # and can cause the job to hang if you use compute%0A # nodes with scratch partitions.%0A ncores = (os.environ.get(%22PBS_NUM_NODES%22) or%0A os.environ.get('NSLOTS') or%0A
mul
@@ -3315,16 +3315,17 @@
_count()
+)
%0A
|
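A standalone sketch of the core-count fallback chain the change above introduces, followed by the NPAR selection loop (smallest divisor of ncores at or above its square root); the environment variable names mirror the record, not any scheduler guarantee.
import math
import os
ncores = int(os.environ.get('PBS_NUM_NODES')     # PBS first, per the change
             or os.environ.get('NSLOTS')         # then SGE
             or os.cpu_count())                  # last resort: local cores
npar = ncores  # safe default if the loop finds no divisor
for candidate in range(int(math.sqrt(ncores)), ncores):
    if ncores % candidate == 0:
        npar = candidate  # smallest divisor >= sqrt(ncores)
        break
print(ncores, npar)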
44804828d9c0e3afa08f28e5e90e3b3674adb9e5
|
fix namespace issues with mdps dict
|
tests/test_sim_setup/test_simulation.py
|
tests/test_sim_setup/test_simulation.py
|
"""This contains a set of tests for paratemp.sim_setup.Simulation"""
########################################################################
# #
# This script was written by Thomas Heavey in 2018. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2017-18 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
from __future__ import absolute_import
import pathlib
import pytest
from paratemp.tools import cd
@pytest.mark.xfail
class TestSimulation(object):
def test_runs(self, pt_blank_dir):
from paratemp.sim_setup import Simulation
gro = pt_blank_dir / 'PT-out0.gro'
top = pt_blank_dir / 'spc-and-methanol.top'
sim = Simulation(name='test_sim',
gro=gro, top=top, base_folder=pt_blank_dir)
assert isinstance(sim, Simulation)
@pytest.fixture
def mdps(self):
min_mdp = 'examples/sample-mdps/minim.mdp'
equil_mdp = 'examples/sample-mdps/equil.mdp'
prod_mdp = 'examples/sample-mdps/prod.mdp'
mdps = dict(minimize=min_mdp, equilibrate=equil_mdp,
production=prod_mdp)
return mdps
@pytest.fixture
def sim_with_dir(self, pt_blank_dir, mdps):
from paratemp.sim_setup import Simulation
gro = pt_blank_dir / 'PT-out0.gro'
top = pt_blank_dir / 'spc-and-methanol.top'
sim = Simulation(name='sim_fixture',
gro=gro, top=top, base_folder=pt_blank_dir,
mdps=mdps)
return sim, pt_blank_dir
@pytest.fixture
def sim(self, sim_with_dir):
return sim_with_dir[0]
attrs = {'name': str,
'top': pathlib.Path,
'base_folder': pathlib.Path,
'mdps': dict,
'tprs': dict,
'deffnms': dict,
'outputs': dict,
'geometries': dict,
'folders': dict,
}
def test_attrs_exist_and_type(self, sim):
for attr in self.attrs:
assert hasattr(sim, attr)
dtype = self.attrs[attr]
assert isinstance(getattr(sim, attr), dtype)
def test_methods_exist_and_callable(self, sim, mdps):
for step in mdps:
assert hasattr(sim, step)
assert callable(getattr(sim, step))
def test_fp(self, sim):
sample_file = 'tests/__init__.py'
fp: pathlib.Path = sim._fp(sample_file)
assert fp.exists()
assert fp.is_absolute()
assert fp.is_file()
assert fp.samefile(sample_file)
def test_last_geom(self, sim):
gro: pathlib.Path = sim.last_geometry
assert isinstance(gro, pathlib.Path)
assert gro.suffix == '.gro'
assert gro.is_absolute()
assert gro.is_file()
def test_compile_tpr(self, sim_with_dir):
sim, path = sim_with_dir
step = 'minimize'
min_path: pathlib.Path = path / step
min_path.mkdir()
with cd(min_path):
tpr = sim._compile_tpr(step_name=step)
mdout = pathlib.Path('mdout.mdp').resolve()
assert isinstance(tpr, pathlib.Path)
assert tpr.exists()
assert tpr.is_absolute()
assert tpr.is_file()
assert tpr.suffix == '.tpr'
assert mdout.exists()
d_tpr = sim.tprs[step]
assert tpr.samefile(d_tpr)
assert isinstance(sim.outputs['compile_{}'.format(step)], str)
@pytest.fixture
def sim_with_tpr(self, sim_with_dir):
sim, path = sim_with_dir
step = 'minimize'
min_path: pathlib.Path = path / step
min_path.mkdir()
with cd(min_path):
tpr = sim._compile_tpr(step_name=step)
return sim, min_path, step
def test_run_mdrun(self, sim_with_tpr):
sim, path, step = sim_with_tpr
with cd(path):
gro = sim._run_mdrun(step_name=step)
assert isinstance(gro, pathlib.Path)
assert gro.exists()
assert gro.is_absolute()
assert gro.is_file()
assert gro.suffix == '.gro'
assert gro.samefile(sim.last_geometry)
assert isinstance(sim.deffnms[step], pathlib.Path)
assert isinstance(sim.outputs['run_{}'.format(step)], str)
@pytest.mark.parametrize('step', list(mdps.keys()))
def test_step_methods(self, sim, step):
method = getattr(sim, step)
step_dir = method()
assert step in step_dir.name
assert step_dir.exists()
assert step_dir.is_dir()
d_step_dir = sim.folders[step]
assert isinstance(d_step_dir, pathlib.Path)
assert step_dir.samefile(d_step_dir)
|
Python
| 0
|
@@ -2092,52 +2092,8 @@
n)%0A%0A
- @pytest.fixture%0A def mdps(self):%0A
@@ -2131,28 +2131,24 @@
/minim.mdp'%0A
-
equil_md
@@ -2184,28 +2184,24 @@
il.mdp'%0A
-
-
prod_mdp = '
@@ -2231,28 +2231,24 @@
od.mdp'%0A
-
-
mdps = dict(
@@ -2304,20 +2304,16 @@
-
producti
@@ -2328,28 +2328,8 @@
mdp)
-%0A return mdps
%0A%0A
@@ -2385,22 +2385,16 @@
lank_dir
-, mdps
):%0A
@@ -2677,16 +2677,21 @@
mdps=
+self.
mdps)%0A
@@ -3366,14 +3366,8 @@
sim
-, mdps
):%0A
@@ -3385,16 +3385,21 @@
step in
+self.
mdps:%0A
|
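The diff above moves mdps from a fixture to a class attribute because pytest.mark.parametrize arguments are evaluated while the class body is being defined, before any fixture can run. A minimal sketch of the working shape:
import pytest
class TestSteps(object):
    # Plain class attribute: available at class-definition time, which
    # is when the parametrize decorator below is evaluated.
    mdps = {'minimize': 'min.mdp', 'equilibrate': 'eq.mdp'}
    @pytest.mark.parametrize('step', sorted(mdps))
    def test_step_known(self, step):
        assert step in self.mdps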
6ece7062e539e2196ff04c49f07913c884907878
|
rearrange lines to make the colors-to-player mapping clear
|
run-relax.py
|
run-relax.py
|
#!/usr/bin/env python
import sys
import time
from Mindwave.mindwave import BluetoothHeadset, FakeHeadset
# Note: on OS X, BluetoothHeadset will not work
from parameters import SharedParameters
from threads import HeadsetThread
from gameplay import GameObject
from game_effects import generate_player_renderer
from controller import AnimationController
from renderer import Renderer
from playlist import Playlist
PLAYER_ONE_ADDRESS = '74:E5:43:BE:39:71'
PLAYER_TWO_ADDRESS = '74:E5:43:B1:96:E0'
if __name__ == '__main__':
num_args = len(sys.argv)
test = num_args > 1 and sys.argv[1] == 'test'
ip_address = None
if test and num_args > 2:
ip_address = sys.argv[2]
elif num_args > 1:
ip_address = sys.argv[1]
# ip_address = '192.168.7.2:7890'
shared_params = SharedParameters()
if not test:
shared_params.targetFrameRate = 100.0
shared_params.use_keyboard_input = False
shared_params.debug = False
player1 = FakeHeadset(random_data = True) if test else BluetoothHeadset(PLAYER_ONE_ADDRESS)
player2 = FakeHeadset(random_data = True) if test else BluetoothHeadset(PLAYER_TWO_ADDRESS)
yellowish = [1.0, 0.84, 0.28]
greenish = [0.2, 0.4, 0.]
purple = [0.2, 0., 0.3]
pink = [0.7, 0.5, 0.4]
renderer_low = generate_player_renderer(shared_params, purple, pink, inverse=True)
renderer_high = generate_player_renderer(shared_params, greenish, yellowish)
game = GameObject(shared_params, renderer_low, renderer_high)
game.start()
controller = AnimationController(game_object=game,
renderer_low=renderer_low, renderer_high=renderer_high, params=shared_params, server=ip_address)
threads = [
HeadsetThread(shared_params, player1),
HeadsetThread(shared_params, player2, use_eeg2=True),
]
for thread in threads:
thread.start()
# start the lights
time.sleep(0.05)
controller.drawingLoop()
|
Python
| 0
|
@@ -1069,16 +1069,162 @@
DDRESS)%0A
+ yellowish = %5B1.0, 0.84, 0.28%5D%0A greenish = %5B0.2, 0.4, 0.%5D%0A renderer_high = generate_player_renderer(shared_params, greenish, yellowish)%0A%0A
play
@@ -1315,74 +1315,8 @@
SS)%0A
-%0A yellowish = %5B1.0, 0.84, 0.28%5D%0A greenish = %5B0.2, 0.4, 0.%5D%0A%0A
@@ -1366,17 +1366,16 @@
5, 0.4%5D%0A
-%0A
rend
@@ -1457,88 +1457,8 @@
ue)%0A
- renderer_high = generate_player_renderer(shared_params, greenish, yellowish)
%0A
|
0f0fc4037997f6ae4eef019547e3c8d8cf05db9c
|
modify test data
|
drda/tests/test_derby.py
|
drda/tests/test_derby.py
|
##############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2016 Hajime Nakagami
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##############################################################################
import unittest
import io
import decimal
import datetime
import drda
class TestDerby(unittest.TestCase):
host = 'localhost'
database = 'testdb;create=true'
port = 1527
def setUp(self):
self.connection = drda.connect(
host=self.host,
database=self.database,
port=self.port,
)
def tearDown(self):
self.connection.close()
def test_derby(self):
cur = self.connection.cursor()
cur.execute("create table test (s varchar(20), i int, d decimal(18, 2))")
cur.execute("insert into test (s, i, d) values ('abcdefghijklmnopq', 1, 1.1)")
cur.execute("insert into test (s, i, d) values ('B', 2, 1.2)")
cur.execute("insert into test (s, i) values ('C', 3)")
cur.execute("select * from test")
|
Python
| 0.000006
|
@@ -1765,210 +1765,343 @@
te(%22
-create table test (s varchar(20), i int, d decimal(18, 2))%22)%0A cur.execute(%22insert into test (s, i, d) values ('abcdefghijklmnopq', 1, 1.1)%22)%0A cur.execute(%22insert into test (s, i, d) values
+%22%22%0A CREATE TABLE test (%0A s VARCHAR(20),%0A i int,%0A d1 decimal(2, 1),%0A d2 decimal(18, 2)%0A )%0A %22%22%22)%0A cur.execute(%22%22%22%0A INSERT INTO test (s, i, d1, d2) VALUES%0A ('abcdefghijklmnopq', 1, 1.1, 123456789.12),%0A
('B
@@ -2109,19 +2109,21 @@
, 2, 1.2
-)%22)
+, 2),
%0A
@@ -2127,60 +2127,47 @@
-cur.execute(%22insert into test (s, i) values ('C', 3)
+ ('C', 3, null, null)%0A %22%22
%22)%0A
@@ -2190,21 +2190,21 @@
te(%22
-select * from
+SELECT * FROM
tes
|
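A runnable sketch of the same seeding idea as the rewritten test above, using sqlite3 instead of Derby/drda so it stands alone: one multi-row INSERT with explicit NULLs.
import sqlite3
con = sqlite3.connect(':memory:')
cur = con.cursor()
cur.execute("CREATE TABLE test (s VARCHAR(20), i INT, d1 DECIMAL(2,1), d2 DECIMAL(18,2))")
cur.execute("""
    INSERT INTO test (s, i, d1, d2) VALUES
        ('abcdefghijklmnopq', 1, 1.1, 123456789.12),
        ('B', 2, 1.2, 2),
        ('C', 3, NULL, NULL)
""")
print(cur.execute("SELECT * FROM test").fetchall())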
2ab1c23ca4be991c174514998496ea4f7c8f6c3a
|
Make indentation consistent with other code
|
serve.py
|
serve.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module serves a WSGI application using werkzeug.
Author: Logan Raarup <logan@logan.dk>
"""
import importlib
import os
import sys
try:
from werkzeug import serving
except ImportError: # pragma: no cover
sys.exit('Unable to import werkzeug (run: pip install werkzeug)')
def serve(cwd, app, port, host='localhost'):
sys.path.insert(0, cwd)
wsgi_fqn = app.rsplit('.', 1)
wsgi_fqn_parts = wsgi_fqn[0].rsplit('/', 1)
if len(wsgi_fqn_parts) == 2:
sys.path.insert(0, os.path.join(cwd, wsgi_fqn_parts[0]))
wsgi_module = importlib.import_module(wsgi_fqn_parts[-1])
wsgi_app = getattr(wsgi_module, wsgi_fqn[1])
# Attempt to force Flask into debug mode
try:
wsgi_app.debug = True
except: # noqa: E722
pass
os.environ['IS_OFFLINE'] = 'True'
serving.run_simple(
str(host),
int(port),
wsgi_app,
use_debugger=True,
use_reloader=True,
use_evalex=True
)
if __name__ == '__main__': # pragma: no cover
if len(sys.argv) != 5:
sys.exit('Usage: {} CWD APP PORT HOST'.format(
os.path.basename(sys.argv[0])))
serve(*sys.argv[1:])
|
Python
| 0.000144
|
@@ -894,49 +894,14 @@
- str(
host
-)
,%0A
-
@@ -915,31 +915,16 @@
(port),%0A
-
@@ -933,31 +933,16 @@
gi_app,%0A
-
@@ -956,39 +956,24 @@
ugger=True,%0A
-
use_
@@ -999,61 +999,23 @@
- use_evalex=True%0A
+use_evalex=True
)%0A%0A%0A
|
543b1853a5fa9b52c15a67c7a66ee5bd75f5c87c
|
update cabal project status whenever backend changes
|
backend_cmds.py
|
backend_cmds.py
|
import threading
import sublime
import sublime_plugin
import SublimeHaskell.internals.backend_mgr as BackendManager
import SublimeHaskell.internals.utils as Utils
import SublimeHaskell.sublime_haskell_common as Common
import SublimeHaskell.internals.settings as Settings
class SublimeHaskellStartBackend(sublime_plugin.WindowCommand):
def __init__(self, window):
super().__init__(window)
self.busy = False
def run(self, **_args):
# Prevents the Python main thread from blocking.
Utils.run_async(type(self).__name__ + '.do_startup', self.do_startup)
def do_startup(self):
backend_mgr = BackendManager.BackendManager()
with Common.status_message_process('Starting up {0} backend'.format(backend_mgr.current_backend_name), priority=1):
try:
self.busy = True
backend_mgr.set_state(BackendManager.BackendManager.INITIAL)
backend_mgr.initialize()
finally:
self.busy = False
def is_enabled(self):
return not self.busy and BackendManager.BackendManager().is_inactive_state()
class SublimeHaskellStopBackend(sublime_plugin.WindowCommand):
def __init__(self, window):
super().__init__(window)
self.busy = False
def run(self, **_args):
# Prevents the Python main thread from blocking.
Utils.run_async(type(self).__name__ + '.do_shutdown', self.do_shutdown)
def do_shutdown(self):
backend_mgr = BackendManager.BackendManager()
with Common.status_message_process('Shutting down {0} backend'.format(backend_mgr.current_backend_name), priority=1):
try:
self.busy = True
backend_mgr.shutdown_backend()
finally:
self.busy = False
def is_enabled(self):
return not (self.busy or BackendManager.BackendManager().is_inactive_state())
class SublimeHaskellRestartBackend(sublime_plugin.WindowCommand):
def __init__(self, window):
super().__init__(window)
self.restart_ev = threading.Event()
self.restart_ev.clear()
def run(self, **_args):
Utils.run_async('restarting backend', self.do_restart)
def is_enabled(self):
return not (self.restart_ev.is_set() or BackendManager.BackendManager().is_inactive_state())
def do_restart(self):
self.restart_ev.set()
try:
SublimeHaskellStopBackend(self.window).do_shutdown()
SublimeHaskellStartBackend(self.window).do_startup()
finally:
self.restart_ev.clear()
class SublimeHaskellChooseBackend(sublime_plugin.WindowCommand):
def __init__(self, window):
super().__init__(window)
self.backends = {}
self.backend_names = []
def run(self, **_args):
backend_mgr = BackendManager.BackendManager()
# Rescan for backends to ensure we have the most up-to-date list...
backend_mgr.possible_backends = backend_mgr.filter_possible(Settings.PLUGIN.backends)
if backend_mgr.possible_backends:
print('plugin \'backends\' {0}'.format([name for name in Settings.PLUGIN.backends]))
print('Possible/usable \'backends\': {0}'.format([name for name in backend_mgr.possible_backends]))
if len(backend_mgr.possible_backends) > 1:
self.backend_names = [name for name in backend_mgr.possible_backends]
self.backend_names.sort()
self.window.show_quick_panel(self.backend_names, self.change_backend)
elif len(backend_mgr.possible_backends) == 1:
backend_name = list(backend_mgr.possible_backends)[0]
sublime.message_dialog('Only one backend, \'{0}\', available -- starting it.'.format(backend_name))
self.start_new_backend(backend_name)
else:
backend_mgr.no_backends_available()
def change_backend(self, idx):
if idx >= 0:
Utils.run_async('change backend: startup', self.start_new_backend, self.backend_names[idx])
def start_new_backend(self, backend_name):
with Common.status_message_process('Changing backend to \'{0}\''.format(backend_name), priority=2):
BackendManager.BackendManager().change_current_backend(backend_name)
cabal_project_status(self.window.active_view(), BackendManager.BackendManager())
def cabal_project_status(view, backend_mgr):
vsettings = view.settings()
project_name = vsettings.get(Settings.SETTING_SUBHASK_PROJECT)
if project_name is None:
project_name = '_unknown_'
active_backend = backend_mgr.active_backend()
view.set_status('sublime_haskell_cabal', 'cabal: {0} [{1}]'.format(project_name, active_backend.backend_name()))
|
Python
| 0
|
@@ -1006,32 +1006,129 @@
elf.busy = False
+%0A cabal_project_status(self.window.active_view(), BackendManager.BackendManager())
%0A%0A def is_ena
@@ -1897,32 +1897,129 @@
elf.busy = False
+%0A cabal_project_status(self.window.active_view(), BackendManager.BackendManager())
%0A%0A def is_ena
@@ -2754,32 +2754,32 @@
finally:%0A
-
self
@@ -2797,16 +2797,109 @@
.clear()
+%0A cabal_project_status(self.window.active_view(), BackendManager.BackendManager())
%0A%0Aclass
|
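A minimal sketch (hypothetical names, not the SublimeHaskell API) of the pattern the change above applies: run the status refresh in finally so the UI reflects the backend state even when startup or shutdown raises.
def with_status_refresh(operation, refresh):
    try:
        operation()
    finally:
        refresh()  # runs on success and on failure alike
with_status_refresh(lambda: print('restarting backend'),
                    lambda: print('cabal project status refreshed'))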
89ad9ad4e8d8820b89b65b281375782d80275446
|
Fix status
|
aldryn_essential_addons_dashboard/views.py
|
aldryn_essential_addons_dashboard/views.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import re
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from versionfield.version import Version
from versionfield.constants import DEFAULT_NUMBER_BITS
from .models import Addon
import warnings
ZERO = Version('0.0.0', DEFAULT_NUMBER_BITS)
class CsrfExemptMixin(object):
@classmethod
def as_view(cls, **initkwargs):
view = super(CsrfExemptMixin, cls).as_view(**initkwargs)
return csrf_exempt(view)
class ProcessWebhookView(CsrfExemptMixin, View):
http_method_names = ['post', 'get']
def get_data(self, request):
payload = request.POST.get('payload', None)
return json.loads(payload) if payload else []
def get_job_python(self, job):
"""Given a single 'job' object, return the found Python."""
if job['config'] and job['config']['python']:
return job['config']['python']
return None
def get_max_python(self, matrix):
"""Returns the max. version of python in all the successful jobs."""
max_python = ZERO
for job in matrix:
if job["state"] == "finished" and job["status"] == 0:
job_python = Version(
self.get_job_python(job), DEFAULT_NUMBER_BITS)
if job_python and job_python > max_python:
max_python = job_python
if max_python > ZERO:
return max_python
return None
def get_job_django(self, job):
"""
Given a single 'job' object, return the found Django. This one is a bit
trickier as we'll have to parse it out of the ENV.
"""
pattern = re.compile('.*?django *= *(?P<django>[0-9][0-9.]*).*?', re.I)
if job['config'] and job['config']['env']:
grps = re.match(pattern, job['config']['env'])
if grps:
return Version(grps.group('django'), DEFAULT_NUMBER_BITS)
return None
def get_max_django(self, matrix):
"""Returns the max. version of django in all the successful jobs."""
max_django = ZERO
for job in matrix:
if job['state'] == 'finished' and job['status'] == 0:
job_django = Version(
self.get_job_django(job), DEFAULT_NUMBER_BITS)
if job_django and job_django > max_django:
max_django = job_django
if max_django > ZERO:
return max_django
return None
def process_data(self, addon, data):
if data['matrix']:
addon.max_python_version = self.get_max_python(data['matrix'])
addon.max_django_version = self.get_max_django(data['matrix'])
addon.build_passing = data['matrix']['status'] == 0
warnings.warn('Updating "{0}" with: {1}, {2}, {3}'.format(
addon,
addon.max_python_version,
addon.max_django_version,
addon.build_passing,
))
addon.save()
def post(self, request, *args, **kwargs):
# TODO: See: http://docs.travis-ci.com/user/notifications/#Authorization-for-Webhooks
# Too bad the docs provide the wrong headers!
slug = request.META.get('HTTP_TRAVIS_REPO_SLUG', None)
auth = request.META.get('HTTP_AUTHORIZATION', None)
addon = None
try:
addon = Addon.objects.get(repo_slug=slug)
except Addon.DoesNotExist:
pass
if addon:
data = self.get_data(request)
if data:
self.process_data(addon, data)
return HttpResponse(status=200)
def get(self, request, *args, **kwargs):
"""Just for easier testing."""
print('Received a GET request!')
return HttpResponse(status=200)
|
Python
| 0.000001
|
@@ -2846,18 +2846,8 @@
ta%5B'
-matrix'%5D%5B'
stat
|
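The fix above reads status from the top level of the webhook payload rather than under matrix. A small sketch with an illustrative payload (not the full Travis schema):
payload = {'status': 0, 'matrix': [{'state': 'finished', 'status': 0}]}
# Top-level read, as the corrected line does; .get() tolerates absence.
build_passing = payload.get('status') == 0
print(build_passing)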
cdedb1d6875a8ab5f42369b1801a1fc0ee205654
|
Add option to generate coverage report
|
run_tests.py
|
run_tests.py
|
#!/usr/bin/env python
"""
Driver script for testing nu-TuLiP. Try calling it with "-h" flag.
SCL; 6 May 2013.
"""
import sys
import os.path
import nose
if __name__ == "__main__":
if ("-h" in sys.argv) or ("--help" in sys.argv):
print """Usage: run_tests.py [--fast] [OPTIONS...] [[-]TESTFILES...]
TESTFILES... is space-separated list of test file names, where the
suffix "_test.py" is added to each given name. E.g.,
run_tests.py automaton
causes the automaton_test.py file to be used and no others. If no
arguments are given, then default is to run all tests. To exclude
tests that are marked as slow, use the flag "--fast".
If TESTFILES... each have a prefix of "-", then all tests *except*
those listed will be run. OPTIONS... are passed on to nose.
"""
exit(1)
if len(sys.argv) == 1:
nose.main()
if "--fast" in sys.argv:
skip_slow = True
sys.argv.remove("--fast")
else:
skip_slow = False
argv = [sys.argv[0]]
if skip_slow:
argv.append("--attr=!slow")
testfiles = []
excludefiles = []
for basename in sys.argv[1:]: # Only add extant file names
try:
with open(os.path.join("tests", basename+"_test.py"), "r") as f:
testfiles.append(basename+"_test.py")
except IOError:
if basename[0] == "-":
try:
with open(os.path.join("tests", basename[1:]+"_test.py"), "r") as f:
excludefiles.append(basename[1:]+"_test.py")
except IOError:
argv.append(basename)
else:
argv.append(basename)
if len(testfiles) > 0 and len(excludefiles) > 0:
print "You can specify files to exclude or include, but not both."
print "Try calling it with \"-h\" flag."
exit(1)
if len(excludefiles) > 0:
argv.append("--exclude="+"|".join(excludefiles))
argv.extend(testfiles)
nose.main(argv=argv)
|
Python
| 0
|
@@ -97,13 +97,13 @@
CL;
-6 May
+5 Sep
201
@@ -265,16 +265,26 @@
tests.py
+ %5B--cover%5D
%5B--fast
@@ -1019,81 +1019,329 @@
-argv = %5Bsys.argv%5B0%5D%5D%0A if skip_slow:%0A argv.append(%22--attr=!slow%22
+if %22--cover%22 in sys.argv:%0A measure_coverage = True%0A sys.argv.remove(%22--cover%22)%0A else:%0A measure_coverage = False%0A%0A argv = %5Bsys.argv%5B0%5D%5D%0A if skip_slow:%0A argv.append(%22--attr=!slow%22)%0A if measure_coverage:%0A argv.extend(%5B%22--with-coverage%22, %22--cover-html%22, %22--cover-package=tulip%22%5D
)%0A
|
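A standalone sketch of the flag-consuming pattern the change above uses: pop the flag out of the argument list so only nose's own options remain, then extend the nose arguments when coverage is requested.
def consume_flag(argv, flag):
    # Remove the flag in place and report whether it was present.
    if flag in argv:
        argv.remove(flag)
        return True
    return False
argv = ['run_tests.py', '--cover', 'automaton']
if consume_flag(argv, '--cover'):
    argv.extend(['--with-coverage', '--cover-html', '--cover-package=tulip'])
print(argv)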
b74d9d3a780082b8cb326a553a9b4c84ca5368be
|
Add IS_OFFLINE environment variable to serve
|
serve.py
|
serve.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module serves a WSGI application using werkzeug.
Author: Logan Raarup <logan@logan.dk>
"""
import importlib
import os
import sys
try:
from werkzeug import serving
except ImportError: # pragma: no cover
sys.exit('Unable to import werkzeug (run: pip install werkzeug)')
def serve(cwd, app, port):
sys.path.insert(0, cwd)
wsgi_fqn = app.rsplit('.', 1)
wsgi_fqn_parts = wsgi_fqn[0].rsplit('/', 1)
if len(wsgi_fqn_parts) == 2:
sys.path.insert(0, os.path.join(cwd, wsgi_fqn_parts[0]))
wsgi_module = importlib.import_module(wsgi_fqn_parts[-1])
wsgi_app = getattr(wsgi_module, wsgi_fqn[1])
# Attempt to force Flask into debug mode
try:
wsgi_app.debug = True
except:
pass
serving.run_simple(
'localhost', int(port), wsgi_app,
use_debugger=True, use_reloader=True, use_evalex=True)
if __name__ == '__main__': # pragma: no cover
if len(sys.argv) != 4:
sys.exit('Usage: {} CWD APP PORT'.format(
os.path.basename(sys.argv[0])))
serve(*sys.argv[1:])
|
Python
| 0
|
@@ -787,16 +787,55 @@
pass%0A%0A
+ os.environ%5B'IS_OFFLINE'%5D = 'True'%0A%0A
serv
|
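A hedged sketch of how application code might consume the flag the change above sets; the exact-string comparison mirrors how the value is written.
import os
os.environ['IS_OFFLINE'] = 'True'  # as serve() now does
if os.environ.get('IS_OFFLINE') == 'True':
    print('running against local stubs instead of live services')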
224aa339ee7f1720ebb3616aa62ba06975c1a11d
|
handle .blend model sources
|
pak_profiles/common.py
|
pak_profiles/common.py
|
#! /usr/bin/env python3
#-*- coding: UTF-8 -*-
### Legal
#
# Author: Thomas DEBESSE <dev@illwieckz.net>
# License: ISC
#
file_common_deps = {
"file_base": "DEPS",
"description": "Package DEPS file",
"action": "copy",
}
file_common_external_editor = {
"file_ext": [
"xcf",
"psd",
"ora",
],
"description": "External Editor File",
"action": "ignore",
}
file_common_metada_sidecar = {
"file_ext": [
"vorbiscomment",
],
"description": "Metadata Sidecar",
"action": "ignore",
}
file_common_texture = {
"file_ext": [
"jpg",
"jpeg",
"png",
"tga",
"bmp",
"webp",
"crn",
"dds",
],
"description": "Texture",
"action": "copy",
}
file_common_sound = {
"file_ext": [
"wav",
"flac",
"ogg",
"opus",
],
"description": "Sound File",
"action": "copy",
}
file_common_script = {
"file_ext": [
"shader",
"particle",
"trail",
],
"dir_ancestor_name": "scripts",
"description": "Common Script",
"action": "copy",
}
file_common_model = {
"file_ext": [
"ase",
"iqm",
"md3",
"md5anim",
"md5mesh",
"qc",
],
"description": "Common Model File",
"action": "copy",
}
file_common_text = {
"file_ext": [
"txt",
"md",
],
"description": "Common Text file",
"action": "copy",
}
file_common_readme = {
"inherit": "file_common_text",
"file_base": "README",
"description": "Common ReadMe file",
}
file_common_nullwav = {
"inherit": "file_common_sound",
"file_ext": "wav",
"file_base": "null",
"description": "Common NULL Sound File",
"action": "copy",
}
|
Python
| 0
|
@@ -1109,32 +1109,154 @@
on%22: %22copy%22,%0A%7D%0A%0A
+file_common_model_source = %7B%0A%09%22file_ext%22: %5B%0A%09%09%22blend%22,%0A%09%5D,%0A%09%22description%22: %22Common Model Source%22,%0A%09%22action%22: %22ignore%22,%0A%7D%0A%0A
file_common_text
|
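A hypothetical dispatcher over profile entries like the ones above (the packaging tool's real matching logic is not shown in this record): match a file's extension against each entry and report its action.
import os
profiles = [
    {'file_ext': ['blend'], 'action': 'ignore'},
    {'file_ext': ['ase', 'iqm', 'md3'], 'action': 'copy'},
]
def action_for(path):
    ext = os.path.splitext(path)[1].lstrip('.')
    for profile in profiles:
        if ext in profile.get('file_ext', []):
            return profile['action']
    return 'unknown'
print(action_for('models/tree.blend'))  # -> ignore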
d8a4d97f97d744be695c911cd16a94b2c95ac76d
|
allow 0.5 seconds for all elements to be created before running selenium tests
|
tests/selenium_tests/jbrowse_selenium.py
|
tests/selenium_tests/jbrowse_selenium.py
|
import os
import time
import unittest
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import Select
class JBrowseTest (object):
base_url = "file://%s/index.html" % os.getcwd()
data_dir = None
## TestCase overrides
def setUp( self ):
self.browser = webdriver.Firefox()
self.browser.get(
self.base_url
+ ( "?data="+self.data_dir if self.data_dir else "" )
)
## convenience methods for us
def assert_element( self, xpathExpression ):
try:
el = self.browser.find_element_by_xpath( xpathExpression )
except NoSuchElementException:
assert 0, ( "can't find %s" % xpathExpression )
return el
def assert_no_element( self, xpath ):
try:
self.browser.find_element_by_xpath( xpath )
assert 0, ( "not supposed to find %s" % xpath )
except NoSuchElementException:
pass
def assert_elements( self, xpathExpression ):
try:
el = self.browser.find_elements_by_xpath( xpathExpression )
except NoSuchElementException:
assert 0, ( "can't find %s" % xpathExpression )
return el
def assert_no_js_errors( self ):
assert self.browser.find_element_by_xpath('/html/body') \
.get_attribute('JSError') == None
def do_typed_query( self, text ):
# Find the query box and put f15 into it and hit enter
qbox = self.browser.find_element_by_id("location")
qbox.clear()
qbox.send_keys( text + Keys.RETURN )
time.sleep( 0.2 )
def turn_on_track( self, tracktext ):
# find the microarray track label
tracklabel = self.assert_element( "//div[@class='tracklist-label'][contains(.,'%s')]" % tracktext )
# drag the track label over
self.actionchains() \
.move_to_element( tracklabel ) \
.click_and_hold( None ) \
.move_by_offset( 300, 50 ) \
.release( None ) \
.perform()
self.assert_no_js_errors()
def turn_off_track( self, tracktext ):
# drag the track back into the track list
track_handle = self.assert_element( "/html//div[contains(@class,'track')]//div[contains(@class,'track-label')][contains(.,'%s')]" % tracktext )
track_list = self.assert_element( "/html//div[@id='tracksAvail']" )
self.actionchains() \
.drag_and_drop( track_handle, track_list ) \
.perform()
self.assert_no_js_errors()
def actionchains( self ):
return ActionChains( self.browser )
def get_track_labels_containing( self, string ):
return self.assert_elements( "//div[contains(@class,'track-label')][contains(.,'%s')]" % string )
def select_refseq( self, name ):
refseq_selector = Select( self.browser.find_element_by_id('chrom') )
refseq_selector.select_by_value( name )
def scroll( self ):
move_right_button = self.browser.find_element_by_id('moveRight')
move_right_button.click()
time.sleep(0.5)
move_left_button = self.browser.find_element_by_id('moveLeft')
move_left_button.click()
# TODO: check the outcome of this
time.sleep(0.5)
self.assert_no_js_errors()
# scroll back and forth with the mouse
self.actionchains() \
.move_to_element( move_right_button ) \
.move_by_offset( 0, 300 ) \
.click_and_hold( None ) \
.move_by_offset( 300, 0 ) \
.release( None ) \
.move_by_offset( -100,100 ) \
.click_and_hold( None ) \
.move_by_offset( -300, 0 ) \
.release( None ) \
.perform()
self.assert_no_js_errors()
|
Python
| 0
|
@@ -629,16 +629,40 @@
)
+%0A time.sleep(0.5)
%0A%0A ##
|
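The commit itself adds a fixed 0.5 s sleep after loading the page. As an alternative sketch only, Selenium's explicit waits poll for a known element instead of sleeping:
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def wait_for_page(browser, timeout=5):
    # Blocks until the location box exists, or raises TimeoutException.
    WebDriverWait(browser, timeout).until(
        EC.presence_of_element_located((By.ID, 'location')))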
52d835ec8a3dfec53c3cab23598be6f63da9addc
|
Update prims_minimum_spanning.py
|
algorithms/graph/prims_minimum_spanning.py
|
algorithms/graph/prims_minimum_spanning.py
|
import heapq # for priority queue
# prim's algo. to find weight of minimum spanning tree
def prims(graph):
vis=[]
s=[[0,1]]
prim = []
mincost=0
while(len(s)>0):
v=heapq.heappop(s)
x=v[1]
if(x in vis):
continue
mincost += v[0]
prim.append(x)
vis.append(x)
for j in g[x]:
i=j[-1]
if(i not in vis):
heapq.heappush(s,j)
return mincost
if __name__=="__main__":
# input number of nodes and edges in graph
n,e = map(int,input().split())
# initializing empty graph as a dictionary (of the form {int:list})
g=dict(zip([i for i in range(1,n+1)],[[] for i in range(n)]))
# input graph data
for i in range(e):
a,b,c=map(int,input().split())
g[a].append([c,b])
g[b].append([c,a])
# print weight of minimum spanning tree
print(prims(g))
''' tests-
Input : 4 5
1 2 7
1 4 6
2 4 9
4 3 8
2 3 6
Output : 19
Input : 5 6
1 2 3
1 3 8
2 4 5
3 4 2
3 5 4
4 5 6
Output : 14
'''
|
Python
| 0
|
@@ -1,8 +1,438 @@
+'''%0AThis Prim's Algorithm Code is for finding weight of minimum spanning tree%0Aof a connected graph.%0AFor argument graph, it should be a dictionary type%0Asuch as%0Agraph = %7B%0A 'a': %5B %5B3, 'b'%5D, %5B8,'c'%5D %5D,%0A 'b': %5B %5B3, 'a'%5D, %5B5, 'd'%5D %5D,%0A 'c': %5B %5B8, 'a'%5D, %5B2, 'd'%5D, %5B4, 'e'%5D %5D,%0A 'd': %5B %5B5, 'b'%5D, %5B2, 'c'%5D, %5B6, 'e'%5D %5D,%0A 'e': %5B %5B4, 'c'%5D, %5B6, 'd'%5D %5D%0A%7D%0A%0Awhere 'a','b','c','d','e' are nodes (these can be 1,2,3,4,5 as well)%0A'''%0A%0A%0A
import h
@@ -529,16 +529,21 @@
ms(graph
+_used
):%0A v
@@ -790,16 +790,25 @@
r j in g
+raph_used
%5Bx%5D:%0A
@@ -891,16 +891,16 @@
h(s,j)%0A%0A
+
retu
@@ -914,747 +914,4 @@
ost%0A
-%0A%0A%0Aif __name__==%22__main__%22:%0A%0A # input number of nodes and edges in graph%0A n,e = map(int,input().split())%0A%0A # initializing empty graph as a dictionary (of the form %7Bint:list%7D)%0A g=dict(zip(%5Bi for i in range(1,n+1)%5D,%5B%5B%5D for i in range(n)%5D))%0A%0A # input graph data%0A for i in range(e):%0A a,b,c=map(int,input().split())%0A g%5Ba%5D.append(%5Bc,b%5D)%0A g%5Bb%5D.append(%5Bc,a%5D)%0A%0A # print weight of minimum spanning tree%0A print(prims(g))%0A%0A ''' tests-%0A Input : 4 5%0A 1 2 7%0A 1 4 6%0A 2 4 9%0A 4 3 8%0A 2 3 6%0A Output : 19%0A%0A%0A Input : 5 6%0A 1 2 3%0A 1 3 8%0A 2 4 5%0A 3 4 2%0A 3 5 4%0A 4 5 6%0A Output : 14%0A '''%0A
|
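A usage sketch for the rewritten prims(graph_used), assuming the function above is in scope; this is the integer-keyed equivalent of the second original test case, so the expected minimum spanning tree weight is 14.
graph = {
    1: [[3, 2], [8, 3]],
    2: [[3, 1], [5, 4]],
    3: [[8, 1], [2, 4], [4, 5]],
    4: [[5, 2], [2, 3], [6, 5]],
    5: [[4, 3], [6, 4]],
}
print(prims(graph))  # -> 14 (note: the search still starts at node 1)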
0227f7e964dcc1a31b4accc657eb40f78b0282d1
|
Deal with Django 1.8 deprecation/removal of django.test.simple.
|
run_tests.py
|
run_tests.py
|
#!/usr/bin/env python
import os, sys
from django.conf import settings
import django
DIRNAME = os.path.dirname(__file__)
if django.VERSION[1] < 4:
# If the version is older than Django 1.4 (VERSION minor < 4),
# then remove the TZ setting.
settings.configure(DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
#ROOT_URLCONF='mailqueue.urls',
INSTALLED_APPS=('django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'cas',),
CAS_SERVER_URL = 'http://signin.cas.com',
)
else:
settings.configure(DEBUG=True,
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
#ROOT_URLCONF='mailqueue.urls',
INSTALLED_APPS=('django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'cas',),
USE_TZ=True,
CAS_SERVER_URL = 'http://signin.cas.com',)
try:
# Django 1.7 needs this, but other versions dont.
django.setup()
except AttributeError:
pass
from django.test.simple import DjangoTestSuiteRunner
test_runner = DjangoTestSuiteRunner(verbosity=1)
failures = test_runner.run_tests(['cas', ])
if failures:
sys.exit(failures)
|
Python
| 0
|
@@ -64,16 +64,57 @@
ettings%0A
+from django.test.utils import get_runner%0A
import d
@@ -1785,16 +1785,25 @@
pass%0A%0A
+try:%0A
from dja
@@ -1847,16 +1847,20 @@
eRunner%0A
+
test_run
@@ -1900,16 +1900,105 @@
sity=1)%0A
+except ImportError:%0A TestRunner = get_runner(settings)%0A test_runner = TestRunner()%0A
failures
@@ -2046,16 +2046,16 @@
ilures:%0A
-
sys.
@@ -2068,8 +2068,9 @@
ailures)
+%0A
|
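A standalone sketch of the compatibility shape the change above adds; it assumes Django is installed and settings are already configured. Try the pre-1.8 import, fall back to get_runner:
try:
    # Django < 1.8 still ships the old suite runner.
    from django.test.simple import DjangoTestSuiteRunner as TestRunner
except ImportError:
    from django.conf import settings
    from django.test.utils import get_runner
    TestRunner = get_runner(settings)
failures = TestRunner(verbosity=1).run_tests(['cas'])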
608225fa8a62cb5c0aa42b1a2e371c1e1b731d58
|
fix bug in GrandPrixWidget
|
driver27/admin/common.py
|
driver27/admin/common.py
|
from django import forms
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from tabbed_admin import TabbedModelAdmin
from django.utils.safestring import mark_safe
from django.utils.html import format_html
from django.utils.encoding import force_text
class GrandPrixWidget(forms.widgets.Select):
def render_option(self, selected_choices, option_value, option_label):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
data_circuit_attr = ''
if option_value:
from driver27.models import GrandPrix
grand_prix = GrandPrix.objects.filter(pk=option_value)
if grand_prix.count() and grand_prix.first().default_circuit:
data_circuit_attr = grand_prix.first().default_circuit.pk
return format_html('<option value="{}"{} data-circuit="{}">{}</option>',
option_value, selected_html,
data_circuit_attr, force_text(option_label))
class Media:
js = ['driver27/js/select_default_circuit.js']
# http://stackoverflow.com/a/34567383
class AlwaysChangedModelForm(forms.ModelForm):
def is_empty_form(self, *args, **kwargs):
empty_form = True
for name, field in self.fields.items():
prefixed_name = self.add_prefix(name)
data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
if data_value:
empty_form = False
break
return empty_form
def has_changed(self, *args, **kwargs):
""" Should returns True if data differs from initial.
By always returning true even unchanged inlines will get validated and saved."""
if self.instance.pk is None and self.initial:
if not self.changed_data:
return True
if self.is_empty_form():
return False
return super(AlwaysChangedModelForm, self).has_changed()
class RelatedCompetitionAdmin(object):
""" Aux class to share print_competitions method between driver and team """
def print_competitions(self, obj):
if hasattr(obj, 'competitions'):
return ', '.join('{competition}'.format(competition=competition)
for competition in obj.competitions.all())
else:
return None
print_competitions.short_description = _('competitions')
class CommonTabbedModelAdmin(TabbedModelAdmin):
def get_form(self, request, obj=None, **kwargs):
# just save obj reference for future processing in Inline
if request and obj:
request._obj_ = obj
return super(CommonTabbedModelAdmin, self).get_form(request=request, obj=obj, **kwargs)
|
Python
| 0
|
@@ -1114,16 +1114,24 @@
_attr =
+getattr(
grand_pr
@@ -1156,19 +1156,27 @@
_circuit
-.pk
+, 'pk', '')
%0A%0A
|
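A minimal sketch of the guard the fix above introduces: getattr with a default tolerates an unset related object, where direct attribute access on None would raise.
class Circuit(object):
    pk = 42
class GrandPrix(object):
    default_circuit = None  # relation may be unset
gp = GrandPrix()
print(repr(getattr(gp.default_circuit, 'pk', '')))  # -> '' (no AttributeError)
gp.default_circuit = Circuit()
print(getattr(gp.default_circuit, 'pk', ''))        # -> 42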
9e0f62c3eedd2c5376af4178a0ecf529898a041b
|
Update command doc for open
|
guild/commands/open_.py
|
guild/commands/open_.py
|
# Copyright 2017-2020 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
from . import runs_support
@click.command("open")
@runs_support.run_arg
@click.option("-p", "--path", metavar="PATH", help="Path to open under run directory.")
@click.option(
"-s", "--sourcecode", is_flag=True, help="Open run source code directory."
)
@click.option(
"-O",
"--output",
is_flag=True,
help="Open run output. Cannot be used with other options.",
)
@click.option("-c", "--cmd", metavar="CMD", help="Command used to open run.")
@click.option(
"--shell", is_flag=True, help="Open a new shell in run directory or PATH."
)
@click.option(
"--shell-cmd",
metavar="CMD",
help="Open a new shell in run directory or PATH using CMD.",
)
@runs_support.all_filters
@click.pass_context
@click_util.use_args
@click_util.render_doc
def open_(ctx, args):
"""Open a run path.
This command opens a path a single run.
{{ runs_support.run_arg }}
If `RUN` isn't specified, the latest run is selected.
### Run Paths
`--path` may be used to open a path within the run directory. By
default the run directory itself is opened. PATH must be relative.
`--sourcecode` may be used to open the run source code
directory. If `--path` is also specified, the path applies to the
source code directory rather than the run directory.
### Output
`--output` may be used to open the output for a run. This option
may not be used with other options.
### Open Command
`--cmd` may be used to specify the command used to open the
path. By default the system-defined program is used.
{{ runs_support.all_filters }}
"""
from . import open_impl
open_impl.main(args, ctx)
|
Python
| 0
|
@@ -1501,16 +1501,26 @@
run path
+ or output
.%0A%0A T
|
3cbe58244ca1f3e4c276623ab345aa3d95c8f925
|
Save message can show the number of lines written.
|
szu_ed.py
|
szu_ed.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2014 the Sanzang Utils authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" Sanzang Utils program module for table editing. """
import getopt
import io
import os
import signal
import sys
import unicodedata
try:
import readline
except ImportError:
pass
USAGE = """Usage: szu-ed [options] table_file
Edit translation table rules using a program of simple commands.
Options:
-h, --help print this help message and exit
"""
def set_stdio_utf8():
"""
Set standard I/O streams to UTF-8.
Attempt to reassign standard I/O streams to new streams using UTF-8.
Standard input should discard any leading BOM. If an error is raised,
assume the environment is inflexible but correct (IDLE).
"""
try:
sys.stdin = io.TextIOWrapper(
sys.stdin.detach(), encoding='utf-8-sig', line_buffering=True)
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), encoding='utf-8', line_buffering=True)
sys.stderr = io.TextIOWrapper(
sys.stderr.detach(), encoding='utf-8', line_buffering=True)
except io.UnsupportedOperation:
pass
def read_table(tab_str):
"""
Read a translation table from a formatted string.
Given a translation table as a formatted string, load the contents and
return them as a dictionary. The keys are source terms (column 1),
while each value is a list of the corresponding terms.
"""
tab_str = unicodedata.normalize('NFC', tab_str)
tab = {}
width = -1
for line in tab_str.split('\n'):
rec = [f.strip() for f in line.split('|')]
if width != -1 and width == len(rec):
tab[rec[0]] = rec[1:]
elif width == -1 and len(rec) > 1:
width = len(rec)
tab[rec[0]] = rec[1:]
elif line.strip() != '':
raise RuntimeError('Table error: ' + line.strip())
return tab, width
def table_to_str(tab):
"""
Produce a formatted string for a translation table.
Given a table stored as a dictionary, sort the contents and return the
table as text in the translation table format for storage.
"""
table_str = ''
items = list(tab.items())
items.sort(key=lambda x: (-len(x[0]), x[0]))
for i in items:
table_str += i[0] + '|' + '|'.join(i[1]) + '\n'
return table_str
def edit(table_fpath, input_lines=None):
"""
Open a translation table and run editor commands.
Editor commands are read from standard input by default, and non-error
output is written to standard output. Errors are printed to standard
error. If a list of input lines is also specified, then read editor
input from this list instead of the standard input.
"""
if not os.path.exists(table_fpath):
open(table_fpath, 'w', encoding='utf-8').close()
tab, width = read_table('')
else:
with open(table_fpath, 'r', encoding='utf-8-sig') as fin:
tab, width = read_table(fin.read())
cmd = '\\set'
try:
while True:
if input_lines is None:
line = unicodedata.normalize('NFC', input().strip())
elif len(input_lines) > 0:
line = unicodedata.normalize('NFC', input_lines.pop(0).strip())
else:
return
if line in ('\\get', '\\rm', '\\set'):
cmd = line
elif line == '\\p':
print(table_to_str(tab), end='')
elif line == '\\q':
return
elif line == '\\w' or line == '\\wq':
with open(table_fpath, 'w', encoding='utf-8') as fout:
fout.write(table_to_str(tab))
sys.stderr.write('Saved: ' + table_fpath + '\n')
if line == '\\wq':
return
elif line != '' and not line.startswith('\\'):
if cmd == '\\get':
try:
print('%s|%s' % (line, '|'.join(tab[line])))
except KeyError:
sys.stderr.write('Not found: ' + line + '\n')
elif cmd == '\\rm':
try:
del tab[line]
except KeyError:
sys.stderr.write('Not found: ' + line + '\n')
elif cmd == '\\set':
toks = [f.strip() for f in line.split('|')]
if width == -1 and len(toks) > 1:
width = len(toks)
if len(toks) == width:
tab[toks[0]] = toks[1:]
else:
sys.stderr.write('Invalid assignment: ' + line + '\n')
elif line.strip() == '':
pass
else:
sys.stderr.write('Syntax error: ' + line + '\n')
except EOFError:
return
def main(argv):
"""
Run szu-ed as a portable command-line program.
This program handles data through standard I/O streams as UTF-8 text.
Input has any leading byte-order mark stripped if one is found. Broken
pipes and SIGINT are handled silently.
"""
set_stdio_utf8()
if 'SIGPIPE' in dir(signal):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
try:
opts, args = getopt.getopt(argv[1:], 'h', ['help'])
for option, _ in opts:
if option in ('-h', '--help'):
print(USAGE, end='')
return 0
if len(args) != 1:
sys.stderr.write(USAGE)
return 1
edit(args[0])
return 0
except getopt.GetoptError:
sys.stderr.write(USAGE)
return 1
except KeyboardInterrupt:
print()
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Python
| 0
|
@@ -4768,37 +4768,52 @@
te('
-Saved: ' + table_fpath + '%5Cn'
+%22%25s%22 (%25d lines)%5Cn' %25 (table_fpath, len(tab))
)%0A
|
885489ac23add2cbae8eb5d0215feaf120c42263
|
Fix bug where years still had brackets around them when importing from imdb
|
plexlibrary/imdbutils.py
|
plexlibrary/imdbutils.py
|
# -*- coding: utf-8 -*-
import datetime
import requests
from lxml import html
from utils import add_years
class IMDb(object):
def __init__(self, tmdb, tvdb):
self.tmdb = tmdb
self.tvdb = tvdb
def _handle_request(self, url):
"""Stolen from Automated IMDB Top 250 Plex library script by /u/SwiftPanda16
"""
r = requests.get(url)
tree = html.fromstring(r.content)
# Dict of the IMDB top 250 ids in order
titles = tree.xpath("//table[contains(@class, 'chart')]"
"//td[@class='titleColumn']/a/text()")
years = tree.xpath("//table[contains(@class, 'chart')]"
"//td[@class='titleColumn']/span/text()")
ids = tree.xpath("//table[contains(@class, 'chart')]"
"//td[@class='ratingColumn']/div//@data-titleid")
return ids, titles, years
def add_movies(self, url, movie_list=None, movie_ids=None, max_age=0):
if not movie_list:
movie_list = []
if not movie_ids:
movie_ids = []
max_date = add_years(max_age * -1)
print(u"Retrieving the IMDB list: {}".format(url))
(imdb_ids, imdb_titles, imdb_years) = self._handle_request(url)
for i, imdb_id in enumerate(imdb_ids):
# Skip already added movies
if imdb_id in movie_ids:
continue
if self.tmdb:
tmdb_data = self.tmdb.get_tmdb_from_imdb(imdb_id, 'movie')
if tmdb_data and tmdb_data['release_date']:
date = datetime.datetime.strptime(tmdb_data['release_date'],
'%Y-%m-%d')
else:
date = datetime.date(imdb_years[i], 1, 1)
# Skip old movies
if max_age != 0 and (max_date > date):
continue
movie_list.append({
'id': imdb_id,
'tmdb_id': tmdb_data['id'] if tmdb_data else None,
'title': tmdb_data['title'] if tmdb_data else imdb_titles[i],
'year': date.year,
})
movie_ids.append(imdb_id)
if tmdb_data['id']:
movie_ids.append('tmdb' + str(tmdb_data['id']))
return movie_list, movie_ids
def add_shows(self, url, show_list=None, show_ids=None, max_age=0):
if not show_list:
show_list = []
if not show_ids:
show_ids = []
curyear = datetime.datetime.now().year
print(u"Retrieving the IMDb list: {}".format(url))
data = {}
if max_age != 0:
data['extended'] = 'full'
(imdb_ids, imdb_titles, imdb_years) = self._handle_request(url)
for i, imdb_id in enumerate(imdb_ids):
# Skip already added shows
if imdb_id in show_ids:
continue
if self.tvdb:
tvdb_data = self.tvdb.get_tvdb_from_imdb(imdb_id)
if self.tmdb:
tmdb_data = self.tmdb.get_tmdb_from_imdb(imdb_id, 'tv')
if tvdb_data and tvdb_data['firstAired'] != "":
year = datetime.datetime.strptime(tvdb_data['firstAired'],
'%Y-%m-%d').year
elif tmdb_data and tmdb_data['first_air_date'] != "":
year = datetime.datetime.strptime(tmdb_data['first_air_date'],
'%Y-%m-%d').year
else:
year = imdb_years[i]
# Skip old shows
if max_age != 0 \
and (curyear - (max_age - 1)) > year:
continue
if tvdb_data:
title = tvdb_data['seriesName']
else:
title = tmdb_data['name'] if tmdb_data else imdb_titles[i]
show_list.append({
'id': imdb_id,
'tvdb_id': tvdb_data['id'] if tvdb_data else None,
'tmdb_id': tmdb_data['id'] if tmdb_data else None,
'title': title,
'year': year,
})
show_ids.append(imdb_id)
if tmdb_data and tmdb_data['id']:
show_ids.append('tmdb' + str(tmdb_data['id']))
if tvdb_data and tvdb_data['id']:
show_ids.append('tvdb' + str(tvdb_data['id']))
return show_list, show_ids
def add_items(self, item_type, url, item_list=None, item_ids=None,
max_age=0):
if item_type == 'movie':
return self.add_movies(url, movie_list=item_list,
movie_ids=item_ids, max_age=max_age)
elif item_type == 'tv':
return self.add_shows(url, show_list=item_list,
show_ids=item_ids, max_age=max_age)
|
Python
| 0
|
@@ -1713,34 +1713,48 @@
)%0A el
-se
+if imdb_years%5Bi%5D
:%0A
@@ -1772,24 +1772,32 @@
tetime.date(
+int(str(
imdb_years%5Bi
@@ -1801,14 +1801,130 @@
s%5Bi%5D
-,
+).strip(%22()%22)),%0A
1
+2
,
-1
+31)%0A else:%0A date = datetime.date.today(
)%0A%0A
@@ -3654,34 +3654,48 @@
r%0A el
-se
+if imdb_years%5Bi%5D
:%0A
@@ -3703,16 +3703,20 @@
year =
+str(
imdb_yea
@@ -3716,24 +3716,80 @@
mdb_years%5Bi%5D
+).strip(%22()%22)%0A else:%0A year = 0
%0A%0A
|
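
The hunks above tighten the fallback date logic: a bare else becomes elif imdb_years[i], scraped IMDb year cells (strings like "(2019)") are stripped and converted before building a date, and a missing year now falls back to today rather than crashing. A minimal sketch of the resulting selection, with illustrative names not taken from the repository:

    import datetime

    def movie_date(tmdb_release_date, imdb_year):
        """Pick the best available date for the max_age filter."""
        if tmdb_release_date:
            return datetime.datetime.strptime(
                tmdb_release_date, '%Y-%m-%d').date()
        if imdb_year:
            # IMDb year cells scrape as "(2019)"; strip the parentheses
            # before converting. Dec 31 is the most lenient assumption for
            # an age cutoff when only the year is known.
            return datetime.date(int(str(imdb_year).strip("()")), 12, 31)
        # No date information at all: never skip the title as too old.
        return datetime.date.today()

    movie_date('1999-03-31', '(1999)')  # -> datetime.date(1999, 3, 31)
    movie_date(None, '(1999)')          # -> datetime.date(1999, 12, 31)
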
f25f935c024b243068b42a8da846579fc656982a
|
Install PhantomJS 1.9.8 from binary download
|
provision.py
|
provision.py
|
import os
import sys
import logging
import platform
try:
import sh
except ImportError:
import pbs as sh
SUPPORTED_PLATFORMS = {
"Ubuntu": [
"trusty",
],
}
APT_DEPENDENCIES = {
"trusty": [
"libffi-dev",
"memcached",
"rabbitmq-server",
"libldap2-dev",
"redis-server",
"postgresql-server-dev-all",
"libmemcached-dev",
"postgresql-9.3",
"python-dev",
"hunspell-en-us",
"nodejs",
"python-virtualenv",
"supervisor",
"git",
"phantomjs",
"npm",
"node-jquery",
"yui-compressor",
]
}
# TODO: backport node-{cssstyle,htmlparser2,nwmatcher} to trusty,
# so we can eliminate npm (above) and this section.
NPM_DEPENDENCIES = {
"trusty": [
"cssstyle",
"htmlparser2",
"nwmatcher",
]
}
VENV_PATH="/srv/zulip-venv"
ZULIP_PATH="/srv/zulip"
# tsearch-extras is an extension to postgres's built-in full-text search.
# TODO: use a real APT repository
TSEARCH_URL_BASE = "https://dl.dropboxusercontent.com/u/283158365/zuliposs/"
TSEARCH_PACKAGE_NAME = {
"trusty": "postgresql-9.3-tsearch-extras"
}
TSEARCH_VERSION = "0.1.2"
# TODO: this path is platform-specific!
TSEARCH_STOPWORDS_PATH = "/usr/share/postgresql/9.3/tsearch_data/"
REPO_STOPWORDS_PATH = os.path.join(
ZULIP_PATH,
"puppet",
"zulip",
"files",
"postgresql",
"zulip_english.stop",
)
LOUD = dict(_out=sys.stdout, _err=sys.stderr)
def main():
log = logging.getLogger("zulip-provisioner")
# TODO: support other architectures
if platform.architecture()[0] == '64bit':
arch = 'amd64'
else:
log.critical("Only amd64 is supported.")
vendor, version, codename = platform.dist()
if not (vendor in SUPPORTED_PLATFORMS and codename in SUPPORTED_PLATFORMS[vendor]):
log.critical("Unsupported platform: {} {}".format(vendor, codename))
with sh.sudo:
sh.apt_get.update(**LOUD)
sh.apt_get.install(*APT_DEPENDENCIES["trusty"], assume_yes=True, **LOUD)
temp_deb_path = sh.mktemp("package_XXXXXX.deb", tmpdir=True)
sh.wget(
"{}/{}_{}_{}.deb".format(
TSEARCH_URL_BASE,
TSEARCH_PACKAGE_NAME["trusty"],
TSEARCH_VERSION,
arch,
),
output_document=temp_deb_path,
**LOUD
)
with sh.sudo:
sh.dpkg("--install", temp_deb_path, **LOUD)
with sh.sudo:
sh.rm("-rf", VENV_PATH, **LOUD)
sh.mkdir("-p", VENV_PATH, **LOUD)
sh.chown("{}:{}".format(os.getuid(), os.getgid()), VENV_PATH, **LOUD)
sh.virtualenv(VENV_PATH, **LOUD)
# Add the ./tools and ./scripts/setup directories inside the repository root to
# the system path; we'll reference them later.
orig_path = os.environ["PATH"]
os.environ["PATH"] = os.pathsep.join((
os.path.join(ZULIP_PATH, "tools"),
os.path.join(ZULIP_PATH, "scripts", "setup"),
orig_path
))
# Put Python virtualenv activation in our .bash_profile.
with open(os.path.expanduser('~/.bash_profile'), 'w+') as bash_profile:
bash_profile.writelines([
"source .bashrc\n",
"source %s\n" % (os.path.join(VENV_PATH, "bin", "activate"),),
])
# Switch current Python context to the virtualenv.
activate_this = os.path.join(VENV_PATH, "bin", "activate_this.py")
execfile(activate_this, dict(__file__=activate_this))
sh.pip.install(requirement=os.path.join(ZULIP_PATH, "requirements.txt"), **LOUD)
with sh.sudo:
sh.cp(REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH, **LOUD)
# Add additional node packages for test-js-with-node.
with sh.sudo:
sh.npm.install(*NPM_DEPENDENCIES["trusty"], g=True, prefix="/usr", **LOUD)
# Management commands expect to be run from the root of the project.
os.chdir(ZULIP_PATH)
os.system("generate_secrets.py -d")
sh.configure_rabbitmq(**LOUD)
sh.postgres_init_db(**LOUD)
sh.do_destroy_rebuild_database(**LOUD)
sh.postgres_init_test_db(**LOUD)
sh.do_destroy_rebuild_test_database(**LOUD)
sh.setup_git_repo(**LOUD)
with sh.sudo:
sh.cp(os.path.join(ZULIP_PATH, "tools", "provision", "zulip-dev.conf"), "/etc/supervisor/conf.d/zulip-dev.conf", **LOUD)
sh.service("supervisor", "restart", **LOUD)
if __name__ == "__main__":
sys.exit(main())
|
Python
| 0
|
@@ -557,29 +557,8 @@
t%22,%0A
- %22phantomjs%22,%0A
@@ -2434,32 +2434,632 @@
_path, **LOUD)%0A%0A
+ with sh.sudo:%0A PHANTOMJS_PATH = %22/srv/phantomjs%22%0A PHANTOMJS_TARBALL = os.path.join(PHANTOMJS_PATH, %22phantomjs-1.9.8-linux-x86_64.tar.bz2%22)%0A sh.mkdir(%22-p%22, PHANTOMJS_PATH, **LOUD)%0A sh.wget(%22https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-1.9.8-linux-x86_64.tar.bz2%22,%0A output_document=PHANTOMJS_TARBALL, **LOUD)%0A sh.tar(%22xj%22, directory=PHANTOMJS_PATH, file=PHANTOMJS_TARBALL, **LOUD)%0A sh.ln(%22-sf%22, os.path.join(PHANTOMJS_PATH, %22phantomjs-1.9.8-linux-x86_64%22, %22bin%22, %22phantomjs%22),%0A %22/usr/local/bin/phantomjs%22, **LOUD)%0A%0A
with sh.sudo
|
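
The first hunk drops the distro phantomjs package; the second provisions PhantomJS 1.9.8 from the upstream tarball instead. Reassembled from the diff (the URL and paths are the hunk's own; os, sh and LOUD are as defined in the file above), the added block is equivalent to:

    with sh.sudo:
        PHANTOMJS_PATH = "/srv/phantomjs"
        PHANTOMJS_TARBALL = os.path.join(
            PHANTOMJS_PATH, "phantomjs-1.9.8-linux-x86_64.tar.bz2")
        sh.mkdir("-p", PHANTOMJS_PATH, **LOUD)
        # Fetch the official binary release rather than trusting the
        # (older) packaged version.
        sh.wget("https://bitbucket.org/ariya/phantomjs/downloads/"
                "phantomjs-1.9.8-linux-x86_64.tar.bz2",
                output_document=PHANTOMJS_TARBALL, **LOUD)
        sh.tar("xj", directory=PHANTOMJS_PATH, file=PHANTOMJS_TARBALL, **LOUD)
        # ln -sf keeps the step idempotent on repeated provisioning runs.
        sh.ln("-sf",
              os.path.join(PHANTOMJS_PATH, "phantomjs-1.9.8-linux-x86_64",
                           "bin", "phantomjs"),
              "/usr/local/bin/phantomjs", **LOUD)
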
f12b3f5c5a1409f44fc2acbb54d53fc668028e4a
|
Set print options for numpy 1.14 to 1.13.
|
landlab/__init__.py
|
landlab/__init__.py
|
#! /usr/bin/env python
"""The Landlab
:Package name: TheLandlab
:Release date: 2013-03-24
:Authors: Greg Tucker, Nicole Gasparini, Erkan Istanbulluoglu, Daniel Hobley,
Sai Nudurupati, Jordan Adams, Eric Hutton
:URL: http://csdms.colorado.edu/trac/landlab
:License: MIT
"""
from __future__ import absolute_import
import os
from ._registry import registry
cite_as = registry.format_citations
__all__ = ['registry']
if 'DISPLAY' not in os.environ:
try:
import matplotlib
except ImportError:
import warnings
warnings.warn('matplotlib not found', ImportWarning)
else:
matplotlib.use('Agg')
from .core.model_parameter_dictionary import ModelParameterDictionary
from .core.model_parameter_dictionary import (MissingKeyError,
ParameterValueError)
from .core.model_parameter_loader import load_params
from .core.model_component import Component
from .framework.collections import Palette, Arena, NoProvidersError
from .framework.decorators import Implements, ImplementsOrRaise
from .framework.framework import Framework
from .field.scalar_data_fields import FieldError
from .grid import *
from .plot import *
from .testing.nosetester import LandlabTester
test = LandlabTester().test
bench = LandlabTester().bench
__all__.extend(['ModelParameterDictionary', 'MissingKeyError',
'ParameterValueError', 'Component', 'Palette', 'Arena',
'NoProvidersError', 'Implements', 'ImplementsOrRaise',
'Framework', 'FieldError', 'LandlabTester', 'load_params'])
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
Python
| 0.000107
|
@@ -322,16 +322,154 @@
ort os%0A%0A
+from numpy import set_printoptions%0Atry:%0A set_printoptions(legacy='1.13')%0Aexcept TypeError:%0A pass%0Afinally:%0A del set_printoptions%0A%0A
from ._r
|
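
Decoded, the hunk inserts the following guard right after import os. The legacy keyword only exists from numpy 1.14 onward, hence the TypeError catch; the finally branch keeps set_printoptions out of landlab's public namespace:

    from numpy import set_printoptions
    try:
        # Keep array reprs in the 1.13 style so existing doctests and
        # printed output stay stable under numpy 1.14.
        set_printoptions(legacy='1.13')
    except TypeError:
        pass  # numpy < 1.14: the old formatting is already the default
    finally:
        del set_printoptions
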
c7ec5792f689e0b9c7324edfd170ec01c85ddb6b
|
Make sure all editors picks are resaved on edit. Fixes #408
|
wagtail/wagtailsearch/views/editorspicks.py
|
wagtail/wagtailsearch/views/editorspicks.py
|
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import permission_required
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.utils.translation import ugettext as _
from django.views.decorators.vary import vary_on_headers
from wagtail.wagtailsearch import models, forms
from wagtail.wagtailadmin.forms import SearchForm
@permission_required('wagtailadmin.access_admin')
@vary_on_headers('X-Requested-With')
def index(request):
is_searching = False
page = request.GET.get('p', 1)
query_string = request.GET.get('q', "")
queries = models.Query.objects.filter(editors_picks__isnull=False).distinct()
# Search
if query_string:
queries = queries.filter(query_string__icontains=query_string)
is_searching = True
# Pagination
paginator = Paginator(queries, 20)
try:
queries = paginator.page(page)
except PageNotAnInteger:
queries = paginator.page(1)
except EmptyPage:
queries = paginator.page(paginator.num_pages)
if request.is_ajax():
return render(request, "wagtailsearch/editorspicks/results.html", {
'is_searching': is_searching,
'queries': queries,
'query_string': query_string,
})
else:
return render(request, 'wagtailsearch/editorspicks/index.html', {
'is_searching': is_searching,
'queries': queries,
'query_string': query_string,
'search_form': SearchForm(data=dict(q=query_string) if query_string else None, placeholder=_("Search editor's picks")),
})
def save_editorspicks(query, new_query, editors_pick_formset):
# Save
if editors_pick_formset.is_valid():
# Set sort_order
for i, form in enumerate(editors_pick_formset.ordered_forms):
form.instance.sort_order = i
editors_pick_formset.save()
# If query was changed, move all editors picks to the new query
if query != new_query:
editors_pick_formset.get_queryset().update(query=new_query)
return True
else:
return False
@permission_required('wagtailadmin.access_admin')
def add(request):
if request.POST:
# Get query
query_form = forms.QueryForm(request.POST)
if query_form.is_valid():
query = models.Query.get(query_form['query_string'].value())
# Save editors picks
editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query)
if save_editorspicks(query, query, editors_pick_formset):
messages.success(request, _("Editor's picks for '{0}' created.").format(query))
return redirect('wagtailsearch_editorspicks_index')
else:
if len(editors_pick_formset.non_form_errors()):
messages.error(request, " ".join(error for error in editors_pick_formset.non_form_errors())) # formset level error (e.g. no forms submitted)
else:
messages.error(request, _("Recommendations have not been created due to errors")) # specific errors will be displayed within form fields
else:
editors_pick_formset = forms.EditorsPickFormSet()
else:
query_form = forms.QueryForm()
editors_pick_formset = forms.EditorsPickFormSet()
return render(request, 'wagtailsearch/editorspicks/add.html', {
'query_form': query_form,
'editors_pick_formset': editors_pick_formset,
})
@permission_required('wagtailadmin.access_admin')
def edit(request, query_id):
query = get_object_or_404(models.Query, id=query_id)
if request.POST:
# Get query
query_form = forms.QueryForm(request.POST)
# and the recommendations
editors_pick_formset = forms.EditorsPickFormSet(request.POST, instance=query)
if query_form.is_valid():
new_query = models.Query.get(query_form['query_string'].value())
# Save editors picks
if save_editorspicks(query, new_query, editors_pick_formset):
messages.success(request, _("Editor's picks for '{0}' updated.").format(new_query))
return redirect('wagtailsearch_editorspicks_index')
else:
if len(editors_pick_formset.non_form_errors()):
messages.error(request, " ".join(error for error in editors_pick_formset.non_form_errors())) # formset level error (e.g. no forms submitted)
else:
messages.error(request, _("Recommendations have not been saved due to errors")) # specific errors will be displayed within form fields
else:
query_form = forms.QueryForm(initial=dict(query_string=query.query_string))
editors_pick_formset = forms.EditorsPickFormSet(instance=query)
return render(request, 'wagtailsearch/editorspicks/edit.html', {
'query_form': query_form,
'editors_pick_formset': editors_pick_formset,
'query': query,
})
@permission_required('wagtailadmin.access_admin')
def delete(request, query_id):
query = get_object_or_404(models.Query, id=query_id)
if request.POST:
query.editors_picks.all().delete()
messages.success(request, _("Editor's picks deleted."))
return redirect('wagtailsearch_editorspicks_index')
return render(request, 'wagtailsearch/editorspicks/confirm_delete.html', {
'query': query,
})
|
Python
| 0
|
@@ -1940,16 +1940,151 @@
er = i%0A%0A
+ # Make sure the form is marked as changed so it gets saved with the new order%0A form.has_changed = lambda: True%0A%0A
|
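
Django formsets skip saving forms whose fields did not change, so a sort_order assigned only on form.instance was silently dropped; the hunk forces has_changed() to return True inside the ordering loop. With it applied, the loop reads:

    for i, form in enumerate(editors_pick_formset.ordered_forms):
        form.instance.sort_order = i

        # Make sure the form is marked as changed so it gets saved with
        # the new order; mutating the instance alone does not mark the
        # form dirty.
        form.has_changed = lambda: True

    editors_pick_formset.save()
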
580cdb55be700a981946f1009a10b384f704b1b2
|
Install git pre-commit hooks in the vm as part of provisioning.
|
provision.py
|
provision.py
|
import os
import logging
import platform
try:
import sh
except ImportError:
import pbs as sh
SUPPORTED_PLATFORMS = {
"Ubuntu": [
"trusty",
],
}
APT_DEPENDENCIES = {
"trusty": [
"libffi-dev",
"memcached",
"rabbitmq-server",
"libldap2-dev",
"redis-server",
"postgresql-server-dev-all",
"libmemcached-dev",
"postgresql-9.3",
"python-dev",
"hunspell-en-us",
"nodejs",
"python-virtualenv",
"supervisor",
"git",
]
}
VENV_PATH="/srv/zulip-venv"
ZULIP_PATH="/srv/zulip"
# tsearch-extras is an extension to postgres's built-in full-text search.
# TODO: use a real APT repository
TSEARCH_URL_BASE = "https://dl.dropboxusercontent.com/u/283158365/zuliposs/"
TSEARCH_PACKAGE_NAME = {
"trusty": "postgresql-9.3-tsearch-extras"
}
TSEARCH_VERSION = "0.1.2"
# TODO: this path is platform-specific!
TSEARCH_STOPWORDS_PATH = "/usr/share/postgresql/9.3/tsearch_data/"
REPO_STOPWORDS_PATH = os.path.join(
ZULIP_PATH,
"puppet",
"zulip",
"files",
"postgresql",
"zulip_english.stop",
)
log = logging.getLogger("zulip-provisioner")
# TODO: support other architectures
if platform.architecture()[0] == '64bit':
arch = 'amd64'
else:
log.critical("Only amd64 is supported.")
vendor, version, codename = platform.dist()
if not (vendor in SUPPORTED_PLATFORMS and codename in SUPPORTED_PLATFORMS[vendor]):
log.critical("Unsupported platform: {} {}".format(vendor, codename))
with sh.sudo:
sh.apt_get.update()
sh.apt_get.install(*APT_DEPENDENCIES["trusty"], assume_yes=True)
temp_deb_path = sh.mktemp("package_XXXXXX.deb", tmpdir=True)
sh.wget(
"{}/{}_{}_{}.deb".format(
TSEARCH_URL_BASE,
TSEARCH_PACKAGE_NAME["trusty"],
TSEARCH_VERSION,
arch,
),
output_document=temp_deb_path,
)
with sh.sudo:
sh.dpkg("--install", temp_deb_path)
with sh.sudo:
sh.rm("-rf", VENV_PATH)
sh.mkdir("-p", VENV_PATH)
sh.chown("{}:{}".format(os.getuid(), os.getgid()), VENV_PATH)
sh.virtualenv(VENV_PATH)
# Add the ./tools and ./scripts/setup directories inside the repository root to
# the system path; we'll reference them later.
orig_path = os.environ["PATH"]
os.environ["PATH"] = os.pathsep.join((
os.path.join(ZULIP_PATH, "tools"),
os.path.join(ZULIP_PATH, "scripts", "setup"),
orig_path
))
# Switch current Python context to the virtualenv.
activate_this = os.path.join(VENV_PATH, "bin", "activate_this.py")
execfile(activate_this, dict(__file__=activate_this))
sh.pip.install(requirement=os.path.join(ZULIP_PATH, "requirements.txt"))
with sh.sudo:
sh.cp(REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH)
# Management commands expect to be run from the root of the project.
os.chdir(ZULIP_PATH)
sh.configure_rabbitmq()
sh.postgres_init_db()
sh.do_destroy_rebuild_database()
sh.postgres_init_test_db()
sh.do_destroy_rebuild_test_database()
with sh.sudo:
sh.cp(os.path.join(ZULIP_PATH, "tools", "provision", "zulip-dev.conf"), "/etc/supervisor/conf.d/zulip-dev.conf")
sh.service("supervisor", "restart")
|
Python
| 0.000001
|
@@ -2977,16 +2977,36 @@
tabase()
+%0Ash.setup_git_repo()
%0A%0Awith s
|
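
The hunk appends sh.setup_git_repo() after the database rebuild steps. sh resolves attribute names against PATH, so this runs the setup_git_repo script made reachable by the os.environ["PATH"] edit above; a rough subprocess equivalent (assuming, per the commit message, that the script installs the repo's pre-commit hooks) might look like:

    import os
    import subprocess

    ZULIP_PATH = "/srv/zulip"  # as defined in the file above
    env = dict(os.environ)
    env["PATH"] = os.pathsep.join((
        os.path.join(ZULIP_PATH, "tools"),
        os.path.join(ZULIP_PATH, "scripts", "setup"),
        env["PATH"],
    ))
    # The hook-installation details live in the Zulip tree, not here.
    subprocess.check_call(["setup_git_repo"], env=env)
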
bcac5b2faf882fda49a3bff7eae147bcb8cbd460
|
Fix spelling of setup-readme.md
|
setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from os import sys
try:
from skbuild import setup
except ImportError:
print('scikit-build is required to build from source.', file=sys.stderr)
print('Please run:', file=sys.stderr)
print('', file=sys.stderr)
print(' python -m pip install scikit-build')
sys.exit(1)
from pathlib import Path
this_directory = Path(__file__).parent
setup_readme_text = (this_directory / "setup_readme.md").read_text()
#include_dirs=[np.get_include()],
setup(
name='itk-tubetk',
version='1.1',
author='Stephen R. Aylward',
author_email='stephen.aylward@kitware.com',
packages=['itk'],
package_dir={'itk': 'itk'},
download_url=r'https://github.com/InsightSoftwareConsortium/ITKTubeTK',
description=r'An open-source toolkit, led by Kitware, Inc., for the segmentation, registration, and analysis of tubes and surfaces in images.',
long_description=setup_readme_text,
long_description_content_type='text/markdown',
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: C++",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS"
],
license='Apache',
keywords='ITK InsightToolkit Tubes Vessels Nerves Ultrasound MRI CT Medical',
url=r'https://github.com/InsightSoftwareConsortium/ITKTubeTK/',
project_urls={
'Dashboard': 'https://open.cdash.org/index.php?project=TubeTK',
'Issue Tracker': 'https://github.com/InsightSoftwareConsortium/ITKTubeTK/issues',
'Testing Data': 'https://data.kitware.com/#collection/5888b7d38d777f4f3f3085a8/folder/58a3abf08d777f0721a65b16',
'ITK': 'https://itk.org',
},
install_requires=[
r'numpy',
r'itk>=5.3rc3',
r'itk-minimalpathextraction>=1.2.0'
]
)
|
Python
| 0.000008
|
@@ -451,25 +451,25 @@
ory / %22setup
-_
+-
readme.md%22).
|
e6c81b2173fa96d10daad1eb582443184c08889d
|
Fix log file name
|
badger/input.py
|
badger/input.py
|
# Copyright (C) 2015 SINTEF ICT,
# Applied Mathematics, Norway.
#
# Contact information:
# E-mail: eivind.fonn@sintef.no
# SINTEF ICT, Department of Applied Mathematics,
# P.O. Box 4760 Sluppen,
# 7045 Trondheim, Norway.
#
# This file is part of BADGER.
#
# BADGER is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# BADGER is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with GoTools. If not, see
# <http://www.gnu.org/licenses/>.
#
# In accordance with Section 7(b) of the GNU Affero General Public
# License, a covered work must retain the producer line in every data
# file that is created or manipulated using GoTools.
#
# Other Usage
# You can be released from the requirements of the license by purchasing
# a commercial license. Buying such a license is mandatory as soon as you
# develop commercial activities involving BADGER without disclosing the
# source code of your own applications.
#
# This file may be used in accordance with the terms contained in a
# written agreement between you and SINTEF ICT.
import argparse
import shlex
import sys
import yaml
from os.path import dirname, join
from collections import OrderedDict
from jinja2 import Template
import badger.output as output
import badger.log as log
def coerce_list(dictionary, key, split=None, required=False):
if not required:
if key not in dictionary:
dictionary[key] = []
if isinstance(dictionary[key], str):
if isinstance(split, str):
dictionary[key] = dictionary[key].split(split)
elif split:
dictionary[key] = split(dictionary[key])
else:
dictionary[key] = [dictionary[key]]
def parse_args(input=None):
parser = argparse.ArgumentParser(description='Batch job runner.')
parser.add_argument('-o', '--output', required=False, default='output.yaml',
help='The output file')
parser.add_argument('-f', '--format', required=False, default=None,
choices=output.FORMATS, help='The output format')
parser.add_argument('-d', '--dry', required=False, default=False,
action='store_true', help='Dry run')
parser.add_argument('-v', '--verbosity', required=False, default=1, type=int,
choices=range(0, 5), help='Verbosity level for stdout')
parser.add_argument('-l', '--logverbosity', required=False, default=2, type=int,
choices=range(0, 5), help='Verbosity level for log file')
parser.add_argument('file', help='Configuration file for the batch job')
args = parser.parse_args(input)
if args.format is None:
try:
args.format = args.output.split('.')[-1]
assert args.format in output.FORMATS
except (AssertionError, IndexError):
print('Unable to determine output format from filename "{}"'.format(args.output),
file=sys.stderr)
sys.exit(1)
log.stdout_verbosity = args.verbosity
log.log_verbosity = args.logverbosity
log.log_file = join(dirname(args.output), args.file + '.log')
log.log_file = args.file + '.log'
if args.logverbosity > 1:
with open(log.log_file, 'w') as f: pass
return args
# YAML is unordered by default, this is an ordered loader
# Thanks http://stackoverflow.com/a/21912744/2729168
def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
def treat_setup(setup):
coerce_list(setup, 'templates')
coerce_list(setup, 'files')
coerce_list(setup, 'cmdargs', split=shlex.split)
coerce_list(setup, 'executable', split=shlex.split, required=True)
coerce_list(setup, 'parse')
coerce_list(setup, 'capture')
for key in ['dependencies', 'types', 'parameters']:
if key not in setup:
setup[key] = {}
kwargs = {'variable_start_string': '$',
'variable_end_string': '$'}
for k in ['templates', 'files', 'executable', 'cmdargs', 'capture']:
setup[k] = [Template(v, **kwargs) for v in setup[k]]
for key in setup['dependencies']:
setup['dependencies'][key] = str(setup['dependencies'][key])
def load_setup(fn):
with open(fn, 'r') as f:
setup = ordered_load(f, yaml.SafeLoader)
setup = setup or {}
treat_setup(setup)
return setup
def empty_setup(executable='', **kwargs):
setup = {
'templates': [],
'files': [],
'executable': executable,
'cmdargs': [],
'parameters': OrderedDict(),
'dependencies': OrderedDict(),
'parse': [],
'types': OrderedDict(),
}
setup.update(kwargs)
treat_setup(setup)
return setup
|
Python
| 0.000019
|
@@ -3548,46 +3548,8 @@
g')%0A
- log.log_file = args.file + '.log'%0A
|
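
The removed line clobbered the dirname-aware path computed immediately above it, sending the log to the working directory instead of next to the output file. The surviving expression behaves like this (illustrative values):

    from os.path import dirname, join

    output = 'results/output.yaml'   # args.output
    config = 'batch.yaml'            # args.file
    log_file = join(dirname(output), config + '.log')
    # -> 'results/batch.yaml.log', alongside the output file
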
caf45bc9d92bb496a3fb32b494db623b5b405208
|
bump version
|
picker/__init__.py
|
picker/__init__.py
|
VERSION = (0, 4, 0)
default_app_config = 'picker.apps.PickerConfig'
def get_version():
return '.'.join(map(str, VERSION))
|
Python
| 0
|
@@ -11,9 +11,9 @@
(0,
-4
+5
, 0)
|
2b939c703951c0a7042fa336d9c685c437fb0586
|
Bump to version 1.2
|
setup.py
|
setup.py
|
"""Setup script for templer.django-project-app"""
from setuptools import setup
from setuptools import find_packages
version = '1.1'
setup(
name='templer.django-project-app',
version=version,
description='Templer extension for creating '
'Django applications within projects.',
long_description=open('README.rst').read(),
classifiers=[
'Environment :: Console',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Code Generators',
],
keywords='templer, django, application',
author='Fantomas42',
author_email='fantomas42@gmail.com',
url='https://github.com/Fantomas42/templer.django-project-app',
license='BSD',
packages=find_packages('src'),
package_dir={'': 'src'},
namespace_packages=['templer'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'templer.core',
],
entry_points="""
[paste.paster_create_template]
django_app = templer.django_project_app:DjangoApp
django_project_app = templer.django_project_app:DjangoProjectApp
[templer.templer_structure]
management_command = templer.django_project_app:ManagementCommandStructure
""",
)
|
Python
| 0
|
@@ -123,17 +123,17 @@
on = '1.
-1
+2
'%0A%0Asetup
|
b1501e02a53706c748f3963dd8c6769b1cf62de8
|
Add temporary workaround for #12
|
hangups/conversation.py
|
hangups/conversation.py
|
"""Conversation objects."""
import logging
from tornado import gen
from types import SimpleNamespace
from hangups import parsers, exceptions, event, user
logger = logging.getLogger(__name__)
class Conversation(object):
"""Wrapper around Client for working with a single chat conversation."""
def __init__(self, client, conv_state, user_list):
"""Initialize a new Conversation from a ClientConversationState."""
self._client = client
self._id = conv_state.conversation_id.id_
user_list = [user_list.get_user(user.UserID(chat_id=part.id_.chat_id,
gaia_id=part.id_.gaia_id))
for part in conv_state.conversation.participant_data]
self._users = {user_.id_: user_ for user_ in user_list}
self._last_modified = parsers.from_timestamp(
conv_state.conversation.self_conversation_state.sort_timestamp
)
self._name = conv_state.conversation.name # str or None
self._chat_messages = [] # ChatMessage
for ev in conv_state.event:
try:
if ev.chat_message is not None:
self._chat_messages.append(parsers.parse_chat_message(ev))
except exceptions.ParseError as e:
logger.warning('Failed to parse chat message: {}'.format(e))
# Event fired when a new message arrives with arguments (chat_message).
self.on_message = event.Event('Conversation.on_message')
# Event fired when a user starts or stops typing with arguments
# (typing_message).
self.on_typing = event.Event('Conversation.on_typing')
@property
def id_(self):
"""Return the Conversation's ID."""
return self._id
@property
def users(self):
"""Return the list of Users participating in the Conversation."""
return list(self._users.values())
def get_user(self, user_id):
"""Return a participating use by UserID.
Raises KeyError if the user ID is not a participant.
"""
return self._users[user_id]
@property
def name(self):
""" Return chat name if it was renamed manually or None
:rtype: str
"""
return self._name
@property
def last_modified(self):
"""Return the timestamp of when the conversation was last modified."""
return self._last_modified
@property
def chat_messages(self):
"""Return a list of ChatMessages, sorted oldest to newest."""
return list(self._chat_messages)
@gen.coroutine
def send_message(self, text):
"""Send a message to this conversation.
text may not be empty.
Raises hangups.NetworkError if the message can not be sent.
"""
yield self._client.sendchatmessage(self._id, text)
class ConversationList(object):
"""Wrapper around Client that maintains a list of Conversations."""
def __init__(self, client, conv_states, user_list):
self._client = client
self._conv_dict = {} # {conv_id: Conversation}
# Initialize the list of conversation from Client's list of
# ClientConversationStates.
for conv_state in conv_states:
conv_id = conv_state.conversation_id.id_
self._conv_dict[conv_id] = Conversation(self._client, conv_state,
user_list)
self._client.on_state_update.add_observer(self._on_state_update)
self._client.on_event_notification.add_observer(
self._on_event_notification
)
# Event fired when a new message arrives with arguments (chat_message).
self.on_message = event.Event('ConversationList.on_message')
# Event fired when a user starts or stops typing with arguments
# (typing_message).
self.on_typing = event.Event('ConversationList.on_typing')
def get_all(self):
"""Return list of all Conversations."""
return list(self._conv_dict.values())
def get(self, conv_id):
"""Return a Conversation from its ID.
Raises KeyError if the conversation ID is invalid.
"""
return self._conv_dict[conv_id]
def _on_state_update(self, state_update):
"""Receive a ClientStateUpdate and fan out to Conversations."""
if state_update.typing_notification is not None:
self._handle_set_typing_notification(
state_update.typing_notification
)
if state_update.event_notification is not None:
self._on_event_notification(state_update.event_notification)
def _on_event_notification(self, event_notification):
"""Receive a ClientEventNofication and fan out to Conversations."""
if event_notification.event.chat_message is not None:
self._handle_chat_message(event_notification.event)
def _handle_chat_message(self, event_):
"""Receive ClientEvent and update the conversation with messages."""
conv_id = event_.conversation_id.id_
conv = self._conv_dict.get(conv_id, None)
if conv is not None:
try:
if event_.chat_message is not None:
res = parsers.parse_chat_message(event_)
else:
res = None
except exceptions.ParseError as e:
logger.warning('Failed to parse chat message: {}'.format(e))
else:
if res is not None:
self.on_message.fire(res)
conv.on_message.fire(res)
else:
logger.warning('Received ClientEvent for unknown conversation {}'
.format(conv_id))
def _handle_set_typing_notification(self, set_typing_notification):
"""Receive ClientSetTypingNotification and update the conversation."""
conv_id = set_typing_notification.conversation_id.id_
conv = self._conv_dict.get(conv_id, None)
if conv is not None:
res = parsers.parse_typing_status_message(set_typing_notification)
self.on_typing.fire(res)
conv.on_typing.fire(res)
else:
logger.warning('Received ClientSetTypingNotification for '
'unknown conversation {}'.format(conv_id))
|
Python
| 0
|
@@ -2087,35 +2087,295 @@
-return self._users%5Buser_id%5D
+# TODO: Remove this temporary fix. Conversations don't (can't?) know%0A # what users participated in the past and later left the conversation.%0A try:%0A return self._users%5Buser_id%5D%0A except KeyError:%0A return user.User(user_id, %22Unknown%22, None, False)
%0A%0A
|
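
With the hunk applied, get_user degrades to a placeholder instead of raising for participants who have left the conversation (the User(...) arguments are taken verbatim from the diff):

    def get_user(self, user_id):
        """Return a participating user by UserID.

        Falls back to an "Unknown" placeholder for users who appear in
        history but are no longer in the conversation's roster.
        """
        try:
            return self._users[user_id]
        except KeyError:
            return user.User(user_id, "Unknown", None, False)
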
392e2a7905cf6dffe0c3aebe19408ae65a7f1a9e
|
fix fixeddatateacher docstring
|
parlai/core/fixed_data_teacher.py
|
parlai/core/fixed_data_teacher.py
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from .agents import Teacher
import concurrent.futures
from threading import Thread
import queue
import random
class DataLoader(Thread):
"""A worker thread that provides a threadpool for data loading.
A teacher may submit a request to the loader, which will return the
appropriate data.
To submit a request, a teacher should call ``request_load`` with the
following arguments:
- ``receive_fn`` - a receive function (for receiving the data)
- ``load_fn`` - a load function (for loading the data)
- ``args`` - arguments for the load function
-> args can be either a dictionary of arguments for a function, or
a list of positional arguments
"""
def __init__(self, opt):
Thread.__init__(self, daemon=True)
self.num_workers = opt.get('numthreads', 1)
self.request_queue = queue.Queue()
def __len__(self):
return len(self.ques['questions'])
def request_load(self, receive_fn, load_fn, args):
self.request_queue.put((receive_fn, load_fn, args))
def run(self):
with concurrent.futures.ThreadPoolExecutor(max_workers=self.num_workers) as executor:
while True:
receive_fn, load_fn, args = self.request_queue.get()
if type(args) == dict:
future = executor.submit(load_fn, **args)
else:
future = executor.submit(load_fn, *args)
receive_fn(future)
class FixedDataTeacher(Teacher):
"""A teacher agent for all teachers involved in tasks with fixed data.
This class provides the following functionality for its subclasses:
- Resets a teacher
- Provides an observe method
- Computes and retrieves the next episode index for a teacher
- Provides a threadpool option for loading data (especially useful for
large data, e.g. images)
To utilize the DataLoader for threadpool loading, a teacher should
implement the ``submit_load_request`` function to send a load request
to the DataLoader by calling ``self.data_loader.request_load`` with the
appropriate arguments (``receive_fn, load_fn, args``). The DataLoader then
returns the data to the teacher's ``data_queue``, which the teacher can
poll in its ``act`` method.
The following is an example of the DataLoader usage in the VQA-V1 teacher.
1. In the teacher's ``init`` function, the teacher calls its
``submit_load_request`` function to preload an image.
2. The ``submit_load_request`` function gets the next ``episode_idx``,
and computes the image path for the load request.
3. At the end of ``submit_load_request``, the teacher calls
``self.data_loader.request_load`` with three args:
- ``self.receive`` - the function that the DataLoader calls to
return the the loaded object
- ``self.image_loader.load`` - the function used to load the image
from the image path
- ``[img_path]`` - a list of arguments for the load function, which
in this case is the path of the image.
4. In the teacher's ``act`` function, the teacher loads the data from
its data queue.
5. At the end of the ``act`` function, the teacher calls
``submit_load_request`` to preload an image for the next example.
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
if not hasattr(self, 'datatype'):
self.datatype = opt['datatype']
if not hasattr(self, 'random'):
self.random = self.datatype == 'train'
if not hasattr(self, 'training'):
self.training = self.datatype.startswith('train')
# for ordered data in batch mode (especially, for validation and
# testing), each teacher in the batch gets a start index and a step
# size so they all process disparate sets of the data
self.step_size = opt.get('batchsize', 1)
self.data_offset = opt.get('batchindex', 0)
self.data_queue = queue.Queue()
if shared:
self.data_loader = shared['data_loader']
else:
self.data_loader = DataLoader(opt)
self.data_loader.start()
def reset(self):
"""Reset the dialog so that it is at the start of the epoch,
and all metrics are reset.
"""
super().reset()
self.metrics.clear()
self.lastY = None
self.episode_idx = self.data_offset - self.step_size
self.episode_done = True
self.epochDone = False
self.data_queue = queue.Queue()
if (not self.random and self.data_offset >= self.num_episodes()):
self.epochDone = True
def observe(self, observation):
"""Process observation for metrics."""
if hasattr(self, 'lastY') and self.lastY is not None:
self.metrics.update(observation, self.lastY)
self.lastY = None
return observation
def submit_load_request(self):
"""An agent should implement this method to submit requests to the
data loader. At the end of this method, the agent should call
``self.data_loader.request_load()`` with the appropriate args.
"""
pass
def receive_data(self, future):
"""Function for receiving data from the data loader."""
data = future.result()
self.data_queue.put(data)
def share(self):
shared = super().share()
shared['data_loader'] = self.data_loader
return shared
def next_episode_idx(self, num_eps=None):
if not num_eps:
num_eps = self.num_episodes()
if self.random:
self.episode_idx = random.randrange(num_eps)
else:
self.episode_idx = (self.episode_idx + self.step_size) % num_eps
return self.episode_idx
def next_example(self):
if self.episode_done:
self.episode_idx = self.next_episode_idx()
self.entry_idx = 0
else:
self.entry_idx += 1
ex = self.get(self.episode_idx, self.entry_idx)
self.episode_done = ex['episode_done']
epoch_done = False
if (not self.random and self.episode_done
and self.episode_idx + self.step_size >= self.num_episodes()):
epoch_done = True
return ex, epoch_done
def num_episodes(self):
"""Get the number of episodes in this dataset."""
try:
return len(self.episodes)
except Exception:
raise RuntimeError('"num_episodes" must be overriden by children.')
def get(self, episode_idx, entry_idx=0):
"""Get the specified episode and the specified entry in that episode.
Many datasets have only single-entry episodes, so entry_idx defaults to
zero. Children must override this method in order to inherit the
`next_example` method.
"""
try:
return self.examples[episode_idx][entry_idx]
except Exception:
raise RuntimeError('"Get" method must be overriden by children.')
def act(self):
"""Send new dialog message."""
if not hasattr(self, 'epochDone'):
self.reset()
if self.epochDone and not self.training:
# need to call "reset" to repeat valid or test examples
return {'episode_done': True, 'id': self.getID()}
action, self.epochDone = self.next_example()
action['id'] = self.getID()
self.lastY = action.get('labels', None)
if not self.datatype.startswith('train') and 'labels' in action:
# move labels to eval field so not used for training
# but this way the model can use the labels for perplexity or loss
action['eval_labels'] = action.pop('labels')
return action
|
Python
| 0.000002
|
@@ -3117,16 +3117,21 @@
.receive
+_data
%60%60 - the
|
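
The hunk only corrects the docstring (self.receive -> self.receive_data), but the protocol it documents is worth a compact sketch: a teacher hands the loader a receive callback, a load function, and its args, then polls its own queue. The toy load function and path below are illustrative, not ParlAI API:

    import queue
    from parlai.core.fixed_data_teacher import DataLoader

    def load_fn(path):
        with open(path, 'rb') as f:
            return f.read()

    data_queue = queue.Queue()

    def receive_fn(future):
        # The loader calls back with a concurrent.futures.Future;
        # result() blocks until the pool thread finishes load_fn.
        data_queue.put(future.result())

    loader = DataLoader({'numthreads': 2})
    loader.start()
    loader.request_load(receive_fn, load_fn, ['/tmp/example.bin'])
    payload = data_queue.get()  # blocks until the data arrives
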
9669a99d1a76f346b2cfb9b4197636ac3142f9d2
|
Update users table in a batched manner
|
synapse/storage/schema/delta/30/as_users.py
|
synapse/storage/schema/delta/30/as_users.py
|
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from synapse.storage.appservice import ApplicationServiceStore
logger = logging.getLogger(__name__)
def run_upgrade(cur, database_engine, config, *args, **kwargs):
# NULL indicates user was not registered by an appservice.
try:
cur.execute("ALTER TABLE users ADD COLUMN appservice_id TEXT")
except:
# Maybe we already added the column? Hope so...
pass
cur.execute("SELECT name FROM users")
rows = cur.fetchall()
config_files = []
try:
config_files = config.app_service_config_files
except AttributeError:
logger.warning("Could not get app_service_config_files from config")
pass
appservices = ApplicationServiceStore.load_appservices(
config.server_name, config_files
)
owned = {}
for row in rows:
user_id = row[0]
for appservice in appservices:
if appservice.is_exclusive_user(user_id):
if user_id in owned.keys():
logger.error(
"user_id %s was owned by more than one application"
" service (IDs %s and %s); assigning arbitrarily to %s" %
(user_id, owned[user_id], appservice.id, owned[user_id])
)
owned[user_id] = appservice.id
for user_id, as_id in owned.items():
cur.execute(
database_engine.convert_param_style(
"UPDATE users SET appservice_id = ? WHERE name = ?"
),
(as_id, user_id)
)
|
Python
| 0
|
@@ -1879,20 +1879,20 @@
wned
-%5Buser_id%5D =
+.setdefault(
apps
@@ -1900,16 +1900,37 @@
rvice.id
+, %5B%5D).append(user_id)
%0A%0A fo
@@ -1935,22 +1935,23 @@
for
-user
+as
_id,
-as
+user
_id
+s
in
@@ -1961,24 +1961,159 @@
ed.items():%0A
+ n = 100%0A user_chunks = (user_ids%5Bi:i + 100%5D for i in xrange(0, len(user_ids), n))%0A for chunk in user_chunks:%0A
cur.
@@ -2133,16 +2133,20 @@
+
database
@@ -2174,16 +2174,20 @@
_style(%0A
+
@@ -2245,13 +2245,101 @@
ame
-= ?%22%0A
+IN (%25s)%22 %25 (%0A %22,%22.join(%22?%22 for _ in chunk),%0A )%0A
@@ -2365,25 +2365,32 @@
-(as_id, user_id)%0A
+ %5Bas_id%5D + chunk%0A
|
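
Decoded, the hunk first inverts the mapping, grouping user ids per appservice with owned.setdefault(appservice.id, []).append(user_id), then updates names 100 at a time through a parameterized IN clause instead of issuing one UPDATE per user. The batching core, isolated (cur and database_engine come from run_upgrade's arguments; xrange reflects the hunk's Python 2):

    n = 100
    for as_id, user_ids in owned.items():
        user_chunks = (user_ids[i:i + n] for i in xrange(0, len(user_ids), n))
        for chunk in user_chunks:
            cur.execute(
                database_engine.convert_param_style(
                    "UPDATE users SET appservice_id = ? WHERE name IN (%s)"
                    % ",".join("?" for _ in chunk)
                ),
                [as_id] + chunk,
            )
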
81ae2b03954f4a4ec9d19193cb117d59a4e71f3b
|
Fix incorrect logging message
|
blitz/io/client_states.py
|
blitz/io/client_states.py
|
__author__ = 'Will Hart'
from blitz.constants import *
class BaseState(object):
"""
A base state diagram which provides a few methods - this should not be directly instantiated.
All methods return a BaseState derived object which should handle future message processing
"""
def enter_state(self, tcp, state):
"""Called when entering the state"""
print "Calling base.enter_state >> " + state.__name__
return state()
def process_message(self, tcp, msg):
"""Called when a message needs processing"""
print "Calling base.process_message: " + msg
raise NotImplementedError()
def send_message(self, tcp, msg):
"""
Send the passed message over TCP and return the current state
"""
print "Calling base.send_message: " + msg
tcp._send(msg)
return self
def go_to_state(self, tcp, state):
"""
Transition to a new state and call enter_state on it
:return: the new state
"""
print "Calling base.go_to_state >> " + state.__name__
return state().enter_state(tcp, state)
def __str__(self):
return "<" + __name__ + ">"
class ClientInitState(BaseState):
"""
Handles the client starting up - sends a "logging" query
to the logger and waits for the response
"""
def enter_state(self, tcp, state):
"""Send a logging query to the logger"""
print "Calling init.enter_state"
return self.send_message(tcp, "LOGGING")
def process_message(self, tcp, msg):
print "Calling init.process_message: " + msg
if msg == "ACK":
# logger is logging, transition to LOGGING state
return self.go_to_state(tcp, ClientLoggingState)
elif msg == "NACK":
# logger is not logging, go to idle
return self.go_to_state(tcp, ClientIdleState)
else:
# no other messages are acceptable in this state
raise Exception("Unable to process the given message from InitState: " + msg)
class ClientIdleState(BaseState):
"""
Handles the client idling, waiting for further commands
"""
def process_message(self, tcp, msg):
# no server messages are acceptable in this state
print "Calling idle.process_message: " + msg
raise Exception("Received unexpected message in IdleState: " + msg)
def send_message(self, tcp, msg):
print "Calling idle.send_message: " + msg
if msg == "START":
return self.go_to_state(tcp, ClientStartingState)
elif msg[0:8] == "DOWNLOAD":
tcp._send(msg)
return self.go_to_state(tcp, ClientDownloadingState)
else:
raise Exception("Unknown message for IDLE state - " + msg)
class ClientStartingState(BaseState):
"""Handles logging starting - waits for ACK from server"""
def enter_state(self, tcp, state):
print "Calling starting.send_message: " + state.__name__
tcp._send("START")
def process_message(self, tcp, msg):
print "Calling starting.process_message: " + msg
if msg == "ACK":
return self.go_to_state(tcp, ClientLoggingState)
elif msg == "INSESSION":
return self.go_to_state(tcp, ClientLoggingState)
return self.go_to_state(tcp, ClientIdleState)
class ClientLoggingState(BaseState):
"""
Handles the client in logging state - sends periodic status updates
"""
def send_message(self, tcp, msg):
print "Calling logging.send_message: " + msg
# check if we have requested logging to stop
if msg == "STOP":
return self.go_to_state(tcp, ClientStoppingState)
# if not, are we requesting a status?
if msg == "STATUS":
tcp._send("STATUS")
elif len(msg) == COMMAND_MESSAGE_BYTES or len(msg) == SHORT_COMMAND_MESSAGE_BYTES:
# this is likely to be a data message
tcp.parse_reading(msg)
else:
# otherwise we just send the message and let the server sort it out
tcp._send(msg)
return self
class ClientStoppingState(BaseState):
"""
Handles waiting for acknowledgement from a client before entering IDLE state
"""
def enter_state(self, tcp, state):
print "Calling stopping.send_message: " + state.__name__
tcp._send("STOP")
def process_message(self, tcp, msg):
print "Calling stopping.process_message: " + msg
if msg == "ACK":
return self.go_to_state(tcp, ClientIdleState)
return self
class ClientDownloadingState(BaseState):
"""
Handles the client in logging state - sends periodic status updates
"""
def process_message(self, tcp, msg):
print "Calling downloading.process_message: " + msg
if msg == "NACK":
# the data has been received
self.send_message(tcp, "ACK")
return self.go_to_state(tcp, ClientIdleState)
# otherwise we save the data row for processing
tcp.parse_reading(msg)
return self
def go_to_state(self, tcp, state):
print "Calling downloading.go_to_state >> " + state.__name__
if type(state) == ClientIdleState:
tcp._send("ACK") # acknowledge end of download recieved
return super(ClientDownloadingState, self).go_to_state(tcp, state)
|
Python
| 0.998716
|
@@ -2971,35 +2971,34 @@
ng starting.
-s
en
-d_messag
+ter_stat
e: %22 + state
|
9bc6607b8349fef43bf219610bd8030e311348ae
|
Fix too long line
|
examples/vae/train_vae.py
|
examples/vae/train_vae.py
|
#!/usr/bin/env python
"""Chainer example: train a VAE on MNIST
"""
from __future__ import print_function
import argparse
import os
import chainer
from chainer import training
from chainer.training import extensions
import numpy as np
import net
def main():
parser = argparse.ArgumentParser(description='Chainer example: VAE')
parser.add_argument('--initmodel', '-m', default='',
help='Initialize the model from given file')
parser.add_argument('--resume', '-r', default='',
help='Resume the optimization from snapshot')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--out', '-o', default='result',
help='Directory to output the result')
parser.add_argument('--epoch', '-e', default=100, type=int,
help='number of epochs to learn')
parser.add_argument('--dimz', '-z', default=20, type=int,
help='dimention of encoded vector')
parser.add_argument('--batchsize', '-b', type=int, default=100,
help='learning minibatch size')
parser.add_argument('--test', action='store_true',
help='Use tiny datasets for quick tests')
args = parser.parse_args()
print('GPU: {}'.format(args.gpu))
print('# dim z: {}'.format(args.dimz))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('')
# Prepare VAE model, defined in net.py
model = net.VAE(784, args.dimz, 500)
# Setup an optimizer
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
# Initialize
if args.initmodel:
chainer.serializers.load_npz(args.initmodel, model)
# Load the MNIST dataset
train, test = chainer.datasets.get_mnist(withlabel=False)
if args.test:
train, _ = chainer.datasets.split_dataset(train, 100)
test, _ = chainer.datasets.split_dataset(test, 100)
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
repeat=False, shuffle=False)
# Set up an updater. StandardUpdater can explicitly specify a loss function
# used in the training with 'loss_func' option
updater = training.updaters.StandardUpdater(train_iter, optimizer, device=args.gpu,
loss_func=model.get_loss_func())
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu,
eval_func=model.get_loss_func(k=10)))
trainer.extend(extensions.dump_graph('main/loss'))
trainer.extend(extensions.snapshot(), trigger=(args.epoch, 'epoch'))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss',
'main/rec_loss', 'validation/main/rec_loss', 'elapsed_time']))
trainer.extend(extensions.ProgressBar())
if args.resume:
chainer.serializers.load_npz(args.resume, trainer)
# Run the training
trainer.run()
# Visualize the results
def save_images(x, filename):
import matplotlib.pyplot as plt
fig, ax = plt.subplots(3, 3, figsize=(9, 9), dpi=100)
for ai, xi in zip(ax.flatten(), x):
ai.imshow(xi.reshape(28, 28))
fig.savefig(filename)
model.to_cpu()
train_ind = [1, 3, 5, 10, 2, 0, 13, 15, 17]
x = chainer.Variable(np.asarray(train[train_ind]))
with chainer.using_config('train', False), chainer.no_backprop_mode():
x1 = model(x)
save_images(x.data, os.path.join(args.out, 'train'))
save_images(x1.data, os.path.join(args.out, 'train_reconstructed'))
test_ind = [3, 2, 1, 18, 4, 8, 11, 17, 61]
x = chainer.Variable(np.asarray(test[test_ind]))
with chainer.using_config('train', False), chainer.no_backprop_mode():
x1 = model(x)
save_images(x.data, os.path.join(args.out, 'test'))
save_images(x1.data, os.path.join(args.out, 'test_reconstructed'))
# draw images from randomly sampled z
z = chainer.Variable(
np.random.normal(0, 1, (9, args.dimz)).astype(np.float32))
x = model.decode(z)
save_images(x.data, os.path.join(args.out, 'sampled'))
if __name__ == '__main__':
main()
|
Python
| 0.999592
|
@@ -2461,16 +2461,25 @@
Updater(
+%0A
train_it
@@ -2492,16 +2492,24 @@
timizer,
+%0A
device=
@@ -2521,47 +2521,8 @@
gpu,
-%0A
los
|
57bd308348243d77a116f509bc08c264423789d4
|
Stop failing the build if a dependency has been removed. Review URL: http://codereview.chromium.org/7350
|
chrome/tools/build/win/dependencies.py
|
chrome/tools/build/win/dependencies.py
|
#!/usr/bin/python
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to verify a Portable Executable's dependencies.
Analyzes the input portable executable (a DLL or an EXE for example), extracts
its imports and confirms that its dependencies haven't changed. This is for
regression testing.
Returns 0 if the list matches.
1 if one or more dependencies has been removed.
2 if one or more dependencies has been added. This preempts removal
result code.
"""
import optparse
import os
import subprocess
import sys
# The default distribution name and the environment variable that overrides it.
DIST_DEFAULT = '_chromium'
DIST_ENV_VAR = 'CHROMIUM_BUILD'
DUMPBIN = "dumpbin.exe"
class Error(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
def RunSystemCommand(cmd):
try:
return subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0]
except:
raise Error("Failed to execute: " + cmd)
def RunDumpbin(binary_file):
"""Runs dumpbin and parses its output.
Args: binary_file: the binary to analyze
Returns: a tuple of the dependencies and the delay-load dependencies
The output of dumpbin that we will be parsing looks like this:
--
<blah blah>
Image has the following dependencies:
foo.dll
bar.dll
Image has the following delay load dependencies:
foobar.dll
other.dll
Summary
<blah blah>
--
The following parser extracts the dll names from the above format.
"""
cmd = DUMPBIN + " /dependents " + binary_file
output = RunSystemCommand(cmd)
dependents = []
delay_loaded = []
(START, DEPENDENCIES_HEADER, DEPENDENCIES, DELAY_LOAD_HEADER, DELAY_LOAD,
SUMMARY_HEADER, SUMMARY) = (0, 1, 2, 3, 4, 5, 6)
current_section = START
# Very basic scanning.
for line in output.splitlines():
line = line.strip()
if len(line) > 1:
if line == "Image has the following dependencies:":
if current_section != START:
raise Error("Internal parsing error.")
current_section = DEPENDENCIES_HEADER
elif line == "Image has the following delay load dependencies:":
if current_section != DEPENDENCIES:
raise Error("Internal parsing error.")
current_section = DELAY_LOAD_HEADER
elif line == "Summary":
current_section = SUMMARY_HEADER
elif current_section == DEPENDENCIES:
# Got a dependent
dependents.append(line)
elif current_section == DELAY_LOAD:
# Got a delay-loaded
delay_loaded.append(line)
else:
if current_section == DEPENDENCIES_HEADER:
current_section = DEPENDENCIES
elif current_section == DELAY_LOAD_HEADER:
current_section = DELAY_LOAD
elif current_section == SUMMARY_HEADER:
current_section = SUMMARY
return dependents, delay_loaded
def Diff(name, type, current, expected, deps_file):
"""
Args: name: Portable executable name being analysed.
type: Type of dependency.
current: List of current dependencies.
expected: List of dependencies that are expected.
deps_file: File name of the .deps file.
Returns 0 if the lists are equal
1 if one entry in list1 is missing
2 if one entry in list2 is missing.
"""
# Create sets of lower-case names.
set_expected = set([x.lower() for x in expected])
set_current = set([x.lower() for x in current])
only_in_expected = set_expected - set_current
only_in_current = set_current - set_expected
# Find difference between the sets.
found_extra = 0
name = os.path.basename(name).lower()
if len(only_in_expected) or len(only_in_current):
print name.upper() + " DEPENDENCIES MISMATCH\n"
if len(only_in_expected):
found_extra = 1
print "%s is no longer dependent on these %s: %s." % (name,
type,
' '.join(only_in_expected))
print "Please update \"%s\"." % deps_file
if len(only_in_current):
found_extra = 2
string = "%s is now dependent on these %s, but shouldn't: %s." % (name,
type,
' '.join(only_in_current))
stars = '*' * len(string)
print "**" + stars + "**"
print "* " + string + " *"
print "**" + stars + "**\n"
print "Please update \"%s\"." % deps_file
return found_extra
def VerifyDependents(pe_name, dependents, delay_loaded, list_file, verbose):
"""Compare the actual dependents to the expected ones."""
scope = {}
try:
execfile(list_file, scope)
except:
raise Error("Failed to load " + list_file)
# The dependency files have dependencies in two section - dependents and delay_loaded
# Also various distributions of Chromium can have different dependencies. So first
# we read generic dependencies ("dependents" and "delay_loaded"). If distribution
# specific dependencies exist (i.e. "dependents_google_chrome" and
# "delay_loaded_google_chrome") we use those instead.
distribution = DIST_DEFAULT
if DIST_ENV_VAR in os.environ.keys():
distribution = os.environ[DIST_ENV_VAR].lower()
expected_dependents = scope["dependents"]
dist_dependents = "dependents" + distribution
if dist_dependents in scope.keys():
expected_dependents = scope[dist_dependents]
expected_delay_loaded = scope["delay_loaded"]
dist_delay_loaded = "delay_loaded" + distribution
if dist_delay_loaded in scope.keys():
expected_delay_loaded = scope[dist_delay_loaded]
if verbose:
print "Expected dependents:"
print "\n".join(expected_dependents)
print "Expected delayloaded:"
print "\n".join(expected_delay_loaded)
deps_result = Diff(pe_name,
"dll",
dependents,
expected_dependents,
list_file)
delayed_result = Diff(pe_name,
"delay loaded dll",
delay_loaded,
expected_delay_loaded,
list_file)
return max(deps_result, delayed_result)
def main(options, args):
# PE means portable executable. It's any .DLL, .EXE, .SYS, .AX, etc.
pe_name = args[0]
deps_file = args[1]
dependents, delay_loaded = RunDumpbin(pe_name)
if options.debug:
print "Dependents:"
print "\n".join(dependents)
print "Delayloaded:"
print "\n".join(delay_loaded)
return VerifyDependents(pe_name, dependents, delay_loaded, deps_file,
options.debug)
if '__main__' == __name__:
usage = "usage: %prog [options] input output"
option_parser = optparse.OptionParser(usage = usage)
option_parser.add_option("-d",
"--debug",
dest="debug",
action="store_true",
default=False,
help="Display debugging information")
options, args = option_parser.parse_args()
if len(args) != 2:
option_parser.error("Incorrect number of arguments")
sys.exit(main(options, args))
|
Python
| 0.000007
|
@@ -3891,24 +3891,34 @@
ected):%0A
+# Setting
found_extra
@@ -3921,11 +3921,175 @@
tra
-= 1
+to 1 causes the build to fail. In some case, some%0A # dependencies are stripped out on optimized build; don't break anything%0A # just for that.%0A found_extra = 0
%0A
|
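
The patch keeps the set-difference check but downgrades removals from failure (1) to warning (0), because optimized builds can legitimately strip imports; additions still return 2 and break the build. The policy reduced to its core (print formatting is illustrative, not the script's exact output):

    def diff_policy(current, expected):
        cur = set(x.lower() for x in current)
        exp = set(x.lower() for x in expected)
        removed = exp - cur   # no longer imported: warn only
        added = cur - exp     # newly imported: fail the build
        result = 0
        if removed:
            print "warning: dependencies dropped: " + " ".join(sorted(removed))
        if added:
            print "error: new dependencies: " + " ".join(sorted(added))
            result = 2
        return result
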
da5a05c27f1c19c69ce23f5cd6cd0f09edb9d7f7
|
Refactor common serializer selection code.
|
paranuara_api/views.py
|
paranuara_api/views.py
|
from rest_framework import viewsets
from paranuara_api.models import Company, Person
from paranuara_api.serializers import (
CompanySerializer, CompanyListSerializer, PersonListSerializer,
PersonSerializer
)
class CompanyViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Company.objects.all()
lookup_field = 'index'
serializers = {
'list': CompanyListSerializer,
'retrieve': CompanySerializer,
}
def get_serializer_class(self):
return self.serializers[self.action]
class PersonViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Person.objects.all()
lookup_field = 'index'
serializers = {
'list': PersonListSerializer,
'retrieve': PersonSerializer,
}
def get_serializer_class(self):
return self.serializers[self.action]
|
Python
| 0
|
@@ -229,23 +229,165 @@
ass
-CompanyViewSet(
+MultiSerializerMixin(object):%0A%0A def get_serializer_class(self):%0A return self.serializers%5Bself.action%5D%0A%0A%0Aclass CompanyViewSet(MultiSerializerMixin,
view
@@ -588,111 +588,51 @@
%7D%0A%0A
- def get_serializer_class(self):%0A return self.serializers%5Bself.action%5D%0A%0A%0Aclass PersonViewSet(
+%0Aclass PersonViewSet(MultiSerializerMixin,
view
@@ -830,86 +830,4 @@
%7D%0A%0A
- def get_serializer_class(self):%0A return self.serializers%5Bself.action%5D%0A%0A
|
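
The refactor hoists the duplicated get_serializer_class into a mixin that both viewsets now inherit first. Extending the pattern to a hypothetical third resource would look like this (the Invoice* names are invented for illustration):

    class MultiSerializerMixin(object):
        """Dispatch to a per-action serializer via the serializers map."""
        def get_serializer_class(self):
            return self.serializers[self.action]

    class InvoiceViewSet(MultiSerializerMixin, viewsets.ReadOnlyModelViewSet):
        queryset = Invoice.objects.all()
        lookup_field = 'index'
        serializers = {
            'list': InvoiceListSerializer,
            'retrieve': InvoiceSerializer,
        }
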
a2a849f3d425e9c544a66d2b04ab80555be16add
|
Fix path error
|
drivnal/handlers/path.py
|
drivnal/handlers/path.py
|
from drivnal.constants import *
from drivnal.backup import Client
import drivnal.utils as utils
from drivnal import server
import os
import flask
@server.app.route('/path', methods=['GET'])
@server.app.route('/path/<path:path>', methods=['GET'])
def path_get(path=None):
path = '/' + (path or '')
paths = []
if path != '/':
paths.append({
'name': '..',
'path': os.path.abspath(os.path.join(path, os.pardir)),
})
try:
path_list = os.listdir(path)
except OSError:
return utils.jsonify({
'error': PATH_NOT_FOUND,
'error_msg': error.strerror,
}), 404
for name in sorted(path_list):
full_path = os.path.join(path, name)
if not os.path.isdir(full_path):
continue
paths.append({
'name': name,
'path': full_path,
})
return utils.jsonify(paths)
|
Python
| 0.000014
|
@@ -524,16 +524,23 @@
OSError
+, error
:%0A
|
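The one-hunk patch binds the caught exception, fixing a NameError: the old handler referenced error.strerror without ever defining error. Decoded result, in the file's Python 2 syntax:

    try:
        path_list = os.listdir(path)
    except OSError, error:  # Python 2 comma syntax; Python 3 spells this "except OSError as error:"
        return utils.jsonify({
            'error': PATH_NOT_FOUND,
            'error_msg': error.strerror,
        }), 404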
9728d151967b6796ef2a34d8a9867fd109fe48f3
|
remove psutil from setup.py
|
setup.py
|
setup.py
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
classifiers = """\
Development Status :: 4 - Beta
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
Programming Language :: Python
Programming Language :: JavaScript
Topic :: Database
Topic :: Software Development :: Libraries :: Python Modules
Operating System :: Unix
"""
import sys
try:
from setuptools import setup
except ImportError:
from ez_setup import setup
use_setup_tools()
from setuptools import setup
extra_opts = {"test_suite": "tests",
"tests_require": ["psutil>=2.0"]}
if sys.version_info[:2] == (2, 6):
# Need unittest2 to run unittests in Python 2.6
extra_opts["tests_require"] += ["unittest2"]
extra_opts["test_suite"] = "unittest2.collector"
setup(name='mongo-connector',
version="1.1.1+",
author="MongoDB, Inc.",
author_email='mongodb-user@googlegroups.com',
description='Mongo Connector',
keywords='mongo-connector',
url='https://github.com/10gen-labs/mongo-connector',
license="http://www.apache.org/licenses/LICENSE-2.0.html",
platforms=["any"],
classifiers=filter(None, classifiers.split("\n")),
install_requires=['pymongo', 'pysolr >= 3.1.0', 'elasticsearch'],
packages=["mongo_connector", "mongo_connector.doc_managers"],
package_data={
'mongo_connector.doc_managers': ['schema.xml']
},
entry_points={
'console_scripts': [
'mongo-connector = mongo_connector.connector:main',
],
},
**extra_opts
)
|
Python
| 0.000001
|
@@ -1075,56 +1075,8 @@
sts%22
-,%0A %22tests_require%22: %5B%22psutil%3E=2.0%22%5D
%7D%0A%0Ai
@@ -1193,17 +1193,16 @@
quire%22%5D
-+
= %5B%22unit
|
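Decoded, the patch removes the psutil requirement in two coordinated steps: the tests_require default disappears from the dict literal, and the Python 2.6 branch switches from += to =, which is necessary because += would raise KeyError once the key no longer has a default. Resulting lines:

extra_opts = {"test_suite": "tests"}

if sys.version_info[:2] == (2, 6):
    # Need unittest2 to run unittests in Python 2.6
    extra_opts["tests_require"] = ["unittest2"]
    extra_opts["test_suite"] = "unittest2.collector"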
90cec05ed692e9be580d0df6a7738684fe76a6a1
|
Add print method
|
publisher.py
|
publisher.py
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(description='Upload Android app to Google Play.')
argparser.add_argument('service_account_email',
help='EXAMPLE@developer.gserviceaccount.com')
argparser.add_argument('key_file',
help='The path to the Key file.')
argparser.add_argument('package_name',
help='The package name. Example: com.android.sample')
argparser.add_argument('apk_file',
help='The path to the APK file to upload.')
argparser.add_argument('track',
nargs='?',
default='alpha',
help='Can be \'alpha\', \'beta\', \'production\' or \'rollout\'')
def main(argv):
flags = argparser.parse_args()
# Process flags and read their values.
service_account_email = flags.service_account_email
key_file = flags.key_file
package_name = flags.package_name
apk_file = flags.apk_file
track = flags.track
f = file(key_file, 'rb')
key = f.read()
f.close()
credentials = client.SignedJwtAssertionCredentials(
service_account_email,
key,
scope='https://www.googleapis.com/auth/androidpublisher')
http = httplib2.Http()
http = credentials.authorize(http)
service = build('androidpublisher', 'v2', http=http)
try:
edit_request = service.edits().insert(body={}, packageName=package_name)
result = edit_request.execute()
edit_id = result['id']
apk_response = service.edits().apks().upload(
editId=edit_id,
packageName=package_name,
media_body=apk_file).execute()
print 'Version code %d has been uploaded' % apk_response['versionCode']
track_response = service.edits().tracks().update(
editId=edit_id,
track=track,
packageName=package_name,
body={u'versionCodes': [apk_response['versionCode']]}).execute()
print 'Track %s is set for version code(s) %s' % (
track_response['track'], str(track_response['versionCodes']))
commit_request = service.edits().commit(
editId=edit_id, packageName=package_name).execute()
print 'Edit "%s" has been committed' % (commit_request['id'])
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
|
Python
| 0.000068
|
@@ -1706,16 +1706,225 @@
.track%0A%0A
+ print 'service_account_email: %22%25s%22' %25 service_account_email%0A print 'key_file: %22%25s%22' %25 key_file%0A print 'package_name: %22%25s%22' %25 package_name%0A print 'apk_file: %22%25s%22' %25 apk_file%0A print 'track: %22%25s%22' %25 track%0A%0A
f = fi
|
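Decoded, the patch inserts a block of Python 2 print statements after the flag reads (2-space indent, as in the rest of the file):

  print 'service_account_email: "%s"' % service_account_email
  print 'key_file: "%s"' % key_file
  print 'package_name: "%s"' % package_name
  print 'apk_file: "%s"' % apk_file
  print 'track: "%s"' % track

Worth noting: the stored old_contents calls httplib2.Http() and build(...) without importing httplib2 or build, so the script as captured would fail at runtime either way; this commit does not touch those imports.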
cc17684b207277028019f6f90f0e101dad766bdf
|
Add property with list of Stratis block devices to StratisPoolDevice
|
blivet/devices/stratis.py
|
blivet/devices/stratis.py
|
# devices/stratis.py
#
# Copyright (C) 2020 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vojtech Trefny <vtrefny@redhat.com>
#
import os
import logging
log = logging.getLogger("blivet")
from .storage import StorageDevice
from ..static_data import stratis_info
from ..storage_log import log_method_call
from ..errors import DeviceError, StratisError
from .. import devicelibs
class StratisPoolDevice(StorageDevice):
""" A stratis pool device """
_type = "stratis pool"
_resizable = False
_packages = ["stratisd", "stratis-cli"]
_dev_dir = "/dev/stratis"
_format_immutable = True
def __init__(self, *args, **kwargs):
"""
:encrypted: whether this pool is encrypted or not
:type encrypted: bool
:keyword passphrase: device passphrase
:type passphrase: str
:keyword key_file: path to a file containing a key
:type key_file: str
"""
self._encrypted = kwargs.pop("encrypted", False)
self.__passphrase = kwargs.pop("passphrase", None)
self._key_file = kwargs.pop("key_file", None)
super(StratisPoolDevice, self).__init__(*args, **kwargs)
@property
def size(self):
""" The size of this pool """
# sum up the sizes of the block devices
return sum(parent.size for parent in self.parents)
@property
def encrypted(self):
""" True if this device is encrypted. """
return self._encrypted
@encrypted.setter
def encrypted(self, encrypted):
self._encrypted = encrypted
@property
def key_file(self):
""" Path to key file to be used in /etc/crypttab """
return self._key_file
def _set_passphrase(self, passphrase):
""" Set the passphrase used to access this device. """
self.__passphrase = passphrase
passphrase = property(fset=_set_passphrase)
@property
def has_key(self):
return ((self.__passphrase not in ["", None]) or
(self._key_file and os.access(self._key_file, os.R_OK)))
def _pre_create(self, **kwargs):
super(StratisPoolDevice, self)._pre_create(**kwargs)
if self.encrypted and not self.has_key:
raise StratisError("cannot create encrypted stratis pool without key")
def _create(self):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
bd_list = [bd.path for bd in self.parents]
devicelibs.stratis.create_pool(name=self.name,
devices=bd_list,
encrypted=self.encrypted,
passphrase=self.__passphrase,
key_file=self._key_file)
def _post_create(self):
super(StratisPoolDevice, self)._post_create()
self.format.exists = True
pool_info = stratis_info.get_pool_info(self.name)
if not pool_info:
raise DeviceError("Failed to get information about newly created pool %s" % self.name)
self.uuid = pool_info.uuid
for parent in self.parents:
parent.format.pool_name = self.name
parent.format.pool_uuid = self.uuid
def _destroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
devicelibs.stratis.remove_pool(self.uuid)
def add_hook(self, new=True):
super(StratisPoolDevice, self).add_hook(new=new)
if new:
return
for parent in self.parents:
parent.format.pool_name = self.name
parent.format.pool_uuid = self.uuid
def remove_hook(self, modparent=True):
if modparent:
for parent in self.parents:
parent.format.pool_name = None
parent.format.pool_uuid = None
super(StratisPoolDevice, self).remove_hook(modparent=modparent)
def dracut_setup_args(self):
return set(["stratis.rootfs.pool_uuid=%s" % self.uuid])
class StratisFilesystemDevice(StorageDevice):
""" A stratis pool device """
_type = "stratis filesystem"
_resizable = False
_packages = ["stratisd", "stratis-cli"]
_dev_dir = "/dev/stratis"
def __init__(self, *args, **kwargs):
if kwargs.get("size") is None and not kwargs.get("exists"):
kwargs["size"] = devicelibs.stratis.STRATIS_FS_SIZE
super(StratisFilesystemDevice, self).__init__(*args, **kwargs)
def _get_name(self):
""" This device's name. """
if self.pool is not None:
return "%s/%s" % (self.pool.name, self._name)
else:
return super(StratisFilesystemDevice, self)._get_name()
@property
def fsname(self):
""" The Stratis filesystem name (not including pool name). """
return self._name
@property
def pool(self):
if not self.parents:
# this should never happen but just to be sure
return None
return self.parents[0]
def _create(self):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
devicelibs.stratis.create_filesystem(self.fsname, self.pool.uuid)
def _post_create(self):
super(StratisFilesystemDevice, self)._post_create()
fs_info = stratis_info.get_filesystem_info(self.pool.name, self.fsname)
if not fs_info:
raise DeviceError("Failed to get information about newly created filesystem %s" % self.name)
self.uuid = fs_info.uuid
def _destroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
devicelibs.stratis.remove_filesystem(self.pool.uuid, self.uuid)
def dracut_setup_args(self):
return set(["root=%s" % self.path])
|
Python
| 0
|
@@ -2086,24 +2086,145 @@
**kwargs)%0A%0A
+ @property%0A def blockdevs(self):%0A %22%22%22 A list of this pool block devices %22%22%22%0A return self.parents%5B:%5D%0A%0A
@propert
|
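Decoded, the patch adds one read-only property immediately after __init__. self.parents[:] hands back a shallow copy, so callers can inspect the member block devices without mutating the pool's own parents list:

    @property
    def blockdevs(self):
        """ A list of this pool block devices """
        return self.parents[:]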
f8ee0fc34d060016f0f601e1d84000b9c612efc6
|
exclude "abstract" methods from coverage
|
muninn/storage/base.py
|
muninn/storage/base.py
|
import os.path
import muninn.util as util
class StorageBackend(object):
def __init__(self):
self.supports_symlinks = False
self.global_prefix = ''
def get_tmp_root(self, product):
if self._tmp_root:
tmp_root = os.path.join(self._tmp_root, product.core.archive_path)
util.make_path(tmp_root)
return tmp_root
def run_for_product(self, product, fn, use_enclosing_directory):
tmp_root = self.get_tmp_root(product)
product_path = self.product_path(product)
with util.TemporaryDirectory(dir=tmp_root, prefix=".run_for_product-",
suffix="-%s" % product.core.uuid.hex) as tmp_path:
self.get(product, product_path, tmp_path, use_enclosing_directory)
paths = [os.path.join(tmp_path, basename) for basename in os.listdir(tmp_path)]
return fn(paths)
def prepare(self):
# Prepare storage for use.
raise NotImplementedError()
def exists(self):
# Check that storage exists.
raise NotImplementedError()
def initialize(self, configuration):
# Initialize storage.
raise NotImplementedError()
def destroy(self):
# Destroy storage
raise NotImplementedError()
def product_path(self, product): # TODO refactor away?
# Product path within storage
raise NotImplementedError()
# TODO lower-granularity put/get/delete?
def put(self, paths, properties, use_enclosing_directory, use_symlinks=None,
retrieve_files=None, run_for_product=None):
# Place product file(s) into storage
raise NotImplementedError()
def get(self, product, product_path, target_path, use_enclosing_directory, use_symlinks=None):
# Retrieve product file(s) from storage
raise NotImplementedError()
def size(self, product_path):
# Return product storage size
raise NotImplementedError()
def delete(self, product_path, properties):
# Delete product file(s) from storage
raise NotImplementedError()
def move(self, product, archive_path, paths=None):
# Move product
raise NotImplementedError()
def current_archive_path(self, paths, properties):
raise NotImplementedError()
|
Python
| 0
|
@@ -924,24 +924,44 @@
epare(self):
+ # pragma: no cover
%0A # P
@@ -1038,24 +1038,44 @@
xists(self):
+ # pragma: no cover
%0A # C
@@ -1177,16 +1177,36 @@
ration):
+ # pragma: no cover
%0A
@@ -1283,24 +1283,44 @@
stroy(self):
+ # pragma: no cover
%0A # D
@@ -1363,32 +1363,58 @@
ementedError()%0A%0A
+ # TODO refactor away?%0A
def product_
@@ -1437,35 +1437,32 @@
ct): #
-TODO refactor away?
+pragma: no cover
%0A
@@ -1707,24 +1707,44 @@
oduct=None):
+ # pragma: no cover
%0A # P
@@ -1904,32 +1904,52 @@
_symlinks=None):
+ # pragma: no cover
%0A # Retri
@@ -2051,16 +2051,36 @@
t_path):
+ # pragma: no cover
%0A
@@ -2186,32 +2186,52 @@
th, properties):
+ # pragma: no cover
%0A # Delet
@@ -2352,16 +2352,36 @@
s=None):
+ # pragma: no cover
%0A
@@ -2487,16 +2487,36 @@
erties):
+ # pragma: no cover
%0A
|
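Every hunk appends coverage.py's exclusion pragma to one of the NotImplementedError stubs, and the product_path hunks also move the inline TODO onto its own line. A representative decoded pair:

    def prepare(self):  # pragma: no cover
        # Prepare storage for use.
        raise NotImplementedError()

    # TODO refactor away?
    def product_path(self, product): # pragma: no cover
        # Product path within storage
        raise NotImplementedError()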
5261aa35eb5ab697310efc5bc8b7d11e8655127b
|
Update project info
|
setup.py
|
setup.py
|
""" setup script for "portal" package
for development:
python setup.py develop
to install:
python setup.py install
"""
from setuptools import setup
project = "portal"
# maintain long_description as a single long line.
# workaround for a bug in pkg_info._get_metadata("PKG-INFO")
long_description =\
"""Alpha version of the TrueNTH Central Services RESTful API, to be used by TrueNTH intervention applications. This API attempts to conform with the HL7 FHIR specification as much as is reasonable.
"""
setup(
name=project,
url="https://github.com/uwcirg/true_nth_usa_portal_demo",
description="TrueNTH Central Services",
long_description=long_description,
author="University of Washington",
classifiers=(
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Intended Audience :: Healthcare Industry",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Medical Science Apps",
),
include_package_data=True,
use_scm_version=True,
zip_safe=False,
dependency_links=(
"git+https://github.com/pbugni/Flask-User.git#egg=Flask-User-0.6.8.1",
),
packages=["portal"],
setup_requires=("setuptools_scm"),
install_requires=(
"Authomatic>=0.1.0",
"celery",
"coverage",
"Flask>=0.10.1",
"Flask-Babel",
"Flask-Celery-Helper",
"Flask-Migrate",
"Flask-OAuthlib",
"Flask-SQLAlchemy",
"Flask-Script",
"Flask-Swagger",
"Flask-Testing",
"Flask-User>=0.6.8.1",
"Flask-WebTest",
"jsonschema",
"nose",
"oauthlib",
"page_objects",
"pkginfo",
"psycopg2",
"python-dateutil",
"recommonmark",
"redis",
"selenium",
"sphinx",
"sphinx_rtd_theme",
"swagger_spec_validator",
"validators",
"xvfbwrapper",
),
test_suite="tests",
)
|
Python
| 0
|
@@ -330,39 +330,38 @@
of the TrueNTH
-Central
+Shared
Services RESTfu
@@ -591,13 +591,8 @@
rtal
-_demo
%22,%0A
@@ -619,15 +619,14 @@
NTH
-Central
+Shared
Ser
@@ -684,16 +684,22 @@
author=%22
+CIRG,
Universi
@@ -713,24 +713,63 @@
ashington%22,%0A
+ author_email=%22truenth-dev@uw.edu%22,%0A
classifi
@@ -1184,16 +1184,60 @@
,%0A ),
+%0A license = %22BSD%22,%0A platforms = %22any%22,
%0A%0A in
|
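Decoded, the patch renames "Central" to "Shared" in both description fields, drops the _demo suffix from the repository URL, credits CIRG and adds a contact address, and appends license and platform metadata after the classifiers tuple. A sketch of the affected setup() arguments (spacing around = is verbatim from the patch; untouched arguments elided):

setup(
    name=project,
    url="https://github.com/uwcirg/true_nth_usa_portal",
    description="TrueNTH Shared Services",
    long_description=long_description,
    author="CIRG, University of Washington",
    author_email="truenth-dev@uw.edu",
    classifiers=(
        # ... unchanged ...
    ),
    license = "BSD",
    platforms = "any",
    # ... remaining arguments unchanged ...
)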
e65790cdab1ed63064ae9c338d5489dfa5d4ab7a
|
Fix extra new line in setup script
|
docker/compose/mozdef_bootstrap/files/initial_setup.py
|
docker/compose/mozdef_bootstrap/files/initial_setup.py
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
import argparse
from datetime import datetime, timedelta
from time import sleep
from configlib import getConfig
import json
from elasticsearch.exceptions import ConnectionError
import sys
import os
from mozdef_util.elasticsearch_client import ElasticsearchClient
parser = argparse.ArgumentParser(description='Create the correct indexes and aliases in elasticsearch')
parser.add_argument('esserver', help='Elasticsearch server (ex: http://elasticsearch:9200)')
parser.add_argument('default_mapping_file', help='The relative path to default mapping json file (ex: cron/defaultTemplateMapping.json)')
parser.add_argument('backup_conf_file', help='The relative path to backup.conf file (ex: cron/backup.conf)')
args = parser.parse_args()
esserver = os.environ.get('OPTIONS_ESSERVERS')
if esserver is None:
esserver = args.esserver
esserver = esserver.strip('/')
print "Connecting to " + esserver
client = ElasticsearchClient(esserver)
current_date = datetime.now()
event_index_name = current_date.strftime("events-%Y%m%d")
previous_event_index_name = (current_date - timedelta(days=1)).strftime("events-%Y%m%d")
weekly_index_alias = 'events-weekly'
alert_index_name = current_date.strftime("alerts-%Y%m")
index_settings_str = ''
with open(args.default_mapping_file) as data_file:
index_settings_str = data_file.read()
index_settings = json.loads(index_settings_str)
all_indices = []
total_num_tries = 15
for attempt in range(total_num_tries):
try:
all_indices = client.get_indices()
except ConnectionError:
print 'Unable to connect to Elasticsearch...retrying'
sleep(5)
else:
break
else:
print 'Cannot connect to Elasticsearch after ' + str(total_num_tries) + ' tries, exiting script.'
exit(1)
refresh_interval = getConfig('refresh_interval', '1s', args.backup_conf_file)
number_of_shards = getConfig('number_of_shards', '1', args.backup_conf_file)
number_of_replicas = getConfig('number_of_replicas', '1', args.backup_conf_file)
slowlog_threshold_query_warn = getConfig('slowlog_threshold_query_warn', '5s', args.backup_conf_file)
slowlog_threshold_fetch_warn = getConfig('slowlog_threshold_fetch_warn', '5s', args.backup_conf_file)
mapping_total_fields_limit = getConfig('mapping_total_fields_limit', '1000', args.backup_conf_file)
index_settings['settings'] = {
"index": {
"refresh_interval": refresh_interval,
"number_of_shards": number_of_shards,
"number_of_replicas": number_of_replicas,
"search.slowlog.threshold.query.warn": slowlog_threshold_query_warn,
"search.slowlog.threshold.fetch.warn": slowlog_threshold_fetch_warn,
"mapping.total_fields.limit": mapping_total_fields_limit
}
}
if event_index_name not in all_indices:
print "Creating " + event_index_name
client.create_index(event_index_name, index_config=index_settings)
client.create_alias('events', event_index_name)
if previous_event_index_name not in all_indices:
print "Creating " + previous_event_index_name
client.create_index(previous_event_index_name, index_config=index_settings)
client.create_alias('events-previous', previous_event_index_name)
if alert_index_name not in all_indices:
print "Creating " + alert_index_name
client.create_index(alert_index_name)
client.create_alias('alerts', alert_index_name)
if weekly_index_alias not in all_indices:
print "Creating " + weekly_index_alias
client.create_alias_multiple_indices(weekly_index_alias, [event_index_name, previous_event_index_name])
|
Python
| 0.000001
|
@@ -998,17 +998,16 @@
rgs()%0A%0A%0A
-%0A
esserver
|
fda17500f59f29b754aeddad9a7c4b5b538737a2
|
Set pool info on the block devices when adding/removing Stratis pool
|
blivet/devices/stratis.py
|
blivet/devices/stratis.py
|
# devices/stratis.py
#
# Copyright (C) 2020 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vojtech Trefny <vtrefny@redhat.com>
#
import os
import logging
log = logging.getLogger("blivet")
from .storage import StorageDevice
from ..static_data import stratis_info
from ..storage_log import log_method_call
from ..errors import DeviceError, StratisError
from .. import devicelibs
class StratisPoolDevice(StorageDevice):
""" A stratis pool device """
_type = "stratis pool"
_resizable = False
_packages = ["stratisd", "stratis-cli"]
_dev_dir = "/dev/stratis"
_format_immutable = True
def __init__(self, *args, **kwargs):
"""
:encrypted: whether this pool is encrypted or not
:type encrypted: bool
:keyword passphrase: device passphrase
:type passphrase: str
:keyword key_file: path to a file containing a key
:type key_file: str
"""
self._encrypted = kwargs.pop("encrypted", False)
self.__passphrase = kwargs.pop("passphrase", None)
self._key_file = kwargs.pop("key_file", None)
super(StratisPoolDevice, self).__init__(*args, **kwargs)
@property
def size(self):
""" The size of this pool """
# sum up the sizes of the block devices
return sum(parent.size for parent in self.parents)
@property
def encrypted(self):
""" True if this device is encrypted. """
return self._encrypted
@encrypted.setter
def encrypted(self, encrypted):
self._encrypted = encrypted
@property
def key_file(self):
""" Path to key file to be used in /etc/crypttab """
return self._key_file
def _set_passphrase(self, passphrase):
""" Set the passphrase used to access this device. """
self.__passphrase = passphrase
passphrase = property(fset=_set_passphrase)
@property
def has_key(self):
return ((self.__passphrase not in ["", None]) or
(self._key_file and os.access(self._key_file, os.R_OK)))
def _pre_create(self, **kwargs):
super(StratisPoolDevice, self)._pre_create(**kwargs)
if self.encrypted and not self.has_key:
raise StratisError("cannot create encrypted stratis pool without key")
def _create(self):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
bd_list = [bd.path for bd in self.parents]
devicelibs.stratis.create_pool(name=self.name,
devices=bd_list,
encrypted=self.encrypted,
passphrase=self.__passphrase,
key_file=self._key_file)
def _post_create(self):
super(StratisPoolDevice, self)._post_create()
self.format.exists = True
pool_info = stratis_info.get_pool_info(self.name)
if not pool_info:
raise DeviceError("Failed to get information about newly created pool %s" % self.name)
self.uuid = pool_info.uuid
for parent in self.parents:
parent.format.pool_name = self.name
parent.format.pool_uuid = self.uuid
def _destroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
devicelibs.stratis.remove_pool(self.uuid)
def dracut_setup_args(self):
return set(["stratis.rootfs.pool_uuid=%s" % self.uuid])
class StratisFilesystemDevice(StorageDevice):
""" A stratis pool device """
_type = "stratis filesystem"
_resizable = False
_packages = ["stratisd", "stratis-cli"]
_dev_dir = "/dev/stratis"
def __init__(self, *args, **kwargs):
if kwargs.get("size") is None and not kwargs.get("exists"):
kwargs["size"] = devicelibs.stratis.STRATIS_FS_SIZE
super(StratisFilesystemDevice, self).__init__(*args, **kwargs)
def _get_name(self):
""" This device's name. """
if self.pool is not None:
return "%s/%s" % (self.pool.name, self._name)
else:
return super(StratisFilesystemDevice, self)._get_name()
@property
def fsname(self):
""" The Stratis filesystem name (not including pool name). """
return self._name
@property
def pool(self):
if not self.parents:
# this should never happen but just to be sure
return None
return self.parents[0]
def _create(self):
""" Create the device. """
log_method_call(self, self.name, status=self.status)
devicelibs.stratis.create_filesystem(self.fsname, self.pool.uuid)
def _post_create(self):
super(StratisFilesystemDevice, self)._post_create()
fs_info = stratis_info.get_filesystem_info(self.pool.name, self.fsname)
if not fs_info:
raise DeviceError("Failed to get information about newly created filesystem %s" % self.name)
self.uuid = fs_info.uuid
def _destroy(self):
""" Destroy the device. """
log_method_call(self, self.name, status=self.status)
devicelibs.stratis.remove_filesystem(self.pool.uuid, self.uuid)
def dracut_setup_args(self):
return set(["root=%s" % self.path])
|
Python
| 0
|
@@ -4323,32 +4323,565 @@
ool(self.uuid)%0A%0A
+ def add_hook(self, new=True):%0A super(StratisPoolDevice, self).add_hook(new=new)%0A if new:%0A return%0A%0A for parent in self.parents:%0A parent.format.pool_name = self.name%0A parent.format.pool_uuid = self.uuid%0A%0A def remove_hook(self, modparent=True):%0A if modparent:%0A for parent in self.parents:%0A parent.format.pool_name = None%0A parent.format.pool_uuid = None%0A%0A super(StratisPoolDevice, self).remove_hook(modparent=modparent)%0A%0A
def dracut_s
|
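Decoded, the single hunk inserts two lifecycle methods between _destroy and dracut_setup_args, mirroring the pool's name and uuid onto each member's format on add and clearing them on remove:

    def add_hook(self, new=True):
        super(StratisPoolDevice, self).add_hook(new=new)
        if new:
            return

        for parent in self.parents:
            parent.format.pool_name = self.name
            parent.format.pool_uuid = self.uuid

    def remove_hook(self, modparent=True):
        if modparent:
            for parent in self.parents:
                parent.format.pool_name = None
                parent.format.pool_uuid = None

        super(StratisPoolDevice, self).remove_hook(modparent=modparent)

Note that the two stratis.py records in this dump are not in chronological order: the earlier record (commit cc17684b) already contains these methods in its old_contents, so this commit precedes it in the repository's history.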
61d62bca67ff98ce830144d9d0f4d41b3c191289
|
fix health_check page for azure
|
open-hackathon-server/src/hackathon/health/health_check.py
|
open-hackathon-server/src/hackathon/health/health_check.py
|
# -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------------
# Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------------
import sys
from hackathon.hmongo.models import AzureKey
sys.path.append("..")
import requests
import abc
from sqlalchemy import __version__
from hackathon.constants import HEALTH_STATUS
from hackathon import RequiredFeature, Component
from hackathon.azureformation.service import Service
__all__ = [
"HostedDockerHealthCheck",
"AlaudaDockerHealthCheck",
"GuacamoleHealthCheck",
"StorageHealthCheck"
]
STATUS = "status"
DESCRIPTION = "description"
VERSION = "version"
class HealthCheck(Component):
"""Base class for health check item"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def report_health(self):
pass
class MongoDBHealthCheck(HealthCheck):
"""Check the health status of MongoDB."""
def report_health(self):
""" Report the status by querying mongodb server info
Will return OK if server info returned
"""
try:
server_info = self.db.client.server_info()
server_info[STATUS] = HEALTH_STATUS.OK
return server_info
except Exception as e:
return {
STATUS: HEALTH_STATUS.ERROR,
DESCRIPTION: e.message
}
class HostedDockerHealthCheck(HealthCheck):
"""Report status of hostdd docker
see more on docker/hosted_docker.py
"""
def __init__(self):
self.hosted_docker = RequiredFeature("hosted_docker")
self.alauda_docker = RequiredFeature("alauda_docker")
def report_health(self):
return self.hosted_docker.report_health()
class AlaudaDockerHealthCheck(HealthCheck):
"""Report status of Alauda service
see more on docker/alauda_docker.py
"""
def __init__(self):
self.alauda_docker = RequiredFeature("alauda_docker")
def report_health(self):
return self.alauda_docker.report_health()
class GuacamoleHealthCheck(HealthCheck):
"""Check the status of Guacamole Server by request its homepage"""
def __init__(self):
self.guacamole_url = self.util.get_config("guacamole.host") + '/guacamole'
def report_health(self):
try:
req = requests.get(self.guacamole_url)
self.log.debug(req.status_code)
if req.status_code == 200:
return {
STATUS: HEALTH_STATUS.OK
}
except Exception as e:
self.log.error(e)
return {
STATUS: HEALTH_STATUS.ERROR
}
class AzureHealthCheck(HealthCheck):
"""Check the status of azure to make sure config is right and azure is available"""
def report_health(self):
azure_keys = AzureKey.objects()
if not azure_keys:
return {
STATUS: HEALTH_STATUS.WARNING,
DESCRIPTION: "No Azure key found"
}
for azure_key in azure_keys:
azure = Service(azure_key.id)
if azure.ping():
return {
STATUS: HEALTH_STATUS.OK,
"type": "Azure Storage"
}
return {
STATUS: HEALTH_STATUS.ERROR
}
class StorageHealthCheck(HealthCheck):
"""Check the status of storage"""
def report_health(self):
self.storage.report_health()
def __init__(self):
self.storage = RequiredFeature("storage")
|
Python
| 0
|
@@ -4014,17 +4014,16 @@
zure_key
-s
= Azure
@@ -4035,16 +4035,24 @@
bjects()
+.first()
%0A
@@ -4060,33 +4060,32 @@
if not azure_key
-s
:%0A re
@@ -4206,49 +4206,8 @@
%7D%0A
- for azure_key in azure_keys:%0A
@@ -4248,20 +4248,16 @@
-
if azure
@@ -4269,36 +4269,32 @@
():%0A
-
return %7B%0A
@@ -4294,36 +4294,32 @@
-
-
STATUS: HEALTH_S
@@ -4328,20 +4328,16 @@
TUS.OK,%0A
-
@@ -4371,39 +4371,53 @@
age%22%0A
+ %7D%0A
-%7D%0A
+ else:%0A
return %7B
@@ -4409,32 +4409,36 @@
return %7B%0A
+
STAT
@@ -4453,32 +4453,36 @@
TH_STATUS.ERROR%0A
+
%7D%0A%0A%0Aclas
|
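Decoded, the patch stops iterating over every stored key, pinging only AzureKey.objects().first(), and adds an explicit else branch around the ERROR return. A reconstruction of the resulting method (indentation inferred from the character-offset hunks):

    def report_health(self):
        azure_key = AzureKey.objects().first()
        if not azure_key:
            return {
                STATUS: HEALTH_STATUS.WARNING,
                DESCRIPTION: "No Azure key found"
            }
        azure = Service(azure_key.id)
        if azure.ping():
            return {
                STATUS: HEALTH_STATUS.OK,
                "type": "Azure Storage"
            }
        else:
            return {
                STATUS: HEALTH_STATUS.ERROR
            }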
95e0f194a489ce5799fb5367b755601830ea7a1a
|
Add venue to views
|
project/apps/api/views.py
|
project/apps/api/views.py
|
import logging
log = logging.getLogger(__name__)
from rest_framework import (
viewsets,
permissions,
)
from drf_haystack.viewsets import HaystackViewSet
from .filters import (
ConventionFilter,
PersonFilter,
GroupFilter,
TuneFilter,
VenueFilter,
)
from .models import (
Arranger,
Award,
Catalog,
Chapter,
Contest,
Contestant,
Convention,
Director,
Group,
Judge,
Organization,
Performance,
Performer,
Person,
Round,
Score,
Session,
Singer,
Song,
Tune,
Venue,
)
from .serializers import (
ArrangerSerializer,
AwardSerializer,
CatalogSerializer,
ChapterSerializer,
ContestSerializer,
ContestantSerializer,
ConventionSerializer,
DirectorSerializer,
GroupSearchSerializer,
GroupSerializer,
JudgeSerializer,
OrganizationSerializer,
PerformanceSerializer,
PerformerSerializer,
PersonSearchSerializer,
PersonSerializer,
RoundSerializer,
ScoreSerializer,
SessionSerializer,
SingerSerializer,
SongSerializer,
TuneSerializer,
VenueSerializer,
)
class ArrangerViewSet(viewsets.ModelViewSet):
queryset = Arranger.objects.select_related(
'catalog',
'person',
)
serializer_class = ArrangerSerializer
# lookup_field = 'slug'
resource_name = "arranger"
class AwardViewSet(viewsets.ModelViewSet):
queryset = Award.objects.select_related(
'organization',
)
serializer_class = AwardSerializer
# lookup_field = 'slug'
resource_name = "award"
class ChapterViewSet(viewsets.ModelViewSet):
queryset = Chapter.objects.select_related(
'organization',
)
serializer_class = ChapterSerializer
# lookup_field = 'slug'
resource_name = "chapter"
class CatalogViewSet(viewsets.ModelViewSet):
queryset = Catalog.objects.select_related(
'tune',
).prefetch_related(
'arrangers',
)
serializer_class = CatalogSerializer
# lookup_field = 'slug'
resource_name = "catalog"
class ContestViewSet(viewsets.ModelViewSet):
queryset = Contest.objects.select_related(
'session',
'award',
'parent',
).prefetch_related(
'contestants',
)
serializer_class = ContestSerializer
resource_name = 'contest'
# lookup_field = 'slug'
resource_name = "contest"
class ContestantViewSet(viewsets.ModelViewSet):
queryset = Contestant.objects.select_related(
'performer',
'contest',
)
serializer_class = ContestantSerializer
resource_name = 'contestant'
# lookup_field = 'slug'
resource_name = "contestant"
class SessionViewSet(viewsets.ModelViewSet):
queryset = Session.objects.select_related(
'convention',
'administrator',
).prefetch_related(
'performers',
'rounds',
'judges',
'contests',
)
serializer_class = SessionSerializer
# lookup_field = 'slug'
resource_name = "session"
class PerformerViewSet(viewsets.ModelViewSet):
queryset = Performer.objects.select_related(
'session',
'organization',
'group',
).prefetch_related(
'performances',
'contestants',
'directors',
'singers',
)
serializer_class = PerformerSerializer
# lookup_field = 'slug'
resource_name = "performer"
class ConventionViewSet(viewsets.ModelViewSet):
queryset = Convention.objects.select_related(
'organization',
).prefetch_related(
'sessions',
)
serializer_class = ConventionSerializer
# lookup_field = 'slug'
resource_name = "convention"
filter_class = ConventionFilter
class DirectorViewSet(viewsets.ModelViewSet):
queryset = Director.objects.select_related(
'person',
'performer',
)
serializer_class = DirectorSerializer
# lookup_field = 'slug'
resource_name = "director"
class GroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all().prefetch_related(
'performers',
)
serializer_class = GroupSerializer
# lookup_field = 'slug'
resource_name = "group"
filter_fields = (
'name',
)
filter_class = GroupFilter
class JudgeViewSet(viewsets.ModelViewSet):
queryset = Judge.objects.select_related(
'session',
'person',
'organization',
).prefetch_related(
'scores',
)
serializer_class = JudgeSerializer
# lookup_field = 'slug'
resource_name = "judge"
class OrganizationViewSet(viewsets.ModelViewSet):
queryset = Organization.objects.exclude(level=2)
serializer_class = OrganizationSerializer
# lookup_field = 'slug'
resource_name = "organization"
class PerformanceViewSet(viewsets.ModelViewSet):
queryset = Performance.objects.select_related(
'round',
'performer',
).prefetch_related(
'songs',
)
serializer_class = PerformanceSerializer
# lookup_field = 'slug'
resource_name = "performance"
class PersonViewSet(viewsets.ModelViewSet):
queryset = Person.objects.prefetch_related(
# 'catalogs',
'choruses',
'quartets',
)
serializer_class = PersonSerializer
# lookup_field = 'slug'
resource_name = "person"
filter_fields = (
'name',
)
filter_class = PersonFilter
class ScoreViewSet(viewsets.ModelViewSet):
queryset = Score.objects.select_related(
'song',
'judge',
)
serializer_class = ScoreSerializer
permission_classes = [
permissions.DjangoModelPermissions,
]
resource_name = "score"
class RoundViewSet(viewsets.ModelViewSet):
queryset = Round.objects.select_related(
'session',
).prefetch_related(
'performances',
)
serializer_class = RoundSerializer
# lookup_field = 'slug'
resource_name = "round"
class SingerViewSet(viewsets.ModelViewSet):
queryset = Singer.objects.select_related(
'person',
'performer',
)
serializer_class = SingerSerializer
# lookup_field = 'slug'
resource_name = "singer"
class SongViewSet(viewsets.ModelViewSet):
queryset = Song.objects.select_related(
'catalog',
'performance',
'tune',
).prefetch_related(
'scores',
)
serializer_class = SongSerializer
# lookup_field = 'slug'
resource_name = "song"
class TuneViewSet(viewsets.ModelViewSet):
queryset = Tune.objects.prefetch_related(
# 'catalogs',
'songs',
)
serializer_class = TuneSerializer
# lookup_field = 'slug'
resource_name = "tune"
filter_fields = (
'name',
)
filter_class = TuneFilter
class VenueViewSet(viewsets.ModelViewSet):
queryset = Venue.objects.prefetch_related(
'conventions',
)
serializer_class = VenueSerializer
# lookup_field = 'slug'
resource_name = "venue"
filter_fields = (
'name',
)
filter_class = VenueFilter
|
Python
| 0
|
@@ -3531,32 +3531,49 @@
'organization',%0A
+ 'venue',%0A
).prefetch_r
|
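The one-line patch extends ConventionViewSet's select_related so the new venue foreign key is fetched in the same JOIN as organization, rather than costing one extra query per convention row:

    queryset = Convention.objects.select_related(
        'organization',
        'venue',
    ).prefetch_related(
        'sessions',
    )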
6ce8537236f6bbc92789dc57a07befad391e2bc8
|
fix install_requires
|
setup.py
|
setup.py
|
from setuptools import setup
version = '2.4.1'
setup(
name='cbagent',
version=version,
description='Stats collectors package for Couchbase Server monitoring',
author='Couchbase',
license='Apache Software License',
packages=[
'cbagent',
'cbagent.collectors',
'cbagent.collectors.libstats'
],
entry_points={
'console_scripts': [
'cbagent = cbagent.__main__:main',
]
},
include_package_data=True,
install_requires=[
'couchbase==1.2.0',
'decorator',
'fabric==1.8.0',
'logger',
'requests==2.1.0',
'seriesly',
'spring'
],
dependency_links=[
'git+https://github.com/couchbaselabs/spring.git#egg=spring',
]
)
|
Python
| 0.000001
|
@@ -38,17 +38,17 @@
= '2.4.
-1
+2
'%0A%0Asetup
@@ -528,17 +528,17 @@
se==1.2.
-0
+1
',%0A
|
750da37d3bf7ce393dcb6afbf54515fb3cd7091d
|
Make it future-proof.
|
tasks/generate_bdi_data.py
|
tasks/generate_bdi_data.py
|
import math
from datetime import date, datetime
import factory
from cumulusci.tasks.bulkdata.factory_utils import Adder, ModuleDataFactory, Models
def now():
return datetime.now().date()
JOB_ID = datetime.now().isoformat().rsplit(":", 1)[0]
START_DATE = date(2019, 1, 1) # Per https://salesforce.quip.com/gLfGAPtqVzUS
class GenerateBDIData(ModuleDataFactory):
"""Generate data specific to the Honeybees test cases"""
def make_records(self, num_records, factories, batchnum):
"""Make the 4 batches of DIs described here:
https://salesforce.quip.com/YfOpAwKbhcat
"""
batch_size = math.floor(num_records / 4)
def create_batch(classname, **kwargs):
factories.create_batch(classname, batch_size, **kwargs)
gau = factories["GAU"].create(Name="Scholarship")
create_batch(
"DataImport",
counter=Adder(0),
Donation_Donor__c="Account1",
Opp_Do_Not_Automatically_Create_Payment__c=False,
Account1_Name__c=factory.LazyAttribute(lambda o: f"Alan Alda BDITEST {batchnum} {o.counter(0)} - {JOB_ID}"),
CO1_Text__c=factory.LazyAttribute(lambda o: f"BDI Text {o.counter(0)} - BDI {JOB_ID}"),
GAU_Allocation_1_GAU__c=gau.id,
ASC_Role__c="match",
ASC_Amount__c=100,
CO2_currency__c=300,
CO2_currency_2__c=400,
matching_account=True,
)
create_batch(
"DataImport",
counter=Adder(0),
Donation_Donor__c="Account1",
Opp_Do_Not_Automatically_Create_Payment__c=False,
Account1_Name__c=factory.LazyAttribute(lambda o: f"Boris Becker BDITEST {batchnum} {o.counter(0)} - {JOB_ID}"),
CO1_Text__c=factory.LazyAttribute(lambda o: f"BDI text{o.counter(0)} - BDI {JOB_ID}"),
GAU_Allocation_1_GAU__c=gau.id,
ASC_Role__c="match",
ASC_Amount__c=100,
CO2_currency__c=300,
CO2_currency_2__c=400,
)
create_batch(
"DataImport",
counter=Adder(0),
Donation_Donor__c="Contact1",
Opp_Do_Not_Automatically_Create_Payment__c=False,
Contact1_Lastname__c=factory.LazyAttribute(lambda o: f"Charisma Carpenter BDITEST {batchnum} {o.counter(0)} - {JOB_ID}"),
Opportunity_Contact_Role_1_Role__c="Influencer",
CO1_Text__c=factory.LazyAttribute(lambda o: f"BDI text{o.counter(0)} - {JOB_ID}"),
GAU_Allocation_1_GAU__c=gau.id,
matching_contact=True,
)
create_batch(
"DataImport",
counter=Adder(0),
Donation_Donor__c="Contact1",
Opp_Do_Not_Automatically_Create_Payment__c=False,
Contact1_Lastname__c=factory.LazyAttribute(lambda o: f"Danny Devito BDITEST {batchnum} {o.counter(0)} - {JOB_ID}"),
Opportunity_Contact_Role_1_Role__c="Influencer",
CO1_Text__c=factory.LazyAttribute(lambda o: f"BDI text{o.counter(0)}"),
GAU_Allocation_1_GAU__c=gau.id,
)
# Households for matching
class AccountFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = Models.households
record_type = "HH_Account"
class ContactFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = Models.contacts
class DataImport(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = Models.DataImport__c
exclude = ("account", "contact")
class Params:
counter = "Adder not set"
matching_account = factory.Trait(
account=factory.SubFactory(
AccountFactory,
name=factory.SelfAttribute("..Account1_Name__c"),
BillingStreet=factory.SelfAttribute("..Account1_Street__c"),
BillingCountry=factory.SelfAttribute("..Account1_Country__c"),
description="Pre-existing"
)
)
matching_contact = factory.Trait(
contact=factory.SubFactory(
ContactFactory,
name=factory.SelfAttribute("..Contact1_Lastname__c"),
description="Pre-existing"
)
)
id = factory.Sequence(lambda n: n + 1)
Donation_Amount__c = factory.LazyAttribute(lambda o: o.counter(1) * 100)
Donation_Date__c = now()
GAU_Allocation_1_Percent__c = 10
CO1_Date__c = now()
CO1_currency__c = 100
CO1_Number__c = 1
CO1_Picklist__c = factory.Sequence(lambda i: f"Option{(i%4) + 1}")
CO1_Phone__c = 123
CO1_textarea__c = "Long text"
CO1_url__c = "http://www.url.com/"
CO1_text2__c = factory.LazyAttribute(lambda o: f"BDI text{o.counter(0)}")
CO1_Currency2__c = 200
CO3_Text__c = factory.LazyAttribute(lambda o: f"BDI text{o.counter(0)}")
CO3_Date__c = now()
CO3_Currency__c = 100
CO3_Number__c = 1
CO3_Picklist__c = factory.Sequence(lambda i: f"Option{(i%3) + 1}")
CO3_Phone__c = 123
Account1_Country__c = "Tuvalu"
Account1_Street__c = "Cordova Street"
class GAU(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = Models.General_Accounting_Unit__c
id = factory.Sequence(lambda n: n + 1)
|
Python
| 0.000006
|
@@ -487,16 +487,33 @@
batchnum
+, **other_options
):%0A
|
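The whole patch widens one signature. Accepting **other_options lets whatever drives make_records pass new keyword arguments later without breaking this override, which is what the subject's "future-proof" refers to; the extra options are simply absorbed and ignored here:

    def make_records(self, num_records, factories, batchnum, **other_options):
        ...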
4e24c542bdd99c9f0f49a6f272ebaf5494697e6a
|
add count parameter to ec2cleanlc, for overriding default 5
|
ec2cleanlc/ec2cleanlc.py
|
ec2cleanlc/ec2cleanlc.py
|
#!/usr/bin/python
#
# Copyright (c) 2014 Vincent Janelle <randomfrequency@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Tiny bits of logic taken from Cloudcaster.py, @WrathOfChris
#
import argparse
import boto
import boto.ec2
import boto.ec2.autoscale
import boto.ec2.elb
import boto.route53
import boto.route53.zone
import boto.vpc
import datetime
import json
import os
import re
import sys
import time
import pprint
from operator import itemgetter, attrgetter
MAX_COUNT=5
pp = pprint.PrettyPrinter(indent=4)
if 'AWS_ACCESS_KEY' in os.environ:
aws_key = os.environ['AWS_ACCESS_KEY']
else:
aws_key = None
if 'AWS_SECRET_KEY' in os.environ:
aws_secret = os.environ['AWS_SECRET_KEY']
else:
aws_secret = None
vpc_subnetids = []
vpc_pubsubnetids = []
nat_subnetidx = 0
nat_instances = []
nat_instwait = 5
nat_publicdns = None
parser = argparse.ArgumentParser(description="Remove stale AWS launch configurations generated by cloudcaster.py")
parser.add_argument("-v", "--verbose", help="verbosity", action="store_true")
parser.add_argument("-n", "--dry-run", help="Dry run, noop mode", action="store_true")
parser.add_argument("file", help="cloudcaster JSON file")
args = parser.parse_args()
if args.file == None:
parser.print_help()
sys.exit(1)
verbose = args.verbose
dry_run = args.dry_run
conffile = open(args.file).read()
conf = json.loads(conffile)
# SETUP BOTO
awsasg = boto.ec2.autoscale.connect_to_region(conf['aws']['region'], aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)
lc_groups = {}
if dry_run:
print "DRY RUN - NOT EXECUTING CLEANUP"
# Utility function to extract name and date from name
def extract_lc_names(config):
# name-"%Y%m%d%H%M%S"
match = re.search("(.+)-(\d{14})", config.name)
if match:
# "%Y%m%d%H%M%S"
date = time.strptime(match.group(2), "%Y%m%d%H%M%S")
return [ match.group(1), date ]
# Scroll through all launch configurations to cache configurations
def get_launch_configurations():
res = []
lcs = awsasg.get_all_launch_configurations()
for l in lcs:
res.append(l)
while lcs.next_token != None:
lcs = awsasg.get_all_launch_configurations(next_token=lcs.next_token)
for l in lcs:
res.append(l)
return res
# Hash of launch configuration names -> [ time.time_struct, ... ]
lcname_to_date = {}
def keyitup(entry):
lc_name = entry[0]
lc_date = entry[1]
# If configration already has a list going append
# otherwise create a new one..
if lc_name in lcname_to_date:
lcname_to_date[lc_name].append(lc_date)
else:
lcname_to_date[lc_name] = [ lc_date ]
# Begin work - retrieve all launch configurations
lc = get_launch_configurations()
# sort the list of configurations by name -> [ time.time_struct, ... ] in
# order from newwest to oldest
lc_groups = sorted(list(map(extract_lc_names, lc)), key=itemgetter(0,1), reverse=False)
map(keyitup, lc_groups)
for lc_name in lcname_to_date:
count = len(lcname_to_date[lc_name])
if count > MAX_COUNT:
for i in range(0,count - MAX_COUNT):
lc = "%s-%s" % ( lc_name, time.strftime("%Y%m%d%H%M%S", lcname_to_date[lc_name][i]) )
if verbose:
print "Pruning %s" % ( lc )
if dry_run:
print "-> WOULD DELETE %s" % ( lc )
else:
res = awsasg.delete_launch_configuration(lc)
|
Python
| 0
|
@@ -2371,32 +2371,97 @@
n=%22store_true%22)%0A
+parser.add_argument(%22-c%22, %22--count%22, help=%22max count%22, type=int)%0A
parser.add_argum
@@ -2636,16 +2636,57 @@
ry_run%0A%0A
+if args.count:%0A MAX_COUNT=args.count%0A%0A
conffile
|
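Decoded, the patch registers a -c/--count flag after --dry-run and rebinds the module-level MAX_COUNT once the arguments are parsed (2-space indent per the file). Because the guard tests truthiness rather than None, passing --count 0 leaves the default of 5 in place:

parser.add_argument("-c", "--count", help="max count", type=int)

# ... later, after args = parser.parse_args() ...
if args.count:
  MAX_COUNT=args.count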
cfc5b528a25b0c77086208755d7075bf1f17efbc
|
Add patient id in SimplePatientSerializer
|
patient/serializers.py
|
patient/serializers.py
|
from django.contrib.auth.models import User
from .models import Patient
from rest_framework import serializers
import datetime
from next_of_kin.models import NextOfKin
from next_of_kin.serializers import NextOfKinSerializer
class SimpleUserSerializer(serializers.ModelSerializer):
full_name = serializers.SerializerMethodField('get_full_name')
class Meta:
model = User
fields = ['full_name',]
def get_full_name(self, obj):
return obj.get_full_name()
class SimplePatientSerializer(serializers.ModelSerializer):
user = SimpleUserSerializer()
class Meta:
model = Patient
fields = ('user',)
class PatientListSerializer(serializers.ModelSerializer):
user = SimpleUserSerializer()
birth_date = serializers.SerializerMethodField('get_birth_date')
age = serializers.SerializerMethodField('get_age')
def get_birth_date(self, obj):
return obj.national_identification_number[0:2] + "." \
+ obj.national_identification_number[2:4] + "." \
+ obj.national_identification_number[4:6]
def get_age(self, obj):
today = datetime.datetime.today()
ddmm = obj.national_identification_number[0:4]
yyyy = "20" + obj.national_identification_number[4:6]
if int(yyyy) >= today.year:
yyyy = str(int(yyyy) - 100)
birth_date = datetime.datetime.strptime(ddmm + yyyy, "%d%m%Y")
diff = today - birth_date
num_years = int(diff.days / 365.2425) # rough estimate, can be wrong in some edge cases
return num_years
class Meta:
model = Patient
fields = (
'id',
'user',
'birth_date',
'age',
'national_identification_number',
'telephone'
)
class PatientDetailSerializer(PatientListSerializer):
user = SimpleUserSerializer()
birth_date = serializers.SerializerMethodField('get_birth_date')
age = serializers.SerializerMethodField('get_age')
next_of_kin = serializers.SerializerMethodField('get_next_of_kin')
def get_next_of_kin(self, obj):
next_of_kin = NextOfKin.objects.filter(patient__id=obj.id)
serializer = NextOfKinSerializer(next_of_kin, many=True, context=self.context)
return serializer.data
class Meta:
model = Patient
fields = (
'id',
'user',
'birth_date',
'age',
'national_identification_number',
'telephone',
'address',
'next_of_kin',
'pulse_max',
'pulse_min',
'o2_max',
'o2_min',
'temperature_max',
'temperature_min',
'activity_access',
'pulse_access',
'o2_access',
'temperature_access'
)
|
Python
| 0.000001
|
@@ -637,23 +637,28 @@
elds = (
+'id',
'user'
-,
)%0A%0A%0Aclas
|
b1933e4d998d703a14bbb1769e04a078fac215bc
|
Update HexStats.py
|
HexChat/HexStats.py
|
HexChat/HexStats.py
|
import hexchat
#Based on Weechat's Weestats: https://weechat.org/scripts/source/weestats.py.html/
#By Filip H.F. 'FiXato' Slagter <fixato [at] gmail [dot] com>
__module_name__ = 'HexStats'
__module_version__ = '0.0.1'
__module_description__ = 'Displays HexChat Wide User Statistics'
__module_author__ = 'Vlek'
def stats(word, word_to_eol, userdata):
context = hexchat.find_context()
context.prnt( getstats() )
return hexchat.EAT_ALL
def printstats(word, word_to_eol, userdata):
context = hexchat.find_context()
context.command('say {}'.format( getstats() ))
return hexchat.EAT_ALL
def getstats():
chans = hexchat.get_list('channels')
types = [i.type for i in chans]
channels = types.count(2)
contextlist = [i.context for i in chans if i.type == 2]
ops = []
for context in contextlist:
ops += [user.prefix for user in context.get_list('users') if user.nick == context.get_info('nick')]
print('Channel: {} - {}'.format(context.get_info('channel'), context.get_info('nick')))
#ops = ops.count('@')
servers = types.count(1)
queries = types.count(3)
return 'Stats: {} channels ({} OPs), {} servers, {} queries'.format( channels, ops,
servers, queries )
hexchat.hook_command("stats", stats, help="/stats displays HexChat user statistics")
hexchat.hook_command("printstats", printstats, help="/printstats Says HexChat user statistics in current context")
|
Python
| 0.000001
|
@@ -255,18 +255,18 @@
HexChat
- W
+-w
ide User
@@ -742,52 +742,61 @@
-contextlist = %5Bi.context for i in chans if i
+ops = %5B%5D%0A for channel in chans:%0A if channel
.typ
@@ -805,30 +805,21 @@
== 2
-%5D%0A
+:%0A
-ops = %5B%5D%0A
+
-for
con
@@ -827,24 +827,30 @@
ext
-in contextlist:%0A
+= channel.context%0A
@@ -957,109 +957,13 @@
')%5D%0A
- print('Channel: %7B%7D - %7B%7D'.format(context.get_info('channel'), context.get_info('nick')))
%0A
-#
ops
|
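Decoded, the hunks hyphenate the module description ("HexChat-wide"), replace the list-comprehension-plus-loop with a single guarded loop, drop the per-channel debug print, and un-comment ops.count('@') so ops ends up as the number of channels where the user's prefix is '@' rather than a list of prefix strings. A reconstruction of the resulting getstats() (assembled from the character-offset hunks):

def getstats():
    chans = hexchat.get_list('channels')
    types = [i.type for i in chans]
    channels = types.count(2)
    ops = []
    for channel in chans:
        if channel.type == 2:
            context = channel.context
            ops += [user.prefix for user in context.get_list('users') if user.nick == context.get_info('nick')]
    ops = ops.count('@')
    servers = types.count(1)
    queries = types.count(3)
    return 'Stats: {} channels ({} OPs), {} servers, {} queries'.format( channels, ops,
                                                                         servers, queries )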
226e1ebfb71cfeea248425d3a157c3f43b6d95cb
|
Remove todo comments
|
bes/__init__.py
|
bes/__init__.py
|
"""
Log events to Elastic Search via bulk upload
"""
import datetime as _datetime
import json as _json
import logging as _logging
import socket as _socket
__version__ = '0.1'
LOG = _logging.getLogger(__name__)
DEFAULT = {
'host': 'localhost',
'port': 9700,
'protocol': 'UDP',
'index': 'log',
'datestamp_index': True,
'type': None,
}
class Connection(object):
"""A socket connecting to Elastic Search
Use a context manager for PEP 343's 'with' syntax:
>>> with Connection(host='localhost', port=1234) as c:
... c.send(message='hello!')
"""
def __init__(self, host=None, port=None, protocol=None):
if host is None:
host = DEFAULT['host']
if port is None:
port = DEFAULT['port']
if protocol is None:
protocol = DEFAULT['protocol']
self.host = host
self.port = port
if protocol == 'UDP':
self.socket_type = _socket.SOCK_DGRAM
else:
raise NotImplementedError(protocol)
self._sock = None
def __enter__(self):
self._sock = _socket.socket(_socket.AF_INET, self.socket_type)
return self
def __exit__(self, *exc_info):
if self._sock is not None:
try:
self._sock.close()
finally:
self._sock = None
def send(self, message):
LOG.debug(message)
self._sock.sendto(message, (self.host, self.port))
def log(index=None, type=None, sort_keys=False, **kwargs):
"""Log an arbitrary payload dictionary to Elastic Search
Uses the default connection configuration. If you need to
override any of them, build your payload dict by hand and use
emit() instead.
You can optionally override the index and type of payload, for
later filtering in Elastic Search. This means that `index` and
`type` are not available as payload keys.
"""
kwargs['@timestamp'] = _datetime.datetime.utcnow().isoformat()
kwargs['@version'] = 1
return emit(payload=kwargs, index=index, type=type, sort_keys=sort_keys)
def emit(payload, index=None, datestamp_index=None, type=None,
sort_keys=False, connection_class=Connection, **kwargs):
"""Send bulk-upload data to Elastic Search
Uses the 'index' action to add or replace a document as necessary.
http://www.elasticsearch.org/guide/reference/api/bulk/
http://www.elasticsearch.org/guide/reference/api/bulk-udp/
"""
#TODO indexes, types, and what Kibana likes.
#Try it out and adjust
#throwing all of payloads **kwargs into an 'additional' or simular field might
#required, and I don't know what happens if we send different data types with
#the same name ie a **kwargs of my_special_key: str and my_special_key: {'foo': 'bar'}
if index is None:
index = DEFAULT['index']
if type is None:
type = DEFAULT['type']
if datestamp_index is None:
datestamp_index = DEFAULT['datestamp_index']
if datestamp_index:
index = '-'.join([
index,
_datetime.date.today().strftime('%Y.%m.%d'),
])
if type is None:
LOG.error('You must set a type for {!r}'.format(payload))
return
index_data = {
'index': {
'_index': index,
'_type': type,
},
}
#not everything is JSON serializable, and logging should not blow up
#how to make these errors easier to track down?
try:
message = '\n'.join([
_json.dumps(index_data, sort_keys=sort_keys),
_json.dumps(payload, sort_keys=sort_keys),
'',
])
except TypeError:
LOG.error("Unable to serlialize %s to json" % payload)
message = '\n'.join([
_json.dumps(index_data, sort_keys=sort_keys),
_json.dumps({"error": "unable to serialize"}, sort_keys=sort_keys),
'',
])
if hasattr(message, 'encode'):
message = message.encode('utf-8') # convert str to bytes for Python 3
with connection_class(**kwargs) as connection:
connection.send(message)
return message
|
Python
| 0
|
@@ -2494,341 +2494,8 @@
%22%22%22%0A
- #TODO indexes, types, and what Kibana likes.%0A #Try it out and adjust%0A #throwing all of payloads **kwargs into an 'additional' or simular field might %0A #required, and I don't know what happens if we send different data types with%0A #the same name ie a **kwargs of my_special_key: str and my_special_key: %7B'foo': 'bar'%7D%0A
|
26281cbe36ae999f01d24af4cebf62874c3ba76d
|
Fix profile image URLs for image storage on non-public S3 buckets
|
openedx/core/djangoapps/user_api/accounts/image_helpers.py
|
openedx/core/djangoapps/user_api/accounts/image_helpers.py
|
"""
Helper functions for the accounts API.
"""
import hashlib
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.storage import get_storage_class
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from student.models import UserProfile
from ..errors import UserNotFound
PROFILE_IMAGE_FILE_EXTENSION = 'jpg' # All processed profile images are converted to JPEGs
_PROFILE_IMAGE_SIZES = list(settings.PROFILE_IMAGE_SIZES_MAP.values())
def get_profile_image_storage():
"""
Configures and returns a django Storage instance that can be used
to physically locate, read and write profile images.
"""
config = settings.PROFILE_IMAGE_BACKEND
storage_class = get_storage_class(config['class'])
return storage_class(**config['options'])
def _make_profile_image_name(username):
"""
Returns the user-specific part of the image filename, based on a hash of
the username.
"""
hash_input = settings.PROFILE_IMAGE_HASH_SEED + username
return hashlib.md5(hash_input.encode('utf-8')).hexdigest()
def _get_profile_image_filename(name, size, file_extension=PROFILE_IMAGE_FILE_EXTENSION):
"""
Returns the full filename for a profile image, given the name and size.
"""
return '{name}_{size}.{file_extension}'.format(name=name, size=size, file_extension=file_extension)
def _get_profile_image_urls(name, storage, file_extension=PROFILE_IMAGE_FILE_EXTENSION, version=None):
"""
Returns a dict containing the urls for a complete set of profile images,
keyed by "friendly" name (e.g. "full", "large", "medium", "small").
"""
def _make_url(size):
url = storage.url(
_get_profile_image_filename(name, size, file_extension=file_extension)
)
return '{}?v={}'.format(url, version) if version is not None else url
return {size_display_name: _make_url(size) for size_display_name, size in settings.PROFILE_IMAGE_SIZES_MAP.items()}
def get_profile_image_names(username):
"""
Returns a dict containing the filenames for a complete set of profile
images, keyed by pixel size.
"""
name = _make_profile_image_name(username)
return {size: _get_profile_image_filename(name, size) for size in _PROFILE_IMAGE_SIZES}
def get_profile_image_urls_for_user(user, request=None):
"""
Return a dict {size:url} for each profile image for a given user.
Notes:
- this function does not determine whether the set of profile images
exists, only what the URLs will be if they do exist. It is assumed that
callers will use `_get_default_profile_image_urls` instead to provide
a set of urls that point to placeholder images, when there are no user-
submitted images.
- based on the value of django.conf.settings.PROFILE_IMAGE_BACKEND,
the URL may be relative, and in that case the caller is responsible for
constructing the full URL if needed.
Arguments:
user (django.contrib.auth.User): the user for whom we are getting urls.
Returns:
dictionary of {size_display_name: url} for each image.
"""
try:
if user.profile.has_profile_image:
urls = _get_profile_image_urls(
_make_profile_image_name(user.username),
get_profile_image_storage(),
version=user.profile.profile_image_uploaded_at.strftime("%s"),
)
else:
urls = _get_default_profile_image_urls()
except UserProfile.DoesNotExist:
# when the user does not have a profile this raises an exception; in that
# case we simply fall back to the default image.
urls = _get_default_profile_image_urls()
if request:
for key, value in urls.items():
urls[key] = request.build_absolute_uri(value)
return urls
def _get_default_profile_image_urls():
"""
Returns a dict {size:url} for a complete set of default profile images,
used as a placeholder when there are no user-submitted images.
TODO The result of this function should be memoized, but not in tests.
"""
return _get_profile_image_urls(
configuration_helpers.get_value('PROFILE_IMAGE_DEFAULT_FILENAME', settings.PROFILE_IMAGE_DEFAULT_FILENAME),
staticfiles_storage,
file_extension=settings.PROFILE_IMAGE_DEFAULT_FILE_EXTENSION,
)
def set_has_profile_image(username, is_uploaded, upload_dt=None):
"""
System (not user-facing) API call used to store whether the user has
uploaded a profile image, and if so, when. Used by profile_image API.
Arguments:
username (django.contrib.auth.User.username): references the user who
uploaded an image.
is_uploaded (bool): whether or not the user has an uploaded profile
image.
upload_dt (datetime.datetime): If `is_uploaded` is True, this should
contain the server-side date+time of the upload. If `is_uploaded`
is False, the parameter is optional and will be ignored.
Raises:
ValueError: is_uploaded was True, but no upload datetime was supplied.
UserNotFound: no user with username `username` exists.
"""
if is_uploaded and upload_dt is None:
raise ValueError("No upload datetime was supplied.")
elif not is_uploaded:
upload_dt = None
try:
profile = UserProfile.objects.get(user__username=username)
except ObjectDoesNotExist:
raise UserNotFound()
profile.profile_image_uploaded_at = upload_dt
profile.save()
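# --- Editor's sketch (not from the original source) ---
# The versioning behaviour fixed by this commit, in isolation: append the
# cache-busting "v" parameter with "?" for plain URLs and with "&" for URLs
# that already carry a query string (such as signed S3 URLs). The URLs in
# the usage comments are illustrative.
def _sketch_append_version(url, version):
separator = '&' if '?' in url else '?'
return '{}{}v={}'.format(url, separator, version)
# _sketch_append_version('/media/abc_50.jpg', 150) -> '/media/abc_50.jpg?v=150'
# _sketch_append_version('https://bucket/x.jpg?sig=abc', 150) -> '...?sig=abc&v=150'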
|
Python
| 0.999999
|
@@ -1892,32 +1892,321 @@
sion)%0A )%0A
+ # Return the URL, with the %22v%22 parameter added as its query%0A # string with %22?v=%22. If the original URL already includes a%0A # query string (such as signed S3 URLs), append to the query%0A # string with %22&v=%22 instead.%0A separator = '&' if '?' in url else '?'%0A
return '
@@ -2207,17 +2207,18 @@
turn '%7B%7D
-?
+%7B%7D
v=%7B%7D'.fo
@@ -2226,16 +2226,27 @@
mat(url,
+ separator,
version
|
1a6b79629c4e79e3917287a693047fbe5e0129ad
|
Check if user is admin before lockdown
|
plugins/lock_the_chat.py
|
plugins/lock_the_chat.py
|
"""
Chat lock plugin
"""
import octeon
global locked
locked = []
PLUGINVERSION = 2
# Always name this variable as `plugin`
# If you dont, module loader will fail to load the plugin!
plugin = octeon.Plugin()
@plugin.message(regex=".*") # You pass regex pattern
def lock_check(bot, update):
if update.message.chat_id in locked:
for admin in update.message.chat.get_administrators():
if admin.user.username == update.message.from_user.username:
return
update.message.delete()
return
@plugin.command(command="/lock",
description="Locks chat",
inline_supported=True,
hidden=False)
def lock(bot, update, user, args):
if update.message.chat.type != "PRIVATE" and not update.message.chat_id in locked:
for admin in update.message.chat.get_administrators():
if admin.user.username == bot.get_me().username:
locked.append(update.message.chat_id)
return octeon.message("Chat locked")
return octeon.message("I am not admin of this chat...")
else:
return octeon.message("Why would you lock a private conversation?")
@plugin.command(command="/unlock",
description="Unlocks chat",
inline_supported=True,
hidden=False)
def unlock(bot, update, user, args):
if update.message.chat_id in locked:
locked.remove(update.message.chat_id)
return octeon.message("Chat unlocked")
else:
return octeon.message("This chat wasn't locked at all")
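# --- Editor's sketch (not from the original source) ---
# The admin check repeated in lock_check(), lock() and unlock() could be
# factored into one helper; this sketch assumes the python-telegram-bot
# style objects used above, where chat.get_administrators() returns chat
# members exposing a .user attribute.
def _is_admin(update):
"""Return True if the sender administers the chat."""
for admin in update.message.chat.get_administrators():
if admin.user.username == update.message.from_user.username:
return True
return False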
|
Python
| 0.000001
|
@@ -738,77 +738,275 @@
chat
-.type != %22PRIVATE%22 and not update.message.chat_id in locked:%0A
+_id in locked:%0A return octeon.message(%22Chat is already locked%22)%0A if update.message.chat.type != %22PRIVATE%22:%0A for admin in update.message.chat.get_administrators():%0A if admin.user.username == update.message.from_user.username:%0A
for
@@ -993,32 +993,34 @@
:%0A
+
for admin in upd
@@ -1062,32 +1062,40 @@
():%0A
+
if admin.user.us
@@ -1139,24 +1139,32 @@
+
+
locked.appen
@@ -1197,32 +1197,40 @@
+
+
return octeon.me
@@ -1246,24 +1246,32 @@
at locked%22)%0A
+
retu
@@ -1322,16 +1322,148 @@
at...%22)%0A
+ return octeon.message(text=%22Hey! You are not admin of this chat!%22, photo=%22https://pbs.twimg.com/media/C_I2Xv1WAAAkpiv.jpg%22)%0A
else
@@ -1759,32 +1759,176 @@
t_id in locked:%0A
+ for admin in update.message.chat.get_administrators():%0A if admin.user.username == update.message.from_user.username:%0A
locked.r
@@ -1949,32 +1949,40 @@
essage.chat_id)%0A
+
return o
|
b3204ccf3bd3ac26cabb4e6aa75bdb9dbf3f9e75
|
Sentence case the biscuit
|
elections/uk/migrations/0005_add_favourite_biscuits.py
|
elections/uk/migrations/0005_add_favourite_biscuits.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
def create_simple_fields(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
db_alias = schema_editor.connection.alias
ExtraField.objects.using(db_alias).update_or_create(
key='favourite_biscuits',
defaults={
'label': 'Favourite Biscuit 🍪',
'type': 'line',
'order': 1,
}
)
dependencies = [
('uk', '0004_add_biography'),
]
operations = [
migrations.RunPython(create_simple_fields),
]
|
Python
| 0.999999
|
@@ -468,9 +468,9 @@
ite
-B
+b
iscu
|
e45fac8e85d5b04e34a63dee8bd676ba294886e5
|
Remove unused import in hadec.py
|
astropy/coordinates/builtin_frames/hadec.py
|
astropy/coordinates/builtin_frames/hadec.py
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc
from astropy.coordinates.attributes import (TimeAttribute,
QuantityAttribute,
EarthLocationAttribute)
__all__ = ['HADec']
doc_components = """
ha : `~astropy.coordinates.Angle`, optional, keyword-only
The Hour Angle for this object (``dec`` must also be given and
``representation`` must be None).
dec : `~astropy.coordinates.Angle`, optional, keyword-only
The Declination for this object (``ha`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
pm_ha_cosdec : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in hour angle (including the ``cos(dec)`` factor) for
this object (``pm_dec`` must also be given).
pm_dec : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in declination for this object (``pm_ha_cosdec`` must
also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object."""
doc_footer = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position and orientation of the Earth.
location : `~astropy.coordinates.EarthLocation`
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame.
pressure : `~astropy.units.Quantity` ['pressure']
The atmospheric pressure as an `~astropy.units.Quantity` with pressure
units. This is necessary for performing refraction corrections.
Setting this to 0 (the default) will disable refraction calculations
when transforming to/from this frame.
temperature : `~astropy.units.Quantity` ['temperature']
The ground-level temperature as an `~astropy.units.Quantity` in
deg C. This is necessary for performing refraction corrections.
relative_humidity : `~astropy.units.Quantity` ['dimensionless'] or number.
The relative humidity as a dimensionless quantity between 0 to 1.
This is necessary for performing refraction corrections.
obswl : `~astropy.units.Quantity` ['length']
The average wavelength of observations as an `~astropy.units.Quantity`
with length units. This is necessary for performing refraction
corrections.
Notes
-----
The refraction model is based on that implemented in ERFA, which is fast
but becomes inaccurate for altitudes below about 5 degrees. Near and below
altitudes of 0, it can even give meaningless answers, and in this case
transforming to HADec and back to another frame can give highly discrepant
results. For much better numerical stability, leave the ``pressure`` at
``0`` (the default), thereby disabling the refraction correction and
yielding "topocentric" equatorial coordinates.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class HADec(BaseCoordinateFrame):
"""
A coordinate or frame in the Hour Angle-Declination system (Equatorial
coordinates) with respect to the WGS84 ellipsoid. Hour Angle is oriented
with respect to upper culmination such that the hour angle is negative to
the East and positive to the West.
This frame is assumed to *include* refraction effects if the ``pressure``
frame attribute is non-zero.
The frame attributes are listed under **Other Parameters**, which are
necessary for transforming from HADec to some other system.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'ha', u.hourangle),
RepresentationMapping('lat', 'dec')
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
obstime = TimeAttribute(default=None)
location = EarthLocationAttribute(default=None)
pressure = QuantityAttribute(default=0, unit=u.hPa)
temperature = QuantityAttribute(default=0, unit=u.deg_C)
relative_humidity = QuantityAttribute(default=0, unit=u.dimensionless_unscaled)
obswl = QuantityAttribute(default=1*u.micron, unit=u.micron)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.has_data:
self._set_data_lon_wrap_angle(self.data)
@staticmethod
def _set_data_lon_wrap_angle(data):
if hasattr(data, 'lon'):
data.lon.wrap_angle = 180. * u.deg
return data
def represent_as(self, base, s='base', in_frame_units=False):
"""
Ensure the wrap angle for any spherical
representations.
"""
data = super().represent_as(base, s, in_frame_units=in_frame_units)
self._set_data_lon_wrap_angle(data)
return data
# self-transform defined in cirs_observed_transforms.py
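# --- Editor's usage sketch (not from the original source; assumes a
# recent astropy that ships this frame) ---
# Build a HADec coordinate for a site and time; pressure defaults to 0 hPa,
# so no refraction correction is applied (see the class notes above). The
# site and angles below are illustrative.
from astropy.coordinates import EarthLocation
from astropy.time import Time
_example_site = EarthLocation(lat=19.8 * u.deg, lon=-155.5 * u.deg,
height=4200 * u.m)
_example_coord = HADec(ha=-1 * u.hourangle, dec=20 * u.deg,
obstime=Time('2021-01-01T00:00:00'),
location=_example_site)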
|
Python
| 0
|
@@ -86,28 +86,8 @@
st%0A%0A
-import numpy as np%0A%0A
from
|
e37219c0403810adfb74bd321dfa2a916ef77820
|
update "completion_from_script"
|
jedi_daemon.py
|
jedi_daemon.py
|
# -*- coding: utf-8 -*-
import os
import sys
import json
import logging
from logging import handlers
from optparse import OptionParser
import jedi
from jedi.api import NotFoundError
log = logging.getLogger('')
log.setLevel(logging.DEBUG)
def write(data):
if not isinstance(data, str):
data = json.dumps(data)
sys.stdout.write(data)
if not data.endswith('\n'):
sys.stdout.write('\n')
try:
sys.stdout.flush()
except IOError:
sys.exit()
is_funcargs_complete_enabled = True
auto_complete_function_params = 'required'
def format_completion(complete):
""" Returns a tuple of the string that would be visible in
the completion dialogue and the completion word
:type complete: jedi.api_classes.Completion
:rtype: (str, str)
"""
display, insert = complete.name + '\t' + complete.type, complete.name
return display, insert
def get_function_parameters(callDef):
""" Return a list of function parameters, prepared for Sublime completion.
Each tuple contains the parameter name and its default value.
The parameters list excludes self, *args and **kwargs.
:type callDef: jedi.api_classes.CallDef
:rtype: list of (str, str)
"""
if not callDef:
return []
params = []
for param in callDef.params:
cleaned_param = param.get_code().strip()
if '*' in cleaned_param or cleaned_param == 'self':
continue
params.append([s.strip() for s in cleaned_param.split('=')])
return params
def funcargs_from_script(script):
""" Get completions for the case where we are inside a function call
:type script: jedi.api.Script
:rtype: list of str
"""
completions = []
in_call = script.call_signatures()
params = get_function_parameters(in_call)
for code in params:
if len(code) == 1:
completions.append((code[0], '${1:%s}' % code[0]))
else:
completions.append((code[0] + '\t' + code[1],
'%s=${1:%s}' % (code[0], code[1])))
return completions
def completions_from_script(script):
""" regular completions """
completions = script.complete()
return [format_completion(complete) for complete in completions]
def goto_from_script(script):
for method in ['get_definition', 'goto']:
try:
defns = getattr(script, method)()
except NotFoundError:
pass
else:
return [(i.module_path, i.line, i.column)
for i in defns if not i.in_builtin_module()
]
def usages_from_script(script):
return [(i.module_path, i.line, i.column)
for i in script.related_names() if not i.in_builtin_module()
]
def funcrargs_from_script(script):
""" Get function or class parameters and build Sublime Snippet string
for completion
:type script: jedi.api.Script
:rtype: str
"""
complete_all = auto_complete_function_params == 'all'
parameters = get_function_parameters(script.call_signatures())
completions = []
for index, parameter in enumerate(parameters):
try:
name, value = parameter
except IndexError:
name = parameter[0]
value = None
if value is None:
completions.append('${%d:%s}' % (index + 1, name))
elif complete_all:
completions.append('%s=${%d:%s}' % (name, index + 1, value))
return ", ".join(completions)
def process_line(line):
data = json.loads(line.strip())
req_type = data.get('type', None)
script = jedi.Script(data['source'], int(data['line']), int(data['offset']),
data['filename'] or '', 'utf-8')
out_data = {'uuid': data['uuid'], 'type': data['type']}
if req_type == 'autocomplete':
out_data[req_type] = funcargs_from_script(script) or []
out_data[req_type].extend(completions_from_script(script) or [])
elif req_type == 'goto':
out_data[req_type] = goto_from_script(script)
elif req_type == 'usages':
out_data[req_type] = usages_from_script(script)
elif req_type == 'funcargs':
out_data[req_type] = funcrargs_from_script(script)
write(out_data)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-p", "--project", dest="project_name", default='',
help="project name to store jedi's cache")
parser.add_option("-e", "--extra_folder", dest="extra_folders", default=[],
action="append", help="extra folders to add to sys.path")
parser.add_option("-f", "--complete_function_params", dest="function_params",
default='all')
options, args = parser.parse_args()
is_funcargs_complete_enabled = bool(options.function_params)
auto_complete_function_params = options.function_params
if options.project_name:
jedi.settings.cache_directory = os.path.join(
jedi.settings.cache_directory,
options.project_name,
)
if not os.path.exists(jedi.settings.cache_directory):
os.makedirs(jedi.settings.cache_directory)
hdlr = handlers.RotatingFileHandler(
filename=os.path.join(jedi.settings.cache_directory, 'daemon.log'),
maxBytes=10000000,
backupCount=5,
encoding='utf-8'
)
hdlr.setFormatter(logging.Formatter('%(asctime)s: %(levelname)-8s: %(message)s'))
log.addHandler(hdlr)
log.info(
"started. cache directory - %s, extra folders - %s, complete_function_params - %s",
jedi.settings.cache_directory,
options.extra_folders,
options.function_params,
)
for extra_folder in options.extra_folders:
if extra_folder not in sys.path:
sys.path.insert(0, extra_folder)
for line in iter(sys.stdin.readline, ''):
if line:
try:
process_line(line)
except Exception:
log.exception('failed to process line')
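# --- Editor's protocol sketch (not from the original source) ---
# process_line() consumes one JSON object per stdin line; this builds a
# request of the shape read above. The uuid, source and filename values
# are illustrative.
def _sketch_request():
return json.dumps({
'type': 'autocomplete',
'uuid': '42',
'source': 'import os\nos.',
'line': 2,
'offset': 3,
'filename': 'example.py',
})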
|
Python
| 0
|
@@ -2128,16 +2128,86 @@
pletions
+%0A%0A :type script: jedi.api.Script%0A :rtype: list of (str, str)%0A
%22%22%22%0A
@@ -2235,17 +2235,20 @@
.complet
-e
+ions
()%0A r
|
d60f0fa1f942a24ca38ce20f2b5a617eb5181456
|
update session backend
|
hiren/hiren/settings.py
|
hiren/hiren/settings.py
|
"""
Django settings for hiren project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6ajer4-t(c_k3@tb0@g-w5ztxoq61e866pm0xl2t4im%khu9qo'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'debug_toolbar',
'disk',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'hiren.urls'
WSGI_APPLICATION = 'hiren.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Dhaka'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, "template"),
)
|
Python
| 0.000001
|
@@ -1405,16 +1405,91 @@
re',%0A)%0A%0A
+MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'%0A
ROOT_URL
|
8b01df8ffb790a66cb054206def0a425275539c4
|
Fix encoding issue and comment improvement (#1807)
|
endpoints/getting-started/clients/google-jwt-client.py
|
endpoints/getting-started/clients/google-jwt-client.py
|
#!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of calling a Google Cloud Endpoint API with a JWT signed by
a Google API Service Account."""
import argparse
import time
import google.auth.crypt
import google.auth.jwt
import requests
from six.moves import urllib
def generate_jwt(service_account_file):
"""Generates a signed JSON Web Token using a Google API Service Account."""
# Note: this sample shows how to manually create the JWT for the purposes
# of showing how the authentication works, but you can use
# google.auth.jwt.Credentials to automatically create the JWT.
# http://google-auth.readthedocs.io/en/latest/reference
# /google.auth.jwt.html#google.auth.jwt.Credentials
signer = google.auth.crypt.RSASigner.from_service_account_file(
service_account_file)
now = int(time.time())
expires = now + 3600 # One hour in seconds
payload = {
'iat': now,
'exp': expires,
# aud must match 'audience' in the security configuration in your
# swagger spec. It can be any string.
'aud': 'echo.endpoints.sample.google.com',
# iss must match 'issuer' in the security configuration in your
# swagger spec. It can be any string.
'iss': 'jwt-client.endpoints.sample.google.com',
# sub and email are mapped to the user id and email respectively.
'sub': '12345678',
'email': 'user@example.com'
}
jwt = google.auth.jwt.encode(signer, payload)
return jwt
def make_request(host, api_key, signed_jwt):
"""Makes a request to the auth info endpoint for Google JWTs."""
url = urllib.parse.urljoin(host, '/auth/info/googlejwt')
params = {
'key': api_key
}
headers = {
'Authorization': 'Bearer {}'.format(signed_jwt)
}
response = requests.get(url, params=params, headers=headers)
response.raise_for_status()
return response.text
def main(host, api_key, service_account_file):
signed_jwt = generate_jwt(service_account_file)
response = make_request(host, api_key, signed_jwt)
print(response)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'host', help='Your API host, e.g. https://your-project.appspot.com.')
parser.add_argument(
'api_key', help='Your API key.')
parser.add_argument(
'service_account_file',
help='The path to your service account json file.')
args = parser.parse_args()
main(args.host, args.api_key, args.service_account_file)
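# --- Editor's sketch (not from the original source) ---
# As the comment in generate_jwt() notes, google.auth.jwt.Credentials can
# build and sign the same token automatically; a sketch, assuming the
# audience matches the one configured in the Endpoints swagger spec:
def _sketch_credentials(service_account_file):
return google.auth.jwt.Credentials.from_service_account_file(
service_account_file,
audience='echo.endpoints.sample.google.com')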
|
Python
| 0
|
@@ -1784,32 +1784,61 @@
# swagger spec
+ (e.g. service account email)
. It can be any
@@ -1988,24 +1988,87 @@
-'sub': '12345678
+# sub should match 'iss'%0A 'sub': 'jwt-client.endpoints.sample.google.com
',%0A
@@ -2158,16 +2158,32 @@
payload)
+.decode('UTF-8')
%0A%0A re
|
e7424f77dcaeabc39f923ade405694457287fae9
|
Strip out the .json file extension when loading from a json store
|
pump_json.py
|
pump_json.py
|
#!/usr/bin/env python
import logging
import os
import simplejson as json
import struct
import sys
import shutil
import tempfile
import zipfile
import couchbaseConstants
import pump
JSON_SCHEME = "json://"
class JSONSource(pump.Source):
"""Reads json file or directory or zip file that contains json files."""
def __init__(self, opts, spec, source_bucket, source_node,
source_map, sink_map, ctl, cur):
super(JSONSource, self).__init__(opts, spec, source_bucket, source_node,
source_map, sink_map, ctl, cur)
self.done = False
self.docs = list()
self.file_iter = None
@staticmethod
def can_handle(opts, spec):
return spec.startswith(JSON_SCHEME) and \
(os.path.isfile(spec.replace(JSON_SCHEME, "")) or \
os.path.isdir(spec.replace(JSON_SCHEME, "")) or \
spec.endswith(".zip"))
@staticmethod
def check(opts, spec):
return 0, {'spec': spec,
'buckets': [{'name': os.path.basename(spec),
'nodes': [{'hostname': 'N/A'}]}]}
def save_doc(self, batch, dockey, docvalue):
cmd = couchbaseConstants.CMD_TAP_MUTATION
vbucket_id = 0x0000ffff
cas, exp, flg = 0, 0, 0
try:
doc = json.loads(docvalue)
if '_id' not in doc:
msg = (cmd, vbucket_id, dockey, flg, exp, cas, '', docvalue, 0, 0, 0, 0)
batch.append(msg, len(docvalue))
else:
id = doc['_id'].encode('UTF-8')
del doc['_id']
docdata = {"doc":{
"json": doc,
"meta":{"id":id}
}}
if not is_data:
batch.append(json.dumps(docdata), len(docdata))
except ValueError, error:
logging.error("Failed to read json file with error: " + str(error))
@staticmethod
def gen_dockey(filename):
return os.path.splitext(os.path.basename(filename))[0]
@staticmethod
def enumerate_files(subdir, file_candidate, skip_views, skip_docs):
for item in os.listdir(subdir):
path = os.path.join(subdir, item)
if os.path.isfile(path):
dir = os.path.basename(os.path.dirname(path))
if (not skip_views and dir == "design_docs") or \
(not skip_docs and dir == "docs"):
file_candidate.append(path)
else:
dir = os.path.basename(path)
if not ((skip_docs and dir == "docs") or \
(skip_views and dir == "design_docs")):
JSONSource.enumerate_files(path, file_candidate, skip_views, skip_docs)
@staticmethod
def provide_design(opts, source_spec, source_bucket, source_map):
design_files = list()
f = source_spec.replace(JSON_SCHEME, "")
if os.path.isfile(f) and f.endswith(".zip"):
zf = zipfile.ZipFile(f)
for path in zf.namelist():
file = os.path.basename(path)
# Skip the design_docs directory listing
if file == "design_docs":
continue
dir = os.path.basename(os.path.dirname(path))
# Skip all files not in the design docs directory
if dir != "design_docs":
continue
design_files.append(zf.read(path))
zf.close()
elif os.path.isdir(f):
files = list()
JSONSource.enumerate_files(f, files, False, True)
for path in files:
if os.path.isfile(path):
f = open(path, 'r')
design_files.append(f.read())
f.close()
return 0, design_files
def provide_batch(self):
if self.done:
return 0, None
# During the first iteration, load the file names; this only runs once
if not self.docs:
self.prepare_docs()
batch = pump.Batch(self)
f = self.spec.replace(JSON_SCHEME, "")
batch_max_size = self.opts.extra['batch_max_size']
# Each iteration should return a batch or mark the loading as finished
if os.path.isfile(f) and f.endswith(".zip"):
zf = zipfile.ZipFile(f)
while batch.size() < batch_max_size and self.docs:
path = self.docs.pop()
key = os.path.basename(path)
value = zf.read(path)
self.save_doc(batch, key, value)
zf.close()
else:
while batch.size() < batch_max_size and self.docs:
path = self.docs.pop()
key = os.path.basename(path)
try:
fp = open(path, 'r')
value = fp.read()
fp.close()
self.save_doc(batch, key, value)
except IOError, error:
logging.error("Failed to load json file with error: " + str(error))
if not self.docs:
self.done = True
return 0, batch
def prepare_docs(self):
f = self.spec.replace(JSON_SCHEME, "")
if os.path.isfile(f) and f.endswith(".zip"):
zf = zipfile.ZipFile(f)
for path in zf.namelist():
file = os.path.basename(path)
# Skip the docs directory listing
if file == "docs":
continue
dir = os.path.basename(os.path.dirname(path))
# Skip all files not in the docs directory
if dir != "docs":
continue
self.docs.append(path)
zf.close()
elif os.path.isdir(f):
JSONSource.enumerate_files(f, self.docs, True, False)
else:
self.docs.append(f)
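# --- Editor's sketch (not from the original source) ---
# The commit's fix in isolation: strip a trailing '.json' from the doc key
# derived from the file name, so 'user::1.json' is stored under 'user::1'.
def _sketch_strip_json_extension(key):
if key.endswith('.json'):
return key[:-5]
return key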
|
Python
| 0.998307
|
@@ -4536,32 +4536,109 @@
.basename(path)%0A
+ if key.endswith('.json'):%0A key = key%5B:-5%5D%0A
@@ -4884,32 +4884,109 @@
.basename(path)%0A
+ if key.endswith('.json'):%0A key = key%5B:-5%5D%0A
|
6447d231565ecb4445a4630cf24069caeefc061b
|
version bump
|
bids/version.py
|
bids/version.py
|
from __future__ import absolute_import, division, print_function
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 0
_version_micro = 1 # use '' for first of series, number for 1 and above
_version_extra = ''
# _version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "bids: a template for small scientific Python projects"
# Long description will go up on the pypi page
long_description = """
PyBIDS
======
PyBIDS is a Python module to interface with datasets conforming to BIDS.
See the BIDS paper_ and the http://bids.neuroimaging.io website for more information.
.. _paper: http://www.nature.com/articles/sdata201644
License
=======
``pybids`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2016--, PyBIDS developers, Planet Earth
"""
NAME = "pybids"
MAINTAINER = "PyBIDS Developers"
MAINTAINER_EMAIL = "bids-discussion@googlegroups.com",
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/INCF/pybids"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "Yaroslav Halchenko",
AUTHOR_EMAIL = "debian@onerussian.com",
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
# No data for now
# PACKAGE_DATA = {'bids': [pjoin('data', '*')]}
REQUIRES = ["grabbit", "six"]
|
Python
| 0.000001
|
@@ -1758,17 +1758,16 @@
ups.com%22
-,
%0ADESCRIP
@@ -1922,17 +1922,16 @@
lchenko%22
-,
%0AAUTHOR_
@@ -1961,17 +1961,16 @@
ian.com%22
-,
%0APLATFOR
|
243c4ba66d35efbe58944ff973d668d9a3b7c6f8
|
Update __init__.py
|
VertexActions/__init__.py
|
VertexActions/__init__.py
|
#
|
Python
| 0.000072
|
@@ -1,2 +1 @@
-#
%0A
|
f168068ee4fb957e45293cf24028d02e565be2b5
|
check if files exist before comparing (closes #37)
|
GUI/InstanceWindowWrapper.py
|
GUI/InstanceWindowWrapper.py
|
from PyQt5.QtWidgets import *
from functools import partial
from webbrowser import open as webopen
from API.MultiMC import MultiMCInstance
from API.CurseAPI import CurseAPI, CurseProject
from Utils.Utils import clear_layout
from Utils.Config import Config, Setting
from GUI.InstanceWindow import Ui_InstanceWindow
from GUI.FileDialogWrapper import FileDialog
from GUI.DownloadDialogWrapper import DownloadDialog
from GUI.ModWidget import Ui_ModWidget
class InstanceWindow:
def __init__(self, instance: MultiMCInstance, curse: CurseAPI, conf: Config):
self.curse = curse
self.instance = instance
self.installed_mods = list()
self.conf = conf
self.mod_widgets = list()
self.win = QMainWindow()
self.ui = Ui_InstanceWindow()
self.ui.setupUi(self.win)
self.win.setWindowTitle("Editing {}".format(instance.name))
self.ui.pack_version.setText("Minecraft: {}".format(instance.version))
if instance.file:
self.file = self.curse.get_file(instance.file)
self.pack = self.curse.get_project(self.file.project)
self.ui.pack_pack.setText("Modpack ID: {} ({})".format(self.pack.name, self.file.pub_time))
else:
self.file = None
self.pack = None
self.ui.pack_pack.hide()
self.setup_mods()
self.setup_mod_browse(curse.get_mod_list(self.instance.version))
self.ui.pack_search.textChanged.connect(self.q_typed)
self.ui.pack_search.returnPressed.connect(self.search_packs)
self.ui.pack_search_button.clicked.connect(self.search_packs)
self.ui.meta_scan.clicked.connect(self.mod_scan)
self.win.show()
def q_typed(self):
if not self.conf.read(Setting.live_search):
return
if self.ui.pack_search.text() == "":
self.setup_mod_browse(self.curse.get_mod_list(self.instance.version))
return
self.setup_mod_browse(self.curse.search(self.ui.pack_search.text(), "mod", self.instance.version))
def search_packs(self):
if self.ui.pack_search.text() == "":
self.setup_mod_browse(self.curse.get_mod_list(self.instance.version))
return
self.setup_mod_browse(self.curse.search(self.ui.pack_search.text(), "mod", self.instance.version))
def setup_mods(self):
self.installed_mods = [self.curse.get_file(i["id"]).project for i in self.instance.mods]
clear_layout(self.ui.mod_box)
rl_mods = [i for i in self.instance.mods if self.curse.get_file(i["id"])]
rl_mods.sort(key=lambda x: self.curse.get_project(self.curse.get_file(x["id"]).project).name.lower())
for mod in rl_mods:
widget = QWidget()
el = Ui_ModWidget()
el.setupUi(widget)
modf = self.curse.get_file(mod["id"])
if not modf:
continue
proj = self.curse.get_project(modf.project)
el.mod_name.setText(proj.name)
el.mod_delete.clicked.connect(partial(self.mod_delete, mod["path"]))
el.mod_update.clicked.connect(partial(self.mod_install, proj, True))
el.mod_install.hide()
el.mod_info.hide()
el.mod_update.hide()
fs = [self.curse.get_file(i) for i in proj.files]
fs = [i for i in fs if self.instance.version in i.versions]
fs.sort(key=lambda x: x.pub_time, reverse=True)
if fs[0].pub_time > modf.pub_time:
el.mod_update.show()
self.ui.mod_box.addWidget(widget)
self.ui.mod_box.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
def setup_mod_browse(self, mods: list):
clear_layout(self.ui.browse_box)
for mod in mods:
widget = QWidget()
el = Ui_ModWidget()
el.setupUi(widget)
el.mod_name.setText(mod.name)
el.mod_install.clicked.connect(partial(self.mod_install, mod))
el.mod_delete.hide()
el.mod_update.hide()
el.mod_info.clicked.connect(partial(webopen, mod.page))
self.ui.browse_box.addWidget(widget)
self.ui.browse_box.addItem(QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding))
def mod_install(self, mod: CurseProject, force_latest=False):
files = [self.curse.get_file(i) for i in mod.files]
fs = [i for i in files if self.instance.version in i.versions]
if len(fs) < 1:
return False
fs.sort(key=lambda x: x.pub_time, reverse=True)
if self.conf.read(Setting.ask_file) and not force_latest:
dia = FileDialog(fs)
f = dia.dia.exec_()
if not f:
return
f = fs[f - 1]
else:
f = fs[0]
dia = DownloadDialog()
dia.download_mod(f, self.curse, self.instance)
for dep in f.deps:
if dep["Type"] != "required" or dep["AddOnId"] in self.installed_mods:
continue
self.mod_install(self.curse.get_project(dep["AddOnId"]))
self.setup_mods()
def mod_delete(self, fpath: str):
self.instance.uninstall_mod(fpath)
self.setup_mods()
def mod_scan(self):
self.instance.find_mods(self.curse.db)
self.setup_mods()
|
Python
| 0
|
@@ -3500,16 +3500,32 @@
if
+len(fs) %3E 0 and
fs%5B0%5D.pu
|
9a9a6643bbc26a3f359df52b0b4bbb4207225017
|
Update VariationalAutoencoderRunner.py
|
autoencoder/VariationalAutoencoderRunner.py
|
autoencoder/VariationalAutoencoderRunner.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder_models.VariationalAutoencoder import VariationalAutoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def min_max_scale(X_train, X_test):
preprocessor = prep.MinMaxScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = min_max_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1
autoencoder = VariationalAutoencoder(
n_input = 784,
n_hidden = 200,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001))
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
# Fit training using batch data
cost = autoencoder.partial_fit(batch_xs)
# Compute average loss
avg_cost += cost / n_samples * batch_size
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%d,' % (epoch + 1),
"Cost:", "{:.9f}".format(avg_cost))
print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
|
Python
| 0.000001
|
@@ -376,19 +376,17 @@
one_hot
- =
+=
True)%0A%0A%0A
@@ -982,19 +982,17 @@
n_input
- =
+=
784,%0A
@@ -1000,19 +1000,17 @@
n_hidden
- =
+=
200,%0A
@@ -1019,19 +1019,17 @@
ptimizer
- =
+=
tf.train
|
32fa9354c221f91cc6790177371a00468d22cb85
|
Fix the scan script
|
scan/scan.py
|
scan/scan.py
|
# Add your Python code here. E.g.
from microbit import *
MAX_ROWS=4
def scan(level,pause=500, reverse=False):
for i in range(0,10):
x = 0
rows = i
cols = i
while x <= i:
for y in range(0,rows+1):
if x <= MAX_ROWS and y <= MAX_ROWS:
coord_x = MAX_ROWS-x if reverse else x
coord_y = MAX_ROWS-y if reverse else y
display.set_pixel(coord_x,coord_y,max(0,level-((rows-y)*2)))
x = x+1
rows = rows-1
sleep(pause)
while True:
scan(9,150)
scan(150,True)
|
Python
| 0.001609
|
@@ -61,16 +61,26 @@
X_ROWS=4
+%0Alevel = 9
%0A%0Adef sc
@@ -86,14 +86,8 @@
can(
-level,
paus
@@ -135,16 +135,17 @@
(0,10):%0A
+%0A
@@ -591,10 +591,8 @@
can(
-9,
150)
|
9360c15f8883543ad5d83aa7dc870c60a1fed5ec
|
add infos
|
setup.py
|
setup.py
|
#!/usr/bin/env python3
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Philippe Proulx <philippe.proulx@efficios.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import sys
import subprocess
from setuptools import setup
# make sure we run Python 3+ here
v = sys.version_info
if v.major < 3:
sys.stderr.write('Sorry, pytsdl needs Python 3\n')
sys.exit(1)
# pyPEG2 needs to be installed manually until their PyPI tarball is
# fixed for setuptools.
try:
import pypeg2
except ImportError:
sys.stderr.write('Please install pyPEG2 manually:\n\n')
sys.stderr.write(' sudo pip3 install pyPEG2\n')
sys.exit(1)
packages = [
'pytsdl',
]
setup(name='pytsdl',
version=0.3,
description='TSDL parser implemented entirely in Python 3',
author='Philippe Proulx',
author_email='eeppeliteloop@gmail.com',
url='https://github.com/eepp/pytsdl',
packages=packages)
|
Python
| 0
|
@@ -1866,16 +1866,73 @@
l.com',%0A
+ license='MIT',%0A keywords='tsdl ctf metadata',%0A
ur
|
dc1773eaf3e66ddf5cbaa564bb55dbb8e51218ff
|
Fix #752: test case failed
|
topbeat/tests/system/test_filesystem.py
|
topbeat/tests/system/test_filesystem.py
|
from topbeat import TestCase
"""
Contains tests for filesystem statistics.
"""
class Test(TestCase):
def test_filesystems(self):
"""
Checks that system wide stats are found in the output and
have the expected types.
"""
self.render_config_template(
system_stats=False,
process_stats=False,
filesystem_stats=True
)
topbeat = self.start_topbeat()
self.wait_until(lambda: self.output_has(lines=1))
topbeat.kill_and_wait()
output = self.read_output()[0]
for key in [
"fs.device_name",
"fs.mount_point",
]:
assert isinstance(output[key], basestring)
for key in [
"fs.used_p",
]:
assert type(output[key]) is float
for key in [
"fs.avail",
"fs.files",
"fs.free_files",
"fs.total",
"fs.used",
]:
assert type(output[key]) is int or type(output[key]) is long
|
Python
| 0
|
@@ -22,16 +22,30 @@
estCase%0A
+import numbers
%0A%0A%22%22%22%0ACo
@@ -792,35 +792,41 @@
assert
-typ
+isinstanc
e(output%5Bkey%5D) i
@@ -826,18 +826,25 @@
key%5D
-) is float
+, numbers.Number)
%0A%0A
|
1dacd99bbe1b32586a013d7d6f0874271e097e7c
|
Revise var to reach
|
lc0055_jump_game.py
|
lc0055_jump_game.py
|
"""Leetcode 55. Jump Game
Medium
URL: https://leetcode.com/problems/jump-game/
Given an array of non-negative integers, you are initially positioned at the
first index of the array.
Each element in the array represents your maximum jump length at that position.
Determine if you are able to reach the last index.
Example 1:
Input: [2,3,1,1,4]
Output: true
Explanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.
Example 2:
Input: [3,2,1,0,4]
Output: false
Explanation: You will always arrive at index 3 no matter what. Its maximum
jump length is 0, which makes it impossible to reach the last index.
"""
class SolutionGreedy(object):
def canJump(self, nums):
"""
:type nums: List[int]
:rtype: bool
Time complexity: O(n).
Space complexity: O(1).
"""
# Create max reachable index.
reachable = 0
for i in range(len(nums)):
# Index i is not reachable.
if reachable < i:
return False
# Update reachable by taking max of itself and i+nums[i].
reachable = max(reachable, i + nums[i])
return True
def main():
# Ans: True
nums = [2,3,1,1,4]
print SolutionGreedy().canJump(nums)
# Ans: False
nums = [3,2,1,0,4]
print SolutionGreedy().canJump(nums)
if __name__ == '__main__':
main()
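# --- Editor's worked trace (not from the original source) ---
# Tracing the greedy scan on Example 2, [3,2,1,0,4]: reach grows
# 0 -> 3 -> 3 -> 3 -> 3, so at i = 4 we have reach (3) < i and canJump
# returns False, matching the expected answer above.
def trace_can_jump(nums):
reach = 0
for i in range(len(nums)):
if reach < i:
return False, i  # index i is unreachable
reach = max(reach, i + nums[i])
return True, reach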
|
Python
| 0
|
@@ -874,20 +874,16 @@
reach
-able
= 0%0A%0A
@@ -975,20 +975,16 @@
if reach
-able
%3C i:%0A
@@ -1037,20 +1037,16 @@
te reach
-able
by taki
@@ -1094,20 +1094,16 @@
reach
-able
= max(r
@@ -1106,20 +1106,16 @@
ax(reach
-able
, i + nu
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.