commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
30bcdc2c6bc18eb8460ec1706e74bdd72bae0610
|
Fix test after using cool_filter
|
test/python/topology/test_names.py
|
test/python/topology/test_names.py
|
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2017
import unittest
import itertools
from streamsx.topology.topology import *
def f42():
pass
class cool_class(object):
def __init__(self):
pass
def __call__(self, t):
return t
class cool_filter(object):
def __init__(self):
pass
def __call__(self, t):
return True
class TestNames(unittest.TestCase):
def test_names(self):
topo = Topology("Abc")
s1 = topo.source([], name="CoolSource")
self.assertEqual(s1.name, "CoolSource")
s1.category = 'Ingest'
self.assertEqual(s1.category, 'Ingest')
s1a = topo.source([], name="CoolSource")
self.assertEqual(s1a.name, "CoolSource_2")
s1b = topo.source([], name="CoolSource")
self.assertEqual(s1b.name, "CoolSource_3")
s2 = topo.source(f42)
self.assertEqual(s2.name, "f42")
s2a = topo.source(f42)
self.assertEqual(s2a.name, "f42_2")
s3 = topo.source(lambda : [])
self.assertEqual(s3.name, "source_lambda")
s3a = topo.source(lambda : [])
self.assertEqual(s3a.name, "source_lambda_2")
s4 = topo.source(cool_class())
self.assertEqual(s4.name, "cool_class")
s4a = topo.source(cool_class())
self.assertEqual(s4a.name, "cool_class_2")
s5 = topo.source([])
self.assertEqual(s5.name, "list_2")
s5a = topo.source([])
self.assertEqual(s5a.name, "list_3")
s6 = topo.source(itertools.repeat('Fred'))
self.assertEqual(s6.name, "repeat")
s6a = topo.source(itertools.repeat('Fred'))
self.assertEqual(s6a.name, "repeat_2")
s6.for_each(name="End", func=cool_class)
s7 = s6.filter(cool_filter(), name="mYF")
self.assertEqual(s7.name, "mYF")
s8 = s6.filter(cool_filter())
self.assertEqual(s8.name, "cool_filter")
s9 = s6.map(cool_class, name="mYM")
self.assertEqual(s9.name, "mYM")
s9.category = 'Analytics'
self.assertEqual(s9.category, 'Analytics')
s10 = s6.map(cool_class)
self.assertEqual(s10.name, "cool_class_4")
s11 = s6.flat_map(cool_class, name="mYFM")
self.assertEqual(s11.name, "mYFM")
s12 = s6.flat_map(cool_class)
self.assertEqual(s12.name, "cool_class_5")
s12.for_each(lambda x : None)
s12.category = 'DB'
self.assertEqual(s12.category, 'DB')
def test_non_placable_names(self):
topo = Topology()
s0 = topo.source([])
s1 = topo.source([])
s = s0.union({s1})
self.assertIsNone(s.category)
with self.assertRaises(TypeError):
s.category = 'Foo'
|
Python
| 0.000001
|
@@ -2061,17 +2061,17 @@
l_class_
-4
+3
%22)%0A%0A
@@ -2233,17 +2233,17 @@
l_class_
-5
+4
%22)%0A
|
5934669c0edbd914d14612e16be7c88641b50bee
|
Fix test for eq and test eq with other classes
|
test/test_chat_chatserverstatus.py
|
test/test_chat_chatserverstatus.py
|
from pytwitcherapi import chat
def test_eq_str(servers):
assert servers[0] == '192.16.64.11:80',\
"Server should be equal to the same address."
def test_noteq_str(servers):
assert servers[0] != '192.16.64.50:89',\
"""Server should not be equal to a different address"""
def test_eq(servers):
s1 = chat.ChatServerStatus('192.16.64.11:80')
assert servers[0] == s1,\
"""Servers with same address should be equal"""
def test_noteq(servers):
assert servers[0] != servers[1],\
"""Servers with different address should not be equal"""
def test_lt(servers):
sortedservers = sorted(servers)
expected = [servers[2], servers[3], servers[0], servers[1]]
assert sortedservers == expected,\
"""Server should be sorted like this: online, then offline,
little errors, then more errors, little lag, then more lag."""
|
Python
| 0
|
@@ -443,32 +443,138 @@
uld be equal%22%22%22%0A
+ assert not (s1 == 123),%5C%0A %22%22%22Servers should not be eqaul to other classes with different id%22%22%22%0A
%0A%0Adef test_noteq
@@ -587,32 +587,37 @@
rs):%0A assert
+not (
servers%5B0%5D != se
@@ -611,17 +611,17 @@
vers%5B0%5D
-!
+=
= server
@@ -624,16 +624,17 @@
rvers%5B1%5D
+)
,%5C%0A
|
03624fa9dbe1a0dbb74e324a35ee0cda44234bc8
|
add domain name when having issues in the warn output (#5105)
|
certbot-apache/certbot_apache/display_ops.py
|
certbot-apache/certbot_apache/display_ops.py
|
"""Contains UI methods for Apache operations."""
import logging
import os
import zope.component
from certbot import errors
from certbot import interfaces
import certbot.display.util as display_util
logger = logging.getLogger(__name__)
def select_vhost(domain, vhosts):
"""Select an appropriate Apache Vhost.
:param vhosts: Available Apache Virtual Hosts
:type vhosts: :class:`list` of type `~obj.Vhost`
:returns: VirtualHost or `None`
:rtype: `~obj.Vhost` or `None`
"""
if not vhosts:
return None
while True:
code, tag = _vhost_menu(domain, vhosts)
if code == display_util.OK:
return vhosts[tag]
else:
return None
def _vhost_menu(domain, vhosts):
"""Select an appropriate Apache Vhost.
:param vhosts: Available Apache Virtual Hosts
:type vhosts: :class:`list` of type `~obj.Vhost`
:returns: Display tuple - ('code', tag')
:rtype: `tuple`
"""
# Free characters in the line of display text (9 is for ' | ' formatting)
free_chars = display_util.WIDTH - len("HTTPS") - len("Enabled") - 9
if free_chars < 2:
logger.debug("Display size is too small for "
"certbot_apache.display_ops._vhost_menu()")
# This runs the edge off the screen, but it doesn't cause an "error"
filename_size = 1
disp_name_size = 1
else:
# Filename is a bit more important and probably longer with 000-*
filename_size = int(free_chars * .6)
disp_name_size = free_chars - filename_size
choices = []
for vhost in vhosts:
if len(vhost.get_names()) == 1:
disp_name = next(iter(vhost.get_names()))
elif len(vhost.get_names()) == 0:
disp_name = ""
else:
disp_name = "Multiple Names"
choices.append(
"{fn:{fn_size}s} | {name:{name_size}s} | {https:5s} | "
"{active:7s}".format(
fn=os.path.basename(vhost.filep)[:filename_size],
name=disp_name[:disp_name_size],
https="HTTPS" if vhost.ssl else "",
active="Enabled" if vhost.enabled else "",
fn_size=filename_size,
name_size=disp_name_size)
)
try:
code, tag = zope.component.getUtility(interfaces.IDisplay).menu(
"We were unable to find a vhost with a ServerName "
"or Address of {0}.{1}Which virtual host would you "
"like to choose?\n(note: conf files with multiple "
"vhosts are not yet supported)".format(domain, os.linesep),
choices, force_interactive=True)
except errors.MissingCommandlineFlag:
msg = (
"Encountered vhost ambiguity but unable to ask for user "
"guidance in non-interactive mode. Certbot may need "
"vhosts to be explicitly labelled with ServerName or "
"ServerAlias directives.")
logger.warning(msg)
raise errors.MissingCommandlineFlag(msg)
return code, tag
|
Python
| 0
|
@@ -2763,19 +2763,74 @@
biguity
-but
+when trying to find a vhost for %22%0A %22%7B0%7D but was
unable
@@ -3017,16 +3017,31 @@
ctives.%22
+.format(domain)
)%0A
|
a004abd76af602192704cf4d01d9daf3903d6477
|
Remove unused code
|
child_switzerland/models/child_compassion.py
|
child_switzerland/models/child_compassion.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: David Coninckx <david@coninckx.com>
#
# The licence is in the file __manifest__.py
#
##############################################################################
from odoo import models, fields, api
class CompassionChild(models.Model):
_inherit = 'compassion.child'
desc_fr = fields.Text('French description', readonly=True)
desc_de = fields.Text('German description', readonly=True)
desc_it = fields.Text('Italian description', readonly=True)
@api.model
def correct_old_children(self):
old_children = self.search([('global_id', '=', False)]).filtered(
lambda c: len(c.local_id) < 11)
for child in old_children:
if child.code:
child.local_id = child.code[0:2] + '0' + \
child.code[2:5] + '0' + child.code[5:]
return True
@api.model
def find_missing_global_id(self):
missing_gids = self.search([('global_id', '=', False)])
global_search = self.env['compassion.childpool.search'].create({
'take': 1,
})
cpt = 0
for child in missing_gids:
try:
global_search.local_id = child.local_id
global_search.do_search()
if global_search.global_child_ids:
child.global_id = global_search.global_child_ids.global_id
# Commit at each fix
self.env.cr.commit() # pylint: disable=invalid-commit
except:
self.env.invalidate_all()
finally:
cpt += 1
|
Python
| 0.000006
|
@@ -431,13 +431,8 @@
elds
-, api
%0A%0A%0Ac
@@ -696,1113 +696,4 @@
ue)%0A
-%0A @api.model%0A def correct_old_children(self):%0A old_children = self.search(%5B('global_id', '=', False)%5D).filtered(%0A lambda c: len(c.local_id) %3C 11)%0A for child in old_children:%0A if child.code:%0A child.local_id = child.code%5B0:2%5D + '0' + %5C%0A child.code%5B2:5%5D + '0' + child.code%5B5:%5D%0A return True%0A%0A @api.model%0A def find_missing_global_id(self):%0A missing_gids = self.search(%5B('global_id', '=', False)%5D)%0A global_search = self.env%5B'compassion.childpool.search'%5D.create(%7B%0A 'take': 1,%0A %7D)%0A cpt = 0%0A for child in missing_gids:%0A try:%0A global_search.local_id = child.local_id%0A global_search.do_search()%0A if global_search.global_child_ids:%0A child.global_id = global_search.global_child_ids.global_id%0A # Commit at each fix%0A self.env.cr.commit() # pylint: disable=invalid-commit%0A except:%0A self.env.invalidate_all()%0A finally:%0A cpt += 1%0A
|
07d17373891b501fa9e20d175ea993e021158c78
|
Add TODO
|
app/slot/controller.py
|
app/slot/controller.py
|
# 3rd Party Modules
import datetime
import os
from flask import request, redirect, render_template, json
from app import app
import config
from auth import requires_auth
from app.slot import db_fieldbook, messaging
import utils
def index():
ops = db_fieldbook.get_all_opportunities()
for op in ops:
if op["status"] == "Accepted":
op["class"] = "success"
elif op["status"] == "Offered":
op["class"] = "info"
elif op["status"] == "Expired":
op["class"] = "active"
elif op["status"] == "Attended":
op["class"] = "active"
elif op["status"] == "Not Attended":
op["class"] = "active"
op["remaining_mins"] = int(int(op["expiry_time"] - utils.to_timestamp(datetime.datetime.utcnow())) / 60)
return render_template('dashboard.html', ops=ops)
def render_new_procedure_form():
if request.method == 'POST':
print(request.form)
opportunity_doctor = request.form['doctor']
opportunity_procedure = request.form['procedure']
opportunity_location = request.form['location']
opportunity_duration = request.form['duration']
opportunity = {
'doctor': opportunity_doctor,
'procedure': opportunity_procedure,
'location': opportunity_location,
'duration': opportunity_duration
}
print(opportunity)
ref_id = db_fieldbook.add_opportunity(opportunity)
number_messages_sent, message_ref = messaging.broadcast_procedure(opportunity_procedure,
opportunity_location,
opportunity_duration,
opportunity_doctor,
ref_id)
offer = db_fieldbook.add_offer(ref_id, number_messages_sent)
print(offer['id'])
print(json.dumps(opportunity))
return redirect('/dashboard', code=302)
else:
procedures = db_fieldbook.get_procedures()
locations = db_fieldbook.get_locations()
timeframes = db_fieldbook.get_timeframes()
doctors = db_fieldbook.get_doctors()
return render_template('new_procedure.html', procedures=procedures, locations=locations,
timeframes=timeframes, doctors=doctors)
# Endpoint for receiving SMS messages from Twilio
@app.route('/sms', methods=['POST'])
@requires_auth
def receive_sms():
sms = dict(service_number=str(request.form['To']),
mobile=str(request.form['From']),
message=str(request.form['Body']))
print(str.format("Received SMS: \n"
"Service Number: {0}\n"
"Mobile: {1}\n"
"Message: {2}\n",
sms['service_number'],
sms['mobile'],
sms['message']))
messaging.request_procedure(sms['mobile'], sms['message'])
db_fieldbook.add_sms_log(sms['mobile'], sms['service_number'], sms['message'], 'IN')
return '<Response></Response>'
@app.route('/complete', methods=['POST'])
@requires_auth
def complete_procedure():
completed_id = request.form['id']
if request.form['attended_status'] == "Attended":
attended_status = True
else:
attended_status = False
print(str(completed_id))
print(str(attended_status))
db_fieldbook.complete_opportunity(completed_id, attended_status)
return redirect('/dashboard', code=302)
if __name__ == '__main__':
app.debug = config.debug_mode
port = int(os.environ.get("PORT", 5000))
print(str.format("Debug Mode is: {0}", app.debug))
app.run(
host="0.0.0.0",
port = port
)
|
Python
| 0.000002
|
@@ -2618,24 +2618,60 @@
ive_sms():%0A%0A
+ # TODO: Convert to dict literal%0A
sms = di
|
92f5cff9edfbeb2219fc2fb714364dc590bd912f
|
Fix too long line in soc.cache.logic module.
|
app/soc/cache/logic.py
|
app/soc/cache/logic.py
|
#!/usr/bin/python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module contains logic memcaching functions.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from google.appengine.api import memcache
from google.appengine.ext import db
import soc.cache.base
def key(model, filter, order):
"""Returns the memcache key for this query.
"""
new_filter = {}
for filter_key, value in filter.iteritems():
new_value = value.key().id_or_name() if isinstance(value, db.Model) else value
new_filter[filter_key] = new_value
return 'query_for_%(kind)s_%(filter)s_%(order)s' % {
'kind': repr(model.kind()),
'filter': repr(new_filter),
'order': repr(order),
}
def get(model, filter, order, *args, **kwargs):
"""Retrieves the data for the specified query from the memcache.
"""
memcache_key = key(model, filter, order)
return memcache.get(memcache_key), memcache_key
def put(data, memcache_key, *args, **kwargs):
"""Sets the data for the specified query in the memcache.
Args:
data: the data to be cached
"""
# Store data for fifteen minutes to force a refresh every so often
retention = 15*60
memcache.add(memcache_key, data, retention)
def flush(model, filter):
"""Removes the data for the current user from the memcache.
"""
memcache_key = key(model, filter)
memcache.delete(memcache_key)
# define the cache function
cache = soc.cache.base.getCacher(get, put)
|
Python
| 0.000001
|
@@ -982,80 +982,110 @@
-new_value = value.key().id_or_name() if isinstance(value, db.Model) else
+if isinstance(value, db.Model):%0A new_value = value.key().id_or_name()%0A else:%0A new_value =
val
|
ced24cc4af10e914d2cbd176be179543e6366d05
|
Update plot_post.py
|
mcmcplotlib/plot_post.py
|
mcmcplotlib/plot_post.py
|
from __future__ import division
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from hpd import hpd_grid
def plot_post(sample, alpha=0.05, show_mode=True, kde_plot=True, bins=50,
ROPE=None, comp_val=None, roundto=2):
"""Plot posterior and HPD
Parameters
----------
sample : Numpy array or python list
An array containing MCMC samples
alpha : float
Desired probability of type I error (defaults to 0.05)
show_mode: Bool
If True the legend will show the mode(s) value(s), if false the mean(s)
will be displayed
kde_plot: Bool
If True the posterior will be displayed using a Kernel Density Estimation
otherwise an histogram will be used
bins: integer
Number of bins used for the histogram, only works when kde_plot is False
ROPE: list or numpy array
Lower and upper values of the Region Of Practical Equivalence
comp_val: float
Comparison value
Returns
-------
post_summary : dictionary
Containing values with several summary statistics
"""
post_summary = {'mean':0,'median':0,'mode':0, 'alpha':0,'hpd_low':0,
'hpd_high':0, 'comp_val':0, 'pc_gt_comp_val':0, 'ROPE_low':0,
'ROPE_high':0, 'pc_in_ROPE':0}
post_summary['mean'] = np.mean(sample)
post_summary['median'] = np.median(sample)
post_summary['alpha'] = alpha
# Compute the hpd, KDE and mode for the posterior
hpd, x, y, modes = hpd_grid(sample, alpha, roundto)
post_summary['hpd'] = hpd
post_summary['mode'] = modes
## Plot KDE.
if kde_plot:
plt.plot(x, y, color='k', lw=2)
## Plot histogram.
else:
plt.hist(sample, normed=True, bins=bins, facecolor='b',
edgecolor='w')
## Display mode or mean:
if show_mode:
string = '{:g} ' * len(post_summary['mode'])
plt.plot(0, label='mode =' + string.format(*[round(item, roundto) for item in post_summary['mode']]).rstrip(), alpha=0)
else:
plt.plot(0, label='mean = {:g}'.format(round(post_summary['mean'], roundto)), alpha=0)
## Display the hpd.
hpd_label = ''
for value in hpd:
plt.plot(value, [0, 0], linewidth=10, color='b')
hpd_label = hpd_label + '{:g} {:g}'.format(round(value[0], roundto), round(value[1], roundto))
plt.plot(0, 0, linewidth=4, color='b', label='hpd {:g}%\n{}'.format(round((1-alpha)*100, roundto), hpd_label))
## Display the ROPE.
if ROPE is not None:
pc_in_ROPE = np.sum((sample > ROPE[0]) & (sample < ROPE[1]))/len(sample)*100
plt.plot(ROPE, [0, 0], linewidth=20, color='r', alpha=0.75)
plt.plot(0, 0, linewidth=4, color='r', label='{:g}% in ROPE'.format(round(pc_in_ROPE, roundto)))
post_summary['ROPE_low'] = ROPE[0]
post_summary['ROPE_high'] = ROPE[1]
post_summary['pc_in_ROPE'] = pc_in_ROPE
## Display the comparison value.
if comp_val is not None:
pc_gt_comp_val = 100 * np.sum(sample > comp_val)/len(sample)
pc_lt_comp_val = 100 - pc_gt_comp_val
plt.axvline(comp_val, ymax=.75, color='g', linewidth=4, alpha=0.75,
label='{:g}% < {:g} < {:g}%'.format(round(pc_lt_comp_val, roundto),
round(comp_val, roundto),
round(pc_gt_comp_val, roundto)))
post_summary['comp_val'] = comp_val
post_summary['pc_gt_comp_val'] = pc_gt_comp_val
plt.legend(loc=0, framealpha=1)
frame = plt.gca()
frame.axes.get_yaxis().set_ticks([])
return post_summary
|
Python
| 0
|
@@ -1601,25 +1601,16 @@
e, alpha
-, roundto
)%0D%0A p
|
bed9390490ad0c9d8d8319ea017f13d075284450
|
Make sure SQL file gets closed
|
calaccess_processed_filings/managers.py
|
calaccess_processed_filings/managers.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Custom manager for loading raw data in to "filings" models.
"""
from __future__ import unicode_literals
import itertools
# Django tricks
from django.db.models import Q
from django.db import connection
# Managers
from calaccess_processed.managers import BulkLoadSQLManager
# Logging
import logging
logger = logging.getLogger(__name__)
class FilingsManager(BulkLoadSQLManager):
"""
Utilities for more quickly loading bulk data.
"""
app_name = "calaccess_processed_filings"
def get_sql(self):
"""
Return string of raw sql for loading the model.
"""
return open(self.sql_path, 'r').read()
@property
def sql_path(self):
"""
Return the path to the .sql file with the model's loading query.
"""
file_name = 'load_%s_model' % self.model._meta.model_name
return self.get_sql_path(file_name)
def load(self):
"""
Load the model by executing its corresponding raw SQL query.
Temporarily drops any constraints or indexes on the model.
"""
# Drop constraints and indexes to speed loading
self.get_queryset().drop_constraints()
self.get_queryset().drop_indexes()
# Run the actual loader SQL
with connection.cursor() as c:
c.execute(self.get_sql())
# Restore the constraints and index that were dropped
self.get_queryset().restore_constraints()
self.get_queryset().restore_indexes()
class Form501FilingManager(FilingsManager):
"""
A custom manager for Form 501 filings.
"""
def without_candidacy(self):
"""
Returns Form 501 filings that do not have an OCD Candidacy yet.
"""
from calaccess_processed_elections.proxies import OCDCandidacyProxy
matched_qs = OCDCandidacyProxy.objects.matched_form501_ids()
matched_list = [i for i in itertools.chain.from_iterable(matched_qs)]
return self.get_queryset().exclude(
Q(filing_id__in=matched_list) | Q(office__icontains='RETIREMENT')
)
|
Python
| 0.000001
|
@@ -647,22 +647,20 @@
-return
+fp =
open(se
@@ -680,15 +680,70 @@
'r')
-.read()
+%0A sql = fp.read()%0A fp.close()%0A return sql
%0A%0A
|
c1ebf656a43ecec0cbcd6d8b7b2758651c77f7a7
|
Add params to upload_volume command
|
tempest/api/volume/test_volumes_actions.py
|
tempest/api/volume/test_volumes_actions.py
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.volume import base
from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
class VolumesActionsTest(base.BaseVolumeTest):
@classmethod
def resource_setup(cls):
super(VolumesActionsTest, cls).resource_setup()
# Create a test shared volume for attach/detach tests
cls.volume = cls.create_volume()
@decorators.idempotent_id('fff42874-7db5-4487-a8e1-ddda5fb5288d')
@decorators.attr(type='smoke')
@utils.services('compute')
def test_attach_detach_volume_to_instance(self):
# Create a server
server = self.create_server()
# Volume is attached and detached successfully from an instance
self.volumes_client.attach_volume(self.volume['id'],
instance_uuid=server['id'],
mountpoint='/dev/%s' %
CONF.compute.volume_device_name)
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'], 'in-use')
self.volumes_client.detach_volume(self.volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'], 'available')
@decorators.idempotent_id('63e21b4c-0a0c-41f6-bfc3-7c2816815599')
def test_volume_bootable(self):
# Verify that a volume bootable flag is retrieved
for bool_bootable in [True, False]:
self.volumes_client.set_bootable_volume(self.volume['id'],
bootable=bool_bootable)
fetched_volume = self.volumes_client.show_volume(
self.volume['id'])['volume']
# Get Volume information
# NOTE(masayukig): 'bootable' is "true" or "false" in the current
# cinder implementation. So we need to cast boolean values to str
# and make it lower to compare here.
self.assertEqual(str(bool_bootable).lower(),
fetched_volume['bootable'])
@decorators.idempotent_id('9516a2c8-9135-488c-8dd6-5677a7e5f371')
@utils.services('compute')
def test_get_volume_attachment(self):
# Create a server
server = self.create_server()
# Verify that a volume's attachment information is retrieved
self.volumes_client.attach_volume(self.volume['id'],
instance_uuid=server['id'],
mountpoint='/dev/%s' %
CONF.compute.volume_device_name)
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'],
'in-use')
self.addCleanup(waiters.wait_for_volume_resource_status,
self.volumes_client,
self.volume['id'], 'available')
self.addCleanup(self.volumes_client.detach_volume, self.volume['id'])
volume = self.volumes_client.show_volume(self.volume['id'])['volume']
self.assertIn('attachments', volume)
attachment = volume['attachments'][0]
self.assertEqual('/dev/%s' %
CONF.compute.volume_device_name,
attachment['device'])
self.assertEqual(server['id'], attachment['server_id'])
self.assertEqual(self.volume['id'], attachment['id'])
self.assertEqual(self.volume['id'], attachment['volume_id'])
@decorators.idempotent_id('d8f1ca95-3d5b-44a3-b8ca-909691c9532d')
@utils.services('image')
def test_volume_upload(self):
# NOTE(gfidente): the volume uploaded in Glance comes from setUpClass,
# it is shared with the other tests. After it is uploaded in Glance,
# there is no way to delete it from Cinder, so we delete it from Glance
# using the Glance images_client and from Cinder via tearDownClass.
image_name = data_utils.rand_name(self.__class__.__name__ + '-Image')
body = self.volumes_client.upload_volume(
self.volume['id'], image_name=image_name,
disk_format=CONF.volume.disk_format)['os-volume_upload_image']
image_id = body["image_id"]
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.images_client.delete_image,
image_id)
waiters.wait_for_image_status(self.images_client, image_id, 'active')
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'], 'available')
@decorators.idempotent_id('92c4ef64-51b2-40c0-9f7e-4749fbaaba33')
def test_reserve_unreserve_volume(self):
# Mark volume as reserved.
self.volumes_client.reserve_volume(self.volume['id'])
# To get the volume info
body = self.volumes_client.show_volume(self.volume['id'])['volume']
self.assertIn('attaching', body['status'])
# Unmark volume as reserved.
self.volumes_client.unreserve_volume(self.volume['id'])
# To get the volume info
body = self.volumes_client.show_volume(self.volume['id'])['volume']
self.assertIn('available', body['status'])
@decorators.idempotent_id('fff74e1e-5bd3-4b33-9ea9-24c103bc3f59')
def test_volume_readonly_update(self):
for readonly in [True, False]:
# Update volume readonly
self.volumes_client.update_volume_readonly(self.volume['id'],
readonly=readonly)
# Get Volume information
fetched_volume = self.volumes_client.show_volume(
self.volume['id'])['volume']
# NOTE(masayukig): 'readonly' is "True" or "False" in the current
# cinder implementation. So we need to cast boolean values to str
# to compare here.
self.assertEqual(str(readonly),
fetched_volume['metadata']['readonly'])
|
Python
| 0.000001
|
@@ -5547,32 +5547,228 @@
, 'available')%0A%0A
+ image_info = self.images_client.show_image(image_id)%0A self.assertEqual(image_name, image_info%5B'name'%5D)%0A self.assertEqual(CONF.volume.disk_format, image_info%5B'disk_format'%5D)%0A%0A
@decorators.
|
a2398d77f550a5b73c5bbd76d30131c88c64caa4
|
Make the example a bit more exciting.
|
swig/test.py
|
swig/test.py
|
import formast
e = formast.Expr()
formast.parse_xml("test.txt", e)
class Visitor(formast.Visitor):
def expr_uint(self, v):
print(v)
def expr_add(self, left, right):
print "("
self.expr(left)
print "+"
self.expr(right)
print ")"
def expr_sub(self, left, right):
print "("
self.expr(left)
print "+"
self.expr(right)
print ")"
def expr_mul(self, left, right):
print "("
self.expr(left)
print "*"
self.expr(right)
print ")"
def expr_div(self, left, right):
print "("
self.expr(left)
print "/"
self.expr(right)
print ")"
def expr_neg(self, right):
print "-"
self.expr(right)
def expr_pos(self, right):
print "+"
self.expr(right)
visitor = Visitor()
visitor.expr(e)
|
Python
| 0.000001
|
@@ -68,22 +68,22 @@
%0A%0Aclass
-Visito
+Printe
r(formas
@@ -139,11 +139,11 @@
rint
-(v)
+ v,
%0A%0A
@@ -186,32 +186,33 @@
print %22(%22
+,
%0A self.ex
@@ -229,32 +229,33 @@
print %22+%22
+,
%0A self.ex
@@ -273,32 +273,33 @@
print %22)%22
+,
%0A%0A def expr_s
@@ -330,32 +330,33 @@
print %22(%22
+,
%0A self.ex
@@ -371,34 +371,35 @@
%0A print %22
-+%22
+-%22,
%0A self.ex
@@ -417,32 +417,33 @@
print %22)%22
+,
%0A%0A def expr_m
@@ -474,32 +474,33 @@
print %22(%22
+,
%0A self.ex
@@ -525,16 +525,17 @@
rint %22*%22
+,
%0A
@@ -561,32 +561,33 @@
print %22)%22
+,
%0A%0A def expr_d
@@ -626,16 +626,17 @@
rint %22(%22
+,
%0A
@@ -669,16 +669,17 @@
rint %22/%22
+,
%0A
@@ -713,16 +713,17 @@
rint %22)%22
+,
%0A%0A de
@@ -763,17 +763,20 @@
print %22-
-%22
+ (%22,
%0A
@@ -792,16 +792,35 @@
r(right)
+%0A print %22)%22,
%0A%0A de
@@ -861,16 +861,17 @@
rint %22+%22
+,
%0A
@@ -893,41 +893,1122 @@
t)%0A%0A
-visitor = Visitor()%0Avisitor.expr(e)%0A
+class Evaluator(formast.Visitor):%0A def __init__(self):%0A formast.Visitor.__init__(self)%0A self.stack = %5B%5D%0A%0A def expr(self, e):%0A formast.Visitor.expr(self, e)%0A print self.stack%0A%0A def expr_uint(self, v):%0A self.stack.append(v)%0A%0A def expr_add(self, left, right):%0A self.expr(left)%0A self.expr(right)%0A self.stack.append(self.stack.pop() + self.stack.pop())%0A%0A def expr_sub(self, left, right):%0A self.expr(left)%0A self.expr(right)%0A self.stack.append(self.stack.pop() - self.stack.pop())%0A%0A def expr_mul(self, left, right):%0A self.expr(left)%0A self.expr(right)%0A self.stack.append(self.stack.pop() * self.stack.pop())%0A%0A def expr_div(self, left, right):%0A self.expr(right)%0A self.expr(left) # will pop first!%0A self.stack.append(self.stack.pop() // self.stack.pop())%0A%0A def expr_neg(self, right):%0A self.expr(right)%0A self.stack.append(-self.stack.pop())%0A%0A def expr_pos(self, right):%0A pass%0A%0Aprinter = Printer()%0Aprinter.expr(e)%0Aprint%0A%0Aevaluator = Evaluator()%0Aevaluator.expr(e)
%0A
|
13de22d42ecbc51ea19bdd571262959eb29804d9
|
Fix crop_and_resize usage
|
frcnn/roi_pool.py
|
frcnn/roi_pool.py
|
import sonnet as snt
import tensorflow as tf
import numpy as np
CROP = 'crop'
ROI_POOLING = 'roi_pooling'
class ROIPoolingLayer(snt.AbstractModule):
"""ROIPoolingLayer which applies ROI pooling (or tf.crop_and_resize)"""
def __init__(self, pooling_mode=CROP, pooled_width=7, pooled_height=7,
spatial_scale=1./16, feat_stride=[16], name='roi_pooling'):
super(ROIPoolingLayer, self).__init__(name=name)
self._pooling_mode = pooling_mode
self._pooled_width = pooled_width
self._pooled_height = pooled_height
self._spatial_scale = spatial_scale
self._feat_stride = feat_stride
def _get_bboxes(self, rois, pretrained):
"""
Get normalized coordinates for RoIs (betweetn 0 and 1 for easy cropping)
"""
pretrained_shape = tf.shape(pretrained)
height = (tf.to_float(pretrained_shape[1]) - 1.) * np.float32(self._feat_stride[0])
width = (tf.to_float(pretrained_shape[2]) - 1.) * np.float32(self._feat_stride[0])
x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
# Won't be backpropagated to rois anyway, but to save time TODO: What time is saved?
bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
return bboxes
def _roi_crop(self, rois, pretrained):
bboxes = self._get_bboxes(rois, pretrained)
# TODO: Why?!!?
batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
crops = tf.image.crop_and_resize(
pretrained, bboxes, tf.to_int32(batch_ids),
[self._pooled_width * 2, self._pooled_height * 2], name="crops"
)
return tf.nn.max_pool(crops, [1, 1, 2, 2], [2] * 4, padding='SAME')
def _roi_pooling(self, roi, pretrained):
raise NotImplemented()
def _build(self, roi, pretrained):
if self._pooling_mode == CROP:
return self._roi_crop(roi, pretrained)
elif self._pooling_mode == ROI_POOLING:
return self._roi_pooling(roi, pretrained)
else:
raise NotImplemented('Pooling mode {} does not exist.'.format(self._pooling_mode))
|
Python
| 0.000001
|
@@ -58,16 +58,39 @@
as np%0A%0A
+slim = tf.contrib.slim%0A
%0ACROP =
@@ -688,32 +688,41 @@
bboxes(self, roi
+_proposal
s, pretrained):%0A
@@ -1079,24 +1079,33 @@
tf.slice(roi
+_proposal
s, %5B0, 1%5D, %5B
@@ -1152,24 +1152,33 @@
tf.slice(roi
+_proposal
s, %5B0, 2%5D, %5B
@@ -1226,24 +1226,33 @@
tf.slice(roi
+_proposal
s, %5B0, 3%5D, %5B
@@ -1299,24 +1299,33 @@
tf.slice(roi
+_proposal
s, %5B0, 4%5D, %5B
@@ -1430,26 +1430,14 @@
DO:
-What time is sa
+Remo
ve
-d
?%0A
@@ -1556,16 +1556,25 @@
elf, roi
+_proposal
s, pretr
@@ -1619,16 +1619,25 @@
oxes(roi
+_proposal
s, pretr
@@ -1674,16 +1674,18 @@
%0A
+ #
batch_i
@@ -1712,16 +1712,25 @@
lice(roi
+_proposal
s, %5B0, 0
@@ -1764,16 +1764,121 @@
), %5B1%5D)%0A
+ bboxes_shape = tf.shape(bboxes)%0A batch_ids = tf.zeros((bboxes_shape%5B0%5D, ), dtype=tf.int32)
%0A
@@ -1948,20 +1948,8 @@
es,
-tf.to_int32(
batc
@@ -1953,17 +1953,16 @@
atch_ids
-)
,%0A
@@ -2061,13 +2061,12 @@
urn
-tf.nn
+slim
.max
@@ -2070,16 +2070,18 @@
max_pool
+2d
(crops,
@@ -2085,44 +2085,23 @@
s, %5B
-1, 1,
2, 2%5D,
-%5B2%5D * 4, padding='SAME'
+stride=2
)%0A%0A%0A
@@ -2122,32 +2122,42 @@
ooling(self, roi
+_proposals
, pretrained):%0A
@@ -2211,16 +2211,26 @@
elf, roi
+_proposals
, pretra
@@ -2308,24 +2308,34 @@
roi_crop(roi
+_proposals
, pretrained
@@ -2424,16 +2424,26 @@
ling(roi
+_proposals
, pretra
|
a7e4a12a9d083fd9f0ab8a73b9207f2ec1911044
|
Use unique colormap names
|
typhon/tests/plots/test_colors.py
|
typhon/tests/plots/test_colors.py
|
# -*- coding: utf-8 -*-
"""Testing the functions in typhon.plots.colors.
"""
import filecmp
import os
from tempfile import mkstemp
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import numpy as np
import pytest
from typhon.plots import colors
class TestColors:
"""Testing the cm functions."""
ref_dir = os.path.join(os.path.dirname(__file__), "reference", "")
def setup_method(self):
"""Create a temporary file."""
fd, self.f = mkstemp()
os.close(fd)
def teardown_method(self):
"""Delete temporary file."""
os.remove(self.f)
def test_cmap2rgba(self):
"""Check colormap to RGB conversion."""
ref = np.loadtxt(os.path.join(self.ref_dir, 'viridis.txt'),
comments='%')
rgb = colors.cmap2rgba('viridis', 256)[:, :3] # ignore alpha
assert np.allclose(ref, rgb, atol=0.001)
def test_cmap2rgba_interpolation(self):
"""Check colormap to RGBA interpolation."""
max_planck_duplicates = np.array([
[0., 0.4627451, 0.40784314, 1.],
[0.48235294, 0.70980392, 0.67843137, 1.],
[0.74901961, 0.85098039, 0.83137255, 1.],
[0.96078431, 0.97254902, 0.97647059, 1.],
[0.96078431, 0.97254902, 0.97647059, 1.],
])
max_planck_interpolated = np.array([
[0., 0.4627451, 0.40784314, 1.],
[0.36318339, 0.64876586, 0.61158016, 1.],
[0.6172549, 0.7812226, 0.75580161, 1.],
[0.8038293, 0.88244521, 0.86892734, 1.],
[0.96078431, 0.97254902, 0.97647059, 1.],
])
assert np.allclose(
max_planck_interpolated,
colors.cmap2rgba('max_planck', 5, interpolate=True)
)
assert np.allclose(
max_planck_duplicates,
colors.cmap2rgba('max_planck', 5, interpolate=False)
)
def test_cmap2cpt(self):
"""Export colormap to cpt file."""
colors.cmap2cpt('viridis', filename=self.f)
ref = os.path.join(self.ref_dir, 'viridis.cpt')
with open(self.f) as testfile, open(ref) as reffile:
assert testfile.readlines() == reffile.readlines()
def test_cmap2txt(self):
"""Export colormap to txt file."""
colors.cmap2txt('viridis', filename=self.f)
ref = os.path.join(self.ref_dir, 'viridis.txt')
with open(self.f) as testfile, open(ref) as reffile:
assert testfile.readlines() == reffile.readlines()
def test_cmap2act(self):
"""Export colormap to act file."""
colors.cmap2act('viridis', filename=self.f)
ref = os.path.join(self.ref_dir, 'viridis.act')
assert filecmp.cmp(self.f, ref)
def test_cmap_from_txt(self):
"""Import colormap from txt file."""
idx = np.linspace(0, 1, 256)
viridis = plt.get_cmap('viridis')
cmap = colors.cmap_from_txt(os.path.join(
self.ref_dir, 'viridis.txt'), name="viridis_read")
assert np.allclose(viridis(idx), cmap(idx), atol=0.001)
def test_cmap_from_act(self):
"""Import colormap from act file."""
idx = np.linspace(0, 1, 256)
viridis = plt.get_cmap('viridis')
cmap = colors.cmap_from_act(
os.path.join(self.ref_dir, 'viridis.act'), name="viridis_read")
assert np.allclose(viridis(idx), cmap(idx), atol=0.004)
def test_get_material_design(self):
"""Test the retrieval of material design colors."""
hex_color = colors.get_material_design('red', shade='500')
assert hex_color == '#F44336'
hex_colors = colors.get_material_design('red', shade=None)
assert hex_colors == ['#FFEBEE', '#FFCDD2', '#EF9A9A', '#E57373',
'#EF5350', '#F44336', '#E53935', '#D32F2F',
'#C62828', '#B71C1C', '#FF8A80', '#FF5252',
'#FF1744', '#D50000']
def test_get_material_design_valuerror(self):
"""Test the behavior for undefined material design colors or shades."""
with pytest.raises(ValueError):
colors.get_material_design('undefined_color')
with pytest.raises(ValueError):
colors.get_material_design('red', 'undefined_shade')
def test_named_color_mapping(self):
"""Test if the typhon colors are available in the name mapping."""
assert all([c in mcolors.get_named_colors_mapping()
for c in colors.TYPHON_COLORS.keys()])
def test_named_color_hex(self):
"""Test if the 'ty:uhh-red' hex-value is correct."""
assert mcolors.get_named_colors_mapping()['ty:uhh-red'] == '#ee1d23'
|
Python
| 0
|
@@ -3010,32 +3010,36 @@
me=%22viridis_read
+_txt
%22)%0A%0A asse
@@ -3361,16 +3361,20 @@
dis_read
+_act
%22)%0A%0A
|
df9b8428d6575bf68699534c37425bd1bc1c6ae8
|
decrease the cache time for the block
|
paiji2_shoutbox/modular.py
|
paiji2_shoutbox/modular.py
|
from django.conf.urls import url, include
from modular_blocks import ModuleApp, TemplateTagBlock, modules
from . import urls
class ShoutboxModule(ModuleApp):
app_name = 'bulletin_board'
name = 'bulletin-board'
urls = url(r'^shoutbox/', include(urls))
templatetag_blocks = [
TemplateTagBlock(
name='bulletin-board',
library='shoutbox',
tag='display_bulletin_board',
cache_time=30 * 60,
kwargs={
'nb': 10,
},
),
]
modules.register(ShoutboxModule)
|
Python
| 0.000003
|
@@ -449,15 +449,9 @@
ime=
-30 * 60
+1
,%0A
|
c47d11fbe4e09dcec8d0c40d778c38b04b8ccc7b
|
Add List-Id and List-Unsubscribe headers
|
uchicagohvz/users/mailing_list.py
|
uchicagohvz/users/mailing_list.py
|
# Mailing list configuration
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from uchicagohvz import secrets
from .tasks import smtp_localhost_send
from .models import Profile
from rest_framework.response import Response
from rest_framework.views import APIView
import email
import hashlib
import hmac
def _verify(token, timestamp, signature):
return signature == hmac.new(
key=secrets.MAILGUN_API_KEY,
msg='{}{}'.format(timestamp, token),
digestmod=hashlib.sha256).hexdigest()
class ChatterMailgunHook(APIView):
authentication_classes = []
@method_decorator(csrf_exempt)
def post(self, request, *args, **kwargs):
FIELDS = (
'recipient', 'sender', 'from',
'subject', 'body-mime',
'timestamp', 'token', 'signature'
)
verified = _verify(request.data['token'], request.data['timestamp'], request.data['signature'])
if all([x in request.data for x in FIELDS]) and verified:
msg = email.message_from_string(request.data['body-mime'])
for x in ('From', 'Sender', 'To', 'Reply-To', 'Subject'):
del msg[x]
listhost_addr = 'chatter@lists.uchicagohvz.org'
msg['From'] = request.data['from']
msg['Sender'] = listhost_addr
msg['To'] = listhost_addr
msg['Reply-To'] = listhost_addr
msg['Subject'] = "[HvZ-Chatter] " + request.data['subject']
to_addrs = tuple(Profile.objects.filter(
user__is_active=True, subscribe_chatter_listhost=True).values_list('user__email', flat=True))
smtp_localhost_send(listhost_addr, to_addrs, msg.as_string())
return Response()
else:
return Response(status=406)
|
Python
| 0
|
@@ -1362,16 +1362,163 @@
bject'%5D%0A
+%09%09%09msg%5B'List-Id'%5D = 'HvZ-Chatter %3Chttps://www.uchicagohvz.org%3E'%0A%09%09%09msg%5B'List-Unsubscribe'%5D = '%3Chttps://www.uchicagohvz.org/users/update_profile/%3E'%0A
%09%09%09to_ad
|
43dcd3dc3ee4b090832455acf43e8dd483a6117b
|
Use current device scope in `initialize_tpu_system` if called without a cluster resolver.
|
tensorflow/python/tpu/tpu_strategy_util.py
|
tensorflow/python/tpu/tpu_strategy_util.py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU specific APIs to be used in conjunction with TPU Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.distribute.cluster_resolver import TPUClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import topology
from tensorflow.python.tpu import tpu
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
_INITIALIZED_TPU_SYSTEMS = {}
_LOCAL_MASTERS = ("", "local")
@tf_export("tpu.experimental.initialize_tpu_system")
def initialize_tpu_system(cluster_resolver=None):
"""Initialize the TPU devices.
Args:
cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
which provides information about the TPU cluster.
Returns:
The tf.tpu.Topology object for the topology of the TPU cluster.
Raises:
RuntimeError: If no TPU devices found for eager execution.
"""
if cluster_resolver is None:
cluster_resolver = TPUClusterResolver("")
assert isinstance(cluster_resolver, TPUClusterResolver)
tpu_name = compat.as_text(cluster_resolver._tpu) # pylint: disable=protected-access
if tpu_name in _INITIALIZED_TPU_SYSTEMS:
logging.warning("TPU system %s has already been initialized. "
"Reinitializing the TPU can cause previously created "
"variables on TPU to be lost.")
logging.info("Initializing the TPU system: %s", tpu_name)
if context.executing_eagerly():
# This function looks as it is for the following non-intuitive reasons.
# tpu.initialize_system creates a dummy op whose sole purpose is to trigger
# DistributedTPURewritePass. This pass actually adds real ops that
# initialize the TPU system. Thus, we can't simply run tpu.initialize_system
# eagerly. We need to wrap it in defun and trigger the rewrite passes on it.
job = None
if tpu_name not in _LOCAL_MASTERS:
# Explicitly place the tpu.initialize_system in the first worker to
# avoid the output node match multiple devices error.
job = "{}/replica:0/task:0".format(cluster_resolver.get_job_name())
@function.defun
def _tpu_init_fn():
return tpu.initialize_system(job=job)
# The TPU_SYSTEM device must match the device used in tpu.initialize_system
# exactly, otherwise you can get errors if there are multiple TPU_SYSTEM
# devices available.
with ops.device(tpu._tpu_system_device_name(job)): # pylint: disable=protected-access
output = _tpu_init_fn()
# Clear out the eager context caches since the memory is invalid now.
logging.info("Clearing out eager caches")
context.context()._clear_caches() # pylint: disable=protected-access
serialized_topology = output.numpy()
# TODO(b/134094971): Remove this when lazy tensor copy in multi-device
# function has been implemented.
context.context().mirroring_policy = context.MIRRORING_ALL
else:
master = cluster_resolver.master()
cluster_spec = cluster_resolver.cluster_spec()
session_config = config_pb2.ConfigProto(allow_soft_placement=True)
if cluster_spec:
session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
with ops.Graph().as_default():
with session_lib.Session(config=session_config, target=master) as sess:
serialized_topology = sess.run(tpu.initialize_system())
logging.info("Finished initializing TPU system.")
tpu_topology = topology.Topology(serialized=serialized_topology)
_INITIALIZED_TPU_SYSTEMS[tpu_name] = tpu_topology
return tpu_topology
|
Python
| 0
|
@@ -1132,24 +1132,71 @@
rt function%0A
+from tensorflow.python.framework import device%0A
from tensorf
@@ -1227,16 +1227,16 @@
ort ops%0A
-
from ten
@@ -1968,16 +1968,29 @@
.%0A %22%22%22%0A
+ job = None%0A
if clu
@@ -2012,16 +2012,352 @@
s None:%0A
+ # If no cluster resolver is specified, and running eagerly, execute the init%0A # ops in the current device scope.%0A if context.executing_eagerly():%0A curr_device = device.DeviceSpec.from_string(context.context().device_name)%0A if curr_device.job is not None:%0A job = %22%7B%7D/replica:0/task:0%22.format(curr_device.job)%0A%0A
clus
@@ -3177,24 +3177,24 @@
lize_system%0A
+
# eagerl
@@ -3266,23 +3266,8 @@
it.%0A
- job = None%0A
|
0cc04e9a486fb7dcf312a5c336f8f529f8b1f32d
|
Update version 1.0.4 -> 1.0.5
|
skcode/__init__.py
|
skcode/__init__.py
|
"""
SkCode (Python implementation of BBcode syntax) parser library.
"""
# Package information
__author__ = "Fabien Batteix (@skywodd)"
__copyright__ = "Copyright 2015, TamiaLab"
__credits__ = ["Fabien Batteix", "TamiaLab"]
__license__ = "GPLv3"
__version__ = "1.0.4"
__maintainer__ = "Fabien Batteix"
__email__ = "fabien.batteix@tamialab.fr"
__status__ = "Development" # "Production"
# User friendly imports
from .treebuilder import parse_skcode
from .render import (render_to_html,
render_to_skcode,
render_to_text)
|
Python
| 0
|
@@ -262,9 +262,9 @@
1.0.
-4
+5
%22%0A__
|
f343a8bc7592ab9befb5c03ccd09db61439e3f76
|
remove extra buttons labelled Make Maintenance Visit
|
erpnext/patches/jan_mar_2012/allocated_to_profile.py
|
erpnext/patches/jan_mar_2012/allocated_to_profile.py
|
def execute():
"""
Changes allocated_to option to Profile in
DocType Customer Issue
"""
import webnotes
webnotes.conn.sql("""
UPDATE `tabDocField`
SET options='Profile'
WHERE fieldname='allocated_to'
""")
from webnotes.modules.module_manager import reload_doc
reload_doc('support', 'doctype', 'customer_issue')
|
Python
| 0
|
@@ -214,16 +214,143 @@
%0A%09%22%22%22)%0A%0A
+%09webnotes.conn.sql(%22%22%22%0A%09%09DELETE from %60tabDocField%60%0A%09%09WHERE parent='Customer Issue'%0A%09%09AND label='Make Maintenance Visit'%0A%09%22%22%22)%0A%0A
%09from we
|
dc3ad128d7e55d939ecffbedd9c3664ce81dcf77
|
Add fallback argument for Command-line tool.
|
slackpy/slackpy.py
|
slackpy/slackpy.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Takahiro Ikeuchi'
import os
import requests
import json
import traceback
from argparse import ArgumentParser
class SlackLogger:
def __init__(self, web_hook_url, channel=None, username='Logger'):
self.web_hook_url = web_hook_url
self.username = username
if channel is None:
self.channel = None
elif channel.startswith('#') or channel.startswith('@'):
self.channel = channel
else:
raise ValueError('channel must be started with "#" or "@".')
def __build_payload(self, message, title, color, fallback, fields):
if fields is '':
__fields = {
"title": title,
"text": message,
"color": color,
"fallback": fallback
}
__attachments = {
"fields": __fields
}
else:
__attachments = [{
"fallback": fallback,
"color": color,
"text": message,
"fields": fields
}]
payload = {
"channel": self.channel,
"username": self.username,
"attachments": __attachments
}
return payload
def __send_notification(self, message, title, color='good', fallback='',
fields=''):
"""Send a message to a channel.
Args:
title: The message title.
message: The message body.
color: Can either be one of 'good', 'warning', 'danger',
or any hex color code
fallback: What is shown to IRC/fallback clients
Returns:
api_response:
Raises:
TODO:
"""
if fallback is '':
fallback = title
payload = self.__build_payload(message, title, color, fallback, fields)
try:
response = requests.post(self.web_hook_url,
data=json.dumps(payload))
except Exception:
raise Exception(traceback.format_exc())
else:
if response.status_code == 200:
return response
else:
raise Exception(response.content.decode())
def debug(self, message, title='Slack Notification', fallback='',
fields=''):
return self.__send_notification(message=message,
title=title,
color='#03A9F4',
fallback=fallback,
fields=fields)
def info(self, message, title='Slack Notification', fallback='',
fields=''):
return self.__send_notification(message=message,
title=title,
color='good',
fallback=fallback,
fields=fields)
def warn(self, message, title='Slack Notification', fallback='',
fields=''):
return self.__send_notification(message=message,
title=title,
color='warning',
fallback=fallback,
fields=fields)
def error(self, message, title='Slack Notification', fallback='',
fields=''):
return self.__send_notification(message=message,
title=title,
color='danger',
fallback=fallback,
fields=fields)
def message(self, message, title='Slack Notification', fallback='',
color='good', fields=''):
return self.__send_notification(message=message,
title=title,
color=color,
fallback=fallback,
fields=fields)
def main():
try:
web_hook_url = os.environ["SLACK_INCOMING_WEB_HOOK"]
except KeyError:
print('ERROR: Please set the SLACK_INCOMING_WEB_HOOK variable in ' +
' your environment.')
else:
parser = ArgumentParser(description='slackpy command line tool')
parser.add_argument('-m',
'--message',
type=str,
required=True,
help='Message')
parser.add_argument('-c',
'--channel',
required=False,
help='Channel',
default=None)
parser.add_argument('-t',
'--title',
type=str,
required=False,
help='Title',
default='Slack Notification')
parser.add_argument('-n',
'--name',
type=str,
required=False,
help='Name of Postman',
default='Logger')
# The purpose of backward compatibility, old args (1, 2, 3)
# are being retained.
# DEBUG == 10, INFO == 20, # WARNING == 30, ERROR == 40
parser.add_argument('-l',
'--level',
type=int,
default=20,
choices=[10, 20, 30, 40, 1, 2, 3])
args = parser.parse_args()
client = SlackLogger(web_hook_url, args.channel, args.name)
if args.level == 10:
response = client.debug(args.message, args.title)
elif args.level == 20 or args.level == 1:
response = client.info(args.message, args.title)
elif args.level == 30 or args.level == 2:
response = client.warn(args.message, args.title)
elif args.level == 40 or args.level == 3:
response = client.error(args.message, args.title)
else:
raise Exception("'Level' must be selected from among 1 to 3")
if response.status_code == 200:
print(True)
else:
print(False)
|
Python
| 0
|
@@ -5439,16 +5439,289 @@
Logger')
+%0A parser.add_argument('-f',%0A '--fallback',%0A type=str,%0A required=False,%0A help='A plain-text summary of the attachment',%0A default='')
%0A%0A
@@ -6277,32 +6277,47 @@
sage, args.title
+, args.fallback
)%0A%0A elif
@@ -6404,32 +6404,47 @@
sage, args.title
+, args.fallback
)%0A%0A elif
@@ -6531,32 +6531,47 @@
sage, args.title
+, args.fallback
)%0A%0A elif
@@ -6667,16 +6667,31 @@
gs.title
+, args.fallback
)%0A%0A
|
96176bb223f9971311a0a42c6c9845ca1c0170cc
|
Add base class to throttling
|
user_management/api/throttling.py
|
user_management/api/throttling.py
|
from rest_framework.throttling import ScopedRateThrottle
class DefaultRateMixin(object):
def get_rate(self):
try:
return self.THROTTLE_RATES[self.scope]
except KeyError:
return self.default_rate
class PostRequestThrottleMixin(object):
def allow_request(self, request, view):
"""
Throttle POST requests only.
"""
if request.method != 'POST':
return True
return super(PostRequestThrottleMixin, self).allow_request(request, view)
class LoginRateThrottle(
DefaultRateMixin,
PostRequestThrottleMixin,
ScopedRateThrottle):
default_rate = '10/hour'
class UsernameLoginRateThrottle(LoginRateThrottle):
def get_cache_key(self, request, view):
if request.user.is_authenticated():
return None # Only throttle unauthenticated requests
ident = request.POST.get('username')
if ident is None:
return None # Only throttle username requests
return self.cache_format % {
'scope': self.scope,
'ident': ident.strip().lower(),
}
class PasswordResetRateThrottle(
DefaultRateMixin,
PostRequestThrottleMixin,
ScopedRateThrottle):
default_rate = '3/hour'
class ResendConfirmationEmailRateThrottle(
DefaultRateMixin,
PostRequestThrottleMixin,
ScopedRateThrottle):
default_rate = '3/hour'
|
Python
| 0.000001
|
@@ -533,21 +533,22 @@
%0A%0Aclass
-Login
+Scoped
RateThro
@@ -543,32 +543,36 @@
opedRateThrottle
+Base
(%0A Defaul
@@ -578,32 +578,24 @@
ltRateMixin,
-%0A
PostRequest
@@ -604,33 +604,143 @@
rottleMixin,
-%0A
+ ScopedRateThrottle):%0A %22%22%22Base class to define a scoped rate throttle on POST request.%22%22%22%0A%0A%0Aclass LoginRateThrottle(
ScopedRateTh
@@ -737,32 +737,36 @@
opedRateThrottle
+Base
):%0A default_r
@@ -1283,77 +1283,8 @@
tle(
-%0A DefaultRateMixin,%0A PostRequestThrottleMixin,%0A
Scop
@@ -1289,32 +1289,36 @@
opedRateThrottle
+Base
):%0A default_r
@@ -1380,77 +1380,8 @@
tle(
-%0A DefaultRateMixin,%0A PostRequestThrottleMixin,%0A
Scop
@@ -1386,32 +1386,36 @@
opedRateThrottle
+Base
):%0A default_r
|
b952f803b3f4cf9fc380a9168543e669de35cc61
|
Fix test code for streamsx.ec - don't use internals
|
test/python/spl/testtkpy/opt/python/streams/test_ec.py
|
test/python/spl/testtkpy/opt/python/streams/test_ec.py
|
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2017
# Import the SPL decorators
from streamsx.spl import spl
import streamsx.ec as ec
import pickle
#------------------------------------------------------------------
# Test Execution Context (streamsx.ex) functions
#------------------------------------------------------------------
def splNamespace():
return "com.ibm.streamsx.topology.pytest.pyec"
@spl.filter()
class PyTestOperatorContext:
def __init__(self, domain_id, instance_id, job_id, pe_id, channel, local_channel, max_channels, local_max_channels):
self.enter_called = False
self.exit_called = False
self.domain_id = domain_id
self.instance_id = instance_id
self.job_id = job_id
self.pe_id = pe_id
self.channel = channel
self.local_channel = local_channel
self.max_channels = max_channels
self.local_max_channels = local_max_channels
if not self.check():
raise AssertionError("PyTestOperatorContext")
def same(self, expect, got):
if expect != got:
print("Expected", expect, "Got", got, flush=True)
return False
return True
def check(self):
ok = ec._supported
ok = ok and self.same(self.domain_id, ec.domain_id())
ok = ok and self.same(self.instance_id, ec.instance_id())
ok = ok and self.same(self.job_id, ec.job_id())
ok = ok and self.same(self.pe_id, ec.pe_id())
ok = ok and self.same(self.channel, ec.channel(self))
ok = ok and self.same(self.local_channel, ec.local_channel(self))
ok = ok and self.same(self.max_channels, ec.max_channels(self))
ok = ok and self.same(self.local_max_channels, ec.local_max_channels(self))
return ok
def __call__(self, *tuple):
if not self.enter_called:
raise AssertionError("__enter__() was not called")
if self.exit_called:
raise AssertionError("__exit__() was called before shutdown")
return self.check()
def __enter__(self):
self.enter_called = True
def __exit__(self, exception_type, exception_value, traceback):
self.exit_called = True
@spl.filter()
class PyTestMetrics:
def __init__(self):
ok = True
self.c = ec.CustomMetric(self, "C1")
ok = ok and self.check_metric(self.c, "C1", None, ec.MetricKind.Counter, 0)
c2 = ec.CustomMetric(self, "C2", "This is C2")
ok = ok and self.check_metric(c2, "C2", "This is C2", ec.MetricKind.Counter, 0)
c3 = ec.CustomMetric(self, "C3", initialValue=8123)
ok = ok and self.check_metric(c3, "C3", None, ec.MetricKind.Counter, 8123)
g1 = ec.CustomMetric(self, "G1", kind=ec.MetricKind.Gauge)
ok = ok and self.check_metric(g1, "G1", None, ec.MetricKind.Gauge, 0)
g2 = ec.CustomMetric(self, "G2", kind='Gauge', initialValue=-214)
ok = ok and self.check_metric(g2, "G2", None, ec.MetricKind.Gauge, -214)
if not ok:
raise AssertionError("Failed metrics!")
# Test a metric cannot be pickled
try:
pm = pickle.dumps(g2)
raise AssertionError("Was able to pickle metric:" + pm)
except pickle.PicklingError:
pass
def __call__(self, *tuple):
ok= True
cv = self.c.value
self.c += 7
ok = ok and self.check_metric(self.c, "C1", None, ec.MetricKind.Counter, cv + 7)
self.c.value += 13
ok = ok and self.check_metric(self.c, "C1", None, ec.MetricKind.Counter, cv + 7 + 13)
return ok
def check_metric(self, m, n, desc, k, v):
if n != m.name:
print(n, "!=", m.name)
return False
if desc is not None:
if desc != m.description:
print(desc, "!=", m.description)
return False
if k != m.kind:
print(k, "!=", m.kind)
return False
if v != m.value:
print("m.value", v, "!=", m.value)
return False
if v != int(m):
print("int(m)", v, "!=", int(m))
return False
return True
|
Python
| 0
|
@@ -1237,42 +1237,8 @@
ok =
- ec._supported%0A ok = ok and
sel
|
190cea0914b2b3db7c2a7856e18635d9b074cefc
|
add vertex and line
|
meshio/avsucd/_avsucd.py
|
meshio/avsucd/_avsucd.py
|
"""
I/O for AVS-UCD format, cf.
<https://lanl.github.io/LaGriT/pages/docs/read_avs.html>.
"""
import numpy
from .._exceptions import ReadError, WriteError
from .._files import open_file
from .._helpers import register
from .._mesh import Mesh
meshio_to_avsucd_type = {
"triangle": "tri",
"quad": "quad",
"tetra": "tet",
"pyramid": "pyr",
"wedge": "prism",
"hexahedron": "hex",
}
avsucd_to_meshio_type = {v: k for k, v in meshio_to_avsucd_type.items()}
avsucd_to_meshio_order = {
"triangle": [0, 1, 2],
"quad": [0, 1, 2, 3],
"tetra": [0, 1, 3, 2],
"pyramid": [4, 0, 1, 2, 3],
"wedge": [3, 4, 5, 0, 1, 2],
"hexahedron": [4, 5, 6, 7, 0, 1, 2, 3],
}
def read(filename):
with open_file(filename, "r") as f:
out = read_buffer(f)
return out
def read_buffer(f):
# Skip comments and unpack first line
num_nodes, num_cells, num_node_data, num_cell_data, _ = numpy.genfromtxt(
f, max_rows=1, dtype=int, comments="#"
)
# Read nodes
point_ids, points = _read_nodes(f, num_nodes)
# Read cells
cell_ids, cells, cell_data = _read_cells(f, num_cells, point_ids)
# Read node data
if num_node_data:
point_data = _read_node_data(f, num_nodes, num_node_data, point_ids)
else:
point_data = {}
# Read cell data
if num_cell_data:
cell_data.update(_read_cell_data(f, num_cells, num_cell_data, cells, cell_ids))
return Mesh(
points,
cells,
point_data=point_data,
cell_data={
k: {kk: numpy.array(vv) for kk, vv in v.items()}
for k, v in cell_data.items()
},
)
def _read_nodes(f, num_nodes):
data = numpy.genfromtxt(f, max_rows=num_nodes)
points_ids = {int(pid): i for i, pid in enumerate(data[:, 0])}
return points_ids, data[:, 1:]
def _read_cells(f, num_cells, point_ids):
cells = {}
cell_ids = {}
cell_data = {"avsucd:mat": {}}
count = {k: 0 for k in meshio_to_avsucd_type.keys()}
for _ in range(num_cells):
line = f.readline().strip().split()
cell_id, cell_mat = int(line[0]), int(line[1])
cell_type = avsucd_to_meshio_type[line[2]]
corner = [point_ids[int(pid)] for pid in line[3:]]
if cell_type not in cells:
cells[cell_type] = [corner]
cell_data["avsucd:mat"][cell_type] = [cell_mat]
else:
cells[cell_type].append(corner)
cell_data["avsucd:mat"][cell_type].append(cell_mat)
cell_ids[cell_id] = (cell_type, count[cell_type])
count[cell_type] += 1
for k, v in cells.items():
cells[k] = numpy.array(v)[:, avsucd_to_meshio_order[k]]
return cell_ids, cells, cell_data
def _read_node_data(f, num_nodes, num_node_data, point_ids):
line = f.readline() # Not quite sure what to do with this line...
labels = {}
point_data = {}
for i in range(num_node_data):
line = f.readline().strip().split(",")
labels[i] = line[0].strip()
point_data[labels[i]] = numpy.empty(num_nodes)
for _ in range(num_nodes):
line = f.readline().strip().split()
pid = point_ids[int(line[0])]
for i, val in enumerate(line[1:]):
point_data[labels[i]][pid] = float(val)
return point_data
def _read_cell_data(f, num_cells, num_cell_data, cells, cell_ids):
line = f.readline() # Not quite sure what to do with this line...
labels = {}
cell_data = {}
for i in range(num_cell_data):
line = f.readline().strip().split(",")
labels[i] = line[0].strip()
cell_data[labels[i]] = {k: numpy.empty(len(v)) for k, v in cells.items()}
for _ in range(num_cells):
line = f.readline().strip().split()
cell_type, cid = cell_ids[int(line[0])]
for i, val in enumerate(line[1:]):
cell_data[labels[i]][cell_type][cid] = float(val)
return cell_data
def write(filename, mesh):
pass
register("avsucd", [".inp"], read, {"avsucd": write})
|
Python
| 0.000003
|
@@ -261,24 +261,64 @@
cd_type = %7B%0A
+ %22vertex%22: %22pt%22,%0A %22line%22: %22line%22,%0A
%22triangl
@@ -537,24 +537,63 @@
o_order = %7B%0A
+ %22vertex%22: %5B0%5D,%0A %22line%22: %5B0, 1%5D,%0A
%22triangl
|
72e0eb9728119d5f9c05c5b456ffb9d0c02962b1
|
Correctly replace the winners column with the 0-1-2 values
|
data_readers.py
|
data_readers.py
|
# coding: utf-8
"""
Functions that return the data in the files, sometimes raw, with some cleaning
and/or summarization.
"""
import pandas as pd
RAW_MATCHES_FILE = 'raw_matches.csv'
RAW_WINNERS_FILE = 'raw_winners.csv'
TEAM_RENAMES_FILE = 'team_renames.csv'
def apply_renames(column):
"""Apply team renames to a team column from a dataframe."""
with open(TEAM_RENAMES_FILE) as renames_file:
renames = dict(l.strip().split(',')
for l in renames_file.readlines()
if l.strip())
def renamer(team):
return renames.get(team, team)
return column.apply(renamer)
def get_matches():
"""Create a dataframe with matches info."""
matches = pd.DataFrame.from_csv(RAW_MATCHES_FILE)
for column in ('team1', 'team2'):
matches[column] = apply_renames(matches[column])
def winner_from_score_diff(x):
if x > 0:
return 1
elif x < 0:
return 2
else:
return 0
matches['score_diff'] = matches['score1'] - matches['score2']
matches['winner'] = matches['score_diff']
matches['winner'].apply(winner_from_score_diff)
return matches
def get_winners():
"""Create a dataframe with podium positions info."""
winners = pd.DataFrame.from_csv(RAW_WINNERS_FILE)
winners.team = apply_renames(winners.team)
return winners
def get_team_stats():
"""Create a dataframe with useful stats for each team."""
winners = get_winners()
matches = get_matches()
teams = set(matches.team1.unique()).union(matches.team2.unique())
stats = pd.DataFrame(list(teams), columns=['team'])
stats = stats.set_index('team')
for team in teams:
team_matches = matches[(matches.team1 == team) |
(matches.team2 == team)]
stats.loc[team, 'matches_played'] = len(team_matches)
# wins where the team was on the left side (team1)
wins1 = team_matches[(team_matches.team1 == team) &
(team_matches.score1 > team_matches.score2)]
# wins where the team was on the right side (team2)
wins2 = team_matches[(team_matches.team2 == team) &
(team_matches.score2 > team_matches.score1)]
stats.loc[team, 'matches_won'] = len(wins1) + len(wins2)
stats.loc[team, 'years_played'] = len(team_matches.year.unique())
team_podiums = winners[winners.team == team]
to_score = lambda position: 5 - position # better position -> more score
stats.loc[team, 'podium_score'] = team_podiums.position.map(to_score).sum()
stats.loc[team, 'cups_won'] = len(team_podiums[team_podiums.position == 1])
stats['matches_won_percent'] = stats['matches_won'] / stats['matches_played'] * 100.0
stats['podium_score_yearly'] = stats['podium_score'] / stats['years_played']
stats['cups_won_yearly'] = stats['cups_won'] / stats['years_played']
return stats
|
Python
| 0.999979
|
@@ -1142,22 +1142,40 @@
winner'%5D
-.apply
+ = matches%5B'winner'%5D.map
(winner_
|
34ffbbfcd30a3ff0064afdfb5a07f86f425494ea
|
Add Todo download file test
|
kboard/functional_test/test_post_file_upload.py
|
kboard/functional_test/test_post_file_upload.py
|
import os
from django.conf import settings
from .base import FunctionalTest
class PostFileUploadTest(FunctionalTest):
def test_file_upload(self):
self.browser.get(self.live_server_url)
self.move_to_default_board()
# 지훈이는 첨부파일을 추가하여 새 게시글을 작성하기 위해 글 쓰기 버튼을 누른다.
self.click_create_post_button()
# 글 쓰기 페이지로 이동한다.
self.assertRegex(self.browser.current_url, '.+/default/new/')
# 제목과 내용을 입력한다.
titlebox = self.browser.find_element_by_id('id_post_title')
titlebox.send_keys('Title of This Post')
contentbox = self.get_contentbox()
contentbox.send_keys('Content of This Post')
self.browser.switch_to.default_content()
# 하단에 파일을 업로드 할 수 있는 버튼을 클릭하여 파일을 추가한다.
fileuploadbox = self.browser.find_elements_by_tag_name('input')[2]
fileuploadbox.send_keys(os.path.join(settings.BASE_DIR, 'test_file/test.txt'))
# 하단의 등록 버튼을 누르면 글 작성이 완료되고 게시글 목록으로 돌아간다.
self.click_submit_button()
self.assertRegex(self.browser.current_url, '.+/default/')
# 첨부파일 업로드가 잘 되었는지 확인하기 위해 방금 작성한 게시글을 클릭한다.
table = self.browser.find_element_by_id('id_post_list_table')
rows = table.find_elements_by_css_selector('tbody > tr > td > a')
rows[0].click()
# 'test.txt'라는 이름의 첨부파일이 표시된다.
uploaded_file = self.browser.find_element_by_id('id_uploaded_file')
self.assertEqual(uploaded_file.text, 'test.txt')
# 테스트에 사용했던 파일을 제거한다.
saved_test_file_name = os.path.join(settings.BASE_DIR, 'file/test.txt')
if os.path.isfile(saved_test_file_name):
os.remove(saved_test_file_name)
|
Python
| 0
|
@@ -1469,24 +1469,114 @@
test.txt')%0A%0A
+ # %EC%97%85%EB%A1%9C%EB%93%9C %EB%90%9C %EC%B2%A8%EB%B6%80%ED%8C%8C%EC%9D%BC%EC%9D%B8 'test.txt'%EC%9D%84 %EB%8B%A4%EC%9A%B4%EB%B0%9B%EC%9D%84 %EC%88%98 %EC%9E%88%EB%8A%94%EC%A7%80 %ED%99%95%EC%9D%B8%ED%95%9C%EB%8B%A4.%0A # TODO Add file download test%0A%0A
# %ED%85%8C%EC%8A%A4
|
67f3964f88b921f1c84b1e7169408806fdeade26
|
hello encoding
|
fuckingweather.py
|
fuckingweather.py
|
"""
fuckingweather.py - Willie module for The Fucking Weather
Copyright 2013 Michael Yanovich
Copyright 2013 Edward Powell
Licensed under the Eiffel Forum License 2.
http://willie.dftba.net
"""
from willie.module import commands, rate, priority, NOLIMIT
from willie import web
import re
@commands('fucking_weather', 'fw')
@rate(30)
@priority('low')
def fucking_weather(bot, trigger):
text = trigger.group(2)
if not text:
bot.reply("INVALID FUCKING PLACE. PLEASE ENTER A FUCKING ZIP CODE, OR A FUCKING CITY-STATE PAIR.")
return
text = web.quote(text)
page = web.get("http://thefuckingweather.com/?where=%s&unit=c" % (text))
re_mark = re.compile('<p class="remark">(.*?)</p>')
re_temp = re.compile('<span class="temperature" tempf="(.*)">(.*?)</span>')
re_flavor = re.compile('<p class="flavor">(.*?)</p>')
results = re_mark.findall(page)
temp = re_temp.findall(page)
flavor = re_flavor.findall(page)
if results:
bot.reply(temp[0]+"°?! "+results[0]+" ,"+flavor[0])
else:
bot.reply("I CAN'T FIND THAT SHIT")
return bot.NOLIMIT
|
Python
| 0.999155
|
@@ -189,16 +189,40 @@
net%0A%22%22%22%0A
+# -*- coding: utf-8 -*-%0A
from wil
|
07760e1b31c0cc33533d595f7a29e6231d580039
|
remove unused component
|
tests/cypress/dash/v_copy_paste.py
|
tests/cypress/dash/v_copy_paste.py
|
# pylint: disable=global-statement
import dash
from dash.dependencies import Input, Output, State
import dash_html_components as html
import dash_core_components as dcc
import os
import pandas as pd
import sys
sys.path.append(
os.path.abspath(
os.path.join(os.path.dirname(sys.argv[0]), os.pardir, os.pardir, os.pardir)
)
)
module_names = ["dash_table"]
modules = [__import__(x) for x in module_names]
dash_table = modules[0]
url = "https://github.com/plotly/datasets/raw/master/" "26k-consumer-complaints.csv"
df = pd.read_csv(url)
df = df.values
app = dash.Dash()
app.css.config.serve_locally = True
app.scripts.config.serve_locally = True
app.layout = html.Div(
[
html.Div(id="container", children="Hello World"),
dash_table.DataTable(
id="table",
data=df[0:250],
columns=[
{"id": 0, "name": "Complaint ID"},
{"id": 1, "name": "Product"},
{"id": 2, "name": "Sub-product"},
{"id": 3, "name": "Issue"},
{"id": 4, "name": "Sub-issue"},
{"id": 5, "name": "State"},
{"id": 6, "name": "ZIP"},
{"id": 7, "name": "code"},
{"id": 8, "name": "Date received"},
{"id": 9, "name": "Date sent to company"},
{"id": 10, "name": "Company"},
{"id": 11, "name": "Company response"},
{"id": 12, "name": "Timely response?"},
{"id": 13, "name": "Consumer disputed?"},
],
editable=True,
sort_action='native',
),
dash_table.DataTable(
id="table2",
data=df[0:10],
columns=[
{"id": 0, "name": "Complaint ID"},
{"id": 1, "name": "Product"},
{"id": 2, "name": "Sub-product"},
{"id": 3, "name": "Issue"},
{"id": 4, "name": "Sub-issue"},
{"id": 5, "name": "State"},
{"id": 6, "name": "ZIP"},
{"id": 7, "name": "code"},
{"id": 8, "name": "Date received"},
{"id": 9, "name": "Date sent to company"},
{"id": 10, "name": "Company"},
{"id": 11, "name": "Company response"},
{"id": 12, "name": "Timely response?"},
{"id": 13, "name": "Consumer disputed?"},
],
editable=True,
sort_action='native',
),
]
)
@app.callback(
Output("table", "data"),
[Input("table", "data_timestamp")],
[State("table", "data"), State("table", "data_previous")],
)
# pylint: disable=unused-argument
def updateData(timestamp, current, previous):
# pylint: enable=unused-argument
if current is None or previous is None:
return current
if len(current) == len(previous):
for (i, datum) in enumerate(current):
previous_datum = previous[i]
if datum[0] != previous_datum[0]:
datum[1] = "MODIFIED"
return current
if __name__ == "__main__":
app.run_server(port=8082, debug=False)
|
Python
| 0.000004
|
@@ -131,43 +131,8 @@
tml%0A
-import dash_core_components as dcc%0A
impo
|
7be26218dbf808c1c18b9f42813dea5fa5c540aa
|
fix failing tests
|
corehq/motech/repeaters/tests/test_populate_caserepeater.py
|
corehq/motech/repeaters/tests/test_populate_caserepeater.py
|
from datetime import datetime
from django.core.management import call_command
from django.test import TestCase
from corehq.motech.models import ConnectionSettings
from corehq.motech.repeaters.management.commands.migrate_caserepeater import \
Command as MigrationCommand
from corehq.motech.repeaters.models import CaseRepeater, SQLCaseRepeater
class TestMigrationDiff(TestCase):
@classmethod
def setUpClass(cls):
domain = 'caserepeater-migration'
cls.conn = ConnectionSettings(domain=domain, url='http://url.com')
cls.conn.save()
cls.couch_repeater_obj = CaseRepeater(
_id='id_1',
domain=domain,
connection_settings_id=cls.conn.id,
white_listed_case_types=['white_case', 'black_case'],
black_listed_users=['user1'],
paused=False,
format='case_json',
)
cls.sql_repeater_obj = SQLCaseRepeater(
domain=domain,
connection_settings=cls.conn,
white_listed_case_types=['white_case', 'black_case'],
black_listed_users=['user1'],
is_paused=False,
format='case_json',
repeater_id='id_1',
)
cls.incorrect_sql_repeater_obj = SQLCaseRepeater(
domain=domain,
connection_settings=cls.conn,
white_listed_case_types=['black_case'],
black_listed_users=['user2'],
is_paused=True,
format='case_json',
repeater_id='id_1',
)
super().setUpClass()
def test_diff_couch_and_sql_with_no_diff(self):
output = MigrationCommand.diff_couch_and_sql(self.couch_repeater_obj.to_json(), self.sql_repeater_obj)
self.assertIsNone(output)
def test_diff_couch_and_sql_with_diff(self):
output = MigrationCommand.diff_couch_and_sql(
self.couch_repeater_obj.to_json(),
self.incorrect_sql_repeater_obj
)
self.assertEqual(output.split('\n'), [
"paused: couch value False != sql value True",
"white_listed_case_types: 2 in couch != 1 in sql",
"black_listed_users: couch value 'user1' != sql value 'user2'"
])
class TestMigrationCommand(TestCase):
@classmethod
def setUpClass(cls):
cls.domain_1 = 'caserepeater-migration'
cls.domain_2 = 'migration-caserepeater'
cls.date = datetime.utcnow().strftime('%Y-%m-%dT%H-%M-%S')
cls.conn = ConnectionSettings(domain=cls.domain_1, url='http://url.com')
cls.conn.save()
cls.couch_repeater_1 = CaseRepeater(
domain=cls.domain_1,
connection_settings_id=cls.conn.id,
format='case_json',
)
cls.couch_repeater_2 = CaseRepeater(
domain=cls.domain_2,
connection_settings_id=cls.conn.id,
format='case_json',
)
cls.couch_repeater_3 = CaseRepeater(
domain=cls.domain_1,
connection_settings_id=cls.conn.id,
format='case_json',
)
cls.repeaters = [cls.couch_repeater_1, cls.couch_repeater_2, cls.couch_repeater_3]
for repeater in cls.repeaters:
repeater.save(sync_to_sql=False)
return super(TestMigrationCommand, cls).setUpClass()
@classmethod
def tearDownClass(cls):
for r in cls.repeaters:
r.delete()
return super().tearDownClass()
def test_migration_with_no_arguments(self):
self.assertEqual(SQLCaseRepeater.objects.count(), 0)
# when multiple tests run in the same second they try to create log file with same name
# so we have to pass custom log_path to avoid test failure because of it
call_command('migrate_caserepeater', log_path=f'caserepeater_noargs_{self.date}.log')
self.assertEqual(SQLCaseRepeater.objects.count(), len(self.repeaters))
sql_repeater_ids = SQLCaseRepeater.objects.all().values_list('repeater_id', flat=True)
couch_repeater_ids = [r._id for r in self.repeaters]
self.assertListEqual(sorted(sql_repeater_ids), sorted(couch_repeater_ids))
def test_migration_for_one_domain(self):
self.assertEqual(SQLCaseRepeater.objects.count(), 0)
call_command(
'migrate_caserepeater',
domains=[self.domain_1],
log_path=f'caserepeater_one_domain_{self.date}.log'
)
self.assertEqual(SQLCaseRepeater.objects.count(), 2)
self.assertEqual(
list(SQLCaseRepeater.objects.all().values_list('domain', flat=True).distinct()),
[self.domain_1]
)
# running migration twice to verify nothing unexpected happens
call_command(
'migrate_caserepeater',
domains=[self.domain_1],
log_path=f'caserepeater_onedomain_{self.date}.log'
)
self.assertEqual(SQLCaseRepeater.objects.count(), 2)
self.assertEqual(
list(SQLCaseRepeater.objects.all().values_list('domain', flat=True).distinct()),
[self.domain_1]
)
|
Python
| 0.000003
|
@@ -2003,20 +2003,8 @@
tput
-.split('%5Cn')
, %5B%0A
|
4ebb0e7f4dc8ed7507560c7e378473fd4baf4914
|
update caviar orders scraping logic, add additional html parsing catches
|
app/scrapers/caviar.py
|
app/scrapers/caviar.py
|
from lxml import html
import json
from datetime import datetime
from utils import *
import logging
import os
import requests
_logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
_logger.addHandler(handler)
_logger.setLevel(logging.INFO)
def caviar_sign_in(order):
order_html = order.session.get('https://www.trycaviar.com/users/sign_in')
_logger.debug("Headers: %s", order_html.headers)
try:
tree = html.fromstring(order_html.text)
except:
_logger.error('Error reading html from sign in page')
return ""
token_elements = tree.xpath('//meta[@name="csrf-token"]')
token = token_elements[0].get('content')
if not token:
_logger.error('Unable to get token for sign in')
return ""
payload = {'action': '/users/sign_in', 'user[password]': order.password,
'user[remember_me]': '1', 'user[email]': order.username,
'authenticity_token': token}
response = order.session.post('https://www.trycaviar.com/users/sign_in', payload)
_logger.debug("Headers: %s", response.headers)
if response.status_code >= 400:
_logger.error('Sign in post returned an HTTP error code: %s', response.status_code)
return ""
else:
_logger.info("Successfully logged in: %s", response.status_code)
return "success"
def caviar_get_html(order, endpoint):
_logger.info("Calling caviar endpoint: %s", endpoint)
order_html = order.session.get('https://www.trycaviar.com'+endpoint)
_logger.debug("Headers: %s", order_html.headers)
_logger.info("Order page retrieved: %s", order_html.status_code)
try:
tree = html.fromstring(order_html.text)
except:
_logger.error('Error reading html from orders page')
return ""
title_element = tree.xpath('//title/text()')
if 'Sign In' in title_element[0]:
_logger.info("Orders page redirected to sign in")
sign_in_response = caviar_sign_in(order=order)
if not sign_in_response:
_logger.error("Sign in was not successful")
return ""
order_html = order.session.get('https://www.trycaviar.com'+endpoint)
try:
tree = html.fromstring(order_html.text)
except:
_logger.error('Error reading html from orders page')
return ""
else:
_logger.info("Already signed in, using orders page")
return tree
def caviar_get_order(order):
tree = caviar_get_html(order=order, endpoint="/orders")
if len(tree) == 0:
_logger.error("HTML order response tree is empty")
return ""
element_list = tree.xpath('//li[@class="order_information_list-item"]/text()')
try:
recent_datestr = element_list[0]
except:
_logger.warning('Not able to parse any order list items, returning empty order_id')
return ""
recent_datetime = datetime.strptime(recent_datestr, '%m/%d/%y %I:%M%p')
_logger.debug('Most recent order datetime: %s', recent_datetime)
now = datetime.now()
diff_hrs = get_time_diff(later_time=now, earlier_time=recent_datetime)
_logger.debug('Hours from last order: %s', diff_hrs)
if diff_hrs > 4:
_logger.warning("No recent orders found")
order_id = ""
else:
href_elements = tree.xpath('//ul[@class="order_information_list"]//a')
order_id = href_elements[0].get('href')
_logger.info("Using recent order: %s", order_id)
return order_id
def caviar_update_status(order):
tree = caviar_get_html(order=order, endpoint=order.order_id)
if len(tree) == 0:
_logger.error("HTML order_id response tree is empty")
return ""
else:
_logger.info("Successfully retrieved order_id html tree")
xmlblob = tree.xpath('//div[@data-react-class="OrderStatus"]')
jsonblob = xmlblob[0].get('data-react-props')
jsondata = json.loads(jsonblob)
order_status = jsondata['initial_status']
order_time = 'It ' + jsondata['order_status_text'].split('.')[0].split('Your order ')[1].encode() + '.'
_logger.info("Caviar initial_status: %s", order_status)
if order_status == 'received_and_confirmed':
order_statusfriendly = 'Your order has been received and confirmed by Caviar. ' + order_time
elif order_status == 'being_made':
order_statusfriendly = 'The kitchen is preparing your order. ' + order_time
elif order_status == 'cancelled':
order_statusfriendly = 'Your order has been cancelled.'
elif order_status == 'out_for_delivery':
order_statusfriendly = jsondata['order_status_hash']['out_for_delivery']['now_text'].encode()
elif order_status == 'delivered':
order_statusfriendly = 'Caviar says your order was delivered at ' + jsondata['order_status_hash']['delivered']['at'].encode() + '.'
_logger.info("Order is delivered, so mark status as closed")
order.general_status = GeneralStatus.closed.value
else:
_logger.error("Not able to extract order information from html")
order_statusfriendly = TTSResponses.order_parse_error.value
return order_statusfriendly
|
Python
| 0
|
@@ -2796,30 +2796,79 @@
rder
-_information_list-item
+-history_orders_content-body-column order-history_orders_content-column
%22%5D/t
@@ -3051,17 +3051,9 @@
%22%22%0A
-
%0A
+
@@ -3513,33 +3513,61 @@
rder
-_information_list%22%5D//a')%0A
+-history_orders_content-body%22%5D//a')%0A try:%0A
@@ -3591,25 +3591,25 @@
ef_elements%5B
-0
+1
%5D.get('href'
@@ -3610,16 +3610,137 @@
'href')%0A
+ except:%0A _logger.error('Not able to parse order url, returning empty order_id')%0A return %22%22%0A
@@ -4151,24 +4151,37 @@
rStatus%22%5D')%0A
+ try:%0A
jsonblob
@@ -4218,16 +4218,168 @@
props')%0A
+ except:%0A _logger.error(%22Not able to parse order information from html%22)%0A order_statusfriendly = TTSResponses.order_parse_error.value%0A%0A
json
|
e0713d8c537c17c9a4701fc7dcd9006749072201
|
increase dimension for supplier invoice number
|
l10n_it_account/wizard/check_account_invoive.py
|
l10n_it_account/wizard/check_account_invoive.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2017 Didotech SRL
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from openerp import netsvc
from openerp.osv import fields, orm
class check_account_invoice(orm.TransientModel):
_name = "check.account.invoice"
_description = "Value to Complete"
_inherit = "ir.wizard.screen"
def _get_accounts(self, cr, uid, context=None):
fiscal_position_obj = self.pool['account.fiscal.position']
partner_id = False #context.get('active_id', False)
if partner_id:
fiscal_position_ids = fiscal_position_obj.search(cr, uid, ['|', ('partner_id', '=', False), ('partner_id', '=', partner_id)], context=context)
else:
fiscal_position_ids = fiscal_position_obj.search(cr, uid, [('partner_id', '=', False)], context=context)
result = []
for fiscal_position in fiscal_position_obj.browse(cr, uid, fiscal_position_ids, context):
result.append((fiscal_position.id, fiscal_position.name))
return result
_columns = {
'check_invoice_fiscal_position': fields.boolean('Check Fiscal Position on Invoice'),
'property_account_position_id': fields.selection(_get_accounts, 'Fiscal Position'),
'check_invoice_payment_term': fields.boolean('Check Payment Term on Invoice'),
'payment_term': fields.many2one('account.payment.term', 'Payment Term'),
'required_vat': fields.boolean('Required Vat'),
'vat': fields.char('Vat', size=15, required=False),
'check_supplier_invoice_number': fields.boolean('Supplier invoice nr'),
'supplier_invoice_number': fields.char('Supplier invoice nr', size=16),
}
def action_invoice_validate(self, cr, uid, ids, context):
wizard = self.browse(cr, uid, ids[0], context=context)
invoice_vals = {}
if wizard.property_account_position_id:
invoice_vals.update(fiscal_position=int(wizard.property_account_position_id))
if wizard.payment_term:
invoice_vals.update(payment_term=wizard.payment_term.id)
if wizard.supplier_invoice_number:
invoice_vals.update(payment_term=wizard.supplier_invoice_number)
if invoice_vals:
self.pool['account.invoice'].write(cr, uid, context['active_id'], invoice_vals, context)
if wizard.vat:
partner_id = self.pool['account.invoice'].browse(cr, uid, context['active_id'], context).partner_id.id
self.pool['res.partner'].write(cr, uid, partner_id, {'vat': wizard.vat}, context)
wf_service = netsvc.LocalService('workflow')
wf_service.trg_validate(uid, 'account.invoice', context['active_id'], 'invoice_open', cr)
return {'type': 'ir.actions.act_window_close'}
|
Python
| 0
|
@@ -2488,10 +2488,10 @@
ize=
-16
+32
),%0A
|
8aa348a0252dbac1f8cf4d79b7faf62fd31c95f2
|
FIX evaluation stream check
|
avalanche/training/plugins/evaluation.py
|
avalanche/training/plugins/evaluation.py
|
import warnings
from copy import copy
from collections import defaultdict
from typing import Union, Sequence, TYPE_CHECKING
from avalanche.evaluation.metric_results import MetricValue
from avalanche.evaluation.metrics import accuracy_metrics, loss_metrics
from avalanche.logging import InteractiveLogger
if TYPE_CHECKING:
from avalanche.evaluation import PluginMetric
from avalanche.logging import BaseLogger
from avalanche.training.templates.supervised import SupervisedTemplate
class EvaluationPlugin:
"""Manager for logging and metrics.
An evaluation plugin that obtains relevant data from the
training and eval loops of the strategy through callbacks.
The plugin keeps a dictionary with the last recorded value for each metric.
The dictionary will be returned by the `train` and `eval` methods of the
strategies.
It is also possible to keep a dictionary with all recorded metrics by
specifying `collect_all=True`. The dictionary can be retrieved via
the `get_all_metrics` method.
This plugin also logs metrics using the provided loggers.
"""
def __init__(
self,
*metrics: Union["PluginMetric", Sequence["PluginMetric"]],
loggers: Union["BaseLogger", Sequence["BaseLogger"]] = None,
collect_all=True,
benchmark=None,
strict_checks=False,
suppress_warnings=False
):
"""Creates an instance of the evaluation plugin.
:param metrics: The metrics to compute.
:param loggers: The loggers to be used to log the metric values.
:param collect_all: if True, collect in a separate dictionary all
metric curves values. This dictionary is accessible with
`get_all_metrics` method.
:param benchmark: DEPRECATED. Ignored argument.
:param strict_checks: if True, checks that the full evaluation streams
is used when calling `eval`. An error will be raised otherwise.
When False only warnings will be raised.
:param suppress_warnings: if True, warnings and errors will never be
raised from the plugin.
If False, warnings and errors will be raised following
`benchmark` and `strict_checks` behavior.
"""
super().__init__()
self.collect_all = collect_all
self.benchmark = benchmark
self.strict_checks = strict_checks
self.suppress_warnings = suppress_warnings
flat_metrics_list = []
for metric in metrics:
if isinstance(metric, Sequence):
flat_metrics_list += list(metric)
else:
flat_metrics_list.append(metric)
self.metrics = flat_metrics_list
if loggers is None:
loggers = []
elif not isinstance(loggers, Sequence):
loggers = [loggers]
if benchmark is None:
if not suppress_warnings:
if strict_checks:
raise ValueError(
"Benchmark cannot be None " "in strict mode."
)
else:
warnings.warn(
"No benchmark provided to the evaluation plugin. "
"Metrics may be computed on inconsistent portion "
"of streams, use at your own risk."
)
else:
self.complete_test_stream = benchmark.test_stream
self.loggers: Sequence["BaseLogger"] = loggers
if len(self.loggers) == 0:
warnings.warn("No loggers specified, metrics will not be logged")
if self.collect_all:
# for each curve collect all emitted values.
# dictionary key is full metric name.
# Dictionary value is a tuple of two lists.
# first list gathers x values (indices representing
# time steps at which the corresponding metric value
# has been emitted)
# second list gathers metric values
self.all_metric_results = defaultdict(lambda: ([], []))
# Dictionary of last values emitted. Dictionary key
# is the full metric name, while dictionary value is
# metric value.
self.last_metric_results = {}
self._active = True
"""If False, no metrics will be collected."""
self._metric_values = []
"""List of metrics that have yet to be processed by loggers."""
@property
def active(self):
return self._active
@active.setter
def active(self, value):
assert (
value is True or value is False
), "Active must be set as either True or False"
self._active = value
def publish_metric_value(self, mval: MetricValue):
"""Publish a MetricValue to be processed by the loggers."""
self._metric_values.append(mval)
name = mval.name
x = mval.x_plot
val = mval.value
if self.collect_all:
self.all_metric_results[name][0].append(x)
self.all_metric_results[name][1].append(val)
self.last_metric_results[name] = val
def _update_metrics_and_loggers(
self, strategy: "SupervisedTemplate", callback: str
):
"""Call the metric plugins with the correct callback `callback` and
update the loggers with the new metric values."""
if not self._active:
return []
for metric in self.metrics:
if hasattr(metric, callback):
metric_result = getattr(metric, callback)(strategy)
if isinstance(metric_result, Sequence):
for mval in metric_result:
self.publish_metric_value(mval)
elif metric_result is not None:
self.publish_metric_value(metric_result)
for logger in self.loggers:
logger.log_metrics(self._metric_values)
if hasattr(logger, callback):
getattr(logger, callback)(strategy, self._metric_values)
self._metric_values = []
def get_last_metrics(self):
"""
Return a shallow copy of dictionary with metric names
as keys and last metrics value as values.
:return: a dictionary with full metric
names as keys and last metric value as value.
"""
return copy(self.last_metric_results)
def get_all_metrics(self):
"""
Return the dictionary of all collected metrics.
This method should be called only when `collect_all` is set to True.
:return: if `collect_all` is True, returns a dictionary
with full metric names as keys and a tuple of two lists
as value. The first list gathers x values (indices
representing time steps at which the corresponding
metric value has been emitted). The second list
gathers metric values. a dictionary. If `collect_all`
is False return an empty dictionary
"""
if self.collect_all:
return self.all_metric_results
else:
return {}
def reset_last_metrics(self):
"""
Set the dictionary storing last value for each metric to be
empty dict.
"""
self.last_metric_results = {}
def __getattribute__(self, item):
# We don't want to reimplement all the callbacks just to call the
# metrics. What we don't instead is to assume that any method that
# starts with `before` or `after` is a callback of the plugin system,
# and we forward that call to the metrics.
try:
return super().__getattribute__(item)
except AttributeError as e:
if item.startswith("before_") or item.startswith("after_"):
# method is a callback. Forward to metrics.
return lambda strat, **kwargs: self._update_metrics_and_loggers(
strat, item
)
raise
def before_eval(self, strategy: "SupervisedTemplate", **kwargs):
self._update_metrics_and_loggers(strategy, "before_eval")
msgw = (
"Evaluation stream is not equal to the complete test stream. "
"This may result in inconsistent metrics. Use at your own risk."
)
msge = (
"Stream provided to `eval` must be the same of the entire "
"evaluation stream."
)
curr_stream = strategy.current_eval_stream
benchmark = curr_stream[0].origin_stream.benchmark
full_stream = benchmark.streams[curr_stream.name]
if not self.suppress_warnings and len(curr_stream) != len(full_stream):
if self.strict_checks:
raise ValueError(msge)
else:
warnings.warn(msgw)
default_evaluator = EvaluationPlugin(
accuracy_metrics(minibatch=False, epoch=True, experience=True, stream=True),
loss_metrics(minibatch=False, epoch=True, experience=True, stream=True),
loggers=[InteractiveLogger()],
suppress_warnings=True,
)
__all__ = ["EvaluationPlugin", "default_evaluator"]
|
Python
| 0
|
@@ -8520,16 +8520,33 @@
l_stream
+%5B0%5D.origin_stream
%0A
|
70fdc88e73e52a800dd86504bab7fbf9ad89e1d8
|
Add partial_path property explicitly to the Work model.
|
app/soc/models/work.py
|
app/soc/models/work.py
|
#!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the Work Model."""
__authors__ = [
'"Todd Larsen" <tlarsen@google.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from google.appengine.ext import db
from django.utils.translation import ugettext_lazy
import polymodel
class Work(polymodel.PolyModel):
"""Model of a Work created by one or more Persons in Roles.
Work is a "base entity" of other more specific "works" created by Persons
serving in "roles".
authors) a many:many relationship with Roles, stored in a separate
WorksAuthors model, used to represent authorship of the Work. See
the WorksAuthors model class for details.
reviews) a 1:many relationship between a Work and the zero or more
Reviews of that Work. This relation is implemented as the 'reviews'
back-reference Query of the Review model 'reviewed' reference.
"""
#: Required field indicating the "title" of the work, which may have
#: different uses depending on the specific type of the work. Works
#: can be indexed, filtered, and sorted by 'title'.
title = db.StringProperty(required=True,
verbose_name=ugettext_lazy('Title'))
title.help_text = ugettext_lazy(
'title of the document; often used in the window title')
#: optional, indexed plain text field used for different purposes,
#: depending on the specific type of the work
abstract = db.StringProperty(multiline=True)
abstract.help_text = ugettext_lazy(
'short abstract, summary, or snippet;'
' 500 characters or less, plain text displayed publicly')
#: Required link name, appended to a "path" to form the document URL.
#: The combined "path" and link name must be globally unique on the
#: site (but, unlike some link names, a Work link name can be reused,
#: as long as the combination with the preceding path is unique).
link_name = db.StringProperty(required=True,
verbose_name=ugettext_lazy('Link name'))
link_name.help_text = ugettext_lazy('link name used in URLs')
#: short name used in places such as the sidebar menu and breadcrumb trail
#: (optional: title will be used if short_name is not present)
short_name = db.StringProperty(verbose_name=ugettext_lazy('Short name'))
short_name.help_text = ugettext_lazy(
'short name used, for example, in the sidebar menu')
#: date when the work was created
created = db.DateTimeProperty(auto_now_add=True)
#: date when the work was last modified
modified = db.DateTimeProperty(auto_now=True)
|
Python
| 0
|
@@ -2150,24 +2150,556 @@
publicly')%0A%0A
+ #: Required path, prepended to a %22link name%22 to form the document URL.%0A #: The combined path and link name must be globally unique on the%0A #: site. Except in /site/docs (Developer) forms, this field is not%0A #: usually directly editable by the User, but is instead set by controller%0A #: logic to match the %22scope%22 of the document.%0A partial_path = db.StringProperty(required=True,%0A verbose_name=ugettext_lazy('Partial path'))%0A partial_path.help_text = ugettext_lazy(%0A 'path portion of URLs, prepended to link name')%0A%0A
#: Require
@@ -2776,22 +2776,20 @@
ombined
-%22
path
-%22
and lin
|
226639f98dee43e21db70070108d0c3131d75729
|
move to version 0.2.1b
|
gemini/version.py
|
gemini/version.py
|
__version__="0.2.0b"
|
Python
| 0.000001
|
@@ -10,12 +10,12 @@
__=%220.2.
-0
+1
b%22%0A
|
e01eccd8af27ad97a20b784b81ddde5cc8515e4b
|
fix incorrect codding utf8 to utf-8 (#903)
|
faker/providers/internet/hu_HU/__init__.py
|
faker/providers/internet/hu_HU/__init__.py
|
# coding=utf8
from __future__ import unicode_literals
from .. import Provider as InternetProvider
class Provider(InternetProvider):
free_email_domains = (
'gmail.com',
'hotmail.com',
'yahoo.com',
)
tlds = (
'hu',
'com',
'com.hu',
'info',
'org',
'net',
'biz',
)
replacements = (
('ö', 'o'),
('ü', 'u'),
('á', 'a'),
('é', 'e'),
('í', 'i'),
('ó', 'i'),
('ő', 'o'),
('ú', 'u'),
('ű', 'u'),
)
|
Python
| 0.000018
|
@@ -5,16 +5,17 @@
ding=utf
+-
8%0Afrom _
|
975f8e44f2d16e8f4e144291283b221e04edd95c
|
add a test in test_expand_path_cfg()
|
tests/unit/selection/factories/test_expand_path_cfg.py
|
tests/unit/selection/factories/test_expand_path_cfg.py
|
# Tai Sakuma <tai.sakuma@gmail.com>
import os
import sys
import pytest
from alphatwirl.selection.factories.FactoryDispatcher import expand_path_cfg
##__________________________________________________________________||
@pytest.fixture()
def alias_dict():
return {
'alias1': 'ev : ev.var1[0] >= 10',
'alias2': ('ev : ev.var2[0] >= 20', dict(name='name2')),
'alias3': 'alias1',
'alias4': 'alias3',
'alias5': 'ev : ev.var4[0] == {n}',
'alias6': ('ev : {low} <= ev.var5[0] < {high}', dict(low=11, high=20))
}
##__________________________________________________________________||
@pytest.mark.parametrize('path_cfg, expected', [
pytest.param(
'alias1',
dict(
factory='LambdaStrFactory',
lambda_str='ev : ev.var1[0] >= 10',
name='alias1'
),
id='alias1'
),
pytest.param(
('alias1', dict(name='name1')),
dict(
factory='LambdaStrFactory',
lambda_str='ev : ev.var1[0] >= 10',
name='name1'
),
id='alias1:with-name'
),
pytest.param(
'alias2',
dict(
factory='LambdaStrFactory',
lambda_str='ev : ev.var2[0] >= 20',
name='name2' # name has priority over alias
),
id='alias2:name-priority-over-alias'
),
pytest.param(
('alias2', dict(name='new_name2')),
dict(
factory='LambdaStrFactory',
lambda_str='ev : ev.var2[0] >= 20',
name='new_name2' # name can be overridden
),
id='alias2:name-overridden'
),
pytest.param(
'alias3',
dict(
factory='LambdaStrFactory',
lambda_str='ev : ev.var1[0] >= 10',
name='alias3' # the outermost alias has priority
),
id='alias3:alias-of-alias'
),
pytest.param(
'alias4',
dict(
factory='LambdaStrFactory',
lambda_str='ev : ev.var1[0] >= 10',
name='alias4' # the outermost alias has priority
),
id='alias4:alias-of-alias-of-alias'
),
pytest.param(
('alias5', dict(n=30)),
dict(
factory='LambdaStrFactory',
lambda_str='ev : ev.var4[0] == {n}', # not formatted
n=30,
name='alias5'
),
id='alias5:not-formatted'
),
pytest.param(
'alias6',
dict(
factory='LambdaStrFactory',
lambda_str='ev : {low} <= ev.var5[0] < {high}',
low=11,
high=20,
name='alias6',
),
id='alias6:not-formatted-with-default-values'
),
pytest.param(
('alias6', dict(high=30)),
dict(
factory='LambdaStrFactory',
lambda_str='ev : {low} <= ev.var5[0] < {high}',
low=11,
high=30,
name='alias6'
),
id='alias6:not-formatted-with-default-values-overridden'
),
pytest.param(
'ev : ev.nJets[0] >= 2',
dict(
factory='LambdaStrFactory',
lambda_str='ev : ev.nJets[0] >= 2',
),
id='string:lambda_str'
),
pytest.param(
'ev : ev.nJets[0] >= {n}',
dict(
factory='LambdaStrFactory',
lambda_str='ev : ev.nJets[0] >= {n}',
),
id='string:lambda_str-not-formatted'
),
pytest.param(
dict(All=()),
{'factory': 'AllFactory', 'path_cfg_list': ()},
id='dict-all-empty'
),
pytest.param(
dict(Any=()),
{'factory': 'AnyFactory', 'path_cfg_list': ()},
id='dict-any-empty'
),
pytest.param(
dict(Not=()),
{'factory': 'NotFactory', 'path_cfg': ()},
id='dict-not-empty'
),
pytest.param(
dict(All=(dict(factory='factory1'), dict(factory='factory2')), name='test_all', arg2=2, arg3=3),
dict(
factory='AllFactory',
path_cfg_list=(dict(factory='factory1'), dict(factory='factory2')),
name='test_all',
arg2=2, arg3=3
),
id='dict-all'
),
pytest.param(
dict(Any=(dict(factory='factory1'), dict(factory='factory2')), name='test_any', arg2=2, arg3=3),
dict(
factory='AnyFactory',
path_cfg_list=(dict(factory='factory1'), dict(factory='factory2')),
name='test_any',
arg2=2, arg3=3
),
id='dict-any'
),
pytest.param(
dict(Not=dict(factory='factory1'), name='test_not', arg2=2, arg3=3),
dict(
factory='NotFactory',
path_cfg=dict(factory='factory1'),
name='test_not',
arg2=2, arg3=3
),
id='dict-not'
),
])
def test_expand_path_cfg(alias_dict, path_cfg, expected):
actual = expand_path_cfg(path_cfg=path_cfg, alias_dict=alias_dict)
assert expected == actual
##__________________________________________________________________||
@pytest.mark.parametrize('path_cfg, error', [
pytest.param(
dict(All=(), Any=()), ValueError, id='multiple vertices: All Any'
),
pytest.param(
dict(All=(), Not=()), ValueError, id='multiple vertices: All Not'
),
pytest.param(
dict(Any=(), Not=()), ValueError, id='multiple vertices: Any Not'
),
pytest.param(
dict(), ValueError, id='empty dict'
),
])
def test_expand_path_cfg_raise(path_cfg, error):
with pytest.raises(error):
expand_path_cfg(path_cfg=path_cfg)
##__________________________________________________________________||
|
Python
| 0.000001
|
@@ -4809,32 +4809,503 @@
ict-not'%0A ),%0A
+ pytest.param(%0A dict(Any=(%0A 'ev : ev.x%5B0%5D == 0',%0A dict(All=(%0A 'ev : ev.x%5B0%5D %3E= 1',%0A 'ev : ev.y%5B0%5D %3E= 100',%0A )),%0A dict(Not=dict(%0A Any=(%0A 'ev : ev.z%5B0%5D == 0',%0A 'ev : ev.w%5B0%5D %3E= 300',%0A ),%0A )),%0A )),%0A %7B %7D,%0A id='example',%0A marks=pytest.mark.skip(reason='not fully expanded')%0A ),%0A
%5D)%0Adef test_expa
|
69c6c5126ebed3d2144300259c6d739f5323f0c3
|
fix bug, return data instead of none
|
geotweet/steps.py
|
geotweet/steps.py
|
import json
from log import logger, get_rotating_logger
class ProcessStep(object):
""" Base Class used to build data processing chain """
next_step = None
def get_next(self):
return self.next_step
def set_next(self, next_step):
self.next_step = next_step
def next(self, data):
if self.next_step:
return self.next_step.process(data)
return None
def process(self, step_input):
""" Process input and and return output """
return self.next(step_input)
class GeoFilterStep(ProcessStep):
"""
Process output from Twitter Streaming API
For each record output from the API will be called as argument to process.
That function will validate and convert tweet to desired format.
"""
def _validate(self, key, record):
if key in record and record[key]:
return True
return False
def validate_geotweet(self, record):
""" check that stream record is actual tweet with coordinates """
if record:
return self._validate('user', record) and self._validate('geo', record)
return False
def process(self, tweet):
""" Passes on tweet if missing 'geo' or 'user' property """
if self.validate_geotweet(tweet):
return self.next(tweet)
return None
class ExtractStep(ProcessStep):
""" Extract interesting fields from Tweet """
def process(self, tweet):
if not tweet:
return None
user = tweet['user']
data = dict(
user_id=user['id'],
name=user['name'],
screen_name=user['screen_name'],
description=user['description'],
location=user['location'],
friends_count=user['friends_count'],
followers_count=user['followers_count'],
tweet_id=tweet['id_str'],
source=tweet['source'],
created_at=tweet['created_at'],
timestamp=tweet['timestamp_ms'],
lonlat=tweet['coordinates']['coordinates']
)
return self.next(data)
class LogStep(ProcessStep):
""" Log tweet to rotating log """
def __init__(self, logfile, log_interval=60, when="M"):
if logfile:
self.rotating_logger = get_rotating_logger(logfile, log_interval)
self.logfile = logfile
def process(self, tweet):
if tweet:
self.rotating_logger.info(json.dumps(tweet))
return self.next(tweet)
|
Python
| 0.000002
|
@@ -410,20 +410,20 @@
return
-None
+data
%0A%0A de
@@ -1055,36 +1055,21 @@
f record
-:%0A return
+ and
self._v
@@ -1091,16 +1091,34 @@
record)
+ %5C%0A
and sel
@@ -1134,21 +1134,54 @@
te('
-geo', record)
+coordinates', record):%0A return True
%0A
@@ -2393,43 +2393,8 @@
val)
-%0A self.logfile = logfile
%0A%0A
|
fa3eeb3010d7adc1c3b958abd973fffeb3f711a0
|
fix cors header issue
|
gfsad/__init__.py
|
gfsad/__init__.py
|
from flask import Flask, request, g, redirect
from flask.ext.cache import Cache
from flask.ext.compress import Compress
from flask.ext.restless import APIManager
from flask_jwt import JWT
from flask_mail import Mail
from gfsad.exceptions import FieldError
from flask.ext.celery import Celery
from flask_limiter import Limiter, HEADERS
from gfsad.misc import PostMarkHandler
import logging
# APIManager.APINAME_FORMAT = 'api.{0}'
# APIManager.BLUEPRINTNAME_FORMAT = '{0}'
class JSONLimiter(Limiter):
def inject_headers(self, response):
current_limit = getattr(g, 'view_rate_limit', None)
if self.enabled and self.headers_enabled and current_limit:
window_stats = self.limiter.get_window_stats(*current_limit)
response.headers.add(
self.header_mapping[HEADERS.LIMIT],
str(current_limit[0].amount)
)
response.headers.add(
self.header_mapping[HEADERS.REMAINING],
str(window_stats[1])
)
response.headers.add(
self.header_mapping[HEADERS.RESET],
str(window_stats[0])
)
return response
cache = Cache()
compress = Compress()
limiter = JSONLimiter(headers_enabled=True, global_limits=["1000 per minute"])
api = APIManager()
jwt = JWT()
celery = Celery()
mail = Mail()
from gfsad.models import db, User
def add_cors_headers(response):
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = request.headers.get(
'Access-Control-Request-Headers', '')
response.headers['Access-Control-Allow-Methods'] = request.headers.get(
'Access-Control-Request-Methods', '')
# Do nothing for post, patch, delete etc..
try:
method = [e for e in request.url_rule.methods][-1]
except AttributeError:
# print "add_cors_headers: Attribute Error - " + str(response)
return response
if method in ['PUT', 'PATCH', 'DELETE', 'POST', 'OPTIONS']:
return response
if response.status_code == 404:
return response
# set cache max age
if '/api' in request.url_rule.rule:
response.headers['Cache-Control'] = 'max-age=120'
elif '/gee/time_series' in request.url_rule.rule:
response.headers['Cache-Control'] = 'max-age=4000000'
elif '/tiles' in request.url_rule.rule:
response.headers['Cache-Control'] = 'max-age=4000000'
elif '/gee/maps' in request.url_rule.rule:
response.headers['Cache-Control'] = 'max-age=80000'
else:
response.headers['Cache-Control'] = 'max-age=0'
return response
def create_app(config='Testing'):
app = Flask(__name__)
# Configure the flask app
app.config.from_object("gfsad.config." + config)
# initialize all of the extensions
jwt.init_app(app)
celery.init_app(app)
db.init_app(app)
limiter.init_app(app)
cache.init_app(app)
compress.init_app(app)
mail.init_app(app)
api.init_app(app, flask_sqlalchemy_db=db)
# import and register all of the blueprints
from gfsad.views.public import public
from gfsad.views.auth import auth
from gfsad.views.gee import gee
from gfsad.views.aws import aws
from gfsad.views.upload import upload
from gfsad.views.tiles import tile_blueprint
app.register_blueprint(public)
app.register_blueprint(gee)
app.register_blueprint(aws)
app.register_blueprint(auth)
app.register_blueprint(upload)
app.register_blueprint(tile_blueprint)
from gfsad.views.api import init_api
init_api(app)
# import and init error handlers
from gfsad.views.errors import init_error_handlers
init_error_handlers(app)
# cors headers and cache
app.after_request(add_cors_headers)
from gfsad.utils.log import log
app.after_request(log)
from gfsad.auth import load_user
@limiter.request_filter
def registered():
"""
Removes limit if user is registered and using a token.
:return:
"""
return load_user() != "anonymous"
if 'POSTMARK_API_KEY' in app.config:
email_handler = PostMarkHandler(api_key=app.config['POSTMARK_API_KEY'])
email_handler.setLevel(logging.ERROR)
app.logger.addHandler(email_handler)
import tasks.high_res_imagery
import tasks.classifications
return app
if __name__ == "__main__":
app = create_app()
|
Python
| 0
|
@@ -1664,17 +1664,16 @@
w-Method
-s
'%5D = req
|
75ab7e06d78d0534e700b5a910419cd655b156ba
|
Add --crawl_since flag when not providing repos file
|
git_downloader.py
|
git_downloader.py
|
#!/usr/bin/env python
#
import sys, os, argparse, logging, fnmatch, posixpath, socket
from github import Github
if sys.version_info < (3, 0):
# python 2
import urlparse
from urllib import urlretrieve
else:
# python 3
import urllib.parse as urlparse
from urllib.request import urlretrieve
def main(args, loglevel):
logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)
socket.setdefaulttimeout(args.timeout)
g = Github()
if args.repo_file:
with open(args.repo_file, 'r') as f:
repo_gen = f
download_files(args, g, repo_gen)
else:
repo_gen = (repo.html_url for repo in g.get_repos() if not repo.fork)
download_files(args, g, repo_gen)
def download_files(args, g, repo_gen):
file_counter = 0
for line in repo_gen:
logging.info('Fetching repository: %s' % line)
try:
repo_str = line.rstrip().split('github.com/')[-1]
repo = g.get_repo(repo_str)
tree = repo.get_git_tree('master', recursive=True)
files_to_download = []
for file in tree.tree:
if fnmatch.fnmatch(file.path, args.wildcard):
files_to_download.append('https://github.com/%s/raw/master/%s' % (repo_str, file.path))
for file in files_to_download:
logging.info('Downloading %s' % file)
file_counter += 1
filename = posixpath.basename(urlparse.urlsplit(file).path)
output_path = os.path.join(args.output_dir, filename)
if os.path.exists(output_path):
output_path += "-" + str(file_counter)
try:
urlretrieve(file, output_path)
except Exception as e:
logging.exception('Error downloading %s.' % file)
except Exception as e:
logging.exception('Error fetching repository %s.' % line)
args.yara_meta = os.path.join(args.output_dir, args.yara_meta)
with open(args.yara_meta, 'w') as f:
for i in os.listdir(args.output_dir):
try:
f.write("include \"" + i + "\"\n")
except Exception as e:
logging.exception('Couldn\'t write to %s.' % args.yara_meta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = "Github file downloader")
parser.add_argument("-r",
"--repo_file",
help = "Path for the input file which contains a url of a Github repository for each separate line")
parser.add_argument("-w",
"--wildcard",
help = "Unix shell-style wildcard to match files to download (for example: *.txt)")
parser.add_argument("-o",
"--output_dir",
default = "",
help = "Directory to store all downloaded files")
parser.add_argument("-y",
"--yara-meta",
default = "rules.yara",
help = "Yara meta rule filename to create")
parser.add_argument("-t",
"--timeout",
default = 30,
help = "Socket timeout (seconds)")
parser.add_argument("-v",
"--verbose",
help="increase output verbosity",
action="store_true")
args = parser.parse_args()
# Setup logging
if args.verbose:
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
main(args, loglevel)
|
Python
| 0
|
@@ -84,34 +84,23 @@
ket%0A
-from github
import
-G
+g
ithub%0A
+%0A
if s
@@ -452,16 +452,23 @@
g =
+github.
Github()
@@ -626,24 +626,28 @@
+def
repo_gen
= (repo
@@ -642,69 +642,399 @@
_gen
- = (repo.html_url for repo in g.get_repos() if not repo.fork)
+():%0A last_page = args.crawl_since%0A git_repo_gen = g.get_repos(since=args.crawl_since)%0A for repo in git_repo_gen:%0A current_page = git_repo_gen.__nextUrl%0A if current_page != last_page:%0A print(%22Finished processing: %7B%7D%22.format(current_page))%0A last_page = current_page%0A yield repo
%0A
@@ -1070,16 +1070,18 @@
repo_gen
+()
)%0A%0Adef d
@@ -2934,24 +2934,281 @@
ate line%22)%0A%0A
+ parser.add_argument(%22-p%22,%0A %22--crawl_since%22,%0A default = github.GithubObject.NotSet,%0A help = %22When not using a repo_file, this will be used as starting position for github repo crawl%22)%0A%0A
parser.a
|
d08d7b3763fa10b34c6e68c78d85055771c4f8df
|
Handle ReleaseEvent
|
github3/events.py
|
github3/events.py
|
# -*- coding: utf-8 -*-
"""
github3.events
==============
This module contains the class(es) related to Events
"""
from github3.models import GitHubObject
class Event(GitHubObject):
"""The :class:`Event <Event>` object. It structures and handles the data
returned by via the `Events <http://developer.github.com/v3/events>`_
section of the GitHub API.
Two events can be compared like so::
e1 == e2
e1 != e2
And that is equivalent to::
e1.id == e2.id
e1.id != e2.id
"""
def __init__(self, event):
super(Event, self).__init__(event)
from github3.users import User
from github3.orgs import Organization
#: :class:`User <github3.users.User>` object representing the actor.
self.actor = User(event.get('actor')) if event.get('actor') else None
#: datetime object representing when the event was created.
self.created_at = self._strptime(event.get('created_at'))
#: Unique id of the event
self.id = event.get('id')
#: List all possible types of Events
self.org = None
if event.get('org'):
self.org = Organization(event.get('org'))
#: Event type http://developer.github.com/v3/activity/events/types/
self.type = event.get('type')
handler = _payload_handlers.get(self.type, identity)
#: Dictionary with the payload. Payload structure is defined by type_.
# _type: http://developer.github.com/v3/events/types
self.payload = handler(event.get('payload'))
#: Return ``tuple(owner, repository_name)``
self.repo = event.get('repo')
if self.repo is not None:
self.repo = tuple(self.repo['name'].split('/'))
#: Indicates whether the Event is public or not.
self.public = event.get('public')
def __repr__(self):
return '<Event [{0}]>'.format(self.type[:-5])
@staticmethod
def list_types():
"""List available payload types"""
return sorted(_payload_handlers.keys())
def is_public(self):
"""Indicates whether the Event is public or not.
.. warning:: This will be deprecated in 0.6
:returns: bool -- True if event is pubic, False otherwise
"""
return self.public
def _commitcomment(payload):
from github3.repos.comment import RepoComment
if payload.get('comment'):
payload['comment'] = RepoComment(payload['comment'], None)
return payload
def _follow(payload):
from github3.users import User
if payload.get('target'):
payload['target'] = User(payload['target'], None)
return payload
def _forkev(payload):
from github3.repos import Repository
if payload.get('forkee'):
payload['forkee'] = Repository(payload['forkee'], None)
return payload
def _gist(payload):
from github3.gists import Gist
if payload.get('gist'):
payload['gist'] = Gist(payload['gist'], None)
return payload
def _issuecomm(payload):
from github3.issues import Issue
from github3.issues.comment import IssueComment
if payload.get('issue'):
payload['issue'] = Issue(payload['issue'], None)
if payload.get('comment'):
payload['comment'] = IssueComment(payload['comment'], None)
return payload
def _issueevent(payload):
from github3.issues import Issue
if payload.get('issue'):
payload['issue'] = Issue(payload['issue'], None)
return payload
def _member(payload):
from github3.users import User
if payload.get('member'):
payload['member'] = User(payload['member'], None)
return payload
def _pullreqev(payload):
from github3.pulls import PullRequest
if payload.get('pull_request'):
payload['pull_request'] = PullRequest(payload['pull_request'], None)
return payload
def _pullreqcomm(payload):
from github3.pulls import ReviewComment
if payload.get('comment'):
payload['comment'] = ReviewComment(payload['comment'], None)
return payload
def _team(payload):
from github3.orgs import Team
from github3.repos import Repository
from github3.users import User
if payload.get('team'):
payload['team'] = Team(payload['team'], None)
if payload.get('repo'):
payload['repo'] = Repository(payload['repo'], None)
if payload.get('user'):
payload['user'] = User(payload['user'], None)
return payload
def identity(x):
return x
_payload_handlers = {
'CommitCommentEvent': _commitcomment,
'CreateEvent': identity,
'DeleteEvent': identity,
'FollowEvent': _follow,
'ForkEvent': _forkev,
'ForkApplyEvent': identity,
'GistEvent': _gist,
'GollumEvent': identity,
'IssueCommentEvent': _issuecomm,
'IssuesEvent': _issueevent,
'MemberEvent': _member,
'PublicEvent': lambda x: '',
'PullRequestEvent': _pullreqev,
'PullRequestReviewCommentEvent': _pullreqcomm,
'PushEvent': identity,
'StatusEvent': identity,
'TeamAddEvent': _team,
'WatchEvent': identity,
}
|
Python
| 0.000001
|
@@ -4050,24 +4050,213 @@
n payload%0A%0A%0A
+def _release(payload):%0A from github3.repos.release import Release%0A release = payload.get('release')%0A if release:%0A payload%5B'release'%5D = Release(release)%0A return payload%0A%0A%0A
def _team(pa
@@ -5179,24 +5179,54 @@
: identity,%0A
+ 'ReleaseEvent': _release,%0A
'StatusE
|
0352f542341fe25be74c0130e7e50394c6f0bb6d
|
add interactive message colorization
|
gitmagic/fixup.py
|
gitmagic/fixup.py
|
import gitmagic
import git.cmd
import tempfile
def fixup(repo, destination_picker, change_finder, args={}):
repo.index.reset()
for change in change_finder(repo):
_apply_change(repo, change)
destination_commits = destination_picker.pick(change)
if not destination_commits:
repo.index.commit( message = "WARNING: no destination commit")
continue
destination = destination_commits[0]
gitmagic.checkpoint("Should I create fixup commit for {} -> {}:{}\n{}".format(
change.a_file_name,
destination.hexsha[:7],
destination.summary,
change.diff), args)
repo.index.commit( message = "fixup! {}".format(destination.message))
def _apply_change(repo, change):
file_name = ""
with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
f.write(change.diff)
file_name = f.name
git_ = git.cmd.Git(repo.working_dir)
git_.execute(['git', 'apply', '--cache', file_name])
|
Python
| 0.000001
|
@@ -472,191 +472,45 @@
int(
-%22Should I create fixup commit for %7B%7D -%3E %7B%7D:%7B%7D%5Cn%7B%7D%22.format(%0A change.a_file_name,%0A destination.hexsha%5B:7%5D,%0A destination.summary,%0A change.diff
+ _colorize_change(change, destination
), a
@@ -868,8 +868,988 @@
name%5D)%0A%0A
+NO_COLOR = %22%5C033%5B0m%22%0AYELLOW = %22%5C033%5B1;33m%22%0AWHITE = %22%5C033%5B1;37m%22%0AGREEN = %22%5C033%5B1;32m%22%0ARED = %22%5C033%5B1;31m%22%0ABLUE = %22%5C033%5B1;36m%22%0A%0Adef _colorize(message, color):%0A return %22%7B%7D%7B%7D%7B%7D%22.format(color, message, NO_COLOR)%0A%0Adef _colorize_change(change, commit):%0A message = _colorize(%22Should I create fixup commit for %7B%7D -%3E %7B%7D:%7B%7D%5Cn%22.format(%0A change.a_file_name,%0A commit.hexsha%5B:7%5D,%0A commit.summary), YELLOW)%0A message += _diff_colorizer(change.diff)%0A return message%0A%0Adef _is_diff_header_line(line):%0A return line%5B:1%5D == %22@%22 or line%5B:3%5D == %22+++%22 or line%5B:3%5D == %22---%22%0A%0Adef _diff_colorizer(diff):%0A colorized = %22%22%0A for line in diff.splitlines(keepends=True):%0A color = WHITE%0A first_char = line%5B:1%5D%0A if first_char == %22-%22:%0A color = RED%0A if first_char == %22+%22:%0A color = GREEN%0A if _is_diff_header_line(line):%0A color = BLUE%0A colorized += _colorize(line, color)%0A return colorized%0A
|
835b1ff03d517c4a621237d3cd1682df1322e0e8
|
add missing build dependency to py-execnet (#6443)
|
var/spack/repos/builtin/packages/py-execnet/package.py
|
var/spack/repos/builtin/packages/py-execnet/package.py
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyExecnet(PythonPackage):
"""execnet provides a share-nothing model with channel-send/receive
communication for distributing execution across many Python interpreters
across version, platform and network barriers."""
homepage = "http://codespeak.net/execnet"
url = "https://pypi.io/packages/source/e/execnet/execnet-1.4.1.tar.gz"
version('1.4.1', '0ff84b6c79d0dafb7e2971629c4d127a')
depends_on('py-setuptools', type='build')
depends_on('py-apipkg@1.4:', type=('build', 'run'))
|
Python
| 0
|
@@ -1718,16 +1718,67 @@
build')%0A
+ depends_on('py-setuptools-scm', type='build')%0A
depe
|
fe0d9a4de405d4324f33edf33b84ff22016c1ac1
|
Remove model details from raw machine output.
|
lib/python2.6/aquilon/server/formats/machine.py
|
lib/python2.6/aquilon/server/formats/machine.py
|
# ex: set expandtab softtabstop=4 shiftwidth=4: -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2008,2009,2010 Contributor
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the EU DataGrid Software License. You should
# have received a copy of the license with this program, and the
# license is published at
# http://eu-datagrid.web.cern.ch/eu-datagrid/license.html.
#
# THE FOLLOWING DISCLAIMER APPLIES TO ALL SOFTWARE CODE AND OTHER
# MATERIALS CONTRIBUTED IN CONNECTION WITH THIS PROGRAM.
#
# THIS SOFTWARE IS LICENSED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE AND ANY WARRANTY OF NON-INFRINGEMENT, ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THIS
# SOFTWARE MAY BE REDISTRIBUTED TO OTHERS ONLY BY EFFECTIVELY USING
# THIS OR ANOTHER EQUIVALENT DISCLAIMER AS WELL AS ANY OTHER LICENSE
# TERMS THAT MAY APPLY.
"""Machine formatter."""
from aquilon import const
from aquilon.server.formats.formatters import ObjectFormatter
from aquilon.server.formats.list import ListFormatter
from aquilon.aqdb.model import Machine
class MachineInterfacePair(tuple):
"""Encapsulates a (machine, selected interface) pair"""
pass
class MachineInterfacePairFormatter(ObjectFormatter):
def csv_fields(self, item):
machine = item[0]
interface = item[1]
details = [machine.name, machine.location.rack.name,
machine.location.building.name, machine.model.vendor.name,
machine.model.name, machine.serial_no]
if interface:
details.extend([interface.name, interface.mac])
if interface.system:
details.append(interface.system.ip)
else:
details.append(None)
else:
details.extend([None, None, None])
return details
ObjectFormatter.handlers[MachineInterfacePair] = MachineInterfacePairFormatter()
class MachineFormatter(ObjectFormatter):
def format_raw(self, machine, indent=""):
details = [indent + "%s: %s" %
(machine.model.machine_type.capitalize(), machine.name)]
if machine.host:
details.append(indent + " Allocated to host: %s [%s]"
% (machine.host.fqdn, machine.host.ip))
if machine.cluster:
details.append(indent + \
" Hosted by {0:c}: {0.name}".format(machine.cluster))
for manager in machine.manager:
details.append(indent + " Manager: %s [%s]" % (manager.fqdn,
manager.ip))
for dbauxiliary in machine.auxiliaries:
details.append(indent + " Auxiliary: %s [%s]" % (
dbauxiliary.fqdn, dbauxiliary.ip))
# This is a bit of a hack. Delegating out to the standard location
# formatter now spews too much information about chassis. Maybe
# that will change when chassis has a corresponding hardware type.
for location_type in const.location_types:
if getattr(machine.location, location_type, None) is not None:
loc = getattr(machine.location, location_type)
details.append(indent + " {0:c}: {0.name}".format(loc))
if location_type == 'rack':
details.append(indent + " Row: %s" %
machine.location.rack.rack_row)
details.append(indent + " Column: %s" %
machine.location.rack.rack_column)
for slot in machine.chassis_slot:
details.append(indent + " {0:c}: {0.fqdn}".format(slot.chassis))
details.append(indent + " Slot: %d" % slot.slot_number)
details.append(self.redirect_raw(machine.model, indent + " "))
details.append(indent + " Cpu: %s x %d" %
(machine.cpu, machine.cpu_quantity))
details.append(indent + " Memory: %d MB" % machine.memory)
if machine.serial_no:
details.append(indent + " Serial: %s" % machine.serial_no)
for d in machine.disks:
extra = d.disk_type
if d.disk_type == "nas" and d.service_instance:
extra = extra + " from " + d.service_instance.name
details.append(indent + " Disk: %s %d GB %s (%s)"
% (d.device_name, d.capacity, d.controller_type,
extra))
for i in machine.interfaces:
details.append(self.redirect_raw(i, indent + " "))
if machine.comments:
details.append(indent + " Comments: %s" % machine.comments)
return "\n".join(details)
def get_header(self):
"""This is just an idea... not used anywhere (yet?)."""
return "machine,rack,building,vendor,model,serial,interface,mac,ip"
def csv_tolist(self, machine):
if machine.interfaces:
return [MachineInterfacePair((machine, i))
for i in machine.interfaces]
else:
return [MachineInterfacePair((machine, None))]
ObjectFormatter.handlers[Machine] = MachineFormatter()
class SimpleMachineList(list):
pass
class SimpleMachineListFormatter(ListFormatter):
def format_raw(self, smlist, indent=""):
return str("\n".join([indent + machine.name for machine in smlist]))
ObjectFormatter.handlers[SimpleMachineList] = SimpleMachineListFormatter()
class MachineMacList(list):
""" Holds MAC, machine-name [, hostname] """
pass
class MachineMacListFormatter(ListFormatter):
def csv_fields(self, result):
return result
ObjectFormatter.handlers[MachineMacList] = MachineMacListFormatter()
|
Python
| 0
|
@@ -4451,54 +4451,108 @@
end(
-self.redirect_raw(machine.model, indent + %22 %22
+indent + %22 %7B0:c%7D: %7B0.name%7D %7B1:c%7D: %7B1.name%7D%22.format(%0A machine.model.vendor, machine.model
))%0A
|
8c6793f62658c116a991800502b2dd3ee1463206
|
add pagination generic note
|
snug/pagination.py
|
snug/pagination.py
|
"""Tools for pagination.
.. versionadded:: 1.2
"""
import abc
import typing as t
from operator import attrgetter
from .compat import HAS_PEP492, PY3
from .query import Query, async_executor, executor
__all__ = [
'paginated',
'Page',
'Pagelike',
]
AsyncIterator = t.AsyncIterator if HAS_PEP492 else t.Iterable
T = t.TypeVar('T')
if HAS_PEP492: # pragma: no cover
from ._async import AsyncPaginator
class Pagelike(t.Generic[T]):
"""Abstract base class for page-like objects.
Any object implementing the attributes
:py:attr:`~Pagelike.content` and :py:attr:`~Pagelike.next`
implements this interface.
A query returning such an object may be :class:`paginated`.
"""
__slots__ = ()
@abc.abstractproperty
def content(self):
"""The contents of the page.
Returns
-------
T
The page content.
"""
raise NotImplementedError()
@abc.abstractproperty
def next(self):
"""The query to retrieve the next page,
or ``None`` if there is no next page.
Returns
-------
~snug.Query[Pagelike[T]]] or None
The next query.
"""
raise NotImplementedError()
class Page(Pagelike[T]):
"""A simple :class:`Pagelike` object
Parameters
----------
content: T
The page content.
next: ~snug.Query[Pagelike[T]]] or None
The query to retrieve the next page.
"""
__slots__ = '_content', '_next'
def __init__(self, content, next=None):
self._content, self._next = content, next
content = property(attrgetter('_content'))
next = property(attrgetter('_next'))
def __repr__(self):
return 'Page({})'.format(self._content)
class paginated(Query[t.Union[t.Iterator[T], AsyncIterator[T]]]):
"""A paginated version of a query.
Executing it returns an :term:`iterator`
or :term:`async iterator <asynchronous iterator>`.
If the wrapped query is reusable,
the paginated query is also reusable.
Parameters
----------
query: Query[Pagelike[T]]
The query to paginate.
This query must return a :class:`Pagelike` object.
Note
----
Async iterators were introduced in
`PEP 492 <https://www.python.org/dev/peps/pep-0492>`_.
Therefore, async execution of :class:`paginated`
queries is only supported on python 3.5.2+.
Example
-------
.. code-block:: python
def foo_page(...) -> Query[Pagelike[Foo]] # example query
...
return Page(...)
query = paginated(foo_page(...))
for foo in execute(query):
...
async for foo in execute_async(query): # python 3.5.2+ only
...
"""
__slots__ = '_query'
def __init__(self, query):
self._query = query
def __execute__(self, client, auth):
"""Execute the paginated query.
Returns
-------
~typing.Iterator[T]
An iterator yielding page content.
"""
return Paginator(self._query, executor(client=client, auth=auth))
if HAS_PEP492:
def __execute_async__(self, client, auth):
"""Execute the paginated query asynchronously.
Note
----
This method does not need to be awaited.
Returns
-------
~typing.AsyncIterator[T]
An asynchronous iterator yielding page content.
"""
return AsyncPaginator(self._query,
async_executor(client=client, auth=auth))
else: # pragma: no cover
def __execute_async__(self, client, auth):
raise NotImplementedError(
'async execution of paginated queries is python 3.5.2+ only')
def __repr__(self):
return 'paginated({})'.format(self._query)
class Paginator(t.Iterator[T]):
"""An iterator which keeps executing the next query in the page sequece,
returning the page content."""
__slots__ = '_executor', '_next_query'
def __init__(self, next_query, executor):
self._next_query, self._executor = next_query, executor
def __iter__(self):
return self
def __next__(self):
if self._next_query is None:
raise StopIteration()
page = self._executor(self._next_query)
self._next_query = page.next
return page.content
if not PY3: # pragma: no cover
next = __next__
|
Python
| 0
|
@@ -694,16 +694,293 @@
inated%60.
+%0A%0A Note%0A ----%0A Pagelike is a :class:%60~typing.Generic%60.%0A This means you may write %60%60Pagelike%5B%3Ctype-of-content%3E%5D%60%60%0A as a descriptive type annotation.%0A%0A For example: %60%60Pagelike%5BList%5Bstr%5D%5D%60%60 indicates a page-like%0A object whose %60%60content%60%60 is a list of strings.
%0A %22%22%22
|
98e2e0bdefd3fb7941d589c01b7a7fa92f8375e6
|
add fuzzing test for ZstdDecompressor.write_to()
|
tests/test_decompressor_fuzzing.py
|
tests/test_decompressor_fuzzing.py
|
import os
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
import hypothesis
import hypothesis.strategies as strategies
except ImportError:
raise unittest.SkipTest('hypothesis not available')
import zstd
from . common import (
random_input_data,
)
@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')
class TestDecompressor_multi_decompress_to_buffer_fuzzing(unittest.TestCase):
@hypothesis.given(original=strategies.lists(strategies.sampled_from(random_input_data()),
min_size=1, max_size=1024),
threads=strategies.integers(min_value=1, max_value=8),
use_dict=strategies.booleans())
def test_data_equivalence(self, original, threads, use_dict):
kwargs = {}
if use_dict:
kwargs['dict_data'] = zstd.ZstdCompressionDict(original[0])
cctx = zstd.ZstdCompressor(level=1, threads=-1,
write_content_size=True,
write_checksum=True,
**kwargs)
frames_buffer = cctx.multi_compress_to_buffer(original)
dctx = zstd.ZstdDecompressor(**kwargs)
result = dctx.multi_decompress_to_buffer(frames_buffer)
self.assertEqual(len(result), len(original))
for i, frame in enumerate(result):
self.assertEqual(frame.tobytes(), original[i])
frames_list = [f.tobytes() for f in frames_buffer]
result = dctx.multi_decompress_to_buffer(frames_list)
self.assertEqual(len(result), len(original))
for i, frame in enumerate(result):
self.assertEqual(frame.tobytes(), original[i])
|
Python
| 0
|
@@ -1,12 +1,22 @@
+import io%0A
import os%0A%0At
@@ -13,16 +13,16 @@
port os%0A
-
%0Atry:%0A
@@ -280,16 +280,31 @@
mport (%0A
+ make_cffi,%0A
rand
@@ -322,16 +322,1157 @@
ta,%0A)%0A%0A%0A
+@unittest.skipUnless('ZSTD_SLOW_TESTS' in os.environ, 'ZSTD_SLOW_TESTS not set')%0A@make_cffi%0Aclass TestDecompressor_write_to_fuzzing(unittest.TestCase):%0A @hypothesis.given(original=strategies.sampled_from(random_input_data()),%0A level=strategies.integers(min_value=1, max_value=5),%0A write_size=strategies.integers(min_value=1, max_value=8192),%0A input_sizes=strategies.streaming(%0A strategies.integers(min_value=1, max_value=4096)))%0A def test_write_size_variance(self, original, level, write_size, input_sizes):%0A input_sizes = iter(input_sizes)%0A%0A cctx = zstd.ZstdCompressor(level=level)%0A frame = cctx.compress(original)%0A%0A dctx = zstd.ZstdDecompressor()%0A source = io.BytesIO(frame)%0A dest = io.BytesIO()%0A%0A with dctx.write_to(dest, write_size=write_size) as decompressor:%0A while True:%0A chunk = source.read(next(input_sizes))%0A if not chunk:%0A break%0A%0A decompressor.write(chunk)%0A%0A self.assertEqual(dest.getvalue(), original)%0A%0A%0A
@unittes
|
baed814d73ea645794d172614bb79f456730b42c
|
Fix auth providers to work around Python's broken import system.
|
apps/auth/providers.py
|
apps/auth/providers.py
|
# Universal Subtitles, universalsubtitles.org
#
# Copyright (C) 2012 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
authentication_provider_registry = {}
def add_authentication_provider(ap_class):
if ap_class.code in authentication_provider_registry:
if authentication_provider_registry[ap_class.code] != ap_class:
assert False, "Authentication provider code collision!"
authentication_provider_registry[ap_class.code] = ap_class
def get_authentication_provider(key):
return authentication_provider_registry.get(key)
def get_authentication_provider_choices():
choices = []
for provider in authentication_provider_registry.values():
choices.append((provider.code, provider.verbose_name))
return choices
class AuthenticationProvider(object):
"""The base class that other authentication providers should implement.
In a nutshell, an AuthenticationProvider is a simple class that has:
* A code attribute. This should be a unique string less than
24 characters long that will be stored as an attribute of Teams.
* A verbose_name attribute, for admin labels.
* A url() method, which takes a TeamMember object and a "next" URL, and
returns the URL we should send the user to where they can log in with the
provider.
* An image_url() method, which returns the URL for an image we should
display to the user when they're deciding whether or not to continue and
log in.
"""
code = None
verbose_name = None
def url(self, member, next=None):
"""Return the URL someone should be sent to where they will log in."""
assert False, "Not Implemented"
def image_url(self):
"""Return the URL of an image to display (probably a logo) or None."""
assert False, "Not Implemented"
class SampleAuthProvider(AuthenticationProvider):
code = 'sample'
verbose_name = 'Sample Provider'
def url(self, member, next=None):
return 'http://example.com/'
def image_url(self):
return 'http://placekitten.com/200/200/'
# add_authentication_provider(SampleAuthProvider)
|
Python
| 0
|
@@ -770,41 +770,150 @@
l.%0A%0A
-%0Aauthentication_provider_registry
+from django.conf import settings%0A%0A%0Aif not hasattr(settings, 'AUTHENTICATION_PROVIDER_REGISTRY'):%0A settings.AUTHENTICATION_PROVIDER_REGISTRY
= %7B
@@ -950,21 +950,24 @@
ider(ap_
-class
+instance
):%0A i
@@ -971,21 +971,24 @@
if ap_
-class
+instance
.code in
@@ -992,94 +992,115 @@
in
-authentication_provider_registry:%0A if authentication_provider_registry%5Bap_class
+settings.AUTHENTICATION_PROVIDER_REGISTRY:%0A if settings.AUTHENTICATION_PROVIDER_REGISTRY%5Bap_instance
.cod
@@ -1108,21 +1108,24 @@
%5D != ap_
-class
+instance
:%0A
@@ -1195,49 +1195,61 @@
-authentication_provider_registry%5Bap_class
+settings.AUTHENTICATION_PROVIDER_REGISTRY%5Bap_instance
.cod
@@ -1256,21 +1256,24 @@
e%5D = ap_
-class
+instance
%0A%0Adef ge
@@ -1319,40 +1319,49 @@
urn
-authentication_provider_registry
+settings.AUTHENTICATION_PROVIDER_REGISTRY
.get
@@ -1451,40 +1451,49 @@
in
-authentication_provider_registry
+settings.AUTHENTICATION_PROVIDER_REGISTRY
.val
@@ -2001,32 +2001,8 @@
s a
-TeamMember object and a
%22nex
@@ -2012,22 +2012,16 @@
URL, and
-%0A
returns
@@ -2038,16 +2038,22 @@
e should
+%0A
send th
@@ -2092,22 +2092,16 @@
with the
-%0A
provide
@@ -2746,32 +2746,24 @@
ef url(self,
- member,
next=None):
|
8f429a41f3541c5f32a9809a529dd800f7dafa0a
|
Fix log output for Docker daemonised
|
temp2dash.py
|
temp2dash.py
|
import json
import os
import requests
import sys
import time
import traceback
from temperusb import TemperHandler
URL = os.environ['DASHING_URL']
SCALE = float(os.environ['TEMP_SCALE'])
OFFSET = float(os.environ['TEMP_OFFSET'])
SENSOR = int(os.environ['TEMP_SENSOR'])
SLEEP = int(os.environ['SLEEP_TIME'])
th = TemperHandler()
devs = th.get_devices()
if len(devs) != 1:
print "Expected exactly one TEMPer device, found %d" % len(devs)
sys.exit(1)
dev = devs[0]
dev.set_calibration_data(scale=SCALE, offset=OFFSET)
while True:
try:
temperature = dev.get_temperature(sensor=SENSOR)
except Exception, err:
print "\nException on getting temperature\n"
print traceback.format_exc()
payload = {
'auth_token': 'abcdefghijklmnopqrstuvwxyz',
'temperature': '%0.0f%s' % (
temperature,
u'\N{DEGREE SIGN}',
),
}
sys.stdout.write(u'%0.1f%s, ' % (
temperature,
u'\N{DEGREE SIGN}',
))
sys.stdout.flush()
try:
post = requests.post(URL, data=json.dumps(payload))
except Exception, err:
print "\nException on posting temperature to dashing\n"
print traceback.format_exc()
if post.status_code != 204:
print "\nHTTP status from POST was %s (expected 204)\n" % post.status_code
time.sleep(SLEEP)
|
Python
| 0.000002
|
@@ -519,16 +519,26 @@
FFSET)%0A%0A
+chars = 0%0A
while Tr
@@ -907,48 +907,104 @@
s
-ys.stdout.write(u
+tring = %22%22%0A if chars %3E 72:%0A chars = 0%0A string = %22%5Cn%22 %0A string +=
'%250.1f
-%25s
, ' %25
- (%0A
tem
@@ -1015,43 +1015,61 @@
ture
-,%0A
+%0A
- u'%5CN%7BDEGREE SIGN%7D',%0A )
+chars += len(string)%0A sys.stdout.write(string
)%0A
|
bbd4f595fe83e9ce4881990be4c16ba53cefa5d8
|
fix small bug in rewind with scavenger
|
solver/comeback.py
|
solver/comeback.py
|
import utils.log
class ComeBack(object):
# object to handle the decision to choose the next area when all locations have the "no comeback" flag.
# handle rewinding to try the next area in case of a stuck.
# one ComebackStep object is created each time we have to use the no comeback heuristic, used for rewinding.
def __init__(self, solver):
self.comeBackSteps = []
# used to rewind
self.solver = solver
self.log = utils.log.get('Rewind')
def handleNoComeBack(self, locations, cur):
# return True if a rewind is needed. choose the next area to use
solveAreas = {}
locsCount = 0
for loc in locations:
if self.solver.majorsSplit != 'Full':
if loc.isClass(self.solver.majorsSplit) or loc.isBoss():
if loc.comeBack is None:
return False
elif loc.comeBack == True:
return False
else:
if loc.comeBack is None:
return False
if loc.comeBack == True:
return False
locsCount += 1
if loc.SolveArea in solveAreas:
solveAreas[loc.SolveArea] += 1
else:
solveAreas[loc.SolveArea] = 1
# only minors locations, or just one major, no need for a rewind step
if locsCount < 2:
return False
# only one solve area, no need for come back
if len(solveAreas) == 1:
self.log.debug("handleNoComeBack: only one solve area")
return False
self.log.debug("WARNING: use no come back heuristic for {} locs in {} solve areas ({})".format(locsCount, len(solveAreas), solveAreas))
# check if we can use an existing step
if len(self.comeBackSteps) > 0:
lastStep = self.comeBackSteps[-1]
if lastStep.cur == cur:
self.log.debug("Use last step at {}".format(cur))
return lastStep.next(locations)
elif self.reuseLastStep(lastStep, solveAreas):
self.log.debug("Reuse last step at {}".format(lastStep.cur))
if self.visitedAllLocsInArea(lastStep, locations):
return lastStep.next(locations)
else:
self.log.debug("There's still locations in the current solve area, visit them first")
return False
else:
self.log.debug("cur: {}, lastStep.cur: {}, don't use lastStep.next()".format(cur, lastStep.cur))
# create a step
self.log.debug("Create new step at {}".format(cur))
lastStep = ComeBackStep(solveAreas, cur)
self.comeBackSteps.append(lastStep)
return lastStep.next(locations, self.solver.getPriorityArea())
def reuseLastStep(self, lastStep, solveAreas):
# reuse the last step if all solve areas are included in last step to avoid creating too many.
# to avoid issues when a solve area from the previous step can't be reached from the current solve area,
# check that we have the same number of solve areas in both steps before reusing.
if len(solveAreas) != len(lastStep.solveAreas):
return False
for area in solveAreas:
# new solve area, don't reuse
if area not in lastStep.solveAreas:
return False
# more locations available in new step, don't reuse old one
if solveAreas[area] > lastStep.solveAreas[area]:
return False
return True
def visitedAllLocsInArea(self, lastStep, locations):
for loc in locations:
if loc.difficulty == True and loc.SolveArea == lastStep.curSolveArea:
return False
return True
def cleanNoComeBack(self, locations):
for loc in locations:
loc.areaWeight = None
def rewind(self, cur):
# come back to the previous step
# if no more rewinds available: tell we're stuck by returning False
if len(self.comeBackSteps) == 0:
self.log.debug("No more steps to rewind")
return False
self.log.debug("Start rewind, current: {}".format(cur))
while len(self.comeBackSteps) > 0:
lastStep = self.comeBackSteps[-1]
if not lastStep.moreAvailable():
self.log.debug("last step has been fully visited, go up one more time")
self.comeBackSteps.pop()
if len(self.comeBackSteps) == 0:
self.log.debug("No more steps to rewind")
return False
self.log.debug("Rewind to previous step at {}".format(self.comeBackSteps[-1].cur))
else:
break
count = cur - lastStep.cur
if count == 0:
self.log.debug("Can't rewind, it's buggy here !")
return False
self.solver.cancelLastItems(count)
# we've rewind, we may no longer be able to kill mother brain
self.solver.motherBrainCouldBeKilled = False
self.log.debug("Rewind {} items to {}".format(count, lastStep.cur))
return True
class ComeBackStep(object):
# one case of no come back decision
def __init__(self, solveAreas, cur):
self.visitedSolveAreas = []
self.solveAreas = solveAreas
self.cur = cur
self.curSolveArea = None
self.log = utils.log.get('RewindStep')
self.log.debug("create rewind step: {} {}".format(cur, solveAreas))
def moreAvailable(self):
self.log.debug("moreAvailable: cur: {} len(visited): {} len(areas): {}".format(self.cur, len(self.visitedSolveAreas), len(self.solveAreas)))
return len(self.visitedSolveAreas) < len(self.solveAreas)
def next(self, locations, priorityArea=None):
# use next available area, if all areas have been visited return True (stuck), else False
if not self.moreAvailable():
self.log.debug("rewind: all areas have been visited, stuck")
return True
self.log.debug("rewind next, solveAreas: {} visitedSolveAreas: {}".format(self.solveAreas, self.visitedSolveAreas))
maxAreaName = ""
if priorityArea is not None and priorityArea in self.solveAreas:
self.visitedSolveAreas.append(priorityArea)
self.curSolveArea = priorityArea
else:
# get area with max available locs
maxAreaWeigth = 0
for solveArea in sorted(self.solveAreas):
if solveArea in self.visitedSolveAreas:
continue
else:
if self.solveAreas[solveArea] > maxAreaWeigth:
maxAreaWeigth = self.solveAreas[solveArea]
maxAreaName = solveArea
self.visitedSolveAreas.append(maxAreaName)
self.curSolveArea = maxAreaName
self.log.debug("rewind next area: {}".format(self.curSolveArea))
outWeight = 10000
retSolveAreas = {}
for solveArea in self.solveAreas:
if solveArea == maxAreaName:
retSolveAreas[solveArea] = 1
else:
retSolveAreas[solveArea] = outWeight
# update locs
for loc in locations:
solveArea = loc.SolveArea
if solveArea in retSolveAreas:
loc.areaWeight = retSolveAreas[loc.SolveArea]
self.log.debug("rewind loc {} new areaWeight: {}".format(loc.Name, loc.areaWeight))
else:
# can happen if going to the first area unlocks new areas,
# or for minors locations when we no longer need minors.
loc.areaWeight = outWeight
self.log.debug("rewind loc {} from area {} not in original areas".format(loc.Name, solveArea))
return False
|
Python
| 0.000001
|
@@ -7181,27 +7181,33 @@
Area ==
-maxAreaName
+self.curSolveArea
:%0A
|
fd380c79b9644e6a51086e590812aef6e9377a22
|
Add test case to reproduce dnsmasq.set_config failure in #34263
|
tests/unit/modules/dnsmasq_test.py
|
tests/unit/modules/dnsmasq_test.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rupesh Tare <rupesht@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
mock_open,
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
from salt.modules import dnsmasq
# Import python libs
import os
# Globals
dnsmasq.__salt__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class DnsmasqTestCase(TestCase):
'''
TestCase for the salt.modules.at module
'''
def test_version(self):
'''
test to show installed version of dnsmasq.
'''
mock = MagicMock(return_value='A B C')
with patch.dict(dnsmasq.__salt__, {'cmd.run': mock}):
self.assertEqual(dnsmasq.version(), "C")
def test_fullversion(self):
'''
Test to Show installed version of dnsmasq and compile options.
'''
mock = MagicMock(return_value='A B C\nD E F G H I')
with patch.dict(dnsmasq.__salt__, {'cmd.run': mock}):
self.assertDictEqual(dnsmasq.fullversion(),
{'version': 'C',
'compile options': ['G', 'H', 'I']})
def test_set_config(self):
'''
test to show installed version of dnsmasq.
'''
mock = MagicMock(return_value={'conf-dir': 'A'})
with patch.object(dnsmasq, 'get_config', mock):
mock = MagicMock(return_value=['.', '~', 'bak', '#'])
with patch.object(os, 'listdir', mock):
self.assertDictEqual(dnsmasq.set_config(), {})
def test_get_config(self):
'''
test to dumps all options from the config file.
'''
mock = MagicMock(return_value={'conf-dir': 'A'})
with patch.object(dnsmasq, 'get_config', mock):
mock = MagicMock(return_value=['.', '~', 'bak', '#'])
with patch.object(os, 'listdir', mock):
self.assertDictEqual(dnsmasq.get_config(), {'conf-dir': 'A'})
def test_parse_dnamasq(self):
'''
test for generic function for parsing dnsmasq files including includes.
'''
text_file_data = '\n'.join(["line here", "second line", "A=B", "#"])
with patch('salt.utils.fopen',
mock_open(read_data=text_file_data),
create=True) as m:
m.return_value.__iter__.return_value = text_file_data.splitlines()
self.assertDictEqual(dnsmasq._parse_dnamasq('filename'),
{'A': 'B',
'unparsed': ['line here',
'second line']})
if __name__ == '__main__':
from integration import run_tests
run_tests(DnsmasqTestCase, needs_daemon=False)
|
Python
| 0
|
@@ -1658,24 +1658,932 @@
fig(), %7B%7D)%0A%0A
+ @patch('salt.modules.dnsmasq.get_config', MagicMock(return_value=%7B'conf-dir': 'A'%7D))%0A def test_set_config_filter_pub_kwargs(self):%0A '''%0A Test that the kwargs returned from running the set_config function%0A do not contain the __pub that may have been passed through in **kwargs.%0A '''%0A mock_domain = 'local'%0A mock_address = '/some-test-address.local/8.8.4.4'%0A with patch.dict(dnsmasq.__salt__, %7B'file.append': MagicMock()%7D):%0A ret = dnsmasq.set_config(follow=False,%0A domain=mock_domain,%0A address=mock_address,%0A __pub_pid=8184,%0A __pub_jid=20161101194639387946,%0A __pub_tgt='salt-call')%0A self.assertEqual(ret, %7B'domain': mock_domain, 'address': mock_address%7D)%0A%0A
def test
|
2186fa0cb389bc8e458eb0f276bc56aefbe34a29
|
add another multi-root case
|
tests/utils/test_floyd_warshall.py
|
tests/utils/test_floyd_warshall.py
|
import unittest
from nalaf.structures.data import Dataset, Document, Part, Token, Label, Entity
from nalaf.preprocessing.spliters import NLTKSplitter
from nalaf import print_verbose, print_debug
from nalaf.preprocessing.tokenizers import Tokenizer, NLTK_TOKENIZER, GenericTokenizer
from nalaf.features import get_spacy_nlp_english
from nalaf.preprocessing.parsers import Parser, SpacyParser
from nalaf.utils.floyd_warshall import compute_shortest_paths, path
import sys
STUB_ENTITY_CLASS_ID_1 = 'e_x_1'
STUB_ENTITY_CLASS_ID_2 = 'e_x_2'
STUB_RELATION_CLASS_ID_2 = 'r_x'
TEST_SENTENCES_SINGLE_ROOT = [
"Arabidopsis cotyledon - specific chloroplast biogenesis factor CYO1 is a protein disulfide isomerase .",
"FKBP12-rapamycin target TOR2 is a vacuolar protein with an associated phosphatidylinositol-4 kinase activity .",
"TMEM59 was found to be a ubiquitously expressed , Golgi - localized protein .",
"This indicates that Mdv1p possesses a Dnm1p - independent mitochondrial targeting signal .",
"Dnm1p - independent targeting of Mdv1p to mitochondria requires MDV2 .",
"The activated ROP11 recruits MIDD1 to induce local disassembly of cortical microtubules .",
"Conversely , cortical microtubules eliminate active ROP11 from the plasma membrane through MIDD1 .",
"GOLPH3L antagonizes GOLPH3 to determine Golgi morphology .",
"HERC2 coordinates ubiquitin - dependent assembly of DNA repair factors on damaged chromosomes .",
"Pivotal role of AtSUVH2 in heterochromatic histone methylation and gene silencing in Arabidopsis .",
"PHAX and CRM1 are required sequentially to transport U3 snoRNA to nucleoli .",
"CpSufE activates the cysteine desulfurase CpNifS for chloroplastic Fe - S cluster formation .",
"YMR313c/TGL3 encodes a novel triacylglycerol lipase located in lipid particles of Saccharomyces cerevisiae .",
"However , overexpression of ATG21 leads to CPY secretion .",
"PP2A colocalizes with shugoshin at centromeres and is required for centromeric protection .",
]
TEST_SENTENCES_MULTI_ROOT = [
# SS
"Import assays with pea ( Pisum sativum ) chloroplasts showed that PyrR and PyrD are taken up and proteolytically processed ."
]
class TestFloydWarshall(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.dataset = Dataset()
doc1 = Document()
cls.dataset.documents['TEST_SENTENCES_SINGLE_ROOT'] = doc1
for s in TEST_SENTENCES_SINGLE_ROOT:
part = Part(s)
doc1.parts[s] = part
doc2 = Document()
cls.dataset.documents['TEST_SENTENCES_MULTI_ROOT'] = doc2
for s in TEST_SENTENCES_MULTI_ROOT:
part = Part(s)
doc2.parts[s] = part
cls.nlp = get_spacy_nlp_english(load_parser=True)
cls.parser = SpacyParser(cls.nlp)
cls.splitter = NLTKSplitter()
cls.tokenizer = GenericTokenizer(lambda string: (tok.text for tok in cls.nlp.tokenizer(string)))
cls.splitter.split(cls.dataset)
cls.tokenizer.tokenize(cls.dataset)
cls.parser.parse(cls.dataset)
cls.computed_sentences = []
for sentence in cls.dataset.sentences():
dist, then = compute_shortest_paths(sentence)
cls.computed_sentences.append((dist, then, sentence))
def test_distance_u_u_is_0(self):
for dist, then, sentence in self.computed_sentences:
V = len(sentence)
for u in range(V):
self.assertEqual(0, dist[u, u])
def test_distance_u_v_is_v_u(self):
for dist, then, sentence in self.computed_sentences:
V = len(sentence)
for u in range(V):
for v in range(V):
self.assertEqual(dist[u, v], dist[v, u])
if u != v:
self.assertTrue(dist[u, v] > 0)
u_dep_from = sentence[u].features['dependency_from']
v_dep_from = sentence[v].features['dependency_from']
both_are_multi_root = u_dep_from is None and v_dep_from is None
assert not both_are_multi_root, (u, v, sentence)
are_bidirectionaly_directly_connected = (
(u_dep_from is None or u_dep_from[0].features['id'] == v) or
(v_dep_from is None or v_dep_from[0].features['id'] == u))
if are_bidirectionaly_directly_connected and not both_are_multi_root:
self.assertEqual(dist[u, v], 1, (u, v, sentence[u], sentence[v], "|", sentence))
else:
self.assertTrue(dist[u, v] >= 2)
def test_path_u_v_is_reverseof_v_u(self):
for dist, then, sentence in self.computed_sentences:
V = len(sentence)
for u in range(V):
for v in range(V):
uv = path(u, v, dist, then, sentence)
vu = path(v, u, dist, then, sentence)
print("path of:", u, "to", v, "==", uv, "|||", sentence)
self.assertEqual(uv, list(reversed(vu)))
# assert len(uv) >= 1, "This still fails with multi roots"
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000003
|
@@ -2201,16 +2201,372 @@
essed .%22
+,%0A %22Consistent with this inference , Arabidopsis or maize ( Zea mays ) PyrR ( At3g47390 or GRMZM2G090068 ) restored riboflavin prototrophy to an E. coli ribD deletant strain when coexpressed with the corresponding PyrD protein ( At4g20960 or GRMZM2G320099 ) but not when expressed alone ; the COG3236 domain was unnecessary for complementing activity .%22
%0A%5D%0A%0Aclas
@@ -5525,24 +5525,55 @@
rsed(vu)))%0A%0A
+ # TODO #28%0A
@@ -5607,20 +5607,15 @@
1,
+(
%22This
-still
fail
@@ -5621,16 +5621,30 @@
ls with
+non-connected
multi ro
@@ -5647,16 +5647,27 @@
i roots%22
+, sentence)
%0A%0A%0Aif __
|
6e9f329f5a770955370e93c926c25d511ba8b981
|
Update the_ends_test/FunctionsUnitTest.py
|
the_ends_test/FunctionsUnitTest.py
|
the_ends_test/FunctionsUnitTest.py
|
import unittest
from the_ends.functions import function_finder
class TheEndsTestCases(unittest.TestCase):
def setUp(self):
pass
# before test cases
def tearDown(self):
pass
# after test cases
def test_isupper(self):
# example test
self.assertTrue('FOO'.isupper())
self.assertFalse('Foo'.isupper())
def test_split(self):
# example test
s = 'hello world'
self.assertEqual(s.split(), ['hello', 'world'])
# check that s.split fails when the separator is not a string
with self.assertRaises(TypeError):
s.split(2)
def test_empty_test(self):
# checks for empty import pass
equ = ' '
fun_test = function_finder(equ)
self.assertFalse(fun_test)
def test_multiple_single_line(self):
# checks for multiple functions called in a single line
ans = [
['function1', 'function2'],
[{1: '( x', 2: 'x', 3: ' 5/4', 4: ' 12^(2-1))', 5: ' 8'}, {1: 'y', 2: ' 7', 3: ' 11*(y-3)'}]
]
equ = 'x=2*5+function1(( x=x, 5/4, 12^(2-1)), 8) * function2(y, 7, 11*(y-3))'
fun_test = function_finder(equ)
self.assertEqual(fun_test, ans)
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000003
|
@@ -56,16 +56,59 @@
_finder%0A
+import sys%0A%0Asys.path.insert(0, '/the_ends')
%0A%0Aclass
|
76829380376c31ea3f1e899770d1edffd1afc047
|
Change gravatar url to use https
|
apps/profiles/utils.py
|
apps/profiles/utils.py
|
import hashlib
def get_gravatar_url(email):
email_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
return "http://www.gravatar.com/avatar/{}".format(email_hash)
|
Python
| 0
|
@@ -127,16 +127,17 @@
rn %22http
+s
://www.g
|
c7f50eb666423ce3cc08d5e0714f4d18d672d326
|
clean up test
|
corehq/apps/hqadmin/tests/test_utils.py
|
corehq/apps/hqadmin/tests/test_utils.py
|
from django.test import TestCase
from pillowtop.listener import BasicPillow
from corehq.apps.domain.models import Domain
from ..utils import pillow_seq_store, EPSILON
from ..models import PillowCheckpointSeqStore
def import_settings():
class MockSettings(object):
PILLOWTOPS = {'test': ['corehq.apps.hqadmin.tests.test_utils.DummyPillow']}
return MockSettings()
class DummyPillow(BasicPillow):
document_class = Domain
def run(self):
pass
class TestPillowCheckpointSeqStore(TestCase):
def setUp(self):
import pillowtop.run_pillowtop
pillowtop.utils.import_settings = import_settings
self.pillow = DummyPillow()
def test_basic_cloudant_seq(self):
seq = '1-blahblah'
self.pillow.set_checkpoint({'seq': seq})
pillow_seq_store()
store = PillowCheckpointSeqStore.objects.get(checkpoint_id=self.pillow.get_checkpoint()['_id'])
self.assertEquals(store.seq, seq)
def test_basic_couchdb_seq(self):
seq = 100
self.pillow.set_checkpoint({'seq': seq})
pillow_seq_store()
store = PillowCheckpointSeqStore.objects.get(checkpoint_id=self.pillow.get_checkpoint()['_id'])
self.assertEquals(store.seq, str(seq))
def test_small_rewind(self):
"""
We should not notify if the seq is not significantly less than the previous
"""
seq = '10-blahblah'
self.pillow.set_checkpoint({'seq': seq})
pillow_seq_store()
seq_rewind = '9-blahblah'
self.pillow.set_checkpoint({'seq': seq_rewind})
pillow_seq_store()
store = PillowCheckpointSeqStore.objects.get(checkpoint_id=self.pillow.get_checkpoint()['_id'])
self.assertEquals(store.seq, seq_rewind)
def test_large_rewind(self):
"""
We should notify if the seq is significantly less than the previous and not update the seq
"""
seq = '{}-blahblah'.format(EPSILON + 10)
self.pillow.set_checkpoint({'seq': seq})
pillow_seq_store()
seq_rewind = '9-blahblah'
self.pillow.set_checkpoint({'seq': seq_rewind})
pillow_seq_store()
store = PillowCheckpointSeqStore.objects.get(checkpoint_id=self.pillow.get_checkpoint()['_id'])
self.assertEquals(store.seq, seq)
|
Python
| 0.000001
|
@@ -25,16 +25,35 @@
TestCase
+, override_settings
%0Afrom pi
@@ -232,71 +232,122 @@
e%0A%0A%0A
-def import_settings():%0A class MockSettings(object):%0A
+class DummyPillow(BasicPillow):%0A document_class = Domain%0A%0A def run(self):%0A pass%0A%0A%0A@override_settings(
PILL
@@ -352,19 +352,17 @@
LLOWTOPS
- =
+=
%7B'test':
@@ -419,297 +419,77 @@
w'%5D%7D
-%0A%0A return MockSettings()%0A%0A%0Aclass DummyPillow(BasicPillow):%0A document_class = Domain%0A%0A def run(self):%0A pass%0A%0A%0Aclass TestPillowCheckpointSeqStore(TestCase):%0A%0A def setUp(self):%0A import pillowtop.run_pillowtop%0A pillowtop.utils.import_settings = import_settings
+)%0Aclass TestPillowCheckpointSeqStore(TestCase):%0A%0A def setUp(self):
%0A
@@ -735,36 +735,51 @@
_id=self.pillow.
-get_
+checkpoint_manager.
checkpoint()%5B'_i
@@ -768,33 +768,27 @@
r.checkpoint
-()%5B'
_id
-'%5D
)%0A se
@@ -1023,36 +1023,51 @@
_id=self.pillow.
-get_
+checkpoint_manager.
checkpoint()%5B'_i
@@ -1056,33 +1056,27 @@
r.checkpoint
-()%5B'
_id
-'%5D
)%0A se
@@ -1548,36 +1548,51 @@
_id=self.pillow.
-get_
+checkpoint_manager.
checkpoint()%5B'_i
@@ -1581,33 +1581,27 @@
r.checkpoint
-()%5B'
_id
-'%5D
)%0A se
@@ -2123,12 +2123,27 @@
low.
-get_
+checkpoint_manager.
chec
@@ -2152,17 +2152,11 @@
oint
-()%5B'
_id
-'%5D
)%0A
|
11ef828a8180ba17f522e03ac198440feab40aa0
|
Update version
|
apt_select/__init__.py
|
apt_select/__init__.py
|
__version__ = '1.0.1'
|
Python
| 0
|
@@ -12,11 +12,11 @@
= '1.0.
-1
+2
'%0A
|
a3a408b9345291ca9a1999a779879afe0296f0a3
|
Update grayscale.py
|
08_Image_Processing/Color_Spaces/grayscale/grayscale.py
|
08_Image_Processing/Color_Spaces/grayscale/grayscale.py
|
import os, cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
_projectDirectory = os.path.dirname(__file__)
_imagesDirectory = os.path.join(_projectDirectory, "images")
_images = []
for _root, _dirs, _files in os.walk(_imagesDirectory):
for _file in _files:
if _file.endswith(".jpg"):
_images.append(os.path.join(_imagesDirectory, _file))
_imageIndex = 0
_imageTotal = len(_images)
_img1 = cv2.imread(_images[_imageIndex], cv2.IMREAD_UNCHANGED)
_fig = plt.figure("Grayscale Space")
_gs = GridSpec(2, 2)
_fig1 = plt.subplot(_gs[0:2, 0])
_fig1.set_title("RGB Space")
_img1Show = cv2.cvtColor(_img1, cv2.COLOR_BGR2RGB) #for displaying purpose
plt.imshow(_img1Show)
_img2, _img3, _img4 = cv2.split(_img1)
_fig2 = plt.subplot(_gs[0:2, 1])
_fig2.set_title("Grayscale Space")
_img2 = cv2.cvtColor(_img1, cv2.COLOR_BGR2GRAY)
plt.imshow(_img2, cmap = "gray")
plt.tight_layout()
plt.show()
|
Python
| 0.000001
|
@@ -530,31 +530,28 @@
figure(%22
-Grayscale
+Color
Space
+s
%22)%0D%0A_gs
@@ -962,8 +962,10 @@
t.show()
+%0D%0A
|
37167a9473a99931efbc60a8e46400ed017c8fa4
|
set up initial condition arrays
|
Assignment_5_partial_differentials/P440_Assign5_Exp2.py
|
Assignment_5_partial_differentials/P440_Assign5_Exp2.py
|
'''
Kaya Baber
Physics 440 - Computational Physics
Assignment 5 - PDEs
Exploration 2 - Parabolic PDEs: The Wave Equation
'''
import numpy as np
from numpy import linalg as LA
import matplotlib.pyplot as plt
import math
#make initial velocity array in real space
#make initial density array in real space
#fft both to fourier space
#make a column vector of density_f appended to velocity_f
#set the x range to (0->2pi)
#make derivative operator matrix
#i(diag (0,1,2,3,4,.. -4, -3, -2, -1)), where i is imaginary i
#make quad matrix [[I][op],[op][I]] and negative quad [[I][-op],[-op][I]]
#matrix multiply the negative quad by the FFT column vector
#linear algebra solve the pos_quad*newFFTvector = above-result for newFFTvector
#make seperate copies of the
#inverse FFT newFFTvector and append the real parts of velocity and density
def make_banded(N,M):
bandTopBot = [-1.]*(N-1)
bandMid = [2. + (4.*M)/(N**2) ]*N
banded = np.diag(bandMid)
banded = np.add(banded,np.diag(bandTopBot,1))
banded = np.add(banded,np.diag(bandTopBot,-1))
return banded
def make_operator(N,M):
bandedCrank = make_banded(N,M)
negativeCrank = bandedCrank * (-1)
bandedCrank[0] = [1] + [0]*(N-1)
bandedCrank[-1] = [0]*(N-1) + [1]
invertedCrank = LA.inv(bandedCrank)
operatorCrank = invertedCrank.dot(negativeCrank)
return operatorCrank
|
Python
| 0.000001
|
@@ -217,93 +217,450 @@
ath%0A
-%0A#make initial velocity array in real space%0A#make initial density array in real space
+import cmath%0A%0A%0A%0AL = 2.*math.pi #set the x range to (0-%3E2pi)%0AN = 1000 #number of spatial intervals and points (since it loops)%0Asteps = 1000 #number of timesteps%0AstepSize = 0.1 #temporal step size%0A%0A#make initial velocity array in real space%0AvelPhysGauss = np.exp(-10 * ( (np.linspace(0,L,N+1)%5B:-1%5D-math.pi) ** 2)) + %5B0J%5D*N%0A%0A#make initial density array in real space%0AdenPhysFlat = %5B0 + 0J%5D*N%0Aprint denPhysFlat%0Aprint denPhysFlat%5B2%5D+2.
%0A#ff
@@ -745,36 +745,8 @@
y_f%0A
-#set the x range to (0-%3E2pi)
%0A#ma
@@ -916,16 +916,34 @@
op%5D%5BI%5D%5D%0A
+#step forward%0A
#matrix
@@ -994,16 +994,20 @@
vector%0A
+
#linear
@@ -1078,16 +1078,20 @@
Tvector%0A
+
#make se
@@ -1115,623 +1115,251 @@
the
-%0A#inverse FFT newFFTvector and append the real parts of velocity and density%0Adef make_banded(N,M):%0A bandTopBot = %5B-1.%5D*(N-1)%0A bandMid = %5B2. + (4.*M)/(N**2) %5D*N%0A banded = np.diag(bandMid)%0A banded = np.add(banded,np.diag(bandTopBot,1))%0A banded = np.add(banded,np.diag(bandTopBot,-1))%0A return banded%0A %0A %0Adef make_operator(N,M):%0A bandedCrank = make_banded(N,M)%0A negativeCrank = bandedCrank * (-1)%0A bandedCrank%5B0%5D = %5B1%5D + %5B0%5D*(N-1)%0A bandedCrank%5B-1%5D = %5B0%5D*(N-1) + %5B1%5D%0A invertedCrank = LA.inv(bandedCrank)%0A operatorCrank = invertedCrank.dot(negativeCrank)%0A return operatorCrank
+velocity_f and density_f components of newFFTvector%0A #inverse FFT the components and append the real parts of velocity and density to a log%0A#repeat stepping for num steps%0A#plot the velocity and density logs in 3D%0A#maybe make an animation%0A%0A%0A
%0A%0A%0A
|
60b2c0db865fcf09636359888ead82ffc7666ae3
|
Add test for failed login when user is not active
|
yunity/userauth/tests/test_api.py
|
yunity/userauth/tests/test_api.py
|
from django.contrib import auth
from rest_framework import status
from rest_framework.test import APITestCase
from yunity.users.factories import UserFactory
class TestUserAuthAPI(APITestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.user = UserFactory()
cls.url = '/api/auth/'
def test_login(self):
data = {'email': self.user.email, 'password': self.user.display_name}
response = self.client.post(self.url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['email'], self.user.email)
user = auth.get_user(self.client)
self.assertTrue(user.is_authenticated())
def test_no_credentials(self):
data = {}
response = self.client.post(self.url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'email': ['This field is required.'],
'password': ['This field is required.']})
def test_wrong_credentials(self):
data = {'email': self.user.email, 'password': 'wrong_password'}
response = self.client.post(self.url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data['non_field_errors'], ['Unable to login with provided credentials.', ])
def test_status_not_logged_in(self):
response = self.client.get('/api/auth/status/')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
self.assertEqual(response.data['error'], 'not_authed')
def test_status_as_user(self):
self.client.force_login(user=self.user)
response = self.client.get('/api/auth/status/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['display_name'], self.user.display_name)
|
Python
| 0.000001
|
@@ -292,16 +292,73 @@
ctory()%0A
+ cls.disabled_user = UserFactory(is_active=False)%0A
@@ -1505,16 +1505,397 @@
.', %5D)%0A%0A
+ def test_login_as_disabled_user_fails(self):%0A data = %7B'email': self.disabled_user.email, 'password': self.disabled_user.display_name%7D%0A response = self.client.post(self.url, data, format='json')%0A self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)%0A user = auth.get_user(self.client)%0A self.assertFalse(user.is_authenticated())%0A%0A
def
|
67773a4b848d14bf6e6b160eb918e036971b7f0e
|
Use Python 3 type syntax in zerver/webhooks/semaphore/view.py.
|
zerver/webhooks/semaphore/view.py
|
zerver/webhooks/semaphore/view.py
|
# Webhooks for external integrations.
from typing import Any, Dict
import ujson
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.actions import check_send_stream_message
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.models import UserProfile, get_client
@api_key_only_webhook_view('Semaphore')
@has_request_variables
def api_semaphore_webhook(request, user_profile,
payload=REQ(argument_type='body'),
stream=REQ(default='builds')):
# type: (HttpRequest, UserProfile, Dict[str, Any], str) -> HttpResponse
# semaphore only gives the last commit, even if there were multiple commits
# since the last build
branch_name = payload["branch_name"]
project_name = payload["project_name"]
result = payload["result"]
event = payload["event"]
commit_id = payload["commit"]["id"]
commit_url = payload["commit"]["url"]
author_email = payload["commit"]["author_email"]
message = payload["commit"]["message"]
if event == "build":
build_url = payload["build_url"]
build_number = payload["build_number"]
content = u"[build %s](%s): %s\n" % (build_number, build_url, result)
elif event == "deploy":
build_url = payload["build_html_url"]
build_number = payload["build_number"]
deploy_url = payload["html_url"]
deploy_number = payload["number"]
server_name = payload["server_name"]
content = u"[deploy %s](%s) of [build %s](%s) on server %s: %s\n" % \
(deploy_number, deploy_url, build_number, build_url, server_name, result)
else: # should never get here
content = u"%s: %s\n" % (event, result)
content += "!avatar(%s) [`%s`](%s): %s" % (author_email, commit_id[:7],
commit_url, message)
subject = u"%s/%s" % (project_name, branch_name)
check_send_stream_message(user_profile, request.client, stream, subject, content)
return json_success()
|
Python
| 0
|
@@ -555,16 +555,42 @@
uest
-, user_p
+: HttpRequest, user_profile: UserP
rofi
@@ -626,16 +626,32 @@
payload
+: Dict%5Bstr, Any%5D
=REQ(arg
@@ -702,16 +702,21 @@
stream
+: str
=REQ(def
@@ -734,69 +734,8 @@
s'))
-:%0A # type: (HttpRequest, UserProfile, Dict%5Bstr, Any%5D, str)
-%3E
@@ -746,16 +746,17 @@
Response
+:
%0A%0A #
|
753893ac5ddaf6b17454180cea55b2ce0b94b571
|
fix comparación
|
account_analytic_cost_line/models/account.py
|
account_analytic_cost_line/models/account.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Comunitea All Rights Reserved
# $Jesús Ventosinos Mayor <jesus@comunitea.com>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
class AccountAnalyticJournal(models.Model):
_inherit = 'account.analytic.journal'
analytic_cost_journal = fields.Many2one('account.analytic.journal',
'Analytic cost journal')
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
@api.multi
def invoice_validate(self):
t_uom = self.env['product.uom']
for invoice in self:
if invoice.type not in ('out_invoice', 'out_refund'):
continue
company_currency = self.company_id.currency_id
currency = self.currency_id.with_context(date=self.date_invoice)
sign = -1 if self.type == 'out_invoice' else 1
for analytic_move in invoice.mapped(
'move_id.line_id.analytic_lines'):
if not analytic_move.product_id:
continue
from_unit = analytic_move.product_uom_id.id
product_unit = analytic_move.product_id.uom_id.id
uom_qty = analytic_move.unit_amount
if from_unit != product_unit:
uom_qty = t_uom._compute_qty(from_unit,
analytic_move.unit_amount,
product_unit)
amount = currency.compute(
analytic_move.product_id.standard_price *
uom_qty, company_currency) * sign
if analytic_move.journal_id.analytic_cost_journal:
analytic_move.copy(
{'journal_id':
analytic_move.journal_id.analytic_cost_journal.id,
'amount': amount})
return super(AccountInvoice, self).invoice_validate()
class AccountAnalyticLine(models.Model):
_inherit = 'account.analytic.line'
@api.multi
def fix(self):
journal_id = self[0].journal_id.id
lines = self.search([('journal_id', '=', journal_id)])
t_uom = self.env['product.uom']
for l in lines:
if l.product_uom_id.id != l.product_id.uom_id:
invoice_id = False
for l2 in l.move_id.analytic_lines:
if l2.invoice_id:
invoice_id = l2.invoice_id
if not invoice_id:
continue
company_currency = invoice_id.company_id.currency_id
currency = invoice_id.currency_id.\
with_context(date=invoice_id.date_invoice)
sign = -1 if invoice_id.type == 'out_invoice' else 1
from_unit = l.product_uom_id.id
product_unit = l.product_id.uom_id.id
uom_qty = l.unit_amount
uom_qty = t_uom._compute_qty(from_unit,
l.unit_amount,
product_unit)
amount = currency.compute(
l.product_id.standard_price *
uom_qty, company_currency) * sign
l.amount = amount
return
|
Python
| 0.000102
|
@@ -3155,16 +3155,19 @@
d.uom_id
+.id
:%0A
|
d9d68abe350d253d6952041d61872bd3eec5f95d
|
FIX after duplicating an old invoice and validating it, get The invoice date cannot be later than the date of registration
|
account_invoice_entry_date/models/account.py
|
account_invoice_entry_date/models/account.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2010 ISA srl (<http://www.isa.it>).
# Copyright (C) 2014 Associazione Odoo Italia
# http://www.openerp-italia.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp import fields, models
from openerp.tools.translate import _
from openerp.exceptions import Warning
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
registration_date = fields.Date(
'Registration Date',
states={
'paid': [('readonly', True)],
'open': [('readonly', True)],
'close': [('readonly', True)]
},
select=True,
help="Keep empty to use the current date")
def action_move_create(self, cr, uid, ids, context=None):
if not context:
context = {}
super(AccountInvoice, self).action_move_create(
cr, uid, ids, context=context)
for inv in self.browse(cr, uid, ids):
date_invoice = inv.date_invoice
reg_date = inv.registration_date
if not inv.registration_date:
if not inv.date_invoice:
reg_date = time.strftime('%Y-%m-%d')
else:
reg_date = inv.date_invoice
if date_invoice and reg_date:
if (date_invoice > reg_date):
raise Warning(_("The invoice date cannot be later than the"
" date of registration!"))
date_start = inv.registration_date or inv.date_invoice \
or time.strftime('%Y-%m-%d')
date_stop = inv.registration_date or inv.date_invoice \
or time.strftime('%Y-%m-%d')
period_ids = self.pool.get('account.period').search(
cr, uid,
[
('date_start', '<=', date_start),
('date_stop', '>=', date_stop),
('company_id', '=', inv.company_id.id)
])
if period_ids:
period_id = period_ids[0]
self.write(
cr, uid, [inv.id], {
'registration_date': reg_date, 'period_id': period_id})
mov_date = reg_date or inv.date_invoice or time.strftime(
'%Y-%m-%d')
self.pool.get('account.move').write(
cr, uid, [inv.move_id.id], {'state': 'draft'})
sql = "update account_move_line set period_id="+str(
period_id) + ",date='" + mov_date + "' where move_id = " + str(
inv.move_id.id)
cr.execute(sql)
self.pool.get('account.move').write(
cr, uid, [inv.move_id.id],
{'period_id': period_id, 'date': mov_date})
self.pool.get('account.move').write(
cr, uid, [inv.move_id.id], {'state': 'posted'})
self._log_event(cr, uid, ids)
return True
|
Python
| 0.000001
|
@@ -1503,16 +1503,36 @@
nt date%22
+,%0A copy=False
)%0A%0A d
|
b3c1aa9b3415240e03ae790d58df72d8442ae761
|
Fix PEP8
|
account_statement_so_completion/statement.py
|
account_statement_so_completion/statement.py
|
# -*- coding: utf-8 -*-
###############################################################################
# #
# Author: Joel Grand-Guillaume #
# Copyright 2011-2012 Camptocamp SA #
# #
# Author: Leonardo Pistone <leonardo.pistone@camptocamp.com> #
# Copyright 2013 Camptocamp SA #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from openerp.osv import fields, orm
from tools.translate import _
from openerp.addons.account_statement_base_completion.statement import ErrorTooManyPartner
class account_statement_completion_rule(orm.Model):
_name = "account.statement.completion.rule"
_inherit = "account.statement.completion.rule"
def _get_functions(self, cr, uid, context=None):
res = super(account_statement_completion_rule, self)._get_functions(
cr, uid, context=context)
res.append(
('get_from_ref_and_so', 'From line reference (based on SO number)')
)
return res
# Should be private but data are initialised with no update XML
def get_from_ref_and_so(self, cr, uid, st_line, context=None):
"""
Match the partner based on the SO number and the reference of the
statement line. Then, call the generic get_values_for_line method to
complete other values. If more than one partner matched, raise the
ErrorTooManyPartner error.
:param int/long st_line: read of the concerned
account.bank.statement.line
:return:
A dict of value that can be passed directly to the write method of
the statement line or {}
{'partner_id': value,
'account_id': value,
...}
"""
st_obj = self.pool.get('account.bank.statement.line')
res = {}
if st_line:
so_obj = self.pool.get('sale.order')
so_id = so_obj.search(cr,
uid,
[('name', '=', st_line['ref'])],
context=context)
if so_id:
if so_id and len(so_id) == 1:
so = so_obj.browse(cr, uid, so_id[0], context=context)
res['partner_id'] = so.partner_id.id
elif so_id and len(so_id) > 1:
raise ErrorTooManyPartner(
_('Line named "%s" (Ref:%s) was matched by more '
'than one partner while looking on SO by ref.') %
(st_line['name'], st_line['ref']))
st_vals = st_obj.get_values_for_line(
cr,
uid,
profile_id=st_line['profile_id'],
master_account_id=st_line['master_account_id'],
partner_id=res.get('partner_id', False),
line_type='customer',
amount=st_line['amount'] if st_line['amount'] else 0.0,
context=context)
res.update(st_vals)
return res
|
Python
| 0.005634
|
@@ -1844,17 +1844,16 @@
mport _%0A
-%0A
from ope
@@ -1915,16 +1915,22 @@
import
+%5C%0A
ErrorToo
@@ -1949,33 +1949,32 @@
%0A%0Aclass
-a
+A
ccount
-_s
+S
tatement
_complet
@@ -1957,34 +1957,33 @@
AccountStatement
-_c
+C
ompletion_rule(o
@@ -1975,18 +1975,17 @@
mpletion
-_r
+R
ule(orm.
@@ -2166,33 +2166,32 @@
= super(
-a
+A
ccount
-_s
+S
tatement
_complet
@@ -2182,18 +2182,17 @@
tatement
-_c
+C
ompletio
@@ -2192,18 +2192,17 @@
mpletion
-_r
+R
ule, sel
@@ -3144,21 +3144,17 @@
elf.pool
-.get(
+%5B
'account
@@ -3174,17 +3174,17 @@
nt.line'
-)
+%5D
%0A
@@ -3296,19 +3296,16 @@
.search(
-cr,
%0A
@@ -3317,131 +3317,49 @@
- uid,%0A %5B('name', '=', st_line%5B'ref'%5D)%5D,%0A
+cr, uid, %5B('name', '=', st_line%5B'ref'%5D)%5D,
con
@@ -3956,53 +3956,13 @@
cr,
-%0A uid,%0A
+ uid,
pro
|
c330eabd69fd2d047d5197946dc763dea6716347
|
Fix sphinx no longer works if PYTHONPATH is set (to 2.7)
|
test/test.py
|
test/test.py
|
#!/usr/bin/env python
"""
Test jenkinsflow.
"""
from __future__ import print_function
import sys, os, getpass, shutil, copy
major_version = sys.version_info.major
if major_version < 3:
import subprocess32 as subprocess
else:
import subprocess
from os.path import join as jp
import click
import tenjin
from tenjin.helpers import *
try:
import pytest
except ImportError:
print("See setup.py for test requirements, or use 'python setup.py test'", file=sys.stderr)
raise
here = os.path.abspath(os.path.dirname(__file__))
top_dir = os.path.dirname(here)
extra_sys_path = [os.path.normpath(path) for path in [here, jp(top_dir, '..'), jp(top_dir, 'demo'), jp(top_dir, 'demo/jobs')]]
sys.path = extra_sys_path + sys.path
os.environ['PYTHONPATH'] = ':'.join(extra_sys_path)
from jenkinsflow.test.framework import config
from jenkinsflow.test import cfg as test_cfg
from jenkinsflow.test.cfg import ApiType
def dummy(*_args):
print("*** Please use test/tests.py to run tests", file=sys.stderr)
class TestLoader(object):
def loadTestsFromNames(self, names, module=None):
return dummy
def run_tests(parallel, api, args, coverage=True, mock_speedup=1):
args = copy.copy(args)
test_cfg.select_speedup(mock_speedup)
if api is not None:
api_name = api.upper()
api_type = ApiType[api_name]
args.extend(['-k', 'ApiType.' + api_name])
else:
# We run for all apis
api_type = None
if coverage:
engine = tenjin.Engine()
cov_rc_file_name = jp(here, '.coverage_rc_' + api_type.name.lower() if api_type else 'all')
with open(cov_rc_file_name, 'w') as cov_rc_file:
context = dict(api_type=api_type, top_dir=top_dir, major_version=major_version)
cov_rc_file.write(engine.render(jp(here, "coverage_rc.tenjin"), context))
args.extend(['--cov=' + top_dir, '--cov-report=term-missing', '--cov-config=' + cov_rc_file_name])
try:
if api_type != ApiType.MOCK:
# Note: 'boxed' is required for the kill/abort_current test not to abort other tests
args.append('--boxed')
if parallel and api_type != ApiType.MOCK:
args.extend(['-n', '16'])
print('pytest.main', args)
rc = pytest.main(args)
if rc:
raise Exception("pytest {args} failed with code {rc}".format(args=args, rc=rc))
finally:
if coverage:
os.unlink(cov_rc_file_name)
def start_msg(*msg):
print("\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n", *msg)
@click.command()
@click.option('--mock-speedup', '-s', help="Time speedup when running mocked tests.", default=1000)
@click.option('--direct-url', help="Direct Jenkins URL. Must be different from the URL set in Jenkins (and preferably non proxied)",
default=test_cfg.direct_url(test_cfg.ApiType.JENKINS))
@click.option('--api', help="Select which api to use/test. Possible values: 'jenkins, 'script', 'mock'. Default is all.", default=None)
@click.option('--pytest-args', help="py.test arguments.")
@click.option('--job-delete/--no-job-delete', help="Delete and re-load jobs into Jenkins. Default is --no-job-delete.", default=False)
@click.option('--job-load/--no-job-load', help="Load jobs into Jenkins (skipping job load assumes all jobs already loaded and up to date). Deafult is --job-load.", default=True)
@click.argument('testfile', nargs=-1, type=click.Path(exists=True, readable=True))
def cli(mock_speedup, direct_url, api, pytest_args, job_delete, job_load, testfile):
"""
Test jenkinsflow.
First runs all tests mocked in hyperspeed, then runs against Jenkins, using jenkins_api, then run script_api jobs.
Normally jobs will be run in parallel, specifying --job-delete disables this.
The default options assumes that re-loading without deletions generates correct job config.
Tests that require jobs to be deleted/non-existing will delete the jobs, regardless of the --job-delete option.
[TESTFILE]... File names to pass to py.test
"""
os.environ[test_cfg.DIRECT_URL_NAME] = direct_url
os.environ[test_cfg.SKIP_JOB_DELETE_NAME] = 'false' if job_delete else 'true'
os.environ[test_cfg.SKIP_JOB_LOAD_NAME] = 'false' if job_load else 'true'
os.environ[test_cfg.SCRIPT_DIR_NAME] = test_cfg.script_dir()
print("Creating temporary test installation in", repr(config.pseudo_install_dir), "to make files available to Jenkins.")
install_script = jp(here, 'tmp_install.sh')
rc = subprocess.call([install_script])
if rc:
print("Failed test installation to", repr(config.pseudo_install_dir), "Install script is:", repr(install_script), file=sys.stderr)
print("Warning: Some tests will fail!", file=sys.stderr)
cov_file = ".coverage"
for cov_file in jp(here, cov_file), jp(top_dir, cov_file):
if os.path.exists(cov_file):
os.remove(cov_file)
print("\nRunning tests")
try:
args = ['--capture=sys', '--instafail']
if pytest_args or testfile:
coverage = False
args.extend(pytest_args.split(' ') + list(testfile) if pytest_args else list(testfile))
else:
coverage = True
args.append('--ff')
hudson = os.environ.get('HUDSON_URL')
if hudson:
print("Disabling parallel run, Hudson can't handle it :(")
parallel = test_cfg.skip_job_load() or test_cfg.skip_job_delete() and not hudson
run_tests(parallel, api, args, coverage, mock_speedup)
start_msg("Testing setup.py")
user = getpass.getuser()
install_prefix = '/tmp/' + user
tmp_packages_dir = install_prefix + '/lib/python{major}.{minor}/site-packages'.format(major=major_version, minor=sys.version_info.minor)
os.environ['PYTHONPATH'] = tmp_packages_dir
if os.path.exists(tmp_packages_dir):
shutil.rmtree(tmp_packages_dir)
os.makedirs(tmp_packages_dir)
os.chdir(top_dir)
subprocess.check_call([sys.executable, jp(top_dir, 'setup.py'), 'install', '--prefix', install_prefix])
shutil.rmtree(jp(top_dir, 'build'))
start_msg("Testing documentation generation")
os.chdir('doc/source')
subprocess.check_call(['make', 'html'])
except Exception as ex:
print('*** ERROR: There were errors! Check output! ***', repr(ex), file=sys.stderr)
raise
sys.exit(rc)
if __name__ == '__main__':
cli() # pylint: disable=no-value-for-parameter
|
Python
| 0
|
@@ -6292,16 +6292,53 @@
ource')%0A
+ del os.environ%5B'PYTHONPATH'%5D%0A
|
8a082670b108f36f95e5c421df41ea964843d122
|
Rename method
|
cartoframes/data/services/bq_datasets.py
|
cartoframes/data/services/bq_datasets.py
|
import os
import requests
from carto.utils import ResponseStream
# from carto.auth import APIKeyAuthClient
from carto.exceptions import CartoException
# TODO: this shouldn't be hardcoded
DO_ENRICHMENT_API_URL = 'http://localhost:7070/bq'
class BQDataset:
def __init__(self, name_id):
self.name = name_id
# TODO fix this crap
self.session = requests.Session()
self.api_key = 'my_valid_api_key'
def upload(self, dataframe):
url = DO_ENRICHMENT_API_URL + '/datasets/' + self.name
params = {'api_key': self.api_key}
try:
dataframe.to_csv(self.name, index=False)
files = {'file': open(self.name, 'rb')}
response = self.session.post(
url,
params=params,
files=files
)
response.raise_for_status()
except requests.HTTPError as e:
if 400 <= response.status_code < 500:
reason = response.json()['error'][0]
error_msg = u'%s Client Error: %s' % (response.status_code,
reason)
raise CartoException(error_msg)
raise CartoException(e)
except Exception as e:
raise CartoException(e)
finally:
os.remove(self.name)
return response
def upload_file_object(self, file_object):
pass
def create_import(self):
pass
def upload_dataframe(self, dataframe):
pass
def download(self):
url = DO_ENRICHMENT_API_URL + '/datasets/' + self.name
params = {'api_key': self.api_key}
try:
response = self.session.get(url,
params=params,
stream=True)
response.raise_for_status()
except requests.HTTPError as e:
if 400 <= response.status_code < 500:
# Client error, provide better reason
reason = response.json()['error'][0]
error_msg = u'%s Client Error: %s' % (response.status_code,
reason)
raise CartoException(error_msg)
else:
raise CartoException(e)
except Exception as e:
raise CartoException(e)
return response
def download_stream(self):
return ResponseStream(self.download())
class BQJob:
def __init__(self, job_id):
self.id = job_id
def status(self):
pass
def result(self):
pass
class BQUserDataset:
@staticmethod
def name(name_id):
return BQDataset(name_id)
|
Python
| 0.000002
|
@@ -1445,20 +1445,21 @@
def
-create_impor
+import_datase
t(se
|
872f5d997ec48b1c8eb7771ec3771f05dc27cd96
|
Put in target states for each state
|
catching_raindrops/catching_raindrops.py
|
catching_raindrops/catching_raindrops.py
|
# catching_raindrops.py 08/03/2016 D.J.Whale
# game parameters
CUP_CAPACITY = 5
SPEED = 6
MAX_MISSES = 3
AUTO_EMPTY = False
SENSITIVITY = 400
def get_cup_position():
acc = accelerometer.get_x()/SENSITIVITY
return math.clamp(0, 4, acc+2)
def show_splash_screen():
pass # TODO show an animation until any button pressed
def play_game():
score = 0
drops_in_cup = 0
misses = 0
drop_x = 0
drop_y = 0
cup_x = 2
state = "NEWDROP"
##start_game
while True:
if state == "NEWDROP":
pass
elif state == "RAINING":
pass
elif state == "ATCUP":
pass
elif state == "MISS":
pass
elif state == "CATCH":
pass
elif state == "FULL":
pass
elif state == "EMPTYING"
pass
elif state == "OVERFLOW":
pass
elif state == "GAMEOVER":
pass
def test_movement():
button_b.reset_presses()
while not button_b.was_pressed():
x = get_cup_position()
display.clear()
display.set_pixel(x, 2)
sleep(200)
def show_number(n):
pass # TODO flash digit 4 times then leave it solid
def run():
high_score = 0
while True:
show_splash_screen()
if button_a.was_pressed():
button_a.reset_presses()
score = play_game()
if score > high_score:
high_score = score
show_number(high_score)
elif button_b.was_pressed():
button_b.reset_presses()
test_movement()
# run()
|
Python
| 0.999999
|
@@ -471,16 +471,21 @@
EWDROP%22%0A
+ %0A
##st
@@ -544,36 +544,49 @@
P%22:%0A
-pass
+state = %22RAINING%22
%0A %0A
@@ -620,36 +620,78 @@
G%22:%0A
-pass
+state = %22ATCUP%22%0A state = %22EMPTYING%22
%0A %0A
@@ -723,36 +723,74 @@
P%22:%0A
-pass
+state = %22CATCH%22%0A state = %22MISS%22
%0A %0A
@@ -821,36 +821,80 @@
S%22:%0A
-pass
+state = %22GAMEOVER%22%0A state = %22NEWDROP%22
%0A %0A
@@ -926,36 +926,107 @@
H%22:%0A
-pass
+state = %22FULL%22%0A state = %22OVERFLOW%22%0A state = %22NEWDROP%22
%0A %0A
@@ -1057,36 +1057,80 @@
L%22:%0A
-pass
+state = %22EMPTYING%22%0A state = %22NEWDROP%22
%0A %0A
@@ -1164,36 +1164,49 @@
NG%22%0A
-pass
+state = %22NEWDROP%22
%0A %0A
@@ -1241,36 +1241,50 @@
W%22:%0A
-pass
+state = %22GAMEOVER%22
%0A %0A
@@ -1327,20 +1327,51 @@
-pass
+break%0A %0A return score
%0A
|
8d8f470ad0788b1e6e91155f07b351de04051824
|
add test for search by name and pagination
|
app/mod_bucketlists/tests/test_bucketlist.py
|
app/mod_bucketlists/tests/test_bucketlist.py
|
from app.test_config import BaseTestCase
class BucketListTestCase(BaseTestCase):
def test_creates_new_bucketlist_with_token(self):
data = {
'bucket_name': 'Christmas'
}
response = self.client.post('/bucketlists/', data=data, headers=self.token, follow_redirects=True)
self.assertEqual(201, response.status_code)
response = response.data.decode('utf-8')
self.assertIn(data['bucket_name'], response)
self.assertIn('date_created', response)
def test_gets_bucketlist_names_for_the_user(self):
response = self.client.get('/bucketlists/', headers=self.token, follow_redirects=True)
response = response.data.decode('utf-8')
self.assertIn('Checkpoint', response)
self.assertIn('created_by', response)
self.assertIn('date_created', response)
def test_error_on_bucketlist_creation_with_invalid_token(self):
data = {
'bucket_name': 'Christmas'
}
response = self.client.post('/bucketlists/', data=data, headers=self.invalid_token, follow_redirects=True)
self.assertEqual(403, response.status_code)
response = response.data.decode('utf-8')
self.assertIn('error', response)
self.assertIn('invalid token', response)
def test_error_on_bucketlist_creation_with_expired_token(self):
data = {
'bucket_name': 'Christmas'
}
response = self.client.post('/bucketlists/', data=data, headers=self.expired_token, follow_redirects=True)
self.assertEqual(403, response.status_code)
response = response.data.decode('utf-8')
self.assertIn('error', response)
self.assertIn('expired token', response)
|
Python
| 0
|
@@ -841,32 +841,452 @@
ed', response)%0A%0A
+ def test_search_bucketlist_by_name(self):%0A response = self.client.get('/bucketlists/?q=Check', headers=self.token, follow_redirects=True)%0A%0A response = response.data.decode('utf-8')%0A self.assertIn('Checkpoint', response)%0A self.assertIn('created_by', response)%0A self.assertIn('date_created', response)%0A self.assertIn('next', response)%0A self.assertIn('prev', response)%0A%0A
def test_err
|
036610f8f9830e58432e2484e49af1b4385b5056
|
fix the typo
|
artifacia/artifacia.py
|
artifacia/artifacia.py
|
import json
import requests
class Client:
"""
This is the entry point in Python client API.
if you are going to use our API, first of all you should instanciate
client objent with your username and passwrod which you got from the dashboard.
Now start using Artifacia recommendations APIS.
"""
def __init__(self, api_key):
"""
Artifacia recommendation client initiallization
@parameter username, password : will help you to access our API like
upload your data and get recommendations
"""
self.api_key = api_key
def upload_user_purchased_items(self, user_id ,user_data):
"""
Input parameter :
user_data which is list of json
You can use this method with the username and password to upload your
user's interaction data like purchased items
output: status for your request in json format
"""
response = requests.post('https://api.artifacia.com/v1/users/'+str(user_id)+'/purchased_items', data=json.dumps(user_data), headers={'Content-Type':'application/json', 'api_key': self.api_key}, verify=False)
return json.loads(response.text)
def upload_user_viewed_items(self, user_id, user_data):
"""
Input parameter :
user_data which is list of json
You can use this method with the username and password to upload your
user's interaction data like viewed items
output: status for your request in json format
"""
response = requests.post('https://api.artifacia.com/v1/users/'+str(user_id)+'/viewed_items', data=json.dumps(user_data), headers={'Content-Type':'application/json', 'api_key':self.api_key}, verify=False)
return json.loads(response.text)
def upload_item_data(self, item_data):
"""
Input parameter :
item_data which is list of json
You can use this method to upload your catalog data
output : status for your request in json format
"""
response = requests.post('https://api.artifacia.com/v1/items', data=json.dumps(item_data), headers={'Content-Type':'application/json', 'api_key':self.api_key}, verify=False)
return json.loads(response.text)
def delete_item_data(self, item_ids):
"""
Input parameter :
item_ids which is in json format
If you want to delete some items from our database incase if some
items are out of stock then you can esily delete your items usinf this method
output: status for your request in json format
"""
response = requests.delete('https://api.artifacia.com/v1/items', data=json.dumps(item_ids), headers={'Content-Type':'application/json', 'api_key':self.api_key}, verify=False)
return json.loads(response.text)
def get_visual_recommendation(self, prod_id, num, filters):
"""
Input parameter :
prod_id which is an integer type
It will help you to get similar image ids corresponding to a given image id
output :
return list of image item_ids
"""
url = 'https://api.artifacia.com/v1/recommendation/similar/' + str(prod_id)+'/'+str(num) + "?"
for item in filters.keys():
url = "".join([url,item,"=",str(filters[item], "&")])
response = requests.get(url, headers={'Content-Type':'application/json', 'api_key':self.api_key}, verify=False)
return json.loads(response.text)
def get_cpr_recommendation(self, prod_id, num):
"""
Input parameter :
prod_id which is an integer type
If you want to get items which goes together you can use this method with the given username and password
output:
return list of product ids which can go with the given image
"""
response = requests.get('https://api.artifacia.com/v1/recommendation/collections/' + str(prod_id)+'/'+str(num), headers={'Content-Type':'application/json', 'api_key':self.api_key}, verify=False)
return json.loads(response.text)
def get_personalized_recommendation(self, user_id, num):
"""
Input parameter :
user_id which is integer type
If you want to get recommendation on the basis of user behaviour then use this method with the given username and passwd
output:
list of prod_ids
"""
response = requests.get('https://api.artifacia.com/v1/recommendation/user/' +str(user_id)+'/'+str(num), headers={'Content-Type':'application/json', 'api_key':self.api_key}, verify=False)
return json.loads(response.text)
|
Python
| 1
|
@@ -3339,14 +3339,14 @@
tem%5D
+)
, %22&%22
-)
%5D)%0A
|
7855d8a4a4c3151f0b3f4da04696322cca92ee06
|
fix tests
|
cla_frontend/apps/cla_auth/tests/urls.py
|
cla_frontend/apps/cla_auth/tests/urls.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import http
from django.conf.urls import patterns, include, url
from django.contrib.auth.decorators import login_required
from . import base
@login_required
def test_view(request):
return http.HttpResponse('logged in')
zone_url = patterns('',
url(r'^login/$', 'cla_auth.views.login', {
'zone_name': base.DEFAULT_ZONE_NAME
}, name='login'),
url(r'^test/$',
test_view,
name=base.DEFAULT_ZONE_PROFILE['LOGIN_REDIRECT_URL'].split(':')[1]
)
)
urlpatterns = patterns('',
url(r'^test_zone/', include(zone_url, namespace=base.DEFAULT_ZONE_NAME)),
)
|
Python
| 0.000001
|
@@ -212,16 +212,66 @@
rt base%0A
+from django.core.urlresolvers import reverse_lazy%0A
%0A%0A@login
@@ -603,16 +603,191 @@
%5B1%5D%0A
+ ),%0A)%0A%0Aglobal_urls = patterns(%0A '',%0A url(r'%5Elogout/$', 'django.contrib.auth.views.logout',%0A %7B'next_page': reverse_lazy('login')%7D,%0A name='global_logout'),%0A
)%0A
-)%0A
%0A%0Aur
@@ -803,32 +803,32 @@
= patterns('',%0A
-
url(r'%5Etest_
@@ -889,10 +889,70 @@
NAME)),%0A
+ url(r'%5Eauth/', include(global_urls, namespace='auth')),%0A
)%0A
|
4bc0b7981f6eaa1744c90d0c080b9678af52d624
|
fix bug in picture specs
|
apps/project_sheet/project_pictures_specs.py
|
apps/project_sheet/project_pictures_specs.py
|
"""
Specification for image manipulation throw imagekit
"""
from imagekit.specs import ImageSpec
from imagekit import processors
from imagekit.processors import ImageProcessor
from imagekit.lib import ImageColor
class Center(ImageProcessor):
"""
Generic image centering processor
"""
width = None
height = None
background_color = '#000000'
@classmethod
def process(cls, img, fmt, obj):
if cls.width and cls.height:
background_color = ImageColor.getrgb(cls.background_color)
#FIXME : Image is not imported but it never raises exception so ...
bg_picture = Image.new("RGB", (cls.width, cls.height), background_color)
## paste it
bg_w, bg_h = bg_picture.size
img_w, img_h = img.size
coord_x, coord_y = (bg_w - img_w) / 2, (bg_h - img_h) / 2
bg_picture.paste(img, (coord_x, coord_y, coord_x + img_w, coord_y + img_h))
return bg_picture, fmt
class ResizeThumb(processors.Resize):
"""
Resizing processor providing media thumbnail
"""
width = 95
height = 65
crop = True
class ResizeIDCard(processors.Resize):
"""
Resizing processor providing profile ID card
"""
width = 137
height = 71
crop = True
class ResizeDisplay(processors.Resize):
"""
Resizing processor for media gallery
"""
width = 700
class PreResizeMosaic(processors.Resize):
"""
Resizing processor for mosaic
"""
width = 200
class CenterMosaic(processors.Resize):
#FIXME : semantic ? Center or Resize ?
width = 40
height = 40
crop = True
class CenterDisplay(Center):
"""
Image centering processor for media gallery
"""
width = 700
height = 460
class EnhanceThumb(processors.Adjustment):
"""
Adjustment processor to enhance the image at small sizes
"""
contrast = 1.2
sharpness = 1.1
class Thumbnail(ImageSpec):
access_as = 'thumbnail_image'
pre_cache = True
processors = [ResizeThumb, EnhanceThumb]
class Display(ImageSpec):
access_as = 'display'
increment_count = True
processors = [ResizeDisplay, CenterDisplay]
class MosaicTile(ImageSpec):
"""
For the Homepage
"""
access_as = 'mosaic_tile'
processors = [PreResizeMosaic, CenterMosaic]
class IDCard(ImageSpec):
"""
Preview when displaying a project sheet card
"""
access_as = 'thumbnail_idcard'
pre_cache = True
processors = [ResizeIDCard, EnhanceThumb]
|
Python
| 0
|
@@ -204,16 +204,24 @@
ageColor
+, Image%0A
%0A%0Aclass
|
354eea19773b652e705f68648c68c235bfa27dd7
|
Fix weird naming
|
twisted/plugins/nanoplay_plugin.py
|
twisted/plugins/nanoplay_plugin.py
|
from zope.interface import implements
from twisted.python import usage
from twisted.plugin import IPlugin
from twisted.internet import reactor
from twisted.application import service, strports
from nanoplay import PayloadProtocol, ControlProtocol, CustomServer, Player
class Options(usage.Options):
optParameters = [
["payload", "p",
"tcp:port=5000", "Endpoint to listen for files on"],
["control", "c",
"tcp:port=5001", "Endpoint to listen for control commands on"]
]
class NanoplayMaker(object):
implements(service.IServiceMaker, IPlugin)
tapname = "nanoplay"
description = "nanoplay, trivial music player"
options = Options
def makeService(self, options):
"""
Construct a TCPServer from a factory defined in myproject.
"""
player = Player(reactor)
reactor.addSystemEventTrigger("before", "shutdown", player.kill)
s = service.MultiService()
payload_service = strports.service(options["payload"],
CustomServer(PayloadProtocol, player))
payload_service.setServiceParent(s)
payload_service = strports.service(options["control"],
CustomServer(ControlProtocol, player))
payload_service.setServiceParent(s)
return s
serviceMaker = NanoplayMaker()
|
Python
| 0.030363
|
@@ -1177,39 +1177,39 @@
rent(s)%0A
-payload
+control
_service = strpo
@@ -1322,39 +1322,39 @@
layer))%0A
-payload
+control
_service.setServ
|
e5603d855e22e27181890a972a329fd40a5f1989
|
Structure wasn't being written by run_gamma_calculations.
|
twod_materials/friction/startup.py
|
twod_materials/friction/startup.py
|
import os
import math
import numpy as np
from monty.serialization import loadfn
import twod_materials.utils as utl
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Incar
import twod_materials
from twod_materials.stability.startup import get_magmom_string
PACKAGE_PATH = twod_materials.__file__.replace('__init__.pyc', '')
PACKAGE_PATH = PACKAGE_PATH.replace('__init__.py', '')
KERNEL_PATH = os.path.join(PACKAGE_PATH, 'vdw_kernel.bindat')
if '/ufrc/' in os.getcwd():
HIPERGATOR = 2
elif '/scratch/' in os.getcwd():
HIPERGATOR = 1
VASP = loadfn(os.path.join(os.path.expanduser('~'),
'config.yaml'))['normal_binary']
def run_gamma_calculations(submit=True):
"""
Setup a 2D grid of static energy calculations to plot the Gamma
surface between two layers of the 2D material.
"""
if not os.path.isdir('friction'):
os.mkdir('friction')
os.chdir('friction')
if not os.path.isdir('lateral'):
os.mkdir('lateral')
os.chdir('lateral')
os.system('cp ../../CONTCAR POSCAR')
# Pad the bottom layer with 20 Angstroms of vacuum.
utl.add_vacuum(20 - utl.get_spacing(), 0.8)
structure = Structure.from_file('POSCAR')
n_sites_per_layer = structure.num_sites
n_divs_x = int(math.ceil(structure.lattice.a * 2.5))
n_divs_y = int(math.ceil(structure.lattice.b * 2.5))
# Get the thickness of the material.
max_height = max([site.coords[2] for site in structure.sites])
min_height = min([site.coords[2] for site in structure.sites])
thickness = max_height - min_height
# Make a new layer.
new_sites = []
for site in structure.sites:
new_sites.append((site.specie,
[site.coords[0], site.coords[1],
site.coords[2] + thickness + 3.5]))
for site in new_sites:
structure.append(site[0], site[1], coords_are_cartesian=True)
#structure.get_sorted_structure().to('POSCAR', 'POSCAR')
for x in range(n_divs_x):
for y in range(n_divs_y):
dir = '{}x{}'.format(x, y)
if not os.path.isdir(dir):
os.mkdir(dir)
# Copy input files
os.chdir(dir)
os.system('cp ../../../INCAR .')
os.system('cp ../../../KPOINTS .')
os.system('cp ../POSCAR .')
os.system('cp {} .'.format(KERNEL_PATH))
utl.write_potcar()
incar_dict = Incar.from_file('INCAR').as_dict()
incar_dict.update({'NSW': 0, 'LAECHG': False, 'LCHARG': False,
'LWAVE': False, 'MAGMOM': get_magmom_string()})
incar_dict.pop('NPAR', None)
Incar.from_dict(incar_dict).write_file('INCAR')
# Shift the top layer
poscar_lines = open('POSCAR').readlines()
with open('POSCAR', 'w') as poscar:
for line in poscar_lines[:8 + n_sites_per_layer]:
poscar.write(line)
for line in poscar_lines[8 + n_sites_per_layer:]:
split_line = line.split()
new_coords = [
float(split_line[0]) + float(x)/float(n_divs_x),
float(split_line[1]) + float(y)/float(n_divs_y),
float(split_line[2])]
poscar.write(' '.join([str(i) for i in new_coords])
+ '\n')
if HIPERGATOR == 1:
utl.write_pbs_runjob(dir, 1, 4, '400mb', '1:00:00', VASP)
submission_command = 'qsub runjob'
elif HIPERGATOR == 2:
utl.write_slurm_runjob(dir, 4, '400mb', '1:00:00', VASP)
submission_command = 'sbatch runjob'
if submit:
os.system(submission_command)
os.chdir('../')
os.chdir('../../')
def run_normal_force_calculations(basin_and_saddle_dirs,
spacings=np.arange(1.5, 4.25, 0.25),
submit=True):
"""
Set up and run static calculations of the basin directory
and saddle directory (specified as a tuple) at specified
interlayer spacings (by default, between 1.5 and 4 Angstroms)
to get f_N and f_F.
ex.
run_normal_force_calculations(('0x0', '3x6'))
or
run_normal_force_calculations(get_basin_and_peak_locations())
"""
spacings = [str(spc) for spc in spacings]
os.chdir('friction')
if not os.path.isdir('normal'):
os.mkdir('normal')
os.chdir('normal')
for spacing in spacings:
if not os.path.isdir(spacing):
os.mkdir(spacing)
for subdirectory in basin_and_saddle_dirs:
os.system('cp -r ../lateral/{} {}/'.format(subdirectory, spacing))
os.chdir('{}/{}'.format(spacing, subdirectory))
structure = Structure.from_file('POSCAR')
n_sites = len(structure.sites)
top_layer = structure.sites[n_sites / 2:]
bottom_of_top_layer = min(
[z_coord for z_coord in [site.coords[2] for site in top_layer]])
remove_indices = range(n_sites / 2, n_sites)
structure.remove_sites(remove_indices)
max_height = max([site.coords[2] for site in structure.sites])
for site in top_layer:
structure.append(
site.specie,
[site.coords[0],
site.coords[1],
site.coords[2] - bottom_of_top_layer
+ max_height + float(spacing)],
coords_are_cartesian=True
)
structure.to('POSCAR', 'POSCAR')
if HIPERGATOR == 1:
utl.write_pbs_runjob('{}_{}'.format(subdirectory, spacing), 1,
4, '400mb', '1:00:00', VASP)
submission_command = 'qsub runjob'
elif HIPERGATOR == 2:
utl.write_slurm_runjob('{}_{}'.format(subdirectory, spacing), 4,
'400mb', '1:00:00', VASP)
submission_command = 'sbatch runjob'
if submit:
os.system(submission_command)
os.chdir('../../')
os.chdir('../../')
|
Python
| 0
|
@@ -2008,24 +2008,61 @@
', 'POSCAR')
+%0A structure.to('POSCAR', 'POSCAR')
%0A%0A for x
|
25f0bd4064e527006b492a2242586c8025a2cd9d
|
Fix bug in normalization
|
athenet/algorithm/derest/layers/inception.py
|
athenet/algorithm/derest/layers/inception.py
|
from athenet.algorithm.derest.layers import DerestSoftmaxLayer,\
DerestReluLayer, DerestPoolLayer, DerestNormLayer, DerestLayer, \
DerestFullyConnectedLayer, DerestConvolutionalLayer, DerestDropoutLayer
from athenet.layers import Softmax, ReLU, PoolingLayer, LRN, \
ConvolutionalLayer, Dropout, FullyConnectedLayer, InceptionLayer
from athenet.algorithm.derest.utils import add_tuples, change_order
def get_derest_layer(layer, normalize=False):
"""
Return derest layer on which we can count activations, derivatives
and derest algorithm
:param Layer layer: network's original layer
:return DerestLayer: new better derest layer
"""
if isinstance(layer, Softmax):
return DerestSoftmaxLayer(layer)
if isinstance(layer, ReLU):
return DerestReluLayer(layer)
if isinstance(layer, PoolingLayer):
return DerestPoolLayer(layer)
if isinstance(layer, LRN):
return DerestNormLayer(layer)
if isinstance(layer, ConvolutionalLayer):
return DerestConvolutionalLayer(layer)
if isinstance(layer, Dropout):
return DerestDropoutLayer(layer)
if isinstance(layer, FullyConnectedLayer):
return DerestFullyConnectedLayer(layer)
if isinstance(layer, InceptionLayer):
return DerestInceptionLayer(layer)
raise NotImplementedError
class DerestInceptionLayer(DerestLayer):
def __init__(self, layer):
super(DerestInceptionLayer, self).__init__(layer)
self.derest_layer_lists = []
for layer_list in self.layer.layer_lists:
derest_layer_list = []
for l in layer_list:
derest_layer_list.append(get_derest_layer(l))
self.derest_layer_lists.append(derest_layer_list)
@staticmethod
def _normalize(data):
a = data.max(-data).amax()
return data / a
def count_activation(self, input, normalize=False):
results = None
for derest_layer_list in self.derest_layer_lists:
inp = input
for derest_layer in derest_layer_list:
if self.normalize_activations:
inp = self._normalize(inp)
derest_layer.activations = inp
inp = derest_layer.count_activation(inp, normalize)
if results is None:
results = inp
else:
results = results.concat(inp)
return results
def count_derivatives(self, output, input_shape, normalize=False):
output_list = []
last = 0
for layer in self.layer.top_layers:
channels = layer.output_shape[2]
output_list.append(output[:, last : (last + channels), ::])
last += channels
batches = input_shape[0]
result = None
for output, derest_list in zip(output_list, self.derest_layer_lists):
out = output
for derest_layer in reversed(derest_list):
if normalize:
out = self._normalize(out)
derest_layer.derivatives = out
local_input_shape = add_tuples(
batches, change_order(derest_layer.layer.input_shape))
out = derest_layer.count_derivatives(out, local_input_shape)
if result is None:
result = out
else:
result += out
return result
def count_derest(self, f):
results = []
for derest_layer_list in self.derest_layer_lists:
for derest_layer in derest_layer_list:
results.extend(derest_layer.count_derest(f))
return results
|
Python
| 0.000002
|
@@ -2088,21 +2088,16 @@
if
-self.
normaliz
@@ -2101,20 +2101,8 @@
lize
-_activations
:%0A
|
7f314bd4ee1602fc7ae05334ba6acc01bb2e214c
|
Handle case when there are no wip/ branches.
|
utils/update_tools/update_tools.py
|
utils/update_tools/update_tools.py
|
#!/usr/bin/env python3
"""
Semi-automated python script to maintain WIP master branches forked
from third-party tools.
The user needs to provide a location of the git project that needs to be
updated. In case the directory does not exist, the user needs to provide
the URL of the git repository.
This script takes into account all the branches marked under the `wip/`
namespace, rebases them on top of the master branch, and performs an
Octopus Merge on the master+wip-next branch.
In case of conflicts between different branches, the user is given access
to the shell, from which he can solve all the issues.
Once all the conflicting files are fixed, the script automatically performs
the last steps of the conflict solving.
The user can also choose to let the script to push force on the master+wip-next
branch.
"""
import os
import subprocess
import argparse
import git
def yes_or_no_input():
# raw_input returns the empty string for "enter"
yes = {'yes', 'y', 'ye', ''}
no = {'no', 'n'}
choice = input().lower()
if choice in yes:
return True
elif choice in no:
return False
def solve_conflicts(g, branch=""):
need_fix = set(g.diff("--name-only").split("\n"))
help_msg = """
CONFLICT {}
Entered in conflict-fixing mode, a shell will be spawned.
Solve conflicts in the following files:
""".format(branch)
for f in need_fix:
help_msg += "{}/{}\n".format(g.working_dir, f)
help_msg += """
After having fixed all the conflicts exit the spawned shell the following command
$ exit
"""
print(help_msg)
os.system('/bin/bash')
g.add(".")
def merge_branches(location, branches):
"""
Merge one or more branches into the current branch.
The branches have to be in string format
Returns:
- True: if merge was successful
- False: if merge was unsuccessful
"""
try:
subprocess.check_call(
"cd {} && git merge {} && cd -".format(location, branches),
shell=True
)
except subprocess.CalledProcessError:
print("Something went wrong during the merge!")
return False
pass
return True
def rebase_continue_rec(g):
try:
g.rebase("--continue")
except Exception:
solve_conflicts(g)
rebase_continue_rec(g)
def rebase_branch(g, branch):
g.checkout(branch)
try:
g.rebase('master')
except Exception:
solve_conflicts(g, branch)
rebase_continue_rec(g)
def revert_to_master(g, repo, remote):
""" Preserving history, revert {remote}/master+wip to {remote}/master
"""
g.reset(['--hard', '{}/master'.format(remote)])
g.reset(['{}/master+wip'.format(remote)])
g.add(['.'])
g.commit(
[
'-sm', 'Revert master+wip to master ({})'.format(
repo.commit('{}/master'.format(remote)).hexsha
)
]
)
# This removes files that got left behind during the reset's above.
# Without this, some stray files may cause git to error when attempting
# the merge.
g.clean(['-fx'])
g.merge(['master'])
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--location',
required=True,
help="Absolute path to the git repository."
)
parser.add_argument('--url', help="Optional url of the repository.")
parser.add_argument(
'--remote', help="Optional remote repository to use instead of origin"
)
parser.add_argument(
'--branch_name',
required=True,
help="Name of branch to push to github with new master+wip"
)
args = parser.parse_args()
location = args.location
url = args.url
remote = args.remote
assert os.path.exists(
location
) or url, "The git repository does not exists, and no URL has been provided"
if not os.path.exists(location):
git.repo.base.Repo.clone_from(url, location)
if not remote:
remote = 'origin'
repo = git.Repo("{}/.git".format(location))
g = git.cmd.Git(location)
g.fetch("-p")
all_branches = g.branch("-r")
# Remove spaces and special characters from branches
for string in [' ', '*']:
all_branches = all_branches.replace(string, '')
all_branches = all_branches.split('\n')
# Consider only branches in `origin`
origin_branches = []
for branch in all_branches:
if "HEAD" not in branch and "{}/".format(remote) in branch:
origin_branches.append(branch.replace('{}/'.format(remote), ''))
# Create new integration point on master.
g.checkout(['master'])
g.commit(['--allow-empty', '-sm', 'New integration point for master+wip.'])
branches = []
for branch in origin_branches:
if branch.startswith("wip/"):
print("Updating branch: ", branch)
rebase_branch(g, branch)
branches.append(branch)
assert args.branch_name not in ['master+wip', 'master'], \
('Branch name "{}" should not be "master+wip" nor "master"')
try:
g.checkout(['-b', args.branch_name])
except Exception:
print("Branch {} already exists!".format(args.branch_name))
g.checkout(args.branch_name)
revert_to_master(g, repo, remote)
branches_string = ' '.join(branches)
result = merge_branches(location, branches_string)
if not result:
revert_to_master(g, repo, remote)
for branch in branches:
result = merge_branches(location, branch)
if not result:
solve_conflicts(g, branch)
g.commit(
"-sm \"Sequential merge of conflicting branch {}\"".
format(branch)
)
else:
repo.index.commit(
"""Octopus merge
This is an Octopus Merge commit of the following branches:
{branches}
Signed-off-by: {user} <{email}>
""".format(
branches='\n'.join(branches),
user=repo.config_reader().get_value('user', 'name'),
email=repo.config_reader().get_value('user', 'email')
),
)
# Pushing to remote
print(
"Push on remote {} {} branch? [Y/n]".format(remote, args.branch_name)
)
if yes_or_no_input():
g.push(['{}'.format(remote), args.branch_name])
else:
print("Warning: did not push to {}".format(remote))
print("Octopus merge ready to be tested!")
if __name__ == "__main__":
main()
|
Python
| 0.000001
|
@@ -5292,24 +5292,45 @@
o, remote)%0A%0A
+ if branches:%0A
branches
@@ -5358,16 +5358,20 @@
anches)%0A
+
resu
@@ -5416,16 +5416,48 @@
_string)
+%0A else:%0A result = True
%0A%0A if
|
411c66bbdb9e9f270a3aff6ec2f6cdd53808a47a
|
Fix #1091 All contacts have the same email on add AR Contact Combos If an object has a method that overrides a field from the Schema, get priority to the value returned by the method over the value returned by the attribute from the schema. i.e Contact object (which inherits from object Person) has the field EmailAddress from the schema and also the method getEmailAddress(). The values returned by them can be different (in the case of a Contact, the schema field returns the email of the client and the method returns the contact email). Use the last one as the preferred value.
|
bika/lims/browser/widgets/referencewidget.py
|
bika/lims/browser/widgets/referencewidget.py
|
from AccessControl import ClassSecurityInfo
from bika.lims import bikaMessageFactory as _
from bika.lims.browser import BrowserView
from bika.lims.interfaces import IReferenceWidgetVocabulary
from bika.lims.permissions import *
from bika.lims.utils import to_unicode as _u
from bika.lims.utils import to_utf8 as _c
from operator import itemgetter
from Products.Archetypes.Registry import registerWidget
from Products.Archetypes.Widget import StringWidget
from Products.CMFCore.utils import getToolByName
from zope.component import getAdapters
import json
import plone
class ReferenceWidget(StringWidget):
_properties = StringWidget._properties.copy()
_properties.update({
'macro': "bika_widgets/referencewidget",
'helper_js': ("bika_widgets/referencewidget.js",),
'helper_css': ("bika_widgets/referencewidget.css",),
'url': 'referencewidget_search',
'catalog_name': 'portal_catalog',
# base_query can be a dict or a callable returning a dict
'base_query': {},
# columnName must contain valid index names
'colModel': [
{'columnName': 'Title', 'width': '30', 'label': _(
'Title'), 'align': 'left'},
{'columnName': 'Description', 'width': '70', 'label': _(
'Description'), 'align': 'left'},
# UID is required in colModel
{'columnName': 'UID', 'hidden': True},
],
# Default field to put back into input elements
'ui_item': 'Title',
'search_fields': ('Title',),
'discard_empty': [],
'popup_width': '550px',
'showOn': 'false',
'sord': 'asc',
'sidx': 'Title',
'force_all': 'true',
'portal_types': {}
})
security = ClassSecurityInfo()
security.declarePublic('process_form')
def process_form(self, instance, field, form, empty_marker=None,
emptyReturnsMarker=False):
"""Return a UID so that ReferenceField understands.
"""
fieldName = field.getName()
if fieldName + "_uid" in form:
uid = form.get(fieldName + "_uid", '')
elif fieldName in form:
uid = form.get(fieldName, '')
else:
uid = None
return uid, {}
def get_combogrid_options(self, context, fieldName):
colModel = self.colModel
if 'UID' not in [x['columnName'] for x in colModel]:
colModel.append({'columnName': 'UID', 'hidden': True})
options = {
'url': self.url,
'colModel': colModel,
'showOn': self.showOn,
'width': self.popup_width,
'sord': self.sord,
'sidx': self.sidx,
'force_all': self.force_all,
'search_fields': self.search_fields,
'discard_empty': self.discard_empty,
}
return json.dumps(options)
def get_base_query(self, context, fieldName):
base_query = self.base_query
if callable(base_query):
base_query = base_query()
if base_query and isinstance(base_query, basestring):
base_query = json.loads(base_query)
# portal_type: use field allowed types
field = context.Schema().getField(fieldName)
allowed_types = getattr(field, 'allowed_types', None)
allowed_types_method = getattr(field, 'allowed_types_method', None)
if allowed_types_method:
meth = getattr(content_instance, allowed_types_method)
allowed_types = meth(field)
# If field has no allowed_types defined, use widget's portal_type prop
base_query['portal_type'] = allowed_types \
if allowed_types \
else self.portal_types
return json.dumps(self.base_query)
registerWidget(ReferenceWidget, title='Reference Widget')
class ajaxReferenceWidgetSearch(BrowserView):
""" Source for jquery combo dropdown box
"""
def __call__(self):
plone.protect.CheckAuthenticator(self.request)
page = self.request['page']
nr_rows = self.request['rows']
sord = self.request['sord']
sidx = self.request['sidx']
colModel = json.loads(_u(self.request.get('colModel', '[]')))
discard_empty = json.loads(_c(self.request.get('discard_empty', "[]")))
rows = []
brains = []
for name, adapter in getAdapters((self.context, self.request), IReferenceWidgetVocabulary):
brains.extend(adapter())
for p in brains:
row = {'UID': getattr(p, 'UID'),
'Title': getattr(p, 'Title')}
other_fields = [x for x in colModel
if x['columnName'] not in row.keys()]
instance = schema = None
discard = False
# This will be faster if the columnNames are catalog indexes
for field in other_fields:
fieldname = field['columnName']
value = getattr(p, fieldname, None)
if not value:
if instance is None:
instance = p.getObject()
schema = instance.Schema()
if fieldname in schema:
value = schema[fieldname].get(instance)
if fieldname in discard_empty and not value:
discard = True
break
# ' ' instead of '' because empty div fields don't render
# correctly in combo results table
row[fieldname] = value and value or ' '
if discard is False:
rows.append(row)
rows = sorted(rows, cmp=lambda x, y: cmp(
str(x).lower(), str(y).lower()),
key=itemgetter(sidx and sidx or 'Title'))
if sord == 'desc':
rows.reverse()
pages = len(rows) / int(nr_rows)
pages += divmod(len(rows), int(nr_rows))[1] and 1 or 0
start = (int(page) - 1) * int(nr_rows)
end = int(page) * int(nr_rows)
ret = {'page': page,
'total': pages,
'records': len(rows),
'rows': rows[start:end]}
return json.dumps(ret)
|
Python
| 0
|
@@ -4954,24 +4954,256 @@
olumnName'%5D%0A
+ # Prioritize method retrieval over field retrieval from schema%0A obj = p.getObject()%0A value = getattr(obj, fieldname, None)%0A if not value or hasattr(value, 'im_self'):%0A
|
b3eb2ef5e65ee18384b3d49981d604cbcf30c400
|
Rename noop setup to 'noop'.
|
buzzmobile/tests/test_utils/rostest_utils.py
|
buzzmobile/tests/test_utils/rostest_utils.py
|
"""A collection of utilities to make testing with ros less painful.
"""
import os
import random
import subprocess
import unittest
import socket
def rand_port():
"""Picks a random port number.
This is potentially unsafe, but shouldn't generally be a problem.
"""
return random.randint(10311, 12311)
class RosTestMeta(type):
def __new__(cls, name, bases, dct):
# It will break unless we throw in fake setup and teardown methods if
# the real ones don't exist yet.
def fake_settear(self):
_ = self
try:
old_setup = dct['setUp']
except KeyError:
old_setup = fake_settear
try:
old_teardown = dct['tearDown']
except KeyError:
old_teardown = fake_settear
def new_setup(self):
"""Wrapper around the user-defined setUp method that runs roscore.
"""
self.port = rand_port()
self.rosmaster_uri = 'http://{}:{}'.format(socket.gethostname(),
self.port)
env = {k:v for k, v in os.environ.iteritems()}
env.update({'ROS_MASTER_URI': self.rosmaster_uri})
try:
#TODO: make this retry on fail, or something similar.
self.roscore = subprocess.Popen(
['roscore', '-p', str(self.port)], env=env)
except:
raise Exception('port not availible')
old_setup(self)
def new_teardown(self):
"""Wrapper around the user-defined tearDown method that ends roscore.
"""
old_teardown(self)
self.roscore.kill()
self.roscore.wait()
dct['setUp'] = new_setup
dct['tearDown'] = new_teardown
dct['setUp'].__name__ = 'setUp'
dct['tearDown'].__name__ = 'tearDown'
return super(RosTestMeta, cls).__new__(cls, name, bases, dct)
class RosTest(unittest.TestCase):
"""A subclass of TestCase that exposes some additional ros-related attrs.
self.port is the port this instance will run on.
"""
__metaclass__ = RosTestMeta
def await(gen):
"""Shim to add await syntax to python2, kinda.
On a high level, this function simply takes the next item from a generator
and passes it along, but it blocks until that item is gotten. When used in
the context
await(TestNode.wait_for_message())
it will wait for the `wait_for_message` call to finish running.
In other words, if await(Node) doesn't raise an error, you should have
successfully recieved a message, and can therefore test against it,
otherwise the message access can raise a NoMessage error.
"""
res = None
for item in gen:
res = item
return res
|
Python
| 0.000007
|
@@ -515,25 +515,14 @@
def
-fake_settear(self
+noop(_
):%0A
@@ -536,16 +536,12 @@
-_ = self
+pass
%0A%0A
@@ -637,28 +637,20 @@
setup =
-fake_settear
+noop
%0A
@@ -754,20 +754,12 @@
n =
-fake_settear
+noop
%0A%0A
|
5eb11aa2a41e2d2448cf81d3ef4416a7aaf3a537
|
change db location to match reinit.sh script
|
calebasse/settings/local_settings_example.py
|
calebasse/settings/local_settings_example.py
|
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'calebasse.sqlite3',
}
}
|
Python
| 0
|
@@ -99,16 +99,26 @@
alebasse
+/calebasse
.sqlite3
|
717721018eff9897d2488b48aa932fcaa3694615
|
Fix crash
|
chatterbot/adapters/logic/closest_meaning.py
|
chatterbot/adapters/logic/closest_meaning.py
|
from chatterbot.adapters.exceptions import EmptyDatasetException
from .base_match import BaseMatchAdapter
from nltk.corpus import wordnet
from nltk.corpus import stopwords
from nltk import word_tokenize
class ClosestMeaningAdapter(BaseMatchAdapter):
def __init__(self, **kwargs):
super(ClosestMeaningAdapter, self).__init__(**kwargs)
from nltk.data import find
from nltk import download
# Download data if needed
try:
find('wordnet.zip')
except LookupError:
download('wordnet')
try:
find('stopwords.zip')
except LookupError:
download('stopwords')
try:
find('punkt.zip')
except LookupError:
download('punkt')
def get_tokens(self, text, exclude_stop_words=True):
"""
Takes a string and converts it to a tuple
of each word. Skips common stop words such
as ("is, the, a, ...") is 'exclude_stop_words'
is True.
"""
lower = text.lower()
tokens = word_tokenize(lower)
# Remove any stop words from the string
if exclude_stop_words:
excluded_words = stopwords.words("english")
tokens = set(tokens) - set(excluded_words)
return tokens
def get_similarity(self, string1, string2):
"""
Calculate the similarity of two statements.
This is based on the total similarity between
each word in each sentence.
"""
import itertools
tokens1 = self.get_tokens(string1)
tokens2 = self.get_tokens(string2)
total_similarity = 0
# Get the highest matching value for each possible combination of words
for combination in itertools.product(*[tokens1, tokens2]):
synset1 = wordnet.synsets(combination[0])
synset2 = wordnet.synsets(combination[1])
if synset1 and synset2:
# Compare the first synset in each list of synsets
similarity = synset1[0].path_similarity(synset2[0])
if similarity:
total_similarity = total_similarity + similarity
return total_similarity
def get(self, input_statement, statement_list=None):
"""
Takes a statement string and a list of statement strings.
Returns the closest matching statement from the list.
"""
statement_list = self.get_available_statements(statement_list)
if not statement_list:
if self.has_storage_context:
# Use a randomly picked statement
return 0, self.context.storage.get_random()
else:
raise EmptyDatasetException
# Get the text of each statement
text_of_all_statements = []
for statement in statement_list:
text_of_all_statements.append(statement.text)
# Check if an exact match exists
if input_statement.text in text_of_all_statements:
return 1, input_statement
closest_statement = None
closest_similarity = -1
total_similarity = 0
# For each option in the list of options
for statement in text_of_all_statements:
similarity = self.get_similarity(input_statement.text, statement)
total_similarity += similarity
if similarity > closest_similarity:
closest_similarity = similarity
closest_statement = statement
confidence = closest_similarity / total_similarity
return confidence, next(
(s for s in statement_list if s.text == closest_statement), None
)
|
Python
| 0.000011
|
@@ -3517,32 +3517,49 @@
nt = statement%0A%0A
+ try:%0A
confiden
@@ -3592,32 +3592,75 @@
total_similarity
+%0A except:%0A confidence = 0
%0A%0A return
|
55d364500e92e596f3d6e1897dbd4dded2b2410c
|
Move skylib to 0.7.0.
|
apple/repositories.bzl
|
apple/repositories.bzl
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definitions for handling Bazel repositories used by the Apple rules."""
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
def _colorize(text, color):
"""Applies ANSI color codes around the given text."""
return "\033[1;{color}m{text}{reset}".format(
color = color,
reset = "\033[0m",
text = text,
)
def _green(text):
return _colorize(text, "32")
def _yellow(text):
return _colorize(text, "33")
def _warn(msg):
"""Outputs a warning message."""
print("\n{prefix} {msg}\n".format(
msg = msg,
prefix = _yellow("WARNING:"),
))
def _maybe(repo_rule, name, ignore_version_differences, **kwargs):
"""Executes the given repository rule if it hasn't been executed already.
Args:
repo_rule: The repository rule to be executed (e.g.,
`native.git_repository`.)
name: The name of the repository to be defined by the rule.
ignore_version_differences: If `True`, warnings about potentially
incompatible versions of depended-upon repositories will be silenced.
**kwargs: Additional arguments passed directly to the repository rule.
"""
if name in native.existing_rules():
if not ignore_version_differences:
# Verify that the repository is being loaded from the same URL and tag
# that we asked for, and warn if they differ.
# TODO(allevato): This isn't perfect, because the user could load from the
# same commit SHA as the tag, or load from an HTTP archive instead of a
# Git repository, but this is a good first step toward validating.
# Long-term, we should extend this function to support dependencies other
# than Git.
existing_repo = native.existing_rule(name)
if (existing_repo.get("remote") != kwargs.get("remote") or
existing_repo.get("tag") != kwargs.get("tag")):
expected = "{url} (tag {tag})".format(
tag = kwargs.get("tag"),
url = kwargs.get("remote"),
)
existing = "{url} (tag {tag})".format(
tag = existing_repo.get("tag"),
url = existing_repo.get("remote"),
)
_warn("""\
`build_bazel_rules_apple` depends on `{repo}` loaded from {expected}, but we \
have detected it already loaded into your workspace from {existing}. You may \
run into compatibility issues. To silence this warning, pass \
`ignore_version_differences = True` to `apple_rules_dependencies()`.
""".format(
existing = _yellow(existing),
expected = _green(expected),
repo = name,
))
return
repo_rule(name = name, **kwargs)
def apple_rules_dependencies(ignore_version_differences = False):
"""Fetches repositories that are dependencies of the `rules_apple` workspace.
Users should call this macro in their `WORKSPACE` to ensure that all of the
dependencies of the Swift rules are downloaded and that they are isolated from
changes to those dependencies.
Args:
ignore_version_differences: If `True`, warnings about potentially
incompatible versions of depended-upon repositories will be silenced.
"""
_maybe(
git_repository,
name = "bazel_skylib",
remote = "https://github.com/bazelbuild/bazel-skylib.git",
tag = "0.6.0",
ignore_version_differences = ignore_version_differences,
)
_maybe(
git_repository,
name = "build_bazel_apple_support",
remote = "https://github.com/bazelbuild/apple_support.git",
tag = "0.4.0",
ignore_version_differences = ignore_version_differences,
)
_maybe(
git_repository,
name = "build_bazel_rules_swift",
remote = "https://github.com/bazelbuild/rules_swift.git",
tag = "0.6.0",
ignore_version_differences = ignore_version_differences,
)
|
Python
| 0.001424
|
@@ -4089,33 +4089,33 @@
tag = %220.
-6
+7
.0%22,%0A ign
|
26d104b5758d41954d0da4a3447cc22c089c1cf0
|
fix migrations
|
cmsplugin_iframe2/migrations/0001_initial.py
|
cmsplugin_iframe2/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-04-01 18:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cms', '0016_auto_20160608_1535'),
]
operations = [
migrations.CreateModel(
name='IFramePlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='cmsplugin_iframe2_iframeplugin', serialize=False, to='cms.CMSPlugin')),
('style', models.CharField(blank=True, choices=[(None, 'no class')], help_text='value of HTML attribute class', max_length=50, null=True, verbose_name='style')),
('width', models.CharField(blank=True, choices=[('200', '200 pixels'), ('400', '400 pixels'), ('800', '800 pixels'), ('100%', '100 %')], max_length=10, null=True, verbose_name='width')),
('height', models.CharField(blank=True, choices=[('150', '150 pixels'), ('300', '300 pixels'), ('600', '600 pixels'), ('1200', '1200 pixels'), ('100%', '100 %')], max_length=10, null=True, verbose_name='height')),
('align', models.CharField(blank=True, choices=[('left', 'align left'), ('right', 'align right')], max_length=10, null=True, verbose_name='align')),
('src', models.TextField(verbose_name='url')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
|
Python
| 0.000002
|
@@ -181,16 +181,45 @@
letion%0A%0A
+from ..conf import settings%0A%0A
%0Aclass M
@@ -756,28 +756,41 @@
ces=
-%5B(None, 'no class')%5D
+settings.CMSPLUGIN_IFRAME_CLASSES
, he
@@ -947,96 +947,40 @@
ces=
-%5B('200', '200 pixels'), ('400', '400 pixels'), ('800', '800 pixels'), ('100%25', '100 %25')%5D
+settings.CMSPLUGIN_IFRAME_WIDTHS
, ma
@@ -1095,121 +1095,41 @@
ces=
-%5B('150', '150 pixels'), ('300', '300 pixels'), ('600', '600 pixels'), ('1200', '1200 pixels'), ('100%25', '100 %25')%5D
+settings.CMSPLUGIN_IFRAME_HEIGHTS
, ma
|
2ef94518cdc933e619f4809941932631213ab945
|
Should be http not https
|
apps/comments/tests.py
|
apps/comments/tests.py
|
# -*- coding: utf-8 -*-
# Amara, universalsubtitles.org
#
# Copyright (C) 2012 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
import datetime
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from videos.models import Video
from apps.auth.models import CustomUser as User
from django.core import mail
from apps.comments.models import Comment
from messages.tasks import send_video_comment_notification, SUBJECT_EMAIL_VIDEO_COMMENTED
class CommentEmailTests(TestCase):
fixtures = ['test.json', 'subtitle_fixtures.json']
def setUp(self):
self.user = User.objects.all()[0]
self.video = Video.objects.all()[0]
self.auth = dict(username='admin', password='admin')
self.logged_user = User.objects.get(username=self.auth['username'])
l = self.video.subtitle_language()
l.language_code = "en"
l.save()
comment = Comment(content_object=self.video)
comment.user = self.logged_user
comment.content = "testme"
comment.submit_date = datetime.datetime.now()
comment.save()
self.comment = comment
def _create_followers(self, video, num_followers):
for x in xrange(0,num_followers):
u = User(username="%s@example.lcom" %x, email="%s@example.com" % x)
u.save()
video.followers.add(u)
def _post_comment_for(self, obj):
self.client.login(**self.auth)
data = {
"content": "hi from tests",
'content_type' : ContentType.objects.get_for_model(obj).pk,
'object_pk':obj.pk
}
response = self.client.post(reverse("comments:post"), data)
self.assertEqual(response.status_code, 200)
return response
def test_universal_urls(self):
from localeurl.utils import universal_url
domain= Site.objects.get_current().domain
vid = self.video.video_id
self.assertEqual("https://%s/videos/%s/info/" % (domain, vid), universal_url("videos:video", kwargs={"video_id":vid}))
self.assertEqual("https://%s/videos/%s/info/" % (domain, vid), universal_url("videos:video", args=(vid,)))
def test_simple_email(self):
num_followers = 5
self._create_followers(self.video, num_followers)
mail.outbox = []
send_video_comment_notification(self.comment.pk)
self.assertEqual(len(mail.outbox), num_followers)
email = mail.outbox[0]
self.assertEqual(email.subject, SUBJECT_EMAIL_VIDEO_COMMENTED % dict(user=self.comment.user.username,
title=self.video.title_display()))
return None
def test_email_content(self):
num_followers = 2
body_dicts = []
from utils import tasks
self._create_followers(self.video, num_followers)
mail.outbox = []
send_video_comment_notification(self.comment.pk)
self.assertEqual(len(mail.outbox), num_followers)
for msg in mail.outbox:
self.assertEqual(msg.subject, u'admin left a comment on the video Hax')
def test_comment_view_for_video(self):
num_followers = 2
self._create_followers(self.video, num_followers)
mail.outbox = []
response = self._post_comment_for(self.video)
followers = set(self.video.notification_list(self.logged_user))
self.assertEqual(len(mail.outbox), len(followers))
email = mail.outbox[0]
self.assertEqual(email.subject, SUBJECT_EMAIL_VIDEO_COMMENTED % dict(user=str(self.comment.user),
title=self.video.title_display()))
def test_comment_view_for_language(self):
num_followers = 2
self._create_followers(self.video, num_followers)
lang = self.video.subtitle_language()
mail.outbox = []
response = self._post_comment_for(lang)
followers = set(self.video.notification_list(self.logged_user))
self.assertEqual(len(mail.outbox), len(followers))
email = mail.outbox[0]
self.assertEqual(email.subject, SUBJECT_EMAIL_VIDEO_COMMENTED % dict(user=self.comment.user.username,
title=self.video.title_display()))
|
Python
| 0.999636
|
@@ -2710,33 +2710,32 @@
ssertEqual(%22http
-s
://%25s/videos/%25s/
@@ -2844,17 +2844,16 @@
al(%22http
-s
://%25s/vi
|
4c96d4c2132575f89abdd3dd41eef5bb27a210e8
|
Fix timezone warnings in factory.py
|
apps/core/factories.py
|
apps/core/factories.py
|
from factory import Faker, Iterator, SubFactory
from factory.django import DjangoModelFactory
from apps.data.factories import EntryFactory, RepositoryFactory
from . import models
class SpeciesFactory(DjangoModelFactory):
name = Faker('word')
reference = SubFactory(EntryFactory)
repository = SubFactory(RepositoryFactory)
description = Faker('text', max_nb_chars=300)
class Meta:
model = 'core.Species'
django_get_or_create = ('name', )
class StrainFactory(DjangoModelFactory):
name = Faker('word')
description = Faker('text', max_nb_chars=300)
species = SubFactory(SpeciesFactory)
reference = SubFactory(EntryFactory)
class Meta:
model = 'core.Strain'
django_get_or_create = ('name', )
class OmicsUnitTypeFactory(DjangoModelFactory):
name = Faker('word')
description = Faker('text', max_nb_chars=300)
class Meta:
model = 'core.OmicsUnitType'
django_get_or_create = ('name', )
class OmicsUnitFactory(DjangoModelFactory):
reference = SubFactory(EntryFactory)
strain = SubFactory(StrainFactory)
type = SubFactory(OmicsUnitTypeFactory)
status = Iterator(s[0] for s in models.OmicsUnit.STATUS_CHOICES)
class Meta:
model = 'core.OmicsUnit'
django_get_or_create = ('reference', 'strain')
class PixelerFactory(DjangoModelFactory):
date_joined = Faker('date_time_this_decade')
email = Faker('email')
first_name = Faker('first_name')
is_active = Faker('pybool')
is_staff = Faker('pybool')
is_superuser = Faker('pybool')
last_login = Faker('date_time_this_decade')
last_name = Faker("last_name")
password = Faker("password")
username = Faker("user_name")
class Meta:
model = 'core.Pixeler'
django_get_or_create = ('username',)
class OmicsAreaFactory(DjangoModelFactory):
description = Faker('text', max_nb_chars=300)
level = Faker('pyint')
lft = Faker('pyint')
name = Faker('word')
rght = Faker('pyint')
tree_id = Faker('pyint')
class Meta:
model = 'core.OmicsArea'
django_get_or_create = ('name',)
class ExperimentFactory(DjangoModelFactory):
omics_area = SubFactory(OmicsAreaFactory)
created_at = Faker('datetime')
description = Faker('text', max_nb_chars=300)
released_at = Faker('datetime')
saved_at = Faker('datetime')
class Meta:
model = 'core.Experiment'
django_get_or_create = ('omics_area', 'created_at')
class AnalysisFactory(DjangoModelFactory):
pixeler = SubFactory(PixelerFactory)
created_at = Faker('date')
description = Faker('text', max_nb_chars=300)
notebook = Faker('file_path', depth=1, category=None, extension=None)
saved_at = Faker('date')
secondary_data = Faker('file_path', depth=1, category=None, extension=None)
class Meta:
model = 'core.Analysis'
django_get_or_create = ('secondary_data', 'pixeler',)
class PixelFactory(DjangoModelFactory):
value = Faker('pyfloat')
quality_score = Faker('pyfloat', left_digits=0)
omics_unit = SubFactory(OmicsUnitFactory)
analysis = SubFactory(AnalysisFactory)
class Meta:
model = 'core.Pixel'
django_get_or_create = ('value', 'omics_unit', 'analysis')
|
Python
| 0.000015
|
@@ -86,16 +86,50 @@
lFactory
+%0Afrom django.utils import timezone
%0A%0Afrom a
@@ -195,23 +195,209 @@
rom
-. import models
+faker import Faker as faker_Faker%0Afrom . import models%0A%0A%0Adef Get_date_with_timezone():%0A TZ = timezone.get_default_timezone()%0A fake = faker_Faker()%0A return fake.date_time_this_decade(tzinfo=TZ)
%0A%0A%0Ac
@@ -1615,37 +1615,31 @@
d =
-Faker('date_time_this_decade'
+Get_date_with_timezone(
)%0A
@@ -1819,37 +1819,31 @@
n =
-Faker('date_time_this_decade'
+Get_date_with_timezone(
)%0A
|
53f5e4cfdf3c841cb3cb87c7a63cc9d2d24d2ae6
|
error when employee res_partner relation it is false
|
hr_employee_catch_partner/models/hr_employee.py
|
hr_employee_catch_partner/models/hr_employee.py
|
# -*- coding: utf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, api
class HrEmployee(models.Model):
_inherit = 'hr.employee'
@api.multi
def onchange_user(self, user_id):
user_obj = self.env['res.users']
result = super(HrEmployee, self).onchange_user(user_id)
if user_id:
cond = [('id', '=', user_id)]
user = user_obj.search(cond)
if user.partner_id:
result['value']['address_home_id'] = user.partner_id.id
return result
@api.model
def create(self, vals):
employee = super(HrEmployee, self).create(vals)
if employee.address_home_id:
employee.address_home_id.employee_id = employee.id
return employee
@api.multi
def write(self, vals):
if vals.get('address_home_id', False):
other_employee = self.search(
[('address_home_id', '=', self.address_home_id.id),
('id', '!=', self.id)], limit=1)
self.address_home_id.employee_id = other_employee or False
self.env['res.partner'].browse(
vals['address_home_id']).employee_id = self.id
return super(HrEmployee, self).write(vals)
|
Python
| 0.999991
|
@@ -922,16 +922,57 @@
False):%0A
+ if self.address_home_id:%0A
@@ -1005,16 +1005,20 @@
search(%0A
+
@@ -1094,16 +1094,20 @@
+
('id', '
@@ -1131,16 +1131,20 @@
imit=1)%0A
+
|
322cf2ad0909b021c2f0c740943fe0e3b443d630
|
Update backend
|
app/urlshortener/__init__.py
|
app/urlshortener/__init__.py
|
import redis
from app.urlshortener.name import getNthName
class URLShortener:
def __init__(self):
self.r = redis.StrictRedis(host='localhost', port=6379, db=0)
self.namespace = 'shorturl'
self.ttl = 60*60*24*7*2 # two weeks
def shorten(self, url, name):
existing_url = self.get(name)
if existing_url is None:
self.r.set(self.getRedisKeyForURL(name), url)
self.r.set(self.getRedisKeyForVisitCount(name), 0)
self.resetTTL(name)
return True
elif existing_url == url:
self.resetTTL(name)
return True
else:
return False
def visit(self, name):
url = self.get(name)
if url is not None:
self.r.incr(self.getRedisKeyForVisitCount(name))
self.resetTTL(name)
return url
def getNextName(self, name=None):
if name and not self.exists(name):
return name
while True:
n = self.r.incr(self.getRedisKeyForDefaultNameIndex())
name = getNthName(n)
if not self.exists(name):
return unicode(name)
def resetNameIndex(self):
self.r.set(self.getRedisKeyForDefaultNameIndex(), 0)
def exists(self, name):
url = self.get(name)
return url is not None
def get(self, name):
return self.r.get(self.getRedisKeyForURL(name))
def getVisitCount(self, name):
return self.r.get(self.getRedisKeyForVisitCount(name))
def resetTTL(self, name):
self.r.expire(self.getRedisKeyForURL(name), self.ttl)
self.r.expire(self.getRedisKeyForVisitCount(name), self.ttl)
def getTTL(self, name):
return self.r.ttl(self.getRedisKeyForURL(name))
def getRedisKeyForURL(self, name):
return self.getRedisKey('url', name)
def getRedisKeyForVisitCount(self, name):
return self.getRedisKey('visit-count', name)
def getRedisKeyForDefaultNameIndex(self):
return self.getRedisKey('default-name-index')
def getRedisKey(self, *args):
return ':'.join((self.namespace,) + args)
|
Python
| 0.000001
|
@@ -95,24 +95,37 @@
_init__(self
+, default_ttl
):%0A s
@@ -203,73 +203,189 @@
lf.n
-amespace = 'shorturl'%0A self.ttl = 60*60*24*7*2 # two weeks
+ew_namespace = 'lyli'%0A self.old_namespace = 'shorturl' # This can be removed on 29.11.2014 at 22:50%0A self.namespace = self.new_namespace%0A self.ttl = default_ttl
%0A%0A
@@ -1141,40 +1141,14 @@
elf.
-r.incr(self.getRedisKeyForDefaul
+getNex
tNam
@@ -1155,17 +1155,16 @@
eIndex()
-)
%0A
@@ -1259,24 +1259,141 @@
nicode(name)
+%0A %0A def getNextNameIndex(self):%0A n = self.r.incr(self.getRedisKeyForDefaultNameIndex())%0A return n
%0A%0A def re
@@ -1573,84 +1573,659 @@
-def get(self, name):%0A return self.r.get(self.getRedisKeyForURL(name))
+# move slowly to new name space%0A def get(self, name):%0A url = self.r.get(self.getRedisKeyForURL(name))%0A if url is None:%0A self.namespace = self.old_namespace%0A url = self.r.get(self.getRedisKeyForURL(name))%0A visitcount = self.getVisitCount(name)%0A %0A self.namespace = self.new_namespace%0A # create link in new namespace if it was only in the old one%0A if url is not None:%0A self.r.set(self.getRedisKeyForURL(name), url)%0A self.r.set(self.getRedisKeyForVisitCount(name), visitcount)%0A self.resetTTL(name)%0A return url
%0A%0A
|
d8c60afae9f116bae963d91fe097eb8b7fd1f1a0
|
Fix merge conflicts
|
bluebottle/clients/tests/test_commands.py
|
bluebottle/clients/tests/test_commands.py
|
import mock
from shutil import copyfile
<<<<<<< HEAD
=======
from django.db import connection
>>>>>>> hotfix/speed-up-tests
from django.test import TestCase
from django.test.utils import override_settings
from django.core.management import call_command
from django.conf import settings
from bluebottle.members.models import Member
from bluebottle.categories.models import Category
from bluebottle.projects.models import Project
from bluebottle.tasks.models import Task
from bluebottle.rewards.models import Reward
from bluebottle.test.factory_models.geo import CountryFactory
from bluebottle.wallposts.models import Wallpost
from bluebottle.orders.models import Order
@override_settings(TENANT_APPS=('django_nose',),
TENANT_MODEL='clients.Client',
DATABASE_ROUTERS=('tenant_schemas.routers.TenantSyncRouter', ))
class ManagementCommandArgsTests(TestCase):
def test_new_tenant(self):
from ..management.commands.new_tenant import Command as NewTenantCommand
cmd = NewTenantCommand()
self.assertEqual(len(cmd.option_list), 5)
self.assertEqual(cmd.option_list[0].dest, 'full_name')
self.assertEqual(cmd.option_list[1].dest, 'schema_name')
self.assertEqual(cmd.option_list[2].dest, 'domain_url')
self.assertEqual(cmd.option_list[3].dest, 'client_name')
self.assertEqual(cmd.option_list[4].dest, 'post_command')
@override_settings(TENANT_APPS=('django_nose',),
TENANT_MODEL='clients.Client',
DATABASE_ROUTERS=('tenant_schemas.routers.TenantSyncRouter',))
class ManagementCommandTests(TestCase):
def test_new_tenant(self):
from ..management.commands.new_tenant import Command as NewTenantCommand
cmd = NewTenantCommand()
with mock.patch('bluebottle.clients.management.commands.new_tenant.Command.handle') as handle_mock:
call_command(cmd, full_name='Test Client',
schema_name='test_schema',
domain_url='test.localhost',
client_name='test')
args, kwargs = handle_mock.call_args_list[0]
self.assertEqual(kwargs['full_name'], 'Test Client')
self.assertEqual(kwargs['schema_name'], 'test_schema')
self.assertEqual(kwargs['client_name'], 'test')
self.assertEqual(kwargs['domain_url'], 'test.localhost')
class ManagementCommandNewTenantTests(TestCase):
def test_create_new_tenant(self):
from ..management.commands.new_tenant import Command as NewTenantCommand
connection.set_schema_to_public()
cmd = NewTenantCommand()
store_func = 'bluebottle.clients.management.commands.new_tenant.Command.store_client'
super_func = 'bluebottle.clients.management.commands.new_tenant.Command.create_client_superuser'
with mock.patch(store_func) as store_mock, mock.patch(super_func) as super_mock:
call_command(
cmd,
full_name='New Tenant',
schema_name='new',
domain_url='http://new.localhost:8000',
client_name='new'
)
store_args, store_kwargs = store_mock.call_args_list[0]
super_args, super_kwargs = super_mock.call_args_list[0]
self.assertEqual(store_kwargs['name'], 'New Tenant')
self.assertEqual(store_kwargs['client_name'], 'new')
self.assertEqual(super_args, ('new',))
@override_settings(TENANT_APPS=('django_nose',),
TENANT_MODEL='clients.Client',
DATABASE_ROUTERS=('tenant_schemas.routers.TenantSyncRouter',))
class BulkImportTests(TestCase):
def setUp(self):
from ..management.commands.bulk_import import Command as BulkImportCommand
self.cmd = BulkImportCommand()
super(BulkImportTests, self).setUp()
CountryFactory.create(alpha2_code='NL')
def test_bulk_import_args(self):
json_file = '/tmp/empty.json'
with open(json_file, 'w') as outfile:
outfile.write('{}')
with mock.patch('bluebottle.clients.management.commands.bulk_import.Command.handle') as handle_mock:
call_command(self.cmd, file=json_file, tenant='test')
args, kwargs = handle_mock.call_args_list[0]
self.assertEqual(kwargs['file'], json_file)
self.assertEqual(kwargs['tenant'], 'test')
def test_bulk_import(self):
# setup some test files
test_file_dir = '{}/bluebottle/clients/tests/files/'.format(settings.PROJECT_ROOT)
copyfile('{}test-image.png'.format(test_file_dir), '/tmp/test-image.png')
json_file = '{}bulk_import.json'.format(test_file_dir)
call_command(self.cmd, file=json_file, tenant='test')
# users (includes admin user)
self.assertEqual(Member.objects.count(), 3)
user = Member.objects.get(email='bryan@brown.com')
self.assertFalse(user.is_staff)
self.assertTrue(user.is_active)
self.assertEqual(user.username, 'bryan')
self.assertEqual(user.first_name, 'Bryan')
self.assertEqual(user.last_name, 'Brown')
self.assertEqual(user.primary_language, 'en')
# categories
self.assertEqual(Category.objects.count(), 1)
category = Category.objects.get(slug='awesome-actors')
self.assertEqual(category.title, 'Awesome Actors')
self.assertEqual(category.description,
'Awesome Actors from Around the World')
# projects
self.assertEqual(Project.objects.count(), 1)
project = Project.objects.get(slug='f-x3')
self.assertEqual(project.owner.email, 'bryan@brown.com')
self.assertEqual(project.amount_asked.amount, 10000000.00)
self.assertEqual(project.video_url, 'https://www.youtube.com/watch?v=n1ncordnTMc')
# tasks
self.assertEqual(Task.objects.count(), 1)
task = Task.objects.get(project=project)
self.assertEqual(task.description, 'This movie is not going to be cheap')
self.assertEqual(task.status, 'realized')
# rewards
self.assertEqual(Reward.objects.count(), 1)
reward = Reward.objects.get(project=project)
self.assertEqual(reward.title, 'Front row')
self.assertEqual(reward.limit, 0)
self.assertEqual(reward.amount.amount, 100000.00)
# wallposts
self.assertEqual(Wallpost.objects.count(), 1)
wallpost = Wallpost.objects.filter(object_id=project.id).all()[0]
self.assertEqual(wallpost.author, user)
self.assertEqual(wallpost.text, 'Best movie ever!')
# orders
self.assertEqual(Order.objects.count(), 1)
order = Order.objects.first()
self.assertEqual(order.status, 'success')
self.assertEqual(order.total.amount, 35.00)
self.assertEqual(order.user, user)
self.assertEqual(order.donations.count(), 1)
self.assertEqual(order.donations.first().project, project)
|
Python
| 0.000008
|
@@ -38,29 +38,8 @@
le%0A%0A
-%3C%3C%3C%3C%3C%3C%3C HEAD%0A=======%0A
from
@@ -71,38 +71,8 @@
ion%0A
-%3E%3E%3E%3E%3E%3E%3E hotfix/speed-up-tests%0A
from
|
7ef6132194ccd207c554521209ba3472bf523940
|
Make factories return unicode data
|
common/djangoapps/student/tests/factories.py
|
common/djangoapps/student/tests/factories.py
|
from student.models import (User, UserProfile, Registration,
CourseEnrollmentAllowed, CourseEnrollment)
from django.contrib.auth.models import Group
from datetime import datetime
from factory import DjangoModelFactory, SubFactory, PostGenerationMethodCall, post_generation, Sequence
from uuid import uuid4
class GroupFactory(DjangoModelFactory):
FACTORY_FOR = Group
name = 'staff_MITx/999/Robot_Super_Course'
class UserProfileFactory(DjangoModelFactory):
FACTORY_FOR = UserProfile
user = None
name = 'Robot Test'
level_of_education = None
gender = 'm'
mailing_address = None
goals = 'World domination'
class RegistrationFactory(DjangoModelFactory):
FACTORY_FOR = Registration
user = None
activation_key = uuid4().hex
class UserFactory(DjangoModelFactory):
FACTORY_FOR = User
username = Sequence('robot{0}'.format)
email = Sequence('robot+test+{0}@edx.org'.format)
password = PostGenerationMethodCall('set_password',
'test')
first_name = Sequence('Robot{0}'.format)
last_name = 'Test'
is_staff = False
is_active = True
is_superuser = False
last_login = datetime(2012, 1, 1)
date_joined = datetime(2011, 1, 1)
@post_generation
def profile(obj, create, extracted, **kwargs):
if create:
obj.save()
return UserProfileFactory.create(user=obj, **kwargs)
elif kwargs:
raise Exception("Cannot build a user profile without saving the user")
else:
return None
class AdminFactory(UserFactory):
is_staff = True
class CourseEnrollmentFactory(DjangoModelFactory):
FACTORY_FOR = CourseEnrollment
user = SubFactory(UserFactory)
course_id = 'edX/toy/2012_Fall'
class CourseEnrollmentAllowedFactory(DjangoModelFactory):
FACTORY_FOR = CourseEnrollmentAllowed
email = 'test@edx.org'
course_id = 'edX/test/2012_Fall'
|
Python
| 0.005613
|
@@ -405,16 +405,17 @@
name =
+u
'staff_M
@@ -442,16 +442,16 @@
Course'%0A
-
%0A%0Aclass
@@ -548,16 +548,17 @@
name =
+u
'Robot T
@@ -605,16 +605,17 @@
ender =
+u
'm'%0A
@@ -649,16 +649,17 @@
goals =
+u
'World d
@@ -798,16 +798,32 @@
d4().hex
+.decode('ascii')
%0A%0A%0Aclass
@@ -900,24 +900,25 @@
= Sequence(
+u
'robot%7B0%7D'.f
@@ -945,16 +945,17 @@
equence(
+u
'robot+t
@@ -1109,16 +1109,17 @@
equence(
+u
'Robot%7B0
@@ -1797,16 +1797,16 @@
actory)%0A
-
cour
@@ -1809,24 +1809,25 @@
course_id =
+u
'edX/toy/201
|
0d1518bc9a329a8ccf6ed2559998ab8e65cbcb33
|
Don't assert the response is a JSON response
|
argonauts/testutils.py
|
argonauts/testutils.py
|
import json
import functools
from django.conf import settings
from django.test import Client, TestCase
__all__ = ['JsonTestClient', 'JsonTestCase']
class JsonTestClient(Client):
def _json_request(self, method, url, data=None, *args, **kwargs):
method_func = getattr(super(JsonTestClient, self), method)
if method == 'get':
encode = lambda x: x
else:
encode = json.dumps
if data is not None:
resp = method_func(url, encode(data), content_type='application/json', *args, **kwargs)
else:
resp = method_func(url, content_type='application/json', *args, **kwargs)
assert resp['Content-Type'].startswith('application/json')
charset = resp.charset or settings.DEFAULT_CHARSET
resp.json = json.loads(resp.content.decode(charset))
return resp
def __getattribute__(self, attr):
if attr in ('get', 'post', 'put', 'delete', 'trace', 'head', 'patch', 'options'):
return functools.partial(self._json_request, attr)
else:
return super(JsonTestClient, self).__getattribute__(attr)
class JsonTestCase(TestCase):
client_class = JsonTestClient
|
Python
| 0.99848
|
@@ -663,14 +663,10 @@
-assert
+if
res
@@ -713,18 +713,22 @@
n/json')
-%0A%0A
+:%0A
@@ -782,24 +782,28 @@
SET%0A
+
resp.json =
@@ -843,16 +843,17 @@
arset))%0A
+%0A
|
9be70159a8649e101cde029328fa376a621eaf98
|
Fix output file name to match output directory
|
accre_job_submitter.py
|
accre_job_submitter.py
|
#!/usr/bin/env python3
import os
import subprocess
from uuid import uuid4
ACCRE_JOB_TEMPLATE = """\
#!/bin/bash
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=1
#SBATCH --mem=1G
#SBATCH --time=0-00:30:00
#SBATCH --job-name=PCreo_Sphere
module load Anaconda3
PARAM_S=<|PARAM_S|>
PARAM_D=<|PARAM_D|>
PARAM_N=<|PARAM_N|>
echo "Creating output directory..."
mkdir /gpfs23/scratch/zhangdk/pcreo_runs/output_data_${SLURM_JOBID}
if [ $? -ne 0 ]
then
echo "Could not create output directory. Exiting."
exit 101
fi
cd /gpfs23/scratch/zhangdk/pcreo_runs/output_data_${SLURM_JOBID}
if [ $? -ne 0 ]
then
echo "Could not move to output directory. Exiting."
exit 102
fi
echo "Compiling PCreo_Sphere for initial run..."
/home/zhangdk/pcreo/compile.py ./pcreo_sphere_exe /home/zhangdk/pcreo/src/pcreo_sphere.f90 \
PCREO_DOUBLE_PREC PCREO_SYMMETRY PCREO_BFGS \
PCREO_PARAM_S=${PARAM_S}_rk PCREO_PARAM_D=${PARAM_D} PCREO_PARAM_N=${PARAM_N}
if [ $? -ne 0 ]
then
echo "PCreo_Sphere compilation failed. Exiting."
exit 103
fi
echo "Performing initial run..."
./pcreo_sphere_exe
rm ./pcreo_sphere_exe
/home/zhangdk/pcreo/out2in.py
echo "Compiling PCreo_Sphere for extension run..."
/home/zhangdk/pcreo/compile.py ./pcreo_sphere_exe /home/zhangdk/pcreo/src/pcreo_sphere.f90 \
PCREO_QUAD_PREC PCREO_SYMMETRY PCREO_BFGS \
PCREO_PARAM_S=${PARAM_S}_rk PCREO_PARAM_D=${PARAM_D} PCREO_PARAM_N=${PARAM_N}
if [ $? -ne 0 ]
then
echo "PCreo_Sphere compilation failed. Exiting."
exit 104
fi
echo "Performing extension run..."
./pcreo_sphere_exe
/home/zhangdk/pcreo/out2in.py
echo "Performing redundant extension run..."
./pcreo_sphere_exe
rm ./pcreo_sphere_exe
/home/zhangdk/pcreo/out2in.py
echo "Compiling PCreo_Sphere for cleanup run..."
/home/zhangdk/pcreo/compile.py ./pcreo_sphere_exe /home/zhangdk/pcreo/src/pcreo_sphere.f90 \
PCREO_QUAD_PREC PCREO_SYMMETRY PCREO_GRAD_DESC \
PCREO_PARAM_S=${PARAM_S}_rk PCREO_PARAM_D=${PARAM_D} PCREO_PARAM_N=${PARAM_N}
if [ $? -ne 0 ]
then
echo "PCreo_Sphere compilation failed. Exiting."
exit 105
fi
echo "Performing cleanup run..."
./pcreo_sphere_exe
/home/zhangdk/pcreo/out2in.py
echo "Performing redundant cleanup run..."
./pcreo_sphere_exe
rm ./pcreo_sphere_exe
/home/zhangdk/pcreo/out2in.py
echo "PCreo job successfully completed. Exiting."
"""
def submit_job(s, d, n, k=1):
s = float(s)
d = int(d)
n = int(n)
script_name = str(uuid4()) + '.sh'
with open(script_name, 'w+') as script_file:
script_file.write(ACCRE_JOB_TEMPLATE.replace(
'<|PARAM_S|>', str(s)).replace(
'<|PARAM_D|>', str(d)).replace(
'<|PARAM_N|>', str(n)))
if k == 1:
subprocess.run(['sbatch', script_name])
else:
subprocess.run(['sbatch', '--array=1-' + str(k), script_name])
os.remove(script_name)
for n in range(1, 51):
submit_job(1.0, 2, n)
|
Python
| 0.000002
|
@@ -210,11 +210,11 @@
=0-0
-0:3
+1:0
0:00
@@ -245,16 +245,46 @@
o_Sphere
+%0A#SBATCH --output=slurm-%25j.out
%0A%0Amodule
|
20bbacc8683512ba877654e810a0ce65876804c7
|
Use keyword to construct the help message.
|
appointments/handlers/new.py
|
appointments/handlers/new.py
|
from __future__ import unicode_literals
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from .base import AppointmentHandler
from ..forms import NewMessageForm
from ..models import Timeline, TimelineSubscription, now
class NewHandler(AppointmentHandler):
    "Subscribes a user to a timeline."

    keyword = 'new'

    def help(self):
        "Return help message."
        # Fixed spacing: 'help_text =_(' -> 'help_text = _('.
        help_text = _('To add a user a timeline send: %(prefix)s NEW <KEY> <NAME/ID> <DATE>. '
            'The date is optional.') % {'prefix': self.prefix}
        self.respond(help_text)

    def parse_message(self, text):
        "Tokenize message text into keyword, name/id and optional date."
        result = {}
        tokens = text.strip().split()
        # First token is the timeline keyword.
        result['keyword'] = tokens.pop(0)
        if tokens:
            # Next token is the name/id
            result['name'] = tokens.pop(0)
            if tokens:
                # Remaining tokens should be a date string
                result['date'] = ' '.join(tokens)
        return result

    def handle(self, text):
        "Register the sender for a timeline, guarding against duplicates."
        parsed = self.parse_message(text)
        form = NewMessageForm(data=parsed)
        if form.is_valid():
            result = {}
            timeline = form.cleaned_data['timeline']
            name = form.cleaned_data['name']
            result['timeline'] = timeline
            result['pin'] = name
            result['connection'] = self.msg.connection
            # Fall back to "now" when no start date was supplied.
            start = form.cleaned_data.get('date', None) or None
            if start is not None:
                result['start'] = start
            else:
                result['start'] = now()
            # Check if already subscribed (open-ended or not-yet-ended).
            previous = TimelineSubscription.objects.filter(
                Q(Q(end__isnull=True) | Q(end__gte=now())),
                timeline=timeline, connection=self.msg.connection, pin=name
            )
            msg_data = {
                'user': ' %s' % self.msg.contact.name if self.msg.contact else '',
                'date': result['start'].isoformat(),
                'name': name,
                'timeline': timeline.name,
            }
            if previous.exists():
                # Already joined
                response = _('Sorry, you previously registered a %(timeline)s for '
                    '%(name)s. You will be notified when '
                    'it is time for the next appointment.')
                self.respond(response % msg_data)
            else:
                # FIXME: There is a small race condition here that we could
                # create two subscriptions in parallel
                TimelineSubscription.objects.create(**result)
                response = _('Thank you%(user)s! You registered a %(timeline)s for '
                    '%(name)s on %(date)s. You will be notified when '
                    'it is time for the next appointment.')
                self.respond(response % msg_data)
        else:
            # Respond with error message
            if 'keyword' in form.errors:
                # Invalid keyword
                self.respond(_('Sorry, we could not find any appointments for '
                    'the keyword: %(keyword)s') % parsed)
            elif 'name' in form.errors:
                # Name is missing
                self.respond(_('Sorry, you must include a name or id for your '
                    'appointments subscription.'))
            elif 'date' in form.errors:
                # Invalid date format
                self.respond(_('Sorry, we cannot understand that date format. '
                    'For the best results please use the ISO YYYY-MM-DD format.'))
            else:
                # Non-field error
                self.respond(_('Sorry, we cannot understand that message. '
                    'For additional help send: %(prefix)s NEW') % {'prefix': self.prefix})
        return True
|
Python
| 0.000002
|
@@ -402,16 +402,69 @@
esage.%22%0A
+ keyword = self.keyword.split('%7C')%5B0%5D.upper()%0A
@@ -519,19 +519,27 @@
refix)s
-NEW
+%25(keyword)s
%3CKEY%3E %3C
@@ -618,16 +618,36 @@
f.prefix
+, 'keyword': keyword
%7D%0A
@@ -1125,32 +1125,106 @@
le(self, text):%0A
+ %22Register user with a given timeline based on the keyword match.%22%0A
parsed =
|
8b964d306f5c81ec0f2a3a17d3f03fb8adaec35d
|
Make checker mode a QObject
|
pcef/core/modes/checker.py
|
pcef/core/modes/checker.py
|
"""
This module contains the checker mode, a base class for code checker modes.
"""
from pcef.core.mode import Mode
from pcef.core.system import DelayJobRunner
from pcef.core.panels.marker import Marker
from pcef.core.decoration import TextDecoration
from pcef.qt import QtCore, QtGui
#: Status value for an information message
MSG_STATUS_INFO = 0
#: Status value for a warning message
MSG_STATUS_WARNING = 1
#: Status value for an error message
MSG_STATUS_ERROR = 2


class Message(object):
    """
    A message associates a description with a status and a few other pieces
    of information such as line and column number and an optional custom
    icon/color (overriding the status defaults).

    A message will be displayed in the editor's marker panel and/or as a
    TextDecoration (if status is error or warning).
    """
    #: Default (theme name, resource path) icon per status.
    ICONS = {MSG_STATUS_INFO: ("dialog-info",
                               ":/pcef-icons/rc/dialog-info.png"),
             MSG_STATUS_WARNING: ("dialog-warning",
                                  ":/pcef-icons/rc/dialog-warning.png"),
             MSG_STATUS_ERROR: ("dialog-error",
                                ":/pcef-icons/rc/dialog-error.png")}

    #: Default decoration color per status.
    COLORS = {MSG_STATUS_INFO: "#4040DD",
              MSG_STATUS_WARNING: "#DDDD40",
              MSG_STATUS_ERROR: "#DD4040"}

    def __init__(self, description, status, line, col=None, icon=None,
                 color=None):
        """
        :param description: The message description (used as a tooltip)
        :param status: One of MSG_STATUS_INFO/WARNING/ERROR
        :param line: Line number the message refers to
        :param col: Optional column number
        :param icon: Optional icon; defaults to the status icon
        :param color: Optional color; defaults to the status color
        """
        assert 0 <= status <= 2
        self.description = description
        self.status = status
        self.line = line
        self.col = col
        self.color = color
        if self.color is None:
            self.color = self.COLORS[status]
        self.icon = icon
        if self.icon is None:
            self.icon = self.ICONS[status]
        # Filled in by Checker.addMessage once shown in the editor.
        self._marker = None
        self._decoration = None
class Checker(Mode):
    """
    This mode is an abstract base class for code checker modes.

    The checker will run an analysis job (in a background thread) when the
    editor's text changes and will take care of displaying messages emitted by
    the addMessageRequested.

    To create a concrete checker you must override the run method and use the
    addMessageRequested signal to add messages to the ui from the background
    thread.

    The run method will receive a clone of the editor's text document and the
    current file path.
    """
    # Signals marshal results from the background thread onto the GUI thread.
    addMessageRequested = QtCore.Signal(Message)
    clearMessagesRequested = QtCore.Signal()

    def __init__(self, clearOnRequest=True):
        # clearOnRequest: when True, existing messages are wiped as soon as a
        # new analysis is requested rather than when its results arrive.
        Mode.__init__(self)
        # Debounced background runner: at most 2 worker threads, 1200 ms delay.
        self.__jobRunner = DelayJobRunner(self, nbThreadsMax=2, delay=1200)
        self.__messages = []
        self.__mutex = QtCore.QMutex()
        self.__clearOnRequest = clearOnRequest

    def addMessage(self, message):
        """
        Adds a message.

        .. warning: Do not use this method from the run method, use
                    addMessageRequested signal instead.

        :param message: Message to add
        """
        self.__messages.append(message)
        if message.line:
            # A marker in the margin panel plus a full-width line decoration.
            message._marker = Marker(message.line, message.icon,
                                     message.description)
            self.editor.markerPanel.addMarker(message._marker)
            message._decoration = TextDecoration(self.editor.textCursor(),
                                                 startLine=message.line,
                                                 tooltip=message.description,
                                                 draw_order=3)
            message._decoration.setFullWidth(True)
            message._decoration.setError(color=QtGui.QColor(message.color))
            self.editor.addDecoration(message._decoration)

    def removeMessage(self, message):
        """
        Remove the message

        :param message: Message to remove
        """
        self.__messages.remove(message)
        if message._marker:
            self.editor.markerPanel.removeMarker(message._marker)
        if message._decoration:
            self.editor.removeDecoration(message._decoration)

    def clearMessages(self):
        """
        Clears all messages.

        .. warning: Do not use this method from the run method, use
                    clearMessagesRequested signal instead.
        """
        # removeMessage mutates the list, so always pop from the front.
        while len(self.__messages):
            self.removeMessage(self.__messages[0])

    def onStateChanged(self, state):
        # Wire/unwire the editor and signal connections when the mode is
        # enabled (state True) or disabled (state False).
        if state:
            self.editor.textChanged.connect(self.requestAnalysis)
            self.addMessageRequested.connect(self.addMessage)
            self.clearMessagesRequested.connect(self.clearMessages)
        else:
            self.editor.textChanged.disconnect(self.requestAnalysis)
            self.addMessageRequested.disconnect(self.addMessage)
            self.clearMessagesRequested.disconnect(self.clearMessages)

    def run(self, document, filePath):
        """
        Abstract method that is ran from a background thread. Override this
        method to implement a concrete checker.

        :param document: Clone of the QTextDocument (thread safe)
        :param filePath: The current file path.
        """
        raise NotImplementedError()

    def requestAnalysis(self):
        """ Request an analysis job. """
        if self.__clearOnRequest:
            self.clearMessages()
        # A document clone is passed so the worker never touches the live buffer.
        self.__jobRunner.requestJob(self.run, True,
                                    self.editor.document().clone(),
                                    self.editor.filePath)
if __name__ == "__main__":
    # Manual/visual test: opens this very file in an editor widget with an
    # example checker installed; random messages appear after each edit.
    import sys
    import random
    from pcef.core import QGenericCodeEdit, MarkerPanel

    try:
        # faulthandler dumps a traceback on hard crashes (e.g. in Qt).
        import faulthandler
        faulthandler.enable()
    except ImportError:
        pass

    class FancyChecker(Checker):
        """
        Example checker. Clear messages and add a message of each status on a
        randome line.
        """
        IDENTIFIER = "fancyChecker"
        DESCRIPTION = "An example checker, does not actually do anything usefull"

        def run(self, document, filePath):
            # Runs in a background thread: communicate via signals only.
            self.clearMessagesRequested.emit()
            msg = Message("A fancy info message", MSG_STATUS_INFO,
                          random.randint(1, self.editor.lineCount()))
            self.addMessageRequested.emit(msg)
            msg = Message("A fancy warning message", MSG_STATUS_WARNING,
                          random.randint(1, self.editor.lineCount()))
            self.addMessageRequested.emit(msg)
            msg = Message("A fancy error message", MSG_STATUS_ERROR,
                          random.randint(1, self.editor.lineCount()))
            self.addMessageRequested.emit(msg)

    def main():
        # Minimal editor window exercising the checker + marker panel.
        app = QtGui.QApplication(sys.argv)
        win = QtGui.QMainWindow()
        edit = QGenericCodeEdit()
        win.setCentralWidget(edit)
        edit.installMode(FancyChecker())
        edit.installPanel(MarkerPanel())
        edit.openFile(__file__)
        win.show()
        app.exec_()

    sys.exit(main())
|
Python
| 0.000025
|
@@ -2047,16 +2047,32 @@
ker(Mode
+, QtCore.QObject
):%0A %22
@@ -2760,32 +2760,70 @@
.__init__(self)%0A
+ QtCore.QObject.__init__(self)%0A
self.__j
|
fe288fdbb9f14715206006ba8de725bde5d6dee6
|
enable id_attribute again
|
apps/projects/serializers.py
|
apps/projects/serializers.py
|
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from apps.core.serializers import MarkdownSerializerMixin
from apps.domain.models import AttributeEntity, Attribute, Option, Range, VerboseName, Condition
from apps.questions.models import Catalog, Section, Subsection, QuestionEntity, Question
from .models import *
class ProjectsSerializer(serializers.ModelSerializer):
    """Basic representation of a Project."""

    class Meta:
        model = Project
        fields = ('id', 'title', 'description', 'current_snapshot', 'catalog')


class ValueSerializer(serializers.ModelSerializer):
    """Serializes a single answer Value within a snapshot."""

    class Meta:
        model = Value
        fields = (
            'id',
            'snapshot',
            'attribute',
            'set_index',
            'collection_index',
            'text',
            'option'
        )
class QuestionEntityOptionSerializer(serializers.ModelSerializer):
    """Selectable Option for a question."""

    class Meta:
        model = Option
        fields = (
            'id',
            'text',
            'additional_input'
        )


class QuestionEntityRangeSerializer(serializers.ModelSerializer):
    """Numeric Range (min/max/step) for a question."""

    class Meta:
        model = Range
        fields = (
            'id',
            'minimum',
            'maximum',
            'step'
        )
class QuestionEntityQuestionVerboseNameSerializer(serializers.ModelSerializer):
    """VerboseName for question sets, defaulting to "set"/"sets"."""

    name = serializers.SerializerMethodField()
    name_plural = serializers.SerializerMethodField()

    class Meta:
        model = VerboseName
        fields = (
            'name',
            'name_plural'
        )

    def get_name(self, obj):
        # Fall back to the translated default when no name is configured.
        return obj.name or _('set')

    def get_name_plural(self, obj):
        return obj.name_plural or _('sets')


class QuestionEntityVerboseNameSerializer(serializers.ModelSerializer):
    """VerboseName for entities, defaulting to "item"/"items"."""

    name = serializers.SerializerMethodField()
    name_plural = serializers.SerializerMethodField()

    class Meta:
        model = VerboseName
        fields = (
            'name',
            'name_plural'
        )

    def get_name(self, obj):
        # Fall back to the translated default when no name is configured.
        return obj.name or _('item')

    def get_name_plural(self, obj):
        return obj.name_plural or _('items')


class QuestionEntityConditionSerializer(serializers.ModelSerializer):
    """Serializes a Condition."""

    class Meta:
        model = Condition
        fields = (
            'id',
            'source_attribute',
            'relation',
            'target_text',
            'target_option'
        )
class QuestionEntityAttributeSerializer(MarkdownSerializerMixin, serializers.ModelSerializer):
    """Attribute with its options, range, verbose names and conditions."""

    options = QuestionEntityOptionSerializer(many=True, read_only=True)
    range = QuestionEntityRangeSerializer(read_only=True)
    verbosename = QuestionEntityQuestionVerboseNameSerializer()
    conditions = QuestionEntityConditionSerializer(many=True, read_only=True)

    class Meta:
        model = Attribute
        fields = (
            'id',
            'options',
            'range',
            'verbosename',
            'conditions',
            'is_collection'
        )


class QuestionEntityAttributeEntitySerializer(MarkdownSerializerMixin, serializers.ModelSerializer):
    """AttributeEntity with its verbose names and conditions."""

    verbosename = QuestionEntityVerboseNameSerializer()
    conditions = QuestionEntityConditionSerializer(many=True, read_only=True)

    class Meta:
        model = AttributeEntity
        fields = (
            'id',
            'verbosename',
            'conditions'
        )
class QuestionEntityQuestionSerializer(MarkdownSerializerMixin, serializers.ModelSerializer):
    """Single question, with its attribute pulled from the attribute entity."""

    markdown_fields = ('help', )

    attribute = QuestionEntityAttributeSerializer(source='attribute_entity.attribute')

    class Meta:
        model = Question
        fields = (
            'id',
            'order',
            'text',
            'help',
            'widget_type',
            'attribute'
        )
class QuestionEntitySerializer(MarkdownSerializerMixin, serializers.ModelSerializer):
    """Full question-entity payload, including navigation (next/prev/progress)
    and section/subsection context for the client."""

    markdown_fields = ('help', )

    attribute_entity = QuestionEntityAttributeEntitySerializer()
    collection = QuestionEntityAttributeEntitySerializer(source='attribute_entity.parent_collection')

    questions = serializers.SerializerMethodField()
    attributes = serializers.SerializerMethodField()

    next = serializers.SerializerMethodField()
    prev = serializers.SerializerMethodField()
    progress = serializers.SerializerMethodField()

    section = serializers.SerializerMethodField()
    subsection = serializers.SerializerMethodField()

    class Meta:
        model = QuestionEntity
        # NOTE: 'collection' used to appear twice in this tuple; duplicate removed.
        fields = (
            'id',
            'help',
            'attribute_entity',
            'collection',
            'is_set',
            'next',
            'prev',
            'progress',
            'section',
            'subsection',
            'questions',
            'attributes'
        )

    def get_questions(self, obj):
        # A set serializes all child questions; a plain entity wraps its single
        # question in a one-element list so the client sees a uniform shape.
        if obj.is_set:
            return QuestionEntityQuestionSerializer(instance=obj.questions, many=True, read_only=True).data
        else:
            return [QuestionEntityQuestionSerializer(instance=obj.question, read_only=True).data]

    def get_attributes(self, obj):
        # IDs of the attributes answered on this page.
        if obj.is_set:
            if obj.attribute_entity.parent_collection_id:
                attributes = Attribute.objects.filter(parent_collection_id=obj.attribute_entity.parent_collection_id)
                return [attribute.id for attribute in attributes]
            else:
                return [question.attribute_entity_id for question in obj.questions.all()]
        else:
            return [obj.attribute_entity_id]

    def get_prev(self, obj):
        try:
            return QuestionEntity.objects.get_prev(obj.pk).pk
        except QuestionEntity.DoesNotExist:
            return None

    def get_next(self, obj):
        try:
            return QuestionEntity.objects.get_next(obj.pk).pk
        except QuestionEntity.DoesNotExist:
            return None

    def get_progress(self, obj):
        try:
            return QuestionEntity.objects.get_progress(obj.pk)
        except QuestionEntity.DoesNotExist:
            return None

    def get_section(self, obj):
        # Use the local variable instead of re-traversing the relation twice
        # (it was previously assigned but never used).
        section = obj.subsection.section
        return {
            'id': section.id,
            'title': section.title
        }

    def get_subsection(self, obj):
        return {
            'id': obj.subsection.id,
            'title': obj.subsection.title
        }
class CatalogQuestionSerializer(serializers.ModelSerializer):
    """Minimal question representation for the catalog tree."""

    class Meta:
        model = Question
        fields = (
            'id',
            'text'
        )


class CatalogQuestionEntitySerializer(serializers.ModelSerializer):
    """Question-entity node with its nested questions."""

    questions = CatalogQuestionSerializer(many=True, read_only=True)
    text = serializers.CharField(source='question.text')

    class Meta:
        model = QuestionEntity
        fields = (
            'id',
            'text',
            'questions'
        )


class CatalogSubsectionSerializer(serializers.ModelSerializer):
    """Subsection node; entities are fetched explicitly to control the filter
    and ordering."""

    entities = serializers.SerializerMethodField()

    class Meta:
        model = Subsection
        fields = (
            'id',
            'title',
            'entities'
        )

    def get_entities(self, obj):
        # Only top-level entities (no parent entity) of this subsection, ordered.
        entities = QuestionEntity.objects.filter(subsection=obj, question__parent_entity=None).order_by('order')
        return CatalogQuestionEntitySerializer(instance=entities, many=True).data


class CatalogSectionSerializer(serializers.ModelSerializer):
    """Section node with nested subsections."""

    subsections = CatalogSubsectionSerializer(many=True, read_only=True)

    class Meta:
        model = Section
        fields = (
            'id',
            'title',
            'subsections'
        )


class CatalogSerializer(serializers.ModelSerializer):
    """Whole catalog tree: catalog -> sections -> subsections -> entities."""

    sections = CatalogSectionSerializer(many=True, read_only=True)

    class Meta:
        model = Catalog
        fields = (
            'id',
            'title',
            'sections'
        )
|
Python
| 0.000001
|
@@ -3234,32 +3234,88 @@
ead_only=True)%0A%0A
+ id_attribute = serializers.SerializerMethodField()%0A%0A
class Meta:%0A
@@ -3306,32 +3306,32 @@
class Meta:%0A
-
model =
@@ -3375,32 +3375,60 @@
'id',%0A
+ 'id_attribute',%0A
'ver
@@ -3459,24 +3459,24 @@
conditions'%0A
-
)%0A%0A%0A
@@ -3466,32 +3466,211 @@
ons'%0A )%0A%0A
+ def get_id_attribute(self, obj):%0A try:%0A return %7B'id': obj.children.get(title='id').pk%7D%0A except AttributeEntity.DoesNotExist:%0A return None%0A%0A
%0Aclass QuestionE
|
bbc5953cbaf29ef3421049db3c7ac00fd94c3734
|
Clean up nocookie code
|
pelican_youtube/youtube.py
|
pelican_youtube/youtube.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Kura
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import unicode_literals
from docutils import nodes
from docutils.parsers.rst import directives, Directive
class YouTube(Directive):
    """ Embed YouTube video in posts.

    Based on the YouTube directive by Brian Hsu:
    https://gist.github.com/1422773

    VIDEO_ID is required, other arguments are optional

    Usage:
    .. youtube:: VIDEO_ID
    """

    def boolean(argument):
        """Conversion function for yes/no True/False."""
        value = directives.choice(argument, ('yes', 'True', 'no', 'False'))
        return value in ('yes', 'True')

    required_arguments = 1
    optional_arguments = 6
    option_spec = {
        'class': directives.unchanged,
        'width': directives.positive_int,
        'height': directives.positive_int,
        'allowfullscreen': boolean,
        'seamless': boolean,
        'nocookie': boolean,
    }
    final_argument_whitespace = False
    has_content = False

    def run(self):
        """Build the raw HTML nodes embedding the requested video."""
        videoID = self.arguments[0].strip()

        # Choose whether to use the YouTube nocookie domain for reduced tracking.
        nocookie = self.options.get('nocookie', False)
        youtube_domain = 'youtube-nocookie' if nocookie else 'youtube'
        url = 'https://www.{}.com/embed/{}'.format(youtube_domain, videoID)

        width = self.options.get('width')
        height = self.options.get('height')
        fullscreen = self.options.get('allowfullscreen', True)
        seamless = self.options.get('seamless', True)

        css_classes = 'youtube'
        if 'class' in self.options:
            css_classes += ' {}'.format(self.options['class'])
        elif height is None:
            # use responsive design with 16:9 aspect ratio by default
            css_classes += ' youtube-16x9'
        # no additional classes if dimensions (height/width) are specified

        # (value, attribute template) pairs; a falsy value skips the attribute.
        iframe_arguments = [
            (width, 'width="{}"'),
            (height, 'height="{}"'),
            (fullscreen, 'allowfullscreen'),
            (seamless, 'seamless frameBorder="0"'),
        ]

        div_block = '<div class="{}">'.format(css_classes)
        embed_block = '<iframe src="{}" '.format(url)
        # 'fmt' avoids shadowing the builtin format().
        for value, fmt in iframe_arguments:
            embed_block += (fmt + ' ').format(value) if value else ''
        # Drop the trailing space before closing the tag.
        embed_block = embed_block[:-1] + '></iframe>'

        return [
            nodes.raw('', div_block, format='html'),
            nodes.raw('', embed_block, format='html'),
            nodes.raw('', '</div>', format='html'),
        ]
def register():
    """Register the ``youtube`` directive with docutils."""
    directives.register_directive('youtube', YouTube)
|
Python
| 0.000411
|
@@ -2195,33 +2195,34 @@
-nocookie = self.options%5B'
+youtube_domain = 'youtube-
noco
@@ -2226,17 +2226,16 @@
ocookie'
-%5D
%5C%0A
@@ -2280,75 +2280,8 @@
lse
-False%0A youtube_domain = 'youtube-nocookie' if nocookie else
'you
|
6a58541a0fe1a942c3a2c187eb0358bd8350a51f
|
Change default output folder of minimize-content-pack.py.
|
minimize-content-pack.py
|
minimize-content-pack.py
|
"""
minimize-content-pack
Remove assessment items, subtitles and po files from a content pack.
Usage:
minimize-content-pack.py <old-content-pack-path> <out-path>
"""
import zipfile
from pathlib import Path
from docopt import docopt
# Only entries whose name contains one of these files are copied into the
# minimized content pack.
ITEMS_TO_TRANSFER = [
    "metadata.json",
    "content.db",
    "backend.mo",
    "frontend.mo",
]


def minimize_content_pack(oldpackpath: Path, outpath: Path):
    """Copy only the wanted entries of *oldpackpath* into a new zip at *outpath*.

    :param oldpackpath: path of the existing content pack (zip file)
    :param outpath: path of the minimized content pack to create
    """
    with zipfile.ZipFile(str(oldpackpath)) as oldzf,\
            zipfile.ZipFile(str(outpath), "w") as newzf:
        # any() guarantees each entry is copied at most once, even if its name
        # happened to match several of the wanted file names (the old nested
        # generator could emit duplicates).
        items = [name for name in oldzf.namelist()
                 if any(wanted in name for wanted in ITEMS_TO_TRANSFER)]
        for item in items:
            data = oldzf.read(item)
            newzf.writestr(item, data)
def main():
    """Parse CLI arguments (docopt) and minimize the given content pack."""
    args = docopt(__doc__)
    contentpackpath = Path(args["<old-content-pack-path>"])
    # Default output path when <out-path> is omitted.
    outpath = Path(args["<out-path>"] or
                   "minimal.zip")
    outpath = outpath.expanduser()
    minimize_content_pack(contentpackpath, outpath)


if __name__ == "__main__":
    main()
|
Python
| 0
|
@@ -935,16 +935,20 @@
%22
+out/
minimal.
|
35e6559bd13f46679333e72b6356a82a0657cce4
|
fix thinko in kepler test
|
gala/potential/potential/tests/test_against_galpy.py
|
gala/potential/potential/tests/test_against_galpy.py
|
"""Test some builtin potentials against galpy"""
# Third-party
import numpy as np
from astropy.constants import G
import astropy.units as u
import pytest
# This project
from ...._cconfig import GSL_ENABLED
from ....units import galactic
from ..builtin import (KeplerPotential, MiyamotoNagaiPotential,
NFWPotential, PowerLawCutoffPotential,
BovyMWPotential2014)
try:
import galpy
import galpy.orbit
import galpy.potential
GALPY_INSTALLED = True
except ImportError:
GALPY_INSTALLED = False
# Set to arbitrary values for testing
ro = 8.1 * u.kpc
vo = 240 * u.km/u.s
ntest = 128
def helper(gala_pot, galpy_pot):
    """Compare a gala potential against its galpy twin at random points.

    Checks circular velocity, density, potential value and the vertical force
    at ``ntest`` random (R, z) positions.
    """
    Rs = np.random.uniform(1, 15, size=ntest) * u.kpc
    zs = np.random.uniform(1, 15, size=ntest) * u.kpc

    xyz = np.zeros((3, Rs.size)) * u.kpc
    xyz[0] = Rs
    assert np.allclose(gala_pot.circular_velocity(xyz).to_value(u.km/u.s),
                       galpy_pot.vcirc(R=Rs.to_value(ro)))

    xyz[2] = zs
    assert np.allclose(gala_pot.density(xyz).to_value(u.Msun/u.pc**3),
                       galpy_pot.dens(R=Rs.to_value(ro), z=zs.to_value(ro)))

    assert np.allclose(gala_pot.energy(xyz).to_value((u.km / u.s)**2),
                       galpy_pot(R=Rs.to_value(ro), z=zs.to_value(ro)))

    # Compare only the z component of the gradient with galpy's zforce.
    # The old code compared ALL three gradient components against the scalar
    # radial force, which cannot hold once z != 0 (e.g. the y component is 0).
    assert np.allclose(gala_pot.gradient(xyz).to_value((u.km/u.s) * u.pc/u.Myr / u.pc)[2],
                       -galpy_pot.zforce(R=Rs.to_value(ro), z=zs.to_value(ro)))
@pytest.mark.skipif(not GALPY_INSTALLED,
                    reason="requires galpy to run this test")
def test_kepler():
    """Kepler potential: gala vs galpy, with amp converted to galpy units."""
    from galpy.potential import KeplerPotential as BovyKeplerPotential

    M = 5e10 * u.Msun
    gala_pot = KeplerPotential(m=M, units=galactic)
    # galpy's 'amp' is GM expressed in units of vo**2 * ro.
    amp = (G*M).to_value(vo**2 * ro)
    bovy_pot = BovyKeplerPotential(amp=amp, ro=ro, vo=vo)
    helper(gala_pot, bovy_pot)
|
Python
| 0
|
@@ -1369,16 +1369,19 @@
/ u.pc)
+%5B2%5D
,%0A
@@ -1408,17 +1408,17 @@
lpy_pot.
-R
+z
force(R=
|
7073ab92491d82f23af1c61f3c68e6f29a3261c2
|
Version bump. fixes #4
|
assetfiles/__init__.py
|
assetfiles/__init__.py
|
__version__ = '0.5.0'
|
Python
| 0.000004
|
@@ -14,9 +14,9 @@
'0.
-5
+6
.0'%0A
|
20f7102daf411a07ec922fceb2fac6c00356a84b
|
Revert "Version in function"
|
asgi_redis/__init__.py
|
asgi_redis/__init__.py
|
import pkg_resources
from .core import RedisChannelLayer
from .local import RedisLocalChannelLayer
def get_version():
    """Return the installed ``asgi_redis`` distribution's version string."""
    distribution = pkg_resources.require('asgi_redis')[0]
    return distribution.version
|
Python
| 0
|
@@ -97,38 +97,21 @@
er%0A%0A
-%0Adef get_version():%0A return
+__version__ =
pkg
|
ea0847a1c509b2eba1e652b597f2921b0c19da2d
|
Add field for name in mail dict
|
mail_parser.py
|
mail_parser.py
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
import os, sys
from email.parser import Parser
import json
import re
def parse_mail(file_in):
    """
    Extract Subject & Body of mail file.

    Headers must be formatted as a block of RFC 2822 style headers.

    :param file_in: path of the mail file to parse
    :return: dict with keys "subject" and "body"; e-mail addresses are
             stripped from the body.
    """
    with open(file_in, 'r') as INFILE:
        raw_mail = Parser().parse(INFILE)

    formated_mail = {
        "body": raw_mail.get_payload(),
        "subject": raw_mail['subject'],
    }

    # small correction of text, remove email adresses in the text
    # (raw string avoids the invalid "\." / "\s" escape warnings)
    reg = re.compile(r"[^@|\s]+@[^@]+\.[^@|\s]+")  # black magic
    formated_mail['body'] = reg.sub("", formated_mail['body'])

    return formated_mail
def write_json(dico, fileout):
    """
    Write dict into json-styled file.

    An explicit UTF-8 encoding is used so that the non-ASCII characters
    allowed by ensure_ascii=False are written reliably on any locale.

    Je collectionne les canards...
    ... vivants !
    """
    with open(fileout, "w", encoding="utf-8") as OUTFILE:
        json.dump(dico, OUTFILE, ensure_ascii=False)
def correct_mail(file_in):
    """
    Remove all blank lines in mail files, it fucks-up the parsing if not
    """
    # Keep only lines whose stripped content is longer than one character.
    with open(file_in, 'r') as mail_file:
        kept_lines = [line for line in mail_file if len(line.strip()) > 1]

    # regenerate the mail file
    with open(file_in, 'w') as mail_file:
        mail_file.write(''.join(kept_lines))
|
Python
| 0
|
@@ -956,16 +956,176 @@
ody'%5D)%0A%0A
+ date = os.path.dirname(file_in).split('/').pop() + '-'%0A name = os.path.splitext(os.path.basename(file_in))%5B0%5D%0A formated_mail%5B'name'%5D = date+name%0A %0A
retu
|
ae916c1ee52941bb5a1ccf87abe2a9758897bd08
|
Add deprecation warnings and message to getlines function
|
IPython/utils/ulinecache.py
|
IPython/utils/ulinecache.py
|
"""
Wrapper around linecache which decodes files to unicode according to PEP 263.
"""
import functools
import linecache
import sys
from IPython.utils import py3compat
from IPython.utils import openpy
getline = linecache.getline
# getlines has to be looked up at runtime, because doctests monkeypatch it.
@functools.wraps(linecache.getlines)
def getlines(filename, module_globals=None):
    """Return the lines of *filename* via the current linecache.getlines."""
    return linecache.getlines(filename, module_globals=module_globals)
|
Python
| 0
|
@@ -1,12 +1,64 @@
%22%22%22%0A
+This module has been deprecated since IPython 6.0.%0A%0A
Wrapper
@@ -175,16 +175,42 @@
port sys
+%0Afrom warnings import warn
%0A%0Afrom I
@@ -415,16 +415,16 @@
tlines)%0A
-
def getl
@@ -460,16 +460,243 @@
=None):%0A
+ %22%22%22%0A Deprecated since IPython 6.0%0A %22%22%22%0A warn((%22%60IPython.utils.ulinecache.getlines%60 is deprecated since%22%0A %22 IPython 6.0 and will be removed in future versions.%22),%0A DeprecationWarning, stacklevel=2)%0A
retu
|
eec612ae54010485bb53403ada88f723d9a21cc1
|
Use Dict Comprehension and add criticality
|
InterviewSchedulerGurobi.py
|
InterviewSchedulerGurobi.py
|
from gurobipy import *
from datetime import datetime
def read_input_csv(filename):
    """
    Read a CSV matrix file.

    The first row holds column labels (its first cell is ignored); every other
    row starts with a row label followed by float values.

    :return: (matrix, row_header, col_header) where matrix maps
             (row_label, column_label) -> float, row_header is the list of
             column labels and col_header the sorted list of row labels.
    """
    row_header, matrix, col_header = list(), dict(), set()
    with open(filename) as f:
        for csvline in f:
            csvline = csvline.strip()
            if len(row_header) == 0:
                # First line: column labels.
                row_header = csvline.split(',')
                continue
            row = csvline.split(',')
            col_header.add(row[0])
            # range() instead of xrange() keeps this Python 2/3 compatible.
            for i in range(1, len(row)):
                matrix[row[0], row_header[i]] = float(row[i])
    # Drop the corner cell so row_header holds only column labels.
    row_header.pop(0)
    col_header = sorted(col_header)
    return matrix, row_header, col_header
if __name__ == "__main__":
    # NOTE: Python 2 script (print >> syntax, xrange); requires gurobipy.
    if len(sys.argv) < 3:
        print >> sys.stderr, "Usage: InterviewScheduler Shortlists.csv SlotsPanels.csv Prefs.csv"
        exit(-1)

    # Shortlist matrix: (candidate, club) -> shortlisted flag.
    shortlists, clubs, names = read_input_csv(sys.argv[1])
    print(datetime.now().time())
    print('Number of Clubs')
    print(len(clubs))
    print('Number of Candidates')
    print(len(names))

    # Panel matrix: (slot, club) -> number of parallel panels in that slot.
    panels, clubs2, slots = read_input_csv(sys.argv[2])
    print('Number of Slots')
    print(len(slots))

    # Preference matrix: (candidate, club) -> preference rank.
    prefs, clubs3, names2 = read_input_csv(sys.argv[3])

    # All three input files must agree on the club / candidate sets.
    assert (sorted(clubs) == sorted(clubs2))
    assert (sorted(clubs) == sorted(clubs3))
    assert (sorted(names) == sorted(names2))

    totalClubs = len(clubs) + 1

    costs = dict()

    # Maximum number of parallel panels each club ever runs in one slot.
    maxpanels = dict()
    for c in clubs:
        maxpanels[c] = 0
        for s in slots:
            if panels[s, c] > maxpanels[c]:
                maxpanels[c] = panels[s, c]

    # Later slots cost more, pushing allocations toward the earliest slots.
    for s in xrange(len(slots)):
        costs[slots[s]] = s + 1

    print('Creating IPLP')
    model = Model('interviews')
    vars = model.addVars(slots, clubs, names, vtype=GRB.BINARY, name='G')

    # Objective - allocate max students to the initial few slots
    model.setObjective(
        quicksum(vars[s, c, n] * costs[s] * (totalClubs - prefs[n, c]) for s in slots for n in names for c in clubs),
        GRB.MINIMIZE)

    totalstudents = sum(shortlists.values())  # Constraint all students to be allocated
    model.addConstr((vars.sum() == totalstudents))

    # Constraint - maximum number in a slot for a club is limited by panels
    model.addConstrs((vars.sum(s, c, '*') <= panels[s, c] for s in slots for c in clubs))

    # Constraint - allocate student only if he has a shortlist
    model.addConstrs((vars.sum('*', c, n) <= shortlists[n, c] for n in names for c in clubs))

    # Constraint - slots should not conflict for a student
    model.addConstrs((vars.sum(s, '*', n) <= 1 for s in slots for n in names))

    print('Optimising')
    model.optimize()
    solution = model.getAttr('X', vars)

    # Write the schedule: one row per slot, one column per club panel.
    schedout = open('schedule.csv', 'w')
    line = 'Slot'
    for c in clubs:
        for j in range(int(maxpanels[c])):
            line = line + ',' + c + str(j + 1)
    schedout.write(line + '\n')
    for s in slots:
        line = s
        for c in clubs:
            l = [''] * int(maxpanels[c])
            i = 0
            for n in names:
                if solution[s, c, n] == 1:
                    l[i] = n + ' ' + str(prefs[n, c])
                    i = i + 1
            line = line + ',' + ','.join(l)
        schedout.write(line + '\n')
    schedout.close()
    print(model.status)
    print(datetime.now().time())
|
Python
| 0.000001
|
@@ -370,32 +370,67 @@
line.split(',')%0A
+ col_header.add(row%5B0%5D)%0A
for
@@ -521,46 +521,8 @@
i%5D)%0A
- col_header.add(row%5B0%5D)
%0A
@@ -1327,208 +1327,179 @@
-costs = dict()%0A%0A maxpanels = dict()%0A for c in clubs:%0A
+# Find out max number of panels%0A maxpanels = dict((c,
max
+(
panels%5B
-c%5D = 0%0A for s in slots:%0A if panels%5Bs, c%5D %3E maxpanels%5Bc%5D:%0A maxpanels%5Bc%5D = panel
+s, c%5D for s in slots)) for c in clubs)%0A%0A # Generate cost of slots%0A costs = dict((slot
s%5Bs
+%5D
,
-c%5D%0A%0A
+s + 1)
for
@@ -1526,41 +1526,136 @@
ts))
-:
+)%0A
%0A
- costs%5Bslots%5Bs%5D%5D = s + 1
+# Calculate number shortlists for each students%0A crit = dict((n, sum(shortlists%5Bn, c%5D for c in clubs)) for n in names)
%0A%0A
@@ -3142,16 +3142,20 @@
' + str(
+int(
prefs%5Bn,
@@ -3158,16 +3158,43 @@
s%5Bn, c%5D)
+) + '_' + str(int(crit%5Bn%5D))
%0A
|
59f324229acfab30811cc61b3880770292699a6d
|
update country in email to group
|
tola/util.py
|
tola/util.py
|
import unicodedata
import urllib2
import json
import sys
from activitydb.models import Country, TolaUser
from django.contrib.auth.models import User
from django.core.mail import send_mail, mail_admins, mail_managers, EmailMessage
#CREATE NEW DATA DICTIONARY OBJECT
def siloToDict(silo):
    """
    Create a data dictionary from silo entries.

    :param silo: iterable of objects exposing ``field.name`` and
                 ``char_store`` (both NFKD-normalized and ASCII-encoded).
    :return: dict mapping a 1-based counter to {label: value}.
    """
    parsed_data = {}
    # enumerate replaces the manual key_value counter; the unused
    # normalization of d.row_number was dropped.
    for key_value, d in enumerate(silo, start=1):
        label = unicodedata.normalize('NFKD', d.field.name).encode('ascii', 'ignore')
        value = unicodedata.normalize('NFKD', d.char_store).encode('ascii', 'ignore')
        parsed_data[key_value] = {label: value}
    return parsed_data
def getCountry(user):
    """
    Return the Country queryset associated with the given user.

    :param user: Django User whose TolaUser profile holds the countries.
    :return: queryset of Country objects linked to that user.
    """
    # get users country from django cosign module
    user_countries = TolaUser.objects.all().filter(user__id=user.id).values('countries')
    get_countries = Country.objects.all().filter(id__in=user_countries)

    return get_countries
def getTolaDataSilos(user):
"""
Returns a list of silos from TolaData that the logged in user has access to
"""
url="https://tola-data.mercycorps.org/api/silo/?format=json"
# set url for json feed here
json_file = urllib2.urlopen(url)
print "JSON FILE:"
print json_file.read()
#load data
data = json.load(json_file)
json_file.close()
for row in data:
print row
vars_to_sql = []
keys_to_sql = []
for new_key, new_value in row.iteritems():
try:
new_key = new_key.encode('ascii','ignore')
new_value = new_value.encode('ascii','ignore')
except Exception, err:
sys.stderr.write('ERROR: %s\n' % str(err))
print new_key
print new_value
if new_value:
#country or region related columns only
if new_key in ('country','region','iso_code'):
#change iso_code to code for DB table
if new_key == 'iso_code':
new_key = 'code'
keys_to_sql.append(new_key)
vars_to_sql.append(new_value)
silos = keys_to_sql + vars_to_sql
return silos
def emailGroup(country,group,link,subject,message,submiter=None):
#email incident to admins in each country assoicated with the projects program
for single_country in country.all():
country = Country.objects.all().filter(country=single_country)
getGroupEmails = User.objects.all().filter(groups__name=group,tola_user__country=country).values_list('email', flat=True)
print getGroupEmails
email_link = link
formatted_email = email_link
subject = str(subject)
message = str(message) + formatted_email
to = [str(item) for item in getGroupEmails]
if submiter:
to.append(submiter + ", ")
print to
email = EmailMessage(subject, message, 'systems@mercycorps.org',
to)
email.send()
mail_admins(subject, message, fail_silently=False)
|
Python
| 0.000001
|
@@ -3157,15 +3157,8 @@
iter
- + %22, %22
)%0A
|
59936f52f319073324cda22707464728bcd9bbf3
|
Update Internet.hexip to handle IPv6
|
plugins/Internet/plugin.py
|
plugins/Internet/plugin.py
|
###
# Copyright (c) 2003-2005, Jeremiah Fincher
# Copyright (c) 2010-2011, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import socket
import telnetlib
import supybot.utils as utils
from supybot.commands import *
from supybot.utils.iter import any
import supybot.callbacks as callbacks
class Internet(callbacks.Plugin):
"""Add the help for "@help Internet" here."""
threaded = True
def dns(self, irc, msg, args, host):
"""<host|ip>
Returns the ip of <host> or the reverse DNS hostname of <ip>.
"""
if utils.net.isIP(host):
hostname = socket.getfqdn(host)
if hostname == host:
irc.reply('Host not found.')
else:
irc.reply(hostname)
else:
try:
ip = socket.getaddrinfo(host, None)[0][4][0]
if ip == '64.94.110.11': # Verisign sucks!
irc.reply('Host not found.')
else:
irc.reply(ip)
except socket.error:
irc.reply('Host not found.')
dns = wrap(dns, ['something'])
_domain = ['Domain Name', 'Server Name', 'domain']
_registrar = ['Sponsoring Registrar', 'Registrar', 'source']
_updated = ['Last Updated On', 'Domain Last Updated Date', 'Updated Date',
'Last Modified', 'changed']
_created = ['Created On', 'Domain Registration Date', 'Creation Date']
_expires = ['Expiration Date', 'Domain Expiration Date']
_status = ['Status', 'Domain Status', 'status']
def whois(self, irc, msg, args, domain):
"""<domain>
Returns WHOIS information on the registration of <domain>.
"""
usertld = domain.split('.')[-1]
if '.' not in domain:
irc.errorInvalid('domain')
return
try:
t = telnetlib.Telnet('%s.whois-servers.net' % usertld, 43)
except socket.error, e:
irc.error(str(e))
return
t.write(domain)
t.write('\r\n')
s = t.read_all()
server = registrar = updated = created = expires = status = ''
for line in s.splitlines():
line = line.strip()
if not line or ':' not in line:
continue
if not server and any(line.startswith, self._domain):
server = ':'.join(line.split(':')[1:]).strip().lower()
# Let's add this check so that we don't respond with info for
# a different domain. E.g., doing a whois for microsoft.com
# and replying with the info for microsoft.com.wanadoodoo.com
if server != domain:
server = ''
continue
if not server:
continue
if not registrar and any(line.startswith, self._registrar):
registrar = ':'.join(line.split(':')[1:]).strip()
elif not updated and any(line.startswith, self._updated):
s = ':'.join(line.split(':')[1:]).strip()
updated = 'updated %s' % s
elif not created and any(line.startswith, self._created):
s = ':'.join(line.split(':')[1:]).strip()
created = 'registered %s' % s
elif not expires and any(line.startswith, self._expires):
s = ':'.join(line.split(':')[1:]).strip()
expires = 'expires %s' % s
elif not status and any(line.startswith, self._status):
status = ':'.join(line.split(':')[1:]).strip().lower()
if not status:
status = 'unknown'
try:
t = telnetlib.Telnet('whois.pir.org', 43)
except socket.error, e:
irc.error(str(e))
return
t.write('registrar ')
t.write(registrar.split('(')[0].strip())
t.write('\n')
s = t.read_all()
url = ''
for line in s.splitlines():
line = line.strip()
if not line:
continue
if line.startswith('Email'):
url = ' <registered at %s>' % line.split('@')[-1]
elif line.startswith('Registrar Organization:'):
url = ' <registered by %s>' % line.split(':')[1].strip()
elif line == 'Not a valid ID pattern':
url = ''
if server and status:
info = filter(None, [status, created, updated, expires])
s = format('%s%s is %L.', server, url, info)
irc.reply(s)
else:
irc.error('I couldn\'t find such a domain.')
whois = wrap(whois, ['lowered'])
def hexip(self, irc, msg, args, ip):
"""<ip>
Returns the hexadecimal IP for that IP.
"""
quads = ip.split('.')
ret = ""
for quad in quads:
i = int(quad)
ret += '%02x' % i
irc.reply(ret.upper())
hexip = wrap(hexip, ['ip'])
Class = Internet
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
Python
| 0
|
@@ -6258,32 +6258,86 @@
IP.%0A %22%22%22%0A
+ ret = %22%22%0A if utils.net.isIPV4(ip):%0A
quads =
@@ -6350,37 +6350,24 @@
it('.')%0A
- ret = %22%22%0A
for
@@ -6393,16 +6393,20 @@
+
i = int(
@@ -6419,24 +6419,28 @@
+
+
ret += '%2502x
@@ -6442,14 +6442,314 @@
'%2502
-x' %25 i
+X' %25 i%0A else:%0A octets = ip.split(':')%0A for octet in octets:%0A if octet:%0A i = int(octet, 16)%0A ret += '%2504X' %25 i%0A else:%0A missing = (8 - len(octets)) * 4%0A ret += '0' * missing
%0A
@@ -6770,16 +6770,8 @@
(ret
-.upper()
)%0A
|
04065919be55d8e4371cc1e7fec1a0148298ccf7
|
throw if obj is not serializable
|
mygeotab/serializers.py
|
mygeotab/serializers.py
|
# -*- coding: utf-8 -*-
"""
mygeotab.serializers
~~~~~~~~~~~~~~~~~~~~
JSON serialization and deserialization helper objects for the MyGeotab API.
"""
import re
import arrow
import six
use_rapidjson = False
try:
import rapidjson
DATETIME_MODE = rapidjson.DM_SHIFT_TO_UTC | rapidjson.DM_ISO8601
use_rapidjson = True
except ImportError:
pass
import json
from mygeotab import dates
DATETIME_REGEX = re.compile(r"^\d{4}\-\d{2}\-\d{2}")
def json_serialize(obj):
    """Serialize *obj* into a JSON string.

    Uses python-rapidjson when it is importable, otherwise falls back to
    the standard-library ``json`` with compact separators.

    :param obj: The object to serialize.
    :return: The JSON string.
    """
    if not use_rapidjson:
        return json.dumps(obj, default=object_serializer, separators=(",", ":"))
    return rapidjson.dumps(obj, default=object_serializer)
def json_deserialize(json_str):
    """Deserialize a JSON string into Python objects.

    Uses python-rapidjson (with ISO-8601/UTC datetime handling) when
    available, otherwise the standard-library ``json`` with the MyGeotab
    object hook.

    :param json_str: The JSON string.
    :return: The deserialized object.
    """
    if not use_rapidjson:
        return json.loads(json_str, object_hook=object_deserializer)
    return rapidjson.loads(json_str, datetime_mode=DATETIME_MODE)
def object_serializer(obj):
    """Helper to serialize a field into a compatible MyGeotab object.

    :param obj: The object.
    :return: An ISO-formatted string for datetime-like objects.
    :raises TypeError: If the object has no JSON-compatible representation.
    """
    # json.dumps only invokes this ``default`` hook for objects it cannot
    # natively encode, so a plain dict never reaches here — the old
    # ``type(obj) is dict and 'isoformat' in obj`` guard was always False
    # and datetimes leaked through unformatted.  Convert anything that
    # exposes isoformat(); reject everything else loudly instead of
    # returning it for the encoder to choke on later.
    if hasattr(obj, 'isoformat'):
        return dates.format_iso_datetime(obj)
    raise TypeError("Unserializable object {} of type {}".format(obj, type(obj)))
def object_deserializer(obj):
    """Helper to deserialize a raw result dict into a proper dict.

    String values that look like ISO dates are converted in place into
    localized datetime objects; anything that fails to parse is left as-is.

    :param obj: The dict.
    :return: The same dict, with datetime-looking strings converted.
    """
    for key, val in obj.items():
        looks_like_date = (isinstance(val, six.string_types)
                           and DATETIME_REGEX.search(val))
        if not looks_like_date:
            continue
        try:
            obj[key] = dates.localize_datetime(arrow.get(val).datetime)
        except (ValueError, arrow.parser.ParserError):
            obj[key] = val
    return obj
|
Python
| 0.000008
|
@@ -1061,19 +1061,93 @@
j) else
-obj
+raise TypeError(%22Unserializable object %7B%7D of type %7B%7D%22.format(obj, type(obj)))
%0A%0A%0Adef o
|
c2c4e47f5cdae6e683e87dcc8c7b536633755c5a
|
fix with black formatter
|
examples/distribuited_execution_terraform/aws/plan/basic.py
|
examples/distribuited_execution_terraform/aws/plan/basic.py
|
import time
from locust import HttpUser, task, between
class Quickstart(HttpUser):
    # Minimal load-test user: each simulated user repeatedly performs one of
    # the three tasks below, pausing 1-5 seconds between task executions.
    wait_time = between(1, 5)

    @task
    def google(self):
        # Set a request name so the stats aggregate under "google"
        # instead of the raw URL.
        self.client.request_name = "google"
        self.client.get("https://google.com/")

    @task
    def microsoft(self):
        self.client.request_name = "microsoft"
        self.client.get("https://microsoft.com/")

    @task
    def facebook(self):
        self.client.request_name = "facebook"
        self.client.get("https://facebook.com/")
|
Python
| 0.000029
|
@@ -49,16 +49,17 @@
etween%0A%0A
+%0A
class Qu
|
c43bfe9bdec958b18573a9d0fa87cd6a881d6281
|
Fix python 3 compatibility issue for StringIO
|
test/test_provider_object_store_service.py
|
test/test_provider_object_store_service.py
|
import StringIO
import uuid
from test.helpers import ProviderTestBase
import test.helpers as helpers
class ProviderObjectStoreServiceTestCase(ProviderTestBase):
    # Integration tests for a cloud provider's object-store service:
    # container CRUD, per-container object CRUD, and upload/download
    # round-trips.  Each test creates real resources and relies on
    # helpers.exception_action to clean them up on failure.

    def __init__(self, methodName, provider):
        # The provider under test is injected by the suite runner.
        super(ProviderObjectStoreServiceTestCase, self).__init__(
            methodName=methodName, provider=provider)

    def test_crud_container(self):
        """
        Create a new container, check whether the expected values are set,
        and delete it
        """
        # uuid suffix keeps the name unique across concurrent test runs.
        name = "cbtestcreatecontainer-{0}".format(uuid.uuid4())
        test_container = self.provider.object_store.create_container(name)
        # Ensure the container is deleted even if an assertion below fails.
        with helpers.exception_action(lambda x: test_container.delete()):
            containers = self.provider.object_store.list_containers()
            found_containers = [c for c in containers if c.name == name]
            self.assertTrue(
                len(found_containers) == 1,
                "List containers does not return the expected container %s" %
                name)

            test_container.delete()
            # After deletion the container must no longer be listed.
            containers = self.provider.object_store.list_containers()
            found_containers = [c for c in containers if c.name == name]
            self.assertTrue(
                len(found_containers) == 0,
                "Container %s should have been deleted but still exists." %
                name)

    def test_crud_container_objects(self):
        """
        Create a new container, upload some contents into the container, and
        check whether list properly detects the new content.
        Delete everything afterwards.
        """
        name = "cbtestcontainerobjs-{0}".format(uuid.uuid4())
        test_container = self.provider.object_store.create_container(name)

        # ensure that the container is empty
        objects = test_container.list()
        self.assertEqual([], objects)

        with helpers.exception_action(lambda x: test_container.delete()):
            obj_name = "hello_world.txt"
            obj = test_container.create_object(obj_name)

            # Nested cleanup: delete the object first, then the container.
            with helpers.exception_action(lambda x: obj.delete()):
                # TODO: This is wrong. We shouldn't have to have a separate
                # call to upload some content before being able to delete
                # the content. Maybe the create_object method should accept
                # the file content as a parameter.
                obj.upload("dummy content")
                objs = test_container.list()
                found_objs = [o for o in objs if o.name == obj_name]
                self.assertTrue(
                    len(found_objs) == 1,
                    "List container objects does not return the expected"
                    " object %s" % obj_name)

                obj.delete()
                # The object must disappear from the listing after deletion.
                objs = test_container.list()
                found_objs = [o for o in objs if o.name == obj_name]
                self.assertTrue(
                    len(found_objs) == 0,
                    "Object %s should have been deleted but still exists." %
                    obj_name)
            test_container.delete()

    def test_upload_download_container_content(self):
        # Round-trip: upload a known string, download it into a stream,
        # and verify the bytes read back are identical.
        name = "cbtestcontainerobjs-{0}".format(uuid.uuid4())
        test_container = self.provider.object_store.create_container(name)

        with helpers.exception_action(lambda x: test_container.delete()):
            obj_name = "hello_upload_download.txt"
            obj = test_container.create_object(obj_name)

            with helpers.exception_action(lambda x: obj.delete()):
                content = "Hello World. Here's some content"
                # TODO: Upload and download methods accept different parameter
                # types. Need to make this consistent - possibly provider
                # multiple methods like upload_from_file, from_stream etc.
                obj.upload(content)
                target_stream = StringIO.StringIO()
                obj.download(target_stream)
                self.assertEqual(target_stream.getvalue(), content)
                obj.delete()
            test_container.delete()
|
Python
| 0.000004
|
@@ -1,19 +1,120 @@
-import StringIO
+# Python 3 compatibility fix%0Atry:%0A from StringIO import StringIO%0Aexcept ImportError:%0A from io import StringIO%0A
%0Aimp
@@ -4002,17 +4002,8 @@
m =
-StringIO.
Stri
|
a967fbb3b38e0788ccbde0650076ab05e693806a
|
Bump version number.
|
nativeconfig/version.py
|
nativeconfig/version.py
|
VERSION = '2.9.1'
|
Python
| 0
|
@@ -8,11 +8,11 @@
= '
-2.9.1
+3.0.0
'%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.