| commit (stringlengths 40–40) | subject (stringlengths 1–3.25k) | old_file (stringlengths 4–311) | new_file (stringlengths 4–311) | old_contents (stringlengths 0–26.3k) | lang (stringclasses, 3 values) | proba (float64, 0–1) | diff (stringlengths 0–7.82k) |
|---|---|---|---|---|---|---|---|
40fc5d12d93d9c258e615b6001070b4fbd04f119
|
Add sharding checks
|
cogs/utils/checks.py
|
cogs/utils/checks.py
|
import discord
from discord.ext import commands
# noinspection PyUnresolvedReferences
import __main__


def owner_check(ctx):
    return str(ctx.message.author.id) in __main__.liara.owners


def is_owner():
    return commands.check(owner_check)


def is_bot_account():
    def predicate(ctx):
        return ctx.bot.user.bot
    return commands.check(predicate)


def is_not_bot_account():
    def predicate(ctx):
        return not ctx.bot.user.bot
    return commands.check(predicate)


def is_selfbot():
    def predicate(ctx):
        return ctx.bot.self_bot
    return commands.check(predicate)


def is_not_selfbot():
    def predicate(ctx):
        return not ctx.bot.self_bot
    return commands.check(predicate)


def mod_or_permissions(**permissions):
    def predicate(ctx):
        if owner_check(ctx):
            return True
        if not isinstance(ctx.message.author, discord.Member):
            return False
        if ctx.message.author == ctx.message.guild.owner:
            return True
        # let's get the roles and compare them to
        # what we have on file (if we do)
        roles = [x.name.lower() for x in ctx.message.author.roles]
        try:
            if __main__.liara.settings['roles'][str(ctx.message.guild.id)]['mod_role'].lower() in roles:
                return True
        except KeyError:
            pass
        try:
            if __main__.liara.settings['roles'][str(ctx.message.guild.id)]['admin_role'].lower() in roles:
                return True
        except KeyError:
            pass
        user_permissions = dict(ctx.message.author.permissions_in(ctx.message.channel))
        for permission in permissions:
            if permissions[permission]:
                allowed = user_permissions.get(permission, False)
                if allowed:
                    return True
        return False
    return commands.check(predicate)


def admin_or_permissions(**permissions):
    def predicate(ctx):
        if owner_check(ctx):
            return True
        if not isinstance(ctx.message.author, discord.Member):
            return False
        if ctx.message.author == ctx.message.guild.owner:
            return True
        try:
            roles = [x.name.lower() for x in ctx.message.author.roles]
            if __main__.liara.settings['roles'][str(ctx.message.guild.id)]['admin_role'].lower() in roles:
                return True
        except KeyError:
            pass
        user_permissions = dict(ctx.message.author.permissions_in(ctx.message.channel))
        for permission in permissions:
            if permissions[permission]:
                allowed = user_permissions.get(permission, False)
                if allowed:
                    return True
        return False
    return commands.check(predicate)


def serverowner_or_permissions(**permissions):
    def predicate(ctx):
        if owner_check(ctx):
            return True
        if not isinstance(ctx.message.author, discord.Member):
            return False
        if ctx.message.author == ctx.message.guild.owner:
            return True
        user_permissions = dict(ctx.message.author.permissions_in(ctx.message.channel))
        for permission in permissions:
            allowed = user_permissions.get(permission, False)
            if allowed:
                return True
        return False
    return commands.check(predicate)


# deal with more of Red's nonsense
serverowner = serverowner_or_permissions
admin = admin_or_permissions
mod = mod_or_permissions
|
Python
| 0
|
@@ -713,24 +713,517 @@
 redicate)
 
 
+def is_main_shard():
+    def predicate(ctx):
+        if ctx.bot.shard_id is None:
+            return True
+        elif ctx.bot.shard_id == 0:
+            return True
+        else:
+            return False
+    return commands.check(predicate)
+
+
+def is_not_main_shard():
+    def predicate(ctx):
+        if ctx.bot.shard_id is None:
+            return False
+        elif ctx.bot.shard_id == 0:
+            return False
+        else:
+            return True
+    return commands.check(predicate)
+
+
 def mod_or_p
|
4dedbc15c835d02ccde99fb9fad00ed9a590c69e
|
Add private field to posts
|
blog/models.py
|
blog/models.py
|
import hashlib, random
from datetime import datetime
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import UserMixin, AnonymousUserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from geoalchemy2 import Geometry

db = SQLAlchemy()


class User(db.Model, UserMixin):
    __tablename__ = "user"
    id = db.Column(db.Integer(), primary_key=True)
    username = db.Column(db.String(32))
    password = db.Column(db.String())
    email = db.Column(db.String())
    api_key = db.Column(db.String(64))
    submitted = db.relationship('Post', backref='author', lazy='dynamic')
    pings = db.relationship('Ping', backref='author', lazy='dynamic')

    def __init__(self, username, password, email):
        self.username = username
        self.email = email
        self.set_password(password)
        self.new_api_key()

    def set_password(self, password):
        self.password = generate_password_hash(password)

    def check_password(self, value):
        return check_password_hash(self.password, value)

    def new_api_key(self):
        self.api_key = hashlib.sha224(str(random.getrandbits(256)).encode('utf-8')).hexdigest()

    def is_authenticated(self):
        if isinstance(self, AnonymousUserMixin):
            return False
        else:
            return True

    def is_active(self):
        return True

    def is_anonymous(self):
        if isinstance(self, AnonymousUserMixin):
            return True
        else:
            return False

    def get_id(self):
        return self.id

    def __repr__(self):
        return '<User %r>' % self.username


class Post(db.Model):
    """ A post containing location data """
    __tablename__ = "post"
    id = db.Column(db.Integer, primary_key=True)
    post_type = db.Column(db.String(32), nullable=False)
    title = db.Column(db.String(256), nullable=False)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow(), nullable=False)
    loc = db.Column(Geometry('POINT'), nullable=False)
    latitude = db.Column(db.Float, default=43.165556, nullable=False)
    longitude = db.Column(db.Float, default=-77.611389, nullable=False)
    author_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True, nullable=False)
    __mapper_args__ = {'polymorphic_on': post_type }

    def get_id(self):
        return self.id

    def get_location(self):
        return self.loc

    def __repr__(self):
        return '<Post {0}>'.format(self.title)


class TextPost(Post):
    """ A blog post """
    __mapper_args__ = {'polymorphic_identity': 'text'}
    text = db.Column(db.Text)


class ImagePost(Post):
    """ An image post """
    __mapper_args__ = {'polymorphic_identity': 'image'}
    image_path = db.Column(db.Text)
    caption = db.Column(db.String(512))


class Ping(db.Model):
    __tablename__ = "ping"
    id = db.Column(db.Integer, primary_key=True)
    author_id = db.Column(db.Integer, db.ForeignKey('user.id'), index=True)
    timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow())
    loc = db.Column(Geometry('POINT'))
|
Python
| 0
|
@@ -2042,16 +2042,81 @@
 =False)
+    private = db.Column(db.Boolean, default=False, nullable=False)
 author
|
76092902023d0d29d0702f2c9bb1cfefb8d0e42c
|
add ordering to links and make description optional
|
blog/models.py
|
blog/models.py
|
from datetime import timedelta
from django.db import models
from django.utils import timezone
from django.dispatch import receiver
from django.utils.timezone import localtime
from django.db.models import Count
import markdown
from mptt.models import MPTTModel, TreeForeignKey
from taggit.managers import TaggableManager
from versatileimagefield.fields import VersatileImageField
from versatileimagefield.image_warmer import VersatileImageFieldWarmer


# manager to pull all posts that aren't published in the future
class PostManager(models.Manager):
    def get_queryset(self):
        return super(PostManager, self).get_queryset().filter(pub_date__lte=timezone.now())


# returns either today at 4pm (server time) or tomorrow at 4pm if it's currently after 4pm
def default_start_time():
    now = timezone.now()
    start = now.replace(hour=21, minute=0, second=0, microsecond=0)
    return start if start > now else start + timedelta(days=1)


class Post(models.Model):
    title = models.CharField(max_length=300)
    slug = models.SlugField(unique=True, max_length=300)
    excerpt = models.TextField()
    body = models.TextField()
    body_html = models.TextField(editable=False)
    category = TreeForeignKey('Category', on_delete=models.CASCADE)
    pub_date = models.DateTimeField('date published', default=default_start_time, db_index=True)
    tags = TaggableManager()
    first_image = VersatileImageField(editable=False, max_length=400)
    objects = models.Manager()
    published = PostManager()

    class Meta:
        ordering = ['-pub_date']

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        local_pub_date = localtime(self.pub_date)
        return "/blog/%s/%s/" % (local_pub_date.strftime("%Y/%m/%d"), self.slug)

    # takes the text of the post and replaces the {{REPLACE}} strings with the proper image text
    def process_image_links(self, body_parts):
        link_string = '<a href="%s"><img src="%s" height="%s" width="%s" class="img-responsive" /></a>'
        for i in range(0,len(body_parts)):
            if i%2 == 0:  # skip even pieces because they're not surrounded by replace tokens
                continue
            cur_image = body_parts[i]
            img_search = Media.objects.filter(image_name=cur_image)
            if img_search:
                img = img_search[0]  # should be only one
                link_text = link_string % (img.full_image.url, img.scale_image.url, img.scale_image.height, img.scale_image.width)
                body_parts[i] = link_text
        return "".join(body_parts)

    # override save so we can add the linked images to the post
    def save(self, *args, **kwargs):
        body_parts = self.body.split("{{REPLACE}}")
        image_processed = self.process_image_links(body_parts)
        self.body_html = markdown.markdown(image_processed)
        first_img = self.get_first_image()
        if first_img:
            self.first_image = first_img.full_image
        super(Post, self).save(*args, **kwargs)

    # get the first image from the body text
    def get_first_image(self):
        body_parts = self.body.split("{{REPLACE}}", 2)  # only split twice because we're getting the first image, which is the second piece
        if len(body_parts) > 1:
            img_name = body_parts[1]
            img_search = Media.objects.filter(image_name=img_name)  # find the image model
            if img_search:
                return img_search[0]  # should be only one
        return None


@receiver(models.signals.post_save, sender=Post)
def warm_Post_first_image(sender, instance, **kwargs):
    first = instance.get_first_image()
    if first:
        post_img_warmer = VersatileImageFieldWarmer(
            instance_or_queryset=first,
            rendition_key_set='first_image',
            image_attr='full_image'
        )
        num_created, failed_to_create = post_img_warmer.warm()


class Category(MPTTModel):
    title = models.CharField(max_length=200)
    slug = models.SlugField(max_length=200, unique=True)
    parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
    #parent = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True)
    active = models.BooleanField(default=False)

    class MPTTMeta:
        order_insertion_by = ['slug']

    class Meta:
        ordering = ['slug']
        verbose_name_plural = "categories"

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        return "/blog/category/%s/" % self.slug

    def get_display(self):
        if self.active:
            prefix_character = "[−]"
        else:
            prefix_character = "[+]"
        if self.is_leaf_node():
            prefix = '— '
        else:
            prefix = '<a class="collapsible" href="#%s" data-toggle="collapse">%s</a> ' % (self.pk, prefix_character)
        return '<li>%s<a href="%s">%s</a></li>\n' % (prefix, self.get_absolute_url(), self.title)

    def get_active_string(self):
        if self.active:
            return " in"
        return ""


class Media(models.Model):
    image_name = models.CharField(max_length=200, unique=True)
    pub_date = models.DateTimeField('date published', default=timezone.now, editable=False)
    full_image = VersatileImageField(upload_to="full/%Y/%m/%d", max_length=400)
    scale_image = VersatileImageField(max_length=400, editable=False)

    class Meta:
        verbose_name_plural = "media"
        ordering = ['-pub_date']

    def __str__(self):
        return self.image_name

    def save(self, *args, **kwargs):
        super(Media, self).save(*args, **kwargs)
        self.scale_image = self.full_image.thumbnail['750x540'].name
        super(Media, self).save(*args, **kwargs)


@receiver(models.signals.post_save, sender=Media)
def warm_Media_images(sender, instance, **kwargs):
    media_img_warmer = VersatileImageFieldWarmer(
        instance_or_queryset=instance,
        rendition_key_set='scaled_image',
        image_attr='full_image'
    )
    num_created, failed_to_create = media_img_warmer.warm()


# gets used if we need to populate the first image field for every model in the database
def populate_first_image():
    for post in Post.published.all():
        first = post.get_first_image()
        if first:
            post.first_image = first
            post.save()


class Link(models.Model):
    title = models.CharField(max_length=150)
    url = models.URLField()
    description = models.TextField()

    def __str__(self):
        return self.title
|
Python
| 0
|
@@ -6617,25 +6617,96 @@
 s.TextField(
-)
+null=True, blank=True)
+
+    class Meta:
+        ordering = ['title']
 
     de
|
a54c30063efbd55ebcbfdb1f4b2158673a7dc8a9
|
change the id so we can get the archive url correct
|
blogear/bot.py
|
blogear/bot.py
|
# bot.py
#
# Pubsub Client Bot - Listens to pubsub events for new blog entries.
#

import time
import datetime

from twisted.python import log
from twisted.internet import defer, task, reactor
from twisted.words.xish import domish, xpath
from wokkel.pubsub import PubSubClient, Item
from wokkel import disco
from twisted.words.protocols.jabber.jid import internJID
from wokkel.xmppim import AvailablePresence

# use cheetah for templating
from Cheetah.Template import Template


class PubSub2Blog(object):
    """ Handles PubSub event items and turns them to formated static files.

        The templates are extremly simple for now and can be changed later.
    """

    queue_interval = 2.0

    def __init__(self, path):
        self.path = path
        self.www_path = path+'/www'
        self.template_path = path+'/templates'
        self.atom_queue = []
        self.index_queue = []
        self.processing_atom = False
        self.processing_index = False
        ## tasks to process queues
        self.task = task.LoopingCall(self._processQueues)
        self.task.start(self.queue_interval)  # run every N seconds

    def _processQueue(self, queue_name, ext='.html'):
        """ Process a queue with the given name.
        """
        queue = getattr(self, queue_name+'_queue')
        queue_status = getattr(self, 'processing_'+queue_name, False)
        if not queue_status and len(queue)>0:
            setattr(self, 'processing_'+queue_name, True)
            id, entry_file_name, entries = queue.pop()
            args = {
                'id': id,
                'entries': entries,
                'updated':str(datetime.datetime.utcnow()),
            }
            html = self.template(queue_name+ext, args)
            file_name = self.www_path+'/' + queue_name + ext
            self._writeFile(file_name, html)
            reactor.callLater(1.0, setattr, self, 'processing_'+queue_name, True)

    def _processQueues(self):
        self._processQueue('atom', '.xml')
        self._processQueue('index')

    def template(self, file, vars):
        """ Process a template
        """
        template = open(self.template_path+'/'+file, 'r').read()
        return str(Template(template, searchList=[vars]))

    def atom2hash(self, elem):
        """ Convert an atom domish element to a dictionary for template processing.
        """
        args = {}
        args['id'] = str(elem.id)
        args['categories'] = []
        cats = domish.generateElementsNamed(elem.children, 'category')
        for cat in cats:
            args['categories'].append(cat['term'])
        if elem.content:
            args['content'] = str(elem.content)
        elif elem.title:
            args['content'] = str(elem.title)
        if elem.title:
            args['title'] = str(elem.title)
        elif elem.id:
            args['title'] = str(elem.id)
        else:
            args['title'] = 'No Title'
        return args

    def _writeFile(self, file_name, html):
        # open up file
        ef = open(file_name, 'w')
        ef.write(html)
        ef.close()

    def updateEntry(self, blog_id, args):
        """update the entry on disk.
        """
        html = self.template('entry.html', args)
        id, file_name = blog_id.split(":", 1)
        file_name = self.www_path+'/archive/' + file_name + ".html"
        self._writeFile(file_name, html)

    def updateAtom(self, blog_id, entries):
        """
        """
        id, file_name = blog_id.split(":", 1)
        # push on queue
        self.atom_queue.append((id, file_name, entries))

    def updateIndex(self, blog_id, entries):
        """
        """
        id, file_name = blog_id.split(":", 1)
        # push on queue
        self.index_queue.append((id, file_name, entries))


class Bot(PubSubClient):
    """The listener bot to manage the static html representing thetofu.com

       In addition to the standard XMPPHandler behavior, it also provides
       getJid() and publish().
    """

    admins = []

    def __init__(self, blog, service=None, node=None):
        PubSubClient.__init__(self)
        self.blog = blog
        self.service = service
        self.nodeIdentifier = node

    def connectionInitialized(self):
        """When connection is establised send presence and subscribe to the node
           that the bot is listening to.
        """
        self.send(AvailablePresence())
        self.subscribe(self.service, self.nodeIdentifier, self.getJid().userhostJID())
        PubSubClient.connectionInitialized(self)

    @defer.inlineCallbacks
    def itemsReceived(self, item_event):
        """Gather items, convert to html and send the files to there proper location.
        """
        for item in item_event.items:
            if item.name != 'item':  # TODO - handle retract and other events
                continue
            item_id = item.getAttribute('id', str(time.time()))
            date_pub = xpath.queryForNodes("/item/entry/published", item)
            published = datetime.datetime.now()
            if date_pub:
                published = str(date_pub[0])
            blog_id = None
            blog_ids = xpath.queryForNodes("/item/entry/id", item)
            if blog_ids:
                blog_id = str(blog_ids[0])
            # create entry
            args = self.blog.atom2hash(item.entry)
            self.blog.updateEntry(blog_id, args)
            # grab last 10 items
            ret_items = yield self.items(self.service, self.nodeIdentifier, 10)
            last_items = []
            last_ids = []
            for ri in ret_items:
                rargs = self.blog.atom2hash(ri.entry)
                if rargs['id'] not in last_ids:
                    last_items.append(rargs)
                    last_ids.append(args['id'])
            # update index
            self.blog.updateIndex(blog_id, last_items)
            # update atom
            self.blog.updateAtom(blog_id, last_items)

    def getJid(self):
        """Return the JID the connection is authenticed as."""
        return self.xmlstream.authenticator.jid
|
Python
| 0
|
@@ -3616,32 +3616,66 @@
 # push on queue
+        entries['id'] = file_name
         self.ato
@@ -3832,32 +3832,66 @@
 d.split(":", 1)
+        entries['id'] = file_name
         # push o
|
0b452dca8c517b180df037fafc52f6e2b09811c1
|
fix class name
|
books/forms.py
|
books/forms.py
|
from django.forms import ModelForm

from .models import BookReader, User


class UserForm(ModelForm):
    class Meta:
        model = User


class BookReader(ModelForm):
    class Meta:
        model = BookReader
        excluse = ['user']
|
Python
| 0.999994
|
@@ -150,16 +150,20 @@
okReader
+Form
(ModelFo
|
77d26064694e89d30ea4d62a7a9de9fb7d4038a0
|
Fix typo secounds => seconds (#743)
|
common/util/debug.py
|
common/util/debug.py
|
import functools
import json
import pprint as _pprint
import sublime
from contextlib import contextmanager

_log = []
enabled = False
ENCODING_NOT_UTF8 = "{} was sent as binaries and we dont know the encoding, not utf-8"


def start_logging():
    global _log
    global enabled
    _log = []
    enabled = True


def stop_logging():
    global enabled
    enabled = False


@contextmanager
def disable_logging():
    global enabled
    enabled = False
    try:
        yield
    finally:
        enabled = True


def get_log():
    return json.dumps(_log, indent=2)


def add_to_log(obj):
    if enabled:
        _log.append(obj)


def log_git(command, stdin, stdout, stderr, secounds):
    message = {
        "type": "git",
        "command": command,
        "stdin": stdin,
        "stdout": stdout,
        "stderr": stderr,
        "secounds": secounds
    }
    if stdin.__class__ == bytes:
        message["stdin"] = try_to_decode(stdin, "stdin")
    if stdout.__class__ == bytes:
        message["stdout"] = try_to_decode(stdout, "stdout")
    if stderr.__class__ == bytes:
        message["stderr"] = try_to_decode(stderr, "stderr")
    add_to_log(message)


def try_to_decode(message, name):
    try:
        return message.decode(),
    except UnicodeDecodeError:
        return ENCODING_NOT_UTF8.format(name)


def log_error(err):
    add_to_log({
        "type": "error",
        "error": repr(err)
    })


def log_on_exception(fn):
    def wrapped_fn(*args, **kwargs):
        try:
            fn(*args, **kwargs)
        except Exception as e:
            add_to_log({
                "type": "exception",
                "exception": repr(e)
            })
            raise e


def dump_var(name, value, width=79, end='\n', **kwargs):
    is_str = isinstance(value, str)
    prefix = "{}{}".format(name, ': ' if is_str else '=')
    line_prefix = end + ' '*len(prefix)
    if not is_str:
        value = _pprint.pformat(value, width=max(49, width-len(prefix)))
    print(prefix + line_prefix.join(value.splitlines()), end=end, **kwargs)


def dump(*args, **kwargs):
    for i, arg in enumerate(args):
        dump_var("_arg{}".format(i), arg)
    for name, arg in sorted(kwargs.items()):
        dump_var(name, arg)


# backward-compatibility
def pprint(*args, **kwargs):
    """
    Pretty print since we can not use debugger
    """
    dump(*args, **kwargs)


def get_trace_tags():
    savvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
    if savvy_settings.get("dev_mode"):
        return savvy_settings.get("dev_trace", [])
    else:
        return []


def trace(*args, tag="debug", fill=None, fill_width=60, **kwargs):
    """
    Lightweight logging facility. Provides simple print-like interface with
    filtering by tags and pretty-printed captions for delimiting output
    sections.

    See the "dev_trace" setting for possible values of the "tag" keyword.
    """
    if tag not in get_trace_tags():
        return

    if fill is not None:
        sep = str(kwargs.get('sep', ' '))
        caption = sep.join(args)
        args = "{0:{fill}<{width}}".format(caption and caption + sep,
                                           fill=fill, width=fill_width),
    print("GS [{}]".format(tag), *args, **kwargs)


def trace_for_tag(tag):
    return functools.partial(trace, tag=tag)


trace.for_tag = trace_for_tag


class StackMeter:
    """Reentrant context manager counting the reentrancy depth."""

    def __init__(self, depth=0):
        super().__init__()
        self.depth = depth

    def __enter__(self):
        depth = self.depth
        self.depth += 1
        return depth

    def __exit__(self, *exc_info):
        self.depth -= 1
|
Python
| 0.000001
|
@@ -675,17 +675,16 @@
 rr, seco
-u
 nds):
@@ -837,17 +837,16 @@
 "seco
-u
 nds": se
@@ -847,17 +847,16 @@
 s": seco
-u
 nds
|
c20fc022f1de734876821a65771e1ca500a3d8d4
|
Fix to 2d MB dist generator
|
brownian_tools.py
|
brownian_tools.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 23 10:40:16 2016

Library of common functions for Brownian code

@author: William Jones and Luc Moseley

History:
23/11/2016: WJ - created, imported functions from animate.py
23/11/2016: WJ - started work on t_wall; new general wall collision code
24/11/2016: WJ - Added in new collision routine for walls, incomplete,
    needs vectorising
"""

import numpy as np

"""
Define sub-routines used throghout
"""

"""
NaN array creation routine just to save typing it out
"""
def nanarr(size):
    arr = np.full(size, np.nan)
    return arr


def rand_mb2d(T,m):
    p_in = np.random.uniform()
    return ((-2*T/m) * np.log(1 - (m/T)**2 * p_in))**0.5


"""
Generates list of i and j indices for lower half matrix indexing (i.e. i > j)
For upper half triangle switch i and j. Useful for speeding up routines by
avoiding loops
"""
def tri_indgen(n):
    a = np.arange(n)
    a = np.tile(a,[n,1])
    b = a.T
    i = a[a>b]
    j = b[a>b]
    return i, j


"""
Returns array of times to collision.
"""
def t_collide(pos, vel, sz_arr):
    n = np.shape(vel)[0]
    nd = np.shape(vel)[1]
    [j_arr, i_arr] = tri_indgen(n)  #reversed i,j to use upper half triangle
    temp = np.full([np.size(i_arr), 2], np.nan)
    a = np.sum((vel[i_arr]-vel[j_arr])**2, axis=1)
    b = 2*np.sum((vel[i_arr]-vel[j_arr])*(pos[i_arr]-pos[j_arr]), axis=1)
    c = np.sum((pos[i_arr]-pos[j_arr])**2, axis=1)-(sz_arr[i_arr]+sz_arr[j_arr])**2
    chk = b**2 - 4*a*c
    wh = chk >= 0
    temp[wh,0] = (-b[wh]+chk[wh]**0.5)/(2*a[wh])
    temp[wh,1] = (-b[wh]-chk[wh]**0.5)/(2*a[wh])
    temp = np.nanmin(temp, axis=1)
    temp[temp < 1E-10] = np.nan
    return temp


"""
Returns array of collision times to wall
"""
def t_wall(pos, vel, sz_arr, coord):
    wall_vec = coord[1]-coord[0]
    # Calculate normalised normal vector to wall
    wall_n = np.dot(np.array([[0,-1],[1,0]]), wall_vec)
    wall_n = wall_n/np.sum(wall_n**2)**0.5
    #Position vector to wall
    pos_n = np.dot(coord[0]-pos, wall_n)
    vel_n = np.dot(vel, wall_n)
    #minimum time to collision
    temp = np.nanmin([(pos_n-sz_arr)/vel_n, (pos_n+sz_arr)/vel_n])
    if temp < 1E-10:
        temp = np.nan
    #TODO: check for end of walls, important for obtuse shapes
    return temp


"""
Returns array of distances to collisions
"""
def get_dist(pos,sz_arr):
    n = np.shape(pos)[0]
    [j_arr, i_arr] = tri_indgen(n)  #reversed i,j to use upper half triangle
    temp = np.sum((pos[i_arr]-pos[j_arr])**2, axis=1)**0.5 - (sz_arr[i_arr]+sz_arr[j_arr])
    return temp


"""
Finds vector normal for sawtooth (to create trigger wall on correct side)
"""
def normal(x_v2, y_v2, spike_side):
    x_v3, y_v3 = y_v2, x_v2
    if spike_side == 1:
        y_v3 = y_v3 * -1
    elif spike_side == -1:
        x_v3 = x_v3 * -1
    return np.array([x_v3[1]-x_v3[0], y_v3[1]-y_v3[0]])
|
Python
| 0.000004
|
@@ -686,19 +686,8 @@
(1 -
- (m/T)**2 *
p_i
|
396a217ad725e25c8761edf3678dea349d06e023
|
Reorganize imports
|
setuptools/__init__.py
|
setuptools/__init__.py
|
"""Extensions to the 'distutils' for large or complex distributions"""
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature, _get_unpatched
import distutils.core
from setuptools.depends import Require
from distutils.core import Command as _Command
from distutils.util import convert_path
import os
import sys
from setuptools.version import __version__
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'find_packages'
]
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
def find_packages(where='.', exclude=()):
"""Return a list all Python packages found within directory 'where'
'where' should be supplied as a "cross-platform" (i.e. URL-style) path; it
will be converted to the appropriate local path syntax. 'exclude' is a
sequence of package names to exclude; '*' can be used as a wildcard in the
names, such that 'foo.*' will exclude all subpackages of 'foo' (but not
'foo' itself).
"""
out = []
stack=[(convert_path(where), '')]
while stack:
where,prefix = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where,name)
if ('.' not in name and os.path.isdir(fn) and
os.path.isfile(os.path.join(fn,'__init__.py'))
):
out.append(prefix+name); stack.append((fn,prefix+name+'.'))
for pat in list(exclude)+['ez_setup']:
from fnmatch import fnmatchcase
out = [item for item in out if not fnmatchcase(item,pat)]
return out
setup = distutils.core.setup
_Command = _get_unpatched(_Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
# Add support for keyword arguments
_Command.__init__(self,dist)
for k,v in kw.items():
setattr(self,k,v)
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
for k,v in kw.items():
setattr(cmd,k,v) # update command with keywords
return cmd
import distutils.core
distutils.core.Command = Command # we can't patch distutils.cmd, alas
def findall(dir = os.curdir):
"""Find all files under 'dir' and return the list of full filenames
(relative to 'dir').
"""
all_files = []
for base, dirs, files in os.walk(dir):
if base==os.curdir or base.startswith(os.curdir+os.sep):
base = base[2:]
if base:
files = [os.path.join(base, f) for f in files]
all_files.extend(filter(os.path.isfile, files))
return all_files
import distutils.filelist
distutils.filelist.findall = findall # fix findall bug in distutils.
# sys.dont_write_bytecode was introduced in Python 2.6.
if ((hasattr(sys, "dont_write_bytecode") and sys.dont_write_bytecode) or
(not hasattr(sys, "dont_write_bytecode") and os.environ.get("PYTHONDONTWRITEBYTECODE"))):
_dont_write_bytecode = True
else:
_dont_write_bytecode = False
|
Python
| 0.000001
|
@@ -68,175 +68,49 @@
 """
-from setuptools.extension import Extension
-from setuptools.dist import Distribution, Feature, _get_unpatched
-import distutils.core
-from setuptools.depends import Requi
+
+import os
+import sys
+import distutils.co
 re
 f
@@ -195,36 +195,85 @@
 rt_path
+
 import 
-os
-import sys
+setuptools.version
+from setuptools.extension import Extension
 
 from se
@@ -285,23 +285,20 @@
 ols.
-version
+dist
 import
 __ve
@@ -297,19 +297,84 @@
 ort 
-__version__
+Distribution, Feature, _get_unpatched
+from setuptools.depends import Require
 
 __
@@ -481,16 +481,62 @@
 ges']
 
+__version__ = setuptools.version.__version__
+
 bootstra
|
b95aaa571669d2a5277bd4d9e18d9d1b81e3bcab
|
Decrypt post test
|
BunqAPI/tests.py
|
BunqAPI/tests.py
|
from django.test import TestCase, RequestFactory
from BunqAPI.installation import installation
from django.contrib.auth.models import User
from BunqAPI.encryption import AESCipher
import json
import base64
from BunqAPI import views
from django.contrib.auth import authenticate
from faker import Faker
# from pprint import pprint

# Create your tests here.


class testScript(TestCase):
    """docstring for testScript.

    This test is supposed to test the scipts."""

    def setUp(self):
        fake = Faker()
        self.username = fake.name()
        self.password = fake.password(
            length=10,
            special_chars=True,
            digits=True,
            upper_case=True,
            lower_case=True)
        user = User.objects.create_user(self.username, '', self.password)
        user.save()

    def installation(self):
        decryt = AESCipher(self.password)
        encryt = json.loads(installation(
            self.username, self.password, 'API_KEY').encrypt())
        d = AESCipher.decrypt(decryt, encryt['secret'])
        self.assertIs(len(d), 4)
        self.assertTrue(isinstance(d, dict))

    def installation_error1(self):
        decryt = AESCipher('wrong password')
        encryt = json.loads(installation(
            self.username, self.password, 'API_KEY').encrypt())
        self.assertRaises(
            UnicodeDecodeError,
            AESCipher.decrypt,
            decryt, encryt['secret']
        )

    def installation_error2(self):
        decryt = AESCipher(self.password)
        encryt = json.loads(installation(
            self.username, self.password, 'API_KEY').encrypt())
        secret = encryt['secret'] = 'destroyed'
        self.assertRaises(
            base64.binascii.Error,
            AESCipher.decrypt,
            decryt, secret
        )

    def GUIDs(self):
        guid = User.objects.get(username=self.username).profile.GUID
        self.assertTrue(isinstance(guid, list))

    def test_run(self):
        self.installation()
        self.installation_error1()
        self.installation_error2()
        self.GUIDs()


class testView(TestCase):
    """docstring for testView.

    This test is supposed to test the views.

    Need to find a way to simulate logged in with 2FA
    """

    def test_generate(self):
        response = self.client.get('/generate', follow=True)
        self.assertEqual(response.status_code, 200)

        response2 = self.client.post('/generate', follow=True)
        self.assertEqual(response2.status_code, 200)

    def test_decrypt(self):
        response = self.client.get('/decrypt', follow=True)
        self.assertEqual(response.status_code, 200)

        response2 = self.client.post('/decrypt', follow=True)
        self.assertEqual(response2.status_code, 200)

    def test_api(self):
        response = self.client.post('/API/register', follow=True)
        self.assertEqual(response.status_code, 200)


class TestViewCode(TestCase):
    """docstring for TestViewCode."""

    def setUp(self):
        fake = Faker()
        name = fake.name()
        pas = fake.password(
            length=10,
            special_chars=True,
            digits=True,
            upper_case=True,
            lower_case=True
        )
        User.objects.create_user(name, '', pas)
        self.user = authenticate(username=name, password=pas)
        self.user.is_verified = lambda: True
        self.factory = RequestFactory()

    def test_generate_get(self):
        request = self.factory.get('/generate')
        request.user = self.user

        self.assertEqual(
            views.generate(request).status_code,
            200
        )

    def test_decrypt_get(self):
        request = self.factory.get('/decrypt')
        request.user = self.user

        self.assertEqual(
            views.decrypt(request).status_code,
            200
        )

    def test_generate_post(self):
        data = {
            'API': Faker().sha256(raw_output=False),
            'encryption_password': Faker().password(
                length=10,
                special_chars=True,
                digits=True,
                upper_case=True,
                lower_case=True
            ),
        }
        request = self.factory.post('/generate', data=data)
        request.user = self.user

        self.assertEqual(
            views.generate(request).status_code,
            200
        )
|
Python
| 0.000007
|
@@ -4411,28 +4411,324 @@
 ,
             200
         )
+
+    def test_decrypt_post(self):
+        data = {
+            'Nothing': 'Nothing',
+        }
+        request = self.factory.post('/decrypt', data=data)
+        request.user = self.user
+
+        self.assertEqual(
+            views.decrypt(request).status_code,
+            200
+        )
|
4bea54dade5e6d3e1940ba596a08cf076c2df5b6
|
Assertion was always true
|
akvo/rsr/management/commands/fix_orphaned_periods.py
|
akvo/rsr/management/commands/fix_orphaned_periods.py
|
# -*- coding: utf-8 -*-

# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.

import sys

from django.core.management.base import BaseCommand

from ...models import Indicator, IndicatorPeriod

INDICATORS = [
    # Child, Parent
    (20552, 16726),
    (14910, 14855),
]

PERIODS = [
    # Orphaned periods
    # Child, Parent
    (16521, 16522),
    (16519, 16520),
    (23471, 17821),
    (23472, 17821),
    # Orphaned indicators, hence periods
    (26220, 18201),
    (14972, 14917),
]

# Unable to figure out parent for the following
# #######################
# period id: 26081, indicator id: 20414
# project id: 4272
# result title: u'25. Community groups exist with a recognisable voice/representation of women and marginalized groups'
# #######################
# period id: 12721, indicator id: 12939
# project id: 4145
# result title: u'Partners have access to Akvo RSR'
# extra child indicator, no corresponding parent (can be ignored?)


def pprint_period_lineage(period):
    indicator = period.indicator
    result = indicator.result
    project = result.project
    print u'{} > {} > {} > {}--{}'.format(
        project.title, result.title, indicator.title, period.period_start, period.period_end
    )
    print u'{} > {} > {} > {}'.format(project.id, result.id, indicator.id, period.id)
    print u'#' * 20


class Command(BaseCommand):
    args = '[<indicator|indicator_period> <child_id> <parent_id>]'
    help = 'Script for fixing orphaned indicators and periods'

    def handle(self, *args, **options):
        # parse options
        verbosity = int(options['verbosity'])
        if len(args) == 0:
            indicators = INDICATORS
            periods = PERIODS
        elif len(args) == 3 and args[0] == 'indicator':
            indicators = [(int(args[1]), int(args[2]))]
            periods = []
        elif len(args) == 3 and args[0] == 'indicator_period':
            indicators = []
            periods = [(int(args[1]), int(args[2]))]
        else:
            print 'Usage: {} {}'.format(sys.argv[0], self.args)
            sys.exit(1)

        for child_id, parent_id in indicators:
            child_indicator = Indicator.objects.get(id=child_id)
            parent_indicator = Indicator.objects.get(id=parent_id)
            assert (
                child_indicator.result.parent_result == parent_indicator.result,
                '{} cannot be a parent of {}'.format(parent_id, child_id)
            )
            child_indicator.parent_indicator = parent_indicator
            child_indicator.save()
            # Any additional missing data is taken care of by saving the parent.
            parent_indicator.save()
            if verbosity > 1:
                self.stdout.write('{} indicator made parent of {}'.format(parent_id, child_id))

        for child_id, parent_id in periods:
            child_period = IndicatorPeriod.objects.get(id=child_id)
            parent_period = IndicatorPeriod.objects.get(id=parent_id)
            assert (
                child_period.indicator.result.parent_result == parent_period.indicator.result,
                '{} cannot be a parent of {}'.format(parent_id, child_id)
            )
            child_period.parent_period = parent_period
            child_period.save()
            # Any additional missing data is taken care of by saving the parent.
            parent_period.save()
            pprint_period_lineage(child_period)
            if parent_period.indicator.periods.count() != child_period.indicator.periods.count():
                print 'No. of periods mismatch with parent :: '
                pprint_period_lineage(parent_period)
            if verbosity > 1:
                self.stdout.write('{} period made parent of {}'.format(parent_id, child_id))

        if indicators:
            fixed_indicators = ', '.join(str(id_) for id_, _ in indicators)
            self.stdout.write('Fixed parents for indicator ids: {}'.format(fixed_indicators))

        if periods:
            fixed_periods = ', '.join(str(id_) for id_, _ in periods)
            self.stdout.write('Fixed parents for period ids: {}'.format(fixed_periods))
|
Python
| 0.999997
|
@@ -2486,179 +2486,181 @@
 sert
- (
-            child_indicator.result.parent_result == parent_indicator.result,
-            '{} cannot be a parent of {}'.format(parent_id, child_id)
-        )
+ion_message = '{} cannot be a parent of {}'.format(parent_id, child_id)
+        assert child_indicator.result.parent_result == parent_indicator.result, assertion_message
 
@@ -3198,32 +3198,22 @@
-assert (
+child_result = 
 chi
@@ -3230,33 +3230,45 @@
 indicator.result
-.
+
 parent_result ==
@@ -3258,33 +3258,32 @@
 parent_result =
-=
 parent_period.i
@@ -3297,17 +3297,16 @@
 r.result
-,
 
@@ -3310,19 +3310,35 @@
-
+assertion_message = 
 '{} can
@@ -3396,25 +3396,93 @@
-)
+assert child_result.parent_result == parent_result, assertion_message
 
|
390bcb4be27012794ceb927e3ab2e384c2909daf
|
Add retries
|
conf/celeryconfig.py
|
conf/celeryconfig.py
|
from datetime import timedelta
import os
from ast import literal_eval

from celery.schedules import crontab
from kombu import Queue

CLUSTER_NAME = os.getenv('CLUSTER_NAME', 'local')
MESSAGES_TTL = 7200

# Broker and Queue Settings
BROKER_URL = os.getenv('BROKER_URL',
                       'amqp://guest:guest@localhost:5672')
BROKER_HEARTBEAT = int(os.getenv('BROKER_HEARTBEAT', '20'))

CELERY_DEFAULT_QUEUE = 'cluster-deployer-%s-default' % CLUSTER_NAME
CELERY_PREFORK_QUEUE = 'cluster-deployer-%s-prefork' % CLUSTER_NAME

CELERY_QUEUES = (
    Queue(CELERY_DEFAULT_QUEUE, routing_key='default',
          queue_arguments={'x-message-ttl': MESSAGES_TTL}),
    Queue(CELERY_PREFORK_QUEUE, routing_key='prefork',
          queue_arguments={'x-message-ttl': MESSAGES_TTL}),
)

CELERY_DEFAULT_EXCHANGE = 'cluster-deployer-%s' % (CLUSTER_NAME)
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_DEFAULT_ROUTING_KEY = 'default'

CELERY_ROUTES = {
    'deployer.tasks.deployment._fleet_deploy': {
        'routing_key': 'prefork',
    },
    'deployer.tasks.deployment._fleet_undeploy': {
        'routing_key': 'prefork',
    },
    'deployer.tasks.deployment._wait_for_undeploy': {
        'routing_key': 'prefork',
    },
    'deployer.tasks.deployment._wait_for_undeploy': {
        'routing_key': 'prefork',
    },
    'deployer.tasks.deployment._fleet_check_running': {
        'routing_key': 'prefork',
    }
}

CELERY_RESULT_BACKEND = 'amqp'
CELERY_RESULT_EXCHANGE = 'cluster-deployer-%s-results' % CLUSTER_NAME

CELERY_IMPORTS = ('deployer.tasks', 'deployer.tasks.deployment',
                  'deployer.tasks.common', 'deployer.tasks.proxy',
                  'celery.task')
CELERY_ACCEPT_CONTENT = ['json', 'pickle']
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_RESULT_SERIALIZER = 'pickle'
CELERY_ALWAYS_EAGER = literal_eval(os.getenv('CELERY_ALWAYS_EAGER', 'False'))
CELERY_CHORD_PROPAGATES = True
CELERY_STORE_ERRORS_EVEN_IF_IGNORED = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
CELERY_IGNORE_RESULT = False
CELERYD_TASK_SOFT_TIME_LIMIT = 300
CELERYD_TASK_TIME_LIMIT = 330
CELERY_SEND_TASK_SENT_EVENT = True
CELERY_TASK_RESULT_EXPIRES = timedelta(hours=6)
CELERY_RESULT_PERSISTENT = True

# Remote Management
CELERYD_POOL_RESTARTS = True

# Queue Settings
CELERY_QUEUE_HA_POLICY = 'all'

# GLobal Settings
CELERY_TIMEZONE = 'UTC'

# Task releated settings
CELERY_ACKS_LATE = True
CELERY_TASK_PUBLISH_RETRY_POLICY = {
    'max_retries': 30,
    'interval_step': 1,
    'interval_max': 10
}

# Celery Beat settings
CELERYBEAT_SCHEDULE = {
    'celery.task.backend_cleanup': {
        'task': 'deployer.tasks.backend_cleanup',
        'schedule': crontab(hour="*/2", minute=0),
        'args': (),
    }
}
|
Python
| 0.000539
|
@@ -334,60 +334,205 @@
 KER_
-HEARTBEAT = int(os.getenv('BROKER_HEARTBEAT', '20'))
+CONNECTION_TIMEOUT = int(os.getenv('BROKER_CONNECTION_TIMEOUT', '20'))
+BROKER_HEARTBEAT = int(os.getenv('BROKER_HEARTBEAT', '20'))
+BROKER_CONNECTION_RETRY = True
+BROKER_CONNECTION_MAX_RETRIES = 100
 CEL
|
f78ef9ff6094b23316a170cf8ae33056ba358aae
|
Remove a TODO
|
feedreader/handlers.py
|
feedreader/handlers.py
|
"""APIRequestHandler subclasses for API endpoints."""
from tornado.web import HTTPError
from feedreader.api_request_handler import APIRequestHandler
class MainHandler(APIRequestHandler):
def get(self):
username = self.require_auth()
self.write({"message": "Hello world!"})
class UsersHandler(APIRequestHandler):
def post(self):
"""Create a new user."""
body = self.require_body_schema({
"type": "object",
"properties": {
"username": {"type": "string"},
"password": {"type": "string"},
},
"required": ["username", "password"],
})
# TODO: handle username already being taken, empty password
try:
self.auth_provider.register(body["username"], body["password"])
except ValueError as e:
raise HTTPError(400, reason=e.message)
self.set_status(201)
|
Python
| 0.998852
|
@@ -662,76 +662,8 @@
 })
-        # TODO: handle username already being taken, empty password
 
|
cd37746924a6b6b94afd044688c4a2554d0f50d1
|
fix variable name for id
|
import.py
|
import.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from app import app, db, Request
from glob import glob
from sqlalchemy.exc import IntegrityError
from OpenSSL import crypto
from datetime import datetime

for path in glob("{}/freifunk_*.crt".format(app.config['DIRECTORY'])):
    with open(path) as certfile:
        print("Importing {} ...".format(path))
        certificate = crypto.load_certificate(
            crypto.FILETYPE_PEM,
            certfile.read()
        )

        # extract email and id from subject components
        components = dict(certificate.get_subject().get_components())
        email_address = components[b'emailAddress']

        # remove 'freifunk_' prefix from id
        cert_id = components[b'CN'].decode('utf-8').replace('freifunk_', '')

        # extract creation date from certificate
        generation_date = datetime.strptime(
            certificate.get_notBefore().decode('utf-8'),
            '%Y%m%d%H%M%SZ'
        )

        request = Request(cert_id, email_address, generation_date)

        try:
            db.session.add(request)
            db.session.commit()
            print("Improted {}.".format(id))
        except IntegrityError:
            print("{} already exists.".format(id))
            db.session.rollback()
|
Python
| 0.999789
|
@@ -1163,32 +1163,37 @@
 ted {}.".format(
+cert_
 id))
         exc
@@ -1258,16 +1258,21 @@
 .format(
+cert_
 id))
|
d4da07688c0b1244bad24c26483a0f1b94a8fab0
|
remove that filtering option
|
src/apps/calendar/schema.py
|
src/apps/calendar/schema.py
|
from graphene import relay, AbstractType, String
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField

from .models import Calendar, Day


class CalendarNode(DjangoObjectType):
    """
    how does this work?
    """
    class Meta:
        model = Calendar
        filter_fields = {
            'uuid': ['exact', ]
        }
        filter_order_by = ['uuid']
        interfaces = (relay.Node, )


class CalendarQuery(AbstractType):
    """
    how does this work?
    """
    calendar = relay.Node.Field(CalendarNode)
    calendars = DjangoFilterConnectionField(CalendarNode)


class DayNode(DjangoObjectType):
    """
    how does this work?
    """
    class Meta:
        model = Day
        interfaces = (relay.Node, )
        exclude_fields = ('image', 'image_small', 'image_large')

    image_large_url = String()
    image_small_url = String()

    def resolve_image_large_url(self, args, context, info):
        """
        self is the Day instance
        """
        return DayNode.get_absolute_image_url(
            context, self.get_image_large_url()
        )

    def resolve_image_small_url(self, args, context, info):
        """
        self is the Day instance
        """
        return DayNode.get_absolute_image_url(
            context, self.get_image_small_url()
        )

    def get_absolute_image_url(context, relative_url):
        return context.scheme + '://' + context.get_host() + relative_url


class DayQuery(AbstractType):
    """
    how does this work?
    """
    day = relay.Node.Field(DayNode)
    days = DjangoFilterConnectionField(DayNode)
|
Python
| 0.000045
|
@@ -382,43 +382,8 @@
 }
-        filter_order_by = ['uuid']
 
|
f859752e1b2f5e3ad16b0220199375dd130c184b
|
Use namespace only to look up a symbol, not environment.
|
src/libeeyore/values.py
|
src/libeeyore/values.py
|
from abc import ABCMeta, abstractmethod

from all_known import all_known
from eeyinterface import implements_interface
from usererrorexception import EeyUserErrorException

# -- Base class and global methods ---

class EeyValue( object ):
    __metaclass__ = ABCMeta

    def __init__( self ):
        self.cached_eval = None

    def render( self, env ):
        return env.render_value( self.evaluate( env ) )

    def is_known( self, env ):
        return True

    def evaluate( self, env ):
        if self.cached_eval is None:
            self.cached_eval = self.do_evaluate( env )
        return self.cached_eval

    def do_evaluate( self, env ):
        return self

    def evaluated_type( self, env ):
        return self.__class__

    @abstractmethod
    def construction_args( self ): pass

    def __repr__( self ):
        return "%s(%s)" % (
            self.__class__.__name__,
            ",".join( repr(x) for x in self.construction_args() )
        )

# --- Specific value types ---

class EeyVariable( EeyValue ):
    def __init__( self, clazz ):
        EeyValue.__init__( self )
        self.clazz = clazz

    def construction_args( self ):
        return ( self.clazz, )

    def is_known( self, env ):
        return False

    def evaluated_type( self, env ):
        return self.clazz


class EeySymbol( EeyValue ):
    def __init__( self, symbol_name ):
        EeyValue.__init__( self )
        self.symbol_name = symbol_name

    def construction_args( self ):
        return ( self.symbol_name, )

    def _lookup( self, env ):
        if self.symbol_name not in env.namespace:
            raise EeyUserErrorException( "The symbol '%s' is not defined." %
                self.symbol_name )
            # TODO: line, column, filename
        return env.namespace[self.symbol_name]

    def name( self ):
        # TODO: delete this method, or use it consistently
        return self.symbol_name

    def do_evaluate( self, env ):
        # Look up this symbol in the namespace of our environment
        value = self._lookup( env ).evaluate( env )
        if value.is_known( env ):
            # Pass back what we looked up
            return value
        elif implements_interface( value, EeySymbol ):
            return value
        else:
            # If what we find is a variable (i.e. something unknown until
            # runtime) then we simply return ourselves: for the purpose of
            # rendering, this _is_ a symbol.
            return self

    def is_known( self, env ):
        return self._lookup( env ).is_known( env )

    def evaluated_type( self, env ):
        return self._lookup( env ).evaluated_type( env )


class EeyBool( EeyValue ):
    def __init__( self, value ):
        EeyValue.__init__( self )
        self.value = value

    def construction_args( self ):
        return ( self.value, )


class EeyInt( EeyValue ):
    def __init__( self, str_int ):
        EeyValue.__init__( self )
        self.value = str( str_int )

    def construction_args( self ):
        return ( self.value, )

    def plus( self, other ):
        assert other.__class__ == self.__class__
        # TODO: handle large numbers
        return EeyInt( str( int( self.value ) + int( other.value ) ) )

    def times( self, other ):
        assert other.__class__ == self.__class__
        # TODO: handle large numbers
        return EeyInt( str( int( self.value ) * int( other.value ) ) )

    def greater_than( self, other ):
        assert other.__class__ == self.__class__
        # TODO: handle large numbers
        return EeyBool( int( self.value ) > int( other.value ) )


class EeyFloat( EeyValue ):
    def __init__( self, str_float ):
        EeyValue.__init__( self )
        self.value = str( str_float )

    def construction_args( self ):
        return ( self.value, )

    def plus( self, other ):
        assert other.__class__ == self.__class__
        # TODO: handle arbitrary numbers
        return EeyFloat( str( float( self.value ) + float( other.value ) ) )

    def times( self, other ):
        assert other.__class__ == self.__class__
        # TODO: handle arbitrary numbers
        return EeyFloat( str( float( self.value ) * float( other.value ) ) )

    def greater_than( self, other ):
        assert other.__class__ == self.__class__
        # TODO: handle arbitrary numbers
        return EeyBool( float( self.value ) > float( other.value ) )


class EeyNoneType( EeyValue ):
    def construction_args( self ):
        return ()

eey_none = EeyNoneType()


class EeyVoid( EeyValue ):
    def construction_args( self ):
        return ()


class EeyString( EeyValue ):
    def __init__( self, py_str ):
        EeyValue.__init__( self )
        self.value = py_str

    def construction_args( self ):
        return ( self.value, )

    def as_py_str( self ):
        return self.value


class EeyBinaryOp( EeyValue ):
    def __init__( self, left_value, right_value ):
        EeyValue.__init__( self )
        # TODO: assert( all( self.is_applicable, ( left_value, right_value ) )
        self.left_value = left_value
        self.right_value = right_value

    def construction_args( self ):
        return ( self.left_value, self.right_value )

    def do_evaluate( self, env ):
        if self.is_known( env ):
            lv = self.left_value.evaluate( env )
            rv = self.right_value.evaluate( env )
            return self.operator( lv, rv )
        else:
            return self

    def evaluated_type( self, env ):
        return self.left_value.evaluated_type( env )

    def is_known( self, env ):
        return all_known( ( self.left_value, self.right_value ), env )


class EeyPlus( EeyBinaryOp ):
    def operator( self, lv, rv ):
        return lv.plus( rv )


class EeyTimes( EeyBinaryOp ):
    def operator( self, lv, rv ):
        return lv.times( rv )


class EeyGreaterThan( EeyBinaryOp ):
    def operator( self, lv, rv ):
        return lv.greater_than( rv )

    def evaluated_type( self, env ):
        return EeyBool


class EeyPass( EeyValue ):
    """A statement that does nothing."""

    def __init__( self ):
        EeyValue.__init__( self )

    def construction_args( self ):
        return ()


class EeyType( EeyValue ):
    def __init__( self, value ):
        EeyValue.__init__( self )
        # TODO: check we have been passed a type
        self.value = value

    def construction_args( self ):
        return ( self.value, )

    def __eq__( self, other ):
        return (
            self.__class__ == other.__class__ and
            self.value == other.value
        )

    def __ne__( self, other ):
        return not self.__eq__( other )


class EeyArray( EeyValue ):
    def __init__( self, value_type, values ):
        EeyValue.__init__( self )
        self.value_type = value_type
        self.values = values

    def construction_args( self ):
        return ( self.value_type, self.values )

    def get_index( self, int_index ):
        return self.values[int_index]
|
Python
| 0
|
@@ -1543,35 +1543,41 @@
 _lookup( self, 
-env
+namespace
 ):
         if s
@@ -1599,20 +1599,16 @@
 not in 
-env.
 namespac
@@ -1781,20 +1781,16 @@
 return 
-env.
 namespac
@@ -2049,32 +2049,42 @@
 elf._lookup( env
+.namespace
 ).evaluate( env
@@ -2564,16 +2564,26 @@
 kup( env
+.namespace
 ).is_kn
@@ -2663,16 +2663,26 @@
 kup( env
+.namespace
 ).evalu
|
4a30762680fd3ee9b95795f39e10e15faf4279e8
|
remove language intolerance
|
src/boarbot/modules/echo.py
|
src/boarbot/modules/echo.py
|
import discord

from boarbot.common.botmodule import BotModule
from boarbot.common.events import EventType


class EchoModule(BotModule):
    async def handle_event(self, event_type, args):
        if event_type == EventType.MESSAGE:
            await self.echo(args[0])

    async def echo(self, message: discord.Message):
        if not self.client.user.mentioned_in(message):
            return  # Gotta mention me
        if '!echo' not in message.clean_content:
            return  # Need !echo
        echo = message.clean_content.split('!echo', 1)[1]
        if 'shit' in echo:
            raise ValueError('Your language is bad and you should feel bad')
        await self.client.send_message(message.channel, echo)
|
Python
| 0.999939
|
@@ -554,112 +554,8 @@
 [1]
-        if 'shit' in echo:
-            raise ValueError('Your language is bad and you should feel bad')
 
|
e8d0c7f678689c15049186360c08922be493587a
|
Remove non-existant flask.current_app.debug doc ref.
|
flask_nav/renderers.py
|
flask_nav/renderers.py
|
from flask import current_app
from dominate import tags
from visitor import Visitor


class Renderer(Visitor):
    """Base interface for navigation renderers.

    Visiting a node should return a string or an object that converts to a
    string containing HTML."""

    def visit_object(self, node):
        """Fallback rendering for objects.

        If the current application is in debug-mode
        (:attr:`flask.current_app.debug` is ``True``), an ``<!-- HTML comment
        -->`` will be rendered, indicating which class is missing a visitation
        function.

        Outside of debug-mode, returns an empty string.
        """
        if current_app.debug:
            return tags.comment(
                'no implementation in {} to render {}'.format(
                    self.__class__.__name__, node.__class__.__name__,
                ))
        return ''


class SimpleRenderer(Renderer):
    """A very basic HTML5 renderer.

    Renders a navigational structure using ``<nav>`` and ``<ul>`` tags that
    can be styled using modern CSS.

    :param kwargs: Additional attributes to pass on to the root ``<nav>``-tag.
    """

    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def visit_Link(self, node):
        return tags.a(node.title, **node.attribs)

    def visit_Navbar(self, node):
        kwargs = {'_class': 'navbar'}
        kwargs.update(self.kwargs)

        cont = tags.nav(**kwargs)
        ul = cont.add(tags.ul())

        for item in node.items:
            ul.add(tags.li(self.visit(item)))

        return cont

    def visit_View(self, node):
        kwargs = {}

        if node.active:
            kwargs['_class'] = 'active'

        return tags.a(node.title,
                      href=node.get_url(),
                      title=node.title,
                      **kwargs)

    def visit_Subgroup(self, node):
        group = tags.ul(_class='subgroup')
        title = tags.span(node.title)

        if node.active:
            title.attributes['class'] = 'active'

        for item in node.items:
            group.add(tags.li(self.visit(item)))

        return tags.div(title, group)

    def visit_Separator(self, node):
        return tags.hr(_class='separator')

    def visit_Label(self, node):
        return tags.span(node.title, _class='nav-label')
|
Python
| 0
|
@@ -404,14 +404,9 @@
 (
-:attr:
+`
 `fla
@@ -426,16 +426,17 @@
 p.debug`
+`
 is ``Tr
|
8972cf13c7f3460a5f9f85e8a5a23bfa6f53006d
|
Update mongo
|
flask_restler/mongo.py
|
flask_restler/mongo.py
|
import bson
import marshmallow as ma
from .filters import Filter as VanilaFilter, Filters
from .resource import ResourceOptions, Resource, APIError, logger
class ObjectId(ma.fields.Field):
def _deserialize(self, value, attr, data):
try:
return bson.ObjectId(value)
except:
raise ma.ValidationError('invalid ObjectId `%s`' % value)
def _serialize(self, value, attr, obj):
if value is None:
return ma.missing
return str(value)
class MongoSchema(ma.Schema):
_id = ObjectId()
def __init__(self, instance=None, **kwargs):
self.instance = instance
super(MongoSchema, self).__init__(**kwargs)
@ma.post_load
def make_instance(self, data):
"""Build object from data."""
if self.instance is not None:
self.instance.update(data)
return self.instance
return data
def load(self, data, instance=None, *args, **kwargs):
self.instance = instance or self.instance
return super(MongoSchema, self).load(data, *args, **kwargs)
class MongoOptions(ResourceOptions):
def __init__(self, cls):
super(MongoOptions, self).__init__(cls)
self.name = self.meta and getattr(self.meta, 'name', None)
if not self.collection:
return
self.name = self.name or str(self.collection.name)
if not cls.Schema:
meta = type('Meta', (object,), self.schema_meta)
cls.Schema = type(
self.name.title() + 'Schema', (MongoSchema,), dict({'Meta': meta}, **self.schema))
class Filter(VanilaFilter):
operators = {
'$eq': '$eq',
'$ge': '$gte',
'$gt': '$gt',
'$in': '$in',
'$le': '$lte',
'$lt': '$lt',
'$ne': '$ne',
'$nin': '$nin',
}
def apply(self, collection, ops, **kwargs):
"""Filter mongo."""
logger.debug('Apply filter %s (%r)', self.name, ops)
return collection.find({self.name: dict(ops)})
class MongoFilters(Filters):
FILTER_CLASS = Filter
class MongoChain(object):
""" Support query chains.
Only for `find` and `find_one` methods.
::
collection = MongoChain(mongo_collection)
collection = collection.find({'field': 'value').find('field2': 'value')
result = collection.find_one({'field3': 'value')
results = collection.skip(10).limit(10)
"""
CURSOR_METHODS = (
'where', 'sort', 'skip', 'rewind', 'retrieved', 'remove_option', 'next', 'min',
'max_time_ms', 'max_scan', 'max_await_time_ms', 'max', 'limit', 'hint', 'explain',
'distinct', 'cursor_id', 'count', 'comment', 'collection', 'close', 'clone', 'batch_size',
'alive', 'address', 'add_option', '__getitem__'
)
def __init__(self, collection):
self.collection = collection
self.query = {}
self.projection = None
def find(self, query=None, projection=None):
self.query = self.__update__(query)
self.projection = projection
return self
def find_one(self, query=None, projection=None):
query = self.__update__(query)
logger.debug('Mongo find one: %r', query)
return self.collection.find_one(query, projection=projection)
def aggregate(self, pipeline, **kwargs):
if self.query:
for params in pipeline:
if '$match' in params:
params['$match'] = self.__update__(params['$match'])
break
else:
pipeline.insert(0, {'$match': self.query})
logger.debug('Mongo aggregate: %r', pipeline)
return self.collection.aggregate(pipeline, **kwargs)
def __repr__(self):
return "<MongoChain (%s) %r>" % (self.collection.name, self.query)
def __update__(self, query):
if query:
return dict(self.query, **query)
return self.query
def __getattr__(self, name):
"""Proxy any attributes except find to self.collection."""
logger.debug('Mongo load: %r', self.query)
if name in self.CURSOR_METHODS:
cursor = self.collection.find(self.query, self.projection)
return getattr(cursor, name)
return getattr(self.collection, name)
class MongoResource(Resource):
"""Provide API for Pymongo document and collections."""
OPTIONS_CLASS = MongoOptions
class Meta:
collection = None
filters = 'login',
filters_converter = MongoFilters
aggregate = False # Support aggregation. Set to pipeline.
object_id = '_id'
schema = {}
def get_many(self, *args, **kwargs):
"""Return collection filters."""
return MongoChain(self.meta.collection)
def get_one(self, *args, **kwargs):
"""Load a resource."""
resource = super(MongoResource, self).get_one(*args, **kwargs)
if not resource:
return None
return self.collection.find_one({self.meta.object_id: bson.ObjectId(resource)})
def paginate(self, offset=0, limit=None):
"""Paginate collection."""
if self.meta.aggregate:
pipeline_all = self.meta.aggregate + [{'$limit': limit}, {'$skip': offset}]
pipeline_num = self.meta.aggregate + [{'$group': {'_id': None, 'total': {'$sum': 1}}}]
counts = list(self.collection.aggregate(pipeline_num))
return (
self.collection.aggregate(pipeline_all),
counts and counts[0]['total'] or 0
)
return self.collection.skip(offset).limit(limit), self.collection.count()
def to_simple(self, data, many=False, **kwargs):
"""Support aggregation."""
if isinstance(data, MongoChain) and self.meta.aggregate:
data = data.aggregate(self.meta.aggregate)
return super(MongoResource, self).to_simple(data, many=many, **kwargs)
def get_schema(self, resource=None, **kwargs):
return self.Schema(instance=resource)
def save(self, resource):
"""Save resource to DB."""
if resource.get('_id'):
self.meta.collection.replace_one({'_id': resource['_id']}, resource)
else:
write = self.meta.collection.insert_one(resource)
resource['_id'] = write.inserted_id
return resource
def sort(self, collection, *sorting, **kwargs):
"""Sort resources."""
sorting = {name: -1 if desc else 1 for name, desc in sorting}
return collection.sort(list(sorting.items()))
def delete(self, resource=None, **kwargs):
if resource is None:
raise APIError('Resource not found', status_code=404)
self.collection.delete_one({self.meta.object_id: resource[self.meta.object_id]})
|
Python
| 0.000001
|
@@ -5250,39 +5250,39 @@
%5B%7B'$
-limit': limit%7D, %7B'$skip': offse
+skip': offset%7D, %7B'$limit': limi
t%7D%5D%0A
@@ -6538,16 +6538,228 @@
orting%7D%0A
+ if self.meta.aggregate:%0A pipeline = %5Bp for p in self.meta.aggregate if '$sort' not in p%5D%0A pipeline.append(%7B'$sort': sorting%7D)%0A return self.collection.aggregate(pipeline)%0A%0A
|
f6df8c05d247650f4899d1101c553230a60ccc70
|
Improve registration response messages
|
fogeybot/cogs/users.py
|
fogeybot/cogs/users.py
|
from discord.ext.commands import command
class UserCommands(object):
def __init__(self, bot, api, db):
self.bot = bot
self.api = api
self.db = db
@command(description="Registers/updates your battle tag", pass_context=True)
async def register(self, ctx, battletag: str):
if '#' not in battletag:
await self.bot.reply("bad battle tag format, it should look like this: `MrCool#123`")
return
# TODO verify with hotslogs (account for private profiles)
await self.db.register_battle_tag(ctx.message.author.id, battletag)
await self.bot.reply("Registration successful")
@command(description="Shows your registered battle tags, if any", pass_context=True)
async def registrationstatus(self, ctx):
battle_tag = await self.db.lookup_battle_tag(ctx.message.author.id)
if battle_tag:
await self.bot.reply("Registered battle tag: `{}`".format(battle_tag))
else:
await self.bot.reply("Battle tag not found")
@command(description="Unregisters your battle tag", pass_context=True)
async def unregister(self, ctx):
await self.db.unregister_battle_tag(ctx.message.author.id)
await self.bot.reply("Registration removed")
|
Python
| 0.000002
|
@@ -464,66 +464,834 @@
-# TODO verify with hotslogs (account for private profiles)
+try:%0A info = await self.api.get_mmr(battletag)%0A%0A if info.present:%0A msg = %22Registration successful%5Cn%22%0A msg += %22**Note**: MMR lookup requires that your HotsLog profile remains public%22%0A else:%0A msg = %22Unable to find %60%7B%7D%60 via HotsLogs; either your profile is private, or you made a typo%5Cn%22.format(battletag)%0A msg += %22If you made a typo: simply type %60!register battletag#123%60 again%5Cn%22%0A msg += %22If your profile is private: you will need to specify your MMR each time you %60!joinpickup%60, or make it public%22%0A%0A except APIError:%0A msg = %22Registration succeeded, but I was unable to verify your battle tag with HotsLogs%5Cn%22%0A msg += %22**Note**: MMR lookup requires that your HotsLog profile remains public%22
%0A%0A
@@ -1356,33 +1356,32 @@
.id, battletag)%0A
-%0A
await se
@@ -1397,33 +1397,11 @@
ply(
-%22Registration successful%22
+msg
)%0A%0A
|
46749e9d6ef3eca8ca0d9eae25829ef88d31f5b4
|
Exit if no events are received
|
broadcaster.py
|
broadcaster.py
|
from threading import Thread
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from multiprocessing.pool import ThreadPool
import datetime
import base64
import logging
import os
import re
import redis
import requests
import signal
import sys
import time
import uuid
import multiprocessing
FRAMES_PATH = os.getenv("FRAMES_PATH", 'frames')
HTTP_HOST_LIST_URL = os.getenv("HTTP_HOST_LIST_URL", None)
HTTP_HOST = os.getenv("HTTP_HOST", "localhost")
HTTP_PORT = int(os.getenv("HTTP_PORT", 9080))
HTTP_PUBLISH_URL_TEMPLATE = os.getenv("HTTP_PUBLISH_URLS_TEMPLATE", 'http://{host}:{port}/pub?id={channel}')
http_hosts = requests.get(HTTP_HOST_LIST_URL).json() if HTTP_HOST_LIST_URL else [HTTP_HOST]
REDIS_HOST_LIST_URL = os.getenv("REDIS_HOST_LIST_URL", None)
REDIS_HOST = os.getenv("REDIS_HOST", "")
REDIS_PORT = int(os.getenv("REDIS_PORT", 6379))
REDIS_DB = int(os.getenv("REDIS_DB", 0))
REDIS_PASSWORD = os.getenv("REDIS_PASSWORD", None)
REDIS_TTL = int(os.getenv("REDIS_TTL", 60))
REDIS_SAMPLE_RATE = int(os.getenv("REDIS_SAMPLE_RATE", 8)) # 1 in REDIS_SAMPLE_RATE images will be posted to redis
redis_hosts = requests.get(REDIS_HOST_LIST_URL).json() if REDIS_HOST_LIST_URL else [REDIS_HOST]
WORKERS = int(os.getenv("WORKERS", multiprocessing.cpu_count()))
EVENT_QUEUE_MAX_SIZE = int(os.getenv("EVENT_QUEUE_MAX_SIZE", WORKERS * 2))
BASE64_ENCODE = "BASE64_ENCODE" in os.environ
LOG_FILE = os.getenv("LOG_FILE", None)
LOG_LEVEL = getattr(logging, os.getenv("LOG_LEVEL", "debug").upper())
logger = logging.getLogger("broadcaster")
pool = ThreadPool(processes=WORKERS)
def log_on_error(func):
def f(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception as err:
logger.exception(err)
return f
class EventHandler(FileSystemEventHandler):
def __init__(self, *args, **kwargs):
super(EventHandler, self).__init__(*args, **kwargs)
self.last_event = datetime.datetime.now()
def on_created(self, event):
self.last_event = datetime.datetime.now()
if os.path.isdir(event.src_path):
return
post_async(event.src_path)
@log_on_error
def post_async(path):
return pool.apply_async(func=post, args=(path,))
@log_on_error
def post(path):
channel = os.path.basename(os.path.dirname(path))
with open(path, 'rb') as content:
data = content.read()
http_data = base64.b64encode(data) if BASE64_ENCODE else data
post_http(channel, http_data, path)
post_redis(channel, data, path)
os.remove(path)
@log_on_error
def post_http(channel, data, path):
for host in [h for h in http_hosts if h]:
post_http_to_host(channel, data, path, host)
@log_on_error
def post_http_to_host(channel, data, path, host):
url = HTTP_PUBLISH_URL_TEMPLATE.format(channel=channel, host=host, port=HTTP_PORT)
r = requests.post(url, data=data, timeout=0.5)
if r.status_code == 200:
logger.debug('Pushed {} to {}'.format(path, url))
else:
logger.error(r)
@log_on_error
def post_redis(channel, data, path):
digits = re.findall("(\d+)", path)
if digits:
count = int(digits[-1]) % REDIS_SAMPLE_RATE
if count != 0:
logger.debug('Image {} not sampled ({}/{}).'.format(path, count, REDIS_SAMPLE_RATE))
return
for redis_host in [h for h in redis_hosts if h]:
try:
r = redis.StrictRedis(host=redis_host, port=REDIS_PORT, db=REDIS_DB, password=REDIS_PASSWORD)
channel_ttl = r.get("thumb/" + channel + "/ttl")
channel_ttl = int(channel_ttl) if channel_ttl else REDIS_TTL
key = "thumb/" + channel
blob_key = "blob/" + str(uuid.uuid4())
timestamp = os.path.getmtime(path)
r.zadd(key, timestamp, blob_key)
r.setex(blob_key, channel_ttl, data)
r.zremrangebyscore(key, "-inf", timestamp - channel_ttl)
logger.debug('Pushed {} to {}. Key={}, timestamp={}'.format(path, redis_host, blob_key, timestamp))
except Exception as err:
logger.error(err)
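# Sketch of the resulting Redis layout for a channel named, say, "cam1":
#   zset "thumb/cam1"  -> members "blob/<uuid>" scored by the file's mtime
#   key  "blob/<uuid>" -> raw frame bytes, expiring after channel_ttl seconds
# Each post also prunes zset members older than (timestamp - channel_ttl).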
def setup_logger():
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler = None
if LOG_FILE:
handler = logging.FileHandler(LOG_FILE)
else:
handler = logging.StreamHandler(sys.stdout)
logger.setLevel(LOG_LEVEL)
handler.setLevel(LOG_LEVEL)
handler.setFormatter(formatter)
logger.addHandler(handler)
def delete_all_files(top):
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
path = os.path.join(root, name)
logger.debug("Removing old file {}".format(path))
os.remove(path)
def signal_handler(signal, frame):
logger.warning("Interrupt. Shuting down.")
sys.exit(0)
def run():
setup_logger()
logger.info('Started')
event_handler = EventHandler()
observer = Observer(timeout=0.1)
observer.event_queue.maxsize = EVENT_QUEUE_MAX_SIZE
try:
delete_all_files(FRAMES_PATH)
observer.schedule(event_handler, path=FRAMES_PATH, recursive=True)
observer.start()
signal.signal(signal.SIGINT, signal_handler)
while True:
time.sleep(1)
now = datetime.datetime.now()
if now - event_handler.last_event > datetime.timedelta(minutes=1):
logger.warning("No events received in the last minute. "
"Unfinished tasks: %d" % observer.event_queue.unfinished_tasks)
assert observer.is_alive()
event_handler.last_event = now
except KeyboardInterrupt:
logger.warning("Keyboard interruption. Shuting down.")
except Exception as err:
logger.error(err)
finally:
observer.join()
if __name__ == "__main__":
run()
|
Python
| 0
|
@@ -5473,18 +5473,18 @@
minute.
-
%22
+)
%0A
@@ -5496,176 +5496,140 @@
- %22Unfinished tasks: %25d%22 %25 observer.event_queue.unfinished_tasks)%0A assert observer.is_alive()%0A event_handler.last_event = now
+# Sometimes watchdog stops receiving events.%0A # We exit, so the process can be restarted.%0A sys.exit(1)
%0A
|
4240cc0e6fb552d3ca91082fe3173cb26b273ac0
|
support for dplay.dk and it.dplay.com
|
lib/svtplay_dl/service/dplay.py
|
lib/svtplay_dl/service/dplay.py
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
import copy
import json
import time
import os
from svtplay_dl.service import Service
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.subtitle import subtitle
from svtplay_dl.utils.urllib import quote
from svtplay_dl.error import ServiceError
from svtplay_dl.utils import filenamify
from svtplay_dl.log import log
class Dplay(Service):
supported_domains = ['dplay.se']
def get(self, options):
data = self.get_urldata()
premium = False
if self.exclude(options):
yield ServiceError("Excluding video")
return
match = re.search("<link rel='shortlink' href='http://www.dplay.se/\?p=(\d+)", data)
if not match:
yield ServiceError("Can't find video id")
return
vid = match.group(1)
data = self.http.request("get", "http://www.dplay.se/api/v2/ajax/videos?video_id=%s" % vid).text
dataj = json.loads(data)
if dataj["data"] == None:
yield ServiceError("Cant find video. wrong url without video?")
return
if options.username and options.password:
premium = self._login(options)
if not premium:
yield ServiceError("Wrong username or password")
return
what = self._playable(dataj, premium)
if what == 1:
yield ServiceError("Premium content")
return
if what == 2:
yield ServiceError("DRM protected. Can't do anything")
return
if options.output_auto:
directory = os.path.dirname(options.output)
options.service = "dplay"
name = self._autoname(dataj)
if name is None:
yield ServiceError("Cant find vid id for autonaming")
return
title = "%s-%s-%s" % (name, vid, options.service)
if len(directory):
options.output = os.path.join(directory, title)
else:
options.output = title
suburl = dataj["data"][0]["subtitles_sv_srt"]
if len(suburl) > 0:
yield subtitle(copy.copy(options), "raw", suburl)
if options.force_subtitle:
return
data = self.http.request("get", "http://geo.dplay.se/geo.js").text
dataj = json.loads(data)
geo = dataj["countryCode"]
timestamp = (int(time.time())+3600)*1000
cookie = {"dsc-geo": quote('{"countryCode":"%s","expiry":%s}' % (geo, timestamp))}
if options.cookies:
options.cookies.update(cookie)
else:
options.cookies = cookie
data = self.http.request("get", "https://secure.dplay.se/secure/api/v2/user/authorization/stream/%s?stream_type=hds" % vid, cookies=options.cookies)
dataj = json.loads(data.text)
if "hds" in dataj:
streams = hdsparse(copy.copy(options), self.http.request("get", dataj["hds"], params={"hdcore": "3.8.0"}), dataj["hds"])
if streams:
for n in list(streams.keys()):
yield streams[n]
data = self.http.request("get", "https://secure.dplay.se/secure/api/v2/user/authorization/stream/%s?stream_type=hls" % vid, cookies=options.cookies)
dataj = json.loads(data.text)
if "hls" in dataj:
streams = hlsparse(options, self.http.request("get", dataj["hls"]), dataj["hls"])
if streams:
for n in list(streams.keys()):
yield streams[n]
def _autoname(self, jsondata):
show = jsondata["data"][0]["video_metadata_show"]
season = jsondata["data"][0]["season"]
episode = jsondata["data"][0]["episode"]
title = jsondata["data"][0]["title"]
return filenamify("%s.s%se%s.%s" % (show, season, episode, title))
def _login(self, options):
data = self.http.request("get", "https://secure.dplay.se/login/", cookies={})
options.cookies = data.cookies
match = re.search('realm_code" value="([^"]+)"', data.text)
postdata = {"username" : options.username, "password": options.password, "remember_me": "true", "realm_code": match.group(1)}
data = self.http.request("post", "https://secure.dplay.se/secure/api/v1/user/auth/login", data=postdata, cookies=options.cookies)
if data.status_code == 200:
options.cookies = data.cookies
return True
else:
return False
def _playable(self, dataj, premium):
if dataj["data"][0]["content_info"]["package_label"]["value"] == "Premium" and not premium:
return 1
if dataj["data"][0]["video_metadata_drmid_playready"] != "none":
return 2
if dataj["data"][0]["video_metadata_drmid_flashaccess"] != "none":
return 2
return 0
def find_all_episodes(self, options):
data = self.get_urldata()
match = re.search('data-show-id="([^"]+)"', data)
if not match:
log.error("Cant find show id")
return None
premium = None
if options.username and options.password:
premium = self._login(options)
url = "http://www.dplay.se/api/v2/ajax/shows/%s/seasons/?items=9999999&sort=episode_number_desc&page=" % match.group(1)
episodes = []
page = 0
data = self.http.request("get", "%s%s" % (url, page)).text
dataj = json.loads(data)
for i in dataj["data"]:
what = self._playable(dataj, premium)
if what == 0:
episodes.append(i["url"])
pages = dataj["total_pages"]
for n in range(1, pages):
data = self.http.request("get", "%s%s" % (url, n)).text
dataj = json.loads(data)
for i in dataj["data"]:
what = self._playable(dataj, premium)
if what == 0:
episodes.append(i["url"])
if len(episodes) == 0:
log.error("Cant find any playable files")
return episodes
|
Python
| 0
|
@@ -559,16 +559,44 @@
play.se'
+, 'dplay.dk', %22it.dplay.com%22
%5D%0A%0A d
|
be4ac92b1729ec67e417fc114ca0195f4424aed1
|
Remove stray utf-8
|
libcloudcore/serializers/xml.py
|
libcloudcore/serializers/xml.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import collections
import xmltodict
import dateutil.parser
from .. import models, layer
from ..utils import force_str
class Parser(models.Visitor):
def visit_string(self, shape, value):
return value or ''
def visit_blob(self, shape, value):
return value or ''
def visit_integer(self, shape, value):
return int(value)
visit_long = visit_integer
def visit_float(self, shape, value):
return float(value)
def visit_double(self, shape, value):
return float(value)
def visit_boolean(self, shape, value):
if value == "true":
return True
return False
def visit_timestamp(self, shape, value):
return dateutil.parser.parse(value)
def visit_list(self, shape, value):
if not value:
return []
subshape = shape.of
result = []
if not isinstance(value, list):
value = [value]
for child in value:
result.append(self.visit(subshape, child[subshape.name]))
return result
def visit_map(self, shape, value):
# FIXME: Make Key/Value configurable
if not value:
return {}
if not isinstance(value, list):
value = [value]
out = {}
key_shape = shape.key_shape
value_shape = shape.value_shape
for child in value:
key = self.visit(key_shape, child["Key"])
value = self.visit(value_shape, child["Value"])
out[key] = value
return out
def visit_structure(self, shape, value):
if not value:
return {}
out = {}
for member in shape.iter_members():
if member.name in value:
out[member.name] = self.visit(
member.shape,
value[member.name],
)
return out
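# Note: xmltodict collapses a single repeated element into a bare dict rather
# than a one-element list, which is why visit_list and visit_map re-wrap
# non-list values before iterating over children.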
class Serializer(models.Visitor):
def visit(self, shape, name, value):
visit_fn_name = "visit_{}".format(shape.type)
try:
visit_fn = getattr(self, visit_fn_name)
except AttributeError:
raise NotImplementedError(visit_fn_name)
return visit_fn(shape, name, value)
def visit_string(self, shape, name, value):
return value or None
def visit_blob(self, shape, name, value):
return value or None
def visit_timestamp(self, shape, name, value):
return value.isoformat()
def visit_integer(self, shape, name, value):
return value
visit_long = visit_integer
def visit_float(self, shape, name, value):
# On python 2.7 we need to take care to repr() floats because
# >>> str(float("-9999.999999999998"))
# '-10000.0’
return repr(value)
visit_double = visit_float
def visit_boolean(self, shape, name, value):
return "true" if value else "false"
def visit_list(self, shape, name, value):
if not value:
return None
subshape = shape.of
nodes = []
for subvalue in value:
nodes.append({
subshape.name: self.visit(
subshape,
subshape.wire_name,
subvalue
)
})
return nodes
def visit_map(self, shape, name, value):
# FIXME: Make Key/Value configurable
key_shape = shape.key_shape
value_shape = shape.value_shape
if not value:
return None
nodes = []
for k, v in value.items():
nodes.append({
"Key": self.visit(key_shape, key_shape.name, k),
"Value": self.visit(value_shape, value_shape.name, v),
})
return nodes
def visit_structure(self, shape, name, value):
structure = collections.OrderedDict()
for member in shape.iter_members():
if member.name in value:
structure[member.wire_name] = self.visit(
member.shape,
member.wire_name,
value[member.name]
)
return structure
class XmlSerializer(layer.Layer):
def _namespaces(self, operation):
namespaces = {}
for ns, uri in operation.model.metadata.get("namespaces", {}).items():
namespaces[uri] = ns if ns else None
return namespaces
def serialize(self, operation, shape, params):
body = Serializer().visit(
shape,
shape.name,
params,
)
for uri, ns in self._namespaces(operation).items():
if ns:
body["@xmlns:{}".format(ns)] = uri
else:
body["@xmlns"] = uri
return force_str(xmltodict.unparse(
{shape.wire_name: body},
pretty=True,
))
def deserialize(self, operation, shape, body):
payload = xmltodict.parse(
body,
strip_whitespace=False,
process_namespaces=True,
namespaces=self._namespaces(operation),
)
return Parser().visit(shape, payload[shape.name])
def before_call(self, request, operation, **params):
request.headers['Content-Type'] = 'text/xml'
request.body = self.serialize(
operation,
operation.input_shape,
params
)
return super(XmlSerializer, self).before_call(
request,
operation,
**params
)
def after_call(self, operation, request, response):
return self.deserialize(
operation,
operation.output_shape,
response.body
)
|
Python
| 0.000001
|
@@ -3556,9 +3556,9 @@
00.0
-%E2%80%99
+'
%0A
|
a26c10f9358c51e291e8493463f3836c8bed01e2
|
Fix level command: Admins (level 1000+) can no longer promote other admins who are the same level as them
|
pajbot/modules/basic/admincommands.py
|
pajbot/modules/basic/admincommands.py
|
import logging
import pajbot.models
from pajbot.managers.adminlog import AdminLogManager
from pajbot.modules import BaseModule
from pajbot.modules import ModuleType
from pajbot.modules.basic import BasicCommandsModule
log = logging.getLogger(__name__)
class AdminCommandsModule(BaseModule):
ID = __name__.split('.')[-1]
NAME = 'Basic admin commands'
DESCRIPTION = 'All miscellaneous admin commands'
CATEGORY = 'Feature'
ENABLED_DEFAULT = True
MODULE_TYPE = ModuleType.TYPE_ALWAYS_ENABLED
PARENT_MODULE = BasicCommandsModule
def whisper(self, **options):
message = options['message']
bot = options['bot']
if message:
msg_args = message.split(' ')
if len(msg_args) > 1:
username = msg_args[0]
rest = ' '.join(msg_args[1:])
bot.whisper(username, rest)
def edit_points(self, **options):
message = options['message']
bot = options['bot']
source = options['source']
if message:
msg_split = message.split(' ')
if len(msg_split) < 2:
# The user did not supply enough arguments
bot.whisper(source.username, 'Usage: !editpoints USERNAME POINTS')
return False
username = msg_split[0]
if len(username) < 2:
# The username specified was too short. ;-)
return False
try:
num_points = int(msg_split[1])
except (ValueError, TypeError):
# The user did not specify a valid integer for points
bot.whisper(source.username, 'Invalid amount of points. Usage: !{command_name} USERNAME POINTS'.format(command_name=self.command_name))
return False
with bot.users.find_context(username) as user:
if not user:
bot.whisper(source.username, 'This user does not exist FailFish')
return False
user.points += num_points
if num_points >= 0:
bot.whisper(source.username, 'Successfully gave {} {} points.'.format(user.username_raw, num_points))
else:
bot.whisper(source.username, 'Successfully removed {} points from {}.'.format(abs(num_points), user.username_raw))
def level(self, **options):
message = options['message']
bot = options['bot']
source = options['source']
if message:
msg_args = message.split(' ')
if len(msg_args) > 1:
username = msg_args[0].lower()
new_level = int(msg_args[1])
if new_level >= source.level:
bot.whisper(source.username, 'You cannot promote someone to the same or higher level as you ({0}).'.format(source.level))
return False
# We create the user if the user didn't already exist in the database.
with bot.users.get_user_context(username) as user:
old_level = user.level
user.level = new_level
log_msg = '{}\'s user level changed from {} to {}'.format(
user.username_raw,
old_level,
new_level)
bot.whisper(source.username, log_msg)
AdminLogManager.add_entry('Userlevel edited', source, log_msg)
return True
bot.whisper(source.username, 'Usage: !level USERNAME NEW_LEVEL')
return False
def cmd_silence(self, **options):
bot = options['bot']
source = options['source']
if bot.silent:
bot.whisper(source.username, 'The bot is already silent')
else:
bot.silent = True
bot.whisper(source.username, 'The bot is now silent. Use !unsilence to enable messages again. Note that this option does not stick in case the bot crashes or restarts')
def cmd_unsilence(self, **options):
bot = options['bot']
source = options['source']
if not bot.silent:
bot.whisper(source.username, 'The bot can already talk')
else:
bot.silent = False
bot.whisper(source.username, 'The bot can now talk again')
def load_commands(self, **options):
self.commands['w'] = pajbot.models.command.Command.raw_command(self.whisper,
level=2000,
description='Send a whisper from the bot')
self.commands['editpoints'] = pajbot.models.command.Command.raw_command(self.edit_points,
level=1500,
description='Modifies a users points',
examples=[
pajbot.models.command.CommandExample(None, 'Give a user points',
chat='user:!editpoints pajlada 500\n'
'bot>user:Successfully gave pajlada 500 points.',
description='This creates 500 points and gives them to pajlada').parse(),
pajbot.models.command.CommandExample(None, 'Remove points from a user',
chat='user:!editpoints pajlada -500\n'
'bot>user:Successfully removed 500 points from pajlada.',
description='This removes 500 points from pajlada. Users can go into negative points with this.').parse(),
])
self.commands['level'] = pajbot.models.command.Command.raw_command(self.level,
level=1000,
description='Set a users level')
self.commands['silence'] = pajbot.models.command.Command.raw_command(self.cmd_silence,
level=500,
description='Silence the bot')
self.commands['mute'] = self.commands['silence']
self.commands['unsilence'] = pajbot.models.command.Command.raw_command(self.cmd_unsilence,
level=500,
description='Unsilence the bot')
self.commands['unmute'] = self.commands['unsilence']
|
Python
| 0.000001
|
@@ -3064,32 +3064,338 @@
rname) as user:%0A
+ if user.level %3E= source.level:%0A bot.whisper(source.username, 'You cannot change the level of someone who is the same or higher level than you. You are level %7B%7D, and %7B%7D is level %7B%7D'.format(source.level, username, user.level))%0A return False%0A%0A
|
db711fe24ffff78d21db3af8e437dc2f2f1b48a7
|
Add space at top of class bruteforce_ssh_pyes
|
alerts/bruteforce_ssh_pyes.py
|
alerts/bruteforce_ssh_pyes.py
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Anthony Verez averez@mozilla.com
# Jeff Bryner jbryner@mozilla.com
from lib.alerttask import AlertTask
from query_models import SearchQuery, TermMatch, PhraseMatch, TermsMatch
class AlertBruteforceSshES(AlertTask):
def main(self):
search_query = SearchQuery(minutes=2)
search_query.add_must([
TermMatch('_type', 'event'),
PhraseMatch('summary', 'failed'),
TermMatch('program', 'sshd'),
TermsMatch('summary', ['login', 'invalid', 'ldap_count_entries']),
])
search_query.add_must_not([
PhraseMatch('summary', '10.22.75.203'),
PhraseMatch('summary', '10.8.75.144'),
])
self.filtersManual(search_query)
# Search aggregations on field 'sourceipaddress', keep X samples of
# events at most
self.searchEventsAggregated('details.sourceipaddress', samplesLimit=10)
# alert when >= X matching events in an aggregation
self.walkAggregations(threshold=10)
# Set alert properties
def onAggregation(self, aggreg):
# aggreg['count']: number of items in the aggregation, ex: number of failed login attempts
# aggreg['value']: value of the aggregation field, ex: toto@example.com
# aggreg['events']: list of events in the aggregation
category = 'bruteforce'
tags = ['ssh']
severity = 'NOTICE'
summary = ('{0} ssh bruteforce attempts by {1}'.format(
aggreg['count'], aggreg['value']))
hosts = self.mostCommon(
aggreg['allevents'], '_source.details.hostname')
for i in hosts[:5]:
summary += ' {0} ({1} hits)'.format(i[0], i[1])
# Create the alert object based on these properties
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
|
Python
| 0.000003
|
@@ -380,17 +380,16 @@
ertTask%0A
-%0A
from que
@@ -454,16 +454,17 @@
sMatch%0A%0A
+%0A
class Al
|
dfe531a481e2e753e755f8877bc747147deb7840
|
Set optional port as proper Channel attribute. (#1163)
|
parsl/channels/oauth_ssh/oauth_ssh.py
|
parsl/channels/oauth_ssh/oauth_ssh.py
|
import logging
import paramiko
import socket
from parsl.errors import OptionalModuleMissing
from parsl.channels.ssh.ssh import SSHChannel
try:
from oauth_ssh.ssh_service import SSHService
from oauth_ssh.oauth_ssh_token import find_access_token
_oauth_ssh_enabled = True
except (ImportError, NameError):
_oauth_ssh_enabled = False
logger = logging.getLogger(__name__)
class OAuthSSHChannel(SSHChannel):
"""SSH persistent channel. This enables remote execution on sites
accessible via ssh. This channel uses Globus based OAuth tokens for authentication.
"""
def __init__(self, hostname, username=None, script_dir=None, envs=None, port=22):
''' Initialize a persistent connection to the remote system.
We should know at this point whether ssh connectivity is possible
Args:
- hostname (String) : Hostname
KWargs:
- username (string) : Username on remote system
- script_dir (string) : Full path to a script dir where
generated scripts could be sent to.
- envs (dict) : A dictionary of env variables to be set when executing commands
- port (int) : Port at which the SSHService is running
Raises:
'''
if not _oauth_ssh_enabled:
raise OptionalModuleMissing(['oauth_ssh'],
"OauthSSHChannel requires oauth_ssh module and config.")
self.hostname = hostname
self.username = username
self.script_dir = script_dir
self.envs = {}
if envs is not None:
self.envs = envs
try:
access_token = find_access_token(hostname)
except Exception:
logger.exception("Failed to find the access token for {}".format(hostname))
raise
try:
self.service = SSHService(hostname, port)
self.transport = self.service.login(access_token, username)
except Exception:
logger.exception("Caught an exception in the OAuth authentication step with {}".format(hostname))
raise
self.sftp_client = paramiko.SFTPClient.from_transport(self.transport)
def execute_wait(self, cmd, walltime=60, envs={}):
''' Synchronously execute a commandline string on the shell.
This command does *NOT* honor walltime currently.
Args:
- cmd (string) : Commandline string to execute
Kwargs:
- walltime (int) : walltime in seconds
- envs (dict) : Dictionary of env variables
Returns:
- retcode : Return code from the execution, -1 on fail
- stdout : stdout string
- stderr : stderr string
Raises:
None.
'''
session = self.transport.open_session()
session.setblocking(0)
nbytes = 1024
session.exec_command(self.prepend_envs(cmd, envs))
session.settimeout(walltime)
try:
# Wait until command is executed
exit_status = session.recv_exit_status()
stdout = session.recv(nbytes).decode('utf-8')
stderr = session.recv_stderr(nbytes).decode('utf-8')
except socket.timeout:
logger.exception("Command failed to execute without timeout limit on {}".format(self))
raise
return exit_status, stdout, stderr
def execute_no_wait(self, cmd, walltime=60, envs={}):
''' Execute asynchronously without waiting for the exit code
Args:
- cmd (string): Commandline string to be executed on the remote side
KWargs:
- walltime (int): timeout to exec_command
- envs (dict): A dictionary of env variables
Returns:
- None, stdout (readable stream), stderr (readable stream)
Raises:
- ChannelExecFailed (reason)
'''
session = self.transport.open_session()
session.setblocking(0)
nbytes = 10240
session.exec_command(self.prepend_envs(cmd, envs))
stdout = session.recv(nbytes).decode('utf-8')
stderr = session.recv_stderr(nbytes).decode('utf-8')
return None, stdout, stderr
def close(self):
return self.transport.close()
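# Minimal usage sketch (hypothetical hostname/username; assumes an OAuth SSH
# access token for the host has already been cached by the oauth_ssh tooling,
# so find_access_token() can locate it):
#
#   channel = OAuthSSHChannel('login.example.org', username='alice')
#   retcode, stdout, stderr = channel.execute_wait('hostname')
#   channel.close()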
|
Python
| 0
|
@@ -1545,16 +1545,40 @@
ipt_dir%0A
+ self.port = port
%0A
|
455522eb6f626579350e3b26807a6b47501e80d3
|
Change ftp site url to "ftp.igs.org"
|
fetch_site_logs_from_ftp_sites.py
|
fetch_site_logs_from_ftp_sites.py
|
"""
Fetch updated site log files from remote FTP sites and upload them to GWS incoming site log bucket.
"""
import logging
import datetime
import ftplib
import os
import re
import shutil
import tempfile
import boto3
import dateutil.parser
import requests
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
logger.setLevel(logging.INFO)
class SiteLogFtpSource(object):
def __init__(self, host, path):
self.host = host
self.path = path
site_log_ftp_sources = [ # pylint: disable=invalid-name
SiteLogFtpSource('igscb.jpl.nasa.gov', '/pub/station/log'),
SiteLogFtpSource('161.65.59.67', '/gps/sitelogs/logs'),
]
def parse_date(string):
"""
Given date in format 'yyyymmdd', return python date(yyyy, mm, dd)
"""
return datetime.date(int(string[0:4]), int(string[4:6]), int(string[6:8]))
def parse_site_log_file_name(file_name):
"""
Given site log file name in format 'abcd_yyyymmdd.log', return ('abcd', date(yyyy, mm, dd))
"""
return file_name[0:4], parse_date(file_name[5:13])
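# Worked example: parse_site_log_file_name('alic_20200102.log')
# -> ('alic', datetime.date(2020, 1, 2))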
class LogHunter(object):
site_log_file_name_pattern = re.compile(r'(?P<file_name>\w{4}_\d{8}\.log)', re.IGNORECASE)
def __init__(self, ftp_sources, current_site_logs):
self.ftp_sources = ftp_sources
self.ftp_connections = []
self.current_site_logs = current_site_logs
for ftp_source in self.ftp_sources:
try:
ftp = ftplib.FTP(ftp_source.host)
ftp.set_debuglevel(0)
ftp.login('anonymous', 'anonymous@ga.gov.au')
ftp.cwd(ftp_source.path)
self.ftp_connections.append(ftp)
except Exception as err: #pylint: disable=broad-except
logger.warn(err, exc_info=True)
def with_site_log_updates(self, update_handler):
download_directory_name = tempfile.mkdtemp()
for _four_char_id, (ftp, remote_file_name) in self.get_site_log_updates().iteritems():
try:
downloaded_file_name = self.download(ftp, remote_file_name, download_directory_name)
logger.info('Downloaded ' + remote_file_name + ' from ' + ftp.host)
with file(downloaded_file_name, 'rb') as downloaded_file:
update_handler(downloaded_file)
except Exception as err: #pylint: disable=broad-except
logger.error(err, exc_info=True)
if not os.listdir(download_directory_name):
logger.info('No updates found')
shutil.rmtree(download_directory_name)
def download(self, ftp, file_name, directory):
download_path = os.path.join(directory, file_name)
site_log_file = open(download_path, 'wb')
ftp.retrbinary('RETR ' + file_name, site_log_file.write)
site_log_file.close()
return site_log_file.name
def get_site_log_updates(self):
remote_file_names = []
def collect_remote_file_names(ftp, line):
match = re.search(type(self).site_log_file_name_pattern, line)
if match:
remote_file_names.append((ftp, match.group('file_name')))
else:
return None
for ftp in self.ftp_connections:
try:
logger.info('Fetching site log listing from ' + ftp.host)
ftp.retrlines("LIST", lambda line, ftp=ftp: collect_remote_file_names(ftp, line))
except Exception as err: #pylint: disable=broad-except
logger.warn(err, exc_info=True)
# sort by file name
remote_file_names.sort(key=lambda remote_file_name: remote_file_name[1])
remote_site_logs = {}
for remote_file_name in remote_file_names:
four_char_id, date_prepared = parse_site_log_file_name(remote_file_name[1])
if four_char_id in self.current_site_logs:
if date_prepared > self.current_site_logs[four_char_id]:
remote_site_logs[four_char_id] = (remote_file_name[0], remote_file_name[1])
return remote_site_logs
def gws_list_site_logs():
site_logs = {}
gws_url = os.environ['gws_url']
response = requests.get(gws_url + '/siteLogs?projection=datePrepared&size=10000')
response.raise_for_status()
for site_log in response.json()['_embedded']['siteLogs']:
site_logs[site_log['fourCharacterId'].lower()] = dateutil.parser.parse(site_log['datePrepared']).date()
return site_logs
def lambda_handler(event, context):
logHunter = LogHunter(site_log_ftp_sources, gws_list_site_logs())
s3 = boto3.client('s3')
incoming_bucket_name = os.environ['incoming_bucket_name']
def upload_site_log(site_log_file, bucket_name):
s3.put_object(
Bucket=bucket_name,
Key=os.path.split(site_log_file.name)[1],
Body=site_log_file)
logger.info('Uploaded ' + os.path.split(site_log_file.name)[1] + ' to s3://' + bucket_name)
logHunter.with_site_log_updates(lambda site_log_file: upload_site_log(site_log_file, incoming_bucket_name))
if __name__ == '__main__':
loggerStreamHandler = logging.StreamHandler()
loggerStreamHandler.setLevel(logging.INFO)
logger.addHandler(loggerStreamHandler)
lambda_handler(None, None)
|
Python
| 0
|
@@ -551,26 +551,19 @@
ce('
-igscb.jpl.nasa.gov
+ftp.igs.org
', '
|
b1b33a778d7abca2aa29e9612b6a75ff4aa7d64f
|
add UnboundError to actionAngle
|
galpy/actionAngle_src/actionAngle.py
|
galpy/actionAngle_src/actionAngle.py
|
import math as m
class actionAngle:
"""Top-level class for actionAngle classes"""
def __init__(self,*args,**kwargs):
"""
NAME:
__init__
PURPOSE:
initialize an actionAngle object
INPUT:
OUTPUT:
HISTORY:
2010-07-11 - Written - Bovy (NYU)
"""
if len(args) == 3: # R, vR, vT
R,vR,vT= args
self._R= R
self._vR= vR
self._vT= vT
elif len(args) == 5: # R, vR, vT, z, vz
R,vR,vT, z, vz= args
self._R= R
self._vR= vR
self._vT= vT
self._z= z
self._vz= vz
elif len(args) == 6: # R, vR, vT, z, vz, phi
R,vR,vT, z, vz, phi= args
self._R= R
self._vR= vR
self._vT= vT
self._z= z
self._vz= vz
self._phi= phi
else:
if len(args) == 2:
vxvv= args[0](args[1]).vxvv
else:
vxvv= args[0].vxvv
self._R= vxvv[0]
self._vR= vxvv[1]
self._vT= vxvv[2]
if len(vxvv) > 3:
self._z= vxvv[3]
self._vz= vxvv[4]
self._phi= vxvv[5]
if hasattr(self,'_z'): #calculate the polar angle
if self._z == 0.: self._theta= m.pi/2.
else: self._theta= m.atan(self._R/self._z)
return None
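# Usage sketch (hypothetical phase-space values): construct directly from
# coordinates, e.g. actionAngle(1., 0.1, 1.1) for (R, vR, vT), or pass an
# Orbit-like object exposing a .vxvv attribute (optionally with a time,
# actionAngle(orbit, t), in which case orbit(t).vxvv is used).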
|
Python
| 0.000001
|
@@ -1446,8 +1446,153 @@
rn None%0A
+%0Aclass UnboundError(Exception):%0A def __init__(self, value):%0A self.value = value%0A def __str__(self):%0A return repr(self.value)%0A
|
400788fac8b91206521feaea800e37dd183c2f4f
|
Make sure coverage is at 100% for ref_validator_test.py
|
tests/swagger20_validator/ref_validator_test.py
|
tests/swagger20_validator/ref_validator_test.py
|
import pytest
from jsonschema.validators import Draft4Validator
from jsonschema.validators import RefResolver
from mock import Mock, MagicMock
from bravado_core.swagger20_validator import ref_validator
@pytest.fixture
def address_target():
return {
'type': 'object',
'properties': {
'street': {
'type': 'string',
},
'city': {
'type': 'string',
},
'state': {
'type': 'string',
},
},
'required': ['street', 'city', 'state'],
}
@pytest.fixture
def address_ref():
return '#/definitions/Address'
@pytest.fixture
def address_schema(address_ref, annotated_scope):
return {
'$ref': address_ref,
'x-scope': annotated_scope,
}
@pytest.fixture
def address():
return {
'street': '1000 Main St',
'city': 'Austin',
'state': 'TX',
}
@pytest.fixture
def original_scope():
return ['file:///tmp/swagger.json']
@pytest.fixture
def annotated_scope():
return [
'file:///tmp/swagger.json',
'file:///tmp/models.json',
]
@pytest.fixture
def mock_validator(original_scope):
validator = Mock(spec=Draft4Validator)
validator.resolver = Mock(spec=RefResolver)
validator.resolver._scopes_stack = original_scope
# Make descend() return an empty list so iteration stops immediately (StopIteration).
validator.descend.return_value = []
return validator
def test_when_resolve_is_not_None(address_target, address, original_scope,
annotated_scope, address_ref,
address_schema, mock_validator):
# Verify RefResolver._scopes_stack is replaced by the x-scope
# annotation's scope stack during the call to RefResolver.resolve(...)
def assert_correct_scope_and_resolve(*args, **kwargs):
assert mock_validator.resolver._scopes_stack == annotated_scope
return 'file:///tmp/swagger.json', address_target
mock_validator.resolver.resolve = Mock(
side_effect=assert_correct_scope_and_resolve)
# Force iteration over generator function
list(ref_validator(mock_validator, ref=address_ref, instance=address,
schema=address_schema))
assert mock_validator.resolver.resolve.call_count == 1
assert mock_validator.resolver._scopes_stack == original_scope
def test_when_resolve_is_None(address_target, address, original_scope,
annotated_scope, address_ref, address_schema,
mock_validator):
# Verify RefResolver._scopes_stack is replaced by the x-scope
# annotation's scope stack during the call to RefResolver.resolving(...)
def assert_correct_scope_and_resolve(*args, **kwargs):
assert mock_validator.resolver._scopes_stack == annotated_scope
return 'file:///tmp/swagger.json', address_target
mock_validator.resolver.resolve = None
mock_validator.resolver.resolving.return_value = MagicMock(
side_effect=assert_correct_scope_and_resolve)
# Force iteration over generator function
list(ref_validator(mock_validator, ref=address_ref, instance=address,
schema=address_schema))
assert mock_validator.resolver.resolving.call_count == 1
assert mock_validator.resolver._scopes_stack == original_scope
|
Python
| 0
|
@@ -1448,16 +1448,22 @@
alue = %5B
+Mock()
%5D%0A re
|
0662ab1773b835b447dd71ad53fa595f490cbcc8
|
Add proper encoding support to ftp_list
|
flexget/plugins/input/ftp_list.py
|
flexget/plugins/input/ftp_list.py
|
import logging
import ftplib
import os
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
log = logging.getLogger('ftp_list')
class InputFtpList(object):
"""
Generate entries from a ftp listing
Configuration:
ftp_list:
config:
use-ssl: no
name: <ftp name>
username: <username>
password: <password>
host: <host to connect>
port: <port>
dirs:
- <directory 1>
- <directory 2>
- ....
"""
def validator(self):
from flexget import validator
root = validator.factory('dict')
root.accept('list', key='dirs').accept('text')
config = root.accept('dict', key='config', required=True)
config.accept('text', key='name', required=True)
config.accept('text', key='username', required=True)
config.accept('text', key='password', required=True)
config.accept('text', key='host', required=True)
config.accept('integer', key='port', required=True)
config.accept('boolean', key='use-ssl')
return root
def prepare_config(self, config):
config.setdefault('use-ssl', False)
return config
def on_task_input(self, task, config):
config = self.prepare_config(config)
connection_config = config['config']
if connection_config['use-ssl']:
ftp = ftplib.FTP_TLS()
else:
ftp = ftplib.FTP()
# ftp.set_debuglevel(2)
log.debug('Trying connecting to: %s', (connection_config['host']))
try:
ftp.connect(connection_config['host'], connection_config['port'])
ftp.login(connection_config['username'], connection_config['password'])
except ftplib.all_errors as e:
raise plugin.PluginError(e)
log.debug('Connected.')
ftp.sendcmd('TYPE I')
ftp.set_pasv(True)
entries = []
for path in config['dirs']:
baseurl = "ftp://%s:%s@%s:%s/" % (connection_config['username'], connection_config['password'],
connection_config['host'], connection_config['port'])
try:
dirs = ftp.nlst(path)
except ftplib.error_perm as e:
raise plugin.PluginWarning(str(e))
if not dirs:
log.verbose('Directory %s is empty', path)
for p in dirs:
url = baseurl + p
title = os.path.basename(p)
log.info('Accepting entry %s ' % title)
entries.append(Entry(title, url))
return entries
@event('plugin.register')
def register_plugin():
plugin.register(InputFtpList, 'ftp_list', api_ver=2)
|
Python
| 0.000001
|
@@ -1064,32 +1064,78 @@
required=True)%0A
+ config.accept('text', key='encoding')%0A
config.a
@@ -1239,16 +1239,26 @@
config
+%5B'config'%5D
.setdefa
@@ -1279,16 +1279,72 @@
False)%0A
+ config%5B'config'%5D.setdefault('encoding', 'auto')%0A
@@ -1733,17 +1733,16 @@
try:
-
%0A
@@ -2008,28 +2008,373 @@
cted.')%0A
-
+%0A encoding = 'ascii'%0A if connection_config%5B'encoding'%5D == 'auto':%0A feat_response = ftp.sendcmd('FEAT')%0A if 'UTF8' in %5Bfeat_item.strip().upper() for feat_item in feat_response.splitlines()%5D:%0A encoding = 'utf8'%0A elif connection_config%5B'encoding'%5D:%0A encoding = connection_config%5B'encoding'%5D%0A
%0A
@@ -2947,24 +2947,63 @@
p in dirs:%0A
+ p = p.decode(encoding)%0A
|
31e0b14e21cdbd346a7814c161178f5533d25215
|
use int as long if python 3 and above
|
any_urlfield/models/values.py
|
any_urlfield/models/values.py
|
"""
Custom data objects
"""
from __future__ import unicode_literals
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.loading import get_model
from django.utils.encoding import python_2_unicode_compatible
import logging
from any_urlfield.cache import get_urlfield_cache_key
try:
from django.utils import six
unicode = six.text_type
string_types = six.string_types
except ImportError:
# Python 2, Django 1.3
string_types = basestring
logger = logging.getLogger('any_urlfield.models')
# Not really sure about the cache invalidation yet
URL_CACHE_TIMEOUT = 3600 # 1 hour
@python_2_unicode_compatible
class AnyUrlValue(object):
"""
Custom value object for the :class:`~any_urlfield.models.AnyUrlField`.
This value holds both the internal page ID, and external URL.
It can be used to parse the database contents:
.. code-block:: python
value = AnyUrlValue.from_db_value(url)
article = value.get_object()
print unicode(value)
A conversion to :class:`unicode` or :class:`str` causes the URL to be generated.
This allows the field value to be used in string concatenations, or template variable evaluations:
.. code-block:: html+django
{{ mymodel.url }}
"""
def __init__(self, type_prefix, type_value, url_type_registry=None):
# Easy configuration, allowing other code to deserialize database values.
if url_type_registry is None:
from any_urlfield.models.fields import AnyUrlField
url_type_registry = AnyUrlField._static_registry
self.url_type_registry = url_type_registry
self.url_type = url_type_registry[type_prefix]
self.type_value = type_value
if url_type_registry.index(type_prefix) is None:
raise ValueError("Unsupported AnyUrlValue prefix '{0}'. Supported values are: {1}".format(type_prefix, url_type_registry.keys()))
@classmethod
def from_db_value(cls, url, url_type_registry=None):
"""
Convert a serialized database value to this object.
The value can be something like:
* an external URL: ``http://..`` , ``https://..``
* a custom prefix: ``customid://214``, ``customid://some/value``
* a default "app.model" prefix: ``appname.model://31``
"""
# Easy configuration, allowing other code to deserialize database values.
if url_type_registry is None:
from any_urlfield.models.fields import AnyUrlField
url_type_registry = AnyUrlField._static_registry
try:
prefix, url_rest = url.split('://', 2)
except ValueError:
# While expecting the field to be validated,
# Don't crash when there is old contents.
prefix = 'http'
url_rest = url
url_type = url_type_registry[prefix]
if url_type is None:
raise ValueError("Unsupported URL prefix in database value '{0}'. Supported values are: {1}".format(url, url_type_registry.keys()))
if url_type.has_id_value:
if url_rest == 'None':
return None
id = long(url_rest)
return AnyUrlValue(prefix, id, url_type_registry)
else:
return AnyUrlValue(prefix, url, url_type_registry)
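# Round-trip sketch (the 'blogs.blog' prefix is hypothetical and assumes a
# registered URL type with has_id_value=True):
#   AnyUrlValue.from_db_value('http://example.com/').to_db_value()
#       -> 'http://example.com/'
#   AnyUrlValue.from_db_value('blogs.blog://42').type_value -> 42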
def to_db_value(self):
"""
Convert the value into a serialized format which can be stored in the database.
For example: ``http://www.external.url/`` or ``pageid://22``.
"""
if self.url_type.prefix == 'http':
return self.type_value
elif self.type_value is None:
return None # avoid app.model://None
else:
return "{0}://{1}".format(self.url_type.prefix, self.type_value)
def exists(self):
"""
Check whether the references model still exists.
"""
if self.url_type.prefix == 'http' and self.type_value:
return True
elif self.url_type.has_id_value:
Model = self.get_model()
return Model.objects.filter(pk=self.type_value).exists()
elif self.type_value:
# Random other value that can't be checked
return True
else:
# None or empty.
return False
def get_model(self):
"""
Return the model that this value points to.
"""
Model = self.url_type.model
if isinstance(Model, string_types):
app_label, model_name = Model.split(".") # assume appname.ModelName otherwise.
Model = get_model(app_label, model_name)
return Model
def get_object(self):
"""
Return the database object that the value points to.
"""
if self.url_type.has_id_value:
Model = self.get_model()
return Model.objects.get(pk=self.type_value)
else:
return None
@property
def type_prefix(self):
"""
Return the URL type prefix.
For external URLs this is always ``"http"``.
"""
return self.url_type.prefix
# Python 2 support comes from python_2_unicode_compatible
def __str__(self):
"""
Return the URL that the value points to.
"""
if self.url_type.has_id_value:
if not self.type_value:
return ""
# First see if the URL is cached
Model = self.get_model()
cache_key = get_urlfield_cache_key(Model, self.type_value)
url = cache.get(cache_key)
if url:
return url
try:
object = Model.objects.get(pk=self.type_value)
url = object.get_absolute_url()
cache.set(cache_key, url, URL_CACHE_TIMEOUT)
return url
except ObjectDoesNotExist as e:
# Silently fail in templates. Avoid full page crashing.
logger.error("Failed to generate URL for %r: %s", self, e)
return "#{0}".format(e.__class__.__name__)
else:
return self.type_value or ""
def __len__(self):
return len(unicode(self))
def __repr__(self):
return str("<AnyUrlValue '{0}'>".format(self.to_db_value()))
def __getattr__(self, item):
return getattr(unicode(self), item)
def __getitem__(self, item):
return unicode(self).__getitem__(item)
def __bool__(self):
return bool(self.type_value)
# Python 2 support:
__nonzero__ = __bool__
def __eq__(self, other):
return isinstance(other, AnyUrlValue) \
and self.url_type == other.url_type \
and self.type_value == other.type_value
def __ne__(self, other):
return not isinstance(other, AnyUrlValue) \
or self.url_type != other.url_type \
or self.type_value != other.type_value
def __getstate__(self):
"""
Pickle support
"""
# Avoid pickling the registry if it's the shared one.
from any_urlfield.models.fields import AnyUrlField
if self.url_type_registry != AnyUrlField._static_registry:
url_type_registry = self.url_type_registry
else:
url_type_registry = None
return (url_type_registry, self.url_type.prefix, self.type_value)
def __setstate__(self, state):
url_type_registry, prefix, type_value = state
from any_urlfield.models.fields import AnyUrlField
if url_type_registry is not None:
self.url_type_registry = url_type_registry
else:
self.url_type_registry = AnyUrlField._static_registry
self.type_value = type_value
self.url_type = self.url_type_registry[prefix]
|
Python
| 0.000003
|
@@ -510,16 +510,44 @@
string%0A%0A
+if six.PY3:%0A long = int%0A%0A
%0Alogger
|
a0af5dc1478fe8b639cc5a37898ad180f1f20a89
|
Add --midi option to CLI
|
src/twelve_tone/cli.py
|
src/twelve_tone/cli.py
|
"""
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -mtwelve_tone` python will execute
``__main__.py`` as a script. That means there won't be any
``twelve_tone.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``twelve_tone.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
import click
from twelve_tone.composer import Composer
@click.command()
def main():
c = Composer()
c.compose()
click.echo(c.get_melody())
|
Python
| 0
|
@@ -700,16 +700,71 @@
mmand()%0A
+@click.option('--midi', '-m', help='MIDI output file')%0A
def main
@@ -764,16 +764,20 @@
ef main(
+midi
):%0A c
@@ -837,8 +837,69 @@
lody())%0A
+ if midi is not None:%0A c.save_to_midi(filename=midi)%0A
|
910b1cc171de18cc844abe912130541234b23c7f
|
Add auth support.
|
flamyngo/views.py
|
flamyngo/views.py
|
import json
import re
import os
from pymongo import MongoClient
from monty.serialization import loadfn
from monty.json import jsanitize
from flask import render_template, request, make_response
from flamyngo import app
module_path = os.path.dirname(os.path.abspath(__file__))
SETTINGS = loadfn(os.environ["FLAMYNGO"])
CONN = MongoClient(SETTINGS["db"]["host"], SETTINGS["db"]["port"])
DB = CONN[SETTINGS["db"]["database"]]
if "username" in SETTINGS["db"]:
DB.authenticate(SETTINGS["db"]["username"], SETTINGS["db"]["password"])
CNAMES = [d["name"] for d in SETTINGS["collections"]]
CSETTINGS = {d["name"]: d for d in SETTINGS["collections"]}
@app.route('/', methods=['GET'])
def index():
return make_response(render_template('index.html', collections=CNAMES))
@app.route('/query', methods=['GET'])
def query():
cname = request.args.get("collection")
search_string = request.args.get("search_string")
settings = CSETTINGS[cname]
criteria = {}
for regex in settings["query"]:
if re.match(r'%s' % regex[1], search_string):
criteria[regex[0]] = parse_criteria(search_string, regex[2])
break
if not criteria:
criteria = json.loads(search_string)
results = []
for r in DB[cname].find(criteria, projection=settings["summary"]):
processed = {}
for k in settings["summary"]:
toks = k.split(".")
try:
val = r[toks[0]]
for t in toks[1:]:
try:
val = val[t]
except KeyError:
# Handle integer indices
val = val[int(t)]
except:
# Fall back to None if we can't descend into the data.
val = None
processed[k] = val
results.append(processed)
return make_response(render_template(
'index.html', collection_name=cname,
results=results, fields=settings["summary"],
unique_key=settings["unique_key"],
active_collection=cname,
collections=CNAMES)
)
@app.route('/<string:collection_name>/doc/<string:uid>')
def get_doc(collection_name, uid):
settings = CSETTINGS[collection_name]
criteria = {
settings["unique_key"]: parse_criteria(uid, settings["unique_key_type"])}
doc = DB[collection_name].find_one(criteria)
return make_response(render_template(
'doc.html', doc=json.dumps(jsanitize(doc)),
collection_name=collection_name, doc_id=uid)
)
def parse_criteria(val, vtype):
toks = vtype.rsplit(".", 1)
if len(toks) == 1:
func = getattr(__import__("__builtin__"), toks[0])
else:
mod = __import__(toks[0], globals(), locals(), [toks[1]], 0)
func = getattr(mod, toks[1])
return func(val)
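# Worked examples (illustrative): parse_criteria("42", "int") -> 42 via the
# __builtin__ lookup, while a dotted vtype such as "dateutil.parser.parse"
# imports dateutil.parser and calls parse(val) instead.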
if __name__ == "__main__":
app.run(debug=True)
|
Python
| 0
|
@@ -189,16 +189,26 @@
response
+, Response
%0A%0Afrom f
@@ -227,16 +227,81 @@
rt app%0A%0A
+from functools import wraps%0Afrom flask import request, Response%0A%0A
module_p
@@ -721,16 +721,935 @@
ions%22%5D%7D%0A
+AUTH_USER = SETTINGS.get(%22AUTH_USER%22, None)%0AAUTH_PASSWD = SETTINGS.get(%22AUTH_PASSWD%22, None)%0A%0A%0Adef check_auth(username, password):%0A %22%22%22This function is called to check if a username /%0A password combination is valid.%0A %22%22%22%0A if AUTH_USER is None:%0A return True%0A return username == AUTH_USER and password == AUTH_PASSWD%0A%0A%0Adef authenticate():%0A %22%22%22Sends a 401 response that enables basic auth%22%22%22%0A return Response(%0A 'Could not verify your access level for that URL.%5Cn'%0A 'You have to login with proper credentials', 401,%0A %7B'WWW-Authenticate': 'Basic realm=%22Login Required%22'%7D)%0A%0A%0Adef requires_auth(f):%0A @wraps(f)%0A def decorated(*args, **kwargs):%0A auth = request.authorization%0A if (AUTH_USER is not None) and (not auth or not check_auth(%0A auth.username, auth.password)):%0A return authenticate()%0A return f(*args, **kwargs)%0A return decorated%0A
%0A%0A@app.r
@@ -1671,24 +1671,39 @@
ds=%5B'GET'%5D)%0A
+@requires_auth%0A
def index():
@@ -1819,16 +1819,31 @@
'GET'%5D)%0A
+@requires_auth%0A
def quer
@@ -3193,16 +3193,31 @@
:uid%3E')%0A
+@requires_auth%0A
def get_
|
ea0b81ad1e56935e14429e3b064300b679c61ce1
|
version 0.4.365
|
src/you_get/version.py
|
src/you_get/version.py
|
#!/usr/bin/env python
script_name = 'you-get'
__version__ = '0.4.350'
|
Python
| 0.000001
|
@@ -64,8 +64,8 @@
.4.3
+6
5
-0
'%0A
|
81612e20e327b4b4eabb4c77201dd6b8d2d21e93
|
Add get_default getter to config.
|
law/config.py
|
law/config.py
|
# -*- coding: utf-8 -*-
"""
law Config interface.
"""
__all__ = ["Config"]
import os
import tempfile
import six
from six.moves.configparser import ConfigParser
class Config(ConfigParser):
_instance = None
_default_config = {
"core": {
"db_file": os.environ.get("LAW_DB_FILE", os.path.expandvars("$HOME/.law/db")),
"target_tmp_dir": tempfile.gettempdir(),
},
"paths": {},
}
_config_files = ("$LAW_CONFIG_FILE", "$HOME/.law/config", "etc/law/config")
@classmethod
def instance(cls, config_file=""):
if cls._instance is None:
cls._instance = cls(config_file=config_file)
return cls._instance
def __init__(self, config_file="", skip_fallbacks=False):
ConfigParser.__init__(self, allow_no_value=True) # old-style
files = (config_file,)
if not skip_fallbacks:
files += self._config_files
# read from files
self.config_file = None
for f in files:
f = os.path.expandvars(os.path.expanduser(f))
if os.path.isfile(f):
self.read(f)
self.config_file = f
# maybe inherit
if self.has_section("core") and self.has_option("core", "inherit_config"):
self.inherit(self.get("core", "inherit_config"))
# update by defaults
self.update(self._default_config, overwrite=False)
def optionxform(self, option):
return option
def update(self, data, overwrite=True):
for section, _data in data.items():
if not self.has_section(section):
self.add_section(section)
for option, value in _data.items():
if overwrite or not self.has_option(section, option):
self.set(section, option, value)
def inherit(self, filename):
p = self.__class__(filename, skip_fallbacks=True)
self.update(p._sections, overwrite=False)
def keys(self, section):
return [key for key, _ in self.items(section)]
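# Resolution sketch: every existing file among (config_file, $LAW_CONFIG_FILE,
# $HOME/.law/config, etc/law/config) is read in that order, a file named by
# "core.inherit_config" is then merged without overwriting, and finally
# _default_config fills in whatever is still missing.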
|
Python
| 0
|
@@ -1487,16 +1487,206 @@
option%0A%0A
+ def get_default(self, section, option, default=None):%0A if self.has_option(section, option):%0A return self.get(section, option)%0A else:%0A return default%0A%0A
def
|
67444868b1c7c50da6d490893d72991b65b2aa7b
|
Add superlance supervisord plugin
|
frontend/setup.py
|
frontend/setup.py
|
import sys
from setuptools import setup, find_packages
requires = (
'flask',
'Flask-Script',
'flask_sockets',
'gunicorn',
'cassandra-driver',
'google-api-python-client',
'ecdsa',
'daemonize',
'websocket-client',
'pyzmq',
'fabric',
'pyyaml',
'supervisor',
'pexpect',
'blist'
)
setup(
name = 'cstar_perf.frontend',
version = '1.0',
description = 'A web frontend for cstar_perf, the Cassandra performance testing platform',
author = 'The DataStax Cassandra Test Engineering Team',
author_email = 'ryan@datastax.com',
url = 'https://github.com/datastax/cstar_perf',
install_requires = requires,
namespace_packages = ['cstar_perf'],
packages=find_packages(),
zip_safe=False,
include_package_data=True,
entry_points = {'console_scripts':
['cstar_perf_client = cstar_perf.frontend.client.client:main',
'cstar_perf_server = cstar_perf.frontend.lib.server:main',
'cstar_perf_notifications = cstar_perf.frontend.server.notifications:main']},
)
from cstar_perf.frontend.lib.crypto import generate_server_keys
generate_server_keys()
|
Python
| 0
|
@@ -327,16 +327,34 @@
'blist'
+,%0A 'superlance'
%0A)%0A%0Asetu
|
5ffbad954dfe588bdfcfa7b6fb20057fdd186e34
|
Use a real favicon
|
leapreader.py
|
leapreader.py
|
from os.path import join, dirname
import random
from itty import get, run_itty
import itty
from jinja2 import Environment, FileSystemLoader
import typd
env = Environment(loader=FileSystemLoader(join(dirname(__file__), 'templates')))
settings = {}
t = typd.TypePad(endpoint='http://api.typepad.com/')
cache = dict()
def configure():
if 'memcached_servers' in settings:
getdef = object()
class Cache(object):
def __init__(self, cache):
self.cache = cache
def __getitem__(self, key):
return self.cache.get(key)
def __setitem__(self, key, value):
return self.cache.set(key, value)
def get(self, key, default=getdef):
ret = self.cache.get(key)
if ret is not None:
return ret
if default is not getdef:
return default
return None
import memcache
global cache
cache = Cache(memcache.Client(settings['memcached_servers'], debug=10))
def render(templatename, data):
t = env.get_template(templatename)
return t.render(**data)
def random_rotation():
while True:
yield random.gauss(0, 3)
@get('/static/(?P<filename>.+)')
def static(request, filename):
return itty.serve_static_file(request, filename, root=join(dirname(__file__), 'static'))
@get('/')
def index(request):
if 'consumer_key' in settings:
raise itty.Redirect('http://www.typepad.com/services/api-redirect-identify?consumer_key=%s&nonce=7'
% settings['consumer_key'])
try:
profilename = request.GET['name']
except KeyError:
return render('index.html', {
'rot': random_rotation(),
'ganalytics_code': settings.get('ganalytics_code'),
})
raise itty.Redirect('/' + profilename)
@get('/.services/tp-session')
def identify_user(request):
user = request.GET.get('user')
if user:
userobj = t.users.get(user)
raise itty.Redirect('/' + userobj.preferred_username)
raise itty.Redirect('http://www.typepad.com/services/signin?to=http://leapf.org/')
def add_followers(profilename, notes):
cachekey = '%s:follow' % profilename
followers = set(cache.get(cachekey, ()))
# Yield the followers first so we can consult it later.
yield followers
for note in notes:
followers.add(note.actor.url_id)
yield note
cache[cachekey] = tuple(followers)
def good_notes_for_notes(notes):
for note in notes:
# TODO: skip notes when paging
if note.verb in ('AddedNeighbor', 'SharedBlog', 'JoinedGroup'):
continue
if note.verb == 'NewAsset':
obj = note.object
if obj is None: # deleted asset
continue
if obj.permalink_url is None: # no ancillary
continue
if obj.source is not None: # no boomerang
if obj.source.by_user:
continue
if obj.container is not None and obj.container.url_id == '6p0120a5e990ac970c':
continue
if getattr(obj, 'reblog_of', None) is not None:
note.original = obj
note.verb = 'Reblog'
obj = note.object = t.assets.get(obj.reblog_of.url_id)
elif getattr(obj, 'root', None) is not None:
note.original = obj
note.verb = 'Comment'
obj = note.object = t.assets.get(obj.root.url_id)
okay_types = ['Post']
if obj.container and obj.container.object_type == 'Group':
okay_types.extend(['Photo', 'Audio', 'Video', 'Link'])
if obj.object_type not in okay_types:
continue
# Yay, let's show this one!
yield note
def objs_for_notes(followers, notes):
interesting = dict()
for note in notes:
obj = note.object
try:
objdata = interesting[obj.url_id]
except KeyError:
objdata = {
'object': obj,
'actions': list(),
'when': note.published,
#'action_times': ...?
}
interesting[obj.url_id] = objdata
if note.verb == 'NewAsset':
objdata['new_asset'] = True
objdata['when'] = note.published
else:
objdata['actions'].append(note)
for objdata in sorted(interesting.values(), key=lambda d: d['when'], reverse=True):
obj = objdata['object']
obj.actions = objdata['actions']
if not objdata.get('new_asset'):
# If we don't have a NewAsset event but we know the asset is by
# someone we follow, don't show the asset. The NewAsset event just
# passed out of the window. (Since we already went through all the
# notes, the followers list is up to date.)
if obj.author.url_id in followers:
continue
obj.why = obj.actions[0]
yield obj
@get('/(?P<profilename>[^/]+)(?P<activity>/activity)?')
def read(request, profilename, activity):
try:
if activity:
notes = t.users.get_events(profilename, limit=50)
all_notes = notes.entries
else:
notes = t.users.get_notifications(profilename, offset=1, limit=50)
more_notes = t.users.get_notifications(profilename, offset=51, limit=50)
all_notes = notes.entries + more_notes.entries
except typd.NotFound:
raise itty.NotFound('No such profilename %r' % profilename)
noteiter = add_followers(profilename, all_notes)
followers = noteiter.next()
posts = (obj for obj in objs_for_notes(followers, good_notes_for_notes(noteiter)))
return render('read.html', {
'activity_view': bool(activity),
'profilename': profilename,
'posts': posts,
'rot': random_rotation(),
'ganalytics_code': settings.get('ganalytics_code'),
})
if __name__ == '__main__':
try:
execfile(join(dirname(__file__), 'settings.py'), settings)
except IOError:
pass
configure()
run_itty(host='0.0.0.0')
|
Python
| 0.000137
|
@@ -1243,24 +1243,131 @@
uss(0, 3)%0A%0A%0A
+@get('/favicon.ico')%0Adef favicon(request):%0A raise itty.Redirect('http://www.typepad.com/favicon.ico')%0A%0A%0A
@get('/stati
|
ce5777bd6c803b3841b2ebbb36fd148f3178bda9
|
Change method for stdout/stderr.
|
src/vcstools/common.py
|
src/vcstools/common.py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import copy
import shlex
import subprocess
import logging
from vcstools.vcs_base import VcsError
def normalized_rel_path(path, basepath):
"""
If path is absolute, return relative path to it from
basepath. If relative, return it normalized.
:param path: an absolute or relative path
:param basepath: if path is absolute, shall be made relative to this
:returns: a normalized relative path
"""
# gracefully ignore invalid input absolute path + no basepath
if path is None:
return basepath
if os.path.isabs(path) and basepath is not None:
return os.path.normpath(os.path.relpath(os.path.realpath(path), os.path.realpath(basepath)))
return os.path.normpath(path)
def sanitized(arg):
"""
Makes sure a composed command to be executed via a shell was not injected.
A composed command would be like "ls %s" % foo.
In this example, foo could be "; rm -rf *".
sanitized raises a VcsError when it detects such an attempt.
:raises VcsError: on injection attempts
"""
if arg is None or arg.strip() == '':
return ''
arg = str(arg.strip('"').strip())
safe_arg = '"%s"'%arg
# this also detects some false positives, like bar"";foo
if '"' in arg:
if (len(shlex.split(safe_arg, False, False)) != 1):
raise VcsError("Shell injection attempt detected: >%s< = %s"%(arg, shlex.split(safe_arg, False, False)))
return safe_arg
def _discard_line(line):
if line is None:
return True
# the most common feedback lines of SCMs. We don't care about those; we only let unusual lines through.
discard_prefixes = ["adding ", "added ", "updating ", "requesting ", "pulling from ",
"searching for ", "(", "no changes found",
"0 files",
"A ", "D ", "U ",
"At revision", "Path: ", "First,",
"Installing", "Using ",
"No ", "Tree ",
"All ",
"+N ", "-D ", " M ", " M* ", "RM" # bzr
]
for pre in discard_prefixes:
if line.startswith(pre):
return True
return False
def run_shell_command(cmd, cwd=None, shell=False, us_env=True, show_stdout=False, verbose=False, no_filter=False):
"""
Executes a command and hides the stdout output, logs stderr
output when the command result is not zero. Make sure to sanitize
arguments in the command.
:param cmd: A string to execute.
:param shell: Whether to use os shell.
:param us_env: changes env var LANG before running command, can influence program output
:param show_stdout: show some of the output (except for discarded lines in _discard_line()), ignored if no_filter
:param verbose: show all output, ignored if no_filter
:param no_filter: does not wrap stdout, so invoked command prints everything outside our knowledge
this is DANGEROUS, as it is vulnerable to shell injection.
:returns: ( returncode, stdout, stderr); stdout is None if no_filter==True
:raises: VcsError on OSError
"""
try:
env = copy.copy(os.environ)
if us_env:
env ["LANG"] = "en_US.UTF-8"
if no_filter:
# in no_filter mode, we cannot pipe stdin, as this
# causes some prompts to be hidden (e.g. mercurial over
# http)
stdout_target = None
stderr_target = None
else:
stdout_target = subprocess.PIPE
stderr_target = subprocess.PIPE
proc = subprocess.Popen(cmd,
shell=shell,
cwd=cwd,
stdout=stdout_target,
stderr=stderr_target,
env=env)
# when we read output in the while loop, it would not be returned
# by communicate()
stdout_buf = []
stderr_buf = []
if not no_filter and (verbose or show_stdout):
# this loop runs until proc is done
# it listens to the pipe, prints, and stores the result in a buffer for returning
# this allows proc to run while we still can filter out output we don't care about
# readline() blocks
while True:
line = proc.stdout.readline().decode('UTF-8')
if line is not None and line != '':
if verbose or not _discard_line(line):
print(line),
stdout_buf.append(line)
if (not line or proc.returncode is not None):
break
# stderr was swallowed in pipe, in verbose mode print lines
if verbose:
while True:
line = proc.stderr.readline().decode('UTF-8')
if line != '':
print(line),
stderr_buf.append(line)
if not line:
break
(stdout, stderr) = proc.communicate()
if stdout is not None:
stdout_buf.append(stdout.decode('utf-8'))
stdout = "\n".join(stdout_buf)
if stderr is not None:
stderr_buf.append(stderr.decode('utf-8'))
stderr = "\n".join(stderr_buf)
message = None
if proc.returncode != 0 and stderr is not None and stderr != '':
logger = logging.getLogger('vcstools')
message = "Command failed: '%s'"%(cmd)
if cwd is not None:
message += "\n run at: '%s'"%(cwd)
message += "\n errcode: %s:\n%s"%(proc.returncode, stderr)
logger.warn(message)
result = stdout
if result is not None:
result = result.rstrip()
return (proc.returncode, result, message)
except OSError as ose:
logger = logging.getLogger('vcstools')
message = "Command failed with OSError. '%s' <%s, %s>:\n%s"%(cmd, shell, cwd, ose)
logger.error(message)
raise VcsError(message)
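# Illustrative usage sketch (the command and paths are arbitrary assumptions,
# not from this repo):
#   code, out, err = run_shell_command('git status', cwd='/tmp/repo',
#                                      shell=True, show_stdout=True)
#   # code is the process return code, out the filtered stdout, and err a
#   # warning message (or None) assembled when the command failed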
|
Python
| 0
|
@@ -5952,34 +5952,67 @@
-while True
+for line in iter(proc.stdout.readline, b'')
:%0A
@@ -6028,30 +6028,12 @@
e =
-proc.stdout.read
line
-()
.dec
@@ -6433,18 +6433,51 @@
-while True
+for line in iter(proc.stderr.readline, b'')
:%0A
@@ -6501,30 +6501,12 @@
e =
-proc.stderr.read
line
-()
.dec
@@ -6680,20 +6680,16 @@
break
-
%0A
|
a16cda69c2ec0e96bf5b5a558e288d22b353f28f
|
change work folder before build
|
build/build.py
|
build/build.py
|
#!/usr/bin/env python2.7
# coding=utf-8
import subprocess
import platform
import os
import sys
def build(platform):
print("[Start Build] Target Platform: " + platform)
build_script = ""
if platform == "windows":
build_script = "make_win_with_2015_static.bat"
subprocess.Popen(["cmd.exe","/C",build_script],shell=True)
elif platform == "android":
build_script = "make_android_static.sh"
elif platform == "ios":
build_script = "make_ios.sh"
elif platform == "osx":
build_script = "make_osx_static.sh"
if __name__ == '__main__':
length = len(sys.argv)
if length < 2:
sys.exit("please select target platform !")
platform = sys.argv[1]
build(platform)
# print(platform)
|
Python
| 0
|
@@ -89,16 +89,17 @@
ort sys%0A
+%0A
def buil
@@ -163,24 +163,135 @@
+ platform)%0A
+ build_folder = os.path.split(os.path.realpath(__file__))%5B0%5D%0A #change folder%0A os.chdir(build_folder)%0A%0A
build_sc
@@ -389,75 +389,8 @@
at%22%0A
- subprocess.Popen(%5B%22cmd.exe%22,%22/C%22,build_script%5D,shell=True)%0A
@@ -558,16 +558,16 @@
%22osx%22:%0A
-
@@ -601,16 +601,67 @@
atic.sh%22
+%0A subprocess.check_call(build_script,shell=True)
%0A%0Aif __n
|
8d20d419435b3e2d1b7bf5a5a88c58d5f5477187
|
Add docstring for is_template method.
|
folio/__init__.py
|
folio/__init__.py
|
# -*- coding: utf-8 -*-
"""
Folio is a static website generator using the jinja2 template engine.
"""
import os
import shutil
import fnmatch
import logging
from jinja2 import Environment, FileSystemLoader
__all__ = ['Folio']
__version__ = '0.1-dev'
class Folio(object):
"""
:param name: Project's name.
:param build_path: Destination directory where the final HTML will be
generated. Defaults to ``'build'`` in the project's root.
:param template_path: Source directory that contains the templates to be
processed. Defaults to ``'templates'`` in the
project's root.
:param static_path: Source for the static content that will be copied to
the build directory as first action. Defaults to
``'static'`` in the project's root.
:param encoding: The template's encoding. Defaults to utf-8.
:param jinja_extensions: Jinja2 extensions.
"""
def __init__(self, name, build_path='build', template_path='templates',
static_path='static', encoding='utf-8',
jinja_extensions=()):
#: The name of the project. It's used for logging and can improve
#: debugging information.
self.name = name
#: The project logger, an instance of :class:`logging.Logger`.
self.logger = logging.getLogger(self.name)
#: The destination directory to copy the static content and create the
#: built templates.
self.build_path = os.path.abspath(build_path)
#: The source directory from where the templates will be parsed.
self.template_path = os.path.abspath(template_path)
#: It contains files that will be copied at first to the build
#: directory unmodified.
self.static_path = os.path.abspath(static_path)
#: The source encoding for templates. Defaults to utf-8.
self.encoding = encoding
#: The context generators per template. The template name is stored as
#: key and the callback as value. It will call the function, with the
#: jinja2 environment as the first parameter, for the template in the
#: build process. The function must return a dictionary to be used in
#: the template.
#:
#: Context functions are registered like this::
#:
#: @proj.context('index.html')
#: def index_context(jinja2_env):
#: return {'files': jinja2_env.list_templates(),
#: 'author': 'Me'}
#:
#: Then in the template you can use them as normal variables.
self.contexts = {}
#: Builders are the core of folio; they link a filename match with
#: a build function that will be responsible for translating templates
#: into final HTML files.
#:
#: A default builder is provided; it treats all *.html files as
#: jinja2 templates and processes them, generating the same template name
#: as the output file in the build directory.
self.builders = [('*.html', self._default_builder)]
#: The jinja environment is used to make a list of the templates, and
#: it's used by the builders to dump output files.
self.env = self._create_jinja_environment(jinja_extensions)
def _create_jinja_loader(self):
"""Create a Jinja loader."""
return FileSystemLoader(searchpath=self.template_path)
def _create_jinja_environment(self, extensions):
"""Create a Jinja environment."""
return Environment(loader=self._create_jinja_loader(),
extensions=extensions)
def build(self):
def _remove_build():
if os.path.exists(self.build_path):
for path, _, files in os.walk(self.build_path, topdown=False):
for f in files:
os.remove(os.path.join(path, f))
os.rmdir(path)
_remove_build()
if os.path.exists(self.static_path):
shutil.copytree(self.static_path, self.build_path)
else:
os.mkdir(self.build_path)
templates = self.env.list_templates(filter_func=self.is_template)
for template_name in templates:
self.build_template(template_name)
def build_template(self, template_name):
context = {}
if template_name in self.contexts:
context = self.contexts[template_name](self.env)
self.logger.info('Building %s', template_name)
for pattern, builder in self.builders.__reversed__():
if fnmatch.fnmatch(template_name, pattern):
builder(self.env, template_name, context)
break
def is_template(self, filename):
_, tail = os.path.split(filename)
return not (tail.startswith('.') or tail.startswith('_'))
def _default_builder(self, env, template_name, context):
head, _ = os.path.split(template_name)
if head:
head = os.path.join(self.build_path, head)
if not os.path.exists(head):
os.makedirs(head)
destination = os.path.join(self.build_path, template_name)
template = env.get_template(template_name)
template.stream(**context).dump(destination, encoding=self.encoding)
def context(self, template_name):
def wrapper(func):
self.contexts[template_name] = func
return wrapper
def builder(self, pattern):
def wrapper(func):
self.builders.append((pattern, func))
return wrapper
def run(self, host='127.0.0.1', port=8080):
import thread
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
os.chdir(self.build_path)
def serve():
server = HTTPServer((host, port), SimpleHTTPRequestHandler)
server.serve_forever()
self.logger.info('Serving at %s:%d', host, port)
thread.start_new_thread(serve, ())
def watch():
def handler(event):
if event.is_directory:
return
template_name = event.src_path[len(self.template_path) + 1:]
if not self.is_template(template_name):
return
self.logger.info('File %s %s', template_name, event.event_type)
self.build_template(template_name)
EventHandler = type('EventHandler', (FileSystemEventHandler, ),
{'on_any_event': lambda self, e: handler(e)})
observer = Observer()
observer.schedule(EventHandler(), path=self.template_path,
recursive=True)
observer.start()
try:
while True:
pass
except KeyboardInterrupt:
observer.stop()
observer.join()
watch()
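# Minimal usage sketch (the project name and context values are assumptions):
#   proj = Folio('example-site')
#
#   @proj.context('index.html')
#   def index_context(env):
#       return {'author': 'Me'}
#
#   proj.build()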
|
Python
| 0
|
@@ -4832,24 +4832,268 @@
filename):%0A
+ %22%22%22Return true if a file is considered a template. The default%0A behaviour is to ignore all hidden files and the ones that start with%0A an underscore.%0A%0A :param filename: The (possible) template filename.%0A %22%22%22%0A
_, t
@@ -5134,20 +5134,18 @@
-return not (
+ignored =
tail
@@ -5184,17 +5184,43 @@
ith('_')
-)
+%0A return not ignored
%0A%0A de
|
e4b9463dcbe5700c5a9089188e1f3caca5a206ab
|
Add hierarchy walker
|
avalon/tools/cbsceneinventory/lib.py
|
avalon/tools/cbsceneinventory/lib.py
|
from avalon import io, api
def switch_item(container,
asset_name=None,
subset_name=None,
representation_name=None):
"""Switch container asset, subset or representation of a container by name.
It'll always switch to the latest version - of course a different
approach could be implemented.
Args:
container (dict): data of the item to switch with
asset_name (str): name of the asset
subset_name (str): name of the subset
representation_name (str): name of the representation
Returns:
dict
"""
if all(not x for x in [asset_name, subset_name, representation_name]):
raise ValueError(
"Must have at least one change provided to switch.")
# Collect any of current asset, subset and representation if not provided
# so we can use the original name from those.
if any(not x for x in [asset_name, subset_name, representation_name]):
_id = io.ObjectId(container["representation"])
representation = io.find_one({"type": "representation", "_id": _id})
version, subset, asset, project = io.parenthood(representation)
if asset_name is None:
asset_name = asset["name"]
if subset_name is None:
subset_name = subset["name"]
if representation_name is None:
representation_name = representation["name"]
# Find the new one
asset = io.find_one({"name": asset_name, "type": "asset"})
assert asset, ("Could not find asset in the database with the name "
"'%s'" % asset_name)
subset = io.find_one({"name": subset_name,
"type": "subset",
"parent": asset["_id"]})
assert subset, ("Could not find subset in the database with the name "
"'%s'" % subset_name)
version = io.find_one({"type": "version",
"parent": subset["_id"]},
sort=[('name', -1)])
assert version, "Could not find a version for {}.{}".format(
asset_name, subset_name
)
representation = io.find_one({"name": representation_name,
"type": "representation",
"parent": version["_id"]})
assert representation, (
"Could not find representation in the database with"
" the name '%s'" % representation_name)
api.switch(container, representation)
return representation
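# Hypothetical usage sketch (the container data and names are assumptions):
#   container = {"representation": "5c1b..."}  # an existing container dict
#   switch_item(container, asset_name="hero", subset_name="modelDefault")
#   # switches to the latest version of that asset/subset pair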
|
Python
| 0.000008
|
@@ -2509,24 +2509,262 @@
return representation%0A
+%0A%0Adef walk_hierarchy(node):%0A %22%22%22Recursively yield group node%0A %22%22%22%0A for child in node.children():%0A if child.get(%22isGroupNode%22):%0A yield child%0A%0A for _child in walk_hierarchy(child):%0A yield _child%0A
|
6c004827c642c3aee4166dd8689dc40104be6346
|
Stop hard-coding satellites, and make the tester easy to run on any notebook path. Allow specifying output and error files as args rather than just on the command line.
|
src/verify_notebook.py
|
src/verify_notebook.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import json
import subprocess
import sys
def get_out_and_err():
output = None
if len(sys.argv) == 3:
with open(sys.argv[1], 'r') as out:
output = out.read()
with open(sys.argv[2], 'r') as err:
error = err.read()
else:
cmd = 'runipy --matplotlib --stdout Satellites.ipynb'
p = subprocess.Popen(cmd, shell=True, stdin=None,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=True)
output = p.stdout.read()
error = p.stderr.read()
return (output, error)
def check_results(results, warnings_are_errors=False, content_tester=None):
(output, error) = results
notebook = json.loads(output)
cells = notebook['worksheets'][0]['cells']
for cell in cells:
if cell['cell_type'] in ('markdown', 'heading'):
pass
elif cell['cell_type'] == 'code':
for output in cell['outputs']:
if output['output_type'] == 'pyerr':
raise ValueError(str(output))
elif output['output_type'] == 'stream':
for msg in output['text']:
if re.search(r'/(.*):\d+:.*(warn(ings?)?)\W', msg, re.I):
if warnings_are_errors:
raise ValueError(msg)
else:
print "WARNING: ", msg
elif output['output_type'] == 'pyout':
if content_tester:
content_tester(cell)
else:
pass
elif output['output_type'] == 'display_data':
pass # Assume they're good.
else:
raise ValueError(str(output))
else:
raise ValueError(str(output))
if 'exception' in error or 'nonzero exit status' in error:
raise ValueError(error)
def main():
check_results(get_out_and_err())
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -724,188 +724,117 @@
err(
-):%0A output = None%0A if len(sys.argv) == 3:%0A with open(sys.argv%5B1%5D, 'r') as out:%0A output = out.read()%0A with open(sys.argv%5B2%5D, 'r') as err:%0A error = err.read()%0A els
+notebook_path=None, outfile=None, errfile=None):%0A output = None%0A error = None%0A if notebook_path is not Non
e:%0A
@@ -876,25 +876,38 @@
out
-Satellites.ipynb'
+%22%25s.ipynb%22' %25 (notebook_path,)
%0A
@@ -1128,16 +1128,359 @@
.read()%0A
+ elif outfile is not None and errfile is not None:%0A with open(outfile, 'r') as out:%0A output = out.read()%0A with open(errfile, 'r') as err:%0A error = err.read()%0A elif len(sys.argv) == 3:%0A return get_out_and_err(sys.argv%5B1%5D, sys.argv%5B2%5D)%0A else:%0A raise ValueError(%22Specify a notebook path or out and err files to verify.%22)%0A
return
@@ -2322,16 +2322,57 @@
er(cell)
+ # Content-tester should raise on error.
%0A
@@ -2684,98 +2684,149 @@
r)%0A%0A
- %0A%0Adef main():%0A check_results(get_out_and_err())%0A%0Aif __name__ == %22__main__%22:%0A main(
+def run_and_verify_notebook(notebook_path, **kwargs):%0A '''runipy it and verify it.'''%0A check_results(get_out_and_err(notebook_path), **kwargs
)%0A
|
a9f6432288f74b9f590a91649de1e475ed523806
|
Correct data format
|
restclients/test/library/mylibinfo.py
|
restclients/test/library/mylibinfo.py
|
from datetime import date
from django.test import TestCase
from django.conf import settings
from restclients.library.mylibinfo import get_account, get_account_html
from restclients.exceptions import DataFailureException
class MyLibInfoTest(TestCase):
def test_get_account(self):
with self.settings(
RESTCLIENTS_LIBRARIES_DAO_CLASS =
'restclients.dao_implementation.libraries.File'):
account = get_account("javerage")
self.assertEquals(account.next_due, date(2014, 5, 27))
self.assertEquals(account.holds_ready, 1)
self.assertEquals(account.fines, 5.35)
self.assertEquals(account.items_loaned, 3)
self.assertEquals(account.get_next_due_date_str(True), "May 27, 2014")
self.assertEquals(account.get_next_due_date_str(False), "2014-05-27")
account = get_account("jnewstudent")
self.assertIsNone(account.next_due)
self.assertEquals(account.holds_ready, 0)
self.assertEquals(account.fines, 0.0)
self.assertEquals(account.items_loaned, 0)
def test_html_response(self):
with self.settings(
RESTCLIENTS_LIBRARIES_DAO_CLASS =
'restclients.dao_implementation.libraries.File'):
response = get_account_html("javerage")
self.assertEquals(response, '<p>You have 7 items checked out.<br>\nYou have items due back on 2014-04-29.<br>\nYou don\'t owe any fines.</p>\n<a href="http://alliance-primo.hosted.exlibrisgroup.com/primo_library/libweb/action/dlBasketGet.do?vid=UW&redirectTo=myAccount">Go to your account</a>')
def test_invalid_user(self):
with self.settings(
RESTCLIENTS_LIBRARIES_DAO_CLASS =
'restclients.dao_implementation.libraries.File'):
#Testing error message in a 200 response
self.assertRaises(DataFailureException, get_account, "invalidnetid")
#Testing non-200 response
self.assertRaises(DataFailureException, get_account, "invalidnetid123")
try:
get_account("invalidnetid")
except DataFailureException as ex:
self.assertEquals(ex.msg, "[Alma] User not found/401651")
def test_with_timestamp(self):
with self.settings(
RESTCLIENTS_LIBRARIES_DAO_CLASS=
'restclients.dao_implementation.libraries.File'):
response = get_account_html('javerage', timestamp=1391122522900)
self.assertEquals(response, '<p>You have 7 items checked out.<br>\n You have items due back on 2014-04-29.<br>\n You don\'t owe any fines.</p>\n <a href="http://alliance-primo.hosted.exlibrisgroup.com/primo_library/libweb/action/dlBasketGet.do?vid=UW&redirectTo=myAccount">Go to your account</a>')
|
Python
| 0.999995
|
@@ -761,20 +761,19 @@
), %22
+Tue,
May 27
-, 2014
%22)%0A
|
ec2c0701382e09009c3bc25456bf672fa06f4b92
|
Remove DynamicModel
|
simple_model/models.py
|
simple_model/models.py
|
import inspect
from typing import Any, Iterable, Iterator, Tuple, Union
from .exceptions import ValidationError
from .fields import ModelField
class BaseModel(type):
_field_class = ModelField
def __new__(cls, name, bases, attrs, **kwargs):
super_new = super().__new__
# do not perform initialization for Model class
# (only initialize Model subclasses)
parents = [base for base in bases if isinstance(base, BaseModel)]
if not parents:
return super_new(cls, name, bases, attrs)
new_class = super_new(cls, name, bases, attrs, **kwargs)
attr_meta = attrs.pop('Meta', None)
meta = attr_meta if attr_meta else getattr(new_class, 'Meta', None)
if not meta:
meta = type('Meta', (), {})
hints = getattr(new_class, '__annotations__', {})
try:
meta.fields = getattr(meta, 'fields')
except AttributeError: # assume all fields are defined as typed class attributes
assert hints, ('Model must have a "fields" attribute on its Meta class or its fields '
'defined as typed class attributes'.format(new_class.__name__))
meta.fields = tuple(hints)
meta.allow_empty = getattr(meta, 'allow_empty', tuple(meta.fields))
for field_name in meta.fields:
field_type = hints.get(field_name) if hints else None
default_value = getattr(new_class, field_name, None)
field = ModelField(
model_class=new_class,
name=field_name,
default_value=default_value,
type=field_type,
allow_empty=field_name in meta.allow_empty,
)
setattr(new_class, field_name, field)
new_class._meta = meta
return new_class
class Model(metaclass=BaseModel):
def __init__(self, **kwargs):
for field_name in self._meta.fields:
descriptor = getattr(type(self), field_name)
field_value = kwargs.get(field_name, descriptor.default_value)
setattr(self, field_name, field_value)
self.__post_init__(**kwargs)
def __post_init__(self, **kwargs):
pass
def __eq__(self, other: Any) -> bool:
try:
return dict(self) == dict(other) # type: ignore
except (TypeError, ValueError):
return False
def __iter__(self) -> Iterator[Tuple[str, Any, ModelField]]:
self.clean()
for name, value, descriptor in self._get_fields():
yield name, descriptor.to_python(value)
def __repr__(self) -> str:
attrs = ', '.join(
'{name}={value!r}'.format(name=name, value=value) for name, value, _ in self._get_fields()
)
return '{class_name}({attrs})'.format(class_name=type(self).__name__, attrs=attrs)
def _get_fields(self) -> Iterator[ModelField]:
cls = type(self)
return (
(field_name, getattr(self, field_name), getattr(cls, field_name))
for field_name in self._meta.fields
)
@classmethod
def build_many(cls, source: Iterable) -> list:
if cls.is_empty(source):
return []
keys_sets = [d.keys() for d in source]
for key_set in keys_sets:
if key_set ^ keys_sets[0]:
raise ValueError('All elements in source should have the same keys')
return [cls(**item) for item in source]
@staticmethod
def is_empty(value: Any) -> bool:
if value == 0 or value is False:
return False
return not bool(value)
def clean(self) -> None:
for field in self._get_fields():
field.clean()
setattr(self, field.name, field.value)
def validate(self, raise_exception: bool=True) -> Union[None, bool]:
for field in self._get_fields():
try:
field.validate()
except ValidationError:
if raise_exception:
raise
return False
return None if raise_exception else True
class DynamicModel(Model):
def __init__(self, *args, **kwargs):
for field_name, field_value in kwargs.items():
setattr(self, field_name, field_value)
def _get_fields(self) -> Iterator[ModelField]:
for field_name in self.get_fields():
field_value = getattr(self, field_name)
yield ModelField(self, field_name, field_value)
def get_fields(self) -> Tuple[str, ...]:
return tuple(
name for name, value in inspect.getmembers(self)
if not(name.startswith('_') or inspect.ismethod(value) or inspect.isfunction(value))
)
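# Hypothetical usage sketch (the field names are assumptions):
#   class Person(Model):
#       name: str
#       age: int = 0
#
#   person = Person(name="Ada")
#   person.validate()
#   dict(person)  # -> {'name': 'Ada', 'age': 0}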
|
Python
| 0.000001
|
@@ -1,19 +1,4 @@
-import inspect%0A
from
@@ -3650,37 +3650,55 @@
ne:%0A for
-field
+name, value, descriptor
in self._get_fi
@@ -3721,20 +3721,50 @@
-field.clean(
+clean_value = descriptor.clean(self, value
)%0A
@@ -3791,26 +3791,20 @@
lf,
-field.name, field.
+name, clean_
valu
@@ -3892,21 +3892,39 @@
for
-field
+name, value, descriptor
in self
@@ -3976,23 +3976,39 @@
-field.validate(
+descriptor.validate(self, value
)%0A
@@ -4186,625 +4186,4 @@
rue%0A
-%0A%0Aclass DynamicModel(Model):%0A def __init__(self, *args, **kwargs):%0A for field_name, field_value in kwargs.items():%0A setattr(self, field_name, field_value)%0A%0A def _get_fields(self) -%3E Iterator%5BModelField%5D:%0A for field_name in self.get_fields():%0A field_value = getattr(self, field_name)%0A yield ModelField(self, field_name, field_value)%0A%0A def get_fields(self) -%3E Tuple%5Bstr, ...%5D:%0A return tuple(%0A name for name, value in inspect.getmembers(self)%0A if not(name.startswith('_') or inspect.ismethod(value) or inspect.isfunction(value))%0A )%0A
|
b70d3c2c75befe747079697a66b1bb417749e786
|
Update Workflow: add abstract method .on_failure()
|
simpleflow/workflow.py
|
simpleflow/workflow.py
|
from __future__ import absolute_import
class Workflow(object):
"""
Main interface to define a workflow by submitting tasks for asynchronous
execution.
The actual behavior depends on the executor backend.
"""
def __init__(self, executor):
self._executor = executor
def submit(self, func, *args, **kwargs):
"""
Submit a function for asynchronous execution.
:param func: callable registered as a task.
:type func: task.ActivityTask | task.WorkflowTask.
:param *args: arguments passed to the task.
:type *args: Sequence.
:param **kwargs: keyword-arguments passed to the task.
:type **kwargs: Mapping (dict).
:returns:
:rtype: Future.
"""
return self._executor.submit(func, *args, **kwargs)
def map(self, func, iterable):
"""
Submit a function for asynchronous execution for each value of
*iterable*.
:param func: callable registered as a task.
:type func: task.ActivityTask | task.WorkflowTask.
:param iterable: collection of arguments passed to the task.
:type iterable: Iterable.
"""
return self._executor.map(func, iterable)
def starmap(self, func, iterable):
"""
Submit a function for asynchronous execution for each value of
*iterable*.
:param func: callable registered as a task.
:type func: task.ActivityTask | task.WorkflowTask.
:param iterable: collection of multiple arguments passed to the task
as positional arguments. They are destructured using
the ``*`` operator.
:type iterable: Iterable.
"""
return self._executor.starmap(func, iterable)
def fail(self, reason, details=None):
self._executor.fail(reason, details)
def run(self, *args, **kwargs):
raise NotImplementedError
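# Hedged usage sketch (the task name and executor wiring are assumptions):
#   class MyWorkflow(Workflow):
#       def run(self, n):
#           return self.submit(increment, n)  # 'increment': a registered task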
|
Python
| 0.000002
|
@@ -1967,8 +1967,189 @@
edError%0A
+%0A def on_failure(self, history, reason, details=None):%0A %22%22%22%0A The executor calls this method when the workflow fails.%0A%0A %22%22%22%0A raise NotImplementedError%0A
|
6a67c22a9843517ece1ee5e890ea38873b44648b
|
Test html_to_latex.
|
libretto/templatetags/extras.py
|
libretto/templatetags/extras.py
|
# coding: utf-8
from __future__ import unicode_literals
import re
from bs4 import BeautifulSoup, Comment
from django.template import Library
from django.utils.encoding import smart_text
from ..utils import abbreviate as abbreviate_func
register = Library()
@register.filter
def stripchars(text):
return smart_text(BeautifulSoup(text, 'html.parser'))
@register.filter
def striptags_n_chars(text):
return smart_text(BeautifulSoup(text, 'html.parser').get_text())
compact_paragraph_re = re.compile(r'(?<![\n\s ])\n+[\s\n ]*\n+(?![\n\s ])')
@register.filter
def compact_paragraph(text):
return compact_paragraph_re.sub(r' / ', text.strip('\n'))
escaped_chars_re = re.compile(r'([#$%&_])')
@register.filter
def escape_latex(text):
return escaped_chars_re.sub(r'\\\1', text)
html_latex_bindings = (
(dict(name='h1'), r'\part*{', r'}'),
(dict(name='h2'), r'\chapter*{', r'}'),
(dict(name='h3'), r'\section*{', r'}'),
(dict(name='p'), '\n\n', '\n\n'),
(dict(name='cite'), r'\textit{', r'}'),
(dict(name='em'), r'\textit{', r'}'),
(dict(name='i'), r'\textit{', r'}'),
(dict(name='strong'), r'\textbf{', r'}'),
(dict(name='b'), r'\textbf{', r'}'),
(dict(name='small'), r'\small{', r'}'),
(dict(class_='sc'), r'\textsc{', r'}'),
(dict(style=re.compile(r'.*font-variant:\s*'
r'small-caps;.*')), r'\textsc{', r'}'),
)
@register.filter
def html_to_latex(text):
"""
Converts HTML into LaTeX syntax.
Beware: this converter is thoroughly incomplete and should not be
used outside a very specific context.
"""
soup = BeautifulSoup(text)
for html_selectors, latex_open_tag, latex_close_tag in html_latex_bindings:
for tag in soup.find_all(**html_selectors):
tag.insert(0, latex_open_tag)
tag.append(latex_close_tag)
for comment in soup.find_all(text=lambda text: isinstance(text, Comment)):
comment.extract()
return smart_text(soup.get_text())
@register.filter
def abbreviate(string, min_vowels=0, min_len=1, tags=True, enabled=True):
return abbreviate_func(string, min_vowels=min_vowels, min_len=min_len,
tags=tags, enabled=enabled)
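# Illustrative expectations (the inputs are hypothetical):
#   html_to_latex('<em>hello</em>')  # -> '\\textit{hello}'
#   escape_latex('50% & more')       # -> '50\\% \\& more'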
|
Python
| 0
|
@@ -1449,24 +1449,25 @@
(text):%0A
+r
%22%22%22%0A Conv
@@ -1639,16 +1639,330 @@
context.
+%0A%0A %3E%3E%3E print(html_to_latex('%3Ch1%3EBonjour %C3%A0 tous%3C/h1%3E'))%0A %5Cpart*%7BBonjour %C3%A0 tous%7D%0A %3E%3E%3E print(html_to_latex('%3Cspan style=%22font-series: bold; font-variant: small-caps;%22%3E%C3%89criture romaine%3C/span%3E'))%0A %5Ctextsc%7B%C3%89criture romaine%7D%0A %3E%3E%3E print(html_to_latex('Vive les %3C!-- cons --%3Epoilus !'))%0A Vive les poilus !
%0A %22%22%22
|
cd72e710a74625fec34486329fa21310827f9f09
|
validate downloaded image
|
apps/bplan/serializers.py
|
apps/bplan/serializers.py
|
import os
from urllib import request
from urllib.parse import urlparse
from django.apps import apps
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from rest_framework import serializers
from adhocracy4.modules import models as module_models
from adhocracy4.phases import models as phase_models
from .models import Bplan
from .phases import StatementPhase
BPLAN_EMBED = '<iframe height="500" style="width: 100%; min-height: 300px; ' \
'max-height: 100vh" src="{}" frameborder="0"></iframe>'
PROJECT_IMAGE_DIR = 'projects/backgrounds/'
class BplanSerializer(serializers.ModelSerializer):
id = serializers.IntegerField(required=False)
typ = serializers.HiddenField(default='Bplan')
# make write_only for consistency reasons
start_date = serializers.DateTimeField(write_only=True)
end_date = serializers.DateTimeField(write_only=True)
image_url = serializers.URLField(required=False, write_only=True)
class Meta:
model = Bplan
fields = (
'id', 'name', 'description', 'url', 'office_worker_email', 'typ',
'is_draft', 'start_date', 'end_date', 'image_url'
)
extra_kwargs = {
# write_only for consistency reasons
'is_draft': {'default': False, 'write_only': True},
'name': {'write_only': True},
'description': {'write_only': True},
'url': {'write_only': True},
'office_worker_email': {'write_only': True}
}
def create(self, validated_data):
orga_pk = self._context.get('organisation_pk', None)
orga_model = apps.get_model(settings.A4_ORGANISATIONS_MODEL)
orga = orga_model.objects.get(pk=orga_pk)
validated_data['organisation'] = orga
start_date = validated_data.pop('start_date')
end_date = validated_data.pop('end_date')
image_url = validated_data.pop('image_url', None)
if image_url:
try:
validated_data['image'] = \
self._download_image_from_url(image_url)
except:
raise serializers.ValidationError(
'Failed to download image {}'.format(image_url))
bplan = super().create(validated_data)
self._create_module_and_phase(bplan, start_date, end_date)
return bplan
def _create_module_and_phase(self, bplan, start_date, end_date):
module = module_models.Module.objects.create(
name=bplan.slug + '_module',
weight=1,
project=bplan,
)
phase_content = StatementPhase()
phase_models.Phase.objects.create(
name=_('Bplan statement phase'),
description=_('Bplan statement phase'),
type=phase_content.identifier,
module=module,
start_date=start_date,
end_date=end_date
)
def to_representation(self, instance):
dict = super().to_representation(instance)
dict['embed_code'] = self._response_embed_code(instance)
return dict
def _response_embed_code(self, bplan):
url = self._get_absolute_url(bplan)
embed = BPLAN_EMBED.format(url)
return embed
def _get_absolute_url(self, bplan):
site_url = Site.objects.get_current().domain
embed_url = reverse('embed-project', kwargs={'slug': bplan.slug, })
url = 'https://{}{}'.format(site_url, embed_url)
return url
def _download_image_from_url(self, url):
parsed_url = urlparse(url)
file_path = os.path.join(PROJECT_IMAGE_DIR,
os.path.basename(parsed_url.path))
file_name = os.path.join(settings.MEDIA_ROOT, file_path)
file_dir = os.path.dirname(file_name)
os.makedirs(file_dir, exist_ok=True)
request.urlretrieve(url, file_name)
return file_path
|
Python
| 0.000001
|
@@ -3,17 +3,16 @@
port os%0A
-%0A
from url
@@ -172,16 +172,114 @@
rt Site%0A
+from django.core.exceptions import ValidationError%0Afrom django.core.files.images import ImageFile%0A
from dja
@@ -407,16 +407,72 @@
lizers%0A%0A
+from adhocracy4.images.validators import validate_image%0A
from adh
@@ -2217,29 +2217,8 @@
rl:%0A
- try:%0A
@@ -2265,28 +2265,24 @@
-
self._downlo
@@ -2313,148 +2313,8 @@
url)
-%0A except:%0A raise serializers.ValidationError(%0A 'Failed to download image %7B%7D'.format(image_url))
%0A%0A
@@ -3618,16 +3618,33 @@
, url):%0A
+ try:%0A
@@ -3666,24 +3666,28 @@
lparse(url)%0A
+
file
@@ -3759,16 +3759,20 @@
+
os.path.
@@ -3802,24 +3802,28 @@
h))%0A
+
+
file_name =
@@ -3867,16 +3867,20 @@
e_path)%0A
+
@@ -3925,16 +3925,20 @@
+
os.maked
@@ -3975,16 +3975,20 @@
+
+
request.
@@ -4027,21 +4027,807 @@
-return file_path
+except:%0A self._remove_image_if_exists(file_name)%0A raise serializers.ValidationError(%0A 'Failed to download image %7B%7D'.format(url))%0A self._validate_image(file_name)%0A return file_path%0A%0A def _validate_image(self, file_name):%0A image_file = open(file_name, %22rb%22)%0A image = ImageFile(image_file, file_name)%0A config = settings.IMAGE_ALIASES.get('*', %7B%7D)%0A config.update(settings.IMAGE_ALIASES%5B'heroimage'%5D)%0A try:%0A validate_image(image, **config)%0A except ValidationError as e:%0A self._remove_image_if_exists(file_name)%0A raise serializers.ValidationError(e)%0A%0A def _remove_image_if_exists(self, file_name):%0A try:%0A os.remove(file_name)%0A except:%0A pass
%0A
|
268c577acd07bce4eb7e63bab6a38a7b436bc2e5
|
Include request ip in monitored data
|
frappe/monitor.py
|
frappe/monitor.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from datetime import datetime
import json
import traceback
import frappe
import os
import uuid
MONITOR_REDIS_KEY = "monitor-transactions"
def start(transaction_type="request", method=None, kwargs=None):
if frappe.conf.monitor:
frappe.local.monitor = Monitor(
transaction_type=transaction_type, method=method, kwargs=kwargs
)
def stop():
if frappe.conf.monitor and hasattr(frappe.local, "monitor"):
frappe.local.monitor.dump()
def log_file():
return os.path.join(frappe.utils.get_bench_path(), "logs", "monitor.json.log")
class Monitor:
def __init__(self, transaction_type=None, method=None, kwargs=None):
try:
self.site = frappe.local.site
self.timestamp = datetime.utcnow()
self.transaction_type = transaction_type
self.uuid = uuid.uuid4()
if self.transaction_type == "request":
self.data = frappe.form_dict
self.headers = dict(frappe.request.headers)
self.method = frappe.request.method
self.path = frappe.request.path
else:
self.kwargs = kwargs
self.method = method
except Exception:
traceback.print_exc()
def dump(self):
try:
timediff = datetime.utcnow() - self.timestamp
# Obtain duration in microseconds
self.duration = int(timediff.total_seconds() * 1000000)
data = {
"uuid": self.uuid,
"duration": self.duration,
"site": self.site,
"timestamp": self.timestamp.isoformat(sep=" "),
"transaction_type": self.transaction_type,
}
if self.transaction_type == "request":
update = {
"data": self.data,
"headers": self.headers,
"method": self.method,
"path": self.path,
}
else:
update = {
"kwargs": self.kwargs,
"method": self.method,
}
data.update(update)
json_data = json.dumps(data, sort_keys=True, default=str)
store(json_data)
except Exception:
traceback.print_exc()
def store(json_data):
MAX_LOGS = 1000000
if frappe.cache().llen(MONITOR_REDIS_KEY) > MAX_LOGS:
frappe.cache().ltrim(MONITOR_REDIS_KEY, 1, -1)
frappe.cache().rpush(MONITOR_REDIS_KEY, json_data)
def flush():
try:
# Fetch all the logs without removing from cache
logs = frappe.cache().lrange(MONITOR_REDIS_KEY, 0, -1)
logs = list(map(frappe.safe_decode, logs))
with open(log_file(), "a", os.O_NONBLOCK) as f:
f.write("\n".join(logs))
f.write("\n")
# Remove fetched entries from cache
frappe.cache().ltrim(MONITOR_REDIS_KEY, len(logs) - 1, -1)
except Exception:
traceback.print_exc()
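# Wiring sketch (the hook names are assumptions): call start() when a request
# or job begins and stop() when it ends; a scheduled job should invoke flush()
# periodically to move buffered entries from Redis into monitor.json.log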
|
Python
| 0
|
@@ -1061,16 +1061,54 @@
eaders)%0A
+%09%09%09%09self.ip = frappe.local.request_ip%0A
%09%09%09%09self
@@ -1754,16 +1754,36 @@
eaders,%0A
+%09%09%09%09%09%22ip%22: self.ip,%0A
%09%09%09%09%09%22me
|
7a5cb953f64dce841d88b9c8b45be7719c617ba2
|
Fix games init file
|
games/__init__.py
|
games/__init__.py
|
__all__ = ['Game', 'Mancala', 'Player', 'TicTacToe']
|
Python
| 0.000001
|
@@ -1,45 +1,52 @@
-__all__ = %5B'Game', 'Mancala', 'Player', '
+import Game%0Aimport Mancala%0Aimport Player%0Aimport
TicT
@@ -54,6 +54,4 @@
cToe
-'%5D
|
5dec04a8d06f5ea96d96b7a551c5ad9959e4c9e7
|
Properly specify dns in network_metadata
|
playbooks/library/network_metadata.py
|
playbooks/library/network_metadata.py
|
#!/usr/bin/env python
# coding: utf-8 -*-
# (c) 2015, Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: network_metadata
short_description: Returns a config-drive network-metadata dictionary
extends_documentation_fragment: openstack
'''
def main():
argument_spec = dict(
ipv4_address=dict(required=False),
ipv4_gateway=dict(required=False),
ipv4_interface_mac=dict(required=False),
ipv4_nameserver=dict(required=False),
ipv4_subnet_mask=dict(required=False),
vlan_id=dict(required=False),
network_mtu=dict(required=False),
nics=dict(required=False),
node_network_info=dict(required=False)
)
module = AnsibleModule(argument_spec)
network_metadata = module.params['node_network_info']
if not network_metadata:
links = []
networks = []
if module.params['ipv4_interface_mac']:
links.append({
'id': module.params['ipv4_interface_mac'],
'type': 'phy',
'ethernet_mac_address': module.params['ipv4_interface_mac'],
'mtu': module.params['network_mtu']
})
for nic in module.params['nics']:
if nic['mac'] == module.params['ipv4_interface_mac']:
networks.append({
'id': 'ipv4-%s' % nic['mac'],
'link': nic['mac'],
'type': 'ipv4',
'ip_address': module.params['ipv4_address'],
'netmask': module.params['ipv4_subnet_mask'],
'dns_nameservers': [
module.params['ipv4_nameserver']
],
'routes': [{
'network': '0.0.0.0',
'netmask': '0.0.0.0',
'gateway': module.params['ipv4_gateway']
}]
})
else:
for i, nic in enumerate(module.params['nics']):
nic_id = nic['mac']
if module.params['vlan_id']:
nic_id = 'vlan-%s' % nic['mac']
links.append({
'id': nic_id,
'type': 'vlan',
'vlan_id': module.params['vlan_id'],
'vlan_link': nic['mac'],
'vlan_mac_address': nic['mac']
})
links.append({
'id': nic['mac'],
'type': 'phy',
'ethernet_mac_address': nic['mac'],
'mtu': module.params['network_mtu']
})
if i == 0:
networks.append({
'id': 'ipv4-%s' % nic_id,
'link': nic_id,
'type': 'ipv4',
'ip_address': module.params['ipv4_address'],
'netmask': module.params['ipv4_subnet_mask'],
'dns_nameservers': [
module.params['ipv4_nameserver']
],
'routes': [{
'network': '0.0.0.0',
'netmask': '0.0.0.0',
'gateway': module.params['ipv4_gateway']
}]
})
else:
networks.append({
'id': 'ipv4-dhcp-%s' % nic_id,
'link': nic_id,
'type': 'ipv4_dhcp',
})
network_metadata = {
'links': links,
'networks': networks
}
facts = {'network_metadata': network_metadata}
module.exit_json(changed=False, ansible_facts=facts)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
Python
| 0.999961
|
@@ -4317,32 +4317,236 @@
%7D)%0A%0A
+ services = %5B%5D%0A if module.params%5B'ipv4_nameserver'%5D:%0A services.append(%7B%0A 'type': 'dns',%0A 'address': module.params%5B'ipv4_nameserver'%5D%0A %7D)%0A%0A
network_
@@ -4618,16 +4618,50 @@
networks
+,%0A 'services': services
%0A
|
147c85aff3e93ebb39d984a05cec970b3dc7edc0
|
Add expires_at field to jwt that was removed accidentally (#242)
|
frontstage/jwt.py
|
frontstage/jwt.py
|
"""
Module to create jwt token.
"""
from datetime import datetime, timedelta
from jose import jwt
from frontstage import app
def timestamp_token(token):
"""Time stamp the expires_in argument of the OAuth2 token. And replace with an expires_in UTC timestamp"""
current_time = datetime.now()
expires_in = current_time + timedelta(seconds=token['expires_in'])
data_dict_for_jwt_token = {
"refresh_token": token['refresh_token'],
"access_token": token['access_token'],
"role": "respondent",
"party_id": token['party_id']
}
return data_dict_for_jwt_token
def encode(data):
"""Encode data in jwt token."""
return jwt.encode(data, app.config['JWT_SECRET'], algorithm=app.config['JWT_ALGORITHM'])
def decode(token):
"""Decode data in jwt token."""
return jwt.decode(token, app.config['JWT_SECRET'], algorithms=[app.config['JWT_ALGORITHM']])
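# Round-trip sketch (the payload is an arbitrary example):
#   token = encode({'role': 'respondent'})
#   decode(token)  # -> {'role': 'respondent'}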
|
Python
| 0
|
@@ -487,32 +487,78 @@
access_token'%5D,%0A
+ %22expires_at%22: expires_in.timestamp(),%0A
%22role%22:
|
a23e385d5de4ae3c36eb7e5e37b7bfcc6ed5d129
|
Add bat file suffix for invoking dart2js.
|
site/try/build_try.gyp
|
site/try/build_try.gyp
|
# Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE
{
'targets': [
{
'target_name': 'try_site',
'type': 'none',
'dependencies': [
'../../runtime/dart-runtime.gyp:dart',
'../../create_sdk.gyp:create_sdk_internal',
],
'variables': {
'try_dart_static_files': [
'index.html',
'dartlang-style.css',
'iframe.html',
'iframe.js',
'dart-icon.png', # iOS icon.
'dart-iphone5.png', # iPhone 5 splash screen.
'dart-icon-196px.png', # Android icon.
'try-dart-screenshot.png', # Google+ screen shot.
'../../third_party/font-awesome/font-awesome-4.0.3/'
'fonts/fontawesome-webfont.woff',
'favicon.ico',
'<(SHARED_INTERMEDIATE_DIR)/leap.dart.js',
'<(SHARED_INTERMEDIATE_DIR)/sdk.json',
],
},
'actions': [
{
'action_name': 'sdk_json',
'message': 'Creating sdk.json',
'inputs': [
# Depending on this file ensures that the SDK is built before this
# action is executed.
'<(PRODUCT_DIR)/dart-sdk/README',
# This dependency is redundant for now, as this directory is
# implicitly part of the dependencies for dart-sdk/README.
'<!@(["python", "../../tools/list_files.py", "\\.dart$", '
'"../../sdk/lib/_internal/compiler/samples/jsonify"])',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/sdk.json',
],
'action': [
'<(PRODUCT_DIR)/dart-sdk/bin/'
'<(EXECUTABLE_PREFIX)dart<(EXECUTABLE_SUFFIX)',
'-Dlist_all_libraries=true',
'-DoutputJson=true',
'../../sdk/lib/_internal/compiler/samples/jsonify/jsonify.dart',
'<(SHARED_INTERMEDIATE_DIR)/sdk.json',
],
},
{
'action_name': 'compile',
'message': 'Creating leap.dart.js',
'inputs': [
# Depending on this file ensures that the SDK is built before this
# action is executed.
'<(PRODUCT_DIR)/dart-sdk/README',
'<!@(["python", "../../tools/list_files.py", "\\.dart$", "src"])',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/leap.dart.js',
],
'action': [
'<(PRODUCT_DIR)/dart-sdk/bin/dart2js',
'-p../../sdk/lib/_internal/',
'-Denable_ir=false',
'src/leap.dart',
'-o<(SHARED_INTERMEDIATE_DIR)/leap.dart.js',
],
},
{
'action_name': 'nossl_appcache',
'message': 'Creating nossl.appcache',
'inputs': [
'add_time_stamp.py',
'nossl.appcache',
'<@(try_dart_static_files)',
'build_try.gyp', # If the list of files changed.
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/nossl.appcache',
],
# Try Dart! uses AppCache. Cached files are only validated when the
# manifest changes (not its timestamp, but its actual contents).
'action': [
'python',
'add_time_stamp.py',
'nossl.appcache',
'<(SHARED_INTERMEDIATE_DIR)/nossl.appcache',
],
},
],
'copies': [
{
# Destination directory.
'destination': '<(PRODUCT_DIR)/try_dartlang_org/',
# List of files to be copied (creates implicit build dependencies).
'files': [
'app.yaml',
'<@(try_dart_static_files)',
'<(SHARED_INTERMEDIATE_DIR)/nossl.appcache',
],
},
],
},
],
}
|
Python
| 0.005184
|
@@ -203,16 +203,181 @@
ENSE%0A%0A%7B%0A
+ 'variables' : %7B%0A 'script_suffix%25': '',%0A %7D,%0A 'conditions' : %5B%0A %5B'OS==%22win%22', %7B%0A 'variables' : %7B%0A 'script_suffix': '.bat',%0A %7D,%0A %7D%5D,%0A %5D,%0A
'targe
@@ -2756,16 +2756,32 @@
/dart2js
+%3C(script_suffix)
',%0A
|
2c870ee5b3d4df5a2f628350b7d4897f301c34de
|
Delete commented out urlparams code
|
badger/helpers.py
|
badger/helpers.py
|
import hashlib
import urllib
import urlparse
from django.conf import settings
from django.contrib.auth.models import SiteProfileNotAvailable
from django.core.exceptions import ObjectDoesNotExist
from django.utils.html import conditional_escape
try:
from commons.urlresolvers import reverse
except ImportError, e:
from django.core.urlresolvers import reverse
try:
from tower import ugettext_lazy as _
except ImportError, e:
from django.utils.translation import ugettext_lazy as _
import jingo
import jinja2
from jinja2 import evalcontextfilter, Markup, escape
from jingo import register, env
from .models import (Badge, Award, Nomination, Progress,
BadgeAwardNotAllowedException)
@register.function
def user_avatar(user, secure=False, size=256, rating='pg', default=''):
try:
profile = user.get_profile()
if profile.avatar:
return profile.avatar.url
except AttributeError:
pass
except SiteProfileNotAvailable:
pass
except ObjectDoesNotExist:
pass
base_url = (secure and 'https://secure.gravatar.com' or
'http://www.gravatar.com')
m = hashlib.md5(user.email)
return '%(base_url)s/avatar/%(hash)s?%(params)s' % dict(
base_url=base_url, hash=m.hexdigest(),
params=urllib.urlencode(dict(
s=size, d=default, r=rating
))
)
@register.function
def user_awards(user):
return Award.objects.filter(user=user)
@register.function
def user_badges(user):
return Badge.objects.filter(creator=user)
@register.function
def badger_allows_add_by(user):
return Badge.objects.allows_add_by(user)
@register.function
def qr_code_image(value, alt=None, size=150):
# TODO: Bake our own QR codes, someday soon!
url = conditional_escape("http://chart.apis.google.com/chart?%s" % \
urllib.urlencode({'chs': '%sx%s' % (size, size), 'cht': 'qr', 'chl': value, 'choe': 'UTF-8'}))
alt = conditional_escape(alt or value)
return Markup(u"""<img class="qrcode" src="%s" width="%s" height="%s" alt="%s" />""" %
(url, size, size, alt))
@register.function
def nominations_pending_approval(user):
return Nomination.objects.filter(badge__creator=user,
approver__isnull=True)
@register.function
def nominations_pending_acceptance(user):
return Nomination.objects.filter(nominee=user,
approver__isnull=False,
accepted=False)
# FIXME - This code is broken because smart_str doesn't exist in the namespace
# Since it's not used anywhere in django-badger and I'm not sure whether
# deleting it is ok or not, I'm commenting it out.
#
# @register.filter
# def urlparams(url_, hash=None, **query):
# """Add a fragment and/or query paramaters to a URL.
#
# New query params will be appended to exising parameters, except duplicate
# names, which will be replaced.
# """
# url = urlparse.urlparse(url_)
# fragment = hash if hash is not None else url.fragment
#
# # Use dict(parse_qsl) so we don't get lists of values.
# q = url.query
# query_dict = dict(urlparse.parse_qsl(smart_str(q))) if q else {}
# query_dict.update((k, v) for k, v in query.items())
#
# query_string = _urlencode([(k, v) for k, v in query_dict.items()
# if v is not None])
# new = urlparse.ParseResult(url.scheme, url.netloc, url.path, url.params,
# query_string, fragment)
# return new.geturl()
#
#
# def _urlencode(items):
# """A Unicode-safe URLencoder."""
# try:
# return urllib.urlencode(items)
# except UnicodeEncodeError:
# return urllib.urlencode([(k, smart_str(v)) for k, v in items])
|
Python
| 0
|
@@ -2542,1286 +2542,4 @@
se)%0A
-%0A%0A# FIXME - This code is broken because smart_str doesn't exist in the namespace%0A# Since it's not used anywhere in django-badger and I'm not sure whether%0A# deleting it is ok or not, I'm commenting it out.%0A#%0A# @register.filter%0A# def urlparams(url_, hash=None, **query):%0A# %22%22%22Add a fragment and/or query paramaters to a URL.%0A# %0A# New query params will be appended to exising parameters, except duplicate%0A# names, which will be replaced.%0A# %22%22%22%0A# url = urlparse.urlparse(url_)%0A# fragment = hash if hash is not None else url.fragment%0A# %0A# # Use dict(parse_qsl) so we don't get lists of values.%0A# q = url.query%0A# query_dict = dict(urlparse.parse_qsl(smart_str(q))) if q else %7B%7D%0A# query_dict.update((k, v) for k, v in query.items())%0A# %0A# query_string = _urlencode(%5B(k, v) for k, v in query_dict.items()%0A# if v is not None%5D)%0A# new = urlparse.ParseResult(url.scheme, url.netloc, url.path, url.params,%0A# query_string, fragment)%0A# return new.geturl()%0A# %0A# %0A# def _urlencode(items):%0A# %22%22%22A Unicode-safe URLencoder.%22%22%22%0A# try:%0A# return urllib.urlencode(items)%0A# except UnicodeEncodeError:%0A# return urllib.urlencode(%5B(k, smart_str(v)) for k, v in items%5D)%0A
|
e8b49384d3e9e23485199ef131f0cb8f818a2a02
|
edit default port
|
get-image-part.py
|
get-image-part.py
|
import tornado.ioloop
import tornado.web
import tornado.wsgi
import io
import time
import random
import os
from PIL import Image
N = 20
class MainHandler(tornado.web.RequestHandler):
def get(self):
n = int(random.uniform(0,N))
img = int(self.get_argument("img"))
fn = os.path.join(os.path.dirname(__file__), "images/"+str(img)+".jpg")
im = Image.open(fn)
dim = im.size
c = im.crop((int(n*dim[0]/N), 0, int((n+1)*dim[0]/N), dim[1]))
c = c.convert("RGBA")
bio = io.BytesIO()
c.save(bio, 'PNG')
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Content-Type', 'image/png')
self.set_header('X-ECE459-Fragment', str(n))
time.sleep(abs(random.gauss(0.2, 0.2)))
self.write(bio.getvalue())
application = tornado.wsgi.WSGIApplication([
(r"/image", MainHandler),
])
if __name__ == "__main__":
import logging
import wsgiref.simple_server
logger = logging.getLogger('tornado.application')
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
server = wsgiref.simple_server.make_server('', 8000, application)
server.serve_forever()
|
Python
| 0.000001
|
@@ -1172,11 +1172,11 @@
'',
-800
+459
0, a
|
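The handler above serves one random vertical strip of the image per request and identifies it in the X-ECE459-Fragment header, so a client must keep requesting until all N strips are collected. A minimal client sketch using requests (hostname and image id are placeholders; port 4590 matches the change above):

# Sketch of a client collecting all N fragments from the server above.
import requests

N = 20
fragments = {}
while len(fragments) < N:
    r = requests.get('http://localhost:4590/image', params={'img': 0})
    n = int(r.headers['X-ECE459-Fragment'])
    fragments[n] = r.content  # PNG bytes for strip n
# The strips can now be stitched back together in index order, e.g. with PIL.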
37e1eb093eb29044930cb90a049f471aa2caad8b
|
Update publicip.py
|
apps/tinyosGW/publicip.py
|
apps/tinyosGW/publicip.py
|
#!/usr/bin/python
# Author : jeonghoonkang, https://github.com/jeonghoonkang
#-*- coding: utf-8 -*-
from __future__ import print_function
from subprocess import *
from types import *
import platform
import sys
import os
def run_cmd(cmd):
p = Popen(cmd, shell=True, stdout=PIPE)
output = p.communicate()[0]
return output
def hostname():
cmd = "hostname"
ret = run_cmd(cmd)
return ret
def getip():
cmd = "curl http://checkip.amazonaws.com"
ip = run_cmd(cmd)
print (ip)
return ip
def getiip():
cmd="/sbin/ifconfig"
_os_type = platform.system()
print (_os_type)
if _os_type.find('Cygwin') > 0:
cmd = "ipconfig"
iip = run_cmd(cmd)
return iip, _os_type
def checkifexist(fname):
cmd='ls ' + fname
print (run_cmd(cmd))
def writefile(_in, fn="ip.txt"):
f = open(fn, 'w')
f.write(_in)
f.flush()
f.close()
return
def args_proc():
msg = "usage : python %s {server_IP_ADD} {server_PORT} {server_id} {passwd_for_server}" %__file__
msg += " => user should input arguments {} "
print (msg)
if len(sys.argv) < 2:
exit("[bye] you need to input args, ip / port / id")
arg1 = sys.argv[1]
arg2 = sys.argv[2]
arg3 = sys.argv[3]
arg4 = sys.argv[4]
ip = arg1
port = arg2
id = arg3
passwd = arg4
print ("... start running, inputs are ", ip, port, id, passwd)
return ip, port, id, passwd
if __name__ == '__main__':
ip, port, id, passwd = args_proc()
p_ip = getip()
i_ip, os_type = getiip()
info = i_ip + p_ip
hostn = hostname()
name = os.getlogin()
if os_type == "Linux":
fname = '/home/%s/' %name
elif os_type == "Darwin":
fname = '/Users/%s/' %name
fname += 'devel/BerePi/apps/tinyosGW/out/%s.txt' %(hostn[:-1])
writefile (info, fname)
checkifexist(fname)
cmd = 'sshpass -p' + passwd + ' ' + 'scp' + ' -o' + ' StrictHostKeyChecking=no'
cmd += " %s " %fname + '%s@%s:' %(id,ip) + '/var/www/html/server/'
# cmd = 'scp'
# cmd += " %s " %fname + '%s@%s:' %(id,ip) + '/var/www/html/server/'
ret = run_cmd(cmd)
print (cmd)
print (" ")
print (ret)
# ssh-keygen
# cat ~/.ssh/id_rsa.pub | ssh -p xxxx pi@xxx.xxx.xxx 'cat >>
# .ssh/authorized_keys'
|
Python
| 0.000001
|
@@ -1,8 +1,30 @@
+#-*- coding: utf-8 -*-
%0A#!/usr/
@@ -96,31 +96,8 @@
kang
-%0A#-*- coding: utf-8 -*-
%0A%0Afr
|
5cda2208ada4c521c0c452f4eb8987633454b44b
|
Update userpass.py
|
hvac/api/auth_methods/userpass.py
|
hvac/api/auth_methods/userpass.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""USERPASS methods module."""
from hvac.api.vault_api_base import VaultApiBase
DEFAULT_MOUNT_POINT = 'userpass'
class Userpass(VaultApiBase):
"""USERPASS Auth Method (API).
Reference: https://www.vaultproject.io/api/auth/userpass/index.html
"""
def create_or_update_user(self, username, password, policies=None, mount_point=DEFAULT_MOUNT_POINT):
"""
Create/update user in userpass.
Supported methods:
POST: /auth/{mount_point}/users/{username}. Produces: 204 (empty body)
:param username: The username for the user.
:type username: str | unicode
:param password: The password for the user. Only required when creating the user.
:type password: str | unicode
:param policies: The list of policies to be set on username created.
:type policies: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
"""
params = {
'password': password,
}
params.update(
utils.remove_nones({
'policies': policies,
})
)
api_path = '/v1/auth/{mount_point}/users/{username}'.format(mount_point=mount_point, username=username)
return self._adapter.post(
url=api_path,
json=params,
)
def list_user(self, mount_point=DEFAULT_MOUNT_POINT):
"""
List existing users that have been created in the auth method.
Supported methods:
LIST: /auth/{mount_point}/users. Produces: 200 application/json
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the list_user request.
:rtype: dict
"""
api_path = '/v1/auth/{mount_point}/users'.format(mount_point=mount_point)
response = self._adapter.list(
url=api_path,
)
return response.json()
def read_user(self, username, mount_point=DEFAULT_MOUNT_POINT):
"""
Read user in the auth method.
Supported methods:
GET: /auth/{mount_point}/users/{username}. Produces: 200 application/json
:param username: The username for the user.
:type username: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The JSON response of the read_user request.
:rtype: dict
"""
api_path = '/v1/auth/{mount_point}/users/{username}'.format(mount_point=mount_point, username=username)
response = self._adapter.get(
url=api_path,
)
return response.json()
def delete_user(self, username, mount_point=DEFAULT_MOUNT_POINT):
"""
Delete user in the auth method.
Supported methods:
DELETE: /auth/{mount_point}/users/{username}. Produces: 204 (empty body)
:param username: The username for the user.
:type username: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
:return: The response of the delete_user request.
:rtype: dict
"""
api_path = '/v1/auth/{mount_point}/users/{username}'.format(mount_point=mount_point, username=username)
response = self._adapter.delete(
url=api_path,
)
return response.json()
def update_password_on_user(self, username, password, mount_point=DEFAULT_MOUNT_POINT):
"""
Update the password for the user in userpass.
Supported methods:
POST: /auth/{mount_point}/users/{username}/password. Produces: 204 (empty body)
:param username: The username for the user.
:type username: str | unicode
:param password: The new password for the user.
:type password: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
"""
params = {
'password': password,
}
api_path = '/v1/auth/{mount_point}/users/{username}/password'.format(mount_point=mount_point, username=username)
return self._adapter.post(
url=api_path,
json=params,
)
def login(self, username, password, mount_point=DEFAULT_MOUNT_POINT):
"""
Log in with USERPASS credentials.
Supported methods:
POST: /auth/{mount_point}/login/{username}. Produces: 200 application/json
:param username: The username for the user.
:type username: str | unicode
:param password: The password for the user.
:type password: str | unicode
:param mount_point: The "path" the method/backend was mounted on.
:type mount_point: str | unicode
"""
params = {
'password': password,
}
api_path = '/v1/auth/{mount_point}/login/{username}'.format(mount_point=mount_point, username=username)
response = self._adapter.post(
url=api_path,
json=params,
)
return response.json()
|
Python
| 0.000001
|
@@ -70,16 +70,39 @@
ule.%22%22%22%0A
+from hvac import utils%0A
from hva
|
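A short usage sketch of the class above, assuming it is exposed as client.auth.userpass in hvac's usual auth-method layout (URL and credentials are placeholders):

# Usage sketch; assumes this class is wired up as client.auth.userpass.
import hvac

client = hvac.Client(url='http://localhost:8200', token='root-token')  # placeholders
client.auth.userpass.create_or_update_user('alice', 'hunter2', policies='default')
login_response = client.auth.userpass.login('alice', 'hunter2')
client.token = login_response['auth']['client_token']  # continue as alice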
996474b241d72a475b4ee111a2b7b5fd8c504c27
|
Fix of var name mistake in register_to_payment
|
getpaid/models.py
|
getpaid/models.py
|
import sys
from datetime import datetime
from django.apps import apps
from django.db import models
from django.utils import six
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from .abstract_mixin import AbstractMixin
from getpaid import signals
from .utils import import_backend_modules
from django.conf import settings
if six.PY3:
unicode = str
PAYMENT_STATUS_CHOICES = (
('new', _("new")),
('in_progress', _("in progress")),
('accepted_for_proc', _("accepted for processing")),
('partially_paid', _("partially paid")),
('paid', _("paid")),
('cancelled', _("cancelled")),
('failed', _("failed")),
)
class PaymentManager(models.Manager):
def get_queryset(self):
return super(PaymentManager, self).get_queryset().select_related('order')
@python_2_unicode_compatible
class PaymentFactory(models.Model, AbstractMixin):
"""
This is an abstract class that defines a structure of Payment model that will be
generated dynamically with one additional field: ``order``
"""
amount = models.DecimalField(_("amount"), decimal_places=4, max_digits=20)
currency = models.CharField(_("currency"), max_length=3)
status = models.CharField(_("status"), max_length=20, choices=PAYMENT_STATUS_CHOICES, default='new', db_index=True)
backend = models.CharField(_("backend"), max_length=50)
created_on = models.DateTimeField(_("created on"), auto_now_add=True, db_index=True)
paid_on = models.DateTimeField(_("paid on"), blank=True, null=True, default=None, db_index=True)
amount_paid = models.DecimalField(_("amount paid"), decimal_places=4, max_digits=20, default=0)
external_id = models.CharField(_("external id"), max_length=64, blank=True, null=True)
description = models.CharField(_("description"), max_length=128, blank=True, null=True)
class Meta:
abstract = True
def __str__(self):
return _("Payment #%(id)d") % {'id': self.id}
@classmethod
def contribute(cls, order, **kwargs):
return {'order': models.ForeignKey(order, **kwargs)}
@classmethod
def create(cls, order, backend):
"""
Builds Payment object based on given Order instance
"""
payment = Payment()
payment.order = order
payment.backend = backend
signals.new_payment_query.send(sender=None, order=order, payment=payment)
if payment.currency is None or payment.amount is None:
raise NotImplementedError('Please provide a listener for getpaid.signals.new_payment_query')
payment.save()
signals.new_payment.send(sender=None, order=order, payment=payment)
return payment
def get_processor(self):
try:
__import__(self.backend)
module = sys.modules[self.backend]
return module.PaymentProcessor
except (ImportError, AttributeError):
raise ValueError("Backend '%s' is not available or provides no processor." % self.backend)
def change_status(self, new_status):
"""
Always change payment status via this method. Otherwise the signal
will not be emitted.
"""
if self.status != new_status:
# do anything only when status is really changed
old_status = self.status
self.status = new_status
self.save()
signals.payment_status_changed.send(
sender=type(self), instance=self,
old_status=old_status, new_status=new_status
)
def on_success(self, amount=None):
"""
Called when payment receives successful balance income. It defaults to
complete payment, but can optionally accept received amount as a parameter
to handle partial payments.
Returns boolean value if payment was fully paid
"""
if getattr(settings, 'USE_TZ', False):
self.paid_on = datetime.utcnow().replace(tzinfo=utc)
else:
self.paid_on = datetime.now()
if amount:
self.amount_paid = amount
else:
self.amount_paid = self.amount
fully_paid = (self.amount_paid >= self.amount)
if fully_paid:
self.change_status('paid')
else:
self.change_status('partially_paid')
return fully_paid
def on_failure(self):
"""
Called when payment was failed
"""
self.change_status('failed')
def register_to_payment(order_class, **kwargs):
"""
A function for registering unaware order class to ``getpaid``. This will
generate a ``Payment`` model class that will store payments with
ForeignKey to original order class
This also will build a model class for every enabled backend.
"""
global Payment
global Order
class Payment(PaymentFactory.construct(order=order_class, **kwargs)):
objects = PaymentManager()
class Meta:
ordering = ('-created_on',)
verbose_name = _("Payment")
verbose_name_plural = _("Payments")
Order = order_class
# Now build models for backends
backend_models_modules = import_backend_modules('models')
for backend_name, models_module in backend_models_modules.items():
for model in models_module.build_models(Payment):
apps.register_model(backend_name, module)
return Payment
|
Python
| 0.000096
|
@@ -5520,19 +5520,18 @@
ame, mod
-ul
e
+l
)%0A re
|
9786a1d0f3baa1b0034cc1f48c611305bd05cd43
|
Define _display_name method for Response that refer to Audit.
|
src/ggrc/models/response.py
|
src/ggrc/models/response.py
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: vraj@reciprocitylabs.com
from ggrc import db
from .mixins import deferred, BusinessObject
from .relationship import Relatable
from .object_document import Documentable
from .object_person import Personable
from .object_control import Controllable
class Response(BusinessObject, db.Model):
__tablename__ = 'responses'
__mapper_args__ = {
'polymorphic_on': 'response_type',
}
VALID_STATES = (u'Assigned', u'Accepted', u'Completed')
VALID_TYPES = (u'documentation', u'interview', u'population sample')
request_id = deferred(
db.Column(db.Integer, db.ForeignKey('requests.id'), nullable=False),
'Response')
response_type = db.Column(db.Enum(*VALID_TYPES), nullable=False)
status = deferred(db.Column(db.Enum(*VALID_STATES), nullable=False),
'Response')
_publish_attrs = [
'request',
'status',
'response_type',
]
_sanitize_html = [
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(Response, cls).eager_query()
return query.options(
orm.joinedload('request'))
class DocumentationResponse(
Relatable, Documentable, Personable, Controllable, Response):
__mapper_args__ = {
'polymorphic_identity': 'documentation'
}
_table_plural = 'documentation_responses'
_publish_attrs = [
]
_sanitize_html = [
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(DocumentationResponse, cls).eager_query()
return query.options()
class InterviewResponse(
Relatable, Documentable, Personable, Controllable, Response):
__mapper_args__ = {
'polymorphic_identity': 'interview'
}
_table_plural = 'interview_responses'
meetings = db.relationship('Meeting', backref='response')
_publish_attrs = [
'meetings',
]
_sanitize_html = [
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(InterviewResponse, cls).eager_query()
return query.options(
orm.subqueryload('meetings'))
class PopulationSampleResponse(
Relatable, Documentable, Personable, Controllable, Response):
__mapper_args__ = {
'polymorphic_identity': 'population sample'
}
_table_plural = 'population_sample_responses'
population_worksheet_id = deferred(
db.Column(db.Integer, db.ForeignKey('documents.id'), nullable=False),
'Response')
population_count = deferred(db.Column(db.Integer, nullable=True),
'Response')
sample_worksheet_id = deferred(
db.Column(db.Integer, db.ForeignKey('documents.id'), nullable=False),
'Response')
sample_count = deferred(db.Column(db.Integer, nullable=True), 'Response')
sample_evidence_id = deferred(
db.Column(db.Integer, db.ForeignKey('documents.id'), nullable=False),
'Response')
population_worksheet = db.relationship(
"Document",
foreign_keys="PopulationSampleResponse.population_worksheet_id"
)
sample_worksheet = db.relationship(
"Document",
foreign_keys="PopulationSampleResponse.sample_worksheet_id"
)
sample_evidence = db.relationship(
"Document",
foreign_keys="PopulationSampleResponse.sample_evidence_id"
)
_publish_attrs = [
'population_worksheet',
'population_count',
'sample_worksheet',
'sample_count',
'sample_evidence',
]
_sanitize_html = [
'population_count',
'sample_count',
]
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(PopulationSampleResponse, cls).eager_query()
return query.options(
orm.joinedload('population_worksheet'),
orm.joinedload('sample_worksheet'),
orm.joinedload('sample_evidence'))
|
Python
| 0
|
@@ -1108,32 +1108,169 @@
ml = %5B%0A %5D%0A%0A
+ def _display_name(self):%0A return u'Response with id=%7B0%7D for Audit %22%7B1%7D%22'.format(%0A self.id, self.request.audit.display_name)%0A%0A
@classmethod%0A
|
3e33a94580d386be71298bcc7fb2d4a4bc19dd34
|
apply only the unified diff instead of the whole file
|
gitmagic/fixup.py
|
gitmagic/fixup.py
|
import gitmagic
def fixup(repo, destination_picker, change_finder, args={}):
repo.index.reset()
for change in change_finder(repo):
_apply_change(repo, change)
destination_commits = destination_picker.pick(change)
if not destination_commits:
repo.index.commit( message = "WARNING: no destination commit")
continue
destination = destination_commits[0]
gitmagic.checkpoint("Should I create fixup commit for {} -> {}:{}\n{}".format(
change.a_file_name,
destination.hexsha[:7],
destination.summary,
change.unified_diff().read()), args)
repo.index.commit( message = "fixup! {}".format(destination.message))
def _apply_change(repo, change):
#todo: apply unified diff only
repo.index.add([change.a_file_name])
|
Python
| 0.000001
|
@@ -8,16 +8,71 @@
gitmagic
+%0Afrom git.cmd import Git%0Afrom io import import StringIO
%0A%0Adef fi
@@ -821,77 +821,113 @@
-#todo: apply unified diff only%0A repo.index.add(%5Bchange.a_file_name%5D
+git = Git(repo.working_dir)%0A git.execute(%5B'git', 'apply', '-'%5D, istream=StringIO(change.unified_diff())
)%0A%0A
|
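The change above pipes only the hunk into `git apply -` instead of staging the whole file. A standard-library sketch of the same idea, feeding the diff text through stdin with subprocess rather than GitPython:

# Sketch of applying a unified diff from memory via `git apply -`,
# using subprocess so the diff text can be fed through stdin.
import subprocess

def apply_patch(repo_dir, unified_diff_text):
    subprocess.run(['git', 'apply', '-'], cwd=repo_dir,
                   input=unified_diff_text.encode(), check=True)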
d645eb7d70f76c9974da51d4517c77c1cc2c575a
|
version 0.4.1347
|
src/you_get/version.py
|
src/you_get/version.py
|
#!/usr/bin/env python
script_name = 'you-get'
__version__ = '0.4.1328'
|
Python
| 0.000001
|
@@ -65,8 +65,8 @@
4.13
-28
+47
'%0A
|
648487b9a256ffa1d9ba91758e0c8afe8409fb9b
|
version 0.4.1328
|
src/you_get/version.py
|
src/you_get/version.py
|
#!/usr/bin/env python
script_name = 'you-get'
__version__ = '0.4.1314'
|
Python
| 0.000001
|
@@ -65,8 +65,8 @@
4.13
-14
+28
'%0A
|
eb642e63cd32b972ccdec4f487b9a7e2e7cb17b5
|
make product_change_type template filter work with hidden plans
|
billing/templatetags/billing_tags.py
|
billing/templatetags/billing_tags.py
|
from django import template
import billing.loading
from pricing.products import Product
register = template.Library()
@register.filter
def product_change_type(product, user):
upc = user.billing_account.get_current_product_class()
if isinstance(product, Product):
product = type(product)
if upc:
products = billing.loading.get_products()
upc_index = products.index(upc)
p_index = products.index(product)
if upc_index < p_index:
return 'upgrade'
elif upc_index == p_index:
return None
else:
return 'downgrade'
else:
return 'upgrade'
|
Python
| 0
|
@@ -359,16 +359,27 @@
roducts(
+hidden=True
)%0A
|
58c685aa03c51a96a25af9dd7d6792035b1f167e
|
fix update_firebase_installation
|
plugins/tff_backend/bizz/dashboard.py
|
plugins/tff_backend/bizz/dashboard.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.4@@
import time
from collections import defaultdict
from datetime import datetime
from dateutil.relativedelta import relativedelta
from mcfw.rpc import arguments, returns
from plugins.rogerthat_api.to.installation import InstallationLogTO, InstallationTO
from plugins.tff_backend.bizz.flow_statistics import get_flow_run_ticker_entry
from plugins.tff_backend.bizz.installations import list_installations, get_ticker_entry_for_installation
from plugins.tff_backend.firebase import put_firebase_data, remove_firebase_data
from plugins.tff_backend.models.statistics import FlowRun
from plugins.tff_backend.to.dashboard import TickerEntryTO
@returns([TickerEntryTO])
def rebuild_flow_stats(start_date):
# type: (datetime) -> list[TickerEntryTO]
stats_per_flow_name = defaultdict(dict)
ticker_entries = []
for flow_run in FlowRun.list_by_start_date(start_date): # type: FlowRun
stats_per_flow_name[flow_run.flow_name][flow_run.id] = flow_run.status
ticker_entries.append(get_flow_run_ticker_entry(flow_run))
put_firebase_data('/dashboard/flows.json', stats_per_flow_name)
return ticker_entries
def rebuild_installation_stats(date):
cursor = None
max_timestamp = time.mktime(date.timetuple())
has_more = True
# keys = possible values of InstallationTO.status
firebase_data = {
'started': {},
'in_progress': {},
'finished': {}
}
ticker_entries = []
while has_more:
installation_list = list_installations(page_size=1000, cursor=cursor, detailed=True)
cursor = installation_list.cursor
if not installation_list.more:
has_more = False
for installation in installation_list.results:
if installation.timestamp <= max_timestamp:
has_more = False
else:
firebase_data[installation.id] = installation.status
# timestamp might not be the most accurate but good enough
ticker_entries.append(get_ticker_entry_for_installation(installation, []))
put_firebase_data('/dashboard/installations.json', firebase_data)
return ticker_entries
@arguments(installation=InstallationTO, logs=[InstallationLogTO])
def update_firebase_installation(installation, logs):
# type: (InstallationTO, list[InstallationLogTO]) -> None
ticker_entry = get_ticker_entry_for_installation(installation, logs)
put_firebase_data('/dashboard/installations.json' % {installation.id, installation.status})
put_firebase_data('/dashboard/ticker/%s.json' % ticker_entry.id, ticker_entry.to_dict())
def rebuild_firebase_data():
# Removes all /dashboard data from firebase and rebuilds it
# Shouldn't be run more than once a month if all goes well
ticker_entries = []
remove_firebase_data('dashboard.json')
date = datetime.now() - relativedelta(days=7)
ticker_entries.extend(rebuild_installation_stats(date))
ticker_entries.extend(rebuild_flow_stats(date))
put_firebase_data('/dashboard/ticker.json', {entry.id: entry.to_dict() for entry in ticker_entries})
|
Python
| 0.000001
|
@@ -3088,18 +3088,17 @@
ns.json'
- %25
+,
%7Binstal
|
0c7702333355b185027f8bdc06b6e31d3968712e
|
Update live.py
|
fxcmminer_v1.0/live.py
|
fxcmminer_v1.0/live.py
|
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from db_manager import DatabaseManager
import time
import datetime
import forexconnect as fx
import settings as s
from event import LiveDataEvent
import re
class LiveDataMiner(object):
"""
Creates time-based events for collecting FXCM data.
Information on this API
https://apscheduler.readthedocs.io/en/latest/
https://github.com/agronholm/apscheduler/
"""
def __init__(self, events_queue, event):
while True:
try:
self.fxc = fx.ForexConnectClient(str(s.FX_USER),
str(s.FX_PASS),
str(s.FX_ENVR))
if self.fxc.is_connected() == True:
break
except RuntimeError:
pass
self.events_queue = events_queue
self.offer = event.offer
self.executors = {'processpool': ProcessPoolExecutor(100)
}
self.job_defaults = {
'coalesce': True,
'max_instances': 12
}
self.sched = BackgroundScheduler(executors=self.executors)
self.sched.add_job(self.min_1,'cron', minute='0-59')
self.sched.add_job(self.min_5, 'cron', minute='0,5,10,15,20,25,30,35,40,45,50,55')
self.sched.add_job(self.min_15, 'cron', minute='0,15,30,45')
self.sched.add_job(self.min_30, 'cron', minute='0,30')
self.sched.add_job(self.hour_1, 'cron', hour='0-23')
self.sched.add_job(self.hour_2, 'cron', hour='0,2,4,6,8,10,12,14,16,18,20,22')
self.sched.add_job(self.hour_4, 'cron', hour='0,4,8,12,16,20')
self.sched.add_job(self.hour_8, 'cron', hour='0,8,16')
self.sched.add_job(self.day_1, 'cron', hour='0')
self.sched.add_job(self.week_1, 'cron', day_of_week='sun',hour='17')
self.sched.add_job(self.month_1, 'cron', day='1', hour='17')
def min_1(self):
tf = 'm1'
p = 1
self.get_live(tf, p, self.fxc)
def min_5(self):
tf = 'm5'
p = 2
self.get_live(tf, p, self.fxc)
def min_15(self):
tf = 'm15'
p = 3
self.get_live(tf, p, self.fxc)
def min_30(self):
tf = 'm30'
p = 4
self.get_live(tf, p, self.fxc)
def hour_1(self):
tf = 'H1'
p = 5
self.get_live(tf, p, self.fxc)
def hour_2(self):
tf = 'H2'
p = 6
self.get_live(tf, p, self.fxc)
def hour_4(self):
tf = 'H4'
p = 7
self.get_live(tf, p, self.fxc)
def hour_8(self):
tf = 'H8'
p = 8
self.get_live(tf, p, self.fxc)
def day_1(self):
tf = 'D1'
p = 9
self.get_live(tf, p, self.fxc)
def week_1(self):
tf = 'W1'
p = 10
self.get_live(tf, p, self.fxc)
def month_1(self):
tf = 'M1'
p = 11
self.get_live(tf, p, self.fxc)
def get_live(self, time_frame, priority, fxc):
"""
"""
fm_date = DatabaseManager().return_date(self.offer, time_frame)
fm_date = fm_date + datetime.timedelta(minutes = 1)
to_date = datetime.datetime.now()
instrument = self.offer
try:
data = fxc.get_historical_prices(
str(instrument), fm_date,
to_date, str(time_frame))
data = [d.__getstate__()[0] for d in data]
except (KeyError, IndexError):
data = []
if data != []:
self.events_queue.put(LiveDataEvent(
data, instrument, time_frame))
del data
def start_timers(self):
self.sched.start()
|
Python
| 0.000001
|
@@ -387,20 +387,16 @@
M data.%0A
-
%0A Inf
|
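The scheduler above enumerates every cron minute and hour by hand; APScheduler's cron trigger also accepts step syntax, so an equivalent minimal sketch is:

# Minimal sketch of the same cron scheduling using step syntax
# ('*/5') instead of enumerating each minute or hour.
from apscheduler.schedulers.background import BackgroundScheduler

def poll(time_frame):
    print('collect', time_frame)

sched = BackgroundScheduler()
sched.add_job(poll, 'cron', minute='*', args=['m1'])
sched.add_job(poll, 'cron', minute='*/5', args=['m5'])
sched.add_job(poll, 'cron', hour='*/4', minute='0', args=['H4'])
sched.start()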
dca5bd7d16866c0badf6c9d4ae69335aacef9f6d
|
Add sleep-and-try-again when getting MB info
|
audio_pipeline/util/MBInfo.py
|
audio_pipeline/util/MBInfo.py
|
__author__ = 'cephalopodblue'
import musicbrainzngs as ngs
from . import Util
class MBInfo():
default_server = ngs.hostname
def __init__(self, server=None, backup_server=None, useragent=("hidat_audio_pipeline", "0.1")):
if server is not None and server != self.default_server:
ngs.set_hostname(server)
self.backup_server = backup_server
ngs.set_useragent(useragent[0], useragent[1])
#####
# == Get Release
# Retrieves a raw release from MusicBrainz using their API
#####
def get_release(self, release_id):
if Util.is_mbid(release_id):
include=["artist-credits", "recordings", "isrcs", "media", "release-groups", "labels", "artists"]
try:
mb_release = ngs.get_release_by_id(release_id, includes=include)['release']
except ngs.ResponseError as e:
# probably a bad request / mbid
# propagate up
raise e
except ngs.NetworkError as e:
# can't reach the musicbrainz server - if we have a local, try hitting it?
mb_release = None
# propagate error up
raise e
return mb_release
#####
# == Get artist
# Retrieves raw artist metadata from MusicBrainz using their API
#####
def get_artist(self, artist_id):
if Util.is_mbid(artist_id):
include=["aliases", "url-rels", "annotation", "artist-rels"]
try:
mb_artist = ngs.get_artist_by_id(artist_id, includes=include)['artist']
return mb_artist
except ngs.ResponseError as e:
# probably a bad request / mbid
# propagate up
raise e
except ngs.NetworkError as e:
# can't reach the musicbrainz server - if we have a local, try hitting it?
mb_artist = None
# propagate error up
raise e
|
Python
| 0
|
@@ -71,16 +71,28 @@
rt Util%0A
+import time%0A
%0A%0Aclass
@@ -1104,44 +1104,310 @@
r -
-if we have a local, try hitting it?%0A
+wait 10 seconds and try again%0A time.sleep(10)%0A try: %0A mb_release = ngs.get_release_by_id(release_id, includes=include)%5B'release'%5D%0A except ngs.NetworkError as e:%0A # if we still can't reach it, propagate up the error%0A
@@ -1444,32 +1444,36 @@
+
# propagate erro
@@ -1469,32 +1469,36 @@
pagate error up%0A
+
|
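The fix above retries exactly once after a fixed 10-second pause. A generic sketch of the same sleep-and-retry pattern with a bounded attempt count:

# Generic sleep-and-retry helper in the spirit of the change above.
import time

def retry(func, attempts=3, delay=10, exceptions=(Exception,)):
    for attempt in range(attempts):
        try:
            return func()
        except exceptions:
            if attempt == attempts - 1:
                raise  # out of attempts, propagate up
            time.sleep(delay)

# e.g. retry(lambda: ngs.get_release_by_id(release_id, includes=include),
#            exceptions=(ngs.NetworkError,))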
bf188dfae49ab23c8f5dd7eeb105951f6c068b7f
|
Add filtering by is_circle to Role admin.
|
backend/feedbag/role/admin.py
|
backend/feedbag/role/admin.py
|
from django.contrib import admin
from .models import Role
@admin.register(Role)
class RoleAdmin(admin.ModelAdmin):
list_display = ('name',
'is_circle',
'parent',
'purpose',
'archived',
)
list_filter = ('archived',)
actions = ['archive_role']
def archive_role(self, request, queryset):
for role in queryset:
role.archive()
self.message_user(request, "Roles were successfully archived.")
archive_role.short_description = 'Archive selected roles'
|
Python
| 0
|
@@ -30,86 +30,419 @@
min%0A
-%0Afrom .models import Role%0A%0A%0A@admin.register(Role)%0Aclass RoleAdmin(admin.M
+from django.utils.translation import ugettext as _%0A%0Afrom .models import Role%0A%0A%0Aclass IsCircleListFilter(admin.SimpleListFilter):%0A # Human-readable title which will be displayed in the%0A # right admin sidebar just above the filter options.%0A title = _('Is Circle')%0A%0A # Parameter for the filter that will be used in the URL query.%0A parameter_name = 'is_circle'%0A%0A def lookups(self, request, m
odel
-A
+_a
dmin
@@ -452,31 +452,282 @@
-list_display = ('name',
+ %22%22%22%0A Returns a list of tuples. The first element in each%0A tuple is the coded value for the option that will%0A appear in the URL query. The second element is the%0A human-readable name for the option that will appear%0A in the right sidebar.
%0A
@@ -727,32 +727,36 @@
ar.%0A
+%22%22%22%0A
'is_circ
@@ -747,40 +747,139 @@
- 'is_circle',%0A
+return (('is_circle', _('Is circle')), ('is_not_circle', _('Is not circle')),)%0A%0A def queryset(self, request, queryset):%0A
@@ -878,53 +878,201 @@
+%22%22%22%0A
-'parent',%0A 'purpose',
+ Returns the filtered queryset based on the value%0A provided in the query string and retrievable via%0A %60self.value()%60.%0A %22%22%22%0A if self.value() == 'is_circle':
%0A
@@ -1084,78 +1084,370 @@
- 'archived',%0A )%0A list_filter = ('archived
+return queryset.filter(children__isnull=False)%0A elif self.value() == 'is_not_circle':%0A return queryset.filter(children__isnull=True)%0A%0A%0A@admin.register(Role)%0Aclass RoleAdmin(admin.ModelAdmin):%0A list_display = ('name', 'is_circle', 'parent', 'purpose', 'archived',)%0A list_filter = ('archived', IsCircleListFilter)%0A search_fields = ('name
',)%0A
@@ -1654,16 +1654,17 @@
ived.%22)%0A
+%0A
arch
|
0fac23c22307ca598e0cc6712280903c2a7d559d
|
Improve auto battle module
|
gbf_bot/auto_battle.py
|
gbf_bot/auto_battle.py
|
import logging
import random
import time
import pyautogui
from . import auto_battle_config as config
from .components import Button
logger = logging.getLogger(__name__)
attack = Button('attack.png', config['attack'])
auto = Button('auto.png', config['auto'])
def activate(battle_time):
pyautogui.PAUSE = 1.3
time.sleep(5 + random.random() * 0.25)
logger.info('click attack')
attack.double_click()
time.sleep(random.random() * 0.35)
logger.info('click auto')
auto.click()
# battle result
time.sleep(battle_time + random.random() * 3)
|
Python
| 0.000001
|
@@ -51,16 +51,52 @@
autogui%0A
+from . import top_left, window_size%0A
from . i
@@ -130,16 +130,38 @@
config%0A
+from . import utility%0A
from .co
@@ -222,16 +222,17 @@
ame__)%0A%0A
+%0A
attack =
@@ -372,50 +372,300 @@
1.3%0A
- time.sleep(5 + random.random() * 0.25)
+%0A # wait before battle start%0A w, h = window_size%0A start_pt = (top_left%5B0%5D + w//2, top_left%5B1%5D + h*1//3)%0A region = start_pt + (w//2, h*2//3)%0A while True:%0A time.sleep(0.5)%0A found = utility.locate(attack.path, region)%0A if found is not None:%0A break%0A
%0A
@@ -826,16 +826,16 @@
result%0A
+
time
@@ -856,28 +856,6 @@
time
- + random.random() * 3
)%0A
|
d37c2328a8ed58778f4c39091add317878831b4e
|
increment version
|
grizli/version.py
|
grizli/version.py
|
# git describe --tags
__version__ = "0.7.0-41-g39ad8ff"
|
Python
| 0.000004
|
@@ -41,16 +41,16 @@
.0-4
-1-g39ad8ff
+7-g6450ea1
%22%0A
|
d30d10a477f0b46fa73da76cb1b010e1376c3ff2
|
Update version for a new PYPI package.
|
gtable/version.py
|
gtable/version.py
|
__version__ = '0.6.2'
|
Python
| 0
|
@@ -14,9 +14,7 @@
'0.
-6.2
+7
'%0A
|
311be8c11b513fd2b3d2bb4427b5bc0b43c2539c
|
Move reverse along with geometry
|
caminae/core/forms.py
|
caminae/core/forms.py
|
from math import isnan
from django.forms import ModelForm
from django.contrib.gis.geos import LineString
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
import floppyforms as forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Field, Submit, Div
from crispy_forms.bootstrap import FormActions
from .models import Path
from .widgets import LineStringWidget
class PathForm(ModelForm):
geom = forms.gis.LineStringField(widget=LineStringWidget)
reverse_geom = forms.BooleanField(
required=False,
label = _("Reverse geometry"),
help_text = _("The geometry will be reversed once saved"),
)
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.layout = Layout(
Div('name',
'structure',
'stake',
'trail',
Field('comments', css_class='input-xlarge'),
'datasource',
'networks',
'usages',
'valid',
'reverse_geom',
css_class="span4",
),
Div('geom',
css_class="span7",),
FormActions(
Submit('cancel', 'Cancel'),
Submit('save_changes', _('Save changes'), css_class="btn-primary offset1"),
css_class="form-actions span11",
)
)
def __init__(self, *args, **kwargs):
super(PathForm, self).__init__(*args, **kwargs)
if self.instance.pk:
self.helper.form_action = self.instance.get_update_url()
else:
self.helper.form_action = reverse("core:path_add")
def save(self, commit=True):
path = super(PathForm, self).save(commit=False)
if self.cleaned_data.get('reverse_geom'):
# path.geom.reverse() won't work for 3D coords
reversed_coord = path.geom.coords[-1::-1]
# FIXME: why do we have to filter NaN values?! Why are they here in the first place?
valid_coords = [ (x, y, 0.0 if isnan(z) else z) for x, y, z in reversed_coord ]
path.geom = LineString(valid_coords)
if commit:
path.save()
return path
class Meta:
model = Path
exclude = ('geom_cadastre',)
|
Python
| 0.000001
|
@@ -630,24 +630,20 @@
Reverse
-geometry
+path
%22),%0A
@@ -672,16 +672,12 @@
The
-geometry
+path
wil
@@ -1063,23 +1063,8 @@
-'reverse_geom',
%0A
@@ -1122,16 +1122,44 @@
'geom',%0A
+ 'reverse_geom',%0A
|
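In isolation, the coordinate handling in save() above (reverse the 3D point order, then zero out NaN elevations) looks like this:

# Isolated sketch of the geometry reversal done in save() above.
from math import isnan

coords = [(0.0, 0.0, 10.0), (1.0, 1.0, float('nan')), (2.0, 0.0, 12.0)]
reversed_coords = coords[-1::-1]                 # reverse the point order
valid_coords = [(x, y, 0.0 if isnan(z) else z)   # replace NaN elevations
                for x, y, z in reversed_coords]
# valid_coords == [(2.0, 0.0, 12.0), (1.0, 1.0, 0.0), (0.0, 0.0, 10.0)]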
f9dadd363d9f370d884c0b127e1f63df8916f3c8
|
Rename some functions
|
calculation.py
|
calculation.py
|
"""Calculation functions."""
from __future__ import division
import numpy as np
from scipy.signal import butter, freqz
def deconv_process(excitation, system_response, fs):
"""Deconvolution.
It is necessary to zero-pad the excitation signal
to avoid circular artifacts if the system response is longer
than the excitation signal. Therefore, the excitation signal is
extended by a freely chosen 5 seconds by default. If you want
to simulate the 'Cologne Cathedral', feel free to zero-pad
more seconds.
"""
NFFT = _pow2(len(excitation) + len(system_response) - 1)
excitation_f = np.fft.fft(excitation, NFFT)
excitation_f_inv = 1 / excitation_f
# butter_w, butter_h = butter_bandpass(20, 20000, fs, NFFT, order=2)
return np.fft.ifft(np.fft.fft(system_response, NFFT) * excitation_f_inv).real
def snr_db(signal, noise):
"""Calculating Signal-to-noise ratio.
Parameters
----------
signal : array_like
Signal vector
noise : array_like
Noise vector
Returns
-------
Return SNR in dB
"""
return 10 * np.log10(_mean_power(signal) / _mean_power(noise))
def _mean_power(signal):
return np.mean(np.abs(signal ** 2))
def _pow2(n):
i = 1
while i < n:
i *= 2
return i
def coherency(excitation, system_response):
Rxx = np.correlate(excitation, excitation, 'full')
Ryy = np.correlate(system_response, system_response, 'full')
Ryx = np.correlate(system_response, excitation, 'full')
return np.abs(Ryx) ** 2 / (Rxx * Ryy)
def butter_bandpass(lower_bound, higher_bound, fs, NFFT, order):
wl = lower_bound / (fs / 2)
wh = higher_bound / (fs / 2)
b, a = butter(order, [wl, wh], btype='band')
butter_w, butter_h = freqz(b, a, worN=NFFT, whole=True)
return butter_w, butter_h
def limiter(signal, threshold_dB):
array_positions = np.where(signal < threshold_dB)
signal[array_positions] = threshold_dB
return signal
def noise_db(level, size=None, seed=1):
scale = 10 ** (level / 20.)
np.random.seed(seed)
return np.random.normal(scale=scale, size=size)
|
Python
| 0.00292
|
@@ -73,16 +73,71 @@
y as np%0A
+from . import plotting%0Aimport matplotlib.pyplot as plt%0A
from sci
@@ -402,195 +402,8 @@
nal.
- Therfore, the excitation signal has%0A been extended for freely chosen 5 seconds as default. If you want%0A to simulate the 'Cologne Cathedral', feel free to zeropadd%0A more seconds.
%0A
@@ -490,24 +490,25 @@
_f = np.fft.
+r
fft(excitati
@@ -604,17 +604,17 @@
ass(
-2
+1
0, 2
-0000
+2049
, fs
@@ -615,24 +615,29 @@
49, fs, NFFT
+//2+1
, order=2)%0A
@@ -654,16 +654,17 @@
np.fft.i
+r
fft(np.f
@@ -666,16 +666,17 @@
(np.fft.
+r
fft(syst
@@ -717,13 +717,8 @@
inv)
-.real
%0A%0A%0Ad
@@ -1084,23 +1084,21 @@
(np.
-abs
+square
(signal
- ** 2
))%0A%0A
@@ -1173,276 +1173,8 @@
i%0A%0A%0A
-def coherency(excitation, system_response):%0A Rxx = np.correlate(excitation, excitation, 'full')%0A Ryy = np.correlate(system_response, system_response, 'full')%0A Ryx = np.correlate(system_response, excitation, 'full')%0A return np.abs(Ryx) ** 2 / (Rxx * Ryy)%0A%0A%0A
def
@@ -1439,17 +1439,19 @@
tter_h%0A%0A
-%0A
+#~
def limi
@@ -1460,16 +1460,22 @@
r(signal
+_f_inv
, thresh
@@ -1491,104 +1491,471 @@
-array_positions = np.where(signal %3C threshold_dB)%0A signal%5Barray_positions%5D = threshold_dB
+#~ signal_f_inv_abs = np.abs(signal_f_inv)%0A #~ signal_f_inv_phase = np.angle(signal_f_inv)%0A #~ signal_f_inv_abs_dB = plotting._dB_calculation(signal_f_inv_abs)%0A #~ array_positions = np.where(signal_f_inv_abs_dB %3E signal_f_inv_abs_dB.max() + threshold_dB)%0A #~ threshold = 10**((signal_f_inv_abs_dB.max()+threshold_dB)/20)%0A #~ signal_f_inv_abs%5Barray_positions%5D = threshold%0A #~ signal_f_inv = signal_f_inv_abs * np.exp(1j*signal_f_inv_phase)
%0A
+ #~
ret
@@ -1968,23 +1968,31 @@
gnal
+_f_inv
%0A%0A%0Adef
+awgn_
noise
-_db
(lev
@@ -2124,8 +2124,290 @@
e=size)%0A
+%0A#~ def coherency(excitation, system_response):%0A #~ Rxx = np.correlate(excitation, excitation, 'full')%0A #~ Ryy = np.correlate(system_response, system_response, 'full')%0A #~ Ryx = np.correlate(system_response, excitation, 'full')%0A #~ return np.abs(Ryx) ** 2 / (Rxx * Ryy)%0A
|
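deconv_process recovers the impulse response by spectral division, h = IFFT(FFT(y) / FFT(x)), with the FFT length rounded up to a power of two. A tiny self-contained check of that idea:

# Self-contained check of spectral-division deconvolution: convolve a
# known impulse response with an excitation, then recover it.
import numpy as np

np.random.seed(0)
excitation = np.random.randn(256)
h_true = np.zeros(32)
h_true[[0, 5, 20]] = [1.0, 0.5, 0.25]
response = np.convolve(excitation, h_true)  # len 256 + 32 - 1 = 287

nfft = 1024  # power of two >= len(excitation) + len(response) - 1
H = np.fft.fft(response, nfft) / np.fft.fft(excitation, nfft)
h_est = np.fft.ifft(H).real[:len(h_true)]
assert np.allclose(h_est, h_true, atol=1e-8)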
88f36912de48a84e4e3778889948f85655ba9064
|
Remove token logging
|
canis/oauth.py
|
canis/oauth.py
|
from os import environ
from urllib import urlencode
from datetime import datetime, timedelta
from flask import Flask, request, redirect
import requests
app = Flask(__name__)
SPOTIFY_CLIENT_ID = environ['CANIS_SPOTIFY_API_CLIENT_ID']
SPOTIFY_SECRET = environ['CANIS_SPOTIFY_API_SECRET']
SPOTIFY_CALLBACK = environ.get('CANIS_SPOTIFY_API_CALLBACK', 'http://127.0.0.1:5000/callback/')
access_token = None
refresh_token = None
expiration = None
@app.route('/login')
def login():
args = {
'client_id': SPOTIFY_CLIENT_ID,
'response_type': 'code',
'redirect_uri': SPOTIFY_CALLBACK,
'scope': 'playlist-read-private playlist-modify-private playlist-modify-public',
}
arg_str = urlencode(args)
url = 'https://accounts.spotify.com/authorize?{}'.format(arg_str)
return redirect(url)
@app.route('/callback/')
def callback():
args = {
'code': request.args['code'],
'grant_type': 'authorization_code',
'redirect_uri': SPOTIFY_CALLBACK,
'client_id': SPOTIFY_CLIENT_ID,
'client_secret': SPOTIFY_SECRET
}
r = requests.post('https://accounts.spotify.com/api/token', data=args)
resp = r.json()
store_token_response(resp)
shutdown_server()
return "You're good to go"
def refresh():
args = {
'refresh_token': refresh_token,
'grant_type': 'refresh_token',
'client_id': SPOTIFY_CLIENT_ID,
'client_secret': SPOTIFY_SECRET
}
r = requests.post('https://accounts.spotify.com/api/token', data=args)
resp = r.json()
store_token_response(resp)
def store_token_response(resp):
global access_token
global refresh_token
global expiration
access_token = resp['access_token']
if resp.get('refresh_token'):
refresh_token = resp['refresh_token']
expiration = datetime.utcnow() + timedelta(seconds=int(resp['expires_in']))
print (access_token, refresh_token, expiration)
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
|
Python
| 0.000001
|
@@ -1892,60 +1892,8 @@
'%5D))
-%0A print (access_token, refresh_token, expiration)
%0A%0Ade
|
a6a95a5eec833c512d08f962b22c4c177d6021b0
|
Fix Resource leak on ASyncStaticFiles (#588)
|
apistar/server/staticfiles.py
|
apistar/server/staticfiles.py
|
import os
import typing
from http import HTTPStatus
from importlib.util import find_spec
from apistar import exceptions
from apistar.compat import aiofiles, whitenoise
class BaseStaticFiles():
def __call__(self, environ, start_response):
raise NotImplementedError()
class StaticFiles(BaseStaticFiles):
"""
Static file handling for WSGI applications, using `whitenoise`.
"""
def __init__(self, prefix: str, static_dir: str=None, packages: typing.Sequence[str]=None):
self.check_requirements()
self.whitenoise = whitenoise.WhiteNoise(application=self.not_found)
if static_dir is not None:
self.whitenoise.add_files(static_dir, prefix=prefix)
for package in packages or []:
package_dir = os.path.dirname(find_spec(package).origin)
package_dir = os.path.join(package_dir, 'static')
package_prefix = prefix.rstrip('/') + '/' + package
self.whitenoise.add_files(package_dir, prefix=package_prefix)
def check_requirements(self):
if whitenoise is None:
raise RuntimeError('`whitenoise` must be installed to use `StaticFiles`.')
def __call__(self, environ, start_response):
return self.whitenoise(environ, start_response)
def not_found(self, environ, start_response):
raise exceptions.NotFound()
class ASyncStaticFiles(StaticFiles):
"""
Static file handling for ASGI applications, using `whitenoise` and `aiofiles`.
"""
def check_requirements(self):
if whitenoise is None:
raise RuntimeError('`whitenoise` must be installed to use `ASyncStaticFiles`.')
if aiofiles is None:
raise RuntimeError('`aiofiles` must be installed to use `ASyncStaticFiles`.')
def __call__(self, scope):
path = scope['path'].encode('iso-8859-1', 'replace').decode('utf-8', 'replace')
if self.whitenoise.autorefresh:
static_file = self.whitenoise.find_file(path)
else:
static_file = self.whitenoise.files.get(path)
if static_file is None:
async def not_found(receive, send):
raise exceptions.NotFound()
return not_found
else:
return ASGIFileSession(static_file, scope)
class ASGIFileSession():
def __init__(self, static_file, scope):
self.static_file = static_file
self.scope = scope
self.headers = {}
for key, value in scope['headers']:
wsgi_key = 'HTTP_' + key.decode().upper().replace('-', '_')
wsgi_value = value.decode()
self.headers[wsgi_key] = wsgi_value
async def __call__(self, receive, send):
status, headers, file = await self.get_response(self.scope['method'], self.headers)
await send({
'type': 'http.response.start',
'status': status.value,
'headers': [
(key.lower().encode(), value.encode())
for key, value in headers
]
})
if file is None:
await send({
'type': 'http.response.body',
'body': b''
})
else:
chunk = await file.read(8192)
more_body = True
while more_body:
next_chunk = await file.read(8192)
more_body = bool(next_chunk)
await send({
'type': 'http.response.body',
'body': chunk,
'more_body': more_body
})
chunk = next_chunk
async def get_response(self, method, request_headers):
if method != 'GET' and method != 'HEAD':
return (
HTTPStatus.METHOD_NOT_ALLOWED,
(('Allow', 'GET, HEAD'),),
None
)
elif self.static_file.file_not_modified(request_headers):
return self.static_file.not_modified_response
path, headers = self.static_file.get_path_and_headers(request_headers)
if method != 'HEAD':
file_handle = await aiofiles.open(path, 'rb')
else:
file_handle = None
return (HTTPStatus.OK, headers, file_handle)
|
Python
| 0.000072
|
@@ -3180,32 +3180,53 @@
)%0A else:%0A
+ try:%0A
chun
@@ -3255,32 +3255,36 @@
92)%0A
+
more_body = True
@@ -3281,24 +3281,28 @@
ody = True%0A%0A
+
@@ -3334,16 +3334,20 @@
+
next_chu
@@ -3381,32 +3381,36 @@
+
more_body = bool
@@ -3431,32 +3431,36 @@
+
+
await send(%7B%0A
@@ -3448,32 +3448,36 @@
await send(%7B%0A
+
@@ -3522,32 +3522,36 @@
+
+
'body': chunk,%0A
@@ -3549,16 +3549,20 @@
chunk,%0A
+
@@ -3596,16 +3596,20 @@
re_body%0A
+
@@ -3627,32 +3627,36 @@
+
chunk = next_chu
@@ -3657,16 +3657,104 @@
xt_chunk
+%0A finally:%0A # Free resource%0A await file.close()
%0A%0A as
|
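The fix above wraps the chunked send loop in try/finally so the aiofiles handle is closed even when the client disconnects mid-stream. The general shape of that pattern, extracted into a standalone coroutine (names are illustrative):

# General shape of the resource-safe chunked send introduced above.
import aiofiles

async def stream_file(path, send, chunk_size=8192):
    file = await aiofiles.open(path, 'rb')
    try:
        chunk = await file.read(chunk_size)
        more_body = True
        while more_body:
            next_chunk = await file.read(chunk_size)
            more_body = bool(next_chunk)
            await send({'type': 'http.response.body',
                        'body': chunk,
                        'more_body': more_body})
            chunk = next_chunk
    finally:
        await file.close()  # free the handle even if send() raises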
59f765b8f383ae915131b9588d5867fc42dabde7
|
fix sshpass command in test
|
cappat/jobs.py
|
cappat/jobs.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Utilities: Agave wrapper for sherlock
"""
import os
from os import path as op
from errno import EEXIST
import socket
from subprocess import check_output
import pkg_resources as pkgr
from cappat.tpl import Template
SHERLOCK_SBATCH_TEMPLATE = pkgr.resource_filename('cappat.tpl', 'sherlock-sbatch.jnj2')
SHERLOCK_SBATCH_DEFAULTS = {
'nodes': 1,
'time': '01:00:00',
'mincpus': 4,
'mem_per_cpu': 8000,
'modules': ['load singularity'],
'partition': 'russpold',
'job_name': 'crn-bidsapp',
'job_log': 'logs/crn-bidsapp'
}
class TaskManager:
"""
A task manager factory class
"""
@staticmethod
def build(task_list, slurm_settings=None, temp_folder=None):
"""
Get the appropriate TaskManager object
"""
hostname = _gethostname()
if not hostname:
raise RuntimeError('Could not identify execution system')
if hostname.endswith('ls5.tacc.utexas.edu'):
raise NotImplementedError
elif hostname.endswith('stanford.edu'):
return SherlockSubmission(task_list, slurm_settings, temp_folder)
elif hostname.endswith('stampede.tacc.utexas.edu'):
raise NotImplementedError
elif hostname.startswith('box') and hostname.endswith('.localdomain'):
return CircleCISubmission(task_list, slurm_settings, temp_folder)
else:
raise RuntimeError(
'Could not identify "{}" as a valid execution system'.format(hostname))
class TaskSubmissionBase(object):
def __init__(self, task_list, slurm_settings=None, temp_folder=None):
if not task_list:
raise RuntimeError('a list of tasks is required')
self.task_list = task_list
missing = list(set(self._get_mandatory_fields()) -
set(list(slurm_settings.keys())))
if missing:
raise RuntimeError('Error filling up template with missing fields:'
' {}.'.format("'%s'".join(missing)))
self.slurm_settings = slurm_settings
if temp_folder is None:
temp_folder = op.join(os.getcwd(), 'log')
_check_folder(temp_folder)
self.temp_folder = temp_folder
self.sbatch_files = self._generate_sbatch()
self.job_ids = []
def _generate_sbatch(self):
raise NotImplementedError
def _get_mandatory_fields(self):
raise NotImplementedError
def submit(self):
"""
Submits a list of sbatch files and returns the assigned job ids
"""
raise NotImplementedError
def children_yield(self):
"""
Busy wait until all jobs in the list are done
"""
raise NotImplementedError
class SherlockSubmission(TaskSubmissionBase):
"""
The Sherlock submission
"""
def __init__(self, task_list, slurm_settings=None, temp_folder=None):
def_settings = SHERLOCK_SBATCH_DEFAULTS.copy()
if not slurm_settings is None:
def_settings.update(slurm_settings)
super(SherlockSubmission, self).__init__(
task_list, def_settings, temp_folder)
def _get_mandatory_fields(self):
return list(SHERLOCK_SBATCH_DEFAULTS.keys())
def _generate_sbatch(self):
"""
Generates one sbatch file per task
"""
slurm_settings = self.slurm_settings.copy()
sbatch_files = []
for i, task in enumerate(self.task_list):
sbatch_files.append(op.join(self.temp_folder, 'slurm-%06d.sbatch' % i))
slurm_settings['commandline'] = task
conf = Template(SHERLOCK_SBATCH_TEMPLATE)
conf.generate_conf(slurm_settings, sbatch_files[-1])
return sbatch_files
def submit(self):
"""
Submits a list of sbatch files and returns the assigned job ids
"""
for slurm_job in self.sbatch_files:
# run sbatch
slurm_result = check_output(['sbatch', slurm_job])
# parse output and get job id
class CircleCISubmission(SherlockSubmission):
def submit(self):
"""
Submits a list of sbatch files and returns the assigned job ids
"""
for i, slurm_job in enumerate(self.sbatch_files):
# run sbatch
slurm_job = os.path.basename(slurm_job)
slurm_result = check_output([
'sshpass', '-p', 'testuser',
'ssh' '-p', '10022', 'testuser@localhost',
'sbatch', os.path.join('/scratch/slurm', slurm_job)])
# parse output and get job id
with open('/scratch/slurm/slurm-output%04d.txt' % i, 'w') as sfile:
sfile.write(slurm_result)
print slurm_result
def _gethostname():
hostname = socket.gethostname()
if len(hostname.strip('.')) == 1 and hostname.startswith('login'):
# This is here because ls5 returns only the login node name 'loginN'
fqdns = list(
set([socket.getfqdn(i[4][0])
for i in socket.getaddrinfo(socket.gethostname(), None)]))
hostname = fqdns[0]
return hostname
def _check_folder(folder):
if not op.exists(folder):
try:
os.makedirs(folder)
except OSError as exc:
if not exc.errno == EEXIST:
raise
|
Python
| 0.000004
|
@@ -4615,16 +4615,17 @@
'ssh'
+,
'-p', '
|
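The bug fixed here is Python's implicit string-literal concatenation: without the comma, 'ssh' '-p' is a single argument 'ssh-p', so the remote command line is silently malformed. A minimal illustration:

# Implicit string concatenation pitfall fixed by the commit above.
args_buggy = ['sshpass', '-p', 'testuser', 'ssh' '-p', '10022']
args_fixed = ['sshpass', '-p', 'testuser', 'ssh', '-p', '10022']
assert args_buggy[3] == 'ssh-p'            # two literals fused into one
assert args_fixed[3:5] == ['ssh', '-p']    # separate argv entries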
4556add4d9c3645559e51005129dcc65bd0b00ca
|
__VERSION__ changed
|
stop_words/__init__.py
|
stop_words/__init__.py
|
import json
import os
__VERSION__ = (2015, 2, 21)
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
STOP_WORDS_DIR = os.path.join(CURRENT_DIR, 'stop-words')
STOP_WORDS_CACHE = {}
with open(os.path.join(STOP_WORDS_DIR, 'languages.json'), 'rb') as map_file:
buffer = map_file.read()
buffer = buffer.decode('ascii')
LANGUAGE_MAPPING = json.loads(buffer)
AVAILABLE_LANGUAGES = list(LANGUAGE_MAPPING.values())
def get_version():
"""
:rtype: basestring
"""
return ".".join(str(v) for v in __VERSION__)
class StopWordError(Exception):
pass
def get_stop_words(language, cache=True):
"""
:type language: basestring
:rtype: list
"""
try:
language = LANGUAGE_MAPPING[language]
except KeyError:
if language not in AVAILABLE_LANGUAGES:
raise StopWordError('"{0}" language is unavailable.'.format(
language
))
if cache and language in STOP_WORDS_CACHE:
return STOP_WORDS_CACHE[language]
language_filename = os.path.join(STOP_WORDS_DIR, language + '.txt')
try:
with open(language_filename, 'rb') as language_file:
stop_words = [line.decode('utf-8').strip()
for line in language_file.readlines()]
stop_words = apply_filters(stop_words, language)
except IOError:
raise StopWordError(
'{0}" file is unreadable, check your installation.'.format(
language_filename
)
)
if cache:
STOP_WORDS_CACHE[language] = stop_words
return stop_words
_filters = {None: []}
def apply_filters(stopwords, language):
"""
Apply registered filters to stopwords
:param stopwords: list
:param language: string
:return: filtered stopwords
"""
if language in _filters:
for func in _filters[language]:
stopwords = func(stopwords)
for func in _filters[None]:
stopwords = func(stopwords, language)
return stopwords
def add_filter(func, language=None):
"""
Register filters for specific language.
If language == None the filter applies for all languages.
Filter will not apply for stop words in cache.
:param func: callable
:param language: string|None
:return:
"""
if not language in _filters:
_filters[language] = []
_filters[language].append(func)
def remove_filter(func, language=None):
"""
:param func:
:param language:
:return:
"""
if not (language in _filters and func in _filters[language]):
return False
_filters[language].remove(func)
return True
def safe_get_stop_words(language):
"""
:type language: basestring
:rtype: list
"""
try:
return get_stop_words(language)
except StopWordError:
return []
|
Python
| 0.999994
|
@@ -41,17 +41,17 @@
15, 2, 2
-1
+3
)%0ACURREN
|
94702fdf121bf82be7e9dbd11a5b271e0e9077b8
|
Fix fixtures logging. Serves me right for not testing
|
cozify/test/fixtures.py
|
cozify/test/fixtures.py
|
#!/usr/bin/env python3
import os, pytest, tempfile, datetime
from cozify import conftest, config, hub, cloud
from . import fixtures_devices as dev
@pytest.fixture
def default_hub():
barehub = lambda:0
config.setStatePath() # reset to default config
config.dump_state()
barehub.hub_id = hub.default()
barehub.name = hub.name(barehub.hub_id)
barehub.host = hub.host(barehub.hub_id)
barehub.token = hub.token(barehub.hub_id)
barehub.remote = hub.remote
return barehub
@pytest.fixture
def tmp_cloud():
with Tmp_cloud() as cloud:
yield cloud
@pytest.fixture
def live_cloud():
config.setStatePath() # reset to default
return cloud
@pytest.fixture
def id():
return 'deadbeef-aaaa-bbbb-cccc-dddddddddddd'
@pytest.fixture
def tmphub():
with tmp_hub() as hub:
yield hub
@pytest.fixture
def id():
return 'deadbeef-aaaa-bbbb-cccc-dddddddddddd'
@pytest.fixture
def devices():
return dev
@pytest.fixture
def livehub(request):
config.setStatePath() # default config assumed to be live
config.dump_state() # dump state so it's visible in failed test output
autoremote = getattr(request.module, "autoremote", True) # enable skipping ping
if autoremote:
log.debug('Livehub setup checking if connection valid.')
assert hub.ping()
else:
log.debug('Livehub setup skipped ping.')
return hub
class Tmp_cloud():
"""Creates a temporary cloud state with test data.
"""
def __init__(self):
self.configfile, self.configpath = tempfile.mkstemp()
self.section = 'Cloud'
self.email = 'example@example.com'
self.token = 'eyJkb20iOiJ1ayIsImFsZyI6IkhTNTEyIiwidHlwIjoiSldUIn0.eyJyb2xlIjo4LCJpYXQiOjE1MTI5ODg5NjksImV4cCI6MTUxNTQwODc2OSwidXNlcl9pZCI6ImRlYWRiZWVmLWFhYWEtYmJiYi1jY2NjLWRkZGRkZGRkZGRkZCIsImtpZCI6ImRlYWRiZWVmLWRkZGQtY2NjYy1iYmJiLWFhYWFhYWFhYWFhYSIsImlzcyI6IkNsb3VkIn0.QVKKYyfTJPks_BXeKs23uvslkcGGQnBTKodA-UGjgHg' # valid but useless jwt token.
self.expiry = datetime.timedelta(days=1)
self.now = datetime.datetime.now()
self.iso_now = self.now.isoformat().split(".")[0]
self.yesterday = self.now - datetime.timedelta(days=1)
self.iso_yesterday = self.yesterday.isoformat().split(".")[0]
def __enter__(self):
config.setStatePath(self.configpath)
cloud._setAttr('email', self.email)
cloud._setAttr('remotetoken', self.token)
cloud._setAttr('last_refresh', self.iso_yesterday)
return self
def __exit__(self, exc_type, exc_value, traceback):
os.remove(self.configpath)
if exc_type is not None:
debug.logger.error("%s, %s, %s" % (exc_type, exc_value, traceback))
return False
class tmp_hub():
"""Creates a temporary hub section (with test data) in a tmp_cloud
"""
def __init__(self):
self.id = 'deadbeef-aaaa-bbbb-cccc-dddddddddddd'
self.name = 'HubbyMcHubFace'
self.host = '127.0.0.1'
self.section = 'Hubs.{0}'.format(self.id)
self.token = 'eyJkb20iOiJ1ayIsImFsZyI6IkhTNTEyIiwidHlwIjoiSldUIn0.eyJyb2xlIjo4LCJpYXQiOjE1MTI5ODg5NjksImV4cCI6MTUxNTQwODc2OSwidXNlcl9pZCI6ImRlYWRiZWVmLWFhYWEtYmJiYi1jY2NjLWRkZGRkZGRkZGRkZCIsImtpZCI6ImRlYWRiZWVmLWRkZGQtY2NjYy1iYmJiLWFhYWFhYWFhYWFhYSIsImlzcyI6IkNsb3VkIn0.QVKKYyfTJPks_BXeKs23uvslkcGGQnBTKodA-UGjgHg' # valid but useless jwt token.
def __enter__(self):
self.cloud = Tmp_cloud() # this also initializes temporary state
config.state.add_section(self.section)
config.state[self.section]['hubname'] = self.name
config.state[self.section]['host'] = self.host
config.state[self.section]['hubtoken'] = self.token
config.state['Hubs']['default'] = self.id
print('Temporary state:')
config.dump_state()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is not None:
debug.logger.error("%s, %s, %s" % (exc_type, exc_value, traceback))
return False
config.state.remove_section(self.section)
def devices(self):
return dev.device_ids, dev.devices
|
Python
| 0
|
@@ -54,16 +54,25 @@
datetime
+, logging
%0A%0Afrom c
@@ -1248,32 +1248,36 @@
ote:%0A log
+ging
.debug('Livehub
@@ -1361,16 +1361,20 @@
log
+ging
.debug('
@@ -2674,36 +2674,31 @@
-debug.
logg
-er
+ing
.error(%22%25s,
@@ -3972,20 +3972,15 @@
-debug.
logg
-er
+ing
.err
|
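The fix above boils down to importing the stdlib logging module and calling it directly instead of the undefined log and debug.logger names. A self-contained sketch of the corrected pattern; ping_hub and report_failure are hypothetical stand-ins for the fixture code:

import logging

logging.basicConfig(level=logging.DEBUG)

def ping_hub(skip=False):
    if not skip:
        logging.debug('Livehub setup checking if connection valid.')
    else:
        logging.debug('Livehub setup skipped ping.')

def report_failure(exc_type, exc_value, traceback):
    # same call shape the diff installs in __exit__
    logging.error("%s, %s, %s" % (exc_type, exc_value, traceback))

ping_hub()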
1391dd0b084f2c26fd5d0afceb81ffb5daab5dcf
|
Remove path and user filter from admin
|
rest_framework_tracking/admin.py
|
rest_framework_tracking/admin.py
|
from django.contrib import admin
from .models import APIRequestLog
class APIRequestLogAdmin(admin.ModelAdmin):
date_hierarchy = 'requested_at'
list_display = ('id', 'requested_at', 'response_ms', 'status_code',
'user', 'method',
'path', 'remote_addr', 'host',
'query_params')
list_filter = ('user', 'path', 'method', 'status_code')
admin.site.register(APIRequestLog, APIRequestLogAdmin)
|
Python
| 0
|
@@ -362,24 +362,8 @@
= (
-'user', 'path',
'met
|
6051ef3a68db15b220e939240f7bfcb34db1c7c8
|
Check if cache value is not None, not truthy
|
tunigo/api.py
|
tunigo/api.py
|
from __future__ import unicode_literals
import time
import requests
from tunigo.cache import Cache
from tunigo.genre import Genre, SubGenre
from tunigo.playlist import Playlist
from tunigo.release import Release
BASE_URL = 'https://api.tunigo.com/v3/space'
BASE_QUERY = 'locale=en&product=premium&version=6.38.31&platform=web'
class Tunigo(object):
def __init__(self, region='all', max_results=1000, cache_time=3600):
self._region = region
self._max_results = max_results
self._cache = Cache(cache_time)
def __repr__(self):
return "Tunigo(region='{}', max_results={}, cache_time={})".format(
self._region,
self._max_results,
self._cache._cache_time)
def _get(self, key, options=''):
uri = ('{}/{}?region={}&per_page={}&{}'
.format(BASE_URL, key, self._region,
self._max_results, BASE_QUERY))
if options:
uri = '{}&{}'.format(uri, options)
result = requests.get(uri)
if (result.status_code != 200 or
'application/json' not in result.headers['content-type']):
return []
return result.json()['items']
def get_playlists(self, key, options='', cache_key=''):
if not cache_key:
cache_key = 'playlists-{}-{}'.format(key, options)
cache_value = self._cache.get(cache_key)
if cache_value:
return cache_value
else:
playlists = []
for item in self._get(key, options):
playlists.append(Playlist(item_array=item['playlist']))
self._cache.insert(cache_key, playlists)
return playlists
def get_featured_playlists(self):
return self.get_playlists('featured-playlists',
'dt={}'.format(time.strftime('%FT%H:01:00')),
'featured-playlists')
def get_top_lists(self):
return self.get_playlists('toplists')
def get_genres(self):
cache_key = 'genres'
cache_value = self._cache.get(cache_key)
if cache_value:
return cache_value
else:
genres = []
for item in self._get('genres'):
if item['genre']['templateName'] != 'toplists':
genres.append(Genre(item_array=item['genre']))
self._cache.insert(cache_key, genres)
return genres
def get_genre_playlists(self, genre=None, sub_genre=None):
if type(genre) == Genre:
genre_key = genre.key
else:
genre_key = genre
if type(sub_genre) == SubGenre:
sub_genre_key = sub_genre.key
if not genre_key:
genre_key = sub_genre.main_genre.key
else:
sub_genre_key = sub_genre
if sub_genre_key and sub_genre_key != 'all':
options = 'filter={}'.format(sub_genre_key)
else:
options = ''
return self.get_playlists(genre_key, options)
def get_new_releases(self):
cache_key = 'releases'
cache_value = self._cache.get(cache_key)
if cache_value:
return cache_value
else:
releases = []
for item in self._get('new-releases'):
releases.append(Release(item_array=item['release']))
self._cache.insert(cache_key, releases)
return releases
|
Python
| 0
|
@@ -1412,32 +1412,44 @@
if cache_value
+ is not None
:%0A re
@@ -2135,32 +2135,44 @@
if cache_value
+ is not None
:%0A re
@@ -3208,16 +3208,28 @@
he_value
+ is not None
:%0A
|
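The change matters because an empty cached result is falsy: with `if cache_value:`, a legitimately cached empty list counts as a miss and triggers a refetch on every call. A standalone sketch with a plain dict standing in for the Cache class:

cache = {'playlists': []}   # a valid cached result that happens to be empty

value = cache.get('playlists')

if value:                   # buggy truthy check: [] looks like a cache miss
    print('hit')
else:
    print('miss')           # printed, so the API would be queried again

if value is not None:       # fixed check: only an absent key is a miss
    print('hit')            # printed; the empty result is served from cache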
379f0a70425f171dc3046e32277b27e2bd9f55dc
|
Add multilabel param to cv_train and fix fetching labels
|
revscoring/utilities/cv_train.py
|
revscoring/utilities/cv_train.py
|
"""
``revscoring cv_train -h``
::
Performs a cross-validation of a scorer model strategy across folds of
a dataset and then trains a final model on the entire set of data. Note
that either --labels or --pop-rates must be specified for classifiers.
Usage:
cv_train -h | --help
cv_train <scoring-model> <features> <label>
[--labels=<labels>]
[-p=<kv>]... [-s=<kv>]...
[-w=<lw>]... [-r=<lp>]...
[-o=<p>]...
[--version=<vers>]
[--observations=<path>]
[--model-file=<path>]
[--folds=<num>]
[--workers=<num>]
[--center]
[--scale]
[--debug]
Options:
-h --help Prints this documentation
<scoring-model> Classpath to a ScorerModel to construct
and train
    <features>             Classpath to a list of features to use when
constructing the model
<label> The name of the field to be predicted
--labels=<labels> A comma-separated sequence of labels that will
be used for ordering labels statistics and
other presentations of the model.
    -w --label-weight=<lw>  A label-weight pair that adjusts the
cost of getting a specific label prediction
wrong.
-r --pop-rate=<lp> A label-proportion pair that rescales metrics
based on the rate that the label appears in the
population. If not provided, sample rates will
be assumed to reflect population rates.
-p --parameter=<kv> A key-value argument pair to use when
constructing the <scoring-model>.
--version=<vers> A version to associate with the model
    --observations=<path>  Path to a file of observations, each
                           containing a 'cache' [default: <stdin>]
--model-file=<path> Path to write a model file to
[default: <stdout>]
--folds=<num> The number of folds that should be used when
cross-validating. If set to 1, testing will be
skipped and a model will just be trained on
all observations [default: 10]
--workers=<num> The number of workers that should be used when
cross-validating
--center Features should be centered on a common axis
--scale Features should be scaled to a common range
--debug Print debug logging.
""" # noqa
import json
import logging
import sys
import docopt
import yamlconf
from ..dependencies import solve
from .util import read_labels_and_population_rates, read_observations
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__, argv=argv)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
ScoringModel = yamlconf.import_module(args['<scoring-model>'])
features = yamlconf.import_module(args['<features>'])
version = args['--version']
estimator_params = {}
for parameter in args['--parameter']:
key, value = parameter.split("=", 1)
estimator_params[key] = json.loads(value)
labels, label_weights, population_rates = read_labels_and_population_rates(
args['--labels'], args['--label-weight'], args['--pop-rate'])
model = ScoringModel(
features, version=version,
labels=labels, label_weights=label_weights,
population_rates=population_rates,
center=args['--center'],
scale=args['--scale'],
**estimator_params)
if args['--observations'] == "<stdin>":
observations = read_observations(sys.stdin)
else:
observations = read_observations(open(args['--observations']))
label_name = args['<label>']
value_labels = \
[(list(solve(features, cache=ob['cache'])), ob[label_name])
for ob in observations]
if args['--model-file'] == "<stdout>":
model_file = sys.stdout.buffer
else:
model_file = open(args['--model-file'], 'wb')
folds = int(args['--folds'])
workers = int(args['--workers']) if args['--workers'] is not None else None
run(value_labels, model_file, model, folds, workers)
def run(value_labels, model_file, model, folds, workers):
model = cv_train(model, value_labels, folds, workers)
sys.stderr.write(model.info.format())
sys.stderr.write("\n")
model.dump(model_file)
def cv_train(model, value_labels, folds, workers):
if folds > 1:
logger.info("Cross-validating model statistics for {0} folds..."
.format(folds))
model.cross_validate(value_labels, folds=folds, processes=workers)
logger.info("Training model on all data...")
model.train(value_labels)
return model
|
Python
| 0
|
@@ -381,24 +381,64 @@
s=%3Clabels%3E%5D%0A
+ %5B--labels-config=%3Clc%3E%5D%0A
@@ -779,16 +779,48 @@
-scale%5D%0A
+ %5B--multilabel%5D%0A
@@ -1446,16 +1446,202 @@
model.%0A
+ --labels-config=%3Clc%3E Path to a file containing labels and its%0A configurations like population-rates and%0A weights%0A
@@ -3174,16 +3174,93 @@
n range%0A
+ --multilabel Whether to perform multilabel classification%0A
@@ -4235,17 +4235,129 @@
p-rate'%5D
-)
+,%0A args%5B'--labels-config'%5D)%0A%0A multilabel = False%0A if args%5B'--multilabel'%5D:%0A multilabel = True
%0A%0A mo
@@ -4410,16 +4410,39 @@
version,
+ multilabel=multilabel,
%0A
|
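A trimmed sketch of how the new --multilabel flag flows through docopt into a boolean, assuming the docopt package is installed; docopt stores bare flags as True/False in the args dict:

"""Usage: cv_train [--multilabel]

Options:
  --multilabel  Whether to perform multilabel classification
"""
import docopt

args = docopt.docopt(__doc__, argv=['--multilabel'])
multilabel = False
if args['--multilabel']:   # same pattern as the diff above
    multilabel = True
print(multilabel)          # True; omit the flag and it prints False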
a4cb47b928f6cd7f62780eb620e66a9494b17302
|
Generate a full graph and a graph without age restricted content
|
generate_graph.py
|
generate_graph.py
|
from graphviz import Digraph
import math
import pymongo
def main():
client = pymongo.MongoClient()
db = client.reddit
related_subs = {}
subscribers = {}
adult = {}
subreddits = db.subreddits.find({'type': 'subreddit'})
if subreddits:
for subreddit in subreddits:
title = subreddit['_id']
links = subreddit['linked']
if 'subscribers' in subreddit:
subscribers[title] = subreddit['subscribers']
if 'adult' in subreddit:
adult[title] = True
related_subs[title] = links
generate_adult_graph(related_subs, subscribers, adult, min_subscribers=100)
def generate_adult_graph(related_subs, subscribers, adult, min_subscribers):
g = Digraph('G', filename='adult.gv')
edges_added = 0
for key in related_subs:
for sub in related_subs[key]:
if not sub:
continue
# Filter: only include edge if sub has # subscribers
if sub in subscribers and sub in adult:
subscriber_cnt = subscribers[sub]
if subscriber_cnt >= min_subscribers:
log_cnt = math.log2(subscriber_cnt)
g.edge(key, sub, weight=str(log_cnt))
print("Edge count: " + str(edges_added))
edges_added += 1
g.save()
if __name__ == '__main__':
main()
|
Python
| 0.998926
|
@@ -627,74 +627,1638 @@
ate_
-adult_graph(related_subs, subscribers, adult, min_subscribers=100)
+full_graph(related_subs, subscribers, adult, min_subscribers=0)%0A generate_censored_graph(related_subs, subscribers, adult, min_subscribers=100)%0A generate_adult_graph(related_subs, subscribers, adult, min_subscribers=100)%0A%0Adef generate_full_graph(related_subs, subscribers, adult, min_subscribers):%0A%0A g = Digraph('G', filename='full.gv')%0A%0A edges_added = 0%0A for key in related_subs:%0A for sub in related_subs%5Bkey%5D:%0A if not sub:%0A continue%0A%0A # Filter: only include edge if sub has # subscribers%0A if sub in subscribers:%0A subscriber_cnt = subscribers%5Bsub%5D%0A if subscriber_cnt %3E= min_subscribers:%0A log_cnt = math.log2(subscriber_cnt)%0A g.edge(key, sub, weight=str(log_cnt))%0A print(%22Edge count: %22 + str(edges_added))%0A edges_added += 1%0A%0A g.save()%0A%0Adef generate_censored_graph(related_subs, subscribers, adult, min_subscribers):%0A%0A g = Digraph('G', filename='censored.gv')%0A%0A edges_added = 0%0A for key in related_subs:%0A for sub in related_subs%5Bkey%5D:%0A if not sub:%0A continue%0A%0A # Filter: only include edge if sub has # subscribers%0A if sub in subscribers and not sub in adult:%0A subscriber_cnt = subscribers%5Bsub%5D%0A if subscriber_cnt %3E= min_subscribers:%0A log_cnt = math.log2(subscriber_cnt)%0A g.edge(key, sub, weight=str(log_cnt))%0A print(%22Edge count: %22 + str(edges_added))%0A edges_added += 1%0A%0A g.save()%0A
%0A%0Ade
|
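The log2 edge weight above keeps enormous subreddits from dominating the graph layout; a quick check of how the scale compresses:

import math

for subscribers in (100, 10000, 1000000):
    print(subscribers, round(math.log2(subscribers), 2))
# 100 -> 6.64, 10000 -> 13.29, 1000000 -> 19.93:
# a 10000x spread in subscribers becomes roughly a 3x spread in edge weight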
ad4b5ccf7c89fa67e69d065c47edaa9e18c009ee
|
add docstrings; add if __name__ == '__main__' guard to make pydoc work
|
src/hal/user_comps/pyvcp.py
|
src/hal/user_comps/pyvcp.py
|
#!/usr/bin/env python
# This is a component of emc
# Copyright 2007 Anders Wallin <anders.wallin@helsinki.fi>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys, os
BASE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
sys.path.insert(0, os.path.join(BASE, "lib", "python"))
import vcpparse
import hal
from Tkinter import Tk
import getopt
def usage():
print "Usage: pyvcp -c hal_component_name myfile.xml"
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "c:")
except getopt.GetoptError, detail:
print detail
usage()
sys.exit(1)
#try:
# opts, args = getopt.getopt(sys.argv[1:], "c:")
#except getopt.GetoptError:
# print "usage: pyvcp -c hal_component_name myfile.xml"
# sys.exit(0)
#print opts
#print args
component_name = None
for o, a in opts:
if o == "-c":
component_name = a
if component_name is None:
usage()
sys.exit(1)
try:
filename=args[0]
except:
usage()
sys.exit(1)
#try:
# filename=sys.argv[1]
#except:
# print "Error: No XML file specified!"
# sys.exit()
pyvcp0 = Tk()
pyvcp0.title(component_name)
vcpparse.filename=filename
pycomp=vcpparse.create_vcp(compname=component_name, master=pyvcp0)
pycomp.ready()
try:
pyvcp0.mainloop()
except KeyboardInterrupt:
sys.exit(0)
main()
|
Python
| 0.000254
|
@@ -845,16 +845,515 @@
7 USA%0A%0A
+%0A%22%22%22 Python Virtual Control Panel for EMC%0A%0A A virtual control panel (VCP) is used to display and control%0A HAL pins, which are either BIT or FLOAT valued.%0A%0A Usage: pyvcp -c compname myfile.xml%0A%0A compname is the name of the HAL component to be created. %0A The name of the HAL pins associated with the VCP will begin with 'compname.'%0A %0A myfile.xml is an XML file which specifies the layout of the VCP.%0A Valid XML tags are described in the documentation for pyvcp_widgets.py%0A%22%22%22%0A%0A
import s
@@ -1563,24 +1563,61 @@
ef usage():%0A
+ %22%22%22 prints the usage message %22%22%22%0A
print %22U
@@ -1671,24 +1671,117 @@
def main():%0A
+ %22%22%22 creates a HAL component.%0A calls vcpparse with the specified XML file.%0A %22%22%22%0A
try:%0A
@@ -1933,228 +1933,8 @@
1)%0A%0A
- %0A #try:%0A # opts, args = getopt.getopt(sys.argv%5B1:%5D, %22c:%22)%0A #except getopt.GetoptError:%0A # print %22usage: pyvcp -c hal_component_name myfile.xml%22%0A # sys.exit(0)%0A%0A #print opts%0A #print args%0A
@@ -2035,29 +2035,8 @@
= a
-%0A %0A
%0A%0A
@@ -2101,21 +2101,16 @@
xit(1)%0A%0A
- %0A
try:
@@ -2188,129 +2188,8 @@
1)%0A%0A
- #try:%0A # filename=sys.argv%5B1%5D%0A #except:%0A # print %22Error: No XML file specified!%22%0A # sys.exit()%0A%0A
@@ -2444,16 +2444,46 @@
xit(0)%0A%0A
-%0A
+if __name__ == '__main__':%0A
main()%0A
|
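The point of the added guard is that pydoc imports the module, and an unguarded main() call would launch the GUI during import. A minimal illustration with a hypothetical module name:

# mymodule.py (hypothetical)
def main():
    print('doing real work')

if __name__ == '__main__':
    main()   # runs for `python mymodule.py`, but not for `import mymodule`,
             # so `pydoc mymodule` can introspect the module safely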
c3b681e5fbe157ea70167da1e67c740e8339af6f
|
Use plac annotations for arguments and add n_iter
|
examples/training/train_new_entity_type.py
|
examples/training/train_new_entity_type.py
|
#!/usr/bin/env python
# coding: utf8
"""
Example of training an additional entity type
This script shows how to add a new entity type to an existing pre-trained NER
model. To keep the example short and simple, only four sentences are provided
as examples. In practice, you'll need many more — a few hundred would be a
good start. You will also likely need to mix in examples of other entity
types, which might be obtained by running the entity recognizer over unlabelled
sentences, and adding their annotations to the training set.
The actual training is performed by looping over the examples, and calling
`nlp.entity.update()`. The `update()` method steps through the words of the
input. At each word, it makes a prediction. It then consults the annotations
provided on the GoldParse instance, to see whether it was right. If it was
wrong, it adjusts its weights so that the correct action will score higher
next time.
After training your model, you can save it to a directory. We recommend
wrapping models as Python packages, for ease of deployment.
For more details, see the documentation:
* Training: https://alpha.spacy.io/usage/training
* NER: https://alpha.spacy.io/usage/linguistic-features#named-entities
Developed for: spaCy 2.0.0a18
Last updated for: spaCy 2.0.0a18
"""
from __future__ import unicode_literals, print_function
import random
from pathlib import Path
import spacy
from spacy.gold import GoldParse, minibatch
# new entity label
LABEL = 'ANIMAL'
# training data
TRAIN_DATA = [
("Horses are too tall and they pretend to care about your feelings",
[(0, 6, 'ANIMAL')]),
("Do they bite?", []),
("horses are too tall and they pretend to care about your feelings",
[(0, 6, 'ANIMAL')]),
("horses pretend to care about your feelings", [(0, 6, 'ANIMAL')]),
("they pretend to care about your feelings, those horses",
[(48, 54, 'ANIMAL')]),
("horses?", [(0, 6, 'ANIMAL')])
]
def main(model=None, new_model_name='animal', output_dir=None):
"""Set up the pipeline and entity recognizer, and train the new entity.
model (unicode): Model name to start off with. If None, a blank English
Language class is created.
new_model_name (unicode): Name of new model to create. Will be added to the
model meta and prefixed by the language code, e.g. 'en_animal'.
output_dir (unicode / Path): Optional output directory. If None, no model
will be saved.
"""
if model is not None:
nlp = spacy.load(model) # load existing spaCy model
print("Loaded model '%s'" % model)
else:
nlp = spacy.blank('en') # create blank Language class
print("Created blank 'en' model")
# Add entity recognizer to model if it's not in the pipeline
# nlp.create_pipe works for built-ins that are registered with spaCy
if 'ner' not in nlp.pipe_names:
ner = nlp.create_pipe('ner')
nlp.add_pipe(ner)
# otherwise, get it, so we can add labels to it
else:
ner = nlp.get_pipe('ner')
ner.add_label(LABEL) # add new entity label to entity recognizer
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
with nlp.disable_pipes(*other_pipes) as disabled: # only train NER
random.seed(0)
optimizer = nlp.begin_training(lambda: [])
for itn in range(50):
losses = {}
gold_parses = get_gold_parses(nlp.make_doc, TRAIN_DATA)
for batch in minibatch(gold_parses, size=3):
docs, golds = zip(*batch)
nlp.update(docs, golds, losses=losses, sgd=optimizer,
drop=0.35)
print(losses)
# test the trained model
test_text = 'Do you like horses?'
doc = nlp(test_text)
print("Entities in '%s'" % test_text)
for ent in doc.ents:
print(ent.label_, ent.text)
# save model to output directory
if output_dir is not None:
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir()
nlp.meta['name'] = new_model_name # rename model
nlp.to_disk(output_dir)
print("Saved model to", output_dir)
# test the saved model
print("Loading from", output_dir)
nlp2 = spacy.load(output_dir)
doc2 = nlp2(test_text)
for ent in doc2.ents:
print(ent.label_, ent.text)
def get_gold_parses(tokenizer, train_data):
"""Shuffle and create GoldParse objects.
    tokenizer (Tokenizer): Tokenizer to process the raw text.
train_data (list): The training data.
YIELDS (tuple): (doc, gold) tuples.
"""
random.shuffle(train_data)
for raw_text, entity_offsets in train_data:
doc = tokenizer(raw_text)
gold = GoldParse(doc, entities=entity_offsets)
yield doc, gold
if __name__ == '__main__':
import plac
plac.call(main)
|
Python
| 0
|
@@ -1337,16 +1337,28 @@
nction%0A%0A
+import plac%0A
import r
@@ -1363,16 +1363,16 @@
random%0A
-
from pat
@@ -1953,415 +1953,358 @@
%5D%0A%0A%0A
-def main(model=None, new_m
+@plac.annotations(%0A model=(%22M
odel
-_
+
name
-='animal', output_dir=None):%0A %22%22%22Set up the pipeline and entity recognizer, and train the new entity.%0A
+. Defaults to blank 'en' model.%22, %22option%22, %22m%22, str),
%0A
+new_
model
- (unicode): Model name to start off with. If None, a blank English%0A Language class is created.%0A new_model_name (unicode): Name of new model to create. Will be added to the%0A model meta and prefixed by the language code, e.g. 'en_
+_name=(%22New model name for model meta.%22, %22option%22, %22nm%22, str),%0A output_dir=(%22Optional output directory%22, %22option%22, %22o%22, Path),%0A n_iter=(%22Number of training iterations%22, %22option%22, %22n%22, int))%0Adef main(model=None, new_model_name='
animal'
-.%0A
+,
out
@@ -2314,99 +2314,102 @@
_dir
- (unicode / Path): Optional output directory. If None, no model%0A will be saved.%0A
+=None, n_iter=50):%0A %22%22%22Set up the pipeline and entity recognizer, and train the new entity.
%22%22%22%0A
@@ -3363,18 +3363,22 @@
n range(
-50
+n_iter
):%0A
@@ -4853,16 +4853,16 @@
gold%0A%0A%0A
+
if __nam
@@ -4884,24 +4884,8 @@
_':%0A
- import plac%0A
|
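A trimmed, self-contained sketch of the plac pattern the diff introduces: each (help, kind, abbrev, type) tuple becomes a command-line option, and plac.call maps argv onto main's keyword arguments:

from pathlib import Path

import plac

@plac.annotations(
    model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
    output_dir=("Optional output directory", "option", "o", Path),
    n_iter=("Number of training iterations", "option", "n", int))
def main(model=None, output_dir=None, n_iter=50):
    print(model, output_dir, n_iter)

if __name__ == '__main__':
    plac.call(main)   # e.g. `python train.py -m en -n 30`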
edca360431f28be2d67f7193a9d53be0460169a1
|
fix test
|
biopandas/pdb/tests/test_read_pdb.py
|
biopandas/pdb/tests/test_read_pdb.py
|
# BioPandas
# Author: Sebastian Raschka <mail@sebastianraschka.com>
# License: BSD 3 clause
# Project Website: http://rasbt.github.io/biopandas/
# Code Repository: https://github.com/rasbt/biopandas
from biopandas.pdb import PandasPdb
import os
import numpy as np
import pandas as pd
from nose.tools import raises
from biopandas.testutils import assert_raises
try:
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
except ImportError:
from urllib2 import urlopen, HTTPError, URLError # Python 2.7 compatib
TESTDATA_FILENAME = os.path.join(os.path.dirname(__file__), 'data', '3eiy.pdb')
TESTDATA_FILENAME2 = os.path.join(os.path.dirname(__file__), 'data',
'4eiy_anisouchunk.pdb')
TESTDATA_FILENAME_GZ = os.path.join(os.path.dirname(__file__), 'data',
'3eiy.pdb.gz')
ATOM_DF_COLUMNS = ['record_name', 'atom_number', 'blank_1',
'atom_name', 'alt_loc', 'residue_name',
'blank_2', 'chain_id', 'residue_number',
'insertion', 'blank_3',
'x_coord', 'y_coord', 'z_coord',
'occupancy', 'b_factor', 'blank_4',
'segment_id', 'element_symbol',
'charge', 'line_idx']
ANISOU_DF_COLUMNS = ['record_name', 'atom_number', 'blank_1',
'atom_name', 'alt_loc', 'residue_name',
'blank_2', 'chain_id', 'residue_number',
'insertion', 'blank_3',
'U(1,1)', 'U(2,2)', 'U(3,3)',
'U(1,2)', 'U(1,3)', 'U(2,3)',
'blank_4', 'element_symbol',
'charge', 'line_idx']
with open(TESTDATA_FILENAME, 'r') as f:
three_eiy = f.read()
with open(TESTDATA_FILENAME2, 'r') as f:
four_eiy = f.read()
def test__read_pdb():
"""Test private _read_pdb"""
ppdb = PandasPdb()
path, txt = ppdb._read_pdb(TESTDATA_FILENAME)
print(txt)
assert txt == three_eiy
def test__read_pdb_raises():
"""Test private _read_pdb:
Test if ValueError is raised for wrong file formats."""
expect = ('Wrong file format; allowed file formats are '
'.pdb, .pdb.gz, .ent, .ent.gz')
def run_code_1():
PandasPdb()._read_pdb("protein.mol2")
assert_raises(ValueError,
expect,
run_code_1)
def run_code_2():
PandasPdb()._read_pdb("protein.mol2.gz")
assert_raises(ValueError,
expect,
run_code_2)
def test_fetch_pdb():
"""Test fetch_pdb"""
try:
ppdb = PandasPdb()
url, txt = ppdb._fetch_pdb('3eiy')
except HTTPError:
url, txt = None, None
except ConnectionResetError:
url, txt = None, None
if txt: # skip if PDB down
        assert txt[:100] == three_eiy[:100]
ppdb.fetch_pdb('3eiy')
assert ppdb.pdb_text == txt
assert ppdb.pdb_path == 'https://files.rcsb.org/download/3eiy.pdb'
def test__read_pdb_gz():
"""Test public _read_pdb with gzip files"""
ppdb = PandasPdb()
path, txt = ppdb._read_pdb(TESTDATA_FILENAME_GZ)
assert txt == three_eiy
def test__construct_df():
"""Test pandas dataframe construction"""
ppdb = PandasPdb()
dfs = ppdb._construct_df(three_eiy.splitlines())
assert set(dfs.keys()) == {'OTHERS', 'ATOM', 'ANISOU', 'HETATM'}
assert set(dfs['ATOM'].columns) == set(ATOM_DF_COLUMNS)
assert set(dfs['HETATM'].columns) == set(ATOM_DF_COLUMNS)
assert set(dfs['ANISOU'].columns) == set(ANISOU_DF_COLUMNS)
exp = pd.Series(np.array(['ATOM', 1, '', 'N', '', 'SER',
'', 'A', 2, '', '', 2.527, 54.656, -1.667, 1.0,
52.73, '', '', 'N', None, 609]),
index=['record_name', 'atom_number', 'blank_1',
'atom_name', 'alt_loc', 'residue_name',
'blank_2', 'chain_id', 'residue_number',
'insertion', 'blank_3',
'x_coord', 'y_coord', 'z_coord',
'occupancy', 'b_factor', 'blank_4',
'segment_id', 'element_symbol',
'charge', 'line_idx'])
assert exp.equals(dfs['ATOM'].loc[0, :])
def test_read_pdb():
"""Test public read_pdb"""
ppdb = PandasPdb()
ppdb.read_pdb(TESTDATA_FILENAME)
assert ppdb.pdb_text == three_eiy
assert ppdb.code == '3eiy', ppdb.code
assert ppdb.pdb_path == TESTDATA_FILENAME
def test_read_pdb_from_list():
"""Test public read_pdb_from_list"""
for pdb_text, code in zip([three_eiy, four_eiy], ['3eiy', '4eiy']):
ppdb = PandasPdb()
ppdb.read_pdb_from_list(pdb_text.splitlines(True))
assert ppdb.pdb_text == pdb_text
assert ppdb.code == code
assert ppdb.pdb_path == ''
def test_anisou_input_handling():
"""Test public read_pdb"""
ppdb = PandasPdb()
ppdb.read_pdb(TESTDATA_FILENAME2)
assert ppdb.pdb_text == four_eiy
assert ppdb.code == '4eiy', ppdb.code
@raises(AttributeError)
def test_get_exceptions():
ppdb = PandasPdb()
ppdb.read_pdb(TESTDATA_FILENAME)
ppdb.get('main-chai')
def test_get_all():
ppdb = PandasPdb()
ppdb.read_pdb(TESTDATA_FILENAME)
for i in ['c-alpha', 'hydrogen', 'main chain']:
ppdb.get(i)
def test_get_df():
ppdb = PandasPdb()
ppdb.read_pdb(TESTDATA_FILENAME)
shape = ppdb.get('c-alpha').shape
assert shape == (174, 21), shape
shape = ppdb.get('hydrogen', invert=True, records=('ATOM',)).shape
assert shape == (1330, 21), shape
# deprecated use of string
shape = ppdb.get('hydrogen', invert=True, records='ATOM').shape
assert shape == (1330, 21), shape
shape = ppdb.get('hydrogen').shape
assert shape == (0, 21), shape
shape = ppdb.get('main chain', records=('ATOM',)).shape
assert shape == (696, 21), shape
shape = ppdb.get('heavy', records=('ATOM',)).shape
assert shape == (1330, 21), shape
shape = ppdb.get('carbon', records=('ATOM',)).shape
assert shape == (473, 21), shape
|
Python
| 0.000002
|
@@ -6181,11 +6181,11 @@
== (
-473
+857
, 21
|
214b1882a0eaf00bdd5dedbb02a28bba7f8d247b
|
update version to 1.1.7
|
cartoview/__init__.py
|
cartoview/__init__.py
|
__version__ = (1, 1, 5, 'alpha', 0)
|
Python
| 0
|
@@ -18,9 +18,9 @@
1,
-5
+7
, 'a
|
74a25caa15d0ab32d83355bc90cc415f5ff8cd1b
|
Remove unused imports.
|
exp/viroscopy/model/HIVEpidemicModelABC.py
|
exp/viroscopy/model/HIVEpidemicModelABC.py
|
"""
A script to estimate the HIV epidemic model parameters using ABC.
"""
from apgl.graph.SparseGraph import SparseGraph
from apgl.graph.GraphStatistics import GraphStatistics
from apgl.util import *
from exp.viroscopy.model.HIVGraph import HIVGraph
from exp.viroscopy.model.HIVABCParameters import HIVABCParameters
from exp.viroscopy.model.HIVEpidemicModel import HIVEpidemicModel
from exp.viroscopy.model.HIVRates import HIVRates
from exp.viroscopy.model.HIVModelUtils import HIVModelUtils
from exp.viroscopy.model.HIVGraphMetrics import HIVGraphMetrics2
from apgl.predictors.ABCSMC import ABCSMC
from apgl.util.ProfileUtils import ProfileUtils
import logging
import sys
import numpy
import multiprocessing
import scipy.stats
FORMAT = "%(levelname)s:root:%(process)d:%(message)s"
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=FORMAT)
numpy.set_printoptions(suppress=True, precision=4, linewidth=100)
numpy.seterr(invalid='raise')
#First try the experiment on some toy data
resultsDir = PathDefaults.getOutputDir() + "viroscopy/toy/"
graphFile = resultsDir + "ToyEpidemicGraph0"
targetGraph = HIVGraph.load(graphFile)
numTimeSteps = 10
T, recordStep, printStep, M = HIVModelUtils.defaultSimulationParams()
times = numpy.linspace(0, T, numTimeSteps)
graphMetrics = HIVGraphMetrics2(times)
realSummary = graphMetrics.summary(targetGraph)
epsilonArray = numpy.array([0.8, 0.6, 0.5])*numTimeSteps
def breakFunc(graph, currentTime):
return graphMetrics.shouldBreak(realSummary, graph, epsilonArray[0], currentTime)
def createModel(t):
"""
The parameter t is the particle index.
"""
undirected = True
T, recordStep, printStep, M = HIVModelUtils.defaultSimulationParams()
graph = HIVGraph(M, undirected)
alpha = 2
zeroVal = 0.9
p = Util.powerLawProbs(alpha, zeroVal)
hiddenDegSeq = Util.randomChoice(p, graph.getNumVertices())
rates = HIVRates(graph, hiddenDegSeq)
model = HIVEpidemicModel(graph, rates, T)
model.setRecordStep(recordStep)
model.setPrintStep(printStep)
model.setBreakFunction(breakFunc)
return model
if len(sys.argv) > 1:
numProcesses = int(sys.argv[1])
else:
numProcesses = multiprocessing.cpu_count()
posteriorSampleSize = 20
thetaLen = 10
logging.debug("Posterior sample size " + str(posteriorSampleSize))
meanTheta = HIVModelUtils.defaultTheta()
abcParams = HIVABCParameters(meanTheta, 0.5, 0.2)
abcSMC = ABCSMC(epsilonArray, realSummary, createModel, abcParams, graphMetrics)
abcSMC.setPosteriorSampleSize(posteriorSampleSize)
thetasArray = abcSMC.run()
meanTheta = numpy.mean(thetasArray, 0)
stdTheta = numpy.std(thetasArray, 0)
logging.debug(thetasArray)
logging.debug("meanTheta=" + str(meanTheta))
logging.debug("stdTheta=" + str(stdTheta))
logging.debug("realTheta=" + str(HIVModelUtils.defaultTheta()))
thetaFileName = resultsDir + "ThetaDistSimulated.pkl"
Util.savePickle(thetasArray, thetaFileName)
|
Python
| 0
|
@@ -71,109 +71,8 @@
%22%22%22%0A
-from apgl.graph.SparseGraph import SparseGraph%0Afrom apgl.graph.GraphStatistics import GraphStatistics
%0Afro
@@ -494,57 +494,8 @@
CSMC
-%0Afrom apgl.util.ProfileUtils import ProfileUtils
%0A%0Aim
@@ -557,28 +557,8 @@
sing
-%0Aimport scipy.stats
%0A%0AFO
|
eb5ab4abdc18f56ac21524225d5c1168ece35def
|
allow for category-based link
|
generic/models.py
|
generic/models.py
|
from django.core.urlresolvers import reverse, Resolver404
from django.db import models
from preferences.models import Preferences
from snippetscream import resolve_to_name
class Link(models.Model):
title = models.CharField(
max_length=256,
help_text='A short descriptive title.',
)
view_name = models.CharField(
max_length=256,
help_text="View name to which this link will redirect. This takes \
precedence over url field below.",
blank=True,
null=True,
)
url = models.CharField(
max_length=256,
help_text='URL to which this menu link will redirect.',
blank=True,
null=True,
)
def get_absolute_url(self):
"""
Returns url to which link should redirect based on a reversed view name
or otherwise explicitly provided url.
"""
if self.view_name:
return reverse(self.view_name)
else:
return self.url
def is_active(self, request):
"""
        Determines whether or not the link can be considered active based on
        the request path. True if the request path can be resolved to the same
        view name as is contained in the view_name field. Otherwise True if the
        request path starts with the url as contained in the url field (needs
        some work).
"""
try:
pattern_name = resolve_to_name(request.path_info)
except Resolver404:
pattern_name = None
active = False
if pattern_name:
active = pattern_name == self.view_name
if not active and self.url:
active = request.path_info.startswith(self.url)
return active
def __unicode__(self):
return self.title
class MenuPreferences(Preferences):
__module__ = 'preferences.models'
links = models.ManyToManyField(Link, through='generic.MenuLinkPosition')
class NavbarPreferences(Preferences):
__module__ = 'preferences.models'
links = models.ManyToManyField(Link, through='generic.NavbarLinkPosition')
class LinkPosition(models.Model):
link = models.ForeignKey(Link)
position = models.IntegerField()
class Meta():
abstract = True
ordering = ('position',)
def __unicode__(self):
return "Link titled %s in position %s." % (self.link.title, \
self.position)
class MenuLinkPosition(LinkPosition):
preferences = models.ForeignKey(MenuPreferences)
class NavbarLinkPosition(LinkPosition):
preferences = models.ForeignKey(NavbarPreferences)
|
Python
| 0
|
@@ -453,19 +453,251 @@
ce over
-url
+Category and URL fields below.%22,%0A blank=True,%0A null=True,%0A )%0A category = models.ForeignKey(%0A 'category.Category',%0A help_text=%22Category to which this link will redirect. This takes %5C%0Aprecedence over URL
field b
@@ -971,19 +971,19 @@
Returns
-url
+URL
to whic
@@ -1047,20 +1047,19 @@
+categ
or
+y
o
-therwise
+r
exp
@@ -1075,19 +1075,47 @@
rovided
-url
+URL in that order of precedence
.%0A
@@ -1190,16 +1190,96 @@
w_name)%0A
+ elif self.category:%0A return self.category.get_absolute_url()%0A
@@ -1618,19 +1618,128 @@
ts with
-url
+URL as resolved for category contained in category field.%0A Otherwise True if request path starts with URL
as cont
@@ -1750,16 +1750,24 @@
d in url
+%0A
field (
@@ -1976,24 +1976,24 @@
ttern_name:%0A
-
@@ -2028,24 +2028,149 @@
f.view_name%0A
+ if not active and self.category:%0A active = request.path_info.startswith(self.category.get_absolute_url())%0A
if n
|
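After the diff, get_absolute_url resolves in a fixed order: view_name first, then category, then url. A plain-Python sketch of the same fall-through, with reverse() and the category stubbed out as strings:

def resolve_link(view_name, category_url, url):
    if view_name:
        return '/reversed/%s/' % view_name   # stands in for reverse(view_name)
    elif category_url:
        return category_url                  # stands in for category.get_absolute_url()
    return url

print(resolve_link('home', '/cat/news/', '/fallback/'))   # /reversed/home/
print(resolve_link(None, '/cat/news/', '/fallback/'))     # /cat/news/
print(resolve_link(None, None, '/fallback/'))             # /fallback/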
696521f33f397b6fa31183da182b8b41fb8d7808
|
Move colon match to @bot only
|
slackbot/dispatcher.py
|
slackbot/dispatcher.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import re
import time
import traceback
from functools import wraps
import six
from slackbot.manager import PluginsManager
from slackbot.utils import WorkerPool
from slackbot import settings
logger = logging.getLogger(__name__)
class MessageDispatcher(object):
def __init__(self, slackclient, plugins):
self._client = slackclient
self._pool = WorkerPool(self.dispatch_msg)
self._plugins = plugins
alias_regex = ''
if getattr(settings, 'ALIASES', None):
logger.info('using aliases %s', settings.ALIASES)
alias_regex = '|(?P<alias>{})'.format('|'.join([re.escape(s) for s in settings.ALIASES.split(',')]))
self.AT_MESSAGE_MATCHER = re.compile(r'^(?:\<@(?P<atuser>\w+)\>{}):? ?(?P<text>.*)$'.format(alias_regex))
def start(self):
self._pool.start()
def dispatch_msg(self, msg):
category = msg[0]
msg = msg[1]
text = msg['text']
responded = False
for func, args in self._plugins.get_plugins(category, text):
if func:
responded = True
try:
func(Message(self._client, msg), *args)
except:
logger.exception('failed to handle message %s with plugin "%s"', text, func.__name__)
reply = u'[{}] I have problem when handling "{}"\n'.format(func.__name__, text)
reply += u'```\n{}\n```'.format(traceback.format_exc())
self._client.rtm_send_message(msg['channel'], reply)
if not responded and category == u'respond_to':
self._default_reply(msg)
def _on_new_message(self, msg):
# ignore edits
subtype = msg.get('subtype', '')
if subtype == u'message_changed':
return
botname = self._client.login_data['self']['name']
try:
msguser = self._client.users.get(msg['user'])
username = msguser['name']
except (KeyError, TypeError):
if 'username' in msg:
username = msg['username']
else:
return
if username == botname or username == u'slackbot':
return
msg_respond_to = self.filter_text(msg)
if msg_respond_to:
self._pool.add_task(('respond_to', msg_respond_to))
else:
self._pool.add_task(('listen_to', msg))
def _get_bot_id(self):
return self._client.login_data['self']['id']
def filter_text(self, msg):
full_text = msg.get('text', '')
channel = msg['channel']
bot_name = self._get_bot_id()
m = self.AT_MESSAGE_MATCHER.match(full_text)
if channel[0] == 'C' or channel[0] == 'G':
if not m:
return
matches = m.groupdict()
atuser = matches.get('atuser', None)
text = matches.get('text', None)
alias = matches.get('alias', None)
if alias:
atuser = bot_name
if atuser != bot_name:
# a channel message at other user
return
logger.debug('got an AT message: %s', text)
msg['text'] = text
else:
if m:
msg['text'] = m.groupdict().get('text', None)
return msg
def loop(self):
while True:
events = self._client.rtm_read()
for event in events:
if event.get('type') != 'message':
continue
self._on_new_message(event)
time.sleep(1)
def _default_reply(self, msg):
try:
from slackbot_settings import default_reply
except ImportError:
default_reply = [
u'Bad command "{}", You can ask me one of the following questions:\n'.format(msg['text']),
]
default_reply += [u' • `{0}` {1}'.format(p.pattern, v.__doc__ or "")
for p, v in six.iteritems(self._plugins.commands['respond_to'])]
# pylint: disable=redefined-variable-type
default_reply = u'\n'.join(default_reply)
m = Message(self._client, msg)
m.reply(default_reply)
def unicode_compact(func):
"""
    Make sure the first parameter of the decorated method is a unicode
    object.
"""
@wraps(func)
def wrapped(self, text, *a, **kw):
if not isinstance(text, six.text_type):
text = text.decode('utf-8')
return func(self, text, *a, **kw)
return wrapped
class Message(object):
def __init__(self, slackclient, body):
self._client = slackclient
self._body = body
self._plugins = PluginsManager()
def _get_user_id(self):
if 'user' in self._body:
return self._body['user']
return self._client.find_user_by_name(self._body['username'])
@unicode_compact
def _gen_at_message(self, text):
text = u'<@{}>: {}'.format(self._get_user_id(), text)
return text
@unicode_compact
def gen_reply(self, text):
chan = self._body['channel']
if chan.startswith('C') or chan.startswith('G'):
return self._gen_at_message(text)
else:
return text
@unicode_compact
def reply_webapi(self, text):
"""
Send a reply to the sender using Web API
        (This function supports formatted messages
when using a bot integration)
"""
text = self.gen_reply(text)
self.send_webapi(text)
@unicode_compact
def send_webapi(self, text, attachments=None):
"""
Send a reply using Web API
        (This function supports formatted messages
when using a bot integration)
"""
self._client.send_message(
self._body['channel'],
text,
attachments=attachments)
@unicode_compact
def reply(self, text):
"""
Send a reply to the sender using RTM API
        (This function doesn't support formatted messages
when using a bot integration)
"""
text = self.gen_reply(text)
self.send(text)
@unicode_compact
def send(self, text):
"""
Send a reply using RTM API
        (This function doesn't support formatted messages
when using a bot integration)
"""
self._client.rtm_send_message(self._body['channel'], text)
def react(self, emojiname):
"""
React to a message using the web api
"""
self._client.react_to_message(
emojiname=emojiname,
channel=self._body['channel'],
timestamp=self._body['ts'])
@property
def channel(self):
return self._client.get_channel(self._body['channel'])
@property
def body(self):
return self._body
def docs_reply(self):
reply = [u' • `{0}` {1}'.format(v.__name__, v.__doc__ or '')
for _, v in six.iteritems(self._plugins.commands['respond_to'])]
return u'\n'.join(reply)
|
Python
| 0
|
@@ -824,13 +824,13 @@
+)%5C%3E
+:?
%7B%7D)
-:?
?(?
|
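What the one-character move changes: the optional colon used to sit after the whole mention-or-alias group, so '!bot: hello' had its colon consumed; now the colon is attached to the <@user> mention only. A runnable check with a hypothetical '!bot' alias:

import re

alias = '|(?P<alias>{})'.format(re.escape('!bot'))
old = re.compile(r'^(?:\<@(?P<atuser>\w+)\>{}):? ?(?P<text>.*)$'.format(alias))
new = re.compile(r'^(?:\<@(?P<atuser>\w+)\>:?{}) ?(?P<text>.*)$'.format(alias))

print(old.match('!bot: hello').group('text'))          # 'hello'   (colon swallowed after the alias)
print(new.match('!bot: hello').group('text'))          # ': hello' (colon no longer consumed there)
print(new.match('<@U024BE7LH>: hello').group('text'))  # 'hello'   (mention still allows a colon)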
455e1fe93b612c7049059cf217652862c995fe97
|
Replace dict(<list_comprehension>) pattern with dict comprehension
|
import_export/instance_loaders.py
|
import_export/instance_loaders.py
|
from __future__ import unicode_literals
class BaseInstanceLoader(object):
"""
Base abstract implementation of instance loader.
"""
def __init__(self, resource, dataset=None):
self.resource = resource
self.dataset = dataset
def get_instance(self, row):
raise NotImplementedError
class ModelInstanceLoader(BaseInstanceLoader):
"""
Instance loader for Django model.
    Looks up model instances by ``import_id_fields``.
"""
def get_queryset(self):
return self.resource._meta.model.objects.all()
def get_instance(self, row):
try:
params = {}
for key in self.resource.get_import_id_fields():
field = self.resource.fields[key]
params[field.attribute] = field.clean(row)
return self.get_queryset().get(**params)
except self.resource._meta.model.DoesNotExist:
return None
class CachedInstanceLoader(ModelInstanceLoader):
"""
    Loads all possible model instances in the dataset to avoid hitting the
    database for every ``get_instance`` call.
    This instance loader works only when there is one ``import_id_fields``
field.
"""
def __init__(self, *args, **kwargs):
super(CachedInstanceLoader, self).__init__(*args, **kwargs)
pk_field_name = self.resource.get_import_id_fields()[0]
self.pk_field = self.resource.fields[pk_field_name]
ids = [self.pk_field.clean(row) for row in self.dataset.dict]
qs = self.get_queryset().filter(**{
"%s__in" % self.pk_field.attribute: ids
})
self.all_instances = dict([
(self.pk_field.get_value(instance), instance)
for instance in qs])
def get_instance(self, row):
return self.all_instances.get(self.pk_field.clean(row))
|
Python
| 0.000003
|
@@ -1649,14 +1649,9 @@
s =
-dict(%5B
+%7B
%0A
@@ -1659,17 +1659,16 @@
-(
self.pk_
@@ -1692,17 +1692,17 @@
nstance)
-,
+:
instanc
@@ -1702,17 +1702,16 @@
instance
-)
%0A
@@ -1737,10 +1737,18 @@
n qs
-%5D)
+%0A %7D
%0A%0A
|
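The same transformation as the diff, in isolation: the old form builds a throwaway list of tuples and hands it to dict(), while the comprehension constructs the dict directly:

pairs = [('a', 1), ('b', 2)]

old_style = dict([(key, value) for key, value in pairs])  # intermediate list, then dict()
new_style = {key: value for key, value in pairs}          # dict built in one pass

assert old_style == new_style == {'a': 1, 'b': 2}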
ac8c1b6849c490c776636e3771e80344e6b0fb2e
|
Update github3.search.user for consistency
|
github3/search/user.py
|
github3/search/user.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .. import users
from ..models import GitHubCore
class UserSearchResult(GitHubCore):
def _update_attributes(self, data):
result = data.copy()
#: Score of the result
self.score = self._get_attribute(result, 'score')
if 'score' in result:
del result['score']
#: Text matches
self.text_matches = self._get_attribute(result, 'text_matches', [])
if 'text_matches' in result:
del result['text_matches']
#: User object matching the search
self.user = users.ShortUser(result, self)
def _repr(self):
return '<UserSearchResult [{0}]>'.format(self.user)
|
Python
| 0
|
@@ -157,86 +157,155 @@
-def _update_attributes(self, data):%0A result = data.copy()%0A%0A #: S
+%22%22%22Representation of a search result for a user.%0A%0A This object has the following attributes:%0A%0A .. attribute:: score%0A%0A The confidence s
core
@@ -314,74 +314,52 @@
f th
-e
+is
result
+.%0A
%0A
- self.score = self._get_attribute(result, 'score')
+.. attribute:: text_matches%0A
%0A
@@ -367,212 +367,288 @@
-if 'score' in result:%0A del result%5B'score'%5D%0A%0A #: Text matches%0A self.text_matches = self._get_attribute(result, 'text_matches', %5B%5D)%0A if 'text_matches' in result:%0A
+If present, a list of text strings that match the search string.%0A%0A .. attribute:: user%0A%0A A :class:%60~github3.users.ShortUser%60 representing the user found%0A in this search result.%0A %22%22%22%0A%0A def _update_attributes(self, data):%0A result = data.copy()%0A
del
@@ -647,78 +647,99 @@
-del result%5B'text_matches'%5D%0A%0A #: User object matching the search
+self.score = result.pop('score')%0A self.text_matches = result.pop('text_matches', %5B%5D)
%0A
|
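The diff collapses the get-attribute-then-delete dance into dict.pop, which reads and removes a key in one step; a standalone sketch with a made-up search result payload:

result = {'score': 0.98, 'login': 'octocat', 'text_matches': []}

score = result.pop('score')                    # raises KeyError if absent
text_matches = result.pop('text_matches', [])  # default makes the field optional

assert score == 0.98
assert 'score' not in result and 'text_matches' not in result
# the stripped-down dict is what gets handed to users.ShortUser(result, self)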
b15f401fe270b69e46fb3009c4d55c917736fb27
|
Bump version
|
guild/__init__.py
|
guild/__init__.py
|
# Copyright 2017 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import os
import subprocess
__version__ = "0.3.0.dev7"
__requires__ = [
# (<required module>, <distutils package req>)
("pip", "pip"),
("yaml", "PyYAML"),
("setuptools", "setuptools"),
("tabview", "tabview"),
("twine", "twine"),
("werkzeug", "Werkzeug"),
("whoosh", "Whoosh"),
]
__pkgdir__ = os.path.dirname(os.path.dirname(__file__))
def _try_init_git_attrs():
try:
_init_git_commit()
except (OSError, subprocess.CalledProcessError):
pass
else:
try:
_init_git_status()
except (OSError, subprocess.CalledProcessError):
pass
def _init_git_commit():
commit = _git_cmd("git -C \"%(repo)s\" log -1 --oneline | cut -d' ' -f1")
globals()["__git_commit__"] = commit
def _init_git_status():
raw = _git_cmd("git -C \"%(repo)s\" status -s")
globals()["__git_status__"] = raw.split("\n") if raw else []
def _git_cmd(cmd, **kw):
repo = os.path.dirname(__file__)
cmd = cmd % dict(repo=repo, **kw)
null = open(os.devnull, "w")
out = subprocess.check_output(cmd, stderr=null, shell=True)
return out.decode("utf-8").strip()
def version():
git_commit = globals().get("__git_commit__")
if git_commit:
git_status = globals().get("__git_status__", [])
workspace_changed_marker = "*" if git_status else ""
return "%s (dev %s%s)" % (__version__, git_commit,
workspace_changed_marker)
else:
return __version__
_try_init_git_attrs()
|
Python
| 0
|
@@ -694,17 +694,17 @@
.3.0.dev
-7
+8
%22%0A%0A__req
|
156817ee4e11c6a363511d915a9ea5cf96e41fb5
|
add statistics tracking
|
benchbuild/experiments/mse.py
|
benchbuild/experiments/mse.py
|
"""
Test Maximal Static Expansion.
This tests the maximal static expansion implementation by
Nicholas Bonfante (implemented in LLVM/Polly).
"""
from benchbuild.experiment import RuntimeExperiment
from benchbuild.extensions import RunWithTime, RuntimeExtension
from benchbuild.settings import CFG
class PollyMSE(RuntimeExperiment):
"""The polly experiment."""
NAME = "polly-mse"
def actions_for_project(self, project):
"""Compile & Run the experiment with -O3 enabled."""
project.cflags = [
"-O3",
"-fno-omit-frame-pointer",
"-mllvm", "-stats",
"-mllvm", "-polly",
"-mllvm", "-polly-enable-mse",
"-mllvm", "-polly-process-unprofitable",
"-mllvm", "-polly-optree-analyze-known=0",
"-mllvm", "-polly-enable-delicm=0",
]
project.runtime_extension = \
RunWithTime(
RuntimeExtension(project, self,
{'jobs': int(CFG["jobs"].value())}))
return self.default_runtime_actions(project)
|
Python
| 0.000005
|
@@ -138,16 +138,70 @@
y).%0A%22%22%22%0A
+from benchbuild.extensions import ExtractCompileStats%0A
from ben
@@ -632,40 +632,8 @@
r%22,%0A
- %22-mllvm%22, %22-stats%22,%0A
@@ -869,16 +869,88 @@
%5D%0A
+ project.compiler_extension = ExtractCompileStats(project, self)%0A
|
8a178f1249b968e315b8492ed15c033aca119033
|
Reset closed site property
|
bluebottle/clients/tests/test_api.py
|
bluebottle/clients/tests/test_api.py
|
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from rest_framework import status
from bluebottle.clients import properties
from bluebottle.test.utils import BluebottleTestCase
class ClientSettingsTestCase(BluebottleTestCase):
def setUp(self):
super(ClientSettingsTestCase, self).setUp()
self.settings_url = reverse('settings')
@override_settings(CLOSED_SITE=False, TOP_SECRET="*****",EXPOSED_TENANT_PROPERTIES=['closed_site'])
def test_settings_show(self):
# Check that exposed property is in settings api, and other settings are not shown
response = self.client.get(self.settings_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['closedSite'], False)
self.assertNotIn('topSecret', response.data)
# Check that exposed setting gets overwritten by client property
setattr(properties, 'CLOSED_SITE', True)
response = self.client.get(self.settings_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['closedSite'], True)
# Check that previously hidden setting can be exposed
setattr(properties, 'EXPOSED_TENANT_PROPERTIES', ['closed_site', 'top_secret'])
response = self.client.get(self.settings_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('topSecret', response.data)
|
Python
| 0
|
@@ -1462,28 +1462,79 @@
'topSecret', response.data)%0A
+%0A setattr(properties, 'CLOSED_SITE', False)%0A
|
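The added line restores the property so the override cannot leak into tests that run afterwards; a framework-free sketch of the same save-and-restore idiom, with Props standing in for the shared properties object:

class Props(object):
    CLOSED_SITE = False   # the shared default other tests rely on

def test_closed_site():
    setattr(Props, 'CLOSED_SITE', True)   # mutate shared state for this test
    try:
        assert Props.CLOSED_SITE is True
    finally:
        setattr(Props, 'CLOSED_SITE', False)  # reset, mirroring the diff

test_closed_site()
assert Props.CLOSED_SITE is False   # later tests see the default again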
e469163c5b103483b294f9c1f37c918177e7dbce
|
Make prsync.py really useful
|
admin/prsync.py
|
admin/prsync.py
|
#!/usr/bin/env python
import os
import sys
import subprocess
import urlparse
import pygithub3
try:
import angr
angr_dir = os.path.realpath(os.path.join(os.path.dirname(angr.__file__), '../..'))
except ImportError:
print 'Please run this script in the angr virtualenv!'
sys.exit(1)
def main(branch_name=None):
print 'Enter the urls of the pull requests, separated by newlines. EOF to finish:'
urls = sys.stdin.read().strip().split('\n')
if len(urls) == 0:
sys.exit(0)
prs = []
gh = pygithub3.Github()
for url in urls:
try:
path = urlparse.urlparse(url).path
pathkeys = path.split('/')
prs.append(gh.pull_requests.get(int(pathkeys[4]), pathkeys[1], pathkeys[2]))
assert pathkeys[3] == 'pull'
except Exception: # pylint: disable=broad-except
print url, 'is not a github pull request url'
import ipdb; ipdb.set_trace()
sys.exit(1)
if branch_name is None:
branch_name = 'pr/' + prs[0].head['label'].replace(':','/')
for pr in prs:
repo_path = os.path.join(angr_dir, pr.base['repo']['name'])
print '\x1b[32;1m$', 'git', 'checkout', '-B', branch_name, 'master', '\x1b[0m'
subprocess.call(['git', 'checkout', '-B', branch_name, 'master'], cwd=repo_path)
print '\x1b[32;1m$', 'git', 'pull', pr.head['repo']['git_url'], pr.head['ref'], '\x1b[0m'
subprocess.call(['git', 'pull', pr.head['repo']['git_url'], pr.head['ref']], cwd=repo_path)
print '\x1b[32;1m$', 'git', 'push', '-f', '-u', 'origin', branch_name, '\x1b[0m'
subprocess.call(['git', 'push', '-f', '-u', 'origin', branch_name], cwd=repo_path)
if __name__ == '__main__':
if len(sys.argv) > 1:
main(sys.argv[1])
else:
main()
|
Python
| 0.000006
|
@@ -319,16 +319,31 @@
ame=None
+, do_push=False
):%0A p
@@ -1049,12 +1049,18 @@
'pr/
-' +
+%25s-%25d' %25 (
prs%5B
@@ -1092,16 +1092,32 @@
':','/')
+, prs%5B0%5D.number)
%0A%0A fo
@@ -1563,32 +1563,56 @@
cwd=repo_path)%0A
+ if do_push:%0A
print '%5C
@@ -1676,32 +1676,36 @@
name, '%5Cx1b%5B0m'%0A
+
subproce
@@ -1784,34 +1784,551 @@
h)%0A%0A
-if __name__ == '__main__':
+ repolist = ' '.join(pr.base%5B'repo'%5D%5B'name'%5D for pr in prs)%0A%0A print%0A print '%5Cx1b%5B33;1mTo merge this pull request, run the following commands:%5Cx1b%5B0m'%0A print 'REPOS=%25s ./git_all.sh checkout master' %25 repolist%0A print 'REPOS=%25s ./git_all.sh merge %25s' %25 (repolist, branch_name)%0A print 'REPOS=%25s ./git_all.sh push' %25 repolist%0A print 'REPOS=%25s ./git_all.sh branch -D %25s' %25 (repolist, branch_name)%0A%0A%0Aif __name__ == '__main__':%0A s_do_push = False%0A if '-p' in sys.argv:%0A s_do_push = True%0A sys.argv.remove('-n')%0A
%0A
@@ -2374,16 +2374,35 @@
.argv%5B1%5D
+, do_push=s_do_push
)%0A el
@@ -2418,10 +2418,27 @@
main(
+do_push=s_do_push
)%0A
|
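The diff parses the -p flag by hand out of sys.argv; the same behavior expressed with argparse, offered as an alternative sketch rather than the author's approach:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-p', dest='do_push', action='store_true',
                    help='push the merged branch to origin')
parser.add_argument('branch_name', nargs='?', default=None)

args = parser.parse_args(['-p', 'pr/example-1'])
print(args.do_push, args.branch_name)   # True pr/example-1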
9f32fee9da5ccffec9a86f62ab9a55625eb65ff7
|
Fix menu user input
|
admin/wakeup.py
|
admin/wakeup.py
|
#!/bin/env python3
from pathlib import Path
import subprocess
def main():
main.path = Path(".config/wol.cfg")
main.path = main.path.home().joinpath(main.path)
# iterate wake on lan list, wollist
menu = generate_menulist(main.path)
if display_menu(menu):
hostname, hwadress = menu[main.user_choice]
subprocess.run(["wol", hwadress])
def display_menu(menu):
for i, item in enumerate(menu):
print("{} - {}".format((i+1),item))
try:
choice = input("Your choice: ")
main.user_choice = int(choice) - 1
except KeyboardInterrupt:
print()
return False
if check_in_bounds(main.user_choice, menu):
return True
else:
print("{:-^80}".format("Invalid choice"))
display_menu(menu)
def check_in_bounds(choice, l):
length = len(l)
if choice < length and choice >= 0:
return True
else:
return False
def generate_menulist(path):
menu = list()
with path.open() as wollist:
for record in wollist:
menu.append(tuple(record.strip().split(" ")))
return menu
def usage():
pass
if __name__ == "__main__":
main()
|
Python
| 0.002013
|
@@ -59,43 +59,86 @@
ess%0A
-%0Adef main():%0A main.
+import logging%0Aimport sys%0Alogger = logging.getLogger(__name__)%0A%0Awol_
path =
-Path(%22
+%22~/
.con
@@ -153,61 +153,130 @@
cfg%22
-)%0A main.path = main.path.home().joinpath(main.path
+%0A# TODO query list from database when available%0A%0Adef main():%0A global wol_path%0A wol_path = Path(wol_path).expanduser(
)%0A
@@ -342,21 +342,20 @@
enulist(
-main.
+wol_
path)%0A
@@ -360,138 +360,53 @@
-if display_menu(menu):%0A hostname, hwadress = menu%5Bmain.user_choice%5D%0A subprocess.run(%5B%22wol%22, hwadress%5D)%0A%0A
+while True:%0A user_choice = %22%22%0A
%0Adef
@@ -401,21 +401,16 @@
-%0Adef
display_
@@ -423,107 +423,78 @@
enu)
-:
%0A
-for i, item in enumerate(menu):%0A print(%22%7B%7D - %7B%7D%22.format((i+1),item)
+ try:%0A choice = input(%22Your choice: %22
)%0A
-try:%0A
choi
@@ -481,32 +481,37 @@
%22)%0A
+user_
choice = input(%22
@@ -505,36 +505,29 @@
ice = in
-put(%22Your
+t(
choice
-: %22)
+) - 1
%0A
@@ -531,43 +531,158 @@
-main.user_choice = int(choice) - 1%0A
+ if check_in_bounds(user_choice, menu):%0A break%0A else:%0A logger.info(%22Choose a number from the menu.%22)%0A%0A
@@ -688,16 +688,17 @@
except
+(
Keyboard
@@ -710,183 +710,266 @@
rupt
-:%0A print()%0A return False%0A if check_in_bounds(main.user_choice, menu):%0A return True%0A else:%0A print(%22%7B:-%5E80%7D%22.format(%22Invalid choice%22))%0A
+, EOFError):%0A logger.error(%22%5Cnbye%22)%0A sys.exit(0)%0A except (ValueError, TypeError):%0A logger.error(%22Input is not a number.%22)%0A%0A%0A hostname, hwadress = menu%5Buser_choice%5D%0A subprocess.run(%5B%22wol%22, hwadress%5D)%0A%0A
disp
@@ -956,32 +956,37 @@
%5D)%0A%0A
+%0Adef
display_menu(men
@@ -987,21 +987,97 @@
nu(menu)
+:
%0A
+for i, item in enumerate(menu):%0A print(%22%7B%7D - %7B%7D%22.format((i+1),item))
%0A%0Adef ch
|
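The fix restructures the prompt into a retry loop that separates bad numbers (retry) from Ctrl-C/EOF (exit); a compact standalone version of the same control flow:

import sys

def choose(menu):
    while True:
        try:
            choice = int(input('Your choice: ')) - 1
        except (KeyboardInterrupt, EOFError):
            print('\nbye')
            sys.exit(0)
        except (ValueError, TypeError):
            print('Input is not a number.')
            continue
        if 0 <= choice < len(menu):   # in-bounds check, as in check_in_bounds
            return choice
        print('Choose a number from the menu.')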
3a3a8217bd5ff63c77eb4d386bda042cfd7a1196
|
delete the dependency on sale_order_extend
|
sale_order_report/__openerp__.py
|
sale_order_report/__openerp__.py
|
# -*- coding: utf-8 -*-
# © 2016 ClearCorp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Sale Order Report',
'summary': 'Sale order report in Qweb',
'version': '8.0.1.0',
'category': 'Sales',
'website': 'http://clearcorp.cr',
'author': 'ClearCorp',
'license': 'AGPL-3',
'sequence': 10,
'application': False,
'installable': True,
'auto_install': False,
"depends": [
'sale_order_discount',
'sale_order_extended',
'base_reporting',
],
"data": [
'data/report.paperformat.xml',
'data/sale_report.xml',
'views/report_sale_order.xml',
'views/report_sale_order_layout.xml',
'views/report_sale_order_layout_header.xml',
'views/report_sale_order_layout_footer.xml',
],
}
|
Python
| 0.000001
|
@@ -475,39 +475,8 @@
t',%0A
- 'sale_order_extended',%0A
|
7700e8b9a5a62fce875156482170c4fbc4cae902
|
Update shortcut template
|
swjblog/polls/views.py
|
swjblog/polls/views.py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
# Create your views here.
from .models import Question
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
template = loader.get_template('polls/index.html')
context = {
'latest_question_list': latest_question_list,
}
return HttpResponse(template.render(context, request))
def detail(request, question_id):
return HttpResponse("You're looking at question %s." % question_id)
def results(request, question_id):
response = "You're looking at the results of question %s."
return HttpResponse(response % question_id)
def vote(request, question_id):
return HttpResponse("You're voting on question %s." % question_id)
|
Python
| 0.000001
|
@@ -1,40 +1,4 @@
-from django.shortcuts import render%0A
from
@@ -65,16 +65,71 @@
loader%0A
+from django.shortcuts import get_object_or_404, render%0A
%0A%0A# Crea
@@ -179,16 +179,18 @@
stion%0A%0A%0A
+#
def inde
@@ -201,16 +201,18 @@
quest):%0A
+#
late
@@ -273,20 +273,22 @@
e')%5B:5%5D%0A
+#
+
template
@@ -330,16 +330,18 @@
.html')%0A
+#
cont
@@ -348,16 +348,18 @@
ext = %7B%0A
+#
@@ -408,14 +408,18 @@
st,%0A
+#
%7D%0A
+#
@@ -478,112 +478,387 @@
))%0A%0A
-def detail(request, question_id):%0A return HttpResponse(%22You're looking at question %25s.%22 %25 question_id
+# shortcut%0Adef index(request):%0A latest_question_list = Question.objects.order_by('-pub_date')%5B:5%5D%0A context = %7B'latest_question_list': latest_question_list%7D%0A return render(request, 'polls/index.html', context)%0A%0Adef detail(request, question_id):%0A%09question = get_object_or_404(Question, pk=question_id)%0A return render(request, 'polls/detail.html', %7B'question': question%7D
)%0A%0Ad
|
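Applied to the views above, the diff swaps the manual loader/HttpResponse rendering for Django's shortcut helpers: render() collapses the template-plus-context dance into one call, and get_object_or_404() replaces a lookup that would otherwise raise Question.DoesNotExist for a bad id. A sketch of the resulting views.py: the diff keeps the old loader-based index as commented-out lines and leaves its loader import behind, both omitted here, and the tab the hunk introduces before question = is normalized to spaces, since mixing tabs and spaces is a TabError in Python 3.

from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render

from .models import Question


# shortcut
def index(request):
    latest_question_list = Question.objects.order_by('-pub_date')[:5]
    context = {'latest_question_list': latest_question_list}
    return render(request, 'polls/index.html', context)


def detail(request, question_id):
    question = get_object_or_404(Question, pk=question_id)
    return render(request, 'polls/detail.html', {'question': question})


def results(request, question_id):
    response = "You're looking at the results of question %s."
    return HttpResponse(response % question_id)


def vote(request, question_id):
    return HttpResponse("You're voting on question %s." % question_id)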
0e780569b8d40f3b9599df4f7d4a457f23b3f54f
|
Make uploader work
|
stoneridge_uploader.py
|
stoneridge_uploader.py
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import glob
import os
import human_curl as requests
import stoneridge
class StoneRidgeUploader(object):
"""Takes the upload files created by the collator and uploads them to the
graph server
"""
def __init__(self):
self.url = stoneridge.get_config('upload', 'url')
def run(self):
file_pattern = os.path.join(stoneridge.outdir, 'upload_*.json')
upload_files = glob.glob(file_pattern)
for upload in upload_files:
fname = os.path.basename(upload)
with file(upload, 'rb') as f:
requests.post(self.url, files=((fname, f),))
@stoneridge.main
def main():
parser = stoneridge.ArgumentParser()
args = parser.parse_args()
uploader = StoneRidgeUploader()
uploader.run()
|
Python
| 0.000035
|
@@ -248,22 +248,8 @@
ort
-human_curl as
requ
@@ -644,55 +644,16 @@
f
-or upload in upload_files:%0A fname
+iles
=
+%7B
os.p
@@ -669,117 +669,176 @@
ame(
-upload)%0A with file(upload, 'rb') as f:%0A requests.post(self.url, files=((fname, f),)
+fname): open(fname, 'rb')%0A for fname in upload_files%7D%0A requests.post(self.url, files=files)%0A for f in files.values():%0A f.close(
)%0A%0A@
|
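Applied to the class above, the diff swaps human_curl for the standard requests library and replaces the one-POST-per-file loop with a single multipart POST carrying every upload file at once. The resulting run(), reconstructed from the hunks:

    def run(self):
        file_pattern = os.path.join(stoneridge.outdir, 'upload_*.json')
        upload_files = glob.glob(file_pattern)
        # One multipart POST with all files, keyed by basename.
        files = {os.path.basename(fname): open(fname, 'rb')
                 for fname in upload_files}
        requests.post(self.url, files=files)
        for f in files.values():
            f.close()

Note the handles are opened eagerly and closed only after the request returns; a try/finally (or contextlib.ExitStack) around the POST would also close them on a request error, but the diff does not add one.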
00216fef47b24c7c4d371cb350db7305d85e7d7b
|
fix link to numpy func
|
cupy/random/__init__.py
|
cupy/random/__init__.py
|
import numpy as _numpy
def bytes(length):
"""Returns random bytes.
.. note:: This function is just a wrapper for :meth:`numpy.random.bytes`.
The resulting bytes are generated on the host (NumPy), not GPU.
.. seealso:: :meth:`numpy.random.bytes
<numpy.random.mtrand.RandomState.bytes>`
"""
# TODO(kmaehashi): should it be provided in CuPy?
return _numpy.random.bytes(length)
# import class and function
from cupy.random._distributions import beta # NOQA
from cupy.random._distributions import binomial # NOQA
from cupy.random._distributions import chisquare # NOQA
from cupy.random._distributions import dirichlet # NOQA
from cupy.random._distributions import exponential # NOQA
from cupy.random._distributions import f # NOQA
from cupy.random._distributions import gamma # NOQA
from cupy.random._distributions import geometric # NOQA
from cupy.random._distributions import gumbel # NOQA
from cupy.random._distributions import hypergeometric # NOQA
from cupy.random._distributions import laplace # NOQA
from cupy.random._distributions import logistic # NOQA
from cupy.random._distributions import lognormal # NOQA
from cupy.random._distributions import logseries # NOQA
from cupy.random._distributions import multivariate_normal # NOQA
from cupy.random._distributions import negative_binomial # NOQA
from cupy.random._distributions import noncentral_chisquare # NOQA
from cupy.random._distributions import noncentral_f # NOQA
from cupy.random._distributions import normal # NOQA
from cupy.random._distributions import pareto # NOQA
from cupy.random._distributions import poisson # NOQA
from cupy.random._distributions import power # NOQA
from cupy.random._distributions import rayleigh # NOQA
from cupy.random._distributions import standard_cauchy # NOQA
from cupy.random._distributions import standard_exponential # NOQA
from cupy.random._distributions import standard_gamma # NOQA
from cupy.random._distributions import standard_normal # NOQA
from cupy.random._distributions import standard_t # NOQA
from cupy.random._distributions import triangular # NOQA
from cupy.random._distributions import uniform # NOQA
from cupy.random._distributions import vonmises # NOQA
from cupy.random._distributions import wald # NOQA
from cupy.random._distributions import weibull # NOQA
from cupy.random._distributions import zipf # NOQA
from cupy.random._generator import get_random_state # NOQA
from cupy.random._generator import RandomState # NOQA
from cupy.random._generator import reset_states # NOQA
from cupy.random._generator import seed # NOQA
from cupy.random._generator import set_random_state # NOQA
from cupy.random._permutations import permutation # NOQA
from cupy.random._permutations import shuffle # NOQA
from cupy.random._sample import choice # NOQA
from cupy.random._sample import multinomial # NOQA
from cupy.random._sample import rand # NOQA
from cupy.random._sample import randint # NOQA
from cupy.random._sample import randn # NOQA
from cupy.random._sample import random_integers # NOQA
from cupy.random._sample import random_sample # NOQA
from cupy.random._sample import random_sample as random # NOQA
from cupy.random._sample import random_sample as ranf # NOQA
from cupy.random._sample import random_sample as sample # NOQA
|
Python
| 0
|
@@ -118,20 +118,19 @@
er for :
-meth
+obj
:%60numpy.
|
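The one-character role change is a Sphinx fix: numpy.random.bytes is exposed as a module-level alias rather than a bound method, so the :meth: role can fail to resolve there, while the generic :obj: role matches any documented Python object. The note line after the diff reads:

    .. note:: This function is just a wrapper for :obj:`numpy.random.bytes`.

The unchanged cross-reference in the seealso block keeps :meth:, which still resolves because it names the method explicitly via <numpy.random.mtrand.RandomState.bytes>.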
3e784305acba4cc067d6f639032cacd97e1e3496
|
Update N0bot.py
|
snapchat_bots/N0bot.py
|
snapchat_bots/N0bot.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
n0Bot - A bot for me, my friends and the world
Copyright (C) 2014 N07070
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Import the classes
from snapchat_bots import SnapchatBot, Snap
from argparse import ArgumentParser
import os, sys, time
sys.path.insert(0, '/media/HDD/Code/Python/SnapchatBot/snapchat_bots')
from utils import guess_type
credit = """\n
|---------------------------------------------------------|
| * FbStoryBot V3* |
| by N07070 |
|---------------------------------------------------------|"""
class n0bot(SnapchatBot):
# Say hi if the user added me
def on_friend_add(self, friend):
self.log(str(friend)+" has added me !\n")
self.send_snap(friend, Snap.from_file('resources/auto_welcome.png'))
self.log("\nI have "+str(len(bot.get_friends()))+" friends !\n")
# Say goodbye if the user deleted me
def on_friend_delete(self, friend):
self.log(str(friend)+" does not want me anymore...\n")
self.send_snap(friend, Snap.from_file('resources/adieu.png'))
self.delete_friend(friend)
self.log("I have "+str(len(bot.get_friends()))+" friends !\n")
# To add the snap to the story, and watermark it with the username
def on_snap(self, sender, snap):
# Temporarily save the snap
if snap.media_type == 0:
snap.save("temporaire.jpg")
self.log("Saved the snap.")
elif snap.media_type == 1 or snap.media_type == 2:
snap.save("temporaire.mp4")
self.log("Saved the snap.")
else:
self.log("I do not know which type of snap I got !")
# If the snap is an image
if snap.media_type == 0:
os.system('convert temporaire.jpg -fill \'#FFF5EE\' -pointsize 20 -annotate +0+20 "'+str(sender)+'" image.jpg')
try:
self.post_story(Snap.from_file('image.jpg'))
self.log("Posted the snap in the story !")
try:
if sender != args.username:
self.send_snap(sender, Snap.from_file('resources/recu.png'))
self.log("Validated the reception !")
except:
pass
except:
self.send_snap(sender, Snap.from_file('resources/erreur.png'))
self.log("Error while trying to post the snap !")
pass
# os.system('rm image.jpg temporaire.jpg')
# If the snap is a video
elif snap.media_type == 1 or snap.media_type == 2 and os.path.getsize("temporaire") != 0:
os.system('ffmpeg -i temporaire.mp4 -vf drawtext="fontfile=resources/Arial.ttf: text='+str(sender)+':fontcolor=dimgray@1.0:fontsize=00:x=00: y=00" -y output.mp4')
try:
self.post_story(Snap.from_file('output.mp4'))
self.log("Posted the snap in the story !")
except:
self.send_snap(sender, Snap.from_file('resources/erreur.png'))
self.log("Error while trying to post the snap !")
pass
os.system('rm output.mp4 temporaire.mp4')
try:
if sender != args.username:
self.send_snap(sender, Snap.from_file('resources/recu.png'))
self.log("Validated the reception !")
except:
pass
# In case of error
else:
self.send_snap(sender, Snap.from_file('resources/erreur.png'))
self.log("Error while trying to post the snap !")
# To message someone or the message all the users
def message(self, text, friend):
os.system('convert -size 1080x1920 -background "#2C3539" -gravity Center -fill grey -pointsize 80 label:"'+str(text)+'" image.jpg')
self.send_snap(friend, Snap.from_file('image.jpg'))
os.system('rm image.jpg')
self.log('Sent the message : ' + str(text) + ' ')
# To delete all the stories
def clean_story(self):
self.clear_stories()
self.log("Deleted the story !")
self.post_story(Snap.from_file('resources/deleted_story.png'))
if __name__ == '__main__':
parser = ArgumentParser('n0bot')
parser.add_argument('-u', '--username', required=True, type=str,help='Username of the account to run the bot on')
parser.add_argument('-p', '--password', required=True, type=str,help='Password of the account to run the bot on')
parser.add_argument('-msg', '--message', required=False, type=str,help='Text you want to send as a message')
parser.add_argument('-user', '--recipient', required=False, type=str, help='The user you want to send the message to; needs to exist')
parser.add_argument('-d','--delete',required=False, help='If you want to delete the story of the bot')
args = parser.parse_args()
bot = n0bot(args.username, args.password)
# List all the users, then the number of users.
bot.log(credit)
bot.log("I have "+str(len(bot.get_friends()))+" friends !")
if args.message and args.recipient:
if args.recipient == "all" or args.recipient == "All":
friends_list = bot.get_friends()
for friends in friends_list:
print(friends)
time.sleep(0.1)
bot.message(args.message, friends)
else:
bot.message(args.message,args.recipient)
else:
bot.log("You need to provide a valid message and username to send a message.")
if args.delete:
bot.clean_story()
bot.listen()
|
Python
| 0
|
@@ -1096,22 +1096,22 @@
*
-FbStoryBot V3*
+n0bot V3.1 *
|
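The only hunk retitles the ASCII banner; after the diff the credit string reads roughly as below (the exact padding inside the replaced 22-character span is not recoverable from the hunk):

|---------------------------------------------------------|
|                      * n0bot V3.1 *                      |
|                        by N07070                         |
|---------------------------------------------------------|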
5b1790664ad5268a1d1764b81d1fa7e8fea5aabe
|
Bump version number.
|
stormtracks/version.py
|
stormtracks/version.py
|
VERSION = (0, 5, 0, 5, 'alpha')
def get_version(form='short'):
if form == 'short':
return '.'.join([str(v) for v in VERSION[:4]])
elif form == 'long':
return '.'.join([str(v) for v in VERSION][:4]) + '-' + VERSION[4]
else:
raise ValueError('unrecognised form specifier: {0}'.format(form))
__version__ = get_version()
if __name__ == '__main__':
print(get_version())
|
Python
| 0
|
@@ -13,17 +13,17 @@
, 5, 0,
-5
+6
, 'alpha
|
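After the bump, the tuple and the strings derived from it by get_version() are:

VERSION = (0, 5, 0, 6, 'alpha')

# get_version('short') -> '0.5.0.6'
# get_version('long')  -> '0.5.0.6-alpha'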
f39720535c8b5814d01536623fc75f14cb84673d
|
Update comments
|
src/pycrunchbase/pycrunchbase.py
|
src/pycrunchbase/pycrunchbase.py
|
import requests
import six
from .resource import (
Acquisition,
FundingRound,
Organization,
Page,
Person,
Product,
)
@six.python_2_unicode_compatible
class CrunchBase(object):
"""Class that manages talking to CrunchBase API"""
BASE_URL = 'https://api.crunchbase.com/v/2/'
ORGANIZATIONS_URL = BASE_URL + 'organizations'
def __init__(self, api_key=None):
if not api_key:
raise ValueError('API key for CrunchBase not supplied')
self.api_key = api_key
def organizations(self, name):
"""
Search for a organization given a name, returns details of first match
Returns:
Organization or None
"""
url = self.ORGANIZATIONS_URL
data = self._make_request(url, {'name': name})
if not data or data.get('error'):
return None
return Page(name, data)
def organization(self, permalink):
"""Get the details of a organization given a organization's permalink.
Returns:
Organization or None
"""
node_data = self.get_node('organization', permalink)
return Organization(node_data) if node_data else None
def person(self, permalink):
"""Get the details of a person given a person's permalink
Returns:
Person or None
"""
node_data = self.get_node('person', permalink)
return Person(node_data) if node_data else None
def funding_round(self, uuid):
"""Get the details of a FundingRound given the uuid.
Returns
FundingRound or None
"""
node_data = self.get_node('funding-round', uuid)
return FundingRound(node_data) if node_data else None
def acquisition(self, uuid):
"""Get the details of a acquisition given a uuid.
Returns:
Acquisition or None
"""
node_data = self.get_node('acquisition', uuid)
return Acquisition(node_data) if node_data else None
def product(self, permalink):
"""Get the details of a product given a product permalink.
Returns:
Product or None
"""
node_data = self.get_node('product', permalink)
return Product(node_data) if node_data else None
def get_node(self, node_type, uuid, params=None):
"""Get the details of a Node from CrunchBase.
The node_type must match that of CrunchBase's, and the uuid
is either the {uuid} or {permalink} as stated on their docs.
Returns:
dict: containing the data describing this node with the keys
uuid, type, properties, relationships.
Or None if there's an error.
"""
node_url = self.BASE_URL + node_type + '/' + uuid
data = self._make_request(node_url, params=params)
if not data or data.get('error'):
return None
return data
def more(self, page):
"""Given a Page, tries to get more data using the
first_page_url or next_page_url given in the response.
Returns:
None if there is no more data to get or if you have all the data
Relationship with the new data
"""
if page.total_items <= len(page):
return None
if page.first_page_url:
url_to_call = page.first_page_url
return self._page(page.name, url_to_call)
elif page.next_page_url:
url_to_call = page.next_page_url
return self._page(page.name, url_to_call)
else:
return None
def _page(self, name, url):
"""Loads a page for a Node
Args:
name (str): name of page we are getting
url (str): url of the page to make the call to
Returns:
page if we can get the data
None if we have an error
"""
data = self._make_request(url)
if not data or data.get('error'):
return None
return Page(name, data)
def _build_url(self, base_url, params=None):
"""Helper to build urls by appending all queries and the API key
"""
params = params or {}
base_url = '{url}?user_key={api_key}'.format(
url=base_url, api_key=self.api_key)
query_list = ['%s=%s' % (k, v) for k, v in six.iteritems(params)]
if query_list:
base_url += '&' + '&'.join(query_list)
return base_url
def _make_request(self, url, params=None):
"""Makes the actual API call to CrunchBase
"""
final_url = self._build_url(url, params)
response = requests.get(final_url)
response.raise_for_status()
return response.json().get('data')
def __str__(self):
return "pycrunchbase CrunchBase API"
|
Python
| 0
|
@@ -621,30 +621,50 @@
rns
-details of first match
+the first%0A :class:%60Page%60 of results
%0A%0A
@@ -686,36 +686,28 @@
-Organization
+Page
or None%0A
|
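Applied to the class above, both hunks land in the organizations() docstring, aligning it with what the method actually returns: a Page wrapping the search results rather than a single Organization. After the diff it reads (the pre-existing "a organization" article slip sits outside both hunks and survives):

    def organizations(self, name):
        """
        Search for a organization given a name, returns the first
        :class:`Page` of results

        Returns:
            Page or None
        """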