commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
5a21e98880a7e903f335ada22856248f346dad5a
|
remove mutable class-level variable that's always overridden by a fresh instance variable with the same name (at least within this code, and I'm going to assume that's how it's meant to be used)
|
oonib/testhelpers/http_helpers.py
|
oonib/testhelpers/http_helpers.py
|
import json
import random
import string
from twisted.application import internet, service
from twisted.internet import protocol, reactor, defer
from twisted.protocols import basic
from twisted.web import resource, server, static, http
from twisted.web.microdom import escape
from cyclone.web import RequestHandler, Application
from twisted.protocols import policies, basic
from twisted.web.http import Request
class SimpleHTTPChannel(basic.LineReceiver, policies.TimeoutMixin):
"""
This is a simplified version of twisted.web.http.HTTPChannel to overcome
header lowercase normalization. It does not actually implement the HTTP
protocol, but only the subset of it that we need for testing.
What this HTTP channel currently does is process the HTTP Request Line and
the Request Headers and returns them in a JSON datastructure in the order
we received them.
The returned JSON dict looks like so:
{
'request_headers':
[['User-Agent', 'IE6'], ['Content-Length', 200]]
'request_line':
'GET / HTTP/1.1'
}
"""
requestFactory = Request
__first_line = 1
__header = ''
__content = None
length = 0
maxHeaders = 500
requestLine = ''
headers = []
timeOut = 60 * 60 * 12
def __init__(self):
self.requests = []
def connectionMade(self):
self.setTimeout(self.timeOut)
def lineReceived(self, line):
if self.__first_line:
self.requestLine = line
self.__first_line = 0
elif line == '':
# We have reached the end of the headers.
if self.__header:
self.headerReceived(self.__header)
self.__header = ''
self.allHeadersReceived()
self.setRawMode()
elif line[0] in ' \t':
# This is to support header field value folding over multiple lines
# as specified by rfc2616.
self.__header = self.__header+'\n'+line
else:
if self.__header:
self.headerReceived(self.__header)
self.__header = line
def headerReceived(self, line):
try:
header, data = line.split(':', 1)
self.headers.append((header, data.strip()))
except:
log.err("Got malformed HTTP Header request field")
log.err("%s" % line)
def allHeadersReceived(self):
headers_dict = {}
for k, v in self.headers:
if k not in headers_dict:
headers_dict[k] = []
headers_dict[k].append(v)
response = {'request_headers': self.headers,
'request_line': self.requestLine,
'headers_dict': headers_dict
}
json_response = json.dumps(response)
self.transport.write('HTTP/1.1 200 OK\r\n\r\n')
self.transport.write('%s' % json_response)
self.transport.loseConnection()
class HTTPReturnJSONHeadersHelper(protocol.ServerFactory):
protocol = SimpleHTTPChannel
def buildProtocol(self, addr):
p = self.protocol()
p.headers = []
return p
class HTTPTrapAll(RequestHandler):
def _execute(self, transforms, *args, **kwargs):
self._transforms = transforms
defer.maybeDeferred(self.prepare).addCallbacks(
self._execute_handler,
lambda f: self._handle_request_exception(f.value),
callbackArgs=(args, kwargs))
def _execute_handler(self, r, args, kwargs):
if not self._finished:
args = [self.decode_argument(arg) for arg in args]
kwargs = dict((k, self.decode_argument(v, name=k))
for (k, v) in kwargs.iteritems())
# This is where we do the patching
# XXX this is somewhat hackish
d = defer.maybeDeferred(self.all, *args, **kwargs)
d.addCallbacks(self._execute_success, self._execute_failure)
self.notifyFinish().addCallback(self.on_connection_close)
class HTTPRandomPage(HTTPTrapAll):
"""
This generates a random page of arbitrary length and containing the string
selected by the user.
/<length>/<keyword>
XXX this is currently disabled as it is not of use to any test.
"""
isLeaf = True
def _gen_random_string(self, length):
return ''.join(random.choice(string.letters) for x in range(length))
def genRandomPage(self, length=100, keyword=None):
data = self._gen_random_string(length/2)
if keyword:
data += keyword
data += self._gen_random_string(length - length/2)
data += '\n'
return data
def all(self, length, keyword):
length = 100
if length > 100000:
length = 100000
return self.genRandomPage(length, keyword)
HTTPRandomPageHelper = Application([
# XXX add regexps here
(r"/(.*)/(.*)", HTTPRandomPage)
])
|
Python
| 0
|
@@ -1237,25 +1237,8 @@
= ''
-%0A headers = %5B%5D
%0A%0A
@@ -1279,32 +1279,58 @@
__init__(self):%0A
+ self.headers = %5B%5D%0A
self.req
@@ -3089,11 +3089,14 @@
-p =
+return
sel
@@ -3111,48 +3111,8 @@
ol()
-%0A p.headers = %5B%5D%0A return p
%0A%0Acl
|
7138cd2fb7a5dc8a5044f15b19d3d53a1486dec3
|
order by companies by name, helps when viewing adding companies to jobs entry form
|
companies/models.py
|
companies/models.py
|
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from markupfield.fields import MarkupField
from cms.models import NameSlugModel
DEFAULT_MARKUP_TYPE = getattr(settings, 'DEFAULT_MARKUP_TYPE', 'restructuredtext')
class Company(NameSlugModel):
about = MarkupField(blank=True, default_markup_type=DEFAULT_MARKUP_TYPE)
contact = models.CharField(null=True, blank=True, max_length=100)
email = models.EmailField(null=True, blank=True)
url = models.URLField('URL', null=True, blank=True)
logo = models.ImageField(upload_to='companies/logos/', blank=True, null=True)
class Meta:
verbose_name = _('company')
verbose_name_plural = _('companies')
|
Python
| 0
|
@@ -745,8 +745,38 @@
anies')%0A
+ ordering = ('name', )%0A
|
f283dc1f710c8eca452d39f63f5b3b956e5676c8
|
Fix the xs-tape9 option
|
transmutagen/origen.py
|
transmutagen/origen.py
|
import argparse
import os
from subprocess import run
from pyne.utils import toggle_warnings
import warnings
toggle_warnings()
warnings.simplefilter('ignore')
from pyne.origen22 import (nlbs, write_tape5_irradiation, write_tape4,
parse_tape9, merge_tape9, write_tape9, parse_tape6)
from pyne.material import from_atom_frac
ORIGEN = '/home/origen22/code/o2_therm_linux.exe'
decay_TAPE9 = "/home/origen22/libs/decay.lib"
LIBS_DIR = "/home/origen22/libs"
def make_parser():
p = argparse.ArgumentParser('origen', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('xs-tape9', dest='xs_tape9', help="""path to the cross section TAPE9 file. If
the path is not absolute, defaults to looking in {LIBS_DIR}""".format(LIBS_DIR=LIBS_DIR))
p.add_argument('time', help='the time in sec',
type=float)
p.add_argument('--phi', help='the neutron flux in [n/cm^2/sec]',
type=float, default=4e14)
p.add_argument('--decay-tape9', help="path to the decay TAPE9 file.",
default=decay_TAPE9)
p.add_argument('--origen', help="Path to the origen executable",
default=ORIGEN)
return p
def main():
p = make_parser()
try:
import argcomplete
argcomplete.autocomplete(p)
except ImportError:
pass
args = p.parse_args()
xs_tape9 = args.xs_tape9
if not os.path.isabs(xs_tape9):
xs_tape9 = os.path.join(LIBS_DIR, xs_tape9)
time = args.time
phi = args.phi
decay_tape9 = args.decay_tape9
origen = args.origen
parsed_xs_tape9 = parse_tape9(xs_tape9)
parsed_decay_tape9 = parse_tape9(decay_tape9)
merged_tape9 = merge_tape9([parsed_decay_tape9, parsed_xs_tape9])
# Can set outfile to change directory, but the file name needs to be
# TAPE9.INP.
write_tape9(merged_tape9)
decay_nlb, xsfpy_nlb = nlbs(parsed_xs_tape9)
# Can set outfile, but the file name should be called TAPE5.INP.
write_tape5_irradiation("IRF", time/(60*60*24), phi,
xsfpy_nlb=xsfpy_nlb, cut_off=0, out_table_num=[4],
out_table_nes=[True, False, False])
M = from_atom_frac({"U235": 1}, mass=1, atoms_per_molecule=1)
write_tape4(M)
run(origen)
data = parse_tape6()
print(data)
filename = "{library} {time} {phi}.py".format(
library=os.path.basename(xs_tape9),
time=time,
phi=phi,
)
with open('/data/' + filename, 'w') as f:
f.write(repr(data))
print("Writing data to data/" + filename)
if __name__ == '__main__':
main()
|
Python
| 0.999998
|
@@ -591,25 +591,25 @@
ment('xs
--
+_
tape9',
dest='xs
@@ -604,17 +604,20 @@
9',
-dest
+metavar
='xs
-_
+-
tape
|
a0c5299b2d1107972bbfeaf6b9b5e11847c95b59
|
Set workpackage of entry when loading zeiterfassung
|
trex/models/project.py
|
trex/models/project.py
|
# -*- coding: utf-8 -*-
#
# (c) 2014 Bjoern Ricks <bjoern.ricks@gmail.com>
#
# See LICENSE comming with the source of 'trex' for details.
#
from django.conf import settings
from django.core.urlresolvers import reverse_lazy
from django.db import models, transaction
class BaseQuerySet(models.QuerySet):
use_for_related_fields = True
def as_manager(cls, use_for_related_fields=None):
# override method to allow setting use_for_related_fields
if use_for_related_fields is None:
use_for_related_fields = cls.use_for_related_fields
manager_cls = models.manager.Manager.from_queryset(cls)
manager_cls.use_for_related_fields = use_for_related_fields
return manager_cls()
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
class EntryQuerySet(BaseQuerySet):
def get_duration_sum(self):
dsum = self.aggregate(duration=models.Sum("duration"))
return dsum["duration"] or 0
class Project(models.Model):
name = models.CharField(max_length=50, unique=True)
description = models.TextField(blank=True, default="")
active = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
users = models.ManyToManyField(
settings.AUTH_USER_MODEL, through="ProjectUser",
related_name="projects")
class Meta:
ordering = ("name", "active")
def get_absolute_url(self):
return reverse_lazy("project-detail", kwargs={"pk": self.id})
def create_entries_from_zeiterfassung(self, zeiterfassung):
with transaction.atomic():
written = []
skipped = []
for zentry in zeiterfassung.read():
user, _ = ProjectUser.objects.get_or_create(
project=self, user_abbr=zentry.get_user()
)
entry, created = Entry.objects.get_or_create(
project=self, date=zentry.get_date(),
duration=zentry.get_duration(), state=zentry.get_state(),
description=zentry.get_description(), user=user,
)
# raise ValueError(
# "Zeiterfassung entry %s has already been imported "
# "to the project %s" % (zentry, self.name))
if not created:
# entry is already in db
skipped.append(zentry)
continue
tag, created = Tag.objects.get_or_create(
project=self, name=zentry.get_workpackage()
)
entry.tags.add(tag)
written.append(zentry)
return written, skipped
def __unicode__(self):
return "Project %s ID %s" % (self.name, self.id)
class Entry(models.Model):
project = models.ForeignKey(Project, related_name="entries")
date = models.DateField()
duration = models.PositiveIntegerField()
description = models.TextField()
created = models.DateTimeField(auto_now_add=True)
state = models.CharField(max_length="5", blank=True)
workpackage = models.CharField(max_length=255, blank=True)
user = models.ForeignKey("ProjectUser", related_name="entries")
tags = models.ManyToManyField("Tag", related_name="entries")
objects = EntryQuerySet.as_manager(use_for_related_fields=True)
class Meta:
ordering = ("date", "created")
def get_absolute_url(self):
return reverse_lazy("entry-detail", kwargs={"pk": self.id})
class Tag(models.Model):
project = models.ForeignKey(Project, related_name="tags")
name = models.CharField(max_length=255)
description = models.TextField(blank=True, default="")
created = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ("name",)
class ProjectUser(models.Model):
project = models.ForeignKey(Project, related_name="project_users")
user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True)
user_abbr = models.CharField("User abbreviation for the project",
max_length=25)
class Meta:
unique_together = ("project", "user_abbr")
|
Python
| 0
|
@@ -2109,16 +2109,74 @@
r=user,%0A
+ workpackage=zentry.get_workpackage(),%0A
|
da1d77291da0e683a3fbf89b60c9a7f9c2bee64a
|
use set for deps
|
compiler/js/code.py
|
compiler/js/code.py
|
import re
def scan(text):
str_context = False
escape = False
c_comment = False
cpp_comment = False
begin = 0
invalid = []
for i in xrange(0, len(text)):
c = text[i]
if escape:
escape = False
continue
if cpp_comment:
if c == "\n":
cpp_comment = False
end = i
invalid.append((begin, end))
#print "cpp-comment", (begin, end), text[begin:end]
continue
if c_comment:
if text[i: i + 2] == "*/":
end = i + 2
c_comment = False
invalid.append((begin, end))
#print "c-comment", begin, end, text[begin:end]
continue
if str_context and c == "\\":
escape = True
continue
if c == "\"" or c == "'":
str_context = not str_context
if str_context:
begin = i
else:
end = i + 1
invalid.append((begin, end))
#print "string at %d:%d -> %s" %(begin, end, text[begin:end])
continue
if str_context:
continue
if text[i: i + 2] == "//":
begin = i
cpp_comment = True
if text[i: i + 2] == "/*":
c_comment = True
begin = i
return text, invalid
enum_re = re.compile(r'([A-Z]\w*)\.([A-Z]\w*)')
def replace_enums(text, generator, registry):
def replace_enum(m):
try:
component = registry.find_component(generator.package, m.group(1))
return "_globals.%s.prototype.%s" %(component, m.group(2))
except:
return m.group(0)
text = enum_re.sub(replace_enum, text)
#print text
return text
id_re = re.compile(r'([_a-z]\w*)\.')
def process(text, generator, registry):
id_set = registry.id_set
text, invalid = scan(text)
def replace_id(m):
pos = m.start(0)
name = m.group(1)
first = text[pos - 1] != "."
if first == '_globals':
return m.group(0)
if name in id_set:
ok = True
for b, e in invalid:
if pos >= b and pos < e:
ok = False
break
if ok:
return ("this." if first else "") + "_get('%s')." %name
return m.group(0)
text = id_re.sub(replace_id, text)
text = replace_enums(text, generator, registry)
#print text
return text
gets_re = re.compile(r'(this)((?:\._get\(\'.*?\'\))+)(?:\.([a-zA-Z0-9\.]+))?')
tr_re = re.compile(r'\W(qsTr|qsTranslate|tr)\(')
def parse_deps(parent, text):
deps = []
for m in gets_re.finditer(text):
gets = (m.group(1) + m.group(2)).split('.')
gets = map(lambda x: parent if x == 'this' else x, gets)
target = gets[-1]
target = target[target.index('\'') + 1:target.rindex('\'')]
gets = gets[:-1]
path = ".".join(gets)
if target == 'model':
signal = '_row' if m.group(3) != 'index' else '_rowIndex'
deps.append(("%s._get('_delegate')" %parent, signal))
else:
deps.append((path, target))
for m in tr_re.finditer(text):
deps.append((parent + '._context', 'language'))
return deps
def mangle_path(path):
return ["this"] + ["_get('%s')"%name for name in path ]
def generate_accessors(target):
path = target.split('.')
get = ".".join(mangle_path(path[:-1]))
return get, path[-1]
|
Python
| 0
|
@@ -2150,18 +2150,21 @@
%09deps =
-%5B%5D
+set()
%0A%09for m
@@ -2514,20 +2514,17 @@
%09%09deps.a
-ppen
+d
d((%22%25s._
@@ -2572,28 +2572,25 @@
e:%0A%09%09%09deps.a
-ppen
+d
d((path, tar
@@ -2636,20 +2636,17 @@
%09%09deps.a
-ppen
+d
d((paren
|
10e7388eec8d16f5a69e5d4f3b9e6cf56a1c956e
|
Remove explicit byte string from migration 0003 (#298)
|
silk/migrations/0003_request_prof_file.py
|
silk/migrations/0003_request_prof_file.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-08 18:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('silk', '0002_auto_update_uuid4_id_field'),
]
operations = [
migrations.AddField(
model_name='request',
name='prof_file',
field=models.FileField(null=True, upload_to=b''),
),
]
|
Python
| 0.000004
|
@@ -438,17 +438,16 @@
load_to=
-b
''),%0A
|
30452b9fe815a2b68826b739625d1c06886fb17e
|
Remove redundant isinstance() check
|
pact/group.py
|
pact/group.py
|
import itertools
from .base import PactBase
class PactGroup(PactBase):
def __init__(self, pacts=None, lazy=True):
if pacts is None:
pacts = []
self._pacts = list(pacts)
self._finished_pacts = []
self._is_lazy = lazy
super(PactGroup, self).__init__()
def __iadd__(self, other):
self.add(other)
return self
def __iter__(self):
return itertools.chain(self._pacts, self._finished_pacts)
def add(self, pact, absorb=False):
if absorb and isinstance(pact, PactGroup):
if isinstance(pact, PactGroup):
raise NotImplementedError('Absorbing groups is not supported') # pragma: no cover
self._pacts.append(pact)
if absorb:
# pylint: disable=protected-access
while pact._then:
# then might throw, so we attempt it first
self.then(pact._then[0])
pact._then.pop(0)
def _is_finished(self):
has_finished = True
indexes_to_remove = []
for index, pact in enumerate(self._pacts):
if pact.poll():
indexes_to_remove.append(index)
else:
has_finished = False
if self._is_lazy:
break
for index in reversed(indexes_to_remove):
self._finished_pacts.append(self._pacts.pop(index))
return has_finished
def __repr__(self):
return repr(list(self._pacts))
|
Python
| 0.000015
|
@@ -565,56 +565,8 @@
p):%0A
- if isinstance(pact, PactGroup):%0A
|
1b3657adf92b52d731fd0d9248f517a0cee58019
|
Compute the difference with the previous timestamp
|
src/remora_parse_fs.py
|
src/remora_parse_fs.py
|
#!/usr/bin/env python
#
#========================================================================
# HEADER
#========================================================================
#% DESCRIPTION
#% remora_parse_fs
#%
#% DO NOT call this script directory. This is a postprocessing
#% tool called by REMORA
#%
#========================================================================
#- IMPLEMENTATION
#- version REMORA 1.4
#- authors Carlos Rosales (carlos@tacc.utexas.edu)
#- Antonio Gomez (agomez@tacc.utexas.edu)
#- license MIT
#
#========================================================================
# HISTORY
# 2015/12/15: Doesn't use xltop. Instead, Remora creates a file
# for each node with the filesystem load
# 2015/09/09: Python implementation, handles heterogeneous file
# system entries in xltop.txt correctly
# 2015/08/12: Initial version
#========================================================================
import glob
import sys
from collections import defaultdict
if (len(sys.argv) != 2):
print "Error: invalid number of parameters"
print "This script needs the name of a folder as argument"
sys.exit()
initialized=False
results=defaultdict(list)
header=list()
for filename in glob.iglob(sys.argv[1]+'/lustre_*'):
with open(filename) as f:
idx = 0
for line in f:
idx += 1
if "TIMESTAMP" in line:
#Only process the first line for the firs file that
#is processed
#This is how we collect the different filesystems in
#the sytem
if initialized:
continue
initialized = True
parts = line.split()
for i in parts:
#We don't need the TIMESTAMP name
if "TIMESTAMP" in i:
continue
#Everything that it's not TIMESTAMP in the first
#line of the first file, is a filesystem. We append
#all the filesystems into the header list
header.append(i)
continue
parts = line.split()
idx2=0
#We now process each line. We have to skip the first
#column (the timestamp)
for i in parts:
if (idx2==0):
idx2 += 1
continue
#Now, add or append each value read to the appropriate
#item in a list. 'results' is a dictionary, where the key
#is the name of the filesystem (that's why use 'header[i]'
#to access each element of the dictionary) and the elements
#are lists
if ((idx-2)>=len(results[header[idx2-1]])):
results[header[idx2-1]].append(int(i))
else:
results[header[idx2-1]][idx-2] += int(i)
idx2 += 1
#Now we simply format the matrix for a pretty output
out_header=""
numvals=0
max_load=list()
for i in results:
out_header = out_header + i + " "
numvals=len(results[i])
temp_max=0
for j in xrange(numvals):
if results[i][j] > temp_max:
temp_max = results[i][j]
max_load.append(temp_max)
fout = open(sys.argv[1]+"/fs_lustre_total.txt", "w")
fout.write(out_header+"\n")
for j in xrange(numvals):
out_vals = ""
for i in results:
out_vals = out_vals + str(results[i][j]) + " "
fout.write(out_vals +"\n")
fout.close()
idx=0
for i in results:
print "REMORA: MAX load in %10s: %10d" % (i, max_load[idx])
idx += 1
|
Python
| 1
|
@@ -3523,24 +3523,108 @@
in results:%0A
+ if (j==0):%0A out_vals = out_vals + %22 0 %22%0A else:%0A
out_
@@ -3658,16 +3658,32 @@
ts%5Bi%5D%5Bj%5D
+-results%5Bi%5D%5Bj-1%5D
) + %22
|
628e77501340b89a77649a0c798f77d8ef4a4d1f
|
Update processors.py
|
dax/processors.py
|
dax/processors.py
|
import os
import re
import task
import logging
import XnatUtils
#Logger for logs
logger = logging.getLogger('dax')
class Processor(object):
""" Base class for processor """
def __init__(self, walltime_str, memreq_mb, spider_path, version=None, ppn=1, xsitype='proc:genProcData'):
""" init function """
self.walltime_str = walltime_str # 00:00:00 format
self.memreq_mb = memreq_mb # memory required in megabytes
self.set_spider_settings(spider_path, version)
self.ppn = ppn
self.xsitype = xsitype
#get the spider_path right with the version:
def set_spider_settings(self, spider_path, version):
""" function to set the spider version/path/name from the filepath """
if version:
#get the proc_name
proc_name = os.path.basename(spider_path)[7:-3]
#remove any version if there is one
proc_name = re.split("/*_v[0-9]/*", proc_name)[0]
#setting the version and name of the spider
self.version = version
self.name = proc_name+'_v'+self.version.split('.')[0]
self.spider_path = os.path.join(os.path.dirname(spider_path), 'Spider_'+proc_name+'_v'+version+'.py')
else:
self.default_settings_spider(spider_path)
def default_settings_spider(self, spider_path):
""" default function to get the spider version/name """
#set spider path
self.spider_path = spider_path
#set the name and the version of the spider
if len(re.split("/*_v[0-9]/*", spider_path))>1:
self.version = os.path.basename(spider_path)[7:-3].split('_v')[-1]
self.name = re.split("/*_v[0-9]/*", os.path.basename(spider_path)[7:-3])[0]+'_v'+self.version.split('.')[0]
else:
self.version = '1.0.0'
self.name = os.path.basename(spider_path)[7:-3]
# has_inputs - does this object have the required inputs? e.g. NIFTI format of the required scan type and quality and are there no conflicting inputs, i.e. only 1 required by 2 found?
def has_inputs(self): # what other arguments here, could be Project/Subject/Session/Scan/Assessor depending on type of processor?
""" has_inputs function to check if inputs present on XNAT to run the job """
raise NotImplementedError()
# should_run - is the object of the proper object type? e.g. is it a scan? and is it the required scan type? e.g. is it a T1?
def should_run(self): # what other arguments here, could be Project/Subject/Session/Scan/Assessor depending on type of processor?
""" return True if the assessor should exist/ False if not """
raise NotImplementedError()
def write_pbs(self, filename):
""" function that write the pbs for the assessor """
raise NotImplementedError()
class ScanProcessor(Processor):
""" Scan Processor class for processor on a scan on XNAT """
def has_inputs(self):
""" return status,qcstatus
status = 0 if still NEED_INPUTS, -1 if NO_DATA, 1 if NEED_TO_RUN
qcstatus = only when -1 or 0. You can set it to a short string that explain why it's no ready to run
e.g: No NIFTI
"""
raise NotImplementedError()
def __init__(self, scan_types, walltime_str, memreq_mb, spider_path, version=None, ppn=1):
""" init function overridden from base class """
super(ScanProcessor, self).__init__(walltime_str, memreq_mb, spider_path, version, ppn)
if isinstance(scan_types, list):
self.scan_types = scan_types
elif isinstance(scan_types, str):
if scan_types == 'all':
self.scan_types = 'all'
else:
self.scan_types = scan_types.split(',')
else:
self.scan_types = []
def get_assessor_name(self, cscan):
""" return the assessor label """
scan_dict = cscan.info()
subj_label = scan_dict['subject_label']
sess_label = scan_dict['session_label']
proj_label = scan_dict['project_label']
scan_label = scan_dict['scan_label']
return proj_label+'-x-'+subj_label+'-x-'+sess_label+'-x-'+scan_label+'-x-'+self.name
def get_task(self, intf, cscan, upload_dir):
""" return the task object for this assessor """
scan_dict = cscan.info()
assessor_name = self.get_assessor_name(cscan)
scan = XnatUtils.get_full_object(intf, scan_dict)
assessor = scan.parent().assessor(assessor_name)
return task.Task(self, assessor, upload_dir)
def has_resource(self, cscan, resource):
""" return true if resource exists in cscan """
if resource in [cres.info()['label'] for cres in cscan.resources()]:
return True
else:
return False
def should_run(self, scan_dict):
""" should_run function overwrited from base-class to check if it's a right scan"""
return scan_dict['scan_type'] in self.scan_types
def is_unusable(self, cscan):
""" return true if scan unusable """
return cscan.info()['quality'] == "unusable"
class SessionProcessor(Processor):
""" Session Processor class for processor on a session on XNAT """
def has_inputs(self):
""" return status,qcstatus
status = 0 if still NEED_INPUTS, -1 if NO_DATA, 1 if NEED_TO_RUN
qcstatus = only when -1 or 0. You can set it to a short string that explain why it's no ready to run
e.g: No NIFTI
"""
raise NotImplementedError()
def __init__(self, walltime_str, memreq_mb, spider_path, version=None, ppn=1):
""" init function overridden from base class """
super(SessionProcessor, self).__init__(walltime_str, memreq_mb, spider_path, version, ppn)
def should_run(self, session_dict):
""" return if the assessor should exist. Always true on a session """
return True
def get_assessor_name(self, csess):
""" return the assessor label """
session_dict = csess.info()
proj_label = session_dict['project']
subj_label = session_dict['subject_label']
sess_label = session_dict['label']
return proj_label+'-x-'+subj_label+'-x-'+sess_label+'-x-'+self.name
def get_task(self, intf, csess, upload_dir):
""" return the task for this process """
sess_info = csess.info()
assessor_name = self.get_assessor_name(csess)
session = XnatUtils.get_full_object(intf, sess_info)
assessor = session.assessor(assessor_name)
return task.Task(self, assessor, upload_dir)
def processors_by_type(proc_list):
""" function to organize the assessor by type
return two lists: one for scan, one for session
"""
exp_proc_list = list()
scan_proc_list = list()
# Build list of processors by type
for proc in proc_list:
if issubclass(proc.__class__, ScanProcessor):
scan_proc_list.append(proc)
elif issubclass(proc.__class__, SessionProcessor):
exp_proc_list.append(proc)
else:
logger.warn('unknown processor type:'+proc)
return exp_proc_list, scan_proc_list
|
Python
| 0.000001
|
@@ -2726,149 +2726,8 @@
or()
-%0A %0A def write_pbs(self, filename):%0A %22%22%22 function that write the pbs for the assessor %22%22%22%0A raise NotImplementedError()
%0A%0Acl
|
7dc9085bf0665efc3083b64c0b34cb7c8c92ae31
|
update now drops duplicates
|
dblib/dbUpdate.py
|
dblib/dbUpdate.py
|
import pymongo
import multiprocessing
import multiprocessing.connection
import time
SIZE = 128
NUM_NODES = 3
def recv_data(sock,dataQueue,cQueue):
connect = sock.accept()
cQueue.put("listen")
data = connect.recv()
dataQueue.put(data)
connect.close()
print("received data")
exit(0)
def db_send(database,queue):
collection = database.times
t = int(time.time())
doc = int(t/600)
for i in range(queue.qsize()):
data = queue.get()
data = data.split(',')
for j in range(0,len(data)-3,4):
new_posts = {}
new_posts.update({'data':{"mac":data[j+3],'node':int(data[0]),'time':int(data[j+1]),'sigstr':int(data[j+2])}})
collection.update({'_id':doc},{"$push":new_posts},upsert=True)
## dic = {'node':temp[0],'time':temp[1],'sigstr':temp[2],'mac':temp[3]}
## new_posts.append(dic)
## posts.insert_many(new_posts)
print("sent")
exit(0)
def server(host,port):
client = pymongo.MongoClient()
db = client.cheddar
sock = multiprocessing.connection.Listener((host,port))
dq = multiprocessing.Queue()
cq = multiprocessing.Queue()
cq.put("listen")
while True:
try:
task = cq.get(True,1)
except:
task = "none"
if task == "listen":
print("spawning listening thread")
p = multiprocessing.Process(target=recv_data, args=(sock,dq,cq))
p.start()
## if (dq.qsize() == 100):
if dq.qsize() != 0:
print("spawning sending thread")
p = multiprocessing.Process(target=db_send,args=(db,dq))
p.start()
## pass
server('',10000)
|
Python
| 0
|
@@ -748,12 +748,16 @@
,%7B%22$
-push
+addToSet
%22:ne
|
e76d6ad7a4670bfa47ba506343aff2e5f118f976
|
fix rsync options for use in shared scenarios
|
myriadeploy/update_myria_jar_only.py
|
myriadeploy/update_myria_jar_only.py
|
#!/usr/bin/env python
import myriadeploy
import subprocess
import sys
def host_port_list(workers):
return [str(worker[0]) + ':' + str(worker[1]) for worker in workers]
def get_host_port_path(node, default_path):
if len(node) == 2:
(hostname, port) = node
if default_path is None:
raise Exception("Path not specified for node %s" % str(node))
else:
path = default_path
else:
(hostname, port, path) = node
return (hostname, port, path)
def copy_distribution(config):
"Copy the distribution (jar and libs and conf) to compute nodes."
nodes = config['nodes']
description = config['description']
default_path = config['path']
username = config['username']
for node in nodes:
(hostname, _, path) = get_host_port_path(node, default_path)
if hostname != 'localhost':
remote_path = "%s@%s:%s/%s-files" % (username, hostname, path, description)
else:
remote_path = "%s/%s-files" % (path, description)
to_copy = ["libs", "conf"]
args = ["rsync", "--del", "-aLvz"] + to_copy + [remote_path]
if subprocess.call(args):
raise Exception("Error copying distribution to %s" % (hostname,))
def main(argv):
# Usage
if len(argv) != 2:
print >> sys.stderr, "Usage: %s <deployment.cfg>" % (argv[0])
print >> sys.stderr, " deployment.cfg: a configuration file modeled after deployment.cfg.sample"
sys.exit(1)
config = myriadeploy.read_config_file(argv[1])
# Step 1: Copy over libs, "conf", myria
copy_distribution(config)
if __name__ == "__main__":
main(sys.argv)
|
Python
| 0
|
@@ -1105,17 +1105,19 @@
del%22, %22-
-a
+rlD
Lvz%22%5D +
|
4ab79d5f72a2c518d6ced8f1db645ac84c2ce64b
|
fix ALLOWED_HOSTS in base settings
|
treeherder/settings/base.py
|
treeherder/settings/base.py
|
# Django settings for webapp project.
import os
from treeherder import path
# needed to setup celery
import djcelery
djcelery.setup_loader()
# These settings can all be optionally set via env vars, or in local.py:
TREEHERDER_DATABASE_NAME = os.environ.get("TREEHERDER_DATABASE_NAME", "")
TREEHERDER_DATABASE_USER = os.environ.get("TREEHERDER_DATABASE_USER", "")
TREEHERDER_DATABASE_PASSWORD = os.environ.get("TREEHERDER_DATABASE_PASSWORD", "")
TREEHERDER_DATABASE_HOST = os.environ.get("TREEHERDER_DATABASE_HOST", "localhost")
TREEHERDER_DATABASE_PORT = os.environ.get("TREEHERDER_DATABASE_PORT", "")
TREEHERDER_MEMCACHED = os.environ.get("TREEHERDER_MEMCACHED", "")
TREEHERDER_MEMCACHED_KEY_PREFIX = os.environ.get("TREEHERDER_MEMCACHED_KEY_PREFIX", "treeherder")
DEBUG = os.environ.get("TREEHERDER_DEBUG", False)
RABBITMQ_USER = os.environ.get("TREEHERDER_RABBITMQ_USER", "")
RABBITMQ_PASSWORD = os.environ.get("TREEHERDER_RABBITMQ_PASSWORD", "")
RABBITMQ_VHOST = os.environ.get("TREEHERDER_RABBITMQ_VHOST", "")
RABBITMQ_HOST = os.environ.get("TREEHERDER_RABBITMQ_HOST", "")
RABBITMQ_PORT = os.environ.get("TREEHERDER_RABBITMQ_PORT", "")
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ.get("TREEHERDER_DJANGO_SECRET_KEY", "my-secret-key")
ADMINS = [] # TBD
MANAGERS = ADMINS
SITE_ID = 1
ROOT_URLCONF = "treeherder.webapp.urls"
WSGI_APPLICATION = 'treeherder.webapp.wsgi.application'
TIME_ZONE = "America/Los_Angeles"
LANGUAGE_CODE = "en-us"
USE_I18N = False
USE_L10N = True
USE_TZ = False
STATIC_ROOT = path("webapp", "static")
STATIC_URL = "/static/"
# Additional locations of static files
STATICFILES_DIRS = []
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
#"django.contrib.staticfiles.finders.DefaultStorageFinder",
]
TEMPLATE_LOADERS = [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
"django.template.loaders.eggs.Loader",
]
TEMPLATE_DIRS = [
path("webapp", "templates")
]
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django_browserid.context_processors.browserid'
)
MIDDLEWARE_CLASSES = [
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'corsheaders.middleware.CorsMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
AUTHENTICATION_BACKENDS = (
'django_browserid.auth.BrowserIDBackend',
'django.contrib.auth.backends.ModelBackend'
)
# this tells browserid to not create users.
# a user must be created first in the admin
# and then can be recognized with persona login
BROWSERID_CREATE_USER = False
# Path to redirect to on successful login.
LOGIN_REDIRECT_URL = '/'
# Path to redirect to on unsuccessful login attempt.
LOGIN_REDIRECT_URL_FAILURE = '/'
# Path to redirect to on logout.
LOGOUT_REDIRECT_URL = '/'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# 3rd party apps
'south',
'djcelery',
'south',
'rest_framework',
'corsheaders',
'django_browserid',
# treeherder apps
'treeherder.model',
'treeherder.webapp',
'treeherder.log_parser',
'treeherder.etl',
]
LOCAL_APPS = []
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
from kombu import Exchange, Queue
CELERY_QUEUES = (
Queue('default', Exchange('default'), routing_key='default'),
# queue for failed jobs/logs
Queue('fail', Exchange('fail'), routing_key='*.fail'),
# queue for successful jobs/logs
Queue('success', Exchange('success'), routing_key='*.success'),
)
# default value when no task routing info is specified
CELERY_DEFAULT_QUEUE = 'default'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_DEFAULT_ROUTING_KEY = 'default'
CELERYBEAT_SCHEDULER = "djcelery.schedulers.DatabaseScheduler"
# rest-framework settings
REST_FRAMEWORK = {
'DEFAULT_PARSER_CLASSES': (
'rest_framework.parsers.JSONParser',
)
}
SITE_URL = "http://local.treeherder.mozilla.org"
BUILDAPI_PENDING_URL = "https://secure.pub.build.mozilla.org/builddata/buildjson/builds-pending.js"
BUILDAPI_RUNNING_URL = "https://secure.pub.build.mozilla.org/builddata/buildjson/builds-running.js"
# this setting allows requests from any host
CORS_ORIGIN_ALLOW_ALL = True
try:
from .local import *
except ImportError:
pass
INSTALLED_APPS += LOCAL_APPS
TEMPLATE_DEBUG = DEBUG
DATABASES = {
"default": {
"ENGINE": "django.db.backends.mysql",
"NAME": TREEHERDER_DATABASE_NAME,
"USER": TREEHERDER_DATABASE_USER,
"PASSWORD": TREEHERDER_DATABASE_PASSWORD,
"HOST": TREEHERDER_DATABASE_HOST,
"PORT": TREEHERDER_DATABASE_PORT,
}
}
CACHES = {
"default": {
"BACKEND": "treeherder.cache.MemcachedCache",
"LOCATION": TREEHERDER_MEMCACHED,
"TIMEOUT": 0,
# bumping this is effectively equivalent to restarting memcached
"VERSION": 1,
}
}
KEY_PREFIX = TREEHERDER_MEMCACHED_KEY_PREFIX
# celery broker setup
BROKER_URL = 'amqp://{0}:{1}@{2}:{3}/{4}'.format(
RABBITMQ_USER,
RABBITMQ_PASSWORD,
RABBITMQ_HOST,
RABBITMQ_PORT,
RABBITMQ_VHOST
)
API_HOSTNAME = SITE_URL
ALLOWED_HOSTS = [SITE_URL]
|
Python
| 0
|
@@ -5558,16 +5558,61 @@
= True%0A%0A
+ALLOWED_HOSTS = %5B%22.treeherder.mozilla.org%22%5D%0A%0A
try:%0A
@@ -6519,31 +6519,4 @@
URL%0A
-ALLOWED_HOSTS = %5BSITE_URL%5D%0A
|
afbef65bd28f0058edf39579125e2ccb35a72aee
|
Update test_multivariate.py to Python 3.4
|
nb_twitter/test/test_multivariate.py
|
nb_twitter/test/test_multivariate.py
|
# -*- coding: utf-8 -*-
# test_multivariate.py
# nb_twitter/nb_twitter/bayes
#
# Created by Thomas Nelson <tn90ca@gmail.com>
# Preston Engstrom <pe12nh@brocku.ca>
# Created..........................2015-06-29
# Modified.........................2015-06-29
#
# This script was developed for use as part of the nb_twitter package
from nb_twitter.bayes import multivariate
train_class = ['c', 'j']
train_docs = [['c', 'chinese beijing chinese'],
['c', 'chinese chinese shanghai'],
['c', 'chinese macao'],
['j', 'tokyo japan chinese']]
test_docs = 'chinese chinese chinese tokyo japan'
classifier = multivariate.Multivariate(train_class, train_docs)
classifier.train()
results = classifier.run(test_docs)
print "C\t\t=", classifier.C
print "D\t\t=", classifier.D
print "N\t\t=", classifier.N
print "V\t\t=", classifier.V
print "Nc\t\t=", classifier.Nc
print "Prior\t=", classifier.prior
print "Prob\t=", classifier.prob
print
print(results)
|
Python
| 0.000009
|
@@ -257,18 +257,18 @@
2015-06-
-29
+30
%0A#%0A# Thi
@@ -761,17 +761,17 @@
)%0A%0Aprint
-
+(
%22C%5Ct%5Ct=%22
@@ -784,23 +784,24 @@
sifier.C
+)
%0Aprint
-
+(
%22D%5Ct%5Ct=%22
@@ -814,23 +814,24 @@
sifier.D
+)
%0Aprint
-
+(
%22N%5Ct%5Ct=%22
@@ -844,23 +844,24 @@
sifier.N
+)
%0Aprint
-
+(
%22V%5Ct%5Ct=%22
@@ -874,23 +874,24 @@
sifier.V
+)
%0Aprint
-
+(
%22Nc%5Ct%5Ct=
@@ -906,23 +906,24 @@
ifier.Nc
+)
%0Aprint
-
+(
%22Prior%5Ct
@@ -946,15 +946,16 @@
rior
+)
%0Aprint
-
+(
%22Pro
@@ -976,16 +976,17 @@
ier.prob
+)
%0Aprint%0Ap
@@ -998,8 +998,9 @@
results)
+%0A
|
45b0af75824c1f7715c464ae2dfc35ac8d7a9767
|
Add additional_tags parameter to upload and pass through client args to httplib2.
|
cloudshark/cloudshark.py
|
cloudshark/cloudshark.py
|
import httplib2
import io
import json
import os
import urllib
class CloudsharkError(Exception):
def __init__(self, msg, error_code=None):
self.msg = msg
self.error_code = error_code
def __str__(self):
return repr('%s: %s' % (self.error_code, self.msg))
class Cloudshark(object):
def __init__(self,url,token):
self.url = url
self.token = token
def get_info(self,id):
"""Get the info about a particular capture by id."""
url = '%s/api/v1/%s/info/%s' % (self.url,self.token,id)
http = httplib2.Http()
(response,content) = http.request(url,method='GET')
http_status = response.get('status')
if http_status != '200':
print(response)
raise CloudsharkError('Error retrieving: %s'%url,http_status)
return json.loads(content)
def search_by_file_name(self,file_name):
"""Search for a capture by file name."""
url = '%s/api/v1/%s/search?search[filename]=%s' % (self.url,self.token,urllib.quote(file_name))
http = httplib2.Http()
(response,content) = http.request(url,method='GET')
http_status = response.get('status')
if http_status != '200':
print(response)
raise CloudsharkError('Error retrieving: %s'%url,http_status)
return json.loads(content)
def upload(self,file_object,file_name=None):
"""Upload a capture file to Cloudshark."""
url = '%s/api/v1/%s/upload' % (self.url,self.token)
BOUNDARY = "LANDSHARKCLOUDSHARK"
headers = {}
headers['Content-Type'] = 'multipart/form-data; boundary=%s' % BOUNDARY
if file_name is None:
file_name = os.path.basename(file_object.name)
file_content = file_object.read()
body_lines = ['--' + BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="%s"' % file_name,
'Content-Type: application/octet-stream',
'',
file_content,
'--' + BOUNDARY + '--',
'']
b = io.BytesIO()
for body_line in body_lines:
if isinstance(body_line,unicode):
b.write(body_line.encode('utf-8'))
else:
b.write(body_line)
b.write(b'\r\n')
body = b.getvalue()
http = httplib2.Http()
(response,content) = http.request(url,method='POST',body=body,headers=headers)
http_status = response.get('status')
if http_status != '200':
print(response)
raise CloudsharkError('Error retrieving: %s'%url,http_status)
# content is a dict with "id" and "filename" entries.
return json.loads(content)
|
Python
| 0
|
@@ -343,19 +343,34 @@
rl,token
+,client_args=%7B%7D
):%0A
-
@@ -410,16 +410,55 @@
= token
+%0A self.client_args = client_args
%0A%0A de
@@ -624,32 +624,50 @@
= httplib2.Http(
+**self.client_args
)%0A (respo
@@ -1147,32 +1147,50 @@
= httplib2.Http(
+**self.client_args
)%0A (respo
@@ -1493,24 +1493,59 @@
le_name=None
+,additional_tags=None,comments=None
):%0A %22
@@ -2154,34 +2154,540 @@
_content
-,%0A
+%5D%0A if additional_tags is not None:%0A body_lines += %5B'--' + BOUNDARY,%0A 'Content-Disposition: form-data; name=%22additional_tags%22',%0A 'Content-Type: text/plain',%0A '',%0A ','.join(additional_tags)%5D%0A if comments is not None:%0A body_lines += %5B'--' + BOUNDARY,%0A 'Content-Disposition: form-data; name=%22comments%22',%0A 'Content-Type: text/plain',%0A '',%0A comments%5D%0A body_lines += %5B
'--' + B
@@ -2991,16 +2991,16 @@
value()%0A
-
@@ -3020,16 +3020,34 @@
b2.Http(
+**self.client_args
)%0A
|
1b668fa59624bc1f73f5fceebecbbadfc0038156
|
support arrow DictionaryType
|
packages/vaex-arrow/vaex_arrow/dataset.py
|
packages/vaex-arrow/vaex_arrow/dataset.py
|
__author__ = 'maartenbreddels'
import logging
import pyarrow as pa
import pyarrow.parquet as pq
import vaex.dataset
import vaex.file.other
from .convert import column_from_arrow_array
logger = logging.getLogger("vaex_arrow")
class DatasetArrow(vaex.dataset.DatasetLocal):
"""Implements storage using arrow"""
def __init__(self, filename=None, table=None, write=False):
super(DatasetArrow, self).__init__(name=filename, path=filename, column_names=[])
self._write = write
if table is None:
self._load()
else:
self._load_table(table)
def _load(self):
source = pa.memory_map(self.path)
reader = pa.ipc.open_stream(source)
table = pa.Table.from_batches([b for b in reader])
self._load_table(table)
def _load_table(self, table):
self._length_unfiltered = self._length_original = table.num_rows
self._index_end = self._length_original = table.num_rows
for col in table.columns:
name = col.name
# TODO: keep the arrow columns, and support and test chunks
arrow_array = col.data.chunks[0]
column = column_from_arrow_array(arrow_array)
self.columns[name] = column
self.column_names.append(name)
self._save_assign_expression(name, vaex.expression.Expression(self, name))
@classmethod
def can_open(cls, path, *args, **kwargs):
return path.rpartition('.')[2] == 'arrow'
@classmethod
def get_options(cls, path):
return []
@classmethod
def option_to_args(cls, option):
return []
class DatasetParquet(DatasetArrow):
def _load(self):
# might not be optimal, but it works, we can always see if we can
# do mmapping later on
table = pq.read_table(self.path)
self._load_table(table)
vaex.file.other.dataset_type_map["arrow"] = DatasetArrow
vaex.file.other.dataset_type_map["parquet"] = DatasetParquet
|
Python
| 0
|
@@ -865,17 +865,16 @@
ltered =
-
self._l
@@ -930,17 +930,16 @@
ex_end =
-
self._l
@@ -1149,16 +1149,330 @@
unks%5B0%5D%0A
+ if isinstance(arrow_array.type, pa.DictionaryType):%0A column = column_from_arrow_array(arrow_array.indices)%0A labels = column_from_arrow_array(arrow_array.dictionary).tolist()%0A self._categories%5Bname%5D = dict(labels=labels, N=len(labels))%0A else:%0A
@@ -1521,17 +1521,16 @@
_array)%0A
-%0A
|
52239a9b6cd017127d52c29ac0e2a0d3818e7d9e
|
Add new lab_members fieldset_website to fieldsets for cms_lab_members
|
cms_lab_members/admin.py
|
cms_lab_members/admin.py
|
from django.contrib import admin
from cms.admin.placeholderadmin import PlaceholderAdminMixin
from lab_members.models import Scientist
from lab_members.admin import ScientistAdmin
class CMSScientistAdmin(PlaceholderAdminMixin, ScientistAdmin):
fieldsets = [
ScientistAdmin.fieldset_basic,
ScientistAdmin.fieldset_advanced,
]
admin.site.unregister(Scientist)
admin.site.register(Scientist, CMSScientistAdmin)
|
Python
| 0
|
@@ -296,16 +296,57 @@
_basic,%0A
+ ScientistAdmin.fieldset_website,%0A
|
dda3ebfcb9fff7f7304ee72c087dca9f8556fe6c
|
Update yadisk.py
|
cogs/utils/api/yadisk.py
|
cogs/utils/api/yadisk.py
|
import json
import requests
DEVICE_ID = '141f72b7-fd02-11e5-981a-00155d860f42'
DEVICE_NAME = 'DroiTaka'
CLIENT_ID = 'b12710fc26ee46ba82e34b97f08f2305'
CLIENT_SECRET = '4ff2284115644e04acc77c54526364d2'
class YaDisk(object):
def __init__(self, token):
self.session = requests.session()
self.session.headers.update({'Authentication': 'OAuth ' + str(token),})
def get_key_url():
format_url = "https://oauth.yandex.ru/authorize?response_type=code&client_id={}&device_id={}&device_name={}&force_confirm=yes"
return format_url.format(CLIENT_ID, DEVICE_ID, DEVICE_NAME)
def get_token(key):
res = requests.post('http://oauth.yandex.ru/token', data = {
'grant_type': 'authorization_code',
'code': key,
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'device_id': DEVICE_ID,
'device_name': DEVICE_NAME,
})
print(res.text)
return res.json()['access_token']
def _get(self, url, *args, **kwargs):
return self.session.get(url, *args, **kwargs)
def _post(self, url, data, *args, **kwargs):
return self.session.post(url, {'data': json.dumps(data), }, *args, **kwargs)
def list_files(self, dir_path):
file_list = []
res = self._get("https://cloud-api.yandex.net:443/v1/disk/resources", params={"path": dir_path,})
for file in res.json()['_embedded']['items']:
if file['type'] == 'file':
file_list.append(file['name'])
return file_list
def direct_link(self, file_path):
response = self.session._get("https://cloud-api.yandex.net:443/v1/disk/resources/download",
params={"path": file_path,})
return response.json()['href']
|
Python
| 0.000001
|
@@ -623,16 +623,17 @@
st('http
+s
://oauth
|
2eb1535c3bb137216548bacaf9f7a22cd9e0e8a2
|
Fix incorrect double-quotes.
|
colour/plotting/graph.py
|
colour/plotting/graph.py
|
# -*- coding: utf-8 -*-
"""
Automatic Colour Conversion Graph Plotting
==========================================
Defines the automatic colour conversion graph plotting objects:
- :func:`colour.plotting.plot_automatic_colour_conversion_graph`
"""
from __future__ import division
from colour.graph import CONVERSION_GRAPH, CONVERSION_GRAPH_NODE_LABELS
from colour.utilities import is_networkx_installed
if is_networkx_installed(): # pragma: no cover
import networkx as nx
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = ['plot_automatic_colour_conversion_graph']
def plot_automatic_colour_conversion_graph(filename, prog='fdp', args=''):
"""
Plots *Colour* automatic colour conversion graph using
`Graphviz <https://www.graphviz.org/>`__ and
`pyraphviz <https://pygraphviz.github.io>`__.
Parameters
----------
filename : unicode
Filename to use to save the image.
prog : unicode, optional
{'neato', 'dot', 'twopi', 'circo', 'fdp', 'nop'},
*Graphviz* layout method.
args : unicode, optional
Additional arguments for *Graphviz*.
Returns
-------
AGraph
*Pyraphviz* graph.
Notes
-----
- This definition does not directly plot the *Colour* automatic colour
conversion graph but instead write it to an image.
Examples
--------
>>> import tempfile
>>> import colour
>>> from colour import read_image
>>> from colour.plotting import plot_image
>>> filename = '{0}.png'.format(tempfile.mkstemp()[-1])
>>> _ = plot_automatic_colour_conversion_graph(filename, 'dot')
... # doctest: +SKIP
>>> plot_image(read_image(filename)) # doctest: +SKIP
.. image:: ../_static/Plotting_Plot_Colour_Automatic_Conversion_Graph.png
:align: center
:alt: plot_automatic_colour_conversion_graph
"""
if is_networkx_installed(raise_exception=True): # pragma: no cover
agraph = nx.nx_agraph.to_agraph(CONVERSION_GRAPH)
for node in agraph.nodes():
node.attr.update(label=CONVERSION_GRAPH_NODE_LABELS[node.name])
agraph.node_attr.update(
style='filled',
shape='circle',
color='#2196F3FF',
fillcolor='#2196F370',
fontname='Helvetica',
fontcolor="#263238")
agraph.edge_attr.update(color='#26323870')
for node in ('CIE XYZ', 'RGB', 'Spectral Distribution'):
agraph.get_node(node.lower()).attr.update(
shape='doublecircle',
color='#673AB7FF',
fillcolor='#673AB770',
fontsize=30)
for node in ('ATD95', 'CAM16', 'CIECAM02', 'Hunt', 'LLAB',
'Nayatani95', 'RLAB'):
agraph.get_node(node.lower()).attr.update(
color='#00BCD4FF', fillcolor='#00BCD470')
agraph.draw(filename, prog=prog, args=args)
return agraph
|
Python
| 0.000178
|
@@ -2568,17 +2568,17 @@
lor=
-%22
+'
#263238
-%22
+'
)%0A
|
8b0cb0bdd3e5c5a31de2f6c8dce5ed041940a80e
|
fix bad indentation
|
skopt/tests/test_acquisition.py
|
skopt/tests/test_acquisition.py
|
from math import log
import numpy as np
import pytest
from scipy import optimize
from sklearn.multioutput import MultiOutputRegressor
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from skopt.acquisition import _gaussian_acquisition
from skopt.acquisition import gaussian_acquisition_1D
from skopt.acquisition import gaussian_ei
from skopt.acquisition import gaussian_lcb
from skopt.acquisition import gaussian_pi
from skopt.learning import GaussianProcessRegressor
from skopt.learning.gaussian_process.kernels import Matern
from skopt.learning.gaussian_process.kernels import WhiteKernel
from skopt.space import Space
from skopt.utils import cook_estimator
class ConstSurrogate:
def predict(self, X, return_std=True):
X = np.array(X)
return np.zeros(X.shape[0]), np.ones(X.shape[0])
# This is used to test that given constant acquisition values at
# different points, acquisition functions "EIps" and "PIps"
# prefer candidate points that take lesser time.
# The second estimator mimics the GP regressor that is fit on
# the log of the input.
class ConstantGPRSurrogate(object):
def __init__(self, space):
self.space = space
def fit(self, X, y):
"""
The first estimator returns a constant value.
The second estimator is a gaussian process regressor that
models the logarithm of the time.
"""
X = np.array(X)
y = np.array(y)
gpr = cook_estimator("GP", self.space, random_state=0)
gpr.fit(X, np.log(np.ravel(X)))
self.estimators_ = []
self.estimators_.append(ConstSurrogate())
self.estimators_.append(gpr)
return self
@pytest.mark.fast_test
def test_acquisition_ei_correctness():
# check that it works with a vector as well
X = 10 * np.ones((4, 2))
ei = gaussian_ei(X, ConstSurrogate(), -0.5, xi=0.)
assert_array_almost_equal(ei, [0.1977966] * 4)
@pytest.mark.fast_test
def test_acquisition_pi_correctness():
# check that it works with a vector as well
X = 10 * np.ones((4, 2))
pi = gaussian_pi(X, ConstSurrogate(), -0.5, xi=0.)
assert_array_almost_equal(pi, [0.308538] * 4)
@pytest.mark.fast_test
def test_acquisition_variance_correctness():
# check that it works with a vector as well
X = 10 * np.ones((4, 2))
var = gaussian_lcb(X, ConstSurrogate(), kappa='inf')
assert_array_almost_equal(var, [-1.0] * 4)
@pytest.mark.fast_test
def test_acquisition_lcb_correctness():
# check that it works with a vector as well
X = 10 * np.ones((4, 2))
lcb = gaussian_lcb(X, ConstSurrogate(), kappa=0.3)
assert_array_almost_equal(lcb, [-0.3] * 4)
@pytest.mark.fast_test
def test_acquisition_api():
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
y = rng.randn(10)
gpr = GaussianProcessRegressor()
gpr.fit(X, y)
for method in [gaussian_ei, gaussian_lcb, gaussian_pi]:
assert_array_equal(method(X, gpr).shape, 10)
assert_raises(ValueError, method, rng.rand(10), gpr)
def check_gradient_correctness(X_new, model, acq_func, y_opt):
analytic_grad = gaussian_acquisition_1D(
X_new, model, y_opt, acq_func)[1]
num_grad_func = lambda x: gaussian_acquisition_1D(
x, model, y_opt, acq_func=acq_func)[0]
num_grad = optimize.approx_fprime(X_new, num_grad_func, 1e-5)
assert_array_almost_equal(analytic_grad, num_grad, 3)
@pytest.mark.fast_test
def test_acquisition_gradient():
rng = np.random.RandomState(0)
X = rng.randn(20, 5)
y = rng.randn(20)
X_new = rng.randn(5)
mat = Matern()
wk = WhiteKernel()
gpr = GaussianProcessRegressor(kernel=mat + wk)
gpr.fit(X, y)
for acq_func in ["LCB", "PI", "EI"]:
check_gradient_correctness(X_new, gpr, acq_func, np.max(y))
@pytest.mark.fast_test
@pytest.mark.parametrize("acq_func", ["EIps", "PIps"])
def test_acquisition_per_second(acq_func):
X = np.reshape(np.linspace(4.0, 8.0, 10), (-1, 1))
y = np.vstack((np.ones(10), np.ravel(np.log(X)))).T
cgpr = ConstantGPRSurrogate(Space(((1.0, 9.0),)))
cgpr.fit(X, y)
X_pred = np.reshape(np.linspace(1.0, 11.0, 20), (-1, 1))
indices = np.arange(6)
vals = _gaussian_acquisition(X_pred, cgpr, y_opt=1.0, acq_func=acq_func)
for fast, slow in zip(indices[:-1], indices[1:]):
assert_greater(vals[slow], vals[fast])
acq_wo_time = _gaussian_acquisition(
X, cgpr.estimators_[0], y_opt=1.2, acq_func=acq_func[:2])
acq_with_time = _gaussian_acquisition(
X, cgpr, y_opt=1.2, acq_func=acq_func)
assert_array_almost_equal(acq_wo_time / acq_with_time, np.ravel(X), 2)
def test_gaussian_acquisition_check_inputs():
model = ConstantGPRSurrogate(Space(((1.0, 9.0),)))
with pytest.raises(ValueError) as err:
vals = _gaussian_acquisition(np.arange(1, 5), model)
assert("it must be 2-dimensional" in exc.value.args[0])
@pytest.mark.fast_test
@pytest.mark.parametrize("acq_func", ["EIps", "PIps"])
def test_acquisition_per_second_gradient(acq_func):
rng = np.random.RandomState(0)
X = rng.randn(20, 10)
# Make the second component large, so that mean_grad and std_grad
# do not become zero.
y = np.vstack((X[:, 0], np.abs(X[:, 0])**3)).T
for X_new in [rng.randn(10), rng.randn(10)]:
gpr = cook_estimator("GP", Space(((-5.0, 5.0),)), random_state=0)
mor = MultiOutputRegressor(gpr)
mor.fit(X, y)
check_gradient_correctness(X_new, mor, acq_func, 1.5)
|
Python
| 0.00089
|
@@ -4995,20 +4995,16 @@
model)%0A
-
asse
@@ -5041,10 +5041,10 @@
in e
-xc
+rr
.val
|
14043a783e2ebd6c4a27a38f08ca75e6e31dd5d8
|
Add show admin panel
|
cinemair/shows/admin.py
|
cinemair/shows/admin.py
|
from django.contrib import admin
from . import models
class ShowsInline(admin.TabularInline):
model = models.Show
extra = 0
|
Python
| 0
|
@@ -128,8 +128,407 @@
tra = 0%0A
+%0A%0A@admin.register(models.Show)%0Aclass Show(admin.ModelAdmin):%0A fieldsets = (%0A (None, %7B%22fields%22: (%22cinema%22, %22movie%22, %22datetime%22)%7D),%0A )%0A list_display = (%22id%22, %22cinema%22, %22movie%22, %22datetime%22)%0A #list_editable = (,)%0A list_filter = (%22cinema%22,)%0A search_fields = (%22id%22, %22cinema__name%22, %22movie__title%22, %22datetime%22)%0A date_hierarchy = %22datetime%22%0A ordering = (%22cinema%22, %22datetime%22)%0A
|
059254eed4bcc32bbb2ded134f70701b535c1b15
|
Fix stray merge content
|
data/admin.py
|
data/admin.py
|
from django.contrib import admin
from models import (SchoolData, HeadTeacher, InboundSMS, AcademicAchievementCode,
TeacherPerformanceData, LearnerPerformanceData)
<<<<<<< HEAD
from actions import export_select_fields_csv_action
from rts.utils import DistrictIdFilter
=======
from actions import export_as_csv_action
from rts.utils import DistrictIdFilter, ManagePermissions
>>>>>>> develop
class SchoolDataAdmin(ManagePermissions):
list_display = ["emis", "name", "classrooms", "teachers",
"teachers_g1", "teachers_g2", "boys_g2", "girls_g2", "created_by", "created_at"]
actions = [export_select_fields_csv_action("Export selected objects as CSV file",
fields= [
("emis", "EMIS"),
("created_at", "Created At"),
("created_by", "Created By"),
("name", "Name"),
("boys_g2", "Grade 2 Boys"),
("girls_g2", "Grade 2 Girls"),
("classrooms", "Classrooms"),
("teachers", "Teachers"),
("teachers_g2", "Grade 2 Teachers"),
("teachers_g1", "Grade 1 Teachers"),
],
header=True
)]
class HeadTeacherAdmin(ManagePermissions):
list_display = ["emis", "first_name", "last_name", "msisdn", "gender", "date_of_birth", "is_zonal_head", "zonal_head_name","created_at"]
actions = [export_select_fields_csv_action("Export selected objects as CSV file",
fields= [
("created_at", "Created At"),
("first_name", "First Name"),
("last_name", "Last Name"),
("msisdn", "MSISDN"),
("gender", "Gender"),
("date_of_birth", "Date of Birth"),
("is_zonal_head", "Is Zonal Head"),
("zonal_head_name", "Zonal Head Name"),
("emis", "EMIS"),
],
header=True
)]
def queryset(self, request):
"""
Limits queries for pages that belong to district admin
"""
qs = super(HeadTeacherAdmin, self).queryset(request)
return DistrictIdFilter(parent=self, request=request, qs=qs).queryset()
class TeacherPerformanceDataAdmin(ManagePermissions):
list_display = ["emis", "gender", "age", "years_experience", "g2_pupils_present", "g2_pupils_registered",
"classroom_environment_score", "t_l_materials", "pupils_materials_score",
"pupils_books_number", "reading_lesson", "pupil_engagement_score", "attitudes_and_beliefs",
"training_subtotal", "ts_number", "reading_assessment", "reading_total", "academic_level",
"created_by", "created_at"]
actions = [export_select_fields_csv_action("Export selected objects as CSV file",
fields= [
("emis", "EMIS"),
("created_at", "Created At"),
("created_by", "Created By"),
("gender", "Gender"),
("age", "Age"),
("ts_number", "TS Number"),
("academic_level", "Academic Level"),
("years_experience", "Years of Experience"),
("g2_pupils_registered", "Grade 2 Pupils Registered"),
("g2_pupils_present", "Grade 2 Pupils Present"),
("classroom_environment_score", "Classroom Environment Score"),
("pupils_books_number", "Pupils Book Number"),
("pupils_materials_score", "Pupils Materials Score"),
("t_l_materials", "T L Materials"),
("reading_lesson", "Reading lesson"),
("pupil_engagement_score", "Pupil Engagement Score"),
("attitudes_and_beliefs", "Attitudes And Beliefs"),
("training_subtotal", "Training Subtotal"),
("reading_assessment", "Reading Assessment"),
("reading_total", "Reading Total"),
],
header=True
)]
def queryset(self, request):
"""
Limits queries for pages that belong to district admin
"""
qs = super(TeacherPerformanceDataAdmin, self).queryset(request)
return DistrictIdFilter(parent=self, request=request, qs=qs).queryset()
class LearnerPerformanceDataAdmin(ManagePermissions):
list_display = ["emis", "gender", "total_number_pupils", "phonetic_awareness", "vocabulary",
"reading_comprehension", "writing_diction", "below_minimum_results", "minimum_results",
"desirable_results", "outstanding_results", "created_by", "created_at"]
actions = [export_select_fields_csv_action("Export selected objects as CSV file",
fields= [
("emis", "EMIS"),
("created_by", "Created By"),
("created_at", "Created At"),
("gender", "Gender"),
("total_number_pupils", "Total Number of Pupils"),
("phonetic_awareness", "Phonetic Awareness"),
("writing_diction", "Writing Diction"),
("reading_comprehension", "Reading Comprehension"),
("vocabulary", "Vocabulary"),
("outstanding_results", "Outstanding Results"),
("desirable_results", "Desirable Results"),
("minimum_results", "Minimum Results"),
("below_minimum_results", "Below Minimum Results"),
],
header=True
)]
def queryset(self, request):
"""
Limits queries for pages that belong to district admin
"""
qs = super(LearnerPerformanceDataAdmin, self).queryset(request)
return DistrictIdFilter(parent=self, request=request, qs=qs).queryset()
class InboundSMSAdmin(ManagePermissions):
list_display = ["message", "created_by", "created_at"]
actions = [export_select_fields_csv_action("Export selected objects as CSV file",
fields= [
("message", "Message"),
("created_by", "Created By"),
("created_at", "Created At"),
],
header=True
)]
def queryset(self, request):
"""
Limits queries for pages that belong to district admin
"""
qs = super(InboundSMSAdmin, self).queryset(request)
return DistrictIdFilter(parent=self, request=request, qs=qs).queryset()
class AcademicAchievementCodeAdmin(admin.ModelAdmin):
list_display = ["id", "achievement"]
actions = None
def has_add_permission(self, request):
return False
def __init__(self, *args, **kwargs):
super(AcademicAchievementCodeAdmin, self).__init__(*args, **kwargs)
self.list_display_links = (None, )
admin.site.register(SchoolData, SchoolDataAdmin)
admin.site.register(HeadTeacher, HeadTeacherAdmin)
admin.site.register(TeacherPerformanceData, TeacherPerformanceDataAdmin)
admin.site.register(LearnerPerformanceData, LearnerPerformanceDataAdmin)
admin.site.register(InboundSMS, InboundSMSAdmin)
admin.site.register(AcademicAchievementCode, AcademicAchievementCodeAdmin)
|
Python
| 0.000003
|
@@ -180,21 +180,8 @@
ta)%0A
-%3C%3C%3C%3C%3C%3C%3C HEAD%0A
from
@@ -232,96 +232,8 @@
ion%0A
-from rts.utils import DistrictIdFilter%0A=======%0Afrom actions import export_as_csv_action%0A
from
@@ -290,24 +290,8 @@
ons%0A
-%3E%3E%3E%3E%3E%3E%3E develop%0A
%0A%0A%0Ac
|
9e1b3893a676f0fff7d601245fd06ec5df7fb61f
|
bump version
|
circleparse/__init__.py
|
circleparse/__init__.py
|
from circleparse.replay import parse_replay_file, parse_replay
__version__ = "6.0.0"
|
Python
| 0
|
@@ -74,13 +74,13 @@
__ = %226.
-0
+1
.0%22%0A
|
0251d41a46165f76b8e76da716bbc280723ce767
|
Make the circuits.web.loggers.Logger understand and respect X-Forwarded-For request headers when logging the remote host
|
circuits/web/loggers.py
|
circuits/web/loggers.py
|
# Module: loggers
# Date: 6th November 2008
# Author: James Mills, prologic at shortcircuit dot net dot au
"""Logger Component
This module implements Logger Components.
"""
import os
import sys
import rfc822
import datetime
from circuits.core import handler, BaseComponent
def formattime():
now = datetime.datetime.now()
month = rfc822._monthnames[now.month - 1].capitalize()
return ("[%02d/%s/%04d:%02d:%02d:%02d]" %
(now.day, month, now.year, now.hour, now.minute, now.second))
class Logger(BaseComponent):
    """Logs one combined-log-format line per completed web response.

    Output goes to ``logger`` (a logging-style object) when given,
    otherwise to ``file`` (path string, open file object, or stdout).
    """

    channel = "web"

    format = "%(h)s %(l)s %(u)s %(t)s \"%(r)s\" %(s)s %(b)s \"%(f)s\" \"%(a)s\""

    def __init__(self, file=None, logger=None, **kwargs):
        super(Logger, self).__init__(**kwargs)

        if type(file) is str:
            self.file = open(os.path.abspath(os.path.expanduser(file)), "a")
        elif type(file) is file or hasattr(file, "write"):
            self.file = file
        else:
            self.file = sys.stdout

        self.logger = logger

    @handler("response")
    def response(self, response):
        self.log(response)

    def log(self, response):
        """Format and emit one access-log line for *response*."""
        request = response.request
        remote = request.remote
        outheaders = response.headers
        inheaders = request.headers
        protocol = "HTTP/%d.%d" % request.protocol

        # Behind a reverse proxy the socket peer is the proxy itself, so
        # prefer the client address reported via X-Forwarded-For and only
        # fall back to the peer for direct connections.
        if "X-Forwarded-For" in inheaders:
            host = inheaders["X-Forwarded-For"]
        else:
            host = remote.name or remote.ip

        atoms = {"h": host,
                 "l": "-",
                 "u": getattr(request, "login", None) or "-",
                 "t": formattime(),
                 "r": "%s %s %s" % (request.method, request.path, protocol),
                 "s": str(response.code),
                 "b": outheaders.get("Content-Length", "") or "-",
                 "f": inheaders.get("Referer", ""),
                 "a": inheaders.get("User-Agent", ""),
                 }
        for k, v in atoms.items():
            if isinstance(v, unicode):
                v = v.encode("utf8")
            elif not isinstance(v, str):
                v = str(v)
            # Fortunately, repr(str) escapes unprintable chars, \n, \t, etc
            # and backslash for us. All we have to do is strip the quotes.
            v = repr(v)[1:-1]
            # Escape double-quote.
            atoms[k] = v.replace("\"", "\\\"")

        if self.logger is not None:
            self.logger.info(self.format % atoms)
        else:
            self.file.write(self.format % atoms)
            self.file.write("\n")
            self.file.flush()
|
Python
| 0
|
@@ -1355,46 +1355,176 @@
-atoms = %7B%22h%22: remote.name or remote.ip
+if %22X-Forwarded-For%22 in inheaders:%0A host = inheaders%5B%22X-Forwarded-For%22%5D%0A else:%0A host = remote.name or remote.ip%0A%0A atoms = %7B%22h%22: host
,%0A
|
c1beb674852b7362101f066c2f357a4276c83528
|
Add python3 compatibility to smart_cache
|
cjktools/smart_cache.py
|
cjktools/smart_cache.py
|
# -*- coding: utf-8 -*-
#
# smart_cache.py
# cjktools
#
"""
This module implements a smart caching function, with dependencies.
"""
import common
import cPickle as pickle
import types
import os
from os import path
def disk_proxy_direct(method, cache_file, dependencies=()):
    """
    Creates a proxy for an expensive method which is cached in a single
    file.

    @param method: The method whose return values to cache.
    @param cache_file: Where to cache the return values.
    @param dependencies: Any files which are dependencies for the cache.
    @return: A callable object that looks just like method.
    """
    def proxy_method(*args, **params):
        cached_val = try_cache(cache_file, args, params, dependencies)
        if cached_val is None:
            if 'CACHE_DEBUG' in os.environ:
                print('[cache miss: %s]' % os.path.basename(cache_file))
            # cache miss, expensive fetch and repopulate cache
            # (apply() and the print statement were removed in Python 3)
            result = method(*args, **params)
            if 'CACHE_DEBUG' in os.environ:
                print('[storing: %s]' % os.path.basename(cache_file))
            store_cache_object(result, cache_file, args, params)
            return result
        else:
            if 'CACHE_DEBUG' in os.environ:
                print('[cache hit: %s]' % os.path.basename(cache_file))
            # cache hit
            return cached_val

    proxy_method.__doc__ = method.__doc__
    return proxy_method
def disk_proxy(cache_file, dependencies):
    """
    Decorator version of disk_proxy_direct().
    """
    def decorator(method):
        return disk_proxy_direct(method, cache_file, dependencies)
    return decorator
def memory_proxy(method):
    """
    Creates an in-memory proxy for the given method. This method is
    suitable for use wrapping expensive methods with small return values.

    This proxy will demonstrate unbounded growth if you keep using the
    method on new input. The references kept here prevent the results and
    arguments from being garbage-collected. If that's not what you want,
    consider the weakref.proxy() method in the standard python library.
    """
    method_dict = {}

    def proxy_method(*args, **params):
        # Keyword arguments are folded into the key as a tuple of items so
        # the key is hashable.
        key = (args, tuple(params.items()))
        if key in method_dict:
            # cache hit
            return method_dict[key]
        else:
            # cache miss, expensive call and insert
            # (apply() was removed in Python 3; call directly instead)
            result = method(*args, **params)
            method_dict[key] = result
            return result

    proxy_method.__doc__ = method.__doc__
    return proxy_method
def try_cache(filename, method_args=[], method_params={}, dependencies=[]):
    """
    Determines whether the cached object is still fresh (if one exists),
    and if so returns that object. Otherwise returns None.

    @param filename: The filename to look for a cached entry in.
    @param method_args: The arguments passed to the method we're trying to
        cache.
    @param method_params: As for method_args, but dictionary arguments.
    @return: None or a stored value
    """
    # A stale or missing cache file is treated the same as a miss.
    if needs_update(filename, dependencies):
        return None

    try:
        i_stream = common.sopen(filename, 'r', encoding=None)
        stored_args = pickle.load(i_stream)
        stored_params = pickle.load(i_stream)
        # Only reuse the cached object if it was produced by an identical
        # call (same positional and keyword arguments).
        if stored_args == method_args and stored_params == method_params:
            obj = pickle.load(i_stream)
            i_stream.close()
            return obj
        else:
            i_stream.close()
            return None
    except:
        # could get several errors here:
        #   - badly pickled file
        #   - changed local modules when loading pickled value
        #   - filesystem permissions or problems
        # NOTE(review): the bare except is deliberate best-effort behaviour
        # (any failure counts as a cache miss), but it will also hide bugs.
        return None
def store_cache_object(obj, filename, method_args=[], method_params={}):
    """
    Creates a smart cache object in the file.

    @param obj: The object to cache.
    @param filename: The location of the cache file.
    @param method_args: Any arguments which were passed to the cached
        method.
    @param method_params: Any keyword parameters passed to the cached
        method.
    """
    # The call arguments are stored ahead of the value so that try_cache()
    # can verify the entry matches the call before unpickling the value.
    o_stream = common.sopen(filename, 'w', encoding=None)
    pickle.dump(method_args, o_stream, pickle.HIGHEST_PROTOCOL)
    pickle.dump(method_params, o_stream, pickle.HIGHEST_PROTOCOL)
    pickle.dump(obj, o_stream, pickle.HIGHEST_PROTOCOL)
    o_stream.close()
    return
def needs_update(target, dependencies):
    """
    Determine if the target is older than any of its dependencies.

    @param target: A filename for the target.
    @param dependencies: A sequence of dependency filenames or modules.
    @return: True if the target is missing or older than any dependency.
    """
    if not path.exists(target):
        return True

    target_time = path.getmtime(target)
    for dependency in dependencies:
        # `type(x) in (str, unicode)` raised NameError on Python 3;
        # isinstance(x, str) is the portable check.
        if isinstance(dependency, str):
            filenames = [dependency]
        elif isinstance(dependency, types.ModuleType):
            filenames = _get_module_dependencies(dependency)
        else:
            raise TypeError("Unknown dependency type %s" % (type(dependency)))

        for filename in filenames:
            if path.getmtime(filename) > target_time:
                return True

    # No dependency was newer than the target (the original used a
    # for/else here, which is equivalent since the loop never breaks).
    return False
def _get_module_dependencies(module):
"""
Determines the file dependencies of a module. Adds one level of module
includes.
"""
dependency_set = set()
dependency_set.add(module.__file__)
for item in module.__dict__.values():
if isinstance(item, types.ModuleType) and hasattr(item, '__file__'):
dependency_set.add(item.__file__)
return dependency_set
|
Python
| 0.000001
|
@@ -147,33 +147,8 @@
mon%0A
-import cPickle as pickle%0A
impo
@@ -187,16 +187,86 @@
t path%0A%0A
+from six import string_types%0Afrom six.moves import cPickle as pickle%0A%0A
%0Adef dis
@@ -866,33 +866,33 @@
print
-
+(
'%5Bcache miss: %25s
@@ -916,32 +916,33 @@
name(cache_file)
+)
%0A # c
@@ -1010,36 +1010,32 @@
esult =
-apply(
method
-,
+(*
args,
+**
params)%0A
@@ -1099,17 +1099,17 @@
print
-
+(
'%5Bstorin
@@ -1138,32 +1138,33 @@
name(cache_file)
+)
%0A sto
@@ -1318,17 +1318,17 @@
print
-
+(
'%5Bcache
@@ -1367,16 +1367,17 @@
he_file)
+)
%0A
@@ -2439,28 +2439,24 @@
t =
-apply(
method
-,
+(*
args,
+**
para
@@ -4805,19 +4805,25 @@
if
-typ
+isinstanc
e(depend
@@ -4830,26 +4830,22 @@
ency
-) in (str, unicode
+, string_types
):%0A
@@ -4880,17 +4880,16 @@
ndency%5D%0A
-%0A
@@ -4996,17 +4996,16 @@
ndency)%0A
-%0A
|
3026d78dc6e2a0f6f391819370f2369df94e77eb
|
Move Data Portal / Other to bottom of contact select
|
ckanext/nhm/settings.py
|
ckanext/nhm/settings.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-nhm
# Created by the Natural History Museum in London, UK
from collections import OrderedDict
# the order here matters as the default option should always be first in the dict so that it is
# automatically selected in combo boxes that use this list as a source for options
# Maps collection/department names to their contact email addresses.
COLLECTION_CONTACTS = OrderedDict([
    ('Data Portal / Other', 'data@nhm.ac.uk'),
    ('Algae, Fungi & Plants', 'm.carine@nhm.ac.uk'),
    ('Economic & Environmental Earth Sciences', 'g.miller@nhm.ac.uk'),
    ('Fossil Invertebrates & Plants', 'z.hughes@nhm.ac.uk'),
    ('Fossil Vertebrates & Anthropology', 'm.richter@nhm.ac.uk'),
    ('Insects', 'g.broad@nhm.ac.uk'),
    ('Invertebrates', 'm.lowe@nhm.ac.uk'),
    ('Library & Archives', 'library@nhm.ac.uk'),
    ('Mineral & Planetary Sciences', 'm.rumsey@nhm.ac.uk'),
    ('Vertebrates', 'simon.loader@nhm.ac.uk'),
])
|
Python
| 0
|
@@ -380,55 +380,8 @@
t(%5B%0A
- ('Data Portal / Other', 'data@nhm.ac.uk'),%0A
@@ -864,11 +864,58 @@
c.uk'),%0A
+ ('Data Portal / Other', 'data@nhm.ac.uk'),%0A
%5D)%0A
|
2105143c63292ec225258b3ca129156d858cf972
|
Use OrderParameterDistribution objects in wetting.
|
coex/wetting.py
|
coex/wetting.py
|
"""Find the wetting properties of a direct or expanded ensemble
grand canonical simulation.
"""
import numpy as np
def get_cos_theta(s, d):
    """Calculate the cosine of the contact angle.

    Args:
        s: A float (or numpy array): the spreading coefficient.
        d: A float (or numpy array): the drying coefficient.

    Returns:
        The cosine of the contact angle as a float or numpy array.
    """
    # Equivalent to -(s - d) / (s + d), written without the leading negation.
    return (d - s) / (s + d)
def get_drying_coefficient(lnpi):
    """Calculate the drying coefficient.

    Args:
        lnpi: The logarithm of the probability distribution.

    Returns:
        The dimensionless drying coefficient (beta*d*A).

    See also:
        get_spreading_coefficient()
    """
    potential = -lnpi
    # The plateau is averaged over the first half of the distribution.
    half = len(potential) // 2
    return np.amin(potential) - np.mean(potential[:half])
def get_expanded_ensemble_coefficients(valley, plateau, index, reference):
    """Calculate the change in spreading/drying coefficient for a pair of
    simulations.

    Args:
        valley: The logarithm of the probability distribution of the
            valley region.
        plateau: The logarithm of the probability distribution of the
            plateau region.
        index: The reference subensemble number.
        reference: The reference spreading/drying coefficient.

    Returns:
        A numpy array with the spreading/drying coefficient of each
        subensemble.
    """
    # Shift both regions relative to the reference subensemble, then apply
    # the shifts to the reference coefficient.
    valley_shift = valley - valley[index]
    plateau_shift = plateau - plateau[index]
    return reference - valley_shift + plateau_shift
def get_spreading_coefficient(lnpi):
    """Calculate the spreading coefficient.

    Args:
        lnpi: The logarithm of the probability distribution.

    Returns:
        The dimensionless spreading coefficient (beta*s*A).

    See Also:
        get_drying_coefficient()
    """
    potential = -lnpi
    # The plateau is averaged over the second half of the distribution
    # (the drying coefficient uses the first half).
    half = len(potential) // 2
    return np.amin(potential) - np.mean(potential[half:])
def get_tension(s, d):
    """Calculate the interfacial tension.

    Args:
        s: A float (or numpy array): the spreading coefficient.
        d: A float (or numpy array): the drying coefficient.

    Returns:
        The interfacial tension in the appropriate units.
    """
    # Equivalent to -0.5 * (s + d).
    return -(s + d) / 2.0
|
Python
| 0
|
@@ -462,36 +462,44 @@
ing_coefficient(
-lnpi
+distribution
):%0A %22%22%22Calcul
@@ -549,55 +549,96 @@
-lnpi: The logarithm of the probability distribu
+distribution: An OrderParameterDistribution from a direct (GC)%0A drying simula
tion
@@ -778,36 +778,62 @@
potential = -
-lnpi
+distribution.log_probabilities
%0A valley = np
@@ -1156,159 +1156,126 @@
ey:
-The logarithm of the probability distribution of the%0A valley region.%0A plateau: The logarithm of the probability distribution of the%0A
+An OrderParameterDistribution from the valley region.%0A plateau: An OrderParameterDistribution from the plateau%0A
@@ -1281,23 +1281,16 @@
-plateau
region.
@@ -1622,20 +1622,28 @@
ficient(
-lnpi
+distribution
):%0A %22
@@ -1704,60 +1704,99 @@
-potential: The logarithm of the probability distribu
+distribution: An OrderParameterDistribution from a direct (GC)%0A spreading simula
tion
@@ -1948,12 +1948,38 @@
= -
-lnpi
+distribution.log_probabilities
%0A
|
a962e631b0fc997a6a5569244463c3f96da8b671
|
add extra fwhm2sigma test
|
lib/neuroimaging/fmri/tests/test_utils.py
|
lib/neuroimaging/fmri/tests/test_utils.py
|
import unittest
import numpy as N
import scipy
from neuroimaging.fmri.utils import CutPoly, WaveFunction, sigma2fwhm, fwhm2sigma
class utilTest(unittest.TestCase):
    """Tests for the fmri utility functions."""

    def test_CutPoly(self):
        """CutPoly(p, (lo, hi)) evaluates t**p, zeroed outside [lo, hi)."""
        f = CutPoly(2.0)
        t = N.arange(0, 10.0, 0.1)
        y = f(t)
        scipy.testing.assert_almost_equal(y, [x*x for x in t])

        f = CutPoly(2.0, (5, 7))
        y = f(t)
        scipy.testing.assert_almost_equal(y, [x*x*(x >= 5 and x < 7) for x in t])

        f = CutPoly(2.0, (None, 7))
        y = f(t)
        scipy.testing.assert_almost_equal(y, [x*x*(x < 7) for x in t])

        f = CutPoly(2.0, (5, None))
        y = f(t)
        scipy.testing.assert_almost_equal(y, [x*x*(x >= 5) for x in t])

    def test_WaveFunction(self):
        """WaveFunction is `height` on [start, start+duration), else 0."""
        start = 5.0
        duration = 2.0
        height = 3.0
        f = WaveFunction(5, 2, 3)
        t = N.arange(0, 10.0, 0.1)
        y = f(t)
        scipy.testing.assert_almost_equal(y, [height*(x >= start and x < start + duration) for x in t])

    def test_sigma_fwhm(self):
        """
        ensure that fwhm2sigma and sigma2fwhm are inverses of each other
        """
        fwhm = N.arange(1.0, 5.0, 0.1)
        sigma = N.arange(1.0, 5.0, 0.1)
        scipy.testing.assert_almost_equal(sigma2fwhm(fwhm2sigma(fwhm)), fwhm)
        # The original only checked one composition order; check the
        # inverse in the other direction too.
        scipy.testing.assert_almost_equal(fwhm2sigma(sigma2fwhm(sigma)), sigma)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
Python
| 0.000001
|
@@ -1173,32 +1173,72 @@
(1.0, 5.0, 0.1)%0A
+ sigma = N.arange(1.0, 5.0, 0.1)%0A
scipy.te
@@ -1299,16 +1299,96 @@
, fwhm)%0A
+ scipy.testing.assert_almost_equal(fwhm2sigma(sigma2fwhm(sigma)), sigma)%0A
|
43c46d69580ebdfaa663653fabd3222bdf51fa76
|
Remove pdb from pepper_rpg.py
|
pepper_rpg.py
|
pepper_rpg.py
|
class Color:
    """ Adds basic colors for print statements """
    # ANSI escape sequences; END resets the terminal back to default.
    RED = '\033[91m'
    PURPLE = '\033[95m'
    YELLOW = '\033[93m'
    END = '\033[0m'
def showInstructions():
    """Print the welcome banner, the mission text and the command list."""
    divider = "========"
    print(Color.PURPLE + "Welcome to Pepper RPG v 1.0!" + Color.END)
    print(divider)
    intro = ("In this game you are Pepper, a small Russian Blue cat"
             " who is craving a jelly filled donut. \nYour mission "
             "if you choose to accept it is to acquire "
             "a donut by any means necessary.")
    print(intro)
    print(divider)
    showHelp()
def showDescription(currentRoom):
    """ prints the description of the current room """
    # Reads the module-level `rooms` dict; currentRoom is a room key.
    print(Color.PURPLE + rooms[currentRoom]["description"] + '\n' + Color.END)
def showHelp():
    """Print the list of available commands, one per line."""
    lines = (
        "Commands:",
        "'help'",
        "'go [direction]'",
        "'(l)ook'",
        "'quit'",
    )
    for line in lines:
        print(line)
def showStatus(currentRoom):
    """ print the player's current status """
    # Reads the module-level `rooms` dict for the room's display name.
    print("----------------------------")
    print("You are in the {}".format(rooms[currentRoom]["name"]))
    print("-----------------------------")
def movePlayer(direction, currentRoom):
    """Return the room reached by going *direction*, or the current room
    (with a message) when that exit does not exist."""
    destination = rooms[currentRoom].get(direction)
    if destination is None:
        print("You can't go that way! \n")
        return currentRoom
    return destination
def parseMove(move, current_room, directions):
    """ takes in a player move and does appropriate actions

    Args:
        move: the player's input, lower-cased and split into words
            (may be empty if the player just pressed enter).
        current_room: key of the room the player is currently in.
        directions: direction words accepted as bare commands.

    Returns:
        dict with 'has_player_moved', 'current_room' and 'used_look' keys.
    """
    move_results = {
        'has_player_moved': False,
        'current_room': current_room,
        'used_look': False,
    }

    # Guard: an empty input line used to crash with IndexError on move[0].
    if not move:
        print("Not a valid command")
        return move_results

    # Handle go command
    if move[0] == "go":
        # Guard: bare 'go' used to crash with IndexError on move[1].
        if len(move) < 2:
            print("Not a valid command")
            return move_results
        move_results['current_room'] = movePlayer(move[1], current_room)
        move_results['has_player_moved'] = True
    elif str(move[0]) in directions:
        move_results['current_room'] = movePlayer(move[0], current_room)
        move_results['has_player_moved'] = True
    elif move[0] == "help":
        showHelp()
    elif move[0] == "look" or move[0] == "l":
        showDescription(current_room)
        move_results['used_look'] = True
    elif move[0] == "quit" or move[0] == "exit":
        print(Color.RED + "Doh, you didn't win this time. Thanks "
              + "for playing Pepper RPG, have a nice day!" + Color.END)
        exit(0)
    else:
        print("Not a valid command")

    return move_results
# Dictionary that links rooms to other room positions
# Each room key maps to a description, a display name, and one entry per
# exit direction whose value is the destination room key.
rooms = {
    1: {
        "description": "The bedroom of the house has two windows with"
        + " blue curtains and a walk-in closet. There is an exit south.",
        "name": "Bedroom",
        "south": 2,
    },
    2: {
        "description": "The living room has a vintage red couch and hardwood "
        + "floor. There are exits north, north-west, south, and west.",
        "name": "Living room",
        "north": 1,
        "north-west": 5,
        "south": 3,
        "west": 4,
    },
    3: {
        "description": "The bonus room has two desks, a cat tree, and several"
        + " musical instruments. There is an exit north.",
        "name": "Bonus room",
        "north": 2,
    },
    4: {
        "description": "The bathroom has a sink, toliet, and a white bathtub"
        + " with a teal shower curtain. There is an exit east.",
        "name": "Bathroom",
        "east": 2,
    },
    5: {
        "description": "The kitchen has a black and white checkered floor with a"
        + "wooden table and two chairs. There is an exit south.",
        "name": "Kitchen",
        "south": 2,
    }
}

# NOTE(review): this runs at import time, printing the banner whenever the
# module is imported — confirm whether it should move under the
# `if __name__ == "__main__"` guard instead.
showInstructions()
def main():
    """Run the interactive game loop until the player quits."""
    room = 1
    status = "ongoing"
    skip_status = False
    directions = ["north", "north-east", "north-west", "south", "east", "west"]

    while status == "ongoing":
        # Suppress the status banner on the turn right after a 'look'.
        if not skip_status:
            showStatus(room)
        skip_status = False

        command = input(">").lower().split()
        outcome = parseMove(command, room, directions)

        if outcome['has_player_moved']:
            room = outcome['current_room']
        if outcome['used_look']:
            skip_status = True
# Script entry point.
if __name__ == "__main__":
    main()
|
Python
| 0.000009
|
@@ -4219,46 +4219,8 @@
oom%0A
- # import pdb; pdb.set_trace()%0A
|
108763ace5f250922387aacffab4a668155cfe67
|
deploy script changes
|
deploy/fabfile.py
|
deploy/fabfile.py
|
# -*- coding: utf-8 -*-
# http://docs.fabfile.org/en/1.5/tutorial.html
from __future__ import with_statement
from fabric.api import *
from contextlib import contextmanager as _contextmanager
@_contextmanager
def virtualenv():
    """Run the wrapped fabric commands with the virtualenv activated."""
    with prefix(env.virtualenv_activate):
        yield
# Deployment target configuration (fabric environment).
env.hosts = ['176.58.125.166']
env.user = 'rootio'
env.project_root = '/home/rootio/public_python/rootio_web'
env.virtualenv_activate = 'source .venv/bin/activate'
# Forward the local SSH agent so git operations on the server can use it.
env.forward_agent = True
def git_update():
    """Pull the latest master, preserving local changes via git stash."""
    stash_str = run("git stash")
    run("git pull origin master")
    # Only pop if something was actually stashed.
    if stash_str.strip() != 'No local changes to save':
        run("git stash pop")
def restart_apache():
    """Gracefully restart Apache (lets in-flight requests finish)."""
    sudo("/etc/init.d/apache2 graceful")
def restart_cache():
    """Restart memcached.

    NOTE(review): pty=False — presumably so the init script can detach
    cleanly from the fabric session; confirm.
    """
    sudo("/etc/init.d/memcached restart", pty=False)
def touch_wsgi():
    """Trigger a mod_wsgi code reload without restarting Apache."""
    # Touching the deploy.wsgi file will cause apache's mod_wsgi to
    # reload all python modules having to restart apache.
    with cd(env.project_root):
        run("touch deploy/wsgi_handler.py")
def update(full=False):
    """Deploy the latest code and dependencies to the server.

    NOTE(review): the ``full`` flag is currently unused — confirm whether a
    "full" update was meant to also run migrations / static collection.
    """
    with cd(env.project_root):
        git_update()
        with virtualenv():
            run("pip install -r requirements.txt")
            #todo: alembic update
        #todo: static files
        touch_wsgi()
    restart_cache()
    #restart_apache()
def deploy():
    """Fabric entry point: update the remote deployment."""
    update()
def initdb():
    """Initialise the local database via manage.py."""
    local("python manage.py initdb")
def reset():
    """
    Reset local debug env.
    """
    # Recreate the scratch instance directory from scratch.
    local("rm -rf /tmp/instance")
    local("mkdir /tmp/instance")
def runserver():
    """
    Run local server, for debugging only.
    Need to move up one directory, from deploy to see manage.py
    """
    with lcd('..'):
        reset()
        initdb()
        with virtualenv():
            local("python manage.py run")
|
Python
| 0.000001
|
@@ -422,17 +422,16 @@
'source
-.
venv/bin
@@ -985,23 +985,23 @@
loy/
-wsgi_handler.py
+rootio_web.wsgi
%22)%0A%0A
@@ -1171,29 +1171,42 @@
-#todo: alembic update
+run(%22python manage.py migrate up%22)
%0A
@@ -1255,16 +1255,17 @@
i()%0A
+#
restart_
|
34fa7433ea6f04089a420e0392605147669801d1
|
Revert "added more crappy codes"
|
dummy.py
|
dummy.py
|
import os
def foo():
    """
    This is crappy function. should be removed using git checkout
    """
    # `True == True` is always true, so the original if/else always took
    # the first branch; return the constant directly.
    return True
def main():
    # Intentionally a no-op entry point for this demo module.
    pass

if __name__ == '__main__':
    main()
|
Python
| 0
|
@@ -93,61 +93,19 @@
%22%22%0A%09
-if True == True:%0A%09%09return True%0A%09else:%0A%09%09return False%0A
+return None
%0A%0Ade
|
4c25ae60bcf65d206e9bb3ee7467a2106021a490
|
fix usage of version with svn import (#71)
|
vcstool/clients/svn.py
|
vcstool/clients/svn.py
|
import os
from xml.etree.ElementTree import fromstring
from .vcs_base import VcsClientBase, which
class SvnClient(VcsClientBase):
    """vcstool client implementation for Subversion working copies."""

    type = 'svn'
    # Absolute path of the `svn` binary; None until resolved.
    _executable = None

    @staticmethod
    def is_repository(path):
        """Return True if *path* contains a .svn administrative directory."""
        return os.path.isdir(os.path.join(path, '.svn'))

    def __init__(self, path):
        super(SvnClient, self).__init__(path)

    def branch(self, command):
        """Output the checked-out path relative to the repository root."""
        if command.all:
            return self._not_applicable(
                command,
                message='at least with the option to list all branches')

        self._check_executable()
        cmd_info = [SvnClient._executable, 'info', '--xml']
        result_info = self._run_command(cmd_info)
        if result_info['returncode']:
            result_info['output'] = \
                'Could not determine url: ' + result_info['output']
            return result_info
        info = result_info['output']

        try:
            root = fromstring(info)
            entry = root.find('entry')
            url = entry.findtext('url')
            repository = entry.find('repository')
            root_url = repository.findtext('root')
        except Exception as e:
            return {
                'cmd': '',
                'cwd': self.path,
                'output': 'Could not determine url from xml: %s' % e,
                'returncode': 1
            }

        if not url.startswith(root_url):
            return {
                'cmd': '',
                'cwd': self.path,
                'output':
                    "Could not determine url suffix. The root url '%s' is not "
                    "a prefix of the url '%s'" % (root_url, url),
                'returncode': 1
            }

        return {
            'cmd': ' '.join(cmd_info),
            'cwd': self.path,
            'output': url[len(root_url):],
            'returncode': 0,
        }

    def custom(self, command):
        """Run an arbitrary svn subcommand supplied by the user."""
        self._check_executable()
        cmd = [SvnClient._executable] + command.args
        return self._run_command(cmd)

    def diff(self, command):
        """Show local modifications, optionally with extra context lines."""
        self._check_executable()
        cmd = [SvnClient._executable, 'diff']
        if command.context:
            cmd += ['--unified=%d' % command.context]
        return self._run_command(cmd)

    def export(self, command):
        """Output the url (and, with --exact, the revision) for re-import."""
        self._check_executable()
        cmd_info = [SvnClient._executable, 'info', '--xml']
        result_info = self._run_command(cmd_info)
        if result_info['returncode']:
            result_info['output'] = \
                'Could not determine url: ' + result_info['output']
            return result_info
        info = result_info['output']

        try:
            root = fromstring(info)
            entry = root.find('entry')
            url = entry.findtext('url')
            revision = entry.get('revision')
        except Exception as e:
            return {
                'cmd': '',
                'cwd': self.path,
                'output': 'Could not determine url from xml: %s' % e,
                'returncode': 1
            }

        export_data = {'url': url}
        if command.exact:
            export_data['version'] = revision
        return {
            'cmd': ' '.join(cmd_info),
            'cwd': self.path,
            'output': url,
            'returncode': 0,
            'export_data': export_data
        }

    def import_(self, command):
        """Check out command.url (optionally pinned to a version) into path."""
        if not command.url:
            return {
                'cmd': '',
                'cwd': self.path,
                'output': "Repository data lacks the 'url' value",
                'returncode': 1
            }

        not_exist = self._create_path()
        if not_exist:
            return not_exist

        self._check_executable()
        url = command.url
        if command.version:
            # Peg revision: the version may be a string (e.g. 'HEAD' or
            # '{2015-01-01}') as well as an int, so format with %s — the
            # previous '%d' raised TypeError for string versions.
            url += '@%s' % command.version
        cmd_checkout = [
            SvnClient._executable, '--non-interactive', 'checkout', url, '.']
        result_checkout = self._run_command(cmd_checkout, retry=command.retry)
        if result_checkout['returncode']:
            result_checkout['output'] = \
                "Could not checkout repository '%s': %s" % \
                (command.url, result_checkout['output'])
            return result_checkout

        return {
            'cmd': ' '.join(cmd_checkout),
            'cwd': self.path,
            'output': result_checkout['output'],
            'returncode': 0
        }

    def log(self, command):
        """Show the revision log, optionally limited to the last N entries."""
        if command.limit_tag:
            return {
                'cmd': '',
                'cwd': self.path,
                'output': 'SvnClient can not determine log since tag',
                'returncode': NotImplemented
            }
        if command.limit_untagged:
            return {
                'cmd': '',
                'cwd': self.path,
                'output': 'SvnClient can not determine latest tag',
                'returncode': NotImplemented
            }
        self._check_executable()
        cmd = [SvnClient._executable, 'log']
        if command.limit != 0:
            cmd += ['--limit', '%d' % command.limit]
        return self._run_command(cmd)

    def pull(self, _command):
        """Update the working copy from the repository."""
        self._check_executable()
        cmd = [SvnClient._executable, '--non-interactive', 'update']
        return self._run_command(cmd)

    def push(self, command):
        """Not applicable for svn (commits are sent to the server directly)."""
        self._check_executable()
        return self._not_applicable(command)

    def remotes(self, _command):
        """Output the URL this working copy was checked out from."""
        self._check_executable()
        cmd_info = [SvnClient._executable, 'info', '--xml']
        result_info = self._run_command(cmd_info)
        if result_info['returncode']:
            result_info['output'] = \
                'Could not determine url: ' + result_info['output']
            return result_info
        info = result_info['output']

        try:
            root = fromstring(info)
            entry = root.find('entry')
            url = entry.findtext('url')
        except Exception as e:
            return {
                'cmd': '',
                'cwd': self.path,
                'output': 'Could not determine url from xml: %s' % e,
                'returncode': 1
            }

        return {
            'cmd': ' '.join(cmd_info),
            'cwd': self.path,
            'output': url,
            'returncode': 0,
        }

    def status(self, command):
        """Show the status of the working copy."""
        self._check_executable()
        cmd = [SvnClient._executable, 'status']
        if command.quiet:
            cmd += ['--quiet']
        return self._run_command(cmd)

    def _check_executable(self):
        # NOTE(review): the assert fires BEFORE the lazy lookup below, so
        # the which() call is only reachable when _executable is already a
        # falsy non-None value — presumably initialisation happens elsewhere
        # in the package; confirm the intended order.
        assert SvnClient._executable is not None, \
            "Could not find 'svn' executable"

        if not SvnClient._executable:
            SvnClient._executable = which('svn')
|
Python
| 0
|
@@ -3803,17 +3803,17 @@
l += '@%25
-d
+s
' %25 comm
|
e4850d9ba5cb4733862194298cdbb8a34766b39f
|
update tests for new api
|
reddit.py
|
reddit.py
|
import json, random, urllib2
def declare():
return {"reddit": "privmsg", "guess": "privmsg"}
def callback(self):
    """Handle the ^reddit and ^guess commands: fetch the newest posts of a
    subreddit and reply with a random one (Python 2 module)."""
    channel = self.channel
    command = self.command
    user = self.user
    msg = self.message
    type = self.type
    isop = self.isop
    if command == 'guess':
        # The guess game always draws from this fixed subreddit.
        u = 'SwordOrSheath'
    else:
        try:
            u = str(msg.split(' ', 1)[1])
        except:
            # No argument after the command word.
            return self.msg(channel, "Please specify a subreddit!")
    try:
        req = urllib2.Request("https://www.reddit.com/r/" + u + "/new.json", headers={ 'User-Agent': 'UNIX:the_kgb:reddit https://github.com/stqism/THE_KGB-apps' })
        fd = urllib2.urlopen(req)
        reddit_api = json.loads(fd.read())
        fd.close()
        cringe = []
        for i in reddit_api['data']['children']:
            url = i['data']['url']
            title = i['data']['title']
            selfpost = bool(i['data']['is_self'])
            post = "https://reddit.com" + i['data']['permalink']
            # Rewrite imgur links to direct https image URLs where possible.
            if 'imgur' in url:
                if 'http://i.imgur.com' in url: #force https
                    url = 'https://i.imgur.com/%s' % (url.split('/')[3])
                if 'http://' in url and '/a/' not in url: #direct URLs
                    if 'gallery' in url:
                        url = 'https://i.imgur.com/%s.jpg' % (url.split('/')[4])
                    else:
                        url = 'https://i.imgur.com/%s.jpg' % (url.split('/')[3])
            cringe.append([title, url, post])
        item = random.choice(cringe)
        if command == 'guess':
            try:
                # If a nick was supplied, address the question to them.
                u = str(msg.split(' ', 1)[1])
                return self.msg(channel, u + ": Am I male or female? " + item[1])
            except:
                return self.msg(channel, "Am I male or female? " + item[1])
        else:
            # NOTE(review): `selfpost` holds the value from the LAST post
            # iterated, not from the randomly chosen `item` — confirm
            # whether this is intentional.
            if not selfpost:
                via = " (via: " + item[2] + ")"
                return self.msg(channel, str(item[0] + " " + item[1] + via))
            else:
                return self.msg(channel, str(item[0] + " " + item[1]))
    except Exception, e:
        # Any failure (HTTP error, bad JSON, empty listing) is reported to
        # the bot's home channel.
        return self.msg('#the_kgb', str(e))
class api:
    """Minimal stand-in for the bot API used by the self-test harness."""

    def msg(self, channel, text):
        """Return the reply formatted the way the real bot would send it."""
        return "[%s] %s" % (channel, text)
if __name__ == "__main__":
api = api()
u = "joe!username@hostmask"
c = '#test'
if callback(api, '', True, channel=c, user=u, command='reddit', msg='^reddit') != '[%s] Please specify a subreddit!' % (c):
print '[TESTFAIL] no arguments'
exit(1)
if callback(api, '', True, channel=c, user=u, command='reddit', msg='^reddit fatpeoplehate') != '[#the_kgb] HTTP Error 404: Not Found':
print '[TESTFAIL] error catcher'
exit(1)
if not callback(api, '', True, channel=c, user=u, command='reddit', msg='^reddit fatlogic').startswith('[%s] ' % (c)):
print '[TESTFAIL] Subreddit loader'
exit(1)
if not callback(api, '', True, channel=c, user=u, command='guess', msg='^guess').startswith('[%s] Am I male or female?' % (c)):
print '[TESTFAIL] guess no user'
exit(1)
n = 'bob'
if not callback(api, '', True, channel=c, user=u, command='guess', msg='^guess %s' % (n)).startswith('[%s] %s: Am I male or female?' % (c, n)):
print '[TESTFAIL] guess with user'
exit(1)
|
Python
| 0
|
@@ -2296,134 +2296,270 @@
-u
+c
= %22
-joe!username@hostmask%22%0A c = '#test'%0A%0A if callback(api, '', True
+#test%22%0A %0A setattr(api, 'isop', True)%0A setattr(api, 'type', 'privmsg')%0A setattr(api, 'command', 'reddit')%0A setattr(api, 'user', 'joe!username@hostmask')%0A setattr(api
,
+'
channel
-=c, user=u, command='reddit', msg='%5Ereddit'
+', c)%0A%0A setattr(api, 'message', '%5Ereddit')%0A if callback(api
) !=
@@ -2666,76 +2666,32 @@
-if callback(api, '', True, channel=c, user=u, command='reddit', msg=
+setattr(api, 'message',
'%5Ere
@@ -2710,16 +2710,37 @@
lehate')
+%0A if callback(api)
!= '%5B#t
@@ -2841,98 +2841,75 @@
-if not callback(api, '', True, channel=c, user=u, command='reddit', msg='%5Ereddit fatlogic'
+setattr(api, 'message', '%5Ereddit fatlogic')%0A if not callback(api
).st
@@ -3002,87 +3002,102 @@
-if not callback(api, '', True, channel=c, user=u
+setattr(api, 'message', '%5Eguess')%0A setattr(api
,
+'
command
-='guess', msg='%5Eguess'
+', 'guess')%0A if not callback(api
).st
@@ -3221,96 +3221,74 @@
-if not callback(api, '', True, channel=c, user=u, command='guess', msg='%5Eguess %25s' %25 (n)
+setattr(api, 'message', '%5Eguess %25s' %25 (n))%0A if not callback(api
).st
|
fb3949f74271b98caa427eee9d1be5d36e370627
|
modify components from list to set
|
rdhinet.py
|
rdhinet.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Dongdong Tian @ USTC
#
# Revision History:
# 2014-09-03 Dongdong Tian Initial Coding
#
"""Extract SAC data files from Hi-net WIN32 files
Usage:
rdhinet.py DIRNAME [-C <comps>] [-D <outdir>] [-P <procs>]
rdhinet.py -h
Options:
-h Show this helo.
-C <comps> Components list separated with commas. Avaiable components are
U, N, E, X, Y. [default: U,N,E]
-D <outdir> Output directory for SAC files.
-P <procs> Parallel using multiple processes. Set number of cpus to <procs>
if <procs> equals 0. [default: 0]
"""
import os
import glob
import shlex
import zipfile
import subprocess
import multiprocessing
from docopt import docopt
# external tools from Hi-net
catwin32 = "catwin32"
win2sac = "win2sac_32"
def unzip(zips):
    """Extract every ZIP archive in *zips* into the current directory.

    Args:
        zips: iterable of paths to .zip files.
    """
    for filename in zips:
        print("Unzip %s" % (filename))
        # Context manager guarantees the archive handle is closed even if
        # extraction fails (the original leaked the ZipFile object).
        with zipfile.ZipFile(filename, "r") as archive:
            for name in archive.namelist():
                archive.extract(name)
def win32_cat(cnts, cnt_total):
    """ merge WIN32 files to one total WIN32 file"""
    print("Total %d win32 files" % (len(cnts)))
    # catwin32 is an external Hi-net tool (module-level constant); its
    # stdout/stderr chatter is discarded.
    cmd = "%s %s -o %s" % (catwin32, ' '.join(cnts), cnt_total)
    args = shlex.split(cmd)
    subprocess.call(args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def win_prm(chfile, prmfile="win.prm"):
    """Write the four-line WIN32 parameter file used by win2sac.

    Line 2 is the channel-table path; the other three lines are '.'.
    """
    lines = [".", chfile, ".", "."]
    with open(prmfile, "w") as prm:
        prm.write("\n".join(lines) + "\n")
def get_chno(chfile, comps):
    """ read channel no list from channel table

    Returns the channel numbers (column 0) whose component code
    (column 4) is in *comps*; lines starting with '#' are skipped.
    """
    chno = []
    with open(chfile, "r") as table:
        for line in table:
            if line.startswith('#'):
                continue
            fields = line.split()
            if fields[4] in comps:
                chno.append(fields[0])
    print("Total %d channels" % len(chno))
    return chno
def _exctract_channel(tup):
    """extract only one channel for one time"""
    # Single-tuple argument so this can be used with multiprocessing.Pool.map.
    winfile, chno, sacfile, outdir, prmfile = tup
    # win2sac is an external Hi-net tool; its chatter is discarded.
    subprocess.call([win2sac, winfile, chno, sacfile, outdir, prmfile],
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL)
def win32_sac(winfile, ch_no, sacfile="SAC", outdir=".", prmfile="win.prm",
              procs=None):
    """Extract each channel of *winfile* to a SAC file, optionally in parallel.

    Args:
        winfile: merged WIN32 data file.
        ch_no: list of channel numbers to extract.
        sacfile: SAC filename prefix passed to win2sac.
        outdir: output directory for the SAC files.
        prmfile: WIN32 parameter file.
        procs: number of worker processes; 1 runs serially, 0 uses all
            CPUs. Defaults to the ``-P`` command line flag so existing
            script usage is unchanged.
    """
    if procs is None:
        # Backward-compatible fallback to the module-level docopt result;
        # passing procs explicitly removes the global dependency.
        procs = int(arguments['-P'])

    tuple_list = [(winfile, ch, sacfile, outdir, prmfile) for ch in ch_no]

    if procs == 1:
        for t in tuple_list:
            _exctract_channel(t)
    else:
        if procs == 0:
            procs = multiprocessing.cpu_count()
        else:
            procs = min(multiprocessing.cpu_count(), procs)
        pool = multiprocessing.Pool(processes=procs)
        pool.map(_exctract_channel, tuple_list)
def unlink_lists(files):
    """Delete every file named in *files*."""
    for filename in files:
        os.unlink(filename)
if __name__ == "__main__":
    # Parse CLI options from the module docstring (docopt); `arguments` is
    # also read by win32_sac() at module level.
    arguments = docopt(__doc__)

    # change directory
    os.chdir(arguments['DIRNAME'])
    print("Working in dir %s" % (arguments['DIRNAME']))

    # unzip zip files
    unzip(glob.glob("??_??_????????????_*_?????.zip"))

    # merge win32 files
    cnts = sorted(glob.glob("??????????????????.cnt"))
    # Output name: first 11 chars of the first chunk plus the chunk count.
    cnt_total = "%s_%d.cnt" % (cnts[0][0:11], len(cnts))
    win32_cat(cnts, cnt_total)
    # The individual chunks are no longer needed once merged.
    unlink_lists(cnts)

    chfile = glob.glob("??_??_????????.euc.ch")[0]
    # generate win32 paramerter file
    win_prm(chfile)

    # get channel NO. lists for channel table
    comps = arguments['-C'].split(",")
    chno = get_chno(chfile, comps)

    # extract sac files
    outdir = '.'
    if arguments['-D']:
        outdir = arguments['-D']
        if not os.path.exists(outdir):
            os.makedirs(outdir)
    win32_sac(cnt_total, chno, outdir=outdir)
|
Python
| 0
|
@@ -347,46 +347,59 @@
s%3E
-Components list separated with commas.
+Selection of components to extract.%0A
Ava
@@ -418,32 +418,16 @@
ents are
-%0A
U, N, E
@@ -433,17 +433,16 @@
E, X, Y.
-
%5Bdefaul
@@ -449,11 +449,9 @@
t: U
-,N,
+N
E%5D%0A
@@ -3533,16 +3533,20 @@
comps =
+set(
argument
@@ -3556,18 +3556,8 @@
-C'%5D
-.split(%22,%22
)%0A
|
acd0b8803579ece5b52a3158c05140ff1287f0be
|
Handle string values better in FilterComparison.__str__
|
odin/filtering.py
|
odin/filtering.py
|
# -*- coding: utf-8 -*-
from .traversal import TraversalPath
class FilterAtom(object):
"""
Base filter statement
"""
def __call__(self, resource):
raise NotImplementedError()
def any(self, collection):
return any(self(r) for r in collection)
def all(self, collection):
return all(self(r) for r in collection)
class FilterChain(FilterAtom):
operator_name = ''
check_operator = all
def __init__(self, *atoms):
self._atoms = list(atoms)
def __add__(self, other):
if isinstance(other, self.__class__):
return self.__class__(*self._atoms + other._atoms)
elif isinstance(other, FilterComparison):
self._atoms.append(other)
return self
raise TypeError("{} not supported for this operation".format(other))
def __call__(self, resource):
return self.check_operator(a(resource) for a in self._atoms)
def __str__(self):
pin = " {} ".format(self.operator_name)
return "({})".format(pin.join(str(a) for a in self._atoms))
class And(FilterChain):
operator_name = 'AND'
check_operator = all
class Or(FilterChain):
operator_name = 'OR'
check_operator = any
class FilterComparison(FilterAtom):
"""
Base class for filter operator atoms
"""
operator_symbol = ''
def __init__(self, field, value, operation=None):
self.field = TraversalPath.parse(field)
self.value = value
self.operation = operation
def __call__(self, resource):
try:
value = self.field.get_value(resource)
except KeyError:
return False
else:
if self.operation:
value = self.operation(value)
return self.compare(value)
def __str__(self):
if self.operation:
op_name = getattr(self.operation, 'name', self.operation.__name__)
return "{}({}) {} {}".format(op_name, self.field, self.operator_symbol, self.value)
else:
return "{} {} {}".format(self.field, self.operator_symbol, self.value)
def compare(self, value):
raise NotImplementedError()
class Equal(FilterComparison):
operator_symbol = '=='
def compare(self, value):
return value == self.value
class NotEqual(FilterComparison):
operator_symbol = '!='
def compare(self, value):
return value != self.value
class LessThan(FilterComparison):
operator_symbol = '<'
def compare(self, value):
return value < self.value
class LessThanOrEqual(FilterComparison):
operator_symbol = '<='
def compare(self, value):
return value <= self.value
class GreaterThan(FilterComparison):
operator_symbol = '>'
def compare(self, value):
return value > self.value
class GreaterThanOrEqual(FilterComparison):
operator_symbol = '>='
def compare(self, value):
return value >= self.value
|
Python
| 0.000019
|
@@ -17,16 +17,27 @@
f-8 -*-%0A
+import six%0A
from .tr
@@ -1818,32 +1818,154 @@
__str__(self):%0A
+ value = self.value%0A if isinstance(self.value, six.string_types):%0A value = '%22%7B%7D%22'.format(value)%0A%0A
if self.
@@ -2134,29 +2134,24 @@
tor_symbol,
-self.
value)%0A
@@ -2230,21 +2230,16 @@
symbol,
-self.
value)%0A%0A
|
55b3dd5caed31fe884bb652453ae6e29bf789e65
|
add try/catch around stats logging
|
omgeo/__init__.py
|
omgeo/__init__.py
|
import copy
import logging
from omgeo.places import PlaceQuery
from omgeo.postprocessors import DupePicker, SnapPoints
import time
stats_logger = logging.getLogger('omgeo.stats')
class Geocoder():
"""
Class for building a custom geocoder using external APIs.
"""
DEFAULT_SOURCES = [['omgeo.services.EsriWGS', {}],
['omgeo.services.Nominatim', {}]
]
DEFAULT_PREPROCESSORS = []
DEFAULT_POSTPROCESSORS = [
SnapPoints(),
DupePicker('match_addr', 'locator',
['rooftop', 'parcel', 'interpolation_offset', 'interpolation'])
]
def _get_service_by_name(self, service_name):
try:
module, separator, class_name = service_name.rpartition('.')
m = __import__( module )
path = service_name.split('.')[1:]
for p in path:
m = getattr(m, p)
return m
except Exception as ex:
raise Exception("%s" % (ex))
def add_source(self, source):
geocode_service = self._get_service_by_name(source[0])
self._sources.append(geocode_service(**source[1]))
def remove_source(self, source):
geocode_service = self._get_service_by_name(source[0])
self._sources.remove(geocode_service(**source[1]))
def set_sources(self, sources):
"""
Creates GeocodeServiceConfigs from each str source
"""
if len(sources) == 0:
raise Exception('Must declare at least one source for a geocoder')
self._sources = []
for source in sources: # iterate through a list of sources
self.add_source(source)
def __init__(self, sources=None, preprocessors=None, postprocessors=None,
waterfall=False):
"""
:arg dict sources: a dictionary of GeocodeServiceConfig() parameters,
keyed by module name for the GeocodeService to use, e.g.::
{'esri_wgs':{},
'bing': {'settings': {},
'preprocessors': [],
'postprocessors': []},
...}
:arg list preprocessors: list of universal preprocessors to use
:arg list postprocessors: list of universal postprocessors to use
:arg bool waterfall: sets default for waterfall on geocode() method (default ``False``)
"""
self._preprocessors = Geocoder.DEFAULT_PREPROCESSORS \
if preprocessors is None else preprocessors
self._postprocessors = Geocoder.DEFAULT_POSTPROCESSORS \
if postprocessors is None else postprocessors
sources = Geocoder.DEFAULT_SOURCES if sources is None else sources
self.set_sources(sources)
self.waterfall = waterfall
def geocode(self, pq, waterfall=None):
"""
:arg PlaceQuery pq: PlaceQuery object (required).
:arg bool waterfall: Boolean set to True if all geocoders listed should
be used to find results, instead of stopping after
the first geocoding service with valid candidates
(defaults to self.waterfall).
:returns: Returns a dictionary including:
* candidates - list of Candidate objects
* upstream_response_info - list of UpstreamResponseInfo objects
"""
start_time = time.time()
waterfall = self.waterfall if waterfall is None else waterfall
if type(pq) in (str, unicode):
pq = PlaceQuery(pq)
processed_pq = copy.copy(pq)
for p in self._preprocessors: # apply universal address preprocessing
processed_pq = p.process(processed_pq)
if processed_pq == False:
return get_result() # universal preprocessor rejects PlaceQuery
upstream_response_info_list = []
processed_candidates = []
for gs in self._sources: # iterate through each GeocodeService
candidates, upstream_response_info = gs.geocode(processed_pq)
if upstream_response_info is not None:
upstream_response_info_list.append(upstream_response_info)
processed_candidates += candidates # merge lists
if waterfall is False and len(processed_candidates) > 0:
break # if >= 1 good candidate, don't go to next geocoder
for p in self._postprocessors: # apply univ. candidate postprocessing
if processed_candidates == []:
break; # avoid post-processing empty list
processed_candidates = p.process(processed_candidates)
result = dict(candidates=processed_candidates,
upstream_response_info=upstream_response_info_list)
stats_dict = self.convert_geocode_result_to_nested_dicts(result)
stats_dict = dict(stats_dict, original_pq=pq.__dict__)
stats_logger.info(stats_dict)
return result
def get_candidates(self, pq, waterfall=None):
return self.geocode(pq, waterfall)['candidates']
def convert_geocode_result_to_nested_dicts(self, result):
def get_uri_dict(uri_item):
uri_dict = copy.copy(uri_item).__dict__
uri_dict['processed_pq'] = uri_dict['processed_pq'].__dict__
return uri_dict
uri_set = [get_uri_dict(uri_item) for uri_item in result['upstream_response_info']]
return dict(candidates=[candidate.__dict__ for candidate in result['candidates']],
upstream_response_info=uri_set)
|
Python
| 0
|
@@ -2938,32 +2938,59 @@
, waterfall=None
+, force_stats_logging=False
):%0A %22%22%22%0A
@@ -5113,16 +5113,33 @@
dict__)%0A
+ try:%0A
@@ -5168,16 +5168,123 @@
s_dict)%0A
+ except Exception as exception:%0A if force_stats_logging:%0A raise exception%0A
|
e5421ed64887e05c60bedae956cb3a31fb6d9130
|
Version bump to 2.1.0
|
wagtailnews/version.py
|
wagtailnews/version.py
|
version_info = (0, 18, 1)
version = '.'.join(map(str, version_info))
|
Python
| 0
|
@@ -13,16 +13,15 @@
= (
-0
+2
, 1
-8
,
-1
+0
)%0Ave
|
3de9ff408d911d536a45ba3bd6aafa64a178627b
|
Fix lint
|
weasyprint/svg/text.py
|
weasyprint/svg/text.py
|
"""
weasyprint.svg.text
-------------------
Draw text.
"""
from math import cos, radians, sin
from .bounding_box import EMPTY_BOUNDING_BOX, extend_bounding_box
from .utils import color, normalize, size
class TextBox:
"""Dummy text box used to draw text."""
def __init__(self, pango_layout, style):
self.pango_layout = pango_layout
self.style = style
def text(svg, node, font_size):
"""Draw text node."""
from ..css.properties import INITIAL_VALUES
from ..draw import draw_first_line
from ..text.line_break import split_first_line
# TODO: use real computed values
style = INITIAL_VALUES.copy()
style['font_family'] = node.get('font-family', 'sans-serif').split(',')
style['font_style'] = node.get('font-style', 'normal')
style['font_weight'] = node.get('font-weight', 400)
style['font_size'] = font_size
if style['font_weight'] == 'normal':
style['font_weight'] = 400
elif style['font_weight'] == 'bold':
style['font_weight'] = 700
else:
try:
style['font_weight'] = int(style['font_weight'])
except ValueError:
style['font_weight'] = 400
layout, length, resume_at, width, height, baseline = split_first_line(
node.text, style, svg.context, float('inf'), 0)
# TODO: get real values
x_bearing, y_bearing = 0, 0
# Get rotations and translations
x, y, dx, dy, rotate = [], [], [], [], [0]
if 'x' in node.attrib:
x = [size(i, font_size, svg.concrete_width)
for i in normalize(node.attrib['x']).strip().split(' ')]
if len(x) == 1:
x = []
if 'y' in node.attrib:
y = [size(i, font_size, svg.concrete_height)
for i in normalize(node.attrib['y']).strip().split(' ')]
if len(y) == 1:
y = []
if 'dx' in node.attrib:
dx = [size(i, font_size, svg.concrete_width)
for i in normalize(node.attrib['dx']).strip().split(' ')]
if 'dy' in node.attrib:
dy = [size(i, font_size, svg.concrete_height)
for i in normalize(node.attrib['dy']).strip().split(' ')]
if 'rotate' in node.attrib:
rotate = [radians(float(i)) if i else 0
for i in normalize(node.attrib['rotate']).strip().split(' ')]
last_r = rotate[-1]
letters_positions = [
([pl.pop(0) if pl else None for pl in (x, y, dx, dy, rotate)], char)
for char in node.text]
# Align text box horizontally
x_align = 0
letter_spacing = svg.length(node.get('letter-spacing'), font_size)
text_anchor = node.get('text-anchor')
# TODO: use real values
ascent, descent = 100, 20
if text_anchor == 'middle':
x_align = - (width / 2. + x_bearing)
if letter_spacing and node.text:
x_align -= (len(node.text) - 1) * letter_spacing / 2
elif text_anchor == 'end':
x_align = - (width + x_bearing)
if letter_spacing and node.text:
x_align -= (len(node.text) - 1) * letter_spacing
# Align text box vertically
# TODO: This is a hack. Other baseline alignment tags are not supported.
# See https://www.w3.org/TR/SVG2/text.html#TextPropertiesSVG
y_align = 0
display_anchor = node.get('display-anchor')
alignment_baseline = node.get(
'dominant-baseline', node.get('alignment-baseline'))
if display_anchor == 'middle':
y_align = -height / 2 - y_bearing
elif display_anchor == 'top':
y_align = -y_bearing
elif display_anchor == 'bottom':
y_align = -height - y_bearing
elif (alignment_baseline == 'central' or
alignment_baseline == 'middle'):
# TODO: This is wrong, we use font top-to-bottom
y_align = (ascent + descent) / 2 - descent
elif (alignment_baseline == 'text-before-edge' or
alignment_baseline == 'before_edge' or
alignment_baseline == 'top' or
alignment_baseline == 'hanging' or
alignment_baseline == 'text-top'):
y_align = ascent
elif (alignment_baseline == 'text-after-edge' or
alignment_baseline == 'after_edge' or
alignment_baseline == 'bottom' or
alignment_baseline == 'text-bottom'):
y_align = -descent
# Set bounding box
bounding_box = EMPTY_BOUNDING_BOX
# Return early when there’s no text
if not node.text:
x = x[0] if x else svg.cursor_position[0]
y = y[0] if y else svg.cursor_position[1]
dx = dx[0] if dx else 0
dy = dy[0] if dy else 0
svg.cursor_position = (x + dx, y + dy)
return
svg.stream.begin_text()
# Draw letters
for i, ((x, y, dx, dy, r), letter) in enumerate(letters_positions):
if x:
svg.cursor_d_position[0] = 0
if y:
svg.cursor_d_position[1] = 0
svg.cursor_d_position[0] += dx or 0
svg.cursor_d_position[1] += dy or 0
layout, _, _, width, height, _ = split_first_line(
letter, style, svg.context, float('inf'), 0)
svg.stream.push_state()
x = svg.cursor_position[0] if x is None else x
y = svg.cursor_position[1] if y is None else y
if i:
x += letter_spacing
x_position = x + svg.cursor_d_position[0] + x_align
y_position = y + svg.cursor_d_position[1] + y_align
svg.stream.move_to(x_position, y_position)
cursor_position = x + width, y
angle = last_r if r is None else r
if angle:
svg.stream.transform(1, 0, 0, 1, x_position, y_position)
svg.stream.transform(
cos(angle), sin(angle), -sin(angle), cos(angle), 0, 0)
svg.stream.transform(1, 0, 0, 1, -x_position, -y_position)
points = (
(cursor_position[0] + x_align +
svg.cursor_d_position[0],
cursor_position[1] + y_align +
svg.cursor_d_position[1]),
(cursor_position[0] + x_align + width +
svg.cursor_d_position[0],
cursor_position[1] + y_align + height +
svg.cursor_d_position[1]))
bounding_box = extend_bounding_box(bounding_box, points)
layout.reactivate(style)
svg.fill_stroke(node, font_size, text=True)
draw_first_line(
svg.stream, TextBox(layout, style), 'none', 'none',
x + svg.cursor_d_position[0], y + svg.cursor_d_position[1])
svg.stream.pop_state()
svg.cursor_position = cursor_position
svg.stream.end_text()
|
Python
| 0.000032
|
@@ -191,15 +191,8 @@
port
- color,
nor
|
178bde1703bbb044f8af8c70a57517af4490a3c0
|
Fix duplicate cookie issue and header parsing
|
databot/handlers/download.py
|
databot/handlers/download.py
|
import time
import requests
import bs4
from databot.recursive import call
class DownloadErrror(Exception):
pass
def dump_response(response):
return {
'headers': dict(response.headers),
'cookies': dict(response.cookies),
'status_code': response.status_code,
'encoding': response.encoding,
'content': response.content,
}
def download(url, delay=None, update=None, **kwargs):
update = update or {}
def func(row):
if delay is not None:
time.sleep(delay)
kw = call(kwargs, row)
_url = url(row)
response = requests.get(_url, **kw)
if response.status_code == 200:
value = dump_response(response)
for k, fn in update.items():
value[k] = fn(row)
yield _url, value
else:
raise DownloadErrror('Error while downloading %s, returned status code was %s, response content:\n\n%s' % (
_url, response.status_code, response.content,
))
return func
def get_content(data):
content_type = data.get('headers', {}).get('Content-Type')
if content_type == 'text/html':
soup = bs4.BeautifulSoup(data['content'], 'lxml')
return data['content'].decode(soup.original_encoding)
else:
return data['content']
|
Python
| 0.000001
|
@@ -31,16 +31,27 @@
port bs4
+%0Aimport cgi
%0A%0Afrom d
@@ -225,29 +225,24 @@
'cookies':
-dict(
response.coo
@@ -245,16 +245,26 @@
.cookies
+.get_dict(
),%0A
@@ -1105,16 +1105,23 @@
ent_type
+_header
= data.
@@ -1157,16 +1157,85 @@
nt-Type'
+, '')%0A content_type, params = cgi.parse_header(content_type_header
)%0A if
|
3722a807adf7d9458dd48d02bb3362aceeaf9051
|
Fix error
|
datasets/tests/test_views.py
|
datasets/tests/test_views.py
|
from django.test import Client, TestCase
from datasets.models import *
from datasets.views import *
from datasets.forms import *
from datasets.management.commands.generate_fake_data import create_sounds, create_users, create_annotations
class ContributeTest(TestCase):
fixtures = ['datasets/fixtures/initial.json']
def setUp(self):
create_sounds('fsd', 12)
create_users(1)
create_annotations('fsd', 24)
def test_save_contribute_validate_annotations_category(self):
# get a node id with at least one annotation
node_id = Annotation.objects.filter(sound_dataset__gt=1)[0].value
annotations = dataset.non_validated_annotations_per_taxonomy_node(node_id)
all_annotation_object_ids = annotations.values_list('id', flat=True)
nb_annotations = min(len(all_annotation_object_ids), 12)
form_data = {'form-MAX_NUM_FORMS': ['1000'],
'category_id': [node_id],
'form-INITIAL_FORMS': [nb_annotations],
'csrfmiddlewaretoken': ['qcPfVgmrpSKmUcU30cA48OHybUJfbEtQ5YZPsjx5azuPoyr7HkbuPQaAyPfJzyWc'],
'form-TOTAL_FORMS': [nb_annotations + 1],
}
for i in range(nb_annotations):
form_data['form-{0}-visited_sound'.format(i)] = ['True', 'False'][i%2]
form_data['form-{0}-annotation_id'.format(i)] = all_annotation_object_ids[i]
form_data['form-{0}-vote'] = [1, -1, 1][i%3]
response = self.client.post(reverse('save-contribute-validate-annotations-per-category', form_data))
response2 = self.client.post(reverse('save-contribute-validate-annotations-per-category', form_data))
# form_data = {'form-10-visited_sound': ['False'], 'form-11-vote': ['0'], 'form-1-vote': ['0'], 'form-6-vote': ['0'], 'form-2-vote': ['0'], 'form-0-vote': ['0'], 'form-5-visited_sound': ['False'], 'form-1-visited_sound': ['False'], 'form-7-vote': ['0'], 'form-7-visited_sound': ['False'], 'form-0-visited_sound': ['False'], 'comment': [''], 'form-4-vote': ['0'], 'form-10-vote': ['0'], 'form-4-annotation_id': ['214827'], 'form-7-annotation_id': ['313069'], 'form-5-vote': ['0'], 'form-11-annotation_id': ['648349'], 'form-9-annotation_id': ['412470'], 'csrfmiddlewaretoken': ['qcPfVgmrpSKmUcU30cA48OHybUJfbEtQ5YZPsjx5azuPoyr7HkbuPQaAyPfJzyWc'], 'form-6-visited_sound': ['False'], 'form-8-visited_sound': ['False'], 'dataset': ['1'], 'form-0-annotation_id': ['1700'], 'form-TOTAL_FORMS': ['13'], 'form-2-annotation_id': ['80457'], 'form-1-annotation_id': ['3370'], 'form-4-visited_sound': ['False'], 'form-5-annotation_id': ['247442'], 'form-8-vote': ['0'], 'form-MAX_NUM_FORMS': ['1000'], 'form-11-visited_sound': ['False'], 'form-9-visited_sound': ['False'], 'form-9-vote': ['0'], 'form-6-annotation_id': ['287498'], 'form-2-visited_sound': ['False'], 'form-3-annotation_id': ['175177'], 'form-3-visited_sound': ['False'], 'form-8-annotation_id': ['359251'], 'form-10-annotation_id': ['574778'], 'form-INITIAL_FORMS': ['12'], 'form-MIN_NUM_FORMS': ['0'], 'form-3-vote': ['0'], 'category_id': ['/m/0k5j']}
#response = c.post(reverse('save-contribute-validate-annotations-per-category'), form)
|
Python
| 0.000004
|
@@ -501,32 +501,97 @@
category(self):%0A
+ dataset = Dataset.objects.get(short_name='fsd')%0A %0A
# get a
@@ -627,16 +627,16 @@
otation%0A
-
@@ -927,32 +927,59 @@
), 12)%0A %0A
+ # create form data%0A
form_dat
@@ -1604,28 +1604,47 @@
%253%5D%0A
+%0A
+ # post request
%0A res
@@ -1719,32 +1719,33 @@
ns-per-category'
+)
, form_data))%0A
@@ -1732,33 +1732,32 @@
ry'), form_data)
-)
%0A respons
@@ -1837,16 +1837,17 @@
ategory'
+)
, form_d
@@ -1850,17 +1850,16 @@
rm_data)
-)
%0A
@@ -1851,32 +1851,454 @@
m_data)%0A
+duplicate_votes = %5B%5D%0A %0A # checking duplicates%0A for row in Vote.objects.all():%0A if Vote.objects.filter(created_by=row.created_by,%0A annotation_id = row.annotation_id, %0A created_at__lte=row.created_at+timedelta(seconds=10), %0A created_at__gt=row.created_at-timedelta(seconds=10)).count() %3E 1:%0A duplicate_votes.append(row)
%0A %0A
|
5de8209ec751fec9178a86e713393d8eafb7a124
|
Abort when strange things happen
|
emwin.py
|
emwin.py
|
from time import strptime, mktime
import logging
import sys
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
log = logging.getLogger('emwin')
log.addHandler(handler)
log.setLevel(logging.DEBUG)
class Connection(object):
def __init__(self, sock):
self.sock = sock
def __iter__(self):
buf = ''
while True:
buf += self.sock.recv(1116)
if buf == '':
break
while len(buf) >= 1116:
if not buf.startswith('\xFF\xFF\xFF\xFF\xFF\xFF'):
offset = buf.find('\xFF\xFF\xFF\xFF\xFF\xFF')
if offset == -1:
log.info('Sync marker missing! Resetting buffer!')
buf = ''
continue
buf = buf[offset:]
log.info('Discarding %i bytes before sync marker' % offset)
try:
packet = Packet(buf[:1116])
log.debug(str(packet))
yield packet
except:
log.error(sys.exc_info()[1])
buf = buf[1116:]
log.error('Connection closed by remote host')
self.sock.close()
class Packet(object):
def __init__(self, data):
self.data = data
self.parse()
def parse(self):
self.data = ''.join([chr(ord(x) ^ 0xFF) for x in self.data])
self.header = self.parse_header(self.data[:86])
self.filename = self.header['PF']
self.block = int(self.header['PN'])
self.total_blocks = int(self.header['PT'])
self.checksum = int(self.header['CS'])
self.timestamp = int(mktime(strptime(self.header['FD'], '%m/%d/%Y %I:%M:%S %p')))
self.payload = self.data[86:-6]
if len(self.payload) != 1024:
raise ValueError('Packet is the wrong size!')
self.verify_checksum()
def parse_header(self, data):
if data[:6] != ('\x00' * 6):
raise ValueError('Invalid packet header')
data = data[6:]
header = data.rstrip(' \r\n')
header = header.split('/', 5)
header = (x for x in header if x)
header = ((x[:2], x[2:].strip(' ')) for x in header)
return dict(header)
def verify_checksum(self):
checksum = sum([ord(x) for x in self.payload])
if int(self.checksum) != checksum:
raise ValueError('Checksum failed! Got: %i Expecting: %i' % (checksum, self.checksum))
def dict(self):
d = {}
for field in ('filename', 'block', 'total_blocks', 'timestamp'):
value = getattr(self, field)
d[field] = value
return d
def __str__(self):
return '%s (%i/%i)' % (self.filename, self.block, self.total_blocks)
class FileAssembler(object):
def __init__(self, filename, callback=None):
self.filename = filename
self.callback = callback
self.parts = {}
def add_part(self, packet):
self.parts[packet.block] = packet.payload
self.check_parts(packet)
def check_parts(self, packet):
if self.callback is None:
return
if not None in [self.parts.get(i, None) for i in range(1, packet.total_blocks + 1)]:
parts = self.parts.items()
parts.sort(key=lambda x: x[0])
content = ''.join([x[1] for x in parts])
if not self.filename.endswith('.ZIS'):
content = content.rstrip('\x00')
self.content = content
self.callback(self.filename, self.content)
|
Python
| 0.000008
|
@@ -26,16 +26,22 @@
, mktime
+, time
%0Aimport
@@ -356,69 +356,425 @@
ock%0A
-%0A def __iter__(self):%0A buf = ''%0A while True:
+ self.ident = 'ByteBlast Client%7CNM-emwin@synack.me%7CV1'%0A self.ident = ''.join(%5Bchr(ord(x) %5E 0xFF) for x in self.ident%5D)%0A%0A def __iter__(self):%0A buf = ''%0A last_ident = 0%0A while True:%0A now = int(time())%0A if (now - last_ident) %3E 300:%0A log.info('Sending ident packet')%0A last_ident = now%0A self.sock.sendall(self.ident)%0A
%0A
@@ -1123,24 +1123,13 @@
ng!
-Resetting buffer
+Abort
!')%0A
@@ -1145,39 +1145,36 @@
b
-uf = ''
+reak
%0A
@@ -1186,16 +1186,16 @@
-continue
+buf = ''
%0A
@@ -1529,16 +1529,42 @@
o()%5B1%5D)%0A
+ break%0A
|
32446090486db452342ec76606d28a05f6736e81
|
Update tracking.py
|
panoptes/state/states/default/tracking.py
|
panoptes/state/states/default/tracking.py
|
import time
def on_enter(event_data):
""" The unit is tracking the target. Proceed to observations. """
pan = event_data.model
pan.say("Checking our tracking")
next_state = 'parking'
try:
pan.say("I'm adjusting the tracking rate")
pan.observatory.update_tracking()
next_state = 'observe'
pan.say("Done with tracking adjustment, going to observe")
# Trying to prevent stall
time.sleep(2)
except Exception as e:
pan.logger.warning("Tracking problem: {}".format(e))
pan.say("Yikes! A problem while updating our tracking.")
pan.goto(next_state)
|
Python
| 0.000001
|
@@ -259,24 +259,25 @@
e%22)%0A
+#
pan.observat
|
027b0b3471e62574f0b6025ba93b45756c8cd70e
|
correct bug scf only parsing, name error
|
data_magic.py
|
data_magic.py
|
import re
import hashlib
from parser import *
def line():
print('----------------------')
def debug(simulations, id_s):
for y in simulations[id_s]:
print(y)
print(simulations[id_s][y])
line()
def file_parser(file, log=None):
"""
file: name of the file as string
log: TextIOWrapper
output: dict with data (see README)
"""
if log is None:
# if I put it as log default parameter the file is
# always created
log = open('{}_parser.log'.format(file), 'a')
textfile = open(file, 'r')
filetext = textfile.read()
if len(re.findall(r_close, filetext)) == 1:
log.write('job eneded correctly\n')
elif len(re.findall(r_close, filetext)) == 0:
log.write('job ended uncorrectly\n')
else:
log.write('wut?? THIS FILE IS NOT MENT TO BE PARSED\n')
# TODO raise an error wold be better
return {}
# how many energies are in the file?
# 1 energy => 1 simulation.
# 0 energy => no simulation.
# 1+ energies => error.
matches = [x for x in re.findall(scf_data_out['r_total_energy'], filetext,
re.MULTILINE)]
if len(matches) != 0:
n_simulations = len(matches)
log.write('{} simulations found\n'.format(n_simulations))
else:
log.write('no energy found very bad!!!!\n')
# TODO raise an error wold be better
return {}
# simulation initialization:
simulations = {}
if_bfgs, split_text = find_bfgs(filetext)
if if_bfgs:
for i, v in enumerate(split_text):
kind, text = v
simulation = {}
valid_simulation = True
damage_simulation = False
if kind == 'scf':
try:
simulation.update(scf_complete(text))
except CorruptedData as e:
log.write(str(e) + '\n')
if 'total_energy' in e.parsed_data:
log.write('energy recovered')
simulation.update(e.parsed_data)
simulation['damage'] = True
damage_simulation = True
else:
valid_simulation = False
elif kind == 'bfgs':
try:
simulation.update(bfgs_complete(text))
except CorruptedData as e:
log.write(str(e) + '\n')
if 'total_energy' in e.parsed_data:
log.write('energy recovered')
simulation.update(e.parsed_data)
simulation['damage'] = True
damage_simulation = True
else:
valid_simulation = False
else:
raise ValueError('kind not implemented')
# be careful: key here is the key of the previous simulation!
if not valid_simulation:
for k, v in simulations.items():
v['last'] = key
break
# i = 0 does not have a previous key
if i > 0:
previous_key = key
key = hashlib.sha224(text.encode('utf-8')).hexdigest()
# key manager among the simulation
if i == 0:
simulations[key] = dict(file=str(file),
firts=key,
number=(i, len(split_text) - 1))
simulations[key].update(simulation)
first_key = key
elif (i == len(split_text) - 1) or damage_simulation:
simulations[previous_key]['next'] = key
for k, v in simulations.items():
v['last'] = key
simulations[key] = dict(file=str(file),
first=first_key,
number=(i, len(split_text) - 1),
previous=previous_key,
last=key)
simulations[key].update(simulation)
break
else:
simulations[key] = dict(file=str(file),
first=first_key,
number=(i, len(split_text) - 1),
previous=previous_key)
simulations[key].update(simulation)
simulations[previous_key]['next'] = key
else:
log.write('no bfgs calculation founded\n')
kind, text = split_text[0]
key = hashlib.sha224(text.encode('utf-8')).hexdigest()
simulations[key] = dict(file=str(file),
firts=key,
last=key,
number=(0, 0))
try:
simulations[key].update(scf_complete(text))
except CorruptedData as e:
log.write(str(e) + '\n')
if 'total_energy' in e.parsed_data:
log.write('energy recovered')
simulation.update(e.parsed_data)
simulation['damage'] = True
textfile.close()
debug(simulations, id_simulations[0])
return simulations
|
Python
| 0
|
@@ -5163,32 +5163,38 @@
simulation
+s%5Bkey%5D
.update(e.parsed
@@ -5218,32 +5218,38 @@
simulation
+s%5Bkey%5D
%5B'damage'%5D = Tru
@@ -5274,16 +5274,18 @@
se()%0A
+ #
debug(s
|
cbae828ee9eb91a2373a415f1a1521fb5dee3100
|
Add method to generate list of abscissa dicts
|
datac/main.py
|
datac/main.py
|
# -*- coding: utf-8 -*-
import copy
|
Python
| 0
|
@@ -29,8 +29,1140 @@
rt copy%0A
+%0Adef init_abscissa(params, abscissae, abscissa_name):%0A %22%22%22%0A List of dicts to initialize object w/ calc method%0A%0A This method generates a list of dicts; each dict is sufficient to initialize an object featuring a calculator method of interest. This list can be thought of as the abscissae of a set of data. Each dict will contain data which remains constant for each calculation, but it nonetheless required to initialize the object. Each dict will also contain a datum which is the abscissa for the calculation and is also required to initialize the object.%0A%0A :param dict params: Static parameters required to initialize the object featuring the ordinate calculator method.%0A :param list abscissae: Independent variable also required to initialize object featuring the ordinate calculator method.%0A :param str abscissa_name: Dictionary key for the abscissa name.%0A %22%22%22%0A dict_list = %5B%5D%0A%0A for abscissa in abscissae:%0A param_dict = copy.copy(params)%0A param_dict%5Babscissa_name%5D = abscissa%0A param_dict%5B%22abscissa_name%22%5D = abscissa_name%0A dict_list.append(param_dict)%0A%0A return dict_list%0A
|
cd5053ac36e13b57e95eeb1241032c97b48a4a85
|
Drop try/catch that causes uncaught errors in the Observer to be silently ignored
|
planetstack/openstack_observer/backend.py
|
planetstack/openstack_observer/backend.py
|
import threading
import time
from observer.event_loop import PlanetStackObserver
from observer.event_manager import EventListener
from util.logger import Logger, logging
logger = Logger(level=logging.INFO)
class Backend:
def run(self):
try:
# start the openstack observer
observer = PlanetStackObserver()
observer_thread = threading.Thread(target=observer.run)
observer_thread.start()
# start event listene
event_manager = EventListener(wake_up=observer.wake_up)
event_manager_thread = threading.Thread(target=event_manager.run)
event_manager_thread.start()
except:
logger.log_exc("Exception in child thread")
|
Python
| 0
|
@@ -244,21 +244,8 @@
f):%0A
- try:%0A
@@ -669,78 +669,6 @@
rt()
-%0A except:%0A logger.log_exc(%22Exception in child thread%22)
%0A%0A
|
b725ef74f8e6f0887737e13783062b987fb3dd77
|
bump to 7.0.3 final
|
device_inventory/__init__.py
|
device_inventory/__init__.py
|
VERSION = (7, 0, 3, 'beta', 6)
def get_version():
"Returns a PEP 386-compliant version number from VERSION."
assert len(VERSION) == 5
assert VERSION[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if VERSION[2] == 0 else 3
main = '.'.join(str(x) for x in VERSION[:parts])
sub = ''
if VERSION[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = mapping[VERSION[3]] + str(VERSION[4])
return str(main + sub)
|
Python
| 0.000002
|
@@ -18,16 +18,17 @@
3, '
-beta', 6
+final', 0
)%0A%0A%0A
|
584c2f69df66bd08ace0652da7337e8e71a72099
|
Use bool for zero_mask. Requires pytorch 1.7+
|
projects/transformers/models/sparse_embedding.py
|
projects/transformers/models/sparse_embedding.py
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2021, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import torch
from nupic.torch.modules.sparse_weights import SparseWeightsBase
__all__ = ["SparseEmbeddings"]
class SparseEmbeddings(SparseWeightsBase):
    """Sparse wrapper around a ``torch.nn.Embedding``.

    Every embedding row of the wrapped module is masked so it carries the
    requested sparsity; looking up any index therefore yields a sparse
    output vector. (The wrapped module is already "sparse" in the usual
    embedding sense — integer indices in — this adds weight sparsity.)

    :param module: a ``torch.nn.Embedding``-like module (2-D weight matrix)
    :param sparsity: per-row fraction of weights forced to zero
    """

    def __init__(self, module, sparsity=None):
        assert len(module.weight.shape) == 2, "Should resemble a nn.Embedding"
        super(SparseEmbeddings, self).__init__(module, sparsity=sparsity)

        num_rows = self.module.num_embeddings
        num_cols = self.module.embedding_dim
        # Entries kept non-zero per embedding row.
        num_on = int(round((1 - self.sparsity) * num_cols))

        # Per-row mask: True marks weights that must stay zero.
        mask = torch.ones(num_rows, num_cols, dtype=torch.bool)
        for row in range(num_rows):
            keep_cols = np.random.choice(num_cols, num_on, replace=False)
            mask[row, keep_cols] = False

        # Use float16 because pytorch distributed nccl doesn't support bools
        self.register_buffer("zero_mask", mask.half())
        self.rezero_weights()

    def rezero_weights(self):
        """Force every masked weight entry back to zero."""
        self.module.weight.data[self.zero_mask.bool()] = 0
|
Python
| 0.000001
|
@@ -2405,18 +2405,78 @@
rch.bool
+,%0A device=module.weight.device
)%0A
-
@@ -2657,85 +2657,8 @@
se%0A%0A
- # Use float16 because pytorch distributed nccl doesn't support bools%0A
@@ -2708,15 +2708,8 @@
mask
-.half()
)%0A%0A
@@ -2768,16 +2768,16 @@
(self):%0A
+
@@ -2818,15 +2818,8 @@
mask
-.bool()
%5D =
|
3587c608cde4f273d33a572c0bf44dbe2b003250
|
better initial negative rate
|
python/alpenglow/experiments/FactorExperiment.py
|
python/alpenglow/experiments/FactorExperiment.py
|
import alpenglow.Getter as rs
import alpenglow as prs
class FactorExperiment(prs.OnlineExperiment):
    """FactorExperiment(dimension=10,begin_min=-0.01,begin_max=0.01,learning_rate=0.05,regularization_rate=0.0,negative_rate=0.0)

    This class implements an online version of the well-known matrix factorization recommendation model [Koren2009]_
    and trains it via stochastic gradient descent. The model is able to train on implicit data
    using negative sample generation, see [X.He2016]_ and the **negative_rate** parameter.

    .. [Koren2009] Koren, Yehuda, Robert Bell, and Chris Volinsky. "Matrix factorization techniques for recommender systems." Computer 42.8 (2009).
    .. [X.He2016] X. He, H. Zhang, M.-Y. Kan, and T.-S. Chua. Fast matrix factorization for online recommendation with implicit feedback. In SIGIR, pages 549-558, 2016.

    Parameters
    ----------
    dimension : int
        The latent factor dimension of the factormodel.
    begin_min : double
        The factors are initialized randomly, sampling each element uniformly from the interval (begin_min, begin_max).
    begin_max : double
        See begin_min.
    learning_rate : double
        The learning rate used in the stochastic gradient descent updates.
    regularization_rate : double
        The coefficient for the L2 regularization term.
    negative_rate : int
        The number of negative samples generated after each update. Useful for implicit recommendation.
    """

    def _config(self, top_k, seed):
        """Assemble the factor model, its SGD updater and the negative sampler.

        Returns the tuple consumed by the experiment framework:
        (model, [update chain entry points], [], []).
        """
        # Latent factor model; factors are created lazily on first access.
        model = rs.FactorModel(**self.parameter_defaults(
            begin_min=-0.01,
            begin_max=0.01,
            dimension=10,
            initialize_all=False,
        ))
        # Plain SGD updater driving the factor matrices.
        updater = rs.FactorModelGradientUpdater(**self.parameter_defaults(
            learning_rate=0.05,
            regularization_rate=0.0
        ))
        updater.set_model(model)
        # Point-wise MSE objective feeds the gradient computation.
        point_wise = rs.ObjectiveMSE()
        gradient_computer = rs.GradientComputerPointWise()
        gradient_computer.set_objective(point_wise)
        gradient_computer.set_model(model)
        gradient_computer.add_gradient_updater(updater)
        # Uniform negative sampling for implicit-feedback training; each
        # positive event triggers `negative_rate` synthetic negatives.
        negative_sample_generator = rs.UniformNegativeSampleGenerator(**self.parameter_defaults(
            negative_rate=0.0,
            initialize_all=False,
            seed=67439852,
            filter_repeats=False,
        ))
        negative_sample_generator.add_updater(gradient_computer)
        return (model, [negative_sample_generator], [], [])
|
Python
| 0.998618
|
@@ -2457,18 +2457,18 @@
ve_rate=
+1
0
-.
0,%0A
|
37fa40a9b5260f8090adaa8c15d3767c0867574f
|
Create a list of messages that contain system time.
|
python/fusion_engine_client/messages/__init__.py
|
python/fusion_engine_client/messages/__init__.py
|
from .core import *
from . import ros
# Registry mapping each FusionEngine message type enum value to the Python
# class that deserializes it. Used by readers to dispatch incoming frames.
message_type_to_class = {
    # Navigation solution messages.
    PoseMessage.MESSAGE_TYPE: PoseMessage,
    PoseAuxMessage.MESSAGE_TYPE: PoseAuxMessage,
    GNSSInfoMessage.MESSAGE_TYPE: GNSSInfoMessage,
    GNSSSatelliteMessage.MESSAGE_TYPE: GNSSSatelliteMessage,
    # Sensor measurement messages.
    IMUMeasurement.MESSAGE_TYPE: IMUMeasurement,
    # ROS messages.
    ros.PoseMessage.MESSAGE_TYPE: ros.PoseMessage,
    ros.GPSFixMessage.MESSAGE_TYPE: ros.GPSFixMessage,
    ros.IMUMessage.MESSAGE_TYPE: ros.IMUMessage,
    # Command and control messages.
    CommandResponseMessage.MESSAGE_TYPE: CommandResponseMessage,
    MessageRequest.MESSAGE_TYPE: MessageRequest,
    ResetRequest.MESSAGE_TYPE: ResetRequest,
    VersionInfoMessage.MESSAGE_TYPE: VersionInfoMessage,
    EventNotificationMessage.MESSAGE_TYPE: EventNotificationMessage,
}
|
Python
| 0.00003
|
@@ -883,8 +883,117 @@
sage,%0A%7D%0A
+%0Amessages_with_system_time = %5Bt for t, c in message_type_to_class.items() if hasattr(c(), 'system_time_ns')%5D%0A
|
82f68c3a0bd734dc9a639d9c257b26f5720c0d9c
|
add prepare_dir
|
decorators.py
|
decorators.py
|
__author__ = 'zz'
from functools import wraps
from requests import Timeout, ConnectionError
from socket import timeout as socket_timeout
import logging
from .models import ArbitraryAccessObject
from shutil import get_terminal_size
timeouts = (Timeout, socket_timeout, ConnectionError)
def threading_lock(lock):
    """Decorator factory: run the wrapped callable while holding *lock*."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            lock.acquire()
            try:
                return func(*args, **kwargs)
            finally:
                lock.release()
        return wrapper
    return decorator
def retry_connect(retry_times, timeout, error=None):
    """Decorator factory: call ``func(*args, timeout=timeout, **kwargs)`` and
    retry on timeout/connection errors up to ``retry_times`` attempts.

    ``error`` is notified via ``error.reconnect(try_times)`` after each failed
    attempt; it defaults to an ArbitraryAccessObject — presumably a no-op
    stand-in that accepts any call; confirm against models.py.

    Raises:
        Timeout: once all retries are exhausted. A 404 response is treated
            as a retryable failure (see below), so persistent 404s also end
            in Timeout.
    """
    if error is None:
        error=ArbitraryAccessObject()
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try_times = 0
            while True:
                try:
                    ret = func(*args, timeout=timeout, **kwargs)
                    if ret.status_code != 200:
                        logging.warning('%s is %s', ret.url, ret.status_code)
                        if ret.status_code == 404:
                            # Deliberately funnel 404s into the retry path.
                            raise Timeout
                except timeouts:
                    try_times += 1
                    error.reconnect(try_times)
                else:
                    # Success (any non-404 response object) is returned as-is.
                    return ret
                if try_times >= retry_times:
                    raise Timeout
        return wrapper
    return decorator
def semalock_for_class(func):
    """Decorator for methods: the second positional argument is a semaphore
    that guards the wrapped call (it is consumed, not forwarded)."""
    @wraps(func)
    def wrapper(self, s, *args, **kwargs):
        s.acquire()
        try:
            return func(self, *args, **kwargs)
        finally:
            s.release()
    return wrapper
def semalock(func):
    """Decorator: the first positional argument is a semaphore that guards
    the wrapped call (it is consumed, not forwarded)."""
    @wraps(func)
    def wrapper(s, *args, **kwargs):
        s.acquire()
        try:
            return func(*args, **kwargs)
        finally:
            s.release()
    return wrapper
def loop(func):
    """Decorator: keep invoking *func* until it returns a truthy value."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        done = False
        while not done:
            done = func(*args, **kwargs)
    return wrapper
def resolve_timeout(replace_value):
    """
    Decorator factory: return *replace_value* instead of propagating the
    module-level ``timeouts`` exceptions (requests/socket timeout and
    connection errors). All other exceptions propagate untouched.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except timeouts:
                # Unused `as e` binding removed; the exception itself is
                # intentionally discarded in favour of the fallback value.
                return replace_value
        return wrapper
    return decorator
def clear_output(func):
    """Decorator: blank the current terminal line (via spaces + carriage
    return) before invoking *func*."""
    width = get_terminal_size()[0]  # measured once, at decoration time

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Overwrite the line with spaces, then rewind to column 0.
        print(' ' * width, end='\r')
        return func(*args, **kwargs)
    return wrapper
|
Python
| 0.000001
|
@@ -1,21 +1,13 @@
-__author__ = 'zz'
+import os
%0A%0Afr
@@ -273,16 +273,33 @@
Error)%0A%0A
+__author__ = 'zz'
%0A%0A%0Adef t
@@ -2448,28 +2448,304 @@
**kwargs)%0A return wrapper
+%0A%0A%0Adef prepare_dir(dirname):%0A def decorator(func):%0A @wraps(func)%0A def wrapper(*args, **kwargs):%0A if not os.path.exists(dirname):%0A os.mkdir(dirname)%0A%0A return func(*args, **kwargs)%0A return wrapper%0A return decorator
|
9ff314c9481605e174769416dec1b71e16936b83
|
Fix unicode error when creating SHA1 sum for ical UID
|
demo/utils.py
|
demo/utils.py
|
from datetime import datetime, time, timedelta
import hashlib
def export_event(event, format='ical'):
    """Serialise *event* to an iCalendar (VCALENDAR) string.

    One VEVENT is emitted per day the event spans. Only the 'ical' format is
    supported; any other *format* returns None.

    Assumes *event* exposes: url, title, search_description, location,
    date_from, and optional date_to / time_from / time_to — TODO confirm
    against the page model.
    """
    # Only ical format supported at the moment
    if format != 'ical':
        return

    # VEVENT format: http://www.kanzaki.com/docs/ical/vevent.html
    ical_components = [
        'BEGIN:VCALENDAR',
        'VERSION:2.0',
        'PRODID:-//Torchbox//wagtail//EN',
    ]

    # Number of days the event lasts (inclusive of both endpoints).
    if event.date_to is not None:
        days = (event.date_to - event.date_from).days + 1
    else:
        days = 1

    def add_slashes(string):
        # Fix: str.replace returns a new string; the previous code discarded
        # every result so no escaping was ever applied. Escape the backslash
        # first so it does not re-escape the backslashes added below.
        string = string.replace('\\', '\\\\')
        string = string.replace('"', '\\"')
        string = string.replace(',', '\\,')
        string = string.replace(':', '\\:')
        string = string.replace(';', '\\;')
        string = string.replace('\n', '\\n')
        return string

    for day in range(days):
        # Calendar date of this occurrence.
        date = event.date_from + timedelta(days=day)

        # Fall back to covering the whole day when explicit times are absent.
        start_time = event.time_from if event.time_from is not None else time.min
        end_time = event.time_to if event.time_to is not None else time.max

        start_datetime = datetime.combine(date, start_time)
        end_datetime = datetime.combine(date, end_time)

        # Fix: hashlib requires bytes — encode explicitly so non-ASCII URLs
        # no longer raise UnicodeEncodeError under Python 3.
        event_string = event.url + str(start_datetime)
        uid = hashlib.sha1(event_string.encode('utf-8')).hexdigest() + '@wagtaildemo'

        ical_components.extend([
            'BEGIN:VEVENT',
            'UID:' + add_slashes(uid),
            'URL:' + add_slashes(event.url),
            # NOTE(review): DTSTAMP is formatted from a bare time, so the
            # date part defaults to 1900-01-01 — looks wrong; confirm intent.
            'DTSTAMP:' + start_time.strftime('%Y%m%dT%H%M%S'),
            'SUMMARY:' + add_slashes(event.title),
            'DESCRIPTION:' + add_slashes(event.search_description),
            'LOCATION:' + add_slashes(event.location),
            'DTSTART;TZID=Europe/London:' + start_datetime.strftime('%Y%m%dT%H%M%S'),
            'DTEND;TZID=Europe/London:' + end_datetime.strftime('%Y%m%dT%H%M%S'),
            'END:VEVENT',
        ])

    # Finish event
    ical_components.extend([
        'END:VCALENDAR',
    ])

    # NOTE(review): RFC 5545 mandates CRLF line endings; '\r' kept to
    # preserve the existing output format.
    return '\r'.join(ical_components)
|
Python
| 0.000184
|
@@ -1446,27 +1446,23 @@
-uid = hashlib.sha1(
+event_string =
even
@@ -1488,16 +1488,72 @@
atetime)
+%0A uid = hashlib.sha1(event_string.encode('utf-8')
).hexdig
|
6188363861cc3310e48556d33ec8a92758c88215
|
Fix code
|
dbaas_zabbix/provider.py
|
dbaas_zabbix/provider.py
|
# -*- coding: utf-8 -*-
import logging
LOG = logging.getLogger(__name__)
STATUS_ENABLE = 0
STATUS_DISABLE = 1
def set_client_group(attribute):
    """Decorator factory for provider methods: inject a ``hostgroups`` kwarg
    built from ``self.dbaas_api.<attribute>`` before delegating."""
    def decorator(method):
        def wrapper(*args, **kwargs):
            provider = args[0]
            groups = getattr(provider.dbaas_api, attribute)
            kwargs["hostgroups"] = list(groups)
            return method(*args, **kwargs)
        return wrapper
    return decorator
def set_slack_notification():
    """Decorator factory for provider methods: forward the DBaaS slack
    notification target as the ``notification_slack`` kwarg when set."""
    def decorator(method):
        def wrapper(*args, **kwargs):
            provider = args[0]
            target = provider.dbaas_api.slack_notification
            if target:
                kwargs["notification_slack"] = target
            return method(*args, **kwargs)
        return wrapper
    return decorator
class ZabbixProvider(object):
    """Base Zabbix provider: wraps an authenticated Zabbix API session plus
    the DBaaS metadata object.

    ``dbaas_api`` supplies the endpoint/credentials and the host-group
    attributes read by the ``set_client_group`` decorator; ``zabbix_api`` is
    the client factory, called with the endpoint. Subclasses implement the
    public create/delete/update hooks declared at the bottom.
    """

    __provider_name__ = None
    __is_ha__ = None
    __version__ = []

    def __init__(self, dbaas_api, zabbix_api):
        self.dbaas_api = dbaas_api
        self.api = zabbix_api(dbaas_api.endpoint)
        self.api.login(user=dbaas_api.user, password=dbaas_api.password)

    def logout(self):
        """Best-effort logout; failures are logged, never raised."""
        try:
            self.api.user.logout()
        except Exception as e:
            LOG.error('Could not logout. Error: {}'.format(e))

    def __getattr__(self, name):
        # Unknown attributes fall through to the DBaaS metadata object.
        return getattr(self.dbaas_api, name)

    def _delete_monitors(self, host):
        LOG.info("Destroying monitor for host: {}".format(host))
        return self.api.globo.deleteMonitors(host)

    @set_client_group("client_group_host")
    def _create_basic_monitors(self, **kwargs):
        LOG.info("Creating basic monitor with params: {}".format(kwargs))
        return self.api.globo.createLinuxMonitors(**kwargs)

    @set_client_group("client_group_database")
    @set_slack_notification()
    def _create_database_monitors(self, **kwargs):
        LOG.info("Creating database monitor with params: {}".format(kwargs))
        return self.api.globo.createDBMonitors(**kwargs)

    @set_client_group("client_group_database")
    @set_slack_notification()
    def _create_mongo_three_monitors(self, **kwargs):
        LOG.info("Creating mongo3 monitor with params: {}".format(kwargs))
        return self.api.globo.createMongo3Monitors(**kwargs)

    @set_client_group("client_group_database")
    @set_slack_notification()
    def _create_web_monitors(self, **kwargs):
        LOG.info("Creating web monitor with params: {}".format(kwargs))
        return self.api.globo.createWebMonitors(**kwargs)

    @set_client_group("client_group_database")
    @set_slack_notification()
    def _create_tcp_monitors(self, **kwargs):
        return self.api.globo.createTCPMonitors(**kwargs)

    @set_client_group("client_group_database")
    @set_slack_notification()
    def _create_redis_monitors(self, **kwargs):
        return self.api.globo.createRedisMonitors(**kwargs)

    # Thin pass-throughs to the underlying Zabbix API client.
    def _get_host_info(self, **kwargs):
        return self.api.host.get(**kwargs)

    def _update_host_interface(self, **kwargs):
        return self.api.hostinterface.update(**kwargs)

    def _get_host_interface(self, **kwargs):
        return self.api.hostinterface.get(**kwargs)

    def _update_host_info(self, **kwargs):
        return self.api.host.update(**kwargs)

    def _get_host_group_info(self, **kwargs):
        return self.api.hostgroup.get(**kwargs)

    def _disable_alarms(self, **kwargs):
        return self.api.globo.disableAlarms(**kwargs)

    def _enable_alarms(self, **kwargs):
        return self.api.globo.enableAlarms(**kwargs)

    def get_host_id(self, host_name):
        """Return the Zabbix host id exactly matching *host_name*, or None."""
        host_info = self._get_host_info(search={'name': host_name})
        for host in host_info:
            if host['name'] == host_name:
                return host['hostid']
        return None

    def get_host_interface_id(self, host_id):
        """Return the first interface id registered for *host_id*."""
        host_interface = self.api.hostinterface.get(hostids=host_id)
        return host_interface[0]['interfaceid']

    # Hooks to be provided by concrete providers.
    def create_basic_monitors(self, **kwargs):
        raise NotImplementedError

    def delete_basic_monitors(self, **kwargs):
        raise NotImplementedError

    def create_database_monitors(self, **kwargs):
        raise NotImplementedError

    def delete_database_monitors(self, **kwargs):
        raise NotImplementedError

    def update_host_interface(self, **kwargs):
        raise NotImplementedError

    def get_all_hosts_name(self):
        """All monitored host names: database-level entries plus host names."""
        hosts = []
        for zabbix_host in self.get_zabbix_databases_hosts():
            hosts.append(zabbix_host)
        for host in self.hosts:
            hosts.append(host.hostname)
        return hosts

    def get_host_triggers(self, host_name):
        """Return the trigger status list for *host_name* (may be empty)."""
        host_id = self.get_host_id(host_name)
        triggers = self.api.trigger.get(
            output=['status'], hostids=[host_id]
        )
        if not triggers:
            LOG.warning('Host {} does not have triggers'.format(host_name))
        return triggers

    def is_alarms_enabled(self):
        """True only when every trigger on every monitored host is enabled."""
        for host in self.get_all_hosts_name():
            for trigger in self.get_host_triggers(host):
                status = int(trigger['status'])
                if status == STATUS_DISABLE:
                    LOG.info(
                        'Trigger {} is disabled for host {}'.format(
                            trigger['triggerid'], host
                        )
                    )
                    return False
        return True

    def get_hostgroup_id(self, hostgroup_name):
        """Return the group id exactly matching *hostgroup_name*, or None."""
        hostgroups = self.api.hostgroup.get(
            search={'name': hostgroup_name})
        for hostgroup in hostgroups:
            if hostgroup['name'] == hostgroup_name:
                return hostgroup['groupid']
        return None

    def add_hostgroup_on_host(self, host_name, hostgroup_name):
        """Attach an existing host group to an existing host.

        Fixes: the massadd call previously sent the literal string
        'hostgroup_id' instead of the looked-up id, and proceeded even when
        either lookup failed; now it bails out early on missing ids.
        """
        host_id = self.get_host_id(host_name=host_name)
        if not host_id:
            LOG.info('Host id not found for host: {}'.format(host_name))
            return
        hostgroup_id = self.get_hostgroup_id(hostgroup_name=hostgroup_name)
        if not hostgroup_id:
            LOG.info('Hostgroup id not found for hostgroup: {}'.format(
                hostgroup_name))
            return
        self.api.hostgroup.massadd(
            groups={'groupid': hostgroup_id},
            hosts=[host_id]
        )
|
Python
| 0.000927
|
@@ -6177,17 +6177,16 @@
oupid':
-'
hostgrou
@@ -6189,17 +6189,16 @@
group_id
-'
%7D,%0A
|
d99dfa94a42d70900e31c36023602bea3e5efdfb
|
Bump forgotten version to 3.2
|
debinterface/__init__.py
|
debinterface/__init__.py
|
# -*- coding: utf-8 -*-
"""Imports for easier use"""
from .adapter import NetworkAdapter
from .adapterValidation import NetworkAdapterValidation
from .dnsmasqRange import (DnsmasqRange,
                           DEFAULT_CONFIG as DNSMASQ_DEFAULT_CONFIG)
from .hostapd import Hostapd
from .interfaces import Interfaces
from .interfacesReader import InterfacesReader
from .interfacesWriter import InterfacesWriter

# Package version (single source of truth for the distribution).
__version__ = '3.1.0'

# Names re-exported as the package's public API.
__all__ = [
    'NetworkAdapter',
    'NetworkAdapterValidation',
    'DnsmasqRange',
    'DNSMASQ_DEFAULT_CONFIG',
    'Hostapd',
    'Interfaces',
    'InterfacesReader',
    'InterfacesWriter'
]
|
Python
| 0
|
@@ -428,9 +428,9 @@
'3.
-1
+2
.0'%0A
|
e9e6d5a6c42ff1522010f003fbed2cd324eab48e
|
Update cluster config
|
configs/config_cluster.py
|
configs/config_cluster.py
|
# Kallisto transcriptome index (hg19). NOTE(review): not referenced by the
# derived paths below — confirm it is still used by the pipeline.
CDNA = '/home/cmb-panasas2/skchoudh/genomes/hg19/kallisto/hg19'
# Root directory holding all genome builds.
GENOMES_DIR='/home/cmb-panasas2/skchoudh/genomes'
# Output directory for this analysis run.
OUT_DIR = '/home/cmb-panasas2/skchoudh/HuR_results/analysis/rna_seq_star_hg38_annotated'
# Raw FASTQ input directory.
RAWDATA_DIR ='/home/cmb-06/as/skchoudh/data/HuR_Mouse_Human_liver/rna-seq/Penalva_L_08182016'
# Sample identifiers (HepG2 controls vs HuR knockdown replicates).
SAMPLES=['HepG2_CTRL1_S31_L004', 'HepG2_CTRL_2_S33_L004',
        'HepG2_CTRL_7_S35_L004', 'HepG2_HuR_KD_1_S32_L004',
        'HepG2_HuR_KD_2_S34_L004', 'HepG2_HuR_KD_7_S36_L004']
# Genome build and reference paths derived from it.
GENOME_BUILD = 'hg38'
GENOME_FASTA = GENOMES_DIR + '/' + GENOME_BUILD + '/fasta/'+ GENOME_BUILD+ '.fa'
STAR_INDEX = GENOMES_DIR + '/' + GENOME_BUILD + '/star_annotated'
GTF = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'gencode.v25.annotation.gtf'
|
Python
| 0.000001
|
@@ -162,16 +162,13 @@
lts/
-analysis
+human
/rna
@@ -193,16 +193,98 @@
otated'%0A
+SRC_DIR = '/home/cmb-panasas2/skchoudh/github_projects/clip_seq_pipeline/scripts'%0A
RAWDATA_
@@ -809,10 +809,363 @@
on.gtf'%0A
+GENE_NAMES = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + GENOME_BUILD+'_gene_names_stripped.tsv'%0AGENE_LENGTHS = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'gencode.v25.coding_lengths.tsv' #+ GENOME_BUILD+'_gene_lengths.tsv'%0AGENE_NAME_MAP = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + GENOME_BUILD + '_gene_names_stripped.tsv'%0A%0A
%0A%0A
|
64a54f6ca784e9cabc9048b9fe14f8594ad5e5c6
|
fix signal to create a SocialUser profile object
|
src/moderation/signals.py
|
src/moderation/signals.py
|
import logging
import random
import os
from django.db.utils import DatabaseError
from django.db import transaction
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
from moderation.thumbnail import generate_thumbnail
from moderation.models import (
Queue,
Moderation,
SocialUser,
UserCategoryRelationship,
Category,
Moderator
)
from moderation.tasks import (
handle_create_update_profile,
handle_sendmoderationdm,
)
logger = logging.getLogger(__name__)
@receiver(post_save, sender=Queue)
def create_moderation(sender, instance, created, **kwargs):
    # For every newly queued item, pick a random moderator (or dev as a
    # fallback) and create the Moderation row assigning them the item.
    if created:
        logger.debug(f"inside create_moderation {sender}, {instance}, {created}")
        logger.debug(f"inside create_moderation if created {sender}, {instance}, {created}")
        moderatorid_int_lst = []
        if settings.MODERATION["moderator"]:
            #moderatorid_int_lst.extend(SocialUser.objects.moderators())
            moderatorid_int_lst.extend(SocialUser.objects.active_moderators())
        elif settings.MODERATION["dev"]:
            logger.debug(f"SocialUser.objects.devs(): {SocialUser.objects.devs()}")
            moderatorid_int_lst.extend(SocialUser.objects.devs())
        else:
            # Moderation disabled entirely; leave the item unassigned.
            return
        logger.debug(f"moderatorid_int_lst: {moderatorid_int_lst}")
        if not moderatorid_int_lst:
            # No active moderators available — fall back to devs.
            moderatorid_int_lst.extend(SocialUser.objects.devs())
        if not moderatorid_int_lst:
            return
        # Re-seed from the OS entropy pool before every draw.
        random.seed(os.urandom(128))
        chosenmoderatorid_int = random.choice(moderatorid_int_lst)
        logger.debug(f"chosenmoderatorid_int: {chosenmoderatorid_int}")
        moderator_mi = SocialUser.objects.get(user_id = chosenmoderatorid_int)
        Moderation.objects.create(moderator = moderator_mi, queue = instance)
@receiver(post_save, sender=Queue)
def createprofile_queue(sender, instance, created, **kwargs):
    """On newly queued items, asynchronously create/update the user's profile."""
    if not created:
        return
    logger.debug(f"instance {instance}")
    logger.debug(f"instance.status_id {instance.user_id}")
    handle_create_update_profile.apply_async(args=(instance.user_id,))
@receiver(post_save, sender=UserCategoryRelationship)
def log_usercategoryrelationship(sender, instance, created, **kwargs):
    """Debug-log every saved UserCategoryRelationship."""
    for message in (
        f"ucr saved: sender: {sender}; instance: {instance}; created: {created}",
        f"instance.social_user.user_id: {instance.social_user.user_id}",
        f"instance.moderator.user_id: {instance.moderator.user_id}",
        f"instance.category: {instance.category}",
    ):
        logger.debug(message)
@receiver(post_save, sender=UserCategoryRelationship)
def moderator(sender, instance, created, **kwargs):
    # When a user is newly placed in the "moderator" category, promote them
    # by creating a Moderator row, then regenerate the moderator thumbnail.
    if created:
        try:
            mod_cat = Category.objects.get(name="moderator")
        except Category.DoesNotExist:
            # No "moderator" category configured; nothing to promote to.
            return
        if instance.category == mod_cat:
            with transaction.atomic():
                try:
                    Moderator.objects.create(socialuser=instance.social_user)
                except DatabaseError:
                    # Presumably a duplicate Moderator row (already promoted);
                    # skip the thumbnail refresh. TODO confirm the constraint.
                    return
            generate_thumbnail()
@receiver(post_save, sender=UserCategoryRelationship)
def createprofile_usercategoryrelationship(sender, instance, created, **kwargs):
    """Queue a profile create/update for the relationship's social user."""
    logger.debug(f"instance {instance}")
    logger.debug(f"instance.social_user.user_id {instance.social_user.user_id}")
    target_user_id = instance.social_user.user_id
    handle_create_update_profile.apply_async(args=(target_user_id,))
@receiver(post_save, sender=Moderation)
def createupdatemoderatorprofile(sender, instance, created, **kwargs):
    """When a Moderation is created, ensure its moderator has a profile."""
    if not created:
        return
    if hasattr(instance.moderator, 'profile'):
        return
    logger.debug(f"instance.moderator.user_id: {instance.moderator.user_id}")
    handle_create_update_profile.apply_async(args=(instance.moderator.user_id,))
@receiver(post_save, sender=SocialUser)
def create_update_socialuser_profile(sender, instance, created, **kwargs):
    """When a SocialUser is created without a profile, queue its creation.

    Bug fix: the sender here is SocialUser itself, so the instance must be
    checked for a profile directly — the previous ``instance.moderator``
    was copied from the Moderation handler and is not an attribute of
    SocialUser.
    """
    if created:
        if not hasattr(instance, 'profile'):
            logger.debug(f"instance.user_id: {instance.user_id}")
            handle_create_update_profile.apply_async(args=(instance.user_id,))
@receiver(post_save, sender=Moderation)
def sendmoderationdm(sender, instance, created, **kwargs):
    """Schedule the moderation DM one minute after the transaction commits."""
    if not created:
        return

    def _schedule():
        handle_sendmoderationdm.apply_async(
            args=(instance.id,),
            countdown=60.0
        )

    transaction.on_commit(_schedule)
|
Python
| 0
|
@@ -3979,34 +3979,24 @@
ttr(instance
-.moderator
, 'profile')
|
79eb9241ac8ce36b14512287bc473a426db50cf1
|
Use elif to make it faster.
|
Example/Pluton/Plugins/Example/Example.py
|
Example/Pluton/Plugins/Example/Example.py
|
import clr
import sys
clr.AddReferenceByPartialName("UnityEngine")
clr.AddReferenceByPartialName("Pluton")
import UnityEngine
import Pluton
from Pluton import InvItem
from System import *
from UnityEngine import *
class Example:
    """Example Pluton plugin: announces joins/leaves and serves a few chat
    commands (/kit, /apple, /help)."""

    def On_PlayerConnected(self, player):
        # Tell everyone except the new player about the join.
        for p in Server.ActivePlayers:
            if p.Name != player.Name:
                p.Message(String.Format("{0} has joined the server!", player.Name))

    def On_PlayerDisconnected(self, player):
        # Tell everyone except the leaving player about the departure.
        for p in Server.ActivePlayers:
            if p.Name != player.Name:
                p.Message(String.Format("{0} has left the server!", player.Name))

    def On_Command(self, cmd):
        try:
            # Commands are mutually exclusive, so use elif to stop checking
            # once one has matched.
            if cmd.cmd == "kit":
                if Server.LoadOuts.ContainsKey(cmd.quotedArgs[0]):
                    loadout = Server.LoadOuts[cmd.quotedArgs[0]]
                    loadout.ToInv(cmd.User.Inventory)
            elif cmd.cmd == "apple":
                cmd.User.Message("An apple a day keeps the doctor away!")
                item = InvItem("Apple")
                item.Instantiate(Vector3(cmd.User.X + 3, cmd.User.Y + 3, cmd.User.Z + 3))
            elif cmd.cmd == "help":
                cmd.User.Message("Usable command: /whereami, /kit starter")
        except Exception:
            # Fix: was a bare except, and the format string had no {1} slot
            # for the joined args, which were silently dropped.
            Debug.Log(String.Format("Something went wrong while executing: /{0} {1}", cmd.cmd, String.Join(" ", cmd.args)))
|
Python
| 0
|
@@ -779,24 +779,26 @@
ventory)%0A%09%09%09
+el
if(cmd.cmd =
@@ -976,24 +976,26 @@
.Z + 3))%0A%09%09%09
+el
if(cmd.cmd =
@@ -1194,8 +1194,9 @@
.args)))
+%0A
|
9af1cbe0676ca71edecfa6d44c66690a5a583b01
|
Rewrite for clarity
|
constructive_hierarchy.py
|
constructive_hierarchy.py
|
'''Reason about a directed graph in which the (non-)existence of some edges
must be inferred by the disconnectedness of certain vertices. Collect (truthy)
evidence for boolean function return values.'''
def transitive_closure_dict(vertices, edges):
    '''Find the transitive closure of a dict mapping vertices to their paths.'''
    # Extend every known path by one outgoing edge.
    frontier = {dst: vertices[src] + ((src, dst),)
                for src, dst in edges if src in vertices}
    if set(frontier) <= set(vertices):
        # No new vertex was reached; the closure is complete.
        return vertices
    merged = dict(frontier)
    merged.update(vertices)  # already-known paths take precedence
    return transitive_closure_dict(merged, edges)
def transitive_closure(vertex, edges):
    '''Closure of a single vertex as a dict of reachable vertices to paths.'''
    paths = transitive_closure_dict({vertex: ()}, edges)
    # Use a (truthy) loop instead of an empty path
    paths[vertex] = (vertex, vertex)
    return paths
def downward_closure(vertex, edges):
    '''Find the downward closure of a vertex (everything reachable from it).'''
    return transitive_closure(vertex, edges)
def upward_closure(vertex, edges):
    '''Find the upward closure of a vertex (everything that can reach it).'''
    reversed_edges = {(dst, src) for src, dst in edges}
    return transitive_closure(vertex, reversed_edges)
def is_connected(a, b, edges):
    '''Return the path from a to b as (truthy) evidence, else False.'''
    reachable = downward_closure(a, edges)
    return reachable.get(b, False)
def is_separated(a, b, edges, disconnections):
    '''Check that a and b will remain not connected even if edges are added to
    the graph, as long as the vertex pairs listed in disconnections remain
    disconnected.

    Returns a witness (p_path, q_path) — paths placing p above a and q below
    b for some (p, q) in disconnections — or False when none exists.'''
    # Any p that reaches a, combined with any q reachable from b, would be
    # connected by adding the edge (a, b); if (p, q) must stay disconnected,
    # a -> b can never be created.
    for p, p_path in upward_closure(a, edges).items():
        for q, q_path in downward_closure(b, edges).items():
            if (p, q) in disconnections:
                # Should reverse p_path
                return p_path, q_path
    return False
def find_possible_connections(vertices, edges, disconnections):
    '''Find which edges can be added to create new connections, without
    connecting any pairs in disconnections.'''
    candidates = set()
    for a in vertices:
        for b in vertices:
            if is_connected(a, b, edges):
                continue  # nothing new would be connected
            if is_separated(a, b, edges, disconnections):
                continue  # adding (a, b) would violate a disconnection
            candidates.add((a, b))
    return candidates
def is_redundant_edge(edge, edges):
    '''Give alternate path (not using the edge itself) if one exists.'''
    remaining = edges - {edge}
    return is_connected(edge[0], edge[1], remaining)
def spanning_tree(edges):
    '''Drop redundant edges one at a time until none remain.'''
    current = edges
    while True:
        redundant = next(
            (e for e in current if is_redundant_edge(e, current)), None)
        if redundant is None:
            return current
        current = current - {redundant}
def rank_possible_edge(edge, vertices, edges, disconnections):
    '''Rank an edge by the possibilities left whether it exists or not.'''
    def count_possible(candidate_edges, candidate_disconnections):
        return len(find_possible_connections(
            vertices, candidate_edges, candidate_disconnections))

    with_edge = count_possible(edges | {edge}, disconnections)
    without_edge = count_possible(edges, disconnections | {edge})
    return abs(with_edge) + abs(without_edge)
|
Python
| 0.000008
|
@@ -225,16 +225,22 @@
re_dict(
+known_
vertices
@@ -241,32 +241,32 @@
rtices, edges):%0A
-
'''Find the
@@ -338,25 +338,29 @@
-neighbour
+found_vertice
s = %7Bb:
vert
@@ -355,16 +355,22 @@
s = %7Bb:
+known_
vertices
@@ -385,16 +385,20 @@
a, b),)%0A
+
@@ -429,24 +429,30 @@
ges if a in
+known_
vertices%7D%0A
@@ -460,37 +460,47 @@
if
-set(neighbours).issubset(set(
+all(v in known_vertices for v in found_
vert
@@ -504,17 +504,16 @@
ertices)
-)
:%0A
@@ -521,16 +521,22 @@
return
+known_
vertices
@@ -532,24 +532,66 @@
wn_vertices%0A
+ found_vertices.update(known_vertices)%0A
return t
@@ -617,35 +617,22 @@
ict(
-dict(neighbours, **
+found_
vertices
), e
@@ -627,17 +627,16 @@
vertices
-)
, edges)
|
7760d75bb5ca38d2c96924e0ea1d65485cdc5c6f
|
Update version 0.12.2 -> 0.12.3
|
dimod/__init__.py
|
dimod/__init__.py
|
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# version is used by serialization below so we need it before everything
__version__ = '0.12.2'
from dimod.constrained import *
import dimod.constrained
from dimod.core import *
import dimod.core
from dimod.cyutilities import *
from dimod.reference import *
import dimod.reference
from dimod.roof_duality import fix_variables
from dimod.binary import *
import dimod.binary
from dimod.discrete import *
import dimod.testing
from dimod.converters import *
import dimod.decorators
import dimod.generators
from dimod.exceptions import *
import dimod.exceptions
from dimod.higherorder import make_quadratic, make_quadratic_cqm, reduce_binary_polynomial, poly_energy, poly_energies, BinaryPolynomial
import dimod.higherorder
from dimod.package_info import __version__, __author__, __authoremail__, __description__
from dimod.quadratic import *
import dimod.quadratic
from dimod.traversal import *
from dimod.sampleset import *
from dimod.serialization.format import set_printoptions
import dimod.lp
from dimod.utilities import *
import dimod.utilities
from dimod.vartypes import *
# flags for some global features
# NOTE(review): presumably toggles support for real-valued interactions in
# quadratic models — confirm before relying on it.
REAL_INTERACTIONS = False
|
Python
| 0.000001
|
@@ -697,17 +697,17 @@
= '0.12.
-2
+3
'%0A%0Afrom
|
a06204dcc8f9527135de7b1711234a03230d3e47
|
Change the name of the class that takes care of user ID validation.
|
src/server/rest_api.py
|
src/server/rest_api.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2016 Erik Nordstrøm <erik@nordstroem.no>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import uuid
import json
import falcon
from wsgiref import simple_server
def init_game(user_id):
    """Create a fresh game for *user_id* and return its public descriptor."""
    # TODO: actually create the game and store it in memory
    #       (Redis? ring buffer?), including state and encrypted shadow state.
    new_game_id = str(uuid.uuid4())
    return {'game_id': new_game_id}
class UserIDValidation:
    """Falcon middleware: require a 'user_id' cookie holding a v4 UUID."""

    def process_request(self, req, resp):
        try:
            raw = req.cookies['user_id']
        except KeyError:
            raise falcon.HTTPBadRequest('User ID not provided',
                'Cookie \'user_id\' must be set.',
                href='/docs/rest-api/request-headers.htm')
        try:
            candidate = uuid.UUID(raw)
        except ValueError:
            valid = False
        else:
            valid = (candidate.variant == uuid.RFC_4122
                     and candidate.version == 4)
        if not valid:
            raise falcon.HTTPBadRequest('User ID invalid',
                'Cookie \'user_id\' must hold a Version 4 UUID.',
                href='/docs/rest-api/request-headers.htm')
class RequireJSON:
    """Falcon middleware: accept and produce JSON only.

    https://falcon.readthedocs.io/en/stable/user/quickstart.html
    """

    def process_request(self, req, resp):
        if not req.client_accepts_json:
            raise falcon.HTTPNotAcceptable(
                'This API only supports responses encoded as JSON.',
                href='/docs/rest-api/response-body-json.htm')

        has_body = req.method in ('POST', 'PUT')
        if has_body and 'application/json' not in req.content_type:
            raise falcon.HTTPUnsupportedMediaType(
                'This API only supports requests encoded as JSON.',
                href='/docs/rest-api/json/request-body-json.htm')
class JSONTranslator:
    """Falcon middleware: decode request bodies from, and encode response
    results to, JSON.

    https://falcon.readthedocs.io/en/stable/user/quickstart.html
    """

    def process_request(self, req, resp):
        # Nothing to parse for bodiless requests.
        if req.content_length in (None, 0):
            return

        body = req.stream.read()
        if not body:
            raise falcon.HTTPBadRequest('Empty request body',
                'A valid JSON document is required.',
                href='/docs/rest-api/json/request-body-json.htm')

        try:
            req.context['doc'] = json.loads(body.decode('utf-8'))
        except (ValueError, UnicodeDecodeError):
            raise falcon.HTTPError(falcon.HTTP_753,
                'Malformed JSON',
                'Could not decode the request body. '
                'The JSON was incorrect or not encoded as UTF-8.',
                href='/docs/rest-api/json/request-body-json.htm')

    def process_response(self, req, resp, resource):
        # Serialize whatever the resource left in context['result'].
        if 'result' in req.context:
            resp.body = json.dumps(req.context['result'])
class CreateGame:
    """POST / — create a new game owned by the requesting user."""

    def on_post(self, req, resp):
        new_game = init_game(req.cookies['user_id'])
        req.context['result'] = new_game
        resp.status = falcon.HTTP_201
        resp.location = '/{0}/'.format(new_game['game_id'])
class PlayGame:
    """Routes for an existing game mounted at /{game_id}/."""

    def on_get(self, req, resp, game_id):
        # TODO: Ensure user is owner of game.
        req.context['result'] = {'game_id': game_id}
        resp.status = falcon.HTTP_200

    def on_post(self, req, resp, game_id):
        # TODO: Ensure user is owner of game.
        req.context['result'] = {'game_id': game_id}
        resp.status = falcon.HTTP_200

    def on_put(self, req, resp, game_id):
        # TODO: Restore from encrypted shadow state.
        pass
# Middleware runs in order: user-id validation, content negotiation, JSON I/O.
app = falcon.API(middleware=[
    UserIDValidation(), RequireJSON(), JSONTranslator()])
# POST / creates a game; /{game_id}/ serves an existing one.
create_game = CreateGame()
app.add_route('/', create_game)
play_game = PlayGame()
app.add_route('/{game_id}/', play_game)
# Dev entry point: serve on localhost:8080 with the stdlib WSGI server.
if __name__ == '__main__':
    httpd = simple_server.make_server('127.0.0.1', 8080, app)
    httpd.serve_forever()
|
Python
| 0
|
@@ -1115,19 +1115,18 @@
DValidat
-ion
+or
:%0A%0A d
@@ -4232,19 +4232,18 @@
DValidat
-ion
+or
(), Requ
|
8d72c58ac607f75c0a10ca9b79be9da59907cc7a
|
Update dev setting
|
src/server/settings.py
|
src/server/settings.py
|
"""
Django settings for server project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't9+m%qyni5%=__s8brz#tf#lv^1wy6)zj#m_2re&(_c(!_pixl'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
import sys
TESTING = sys.argv[1:2] == ['test']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_assets',
'south',
'edge',
)
if TESTING:
INSTALLED_APPS += ('django_nose',)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
SOUTH_TESTS_MIGRATE = False
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'server.urls'
WSGI_APPLICATION = 'server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'sqlite': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
'mysql': {
'ENGINE': 'django.db.backends.mysql',
'OPTIONS': { "init_command": "SET storage_engine=INNODB;" },
'HOST': '',
'PORT': '',
'NAME' : 'edge',
'USER': 'root',
'PASSWORD': 'password',
'ATOMIC_REQUESTS': True,
}
}
DEFAULT_DB = 'mysql'
DATABASES['default'] = DATABASES[DEFAULT_DB]
if TESTING:
other_dbs = [db for db in DATABASES if db != 'default']
for db in other_dbs:
del DATABASES[db]
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler'
}
},
'loggers': {
#'django.db.backends': { 'level': 'DEBUG', 'handlers': ['console'], },
},
}
# NCBI blast
NCBI_DIR = BASE_DIR+'/../ncbi'
NCBI_BIN_DIR = NCBI_DIR+'/bin'
NCBI_DATA_DIR = NCBI_DIR+'/blastdb'
# Primer3
PRIMER3_DIR = BASE_DIR+'/../primer3'
|
Python
| 0
|
@@ -2088,20 +2088,27 @@
AME' : '
-edge
+toolbox_dev
',%0A
|
4d983708981029f0c0c5d103f8329427ff824b1f
|
add user output when generating key pair
|
conda_build/main_sign.py
|
conda_build/main_sign.py
|
# (c) Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
import os
import sys
from os.path import isdir, join
try:
from Crypto.PublicKey import RSA
from Crypto import Random
except ImportError:
sys.exit("""\
Error: could not import Crypto (required for "conda sign").
Run the following command:
$ conda install -n root pycrypto
""")
from conda.utils import sha256_file
from conda.signature import KEYS_DIR, sig2ascii, verify
def keygen(name):
random_generator = Random.new().read
key = RSA.generate(1024, random_generator)
if not isdir(KEYS_DIR):
os.makedirs(KEYS_DIR)
with open(join(KEYS_DIR, name), 'wb') as fo:
fo.write(key.exportKey())
fo.write(b'\n')
with open(join(KEYS_DIR, '%s.pub' % name), 'wb') as fo:
fo.write(key.publickey().exportKey())
fo.write(b'\n')
def get_default_keyname():
if isdir(KEYS_DIR):
for fn in os.listdir(KEYS_DIR):
if not fn.endswith('.pub'):
return fn
return None
def sign(path, key):
return sig2ascii(key.sign(sha256_file(path), '')[0])
def main():
from optparse import OptionParser
p = OptionParser(
usage="usage: %prog [option] [FILE ...]",
description="tool for signing conda packages")
p.add_option('-k', '--keygen',
action="store",
help="generate a public-private "
"key pair ~/.conda/keys/<NAME>(.pub)",
metavar="NAME")
p.add_option('-v', '--verify',
action="store_true",
help="verify FILE(s)")
opts, args = p.parse_args()
if opts.keygen:
if args:
p.error('no arguments expected for --keygen')
keygen(opts.keygen)
return
if opts.verify:
for path in args:
print('%-65s %s' % (path, verify(path)))
return
key_name = get_default_keyname()
print("Using private key '%s' for signing." % key_name)
key = RSA.importKey(open(join(KEYS_DIR, key_name)).read())
for path in args:
print('signing: %s' % path)
with open('%s.sig' % path, 'w') as fo:
fo.write('%s ' % key_name)
fo.write(sign(path, key))
fo.write('\n')
if __name__ == '__main__':
main()
|
Python
| 0.000004
|
@@ -622,16 +622,67 @@
(name):%0A
+ print(%22Generating public/private key pair...%22)%0A
rand
@@ -821,34 +821,31 @@
S_DIR)%0A%0A
-with open(
+path =
join(KEYS_DI
@@ -848,24 +848,87 @@
S_DIR, name)
+%0A print(%22Storing private key: %25s%22 %25 path)%0A with open(path
, 'wb') as f
@@ -993,26 +993,23 @@
')%0A%0A
-with open(
+path =
join(KEY
@@ -1031,16 +1031,79 @@
%25 name)
+%0A print(%22Storing public key : %25s%22 %25 path)%0A with open(path
, 'wb')
|
580f8f477ccffa022f64c0f11686d51eb659ca26
|
Add a levelname Python logging key that is consistent with Java level names.
|
openquake/logs.py
|
openquake/logs.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2011, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# only, as published by the Free Software Foundation.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License version 3 for more details
# (a copy is included in the LICENSE file that accompanied this code).
#
# You should have received a copy of the GNU Lesser General Public License
# version 3 along with OpenQuake. If not, see
# <http://www.gnu.org/licenses/lgpl-3.0.txt> for a copy of the LGPLv3 License.
"""
Set up some system-wide loggers
TODO(jmc): init_logs should take filename, or sysout
TODO(jmc): support debug level per logger.
"""
from amqplib import client_0_8 as amqp
import logging
from celery.log import redirect_stdouts_to_logger
from openquake import flags
FLAGS = flags.FLAGS
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warn': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
# This parameter sets where bin/openquake and the likes will send their
# logging. This parameter has not effect on the workers. To have a similar
# effect on the workers use the celeryd --logfile parameter.
flags.DEFINE_string('logfile', '',
'Path to the log file. Leave empty to log to stderr.')
RISK_LOG = logging.getLogger("risk")
HAZARD_LOG = logging.getLogger("hazard")
LOG = logging.getLogger()
def init_logs(level='warn'):
"""Load logging config, and set log levels based on flags"""
logging_level = LEVELS.get(level, 'warn')
# Add the logging handler to the root logger. This will be a file or
# stdout depending on the presence of the logfile parameter.
#
# Note that what we are doing here is just a simplified version of what the
# standard logging.basicConfig is doing. An important difference is that
# we add our handler every time init_logs() is called, whereas basicConfig
# does nothing if there is at least one handler (any handler) present.
# This allows us to call init_logs multiple times during the unittest, to
# reinstall our handler after nose (actually its logcapture plugin) throws
# it away.
found = False
for hdlr in LOG.handlers:
if (isinstance(hdlr, logging.FileHandler)
or isinstance(hdlr, logging.StreamHandler)):
found = True
if not found:
filename = FLAGS.get('logfile', '')
if filename:
hdlr = logging.FileHandler(filename, 'a')
else:
hdlr = logging.StreamHandler()
hdlr.setFormatter(logging.Formatter(logging.BASIC_FORMAT, None))
LOG.addHandler(hdlr)
logging.getLogger("amqplib").setLevel(logging.ERROR)
LOG.setLevel(logging_level)
RISK_LOG.setLevel(logging_level)
HAZARD_LOG.setLevel(logging_level)
# capture java logging (this is what celeryd does with the workers, we use
# exactly the same system for bin/openquakes and the likes)
redirect_stdouts_to_logger(LOG)
class AMQPHandler(logging.Handler): # pylint: disable=R0902
"""
Logging handler that sends log messages to AMQP
:param host: AMQP `host:port` pair (port defaults to 5672)
:param username: AMQP username
:param password: AMQP password
:param virtual_host: AMQP virtual host
:param exchange: AMQP exchange name
:param routing_key: AMQP routing key (can use the same interpolation
values valid for a `logging` message format string)
:param level: logging level
"""
# mimic Log4j MDC
MDC = dict()
"""
A dictionary containing additional values that can be used for log message
and routing key formatting.
After doing::
AMQPHandler.MDC['job_key'] = some_value
the value can be interpolated in the log message and the routing key
by using the normal `%(job_key)s` Python syntax.
""" # pylint: disable=W0105
# pylint: disable=R0913
def __init__(self, host="localhost:5672", username="guest",
password="guest", virtual_host="/",
exchange="", routing_key="", level=logging.NOTSET):
logging.Handler.__init__(self, level=level)
self.host = host
self.username = username
self.password = password
self.virtual_host = virtual_host
self.exchange = exchange
self.routing_key = logging.Formatter(routing_key)
self.connection = None
self.channel = None
def _connect(self):
"""Create a new connection to the AMQP server"""
if self.connection and self.channel:
return self.channel
self.connection = amqp.Connection(host=self.host,
userid=self.username,
password=self.password,
virtual_host=self.virtual_host,
insist=False)
self.channel = self.connection.channel()
return self.channel
def _update_record(self, record):
"""
If the user set some values in the `AMQPHandler.MDC` attribute,
return a new :class:`logging.LogRecord` objects containing the
original values plus the values contained in the `MDC`.
"""
if not self.MDC:
return record
# create a new LogRecord object containing the custom keys in the
# MDC class field
args = self.MDC.copy()
args.update(record.args)
new_record = logging.LogRecord(
name=record.name, level=record.levelno, pathname=record.pathname,
lineno=record.lineno, msg=record.msg, args=[args],
exc_info=record.exc_info, func=record.funcName)
# the documentation says that formatters use .args; in reality
# the reach directly into __dict__
for key, value in self.MDC.items():
if key not in new_record.__dict__:
new_record.__dict__[key] = value
return new_record
def emit(self, record):
channel = self._connect()
full_record = self._update_record(record)
msg = amqp.Message(body=self.format(full_record))
routing_key = self.routing_key.format(full_record)
channel.basic_publish(msg, exchange=self.exchange,
routing_key=routing_key)
|
Python
| 0.999954
|
@@ -4166,16 +4166,98 @@
=W0105%0A%0A
+ LEVELNAMES = %7B%0A 'WARNING': 'WARN',%0A 'CRITICAL': 'FATAL',%0A %7D%0A%0A
# py
@@ -6165,16 +6165,17 @@
# the
+y
reach d
@@ -6333,24 +6333,145 @@
y%5D = value%0A%0A
+ new_record.__dict__%5B'loglevel'%5D = %5C%0A self.LEVELNAMES.get(new_record.levelname, new_record.levelname)%0A%0A
retu
|
0d8591cca2b7d9687c8915e1caf6f6af85e974f6
|
Comment PEP
|
TBFW/core.py
|
TBFW/core.py
|
# coding=utf-8
import gc
import json
import random
import socket
import threading
import time
import traceback
import urllib
from datetime import datetime
from logging import getLogger, Formatter, FileHandler, INFO, CRITICAL
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from TBFW.constant import *
from TBFW.plugin import PluginManager
class Core:
def __init__(self):
gc.enable()
socket.setdefaulttimeout(30)
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', userAgent), ('Accept-Language', acceptLanguage)]
urllib.request.install_opener(opener)
for directory in [pluginsDir, logDir]:
if not os.path.isdir(directory):
os.mkdir(directory)
PM = PluginManager()
PM.searchAllPlugins()
self.plugins = PM.plugins
self.attachedStreamId = PM.attachedStreamId
#connect = MongoClient(DBInfo.Host)
#self.db = connect.bot
#self.db.authenticate(DBInfo.Username, DBInfo.Password, mechanism=DBInfo.Method)
#self.Set = db['Set'].find_one()
# db['Set'].update_one({}, {"$set": {"lastrun": datetime.now()}})
# db['Set'].update_one({}, {"$set": {"timed": 0, "threadc": 1, "minly": {"tweet": 0, "event": 0}}})
self.logPath = logDir + "/" + datetime.now().strftime(messageLogDatetimeFormat) + ".log"
self.logger = self.__getLogger()
self.boottime = datetime.now()
self.logger.info(messageSuccessInitialization.format(self.boottime))
def __getLogger(self):
logger = getLogger()
handler = FileHandler(self.logPath, "w", encoding="utf-8")
formatter = Formatter(messageLogFormat)
handler.setFormatter(formatter)
getLogger("requests").setLevel(CRITICAL)
getLogger("tweepy").setLevel(CRITICAL)
logger.addHandler(handler)
logger.setLevel(INFO)
return logger
def run(self):
for threadPlugin in self.plugins[pluginThread]:
t = threadPlugin.do()
t.setName(threadPlugin.attributeName)
t.start()
threading.Thread(name="__scheduleRegularPlugins", target=self.__scheduleRegularPlugins, args=()).start()
threading.Thread(name="__watchThreadActivity", target=self.__watchThreadActivity, args=()).start()
event_handler = ChangeHandler()
observer = Observer()
observer.schedule(event_handler, pluginsDir, recursive=False)
observer.start()
for n in self.attachedStreamId:
t = threading.Thread(name='Streaming for %s' % n, target=MakeUserStreamConnection, args=(n,))
t.start()
while True:
time.sleep(60)
def __scheduleRegularPlugins(self):
logger = self.logger
def _do(plugin):
try:
plugin.code.do()
logger.info(messageSuccessExecutingRegularPlugin.format(plugin.attributeName))
except:
logger.warning(messageErrorExecutingRegularPlugin.format(plugin.attributeName, traceback.format_exc()))
while True:
wait_time = 60 - datetime.now().second
time.sleep(wait_time)
datetime_hour = datetime.now().hour
datetime_minute = datetime.now().minute
for regularPlugin in self.plugins["regular"]:
if random.randint(1, regularPlugin.attributeRatio) != 1:
continue
if datetime_hour in regularPlugin.attributeHours and datetime_minute in regularPlugin.attributeMinutes:
threading.Thread(name=regularPlugin.attributeName, target=_do, args=(regularPlugin, )).start()
time.sleep(1)
def __watchThreadActivity(self):
while True:
result = [thread.name for thread in threading.enumerate()]
json.dump(result, open(jsonDir + "/thread.json", "w"), sort_keys=True)
# db['Set'].update_one({}, {"$set": {"threadc": len(result)}})
for threadPlugin in self.plugins[pluginThread]:
if threadPlugin.attributeName not in result:
t = threadPlugin.do()
t.setName(threadPlugin.attributeName)
t.start()
time.sleep(15)
class ChangeHandler(FileSystemEventHandler):
def on_created(self, event):
if event.is_directory:
return
if not event.src_path.endswith('.py'):
return
name = event.src_path[:-3].replace(PLUGIN_DIR+'/', '')
loader = machinery.SourceFileLoader(name, event.src_path)
try:
plugin = loader.load_module(name)
plugin._NAME = name
plugins[plugin.TARGET.lower()].append(plugin)
logger.info('プラグイン \"%s\"は有効になりました。' % name)
except Exception as e:
logger.warning('プラグイン \"%s\"は壊れています。有効にできませんでした。\nエラー詳細: %s' % (name, e))
def on_modified(self, event):
if event.is_directory:
return
if not event.src_path.endswith('.py'):
return
name = event.src_path[:-3].replace(PLUGIN_DIR + '/', '')
loader = machinery.SourceFileLoader(name, event.src_path)
try:
plugin = loader.load_module(name)
plugin._NAME = name
i = 0
for old_plugin in plugins[plugin.TARGET.lower()]:
if old_plugin._NAME == name:
plugins[plugin.TARGET.lower()][i] = plugin
i += 1
except Exception as e:
logger.warning('プラグイン \"%s\"は壊れています。更新できませんでした。\nエラー詳細: %s' % (name, e))
def on_deleted(self, event):
if event.is_directory:
return
if not event.src_path.endswith('.py'):
return
name = event.src_path[:-3].replace(PLUGIN_DIR + '/', '')
for kind, _plugins in plugins.items():
for plugin in _plugins:
if plugin._NAME == name:
plugins[kind].remove(plugin)
|
Python
| 0
|
@@ -378,17 +378,16 @@
anager%0A%0A
-%0A
class Co
@@ -853,16 +853,17 @@
mId%0A%0A%09%09#
+
connect
@@ -888,24 +888,25 @@
fo.Host)%0A%09%09#
+
self.db = co
@@ -914,24 +914,25 @@
nect.bot%0A%09%09#
+
self.db.auth
@@ -1002,16 +1002,17 @@
hod)%0A%09%09#
+
self.Set
@@ -5136,16 +5136,17 @@
%5D.remove(plugin)
+%0A
|
00b7cf15877dc17d07d591c893671decb6b869e2
|
Enable touch events for smoothness tests.
|
tools/perf/measurements/smoothness.py
|
tools/perf/measurements/smoothness.py
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from metrics import power
from measurements import smoothness_controller
from telemetry.page import page_measurement
class Smoothness(page_measurement.PageMeasurement):
def __init__(self):
super(Smoothness, self).__init__('RunSmoothness')
self._power_metric = None
self._smoothness_controller = None
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-gpu-benchmarking')
power.PowerMetric.CustomizeBrowserOptions(options)
def WillRunActions(self, page, tab):
self._power_metric = power.PowerMetric()
self._power_metric.Start(page, tab)
self._smoothness_controller = smoothness_controller.SmoothnessController()
self._smoothness_controller.Start(page, tab)
def DidRunActions(self, page, tab):
self._power_metric.Stop(page, tab)
self._smoothness_controller.Stop(tab)
def MeasurePage(self, page, tab, results):
self._power_metric.AddResults(tab, results)
self._smoothness_controller.AddResults(tab, results)
def CleanUpAfterPage(self, _, tab):
self._smoothness_controller.CleanUp(tab)
|
Python
| 0.00001
|
@@ -587,16 +587,77 @@
rking')%0A
+ options.AppendExtraBrowserArgs('--touch-events=enabled')%0A
powe
|
afb37f495f32ab03ea1a2b2dff566ae3d20eff5b
|
fix exception raising in svg2pdf
|
IPython/nbconvert/transformers/svg2pdf.py
|
IPython/nbconvert/transformers/svg2pdf.py
|
"""Module containing a transformer that converts outputs in the notebook from
one format to another.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import base64
import io
import os
import sys
import subprocess
from IPython.utils.tempdir import TemporaryDirectory
from IPython.utils.traitlets import Unicode
from .convertfigures import ConvertFiguresTransformer
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
INKSCAPE_COMMAND = 'inkscape --without-gui --export-pdf="{to_filename}" "{from_filename}"'
INKSCAPE_OSX_COMMAND = '/Applications/Inkscape.app/Contents/Resources/bin/inkscape --without-gui --export-pdf="{to_filename}" "{from_filename}"'
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class SVG2PDFTransformer(ConvertFiguresTransformer):
"""
Converts all of the outputs in a notebook from SVG to PDF.
"""
from_format = Unicode('svg', config=True, help='Format the converter accepts')
to_format = Unicode('pdf', config=False, help='Format the converter writes')
command = Unicode(config=True,
help="""The command to use for converting SVG to PDF
This string is a template, which will be formatted with the keys
to_filename and from_filename.
The conversion call must read the SVG from {from_flename},
and write a PDF to {to_filename}.
""")
def _command_default(self):
if sys.platform == "darwin":
return INKSCAPE_OSX_COMMAND
elif sys.platform == "win32":
# windows not yet supported
return ""
else:
return INKSCAPE_COMMAND
def convert_figure(self, data_format, data):
"""
Convert a single SVG figure to PDF. Returns converted data.
"""
#Work in a temporary directory
with TemporaryDirectory() as tmpdir:
#Write fig to temp file
input_filename = os.path.join(tmpdir, 'figure.' + data_format)
# SVG data is unicode text
with io.open(input_filename, 'w', encoding='utf8') as f:
f.write(data)
#Call conversion application
output_filename = os.path.join(tmpdir, 'figure.pdf')
shell = self.command.format(from_filename=input_filename,
to_filename=output_filename)
subprocess.call(shell, shell=True) #Shell=True okay since input is trusted.
#Read output from drive
# return value expects a filename
if os.path.isfile(output_filename):
with open(output_filename, 'rb') as f:
# PDF is a nb supported binary, data type, so base64 encode.
return base64.encodestring(f.read())
else:
return TypeError("Inkscape svg to png conversion failed")
|
Python
| 0.000001
|
@@ -3506,21 +3506,20 @@
r
-eturn
+aise
TypeErr
|
cf2004cec6e84cbec213f9e70dd8245327af541d
|
Update api.py
|
example/services/api.py
|
example/services/api.py
|
# external imports
from nautilus import APIGateway
from graphene import Schema, ObjectType, String, Mutation, Boolean
from nautilus.api import ServiceObjectType
from nautilus.api.fields import Connection
from nautilus.network import dispatchAction
from nautilus.conventions import getCRUDAction
# local imports
from .recipes import service as RecipeService
from .ingredients import service as IngredientService
# create the schema based on the query object
schema = Schema(name='Product Schema')
## define the schema that encapsulates the cloud
class Recipe(ServiceObjectType):
class Meta:
service = RecipeService
# connections are resolved/joined using the appropriate connection service
# you can avoid circular/undefined references using strings - nautilus will look
# for the corresponding ServiceObjectType
ingredients = Connection('Ingredient', description = 'The ingredients in this recipe.')
class Ingredient(ServiceObjectType):
class Meta:
service = IngredientService
recipes = Connection(Recipe, description = 'The recipes with this ingredient')
# add the query to the schema
schema.query = Query
# third party imports
class AddRecipeMutation(Mutation):
class Input:
name = String()
success = Boolean(description="Wether or not the dispatch was successful")
@classmethod
def mutate(cls, instance, args, info):
""" perform the mutation """
# send the new production action into the queue
dispatchAction({
'type': getCRUDAction('create', 'recipe'),
'payload': args
})
class Mutation(ObjectType):
""" the list of mutations that the api supports """
addRecipe = Field(AddRecipeMutation)
sceham.mutation = Mutation
# create a nautilus service with just the schema
service = APIGateway(schema=schema)
|
Python
| 0.000001
|
@@ -1172,32 +1172,8 @@
ry%0A%0A
-%0A# third party imports%0A%0A
clas
|
c4b83c9554ca0f501ac42c63a53394ff8b90c2af
|
bump version to 20190807
|
acbs/__init__.py
|
acbs/__init__.py
|
__version__ = '20181007'
|
Python
| 0
|
@@ -15,11 +15,11 @@
'201
-810
+908
07'%0A
|
c1c2ce6faa236fd37cf7b484b08f370d435d59f5
|
Make pyflakes happy
|
di-cleaner.py
|
di-cleaner.py
|
#!/usr/bin/env python
import argparse
import atexit
import logging
import sys
from datetime import datetime
from pprint import pformat
from operator import itemgetter
from docker import Client
from humanfriendly import format_size
DEFAULT_DOCKER_BASE_URL = 'unix://var/run/docker.sock'
HELP_DOCKER_BASE_URL = (
'Refers to the protocol+hostname+port where the '
'Docker server is hosted. Defaults to %s') % DEFAULT_DOCKER_BASE_URL
DEFAULT_DOCKER_API_VERSION = 'auto'
HELP_DOCKER_API_VERSION = (
'The version of the API the client will use. '
'Defaults to use the API version provided by the server')
DEFAULT_DOCKER_HTTP_TIMEOUT = 5
HELP_DOCKER_HTTP_TIMEOUT = (
'The HTTP request timeout, in seconds. '
'Defaults to %d secs') % DEFAULT_DOCKER_HTTP_TIMEOUT
DEFAULT_IMAGES_TO_KEEP = 2
HELP_IMAGES_TO_KEEP = (
'How many docker images to keep. '
'Defaults to %d images') % DEFAULT_IMAGES_TO_KEEP
HELP_KEEP_NONE_IMAGES = 'Keep <none> images'
HELP_NOOP = 'Do nothing'
HELP_VERBOSE = 'Print images to delete'
def _exit():
logging.shutdown()
def debug_var(name, var):
logging.debug('Var %s has: %s' % (name, pformat(var)))
def setup_parser(parser):
parser.add_argument('--debug', help='debug mode', action='store_true')
parser.add_argument(
'--base-url',
help=HELP_DOCKER_BASE_URL,
default=DEFAULT_DOCKER_BASE_URL)
parser.add_argument(
'--api-version',
help=HELP_DOCKER_API_VERSION,
default=DEFAULT_DOCKER_API_VERSION)
parser.add_argument(
'--http-timeout',
help=HELP_DOCKER_HTTP_TIMEOUT,
default=DEFAULT_DOCKER_HTTP_TIMEOUT,
type=int)
parser.add_argument(
'--images-to-keep',
help=HELP_IMAGES_TO_KEEP,
default=DEFAULT_IMAGES_TO_KEEP,
type=int)
parser.add_argument(
'--keep-none-images',
help=HELP_KEEP_NONE_IMAGES,
action='store_true')
parser.add_argument('--noop', help=HELP_NOOP, action='store_true')
parser.add_argument('--verbose', help=HELP_VERBOSE, action='store_true')
return parser
def validate_args(args):
if args.http_timeout < 0:
sys.stderr.write('HTTP timeout should be 0 or bigger\n')
sys.exit(1)
if args.images_to_keep < 0:
sys.stderr.write('Images to keep should be 0 or bigger\n')
sys.exit(1)
def split_by_none((non_none, none), dict_):
if u'<none>:<none>' in dict_[u'RepoTags']:
none.append(dict_)
else:
non_none.append(dict_)
return (non_none, none)
def split_images(images):
return reduce(split_by_none, images, ([], []))
def remove_keys_from_dict(keys, dict_):
return {k: v for k, v in dict_.iteritems() if k not in keys}
def add_image_to_grp_images(grp_images, image):
repo, _ = image[u'RepoTags'][0].split(':')
new_image = remove_keys_from_dict([u'RepoTags'], image)
new_image[u'Tags'] = [e.split(':')[-1] for e in image[u'RepoTags']]
if repo in grp_images:
grp_images[repo].append(new_image)
else:
grp_images[repo] = [new_image]
return grp_images
def group_by_repo(images):
return reduce(add_image_to_grp_images, images, {})
def reverse_sort_images_created(images):
return sorted(images, key=itemgetter(u'Created'), reverse=True)
def sort_images_in_repos(repos):
return {k: reverse_sort_images_created(v) for k, v in repos.iteritems()}
def fix_none_image(image):
new_image = remove_keys_from_dict([u'RepoTags'], image)
new_image[u'Tags'] = image[u'RepoTags']
return new_image
def beautify_image(image):
new_image = remove_keys_from_dict(
[u'RepoDigests', u'ParentId', u'Labels'],
image)
new_image[u'Created'] = datetime.fromtimestamp(
image[u'Created']).isoformat(' ')
new_image[u'Size'] = format_size(image[u'Size'])
new_image[u'VirtualSize'] = format_size(image[u'VirtualSize'])
return new_image
def print_images_to_delete(repos):
print('Images to delete')
print(pformat({k: [beautify_image(e) for e in v]
for k, v in repos.iteritems()}))
def remove_docker_image(client, id_):
try:
client.remove_image(id_)
except Exception as e:
pass
def clean_images_in_repo(client, images):
[remove_docker_image(client, image[u'Id']) for image in images]
def clean_repos(client, repos):
[clean_images_in_repo(client, images) for images in repos.itervalues()]
def main():
atexit.register(func=_exit)
parser = setup_parser(argparse.ArgumentParser(
description='Clean old docker images'))
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
if args.debug:
debug_var(name='args', var=args)
validate_args(args)
client = Client(
base_url=args.base_url,
version=args.api_version,
timeout=args.http_timeout)
images = client.images()
if args.debug:
debug_var(name='images', var=images)
non_none_images, none_images = split_images(images)
if args.debug:
debug_var(name='non_none_images', var=non_none_images)
debug_var(name='none_images', var=none_images)
repos = sort_images_in_repos(group_by_repo(non_none_images))
if args.debug:
debug_var(name='repos', var=repos)
to_delete = {}
if not args.keep_none_images:
to_delete[u'<none>'] = [fix_none_image(e) for e in none_images]
if args.debug:
debug_var(name='to_delete', var=to_delete)
repos_w_images = {k: v for k, v in repos.iteritems()
if len(v) > args.images_to_keep}
if args.debug:
debug_var(name='repos_w_images', var=repos_w_images)
to_delete.update({k: v[args.images_to_keep:]
for k, v in repos_w_images.iteritems()})
if args.debug:
debug_var(name='to_delete', var=to_delete)
if args.verbose:
print_images_to_delete(to_delete)
if args.noop:
sys.exit(0)
clean_repos(client, to_delete)
if __name__ == '__main__':
main()
|
Python
| 0.00006
|
@@ -4198,13 +4198,8 @@
tion
- as e
:%0A
|
99cfafa9e0382e71644731590a1cc5737183e506
|
add exclude flag and some additional regexes to exclude strings that shouldn't be translated
|
django-template-i18n-lint.py
|
django-template-i18n-lint.py
|
#! /usr/bin/python
"""
Prints out all
"""
import os
import re
import sys
from optparse import OptionParser
def location(str, pos):
"""Given a string str and an integer pos, find the line number and character in that line that correspond to pos"""
lineno, charpos = 1, 1
counter = 0
for char in str:
if counter == pos:
return lineno, charpos
elif char == '\n':
lineno += 1
charpos = 1
counter += 1
else:
charpos += 1
counter += 1
return lineno, charpos
# Things that are OK:
GOOD_STRINGS = re.compile(
r"""
# django comment
( {%\ comment\ %}.*?{%\ endcomment\ %}
# already translated text
|{%\ ?blocktrans.*?{%\ ?endblocktrans\ ?%}
# any django template function (catches {% trans ..) aswell
|{%.*?%}
# CSS
|<style.*?</style>
# JS
|<script.*?</script>
# A html title or value attribute that's been translated
|(?:value|title|summary|alt)="{%\ ?trans.*?%}"
# A html title or value attribute that's just a template var
|(?:value|title|summary|alt)="{{.*?}}"
# An <option> value tag
|<option[^<>]+?value="[^"]*?"
# Any html attribute that's not value or title
|[a-z:-]+?(?<!alt)(?<!value)(?<!title)(?<!summary)='[^']*?'
# Any html attribute that's not value or title
|[a-z:-]+?(?<!alt)(?<!value)(?<!title)(?<!summary)="[^"]*?"
# HTML opening tag
|<[\w:]+
# End of a html opening tag
|>
|/>
# closing html tag
|</.*?>
# any django template variable
|{{.*?}}
# HTML doctype
|<!DOCTYPE.*?>
# IE specific HTML
|<!--\[if.*?<!\[endif\]-->
# HTML comment
|<!--.*?-->
# HTML entities
|&[a-z]{1,10};
# CSS style
|<style.*?</style>
# another common template comment
|{\#.*?\#}
)""",
# MULTILINE to match across lines and DOTALL to make . include the newline
re.MULTILINE|re.DOTALL|re.VERBOSE)
# Stops us matching non-letter parts, e.g. just hypens, full stops etc.
LETTERS = re.compile("\w")
def replace_strings(filename):
full_text_lines = []
for index, message in enumerate(GOOD_STRINGS.split(open(filename).read())):
if index % 2 == 0 and re.search("\w", message):
before, message, after = re.match("^(\s*)(.*?)(\s*)$", message, re.DOTALL).groups()
message = message.strip().replace("\n", "").replace("\r", "")
change = raw_input("Make '%s' translatable? [Y/n] " % message)
if change == 'y' or change == "":
message = '%s{%% trans "%s" %%}%s' % (before, message, after)
full_text_lines.append(message)
full_text = "".join(full_text_lines)
save_filename = filename.split(".")[0] + "_translated.html"
open(save_filename, 'w').write(full_text)
print "Fully translated! Saved as: %s" % save_filename
def non_translated_text(filename):
template = open(filename).read()
offset = 0
# Find the parts of the template that don't match this regex
# taken from http://www.technomancy.org/python/strings-that-dont-match-regex/
for index, match in enumerate(GOOD_STRINGS.split(template)):
if index % 2 == 0:
# Ignore it if it doesn't have letters
if LETTERS.search(match):
lineno, charpos = location(template, offset)
yield (lineno, charpos, match.strip().replace("\n", "").replace("\r", "")[:120])
offset += len(match)
def print_strings(filename):
for lineno, charpos, message in non_translated_text(filename):
print "%s:%s:%s:%s" % (filename, lineno, charpos, message)
if __name__ == '__main__':
parser = OptionParser(usage="usage: %prog [options] <filenames>")
parser.add_option("-r", "--replace", action="store_true", dest="replace",
help="Ask to replace the strings in the file.", default=False)
(options, args) = parser.parse_args()
# Create a list of files to check
if len(args) == 0:
args = [os.getcwd()]
files = []
for arg in args:
if os.path.isdir(arg):
for dirpath, dirs, filenames in os.walk(arg):
files.extend(os.path.join(dirpath, fname)
for fname in filenames
if fname.endswith('.html') or fname.endswith('.txt'))
else:
files.append(arg)
for filename in files:
if options.replace:
replace_strings(filename)
else:
print_strings(filename)
|
Python
| 0
|
@@ -1511,32 +1511,210 @@
ummary)=%22%5B%5E%22%5D*?%22
+%0A %0A # Any html attribute that's not value or title%0A %7C%5Ba-z:-%5D+?(?%3C!alt)(?%3C!value)(?%3C!title)(?%3C!summary)=%5B%5E%5CW%5D*?%5B(%5Cw%7C%3E)%5D%0A %0A %7C%5B(SELECTED%7CCHECKED)%5D
%0A%0A # HTM
@@ -1907,16 +1907,77 @@
%7C%7B%7B.*?%7D%7D
+%0A %0A # any django template tag%0A %7C%7B%25.*?%25%7D
%0A%0A
@@ -2152,16 +2152,16 @@
ntities%0A
-
@@ -2174,16 +2174,75 @@
%5D%7B1,10%7D;
+%0A %0A # HTML entities%0A %7C&%5C#x%5B0-9%5D%7B1,10%7D;
%0A%0A
@@ -4354,16 +4354,26 @@
+
help=%22As
@@ -4427,16 +4427,171 @@
=False)%0A
+ parser.add_option(%22-e%22, %22--exclude%22, action=%22append%22, dest=%22exclude_filename%22,%0A help=%22Exclude these filenames from being linted%22)%0A
(opt
@@ -4982,16 +4982,17 @@
if
+(
fname.en
@@ -5033,16 +5033,59 @@
'.txt'))
+ and fname not in options.exclude_filename)
%0A
|
ec831928b9e065b523eae2621f51091a8e332c71
|
Be more verbose
|
pissuu/api.py
|
pissuu/api.py
|
import requests
import md5
import json
class IssuuAPI(object):
def __init__(self, key, secret):
"""
Initialize an API client with the given ``key`` and ``secret``.
"""
self.key = key
self.secret = secret
def add_bookmark(self):
"""
Add a bookmark.
"""
raise NotImplementedError()
def list_bookmarks(self):
"""
List bookmarks.
"""
raise NotImplementedError()
def update_bookmark(self):
"""
Update a bookmark.
"""
raise NotImplementedError()
def delete_bookmark(self, names):
"""
Delete a bookmark.
"""
raise NotImplementedError()
def list_documents(self):
"""
List documents for this user.
"""
return self._query(
url = 'http://api.issuu.com/1_0',
action = 'issuu.documents.list'
)
def upload_document(self, file, title=''):
"""
Upload the given ``file``.
"""
response = self._query(
url = 'http://upload.issuu.com/1_0',
action = 'issuu.document.upload',
data = {
'file': file,
'title': title
}
)
return response['_content']['document']['documentId']
def update_document(self):
"""
Update a document.
"""
raise NotImplementedError()
def delete_document(self, id):
"""
Delete a document.
:param id: A string describing a document ID.
"""
self.delete_documents([id])
def delete_documents(self, ids):
"""
Delete the documents with the given ``ids``.
:param ids: A list of strings describing document IDs.
"""
self._query(
url = 'http://api.issuu.com/1_0',
action = 'issuu.document.delete',
data = {
'names': ','.join(ids)
}
)
def add_folder(self):
"""
Create a folder.
"""
raise NotImplementedError()
def list_folders(self):
"""
List folders.
"""
raise NotImplementedError()
def update_folder(self):
"""
Update a folder.
"""
raise NotImplementedError()
def delete_folder(self):
"""
Delete a folder.
"""
raise NotImplementedError()
def _query(self, url, action, data=None):
"""
Low-level access to the Issuu API.
"""
if not data:
data = {}
data.update({
'apiKey': self.key,
'format': 'json',
'action': action
})
data['signature'] = self._sign(data)
files = {}
for key in data:
if hasattr(data[key], 'read'):
files[key] = data[key]
for key in files:
data.pop(key)
response = requests.post(
url = url,
data = data,
files = files
)
try:
data = json.loads(response.content)['rsp']
except ValueError:
raise self.Error('API response could not be parsed as JSON')
if data['stat'] == 'fail':
raise self.Error(data['_content']['error']['message'])
else:
return data
def _sign(self, data):
"""
Create a signature of the given ``data``.
"""
signature = self.secret
data.update({
'apiKey': self.key
})
keys = data.keys()
for key in sorted(keys):
if isinstance(data[key], (str, unicode)):
signature += key + data[key]
return md5.new(signature).hexdigest()
class Error(StandardError):
pass
|
Python
| 0.999847
|
@@ -3260,17 +3260,40 @@
as JSON
-'
+: %25s' %25 response.content
)%0A%0A
|
2b20e803733db09ad4643be00b2af11ecea1eeb8
|
Increase version to 0.11.0 (#394)
|
opsdroid/const.py
|
opsdroid/const.py
|
"""Constants used by OpsDroid."""
import os
__version__ = "0.10.0"
DEFAULT_GIT_URL = "https://github.com/opsdroid/"
MODULES_DIRECTORY = "opsdroid-modules"
DEFAULT_ROOT_PATH = os.path.expanduser("~/.opsdroid")
DEFAULT_LOG_FILENAME = os.path.join(DEFAULT_ROOT_PATH, 'output.log')
DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, "modules")
DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, "site-packages")
DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, "configuration.yaml")
DEFAULT_MODULE_BRANCH = "master"
EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"configuration/example_configuration.yaml")
REGEX_MAX_SCORE = 0.6
RASANLU_DEFAULT_URL = "http://localhost:5000"
RASANLU_DEFAULT_PROJECT = "opsdroid"
|
Python
| 0
|
@@ -56,17 +56,17 @@
_ = %220.1
-0
+1
.0%22%0A%0ADEF
|
058d006aef033b324d9facde848ee004fc54f9f4
|
Add serialisation check
|
plot_check.py
|
plot_check.py
|
from scanpointgenerator import LineGenerator, CompoundGenerator
from scanpointgenerator.rectangular_roi import RectangularROI
from scanpointgenerator.circular_roi import CircularROI
from scanpointgenerator.spiralgenerator import SpiralGenerator
from scanpointgenerator.lissajousgenerator import LissajousGenerator
from scanpointgenerator.randomoffsetmutator import RandomOffsetMutator
from scanpointgenerator.excluder import Excluder
from plotgenerator2 import plot_generator
from pkg_resources import require
require('matplotlib')
require('numpy')
require('scipy')
def grid_check():
x = LineGenerator("x", "mm", 0.0, 4.0, 5, alternate_direction=True)
y = LineGenerator("y", "mm", 0.0, 3.0, 4)
gen = CompoundGenerator([x, y], [], [])
plot_generator(gen)
def grid_circle_check():
x = LineGenerator("x", "mm", 0.0, 4.0, 5, alternate_direction=True)
y = LineGenerator("y", "mm", 0.0, 3.0, 4)
circle = CircularROI([2.0, 1.0], 2.0)
excluder = Excluder(circle, ['x', 'y'])
gen = CompoundGenerator([x, y], [], [excluder])
plot_generator(gen, circle)
def spiral_check():
gen = SpiralGenerator(['x', 'y'], "mm", [0.0, 0.0], 10.0)
plot_generator(gen)
def spiral_rectangle_check():
spiral = SpiralGenerator(['x', 'y'], "mm", [0.0, 0.0], 10.0)
rectangle = RectangularROI([0.0, 0.0], 10.0, 10.0)
excluder = Excluder(rectangle, ['x', 'y'])
gen = CompoundGenerator([spiral], [], [excluder])
plot_generator(gen, rectangle)
def lissajous_check():
bounding_box = dict(centre=[0.0, 0.0], width=1.0, height=1.0)
gen = LissajousGenerator(['x', 'y'], "mm", bounding_box, 2)
plot_generator(gen)
def lissajous_rectangle_check():
bounding_box = dict(centre=[0.0, 0.0], width=1.0, height=1.0)
lissajous = LissajousGenerator(['x', 'y'], "mm", bounding_box, 2)
rectangle = RectangularROI([0.0, 0.0], 0.8, 0.8)
excluder = Excluder(rectangle, ['x', 'y'])
gen = CompoundGenerator([lissajous], [], [excluder])
plot_generator(gen)
def line_2d_check():
gen = LineGenerator(["x", "y"], "mm", [1.0, 2.0], [5.0, 10.0], 5)
plot_generator(gen)
def random_offset_check():
x = LineGenerator("x", "mm", 0.0, 4.0, 5, alternate_direction=True)
y = LineGenerator("y", "mm", 0.0, 3.0, 4)
mutator = RandomOffsetMutator(2, dict(x=0.25, y=0.25))
gen = CompoundGenerator([x, y], [mutator], [])
plot_generator(gen)
gen = CompoundGenerator([x, y], [mutator, mutator], [])
plot_generator(gen)
gen = CompoundGenerator([x, y], [mutator, mutator, mutator], [])
plot_generator(gen)
gen = CompoundGenerator([x, y], [mutator, mutator, mutator, mutator, mutator], [])
plot_generator(gen)
grid_check()
grid_circle_check()
spiral_check()
spiral_rectangle_check()
lissajous_check()
lissajous_rectangle_check()
line_2d_check()
random_offset_check()
|
Python
| 0
|
@@ -2725,21 +2725,351 @@
)%0A%0A%0A
-grid_check()%0A
+def serialise_grid_check():%0A%0A x = LineGenerator(%22x%22, %22mm%22, 0.0, 4.0, 5, alternate_direction=True)%0A y = LineGenerator(%22y%22, %22mm%22, 0.0, 3.0, 4)%0A%0A gen = CompoundGenerator(%5Bx, y%5D, %5B%5D, %5B%5D)%0A%0A plot_generator(gen)%0A%0A gen = gen.to_dict()%0A print(gen)%0A gen = CompoundGenerator.from_dict(gen)%0A%0A plot_generator(gen)%0A%0A# grid_check()%0A#
grid
@@ -3080,24 +3080,26 @@
cle_check()%0A
+#
spiral_check
@@ -3101,16 +3101,18 @@
check()%0A
+#
spiral_r
@@ -3120,32 +3120,34 @@
ctangle_check()%0A
+#
lissajous_check(
@@ -3148,16 +3148,18 @@
check()%0A
+#
lissajou
@@ -3178,16 +3178,18 @@
check()%0A
+#
line_2d_
@@ -3196,16 +3196,18 @@
check()%0A
+#
random_o
@@ -3212,16 +3212,46 @@
_offset_check()%0A
+send_grid_over_server_check()%0A
|
a27eebab1381c5b94c9cf7e9411dda417198ce0c
|
fix paste logger
|
elmyra.ip.access.epo/setup.py
|
elmyra.ip.access.epo/setup.py
|
import os
from setuptools import setup, find_packages
#from distutils.core import setup
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
requires = [
# ----------------------------------------------
# backend
# ----------------------------------------------
# pyramid core
#'pyramid==1.5a2',
'pyramid==1.4.2',
'pyramid_debugtoolbar',
'pyramid_mako',
'Akhet==2.0',
'waitress',
'Paste==1.7.5.1',
# caching
'Beaker==1.6.4',
'pyramid_beaker==0.8',
'pymongo==2.7.1',
# web services
'cornice==0.15',
# authorization
'PyCrypto==2.6.1',
'jws==0.1.2',
'python_jwt==0.3.1',
'pbkdf2==1.3',
# ----------------------------------------------
# business logic
# ----------------------------------------------
'requests==2.0.1',
'requests-oauthlib==0.4.0',
'mechanize==0.2.5',
'BeautifulSoup==3.2.1',
'ago==0.0.6',
'pyparsing==2.0.2',
'mongoengine==0.8.7',
'blinker==1.3',
'python-dateutil==2.2',
# ----------------------------------------------
# user interface
# ----------------------------------------------
# fanstatic
'fanstatic==1.0a2',
'pyramid_fanstatic==0.4',
# bootstrap
'js.bootstrap==2.3.1',
#'js.bootstrap==3.0.0.1',
# jquery
'js.jquery==1.9.1',
'js.jquery_shorten==1.0.0a1',
'js.purl==2.3.1a1',
'js.select2==3.4.1',
# jquerui
#'js.jqueryui==1.10.3',
#'js.jqueryui_bootstrap==0.0.0',
# fontawesome
'css.fontawesome==3.2.1',
# marionette, backbone and prerequisites
'js.marionette==1.1.0a2',
'js.underscore_string==2.3.0a1',
]
test_requires = [
# ----------------------------------------------
# testing
# ----------------------------------------------
'nose==1.3.3',
'nose-exclude==0.2.0',
'nose2-cov==1.0a4',
]
setup(name='elmyra.ip.access.epo',
version='0.37.0',
description='elmyra.ip.access.epo',
long_description=README,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web wsgi pylons pyramid',
packages=find_packages(),
#packages=['elmyra.ip.access.epo'],
include_package_data=True,
package_data={
'elmyra.ip.access.epo': [
'resources/*.*',
'templates/*.mako',
'static/js/**/*.js', 'static/js/**/*.map', '**/**/*.css',
'**/**/*.jpg', '**/**/*.gif', '**/**/*.svg', '**/**/**/*.svg',
'static/widget/**/**/*.*',
],
'elmyra.ip.util.render': ['*.js'],
},
zip_safe=False,
test_suite='nose.collector',
install_requires=requires,
tests_require=test_requires,
dependency_links=[
'https://github.com/elmyra-org/js.marionette/tarball/1.1.0a2#egg=js.marionette-1.1.0a2',
],
entry_points="""\
[paste.app_factory]
main = elmyra.ip.access.epo:main
[beaker.backends]
mongodb = elmyra.ip.util.database.beaker_mongodb:MongoDBNamespaceManager
""",
)
|
Python
| 0.000008
|
@@ -545,24 +545,49 @@
e==1.7.5.1',
+%0A 'PasteScript 1.7.5',
%0A%0A # cach
|
5f43ac2dbca1caba21b2d6f4afbc798323a0d79f
|
Clear memory more actively
|
osmhm/__init__.py
|
osmhm/__init__.py
|
import fetch
import filters
import inserts
import tables
import config
import send_notification
def run(time_type='hour', history=False, suspicious=False, monitor=True,
notification=False, notifier=send_notification.send_mail):
"""
"""
import osmhm
import osmdt
import datetime
import time
while True:
sequence = osmhm.fetch.fetch_last_read()
if not sequence:
osmhm.fetch.fetch_next(time_type=time_type, reset=True)
sequence = osmhm.fetch.fetch_last_read()
if sequence['read_flag'] is False:
print "Processing sequence %s." % (sequence['sequencenumber'])
count = 0
while True:
try:
count += 1
data_stream = osmdt.fetch(sequence['sequencenumber'], time=time_type)
break
except:
if count == 5:
msg = 'Current state file not retrievable after five times.'
raise Exception(msg)
print "File not reachable; waiting 60 more seconds..."
time.sleep(60)
data_object = osmdt.process(data_stream)
changesets = osmdt.extract_changesets(data_object)
objects = osmdt.extract_objects(data_object)
users = osmdt.extract_users(data_object)
if history:
osmhm.inserts.insert_all_changesets(changesets)
if suspicious:
osmhm.filters.suspicious_filter(changesets)
if monitor:
osmhm.filters.object_filter(objects, notification=notification, notifier=notifier)
osmhm.filters.user_filter(changesets, notification=notification, notifier=notifier)
#osmhm.filters.user_object_filter(objects, notification=notification, notifier=notifier) # not implemented yet
osmhm.filters.key_filter(objects, notification=notification, notifier=notifier)
osmhm.inserts.insert_file_read()
print "Finished processing %s." % (sequence['sequencenumber'])
if sequence['timetype'] == 'minute':
delta_time = 1
extra_time = 10
elif sequence['timetype'] == 'hour':
delta_time = 60
extra_time = 120
elif sequence['timetype'] == 'day':
delta_time = 1440
extra_time = 300
next_time = datetime.datetime.strptime(sequence['timestamp'],
"%Y-%m-%dT%H:%M:%SZ") + datetime.timedelta(minutes=delta_time)
if datetime.datetime.utcnow() < next_time:
sleep_time = (next_time - datetime.datetime.utcnow()).seconds + delta_time
print "Waiting %2.1f seconds for the next file." % (sleep_time)
else:
sleep_time = 1
time.sleep(sleep_time)
count = 0
while True:
try:
count += 1
osmhm.fetch.fetch_next(sequence['sequencenumber'], time_type=time_type)
break
except:
if count == 5:
msg = 'New state file not retrievable after five times.'
raise Exception(msg)
print "Waiting %2.1f more seconds..." % (extra_time)
time.sleep(extra_time)
|
Python
| 0
|
@@ -1217,16 +1217,44 @@
_stream)
+%0A del data_stream
%0A%0A
@@ -1419,16 +1419,44 @@
_object)
+%0A del data_object
%0A%0A
@@ -2074,16 +2074,60 @@
ifier)%0A%0A
+ del changesets, objects, users%0A%0A
|
71b6df9745b677187f4e36eda2a54db2ffc54676
|
Remove extra slash
|
democracylab/settings.py
|
democracylab/settings.py
|
"""
Django settings for democracylab project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import ast
import dj_database_url
from distutils.util import strtobool
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = strtobool(os.environ.get('DJANGO_DEBUG'))
ALLOWED_HOSTS = ['*']
S3_BUCKET = os.environ.get('S3_BUCKET')
# Application definition
INSTALLED_APPS = [
'civictechprojects.apps.CivictechprojectsConfig',
'common.apps.CommonConfig',
'democracylab.apps.DemocracylabConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'taggit'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'democracylab.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_ROOT, 'democracylab/templates'),
os.path.join(PROJECT_ROOT, 'civictechprojects/templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'democracylab.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DL_DATABASE = os.environ.get('DL_DATABASE', '')
DATABASES = ast.literal_eval(DL_DATABASE) if DL_DATABASE else {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': 'p0stgres!',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
db_from_env = dj_database_url.config()
DATABASES['default'].update(db_from_env)
LOGIN_REDIRECT_URL = '/'
#Caching number of tag counts only for now - change this if other things are db-cached
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'default_db_cache',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
# Make sure to keep these in sync with the validators in SignUpController.jsx
# TODO: Find a validator for verifying password contains at least 1 number, letter, and non-alphanumeric character
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAdminUser',
],
'PAGE_SIZE': 10
}
DEFAULT_FROM_EMAIL = 'democracylabreset@gmail.com'
SERVER_EMAIL = 'democracylabreset@gmail.com'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'democracylabreset@gmail.com'
EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']
PROTOCOL_DOMAIN = os.environ['PROTOCOL_DOMAIN']
ADMIN_EMAIL = os.environ['ADMIN_EMAIL']
FOOTER_LINKS = os.environ.get('FOOTER_LINKS', '')
PROJECT_DESCRIPTION_EXAMPLE_URL = os.environ.get('PROJECT_DESCRIPTION_EXAMPLE_URL', '')
SECURE_SSL_REDIRECT = os.environ.get('DL_SECURE_SSL_REDIRECT', False) == 'True'
# Note: This environment variable should only be applied in Production
HOTJAR_APPLICATION_ID = os.environ.get('HOTJAR_APPLICATION_ID', '')\
GOOGLE_PROPERTY_ID = os.environ.get('GOOGLE_PROPERTY_ID', '')
# TODO: Set to True in productions
# SESSION_COOKIE_SECURE = True
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'custom_error_handler': {
'class': 'democracylab.logging.CustomErrorHandler'
}
},
'loggers': {
# Override global logger
'': {
'handlers': ['custom_error_handler'],
'level': 'ERROR',
'propagate': True
},
},
}
|
Python
| 0.00007
|
@@ -4627,9 +4627,8 @@
'')
-%5C
%0A%0AGO
|
9e146c540f01cd243bf9886763fde45c5897396a
|
Remove network and executable on uninstall
|
dip/config.py
|
dip/config.py
|
"""
Dip configuration.
"""
import collections
import os
import re
import subprocess
import time
import sys
from copy import deepcopy
import click
import compose.cli.command
import easysettings
import git
from . import __version__
from . import colors
from . import defaults
from . import exc
from . import utils
DEFAULT = {'dips': {},
'home': defaults.HOME,
'path': defaults.PATH,
'version': __version__}
def load(config_path=None):
""" Load config.json file. """
# Use supplied path or default
config_path = config_path or defaults.HOME
# Read config.json
try:
cfg = dict(easysettings.JSONSettings.from_file(config_path))
except (OSError, IOError, ValueError):
cfg = {}
# Merge config with defaults
return DipConfig(**utils.dict_merge(deepcopy(DEFAULT), cfg))
class DipConfig(collections.MutableMapping):
def __init__(self, **config):
self.config = config
self.home = config.get('home')
self.path = config.get('path')
self.version = config.get('version')
def __str__(self):
return self.home
def __repr__(self):
return "DipConfig({self})".format(self=self)
def __delitem__(self, key):
del self.config['dips'][key]
def __getitem__(self, key):
return Dip(key, **self.config['dips'][key])
def __iter__(self):
for key in self.config['dips']:
yield key
def __len__(self):
return len(self.config['dips'])
def __setitem__(self, key, val):
self.config['dips'][key] = dict(val)
def install(self, name, home, path, env, remote):
""" Install config entry. """
# Update config
try:
remote, branch = remote.split('/')
except (AttributeError, ValueError):
branch = None
val = Dip(name, home, path, env, remote, branch)
self[name] = val
self.save()
# Write executable
try:
utils.write_exe(path, name)
except (OSError, IOError):
raise exc.DipError(
"Could not write executable for '{name}'".format(name=name))
def save(self):
""" Save config to config.json file. """
try:
cfg = easysettings.JSONSettings()
cfg.update(self.config)
cfg.save(self.home, sort_keys=True)
except (OSError, IOError):
raise exc.DipConfigError(self.home)
def uninstall(self, name):
""" Uninstall config entry. """
del self[name]
self.save()
class Dip(object):
def __init__(self, name, home, path, env=None, remote=None,
branch=None):
self.name = name
self.home = os.path.abspath(home)
self.path = os.path.abspath(path)
self.env = env
self.remote = remote
self.branch = branch
def __str__(self):
return self.name
def __repr__(self):
return "Dip({self})".format(self=self)
def __iter__(self):
yield 'home', self.home
yield 'path', self.path
yield 'env', self.env or {}
yield 'remote', self.remote
yield 'branch', self.branch
@property
def repo(self):
return git.Repo(self.home, search_parent_directories=True)
@property
def project(self):
return compose.cli.command.get_project(self.home)
@property
def service(self):
return self.project.get_service(self.name)
@property
def definition(self):
for cfg in compose.config.config.get_default_config_files(self.home):
with open(cfg) as compose_file:
return compose_file.read()
def diff(self):
for local in compose.config.config.get_default_config_files(self.home):
# Format remote/branch:path/to/docker-compose.yml
repo = self.repo
remote = self.remote
branch = self.branch or repo.active_branch.name
rel = re.sub(r"^{root}".format(root=repo.working_dir), '', local)
remote = "{remote}/{branch}:{rel}"\
.format(remote=remote,
branch=branch,
rel=rel.strip('/'))
# Echo diff
try:
cmd = ['git', '--no-pager', 'diff', remote, local]
with open(os.devnull, 'w') as devnull:
diff = subprocess.check_output(cmd, stderr=devnull).strip()
if diff:
with utils.newlines():
msg = 'Local configuration has diverged from remote:\n'
click.echo(colors.amber(msg), err=True)
subprocess.call(cmd, stdout=sys.stderr)
msg = "\nSleeping for {sleep}s"\
.format(sleep=defaults.SLEEP)
click.echo(msg, err=True)
return time.sleep(defaults.SLEEP)
except subprocess.CalledProcessError:
click.echo(colors.amber("Could not access {remote}"
.format(remote=remote)), err=True)
def run(self, *args):
""" Run CLI. """
# Build CMD
cmd = ['docker-compose', 'run', '--rm']
if utils.notty():
cmd.append('-T')
# Get options for docker-compose
cmd += utils.flatten(['-e', '='.join(x)] for x in self.env.items())
# Call docker-compose run --rm <args> <svc> $*
subprocess.call(cmd + [self.name] + list(args),
stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin)
|
Python
| 0
|
@@ -2525,32 +2525,396 @@
nfig entry. %22%22%22%0A
+ # Remove executable%0A try:%0A path = os.path.join(self%5Bname%5D.path, name)%0A os.remove(path)%0A except (OSError, IOError):%0A pass%0A%0A # Remove network%0A try:%0A self%5Bname%5D.project.networks.remove()%0A except compose.config.errors.ComposeFileNotFound:%0A pass%0A%0A # Remove config%0A
del self
@@ -3049,24 +3049,33 @@
branch=None
+, *kwargs
):%0A s
|
8f4f1e8cc45daa8cf49f050200ce17a48f008e5a
|
Fix process entity migration
|
resolwe/flow/migrations/0023_process_entity_2.py
|
resolwe/flow/migrations/0023_process_entity_2.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-10-01 03:15
from __future__ import unicode_literals
from django.db import migrations
def migrate_flow_collection(apps, schema_editor):
"""Migrate 'flow_collection' field to 'entity_type'."""
Process = apps.get_model('flow', 'Process')
DescriptorSchema = apps.get_model('flow', 'DescriptorSchema')
for process in Process.objects.all():
process.entity_type = process.flow_collection
process.entity_descriptor_schema = process.flow_collection
if not DescriptorSchema.objects.filter(slug=process.entity_descriptor_schema).exists():
raise LookupError(
"Descriptow schema '{}' referenced in 'entity_descriptor_schema' not "
"found.".format(process.entity_descriptor_schema)
)
process.save()
class Migration(migrations.Migration):
dependencies = [
('flow', '0022_process_entity_1'),
]
operations = [
migrations.RunPython(migrate_flow_collection)
]
|
Python
| 0.000006
|
@@ -543,16 +543,82 @@
if
+(process.entity_descriptor_schema is not None and%0A
not Desc
@@ -692,16 +692,17 @@
exists()
+)
:%0A
|
21b453946bfa35c7730d5ab15e62b48d299170ed
|
Update password loading test
|
osfclient/tests/test_listing.py
|
osfclient/tests/test_listing.py
|
"""Test `osf ls` command"""
from unittest import mock
from unittest.mock import patch, MagicMock, PropertyMock, mock_open
from osfclient import OSF
from osfclient.cli import list_
from osfclient.tests.mocks import MockProject
@patch('osfclient.cli.OSF')
def test_anonymous_doesnt_use_password(MockOSF):
args = MagicMock()
username = PropertyMock(return_value=None)
type(args).username = username
list_(args)
MockOSF.assert_called_once_with(username=None, password=None)
@patch('osfclient.cli.OSF')
def test_username_password(MockOSF):
args = MagicMock()
username = PropertyMock(return_value='joe@example.com')
type(args).username = username
mock_open_func = mock_open(read_data="secret")
with patch('osfclient.cli.open', mock_open_func, create=True):
list_(args)
MockOSF.assert_called_once_with(username='joe@example.com',
password='secret')
assert mock_open_func.called
@patch.object(OSF, 'project', return_value=MockProject('1234'))
def test_get_project(OSF_project):
args = MagicMock()
username = PropertyMock(return_value=None)
type(args).username = username
project = PropertyMock(return_value='1234')
type(args).project = project
output = PropertyMock(return_value=None)
type(args).output = output
list_(args)
OSF_project.assert_called_once_with('1234')
# check that the project and the files have been printed
for store in OSF_project.return_value.storages:
assert store._name_mock.called
for f in store.files:
assert f._path_mock.called
|
Python
| 0
|
@@ -684,54 +684,93 @@
-mock_open_func = mock_open(read_data=%22
+def simple_getenv(key):%0A if key == 'OSF_PASSWORD':%0A return '
secret
-%22)
+'
%0A%0A
@@ -802,42 +802,75 @@
li.o
-p
+s.get
en
+v
',
- mock_open_func, create=True)
+%0A side_effect=simple_getenv) as mock_getenv
:%0A
@@ -1015,36 +1015,54 @@
-assert mock_open_func.called
+mock_getenv.assert_called_with('OSF_PASSWORD')
%0A%0A%0A@
|
65db3a0c9fd0330c5bdca8125d2e752ef4774dd5
|
add an arrow to show the direction of the route
|
osmroutes2maps.py
|
osmroutes2maps.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import json
from collections import defaultdict
from shapely.geometry import LineString, Point
from shapely.ops import linemerge
import gmplot
import urllib
import sys
import codecs
import pyproj
import yattag
import hashlib
routes = defaultdict(list)
osmfile = sys.argv[1]
portalfile = sys.argv[2]
resultdir = sys.argv[3]
portaljson = json.load(codecs.open(portalfile, 'r', 'utf-8-sig'))
# we create Point objects now, it saves ~20% of the time per iteration
portals = [Point(x['lngE6']/10.0**6, x['latE6']/10.0**6) for x in portaljson['portals']]
osm = json.load(open(osmfile))
features = osm['features']
geod = pyproj.Geod(ellps='WGS84')
print('Extacting routes information... ', end='', flush=True)
# extracts all the 'way' features (representing the sections of the route)
# and add them to their relative routes (a way can be part of multiple routes)
for feature in features:
if feature['id'].startswith('way/'):
for relation in feature['properties']['@relations']:
if relation['role'] != 'platform':
if 'name' in relation['reltags']:
routes[relation['reltags']['name']].append(LineString(feature['geometry']['coordinates']))
else:
routes[relation['reltags']['ref']].append(LineString(feature['geometry']['coordinates']))
# merge the segments composing the route
for route in routes:
routes[route] = linemerge(routes[route])
print('%d routes found' % len(routes), flush=True)
results = []
print('Generating maps', end='', flush=True)
for route in sorted(routes):
if type(routes[route]) == LineString:
lines = [routes[route], ]
else:
lines = routes[route]
gmap = gmplot.GoogleMapPlotter(center_lng=lines[0].centroid.x, center_lat=lines[0].centroid.y, zoom=14)
gmap.fitBounds(routes[route].bounds[1], routes[route].bounds[0],
routes[route].bounds[3], routes[route].bounds[2])
portals_set = set()
for line in lines:
for portal in portals:
interp = line.interpolate(line.project(portal))
# https://github.com/mlaloux/My-Python-GIS_StackExchange-answers/blob/master/What%20is%20the%20unit%20the%20shapely%20length%20attribute%3F.md
angle1, angle2, distance = geod.inv(portal.x, portal.y, interp.x, interp.y)
if abs(distance) <= 40.0:
portals_set.add(portal.coords[0])
lats = [x[1] for x in line.coords]
lngs = [x[0] for x in line.coords]
gmap.plot(lats, lngs)
for port in portals_set:
gmap.marker(port[1], port[0])
mapfile = '%s.html' % hashlib.sha1(route.encode('utf-8')).hexdigest()
gmap.draw('%s/%s' % (resultdir, mapfile))
results.append([route, len(portals_set), mapfile])
print('.', end='', flush=True)
# generate an html with the results
doc, tag, text = yattag.Doc().tagtext()
with tag('html'):
with tag('body'):
with tag('table', border = '1'):
with tag('tr'):
with tag('td'):
with tag('b'): text('Route')
with tag('td'):
with tag('b'): text('No. of Portals')
with tag('td'):
with tag('b'): text('Link to GMap')
for route, portals, mapfile in results:
with tag('tr'):
with tag('td'): text(route)
with tag('td'): text(str(portals))
with tag('td'):
with tag('a', target='_blank', href=mapfile):
text('Map')
with open('%s/index.html' % resultdir, 'w') as f:
f.write(doc.getvalue())
|
Python
| 0.000008
|
@@ -2562,23 +2562,235 @@
map.
-plot(lats, lngs
+add_symbol('arrowSymbol', %7B'path': 'google.maps.SymbolPath.FORWARD_CLOSED_ARROW',%0A 'scale': 2%7D)%0A%0A gmap.plot(lats, lngs, icons=%7B'icon': 'arrowSymbol', 'offset': '7%25', 'repeat': '7%25'%7D
)%0A
|
e9060c166987a18aa9faf3b790b80135b319ecca
|
Update example.py
|
libs/python/example.py
|
libs/python/example.py
|
#!/usr/bin/env python
# Minimal smoke test for the BWIPP PostScript barcode Python bindings.
import postscriptbarcode

# Load the barcode library from a relative path.
# NOTE(review): assumes the working directory is libs/python/ -- confirm.
c=postscriptbarcode.BWIPP("../barcode.ps")

# Query the library version; the return value is intentionally discarded.
c.get_version()
|
Python
| 0.000001
|
@@ -72,16 +72,44 @@
IPP(%22../
+../build/monolithic_package/
barcode.
|
068a94a455448b3fc2ee552616658d9f980104ea
|
Add comment.
|
numpy/distutils/command/bdist_rpm.py
|
numpy/distutils/command/bdist_rpm.py
|
import os
import sys
from distutils.command.bdist_rpm import bdist_rpm as old_bdist_rpm
class bdist_rpm(old_bdist_rpm):
    """bdist_rpm variant that rewrites hard-coded 'setup.py' references.

    Distutils emits spec files that mention the literal script name
    'setup.py'; when the build was launched from a differently named
    script, every occurrence is replaced with the real name.
    """

    def _make_spec_file(self):
        """Return the spec file lines with 'setup.py' replaced by the
        actual setup script name (no-op when it already is 'setup.py')."""
        spec_file = old_bdist_rpm._make_spec_file(self)
        setup_py = os.path.basename(sys.argv[0])
        if setup_py == 'setup.py':
            return spec_file
        # Replace the hardcoded setup.py script name with the real one.
        return [line.replace('setup.py', setup_py) for line in spec_file]
|
Python
| 0.000001
|
@@ -201,16 +201,109 @@
le(self)
+%0A%0A # Replace hardcoded setup.py script name%0A # with the real setup script name.
%0A
|
6af3eacec303abfe6f260581687a38d89f7b7474
|
Fix wavelength issue for QE65000
|
oceanoptics/spectrometers/QE65xxx.py
|
oceanoptics/spectrometers/QE65xxx.py
|
# tested
# ----------------------------------------------------------
from oceanoptics.base import OceanOpticsBase as _OOBase
from oceanoptics.base import OceanOpticsTEC as _OOTEC
import struct
#----------------------------------------------------------
class _QE65xxx(_OOBase, _OOTEC):
    """Shared base for QE65xxx spectrometers.

    The QE65xxx firmware uses milliseconds where the generic base class
    uses microseconds, so both time-related commands are overridden here.
    """

    def _set_integration_time(self, time_us):
        """send command 0x02"""
        # The QE65000 requires the time in milliseconds; this overrides
        # the microsecond-based implementation provided by _OOBase.
        self._usb_send(struct.pack('<BI', 0x02, int(time_us / 1000)))

    def _query_status(self):
        """0xFE query status"""
        # The QE65000 also reports the time in milliseconds; convert back
        # so callers keep seeing the _OOBase microsecond convention.
        reply = self._usb_query(struct.pack('<B', 0xFE))
        fields = struct.unpack('<HLBBBBBBBBBB', reply[:])
        return {'pixels': fields[0],
                'integration_time': fields[1] * 1000,  # ms to us
                'lamp_enable': fields[2],
                'trigger_mode': fields[3],
                'acquisition_status': fields[4],
                'packets_in_spectrum': fields[5],
                'power_down': fields[6],
                'packets_in_endpoint': fields[7],
                'usb_speed': fields[10]}
#--------
# tested
#--------
class QE65000(_QE65xxx):
    """QE65000 spectrometer (tested); sets up TEC support on construction."""
    def __init__(self):
        super(QE65000, self).__init__('QE65000')
        # Enable the TEC machinery provided by the _OOTEC base class.
        self.initialize_TEC()
#----------
# untested
#----------
class QE65pro(_QE65xxx):
    """QE65pro spectrometer (untested); same protocol as the QE65000."""
    def __init__(self):
        super(QE65pro, self).__init__('QE65pro')
        # Enable the TEC machinery provided by the _OOTEC base class.
        self.initialize_TEC()
|
Python
| 0.000001
|
@@ -1485,16 +1485,261 @@
65000')%0A
+ # The QE65000 needs a -10 offset for calculating the wavelengths%0A # due to some legacy issues...%0A self._wl = sum( self._wl_factors%5Bi%5D *%0A np.arange(-10, self._pixels - 10, dtype=np.float64)**i for i in range(4) )%0A
|
7655ba80da745ef2491a7ef872683620d6328304
|
Disable verbose logging by default
|
designateclient/shell.py
|
designateclient/shell.py
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
from cliff.app import App
from cliff.commandmanager import CommandManager
from designateclient.version import version_info as version
class DesignateShell(App):
    """Command-line shell for the Designate DNS client."""

    CONSOLE_MESSAGE_FORMAT = '%(levelname)s: %(message)s'

    def __init__(self):
        super(DesignateShell, self).__init__(
            description='Designate Client',
            version=version.version_string(),
            command_manager=CommandManager('designateclient.cli'),
        )

        self.log = logging.getLogger(__name__)

    def configure_logging(self):
        """Keep the 'requests' logger quiet unless -v was given twice."""
        super(DesignateShell, self).configure_logging()

        verbose = self.options.verbose_level > 1
        logging.getLogger('requests').setLevel(
            logging.DEBUG if verbose else logging.WARN)

    def build_option_parser(self, description, version):
        """Extend the base parser with endpoint/credential options, each
        defaulting to the matching environment variable."""
        parser = super(DesignateShell, self).build_option_parser(
            description, version)

        # Options that all follow the same "Defaults to env[VAR]" pattern.
        for flag, env_var in (
                ('--os-endpoint', 'OS_DNS_ENDPOINT'),
                ('--os-auth-url', 'OS_AUTH_URL'),
                ('--os-username', 'OS_USERNAME'),
                ('--os-password', 'OS_PASSWORD'),
                ('--os-tenant-id', 'OS_TENANT_ID'),
                ('--os-tenant-name', 'OS_TENANT_NAME'),
                ('--os-token', 'OS_SERVICE_TOKEN')):
            parser.add_argument(flag,
                                default=os.environ.get(env_var),
                                help="Defaults to env[%s]" % env_var)

        parser.add_argument('--os-service-type',
                            default=os.environ.get('OS_DNS_SERVICE_TYPE',
                                                   'dns'),
                            help=("Defaults to env[OS_DNS_SERVICE_TYPE], or "
                                  "'dns'"))

        parser.add_argument('--os-region-name',
                            default=os.environ.get('OS_REGION_NAME'),
                            help="Defaults to env[OS_REGION_NAME]")

        parser.add_argument('--sudo-tenant-id',
                            default=os.environ.get('DESIGNATE_SUDO_TENANT_ID'),
                            help="Defaults to env[DESIGNATE_SUDO_TENANT_ID]")

        parser.add_argument('--insecure', action='store_true',
                            help="Explicitly allow 'insecure' SSL requests")

        return parser
|
Python
| 0.000001
|
@@ -866,16 +866,46 @@
ssage)s'
+%0A DEFAULT_VERBOSE_LEVEL = 0
%0A%0A de
@@ -1136,16 +1136,16 @@
)%0A%0A
+
@@ -1188,348 +1188,8 @@
_)%0A%0A
- def configure_logging(self):%0A super(DesignateShell, self).configure_logging()%0A%0A # Set requests logging%0A requests_logger = logging.getLogger('requests')%0A%0A if self.options.verbose_level %3C= 1:%0A requests_logger.setLevel(logging.WARN)%0A else:%0A requests_logger.setLevel(logging.DEBUG)%0A%0A
|
654bd1be9dc2c22f77186f94bedcf1e06dbf3887
|
Update import django re_path to support Django 4.0
|
djangoql/admin.py
|
djangoql/admin.py
|
import json
from django.contrib import messages
from django.contrib.admin.views.main import ChangeList
from django.core.exceptions import FieldError, ValidationError
from django.db import DataError
from django.forms import Media
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.views.generic import TemplateView
from .compat import text_type
from .exceptions import DjangoQLError
from .queryset import apply_search
from .schema import DjangoQLSchema
from .serializers import SuggestionsAPISerializer
from .views import SuggestionsAPIView
try:
from django.core.urlresolvers import reverse
except ImportError: # Django 2.0
from django.urls import reverse
try:
from django.urls import re_path
except ImportError: # Django <2.0
from django.conf.urls import url as re_path
# Query-string flag that switches the admin search box into DjangoQL mode.
DJANGOQL_SEARCH_MARKER = 'q-l'
class DjangoQLChangeList(ChangeList):
    """ChangeList that hides the DjangoQL mode marker from filter params."""

    def get_filters_params(self, *args, **kwargs):
        """Strip DJANGOQL_SEARCH_MARKER so it is not treated as a filter."""
        params = super(DjangoQLChangeList, self).get_filters_params(
            *args,
            **kwargs
        )
        params.pop(DJANGOQL_SEARCH_MARKER, None)
        return params
class DjangoQLSearchMixin(object):
    """ModelAdmin mixin that adds DjangoQL-powered search.

    Provides the completion assets, an optional toggle between plain and
    DjangoQL search, and the introspection/suggestions endpoints the
    completion widget talks to.
    """

    search_fields = ('_djangoql',)  # just a stub to have search input displayed
    djangoql_completion = True
    djangoql_completion_enabled_by_default = True
    djangoql_schema = DjangoQLSchema
    djangoql_syntax_help_template = 'djangoql/syntax_help.html'

    def search_mode_toggle_enabled(self):
        """Return True when the plain/DjangoQL search toggle should show."""
        # If search fields were defined on a child ModelAdmin instance,
        # we suppose that the developer wants two search modes and therefore
        # enable search mode toggle
        return self.search_fields != DjangoQLSearchMixin.search_fields

    def djangoql_search_enabled(self, request):
        """Return True when this request asked for DjangoQL mode."""
        return request.GET.get(DJANGOQL_SEARCH_MARKER, '').lower() == 'on'

    def get_changelist(self, *args, **kwargs):
        """Use the ChangeList that hides the DjangoQL marker parameter."""
        return DjangoQLChangeList

    def get_search_results(self, request, queryset, search_term):
        """Apply a DjangoQL search, falling back to the default admin
        search when the toggle is on but DjangoQL mode is not requested.

        Search errors are reported via messages and yield an empty queryset
        instead of a 500.
        """
        if (
            self.search_mode_toggle_enabled() and
            not self.djangoql_search_enabled(request)
        ):
            return super(DjangoQLSearchMixin, self).get_search_results(
                request=request,
                queryset=queryset,
                search_term=search_term,
            )
        use_distinct = False
        if not search_term:
            return queryset, use_distinct

        try:
            qs = apply_search(queryset, search_term, self.djangoql_schema)
        except (DjangoQLError, ValueError, FieldError, ValidationError) as e:
            msg = self.djangoql_error_message(e)
            messages.add_message(request, messages.WARNING, msg)
            qs = queryset.none()
        else:
            # Hack to handle 'inet' comparison errors in Postgres. If you
            # know a better way to check for such an error, please submit a PR.
            try:
                # Django >= 2.1 has built-in .explain() method
                explain = getattr(qs, 'explain', None)
                if callable(explain):
                    explain()
                else:
                    # Force query execution cheaply to surface DataError now.
                    list(qs[:1])
            except DataError as e:
                if 'inet' not in str(e):
                    raise
                msg = self.djangoql_error_message(e)
                messages.add_message(request, messages.WARNING, msg)
                qs = queryset.none()

        return qs, use_distinct

    def djangoql_error_message(self, exception):
        """Render a user-facing HTML message for a failed search."""
        if isinstance(exception, ValidationError):
            msg = exception.messages[0]
        else:
            msg = text_type(exception)
        return render_to_string('djangoql/error_message.html', context={
            'error_message': msg,
        })

    @property
    def media(self):
        """Admin media plus the DjangoQL completion JS/CSS when enabled."""
        media = super(DjangoQLSearchMixin, self).media
        if self.djangoql_completion:
            js = [
                'djangoql/js/completion.js',
            ]
            if self.search_mode_toggle_enabled():
                js.append('djangoql/js/completion_admin_toggle.js')
            if not self.djangoql_completion_enabled_by_default:
                js.append('djangoql/js/completion_admin_toggle_off.js')
            js.append('djangoql/js/completion_admin.js')
            media += Media(
                css={'': (
                    'djangoql/css/completion.css',
                    'djangoql/css/completion_admin.css',
                )},
                js=js,
            )
        return media

    def get_urls(self):
        """Prepend the introspection/suggestions/syntax-help routes."""
        custom_urls = []
        if self.djangoql_completion:
            custom_urls += [
                re_path(
                    r'^introspect/$',
                    self.admin_site.admin_view(self.introspect),
                    name='%s_%s_djangoql_introspect' % (
                        self.model._meta.app_label,
                        self.model._meta.model_name,
                    ),
                ),
                re_path(
                    r'^suggestions/$',
                    self.admin_site.admin_view(self.suggestions),
                    name='%s_%s_djangoql_suggestions' % (
                        self.model._meta.app_label,
                        self.model._meta.model_name,
                    ),
                ),
                re_path(
                    r'^djangoql-syntax/$',
                    self.admin_site.admin_view(TemplateView.as_view(
                        template_name=self.djangoql_syntax_help_template,
                    )),
                    name='djangoql_syntax_help',
                ),
            ]
        return custom_urls + super(DjangoQLSearchMixin, self).get_urls()

    def introspect(self, request):
        """Return the serialized DjangoQL schema for the completion widget."""
        suggestions_url = reverse('admin:%s_%s_djangoql_suggestions' % (
            self.model._meta.app_label,
            self.model._meta.model_name,
        ))
        serializer = SuggestionsAPISerializer(suggestions_url)
        response = serializer.serialize(self.djangoql_schema(self.model))
        return HttpResponse(
            content=json.dumps(response, indent=2),
            content_type='application/json; charset=utf-8',
        )

    def suggestions(self, request):
        """Delegate value-suggestion requests to SuggestionsAPIView."""
        view = SuggestionsAPIView.as_view(
            schema=self.djangoql_schema(self.model),
        )
        return view(request)
|
Python
| 0
|
@@ -725,32 +725,37 @@
from django.
+conf.
urls import re_p
@@ -769,32 +769,122 @@
ept ImportError:
+%0A try: # Django %3E= 4.0%0A from django.urls import re_path%0A except ImportError:
# Django %3C2.0%0A
@@ -879,20 +879,25 @@
Django %3C
+
2.0%0A
+
from
|
ba81c1d04a9896f1e24ca43592b93b26047705ef
|
Clean up command output
|
openstackclient/compute/v2/server.py
|
openstackclient/compute/v2/server.py
|
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
"""
Server action implementations
"""
import logging
from cliff.command import Command
from openstackclient.common import utils
def _find_server(cs, server):
    """Get a server by name or ID.

    :param cs: client whose ``servers`` manager is searched
    :param server: server name or ID
    """
    return utils.find_resource(cs.servers, server)
def _print_server(cs, server):
    """Print a server's details, resolving networks, flavor and image.

    :param cs: client used to look up related resources
    :param server: server resource; may come from a findall(name=...) call
        that lacks flavor/image details
    """
    # By default when searching via name we will do a
    # findall(name=blah) and due a REST /details which is not the same
    # as a .get() and doesn't get the information about flavors and
    # images. This fix it as we redo the call with the id which does a
    # .get() to get all informations.
    # (idiom fix: use "not in" rather than "not x in y")
    if 'flavor' not in server._info:
        server = _find_server(cs, server.id)
    networks = server.networks
    info = server._info.copy()

    # Flatten each network's address list into a comma-separated string.
    for network_label, address_list in networks.items():
        info['%s network' % network_label] = ', '.join(address_list)

    # Resolve flavor and image ids to names.
    # NOTE(review): _find_flavor/_find_image are not defined in this
    # module -- they appear to be helpers expected elsewhere; confirm.
    flavor = info.get('flavor', {})
    flavor_id = flavor.get('id', '')
    info['flavor'] = _find_flavor(cs, flavor_id).name

    image = info.get('image', {})
    image_id = image.get('id', '')
    info['image'] = _find_image(cs, image_id).name

    # Drop noisy fields before printing.
    info.pop('links', None)
    info.pop('addresses', None)

    utils.print_dict(info)
class List_Server(Command):
    """List server command."""

    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        """Add the --long flag to the base parser."""
        p = super(List_Server, self).get_parser(prog_name)
        p.add_argument('--long',
                       action='store_true',
                       default=False,
                       help='Additional fields are listed in output')
        return p

    def run(self, parsed_args):
        """Stub implementation: log the invocation and print a greeting."""
        self.log.info('List_Server()')
        self.log.info(' run(%s)' % parsed_args)
        self.app.stdout.write('hi!\n')
class Show_Server(Command):
    """Show server command."""

    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        """Add the positional server argument to the base parser."""
        p = super(Show_Server, self).get_parser(prog_name)
        p.add_argument('server',
                       metavar='<server>',
                       help='Name or ID of server to display')
        return p

    def run(self, parsed_args):
        """Stub implementation: log the invocation and print a greeting."""
        self.log.info('Show_Server()')
        self.log.info(' run(%s)' % parsed_args)
        self.app.stdout.write('hi!\n')
        # TODO: look up and print the server once a client is wired in:
        # s = _find_server(cs, args.server)
        # _print_server(cs, s)
|
Python
| 0.999995
|
@@ -2309,16 +2309,19 @@
g.info('
+v2.
List_Ser
@@ -2327,98 +2327,31 @@
rver
-()')%0A self.log.info(' run(%25s)' %25 parsed_args)%0A self.app.stdout.write('hi!%5Cn'
+.run(%25s)' %25 parsed_args
)%0A%0Ac
@@ -2758,16 +2758,19 @@
g.info('
+v2.
Show_Ser
@@ -2776,98 +2776,31 @@
rver
-()')%0A self.log.info(' run(%25s)' %25 parsed_args)%0A self.app.stdout.write('hi!%5Cn'
+.run(%25s)' %25 parsed_args
)%0A
|
f4303bfba961ef7775e8a2f7c5e85980d6931bd5
|
add hashbang
|
fetch.py
|
fetch.py
|
import json
import urllib2
import time
import collections
import ConfigParser
import MySQLdb as db
# Database credentials come from config.cfg, section [mysql].
config = ConfigParser.RawConfigParser()
config.read('config.cfg')

db_host = config.get('mysql', 'host')
db_user = config.get('mysql', 'uname')
db_pass = config.get('mysql', 'pw')
db_name = config.get('mysql', 'db_name')

# Wall-clock start of this sampling run (epoch seconds).
sample_time = int(time.time())

con = db.connect(db_host, db_user, db_pass, db_name)
cursor = con.cursor()
def save_run_to_db(db_cursor, sampling_time):
    """Insert a new sampling-run row and return its generated id."""
    query = """
    INSERT INTO sampling_runs (sample_time) VALUES (%s)
    """
    db_cursor.execute(query, (sampling_time,))
    return db_cursor.lastrowid
def update_run_with_end_time(db_cursor, run_id, end_time):
    """Stamp the sampling run identified by ``run_id`` with its end time."""
    query = """
    UPDATE sampling_runs set end_time = %s where id = %s
    """
    db_cursor.execute(query, (end_time, run_id))
# Record the start of this run; its row id tags every sample below.
run_id = save_run_to_db(cursor, sample_time)

# One sampled road segment/direction reading.
TrafficSample = collections.namedtuple('TrafficSample', [
    'segment_id',
    'direction',
    'road_status',
    'update_time',
    'aa',
    'alert_counts',
    'ac',
    'alert_text'])

# Lookup tables for the MMDA feed's numeric codes.
ROADS = {
    1: "EDSA",
    2: "Q.AVE",
    3: "ESPANA",
    4: "C5",
    5: "ROXAS BLVD",
    6: "SLEX",
    7: "COMMONWEALTH",
    8: "ORTIGAS",
    9: "MARCOS HIWAY"
}

ROAD_STATUSES = {
    0: "NO INFO",
    1: "LIGHT",
    2: "LIGHT-MED",
    3: "MEDIUM",
    4: "MEDIUM-HEAVY",
    5: "HEAVY"
}

NODE_TYPES = {
    0: 'TERMINATION',
    1: 'ROAD',
    2: 'INTERSECTION'
}

TRAFFIC_ENDPOINT = 'http://mmdatraffic.interaksyon.com/data.traffic.status.php'
ADVISORIES_ENDPOINT = 'http://mmdatraffic.interaksyon.com/data.traffic.advisories.php'

# Fetch the current traffic snapshot and the advisories list.
response = urllib2.urlopen(TRAFFIC_ENDPOINT)
traffic_points = json.load(response)

response = urllib2.urlopen(ADVISORIES_ENDPOINT)
advisories_list = json.load(response)

# Group advisory texts per segment and direction.
# NOTE(review): a dict payload is treated as southbound and a list as
# northbound -- inferred from this code only; confirm against the feed.
advisories = {}
for advisory in advisories_list:
    segment_id = advisory[1]
    info = advisory[2]
    direction = 'NB'
    text = ''
    if type(info) is dict:
        direction = 'SB'
        text = str(info)
    elif type(info) is list:
        direction = 'NB'
        text = str(info[0][0])
    if segment_id not in advisories:
        advisories[segment_id] = { 'NB': [], 'SB': []}
    advisories[segment_id][direction].append(text)
def mmda_time_to_timestamp(mmdatime):
    """Convert an MMDA 'YYYYMMDDHHMMSS' string to a local-time epoch float."""
    parsed = time.strptime(mmdatime, "%Y%m%d%H%M%S")
    return time.mktime(parsed)
def save_to_db(db_cursor, traffic_sample, run_id):
    """Persist one TrafficSample row, converting its MMDA time to an epoch."""
    query = """
    INSERT INTO traffic_samples
    (segment_id, direction, road_status, update_time, run_id, aa, alert_counts, ac, alert_text)
    VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
    """
    s = traffic_sample
    params = (s.segment_id, s.direction, s.road_status,
              mmda_time_to_timestamp(s.update_time), run_id,
              s.aa, s.alert_counts, s.ac, s.alert_text)
    db_cursor.execute(query, params)
# Persist one northbound and one southbound sample per traffic point.
for point in traffic_points:
    node_info = point[0]
    node_north = point[1]
    node_south = point[2]
    (road_id, node_id, intersecting_node_id, node_type_arr, is_service_node, related_node_id, is_major_intersection) = node_info
    (road_status, update_time, aa, alert_counts, ac) = node_north
    north_alert_text = ''
    south_alert_text = ''
    # Attach any advisories collected for this segment.
    if node_id in advisories:
        alerts = advisories[node_id]
        if len(alerts['NB']) > 0:
            north_alert_text = " | ".join(alerts['NB'])
        if len(alerts['SB']) > 0:
            south_alert_text = " | ".join(alerts['SB'])
    n_sample = TrafficSample(node_id, 'N', road_status, update_time, aa, alert_counts, ac, north_alert_text)
    save_to_db(cursor, n_sample, run_id)
    (road_status, update_time, aa, alert_counts, ac) = node_south
    s_sample = TrafficSample(node_id, 'S', road_status, update_time, aa, alert_counts, ac, south_alert_text)
    save_to_db(cursor, s_sample, run_id)

# Close out the run, then flush everything in one commit.
end_time = int(time.time())
update_run_with_end_time(cursor, run_id, end_time)

con.commit()
con.close()
|
Python
| 0.000087
|
@@ -1,12 +1,31 @@
+#!/usr/bin/python%0A%0A
import json%0A
|
f095e0a1a7aa3b277f86eb9029e9c663e1304a59
|
Allow using non-local boost
|
config/cbang/__init__.py
|
config/cbang/__init__.py
|
from SCons.Script import *
import inspect
import os
def GetHome():
    """Return the absolute directory containing this config module."""
    this_file = inspect.getfile(inspect.currentframe())
    return os.path.dirname(os.path.abspath(this_file))
def ConfigLocalBoost(env):
    """Point the build at a local Boost source tree from $BOOST_SOURCE.

    Raises when the variable is unset. (Python 2 raise syntax, kept as-is
    to match the rest of this SCons config.)
    """
    boost_source = os.environ.get('BOOST_SOURCE', None)
    if not boost_source: raise Exception, 'BOOST_SOURCE not set'
    env.Append(CPPPATH = [boost_source])
    return boost_source
def ConfigBoost(conf, require = False):
    """Check for Boost >= 1.40 with the headers and libraries cbang needs."""
    return conf.CBConfig('boost', require, version = '1.40',
                         hdrs = ['version', 'iostreams/stream', 'ref',
                                 'interprocess/sync/file_lock',
                                 'date_time/posix_time/posix_time'],
                         libs = ['iostreams', 'system', 'filesystem', 'regex'])
def configure_deps(conf, local = True, with_openssl = True):
    """Detect and configure all third-party dependencies of cbang.

    :param local: when True, missing libs fall back to bundled copies
    :param with_openssl: whether to probe for OpenSSL
    """
    env = conf.env

    conf.CBConfig('zlib', not local)
    conf.CBConfig('bzip2', not local)
    conf.CBConfig('XML', not local)
    conf.CBConfig('sqlite3', not local)
    if conf.CBConfig('event', False): conf.CBConfig('re2', not local)

    # Optional LevelDB support (needs snappy too).
    if conf.CBCheckLib('leveldb') and conf.CBCheckLib('snappy'):
        conf.CBCheckHome('leveldb')
        if conf.CBCheckCXXHeader('leveldb/db.h'):
            env.CBDefine('HAVE_LEVELDB')

    # Optional MariaDB/MySQL client (non-blocking API required).
    if conf.CBCheckCHeader('mysql/mysql.h') and \
            conf.CBCheckLib('mysqlclient') and \
            conf.CBCheckFunc('mysql_real_connect_start'):
        env.CBDefine('HAVE_MARIADB')
        env.cb_enabled.add('mariadb')

    # Fall back to a local Boost tree when the system one is missing.
    if not ConfigBoost(conf) and not local:
        env.ConfigLocalBoost()
        ConfigBoost(conf, True)

    # clock_gettime() needed by boost iterprocess
    if env['PLATFORM'] == 'posix' and int(env.get('cross_osx', 0)) == 0 \
            and not conf.CBCheckFunc('clock_gettime'):
        conf.CBRequireLib('rt')
        conf.CBRequireFunc('clock_gettime')

    if with_openssl: conf.CBConfig('openssl', False, version = '1.0.0')

    conf.CBConfig('chakra', False)

    # Windows socket/multimedia libraries; POSIX threads elsewhere.
    if env['PLATFORM'] == 'win32' or int(env.get('cross_mingw', 0)):
        if not conf.CBCheckLib('ws2_32'): conf.CBRequireLib('wsock32')
        conf.CBCheckLib('winmm')
        conf.CBRequireLib('setupapi')
    else: conf.CBConfig('pthreads')

    # OSX frameworks
    if env['PLATFORM'] == 'darwin' or int(env.get('cross_osx', 0)):
        if not (conf.CheckOSXFramework('CoreServices') and
                conf.CheckOSXFramework('IOKit') and
                conf.CheckOSXFramework('CoreFoundation')):
            raise Exception, \
                'Need CoreServices, IOKit & CoreFoundation frameworks'

    conf.CBConfig('valgrind', False)

    # Debug
    if env.get('debug', 0):
        if conf.CBCheckCHeader('execinfo.h') and \
                conf.CBCheckCHeader('bfd.h') and \
                conf.CBCheckLib('iberty') and conf.CBCheckLib('bfd'):
            env.CBDefine('HAVE_CBANG_BACKTRACE')
        elif env.get('backtrace_debugger', 0):
            raise Exception, \
                'execinfo.h, bfd.h and libbfd needed for backtrace_debuger'

        env.CBDefine('DEBUG_LEVEL=' + str(env.get('debug_level', 1)))
def configure(conf):
    """Configure a build that links against the cbang library itself."""
    env = conf.env

    # cbang source tree root, relative to this config module.
    home = GetHome() + '/../..'

    env.AppendUnique(CPPPATH = [home + '/src', home + '/include'])
    env.AppendUnique(LIBPATH = [home + '/lib'])

    # Make sure the dependency configuration ran at least once.
    if not env.CBConfigEnabled('cbang-deps'):
        conf.CBConfig('cbang-deps', local = False)

    conf.CBRequireLib('cbang')
    conf.CBRequireCXXHeader('cbang/Exception.h')
    env.CBDefine('HAVE_CBANG')
def generate(env):
    """SCons tool entry point: register config tests, vars and sub-tools."""
    env.CBAddConfigTest('cbang', configure)
    env.CBAddConfigTest('cbang-deps', configure_deps)
    env.AddMethod(ConfigLocalBoost)

    env.CBAddVariables(
        BoolVariable('backtrace_debugger', 'Enable backtrace debugger', 0),
        ('debug_level', 'Set log debug level', 1))

    # Load the dependency tools that live next to this config module.
    env.CBLoadTools('''sqlite3 boost openssl pthreads valgrind osx zlib bzip2
                       XML chakra event re2'''.split(), GetHome() + '/..')
def exists(env):
    """SCons tool protocol: this tool is always available."""
    return 1
|
Python
| 0.000001
|
@@ -1505,20 +1505,16 @@
%0A if
-not
ConfigBo
@@ -1522,20 +1522,250 @@
st(conf)
- and
+:%0A disable_local = env.get('disable_local', %5B%5D)%0A if isinstance(disable_local, str): disable_local = disable_local.split()%0A disable_local.append('boost')%0A env.Replace(disable_local = disable_local)%0A%0A elif
not loc
|
56ac633029c9d7ef40415e1881d2cb3c18c83d7b
|
Bump to version 0.17.1
|
ckanny/__init__.py
|
ckanny/__init__.py
|
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
ckanny
~~~~~~
Miscellaneous CKAN utility scripts
Examples:
literal blocks::
python example_google.py
Attributes:
module_level_variable1 (int): Module level variables may be documented in
"""
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
from manager import Manager
from . import datastorer, filestorer, package, hdx
# Package metadata.
__version__ = '0.17.0'

__title__ = 'ckanny'
__author__ = 'Reuben Cummings'
__description__ = 'Miscellaneous CKAN utility scripts'
__email__ = 'reubano@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'

# Top-level command manager aggregating the sub-command namespaces.
manager = Manager()
manager.merge(datastorer.manager, namespace='ds')
manager.merge(filestorer.manager, namespace='fs')
manager.merge(hdx.manager, namespace='hdx')
manager.merge(package.manager, namespace='pk')
@manager.command
def ver():
    """Show ckanny version"""
    print('v%s' % __version__)


if __name__ == '__main__':
    # Dispatch to the merged sub-command managers.
    manager.main()
|
Python
| 0
|
@@ -472,17 +472,17 @@
= '0.17.
-0
+1
'%0A%0A__tit
|
20ffbab08c244ec788e8a6114ccdbf38e39d97b6
|
Fix unclassifiable problem
|
classifier/demo.py
|
classifier/demo.py
|
"""
This is a demo about how to use LibLINEAR to do the prediction
==============================================================
Usage: python demo.py
Author: Wenjun Wang
Date: June 18, 2015
"""
import pickle
import datetime
from liblinearutil import *
from feature import convert_query
# Read training file
#y, x = svm_read_problem(path_to_training_file)
# Train and save model
#m = train(y, x, '-c 1 -s 1 -B 1 -e 0.01 -v 5 -q')
#save_model(name_of_model_file,m)
# Load the trained model, which is in the same directory as this script
date = str(datetime.date.today())
m = load_model('model_'+date)
if m == None:
date = str(datetime.date.fromordinal(datetime.date.today().toordinal()-1))
m = load_model('model_'+date)
# Load feature file, which is also in the same directory
infile = open('features')
feature_list = pickle.load(infile)
# Class labels
y = [1,2,3,4,5]
# Example query
query = 'next comment'
# Convert query
x = convert_query(query, feature_list, 'test')
# Do the prediction
p_label, p_val = predict(y, x, m, '-b 0')
print p_label #predict class/label
print p_val #svm value for each class/label
|
Python
| 0.999999
|
@@ -900,20 +900,18 @@
= '
-next comment
+Alan Black
'%0A#
@@ -1033,16 +1033,107 @@
'-b 0')%0A
+# Cannot classify it to any class%0Aif p_val%5B0%5D%5Bint(p_label%5B0%5D)-1%5D == 0:%0A p_label%5B0%5D = -1%0A
print p_
|
d1af8b3814edb3e38c27c36c25875809d26a76c7
|
Comment HTTP handler
|
amerigo.py
|
amerigo.py
|
#!/usr/bin/env python
import socket
import http.server
import threading
import struct
import geojson
import argparse
import logging as log
# Defaults for the output file and the UDP listen address/port;
# all three are overridable via command-line flags.
FILE = "./position.geojson"
UDP_ADDR = "0.0.0.0"
UDP_PORT = 49000

# Relate the dataset (integer key) to the data (list of values)
rosetta = {1: ["real_time", "total_time", "mission_time", "timer_time",
               "", "zulu_time", "local_time", "hobbs_time"],
           20: ["lat", "lon", "alt_amsl", "alt_agl",
                "on_rwy", "alt_ind", "lat_south", "lon_west"],
           17: ["pitch", "roll", "hdg_true", "hdg_mag",
                "", "", "", ""]
           }
class LoggingHTTPHandler(http.server.SimpleHTTPRequestHandler):
    """SimpleHTTPRequestHandler that routes request logging through the
    `logging` module instead of writing directly to stderr."""

    def log_message(self, format, *args):
        # Delegate to logging so output honors the configured log level.
        log.info("{}: [{}] {}".format(self.address_string(),
                                      self.log_date_time_string(),
                                      *args))
def split_payload(l, size=36):
    """Yield consecutive ``size``-byte chunks of ``l``.

    A trailing partial chunk (fewer than ``size`` items) is dropped.
    """
    usable = (len(l) // size) * size
    for start in range(0, usable, size):
        yield l[start:start + size]
def parse_stream(data):
    """
    Return a dictionary composed of the parsed binary 'data'.

    b'raw data' -> {x-plane data}

    data ::= binary literal

    Returns None when the 5-byte header is not b"DATA@".
    """
    if data[0:5] != b"DATA@":
        return None

    output = {}
    for piece in split_payload(data[5:]):
        fields = struct.unpack_from("iffffffff", piece)
        data_set, values = fields[0], fields[1:]
        if data_set in rosetta:
            # Append the translated 'data_set' to the output dict
            output.update(zip(rosetta[data_set], values))
        else:
            log.warning("Couldn't parse: idx:{} data:{}".format(data_set, values))
    return output
def server(args):
    """
    Start the HTTP server on a fixed local address and serve forever.
    """
    bind = ("127.0.0.1", 8000)
    httpd = http.server.HTTPServer(bind, LoggingHTTPHandler)
    log.info("Serving HTTP on {}:{}".format(bind[0], bind[1]))
    httpd.serve_forever()
def interpret(args):
    """
    Bind to a socket, parse the input and write to file.

    Fix over the original: parse_stream() returns None for packets whose
    header is not b"DATA@", and a DATA@ packet need not carry the position
    dataset, so both cases are guarded instead of raising TypeError /
    KeyError and killing this thread.
    """
    # Bind to an internet-facing UDP socket
    udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    udp_socket.bind((args.address, args.port))
    log.info("Binding to {}:{}".format(args.address, args.port))

    while True:
        # Receive with a buffer of 1024 bytes
        data, addr = udp_socket.recvfrom(1024)
        parsed = parse_stream(data)
        # Guard: parsed may be None (unknown header) or lack lat/lon
        # (no position dataset in this packet).
        if parsed and parsed.get("lon") and parsed.get("lat"):
            point = geojson.Point((parsed["lon"], parsed["lat"]))
            feature = geojson.Feature(geometry=point)
            with open(args.output, mode="w") as f:
                f.write(str(feature))
# Main program starts here
if __name__ == "__main__":
    # Parse arguments
    args = argparse.ArgumentParser()
    args.add_argument("-p", "--port", type=int, default=UDP_PORT,
                      help="port to bind to (default: %(default)s)")
    args.add_argument("-a", "--address", default=UDP_ADDR,
                      help="address to bind to (default: %(default)s)")
    args.add_argument("-o", "--output", default=FILE,
                      help="file that shall be written (default: %(default)s)")
    args.add_argument("-d", "--debug", const=log.DEBUG, nargs="?",
                      dest="loglevel", default=log.WARNING,
                      help="turn on debugging")
    args.add_argument("-v", "--verbose", const=log.INFO, nargs="?",
                      dest="loglevel",
                      help="turn on verbose output")
    args = args.parse_args()

    log.basicConfig(level=args.loglevel)

    # Create interpreter thread and send it the arguments
    interpreter_thread = threading.Thread(target=interpret,
                                          name="interpreter",
                                          args=(args,))
    server_thread = threading.Thread(target=server,
                                     name="server",
                                     args=(args,))
    # Run the threads
    interpreter_thread.start()
    server_thread.start()
Python
| 0.000001
|
@@ -682,16 +682,114 @@
ndler):%0A
+ %22%22%22%0A Turn the SimpleHTTPRequestHandler into a logging, simple%0A request handler%0A %22%22%22%0A%0A
def
|
6c1f487aa7ac472fc7f726b21d26c841625b176d
|
Edit feed content
|
routes.py
|
routes.py
|
from flask import Flask, render_template, redirect, url_for, request, session,\
flash, jsonify
from werkzeug.contrib.atom import AtomFeed
import os
import psycopg2
from functools import wraps
import urlparse
import datetime
# Flask app; the session-signing key must be provided via the environment.
app = Flask(__name__)
app.secret_key = os.environ['SECRET_KEY']
def connectDB(wrapped):
    """Decorator: call ``wrapped`` with a fresh DB cursor as first argument.

    Opens a psycopg2 connection from $DATABASE_URL, commits on success,
    and -- fix over the original -- always closes the connection, so a
    view that raises no longer leaks it.
    """
    @wraps(wrapped)
    def inner(*args, **kwargs):
        urlparse.uses_netloc.append("postgres")
        url = urlparse.urlparse(os.environ["DATABASE_URL"])
        conn = psycopg2.connect(
            database=url.path[1:],
            user=url.username,
            password=url.password,
            host=url.hostname,
            port=url.port
        )
        try:
            cur = conn.cursor()
            ret = wrapped(cur, *args, **kwargs)
            # Commit only after the wrapped view succeeded.
            conn.commit()
            cur.close()
            return ret
        finally:
            conn.close()
    return inner
def login_required(f):
    """Decorator that bounces anonymous users back to the home page."""
    @wraps(f)
    def function(*args, **kwargs):
        if 'username' in session:
            return f(*args, **kwargs)
        return redirect(url_for('home'))
    return function
@app.route('/', methods=['GET', 'POST'])
@connectDB
def home(cur):
    """GET: landing/login page (or straight to /update when logged in).
    POST: check the submitted credentials against the ROOT table."""
    if request.method == 'GET':
        if 'username' in session:
            return redirect(url_for('update'))
        session['type'] = 'unknown'
        return render_template('home.html', logged_in=False)
    else:
        # NOTE(review): compares the submitted username/password pair
        # directly against the ROOT row -- credentials appear to be stored
        # in plain text; confirm and consider hashing.
        cur.execute("SELECT * FROM ROOT")
        auth = cur.fetchone()
        user_input = (request.form['username'], request.form['password'])
        if user_input == auth:
            session['username'] = request.form['username']
            return redirect(url_for('update'))
        else:
            return redirect(url_for('home'))
@app.route('/update', methods=['GET', 'POST'])
@login_required
@connectDB
def update(cur):
    """GET: render the update form. POST: store a new announcement.

    Fix over the original: the bare ``except:`` also swallowed SystemExit
    and KeyboardInterrupt; narrowed to ``except Exception``.
    """
    if request.method == 'GET':
        return render_template('update.html', logged_in=True)
    else:
        cluster = request.form['cluster']
        announcement = request.form['announcement']
        timestamp = datetime.datetime.now()
        try:
            cur.execute("INSERT INTO ANNOUNCEMENTS (CLUSTER, ANNOUNCEMENT, TIME) VALUES \
                (%s, %s, %s)", (cluster, announcement, timestamp))
            flash('The update has been posted.')
        except Exception:
            flash('ERROR ! The update was NOT posted.')
        finally:
            # NOTE(review): returning in finally suppresses any re-raise;
            # kept because this route must always redirect.
            return redirect(url_for('update'))
@app.route('/logout')
@login_required
def logout():
    """Clear the session user and return to the landing page."""
    session.pop('username', None)
    session['type'] = 'unknown'
    return redirect(url_for('home'))
@app.route('/recent.atom')
@connectDB
def recent_feed(cur):
    """Serve the 50 most recent announcements as an Atom feed."""
    feed = AtomFeed('Recent announcements',
                    feed_url=request.url,
                    url=request.url_root)
    cur.execute("SELECT * FROM ANNOUNCEMENTS ORDER BY TIME DESC LIMIT 50")
    announcements = cur.fetchall()
    # NOTE(review): rows appear to be (cluster, announcement, time)
    # given the INSERT in update() -- confirm column order.
    for i, announcement in enumerate(announcements):
        feed.add(
            "ANNOUNCEMENT " + str(i),
            unicode(announcement[0] + announcement[1]),
            author="KS",
            url=request.url,
            updated=announcement[2]
        )
    return feed.get_response()
@app.route('/api/count', methods=['GET'])
@connectDB
def count(cur):
    """Return the total number of announcements as JSON."""
    cur.execute("SELECT COUNT(*) FROM ANNOUNCEMENTS")
    (total,) = cur.fetchone()
    return jsonify({'count': total})
if __name__ == '__main__':
    # Development server only; not for production use.
    app.run(host="127.0.0.1", port=6666, debug=True)
|
Python
| 0.000001
|
@@ -2873,16 +2873,20 @@
+ str(i
+ + 1
),%0A
@@ -2915,16 +2915,31 @@
ement%5B0%5D
+.strip() + ': '
+ annou
|
da31be1c27c7568fa50c89f28b04ad763481f541
|
Remove unused import
|
rparse.py
|
rparse.py
|
#!/usr/bin/env python
# Copyright 2015, Dmitry Veselov
from re import sub
from plyplus import Grammar, STransformer, \
ParseError, TokenizeError
try:
# Python 2.x and pypy
from itertools import imap as map
from itertools import ifilter as filter
except ImportError:
# Python 3.x already have lazy map
pass
__all__ = [
"parse"
]
grammar = Grammar(r"""
@start : package ;
package : name extras? specs? comment?;
name : string ;
specs : comparison version (',' comparison version)* ;
comparison : '<' | '<=' | '!=' | '==' | '>=' | '>' | '~=' | '===' ;
version : string ;
extras : '\[' (extra (',' extra)*)? '\]' ;
extra : string ;
comment : '\#.+' ;
@string : '[-A-Za-z0-9_\.]+' ;
SPACES: '[ \t\n]+' (%ignore) (%newline);
""")
class Requirement(object):
def __init__(self, name=None, extras=None, specs=None, comment=None):
self.name = name
self.extras = extras
self.specs = specs
self.comment = comment
def __str__(self):
return "<{0}(name='{1}'>".format(self.__class__.__name__, self.name)
class RTransformer(STransformer):
def package(self, node):
requirement = Requirement()
for key, value in node.tail:
setattr(requirement, key, value)
return requirement
def name(self, node):
return ("name", node.tail[0])
def specs(self, node):
comparisons, versions = node.tail[0::2], node.tail[1::2]
return ("specs", list(zip(comparisons, versions)))
def comparison(self, node):
return node.tail[0]
def version(self, node):
return node.tail[0]
def extras(self, node):
return ("extras", [name for name in node.tail])
def extra(self, node):
return node.tail[0]
def comment(self, node):
return ("comment", " ".join([word for word in node.tail]))
def comment_content(self, node):
return node.tail[0]
def _parse(line, g=grammar):
line = line.strip()
if line.startswith("#"):
return None
try:
if line:
return g.parse(line)
else:
return None
except (ParseError, TokenizeError):
message = "Invalid requirements line: '{0}'".format(line)
raise ValueError(message)
def parse(requirements):
"""
Parses given requirements line-by-line.
"""
transformer = RTransformer()
return map(transformer.transform, filter(None, map(_parse, requirements.splitlines())))
|
Python
| 0.000001
|
@@ -52,27 +52,8 @@
lov%0A
-from re import sub%0A
from
|
6fdee7b4cda74d7e57d901607ef07e511a80d4c9
|
Add white lines to separate methods.
|
rtmbot.py
|
rtmbot.py
|
#!/usr/bin/env python
import sys
sys.dont_write_bytecode = True
import glob
import yaml
import json
import os
import sys
import time
import logging
from argparse import ArgumentParser
from slackclient import SlackClient
def dbg(debug_string):
if debug:
logging.info(debug_string)
class RtmBot(object):
def __init__(self, token):
self.last_ping = 0
self.token = token
self.bot_plugins = []
self.slack_client = None
def connect(self):
"""Convenience method that creates Server instance"""
self.slack_client = SlackClient(self.token)
self.slack_client.rtm_connect()
def start(self):
self.connect()
self.load_plugins()
while True:
for reply in self.slack_client.rtm_read():
self.input(reply)
self.crons()
self.output()
self.autoping()
time.sleep(.1)
def autoping(self):
#hardcode the interval to 3 seconds
now = int(time.time())
if now > self.last_ping + 3:
self.slack_client.server.ping()
self.last_ping = now
def input(self, data):
if "type" in data:
function_name = "process_" + data["type"]
dbg("got {}".format(function_name))
for plugin in self.bot_plugins:
plugin.register_jobs()
plugin.do(function_name, data)
def output(self):
for plugin in self.bot_plugins:
limiter = False
for output in plugin.do_output():
channel = self.slack_client.server.channels.find(output[0])
if channel != None and output[1] != None:
if limiter == True:
time.sleep(.1)
limiter = False
message = output[1].encode('ascii','ignore')
channel.send_message("{}".format(message))
limiter = True
def crons(self):
for plugin in self.bot_plugins:
plugin.do_jobs()
def load_plugins(self):
for plugin in glob.glob(directory+'/plugins/*'):
sys.path.insert(0, plugin)
sys.path.insert(0, directory+'/plugins/')
for plugin in glob.glob(directory+'/plugins/*.py') + glob.glob(directory+'/plugins/*/*.py'):
logging.info(plugin)
name = plugin.split('/')[-1][:-3]
# try:
self.bot_plugins.append(Plugin(name, self))
# except:
# print "error loading plugin %s" % name
class Plugin(object):
def __init__(self, name, bot, plugin_config={}):
self.name = name
self.jobs = []
self.module = __import__(name)
self.register_jobs()
self.outputs = []
if name in config:
logging.info("config found for: " + name)
self.module.config = config[name]
if 'setup' in dir(self.module):
self.module.setup(bot)
def register_jobs(self):
if 'crontable' in dir(self.module):
for interval, function in self.module.crontable:
self.jobs.append(Job(interval, eval("self.module."+function)))
logging.info(self.module.crontable)
self.module.crontable = []
else:
self.module.crontable = []
def do(self, function_name, data):
if function_name in dir(self.module):
#this makes the plugin fail with stack trace in debug mode
if not debug:
try:
eval("self.module."+function_name)(data)
except:
dbg("problem in module {} {}".format(function_name, data))
else:
eval("self.module."+function_name)(data)
if "catch_all" in dir(self.module):
try:
self.module.catch_all(data)
except:
dbg("problem in catch all")
def do_jobs(self):
for job in self.jobs:
job.check()
def do_output(self):
output = []
while True:
if 'outputs' in dir(self.module):
if len(self.module.outputs) > 0:
logging.info("output from {}".format(self.module))
output.append(self.module.outputs.pop(0))
else:
break
else:
self.module.outputs = []
return output
class Job(object):
def __init__(self, interval, function):
self.function = function
self.interval = interval
self.lastrun = 0
def __str__(self):
return "{} {} {}".format(self.function, self.interval, self.lastrun)
def __repr__(self):
return self.__str__()
def check(self):
if self.lastrun + self.interval < time.time():
if not debug:
try:
self.function()
except:
dbg("problem")
else:
self.function()
self.lastrun = time.time()
pass
class UnknownChannel(Exception):
pass
def main_loop():
if "LOGFILE" in config:
logging.basicConfig(filename=config["LOGFILE"], level=logging.INFO, format='%(asctime)s %(message)s')
logging.info(directory)
try:
bot.start()
except KeyboardInterrupt:
sys.exit(0)
except:
logging.exception('OOPS')
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'-c',
'--config',
help='Full path to config file.',
metavar='path'
)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
directory = os.path.dirname(sys.argv[0])
if not directory.startswith('/'):
directory = os.path.abspath("{}/{}".format(os.getcwd(),
directory
))
config = yaml.load(file(args.config or 'rtmbot.conf', 'r'))
debug = config["DEBUG"]
bot = RtmBot(config["SLACK_TOKEN"])
site_plugins = []
files_currently_downloading = []
job_hash = {}
if config.has_key("DAEMON"):
if config["DAEMON"]:
import daemon
with daemon.DaemonContext():
main_loop()
main_loop()
|
Python
| 0
|
@@ -460,16 +460,17 @@
= None%0A
+%0A
def
@@ -634,24 +634,25 @@
m_connect()%0A
+%0A
def star
@@ -922,24 +922,25 @@
e.sleep(.1)%0A
+%0A
def auto
@@ -1140,16 +1140,17 @@
g = now%0A
+%0A
def
@@ -1423,24 +1423,25 @@
name, data)%0A
+%0A
def outp
@@ -1980,16 +1980,17 @@
= True%0A
+%0A
def
@@ -2067,24 +2067,25 @@
n.do_jobs()%0A
+%0A
def load
@@ -2582,16 +2582,17 @@
%25 name%0A%0A
+%0A
class Pl
@@ -3002,16 +3002,17 @@
up(bot)%0A
+%0A
def
@@ -3352,24 +3352,25 @@
ntable = %5B%5D%0A
+%0A
def do(s
@@ -3791,32 +3791,33 @@
ion_name)(data)%0A
+%0A
if %22catc
@@ -3969,16 +3969,17 @@
h all%22)%0A
+%0A
def
@@ -4023,16 +4023,16 @@
f.jobs:%0A
-
@@ -4047,16 +4047,17 @@
check()%0A
+%0A
def
|
c3951f942633438e91e43b523a814bf1a3528295
|
Add impl to analyzer.
|
analyze.py
|
analyze.py
|
#!/bin/python
from __future__ import print_function, division
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Analyze shogi board state in a photo""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args = parser.parse_args()
|
Python
| 0
|
@@ -62,24 +62,63 @@
%0Aimport
-argparse
+cv%0Aimport cv2%0Aimport argparse%0Aimport preprocess
%0A%0Aif __n
@@ -308,36 +308,636 @@
er)%0A
-%0A args = parser.parse_args(
+ parser.add_argument(%0A 'photo', metavar='PHOTO', nargs=1, type=str,%0A help='Photo image path')%0A parser.add_argument(%0A '--output-visualization', nargs='?', metavar='VISUALIZATION_PATH',%0A type=str, default=None, const=True,%0A help='Output path of pretty visualization image')%0A%0A args = parser.parse_args()%0A img = cv2.imread(args.photo%5B0%5D)%0A # TODO: Refactoring required%0A args.derive_emptiness = False%0A args.derive_types_up = False%0A args.derive_validness = False%0A%0A detected = preprocess.detect_board(%22%22, img, visualize=False, derive=args)%0A print(%22Detected?: %25s%22 %25 detected
)%0A
|
f39c5650372c37585a1331e251c931758a7f240e
|
fix issue with absolute path
|
default.py
|
default.py
|
import socket
import plugin
import ssl
import user
class bot(object):
def __init__(self, server):
self.server = server
self.port = 6667
self.ssl = None
self.channels = []
self.connectedChannels = []
self.nick = 'default_nick'
self.realName = 'default_nick default_nick'
self.socket = None
self.debugger = True
self.allowedCommands = {
'ping': self.ping, 'privmsg': self.privmsg, 'invite': self.invite,
'join': self.join, '433': self.f433, '307':self.f307, '353':self.f353}
self.autoInvite = True
self.plugins = plugin.Plugin(self.rawSend)
self.userlist = {}
def debug(self, line):
if self.debugger is not None:
print(line)
def connect(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.ssl is not None:
self.socket = ssl.wrap_socket(self.socket)
self.socket.connect((self.server, self.port))
self.authenticate()
def authenticate(self):
self.rawSend('NICK', self.nick)
self.rawSend('USER', ' '.join((self.nick, self.nick, self.realName)))
def joinChannel(self, channel = None):
if channel is not None:
self.channels.append(channel)
for chan in self.channels:
if chan not in self.connectedChannels:
self.rawSend('JOIN', chan)
self.connectedChannels.append(chan)
def rawSend(self, command, content, dest = ''):
line = ' '.join((command, dest, content, '\r\n'))
self.debug(line)
self.socket.send(bytes(line, 'UTF-8'))
def splitLine(self, line):
datas_dict = {}
if line.startswith(':'):
datas_dict['from'], line = line[1:].split(' ', 1)
datas_dict['from'] = datas_dict['from'].split('!')[0]
datas = line.split(' ', 1)
datas_dict['command'] = datas[0]
if datas_dict['command'].isdigit():
# numeric commands are server response and don't follow any logic. annoying :/
# so we just put the whole line into content. Parsing is done in functions
datas_dict['content'] = datas[1]
else:
splited = datas[1].split(':', 1)
if len(splited) > 1:
datas_dict['to'] = splited[0].strip()
datas_dict['content'] = splited[1]
else:
datas_dict['to'], datas_dict['content'] = splited[0].split(' ', 1)
return datas_dict
def parseLine(self, line):
self.debug(line)
datas = self.splitLine(line)
self.debug(datas)
if datas['command'].lower() in self.allowedCommands.keys():
self.allowedCommands[datas['command'].lower()](datas)
if datas['command'] == 'MODE':
self.joinChannel()
pass
def listen(self):
queue = ''
while(1):
raw = self.socket.recv(1024).decode('UTF-8', 'replace')
queue = ''.join((queue, raw))
splited = queue.split('\r\n')
if len(splited) > 1:
for line in splited[:-1]:
self.parseLine(line)
queue = splited[-1]
# received commands
def ping(self, datas):
self.rawSend('PONG', datas['content'])
def invite(self, datas):
if self.autoInvite:
self.joinChannel(datas['content'])
def privmsg(self, datas):
if(datas['to'] not in self.connectedChannels):
for chan in self.connectedChannels:
self.rawSend('PRIVMSG', ', '.join((datas['from'], 'il veut violer mon intimité.')), chan)
else:
# get first word, to check if it's a plugin
word = datas['content'].split(' ', 1)[0]
if(word.startswith('!') and word[1:].isalnum()):
self.plugins.execute(datas, self.userlist)
def join(self, datas):
self.whois(datas['from'])
def f433(self, datas):
# nickname is already in use.
self.debug('nick utilise. Adding a _')
b.nick = b.nick+'_'
self.authenticate()
def f307(self, datas):
# user is identified
user = datas['content'].split()[1]
self.userlist[user].identified = True
self.debug(self.userlist)
def f353(self, datas):
# list users connected to a channel
users = datas['content'].split(':')[1].split()
for user in users:
self.whois(user)
# send commands
def whois(self, username):
self.rawSend('WHOIS', '', username)
self.userlist[username] = user.user(username)
conf_file = open('config.ini').readlines()
config = {}
for line in conf_file:
if line.strip()[0] is not '#':
splited = line.split('=', 1)
config[splited[0].strip()] = splited[1].strip()
b = bot(config['server'])
b.nick=config['nick']
b.ssl = config['ssl']
b.port = int(config['port'])
for chan in config['channels'].split(','):
b.channels.append(chan.strip())
b.connect()
b.listen()
|
Python
| 0.000001
|
@@ -43,16 +43,26 @@
ort user
+%0Aimport os
%0A%0Aclass
@@ -4730,17 +4730,62 @@
= open(
-'
+os.path.dirname(os.path.realpath(__file__))+'/
config.i
|
6c2adf0ff9f5026a4280b3e374429dcb7ef48dce
|
Enable using script directly
|
openfisca_web_api_preview/scripts/serve.py
|
openfisca_web_api_preview/scripts/serve.py
|
# -*- coding: utf-8 -*-
import sys
import imp
import os.path
import logging
import argparse
from gunicorn.app.base import BaseApplication
from gunicorn.six import iteritems
from gunicorn import config
from openfisca_core.scripts import add_minimal_tax_benefit_system_arguments
from ..app import create_app
from imp import load_module
"""
Define the `openfisca serve` command line interface.
"""
DEFAULT_PORT = '5000'
HOST = '127.0.0.1'
DEFAULT_WORKERS_NUMBER = '3'
log = logging.getLogger(__name__)
def define_command_line_options(parser):
# Define OpenFisca modules configuration
parser = add_minimal_tax_benefit_system_arguments(parser)
# Define server configuration
parser.add_argument('-p', '--port', action = 'store', default = DEFAULT_PORT, help = "port to serve on", type = int)
parser.add_argument('--tracker_url', action = 'store', help = "tracking service url", type = str)
parser.add_argument('--tracker_idsite', action = 'store', help = "tracking service id site", type = int)
parser.add_argument('-f', '--configuration_file', action = 'store', help = "gunicorn configuration file", type = str)
return parser
def read_user_configuration(default_configuration, command_line_parser):
configuration = default_configuration
args, unknown_args = command_line_parser.parse_known_args()
if args.configuration_file:
# Configuration file overloads default configuration
module_name = os.path.splitext(os.path.basename(args.configuration_file))[0]
module_directory = os.path.dirname(args.configuration_file)
module = imp.load_module(module_name, *imp.find_module(module_name, [module_directory]))
file_configuration = [item for item in dir(module) if not item.startswith("__")]
for key in file_configuration:
value = getattr(module, key)
if value:
configuration[key] = value
if key == "port":
configuration['bind'] = configuration['bind'][:-4] + str(configuration['port'])
# Command line configuration overloads all configuration
command_line_parser = config.Config().parser()
configuration = update(configuration, vars(args))
configuration = update(configuration, unknown_args)
return configuration
def update(configuration, new_options):
for key in new_options:
value = new_options[key]
if not configuration.get(key) or value:
configuration[key] = value
if key == "port":
configuration['bind'] = configuration['bind'][:-4] + str(configuration['port'])
return configuration
class StandaloneApplication(BaseApplication):
def __init__(self, app, options = None):
self.options = options or {}
self.application = app
super(StandaloneApplication, self).__init__()
def load_config(self):
for key, value in iteritems(self.options):
if value is None:
log.debug('Undefined value for key `{}`.'.format(key))
if key in self.cfg.settings and value is not None:
self.cfg.set(key.lower(), value)
def load(self):
return self.application
def main(parser):
command_line_parser = define_command_line_options(parser)
configuration = {
'port': DEFAULT_PORT,
'bind': '{}:{}'.format(HOST, DEFAULT_PORT),
'workers': DEFAULT_WORKERS_NUMBER,
}
configuration = read_user_configuration(configuration, command_line_parser)
app = create_app(configuration['country_package'], configuration['extensions'], configuration['tracker_url'], configuration['tracker_idsite'])
StandaloneApplication(app, configuration).run()
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0
|
@@ -278,17 +278,41 @@
ts%0Afrom
-.
+openfisca_web_api_preview
.app imp
@@ -3254,28 +3254,88 @@
rser
-):%0A command_line_
+ = None):%0A if not parser:%0A parser = argparse.ArgumentParser()%0A
pars
@@ -3588,37 +3588,24 @@
figuration,
-command_line_
parser)%0A%0A
|
b5b40dc232b04a2cfa75438bb5143ffdb103a57c
|
split a method
|
AlphaTwirl/EventReader/ProgressReporter.py
|
AlphaTwirl/EventReader/ProgressReporter.py
|
# Tai Sakuma <sakuma@fnal.gov>
import multiprocessing
import time
from ProgressReport import ProgressReport
##____________________________________________________________________________||
class ProgressReporter(object):
def __init__(self, queue, pernevents = 1000):
self.queue = queue
self.pernevents = pernevents
self.interval = 0.02 # [second]
self._readTime()
def report(self, event, component):
if not self.needToReport(event, component): return
done = event.iEvent + 1
report = ProgressReport(name = component.name, done = done, total = event.nEvents)
self.queue.put(report)
self._readTime()
def needToReport(self, event, component):
iEvent = event.iEvent + 1 # add 1 because event.iEvent starts from 0
if self._time() - self.lastTime > self.interval: return True
if iEvent % self.pernevents == 0: return True
if iEvent == event.nEvents: return True
return False
def _time(self): return time.time()
def _readTime(self): self.lastTime = self._time()
##____________________________________________________________________________||
|
Python
| 0.999953
|
@@ -495,16 +495,97 @@
return%0A
+ self._report(event, component)%0A%0A def _report(self, event, component):%0A
|
6cc803f68876689629fa2c2bae1413d46a0d2002
|
Update different-ways-to-add-parentheses.py
|
Python/different-ways-to-add-parentheses.py
|
Python/different-ways-to-add-parentheses.py
|
# Time: O(n * 4^n / n^(3/2)) ~= n * (Catalan numbers) = n * (C(2n, n) - C(2n, n - 1))
# Space: O(n^2 * 4^n / n^(3/2))
#
# Given a string of numbers and operators, return all possible
# results from computing all the different possible ways to
# group numbers and operators. The valid operators are +, - and *.
#
#
# Example 1
# Input: "2-1-1".
#
# ((2-1)-1) = 0
# (2-(1-1)) = 2
# Output: [0, 2]
#
#
# Example 2
# Input: "2*3-4*5"
#
# (2*(3-(4*5))) = -34
# ((2*3)-(4*5)) = -14
# ((2*(3-4))*5) = -10
# (2*((3-4)*5)) = -10
# (((2*3)-4)*5) = 10
# Output: [-34, -14, -10, -10, 10]
#
class Solution:
# @param {string} input
# @return {integer[]}
def diffWaysToCompute(self, input):
tokens = re.split('(\D)', input)
nums = map(int, tokens[::2])
ops = map({'+': operator.add, '-': operator.sub, '*': operator.mul}.get, tokens[1::2])
lookup = [[None for _ in xrange(len(nums))] for _ in xrange(len(nums))]
def diffWaysToComputeRecu(left, right):
if left == right:
return [nums[left]]
if lookup[left][right]:
return lookup[left][right]
lookup[left][right] = [ops[i](x, y)
for i in xrange(left, right)
for x in diffWaysToComputeRecu(left, i)
for y in diffWaysToComputeRecu(i + 1, right)]
return lookup[left][right]
return diffWaysToComputeRecu(0, len(nums) - 1)
class Solution2:
# @param {string} input
# @return {integer[]}
def diffWaysToCompute(self, input):
lookup = [[None for _ in xrange(len(input) + 1)] for _ in xrange(len(input) + 1)]
ops = {'+': operator.add, '-': operator.sub, '*': operator.mul}
def diffWaysToComputeRecu(left, right):
if lookup[left][right]:
return lookup[left][right]
result = []
for i in xrange(left, right):
if input[i] in "+-*":
for x in diffWaysToComputeRecu(left, i):
for y in diffWaysToComputeRecu(i + 1, right):
result.append(ops[input[i]](x, y))
if not result:
result = [int(input[left:right])]
lookup[left][right] = result
return lookup[left][right]
return diffWaysToComputeRecu(0, len(input))
|
Python
| 0.000014
|
@@ -4,20 +4,16 @@
ime: O(
-n *
4%5En / n%5E
@@ -22,21 +22,16 @@
/2)) ~=
-n * (
Catalan
@@ -41,17 +41,11 @@
bers
-)
=
-n * (
C(2n
@@ -63,17 +63,16 @@
, n - 1)
-)
%0A# Space
|
c00b7d6af8c3fdbd45d08e69a06e4f03e4294219
|
change to write from write_to_xhtml
|
epub_clean/unit_tests_epub.py
|
epub_clean/unit_tests_epub.py
|
import copy
import unittest
import os
import os.path
import shutil
import tempfile
import time
import lxml.html
import chapter
from constants import *
import epub
class TestEpub(unittest.TestCase):
def setUp(self):
chapter_dir = os.path.join(TEST_DIR, 'test_chapters')
chapter_factory = chapter.ChapterFactory()
self.output_directory = os.path.join(TEST_DIR, 'epub_output')
self.chapter_list = []
file_list = [f for f in os.listdir(chapter_dir) if os.path.isfile(os.path.join(chapter_dir,f))]
for index, f in enumerate(file_list):
full_name = os.path.join(chapter_dir, f)
c = chapter_factory.create_chapter_from_file(full_name)
self.chapter_list.append(c)
self.chapter_titles = [
u'Quick Practical, Tactical Tips for Presentations',
u'Venture capital - Wikipedia, the free encyclopedia',
u"Ben's Blog",
u"The capture of Mosul: Terror\u2019s new headquarters | The Economist",
]
def test_TOCHTML(self):
def create_TOC():
self.test_toc = epub._TOC_HTML()
self.test_toc.add_chapters(self.chapter_list)
self.toc_element = self.test_toc.get_content_as_element()
def check_titles():
chapter_nodes = self.toc_element.get_element_by_id('chapters').getchildren()
self.assertEqual(len(chapter_nodes), len(self.chapter_list))
self.assertEqual(chapter_nodes[0][0].text,self.chapter_titles[0])
self.assertEqual(chapter_nodes[1][0].text,self.chapter_titles[1])
self.assertEqual(chapter_nodes[2][0].text,self.chapter_titles[2])
self.assertEqual(chapter_nodes[3][0].text,self.chapter_titles[3])
create_TOC()
check_titles()
self.test_toc.write(os.path.join(TEST_DIR, 'epub_output', 'toc.html'))
def test_TOCNCX(self):
def createTOC():
self.test_toc = epub._TOC_NCX()
self.test_toc.add_chapters(self.chapter_list)
self.toc_element = self.test_toc.get_content_as_element()
def checkTitles():
chapter_nodes = self.toc_element[2]
self.assertEqual(len(chapter_nodes),len(self.chapter_list))
for index, node in enumerate(chapter_nodes):
self.assertEqual(node[0][0].text,self.chapter_titles[index])
createTOC()
checkTitles()
self.test_toc.write(os.path.join(TEST_DIR, 'epub_output', 'toc_ncx.xml'))
def test_ContentOPF(self):
def createContentOPF():
self.test_opf = epub._Content_OPF('Sample Title')
self.test_opf.add_chapters(self.chapter_list)
opf_file = os.path.join(TEST_DIR, 'epub_output', 'opf.xml')
self.test_opf.write(opf_file)
self.opf_element = self.test_opf.get_content_as_element()
self.assertEqual(len(self.opf_element.getchildren()), 4)
def check_encoding():
pass
def checkSpine():
spine_nodes = self.opf_element[2].getchildren()
self.assertEqual(len(spine_nodes),len(self.chapter_list) + 1)
def checkManifest():
manifest_nodes = self.opf_element[1].getchildren()
self.assertEqual(len(manifest_nodes),len(self.chapter_list) + 2)
createContentOPF()
check_encoding()
checkSpine()
checkManifest()
def test_create_epub(self):
e = epub.Epub(os.path.join(TEST_DIR, 'epub_output'), 'Test Epub')
for index, c in enumerate(self.chapter_list):
output_name = os.path.join(TEST_DIR,
'epub_output', str(index) + '.xhtml')
c.write_to_xhtml(output_name)
e.add_chapter(c)
e.create_epub(epub_name = 'test_epub')
##class Test_Create_Epub_From_Folder(unittest.TestCase):
## def test_create_epub_from_folder_file(self):
## test_folder = os.path.join(TEST_DIR, 'test_create_epub_from_folder')
## information_file = os.path.join(test_folder, 'link_information.json')
## e = epub.create_epub_from_folder_file('Sample Title', 'Test',
## test_folder, test_folder, information_file)
## self.assertEqual(len(e.chapters),3)
## chapter_1 = e.chapters[0]
## self.assertEqual(chapter_1.title,'Example Domain')
## chapter_2 = e.chapters[1]
## self.assertEqual(chapter_2.title,'AVC')
## chapter_2 = e.chapters[2]
## self.assertEqual(chapter_2.title,'Wikipedia')
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000001
|
@@ -3736,17 +3736,8 @@
rite
-_to_xhtml
(out
|
08ae9fdccb285c4597fef83961ee1fbe2d04b86f
|
Fix to make_html_report
|
openquake/engine/tools/make_html_report.py
|
openquake/engine/tools/make_html_report.py
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2016 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import os
import cgi
import time
from datetime import date, datetime, timedelta
import itertools
from docutils.examples import html_parts
from openquake.commonlib.datastore import read
from openquake.commonlib.views import view_fullreport
from openquake.calculators import base # needed for registering task_info
from openquake.engine.logs import dbcmd
tablecounter = itertools.count(0)
def html(header_rows):
"""
Convert a list of tuples describing a table into a HTML string
"""
name = 'table%d' % next(tablecounter)
return HtmlTable([map(str, row) for row in header_rows], name).render()
class HtmlTable(object):
"""
Convert a sequence header+body into a HTML table.
"""
css = """\
tr.evenRow { background-color: lightgreen }
tr.oddRow { }
th { background-color: lightblue }
"""
maxrows = 5000
border = "1"
summary = ""
def __init__(self, header_plus_body, name='noname',
empty_table='Empty table'):
header, body = header_plus_body[0], header_plus_body[1:]
self.name = name
self.empty_table = empty_table
rows = [] # rows is a finite sequence of tuples
for i, row in enumerate(body):
if i == self.maxrows:
rows.append(
["Table truncated because too big: more than %s rows" % i])
break
rows.append(row)
self.rows = rows
self.header = tuple(header) # horizontal header
def render(self, dummy_ctxt=None):
out = "\n%s\n" % "".join(list(self._gen_table()))
if not self.rows:
out += '<em>%s</em>' % cgi.escape(self.empty_table, quote=True)
return out
def _gen_table(self):
yield '<table id="%s" border="%s" summary="%s" class="tablesorter">\n'\
% (self.name, self.border, self.summary)
yield '<thead>\n'
yield '<tr>%s</tr>\n' % ''.join(
'<th>%s</th>\n' % h for h in self.header)
yield '</thead>\n'
yield '<tbody\n>'
for r, row in enumerate(self.rows):
yield '<tr class="%s">\n' % ["even", "odd"][r % 2]
for col in row:
yield '<td>%s</td>\n' % col
yield '</tr>\n'
yield '</tbody>\n'
yield '</table>\n'
JOB_STATS = '''
SELECT id, user_name, start_time, stop_time, status,
strftime('%s', stop_time) - strftime('%s', start_time) AS duration
FROM job WHERE id=$s;
'''
ALL_JOBS = '''
SELECT id, user_name, status, ds_calc_dir FROM job
WHERE start_time >= $s AND start_time < $s ORDER BY stop_time
'''
PAGE_TEMPLATE = '''\
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<script src="http://ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
<link rel="stylesheet" href="http://ajax.googleapis.com/ajax/libs/jqueryui/1.11.2/themes/smoothness/jquery-ui.css" />
<script src="http://ajax.googleapis.com/ajax/libs/jqueryui/1.11.2/jquery-ui.min.js"></script>
<script>
$(function() {
$("#tabs").tabs();
});
</script>
</head>
<body>
%s
</body>
</html>
'''
def make_tabs(tag_ids, tag_status, tag_contents):
"""
Return a HTML string containing all the tabs we want to display
"""
templ = '''
<div id="tabs">
<ul>
%s
</ul>
%s
</div>'''
lis = []
contents = []
for i, (tag_id, status, tag_content) in enumerate(
zip(tag_ids, tag_status, tag_contents), 1):
mark = '.' if status == 'complete' else '!'
lis.append('<li><a href="#tabs-%d">%s%s</a></li>' % (i, tag_id, mark))
contents.append('<div id="tabs-%d">%s</div>' % (
i, tag_content))
return templ % ('\n'.join(lis), '\n'.join(contents))
def make_report(isodate='today'):
"""
Build a HTML report with the computations performed at the given isodate.
Return the name of the report, which is saved in the current directory.
"""
if isodate == 'today':
isodate = date.today()
else:
isodate = date(*time.strptime(isodate, '%Y-%m-%d')[:3])
isodate1 = isodate + timedelta(1) # +1 day
tag_ids = []
tag_status = []
tag_contents = []
# the fetcher returns an header which is stripped with [1:]
jobs = dbcmd(
'fetch', ALL_JOBS, isodate.isoformat(), isodate1.isoformat())[1:]
page = '<h2>%d job(s) finished before midnight of %s</h2>' % (
len(jobs), isodate)
for job_id, user, status, ds_calc in jobs:
tag_ids.append(job_id)
tag_status.append(status)
[stats] = dbcmd('fetch', JOB_STATS, job_id)
(job_id, user, start_time, stop_time, status, duration) = stats
try:
ds = read(job_id, datadir=os.path.dirname(ds_calc))
txt = view_fullreport('fullreport', ds)
report = html_parts(txt)
except Exception as exc:
report = dict(
html_title='Could not generate report: %s' % cgi.escape(
unicode(exc), quote=True),
fragment='')
page = report['html_title']
page += html([stats._fields, stats])
page += report['fragment']
tag_contents.append(page)
page = make_tabs(tag_ids, tag_status, tag_contents) + (
'Report last updated: %s' % datetime.now())
fname = 'jobs-%s.html' % isodate
with open(fname, 'w') as f:
f.write(PAGE_TEMPLATE % page.encode('utf-8'))
return fname
|
Python
| 0.000002
|
@@ -5041,20 +5041,16 @@
ormat())
-%5B1:%5D
%0A pag
|
ec013d194e2b26155949bf89a5cd03ef4a013cc5
|
Add import unicode on csv_importer
|
passpie/importers/csv_importer.py
|
passpie/importers/csv_importer.py
|
import csv
from passpie.importers import BaseImporter
from passpie._compat import is_python2
def unicode_csv_reader(utf8_data, dialect=csv.excel, **kwargs):
csv_reader = csv.reader(utf8_data, dialect=dialect, **kwargs)
for row in csv_reader:
if is_python2():
yield [unicode(cell, 'utf-8') for cell in row]
else:
yield [str(cell) for cell in row]
class CSVImporter(BaseImporter):
def match(self, filepath):
"""Dont match this importer"""
return False
def handle(self, filepath, cols):
credentials = []
with open(filepath) as csv_file:
reader = unicode_csv_reader(csv_file)
try:
next(reader)
except StopIteration:
raise ValueError('empty csv file: %s' % filepath)
for row in reader:
credential = {
'name': row[cols['name']],
'login': row[cols.get('login', '')],
'password': row[cols['password']],
'comment': row[cols.get('comment', '')],
}
credentials.append(credential)
return credentials
|
Python
| 0.000004
|
@@ -85,16 +85,25 @@
_python2
+, unicode
%0A%0A%0Adef u
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.