commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
4dac5069084e90a0c4b0fd12e763e92df79f31c5
|
rename ds_justification_reason to justification_reason - add migration
|
backend/unpp_api/apps/project/migrations/0017_auto_20170915_0734.py
|
backend/unpp_api/apps/project/migrations/0017_auto_20170915_0734.py
|
Python
| 0
|
@@ -0,0 +1,471 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.3 on 2017-09-15 07:34%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('project', '0016_remove_application_agency'),%0A %5D%0A%0A operations = %5B%0A migrations.RenameField(%0A model_name='application',%0A old_name='ds_justification_reason',%0A new_name='justification_reason',%0A ),%0A %5D%0A
|
|
d7f024bc47c362afc6930510dea3bc425d5b554a
|
create example_fabfile
|
pg_fabrep/example_fabfile.py
|
pg_fabrep/example_fabfile.py
|
Python
| 0.000002
|
@@ -0,0 +1,352 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0Afrom fabric.api import env, task%0Afrom pg_fabrep.tasks import *%0A%0A%0A@task%0Adef example_cluster():%0A # name of your cluster - no spaces, no special chars%0A env.cluster_name = 'example_cluster'%0A # always ask user for confirmation when run any tasks%0A # default: True%0A #env.ask_confirmation = True%0A
|
|
c4243483052ec7eec2f1f88ea72fafc953d35648
|
Add ptxgen sample
|
samples/ptxgen.py
|
samples/ptxgen.py
|
Python
| 0
|
@@ -0,0 +1,1920 @@
+# Copyright (c) 2013 NVIDIA Corporation%0A#%0A# Permission is hereby granted, free of charge, to any person obtaining a copy%0A# of this software and associated documentation files (the %22Software%22), to deal%0A# in the Software without restriction, including without limitation the rights%0A# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0A# copies of the Software, and to permit persons to whom the Software is%0A# furnished to do so, subject to the following conditions:%0A#%0A#%0A# The above copyright notice and this permission notice shall be included in%0A# all copies or substantial portions of the Software.%0A#%0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0A# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0A# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0A# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0A# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0A# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE%0A# SOFTWARE.%0A%0A%22%22%22%0AThis sample illustrates a simple LLVM IR -%3E PTX compiler implemented using%0AlibNVVM. All command-line options are passed along to libNVVM. 
Arguments that%0Astart with '-' are assumed to be options and are passed along accordingly.%0AOtherwise, options are treated as file names and are read as IR input(s).%0A%22%22%22%0A%0Aimport sys%0Afrom pynvvm.compiler import Program, ProgramException%0A%0A%0Aif len(sys.argv) %3C 2:%0A print('Usage: %25s ir-file-1 %5Bir-file-2 %5Bir-file-3 ...%5D%5D' %25 sys.argv%5B0%5D)%0A sys.exit(1)%0A%0Atry:%0A p = Program()%0A options = %5B%5D%0A for a in sys.argv%5B1:%5D:%0A if a.startswith('-'):%0A options.append(a)%0A else:%0A with open(a, 'rb') as f:%0A p.add_module(f.read())%0A ptx = p.compile(options)%0A print(ptx)%0Aexcept ProgramException as e:%0A print('ERROR:%5Cn%25s%5Cn' %25 repr(e))%0A%0A
|
|
ebc417be95bcec7b7a25dc1ad587f17b1bfa521d
|
Add download_student_forms
|
scripts/download_student_forms.py
|
scripts/download_student_forms.py
|
Python
| 0
|
@@ -0,0 +1,2621 @@
+#!/usr/bin/env python2.5%0A#%0A# Copyright 2011 the Melange authors.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0A%22%22%22Downloads student forms.%0A%22%22%22%0A%0A__authors__ = %5B%0A '%22Sverre Rabbelier%22 %3Csverre@rabbelier.nl%3E',%0A%5D%0A%0Aimport optparse%0Aimport os%0Aimport shutil%0A%0Aimport interactive%0A%0A%0Aparser = optparse.OptionParser(usage=%22usage: %25prog %5Boptions%5D app_id%22)%0Aparser.add_option(%22-o%22, %22--output%22, dest=%22outputdir%22, default=%22forms%22,%0A help=%22write files to target DIR%22, metavar=%22DIR%22)%0A%0A%0Adef downloadStudentForms(options):%0A from google.appengine.ext import db%0A from soc.modules.gsoc.models.profile import GSoCStudentInfo%0A from soc.modules.gsoc.views.helper import lists as list_helper%0A%0A q = lambda: GSoCStudentInfo.all().filter('number_of_projects', 1)%0A%0A outputdir = os.path.abspath(options.outputdir)%0A%0A if not os.path.exists(outputdir):%0A os.mkdir(outputdir)%0A%0A if not os.path.isdir(outputdir):%0A print %22Could not create output dir: %25s%22 %25 outputdir%0A%0A print %22Fetching StudentInfo...%22%0A students = list(i for i in interactive.deepFetch(q) if i.tax_form)%0A%0A keys = list_helper.collectParentKeys(students)%0A keys = list(set(keys))%0A%0A prefetched = %7B%7D%0A%0A print %22Fetching Profile...%22%0A%0A for i in xrange(0, len(keys), 100):%0A chunk = keys%5Bi:i+100%5D%0A entities = db.get(chunk)%0A prefetched.update(dict((i.key(), i) for i in 
entities if i))%0A%0A list_helper.distributeParentKeys(students, prefetched)%0A%0A countries = %5B'United States'%5D%0A us_students = %5Bi for i in students if i.parent().res_country in countries%5D%0A%0A for student in us_students:%0A form = student.tax_form%0A _, ext = os.path.splitext(form.filename)%0A path = os.path.join(outputdir, student.parent().link_id + ext)%0A dst = open(path, %22w%22)%0A src = form.open()%0A shutil.copyfileobj(src, dst)%0A print %22Downloading form to '%25s'...%22 %25 path%0A%0A print %22Done.%22%0A%0A%0Adef main():%0A options, args = parser.parse_args()%0A%0A if len(args) %3C 1:%0A parser.error(%22Missing app_id%22)%0A%0A if len(args) %3E 1:%0A parser.error(%22Too many arguments%22)%0A%0A interactive.setup()%0A interactive.setupRemote(args%5B0%5D)%0A%0A downloadStudentForms(options)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
013d793c6ebe7a4d426d6c2d823510f90b84d19e
|
Add a landmine to get rid of obselete test netscape plugins
|
build/get_landmines.py
|
build/get_landmines.py
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
import optparse
import sys
import landmine_utils
builder = landmine_utils.builder
distributor = landmine_utils.distributor
gyp_defines = landmine_utils.gyp_defines
gyp_msvs_version = landmine_utils.gyp_msvs_version
platform = landmine_utils.platform
def print_landmines(target):
"""
ALL LANDMINES ARE EMITTED FROM HERE.
target can be one of {'Release', 'Debug', 'Debug_x64', 'Release_x64'}.
"""
if (distributor() == 'goma' and platform() == 'win32' and
builder() == 'ninja'):
print 'Need to clobber winja goma due to backend cwd cache fix.'
if platform() == 'android':
print 'Clobber: Resources removed in r195014 require clobber.'
if platform() == 'win' and builder() == 'ninja':
print 'Compile on cc_unittests fails due to symbols removed in r185063.'
if platform() == 'linux' and builder() == 'ninja':
print 'Builders switching from make to ninja will clobber on this.'
if platform() == 'mac':
print 'Switching from bundle to unbundled dylib (issue 14743002).'
if platform() in ('win', 'mac'):
print ('Improper dependency for create_nmf.py broke in r240802, '
'fixed in r240860.')
if (platform() == 'win' and builder() == 'ninja' and
gyp_msvs_version() == '2012' and
gyp_defines().get('target_arch') == 'x64' and
gyp_defines().get('dcheck_always_on') == '1'):
print "Switched win x64 trybots from VS2010 to VS2012."
if (platform() == 'win' and builder() == 'ninja' and
gyp_msvs_version().startswith('2013')):
print "Switched win from VS2010 to VS2013."
print 'Need to clobber everything due to an IDL change in r154579 (blink)'
def main():
parser = optparse.OptionParser()
parser.add_option('-t', '--target',
help=='Target for which the landmines have to be emitted')
options, args = parser.parse_args()
if args:
parser.error('Unknown arguments %s' % args)
print_landmines(options.target)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0.000001
|
@@ -1931,16 +1931,113 @@
blink)'%0A
+ if (platform() != 'ios'):%0A print 'Clobber to get rid of obselete test plugin after r248358'%0A
%0A%0Adef ma
|
5401eb7b463dfd9a807b86b7bdfa4079fc0cb2ac
|
Define basic regular expressions
|
autoload/vimwiki_pytasks.py
|
autoload/vimwiki_pytasks.py
|
import vim
import re
from tasklib import task
"""
How this plugin works:
1.) On startup, it reads all the tasks and syncs info TW -> Vimwiki file. Task is identified by their
uuid.
2.) When saving, the opposite sync is performed (Vimwiki -> TW direction).
a) if task is marked as subtask by indentation, the dependency is created between
"""
INCOMPLETE_TASK_REGEXP = (
"\v\* \[[^X]\].*" # any amount of whitespace followed by uncompleted square
# Any of the following:
"(\(\d{4}-\d\d-\d\d( \d\d:\d\d)?\)" # Timestamp
"|#TW\s*$" # Task indicator (insert this to have the task added)
"|#[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})" # Task UUID
)
TASK_REGEXP = '#TW'
tw = task.TaskWarrior()
class Random(object):
attr = 'Ta dpc'
r = Random()
def get_task(uuid):
return tw.tasks.get(uuid=uuid)
def load_tasks():
valid_tasks = [line for line in vim.current.buffer if re.search(TASK_REGEXP, line)]
for line in valid_tasks:
vim.command('echom "%s"' % line)
r.attr = 'Whoohoooo'
def RandomExample():
vim.command('echom "volame daco"')
vim.command('echom "%s"' % r.attr)
def RandomExample3():
r.attr = r.attr + 'XO'
vim.command('echom "Random example 3"')
if __name__ == '__main__':
load_tasks()
|
Python
| 0.000304
|
@@ -14,16 +14,17 @@
port re%0A
+%0A
from tas
@@ -31,16 +31,21 @@
klib
+.task
import
task
@@ -40,20 +40,1038 @@
import
-task
+TaskWarrior, Task%0A%0A# Building blocks%0ABRACKET_OPENING = re.escape('* %5B')%0ABRACKET_CLOSING = re.escape('%5D ')%0AEMPTY_SPACE = r'(?P%3Cspace%3E%5Cs*)'%0ATEXT = r'(?P%3Ctext%3E.+)'%0AUUID = r'(?P%3Cuuid%3E%5B0-9a-fA-F%5D%7B8%7D-%5B0-9a-fA-F%5D%7B4%7D-%5B0-9a-fA-F%5D%7B4%7D-%5B0-9a-fA-F%5D%7B4%7D-%5B0-9a-fA-F%5D%7B12%7D)'%0ADUE = r'(?P%3Cdue%3E%5C(%5Cd%7B4%7D-%5Cd%5Cd-%5Cd%5Cd( %5Cd%5Cd:%5Cd%5Cd)?%5C))'%0ACOMPLETION_MARK = r'(?P%3Ccompleted%3E.)'%0AUUID_COMMENT = ' #%7B0%7D'.format(UUID)%0A%0A# Middle building blocks%0AINCOMPLETE_TASK_PREFIX = EMPTY_SPACE + BRACKET_OPENING + '%5B%5EX%5D' + BRACKET_CLOSING + TEXT%0A%0A# Final regexps%0ATASKS_TO_SAVE_TO_TW = ''.join(%5B%0A INCOMPLETE_TASK_PREFIX, # any amount of whitespace followed by uncompleted square%0A # Any of the following:%0A '(',%0A UUID_COMMENT, # Task UUID %0A ')?'%0A%5D)%0A%0AGENERIC_TASK = ''.join(%5B%0A EMPTY_SPACE,%0A BRACKET_OPENING,%0A COMPLETION_MARK,%0A BRACKET_CLOSING,%0A TEXT,%0A '(', DUE, ')?' # Due is optional%0A '(', UUID_COMMENT, ')?' # UUID is optional, it can't be there for new tasks%0A%5D)%0A%0Awith open(%22vystup%22, 'w') as f:%0A f.write(TASKS_TO_SAVE_TO_TW)%0A
%0A%0A%22%22%22%0AHo
|
b9fbc458ed70e6bffdfc5af9d9f0ad554b6c0cec
|
Decrease memory usage of LSTM text gen example
|
examples/lstm_text_generation.py
|
examples/lstm_text_generation.py
|
from __future__ import print_function
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.datasets.data_utils import get_file
import numpy as np
import random, sys
'''
Example script to generate text from Nietzsche's writings.
At least 20 epochs are required before the generated text
starts sounding coherent.
It is recommended to run this script on GPU, as recurrent
networks are quite computationally intensive.
If you try this script on new data, make sure your corpus
has at least ~100k characters. ~1M is better.
'''
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
text = open(path).read().lower()
print('corpus length:', len(text))
chars = set(text)
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 20
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i : i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)))
y = np.zeros((len(sentences), len(chars)))
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1.
y[i, char_indices[next_chars[i]]] = 1.
# build the model: 2 stacked LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(len(chars), 512, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(512, 512, return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(512, len(chars)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# helper function to sample an index from a probability array
def sample(a, diversity=0.75):
if random.random() > diversity:
return np.argmax(a)
while 1:
i = random.randint(0, len(a)-1)
if a[i] > random.random():
return i
# train the model, output generated text after each iteration
for iteration in range(1, 60):
print()
print('-' * 50)
print('Iteration', iteration)
model.fit(X, y, batch_size=128, nb_epoch=1)
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.4, 0.6, 0.8]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index : start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for iteration in range(400):
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
|
Python
| 0
|
@@ -1333,16 +1333,31 @@
(chars))
+, dtype=np.bool
)%0Ay = np
@@ -1391,16 +1391,31 @@
(chars))
+, dtype=np.bool
)%0Afor i,
@@ -1528,17 +1528,16 @@
ar%5D%5D = 1
-.
%0A y%5Bi
@@ -1570,17 +1570,16 @@
i%5D%5D%5D = 1
-.
%0A%0A%0A# bui
|
08402e98f9eb56ab3b103e5bf36004638461f903
|
Add koi7-to-utf8 script.
|
languages/python/koi7-to-utf8.py
|
languages/python/koi7-to-utf8.py
|
Python
| 0
|
@@ -0,0 +1,981 @@
+#!/usr/bin/python%0A# -*- encoding: utf-8 -*-%0A#%0A# %D0%9F%D0%B5%D1%80%D0%B5%D0%BA%D0%BE%D0%B4%D0%B8%D1%80%D0%BE%D0%B2%D0%BA%D0%B0 %D0%B8%D0%B7 %D1%81%D0%B5%D0%BC%D0%B8%D0%B1%D0%B8%D1%82%D0%BD%D0%BE%D0%B3%D0%BE %D0%BA%D0%BE%D0%B4%D0%B0 %D0%9A%D0%9E%D0%98-7 %D0%9D2%0A# (%D0%BA%D0%BE%D0%B4%D1%8B %D0%B4%D0%B8%D1%81%D0%BF%D0%BB%D0%B5%D1%8F Videoton-340) %D0%B2 %D0%BA%D0%BE%D0%B4%D0%B8%D1%80%D0%BE%D0%B2%D0%BA%D1%83 UTF-8.%0A# Copyright (C) 2016 Serge Vakulenko %3Cvak@cronyx.ru%3E%0A#%0Aimport sys%0A%0Aif len(sys.argv) != 2:%0A print %22Usage: koi7-to-utf8 file%22%0A sys.exit (1)%0A%0Atranslate = %7B%0A '%60':'%D0%AE', 'a':'%D0%90', 'b':'%D0%91', 'c':'%D0%A6', 'd':'%D0%94', 'e':'%D0%95', 'f':'%D0%A4', 'g':'%D0%93',%0A 'h':'%D0%A5', 'i':'%D0%98', 'j':'%D0%99', 'k':'%D0%9A', 'l':'%D0%9B', 'm':'%D0%9C', 'n':'%D0%9D', 'o':'%D0%9E',%0A 'p':'%D0%9F', 'q':'%D0%AF', 'r':'%D0%A0', 's':'%D0%A1', 't':'%D0%A2', 'u':'%D0%A3', 'v':'%D0%96', 'w':'%D0%92',%0A 'x':'%D0%AC', 'y':'%D0%AB', 'z':'%D0%97', '%7B':'%D0%A8', '%7C':'%D0%AD', '%7D':'%D0%A9', '~':'%D0%A7',%0A%7D%0A%0A# %D0%92%D1%81%D1%82%D1%80%D0%B5%D1%82%D0%B8%D0%BB%D0%B0%D1%81%D1%8C %D1%80%D1%83%D1%81%D1%81%D0%BA%D0%B0%D1%8F %D0%B1%D1%83%D0%BA%D0%B2%D0%B0, %D0%BE%D0%B1%D1%80%D0%B0%D0%B1%D0%B0%D1%82%D1%8B%D0%B2%D0%B0%D0%B5%D0%BC%0Adef decode_index(i):%0A global body%0A c = body%5Bi%5D%0A #print c,%0A%0A body = body%5B:i%5D + translate%5Bc%5D + body%5Bi+1:%5D%0A%0A# %D0%9E%D0%B1%D1%80%D0%B0%D0%B1%D0%B0%D1%82%D1%8B%D0%B2%D0%B0%D0%B5%D0%BC %D1%84%D0%B0%D0%B9%D0%BB%0Af = open(sys.argv%5B1%5D)%0Abody = f.read().encode(%22utf-8%22)%0Ai = 0%0Awhile i %3C len(body):%0A next = i+1%0A%0A c = body%5Bi%5D%0A if c %3E= '%60' and c %3C= '~':%0A decode_index(i)%0A%0A i = next%0A%0Asys.stdout.write(body)%0A
|
|
815845fd98627fe9df0b0444ee31fe337d1c63da
|
Add celery worker module
|
utils/celery_worker.py
|
utils/celery_worker.py
|
Python
| 0.000001
|
@@ -0,0 +1,1121 @@
+import os%0Aimport sys%0A# Append .. to sys path%0Asys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))%0Aimport multiscanner%0A%0Afrom celery import Celery%0Afrom celery.contrib.batches import Batches%0A%0Aapp = Celery('celery_worker', broker='pyamqp://guest@localhost//')%0A%0A@app.task(base=Batches, flush_every=100, flush_interval=10)%0Adef multiscanner_celery(filelist, config=multiscanner.CONFIG):%0A '''%0A TODO: Add other ars + config options...%0A This function essentially takes in a file list and runs%0A multiscanner on them. Results are stored in the%0A storage configured in storage.ini.%0A%0A Usage:%0A from celery_worker import multiscanner_celery%0A multiscanner_celery.delay(%5Blist, of, files, to, scan%5D)%0A '''%0A storage_conf = multiscanner.common.get_storage_config_path(config)%0A storage_handler = multiscanner.storage.StorageHandler(configfile=storage_conf)%0A%0A resultlist = multiscanner.multiscan(filelist, configfile=config)%0A results = multiscanner.parse_reports(resultlist, python=True)%0A%0A storage_handler.store(results, wait=False)%0A storage_handler.close()%0A return results%0A
|
|
60e37ece40e96ecd9bba16b72cdb64e1eb6f8f77
|
Fix purge_cluster script
|
utils/purge_cluster.py
|
utils/purge_cluster.py
|
# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
""" Simple utility to clean out existing namespaces on engines. """
from __future__ import print_function
import sys
from distarray.context import Context
def dump():
""" Print out key names that exist on the engines. """
context = Context()
keylist = context.dump_keys(all_other_contexts=True)
num_keys = len(keylist)
print('*** %d ENGINE KEYS ***' % (num_keys))
for key, targets in keylist:
print('%s : %r' % (key, targets))
def purge():
""" Remove keys from the engine namespaces. """
print('Purging keys from engines...')
context = Context()
context.purge_keys(all_other_contexts=True)
if __name__ == '__main__':
cmd = sys.argv[1]
if cmd == 'dump':
dump()
elif cmd == 'purge':
purge()
else:
raise ValueError("%s command not found" % (cmd,))
|
Python
| 0.000001
|
@@ -923,18 +923,15 @@
ext.
-purge_keys
+cleanup
(all
|
43629166927a0e6e7f4648a165ce12e22b32508d
|
Add missing migration for DiscoveryItem (#15913)
|
src/olympia/discovery/migrations/0010_auto_20201104_1424.py
|
src/olympia/discovery/migrations/0010_auto_20201104_1424.py
|
Python
| 0
|
@@ -0,0 +1,467 @@
+# Generated by Django 2.2.16 on 2020-11-04 14:24%0A%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('discovery', '0009_auto_20201027_1903'),%0A %5D%0A%0A operations = %5B%0A migrations.RemoveField(%0A model_name='discoveryitem',%0A name='custom_addon_name',%0A ),%0A migrations.RemoveField(%0A model_name='discoveryitem',%0A name='custom_heading',%0A ),%0A %5D%0A
|
|
c5f91aa604ccca0966be3076c46385d6019b65f2
|
Add utils refresh_db
|
APITaxi/utils/refresh_db.py
|
APITaxi/utils/refresh_db.py
|
Python
| 0.000001
|
@@ -0,0 +1,665 @@
+# -*- coding: utf-8 -*-%0A#Source: http://dogpilecache.readthedocs.org/en/latest/usage.html%0A%0Afrom sqlalchemy import event%0Afrom sqlalchemy.orm import Session%0A%0Adef cache_refresh(session, refresher, *args, **kwargs):%0A %22%22%22%0A Refresh the functions cache data in a new thread. Starts refreshing only%0A after the session was committed so all database data is available.%0A %22%22%22%0A assert isinstance(session, Session), %5C%0A %22Need a session, not a sessionmaker or scoped_session%22%0A%0A @event.listens_for(session, %22after_commit%22)%0A def do_refresh(session):%0A t = Thread(target=refresher, args=args, kwargs=kwargs)%0A t.daemon = True%0A t.start()%0A
|
|
4cba006f440ebf219eb2cb64dd322e0168bdc3bb
|
Was cat_StartdLogs.py
|
factory/tools/find_StartdLogs.py
|
factory/tools/find_StartdLogs.py
|
Python
| 0.998194
|
@@ -0,0 +1,1764 @@
+#!/bin/env python%0A#%0A# cat_StartdLogs.py%0A#%0A# Print out the StartdLogs for a certain date%0A#%0A# Usage: cat_StartdLogs.py %3Cfactory%3E YY/MM/DD %5Bhh:mm:ss%5D%0A#%0A%0Aimport sys,os,os.path,time%0Asys.path.append(%22lib%22)%0Asys.path.append(%22..%22)%0Asys.path.append(%22../../lib%22)%0Aimport gWftArgsHelper,gWftLogParser%0Aimport glideFactoryConfig%0A%0AUSAGE=%22Usage: cat_StartdLogs.py %3Cfactory%3E YY/MM/DD %5Bhh:mm:ss%5D%22%0A%0A# return a GlideinDescript with%0A# factory_dir, date_arr and time_arr%0Adef parse_args():%0A if len(sys.argv)%3C3:%0A raise ValueError,%22Not enough arguments!%22%0A%0A factory_dir=sys.argv%5B1%5D%0A try:%0A glideFactoryConfig.factoryConfig.glidein_descript_file=os.path.join(factory_dir,glideFactoryConfig.factoryConfig.glidein_descript_file)%0A glideinDescript=glideFactoryConfig.GlideinDescript()%0A except:%0A raise ValueError,%22%25s is not a factory!%22%25factory_dir%0A%0A glideinDescript.factory_dir=factory_dir%0A glideinDescript.date_arr=gWftArgsHelper.parse_date(sys.argv%5B2%5D)%0A if len(sys.argv)%3E=4:%0A glideinDescript.time_arr=gWftArgsHelper.parse_time(sys.argv%5B3%5D)%0A else:%0A glideinDescript.time_arr=(0,0,0)%0A%0A return glideinDescript%0A%0Adef main():%0A try:%0A glideinDescript=parse_args()%0A except ValueError, e:%0A sys.stderr.write(%22%25s%5Cn%5Cn%25s%5Cn%22%25(e,USAGE))%0A sys.exit(1)%0A entries=glideinDescript.data%5B'Entries'%5D.split(',')%0A%0A log_list=gWftLogParser.get_glidein_logs(glideinDescript.factory_dir,entries,glideinDescript.date_arr,glideinDescript.time_arr,%22err%22)%0A for fname in log_list:%0A sys.stdout.write(%22%25s%5Cn%22%25fname)%0A sys.stdout.write(%22===========================================================%5Cn%22)%0A sys.stdout.write(%22%25s%5Cn%22%25gWftLogParser.get_StartdLog(fname))%0A %0A%0A%0Aif __name__ == '__main__':%0A main()%0A %0A
|
|
fd97b0e0edffa331d11ba7961637eb03ea5b8881
|
Save an RPC call on each request, make sure we have a django_user before trying to update it
|
djangae/contrib/gauth/middleware.py
|
djangae/contrib/gauth/middleware.py
|
from django.contrib.auth import authenticate, login, logout, get_user, BACKEND_SESSION_KEY, load_backend
from django.contrib.auth.middleware import AuthenticationMiddleware as DjangoMiddleware
from django.contrib.auth.models import BaseUserManager, AnonymousUser
from djangae.contrib.gauth.backends import AppEngineUserAPI
from google.appengine.api import users
class AuthenticationMiddleware(DjangoMiddleware):
def process_request(self, request):
django_user = get_user(request)
google_user = users.get_current_user()
if django_user.is_anonymous() and google_user:
# If there is a google user, but we are anonymous, log in!
django_user = authenticate(google_user=google_user)
if django_user:
login(request, django_user)
elif not django_user.is_anonymous() and not google_user:
# If we are logged in with django, but not longer logged in with Google
# then log out
logout(request)
django_user = None
elif not django_user.is_anonymous() and django_user.username != google_user.user_id():
# If the Google user changed, we need to log in with the new one
logout(request)
django_user = authenticate(google_user=google_user)
if django_user:
login(request, django_user)
request.user = django_user or AnonymousUser()
backend_str = request.session.get(BACKEND_SESSION_KEY)
if backend_str:
backend = load_backend(backend_str)
# We only do this next bit if the user was authenticated with the AppEngineUserAPI
# backend, or one of its subclasses
if isinstance(backend, AppEngineUserAPI):
# Now make sure we update is_superuser and is_staff appropriately
is_superuser = users.is_current_user_admin()
google_email = BaseUserManager.normalize_email(users.get_current_user().email())
resave = False
if is_superuser != django_user.is_superuser:
django_user.is_superuser = django_user.is_staff = is_superuser
resave = True
# for users which already exist, we want to verify that their email is still correct
if django_user.email != google_email:
django_user.email = google_email
resave = True
if resave:
django_user.save()
|
Python
| 0
|
@@ -1513,16 +1513,32 @@
kend_str
+ and django_user
:%0A
@@ -1987,32 +1987,19 @@
ail(
-users.get_current
+google
_user
-()
.ema
|
38bfc1a536f43ece367a49a62501b57c89f689a1
|
Add script to delete tables.
|
django-server/feel/core/db/reset.py
|
django-server/feel/core/db/reset.py
|
Python
| 0
|
@@ -0,0 +1,511 @@
+from django.db.models.base import ModelBase%0A%0Afrom quiz.models import Quiz, ShortAnswer, Choice, QuizAttempt%0Afrom codequiz.models import CodeQuiz, CodeQuizAttempt%0A%0Afrom concept.models import Concept, ConceptSection%0Afrom course.models import Course, CourseSlug, CourseConcept, ConceptDependency%0A%0A%0Adef reset():%0A for key, item in globals().items():%0A if type(item) == ModelBase and item != ModelBase:%0A Model = item%0A Model.objects.all().delete()%0A%0Aif __name__ == '__main__':%0A reset()
|
|
4b3c3fb315c0f7450dd87a98e3d7f928408a8ab4
|
add documentation for do_layout() method
|
kivy/uix/layout.py
|
kivy/uix/layout.py
|
'''
Layout
======
Layouts are used to calculate and assign widget positions.
The :class:`Layout` class itself cannot be used directly. You must use one of:
- Anchor layout : :class:`kivy.uix.anchorlayout.AnchorLayout`
- Box layout : :class:`kivy.uix.boxlayout.BoxLayout`
- Float layout : :class:`kivy.uix.floatlayout.FloatLayout`
- Grid layout : :class:`kivy.uix.gridlayout.GridLayout`
- Stack layout : :class:`kivy.uix.stacklayout.StackLayout`
Understanding `size_hint` property in `Widget`
----------------------------------------------
The :data:`~kivy.uix.Widget.size_hint` is mostly used in Layout. This is the
size in percent, not in pixels. The format is::
widget.size_hint = (width_percent, height_percent)
The percent is specified as a floating point number in the range 0-1, ie 0.5
is 50%, 1 is 100%.
If you want a widget's width to be half of the parent's and their heights to
be identical, you can do::
widget.size_hint = (0.5, 1.0)
If you don't want to use size_hint for one of width or height, set the value to
None. For example, to make a widget that is 250px wide and 30% of the parent's
height, you can write::
widget.size_hint = (None, 0.3)
widget.width = 250
'''
__all__ = ('Layout', )
from kivy.clock import Clock
from kivy.uix.widget import Widget
class Layout(Widget):
'''Layout interface class, used to implement every layout. Check module
documentation for more information.
'''
def __init__(self, **kwargs):
if self.__class__ == Layout:
raise Exception('The Layout class cannot be used.')
kwargs.setdefault('size', (1, 1))
self._trigger_layout = Clock.create_trigger(self.do_layout, -1)
super(Layout, self).__init__(**kwargs)
def reposition_child(self, child, **kwargs):
'''Force the child to be repositioned on the screen. This method is used
internally in boxlayout.
'''
for prop in kwargs:
child.__setattr__(prop, kwargs[prop])
def do_layout(self, *largs):
pass
def add_widget(self, widget, index=0):
widget.bind(
size = self._trigger_layout,
size_hint = self._trigger_layout)
return super(Layout, self).add_widget(widget, index)
def remove_widget(self, widget):
widget.unbind(
size = self._trigger_layout,
size_hint = self._trigger_layout)
return super(Layout, self).remove_widget(widget)
|
Python
| 0.000001
|
@@ -2021,24 +2021,276 @@
f, *largs):%0A
+ '''This function is called when a layout is needed, with by a trigger.%0A If you are doing a new Layout subclass, don't call this function%0A directly, use :prop:%60_trigger_layout%60 instead.%0A%0A .. versionadded:: 1.0.8%0A '''%0A
pass
|
586c047cebd679f6a736c2dfec9b6df762938b12
|
Add command line tool.
|
simulate_packs.py
|
simulate_packs.py
|
Python
| 0.000001
|
@@ -0,0 +1,739 @@
+#!/usr/local/bin/python3%0Aimport argparse%0Aimport Panini%0Afrom Panini import StickerCollection%0Afrom Accumulator import Accumulator%0A%0Aparser = argparse.ArgumentParser('Simulate creating a Panini sticker collection')%0A%0Aparser.add_argument('runs', metavar='N', type= int)%0A%0Aruns = parser.parse_args().runs%0A%0Aresults = Accumulator.Accumulator()%0A%0Afor i in (1, runs+1):%0A collection = StickerCollection.StickerCollection(Panini.NUMBER_OF_STICKERS, Panini.STICKERS_TO_REQUEST, Panini.STICKERS_PER_PACK)%0A results.add_value(collection.packs_till_complete())%0A%0Aprint(%22Number of runs: %7B0%7D%22.format(runs))%0Aprint(%22Average number of packs needed: %7B0%7D%22.format(results.average()))%0Aprint(%22Standard deviation of packs: %7B0%7D%22.format(results.standard_deviation()))
|
|
bad6bc988f09bf1f135d81eb654c5fc6c1de9a28
|
add standalone gene report script. It will be used in the NDEx server as the GSEA exporter.
|
cx2grp.py
|
cx2grp.py
|
Python
| 0
|
@@ -0,0 +1,2621 @@
+#!/usr/bin/python%0A%0A'''%0AThis script takes a CX network from stdin and print out a set of gene symbols found in node names,%0Arepresents, alias and function terms.%0AGene Symbols are normallized to human genes using mygene.info services.%0A%0A'''%0A%0Aimport sys,json%0Aimport requests%0A%0Adef terms_from_function_term(function_term, term_set):%0A # if it is a function term, process all genes mentioned%0A for parameter in function_term%5B'args'%5D:%0A%0A if type(parameter) in (str, unicode):%0A add_term( term_set,parameter)%0A else:%0A terms_from_function_term(parameter, term_set)%0A%0Adef query_to_gene_all(q, tax_id='9606'):%0A r = requests.get('http://mygene.info/v2/query?q='+q+'&fields=symbol%252Centrezgene%252Censemblgene%252Cuniprot%252Calias&species='+tax_id+'&entrezonly=true')%0A result = r.json()%0A hits = result.get(%22hits%22)%0A if hits and len(hits) %3E 0:%0A hit = hits%5B0%5D%0A gene = hit.get('symbol')%0A return gene%0A return None%0A%0Adef add_term (term_set,term):%0A term_set.add(term)%0A words = term.split(%22:%22)%0A if ( len(words)%3E1) :%0A del words%5B0%5D%0A term_set.add(%22:%22.join(words))%0A%0A%0Adef main():%0A data = json.load(sys.stdin)%0A # f = open(%22/Users/abc/Downloads/S1P5 pathway.cx%22,%22r%22)%0A # data = json.load(f)%0A%0A namespaces = %7B%7D%0A%0A terms = set()%0A%0A for aspect in data:%0A if '@context' in aspect:%0A elements = aspect%5B'@context'%5D%0A if len(elements) %3E 0:%0A if len(elements) %3E 1 or namespaces:%0A raise RuntimeError('@context aspect can only have one element')%0A else:%0A namespaces = elements%5B0%5D%0A elif 'nodes' in aspect:%0A for node in aspect.get('nodes'):%0A if 'n' in node and node%5B'n'%5D:%0A add_term(terms,node%5B'n'%5D)%0A if 'r' in node and node%5B'r'%5D:%0A add_term(terms,node%5B'r'%5D)%0A elif %22nodeAttributes%22 in aspect:%0A for attr in aspect %5B%22nodeAttributes%22%5D:%0A if attr%5B%22n%22%5D == %22name%22 :%0A add_term(terms, attr%5B%22v%22%5D)%0A elif attr%5B%22n%22%5D == 
%22alias%22:%0A for alias in attr%5B'v'%5D:%0A add_term (terms, alias)%0A elif %22functionTerms%22 in aspect:%0A for functionTerm in aspect%5B'functionTerms'%5D:%0A terms_from_function_term(functionTerm,terms)%0A%0A%0A genes =set()%0A for term in terms :%0A gene =query_to_gene_all(term)%0A if gene :%0A genes.add(gene)%0A%0A for gene in genes:%0A sys.stdout.write(gene+ %22%5Cn%22)%0A%0A%0A sys.stdout.flush()%0A%0A%0A%0Aif __name__ == '__main__':%0A main()
|
|
9008d6e3d14a5a582f0ddbd6b4a113386b639f26
|
Add Pyramid parser module
|
webargs/pyramidparser.py
|
webargs/pyramidparser.py
|
Python
| 0.000002
|
@@ -0,0 +1,3308 @@
+# -*- coding: utf-8 -*-%0A%22%22%22Pyramid request argument parsing.%0A%0AExample usage: ::%0A%0A from wsgiref.simple_server import make_server%0A from pyramid.config import Configurator%0A from pyramid.response import Response%0A from webargs import Arg%0A from webargs.pyramidparser import use_args%0A%0A hello_args = %7B%0A 'name': Arg(str, default='World')%0A %7D%0A%0A @use_args(hello_args)%0A def hello_world(request, args):%0A return Response('Hello ' + args%5B'name'%5D)%0A%0A if __name__ == '__main__':%0A config = Configurator()%0A config.add_route('hello', '/')%0A config.add_view(hello_world, route_name='hello')%0A app = config.make_wsgi_app()%0A server = make_server('0.0.0.0', 6543, app)%0A server.serve_forever()%0A%22%22%22%0Aimport functools%0Aimport logging%0A%0Afrom webargs import core%0A%0Alogger = logging.getLogger(__name__)%0A%0Aclass PyramidParser(core.Parser):%0A %22%22%22Pyramid request argument parser.%22%22%22%0A%0A def parse_querystring(self, req, name, arg):%0A %22%22%22Pull a querystring value from the request.%22%22%22%0A return core.get_value(req.GET, name, arg.multiple)%0A%0A def parse_form(self, req, name, arg):%0A %22%22%22Pull a form value from the request.%22%22%22%0A return core.get_value(req.POST, name, arg.multiple)%0A%0A def parse_json(self, req, name, arg):%0A %22%22%22Pull a json value from the request.%22%22%22%0A try:%0A json_data = req.json_body%0A except ValueError:%0A return core.Missing%0A%0A return core.get_value(json_data, name, arg.multiple)%0A%0A def parse_cookies(self, req, name, arg):%0A %22%22%22Pull the value from the cookiejar.%22%22%22%0A return core.get_value(req.cookies, name, arg.multiple)%0A%0A def parse_headers(self, req, name, arg):%0A %22%22%22Pull a value from the header data.%22%22%22%0A return core.get_value(req.headers, name, arg.multiple)%0A%0A def parse_files(self, req, name, arg):%0A raise NotImplementedError('Files parsing not supported by %7B0%7D'%0A .format(self.__class__.__name__))%0A%0A def 
use_args(self, argmap, req=None, targets=core.Parser.DEFAULT_TARGETS,%0A validate=None):%0A %22%22%22Decorator that injects parsed arguments into a view function or method.%0A%0A :param dict argmap: Dictionary of argument_name:Arg object pairs.%0A :param req: The request object to parse%0A :param tuple targets: Where on the request to search for values.%0A :param callable validate:%0A Validation function that receives the dictionary of parsed arguments.%0A If the function returns %60%60False%60%60, the parser will raise a%0A :exc:%60ValidationError%60.%0A %22%22%22%0A def decorator(func):%0A @functools.wraps(func)%0A def wrapper(obj, *args, **kwargs):%0A # The first argument is either %60self%60 or %60request%60%0A try: # get self.request%0A request = obj.request%0A except AttributeError: # first arg is request%0A request = obj%0A parsed_args = self.parse(argmap, req=request, targets=targets,%0A validate=None)%0A return func(obj, parsed_args, *args, **kwargs)%0A return wrapper%0A return decorator%0A%0Aparser = PyramidParser()%0Ause_args = parser.use_args%0Ause_kwargs = parser.use_kwargs%0A
|
|
5149d86c7e787eff46f21669d448158ba0905a41
|
Add dbck.py: a database check tool
|
dbck.py
|
dbck.py
|
Python
| 0.000004
|
@@ -0,0 +1,1303 @@
+#!/usr/bin/python%0A#%0A# dbck.py%0A#%0A# Distributed under the MIT/X11 software license, see the accompanying%0A# file COPYING or http://www.opensource.org/licenses/mit-license.php.%0A#%0A%0A%0Aimport sys%0Aimport Log%0Aimport MemPool%0Aimport ChainDb%0Aimport cStringIO%0A%0Afrom bitcoin.coredefs import NETWORKS%0Afrom bitcoin.core import CBlock%0Afrom bitcoin.scripteval import *%0A%0ANET_SETTINGS = %7B%0A%09'mainnet' : %7B%0A%09%09'log' : '/tmp/dbck.log',%0A%09%09'db' : '/tmp/chaindb'%0A%09%7D,%0A%09'testnet3' : %7B%0A%09%09'log' : '/tmp/dbcktest.log',%0A%09%09'db' : '/tmp/chaintest'%0A%09%7D%0A%7D%0A%0AMY_NETWORK = 'mainnet'%0A%0ASETTINGS = NET_SETTINGS%5BMY_NETWORK%5D%0A%0Alog = Log.Log(SETTINGS%5B'log'%5D)%0Amempool = MemPool.MemPool(log)%0Achaindb = ChainDb.ChainDb(SETTINGS%5B'db'%5D, log, mempool, NETWORKS%5BMY_NETWORK%5D)%0A%0Ascanned = 0%0Afailures = 0%0A%0Afor height in xrange(chaindb.getheight()):%0A%09heightidx = ChainDb.HeightIdx()%0A%09heightidx.deserialize(chaindb.height%5Bstr(height)%5D)%0A%0A%09blkhash = heightidx.blocks%5B0%5D%0A%09ser_hash = ser_uint256(blkhash)%0A%0A%09f = cStringIO.StringIO(chaindb.blocks%5Bser_hash%5D)%0A%09block = CBlock()%0A%09block.deserialize(f)%0A%0A%09if not block.is_valid():%0A%09%09log.write(%22block %25064x failed%22 %25 (blkhash,))%0A%09%09failures += 1%0A%0A%09scanned += 1%0A%09if (scanned %25 1000) == 0:%0A%09%09log.write(%22Scanned height %25d (%25d failures)%22 %25 (%0A%09%09%09height, failures))%0A%0A%0Alog.write(%22Scanned %25d blocks (%25d failures)%22 %25 (scanned, failures))%0A%0A
|
|
48bc3bfa4ab6648d3599af15cfe7a2dd69abdb40
|
make gsid ctable schedule run hourly
|
custom/apps/gsid/ctable_mappings.py
|
custom/apps/gsid/ctable_mappings.py
|
from ctable.fixtures import CtableMappingFixture
from ctable.models import ColumnDef, KeyMatcher
class PatientSummaryMapping(CtableMappingFixture):
name = 'patient_summary'
domains = ['gsid']
couch_view = 'gsid/patient_summary'
schedule_active = True
@property
def columns(self):
columns = [
ColumnDef(name="domain", data_type="string", value_source="key", value_index=0),
ColumnDef(name="disease_name", data_type="string", value_source="key", value_index=1),
ColumnDef(name="test_version", data_type="string", value_source="key", value_index=2),
ColumnDef(name="country", data_type="string", value_source="key", value_index=3),
ColumnDef(name="province", data_type="string", value_source="key", value_index=4),
ColumnDef(name="district", data_type="string", value_source="key", value_index=5),
ColumnDef(name="clinic", data_type="string", value_source="key", value_index=6),
ColumnDef(name="gender", data_type="string", value_source="key", value_index=7),
ColumnDef(name="date", data_type="date", value_source="key", value_index=8, date_format="%Y-%m-%d"),
ColumnDef(name="diagnosis", data_type="string", value_source="key", value_index=9),
ColumnDef(name="lot_number", data_type="string", value_source="key", value_index=10),
ColumnDef(name="gps", data_type="string", value_source="key", value_index=11),
ColumnDef(name="gps_country", data_type="string", value_source="key", value_index=12),
ColumnDef(name="gps_province", data_type="string", value_source="key", value_index=13),
ColumnDef(name="gps_district", data_type="string", value_source="key", value_index=14),
ColumnDef(name="age", data_type="integer", value_source="key", value_index=15),
ColumnDef(name="cases", data_type="integer", value_source="value", value_attribute="sum"),
]
return columns
|
Python
| 0.000001
|
@@ -2007,8 +2007,153 @@
columns%0A
+%0A def customize(self, mapping):%0A mapping.schedule_type = 'hourly'%0A mapping.schedule_hour = -1%0A mapping.schedule_day = -1%0A
|
94ebfd057eb5a7c7190d981b26c027573578606d
|
validate using validator module
|
modularodm/fields/StringField.py
|
modularodm/fields/StringField.py
|
from ..fields import Field
import weakref
class StringField(Field):
default = ''
def __init__(self, *args, **kwargs):
super(StringField, self).__init__(*args, **kwargs)
def validate(self, value):
if isinstance(value, unicode):
return True
else:
try:
value.decode('utf-8')
return True
except:
return False
|
Python
| 0.000001
|
@@ -24,23 +24,48 @@
eld%0A
-%0Aimport weakref
+from ..validators import StringValidator
%0A%0Acl
@@ -107,16 +107,50 @@
t = ''%0A%0A
+ validate = StringValidator()%0A%0A
def
@@ -182,16 +182,16 @@
wargs):%0A
+
@@ -244,246 +244,4 @@
rgs)
-%0A%0A def validate(self, value):%0A if isinstance(value, unicode):%0A return True%0A else:%0A try:%0A value.decode('utf-8')%0A return True%0A except:%0A return False%0A
|
5748666a1f2c6cd307be79c33117252e10d6df01
|
Add matchup script
|
mzalendo/kenya/management/commands/kenya_matchup_coords_to_place.py
|
mzalendo/kenya/management/commands/kenya_matchup_coords_to_place.py
|
Python
| 0.000001
|
@@ -0,0 +1,1511 @@
+import re %0Aimport csv%0Aimport sys%0A%0Afrom optparse import make_option%0A%0Afrom django.core.management.base import LabelCommand%0Afrom django.contrib.gis.geos import Point%0A%0Afrom mapit.models import Area, Generation, Type, NameType, Country%0A%0Aclass Command(LabelCommand):%0A %22%22%22Read a file in, extract coordinates and lookup the constituency.%0A %0A Outputs the coordinates, constituency slug and name as CSV to STDOUT%0A %0A input is one pair of coords per line, eg:%0A %0A (1.23, 4.56)%0A %0A %22%22%22%0A%0A help = 'Import KML data'%0A args = '%3CKML files%3E'%0A%0A writer = csv.writer(sys.stdout)%0A%0A def handle_label(self, input_coords, **options):%0A%0A with open(input_coords) as input_file:%0A for line in input_file.readlines():%0A self.process_line( line.strip() )%0A %0A %0A%0A%0A def process_line(self, raw_line):%0A %22%22%22Extract coords from line and output constituency found%22%22%22%0A %0A # print raw_line%0A line = re.sub(r'%5B%5E%5Cd.,%5C-%5D+', '', raw_line)%0A%0A lng, lat = map( lambda x: float(x), re.split(',', line) )%0A point = Point( lat, lng, srid=4326)%0A%0A # print point%0A %0A areas = Area.objects.by_location( point )%0A %0A output = %5B raw_line %5D%0A%0A if areas:%0A places = areas%5B0%5D.place_set.all()%0A if places:%0A place = places%5B0%5D %0A output.append( place.name )%0A output.append( place.slug )%0A%0A self.writer.writerow( output )%0A
|
|
9386236d41298ed8888a6774f40a15d44b7e53fe
|
Create command for Data Log Report fixtures
|
data_log/management/commands/generate_report_fixture.py
|
data_log/management/commands/generate_report_fixture.py
|
Python
| 0
|
@@ -0,0 +1,1070 @@
+from django.core.management.base import BaseCommand%0Afrom django.core import serializers%0A%0Afrom data_log import models%0Aimport json%0A%0A%0Aclass Command(BaseCommand):%0A help = 'Create Data Log Report fixtures'%0A%0A def handle(self, *args, **kwargs):%0A self.stdout.write('Creating fixtures for Data Log Reports...')%0A JSONSerializer = serializers.get_serializer(%22json%22)%0A j = JSONSerializer()%0A data = %5B%5D%0A models_to_serialize = %5B%0A models.LevelReport, %0A models.SummonReport, %0A models.MagicShopRefreshReport, %0A models.MagicBoxCraftingReport, %0A models.WishReport, %0A models.RuneCraftingReport%0A %5D%0A%0A for model in models_to_serialize:%0A data += json.loads(j.serialize(model.objects.order_by('-generated_on')%5B:100%5D))%0A %0A data += json.loads(j.serialize(models.Report.objects.order_by('-generated_on')%5B:1000%5D))%0A%0A with open(%22fixture_reports.json%22, %22w+%22) as f:%0A json.dump(data, f)%0A%0A self.stdout.write(self.style.SUCCESS('Done!'))%0A
|
|
ebf790c6c94131b79cb5da4de6cb665f97e54799
|
Add viewset permission class for checking image permissions
|
app/grandchallenge/cases/permissions.py
|
app/grandchallenge/cases/permissions.py
|
Python
| 0
|
@@ -0,0 +1,412 @@
+from rest_framework import permissions%0A%0Afrom grandchallenge.serving.permissions import user_can_download_image%0A%0A%0Aclass ImagePermission(permissions.BasePermission):%0A %22%22%22%0A Permission class for APIViews in retina app.%0A Checks if user is in retina graders or admins group%0A %22%22%22%0A%0A def has_object_permission(self, request, view, obj):%0A return user_can_download_image(user=request.user, image=obj)%0A
|
|
496754c54005cf7e1b49ada8e612207f5e2846ff
|
Add dead letter SQS queue example
|
python/example_code/sqs/dead_letter_queue.py
|
python/example_code/sqs/dead_letter_queue.py
|
Python
| 0.000002
|
@@ -0,0 +1,1006 @@
+# Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22). You%0A# may not use this file except in compliance with the License. A copy of%0A# the License is located at%0A#%0A# http://aws.amazon.com/apache2.0/%0A#%0A# or in the %22license%22 file accompanying this file. This file is%0A# distributed on an %22AS IS%22 BASIS, WITHOUT WARRANTIES OR CONDITIONS OF%0A# ANY KIND, either express or implied. See the License for the specific%0A# language governing permissions and limitations under the License.%0A%0Aimport json%0A%0Aimport boto3%0A%0A%0A# Create SQS client%0Asqs = boto3.client('sqs')%0A%0Aqueue_url = 'SOURCE_QUEUE_URL'%0Adead_letter_queue_arn = 'DEAD_LETTER_QUEUE_ARN'%0A%0Aredrive_policy = %7B%0A 'deadLetterTargetArn': dead_letter_queue_arn,%0A 'maxReceiveCount': '10'%0A%7D%0A%0A%0A# Configure queue to send messages to dead letter queue%0Asqs.set_queue_attributes(%0A QueueUrl=queue_url,%0A Attributes=%7B%0A 'RedrivePolicy': json.dumps(redrive_policy)%0A %7D%0A)%0A
|
|
085a9aa05dfda6348d0e7e2aa6ac7f0c6ce6d63b
|
add some basic client-server tests
|
client_server_tests.py
|
client_server_tests.py
|
Python
| 0.000001
|
@@ -0,0 +1,446 @@
+import pyopentxs%0A%0A# this is defined by the sample data%0ASERVER_ID = %22r1fUoHwJOWCuK3WBAAySjmKYqsG6G2TYIxdqY6YNuuG%22%0A%0A%0Adef test_check_server_id():%0A nym_id = pyopentxs.create_pseudonym()%0A assert pyopentxs.check_server_id(SERVER_ID, nym_id)%0A%0Adef test_register_nym():%0A nym_id = pyopentxs.create_pseudonym()%0A pyopentxs.register_nym(SERVER_ID, nym_id)%0A # returns server %22contract%22%0A # TODO: maybe perform checks on the returned contract%0A%0A
|
|
7fc64847ed45229220e9bdfe20c25f3c83f10a80
|
Add isup.py
|
isup.py
|
isup.py
|
Python
| 0.000001
|
@@ -0,0 +1,567 @@
+#!/usr/bin/env python%0Aimport re%0Aimport sys%0Afrom urllib.request import urlopen%0A%0Adef isup(domain):%0A request = urlopen(%22http://www.isup.me/%22 + domain).read()%0A if type(request) != type(''):%0A request = request.decode('utf-8')%0A return domain + %22 %22 + (%22UP%22 if %22It's just you%22 in request else %22DOWN%22)%0A%0Adef main(cmd, args):%0A if len(args):%0A for d in args:%0A print(isup(d))%0A else:%0A print(%22usage: %22 + cmd + %22 domain1 %5Bdomain2 .. domainN%5D%22)%0A%0Aif __name__ == '__main__':%0A main(sys.argv%5B0%5D, sys.argv%5B1:%5D if len(sys.argv) %3E 1 else %5B%5D)%0A
|
|
9c7935ebbd4d995c44526c91fdb3b647a15eb877
|
Create API tasks.py with update_char_data
|
evewspace/API/tasks.py
|
evewspace/API/tasks.py
|
Python
| 0.000003
|
@@ -0,0 +1,1239 @@
+# Eve W-Space%0A# Copyright 2014 Andrew Austin and contributors%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Afrom celery import task%0Afrom API.models import APIKey, MemberAPIKey%0Afrom django.core.cache import cache%0Afrom django.contrib.auth import get_user_model%0Aimport sys%0Areload(sys)%0Asys.setdefaultencoding(%22utf-8%22)%0A%0AUser = get_user_model()%0A%0A@task()%0Adef update_char_data():%0A #Get all users%0A user_list = User.objects.all()%0A for user in user_list:%0A #Get all API keys of a user %0A for key in user.api_keys.all():%0A #Grab key and validate%0A current_key = MemberAPIKey(user=user,%0A keyid=key.keyid,%0A vcode=key.vcode)%0A current_key.validate()%0A
|
|
bf0445cff09c62e2ad76b8ca922509ed3108e520
|
Improve the test that checks if the correct templates are being used
|
okupy/tests/integration/login.py
|
okupy/tests/integration/login.py
|
# vim:fileencoding=utf8:et:ts=4:sts=4:sw=4:ft=python
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.db import DatabaseError
from django.test.client import Client
from mockldap import MockLdap
from ...common.test_helpers import OkupyTestCase
import mock
class LoginTestsEmptyDB(OkupyTestCase):
cursor_wrapper = mock.Mock()
cursor_wrapper.side_effect = DatabaseError
account = {'username': 'alice', 'password': 'ldaptest'}
@classmethod
def setUpClass(cls):
cls.mockldap = MockLdap(settings.DIRECTORY)
def setUp(self):
self.client = Client()
self.mockldap.start()
self.ldapobject = self.mockldap[settings.AUTH_LDAP_SERVER_URI]
def tearDown(self):
self.mockldap.stop()
def test_template(self):
response = self.client.get('/login/')
self.assertIn('login_form', response.context)
self.assertIn('messages', response.context)
def test_empty_user(self):
response = self.client.post('/login/')
self.assertFormError(response, 'login_form', 'username', [u'This field is required.'])
self.assertFormError(response, 'login_form', 'password', [u'This field is required.'])
self.assertMessage(response, 'Login failed', 40)
self.assertEqual(User.objects.count(), 0)
def test_incorrect_user(self):
wrong_account = {'username': 'username', 'password': 'password'}
response = self.client.post('/login/', wrong_account)
self.assertMessage(response, 'Login failed', 40)
self.assertEqual(User.objects.count(), 0)
def test_correct_user(self):
account = self.account.copy()
account['next'] = ''
response = self.client.post('/login/', account)
self.assertRedirects(response, '/')
user = User.objects.get(pk=1)
self.assertEqual(User.objects.count(), 1)
self.assertEqual(user.username, 'alice')
self.assert_(not user.has_usable_password())
self.assertEqual(user.first_name, '')
self.assertEqual(user.last_name, '')
self.assertEqual(user.email, '')
def test_no_ldap(self):
self.mockldap.stop()
response = self.client.post('/login/', self.account)
self.assertMessage(response, 'Login failed', 40)
self.assertEqual(User.objects.count(), 0)
self.mockldap.start()
@mock.patch("django.db.backends.util.CursorWrapper", cursor_wrapper)
def test_no_database(self):
response = self.client.post('/login/', self.account)
self.assertMessage(response, "Can't contact the LDAP server or the database", 40)
self.assertEqual(len(mail.outbox), 1)
self.assertTrue(mail.outbox[0].subject.startswith('%sERROR:' % settings.EMAIL_SUBJECT_PREFIX))
def test_already_authenticated_user_redirects_to_index(self):
response = self.client.post('/login/', self.account)
response = self.client.get('/login/')
self.assertRedirects(response, '/')
def test_logout_for_logged_in_user(self):
response = self.client.post('/login/', self.account)
response = self.client.get('/logout/')
self.assertRedirects(response, '/login/')
def test_logout_for_anonymous_user(self):
response = self.client.get('/logout/')
self.assertRedirects(response, '/login/')
class LoginTestsOneAccountInDB(OkupyTestCase):
fixtures = ['alice']
account1 = {'username': 'alice', 'password': 'ldaptest'}
account2 = {'username': 'bob', 'password': 'ldapmoretest'}
@classmethod
def setUpClass(cls):
cls.mockldap = MockLdap(settings.DIRECTORY)
def setUp(self):
self.client = Client()
self.mockldap.start()
self.ldapobject = self.mockldap[settings.AUTH_LDAP_SERVER_URI]
def tearDown(self):
self.mockldap.stop()
def test_dont_authenticate_from_db_when_ldap_is_down(self):
self.mockldap.stop()
response = self.client.post('/login/', self.account1)
self.assertMessage(response, 'Login failed', 40)
self.assertEqual(User.objects.count(), 1)
self.mockldap.start()
def test_authenticate_account_that_is_already_in_db(self):
response = self.client.post('/login/', self.account1)
self.assertRedirects(response, '/')
user = User.objects.get(pk=1)
self.assertEqual(User.objects.count(), 1)
self.assertEqual(user.username, 'alice')
self.assert_(not user.has_usable_password())
self.assertEqual(user.first_name, '')
self.assertEqual(user.last_name, '')
self.assertEqual(user.email, '')
def test_authenticate_new_account(self):
response = self.client.post('/login/', self.account2)
self.assertRedirects(response, '/')
self.assertEqual(User.objects.count(), 2)
user1 = User.objects.get(pk=1)
self.assertEqual(user1.username, 'alice')
self.assert_(not user1.has_usable_password())
self.assertEqual(user1.first_name, '')
self.assertEqual(user1.last_name, '')
self.assertEqual(user1.email, '')
user2 = User.objects.get(pk=2)
self.assertEqual(user2.username, 'bob')
self.assert_(not user2.has_usable_password())
self.assertEqual(user2.first_name, '')
self.assertEqual(user2.last_name, '')
self.assertEqual(user2.email, '')
|
Python
| 0.000004
|
@@ -818,16 +818,40 @@
ef test_
+login_page_uses_correct_
template
@@ -850,32 +850,32 @@
template(self):%0A
-
response
@@ -927,93 +927,98 @@
sert
-In('login_form', response.context)%0A self.assertIn('messages', response.context
+TemplateUsed(response, 'base.html')%0A self.assertTemplateUsed(response, 'login.html'
)%0A%0A
|
1d1a37ad6f0aedbf18a72b551fdee4d96c92ea11
|
Update RICA example.
|
examples/mnist-rica.py
|
examples/mnist-rica.py
|
#!/usr/bin/env python
import climate
import matplotlib.pyplot as plt
import numpy as np
import theanets
from utils import load_mnist, plot_layers, plot_images
logging = climate.get_logger('mnist-rica')
climate.enable_default_logging()
class RICA(theanets.Autoencoder):
def J(self, weight_inverse=0, **kwargs):
cost = super(RICA, self).J(**kwargs)
if weight_inverse > 0:
cost += sum((weight_inverse / (w * w).sum(axis=0)).sum() for w in self.weights)
return cost
train, valid, _ = load_mnist()
# mean-center the digits and compute a pca whitening transform.
train -= 0.5
valid -= 0.5
logging.info('computing whitening transform')
vals, vecs = np.linalg.eigh(np.dot(train.T, train) / len(train))
vals = vals[::-1]
vecs = vecs[:, ::-1]
K = 197 # this retains 99% of the variance in the digit data.
vals = np.sqrt(vals[:K])
vecs = vecs[:, :K]
def whiten(x):
return np.dot(x, np.dot(vecs, np.diag(1. / vals)))
def color(z):
return np.dot(z, np.dot(np.diag(vals), vecs.T))
# now train our model on the whitened dataset.
N = 16
e = theanets.Experiment(
RICA,
layers=(K, N * N, K),
activation='linear',
hidden_l1=0.2,
no_learn_biases=True,
tied_weights=True,
train_batches=100,
weight_inverse=0.01,
)
e.train(whiten(train), whiten(valid))
# color the network weights so they are viewable as digits.
plot_layers(
[color(e.network.weights[0].get_value().T).T],
tied_weights=True)
plt.tight_layout()
plt.show()
plot_images(valid[:N*N], 121, 'Sample data')
plot_images(
color(e.network.predict(whiten(valid[:N*N]))),
122, 'Reconstructed data')
plt.tight_layout()
plt.show()
|
Python
| 0
|
@@ -461,14 +461,38 @@
um()
+%0A
for
-w
+l
in
@@ -496,16 +496,34 @@
in self.
+layers for w in l.
weights)
@@ -1463,29 +1463,26 @@
ork.
+get_
weights
-%5B0%5D.get_value(
+('hid1'
).T)
|
57cf0e1d153c2d06e722329ac35f2093a1d1c17c
|
use .py file to make for setup.py
|
Docs/city_fynder.py
|
Docs/city_fynder.py
|
Python
| 0.000001
|
@@ -0,0 +1,576 @@
+# Which city would like to live?%0A# Created by City Fynders - University of Washington%0A%0Aimport pandas as pd%0Aimport numpy as np%0Aimport geopy as gy%0Afrom geopy.geocoders import Nominatim%0A%0Aimport data_processing as dp%0A%0A%0A# import data%0A(natural, human, economy, tertiary) = dp.read_data()%0A%0A%0A# Add ranks in the DataFrame%0A(natural, human, economy, tertiary) = dp.data_rank(natural, human, economy, tertiary)%0A%0A%0A# Get location information%0A(Lat, Lon) = dp.find_loc(human)%0A%0A%0A# Create a rank DataFrame and save as csv file%0Arank = dp.create_rank(natural, human, economy, tertiary, Lat, Lon)%0A
|
|
64b321f1815c17562e4e8c3123b5b7fbbe23ce0b
|
Add logging test
|
pubres/tests/logging_test.py
|
pubres/tests/logging_test.py
|
Python
| 0.000001
|
@@ -0,0 +1,1491 @@
+import logging%0Aimport logging.handlers%0Aimport multiprocessing%0A%0Aimport pubres%0Afrom pubres.pubres_logging import setup_logging%0A%0Afrom .base import *%0A%0A%0Aclass MultiprocessingQueueStreamHandler(logging.handlers.BufferingHandler):%0A %22%22%22A logging handler that pushes the getMessage() of every%0A LogRecord into a multiprocessing.Queue.%0A%0A Used to test log messages of a server started in its own process.%0A %22%22%22%0A def __init__(self, *args, **kwargs):%0A super(MultiprocessingQueueStreamHandler, self).__init__(*args,%0A **kwargs)%0A self.mp_logrecord_queue = multiprocessing.Queue()%0A%0A # Don't override emit(self, record);%0A # BufferingHandler will append record to self.buffer%0A%0A def emit(self, record):%0A super(MultiprocessingQueueStreamHandler, self).emit(record)%0A self.mp_logrecord_queue.put(record.getMessage())%0A%0A def getLogRecordBuffer(self):%0A ret = %5B%5D%0A while not self.mp_logrecord_queue.empty():%0A log = self.mp_logrecord_queue.get()%0A ret.append(log)%0A return ret%0A%0A%0Adef test_logging():%0A # Set up log capturing%0A handler = MultiprocessingQueueStreamHandler(10)%0A setup_logging(handler=handler)%0A%0A # Do some server actions%0A with pubres.BackgroundServer():%0A with pub('key1', 'val1'):%0A pass%0A%0A # Make sure actions appear in log%0A log_buffer = handler.getLogRecordBuffer()%0A assert %22pub %7B'key1': 'val1'%7D%22 in log_buffer%0A
|
|
b52bad82bafed23d3db5a0e73c22a056d1753174
|
add card parsers
|
pypeerassets/card_parsers.py
|
pypeerassets/card_parsers.py
|
Python
| 0
|
@@ -0,0 +1,713 @@
+'''parse cards according to deck issue mode'''%0A%0A%0Adef none_parser(cards):%0A '''parser for NONE %5B0%5D issue mode'''%0A%0A return None%0A%0A%0Adef custom_parser(cards, parser=None):%0A '''parser for CUSTOM %5B1%5D issue mode,%0A please provide your custom parser as argument'''%0A%0A if not parser:%0A return cards%0A%0A else:%0A return parser(cards)%0A%0A%0Adef once_parser(cards):%0A '''parser for ONCE %5B2%5D issue mode'''%0A%0A return %5Bnext(i for i in cards if i.type == %22CardIssue%22)%5D%0A%0A%0Adef multi_parser(cards):%0A '''parser for MULTI %5B4%5D issue mode'''%0A%0A return cards%0A%0A%0Adef mono_parser(cards):%0A '''parser for MONO %5B8%5D issue mode'''%0A%0A return %5Bnext(i for i in cards if i.type == %22CardIssue%22 and i.amount%5B0%5D == 1)%5D%0A
|
|
a5b28834bb5e52857720139a1fbe6dfd1d1ea266
|
create a new string helper that concatenates arguments
|
radosgw_agent/util/string.py
|
radosgw_agent/util/string.py
|
Python
| 0.000023
|
@@ -0,0 +1,315 @@
+%0Adef concatenate(*a, **kw):%0A %22%22%22%0A helper function to concatenate all arguments with added (optional)%0A newlines%0A %22%22%22%0A newline = kw.get('newline', False)%0A string = ''%0A for item in a:%0A if newline:%0A string += item + '%5Cn'%0A else:%0A string += item%0A return string%0A
|
|
68c66c397f11637f650131ef69f4f16ebe6f43e4
|
Create luhn.py
|
luhn.py
|
luhn.py
|
Python
| 0.000001
|
@@ -0,0 +1,1640 @@
+# Luhn algorithm check%0A# From https://en.wikipedia.org/wiki/Luhn_algorithm%0Adef luhn_checksum(card_number):%0A def digits_of(n):%0A return %5Bint(d) for d in str(n)%5D%0A digits = digits_of(card_number)%0A odd_digits = digits%5B-1::-2%5D%0A even_digits = digits%5B-2::-2%5D%0A checksum = 0%0A checksum += sum(odd_digits)%0A for d in even_digits:%0A checksum += sum(digits_of(d*2))%0A return checksum %25 10%0A%0Adef is_luhn_valid(card_number):%0A return luhn_checksum(card_number) == 0%0A%0A# There's no do-while in python, lazy workaround%0Awhile True:%0A # Get the first value%0A # Check that's a number with 16 digits%0A try:%0A firstValue=int(raw_input('First PAN in range: '))%0A if (len(str(firstValue)) != 16):%0A print %22PAN must be 16 chars long%22%0A else:%0A break%0A except ValueError:%0A print(%22Not a number%22)%0A# Same process for the second number%0A# TODO: This should be a function%0Awhile True:%0A try:%0A lastValue = int(raw_input(('Last PAN in range: ')))%0A if (len(str(lastValue)) != 16):%0A print %22PAN must be 16 chars long%22%0A else:%0A break%0A except ValueError:%0A print(%22Not a number%22)%0A%0A# Swap variables if the first value is higher than the last%0Aif (firstValue %3E lastValue):%0A firstValue,lastValue = lastValue,firstValue%0A%0Aprint %22Valid card numbers in range %7B0%7D/%7B1%7D%22.format(firstValue,lastValue)%0AtotalValid = 0%0A# Check if the values in the range are luhn compliant%0Afor ccc in range(firstValue,lastValue):%0A if is_luhn_valid(ccc):%0A print %22%5Ct%22 + str(ccc)%0A totalValid += 1%0Aprint %22Total: %7B0%7D valid cards in range%22.format(totalValid)%0A
|
|
7655170c50b3e7d3af0a34c82478696b6b8f3d39
|
Disable Session Keepalives in the Request Library
|
rightscale/httpclient.py
|
rightscale/httpclient.py
|
from functools import partial
import requests
DEFAULT_ROOT_RES_PATH = '/'
class HTTPResponse(object):
"""
Wrapper around :class:`requests.Response`.
Parses ``Content-Type`` header and makes it available as a list of fields
in the :attr:`content_type` member.
"""
def __init__(self, raw_response):
self.raw_response = raw_response
content_type = raw_response.headers.get('content-type', '')
ct_fields = [f.strip() for f in content_type.split(';')]
self.content_type = ct_fields
def __getattr__(self, name):
return getattr(self.raw_response, name)
class HTTPClient(object):
    """
    Convenience wrapper around Requests.

    Maintains a single :class:`requests.Session` pre-configured to accept
    JSON and exposes ``get``/``post``/``put``/``delete``/``head`` shortcut
    callables bound to :meth:`request`.

    :param str endpoint: URL for the API endpoint. E.g. ``https://blah.org``.

    :param dict extra_headers: When specified, these key-value pairs are added
        to the default HTTP headers passed in with each request.

    """
    def __init__(
            self,
            endpoint='',
            extra_headers=None,
            ):
        self.endpoint = endpoint
        s = requests.session()
        s.headers['Accept'] = 'application/json'
        if extra_headers:
            s.headers.update(extra_headers)
        self.s = s

        # convenience methods
        self.delete = partial(self.request, 'delete')
        self.get = partial(self.request, 'get')
        self.head = partial(self.request, 'head')
        self.post = partial(self.request, 'post')
        self.put = partial(self.request, 'put')

    # NOTE: the default for ignore_codes is an immutable tuple rather than
    # the mutable-list anti-pattern; callers may still pass any iterable.
    def request(self, method, path='/', url=None, ignore_codes=(), **kwargs):
        """
        Performs HTTP request.

        :param str method: An HTTP method (e.g. 'get', 'post', 'PUT', etc...)

        :param str path: A path component of the target URL.  This will be
            appended to the value of ``self.endpoint``.  If both :attr:`path`
            and :attr:`url` are specified, the value in :attr:`url` is used and
            the :attr:`path` is ignored.

        :param str url: The target URL (e.g.  ``http://server.tld/somepath/``).
            If both :attr:`path` and :attr:`url` are specified, the value in
            :attr:`url` is used and the :attr:`path` is ignored.

        :param ignore_codes: HTTP error codes (e.g. 404, 500) that
            should be ignored.  If an HTTP error occurs and it is *not* in
            :attr:`ignore_codes`, then an exception is raised.
        :type ignore_codes: iterable of int

        :param kwargs: Any other kwargs to pass to :meth:`requests.request()`.

        Returns a :class:`HTTPResponse` wrapping the
        :class:`requests.Response` object.
        """
        _url = url if url else (self.endpoint + path)
        r = self.s.request(method, _url, **kwargs)
        if not r.ok and r.status_code not in ignore_codes:
            r.raise_for_status()
        return HTTPResponse(r)
|
Python
| 0.000001
|
@@ -41,16 +41,17 @@
uests%0A%0A%0A
+%0A
DEFAULT_
@@ -1068,16 +1068,17 @@
ndpoint%0A
+%0A
@@ -1099,16 +1099,175 @@
ession()
+%0A%0A # Disable keepalives. They're unsafe in threaded apps that potentially%0A # re-use very old connection objects from the urllib3 connection pool.
%0A
@@ -1308,16 +1308,58 @@
n/json'%0A
+ s.headers%5B'Connection'%5D = 'close'%0A
|
9e6c2d1601170657fb0516e5c2addde65761b8fe
|
support grayscale + 8-bit alpha channel
|
rinoh/backend/pdf/png.py
|
rinoh/backend/pdf/png.py
|
# This file is part of RinohType, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.
import png
from .cos import Name, XObjectImage, Array, Integer, HexString
from .filter import FlateDecode, FlateDecodeParams
__all__ = ['PNGReader']
class PNGReader(XObjectImage):
    """
    Read a PNG image and embed it in a PDF as an image XObject.

    Only non-interlaced PNGs using the standard compression (0) and filter
    (0) methods are supported.  The raw, still-compressed IDAT data is
    copied straight into the PDF stream and declared as FlateDecode with
    PNG predictors, so the PDF consumer performs the actual decoding.
    """

    # PNG color type -> PDF color space name.  Color types 4 and 6
    # (grayscale/RGB with alpha) have no entry here and are unsupported.
    COLOR_SPACE = {0: 'DeviceGray',
                   2: 'DeviceRGB',
                   3: 'Indexed'}
    # PNG color type -> number of color components per stored sample.
    NUM_COLOR_COMPONENTS = {0: 1,
                            2: 3,
                            4: 1,
                            6: 3}

    def __init__(self, file_or_filename):
        # (removed leftover debug print of the input filename)
        self._png = png.Reader(file_or_filename)
        self._png.preamble()
        # PNG only defines method 0 for these; interlacing is unsupported.
        assert self._png.compression == 0
        assert self._png.filter == 0
        assert self._png.interlace == 0
        try:
            # pHYs chunk: pixels per unit; unit == 1 means metre.
            (x_density, y_density), unit = self._png.resolution
            assert unit == 1
            # convert pixels/metre to dots/inch
            self.dpi = x_density / 100 * 2.54, y_density / 100 * 2.54
        except AttributeError:
            # no physical-resolution information; fall back to 72 dpi
            self.dpi = 72, 72
        colorspace = Name(self.COLOR_SPACE[self._png.color_type])
        if str(colorspace) == 'Indexed':
            # Build the PDF Indexed color space lookup table (RGB only;
            # palette alpha entries are dropped).
            palette = self._png.palette('force')
            num_entries = len(palette)
            lookup = bytearray(3 * num_entries)
            for i, (r, g, b, a) in enumerate(palette):
                lookup[3 * i:3 * i + 3] = r, g, b
            colorspace = Array([colorspace, Name('DeviceRGB'),
                                Integer(num_entries - 1), HexString(lookup)])
            # NOTE(review): using the bit depth as the predictor 'colors'
            # value looks suspicious for indexed images (one sample per
            # pixel would suggest colors=1) -- verify against the spec.
            predictor_colors = self._png.bitdepth
        else:
            predictor_colors = self.NUM_COLOR_COMPONENTS[self._png.color_type]
        flate_params = FlateDecodeParams(predictor=10, colors=predictor_colors,
                                         bits_per_component=self._png.bitdepth,
                                         columns=self._png.width)
        super().__init__(self._png.width, self._png.height, colorspace,
                         self._png.bitdepth, filter=FlateDecode(flate_params))
        # Copy raw compressed IDAT chunks verbatim; the PDF viewer inflates
        # and un-filters them using the parameters declared above.
        for idat_chunk in self._png.idat():
            self._data.write(idat_chunk)
|
Python
| 0.000005
|
@@ -256,16 +256,40 @@
nses/.%0A%0A
+from io import BytesIO%0A%0A
import p
@@ -566,16 +566,52 @@
Indexed'
+,%0A 4: 'DeviceGray'
%7D%0A NU
@@ -2261,23 +2261,916 @@
ode(
-flate_params))%0A
+))%0A if self._png.color_type == 4:%0A idat = BytesIO()%0A for idat_chunk in self._png.idatdecomp():%0A idat.write(idat_chunk)%0A idat.seek(0)%0A self%5B'SMask'%5D = XObjectImage(self._png.width, self._png.height,%0A Name('DeviceGray'), 8, FlateDecode())%0A while True:%0A row = idat.read(1 + 2 * self._png.width)%0A if not row:%0A break%0A self.write(row%5B0:1%5D + row%5B1::2%5D)%0A self%5B'SMask'%5D.write(row%5B0:1%5D + row%5B2::2%5D)%0A soft_mask_filter_params = FlateDecodeParams(predictor=10, colors=1,%0A bits_per_component=8,%0A columns=self._png.width)%0A self%5B'SMask'%5D.filter.params = soft_mask_filter_params%0A else:%0A
@@ -3217,24 +3217,28 @@
+
+
self._data.w
@@ -3238,24 +3238,66 @@
_data.write(idat_chunk)%0A
+ self.filter.params = flate_params%0A
|
1718926c99692fefb90627c55589990cd0e0225b
|
Make migrations in project_template home app reversible
|
wagtail/project_template/home/migrations/0002_create_homepage.py
|
wagtail/project_template/home/migrations/0002_create_homepage.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_homepage(apps, schema_editor):
    """
    Replace Wagtail's default welcome page with a ``HomePage`` instance and
    point a default ``Site`` at it.

    Historical models are obtained through ``apps`` (never imported
    directly) so the migration keeps working as the schema evolves.
    """
    # Get models
    ContentType = apps.get_model('contenttypes.ContentType')
    Page = apps.get_model('wagtailcore.Page')
    Site = apps.get_model('wagtailcore.Site')
    HomePage = apps.get_model('home.HomePage')

    # Delete the default homepage.  filter() rather than get() so the
    # migration does not crash with DoesNotExist when the page was already
    # removed (e.g. re-running after a partial rollback).
    Page.objects.filter(id=2).delete()

    # Create content type for homepage model
    homepage_content_type, _ = ContentType.objects.get_or_create(
        model='homepage', app_label='home')

    # Create a new homepage.  path/depth encode the position in Wagtail's
    # materialized-path page tree: a direct child of the root page.
    homepage = HomePage.objects.create(
        title="Homepage",
        slug='home',
        content_type=homepage_content_type,
        path='00010001',
        depth=2,
        numchild=0,
        url_path='/home/',
    )

    # Create a site with the new homepage set as the root
    Site.objects.create(
        hostname='localhost', root_page=homepage, is_default_site=True)
class Migration(migrations.Migration):
    # Data migration: runs create_homepage() on forward migrate.  No
    # reverse callable is supplied to RunPython, so this migration is
    # irreversible as written.

    dependencies = [
        ('home', '0001_initial'),
    ]

    operations = [
        migrations.RunPython(create_homepage),
    ]
|
Python
| 0.000005
|
@@ -383,24 +383,99 @@
lt homepage%0A
+ # If migration is run multiple times, it may have already been deleted%0A
Page.obj
@@ -479,19 +479,22 @@
objects.
-get
+filter
(id=2).d
@@ -578,15 +578,10 @@
pe,
-created
+__
= C
@@ -1071,16 +1071,437 @@
True)%0A%0A%0A
+def remove_homepage(apps, schema_editor):%0A # Get models%0A ContentType = apps.get_model('contenttypes.ContentType')%0A HomePage = apps.get_model('home.HomePage')%0A%0A # Delete the default homepage%0A # Page and Site objects CASCADE%0A HomePage.objects.filter(slug='home', depth=2).delete()%0A%0A # Delete content type for homepage model%0A ContentType.objects.filter(model='homepage', app_label='home').delete()%0A%0A%0A
class Mi
@@ -1657,16 +1657,33 @@
homepage
+, remove_homepage
),%0A %5D
|
889b322261384c90ac165ddd1e8bf2944b3e7785
|
Add machine types people use as host for TF builds.
|
third_party/remote_config/remote_platform_configure.bzl
|
third_party/remote_config/remote_platform_configure.bzl
|
"""Repository rule to create a platform for a docker image to be used with RBE."""
def _remote_platform_configure_impl(repository_ctx):
    # Resolve the target platform; "local" means "detect from the machine
    # running Bazel" (used when not building against a fixed RBE image).
    platform = repository_ctx.attr.platform
    if platform == "local":
        os = repository_ctx.os.name.lower()
        if os.startswith("windows"):
            platform = "windows"
        elif os.startswith("mac os"):
            platform = "osx"
        else:
            platform = "linux"

    # Default to x86_64; MACHTYPE (set by bash) is consulted to detect
    # PowerPC hosts.
    cpu = "x86_64"
    if "MACHTYPE" in repository_ctx.os.environ:
        machine_type = repository_ctx.os.environ["MACHTYPE"]
        if machine_type.startswith("ppc"):
            cpu = "ppc"

    # Serialize the exec-properties dict into a Starlark dict literal that
    # can be spliced verbatim into the generated BUILD file.
    exec_properties = repository_ctx.attr.platform_exec_properties

    serialized_exec_properties = "{"
    for k, v in exec_properties.items():
        serialized_exec_properties += "\"%s\" : \"%s\"," % (k, v)
    serialized_exec_properties += "}"

    # Instantiate the BUILD template with the computed substitutions.
    repository_ctx.template(
        "BUILD",
        Label("@org_tensorflow//third_party/remote_config:BUILD.tpl"),
        {
            "%{platform}": platform,
            "%{exec_properties}": serialized_exec_properties,
            "%{cpu}": cpu,
        },
    )

# Repository rule that generates a platform definition (BUILD file) for a
# docker image to be used with RBE, or for the local machine.
remote_platform_configure = repository_rule(
    implementation = _remote_platform_configure_impl,
    attrs = {
        "platform_exec_properties": attr.string_dict(mandatory = True),
        "platform": attr.string(default = "linux", values = ["linux", "windows", "local"]),
    },
)
|
Python
| 0.000004
|
@@ -568,16 +568,17 @@
if
+(
machine_
@@ -603,32 +603,156 @@
pc%22)
-:%0A cpu = %22ppc
+ or%0A machine_type.startswith(%22powerpc%22)):%0A cpu = %22ppc%22%0A elif machine_type.startswith(%22s390x%22):%0A cpu = %22s390x
%22%0A%0A
|
2dea3ee1e50d5365ca190ee894536faea3148c7d
|
Add ChromiumTestShell activity and socket to constants.
|
build/android/pylib/constants.py
|
build/android/pylib/constants.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines a set of constants shared by test runners and other scripts."""
import os
# Package/activity names for the various Chrome-on-Android build flavours.
CHROME_PACKAGE = 'com.google.android.apps.chrome'
CHROME_ACTIVITY = 'com.google.android.apps.chrome.Main'
CHROME_TESTS_PACKAGE = 'com.google.android.apps.chrome.tests'
LEGACY_BROWSER_PACKAGE = 'com.google.android.browser'
LEGACY_BROWSER_ACTIVITY = 'com.android.browser.BrowserActivity'
CONTENT_SHELL_PACKAGE = "org.chromium.content_shell_apk"
CONTENT_SHELL_ACTIVITY = "org.chromium.content_shell_apk.ContentShellActivity"
CHROME_SHELL_PACKAGE = 'org.chromium.chrome.browser.test'
CHROMIUM_TEST_SHELL_PACKAGE = 'org.chromium.chrome.testshell'

# Root of the Chromium checkout: three levels up from this file.
CHROME_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                          '..', '..', '..'))

# Ports arrangement for various test servers used in Chrome for Android.
# Lighttpd server will attempt to use 9000 as default port, if unavailable it
# will find a free port from 8001 - 8999.
LIGHTTPD_DEFAULT_PORT = 9000
LIGHTTPD_RANDOM_PORT_FIRST = 8001
LIGHTTPD_RANDOM_PORT_LAST = 8999
TEST_SYNC_SERVER_PORT = 9031

# The net test server is started from 10000. Reserve 20000 ports for the all
# test-server based tests should be enough for allocating different port for
# individual test-server based test.
TEST_SERVER_PORT_FIRST = 10000
TEST_SERVER_PORT_LAST = 30000
# A file to record next valid port of test server.
TEST_SERVER_PORT_FILE = '/tmp/test_server_port'
TEST_SERVER_PORT_LOCKFILE = '/tmp/test_server_port.lock'

# Device-side scratch directory where test executables are pushed.
TEST_EXECUTABLE_DIR = '/data/local/tmp'
# Directories for common java libraries for SDK build.
# These constants are defined in build/android/ant/common.xml
SDK_BUILD_TEST_JAVALIB_DIR = 'test.lib.java'
SDK_BUILD_APKS_DIR = 'apks'

# The directory on the device where perf test output gets saved to.
DEVICE_PERF_OUTPUT_DIR = '/data/data/' + CHROME_PACKAGE + '/files'

# Host-side directory where screenshots captured during tests are written.
SCREENSHOTS_DIR = os.path.join(CHROME_DIR, 'out_screenshots')

ANDROID_SDK_VERSION = 17
ANDROID_SDK_ROOT = os.path.join(CHROME_DIR, 'third_party/android_tools/sdk')
ANDROID_NDK_ROOT = os.path.join(CHROME_DIR, 'third_party/android_tools/ndk')

# Server used to aggregate flaky-test results upstream.
UPSTREAM_FLAKINESS_SERVER = 'test-results.appspot.com'
|
Python
| 0.000002
|
@@ -354,16 +354,67 @@
e.Main'%0A
+CHROME_DEVTOOLS_SOCKET = 'chrome_devtools_remote'%0A%0A
CHROME_T
@@ -467,16 +467,17 @@
.tests'%0A
+%0A
LEGACY_B
@@ -586,16 +586,17 @@
tivity'%0A
+%0A
CONTENT_
@@ -611,17 +611,17 @@
CKAGE =
-%22
+'
org.chro
@@ -642,17 +642,17 @@
hell_apk
-%22
+'
%0ACONTENT
@@ -669,17 +669,17 @@
IVITY =
-%22
+'
org.chro
@@ -721,17 +721,18 @@
Activity
-%22
+'%0A
%0ACHROME_
@@ -782,16 +782,17 @@
r.test'%0A
+%0A
CHROMIUM
@@ -844,16 +844,187 @@
stshell'
+%0ACHROMIUM_TEST_SHELL_ACTIVITY = (%0A 'org.chromium.chrome.testshell.ChromiumTestShellActivity')%0ACHROMIUM_TEST_SHELL_DEVTOOLS_SOCKET = 'chromium_testshell_devtools_remote'
%0A%0ACHROME
|
4ab784d9526b2a4555e288038df0490269b17683
|
完成1题
|
已完成/ToLeetSpeak.py
|
已完成/ToLeetSpeak.py
|
Python
| 0.000004
|
@@ -0,0 +1,1693 @@
+#!/usr/bin/python%0A# -*- coding: UTF-8 -*-%0A%0A'''%0A# ToLeetSpeak%E9%A2%98%E7%9B%AE%E5%9C%B0%E5%9D%80%EF%BC%9Ahttps://www.codewars.com/kata/57c1ab3949324c321600013f/train/python%0A'''%0A%0Aimport unittest%0A%0Aclass TestCases(unittest.TestCase):%0A def setUp(self):%0A pass%0A def test1(self):self.assertEqual(to_leet_speak(%22LEET%22), %221337%22)%0A def test2(self):self.assertEqual(to_leet_speak(%22CODEWARS%22), %22(0D3W@R$%22)%0A def test3(self):self.assertEqual(to_leet_speak(%22HELLO WORLD%22), %22#3110 W0R1D%22)%0A def test4(self):self.assertEqual(to_leet_speak(%22LOREM IPSUM DOLOR SIT AMET%22), %2210R3M !P$UM D010R $!7 @M37%22)%0A def test5(self):self.assertEqual(to_leet_speak(%22THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG%22), %227#3 QU!(K 8R0WN F0X JUMP$ 0V3R 7#3 1@2Y D06%22)%0A%0A%0Adef to_leet_speak(str):%0A alphabet = %7B%22A%22 : '@', %22B%22: '8', %22C%22 : '(', %22D%22 : 'D', %22E%22 : '3', %22F%22 : 'F', %22G%22 : '6', %22H%22 : '#', %22I%22 : '!', %22J%22 : 'J', %22K%22 : 'K', %22L%22 : '1', %22M%22 : 'M', %22N%22 : 'N', %22O%22 : '0', %22P%22 : 'P', %22Q%22 : 'Q', %22R%22 : 'R', %22S%22 : '$', %22T%22 : '7', %22U%22 : 'U', %22V%22 : 'V', %22W%22 : 'W', %22X%22 : 'X', %22Y%22 : 'Y', %22Z%22 : '2'%7D%0A res = %5B%5D%0A for i in range(len(str)):%0A if str%5Bi%5D in alphabet:%0A res.append(alphabet%5Bstr%5Bi%5D%5D)%0A else:%0A res.append(str%5Bi%5D)%0A return %22%22.join(res)%0A %0Aif __name__ == '__main__':%0A unittest.main()%0A%0A%0A %0A%0A%0A%0A %0A'''%0A%E5%8F%82%E8%80%83%E8%A7%A3%E6%B3%95%EF%BC%9A%0Adef to_leet_speak(str):%0A leet = %7B'A' : '@','B' : '8','C' : '(','D' : 'D','E' : '3','F' : 'F','G' : '6','H' : '#','I' : '!','J' : 'J','K' : 'K','L' : '1','M' : 'M','N' : 'N','O' : '0','P' : 'P','Q' : 'Q','R' : 'R','S' : '$','T' : '7','U' : 'U','V' : 'V','W' : 'W','X' : 'X','Y' : 'Y','Z' : '2'%7D%0A for i in leet:%0A str = str.replace(i, leet%5Bi%5D)%0A return str%0A'''
|
|
988b56b4348ec8be3127cfd6576779de4367d488
|
Add pywikibot user-config file
|
.pywikibot/user-config.py
|
.pywikibot/user-config.py
|
Python
| 0
|
@@ -0,0 +1,146 @@
+family = 'wikipedia'%0Amylang = 'en'%0A%0Ausernames%5B'wikipedia'%5D%5B'en'%5D = u'ExampleBot'%0A%0Aconsole_encoding = 'utf-8'%0Atextfile_encoding = 'unicode_escape'%0A
|
|
d808d55b5ca9ae2e45418aca718ee21a9beb84f9
|
Create a custom reverse() function (not implemented yet)
|
djangorestframework/urlresolvers.py
|
djangorestframework/urlresolvers.py
|
Python
| 0
|
@@ -0,0 +1,168 @@
+from django.core.urlresolvers import reverse%0A%0Adef reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None, current_app=None):%0A raise NotImplementedError%0A
|
|
631faacaf077c2b4d0d446e42076fd4e4f27ed37
|
Add tests for template tags
|
djlotrek/tests/test_templatetags.py
|
djlotrek/tests/test_templatetags.py
|
Python
| 0
|
@@ -0,0 +1,1328 @@
+import os%0Aimport mock%0A%0Afrom django.test import TestCase%0A%0Afrom djlotrek.templatetags.djlotrek_tags import absolute_url%0A%0Afrom django.test import RequestFactory%0A%0A%0Aclass TemplateTagsTestCase(TestCase):%0A%0A def setUp(self):%0A pass%0A%0A def test_absolute_url(self):%0A %22%22%22Our beloved get_host_url utility%22%22%22%0A request_factory = RequestFactory()%0A request = request_factory.get('/path')%0A request.META%5B'HTTP_HOST'%5D = 'localhost'%0A%0A context = %7B%0A 'request' : request%0A %7D%0A%0A abs_url = absolute_url(context, '/ciao/')%0A self.assertEqual(abs_url, 'http://localhost/ciao/')%0A%0A abs_url = absolute_url(context, 'ciao/')%0A self.assertEqual(abs_url, 'http://localhost/ciao/')%0A%0A abs_url = absolute_url(context, 'ciao')%0A self.assertEqual(abs_url, 'http://localhost/ciao')%0A%0A abs_url = absolute_url(context, 'ciao/a/tutti')%0A self.assertEqual(abs_url, 'http://localhost/ciao/a/tutti')%0A%0A abs_url = absolute_url(context, 'ciao/a/tutti?language=it')%0A self.assertEqual(abs_url, 'http://localhost/ciao/a/tutti?language=it')%0A%0A def test_absolute_url_without_request(self):%0A %22%22%22Our beloved get_host_url utility%22%22%22%0A context = %7B%7D%0A%0A abs_url = absolute_url(context, '/ciao/')%0A self.assertEqual(abs_url, '/ciao/')
|
|
e812029c03cb6a7a6e474546fb686342e6d2c064
|
Add test for `wsgiref.simple_server`
|
python/ql/test/library-tests/frameworks/stdlib/wsgiref_simple_server_test.py
|
python/ql/test/library-tests/frameworks/stdlib/wsgiref_simple_server_test.py
|
Python
| 0
|
@@ -0,0 +1,1943 @@
+# This test file demonstrates how to use an application with a wsgiref.simple_server%0A# see https://docs.python.org/3/library/wsgiref.html#wsgiref.simple_server.WSGIServer%0Aimport sys%0Aimport wsgiref.simple_server%0A%0Adef ignore(*arg, **kwargs): pass%0Aensure_tainted = ensure_not_tainted = ignore%0A%0AADDRESS = (%22localhost%22, 8000)%0A%0A%0A# I wanted to showcase that we handle both functions and bound-methods, so it's possible%0A# to run this test-file in 2 different ways.%0A%0Adef func(environ, start_response): # $ MISSING: requestHandler%0A ensure_tainted(%0A environ, # $ MISSING: tainted%0A environ%5B%22PATH_INFO%22%5D, # $ MISSING: tainted%0A )%0A write = start_response(%22200 OK%22, %5B(%22Content-Type%22, %22text/plain%22)%5D)%0A write(b%22hello%22) # $ MISSING: HttpResponse responseBody=b%22hello%22%0A write(data=b%22 %22) # $ MISSING: HttpResponse responseBody=b%22 %22%0A%0A # function return value should be an iterable that will also be written to to the%0A # response.%0A return %5Bb%22world%22, b%22!%22%5D # $ MISSING: HttpResponse responseBody=List%0A%0A%0Aclass MyServer(wsgiref.simple_server.WSGIServer):%0A def __init__(self):%0A super().__init__(ADDRESS, wsgiref.simple_server.WSGIRequestHandler)%0A self.set_app(self.my_method)%0A%0A def my_method(self, _env, start_response): # $ MISSING: requestHandler%0A start_response(%22200 OK%22, %5B%5D)%0A return %5Bb%22my_method%22%5D # $ MISSING: HttpResponse responseBody=List%0A%0A%0Acase = sys.argv%5B1%5D%0Aif case == %221%22:%0A server = wsgiref.simple_server.WSGIServer(ADDRESS, wsgiref.simple_server.WSGIRequestHandler)%0A server.set_app(func)%0Aelif case == %222%22:%0A server = MyServer()%0Aelif case == %223%22:%0A server = MyServer()%0A def func3(_env, start_response): # $ MISSING: requestHandler%0A start_response(%22200 OK%22, %5B%5D)%0A return %5Bb%22foo%22%5D # $ MISSING: HttpResponse responseBody=List%0A server.set_app(func3)%0Aelse:%0A sys.exit(%22wrong case%22)%0A%0A%0Aprint(f%22Running on 
http://%7BADDRESS%5B0%5D%7D:%7BADDRESS%5B1%5D%7D%22)%0A%0Aserver.serve_forever()%0A
|
|
f2028ab194fe7c1c1497ee9320ddddbbece6406a
|
Add eventlet backdoor to facilitate troubleshooting.
|
nova/common/eventlet_backdoor.py
|
nova/common/eventlet_backdoor.py
|
Python
| 0.000002
|
@@ -0,0 +1,1990 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4%0A%0A# Copyright (c) 2012 Openstack, LLC.%0A# Administrator of the National Aeronautics and Space Administration.%0A# All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Aimport gc%0Aimport traceback%0A%0Aimport eventlet%0Aimport eventlet.backdoor%0Aimport greenlet%0A%0Afrom nova import flags%0Afrom nova.openstack.common import cfg%0A%0Aeventlet_backdoor_opts = %5B%0A cfg.IntOpt('backdoor_port',%0A default=None,%0A help='port for eventlet backdoor to listen')%0A %5D%0A%0AFLAGS = flags.FLAGS%0AFLAGS.register_opts(eventlet_backdoor_opts)%0A%0A%0Adef dont_use_this():%0A print %22Don't use this, just disconnect instead%22%0A%0A%0Adef find_objects(t):%0A return filter(lambda o: isinstance(o, t), gc.get_objects())%0A%0A%0Adef print_greenthreads():%0A for i, gt in enumerate(find_objects(greenlet.greenlet)):%0A print i, gt%0A traceback.print_stack(gt.gr_frame)%0A print%0A%0A%0Abackdoor_locals = %7B%0A '_': None, # So it doesn't interfere with the global%0A 'exit': dont_use_this, # So we don't exit the entire process%0A 'quit': dont_use_this, # So we don't exit the entire process%0A 'fo': find_objects,%0A 'pgt': print_greenthreads,%0A%7D%0A%0A%0Adef initialize_if_enabled():%0A if FLAGS.backdoor_port is None:%0A return%0A%0A eventlet.spawn(eventlet.backdoor.backdoor_server,%0A eventlet.listen(('localhost', FLAGS.backdoor_port)),%0A locals=backdoor_locals)%0A
|
|
d81ba7d656f11e817eb610b1c65a4880fddc9004
|
Fix getting money from arcade games.
|
saylua/modules/arcade/api.py
|
saylua/modules/arcade/api.py
|
from saylua.wrappers import api_login_required
from flask import g, request
from models.db import Game, GameLog
from saylua.utils import int_or_none
import json
# Send a score to the API.
# Send a score to the API.
@api_login_required()
def api_send_score(game_id):
    """
    Record a score for the current user in the given arcade game.

    Expects a JSON body with a ``score`` field.  On success, returns the
    user's updated currency totals; returns a 400 JSON error for an
    unknown game id or an unsupported game.
    """
    try:
        game = Game(game_id)
    except IndexError:
        # game_id does not map to a known game
        return json.dumps(dict(error='Invalid game!')), 400
    # NOTE: this block was previously under `finally:`, which also ran on
    # the exception path where the game variable was never bound, raising
    # NameError and masking the clean 400 response above.
    if game == "blocks":
        # TODO sanity check the game log and other variables sent to catch
        # low hanging fruit attempts at cheating.
        data = request.get_json()
        score = int_or_none(data.get('score')) or 0
        GameLog.record_score(g.user.id, game_id, score)
        g.user.cloud_coins += score
        # NOTE(review): if the ORM session does not auto-commit, the coin
        # update above may need an explicit commit -- verify.
        return json.dumps(dict(cloud_coins=g.user.cloud_coins, star_shards=g.user.star_shards))
    return json.dumps(dict(error='Bad request.')), 400
|
Python
| 0
|
@@ -1,20 +1,43 @@
+from saylua import db%0A%0A
from saylua.wrappers
@@ -757,16 +757,48 @@
= score%0A
+ db.session.commit()%0A
|
43d7160272511107528a33d7dff932ed274d9b58
|
add sitemaps
|
fluent_faq/sitemaps.py
|
fluent_faq/sitemaps.py
|
Python
| 0.000001
|
@@ -0,0 +1,1078 @@
+from django.contrib.sitemaps import Sitemap%0Afrom fluent_faq.models import FaqCategory, FaqQuestion%0Afrom fluent_faq.urlresolvers import faq_reverse%0A%0A%0Aclass FaqQuestionSitemap(Sitemap):%0A %22%22%22%0A Sitemap for FAQ questions%0A %22%22%22%0A def items(self):%0A return FaqQuestion.objects.published()%0A%0A def lastmod(self, category):%0A %22%22%22Return the last modification of the object.%22%22%22%0A return category.modification_date%0A%0A def location(self, category):%0A %22%22%22Return url of an question.%22%22%22%0A return faq_reverse('faqcategory_detail', kwargs=%7B'slug': category.slug%7D, ignore_multiple=True)%0A%0A%0A%0Aclass FaqCategorySitemap(Sitemap):%0A %22%22%22%0A Sitemap for FAQ categories.%0A %22%22%22%0A def items(self):%0A return FaqCategory.objects.published()%0A%0A def lastmod(self, category):%0A %22%22%22Return the last modification of the object.%22%22%22%0A return category.modification_date%0A%0A def location(self, category):%0A %22%22%22Return url of an category.%22%22%22%0A return faq_reverse('faqcategory_detail', kwargs=%7B'slug': category.slug%7D, ignore_multiple=True)%0A
|
|
23f4e54ea84a23af55e29ead27a38af12672aa43
|
Create multi_currency_prices.py
|
examples/multi_currency_prices.py
|
examples/multi_currency_prices.py
|
Python
| 0
|
@@ -0,0 +1,1227 @@
+%0A from pyoanda import Client, PRACTICE%0A%0A client = Client(environment=PRACTICE,account_id=%22Your Oanda account ID%22,access_token=%22Your Oanda access token%22)%0A%0A # Get prices for a list of instruments%0A%0A pair_list = %5B'AUD_JPY','EUR_JPY','GBP_JPY','AUD_USD'%5D%0A%0A dataset = client.get_prices(instruments=','.join(pair_list),stream=False)%0A%0A #json response::%0A %7Bu'prices': %5B%7Bu'ask': 81.551,%0A u'bid': 81.53,%0A u'instrument': u'AUD_JPY',%0A u'time': u'2016-01-26T07:39:56.525788Z'%7D,%0A %7Bu'ask': 127.975,%0A u'bid': 127.957,%0A u'instrument': u'EUR_JPY',%0A u'time': u'2016-01-26T07:39:55.712253Z'%7D,%0A %7Bu'ask': 167.269,%0A u'bid': 167.239,%0A u'instrument': u'GBP_JPY',%0A u'time': u'2016-01-26T07:39:58.333404Z'%7D,%0A %7Bu'ask': 0.69277,%0A u'bid': 0.6926,%0A u'instrument': u'AUD_USD',%0A u'time': u'2016-01-26T07:39:50.358020Z'%7D%5D%7D%0A %0A %0A # simplistic way of extracting data from the json response::%0A %0A aud_jpy = %5Bd for d in dataset%5B'prices'%5D if d%5B'instrument'%5D=='AUD_JPY'%5D%0A bid = %5Bd%5B'bid'%5D for d in aud_jpy%5D%5B-1%5D%0A ask = %5Bd%5B'ask'%5D for d in aud_jpy%5D%5B-1%5D%0A time = %5Bd%5B'time'%5D for d in aud_jpy%5D%5B-1%5D%0A %0A %0A%0A%0A%0A
|
|
22b04a8a6a014ee4e077f2dc03338bdc9479cc5c
|
package module for handling wavelength calib
|
comoving_rv/longslit/wavelength.py
|
comoving_rv/longslit/wavelength.py
|
Python
| 0
|
@@ -0,0 +1,2251 @@
+# Third-party%0Aimport numpy as np%0Afrom scipy.optimize import minimize, leastsq%0Afrom scipy.stats import scoreatpercentile%0A%0A# Project%0Afrom .models import voigt_polynomial%0A%0A__all__ = %5B'fit_emission_line'%5D%0A%0Adef errfunc(p, pix, flux, flux_ivar):%0A amp, x_0, std_G, fwhm_L, *bg_coef = p%0A return (voigt_polynomial(pix, amp, x_0, std_G, fwhm_L, bg_coef) - flux) * np.sqrt(flux_ivar)%0A%0Adef fit_emission_line(pix, flux, flux_ivar=None,%0A amp0=None, x0=None, std_G0=None, fwhm_L0=None, n_bg_coef=1):%0A %22%22%22%0A TODO:%0A%0A Parameters%0A ----------%0A pix : array_like%0A Must be the same shape as %60%60flux%60%60.%0A flux : array_like%0A Must be the same shape as %60%60pix_grid%60%60.%0A amp0 : numeric (optional)%0A Initial guess for line amplitude.%0A x0 : numeric (optional)%0A Initial guess for line centroid.%0A n_bg_coef : int%0A Number of terms in the background polynomial fit.%0A %22%22%22%0A%0A if x0 is None: # then estimate the initial guess for the centroid%0A x0 = pix%5Bnp.argmax(flux)%5D%0A%0A int_ctrd0 = int(round(x0-pix.min()))%0A if amp0 is None: # then estimate the initial guess for amplitude%0A amp0 = flux%5Bint_ctrd0%5D # flux at initial guess%0A%0A bg0 = np.array(%5B0.%5D * n_bg_coef)%0A bg0%5B0%5D = scoreatpercentile(flux%5Bflux%3E0%5D, 5.)%0A%0A if std_G0 is None:%0A std_G0 = 2. # MAGIC NUMBER%0A%0A if fwhm_L0 is None:%0A fwhm_L0 = 0.5 # MAGIC NUMBER%0A%0A if flux_ivar is None:%0A flux_ivar = 1.%0A%0A p0 = %5Bamp0, x0, std_G0, fwhm_L0%5D + bg0.tolist()%0A print(p0)%0A p_opt,p_cov,*_,mesg,ier = leastsq(errfunc, p0, args=(pix, flux, flux_ivar),%0A full_output=True)%0A print(p_opt)%0A%0A # res = minimize(_errfunc, x0=p0, args=(pix_grid, flux, flux_ivar))%0A # p = res.x%0A%0A fit_amp, fit_x0, fit_std_G, fit_fwhm_L, *fit_bg = p_opt%0A%0A fail_msg = %22Fitting spectral line in comp lamp spectrum failed. 
%7Bmsg%7D%22%0A%0A if ier %3C 1 or ier %3E 4:%0A raise RuntimeError(fail_msg.format(msg=mesg))%0A%0A if fit_x0 %3C min(pix) or fit_x0 %3E max(pix):%0A raise ValueError(fail_msg.format(msg=%22Unphysical peak centroid: %7B:.3f%7D%22.format(fit_x0)))%0A%0A return dict(amp=fit_amp, x_0=fit_x0,%0A std_G=fit_std_G, fwhm_L=fit_fwhm_L,%0A bg_coef=fit_bg)%0A
|
|
21e411171e811e1b68ad3674567ecb05f6f7a7ad
|
add migrations
|
cmsplugin_contact_plus/migrations/0004_auto_20170410_1553.py
|
cmsplugin_contact_plus/migrations/0004_auto_20170410_1553.py
|
Python
| 0.000001
|
@@ -0,0 +1,700 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('cmsplugin_contact_plus', '0003_auto_20161102_1927'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='extrafield',%0A name='placeholder',%0A field=models.CharField(max_length=250, null=True, verbose_name='Placeholder Value', blank=True),%0A ),%0A migrations.AlterField(%0A model_name='contactplus',%0A name='recipient_email',%0A field=models.EmailField(default=b'', max_length=254, verbose_name='Email of recipients'),%0A ),%0A %5D%0A
|
|
fe18d3387f7f8072b4f23990e5108f646729a860
|
Create pKaKs2.7.py
|
Modules/pKaKs2.7.py
|
Modules/pKaKs2.7.py
|
Python
| 0.000002
|
@@ -0,0 +1,828 @@
+#This short script uses the output values of KaKs.pl & SnpEff to calculate mutational load using Nei-Gojobori: pKa/Ks = %5B-3/4ln(1-4pn/3)%5D / %5B-3/4ln(1-4ps/3)%5D, where ps = syn SNPs / syn sites and pn = nonsyn SNPs / nonsyn sites%0A%0Afrom math import log #If for some reason you need to calculate the logarithm of a negative number, import cmath instead.%0Aimport ConfigParser%0A%0Aconfig = ConfigParser.RawConfigParser()%0Aconfig.read(%22config.ini%22)%0AnonSyn_site = float(config.get(%22myvars%22, %22non-synonymous_number%22))%0ASyn_site = float(config.get(%22myvars%22, %22synonymous_number%22))%0AnonSyn_SNP = float(config.get(%22myvars%22, %22non-synonymous_snp%22))%0ASyn_SNP = float(config.get(%22myvars%22, %22synonymous_snp%22))%0A%0Apn = nonSyn_SNP/nonSyn_site%0Aps = Syn_SNP/Syn_site%0A%0Aprint %22The pKs/Ks ratio for this organism is:%22, (-3/4*log(1-(4*pn)/3))/(-3/4*log(1-(4*ps)/3))%0A
|
|
61e56ad3feecef6fe422db8fb5d7b9b26dc03d6a
|
Add day 3 part 2.
|
day3-2.py
|
day3-2.py
|
Python
| 0.000227
|
@@ -0,0 +1,891 @@
+%22%22%22This module checks how many valid triangles are in the input data.%22%22%22%0A%0A%0Adef main():%0A %22%22%22Run main function.%22%22%22%0A with open('data/day3data.txt', 'r') as f:%0A input = f.readlines()%0A%0A dataList = %5Bmap(int, i.strip('%5Cn').split()) for i in input%5D%0A%0A # Transpose the data.%0A dataList = %5Blist(i) for i in zip(*dataList)%5D%0A%0A # Flatten the list.%0A triList = %5Bitem for sublist in dataList for item in sublist%5D%0A%0A triangles = 0%0A%0A for i in range(0, len(triList), 3):%0A print(%5BtriList%5Bi%5D, triList%5Bi + 1%5D, triList%5Bi + 2%5D%5D)%0A if isTriangle(%5BtriList%5Bi%5D, triList%5Bi + 1%5D, triList%5Bi + 2%5D%5D):%0A triangles += 1%0A print('There are %7B%7D valid triagles.').format(triangles)%0A%0A%0Adef isTriangle(input):%0A %22%22%22Check if list of three sides is a triangle.%22%22%22%0A if 2 * max(input) %3C sum(input):%0A return True%0A return False%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
047541a111e9da5d59b47d40a528bc990bae6927
|
add scope expression
|
compiler/eLisp/eLisp/expr/scope.py
|
compiler/eLisp/eLisp/expr/scope.py
|
Python
| 0.000002
|
@@ -0,0 +1,3023 @@
+#!/usr/bin/env python%0A# -*- encoding: utf-8 -*-%0A#%0A# Copyright (c) 2015 ASMlover. All rights reserved.%0A#%0A# Redistribution and use in source and binary forms, with or without%0A# modification, are permitted provided that the following conditions%0A# are met:%0A#%0A# * Redistributions of source code must retain the above copyright%0A# notice, this list ofconditions and the following disclaimer.%0A#%0A# * Redistributions in binary form must reproduce the above copyright%0A# notice, this list of conditions and the following disclaimer in%0A# the documentation and/or other materialsprovided with the%0A# distribution.%0A#%0A# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS%0A# %22AS IS%22 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT%0A# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS%0A# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE%0A# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,%0A# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,%0A# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;%0A# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER%0A# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT%0A# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN%0A# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE%0A# POSSIBILITY OF SUCH DAMAGE.%0A%0Afrom eLisp.expr.procedure import (%0A primitive_procedure_names, primitive_procedure_values)%0Afrom eLisp.expr.util import pair_to_list%0A%0Aclass Environment(object):%0A def __init__(self, parent=None, bindings=None):%0A self.parent = parent%0A self.bindings = dict() if bindings is None else bindings%0A%0A def define_variable(self, symbol, val):%0A self.bindings%5Bsymbol.name%5D = val%0A%0A def set_variable_value(self, symbol, val):%0A name = symbol.name%0A if name in self.bindings:%0A self.bindings%5Bname%5D = val%0A elif self.parent is not 
None:%0A self.parent.set_variable_value(symbol, val)%0A else:%0A raise Number('Unbound variable - SET! %25s' %25 name)%0A%0A def load(self, symbol):%0A name = symbol.name%0A if name in self.bindings:%0A return self.bindings%5Bname%5D%0A%0A if self.parent is not None:%0A return self.parent.load(symbol)%0A%0A return None%0A%0A def setup_environment():%0A bindings = dict(zip(%0A primitive_procedure_names(), primitive_procedure_values()))%0A return Environment(bindings=bindings)%0A%0A def define_variable(var, val, env):%0A env.define_variable(var, val)%0A%0A def lookup_variable_value(var, env):%0A val = env.load(var)%0A if val is None:%0A raise NameError('Unbound variable: %25s' %25 var)%0A return val%0A%0A def extend_environment(variables, values, env):%0A bindings = dict(zip(%0A %5Bvar.name for var in pair_to_list(variables)%5D, %0A pair_to_list(values)))%0A env = Environment(parent=env, bindings=bindings)%0A return env%0A
|
|
2d05a12a9b9534ad1925e7d543e6f66d8a79d3f8
|
Initialize P02_deleteBigFiles
|
books/AutomateTheBoringStuffWithPython/Chapter09/PracticeProjects/P02_deleteBigFiles.py
|
books/AutomateTheBoringStuffWithPython/Chapter09/PracticeProjects/P02_deleteBigFiles.py
|
Python
| 0.000014
|
@@ -0,0 +1,610 @@
+# t%E2%80%99s not uncommon for a few unneeded but humongous files or folders to take up the%0A# bulk of the space on your hard drive. If you%E2%80%99re trying to free up room on your%0A# computer, you%E2%80%99ll get the most bang for your buck by deleting the most massive of%0A# the unwanted files. But first you have to find them.%0A#%0A# Write a program that walks through a folder tree and searches for exceptionally%0A# large files or folders%E2%80%94say, ones that have a file size of more than 100MB.%0A# (Remember, to get a file%E2%80%99s size, you can use os.path.getsize() from the os module.)%0A# Print these files with their absolute path to the screen.%0A
|
|
cee924604f070bd1bbca33dda53c5783e2678c5d
|
Add tests for sagepay
|
payments/sagepay/test_sagepay.py
|
payments/sagepay/test_sagepay.py
|
Python
| 0
|
@@ -0,0 +1,2074 @@
+from __future__ import unicode_literals%0Afrom unittest import TestCase%0Afrom mock import patch, MagicMock%0A%0Afrom . import SagepayProvider%0A%0A%0AVENDOR = 'abcd1234'%0AENCRYPTION_KEY = '1234abdd1234abcd'%0A%0A%0Aclass Payment(MagicMock):%0A%0A id = 1%0A variant = 'sagepay'%0A currency = 'USD'%0A total = 100%0A status = 'waiting'%0A transaction_id = None%0A captured_amount = 0%0A billing_first_name = 'John'%0A%0A def get_process_url(self):%0A return 'http://example.com'%0A%0A def get_failure_url(self):%0A return 'http://cancel.com'%0A%0A def get_success_url(self):%0A return 'http://success.com'%0A%0A def change_status(self, status):%0A self.status = status%0A%0A%0A%0Aclass TestSagepayProvider(TestCase):%0A%0A def setUp(self):%0A self.payment = Payment()%0A self.provider = SagepayProvider(%0A vendor=VENDOR, encryption_key=ENCRYPTION_KEY)%0A%0A @patch('payments.sagepay.redirect')%0A def test_provider_raises_redirect_needed_on_success(self, mocked_redirect):%0A data = %7B'Status': 'OK'%7D%0A data = %22&%22.join(u%22%25s=%25s%22 %25 kv for kv in data.items())%0A with patch.object(SagepayProvider, 'aes_dec', return_value=data):%0A self.provider.process_data(self.payment, MagicMock())%0A self.assertEqual(self.payment.status, 'confirmed')%0A self.assertEqual(self.payment.captured_amount, self.payment.total)%0A%0A @patch('payments.sagepay.redirect')%0A def test_provider_raises_redirect_needed_on_failure(self, mocked_redirect):%0A data = %7B'Status': ''%7D%0A data = %22&%22.join(u%22%25s=%25s%22 %25 kv for kv in data.items())%0A with patch.object(SagepayProvider, 'aes_dec', return_value=data):%0A self.provider.process_data(self.payment, MagicMock())%0A self.assertEqual(self.payment.status, 'rejected')%0A self.assertEqual(self.payment.captured_amount, 0)%0A%0A def test_provider_encrypts_data(self):%0A data = self.provider.get_hidden_fields(self.payment)%0A decrypted_data = self.provider.aes_dec(data%5B'Crypt'%5D)%0A self.assertIn(self.payment.billing_first_name, 
decrypted_data)
|
|
a07e4d08b475e0d921265f9da104f109943901bc
|
Add lammps wrapper tests with cuds
|
simlammps/tests/cuds_test.py
|
simlammps/tests/cuds_test.py
|
Python
| 0
|
@@ -0,0 +1,1775 @@
+%22%22%22Tests for running lammps using CUDS and Simulation classes.%22%22%22%0Aimport unittest%0A%0Afrom simphony.core.cuba import CUBA%0Afrom simphony import CUDS, Simulation%0Afrom simphony.engine import EngineInterface%0Afrom simphony.testing.utils import create_particles_with_id%0Afrom simphony.cuds.particles import Particle, Particles%0A%0A%0Aclass LAMMPSCUDSTestCase(unittest.TestCase):%0A def setUp(self):%0A self.cuds = self.generate_cuds()%0A%0A def generate_cuds(self):%0A pset1 = create_particles_with_id(restrict=%5BCUBA.VELOCITY%5D)%0A pset2 = create_particles_with_id(restrict=%5BCUBA.VELOCITY%5D)%0A%0A ps1 = Particles('ps1')%0A ps2 = Particles('ps2')%0A%0A ps1.add_particles(pset1)%0A ps2.add_particles(pset2)%0A%0A c = CUDS()%0A c.add(ps1)%0A c.add(ps2)%0A%0A return c%0A%0A def test_create_lammps_internal_simulation(self):%0A self.assertRaisesRegexp(RuntimeError,%0A 'CUBAExtension.BOX_VECTORS',%0A Simulation,%0A self.cuds,%0A 'LAMMPS',%0A EngineInterface.Internal)%0A%0A def test_create_lammps_fileio_simulation(self):%0A sim = Simulation(self.cuds, 'LAMMPS', EngineInterface.FileIO)%0A%0A def test_create_liggghts_internal_simulation(self):%0A self.assertRaisesRegexp(RuntimeError,%0A 'DEM using the INTERNAL interface is not yet supported',%0A Simulation,%0A self.cuds,%0A 'LIGGGHTS',%0A EngineInterface.Internal)%0A%0A def test_create_liggghts_fileio_simulation(self):%0A sim = Simulation(self.cuds, 'LIGGGHTS', EngineInterface.FileIO)
|
|
257c5bffe1804d694510f5a4638de8e6ae6a1470
|
Create lstm_gan_mnist.py
|
lstm_gan_mnist.py
|
lstm_gan_mnist.py
|
Python
| 0.000004
|
@@ -0,0 +1,25 @@
+import tensorflow as tf %0A
|
|
7a813d21043c394ab10e1ddb687d7827a8b7e761
|
add slideshare plugin
|
plugins/slideshare/slideshare.py
|
plugins/slideshare/slideshare.py
|
Python
| 0
|
@@ -0,0 +1,2356 @@
+#!/usr/bin/env python%0A%0Aimport urllib2%0Aimport re%0Aimport urllib%0Aimport time%0Aimport sha%0Aimport BeautifulSoup%0Afrom BeautifulSoup import BeautifulStoneSoup %0A%0Afrom optparse import OptionParser%0A%0ATOTALIMPACT_SLIDESHARE_KEY = %22nyHCUoNM%22%0ATOTALIMPACT_SLIDESHARE_SECRET = %22z7sRiGCG%22%0AMENDELEY_DOI_URL = %22http://www.slideshare.net/api/2/get_slideshow?api_key=nyHCUoNM&detailed=1&ts=%25s&hash=%25s&slideshow_url=%25s%22%0A%0A%0Adef get_page(id):%0A if not id:%0A return(None)%0A ts = time.time()%0A hash_combo = sha.new(TOTALIMPACT_SLIDESHARE_SECRET + str(ts)).hexdigest()%0A url = MENDELEY_DOI_URL %25(ts, hash_combo, id)%0A print url%0A try:%0A page = urllib2.urlopen(url).read()%0A except urllib2.HTTPError, err:%0A if err.code == 404:%0A page = None%0A else:%0A raise %0A return(page) %0A%0Adef get_stats(page):%0A if not page:%0A return(None)%0A soup = BeautifulStoneSoup(page)%0A downloads = soup.numdownloads.text%0A views = soup.numviews.text%0A comments = soup.numcomments.text%0A favorites = soup.numfavorites.text%0A response = %7B%22downloads%22:downloads, %22views%22:views, %22comments%22:comments, %22favorites%22:favorites%7D%0A return(response) %0A %0A%0Afrom optparse import OptionParser%0A%0Adef main():%0A parser = OptionParser(usage=%22usage: %25prog %5Boptions%5D filename%22,%0A version=%22%25prog 1.0%22)%0A #parser.add_option(%22-x%22, %22--xhtml%22,%0A # action=%22store_true%22,%0A # dest=%22xhtml_flag%22,%0A # default=False,%0A # help=%22create a XHTML template instead of HTML%22)%0A (options, args) = parser.parse_args()%0A%0A if len(args) != 1:%0A parser.error(%22wrong number of arguments%22)%0A%0A print options%0A print args%0A %0A id = args%5B0%5D%0A page = get_page(id)%0A response = get_stats(page)%0A print response%0A%0A%0Aif __name__ == '__main__':%0A main()%0A%0A#example = %22http://www.slideshare.net/hpiwowar/7-data-citation-challenges-illustrated-with-data-includes-elephants%22%0A%0Amendeley_test_id = 
%22http://www.slideshare.net/hpiwowar/7-data-citation-challenges-illustrated-with-data-includes-elephants%22%0A#mendeley_test_doi = %2210.1371/journal.pcbi.1000361%22%0A#mendeley_test_doi = %2210.1371/journal.pmed.0040215%22%0A#mendeley_test_doi = %2210.1371/journal.pone.0000308%22%0A%0A#page = get_mendeley_page(mendeley_test_doi)%0A#response = get_stats(page)%0A#print response%0A
|
|
de2d21316ca47d1839584a7cccbe8026489ace7d
|
Change Schema.schema to a property
|
iati/core/schemas.py
|
iati/core/schemas.py
|
"""A module containing a core representation of IATI Schemas."""
from lxml import etree
import iati.core.exceptions
import iati.core.resources
import iati.core.utilities
class Schema(object):
    """Representation of a Schema as defined within the IATI SSOT.

    Attributes:
        name (str): The name of the Schema.
        schema (etree.XMLSchema): An actual Schema that can be used for validation.
            Remains None when no name is given, loading fails before assignment,
            or the loaded tree does not convert to an etree.XMLSchema.
        codelists (set): The Codelists associated with this Schema. This is a read-only attribute.

    Todo:
        Create a custom dictionary type that prevents overwriting values and only allows the correct types to be added.
    """
    def __init__(self, name=None):
        """Initialise a Schema.

        Args:
            name (str): The name of the schema being initialised.
                This name refers to a file contained within the core IATI resources folder.

        Raises:
            iati.core.exceptions.SchemaError: An error occurred during the creation of the Schema.

        Todo:
            Allow for generation of schemas outside the IATI SSOT.
            Better use the try-except pattern.
        """
        self.name = name
        self.schema = None
        self.codelists = set()

        if name:
            # Resolve the named schema to a file path inside the IATI resources folder.
            path = iati.core.resources.path_schema(self.name)
            try:
                loaded_tree = iati.core.resources.load_as_tree(path)
            except (IOError, OSError):
                # Log the failing path before signalling failure to the caller.
                msg = "Failed to load tree at '{0}' when creating Schema.".format(path)
                iati.core.utilities.log_error(msg)
                raise iati.core.exceptions.SchemaError
            else:
                # Convert the parsed tree into a usable XMLSchema; only assign
                # when conversion actually produced an XMLSchema, otherwise
                # self.schema silently stays None -- NOTE(review): confirm
                # callers expect None here rather than an exception.
                generated_schema = iati.core.utilities.convert_tree_to_schema(loaded_tree)
                if isinstance(generated_schema, etree.XMLSchema):
                    self.schema = generated_schema
|
Python
| 0
|
@@ -1173,22 +1173,28 @@
self.
+_
schema
+_base
= None%0A
@@ -1823,22 +1823,28 @@
self.
+_
schema
+_base
= gener
@@ -1855,8 +1855,916 @@
_schema%0A
+%0A @property%0A def schema(self):%0A %22%22%22A Schema that can be used for validation.%0A%0A Takes the base Schema that was loaded and dynamically injects elements for content checking.%0A%0A Raises:%0A TypeError: If a value being assigned is not an XMLSchema.%0A%0A Note:%0A Setting this property will set the base schema, ontop of which content checking is added through the associated Codelists.%0A%0A Todo:%0A Implement Codelist content checking.%0A%0A Implement Ruleset content checking.%0A %22%22%22%0A return self._schema_base%0A%0A @schema.setter%0A def schema(self, value):%0A if isinstance(value, etree.XMLSchema):%0A self._schema_base = value%0A else:%0A msg = %22Schemas must be of type XMLSchemas. Actual type: %7B0%7D%22.format(type(value))%0A iati.core.utilities.log_error(msg)%0A raise TypeError(msg)%0A
|
c388e6a4143b3646df5947cb5f596ec137488513
|
Add minimal skeleton for plotting script
|
plot.py
|
plot.py
|
Python
| 0
|
@@ -0,0 +1,398 @@
+#!/usr/bin/python%0A# -*- coding: utf-8 -*-%0A%0Aimport argparse%0Aimport matplotlib.pyplot as plt%0Aimport pandas as pd%0A%0Aparser = argparse.ArgumentParser(description='Plot data from output of the n-body simulation.')%0Aparser.add_argument('--output', type=str, default='output_int.dat',%0A help='The output file (default %25(default)s)')%0Aargs = parser.parse_args()%0A%0A%0Aif __name__ == '__main__':%0A
|
|
a7ad8f2075e7661ad9ed539083a8785f7a628b95
|
test 1
|
dashsniffer/sniff.py
|
dashsniffer/sniff.py
|
Python
| 0.000201
|
@@ -0,0 +1,82 @@
+def greet(name):%0A print 'Hello', name%0Agreet('Jack')%0Agreet('Jill')%0Agreet('Bob')%0A
|
|
faf13ff99fd47424c4fb93f1c2a6b3b80c81e0d1
|
replace bin<->text converters for ipv6
|
ryu/lib/ip.py
|
ryu/lib/ip.py
|
import struct
def ipv4_arg_to_bin(w, x, y, z):
    """Pack the four octets of an IPv4 address into one unsigned int.

    Equivalent to w << 24 | x << 16 | y << 8 | z.
    """
    value = 0
    for octet in (w, x, y, z):
        # Shift previous octets up one byte and OR in the next one.
        value = (value << 8) | octet
    return value


def ipv4_to_bin(ip):
    """Parse a dotted-decimal IPv4 address string into an unsigned int.

    e.g. '10.0.0.1' -> 0x0a000001
    """
    return ipv4_arg_to_bin(*map(int, ip.split('.')))
def ipv4_to_str(ip):
    """Render an unsigned int as a dotted-decimal IPv4 address string.

    ip: unsigned int of form w << 24 | x << 16 | y << 8 | z
    returns: address string 'w.x.y.z'
    """
    # Extract each octet from high to low and join with dots.
    octets = ((ip >> shift) & 0xff for shift in (24, 16, 8, 0))
    return '.'.join(str(octet) for octet in octets)
IPV6_PACK_STR = '!8H'


def ipv6_to_arg_list(ipv6):
    '''
    convert ipv6 string to a list of 8 different parts

    Handles the '::' zero-compression form, including addresses where one
    side of the '::' is empty (e.g. '::1', 'fe80::' or '::').

    Args:
        ipv6: (string) textual IPv6 address, groups in hexadecimal

    Returns:
        list of 8 ints, one per 16-bit group
    '''
    if '::' in ipv6:
        head, tail = ipv6.split('::')
        # Bug fix: an empty half (as in '::1' or '1::') must contribute no
        # groups; previously ''.split(':') yielded [''] and int('', 16)
        # raised ValueError.
        head_list = [int(x, 16) for x in head.split(':')] if head else []
        tail_list = [int(x, 16) for x in tail.split(':')] if tail else []
        # The '::' stands for as many zero groups as needed to reach 8.
        args = head_list + [0] * (8 - len(head_list) - len(tail_list)) + tail_list
    else:
        args = [int(x, 16) for x in ipv6.split(':')]
    return args


def ipv6_to_bin(ipv6):
    '''
    convert ipv6 string to binary representation

    Returns the 16-byte network-order packed form of the address.
    '''
    return struct.pack(IPV6_PACK_STR, *ipv6_to_arg_list(ipv6))
def ipv6_to_str(bin_addr):
    '''
    convert binary representation to human readable string

    Unpacks eight network-order 16-bit groups and renders them as
    lowercase hexadecimal, colon separated (no zero compression).
    '''
    groups = struct.unpack_from('!8H', bin_addr)
    return ':'.join(format(group, 'x') for group in groups)
|
Python
| 0
|
@@ -1,17 +1,32 @@
-import struct
+from ryu.lib import addrconv
%0A%0A%0Ad
@@ -771,509 +771,8 @@
z)%0A%0A
-IPV6_PACK_STR = '!8H'%0A%0A%0Adef ipv6_to_arg_list(ipv6):%0A '''%0A convert ipv6 string to a list of 8 different parts%0A '''%0A args = %5B%5D%0A if '::' in ipv6:%0A h, t = ipv6.split('::')%0A h_list = %5Bint(x, 16) for x in h.split(':')%5D%0A t_list = %5Bint(x, 16) for x in t.split(':')%5D%0A args += h_list%0A zero = %5B0%5D%0A args += ((8 - len(h_list) - len(t_list)) * zero)%0A args += t_list%0A else:%0A args = %5Bint(x, 16) for x in ipv6.split(':')%5D%0A%0A return args%0A%0A
%0Adef
@@ -868,81 +868,45 @@
-args = ipv6_to_arg_list(ipv6)%0A return struct.pack(IPV6_PACK_STR, *args
+return addrconv.ipv6.text_to_bin(ipv6
)%0A%0A%0A
@@ -1019,99 +1019,47 @@
-args = struct.unpack_from(IPV6_PACK_STR, bin_addr)%0A return ':'.join('%25x' %25 x for x in args
+return addrconv.ipv6.bin_to_text(bin_addr
)%0A
|
21ee2c1762629c826efa4306cffaf7f42b29ed29
|
modify comments
|
crosscat/utils/experiment_utils.py
|
crosscat/utils/experiment_utils.py
|
"""A framework for running engine-agnostic experiments.
The experiment engine must provide 'runner', 'reader', 'writer' functions and
optionally a function to identify 'config' files, is_config_file. The 'runner'
converts 'config's into 'result's. 'writer' and 'reader' serialize and
deserialize 'result's.
A 'result' must be a dictionary with at least one key: 'config' which includes
the actual config used to generate the 'result'.
is_config_file is for the special case of searching for results stored to disk.
In this case, 'reader' and 'writer' implicilty have a naming convention.
"""
import os
import operator
import functools
#
from crosscat.utils.file_utils import unpickle, ensure_dir
from crosscat.utils.general_utils import ensure_listlike, MapperContext, MyPool
def find_configs(dirname):
    """Searches a directory for files that contain 'config's

    Utilizes provided is_config_file. Looks ONLY in the specified directory,
    not recursively

    Args:
        dirname: (string) local filesystem directory to look in

    Returns:
        filepaths: (list of strings) list of filepaths that could be passed to
        'open'
    """
    # NOTE(review): depends on the module-global is_config_file, which in this
    # file is only bound inside the __main__ block; calling this as a library
    # function without binding it first raises NameError.
    def get_config_files((root, directories, filenames)):
        # Python 2 tuple-parameter syntax: unpacks one (root, dirs, files)
        # entry from os.walk directly in the signature.
        join = lambda filename: os.path.join(root, filename)
        filenames = map(join, filenames)
        return filter(is_config_file, filenames)
    def is_this_dirname(filepath):
        # Keeps paths whose grandparent is dirname, i.e. files one level
        # below dirname -- NOTE(review): despite the docstring, files located
        # directly inside dirname are filtered out here; confirm intended.
        _dir, _file = os.path.split(filepath)
        return os.path.split(_dir)[0] == dirname
    filepaths_list = map(get_config_files, os.walk(dirname))
    # Flatten the per-directory lists into one list (Python 2 map/filter
    # return lists, so reduce over list concatenation works here).
    filepaths = reduce(operator.add, filepaths_list)
    filepaths = filter(is_this_dirname, filepaths)
    return filepaths
def read_all_configs(dirname='./'):
    """Read every 'config' stored in result files within a directory

    Args:
        dirname: (string) local filesystem directory to look in

    Returns:
        config_list: (list of 'config's) list of all 'config's found
    """
    # Each serialized 'result' carries the 'config' that produced it under
    # its 'config' key; extract just that from every discovered file.
    return [unpickle(filepath)['config'] for filepath in find_configs(dirname)]
def read_results(config_list, dirname='./'):
    """Read the stored 'result' for each given 'config'

    Args:
        config_list: ('config' or list of 'config's) configs whose stored
            results should be loaded
        dirname: (string) local filesystem directory to look in

    Returns:
        results: (list of 'result's) one 'result' per 'config'
    """
    # NOTE(review): relies on the module-global 'reader' supplied by the
    # engine (bound in the __main__ block of this file).
    configs = ensure_listlike(config_list)
    return [reader(config, dirname) for config in configs]
def write_results(results, dirname='./'):
    """Serialize every 'result' into the specified directory

    Args:
        results: (list of 'result's) list of all 'result's to write
        dirname: (string) local filesystem directory to write to

    Returns:
        None
    """
    # NOTE(review): relies on the module-global 'writer' supplied by the
    # engine (bound in the __main__ block of this file).
    for result in results:
        writer(result, dirname)
    return
def do_experiment(config, runner, writer, dirname):
    """Run a single 'config' and persist its 'result'

    Args:
        config: ('config') 'config' to run with runner
        runner: ('config' -> 'result') function that takes config and returns
            result. This is where the computation occurs.
        writer: ('result' -> None) function that takes single result and writes
            it to local filesystem
        dirname: (string) local filesystem directory to write the serialized
            'result' to

    Returns:
        None
    """
    experiment_result = runner(config)
    writer(experiment_result, dirname)
    return
def do_experiments(config_list, runner, writer, dirname='./', mapper=map):
    """Runs and writes provided 'config's using provided runner, writer, mapper

    Same as do_experiment, but over a list of 'config's, fanned out through
    the supplied mapper.

    Args:
        config_list: ('config' or list of 'config's) 'config's to run with
            runner
        runner: ('config' -> 'result') function that takes config and returns
            result. This is where the computation occurs.
        writer: ('result' -> None) function that takes single result and writes
            it to local filesystem
        dirname: (string) local filesystem directory to write serialized
            'result's to
        mapper: (function, args -> outputs) mapper to use. Enables use of
            multiprocessing or ipython.parallel

    Returns:
        None
    """
    ensure_dir(dirname)
    # Accept a single 'config' as well as a list.
    config_list = ensure_listlike(config_list)
    # NOTE(review): functools.partial (rather than a closure/lambda) keeps
    # the callable picklable, which matters when mapper is backed by
    # multiprocessing -- confirm before restructuring.
    _do_experiment = functools.partial(do_experiment, runner=runner,
                                       writer=writer, dirname=dirname)
    mapper(_do_experiment, config_list)
    return
if __name__ == '__main__':
    # demonstrate using geweke_utils as the experiment engine
    import crosscat.utils.geweke_utils as geweke_utils

    # Bind the engine-provided hooks as module globals; the library functions
    # above (find_configs, read_results, write_results) look these names up
    # at call time.
    is_config_file = geweke_utils.is_summary_file
    writer = geweke_utils.write_result
    reader = geweke_utils.read_result
    runner = geweke_utils.run_geweke
    args_to_config = geweke_utils.args_to_config

    # Command-line-style argument lists; each entry becomes one 'config'.
    args_list = [
        ['--num_rows', '10', '--num_cols', '2', '--num_iters', '300', ],
        ['--num_rows', '10', '--num_cols', '3', '--num_iters', '300', ],
        ['--num_rows', '20', '--num_cols', '2', '--num_iters', '300', ],
        ['--num_rows', '20', '--num_cols', '3', '--num_iters', '300', ],
    ]
    dirname = 'my_expt_bank'
    # demonstrate generating experiments
    config_list = map(args_to_config, args_list)
    with MapperContext(Pool=MyPool) as mapper:
        do_experiments(config_list, runner, writer, dirname, mapper)
    # demonstrate reading experiments back, keeping only 3-column runs
    configs_list = read_all_configs(dirname)
    has_three_cols = lambda config: config['num_cols'] == 3
    configs_list = filter(has_three_cols, configs_list)
    results = read_results(configs_list, dirname)
|
Python
| 0
|
@@ -3857,24 +3857,186 @@
er, mapper%0A%0A
+ Same as do_experiment but takes list of 'config's and optional mapper%0A argument. Optional mapper argument allows multiprocessing or%0A IPython.parallel%0A%0A
Args:%0A
@@ -4513,73 +4513,8 @@
use
-. Enables use of%0A multiprocessing or ipython.parallel
%0A%0A
|
f189137d52b9f44db0e82723b0e7a16a602c6523
|
Create duplicate_encoder.py
|
duplicate_encoder.py
|
duplicate_encoder.py
|
Python
| 0.001774
|
@@ -0,0 +1,258 @@
+#Kunal Gautam%0A#Codewars : @Kunalpod%0A#Problem name: Duplicate Encoder%0A#Problem level: 6 kyu%0A%0Adef duplicate_encode(word. st=%22%22):%0A for char in word.lower():%0A if word.lower().count(char)%3E1: st = st + ')'%0A else: st = st + '('%0A return st%0A
|
|
488717ab6c84c771737a3b2ccfe8cbf4d270c9b7
|
Implement dragon class
|
mugloar/dragon.py
|
mugloar/dragon.py
|
Python
| 0.000027
|
@@ -0,0 +1,1455 @@
+import json%0A%0A%0Aclass Dragon:%0A%0A # By default, stay home.%0A scaleThickness = 0%0A clawSharpness = 0%0A wingStrength = 0%0A fireBreath = 0%0A%0A def __init__(self, weather_code):%0A if weather_code == 'T E':%0A # Draught requires a 'balanced' dragon, ha ha%0A self.scaleThickness = 5%0A self.clawSharpness = 5%0A self.wingStrength = 5%0A self.fireBreath = 5%0A elif weather_code == 'FUNDEFINEDG':%0A # Fog means we're unseen, no need to fly%0A self.scaleThickness = 8%0A self.clawSharpness = 8%0A self.wingStrength = 0%0A self.fireBreath = 4%0A elif weather_code == 'NMR':%0A self.scaleThickness = 6%0A self.clawSharpness = 6%0A self.wingStrength = 4%0A self.fireBreath = 4%0A elif weather_code == 'SRO':%0A # Stay at home if there's a storm.%0A pass%0A else:%0A # Fire is useless in the rain. Additional claw-sharpening is needed to destroy the umbrellaboats%0A self.scaleThickness = 5%0A self.clawSharpness = 10%0A self.wingStrength = 5%0A self.fireBreath = 0%0A%0A def get_json(self):%0A return %7B%22dragon%22: %7B%0A %22scaleThickness%22: self.scaleThickness,%0A %22clawSharpness%22: self.clawSharpness,%0A %22wingStrength%22: self.wingStrength,%0A %22fireBreath%22: self.fireBreath%7D%7D%0A
|
|
5f092edf48828f61042c78878474b8c85b62fbdd
|
Bump version to turn on SET_MAX_FPS.
|
o3d/installer/win/o3d_version.py
|
o3d/installer/win/o3d_version.py
|
#!/usr/bin/python2.4
# Copyright 2008-9 Google Inc. All Rights Reserved.

# version = (major, minor, trunk, patch)
plugin_version = (0, 1, 43, 1)
# The SDK is versioned in lockstep with the plugin (same tuple).
sdk_version = plugin_version
|
Python
| 0.000001
|
@@ -136,17 +136,17 @@
1, 43,
-1
+2
)%0Asdk_ve
|
beeb3065e2d366dd68021eb5f55c94e2c61684e4
|
add experiment script
|
ftrl/single_feature_experiment.py
|
ftrl/single_feature_experiment.py
|
Python
| 0.000001
|
@@ -0,0 +1,208 @@
+import subprocess%0A%0Afor i in range(3, 23):%0A print %22%5Cn%5Cn%5Cnrun field %22 + str(i) + %22%5Cn%22%0A subprocess.call(%22python ftrl/ftrl.py train.raw.csv test.raw.csv submission.csv %7B0%7D%22.format(i).split(%22 %22), shell=True)
|
|
e5f130c1f006d2b96ca81be5a9f66c15b97b8793
|
Create sol2.py
|
project_euler/problem_12/sol2.py
|
project_euler/problem_12/sol2.py
|
Python
| 0.000044
|
@@ -0,0 +1,278 @@
+def triangle_number_generator(): %0A for n in range(1,1000000):%0A yield n*(n+1)//2%0A %0Adef count_divisors(n): %0A return sum(%5B2 for i in range(1,int(n**0.5)+1) if n%25i==0 and i*i != n%5D)%0A%0Aprint(next(i for i in triangle_number_generator() if count_divisors(i) %3E 500))%0A
|
|
5938a5d751bcac40eac2bf7f7090e1970f097ebc
|
Add py-rq (#19175)
|
var/spack/repos/builtin/packages/py-rq/package.py
|
var/spack/repos/builtin/packages/py-rq/package.py
|
Python
| 0
|
@@ -0,0 +1,1677 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass PyRq(PythonPackage):%0A %22%22%22RQ (Redis Queue) is a simple Python library for queueing%0A jobs and processing them in the background with workers.%22%22%22%0A%0A homepage = %22https://github.com/rq/rq%22%0A url = %22https://github.com/rq/rq/archive/v1.5.2.tar.gz%22%0A%0A version('1.5.2', sha256='e8e7b6ffc4a962837eaff8eb0137514346e629633bf08550a1649771cdc4ace6')%0A version('1.5.1', sha256='36ca5cd2762b5b15bb176943f77da933fac6c2f4e5b5d47a0475f918c167fd4c')%0A version('1.5.0', sha256='97443acd8aab1c273710f74db197838f68a0678f9cabb64c3598dfb816d35e13')%0A version('1.4.3', sha256='a971aa16d346d1c145442af3bfb171ea620f375d240fbade3c42c2246d3d698a')%0A version('1.4.2', sha256='478bd19ac4f66d3066459f5e8253cf5f477bfe128f69ed952f7565cb530ac6a4')%0A version('1.4.1', sha256='fe158e3d9d4efe533f5698738f14e975656e396cd280c6acfd45952dc5ddfc66')%0A version('1.4.0', sha256='03cd39392d31d00205bd1d84930e9b7aefc5d3ac9770c59092bdd8a94fc8a47d')%0A version('1.3.0', sha256='ce94d07125b96313e8c4512b30c62da290ae6f5eeff60b8c3e2a0a08055f5608')%0A version('1.2.2', sha256='ea71f805d4e3b972b4df5545529044df4bc0fbae30814a48bc28d8d0a39c0068')%0A version('1.2.1', sha256='0b38344cda68710e572df9c70b733e95f1cdf13ce727a970f68307cedc98376a')%0A%0A depends_on('python@3.5:3.8', type=('build', 'run'))%0A depends_on('py-setuptools', type='build')%0A depends_on('py-redis@3.5.0:', type=('build', 'run'))%0A depends_on('py-click@5.0.0:', type=('build', 'run'))%0A
|
|
be3428c9fe6de7741cec7f3899bcc71049b113ca
|
Create HR_IntroToConditionalStatements.py
|
HR_IntroToConditionalStatements.py
|
HR_IntroToConditionalStatements.py
|
Python
| 0
|
@@ -0,0 +1,232 @@
+#!/bin/python3%0A%0Aimport math%0Aimport os%0Aimport random%0Aimport re%0Aimport sys%0A%0A%0A%0Aif __name__ == '__main__':%0A #N = int(input())%0A # One liner in a lambda function%0A print((lambda N:'Weird' if N %25 2 else 'Not Weird')(int(input())))%0A
|
|
9f2fc78155dc725842ebbc82203994e26d1c7333
|
Add marv_ros skeleton for ROS specific code
|
code/marv-robotics/marv_ros/__init__.py
|
code/marv-robotics/marv_ros/__init__.py
|
Python
| 0
|
@@ -0,0 +1,69 @@
+# Copyright 2019 Ternaris.%0A# SPDX-License-Identifier: AGPL-3.0-only%0A
|
|
cb29ce461eb143dc44b244576b153a0b7a3b1a7d
|
Create missing_element.py
|
missing_element.py
|
missing_element.py
|
Python
| 0.000189
|
@@ -0,0 +1,299 @@
+%22%22%22%0AThere is an array of non-negative integers.%0AA second array is formed by shuffling the elements of the first array and deleting a random element.%0AGiven these two arrays, find which element is missing in the second array.%0Ahttp://www.ardendertat.com/2012/01/09/programming-interview-questions/%0A%22%22%22%0A
|
|
527288828306c3620442e611fc9fb23180ee09fe
|
Add remove-nth-node-from-end-of-list
|
remove-nth-node-from-end-of-list.py
|
remove-nth-node-from-end-of-list.py
|
Python
| 0.000003
|
@@ -0,0 +1,697 @@
+# Link: https://leetcode.com/problems/remove-nth-node-from-end-of-list/%0A# Definition for singly-linked list.%0A# class ListNode:%0A# def __init__(self, x):%0A# self.val = x%0A# self.next = None%0A%0Aclass Solution:%0A # @param %7BListNode%7D head%0A # @param %7Binteger%7D n%0A # @return %7BListNode%7D%0A def removeNthFromEnd(self, head, n):%0A if not head or not head.next:%0A return None%0A%0A fast = head%0A for i in range(0, n):%0A fast = fast.next%0A%0A if not fast:%0A return head.next%0A%0A slow = head%0A while fast.next:%0A fast = fast.next%0A slow = slow.next%0A%0A slow.next = slow.next.next%0A%0A return head%0A
|
|
066a65aa0d60a6f83e13798eb74848bbffd199f6
|
Update remove-comments.py
|
Python/remove-comments.py
|
Python/remove-comments.py
|
# Time: O(n), n is the length of the source
# Space: O(k), k is the max length of a line.
# Given a C++ program, remove comments from it.
# The program source is an array where source[i] is the i-th line of the source code.
# This represents the result of splitting the original source code string by the newline character \n.
#
# In C++, there are two types of comments, line comments, and block comments.
#
# The string // denotes a line comment, which represents that it and
# rest of the characters to the right of it in the same line should be ignored.
#
# The string /* denotes a block comment,
# which represents that all characters until the next (non-overlapping) occurrence of */
# should be ignored. (Here, occurrences happen in reading order: line by line from left to right.)
# To be clear, the string /*/ does not yet end the block comment, as the ending would be overlapping the beginning.
#
# The first effective comment takes precedence over others:
# if the string // occurs in a block comment, it is ignored.
# Similarly, if the string /* occurs in a line or block comment, it is also ignored.
#
# If a certain line of code is empty after removing comments,
# you must not output that line: each string in the answer list will be non-empty.
#
# There will be no control characters, single quote, or double quote characters.
# For example, source = "string s = "/* Not a comment. */";" will not be a test case.
# (Also, nothing else such as defines or macros will interfere with the comments.)
#
# It is guaranteed that every open block comment will eventually be closed,
# so /* outside of a line or block comment always starts a new comment.
#
# Finally, implicit newline characters can be deleted by block comments. Please see the examples below for details.
#
# After removing the comments from the source code, return the source code in the same format.
#
# Example 1:
# Input:
# source = ["/*Test program */", "int main()", "{ ", " // variable declaration ", "int a, b, c;", "/* This is a test", " multiline ", " comment for ", " testing */", "a = b + c;", "}"]
#
# The line by line code is visualized as below:
# /*Test program */
# int main()
# {
# // variable declaration
# int a, b, c;
# /* This is a test
# multiline
# comment for
# testing */
# a = b + c;
# }
#
# Output: ["int main()","{ "," ","int a, b, c;","a = b + c;","}"]
#
# The line by line code is visualized as below:
# int main()
# {
# int a, b, c;
# a = b + c;
# }
# Explanation:
# The string
# /*
# denotes a block comment, including line 1 and lines 6-9. The string
# //
# denotes line 4 as comments.
#
# Example 2:
# Input:
# source = ["a/*comment", "line", "more_comment*/b"]
# Output: ["ab"]
# Explanation: The original source string is "a/*comment\nline\nmore_comment*/b",
# where we have bolded the newline characters.
# After deletion, the implicit newline characters are deleted,
# leaving the string "ab", which when delimited by newline characters becomes ["ab"].
#
# Note:
# - The length of source is in the range [1, 100].
# - The length of source[i] is in the range [0, 80].
# - Every open block comment is eventually closed.
# - There are no single-quote, double-quote, or control characters in the source code.
class Solution(object):
def removeComments(self, source):
"""
:type source: List[str]
:rtype: List[str]
"""
in_block = False
result = []
for line in source:
i = 0
if not in_block:
newline = []
while i < len(line):
if not in_block and i+1 < len(line) and line[i:i+2] == '/*':
in_block = True
i += 1
elif in_block and i+1 < len(line) and line[i:i+2] == '*/':
in_block = False
i += 1
elif not in_block and i+1 < len(line) and line[i:i+2] == '//':
break
elif not in_block:
newline.append(line[i])
i += 1
if newline and not in_block:
result.append("".join(newline))
return result
|
Python
| 0
|
@@ -82,17 +82,16 @@
f a line
-.
%0A%0A# Give
@@ -3445,18 +3445,31 @@
result
- =
+, newline = %5B%5D,
%5B%5D%0A
@@ -3496,26 +3496,8 @@
ce:%0A
- i = 0%0A
@@ -3513,32 +3513,32 @@
f not in_block:%0A
-
@@ -3546,24 +3546,42 @@
ewline = %5B%5D%0A
+ i = 0%0A
|
90218ad99cf9d9f4599f065790ac4d388adc3521
|
Add markup template filter.
|
blog/templatetags/markup.py
|
blog/templatetags/markup.py
|
Python
| 0
|
@@ -0,0 +1,302 @@
+from django import template%0Afrom django.template.defaultfilters import stringfilter%0Afrom django.utils.safestring import mark_safe%0A%0Afrom blog.models import markup%0A%0A%0Aregister = template.Library()%0A%0A%0A@register.filter%0A@stringfilter%0Adef markup(value):%0A result = markup(value)%0A return mark_safe(result)%0A
|
|
28ed25a30ed495cce2d5ace3ac12c00f35f9dbcd
|
Add AmazonDriver
|
calplus/v1/object_storage/drivers/amazon.py
|
calplus/v1/object_storage/drivers/amazon.py
|
Python
| 0.000001
|
@@ -0,0 +1,3012 @@
+import boto3%0A%0Afrom calplus.v1.object_storage.drivers.base import BaseDriver, BaseQuota%0A%0A%0APROVIDER = 'AMAZON'%0A%0A%0Aclass AmazonDriver(BaseDriver):%0A %22%22%22AmazonDriver for Object Storage%22%22%22%0A%0A def __init__(self, cloud_config):%0A super(AmazonDriver, self).__init__()%0A self.aws_access_key_id = cloud_config%5B'aws_access_key_id'%5D%0A self.aws_secret_access_key = cloud_config%5B'aws_secret_access_key'%5D%0A self.endpoint_url = cloud_config%5B'endpoint_url'%5D%0A self.region_name = cloud_config.get('region_name', None)%0A self.driver_name = %5C%0A cloud_config.get('driver_name', 'default')%0A self.limit = cloud_config.get('limit', None)%0A self._setup()%0A%0A def _setup(self):%0A parameters = %7B%0A 'aws_access_key_id': self.aws_access_key_id,%0A 'aws_secret_access_key': self.aws_secret_access_key,%0A 'region_name': self.region_name,%0A 'endpoint_url': self.endpoint_url%0A %7D%0A%0A self.client = boto3.client('s3', **parameters)%0A self.quota = AmazonQuota(self.client, self.limit)%0A%0A def create_container(self, container, **kwargs):%0A return self.client.create_bucket(Bucket=container, **kwargs)%0A%0A def delete_container(self, container):%0A return self.client.delete_bucket(Bucket=container)%0A%0A def list_containers(self):%0A return self.client.list_buckets()%0A%0A def stat_container(self, container):%0A return self.client.head_bucket(Bucket=container)%0A%0A def update_container(self, container, headers, **kwargs):%0A pass%0A%0A def upload_object(self, container, obj, contents,%0A content_length=None, **kwargs):%0A return self.client.put_object(Bucket=container, Key=obj,%0A ContentLength=content_length,%0A Body=contents)%0A%0A def download_object(self, container, obj, **kwargs):%0A return self.client.get_object(Bucket=container, Key=obj)%0A%0A def stat_object(self, container, obj):%0A return self.client.head_object(Bucket=container, Key=obj)%0A%0A def delete_object(self, container, obj, **kwargs):%0A return 
self.client.delete_object(Bucket=container, Key=obj,%0A **kwargs)%0A%0A def list_container_objects(self, container):%0A return self.client.list_objects(Bucket=container)%0A%0A def update_object(self, container, obj, headers, **kwargs):%0A pass%0A%0A def copy_object(self, container, obj, destination=None, **kwargs):%0A copysource = %7B%0A 'Bucket': container,%0A 'Key': obj%0A %7D%0A%0A return self.client.copy_object(Bucket=container, Key=destination,%0A CopySource=copysource)%0A%0A%0Aclass AmazonQuota(BaseQuota):%0A %22%22%22AmazonQuota for ObjectStorage%22%22%22%0A%0A def __init__(self, client, limit=None):%0A super(AmazonQuota, self).__init__()%0A self.client = client%0A self.limit = limit%0A self._setup()%0A%0A def _setup(self):%0A pass%0A
|
|
75ec71f313e43bbadc0441d7231fbdfb0e4dfe7b
|
add default OPENSTACK_KEYSTONE_DEFAULT_ROLE='Member'. You must also make sure that this role exists in keystone: ./keystone-manage $* role add Member
|
openstack-dashboard/dashboard/settings.py
|
openstack-dashboard/dashboard/settings.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Fourth Paradigm Development, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import sys
ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(ROOT_PATH)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SITE_ID = 1
SITE_BRANDING = 'OpenStack'
SITE_NAME = 'openstack'
ENABLE_VNC = True
LOGIN_URL = '/auth/login'
LOGIN_REDIRECT_URL = '/'
MEDIA_ROOT = os.path.join(ROOT_PATH, '..', 'media')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/media/admin/'
CREDENTIAL_AUTHORIZATION_DAYS = '5'
ROOT_URLCONF = 'dashboard.urls'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django_openstack.middleware.keystone.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'dashboard.middleware.DashboardLogUnhandledExceptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
#'django.core.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.contrib.messages.context_processors.messages',
'django_openstack.context_processors.tenants',
'django_openstack.context_processors.swift',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
TEMPLATE_DIRS = (
os.path.join(ROOT_PATH, 'templates'),
)
INSTALLED_APPS = (
'dashboard',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.comments',
'django.contrib.sites',
'django.contrib.markup',
'django.contrib.syndication',
'django_openstack',
'django_openstack.templatetags',
'mailer',
)
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
TIME_ZONE = None
gettext_noop = lambda s: s
LANGUAGES = (
('en', gettext_noop('English')),
('en-gb', gettext_noop('British English')),
('es', gettext_noop('Spanish')),
('fr', gettext_noop('French')),
('ja', gettext_noop('Japanese')),
('pt', gettext_noop('Portuguese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
LANGUAGE_CODE = 'en'
USE_I18N = True
ACCOUNT_ACTIVATION_DAYS = 7
TOTAL_CLOUD_RAM_GB = 10
try:
from local.local_settings import *
except Exception, e:
logging.exception(e)
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
|
Python
| 0.000009
|
@@ -3565,8 +3565,50 @@
.DEBUG)%0A
+%0AOPENSTACK_KEYSTONE_DEFAULT_ROLE='Member'%0A
|
b78518df363fb1cb398c70920f219ca9be78f816
|
Test another implementation of scipy's _spectral
|
pythran/tests/scipy/_spectral.py
|
pythran/tests/scipy/_spectral.py
|
Python
| 0
|
@@ -0,0 +1,2071 @@
+# Author: Pim Schellart%0A# 2010 - 2011%0A%0A%22%22%22Tools for spectral analysis of unequally sampled signals.%22%22%22%0A%0Aimport numpy as np%0A%0A#pythran export _lombscargle(float64%5B%5D, float64%5B%5D, float64%5B%5D)%0A##runas import numpy; x = numpy.arange(2., 12.); y = numpy.arange(1., 11.); z = numpy.arange(3., 13.); _lombscargle(x, y, z)%0A%0Adef _lombscargle(x, y, freqs):%0A %22%22%22%0A _lombscargle(x, y, freqs)%0A%0A Computes the Lomb-Scargle periodogram.%0A%0A Parameters%0A ----------%0A x : array_like%0A Sample times.%0A y : array_like%0A Measurement values (must be registered so the mean is zero).%0A freqs : array_like%0A Angular frequencies for output periodogram.%0A%0A Returns%0A -------%0A pgram : array_like%0A Lomb-Scargle periodogram.%0A%0A Raises%0A ------%0A ValueError%0A If the input arrays %60x%60 and %60y%60 do not have the same shape.%0A%0A See also%0A --------%0A lombscargle%0A%0A %22%22%22%0A%0A # Check input sizes%0A if x.shape != y.shape:%0A raise ValueError(%22Input arrays do not have the same size.%22)%0A%0A # Create empty array for output periodogram%0A pgram = np.empty_like(freqs)%0A%0A c = np.empty_like(x)%0A s = np.empty_like(x)%0A%0A for i in range(freqs.shape%5B0%5D):%0A%0A xc = 0.%0A xs = 0.%0A cc = 0.%0A ss = 0.%0A cs = 0.%0A%0A c%5B:%5D = np.cos(freqs%5Bi%5D * x)%0A s%5B:%5D = np.sin(freqs%5Bi%5D * x)%0A%0A for j in range(x.shape%5B0%5D):%0A xc += y%5Bj%5D * c%5Bj%5D%0A xs += y%5Bj%5D * s%5Bj%5D%0A cc += c%5Bj%5D * c%5Bj%5D%0A ss += s%5Bj%5D * s%5Bj%5D%0A cs += c%5Bj%5D * s%5Bj%5D%0A%0A if freqs%5Bi%5D == 0:%0A raise ZeroDivisionError()%0A%0A tau = np.arctan2(2 * cs, cc - ss) / (2 * freqs%5Bi%5D)%0A c_tau = np.cos(freqs%5Bi%5D * tau)%0A s_tau = np.sin(freqs%5Bi%5D * tau)%0A c_tau2 = c_tau * c_tau%0A s_tau2 = s_tau * s_tau%0A cs_tau = 2 * c_tau * s_tau%0A%0A pgram%5Bi%5D = 0.5 * (((c_tau * xc + s_tau * xs)**2 / %5C%0A (c_tau2 * cc + cs_tau * cs + s_tau2 * ss)) + %5C%0A ((c_tau * xs - s_tau * xc)**2 / %5C%0A (c_tau2 * ss - 
cs_tau * cs + s_tau2 * cc)))%0A%0A return pgram%0A
|
|
a1e679b4b0802f1c40d08f1f7cba212b13de61a4
|
Create testing2.py
|
myPack/testing2.py
|
myPack/testing2.py
|
Python
| 0.000001
|
@@ -0,0 +1,56 @@
+import aldmbmtl%0A%0Aaldmbmtl.toolbox.myPack.testing.test()%0A
|
|
0ae07ef204806ab45b746df16371c3925ea894e9
|
Create problem6.py
|
Project-Euler/Problem6/problem6.py
|
Project-Euler/Problem6/problem6.py
|
Python
| 0.000035
|
@@ -0,0 +1,941 @@
+%22%22%22%0A%5Bref.href%5D https://projecteuler.net/problem=6%0A%0ASum square difference.%0A%0AThe sum of the squares of the first ten natural numbers is:%0A%0A 1%5E2 + 2%5E2 + ... + 10%5E2 = 385%0A%0AThe square of the sum of the first ten natural numbers is:%0A%0A (1 + 2 + ... + 10)%5E2 = 55%5E2 = 3025%0A%0AHence the difference between the sum of the squares of the%0Afirst ten natural numbers and the square of the sum is:%0A%0A 3025 - 385 = 2640.%0A%0AFind the difference between the sum of the squares of the%0Afirst one hundred natural numbers and the square of the sum.%0A%22%22%22%0AsmallestNaturalNumber = 1%0AnaturalNumberCount = 100%0Anums = range(smallestNaturalNumber,%0A naturalNumberCount + smallestNaturalNumber)%0Asquares = map(lambda x : x ** 2, nums)%0AsumOfSquares = sum(squares)%0AsquaredSumOfNums = sum(nums) ** 2%0Adiff = squaredSumOfNums - sumOfSquares%0Aprint %22The difference between the sum of squares of the first %22%5C%0A + str(naturalNumberCount) + %22 is %22 + str(diff) + %22.%22%0A
|
|
8b4304d5eb80e8506aa19dc412c0c5bade993eea
|
change csv exporter to check flag inmediately instead of calling another function
|
scrapy/contrib/exporter/__init__.py
|
scrapy/contrib/exporter/__init__.py
|
"""
Item Exporters are used to export/serialize items into different formats.
"""
import csv
import pprint
from cPickle import Pickler
from xml.sax.saxutils import XMLGenerator
__all__ = ['BaseItemExporter', 'PprintItemExporter', 'PickleItemExporter', \
'CsvItemExporter', 'XmlItemExporter']
class BaseItemExporter(object):
def __init__(self, **kwargs):
self._configure(kwargs)
def _configure(self, options, dont_fail=False):
"""Configure the exporter by poping options from the ``options`` dict.
If dont_fail is set, it won't raise an exception on unexpected options
(useful for using with keyword arguments in subclasses constructors)
"""
self.fields_to_export = options.pop('fields_to_export', None)
self.export_empty_fields = options.pop('export_empty_fields', False)
self.encoding = options.pop('encoding', 'utf-8')
if not dont_fail and options:
raise TypeError("Unexpected options: %s" % ', '.join(options.keys()))
def export_item(self, item):
raise NotImplementedError
def serialize_field(self, field, name, value):
serializer = field.get('serializer', self._to_str_if_unicode)
return serializer(value)
def start_exporting(self):
pass
def finish_exporting(self):
pass
def _to_str_if_unicode(self, value):
return value.encode(self.encoding) if isinstance(value, unicode) else value
def _get_serialized_fields(self, item, default_value=None, include_empty=None):
"""Return the fields to export as an iterable of tuples (name,
serialized_value)
"""
if include_empty is None:
include_empty = self.export_empty_fields
if self.fields_to_export is None:
if include_empty:
field_iter = item.fields.iterkeys()
else:
field_iter = item.iterkeys()
else:
if include_empty:
field_iter = self.fields_to_export
else:
nonempty_fields = set(item.keys())
field_iter = (x for x in self.fields_to_export if x in \
nonempty_fields)
for field_name in field_iter:
if field_name in item:
field = item.fields[field_name]
value = self.serialize_field(field, field_name, item[field_name])
else:
value = default_value
yield field_name, value
class XmlItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self.item_element = kwargs.pop('item_element', 'item')
self.root_element = kwargs.pop('root_element', 'items')
self._configure(kwargs)
self.xg = XMLGenerator(file, encoding=self.encoding)
def start_exporting(self):
self.xg.startDocument()
self.xg.startElement(self.root_element, {})
def export_item(self, item):
self.xg.startElement(self.item_element, {})
for name, value in self._get_serialized_fields(item, default_value=''):
self._export_xml_field(name, value)
self.xg.endElement(self.item_element)
def finish_exporting(self):
self.xg.endElement(self.root_element)
self.xg.endDocument()
def _export_xml_field(self, name, serialized_value):
self.xg.startElement(name, {})
self.xg.characters(serialized_value)
self.xg.endElement(name)
class CsvItemExporter(BaseItemExporter):
def __init__(self, file, include_headers_line=True, **kwargs):
self._configure(kwargs, dont_fail=True)
self.include_headers_line = include_headers_line
self.csv_writer = csv.writer(file, **kwargs)
self._headers_written = False
def export_item(self, item):
self._write_headers_and_set_fields_to_export(item)
fields = self._get_serialized_fields(item, default_value='', \
include_empty=True)
values = [x[1] for x in fields]
self.csv_writer.writerow(values)
def _write_headers_and_set_fields_to_export(self, item):
if self._headers_written:
return
self._headers_written = True
if not self.include_headers_line:
return
if not self.fields_to_export:
self.fields_to_export = item.fields.keys()
self.csv_writer.writerow(self.fields_to_export)
class PickleItemExporter(BaseItemExporter):
def __init__(self, file, protocol=0, **kwargs):
self._configure(kwargs)
self.pickler = Pickler(file, protocol)
def export_item(self, item):
self.pickler.dump(dict(self._get_serialized_fields(item)))
class PprintItemExporter(BaseItemExporter):
def __init__(self, file, **kwargs):
self._configure(kwargs)
self.file = file
def export_item(self, item):
itemdict = dict(self._get_serialized_fields(item))
self.file.write(pprint.pformat(itemdict) + '\n')
|
Python
| 0
|
@@ -3741,32 +3741,36 @@
self._headers_
+not_
written = False%0A
@@ -3763,20 +3763,19 @@
itten =
-Fals
+Tru
e%0A%0A d
@@ -3794,32 +3794,120 @@
em(self, item):%0A
+ if self._headers_not_written:%0A self._headers_not_written = False%0A
self._wr
@@ -3949,16 +3949,17 @@
t(item)%0A
+%0A
@@ -4211,102 +4211,8 @@
if
-self._headers_written:%0A return%0A self._headers_written = True%0A if not
self
@@ -4242,23 +4242,8 @@
- return%0A
@@ -4276,16 +4276,20 @@
export:%0A
+
@@ -4331,24 +4331,28 @@
elds.keys()%0A
+
self
|
a8b07a61b56f87509f33cd3f79e7800837ef4f29
|
Add lc0189_rotate_array.py
|
lc0189_rotate_array.py
|
lc0189_rotate_array.py
|
Python
| 0.000061
|
@@ -0,0 +1,1007 @@
+%22%22%22Leetcode 189. Rotate Array%0AEasy%0A%0AURL: https://leetcode.com/problems/rotate-array/%0A%0AGiven an array, rotate the array to the right by k steps, where k is non-negative.%0A%0AExample 1:%0AInput: %5B1,2,3,4,5,6,7%5D and k = 3%0AOutput: %5B5,6,7,1,2,3,4%5D%0AExplanation:%0Arotate 1 steps to the right: %5B7,1,2,3,4,5,6%5D%0Arotate 2 steps to the right: %5B6,7,1,2,3,4,5%5D%0Arotate 3 steps to the right: %5B5,6,7,1,2,3,4%5D%0A%0AExample 2:%0AInput: %5B-1,-100,3,99%5D and k = 2%0AOutput: %5B3,99,-1,-100%5D%0AExplanation: %0Arotate 1 steps to the right: %5B99,-1,-100,3%5D%0Arotate 2 steps to the right: %5B3,99,-1,-100%5D%0A%0ANote:%0A- Try to come up as many solutions as you can, %0A there are at least 3 different ways to solve this problem.%0A- Could you do it in-place with O(1) extra space?%0A%22%22%22%0A%0Aclass Solution(object):%0A def rotate(self, nums, k):%0A %22%22%22%0A :type nums: List%5Bint%5D%0A :type k: int%0A :rtype: None Do not return anything, modify nums in-place instead.%0A %22%22%22%0A pass%0A%0A%0Adef main():%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
7bae6e3f490f4986f07ce45bf333a5982b505bd4
|
add 255
|
python/255_verify_preorder_sequence_in_binary_search_tree.py
|
python/255_verify_preorder_sequence_in_binary_search_tree.py
|
Python
| 0.999996
|
@@ -0,0 +1,848 @@
+%22%22%22%0AGiven an array of numbers, verify whether it is the correct preorder traversal%0Asequence of a binary search tree.%0A%0AYou may assume each number in the sequence is unique.%0A%0AFollow up:%0ACould you do it using only constant space complexity?%0A%22%22%22%0Aclass Solution(object):%0A def verifyPreorder(self, preorder):%0A %22%22%22%0A :type preorder: List%5Bint%5D%0A :rtype: bool%0A %22%22%22%0A if preorder is None or preorder == %5B%5D:%0A return True%0A lb = -2 ** 31%0A stack = %5B%5D%0A for i in preorder:%0A if i %3C lb:%0A return False%0A while stack and i %3E stack%5B-1%5D:%0A lb = stack.pop()%0A stack.append(i)%0A return True%0A%0Afrom binarySearchTree import *%0Aa = Solution()%0At = BST(%5B10,5,12,2,6%5D)%0Aprint(t.toList(order=-1))%0Aprint(a.verifyPreorder(t.toList(order=-1)) == True)%0A
|
|
86686926809bfef55b71618888eec6667faaeec9
|
complete 26 reciprocal cycles
|
26-reciprocal-cycles.py
|
26-reciprocal-cycles.py
|
Python
| 0.000069
|
@@ -0,0 +1,1065 @@
+%22%22%22Based on chillee's answer at Fri, 6 Jan 2017, 05:06:%0AThere's so many convoluted substring solutions.%0A%0A1/3 = 3/9 = 0.(3)%0A1/7 = 148257/999999 = 0.(148257)%0A%0ATherefore, the length of the repeating portion is length of the numerator when%0Ayou set the denominator equal to some string of 9s.%0A%0AThere's one other thing to keep in mind, which is that 1/5 and 1/2 have%0Aterminating decimals, as 5 and 2 are the only divisors of 10. So, for example,%0A1/6 = 1/3 * 1/2, and it's clear that the length of the repeating decimal is the%0Alength of the repeating decimal of 1/3. And if the prime factors of the%0Adenominator are only composed of 2's and 5's, the denominator is terminating.%0A%22%22%22%0A%0A%0Adef cycle_length(x):%0A while x %25 2 == 0:%0A x //= 2%0A while x %25 5 == 0:%0A x //= 5%0A if x == 1:%0A return 0%0A t = 9%0A while True:%0A if t %25 x == 0:%0A return len(str(t))%0A else:%0A t = t * 10 + 9%0A%0Aif __name__ == '__main__':%0A results = %5B(x, cycle_length(x)) for x in range(1, 1001)%5D%0A print(max(results, key=lambda pair: pair%5B1%5D))%0A
|
|
4c6442382adcb716ea817fbc781a402dec36aac9
|
set app.debug = True.
|
app.py
|
app.py
|
from flask import Flask
import redis
import os
from rq import Queue
app = Flask(__name__)
my_redis = redis.from_url(
os.getenv("REDIS_URL", "redis://127.0.0.1:6379"),
db=10
)
redis_rq_conn = redis.from_url(
os.getenv("REDIS_URL", "redis://127.0.0.1:6379"),
db=14
)
scopus_queue = Queue("scopus", connection=redis_rq_conn)
refset_queue = Queue("refset", connection=redis_rq_conn)
|
Python
| 0.000067
|
@@ -83,16 +83,33 @@
_name__)
+%0Aapp.debug = True
%0A%0Amy_red
|
3cd4a151f9f03ecf2674348e9377e00346bbd849
|
add first revision of the script
|
rtp.py
|
rtp.py
|
Python
| 0
|
@@ -0,0 +1,2205 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Afrom bs4 import BeautifulSoup%0Aimport urllib2%0Aimport re%0Aimport unicodedata%0Aimport os%0Aimport string%0A%0AvalidFilenameChars = %22-_. %25s%25s%22 %25 (string.ascii_letters, string.digits)%0A%0Adef removeDisallowedFilenameChars(filename):%0A cleanedFilename = unicodedata.normalize('NFKD', filename).encode('ASCII', 'ignore')%0A return ''.join(c for c in cleanedFilename if c in validFilenameChars)%0A%0Adef parseRTMP(url,dt):%0A url = 'http://www.rtp.pt' + url%0A page = urllib2.urlopen(url)%0A match = re.search('%22file%22: %22(.*?)%22,%22application%22: %22(.*?)%22,%22streamer%22: %22(.*?)%22', page.read(), re.MULTILINE)%0A if match:%0A fn = match.group(1).split('/')%5B5%5D.replace('.mp3', '.flv')%0A cmd = 'rtmpdump -r %22rtmp://' + match.group(3) + '/' + match.group(2) + '%22 -y %22mp3:' + match.group(1) + '%22 -o %22'+ dt + '.flv%22'%0A%0A #print cmd%0A if os.path.isfile(dt+'.mp3'):%0A print %22- Ja downloadada... a ignorar%22%0A return%0A %0A print %22- A sacar...%22%0A os.system(cmd + %22%3E /dev/null 2%3E&1%22)%0A print %22- A extrair mp3 do flv...%22%0A os.system('ffmpeg -i %22' + dt + '.flv%22 -acodec copy %22'+dt+'.mp3%22 %3E /dev/null 2%3E&1')%0A os.remove(dt + '.flv')%0A print %22- Done%22%0A%0Aid = %221085%22%0A%0A# apanhar o numero total de paginas%0Aurl = %22http://www.rtp.pt/play/browseprog/%22+id+%22/1/true%22%0Apage = urllib2.urlopen(url)%0Amatch = re.search(r'%3Ca title=%22Fim.*?,page:(%5Cd+)%5C%7D', page.read(), re.MULTILINE)%0Aif match:%0A totalpages = match.group(1)%0Aelse:%0A exit%0A%0Afor c in range(1,int(totalpages)):%0A print %22--- Pagina %22 + str(c)%0A url = %22http://www.rtp.pt/play/browseprog/%22+id+%22/%22+str(c)+%22/%22%0A page = urllib2.urlopen(url)%0A soup = BeautifulSoup(page.read())%0A%0A # apanha todos os items da pagina%0A items = soup.findAll('div',%7B'class': 'Elemento'%7D)%0A%0A for item in items:%0A # url%0A link = item.find('a')%0A # data%0A dt = 
item.find('b').contents%5B0%5D.strip()%0A dt = dt.replace(' ', '_')%0A # parte ?%0A pt = item.find('p').contents%5B0%5D.strip()%0A pt = pt.replace(' ', '_')%0A%0A print %22-- %22 + dt, pt%0A%0A title = removeDisallowedFilenameChars(dt + %22-%22 + pt)%0A parseRTMP(link%5B'href'%5D,title)%0A%0A%0A%0A
|
|
17d5372feac64099373ed7ac844fe8bdbf8fad71
|
Fix list/generator issue with .keys() in Python 3
|
loginas/tests/tests.py
|
loginas/tests/tests.py
|
try:
from urllib.parse import urlsplit
except ImportError:
from urlparse import urlsplit
from django.test import TestCase
from django.contrib.auth.models import User
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.contrib.messages.storage.cookie import CookieStorage
from django.utils.six import text_type
def create_user(username='', password='', **kwargs):
user = User(username=username, **kwargs)
if password:
user.set_password(password)
user.save()
return user
class ViewTest(TestCase):
"""Tests for user_login view"""
def login_as_nonstaff(request, user):
return request.user.is_superuser or (request.user.is_staff and
not user.is_staff)
def setUp(self):
self.target_user = User.objects.create(username='target')
def get_target_url(self, target_user=None):
if target_user is None:
target_user = self.target_user
response = self.client.get(reverse(
"loginas-user-login", kwargs={'user_id': target_user.id}))
self.assertEqual(response.status_code, 302)
self.assertEqual(urlsplit(response['Location'])[2], "/")
return response
def assertCurrentUserIs(self, user):
id_ = text_type(user.id if user is not None else None).encode('utf-8')
self.assertEqual(self.client.get(reverse("current_user")).content, id_)
def assertLoginError(self, resp):
messages = CookieStorage(resp)._decode(resp.cookies['messages'].value)
self.assertEqual([(m.level, m.message) for m in messages],
[(40, "You do not have permission to do that.")])
def assertRaisesExact(self, exception, func, *args, **kwargs):
try:
func(*args, **kwargs)
self.assertFail("{0} not raised".format(exc))
except exception.__class__ as caught:
self.assertEqual(caught.message, exception.message)
def clear_cookies(self):
for key in self.client.cookies.keys():
del self.client.cookies[key]
@override_settings(CAN_LOGIN_AS=login_as_nonstaff)
def test_custom_permissions(self):
user = create_user("user", "pass", is_superuser=False, is_staff=False)
staff1 = create_user("staff", "pass", is_superuser=False, is_staff=True)
staff2 = create_user("super", "pass", is_superuser=True, is_staff=True)
# Regular user can't login as anyone
self.assertTrue(self.client.login(username="user", password="pass"))
self.assertLoginError(self.get_target_url())
self.assertCurrentUserIs(user)
self.clear_cookies()
# Non-superuser staff user can login as regular user
self.assertTrue(self.client.login(username="staff", password="pass"))
response = self.get_target_url(user)
self.assertNotIn('messages', response.cookies)
self.assertCurrentUserIs(user)
self.clear_cookies()
# Non-superuser staff user cannot login as other staff
self.assertTrue(self.client.login(username="staff", password="pass"))
self.assertLoginError(self.get_target_url(staff2))
self.assertCurrentUserIs(staff1)
self.clear_cookies()
# Superuser staff user can login as other staff
self.assertTrue(self.client.login(username="super", password="pass"))
response = self.get_target_url(staff1)
self.assertNotIn('messages', response.cookies)
self.assertCurrentUserIs(staff1)
@override_settings(CAN_LOGIN_AS='loginas.tests.login_as_shorter_username')
def test_custom_permissions_as_string(self):
ray = create_user("ray", "pass")
lonnie = create_user("lonnie", "pass")
# Ray cannot login as Lonnie
self.assertTrue(self.client.login(username="ray", password="pass"))
self.assertLoginError(self.get_target_url(lonnie))
self.assertCurrentUserIs(ray)
self.clear_cookies()
# Lonnie can login as Ray
self.assertTrue(self.client.login(username="lonnie", password="pass"))
response = self.get_target_url(ray)
self.assertNotIn('messages', response.cookies)
self.assertCurrentUserIs(ray)
def test_custom_permissions_invalid_path(self):
def assertMessage(message):
self.assertRaisesExact(ImproperlyConfigured(message),
self.get_target_url)
with override_settings(CAN_LOGIN_AS='loginas.tests.invalid_func'):
assertMessage("Module loginas.tests does not define a invalid_func "
"function.")
with override_settings(CAN_LOGIN_AS='loginas.tests.invalid_path.func'):
assertMessage("Error importing CAN_LOGIN_AS function.")
def test_as_superuser(self):
create_user("me", "pass", is_superuser=True, is_staff=True)
self.assertTrue(self.client.login(username="me", password="pass"))
response = self.get_target_url()
self.assertNotIn('messages', response.cookies)
self.assertCurrentUserIs(self.target_user)
def test_as_non_superuser(self):
user = create_user("me", "pass", is_staff=True)
self.assertTrue(self.client.login(username="me", password="pass"))
self.assertLoginError(self.get_target_url())
self.assertCurrentUserIs(user)
def test_as_anonymous_user(self):
self.assertLoginError(self.get_target_url())
self.assertCurrentUserIs(None)
|
Python
| 0.000018
|
@@ -2110,16 +2110,21 @@
key in
+list(
self.cli
@@ -2141,16 +2141,17 @@
s.keys()
+)
:%0A
|
8947167b0442b8d03cfd328fd77961a864f54638
|
Create double.py
|
CodeWars/8kyu/double.py
|
CodeWars/8kyu/double.py
|
Python
| 0.000019
|
@@ -0,0 +1,39 @@
+def doubleInteger(i):%0A return i + i%0A
|
|
3f2a1aa0ce76dc50662e11da50149d0de231c848
|
add keys
|
keys.py
|
keys.py
|
Python
| 0.000003
|
@@ -0,0 +1,17 @@
+G_EMAIL_KEY = %22%22%0A
|
|
79b284a723303daa486b97c0da69eb1c4bf56a95
|
add latex for gini index
|
skbio/maths/diversity/alpha/gini.py
|
skbio/maths/diversity/alpha/gini.py
|
#!/usr/bin/env python
from __future__ import division
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
from .base import _validate
def gini_index(data, method='rectangles'):
"""Calculate the Gini index.
Formula is ``G = A/(A+B)`` where ``A`` is the area between ``y=x`` and the
Lorenz curve and ``B`` is the area under the Lorenz curve. Simplifies to
``1-2B`` since ``A+B=0.5``.
Parameters
----------
data : (N,) array_like
Vector of counts, abundances, proportions, etc. All entries must be
non-negative.
method : {'rectangles', 'trapezoids'}
Method for calculating the area under the Lorenz curve. If
``'rectangles'``, connects the Lorenz curve points by lines parallel to
the x axis. This is the correct method (in our opinion) though
trapezoids might be desirable in some circumstances. Forumla is:
``dx(sum(i=1,i=n,h_i))``.
If ``'trapezoids'``, connects the Lorenz curve points by linear
segments between them. Basically assumes that the given sampling is
accurate and that more features of given data would fall on linear
gradients between the values of this data. Formula is:
``dx[(h_0+h_n)/2 + sum(i=1,i=n-1,h_i)]``.
Returns
-------
double
Gini index.
Raises
------
ValueError
If `method` isn't one of the supported methods for calculating the area
under the curve.
Notes
-----
The Gini index was introduced in [1]_.
References
----------
.. [1] Gini, C. (1912). "Variability and Mutability", C. Cuppini, Bologna,
156 pages. Reprinted in Memorie di metodologica statistica (Ed. Pizetti
E, Salvemini, T). Rome: Libreria Eredi Virgilio Veschi (1955).
"""
# Suppress cast to int because this method supports ints and floats.
data = _validate(data, suppress_cast=True)
lorenz_points = _lorenz_curve(data)
B = _lorenz_curve_integrator(lorenz_points, method)
return 1 - 2 * B
def _lorenz_curve(data):
"""Calculate the Lorenz curve for input data.
Notes
-----
Formula available on wikipedia.
"""
sorted_data = np.sort(data)
Sn = sorted_data.sum()
n = sorted_data.shape[0]
return np.arange(1, n + 1) / n, sorted_data.cumsum() / Sn
def _lorenz_curve_integrator(lc_pts, method):
"""Calculates the area under a Lorenz curve.
Notes
-----
Could be utilized for integrating other simple, non-pathological
"functions" where width of the trapezoids is constant.
"""
x, y = lc_pts
# each point differs by 1/n
dx = 1 / x.shape[0]
if method == 'trapezoids':
# 0 percent of the population has zero percent of the goods
h_0 = 0.0
h_n = y[-1]
# the 0th entry is at x=1/n
sum_hs = y[:-1].sum()
return dx * ((h_0 + h_n) / 2 + sum_hs)
elif method == 'rectangles':
return dx * y.sum()
else:
raise ValueError("Method '%s' not implemented. Available methods: "
"'rectangles', 'trapezoids'." % method)
|
Python
| 0
|
@@ -537,46 +537,97 @@
-Formula is %60%60G = A/(A+B)%60%60
+The Gini index is defined as%0A%0A .. math::%0A%0A G=%5C%5Cfrac%7BA%7D%7BA+B%7D%0A%0A
where
-%60
+:math:
%60A%60
-%60
is
@@ -647,23 +647,27 @@
een
-%60
+:math:
%60y=x%60
-%60
and the
%0A
@@ -662,20 +662,16 @@
and the
-%0A
Lorenz
@@ -683,14 +683,22 @@
and
- %60
+%0A :math:
%60B%60
-%60
is
@@ -747,29 +747,38 @@
s to
-%0A %60
+ :math:
%601-2B%60
-%60
+%0A
since
-%60
+:math:
%60A+B
@@ -782,17 +782,16 @@
A+B=0.5%60
-%60
.%0A%0A P
@@ -1208,16 +1208,19 @@
+%60%60'
trapezoi
@@ -1221,16 +1221,19 @@
apezoids
+'%60%60
might b
@@ -1271,64 +1271,18 @@
es.
-Forumla is:%0A %60%60dx(sum(i=1,i=n,h_i))%60%60.%0A If
+If%0A
%60%60'
@@ -1338,24 +1338,16 @@
y linear
-%0A
segment
@@ -1343,24 +1343,32 @@
ear segments
+%0A
between the
@@ -1414,24 +1414,16 @@
pling is
-%0A
accurat
@@ -1427,16 +1427,24 @@
rate and
+%0A
that mo
@@ -1489,24 +1489,16 @@
n linear
-%0A
gradien
@@ -1503,24 +1503,32 @@
ents between
+%0A
the values
@@ -1544,70 +1544,8 @@
ata.
- Formula is:%0A %60%60dx%5B(h_0+h_n)/2 + sum(i=1,i=n-1,h_i)%5D%60%60.
%0A%0A
@@ -1803,16 +1803,221 @@
in %5B1%5D_.
+ The formula for%0A %60%60method='rectangles'%60%60 is%0A%0A .. math::%0A%0A dx%5C%5Csum_%7Bi=1%7D%5En h_i%0A%0A The formula for %60%60method='trapezoids'%60%60 is%0A%0A .. math::%0A%0A dx(%5C%5Cfrac%7Bh_0+h_n%7D%7B2%7D+%5Csum_%7Bi=1%7D%5E%7Bn-1%7D h_i)
%0A%0A Re
|
cec1cc8082854a0fd61ea83bb69ba1e9d013b089
|
Create libs.py
|
libs.py
|
libs.py
|
Python
| 0.000001
|
@@ -0,0 +1,1128 @@
+# coding: utf-8%0A%0A'''%D0%91%D0%B8%D0%B1%D0%BB%D0%B8%D0%BE%D1%82%D0%B5%D0%BA%D0%B8 SEO %D0%BC%D0%BE%D0%B4%D1%83%D0%BB%D1%8F'''%0A%0Aimport sublime, sublime_plugin, re, urllib%0A%0Aclass xenuTools:%0A%0A%09def download_url_to_string(url):%0A%09%09request = urllib.request.Request(url)%0A%09%09response = urllib.request.urlopen(request)%0A%09%09html = response.read()%0A%09%09return html%0A%0A%09def getrobots(url):%0A%09%09#TODO: split single line files%0A%09%09robots_rules = ''%0A%0A%09%09robots = xenuTools.download_url_to_string(url)%0A%09%09# remove leading and trailing white space%0A%09%09robots = robots.strip()%0A%09%09# put each line into a list%0A%09%09robots_list = robots.decode(%22utf-8%22).strip().splitlines()%0A%0A%09%09for item in robots_list:%0A%09%09%09mach = re.search('%5EDisallow: +(%5B%5E%5Cs%5D+)$', item, flags=re.IGNORECASE)%0A%09%09%09if item == %22%22 or mach == None:%0A%09%09%09%09continue%0A%0A%09%09%09item = mach.group(1)%0A%0A%09%09%09if item.find('#') %3E 0:%0A%09%09%09%09# comment removing%0A%09%09%09%09item = re.sub(r%22(%5B%5E#%5D*)#.*%22, r%22%5C1%22, item)%0A%0A%09%09%09item = re.sub(r%22%5C*$%22, %22%22, item)%0A%09%09%09item = item.replace(%22*%22, %22.*%22).replace(%22?%22, %22%5C?%22).replace(%22$%22, %22%5Cn%22).strip()%0A%09%09%09%0A%09%09%09robots_rules = robots_rules + item + '%7C'%0A%09%09robots_rules = robots_rules%5B:-1%5D%0A%0A%09%09# TODO: cut images%0A%09%09robots_rules = r'(?s)%5Ehttps?:%5CS+('+robots_rules+')(.*?)%5Cn%5Cn'%0A%09%09%0A%09%09return robots_rules%0A
|
|
636b02bbe33e60348b0171241b7a3c264f1c90a7
|
too soon. sorry
|
src/pyechonest/decorators.py
|
src/pyechonest/decorators.py
|
Python
| 0.999999
|
@@ -0,0 +1,852 @@
+# from http://wiki.python.org/moin/PythonDecoratorLibrary#Memoize%0Aclass memoized(object):%0A %22%22%22Decorator that caches a function's return value each time it is called.%0A If called later with the same arguments, the cached value is returned, and%0A not re-evaluated.%0A %22%22%22%0A def __init__(self, func):%0A self.func = func%0A self.cache = %7B%7D%0A %0A def __call__(self, *args):%0A try:%0A return self.cache%5Bargs%5D%0A except KeyError:%0A self.cache%5Bargs%5D = value = self.func(*args)%0A return value%0A except TypeError:%0A # uncachable -- for instance, passing a list as an argument.%0A # Better to not cache than to blow up entirely.%0A return self.func(*args)%0A %0A def __repr__(self):%0A %22%22%22Return the function's docstring.%22%22%22%0A return self.func.__doc__%0A
|
|
c04610422ffd6e0fe87c62d7a8039116f804467c
|
Add jupyterhub config
|
jupyterhub_config.py
|
jupyterhub_config.py
|
Python
| 0.000001
|
@@ -0,0 +1,1178 @@
+import os%0Aimport everware%0A%0Aimport jupyterhub.handlers.pages%0Ajupyterhub.handlers.pages.HomeHandler.get = everware.HomeHandler.get%0Ajupyterhub.handlers.pages.HomeHandler.post = everware.HomeHandler.post%0A%0Ac = get_config()%0A%0A# spawn with custom docker containers%0Ac.JupyterHub.spawner_class = 'everware.CustomDockerSpawner'%0A%0A# The docker instances need access to the Hub, so the default loopback port doesn't work:%0Afrom IPython.utils.localinterfaces import public_ips%0Ac.JupyterHub.hub_ip = public_ips()%5B0%5D%0Ac.JupyterHub.hub_api_ip = public_ips()%5B0%5D%0A%0Ac.JupyterHub.authenticator_class = 'everware.GitHubOAuthenticator'%0Ac.Authenticator.whitelist = set()%0A%0Ac.GitHubOAuthenticator.oauth_callback_url = os.environ%5B'OAUTH_CALLBACK_URL'%5D%0Ac.GitHubOAuthenticator.client_id = os.environ%5B'GITHUB_CLIENT_ID'%5D%0Ac.GitHubOAuthenticator.client_secret = os.environ%5B'GITHUB_CLIENT_SECRET'%5D%0A%0Ac.Spawner.tls = True%0Ac.Spawner.debug = True%0Ac.Spawner.http_timeout = 32%0A%0Ac.JupyterHub.data_files_path = 'share'%0Ac.JupyterHub.template_paths = %5B'share/static/html'%5D%0A%0A# change this to the ip that %60boot2docker ip%60 tells you if%0A# you use boot2docker, otherwise remove the line%0A#c.Spawner.container_ip = '192.168.59.103'%0A
|
|
010f19ab2f9c0f3305d7f2eabcbbd33952a58fdd
|
Add a dir
|
stingroc/0002/0002.py
|
stingroc/0002/0002.py
|
Python
| 0.99927
|
@@ -0,0 +1,13 @@
+print %220002%22%0A
|
|
287a89307af6ad720978682f49c01e39259303ec
|
Create censys_monitor.py
|
censys_monitor.py
|
censys_monitor.py
|
Python
| 0.000001
|
@@ -0,0 +1,1910 @@
+import censys.certificates%0Aimport json%0Aimport requests%0Aimport os%0Aimport random%0A%0A#UID = %22%22%0A#SECRET = %22%22%0A%0A%0A#api for remynseit and remynse%0AUIDS = %5B%22UID1%22, %22UID2%22, %22UID3%22%5D%0ASECRETS = %7B%22secret%22: %22value%22, %22secret2%22: %22value2%22%7D%0A%0A'''%0ASearch%0A(utah.edu.*) AND NOT parsed.subject_dn.raw:/.*utah.edu/%0A'''%0A%0Aalert_webhook = ''%0Aknown_certs = %5B%5D%0A%0A%0Adef getCerts():%0A UID = random.choice(UIDS)%0A SECRET = SECRETS%5BUID%5D%0A c = censys.certificates.CensysCertificates(UID, SECRET)%0A certs = c.search(%22(utah.edu.* OR utahedu.*) AND NOT parsed.subject_dn.raw:/.*utah.edu/%22)%0A return certs%0A%0A%0Adef knownCerts():%0A known_certs = %5B%5D%0A if os.path.isfile('/opt/censys/certs.txt'):%0A with open('/opt/censys/certs.txt') as f:%0A for line in f:%0A known_certs.append(line.rstrip())%0A return known_certs%0A else:%0A pass%0Adef alert(cert):%0A text = f%22New SSL Cert Detected:%5Cn Sha256: %7Bcert%5B'parsed.fingerprint_sha256'%5D%7D%5Cn SubjectCN: %7Bcert%5B'parsed.subject_dn'%5D%7D%22%0A message = %7B%22text%22: text%7D%0A resp = requests.post(alert_webhook, data=json.dumps(message), headers=%7B'Content-Type': 'application/json'%7D)%0A if resp.status_code != 200:%0A raise ValueError(f'Status Code: %7Bresp.status_code%7D, Error: %7Bresp.text%7D')%0A%0Adef main():%0A known_certs = knownCerts()%0A new_certs = getCerts()%0A for i in new_certs:%0A if known_certs:%0A if i%5B'parsed.fingerprint_sha256'%5D in known_certs:%0A pass%0A else:%0A alert(i)%0A print(%22Found New Cert%22)%0A with open('/opt/censys/certs.txt', 'a+') as f:%0A f.write(i%5B'parsed.fingerprint_sha256'%5D + %22%5Cr%5Cn%22)%0A else:%0A print(%22First Run, adding certs and alerting%22)%0A alert(i)%0A with open('/opt/censys/certs.txt', 'a+') as f:%0A f.write(i%5B'parsed.fingerprint_sha256'%5D + %22%5Cr%5Cn%22)%0A%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
|
|
731118d82aa41689f12adb32ea37be55be89a757
|
Add gpu_buffer.py
|
fafnir/gpu_buffer.py
|
fafnir/gpu_buffer.py
|
Python
| 0.002058
|
@@ -0,0 +1,1163 @@
+import panda3d.core as p3d%0A%0A%0Aclass GpuBuffer:%0A def __init__(self, name, count, data_type, data_format):%0A self.buffer = p3d.Texture(name)%0A self.data_type = data_type%0A self.data_format = data_format%0A self.resize(count)%0A%0A def resize(self, count):%0A self.buffer.setup_buffer_texture(%0A count,%0A self.data_type,%0A self.data_format,%0A p3d.GeomEnums.UH_dynamic%0A )%0A self.buffer.prepare(base.win.get_gsg().get_prepared_objects())%0A%0A ram_image = self.buffer.modify_ram_image()%0A for i in range(len(ram_image)):%0A ram_image%5Bi%5D = 0%0A%0A def get_buffer_id(self):%0A gsg = base.win.get_gsg()%0A pgo = gsg.get_prepared_objects()%0A context = self.buffer.prepare_now(0, pgo, gsg)%0A return context.get_native_buffer_id()%0A%0A def get_texture(self):%0A return self.buffer%0A%0A def print_buffer(self, count):%0A base.graphics_engine.extract_texture_data(self.buffer, base.win.get_gsg())%0A view = memoryview(self.buffer.get_ram_image()).cast('f')%0A for i in range(count):%0A print(view%5Bi%5D, end=' ')%0A print()%0A
|
|
3cb7c1cd73dfb73d96af15a183d4e7ef6a9369e8
|
create src
|
src/GitApi.py
|
src/GitApi.py
|
Python
| 0
|
@@ -0,0 +1,2636 @@
+#!/usr/bin/env python%0A# -*- coding:utf-8 -*-%0A%0Afrom requests import get%0Afrom json import loads%0Afrom argparse import ArgumentParser%0A%0A%0Aclass GitHub():%0A%0A def GetRepos(self, user):%0A self.msg = %22%22%0A req = loads(get('https://api.github.com/users/' +%0A user + '/repos').text)%0A self.msg += '%5CnRepositorys of user.'%0A for i in range(len(req)):%0A self.msg += '%5Cn%5CnName repository: ' + str(req%5Bi%5D%5B'name'%5D)%0A self.msg += '%5CnDescription repository: ' + %5C%0A str(req%5Bi%5D%5B'description'%5D)%0A self.msg += '%5CnURL repository: ' + str(req%5Bi%5D%5B'html_url'%5D)%0A self.msg += '%5CnStars: total: ' + %5C%0A str(req%5Bi%5D%5B'stargazers_count'%5D)%0A self.msg += '%5CnForks total: ' + %5C%0A str(req%5Bi%5D%5B'forks_count'%5D)%0A return self.msg%0A%0A def GetInfo(self, user):%0A self.msg = %22%22%0A req = loads(get('https://api.github.com/users/' + user).text)%0A self.msg += '%5CnInformation of user:%5Cn'%0A self.msg += '%5CnName: ' + str(req%5B'name'%5D)%0A self.msg += '%5CnEmail: ' + str(req%5B'email'%5D)%0A self.msg += '%5CnCompany: ' + str(req%5B'company'%5D)%0A self.msg += '%5CnBlog: ' + str(req%5B'blog'%5D)%0A self.msg += '%5CnBio: ' + str(req%5B'bio'%5D)%0A self.msg += '%5CnLocation: ' + str(req%5B'location'%5D)%0A self.msg += '%5CnPublic repository: ' + str(req%5B'public_repos'%5D)%0A self.msg += '%5CnFollowers: ' + str(req%5B'followers'%5D) + '%5Cn'%0A return self.msg%0A%0A def Arguments(self):%0A self.user = GitHub()%0A self.parser = ArgumentParser()%0A self.parser.add_argument('--repos', dest='repos', action='store_true',%0A help='List all repository.')%0A self.parser.add_argument('--user', dest='user', action='store',%0A required=True, help='Parameter for set user.')%0A self.parser.add_argument('--info', dest='info', action='store_true',%0A help='Parameter for to get info of user')%0A self.parser.add_argument('--all', dest='all', action='store_true',%0A help='Parameter for to define all options')%0A self.args = 
self.parser.parse_args()%0A if self.args.user and self.args.info:%0A print(self.user.GetInfo(self.args.user))%0A elif self.args.user and self.args.repos:%0A print(self.user.GetRepos(self.args.user))%0A elif self.args.user and self.args.all:%0A print(self.user.GetRepos(self.args.user))%0A print(self.user.GetInfo(self.args.user))%0A else:%0A print('Use --info, --repos or --all.')%0A
|
|
4f751298176bf2118d4a638e106d5e9572725178
|
Add utility class
|
konstrukteur/Util.py
|
konstrukteur/Util.py
|
Python
| 0.000001
|
@@ -0,0 +1,659 @@
+#%0A# Konstrukteur - Static website generator%0A# Copyright 2013 Sebastian Fastner%0A#%0A%0Aimport re%0Aimport unidecode%0A%0Adef fixCoreTemplating(content):%0A%09%22%22%22 This fixes differences between core JS templating and standard mustache templating %22%22%22%0A%0A%09# Replace %7B%7B=tagname%7D%7D with %7B%7B&tagname%7D%7D%0A%09content = re.sub(r%22%7B%7B=(?P%3Ctag%3E.+?)%7D%7D%22, %22%7B%7B&%5Cg%3Ctag%3E%7D%7D%22, content)%0A%0A%09# Replace %7B%7B?tagname%7D%7D with %7B%7B#tagname%7D%7D%0A%09content = re.sub(r%22%7B%7B%5C?(?P%3Ctag%3E.+?)%7D%7D%22, %22%7B%7B#%5Cg%3Ctag%3E%7D%7D%22, content)%0A%0A%09return content%0A%0A%0Adef fixSlug(slug):%0A%09%22%22%22 Replaces unicode character with something equal from ascii ( e.g. %C3%BC -%3E u ) %22%22%22%0A%09%0A%09pattern = r'%5B.%5Cs%5D+'%0A%09return re.sub(pattern, %22-%22, unidecode.unidecode(slug).lower())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.