commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
ab0868b777101a8442cec80009b84b40e79a3a08
|
add a sample script
|
decode.py
|
decode.py
|
Python
| 0.000001
|
@@ -0,0 +1,505 @@
+%22%22%22Command-line tool to decode audio files to raw PCM.%22%22%22%0Aimport audioread%0Aimport sys%0Aimport os%0A%0Adef decode(filename):%0A filename = os.path.abspath(os.path.expanduser(filename))%0A with audioread.audio_open(filename) as f:%0A print 'Input file: %25i channels at %25i Hz; %25.1f seconds.' %25 %5C%0A (f.channels, f.samplerate, f.duration)%0A with open(filename + '.pcm', 'wb') as of:%0A for buf in f:%0A of.write(buf)%0A%0Aif __name__ == '__main__':%0A decode(sys.argv%5B1%5D)%0A
|
|
51055b6fbd81ffc88215b88f9a63d702535bdd93
|
Add missing file
|
jobslave/job_data.py
|
jobslave/job_data.py
|
Python
| 0.000006
|
@@ -0,0 +1,1043 @@
+#%0A# Copyright (c) 2011 rPath, Inc.%0A#%0A%0A%0ABUILD_DEFAULTS = %7B%0A 'autoResolve': False,%0A 'maxIsoSize': '681574400',%0A 'bugsUrl': 'http://issues.rpath.com/',%0A 'natNetworking': False,%0A 'vhdDiskType': 'dynamic',%0A 'anacondaCustomTrove': '',%0A 'stringArg': '',%0A 'mediaTemplateTrove': '',%0A 'baseFileName': '',%0A 'vmSnapshots': False,%0A 'swapSize': 128,%0A 'betaNag': False,%0A 'anacondaTemplatesTrove': '',%0A 'enumArg': '2',%0A 'vmMemory': 256,%0A 'installLabelPath': '',%0A 'intArg': 0,%0A 'freespace': 250,%0A 'boolArg': False,%0A 'mirrorUrl': '',%0A 'zisofs': True,%0A 'diskAdapter': 'lsilogic',%0A 'unionfs': False,%0A 'showMediaCheck': False,%0A 'amiHugeDiskMountpoint': '',%0A 'platformName': '',%0A %7D%0A%0A%0Aclass JobData(dict):%0A%0A def getBuildData(self, key):%0A value = self.get('data', %7B%7D).get(key)%0A if value is None:%0A value = BUILD_DEFAULTS.get(key)%0A return value%0A
|
|
41cc4d54d5eddcf30e2c9a98179ff1e745a12f90
|
Add missing migration
|
core/migrations/0009_wagtail112upgrade.py
|
core/migrations/0009_wagtail112upgrade.py
|
Python
| 0.000001
|
@@ -0,0 +1,833 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.7 on 2017-11-21 23:14%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('core', '0008_wagtailcompanypage_sites_ordering'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='submitformfield',%0A name='field_type',%0A field=models.CharField(choices=%5B('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('multiselect', 'Multiple select'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time')%5D, max_length=16, verbose_name='field type'),%0A ),%0A %5D%0A
|
|
896132d9068ed736587047e7a4501f49d7a1bffd
|
Update test helpers
|
test/selenium/src/lib/test_helpers.py
|
test/selenium/src/lib/test_helpers.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
"""
Utility classes for page objects used in tests.
Details:
Most of the tests require a sequence of primitive methods of the page
object. If the sequence repeats itself among tests, it should be shared in
this module.
"""
import uuid
import re
from lib import base
from lib.constants.test import modal_create_new
from lib.constants.test import modal_custom_attribute
class HtmlParser(base.Test):
"""The HtmlParser class simulates what happens with (non-rich)text in HTML.
"""
@staticmethod
def parse_text(text):
"""Simulates text parsed by html
Args:
text (str)
"""
return re.sub(r'\s+', " ", text)
class ModalNewProgramPage(base.Test):
"""Methods for simulating common user actions"""
@staticmethod
def enter_test_data(modal):
"""Fills out all fields in the lhn_modal
Args:
modal (lib.page.lhn_modal.new_program.EditProgramModal)
"""
unique_id = str(uuid.uuid4())
modal.enter_title(modal_create_new.Program.TITLE + unique_id)
modal.enter_description(
modal_create_new.Program.DESCRIPTION_SHORT + unique_id)
modal.enter_notes(
modal_create_new.Program.NOTES_SHORT + unique_id)
modal.enter_code(modal_create_new.Program.CODE + unique_id)
modal.filter_and_select_primary_contact("example")
modal.filter_and_select_secondary_contact("example")
modal.enter_program_url(
unique_id + modal_create_new.Program.PROGRAM_URL)
modal.enter_reference_url(
unique_id + modal_create_new.Program.REFERENCE_URL)
@staticmethod
def set_start_end_dates(modal, day_start, day_end):
"""
Sets the dates from the datepicker in the new program/edit modal.
Args:
modal (lib.page.lhn_modal.new_program.EditProgramModal)
day_start (int): for more info see
base.DatePicker.select_day_in_current_month
day_end (int): for more info see
base.DatePicker.select_day_in_current_month
"""
modal.enter_effective_date_start_month(day_start)
modal.enter_stop_date_end_month(day_end)
class ModalNewProgramCustomAttribute(base.Test):
@staticmethod
def enter_test_data(modal):
"""Fills out all fields in the lhn_modal
Args:
modal (lib.page.widget.custom_attribute.NewCustomAttributeModal)
"""
modal.enter_title(modal_custom_attribute.Program.TITLE)
modal.enter_inline_help(modal_custom_attribute.Program.INLINE_HELP)
modal.enter_placeholder(modal_custom_attribute.Program.PLACEHOLDER)
|
Python
| 0.000001
|
@@ -799,16 +799,17 @@
by html%0A
+%0A
Args
@@ -825,16 +825,27 @@
ext (str
+ or unicode
)%0A %22%22
@@ -899,23 +899,23 @@
ModalNew
-Program
+Control
Page(bas
@@ -1119,57 +1119,93 @@
new_
-program.EditProgramModal)%0A %22%22%22%0A unique_id =
+control.NewControlModal)%0A %22%22%22%0A modal.enter_title(modal_create_new.SHORT_TITLE +
str
@@ -1218,17 +1218,17 @@
uuid4())
-%0A
+)
%0A mod
@@ -1228,37 +1228,43 @@
modal.enter_
-title
+description
(modal_create_ne
@@ -1269,126 +1269,683 @@
new.
-Program.TITLE + unique_id)%0A modal.enter_description(%0A modal_create_new.Program.DESCRIPTION_SHORT + unique_id
+SHORT_TITLE + str(uuid.uuid4()))%0A modal.enter_test_plan(modal_create_new.SHORT_TITLE + str(uuid.uuid4()))%0A modal.enter_notes(modal_create_new.SHORT_TITLE + str(uuid.uuid4()))%0A modal.enter_code(modal_create_new.SHORT_TITLE + str(uuid.uuid4()))%0A%0A%0Aclass ModalNewProgramPage(base.Test):%0A %22%22%22Methods for simulating common user actions%22%22%22%0A%0A @staticmethod%0A def enter_test_data(modal):%0A %22%22%22Fills out all fields in the lhn_modal%0A%0A Args:%0A modal (lib.page.modal.edit_object.EditProgramModalBase)%0A %22%22%22%0A modal.enter_title(modal_create_new.SHORT_TITLE + str(uuid.uuid4()))%0A modal.enter_description(%0A modal_create_new.SHORT_TITLE + str(uuid.uuid4())
)%0A
@@ -1994,39 +1994,39 @@
new.
-Program.NOTES_SHORT + unique_id
+SHORT_TITLE + str(uuid.uuid4())
)%0A
@@ -2065,32 +2065,39 @@
new.
-Program.CODE + unique_id
+SHORT_TITLE + str(uuid.uuid4())
)%0A
@@ -2239,33 +2239,41 @@
rl(%0A
-unique_id
+str(uuid.uuid4())
+ modal_cre
@@ -2344,17 +2344,25 @@
-unique_id
+str(uuid.uuid4())
+ m
@@ -2586,29 +2586,25 @@
age.
-lhn_
modal.
-new_program
+edit_object
.Edi
@@ -2612,24 +2612,28 @@
ProgramModal
+Base
)%0A day_
|
9bc05db6bc3b6b2570c8e37d528639c310a6f162
|
Add login required decorator
|
pybossa_analyst/login.py
|
pybossa_analyst/login.py
|
Python
| 0.000001
|
@@ -0,0 +1,365 @@
+# -*- coding: utf8 -*-%0A%0Afrom functools import wraps%0Afrom flask import session, request, redirect, url_for%0A%0A%0Adef login_required(f):%0A @wraps(f)%0A def decorated_function(*args, **kwargs):%0A if session.get('api_key') is None:%0A return redirect(url_for('home.login', next=request.url))%0A return f(*args, **kwargs)%0A return decorated_function
|
|
24bdb8217a921f3f1833a8e9c1bcd7cc1615ea9b
|
add mgmt command to migrate testruns between projects
|
squad/core/management/commands/migrate_test_runs.py
|
squad/core/management/commands/migrate_test_runs.py
|
Python
| 0.005135
|
@@ -0,0 +1,3495 @@
+import sys%0Afrom django.core.exceptions import ObjectDoesNotExist%0Afrom django.core.management.base import BaseCommand%0A%0Afrom squad.core.models import Project, Build, Environment%0Afrom squad.core.tasks import UpdateProjectStatus%0A%0A%0Aclass Command(BaseCommand):%0A%0A help = %22%22%22Move test runs identified by environment slug%0A from one project to another. This action preserves%0A datetime of the objects and statuses.%22%22%22%0A%0A def add_arguments(self, parser):%0A parser.add_argument(%0A '--old-project-slug',%0A dest=%22old_project_slug%22,%0A help=%22Slug of the project from which to migrate test runs%22%0A )%0A parser.add_argument(%0A '--new-project-slug',%0A dest=%22new_project_slug%22,%0A help=%22Slug of the project to which to migrate test runs%22%0A )%0A parser.add_argument(%0A '--env-slug',%0A dest=%22env_slug%22,%0A help=%22Slug of the environment to migrate to new project%22%0A )%0A%0A def handle(self, *args, **options):%0A self.options = options%0A%0A if not self.options%5B'old_project_slug'%5D:%0A print(%22ERROR: old_project_slug missing%22)%0A sys.exit(1)%0A%0A if not self.options%5B'new_project_slug'%5D:%0A print(%22ERROR: new_project_slug missing%22)%0A sys.exit(1)%0A%0A if not self.options%5B'env_slug'%5D:%0A print(%22ERROR: env_slug missing%22)%0A sys.exit(1)%0A%0A old_project = None%0A new_project = None%0A env = None%0A try:%0A old_project = Project.objects.get(slug=self.options%5B'old_project_slug'%5D)%0A except ObjectDoesNotExist:%0A print(%22Project: %25s not found. Exiting%22 %25 self.options%5B'old_project_slug'%5D)%0A sys.exit(0)%0A try:%0A new_project = Project.objects.get(slug=self.options%5B'new_project_slug'%5D)%0A except ObjectDoesNotExist:%0A print(%22Project: %25s not found. Exiting%22 %25 self.options%5B'new_project_slug'%5D)%0A sys.exit(0)%0A try:%0A env = Environment.objects.get(project=old_project, slug=self.options%5B'env_slug'%5D)%0A except ObjectDoesNotExist:%0A print(%22Environment: %25s not found. 
Exiting%22 %25 self.options%5B'env_slug'%5D)%0A sys.exit(0)%0A print(%22Migrating testruns from project %25s to %25s%22 %25 (old_project.slug, new_project.slug))%0A print(%22All test runs with environment name: %25s will be migrated%22 %25 env.slug)%0A%0A for build in old_project.builds.all():%0A if build.test_runs.filter(environment=env):%0A print(%22moving build: %25s%22 %25 build)%0A new_build, _ = Build.objects.get_or_create(%0A version=build.version,%0A project=new_project,%0A created_at=build.created_at)%0A for testrun in build.test_runs.filter(environment=env):%0A testrun.build = new_build%0A testrun.save()%0A testrun.environment.project = new_project%0A testrun.environment.save()%0A for testjob in testrun.test_jobs.all():%0A testjob.target = new_project%0A testjob.save()%0A UpdateProjectStatus()(testrun)%0A new_build.status.created_at = build.status.created_at%0A new_build.status.last_updated = build.status.last_updated%0A new_build.status.save()%0A else:%0A print(%22No matching test runs found in build: %25s%22 %25 build)%0A
|
|
0b09eeede5f8a1533c166fe97844834ceb2ee178
|
remove some hard-coded paths
|
autoload/nimrod_vim.py
|
autoload/nimrod_vim.py
|
import threading, Queue, subprocess, signal, os
try:
import vim
except ImportError:
class Vim:
def command(self, x):
print("Executing vim command: " + x)
vim = Vim()
def disable_sigint():
# Ignore the SIGINT signal by setting the handler to the standard
# signal handler SIG_IGN.
signal.signal(signal.SIGINT, signal.SIG_IGN)
class NimrodThread(threading.Thread):
def __init__(self):
super(NimrodThread, self).__init__()
self.tasks = Queue.Queue()
self.responses = Queue.Queue()
self.nim = subprocess.Popen(
["nimrod", "serve", "--server.type:stdin", "nimrod.nim"],
cwd = "/Users/zahary/Projects/nim/compiler",
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
universal_newlines = True,
preexec_fn = disable_sigint,
bufsize = 1)
def postNimCmd(self, msg, async = True):
self.tasks.put((msg, async))
if not async:
return self.responses.get()
def run(self):
while True:
(msg, async) = self.tasks.get()
if msg == "quit":
self.nim.terminate()
break
self.nim.stdin.write(msg + "\n")
result = ""
while True:
line = self.nim.stdout.readline()
result += line
if line == "\n":
if not async:
self.responses.put(result)
else:
self.asyncOpComplete(msg, result)
break
def vimEscapeExpr(expr):
return expr.replace("\\", "\\\\").replace('"', "\\\"").replace("\n", "\\n")
class NimrodVimThread(NimrodThread):
def asyncOpComplete(self, msg, result):
cmd = "/usr/local/bin/mvim --remote-expr 'NimrodAsyncCmdComplete(1, \"" + vimEscapeExpr(result) + "\")'"
os.system (cmd)
projects = {}
log = open("/tmp/nim-log.txt", "w")
def execNimCmd(project, cmd, async = True):
target = None
if projects.has_key(project):
target = projects[project]
else:
target = NimrodVimThread()
projects[project] = target
target.start()
result = target.postNimCmd(cmd, async)
if result != None:
log.write(result)
log.flush()
if not async:
vim.command('let l:py_res = "' + vimEscapeExpr(result) + '"')
|
Python
| 0.999058
|
@@ -399,24 +399,38 @@
_init__(self
+, project_path
):%0A super
@@ -614,20 +614,20 @@
n%22,
-%22nimrod.nim%22
+project_path
%5D,%0A
@@ -642,45 +642,37 @@
d =
-%22/Users/zahary/Projects/nim/compiler%22
+os.path.dirname(project_path)
,%0A
@@ -1978,16 +1978,23 @@
mThread(
+project
)%0A pr
|
0ba2b5f63aeb39a1d1faf6d0f9eb2626bcc86d3e
|
Create urls.py
|
urls.py
|
urls.py
|
Python
| 0.000017
|
@@ -0,0 +1,331 @@
+from django.conf.urls import url%0Afrom django.contrib import admin%0Afrom django.conf.urls.static import static%0Afrom django.conf import settings%0Afrom likes import *%0A%0Aurlpatterns = %5B%0A url(r'%5Eadmin/', admin.site.urls),%0A url(r'%5E$', index),%0A url(r'%5Elike', uri_manager, name='likes'),%0A url(r'%5Eauth', authorize, name='auth'),%0A%5D%0A
|
|
5f7dfe4b83f318cc712bc1a7ed955d6c6374f7f5
|
Create es4_windows.py
|
04-work-with-multiple-windows/python/es4_windows.py
|
04-work-with-multiple-windows/python/es4_windows.py
|
Python
| 0.000001
|
@@ -0,0 +1,1360 @@
+# http://elementalselenium.com/tips/4-work-with-multiple-windows%0Aimport os%0Aimport unittest%0Afrom selenium import webdriver%0Afrom selenium.webdriver.common.keys import Keys # for send_keys%0A%0Aclass ES4_Windows(unittest.TestCase):%0A %0A def setUp(self):%0A self.driver = webdriver.Firefox()%0A%0A def test_example_1(self):%0A self.driver.get('http://the-internet.herokuapp.com/windows')%0A self.driver.find_element_by_css_selector('.example a').click()%0A self.driver.switch_to_window(self.driver.window_handles%5B0%5D)%0A assert(self.driver.title != %22New Window%22)%0A self.driver.switch_to_window(self.driver.window_handles%5B-1%5D)%0A assert(self.driver.title == %22New Window%22)%0A %0A def test_example_2(self):%0A self.driver.get('http://the-internet.herokuapp.com/windows')%0A %0A first_window = self.driver.window_handles%5B0%5D%0A self.driver.find_element_by_css_selector('.example a').click()%0A all_windows = self.driver.window_handles%0A for window in all_windows:%0A if window != first_window:%0A new_window = window%0A self.driver.switch_to_window(first_window)%0A assert(self.driver.title != %22New Window%22)%0A self.driver.switch_to_window(new_window)%0A assert(self.driver.title == %22New Window%22)%0A %0A def tearDown(self):%0A self.driver.quit()%0A
|
|
8c0612232a79c55b3c58a6230f590e5d6d4e36f9
|
Implement taking info parser
|
backend/core/parser.py
|
backend/core/parser.py
|
Python
| 0.000006
|
@@ -0,0 +1,1227 @@
+def parse_credit(text):%0A semester_name = %7B%0A 'U000200001U000300001': '1',%0A 'U000200001U000300002': 'S',%0A 'U000200002U000300001': '2',%0A 'U000200002U000300002': 'W',%0A %7D%0A lines = text.split('%5Cn')%5B:-1%5D%0A columns = %5B%5D%0A infos = %5B%5D%0A for line in lines:%0A line = line.split('%5Ct')%0A if len(columns) != len(line):%0A columns = line%0A continue%0A dic = to_dict(columns, line)%0A%0A # 26 : course taking info%0A # 13 : semester info%0A # 9 : total info%0A if dic.haskey('SBJT_CD'):%0A taking_info = %7B%0A 'year': int(dic%5B'SCHYY'%5D),%0A 'semester': semester_name%5Bdic%5B'SHTM_FG'%5D + dic%5B'DETA_SHTM_FG'%5D%5D,%0A 'code': dic%5B'SBJT_CD'%5D,%0A 'number': dic%5B'LT_NO'%5D,%0A 'title': dic%5B'SBJT_NM'%5D,%0A 'credit': int(dic%5B'ACQ_PNT'%5D),%0A 'grade': dic%5B'MRKS_GRD_CD'%5D,%0A 'category': dic%5B'CPTN_SUBMATT_FG_CD_NM'%5D%0A %7D%0A infos.append(taking_info)%0A return infos%0A%0A%0Adef to_dict(columns, row):%0A assert len(columns) == len(row)%0A dic = %7B%7D%0A for column, content in zip(columns, row):%0A dic%5Bcolumn%5D = content%0A return dic%0A
|
|
8e16ac329d459d11b971216b0eab032e81b5557e
|
Add flowline benchmarks (#489)
|
benchmarks/flowline.py
|
benchmarks/flowline.py
|
Python
| 0
|
@@ -0,0 +1,2216 @@
+# Write the benchmarking functions here.%0A# See %22Writing benchmarks%22 in the asv docs for more information.%0Aimport os%0Aimport shutil%0Aimport numpy as np%0Aimport oggm%0Afrom oggm.tests.funcs import init_hef, get_test_dir%0Afrom oggm import utils, tasks%0Afrom oggm.core import massbalance, flowline%0A%0A%0Atestdir = os.path.join(get_test_dir(), 'benchmarks')%0Autils.mkdir(testdir, reset=True)%0Aheights = np.linspace(2200, 3600, 120)%0Ayears = np.arange(151) + 1850%0A%0A%0Adef teardown():%0A if os.path.exists(testdir):%0A shutil.rmtree(testdir)%0A%0A%0Adef setup():%0A global gdir%0A gdir = init_hef(border=80)%0A teardown()%0A gdir = tasks.copy_to_basedir(gdir, base_dir=testdir, setup='all')%0A flowline.init_present_time_glacier(gdir)%0A%0A%0Adef time_hef_run_until():%0A%0A mb_mod = massbalance.RandomMassBalance(gdir, bias=0, seed=0)%0A fls =gdir.read_pickle('model_flowlines')%0A model = flowline.FluxBasedModel(fls, mb_model=mb_mod, y0=0.)%0A model.run_until(200)%0A%0A%0Adef time_hef_run_until_in_steps():%0A%0A mb_mod = massbalance.RandomMassBalance(gdir, bias=0, seed=0)%0A fls =gdir.read_pickle('model_flowlines')%0A model = flowline.FluxBasedModel(fls, mb_model=mb_mod, y0=0.)%0A for yr in np.linspace(0, 200, 400):%0A model.run_until(yr)%0A%0A%0Adef time_hef_run_until_and_store():%0A%0A mb_mod = massbalance.RandomMassBalance(gdir, bias=0, seed=0)%0A fls =gdir.read_pickle('model_flowlines')%0A model = flowline.FluxBasedModel(fls, mb_model=mb_mod, y0=0.)%0A model.run_until_and_store(200)%0A%0A%0Adef time_hef_run_until_and_store_with_nc():%0A%0A mb_mod = massbalance.RandomMassBalance(gdir, bias=0, seed=0)%0A fls =gdir.read_pickle('model_flowlines')%0A model = flowline.FluxBasedModel(fls, mb_model=mb_mod, y0=0.)%0A model.run_until_and_store(200, run_path=os.path.join(testdir, 'run.nc'),%0A diag_path=os.path.join(testdir, 'diag.nc'))%0A%0A%0Atime_hef_run_until.setup = setup%0Atime_hef_run_until.teardown = teardown%0A%0Atime_hef_run_until_in_steps.setup = 
setup%0Atime_hef_run_until_in_steps.teardown = teardown%0A%0Atime_hef_run_until_and_store.setup = setup%0Atime_hef_run_until_and_store.teardown = teardown%0A%0Atime_hef_run_until_and_store_with_nc.setup = setup%0Atime_hef_run_until_and_store_with_nc.teardown = teardown%0A
|
|
59c173bf4179fc77eacf97f3f08359498d35a635
|
Add switch class for good utility
|
util.py
|
util.py
|
Python
| 0
|
@@ -0,0 +1,653 @@
+# coding=utf-8%0A%22%22%22%0AThe switch class refers to PEP 275 (Marc-Andr%C3%A9 Lemburg %3Cmal at lemburg.com%3E)%0A(https://www.python.org/dev/peps/pep-0275/)%0A%0A%22%22%22%0Aclass switch(object):%0A def __init__(self,value):%0A self.value = value%0A self.fall = False%0A%0A def __iter__(self):%0A %22%22%22Return the match method once, then Stop%22%22%22%0A yield self.match%0A raise StopIteration%0A%0A def match(self,*args):%0A %22%22%22Indicate whether or not to enter a case suite%22%22%22%0A if self.fall or not args:%0A return True%0A elif self.value in args:%0A self.fall = True%0A return True%0A else:%0A return False%0A
|
|
ef5260a53773bdc024702ca804b128ba9b12698b
|
566. Reshape the Matrix. C matrix
|
p566_c_matrix.py
|
p566_c_matrix.py
|
Python
| 0.999999
|
@@ -0,0 +1,1049 @@
+import unittest%0A%0A%0Aclass Solution(object):%0A def matrixReshape(self, nums, r, c):%0A %22%22%22%0A :type nums: List%5BList%5Bint%5D%5D%0A :type r: int%0A :type c: int%0A :rtype: List%5BList%5Bint%5D%5D%0A %22%22%22%0A if not nums or len(nums) * len(nums%5B0%5D) != r * c:%0A return nums%0A%0A result = %5B%5D%0A for row in nums:%0A result += row%0A%0A return %5Bresult%5Bi:i + c%5D for i in xrange(0, len(result), c)%5D%0A%0A%0Aclass Test(unittest.TestCase):%0A def test(self):%0A self._test(%0A %5B%0A %5B1, 2%5D,%0A %5B3, 4%5D,%0A %5D,%0A 1, 4,%0A %5B%0A %5B1, 2, 3, 4%5D,%0A %5D)%0A self._test(%0A %5B%0A %5B1, 2%5D,%0A %5B3, 4%5D,%0A %5D,%0A 2, 4,%0A %5B%0A %5B1, 2%5D,%0A %5B3, 4%5D,%0A %5D)%0A%0A def _test(self, nums, r, c, expected):%0A actual = Solution().matrixReshape(nums, r, c)%0A self.assertEqual(actual, expected)%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
3cb47114dc40ebbbcc946c79946ec59813a7af7f
|
add email module
|
emails.py
|
emails.py
|
Python
| 0.000006
|
@@ -0,0 +1,885 @@
+import smtplib%0A%0Afrom config import MAIL_SERVER%0Afrom config import MAIL_PORT%0Afrom config import MAIL_USERNAME%0Afrom config import MAIL_PASSWORD%0Afrom config import DEFAULT_MAIL_SENDER%0A%0A%0Adef send_email(recipient, subject, body, sender=None):%0A%0A if sender is None:%0A FROM = DEFAULT_MAIL_SENDER%0A else:%0A FROM = sender%0A%0A TO = recipient if type(recipient) is list else %5Brecipient%5D%0A SUBJECT = subject%0A TEXT = body%0A%0A # Prepare actual message%0A message = %22%22%22%5CFrom: %25s%5CnTo: %25s%5CnSubject: %25s%5Cn%5Cn%25s%0A %22%22%22 %25 (FROM, %22, %22.join(TO), SUBJECT, TEXT)%0A try:%0A server = smtplib.SMTP(MAIL_SERVER, MAIL_PORT)%0A server.ehlo()%0A server.starttls()%0A server.login(MAIL_USERNAME, MAIL_PASSWORD)%0A server.sendmail(FROM, TO, message)%0A server.close()%0A print ('successfully sent the mail')%0A except:%0A print ('failed to send mail')%0A
|
|
c08ebde007b0c57780d8d63c1f925c1d6a8bfc7c
|
Append 'grpc' to the C++ service namespaces for google APIs
|
third_party/googleapis/build_rules.bzl
|
third_party/googleapis/build_rules.bzl
|
# Copyright 2020 The TensorFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for building grpc and proto libraries from googleapis.
"""
load("@rules_cc//cc:defs.bzl", native_cc_proto_library = "cc_proto_library")
load("@com_github_grpc_grpc//bazel:generate_cc.bzl", "generate_cc")
def _tf_cc_headers(ctx):
if len(ctx.attr.deps) != 1:
fail("deps must have exactly 1 photo_library")
return [
CcInfo(
compilation_context = ctx.attr.deps[0][CcInfo].compilation_context,
),
DefaultInfo(
files = ctx.attr.deps[0][CcInfo].compilation_context.headers,
),
]
tf_cc_headers = rule(
implementation = _tf_cc_headers,
attrs = {
"deps": attr.label_list(providers = [CcInfo]),
},
)
def cc_proto_library(name, deps):
"""Generates a cc library and a header only cc library from a proto library
Args:
name: the name of the cc_library
deps: a list that contains exactly one proto_library
"""
native_cc_proto_library(
name = name,
deps = deps,
visibility = ["//visibility:public"],
)
tf_cc_headers(
name = name + "_headers_only",
deps = [":" + name],
visibility = ["//visibility:public"],
)
def cc_grpc_library(name, srcs, deps, **kwargs):
"""Generates a cc library with grpc implementation and cc proto headers
Args:
name: the name of the cc_grpc_library to be created
srcs: the proto_libraries used to generate the cc_grpc_library
deps: the dependencies used to link into this cc_grpc_library, defined by
cc_proto_library
**kwargs: other args not used, for compatibility only
"""
if len(srcs) != 1:
fail("srcs must have exactly 1 photo_library", "srcs")
codegen_grpc_target = "_" + name + "_grpc_codegen"
generate_cc(
name = codegen_grpc_target,
srcs = srcs,
plugin = "@com_github_grpc_grpc//src/compiler:grpc_cpp_plugin",
well_known_protos = True,
generate_mocks = True,
)
grpc_proto_dep = "@com_github_grpc_grpc//:grpc++_codegen_proto"
native.cc_library(
name = name,
srcs = [":" + codegen_grpc_target],
hdrs = [":" + codegen_grpc_target],
deps = [dep + "_headers_only" for dep in deps] + [grpc_proto_dep],
visibility = ["//visibility:public"],
)
|
Python
| 0.000021
|
@@ -1835,16 +1835,44 @@
s, deps,
+ service_namespace = %22grpc%22,
**kwarg
@@ -2477,16 +2477,100 @@
= srcs,%0A
+ flags = %5B%0A %22services_namespace=%22 + service_namespace,%0A %5D,%0A
|
f301aa5ecdc3f9d821a72bb6a03d9857c43e72fc
|
add missing file
|
app/fields/auth.py
|
app/fields/auth.py
|
Python
| 0.000003
|
@@ -0,0 +1,308 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0Afrom flask_restplus import fields%0Afrom app import api%0A%0Aauth_fields_get = api.model('AuthGet', %7B 'access_token': fields.String %7D)%0Aauth_fields_post = api.model('AuthPost', %7B%0A%09'username': fields.String(required=True),%0A%09'password': fields.String(required=True)%0A%09%7D%0A)%0A
|
|
2785b8d5b1338126b6887b23fefec106ed877601
|
Add first 16 hexagrams
|
hexagrams.py
|
hexagrams.py
|
Python
| 0.999999
|
@@ -0,0 +1,566 @@
+# -*- coding: utf-8 -*-%0A%0Ahexagrams = %7B%0A %22Ch'ien%22: (1, 1, 1, 1, 1, 1),%0A %22K'un%22: (0, 0, 0, 0, 0, 0),%0A %22Chun%22: (0, 1, 0, 0, 0, 1),%0A %22M%C3%AAng%22: (1, 0, 0, 0, 1, 0),%0A %22Hs%C3%BC%22: (0, 1, 0, 1, 1, 1 ),%0A %22Sung%22: (1, 1, 1, 0, 1, 0),%0A %22Shih%22: (0, 0, 0, 0, 1, 0),%0A %22Pi%22: (0, 1, 0, 0, 0, 0),%0A %22Hsiao Ch'u%22: (1, 1, 0, 1, 1, 1),%0A %22L%C3%BC%22: (1, 1, 1, 0, 1, 1),%0A %22T'ai%22: (0, 0, 0, 1, 1, 1),%0A %22P'i%22: (1, 1, 1, 0, 0, 0),%0A %22T'ung J%C3%AAn %22: (1, 1, 1, 1, 0, 1),%0A %22Ta Yu%22: (1, 0, 1, 1, 1, 1),%0A %22Ch'ien%22: (0, 0, 0, 1, 0, 0),%0A %22Y%C3%BC%22: (0, 0, 1, 0, 0, 0) %0A%7D%0A
|
|
11cc94f4a6545b49317042df4edf1804ccc3bdd8
|
add a python script to export the links
|
export.py
|
export.py
|
Python
| 0
|
@@ -0,0 +1,1457 @@
+#!/usr/bin/env python%0A%22%22%22%0AA quick script to turn the database into KML showing pairs of point-to-point links%0A%22%22%22%0Aimport sys%0Aimport sqlite3%0Afrom xml.sax.saxutils import escape%0A%0Aconn = sqlite3.connect(%22prism.sqlite3%22)%0Ac = conn.cursor()%0Asql = open(%22getpairs.sql%22).read()%0Ac.execute(sql)%0A%0Aprint %22%22%22%3C?xml version=%221.0%22 encoding=%22UTF-8%22?%3E%0A%3Ckml xmlns=%22http://www.opengis.net/kml/2.2%22%3E%0A %3CDocument%3E%0A %3Cname%3EPaths%3C/name%3E%0A %3Cdescription%3EExamples of paths. Note that the tessellate tag is by default%0A set to 0. If you want to create tessellated lines, they must be authored%0A (or edited) directly in KML.%3C/description%3E%0A %3CStyle id=%22yellowLineGreenPoly%22%3E%0A %3CLineStyle%3E%0A %3Ccolor%3E7f00ffff%3C/color%3E%0A %3Cwidth%3E4%3C/width%3E%0A %3C/LineStyle%3E%0A %3CPolyStyle%3E%0A %3Ccolor%3E7f00ff00%3C/color%3E%0A %3C/PolyStyle%3E%0A %3C/Style%3E%22%22%22%0A%0Afor licenceid, rxlng, rxlat, rxalt, txlng, txlat, txalt, rxname, txname in c:%0A print %22%22%22%0A %3CPlacemark%3E%0A %3Cname%3ELicence ID %25s%3C/name%3E%0A %3Cdescription%3Erx: %25s, tx: %25s%3C/description%3E%0A %3CstyleUrl%3E#yellowLineGreenPoly%3C/styleUrl%3E%0A %3CLineString%3E%0A %3Cextrude%3E1%3C/extrude%3E%0A %3CaltitudeMode%3Eabsolute%3C/altitudeMode%3E%0A %3Ccoordinates%3E%0A %25s,%25s,%25s%0A %25s,%25s,%25s%0A %3C/coordinates%3E%0A %3C/LineString%3E%0A %3C/Placemark%3E%22%22%22 %25 (licenceid, escape(rxname).replace(%22%5Cx12%22,%22%22), escape(txname).replace(%22%5Cx12%22,%22%22), rxlng, rxlat, rxalt, txlng, txlat, txalt)%0A%0Aprint %22%22%22%0A %3C/Document%3E%0A%3C/kml%3E%0A%22%22%22%0A
|
|
6e80dd7537ec545175f9ab16c12c4591c5ac0531
|
add jurisdiction_id
|
pupa/scrape/popolo.py
|
pupa/scrape/popolo.py
|
import copy
from .base import (BaseModel, SourceMixin, LinkMixin, ContactDetailMixin, OtherNameMixin,
IdentifierMixin)
from .schemas.post import schema as post_schema
from .schemas.person import schema as person_schema
from .schemas.membership import schema as membership_schema
from .schemas.organization import schema as org_schema
# a copy of the org schema without sources
org_schema_no_sources = copy.deepcopy(org_schema)
org_schema_no_sources['properties'].pop('sources')
class Post(BaseModel, LinkMixin, ContactDetailMixin):
"""
A popolo-style Post
"""
_type = 'post'
_schema = post_schema
def __init__(self, label, role, organization_id, start_date='', end_date=''):
super(Post, self).__init__()
self.label = label
self.role = role
self.organization_id = organization_id
self.start_date = start_date
self.end_date = end_date
def __str__(self):
return self.label
__unicode__ = __str__
class Membership(BaseModel, ContactDetailMixin, LinkMixin):
"""
A popolo-style Membership.
"""
_type = 'membership'
_schema = membership_schema
def __init__(self, person_id, organization_id, post_id=None,
role='', label='', start_date='', end_date='', on_behalf_of_id=None):
"""
Constructor for the Membership object.
We require a person ID and organization ID, as required by the
popolo spec. Additional arguments may be given, which match those
defined by popolo.
"""
super(Membership, self).__init__()
self.person_id = person_id
self.organization_id = organization_id
self.post_id = post_id
self.start_date = start_date
self.end_date = end_date
self.role = role
self.label = label
self.on_behalf_of_id = on_behalf_of_id
self._unmatched_legislator = None
def __str__(self):
if self.person_id:
return self.person_id + ' membership in ' + self.organization_id
else:
return (self._unmatched_legislator['name'] + ' membership in ' + self.organization_id)
__unicode__ = __str__
class Person(BaseModel, SourceMixin, ContactDetailMixin, LinkMixin, IdentifierMixin,
OtherNameMixin):
"""
Details for a Person in Popolo format.
"""
_type = 'person'
_schema = person_schema
def __init__(self, name, birth_date='', death_date='', biography='', summary='', image='',
gender='', national_identity=''):
super(Person, self).__init__()
self.name = name
self.birth_date = birth_date
self.death_date = death_date
self.biography = biography
self.summary = summary
self.image = image
self.gender = gender
self.national_identity = national_identity
def add_membership(self, organization, role='member', **kwargs):
"""
add a membership in an organization and return the membership
object in case there are more details to add
"""
membership = Membership(self._id, organization._id, role=role, **kwargs)
self._related.append(membership)
return membership
def __str__(self):
return self.name
__unicode__ = __str__
class Organization(BaseModel, SourceMixin, ContactDetailMixin, LinkMixin, IdentifierMixin,
                   OtherNameMixin):
    """
    A single popolo-style Organization.
    """
    _type = 'organization'
    _schema = org_schema

    def __init__(self, name, classification=None, parent_id=None, founding_date='',
                 dissolution_date='', image=''):
        """Build an Organization from the supplied attributes."""
        super(Organization, self).__init__()
        self.name = name
        self.classification = classification
        self.founding_date = founding_date
        self.dissolution_date = dissolution_date
        self.parent_id = parent_id
        self.image = image

    def __str__(self):
        return self.name
    __unicode__ = __str__

    def validate(self):
        """Validate against the schema; parties use the source-free schema."""
        alternate = org_schema_no_sources if self.classification in ('party',) else None
        return super(Organization, self).validate(schema=alternate)

    def add_post(self, label, role, **kwargs):
        """Attach a new Post to this organization as a related object."""
        new_post = Post(label=label, role=role, organization_id=self._id, **kwargs)
        self._related.append(new_post)
|
Python
| 0.999611
|
@@ -3595,16 +3595,38 @@
f, name,
+ jurisdiction_id=None,
classif
@@ -3638,16 +3638,33 @@
on=None,
+%0A
parent_
@@ -3689,33 +3689,16 @@
date='',
-%0A
dissolu
@@ -3706,24 +3706,41 @@
ion_date='',
+%0A
image=''):%0A
@@ -4082,16 +4082,96 @@
image%0A%0A
+ if jurisdiction_id:%0A self.jurisdiction_id = jurisdiction_id%0A%0A
def
|
2d7d4b533999c029a33d7802d7ddfba631e07604
|
Add Django 1.4 style wsgi app settings file
|
wsgi.py
|
wsgi.py
|
Python
| 0
|
@@ -0,0 +1,1693 @@
+%22%22%22 WSGI application settings for FST instance %0A%0AThe default setup at Domstolsverket assumes every instance of FST is %0Arunning under it's own virtualenv. %0A%22%22%22%0Aimport os%0Aimport sys%0Aimport site%0A%0A#VIRTUALENV_PATH = '/path/of/your/virtualenv'%0APYTHON_SITE_PACKAGES = 'lib/python2.6/site-packages'%0A%0A# Specify the site-packages folder of your virtualenv%0A# ALLDIRS = %5B'/opt/rinfo/fst/instances/fffs/lib/python2.6/site-packages'%5D%0AALLDIRS = %5Bos.path.join(VIRTUALENV_PATH, PYTHON_SITE_PACKAGES)%5D%0A%0A# Redirect sys.stdout to sys.stderr for bad libraries like geopy that uses%0A# print statements for optional import exceptions.%0Asys.stdout = sys.stderr%0Aprev_sys_path = list(sys.path)%0A%0A# Add all third-party libraries from your virtualenv%0Afor directory in ALLDIRS:%0A site.addsitedir(directory)%0A%0A# Reorder sys.path so new directories come first.%0Anew_sys_path = %5B%5D%0Afor item in list(sys.path):%0A if item not in prev_sys_path:%0A new_sys_path.append(item)%0A sys.path.remove(item)%0Asys.path%5B:0%5D = new_sys_path %0A%0A# Activate the virtualenv%0Aactivate_this = os.path.join(VIRTUALENV_PATH, 'bin/activate_this.py')%0Aexecfile(activate_this, dict(__file__=activate_this))%0Afrom os.path import abspath, dirname, join%0A%0A# Some more path trickery...%0Asys.path.insert(0, abspath(join(dirname(__file__), %22../../%22)))%0A%0A# Now we can run our Django app under WSGI!%0Afrom django.conf import settings%0Aos.environ%5B%22DJANGO_SETTINGS_MODULE%22%5D = %22fst_web.settings%22%0A%0Apath = os.path.dirname(__file__)%0Asubpath = path + os.sep + %22fst_web%22%0Aif subpath not in sys.path:%0A sys.path.insert(0,subpath)%0A%0Aif path not in sys.path:%0A sys.path.insert(0,path)%0A%0Afrom django.core.wsgi import get_wsgi_application%0Aapplication = get_wsgi_application()
|
|
c131e6108a72c57af4d3bdbe67d182d6c0ddb1eb
|
Add migration to modify on_delete
|
geotrek/feedback/migrations/0008_auto_20200326_1252.py
|
geotrek/feedback/migrations/0008_auto_20200326_1252.py
|
Python
| 0.000001
|
@@ -0,0 +1,1154 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.27 on 2020-03-26 12:52%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('feedback', '0007_auto_20200324_1412'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='report',%0A name='activity',%0A field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportActivity', verbose_name='Activity'),%0A ),%0A migrations.AlterField(%0A model_name='report',%0A name='category',%0A field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportCategory', verbose_name='Category'),%0A ),%0A migrations.AlterField(%0A model_name='report',%0A name='problem_magnitude',%0A field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='feedback.ReportProblemMagnitude', verbose_name='Problem magnitude'),%0A ),%0A %5D%0A
|
|
b42c13de01a49e7fe3fb7caa22089ea1cd87f7bf
|
Add sanity tests for baremetal power state commands
|
ironicclient/tests/functional/osc/v1/test_baremetal_node_power_states.py
|
ironicclient/tests/functional/osc/v1/test_baremetal_node_power_states.py
|
Python
| 0.999226
|
@@ -0,0 +1,2393 @@
+# Copyright (c) 2016 Mirantis, Inc.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Afrom ironicclient.tests.functional.osc.v1 import base%0A%0A%0Aclass PowerStateTests(base.TestCase):%0A %22%22%22Functional tests for baremetal node power state commands.%22%22%22%0A%0A def setUp(self):%0A super(PowerStateTests, self).setUp()%0A self.node = self.node_create()%0A%0A def test_off_reboot_on(self):%0A %22%22%22Reboot node from Power OFF state.%0A%0A Test steps:%0A 1) Create baremetal node in setUp.%0A 2) Set node Power State OFF as precondition.%0A 3) Call reboot command for baremetal node.%0A 4) Check node Power State ON in node properties.%0A %22%22%22%0A self.openstack('baremetal node power off %7B0%7D'%0A .format(self.node%5B'uuid'%5D))%0A show_prop = self.node_show(self.node%5B'uuid'%5D, %5B'power_state'%5D)%0A self.assertEqual('power off', show_prop%5B'power_state'%5D)%0A%0A self.openstack('baremetal node reboot %7B0%7D'.format(self.node%5B'uuid'%5D))%0A show_prop = self.node_show(self.node%5B'uuid'%5D, %5B'power_state'%5D)%0A self.assertEqual('power on', show_prop%5B'power_state'%5D)%0A%0A def test_on_reboot_on(self):%0A %22%22%22Reboot node from Power ON state.%0A%0A Test steps:%0A 1) Create baremetal node in setUp.%0A 2) Set node Power State ON as precondition.%0A 3) Call reboot command for baremetal node.%0A 4) Check node Power State ON in node properties.%0A %22%22%22%0A self.openstack('baremetal node power on 
%7B0%7D'.format(self.node%5B'uuid'%5D))%0A show_prop = self.node_show(self.node%5B'uuid'%5D, %5B'power_state'%5D)%0A self.assertEqual('power on', show_prop%5B'power_state'%5D)%0A%0A self.openstack('baremetal node reboot %7B0%7D'.format(self.node%5B'uuid'%5D))%0A show_prop = self.node_show(self.node%5B'uuid'%5D, %5B'power_state'%5D)%0A self.assertEqual('power on', show_prop%5B'power_state'%5D)%0A
|
|
f0b5b6a84f9b217376b54361b2afaeb19af24380
|
Add containers
|
blox/containers.py
|
blox/containers.py
|
Python
| 0.000005
|
@@ -0,0 +1,1291 @@
+'''blox/containers.py%0A%0AContains containers that aid in group bloxs together in a logical way%0A%0ACopyright (C) 2015 Timothy Edmund Crosley%0A%0APermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated%0Adocumentation files (the %22Software%22), to deal in the Software without restriction, including without limitation%0Athe rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and%0Ato permit persons to whom the Software is furnished to do so, subject to the following conditions:%0A%0AThe above copyright notice and this permission notice shall be included in all copies or%0Asubstantial portions of the Software.%0A%0ATHE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED%0ATO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL%0ATHE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF%0ACONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR%0AOTHER DEALINGS IN THE SOFTWARE.%0A%0A'''%0A%0Afrom blox.base import Container%0Afrom blox.builder import Factory%0A%0Afactory = Factory(%22Containers%22)%0A%0Afactory.add()(Container)%0A
|
|
2c174eeafa48302951e743ed2b9bbe91a1992899
|
make sure that the file you are trying to import actually exists
|
tests/utils.py
|
tests/utils.py
|
Python
| 0
|
@@ -0,0 +1,1507 @@
+from __future__ import unicode_literals%0A%0ARAW_DOC = %7B%0A 'doc': str('%7B%7D'),%0A 'docID': 'someID',%0A 'timestamps': %7B%0A 'consumeFinished': '2012-11-30T17:05:48+00:00',%0A 'consumeStarted': '2012-11-30T17:05:48+00:00',%0A 'consumeTaskCreated': '2012-11-30T17:05:48+00:00'%0A %7D,%0A 'filetype': 'json',%0A 'source': 'tests'%0A%7D%0A%0ARECORD = %7B%0A 'title': 'Using Table Stable Carbon in Gold and STAR Isotopes',%0A 'contributors': %5B%0A %7B%0A 'prefix': 'The One And Only',%0A 'given': 'DEVON',%0A 'middle': 'Get The Tables',%0A 'family': 'DUDLEY',%0A 'suffix': 'Thirsty Boy',%0A 'email': 'dudley.boyz@email.uni.edu',%0A 'ORCID': 'BubbaRayDudley'%0A %7D%0A %5D,%0A 'id': %7B%0A 'url': 'http://www.plosone.org/article',%0A 'doi': '10.1371/doi.DOI!',%0A 'serviceID': 'AWESOME'%0A %7D,%0A 'properties': %7B%0A 'figures': %5B'http://www.plosone.org/article/image.png'%5D,%0A 'type': 'text',%0A 'yep': 'A property'%0A %7D,%0A 'description': 'This study seeks to understand how humans impact%5C%0A the dietary patterns of eight free-ranging vervet monkey%5C%0A (Chlorocebus pygerythrus) groups in South Africa using stable%5C%0A isotope analysis.',%0A 'tags': %5B%0A 'behavior',%0A 'genetics'%0A %5D,%0A 'source': 'example_pusher',%0A 'dateCreated': '2012-11-30T17:05:48+00:00',%0A 'dateUpdated': '2015-02-23T17:05:48+00:01',%0A '_id': 'yes! yes! yes!',%0A 'count': 0%0A%7D%0A
|
|
3b06894ba1aadb3460ed60e47e931065484976ee
|
Add a command to undo results imports for specified projects
|
akvo/rsr/management/commands/undo_results_import.py
|
akvo/rsr/management/commands/undo_results_import.py
|
Python
| 0.000121
|
@@ -0,0 +1,1018 @@
+# -*- coding: utf-8 -*-%0A%0A# Akvo Reporting is covered by the GNU Affero General Public License.%0A# See more details in the license.txt file located at the root folder of the Akvo RSR module.%0A# For additional details on the GNU license please see %3C http://www.gnu.org/licenses/agpl.html %3E.%0A%0A%22%22%22Undo results framework import for the specified projects%0A%0AUsage:%0A%0A python manage.py undo_results_import %3Cproject-id1%3E %5B%3Cproject-id2%3E ...%5D%0A%0A%22%22%22%0A%0Aimport sys%0A%0Afrom django.core.management.base import BaseCommand%0Afrom ...models import Result%0A%0A%0Aclass Command(BaseCommand):%0A help = u%22Undo results framework import for the specified projects%22%0A%0A def handle(self, *args, **options):%0A if not args:%0A print(__doc__)%0A sys.exit(1)%0A%0A project_ids = map(int, args)%0A%0A for id_ in project_ids:%0A results = Result.objects.filter(project__id=id_).exclude(parent_result=None)%0A print %22Deleting %7B%7D results for project %7B%7D%22.format(results.count(), id_)%0A results.delete()%0A
|
|
6c28bd060ca1d28eb581ba32dc7f5b17ce1e70ac
|
Remove redundant output
|
mllib/collaborative_filtering/als/movie_recommender.py
|
mllib/collaborative_filtering/als/movie_recommender.py
|
#!/usr/bin/env python
import sys
from math import sqrt
from operator import add
from os.path import isfile
from pyspark import SparkConf, SparkContext
from pyspark.mllib.recommendation import ALS
##########################################################################
# Imports
##########################################################################
def parse_rating(line, sep='::'):
    """
    Parse a single rating line.

    Args:
        line: text of the form "user<sep>movie<sep>rating<sep>timestamp".
        sep: field separator (default '::', as used by the MovieLens files).

    Returns:
        Tuple of (last digit of timestamp, (user_id, movie_id, rating)).
        The last digit is used downstream to split ratings into training
        and validation sets.
    """
    fields = line.strip().split(sep)
    user_id = int(fields[0])
    movie_id = int(fields[1])
    rating = float(fields[2])
    # int() instead of long(): Python 3 has no `long`, and on Python 2
    # int() auto-promotes oversized values, so the result is unchanged.
    last_digit = int(fields[3]) % 10
    return last_digit, (user_id, movie_id, rating)
def parse_movie(line, sep='::'):
    """
    Parse a single movie line.

    Returns:
        Tuple of (movie_id, title); any trailing fields (e.g. genres)
        are ignored.
    """
    movie_id, title = line.strip().split(sep)[:2]
    return int(movie_id), title
def load_ratings(file, sep='\t'):
    """
    Load personal ratings from a delimited file.

    Args:
        file: path of the ratings file.
        sep: field separator passed through to parse_rating (default '\t').
             Previously this parameter was accepted but ignored — a
             hard-coded '\t' was always used; it is now honored, with the
             same default so existing callers behave identically.

    Returns:
        List of (user_id, movie_id, rating) tuples with "not seen"
        entries (rating == 0) filtered out.

    Exits the program when the file is missing or holds no usable ratings.
    """
    if not isfile(file):
        print("File %s does not exist." % file)
        sys.exit(1)
    # `with` guarantees the handle is closed even if parsing raises
    # (the old open()/close() pair leaked the file on error).
    with open(file, 'r') as f:
        parsed = [parse_rating(line, sep)[1] for line in f]
    # Keep only movies that have actually been seen (rating > 0).
    ratings = [r for r in parsed if r[2] > 0]
    if not ratings:
        print("No ratings provided.")
        sys.exit(1)
    return ratings
def compute_rmse(model, data, n):
    """
    Root Mean Squared Error of *model*'s predictions against *data*:
    sqrt(mean((actual rating - predicted rating)^2)) over n ratings.
    """
    # Ask the model for a prediction for every (user, movie) pair in data.
    keys = data.map(lambda x: (x[0], x[1]))
    predicted = model.predictAll(keys).map(lambda x: ((x[0], x[1]), x[2]))
    actual = data.map(lambda x: ((x[0], x[1]), x[2]))
    # Pair each prediction with the actual rating via a keyed join.
    paired = predicted.join(actual).values()
    squared_error = paired.map(lambda x: (x[0] - x[1]) ** 2).reduce(add)
    return sqrt(squared_error / float(n))
##########################################################################
# Main
##########################################################################
if __name__ == "__main__":
    # Expect exactly one argument: the path of the personal ratings file.
    if len(sys.argv) != 2:
        print "Incorrect number of arguments, correct usage: movie_recommender.py [ratingsfile]"
        sys.exit(-1)

    # Configure Spark: single local master, 2 GB executor memory.
    conf = SparkConf().setMaster("local") \
                      .setAppName("Movie Recommender") \
                      .set("spark.executor.memory", "2g")
    sc = SparkContext(conf=conf)

    # Path of the personal ratings file supplied on the command line.
    ratings_file = sys.argv[1]

    # Load personal ratings as a list of rating tuples (0-ratings filtered out).
    my_ratings = load_ratings(ratings_file)
    my_ratings_rdd = sc.parallelize(my_ratings, 1)

    # Ratings RDD of (last digit of timestamp, (user_id, movie_id, rating)).
    # NOTE(review): the MovieLens 1M paths below are hard-coded to this
    # host's layout — consider making them configurable.
    ratings = sc.textFile("/home/hadoop/ml-1m/ratings.dat") \
                .map(parse_rating)

    # Movies dict of {movie_id: title}, collected to the driver.
    movies = dict(sc.textFile("/home/hadoop/ml-1m/movies.dat")
                  .map(parse_movie)
                  .collect())

    # Compute total ratings and unique users/movies for the statistics line.
    num_ratings = ratings.count()
    num_users = ratings.values().map(lambda r: r[0]).distinct().count()
    num_movies = ratings.values().map(lambda r: r[1]).distinct().count()
    print "Got %d ratings from %d users on %d movies." % (num_ratings, num_users, num_movies)

    # Split into training (~60%) and validation (~40%) sets based on the
    # last digit of each rating's timestamp; the personal ratings are
    # merged into the training set.
    num_partitions = 4
    training = ratings.filter(lambda x: x[0] < 6) \
                      .values() \
                      .union(my_ratings_rdd) \
                      .repartition(num_partitions) \
                      .cache()
    validation = ratings.filter(lambda x: x[0] >= 6) \
                        .values() \
                        .repartition(num_partitions) \
                        .cache()
    num_training = training.count()
    num_validation = validation.count()
    print "Training: %d and validation: %d\n" % (num_training, num_validation)

    # rank is the number of latent factors in the model.
    # num_iterations is the number of ALS iterations to run.
    # lmbda is the ALS regularization parameter.
    rank = 12
    num_iterations = 10
    lmbda = 0.1

    print "Training model based on rank = %d, %d iterations, and lambda = %.1f.\n" % \
        (rank, num_iterations, lmbda)

    # Train the ALS model on the training set.
    model = ALS.train(training, rank, num_iterations, lmbda)

    # Evaluate the trained model on the validation set via RMSE.
    validation_rmse = compute_rmse(model, validation, num_validation)

    print "The model was trained with rank = %d, lambda = %.1f, and %d iterations." % \
        (rank, lmbda, num_iterations)
    print "Its RMSE on the validation set is %f." % validation_rmse

    # Movie ids the user has already rated.
    my_rated_movie_ids = set([r[1] for r in my_ratings])

    # Candidate movies: everything the user has not yet rated.
    candidates = sc.parallelize([m for m in movies if m not in my_rated_movie_ids])

    # Predict a rating for user id 0 for each candidate — presumably the
    # id used in the personal ratings file; confirm against its contents.
    predictions = model.predictAll(candidates.map(lambda x: (0, x))).collect()

    # Sort recommendations by predicted rating, keep the top 50.
    recommendations = sorted(predictions, key=lambda x: x[2], reverse=True)[:50]

    print "Movies recommended for you:"
    for i in xrange(len(recommendations)):
        # encode('ascii', 'ignore') drops any non-ASCII characters in titles.
        print ("%2d: %s" % (i + 1, movies[recommendations[i][1]])).encode('ascii', 'ignore')

    # Shut down the SparkContext cleanly.
    sc.stop()
|
Python
| 0.999999
|
@@ -4472,134 +4472,8 @@
.1%0A%0A
- print %22Training model based on rank = %25d, %25d iterations, and lambda = %25.1f.%5Cn%22 %25 %5C%0A (rank, num_iterations, lmbda)%0A%0A
|
5dde9f6aca671440253729c29530e93974921ea0
|
Add a migration to add the 'Other' field to QueuedImage.why_allowed
|
moderation_queue/migrations/0007_auto_20150303_1420.py
|
moderation_queue/migrations/0007_auto_20150303_1420.py
|
Python
| 0.000001
|
@@ -0,0 +1,887 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('moderation_queue', '0006_auto_20150303_0838'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='queuedimage',%0A name='why_allowed',%0A field=models.CharField(default=b'other', max_length=64, choices=%5B(b'public-domain', b'This photograph is free of any copyright restrictions'), (b'copyright-assigned', b'I own copyright of this photo and I assign the copyright to Democracy Club Limited in return for it being displayed on YourNextMP'), (b'profile-photo', b%22This is the candidate's public profile photo from social media (e.g. Twitter, Facebook) or their official campaign page%22), (b'other', b'Other')%5D),%0A preserve_default=True,%0A ),%0A %5D%0A
|
|
065b8ec2865b0746393a427d1d078c655905241c
|
Add renaming PDF files
|
rename-pdf.py
|
rename-pdf.py
|
Python
| 0
|
@@ -0,0 +1,967 @@
+#!/usr/bin/env python%0A__author__ = 'Jacob Bieker'%0Aimport os%0ADATA_DIRECTORY = os.path.join(%22test_file%22)%0ADATA = os.listdir(DATA_DIRECTORY)%0Afile_name_dict = %7B%7D%0A%0Afor file_name in DATA:%0A split_name = file_name.split(%22_%22)%0A print split_name%0A file_name_dict.setdefault(split_name%5B0%5D, %5B%5D)%0A # Name has the extra _NUM extension%0A if len(split_name) %3E 1:%0A file_name_dict%5Bsplit_name%5B0%5D%5D.append(split_name%5B1%5D)%0A else:%0A file_name_dict%5Bsplit_name%5B0%5D%5D.append(0)%0A%0Afor key in file_name_dict:%0A if len(file_name_dict%5Bkey%5D) == 1:%0A continue%0A else:%0A max = 0%0A for value in file_name_dict%5Bkey%5D:%0A if int(value) %3E max:%0A max = value%0A elif int(value) == 0:%0A path = os.path.join(DATA_DIRECTORY, str(key))%0A os.remove(path)%0A else:%0A path = os.path.join(DATA_DIRECTORY, str(key) + %22_%22 + str(value))%0A os.remove(path)%0A%0Aprint file_name_dict
|
|
57b396177854636257ad5884b0deeca6a79d207a
|
Add libpfm4 package. (#3667)
|
var/spack/repos/builtin/packages/libpfm4/package.py
|
var/spack/repos/builtin/packages/libpfm4/package.py
|
Python
| 0
|
@@ -0,0 +1,1968 @@
+##############################################################################%0A# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/llnl/spack%0A# Please also see the LICENSE file for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass Libpfm4(MakefilePackage):%0A %22%22%22libpfm4 is a userspace library to help%0A setup performance events for use with%0A the perf_events Linux kernel interface.%22%22%22%0A%0A homepage = %22http://perfmon2.sourceforge.net%22%0A url = %22https://downloads.sourceforge.net/project/perfmon2/libpfm4/libpfm-4.8.0.tar.gz%22%0A%0A version('4.8.0', '730383896db92e12fb2cc10f2d41dd43')%0A%0A # Fails to build libpfm4 with intel compiler version 16 and 17%0A conflicts('intel@16:17')%0A%0A @property%0A def install_targets(self):%0A return %5B'DESTDIR=%7B0%7D'.format(self.prefix),%0A 'LIBDIR=/lib',%0A 'INCDIR=/include',%0A 'MANDIR=/man',%0A 'LDCONFIG=true',%0A 'install'%5D%0A
|
|
d4f498db7c2cb5da0ad0202d7a4b6a919ac896b3
|
add new package (#24989)
|
var/spack/repos/builtin/packages/py-rply/package.py
|
var/spack/repos/builtin/packages/py-rply/package.py
|
Python
| 0
|
@@ -0,0 +1,591 @@
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass PyRply(PythonPackage):%0A %22%22%22A pure Python Lex/Yacc that works with RPython.%22%22%22%0A%0A homepage = %22https://github.com/alex/rply/%22%0A pypi = %22rply/rply-0.7.8.tar.gz%22%0A%0A version('0.7.8', sha256='2a808ac25a4580a9991fc304d64434e299a8fc75760574492f242cbb5bb301c9')%0A%0A depends_on('py-setuptools', type='build')%0A depends_on('py-appdirs', type=('build', 'run'))%0A
|
|
f627f04ebe0186b19d58619cab8b7098f5ca2e4c
|
Add plugin for Nova server state metrics
|
plugins/openstack/nova/nova-server-state-metrics.py
|
plugins/openstack/nova/nova-server-state-metrics.py
|
Python
| 0
|
@@ -0,0 +1,1646 @@
+#!/usr/bin/env python%0Afrom argparse import ArgumentParser%0Aimport socket%0Aimport time%0A%0Afrom novaclient.v3 import Client%0A%0ADEFAULT_SCHEME = '%7B%7D.nova.states'.format(socket.gethostname())%0A%0Adef output_metric(name, value):%0A print '%7B%7D%5Ct%7B%7D%5Ct%7B%7D'.format(name, value, int(time.time()))%0A%0Adef main():%0A parser = ArgumentParser()%0A parser.add_argument('-u', '--user', default='admin')%0A parser.add_argument('-p', '--password', default='admin')%0A parser.add_argument('-t', '--tenant', default='admin')%0A parser.add_argument('-a', '--auth-url', default='http://localhost:5000/v2.0')%0A parser.add_argument('-S', '--service-type', default='compute')%0A parser.add_argument('-s', '--scheme', default=DEFAULT_SCHEME)%0A args = parser.parse_args()%0A%0A client = Client(args.user, args.password, args.tenant, args.auth_url, service_type=args.service_type)%0A%0A servers = client.servers.list()%0A %0A # http://docs.openstack.org/api/openstack-compute/2/content/List_Servers-d1e2078.html%0A states = %7B%0A 'ACTIVE': 0,%0A 'BUILD': 0,%0A 'DELETED': 0,%0A 'ERROR': 0,%0A 'HARD_REBOOT': 0,%0A 'PASSWORD': 0,%0A 'REBOOT': 0,%0A 'REBUILD': 0,%0A 'RESCUE': 0,%0A 'RESIZE': 0,%0A 'REVERT_RESIZE': 0,%0A 'SHUTOFF': 0,%0A 'SUSPENDED': 0,%0A 'UNKNOWN': 0,%0A 'VERIFY_RESIZE': 0,%0A %7D%0A %0A for server in servers:%0A if server.status not in states:%0A states%5Bserver.status%5D = 0%0A%0A states%5Bserver.status%5D += 1%0A%0A for state, count in states.iteritems():%0A output_metric('%7B%7D.%7B%7D'.format(args.scheme, state.lower()), count)%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
c0cd62af0ee29743430156eda613665dd58a97c3
|
Apply SA naming convention
|
chat/indico_chat/migrations/201503101131_35badbd96474_apply_naming_convention.py
|
chat/indico_chat/migrations/201503101131_35badbd96474_apply_naming_convention.py
|
Python
| 0
|
@@ -0,0 +1,1260 @@
+%22%22%22Apply naming convention%0A%0ARevision ID: 35badbd96474%0ARevises: 1bd6c5129d29%0ACreate Date: 2015-03-10 11:31:42.850496%0A%22%22%22%0A%0Afrom alembic import op%0A%0Afrom indico.core.db.sqlalchemy.util.bulk_rename import bulk_rename%0A%0A%0A# revision identifiers, used by Alembic.%0Arevision = '35badbd96474'%0Adown_revision = '1bd6c5129d29'%0A%0A%0Amapping = %7B%0A 'plugin_chat.chatroom_events': %7B%0A 'indexes': %7B%0A 'chatroom_events_pkey': 'pk_chatroom_events',%0A 'ix_plugin_chat_chatroom_events_chatroom_id': 'ix_chatroom_events_chatroom_id',%0A 'ix_plugin_chat_chatroom_events_event_id': 'ix_chatroom_events_event_id',%0A %7D,%0A 'constraints': %7B%0A 'chatroom_events_chatroom_id_fkey': 'fk_chatroom_events_chatroom_id_chatrooms',%0A %7D%0A %7D,%0A 'plugin_chat.chatrooms': %7B%0A 'indexes': %7B%0A 'chatrooms_pkey': 'pk_chatrooms',%0A 'ix_plugin_chat_chatrooms_created_by_id': 'ix_chatrooms_created_by_id',%0A %7D,%0A 'constraints': %7B%0A 'chatrooms_jid_node_custom_server_key': 'uq_chatrooms_jid_node_custom_server',%0A %7D%0A %7D%0A%7D%0A%0A%0Adef upgrade():%0A for stmt in bulk_rename(mapping):%0A op.execute(stmt)%0A%0A%0Adef downgrade():%0A for stmt in bulk_rename(mapping, True):%0A op.execute(stmt)%0A
|
|
46c1a39041d89325d849339ebab0854a91f520d4
|
Create rtl-config.py
|
rtl-config.py
|
rtl-config.py
|
Python
| 0.000001
|
@@ -0,0 +1,320 @@
+##%0A## User configuration file - edit these settings to suit your own project%0A##%0Afile_path = '/home/pi/RPi-RTL/images/' ## path to save images%0Afile_prefix = 'img_' ## prefix before timestamp.jpg, if needed - e.g. a project number%0Ause_timestamp = True ## True = timestamp in filename, False = incremental numbering%0A
|
|
9ad5d9668c3faf85c38d5655054b8530d2e14444
|
set up migration for meta_data ingest
|
datapoints/migrations/0002_populate_initial_data.py
|
datapoints/migrations/0002_populate_initial_data.py
|
Python
| 0
|
@@ -0,0 +1,1270 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0Aimport jsonfield.fields%0Aimport django.db.models.deletion%0Afrom django.conf import settings%0A%0Aimport pandas as pd%0Afrom datapoints.models import IndicatorTag%0Afrom django.db.models import get_app, get_models%0A%0Adef populate_initial_data(apps, schema_editor):%0A%0A xl = pd.ExcelFile('initial_data.xlsx')%0A all_sheets = xl.sheet_names%0A%0A datapoints_app = get_app('datapoints')%0A%0A for model in get_models(datapoints_app):%0A%0A print model._meta.db_table%0A print all_sheets%0A%0A if model._meta.db_table in all_sheets:%0A%0A model_df = xl.parse(model._meta.db_table)%0A model_ids = model_df_to_data(model_df,model)%0A%0A%0Adef model_df_to_data(model_df,model):%0A%0A meta_ids = %5B%5D%0A%0A non_null_df = model_df.where((pd.notnull(model_df)), None)%0A list_of_dicts = non_null_df.transpose().to_dict()%0A%0A for row_ix, row_dict in list_of_dicts.iteritems():%0A%0A row_id = model.objects.create(**row_dict)%0A meta_ids.append(row_id)%0A%0A return meta_ids%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('datapoints', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(populate_initial_data),%0A %5D%0A
|
|
828f7dc96ac5ad10baf57e8697cb797f81863d48
|
change transaction related_name for consistency
|
OIPA/iati/transaction/models.py
|
OIPA/iati/transaction/models.py
|
from decimal import Decimal
from django.db import models
from django.contrib.contenttypes.fields import GenericRelation
from iati.transaction.transaction_manager import TransactionQuerySet
from geodata.models import Country
from geodata.models import Region
from iati_vocabulary.models import RegionVocabulary
from iati_vocabulary.models import SectorVocabulary
from iati_codelists.models import AidType
from iati_codelists.models import DisbursementChannel
from iati_codelists.models import FinanceType
from iati_codelists.models import FlowType
from iati_codelists.models import TiedStatus
from iati_codelists.models import Currency
from iati_codelists.models import Sector
from iati_codelists.models import TransactionType
from iati_organisation.models import Organisation, OrganisationType
from iati.models import Activity
from iati.models import Narrative
class Transaction(models.Model):
    """A single financial transaction reported for an IATI activity."""
    activity = models.ForeignKey(Activity)
    # Optional transaction reference string.
    ref = models.CharField(max_length=255, null=True, blank=True, default="")
    transaction_type = models.ForeignKey(TransactionType)
    transaction_date = models.DateField(db_index=True)
    value = models.DecimalField(max_digits=15, decimal_places=2)
    # String form of the value — presumably the raw value as found in the
    # source data; confirm against the parser.
    value_string = models.CharField(max_length=50)
    currency = models.ForeignKey(Currency)
    value_date = models.DateField()
    humanitarian = models.NullBooleanField(null=True, blank=True)
    # Per-currency value fields (XDR, USD, EUR, GBP, JPY, CAD), all
    # defaulting to 0 — presumably populated by a conversion step elsewhere.
    xdr_value = models.DecimalField(max_digits=20, decimal_places=7, default=Decimal(0))
    usd_value = models.DecimalField(max_digits=20, decimal_places=7, default=Decimal(0))
    eur_value = models.DecimalField(max_digits=20, decimal_places=7, default=Decimal(0))
    gbp_value = models.DecimalField(max_digits=20, decimal_places=7, default=Decimal(0))
    jpy_value = models.DecimalField(max_digits=20, decimal_places=7, default=Decimal(0))
    cad_value = models.DecimalField(max_digits=20, decimal_places=7, default=Decimal(0))
    # Classification codelists; all optional.
    disbursement_channel = models.ForeignKey(
        DisbursementChannel,
        null=True,
        blank=True,
        default=None)
    flow_type = models.ForeignKey(FlowType, null=True, blank=True, default=None)
    finance_type = models.ForeignKey(FinanceType, null=True, blank=True, default=None)
    aid_type = models.ForeignKey(AidType, null=True, blank=True, default=None)
    tied_status = models.ForeignKey(TiedStatus, null=True, blank=True, default=None)

    # Custom queryset with transaction-specific filters.
    objects = TransactionQuerySet.as_manager()

    def __unicode__(self, ):
        # Human-readable summary for the admin/shell.
        return "value: %s - transaction date: %s - type: %s" % (
            self.value,
            self.transaction_date,
            self.transaction_type,)
class TransactionProvider(models.Model):
    """Provider organisation attached one-to-one to a Transaction."""
    ref = models.CharField(blank=True, default="", max_length=250)
    # Presumably a cleaned form of `ref` used for matching — confirm
    # against the importer.
    normalized_ref = models.CharField(max_length=120, default="")
    # Resolved Organisation, if the reference could be matched; SET_NULL
    # keeps this row when the organisation is deleted.
    organisation = models.ForeignKey(
        Organisation,
        related_name="transaction_providing_organisation",
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        default=None)
    type = models.ForeignKey(
        OrganisationType,
        null=True,
        default=None,
        blank=True)
    # Resolved Activity the funds originate from, if matched.
    provider_activity = models.ForeignKey(
        Activity,
        related_name="transaction_provider_activity",
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        default=None)
    # Raw provider-activity-id string as reported.
    provider_activity_ref = models.CharField(
        db_index=True,
        max_length=200,
        null=True,
        blank=True,
        default="",
        verbose_name='provider-activity-id')
    transaction = models.OneToOneField(
        Transaction,
        related_name="provider_organisation")
    narratives = GenericRelation(
        Narrative,
        content_type_field='related_content_type',
        object_id_field='related_object_id')
    # first narrative, denormalized for indexed lookups
    primary_name = models.CharField(
        max_length=250,
        null=False,
        blank=True,
        default="",
        db_index=True)

    def __unicode__(self, ):
        return "%s - %s" % (self.ref,
                            self.provider_activity_ref,)
class TransactionReceiver(models.Model):
    """Receiver organisation attached one-to-one to a Transaction.

    Mirrors TransactionProvider, with receiver-side fields.
    """
    ref = models.CharField(blank=True, default="", max_length=250)
    # Presumably a cleaned form of `ref` used for matching — confirm
    # against the importer.
    normalized_ref = models.CharField(max_length=120, default="")
    # Resolved Organisation, if the reference could be matched; SET_NULL
    # keeps this row when the organisation is deleted.
    organisation = models.ForeignKey(
        Organisation,
        related_name="transaction_receiving_organisation",
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        default=None)
    type = models.ForeignKey(
        OrganisationType,
        null=True,
        default=None,
        blank=True)
    # Resolved Activity that receives the funds, if matched.
    receiver_activity = models.ForeignKey(
        Activity,
        related_name="transaction_receiver_activity",
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        default=None)
    # Raw receiver-activity-id string as reported.
    receiver_activity_ref = models.CharField(
        db_index=True,
        max_length=200,
        null=True,
        blank=True,
        default="",
        verbose_name='receiver-activity-id')
    transaction = models.OneToOneField(
        Transaction,
        related_name="receiver_organisation")
    narratives = GenericRelation(
        Narrative,
        content_type_field='related_content_type',
        object_id_field='related_object_id')
    # first narrative, denormalized for indexed lookups
    primary_name = models.CharField(
        max_length=250,
        null=False,
        blank=True,
        default="",
        db_index=True)

    def __unicode__(self, ):
        return "%s - %s" % (self.ref,
                            self.receiver_activity_ref,)
class TransactionDescription(models.Model):
    """Description of a transaction, with text stored as Narratives."""
    transaction = models.OneToOneField(
        Transaction,
        related_name="description")
    narratives = GenericRelation(
        Narrative,
        content_type_field='related_content_type',
        object_id_field='related_object_id')
class TransactionSector(models.Model):
    """Sector classification applied to a Transaction (many per transaction)."""
    transaction = models.ForeignKey(
        Transaction,
        on_delete=models.CASCADE
    )
    sector = models.ForeignKey(
        Sector,
        on_delete=models.CASCADE)
    # Optional vocabulary the sector code is drawn from.
    vocabulary = models.ForeignKey(
        SectorVocabulary,
        null=True,
        blank=True,
        default=None,
        on_delete=models.CASCADE)
    vocabulary_uri = models.URLField(null=True, blank=True)
    # Presumably True when the sector was reported on the transaction
    # itself rather than derived; TODO confirm against the importer.
    reported_on_transaction = models.BooleanField(default=True)
    # Share attributed to this sector, two decimal places.
    percentage = models.DecimalField(
        max_digits=5,
        decimal_places=2)
    def __unicode__(self, ):
        """Readable label: transaction pk plus sector."""
        return "%s - %s" % (self.transaction.id, self.sector)
class TransactionRecipientCountry(models.Model):
    """Recipient country attributed to a Transaction (many per transaction)."""
    transaction = models.ForeignKey(
        Transaction,
        on_delete=models.CASCADE,
        related_name="recipient_country"
    )
    country = models.ForeignKey(
        Country,
        on_delete=models.CASCADE)
    # Presumably True when reported on the transaction itself rather than
    # derived; TODO confirm against the importer.
    reported_on_transaction = models.BooleanField(default=True)
    # Share attributed to this country, two decimal places.
    percentage = models.DecimalField(
        max_digits=5,
        decimal_places=2)
    def __unicode__(self, ):
        """Readable label: transaction pk plus country."""
        return "%s - %s" % (self.transaction.id, self.country)
class TransactionRecipientRegion(models.Model):
    """Recipient region attributed to a Transaction.

    NOTE(review): unlike TransactionRecipientCountry (a ForeignKey), this
    is a OneToOneField, so at most one region per transaction -- confirm
    that asymmetry is intended.
    """
    transaction = models.OneToOneField(
        Transaction,
        on_delete=models.CASCADE,
        related_name="recipient_region"
    )
    region = models.ForeignKey(
        Region,
        on_delete=models.CASCADE)
    # NOTE(review): default=1 (a concrete vocabulary pk) differs from the
    # default=None used by the other vocabulary fields -- confirm intended.
    vocabulary = models.ForeignKey(
        RegionVocabulary,
        null=True,
        blank=True,
        default=1,
        on_delete=models.CASCADE)
    vocabulary_uri = models.URLField(null=True, blank=True)
    # Presumably True when reported on the transaction itself rather than
    # derived; TODO confirm against the importer.
    reported_on_transaction = models.BooleanField(default=True)
    # Share attributed to this region, two decimal places.
    percentage = models.DecimalField(
        max_digits=5,
        decimal_places=2)
    def __unicode__(self, ):
        """Readable label: transaction pk plus region."""
        return "%s - %s" % (self.transaction.id, self.region)
|
Python
| 0.000001
|
@@ -6710,32 +6710,34 @@
CASCADE,%0A
+ #
related_name=%22r
@@ -7242,32 +7242,34 @@
CASCADE,%0A
+ #
related_name=%22r
|
9aeb9d35cd49ccd7ab1ede87d70666e34b80320c
|
Add tests for run docker mgmt command
|
readthedocs/rtd_tests/tests/test_core_management.py
|
readthedocs/rtd_tests/tests/test_core_management.py
|
Python
| 0.000001
|
@@ -0,0 +1,2433 @@
+from StringIO import StringIO%0A%0Afrom django.test import TestCase%0Afrom mock import patch%0A%0Afrom core.management.commands import run_docker%0Afrom projects.models import Project%0Afrom builds.models import Version%0A%0A%0Aclass TestRunDocker(TestCase):%0A '''Test run_docker command with good input and output'''%0A%0A fixtures = %5B'test_data'%5D%0A%0A def setUp(self):%0A self.project = Project.objects.get(slug='pip')%0A self.version = Version(slug='foo', verbose_name='foobar')%0A self.project.versions.add(self.version)%0A%0A def _get_input(self, files=None):%0A return ('%7B%22project%22: %7B%22id%22: 6, %22name%22: %22Pip%22, %22slug%22: %22pip%22%7D,'%0A '%22id%22: 71, %22type%22: %22tag%22, %22identifier%22: %22437fb316fbbdba1acdd22e07dbe7c4809ffd97e6%22,'%0A '%22verbose_name%22: %22stable%22, %22slug%22: %22stable%22%7D')%0A%0A def _docker_build(data):%0A if isinstance(data, Version):%0A return %7B'html': (0, 'DOCKER PASS', '')%7D%0A else:%0A return %7B'html': (1, '', 'DOCKER FAIL')%7D%0A%0A def test_stdin(self):%0A '''Test docker build command'''%0A%0A def _input(_, files=None):%0A return '%7B%22test%22: %22foobar%22%7D'%0A%0A with patch.object(run_docker.Command, '_get_input', _input):%0A cmd = run_docker.Command()%0A assert cmd._get_input() == '%7B%22test%22: %22foobar%22%7D'%0A%0A @patch.object(run_docker.Command, '_get_input', _get_input)%0A @patch('projects.tasks.docker_build', _docker_build)%0A @patch('sys.stdout', new_callable=StringIO)%0A def test_good_input(self, mock_output):%0A '''Test docker build command'''%0A cmd = run_docker.Command()%0A self.assertEqual(cmd._get_input(), self._get_input())%0A cmd.handle()%0A self.assertEqual(%0A mock_output.getvalue(),%0A '%7B%22html%22: %5B0, %22DOCKER PASS%22, %22%22%5D%7D%5Cn'%0A )%0A%0A @patch('projects.tasks.docker_build', _docker_build)%0A def test_bad_input(self):%0A '''Test docker build command'''%0A with patch.object(run_docker.Command, '_get_input') as mock_input:%0A with patch('sys.stdout', 
new_callable=StringIO) as mock_output:%0A mock_input.return_value = 'BAD JSON'%0A cmd = run_docker.Command()%0A cmd.handle()%0A self.assertEqual(%0A mock_output.getvalue(),%0A ('%7B%22doc_builder%22: '%0A '%5B-1, %22%22, %22ValueError: No JSON object could be decoded%22%5D%7D'%0A '%5Cn')%0A )%0A
|
|
7df706d8727064275aeccf04f5411661b33f9250
|
Create show_vlans.py
|
show_vlans.py
|
show_vlans.py
|
Python
| 0
|
@@ -0,0 +1,1552 @@
+%0A__version__ = '0.9'%0A__author__ = 'Remi Batist'%0A%0A# Overview of vlans including ip-addresses in procurve-style%0A# An example below%0A%0A# VLAN-ID %7C IP Address IP Subnet NAME%0A# ------- %7C --------------- --------------- ---------------%0A# 1 %7C VLAN 0001%0A# 6 %7C VLAN 0006%0A# 10 %7C 10.10.10.2 255.255.255.0 VLAN10-SERVERS%0A# 20 %7C wifi%0A# 30 %7C 10.10.30.253 255.255.255.0 VLAN30-CLIENTS%0A%0A%0A#### Importing python modules%0A%0Aimport comware%0A%0Adef main():%0A%09print 'VLAN-ID' + '%5Ct' + '%7C IP Address' + '%5Ct' + ' IP Subnet' + '%5Ct ' + ' NAME'%0A%09print '------- %7C --------------- --------------- ---------------'%0A%0A#### Importing current information%0A%09result = comware.CLI('display vlan all', False).get_output()%0A%09vlanid = ''%0A%09vlanna = ''%0A%09vlanip = ''%0A%09vlansn = ''%0A%09found = False%0A%0A#### Collecting specific items%0A%09for line in result:%0A%09%09if 'VLAN ID' in line:%0A%09%09%09s1 = line.rindex(':') + 1%0A%09%09%09e1 = len(line)%0A%09%09%09vlanid = line%5Bs1:e1%5D%0A%09%09%09vlanip = ' '%0A%09%09%09vlansn = ' '%0A%09%09elif 'IPv4 address' in line:%0A%09%09%09s2 = line.rindex(':') + 2%0A%09%09%09e2 = len(line)%0A%09%09%09vlanip = line%5Bs2:e2%5D%0A%09%09elif 'IPv4 subnet mask' in line:%0A%09%09%09s5 = line.rindex(':') + 2%0A%09%09%09e5 = len(line)%0A%09%09%09vlansn = line%5Bs5:e5%5D%0A%09%09elif 'Name' in line:%0A%09%09%09s3 = line.rindex(':') + 2%0A%09%09%09e3 = len(line)%0A%09%09%09vlanna = line%5Bs3:e3%5D%0A%0A#### Printing specific items%0A%09%09%09print %22%25-7s %7C %25-16s %25-16s %25s%22 %25 (vlanid, vlanip, vlansn, vlanna)%0A%0Aif __name__ == %22__main__%22:%0A%09main()%0A
|
|
66831279407e7a6626b8fe0ea06c886d5191f699
|
Create q24telnetexec.py
|
q24telnetexec.py
|
q24telnetexec.py
|
Python
| 0.000014
|
@@ -0,0 +1,1022 @@
+#%0A# Q24 Util. for Pythonista%0A# 2014/12/23 : First Release%0A#%0A##############################################################################################################%0A%0Aimport sys%0Aimport telnetlib%0Aimport time%0Aimport os%0A#import subprocess%0Afrom ftplib import FTP%0A%0A##################################################################%0A#ftp = FTP(%22192.168.1.150%22,%22target%22,%22password%22,%22%22,3)%0A#ftp.retrlines('LIST')%0A#ftp.retrbinary('RETR README', open('README', 'wb').write)%0A#ftp.abort()%0A#ftp.quit()%0A%0AHOST = %22192.168.1.172%22%0Auser = %22target%22%0Apassword = %22password%22%0AEXECCOMMAND = %22ls%22%0A%0Atn = telnetlib.Telnet(HOST)%0Av = tn.read_until(%22login: %22,1)%0Atn.write(user + %22%5Cr%5Cn%22)%0Atime.sleep(0.3)%0Aif password:%0A%09v = tn.read_until(%22Password: %22,1)%0A%09tn.write(password + %22%5Cr%5Cn%22)%0A%09time.sleep(0.3)%0A%0Aprint %22Connect%22%0Av = tn.read_until(%22%3E%22,1)%0Aprint %22%22%0Aprint %22Execute%22%0Atn.write(EXECCOMMAND+%22%5Cr%5Cn%22)%0Ar = tn.read_until(%22-%3E%22,20)%0Atn.close()%0Aa = %5Bi.strip(%22%5Cn%22) for i in r.split(%22%5Cr%22)%5B1:%5D%5D%0Aprint %22-- Result from %22+HOST+%22 --%22%0Afor i in a:%0A%09print i%0Aprint %22-- END --%22%0A
|
|
991d40c62a7a44fc94ebd83e6726e5beaf867728
|
Add exercise Chuck Norris
|
codingame/easy/chuck_norris.py
|
codingame/easy/chuck_norris.py
|
Python
| 0.000205
|
@@ -0,0 +1,874 @@
+def writeSegment(type, length):%0A%09out = %22%22%0A%09if (type == 1):%0A%09%09out += %220 %22%0A%09else:%0A%09%09out += %2200 %22%0A%0A%09out += '0' * length%0A%0A%09return out%0A%0Adef inputToBits(text):%0A%09out = %22%22%0A%09for ch in text:%0A%09%09chBin = bin(ord(ch))%5B2:%5D%0A%09%09while len(chBin) %3C 7:%0A%09%09%09chBin = '0' + chBin%0A%09%09out += chBin%0A%0A%09return out%0A%0Abits = inputToBits(raw_input())%0A%0Aanswer = %22%22%0AcurrentBit = int(bits%5B0%5D)%0AlengthSequence = 1%0A%0A# Skip the first and the last bits%0Afor bit in bits%5B1:-1%5D:%0A%09bit = int(bit)%0A%09if bit == currentBit:%0A%09%09lengthSequence += 1%0A%09else:%0A%09%09answer += writeSegment(currentBit, lengthSequence) + %22 %22%0A%09%09currentBit = bit%0A%09%09lengthSequence = 1%0A%0AlastBit = int(bits%5B-1%5D)%0Aif lastBit == currentBit:%0A%09answer += writeSegment(currentBit, lengthSequence + 1)%0Aelse:%0A%09# Write the old sequence%0A%09answer += writeSegment(currentBit, lengthSequence) + %22 %22%0A%09# Do not omit the last bit%0A%09answer += writeSegment(lastBit, 1)%0A%0Aprint answer
|
|
e4cde9db9f9b99622cac8fa88f4acab1fce41d80
|
add myrename
|
bin/myrename.py
|
bin/myrename.py
|
Python
| 0.000001
|
@@ -0,0 +1,754 @@
+#!/usr/bin/env python3%0A%0Aimport sys%0Aimport shutil%0Aimport re%0Aimport os%0A%0Aif len(sys.argv) %3E 1:%0A src_filename = sys.argv%5B1%5D%0Aelse:%0A print(%22Please supply filename%22)%0A sys.exit()%0A%0Aif not os.path.exists(src_filename):%0A print(%22%7B%7D does't exist%22.format(src_filename))%0A sys.exit()%0A%0Adst_filename, dst_extension = os.path.splitext(src_filename)%0A%0Achars_to_remove = %5B' ', ',', '(', ')', '%5B', '%5D', %5D%0A%0Afor character in chars_to_remove:%0A if character in dst_filename:%0A dst_filename = dst_filename.replace(character, '.')%0A%0Adst_filename = dst_filename.replace('..', '.')%0Adst_filename = re.sub('%5C.$', '', dst_filename)%0A%0Aprint(%22Moving %7B%7D =%3E %7B%7D%22.format(src_filename, dst_filename + dst_extension))%0Ashutil.move(src_filename, dst_filename + dst_extension)%0A
|
|
92ec039cdecffd7ec03f5954dfe5c69499921ba7
|
Make consonants
|
lib/hindkit/_lab.py
|
lib/hindkit/_lab.py
|
Python
| 0.999999
|
@@ -0,0 +1,2577 @@
+#!/usr/bin/env AFDKOPython%0A# encoding: UTF-8%0Afrom __future__ import division, absolute_import, print_function, unicode_literals%0Aimport collections%0Aimport pytoml%0A%0Aenum = collections.namedtuple%0A%0Ap = enum(%0A %22Property%22,%0A %22%22%22%0A Vowel Consonant%0A Stop Nasal Approximant Fricative%0A Guttural Palatal Retroflex Dental Labial%0A Voiced Aspirated%0A %22%22%22%0A)%0A%0Aclass Letter(object):%0A # IMPLICIT_PROPERTIES = %7B%0A # %22Nasal%22: %22Voiced%22,%0A # %7D%0A def __init__(self, name, properties):%0A self.name = name%0A self.properties = set(properties)%0A # for k, v in self.IMPLICIT_PROPERTIES.items():%0A # if p.__dict__%5Bk%5D in self.properties:%0A # self.properties.update(p.__dict__%5Bi%5D for i in v.split())%0A%0A def __contains__(self, item):%0A return item in self.properties%0A%0Aclass Consonant(object):%0A pass%0A%0ADATA = %22%22%22%0A K: Consonant Guttural Stop,%0A KH: Consonant Guttural Stop Aspirated,%0A G: Consonant Guttural Stop Voiced,%0A GH: Consonant Guttural Stop Voiced Aspirated,%0A NG: Consonant Guttural Nasal Voiced,%0A%0A C: Consonant Palatal Stop,%0A CH: Consonant Palatal Stop Aspirated,%0A J: Consonant Palatal Stop Voiced,%0A JH: Consonant Palatal Stop Voiced Aspirated,%0A NY: Consonant Palatal Nasal Voiced,%0A%0A TT: Consonant Retroflex Stop,%0A TTH: Consonant Retroflex Stop Aspirated,%0A DD: Consonant Retroflex Stop Voiced,%0A DDH: Consonant Retroflex Stop Voiced Aspirated,%0A NN: Consonant Retroflex Nasal Voiced,%0A%0A T: Consonant Dental Stop,%0A TH: Consonant Dental Stop Aspirated,%0A D: Consonant Dental Stop Voiced,%0A DH: Consonant Dental Stop Voiced Aspirated,%0A N: Consonant Dental Nasal Voiced,%0A%0A P: Consonant Labial Stop,%0A PH: Consonant Labial Stop Aspirated,%0A B: Consonant Labial Stop Voiced,%0A BH: Consonant Labial Stop Voiced Aspirated,%0A M: Consonant Labial Nasal Voiced,%0A%0A Y: Consonant Approximant Voiced Palatal,%0A R: Consonant Approximant Voiced Retroflex,%0A L: Consonant Approximant Voiced 
Dental,%0A V: Consonant Approximant Voiced Labial,%0A%0A SH: Consonant Fricative Palatal,%0A SS: Consonant Fricative Retroflex,%0A S: Consonant Fricative Dental,%0A H: Consonant Fricative Voiced Guttural,%0A%22%22%22%0Afor item in DATA.split(%22,%22)%5B:-1%5D:%0A name, _, properties = item.partition(%22:%22)%0A name = name.strip()%0A properties = properties.split()%0A Consonant.__setattr__(name, Letter(name, (p.__getattr__(i) for i in properties)))%0A Consonant.list.append(Consonant.__getattr__(name))%0A%0Aprint(l.__dict__)%0A
|
|
ab553873c782a7975e6b2cb3d240d5b9751c7a44
|
Add API for RepositoryLayout.
|
layout.py
|
layout.py
|
Python
| 0
|
@@ -0,0 +1,2031 @@
+# Copyright (C) 2005-2007 Jelmer Vernooij %3Cjelmer@samba.org%3E%0A %0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU General Public License as published by%0A# the Free Software Foundation; either version 3 of the License, or%0A# (at your option) any later version.%0A%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU General Public License for more details.%0A%0A# You should have received a copy of the GNU General Public License%0A# along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A%0A%0Aclass RepositoryLayout:%0A %22%22%22Describes a repository layout.%22%22%22%0A def get_tag_path(self, name, project=%22%22):%0A %22%22%22Return the path at which the tag with specified name should be found.%0A%0A :param name: Name of the tag. %0A :param project: Optional name of the project the tag is for. Can include slashes.%0A :return: Path of the tag.%22%0A %22%22%22%0A raise NotImplementedError%0A%0A def get_branch_path(self, name, project=%22%22):%0A %22%22%22Return the path at which the branch with specified name should be found.%0A%0A :param name: Name of the branch. %0A :param project: Optional name of the project the branch is for. 
Can include slashes.%0A :return: Path of the branch.%0A %22%22%22%0A raise NotImplementedError%0A%0A def parse(self, path):%0A %22%22%22Parse a path.%0A%0A :return: Tuple with type ('tag' or 'branch'), project name, branch name and path %0A inside the branch%0A %22%22%22%0A raise NotImplementedError%0A%0A def get_branches(self, project=%22%22, revnum=None):%0A %22%22%22Retrieve a list of paths that refer to branches in a specific revision.%0A %22%22%22%0A raise NotImplementedError%0A%0A def get_tags(self, project=%22%22, revnum=None):%0A %22%22%22Retrieve a list of paths that refer to tags in a specific revision.%0A %22%22%22%0A raise NotImplementedError%0A
|
|
cbaf4e86c4409735a8f011f5a8f801a34278c21c
|
Increase text index property size
|
src/ggrc/migrations/versions/20170112013716_421b2179c02e_update_fulltext_index.py
|
src/ggrc/migrations/versions/20170112013716_421b2179c02e_update_fulltext_index.py
|
Python
| 0
|
@@ -0,0 +1,976 @@
+# Copyright (C) 2017 Google Inc.%0A# Licensed under http://www.apache.org/licenses/LICENSE-2.0 %3Csee LICENSE file%3E%0A%0A%22%22%22Update fulltext index.%0A%0ACreate Date: 2017-01-12 01:37:16.801973%0A%22%22%22%0A# disable Invalid constant name pylint warning for mandatory Alembic variables.%0A# pylint: disable=invalid-name%0A%0Aimport sqlalchemy as sa%0Afrom alembic import op%0A%0A%0A# revision identifiers, used by Alembic.%0Arevision = '421b2179c02e'%0Adown_revision = '177a979b230a'%0A%0A%0Adef upgrade():%0A %22%22%22Upgrade database schema and/or data, creating a new revision.%22%22%22%0A op.alter_column(%0A %22fulltext_record_properties%22,%0A %22property%22,%0A existing_type=sa.String(length=64),%0A type_=sa.String(length=250),%0A nullable=False%0A )%0A%0A%0Adef downgrade():%0A %22%22%22Downgrade database schema and/or data back to the previous revision.%22%22%22%0A op.alter_column(%0A %22fulltext_record_properties%22,%0A %22property%22,%0A existing_type=sa.String(length=250),%0A type_=sa.String(length=64),%0A nullable=False%0A )%0A
|
|
fa5a570af022404d7a68f2d58851033c75a60920
|
add import script for Hartlepool
|
polling_stations/apps/data_collection/management/commands/import_hartlepool.py
|
polling_stations/apps/data_collection/management/commands/import_hartlepool.py
|
Python
| 0
|
@@ -0,0 +1,1110 @@
+from data_collection.management.commands import BaseXpressWebLookupCsvImporter%0A%0Aclass Command(BaseXpressWebLookupCsvImporter):%0A council_id = 'E06000001'%0A addresses_name = 'HartlepoolProperty Post Code New.csv'%0A stations_name = 'HartlepoolProperty Post Code New.csv'%0A elections = %5B'mayor.tees-valley.2017-05-04'%5D%0A%0A # Hartlepool use Xpress, but they've provided a slightly trimmed down%0A # version of the WebLookup export. We need to customise a bit..%0A%0A station_postcode_field = None%0A station_address_fields = %5B%0A 'pollingplaceaddress1',%0A 'pollingplaceaddress2',%0A %5D%0A station_id_field = 'pollingplaceid'%0A easting_field = 'pollingplaceeasting'%0A northing_field = 'pollingplacenorthing'%0A%0A def station_record_to_dict(self, record):%0A address = self.get_station_address(record)%0A location = None%0A return %7B%0A 'internal_council_id': getattr(record, self.station_id_field).strip(),%0A 'postcode' : '',%0A 'address' : address.strip(),%0A 'location' : location%0A %7D%0A
|
|
5deb0710578313b61c4099b0970de1c3d1a15171
|
remove a print()
|
ltu/engine/client.py
|
ltu/engine/client.py
|
import os
import requests
from result import Result
# Register the streaming http handlers with requests
class BaseClient(object):
    """Base class from which ModifyClient and QueryClient inherit.

    This class contains basic methods for accessing the API.
    """

    def __init__(self, application_key, server_url):
        """Constructor

        Args:
            application_key: authentication key provided by the application.
            server_url: complete http url to the OnDemand server.
        """
        self.application_key = application_key
        self.server_url = server_url

    def get_url(self, service):
        """Combine a service name and the server url to produce the service url.
        """
        return requests.compat.urljoin(self.server_url, service)

    def get_data(self, params=None):
        """Build the HTTP POST parameters for a service call.

        The application key is automatically added.  Entries whose value
        is None are dropped; list/tuple/set values are expanded into one
        (key, element) pair per element so they are sent as repeated
        fields.

        Args:
            params: a dictionary with service-specific parameters.

        Returns:
            A list of (key, value) tuples to be passed to requests.
        """
        # Default is None rather than {} to avoid the shared
        # mutable-default-argument pitfall.
        if params is None:
            params = {}
        data = [("application_key", self.application_key)]
        for key, val in params.items():
            if val is None:
                continue  # unset optional parameter: do not send it
            if isinstance(val, (list, tuple, set)):
                data.extend((key, v) for v in val)
            else:
                data.append((key, val))
        return data

    def check_status(self):
        """Check that this client can successfully access your application.

        Returns:
            True if the application reports status code 0, False otherwise.
        """
        result = self.get_application_status()
        return result.status_code == 0

    def open_service(self, service, params=None, files=None):
        """Open corresponding API service with appropriate parameters.

        Args:
            service: service name, e.g: GetApplicationStatus
            params: a dictionary of arguments to be passed to the service
            files: optional files payload forwarded to requests.post

        Returns:
            The response content.
        """
        data = self.get_data(params)
        url = self.get_url(service)
        request = requests.post(url, data=data, files=files)
        return request.text

    def get_application_status(self):
        """Check the application status.

        Example:
            result = client.get_application_status()
            if(result.status_code < 0):
                raise Exception(result.status_message)
        """
        result = self.open_service("GetApplicationStatus")
        return Result(result)

    def get_image(self, image_id):
        """Search for an image based on its id."""
        result = self.open_service("GetImageById", params={"image_id": image_id})
        return Result(result)
class QueryClient(BaseClient):
    """Client dedicated to running searches on an LTU Engine application."""

    DEFAULT_QUERY_URL = "https://api.ltu-engine.com/v2/ltuquery/json/"

    def __init__(self, application_key, server_url=None):
        """Constructor

        Args:
            application_key: authentication key provided by the application.
            server_url: complete http url to the OnDemand server; when not
                given, DEFAULT_QUERY_URL is used.
        """
        BaseClient.__init__(self, application_key,
                            server_url or QueryClient.DEFAULT_QUERY_URL)

    def search_image(self, image):
        """Image retrieval based on a image stored on disk

        Args:
            image: path to image file.
        """
        with open(image, 'rb') as image_file:
            response = self.open_service("SearchImageByUpload",
                                         files={"image_content": image_file})
        return Result(response)

    # TODO test this
    def search_image_by_keywords(self, keywords, starting_index=None,
                                 nb_results=None, ids_list=None):
        """Search all images with associated keywords.

        Args:
            keywords: an iterator on a keyword strings
        """
        query_params = {
            "keywords": keywords,
            "starting_index": starting_index,
            "nb_results": nb_results,
            "ids_list": ids_list,
        }
        response = self.open_service("SearchImageByKeywords",
                                     params=query_params)
        return Result(response)
class ModifyClient(BaseClient):
    """Client that can modify an LTU Engine application, e.g: by adding and
    removing images."""

    DEFAULT_MODIFY_URL = "https://api.ltu-engine.com/v2/ltumodify/json/"

    def __init__(self, application_key, server_url=None):
        """Constructor

        Args:
            application_key: authentication key provided by the application.
            server_url: complete http url to the OnDemand server. If it is not
                specified, it will default to the default url.
        """
        if not server_url:
            server_url = ModifyClient.DEFAULT_MODIFY_URL
        BaseClient.__init__(self, application_key, server_url)

    def add_image(self, image_id, image, keywords=None):
        """Add an image to the database

        Args:
            image_id: any unique identifier
            image: path to image file
            keywords: an iterator on a keyword strings
        """
        # Default is None rather than [] to avoid the shared
        # mutable-default-argument pitfall; normalise to a list here.
        if keywords is None:
            keywords = []
        with open(image, 'rb') as img:
            result = self.open_service("AddImage",
                                       params={"image_id": image_id, "keywords": keywords},
                                       files={"image_content": img})
        return Result(result)

    def delete_image(self, image_id):
        """Remove an image from the database"""
        result = self.open_service("DeleteImage", params={"image_id": image_id})
        return Result(result)
|
Python
| 0.000348
|
@@ -1097,26 +1097,8 @@
y)%5D%0A
- print(params)%0A
|
241ac6d844febf829f6442897ebf547a291e5db4
|
Add Summarization API code for blog post
|
summarization/summarization.py
|
summarization/summarization.py
|
Python
| 0
|
@@ -0,0 +1,1075 @@
+import indicoio%0Aimport csv%0A%0Aindicoio.config.api_key = 'YOUR_API_KEY'%0A%0Adef clean_article(article):%0A return article.replace(%22%5Cn%22, %22 %22).decode('cp1252').encode('utf-8', 'replace')%0A%0Adef clean_articles(article_list):%0A # data processing: clean up new lines and convert strings into utf-8 so the indico API can read the data%0A # put all articles into a list for easy batch processing%0A cleaned_articles = %5Bclean_article(text) for row in article_list for text in row%5D%0A print %22Articles cleaned and ready for batch processing!%22%0A return cleaned_articles%0A%0Adef get_summary(cleaned_articles):%0A # get article summaries%0A summary = %5Bindicoio.summarization(item) for item in cleaned_articles%5D%0A # clean up result for easy readability%0A print %22Here are the summaries for all %25d articles:%22 %25 (len(summary))%0A for line in summary:%0A print %22%5Cn%22 + %22 %22.join(line)%0A%0Aif __name__ == %22__main__%22:%0A with open('articles.csv', 'rU') as f:%0A article_list = csv.reader(f)%0A cleaned_articles = clean_articles(article_list)%0A get_summary(cleaned_articles)%0A
|
|
0c305fac7138fe0354d220cdee69c995cbd73eda
|
Include gstreamer in pyinstaller
|
kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py
|
kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py
|
'''
Kivy hook for PyInstaller
=========================
Kivy loads itself in a completely dynamic way, so PyInstaller's static import
analysis cannot see most of the imports that go through the Factory and Core.
In addition, the data files and missing modules are not copied automatically.
With this hook, everything needed to run Kivy is copied correctly.
Check the Kivy documentation for how to use these hooks when packaging an application.
'''
import kivy
from kivy.factory import Factory
def get_modules():
    """Return the module name registered for each class known to the Kivy
    Factory (None for classes without a 'module' entry)."""
    modules = []
    for registration in Factory.classes.values():
        modules.append(registration.get('module', None))
    return modules
# Data directories bundled under 'kivy_install' inside the packaged app.
datas = [
    (kivy.kivy_data_dir, 'kivy_install'),
    (kivy.kivy_modules_dir, 'kivy_install'),
    (kivy.kivy_exts_dir, 'kivy_install'),
]

# extensions
# Modules Kivy loads dynamically, which PyInstaller's static analysis
# cannot discover on its own.
_kivy_modules = [

    # pygame
    'pygame.event',
    'pygame.video',
    'pygame.image',
    'pygame.display',
    'pygame',

    # external modules
    'kivy.cache',
    'kivy.atlas',
    'kivy.network',
    'kivy.network.urlrequest',
    'kivy.lib.osc',
    'kivy.lib.osc.OSC',
    'kivy.lib.osc.oscAPI',
    'kivy.lib.mtdev',
    'kivy.factory_registers',
    'kivy.input.recorder',
    'kivy.input.providers',
    'kivy.input.providers.tuio',
    'kivy.input.providers.mouse',
    'kivy.input.providers.wm_common',
    'kivy.input.providers.wm_touch',
    'kivy.input.providers.wm_pen',
    'kivy.input.providers.hidinput',
    'kivy.input.providers.linuxwacom',
    'kivy.input.providers.mactouch',
    'kivy.input.providers.mouse',
    'kivy.input.providers.mtdev',

    # compiled modules
    'kivy.event',
    'kivy.graphics.buffer',
    'kivy.graphics.c_opengl_debug',
    'kivy.graphics.compiler',
    'kivy.graphics.context_instructions',
    'kivy.graphics.fbo',
    'kivy.graphics.instructions',
    'kivy.graphics.opengl',
    'kivy.graphics.opengl_utils',
    'kivy.graphics.shader',
    # Fixed typo: was 'kivy.graphics.stenctil_instructions', which does
    # not exist, so the stencil module was never bundled.
    'kivy.graphics.stencil_instructions',
    'kivy.graphics.texture',
    'kivy.graphics.transformation',
    'kivy.graphics.vbo',
    'kivy.graphics.vertex',
    'kivy.graphics.vertex_instructions',
    'kivy.properties',

    # core
    'kivy.core.image.img_pygame',
    'kivy.core.audio.audio_gstreamer',
    'kivy.core.audio.audio_pygame',
    'kivy.core.camera.camera_gstreamer',
    'kivy.core.camera.camera_opencv',
    'kivy.core.video.video_pyglet',
    'kivy.core.video.video_gstreamer',
    'kivy.core.text.text_pygame',
    'kivy.core.text.markup',
    'kivy.core.clipboard.clipboard_pygame',
    'kivy.core.clipboard.clipboard_dummy',
    'kivy.core.window.window_pygame',
]

# Static list plus everything registered in the Factory, de-duplicated
# (order is not significant for PyInstaller's hiddenimports).
hiddenimports = _kivy_modules + get_modules()
hiddenimports = list(set(hiddenimports))
|
Python
| 0
|
@@ -2210,24 +2210,95 @@
ra_opencv',%0A
+ 'kivy.core.video.video_gi',%0A 'kivy.core.video.video_gstplayer',%0A
'kivy.co
@@ -2348,25 +2348,21 @@
o.video_
+py
gst
-reamer
',%0A '
|
49b8d12e03088950d960105414294af8a56e1393
|
Create get_genome_size.py
|
kinetic_datanator/data_source/array_express_tools/get_genome_size.py
|
kinetic_datanator/data_source/array_express_tools/get_genome_size.py
|
Python
| 0.000003
|
@@ -0,0 +1,1024 @@
+import numpy as np%0Afrom ete3 import NCBITaxa%0A%0A%0Adef get_genome_size(organism_name):%0A%09domain = get_taxonomic_lineage(organism_name)%5B-3:-2%5D%5B0%5D%0A%09if domain == %22Bacteria%22:%0A%09%09file = open('number_of_prokaryote_genes.txt')%0A%09if domain == 'Eukaryota':%0A%09%09file = open('number_of_eukaryote_genes.txt')%0A%09lines = file.readlines()%0A%09lines = %5Bline.split(%22%09%22) for line in lines%5D%0A%09total = %5B%5D%0A%09for line in lines:%0A%09%09if line%5B0%5D == organism_name:%0A%09%09%09if not line%5B12%5D == '-':%0A%09%09%09%09total.append(int(line%5B12%5D))%0A%0A%09print total%0A%09return np.average(total)%0A%0Adef get_taxonomic_lineage(baseSpecies):%0A%09ncbi = NCBITaxa()%0A%09baseSpecies = ncbi.get_name_translator(%5BbaseSpecies%5D)%5BbaseSpecies%5D%5B0%5D%0A%09lineage = ncbi.get_lineage(baseSpecies)%0A%09names = ncbi.get_taxid_translator(lineage)%0A%09chain = %5Bnames%5Btaxid%5D for taxid in lineage%5D%0A%09i = len(chain)%0A%09new = %5B%5D%0A%09while i %3E 0:%0A%09%09new.append(chain%5Bi-1%5D)%0A%09%09i = i-1%0A%09return new%0A%0Aif __name__ == '__main__':%0A%09print get_genome_size('Mycoplasma pneumoniae')%0A%09print get_genome_size('Mus musculus')%0A%09print get_genome_size('Homo sapiens')%0A
|
|
4efe6903ac54cefd9871965853f63ec8841b6e4b
|
add new splitted countries and remove old ones
|
osmaxx/excerptexport/migrations/0039_update_countries_20160528_2049.py
|
osmaxx/excerptexport/migrations/0039_update_countries_20160528_2049.py
|
Python
| 0.000002
|
@@ -0,0 +1,1564 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.6 on 2016-05-28 18:49%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0Afrom osmaxx.utils.polyfile_helpers import get_polyfile_names_to_file_mapping%0A%0A%0Adef update_excerpt(excerpt, polyfile_path):%0A from osmaxx.utils.polyfile_helpers import polyfile_to_geos_geometry%0A geometry = polyfile_to_geos_geometry(polyfile_path)%0A excerpt.bounding_geometry = geometry%0A excerpt.save()%0A%0A%0Adef update_countries(apps, schema_editor): # noqa%0A Excerpt = apps.get_model(%22excerptexport%22, %22Excerpt%22) # noqa%0A done_countries = %5B%5D%0A for name, polyfile_path in get_polyfile_names_to_file_mapping().items():%0A done_countries.append(name)%0A existing_excerpts = list(Excerpt.objects.filter(excerpt_type='country', name=name))%0A if len(existing_excerpts) == 0:%0A excerpt = Excerpt.objects.create(%0A is_public=True,%0A name=name,%0A excerpt_type='country',%0A )%0A update_excerpt(excerpt, polyfile_path)%0A else:%0A for excerpt in existing_excerpts:%0A update_excerpt(excerpt, polyfile_path)%0A # remove old countries, yes, this deletes the existing exports with those countries as well!%0A Excerpt.objects.filter(excerpt_type='country').exclude(name__in=done_countries).delete()%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('excerptexport', '0038_remove_outputfile_file_old'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(update_countries)%0A %5D%0A
|
|
626b0b54fdcc9cd797bd5a1b355c01f9e9947252
|
Fix empty query result
|
node/datastore.py
|
node/datastore.py
|
#!/usr/bin/env python
#
# This library is free software, distributed under the terms of
# the GNU Lesser General Public License Version 3, or any later version.
# See the COPYING file included in this archive
#
# The docstrings in this module contain epytext markup; API documentation
# may be created by processing this file with epydoc: http://epydoc.sf.net
import UserDict
from pymongo import MongoClient
import time
import os
import logging
class DataStore(UserDict.DictMixin):
    """ Interface for classes implementing physical storage (for data
    published via the "STORE" RPC) for the Kademlia DHT

    @note: This provides an interface for a dict-like object

    @note: The accessor methods below are stubs whose bodies are only
           docstrings, so they implicitly return None; concrete subclasses
           (e.g. DictDataStore, MongoDataStore) must override them.
           C{__setitem__} is the one concrete method.
    """
    def keys(self):
        """ Return a list of the keys in this data store """

    def lastPublished(self, key):
        """ Get the time the C{(key, value)} pair identified by C{key}
        was last published """

    def originalPublisherID(self, key):
        """ Get the original publisher of the data's node ID

        @param key: The key that identifies the stored data
        @type key: str

        @return: Return the node ID of the original publisher of the
        C{(key, value)} pair identified by C{key}.
        """

    def originalPublishTime(self, key):
        """ Get the time the C{(key, value)} pair identified by C{key}
        was originally published """

    def setItem(self, key, value, lastPublished, originallyPublished, originalPublisherID):
        """ Set the value of the (key, value) pair identified by C{key};
        this should set the "last published" value for the (key, value)
        pair to the current time
        """

    def __getitem__(self, key):
        """ Get the value identified by C{key} """

    def __setitem__(self, key, value):
        """ Convenience wrapper to C{setItem}; this accepts a tuple in the
        format: (value, lastPublished, originallyPublished, originalPublisherID) """
        # Concrete: unpack the tuple into a full setItem() call.
        self.setItem(key, *value)

    def __delitem__(self, key):
        """ Delete the specified key (and its value) """
class DictDataStore(DataStore):
    """ A datastore keeping everything in an in-memory Python dictionary """

    def __init__(self):
        # Storage layout, per key:
        #   (<value>, <lastPublished>, <originallyPublished>, <originalPublisherID>)
        self._dict = {}
        self._log = logging.getLogger(self.__class__.__name__)

    def keys(self):
        """ Return a list of the keys in this data store """
        return self._dict.keys()

    def lastPublished(self, key):
        """ Get the time the C{(key, value)} pair identified by C{key}
        was last published """
        _value, last_published, _originally_published, _publisher = self._dict[key]
        return last_published

    def originalPublisherID(self, key):
        """ Get the original publisher of the data's node ID

        @param key: The key that identifies the stored data
        @type key: str

        @return: Return the node ID of the original publisher of the
        C{(key, value)} pair identified by C{key}.
        """
        _value, _last_published, _originally_published, publisher_id = self._dict[key]
        return publisher_id

    def originalPublishTime(self, key):
        """ Get the time the C{(key, value)} pair identified by C{key}
        was originally published """
        _value, _last_published, originally_published, _publisher = self._dict[key]
        return originally_published

    def setItem(self, key, value, lastPublished, originallyPublished, originalPublisherID):
        """ Set the value of the (key, value) pair identified by C{key};
        this should set the "last published" value for the (key, value)
        pair to the current time
        """
        record = (value, lastPublished, originallyPublished, originalPublisherID)
        self._dict[key] = record

    def __getitem__(self, key):
        """ Get the value identified by C{key} """
        value, _last_published, _originally_published, _publisher = self._dict[key]
        return value

    def __delitem__(self, key):
        """ Delete the specified key (and its value) """
        # pop() raises KeyError for missing keys, exactly like `del`.
        self._dict.pop(key)
class MongoDataStore(DataStore):
    """ MongoDB-backed datastore.

    Documents live in the ``data`` collection of the ``openbazaar``
    database. Keys are hex-encoded before being used in queries so that
    binary key material cannot corrupt them.
    """
    def __init__(self):
        MONGODB_URI = 'mongodb://localhost:27017'
        # BUG FIX: MONGODB_URI was previously defined but never passed to
        # the client, which silently connected to pymongo's default target.
        _dbclient = MongoClient(MONGODB_URI)
        self._db = _dbclient.openbazaar
        self._log = logging.getLogger(self.__class__.__name__)

    def keys(self):
        """ Return a list of the keys in this data store """
        keys = []
        try:
            for row in self._db.data.find({}, {'key': 1}):
                keys.append(row['key'].decode('hex'))
        finally:
            # Lazy %-formatting so the repr is only built when logged.
            self._log.info('Keys: %s', keys)
        return keys

    def lastPublished(self, key):
        """ Get the time the C{(key, value)} pair identified by C{key}
        was last published """
        return int(self._dbQuery(key, 'lastPublished'))

    def originalPublisherID(self, key):
        """ Get the original publisher of the data's node ID

        @param key: The key that identifies the stored data
        @type key: str
        @return: Return the node ID of the original publisher of the
        C{(key, value)} pair identified by C{key}.
        """
        return self._dbQuery(key, 'originalPublisherID')

    def originalPublishTime(self, key):
        """ Get the time the C{(key, value)} pair identified by C{key}
        was originally published """
        return int(self._dbQuery(key, 'originallyPublished'))

    def setItem(self, key, value, lastPublished, originallyPublished, originalPublisherID):
        """ Upsert the full record for C{key} (key stored hex-encoded). """
        # Encode the key so that it doesn't corrupt the database
        encodedKey = key.encode('hex')
        self._db.data.update({'key': encodedKey},
                             {'key': encodedKey,
                              'value': value,
                              'lastPublished': lastPublished,
                              'originallyPublished': originallyPublished,
                              'originalPublisherID': originalPublisherID},
                             True)  # upsert=True: insert when absent

    def _dbQuery(self, key, columnName):
        """ Return a single field of C{key}'s document, or None when absent.

        BUG FIX: a missing key previously made this raise TypeError
        (subscripting the None returned by find_one) instead of
        returning None.
        """
        row = self._db.data.find_one({'key': key.encode('hex')},
                                     {columnName: 1})
        if row is not None:
            return str(row[columnName])
        return None

    def __getitem__(self, key):
        """ Get the value identified by C{key} """
        return self._dbQuery(key, 'value')

    def __delitem__(self, key):
        """ Delete the specified key (and its value) """
        self._db.data.remove({'key': key.encode('hex')})
|
Python
| 0.999994
|
@@ -6730,18 +6730,28 @@
-print row%0A
+if row != None:%0A
@@ -6787,24 +6787,28 @@
e%5D)%0A
+
return value
@@ -6807,16 +6807,49 @@
rn value
+%0A else:%0A return
%0A%0A de
|
6612fffdb5d45b3752851d6d1c692753d6827795
|
Update user module URLS
|
plinth/modules/users/urls.py
|
plinth/modules/users/urls.py
|
#
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
URLs for the Users module
"""
from django.conf.urls import patterns, url
from . import views
# URL routes for user management; all views live under /sys/users/ and the
# <slug> capture is the username (letters, digits, and . @ + -).
urlpatterns = patterns(
    'plinth.modules.users.views',
    # User listing page.
    url(r'^sys/users/$', views.UserList.as_view(), name='index'),
    # Create a new user account.
    url(r'^sys/users/create/$', views.UserCreate.as_view(), name='create'),
    # Edit an existing user, identified by username slug.
    url(r'^sys/users/edit/(?P<slug>[\w.@+-]+)$', views.UserUpdate.as_view(),
        name='edit'),
    # Delete a user account.
    url(r'^sys/users/delete/(?P<slug>[\w.@+-]+)$', views.UserDelete.as_view(),
        name='delete'),
    # Change a user's password.
    url(r'^sys/users/change_password/(?P<slug>[\w.@+-]+)$',
        views.UserChangePassword.as_view(), name='change_password'),
)
|
Python
| 0
|
@@ -764,16 +764,17 @@
ns, url%0A
+%0A
from . i
@@ -820,34 +820,8 @@
'
-plinth.modules.users.views
',%0A
@@ -986,13 +986,8 @@
ers/
-edit/
(?P%3C
@@ -993,32 +993,38 @@
%3Cslug%3E%5B%5Cw.@+-%5D+)
+/edit/
$', views.UserUp
@@ -1086,15 +1086,8 @@
ers/
-delete/
(?P%3C
@@ -1101,16 +1101,24 @@
w.@+-%5D+)
+/delete/
$', view
@@ -1190,24 +1190,8 @@
ers/
-change_password/
(?P%3C
@@ -1205,16 +1205,33 @@
w.@+-%5D+)
+/change_password/
$',%0A
|
e10a31d85900fbd815cefa3c6d1227c8cc65911c
|
Improve recurive_get() applying code review comments
|
udata/utils.py
|
udata/utils.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
import re
from datetime import date, datetime
from calendar import monthrange
from math import ceil
def get_by(lst, field, value):
    '''Find an object in a list given a field value'''
    # A row matches either as a dict (by key) or as an object (by attribute).
    matches = (
        row for row in lst
        if (isinstance(row, dict) and row.get(field) == value)
        or getattr(row, field, None) == value
    )
    return next(matches, None)
def multi_to_dict(multi):
    '''Transform a Werkzeug multidictionnary into a flat dictionnary'''
    flat = {}
    # to_dict(False) yields every key with its full list of values;
    # single-valued keys are unwrapped to the bare value.
    for key, values in multi.to_dict(False).items():
        flat[key] = values[0] if len(values) == 1 else values
    return flat
FIRST_CAP_RE = re.compile('(.)([A-Z][a-z]+)')
ALL_CAP_RE = re.compile('([a-z0-9])([A-Z])')


def camel_to_lodash(name):
    '''Convert a CamelCase identifier into lodash_case.'''
    # Two passes: first split "Xyz" words, then split remaining
    # lower/upper boundaries, finally lowercase everything.
    underscored = FIRST_CAP_RE.sub(r'\1_\2', name)
    underscored = ALL_CAP_RE.sub(r'\1_\2', underscored)
    return underscored.lower()
class Paginable(object):
    '''
    A simple helper mixin for pagination

    Expects the mixing class to provide ``page``, ``page_size`` and
    ``total`` attributes.
    '''
    @property
    def pages(self):
        '''Total number of pages (0 when page_size is falsy).'''
        if not self.page_size:
            return 0
        return int(ceil(self.total / float(self.page_size)))

    @property
    def has_prev(self):
        '''True when a previous page exists.'''
        return self.page > 1

    @property
    def has_next(self):
        '''True when a following page exists.'''
        return self.page < self.pages

    @property
    def page_start(self):
        '''1-based index of the first item on the current page.'''
        if self.page_size is None:
            return 1
        return (self.page - 1) * self.page_size + 1

    @property
    def page_end(self):
        '''1-based index of the last item on the current page.'''
        return min(self.total, self.page_size * self.page)

    def iter_pages(self, left_edge=2, left_current=2, right_current=5,
                   right_edge=2):
        '''Yield page numbers to display, with None marking skipped gaps.'''
        previous = 0
        for number in xrange(1, self.pages + 1):
            in_left_edge = number <= left_edge
            in_window = (self.page - left_current - 1 < number
                         < self.page + right_current)
            in_right_edge = number > self.pages - right_edge
            if in_left_edge or in_window or in_right_edge:
                if previous + 1 != number:
                    yield None
                yield number
                previous = number
class Paginator(Paginable):
    '''A simple paginable implementation'''

    def __init__(self, page, page_size, total):
        # Plain value object: store the three pagination parameters.
        self.page, self.page_size, self.total = page, page_size, total
def daterange_start(string):
    '''Parse a date range start boundary'''
    if not string:
        return None
    parts = [int(part) for part in string.split('-')]
    # A missing month and/or day defaults to the earliest value (1).
    while len(parts) < 3:
        parts.append(1)
    return date(*parts)
def daterange_end(string):
    '''Parse a date range end boundary'''
    if not string:
        return None
    parts = [int(part) for part in string.split('-')]
    if len(parts) == 3:
        return date(*parts)
    if len(parts) == 2:
        # Clamp to the last day of that month (leap-year aware).
        year, month = parts
        return date(year, month, monthrange(year, month)[1])
    # Year only: clamp to December 31st.
    return date(parts[0], 12, 31)
def to_iso(dt):
    '''
    Format a date or datetime into an ISO-8601 string

    Support dates before 1900.
    '''
    # Check datetime first: datetime is a subclass of date.
    if isinstance(dt, datetime):
        return to_iso_datetime(dt)
    if isinstance(dt, date):
        return to_iso_date(dt)


def to_iso_date(dt):
    '''
    Format a date or datetime into an ISO-8601 date string.

    Support dates before 1900 (strftime cannot).
    '''
    if not dt:
        return None
    return '{dt.year:02d}-{dt.month:02d}-{dt.day:02d}'.format(dt=dt)


def to_iso_datetime(dt):
    '''
    Format a date or datetime into an ISO-8601 datetime string.

    Time is set to 00:00:00 for plain dates.

    Support dates before 1900.
    '''
    if not dt:
        return None
    if isinstance(dt, datetime):
        time_str = '{dt.hour:02d}:{dt.minute:02d}:{dt.second:02d}'.format(
            dt=dt)
    else:
        time_str = '00:00:00'
    return 'T'.join((to_iso_date(dt), time_str))
def to_bool(value):
    '''
    Transform a value into a boolean with the following rules:

    - a boolean is returned untouched
    - a string value should match any casing of 'true' to be True
    - an integer should be superior to zero to be True
    - all other values are False
    '''
    # Guard-clause style: first matching type wins.
    if isinstance(value, bool):
        return value
    if isinstance(value, basestring):
        return value.lower() == 'true'
    if isinstance(value, int):
        return value > 0
    return False
def hash_url(url):
    '''Hash an URL to make it indexable'''
    if not url:
        return None
    # UTF-8 encode first so unicode URLs hash deterministically.
    return hashlib.sha1(url.encode('utf-8')).hexdigest()
def recursive_get(obj, key):
    '''
    Get an attribute or a key recursively.

    :param obj: The object to fetch attribute or key on
    :type obj: object|dict
    :param key: Either a string in dotted-notation or an array of string
    :type key: string|list|tuple
    '''
    if not obj or not key:
        return
    parts = key.split('.') if isinstance(key, basestring) else key
    head, tail = parts[0], parts[1:]
    if isinstance(obj, dict):
        value = obj.get(head)
    else:
        value = getattr(obj, head, None)
    # Recurse on the remainder of the path, if any.
    return recursive_get(value, tail) if tail else value
|
Python
| 0
|
@@ -4960,19 +4960,23 @@
= parts
-%5B0%5D
+.pop(0)
%0A if
@@ -5087,35 +5087,8 @@
ne)%0A
- if len(parts) %3E 1:%0A
@@ -5124,24 +5124,23 @@
arts
-%5B1:%5D)%0A return
+) if parts else
val
|
bb43a2e63f7f7c337b01ef855d426a84b73eeee5
|
Add a command prototype to list all items from a playlist
|
telemeta/management/commands/telemeta-export-items-from-user-playlists.py
|
telemeta/management/commands/telemeta-export-items-from-user-playlists.py
|
Python
| 0
|
@@ -0,0 +1,1191 @@
+from optparse import make_option%0Afrom django.conf import settings%0Afrom django.core.management.base import BaseCommand, CommandError%0Afrom django.contrib.auth.models import User%0Afrom django.template.defaultfilters import slugify%0Afrom django.utils import translation%0A%0Afrom telemeta.models import Playlist, MediaCollection, MediaItem%0A%0A%0Aclass Command(BaseCommand):%0A help = %22Export media files from playlists of a given user%22%0A args = %22username%22%0A%0A def handle(self, *args, **options):%0A username = args%5B0%5D%0A user = User.objects.get(username=username)%0A playlists = user.playlists.all()%0A items = %5B%5D%0A%0A for playlist in playlists:%0A resources = playlist.resources.all()%0A for resource in resources:%0A if resource.resource_type == 'collection':%0A collection = MediaCollection.objects.get(id=resource.resource_id)%0A for item in collection.items.all():%0A items.append(item)%0A elif resource.resource_type == 'item':%0A item = MediaItem.objects.get(id=resource.resource_id)%0A items.append(item)%0A%0A print(items)%0A %0A
|
|
811b51c9d3003dc156f8d7ea005182a0be875e32
|
add utils for sorting functionality with missing values
|
bonspy/utils.py
|
bonspy/utils.py
|
Python
| 0
|
@@ -0,0 +1,443 @@
+def compare_vectors(x, y):%0A for x_i, y_i in zip(x, y):%0A comparison = _compare(x_i, y_i)%0A if comparison == 0:%0A continue%0A else:%0A return comparison%0A return 0%0A%0A%0Adef _compare(x, y):%0A if x is not None and y is not None:%0A return int(x %3E y) - int(x %3C y)%0A elif x is not None and y is None:%0A return -1%0A elif x is None and y is not None:%0A return 1%0A else:%0A return 0%0A
|
|
b3e1b721d6439cf8ba69f40536b4fb4e6be0b1ec
|
fix beaker.py for python3, add that as new file in beaker3.py. next need to make it dynamically choose which to load.
|
plugin/ipythonPlugins/src/dist/ipython/beaker3.py
|
plugin/ipythonPlugins/src/dist/ipython/beaker3.py
|
Python
| 0
|
@@ -0,0 +1,3109 @@
+# Copyright 2014 TWO SIGMA OPEN SOURCE, LLC%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Aimport os, urllib.request, urllib.parse, urllib.error, urllib.request, urllib.error, urllib.parse, json, pandas, yaml%0A%0A# should be inner class to Beaker%0Aclass DataFrameEncoder(json.JSONEncoder):%0A def default(self, obj):%0A # similarly handle Panels.%0A # make this extensible by the user to handle their own types.%0A if type(obj) == pandas.core.frame.DataFrame:%0A return obj.to_dict(outtype='list')%0A if type(obj) == pandas.core.series.Series:%0A return obj.to_dict()%0A return json.JSONEncoder.default(self, obj)%0A%0Aclass Beaker:%0A %22%22%22Runtime support for Python code in Beaker.%22%22%22%0A session_id = ''%0A core_url = '127.0.0.1:' + os.environ%5B'beaker_core_port'%5D%0A password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()%0A password_mgr.add_password(None, core_url, 'beaker',%0A os.environ%5B'beaker_core_password'%5D)%0A urllib.request.install_opener(urllib.request.build_opener(urllib.request.HTTPBasicAuthHandler(password_mgr)))%0A%0A def set4(self, var, val, unset, sync):%0A args = %7B'name': var, 'session':self.session_id, 'sync':sync%7D%0A if not unset:%0A args%5B'value'%5D = json.dumps(val, cls=DataFrameEncoder)%0A req = urllib.request.Request('http://' + self.core_url + '/rest/namespace/set',%0A urllib.parse.urlencode(args).encode('utf8'))%0A conn = urllib.request.urlopen(req)%0A reply = conn.read().decode(%22utf-8%22)%0A 
if reply != 'ok':%0A raise NameError(reply)%0A %0A def get(self, var):%0A req = urllib.request.Request('http://' + self.core_url + '/rest/namespace/get?' + %0A urllib.parse.urlencode(%7B%0A 'name': var,%0A 'session':self.session_id%7D))%0A conn = urllib.request.urlopen(req)%0A result = yaml.load(conn.read()) # would use json.loads but it returns unicode%0A if not result%5B'defined'%5D:%0A raise NameError('name %5C'' + var + '%5C' is not defined in notebook namespace')%0A return result%5B'value'%5D%0A%0A%0Abeaker_instance = Beaker()%0A%0Adef set(var, val):%0A return beaker_instance.set4(var, val, False, True)%0A%0A# returns before the write completes%0Adef set_fast(var, val):%0A return beaker_instance.set4(var, val, False, False)%0A%0A# remove a var from the namespace%0Adef unset(var):%0A return beaker_instance.set4(var, None, True, True)%0A%0Adef get(var):%0A return beaker_instance.get(var)%0A%0Adef set_session(id):%0A beaker_instance.session_id = id%0A
|
|
f9331db039dc549bf9e1d149338f67c455f29bea
|
add manage.py
|
manage.py
|
manage.py
|
Python
| 0.000001
|
@@ -0,0 +1,542 @@
+#!/usr/bin/python%0Afrom django.core.management import execute_manager%0Atry:%0A import settings # Assumed to be in the same directory.%0Aexcept ImportError:%0A import sys%0A sys.stderr.write(%22Error: Can't find the file 'settings.py' in the directory containing %25r. It appears you've customized things.%5CnYou'll have to run django-admin.py, passing it your settings module.%5Cn(If the file settings.py does indeed exist, it's causing an ImportError somehow.)%5Cn%22 %25 __file__)%0A sys.exit(1)%0A%0Aif __name__ == %22__main__%22:%0A execute_manager(settings)%0A
|
|
a62dc18745f952b3fcb05ddf4768758e25883698
|
Add datamigration to create staff clearances
|
accelerator/migrations/0058_grant_staff_clearance_for_existing_staff_members.py
|
accelerator/migrations/0058_grant_staff_clearance_for_existing_staff_members.py
|
Python
| 0
|
@@ -0,0 +1,1400 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.10.8 on 2019-06-12 19:38%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0ASTAFF = %22Staff%22 # don't import from models in migrations.%0A%0Adef grant_staff_clearances_for_role_grantees(apps, program_role):%0A Clearance = apps.get_model('accelerator', 'Clearance')%0A program_family = program_role.program.program_family%0A user_ids = program_role.programrolegrant_set.values_list(%0A %22person_id%22, flat=True)%0A for user_id in user_ids:%0A Clearance.objects.get_or_create(%0A user_id=user_id,%0A program_family=program_family,%0A defaults=%7B%22level%22: STAFF%7D)%0A %0A%0Adef grant_clearances_for_mc_staff_users(apps, schema_editor):%0A ProgramRole = apps.get_model('accelerator', %22ProgramRole%22) %0A %0A for program_role in ProgramRole.objects.filter(%0A user_role__name=STAFF):%0A grant_staff_clearances_for_role_grantees(apps, program_role)%0A%0Adef revoke_staff_clearances(apps, schema_editor):%0A Clearance = apps.get_model(%22accelerator%22, %22Clearance%22)%0A Clearance.objects.filter(level=STAFF).delete()%0A %0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('accelerator', '0057_add_clearance_level_staff'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(%0A grant_clearances_for_mc_staff_users,%0A revoke_staff_clearances)%0A %5D%0A
|
|
ed514214967f4f9dde252070993f519db67057c3
|
add tests for intermittent water
|
test/668-intermittent-water.py
|
test/668-intermittent-water.py
|
Python
| 0.000002
|
@@ -0,0 +1,833 @@
+#http://www.openstreetmap.org/way/107817218%0A# Arizona Canal Diversion Channel (ACDC) %0Aassert_has_feature(%0A 16, 12353, 26272, 'water',%0A %7B 'kind': 'river', 'intermittent': 'yes' %7D)%0A%0A#http://www.openstreetmap.org/way/96528126%0A# 10th Street Wash%0Aassert_has_feature(%0A 16, 12368, 26272, 'water',%0A %7B 'kind': 'drain', 'intermittent': 'yes' %7D)%0A%0A#http://www.openstreetmap.org/way/61954975%0A# Unnamed drain%0Aassert_has_feature(%0A 16, 12372, 26272, 'water',%0A %7B 'kind': 'drain', 'intermittent': 'yes' %7D)%0A%0A#http://www.openstreetmap.org/way/321690441%0A# Unnamed stream%0Aassert_has_feature(%0A 16, 12492, 26279, 'water',%0A %7B 'kind': 'stream', 'intermittent': 'yes' %7D)%0A%0A#http://www.openstreetmap.org/way/68709904%0A# Unnamed water (lake)%0Aassert_has_feature(%0A 16, 12349, 26257, 'water',%0A %7B 'kind': 'water', 'intermittent': 'yes' %7D)
|
|
2d18583309a189e263bda13e19f7a05ba832c14d
|
Add file to write templates to json
|
backend/scripts/templates/templates2file.py
|
backend/scripts/templates/templates2file.py
|
Python
| 0
|
@@ -0,0 +1,728 @@
+#!/usr/bin/env python%0A%0Aimport rethinkdb as r%0Afrom optparse import OptionParser%0Aimport json%0Aimport os%0A%0Aif __name__ == %22__main__%22:%0A parser = OptionParser()%0A parser.add_option(%22-P%22, %22--port%22, dest=%22port%22, type=%22int%22,%0A help=%22rethinkdb port%22, default=30815)%0A (options, args) = parser.parse_args()%0A%0A conn = r.connect('localhost', options.port, db='materialscommons')%0A%0A templates = list(r.table('templates').run(conn))%0A%0A try:%0A os.mkdir('/tmp/templates')%0A except:%0A pass%0A%0A for template in templates:%0A try:%0A with open(%22/tmp/templates/%7B%7D%22.format(template%5B'name'%5D), 'w') as out:%0A json.dump(template, out, indent=4)%0A except:%0A pass%0A
|
|
72a851829a2190adc240bff3a62f4f03526a5e39
|
add basic checks
|
ndd/check.py
|
ndd/check.py
|
Python
| 0
|
@@ -0,0 +1,1263 @@
+# -*- coding: utf-8 -*-%0A%22%22%22Basic distribution tests.%22%22%22%0A# pylint: disable=missing-docstring%0Aimport pytest%0A%0Aimport ndd%0A%0ACOUNTS = %5B4, 12, 4, 5, 3, 1, 5, 1, 2, 2, 2, 2, 11, 3, 4, 12, 12, 1, 2%5D%0AK = 100%0A%0A%0Adef rounded(*args, **kwargs):%0A result = ndd.entropy(COUNTS, *args, **kwargs)%0A return round(result, 3)%0A%0A%0Adef test_base():%0A assert rounded() == 2.813%0A%0A%0Adef test_k():%0A assert rounded(k=K) == 2.806%0A%0A%0Adef test_plugin():%0A assert rounded(estimator='plugin') == 2.635%0A%0A%0Adef test_pmf_plugin():%0A assert rounded(estimator='pmf_plugin') == 1.678%0A%0A%0Adef test_miller_madow():%0A assert rounded(estimator='miller_madow') == 2.738%0A%0A%0Adef test_wolper_wolf():%0A with pytest.raises(TypeError):%0A _ = rounded(estimator='wolpert_wolf')%0A%0A%0Adef test_nsb_nok():%0A with pytest.raises(ndd.exceptions.NddError):%0A _ = rounded(estimator='nsb')%0A%0A%0Adef test_nsb_k():%0A assert rounded(estimator='nsb', k=K) == 2.806%0A%0A%0Adef test_asymptotic_nsb():%0A assert rounded(estimator='asymptotic_nsb') == 4.612%0A%0A%0Adef test_grassberger():%0A assert rounded(estimator='grassberger') == 6.221%0A%0A%0Adef test_auto_estimator_k():%0A assert rounded(estimator='auto_estimator', k=K) == 2.806%0A%0A%0Adef test_auto_estimator_nok():%0A assert rounded(estimator='auto_estimator') == 2.813%0A
|
|
10d020cc7b91257b74e63c0579788c0f9435857f
|
add loadTestData.py
|
templates/test/loadTestData.py
|
templates/test/loadTestData.py
|
Python
| 0.000003
|
@@ -0,0 +1,1093 @@
+import os%0A%0Aif not os.path.exists('setup.py'):%0A print %22This script should be run from /install/community-edition-setup/%22%0A sys.exit()%0A %0Aif not os.path.exists('/install/community-edition-setup/setup.properties.last'):%0A print %22setup.properties.last is missing can't continue%22%0A sys.exit()%0A%0Af=open('setup.py').readlines()%0A%0Afor l in f:%0A if l.startswith('from pyDes import *'):%0A break%0Aelse:%0A f.insert(30, 'from pyDes import *%5Cn')%0A with open('setup.py','w') as w:%0A w.write(''.join(f))%0A%0Afrom setup import *%0A%0AinstallObject = Setup( os.path.dirname(os.path.realpath(__file__)))%0A%0A%0A%0AinstallObject.load_properties('setup.properties.last')%0A%0Aif installObject.ldap_type == 'opendj':%0A installObject.createLdapPw()%0A%0A%0AinstallObject.encode_test_passwords()%0AinstallObject.generate_passport_configuration()%0AinstallObject.generate_scim_configuration()%0A%0AinstallObject.prepare_base64_extension_scripts()%0A%0AinstallObject.render_templates()%0AinstallObject.render_test_templates()%0AinstallObject.loadTestData()%0A%0Aif installObject.ldap_type == 'opendj':%0A installObject.deleteLdapPw()%0A
|
|
6df873a26ff71b07e68dcb2e9fa9c4b1725a70ce
|
Add migration for expert bid [WAL-976]
|
src/nodeconductor_assembly_waldur/experts/migrations/0003_expertbid.py
|
src/nodeconductor_assembly_waldur/experts/migrations/0003_expertbid.py
|
Python
| 0
|
@@ -0,0 +1,1222 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.1 on 2017-07-07 15:09%0Afrom __future__ import unicode_literals%0A%0Afrom decimal import Decimal%0Aimport django.core.validators%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0Aimport nodeconductor.core.fields%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('structure', '0052_customer_subnets'),%0A ('experts', '0002_expertrequest'),%0A %5D%0A%0A operations = %5B%0A migrations.CreateModel(%0A name='ExpertBid',%0A fields=%5B%0A ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),%0A ('uuid', nodeconductor.core.fields.UUIDField()),%0A ('price', models.DecimalField(decimal_places=7, default=0, max_digits=22, validators=%5Bdjango.core.validators.MinValueValidator(Decimal('0'))%5D)),%0A ('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='experts.ExpertRequest')),%0A ('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='structure.Project')),%0A %5D,%0A options=%7B%0A 'abstract': False,%0A %7D,%0A ),%0A %5D%0A
|
|
578ee687ca5b2270c3d01376606c1f074fd15e4e
|
convert to float/int
|
src/collectors/memcached/memcached.py
|
src/collectors/memcached/memcached.py
|
# coding=utf-8
"""
Collect memcached stats
#### Dependencies
* subprocess
#### Example Configuration
MemcachedCollector.conf
```
enabled = True
hosts = localhost:11211, app-1@localhost:11212, app-2@localhost:11213, etc
```
TO use a unix socket, set a host string like this
```
hosts = /path/to/blah.sock, app-1@/path/to/bleh.sock,
```
"""
import diamond.collector
import socket
import re
class MemcachedCollector(diamond.collector.Collector):
    """Collect and publish statistics from one or more memcached servers."""

    # Stats that are point-in-time values; everything else is published
    # as a counter.
    GAUGES = [
        'bytes',
        'connection_structures',
        'curr_connections',
        'curr_items',
        'threads',
        'reserved_fds',
        'limit_maxbytes',
        'hash_power_level',
        'hash_bytes',
        'hash_is_expanding',
    ]

    def get_default_config_help(self):
        """Describe this collector's configuration options."""
        config_help = super(MemcachedCollector, self).get_default_config_help()
        config_help.update({
            'publish': "Which rows of 'status' you would like to publish."
            + " Telnet host port' and type stats and hit enter to see the list"
            + " of possibilities. Leave unset to publish all.",
            'hosts': "List of hosts, and ports to collect. Set an alias by "
            + " prefixing the host:port with alias@",
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(MemcachedCollector, self).get_default_config()
        config.update({
            'path': 'memcached',

            # Which rows of 'status' you would like to publish.
            # 'telnet host port' and type stats and hit enter to see the list
            # of possibilities.
            # Leave unset to publish all
            #'publish': ''

            # Connection settings
            'hosts': ['localhost:11211']
        })
        return config

    def get_raw_stats(self, host, port):
        """Fetch the raw 'stats' response from a memcached instance.

        host is a hostname when port is set, or a unix socket path when
        port is None. Returns an empty string on connection failure.
        """
        data = ''
        sock = None
        try:
            if port is None:
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                sock.connect(host)
            else:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((host, int(port)))
            # request stats
            sock.send('stats\n')
            # something big enough to get whatever is sent back
            data = sock.recv(4096)
        except socket.error:
            self.log.exception('Failed to get stats from %s:%s',
                               host, port)
        finally:
            # BUG FIX: the socket was never closed before, leaking a file
            # descriptor on every collection cycle.
            if sock is not None:
                sock.close()
        return data

    def get_stats(self, host, port):
        """Parse the 'stats' output into a dict of (mostly numeric) values."""
        # stuff that's always ignored, aren't 'stats'
        ignored = ('libevent', 'pointer_size', 'time', 'version',
                   'repcached_version', 'replication', 'accepting_conns',
                   'pid')
        pid = None

        stats = {}
        data = self.get_raw_stats(host, port)

        # parse stats
        for line in data.splitlines():
            pieces = line.split(' ')
            if pieces[0] != 'STAT' or pieces[1] in ignored:
                continue
            elif pieces[1] == 'pid':
                pid = pieces[2]
                continue
            # Publish numbers as numbers (not strings) so downstream
            # handlers can aggregate them; non-numeric values pass through.
            value = pieces[2]
            try:
                value = float(value) if '.' in value else int(value)
            except ValueError:
                pass
            stats[pieces[1]] = value

        # get max connection limit from the daemon's command line
        self.log.debug('pid %s', pid)
        try:
            cmdline = "/proc/%s/cmdline" % pid
            # BUG FIX: the file handle previously leaked when parsing raised.
            with open(cmdline, 'r') as f:
                m = re.search("-c\x00(\d+)", f.readline())
            if m is not None:
                self.log.debug('limit connections %s', m.group(1))
                stats['limit_maxconn'] = int(m.group(1))
        except (IOError, OSError, ValueError):
            # Narrowed from a bare 'except:' so real bugs aren't hidden.
            self.log.debug("Cannot parse command line options for memcached")

        return stats

    def collect(self):
        """Collect and publish stats for every configured host."""
        hosts = self.config.get('hosts')

        # Convert a string config value to be an array
        if isinstance(hosts, basestring):
            hosts = [hosts]

        for host in hosts:
            # Hosts are "alias@hostname:port"; alias and port are optional.
            matches = re.search('((.+)\@)?([^:]+)(:(\d+))?', host)
            alias = matches.group(2)
            hostname = matches.group(3)
            port = matches.group(5)
            if alias is None:
                alias = hostname

            stats = self.get_stats(hostname, port)

            # figure out what we're configured to get, defaulting to everything
            desired = self.config.get('publish', stats.keys())

            # for everything we want
            for stat in desired:
                if stat in stats:
                    # we have it
                    if stat in self.GAUGES:
                        self.publish_gauge(alias + "." + stat, stats[stat])
                    else:
                        self.publish_counter(alias + "." + stat, stats[stat])
                else:
                    # we don't, must be somehting configured in publish so we
                    # should log an error about it
                    self.log.error("No such key '%s' available, issue 'stats' "
                                   "for a full list", stat)
|
Python
| 0.999999
|
@@ -3154,32 +3154,69 @@
continue%0A
+ if '.' in pieces%5B2%5D:%0A
stat
@@ -3230,16 +3230,22 @@
s%5B1%5D%5D =
+float(
pieces%5B2
@@ -3245,16 +3245,85 @@
ieces%5B2%5D
+)%0A else:%0A stats%5Bpieces%5B1%5D%5D = int(pieces%5B2%5D)
%0A%0A
|
8e983472134817c1312e3713ca45c7359300dedf
|
Set students current flag based on enrolled and attending
|
academics/management/commands/set_student_current.py
|
academics/management/commands/set_student_current.py
|
Python
| 0
|
@@ -0,0 +1,861 @@
+#!/usr/bin/python%0A%0Aimport logging%0Afrom datetime import date%0A%0Afrom django.core.management.base import BaseCommand, CommandError%0Afrom django.db import transaction%0A%0Afrom academics.models import Student, Enrollment, AcademicYear%0A%0Alogger = logging.getLogger(__name__)%0A%0Aclass Command(BaseCommand):%0A help = %22Import reset student's current status%22%0A %0A def handle(self, *args, **kwargs):%0A logger.info(%22Beginning student status reset routing%22)%0A %0A with transaction.atomic():%0A Student.objects.update(current=False)%0A %0A current_enrollments = Enrollment.objects.filter(academic_year=AcademicYear.objects.current(), status_enrollment=%22Enrolled%22, status_attending=%22Attending%22)%0A current_students = Student.objects.filter(enrollment__in=current_enrollments)%0A current_students.update(current=True)
|
|
c812f283762f18cdf9107b2b7fd679c6b57e32d7
|
Create coins.py
|
common/coins.py
|
common/coins.py
|
Python
| 0.000005
|
@@ -0,0 +1 @@
+%0A
|
|
b7d94d888552161432154750a8ca6a96775a32e2
|
modify domain XML to boot from hda after installation.
|
vmfront/vmmanager/views.py
|
vmfront/vmmanager/views.py
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from xml.dom.minidom import parseString
from django.core.context_processors import csrf
from django import forms
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from vmmanager import create_virConnect
import libvirt
from vmmanager.vnc import display
# Create your views here.
def index(request):
    """Render the static VM-manager landing page."""
    return render_to_response('vmmanager/index.html')
def index_menu(request):
    """Render the sidebar menu listing every libvirt domain.

    Defined-but-inactive domains (looked up by name) come first, then
    running domains (looked up by numeric ID), matching the original
    listing order.
    """
    con = create_virConnect()
    vmdoms = [con.lookupByName(name) for name in con.listDefinedDomains()]
    vmdoms.extend(con.lookupByID(domid) for domid in con.listDomainsID())
    return render_to_response('vmmanager/index_menu.html',
                              {'vmdoms': vmdoms},
                              context_instance=RequestContext(request))
def index_top(request):
    """Render the static top bar of the VM-manager UI."""
    return render_to_response('vmmanager/index_top.html')
def status(request, vmname):
    """Show a domain's status page and handle power actions.

    GET renders the status template. POST performs a 'PowerON' or
    'shutdown' action (when the domain is in the appropriate state) and
    then redirects back here (POST/redirect/GET).
    """
    con = create_virConnect()
    dom = con.lookupByName(vmname)
    parsed = parseString(dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE))

    # dom.info()[0] is the libvirt domain state code; the code below
    # treats 1 as running and 5 as shut off.
    state_code = dom.info()[0]
    if state_code == 1:
        vm_state = "running"
        maxVcpus = dom.maxVcpus()
    elif state_code == 5:
        vm_state = "shut off"
        maxVcpus = "(N/A)"
    else:
        vm_state = state_code
        maxVcpus = "(N/A)"

    # The VNC/graphics port is -1 while the domain is not running.
    graphics_port = parsed.getElementsByTagName('graphics')[0].getAttribute('port')
    if graphics_port == "-1":
        graphics_port = "(N/A)"

    if request.method == "POST":
        if 'PowerON' in request.POST:
            if vm_state == "shut off":
                dom.create()
        elif 'shutdown' in request.POST:
            if vm_state == "running":
                dom.destroy()
        # BUG FIX: always redirect after a POST, even for unrecognized
        # actions. Previously a POST without a known action fell through
        # and returned None, which makes Django raise
        # "The view didn't return an HttpResponse object".
        return HttpResponseRedirect(reverse('vmmanager.views.status',
                                            args=(vmname,)))

    return render_to_response('vmmanager/status.html',
                              context_instance=RequestContext(request, {
                                  'dom': dom,
                                  'info': dom.info(),
                                  'vm_state': vm_state,
                                  'state': dom.state(0),
                                  'memoryStats': dom.memoryStats(),
                                  'maxMemory': dom.maxMemory(),
                                  'maxVcpus': maxVcpus,
                                  'vcpus': dom.vcpus(),
                                  'conGetMemoryStats': con.getMemoryStats(0, 0),
                                  'conGetCPUStats': con.getCPUStats(0, 0),
                                  'graphics_port': graphics_port,
                                  'OSType': dom.OSType(),
                              }))
class VMForm(forms.Form):
    """Form for creating a new virtual machine.

    Memory choices are stored in KiB; disk choices in GB; os selects the
    installer ISO under /var/shared/.
    """
    name = forms.CharField()
    memory = forms.ChoiceField(choices=[(256 * 1024, "256"), (512 * 1024, "512"), (1024 * 1024, "1024")])
    vcpu = forms.ChoiceField(choices=[(1, "1"), (2, "2"), (3, "3")])
    disk = forms.ChoiceField(choices=[(2, "2"), (5, "5"), (10, "10"), (20, "20")])
    os = forms.ChoiceField(choices=[("freebsd", "FreeBSD"), ("ubuntu", "Ubuntu"), ("centos", "CentOS"), ("debian", "Debian")])

    def clean_name(self):
        """Reject names that collide with an existing libvirt domain.

        Checks both running domains (looked up by ID) and
        defined-but-inactive domains (listed by name).
        """
        con = create_virConnect()
        vm_names = [con.lookupByID(domid).name()
                    for domid in con.listDomainsID()]
        # BUG FIX: listDefinedDomains() returns a list of names; the old
        # code append()ed the whole list as one element, so inactive
        # domains were never detected as duplicates. extend() flattens
        # the names into vm_names so the membership test works.
        vm_names.extend(con.listDefinedDomains())
        if self.cleaned_data['name'] in vm_names:
            raise forms.ValidationError(u'Overlapping domain name')
        return self.cleaned_data['name']
def create(request):
    """Handle the VM creation page.

    GET renders an empty :class:`VMForm`.  POST validates the submission,
    creates the backing storage volume, defines and starts the libvirt
    domain, then redirects to the status page on success.
    """
    if request.method == "POST":
        form = VMForm(request.POST)
        if form.is_valid():
            con = create_virConnect()
            # Create the backing volume.
            # NOTE(review): this creates the volume in *every* storage pool;
            # presumably only one pool exists -- confirm intended behaviour.
            for name in con.listStoragePools():
                pool = con.storagePoolLookupByName(name)
                pool.createXML(V_XML % form.cleaned_data, 0)
            # Define the domain from the XML template, then boot it.
            domain = con.defineXML(D_XML % form.cleaned_data)
            status = domain.create()
            if status != -1:
                # TODO: redirect straight to the VNC console instead.
                return HttpResponseRedirect(reverse('vmmanager.views.status',
                                                    args=(form.cleaned_data["name"],)))
    else:
        form = VMForm()
    # Bug fix: the form (including validation errors on a failed POST) was
    # never passed to the template (context was an empty dict); expose it so
    # the template can render the fields and any errors.
    c = {'form': form}
    return render_to_response('vmmanager/create.html', c,
                              context_instance=RequestContext(request))
V_XML = """\
<volume>
<name>%(name)s.img</name>
<allocation>0</allocation>
<capacity unit="G">%(disk)s</capacity>
<!-- <target>
<path>/var/lib/virt/images/%(name)s.img</path>
<permissions>
<owner>107</owner>
<group>107</group>
<mode>0744</mode>
<label>virt_image_t</label>
</permissions>
</target> -->
</volume>
"""\
D_XML = """\
<domain type="kvm"> <!-- Domain Type -->
<name>%(name)s</name> <!-- name for vm -->
<uuid></uuid> <!-- global identifier for virtual machines. if define/create a new machine, a random UUID is generated-->
<os> <!-- Bootloader -->
<type>hvm</type> <!-- Full Virtualization -->
<boot dev='cdrom'/> <!-- Boot Device -->
<boot dev='hd' />
</os>
<vcpu>%(vcpu)s</vcpu> <!-- CPU allocation -->
<memory unit='KiB'>%(memory)s</memory> <!-- Maximum Memory Allocation Size -->
<currentMemory unit='KiB'>%(memory)s</currentMemory> <!-- Current Memory Allocation -->
<devices> <!-- devices provided to the guest domain -->
<emulator>/usr/bin/kvm</emulator> <!-- -->
<disk type='file' device='disk'> <!-- type:underlying source for the disk, device:how the disk is exposed to the guest OS -->
<source file='/var/lib/libvirt/images/%(name)s.img' />
<target dev='hda' />
</disk>
<disk type='file' device='cdrom'>
<source file='/var/shared/%(os)s.iso' />
<target dev='hda' />
<readonly/>
<address type='drive' controller='0' bus='1' target='0' unit='0'/>
</disk>
<interface type='network'>
<source network='default' />
<mac address='24:42:53:21:52:45' />
</interface>
<graphics type='vnc' port='-1' keymap='ja' passwd="asdfghjkl">
<listen type='address' address='157.82.3.140'/>
</graphics>
</devices>
</domain>
"""\
def vnc(request, vmname):
    """Show the VNC console page for *vmname*.

    Thin alias that delegates to the ``display`` view (defined elsewhere in
    this module).
    """
    return display(request, vmname)
|
Python
| 0
|
@@ -5868,36 +5868,13 @@
ev='
-cdrom'/%3E %3C!-- Boot Device --
+hd' /
%3E%0A
@@ -5890,37 +5890,60 @@
%3Cboot dev='
-hd' /
+cdrom'/%3E %3C!-- Boot Device --
%3E%0A %3C/os%3E%0A
|
1190ae0f9f926db3b6969700a8a2b3ca67d7631c
|
Add google analytics script
|
query_google_analytics.py
|
query_google_analytics.py
|
Python
| 0.000001
|
@@ -0,0 +1,248 @@
+import json%0Aimport random%0A%0Adata = %7B%0A 'unique_users': random.randint(11, 99),%0A 'number_of_cats': random.randint(11, 99),%0A%7D%0A%0Awith open('/fs/website/people/fergus.cooper/google_analytics_data.json', 'w') as outfile:%0A json.dump(data, outfile)%0A
|
|
145b84b8ad2ba2568a3ca3044a551c042496cb9c
|
Fix ordered dict rendering for task templates
|
rally/common/yamlutils.py
|
rally/common/yamlutils.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import yaml
from yaml import constructor
from yaml import loader
from yaml import nodes
from yaml import parser
from yaml import resolver
ParserError = parser.ParserError
def _construct_mapping(loader, node, deep=False):
keys = []
if isinstance(node, nodes.MappingNode):
for key_node, value_node in node.value:
key = loader.construct_object(key_node, deep=deep)
if key in keys:
raise constructor.ConstructorError(
"while constructing a mapping",
node.start_mark,
"the key (%s) is redefined" % key,
key_node.start_mark)
keys.append(key)
return collections.OrderedDict(loader.construct_pairs(node))
class _SafeLoader(loader.SafeLoader):
pass
def safe_load(stream):
"""Load stream to create python object
:param stream: json/yaml stream.
:returns: dict object
"""
_SafeLoader.add_constructor(resolver.BaseResolver.DEFAULT_MAPPING_TAG,
_construct_mapping)
return yaml.load(stream, _SafeLoader)
|
Python
| 0.000019
|
@@ -584,16 +584,28 @@
lections
+%0Aimport json
%0A%0Aimport
@@ -772,16 +772,681 @@
Error%0A%0A%0A
+# NOTE(andreykurilin): Jinja2 uses __repr__ methods of objects while rendering%0A# templates. Such behaviour converts OrderedDict to the string like%0A# %22OrderedDict(%5B('foo', 'xxx'), ('bar', 'yyy')%5D)%22%0A# which breaks json/yaml load.%0A# In 99%25 of cases, we are rendering templates based on the dicts obtained%0A# after yaml.safe_load which uses collections.OrderedDict , so writing here%0A# the workaround with overridden __repr__ method looks like the best choice.%0Aclass OrderedDict(collections.OrderedDict):%0A %22%22%22collections.OrderedDict with __repr__ like in the regular dict.%22%22%22%0A def __repr__(self):%0A return json.dumps(self, sort_keys=False)%0A%0A%0A
def _con
@@ -1961,28 +1961,16 @@
return
-collections.
OrderedD
@@ -2054,16 +2054,137 @@
pass%0A%0A%0A
+_SafeLoader.add_constructor(resolver.BaseResolver.DEFAULT_MAPPING_TAG,%0A _construct_mapping)%0A%0A%0A
def safe
@@ -2317,135 +2317,8 @@
%22%22%22%0A
- _SafeLoader.add_constructor(resolver.BaseResolver.DEFAULT_MAPPING_TAG,%0A _construct_mapping)%0A
|
6e4be2c3c082ea8551c1eee9fc792511a043f1a7
|
Fix bug when using unix sockets.
|
redis_cache/connection.py
|
redis_cache/connection.py
|
from redis.connection import UnixDomainSocketConnection, Connection
class CacheConnectionPool(object):
def __init__(self):
self._clients = {}
self._connection_pools = {}
def __contains__(self, server):
return server in self._clients
def __getitem__(self, server):
return self._clients.get(server, None)
def reset(self):
for pool in self._connection_pools.values():
pool.disconnect()
self._clients = {}
self._connection_pools = {}
def get_connection_pool(
self,
client,
host='127.0.0.1',
port=6379,
db=1,
password=None,
parser_class=None,
unix_socket_path=None,
connection_pool_class=None,
connection_pool_class_kwargs=None,
socket_timeout=None,
socket_connect_timeout=None,
**kwargs
):
connection_identifier = (host, port, db, unix_socket_path)
self._clients[connection_identifier] = client
pool = self._connection_pools.get(connection_identifier)
if pool is None:
connection_class = (
unix_socket_path and UnixDomainSocketConnection or Connection
)
kwargs = {
'db': db,
'password': password,
'connection_class': connection_class,
'parser_class': parser_class,
'socket_timeout': socket_timeout,
'socket_connect_timeout': socket_connect_timeout,
}
kwargs.update(connection_pool_class_kwargs)
if unix_socket_path is None:
kwargs.update({
'host': host,
'port': port,
})
else:
kwargs['path'] = unix_socket_path
pool = connection_pool_class(**kwargs)
self._connection_pools[connection_identifier] = pool
pool.connection_identifier = connection_identifier
return pool
pool = CacheConnectionPool()
|
Python
| 0
|
@@ -1463,36 +1463,135 @@
ut,%0A
+%7D%0A%0A
+ if not issubclass(connection_class, UnixDomainSocketConnection):%0A kwargs%5B
'socket_connect_
@@ -1590,33 +1590,35 @@
connect_timeout'
-:
+%5D =
socket_connect_
@@ -1624,31 +1624,17 @@
_timeout
-,%0A %7D
+%0A
%0A
|
3e0004283e32b29ac4792c3a0f97abe2c2bfaf25
|
version bump for 0.25.12.1.
|
oneflow/__init__.py
|
oneflow/__init__.py
|
VERSION = '0.25.12'
|
Python
| 0
|
@@ -12,11 +12,13 @@
'0.25.12
+.1
'%0A%0A
|
d9e47e7b3200586a41a2d4f6cbd555b4b3c3d0cb
|
version bump for 0.81.3.
|
oneflow/__init__.py
|
oneflow/__init__.py
|
VERSION = '0.81.2'
|
Python
| 0
|
@@ -14,8 +14,8 @@
.81.
-2
+3
'%0A%0A
|
7e65f4da82cb9176247c1aa9ab72f40116cc6a8e
|
Add the Caliper package
|
var/spack/repos/builtin/packages/caliper/package.py
|
var/spack/repos/builtin/packages/caliper/package.py
|
Python
| 0
|
@@ -0,0 +1,714 @@
+from spack import *%0A%0Aclass Caliper(Package):%0A %22%22%22%0A Caliper is a generic context annotation system. It gives programmers the%0A ability to provide arbitrary program context information to (performance)%0A tools at runtime.%0A %22%22%22%0A%0A homepage = %22https://github.com/LLNL/Caliper%22%0A url = %22%22%0A%0A version('master', git='ssh://git@cz-stash.llnl.gov:7999/piper/caliper.git')%0A%0A variant('mpi', default=False, description='Enable MPI function wrappers.')%0A%0A depends_on('libunwind')%0A depends_on('papi')%0A depends_on('mpi', when='+mpi')%0A%0A def install(self, spec, prefix):%0A with working_dir('build', create=True):%0A cmake('..', *std_cmake_args)%0A make()%0A make(%22install%22)%0A
|
|
c67b8173e33ed619d9601654a71c47d67fb82fe3
|
add package py-rope (#3314)
|
var/spack/repos/builtin/packages/py-rope/package.py
|
var/spack/repos/builtin/packages/py-rope/package.py
|
Python
| 0
|
@@ -0,0 +1,1548 @@
+##############################################################################%0A# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/llnl/spack%0A# Please also see the LICENSE file for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass PyRope(PythonPackage):%0A %22%22%22a python refactoring library.%22%22%22%0A%0A homepage = %22https://github.com/python-rope/rope%22%0A url = %22https://pypi.io/packages/source/r/rope/rope-0.10.5.tar.gz%22%0A%0A version('0.10.5', '21882fd7c04c29d09f75995d8a088be7')%0A%0A depends_on('py-setuptools', type='build')%0A
|
|
cdcf814003694b86df6abee8f24af9b8609ea9a6
|
Add py-zipp package (#12656)
|
var/spack/repos/builtin/packages/py-zipp/package.py
|
var/spack/repos/builtin/packages/py-zipp/package.py
|
Python
| 0
|
@@ -0,0 +1,757 @@
+# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass PyZipp(PythonPackage):%0A %22%22%22Backport of pathlib-compatible object wrapper for zip files.%22%22%22%0A%0A homepage = %22https://github.com/jaraco/zipp%22%0A url = %22https://pypi.io/packages/source/z/zipp/zipp-0.6.0.tar.gz%22%0A%0A version('0.6.0', sha256='3718b1cbcd963c7d4c5511a8240812904164b7f381b647143a89d3b98f9bcd8e')%0A%0A depends_on('python@2.7:', type=('build', 'run'))%0A depends_on('py-setuptools', type='build')%0A depends_on('py-setuptools-scm@1.15.0:', type='build')%0A depends_on('py-more-itertools', type=('build', 'run'))%0A
|
|
a206d2aebd5ae6f084377b4b7f2261a83afe4e70
|
Add new package: rsyslog (#18304)
|
var/spack/repos/builtin/packages/rsyslog/package.py
|
var/spack/repos/builtin/packages/rsyslog/package.py
|
Python
| 0
|
@@ -0,0 +1,1195 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Rsyslog(AutotoolsPackage):%0A %22%22%22The rocket-fast Syslog Server.%22%22%22%0A%0A homepage = %22https://www.rsyslog.com/%22%0A url = %22https://github.com/rsyslog/rsyslog/archive/v8.2006.0.tar.gz%22%0A%0A version('8.2006.0', sha256='dc30a2ec02d5fac91d3a4f15a00641e0987941313483ced46592ab0b0d68f324')%0A version('8.2004.0', sha256='b56b985fec076a22160471d389b7ff271909dfd86513dad31e401a775a6dfdc2')%0A version('8.2002.0', sha256='b31d56311532335212ef2ea7be4501508224cb21f1bef9d262c6d78e21959ea1')%0A%0A depends_on('autoconf', type='build')%0A depends_on('automake', type='build')%0A depends_on('libtool', type='build')%0A depends_on('m4', type='build')%0A depends_on('libestr')%0A depends_on('libfastjson')%0A depends_on('zlib')%0A depends_on('libuuid')%0A depends_on('libgcrypt')%0A depends_on('curl')%0A depends_on('byacc', type='build')%0A depends_on('flex', type='build')%0A%0A def setup_run_environment(self, env):%0A env.prepend_path('PATH', self.prefix.sbin)%0A
|
|
05f794499997a1097bf625e3bd202eaaf06fe373
|
Add exec module
|
common/modules/exec.py
|
common/modules/exec.py
|
Python
| 0.000001
|
@@ -0,0 +1,2320 @@
+import module%0A%0Aimport re%0A%0AREGEX = re.compile('%5Eexec(%5Cs+-o(%5Cs+%5B%5Cw.%5D+)?)?%5Cs+((%22%5B%5E%22%5D+%22)%5Cs+)+$')%0AMODNAME = 'exec'%0AUSAGE = %22%22%22Execute commands on target.%0Ausage: exec %5B-o %5Bfilename%5D%5D %22cmd1%22 %5B%22cmd2%22 %22cmd3%22 ...%5D%0A%5CnExecute given commands and optionally log to file with optional filename.%0A%5Cnoptions:%0A-h%5Ct%5Ctshow help%0A-o filename%5Ctwrite results to file in ARCHIVE_DIR'.%22%22%22%0A%0A%0A@module.server_handler(MODNAME)%0Adef server_exec(server, argv):%0A # extra space is for regex%0A if len(argv) %3C 2 or argv%5B1%5D in ('-h', '--help') or not REGEX.match(' '.join(argv) + ' '):%0A print USAGE%0A return%0A try:%0A preproc = preprocess(argv)%0A except Exception:%0A print USAGE%0A return%0A server.generic(*preproc)%0A%0A%0A@module.client_handler(MODNAME)%0Adef client_shell(client, inp):%0A %22%22%22Handle server %60exec' command.%0A%0A Execute specially formatted input string and return specially formatted%0A response.%0A %22%22%22%0A%0A out = ''%0A cmds = parse_exec_cmds(inp%5B5:%5D)%0A for cmd in cmds:%0A cmd_out = client.cmd_exec(cmd)%0A out += '='*20 + '%5Cn%5Cn$ %7B%7D%5Cn%7B%7D%5Cn'.format(cmd, cmd_out)%0A client.s.send(out)%0A%0A%0Adef preprocess(argv):%0A %22%22%22Parse posh %60exec' command line.%0A%0A Args:%0A inp: raw %60exec' command line%0A%0A Returns:%0A Tuple suitable for expansion into as self.generic() parameters.%0A %22%22%22%0A%0A write_file = None%0A write_flag = argv%5B1%5D == '-o'%0A if write_flag:%0A if len(argv) == 2:%0A # it was just %22exec -o%22%0A raise Exception%0A if '%22' not in argv%5B2%5D:%0A write_file = argv%5B2%5D%0A del argv%5B2%5D%0A del argv%5B1%5D%0A argv = ' '.join(argv)%0A return argv, write_flag, write_file%0A%0A%0Adef parse_exec_cmds(inp):%0A %22%22%22Parse string provided by server %60exec' command.%0A%0A Convert space delimited string with commands to execute in quotes, for%0A example (%22ls -l%22 %22cat /etc/passwd%22) into list with commands as 
strings.%0A%0A Returns:%0A List of commands to execute.%0A %22%22%22%0A%0A if inp.count('%22') == 2:%0A return %5Binp%5B1:-1%5D%5D%0A else:%0A # server side regex guarantees that these quotes will be in the%0A # correct place -- the space between two commands%0A third_quote = inp.find('%22 %22') + 2%0A first_cmd = inp%5B:third_quote-1%5D%0A rest = inp%5Bthird_quote:%5D%0A return %5Bfirst_cmd%5B1:-1%5D%5D + parse_exec_cmds(rest)%0A
|
|
8a6fdc7c79f9038c1b89ba90d60555f3dcbbfdb9
|
Add migration
|
project/api/migrations/0016_selection_competitor.py
|
project/api/migrations/0016_selection_competitor.py
|
Python
| 0.000002
|
@@ -0,0 +1,523 @@
+# Generated by Django 2.1.5 on 2019-02-12 14:15%0A%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('api', '0015_auto_20190211_2028'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='selection',%0A name='competitor',%0A field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='selections', to='api.Competitor'),%0A ),%0A %5D%0A
|
|
646e8cc87b4bb0d032f6f725a0fc2fbc8dfe5a1f
|
add a command exporting field with their locales to XLS
|
telemeta/management/commands/telemeta-export-fields.py
|
telemeta/management/commands/telemeta-export-fields.py
|
Python
| 0
|
@@ -0,0 +1,1603 @@
+from optparse import make_option%0Afrom django.conf import settings%0Afrom django.core.management.base import BaseCommand, CommandError%0Afrom django.contrib.auth.models import User%0Afrom django.template.defaultfilters import slugify%0Afrom django.utils import translation%0Afrom telemeta.models import *%0Afrom telemeta.util.unaccent import unaccent%0Aimport logging%0Aimport codecs%0Afrom xlwt import Workbook%0A%0Aclass Command(BaseCommand):%0A help = %22Export media fields to a XLS file (see an example in example/data/%22%0A args = %22path%22%0A first_row = 1%0A admin_email = 'webmaster@parisson.com'%0A language_codes = %5B'en_US', 'fr_FR', 'de_DE'%5D%0A models = %5BMediaFonds, MediaCorpus, MediaCollection, MediaItem%5D%0A%0A def handle(self, *args, **options):%0A self.file = args%5B0%5D%0A self.book = Workbook()%0A for model in self.models:%0A self.sheet = self.book.add_sheet(model.element_type)%0A self.sheet.write(0, 0, 'Field')%0A self.sheet.col(0).width = 256*32%0A%0A k = 1%0A for language_code in self.language_codes:%0A self.sheet.write(0, k, language_code)%0A self.sheet.col(k).width = 256*32%0A k += 1%0A%0A i = 1%0A for field in model._meta.fields:%0A self.sheet.write(i, 0, field.attname)%0A j = 1%0A for language_code in self.language_codes:%0A translation.activate(language_code)%0A self.sheet.write(i, j, unicode(field.verbose_name.lower()))%0A j += 1%0A i += 1%0A%0A self.book.save(self.file)%0A
|
|
d5e77eba7cb7fe5c3235d1e59612e4c32fbe658f
|
Add test for Flags
|
tests/cupy_tests/test_flags.py
|
tests/cupy_tests/test_flags.py
|
Python
| 0
|
@@ -0,0 +1,641 @@
+import unittest%0A%0Afrom cupy import flags%0A%0A%0Aclass TestFlags(unittest.TestCase):%0A%0A def setUp(self):%0A self.flags = flags.Flags(1, 2, 3)%0A%0A def test_c_contiguous(self):%0A self.assertEqual(1, self.flags%5B'C_CONTIGUOUS'%5D)%0A%0A def test_f_contiguous(self):%0A self.assertEqual(2, self.flags%5B'F_CONTIGUOUS'%5D)%0A%0A def test_owndata(self):%0A self.assertEqual(3, self.flags%5B'OWNDATA'%5D)%0A%0A def test_key_error(self):%0A with self.assertRaises(KeyError):%0A self.flags%5B'unknown key'%5D%0A%0A def test_repr(self):%0A self.assertEqual(''' C_CONTIGUOUS : 1%0A F_CONTIGUOUS : 2%0A OWNDATA : 3''', repr(self.flags))%0A
|
|
990b452e8c142d6ada840026e80cbc15ebff895c
|
add csv test runner
|
tests/python_tests/csv_test.py
|
tests/python_tests/csv_test.py
|
Python
| 0.000001
|
@@ -0,0 +1,1518 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Aimport glob%0Afrom nose.tools import *%0Afrom utilities import execution_path%0A%0Aimport os, mapnik2%0A%0A%0Adef setup():%0A # All of the paths used are relative, if we run the tests%0A # from another directory we need to chdir()%0A os.chdir(execution_path('.'))%0A%0Aif 'csv' in mapnik2.DatasourceCache.instance().plugin_names():%0A%0A def test_broken_files(visual=False):%0A broken = glob.glob(%22../data/csv/fails/*.*%22)%0A broken.extend(glob.glob(%22../data/csv/warns/*.*%22))%0A %0A # Add a filename that doesn't exist %0A broken.append(%22../data/csv/fails/does_not_exist.csv%22)%0A %0A for csv in broken:%0A throws = False%0A if visual:%0A try:%0A ds = mapnik2.Datasource(type='csv',file=csv,strict=True,quiet=True)%0A print '%5Cx1b%5B33mfailed%5Cx1b%5B0m',csv%0A except Exception:%0A print '%5Cx1b%5B1;32m%E2%9C%93 %5Cx1b%5B0m', csv%0A %0A def test_good_files(visual=False):%0A good_files = glob.glob(%22../data/csv/*.*%22)%0A good_files.extend(glob.glob(%22../data/csv/warns/*.*%22))%0A %0A for csv in good_files:%0A if visual:%0A try:%0A ds = mapnik2.Datasource(type='csv',file=csv,quiet=True)%0A print '%5Cx1b%5B1;32m%E2%9C%93 %5Cx1b%5B0m', csv%0A except Exception:%0A print '%5Cx1b%5B33mfailed%5Cx1b%5B0m',csv%0A%0A%0Aif __name__ == %22__main__%22:%0A setup()%0A %5Beval(run)(visual=True) for run in dir() if 'test_' in run%5D%0A
|
|
033809cbc96e380bdd657fd4f10d1ca60b0aa9af
|
Create Graphingcalc.py
|
Graphingcalc.py
|
Graphingcalc.py
|
Python
| 0
|
@@ -0,0 +1 @@
+%0A
|
|
b0d50f52f45d8f1c7de261c7fe8d15e621d0e641
|
Add a script to "untie" tied model weights.
|
scripts/theanets-untie.py
|
scripts/theanets-untie.py
|
Python
| 0
|
@@ -0,0 +1,1074 @@
+#!/usr/bin/env python%0A%0Aimport climate%0Aimport cPickle as pickle%0Aimport gzip%0Aimport numpy as np%0A%0Alogging = climate.get_logger('theanets-untie')%0A%0A@climate.annotate(%0A source='load a saved network from FILE',%0A target='save untied network weights to FILE',%0A)%0Adef main(source, target):%0A opener = gzip.open if source.endswith('.gz') else open%0A p = pickle.load(opener(source))%0A%0A logging.info('read from %25s:', source)%0A for w, b in zip(p%5B'weights'%5D, p%5B'biases'%5D):%0A logging.info('weights %25s bias %25s %25s', w.shape, b.shape, b.dtype)%0A%0A p%5B'weights'%5D.extend(0 + w.T for w in p%5B'weights'%5D%5B::-1%5D)%0A p%5B'biases'%5D.extend(-b for b in p%5B'biases'%5D%5B-2::-1%5D)%0A p%5B'biases'%5D.append(np.zeros(%0A (len(p%5B'weights'%5D%5B0%5D), ), p%5B'biases'%5D%5B0%5D.dtype))%0A%0A logging.info('writing to %25s:', target)%0A for w, b in zip(p%5B'weights'%5D, p%5B'biases'%5D):%0A logging.info('weights %25s bias %25s %25s', w.shape, b.shape, b.dtype)%0A%0A opener = gzip.open if target.endswith('.gz') else open%0A pickle.dump(p, opener(target, 'wb'), -1)%0A%0A%0Aif __name__ == '__main__':%0A climate.call(main)%0A
|
|
cb403c3934f2401b6b337a19c8f5fd2c1f77805d
|
Solve 31.
|
031/solution.py
|
031/solution.py
|
Python
| 0.999942
|
@@ -0,0 +1,1070 @@
+# coding: utf-8%0A%0A%22%22%22 Project Euler problem #31. %22%22%22%0A%0A%0Adef problem():%0A u%22%22%22 Solve the problem.%0A%0A In England the currency is made up of pound, %C2%A3, and pence, p, and there are%0A eight coins in general circulation:%0A%0A 1p, 2p, 5p, 10p, 20p, 50p, %C2%A31 (100p) and %C2%A32 (200p).%0A It is possible to make %C2%A32 in the following way:%0A%0A 1%C3%97%C2%A31 + 1%C3%9750p + 2%C3%9720p + 1%C3%975p + 1%C3%972p + 3%C3%971p%0A How many different ways can %C2%A32 be made using any number of coins?%0A%0A Answer: 73682%0A%0A %22%22%22%0A goal = 200%0A coins = 1, 2, 5, 10, 20, 50, 100, 200%0A ways = %5B1%5D + %5B0%5D * goal%0A%0A for coin in coins:%0A for idx in range(coin, goal + 1):%0A ways%5Bidx%5D += ways%5Bidx - coin%5D%0A%0A return ways%5B-1%5D%0A%0A # Alternative (bruteforce solution)%0A # return sum(%0A # 1%0A # for c200 in range(goal, -1, -200)%0A # for c100 in range(c200, -1, -100)%0A # for c50 in range(c100, -1, -50)%0A # for c20 in range(c50, -1, -20)%0A # for c10 in range(c20, -1, -10)%0A # for c5 in range(c10, -1, -5)%0A # for _ in range(c5, -1, -2)%0A # )%0A%0A%0Aif __name__ == '__main__':%0A print problem()%0A
|
|
1774794c448d13a20891454a79f664406b364e4c
|
add cdr_serieshelper
|
cdr_stats/call_analytic/cdr_serieshelper.py
|
cdr_stats/call_analytic/cdr_serieshelper.py
|
Python
| 0.000002
|
@@ -0,0 +1,2107 @@
+#%0A# CDR-Stats License%0A# http://www.cdr-stats.org%0A#%0A# This Source Code Form is subject to the terms of the Mozilla Public%0A# License, v. 2.0. If a copy of the MPL was not distributed with this file,%0A# You can obtain one at http://mozilla.org/MPL/2.0/.%0A#%0A# Copyright (C) 2011-2014 Star2Billing S.L.%0A#%0A# The Initial Developer of the Original Code is%0A# Arezqui Belaid %3Cinfo@star2billing.com%3E%0A#%0A%0A%0Afrom influxdb import InfluxDBClient%0Afrom influxdb import SeriesHelper%0A%0A%0A# InfluxDB connections settings%0Ahost = 'localhost'%0Aport = 8086%0Auser = 'root'%0Apassword = 'root'%0Adbname = 'mydb'%0A%0Amyclient = InfluxDBClient(host, port, user, password, dbname)%0A%0A%0Aclass CDRSeriesHelper(SeriesHelper):%0A # Meta class stores time series helper configuration.%0A class Meta:%0A # The client should be an instance of InfluxDBClient.%0A client = myclient%0A # The series name must be a string. Add dependent fields/tags in curly brackets.%0A series_name = 'events.stats.%7Bserver_name%7D'%0A # Defines all the fields in this time series.%0A fields = %5B'some_stat', 'other_stat'%5D%0A # Defines all the tags for the series.%0A tags = %5B'server_name'%5D%0A # Defines the number of data points to store prior to writing on the wire.%0A bulk_size = 5%0A # autocommit must be set to True when using bulk_size%0A autocommit = True%0A%0A%0A# The following will create *five* (immutable) data points.%0A# Since bulk_size is set to 5, upon the fifth construction call, *all* data%0A# points will be written on the wire via CDRSeriesHelper.Meta.client.%0ACDRSeriesHelper(server_name='us.east-1', some_stat=159, other_stat=10)%0ACDRSeriesHelper(server_name='us.east-1', some_stat=158, other_stat=20)%0ACDRSeriesHelper(server_name='us.east-1', some_stat=157, other_stat=30)%0ACDRSeriesHelper(server_name='us.east-1', some_stat=156, other_stat=40)%0ACDRSeriesHelper(server_name='us.east-1', some_stat=155, other_stat=50)%0A%0A# self.influxdbcdr.set_columns(%0A# %5B%22time%22, 
%22duration%22, %22billsec%22, %22country_id%22, %22hangup_id%22, %22switch_id%22, %22user_id%22%5D)%0A# self.influxdbcdr.add_points(%5B1413460800, 10, 8, 55, 16, 1, 1%5D)%0A
|
|
eb9fde03dd7b650c59db7a64389bdf1231844407
|
use get_by_name to benefit from caching
|
corehq/apps/users/middleware.py
|
corehq/apps/users/middleware.py
|
from redis_cache.exceptions import ConnectionInterrumped
from django.conf import settings
import django.core.exceptions
from dimagi.utils.couch.cache import cache_core
rcache = cache_core.get_redis_default_cache()
############################################################################################################
from corehq.apps.users.models import CouchUser, PublicUser, InvalidUser
from corehq.apps.domain.models import Domain
SESSION_USER_KEY_PREFIX = "session_user_doc_%s"
class UsersMiddleware(object):
def __init__(self):
# Normally we'd expect this class to be pulled out of the middleware list, too,
# but in case someone forgets, this will stop this class from being used.
found_domain_app = False
for app_name in settings.INSTALLED_APPS:
if app_name == "users" or app_name.endswith(".users"):
found_domain_app = True
break
if not found_domain_app:
raise django.core.exceptions.MiddlewareNotUsed
#def process_request(self, request):
def process_view(self, request, view_func, view_args, view_kwargs):
if 'domain' in view_kwargs:
request.domain = view_kwargs['domain']
if 'org' in view_kwargs:
request.org = view_kwargs['org']
if request.user and hasattr(request.user, 'get_profile'):
sessionid = request.COOKIES.get('sessionid', None)
if sessionid:
# roundabout way to keep doc_id based caching consistent.
# get user doc_id from session_id
MISSING = object()
INTERRUPTED = object()
try:
cached_user_doc_id = rcache.get(SESSION_USER_KEY_PREFIX % sessionid, MISSING)
except ConnectionInterrumped:
cached_user_doc_id = INTERRUPTED
# disable session based couch user caching - to be enabled later.
if cached_user_doc_id not in (MISSING, INTERRUPTED):
# cache hit
couch_user = CouchUser.wrap_correctly(
cache_core.cached_open_doc(
CouchUser.get_db(), cached_user_doc_id
)
)
else:
# cache miss, write to cache
couch_user = CouchUser.from_django_user(request.user)
if couch_user:
cache_core.do_cache_doc(couch_user.to_json())
if cached_user_doc_id is not INTERRUPTED:
rcache.set(SESSION_USER_KEY_PREFIX % sessionid, couch_user.get_id)
request.couch_user = couch_user
if 'domain' in view_kwargs:
domain = request.domain
if not request.couch_user:
couch_domain = Domain.view("domain/domains",
key=domain,
reduce=False,
include_docs=True,
).one()
if couch_domain and couch_domain.is_public:
request.couch_user = PublicUser(domain)
else:
request.couch_user = InvalidUser()
if request.couch_user:
request.couch_user.current_domain = domain
return None
############################################################################################################
|
Python
| 0
|
@@ -2915,174 +2915,26 @@
ain.
-view(%22domain/domains%22,%0A key=domain,%0A reduce=False,%0A include_docs=True,%0A ).one(
+get_by_name(domain
)%0A
|
fdf3df4154411652b340a38e901c52d678dbd92e
|
Replace get_user_profile_by_email with get_user.
|
zerver/management/commands/bulk_change_user_name.py
|
zerver/management/commands/bulk_change_user_name.py
|
from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_change_full_name
from zerver.models import UserProfile, get_user_profile_by_email
class Command(BaseCommand):
help = """Change the names for many users."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('data_file', metavar='<data file>', type=str,
help="file containing rows of the form <email>,<desired name>")
def handle(self, *args, **options):
# type: (*Any, **str) -> None
data_file = options['data_file']
with open(data_file, "r") as f:
for line in f:
email, new_name = line.strip().split(",", 1)
try:
user_profile = get_user_profile_by_email(email)
old_name = user_profile.full_name
print("%s: %s -> %s" % (email, old_name, new_name))
do_change_full_name(user_profile, new_name, None)
except UserProfile.DoesNotExist:
print("* E-mail %s doesn't exist in the system, skipping." % (email,))
|
Python
| 0
|
@@ -171,27 +171,28 @@
import
-Base
Command
+Error
%0A%0Afrom z
@@ -252,60 +252,46 @@
ver.
-models import UserProfile, get_user_profile_by_email
+lib.management import ZulipBaseCommand
%0A%0Acl
@@ -302,16 +302,21 @@
Command(
+Zulip
BaseComm
@@ -615,16 +615,58 @@
name%3E%22)
+%0A self.add_realm_args(parser, True)
%0A%0A de
@@ -778,16 +778,56 @@
_file'%5D%0A
+ realm = self.get_realm(options)%0A
@@ -1007,16 +1007,21 @@
e =
+self.
get_user
_pro
@@ -1020,31 +1020,21 @@
user
-_profile_by_email(email
+(email, realm
)%0A
@@ -1254,32 +1254,20 @@
ept
-UserProfile.DoesNotExist
+CommandError
:%0A
@@ -1295,11 +1295,9 @@
nt(%22
-* E
+e
-mai
@@ -1326,14 +1326,16 @@
the
-system
+realm %25s
, sk
@@ -1340,17 +1340,16 @@
skipping
-.
%22 %25 (ema
@@ -1351,11 +1351,17 @@
(email,
+ realm
))%0A
|
7e4803db10f0cd2ccb6d54117bc68dd999023908
|
Create __init__.py
|
run_tests/__init__.py
|
run_tests/__init__.py
|
Python
| 0.000429
|
@@ -0,0 +1 @@
+%0A
|
|
339fdd927f9da0f7e15726d087c9916301aef935
|
Add soft margin SVM and added kernels and class
|
softMarginSVMwithKernels/howItWorksSoftMarginSVM.py
|
softMarginSVMwithKernels/howItWorksSoftMarginSVM.py
|
Python
| 0
|
@@ -0,0 +1,2307 @@
+# -*- coding: utf-8 -*-%0A%22%22%22Soft Margin SVM classification with kernels for machine learning.%0A%0ASoft margin SVM is basically an SVM (see folder **supportVectorMachine**) which%0Ahas some 'slack' and allows features to be 'wrongly' classified to avoid%0Aoverfitting the classifier. This also includes kernels. Kernels use the inner%0Aproduct to help us transform the feature space to make it possible for Support%0AVector Machines to create a good hyperplane with non-linear feature sets.%0A%0AI basically just do the 'from scratch' in this part because all this can easily%0Abe done by just adding some parameters to sklearn's svm.SVC().%0A%0AExample:%0A%0A $ python howItWorksSoftMarginSVM.py.py%0A%0ATodo:%0A *%0A%22%22%22%0Aimport numpy as np%0Afrom numpy import linalg%0A# Because I made a convex solver in 'howItWorksSupportVectorMachine.py' I will%0A# just use a library for it now because it's simpler.%0Aimport cvxopt%0Aimport cvxopt.solvers%0A%0A%0Adef linear_kernel(x1, x2):%0A %22%22%22Linear kernel function.%0A%0A if this kernel is used then the decision boundary hyperplane will have a%0A linear form.%0A %22%22%22%0A return np.dot(x1, x2)%0A%0A%0Adef polynomial_kernel(x, y, p=3):%0A %22%22%22Polynomial kernel function.%0A%0A if this kernel is used then the decision boundary hyperplane will have a%0A Polynomial form.%0A %22%22%22%0A return (1 + np.dot(x, y))**p%0A%0A%0Adef gaussian_kernel(x, y, sigma=5.0):%0A %22%22%22Gaussian kernel function.%0A%0A if this kernel is used then the decision boundary hyperplane will have a%0A Gaussian form.%0A %22%22%22%0A return np.exp(-linalg.norm(x - y)**2 / (2 * (sigma**2)))%0A%0A%0Aclass SVM(object):%0A %22%22%22Support Vector Machine (SVM) class.%0A%0A This class is for creating an instance of a SVM. 
To avoid retraining or%0A refitting (as it's also called) every time it is used.%0A %22%22%22%0A%0A def __init__(self, kernel=linear_kernel, C=None):%0A %22%22%22The __init__ method of the SVM class.%0A%0A Args:%0A kernel (function name): The kernel that will be used.%0A Default linear kernel.%0A C: the max sum of all the distances of the features that are%0A wrongly classified during fitting/training. Default is 'None', if C is%0A None then it's a hard margin SVM with no slack.%0A %22%22%22%0A self.kernel = kernel%0A self.C = C%0A if self.C is not None:%0A self.C = float(self.C)%0A
|
|
b89a98f0a5b6d4af94b5c52ff4baf4e7c10f2b53
|
Create PrintExpScalingFactors.py
|
solidity/python/constants/PrintExpScalingFactors.py
|
solidity/python/constants/PrintExpScalingFactors.py
|
Python
| 0
|
@@ -0,0 +1,258 @@
+from math import exp%0A%0A%0AMIN_PRECISION = 32%0A%0A%0Afor n in %5B1,2,3%5D:%0A print ' uint256 constant SCALED_EXP_%7B%7D = 0x%7B:x%7D;'.format(n,int(exp(n)*(1%3C%3CMIN_PRECISION)))%0A print ' uint256 constant SCALED_VAL_%7B%7D = 0x%7B:x%7D;'.format(n,int( (n)*(1%3C%3CMIN_PRECISION)))%0A
|
|
171283cc2dee67eed8469fac08f531268a21c780
|
add influxdb_user salt state to manage influxdb users
|
salt/states/influxdb_user.py
|
salt/states/influxdb_user.py
|
Python
| 0
|
@@ -0,0 +1,3199 @@
+# -*- coding: utf-8 -*-%0A'''%0AManagement of InfluxDB users%0A============================%0A'''%0A%0A%0Adef present(name, passwd, database, user=None, password=None, host=None,%0A port=None):%0A '''%0A Ensure that the user is present%0A%0A name%0A The name of the user to manage%0A%0A passwd%0A The password of the user%0A%0A database%0A The database to create the user in%0A%0A user%0A The user to connect as (must be able to create the user)%0A%0A password%0A The password of the user%0A%0A host%0A The host to connect to%0A%0A port%0A The port to connect to%0A%0A '''%0A ret = %7B'name': name,%0A 'changes': %7B%7D,%0A 'result': True,%0A 'comment': ''%7D%0A%0A # check if db does not exist%0A if not __salt__%5B'influxdb.db_exists'%5D(%0A database, user, password, host, port):%0A ret%5B'result'%5D = False%0A ret%5B'comment'%5D = 'Database %7B0%7D does not exist'.format(database)%0A return ret%0A%0A # check if user exists%0A if not __salt__%5B'influxdb.user_exists'%5D(%0A name, database, user, password, host, port):%0A if __opts__%5B'test'%5D:%0A ret%5B'result'%5D = None%0A ret%5B'comment'%5D = 'User %7B0%7D is not present and needs to be created'%5C%0A .format(name)%0A return ret%0A # The user is not present, make it!%0A if __salt__%5B'influxdb.user_create'%5D(%0A name, passwd, database, user, password, host, port):%0A ret%5B'comment'%5D = 'User %7B0%7D has been created'.format(name)%0A ret%5B'changes'%5D%5Bname%5D = 'Present'%0A return ret%0A else:%0A ret%5B'comment'%5D = 'Failed to create user %7B0%7D'.format(name)%0A ret%5B'result'%5D = False%0A return ret%0A%0A # fallback%0A ret%5B'comment'%5D = 'User %7B0%7D is already present'.format(name)%0A return ret%0A%0A%0Adef absent(name, database, user=None, password=None, host=None, port=None):%0A '''%0A Ensure that the named user is absent%0A%0A name%0A The name of the user to remove%0A%0A database%0A The database to remove the user from%0A%0A user%0A The user to connect as (must be able to remove the user)%0A%0A 
password%0A The password of the user%0A%0A host%0A The host to connect to%0A%0A port%0A The port to connect to%0A%0A '''%0A ret = %7B'name': name,%0A 'changes': %7B%7D,%0A 'result': True,%0A 'comment': ''%7D%0A%0A #check if user exists and remove it%0A if __salt__%5B'influxdb.user_exists'%5D(%0A name, database, user, password, host, port):%0A if __opts__%5B'test'%5D:%0A ret%5B'result'%5D = None%0A ret%5B'comment'%5D = 'User %7B0%7D is present and needs to be removed'%5C%0A .format(name)%0A return ret%0A if __salt__%5B'influxdb.user_remove'%5D(%0A name, database, user, password, host, port):%0A ret%5B'comment'%5D = 'User %7B0%7D has been removed'.format(name)%0A ret%5B'changes'%5D%5Bname%5D = 'Absent'%0A return ret%0A else:%0A ret%5B'comment'%5D = 'Failed to remove user %7B0%7D'.format(name)%0A ret%5B'result'%5D = False%0A return ret%0A%0A # fallback%0A ret%5B'comment'%5D = 'User %7B0%7D is not present, so it cannot be removed'%5C%0A .format(name)%0A return ret%0A
|
|
8601790648a17dd1794be4f88d61e4af01349a80
|
Test for the chipseq pipeline code
|
tests/test_pipeline_chipseq.py
|
tests/test_pipeline_chipseq.py
|
Python
| 0
|
@@ -0,0 +1,2472 @@
+%22%22%22%0A.. Copyright 2017 EMBL-European Bioinformatics Institute%0A%0A Licensed under the Apache License, Version 2.0 (the %22License%22);%0A you may not use this file except in compliance with the License.%0A You may obtain a copy of the License at%0A%0A http://www.apache.org/licenses/LICENSE-2.0%0A%0A Unless required by applicable law or agreed to in writing, software%0A distributed under the License is distributed on an %22AS IS%22 BASIS,%0A WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A See the License for the specific language governing permissions and%0A limitations under the License.%0A%22%22%22%0A%0Afrom __future__ import print_function%0A%0Aimport os.path%0Aimport pytest # pylint: disable=unused-import%0A%0Afrom process_chipseq import process_chipseq%0A%0A@pytest.mark.chipseq%0A@pytest.mark.pipeline%0Adef test_tb_pipeline():%0A %22%22%22%0A Test case to ensure that the ChIP-seq pipeline code works.%0A%0A Running the pipeline with the test data from the command line:%0A%0A .. 
code-block:: none%0A runcompss %5C%0A --lang=python %5C%0A --library_path=$%7BHOME%7D/bin %5C%0A --pythonpath=/%3Cpyenv_virtenv_dir%3E/lib/python2.7/site-packages/ %5C%0A --log_level=debug %5C%0A process_chipseq.py %5C%0A --taxon_id 9606 %5C%0A --genome /%3Cdataset_dir%3E/Human.GCA_000001405.22.fasta %5C%0A --assembly GRCh38 %5C%0A --file /%3Cdataset_dir%3E/DRR000150.22.fastq%0A %22%22%22%0A resource_path = os.path.join(os.path.dirname(__file__), %22data/%22)%0A%0A files = %5B%0A resource_path + 'macs2.Human.GCA_000001405.22.fasta',%0A resource_path + 'macs2.Human.GCA_000001405.22.fasta.ann',%0A resource_path + 'macs2.Human.GCA_000001405.22.fasta.amb',%0A resource_path + 'macs2.Human.GCA_000001405.22.fasta.bwt',%0A resource_path + 'macs2.Human.GCA_000001405.22.fasta.pac',%0A resource_path + 'macs2.Human.GCA_000001405.22.fasta.sa',%0A resource_path + 'macs2.Human.DRR000150.22.fastq',%0A None%0A %5D%0A%0A metadata = %7B%0A 'assembly' : 'GRCh38',%0A 'expt_name' : 'macs.Human.SRR1658573'%0A %7D%0A%0A chipseq_handle = process_chipseq()%0A chipseq_files, chipseq_meta = chipseq_handle.run(files, metadata, %5B%5D)%0A%0A print(chipseq_files)%0A%0A # Add tests for all files created%0A for f_out in chipseq_files:%0A print(%22CHIP-SEQ RESULTS FILE:%22, f_out)%0A assert os.path.isfile(f_out) is True%0A assert os.path.getsize(f_out) %3E 0%0A
|
|
4da92f170a89ebc0f8b995ce60439e684c558192
|
Properly parse comma-separated query string params
|
app/main/views/jobs.py
|
app/main/views/jobs.py
|
# -*- coding: utf-8 -*-
import time
from flask import (
render_template,
abort,
jsonify,
request
)
from flask_login import login_required
from werkzeug.datastructures import MultiDict
from utils.template import Template
from app import (
job_api_client,
notification_api_client,
service_api_client,
current_service)
from app.main import main
from app.utils import (
get_page_from_request,
generate_previous_next_dict,
user_has_permissions,
generate_notifications_csv)
def _parse_filter_args(filter_dict):
    """Translate query-string filter keys into notification-API argument names.

    ``type`` becomes ``template_type`` and ``status`` passes through
    unchanged; every other key is dropped.  Accepts a plain mapping or a
    MultiDict and always returns a MultiDict, preserving repeated values.
    """
    if not isinstance(filter_dict, MultiDict):
        filter_dict = MultiDict(filter_dict)

    parsed = MultiDict()
    for source_key, target_key in (('type', 'template_type'), ('status', 'status')):
        if source_key in filter_dict:
            parsed.setlist(target_key, filter_dict.getlist(source_key))
    return parsed
@main.route("/services/<service_id>/jobs")
@login_required
@user_has_permissions('view_activity', admin_override=True)
def view_jobs(service_id):
jobs = job_api_client.get_job(service_id)['data']
return render_template(
'views/jobs/jobs.html',
jobs=jobs
)
@main.route("/services/<service_id>/jobs/<job_id>")
@login_required
@user_has_permissions('view_activity', admin_override=True)
def view_job(service_id, job_id):
job = job_api_client.get_job(service_id, job_id)['data']
template = service_api_client.get_service_template(service_id, job['template'])['data']
notifications = notification_api_client.get_notifications_for_service(service_id, job_id)
finished = job['status'] == 'finished'
return render_template(
'views/jobs/job.html',
notifications=notifications['notifications'],
counts={
'queued': 0 if finished else job['notification_count'],
'sent': job['notification_count'] if finished else 0
},
uploaded_at=job['created_at'],
finished_at=job['updated_at'] if finished else None,
uploaded_file_name=job['original_file_name'],
template=Template(
template,
prefix=current_service['name']
),
job_id=job_id
)
@main.route("/services/<service_id>/jobs/<job_id>.json")
@login_required
@user_has_permissions('view_activity')
def view_job_updates(service_id, job_id):
job = job_api_client.get_job(service_id, job_id)['data']
notifications = notification_api_client.get_notifications_for_service(service_id, job_id)
finished = job['status'] == 'finished'
return jsonify(**{
'counts': render_template(
'partials/jobs/count.html',
counts={
'queued': 0 if finished else job['notification_count'],
'sent': job['notification_count'] if finished else 0
}
),
'notifications': render_template(
'partials/jobs/notifications.html',
notifications=notifications['notifications']
),
'status': render_template(
'partials/jobs/status.html',
uploaded_at=job['created_at'],
finished_at=job['updated_at'] if finished else None
),
})
@main.route('/services/<service_id>/notifications')
@login_required
@user_has_permissions('view_activity', admin_override=True)
def view_notifications(service_id):
    """Paginated notification list for a service, with optional CSV download.

    Query string may carry ``page``, filter keys (``type``/``status``) and
    ``download=csv``; filters are translated by ``_parse_filter_args``.
    """
    # TODO get the api to return count of pages as well.
    page = get_page_from_request()
    if page is None:
        # NOTE(review): format() receives a second argument (None) that the
        # template never uses — harmless but presumably left over; verify.
        abort(404, "Invalid page argument ({}) reverting to page 1.".format(request.args['page'], None))
    filter_args = _parse_filter_args(request.args)
    notifications = notification_api_client.get_notifications_for_service(
        service_id=service_id,
        page=page,
        template_type=filter_args.getlist('template_type') if 'template_type' in filter_args else None,
        status=filter_args.getlist('status') if 'status' in filter_args else None)

    # Carry the caller's query string through to prev/next links so filters
    # survive pagination.
    view_dict = MultiDict(request.args)
    prev_page = None
    if notifications['links'].get('prev', None):
        prev_page = generate_previous_next_dict(
            'main.view_notifications',
            service_id,
            view_dict,
            page - 1,
            'Previous page',
            'page {}'.format(page - 1))
    next_page = None
    if notifications['links'].get('next', None):
        next_page = generate_previous_next_dict(
            'main.view_notifications',
            service_id,
            view_dict,
            page + 1,
            'Next page',
            'page {}'.format(page + 1))

    # CSV export short-circuits the HTML page and returns only the current
    # page of notifications as an inline attachment.
    if 'download' in request.args and request.args['download'] == 'csv':
        csv_content = generate_notifications_csv(notifications['notifications'])
        return csv_content, 200, {
            'Content-Type': 'text/csv; charset=utf-8',
            'Content-Disposition': 'inline; filename="notifications.csv"'
        }
    return render_template(
        'views/notifications.html',
        notifications=notifications['notifications'],
        page=page,
        prev_page=prev_page,
        next_page=next_page
    )
@main.route("/services/<service_id>/jobs/<job_id>/notification/<string:notification_id>")
@login_required
@user_has_permissions('view_activity', admin_override=True)
def view_notification(service_id, job_id, notification_id):
now = time.strftime('%H:%M')
return render_template(
'views/notification.html',
message=[
message for message in messages if message['id'] == notification_id
][0],
delivered_at=now,
uploaded_at=now,
job_id=job_id
)
|
Python
| 0.999508
|
@@ -29,16 +29,33 @@
ort time
+%0Aimport itertools
%0A%0Afrom f
@@ -564,24 +564,25 @@
lter_dict):%0A
+%0A
if not i
@@ -665,22 +665,19 @@
ct)%0A
+%0A
-out_dict =
+return
Mul
@@ -687,126 +687,121 @@
ict(
-)
%0A
-if 'type' in filter_dict:%0A out_dict.setlist('template_type', filter_dict.getlist('type')
+ (%0A key,%0A (','.join(filter_dict.getlist(key))).split(',')%0A
)%0A
+
-if 'status'
+ for key
in
@@ -811,17 +811,23 @@
ter_dict
-:
+.keys()
%0A
@@ -831,85 +831,50 @@
-out_dict.setlist('status', filter_dict.getlist('status'))%0A return out_dict
+if ''.join(filter_dict.getlist(key))%0A )
%0A%0A%0A@
@@ -5063,16 +5063,51 @@
ext_page
+,%0A request_args=request.args
%0A )%0A%0A
|
8cb665e107cb33b5ff3825e5ffb3bde919d36cff
|
Add cache runner as per #2897
|
salt/runners/cache.py
|
salt/runners/cache.py
|
Python
| 0
|
@@ -0,0 +1,1422 @@
+'''%0AReturn cached data from minions%0A'''%0A# Import python libs%0Aimport os%0A%0A# Import salt libs%0Aimport salt.output%0Aimport salt.payload%0A%0A%0Adef _cdata():%0A '''%0A Return the cached data from the minions%0A '''%0A ret = %7B%7D%0A serial = salt.payload.Serial(__opts__)%0A mdir = os.path.join(__opts__%5B'cachedir'%5D, 'minions')%0A for minion in os.listdir(mdir):%0A path = os.path.join(mdir, minion, 'data.p')%0A if os.path.isfile(path):%0A with open(path) as fp_:%0A ret%5Bminion%5D = serial.loads(fp_.read())%0A return ret%0A%0A%0Adef grains(minion=None):%0A '''%0A Return cached grains for all minions or a specific minion%0A '''%0A data = _cdata()%0A if minion:%0A if minion in data:%0A salt.output(%7Bminion: data%5Bminion%5D%5B'grains'%5D%7D, 'grains')%0A return %7Bminion: data%5Bminion%5D%5B'grains'%5D%7D%0A ret = %7B%7D%0A for minion in data:%0A ret%5Bminion%5D = data%5Bminion%5D%5B'grains'%5D%0A salt.output(%7Bminion: data%5Bminion%5D%5B'grains'%5D%7D, 'grains')%0A return ret%0A%0A%0Adef pillar(minion=None):%0A '''%0A Return cached grains for all minions or a specific minion%0A '''%0A data = _cdata()%0A if minion:%0A if minion in data:%0A salt.output(%7Bminion: data%5Bminion%5D%5B'pillar'%5D%7D)%0A return %7Bminion: data%5Bminion%5D%5B'pillar'%5D%7D%0A ret = %7B%7D%0A for minion in data:%0A ret%5Bminion%5D = data%5Bminion%5D%5B'pillar'%5D%0A salt.output(%7Bminion: data%5Bminion%5D%5B'pillar'%5D%7D)%0A return ret%0A
|
|
93bb4703205166d49ced19c621bf120298018ddc
|
Fix yaml-hl to use the new API.
|
examples/yaml-hl/yaml_hl.py
|
examples/yaml-hl/yaml_hl.py
|
#!/usr/bin/python
import yaml, codecs, sys, optparse

# Register implicit resolvers so nodes at these paths in the config
# document load with the expected custom tags: the top-level config,
# the 'tokens'/'events' sections, and each start/end marker entry.
yaml.add_resolver(u'!Config', [])
yaml.add_resolver(u'!TokensConfig', [u'tokens'])
yaml.add_resolver(u'!EventsConfig', [u'events'])
yaml.add_resolver(u'!StartEndConfig', [u'tokens', None])
yaml.add_resolver(u'!StartEndConfig', [u'events', None])
class YAMLHighlight:
    """Wrap YAML tokens and events in the markers described by a config file."""

    def __init__(self, config):
        # The config document maps token/event codes to 'start'/'end'
        # marker strings, plus (substring, replacement) pairs applied to
        # the plain text between markers.
        parameters = yaml.load_document(config)
        self.replaces = parameters['replaces']
        # Keyed by (yaml class, -1) for start markers and (class, +1)
        # for end markers.
        self.substitutions = {}
        for domain, items in [('Token', parameters['tokens']),
                ('Event', parameters['events'])]:
            for code in items:
                # e.g. 'block-mapping-start' + 'Token' -> yaml.BlockMappingStartToken
                name = ''.join([part.capitalize() for part in code.split('-')]+[domain])
                cls = getattr(yaml, name)
                value = items[code]
                if value:
                    if 'start' in value:
                        self.substitutions[cls, -1] = value['start']
                    if 'end' in value:
                        self.substitutions[cls, +1] = value['end']

    def highlight(self, input):
        """Return *input* (a YAML document) with markers inserted, UTF-8 encoded."""
        # Decode byte input, honouring a UTF-16 BOM if present; default UTF-8.
        if isinstance(input, str):
            if input.startswith(codecs.BOM_UTF16_LE):
                input = unicode(input, 'utf-16-le')
            elif input.startswith(codecs.BOM_UTF16_BE):
                input = unicode(input, 'utf-16-be')
            else:
                input = unicode(input, 'utf-8')
        tokens = yaml.parse(input, Parser=iter)
        events = yaml.parse(input)
        # Each marker is [index, weight1, weight2, text]; the weights break
        # ties at equal indices so token markers nest inside event markers
        # (+2/-2 for tokens vs +1/-1 for events).
        markers = []
        number = 0
        for token in tokens:
            number += 1
            # Skip zero-width tokens — there is nothing to wrap.
            if token.start_mark.index != token.end_mark.index:
                cls = token.__class__
                if (cls, -1) in self.substitutions:
                    markers.append([token.start_mark.index, +2, number, self.substitutions[cls, -1]])
                if (cls, +1) in self.substitutions:
                    markers.append([token.end_mark.index, -2, number, self.substitutions[cls, +1]])
        number = 0
        for event in events:
            number += 1
            cls = event.__class__
            if (cls, -1) in self.substitutions:
                markers.append([event.start_mark.index, +1, number, self.substitutions[cls, -1]])
            if (cls, +1) in self.substitutions:
                markers.append([event.end_mark.index, -1, number, self.substitutions[cls, +1]])
        markers.sort()
        markers.reverse()
        # Walk backwards through the input, emitting (replaced) text chunks
        # interleaved with marker substitutions, then restore order at the end.
        chunks = []
        position = len(input)
        for index, weight1, weight2, substitution in markers:
            if index < position:
                chunk = input[index:position]
                for substring, replacement in self.replaces:
                    chunk = chunk.replace(substring, replacement)
                chunks.append(chunk)
                position = index
            chunks.append(substitution)
        chunks.reverse()
        result = u''.join(chunks)
        return result.encode('utf-8')
if __name__ == '__main__':
    # Read YAML from stdin, highlight it per the chosen config, write stdout.
    option_parser = optparse.OptionParser()
    option_parser.add_option('-c', '--config', dest='config',
                             default='yaml_hl_ascii.cfg', metavar='CONFIG')
    options, _args = option_parser.parse_args()
    highlighter = YAMLHighlight(file(options.config))
    sys.stdout.write(highlighter.highlight(sys.stdin.read()))
|
Python
| 0
|
@@ -48,18 +48,17 @@
tparse%0A%0A
-%0A%0A
+#
yaml.add
@@ -83,16 +83,17 @@
g', %5B%5D)%0A
+#
yaml.add
@@ -129,24 +129,25 @@
u'tokens'%5D)%0A
+#
yaml.add_res
@@ -183,16 +183,17 @@
ents'%5D)%0A
+#
yaml.add
@@ -241,16 +241,17 @@
None%5D)%0A
+#
yaml.add
@@ -354,16 +354,16 @@
onfig):%0A
+
@@ -388,17 +388,8 @@
load
-_document
(con
@@ -1403,32 +1403,18 @@
aml.
-parse
+scan
(input
-, Parser=iter
)%0A
|
78ab60f9a0d7251effc238b98dc110706876da8e
|
add migration for new queue_empty field
|
scripts/migrations/029-set-mailbox-queue_empty.py
|
scripts/migrations/029-set-mailbox-queue_empty.py
|
Python
| 0
|
@@ -0,0 +1,427 @@
+import logging%0A%0Afrom allura import model as M%0A%0Alog = logging.getLogger(__name__)%0A%0A%0Adef main():%0A M.Mailbox.query.update(%7B'queue': %5B%5D%7D,%0A %7B'$set': %7B'queue_empty': True%7D%7D,%0A multi=True)%0A%0A M.Mailbox.query.update(%7B'queue': %7B'$ne': %5B%5D%7D%7D,%0A %7B'$set': %7B'queue_empty': False%7D%7D,%0A multi=True)%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
1519f6be1d888f107a3b747e17cb1bd9d89c976f
|
Add SciPy benchmark
|
lib/node_modules/@stdlib/math/base/special/beta/benchmark/python/benchmark.scipy.py
|
lib/node_modules/@stdlib/math/base/special/beta/benchmark/python/benchmark.scipy.py
|
Python
| 0
|
@@ -0,0 +1,1601 @@
+#!/usr/bin/env python%0A%22%22%22Benchmark scipy.special.beta.%22%22%22%0A%0Afrom __future__ import print_function%0Aimport timeit%0A%0ANAME = %22beta%22%0AREPEATS = 3%0AITERATIONS = 1000000%0A%0A%0Adef print_version():%0A %22%22%22Print the TAP version.%22%22%22%0A print(%22TAP version 13%22)%0A%0A%0Adef print_summary(total, passing):%0A %22%22%22Print the benchmark summary.%0A%0A # Arguments%0A%0A * %60total%60: total number of tests%0A * %60passing%60: number of passing tests%0A%0A %22%22%22%0A print(%22#%22)%0A print(%221..%22 + str(total)) # TAP plan%0A print(%22# total %22 + str(total))%0A print(%22# pass %22 + str(passing))%0A print(%22#%22)%0A print(%22# ok%22)%0A%0A%0Adef print_results(elapsed):%0A %22%22%22Print benchmark results.%0A%0A # Arguments%0A%0A * %60elapsed%60: elapsed time (in seconds)%0A%0A # Examples%0A%0A %60%60%60 python%0A python%3E print_results(0.131009101868)%0A %60%60%60%0A %22%22%22%0A rate = ITERATIONS / elapsed%0A%0A print(%22 ---%22)%0A print(%22 iterations: %22 + str(ITERATIONS))%0A print(%22 elapsed: %22 + str(elapsed))%0A print(%22 rate: %22 + str(rate))%0A print(%22 ...%22)%0A%0A%0Adef benchmark():%0A %22%22%22Run the benchmark and print benchmark results.%22%22%22%0A setup = %22from scipy.special import beta; from random import random;%22%0A stmt = %22y = beta(1000.0*random() - 0.0, 1000.0*random() - 0.0)%22%0A%0A t = timeit.Timer(stmt, setup=setup)%0A%0A print_version()%0A%0A for i in xrange(REPEATS):%0A print(%22# python::%22 + NAME)%0A elapsed = t.timeit(number=ITERATIONS)%0A print_results(elapsed)%0A print(%22ok %22 + str(i+1) + %22 benchmark finished%22)%0A%0A print_summary(REPEATS, REPEATS)%0A%0A%0Adef main():%0A %22%22%22Run the benchmark.%22%22%22%0A benchmark()%0A%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
|
|
f93e29f52a59d5f545faf8fd94e950f50383c7fc
|
Integrate LLVM at llvm/llvm-project@9c8f950a0400
|
third_party/llvm/workspace.bzl
|
third_party/llvm/workspace.bzl
|
"""Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
    """Imports LLVM.

    Args:
        name: workspace name under which the llvm-project archive is
            registered via tf_http_archive.
    """
    # Pinned llvm-project revision and the sha256 of its archive; the two
    # must always be bumped together.
    LLVM_COMMIT = "3019898e0d1b494b7e7e76790adb3d83eff4aca1"
    LLVM_SHA256 = "cb37f53299af16bc7e196c9dadd1e3fa889583d966b5d3e9adde7dc18d3094c5"

    tf_http_archive(
        name = name,
        sha256 = LLVM_SHA256,
        strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
        urls = [
            # The TensorFlow mirror is listed before upstream GitHub.
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
            "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
        ],
        build_file = "//third_party/llvm:BUILD.bazel",
        patch_file = "//third_party/llvm:macos_build_fix.patch",
        link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
    )
|
Python
| 0.000002
|
@@ -160,133 +160,133 @@
= %22
-3019898e0d1b494b7e7e76790adb3d83eff4aca1%22%0A LLVM_SHA256 = %22cb37f53299af16bc7e196c9dadd1e3fa889583d966b5d3e9adde7dc18d3094c5
+9c8f950a04004736bddb5093eda15f9a1c4f2eea%22%0A LLVM_SHA256 = %229dc64e4b455908f00d47b52d8a195b1e8dc98dd6337d16c85f3eda0ca37a1c8e
%22%0A%0A
|
f902202b5e7f2473a7ce22505a4cc8b4f39377e0
|
Add scantailor package (#12833)
|
var/spack/repos/builtin/packages/scantailor/package.py
|
var/spack/repos/builtin/packages/scantailor/package.py
|
Python
| 0
|
@@ -0,0 +1,1081 @@
+# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Scantailor(CMakePackage):%0A %22%22%22Scan Tailor is an interactive post-processing tool for scanned pages. It%0A performs operations such as page splitting, deskewing, adding/removing%0A borders, and others. You give it raw scans, and you get pages ready to be%0A printed or assembled into a PDF or DJVU file. Scanning, optical character%0A recognition, and assembling multi-page documents are out of scope of this%0A project.%22%22%22%0A%0A homepage = %22http://www.scantailor.org%22%0A url = %22https://github.com/trufanov-nok/scantailor/archive/0.2.7.tar.gz%22%0A%0A version(%0A %220.2.7%22,%0A sha256=%223e27647621d43638888a268902f8fa098b06a70a5da5d0623b1c11220a367910%22,%0A )%0A%0A depends_on(%22qt@5:%22)%0A depends_on(%22libjpeg%22)%0A depends_on(%22zlib%22)%0A depends_on(%22libpng%22)%0A depends_on(%22libtiff%22)%0A depends_on(%22boost@1.35:%22)%0A depends_on(%22libxrender%22)%0A
|
|
4155d6ca5db149d8b213cc4078580fc2e85d7f4d
|
Migrate database for model changes.
|
vinotes/apps/api/migrations/0002_auto_20150325_1104.py
|
vinotes/apps/api/migrations/0002_auto_20150325_1104.py
|
Python
| 0
|
@@ -0,0 +1,608 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('api', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='wine',%0A name='description',%0A field=models.TextField(blank=True),%0A preserve_default=True,%0A ),%0A migrations.AddField(%0A model_name='winery',%0A name='description',%0A field=models.TextField(blank=True),%0A preserve_default=True,%0A ),%0A %5D%0A
|
|
253acd0afd532e8fa431ab140856fe0c4ba41684
|
make the plugins directory a package, so that plugins can depend on each other.
|
plugins/__init__.py
|
plugins/__init__.py
|
Python
| 0
|
@@ -0,0 +1,60 @@
+# keep this to be able to access plugins from other plugins%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.