commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
b6cd59f800b254d91da76083546ab7c10689df5f
|
Add unit test to enforce unique file names.
|
tests/test_no_dup_filenames.py
|
tests/test_no_dup_filenames.py
|
Python
| 0
|
@@ -0,0 +1,1632 @@
+# Copyright 2014 Red Hat, Inc.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Aimport glob%0Aimport os%0A%0A%0Aimport testtools%0A%0A%0Aclass TestNoDupFilenames(testtools.TestCase):%0A%0A def test_no_dup_filenames(self):%0A topdir = os.path.normpath(os.path.dirname(__file__) + '/../')%0A elements_glob = os.path.join(topdir, %22elements%22, %22*%22)%0A%0A filenames = %5B%5D%0A dirs_to_check = %5B'block-device.d', 'cleanup.d', 'extra-data.d',%0A 'finalise.d', 'install.d', 'post-install.d',%0A 'pre-install.d', 'root.d'%5D%0A%0A for element_dir in glob.iglob(elements_glob):%0A for dir_to_check in dirs_to_check:%0A target_dir = os.path.join(element_dir, dir_to_check, %22*%22)%0A for target in glob.iglob(target_dir):%0A short_path = target%5Blen(element_dir) + 1:%5D%0A if not os.path.isdir(target):%0A err_msg = 'Duplicate file name found %25s' %25 short_path%0A self.assertFalse(short_path in filenames, err_msg)%0A filenames.append(short_path)%0A
|
|
e81fd02cc7431ea01416126b88a22b4bba9b755e
|
Test - add cmake test tool
|
tests/test_tools/test_cmake.py
|
tests/test_tools/test_cmake.py
|
Python
| 0
|
@@ -0,0 +1,2055 @@
+# Copyright 2015 0xc0170%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0Aimport os%0Aimport yaml%0Aimport shutil%0A%0Afrom unittest import TestCase%0A%0Afrom project_generator.generate import Generator%0Afrom project_generator.project import Project%0Afrom project_generator.settings import ProjectSettings%0Afrom project_generator.tools.cmake import CMakeGccArm%0A%0Afrom .simple_project import project_1_yaml, projects_1_yaml%0A%0Aclass TestProject(TestCase):%0A%0A %22%22%22test things related to the cmake tool%22%22%22%0A%0A def setUp(self):%0A if not os.path.exists('test_workspace'):%0A os.makedirs('test_workspace')%0A # write project file%0A with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:%0A f.write(yaml.dump(project_1_yaml, default_flow_style=False))%0A # write projects file%0A with open(os.path.join(os.getcwd(), 'test_workspace/projects.yaml'), 'wt') as f:%0A f.write(yaml.dump(projects_1_yaml, default_flow_style=False))%0A%0A self.project = next(Generator(projects_1_yaml).generate('project_1'))%0A%0A self.cmake = CMakeGccArm(self.project.project, ProjectSettings())%0A%0A def tearDown(self):%0A # remove created directory%0A shutil.rmtree('test_workspace', ignore_errors=True)%0A shutil.rmtree('generated_projects', ignore_errors=True)%0A%0A def test_export_project(self):%0A result = self.project.generate('cmake_gcc_arm', False)%0A projectfiles = self.project.get_generated_project_files('cmake_gcc_arm')%0A%0A 
assert result == 0%0A assert projectfiles%0A
|
|
15102368281837ace7e67ad915f2ff9c4c4a1ac3
|
remove package alias tool
|
tools/remove_packages_alias.py
|
tools/remove_packages_alias.py
|
Python
| 0.000001
|
@@ -0,0 +1,504 @@
+import os%0Aimport sys%0Aimport logging%0Aimport urllib3%0A%0Afrom elasticsearch import Elasticsearch, NotFoundError%0Afrom os_package_registry import PackageRegistry%0Afrom sqlalchemy import MetaData, create_engine%0A%0Aurllib3.disable_warnings()%0Alogging.root.setLevel(logging.INFO)%0A%0A%0Aif __name__ == %22__main__%22:%0A%0A es_host = os.environ%5B'OS_ELASTICSEARCH_ADDRESS'%5D%0A es = Elasticsearch(hosts=%5Bes_host%5D, use_ssl='https' in es_host)%0A%0A target_index = sys.argv%5B1%5D%0A%0A es.indices.delete_alias(target_index, 'packages')%0A
|
|
d53cff101248b9c90f5d2ae3f93d0e4933d03266
|
add a manifest (.cvmfspublished) abstraction class
|
cvmfs/manifest.py
|
cvmfs/manifest.py
|
Python
| 0.000001
|
@@ -0,0 +1,2603 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%22%22%22%0ACreated by Ren%C3%A9 Meusel%0AThis file is part of the CernVM File System auxiliary tools.%0A%22%22%22%0A%0Aimport datetime%0A%0Aclass UnknownManifestField:%0A def __init__(self, key_char):%0A self.key_char = key_char%0A%0A def __str__(self):%0A return self.key_char%0A%0Aclass ManifestValidityError:%0A def __init__(self, message):%0A Exception.__init__(self, message)%0A%0A%0Aclass Manifest:%0A %22%22%22 Wraps information from .cvmfspublished%22%22%22%0A%0A def __init__(self, manifest_file):%0A %22%22%22 Initializes a Manifest object from a file pointer to .cvmfspublished %22%22%22%0A for line in manifest_file.readlines():%0A if len(line) == 0:%0A continue%0A if line%5B0:2%5D == %22--%22:%0A break%0A self._read_line(line)%0A self._check_validity()%0A%0A%0A def __str__(self):%0A return %22%3CManifest for %22 + self.repository_name + %22%3E%22%0A%0A%0A def __repr__(self):%0A return self.__str__()%0A%0A%0A def _read_line(self, line):%0A %22%22%22 Parse lines that appear in .cvmfspublished %22%22%22%0A key_char = line%5B0%5D%0A data = line%5B1:-1%5D%0A if key_char == %22C%22:%0A self.root_catalog = data%0A elif key_char == %22X%22:%0A self.certificate = data%0A elif key_char == %22H%22:%0A self.history_database = data%0A elif key_char == %22T%22:%0A self.last_modified = datetime.datetime.fromtimestamp(int(data))%0A elif key_char == %22R%22:%0A self.root_hash = data%0A elif key_char == %22D%22:%0A self.ttl = int(data)%0A elif key_char == %22S%22:%0A self.revision = int(data)%0A elif key_char == %22N%22:%0A self.repository_name = data%0A elif key_char == %22L%22:%0A self.unknown_field1 = data # TODO: ask Jakob what L means%0A else:%0A raise UnknownManifestField(key_char)%0A%0A%0A def _check_validity(self):%0A %22%22%22 Checks that all mandatory fields are found in .cvmfspublished %22%22%22%0A if not hasattr(self, 'root_catalog'):%0A raise ManifestValidityError(%22Manifest lacks a root catalog entry%22)%0A if 
not hasattr(self, 'root_hash'):%0A raise ManifestValidityError(%22Manifest lacks a root hash entry%22)%0A if not hasattr(self, 'ttl'):%0A raise ManifestValidityError(%22Manifest lacks a TTL entry%22)%0A if not hasattr(self, 'revision'):%0A raise ManifestValidityError(%22Manifest lacks a revision entry%22)%0A if not hasattr(self, 'repository_name'):%0A raise ManifestValidityError(%22Manifest lacks a repository name%22)%0A
|
|
66fcd6ab9d8703b2588bc2605278a5e056356de5
|
add top level bot class with basic outline of execution
|
updatebot/bot.py
|
updatebot/bot.py
|
Python
| 0
|
@@ -0,0 +1,3224 @@
+#%0A# Copyright (c) 2008 rPath, Inc.%0A#%0A# This program is distributed under the terms of the Common Public License,%0A# version 1.0. A copy of this license should have been distributed with this%0A# source file in a file called LICENSE. If it is not present, the license%0A# is always available at http://www.rpath.com/permanent/licenses/CPL-1.0.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# without any warranty; without even the implied warranty of merchantability%0A# or fitness for a particular purpose. See the Common Public License for%0A# full details.%0A#%0A%0A%22%22%22%0AModule for driving the update process.%0A%22%22%22%0A%0Aimport logging%0A%0Aimport repomd%0Afrom rpmvercmp import rpmvercmp%0Afrom rpmimport import rpmsource%0A%0Afrom updatebot import util%0Afrom updatebot import build%0Afrom updatebot import update%0Afrom updatebot import advise%0Afrom updatebot.errors import *%0A%0Alog = logging.getLogger('updatebot.bot')%0A%0Aclass Bot(object):%0A %22%22%22%0A Top level object for driving update process.%0A %22%22%22%0A%0A def __init__(self, cfg):%0A self._cfg = cfg%0A%0A self._clients = %7B%7D%0A self._rpmSource = rpmsource.RpmSource()%0A self._updater = update.Updater(self._cfg, self._rpmSource)%0A self._advisor = advise.Advisor(self._cfg, self._rpmSource)%0A self._builder = build.Builder(self._cfg)%0A%0A def _populateRpmSource(self):%0A %22%22%22%0A Populate the rpm source data structures.%0A %22%22%22%0A%0A for repo in self._cfg.repositoryPaths:%0A log.info('loading %25s/%25s repository data'%0A %25 (self._cfg.repositoryUrl, repo))%0A client = repomd.Client(self._cfg.repositoryUrl + '/' + repo)%0A self._rpmSource.loadFromClient(client, repo)%0A self._clients%5Brepo%5D = client%0A self._rpmSource.finalize()%0A%0A def run(self):%0A %22%22%22%0A Update the conary repository from the yum repositories.%0A %22%22%22%0A%0A log.info('starting update')%0A%0A # Populate rpm source object from yum metadata.%0A 
self._populateRpmSource()%0A%0A # Get troves to update and send advisories.%0A toAdvise, toUpdate = self._updater.getUpdates()%0A%0A # Check to see if advisories exist for all required packages.%0A self._advisor.check(toAdvise)%0A%0A # Update sources.%0A for nvf, srcPkg in toUpdate:%0A self._updater.update(nvf, srcPkg)%0A%0A # Make sure to build everything in the toAdvise list, there may be%0A # sources that have been updated, but not built.%0A buildTroves = %5B x%5B0%5D for x in toAdvise %5D%0A trvMap = self._builder.build(buildTroves)%0A%0A # Build group.%0A grpTrvMap = self._builder.build(self._cfg.topGroup)%0A%0A # Promote group.%0A # FIXME: should be able to get the new versions of packages from%0A # promote.%0A helper = self._updater.getConaryHelper()%0A newTroves = helper.promote(grpTrvMap.values(), self._cfg.targetLabel)%0A%0A # FIXME: build a trvMap from source tove nvf to new binary trove nvf%0A%0A # Send advisories.%0A self._advisor.send(trvMap, toAdvise)%0A%0A log.info('update completed successfully')%0A log.info('updated %25s packages and sent %25s advisories'%0A %25 (len(toUpdate), len(toAdvise)))%0A
|
|
8d2510fd38d946813b96798c745772641f19a5e7
|
Create 10MinEmail.py
|
10MinEmail.py
|
10MinEmail.py
|
Python
| 0.000001
|
@@ -0,0 +1,298 @@
+from bs4 import BeautifulSoup%0Aimport threading%0Aimport urllib%0Aweb=urllib.urlopen('http://www.my10minutemail.com/')%0Asoup=BeautifulSoup(web)%0Aprint soup.p.string%0Aprint 'Email Valid For 10 minutes'%0Araw_input()%0A#def alarm():%0A# print 'One Minute is Left'%0A#t = threading.Timer(60.0, alarm)%0A#t.start() %0A%0A
|
|
198dc11cadc1a20f95dccd5bb4897fa2947ff810
|
Add Affichage.py
|
Affichage.py
|
Affichage.py
|
Python
| 0.000001
|
@@ -0,0 +1,74 @@
+class Affichage:%0A%0A def affichage_jeux(self):%0A return 0%0A %0A
|
|
abe6ead4f93f98406fe197b6884e51015c200ca1
|
Add a test for query_result_to_dict
|
test/test_searchentities.py
|
test/test_searchentities.py
|
Python
| 0.999999
|
@@ -0,0 +1,761 @@
+import unittest%0A%0Afrom . import models%0Afrom sir.schema.searchentities import SearchEntity as E, SearchField as F%0A%0A%0Aclass QueryResultToDictTest(unittest.TestCase):%0A def setUp(self):%0A self.entity = E(models.B, %5B%0A F(%22id%22, %22id%22),%0A F(%22c_bar%22, %22c.bar%22),%0A F(%22c_bar_trans%22, %22c.bar%22, transformfunc=lambda v:%0A v.union(set(%5B%22yay%22%5D)))%0A %5D,%0A 1.1%0A )%0A self.expected = %7B%0A %22id%22: 1,%0A %22c_bar%22: %22foo%22,%0A %22c_bar_trans%22: set(%5B%22foo%22, %22yay%22%5D),%0A %7D%0A c = models.C(id=2, bar=%22foo%22)%0A self.val = models.B(id=1, c=c)%0A%0A def test_fields(self):%0A res = self.entity.query_result_to_dict(self.val)%0A self.assertDictEqual(self.expected, res)%0A
|
|
a8fa0d01256d82378fc0e39f6d691d5435fbbd21
|
Remove some Python 2 work-around code
|
tests/plots/test_ctables.py
|
tests/plots/test_ctables.py
|
# Copyright (c) 2015,2016,2017,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tests for the `ctables` module."""
import os.path
import tempfile
try:
buffer_args = {'bufsize': 0}
from StringIO import StringIO
except ImportError:
buffer_args = {'buffering': 1}
from io import StringIO
import numpy as np
import pytest
from metpy.plots.ctables import ColortableRegistry, convert_gempak_table
@pytest.fixture()
def registry():
"""Set up a registry for use by the tests."""
return ColortableRegistry()
def test_package_resource(registry):
"""Test registry scanning package resource."""
registry.scan_resource('metpy.plots', 'nexrad_tables')
assert 'cc_table' in registry
def test_scan_dir(registry):
"""Test registry scanning a directory and ignoring files it can't handle ."""
try:
kwargs = {'mode': 'w', 'dir': '.', 'suffix': '.tbl', 'delete': False}
kwargs.update(**buffer_args)
with tempfile.NamedTemporaryFile(**kwargs) as fobj:
fobj.write('"red"\n"lime"\n"blue"\n')
fname = fobj.name
# Unrelated table file that *should not* impact the scan
with tempfile.NamedTemporaryFile(**kwargs) as fobj:
fobj.write('PADK 704540 ADAK NAS\n')
bad_file = fobj.name
# Needs to be outside with so it's closed on windows
registry.scan_dir(os.path.dirname(fname))
name = os.path.splitext(os.path.basename(fname))[0]
assert name in registry
assert registry[name] == [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]
finally:
os.remove(fname)
os.remove(bad_file)
def test_read_file(registry):
"""Test reading a colortable from a file."""
fobj = StringIO('(0., 0., 1.0)\n"red"\n"#0000FF" #Blue')
registry.add_colortable(fobj, 'test_table')
assert 'test_table' in registry
assert registry['test_table'] == [(0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 0.0, 1.0)]
def test_read_bad_file(registry):
"""Test what error results when reading a malformed file."""
with pytest.raises(RuntimeError):
fobj = StringIO('PADK 704540 ADAK NAS '
'AK US 5188 -17665 4 0')
registry.add_colortable(fobj, 'sfstns')
def test_get_colortable(registry):
"""Test getting a colortable from the registry."""
true_colors = [(0.0, 0.0, 1.0), (1.0, 0.0, 0.0)]
registry['table'] = true_colors
table = registry.get_colortable('table')
assert table.N == 2
assert table.colors == true_colors
def test_get_steps(registry):
"""Test getting a colortable and norm with appropriate steps."""
registry['table'] = [(0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
norm, cmap = registry.get_with_steps('table', 5., 10.)
assert cmap(norm(np.array([6.]))).tolist() == [[0.0, 0.0, 1.0, 1.0]]
assert cmap(norm(np.array([14.9]))).tolist() == [[0.0, 0.0, 1.0, 1.0]]
assert cmap(norm(np.array([15.1]))).tolist() == [[1.0, 0.0, 0.0, 1.0]]
assert cmap(norm(np.array([26.]))).tolist() == [[0.0, 1.0, 0.0, 1.0]]
def test_get_steps_negative_start(registry):
"""Test bad start for get with steps (issue #81)."""
registry['table'] = [(0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
norm, _ = registry.get_with_steps('table', -10, 5)
assert norm.vmin == -10
assert norm.vmax == 5
def test_get_range(registry):
"""Test getting a colortable and norm with appropriate range."""
registry['table'] = [(0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
norm, cmap = registry.get_with_range('table', 5., 35.)
assert cmap(norm(np.array([6.]))).tolist() == [[0.0, 0.0, 1.0, 1.0]]
assert cmap(norm(np.array([14.9]))).tolist() == [[0.0, 0.0, 1.0, 1.0]]
assert cmap(norm(np.array([15.1]))).tolist() == [[1.0, 0.0, 0.0, 1.0]]
assert cmap(norm(np.array([26.]))).tolist() == [[0.0, 1.0, 0.0, 1.0]]
def test_get_boundaries(registry):
"""Test getting a colortable with explicit boundaries."""
registry['table'] = [(0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
norm, cmap = registry.get_with_boundaries('table', [0., 8., 10., 20.])
assert cmap(norm(np.array([7.]))).tolist() == [[0.0, 0.0, 1.0, 1.0]]
assert cmap(norm(np.array([9.]))).tolist() == [[1.0, 0.0, 0.0, 1.0]]
assert cmap(norm(np.array([10.1]))).tolist() == [[0.0, 1.0, 0.0, 1.0]]
def test_gempak():
"""Test GEMPAK colortable conversion."""
infile = StringIO("""! wvcolor.tbl
0 0 0
255 255 255
""")
outfile = StringIO()
# Do the conversion
convert_gempak_table(infile, outfile)
# Reset and grab contents
outfile.seek(0)
result = outfile.read()
assert result == '(0.000000, 0.000000, 0.000000)\n(1.000000, 1.000000, 1.000000)\n'
|
Python
| 0.00013
|
@@ -185,16 +185,40 @@
le.%22%22%22%0A%0A
+from io import StringIO%0A
import o
@@ -243,163 +243,8 @@
file
-%0Atry:%0A buffer_args = %7B'bufsize': 0%7D%0A from StringIO import StringIO%0Aexcept ImportError:%0A buffer_args = %7B'buffering': 1%7D%0A from io import StringIO
%0A%0Aim
@@ -851,46 +851,25 @@
alse
-%7D%0A kwargs.update(**buffer_args)
+, 'buffering': 1%7D
%0A
|
c1bbbd7ac51a25919512722d633f0d8c2d1009e2
|
Create unit tests directory
|
tests/test_lazy_toeplitz.py
|
tests/test_lazy_toeplitz.py
|
Python
| 0
|
@@ -0,0 +1,1699 @@
+from scipy.linalg import toeplitz%0Aimport numpy as np%0Afrom cooltools.snipping import LazyToeplitz%0A%0A%0An = 100%0Am = 150%0Ac = np.arange(1, n+1)%0Ar = np.r_%5B1,np.arange(-2, -m, -1)%5D%0A%0AL = LazyToeplitz(c, r)%0AT = toeplitz(c, r)%0A%0A%0Adef test_symmetric():%0A for si in %5B%0A slice(10, 20), %0A slice(0, 150),%0A slice(0, 0),%0A slice(150, 150),%0A slice(10, 10)%0A %5D:%0A assert np.allclose(L%5Bsi, si%5D, T%5Bsi, si%5D)%0A%0A%0Adef test_triu_no_overlap():%0A for si, sj in %5B%0A (slice(10, 20), slice(30, 40)),%0A (slice(10, 15), slice(30, 40)),%0A (slice(10, 20), slice(30, 45)),%0A %5D:%0A assert np.allclose(L%5Bsi, sj%5D, T%5Bsi, sj%5D)%0A%0A%0Adef test_tril_no_overlap():%0A for si, sj in %5B%0A (slice(30, 40), slice(10, 20)),%0A (slice(30, 40), slice(10, 15)),%0A (slice(30, 45), slice(10, 20)),%0A %5D:%0A assert np.allclose(L%5Bsi, sj%5D, T%5Bsi, sj%5D)%0A%0A%0Adef test_triu_with_overlap():%0A for si, sj in %5B%0A (slice(10, 20), slice(15, 25)),%0A (slice(13, 22), slice(15, 25)),%0A (slice(10, 20), slice(18, 22)),%0A %5D:%0A assert np.allclose(L%5Bsi, sj%5D, T%5Bsi, sj%5D)%0A%0A%0Adef test_tril_with_overlap():%0A for si, sj in %5B%0A (slice(15, 25), slice(10, 20)),%0A (slice(15, 22), slice(10, 20)),%0A (slice(15, 25), slice(10, 18)),%0A %5D:%0A assert np.allclose(L%5Bsi, sj%5D, T%5Bsi, sj%5D)%0A%0A%0Adef test_nested():%0A for si, sj in %5B%0A (slice(10, 40), slice(20, 30)),%0A (slice(10, 35), slice(20, 30)),%0A (slice(10, 40), slice(20, 25)),%0A (slice(20, 30), slice(10, 40)),%0A %5D:%0A assert np.allclose(L%5Bsi, sj%5D, T%5Bsi, sj%5D)%0A
|
|
c3026f4c6e5edff30347f544746781c7214c2c2e
|
Add root test file.
|
test_SM2SM.py
|
test_SM2SM.py
|
Python
| 0
|
@@ -0,0 +1,1838 @@
+'''%0ACreated on 2015-01-20%0A%0A@author: levi%0A'''%0A'''%0ACreated on 2015-01-19%0A%0A@author: levi%0A'''%0Aimport unittest%0A%0Afrom patterns.HSM2SM_matchLHS import HSM2SM_matchLHS%0Afrom patterns.HSM2SM_rewriter import HSM2SM_rewriter%0A%0Afrom PyRamify import PyRamify%0A%0Afrom t_core.messages import Packet%0Afrom t_core.iterator import Iterator%0Afrom t_core.matcher import Matcher%0Afrom t_core.rewriter import Rewriter%0A%0Afrom himesis_utils import graph_to_dot%0A%0Afrom tests.TestModules.HSM2SM import HSM2SM%0A%0Aclass Test(unittest.TestCase):%0A%0A%0A def testName(self): %0A i = Iterator()%0A p = Packet()%0A%0A pyramify = PyRamify()%0A%0A %5Bself.rules, self.ruleTraceCheckers, backwardPatterns2Rules, backwardPatternsComplete, self.matchRulePatterns, self.ruleCombinators%5D = %5C%0A pyramify.ramify_directory(%22dir_for_pyramify/%22, True)%0A%0A HSM2SM_py = self.matchRulePatterns%5B%22HSM2SM%22%5D%0A print(HSM2SM_py)%0A%0A matcher = HSM2SM_py%5B0%5D%0A rewriter = HSM2SM_py%5B1%5D%0A p.graph = matcher.condition%0A%0A %0A graph_to_dot(%22test_before_SM2SM%22, HSM2SM())%0A %0A graph_to_dot(%22test_SM2SM%22, p.graph)%0A %0A s2s_match = matcher%0A s2s_rewrite = rewriter%0A %0A graph_to_dot(%22test_SM2SM_matcher%22, matcher.condition)%0A graph_to_dot(%22test_SM2SM_rewriter%22, rewriter.condition)%0A %0A s2s_match.packet_in(p)%0A %0A if s2s_match.is_success:%0A print %22Yes!%22%0A else:%0A print %22no%22%0A %0A p = i.packet_in(p)%0A p = s2s_rewrite.packet_in(p)%0A %0A if s2s_rewrite.is_success:%0A print %22Yes!%22%0A else:%0A print %22no%22%0A %0A graph_to_dot(%22after_SM2SM%22, p.graph)%0A %0A %0A %0A %0A%0A%0Aif __name__ == %22__main__%22:%0A #import sys;sys.argv = %5B'', 'Test.testName'%5D%0A unittest.main()
|
|
3c7c81fa65206ea70cbff8394efe35749dc9dddd
|
add bitquant.py driver
|
web/bitquant.py
|
web/bitquant.py
|
Python
| 0.000001
|
@@ -0,0 +1,218 @@
+from flask import Flask, request%0Aapp = Flask(__name__, static_url_path='', static_folder='bitquant')%0A%0A@app.route(%22/%22)%0Adef root():%0A return app.send_static_file('index.html')%0A%0Aif __name__ == %22__main__%22:%0A app.run()%0A
|
|
58d3df14b1b60da772f59933345a2dfdf2cadec2
|
Add python solution for day 17
|
day17/solution.py
|
day17/solution.py
|
Python
| 0.000262
|
@@ -0,0 +1,568 @@
+import itertools%0A%0Adata = open(%22data%22, %22r%22).read()%0A%0Acontainers = map(int, data.split(%22%5Cn%22))%0A%0Apart1 = %5B%5D%0AminLength = None%0Afor length in range(len(containers)):%0A%09combinations = itertools.combinations(containers, length)%0A%09combinations = filter(lambda containers: sum(containers) == 150, combinations)%0A%0A%09part1 += combinations%0A%0A%09if minLength is None and len(combinations) %3E 0:%0A%09%09minLength = length%0A%0A%0Aprint %22Combinations (Part 1):%22, len(part1)%0A%0Apart2 = filter(lambda containers: len(containers) == minLength, part1)%0A%0Aprint %22Minimum Length Combinations (Part 2):%22, len(part2)%0A
|
|
88cfd7529c6c08e24b20576c1e40f41f3156a47e
|
add tandem sam scores script
|
bin/tandem_sam_scores.py
|
bin/tandem_sam_scores.py
|
Python
| 0
|
@@ -0,0 +1,1300 @@
+%22%22%22%0Atandem_sam_scores.py%0A%0AFor each alignment, compare the %22target%22 simulated alignment score to the%0Aactual score obtained by the aligner. When the read is simulated, we borrow%0Athe target score and the pattern of mismatches and gaps from an input%0Aalignment. But because the new read's sequence and point of origin are%0Adifferent, and because the aligner's heuristics might act differently on the%0Atandem read than on the input read, the aligned score might be different.%0ACould be either higher or lower. Here we compare and make a table showing%0Ahow scores change before and after.%0A%22%22%22%0A%0Afrom __future__ import print_function%0Aimport sys%0Afrom collections import defaultdict%0A%0Ascores = defaultdict(int)%0A%0Afor ln in sys.stdin:%0A if ln%5B0%5D == '@':%0A continue%0A toks = ln.rstrip().split('%5Ct')%0A assert toks%5B0%5D%5B:6%5D == '!!ts!!'%0A ref_id, fw, ref_off, expected_score, typ = toks%5B0%5D%5B16:%5D.split('!!ts-sep!!')%0A actual_score = None%0A for ef in toks%5B11:%5D:%0A if ef.startswith('AS:i:'):%0A actual_score = int(ef%5B5:%5D)%0A break%0A scores%5B(expected_score, actual_score)%5D += 1%0A%0Afor k, v in sorted(scores.items()):%0A expected_score, actual_score = k%0A actual_score = 'NA' if actual_score is None else str(actual_score)%0A print(%22%25s,%25s,%25d%22 %25 (expected_score, actual_score, v))%0A
|
|
b4c21650cfd92d722a0ac20ea51d90f15adca44e
|
add permissions classes for Group API
|
bioshareX/permissions.py
|
bioshareX/permissions.py
|
Python
| 0
|
@@ -0,0 +1,1728 @@
+from django.http.response import Http404%0Afrom rest_framework.permissions import DjangoModelPermissions, SAFE_METHODS%0Afrom django.contrib.auth.models import Group%0A%0A%0Aclass ViewObjectPermissions(DjangoModelPermissions):%0A def has_object_permission(self, request, view, obj):%0A if hasattr(view, 'get_queryset'):%0A queryset = view.get_queryset()%0A else:%0A queryset = getattr(view, 'queryset', None)%0A%0A assert queryset is not None, (%0A 'Cannot apply DjangoObjectPermissions on a view that '%0A 'does not set %60.queryset%60 or have a %60.get_queryset()%60 method.'%0A )%0A%0A model_cls = queryset.model%0A user = request.user%0A# raise Exception(','.join(self.perms))%0A if not user.has_perms(self.perms, obj):%0A # If the user does not have permissions we need to determine if%0A # they have read permissions to see 403, or not, and simply see%0A # a 404 response.%0A%0A if request.method in SAFE_METHODS:%0A # Read permissions already checked and failed, no need%0A # to make another lookup.%0A raise Http404%0A%0A read_perms = self.get_required_object_permissions('GET', model_cls,view)%0A if not user.has_perms(read_perms, obj):%0A raise Http404%0A%0A # Has read permissions.%0A return False%0A%0A return True%0A%0Aclass ManageGroupPermission(ViewObjectPermissions):%0A perms = %5B'manage_group'%5D%0A# def has_object_permission(self, request, view, obj):%0A# if not request.user.groups.filter(id=obj.id).exists():%0A# return False%0A# return super(ManageGroupPermission, self).has_object_permission(request,view,obj)
|
|
6e1d1da7983da2ca43a1185adc2ddb2e2e1b7333
|
Add basic cycles exercices
|
chapter02/cicles.py
|
chapter02/cicles.py
|
Python
| 0.000013
|
@@ -0,0 +1,1220 @@
+#!/usr/bin/env python%0A%0Aprint %22Escribir un ciclo definido para imprimir por pantalla todos los numeros entre 10 y 20.%22%0Aprint %5Bx for x in range(10, 20)%5D%0A%0Aprint %22Escribir un ciclo definido que salude por pantalla a sus cinco mejores amigos/as.%22%0Aprint %5Bamigo for amigo in %5B'Lola', 'Dolores', 'Quique', 'Manuel', 'Manolo'%5D%5D%0A%0Aprint %22Escribir un programa que use un ciclo definido con rango numerico, que pregunte los nombres de sus cinco mejores amigos/as, y los salude.%22%0Afor i in range(5):%0A amigo = raw_input('Escribe el nombre de tu %7B0%7D amigo: '.format(i + 1))%0A print 'Hola %7B0%7D'.format(amigo)%0A%0Aprint %22Escribir un programa que use un ciclo definido con rango numerico, que pregunte los nombres de sus seis mejores amigos/as, y los salude.%22%0Afor i in range(6):%0A amigo = raw_input('Escribe el nombre de tu %7B0%7D amigo: '.format(i + 1))%0A print 'Hola %7B0%7D'.format(amigo)%0A%0Aprint %22Escribir un programa que use un ciclo definido con rango numerico, que averigue a cuantos amigos quieren saludar, les pregunte los nombres de esos amigos/as, y los salude.%22 %0Anum = input(%22A cuantos amigos quieres saludar?%22)%0Afor i in range(num):%0A amigo = raw_input('Escribe el nombre de tu %7B0%7D amigo: '.format(i + 1))%0A print 'Hola %7B0%7D'.format(amigo)%0A
|
|
08c189a643f0b76ad28f9c0e0bc376a0ae202343
|
Create nesting.py
|
codility/nesting.py
|
codility/nesting.py
|
Python
| 0.000002
|
@@ -0,0 +1,232 @@
+%22%22%22%0Ahttps://codility.com/programmers/task/nesting/%0A%22%22%22%0A%0A%0Adef solution(S):%0A balance = 0%0A%0A for char in S:%0A balance += (1 if char == '(' else -1)%0A%0A if balance %3C 0:%0A return 0%0A%0A return int(balance == 0)%0A
|
|
631aa503d1457f823cacd0642a1554ce8f31c1f9
|
add jm server
|
python/jm_server.py
|
python/jm_server.py
|
Python
| 0
|
@@ -0,0 +1,957 @@
+#!/usr/bin/python%0A#%0A# jm_server.py%0A#%0A# Author: Zex %3Ctop_zlynch@yahoo.com%3E%0A#%0A%0Aimport dbus %0Aimport dbus.service%0Afrom basic import * %0A%0Aclass JuiceMachine(dbus.service.FallbackObject):%0A %22%22%22%0A JuiceMachine server%0A %22%22%22%0A def __init__(self):%0A%0A connection = dbus.SessionBus()%0A connection_name = dbus.service.BusName(%0A JM_SERVICE_NAME, bus = connection)%0A%0A dbus.service.Object.__init__(self, connection_name,%0A JM_1_PATH)%0A%0A @dbus.service.method(JM_1_IFACE,%0A in_signature = '', out_signature = 's',%0A path_keyword = 'path')%0A def list(self, path = JM_1_PATH):%0A return 'Service unique name: %5B'+self.connection.get_unique_name()+'%5D'%0A%0A%0Adef start_server():%0A %22%22%22%0A Stadrt juicemachine server%0A %22%22%22%0A dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)%0A global loop%0A%0A obj = JuiceMachine()%0A loop = gobject.MainLoop()%0A connection = dbus.StarterBus()%0A%0A loop.run()%0A%0A%0A
|
|
59a108840f0fb07f60f20bc9ff59a0d194cb0ee3
|
enable import as module
|
__init__.py
|
__init__.py
|
Python
| 0.000001
|
@@ -0,0 +1,218 @@
+%22%22%22%0A.. module:: lmtscripts%0A :platform: Unix%0A :synopsis: useful scripts for EHT observations at LMT%0A%0A.. moduleauthor:: Lindy Blackburn %3Clindylam@gmail.com%3E%0A.. moduleauthor:: Katie Bouman %3Cklbouman@gmail.com%3E%0A%0A%22%22%22%0A
|
|
88b6549b74dd767733cd823de410e00067a79756
|
add test auto updater
|
auto_update_tests.py
|
auto_update_tests.py
|
Python
| 0
|
@@ -0,0 +1,676 @@
+#!/usr/bin/env python%0A%0Aimport os, sys, subprocess, difflib%0A%0Aprint '%5B processing and updating testcases... %5D%5Cn'%0A%0Afor asm in sorted(os.listdir('test')):%0A if asm.endswith('.asm.js'):%0A print '..', asm%0A wasm = asm.replace('.asm.js', '.wast')%0A actual, err = subprocess.Popen(%5Bos.path.join('bin', 'asm2wasm'), os.path.join('test', asm)%5D, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()%0A assert err == '', 'bad err:' + err%0A%0A # verify output%0A if not os.path.exists(os.path.join('test', wasm)):%0A print actual%0A raise Exception('output .wast file does not exist')%0A open(os.path.join('test', wasm), 'w').write(actual)%0A%0Aprint '%5Cn%5B success! %5D'%0A%0A
|
|
322dd59f362a1862c739c5c63cd180bce8655a6d
|
Test to add data
|
AddDataTest.py
|
AddDataTest.py
|
Python
| 0
|
@@ -0,0 +1,278 @@
+__author__ = 'chuqiao'%0Aimport script%0Ascript.addDataToSolrFromUrl(%22http://www.elixir-europe.org:8080/events%22, %22http://www.elixir-europe.org:8080/events%22);%0Ascript.addDataToSolrFromUrl(%22http://localhost/ep/events?state=published&field_type_tid=All%22, %22http://localhost/ep/events%22);%0A
|
|
be9cf41600b2a00494ca34e3b828e7a43d8ae457
|
Create testing.py
|
bcn/utils/testing.py
|
bcn/utils/testing.py
|
Python
| 0.000001
|
@@ -0,0 +1,624 @@
+%22%22%22Utility functions for unittests.%0A%0ANotes%0A-----%0ADefines a function that compares the hash of outputs with the expected output, given a particular seed.%0A%22%22%22%0Afrom __future__ import division, absolute_import%0A%0Aimport hashlib%0A%0Adef assert_consistency(X, true_md5):%0A '''%0A Asserts the consistency between two function outputs based on a hash.%0A%0A Parameters%0A ----------%0A X : ndarray%0A Array to be hashed.%0A true_md5 : str%0A Expected hash.%0A '''%0A m = hashlib.md5()%0A m.update(X)%0A current_md5 = m.hexdigest()%0A print%0A print current_md5, true_md5%0A print%0A assert current_md5 == true_md5%0A
|
|
1fbb3bb85c5f5ad8422fb0058091d67beeeec06c
|
Allow for setting a "BOTO_CONFIG" environment variable which can be set to the location of a configuration file to be loaded in addition to all the standard boto config paths.
|
boto/pyami/config.py
|
boto/pyami/config.py
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import StringIO, os, re
import ConfigParser
import boto
# System-wide config is read first so that per-user and per-process
# settings can override it (later paths win in ConfigParser.read()).
BotoConfigPath = '/etc/boto.cfg'
BotoConfigLocations = [BotoConfigPath]
if 'HOME' in os.environ:
    UserConfigPath = os.path.expanduser('~/.boto')
    BotoConfigLocations.append(UserConfigPath)
else:
    # No home directory (e.g. running as a daemon user); user-level
    # config is unavailable.
    UserConfigPath = None
# Allow a BOTO_CONFIG environment variable pointing at an additional
# config file; it is appended last so its settings take precedence
# over both the system-wide and the per-user files.
if 'BOTO_CONFIG' in os.environ:
    BotoConfigLocations.append(os.path.expanduser(os.environ['BOTO_CONFIG']))
class Config(ConfigParser.SafeConfigParser):
    """Boto's layered configuration.

    By default reads every path in BotoConfigLocations (later files
    override earlier ones); an explicit ``path`` or file object ``fp``
    can be given instead.  All ``get*`` accessors swallow lookup errors
    and return a caller-supplied default rather than raising.
    """
    def __init__(self, path=None, fp=None, do_load=True):
        # These defaults are visible in every section via interpolation.
        ConfigParser.SafeConfigParser.__init__(self, {'working_dir' : '/mnt/pyami',
                                                      'debug' : '0'})
        if do_load:
            if path:
                self.load_from_path(path)
            elif fp:
                self.readfp(fp)
            else:
                self.read(BotoConfigLocations)
    def load_from_path(self, path):
        """Load a config file, honoring ``#import <file>`` directives.

        Imported files are resolved relative to *path* and loaded first,
        so values in *path* itself take precedence.
        """
        # NOTE(review): shadows the 'file' builtin and never closes the
        # handle explicitly.
        file = open(path)
        for line in file.readlines():
            match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
            if match:
                extended_file = match.group(1)
                (dir, file) = os.path.split(path)
                self.load_from_path(os.path.join(dir, extended_file))
        self.read(path)
    def save_option(self, path, section, option, value):
        """
        Write the specified Section.Option to the config file specified by path.
        Replace any previous value. If the path doesn't exist, create it.
        Also add the option to the in-memory config.
        """
        # A scratch parser is used so only this one option is merged into
        # the on-disk file, without the in-memory defaults leaking in.
        config = ConfigParser.SafeConfigParser()
        config.read(path)
        if not config.has_section(section):
            config.add_section(section)
        config.set(section, option, value)
        fp = open(path, 'w')
        config.write(fp)
        fp.close()
        # Mirror the change into this (in-memory) config as well.
        if not self.has_section(section):
            self.add_section(section)
        self.set(section, option, value)
    def save_user_option(self, section, option, value):
        """Persist an option to the per-user config file (~/.boto)."""
        self.save_option(UserConfigPath, section, option, value)
    def save_system_option(self, section, option, value):
        """Persist an option to the system-wide config file (/etc/boto.cfg)."""
        self.save_option(BotoConfigPath, section, option, value)
    def get_instance(self, name, default=None):
        """Return an option from the [Instance] section, or *default*."""
        try:
            val = self.get('Instance', name)
        except:
            val = default
        return val
    def get_user(self, name, default=None):
        """Return an option from the [User] section, or *default*."""
        try:
            val = self.get('User', name)
        except:
            val = default
        return val
    def getint_user(self, name, default=0):
        """Return an int option from the [User] section, or *default*."""
        try:
            val = self.getint('User', name)
        except:
            val = default
        return val
    def get_value(self, section, name, default=None):
        # Backward-compatible alias for get().
        return self.get(section, name, default)
    def get(self, section, name, default=None):
        """Like SafeConfigParser.get, but returns *default* on any error."""
        try:
            val = ConfigParser.SafeConfigParser.get(self, section, name)
        except:
            val = default
        return val
    def getint(self, section, name, default=0):
        """Like SafeConfigParser.getint, but returns int(*default*) on any error."""
        try:
            val = ConfigParser.SafeConfigParser.getint(self, section, name)
        except:
            val = int(default)
        return val
    def getfloat(self, section, name, default=0.0):
        """Like SafeConfigParser.getfloat, but returns float(*default*) on any error."""
        try:
            val = ConfigParser.SafeConfigParser.getfloat(self, section, name)
        except:
            val = float(default)
        return val
    def getbool(self, section, name, default=False):
        """Return True only when the option is present and equals 'true'
        (case-insensitive); any other present value yields False, and a
        missing option yields *default*."""
        if self.has_option(section, name):
            val = self.get(section, name)
            if val.lower() == 'true':
                val = True
            else:
                val = False
        else:
            val = default
        return val
    def setbool(self, section, name, value):
        """Store a boolean as the string 'true' or 'false'."""
        if value:
            self.set(section, name, 'true')
        else:
            self.set(section, name, 'false')
    def dump(self):
        """Print the full configuration, secrets included, to stdout."""
        s = StringIO.StringIO()
        self.write(s)
        print s.getvalue()
    def dump_safe(self, fp=None):
        """Write the config to *fp* with aws_secret_access_key masked out."""
        if not fp:
            fp = StringIO.StringIO()
        for section in self.sections():
            fp.write('[%s]\n' % section)
            for option in self.options(section):
                if option == 'aws_secret_access_key':
                    fp.write('%s = xxxxxxxxxxxxxxxxxx\n' % option)
                else:
                    fp.write('%s = %s\n' % (option, self.get(section, option)))
    def dump_to_sdb(self, domain_name, item_name):
        """Serialize the config into a SimpleDB item, one JSON blob per
        section.  The item is created with ``active`` set to False."""
        import simplejson
        sdb = boto.connect_sdb()
        domain = sdb.lookup(domain_name)
        if not domain:
            domain = sdb.create_domain(domain_name)
        item = domain.new_item(item_name)
        item.active = False
        for section in self.sections():
            d = {}
            for option in self.options(section):
                d[option] = self.get(section, option)
            item[section] = simplejson.dumps(d)
        item.save()
    def load_from_sdb(self, domain_name, item_name):
        """Load configuration previously stored with dump_to_sdb()."""
        import simplejson
        sdb = boto.connect_sdb()
        domain = sdb.lookup(domain_name)
        item = domain.get_item(item_name)
        for section in item.keys():
            if not self.has_section(section):
                self.add_section(section)
            d = simplejson.loads(item[section])
            for attr_name in d.keys():
                attr_value = d[attr_name]
                if attr_value == None:
                    # Represent None as the literal string 'None'.
                    attr_value = 'None'
                if isinstance(attr_value, bool):
                    self.setbool(section, attr_name, attr_value)
                else:
                    self.set(section, attr_name, attr_value)
|
Python
| 0
|
@@ -1382,16 +1382,126 @@
h = None
+%0Aif 'BOTO_CONFIG' in os.environ:%0A BotoConfigLocations.append(os.path.expanduser(os.environ%5B'BOTO_CONFIG'%5D))
%0A%0Aclass
|
41904abd0778719a1586b404c1ca56eb3205f998
|
Include undg/myip to this repo bin.
|
bin/myip.py
|
bin/myip.py
|
Python
| 0
|
@@ -0,0 +1,2239 @@
+#!/usr/bin/python3%0A%0Adef extIp(site): # GETING PUBLIC IP%0A import urllib.request%0A from re import findall%0A%0A ipMask = '%5Cd%7B1,3%7D%5C.%5Cd%7B1,3%7D%5C.%5Cd%7B1,3%7D%5C.%5Cd%7B1,3%7D'%0A if site == 'dyndns':%0A url = 'http://checkip.dyndns.org'%0A regexp = '%3Cbody%3ECurrent IP Address: ('+ipMask+')%3C/body%3E'%0A if site == 'google':%0A url = 'https://www.google.co.uk/search?q=my+ip'%0A regexp = '%3Cw-answer-desktop%3E%3Cdiv class=%22...... ...... .... ...... ......%22 style=%22-webkit-line-clamp:2%22%3E('+ipMask+')%3C/div%3E'%0A%0A%0A req = urllib.request.Request( url, data=None, headers=%7B%0A 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0'%0A %7D%0A )%0A try:%0A # opener = urllib.request.urlopen(url,timeout=10)%0A opener = urllib.request.urlopen(req,timeout=10)%0A except urllib.error.URLError as e:%0A print(e.reason)%0A html = opener.read()%0A%0A ip = findall(regexp,str(html))%5B0%5D%0A return ip%0A%0Adef localIp(): # GETING LOCAL IP FROM SYSTEM%0A import socket%0A get = socket.gethostname()%0A ip = socket.gethostbyname(get)%0A return ip%0A%0Adef flags(): # CLI ARGUMENTS%0A import argparse%0A import sys%0A parser = argparse.ArgumentParser()%0A%0A # FLAGS%0A parser.add_argument('-l', '--local', help='show local ip', action='store_true')%0A parser.add_argument('-p', '--public', help='show public ip', action='store_true')%0A parser.add_argument('-g', '--google', help='use google to check ip, faster but may block ip', action='store_true')%0A parser.add_argument('-v', '--verbose', help='Make output verbose', action='store_true')%0A%0A args = parser.parse_args()%0A # LOCAL IP%0A if args.local:%0A if args.verbose:%0A print('system =%3E Your local IP:', localIp())%0A else:%0A print(localIp())%0A%0A # PUBLIC IP%0A if args.google:%0A site = 'google'%0A args.public = True%0A else:%0A site = 'dyndns'%0A%0A if args.public:%0A if args.verbose:%0A print(site, '=%3E Your public IP:', extIp(site))%0A else:%0A 
print(extIp(site))%0A%0A # IF NO ARGS%0A if len(sys.argv) %3C 2:%0A parser.print_help()%0A print('%5Cnsystem =%3E Your local IP:', localIp())%0A print(site, '=%3E Your public IP:', extIp(site))%0A%0Aflags()%0A
|
|
c5a7e6dc9a98f056a31552e7ace4d150b13b998f
|
Create markdown.py
|
markdown.py
|
markdown.py
|
Python
| 0.000002
|
@@ -0,0 +1,1007 @@
+import os%0Aimport sys%0Aimport markdown%0A%0Afrom cactus.utils import fileList%0A%0A%0Atemplate = %22%22%22%0A%25s%0A%0A%7B%25%25 extends %22%25s%22 %25%25%7D%0A%7B%25%25 block %25s %25%25%7D%0A%0A%25s%0A%0A%7B%25%25 endblock %25%25%7D%0A%22%22%22%0A%0Atitle_template = %22%22%22%0A%7B%25%25 block title %25%25%7D%25s%7B%25%25 endblock %25%25%7D%0A%22%22%22%0A%0ACLEANUP = %5B%5D%0A%0Adef preBuild(site):%0A%09for path in fileList(site.paths%5B'pages'%5D):%0A%0A%09%09if not path.endswith('.md'):%0A%09%09%09continue%0A%0A%09%09md = markdown.Markdown(extensions=%5B'meta'%5D)%0A%0A%09%09with open(path, 'r') as f:%0A%09%09%09html = md.convert(f.read())%0A%0A%09%09metadata = %5B%5D%0A%0A%09%09for k, v in md.Meta.iteritems():%0A%09%09%09if k == 'title':%0A%09%09%09%09pass%0A%09%09%09metadata.append('%25s: %25s' %25 (k, v%5B0%5D))%0A%0A%09%09outPath = path.replace('.md', '.html')%0A%0A%09%09with open(outPath, 'w') as f:%0A%09%09%0A%09%09%09data = template %25 (%0A%09%09%09%09'%5Cn'.join(metadata),%0A%09%09%09%09md.Meta%5B'extends'%5D%5B0%5D,%0A%09%09%09%09md.Meta%5B'block'%5D%5B0%5D,%0A%09%09%09%09html%0A%09%09%09)%0A%09%09%09%0A%09%09%09try:%0A%09%09%09%09data += title_template %25 md.Meta%5B'title'%5D%5B0%5D%0A%09%09%09except KeyError:%0A%09%09%09%09pass%0A%0A%09%09%09f.write(data)%0A%0A%09%09CLEANUP.append(outPath)%0A%0Adef postBuild(site):%0A%09global CLEANUP%0A%09for path in CLEANUP:%0A%09%09print path%0A%09%09os.remove(path)%0A%09CLEANUP = %5B%5D%0A
|
|
5492e1b318ff0af3f1e2b1ed0217ed2744b50b68
|
Add first structure for issue 107 (automatic configuration doc generation)
|
server/src/configuration_doc.py
|
server/src/configuration_doc.py
|
Python
| 0
|
@@ -0,0 +1,2407 @@
+from collections import namedtuple%0A%0ANO_DEFAULT = object()%0AANY_TYPE = object()%0A%0A_Argument = namedtuple('Argument', 'category type default message')%0A%0A_sorted_variables = %5B%5D%0A%0A%0A######################################%0A# %0A# CORE%0A#%0A%0ACORE = 'core'%0A%0AWEBLAB_CORE_SERVER_SESSION_TYPE = 'core_session_type'%0AWEBLAB_CORE_SERVER_SESSION_POOL_ID = 'core_session_pool_id'%0A%0A_sorted_variables.extend(%5B%0A (WEBLAB_CORE_SERVER_SESSION_TYPE, _Argument(CORE, str, 'Memory', %22%22%22What type of session manager the Core Server will use: Memory or MySQL.%22%22%22)),%0A (WEBLAB_CORE_SERVER_SESSION_POOL_ID, _Argument(CORE, str, 'UserProcessingServer', %22%22%22 A unique identifier of the type of sessions, in order to manage them. For instance, if there are four servers (A, B, C and D), the load of users can be splitted in two groups: those being sent to A and B, and those being sent to C and D. A and B can share those sessions to provide fault tolerance (if A falls down, B can keep working from the same point A was) using a MySQL session manager, and the same may apply to C and D. The problem is that if A and B want to delete all the sessions -at the beginning, for example-, but they don't want to delete sessions of C and D, then they need a unique identifier shared for A and B, and another for C and D. 
In this case, %22!UserProcessing_A_B%22 and %22!UserProcessing_C_D%22 would be enough.%22%22%22)),%0A%5D)%0A%0A#####################################%0A# %0A# The rest%0A# %0A%0A%0Avariables = dict(_sorted_variables)%0A%0Aif __name__ == '__main__':%0A categories = set(%5B variable.category for variable in variables.values() %5D)%0A variables_by_category = %7B%7D%0A for category in categories:%0A variables_by_category%5Bcategory%5D = %5B variable for variable in variables if variables%5Bvariable%5D.category == category %5D%0A%0A for category in categories:%0A print %22%7C%7C *Property* %7C%7C *Type* %7C%7C *Default value* %7C%7C *Description* %7C%7C%22%0A for variable, argument in _sorted_variables:%0A if variable in variables_by_category%5Bcategory%5D:%0A print %22%7C%7C %25(variable)s %7C%7C %25(type)s %7C%7C %25(default)s %7C%7C %25(doc)s %7C%7C%22 %25 %7B%0A 'variable' : variable,%0A 'type' : variables%5Bvariable%5D.type.__name__,%0A 'default' : variables%5Bvariable%5D.default,%0A 'doc' : variables%5Bvariable%5D.message%0A %7D%0A%0A
|
|
411f855daa9f06868aa597f84c0b739429d705f4
|
Create bot_read.py
|
bot_read.py
|
bot_read.py
|
Python
| 0.000001
|
@@ -0,0 +1,450 @@
+#!/usr/bin/python%0Aimport praw%0A%0Auser_agent = (%22PyFor Eng bot 0.1%22)%0Ar = praw.Reddit(user_agent=user_agent)%0A%0Asubreddit = r.get_subreddit('python')%0A%0Afor submission in subreddit.get_hot(limit=5):%0A print submission.title%0A print submission.selftext%0A print submission.score%0A%0Asubreddit = r.get_subreddit('learnpython')%0A%0Afor submission in subreddit.get_hot(limit=5):%0A print submission.title%0A print submission.selftext%0A print submission.score%0A
|
|
b62b37db1141221ae735b531bdb46264aadbe2e7
|
add make_requests client in python
|
make_requests.py
|
make_requests.py
|
Python
| 0.000001
|
@@ -0,0 +1,611 @@
+import os%0Aimport sys%0Aimport time%0A%0A%0Adef main(timeout_secs, server_port, iteration_count, file_name):%0A for i in range(iteration_count):%0A start_time_secs = time.time()%0A cmd = 'nc localhost %25d %3C file_list_with_time.txt' %25 server_port%0A rc = os.system(cmd)%0A if rc != 0:%0A sys.exit(1)%0A else:%0A end_time_secs = time.time()%0A total_time_secs = end_time_secs - start_time_secs%0A if total_time_secs %3E timeout_secs:%0A print('***client timeout %25d' %25 total_time_secs)%0A%0A%0Aif __name__=='__main__':%0A main(5, 7000, 1000, 'some_file')%0A%0A
|
|
5794a2d8d2b59a6a37b5af4e8c1adba276c325c4
|
Create TagAnalysis.py
|
TagAnalysis.py
|
TagAnalysis.py
|
Python
| 0
|
@@ -0,0 +1,28 @@
+# Analysis of question tags%0A
|
|
a6a9bb5a365aef9798091335c81b1b793578ed1f
|
Initialize car classifier
|
car_classifier.py
|
car_classifier.py
|
Python
| 0.001911
|
@@ -0,0 +1,769 @@
+class CarClassifier(object):%0A %22%22%22 Classifier for car object%0A Attributes:%0A car_img_dir: path to car images%0A not_car_img_dir: path to not car images%0A sample_size: number of images to be used to train classifier%0A %22%22%22%0A def __init__(self, car_img_dir, not_car_img_dir, sample_size):%0A %22%22%22 Initialize class members%0A Attr:%0A car_img_dir: path to car images%0A not_car_img_dir: path to not car images%0A sample_size: number of images to be used to train classifier%0A %22%22%22%0A self.car_img_dir = car_img_dir%0A self.not_car_img_dir = not_car_img_dir%0A self.sample_size = sample_size%0A%0A def get_features(self):%0A %22%22%22 Extract feature vector from images%0A %22%22%22%0A pass%0A
|
|
1732fe53dc228da64f3536ce2c76b420d8b100dc
|
Create the animation.py module.
|
ch17/animation.py
|
ch17/animation.py
|
Python
| 0
|
@@ -0,0 +1,176 @@
+# animation.py%0A# Animation%0A%22%22%22%0AThis is an example of animation using pygame.%0AAn example from Chapter 17 of%0A'Invent Your Own Games With Python' by Al Sweigart%0A%0AA.C. LoGreco%0A%22%22%22%0A
|
|
248a756cd6ff44eca6e08b3e976bc2ae027accd4
|
Add memory ok check
|
chassis_memory.py
|
chassis_memory.py
|
Python
| 0.000001
|
@@ -0,0 +1,728 @@
+import re%0Aimport subprocess%0A%0Afrom maas_common import status_err, status_ok, metric_bool%0A%0AOKAY = re.compile('(?:Health%7CStatus)%5Cs+:%5Cs+(%5Cw+)')%0A%0A%0Adef chassis_memory_report():%0A %22%22%22Return the report as a string.%22%22%22%0A return subprocess.check_output(%5B'omreport', 'chassis', 'memory'%5D)%0A%0A%0Adef memory_okay(report):%0A %22%22%22Determine if the installed memory array is okay.%0A%0A :returns: True if all %22Ok%22, False otherwise%0A :rtype: bool%0A %22%22%22%0A return all(v.lower() == 'ok' for v in OKAY.findall(report))%0A%0A%0Adef main():%0A try:%0A report = chassis_memory_report()%0A except OSError as e:%0A status_err(str(e))%0A%0A status_ok()%0A metric_bool('memory_okay', memory_okay(report))%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
260e2b6d4820ce008d751bc21289ece997247d05
|
add source
|
sqlalchemy_fulltext/__init__.py
|
sqlalchemy_fulltext/__init__.py
|
Python
| 0
|
@@ -0,0 +1,2455 @@
+# -*- coding: utf-8 -*-s%0Aimport re%0A%0Afrom sqlalchemy import event%0Afrom sqlalchemy.schema import DDL%0Afrom sqlalchemy.orm.mapper import Mapper%0Afrom sqlalchemy.ext.compiler import compiles%0Afrom sqlalchemy.ext.declarative import declared_attr%0Afrom sqlalchemy.sql.expression import ClauseElement%0A%0AMYSQL = %22mysql%22%0AMYSQL_BUILD_INDEX_QUERY = %22%22%22%0A ALTER TABLE %7B0.__tablename__%7D%0A ADD FULLTEXT (%7B1%7D)%0A %22%22%22%0AMYSQL_MATCH_AGAINST = %22%22%22%0A MATCH (%7B0%7D)%0A AGAINST (%22%7B1%7D%22)%0A %22%22%22%0A%0Adef escape_quote(string):%0A return re.sub(r%22%5B%5C%22%5C'%5D+%22, %22%22, string)%0A%0A%0Aclass FullTextSearch(ClauseElement):%0A %22%22%22%0A Search FullText%0A :param against: the search query%0A :param table: the table needs to be query%0A%0A FullText support with in query, i.e.%0A %3E%3E%3E from sqlalchemy_fulltext import FullTextSearch%0A %3E%3E%3E session.query(Foo).filter(FullTextSearch('adfadf', Foo))%0A %22%22%22%0A def __init__(self, against, model):%0A self.model = model%0A self.against = escape_quote(against)%0A%0A%0A@compiles(FullTextSearch, MYSQL)%0Adef __mysql_fulltext_search(element, compiler, **kw):%0A assert issubclass(element.model, FullText), %22%7B0%7D not FullTextable%22.format(element.model)%0A%0A return MYSQL_MATCH_AGAINST.format(%22,%22.join(%0A element.model.__fulltext_columns__),%0A element.against)%0A%0A%0Aclass FullText(object):%0A %22%22%22%0A FullText Minxin object for SQLAlchemy%0A %22%22%22%0A %0A __fulltext_columns__ = tuple()%0A%0A @classmethod%0A def build_fulltext(cls):%0A %22%22%22%0A build up fulltext index after table is created%0A %22%22%22%0A if FullText not in cls.__bases__:%0A return%0A assert cls.__fulltext_columns__, %22Model:%7B0.__name__%7D No FullText columns defined%22.format(cls)%0A%0A event.listen(cls.__table__,%0A 'after_create',%0A DDL(MYSQL_BUILD_INDEX_QUERY.format(cls,%0A %22, %22.join((escape_quote(c)%0A for c in cls.__fulltext_columns__)))%0A )%0A )%0A @declared_attr%0A 
def __contains__(*arg):%0A print arg%0A return True%0A%0Adef __build_fulltext_index(mapper, class_): %0A if issubclass(class_, FullText):%0A class_.build_fulltext()%0A%0A%0Aevent.listen(Mapper, 'instrument_class', __build_fulltext_index)%0A
|
|
eb1ba44a9c00303bbf8ff20b4b489a6058a4ab1d
|
Fix Buffer.__len__
|
neovim/buffer.py
|
neovim/buffer.py
|
from util import RemoteMap
class Buffer(object):
    """List-like wrapper around a remote Nvim buffer.

    Indexing, slicing, iteration and append delegate to the remote
    buffer methods (``get_line``, ``get_slice``, ``set_slice``, ...).
    Following Vim conventions, an open-ended slice stop maps to -1,
    meaning "through the last line" (inclusive).
    """
    def __len__(self):
        # Bug fix: report the number of lines in *this* buffer, not the
        # number of buffers open in the editor (get_buffer_count).
        return self.get_length()
    def __getitem__(self, idx):
        """Return one line for an int index, or a list of lines for a slice."""
        if not isinstance(idx, slice):
            return self.get_line(idx)
        include_end = False
        start = idx.start
        end = idx.stop
        if start is None:
            start = 0
        if end is None:
            # Open-ended slice: -1 with include_end means "last line, inclusive".
            end = -1
            include_end = True
        return self.get_slice(start, end, True, include_end)
    def __setitem__(self, idx, lines):
        """Replace lines; assigning None deletes the addressed line(s)."""
        if not isinstance(idx, slice):
            if lines is None:
                return self.del_line(idx)
            else:
                return self.set_line(idx, lines)
        if lines is None:
            # Deleting a slice is replacing it with no lines.
            lines = []
        include_end = False
        start = idx.start
        end = idx.stop
        if start is None:
            start = 0
        if end is None:
            end = -1
            include_end = True
        return self.set_slice(start, end, True, include_end, lines)
    def __iter__(self):
        """Yield each line of the buffer, in order."""
        for i in xrange(len(self)):
            yield self.get_line(i)
    def append(self, lines, index=-1):
        """Append a line or a list of lines after *index* (default: end)."""
        if isinstance(lines, basestring):
            lines = [lines]
        self.insert(index, lines)
    def range(self, start, end):
        """Return a Range view over lines *start*..*end* of this buffer."""
        return Range(self, start, end)
    @property
    def name(self):
        """Buffer name; settable via the setter below."""
        return self.get_name()
    @name.setter
    def name(self, value):
        return self.set_name(value)
    @property
    def number(self):
        """Buffer number."""
        return self.get_number()
    @property
    def vars(self):
        """Lazily-built mapping proxy over buffer variables."""
        if not hasattr(self, '_vars'):
            self._vars = RemoteMap(lambda k: self.get_var(k),
                                   lambda k, v: self.set_var(k, v))
        return self._vars
    @property
    def options(self):
        """Lazily-built mapping proxy over buffer options."""
        if not hasattr(self, '_options'):
            self._options = RemoteMap(lambda k: self.get_option(k),
                                      lambda k, v: self.set_option(k, v))
        return self._options
    @property
    def valid(self):
        """True while the remote buffer still exists."""
        return self.is_valid()
class Range(object):
    """A view over a contiguous span of lines of a buffer.

    Indices are relative to the start of the range; negative or
    out-of-bounds indices clamp to the last line of the range.
    ``start``/``end`` arguments are 1-based and inclusive; internally a
    zero-based start and exclusive end are kept.
    """
    def __init__(self, buffer, start, end):
        self._buffer = buffer
        self.start = start - 1
        self.end = end
    def __len__(self):
        # Number of lines covered by this range.
        return self.end - self.start
    def __getitem__(self, idx):
        """Return one line (int index) or a list of lines (slice)."""
        if isinstance(idx, slice):
            lo = self._normalize_index(idx.start)
            hi = self._normalize_index(idx.stop)
            lo = self.start if lo is None else lo
            hi = self.end if hi is None else hi
            return self._buffer[lo:hi]
        return self._buffer[self._normalize_index(idx)]
    def __setitem__(self, idx, lines):
        """Replace one line (int index) or a span of lines (slice)."""
        if isinstance(idx, slice):
            lo = self._normalize_index(idx.start)
            hi = self._normalize_index(idx.stop)
            lo = self.start if lo is None else lo
            hi = self.end if hi is None else hi
            self._buffer[lo:hi] = lines
        else:
            self._buffer[self._normalize_index(idx)] = lines
    def __iter__(self):
        """Yield each line of the range, in order."""
        pos = self.start
        while pos < self.end:
            yield self._buffer[pos]
            pos += 1
    def append(self, lines, i=None):
        """Append *lines* to the underlying buffer at position *i*
        (default: the end of this range)."""
        pos = self._normalize_index(i)
        self._buffer.append(lines, self.end if pos is None else pos)
    def _normalize_index(self, index):
        # Map a range-relative index to a buffer index, clamping
        # negative and past-the-end values to the range's last line.
        if index is None:
            return None
        if index < 0:
            return self.end - 1
        shifted = index + self.start
        return shifted if shifted < self.end else self.end - 1
|
Python
| 0.000133
|
@@ -90,29 +90,18 @@
elf.
-_vim.get_buffer_count
+get_length
()%0A%0A
|
257a328745b9622713afa218940d2cd820987e93
|
Add a super simple color correction client example
|
examples/color-correction-ui.py
|
examples/color-correction-ui.py
|
Python
| 0
|
@@ -0,0 +1,997 @@
+#!/usr/bin/env python%0A#%0A# Simple example color correction UI.%0A# Talks to an fcserver running on localhost.%0A#%0A# Micah Elizabeth Scott%0A# This example code is released into the public domain.%0A#%0A%0Aimport Tkinter as tk%0Aimport socket%0Aimport json%0Aimport struct%0A%0As = socket.socket()%0As.connect(('localhost', 7890))%0Aprint %22Connected to OPC server%22%0A%0Adef setGlobalColorCorrection(**obj):%0A%09msg = json.dumps(obj)%0A%09s.send(struct.pack(%22%3EBBH%22, 0, 0xF0, len(msg)) + msg)%0A%0Adef update(_):%0A%09setGlobalColorCorrection(%0A%09%09gamma = gamma.get(),%0A%09%09whitepoint = %5B%0A%09%09%09red.get(),%0A%09%09%09green.get(),%0A%09%09%09blue.get(),%0A%09%09%5D)%0A%0Adef slider(name, from_, to):%0A%09s = tk.Scale(root, label=name, from_=from_, to=to, resolution=0.01,%0A%09%09showvalue='yes', orient='horizontal', length=400, command=update)%0A%09s.set(1.0)%0A%09s.pack()%0A%09return s%0A%0Aroot = tk.Tk()%0Aroot.title(%22Fadecandy Color Correction Example%22)%0A%0Agamma = slider(%22Gamma%22, 0.2, 3.0)%0Ared = slider(%22Red%22, 0.0, 1.5)%0Agreen = slider(%22Green%22, 0.0, 1.5)%0Ablue = slider(%22Blue%22, 0.0, 1.5)%0A%0Aroot.mainloop()%0A
|
|
2359d7f6140b7b8292c3d9043064a9ee195ecebb
|
add module for storing repeated constants
|
code/constants.py
|
code/constants.py
|
Python
| 0.000001
|
@@ -0,0 +1,164 @@
+%22%22%22Module for constants and conversion factors.%22%22%22%0A%0A__author__ = 'Salman Hashmi, Ryan Keenan'%0A__license__ = 'BSD License'%0A%0A%0ATO_DEG = 180./np.pi%0ATO_RAD = np.pi/180.%0A
|
|
f40fb7a27934bbd2e6fe758b33d9dcc567858c8e
|
make sure to download MTL file from amazon (done by @drewbo)
|
landsat/downloader.py
|
landsat/downloader.py
|
# Landsat Util
# License: CC0 1.0 Universal
from os.path import join, exists, getsize
from homura import download as fetch
import requests
from utils import check_create_folder
from mixins import VerbosityMixin
import settings
class RemoteFileDoesntExist(Exception):
    """Raised when a requested file is not available on the remote store
    (see Downloader.google_storage / Downloader.amazon_s3)."""
    pass
class IncorrectSceneId(Exception):
    """Raised by Downloader.scene_interpreter for a malformed sceneID."""
    pass
class Downloader(VerbosityMixin):
    """Downloads Landsat scenes from Google Storage or Amazon S3."""

    def __init__(self, verbose=False, download_dir=None):
        self.download_dir = download_dir if download_dir else settings.DOWNLOAD_DIR
        self.google = settings.GOOGLE_STORAGE
        self.s3 = settings.S3_LANDSAT

        # Make sure download directory exists
        check_create_folder(self.download_dir)

    def download(self, scenes, bands=None):
        """
        Download scenes from Google Storage, or Amazon S3 if bands are provided

        @params
        scenes - A list of sceneIDs
        bands - A list of bands
        """

        if isinstance(scenes, list):
            for scene in scenes:
                if bands:
                    if isinstance(bands, list):
                        # Create a folder to download the specific bands into
                        path = check_create_folder(join(self.download_dir, scene))
                        try:
                            for band in bands:
                                self.amazon_s3(scene, band, path)
                        except RemoteFileDoesntExist:
                            # Fall back to the full scene bundle on Google
                            # Storage when a band is missing from S3.
                            self.google_storage(scene, self.download_dir)
                    else:
                        raise Exception('Expected bands list')
                # NOTE(review): this call runs unconditionally, so the full
                # scene archive is fetched even when per-band downloads
                # succeeded — confirm intended (an `else:` may be missing).
                self.google_storage(scene, self.download_dir)

            return True

        else:
            raise Exception('Expected sceneIDs list')

    def google_storage(self, scene, path):
        """ Google Storage Downloader: fetches the whole scene tarball. """
        sat = self.scene_interpreter(scene)

        filename = scene + '.tar.bz'
        url = self.google_storage_url(sat)

        if self.remote_file_exists(url):
            return self.fetch(url, path, filename)

        else:
            raise RemoteFileDoesntExist('%s is not available on Google Storage' % filename)

    def amazon_s3(self, scene, band, path):
        """ Amazon S3 downloader: fetches a single band GeoTIFF. """
        sat = self.scene_interpreter(scene)

        filename = '%s_B%s.TIF' % (scene, band)
        url = self.amazon_s3_url(sat, filename)

        if self.remote_file_exists(url):
            return self.fetch(url, path, filename)

        else:
            raise RemoteFileDoesntExist('%s is not available on Amazon S3' % filename)

    def fetch(self, url, path, filename):
        """Download *url* into *path*/*filename*.

        Returns False (skipping the transfer) when a local file of the
        same name and remote size already exists, True otherwise.
        """

        self.output('Downloading: %s' % filename, normal=True, arrow=True)

        if exists(join(path, filename)):
            size = getsize(join(path, filename))
            if size == self.get_remote_file_size(url):
                self.output('%s already exists on your system' % filename, normal=True, color='green', indent=1)
                return False

        # `fetch` here is homura's download(), imported at module level.
        fetch(url, path)
        self.output('stored at %s' % path, normal=True, color='green', indent=1)

        return True

    def google_storage_url(self, sat):
        """
        Return a google storage url that contains the scene provided

        @params
        sat - expects an object created by scene_interpreter method
        """
        filename = sat['scene'] + '.tar.bz'
        return join(self.google, sat['sat'], sat['path'], sat['row'], filename)

    def amazon_s3_url(self, sat, filename):
        """
        Return an amazon s3 url that contains the scene and band provided

        @params
        sat - expects an object created by scene_interpreter method
        """
        return join(self.s3, sat['sat'], sat['path'], sat['row'], sat['scene'], filename)

    def remote_file_exists(self, url):
        """Return True when a HEAD request to *url* answers 200."""
        status = requests.head(url).status_code

        if status == 200:
            return True
        else:
            return False

    def get_remote_file_size(self, url):
        """ Gets the filesize of a remote file via a HEAD request. """
        headers = requests.head(url).headers
        return int(headers['content-length'])

    def scene_interpreter(self, scene):
        """ Convert a 21-character sceneID into path, row and satellite. """
        anatomy = {
            'path': None,
            'row': None,
            'sat': None,
            'scene': scene
        }

        if isinstance(scene, str) and len(scene) == 21:
            anatomy['path'] = scene[3:6]
            anatomy['row'] = scene[6:9]
            anatomy['sat'] = 'L' + scene[2:3]

            return anatomy
        else:
            raise IncorrectSceneId('Received incorrect scene')
if __name__ == '__main__':
    # Ad-hoc manual test: download bands 5 and 4 of one scene from S3.
    d = Downloader()
    # d.download(['LC81990242015046LGN00', 'LC80030172015001LGN00'])
    d.download(['LC80030172015001LGN00'], bands=[5, 4])
|
Python
| 0
|
@@ -1244,16 +1244,189 @@
try:%0A
+ # Always grab MTL.txt if bands are specified%0A bands_plus = bands%0A bands_plus.append('MTL')%0A
@@ -1458,24 +1458,29 @@
and in bands
+_plus
:%0A
@@ -2458,30 +2458,125 @@
-filename = '%25s_B%25s.TIF
+if band != 'MTL':%0A filename = '%25s_B%25s.TIF' %25 (scene, band)%0A else:%0A filename = '%25s_%25s.txt
' %25
|
07da1b8a2d0a8c8e28db3c9bed9de1d9f9a7ad6f
|
Add base solver class
|
base_solver.py
|
base_solver.py
|
Python
| 0
|
@@ -0,0 +1,572 @@
+#!/usr/bin/env python%0A# encoding: utf-8%0A%0Afrom datetime import datetime%0A%0Aclass BaseSolver(object):%0A task = None%0A%0A best_solution = None%0A best_distance = float('inf')%0A search_time = None%0A%0A def __init__(self, task):%0A self.task = task%0A%0A def run(self):%0A start_time = datetime.now()%0A self.best_solution, self.best_distance = self.run_search()%0A finish_time = datetime.now()%0A%0A self.search_time = finish_time - start_time%0A%0A def run_search(self):%0A # dummy - this is where one should implement the algorithm%0A pass%0A
|
|
ff76d47f210e97f3ac4ba58a2c3eecb045b28cde
|
Create RateLimit.py
|
Cogs/RateLimit.py
|
Cogs/RateLimit.py
|
Python
| 0.000001
|
@@ -0,0 +1,1357 @@
+import asyncio%0Aimport discord%0Aimport os%0Afrom datetime import datetime%0Afrom discord.ext import commands%0A%0A# This is the RateLimit module. It keeps users from being able to spam commands%0A%0Aclass RateLimit:%0A%0A%09# Init with the bot reference, and a reference to the settings var%0A%09def __init__(self, bot, settings):%0A%09%09self.bot = bot%0A%09%09self.settings = settings%0A%09%09self.commandCooldown = 5 # 5 seconds between commands%0A%09%09%0A%09def canRun( firstTime, threshold ):%0A%09%09# Check if enough time has passed since the last command to run another%0A%09%09currentTime = int(time.time())%0A%09%09if currentTime %3E (int(firstTime) + int(threshold)):%0A%09%09%09return True%0A%09%09else:%0A%09%09%09return False%0A%0A%09async def message(self, message):%0A%09%09# Check the message and see if we should allow it - always yes.%0A%09%09# This module doesn't need to cancel messages - but may need to ignore%0A%09%09ignore = False%0A%09%09%0A%09%09# Check if we can run commands%0A%09%09lastTime = int(self.settings.getUserStat(message.author, message.server, %22LastCommand%22))%0A%09%09if not self.canRun( lastTime, self.commandCooldown ):%0A%09%09%09# We can't run commands yet - ignore%0A%09%09%09ignore = True%0A%09%09%0A%09%09return %7B 'Ignore' : ignore, 'Delete' : False %7D%0A%09%09%0A%09async def oncommand(self, command, ctx):%0A%09%09# Let's grab the user who had a completed command - and set the timestamp%0A%09%09self.settings.setUserStat(ctx.message.author, ctx.message.server, %22LastCommand%22, int(time.time()))%0A
|
|
d2b7f191519835a3a8f0e8a32fb52c7b354b0e33
|
Add Slurp command
|
Commands/Slurp.py
|
Commands/Slurp.py
|
Python
| 0.000005
|
@@ -0,0 +1,2090 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0ACreated on Aug 31, 2015%0A%0A@author: Tyranic-Moron%0A%22%22%22%0A%0Afrom IRCMessage import IRCMessage%0Afrom IRCResponse import IRCResponse, ResponseType%0Afrom CommandInterface import CommandInterface%0A%0Afrom Utils import WebUtils%0A%0Afrom bs4 import BeautifulSoup%0A%0A%0Aclass Slurp(CommandInterface):%0A triggers = %5B'slurp'%5D%0A help = %22slurp %3Cattribute%3E %3Curl%3E %3Ccss selector%3E - scrapes the given attribute from the tag selected at the given url%22%0A%0A def execute(self, message):%0A %22%22%22%0A @type message: IRCMessage%0A %22%22%22%0A if len(message.ParameterList) %3C 3:%0A return IRCResponse(ResponseType.Say, u%22Not enough parameters, usage: %7B%7D%22.format(self.help), message.ReplyTo)%0A%0A prop, url, selector = (message.ParameterList%5B0%5D, message.ParameterList%5B1%5D, u%22 %22.join(message.ParameterList%5B2:%5D))%0A%0A page = WebUtils.fetchURL(url)%0A if page is None:%0A return IRCResponse(ResponseType.Say, u%22Problem fetching %7B%7D%22.format(url), message.ReplyTo)%0A%0A soup = BeautifulSoup(page.body)%0A tag = soup.select_one(selector)%0A%0A if tag is None:%0A return IRCResponse(ResponseType.Say,%0A u%22'%7B%7D' does not select a tag at %7B%7D%22.format(selector, url),%0A message.ReplyTo)%0A%0A specials = %7B%0A 'name': tag.name,%0A 'text': tag.text%0A %7D%0A%0A if prop in specials:%0A value = specials%5Bprop%5D%0A elif prop in tag:%0A value = tag%5Bprop%5D%0A else:%0A return IRCResponse(ResponseType.Say,%0A u%22The tag selected by '%7B%7D' (%7B%7D) does not have attribute '%7B%7D'%22.format(selector,%0A tag.name,%0A prop),%0A message.ReplyTo)%0A%0A if not isinstance(value, basestring):%0A value = u%22 %22.join(value)%0A%0A return IRCResponse(ResponseType.Say, value, message.ReplyTo)%0A%0A
|
|
96f6a2c5813ae605a48995f6c6fb89234b2eba07
|
remove risky username logic
|
dj/scripts/post_vimeo.py
|
dj/scripts/post_vimeo.py
|
#!/usr/bin/python
# posts to vimeo
import vimeo_uploader as uploader
import os
# import pw
from process import process
from main.models import Show, Location, Episode, Raw_File, Cut_List
class post(process):
ready_state = 4
def process_ep(self, ep):
if self.options.verbose: print ep.id, ep.name
if not ep.released: # and not self.options.release_all:
# --release-all will force the upload, overrides ep.released
# if someone uncomments the # and not... above.
if self.options.verbose: print "not released:", ep.released
return False
loc = ep.location
show = ep.show
client = show.client
descriptions = [ep.authors, ep.description, show.description, client.description]
descriptions = [d for d in descriptions if d]
description = "\n".join(descriptions)
meta = {
'title': ep.name,
'description': description,
}
tags = [ self.options.topics, client.slug, client.tags, show.slug, ep.tags ]
authors = ep.authors.split(',')
authors = [ a.replace(' ','') for a in authors ]
tags += authors
meta['tags'] = [tag for tag in tags if tag]
# if ep.license:
# meta['license'] = str(ep.license)
# elif self.options.license:
# meta['license'] = self.options.license
if self.options.rating:
meta['rating'] = self.options.rating
if self.options.category:
meta['category'] = self.options.category
# http://gdata.youtube.com/schemas/2007/categories.cat
meta['category'] = "Education"
if self.options.hidden:
meta['hidden'] = self.options.hidden
if ep.location.lat and ep.location.lon:
meta['latlon'] = (ep.location.lat, ep.location.lon)
# private is implemnted different in youtube and blip.
# plit want's a number, yt wants Truthy
meta['hidden'] = ep.hidden or self.options.hidden
# find a thumbnail
# check for episode.tumb used in the following:
# 1. absololute path (dumb?)
# 2. in tumb dir (smart)
# 3. relitive to show dir (not completely wonky)
# 4. in tumb dir, same name as episode.png (smart)
# if none of those, then grab the thumb from the first cut list file
found=False
for thumb in [
ep.thumbnail,
os.path.join(self.show_dir,'thumb',ep.thumbnail),
os.path.join(self.show_dir,ep.thumbnail),
os.path.join(self.show_dir,'thumb',ep.slug+".png"),]:
if os.path.isfile(thumb):
found=True
break
if not found:
for cut in Cut_List.objects.filter(
episode=ep,apply=True).order_by('sequence'):
basename = cut.raw_file.basename()
thumb=os.path.join(self.episode_dir, "%s.png"%(basename))
if os.path.exists(thumb):
found=True
break
if not found: thumb=''
# get a list of video files to upload
# blip supports multiple formats, youtube does not.
# youtube and such will only upload the first file.
files = []
for ext in self.options.upload_formats:
src_pathname = os.path.join( self.show_dir, ext, "%s.%s"%(ep.slug,ext))
files.append({'ext':ext,'pathname':src_pathname})
if self.options.debug_log:
# put the mlt and .sh stuff into the log
# blip and firefox want it to be xml, so jump though some hoops
log = "<log>\n"
mlt_pathname = os.path.join( self.show_dir, 'tmp', "%s.mlt"%(ep.slug,))
log += open(mlt_pathname).read()
sh_pathname = os.path.join( self.show_dir, 'tmp', "%s.sh"%(ep.slug,))
shs = open(sh_pathname).read().split('\n')
shs = [ "<line>\n%s\n</line>\n" % l for l in shs if l]
log += "<shell_script>\n%s</shell_script>\n" % ''.join(shs)
log += "</log>"
# blip says: try something like a tt or srt file
log_pathname = os.path.join( self.show_dir, 'tmp', "%s.tt"%(ep.slug,))
log_file=open(log_pathname,'w').write(log)
# add the log to the list of files to be posted
files.append({'ext':'tt', 'pathname':log_pathname})
# look for username in [options, client, ]
# password always comes from pw.py
host_user = self.options.host_user if self.options.host_user \
else client.host_user if client.host_user
if self.options.test:
print 'test mode:'
print 'files %s' % files
print 'meta %s' % meta
print 'thumb %s' % thumb
print
else:
uploader = uploader.Uploader()
uploader.files = files
uploader.thumb = thumb
uploader.meta = meta
uploader.user = host_user
uploader.old_url = ep.host_url # for replacing.
ret = uploader.upload()
if ret:
if self.options.verbose: print uploader.new_url
ep.host_url = uploader.new_url
self.last_url = uploader.new_url # hook for tests so that it can be browsed
print dir(uploader)
import code
# code.interact(local=locals())
else:
print "error!"
# tring to fix the db timeout problem
# ep=Episode.objects.get(pk=ep.id)
try:
ep.save()
except DatabaseError, e:
from django.db import connection
connection.connection.close()
connection.connection = None
ep.save()
return ret
def add_more_options(self, parser):
parser.add_option('--host-user',
help='video host account name (pass stored in pw.py)')
parser.add_option('--rating',
help="TV rating")
parser.add_option('-T', '--topics',
help="list of topics (user defined)")
parser.add_option('-C', '--category',
help = "-C list' to see full list" )
parser.add_option('--hidden',
help="availability on host: 0=Available, 1=Hidden, 2=Available to family, 4=Available to friends/family.")
parser.add_option('--release-all', action="store_true",
help="ignore the released setting.")
def add_more_option_defaults(self, parser):
parser.set_defaults(category="Education")
if __name__ == '__main__':
    # script entry point: build the poster and hand control to process.main
    poster = post()
    poster.main()
|
Python
| 0.997312
|
@@ -4352,16 +4352,39 @@
st_user
+%0A #
if clien
|
8b6020384e20305411d2bbb587a2504ef302a17c
|
Create calculatepi.py
|
calculatepi.py
|
calculatepi.py
|
Python
| 0.000002
|
@@ -0,0 +1,93 @@
+%22%22%22%0Acalculatepi.py%0AAuthor: %3Cyour name here%3E%0ACredit: %3Clist sources used, if any%3E%0AAssignment:%0A%0A
|
|
0bc5b307d5121a3cacac159fa27ab42f97e208aa
|
Add database module
|
rabbithole/db.py
|
rabbithole/db.py
|
Python
| 0.000001
|
@@ -0,0 +1,687 @@
+# -*- coding: utf-8 -*-%0A%0Aimport logging%0A%0Afrom sqlalchemy import (%0A create_engine,%0A text,%0A)%0A%0Alogger = logging.getLogger(__name__)%0A%0A%0Aclass Database(object):%0A %22%22%22Database writer.%0A%0A :param url: Database connection string%0A :type url: str%0A%0A %22%22%22%0A%0A def __init__(self, url, insert_query):%0A %22%22%22Connect to database.%22%22%22%0A engine = create_engine(url)%0A%0A self.connection = engine.connect()%0A logger.debug('Connected to: %25r', url)%0A%0A self.insert_query = text(insert_query)%0A%0A def insert(self, rows):%0A %22%22%22Insert rows in database.%22%22%22%0A self.connection.execute(self.insert_query, rows)%0A logger.debug('Inserted %25d rows', len(rows))%0A
|
|
b62ed7a60349536457b03a407e99bae3e3ff56e8
|
install issue
|
erpnext/setup/install.py
|
erpnext/setup/install.py
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
default_mail_footer = """<div style="padding: 7px; text-align: right; color: #888"><small>Sent via
<a style="color: #888" href="http://erpnext.org">ERPNext</a></div>"""
def after_install():
	"""Post-install hook: create the Analytics role, seed single-DocType
	defaults, add the Compact Item Print field and grant every role to
	the Administrator account."""
	frappe.get_doc({'doctype': "Role", "role_name": "Analytics"}).insert()
	set_single_defaults()
	create_compact_item_print_custom_field()
	# give Administrator every role defined in the system
	from frappe.desk.page.setup_wizard.setup_wizard import add_all_roles_to
	add_all_roles_to("Administrator")
	frappe.db.commit()
def check_setup_wizard_not_completed():
	"""Print a hint and return False if the setup wizard already ran.

	NOTE(review): falls through returning None (not True) on a fresh
	site, so callers should compare against False explicitly rather
	than rely on truthiness.
	"""
	if frappe.db.get_default('desktop:home_page') == 'desktop':
		print
		print "ERPNext can only be installed on a fresh site where the setup wizard is not completed"
		print "You can reinstall this site (after saving your data) using: bench --site [sitename] reinstall"
		print
		return False
def set_single_defaults():
	"""Copy each single DocType's declared field defaults into the
	corresponding single document.

	Singles that raise MandatoryError (a required field is still unset)
	are deliberately left untouched.
	"""
	for dt in frappe.db.sql_list("""select name from `tabDocType` where issingle=1"""):
		default_values = frappe.db.sql("""select fieldname, `default` from `tabDocField`
			where parent=%s""", dt)
		if default_values:
			try:
				b = frappe.get_doc(dt, dt)
				for fieldname, value in default_values:
					b.set(fieldname, value)
				b.save()
			except frappe.MandatoryError:
				pass
	frappe.db.set_default("date_format", "dd-mm-yyyy")
def create_compact_item_print_custom_field():
	"""Add a "Compact Item Print" checkbox to Print Settings, enabled
	by default and placed after the letterhead option."""
	from frappe.custom.doctype.custom_field.custom_field import create_custom_field
	field_def = {
		'label': _('Compact Item Print'),
		'fieldname': 'compact_item_print',
		'fieldtype': 'Check',
		'default': 1,
		'insert_after': 'with_letterhead',
	}
	create_custom_field('Print Settings', field_def)
|
Python
| 0.000001
|
@@ -1031,79 +1031,120 @@
in
-frappe.db.sql_list(%22%22%22select name from %60tabDocType%60 where issingle=1%22%22%22
+('Accounts Settings', 'Print Settings', 'HR Settings', 'Buying Settings',%0A%09%09'Selling Settings', 'Stock Settings'
):%0A%09
@@ -1439,16 +1439,59 @@
%09%09%09%09pass
+%0A%09%09%09except frappe.ValidationError:%0A%09%09%09%09pass
%0A%0A%09frapp
|
43e823ad9ea7c44b49c883e8633dc488dff0d2ca
|
Add end_time for indexing.
|
events/search_indexes.py
|
events/search_indexes.py
|
from haystack import indexes
from .models import Event
from django.utils.translation import get_language
from django.utils.html import strip_tags
class EventIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for Event objects."""
    # Full-text document field, rendered from a search template.
    text = indexes.CharField(document=True, use_template=True)
    # Edge n-grams of the event name, for autocomplete suggestions.
    autosuggest = indexes.EdgeNgramField(model_attr='name')

    def get_updated_field(self):
        # Haystack uses this to find objects changed since the last update.
        return 'origin_last_modified_time'

    def get_model(self):
        return Event

    def prepare(self, obj):
        """Strip HTML tags from the description before indexing."""
        #obj.lang_keywords = obj.keywords.filter(language=get_language())
        obj.description = strip_tags(obj.description)
        return super(EventIndex, self).prepare(obj)
|
Python
| 0
|
@@ -320,16 +320,72 @@
='name')
+%0A end_time = indexes.DateField(model_attr='end_time')
%0A%0A de
|
a85e444e9411f9f768db7c3e1b589b737c01b0a0
|
add mnist examples
|
TensorFlow/ex3/test_mnist.py
|
TensorFlow/ex3/test_mnist.py
|
Python
| 0
|
@@ -0,0 +1,1263 @@
+#!/usr/bin/env python%0Afrom __future__ import absolute_import%0Afrom __future__ import division%0Afrom __future__ import print_function%0A%0Aimport gzip%0Aimport os%0Aimport tempfile%0A%0Aimport numpy%0Afrom six.moves import urllib%0Afrom six.moves import xrange # pylint: disable=redefined-builtin%0Aimport tensorflow as tf%0Afrom tensorflow.examples.tutorials.mnist import input_data%0A%0Amnist = input_data.read_data_sets(%22/tmp/tensorflow/mnist/input_data%22, one_hot=True)%0A%0A%0Ax = tf.placeholder(%22float%22, %5BNone, 784%5D)%0A%0Aw = tf.Variable(tf.zeros(%5B784, 10%5D))%0Ab = tf.Variable(tf.zeros(%5B10%5D))%0A%0Ay = tf.nn.softmax(tf.matmul(x, w))%0A%0Ay_ = tf.placeholder(%22float%22, %5BNone, 10%5D)%0A%0Across_entropy = - tf.reduce_sum(y_ * tf.log(y))%0A%0Atrain_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)%0A%0Ainit = tf.initialize_all_variables()%0A%0A%0Asess = tf.Session()%0Asess.run(init)%0A%0A%0Afor _ in range(10000):%0A%09batch_xs, batch_ys = mnist.train.next_batch(100)%0A%09sess.run(train_step, feed_dict=%7Bx: batch_xs, y_: batch_ys%7D)%0A%09print(sess.run(cross_entropy, feed_dict=%7Bx: batch_xs, y_: batch_ys%7D))%0A%0Acorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_, 1))%0Aaccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))%0A%0Aprint(sess.run(accuracy, feed_dict=%7Bx: mnist.test.images, y_: mnist.test.labels%7D))%0A
|
|
523ef51278c964718da68bb789e78e6c8f5f8766
|
Add the init method to the notification model.
|
model/notification.py
|
model/notification.py
|
Python
| 0
|
@@ -0,0 +1,169 @@
+def NotificationModel(Query):%0A def __init__(self, db):%0A self.db = db%0A self.table_name = %22notification%22%0A super(NotificationModel, self).__init__()
|
|
c68c5bf488cb7224d675bec333c6b7a4992574ed
|
Add a simple APL exception class
|
apl_exception.py
|
apl_exception.py
|
Python
| 0.000007
|
@@ -0,0 +1,227 @@
+%22%22%22%0A A simple APL exception class%0A%22%22%22%0A%0Aclass APL_Exception (BaseException):%0A %22%22%22%0A APL Exception Class%0A %22%22%22%0A def __init__ (self,message,line=None):%0A self.message = message%0A self.line = line%0A%0A# EOF%0A
|
|
e68590e9e05ab54b91ad3d03e372fbf8b341c3b9
|
Use a logger thread to prevent stdout races.
|
gtest-parallel.py
|
gtest-parallel.py
|
#!/usr/bin/env python2
import Queue
import optparse
import subprocess
import sys
import threading
# Command line: one or more gtest executables plus worker/filter options.
parser = optparse.OptionParser(
    usage = 'usage: %prog [options] executable [executable ...]')

parser.add_option('-w', '--workers', type='int', default=16,
                  help='number of workers to spawn')
parser.add_option('--gtest_filter', type='string', default='',
                  help='test filter')
parser.add_option('--gtest_also_run_disabled_tests', action='store_true',
                  default=False, help='run disabled tests too')

(options, binaries) = parser.parse_args()

if binaries == []:
  parser.print_usage()
  sys.exit(1)

# Shared work queue of (command, job_id, test) tuples.
tests = Queue.Queue()

# Find tests.
job_id = 0
for test_binary in binaries:
  command = [test_binary]
  if options.gtest_filter != '':
    command += ['--gtest_filter=' + options.gtest_filter]
  if options.gtest_also_run_disabled_tests:
    command += ['--gtest_also_run_disabled_tests']

  # Ask the binary itself for its test list.
  test_list = subprocess.Popen(command + ['--gtest_list_tests'],
                               stdout=subprocess.PIPE).communicate()[0]
  test_group = ''
  for line in test_list.split('\n'):
    if not line.strip():
      continue
    if line[0] != " ":
      # Unindented lines name a new test case group (e.g. "FooTest.").
      test_group = line.strip()
      continue
    line = line.strip()

    # Skip disabled tests unless they should be run
    if not options.gtest_also_run_disabled_tests and 'DISABLED' in line:
      continue

    test = test_group + line
    tests.put((command, job_id, test))
    print str(job_id) + ': TEST ' + test_binary + ' ' + test
    job_id += 1
def run_job((command, job_id, test)):
  """Run a single gtest in its own process, prefixing each output line
  with its job id.

  NOTE: uses a Python 2 tuple parameter; the argument is the
  (command, job_id, test) tuple queued during discovery.
  """
  sub = subprocess.Popen(command + ['--gtest_filter=' + test],
                         stdout = subprocess.PIPE,
                         stderr = subprocess.STDOUT)
  while True:
    line = sub.stdout.readline()
    if line == '':
      break
    print str(job_id) + '> ' + line.rstrip()

  code = sub.wait()
  print str(job_id) + ': EXIT ' + str(code)
def worker():
  """Drain jobs from the shared queue until it is empty, then exit."""
  while True:
    try:
      job = tests.get_nowait()
    except Queue.Empty:
      return
    run_job(job)
    tests.task_done()
# Spawn the worker pool; daemon threads so Ctrl-C can kill the whole run.
threads = []
for i in range(options.workers):
  t = threading.Thread(target=worker)
  t.daemon = True
  threads.append(t)

[t.start() for t in threads]
[t.join() for t in threads]
|
Python
| 0
|
@@ -645,16 +645,36 @@
xit(1)%0A%0A
+log = Queue.Queue()%0A
tests =
@@ -1856,30 +1856,32 @@
break%0A%0A
-print
+log.put(
str(job_id)
@@ -1898,24 +1898,25 @@
ine.rstrip()
+)
%0A%0A code = s
@@ -1927,22 +1927,24 @@
ait()%0A
-print
+log.put(
str(job_
@@ -1970,16 +1970,17 @@
tr(code)
+)
%0A%0Adef wo
@@ -2106,16 +2106,113 @@
return%0A%0A
+def logger():%0A while True:%0A line = log.get()%0A if line == %22%22:%0A return%0A print line%0A%0A
threads
@@ -2355,16 +2355,74 @@
hreads%5D%0A
+printer = threading.Thread(target=logger)%0Aprinter.start()%0A
%5Bt.join(
@@ -2421,28 +2421,55 @@
%5Bt.join() for t in threads%5D%0A
+log.put(%22%22)%0Aprinter.join()%0A
|
e093ce0730fa3071484fed251535fea62e0430d6
|
add logger view
|
View/LoggerView.py
|
View/LoggerView.py
|
Python
| 0
|
@@ -0,0 +1,2083 @@
+# Under MIT License, see LICENSE.txt%0A%0Afrom PyQt4.QtGui import QWidget%0Afrom PyQt4.QtCore import QTimer%0Afrom PyQt4.QtGui import QListWidget%0Afrom PyQt4.QtGui import QHBoxLayout%0Afrom PyQt4.QtGui import QVBoxLayout%0Afrom PyQt4.QtGui import QPushButton%0Afrom Model.DataInModel import DataInModel%0A%0A__author__ = 'RoboCupULaval'%0A%0A%0Aclass LoggerView(QWidget):%0A def __init__(self, parent=None):%0A QWidget.__init__(self, parent)%0A self._parent = parent%0A self._model = None%0A self._count = 0%0A self.pause = False%0A self.init_ui()%0A%0A def init_ui(self):%0A self.log_queue = QListWidget(self)%0A layout = QHBoxLayout()%0A layout.addWidget(self.log_queue)%0A%0A layout_btn = QVBoxLayout()%0A self.btn_pause = QPushButton('Pause')%0A self.btn_pause.setCheckable(True)%0A self.btn_pause.setChecked(self.pause)%0A self.btn_pause.clicked.connect(self.pauseEvent)%0A layout_btn.addWidget(self.btn_pause)%0A layout.addLayout(layout_btn)%0A%0A self.setLayout(layout)%0A%0A self.hide()%0A%0A self.timer = QTimer()%0A self.timer.timeout.connect(self.update_logger)%0A self.timer.start(250)%0A%0A def pauseEvent(self):%0A self.pause = not self.pause%0A self.btn_pause.setChecked(self.pause)%0A%0A def set_model(self, model):%0A if isinstance(model, DataInModel):%0A self._model = model%0A else:%0A raise TypeError('Logger should get data in model argument.')%0A%0A def update_logger(self):%0A if not self.pause:%0A if self._model is not None:%0A messages = self._model.get_last_log(self._count)%0A if messages is not None:%0A self._count += len(messages)%0A for msg in messages:%0A self.log_queue.addItem(str(msg))%0A self.log_queue.scrollToBottom()%0A%0A def get_count(self):%0A return self._count%0A%0A def show_hide(self):%0A if self.isVisible():%0A self.hide()%0A else:%0A self.show()%0A self._parent.resize_window()%0A
|
|
b2f07c815c66be310ee1c126ba743bb786d79a08
|
Create problem2.py
|
W2/PS2/problem2.py
|
W2/PS2/problem2.py
|
Python
| 0.000024
|
@@ -0,0 +1,1053 @@
+'''%0APROBLEM 2: PAYING DEBT OFF IN A YEAR (15.0/15.0 points)%0ANow write a program that calculates the minimum fixed monthly payment needed in order pay off a credit card balance within 12 months. By a fixed monthly payment, we mean a single number which does not change each month, but instead is a constant amount that will be paid each month.%0A%0AIn this problem, we will not be dealing with a minimum monthly payment rate.%0A%0AThe following variables contain values as described below:%0A%0Abalance - the outstanding balance on the credit card%0A%0AannualInterestRate - annual interest rate as a decimal%0A%0AThe program should print out one line: the lowest monthly payment that will pay off all debt in under 1 year, for example:%0A%0ALowest Payment: 180 %0A'''%0A%0A%0Abalance = 3329%0AannualInterestRate = 0.2%0Apayment = 0%0Anew_balance = balance%0Awhile new_balance %3E 0:%0A new_balance = balance%0A for month in range (1,13):%0A new_balance -= payment%0A new_balance += (new_balance*(annualInterestRate/12))%0A payment += 10%0Aprint('Lowest Payment: ' + str(payment-10))%0A%0A
|
|
61b7ee073efcd698329bec69a9eb682a1bc032d3
|
Add py_trace_event to DEPS.
|
telemetry/telemetry/util/trace.py
|
telemetry/telemetry/util/trace.py
|
Python
| 0.000008
|
@@ -0,0 +1,362 @@
+# Copyright 2014 The Chromium Authors. All rights reserved.%0A# Use of this source code is governed by a BSD-style license that can be%0A# found in the LICENSE file.%0A%0Afrom telemetry.core import util%0A%0A%0Autil.AddDirToPythonPath(util.GetChromiumSrcDir(),%0A 'third_party', 'py_trace_event', 'src')%0Afrom trace_event import * # pylint: disable=F0401%0A
|
|
c3789b5f8a8c90902693194cf257b6c9e4ac7783
|
Add solution to 119.
|
119/119.py
|
119/119.py
|
Python
| 0.000083
|
@@ -0,0 +1,1333 @@
+%22%22%22%0AThe number 512 is interesting because it is equal to the sum of its digits%0Araised to some power: 5 + 1 + 2 = 8, and 83 = 512. Another example of a number%0Awith this property is 614656 = 284.%0A%0AWe shall define an to be the nth term of this sequence and insist that a number%0Amust contain at least two digits to have a sum.%0A%0AYou are given that a2 = 512 and a10 = 614656.%0A%0AFind a30.%0A%0A%0ASolution comment: Well, this number is on oeis.org. So kinda cheating, but...%0AOther solution is simple brute force. The search space was just manually set quite a%0Abit larger than what is set now, then just reduced a bit afterwards. Terms not generated%0Ain order, so need to generate some more terms than just 30.%0A%22%22%22%0A%0A%0A# from urllib.request import urlopen%0A# data = urlopen('https://oeis.org/A023106/b023106.txt').read().splitlines()%0A# answer = int(data%5B30 + 9%5D.split()%5B-1%5D) # Skip first 10 single digit terms.%0A# print('Answer:', answer)%0A%0A%0Afrom time import time%0A%0Adef digit_sum(n):%0A s = 0%0A while n:%0A s += n %25 10%0A n //= 10%0A return s%0A%0At0 = time()%0Aterms = %5B%5D%0Afor b in range(2, 100):%0A x = b%0A for _ in range(2, 30):%0A x *= b%0A if digit_sum(x) == b:%0A terms.append(x)%0A if len(terms) %3E= 40:%0A break%0A%0Aprint('Answer:', sorted(terms)%5B29%5D)%0Aprint('Execution time: %7B:.3f%7D ms'.format((time() - t0) * 1e3))%0A
|
|
2c155d4fe286f685bca696c60730bd2fca2151f1
|
Add new package: sysbench (#18310)
|
var/spack/repos/builtin/packages/sysbench/package.py
|
var/spack/repos/builtin/packages/sysbench/package.py
|
Python
| 0
|
@@ -0,0 +1,937 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Sysbench(AutotoolsPackage):%0A %22%22%22Scriptable database and system performance benchmark.%22%22%22%0A%0A homepage = %22https://github.com/akopytov/sysbench%22%0A url = %22https://github.com/akopytov/sysbench/archive/1.0.20.tar.gz%22%0A%0A version('1.0.20', sha256='e8ee79b1f399b2d167e6a90de52ccc90e52408f7ade1b9b7135727efe181347f')%0A version('1.0.19', sha256='39cde56b58754d97b2fe6a1688ffc0e888d80c262cf66daee19acfb2997f9bdd')%0A version('1.0.18', sha256='c679b285e633c819d637bdafaeacc1bec13f37da5b3357c7e17d97a71bf28cb1')%0A%0A depends_on('autoconf', type='build')%0A depends_on('automake', type='build')%0A depends_on('libtool', type='build')%0A depends_on('m4', type='build')%0A depends_on('mysql-client')%0A
|
|
34a9969495f1b1c9452bff54cb03148e68fde303
|
Create Insertion_sort_with_binary_search.py
|
C02-Getting-Started/exercise_code/Insertion_sort_with_binary_search.py
|
C02-Getting-Started/exercise_code/Insertion_sort_with_binary_search.py
|
Python
| 0.000002
|
@@ -0,0 +1,1780 @@
+# Exercise 2.3-6 in book%0A# Standalone Python version 2.7 code%0Aimport os%0Aimport re%0Aimport math%0Aimport time%0Afrom random import randint%0A%0Adef insertion_sort(array):%0A%09for j, v in enumerate(array):%0A%09%09key = v%0A%09%09i = j - 1%0A%09%09while i %3E -1 and array%5Bi%5D %3E key:%0A%09%09%09array%5Bi+1%5D = array%5Bi%5D%0A%09%09%09i = i - 1%0A%09%09array%5Bi+1%5D = key%0A%09%09%0Adef insertion_sort_v2(array):%0A%09for j, v in enumerate(array):%0A%09%09if j %3E 0:%0A%09%09%09key = array%5Bj%5D%0A%09%09%09a = binary_search(array, key, j)%0A%09%09%09for i in range(j, a, -1):%0A%09%09%09%09array%5Bi%5D = array%5Bi-1%5D%0A%09%09%09array%5Ba%5D = key%0A%0Adef binary_search(array, searchingelement, arraypart):%0A%09array = list(array%5B:arraypart%5D)%0A%09last = array.__len__()%0A%09mid = int(last/2)%0A%09min = 0%0A%09for i in range(int(math.log(last)/math.log(2)) + 1):%0A%09%09if array%5Bmid%5D == searchingelement:%0A%09%09%09return mid%0A%09%09elif array%5Bmid%5D %3C searchingelement:%0A%09%09%09min = mid%0A%09%09%09mid = int((last + mid) / 2)%0A%09%09else:%0A%09%09%09last = mid%0A%09%09%09mid = int((mid + min) / 2)%0A%09if array%5Bmid%5D %3C searchingelement:%0A%09%09return mid+1%0A%09elif array%5Bmid%5D %3E searchingelement:%0A%09%09if mid-1 %3E -1:%0A%09%09%09return mid-1%0A%09%09else:%0A%09%09%09return mid%0A%09else:%0A%09%09return mid%0A%09%0A%09%0Aif __name__ == '__main__':%0A%09array1 = %5B%5D%0A%09for i in range(10000):%0A%09%09array1.append(randint(0, 1000))%0A%09%0A%09array = list(array1)%0A%09t0 = time.clock()%0A%09insertion_sort(array)%0A%09t1 = time.clock()%0A%09print %22insertion_sort: %22 + str(t1-t0)%0A%0A%09array = list(array1)%0A%09t0 = time.clock()%0A%09insertion_sort_v2(array)%0A%09t1 = time.clock()%0A%09print %22insertion_sort_v2: %22 + str(t1-t0)%0A# Test results shows that worst case of improved insertion sort is O(n * (n%5C2) * lg(n)) %0A# Better than insertion sort but still very bad%0A# Tested for 1000 random elements%0A# 
insertion_sort:----0.0390096090178%0A# insertion_sort_v2:-0.0287921815039%0A%0A# Tested for 10000 random elements%0A# insertion_sort:----3.76619711492%0A# insertion_sort_v2:-2.25984142782%0A# End of 2.3-6 in book%0A
|
|
274e7a93bac93461f07dd43f3f84f1f00e229ffd
|
Add migration script hr_family -> hr_employee_relative
|
hr_employee_relative/migrations/12.0.1.0.0/post-migration.py
|
hr_employee_relative/migrations/12.0.1.0.0/post-migration.py
|
Python
| 0
|
@@ -0,0 +1,1961 @@
+# Copyright 2019 Creu Blanca%0A# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).%0A%0Afrom openupgradelib import openupgrade%0A%0A%0A@openupgrade.migrate()%0Adef migrate(env, version):%0A cr = env.cr%0A columns = 'fam_spouse, fam_spouse_employer, fam_spouse_tel, fam_father,' %5C%0A ' fam_father_date_of_birth, fam_mother, fam_mother_date_of_birth'%0A cr.execute('SELECT id, %25s FROM hr_employee' %25 columns)%0A%0A relation_spouse = env.ref('hr_employee_relative.relation_spouse').id%0A relation_parent = env.ref('hr_employee_relative.relation_parent').id%0A relation_child = env.ref('hr_employee_relative.relation_child').id%0A%0A for employee in cr.fetchall():%0A if employee%5B1%5D or employee%5B2%5D or employee%5B3%5D:%0A env%5B'hr.employee.relative'%5D.create(%7B%0A 'employee_id': employee%5B0%5D,%0A 'name': employee%5B1%5D or 'Spouse',%0A 'relation_id': relation_spouse%0A %7D)%0A if employee%5B4%5D or employee%5B5%5D:%0A env%5B'hr.employee.relative'%5D.create(%7B%0A 'employee_id': employee%5B0%5D,%0A 'name': employee%5B4%5D or 'Father',%0A 'date_of_birth': employee%5B5%5D or False,%0A 'relation_id': relation_parent%0A %7D)%0A if employee%5B6%5D or employee%5B7%5D:%0A env%5B'hr.employee.relative'%5D.create(%7B%0A 'employee_id': employee%5B0%5D,%0A 'name': employee%5B6%5D or 'Mother',%0A 'date_of_birth': employee%5B7%5D or False,%0A 'relation_id': relation_parent%0A %7D)%0A cr.execute(%0A 'SELECT name, date_of_birth, employee_id, gender'%0A ' FROM hr_employee_children'%0A )%0A for children in cr.fetchall():%0A env%5B'hr.employee.relative'%5D.create(%7B%0A 'name': children%5B0%5D or 'Child',%0A 'date_of_birth': children%5B1%5D or False,%0A 'employee_id': children%5B2%5D,%0A 'gender': children%5B3%5D or False,%0A 'relation_id': relation_child%0A %7D)%0A
|
|
5f9bb1a027664a0107a213b5dfa82c22d75c1196
|
handle relative paths
|
pls-files.py
|
pls-files.py
|
#!/usr/bin/env python
from ConfigParser import SafeConfigParser
from contextlib import closing
from os.path import basename, dirname, join
import sys
from urllib2 import urlopen
def generic_open(arg):
    """Open *arg* as a URL if possible, otherwise as a local file.

    Returns (handle, directory): directory is None for URLs, and the
    containing directory for local paths (used to resolve relative
    playlist entries).
    """
    try:
        handle = urlopen(arg)
    except ValueError:
        # not a URL -- treat it as a path on the local filesystem
        return open(arg, "r"), dirname(arg)
    return handle, None
def playlist_files(config):
    """Yield the FileN entries of a parsed .pls playlist, in order."""
    total = config.getint("playlist", "NumberOfEntries")
    index = 1
    while index <= total:
        yield config.get("playlist", "File%d" % index)
        index += 1
# Main: for each argument (file path or URL), parse it as a .pls playlist
# and print each entry, resolving relative local entries to file:// urls.
if len(sys.argv) > 1:
    config = SafeConfigParser()
    for arg in sys.argv[1:]:
        raw_handle, directory = generic_open(arg)
        with closing(raw_handle) as handle:
            try:
                config.readfp(handle)
                for raw_fn in playlist_files(config):
                    # local playlists may hold relative paths; anchor them
                    # at the playlist's own directory
                    fn = "file://" + join(directory, raw_fn) if directory != None else raw_fn
                    print fn
            except Exception, e:
                # keep going on bad playlists; report which one failed
                print >> sys.stderr, "%s\n [%s] %s" % (arg, type(e).__name__, e)
else:
    print >> sys.stderr, "Usage: %s file-or-url [file-or-url ...]" % basename(sys.argv[0])
    sys.exit(1)
|
Python
| 0.000003
|
@@ -131,16 +131,36 @@
me, join
+, normpath, realpath
%0Aimport
@@ -281,16 +281,54 @@
eError:%0A
+ arg = normpath(realpath(arg))%0A
|
061ba14918eb6598031c9ad8a1c3f8e9c0f0a34b
|
Create LeetCode-LowestCommonAncestor2.py
|
LeetCode-LowestCommonAncestor2.py
|
LeetCode-LowestCommonAncestor2.py
|
Python
| 0.000004
|
@@ -0,0 +1,941 @@
+%22%22%22%0AGiven a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.%0ANotice it is binary tree, not BST%0A%22%22%22%0A%0Aclass TreeNode(object):%0A def __init__(self, x):%0A self.val = x%0A self.left = None%0A self.right = None%0A %0Aclass Solution(object):%0A def lowestCommonAncestor(self, root, p, q):%0A %22%22%22%0A :type root: TreeNode%0A :type p: TreeNode%0A :type q: TreeNode%0A :rtype: TreeNode%0A %22%22%22%0A if not root:%0A return None%0A elif root.val==p.val or root.val==q.val:%0A return root%0A else:%0A left = self.lowestCommonAncestor(root.left, p, q)%0A right = self.lowestCommonAncestor(root.right, p, q)%0A if not left and not right:%0A return root%0A else:%0A if not left:%0A return left%0A if not right:%0A return right%0A%0A%0A
|
|
63208828762d01122054d122c8d305fa8930f9bd
|
Make service postage nullable
|
migrations/versions/0258_service_postage_nullable.py
|
migrations/versions/0258_service_postage_nullable.py
|
Python
| 0.999996
|
@@ -0,0 +1,874 @@
+%22%22%22%0A%0ARevision ID: 0258_service_postage_nullable%0ARevises: 0257_letter_branding_migration%0ACreate Date: 2019-02-12 11:52:53.139383%0A%0A%22%22%22%0Afrom alembic import op%0Aimport sqlalchemy as sa%0A%0A%0Arevision = '0258_service_postage_nullable'%0Adown_revision = '0257_letter_branding_migration'%0A%0A%0Adef upgrade():%0A # ### commands auto generated by Alembic - please adjust! ###%0A op.alter_column('services_history', 'postage', existing_type=sa.BOOLEAN(), nullable=True)%0A op.alter_column('services', 'postage', existing_type=sa.BOOLEAN(), nullable=True)%0A # ### end Alembic commands ###%0A%0A%0Adef downgrade():%0A # ### commands auto generated by Alembic - please adjust! ###%0A op.alter_column('services_history', 'postage', existing_type=sa.BOOLEAN(), nullable=False)%0A op.alter_column('services', 'postage', existing_type=sa.BOOLEAN(), nullable=False)%0A # ### end Alembic commands ###%0A
|
|
bc1fe15c77b8eedb40993e5ea24fa4d7340ff646
|
Fix bug 17 (#4254)
|
PaddleRec/multi-task/MMoE/args.py
|
PaddleRec/multi-task/MMoE/args.py
|
Python
| 0
|
@@ -0,0 +1,1393 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Afrom __future__ import absolute_import%0Afrom __future__ import division%0Afrom __future__ import print_function%0A%0Aimport argparse%0Aimport distutils.util%0A%0A%0Adef parse_args():%0A parser = argparse.ArgumentParser(description=__doc__)%0A parser.add_argument(%0A %22--base_lr%22, type=float, default=0.01, help=%22learning_rate%22)%0A parser.add_argument(%22--batch_size%22, type=int, default=5, help=%22batch_size%22)%0A parser.add_argument(%22--dict_dim%22, type=int, default=64, help=%22dict dim%22)%0A parser.add_argument(%0A %22--emb_dim%22, type=int, default=100, help=%22embedding_dim%22)%0A parser.add_argument(%0A '--use_gpu', type=bool, default=False, help='whether using gpu')%0A parser.add_argument('--ce', action='store_true', help=%22run ce%22)%0A args = parser.parse_args()%0A return args%0A
|
|
0179d4d84987da76c517de4e01100f0e1d2049ea
|
Add unit tests for pacman list packages
|
tests/unit/modules/pacman_test.py
|
tests/unit/modules/pacman_test.py
|
Python
| 0
|
@@ -0,0 +1,2165 @@
+# -*- coding: utf-8 -*-%0A'''%0A :codeauthor: :email:%60Eric Vz %3Ceric@base10.org%3E%60%0A'''%0A%0A# Import Python Libs%0Afrom __future__ import absolute_import%0A%0A# Import Salt Testing Libs%0Afrom salttesting import TestCase, skipIf%0Afrom salttesting.mock import (%0A MagicMock,%0A patch,%0A NO_MOCK,%0A NO_MOCK_REASON%0A)%0A%0Afrom salttesting.helpers import ensure_in_syspath%0A%0Aensure_in_syspath('../../')%0A%0A# Import Salt Libs%0Afrom salt.modules import pacman%0Afrom salt.exceptions import CommandExecutionError%0A%0A@skipIf(NO_MOCK, NO_MOCK_REASON)%0Aclass PacmanTestCase(TestCase):%0A '''%0A Test cases for salt.modules.pacman%0A '''%0A%0A def setUp(self):%0A pacman.__salt__ = %7B%7D%0A pacman.__context__ = %7B%7D%0A%0A%0A def test_list_pkgs(self):%0A '''%0A Test if it list the packages currently installed in a dict%0A '''%0A cmdmock = MagicMock(return_value='A 1.0%5CnB 2.0')%0A sortmock = MagicMock()%0A stringifymock = MagicMock()%0A with patch.dict(pacman.__salt__, %7B'cmd.run': cmdmock, 'pkg_resource.add_pkg': self._add_pkg, 'pkg_resource.sort_pkglist': sortmock, 'pkg_resource.stringify': stringifymock%7D):%0A self.assertDictEqual(pacman.list_pkgs(), %7B'A': %5B'1.0'%5D, 'B': %5B'2.0'%5D%7D)%0A sortmock.assert_called_once()%0A stringifymock.assert_called_once()%0A%0A%0A def test_list_pkgs_as_list(self):%0A '''%0A Test if it list the packages currently installed in a dict%0A '''%0A cmdmock = MagicMock(return_value='A 1.0%5CnB 2.0')%0A sortmock = MagicMock()%0A stringifymock = MagicMock()%0A with patch.dict(pacman.__salt__, %7B'cmd.run': cmdmock, 'pkg_resource.add_pkg': self._add_pkg, 'pkg_resource.sort_pkglist': sortmock, 'pkg_resource.stringify': stringifymock%7D):%0A self.assertDictEqual(pacman.list_pkgs(True), %7B'A': %5B'1.0'%5D, 'B': %5B'2.0'%5D%7D)%0A sortmock.assert_called_once()%0A stringifymock.assert_not_called()%0A %0A%0A%0A ''' %0A Helper methods for test cases%0A '''%0A def _add_pkg(self, pkgs, name, version):%0A pkgs.setdefault(name, 
%5B%5D).append(version)%0A%0A%0A%0Aif __name__ == '__main__':%0A from integration import run_tests%0A run_tests(PacmanTestCase, needs_daemon=False)%0A
|
|
f2d4ddba7c594ec93f0ede0be1fc515b0c7c2d7b
|
Remove HInput and Isolate joystick related code because son path isues with pygame
|
HJoystick.py
|
HJoystick.py
|
Python
| 0
|
@@ -0,0 +1,1157 @@
+#from direct.showbase import DirectObject%0Aimport pygame #pygame must be in the Main.py directory%0A#THIS FILE MUST BE IN THE MAIN.PY DIRECTORY BECAUSE SON PATH ISSUES%0A%0A%0Aclass HJoystickSensor():%0A def __init__(self,joystickId=0):%0A #print os.getcwd()%0A pygame.init()%0A pygame.joystick.init()%0A c=pygame.joystick.get_count()%0A if c%3E0:%0A self.id=joystickId%0A self.object=pygame.joystick.Joystick(self.id)%0A self.numButtons=self.object.get_numbuttons()%0A self.numAxes=self.object.get_numaxes()%0A base.taskMgr.add(self._task,%22taskForJoystick_%22+self.id)%0A else:%0A print %22No Joystick%22%0A%0A def _task(self,t):%0A pygame.event.pump()%0A for b in range(self.numButtons):%0A if self.object.get_button(b):%0A messenger.send(%22Joystick_Button_%22+str(b))%0A for a in range(self.numAxes):%0A axis=self.object.get_axis(a)%0A if axis!=0:%0A messenger.send(%22Joystick_Axis_%22+str(a),sentArgs%5Ba%5D)%0A return t.cont%0A ##Hats y otras cosas que no uso ahorita%0A%0Aif __name__==%22__main__%22:%0A a=HJoystickSensor()
|
|
fe63d6e1e822f7cb60d1c0bdaa08eb53d3849783
|
Add script to extract artist names from MusicBrainz database
|
benchmark/datasets/musicbrainz/extract-from-dbdump.py
|
benchmark/datasets/musicbrainz/extract-from-dbdump.py
|
Python
| 0
|
@@ -0,0 +1,391 @@
+#!/usr/bin/env python%0A%22%22%22%0AScript to extract the artist names from a MusicBrainz database dump.%0A%0AUsage:%0A ./extract-from-dbdump.py %3Cdump_dir%3E/artist %3Coutfile%3E%0A%22%22%22%0A%0Aimport pandas as pd%0Aimport sys%0A%0A__author__ = %22Uwe L. Korn%22%0A__license__ = %22MIT%22%0A%0A%0Ainput_file = sys.argv%5B1%5D%0Aoutput_file = sys.argv%5B2%5D%0A%0Adf = pd.read_csv(input_file, sep='%5Ct', header=None)%0Adf.ix%5B:, 2%5D.to_csv(outfile, index=False)%0A
|
|
842092122b14343c9b1c2e2a4e0dd67dd8bdf767
|
build SlideEvaluation objects from existing data
|
promort/slides_manager/migrations/0014_auto_20171201_1119.py
|
promort/slides_manager/migrations/0014_auto_20171201_1119.py
|
Python
| 0
|
@@ -0,0 +1,1162 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.5 on 2017-12-01 11:19%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0A%0Adef populate_slide_evaluations(apps, schema_editor):%0A SlideEvaluation = apps.get_model('slides_manager', 'SlideEvaluation')%0A SlideQualityControl = apps.get_model('slides_manager', 'SlideQualityControl')%0A%0A for qc in SlideQualityControl.objects.all():%0A SlideEvaluation(%0A slide=qc.slide, rois_annotation_step=qc.rois_annotation_step,%0A staining=qc.slide.staining, adequate_slide=qc.adequate_slide,%0A not_adequacy_reason=qc.not_adequacy_reason, notes=qc.notes,%0A reviewer=qc.reviewer, acquisition_date=qc.acquisition_date%0A ).save()%0A%0A%0Adef clear_slide_evaluations(apps, schema_editor):%0A SlideEvaluation = apps.get_model('slides_manager', 'SlideEvaluation')%0A%0A for se in SlideEvaluation.objects.all():%0A se.delete()%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('slides_manager', '0013_slideevaluation'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(populate_slide_evaluations, clear_slide_evaluations)%0A %5D%0A
|
|
116f41481062e6d9f15c7a81c2e5268aa1b706c7
|
add sources script
|
admin/scripts/addListOfSources.py
|
admin/scripts/addListOfSources.py
|
Python
| 0.000001
|
@@ -0,0 +1,2757 @@
+from collections import defaultdict%0Aimport re%0Aimport sys%0Aimport time%0A%0Asys.path.append('../..')%0Afrom crawler.crawler import crawl, itemFactory%0Afrom engine.data.database.databaseConnection import commit, rollback%0Afrom engine.data.database.sourceTable import addSource, sourceExists, urlToLookupId%0Afrom engine.data.database.sourceGroupAssignmentTable import addSourceGroupAssignment%0Afrom engine.data.database.itemTable import getSourceUrlsForItemUrl%0Afrom engine.data.url import Url%0A%0A%0Adef handleLine(line):%0A # Parse line%0A m = lineParser.match(line.rstrip())%0A assert(m.lastindex == 1 or m.lastindex == 2)%0A url = Url(m.group(1))%0A sourceGroupName = None%0A if(m.lastindex == 2):%0A sourceGroupName = m.group(2)%0A%0A # Add source%0A if not sourceExists(url):%0A print(%22Adding %22 + url.value)%0A webFeed = itemFactory(url)%0A if not hasSimilarSource(webFeed):%0A addSource(url, webFeed.name)%0A crawl(webFeed)%0A%0A sourceId = urlToLookupId(url.value)%0A print %22https://ps4m.com/s/%25d%22 %25 (sourceId)%0A else:%0A print %22NOT ADDING!%22%0A return%0A else:%0A print (url.value + %22 already exists%22)%0A%0A # If nessecary, assign source to group%0A if(sourceGroupName is not None):%0A print %22%5CtAdding to %25s%22 %25 (sourceGroupName)%0A addSourceGroupAssignment(url, sourceGroupName)%0A return%0A%0A%0Adef usage():%0A message = %22%22%22%25s%0ANAME%0A addListOfSources - adds a file of source urls%0ASYNOPSIS%0A addListOfSources SOURCE_FILE%0A%0A SOURCE_FILE -%0A Contains one url per line. 
Also optionally, a space then a source group.%0A%22%22%22 %25 sys.argv%5B0%5D%0A print message%0A%0AlineParser = re.compile(%22%5E(%5CS+)%5Cs?(.+)?$%22)%0A%0Adef hasSimilarSource(webfeed):%0A duplicateUrlCounter = defaultdict(lambda:0)%0A for i in webfeed.items:%0A for sourceUrl in getSourceUrlsForItemUrl(i%5B1%5D):%0A duplicateUrlCounter%5BsourceUrl%5D += 1%0A%0A # Print a warning, if any other webfeed has more than half of this webfeed%0A result = False%0A for c in duplicateUrlCounter.keys():%0A if (duplicateUrlCounter%5Bc%5D %3E len(webfeed.items)/2):%0A print %22Possible duplicate feed. New feed %25s. Old feed: %25s%22 %25 (webfeed.url, c)%0A result = True%0A return result %0A%0A%0Aif(len(sys.argv) != 2):%0A usage()%0A exit(1)%0A%0AsourceFilePath = sys.argv%5B1%5D%0AsourceFile = open(sourceFilePath, 'r')%0A%0AproblemLine = set()%0A%0Afor line in sourceFile:%0A try:%0A handleLine(line)%0A except Exception, e:%0A rollback()%0A print %22fail %25s: %25s%22 %25 (line, e)%0A problemLine.add(line)%0A continue%0A%0A print # Add a blank line between sources%0A commit()%0A time.sleep(1)%0A%0AsourceFile.close()%0A%0A# Report errors%0Aprint 'Could Not Add the Following Line:'%0Afor i in problemLine:%0A print i%0A
|
|
3a235e25ac3f5d76eb4030e01afbe7b716ec6d91
|
Add py solution for 331. Verify Preorder Serialization of a Binary Tree
|
py/verify-preorder-serialization-of-a-binary-tree.py
|
py/verify-preorder-serialization-of-a-binary-tree.py
|
Python
| 0.005023
|
@@ -0,0 +1,571 @@
+class Solution(object):%0A def isValidSerialization(self, preorder):%0A %22%22%22%0A :type preorder: str%0A :rtype: bool%0A %22%22%22%0A def get_tree(nodes, offset):%0A if nodes%5Boffset%5D == '#':%0A return offset + 1%0A else:%0A left = get_tree(nodes, offset + 1)%0A right = get_tree(nodes, left)%0A return right%0A%0A nodes = preorder.split(',')%0A try:%0A ret = get_tree(nodes, 0)%0A return ret == len(nodes)%0A except IndexError:%0A return False%0A
|
|
d733d3359038e6b249a6bd878ba0d6c3224b5e9a
|
fix flake8 errors
|
run_tests.py
|
run_tests.py
|
#!/usr/bin/env python
import sys
import shutil
import tempfile
try:
import django
except ImportError:
print("Error: missing test dependency:")
print(" django library is needed to run test suite")
print(" you can install it with 'pip install django'")
print(" or use tox to automatically handle test dependencies")
sys.exit(1)
try:
import shortuuid
except ImportError:
print("Error: missing test dependency:")
print(" shortuuid library is needed to run test suite")
print(" you can install it with 'pip install shortuuid'")
print(" or use tox to automatically handle test dependencies")
sys.exit(1)
try:
import dateutil
except ImportError:
print("Error: missing test dependency:")
print(" dateutil library is needed to run test suite")
print(" you can install it with 'pip install dateutil'")
print(" or use tox to automatically handle test dependencies")
sys.exit(1)
__test_libs__ = [
django,
shortuuid,
dateutil,
]
from django.conf import settings
def main():
    """Run the django_extensions test suite against an in-memory database.

    Configures a minimal Django settings module on the fly, optionally
    creates temporary KeyCzar key directories for the encrypted-field
    tests, runs the tests, and exits with the number of failures as the
    process exit status. The temporary key directories are always removed,
    even if test setup or the run itself fails.
    """
    # Dynamically configure the Django settings with the minimum necessary to
    # get Django running tests.
    KEY_LOCS = {}
    try:
        try:
            # If KeyCzar is available, set up the environment.
            from keyczar import keyczart, keyinfo

            # Create an RSA private key.
            keys_dir = tempfile.mkdtemp("django_extensions_tests_keyzcar_rsa_dir")
            keyczart.Create(keys_dir, "test", keyinfo.DECRYPT_AND_ENCRYPT, asymmetric=True)
            keyczart.AddKey(keys_dir, "PRIMARY", size=4096)
            KEY_LOCS['DECRYPT_AND_ENCRYPT'] = keys_dir

            # Create an RSA public key.
            pub_dir = tempfile.mkdtemp("django_extensions_tests_keyzcar_pub_dir")
            keyczart.PubKey(keys_dir, pub_dir)
            KEY_LOCS['ENCRYPT'] = pub_dir
        except ImportError:
            # KeyCzar is optional; without it the encrypted-field key dirs
            # are simply not configured.
            pass

        settings.configure(
            INSTALLED_APPS=[
                'django.contrib.auth',
                'django.contrib.contenttypes',
                'django.contrib.admin',
                'django.contrib.sessions',
                'django_extensions.tests.testapp',
                'django_extensions',
            ],
            # Django replaces this, but it still wants it. *shrugs*
            DATABASE_ENGINE='django.db.backends.sqlite3',
            DATABASES={
                'default': {
                    'ENGINE': 'django.db.backends.sqlite3',
                    'NAME': ':memory:',
                }
            },
            MEDIA_ROOT='/tmp/django_extensions_test_media/',
            MEDIA_PATH='/media/',
            ROOT_URLCONF='django_extensions.tests.urls',
            DEBUG=True,
            TEMPLATE_DEBUG=True,
            ENCRYPTED_FIELD_KEYS_DIR=KEY_LOCS,
        )

        # From Django 1.7 the app registry must be populated explicitly
        # before management commands or test runners can be used.
        if django.VERSION[:2] >= (1, 7):
            django.setup()

        apps = ['django_extensions']
        if django.VERSION[:2] >= (1, 6):
            apps.append('django_extensions.tests.testapp')

        apps.append('django_extensions.tests')

        from django.core.management import call_command
        from django.test.utils import get_runner

        try:
            from django.contrib.auth import get_user_model
        except ImportError:
            # Django < 1.5 has no custom user model support.
            USERNAME_FIELD = "username"
        else:
            USERNAME_FIELD = get_user_model().USERNAME_FIELD

        DjangoTestRunner = get_runner(settings)

        class TestRunner(DjangoTestRunner):
            # Subclass the configured runner so a superuser exists before
            # any test that needs admin credentials runs.
            def setup_databases(self, *args, **kwargs):
                result = super(TestRunner, self).setup_databases(*args, **kwargs)
                kwargs = {
                    "interactive": False,
                    "email": "admin@doesnotexit.com",
                    USERNAME_FIELD: "admin",
                }
                call_command("createsuperuser", **kwargs)
                return result

        failures = TestRunner(verbosity=2, interactive=True).run_tests(apps)
        sys.exit(failures)
    finally:
        for name, path in KEY_LOCS.items():
            # cleanup crypto key temp dirs
            shutil.rmtree(path)


if __name__ == '__main__':
    main()
|
Python
| 0.000001
|
@@ -2852,16 +2852,17 @@
%5D %3E= (1,
+
7):%0A
@@ -3440,24 +3440,25 @@
r(settings)%0A
+%0A
clas
|
f6dce9177421f61c7a773e1bbe53588eb54defc9
|
Create score.py
|
Samples/AzureML/score.py
|
Samples/AzureML/score.py
|
Python
| 0.000008
|
@@ -0,0 +1,1210 @@
+#example: scikit-learn and Swagger%0Aimport json%0Aimport numpy as np%0Aimport pandas as pd%0Aimport azureml.train.automl%0Afrom sklearn.externals import joblib%0Afrom sklearn.linear_model import Ridge%0Afrom azureml.core.model import Model%0A%0Afrom inference_schema.schema_decorators import input_schema, output_schema%0Afrom inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType%0A%0Adef init():%0A global model%0A # note here %22sklearn_regression_model.pkl%22 is the name of the model registered under%0A # this is a different behavior than before when the code is run locally, even though the code is the same.%0A model_path = Model.get_model_path('nyc.pkl')%0A # deserialize the model file back into a sklearn model%0A model = joblib.load(model_path)%0A%0Ainput_sample = np.array(%5B%5B1,%22Thursday%22,16,1,3.48%5D%5D)%0Aoutput_sample = np.array(%5B13.66304196%5D)%0A%0A@input_schema('data', NumpyParameterType(input_sample))%0A@output_schema(NumpyParameterType(output_sample))%0Adef run(data):%0A try:%0A result = np.round(model.predict(data),2)%0A # you can return any datatype as long as it is JSON-serializable%0A return result.tolist()%0A except Exception as e:%0A error = str(e)%0A return error%0A
|
|
4f45932c2a3519b6ccbdee20fb4beaafe1774bb2
|
Refactor for spaceapi, closes #135
|
plugins/status.py
|
plugins/status.py
|
from irc3.plugins.command import command
from bytebot_config import BYTEBOT_PLUGIN_CONFIG
from irc3 import asyncio
import json
import aiohttp
@command(permission="view")
@asyncio.coroutine
def status(bot, mask, target, args):
    """Returns the door status of the hackerspace rooms

        %%status
    """
    # Fetch the SpaceAPI JSON document and report the open/closed state.
    try:
        with aiohttp.Timeout(10):
            with aiohttp.ClientSession(loop=bot.loop) as session:
                resp = yield from session.get(
                    BYTEBOT_PLUGIN_CONFIG['spacestatus']['url'])

                if resp.status != 200:
                    bot.privmsg(target, "Error while retrieving spaceapi data")
                    # Raising aborts further processing; the generic handler
                    # below also reports the failure to the channel.
                    raise Exception()

                r = yield from resp.read()
                data = json.loads(r.decode('utf-8'))

                bot.privmsg(target, 'Space status:')
                if data['state']['open']:
                    bot.privmsg(target, '\tThe space is open!')
                else:
                    bot.privmsg(target, '\tThe space is closed!')
    except Exception as e:
        # Log the underlying error so failures are diagnosable instead of
        # being silently swallowed, then notify the channel.
        bot.log.error(e)
        bot.privmsg(target, '\tError while retrieving space status')
@command(permission="view")
@asyncio.coroutine
def users(bot, mask, target, args):
    """Returns the current users inside the hackerspace rooms

        %%users
    """
    # Fetch the SpaceAPI JSON document and report who is present.
    try:
        with aiohttp.Timeout(10):
            with aiohttp.ClientSession(loop=bot.loop) as session:
                resp = yield from session.get(
                    BYTEBOT_PLUGIN_CONFIG['spacestatus']['url'])

                if resp.status != 200:
                    bot.privmsg(target, "Error while retrieving spaceapi data")
                    # Raising aborts further processing; the generic handler
                    # below also reports the failure to the channel.
                    raise Exception()

                r = yield from resp.read()
                # First entry of the people_now_present sensor carries the
                # head count and (optionally) the list of names.
                sensor = json.loads(
                    r.decode('utf-8'))['sensors']['people_now_present'][0]

                if sensor['value'] > 0:
                    bot.privmsg(target,
                                'Space users: ' + str(', '.join(sensor['names'])))
                elif sensor['value'] == 0:
                    bot.privmsg(target, 'Nobody is logged into teh space :(')
                else:
                    # Negative counts should not happen; treat as unknown.
                    bot.privmsg(target,
                                "I'm not sure if anyone's in the space")
    except Exception as e:
        # Log the underlying error so failures are diagnosable instead of
        # being silently swallowed, then notify the channel.
        bot.log.error(e)
        bot.privmsg(target, '\tError while retrieving user data')
|
Python
| 0
|
@@ -990,33 +990,63 @@
except Exception
-:
+ as e:%0A bot.log.error(e)
%0A bot.pri
@@ -2136,17 +2136,47 @@
xception
-:
+ as e:%0A bot.log.error(e)
%0A
|
f1599a7b3f342a86cf7eb7201593b8515d5f13ad
|
Add views for handling 400 & 500 errors
|
arcutils/views.py
|
arcutils/views.py
|
Python
| 0
|
@@ -0,0 +1,1872 @@
+import logging%0A%0Afrom django.http import HttpResponseBadRequest, HttpResponseServerError%0Afrom django.template import loader%0Afrom django.views.decorators.csrf import requires_csrf_token%0A%0A%0Alog = logging.getLogger(__name__)%0A%0A%0A@requires_csrf_token%0Adef bad_request(request, exception=None, template_name='400.html'):%0A %22%22%22Override default Django bad_request view so context is passed.%0A%0A Otherwise, static files won't be loaded and default context vars%0A won't be available (&c).%0A%0A If loading or rendering the template causes an error, a bare 400%0A response will be returned.%0A%0A To use this in a project, import it into the project's root%0A urls.py and add a 400.html template.%0A%0A .. note:: The %60%60exception%60%60 arg was added in Django 1.9.%0A%0A %22%22%22%0A try:%0A template = loader.get_template(template_name)%0A body, content_type = template.render(%7B'request': request%7D, request), None%0A except Exception:%0A log.exception('Exception encountered while rendering 400 error')%0A body, content_type = '%3Ch1%3EBad Request (400)%3C/h1%3E', 'text/html'%0A return HttpResponseBadRequest(body, content_type=content_type)%0A%0A%0A@requires_csrf_token%0Adef server_error(request, template_name='500.html'):%0A %22%22%22Override default Django server_error view so context is passed.%0A%0A Otherwise, static files won't be loaded and default context vars%0A won't be available (&c).%0A%0A If loading or rendering the template causes an error, a bare 500%0A response will be returned.%0A%0A %22%22%22%0A try:%0A template = loader.get_template(template_name)%0A body, content_type = template.render(%7B'request': request%7D, request), None%0A except Exception:%0A log.exception('Exception encountered while rendering 500 error')%0A body, content_type = '%3Ch1%3EServer Error (500)%3C/h1%3E', 'text/html'%0A return HttpResponseServerError(body, content_type=content_type)%0A
|
|
7aee25badd2085d63012c83f6be8082d93427754
|
Add files via upload
|
polyregreesion.py
|
polyregreesion.py
|
Python
| 0
|
@@ -0,0 +1,1650 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0ACreated on Sun Jul 31 10:03:15 2016%0A%0A@author:Viky%0ACode for polynomial regression%0A%22%22%22%0A%0A#importing necessary packages%0Aimport numpy as np %0Aimport tensorflow as tf%0Aimport matplotlib.pyplot as plt%0A%0A%0A#input data:%0Ax_input=np.linspace(0,3,1000)%0Ax1=x_input/np.max(x_input)%0Ax2=np.power(x_input,2)/np.max(np.power(x_input,2))%0Ay_input=5*x1-3*x2%0Ay_input= y_input.reshape((y_input.size, 1))%0A%0A#model parameters%0A#order of polynomial %0An=2%0AW = tf.Variable(tf.random_normal(%5Bn,1%5D), name='weight')%0A#bias%0Ab = tf.Variable(tf.random_normal(%5B1%5D), name='bias')%0A%0A#X=tf.placeholder(tf.float32,shape=(None,2))%0AX=tf.placeholder(tf.float32,shape=%5BNone,n%5D)%0AY=tf.placeholder(tf.float32,shape=%5BNone, 1%5D)%0A%0A%0A#preparing the data%0Adef modify_input(x,x_size,n_value):%0A x_new=np.zeros(%5Bx_size,n_value%5D) %0A for i in range(n):%0A x_new%5B:,i%5D=np.power(x,(i+1))%0A x_new%5B:,i%5D=x_new%5B:,i%5D/np.max(x_new%5B:,i%5D)%0A return x_new%0A %0A%0A#model%0Ax_modified=modify_input(x_input,x_input.size,n)%0AY_pred=tf.add(tf.matmul(X,W),b)%0A%0A#algortihm%0Aloss = tf.reduce_mean(tf.square(Y_pred -Y ))%0A#training algorithm%0Aoptimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)%0A#initializing the variables%0Ainit = tf.initialize_all_variables()%0A%0A#starting the session session %0Asess = tf.Session()%0Asess.run(init)%0A%0Aepoch=12000%0A%0Afor step in xrange(epoch): %0A _, c=sess.run(%5Boptimizer, loss%5D, feed_dict=%7BX: x_modified, Y: y_input%7D)%0A if step%251000==0 :%0A print c%0A%0Aprint %22Model paramters:%22 %0Aprint sess.run(W)%0Aprint %22bias:%25f%22 %25sess.run(b)%0A# comparing our model %0Ay_test=sess.run(Y_pred, feed_dict=%7BX:x_modified%7D)%0Aplt.plot(x_input,y_input,x_input, y_test)%0Aplt.show()
|
|
6f4d5917abdbae1fe731e7a1786d8589d2b31ac0
|
Fix #160 -- Add missing migration
|
machina/apps/forum/migrations/0011_auto_20190627_2132.py
|
machina/apps/forum/migrations/0011_auto_20190627_2132.py
|
Python
| 0
|
@@ -0,0 +1,717 @@
+# Generated by Django 2.2.2 on 2019-06-28 02:32%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('forum', '0010_auto_20181103_1401'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='forum',%0A name='level',%0A field=models.PositiveIntegerField(editable=False),%0A ),%0A migrations.AlterField(%0A model_name='forum',%0A name='lft',%0A field=models.PositiveIntegerField(editable=False),%0A ),%0A migrations.AlterField(%0A model_name='forum',%0A name='rght',%0A field=models.PositiveIntegerField(editable=False),%0A ),%0A %5D%0A
|
|
4c601ce9b91a0bef7082e3d8a5c1b95dc512d829
|
add csl_util
|
User_Crawler/util_csl.py
|
User_Crawler/util_csl.py
|
Python
| 0.000006
|
@@ -0,0 +1,1697 @@
+# -*- coding: utf-8 -*-%0A%0Afrom types import *%0Aimport pandas as pd%0A%0AUSER_ATTR_LIST = %5B'./data/cross-site-linking/user_type.csv',%0A './data/graph/CC.csv',%0A './data/graph/degree.csv',%0A './data/graph/pagerank.csv'%0A %5D%0A%0A%0Adef dict_merge(dict_1, dict_2):%0A res = %7B%7D%0A for key in dict_1:%0A if key in dict_2:%0A res%5Bkey%5D = %7B%7D%0A res%5Bkey%5D.update(dict_1%5Bkey%5D)%0A res%5Bkey%5D.update(dict_2%5Bkey%5D)%0A return res%0A%0A%0Adef load_user_attr_to_dict(file_path):%0A user_attr_dict = %7B%7D%0A user_attr_df = pd.read_csv(file_path)%0A attr_list = list(user_attr_df)%0A attr_list.remove('username')%0A for idx, row in user_attr_df.iterrows():%0A user_attr_dict%5Brow%5B'username'%5D%5D = %7B%7D%0A for attr in attr_list:%0A user_attr_dict%5Brow%5B'username'%5D%5D%5Battr%5D = row%5Battr%5D%0A return user_attr_dict%0A%0A%0Adef load_all_attr_to_dict(file_list=USER_ATTR_LIST):%0A res = %7B%7D%0A for file_path in file_list:%0A user_attr_dict = load_user_attr(file_path)%0A res = dict_merge(res, user_attr_dict)%0A return res%0A%0A%0Adef load_user_attr_to_df(file_path):%0A return pd.read_csv(file_path)%0A%0A%0Adef load_all_attr_to_df(file_list=USER_ATTR_LIST):%0A df_list = %5Bload_user_attr_to_df(file_path) for file_path in file_list%5D%0A res = df_list%5B0%5D%0A for i in range(1, len(df_list)):%0A res = pd.merge(res, df_list%5Bi%5D, on='username')%0A return res%0A%0A%0Adef split_df(df, by='user_type'):%0A by_value_list = sorted(df%5Bby%5D.drop_duplicates().values.tolist())%0A for by_value in by_value_list:%0A df%5Bdf%5Bby%5D == by_value%5D.to_csv('./data/cross-site-linking/user_attr_' + str(by_value) + '.csv', index=False, encoding='utf-8')%0A
|
|
bcee6173027c48bfb25a65d3e97660f2e2a0852b
|
Add a python script to generate test methods
|
gentest.py
|
gentest.py
|
Python
| 0.000017
|
@@ -0,0 +1,750 @@
+from itertools import product%0Aimport json%0Aimport numpy%0A%0Acube = numpy.array(range(1, 9)).reshape(2, 2, 2)%0A%0Apcube = %5B%0A cube%5B0 ,0 ,0 %5D,%0A cube%5B0 ,0 ,0:2%5D,%0A cube%5B0 ,0:2,0:1%5D,%0A cube%5B0 ,0:2,0:2%5D,%0A cube%5B0:2,0:1,0:1%5D,%0A cube%5B0:2,0:1,0:2%5D,%0A cube%5B0:2,0:2,0:1%5D,%0A cube%5B0:2,0:2,0:2%5D,%0A%5D%0A%0Afor (i, (a, b)) in enumerate(product(pcube, repeat=2), start=1):%0A print 'public function testBsxfun%7B0:0%3E2d%7D()'.format(i)%0A print '%7B'%0A print '$a = %7B0%7D;'.format(json.dumps(a.tolist()))%0A print '$b = %7B0%7D;'.format(json.dumps(b.tolist()))%0A print '$expected = %7B0%7D;'.format(json.dumps((a * b).tolist()))%0A print '$actual = Bsxfun::bsxfun($this-%3Etimes, $a, $b);'%0A print '$this-%3EassertEquals($expected, $actual);'%0A print '%7D'%0A print%0A
|
|
052392da7980c4f4e2e86cd8eb65da5b91d3547b
|
Solve Code Fights different symbols naive problem
|
CodeFights/differentSymbolsNaive.py
|
CodeFights/differentSymbolsNaive.py
|
Python
| 0.001563
|
@@ -0,0 +1,625 @@
+#!/usr/local/bin/python%0A# Code Fights Different Symbols Naive Problem%0A%0Afrom collections import Counter%0A%0A%0Adef differentSymbolsNaive(s):%0A return len(Counter(s))%0A%0A%0Adef main():%0A tests = %5B%0A %5B%22cabca%22, 3%5D,%0A %5B%22aba%22, 2%5D%0A %5D%0A%0A for t in tests:%0A res = differentSymbolsNaive(t%5B0%5D)%0A ans = t%5B1%5D%0A if ans == res:%0A print(%22PASSED: differentSymbolsNaive(%7B%7D) returned %7B%7D%22%0A .format(t%5B0%5D, res))%0A else:%0A print((%22FAILED: differentSymbolsNaive(%7B%7D) returned %7B%7D,%22%0A %22answer: %7B%7D%22).format(t%5B0%5D, res, ans))%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
f625f46e89c8e95677492cfb03ee113a3f6c7bb3
|
Add utils.py
|
src/utils.py
|
src/utils.py
|
Python
| 0.000004
|
@@ -0,0 +1,1516 @@
+%22%22%22%0AThe MIT License (MIT)%0A%0ACopyright (c) 2017 Stefan Graupner%0A%0APermission is hereby granted, free of charge, to any person obtaining a copy%0Aof this software and associated documentation files (the %22Software%22), to deal%0Ain the Software without restriction, including without limitation the rights%0Ato use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0Acopies of the Software, and to permit persons to whom the Software is%0Afurnished to do so, subject to the following conditions:%0A%0AThe above copyright notice and this permission notice shall be included in all%0Acopies or substantial portions of the Software.%0A%0ATHE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0AIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0AFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0AAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0ALIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0AOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE%0ASOFTWARE.%0A%22%22%22%0A%0Aimport hashlib%0A%0Afrom gi.repository import OParl%0A%0Aclass OParlType(object):%0A %22%22%22%0A Simple wrapper class around OParl type urls%0A %22%22%22%0A%0A entity = 'Unknown'%0A version = '1.0'%0A%0A def __init__(self, object):%0A type = object.get_oparl_type()%0A type = type.split('/')%0A%0A self.version = type%5B-2%5D%0A self.entity = type%5B-1%5D%0A%0Adef sha1_hexdigest(string):%0A return hashlib.sha1(string).hexdigest()
|
|
3dcbde095cc8372f321a6691f57b190704dd5457
|
version 1 to simulate lighting project
|
graph_proj/graph_proj.py
|
graph_proj/graph_proj.py
|
Python
| 0
|
@@ -0,0 +1,1329 @@
+import networkx as nx%0Aimport random%0A%0A# Generate small-world network by watts-strogatz method%0Anodenum = 150%0Aneighk = 10%0Ap = 0.2%0A%0AG = nx.watts_strogatz_graph(nodenum, neighk, p)%0A%0A# random seed point%0A# first start with green point, then blue point%0Agreen_collect = set()%0Ablue_collect = set()%0Astablept = set()%0A%0Aallpt = set(range(nodenum))%0Arestpt = allpt.difference(green_collect.union(blue_collect))%0A%0Ai = 0%0Awhile len(restpt)!=0:%0A i += 1%0A green_sdpt = random.choice(list(restpt))%0A stablept.add(green_sdpt)%0A green_collect.add(green_sdpt)%0A green_neigh = set(nx.neighbors(G, green_sdpt))%0A green_collect.update(green_neigh)%0A%0A inter_collect = green_collect.intersection(blue_collect)%0A inter_collect.difference_update(stablept)%0A blue_collect.difference_update(inter_collect)%0A%0A restpt = allpt.difference(green_collect.union(blue_collect))%0A if len(restpt) == 0:%0A break%0A%0A blue_sdpt = random.choice(list(restpt))%0A blue_collect.add(blue_sdpt)%0A stablept.add(blue_sdpt)%0A blue_neigh = set(nx.neighbors(G, blue_sdpt))%0A blue_collect.update(blue_neigh)%0A%0A inter_collect = green_collect.intersection(blue_collect)%0A inter_collect.difference_update(stablept)%0A%0A green_collect.difference_update(inter_collect)%0A%0A restpt = allpt.difference(green_collect.union(blue_collect))%0A%0A%0A%0A%0A %0A %0A%0A%0A%0A %0A
|
|
05c588866cc66bff33cb77fe35434f850ddd07f0
|
Handle values larger than 2**63-1 in numeric crash address conversion (#119)
|
server/crashmanager/migrations/0009_copy_crashaddress.py
|
server/crashmanager/migrations/0009_copy_crashaddress.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import sys
from django.db import models, migrations
from django.conf import settings
def create_migration_tool(apps, schema_editor):
    """Backfill CrashEntry.crashAddressNumeric from the hex crashAddress field.

    Empty or missing addresses map to None; unparsable values are reported
    on stderr and left unconverted.
    """
    CrashEntry = apps.get_model("crashmanager", "CrashEntry")

    for entry in CrashEntry.objects.filter(crashAddressNumeric=None):
        if entry.crashAddress is None or len(entry.crashAddress) == 0:
            entry.crashAddressNumeric = None
            entry.save()
        else:
            try:
                # int() handles arbitrary-precision values on both Python 2
                # and 3 (py2 promotes to long automatically).
                entry.crashAddressNumeric = int(entry.crashAddress, 16)

                # Crash addresses are typically unsigned, but our database
                # can only store signed 64 bit integers. Convert to signed
                # if the value exceeds the maximum for signed 64 bit.
                if entry.crashAddressNumeric > (2 ** 63 - 1):
                    entry.crashAddressNumeric -= 2 ** 64

                entry.save()
            except ValueError:
                print("Failed to convert crash address value: %s" % entry.crashAddress, file=sys.stderr)
class Migration(migrations.Migration):
    """Data migration: populate the numeric crash-address column."""

    dependencies = [
        ("crashmanager", "0008_crashentry_crashaddressnumeric"),
    ]

    operations = [
        migrations.RunPython(create_migration_tool),
    ]
|
Python
| 0
|
@@ -593,16 +593,391 @@
ss, 16)%0A
+ %0A # Crash addresses are typically unsigned, but our database%0A # can only store signed 64 bit integers. Convert to signed%0A # if the value exceeds maximum value for signed 64 bit.%0A if (entry.crashAddressNumeric %3E (2**63-1)):%0A entry.crashAddressNumeric -= 2**64%0A %0A
|
4c381da905d81bde6ed28407f8e4cd3bcbd6d8be
|
Add cart forms
|
apps/cart/forms.py
|
apps/cart/forms.py
|
Python
| 0.000001
|
@@ -0,0 +1,373 @@
+from django import forms%0A%0A%0APRODUCT_QUANTITY_CHOICES = %5B(i, str(i)) for i in range(1, 21)%5D%0A%0A%0Aclass CartAddProductForm(forms.Form):%0A quantity = forms.TypedChoiceField(choices=PRODUCT_QUANTITY_CHOICES,%0A coerce=int)%0A update = forms.BooleanField(required=False, initial=False,%0A widget=forms.HiddenInput)%0A
|
|
55ee723c6a95046d59efedb47f276262835892af
|
reverse string
|
learning/test/reverse_string.py
|
learning/test/reverse_string.py
|
Python
| 0.999999
|
@@ -0,0 +1,190 @@
+__author__ = 'root'%0Adef reverse(text):%0A reverse_text = %22%22%0A for char in range(len(text)):%0A reverse_text = text%5Bchar%5D + reverse_text%0A return reverse_text%0A%0Aprint reverse(%22abcd%22)
|
|
1bb2a9213dad8bde8a05da63438dbcdd0d8d09c6
|
add example for asynchronous execution, little simpler than multiprocessing, uses a decorator to simplify it further
|
examples/async.py
|
examples/async.py
|
Python
| 0
|
@@ -0,0 +1,2483 @@
+#!/usr/bin/env python2.7%0A%22%22%22Example of asynchronously running %22show version%22.%0A%0Aasync(): decorator to make further functions asynchronous%0Acommand_runner(): creates a connection and runs an arbitrary command%0Amain(): entry point, runs the command_runner%0A%22%22%22%0Aimport netmiko%0Afrom inspect import getmodule%0Afrom multiprocessing import Pool%0A%0Adef async(decorated):%0A %22%22%22Wraps a top-level function around an asynchronous dispatcher.%0A%0A When the decorated function is called, a task is submitted to a process%0A pool, and a future object is returned, providing access to an eventual%0A return value.%0A%0A The future object has a blocking get() method to access the task result:%0A it will return immediately if the job is already done, or block until it%0A completes.%0A%0A See http://stackoverflow.com/questions/1239035/asynchronous-method-call-in-python%0A %22%22%22%0A # Keeps the original function visible from the module global namespace,%0A # under a name consistent to its __name__ attribute. 
This is necessary for%0A # the multiprocessing pickling machinery to work properly.%0A module = getmodule(decorated)%0A decorated.__name__ += '_original'%0A setattr(module, decorated.__name__, decorated)%0A%0A def send(*args, **opts):%0A %22%22%22Returns asynchronously.%22%22%22%0A return async.pool.apply_async(decorated, args, opts)%0A%0A return send%0A%0A@async%0Adef command_runner(dispatcher, cmd):%0A %22%22%22Run show version on many devices.%22%22%22%0A # Prepare the dispatcher%0A dsp = netmiko.ssh_dispatcher(dispatcher%5B%22device_type%22%5D)%0A # Run the dispatcher and get the device ready%0A dev = dsp(**dispatcher)%0A # returns the output of the variable %60cmd%60 that was passed%0A return dev.send_command(cmd)%0A%0Adef main():%0A %22%22%22Program entry point.%22%22%22%0A async.pool = Pool(10)%0A devices = %5B%2210.10.10.1%22, %2210.10.10.2%22, %2210.10.10.3%22, %2210.10.10.4%22, %2210.10.10.5%22,%0A %2210.10.10.6%22, %2210.10.10.7%22, %2210.10.10.8%22, %2210.10.10.9%22, %2210.10.10.10%22%5D%0A cmd = %22show version%22%0A results = %5B%5D%0A for device in devices:%0A # Assumes all devices are Juniper devices%0A dispatcher = %7B%22device_type%22: %22juniper%22,%0A %22ip%22: device,%0A %22username%22: %22user%22,%0A %22password%22: %22pass%22%7D%0A result = command_runner(dispatcher, cmd)%0A results.append(result)%0A # Must use the %60get()%60 method or you will just get a list of pool objects%0A results = %5Bi.get() for i in results%5D%0A print results%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
|
|
d159b32d51339915ef633f3c6d33ce5eeafa78d6
|
Add py solution for 396. Rotate Function
|
py/rotate-function.py
|
py/rotate-function.py
|
Python
| 0.998434
|
@@ -0,0 +1,472 @@
+class Solution(object):%0A def maxRotateFunction(self, A):%0A %22%22%22%0A :type A: List%5Bint%5D%0A :rtype: int%0A %22%22%22%0A lA = len(A)%0A if not lA:%0A return 0%0A subsum = 0%0A F = 0%0A for i in xrange(1, lA):%0A subsum += A%5B-i%5D%0A F += subsum%0A subsum += A%5B0%5D%0A m = F%0A for i in xrange(1, lA):%0A F += subsum%0A F -= lA * A%5B-i%5D%0A m = max(m, F)%0A return m%0A
|
|
70ff0faa7da6066bb75ddb871f67aa749f5bdc4e
|
Add custom field rendering tests
|
django_admin_bootstrapped/tests.py
|
django_admin_bootstrapped/tests.py
|
Python
| 0
|
@@ -0,0 +1,1744 @@
+from __future__ import absolute_import%0A%0Afrom django.test import TestCase%0Afrom django.contrib.admin.widgets import AdminDateWidget%0Afrom django.template import Template, Context%0Afrom django import forms%0A%0Atry:%0A from bootstrap3 import renderers%0Aexcept ImportError:%0A # nothing to test if we don't have django-bootstrap3 installed%0A pass%0Aelse:%0A from .renderers import BootstrapFieldRenderer%0A%0A class RendererTestCase(TestCase):%0A def setUp(self):%0A class TestForm(forms.Form):%0A char = forms.CharField(max_length=255)%0A hidden = forms.CharField(max_length=255, widget=forms.HiddenInput())%0A date = forms.DateField(widget=AdminDateWidget())%0A%0A self.form = TestForm(%7B%0A 'char': 'hi there',%0A 'hidden': 'hidden text',%0A 'date': '20140111',%0A %7D)%0A%0A def render_template(self, field):%0A context = %7B 'field': field %7D%0A template = Template('%7B%25 load bootstrapped_goodies_tags %25%7D %7B%25 dab_field_rendering field %25%7D')%0A return template.render(Context(context))%0A%0A def test_basic_functionality(self):%0A field = self.form%5B'char'%5D%0A html = self.render_template(field)%0A # we prepend this class%0A self.assertIn('class=%22form-control', html)%0A%0A def test_hidden_input(self):%0A field = self.form%5B'hidden'%5D%0A html = self.render_template(field)%0A self.assertIn('type=%22hidden%22', html)%0A%0A def test_control_inline(self):%0A field = self.form%5B'date'%5D%0A html = self.render_template(field)%0A # we prepend these classes%0A self.assertIn('class=%22form-control form-control-inline', html)%0A
|
|
84e14782f353ef1d0dec20ed1da31cfb1da413a4
|
Add diary example.
|
examples/diary.py
|
examples/diary.py
|
Python
| 0
|
@@ -0,0 +1,1652 @@
+#!/usr/bin/env python%0A%0Afrom collections import OrderedDict%0Aimport datetime%0Aimport sys%0A%0Afrom walrus import *%0A%0Adatabase = Database(host='localhost', port=6379, db=0)%0A%0Aclass Entry(Model):%0A database = database%0A namespace = 'diary'%0A%0A content = TextField(fts=True)%0A timestamp = DateTimeField(default=datetime.datetime.now, index=True)%0A%0A%0Adef menu_loop():%0A choice = None%0A while choice != 'q':%0A for key, value in menu.items():%0A print('%25s) %25s' %25 (key, value.__doc__))%0A choice = raw_input('Action: ').lower().strip()%0A if choice in menu:%0A menu%5Bchoice%5D()%0A%0Adef add_entry():%0A %22%22%22Add entry%22%22%22%0A print('Enter your entry. Press ctrl+d when finished.')%0A data = sys.stdin.read().strip()%0A if data and raw_input('Save entry? %5BYn%5D ') != 'n':%0A Entry.create(content=data)%0A print('Saved successfully.')%0A%0Adef view_entries(search_query=None):%0A %22%22%22View previous entries%22%22%22%0A if search_query:%0A expr = Entry.content.match(search_query)%0A else:%0A expr = None%0A%0A query = Entry.query(expr, order_by=Entry.timestamp.desc())%0A for entry in query:%0A timestamp = entry.timestamp.strftime('%25A %25B %25d, %25Y %25I:%25M%25p')%0A print(timestamp)%0A print('=' * len(timestamp))%0A print(entry.content)%0A print('n) next entry')%0A print('q) return to main menu')%0A if raw_input('Choice? (Nq) ') == 'q':%0A break%0A%0Adef search_entries():%0A %22%22%22Search entries%22%22%22%0A view_entries(raw_input('Search query: '))%0A%0Amenu = OrderedDict(%5B%0A ('a', add_entry),%0A ('v', view_entries),%0A ('s', search_entries),%0A%5D)%0A%0Aif __name__ == '__main__':%0A menu_loop()%0A
|
|
736c4ba9a865143cf697121229b9c5016a2fbce1
|
Add retries for GET calls which return 502 or 504 errors.
|
qds_sdk/connection.py
|
qds_sdk/connection.py
|
import sys
import requests
import logging
import ssl
import json
import pkg_resources
from requests.adapters import HTTPAdapter
try:
from requests.packages.urllib3.poolmanager import PoolManager
except ImportError:
from urllib3.poolmanager import PoolManager
from qds_sdk.retry import retry
from qds_sdk.exception import *
log = logging.getLogger("qds_connection")
"""
see http://stackoverflow.com/questions/14102416/python-requests-requests-exceptions-sslerror-errno-8-ssl-c504-eof-occurred
"""
class MyAdapter(HTTPAdapter):
def init_poolmanager(self, connections, maxsize,
block=False):
self.poolmanager = PoolManager(num_pools=connections,
maxsize=maxsize,
block=block,
ssl_version=ssl.PROTOCOL_TLSv1)
class Connection:
def __init__(self, auth, base_url, skip_ssl_cert_check, reuse=True):
self.auth = auth
self.base_url = base_url
self.skip_ssl_cert_check = skip_ssl_cert_check
self._headers = {'User-Agent': 'qds-sdk-py-%s' % pkg_resources.get_distribution("qds-sdk").version,
'Content-Type': 'application/json'}
self.reuse = reuse
if reuse:
self.session = requests.Session()
self.session.mount('https://', MyAdapter())
@retry((RetryWithDelay, requests.Timeout), tries=6, delay=30, backoff=2)
def get_raw(self, path, params=None):
return self._api_call_raw("GET", path, params=params)
@retry((RetryWithDelay, requests.Timeout), tries=6, delay=30, backoff=2)
def get(self, path, params=None):
return self._api_call("GET", path, params=params)
def put(self, path, data=None):
return self._api_call("PUT", path, data)
def post(self, path, data=None):
return self._api_call("POST", path, data)
def delete(self, path, data=None):
return self._api_call("DELETE", path, data)
def _api_call_raw(self, req_type, path, data=None, params=None):
url = self.base_url.rstrip('/') + '/' + path
if self.reuse:
x = self.session
else:
x = requests
kwargs = {'headers': self._headers, 'auth': self.auth, 'verify': not self.skip_ssl_cert_check}
if data:
kwargs['data'] = json.dumps(data)
if params:
kwargs['params'] = params
log.info("[%s] %s" % (req_type, url))
log.info("Payload: %s" % json.dumps(data, indent=4))
log.info("Params: %s" % params)
if req_type == 'GET':
r = x.get(url, timeout=300, **kwargs)
elif req_type == 'POST':
r = x.post(url, timeout=300, **kwargs)
elif req_type == 'PUT':
r = x.put(url, timeout=300, **kwargs)
elif req_type == 'DELETE':
r = x.delete(url, timeout=300, **kwargs)
else:
raise NotImplemented
self._handle_error(r)
return r
def _api_call(self, req_type, path, data=None, params=None):
return self._api_call_raw(req_type, path, data=data, params=params).json()
def _handle_error(self, response):
"""Raise exceptions in response to any http errors
Args:
response: A Response object
Raises:
BadRequest: if HTTP error code 400 returned.
UnauthorizedAccess: if HTTP error code 401 returned.
ForbiddenAccess: if HTTP error code 403 returned.
ResourceNotFound: if HTTP error code 404 is returned.
MethodNotAllowed: if HTTP error code 405 is returned.
ResourceConflict: if HTTP error code 409 is returned.
ResourceInvalid: if HTTP error code 422 is returned.
ClientError: if HTTP error code falls in 401 - 499.
ServerError: if HTTP error code falls in 500 - 599.
ConnectionError: if unknown HTTP error code returned.
"""
code = response.status_code
if 200 <= code < 400:
return
if code == 400:
sys.stderr.write(response.text + "\n")
raise BadRequest(response)
elif code == 401:
sys.stderr.write(response.text + "\n")
raise UnauthorizedAccess(response)
elif code == 403:
sys.stderr.write(response.text + "\n")
raise ForbiddenAccess(response)
elif code == 404:
sys.stderr.write(response.text + "\n")
raise ResourceNotFound(response)
elif code == 405:
sys.stderr.write(response.text + "\n")
raise MethodNotAllowed(response)
elif code == 409:
sys.stderr.write(response.text + "\n")
raise ResourceConflict(response)
elif code == 422:
sys.stderr.write(response.text + "\n")
raise ResourceInvalid(response)
elif code in (449, 503):
sys.stderr.write(response.text + "\n")
raise RetryWithDelay(response)
elif 401 <= code < 500:
sys.stderr.write(response.text + "\n")
raise ClientError(response)
elif 500 <= code < 600:
sys.stderr.write(response.text + "\n")
raise ServerError(response)
else:
raise ConnectionError(response)
|
Python
| 0
|
@@ -4964,17 +4964,27 @@
(449, 50
-3
+2, 503, 504
):%0A
|
51eb0cd7b7e0101843dbff9b4ddd4fe0d32af2c6
|
Add tasks for installing JCC and PyLucene.
|
buedafab/tasks.py
|
buedafab/tasks.py
|
"""Relatively self-contained, simple Fabric commands."""
from fabric.api import require, env, local, warn, settings, cd
import os
from buedafab.operations import run, exists, conditional_rm, sed, sudo
from buedafab import environments, deploy, utils
def setup():
"""A shortcut to bootstrap or update a virtualenv with the dependencies for
this project. Installs the `common.txt` and `dev.txt` pip requirements and
initializes/updates any git submodules.
setup() also supports the concept of "private packages" - i.e. Python
packages that are not available on PyPi but require some local compilation
and thus don't work well as git submodules. It can either download a tar
file of the package from S3 or clone a git repository, build and install the
package.
Any arbitrary functions in env.extra_setup_tasks will also be run from
env.root_dir.
"""
environments.localhost()
with settings(virtualenv=None):
for package in deploy.packages._read_private_requirements():
deploy.packages._install_private_package(*package)
deploy.packages._install_manual_packages(env.root_dir)
deploy.packages._install_pip_requirements(env.root_dir)
with cd(env.root_dir):
local('git submodule update --init --recursive')
for task in env.extra_setup_tasks:
task()
def enable():
"""Toggles a value True. Used in 'toggle' commands such as
maintenancemode().
"""
env.toggle = True
def disable():
"""Toggles a value False. Used in 'toggle' commands such as
maintenancemode().
"""
env.toggle = False
def maintenancemode():
"""If using the maintenancemode app
(https://github.com/jezdez/django-maintenancemode), this command will toggle
it on and off. It finds the `MAINTENANCE_MODE` variable in your
`settings.py` on the remote server, toggles its value and restarts the web
server.
Requires the env keys:
toggle - set by enable() or disable(), indicates whether we should turn
maintenance mode on or off.
settings - relative path from the project root to the settings.py file
current_release_path - path to the current release on the remote server
"""
require('toggle', provided_by=[enable, disable])
require('settings')
require('current_release_path')
settings_file = os.path.join(utils.absolute_release_path(), env.settings)
if exists(settings_file):
sed(settings_file, '(MAINTENANCE_MODE = )(False|True)',
'\\1%(toggle)s' % env)
restart_webserver()
else:
warn('Settings file %s could not be found' % settings_file)
def rollback():
"""Swaps the deployed version of the app to the previous version.
Requires the env keys:
path - root deploy target for this app
releases_root - subdirectory that stores the releases
current_release_symlink - name of the symlink pointing to the currently
deployed version
Optional:
crontab - relative path from the project root to a crontab to install
deploy_user - user that should run the crontab
"""
require('path')
require('releases_root')
require('current_release_symlink')
require('crontab')
require('deploy_user')
with cd(os.path.join(env.path, env.releases_root)):
previous_link = deploy.release.alternative_release_path()
conditional_rm(env.current_release_symlink)
run('ln -fs %s %s' % (previous_link, env.current_release_symlink))
deploy.cron.conditional_install_crontab(utils.absolute_release_path(),
env.crontab, env.deploy_user)
restart_webserver()
def restart_webserver(hard_reset=False):
"""Restart the Gunicorn application webserver.
Requires the env keys:
unit - short name of the app, assuming /etc/init.d/%(unit)s is the
server process init.d script
"""
require('unit')
with settings(warn_only=True):
sudo('/etc/init.d/%(unit)s restart' % env)
def rechef():
"""Run the latest Chef cookbooks on all servers."""
sudo('chef-client')
|
Python
| 0
|
@@ -4170,12 +4170,480 @@
ef-client')%0A
+%0Adef install_jcc(**kwargs):%0A try:%0A import jcc%0A except ImportError:%0A run('git clone git://gist.github.com/729451.git build-jcc')%0A run('build-jcc/install_jcc.sh')%0A run('rm -rf build-jcc')%0A%0Adef install_pylucene(**kwargs):%0A try:%0A import lucene%0A except ImportError:%0A run('git clone git://gist.github.com/728598.git build-pylucene')%0A run('build-pylucene/install_pylucene.sh')%0A run('rm -rf build-pylucene')%0A
|
eca8accb984c252f36289cd7bbab8ab23c198317
|
Create problem3.py
|
W2/L4/problem3.py
|
W2/L4/problem3.py
|
Python
| 0.000022
|
@@ -0,0 +1,87 @@
+#L4 PROBLEM 3 %0A%0Adef square(x):%0A '''%0A x: int or float.%0A '''%0A return x ** 2%0A
|
|
3332370d70ad30856c9517e51eedc454500f8bf8
|
Add forwarding script for build-bisect.py.
|
build/build-bisect.py
|
build/build-bisect.py
|
Python
| 0.000002
|
@@ -0,0 +1,324 @@
+#!/usr/bin/python%0A# Copyright (c) 2010 The Chromium Authors. All rights reserved.%0A# Use of this source code is governed by a BSD-style license that can be%0A# found in the LICENSE file.%0A%0Aimport sys%0A%0Aprint %22This script has been moved to tools/bisect-builds.py.%22%0Aprint %22Please update any docs you're working from!%22%0A%0Asys.exit(1)%0A
|
|
35e720cf7b9cbae4e077d0699dd321f741180787
|
Create cached_test.py
|
cached/cached_test.py
|
cached/cached_test.py
|
Python
| 0.000003
|
@@ -0,0 +1,167 @@
+from cached import cached%0A%0A%0Aclass Double:%0A def __init__(self, x):%0A self._x = x%0A %0A @cached(%22_double_x%22)%0A def value(self):%0A return self._x * 2%0A
|
|
ee7c257b62bff832b899f54fd7bf39ae47db05b7
|
Add tool to get new url
|
get_new_url.py
|
get_new_url.py
|
Python
| 0
|
@@ -0,0 +1,385 @@
+import sys%0A%0Aimport polycules%0A%0A%0Aif len(sys.argv) != 2:%0A print('Expected ID, got too little or too much')%0A%0Aold_id = sys.argv%5B1%5D%0A%0Adb = polycules.connect_db()%0Aresult = db.execute('select hash from polycules where id = ?', %5B%0A old_id,%0A%5D).fetchone()%0A%0Aif result is None:%0A print(%22Couldn't find the polycule with that ID%22)%0A%0Aprint('New url: https://polycul.es/%7B%7D'.format(result%5B0%5D%5B:7%5D))%0A
|
|
a994df7e8961e0d82a37ed268dba55c021c7ccd1
|
Move order - here till i get the order_desc stuff working.
|
objects/OrderExtra/Move.py
|
objects/OrderExtra/Move.py
|
Python
| 0
|
@@ -0,0 +1,507 @@
+%0Afrom xstruct import pack%0Afrom objects import Order%0A%0Aclass Move(Order):%0A%09%22%22%22%5C%0A%09Move to a place in space.%0A%09%22%22%22%0A%09subtype = 1%0A%09substruct = %22qqq%22%0A%0A%09def __init__(self, sequence, %5C%0A%09%09%09%09%09id,%09type, slot, turns, resources, %5C%0A%09%09%09%09%09x, y, z):%0A%09%09Order.__init__(self, sequence, %5C%0A%09%09%09%09%09id, type, slot, turns, resources,%0A%09%09%09%09%09x, y, z)%0A%0A%09%09self.length += 3*8%0A%09%09self.pos = (x, y, z)%0A%0A%09def __repr__(self):%0A%09%09output = Order.__repr__(self)%0A%09%09output += pack(self.substruct, self.pos%5B0%5D, self.pos%5B1%5D, self.pos%5B2%5D)%0A%0A%09%09return output%0A
|
|
fb2d69698326506214f4fbb2c158d1193a138f04
|
Update sqlserver.py to default to integrated security if no username/password
|
checks.d/sqlserver.py
|
checks.d/sqlserver.py
|
'''
Check the performance counters from SQL Server
'''
from checks import AgentCheck
ALL_INSTANCES = 'ALL'
VALID_METRIC_TYPES = ('gauge', 'rate', 'histogram')
class SQLServer(AgentCheck):
METRICS = [
('sqlserver.buffer.cache_hit_ratio', 'gauge', 'Buffer cache hit ratio'),
('sqlserver.buffer.page_life_expectancy', 'gauge', 'Page life expectancy'),
('sqlserver.stats.batch_requests', 'gauge', 'Batch Requests/sec'),
('sqlserver.stats.sql_compilations', 'gauge', 'SQL Compilations/sec'),
('sqlserver.stats.sql_recompilations', 'gauge', 'SQL Re-Compilations/sec'),
('sqlserver.stats.connections', 'gauge', 'User connections'),
('sqlserver.stats.lock_waits', 'gauge', 'Lock Waits/sec', '_Total'),
('sqlserver.access.page_splits', 'gauge', 'Page Splits/sec'),
('sqlserver.stats.procs_blocked', 'gauge', 'Processes Blocked'),
('sqlserver.buffer.checkpoint_pages', 'gauge', 'Checkpoint pages/sec')
]
def __init__(self, name, init_config, agentConfig):
AgentCheck.__init__(self, name, init_config, agentConfig)
# Load any custom metrics from conf.d/sqlserver.yaml
for row in init_config.get('custom_metrics', []):
if row['type'] not in VALID_METRIC_TYPES:
self.log.error('%s has an invalid metric type: %s' \
% (row['name'], row['type']))
self.METRICS.append( (row['name'], row['type'], row['counter_name'],
row.get('instance_name', ''), row.get('tag_by', None)) )
# Cache connections
self.connections = {}
def _conn_key(self, host, username, password, database):
''' Return a key to use for the connection cache
'''
return '%s:%s:%s:%s' % (host, username, password, database)
def _conn_string(self, host, username, password, database):
''' Return a connection string to use with adodbapi
'''
conn_str = 'Provider=SQLOLEDB;Data Source=%s;Initial Catalog=%s;' \
% (host, database)
if username:
conn_str += 'User ID=%s;' % (username)
if password:
conn_str += 'Password=%s;' % (password)
return conn_str
def check(self, instance):
try:
import adodbapi
except ImportError:
raise Exception("Unable to import adodbapi module.")
host = instance.get('host', '127.0.0.1;1433')
username = instance.get('username')
password = instance.get('password')
database = instance.get('database', 'master')
conn_key = self._conn_key(host, username, password, database)
if conn_key not in self.connections:
try:
conn_str = self._conn_string(host, username, password, database)
conn = adodbapi.connect(conn_str)
self.connections[conn_key] = conn
except Exception, e:
cx = "%s - %s" % (host, database)
raise Exception("Unable to connect to SQL Server for instance %s.\n %s" \
% (cx, traceback.format_exc()))
conn = self.connections[conn_key]
cursor = conn.cursor()
self._fetch_metrics(cursor)
def _fetch_metrics(self, cursor):
''' Fetch the metrics from the sys.dm_os_performance_counters table
'''
for metric in self.METRICS:
# Normalize all rows to the same size for easy of use
if len(metric) == 3:
metric = metric + ('', None)
elif len(metric) == 4:
metric = metric + (None,)
mname, mtype, counter, instance_n, tag_by = metric
# For "ALL" instances, we run a separate method because we have
# to loop over multiple results and tag the metrics
if instance_n == ALL_INSTANCES:
try:
self._fetch_all_instances(metric, cursor)
except Exception, e:
self.log.exception('Unable to fetch metric: %s' % mname)
self.warning('Unable to fetch metric: %s' % mname)
else:
try:
cursor.execute("""
select cntr_value
from sys.dm_os_performance_counters
where counter_name = ?
and instance_name = ?
""", (counter, instance_n))
(value,) = cursor.fetchone()
except Exception, e:
self.log.exception('Unable to fetch metric: %s' % mname)
self.warning('Unable to fetch metric: %s' % mname)
continue
# Save the metric
metric_func = getattr(self, mtype)
metric_func(mname, value)
def _fetch_all_instances(self, metric, cursor):
mname, mtype, counter, instance_n, tag_by = metric
cursor.execute("""
select instance_name, cntr_value
from sys.dm_os_performance_counters
where counter_name = ?
and instance_name != '_Total'
""", (counter,))
rows = cursor.fetchall()
for instance_name, cntr_value in rows:
value = cntr_value
tags = ['%s:%s' % (tag_by, instance_name.strip())]
metric_func = getattr(self, mtype)
metric_func(mname, value, tags=tags)
|
Python
| 0
|
@@ -2206,16 +2206,109 @@
ssword)%0A
+ if not username and not password:%0A conn_str += 'Integrated Security=SSPI;'
%0A
|
c310b7230d24a782c1f3773d2a3095de46530513
|
Update ssnc.py to support Tencent-Nintendo Switch (#718)
|
cogs/ssnc.py
|
cogs/ssnc.py
|
# ISC License
#
# Copyright (c) 2019, Valentijn "noirscape" V.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from discord.ext import commands
from discord.ext.commands import Cog
import re
def setup(bot):
bot.add_cog(SwitchSerialNumberCheck(bot))
class SwitchSerialNumberCheck(Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=["ssnc"])
async def check_nx_serial(self, ctx, serial):
"""Check the given Switch serial to see if it is patched or not. For safety reasons, the invoking message is
removed."""
try:
await ctx.message.delete()
except:
pass
serial = serial.split()[0].upper()
mariko = False
if not re.match("XA[JKW][1479][0-9]{6}", serial):
# This should catch serials from the new "mariko" units
# XKW10000000000, XKJ10000000000 = HAC-001-01, the "New Switch"
# XJW01000000000, XWW01000000000 = HDH-001, the Switch Lite
# As not much about the assembly line is known yet every digit will count for the filter
if re.match("X[KJW][JW][0-9]{7}", serial):
mariko = True
else:
return await ctx.send("This is not a valid serial number!\n"
"If you believe this to be an error, contact staff.")
patched = False
maybe = False
region = serial[2]
assembly_line = int(serial[3])
checking_value = int(serial[3:10])
safe_serial = serial[:9] + 'XXXX'
if region == 'J':
if assembly_line == 1:
if checking_value < 1002000:
pass
elif 1002000 <= checking_value < 1003000:
maybe = True
elif checking_value >= 1003000:
patched = True
elif assembly_line == 4:
if checking_value < 4004600:
pass
elif 4004600 <= checking_value < 4006000:
maybe = True
elif checking_value >= 4006000:
patched = True
elif assembly_line == 7:
if checking_value < 7004000:
pass
elif 7004000 <= checking_value < 7005000:
maybe = True
elif checking_value >= 7005000:
patched = True
elif region == 'W':
if assembly_line == 1:
if checking_value < 1007400:
pass
elif 1007400 <= checking_value < 1012000: # GBATemp thread is oddly disjointed here, proper value could
# be 1007500, not sure.
maybe = True
elif checking_value >= 1012000:
patched = True
elif assembly_line == 4:
if checking_value < 4001100:
pass
elif 4001100 <= checking_value < 4001200:
maybe = True
elif checking_value >= 4001200:
patched = True
elif assembly_line == 7:
if checking_value < 7001780:
pass
elif 7001780 <= checking_value < 7003000:
maybe = True
elif checking_value >= 7003000:
maybe = True
elif assembly_line == 9:
maybe = True
elif region == 'K':
maybe = True
if mariko:
return await ctx.send("{}: Serial {} seems to be a \"mariko\" Switch or Switch Lite.\n"
"These are currently not hackable or vulnerable to any known exploits".format(ctx.author.mention, safe_serial))
elif maybe:
return await ctx.send("{}: Serial {} _might_ be patched. The only way you can know this for sure is by "
"pushing the payload manually. You can find instructions to do so here: "
"https://switchgui.de/switch-guide/user_guide/emummc/sending_payload/".format(ctx.author.mention,
safe_serial))
elif patched:
return await ctx.send("{}: Serial {} is patched.".format(ctx.author.mention, safe_serial))
else:
return await ctx.send("{}: Serial {} is not patched.".format(ctx.author.mention, safe_serial))
|
Python
| 0
|
@@ -1802,16 +1802,17 @@
%5BKJW%5D%5BJW
+C
%5D%5B0-9%5D%7B7
@@ -1816,32 +1816,93 @@
%5D%7B7%7D%22, serial):%0A
+ # Region %22C%22 is Tencent-Nintendo Switch. Mariko.%0A
|
b8764629331caeeb37a4845480ed884841719525
|
scale phage counts to percent so match bacteria counts
|
code/percent_phage_counts.py
|
code/percent_phage_counts.py
|
Python
| 0.000001
|
@@ -0,0 +1,1127 @@
+%22%22%22%0AThe phage counts per metagenome are normalized based on the number of %0Areads that hit. I want to scale that to a percent, so that it matches%0Athe bacterial data. If there was a single phage present it would get %0A100%25 of the reads%0A%22%22%22%0A%0A%0Aimport os%0Aimport sys%0A%0A%0Atry:%0A inf = sys.argv%5B1%5D%0A ouf = sys.argv%5B2%5D%0Aexcept:%0A sys.exit(sys.argv%5B0%5D + %22 %3Cphage abundance file%3E (probably normalized_phage_mg_counts.tsv) %3Coutput file%3E%22)%0A%0A%0Aheader = None%0Adata = %5B%5D%0Atotal = %5B%5D%0Awith open(inf, 'r') as fin:%0A header = fin.readline()%0A h = header.split(%22%5Ct%22)%0A for i in range(len(h)):%0A total.append(0)%0A for l in fin:%0A p = l.strip().split(%22%5Ct%22)%0A for i in range(1, len(p)-1):%0A total%5Bi%5D += float(p%5Bi%5D)%0A data.append(p)%0A%0Awith open(ouf, 'w') as out:%0A out.write(header)%0A for l in data:%0A out.write(l%5B0%5D)%0A for i in range(1, len(l)-1):%0A if total%5Bi%5D == 0:%0A # this metagenome has no phage hits!%0A out.write(%22%5Ct0%22)%0A else:%0A out.write(%22%5Ct%22 + str(1.0 * float(l%5Bi%5D)/total%5Bi%5D * 100))%0A out.write(%22%5Ct%22 + l%5B-1%5D + %22%5Cn%22)%0A%0A
|
|
d2a84fb3a8165c9526aa5c96f308dda3b92a2c2c
|
add new decision module
|
code/decision.py
|
code/decision.py
|
Python
| 0
|
@@ -0,0 +1,571 @@
+%22%22%22%0AModule for rover decision-handling.%0A%0AUsed to build a decision tree for determining throttle, brake and%0Asteer commands based on the output of the perception_step() function%0Ain the perception module.%0A%0A%22%22%22%0A%0A__author__ = 'Salman Hashmi'%0A__license__ = 'BSD License'%0A%0A%0Aimport time%0A%0Aimport numpy as np%0A%0Aimport states%0Aimport events%0A%0A%0Aclass DecisionHandler():%0A %22%22%22Handle events and switch between states.%22%22%22%0A%0A def __init__(self):%0A %22%22%22Initialize a DecisionHandler instance.%22%22%22%0A%0A def execute(self, Rover):%0A %22%22%22Select and execute the current state action.%22%22%22%0A%0A
|
|
3896ddcf660e168afaa80a0be9d7b40b6dd15967
|
Add script to clean source code of compiled files.
|
sansview/clean.py
|
sansview/clean.py
|
Python
| 0
|
@@ -0,0 +1,284 @@
+%22%22%22%0D%0A Remove all compiled code.%0D%0A%22%22%22%0D%0Aimport os%0D%0A%0D%0Afiledirs = %5B'.', 'perspectives', 'perspectives/fitting'%5D%0D%0A%0D%0Afor d in filedirs:%0D%0A files = os.listdir(d)%0D%0A for f in files:%0D%0A if f.find('.pyc')%3E0:%0D%0A print %22Removed%22, f%0D%0A os.remove(os.path.join(d,f))
|
|
ec736876e11a5aa4f52c63a91b05fc342e298051
|
Add config.sample.py.
|
config.sample.py
|
config.sample.py
|
Python
| 0
|
@@ -0,0 +1,507 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%22%22%22%0AFile: config.sample.py%0AAuthor: huxuan %3Ci@huxuan.org%3E%0ADescription: Configuration file for app.%0A%22%22%22%0A%0A# Debug or not%0ADEBUG = True%0A%0A# Make jsonfiy encode in utf-8.%0AJSON_AS_ASCII = False%0A%0A# Secret key.%0ASECRET_KEY = 'CAPUHOME_Secret_Key'%0A%0A# Database & sqlalchemy.%0ADB_USERNAME = 'username'%0ADB_PASSWORD = 'password'%0ADB_SERVER = 'localhost'%0ADB_NAME = 'dbname'%0ASQLALCHEMY_DATABASE_URI = 'mysql://%7B%7D:%7B%7D@%7B%7D/%7B%7D'.format(%0A DB_USERNAME, DB_PASSWORD, DB_SERVER, DB_NAME)%0A
|
|
ec9d97f7017939651fc78605fc81a2f030f88b5f
|
Add exceptions file
|
brew/exceptions.py
|
brew/exceptions.py
|
Python
| 0.000001
|
@@ -0,0 +1,486 @@
+# -*- coding: utf-8 -*-%0A%0A__all__ = %5B%0A u'BrewdayException',%0A u'DataLoaderException',%0A u'GrainException',%0A u'HopException',%0A u'StyleException',%0A u'YeastException',%0A%5D%0A%0A%0Aclass BrewdayException(Exception):%0A pass%0A%0A%0Aclass DataLoaderException(BrewdayException):%0A pass%0A%0A%0Aclass GrainException(BrewdayException):%0A pass%0A%0A%0Aclass HopException(BrewdayException):%0A pass%0A%0A%0Aclass StyleException(BrewdayException):%0A pass%0A%0A%0Aclass YeastException(BrewdayException):%0A pass%0A
|
|
c6eabd93a8ebfe82e2ee3a1118588158c530f8dd
|
Remove print() in executable
|
supernova/executable.py
|
supernova/executable.py
|
#!/usr/bin/env python
#
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains the functions needed for supernova and supernova-keyring commands
to run
"""
from __future__ import print_function
import sys
import click
import pkg_resources
from . import colors
from . import config
from . import credentials
from . import supernova
from . import utils
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
version = pkg_resources.require("supernova")[0].version
click.echo("supernova, version %s" % version)
ctx.exit()
def print_env_list(ctx, param, value):
    """Click callback: list every configured environment and exit.

    For each environment in the supernova configuration, prints a
    colored header line followed by the environment's configuration
    key/value pairs, sorted by key.
    """
    if not value or ctx.resilient_parsing:
        return
    nova_creds = config.run_config()
    for nova_env in nova_creds.keys():
        envheader = '-- %s ' % colors.gwrap(nova_env)
        # click.echo instead of print() for consistency with the rest of
        # this CLI (uniform handling of encoding and output redirection).
        click.echo(envheader.ljust(86, '-'))
        # Loop variable renamed from 'param' to avoid shadowing the
        # callback's own 'param' argument.
        for key, val in sorted(nova_creds[nova_env].items()):
            click.echo(' %s: %s' % (key.upper().ljust(25), val))
    ctx.exit()
@click.command()
@click.option('--executable', '-x', default='nova',
              help='command to run', show_default=True)
# BUG FIX: default was the *string* 'False', which is truthy; a boolean
# flag's default must be the boolean False.
@click.option('--debug', '-d', default=False, is_flag=True,
              help="Enable debugging", show_default=True)
@click.argument('environment', nargs=1)
@click.argument('command')
# BUG FIX: these callbacks must be eager (is_eager=True) so that
# `supernova --version` / `supernova --list` work without the otherwise
# required ENVIRONMENT/COMMAND arguments.
@click.option('--version', is_flag=True, callback=print_version,
              expose_value=False, is_eager=True, default=False,
              help="Print version number")
@click.option('--list', is_flag=True, callback=print_env_list,
              expose_value=False, is_eager=True, default=False,
              help="List all configured environments")
@click.pass_context
def run_supernova(ctx, executable, debug, environment, command):
    """
    supernova [environment] [command]

    Here are some example commands that may help you get started:

        supernova prod list

        supernova prod image-list

        supernova prod keypair-list
    """
    nova_creds = config.run_config()
    utils.check_environment_presets()

    # Is our environment argument a single environment or a supernova group?
    if utils.is_valid_group(environment, nova_creds):
        envs = utils.get_envs_in_group(environment, nova_creds)
    else:
        envs = [environment]

    supernova_args = {
        'debug': debug,
        'executable': executable
    }

    # Bail out early with a helpful message if the (single) environment
    # isn't in the configuration file at all.
    if len(envs) == 1 and not utils.is_valid_environment(envs[0], nova_creds):
        msg = ("\nCouldn't find an environment called '{0}' in your "
               "configuration file.\nTry supernova --list to see all "
               "configured environments.\n".format(envs[0]))
        click.echo(msg)
        ctx.exit(1)

    for env in envs:
        supernova_args['nova_env'] = env
        returncode = supernova.run_command(nova_creds, command,
                                           supernova_args)

    # NOTE(major): The return code here is the one that comes back from the
    # OS_EXECUTABLE that supernova runs (by default, 'nova').  When using
    # supernova groups, the return code is the one returned by the executable
    # for the last environment in the group.
    #
    # It's not ideal, but it's all I can think of for now. ;)
    sys.exit(returncode)
@click.command()
@click.option('--get', '-g', 'action', flag_value='get_credential',
              help='retrieve a credential from keyring storage')
@click.option('--set', '-s', 'action', flag_value='set_credential',
              help='store a credential in keyring storage',)
@click.argument('environment', nargs=1)
@click.argument('parameter', nargs=1)
@click.pass_context
def run_supernova_keyring(ctx, action, environment, parameter):
    """
    Sets or retrieves credentials stored in your system's keyring using the
    python-keyring module.

    Consider a supernova configuration file with these items:

    \b
        [prod]
        OS_PASSWORD=USE_KEYRING['production_sso']
        ...

    You could retrieve or set the credential using these commands:

    \b
        supernova -g prod production_sso  <= get the credential
        supernova -s prod production_sso  <= set the credential
    """
    if action == 'get_credential':
        result = credentials.get_user_password(env=environment,
                                               param=parameter)
        if not result:
            click.echo("\nUnable to find a credential matching the data "
                       "provided.")
            ctx.exit(1)
        else:
            click.echo("\nFound credential for {0}: {1}".format(*result))
            ctx.exit()
    elif action == 'set_credential':
        msg = """
Preparing to set a credential in the keyring for:

  - Environment  : {0}
  - Parameter    : {1}

If this is correct, enter the corresponding credential to store in your keyring
or press CTRL-C to abort""".format(environment, parameter)
        # hide_input keeps the credential off the terminal; CTRL-C aborts.
        credential = click.prompt(text=msg, hide_input=True)

        result = credentials.set_user_password(environment=environment,
                                               parameter=parameter,
                                               password=credential)

        if result:
            click.echo("\nSuccessfully stored.")
            ctx.exit()
        else:
            click.echo("\nUnable to store your credential.")
            ctx.exit(1)
    else:
        # BUG FIX: this is an error path, so exit with a non-zero status
        # (previously ctx.exit() returned 0 despite printing "ERROR").
        click.secho("ERROR: must specify --get or --set", bold=True)
        click.echo(ctx.get_help())
        ctx.exit(1)
|
Python
| 0.001291
|
@@ -1103,22 +1103,28 @@
ion
-%25s%22 %25
+%7B0%7D%22.format(
version)
%0A
@@ -1119,16 +1119,17 @@
version)
+)
%0A ctx
@@ -1332,18 +1332,18 @@
ader = '
---
+__
%25s ' %25
@@ -1347,43 +1347,59 @@
%25 c
-olors.gwrap(nova_env)%0A print
+lick.style(nova_env, fg='green')%0A click.echo
(env
@@ -1416,17 +1416,17 @@
st(86, '
--
+_
'))%0A
@@ -1499,21 +1499,26 @@
-print
+click.echo
(' %25s:
|
2dd55385c3c8209217bde19c5a8d30ad929ce084
|
Create employee.py
|
scheduler/employee.py
|
scheduler/employee.py
|
Python
| 0.000053
|
@@ -0,0 +1,1045 @@
+# -*- coding: utf-8 -*-%0A%0A# employee.py%0A#%0A# Created by Thomas Nelson %3Ctn90ca@gmail.com%3E%0A#%0A# Created..........2015-03-12%0A# Modified.........2015-03-12%0A%0A%0Aclass Employee (object):%0A%09%22%22%22This class will represent an employee and there available time slots%0A%09for each work day that the provided store is open.%0A%0A%09%22%22%22%0A%0A%0A%09def __init__(self, store, name):%0A%09%09%22%22%22%0A%0A%09%09%22%22%22%0A%0A%09%09self.date = %5B%5D%0A%09%09self.name = name%0A%0A%09%09self.date.append(%5BTrue%5D * len(store.date%5B0%5D))%0A%09%09self.date.append(%5BTrue%5D * len(store.date%5B1%5D))%0A%09%09self.date.append(%5BTrue%5D * len(store.date%5B2%5D))%0A%09%09self.date.append(%5BTrue%5D * len(store.date%5B3%5D))%0A%09%09self.date.append(%5BTrue%5D * len(store.date%5B4%5D))%0A%09%09self.date.append(%5BTrue%5D * len(store.date%5B5%5D))%0A%09# end def __init__%0A%0A%0A%09def add_hours(self, date, t_start, t_end, store):%0A%09%09%22%22%22%0A%0A%09%09%22%22%22%0A%0A%09%09for t in xrange(len(store.date%5Bdate%5D)):%0A%09%09%09if store.date%5Bdate%5D%5Bt%5D == t_start:%0A%09%09%09%09start_time = t%0A%09%09%09if store.date%5Bdate%5D%5Bt%5D == t_end:%0A%09%09%09%09end_time = t%0A%0A%09%09for time in xrange(start_time, end_time+1):%0A%09%09%09self.date%5Bdate%5D%5Btime%5D = False%0A%09# end def add_hours%0A%0A%0A# end class Employee%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.