| commit (stringlengths 40–40) | subject (stringlengths 1–3.25k) | old_file (stringlengths 4–311) | new_file (stringlengths 4–311) | old_contents (stringlengths 0–26.3k) | lang (stringclasses 3 values) | proba (float64 0–1) | diff (stringlengths 0–7.82k) |
|---|---|---|---|---|---|---|---|
cde401e95bef16b3bcc815251187af094240b598
|
Create check_linux.py
|
check_linux.py
|
check_linux.py
|
Python
| 0.000002
|
@@ -0,0 +1,1849 @@
+import cups
from twill.commands import *
import html2text
import subprocess
import time

##Emotional words
acceptance = ['congratulat', 'enjoy', 'party']
rejection = ['sorry', 'unfortunately', 'disappoint']

##function to login and save the html
def retrieve():
    go('https://decisions.mit.edu/decision.php')

    fv("1", "username", "username") #replace with the actual username
    fv("1", "password", "password") #replace with the actual password
    formaction('f','https://decisions.mit.edu/decision.php')
    submit()

    save_html('decision.html')

##function to check if the applicant has been accepted
def check():
    global acceptance, rejection

    html = open("decision.html").read()

    with open("decision.txt", "w") as text_file:
        text_file.write(html2text.html2text(html))

    converted = html2text.html2text(html).lower()

    if any(x in converted for x in acceptance):
        return "Congratulations! You have been admitted to the class of 2020"
        #command to be spoken in case of acceptance
    elif any(x in converted for x in rejection):
	return "I am extremely sorry. Unfortunately, you couldn't be admitted"
        #command to be spoken in case of rejection
    else:
        print "Unable to identify"
	return -1

##function to print the decision
def printit():
    conn = cups.Connection()
    printers = conn.getPrinters()
    printer_name = printers.keys()[1]
    cups.setUser('username') #replace with the computer's account name
    conn.printFile(printer_name, "decision.txt", "",{})

while True:
    retrieve()
    command=check()
    if command!=-1:
        printit()
        subprocess.call(['speech-dispatcher']) #start speech dispatcher
        subprocess.call(['spd-say', command]) #say the command
        break
    time.sleep(15) #recheck the decision every 15 seconds
|
|
fde083c87f0e2582fbf57415e957b93d116ad67a
|
Create RequestHandler related to GCI.
|
app/soc/modules/gci/views/base.py
|
app/soc/modules/gci/views/base.py
|
Python
| 0.000004
|
@@ -0,0 +1,2596 @@
+#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Module containing the boiler plate required to construct GCI views.
"""

__authors__ = [
  '"Selwyn Jacob" <selwynjacob90@gmail.com>',
  ]

from soc.views.base import RequestHandler

from soc.modules.gci.views import base_templates
from soc.modules.gci.views.helper import access_checker
from soc.modules.gci.views.helper.request_data import RequestData
from soc.modules.gci.views.helper.request_data import RedirectHelper


class RequestHandler(RequestHandler):
  """Customization required by GCI to handle HTTP requests.
  """

  def render(self, template_path, context):
    """Renders the page using the specified context.

    See soc.views.base.RequestHandler.

    The context object is extended with the following values:
      header: a rendered header.Header template for the current self.data
      mainmenu: a rendered site_menu.MainMenu template for the current self.data
      footer: a rendered site_menu.Footer template for the current self.data
    """
    context['header'] = base_templates.Header(self.data)
    context['mainmenu'] = base_templates.MainMenu(self.data)
    context['footer'] = base_templates.Footer(self.data)
    super(RequestHandler, self).render(template_path, context)

  def init(self, request, args, kwargs):
    self.data = RequestData()
    self.redirect = RedirectHelper(self.data, self.response)
    self.data.populate(self.redirect, request, args, kwargs)
    if self.data.is_developer:
      self.mutator = access_checker.DeveloperMutator(self.data)
      self.check = access_checker.DeveloperAccessChecker(self.data)
    else:
      self.mutator = access_checker.Mutator(self.data)
      self.check = access_checker.AccessChecker(self.data)

  def error(self, status, message=None):
    self.response.set_status(status)

    template_path = "v2/modules/gci/error.html"
    context = {
        'page_name': self.response.content,
        'message': message,
    }

    self.response.content = ''
    self.render(template_path, context)
|
|
e2669eddb9187db9a71095d8ed860f8b25369e78
|
add new package (#20106)
|
var/spack/repos/builtin/packages/py-catkin-pkg/package.py
|
var/spack/repos/builtin/packages/py-catkin-pkg/package.py
|
Python
| 0
|
@@ -0,0 +1,833 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)


class PyCatkinPkg(PythonPackage):
    """Library for retrieving information about catkin packages."""

    homepage = "https://wiki.ros.org/catkin_pkg"
    url = "https://pypi.io/packages/source/c/catkin-pkg/catkin_pkg-0.4.23.tar.gz"

    version('0.4.23', sha256='28ee181cca827c0aabf9397351f58a97e1475ca5ac7c106a5916e3ee191cd3d0')

    depends_on('py-setuptools', type=('build', 'run'))
    depends_on('py-docutils', type=('build', 'run'))
    depends_on('py-python-dateutil', type=('build', 'run'))
    depends_on('py-pyparsing', type=('build', 'run'))
    depends_on('py-argparse', when='^python@:2.6', type=('build', 'run'))
|
|
0106355df43bc35a75aafc6b9070f78131e89bef
|
Test for switching to postgres search backend
|
tests/search_backend_postgres.py
|
tests/search_backend_postgres.py
|
Python
| 0
|
@@ -0,0 +1,546 @@
+from wolis.test_case import WolisTestCase

class SearchBackendPostgresTest(WolisTestCase):
    def test_set_search_backend(self):
        self.login('morpheus', 'morpheus')
        self.acp_login('morpheus', 'morpheus')

        self.change_acp_knob(
            link_text='Search settings',
            check_page_text='Here you can define what search backend will be used',
            name='search_type',
            value='phpbb_search_fulltext_postgres',
        )

if __name__ == '__main__':
    import unittest
    unittest.main()
|
|
1f12da3d049527f838ab21c042b8f18e1977af49
|
Migrate existing platform admin services to not be counted
|
migrations/versions/0283_platform_admin_not_live.py
|
migrations/versions/0283_platform_admin_not_live.py
|
Python
| 0
|
@@ -0,0 +1,676 @@
+"""empty message

Revision ID: 0283_platform_admin_not_live
Revises: 0282_add_count_as_live
Create Date: 2016-10-25 17:37:27.660723

"""

# revision identifiers, used by Alembic.
revision = '0283_platform_admin_not_live'
down_revision = '0282_add_count_as_live'

from alembic import op
import sqlalchemy as sa


STATEMENT = """
    UPDATE
        services
    SET
        count_as_live = {count_as_live}
    FROM
        users
    WHERE
        services.created_by_id = users.id and
        users.platform_admin is true
    ;
"""


def upgrade():
    op.execute(STATEMENT.format(count_as_live='false'))

def downgrade():
    op.execute(STATEMENT.format(count_as_live='true'))
|
|
8e0e28c45616479c3d1fea9be78553185126743b
|
change case_type to location_type to be more clear about what's expected
|
corehq/apps/consumption/models.py
|
corehq/apps/consumption/models.py
|
from decimal import Decimal
from couchdbkit.ext.django.schema import Document, StringProperty, DecimalProperty
TYPE_DOMAIN = 'domain'
TYPE_PRODUCT = 'product'
TYPE_SUPPLY_POINT_TYPE = 'supply-point-type'
TYPE_SUPPLY_POINT = 'supply-point'
class DefaultConsumption(Document):
"""
Model for setting the default consumption value of an entity
"""
type = StringProperty() # 'domain', 'product', 'supply-point-type', 'supply-point'
domain = StringProperty()
product_id = StringProperty()
supply_point_type = StringProperty()
supply_point_id = StringProperty()
default_consumption = DecimalProperty()
def get_default_consumption(domain, product_id, case_type, case_id):
keys = [
[domain, product_id, {}, case_id],
[domain, product_id, case_type, None],
[domain, product_id, None, None],
[domain, None, None, None],
]
results = DefaultConsumption.get_db().view(
'consumption/consumption_index',
keys=keys, reduce=False, limit=1, descending=True,
)
results = results.one()
return Decimal(results['value']) if results else None
|
Python
| 0.000002
|
@@ -674,28 +674,32 @@
product_id,
-case
+location
_type, case_
@@ -788,20 +788,24 @@
uct_id,
-case
+location
_type, N
|
d43e9867c7a603a0767941f4ed963d9d857bca03
|
Fix test after API change
|
utest/serializing/test_reporting.py
|
utest/serializing/test_reporting.py
|
import unittest
from robot.output.logger import Logger
from robot.output.readers import ExecutionErrors
import resources
from robot.common.model import BaseTestSuite
import robot.output
from robot.serializing.testoutput import Reporter
import robot.serializing.testoutput
def set_serialize_log_mock():
results = {'log_path':None}
def serialize_log(test_output_datamodel, log_path, title=None):
results['log_path'] = log_path
results['title'] = title
robot.serializing.testoutput.serialize_log = serialize_log
return results
def set_serialize_report_mock():
results = {'report_path':None}
def serialize_report(test_output_datamodel, report_path, title=None, background=None, logpath=None):
results['report_path'] = report_path
results['title'] = title
results['background'] = background
results['logpath'] = logpath
robot.serializing.testoutput.serialize_report = serialize_report
return results
def set_process_outputs_mock():
results = {'paths':None}
def process_outputs(paths, settings):
results['paths'] = paths
results['settings'] = settings
suite = BaseTestSuite('Suite')
suite.starttime = 7
suite.endtime = 42
return suite, ExecutionErrors(None)
robot.serializing.testoutput.process_outputs = process_outputs
return results
class TestReporting(unittest.TestCase):
def setUp(self):
self._reporter = Reporter()
self._settings = {
'Report': 'NONE',
'Log': 'NONE',
'XUnitFile': 'NONE',
'Output': 'NONE',
'LogTitle': None,
'ReportTitle': None,
'ReportBackground': None,
'SuiteStatLevel': None,
'TagStatInclude': None,
'TagStatExclude': None,
'TagStatCombine': None,
'TagDoc': None,
'TagStatLink': None,
'SetTag': None,
'SuiteNames': None,
'TestNames': None,
'Include': None,
'Exclude': None,
'StartTime': 0,
'Name': None,
'Doc': None,
'Metadata': {},
'Critical': None,
'NonCritical': None,
'NoStatusRC': None,
'RunEmptySuite': False,
'EndTime': 0,
'LogLevel': 'INFO'
}
self._original_logger = robot.serializing.testoutput.LOGGER
robot.serializing.testoutput.LOGGER = Logger()
robot.serializing.testoutput.LOGGER.disable_automatic_console_logger()
self._log_results = set_serialize_log_mock()
self._report_results = set_serialize_report_mock()
#self._process_outputs_results = set_process_outputs_mock()
def tearDown(self):
robot.serializing.testoutput.LOGGER = self._original_logger
def test_generate_report_and_log(self):
self._settings['Log'] = 'log.html'
self._settings['Report'] = 'report.html'
self._reporter.execute(self._settings, resources.GOLDEN_OUTPUT)
self._assert_expected_log('log.html')
self._assert_expected_report('report.html')
self._assert_log_link_in_report('log.html')
def test_no_generation(self):
self._reporter.execute(self._settings, resources.GOLDEN_OUTPUT)
self._assert_no_log()
self._assert_no_report()
def test_only_log(self):
self._settings['Log'] = 'only-log.html'
self._reporter.execute(self._settings, resources.GOLDEN_OUTPUT)
self._assert_expected_log('only-log.html')
self._assert_no_report()
def test_only_report(self):
self._settings['Report'] = 'reports-only.html'
self._reporter.execute(self._settings, resources.GOLDEN_OUTPUT)
self._assert_no_log()
self._assert_expected_report('reports-only.html')
self._assert_no_log_links_in_report()
def test_multiple_outputs(self):
self._settings['Log'] = 'log.html'
self._settings['Report'] = 'report.html'
self._reporter.execute_rebot(self._settings, *[resources.GOLDEN_OUTPUT, resources.GOLDEN_OUTPUT2])
self._assert_expected_log('log.html')
self._assert_expected_report('report.html')
def _assert_expected_log(self, expected_file_name):
self.assertEquals(self._log_results['log_path'], expected_file_name)
def _assert_expected_report(self, expected_file_name):
self.assertEquals(self._report_results['report_path'], expected_file_name)
def _assert_log_link_in_report(self, expected_log_link):
self.assertEquals(self._report_results['logpath'], expected_log_link)
def _assert_no_log_links_in_report(self):
self._assert_log_link_in_report(None)
def _assert_no_log(self):
self._assert_expected_log(None)
def _assert_no_report(self):
self._assert_expected_report(None)
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000001
|
@@ -695,25 +695,8 @@
one,
- background=None,
log
@@ -789,51 +789,8 @@
tle
-        results['background'] = background
|
6857624e9d6633038f0565a520de856ee40def09
|
Test with many envs and large groups
|
test/many_envs_test.py
|
test/many_envs_test.py
|
Python
| 0
|
@@ -0,0 +1,1358 @@
+# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.

from .. import ConfigRoot
from ..envs import EnvFactory

ef = EnvFactory()

envs = []
groups = []
for ii in range(0, 16):
    local_envs = []
    for jj in range(0, 128):
        local_envs.append(ef.Env('e' + str(ii) + '_' + str(jj)))
    groups.append(ef.EnvGroup('g' + str(ii), *local_envs))
    envs.extend(local_envs)


def test_many_envs():
    with ConfigRoot(envs[0], envs) as conf:
        conf.setattr('a', default=None, e0_0=0)
        conf.setattr('b', default=None, e1_7=1)
        conf.setattr('c', default=None, e2_15=2)
        conf.setattr('d', default=None, e3_23=3)
        conf.setattr('e', default=None, e4_31=4)
        conf.setattr('f', default=None, e5_39=5)
        conf.setattr('g', default=None, e6_47=6)
        conf.setattr('h', default=None, e7_55=7)
        conf.setattr('i', default=None, e0_0=10, e15_127=8)

    assert conf.a == 0
    assert conf.b == None
    assert conf.i == 10


def test_many_groups():
    # This is slow!
    with ConfigRoot(envs[0], envs) as conf:
        conf.setattr('a', default=None, g0=0)
        conf.setattr('b', default=None, g1=1)
        conf.setattr('i', default=None, e0_0=10, g15=8)

    assert conf.a == 0
    assert conf.b == None
    assert conf.i == 10
|
|
122fa6367dd7162503157b5f6e2739d28d5b2a4d
|
Fix stopping at breakpoints after stepping
|
python/helpers/pydev/_pydevd_frame_eval/pydevd_frame_tracing.py
|
python/helpers/pydev/_pydevd_frame_eval/pydevd_frame_tracing.py
|
import sys
import traceback
from _pydev_bundle import pydev_log
from _pydev_imps._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_comm import get_global_debugger, CMD_SET_BREAK
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER
def update_globals_dict(globals_dict):
new_globals = {'_pydev_stop_at_break': _pydev_stop_at_break}
globals_dict.update(new_globals)
def handle_breakpoint(frame, info, global_debugger, breakpoint):
# ok, hit breakpoint, now, we have to discover if it is a conditional breakpoint
new_frame = frame
condition = breakpoint.condition
if condition is not None:
try:
val = eval(condition, new_frame.f_globals, new_frame.f_locals)
if not val:
return False
except:
if type(condition) != type(''):
if hasattr(condition, 'encode'):
condition = condition.encode('utf-8')
msg = 'Error while evaluating expression: %s\n' % (condition,)
sys.stderr.write(msg)
traceback.print_exc()
if not global_debugger.suspend_on_breakpoint_exception:
return False
else:
try:
# add exception_type and stacktrace into thread additional info
etype, value, tb = sys.exc_info()
try:
error = ''.join(traceback.format_exception_only(etype, value))
stack = traceback.extract_stack(f=tb.tb_frame.f_back)
# On self.set_suspend(thread, CMD_SET_BREAK) this info will be
# sent to the client.
info.conditional_breakpoint_exception = \
('Condition:\n' + condition + '\n\nError:\n' + error, stack)
finally:
etype, value, tb = None, None, None
except:
traceback.print_exc()
if breakpoint.expression is not None:
try:
try:
val = eval(breakpoint.expression, new_frame.f_globals, new_frame.f_locals)
except:
val = sys.exc_info()[1]
finally:
if val is not None:
info.pydev_message = str(val)
return True
def _pydev_stop_at_break():
frame = sys._getframe(1)
t = threading.currentThread()
if t.additional_info.is_tracing:
return
if t.additional_info.pydev_step_cmd == -1:
# do not handle breakpoints while stepping, because they're handled by old tracing function
t.additional_info.is_tracing = True
debugger = get_global_debugger()
pydev_log.debug("Suspending at breakpoint in file: {} on line {}".format(frame.f_code.co_filename, frame.f_lineno))
try:
abs_path_real_path_and_base = NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(frame)
filename = abs_path_real_path_and_base[1]
breakpoints_for_file = debugger.breakpoints.get(filename)
line = frame.f_lineno
breakpoint = breakpoints_for_file[line]
if breakpoint and handle_breakpoint(frame, t.additional_info, debugger, breakpoint):
debugger.set_suspend(t, CMD_SET_BREAK)
debugger.do_wait_suspend(t, frame, 'line', None)
t.additional_info.is_tracing = False
def pydev_trace_code_wrapper():
# import this module again, because it's inserted inside user's code
global _pydev_stop_at_break
_pydev_stop_at_break()
|
Python
| 0.000013
|
@@ -2430,17 +2430,146 @@
rame(1)
+    # it's absolutely necessary to reset tracing function for frame in order to get the real line number
    frame.f_trace = None

-
 t =
@@ -2882,132 +2882,8 @@
er()
-
    pydev_log.debug("Suspending at breakpoint in file: {} on line {}".format(frame.f_code.co_filename, frame.f_lineno))


@@ -3246,16 +3246,33 @@
_lineno
+    try:
@@ -3323,92 +3323,395 @@
-if breakpoint and handle_breakpoint(frame, t.additional_info, debugger, breakpoint):
+except KeyError:
        pydev_log.debug("Couldn't find breakpoint in the file {} on line {}".format(frame.f_code.co_filename, frame.f_lineno))
        return
    if breakpoint and handle_breakpoint(frame, t.additional_info, debugger, breakpoint):
        pydev_log.debug("Suspending at breakpoint in file: {} on line {}".format(frame.f_code.co_filename, frame.f_lineno))
|
a4b242ebd107f9321cc5b87aee2cf608940007f4
|
Make permission name more consistent.
|
product/migrations/0005_auto_20161015_1536.py
|
product/migrations/0005_auto_20161015_1536.py
|
Python
| 0.000001
|
@@ -0,0 +1,474 @@
+# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-15 15:36
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('product', '0004_auto_20161015_1534'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='productcategory',
            options={'permissions': (('manage_productcategories', 'Manage product categories'),)},
        ),
    ]
|
|
b0a4f510ed343825a8073a68c4dc0e3066b560ec
|
add example canICA
|
nilearn/example_canICA.py
|
nilearn/example_canICA.py
|
Python
| 0.000004
|
@@ -0,0 +1,1274 @@
+# -*- coding: utf-8 -*-
from nilearn import datasets

dataset = datasets.fetch_adhd()
func_files = dataset.func  # The list of 4D nifti files for each subject

### Apply CanICA ##############################################################
from nilearn.decomposition.canica import CanICA

n_components = 20
canica = CanICA(n_components=n_components, smoothing_fwhm=6.,
                memory="nilearn_cache", memory_level=5,
                threshold=3., verbose=10, random_state=0)
canica.fit(func_files)

# Retrieve the independent components in brain space
components_img = canica.masker_.inverse_transform(canica.components_)
# components_img is a Nifti Image object, and can be saved to a file with
# the following line:
components_img.to_filename('canica_resting_state.nii.gz')

### Visualize the results #####################################################
# Show some interesting components
import nibabel
import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map

for i in range(n_components):
    plot_stat_map(nibabel.Nifti1Image(components_img.get_data()[..., i],
                                      components_img.get_affine()),
                  display_mode="z", title="IC %d"%i, cut_coords=1,
                  colorbar=False)

plt.show()
|
|
25c2502fce4556b5b72e96116745c83d1689677f
|
Add tests for artist serializers
|
artists/tests/test_serializers.py
|
artists/tests/test_serializers.py
|
Python
| 0
|
@@ -0,0 +1,1021 @@
+from unittest import TestCase

from ..models import Artist, Hyperlink
from ..serializers import ArtistSerializer, HyperlinkSerializer


class HyperlinkSerializerTest(TestCase):

    """Tests for Hyperlink serializer."""

    def test_valid_fields(self):
        id_ = 4
        name = 'jamendo'
        display_name = "Jamendo"
        url = "http://www.jamendo.com/artist/1333"
        link = Hyperlink(id=id_, name=name, url=url)
        serializer = HyperlinkSerializer(link)
        self.assertEqual(serializer.data, {
            'id': id_,
            'name': name,
            'display_name': display_name,
            'url': url,
        })


class ArtistSerializerTest(TestCase):

    """Tests for Artist serializer."""

    def test_no_links(self):
        id_ = 2
        name = "Brad Sucks"
        artist = Artist(id=id_, name=name)
        serializer = ArtistSerializer(artist)
        self.assertEqual(serializer.data, {
            'id': id_,
            'name': name,
            'links': [],
        })
|
|
7e463d26f8937d7671a15e370c6e91926597053b
|
Fix bug with {{task}} in jinja templates.
|
flexget/utils/template.py
|
flexget/utils/template.py
|
from __future__ import unicode_literals, division, absolute_import
import logging
import os
import re
import sys
from copy import copy
from datetime import datetime, date, time
import locale
from email.utils import parsedate
from time import mktime
from jinja2 import (Environment, StrictUndefined, ChoiceLoader,
FileSystemLoader, PackageLoader, TemplateNotFound,
TemplateSyntaxError, Undefined)
from flexget.event import event
from flexget.plugin import PluginError
from flexget.utils.pathscrub import pathscrub
log = logging.getLogger('utils.template')
# The environment will be created after the manager has started
environment = None
class RenderError(Exception):
"""Error raised when there is a problem with jinja rendering."""
pass
def filter_pathbase(val):
"""Base name of a path."""
return os.path.basename(val or '')
def filter_pathname(val):
"""Base name of a path, without its extension."""
return os.path.splitext(os.path.basename(val or ''))[0]
def filter_pathext(val):
"""Extension of a path (including the '.')."""
return os.path.splitext(val or '')[1]
def filter_pathdir(val):
"""Directory containing the given path."""
return os.path.dirname(val or '')
def filter_pathscrub(val, os_mode=None):
"""Replace problematic characters in a path."""
return pathscrub(val, os_mode)
def filter_re_replace(val, pattern, repl):
"""Perform a regexp replacement on the given string."""
return re.sub(pattern, repl, unicode(val))
def filter_re_search(val, pattern):
"""Perform a search for given regexp pattern, return the matching portion of the text."""
if not isinstance(val, basestring):
return val
result = re.search(pattern, val)
if result:
return result.group(0)
return ''
def filter_formatdate(val, format):
"""Returns a string representation of a datetime object according to format string."""
encoding = locale.getpreferredencoding()
if not isinstance(val, (datetime, date, time)):
return val
return val.strftime(format.encode(encoding)).decode(encoding)
def filter_parsedate(val):
"""Attempts to parse a date according to the rules in RFC 2822"""
return datetime.fromtimestamp(mktime(parsedate(val)))
def filter_format_number(val, places=None, grouping=True):
"""Formats a number according to the user's locale."""
if not isinstance(val, (int, float, long)):
return val
if places is not None:
format = '%.' + str(places) + 'f'
elif isinstance(val, (int, long)):
format = '%d'
else:
format = '%.02f'
locale.setlocale(locale.LC_ALL, '')
return locale.format(format, val, grouping)
def filter_pad(val, width, fillchar='0'):
"""Pads a number or string with fillchar to the specified width."""
return unicode(val).rjust(width, fillchar)
# Override the built-in Jinja default filter due to Jinja bug
# https://github.com/mitsuhiko/jinja2/pull/138
def filter_default(value, default_value=u'', boolean=False):
if isinstance(value, Undefined) or (boolean and not value):
return default_value
return value
filter_d = filter_default
@event('manager.startup')
def make_environment(manager):
"""Create our environment and add our custom filters"""
global environment
environment = Environment(undefined=StrictUndefined,
loader=ChoiceLoader([PackageLoader('flexget'),
FileSystemLoader(os.path.join(manager.config_base, 'templates'))]),
extensions=['jinja2.ext.loopcontrols'])
for name, filt in globals().items():
if name.startswith('filter_'):
environment.filters[name.split('_', 1)[1]] = filt
# TODO: list_templates function
def get_template(templatename, pluginname=None):
"""Loads a template from disk. Looks in both included plugins and users custom plugin dir."""
if not templatename.endswith('.template'):
templatename += '.template'
locations = []
if pluginname:
locations.append(pluginname + '/' + templatename)
locations.append(templatename)
for location in locations:
try:
return environment.get_template(location)
except TemplateNotFound:
pass
else:
raise PluginError('Template not found: %s (%s)' % (templatename, pluginname))
def render_from_entry(template_string, entry):
"""Renders a Template or template string with an Entry as its context."""
# If a plain string was passed, turn it into a Template
if isinstance(template_string, basestring):
try:
template = environment.from_string(template_string)
except TemplateSyntaxError as e:
raise PluginError('Error in template syntax: ' + e.message)
else:
# We can also support an actual Template being passed in
template = template_string
# Make a copy of the Entry so we can add some more fields
variables = copy(entry)
variables['now'] = datetime.now()
variables['task'] = entry.task
# We use the lower level render function, so that our Entry is not cast into a dict (and lazy loading lost)
try:
result = u''.join(template.root_render_func(template.new_context(variables, shared=True)))
except:
exc_info = sys.exc_info()
try:
return environment.handle_exception(exc_info, True)
except Exception as e:
error = RenderError('(%s) %s' % (type(e).__name__, e))
log.debug('Error during rendering: %s' % error)
raise error
# Only try string replacement if jinja didn't do anything
if result == template_string:
try:
result = template_string % entry
except KeyError as e:
raise RenderError('Does not contain the field `%s` for string replacement.' % e)
except ValueError as e:
raise PluginError('Invalid string replacement template: %s (%s)' % (template_string, e))
except TypeError as e:
raise RenderError('Error during string replacement: %s' % e.message)
return result
def render_from_task(template, task):
"""
Renders a Template with a task as its context.
:param template: Template or template string to render.
:param task: Task to render the template from.
:return: The rendered template text.
"""
if isinstance(template, basestring):
template = environment.from_string(template)
try:
result = template.render({'task': task})
except Exception as e:
raise RenderError('(%s) %s' % (type(e).__name__, e))
return result
|
Python
| 0.000001
|
@@ -5056,16 +5056,79 @@
e.now()
+    if 'task' not in variables and hasattr(entry, 'task'):
 vari
@@ -5153,16 +5153,21 @@
try.task
+.name

    # W
|
1f49c220a9da5b848f68566a7bdb4be7acae8d23
|
Fix an issue when using lazy fields in a jinja statement where it would return 'None' instead of None, causing the default filter to not work. Add our own version of Jinja default filter, to fix a bug currently in the Jinja one.
|
flexget/utils/template.py
|
flexget/utils/template.py
|
import logging
import os
import re
import sys
from copy import copy
from datetime import datetime, date, time
import locale
from email.utils import parsedate
from time import mktime
from jinja2 import (Environment, StrictUndefined, ChoiceLoader, FileSystemLoader, PackageLoader, TemplateNotFound,
TemplateSyntaxError)
from flexget.event import event
from flexget.plugin import PluginError
from flexget.utils.pathscrub import pathscrub
log = logging.getLogger('utils.template')
# The environment will be created after the manager has started
environment = None
class RenderError(Exception):
"""Error raised when there is a problem with jinja rendering."""
pass
def filter_pathbase(val):
"""Base name of a path."""
return os.path.basename(val or '')
def filter_pathname(val):
"""Base name of a path, without its extension."""
return os.path.splitext(os.path.basename(val or ''))[0]
def filter_pathext(val):
"""Extension of a path (including the '.')."""
return os.path.splitext(val or '')[1]
def filter_pathdir(val):
"""Directory containing the given path."""
return os.path.dirname(val or '')
def filter_pathscrub(val, os_mode=None):
"""Replace problematic characters in a path."""
return pathscrub(val, os_mode=os_mode)
def filter_re_replace(val, pattern, repl):
"""Perform a regexp replacement on the given string."""
return re.sub(pattern, repl, unicode(val))
def filter_re_search(val, pattern):
"""Perform a search for given regexp pattern, return the matching portion of the text."""
if not isinstance(val, basestring):
return val
result = re.search(pattern, val)
if result:
return result.group(0)
return ''
def filter_formatdate(val, format):
"""Returns a string representation of a datetime object according to format string."""
encoding = locale.getpreferredencoding()
if not isinstance(val, (datetime, date, time)):
return val
return val.strftime(format.encode(encoding)).decode(encoding)
def filter_parsedate(val):
"""Attempts to parse a date according to the rules in RFC 2822"""
return datetime.fromtimestamp(mktime(parsedate(val)))
def filter_format_number(val, places=None, grouping=True):
"""Formats a number according to the user's locale."""
if not isinstance(val, (int, float, long)):
return val
if places is not None:
format = '%.' + str(places) + 'f'
elif isinstance(val, (int, long)):
format = '%d'
else:
format = '%.02f'
locale.setlocale(locale.LC_ALL, '')
return locale.format(format, val, grouping)
def filter_pad(val, width, fillchar='0'):
"""Pads a number or string with fillchar to the specified width."""
return str(val).rjust(width, fillchar)
@event('manager.startup')
def make_environment(manager):
"""Create our environment and add our custom filters"""
global environment
environment = Environment(undefined=StrictUndefined,
loader=ChoiceLoader([PackageLoader('flexget'),
FileSystemLoader(os.path.join(manager.config_base, 'templates'))]),
extensions=['jinja2.ext.loopcontrols'])
for name, filt in globals().items():
if name.startswith('filter_'):
environment.filters[name.split('_', 1)[1]] = filt
# TODO: list_templates function
def get_template(templatename, pluginname=None):
"""Loads a template from disk. Looks in both included plugins and users custom plugin dir."""
if not templatename.endswith('.template'):
templatename += '.template'
locations = []
if pluginname:
locations.append(pluginname + '/' + templatename)
locations.append(templatename)
for location in locations:
try:
return environment.get_template(location)
except TemplateNotFound:
pass
else:
raise PluginError('Template not found: %s (%s)' % (templatename, pluginname))
def render_from_entry(template_string, entry):
"""Renders a Template or template string with an Entry as its context."""
# If a plain string was passed, turn it into a Template
if isinstance(template_string, basestring):
try:
template = environment.from_string(template_string)
except TemplateSyntaxError, e:
raise PluginError('Error in template syntax: ' + e.message)
else:
# We can also support an actual Template being passed in
template = template_string
# Make a copy of the Entry so we can add some more fields
variables = copy(entry)
variables['now'] = datetime.now()
# We use the lower level render function, so that our Entry is not cast into a dict (and lazy loading lost)
try:
result = u''.join(template.root_render_func(template.new_context(variables)))
except:
exc_info = sys.exc_info()
try:
return environment.handle_exception(exc_info, True)
except Exception, e:
error = RenderError('(%s) %s' % (type(e).__name__, e))
log.debug('Error during rendering: %s' % error)
raise error
# Only try string replacement if jinja didn't do anything
if result == template_string:
try:
result = template_string % entry
except KeyError, e:
raise RenderError('Does not contain the field `%s` for string replacement.' % e)
except ValueError, e:
raise PluginError('Invalid string replacement template: %s (%s)' % (template_string, e))
except TypeError, e:
raise RenderError('Error during string replacement: %s' % e.message)
return result
def render_from_feed(template, feed):
"""
Renders a Template with a feed as its context.
:param template: Template or template string to render.
:param feed: Feed to render the template from.
:return: The rendered template text.
"""
if isinstance(template, basestring):
template = environment.from_string(template)
try:
result = template.render({'feed': feed})
except Exception, e:
raise RenderError('(%s) %s' % (type(e).__name__, e))
return result
|
Python
| 0.000016
|
@@ -329,16 +329,27 @@
taxError
+, Undefined
)
from f
@@ -2816,16 +2816,326 @@
char)


+# Override the built-in Jinja default filter due to Jinja bug
# https://github.com/mitsuhiko/jinja2/pull/138
def filter_default(value, default_value=u'', boolean=False):
    if isinstance(value, Undefined) or (boolean and not value):
        return default_value
    return value


filter_d = filter_default


@event('
@@ -5172,16 +5172,29 @@
ariables
+, shared=True
)))
|
79563ccb72b50ad9b0a7cf037ad46efc98a1f79b
|
Create call.py
|
common/call.py
|
common/call.py
|
Python
| 0.000001
|
@@ -0,0 +1,2301 @@
+def call(mod,cmd,*args,**kargs):
    """Calls arbitrary python code

    Arguments:
        mod - The module from which you are calling
        cmd - The command in said module
        *args - Any arguments you need to give to it
        index=0 - A specific index at which to return
        end=0 - An end range from which to return
    Use case:
        if you don't know what command you need to run at compile time
    """
    if mod == "__builtins__":
        m = __builtins__
    else:
        m = __import__(mod)
    func = getattr(m,cmd)
    if args:
        r = func(*args)
    elif type(func) != type(open) and type(func) != type(call):
        r = func
    else:
        r = func()
    index = kargs.get('index')
    end = kargs.get('end')
    if end is not None and index is not None:
        return r[index:end]
    elif index is not None:
        return r[index]
    else:
        return r

def process(tup):
    """Convert tuples into a format readable by call.call"""
    args = []
    ix=None
    ex=None
    for item in tup[0]:
        if type(item) == type("index="):
            if item[:6] == "index=":
                ix = int(item[6:])
            elif item[:4] == "end=":
                ex = int(item[4:])
            else:
                args.append(item)
        else:
            args.append(item)
    args = tuple(args)
    a = call(*args,index=ix,end=ex)
    return a == tup[1]

def parse(d):
    """Checks a dict keyed by the related python calls to see if they are the expected value
    Dict format:
        Key:
            tuple:
                [0] - module from which the command is called (or "__builtins__")
                [1] - command which you are calling
                [*] - "index=x", where x is the index you wish
                [*] - "end=x", where x is the end of the range you wish returned
                [*] - all other arguments in the order the command is supposed to receive it
                keyed arguments are not supported
        Value:
            The expected return value
    """
    from multiprocessing import Pool
    p = []
    for item in d:
        p.append((item,d[item]))
    r = Pool().map(process,p)
    for res in r:
        if not res:
            return False
    return True
|
|
686da2bf6b71961ea82e72640b7e6ff16c4723d7
|
Add bubblesort example. Have problems with type inference.
|
examples/bubblesort.py
|
examples/bubblesort.py
|
Python
| 0
|
@@ -0,0 +1,1435 @@
+from numba import *
import numpy as np
from timeit import default_timer as timer

#@autojit
#def bubbleswap(X, i):
#    tmp = X[i]
#    X[i] = X[i + 1]
#    X[i + 1] = tmp

def bubblesort(X, doprint):
    N = X.shape[0]
    for end in range(N, 1, -1):
        for i in range(end - 1):
            cur = X[i]
            if cur > X[i + 1]:
                # Works if the swap if another function.
                # bubbleswap(X, i)
                # But, the following is causing error.
                tmp = X[i]
                X[i] = X[i + 1]
                X[i + 1] = tmp
            if doprint:
                print "Iteration:", i, X

bubblesort_fast = autojit(bubblesort)

def main():

    Xtest = np.array(list(reversed(range(8))))

    X0 = Xtest.copy()
    bubblesort(X0, True)

    X1 = Xtest.copy()
    bubblesort_fast(X1, True) # This fails

    print X0
    print X1
    assert all(X0 == X1)

# REP = 10
# N = 100
#
# Xorig = np.array(list(reversed(range(N))))
#
# t0 = timer()
# for t in range(REP):
#     X0 = Xorig.copy()
#     bubblesort(X0, False)
# tpython = (timer() - t0) / REP
#
# t1 = timer()
# for t in range(REP):
#     X1 = Xorig.copy()
#     bubblesort_fast(X1, False)
# tnumba = (timer() - t1) / REP
#
# assert all(X0 == X1)
#
# print 'Python', tpython
# print 'Numba', tnumba
# print 'Speedup', tpython / tnumba, 'x'


if __name__ == '__main__':
    main()
|
|
8c9ff0787d1d862765bbd657b09357d31a402e1f
|
add collector for https://torstatus.blutmagie.de/
|
collectors/torstatus.blutmagie.py
|
collectors/torstatus.blutmagie.py
|
Python
| 0
|
@@ -0,0 +1,2148 @@
+#!/usr/bin/python
# -*- coding: utf-8 -*-

import socket
import re
from bs4 import BeautifulSoup
import requests
import ipwhois
from pprint import pprint


def get_url(url):
    try:
        res = requests.get(url)
    except requests.exceptions.ConnectionError:
        raise requests.exceptions.ConnectionError("DNS lookup failures")
    else:
        if res.status_code != 200:
            raise requests.exceptions.ConnectionError(
                "the {}, answer with {} error".format(url, res.status_code))

    return res


def get_host(ip):
    attempts = 5
    host = "undefined"
    while attempts:
        try:
            data = socket.gethostbyaddr(ip)
            host = data[0]
            break
        except socket.herror:
            attempts -= 1

    return host


def get_who_is_and_country(ip):
    try:
        ip_obj = ipwhois.IPWhois(ip)
        who_is = ip_obj.lookup(retry_count=5)
        return str(who_is), who_is['asn_country_code']
    except ipwhois.exceptions.IPDefinedError:
        return "Private-Use Networks", "undefined"
    except ipwhois.exceptions.WhoisLookupError:
        return "undefined", "undefined"


def gather():
    base_url = "https://torstatus.blutmagie.de/"
    attack_type = "TOR"

    res = get_url(base_url)
    soup = BeautifulSoup(res.content, "lxml")
    table_info = soup.findAll('table', {"class": "displayTable"})
    for row in table_info[0].findAll('tr', {"class": "r"}):
        col = row.findAll('td', {"class": "TDS"})
        col = col[0].findAll('td', {"class": "iT"})
        items = str(col[0].text).split('[')
        host = items[0].strip()
        ip = items[1][:-1].strip()

        ip = re.findall(r'[0-9]+(?:\.[0-9]+){3}', ip)
        if ip == 0:
            continue

        ip_address = ip[0]
        who_is, country = get_who_is_and_country(ip_address)

        doc = {
            'IP': ip_address,
            'SourceInfo': base_url,
            'Type': attack_type,
            'Country': country,
            'Domain': host,
            'URL': host,
            'WhoIsInfo': who_is,
        }

        pprint(doc)

if __name__ == '__main__':
    gather()
|
|
265b47de5a54d7c3a6a7be70b10f16b05f40d0b2
|
add tests for "$ oj login --check URL"
|
tests/command_login.py
|
tests/command_login.py
|
Python
| 0
|
@@ -0,0 +1,1765 @@
+import os
import subprocess
import sys
import time
import unittest

import tests.utils


class LoginTest(unittest.TestCase):
    def snippet_call_login_check_failure(self, url):
        ojtools = os.path.abspath('oj')
        with tests.utils.sandbox(files=[]) as tempdir:
            env = dict(**os.environ)
            env['HOME'] = tempdir
            self.assertRaises
            proc = subprocess.run([ojtools, 'login', '--check', url], env=env, stdout=sys.stdout, stderr=sys.stderr)
            self.assertEqual(proc.returncode, 1)

    def test_call_login_check_atcoder_failure(self):
        self.snippet_call_login_check_failure('https://atcoder.jp/')

    def test_call_login_check_codeforces_failure(self):
        self.snippet_call_login_check_failure('https://codeforces.com/')

    def test_call_login_check_yukicoder_failure(self):
        self.snippet_call_login_check_failure('https://yukicoder.me/')

    @unittest.skipIf('CI' in os.environ, 'login is required')
    def test_call_login_check_atcoder_success(self):
        ojtools = os.path.abspath('oj')
        subprocess.check_call([ojtools, 'login', '--check', 'https://atcoder.jp/'], stdout=sys.stdout, stderr=sys.stderr)

    @unittest.skipIf('CI' in os.environ, 'login is required')
    def test_call_login_check_codeforces_success(self):
        ojtools = os.path.abspath('oj')
        subprocess.check_call([ojtools, 'login', '--check', 'https://codeforces.com/'], stdout=sys.stdout, stderr=sys.stderr)

    @unittest.skipIf('CI' in os.environ, 'login is required')
    def test_call_login_check_yukicoder_success(self):
        ojtools = os.path.abspath('oj')
        subprocess.check_call([ojtools, 'login', '--check', 'https://yukicoder.me/'], stdout=sys.stdout, stderr=sys.stderr)
|
|
0681d3833cd3c82d95ce80f12b492706f26b5ffa
|
add geco_slo_channel_plot in progress
|
geco_slow_channel_plot.py
|
geco_slow_channel_plot.py
|
Python
| 0.000001
|
@@ -0,0 +1,2321 @@
+#!/usr/bin/env python
# (c) Stefan Countryman 2017

import matplotlib.pyplot as plt
import numpy as np
import geco_gwpy_dump as g
import gwpy.segments
import gwpy.time
import sys

if len(sys.argv) == 1:
    job = g.Job.load()
else:
    job = g.Job.load(sys.argv[1])

segs = gwpy.segments.DataQualityFlag.query_segdb('L1:DMT-ANALYSIS_READY:1',
                                                 job.start, job.end)

INDEX_MISSING_FMT = ('{} index not found for segment {} of {}, time {}\n'
                     'Setting {} index to {}.')
for i, q in enumerate(job.full_queries):
    means = []
    mins = []
    maxs = []
    stds = []
    times = []
    t = q.read()
    for ii, s in enumerate(segs.active):
        # this next bit seems to be necessary due to a bug
        start = gwpy.time.to_gps(s.start).gpsSeconds
        end = gwpy.time.to_gps(s.end).gpsSeconds
        # the start index for this segment might be outside the full timeseries
        try:
            i_start = np.argwhere(t.times.value == (start // 60 * 60))[0][0]
        except IndexError:
            i_start = 0
            print(INDEX_MISSING_FMT.format('Start', ii, len(segs.active),
                                           start, 'start', i_start))
        # the end index for this segment might be outside the full timeseries
        try:
            i_end = np.argwhere(t.times.value == (end // 60 * 60 + 60))[0][0]
        except IndexError:
            i_end = -2
            print(INDEX_MISSING_FMT.format('End', ii, len(segs.active),
                                           end, 'end', i_end))
        tt = t[i_start:i_end+1]
        means.append( tt.mean().value )
        mins.append( tt.min().value )
        maxs.append( tt.max().value )
        stds.append( tt.std().value )
        times.append( tt.times.mean().value )
    f = plt.plot(times, means, "o'black'",
                 times, mins, "v'red'",
                 times, maxs, "^'blue'",
                 times, maxs-stds, "1'pink'",
                 times, maxs+stds, "2'teal'")
    f.set_title('{} from {} to {}'.format(t.channel.name,
                                          gwpy.time.from_gps(j.start),
                                          gwpy.time.from_gps(j.end)))
    f.savefig('{}__{}__{}.png'.format(q.start, q.end, q.sanitized_channel))
|
|
fa6f2e35db07571759d654088d77cb7a206c5722
|
Create test.py
|
test.py
|
test.py
|
Python
| 0.000001
|
@@ -0,0 +1,193 @@
+import unittest

import awesome


class TestMethods(unittest.TestCase):
    def test_add(self):
        self.assertEqual(awesome.smile(), ":)")


if __name__ == '__main__':
    unittest.main()
|
|
8bda92da85bd666aa91b657319a019e00bf27126
|
add sample configuration file
|
ryu/services/protocols/bgp/bgp_sample_conf.py
|
ryu/services/protocols/bgp/bgp_sample_conf.py
|
Python
| 0
|
@@ -0,0 +1,3176 @@
+import os

# =============================================================================
# BGP configuration.
# =============================================================================
BGP = {

    # General BGP configuration.
    'routing': {
        # ASN for this BGP instance.
        'local_as': 64512,

        # BGP Router ID.
        'router_id': '10.10.0.1',

        # We list all BGP neighbors below. We establish EBGP sessions with peer
        # with different AS number then configured above. We will
        # establish IBGP session if AS number is same.
        'bgp_neighbors': {
            '10.0.0.1': {
                'remote_as': 64513,
                'multi_exit_disc': 100
            },
            '10.10.0.2': {
                'remote_as': 64514,
            },
        },

        'networks': [
            '10.20.0.0/24',
            '10.30.0.0/24',
            '10.40.0.0/16',
            '10.50.0.0/16',
        ],
    },

}

# =============================================================================
# Logging configuration.
# =============================================================================
LOGGING = {

    # We use python logging package for logging.
    'version': 1,
    'disable_existing_loggers': False,

    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s ' +
                      '[%(process)d %(thread)d] %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(asctime)s %(module)s %(lineno)s ' +
                      '%(message)s'
        },
        'stats': {
            'format': '%(message)s'
        },
    },

    'handlers': {
        # Outputs log to console.
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'console_stats': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'stats'
        },
        # Rotates log file when its size reaches 10MB.
        'log_file': {
            'level': 'ERROR',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join('.', 'bgpspeaker.log'),
            'maxBytes': '10000000',
            'formatter': 'verbose'
        },
        'stats_file': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join('.', 'statistics_bgps.log'),
            'maxBytes': '10000000',
            'formatter': 'stats'
        },
    },

    # Fine-grained control of logging per instance.
    'loggers': {
        'bgpspeaker': {
            'handlers': ['console', 'log_file'],
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'stats': {
            'handlers': ['stats_file', 'console_stats'],
            'level': 'INFO',
            'propagate': False,
            'formatter': 'stats',
        },
    },

    # Root loggers.
    'root': {
        'handlers': ['console', 'log_file'],
        'level': 'DEBUG',
        'propagate': True,
    },
}
|
|
42af700af58588fccaa84f5348a5c854d095d1a9
|
Add ex2.2: multiple simple requests
|
code/ex2.2-simple_requests.py
|
code/ex2.2-simple_requests.py
|
Python
| 0.000034
|
@@ -0,0 +1,497 @@
+from urllib.request import urlopen
import time

URLS = [
    'http://127.0.0.1:8000',
    'http://127.0.0.1:8000',
    'http://127.0.0.1:8000',
]

def request_greetings():
    responses = []
    for url in URLS:
        resp = urlopen(url)
        responses.append(resp.read().decode('utf-8'))
    texts = '\n'.join(responses)
    return texts


if __name__ == "__main__":
    t1 = time.time()
    greetings = request_greetings()
    print(time.time() - t1, "seconds passed")
    print(greetings)
|
|
f2ac12e66aa2209f98e6eed2283005f2a9e74768
|
Create perm_missing_elem.py
|
codility/perm_missing_elem.py
|
codility/perm_missing_elem.py
|
Python
| 0.00014
|
@@ -0,0 +1,410 @@
+"""
https://codility.com/programmers/task/perm_missing_elem/

Given array A of integers. N = len(A). Integers are distinct and taken from
range 1..(N+1), which means exactly one integer from 1..(N+1) is missing from
A. Find the missing integer.

Runtime: O(N)
Extra Space: O(1)
"""


def solution(A):
    total1 = sum(xrange(1, len(A) + 2))  # sum of 1..(N+1)
    total2 = sum(A)
    return total1 - total2
|
|
785af4d73a158fdf43ceff8bb4c974a0215606fd
|
add missing test file
|
tests/test_set_long.py
|
tests/test_set_long.py
|
Python
| 0.000001
|
@@ -0,0 +1,817 @@
+import cmemcached
import unittest
import subprocess
import time

TEST_SERVER = "localhost"
memcached_process = None

def setup():
    global memcached_process
    memcached_process = subprocess.Popen(['memcached'])
    time.sleep(0.5)


def teardown():
    memcached_process.terminate()


class TestCmemcached_for_long(unittest.TestCase):

    def setUp(self):
        self.mc = cmemcached.Client([TEST_SERVER], comp_threshold=1024)

    def test_set_get_long(self):
        self.mc.set("key_long_short", long(1L))
        v = self.mc.get("key_long_short")
        self.assertEqual(v, 1L)
        self.assertEqual(type(v), long)

        big = 1233345435353543L
        self.mc.set("key_long_big", big)
        v = self.mc.get("key_long_big")
        self.assertEqual(v, big)
        self.assertEqual(type(v), long)
|
|
d59e49ddf95c20b51ac285dccb0a1b43936d97ef
|
Test that some credential setting properly switches endpoint schemes
|
tests/test_tpclient.py
|
tests/test_tpclient.py
|
Python
| 0
|
@@ -0,0 +1,3100 @@
+# Copyright (c) 2009-2010 Six Apart Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# * Neither the name of Six Apart Ltd. nor the names of its contributors may
#   be used to endorse or promote products derived from this software without
#   specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.


import unittest
from urlparse import urlsplit

from oauth.oauth import OAuthConsumer, OAuthToken

import typepad.tpclient


class TestTypePadClient(unittest.TestCase):

    def assertScheme(self, url, *args):
        scheme = urlsplit(url)[0]
        return self.assertEquals(scheme, *args)

    def test_adjust_scheme(self):
        c = typepad.tpclient.TypePadClient()
        c.endpoint = 'http://api.typepad.com'

        c.clear_credentials()
        self.assertScheme(c.endpoint, 'http')

        c.add_credentials('a', 'b')
        self.assertScheme(c.endpoint, 'http')

        c.add_credentials('a', 'b', domain='api.typepad.com')
        self.assertScheme(c.endpoint, 'http')

        c.add_credentials(OAuthConsumer('a', 'b'), OAuthToken('c', 'd'))
        self.assertScheme(c.endpoint, 'http')

        c.add_credentials(OAuthConsumer('a', 'b'), OAuthToken('c', 'd'), domain='api.example.com')
        self.assertScheme(c.endpoint, 'http')

        c.add_credentials(OAuthConsumer('a', 'b'), OAuthToken('c', 'd'), domain='typepad.com')
        self.assertScheme(c.endpoint, 'http')

        # This time for sure!!
        c.add_credentials(OAuthConsumer('a', 'b'), OAuthToken('c', 'd'), domain='api.typepad.com')
        self.assertScheme(c.endpoint, 'https')

        # Try it again.
        c.add_credentials(OAuthConsumer('a', 'b'), OAuthToken('c', 'd'), domain='api.typepad.com')
        self.assertScheme(c.endpoint, 'https')

        # Check that clearing works.
        c.clear_credentials()
        self.assertScheme(c.endpoint, 'http')
|
|
9ad8a03eac3d9fc94122ee2494ff1f6398467cfc
|
Add files via upload
|
pque.py
|
pque.py
|
Python
| 0
|
@@ -0,0 +1,2118 @@
+class Pque(object):
    """make as priority queue priority scale is 0 through -99
    0 has greatest priority with ties being first come first pop"""
    def __init__(self):
        self.next_node = None
        self.priority = 0
        self.value = None
        self.tail = None
        self.head = None
        self.size = 0

    def insert(self,value , priority = -99):
        """ inserts a value into the que defalt priority is -99"""
        new_pque = Pque()
        new_pque.priority = priority
        if self.size is 0:
            self.head = new_pque
            self.tail = new_pque
        else:
            current_node = self.head
            pre_node = None
            for x in range(self.size):
                if new_pque.priority > current_node.priority:
                    if current_node is self.head:
                        new_pque.next_node = self.head
                        self.head = new_pque
                        break
                    else:
                        pre_node.next_node = new_pque
                        new_pque.next_node = current_node
                        self.size += 1
                        new_pque.value = value
                        break
                if current_node is self.tail:
                    self.tail.next_node = new_pque
                    self.tail = new_pque
                    break
                else:
                    pre_node = current_node
                    current_node = current_node.next_node
        self.size += 1
        new_pque.value = value
    def peek(self):
        """returns the data in the head of the pque with out removing it"""
        if self.head is None:
            raise IndexError ('que is empty')
        return self.head.value

    def pop(self):
        """returns the data in the head of pque and removes it """
        if self.head is None:
            raise IndexError ('que is empty')
        temp_val = self.head.value
        self.head = self.head.next_node
        self.size -= 1
        return temp_val
|
|
b480a21d58c1fc1b8ad66442bd65a7d892d8efa1
|
Create Blink.py
|
Blink.py
|
Blink.py
|
Python
| 0
|
@@ -0,0 +1,320 @@
+import time
import mraa

###### Programe Begins ####
print (mraa.getVersion())
Led = mraa.Gpio(4)
Led.dir(mraa.DIR_OUT)
Led.write(1)
value = 0
while 1:
    if value == 0:
        value = 1
    else:
        value = 0
    Led.write(value)
    print "LED "+str(value)
    time.sleep(1)
|
|
36af8c98005bfb6d51344b80a59cb6e48c8b55fb
|
Add outputter to display overstate stages
|
salt/output/overstatestage.py
|
salt/output/overstatestage.py
|
Python
| 0.000001
|
@@ -0,0 +1,846 @@
+'''
Display clean output of an overstate stage
'''


#[{'group2': {'match': ['fedora17-2', 'fedora17-3'],
#             'require': ['group1'],
#             'sls': ['nginx', 'edit']}
# }
# ]

# Import Salt libs
import salt.utils


def output(data):
    '''
    Format the data for printing stage information from the overstate system
    '''
    colors = salt.utils.get_colors(__opts__.get('color'))
    ostr = ''
    for comp in data:
        for name, stage in comp.items():
            ostr += '{0}{1}:{2}\n'.format(colors['LIGHT_BLUE'], name, colors['ENDC'])
            for key in sorted(stage):
                ostr += '    {0}{1}:{2}{3}\n'.format(
                        colors['LIGHT_BLUE'],
                        key,
                        stage[key],
                        colors['ENDC'])
    return ostr
|
|
1085114668dc13d86dac8de70557cd4242ab9d20
|
Add tests for parse_csv, weighted_mode and weighted_replicate (#210)
|
tests/test_learning.py
|
tests/test_learning.py
|
Python
| 0
|
@@ -0,0 +1,356 @@
+import pytest%0Afrom learning import parse_csv, weighted_mode, weighted_replicate%0A%0Adef test_parse_csv():%0A%09assert parse_csv('1, 2, 3 %5Cn 0, 2, na') == %5B%5B1, 2, 3%5D, %5B0, 2, 'na'%5D%5D%0A%0A%0Adef test_weighted_mode():%0A%09assert weighted_mode('abbaa', %5B1,2,3,1,2%5D) == 'b'%0A%0A%0Adef test_weighted_replicate():%0A%09assert weighted_replicate('ABC', %5B1,2,1%5D, 4) == %5B'A', 'B', 'B', 'C'%5D%0A%09
|
|
8d88336d831eeb5e6603b9ff66f2e4906c4f9e2e
|
Create test.py
|
test.py
|
test.py
|
Python
| 0.000005
|
@@ -0,0 +1,21 @@
+print %22Hello world!%22%0A
|
|
6aec92990790dd5ba04ca1079dbc5fe9106f8747
|
Add test suite
|
test.py
|
test.py
|
Python
| 0.000001
|
@@ -0,0 +1,1850 @@
+import sys%0Aimport unittest%0Aimport pygooglesms%0Afrom pygooglesms import GoogleSMS%0A%0A%0Aclass TestPyGoogleSMS(unittest.TestCase):%0A GOOD_LOGIN = 'CHANGEME'%0A GOOD_PASSWD = 'CHANGEME'%0A TEST_NUMBER = 'CHANGEME'%0A%0A BAD_LOGIN = 'nobody@gmail.com'%0A BAD_PASSWD = 'terrible'%0A%0A BAD_AUTH_MSG = 'No auth token provided by server (Bad account?)'%0A NOT_LOGGED_IN_MSG = 'Not logged in'%0A%0A def test_good_login(self):%0A GoogleSMS(self.GOOD_LOGIN, self.GOOD_PASSWD)%0A%0A def test_bad_login(self):%0A try:%0A GoogleSMS(self.BAD_LOGIN, self.BAD_PASSWD)%0A except pygooglesms.GoogleAuthError as error:%0A if error.msg == self.BAD_AUTH_MSG:%0A return%0A raise error%0A%0A def test_bad_login_good_user(self):%0A try:%0A GoogleSMS(self.GOOD_LOGIN, self.BAD_PASSWD)%0A except pygooglesms.GoogleAuthError as error:%0A if error.msg == self.BAD_AUTH_MSG:%0A return%0A raise error%0A%0A def test_sms_with_bad_login(self):%0A sms = GoogleSMS(self.GOOD_LOGIN, self.GOOD_PASSWD)%0A try:%0A sms.login(self.BAD_LOGIN, self.BAD_PASSWD)%0A except Exception:%0A pass%0A try:%0A sms.send(self.TEST_NUMBER, 'test_message')%0A except pygooglesms.GoogleVoiceError as error:%0A if error.msg == self.NOT_LOGGED_IN_MSG:%0A return%0A raise error%0A%0A def test_sms_with_good_login(self):%0A sms = GoogleSMS(self.GOOD_LOGIN, self.GOOD_PASSWD)%0A sms.send(self.TEST_NUMBER, 'test_message')%0A # some way to validate this? send message to self?%0A%0Aif __name__ == %22__main__%22:%0A suite = unittest.TestLoader().loadTestsFromTestCase(TestPyGoogleSMS)%0A result = unittest.TextTestRunner(verbosity=2).run(suite)%0A%0A error = len(result.errors) + len(result.failures)%0A sys.exit(error)%0A
|
|
b7ff7ee179f7d973051ca7f70a04f27322c07cf2
|
Create Redis.py
|
Redis.py
|
Redis.py
|
Python
| 0.000001
|
@@ -0,0 +1,37 @@
+#Intro to Redis (NoSQL)%0Aimport redis%0A
|
|
7d8ad6124cd838f3b2507e43c2c89e8a4873465e
|
fix case with no provider
|
test.py
|
test.py
|
Python
| 0.000002
|
@@ -0,0 +1,528 @@
+from genomepy.provider import ProviderBase%0Afrom genomepy import Genome%0Aimport sys%0A%0A%0Agenome_name = %22ce10%22%0Agenome = Genome(genome_name)%0Atax_id = genome.tax_id%0A%0A#p = ProviderBase.create(%22UCSC%22)%0A#print(p.assembly_accession(%22ci3%22))%0A#sys.exit()%0A%0Ap = ProviderBase.create(%22Ensembl%22)%0Aname, accession, *rest = %5Brow for row in p.search(tax_id)%5D%5B0%5D%0Aprint(name, tax_id)%0Aif accession == genome.assembly_accession:%0A print(f%22Ensembl %7Bname%7D matches %7Bgenome_name%7D by accession%22)%0Aelse:%0A print(f%22Could not find a matching genome in Ensembl%22)%0A
|
|
eabe6103860fd7b04e52f2e5181affbb55e93273
|
add wsgi script
|
wsgi.py
|
wsgi.py
|
Python
| 0
|
@@ -0,0 +1,237 @@
+#!/usr/bin/env python3%0A# To run:%0A# gunicorn -b 0.0.0.0:5000 wsgi:app%0Aimport os%0A%0Afrom server import create_app, generate%0Afrom server.models import db, User%0A%0Aenv = os.environ.get('OK_ENV', 'dev')%0Aapp = create_app('settings/%25s.py' %25 env)%0A%0A%0A
|
|
623848955085a03d1de7e15863a7dd82e4d7f51a
|
fix import threading.Thread
|
Sound.py
|
Sound.py
|
import sublime, sublime_plugin
from subprocess import call
from os.path import join
from random import randrange
import threading
class EventSound(sublime_plugin.EventListener):
def __init__(self, *args, **kwargs):
super(EventSound, self).__init__(*args, **kwargs)
if sublime.platform() == "osx":
self.play = self.osx_play
elif sublime.platform() == "linux":
pass # TODO
elif sublime.platform() == "windows":
pass # TODO
@thread
def osx_play(self, event_name, random=False):
self.on_play_flag = False
if not random:
file_path = join(sublime.packages_path(), "Sublime-Sound", "sounds", event_name) + ".mp3"
else:
num_files = sublime.load_settings("Sound.sublime-settings").get("random_sounds")[event_name]["num_files"]
file_path = join(sublime.packages_path(), "Sublime-Sound", "random_sounds", event_name, str(randrange(1, num_files))) + ".mp3"
call(["afplay", file_path])
def on_new_async(self, view):
# Called when a new buffer is created. Runs in a separate thread, and does not block the application.
if not hasattr(self, "on_play_flag"): self.on_play_flag = False # TODO: use decorator
if self.on_play_flag: return
self.on_play_flag = True
sublime.set_timeout(lambda: self.play("on_new"), 100)
def on_clone_async(self, view):
# Called when a view is cloned from an existing one. Runs in a separate thread, and does not block the application.
if not hasattr(self, "on_play_flag"): self.on_play_flag = False
if self.on_play_flag: return
self.on_play_flag = True
sublime.set_timeout(lambda: self.play("on_clone"), 100)
def on_load_async(self, view):
# Called when the file is finished loading. Runs in a separate thread, and does not block the application.
if not hasattr(self, "on_play_flag"): self.on_play_flag = False
if self.on_play_flag: return
self.on_play_flag = True
sublime.set_timeout(lambda: self.play("on_load"), 100)
def on_close(self, view):
# Called when a view is closed (note, there may still be other views into the same buffer).
if not hasattr(self, "on_play_flag"): self.on_play_flag = False
if self.on_play_flag: return
self.on_play_flag = True
sublime.set_timeout(lambda: self.play("on_close"), 100)
def on_pre_save_async(self, view):
# Called after a view has been saved. Runs in a separate thread, and does not block the application.
if not hasattr(self, "on_play_flag"): self.on_play_flag = False
if self.on_play_flag: return
self.on_play_flag = True
sublime.set_timeout(lambda: self.play("on_save"), 100)
def on_modified_async(self, view):
# Called after changes have been made to a view. Runs in a separate thread, and does not block the application.
if not hasattr(self, "on_play_flag"): self.on_play_flag = False
if self.on_play_flag: return
self.on_play_flag = True
sublime.set_timeout(lambda: self.play("on_modify", random=True), 100)
def thread(func):
def wrapper(*args, **kwargs):
threading.Thread(target=lambda: func(*args, **kwargs)).start()
return wrapper
|
Python
| 0.000009
|
@@ -110,24 +110,36 @@
nge%0A
+from threading
import
-t
+T
hread
-ing
%0A%0Acl
@@ -3262,18 +3262,8 @@
-threading.
Thre
|
89c4f92d56ab445d86dfdd321bca6c7e0f30855e
|
Create admin.py
|
weibopub/admin.py
|
weibopub/admin.py
|
Python
| 0
|
@@ -0,0 +1,2216 @@
+from __future__ import unicode_literals%0A%0Afrom django.utils.safestring import mark_safe%0Afrom django.utils.translation import ugettext_lazy as _%0Afrom django.template.defaultfilters import truncatechars%0A%0Afrom mezzanine.weibopub import get_auth_settings%0A%0AFORMFIELD_HTML = %22%22%22%0A%3Cdiv class='send_weibo_container'%3E%0A %3Cinput id='id_send_weibo' name='send_weibo' type='checkbox'%3E%0A %3Clabel class='vCheckboxLabel' for='id_send_weibo'%3E%25s%3C/label%3E%0A%3C/div%3E%0A%22%22%22%0A%0Aclass WeiboAdminMixin(object):%0A %22%22%22%0A Admin mixin that adds a %22Send to Weibo%22 checkbox to the add/change%0A views, which when checked, will send a weibo with the title%E3%80%81pic and link%0A to the object being saved.%0A %22%22%22%0A%0A def formfield_for_dbfield(self, db_field, **kwargs):%0A %22%22%22%0A Adds the %22Send to Weibo%22 checkbox after the %22status%22 field,%0A provided by any %60%60Displayable%60%60 models. The approach here is%0A quite a hack, however the sane approach of using a custom%0A form with a boolean field defined, and then adding it to the%0A formssets attribute of the admin class fell apart quite%0A horrifically.%0A %22%22%22%0A formfield = super(WeiboAdminMixin,%0A self).formfield_for_dbfield(db_field, **kwargs)%0A if db_field.name == %22status%22 and get_auth_settings():%0A def wrapper(render):%0A def wrapped(*args, **kwargs):%0A rendered = render(*args, **kwargs)%0A label = _(%22Pub to Weibo%22)%0A return mark_safe(rendered + FORMFIELD_HTML %25 label)%0A return wrapped%0A formfield.widget.render = wrapper(formfield.widget.render)%0A return formfield%0A%0A def save_model(self, request, obj, form, change):%0A %22%22%22%0A Sends a weibo with the title/pic/short_url if applicable.%0A %22%22%22%0A super(WeiboAdminMixin, self).save_model(request, obj, form, change)%0A if request.POST.get(%22send_weibo%22, False):%0A auth_settings = get_auth_settings()%0A obj.set_short_url()%0A message = truncatechars(obj, 140 - len(obj.short_url) - 1)%0A api = Api(*auth_settings)%0A api.update.post(u'%25s%E3%80%82%5B%E9%98%85%E8%AF%BB%E5%85%A8%E6%96%87:%25s%5D'%25(message,obj.short_url),pic=open('/Users/test.png'))%0A
|
|
626662f0f3ef2ce7de63c424da89263443243e97
|
Fix SpiderState bug in Windows platforms
|
scrapy/contrib/spiderstate.py
|
scrapy/contrib/spiderstate.py
|
import os, cPickle as pickle
from scrapy import signals
class SpiderState(object):
"""Store and load spider state during a scraping job"""
def __init__(self, jobdir=None):
self.jobdir = jobdir
@classmethod
def from_crawler(cls, crawler):
obj = cls(crawler.settings.get('JOBDIR'))
crawler.signals.connect(obj.spider_closed, signal=signals.spider_closed)
crawler.signals.connect(obj.spider_opened, signal=signals.spider_opened)
return obj
def spider_closed(self, spider):
if self.jobdir:
with open(self.statefn, 'wb') as f:
pickle.dump(spider.state, f, protocol=2)
def spider_opened(self, spider):
if self.jobdir and os.path.exists(self.statefn):
with open(self.statefn) as f:
spider.state = pickle.load(f)
else:
spider.state = {}
@property
def statefn(self):
return os.path.join(self.jobdir, 'spider.state')
|
Python
| 0
|
@@ -786,16 +786,22 @@
.statefn
+, 'rb'
) as f:%0A
|
af6eef23e60b88209ce4e9601f0cf457ee421bdc
|
add zone no crawler
|
DataHouse/crawler/zone_no_crawler.py
|
DataHouse/crawler/zone_no_crawler.py
|
Python
| 0.000002
|
@@ -0,0 +1,1473 @@
+%22%22%22%0Anot finish yet%0A%22%22%22%0A%0Aimport requests%0Afrom bs4 import BeautifulSoup%0A%0A%0Adef crawl():%0A url = 'http://quhao.tianqi.com/'%0A headers = %7B%0A 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',%0A 'Host': 'quhao.tianqi.com',%0A 'Referer': 'https://www.baidu.com/link?url=dt9Ft7DGXOxzDe8CX8pIybsRFMUsEzSbE3udUXkowquCMcMgXumd-ruQM4nr4uBD&wd=&eqid=a75b74b50001af66000000035b082294',%0A 'Upgrade-Insecure-Requests': '1',%0A 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36'%0A %7D%0A%0A response = requests.get(url, headers=headers)%0A if response.status_code == 200:%0A soup = BeautifulSoup(response.text, 'html5lib')%0A for li in soup.find('div', class_=%22box%22).find_all('li'):%0A print(li.text)%0A%0A%0Adef crawl_detail(place):%0A url = 'http://quhao.tianqi.com/%25s' %25 place%0A headers = %7B%0A 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',%0A 'Host': 'quhao.tianqi.com',%0A 'Referer': 'http://quhao.tianqi.com/',%0A 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36'%0A %7D%0A response = requests.get(url, headers=headers)%0A if response.status_code == 200:%0A soup = BeautifulSoup(response.text, 'html5lib')%0A%0A%0Aif __name__ == '__main__':%0A crawl()%0A
|
|
5fc6d9fc05bc4cae5588489c576744a518155461
|
Add Sin, Cos and Tan function classes
|
trig.py
|
trig.py
|
Python
| 0
|
@@ -0,0 +1,608 @@
+import function%0Aimport math%0A%0A%0Aclass Sin(function.Function):%0A%0A def _evaluate(self, x):%0A return math.sin(x)%0A%0A%0Aclass Cos(function.Function):%0A%0A def _evaluate(self, x):%0A return math.cos(x)%0A%0A%0Aclass Tan(function.Function):%0A%0A def _evaluate(self, x):%0A sin = Sin()%0A cos = Cos()%0A if cos(x) == 0:%0A raise ZeroDivisionError()%0A return sin(x) / cos(x)%0A%0A%0Adef main():%0A sin = Sin()%0A cos = Cos()%0A tan = Tan()%0A assert(sin(0) == 0)%0A assert(cos(0) == 1)%0A assert(tan(0) == 0)%0A assert((tan + cos + sin)(0) == 1)%0A%0Aif __name__ == %22__main__%22:%0A main()
|
|
d101e1bae5b083b436411507ebe73f12f8088075
|
Create solution.py
|
unit-3-mixed-reading-and-assignment-lessons/lesson-3-assignment-one-code-block/solutions/solution.py
|
unit-3-mixed-reading-and-assignment-lessons/lesson-3-assignment-one-code-block/solutions/solution.py
|
Python
| 0.000018
|
@@ -0,0 +1,249 @@
+def convert_temperature(temperature, to='celsius'):%0A def to_fahrenheit():%0A return (temperature * 9 / 5) + 32%0A%0A def to_celsius():%0A return (temperature - 32) * 5 / 9%0A%0A return (to_celsius if to == 'celsius' else to_fahrenheit)()%0A
|
|
f375be2ac11aa9648c77392cec36900db900c6ef
|
Add Flask server.
|
serve.py
|
serve.py
|
Python
| 0
|
@@ -0,0 +1,821 @@
+import sys%0Aimport flask%0Aimport sqlalchemy as sa%0Aimport coils%0Aimport tables%0A%0Aapp = flask.Flask(__name__)%0A%0A@app.route('/')%0Adef root():%0A%0A # Load configuration file.%0A CONFIG = sys.argv%5B1%5D if len(sys.argv)%3E=2 else 'wabbit.cfg'%0A config = coils.Config(CONFIG)%0A%0A # Connect to database engine.%0A engine = sa.create_engine(%0A 'mysql://%7B%7D:%7B%7D@%7B%7D/%7B%7D'.format(%0A config%5B'username'%5D, config%5B'password'%5D,%0A config%5B'host'%5D, config%5B'db_name'%5D))%0A conn = engine.connect()%0A%0A # Select and print.%0A result = ''%0A s = sa.sql.select(%5Btables.image%5D)%0A rows = conn.execute(s)%0A for row in rows:%0A result += '%7B:s%7D%3Cbr%3E'.format(row)%0A%0A # Close the session.%0A conn.close()%0A return result%0A%0A@app.route('/pics')%0Adef pics():%0A return 'Pictures!'%0A%0Aif __name__ == '__main__':%0A app.run()%0A
|
|
f3ab43a3c24851ca5a6e68c04e56af8c8f9a0fd1
|
add setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,3042 @@
+from setuptools import setup, find_packages # Always prefer setuptools over distutils%0Afrom codecs import open # To use a consistent encoding%0Afrom os import path%0A%0Ahere = path.abspath(path.dirname(__file__))%0A%0A# Get the long description from the relevant file%0A#with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:%0A# long_description = f.read()%0A%0Asetup(%0A name='jupyter-drive',%0A%0A # Versions should comply with PEP440. For a discussion on single-sourcing%0A # the version across setup.py and the project code, see%0A # https://packaging.python.org/en/latest/development.html#single-sourcing-the-version%0A version='0.0.1',%0A%0A description='Interation of IPython/Jupyter with google drive',%0A long_description='',%0A%0A # The project's main homepage.%0A url='https://github.com/jupyter/jupyter-drive',%0A%0A # Author details%0A author='Matthias Bussonnier, Kester Tong, Kyle Kelley, Thomas Kluyver, The IPython team',%0A author_email='ipython-dev@scipy.org',%0A%0A # Choose your license%0A license='BSD',%0A%0A # See https://pypi.python.org/pypi?%253Aaction=list_classifiers%0A classifiers=%5B%0A # How mature is this project? Common values are%0A # 3 - Alpha%0A # 4 - Beta%0A # 5 - Production/Stable%0A 'Development Status :: 3 - Alpha',%0A%0A # Indicate who your project is intended for%0A 'Intended Audience :: Developers',%0A%0A # Pick your license as you wish (should match %22license%22 above)%0A 'License :: OSI Approved :: BSD License',%0A%0A # Specify the Python versions you support here. In particular, ensure%0A # that you indicate whether you support Python 2, Python 3 or both.%0A 'Programming Language :: Python :: 2',%0A 'Programming Language :: Python :: 2.7',%0A 'Programming Language :: Python :: 3',%0A 'Programming Language :: Python :: 3.3',%0A 'Programming Language :: Python :: 3.4',%0A 'Programming Language :: Framework :: IPython',%0A %5D,%0A%0A # What does your project relate to?%0A keywords='ipython jupyter google drive notebook',%0A%0A # You can just specify the packages manually here if your project is%0A # simple. Or you can use find_packages().%0A packages=find_packages(exclude=%5B'contrib', 'docs', 'tests*'%5D),%0A%0A # List run-time dependencies here. These will be installed by pip when your%0A # project is installed. For an analysis of %22install_requires%22 vs pip's%0A # requirements files see:%0A # https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files%0A install_requires=%5B'IPython'%5D,%0A%0A%0A # have to be included in MANIFEST.in as well.%0A package_data=%7B%0A 'jupyter-drive': %5B'*'%5D,%0A %7D,%0A%0A # Although 'package_data' is the preferred approach, in some case you may%0A # need to place data files outside of your packages.%0A # see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files%0A # In this case, 'data_file' will be installed into '%3Csys.prefix%3E/my_data'%0A data_files=%5B('jupyter-drive', %5B'*'%5D)%5D,%0A%0A %7D,%0A)%0A
|
|
1b77c721b53d59e1b6242906780941d262d070e8
|
add basic setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,1229 @@
+from setuptools import setup%0D%0Afrom esipy import __version__ %0D%0A%0D%0A# install requirements%0D%0Ainstall_requirements = %5B%0D%0A %22requests%22,%0D%0A %22pyswagger%22,%0D%0A %22six%22%0D%0A%5D%0D%0A%0D%0A# test requirements%0D%0Atest_requirements = %5B%0D%0A %22coverage%22,%0D%0A %22coveralls%22,%0D%0A %22httmock%22,%0D%0A %22nose%22,%0D%0A %22mock%22,%0D%0A %22future%22,%0D%0A %22python-memcached%22%0D%0A%5D + install_requirements%0D%0A%0D%0Asetup(%0D%0A name='EsiPy',%0D%0A version=__version__,%0D%0A packages=%5B'esipy'%5D,%0D%0A url='https://github.com/Kyria/EsiPy',%0D%0A license='BSD 3-Clause License',%0D%0A author='Kyria',%0D%0A author_email='anakhon@gmail.com',%0D%0A description='Swagger Client for the ESI API for EVE Online',%0D%0A install_requires=install_requirements,%0D%0A tests_require=test_requirements,%0D%0A test_suite='nose.collector',%0D%0A classifiers=%5B%0D%0A %22Development Status :: 4 - Beta%22,%0D%0A %22Intended Audience :: Developers%22,%0D%0A %22Programming Language :: Python%22,%0D%0A %22Programming Language :: Python :: 2.7%22,%0D%0A %22Programming Language :: Python :: 3%22,%0D%0A %22Programming Language :: Python :: 3.2%22,%0D%0A %22Programming Language :: Python :: 3.3%22,%0D%0A %22Programming Language :: Python :: 3.4%22,%0D%0A %22Programming Language :: Python :: Implementation :: PyPy%22,%0D%0A %5D%0D%0A)%0D%0A
|
|
518e1d56a23eb1d5b4bb31ae46c958e519addfc0
|
add setup file
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,636 @@
+#!/usr/bin/env python%0A%0Afrom os.path import exists%0Afrom setuptools import setup, find_packages%0A%0Asetup(name='topik',%0A version='0.1.0',%0A description='A Topic Modeling high-level interface',%0A url='http://github.com/ContinuumIO/topik/',%0A author='Christine Doig',%0A author_email='christine.doig@continuum.io',%0A license='BSD',%0A keywords='topic modeling lda nltk gensim',%0A packages=find_packages(),%0A install_requires=list(open('requirements.txt').read().strip().split('%5Cn')),%0A long_description=(open('README.rst').read() if exists('README.rst')%0A else ''), zip_safe=False)%0A
|
|
07cda3fee1215f4d28e3885796c5d5f02ec28918
|
add beginning for D4 qcodes wrapper
|
D4/D4.py
|
D4/D4.py
|
Python
| 0
|
@@ -0,0 +1,389 @@
+from qcodes import Instrument%0Afrom qcodes.utils.validators import Numbers%0A%0Afrom .D4_module import D4_module%0A%0A%0Aclass D4(Instrument):%0A %22%22%22%0A Qcodes driver for the D4 ADC SPI-rack module.%0A %22%22%22%0A def __init__(self, name, spi_rack, module, **kwargs):%0A super().__init__(name, **kwargs)%0A%0A self.d4 = D4_module(spi_rack, module)%0A%0A for i in range(2):%0A pass%0A
|
|
0cce9a108d97b61bc36c1d6873a801ae5a02ee10
|
Add setup.py script.
|
setup.py
|
setup.py
|
Python
| 0
|
@@ -0,0 +1,371 @@
+from distutils.core import setup%0A%0Asetup(%0A name=%22booleanOperations%22,%0A version=%220.1%22,%0A description=%22Boolean operations on paths.%22,%0A author=%22Frederik Berlaen%22,%0A url=%22https://github.com/typemytype/booleanOperations%22,%0A license=%22MIT%22,%0A packages=%5B%22booleanOperations%22%5D,%0A package_dir=%7B%22%22: %22Lib%22%7D,%0A package_data=%7B%22booleanOperations%22: %5B%22pyClipper.so%22%5D%7D%0A)%0A
|
|
6edea0e1f15c3905251793238fe88641a5935fed
|
Update version to 3.6.2
|
setup.py
|
setup.py
|
from setuptools import setup
description = """
Full featured redis cache backend for Django.
"""
setup(
name = "django-redis",
url = "https://github.com/niwibe/django-redis",
author = "Andrei Antoukh",
author_email = "niwi@niwi.be",
version='3.6.1',
packages = [
"redis_cache",
"redis_cache.client"
],
description = description.strip(),
install_requires=[
'redis>=2.9.0',
],
zip_safe=False,
include_package_data = True,
package_data = {
'': ['*.html'],
},
classifiers = [
"Development Status :: 5 - Production/Stable",
"Operating System :: OS Independent",
"Environment :: Web Environment",
"Framework :: Django",
"License :: OSI Approved :: BSD License",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
],
)
|
Python
| 0.000001
|
@@ -265,9 +265,9 @@
3.6.
-1
+2
',%0A
|
709aef34e608eb86dc7cbffd5635f78d8b5f59f3
|
add fasta2bed.py
|
fasta2bed.py
|
fasta2bed.py
|
Python
| 0.000077
|
@@ -0,0 +1,245 @@
+'''Credit: Elijah Lowe'''%0A%0A'''Read in sequences in FASTA format and print out BED format.'''%0Aimport screed, sys%0A%0Ainfile = sys.argv%5B1%5D%0A%0Afor n, record in enumerate(screed.open(infile)):%0A    print record%5B'name'%5D+%22%5Ct0%5Ct%22+str(len(record%5B'sequence'%5D))%0A
|
|
ce43e24355da26fbd40f7f32101ed631c2ae787d
|
Version bump 1.9.4
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
import os
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-cities-light',
version='1.9.3',
description='Simple alternative to django-cities',
author='James Pic',
author_email='jamespic@gmail.com',
url='https://github.com/yourlabs/django-cities-light',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
long_description=read('README.rst'),
license = 'MIT',
keywords = 'django cities countries postal codes',
install_requires=[
'django',
'django_autoslug',
'progressbar==2.2',
],
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
Python
| 0
|
@@ -416,17 +416,17 @@
on='1.9.
-3
+4
',%0A d
|
58e4c8c5cd3c47fb4dc4e6e772b0f300fe890225
|
Add missed setup.py
|
setup.py
|
setup.py
|
Python
| 0
|
@@ -0,0 +1,385 @@
+#!/usr/bin/env python%0Afrom setuptools import setup, find_packages%0A%0Asetup(%0A name='myprefetch',%0A version='0.1',%0A description='MySQL Replication Prefetcher',%0A packages=find_packages(),%0A long_description=open('README.md').read(),%0A license=open('LICENSE').read(),%0A url='https://github.com/vine/mysql-prefetcher',%0A install_requires=%5B%0A 'MySQL-python',%0A %5D,%0A)%0A
|
|
46b723ad4b48e29225cf8fcc44fa90bc9cfc3e21
|
Enable distutils for package
|
setup.py
|
setup.py
|
Python
| 0
|
@@ -0,0 +1,412 @@
+from distutils.core import setup%0A%0Asetup(%0A name='Getty',%0A version='0.0.1',%0A author='Clemens Wolff',%0A author_email='clemens.wolff+pypi@gmail.com',%0A packages=%5B'getty'%5D,%0A url='https://github.com/c-w/Getty',%0A download_url='http://pypi.python.org/pypi/Getty',%0A license='LICENSE.txt',%0A description='Scraper for art available from getty.edu',%0A long_description=open('README.rst').read(),%0A)%0A
|
|
b4a41b129a33361ebcb45de87a236952943ab3c3
|
Create setup.py
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,1497 @@
+#!/usr/bin/env python%0A# %0A# Copyright (c) 2009-2013 Kyle Gorman%0A# %0A# Permission is hereby granted, free of charge, to any person obtaining a %0A# copy of this software and associated documentation files (the %0A# %22Software%22), to deal in the Software without restriction, including %0A# without limitation the rights to use, copy, modify, merge, publish, %0A# distribute, sublicense, and/or sell copies of the Software, and to %0A# permit persons to whom the Software is furnished to do so, subject to %0A# the following conditions:%0A# %0A# The above copyright notice and this permission notice shall be included %0A# in all copies or substantial portions of the Software.%0A# %0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS %0A# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF %0A# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. %0A# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY %0A# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, %0A# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE %0A# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.%0A# %0A# setup.py: for building SWIG bridge to Python%0A# Kyle Gorman%0A%0Afrom distutils.core import setup, Extension%0A%0Asetup(name='swipe', version='1.5', author='Kyle Gorman', %0Adescription=%22%22%22SWIPE' pitch estimator%22%22%22, py_modules=%5B'swipe'%5D, %0Aext_modules=%5BExtension('_swipe', sources=%5B'vector.c', 'swipe.c', 'swipe_wrap.c'%5D, libraries=%5B'sndfile', 'lapack', 'fftw3'%5D)%5D)%0A
|
|
3af11eab7373a937f8df9858efbc41cdc9cfc940
|
Package Linehaul
|
setup.py
|
setup.py
|
Python
| 0
|
@@ -0,0 +1,672 @@
+from setuptools import setup, find_packages%0A%0Ainstall_requires = %5B%5D%0Awith open(%22requirements/main.in%22, %22r%22) as fp:%0A for line in fp:%0A line.strip()%0A if line:%0A install_requires.append(line)%0A%0A%0Asetup(%0A name=%22linehaul%22,%0A use_scm_version=%7B%0A %22local_scheme%22: lambda v: %22+%7B.node%7D%7B%7D%22.format(v, %22.dirty%22 if v.dirty else %22%22),%0A %22version_scheme%22: lambda v: %223.%7B.distance%7D.0%22.format(v),%0A %7D,%0A packages=find_packages(exclude=%5B%22tests*%22%5D),%0A package_data=%7B%22linehaul%22: %5B%22schema.json%22%5D%7D,%0A entry_points=%7B%22console_scripts%22: %5B%22linehaul = linehaul.cli:main%22%5D%7D,%0A install_requires=install_requires,%0A setup_requires=%5B%22setuptools_scm%22%5D,%0A)%0A
|
|
f743220b6d6868b3a489a1843dda329ed0a7d5c4
|
Add Python setup file
|
setup.py
|
setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,289 @@
+from setuptools import setup%0A%0Asetup(name='amaya',%0A version='0.1',%0A description='IRCv3 capable bot framework',%0A url='http://github.com/bookhorse/amaya',%0A author='Nicole Brennan',%0A author_email='twipony.ts@gmail.com',%0A license='ZLib',%0A packages=%5B'amaya'%5D)%0A
|
|
250261038893c7f5b004776c4aec01ebfc1d9012
|
Fix a typo
|
setup.py
|
setup.py
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
#
# Generate a Flocker package that canbe deployed onto cluster nodes.
#
import os.path
from setuptools import setup
path = os.path.join(os.path.dirname(__file__), b"flocker/version")
with open(path) as fObj:
version = fObj.read().strip()
del path
setup(
# This is the human-targetted name of the software being packaged.
name="Flocker",
# This is a string giving the version of the software being packaged. For
# simplicity it should be something boring like X.Y.Z.
version=version,
# This identifies the creators of this software. This is left symbolic for
# ease of maintenance.
author="HybridCluster Team",
# This is contact information for the authors.
author_email="support@hybridcluster.com",
# Here is a website where more information about the software is available.
url="http://hybridcluster.com/",
# This defines *Python* packages (in other words, things that can be
# imported) which are part of the package. Most of what they contain will
# be included in the package automatically by virtue of the packages being
# mentioned here. These aren't recursive so each sub-package must also be
# explicitly included.
packages=[
"flocker", "flocker.test",
],
# This defines extra non-source files that live in the source tree that
# need to be included as part of the package.
package_data={
# This is the canonical definition of the source form of the cluster
# version.
"flocker": ["version"],
},
extras_require={
# This extra allows you to build the documentation for Flocker.
"doc": ["Sphinx==1.2", "sphinx-rtd-theme==0.1.6"],
# This extra is for developers who need to work on Flocker itself.
"dev": ["pyflakes==0.8.1"]
},
)
|
Python
| 1
|
@@ -93,16 +93,17 @@
that can
+
be deplo
|
b46119ada62fbcb4791cd8ce210e34a43564df5b
|
Add setup_tools cruft
|
setup.py
|
setup.py
|
Python
| 0
|
@@ -0,0 +1,343 @@
+from distutils.core import setup%0A%0Asetup(name = 'MNML',%0A description = 'A very lightweight WSGI Python web framework',%0A author = 'Bradley Wright',%0A author_email = 'me@bradleywright.name',%0A url = 'http://github.com/bradleywright/mnml',%0A version = '0.1',%0A py_modules = %5B'mnml'%5D,%0A )
|
|
328204f4158a829c6922019dcd83d3afbca2536d
|
bump to 0.4
|
setup.py
|
setup.py
|
from distutils.core import setup
setup(name='zencoder',
version='0.3',
description='Integration library for Zencoder',
author='Alex Schworer',
author_email='alex.schworer@gmail.com',
url='http://github.com/schworer/zencoder-py',
license="MIT License",
install_requires=['httplib2'],
packages=['zencoder']
)
|
Python
| 0.000024
|
@@ -72,9 +72,9 @@
='0.
-3
+4
',%0A
|
e9b8c9489f4a43a43a865853f9bbdb246e445802
|
Comment setup.py
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import os
from glob import glob
import platform
if os.environ.get('USE_SETUPTOOLS'):
from setuptools import setup
setup # workaround for pyflakes issue #13
setup_kwargs = dict(zip_safe=0)
else:
from distutils.core import setup
setup_kwargs = dict()
data_files = [
('share/diamond', ['LICENSE', 'README.md', 'version.txt']),
('share/diamond/user_scripts', []),
]
if os.getenv('VIRTUAL_ENV', False):
data_files.append(('etc/diamond',
glob('conf/*.conf.*')))
data_files.append(('etc/diamond/collectors',
glob('conf/collectors/*')))
data_files.append(('etc/diamond/handlers',
glob('conf/handlers/*')))
else:
data_files.append(('/etc/diamond',
glob('conf/*.conf.*')))
data_files.append(('/etc/diamond/collectors',
glob('conf/collectors/*')))
data_files.append(('/etc/diamond/handlers',
glob('conf/handlers/*')))
if platform.dist()[0] == 'Ubuntu':
data_files.append(('/etc/init',
['debian/upstart/diamond.conf']))
if platform.dist()[0] in ['centos', 'redhat']:
data_files.append(('/etc/init.d',
['bin/init.d/diamond']))
data_files.append(('/var/log/diamond',
['.keep']))
if platform.dist()[1].split('.')[0] >= '6':
data_files.append(('/etc/init',
['rpm/upstart/diamond.conf']))
# Support packages being called differently on different distros
if platform.dist()[0] in ['centos', 'redhat']:
install_requires = ['python-configobj', 'psutil', ],
else:
install_requires = ['ConfigObj', 'psutil', ],
def get_version():
try:
f = open('version.txt')
except IOError:
os.system("./version.sh > version.txt")
f = open('version.txt')
version = ''.join(f.readlines()).rstrip()
f.close()
return version
def pkgPath(root, path, rpath="/"):
global data_files
if not os.path.exists(path):
return
files = []
for spath in os.listdir(path):
subpath = os.path.join(path, spath)
spath = os.path.join(rpath, spath)
if os.path.isfile(subpath):
files.append(subpath)
data_files.append((root + rpath, files))
for spath in os.listdir(path):
subpath = os.path.join(path, spath)
spath = os.path.join(rpath, spath)
if os.path.isdir(subpath):
pkgPath(root, subpath, spath)
pkgPath('share/diamond/collectors', 'src/collectors')
version = get_version()
setup(
name='diamond',
version=version,
url='https://github.com/BrightcoveOS/Diamond',
author='The Diamond Team',
author_email='https://github.com/BrightcoveOS/Diamond',
license='MIT License',
description='Smart data producer for graphite graphing package',
package_dir={'': 'src'},
packages=['diamond', 'diamond.handler'],
scripts=['bin/diamond', 'bin/diamond-setup'],
data_files=data_files,
install_requires=install_requires,
#test_suite='test.main',
** setup_kwargs
)
|
Python
| 0
|
@@ -1794,16 +1794,197 @@
sion():%0A
+ %22%22%22%0A Read the version.txt file to get the new version string%0A Generate it if version.txt is not available. Generation%0A is required for pip installs%0A %22%22%22%0A
try:
@@ -2232,16 +2232,70 @@
h=%22/%22):%0A
+ %22%22%22%0A Package up a path recursively%0A %22%22%22%0A
glob
|
bd41935801fb01d85d7f3f600c6b94f077cdf82f
|
Add solid motor utilities.
|
solid.py
|
solid.py
|
Python
| 0
|
@@ -0,0 +1,810 @@
+''' Solid rocket motor equations.%0A%0AMatt Vernacchia%0Aproptools%0A2016 Aug 22%0A'''%0A%0Adef chamber_pressure(K_n, a, n, rho_solid, c_star):%0A    ''' Chamber pressure due to solid propellant combustion.%0A%0A    See equation 12-6 in Rocket Propulsion Elements 8th edition.%0A%0A    Args:%0A        K_n (scalar): Ratio of burning area to throat area, A_b/A_t %5Bunits: none%5D.%0A        a (scalar): Propellant burn rate coefficient %5Bunits: meter second**-1 pascal**-n%5D.%0A        n (scalar): Propellant burn rate exponent %5Bunits: none%5D.%0A        rho_solid (scalar): Solid propellant density %5Bunits: kilogram meter**-3%5D.%0A        c_star (scalar): Propellant combustion characteristic velocity %5Bunits: meter second**-1%5D.%0A%0A    Returns:%0A        Chamber pressure %5Bunits: pascal%5D.%0A    '''%0A    return (K_n * rho_solid * a * c_star) ** (1 / (1 - n))%0A
|
|
62d817cde6a8c58372125f551d8122cc303ac4b5
|
Add a new gclient-new-workdir script which clones an existing gclient working directory much like git-new-workdir, but takes into account all sub projects as well.
|
gclient-new-workdir.py
|
gclient-new-workdir.py
|
Python
| 0.000226
|
@@ -0,0 +1,2495 @@
+#!/usr/bin/env python%0A# Copyright 2013 The Chromium Authors. All rights reserved.%0A# Use of this source code is governed by a BSD-style license that can be%0A# found in the LICENSE file.%0A#%0A# Usage:%0A# gclient-new-workdir.py %3Crepository%3E %3Cnew_workdir%3E %5B%3Cbranch%3E%5D%0A#%0A%0Aimport os%0Aimport shutil%0Aimport subprocess%0Aimport sys%0A%0A%0Adef parse_options(argv):%0A assert not sys.platform.startswith(%22win%22)%0A%0A if len(argv) != 3:%0A print(%22usage: gclient-new-workdir.py %3Crepository%3E %3Cnew_workdir%3E%22)%0A sys.exit(1)%0A%0A repository = argv%5B1%5D%0A new_workdir = argv%5B2%5D%0A%0A if not os.path.exists(repository):%0A print(%22Repository does not exist: %22 + repository)%0A sys.exit(1)%0A%0A if os.path.exists(new_workdir):%0A print(%22New workdir already exists: %22 + new_workdir)%0A sys.exit(1)%0A%0A return repository, new_workdir%0A%0A%0Adef main(argv):%0A repository, new_workdir = parse_options(argv)%0A%0A gclient = os.path.join(repository, %22.gclient%22)%0A if not os.path.exists(gclient):%0A print(%22No .gclient file: %22 + gclient)%0A%0A gclient_entries = os.path.join(repository, %22.gclient_entries%22)%0A if not os.path.exists(gclient_entries):%0A print(%22No .gclient_entries file: %22 + gclient_entries)%0A%0A os.mkdir(new_workdir)%0A os.symlink(gclient, os.path.join(new_workdir, %22.gclient%22))%0A os.symlink(gclient_entries, os.path.join(new_workdir, %22.gclient_entries%22))%0A%0A for root, dirs, _ in os.walk(repository):%0A if %22.git%22 in dirs:%0A workdir = root.replace(repository, new_workdir, 1)%0A make_workdir(os.path.join(root, %22.git%22),%0A os.path.join(workdir, %22.git%22))%0A%0A%0Adef make_workdir(repository, new_workdir):%0A print(%22Creating: %22 + new_workdir)%0A os.makedirs(new_workdir)%0A%0A GIT_DIRECTORY_WHITELIST = %5B%0A %22config%22,%0A %22info%22,%0A %22hooks%22,%0A %22logs/refs%22,%0A %22objects%22,%0A %22packed-refs%22,%0A %22refs%22,%0A %22remotes%22,%0A %22rr-cache%22,%0A %22svn%22%0A %5D%0A%0A for entry in GIT_DIRECTORY_WHITELIST:%0A make_symlink(repository, new_workdir, entry)%0A%0A shutil.copy2(os.path.join(repository, %22HEAD%22),%0A os.path.join(new_workdir, %22HEAD%22))%0A subprocess.check_call(%5B%22git%22, %22checkout%22, %22-f%22%5D,%0A cwd=new_workdir.rstrip(%22.git%22))%0A%0A%0Adef make_symlink(repository, new_workdir, link):%0A if not os.path.exists(os.path.join(repository, link)):%0A return%0A link_dir = os.path.dirname(os.path.join(new_workdir, link))%0A if not os.path.exists(link_dir):%0A os.makedirs(link_dir)%0A os.symlink(os.path.join(repository, link), os.path.join(new_workdir, link))%0A%0A%0Aif __name__ == '__main__':%0A sys.exit(main(sys.argv))%0A
|
|
a45c78d0fcc6d8cd2d8e702917a2dabd7bfc0444
|
Add command createaccount
|
saleor/account/management/commands/createserviceaccount.py
|
saleor/account/management/commands/createserviceaccount.py
|
Python
| 0.000016
|
@@ -0,0 +1,3186 @@
+import json%0Afrom typing import Any, Dict, List, Optional%0A%0Aimport requests%0Afrom django.contrib.auth.models import Permission%0Afrom django.contrib.sites.models import Site%0Afrom django.core.management import BaseCommand, CommandError%0Afrom django.core.management.base import CommandParser%0Afrom requests.exceptions import RequestException%0A%0Afrom ....core.permissions import get_permissions, get_permissions_enum_list%0Afrom ...models import ServiceAccount%0A%0A%0Aclass Command(BaseCommand):%0A help = %22Used to create service account%22%0A%0A def add_arguments(self, parser: CommandParser) -%3E None:%0A parser.add_argument(%22name%22, type=str)%0A parser.add_argument(%0A %22--permission%22,%0A action=%22append%22,%0A default=%5B%5D,%0A dest=%22permissions%22,%0A help=%22Assign new permission to Service Account. %22%0A %22Argument can be specified multiple times.%22,%0A )%0A parser.add_argument(%22--is_active%22, default=True, dest=%22is_active%22)%0A parser.add_argument(%0A %22--target_url%22,%0A dest=%22target_url%22,%0A help=%22Url which will receive newly created data of service account object.%22,%0A )%0A%0A def validate_permissions(self, required_permissions: List%5Bstr%5D):%0A permissions = list(map(lambda x: x%5B1%5D, get_permissions_enum_list()))%0A for perm in required_permissions:%0A if perm not in permissions:%0A raise CommandError(%0A f%22Permisssion: %7Bperm%7D doesn't exist in Saleor.%22%0A f%22 Avaiable permissions: %7Bpermissions%7D%22%0A )%0A%0A def clean_permissions(self, required_permissions: List%5Bstr%5D) -%3E List%5BPermission%5D:%0A permissions = get_permissions(required_permissions)%0A return permissions%0A%0A def send_service_account_data(self, target_url, data: Dict%5Bstr, Any%5D):%0A domain = Site.objects.get_current().domain%0A headers = %7B%22x-saleor-domain%22: domain%7D%0A try:%0A response = requests.post(target_url, json=data, headers=headers, timeout=15)%0A except RequestException as e:%0A raise CommandError(f%22Request failed. Exception: %7Be%7D%22)%0A if response.status_code != 200:%0A raise CommandError(%0A f%22Failed to send service account data to %7Btarget_url%7D. %22 # type: ignore%0A f%22Status code: %7Bresponse.status_code%7D, content: %7Bresponse.content%7D%22%0A )%0A%0A def handle(self, *args: Any, **options: Any) -%3E Optional%5Bstr%5D:%0A name = options%5B%22name%22%5D%0A is_active = options%5B%22is_active%22%5D%0A target_url = options%5B%22target_url%22%5D%0A permissions = list(set(options%5B%22permissions%22%5D))%0A self.validate_permissions(permissions)%0A%0A service_account = ServiceAccount.objects.create(name=name, is_active=is_active)%0A permissions_qs = self.clean_permissions(permissions)%0A service_account.permissions.add(*permissions_qs)%0A token_obj = service_account.tokens.create()%0A data = %7B%0A %22auth_token%22: token_obj.auth_token,%0A %22name%22: name,%0A %22permissions%22: permissions,%0A %7D%0A if target_url:%0A self.send_service_account_data(target_url, data)%0A return json.dumps(data)%0A
|
|
b4f76fdf1f500be289082d6a0fdc90835dff74fd
|
Update forward compatibility horizon to 2022-05-15
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2022, 5, 14)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
if date < _FORWARD_COMPATIBILITY_HORIZON:
logging.warning("Trying to set the forward compatibility date to the past"
" date %s. This will be ignored by TensorFlow." % (date))
return
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0
|
@@ -1339,9 +1339,9 @@
5, 1
-4
+5
)%0A_F
|
ef07c46210b17d407a9f9097af43bb77e16325d3
|
Update forward compatibility horizon to 2022-05-21
|
tensorflow/python/compat/compat.py
|
tensorflow/python/compat/compat.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2022, 5, 20)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
if date < _FORWARD_COMPATIBILITY_HORIZON:
logging.warning("Trying to set the forward compatibility date to the past"
" date %s. This will be ignored by TensorFlow." % (date))
return
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
|
Python
| 0
|
@@ -1335,17 +1335,17 @@
22, 5, 2
-0
+1
)%0A_FORWA
|
b8029643ca5dd5d559b7411b6fbb20896502fd7b
|
Create solution.py
|
hackerrank/algorithms/implementation/medium/almost_sorted/py/solution.py
|
hackerrank/algorithms/implementation/medium/almost_sorted/py/solution.py
|
Python
| 0.000018
|
@@ -0,0 +1,1193 @@
+def solution(L):%0A def isAscending(L):%0A m = 1%0A%0A while m %3C len(L):%0A if L%5Bm%5D %3C L%5Bm - 1%5D:%0A return False%0A m += 1%0A%0A return True%0A%0A buffer = list(L%5B:%5D)%0A i = 1%0A%0A while i %3C len(buffer):%0A if buffer%5Bi%5D %3C buffer%5Bi - 1%5D:%0A i -= 1%0A break%0A i += 1%0A%0A j = len(buffer) - 1%0A%0A while j %3E i:%0A if buffer%5Bj%5D %3C buffer%5Bj - 1%5D:%0A j += 1%0A break%0A j -= 1%0A else:%0A return 'yes', 0, 0%0A %0A buffer%5Bi%5D, buffer%5Bj - 1%5D = buffer%5Bj - 1%5D, buffer%5Bi%5D%0A%0A if isAscending(buffer):%0A return 'swap', i + 1, j%0A else:%0A buffer%5Bi%5D, buffer%5Bj - 1%5D = buffer%5Bj - 1%5D, buffer%5Bi%5D%0A%0A k = i + 1%0A%0A while k %3C j:%0A if buffer%5Bk%5D %3E buffer%5Bk - 1%5D:%0A break%0A k += 1%0A else:%0A buffer%5Bi:j%5D = reversed(buffer%5Bi:j%5D)%0A %0A if isAscending(buffer):%0A return 'reverse', i + 1, j%0A else:%0A return 'no', 0, 0%0A%0A return 'no', 0, 0%0A%0An = int(input())%0AL = list(map(int, input().split()))%0Aans, i, j = solution(L)%0A%0Aif ans == 'no' or ans == 'yes':%0A print(ans)%0Aelse:%0A print('yes')%0A print(ans, i, j)%0A
|
|
e57c1b157b39eac278552fd6c9a16e004d8be501
|
Create task_1_2.py
|
INBa/2014/Andreev_F_I/task_1_2.py
|
INBa/2014/Andreev_F_I/task_1_2.py
|
Python
| 0.999998
|
@@ -0,0 +1,378 @@
+# %D0%97%D0%B0%D0%B4%D0%B0%D1%87%D0%B0 1, %D0%92%D0%B0%D1%80%D0%B8%D0%B0%D0%BD%D1%82 2 %0A# %D0%9D%D0%B0%D0%BF%D0%B8%D1%88%D0%B8%D1%82%D0%B5 %D0%BF%D1%80%D0%BE%D0%B3%D1%80%D0%B0%D0%BC%D0%BC%D1%83, %D0%BA%D0%BE%D1%82%D0%BE%D1%80%D0%B0%D1%8F %D0%B1%D1%83%D0%B4%D0%B5%D1%82 %D1%81%D0%BE%D0%BE%D0%B1%D1%89%D0%B0%D1%82%D1%8C %D1%80%D0%BE%D0%B4 %D0%B4%D0%B5%D1%8F%D1%82%D0%B5%D0%BB%D1%8C%D0%BD%D0%BE%D1%81%D1%82%D0%B8 %D0%B8 %D0%BF%D1%81%D0%B5%D0%B2%D0%B4%D0%BE%D0%BD%D0%B8%D0%BC %D0%BF%D0%BE%D0%B4 %D0%BA%D0%BE%D1%82%D0%BE%D1%80%D1%8B%D0%BC %D1%81%D0%BA%D1%80%D1%8B%D0%B2%D0%B0%D0%B5%D1%82%D1%81%D1%8F %D0%9C%D0%B0%D1%80%D1%82%D0%B8%D0%BD %D0%90%D0%BD%D0%B4%D0%B5%D1%80%D1%81%D0%B5%D0%BD. %D0%9F%D0%BE%D1%81%D0%BB%D0%B5 %D0%B2%D1%8B%D0%B2%D0%BE%D0%B4%D0%B0 %D0%B8%D0%BD%D1%84%D0%BE%D1%80%D0%BC%D0%B0%D1%86%D0%B8%D0%B8 %D0%BF%D1%80%D0%BE%D0%B3%D1%80%D0%B0%D0%BC%D0%BC%D0%B0 %D0%B4%D0%BE%D0%BB%D0%B6%D0%BD%D0%B0 %D0%B4%D0%BE%D0%B6%D0%B8%D0%B4%D0%B0%D1%82%D1%8C%D1%81%D1%8F %D0%BF%D0%BE%D0%BA%D0%B0 %D0%BF%D0%BE%D0%BB%D1%8C%D0%B7%D0%BE%D0%B2%D0%B0%D1%82%D0%B5%D0%BB%D1%8C %D0%BD%D0%B0%D0%B6%D0%BC%D0%B5%D1%82 Enter %D0%B4%D0%BB%D1%8F %D0%B2%D1%8B%D1%85%D0%BE%D0%B4%D0%B0.%0A%0A# %D0%90%D0%BD%D0%B4%D1%80%D0%B5%D0%B5%D0%B2 %D0%A4.%D0%98.%0A# 23.05.2016%0Aprint('%D0%9C%D0%B0%D1%80%D1%82%D0%B8%D0%BD %D0%90%D0%BD%D0%B4%D0%B5%D1%80%D1%81%D0%B5%D0%BD %D0%B8%D0%B7%D0%B2%D0%B5%D1%81%D1%82%D0%BD%D1%8B%D0%B9 %D0%B4%D0%B0%D1%82%D1%81%D0%BA%D0%B8%D0%B9 %D0%BF%D0%B8%D1%81%D0%B0%D1%82%D0%B5%D0%BB%D1%8C-%D0%BA%D0%BE%D0%BC%D0%BC%D1%83%D0%BD%D0%B8%D1%81%D1%82, %D0%B5%D0%B3%D0%BE %D0%BF%D1%81%D0%B5%D0%B2%D0%B4%D0%BE%D0%BD%D0%B8%D0%BC %D0%9D%D0%B5%D0%BA%D1%81%D0%B5')%0Ainput(%22%D0%9D%D0%B0%D0%B6%D0%BC%D0%B8%D1%82%D0%B5 Enter %D0%B4%D0%BB%D1%8F %D0%B2%D1%8B%D1%85%D0%BE%D0%B4%D0%B0%22)%0A
|
|
1d55ad8fb8309918e7d41b4f443e16ebefbb1895
|
Add script for populating elasticsearch.
|
populate_elasticsearch.py
|
populate_elasticsearch.py
|
Python
| 0
|
@@ -0,0 +1,987 @@
+#!/usr/bin/env python%0A%0Aimport os%0Aimport sys%0Aimport json%0Aimport requests%0Aimport multiprocessing%0Aimport mwparserfromhell as mwp%0A%0AES_URL = 'http://localhost:9200'%0A%0ASECTIONS_TO_REMOVE = set(%5B%0A 'references', 'see also', 'external links', 'footnotes'%0A%5D)%0A%0Adef put_document(path):%0A id = os.path.basename(path)%0A doc = json.load(file(path))%0A wdoc = mwp.parse(doc%5B'wikitext'%5D)%0A for section in wdoc.get_sections(include_headings = True):%0A try:%0A title = section.get(0).title.strip().lower()%0A if title in SECTIONS_TO_REMOVE:%0A wdoc.remove(section)%0A except (IndexError, AttributeError):%0A # No heading or empty section?%0A pass%0A doc%5B'wikitext'%5D = wdoc.strip_code()%0A response = requests.put(%0A ES_URL + '/' + sys.argv%5B2%5D + '/' + id, json.dumps(doc))%0A print response.content%0A%0Apool = multiprocessing.Pool()%0Apool.map(put_document, %5B%0A os.path.join(sys.argv%5B1%5D, id)%0A for id in os.listdir(sys.argv%5B1%5D)%5D)%0A
|
|
1362703d4068a03c2970962ef2c05c3b128edcf1
|
Create 5function.py
|
introduction/5function.py
|
introduction/5function.py
|
Python
| 0.000494
|
@@ -0,0 +1,121 @@
+def fun():%0A name = %22av%22%0A age = int(19)%0A print(%22%25s is %25d years old.%22%25 (name, age))%0A return %22end%22%0Aprint(fun())%0A
|
|
a6235ab8f9635d434f312eb7b0d442a0894ff208
|
Fix error when parsing HTML page without <html> (#6077)
|
devsiteParseHTML.py
|
devsiteParseHTML.py
|
import os
import re
import yaml
import logging
import devsiteHelper
from google.appengine.ext.webapp.template import render
SOURCE_PATH = os.path.join(os.path.dirname(__file__), 'src/content/')
SERVED_FROM_AE = not os.environ['SERVER_SOFTWARE'].startswith('Dev')
def parse(requestPath, fileLocation, content, lang='en'):
context = {
'lang': lang,
'requestPath': requestPath.replace('/index', ''),
'bodyClass': 'devsite-doc-page',
'servedFromAppEngine': SERVED_FROM_AE
}
## Get the HTML tag
htmlTag = re.search(r'<html.*?>', content)
if htmlTag is None:
log.warning('Does not contain <html> root element')
else:
htmlTag = htmlTag.group(0)
# Check the HTML tag contains the devsite
if htmlTag.find('devsite') == -1:
return content
# Isolate the <head>
headStart = content.find('<head')
headEnd = content.find('</head>')
head = content[headStart:headEnd].strip()
# Isolate the <body>
bodyStart = content.find('<body')
bodyEnd = content.rfind('</body>')
body = content[bodyStart:bodyEnd].strip()
body = re.sub(r'<body.*?>', '', body)
# Remove any comments {# something #}
body = re.sub(r'{#.+?#}', '', body)
body = re.sub(r'{% comment %}.*?{% endcomment %}(?ms)', '', body)
# Render any DevSite specific tags
body = devsiteHelper.renderDevSiteContent(body, lang)
# Read the project.yaml file
projectPath = re.search('name=\"project_path\" value=\"(.*?)\"', head)
projectPath = projectPath.group(1)
projectYaml = yaml.load(devsiteHelper.readFile(projectPath, lang))
context['projectYaml'] = projectYaml
# Read the parent project.yaml file if applicable
parentProjectYaml = None
if 'parent_project_metadata_path' in projectYaml:
parentprojectPath = projectYaml['parent_project_metadata_path']
parentProjectYaml = yaml.load(devsiteHelper.readFile(parentprojectPath, lang))
# Read the book.yaml file and generate the left hand nav
bookPath = re.search('name=\"book_path\" value=\"(.*?)\"', head)
bookPath = bookPath.group(1)
bookYaml = devsiteHelper.parseBookYaml(bookPath, lang)
context['bookYaml'] = devsiteHelper.expandBook(bookYaml)
context['lowerTabs'] = devsiteHelper.getLowerTabs(bookYaml)
context['renderedLeftNav'] = devsiteHelper.getLeftNav(requestPath, bookYaml)
# Get the logo row (TOP ROW) icon
context['logoRowIcon'] = projectYaml['icon']['path']
# Get the logo row (TOP ROW) title
if parentProjectYaml:
context['logoRowTitle'] = parentProjectYaml['name']
else:
context['logoRowTitle'] = projectYaml['name']
# Get the header title & description
context['headerTitle'] = projectYaml['name']
# headerDescription is rarely shown, hiding temporarily
# context['headerDescription'] = projectYaml['description']
# Read the page title
pageTitle = []
titleRO = re.search('<title>(.*?)</title>', head)
if titleRO:
title = titleRO.group(1)
pageTitle.append(title)
if body.find('<h1>') == -1:
body = '<h1 class="page-title">' + title + '</h1>\n\n' + body
pageTitle.append(projectYaml['name'])
pageTitle.append('WebFu Staging')
context['pageTitle'] = ' | '.join(pageTitle)
# Get the footer path & read/parse the footer file.
footerPath = projectYaml['footer_path']
footers = yaml.load(devsiteHelper.readFile(footerPath, lang))['footer']
for item in footers:
if 'promos' in item:
context['footerPromos'] = item['promos']
elif 'linkboxes' in item:
context['footerLinks'] = item['linkboxes']
# Replaces <pre> tags with prettyprint enabled tags
body = re.sub(r'^<pre>(?m)', r'<pre class="prettyprint">', body)
context['content'] = body
# Checks if the page should be displayed in full width mode
fullWidth = re.search('name=\"full_width\" value=\"true\"', head)
if fullWidth:
context['fullWidth'] = True
# # Build the table of contents & transform so it fits within DevSite
context['renderedTOC'] = '<b>TOC Not Implemented</b> for DevSite HTML Pages'
gitHubEditUrl = 'https://github.com/google/WebFundamentals/blob/'
gitHubEditUrl += 'master/src/content/'
gitHubEditUrl += fileLocation.replace(SOURCE_PATH, '')
context['gitHubEditUrl'] = gitHubEditUrl
gitHubIssueUrl = 'https://github.com/google/WebFundamentals/issues/'
gitHubIssueUrl += 'new?title=Feedback for: ' + context['pageTitle'] + ' ['
gitHubIssueUrl += lang + ']&body='
gitHubIssueUrl += gitHubEditUrl
context['gitHubIssueUrl'] = gitHubIssueUrl
# Renders the content into the template
return render('gae/page-article.html', context)
|
Python
| 0.000001
|
@@ -582,16 +582,20 @@
%0A log
+ging
.warning
|
06e352476d52865ab4905cebc57672d9f674a732
|
Disable tests requiring pairing
|
test/client/audio_consumer_test.py
|
test/client/audio_consumer_test.py
|
# Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
import unittest
from Queue import Queue
import speech_recognition
from os.path import dirname, join
from speech_recognition import WavFile, AudioData
from mycroft.client.speech.listener import AudioConsumer, RecognizerLoop
from mycroft.client.speech.local_recognizer import LocalRecognizer
from mycroft.stt import MycroftSTT
__author__ = 'seanfitz'
class MockRecognizer(object):
def __init__(self):
self.transcriptions = []
def recognize_mycroft(self, audio, key=None,
language=None, show_all=False):
if len(self.transcriptions) > 0:
return self.transcriptions.pop(0)
else:
raise speech_recognition.UnknownValueError()
def set_transcriptions(self, transcriptions):
self.transcriptions = transcriptions
class AudioConsumerTest(unittest.TestCase):
"""
AudioConsumerTest
"""
def setUp(self):
self.loop = RecognizerLoop()
self.queue = Queue()
self.recognizer = MockRecognizer()
self.consumer = AudioConsumer(
self.loop.state, self.queue, self.loop, MycroftSTT(),
LocalRecognizer(self.loop.wakeup_recognizer.key_phrase,
self.loop.wakeup_recognizer.phonemes, "1e-16"),
self.loop.mycroft_recognizer)
def __create_sample_from_test_file(self, sample_name):
root_dir = dirname(dirname(dirname(__file__)))
filename = join(
root_dir, 'test', 'client', 'data', sample_name + '.wav')
wavfile = WavFile(filename)
with wavfile as source:
return AudioData(
source.stream.read(), wavfile.SAMPLE_RATE,
wavfile.SAMPLE_WIDTH)
def test_word_extraction(self):
"""
This is intended to test the extraction of the word: ``mycroft``.
The values for ``ideal_begin`` and ``ideal_end`` were found using an
audio tool like Audacity and they represent a sample value position of
the audio. ``tolerance`` is an acceptable margin error for the distance
between the ideal and actual values found by the ``WordExtractor``
"""
# TODO: implement WordExtractor test without relying on the listener
return
audio = self.__create_sample_from_test_file('weather_mycroft')
self.queue.put(audio)
tolerance = 4000
ideal_begin = 70000
ideal_end = 92000
monitor = {}
self.recognizer.set_transcriptions(["what's the weather next week"])
def wakeword_callback(message):
monitor['pos_begin'] = message.get('pos_begin')
monitor['pos_end'] = message.get('pos_end')
self.loop.once('recognizer_loop:wakeword', wakeword_callback)
self.consumer.read()
actual_begin = monitor.get('pos_begin')
self.assertIsNotNone(actual_begin)
diff = abs(actual_begin - ideal_begin)
self.assertTrue(
diff <= tolerance,
str(diff) + " is not less than " + str(tolerance))
actual_end = monitor.get('pos_end')
self.assertIsNotNone(actual_end)
diff = abs(actual_end - ideal_end)
self.assertTrue(
diff <= tolerance,
str(diff) + " is not less than " + str(tolerance))
@unittest.skip('Disabled while unittests are brought upto date')
def test_wakeword_in_beginning(self):
self.queue.put(self.__create_sample_from_test_file('weather_mycroft'))
self.recognizer.set_transcriptions(["what's the weather next week"])
monitor = {}
def callback(message):
monitor['utterances'] = message.get('utterances')
self.loop.once('recognizer_loop:utterance', callback)
self.consumer.read()
utterances = monitor.get('utterances')
self.assertIsNotNone(utterances)
self.assertTrue(len(utterances) == 1)
self.assertEquals("what's the weather next week", utterances[0])
@unittest.skip('Disabled while unittests are brought upto date')
def test_wakeword(self):
self.queue.put(self.__create_sample_from_test_file('mycroft'))
self.recognizer.set_transcriptions(["silence"])
monitor = {}
def callback(message):
monitor['utterances'] = message.get('utterances')
self.loop.once('recognizer_loop:utterance', callback)
self.consumer.read()
utterances = monitor.get('utterances')
self.assertIsNotNone(utterances)
self.assertTrue(len(utterances) == 1)
self.assertEquals("silence", utterances[0])
def test_ignore_wakeword_when_sleeping(self):
self.queue.put(self.__create_sample_from_test_file('mycroft'))
self.recognizer.set_transcriptions(["not detected"])
self.loop.sleep()
monitor = {}
def wakeword_callback(message):
monitor['wakeword'] = message.get('utterance')
self.loop.once('recognizer_loop:wakeword', wakeword_callback)
self.consumer.read()
self.assertIsNone(monitor.get('wakeword'))
self.assertTrue(self.loop.state.sleeping)
def test_wakeup(self):
self.queue.put(self.__create_sample_from_test_file('mycroft_wakeup'))
self.loop.sleep()
self.consumer.read()
self.assertFalse(self.loop.state.sleeping)
def test_stop(self):
self.queue.put(self.__create_sample_from_test_file('mycroft'))
self.consumer.read()
self.queue.put(self.__create_sample_from_test_file('stop'))
self.recognizer.set_transcriptions(["stop"])
monitor = {}
def utterance_callback(message):
monitor['utterances'] = message.get('utterances')
self.loop.once('recognizer_loop:utterance', utterance_callback)
self.consumer.read()
utterances = monitor.get('utterances')
self.assertIsNotNone(utterances)
self.assertTrue(len(utterances) == 1)
self.assertEquals("stop", utterances[0])
def test_record(self):
self.queue.put(self.__create_sample_from_test_file('mycroft'))
self.consumer.read()
self.queue.put(self.__create_sample_from_test_file('record'))
self.recognizer.set_transcriptions(["record"])
monitor = {}
def utterance_callback(message):
monitor['utterances'] = message.get('utterances')
self.loop.once('recognizer_loop:utterance', utterance_callback)
self.consumer.read()
utterances = monitor.get('utterances')
self.assertIsNotNone(utterances)
self.assertTrue(len(utterances) == 1)
self.assertEquals("record", utterances[0])
|
Python
| 0.000001
|
@@ -6040,32 +6040,101 @@
tate.sleeping)%0A%0A
+ @unittest.skip('Disabled while unittests are brought upto date')%0A
def test_sto
@@ -6768,32 +6768,101 @@
utterances%5B0%5D)%0A%0A
+ @unittest.skip('Disabled while unittests are brought upto date')%0A
def test_rec
|
2d4f6dd040d6f92b0865421cddf6c9ce16eff08e
|
add sample python module
|
my_amazing_python_module.py
|
my_amazing_python_module.py
|
Python
| 0.000001
|
@@ -0,0 +1,55 @@
+def some_function(name):%0A return %22Hello, %25s!%22 %25 name
|
|
ff2b86d90ecbc2da25ddc05b0430555861104cac
|
Add an example for HybridContentsManager.
|
examples/hybrid_manager_example.py
|
examples/hybrid_manager_example.py
|
Python
| 0
|
@@ -0,0 +1,2100 @@
+# This example shows how to configure Jupyter/IPython to use the more complex%0A# HybridContentsManager.%0A%0A# A HybridContentsManager implements the contents API by delegating requests to%0A# other contents managers. Each sub-manager is associated with a root%0A# directory, and all requests for data within that directory are routed to the%0A# sub-manager.%0A%0A# A HybridContentsManager needs two pieces of information at configuration time:%0A%0A# 1. %60%60manager_classes%60%60, a map from root directory to the type of contents%0A# manager to use for that root directory.%0A# 2. %60%60manager_kwargs%60%60, a map from root directory to a dict of keywords to%0A# pass to the associated sub-manager.%0A%0Afrom pgcontents.pgmanager import PostgresContentsManager%0Afrom pgcontents.hybridmanager import HybridContentsManager%0A%0A# Using Jupyter (IPython %3E= 4.0).%0A# from notebook.services.contents.filemanager import FileContentsManager%0A# Using Legacy IPython.%0Afrom IPython.html.services.contents.filemanager import FileContentsManager%0A%0Ac = get_config()%0A%0Ac.NotebookApp.contents_manager_class = HybridContentsManager%0Ac.HybridContentsManager.manager_classes = %7B%0A # Associate the root directory with a PostgresContentsManager.%0A # This manager will receive all requests that don't fall under any of the%0A # other managers.%0A '': PostgresContentsManager,%0A # Associate /directory with a FileContentsManager.%0A 'directory': FileContentsManager,%0A # Associate /other_directory with another FileContentsManager.%0A 'other_directory': FileContentsManager,%0A%7D%0Ac.HybridContentsManager.manager_kwargs = %7B%0A # Args for root PostgresContentsManager.%0A '': %7B%0A 'db_url': 'postgresql://ssanderson@/pgcontents_testing',%0A 'user_id': 'my_awesome_username',%0A 'max_file_size_bytes': 1000000, # Optional%0A %7D,%0A # Args for the FileContentsManager mapped to /directory%0A 'directory': %7B%0A 'root_dir': '/home/ssanderson/some_local_directory',%0A %7D,%0A # Args for the FileContentsManager mapped to /other_directory%0A 'other_directory': %7B%0A 'root_dir': '/home/ssanderson/some_other_local_directory',%0A %7D%0A%7D%0A
|
|
d68109c2fb7bb324c93506d26a1a7cf996134da3
|
Allow `soulmate_finder` to be imported
|
soulmate_finder/__init__.py
|
soulmate_finder/__init__.py
|
Python
| 0.000015
|
@@ -0,0 +1,87 @@
+# allow %60soulmate_finder%60 to be imported%0A%0A# FIXME: This is bad%0Afrom .__main__ import *%0A
|
|
79fe576ec71552633c7e5a2a646567beecfa3b5b
|
Add a test
|
test/pkgbuildtest.py
|
test/pkgbuildtest.py
|
Python
| 0
|
@@ -0,0 +1,2160 @@
+#!/usr/bin/env python3%0A%0Aimport sys%0Aimport os%0Aimport unittest%0Asys.path%5B0%5D = os.path.abspath('..')%0A%0Aimport libaur.PKGBUILD as P%0A%0Aclass PkgbuildTest(unittest.TestCase):%0A KNOWN_VALUES = %5B%0A ('''pkgname=foobar%5Cn''',%0A %7B'pkgname':%5B'foobar'%5D%7D),%0A ('''pkgname=(foobar)%5Cn''',%0A %7B'pkgname':%5B'foobar'%5D%7D),%0A ('''pkgname=('foobar' 'pacman')%5Cn''',%0A %7B'pkgname':%5B'foobar', 'pacman'%5D%7D),%0A # This one fails. Need better parsing for non-quoted strings%0A #('''pkgname=(foobar pacman)%5Cn''',%0A # %7B'pkgname':%5B'foobar', 'pacman'%5D%7D),%0A ('''pkgver=123%0A 456%0A ''', %7B'pkgver':%5B'123'%5D%7D),%0A ('''depends=('foobar' 'pacman')%5Cn''',%0A %7B'depends':%5B'foobar', 'pacman'%5D%7D),%0A ('''depends=(%22foobar%22 'pacman')%5Cn''',%0A %7B'depends':%5B'foobar', 'pacman'%5D%7D),%0A ('''depends=(%22foobar%22 %22pacman%22)%5Cn''',%0A %7B'depends':%5B'foobar', 'pacman'%5D%7D),%0A ('''depends=(%0A 'foobar'%0A 'pacman')%5Cn''',%0A %7B'depends':%5B'foobar', 'pacman'%5D%7D),%0A ('''depends=(%0A 'foobar'%0A 'pacman'%0A )%5Cn''',%0A %7B'depends':%5B'foobar', 'pacman'%5D%7D),%0A ('''depends=(%0A # One dep%0A 'foobar'%0A # Two dep%0A 'pacman'%0A )%5Cn''',%0A %7B'depends':%5B'foobar', 'pacman'%5D%7D),%0A ('''source=(git://foobar.git#branch=git%0A )%5Cn''',%0A %7B'source':%5B'git://foobar.git#branch=git'%5D%7D),%0A ('''source=(git://foobar.git#some comment%0A )%5Cn''',%0A %7B'source':%5B'git://foobar.git'%5D%7D),%0A %5D%0A%0A def test_known_values(self):%0A '''parse_pkgbuild should return the values listed above'''%0A for pkgbuild, output in self.KNOWN_VALUES:%0A # This is a default .Add it to the known output dictionary.%0A output%5B'epoch'%5D = %5B'0'%5D%0A self.assertDictEqual(P.parse_pkgbuild(full_str=pkgbuild), output)%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
44a785b456ad1d1bd9c866b79cadaec4c1d5bab5
|
Add sample template file (#24)
|
samples/vsphere/common/sample_template.py
|
samples/vsphere/common/sample_template.py
|
Python
| 0
|
@@ -0,0 +1,1714 @@
+#!/usr/bin/env python%0A%0A%22%22%22%0A* *******************************************************%0A* Copyright (c) VMware, Inc. 2017. All Rights Reserved.%0A* SPDX-License-Identifier: MIT%0A* *******************************************************%0A*%0A* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU %22AS IS%22 WITHOUT%0A* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,%0A* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED%0A* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,%0A* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.%0A%22%22%22%0A%0A__author__ = 'VMware, Inc.'%0A__vcenter_version__ = '6.5+'%0A%0Aimport atexit%0A%0Afrom com.vmware.vcenter_client import VM%0Afrom samples.vsphere.common import vapiconnect%0Afrom samples.vsphere.common.sample_util import parse_cli_args%0A%0A%0Aclass Sample:%0A %22%22%22%0A Demonstrates getting list of VMs present in vCenter%0A%0A Sample Prerequisites:%0A - vCenter%0A %22%22%22%0A%0A def __init__(self):%0A self.vm_service = None # Service used by the sample code.%0A self.cleardata = None%0A%0A def setup(self):%0A server, username, password, cleardata, skip_verification = %5C%0A parse_cli_args()%0A stub_config = vapiconnect.connect(server, username, password,%0A skip_verification)%0A self.vm_service = VM(stub_config)%0A self.cleardata = cleardata%0A atexit.register(vapiconnect.logout, stub_config)%0A%0A def run(self):%0A vms = self.vm_service.list()%0A print(vms)%0A%0A def cleanup(self):%0A if self.cleardata:%0A pass%0A%0A%0Adef main():%0A sample = Sample()%0A sample.setup()%0A sample.run()%0A sample.cleanup()%0A%0A%0A# Start program%0Aif __name__ == '__main__':%0A main()%0A
|
|
7d5407a98ef8b0d025532d9675b7109a4c4713f4
|
add send sms python post code
|
send-sms-text-messages-python/send_sms.py
|
send-sms-text-messages-python/send_sms.py
|
Python
| 0
|
@@ -0,0 +1,535 @@
+# we import the Twilio client from the dependency we just installed%0Afrom twilio.rest import TwilioRestClient%0A%0A# the following line needs your Twilio Account SID and Auth Token%0Aclient = TwilioRestClient(%22ACxxxxxxxxxxxxxx%22, %22zzzzzzzzzzzzz%22)%0A%0A# change the %22from_%22 number to your Twilio number and the %22to%22 number%0A# to the phone number you signed up for Twilio with, or upgrade your%0A# account to send SMS to any phone number%0Aclient.messages.create(to=%22+19732644152%22, from_=%22+12023358536%22,%0A body=%22Hello from Python!%22)%0A
|
|
9e200c1e5d666c3ab151c96fcc1190c70ddcb02c
|
Add pagination utility.
|
src/cadorsfeed/views/pagination.py
|
src/cadorsfeed/views/pagination.py
|
Python
| 0
|
@@ -0,0 +1,858 @@
+from werkzeug import cached_property%0Afrom flask import url_for%0A%0Aclass Pagination(object):%0A%0A def __init__(self, db, key, per_page, page, endpoint):%0A self.db = db%0A self.query = key%0A self.per_page = per_page%0A self.page = page%0A self.endpoint = endpoint%0A%0A @cached_property%0A def count(self):%0A return self.db.zcard(self.query)%0A%0A @cached_property%0A def entries(self):%0A start = (self.page - 1) * self.per_page%0A return self.db.zrevrange(self.query, start, (start + self.per_page) - 1)%0A%0A has_previous = property(lambda x: x.page %3E 1)%0A has_next = property(lambda x: x.page %3C x.pages)%0A previous = property(lambda x: url_for(x.endpoint, page=x.page - 1))%0A next = property(lambda x: url_for(x.endpoint, page=x.page + 1))%0A pages = property(lambda x: max(0, x.count - 1) // x.per_page + 1)%0A
|
|
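A minimal sketch of how the Pagination class in the diff above might be wired into a Flask view. The redis client, the sorted-set key 'reports', and the template name are assumptions, not part of the commit:

# Hypothetical Flask view using the Pagination helper above.
import redis
from flask import Flask, render_template

app = Flask(__name__)
db = redis.StrictRedis()

@app.route('/reports/', defaults={'page': 1})
@app.route('/reports/page/<int:page>')
def reports(page):
    # per_page=20: each page shows the next 20 highest-scored entries
    pagination = Pagination(db, 'reports', 20, page, 'reports')
    return render_template('reports.html', pagination=pagination)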
3506cb01b0ce03d834c61ff28dd5d35785b999d3
|
add initial coverage implementation (#39)
|
tools/coverage.py
|
tools/coverage.py
|
Python
| 0
|
@@ -0,0 +1,2209 @@
+#!/bin/env python%0A%22%22%22coverage.py%0A%0AThis script is for checking the code coverage of unit tests in the%0Aoshinko-rest project. It is meant to be invoked from the top level of the%0Arepository.%0A%0AExample invocation:%0A%0A $ tools/coverage.py -h%0A%0A%22%22%22%0Aimport argparse%0Aimport copy%0Aimport re%0Aimport subprocess%0A%0A%0Aoshinko_repo = 'github.com/redhatanalytics/oshinko-rest/'%0Aoshinko_test_package = oshinko_repo + 'tests/unit'%0Acoverage_packages = %5B%0A 'handlers',%0A 'helpers/authentication',%0A 'helpers/containers',%0A 'helpers/deploymentconfigs',%0A 'helpers/info',%0A 'helpers/logging',%0A 'helpers/podtemplates',%0A 'helpers/services',%0A 'helpers/uuid',%0A 'version',%0A%5D%0A%0A%0Adef main(args):%0A def run_and_print(cmd):%0A proc = subprocess.Popen(cmd,%0A stdout=subprocess.PIPE,%0A stderr=subprocess.PIPE)%0A match = re.search('%5B0-9%5D%7B1,3%7D%5C.%5B0-9%5D%25', proc.stdout.read())%0A if match is not None:%0A print(' ' + match.group(0))%0A else:%0A print(' unknown')%0A%0A print('starting coverage scan')%0A base_cmd = %5B'go', 'test'%5D%0A if args.coverprofile is not None:%0A base_cmd = base_cmd + %5B'-coverprofile', args.coverprofile%5D%0A if args.individual is True:%0A for pkg in coverage_packages:%0A print(' - scanning ' + pkg)%0A cmd = base_cmd + %5B'-coverpkg', oshinko_repo+pkg,%0A oshinko_test_package%5D%0A run_and_print(cmd)%0A else:%0A print(' - scanning all packages')%0A pkg_list = ','.join(%5Boshinko_repo+p for p in coverage_packages%5D)%0A cmd = base_cmd + %5B'-coverpkg', pkg_list, oshinko_test_package%5D%0A run_and_print(cmd)%0A%0A%0Aif __name__ == '__main__':%0A parser = argparse.ArgumentParser(description='Run coverage analysis.')%0A parser.add_argument('-i', '--individual', dest='individual',%0A action='store_true',%0A help='Print coverage analysis for each package.')%0A parser.add_argument('-c', '--coverprofile', dest='coverprofile',%0A action='store',%0A help='Write coverage profile to this file.')%0A args = parser.parse_args()%0A main(args)%0A
|
|
63eaadad7a5169ec6219d33f9b39ce27859684c2
|
Add script to automate notebooks testing
|
notebooks/test_notebooks.py
|
notebooks/test_notebooks.py
|
Python
| 0.000001
|
@@ -0,0 +1,1142 @@
+# -*- coding: utf-8 -*-%0A%0A%0A'''%0AChecks notebook execution result.%0AEqual to this command + error management:%0Ajupyter nbconvert --to notebook --execute --ExecutePreprocessor.timeout=60 --output executed_notebook.ipynb demo.ipynb%0A%0AFor jupyter configuration information, run: jupyter --path %0A'''%0A%0A# Dependencies: nbformat, nbconvert, jupyter-client, ipykernel%0Aimport io%0Aimport nbformat%0Afrom nbconvert.preprocessors import ExecutePreprocessor%0Afrom nbconvert.preprocessors import CellExecutionError%0A%0Anotebook_filename = 'demo.ipynb'%0Arun_path = '.'%0Anotebook_filename_out = 'executed_notebook.ipynb'%0A%0Awith io.open(notebook_filename) as f:%0A nb = nbformat.read(f, as_version=4)%0A%0Aep = ExecutePreprocessor(timeout=600, kernel_name='python')%0Atry:%0A out = ep.preprocess(nb, %7B'metadata': %7B'path': run_path%7D%7D)%0Aexcept CellExecutionError:%0A out = None%0A msg = 'Error executing the notebook %22%25s%22.%5Cn%5Cn' %25 notebook_filename%0A msg += 'See notebook %22%25s%22 for the traceback.' %25 notebook_filename_out%0A print(msg)%0A raise%0Afinally:%0A with io.open(notebook_filename_out, mode='wt') as f: # io.open avoids UnicodeEncodeError%0A nbformat.write(nb, f)%0A
|
|
37c151de6b2241e68f7287349b43f6dce1150093
|
add an API module for core.ui
|
enthought/mayavi/core/ui/api.py
|
enthought/mayavi/core/ui/api.py
|
Python
| 0.000002
|
@@ -0,0 +1,318 @@
+from enthought.mayavi.tools.mlab_scene_model import MlabSceneModel%0Afrom enthought.mayavi.core.ui.mayavi_scene import MayaviScene%0Afrom enthought.tvtk.pyface.scene_editor import SceneEditor%0Afrom enthought.mayavi.core.ui.engine_view import EngineView%0Afrom enthought.mayavi.core.ui.engine_rich_view import EngineRichView%0A%0A
|
|
4fc098ade74a9b46f11937e229cba75b83f0c9a4
|
Create Utils.py
|
Utils.py
|
Utils.py
|
Python
| 0
|
@@ -0,0 +1,641 @@
+class Batchable:%0A%0A def __init__(self, X, y, batch_size = 256, seed = None):%0A import math%0A import numpy as np%0A if seed:%0A np.random.seed(seed)%0A idx = np.arange(X.shape%5B1%5D)%0A np.random.shuffle(idx)%0A self.X = X%5B:, idx%5D%0A self.y = y%5B:, idx%5D%0A self.start = 0%0A self.batch_size = batch_size%0A self.num_batches = math.ceil(X.shape%5B0%5D / batch_size)%0A %0A def next(self):%0A end = self.start + self.batch_size%0A if end %3E self.X.shape%5B1%5D:%0A end = self.X.shape%5B1%5D - 1%0A return self.X%5B:, self.start: (end + 1)%5D, self.y%5B:, self.start: (end + 1)%5D%0A
|
|
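A hypothetical smoke test for the Batchable class above; the array shapes are assumptions (features on axis 0, samples on axis 1, matching the axis-1 shuffle in __init__):

# Hedged sketch, not part of the commit.
import numpy as np

X = np.random.rand(10, 1000)   # 10 features, 1000 samples
y = np.random.rand(1, 1000)
batches = Batchable(X, y, batch_size=256, seed=42)
X_batch, y_batch = batches.next()
print(X_batch.shape, y_batch.shape)  # slices columns 0..256 inclusive as written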
bed08a0d3187f6b31f985d448598eb0ea231a1db
|
Python print dict tip
|
python/print_dict.py
|
python/print_dict.py
|
Python
| 0.999031
|
@@ -0,0 +1,206 @@
+# print dictionary %0Aimport json%0A%0Agoo = %7B'name': 'honux', 'age': 25, 'favorite' : %7B'game': 'SF', 'movie': 'matrix', 'book': 'Galaxy hero'%7D%7D%0A%0Aprint(goo)%0A%0Aprint(json.dumps(goo, indent = 4, sort_keys = True))%0A%0A
|
|
4083cac3c0ec107df68cdecb8fc52c00e2684b08
|
Add b3 format benchmark tests (#1489)
|
propagator/opentelemetry-propagator-b3/tests/performance/benchmarks/trace/propagation/test_benchmark_b3_format.py
|
propagator/opentelemetry-propagator-b3/tests/performance/benchmarks/trace/propagation/test_benchmark_b3_format.py
|
Python
| 0
|
@@ -0,0 +1,1669 @@
+# Copyright The OpenTelemetry Authors%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Aimport opentelemetry.propagators.b3 as b3_format%0Aimport opentelemetry.sdk.trace as trace%0Afrom opentelemetry.trace.propagation.textmap import DictGetter%0A%0AFORMAT = b3_format.B3Format()%0A%0A%0Adef test_extract_single_header(benchmark):%0A benchmark(%0A FORMAT.extract,%0A DictGetter(),%0A %7B%0A FORMAT.SINGLE_HEADER_KEY: %22bdb5b63237ed38aea578af665aa5aa60-c32d953d73ad2251-1-11fd79a30b0896cd285b396ae102dd76%22%0A %7D,%0A )%0A%0A%0Adef test_inject_empty_context(benchmark):%0A tracer = trace.TracerProvider().get_tracer(%22sdk_tracer_provider%22)%0A with tracer.start_as_current_span(%22Root Span%22):%0A with tracer.start_as_current_span(%22Child Span%22):%0A benchmark(%0A FORMAT.inject,%0A dict.__setitem__,%0A %7B%0A FORMAT.TRACE_ID_KEY: %22bdb5b63237ed38aea578af665aa5aa60%22,%0A FORMAT.SPAN_ID_KEY: %2200000000000000000c32d953d73ad225%22,%0A FORMAT.PARENT_SPAN_ID_KEY: %2211fd79a30b0896cd285b396ae102dd76%22,%0A FORMAT.SAMPLED_KEY: %221%22,%0A %7D,%0A )%0A
|
|
4a5767c18b3d75420c5498341012fa98e74edba6
|
Create 2string_join.py
|
string/2string_join.py
|
string/2string_join.py
|
Python
| 0.002004
|
@@ -0,0 +1,98 @@
+s1=%22hello%22%0As2=%22world%22%0Aprint(s1+s2)%0A%0Aprint(%22%5Cn%22,%22 %22.join(s2),%22%5Cn%22)%0A%0Aprint(%22length of s1=%22,len(s1))%0A
|
|
c1f02399ad3ce9c4009e297be58aa6e1f10337cb
|
Add utility getrecord.py to retrieve single records from lib
|
getrecord.py
|
getrecord.py
|
Python
| 0
|
@@ -0,0 +1,1266 @@
+#!/usr/bin/python%0A#%0A# retrieve specified record from lib%0A#%0A%0Aimport sys%0Aimport olypy.oio as oio%0Afrom olypy.oid import to_oid, to_int%0Aimport olypy.dbck as dbck%0A%0Aimport pathlib%0Afrom jinja2 import Environment, PackageLoader, select_autoescape%0Afrom olymap.loc import build_complete_loc_dict%0Afrom olymap.ship import build_complete_ship_dict%0Afrom olymap.char import build_complete_char_dict%0Afrom olymap.item import build_complete_item_dict%0Afrom olymap.skill import build_complete_skill_dict%0Afrom olymap.storm import build_complete_storm_dict%0Afrom olymap.player import build_complete_player_dict%0A%0Aimport olymap.utilities as u%0Aimport olymap.reports as reports%0Afrom olymap.maps import write_index, write_map_leaves, write_top_map, write_bitmap%0Afrom olymap.legacy import create_map_matrix, write_legacy_bitmap, write_legacy_top_map, write_legacy_map_leaves%0A%0A%0Ainlib = sys.argv%5B1%5D%0Adata = oio.read_lib(inlib)%0Adbck.check_db(data, fix=True, checknames=True)%0Arec_id = ' '%0Arec_id = input('Enter record id (%220%22 to exit): ')%0Awhile rec_id != '0':%0A try:%0A rec_id_conv = to_int(rec_id)%0A try:%0A print(data%5Brec_id_conv%5D)%0A except:%0A print('Invalid key')%0A except:%0A print('Invalid key')%0A rec_id = input('Enter record id (%220%22 to exit): ')%0A
|
|
502a95b4bcf54792b5755c9ea6f03a8f9572a271
|
test resize
|
tests/test_resize.py
|
tests/test_resize.py
|
Python
| 0.000001
|
@@ -0,0 +1,663 @@
+from tempfile import TemporaryFile%0A%0Aimport pytest%0A%0Afrom imgpy import Img%0A%0A%0A@pytest.mark.parametrize('image', (%7B%0A 'sub': 'anima/bordered.gif',%0A 'size': (100, 100)%0A%7D, %7B%0A 'sub': 'anima/clear.gif',%0A 'size': (100, 100)%0A%7D, %7B%0A 'sub': 'fixed/bordered.jpg',%0A 'size': (100, 100)%0A%7D, %7B%0A 'sub': 'fixed/clear.jpg',%0A 'size': (100, 100)%0A%7D, ))%0Adef test_resize(path, image):%0A with Img(fp=path(image%5B'sub'%5D)) as src:%0A src.resize(image%5B'size'%5D)%0A with TemporaryFile() as tf:%0A src.save(fp=tf)%0A tf.seek(0)%0A with Img(fp=tf) as dest:%0A res = (dest.width, dest.height)%0A assert res == image%5B'size'%5D%0A
|
|
f4bb4d17214f4e359455cf7b5fb7ab973508049b
|
Add missing module for merge script
|
bin/diffMatcher.py
|
bin/diffMatcher.py
|
Python
| 0.000001
|
@@ -0,0 +1,1016 @@
+#!/usr/bin/python%0A# coding=utf-8%0A%0Aimport subprocess%0A%0Aclass DiffMatcher(object):%0A %0A def __init__(self, listA, listB):%0A self.listA = listA%0A self.listB = listB%0A%0A%0A def create_diff(self, listA, listB,case_sensitive):%0A new_list = %5B%5D%0A #compare the two files%0A try:%0A if (case_sensitive):%0A #ignore case sensitiveness%0A inp = subprocess.check_output(%5B'diff', '-iy', listA.name, listB.name%5D)%0A else:%0A inp = subprocess.check_output(%5B'diff', '-y', listA.name, listB.name%5D)%0A # diff exits with 1 if outputs mismatch... grml%0A except subprocess.CalledProcessError, e: %0A inp = e.output%0A%0A %0A inp = inp.decode(%22utf-8%22).split(%22%5Cn%22)%0A %0A #create list of difference%0A for entry in inp:%0A g = entry.replace(%22%5Ct%22,%22 %22)%0A g = g.split()%0A new_list.append(g)%0A %0A %0A del new_list%5B-1%5D%0A %0A return new_list%0A
|
|
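A hypothetical Python 2 driver for the DiffMatcher above (it uses the Python 2 'except ..., e' syntax); the two input files are assumptions, and the external diff binary must be on PATH:

# Hedged sketch, not part of the commit.
with open('old_words.txt') as list_a, open('new_words.txt') as list_b:
    matcher = DiffMatcher(list_a, list_b)
    # note: passing True selects the '-iy' (case-insensitive) branch as written
    rows = matcher.create_diff(list_a, list_b, True)
for row in rows:
    print row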
7b0044d3ccb617e92ee8523be949966f1188c742
|
add unittest
|
tests/unit/py2/nupic/encoders/utility_test.py
|
tests/unit/py2/nupic/encoders/utility_test.py
|
Python
| 0.000015
|
@@ -0,0 +1,2033 @@
+#!/usr/bin/env python%0A# ----------------------------------------------------------------------%0A# Numenta Platform for Intelligent Computing (NuPIC)%0A# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from%0A# Numenta, Inc. a separate commercial license for this software code, the%0A# following terms and conditions apply:%0A#%0A# This program is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU General Public License version 3 as%0A# published by the Free Software Foundation.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.%0A# See the GNU General Public License for more details.%0A#%0A# You should have received a copy of the GNU General Public License%0A# along with this program. If not, see http://www.gnu.org/licenses.%0A#%0A# http://numenta.org/licenses/%0A# ----------------------------------------------------------------------%0A%0A%0Aimport unittest2 as unittest%0A%0A%0Afrom nupic.encoders.scalar import ScalarEncoder%0Afrom nupic.encoders.vector import VectorEncoder%0Afrom nupic.encoders.utility import UtilityEncoder%0A%0Aclass UtilityEncoderTest(unittest.TestCase):%0A %22%22%22testing Utility encoder%22%22%22%0A%0A def setUp(self):%0A self.data = %5B1,2,3%5D%0A%0A # encoder for score: 0..100, fine-grained to 0.5%0A self.scoreEnc = ScalarEncoder(3, 0, 100, resolution=0.5, name='score')%0A%0A # encoder for the input (data) part%0A elem = ScalarEncoder(1,0,3,resolution=1)%0A self.dataEnc = VectorEncoder(len(self.data), elem, typeCastFn=int, name='data')%0A%0A # utility encoder%0A def sumAll(list):%0A return sum(list)%0A%0A self.fn = sumAll%0A%0A self.utilityEnc = None%0A%0A def testInitialization(self):%0A %22%22%22creating a utility encoder%22%22%22%0A util = UtilityEncoder(self.dataEnc, self.scoreEnc, feval=self.fn, name='starter')%0A assert True==isinstance(util, UtilityEncoder)%0A%0A##########################################################%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
2ac66bef27652dec67b90cb428031e4954da8e21
|
Create download-search.py
|
lattes/download-search.py
|
lattes/download-search.py
|
Python
| 0
|
@@ -0,0 +1,950 @@
+import urllib2%0Aimport codecs%0Aimport os%0A%0Atry: os.makedirs('data')%0Aexcept: pass%0A%0Ainc = 10000%0Aoffset = 0%0Alimit = 211056%0Awhile (offset %3C 211057):%0A urlpath = 'http://buscatextual.cnpq.br/buscatextual/busca.do?metodo=forwardPaginaResultados®istros=' + str(offset) + ';' + str(inc) + '&query=%2528+%252Bidx_particao%253A1+%252Bidx_nacionalidade%253Ae%2529+or+%2528+%252Bidx_particao%253A1+%252Bidx_nacionalidade%253Ab%2529&analise=cv&tipoOrdenacao=null&paginaOrigem=index.do&mostrarScore=false&mostrarBandeira=true&modoIndAdhoc=null'%0A response = urllib2.urlopen(urlpath)%0A html = response.read().decode('ISO-8859-1') # read and decode the response%0A f = codecs.open('data/' + str(offset) + '-' + str(inc) + '.html', 'w', 'utf-8')%0A f.write(html)%0A f.close()%0A offset += inc%0A%0Aexit(1)%0A%0Aprog = re.compile(%22abreDetalhe%5C((.*?)%5C)%22)%0Afor a in prog.finditer(html):%0A print a.group(1).split(',')%0A%0A%0Aprog = re.compile(%22%3Cimg alt='(.*?)'%22)%0Afor a in prog.finditer(html):%0A print a.group(1)%0A
|
|
6a95d0df59f5ab03cb8537014e0102e5300a544a
|
Add Docker driver
|
docker.py
|
docker.py
|
Python
| 0
|
@@ -0,0 +1,1604 @@
+%0Aimport random, string%0Aimport sys, subprocess%0A%0Afrom log import logging%0ALOG = logging.getLogger(__name__)%0A%0Aclass Driver(object):%0A%0A def __init__(self, name, dockerfilepath):%0A self.name = name%0A self.dockerfilepath = dockerfilepath%0A self.tag = %22rallyci:%22 + dockerfilepath%0A self.number = 0%0A self.current = self.tag%0A self.names = %5B%5D%0A%0A def _run(self, cmd, stdout, stdin=None):%0A pipe = subprocess.Popen(cmd, stdin=stdin,%0A stdout=subprocess.PIPE,%0A stderr=subprocess.STDOUT)%0A%0A for line in iter(pipe.stdout.readline, b''):%0A stdout.write(line)%0A return pipe.returncode%0A%0A def build(self, stdout):%0A cmd = %5B%22docker%22, %22build%22, %22-t%22, self.tag, self.dockerfilepath%5D%0A LOG.debug(%22Building image %25r%22 %25 cmd)%0A return self._run(cmd, stdout)%0A%0A def run(self, cmd, stdout, stdin=None):%0A%0A name = %22%22.join(random.sample(string.letters, 12))%0A self.names.append(name)%0A command = %5B%22docker%22, %22run%22, %22--name%22, name%5D%0A if stdin:%0A command += %5B%22-i%22%5D%0A command += %5Bself.current%5D%0A command += cmd.split(%22 %22)%0A LOG.debug(%22Running command %25r%22 %25 command)%0A returncode = self._run(command, stdout, stdin=stdin)%0A self.current = subprocess.check_output(%0A %5B%22docker%22, %22commit%22, name%5D).strip()%0A return returncode%0A%0A def cleanup(self):%0A for name in self.names:%0A subprocess.check_output(%5B%22docker%22, %22rm%22, name%5D)%0A subprocess.check_output(%5B%22docker%22, %22rmi%22, self.current%5D)%0A
|
|
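A hypothetical use of the Driver class above; it requires a running Docker daemon, and 'images/demo' (a directory containing a Dockerfile) is an assumption:

# Hedged sketch, not part of the commit.
import sys

drv = Driver('demo', 'images/demo')
drv.build(sys.stdout)                          # builds and tags rallyci:images/demo
drv.run('echo hello from rallyci', sys.stdout) # runs in a fresh named container
drv.cleanup()                                  # removes containers and the committed image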
f3c622a3dc9573c8244bf01408c1ff4620080c99
|
Create views module
|
views/__init__.py
|
views/__init__.py
|
Python
| 0
|
@@ -0,0 +1 @@
+%0A
|
|
07148136d8dcc165fc72d3ef264d721c652db025
|
Test cases for ZPool
|
test/002_test_zpool.py
|
test/002_test_zpool.py
|
Python
| 0
|
@@ -0,0 +1,1124 @@
+import unittest%0Aimport os%0Afrom .test_utils import _LibZFSHandleCase%0Afrom libzfs.zpool import ZPool, zpool_prop_t%0A%0ALIBZFS_TEST_POOL = os.environ.get(%22LIBZFS_TEST_POOL%22, False)%0A%0A%0A@unittest.skipUnless(LIBZFS_TEST_POOL, %22LIBZFS_TEST_POOL not set, so we do not test to a specific pool%22)%0Aclass Test_ZPool(_LibZFSHandleCase):%0A def test_001_iter_zpools(self):%0A pools = ZPool.list()%0A assert len(pools) %3E 0%0A%0A def test_002_get_zpool(self):%0A pool = ZPool.get(name=LIBZFS_TEST_POOL)%0A assert pool is not None%0A%0A def test_003_get_zpool_properties(self):%0A pool = ZPool.get(name=LIBZFS_TEST_POOL)%0A props = pool.properties%0A assert len(props.keys()) %3E 0%0A assert props.get(zpool_prop_t.ZPOOL_PROP_NAME) == LIBZFS_TEST_POOL%0A assert props.get(zpool_prop_t.ZPOOL_PROP_SIZE) %3E 0%0A%0A def test_004_get_zpool_config(self):%0A pool = ZPool.get(name=LIBZFS_TEST_POOL)%0A config = pool.config%0A pool.refresh_stats()%0A oldconfig = pool.old_config%0A assert len(config.keys()) %3E 0%0A assert len(oldconfig.keys()) %3E 0%0A assert config == oldconfig%0A
|
|
ba8be59db72c958e2ff20b9ae7fe81c400b40f9c
|
Make start of ongoing and deadline activities just a date
|
bluebottle/time_based/migrations/0008_auto_20201023_1443.py
|
bluebottle/time_based/migrations/0008_auto_20201023_1443.py
|
Python
| 0
|
@@ -0,0 +1,714 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.17 on 2020-10-23 12:43%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('time_based', '0007_auto_20201023_1433'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='ongoingactivity',%0A name='start',%0A field=models.DateField(blank=True, null=True, verbose_name='Start of activity'),%0A ),%0A migrations.AlterField(%0A model_name='withadeadlineactivity',%0A name='start',%0A field=models.DateField(blank=True, null=True, verbose_name='Start of activity'),%0A ),%0A %5D%0A
|
|
b869748e4bc0ee6986fa280aa69027aaf8607dcb
|
allow set_ev_handler without dispatchers
|
ryu/controller/handler.py
|
ryu/controller/handler.py
|
# Copyright (C) 2011, 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011, 2012 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
from ryu.controller import ofp_event
LOG = logging.getLogger('ryu.controller.handler')
# just represent OF datapath state. datapath specific so should be moved.
HANDSHAKE_DISPATCHER = "handshake"
CONFIG_DISPATCHER = "config"
MAIN_DISPATCHER = "main"
DEAD_DISPATCHER = "dead"
# should be named something like 'observe_event'
def set_ev_cls(ev_cls, dispatchers=None):
def _set_ev_cls_dec(handler):
handler.ev_cls = ev_cls
handler.dispatchers = _listify(dispatchers)
handler.observer = ev_cls.__module__
return handler
return _set_ev_cls_dec
def set_ev_handler(ev_cls, dispatchers):
def _set_ev_cls_dec(handler):
handler.ev_cls = ev_cls
handler.dispatchers = _listify(dispatchers)
return handler
return _set_ev_cls_dec
def _is_ev_cls(meth):
return hasattr(meth, 'ev_cls')
def _listify(may_list):
if may_list is None:
may_list = []
if not isinstance(may_list, list):
may_list = [may_list]
return may_list
def register_instance(i):
for _k, m in inspect.getmembers(i, inspect.ismethod):
# LOG.debug('instance %s k %s m %s', i, _k, m)
if _is_ev_cls(m):
i.register_handler(m.ev_cls, m)
|
Python
| 0
|
@@ -1336,16 +1336,21 @@
patchers
+=None
):%0A d
|
7f49a34e605d701168ee88c8cff0e3b8ed9a68d6
|
Add GPG-related minor migration
|
storage_service/locations/migrations/0017_gpg_space_minor_migration.py
|
storage_service/locations/migrations/0017_gpg_space_minor_migration.py
|
Python
| 0
|
@@ -0,0 +1,1123 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('locations', '0016_mirror_location_aip_replication'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='space',%0A name='access_protocol',%0A field=models.CharField(%0A help_text='How the space can be accessed.',%0A max_length=8,%0A verbose_name='Access protocol',%0A choices=%5B%0A (b'ARKIVUM', 'Arkivum'),%0A (b'DV', 'Dataverse'),%0A (b'DC', 'DuraCloud'),%0A (b'DSPACE', 'DSpace via SWORD2 API'),%0A (b'FEDORA', 'FEDORA via SWORD2'),%0A (b'GPG', 'GPG encryption on Local Filesystem'),%0A (b'FS', 'Local Filesystem'),%0A (b'LOM', 'LOCKSS-o-matic'),%0A (b'NFS', 'NFS'),%0A (b'PIPE_FS', 'Pipeline Local Filesystem'),%0A (b'SWIFT', 'Swift')%5D),%0A ),%0A %5D%0A
|
|
579d21e001f5cd61702dc086d36c1a5f764ffb45
|
Add app.wsgi to run under Apache
|
app.wsgi
|
app.wsgi
|
Python
| 0
|
@@ -0,0 +1,47 @@
+import app%0A%0Asite = app.create_app()%0Asite.run()%0A
|
|
38090ac06a48a4205cbc2318e3ad9296d5b08ea5
|
Add migration to populate Broadcast.base_language
|
temba/msgs/migrations/0069_populate_broadcast_base_lang.py
|
temba/msgs/migrations/0069_populate_broadcast_base_lang.py
|
Python
| 0.000001
|
@@ -0,0 +1,2251 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom collections import defaultdict%0Afrom django.db import migrations%0Afrom temba.utils import chunk_list%0A%0A%0Adef do_populate(Broadcast, FlowStep):%0A BroadcastSteps = FlowStep.broadcasts.through%0A broadcast_ids = list(Broadcast.objects.values_list('id', flat=True).order_by('org_id'))%0A num_processed = 0%0A%0A if broadcast_ids:%0A print(%22Starting population of Broadcast.base_language for %25d total broadcasts...%22 %25 len(broadcast_ids))%0A%0A for id_batch in chunk_list(broadcast_ids, 1000):%0A broadcast_steps = BroadcastSteps.objects.filter(broadcast_id__in=id_batch).distinct('broadcast_id')%0A broadcast_steps = broadcast_steps.prefetch_related('flowstep__run__flow')%0A%0A # dict of language codes to lists of broadcast ids%0A broadcasts_by_lang = defaultdict(list)%0A%0A for broadcast_step in broadcast_steps:%0A flow = broadcast_step.flowstep.run.flow%0A%0A if flow.base_language:%0A broadcasts_by_lang%5Bflow.base_language%5D.append(broadcast_step.broadcast_id)%0A%0A # update each set of broadcasts associated with a particular flow%0A num_updated = 0%0A for lang, bcast_ids in broadcasts_by_lang.items():%0A Broadcast.objects.filter(id__in=bcast_ids).update(base_language=lang)%0A num_updated += len(bcast_ids)%0A%0A num_processed += len(id_batch)%0A print(%22 %3E Processed %25d of %25d broadcasts (updated %25d with %25d different languages)%22%0A %25 (num_processed, len(broadcast_ids), num_updated, len(broadcasts_by_lang)))%0A%0A if broadcast_ids:%0A print(%22Finished population of Broadcast.base_language for %25d total broadcasts%22 %25 len(broadcast_ids))%0A%0A%0Adef apply_as_migration(apps, schema_editor):%0A Broadcast = apps.get_model('msgs', 'Broadcast')%0A FlowStep = apps.get_model('flows', 'FlowStep')%0A%0A do_populate(Broadcast, FlowStep)%0A%0A%0Adef apply_offline():%0A from temba.flows.models import FlowStep%0A from temba.msgs.models import Broadcast%0A%0A do_populate(Broadcast, FlowStep)%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('msgs', '0068_broadcast_base_language'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(apply_as_migration)%0A %5D%0A
|
|
42cc997aea3f71d9b0db37d36a895e68994616ea
|
Add Jansson (#2287)
|
var/spack/repos/builtin/packages/jansson/package.py
|
var/spack/repos/builtin/packages/jansson/package.py
|
Python
| 0
|
@@ -0,0 +1,1579 @@
+##############################################################################%0A# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/llnl/spack%0A# Please also see the LICENSE file for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass Jansson(CMakePackage):%0A %22%22%22Jansson is a C library for encoding, decoding and manipulating JSON%0A data.%22%22%22%0A%0A homepage = %22http://www.digip.org/jansson/%22%0A url = %22https://github.com/akheron/jansson/archive/v2.9.tar.gz%22%0A%0A version('2.9', 'd2db25c437b359fc5a065ed938962237')%0A%0A depends_on('cmake', type='build')%0A
|
|
e63a623452d9aa64c2dd392442f1f09f8e0924ef
|
make it work on python 2.6
|
compress/filters/css_url_replace/__init__.py
|
compress/filters/css_url_replace/__init__.py
|
import re
from django.conf import settings
from compress.filter_base import FilterBase
CSS_REPLACE = getattr(settings, 'COMPRESS_CSS_URL_REPLACE', [])
class CSSURLReplace(FilterBase):
def filter_css(self, css):
for pattern, repl in CSS_REPLACE.iteritems():
css = re.sub(pattern, repl, css, flags=re.UNICODE | re.IGNORECASE)
if self.verbose:
print 'Replaced "%s" with "%s"' % (pattern, repl)
return css
|
Python
| 0.000324
|
@@ -312,42 +312,8 @@
css
-, flags=re.UNICODE %7C re.IGNORECASE
)%0A
|
feee17d37fdef9b2b511366f42599ceb1b7fdd50
|
Add migration
|
corehq/apps/sms/migrations/0019_add_new_registration_invitation_fields.py
|
corehq/apps/sms/migrations/0019_add_new_registration_invitation_fields.py
|
Python
| 0.000002
|
@@ -0,0 +1,939 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0Aimport jsonfield.fields%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('sms', '0018_check_for_phone_number_migration'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='selfregistrationinvitation',%0A name='android_only',%0A field=models.BooleanField(default=False),%0A preserve_default=True,%0A ),%0A migrations.AddField(%0A model_name='selfregistrationinvitation',%0A name='custom_user_data',%0A field=jsonfield.fields.JSONField(default=dict),%0A preserve_default=True,%0A ),%0A migrations.AddField(%0A model_name='selfregistrationinvitation',%0A name='require_email',%0A field=models.BooleanField(default=False),%0A preserve_default=True,%0A ),%0A %5D%0A
|
|
1cee75aec336b6ba7d21cb9aa18b238e68ce2fd0
|
add script to export data from database
|
website/scripts/export_pairs.py
|
website/scripts/export_pairs.py
|
Python
| 0
|
@@ -0,0 +1,389 @@
+from website.models import Annotation%0A%0Anl_list = %5B%5D%0Acm_list = %5B%5D%0Afor annotation in Annotation.objects.all():%0A nl_list.append(annotation.nl.str)%0A cm_list.append(annotation.cmd.str)%0A%0Awith open('nl.txt', 'w') as o_f:%0A for nl in nl_list:%0A o_f.write('%7B%7D%5Cn'.format(nl.strip()))%0A%0Awith open('cm.txt', 'w') as o_f:%0A for cm in cm_list:%0A o_f.write('%7B%7D%5Cn'.format(cm.strip()))
|