commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
9c70a5d65b1c06f62751dfb4fcdd4d6a60a5eb71
|
Add unit tests for testing the widget tree iterators.
|
kivy/tests/test_widget_walk.py
|
kivy/tests/test_widget_walk.py
|
Python
| 0
|
@@ -0,0 +1,1902 @@
+import unittest%0A%0A%0Aclass FileWidgetWalk(unittest.TestCase):%0A%0A def test_walk_large_tree(self):%0A from kivy.uix.boxlayout import BoxLayout%0A from kivy.uix.label import Label%0A from kivy.uix.widget import walk, walk_reverse%0A ''' the tree%0A BoxLayout%0A BoxLayout%0A Label%0A 10 labels%0A BoxLayout%0A 10 labels%0A BoxLayout%0A Label%0A Label%0A '''%0A%0A root = BoxLayout()%0A tree = %5Broot%5D%0A%0A box = BoxLayout()%0A tree.append(box)%0A root.add_widget(box)%0A%0A label = Label()%0A tree.append(label)%0A root.add_widget(label)%0A for i in range(10):%0A tree.append(Label())%0A label.add_widget(tree%5B-1%5D)%0A%0A box = BoxLayout()%0A tree.append(box)%0A root.add_widget(box)%0A for i in range(10):%0A tree.append(Label())%0A box.add_widget(tree%5B-1%5D)%0A%0A box = BoxLayout()%0A tree.append(box)%0A root.add_widget(box)%0A tree.append(Label())%0A box.add_widget(tree%5B-1%5D)%0A%0A label = Label()%0A tree.append(label)%0A root.add_widget(label)%0A%0A def rotate(l, n):%0A return l%5Bn:%5D + l%5B:n%5D%0A%0A for i in range(len(tree)):%0A rotated = rotate(tree, i) # shift list to start at i%0A walked = %5Bn for n in walk(tree%5Bi%5D)%5D # walk starting with i%0A walked_reversed = %5Bn for n in walk_reverse(tree%5Bi%5D)%5D%0A%0A self.assertListEqual(rotated, walked)%0A self.assertListEqual(walked, list(reversed(walked_reversed)))%0A%0A def test_walk_single(self):%0A from kivy.uix.label import Label%0A from kivy.uix.widget import walk, walk_reverse%0A%0A label = Label()%0A self.assertListEqual(%5Bn for n in walk(label)%5D, %5Blabel%5D)%0A self.assertListEqual(%5Bn for n in walk_reverse(label)%5D, %5Blabel%5D)%0A
|
|
989320c3f2bdf65eb8c22822f34052047e0d1a2b
|
Reorder array
|
Arrays/reorder_array.py
|
Arrays/reorder_array.py
|
Python
| 0.00003
|
@@ -0,0 +1,1046 @@
+%22%22%22%0AGiven two integer arrays of same size, arr%5B%5D and index%5B%5D, reorder elements in arr%5B%5D according to given index array.%0AInput:%0Aarr: 50 40 70 60 90%0Aindex: 3 0 4 1 2%0AOutput:%0Aarr: 60 50 90 40 70%0Aindex: 0 1 2 3 4%0A%22%22%22%0A%0A%22%22%22%0AApproach:%0A1. Do the following for every element arr%5Bi%5D%0A2. While index%5Bi%5D != i, store array and index values for the target position where arr%5Bi%5D has to be placed.%0A The correct position for arr%5Bi%5D is index%5Bi%5D.%0A3. Place arr%5Bi%5D at its correct position. Also update index value of correct position.%0A4. Copy old values of correct position to arr%5Bi%5D and index%5Bi%5D as the while loop continues for i.%0A%22%22%22%0A%0A%0Adef reorder(list_of_numbers, indices):%0A for i in range(len(list_of_numbers)):%0A while indices%5Bi%5D != i:%0A old_target_index = indices%5Bindices%5Bi%5D%5D%0A old_target_element = list_of_numbers%5Bindices%5Bi%5D%5D%0A%0A list_of_numbers%5Bindices%5Bi%5D%5D = list_of_numbers%5Bi%5D%0A indices%5Bindices%5Bi%5D%5D = indices%5Bi%5D%0A%0A indices%5Bi%5D = old_target_index%0A list_of_numbers%5Bi%5D = old_target_element%0A%0A
|
|
3a4de870ebefd0e3e32b8c1b9facee6c98ce8b7f
|
Convert python 2 version to python 3
|
ltk2to3.py
|
ltk2to3.py
|
Python
| 0.999999
|
@@ -0,0 +1,2105 @@
+import os%0Aimport shutil%0Aimport fnmatch%0A%0Adef get_files(patterns):%0A %22%22%22 gets all files matching pattern from root%0A pattern supports any unix shell-style wildcards (not same as RE) %22%22%22%0A%0A cwd = os.getcwd()%0A if isinstance(patterns,str):%0A patterns = %5Bpatterns%5D%0A%0A matched_files = %5B%5D%0A for pattern in patterns:%0A path = os.path.abspath(pattern)%0A # print(%22looking at path %22+str(path))%0A # check if pattern contains subdirectory%0A if os.path.exists(path):%0A if os.path.isdir(path):%0A for root, subdirs, files in os.walk(path):%0A split_path = root.split('/')%0A for file in files:%0A # print(os.path.join(root, file))%0A if fnmatch.fnmatch(file, '*.py'):%0A matched_files.append(os.path.join(root, file))%0A else:%0A matched_files.append(path)%0A else:%0A logger.info(%22File not found: %22+pattern)%0A if len(matched_files) == 0:%0A return None%0A return matched_files%0A%0Adir2 = 'python2/ltk'%0Afiles2 = get_files(dir2)%0A%0A# Copy files from 2 to 3%0Afor fpath2 in files2:%0A fpath3 = fpath2.replace('python2','python3')%0A shutil.copyfile(fpath2, fpath3)%0A%0A# Comment and uncomment specified lines in Python 3 version%0Afor fpath in files2:%0A fpath = fpath.replace('python2','python3')%0A with open(fpath, 'r+') as f:%0A lines = f.readlines()%0A f.seek(0)%0A f.truncate()%0A is_python3 = False%0A is_python2 = False%0A for line in lines:%0A if '# Python 3' in line:%0A is_python3 = True%0A elif is_python3:%0A if '# End Python 3' in line:%0A is_python3 = False%0A continue%0A line = line.replace('# ','')%0A elif '# Python 2' in line:%0A is_python2 = True%0A elif is_python2:%0A if '# End Python 2' in line:%0A is_python2 = False%0A continue%0A line = '# '+str(line)%0A f.write(line)
|
|
0babd53317322cea1a56cc8cacd6ffc417145c80
|
Add migration file.
|
django_project/realtime/migrations/0033_auto_20180202_0723.py
|
django_project/realtime/migrations/0033_auto_20180202_0723.py
|
Python
| 0
|
@@ -0,0 +1,505 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('realtime', '0032_auto_20180201_0947'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='ash',%0A name='forecast_duration',%0A field=models.IntegerField(default=1, verbose_name='Duration of forecast for Ash Hazard in days'),%0A preserve_default=True,%0A ),%0A %5D%0A
|
|
844049b0d4aecb25fc480fae37111e8aebac6438
|
Add mimeformats.py to support drag and drop
|
src/mcedit2/util/mimeformats.py
|
src/mcedit2/util/mimeformats.py
|
Python
| 0
|
@@ -0,0 +1,229 @@
+%22%22%22%0A mimeformats%0A%22%22%22%0Afrom __future__ import absolute_import, division, print_function, unicode_literals%0Aimport logging%0A%0Alog = logging.getLogger(__name__)%0A%0Aclass MimeFormats(object):%0A MapItem = %22application/x-mcedit-mapitem%22
|
|
fa1223c661d60033b7d7aba2a27151d6ee18a299
|
Add tests for circle ci checks
|
tests/ci_checks/test_circle.py
|
tests/ci_checks/test_circle.py
|
Python
| 0
|
@@ -0,0 +1,1089 @@
+import pytest%0A%0Afrom semantic_release import ci_checks%0Afrom semantic_release.errors import CiVerificationError%0A%0A%0Adef test_circle_should_pass_if_branch_is_master_and_no_pr(monkeypatch):%0A monkeypatch.setenv('CIRCLE_BRANCH', 'master')%0A monkeypatch.setenv('CI_PULL_REQUEST', '')%0A%0A assert ci_checks.circle('master')%0A%0A%0Adef test_circle_should_pass_if_branch_is_correct_and_no_pr(monkeypatch):%0A monkeypatch.setenv('CIRCLE_BRANCH', 'other-branch')%0A monkeypatch.setenv('CI_PULL_REQUEST', '')%0A%0A assert ci_checks.circle('other-branch')%0A%0A%0Adef test_circle_should_raise_ci_verification_error_for_wrong_branch(monkeypatch):%0A monkeypatch.setenv('CIRCLE_BRANCH', 'other-branch')%0A monkeypatch.setenv('CI_PULL_REQUEST', '')%0A%0A with pytest.raises(CiVerificationError):%0A ci_checks.circle('master')%0A%0A%0Adef test_circle_should_raise_ci_verification_error_for_pr(monkeypatch):%0A monkeypatch.setenv('CIRCLE_BRANCH', 'other-branch')%0A monkeypatch.setenv('CI_PULL_REQUEST', 'http://the-url-of-the-pr')%0A%0A with pytest.raises(CiVerificationError):%0A ci_checks.circle('master')%0A
|
|
6a50f602ebc2334d45352cd2ff13c1f91db7e0bd
|
Integrate LLVM at llvm/llvm-project@8e22539067d9
|
third_party/llvm/workspace.bzl
|
third_party/llvm/workspace.bzl
|
"""Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "223261cbaa6b4c74cf9eebca3452ec0d15ea018e"
LLVM_SHA256 = "8425d6458484c6e7502b4e393cd8d98b533826a3b040261d67261f1364936518"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
|
Python
| 0.000003
|
@@ -160,133 +160,133 @@
= %22
-223261cbaa6b4c74cf9eebca3452ec0d15ea018e%22%0A LLVM_SHA256 = %228425d6458484c6e7502b4e393cd8d98
+8e22539067d9376c4f808b25f543feba728d40c9%22%0A LLVM_SHA256 = %22db0a7099e6e1eacb
b5
+1
338
-26a3b040261d67261f1364936518
+f0b18c237be7354c25e8126c523390bef965a9b6f6
%22%0A%0A
|
bd729068b1683954ab190f187e59d8a5fc0741f1
|
Integrate LLVM at llvm/llvm-project@7ed7d4ccb899
|
third_party/llvm/workspace.bzl
|
third_party/llvm/workspace.bzl
|
"""Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "b109172d993edacd9853a8bbb8128a94da014399"
LLVM_SHA256 = "36ee6bf7d89b43034c1c58c57aa63d0703d1688807480969dfd1f4d7ccaa3787"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
|
Python
| 0.000004
|
@@ -160,133 +160,133 @@
= %22
-b109172d993edacd9853a8bbb8128a94da014399%22%0A LLVM_SHA256 = %2236ee6bf7d89b43034c1c58c57aa63d0703d1688807480969dfd1f4d7ccaa3787
+7ed7d4ccb8991e2b5b95334b508f8cec2faee737%22%0A LLVM_SHA256 = %226584ccaffd5debc9fc1bb275a36af9bad319a7865abecf36f97cbe3c2da028d0
%22%0A%0A
|
f29dab9a82b44fac483d71c432a40a0bb2ca51b1
|
Add the beginnings of an example client.
|
examples/dbus_client.py
|
examples/dbus_client.py
|
Python
| 0
|
@@ -0,0 +1,769 @@
+%0Aimport dbus%0A%0Abus = dbus.SystemBus()%0A%0A# This adds a signal match so that the client gets signals sent by Blivet1's%0A# ObjectManager. These signals are used to notify clients of changes to the%0A# managed objects (for blivet, this will be devices, formats, and actions).%0Abus.add_match_string(%22type='signal',sender='com.redhat.Blivet1',path_namespace='/com/redhat/Blivet1'%22)%0A%0Ablivet = bus.get_object('com.redhat.Blivet1', '/com/redhat/Blivet1/Blivet')%0Ablivet.Reset()%0A%0Aobject_manager = bus.get_object('com.redhat.Blivet1', '/com/redhat/Blivet1')%0Aobjects = object_manager.GetManagedObjects()%0Afor object_path in blivet.ListDevices():%0A device = objects%5Bobject_path%5D%5B'com.redhat.Blivet1.Device'%5D%0A print(device%5B'Name'%5D, device%5B'Type'%5D, device%5B'Size'%5D, device%5B'FormatType'%5D)%0A
|
|
ebf4d87390307dcf735c53f18a18f3466a4ee5e4
|
Add standalone wave trigger tool.
|
tools/standalonewavetrigger.py
|
tools/standalonewavetrigger.py
|
Python
| 0
|
@@ -0,0 +1,2720 @@
+#!/usr/bin/env python%0A%0A# Standard library imports%0Aimport argparse%0Aimport collections%0Aimport logging%0Aimport os%0Aimport time%0A%0A# Additional library imports%0Aimport requests%0A%0A%0A# Named logger for this module%0A_logger = logging.getLogger(__name__)%0A%0A# Parse the command line arguments%0A_parser = argparse.ArgumentParser('')%0A_parser.add_argument('-t', '--triggers', default='triggers', help='Folder containing trigger files')%0A_parser.add_argument('-r', '--rate', default=4.0, help='Poll rate in polls per second')%0A_parser.add_argument('-d', '--debug', action='store_true', help='Enables debug logging')%0A_args = _parser.parse_args()%0A%0A# Configure the logging module%0A_logformat = '%25(asctime)s : %25(levelname)s : %25(name)s : %25(message)s'%0A_loglevel = logging.DEBUG if _args.debug else logging.INFO%0Alogging.basicConfig(format=_logformat, level=_loglevel)%0Alogging.getLogger('requests.packages.urllib3').setLevel(logging.WARNING)%0A%0A# We use a session variable so that HTTP keep-alive is utilized, and%0A# also so we'll always remember to set the content type appropriately.%0A_session = requests.session()%0A_session.headers%5B'Content-Type'%5D = 'application/json'%0A%0A# Stores previous last access times for each file%0A# so they can be compared each time files are polled.%0A_atimes = collections.defaultdict(time.time)%0A%0A%0A# Poll the list of files forever%0Awhile True:%0A%0A # Delay the appropriate amount of time between polls%0A time.sleep(1.0 / _args.rate)%0A%0A # Grab a list of all fully-qualified wave file names in the trigger folder%0A files = (os.path.join(_args.triggers, f) for f in os.listdir(_args.triggers) if os.path.splitext(f)%5B1%5D == '.wav')%0A%0A # Iterate over the list of files%0A for filename in files:%0A%0A # If the last access time is newer than what was previous recorded then take%0A # action on that file. 
A small threshold is used to prevent %22double bouncing%22.%0A if os.stat(filename).st_atime - _atimes%5Bfilename%5D %3E 1.0:%0A%0A # Open the file and pull out the data%0A with open(filename, 'rb') as f:%0A req = f.read()%0A%0A # Immediately store off the last accessed time%0A _atimes%5Bfilename%5D = os.stat(filename).st_atime%0A%0A # Separate the components of the request%0A method, url, data = req%5B52:%5D.splitlines(False)%0A%0A # Attempt to send the request and log the results%0A _logger.debug('Sending %7B0%7D request to %7B1%7D'.format(method, url))%0A try:%0A response = _session.request(method, url, data=data)%0A _logger.debug('Received response with status code %7B0%7D'.format(response.status_code))%0A except requests.RequestException:%0A _logger.warning('Unable to contact %7B0%7D'.format(url))%0A
|
|
d76c7f73701edeb263ebffc94ccc3f4893f7ef0d
|
add leetcode Reorder List
|
leetcode/ReorderList/solution.py
|
leetcode/ReorderList/solution.py
|
Python
| 0
|
@@ -0,0 +1,1455 @@
+# Definition for singly-linked list.%0Aclass ListNode:%0A%0A def __init__(self, x):%0A self.val = x%0A self.next = None%0A%0A def printList(self):%0A head = self%0A while head:%0A print head,%0A head = head.next%0A print ''%0A%0A def __str__(self):%0A return str(self.val)%0A%0A%0Aclass Solution:%0A # @param head, a ListNode%0A # @return nothing%0A%0A def reorderList(self, head):%0A if head is None or head.next is None:%0A return head%0A slow = fast = head%0A while fast.next and fast.next.next:%0A slow = slow.next%0A fast = fast.next.next%0A prev = slow.next%0A slow.next = None%0A%0A # reverse the second part of the list%0A cur = prev.next%0A prev.next = None%0A while cur:%0A next = cur.next%0A cur.next = prev%0A prev = cur%0A cur = next%0A%0A # merge the two sublist%0A root = head%0A while prev:%0A tmp = prev.next%0A prev.next = root.next%0A root.next = prev%0A root = prev.next%0A prev = tmp%0A return head%0A%0A%0Adef main():%0A import random%0A root = ListNode(random.randint(1, 1000))%0A head = root%0A for x in xrange(9):%0A head.next = ListNode(random.randint(1, 1000))%0A head = head.next%0A root.printList()%0A s = Solution()%0A root = s.reorderList(root)%0A root.printList()%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
19c4e9a6505f9107e8d47c901658050bdd47dd1e
|
Correct docstring.
|
astm/protocol.py
|
astm/protocol.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import logging
from threading import _Timer, RLock
from collections import namedtuple
from .asynclib import AsyncChat
from .records import HeaderRecord, TerminatorRecord
from .constants import STX, CRLF, ENQ, ACK, NAK, EOT, ENCODING
log = logging.getLogger(__name__)
#: ASTM protocol states set.
STATE = namedtuple('ASTMState', ['init', 'opened', 'transfer'])(*range(3))
__all__ = ['STATE', 'ASTMProtocol']
class ASTMProtocol(AsyncChat):
"""Common ASTM protocol routines."""
#: ASTM header record class.
astm_header = HeaderRecord
#: ASTM terminator record class.
astm_terminator = TerminatorRecord
#: Flag about chunked transfer.
is_chunked_transfer = None
#: Operation timeout value.
timeout = None
use_encoding = True
encoding = ENCODING
strip_terminator = False
_last_recv_data = None
_last_sent_data = None
_state = None
_lock = RLock()
_timer = None
_timer_cls = _Timer
def __init__(self, sock=None, map=None, timeout=None):
super(ASTMProtocol, self).__init__(sock, map)
if timeout is not None:
self.timeout = timeout
def found_terminator(self):
while self.inbox:
data = self.inbox.popleft()
if not data:
continue
self.dispatch(data)
def dispatch(self, data):
"""Dispatcher of received data."""
self._last_recv_data = data
if data == ENQ:
handler = self.on_enq
elif data == ACK:
handler = self.on_ack
elif data == NAK:
handler = self.on_nak
elif data == EOT:
handler = self.on_eot
elif data.startswith(STX): # this looks like a message
handler = self.on_message
else:
handler = lambda: self.default_handler(data)
with self._lock:
resp = handler()
self.start_timer()
if resp is not None:
self.push(resp)
def default_handler(self, data):
raise ValueError('Unable to dispatch data: %r', data)
def push(self, data):
self._last_sent_data = data
return super(ASTMProtocol, self).push(data)
def start_timer(self):
if self.timeout is None:
return
self.stop_timer()
self._timer = self._timer_cls(self.timeout, self.on_timeout)
self._timer.daemon = True
self._timer.start()
log.debug('Timer %r started', self._timer)
def stop_timer(self):
if self.timeout is None or self._timer is None:
return
if self._timer is not None and self._timer.is_alive():
self._timer.cancel()
log.debug('Timer %r stopped', self._timer)
self._timer = None
def on_enq(self):
"""Calls on <ENQ> message receiving."""
def on_ack(self):
"""Calls on <ACK> message receiving."""
def on_nak(self):
"""Calls on <NAK> message receiving."""
def on_eot(self):
"""Calls on <EOT> message receiving."""
def on_message(self):
"""Calls on ASTM message receiving."""
def _get_state(self):
return self._state
def _set_state(self, value):
assert value in STATE
self._state = value
#: ASTM handler state value:
#:
#: - ``init``: Neutral state
#: - ``opened``: ENQ message was sent, waiting for ACK
#: - ``transfer``: Data transfer processing
#:
state = property(_get_state, _set_state)
def set_init_state(self):
"""Sets handler state to INIT (0).
In ASTM specification this state also called as `neutral` which means
that handler is ready to establish data transfer.
"""
self.terminator = 1
self.state = STATE.init
self.on_init_state()
log.info('Switched to init state')
def set_opened_state(self):
"""Sets handler state to OPENED (1).
Intermediate state that only means for client implementation. On this
state client had already sent <ENQ> and awaits for <ACK> or
<NAK> response. On <ACK> it switched his state to `transfer`.
"""
self.terminator = 1
self.state = STATE.opened
self.on_opened_state()
log.info('Switched to opened state')
def set_transfer_state(self):
"""Sets handler state to TRANSFER (2).
In this state handler is able to send or receive ASTM messages depending
on his role (client or server). At the end of data transfer client
should send <EOT> and switch state to `init`.
"""
self.terminator = [CRLF, EOT]
self.state = STATE.transfer
self.on_transfer_state()
log.info('Switched to transfer state')
def on_init_state(self):
"""Calls on set state INIT (0)"""
def on_opened_state(self):
"""Calls on set state OPENED (1)"""
def on_transfer_state(self):
"""Calls on set state TRANSFER (2)"""
def on_timeout(self):
"""Calls when timeout occurs for send/recv operations."""
def discard_input_buffers(self):
self._last_recv_message = None
return super(ASTMProtocol, self).discard_input_buffers()
def discard_output_buffers(self):
self._last_sent_message = None
return super(ASTMProtocol, self).discard_output_buffers()
|
Python
| 0.000006
|
@@ -5254,39 +5254,74 @@
out
-occurs for send/recv operations
+event occurs. Used to limit time for waiting%0A response data
.%22%22%22
|
a86852fe908bb0a44ef267a75b9446ddcaf03f6e
|
Add basic support for LimitlessLED
|
homeassistant/components/light/limitlessled.py
|
homeassistant/components/light/limitlessled.py
|
Python
| 0
|
@@ -0,0 +1,2516 @@
+%22%22%22%0Ahomeassistant.components.light.limitlessled%0A~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~%0A%0ASupport for LimitlessLED bulbs, also known as...%0A%0AEasyBulb%0AAppLight%0AAppLamp%0AMiLight%0ALEDme%0Adekolight%0AiLight%0A%0A%22%22%22%0Aimport random%0Aimport logging%0A%0Afrom homeassistant.helpers.entity import ToggleEntity%0Afrom homeassistant.const import STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME%0Afrom homeassistant.components.light import ATTR_BRIGHTNESS%0A%0A_LOGGER = logging.getLogger(__name__)%0A%0A%0Adef setup_platform(hass, config, add_devices_callback, discovery_info=None):%0A try:%0A import ledcontroller%0A except ImportError:%0A _LOGGER.exception(%22Error while importing dependency ledcontroller.%22)%0A return%0A%0A led = ledcontroller.LedController(config%5B'host'%5D)%0A%0A lights = %5B%5D%0A for i in range(1, 5):%0A if 'group_%25d_name' %25 (i) in config:%0A lights.append(%0A LimitlessLED(%0A led,%0A i,%0A config%5B'group_%25d_name' %25 (i)%5D,%0A STATE_OFF%0A )%0A )%0A%0A add_devices_callback(lights)%0A%0A%0Aclass LimitlessLED(ToggleEntity):%0A def __init__(self, led, group, name, state, brightness=180):%0A self.led = led%0A self.group = group%0A%0A # LimitlessLEDs don't report state, we have track it ourselves.%0A self.led.off(self.group)%0A%0A self._name = name or DEVICE_DEFAULT_NAME%0A self._state = state%0A self._brightness = brightness%0A%0A @property%0A def should_poll(self):%0A %22%22%22 No polling needed for a demo light. %22%22%22%0A return False%0A%0A @property%0A def name(self):%0A %22%22%22 Returns the name of the device if any. %22%22%22%0A return self._name%0A%0A @property%0A def state(self):%0A %22%22%22 Returns the name of the device if any. %22%22%22%0A return self._state%0A%0A @property%0A def state_attributes(self):%0A %22%22%22 Returns optional state attributes. %22%22%22%0A if self.is_on:%0A return %7B%0A ATTR_BRIGHTNESS: self._brightness,%0A %7D%0A%0A @property%0A def is_on(self):%0A %22%22%22 True if device is on. 
%22%22%22%0A return self._state == STATE_ON%0A%0A def turn_on(self, **kwargs):%0A %22%22%22 Turn the device on. %22%22%22%0A self._state = STATE_ON%0A%0A if ATTR_BRIGHTNESS in kwargs:%0A self._brightness = kwargs%5BATTR_BRIGHTNESS%5D%0A%0A self.led.set_brightness(self._brightness, self.group)%0A%0A def turn_off(self, **kwargs):%0A %22%22%22 Turn the device off. %22%22%22%0A self._state = STATE_OFF%0A self.led.off(self.group)%0A
|
|
8d1917785f4cf8cc17ec1b3898dcb90f7402cfe9
|
Revert of Attempt to add tracing dir into path, so that tracing_project can be imported. (patchset #1 id:1 of https://codereview.chromium.org/1300373002/ )
|
tracing/tracing_build/__init__.py
|
tracing/tracing_build/__init__.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
import os
import sys
def _AddTracingProjectPath():
tracing_path = os.path.normpath(
os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
if tracing_path not in sys.path:
sys.path.insert(0, tracing_path)
_AddTracingProjectPath()
import tracing_project
tracing_project.UpdateSysPathIfNeeded()
|
Python
| 0.000007
|
@@ -187,243 +187,8 @@
ys%0A%0A
-%0Adef _AddTracingProjectPath():%0A tracing_path = os.path.normpath(%0A os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))%0A if tracing_path not in sys.path:%0A sys.path.insert(0, tracing_path)%0A%0A%0A_AddTracingProjectPath()%0A
impo
|
37f286812bea7429bea67172a40d26ad435d6f67
|
Add test for 'holes' argument in add_polygon
|
test/examples/hole_in_square.py
|
test/examples/hole_in_square.py
|
Python
| 0.000004
|
@@ -0,0 +1,861 @@
+#!/usr/bin/python%0A# -*- coding: utf-8 -*-%0A%0Aimport pygmsh as pg%0Aimport numpy as np%0A%0Adef generate():%0A # Characteristic length%0A lcar = 1e-1%0A%0A # Coordinates of lower-left and upper-right vertices of a square domain%0A xmin = 0.0%0A xmax = 5.0%0A ymin = 0.0%0A ymax = 5.0%0A%0A # Vertices of a square hole%0A squareHoleCoordinates = np.array(%5B%5B1, 1, 0%5D,%0A %5B4, 1, 0%5D,%0A %5B4, 4, 0%5D,%0A %5B1, 4, 0%5D%5D)%0A%0A # Create geometric object%0A geom = pg.Geometry()%0A%0A # Create square hole%0A squareHole = %5Bgeom.add_polygon_loop(squareHoleCoordinates, lcar)%5D%0A%0A # Create square domain with square hole%0A geom.add_rectangle(xmin, xmax, ymin, ymax, 0.0, lcar, holes=squareHole)%0A%0A # Return geo-file code%0A return geom.get_code()%0A%0Aif __name__ == '__main__':%0A print(generate())%0A
|
|
db4bc200f9a48edf9e160c2134293df0313183a7
|
Add conditional command prefix plugin
|
conditional_prefix.py
|
conditional_prefix.py
|
Python
| 0.000001
|
@@ -0,0 +1,573 @@
+from cloudbot import hook%0A%0Aimport re%0A%0A@hook.sieve%0Adef conditional_prefix(bot, event, plugin):%0A if plugin.type == 'command':%0A if event.chan in event.conn.config%5B'prefix_blocked_channels'%5D:%0A command_prefix = event.conn.config%5B'command_prefix'%5D%0A%0A if not event.chan.lower() == event.nick.lower(): # private message, no command prefix%0A command_re = r'(?i)%5E(?:%5B%7B%7D%5D)(%5Cw+)(?:$%7C%5Cs+)(.*)'.format(command_prefix, event.conn.nick)%0A%0A if re.match(command_re, event.content):%0A return None%0A return event%0A
|
|
a2ea7c7d4d6b680f180b9916eb2a814713887154
|
Test empty record.
|
tests/test_empty_record.py
|
tests/test_empty_record.py
|
Python
| 0
|
@@ -0,0 +1,2381 @@
+#!/usr/bin/env python%0A%0A# Copyright 2016 Ben Walsh%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0A%0Aimport sys%0Aimport os%0Aimport pytest%0Aimport shutil%0Aimport tempfile%0A%0Aimport avro.schema%0Aimport avro.datafile%0Aimport avro.io%0A%0Aimport pyavroc%0A%0Aimport _testhelper%0A%0ANRECORDS = 100%0A%0Ajson_schema = '''%7B%22namespace%22: %22example.avro%22,%0A %22type%22: %22record%22,%0A %22name%22: %22User%22,%0A %22fields%22: %5B%0A %5D%0A%7D'''%0A%0A%0Adef _python_create_file(filename):%0A if sys.version_info %3E= (3,):%0A schema = avro.schema.Parse(json_schema)%0A else:%0A schema = avro.schema.parse(json_schema)%0A%0A with open(filename, 'wb') as fp:%0A writer = avro.datafile.DataFileWriter(fp, avro.io.DatumWriter(), schema)%0A%0A for i in range(NRECORDS):%0A writer.append(%7B%7D)%0A%0A writer.close()%0A%0A%0Adef _pyavroc_create_file(filename):%0A avtypes = pyavroc.create_types(json_schema)%0A%0A with open(filename, 'w') as fp:%0A writer = pyavroc.AvroFileWriter(fp, json_schema)%0A%0A for i in range(NRECORDS):%0A writer.write(avtypes.User())%0A%0A writer.close()%0A%0A%0Adef _create_files():%0A dirname = tempfile.mkdtemp()%0A%0A python_filename = os.path.join(dirname, %22test_python.avro%22)%0A pyavroc_filename = os.path.join(dirname, %22test_pyavroc.avro%22)%0A%0A _python_create_file(python_filename)%0A _pyavroc_create_file(pyavroc_filename)%0A%0A return (dirname, python_filename, pyavroc_filename)%0A%0A%0Adef 
_delete_files(dirname):%0A shutil.rmtree(dirname)%0A%0A%0Adef _python_read(filename):%0A fp = avro.datafile.DataFileReader(open(filename, 'rb'), avro.io.DatumReader())%0A%0A return list(fp)%0A%0A%0Adef _pyavroc_read(filename, types):%0A fp = pyavroc.AvroFileReader(open(filename), types=types)%0A%0A return list(fp)%0A%0A%0Adef test_load_same():%0A dirname, python_filename, pyavroc_filename = _create_files()%0A%0A assert _pyavroc_read(python_filename, False) == _python_read(pyavroc_filename)%0A%0A _delete_files(dirname)%0A
|
|
cba5a8058e96bd6c5ee639df223c77f56d8296fa
|
Add ladot package (#10905)
|
var/spack/repos/builtin/packages/ladot/package.py
|
var/spack/repos/builtin/packages/ladot/package.py
|
Python
| 0
|
@@ -0,0 +1,909 @@
+# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Ladot(Package):%0A %22%22%22Ladot is a script that makes using LaTeX in graphs generated by dot%0A (graphviz) relatively easy.%22%22%22%0A%0A homepage = %22https://brighten.bigw.org/projects/ladot/%22%0A url = %22https://brighten.bigw.org/projects/ladot/ladot-1.2.tar.gz%22%0A%0A version('1.2', sha256='f829eeca829b82c0315cd87bffe410bccab96309b86b1c883b3ddaa93170f25e')%0A%0A depends_on('perl', type=('run', 'test'))%0A depends_on('graphviz', type=('run', 'test'))%0A depends_on('texlive', type='test')%0A%0A def install(self, spec, prefix):%0A if self.run_tests:%0A with working_dir('example'):%0A make()%0A%0A mkdir(prefix.bin)%0A install('ladot', prefix.bin)%0A
|
|
152bf235721c5b6c8ba61da4d8521733a2842885
|
Send script
|
extract_norcal_table.py
|
extract_norcal_table.py
|
Python
| 0
|
@@ -0,0 +1,327 @@
+import urllib2%0A%0Afrom bs4 import BeautifulSoup%0A%0Aurl = %22http://www.mapsofworld.com/usa/states/california/map-of-northern-%0A california.html%22%0A %0Apage = urllib2.urlopen(url) %0Asoup = BeautifulSoup(page)%0A%0Atables = soup.findAll(%22table%22)%0Atables%5B3%5D.find_all('td')%0A%0Afor td in tables%5B3%5D.find_all('td'):%0A print td.text
|
|
b80e52ecf09f96e84625eb6fff9aa7a20059c0f8
|
Add new top level script to ease running of individual unittests.
|
test_single.py
|
test_single.py
|
Python
| 0
|
@@ -0,0 +1,256 @@
+%0Aimport sys%0Aimport unittest%0A%0Afrom toast.mpirunner import MPITestRunner%0A%0Afile = sys.argv%5B1%5D%0A%0Aloader = unittest.TestLoader()%0Arunner = MPITestRunner(verbosity=2)%0Asuite = loader.discover('tests', pattern='%7B%7D'.format(file), top_level_dir='.')%0Arunner.run(suite)%0A
|
|
2ae235215d33555b077fbd9e2f0c42d52ccce8c4
|
add listener
|
dyn-listener.py
|
dyn-listener.py
|
Python
| 0
|
@@ -0,0 +1,1338 @@
+#!/usr/bin/env python%0A%0Afrom logentries import LogentriesHandler%0Aimport logging%0Afrom flask import Flask, jsonify, request%0A%0Alistener = Flask(__name__)%0A%0A# Configure the port your postback URL will listen on and provide your%0A# LOGENTRIES_TOKEN%0APORT = 5000%0ALOGENTRIES_TOKEN = %22your-log-token-here%22%0A%0Alog = logging.getLogger('logentries')%0Alog.setLevel(logging.INFO)%0Adyn = LogentriesHandler(LOGENTRIES_TOKEN)%0Alog.addHandler(dyn)%0A# Enter the following for the bounce postback URL:%0A# SCRIPT_HOST_IP:PORT/bounce?e=@email&r=@bouncerule&t=@bouncetype&dc=@diagnostic&s=@status%0A@listener.route('/bounce', methods=%5B'GET'%5D)%0Adef bounce():%0A e = request.args.get('e')%0A r = request.args.get('r')%0A t = request.args.get('t')%0A dc = request.args.get('dc')%0A s = request.args.get('s')%0A log.info(%22BOUNCE: email='%7B%7D' rule='%7B%7D' type='%7B%7D' diagnostic='%7B%7D' %5C%0A status='%7B%7D'%22.format(e, r, t, dc, s))%0A return jsonify(result=%7B%22status%22: 200%7D)%0A%0A# Enter the following for the complaint postback URL:%0A# SCRIPT_HOST_IP:PORT/complaint?e=@email%0A@listener.route('/complaint', methods=%5B'GET'%5D)%0Adef complaint():%0A e = request.args.get('e')%0A log.info(%22COMPLAINT: email='%7B%7D'%22.format(e))%0A return jsonify(result=%7B%22status%22: 200%7D)%0A%0A%0Aif __name__ == '__main__':%0A listener.run(host='0.0.0.0',%0A port=PORT,%0A debug=False)%0A
|
|
abc32403d85c536f38a2072941f1864418c55b4f
|
Create editdistance.py
|
editdistance.py
|
editdistance.py
|
Python
| 0
|
@@ -0,0 +1,928 @@
+# Author: Vikram Raman%0A# Date: 09-12-2015%0A%0Aimport time%0A%0A# edit distance between two strings%0A# e(i,j) = min (1 + e(i-1,j) %7C 1 + e(i,j-1) %7C diff(i,j) + e(i-1,j-1))%0A%0Adef editdistance(s1, s2):%0A m = 0 if s1 is None else len(s1)%0A n = 0 if s2 is None else len(s2)%0A%0A if m == 0:%0A return n%0A elif n == 0:%0A return m%0A%0A l = %5B%5Bi for i in range(0,n+1)%5D%5D%0A%0A for i in range(1,m+1):%0A l.append(%5Bi%5D)%0A%0A for i in range(1,m+1):%0A for j in range(1,n+1):%0A minimum = min(1 + l%5Bi-1%5D%5Bj%5D, 1 + l%5Bi%5D%5Bj-1%5D, diff(s1,s2,i,j) + l%5Bi-1%5D%5Bj-1%5D)%0A l%5Bi%5D.append(minimum)%0A return l%5Bm%5D%5Bn%5D %0A%0Adef diff (s1, s2, i, j):%0A return s1%5Bi-1%5D != s2%5Bj-1%5D%0A%0As1 = %22exponential%22%0As2 = %22polynomial%22%0Aprint %22s1=%25s, s2=%25s%22 %25 (s1,s2)%0Astart_time = time.clock()%0Adistance=editdistance(s1, s2)%0Aprint %22distance=%25d%22 %25 (distance)%0Aprint(%22--- %25s seconds ---%22 %25 (time.clock() - start_time))%0Aprint editdistance(%22foo%22, %22bar%22)%0A
|
|
5a8ad84381650740c5eea8cacb2a6a2e90edaf54
|
Exclude the 500ing Citizen Code of Conduct.
|
tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py
|
tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py
|
import logging
import re
import scrapy
from scrapy import Request
from scrapy.linkextractors import IGNORED_EXTENSIONS
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.utils.url import url_has_any_extension
from typing import Any, Generator, List, Optional, Tuple
EXCLUDED_URLS = [
# Google calendar returns 404s on HEAD requests unconditionally
'https://calendar.google.com/calendar/embed?src=ktiduof4eoh47lmgcl2qunnc0o@group.calendar.google.com',
# Returns 409 errors to HEAD requests frequently
'https://medium.freecodecamp.org/',
# Returns 404 to HEAD requests unconditionally
'https://www.git-tower.com/blog/command-line-cheat-sheet/',
# Requires authentication
'https://circleci.com/gh/zulip/zulip',
'https://circleci.com/gh/zulip/zulip/16617',
]
class BaseDocumentationSpider(scrapy.Spider):
name = None # type: Optional[str]
# Exclude domain address.
deny_domains = [] # type: List[str]
start_urls = [] # type: List[str]
deny = [] # type: List[str]
file_extensions = ['.' + ext for ext in IGNORED_EXTENSIONS] # type: List[str]
tags = ('a', 'area', 'img')
attrs = ('href', 'src')
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.has_error = False
self.skip_external = kwargs.get('skip_external', None)
def _set_error_state(self) -> None:
self.has_error = True
def _has_extension(self, url: str) -> bool:
return url_has_any_extension(url, self.file_extensions)
def _is_external_url(self, url: str) -> bool:
return url.startswith('http') or self._has_extension(url)
def check_existing(self, response: Any) -> None:
self.log(response)
def _is_external_link(self, url: str) -> bool:
if "zulip.readthedocs" in url or "zulipchat.com" in url or "zulip.org" in url:
# We want CI to check any links to Zulip sites.
return False
if (len(url) > 4 and url[:4] == "file") or ("localhost" in url):
# We also want CI to check any links to built documentation.
return False
if 'github.com/zulip' in url:
# Finally, links to our own GitHub organization should always work.
return False
return True
def check_permalink(self, response: Any) -> None:
self.log(response)
xpath_template = "//*[@id='{permalink}' or @name='{permalink}']"
m = re.match(r".+\#(?P<permalink>.*)$", response.request.url) # Get anchor value.
if not m:
return
permalink = m.group('permalink')
# Check permalink existing on response page.
if not response.selector.xpath(xpath_template.format(permalink=permalink)):
self._set_error_state()
raise Exception(
"Permalink #{} is not found on page {}".format(permalink, response.request.url))
def parse(self, response: Any) -> Generator[Request, None, None]:
self.log(response)
for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=['doc'],
tags=self.tags, attrs=self.attrs, deny=self.deny,
canonicalize=False).extract_links(response):
callback = self.parse # type: Any
dont_filter = False
method = 'GET'
if self._is_external_url(link.url):
callback = self.check_existing
method = 'HEAD'
elif '#' in link.url:
dont_filter = True
callback = self.check_permalink
if self.skip_external:
if (self._is_external_link(link.url)):
continue
yield Request(link.url, method=method, callback=callback, dont_filter=dont_filter,
errback=self.error_callback)
def retry_request_with_get(self, request: Request) -> Generator[Request, None, None]:
request.method = 'GET'
request.dont_filter = True
yield request
def exclude_error(self, url: str) -> bool:
if url in EXCLUDED_URLS:
return True
return False
def error_callback(self, failure: Any) -> Optional[Generator[Any, None, None]]:
if hasattr(failure.value, 'response') and failure.value.response:
response = failure.value.response
if self.exclude_error(response.url):
return None
if response.status == 404:
self._set_error_state()
raise Exception('Page not found: {}'.format(response))
if response.status == 405 and response.request.method == 'HEAD':
# Method 'HEAD' not allowed, repeat request with 'GET'
return self.retry_request_with_get(response.request)
self.log("Error! Please check link: {}".format(response), logging.ERROR)
elif isinstance(failure.type, IOError):
self._set_error_state()
else:
raise Exception(failure.value)
return None
|
Python
| 0
|
@@ -807,16 +807,97 @@
16617',%0A
+ # 500s because the site is semi-down%0A 'http://citizencodeofconduct.org/',%0A
%5D%0A%0A%0Aclas
|
f79e0782235943e0ace543db754cca232682f6ad
|
Add some basic tests
|
km3pipe/io/tests/test_aanet.py
|
km3pipe/io/tests/test_aanet.py
|
Python
| 0.000012
|
@@ -0,0 +1,1348 @@
+# Filename: test_aanet.py%0A# pylint: disable=locally-disabled,C0111,R0904,C0301,C0103,W0212%0Afrom km3pipe.testing import TestCase, patch, Mock%0Afrom km3pipe.io.aanet import AanetPump%0A%0Aimport sys%0Asys.modules%5B'ROOT'%5D = Mock()%0Asys.modules%5B'aa'%5D = Mock()%0A%0A__author__ = %22Tamas Gal%22%0A__copyright__ = %22Copyright 2018, Tamas Gal and the KM3NeT collaboration.%22%0A__credits__ = %5B%5D%0A__license__ = %22MIT%22%0A__maintainer__ = %22Tamas Gal%22%0A__email__ = %22tgal@km3net.de%22%0A__status__ = %22Development%22%0A%0A%0Aclass TestAanetPump(TestCase):%0A def test_init_raises_valueerror_if_no_filename_given(self):%0A with self.assertRaises(ValueError):%0A AanetPump()%0A%0A def test_init_with_filename(self):%0A filename = 'a'%0A p = AanetPump(filename=filename)%0A assert filename in p.filenames%0A%0A @patch(%22ROOT.gSystem%22)%0A def test_init_with_custom_aanet_lib(self, root_gsystem_mock):%0A filename = 'a'%0A custom_aalib = 'an_aalib'%0A p = AanetPump(filename=filename, aa_lib=custom_aalib)%0A assert filename in p.filenames%0A root_gsystem_mock.Load.assert_called_once_with(custom_aalib)%0A%0A def test_init_with_indexed_filenames(self):%0A filename = 'a%5Bindex%5Db'%0A indices = %5B1, 2, 3%5D%0A p = AanetPump(filename=filename, indices=indices)%0A for index in indices:%0A assert %22a%22+str(index)+%22b%22 in p.filenames%0A
|
|
9f39ed48b6f745a96b5874bc87e306c01d3f016f
|
add 0.py
|
0.py
|
0.py
|
Python
| 0.999328
|
@@ -0,0 +1,43 @@
+if __name__ == %22__main__%22:%0A print 2**38%0A
|
|
0575be4316e930de71dce8c92d7be428d4565470
|
Add c.py
|
c.py
|
c.py
|
Python
| 0.997858
|
@@ -0,0 +1,63 @@
+%0Aclass C(object):%0A def c(self):%0A print(%22c%22)%0A%0AC().c()%0A
|
|
61cfa59b7881f8658a8eab13ba4bc50ac17ba6ce
|
Add sample plugin used by functional tests
|
nose2/tests/functional/support/lib/plugin_a.py
|
nose2/tests/functional/support/lib/plugin_a.py
|
Python
| 0
|
@@ -0,0 +1,149 @@
+from nose2 import events%0A%0Aclass PluginA(events.Plugin):%0A configSection = 'a'%0A%0A def __init__(self):%0A self.a = self.config.as_int('a', 0)%0A
|
|
2e6c7235c555799cc9dbb9d1fa7faeab4557ac13
|
Add stubby saved roll class
|
db.py
|
db.py
|
Python
| 0
|
@@ -0,0 +1,256 @@
+import sqlite3%0A%0A%0Aconnection = sqlite3.connect('data.db')%0A%0A%0Aclass SavedRoll:%0A @staticmethod%0A def save(user, name, args):%0A pass%0A%0A @staticmethod%0A def get(user, name):%0A pass%0A%0A @staticmethod%0A def delete(user, name):%0A pass%0A
|
|
85044ad914029d9b421b3492e828ad89a85b62a3
|
Create ept.py
|
ept.py
|
ept.py
|
Python
| 0.000002
|
@@ -0,0 +1,786 @@
+# -*- coding: utf-8 -*-%0Afrom TorCtl import TorCtl%0Aimport requests,json%0A%0Aproxies = %7B'http': 'socks5://127.0.0.1:9050','https': 'socks5://127.0.0.1:9050'%7D%0A%0Aclass TorProxy(object):%0A%0A%09def __init__(self,):%0A%09%09pass%0A%0A%09def connect(self, url, method):%0A%09%09r = getattr(requests, method)(url,proxies=proxies)%0A%09%09return r%0A%0A%09def new_ip(self,):%0A%09%09self.conn = TorCtl.connect(controlAddr=%22127.0.0.1%22, controlPort=9051, passphrase=%22RTFM_FODAO%22)%0A%09%09self.conn.send_signal(%22NEWNYM%22)%0A%09%09self.conn.close()%0A%0A%09def check_ip(self,):%0A%09%09self.url = %22http://ipinfo.io%22%0A%09%09self.r = requests.get(self.url,proxies=proxies)%0A%09%09try:%0A%09%09%09return json.loads(self.r.content)%5B'ip'%5D%0A%09%09except:%0A%09%09%09return 'Error to get your IP'%0A%0Aif __name__ == '__main__':%0A%09tor = TorProxy()%0A%09print tor.check_ip()%0A%09print tor.new_ip()%0A%09print tor.check_ip()%0A
|
|
062473c20e59f259d38edcd79e22d0d215b8f52f
|
Add file to store API Access keys
|
key.py
|
key.py
|
Python
| 0.000001
|
@@ -0,0 +1,182 @@
+consumer_key = '' # Enter your values here%0Aconsumer_secret = '' # Enter your values here%0Aaccess_token = '' # Enter your values here%0Aaccess_token_secret = '' # Enter your values here%0A
|
|
7f3411268e153c47edc77c681e14aef5747639de
|
use the subdir /httplib2, follow up for 10273
|
pwb.py
|
pwb.py
|
import sys,os
sys.path.append('.')
sys.path.append('externals')
sys.path.append('pywikibot/compat')
if "PYWIKIBOT2_DIR" not in os.environ:
os.environ["PYWIKIBOT2_DIR"] = os.path.split(__file__)[0]
sys.argv.pop(0)
if len(sys.argv) > 0:
if not os.path.exists(sys.argv[0]):
testpath = os.path.join(os.path.split(__file__)[0], 'scripts', sys.argv[0])
if os.path.exists(testpath):
sys.argv[0] = testpath
else:
testpath = testpath + '.py'
if os.path.exists(testpath):
sys.argv[0] = testpath
else:
raise Exception("%s not found!" % sys.argv[0])
sys.path.append(os.path.split(sys.argv[0])[0])
execfile(sys.argv[0])
else:
sys.argv.append('')
|
Python
| 0
|
@@ -56,16 +56,25 @@
xternals
+/httplib2
')%0D%0Asys.
|
c1efaebe51c723cd912701ea4cfbf3a9d67068f6
|
fix logging
|
bot.py
|
bot.py
|
from time import sleep
__author__ = 'kotov.a'
import options
import logging
import coloredlogs
import sys
logger = logging.getLogger('inbot')
level = logging.DEBUG
handler = logging.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
logger.addHandler(handler)
coloredlogs.install(level=logging.DEBUG)
def get_followed_by(api):
followed_by_list = []
followed_by_generator = api.user_followed_by(as_generator=True, max_pages=300)
for f in followed_by_generator:
followed_by_list.extend(f[0])
return followed_by_list
def get_follow(api):
follows_list = []
follows_generator = api.user_follows(as_generator=True, max_pages=300)
for f in follows_generator:
follows_list.extend(f[0])
return follows_list
def like_media(media_id, api):
logger.info('Like %s', media_id)
api.like_media(media_id)
sleep_custom()
def follow_user(user_id, api):
logger.info('Follow %s', user_id)
api.follow_user(user_id=user_id)
sleep_custom()
def sleep_custom():
duration = 40
logger.debug('Sleep %d', duration)
sleep(duration)
from instagram.client import InstagramAPI
api = InstagramAPI(access_token=options.ACCESS_TOKEN, client_secret=options.CLIENT_SECRET, client_ips="1.2.3.4")
logger.debug('Get account information...')
followed_by = get_followed_by(api)
sleep(10)
follow = get_follow(api)
sleep(5)
user_id = api.user().id
logger.info('Start. Followed by %d, follow %d. User id: %s', len(followed_by), len(follow), user_id)
sleep(5)
likes_count = 0
follows_count = 0
ignore_list = []
last_action_is_like = False
try:
for tag in options.TAGS:
logger.debug('Limits %s', api.x_ratelimit_remaining)
media = list(api.tag_recent_media(tag_name=tag, count=20))
for m in media[0]:
media_user_id = m.user.id
logger.debug('Skip media %s user %s previously followed', m.id, media_user_id)
if any(x for x in m.likes if x.id == user_id):
logger.debug('Skip media %s, previously liked', m.id)
continue
if any(x for x in follow if x.id == media_user_id):
logger.debug('Skip media %s, user %s previously followed', m.id, media_user_id)
continue
if any(x for x in followed_by if x.id == media_user_id):
logger.debug('Skip media %s, user %s is follower', m.id, media_user_id)
continue
if media_user_id in ignore_list:
logger.debug('Skip media %s, user %s handled in this session', m.id, media_user_id)
continue
if not last_action_is_like:
if likes_count < 30:
like_media(m.id, api)
likes_count += 1
ignore_list.append(media_user_id)
last_action_is_like = True
continue
else:
logger.warning('Likes limit exceed')
if last_action_is_like:
if follows_count < 20:
follow_user(media_user_id, api)
follows_count += 1
ignore_list.append(media_user_id)
last_action_is_like = False
continue
else:
logger.warning('Follows limit exceed')
if likes_count >= 30 and follows_count >= 20:
logger.info('Finish, likes %d, follows %d', likes_count, follows_count)
exit(0)
except Exception as e:
logger.exception(e)
finally:
logger.info('Total, likes: %d, follows: %d', likes_count, follows_count)
|
Python
| 0.000002
|
@@ -777,32 +777,45 @@
media_id, api):%0A
+ try:%0A
logger.info(
@@ -831,24 +831,28 @@
, media_id)%0A
+
api.like
@@ -864,32 +864,36 @@
a(media_id)%0A
+
+
sleep_custom()%0A%0A
@@ -887,24 +887,78 @@
ep_custom()%0A
+ except Exception as e:%0A logger.exception(e)
%0A%0Adef follow
|
81c722316d75e929d120f4d7139c499052a4e2fb
|
add cli program
|
cli.py
|
cli.py
|
Python
| 0
|
@@ -0,0 +1,1178 @@
+#!/usr/bin/env python%0A# -*- codeing: utf-8 -*-%0Aimport socket%0Aimport logging%0Aimport json%0A%0ALOG = logging.getLogger('DynamicLoadCmd')%0A%0A%0Adef main():%0A %0A sc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0A sc.connect(('127.0.0.1', 10807))%0A%0A while True:%0A line = raw_input('(ryu) ')%0A%0A if line == 'exit':%0A break%0A%0A elif line == 'list':%0A msg = json.dumps(%7B'cmd': 'list'%7D)%0A sc.sendall(msg)%0A buf = sc.recv(2048)%0A print buf%0A app_list = json.loads(buf)%0A app_id = 0%0A%0A for app_info in app_list:%0A print '%5B%2502d%5D%25s' %25 (app_id, app_info%5B'name'%5D),%0A%0A if app_info%5B'installed'%5D:%0A print '%5B%5C033%5B92minstalled%5C033%5B0m%5D'%0A else:%0A print ''%0A%0A app_id += 1%0A%0A elif 'install' in line:%0A argv = line.split(' ')%0A%0A if len(argv) %3C 2:%0A print 'install %5Bapp_id%5D'%0A continue%0A%0A app_id = int(argv%5B1%5D)%0A msg = json.dumps(%7B'cmd':'install', 'app_id': app_id%7D)%0A sc.sendall(msg)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
4f08f057c7e4cc8230a996d853892ab3eef36065
|
Add simple terminal-based version of rock-paper-scissors.
|
rps.py
|
rps.py
|
Python
| 0
|
@@ -0,0 +1,1037 @@
+from random import choice%0A%0Aclass RPSGame:%0A shapes = %5B'rock', 'paper', 'scissors'%5D%0A draws = %5B('rock', 'rock'), ('paper', 'paper'), ('scissors', 'scissors')%5D%0A first_wins = %5B('rock', 'scissors'), ('scissors', 'paper'), ('paper', 'rock')%5D%0A %0A def _evaluate(self, player_move, computer_move):%0A if (player_move, computer_move) in RPSGame.draws:%0A return %22Draw!%22%0A elif (player_move, computer_move) in RPSGame.first_wins:%0A return %22Player wins!%22%0A else:%0A return %22Computer wins!%22%0A%0A def play(self, rounds=1):%0A for i in range(rounds):%0A player_move = input(%22%5Brock,paper,scissors%5D: %22)%0A computer_move = choice(RPSGame.shapes)%0A winner = self._evaluate(player_move, computer_move)%0A print(20 * %22-%22)%0A print(%22You played: %25s%22 %25 player_move)%0A print(%22Computer played: %25s%22 %25 computer_move)%0A print(winner)%0A print(20 * %22-%22)%0A%0Aif __name__ == '__main__':%0A game = RPSGame()%0A game.play(rounds=10)%0A%0A
|
|
024b9dbfb3e34b5ff092ad86a1bec1e82ccfb9f9
|
Convert tests/test_elsewhere_twitter.py to use Harness & TestClient.
|
tests/test_elsewhere_twitter.py
|
tests/test_elsewhere_twitter.py
|
from gittip.testing import tip_graph
from gittip.elsewhere import twitter
def test_twitter_resolve_resolves():
with tip_graph(('alice', 'bob', 1, True, False, False, "twitter", "2345")):
expected = 'alice'
actual = twitter.resolve(u'alice')
assert actual == expected, actual
|
Python
| 0
|
@@ -9,15 +9,17 @@
tip.
-testing
+elsewhere
imp
@@ -23,24 +23,22 @@
import t
-ip_graph
+witter
%0Afrom gi
@@ -46,35 +46,110 @@
tip.
-elsewhere import twitter%0A%0A%0A
+models import Elsewhere%0Afrom gittip.testing import Harness%0A%0A%0Aclass TestElsewhereTwitter(Harness):%0A
def
@@ -182,90 +182,293 @@
ves(
+self
):%0A
-with tip_graph(('alice', 'bob', 1, True, False, False, %22twitter%22, %222345%22)):
+ alice = self.make_participant('alice')%0A alice_on_twitter = Elsewhere(platform='twitter', user_id=%221%22,%0A user_info=%7B'screen_name': 'alice'%7D)%0A alice.accounts_elsewhere.append(alice_on_twitter)%0A self.session.commit()%0A
%0A
|
3739819ed85a03520ad3152a569ad6cfb3dd7fb5
|
Add a used test.
|
lib/tagnews/tests/test_crimetype_tag.py
|
lib/tagnews/tests/test_crimetype_tag.py
|
Python
| 0.000001
|
@@ -0,0 +1,557 @@
+import tagnews%0A%0Aclass TestCrimetype():%0A @classmethod%0A def setup_method(cls):%0A cls.model = tagnews.CrimeTags()%0A%0A%0A def test_tagtext(self):%0A self.model.tagtext('This is example article text')%0A%0A%0A def test_tagtext_proba(self):%0A article = 'Murder afoul, someone has been shot!'%0A probs = self.model.tagtext_proba(article)%0A max_prob = probs.max()%0A max_type = probs.idxmax()%0A tags = self.model.tagtext(article,%0A prob_thresh=max_prob-0.001)%0A assert max_type in tags%0A
|
|
edc335e68d44c6a0c99499bc4416c55a6072232e
|
add proper test for govobj stuff
|
test/test_governance_methods.py
|
test/test_governance_methods.py
|
Python
| 0
|
@@ -0,0 +1,1227 @@
+import pytest%0Aimport os%0Aos.environ%5B'SENTINEL_ENV'%5D = 'test'%0Aimport sys%0Asys.path.append( os.path.join( os.path.dirname(__file__), '..', 'lib' ) )%0A%0A# NGM/TODO: setup both Proposal and Superblock, and insert related rows,%0A# including Events%0A%0Adef setup():%0A pass%0A #this is doog.%0A%0Adef teardown():%0A pass%0A #you SON OF A BITCH!%0A%0A# pw_event = PeeWeeEvent.get(%0A# (PeeWeeEvent.start_time %3C misc.get_epoch() ) &%0A# (PeeWeeEvent.error_time == 0) &%0A# (PeeWeeEvent.prepare_time == 0)%0A# )%0A#%0A# if pw_event:%0A# govobj = GovernanceObject()%0A# govobj.load(pw_event.governance_object_id)%0A%0A# setup/teardown?%0A%0A# Event model%0A%0A#govobj.get_prepare_command%0A%0A# GovernanceObject model%0A@pytest.fixture%0Adef governance_object():%0A from models import PeeWeeGovernanceObject%0A from governance import GovernanceObject%0A govobj = GovernanceObject()%0A #govobj.%0A return%0A%0Adef test_prepare_command(governance_object):%0A d = governance_object.get_dict()%0A assert type(d) == type(%7B%7D)%0A%0A fields = %5B 'parent_id', 'object_creation_time', 'object_hash',%0A 'object_parent_hash', 'object_name', 'object_type', 'object_revision',%0A 'object_data', 'object_fee_tx' %5D%0A%0A fields.sort()%0A sorted_keys = d.keys()%0A sorted_keys.sort()%0A assert sorted_keys == fields%0A
|
|
8c49123ccaf16a4513f8096475dd2b865cfee66f
|
Revert of Re-enable mobile memory tests. (https://codereview.chromium.org/414473002/)
|
tools/perf/benchmarks/memory.py
|
tools/perf/benchmarks/memory.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import memory
import page_sets
from telemetry import benchmark
class MemoryMobile(benchmark.Benchmark):
test = memory.Memory
page_set = page_sets.MobileMemoryPageSet
@benchmark.Disabled('android')
class MemoryTop25(benchmark.Benchmark):
test = memory.Memory
page_set = page_sets.Top25PageSet
@benchmark.Disabled('android')
class Reload2012Q3(benchmark.Benchmark):
tag = 'reload'
test = memory.Memory
page_set = page_sets.Top2012Q3PageSet
@benchmark.Disabled('android') # crbug.com/371153
class MemoryToughDomMemoryCases(benchmark.Benchmark):
test = memory.Memory
page_set = page_sets.ToughDomMemoryCasesPageSet
|
Python
| 0.000018
|
@@ -239,16 +239,67 @@
hmark%0A%0A%0A
+@benchmark.Disabled('android') # crbug.com/370977%0A
class Me
|
d36310c6316379086faaf6a29c0392a6ff5ab465
|
Simple sentence parser with a defined CFG
|
Natural_Language_Processing/simple_parser.py
|
Natural_Language_Processing/simple_parser.py
|
Python
| 0.997157
|
@@ -0,0 +1,442 @@
+import nltk%0Afrom nltk import CFG%0A%0Agrammar1 = CFG.fromstring(%22%22%22 S -%3E NP VP%0AVP -%3E V NP %7C V NP PP%0APP -%3E P NP%0AV -%3E %22saw%22 %7C %22ate%22 %7C %22walked%22%0ANP -%3E %22John%22 %7C %22Mary%22 %7C %22Bob%22 %7C Det N %7C Det N PP %7C N%0ADet -%3E %22a%22 %7C %22an%22 %7C %22the%22 %7C %22my%22%0AN -%3E %22man%22 %7C %22dog%22 %7C %22cat%22 %7C %22telescope%22 %7C %22park%22%0AP -%3E %22in%22 %7C %22on%22 %7C %22by%22 %7C %22with%22%0A%22%22%22)%0A%0Asent = %22Mary saw Bob%22.split()%0A%0Ard_parser = nltk.RecursiveDescentParser(grammar1)%0A%0Afor tree in rd_parser.parse(sent):%0A print tree%0A
|
|
baff0200dfbe5ac33949f2fa3cddca72912b3b09
|
add results.py
|
epac/results.py
|
epac/results.py
|
Python
| 0.000001
|
@@ -0,0 +1,773 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0ACreated on Fri May 17 16:37:54 2013%0A%0A@author: edouard.duchesnay@cea.fr%0A%22%22%22%0A%0A%0Aclass Results(dict):%0A TRAIN = %22tr%22%0A TEST = %22te%22%0A SCORE = %22score%22%0A PRED = %22pred%22%0A TRUE = %22true%22%0A SEP = %22_%22%0A%0A def __init__(self, **kwargs):%0A if kwargs:%0A self.add(**kwargs)%0A%0A def _c(self, k1, k2):%0A return k1 + self.SEP + k2%0A%0A def add(self, key2, suffix, score=None, pred=%5B%5D, true=%5B%5D):%0A tr = dict()%0A if score:%0A tr%5Bself._c(self.SCORE, suffix)%5D = score%0A if len(pred):%0A tr%5Bself._c(self.PRED, suffix)%5D = pred%0A if len(true):%0A tr%5Bself._c(self.TRUE, suffix)%5D = true%0A if key2 in self:%0A self%5Bkey2%5D.update(tr)%0A else:%0A self%5Bkey2%5D = tr
|
|
553ba87b8858c11b2c2778d35a3c6e3694304278
|
create the Spider of Turkey of McDonalds
|
locations/spiders/mcdonalds_tr.py
|
locations/spiders/mcdonalds_tr.py
|
Python
| 0
|
@@ -0,0 +1,2521 @@
+# -*- coding: utf-8 -*-%0Aimport scrapy%0Aimport json%0Aimport re%0Afrom locations.items import GeojsonPointItem%0A%0Aclass McDonaldsTRSpider(scrapy.Spider):%0A name = 'mcdonalds_tr'%0A allowed_domains = %5B'www.mcdonalds.com.tr'%5D%0A %0A def start_requests(self):%0A url = 'https://www.mcdonalds.com.tr/Content/WebService/ClientSiteWebService.asmx/GetRestaurantsV5'%0A # for state in STATES:%0A formdata = %7B%0A %22cityId%22: %220%22,%0A %22townId%22: %220%22,%0A %22Services%22: %22%22%0A %7D%0A%0A headers = %7B%0A 'Accept': 'application/json, text/javascript, */*; q=0.01',%0A 'Accept-Encoding': 'gzip, deflate, br',%0A 'Accept-Language': 'en-US,en;q=0.9',%0A 'Content-Type': 'application/json',%0A 'Origin': 'https://www.mcdonalds.com.tr',%0A 'Host':'www.mcdonalds.com.tr',%0A 'Referer': 'https://www.mcdonalds.com.tr/kurumsal/restoranlar',%0A 'X-Requested-With': 'XMLHttpRequest'%0A %7D%0A %0A yield scrapy.http.Request(%0A url, %0A self.parse,%0A method = 'POST',%0A body = json.dumps(formdata),%0A headers = headers,%0A )%0A%0A def normalize_time(self, time_str):%0A match = re.search(r'(%5B0-9%5D%7B1,2%7D):(%5B0-9%5D%7B1,2%7D)', time_str)%0A h, m = match.groups()%0A%0A return '%2502d:%2502d' %25 (%0A int(h) + 12 if int(h)%3C13 else int(h),%0A int(m),%0A )%0A%0A def store_hours(self, hour):%0A data = hour%5B0%5D%0A if not data%5B'Name'%5D:%0A return '24/7'%0A value = data%5B'Value'%5D.strip()%0A if value == %22-%22:%0A return None%0A%0A start = value.split(%22-%22)%5B0%5D.strip()%0A end = value.split(%22-%22)%5B1%5D.strip()%0A end = self.normalize_time(end)%0A return 'Mo-Su ' + start + ':' + end%0A%0A def parse(self, response):%0A results = json.loads(response.body_as_unicode())%0A results = results%5B'd'%5D%0A for data in results:%0A properties = %7B%0A 'city': data%5B'City'%5D,%0A 'ref': data%5B'ID'%5D,%0A 'phone': data%5B'Phone'%5D.strip(),%0A 'lon': data%5B'Longitude'%5D,%0A 'lat': data%5B'Latitude'%5D,%0A 'name': data%5B'Name'%5D,%0A 'addr_full': 
data%5B'Address'%5D,%0A 'state': data%5B'Town'%5D%0A %7D%0A%0A opening_hours = self.store_hours(data%5B'WorkingHours'%5D)%0A if opening_hours:%0A properties%5B'opening_hours'%5D = opening_hours%0A%0A yield GeojsonPointItem(**properties)%0A
|
|
0fef9ab4e7a70a5e53cf5e5ae91d7cc5fd8b91da
|
Create xml_grabber.py
|
grabbing/xml_grabber.py
|
grabbing/xml_grabber.py
|
Python
| 0.000008
|
@@ -0,0 +1,1559 @@
+%22%22%22XML TYPE%0A%3C?xml version=%221.0%22 encoding=%22utf-8%22?%3E%0A%3Crss xmlns:atom=%22http://www.w3.org/2005/Atom%22 version=%222.0%22%3E%0A%09%3Cchannel%3E%0A%09%09%3Ctitle%3EQ Blog%3C/title%3E%0A%09%09%3Clink%3Ehttp://agus.appdev.my.id/feed/%3C/link%3E%0A%09%09%3Cdescription%3ELatest Posts of Q%3C/description%3E%0A%09%09%3Catom:link href=%22http://agus.appdev.my.id/feed/%22 rel=%22self%22%3E%3C/atom:link%3E%0A%09%09%3Clanguage%3Een-us%3C/language%3E%0A%09%09%3ClastBuildDate%3EMon, 29 Jun 2015 12:49:38 -0000%3C/lastBuildDate%3E%0A%09%09%3Citem%3E%0A%09%09%09%3Ctitle%3ESample Post Kapal Pesiar%3C/title%3E%0A%09%09%09%3Clink%3Ehttp://agus.appdev.my.id/entry/sample-post-kapal-pesiar%3C/link%3E%0A%09%09%09%3Cdescription%3ESample Post Kapal Pesiar%3C/description%3E%0A%09%09%09%3Cguid%3Ehttp://agus.appdev.my.id/entry/sample-post-kapal-pesiar%3C/guid%3E%0A%09%09%3C/item%3E%0A%09%09%3Citem%3E%0A%09%09%09%3Ctitle%3ETest Post from user%3C/title%3E%0A%09%09%09%3Clink%3Ehttp://agus.appdev.my.id/entry/test-post-user%3C/link%3E%0A%09%09%09%3Cdescription%3ETest Post from user%3C/description%3E%0A%09%09%09%3Cguid%3Ehttp://agus.appdev.my.id/entry/test-post-user%3C/guid%3E%0A%09%09%3C/item%3E%0A%09%3C/channel%3E%0A%3C/rss%3E%0A%22%22%22%0A%0Aimport urllib%0Afrom bs4 import BeautifulSoup as BS%0A%0Aurl = 'http://agus.appdev.my.id/feed/'%0A%0Asoup = BeautifulSoup(url)%0Adef _getUrl_Image(url):%0A start = urllib.urlopen(url)%0A soup = BS(start)%0A all_link = soup.findAll('item', None)%0A for i in all_link:%0A item = str(i)+'%5Cn'%0A split = item.split('%3C')%0A title = split%5B2%5D%5B6:%5D%0A link = %22%3Ca href='%22+split%5B4%5D%5B6:%5D+%22'%3E%22+title+%22%3C/a%3E%22%0A print link%0A%0A_getUrl_Image(url)%0A%0A%22%22%22RESULT%0A%3Ca href='http://agus.appdev.my.id/entry/sample-post-kapal-pesiar'%3ESample Post Kapal Pesiar%3C/a%3E%0A%3Ca href='http://agus.appdev.my.id/entry/test-post-user'%3ETest Post from user%3C/a%3E%0A%22%22%22%0A
|
|
6b6f7d225633e9c6bd406de695a1e52ce830a14e
|
Create feature_util.py
|
feature_util.py
|
feature_util.py
|
Python
| 0.000001
|
@@ -0,0 +1,58 @@
+'''%0AContains methods to extract features for training%0A'''%0A
|
|
88ff76fbc9275a327e016e9aef09d4ab2c3647e9
|
test setup
|
Classes/test_Classes/test_State.py
|
Classes/test_Classes/test_State.py
|
Python
| 0.000001
|
@@ -0,0 +1,76 @@
+%22%22%22Attribute System unit tests.%22%22%22%0A%0Aimport pytest%0Afrom ..State import State%0A
|
|
ae86eb3f7a3d7b2a8289f30c8d3d312c459710fb
|
update code laplacian article
|
assets/codes/laplacian_filter.py
|
assets/codes/laplacian_filter.py
|
Python
| 0
|
@@ -0,0 +1,1243 @@
+import cv2%0Aimport numpy as np%0Afrom PIL import Image%0A%0Aimage = cv2.imread(%22output.jpg%22)%0Agray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)%0A%0Alaplacian0 = np.array((%5B0, 1, 0%5D,%0A %5B1, -4, 1%5D,%0A %5B0, 1, 0%5D), dtype=%22int%22)%0Alaplacian1 = np.array((%5B1, 1, 1%5D,%0A %5B1, -8, 1%5D,%0A %5B1, 1, 1%5D), dtype=%22int%22)%0Alaplacian2 = np.array((%5B1, 2, 1%5D,%0A %5B2, -12, 2%5D,%0A %5B1, 2, 1%5D), dtype=%22int%22)%0Alaplacian3 = np.array((%5B1, 0, 1%5D,%0A %5B4, -12, 4%5D,%0A %5B1, 0, 1%5D), dtype=%22int%22)%0Alaplacian4 = np.array((%5B1, 4, 1%5D,%0A %5B0, -12, 0%5D,%0A %5B1, 4, 1%5D), dtype=%22int%22)%0A%0AkernelBank = ((%22laplacian0%22, laplacian0),%0A (%22laplacian1%22, laplacian1),%0A (%22laplacian2%22, laplacian2),%0A (%22laplacian3%22, laplacian3),%0A (%22laplacian4%22, laplacian4))%0A%0AImage.fromarray(gray).show()%0A%0Afor (kernelName, kernel) in kernelBank:%0A opencvOutput = cv2.filter2D(gray, -1, kernel)%0A cv2.putText(opencvOutput, %0A kernelName,%0A (30,30), %0A cv2.FONT_HERSHEY_SIMPLEX, %0A 1,%0A (255,0,255))%0A Image.fromarray(opencvOutput).show()%0A
|
|
d892914381a3067fdd04d6d0af0aceda0c092039
|
test staff
|
staff/tests/test_staff.py
|
staff/tests/test_staff.py
|
Python
| 0.000005
|
@@ -0,0 +1,1001 @@
+%22%22%22Test sending emails.%22%22%22%0A%0Afrom happening.tests import TestCase%0Afrom model_mommy import mommy%0Afrom django.conf import settings%0A%0A%0Aclass TestStaff(TestCase):%0A%0A %22%22%22Test staff views.%22%22%22%0A%0A def setUp(self):%0A %22%22%22Set up users.%22%22%22%0A self.user = mommy.make(settings.AUTH_USER_MODEL, is_staff=True)%0A self.user.set_password(%22password%22)%0A self.user.save()%0A%0A self.non_staff_user = mommy.make(settings.AUTH_USER_MODEL)%0A self.non_staff_user.set_password(%22password%22)%0A self.non_staff_user.save()%0A%0A def test_dashboard(self):%0A %22%22%22Test dashboard loads only for staff.%22%22%22%0A self.client.login(username=self.user.username, password=%22password%22)%0A response = self.client.get(%22/staff/%22)%0A self.assertEquals(response.status_code, 200)%0A%0A self.client.login(username=self.non_staff_user.username,%0A password=%22password%22)%0A response = self.client.get(%22/staff/%22)%0A self.assertEquals(response.status_code, 302)%0A
|
|
a193f1d9b1816f72661254bba69c2c4a1e2c1b30
|
Add tests for google menu
|
tests/extensions/functional/tests/test_google_menu.py
|
tests/extensions/functional/tests/test_google_menu.py
|
Python
| 0
|
@@ -0,0 +1,1551 @@
+%22%22%22%0AGoogle Menu tests%0A%22%22%22%0A%0A%0Afrom base import BaseTouchscreenTest%0Aimport time%0Afrom base import MAPS_URL, ZOOMED_IN_MAPS_URL, Pose%0Afrom base import screenshot_on_error, make_screenshot%0Aimport re%0A%0Aclass TestGoogleMenu(BaseTouchscreenTest):%0A%0A @screenshot_on_error%0A def test_google_menu_is_visible(self):%0A self.browser.get(MAPS_URL)%0A morefun = self.browser.find_element_by_id('morefun')%0A assert morefun.is_displayed() is True%0A items = self.browser.find_element_by_id('morefun_items')%0A assert items.is_displayed() is False%0A%0A @screenshot_on_error%0A def test_google_items_are_visible_on_click(self):%0A self.browser.get(MAPS_URL)%0A morefun = self.browser.find_element_by_id('morefun')%0A morefun.click()%0A assert morefun.is_displayed() is True%0A items = self.browser.find_element_by_id('morefun_items')%0A assert items.is_displayed() is True%0A%0A @screenshot_on_error%0A def test_clicking_doodle_item(self):%0A %22Clicking on the doodle item should change the url to the doodles page%22%0A self.browser.get(ZOOMED_IN_MAPS_URL)%0A time.sleep(5)%0A morefun = self.browser.find_element_by_id('morefun')%0A morefun.click()%0A items = self.browser.find_element_by_id('morefun_items')%0A li_items = items.find_elements_by_tag_name('li')%0A assert len(li_items) == 2%0A doodle = li_items%5B1%5D%0A doodle.click()%0A%0A assert re.match(r'chrome-extension:%5C/%5C/%5Ba-z%5D+%5C/pages%5C/doodles.html',%0A self.browser.current_url)%0A%0A%0A%0A
|
|
26df96a0c772c70013cc7a027022e84383ccaee2
|
Add a helper script for converting -print-before-all output into a file based equivelent
|
utils/chunk-print-before-all.py
|
utils/chunk-print-before-all.py
|
Python
| 0.999981
|
@@ -0,0 +1,1221 @@
+#!/usr/bin/env python%0A# Given a -print-before-all -print-module-scope log from an opt invocation,%0A# chunk it into a series of individual IR files, one for each pass invocation.%0A# If the log ends with an obvious stack trace, try to split off a separate%0A# %22crashinfo.txt%22 file leaving only the valid input IR in the last chunk.%0A# Files are written to current working directory.%0A%0Aimport sys%0A%0Abasename = %22chunk-%22%0Achunk_id = 0%0A%0Adef print_chunk(lines):%0A global chunk_id%0A global basename%0A fname = basename + str(chunk_id) + %22.ll%22%0A chunk_id = chunk_id + 1%0A print %22writing chunk %22 + fname + %22 (%22 + str(len(lines)) + %22 lines)%22%0A with open(fname, %22w%22) as f:%0A f.writelines(lines)%0A%0Ais_dump = False%0Acur = %5B%5D%0Afor line in sys.stdin:%0A if line.startswith(%22*** IR Dump Before %22) and len(cur) != 0:%0A print_chunk(cur);%0A cur = %5B%5D%0A cur.append(%22; %22 + line)%0A elif line.startswith(%22Stack dump:%22):%0A print_chunk(cur);%0A cur = %5B%5D%0A cur.append(line)%0A is_dump = True%0A else:%0A cur.append(line)%0A%0Aif is_dump:%0A print %22writing crashinfo.txt (%22 + str(len(cur)) + %22 lines)%22%0A with open(%22crashinfo.txt%22, %22w%22) as f:%0A f.writelines(cur)%0Aelse:%0A print_chunk(cur);%0A
|
|
cd08fb72fea040d31394435bc6c1892bc208bcc0
|
Add sumclip.py for WPA analysis
|
bin/sumclip.py
|
bin/sumclip.py
|
Python
| 0
|
@@ -0,0 +1,1947 @@
+# Copyright 2016 Bruce Dawson. All Rights Reserved.%0D%0A#%0D%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0D%0A# you may not use this file except in compliance with the License.%0D%0A# You may obtain a copy of the License at%0D%0A#%0D%0A# http://www.apache.org/licenses/LICENSE-2.0%0D%0A#%0D%0A# Unless required by applicable law or agreed to in writing, software%0D%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0D%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0D%0A# See the License for the specific language governing permissions and%0D%0A# limitations under the License.%0D%0A%0D%0A%22%22%22%0D%0ASum data from the clipboard. This script reads lines of data from the clipboard,%0D%0Aconverts each line to a python float (double) and sums them, also printing other%0D%0Astatistics.%0D%0A%0D%0AThis is useful for summing columns of WPA (Windows Performance Analyzer) data%0D%0Afrom ETW traces. Just select all data, control-clicking as needed to deselect%0D%0Aparticular rows, then right-click in the column-%3E Copy Other-%3E Copy Column%0D%0ASelection. Then run this tool. 
This tool assumes that commas can be discarded%0D%0Aand will fail in many non-English locales.%0D%0A%22%22%22%0D%0A%0D%0Afrom __future__ import print_function%0D%0A%0D%0Aimport sys%0D%0Aimport win32clipboard%0D%0A%0D%0Adef main():%0D%0A win32clipboard.OpenClipboard()%0D%0A data = win32clipboard.GetClipboardData()%0D%0A win32clipboard.CloseClipboard()%0D%0A%0D%0A sum = 0%0D%0A min = 1e100%0D%0A max = 0%0D%0A count = 0%0D%0A missed_count = 0%0D%0A for line in data.splitlines():%0D%0A try:%0D%0A val = float(line.replace(',', ''))%0D%0A count += 1%0D%0A sum += val%0D%0A if val %3C min:%0D%0A min = val%0D%0A if val %3E max:%0D%0A max = val%0D%0A except:%0D%0A missed_count += 1%0D%0A%0D%0A if count %3E 0:%0D%0A print(%22Found %25d values, sum is %251.3f, min %251.3f, avg %251.3f, max %251.3f.%22 %25 (count, sum, min, sum / count, max))%0D%0A if missed_count %3E 0:%0D%0A print(%22Found %25d non-numeric values%22 %25 missed_count)%0D%0A%0D%0A%0D%0Aif __name__ == '__main__':%0D%0A sys.exit(main())%0D%0A
|
|
34317172bc8b0cf6ec512181e7fac30bc4804cea
|
Create goingLoopyWithPython.py
|
goingLoopyWithPython.py
|
goingLoopyWithPython.py
|
Python
| 0.00004
|
@@ -0,0 +1,560 @@
+# date: 11/09/15%0A# username: A1fus%0A# name: Alfie Bowman%0A# description: Going Loopy with Python%0A%0Alines = 0 #defines variable%0Awhile lines %3C50: #causes Python to do anything indented until the condition is met%0A print(%22I will not mess about in Computer Science lessons%22) #prints the str%0A lines = lines + 1 #adds 1 to the variable%0A%0A%0Avalue = 1 #Aspire%0Awhile value %3C 101:%0A print(value)%0A value = value + 1%0A%0A%0AyourNumber = int(input(%22Pick a number: %22)) #Aspire+%0Awhile yourNumber %3C 101:%0A print(yourNumber)%0A yourNumber = yourNumber + 1%0A
|
|
3b3be788b4d414e5828b985300e4e5cca43afdff
|
initialize listfield before __set__
|
modularodm/fields/ListField.py
|
modularodm/fields/ListField.py
|
from ..fields import Field, List
import copy
class ListField(Field):
    """A field that stores a homogeneous list of another field type.

    Wraps a single field instance (e.g. a ForeignField) and exposes it as a
    list-valued descriptor.  Descriptor data is stored as the wrapped field's
    ``_list_class`` (a MutableSequence subclass).
    """

    def __init__(self, field_instance):
        """Create a list field wrapping ``field_instance``.

        :param field_instance: Field describing the type of each element.
        :raises Exception: If the wrapped field's default is truthy but not
            iterable; the default for a list field must itself be a list.
        """
        super(self.__class__, self).__init__(list=True)

        # ListField is a list of the following (e.g., ForeignFields)
        self._field_instance = field_instance

        # Descriptor data is this type of list
        self._list_class = self._field_instance._list_class

        # Descriptor data is this type of list object, instantiated as our
        # default
        if self._field_instance._default and not hasattr(self._field_instance._default, '__iter__'):
            raise Exception(
                'Default value for list fields must be a list; received <{0}>'.format(
                    repr(self._field_instance._default)
                )
            )
        self._default = self._list_class(self._field_instance._default, field_instance=self._field_instance)

    def __set__(self, instance, value):
        """Assign ``value`` to this field on ``instance``.

        Typed lists are stored as-is; other iterables are copied into a
        freshly constructed typed list; scalars are stored directly.
        """
        if isinstance(value, self._default.__class__):
            self.data[instance] = value
        elif hasattr(value, '__iter__'):
            # Bug fix: create the typed list before extending it.  The
            # original code extended self.data[instance] directly, which
            # failed (KeyError) when no list had been stored for this
            # instance yet.
            self.data[instance] = self._list_class(field_instance=self._field_instance)
            self.data[instance].extend(value)
        else:
            self.data[instance] = value

    def to_storage(self, value):
        '''Translate a List (MutableSequence) into its storage form.

        value will come in as a List (MutableSequence); each element is
        delegated to the wrapped field's to_storage.  Falsy input yields [].
        '''
        if value:
            return [self._field_instance.to_storage(i) for i in value]
        return []

    def on_after_save(self, parent, old_stored_data, new_value):
        """Fan out the wrapped field's after-save hook over the list diff.

        Computes which elements were added and which were removed relative
        to the previously stored data, then invokes the wrapped field's
        on_after_save once per change (additions with old=None, removals
        with new=None).  No-op if the wrapped field has no such hook.
        """
        if not hasattr(self._field_instance, 'on_after_save'):
            return
        if new_value and not old_stored_data:
            additions = new_value
            removes = []
        elif old_stored_data and not new_value:
            additions = []
            removes = old_stored_data
        elif old_stored_data and new_value:
            # Compare in storage form, since old_stored_data is stored data.
            additions = [i for i in new_value if self._field_instance.to_storage(i) not in old_stored_data]
            removes = [i for i in old_stored_data if i not in new_value]
        else:
            # raise Exception('There shouldn\'t be a diff in the first place.')
            # todo: discuss -- this point can be reached when the object is
            # not loaded and the new value is an empty list
            additions = []
            removes = []
        for i in additions:
            self._field_instance.on_after_save(parent, None, i)
        for i in removes:
            self._field_instance.on_after_save(parent, i, None)

    @property
    def base_class(self):
        """Base class of the wrapped field, or None if unavailable."""
        if self._field_instance is None:
            return
        if not hasattr(self._field_instance, 'base_class'):
            return
        return self._field_instance.base_class
|
Python
| 0.000001
|
@@ -1062,32 +1062,120 @@
e, '__iter__'):%0A
+ self.data%5Binstance%5D = self._list_class(field_instance=self._field_instance)%0A
self
|
828b7614c06ab4879040d5e289b4f12659c96b7e
|
Fix error preventing use of custom converter
|
brte/engine.py
|
brte/engine.py
|
if "bpy" in locals():
import imp
imp.reload(socket_api)
imp.reload(_converters)
imp.reload(processors)
else:
import bpy
from . import socket_api
from . import converters as _converters
from . import processors
import os
import socket
import struct
import subprocess
import sys
import time
import collections
import bpy
import mathutils
from OpenGL.GL import *
# Names of the bpy.data collections that are tracked for changes by default.
# Commented-out entries are data types deliberately left unwatched here.
DEFAULT_WATCHLIST = [
    #"actions",
    #"armatures",
    "cameras",
    "images",
    "lamps",
    "materials",
    "meshes",
    "objects",
    "scenes",
    #"sounds",
    #"speakers",
    "textures",
    #"worlds",
]
class _BaseFunc:
    """No-op callable.

    Presumably intended as a base/default hook whose subclasses override
    __call__ to process a data set — not referenced elsewhere in this view.
    """
    def __call__(self, data_set):
        # Intentionally does nothing.
        pass
ViewportTuple = collections.namedtuple('Viewport', ('height', 'width'))
def get_collection_name(collection):
    """Derive a short collection name (e.g. "meshes") from a bpy.data
    collection by stripping the "BlendData" prefix from its RNA type's
    class name and lower-casing the remainder.
    """
    rna_class_name = type(collection.rna_type).__name__
    return rna_class_name.replace("BlendData", "").lower()
class RealTimeEngine():
    """Framework bridging Blender's render-engine API to an external
    real-time renderer.

    Watches a set of bpy.data collections for additions, removals, and
    updates, converts the accumulated deltas with a converter object, and
    hands the converted data to a processor whose image buffer is blitted
    into the viewport via OpenGL.
    """

    bl_idname = 'RTE_FRAMEWORK'
    bl_label = "Real Time Engine Framework"

    def __init__(self, **kwargs):
        """Initialize state, register the per-frame update handler.

        Keyword arguments:
            converter: object with a convert(add, update, remove, view, cb)
                method; defaults to _converters.BTFConverter().
            processor: object with process_data()/update(); defaults to a
                DummyProcessor backed by a triple-buffered display.
            watch_list: iterable of bpy.data collection names to track;
                defaults to DEFAULT_WATCHLIST.
        """
        # Display image
        self.width = 1
        self.height = 1
        self.clock = time.perf_counter()

        self.draw_lock = False

        self.override_context = None

        if 'converter' in kwargs:
            # Bug fix: read the caller-supplied converter from kwargs.  The
            # previous code referenced a bare name `converter`, raising
            # NameError whenever a custom converter was passed.
            self.converter = kwargs['converter']
        else:
            self.converter = _converters.BTFConverter()

        if 'processor' in kwargs:
            self.processor = kwargs['processor']
        else:
            self.display = processors.DoubleBuffer(3, self.draw_callback)
            self.processor = processors.DummyProcessor(self.display)

        # Per-collection change sets accumulated between main_update calls.
        self.remove_delta = {}
        self.add_delta = {}
        self.update_delta = {}
        self.view_delta = {}

        watch_list = kwargs['watch_list'] if 'watch_list' in kwargs else DEFAULT_WATCHLIST
        self._watch_list = [getattr(bpy.data, i) for i in watch_list]

        # One tracking set per watched collection, used to diff add/remove.
        self._tracking_sets = {}
        for collection in self._watch_list:
            collection_name = get_collection_name(collection)
            self._tracking_sets[collection_name] = set()

        # Cached view state, used to emit view deltas only on change.
        self._old_vmat = None
        self._old_pmat = None
        self._old_viewport = None

        def main_loop(scene):
            # Drive main_update() from Blender's scene-update handler;
            # deregisters itself once this engine has been garbage collected
            # (the weak reference to self then raises ReferenceError).
            try:
                new_time = time.perf_counter()
                dt = new_time - self.clock
                self.clock = new_time
                self.main_update(dt)
            except ReferenceError:
                bpy.app.handlers.scene_update_post.remove(main_loop)
        bpy.app.handlers.scene_update_post.append(main_loop)

        self.tex = glGenTextures(1)

    def view_update(self, context):
        """ Called when the scene is changed """
        for collection in self._watch_list:
            collection_name = get_collection_name(collection)
            collection_set = set(collection)
            tracking_set = self._tracking_sets[collection_name]

            # Check for new items
            add_set = collection_set - tracking_set
            self.add_delta[collection_name] = add_set
            tracking_set |= add_set

            # Check for removed items
            remove_set = tracking_set - collection_set
            self.remove_delta[collection_name] = remove_set
            tracking_set -= remove_set

            # Check for updates
            update_set = {item for item in collection if item.is_updated}
            self.update_delta[collection_name] = update_set

    def view_draw(self, context):
        """ Called when viewport settings change """
        self.override_context = context.copy()

        # Record any view changes, then draw the processor's image buffer
        # as a full-viewport textured quad.
        region = context.region
        view = context.region_data

        vmat = view.view_matrix.copy()
        vmat_inv = vmat.inverted()
        pmat = view.perspective_matrix * vmat_inv

        viewport = [region.x, region.y, region.width, region.height]

        self.update_view(vmat, pmat, viewport)

        glPushAttrib(GL_ALL_ATTRIB_BITS)
        glDisable(GL_DEPTH_TEST)
        glDisable(GL_CULL_FACE)
        glDisable(GL_STENCIL_TEST)
        glEnable(GL_TEXTURE_2D)

        glClearColor(0, 0, 1, 1)
        glClear(GL_COLOR_BUFFER_BIT)

        # Identity matrices: the quad is drawn directly in clip space.
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()

        glActiveTexture(GL_TEXTURE0)
        glBindTexture(GL_TEXTURE_2D, self.tex)
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, self.width, self.height, 0, GL_RGB,
                     GL_UNSIGNED_BYTE, self.processor.image_buffer)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)

        glBegin(GL_QUADS)
        glColor3f(1.0, 1.0, 1.0)
        glTexCoord2f(0.0, 0.0)
        glVertex3i(-1, -1, 0)
        glTexCoord2f(1.0, 0.0)
        glVertex3i(1, -1, 0)
        glTexCoord2f(1.0, 1.0)
        glVertex3i(1, 1, 0)
        glTexCoord2f(0.0, 1.0)
        glVertex3i(-1, 1, 0)
        glEnd()

        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()

        glPopAttrib()

    def update_view(self, view_matrix, projection_matrix, viewport):
        """Record view/projection/viewport changes into view_delta.

        Only values that differ from the previously seen ones are recorded,
        so unchanged frames produce no delta.
        """
        if view_matrix != self._old_vmat:
            self._old_vmat = view_matrix
            self.view_delta['view_matrix'] = view_matrix

        if projection_matrix != self._old_pmat:
            self._old_pmat = projection_matrix
            self.view_delta['projection_matrix'] = projection_matrix

        if viewport != self._old_viewport:
            self._old_viewport = viewport
            self.view_delta['viewport'] = ViewportTuple(width=viewport[2], height=viewport[3])

    def draw_callback(self):
        '''Forces a view_draw to occur'''
        self.tag_redraw()

    def main_update(self, dt):
        """Flush accumulated deltas through the converter and tick the
        processor.

        :param dt: Seconds elapsed since the previous update.
        """
        def converter_callback(data):
            # Converted data is handed straight to the processor.
            self.processor.process_data(data)

        if self.add_delta or self.update_delta or self.view_delta:
            self.converter.convert(self.add_delta, self.update_delta, self.remove_delta, self.view_delta, converter_callback)
            self.add_delta.clear()
            self.update_delta.clear()
            self.remove_delta.clear()
            self.view_delta.clear()

        self.processor.update(dt)
|
Python
| 0
|
@@ -1311,16 +1311,24 @@
erter =
+kwargs%5B'
converte
@@ -1328,16 +1328,18 @@
onverter
+'%5D
%0A
|
a90c05355c2735c0a8d2b87d12b143d91f801660
|
make timeline of training output
|
bsd/epochizer.py
|
bsd/epochizer.py
|
Python
| 0.000096
|
@@ -0,0 +1,62 @@
+'''Group ims'''%0A%0Aimport os%0Aimport sys%0Aimport time%0A%0A%0A%0Aif __name
|
|
d3fa9df4c4f91ddb42954ea125ed69c2380ada62
|
create python version of list_change_file_hashes
|
src/list_changed_file_hashes.py
|
src/list_changed_file_hashes.py
|
Python
| 0.000003
|
@@ -0,0 +1,1108 @@
+from git import Repo%0Aimport os%0A%0Aclass CommitList:%0A%0A def __init__(self, repo):%0A self.repo = repo%0A%0A def print_all_blob_hashes(self):%0A hashes = set()%0A for commit in self.repo.iter_commits(self.repo.head):%0A for p in commit.parents:%0A diff = p.diff(commit)%0A for change in diff.iter_change_type(%22M%22):%0A if change.b_blob.name.endswith(%22.java%22):%0A hashes.add(change.b_blob.hexsha)%0A for change in diff.iter_change_type(%22A%22):%0A if change.b_blob.name.endswith(%22.java%22):%0A hashes.add(change.b_blob.hexsha)%0A%0A for h in hashes:%0A print h%0A%0Aif __name__ == '__main__':%0A import argparse%0A%0A parser = argparse.ArgumentParser(description='Edit distance calculator')%0A parser.add_argument('org_git_dir')%0A%0A args = parser.parse_args()%0A %0A git_dir = args.org_git_dir%0A if not os.path.isdir(git_dir):%0A print %22%25s is not a directory%22 %25 (git_dir)%0A%0A repo = Repo(git_dir)%0A %0A cl = CommitList(repo)%0A cl.print_all_blob_hashes()%0A
|
|
5f3f2ce52569eb3ae57ab3e4a2eaff29fc0d6522
|
add pyqt demo
|
study/python/pyqt/demo.py
|
study/python/pyqt/demo.py
|
Python
| 0
|
@@ -0,0 +1,1625 @@
+from PyQt5.QtWidgets import QMainWindow, QPushButton , QWidget , QMessageBox, QApplication, QHBoxLayout%0Aimport sys, sqlite3%0A%0Aclass WinForm(QMainWindow):%0A def __init__(self, parent=None):%0A super(WinForm, self).__init__(parent)%0A button1 = QPushButton('%E6%8F%92%E5%85%A5%E6%95%B0%E6%8D%AE')%0A button2 = QPushButton('%E6%98%BE%E7%A4%BA%E6%95%B0%E6%8D%AE')%0A%0A button1.clicked.connect(lambda: self.onButtonClick(1))%0A button2.clicked.connect(lambda: self.onButtonClick(2))%0A%0A layout = QHBoxLayout()%0A layout.addWidget(button1)%0A layout.addWidget(button2)%0A%0A main_frame = QWidget()%0A main_frame.setLayout(layout)%0A self.setCentralWidget(main_frame)%0A%0A def onButtonClick(self, n):%0A if n == 1:%0A query = 'INSERT INTO users(name, phone, age, remark) VALUES(?, ?, ?, ?)'%0A%0A curs.execute(query, (%22test%22, %2212312312312%22, 12, %22text%22 ))%0A conn.commit()%0A%0A print('Button %7B0%7D %E8%A2%AB%E6%8C%89%E4%B8%8B%E4%BA%86'.format(n))%0A QMessageBox.information(self, %22%E4%BF%A1%E6%81%AF%E6%8F%90%E7%A4%BA%E6%A1%86%22, 'Button %7B0%7D clicked'.format(n))%0A%0A if n == 2:%0A print('hhhh %7B0%7D %E8%A2%AB%E6%8C%89%E4%B8%8B%E4%BA%86'.format(n))%0A QMessageBox.information(self, %22%E4%BF%A1%E6%81%AF%E6%8F%90%E7%A4%BA%E6%A1%86%22, 'Button %7B0%7D clicked'.format(n))%0A%0A%0A%0Aif __name__ == %22__main__%22:%0A app = QApplication(sys.argv)%0A%0A conn = sqlite3.connect(%22user.db%22)%0A curs = conn.cursor()%0A%0A curs.execute('''%0A CREATE TABLE IF NOT EXISTS users(%0A id INTEGER PRIMARY KEY AUTOINCREMENT,%0A name TEXT,%0A phone TEXT,%0A age INTEGER,%0A remark TEXT%0A )%0A ''')%0A%0A conn.commit()%0A%0A form = WinForm()%0A form.show()%0A sys.exit(app.exec_())%0A
|
|
bf8328ff9b020bd3b99268744f86f94db2924011
|
Create process.py
|
process.py
|
process.py
|
Python
| 0.000002
|
@@ -0,0 +1,416 @@
+rm 1_*%0Arm 21_*%0Ahead *error.txt %3E%3E info_gg_unpaired.txt%0Asource /mnt/common/epfl/etc/bbcf_bashrc ### para llamar a todos los programas de bbcf%0Amodule add UHTS/Analysis/samtools/1.2;%0Apython -c %22from bbcflib import mapseq%22%0Afor i in %7B2..48%7D%0Ado%0A %09add_nh_flag %22$i%22_16S_gg.sam %22$i%22_SE_gg.bam%0A samtools sort %22$i%22_SE_gg.bam %22$i%22_SE_gg_s%0A samtools view -F0x4 $i%22_SE_gg_s.bam %7C cut -f 3 %7C uniq -c %3E%3E %22$i%22_counts.txt%0Adone%0A
|
|
93589c7e139d3af4b0a949f107fc5e20ed69fee4
|
add atop stats library
|
cbagent/collectors/libstats/atopstats.py
|
cbagent/collectors/libstats/atopstats.py
|
Python
| 0
|
@@ -0,0 +1,1988 @@
+from uuid import uuid4%0A%0Afrom fabric.api import run%0A%0Afrom systemstats import SystemStats, multi_task, single_task%0A%0A%0Auhex = lambda: uuid4().hex%0A%0A%0Aclass AtopStats(SystemStats):%0A%0A def __init__(self, hosts, user, password):%0A super(AtopStats, self).__init__(hosts, user, password)%0A self.logfile = %22/tmp/%7B0%7D.atop%22.format(uhex())%0A%0A self._base_cmd =%5C%0A %22d=%60date +%25H:%25M%60 && atop -r %7B0%7D -b $d -e $d%22.format(self.logfile)%0A self._cpu_column = self._get_cpu_column()%0A self._vsize_column = self._get_vsize_column()%0A self._rss_column = self._get_rss_column()%0A%0A @multi_task%0A def stop_atop(self):%0A run(%22killall -q atop%22)%0A run(%22rm -rf %7B0%7D%22.format(self.logfile))%0A%0A @multi_task%0A def start_atop(self):%0A run(%22nohup atop -a -w %7B0%7D 5 %3E /dev/null 2%3E&1 &%22.format(self.logfile))%0A%0A def is_atop_running(self):%0A raise NotImplementedError%0A%0A def restart_atop(self):%0A self.stop_atop()%0A self.start_atop()%0A%0A @single_task%0A def _get_vsize_column(self):%0A output = run(%22atop -m 1 1 %7C grep PID%22)%0A return output.split().index(%22VSIZE%22)%0A%0A @single_task%0A def _get_rss_column(self):%0A output = run(%22atop -m 1 1 %7C grep PID%22)%0A return output.split().index(%22RSIZE%22)%0A%0A @single_task%0A def _get_cpu_column(ip):%0A output = run(%22atop 1 1 %7C grep PID%22)%0A return output.split().index(%22CPU%22)%0A%0A @multi_task%0A def get_process_cpu(self, process):%0A cmd = self._base_cmd + %22%7C grep %7B0%7D%22.format(process)%0A output = run(cmd)%0A return output.split()%5Bself._cpu_column%5D%0A%0A @multi_task%0A def get_process_vzize(self, process):%0A cmd = self._base_cmd + %22 -m %7C grep %7B0%7D%22.format(process)%0A output = run(cmd)%0A return output.split()%5Bself._vsize_column%5D%0A%0A @multi_task%0A def get_process_rss(self, process):%0A cmd = self._base_cmd + %22 -m %7C grep %7B0%7D%22.format(process)%0A output = run(cmd)%0A return output.split()%5Bself._rss_column%5D%0A
|
|
a04942d1073ed5d031b2f85e412578d7e33d867a
|
set default output to png
|
sympy/printing/preview.py
|
sympy/printing/preview.py
|
import os
import time
import tempfile
from latex import latex
def preview(expr, output='ps', viewer=None, euler=True):
    """View expression in PNG, DVI, PostScript or PDF form.

    This will generate a LaTeX representation of the given expression
    and compile it using the available TeX distribution. Then it will
    run the appropriate viewer for the given output format or use the
    user-defined one. If you prefer not to use an external viewer
    then you can use the combination of 'png' output and the 'pyglet'
    viewer. By default PostScript output is generated.

    By default pretty Euler fonts are used for typesetting (they
    were used to typeset the well known "Concrete Mathematics"
    book). If you prefer default AMS fonts or your system lacks the
    'eulervm' LaTeX package then unset the 'euler' keyword argument.

    To use viewer auto-detection, let's say for 'png' output, issue::

        >> from sympy import *
        >> x, y = symbols("xy")

        >> preview(x + y, output='png')

    This will choose 'pyglet' by default. To select a different one::

        >> preview(x + y, output='png', viewer='gimp')

    The 'png' format is considered special. For all other formats
    the rules are slightly different. As an example we will take
    'dvi' output format. If you would run::

        >> preview(x + y, output='dvi')

    then 'view' will look for available 'dvi' viewers on your
    system (predefined in the function, so it will try evince
    first, then kdvi and xdvi). If nothing is found you will
    need to set the viewer explicitly::

        >> preview(x + y, output='dvi', viewer='superior-dvi-viewer')

    This will skip auto-detection and will run the user-specified
    'superior-dvi-viewer'. If 'view' fails to find it on
    your system it will gracefully raise an exception.

    Currently this depends on pexpect, which is not available for Windows.
    """

    # we don't want to depend on anything not in the
    # standard library with SymPy by default
    import pexpect

    # Viewers that are handled in-process rather than spawned externally.
    special = [ 'pyglet' ]

    if viewer is None:
        if output == "png":
            viewer = "pyglet"
        else:
            # sorted in order from most pretty to most ugly
            # very discussable, but indeed 'gv' looks awful :)
            candidates = {
                "dvi" : [ "evince", "okular", "kdvi", "xdvi" ],
                "ps" : [ "evince", "okular", "gsview", "gv" ],
                "pdf" : [ "evince", "okular", "kpdf", "acroread", "xpdf", "gv" ],
            }

            # Pick the first candidate viewer present on the system.
            try:
                for candidate in candidates[output]:
                    if pexpect.which(candidate):
                        viewer = candidate
                        break
                else:
                    raise SystemError("No viewers found for '%s' output format." % output)
            except KeyError:
                raise SystemError("Invalid output format: %s" % output)
    else:
        # Explicit viewers must exist on PATH, unless handled in-process.
        if viewer not in special and not pexpect.which(viewer):
            raise SystemError("Unrecognized viewer: %s" % viewer)

    # Minimal LaTeX document wrapping the expression; %s is filled with
    # the latex() rendering below.  The euler variant loads Euler fonts.
    if not euler:
        format = r"""\documentclass[12pt]{article}
\usepackage{amsmath}
\begin{document}
\pagestyle{empty}
%s
\vfill
\end{document}
"""
    else:
        format = r"""\documentclass[12pt]{article}
\usepackage{amsmath}
\usepackage{eulervm}
\begin{document}
\pagestyle{empty}
%s
\vfill
\end{document}
"""

    if viewer == "pyglet":
        # import pyglet before we change the current dir, because after that it
        # would fail:
        from sympy.thirdparty import import_thirdparty
        pyglet = import_thirdparty("pyglet")

    # Write the .tex source to a temp path and compile it in the temp dir.
    tmp = tempfile.mktemp()

    tex = open(tmp + ".tex", "w")
    tex.write(format % latex(expr, inline=False))
    tex.close()

    cwd = os.getcwd()
    os.chdir(tempfile.gettempdir())

    # NOTE(review): if latex or a converter fails, we raise before the
    # os.chdir(cwd) at the end, leaving the process in the temp dir.
    if os.system("latex -halt-on-error %s.tex" % tmp) != 0:
        raise SystemError("Failed to generate DVI output.")

    os.remove(tmp + ".tex")
    os.remove(tmp + ".aux")
    os.remove(tmp + ".log")

    # Convert the DVI into the requested format, if it isn't DVI itself.
    if output != "dvi":
        command = {
            "ps" : "dvips -o %s.ps %s.dvi",
            "pdf" : "dvipdf %s.dvi %s.pdf",
            "png" : "dvipng -T tight -z 9 " + \
                    "--truecolor -o %s.png %s.dvi",
        }

        try:
            if os.system(command[output] % (tmp, tmp)) != 0:
                raise SystemError("Failed to generate '%s' output." % output)
            else:
                os.remove(tmp + ".dvi")
        except KeyError:
            raise SystemError("Invalid output format: %s" % output)

    src = "%s.%s" % (tmp, output)

    if viewer == "pyglet":
        # In-process display: load the PNG and show it in a pyglet window
        # until the user closes it (window close, Q, or Escape).
        from pyglet import window, image, gl
        from pyglet.window import key

        if output == "png":
            from pyglet.image.codecs.png import PNGImageDecoder
            img = image.load(src, decoder=PNGImageDecoder())
        else:
            raise SystemError("pyglet preview works only for 'png' files.")

        # Margin (in pixels) around the rendered image.
        offset = 25

        win = window.Window(
            width = img.width + 2*offset,
            height = img.height + 2*offset,
            caption = "sympy",
            resizable = False
        )
        win.set_vsync(False)

        try:
            def on_close():
                win.has_exit = True
            win.on_close = on_close

            def on_key_press(symbol, modifiers):
                if symbol in [key.Q, key.ESCAPE]:
                    on_close()
            win.on_key_press = on_key_press

            def on_expose():
                # Clear to white and center the image in the window.
                gl.glClearColor(1.0, 1.0, 1.0, 1.0)
                gl.glClear(gl.GL_COLOR_BUFFER_BIT)

                img.blit(
                    (win.width - img.width) / 2,
                    (win.height - img.height) / 2
                )
            win.on_expose = on_expose

            while not win.has_exit:
                win.dispatch_events()
                win.flip()
        except KeyboardInterrupt:
            pass

        win.close()
    else:
        # External viewer: spawn detached, give it time to read the file,
        # then remove the temp output.  NOTE(review): '&>' is a bash-ism.
        os.system("%s %s &> /dev/null &" % (viewer, src))
        time.sleep(2) # wait for the viewer to read data
        os.remove(src)

    os.chdir(cwd)
|
Python
| 0.000001
|
@@ -85,17 +85,18 @@
utput='p
-s
+ng
', viewe
@@ -544,26 +544,19 @@
default
-PostScript
+png
output
|
8c65226b79ad0f7ac3487a117298498cff4b23be
|
Update cherry-pickup.py
|
Python/cherry-pickup.py
|
Python/cherry-pickup.py
|
# Time: O(n^3)
# Space: O(n^2)
class Solution(object):
    def cherryPickup(self, grid):
        """Return the max cherries collectible by a round trip on the grid.

        Equivalent formulation: two simultaneous paths from (0, 0) to
        (n-1, n-1) moving only right/down; a cherry cell (1) counts once
        even if both paths visit it; -1 cells are blocked.

        :type grid: List[List[int]]
        :rtype: int  (0 if no valid round trip exists)
        """
        # dp holds the max # of cherries two k-length paths can pickup.
        # The two k-length paths arrive at (i, k - i) and (j, k - j),
        # respectively.  Only the upper triangle (j >= i) is ever used.
        n = len(grid)
        # range() instead of the Python-2-only xrange(); identical iteration.
        dp = [[-1 for _ in range(n)] for _ in range(n)]

        dp[0][0] = grid[0][0]
        max_len = 2 * (n-1)
        # Predecessor offsets: each path either stayed in its row (moved
        # right) or came from the row above (moved down).
        directions = [(0, 0), (-1, 0), (0, -1), (-1, -1)]
        for k in range(1, max_len+1):
            # Rows below k-n+1 would put the column k-i out of bounds, and
            # the original guard skipped them without writing dp anyway, so
            # starting the loop at max(0, k-n+1) visits exactly the same
            # writable states with less work.
            for i in reversed(range(max(0, k-n+1), min(k+1, n))):
                for j in reversed(range(i, min(k+1, n))):
                    # Skip states whose implied columns fall off the grid.
                    if not (0 <= k-i < n and 0 <= k-j < n):
                        continue

                    # A thorn on either path invalidates this state.
                    if grid[i][k-i] == -1 or grid[j][k-j] == -1:
                        dp[i][j] = -1
                        continue

                    # Cherries picked at step k; shared cell counts once.
                    cnt = grid[i][k-i]
                    if i != j:
                        cnt += grid[j][k-j]

                    max_cnt = -1
                    for direction in directions:
                        ii, jj = i+direction[0], j+direction[1]
                        # dp[ii][jj] >= 0 filters unreachable predecessors.
                        if ii >= 0 and jj >= 0 and dp[ii][jj] >= 0:
                            max_cnt = max(max_cnt, dp[ii][jj]+cnt)
                    dp[i][j] = max_cnt

        return max(dp[n-1][n-1], 0)
|
Python
| 0
|
@@ -603,16 +603,31 @@
(xrange(
+max(0, k-n-1),
min(k+1,
|
43f2accb8cd4f63d62f7515bb5633296a7d592f0
|
Add setup.py to spm.
|
nipype/interfaces/spm/setup.py
|
nipype/interfaces/spm/setup.py
|
Python
| 0.000136
|
@@ -0,0 +1,342 @@
+def configuration(parent_package='',top_path=None):%0A from numpy.distutils.misc_util import Configuration%0A%0A config = Configuration('spm', parent_package, top_path)%0A%0A config.add_data_dir('tests')%0A%0A return config%0A%0Aif __name__ == '__main__':%0A from numpy.distutils.core import setup%0A setup(**configuration(top_path='').todict())%0A
|
|
c111c24e870788d729c0d875053ce4bc2a41fa2f
|
Update captureImage.py
|
BAD/captureImage.py
|
BAD/captureImage.py
|
from urlCommands import UrlCommands as UC # A collection of url commands to request go pro actions
import cv2
import numpy as np
import os
import time
import threading
# Home cherokee directory:
# http://10.5.5.9:8080/videos/DCIM/100GOPRO/
#-----------------------------------------------------------------------\
# Adjustments when testing within edison:
# remove comments in init constructor
# imshow method must be removed
# path must be changed to /media/external... for the Edison sd card directory
# home_directory = "http://10.5.5.9:8080/videos/DCIM/100GOPRO/"
#-----------------------------------------------------------------------
class CaptureImage:
    """Manage GoPro capture for the target-search flight.

    Wraps the UrlCommands helper (stored as ``self.gopro``) and tracks the
    filenames of the latest downloaded frame and the target image.
    """

    def __init__(self):
        # Make sure the GoPro is turned on, initialize settings and begin capture.
        self.imageID = "frame.jpeg"   # latest frame downloaded from the GoPro
        self.targetID = "target.png"  # user-supplied target image
        # Bug fix: every other method refers to ``self.gopro``, but the
        # original constructor only assigned ``self.goproURL``, so any
        # capture call raised AttributeError.  Assign ``gopro`` and keep
        # ``goproURL`` as a backward-compatible alias.
        self.gopro = UC()
        self.goproURL = self.gopro
        self.path = os.getcwd()
        print(self.path)
        print("Initializing GoPro..")
        #self.gopro.turn_on()

    def start_photo_thread(self):
        # capture thread to capture photos during flight
        print("Started " + threading.currentThread().getName() + " " + str(time.time()))

    def start_search_thread(self):
        # thread to handle the URL requests, we dont want them to cause the captures to fall behind
        print("Started " + threading.currentThread().getName() + " " + str(time.time()))

    def capture_video(self):
        # Put the camera in video mode and begin recording.
        self.gopro.enable_camera_mode() # enable camera video mode
        self.gopro.start_capture()

    def get_photo(self):
        # Download the newest photo and remember its identifier.
        self.gopro.get_photo() # This will download the latest photo to the image directory
        self.imageID = self.gopro.get_image_id()

    def begin_capture(self):
        # Launch the worker threads.  The commented lines below are the
        # disabled one-shot photo setup — left as-is intentionally.
        print("Init Threading..")
        # Init all of the GoPro Settings to capture a photo
        # self.gopro.enable_photo_mode()
        # self.gopro.start_capture()
        # self.gopro.stop_capture()
        self.start_search_thread()
        self.start_photo_thread()

    def shutdown(self):
        # Power the camera down.
        self.gopro.turn_off()
# Script entry: spin up the capture/search threads, then run SURF feature
# matching between the user's target image and the latest GoPro frame.
cap = CaptureImage()
t1 = threading.Thread(name="capture_thread", target=cap.start_photo_thread)
t2 = threading.Thread(name="cherokee_thread", target=cap.start_search_thread)
t1.start()
t2.start()

# Collect the images captured
os.chdir(cap.path)
img1 = cv2.imread(cap.targetID,0) # Target image from user.
os.chdir(cap.path + "/img")
img2 = cv2.imread(cap.imageID,0) # Current frame from GoPro

# NOTE(review): t1 is rebound here from the thread above to a timestamp —
# the thread handle is lost (it can no longer be joined).
t1 = time.time()

# Create the SURF object for keypoints and descriptors
surf = cv2.xfeatures2d.SURF_create(400)
kp1, des1 = surf.detectAndCompute(img1,None) # Keypoints, Target
kp2, des2 = surf.detectAndCompute(img2,None) # Keypoints, Frame

# FLANN parameters needed for flann matcher
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=50) # higher the # = more accurate/slower

# Create and match descriptors; knnMatch returns the 2 nearest neighbors
# per descriptor so Lowe's ratio test can be applied below.
flann = cv2.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(des1,des2,k=2)

# Need to draw only good matches, so create a mask
matchesMask = [[0,0] for i in xrange(len(matches))]

# Lowe's ratio test to get the closest matching vectors
good = []
for i,(m,n) in enumerate(matches):
    if m.distance < 0.6*n.distance:
        matchesMask[i]=[1,0]
        good.append(m) # Append the good match for count/comparison

# Adjust the drawing parameters
draw_params = dict(matchColor = (0,255,0),
                   singlePointColor = (255,255,255),
                   matchesMask = matchesMask,
                   flags = 0)

# Draw the image found and save to hits directory
hit = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,**draw_params)

# Report elapsed matching time and total match count.
print (str(time.time()-t1) + " s " +
       "\nTotal Matches: "+
       str(len(matches)))
os.chdir(cap.path + "/hits")
cv2.imwrite("hit.jpeg", hit)
|
Python
| 0.000001
|
@@ -1205,32 +1205,128 @@
r(time.time())%0D%0A
+ print %22Closed %22 + threading.currentThread().getName() + %22 %22 + str(time.time())%0D%0A
%0D%0A def start_
@@ -1537,16 +1537,112 @@
ime())%0D%0A
+ print %22Closed %22 + threading.currentThread().getName() + %22 %22 + str(time.time())%0D%0A
%0D%0A de
|
4797918eec0c43ada3f6eb9a63ec2f275eced253
|
Add spider for State Farm Agents; closes #519
|
locations/spiders/statefarm.py
|
locations/spiders/statefarm.py
|
Python
| 0
|
@@ -0,0 +1,2436 @@
+import json%0Aimport re%0Aimport scrapy%0A%0Afrom locations.items import GeojsonPointItem%0A%0A%0Aclass StateFarmSpider(scrapy.Spider):%0A name = %22statefarm%22%0A allowed_domains = %5B%22statefarm.com%22%5D%0A download_delay = 0.2%0A%0A start_urls = %5B%0A 'https://www.statefarm.com/agent/us',%0A %5D%0A%0A def parse_location(self, response):%0A%0A name = response.xpath('//*%5B@id=%22AgentNameLabelId%22%5D//span%5B@itemprop=%22name%22%5D/text()').extract_first()%0A if name:%0A name += ' - State Farm Insurance Agent'%0A%0A lat = response.xpath('//*%5B@id=%22agentOfficePrimaryLocLat%22%5D/@value').extract_first()%0A lon = response.xpath('//*%5B@id=%22agentOfficePrimaryLocLong%22%5D/@value').extract_first()%0A%0A properties = %7B%0A 'ref': %22_%22.join(response.url.split('/')%5B-3:%5D),%0A 'name': name,%0A 'addr_full': response.xpath('normalize-space(//div%5B@itemtype=%22http://schema.org/PostalAddress%22%5D//span%5B@id=%22locStreetContent_mainLocContent%22%5D/text())').extract_first(),%0A 'city': response.xpath('//div%5B@itemtype=%22http://schema.org/PostalAddress%22%5D/div%5B2%5D/span/span%5B1%5D/text()').extract_first().strip(', '),%0A 'state': response.xpath('//div%5B@itemtype=%22http://schema.org/PostalAddress%22%5D/div%5B2%5D/span/span%5B2%5D/text()').extract_first(),%0A 'postcode': response.xpath('//div%5B@itemtype=%22http://schema.org/PostalAddress%22%5D/div%5B2%5D/span/span%5B3%5D/text()').extract_first(),%0A 'phone': response.xpath('normalize-space(//span%5B@id=%22offNumber_mainLocContent%22%5D/span/text())').extract_first(),%0A 'lat': float(lat) if lat else None,%0A 'lon': float(lon) if lon else None,%0A 'website': response.url,%0A %7D%0A%0A yield GeojsonPointItem(**properties)%0A%0A def parse(self, response):%0A agents = response.xpath('//div%5Bcontains(@id, %22agent-details%22)%5D')%0A # agent_sites = response.xpath('//a%5Bcontains(text(), %22Visit agent site%22)%5D/@href').extract()%0A%0A if agents:%0A for agent in agents:%0A agent_site = 
agent.xpath('.//a%5Bcontains(text(), %22Visit agent site%22)%5D/@href').extract_first()%0A if not agent_site:%0A raise Exception('no agent site found')%0A yield scrapy.Request(response.urljoin(agent_site), callback=self.parse_location)%0A%0A else:%0A urls = response.xpath('//li/div/a/@href').extract()%0A%0A for url in urls:%0A yield scrapy.Request(response.urljoin(url))%0A%0A%0A
|
|
ebec02461bd341d49a499572d56bdef4520a650e
|
Add a missing migration
|
Instanssi/store/migrations/0007_storeitem_is_ticket.py
|
Instanssi/store/migrations/0007_storeitem_is_ticket.py
|
Python
| 0.00011
|
@@ -0,0 +1,596 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.10.4 on 2016-12-11 22:21%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('store', '0006_auto_20161209_0015'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='storeitem',%0A name='is_ticket',%0A field=models.BooleanField(default=False, help_text='Tuote on lipputuote, ja sit%C3%A4 voi k%C3%A4ytt%C3%A4%C3%A4 esim. kompomaatissa %C3%A4%C3%A4nestysoikeuden hankkimiseen', verbose_name='Tuote on lipputuote'),%0A ),%0A %5D%0A
|
|
cb01c58e0d11999331eb01e33bf970db8742f2f8
|
Create VertexGlyphFilter.py
|
src/Python/Filtering/VertexGlyphFilter.py
|
src/Python/Filtering/VertexGlyphFilter.py
|
Python
| 0.000001
|
@@ -0,0 +1,1285 @@
+#!/usr/bin/env python%0Aimport vtk%0A%0Adef main():%0A colors = vtk.vtkNamedColors()%0A points = vtk.vtkPoints()%0A points.InsertNextPoint(0,0,0)%0A points.InsertNextPoint(1,1,1)%0A points.InsertNextPoint(2,2,2)%0A %0A polydata = vtk.vtkPolyData()%0A polydata.SetPoints(points)%0A %0A vertexGlyphFilter = vtk.vtkVertexGlyphFilter()%0A vertexGlyphFilter.AddInputData(polydata)%0A vertexGlyphFilter.Update()%0A %0A # Create a mapper and actor%0A mapper = vtk.vtkPolyDataMapper()%0A mapper.SetInputConnection(vertexGlyphFilter.GetOutputPort())%0A %0A actor = vtk.vtkActor()%0A actor.SetMapper(mapper)%0A actor.GetProperty().SetPointSize(10)%0A actor.GetProperty().SetColor(colors.GetColor3d(%22Yellow%22))%0A %0A # Create a renderer, render window, and interactor%0A renderer = vtk.vtkRenderer()%0A renderWindow = vtk.vtkRenderWindow()%0A renderWindow.AddRenderer(renderer)%0A renderWindowInteractor = vtk.vtkRenderWindowInteractor()%0A renderWindowInteractor.SetRenderWindow(renderWindow)%0A %0A # Add the actor to the scene%0A renderer.AddActor(actor)%0A renderer.SetBackground(colors.GetColor3d(%22Green%22)) # Background color green%0A %0A # Render and interact%0A renderWindow.Render()%0A renderWindowInteractor.Start()%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
d2536ce3ded4fc2ea5648025f04efa093629b70f
|
test rapapasswords table
|
test/rapapasswordstest.py
|
test/rapapasswordstest.py
|
Python
| 0.000008
|
@@ -0,0 +1,1358 @@
+#!/usr/bin/python2.4%0A#%0A# Copyright (c) 2005-2007 rPath, Inc.%0A#%0A%0Aimport testsuite%0Atestsuite.setup()%0A%0Aimport os%0Aimport sys%0Aimport time%0Aimport tempfile%0A%0Aimport fixtures%0A%0Afrom mint import rapapasswords%0A%0Aclass rAPAPasswordsTest(fixtures.FixturedUnitTest):%0A @fixtures.fixture(%22Full%22)%0A def testPasswords(self, db, data):%0A client = self.getClient('admin')%0A self.failIf(client.getrAPAPassword('foo.bar.baz', 'role'), %0A 'Value returned when not present.')%0A client.setrAPAPassword('blah.bar.baz', 'foo_bar_baz', 'passwd', 'role')%0A client.setrAPAPassword('foo.bar.baz', 'foo_bar_baz', 'passwd', 'role')%0A client.setrAPAPassword('foo.bar.baz', 'foo_bar_baz2', 'passwd2', 'role2')%0A user, passwd = client.getrAPAPassword('foo.bar.baz', 'role') %0A self.failIf(user != 'foo_bar_baz' or passwd != 'passwd', %22Incorrect user returned%22)%0A user, passwd = client.getrAPAPassword('foo.bar.baz', 'role2') %0A self.failIf(user != 'foo_bar_baz2' or passwd != 'passwd2', %22Incorrect user returned%22)%0A%0A client.setrAPAPassword('foo.bar.baz', 'foo_bar_baz', 'passwd_changed', 'role')%0A user, passwd = client.getrAPAPassword('foo.bar.baz', 'role') %0A self.failIf(user != 'foo_bar_baz' or passwd != 'passwd_changed', %22Password not updated.%22)%0Aif __name__ == %22__main__%22:%0A testsuite.main()%0A
|
|
45e86e49e845ef25df6e1db3bcb336809ffb5f5f
|
Disable IPv6 on wireless (Extension Attribute for Casper)
|
ipv6_Checker.py
|
ipv6_Checker.py
|
Python
| 0
|
@@ -0,0 +1,1086 @@
+#!/usr/bin/python%0A%0A#Copyright 2014 Quam Sodji%0A%0Aimport subprocess%0A%0Adef getinfo(hardware): #Return network info on select interface%0A info = subprocess.check_output(%5B%22networksetup%22, %22-getinfo%22, hardware%5D)%0A return info%0A%0Awireless = %5B%22Airport%22, %22Wi-Fi%22%5D #The two type of interfaces that refers to wireless%0A%0Alist_network = subprocess.check_output(%5B%22networksetup%22, %22-listallnetworkservices%22%5D)%0Alist_network = list_network.split('%5Cn')%0A%0Afor device in wireless:%0A if device in list_network: %0A response = getinfo(device)%0A response_check = response.split(%22%5Cn%22)%0A if %22IPv6: Off%22 not in response_check:%0A check = subprocess.check_output(%5B%22networksetup%22, %22-setv6off%22, device%5D)%0A Status = %22Off%22%0A else:%0A for setting in response_check:%0A if setting.startswith(%22IPv6:%22):%0A if setting != %22IPv6: Off%22:%0A Status = setting%0A else:%0A Status = %22Off%22%0A else:%0A Status = %22No wireless interfaces configured%22%0A continue%0A %0A%0Aprint %22%3Cresult%3E%25s%3C/result%3E%22%25Status%0A
|
|
c68872453a0c4a28e31d5ee38faf11d8a0486b62
|
add drone_setup.py
|
drone_setup.py
|
drone_setup.py
|
Python
| 0.000001
|
@@ -0,0 +1,1945 @@
+import requests%0Aimport json%0A%0A# Open user-account.json and create a json object containing%0A# user credential fields.%0Awith open('user-account.json', %22r%22) as jsonFile:%0A jsonUser = json.load(jsonFile)%0A jsonFile.close()%0A%0A# Assign user credentials to variables%0AUSER_EMAIL = jsonUser%5B0%5D%5B'email'%5D%0AUSER_API_KEY = jsonUser%5B0%5D%5B'api_key'%5D%0ADRONE_NAME = %22%22%0A%0A# Create headers object for API requests%0Aheaders = %7B%0A 'user-email': USER_EMAIL,%0A 'user-key': USER_API_KEY,%0A 'Content-Type': 'application/json'%0A%7D%0A%0A# This request will create a virtual drone on your account with a random%0A# name. The server should respond with a JSON formatted Drone%0A# object that contains the name of the new drone.%0Aprint %22%5CnCreate new virtual drone...%5Cn%22%0A%0Aresponse = requests.post('http://api.dronesmith.io/api/drone', headers=headers)%0Aobj = json.loads(response.text)%0Aprint json.dumps(obj, indent=2, sort_keys=True)%0A%0A# Update DRONE_NAME%0ADRONE_NAME = obj%5B'name'%5D%0A%0A# Update drone_name field in jsonUser object%0AjsonUser%5B0%5D%5B%22drone_name%22%5D = DRONE_NAME%0A%0A# Write jsonUser object to user-account.json%0Awith open('user-account.json', %22w%22) as jsonFile:%0A jsonFile.write(json.dumps(jsonUser,indent=2, sort_keys=True))%0A jsonFile.close()%0A%0A# Add a sensor named radiation_sensor to drone and initialize its intensity field%0Aprint %22%5CnAdd radiation sensor to drone...%5Cn%22%0Aresponse = requests.post('http://api.dronesmith.io/api/drone/' + DRONE_NAME %5C%0A + '/sensor/radiation_sensor', json=%7B%0A %22intensity%22: 0%0A %7D, headers=headers)%0AjsonText = json.loads(response.text)%0Aprint json.dumps(jsonText, indent=2, sort_keys=True)%0A%0A# Get drone object to make sure radiation sensor was properly%0A# added. 
There should be a sensors field containing radiation_sensor object.%0Aprint %22%5CnGet Drone object..%5Cn%22%0Aresponse = requests.get('http://api.dronesmith.io/api/drone/' %5C%0A+ DRONE_NAME, headers=headers)%0AjsonText = json.loads(response.text)%0Aprint json.dumps(jsonText, indent=2, sort_keys=True)%0A
|
|
a5ff02a696c553dbd4038e1cb1c0fd0668b30006
|
Create ets2look.py
|
FreePIE/ets2look.py
|
FreePIE/ets2look.py
|
Python
| 0
|
@@ -0,0 +1,226 @@
+hSen = 750%0AvSen = 200%0A%0Aif starting:%0A lastX = 0%0A lastY = 0%0A%0AthisX = xbox360%5B0%5D.rightStickX * hSen%0AthisY = xbox360%5B0%5D.rightStickY * vSen%0A%0Amouse.deltaX = thisX - lastX%0Amouse.deltaY = lastY - thisY%0A%0AlastX = thisX%0AlastY = thisY%0A%0A
|
|
7bea7133d8b069e784b8e35e045a6411cac8882c
|
add movielens (#1027)
|
python/dllib/src/bigdl/dllib/feature/dataset/movielens.py
|
python/dllib/src/bigdl/dllib/feature/dataset/movielens.py
|
Python
| 0
|
@@ -0,0 +1,1670 @@
+#%0A# Copyright 2016 The BigDL Authors.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A#%0A%0Aimport os%0A%0Aimport zipfile%0Aimport numpy as np%0A%0Afrom bigdl.dataset import base%0A%0ASOURCE_URL = 'http://files.grouplens.org/datasets/movielens/'%0A%0Adef read_data_sets(data_dir):%0A %22%22%22%0A Parse or download movielens 1m data if train_dir is empty.%0A%0A :param data_dir: The directory storing the movielens data%0A :return: a 2D numpy array with user index and item index in each row %0A %22%22%22%0A%0A WHOLE_DATA = 'ml-1m.zip'%0A local_file = base.maybe_download(WHOLE_DATA, data_dir, SOURCE_URL + WHOLE_DATA)%0A zip_ref = zipfile.ZipFile(local_file, 'r')%0A extracted_to = os.path.join(data_dir, %22ml-1m%22)%0A if not os.path.exists(extracted_to):%0A print(%22Extracting %25s to %25s%22 %25 (local_file, data_dir))%0A zip_ref.extractall(data_dir)%0A zip_ref.close()%0A rating_files = os.path.join(extracted_to,%22ratings.dat%22)%0A rating_list = %5Bi.strip().split(%22::%22)%5B:2%5D for i in open(rating_files,%22r%22).readlines()%5D %0A movielens_data = np.array(rating_list).astype(int)%0A return movielens_data %0A%0Aif __name__ == %22__main__%22:%0A movielens_data = read_data_sets(%22/tmp/movielens/%22)%0A
|
|
46b7dd2c389d2bf020e2c413518e0f960fa28ba4
|
Add test for current_locale expression
|
tests/test_expressions.py
|
tests/test_expressions.py
|
Python
| 0.000003
|
@@ -0,0 +1,185 @@
+from sqlalchemy_i18n.expressions import current_locale%0A%0A%0Aclass TestCurrentLocaleExpression(object):%0A def test_render(self):%0A assert str(current_locale()) == ':current_locale'%0A
|
|
633f5ad8064395ec3805e38ea1f73a9aa7475878
|
Use the caller's unpickler as well
|
jsonpickle/_handlers.py
|
jsonpickle/_handlers.py
|
import datetime
import jsonpickle
class DatetimeHandler(jsonpickle.handlers.BaseHandler):
    """
    Datetime objects use __reduce__, and they generate binary strings encoding
    the payload. This handler encodes that payload to reconstruct the
    object.
    """
    def flatten(self, obj, data):
        # Pickler that owns this handler; carries the ``unpicklable`` flag.
        pickler = self._base
        if not pickler.unpicklable:
            # Caller wants a plain, human-readable representation only.
            # NOTE: ``unicode`` -- this module targets Python 2.
            return unicode(obj)
        cls, args = obj.__reduce__()
        # args[0] is the datetime's binary state; base64 keeps it JSON-safe.
        # Remaining args (e.g. tzinfo) are flattened recursively.
        args = [args[0].encode('base64')] + map(pickler.flatten, args[1:])
        data['__reduce__'] = (pickler.flatten(cls), args)
        return data
    def restore(self, obj):
        cls, args = obj['__reduce__']
        # Undo the base64 encoding applied in ``flatten``.
        value = args[0].decode('base64')
        unpickler = jsonpickle.Unpickler()
        cls = unpickler.restore(cls)
        params = map(unpickler.restore, args[1:])
        params = (value,) + tuple(params)
        # Rebuild via __new__ exactly as pickle's reduce protocol would.
        return cls.__new__(cls, *params)
class SimpleReduceHandler(jsonpickle.handlers.BaseHandler):
    """
    Follow the __reduce__ protocol to pickle an object. As long as the factory
    and its arguments are pickleable, this should pickle any object that
    implements the reduce protocol.
    """
    def flatten(self, obj, data):
        # Pickler that owns this handler; carries the ``unpicklable`` flag.
        pickler = self._base
        if not pickler.unpicklable:
            # Plain representation only (Python 2 ``unicode``).
            return unicode(obj)
        # Flatten the (factory, args) pair returned by __reduce__.
        data['__reduce__'] = map(pickler.flatten, obj.__reduce__())
        return data
    def restore(self, obj):
        # NOTE(review): unlike DatetimeHandler.restore, this builds a fresh
        # Unpickler instead of using the caller's -- confirm intent.
        unpickler = jsonpickle.Unpickler()
        cls, args = map(unpickler.restore, obj['__reduce__'])
        return cls.__new__(cls, *args)
# datetime/date/time need the base64-payload treatment; timedelta's reduce
# args are plain ints, so the generic reduce handler suffices.
jsonpickle.handlers.registry.register(datetime.datetime, DatetimeHandler)
jsonpickle.handlers.registry.register(datetime.date, DatetimeHandler)
jsonpickle.handlers.registry.register(datetime.time, DatetimeHandler)
jsonpickle.handlers.registry.register(datetime.timedelta, SimpleReduceHandler)
|
Python
| 0.000001
|
@@ -711,38 +711,26 @@
ckler =
-jsonpickle.Unpickler()
+self._base
%0A
@@ -1429,30 +1429,18 @@
r =
-jsonpickle.Unpickler()
+self._base
%0A
|
f1f654d823ee8454b53f27372bbaab85f4d01631
|
add analyzer
|
performancetest/rec-analyze.py
|
performancetest/rec-analyze.py
|
Python
| 0.000001
|
@@ -0,0 +1,2025 @@
+#!/usr/bin/python%0A# -*- coding: utf-8 -*-%0A%0Afrom __future__ import division%0A%0Aimport re%0Aimport sys%0Aimport os%0Aimport json%0Aimport numpy as np%0A%0Adef f(n):%0A return int(round(n))%0A %0A%0Adef report(name, seq):%0A print '===', name, '%5Bms%5D ==='%0A print %22mean:%22, f(np.mean(seq)), %22%5Ctstd dev:%22, f(np.std(seq))%0A print %2295%25:%22, f(np.percentile(seq, 95)), %22%5Ct99%25:%22, f(np.percentile(seq, 99))%0A print %22min:%22, f(min(seq)), %22%5Ctmax:%22, f(max(seq))%0A%0A%0A%0Aif __name__ == %22__main__%22:%0A errors = dict()%0A%0A total_times = %5B%5D%0A service_times = %5B%5D%0A overhead_times = %5B%5D%0A %0A response = None%0A curltime_ms = None%0A totaltime = None%0A externaltime = None%0A n = 0%0A for line in sys.stdin:%0A if line.startswith('%7B'):%0A # assume json response%0A response = json.loads(line)%0A #totaltime = response%5B%22msecs%22%5D%0A externaltime = response%5B%22msecs%22%5D%0A continue%0A match = re.search(r'CURL HTTPCODE=(%5Cd+) SECS=(%5Cd+%5B.,%5D%5Cd+)', line)%0A if match:%0A n+=1%0A print %22n:%22, n%0A (httpcode, time) = match.group(1,2) %0A if not httpcode == %22200%22:%0A if not httpcode in errors:%0A errors%5B httpcode %5D = 1%0A else:%0A errors%5B httpcode %5D += 1%0A else:%0A time = time.replace(',','.')%0A curltime_ms = float(time)*1000%0A print %22CURL:%22, curltime_ms%0A print %22TOTAL REC:%22, externaltime%0A overhead = curltime_ms - externaltime%0A print %22TOTAL OVERHEAD:%22, overhead%0A total_times.append(curltime_ms)%0A overhead_times.append(overhead)%0A service_times.append(externaltime)%0A continue%0A if errors:%0A print %22ERRORS:%22, errors%0A else:%0A print %22%5Bno errors%5D%22%0A print %22samples:%22, len(total_times)%0A report(%22total time%22, total_times)%0A report(%22service time%22, service_times)%0A report(%22overhead time%22, overhead_times)%0A%0A %0A
|
|
e7809f307610e98cb8356110eec7e8c1f41e9d46
|
Backup script.
|
management/backup.py
|
management/backup.py
|
Python
| 0
|
@@ -0,0 +1,979 @@
+#!/usr/bin/env python%0Aimport sys%0Aimport os%0Aimport glob%0Aimport shutil%0Aimport mc_bin_client%0A%0Adef usage():%0A print %3E%3E sys.stderr, %22%22%22%0AUsage: %25s %3Cdest_dir%3E%0A%22%22%22 %25 os.path.basename(sys.argv%5B0%5D)%0A sys.exit(1)%0A%0Adef main():%0A if len(sys.argv) != 2:%0A usage()%0A%0A cmd_dir = os.path.dirname(sys.argv%5B0%5D)%0A dest_dir = sys.argv%5B1%5D%0A flushctl = os.path.join(cmd_dir, 'flushctl.py')%0A%0A mc = mc_bin_client.MemcachedClient('127.0.0.1')%0A db_path = mc.stats()%5B'ep_dbname'%5D%0A db_files = glob.glob('%25s*' %25 db_path)%0A%0A print 'Pausing persistence... ',%0A os.system('%22%25s%22 127.0.0.1:11211 stop' %25 flushctl)%0A print 'paused.'%0A try:%0A for fn in db_files:%0A dest_fn = os.path.join(dest_dir, os.path.basename(fn))%0A print 'Copying %25s to %25s' %25 (fn, dest_fn)%0A shutil.copyfile(fn, dest_fn)%0A finally:%0A print 'Unpausing persistence.'%0A os.system('%22%25s%22 127.0.0.1:11211 start' %25 flushctl)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
2945b68d5a8b6505f1a8516dd8b5f7d4b85aac5a
|
Add tests for storeslice
|
numba/tests/test_storeslice.py
|
numba/tests/test_storeslice.py
|
Python
| 0
|
@@ -0,0 +1,920 @@
+from __future__ import print_function%0Aimport numba.unittest_support as unittest%0Aimport numpy as np%0Afrom numba.compiler import compile_isolated, Flags%0A%0A%0Adef usecase(obs, nPoints, B, sigB, A, sigA, M, sigM):%0A center = nPoints / 2%0A print(center)%0A obs%5B0:center%5D = np.arange(center)%0A obs%5Bcenter%5D = 321%0A obs%5B(center + 1):%5D = np.arange(nPoints - center - 1)%0A%0A%0Aclass TestStoreSlice(unittest.TestCase):%0A def test_usecase(self):%0A n = 10%0A obs_got = np.zeros(n)%0A obs_expected = obs_got.copy()%0A%0A flags = Flags()%0A flags.set(%22enable_pyobject%22)%0A cres = compile_isolated(usecase, (), flags=flags)%0A cres.entry_point(obs_got, n, 10.0, 1.0, 2.0, 3.0, 4.0, 5.0)%0A usecase(obs_expected, n, 10.0, 1.0, 2.0, 3.0, 4.0, 5.0)%0A%0A print(obs_got, obs_expected)%0A self.assertTrue(np.allclose(obs_got, obs_expected))%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A%0A
|
|
c468f49e50b7a55995ef1fc447c04993c2171ca2
|
Add lc0242_valid_anagram.py
|
lc0242_valid_anagram.py
|
lc0242_valid_anagram.py
|
Python
| 0.007833
|
@@ -0,0 +1,683 @@
+%22%22%22Leetcode 242. Valid Anagram%0AEasy%0A%0AURL: https://leetcode.com/problems/valid-anagram/%0A%0AGiven two strings s and t,%0Awrite a function to determine if t is an anagram of s.%0A%0AExample 1:%0AInput: s = %22anagram%22, t = %22nagaram%22%0AOutput: true%0A%0AExample 2:%0AInput: s = %22rat%22, t = %22car%22%0AOutput: false%0A%0ANote:%0AYou may assume the string contains only lowercase alphabets.%0A%0AFollow up:%0AWhat if the inputs contain unicode characters? How would you adapt your solution to such case?%0A%22%22%22%0A%0Aclass Solution(object):%0A def isAnagram(self, s, t):%0A %22%22%22%0A :type s: str%0A :type t: str%0A :rtype: bool%0A %22%22%22%0A pass%0A%0A%0Adef main():%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
dbb812621c58d92bc3c2aed6135ddb9b91cd9181
|
implement bucket compaction test
|
perfrunner/tests/compaction.py
|
perfrunner/tests/compaction.py
|
Python
| 0
|
@@ -0,0 +1,699 @@
+from perfrunner.tests import TargetIterator%0Afrom perfrunner.tests.kv import KVTest%0A%0A%0Aclass DbCompactionTest(KVTest):%0A%0A def compact(self):%0A self.reporter.start()%0A for target_settings in TargetIterator(self.cluster_spec,%0A self.test_config):%0A self.rest.trigger_bucket_compaction(target_settings.node,%0A target_settings.bucket)%0A self.monitor.monitor_bucket_fragmentation(target_settings)%0A self.reporter.finish('Bucket compaction')%0A%0A def run(self):%0A self._run_load_phase() # initial load%0A self._run_load_phase() # extra mutations for fragmentation%0A
|
|
c2982060ff7ffbbf784a37675f2caec381e9aa48
|
Create quicksort.py
|
Python/quicksort.py
|
Python/quicksort.py
|
Python
| 0.000004
|
@@ -0,0 +1,569 @@
+def quickSort(arr):%0A less = %5B%5D%0A pivotList = %5B%5D%0A more = %5B%5D%0A if len(arr) %3C= 1:%0A return arr%0A else:%0A pivot = arr%5B0%5D%0A for i in arr:%0A if i %3C pivot:%0A less.append(i)%0A elif i %3E pivot:%0A more.append(i)%0A else:%0A pivotList.append(i)%0A less = quickSort(less)%0A more = quickSort(more)%0A return less + pivotList + more%0A%0Aif __name__ == %22__main__%22:%0A arr=%5Bint(x) for x in input(%22Enter the array elements : %22).split()%5D%0A a = quickSort(arr)%0A print(a)%0A
|
|
9bf8baec1a0a72d33ad6021bf90488e1163c94de
|
make IPython tab-complete work on docmuments
|
metadatastore/doc.py
|
metadatastore/doc.py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import collections
from functools import reduce
_HTML_TEMPLATE = """
<table>
{% for key, value in document | dictsort recursive %}
<tr>
<th> {{ key }} </th>
<td>
{% if value.items %}
<table>
{{ loop(value | dictsort) }}
</table>
{% else %}
{% if key == 'time' %}
{{ value | human_time }}
{% else %}
{{ value }}
{% endif %}
{% endif %}
</td>
</tr>
{% endfor %}
</table>
"""
class DocumentIsReadOnly(Exception):
    """Raised on any attempt to mutate a Document instance."""
    pass
class Document(dict):
def __init__(self, name, *args, **kwargs):
super(Document, self).__init__(*args, **kwargs)
super(Document, self).__setitem__('_name', name)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError()
def __setattr__(self, key, value):
raise DocumentIsReadOnly()
def __setitem__(self, key, value):
raise DocumentIsReadOnly()
def __delattr__(self, key):
raise DocumentIsReadOnly()
def __delitem__(self, key):
raise DocumentIsReadOnly()
def update(self, *args, **kwargs):
raise DocumentIsReadOnly()
def pop(self, key):
raise DocumentIsReadOnly()
def __iter__(self):
return (k for k in super(Document, self).__iter__()
if not (isinstance(k, six.string_types) and k.startswith('_')))
def items(self):
return ((k, v) for k, v in super(Document, self).items()
if not (isinstance(k, six.string_types) and k.startswith('_')))
def values(self):
return (v for k, v in super(Document, self).items()
if not (isinstance(k, six.string_types) and k.startswith('_')))
def keys(self):
return (k for k in super(Document, self).keys()
if not (isinstance(k, six.string_types) and k.startswith('_')))
def __len__(self):
return len(list(self.keys()))
def _repr_html_(self):
import jinja2
env = jinja2.Environment()
env.filters['human_time'] = pretty_print_time
template = env.from_string(_HTML_TEMPLATE)
return template.render(document=self)
def __str__(self):
try:
return vstr(self)
except ImportError:
return super(self, Document).__str__()
def to_name_dict_pair(self):
"""Convert to (name, dict) pair
This can be used to safely mutate a Document::
name, dd = doc.to_name_dict_pair()
dd['new_key'] = 'aardvark'
dd['run_start'] = dd['run_start']['uid']
new_doc = Document(name, dd)
Returns
-------
name : str
Name of Document
ret : dict
Data payload of Document
"""
ret = dict(self)
name = ret.pop('_name')
return name, ret
def pretty_print_time(timestamp):
    """Render a POSIX timestamp as 'relative time (ISO date)'.

    Used as the 'human_time' Jinja2 filter by ``Document._repr_html_``.
    """
    # Imports are deferred so the module loads without these installed.
    import humanize
    import time
    import datetime

    iso_date = datetime.datetime.fromtimestamp(timestamp).isoformat()
    relative = humanize.naturaltime(time.time() - timestamp)
    return '{ago} ({date})'.format(ago=relative, date=iso_date)
def _format_dict(value, name_width, value_width, name, tabs=0):
    # Recursively render a (possibly nested) mapping as indented
    # ``key: value`` lines. ``name`` is accepted for symmetry with the
    # recursive call but is not itself rendered -- only child keys are.
    ret = ''
    for k, v in six.iteritems(value):
        if isinstance(v, collections.Mapping):
            # Nested mapping: recurse one tab-level deeper.
            ret += _format_dict(v, name_width, value_width, k, tabs=tabs+1)
        else:
            # Build a width-parameterised %-template first, then fill it.
            # Keys are truncated to 16 characters to keep columns aligned.
            ret += ("\n%s%-{}s: %-{}s".format(
                name_width, value_width) % (' '*tabs, k[:16], v))
    return ret
def _format_data_keys_dict(data_keys_dict):
    # Render a mapping of data-key -> field-dict as a PrettyTable whose
    # columns are the union of all field names seen in any entry.
    from prettytable import PrettyTable
    # Union of every field name across all data keys, in sorted order.
    fields = reduce(set.union,
                    (set(v) for v in six.itervalues(data_keys_dict)))
    fields = sorted(list(fields))
    table = PrettyTable(["data keys"] + list(fields))
    table.align["data keys"] = 'l'
    table.padding_width = 1
    for data_key, key_dict in sorted(data_keys_dict.items()):
        row = [data_key]
        for fld in fields:
            # Fields absent from this entry render as an empty cell.
            row.append(key_dict.get(fld, ''))
        table.add_row(row)
    return table
def vstr(doc, indent=0):
    """Recursive document walker and formatter

    Parameters
    ----------
    doc : Document
        The document (or sub-document) to render; its header name is
        taken from ``doc['_name']``

    indent : int, optional
        The indentation level. Defaults to starting at 0 and adding one tab
        per recursion level
    """
    # ReST-style underline characters; one per nesting level, so deeper
    # documents get progressively 'lesser' heading styles.
    headings = [
        # characters recommended as headers by ReST docs
        '=', '-', '`', ':', '.', "'", '"', '~', '^', '_', '*', '+', '#',
        # all other valid header characters according to ReST docs
        '!', '$', '%', '&', '(', ')', ',', '/', ';', '<', '>', '?', '@',
        '[', '\\', ']', '{', '|', '}'
    ]
    name = doc['_name']
    # Underlined section header for this document.
    ret = "\n%s\n%s" % (name, headings[indent]*len(name))
    documents = []
    name_width = 16
    value_width = 40
    for name, value in sorted(doc.items()):
        if name == 'descriptors':
            # this case is to deal with Headers from databroker
            for val in value:
                documents.append((name, val))
        elif name == 'data_keys':
            ret += "\n%s" % str(_format_data_keys_dict(value))
        elif isinstance(value, collections.Mapping):
            if '_name' in value:
                # A nested Document: render it after the scalar fields.
                documents.append((name, value))
            else:
                # format dicts reasonably
                ret += "\n%-{}s:".format(name_width, value_width) % (name)
                ret += _format_dict(value, name_width, value_width,
                                    name, tabs=1)
        else:
            # Scalar field: fixed-width 'name: value' line, name truncated.
            ret += ("\n%-{}s: %-{}s".format(name_width, value_width) %
                    (name[:16], value))
    for name, value in documents:
        # Recurse into nested documents with a deeper heading level.
        ret += "\n%s" % (vstr(value, indent+1))
    # ret += "\n"
    # Prefix every produced line with the current indentation.
    ret = ret.split('\n')
    ret = ["%s%s" % (' '*indent, line) for line in ret]
    ret = "\n".join(ret)
    return ret
def ref_doc_to_uid(doc, field):
    """Replace an embedded Document reference with its uid.

    Parameters
    ----------
    doc : Document
        Source document; it is not modified in place.
    field : str
        Key whose value is a Document-like mapping with a ``'uid'`` entry.

    Returns
    -------
    Document
        New instance where ``doc[field]`` has been collapsed to its uid.
    """
    name, payload = doc.to_name_dict_pair()
    payload[field] = payload[field]['uid']
    return Document(name, payload)
|
Python
| 0.000001
|
@@ -842,141 +842,67 @@
me)%0A
-%0A
-def __getattr__(self, key):%0A try:%0A return self%5Bkey%5D%0A except KeyError:%0A raise AttributeError(
+ super(Document, self).__setattr__('__dict__', self
)%0A%0A
|
592f9901f9125534f59efc2cb36bb4fb2bab351e
|
Fix typo (#8754)
|
homeassistant/scripts/__init__.py
|
homeassistant/scripts/__init__.py
|
"""Home Assistant command line scripts."""
import argparse
import importlib
import logging
import os
import sys
from typing import List
from homeassistant.bootstrap import mount_local_lib_path
from homeassistant.config import get_default_config_dir
from homeassistant.const import CONSTRAINT_FILE
from homeassistant.util.package import (
install_package, running_under_virtualenv)
def run(args: List) -> int:
    """Run a script.

    Discovers available scripts in this package, installs the chosen
    script's REQUIREMENTS, then dispatches to its ``run`` with the
    remaining arguments.  Returns a process exit code (0 on success).
    """
    # Enumerate sibling modules/packages as runnable script names.
    scripts = []
    path = os.path.dirname(__file__)
    for fil in os.listdir(path):
        if fil == '__pycache__':
            continue
        elif os.path.isdir(os.path.join(path, fil)):
            scripts.append(fil)
        elif fil != '__init__.py' and fil.endswith('.py'):
            scripts.append(fil[:-3])

    if not args:
        print('Please specify a script to run.')
        print('Available scripts:', ', '.join(scripts))
        return 1

    if args[0] not in scripts:
        print('Invalid script specified.')
        print('Available scripts:', ', '.join(scripts))
        return 1

    script = importlib.import_module('homeassistant.scripts.' + args[0])

    config_dir = extract_config_dir()
    deps_dir = mount_local_lib_path(config_dir)

    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # Install the script's dependencies; outside a virtualenv they are
    # installed into the local deps directory instead of site-packages.
    for req in getattr(script, 'REQUIREMENTS', []):
        if running_under_virtualenv():
            returncode = install_package(req, constraints=os.path.join(
                os.path.dirname(__file__), os.pardir, CONSTRAINT_FILE))
        else:
            returncode = install_package(
                req, target=deps_dir, constraints=os.path.join(
                    os.path.dirname(__file__), os.pardir, CONSTRAINT_FILE))

        if not returncode:
            # BUG FIX: the original message misspelled 'script' as 'scipt'.
            print('Aborting script, could not install dependency', req)
            return 1

    return script.run(args[1:])  # type: ignore
def extract_config_dir(args=None) -> str:
    """Return the configuration directory from CLI args or the default.

    Only ``-c``/``--config`` is recognised; all other arguments are
    ignored.  A relative path is resolved against the current directory.
    """
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-c', '--config', default=None)
    parsed, _ = parser.parse_known_args(args)
    if parsed.config:
        return os.path.join(os.getcwd(), parsed.config)
    return get_default_config_dir()
|
Python
| 0.001331
|
@@ -1757,16 +1757,17 @@
rting sc
+r
ipt, cou
|
0aaed7764d743afe46af503fe5938fa718fe3abc
|
Set up contextmanager for db cals
|
teamworkApp/lib/dbCalls.py
|
teamworkApp/lib/dbCalls.py
|
Python
| 0
|
@@ -0,0 +1,840 @@
+# muddersOnRails()%0A# Sara McAllister November 5, 2-17%0A# Last updated: 11-5-2017%0A%0A# library for SQLite database calls for teamwork analysis app%0A%0Aimport contextlib%0Aimport sqlite3%0A%0ADB = 'db/development.sqlite3'%0A%0Adef connect(sqlite_file):%0A %22%22%22 Make connection to an SQLite database file %22%22%22%0A conn = sqlite3.connect(sqlite_file)%0A c = conn.cursor()%0A return conn, c%0A%0Adef close(conn):%0A %22%22%22 Commit changes and close connection to the database %22%22%22%0A conn.commit()%0A conn.close()%0A%0A@contextlib.contextmanager%0Adef dbconnect(sqlite_file=DB):%0A conn, cursor = connect(sqlite_file)%0A try:%0A yield cursor%0A finally:%0A close(conn)%0A%0Adef getAllStyles():%0A %22%22%22Get all style entries in db ordered based on entry in db%22%22%22%0A with dbconnect() as cursor:%0A scores = cursor.execute('SELECT * FROM styles').fetchall()%0A return scores
|
|
6e5074cf969f8667e633ab2fa3373e83402e7610
|
Add DigitalOcean
|
agithub/DigitalOcean.py
|
agithub/DigitalOcean.py
|
Python
| 0.000002
|
@@ -0,0 +1,727 @@
+# Copyright 2012-2016 Jonathan Paugh and contributors%0A# See COPYING for license details%0Afrom base import *%0A%0Aclass DigitalOcean(API):%0A '''%0A Digital Ocean API%0A '''%0A def __init__(self, token=None, *args, **kwargs):%0A props = ConnectionProperties(%0A api_url = 'api.digitalocean.com',%0A url_prefix = '/v2',%0A secure_http = True,%0A extra_headers = %7B%0A 'authorization' : self.generateAuthHeader(token)%0A %7D)%0A self.setClient(Client(*args, **kwargs))%0A self.setConnectionProperties(props)%0A%0A def generateAuthHeader(self, token):%0A if token is not None:%0A return %22Bearer %22 + token%0A return None%0A
|
|
18aeea496175cb73ccf0d9f164359f75f854b512
|
add background_helper
|
portality/background_helper.py
|
portality/background_helper.py
|
Python
| 0.000001
|
@@ -0,0 +1,958 @@
+%22%22%22 collections of wrapper function for helping you to create BackgroundTask%0A%0A%22%22%22%0Afrom typing import Callable, Type%0A%0Afrom portality import models%0Afrom portality.background import BackgroundApi, BackgroundTask%0Afrom portality.core import app%0A%0A%0Adef execute_by_job_id(job_id,%0A task_factory: Callable%5B%5Bmodels.BackgroundJob%5D, BackgroundTask%5D):%0A %22%22%22 Common way to execute BackgroundTask by job_id%0A %22%22%22%0A job = models.BackgroundJob.pull(job_id)%0A task = task_factory(job)%0A BackgroundApi.execute(task)%0A%0A%0Adef submit_task_basic(background_task: Type%5BBackgroundTask%5D):%0A %22%22%22 Common way to submit task by BackgroundTask Class%0A%0A %22%22%22%0A user = app.config.get(%22SYSTEM_USERNAME%22)%0A job = background_task.prepare(user)%0A background_task.submit(job)%0A%0A%0Adef create_job(username, action):%0A %22%22%22 Common way to create BackgroundJob%0A %22%22%22%0A job = models.BackgroundJob()%0A job.user = username%0A job.action = action%0A return job%0A
|
|
caef73c6d7d9853e32f5a75b321f515f3c138b6d
|
Create nzbget.py
|
comandarr/nzbget.py
|
comandarr/nzbget.py
|
Python
| 0.000004
|
@@ -0,0 +1 @@
+%0A
|
|
853972cac73c3837c37a5682c2057a0aab500961
|
Add tests for VAE framework
|
pylearn2/models/tests/test_vae.py
|
pylearn2/models/tests/test_vae.py
|
Python
| 0
|
@@ -0,0 +1,2935 @@
+import numpy%0Aimport theano%0Aimport theano.tensor as T%0Afrom pylearn2.models.mlp import MLP%0Afrom pylearn2.models.mlp import Linear, ConvRectifiedLinear%0Afrom pylearn2.models.vae import VAE%0Afrom pylearn2.models.vae.visible import BinaryVisible%0Afrom pylearn2.models.vae.latent import DiagonalGaussianPrior%0Afrom pylearn2.space import Conv2DSpace%0A%0A%0Adef test_one_sample_allowed():%0A %22%22%22%0A VAE allows one sample per data point%0A %22%22%22%0A encoding_model = MLP(nvis=10, layers=%5BLinear(layer_name='h', dim=10,%0A irange=0.01)%5D)%0A decoding_model = MLP(nvis=5, layers=%5BLinear(layer_name='h', dim=10,%0A irange=0.01)%5D)%0A visible = BinaryVisible(decoding_model=decoding_model)%0A latent = DiagonalGaussianPrior(encoding_model=encoding_model,%0A num_samples=1)%0A vae = VAE(nvis=10, visible=visible, latent=latent, nhid=5)%0A X = T.matrix('X')%0A lower_bound = vae.log_likelihood_lower_bound(X)%0A f = theano.function(inputs=%5BX%5D, outputs=lower_bound)%0A f(numpy.random.uniform(size=(10, 10)))%0A%0A%0Adef test_multiple_samples_allowed():%0A %22%22%22%0A VAE allows multiple samples per data point%0A %22%22%22%0A encoding_model = MLP(nvis=10, layers=%5BLinear(layer_name='h', dim=10,%0A irange=0.01)%5D)%0A decoding_model = MLP(nvis=5, layers=%5BLinear(layer_name='h', dim=10,%0A irange=0.01)%5D)%0A visible = BinaryVisible(decoding_model=decoding_model)%0A latent = DiagonalGaussianPrior(encoding_model=encoding_model,%0A num_samples=10)%0A vae = VAE(nvis=10, visible=visible, latent=latent, nhid=5)%0A X = T.matrix('X')%0A lower_bound = vae.log_likelihood_lower_bound(X)%0A f = theano.function(inputs=%5BX%5D, outputs=lower_bound)%0A f(numpy.random.uniform(size=(10, 10)))%0A%0A%0Adef test_convolutional_compatible():%0A %22%22%22%0A VAE allows convolutional encoding networks%0A %22%22%22%0A encoding_model = MLP(%0A input_space=Conv2DSpace(shape=%5B4, 4%5D, num_channels=1),%0A layers=%5BConvRectifiedLinear(%0A layer_name='h',%0A output_channels=2,%0A 
kernel_shape=%5B2, 2%5D,%0A kernel_stride=%5B1, 1%5D,%0A pool_shape=%5B1, 1%5D,%0A pool_stride=%5B1, 1%5D,%0A pool_type='max',%0A irange=0.01%0A )%5D%0A )%0A decoding_model = MLP(nvis=5, layers=%5BLinear(layer_name='h', dim=16,%0A irange=0.01)%5D)%0A visible = BinaryVisible(decoding_model=decoding_model)%0A latent = DiagonalGaussianPrior(encoding_model=encoding_model,%0A num_samples=10)%0A vae = VAE(nvis=16, visible=visible, latent=latent, nhid=5)%0A X = T.matrix('X')%0A lower_bound = vae.log_likelihood_lower_bound(X)%0A f = theano.function(inputs=%5BX%5D, outputs=lower_bound)%0A f(numpy.random.uniform(size=(10, 16)))%0A
|
|
bf1e9434afc03a21cab5b274401a755c3b84196c
|
add event.brochure migration
|
migrations/mezzanine_agenda/0014_event_brochure.py
|
migrations/mezzanine_agenda/0014_event_brochure.py
|
Python
| 0
|
@@ -0,0 +1,533 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.2 on 2016-05-10 13:56%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0Aimport mezzanine.core.fields%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('mezzanine_agenda', '0013_auto_20160510_1542'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='event',%0A name='brochure',%0A field=mezzanine.core.fields.FileField(blank=True, max_length=1024, verbose_name='brochure'),%0A ),%0A %5D%0A
|
|
d0afa54fa01a2981c10f0bbdbc0f5eab4b5ad710
|
test that test fullscreen/resize with 3d actions
|
test/test_3d_fullscreen.py
|
test/test_3d_fullscreen.py
|
Python
| 0
|
@@ -0,0 +1,1225 @@
+# This code is so you can run the samples without installing the package%0Aimport sys%0Aimport os%0Asys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))%0A#%0A%0Aimport pyglet%0Aimport cocos%0Afrom cocos.director import director%0Afrom cocos.actions import *%0Afrom cocos.layer import *%0A%0A%0A%0Aclass BackgroundLayer( cocos.layer.Layer ):%0A def __init__(self):%0A super( BackgroundLayer, self ).__init__()%0A self.img = pyglet.resource.image('background_image.png')%0A%0A def on_draw( self ):%0A self.img.blit(0,0)%0A%0Adef toggle_fullscreen():%0A director.window.set_fullscreen( not director.window.fullscreen )%0A%0Aif __name__ == %22__main__%22:%0A director.init( resizable=True )%0A director.set_depth_test()%0A%0A main_scene = cocos.scene.Scene()%0A%0A main_scene.add( BackgroundLayer(), z=0 )%0A%0A # set a 3d grid with a grid3d action%0A e = WavesTiles3D( amplitude=60, waves=2, grid=(32,24), duration=3)%0A f = ShuffleTiles( duration=3, grid=(32,24) )%0A%0A main_scene.do( e + %5C%0A CallFunc( toggle_fullscreen ) + %5C%0A Reverse(e) + %5C%0A CallFunc(toggle_fullscreen) + %5C%0A f + %5C%0A CallFunc(toggle_fullscreen) + %5C%0A Reverse(f) + %5C%0A StopGrid() %5C%0A )%0A%0A director.run (main_scene)%0A
|
|
2948be3af67b7ec124942654dc7f734eec346f55
|
Check that the c_api module is not destroyed
|
tensorflow/python/framework/c_api_util.py
|
tensorflow/python/framework/c_api_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for using the TensorFlow C API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow as c_api
from tensorflow.python.util import compat
from tensorflow.python.util import tf_contextlib
class ScopedTFStatus(object):
"""Wrapper around TF_Status that handles deletion."""
def __init__(self):
self.status = c_api.TF_NewStatus()
def __del__(self):
# Note: when we're destructing the global context (i.e when the process is
# terminating) we can have already deleted other modules.
if c_api.TF_DeleteStatus is not None:
c_api.TF_DeleteStatus(self.status)
class ScopedTFGraph(object):
"""Wrapper around TF_Graph that handles deletion."""
def __init__(self):
self.graph = c_api.TF_NewGraph()
def __del__(self):
# Note: when we're destructing the global context (i.e when the process is
# terminating) we can have already deleted other modules.
if c_api.TF_DeleteGraph is not None:
c_api.TF_DeleteGraph(self.graph)
class ScopedTFImportGraphDefOptions(object):
"""Wrapper around TF_ImportGraphDefOptions that handles deletion."""
def __init__(self):
self.options = c_api.TF_NewImportGraphDefOptions()
def __del__(self):
# Note: when we're destructing the global context (i.e when the process is
# terminating) we can have already deleted other modules.
if c_api.TF_DeleteImportGraphDefOptions is not None:
c_api.TF_DeleteImportGraphDefOptions(self.options)
@tf_contextlib.contextmanager
def tf_buffer(data=None):
"""Context manager that creates and deletes TF_Buffer.
Example usage:
with tf_buffer() as buf:
# get serialized graph def into buf
...
proto_data = c_api.TF_GetBuffer(buf)
graph_def.ParseFromString(compat.as_bytes(proto_data))
# buf has been deleted
with tf_buffer(some_string) as buf:
c_api.TF_SomeFunction(buf)
# buf has been deleted
Args:
data: An optional `bytes`, `str`, or `unicode` object. If not None, the
yielded buffer will contain this data.
Yields:
Created TF_Buffer
"""
if data:
buf = c_api.TF_NewBufferFromString(compat.as_bytes(data))
else:
buf = c_api.TF_NewBuffer()
try:
yield buf
finally:
c_api.TF_DeleteBuffer(buf)
def tf_output(c_op, index):
"""Returns a wrapped TF_Output with specified operation and index.
Args:
c_op: wrapped TF_Operation
index: integer
Returns:
Wrapped TF_Output
"""
ret = c_api.TF_Output()
ret.oper = c_op
ret.index = index
return ret
def tf_operations(graph):
"""Generator that yields every TF_Operation in `graph`.
Args:
graph: Graph
Yields:
wrapped TF_Operation
"""
# pylint: disable=protected-access
pos = 0
c_op, pos = c_api.TF_GraphNextOperation(graph._c_graph, pos)
while c_op is not None:
yield c_op
c_op, pos = c_api.TF_GraphNextOperation(graph._c_graph, pos)
# pylint: enable=protected-access
def new_tf_operations(graph):
"""Generator that yields newly-added TF_Operations in `graph`.
Specifically, yields TF_Operations that don't have associated Operations in
`graph`. This is useful for processing nodes added by the C API.
Args:
graph: Graph
Yields:
wrapped TF_Operation
"""
# TODO(b/69679162): do this more efficiently
for c_op in tf_operations(graph):
try:
graph._get_operation_by_tf_operation(c_op) # pylint: disable=protected-access
except KeyError:
yield c_op
|
Python
| 0
|
@@ -1302,32 +1302,54 @@
modules.%0A if
+c_api is not None and
c_api.TF_DeleteS
@@ -1359,32 +1359,32 @@
us is not None:%0A
-
c_api.TF_D
@@ -1716,32 +1716,54 @@
modules.%0A if
+c_api is not None and
c_api.TF_DeleteG
@@ -2174,31 +2174,53 @@
er modules.%0A
-
if
+c_api is not None and
c_api.TF_Del
|
3ad49c6d2ca7ca5f04302f8ee125741ddb68c879
|
decode base64 output string to UTF-8
|
liquid_tags/graphviz.py
|
liquid_tags/graphviz.py
|
"""
GraphViz Tag
---------
This implements a Liquid-style graphviz tag for Pelican. You can use different
Graphviz programs like dot, neato, twopi etc. [1]
[1] http://www.graphviz.org/
Syntax
------
{% graphviz
<program> {
<DOT code>
}
%}
Examples
--------
{% graphviz
dot {
digraph graphname {
a -> b -> c;
b -> d;
}
}
%}
{% graphviz
twopi {
<code goes here>
}
%}
{% graphviz
neato {
<code goes here>
}
%}
...
Output
------
<div class="graphviz" style="text-align: center;"><img src="data:image/png;base64,_BASE64_IMAGE DATA_/></div>
"""
import base64
import re
from .mdx_liquid_tags import LiquidTags
SYNTAX = '{% dot graphviz [program] [dot code] %}'
DOT_BLOCK_RE = re.compile(r'^\s*(?P<program>\w+)\s*\{\s*(?P<code>.*\})\s*\}$', re.MULTILINE | re.DOTALL)
def run_graphviz(program, code, options=[], format='png'):
""" Runs graphviz programs and returns image data
Copied from https://github.com/tkf/ipython-hierarchymagic/blob/master/hierarchymagic.py
"""
import os
from subprocess import Popen, PIPE
dot_args = [program] + options + ['-T', format]
if os.name == 'nt':
# Avoid opening shell window.
# * https://github.com/tkf/ipython-hierarchymagic/issues/1
# * http://stackoverflow.com/a/2935727/727827
p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE, creationflags=0x08000000)
else:
p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
wentwrong = False
try:
# Graphviz may close standard input when an error occurs,
# resulting in a broken pipe on communicate()
stdout, stderr = p.communicate(code.encode('utf-8'))
except (OSError, IOError) as err:
if err.errno != EPIPE:
raise
wentwrong = True
except IOError as err:
if err.errno != EINVAL:
raise
wentwrong = True
if wentwrong:
# in this case, read the standard output and standard error streams
# directly, to get the error message(s)
stdout, stderr = p.stdout.read(), p.stderr.read()
p.wait()
if p.returncode != 0:
raise RuntimeError('dot exited with error:\n[stderr]\n{0}'.format(stderr.decode('utf-8')))
return stdout
@LiquidTags.register('graphviz')
def graphviz_parser(preprocessor, tag, markup):
""" Simple Graphviz parser """
# Parse the markup string
m = DOT_BLOCK_RE.search(markup)
if m:
# Get program and DOT code
code = m.group('code')
program = m.group('program').strip()
# Run specified program with our markup
output = run_graphviz(program, code)
# Return Base64 encoded image
return '<div class="graphviz" style="text-align: center;"><img src="data:image/png;base64,%s"></div>' % base64.b64encode(output)
else:
raise ValueError('Error processing input. '
'Expected syntax: {0}'.format(SYNTAX))
#----------------------------------------------------------------------
# This import allows image tag to be a Pelican plugin
from .liquid_tags import register
|
Python
| 0.999998
|
@@ -2901,16 +2901,32 @@
(output)
+.decode('utf-8')
%0A%0A el
|
126af71599e14866501e2e9f479b2658fff56526
|
Create ipc_lista2.06.py
|
lista2/ipc_lista2.06.py
|
lista2/ipc_lista2.06.py
|
Python
| 0.000001
|
@@ -0,0 +1 @@
+%0A
|
|
dac5f9e406f3c205d6ed212d4414ca55c94b8f15
|
Add test for exact search with album query
|
tests/local/test_search.py
|
tests/local/test_search.py
|
Python
| 0
|
@@ -0,0 +1,507 @@
+from __future__ import unicode_literals%0A%0Aimport unittest%0A%0Afrom mopidy.local import search%0Afrom mopidy.models import Album, Track%0A%0A%0Aclass LocalLibrarySearchTest(unittest.TestCase):%0A def test_find_exact_with_album_query(self):%0A expected_tracks = %5BTrack(album=Album(name='foo'))%5D%0A tracks = %5BTrack(), Track(album=Album(name='bar'))%5D + expected_tracks%0A%0A search_result = search.find_exact(tracks, %7B'album': %5B'foo'%5D%7D)%0A%0A self.assertEqual(search_result.tracks, tuple(expected_tracks))%0A
|
|
5701de3e75de98c939eaadcb8d05b6ab228ded8b
|
Make output tabular
|
tests/site_detect_tests.py
|
tests/site_detect_tests.py
|
# -*- coding: utf-8 -*-
"""Test for site detection."""
#
# (C) Pywikibot team, 2014-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
from pywikibot.site_detect import MWSite
from pywikibot.tools import PY2
from tests.aspects import unittest, TestCase
if not PY2:
basestring = (str,)
class TestWikiSiteDetection(TestCase):
"""Test Case for MediaWiki detection and site object creation."""
family = 'meta'
code = 'meta'
net = True
def setUp(self):
"""Set up test."""
self.failures = {}
self.errors = {}
self.passes = {}
self.all = []
super(TestWikiSiteDetection, self).setUp()
def tearDown(self):
"""Tear Down test."""
def norm(url):
res = None
typ = -1
for pos, result in enumerate([self.passes, self.errors,
self.failures]):
if url in result:
assert res is None
res = result[url]
typ = pos
if res is None:
typ += 1
res = 'Missing'
assert 0 <= pos < len(PREFIXES)
return typ, url, res
super(TestWikiSiteDetection, self).tearDown()
print('Out of %d sites, %d tests passed, %d tests failed '
'and %d tests raised an error'
% (len(self.all), len(self.passes), len(self.failures), len(self.errors)
)
)
PREFIXES = ['PASS', 'ERR ', 'FAIL', 'MISS']
sorted_all = sorted((norm(url) for url in self.all),
key=lambda item: item[0])
print('Results:\n' + '\n'.join(
'{0} {1} : {2}'.format(PREFIXES[i[0]], i[1], i[2])
for i in sorted_all))
def _wiki_detection(self, url, result):
"""Perform one load test."""
self.all += [url]
try:
site = MWSite(url)
except Exception as e:
print('failed on ' + url)
self.errors[url] = e
return
try:
if result is None:
self.assertIsNone(site)
else:
self.assertIsInstance(site, result)
self.passes[url] = result
except AssertionError as error:
self.failures[url] = error
def assertSite(self, url):
"""Assert a MediaWiki site can be loaded from the url."""
self._wiki_detection(url, MWSite)
def assertNoSite(self, url):
"""Assert a url is not a MediaWiki site."""
self._wiki_detection(url, None)
def test_IWM(self):
"""Test the load_site method for MW sites on the IWM list."""
data = self.get_site().siteinfo['interwikimap']
for item in data:
if 'local' not in item:
url = item['url']
self.all += [url]
try:
site = MWSite(url)
except Exception as error:
print('failed to load ' + url)
self.errors[url] = error
continue
if type(site) is MWSite:
try:
version = site.version
except Exception as error:
print('failed to get version of ' + url)
self.errors[url] = error
else:
try:
self.assertIsInstance(version, basestring)
self.assertRegex(version, r'^\d\.\d+.*')
self.passes[url] = site
except AssertionError as error:
print('failed to parse version of ' + url)
self.failures[url] = error
def test_detect_site(self):
"""Test detection of MediaWiki sites."""
self.assertSite('http://botwiki.sno.cc/wiki/$1')
self.assertSite('http://glossary.reuters.com/index.php?title=$1')
self.assertSite('http://www.livepedia.gr/index.php?title=$1')
self.assertSite('http://guildwars.wikia.com/wiki/$1')
self.assertSite('http://www.hrwiki.org/index.php/$1')
self.assertSite('http://www.proofwiki.org/wiki/$1')
self.assertSite(
'http://www.ck-wissen.de/ckwiki/index.php?title=$1')
self.assertSite('http://en.citizendium.org/wiki/$1')
self.assertSite(
'http://www.lojban.org/tiki/tiki-index.php?page=$1')
self.assertSite('http://www.EcoReality.org/wiki/$1')
self.assertSite('http://www.wikichristian.org/index.php?title=$1')
self.assertSite('http://wikitree.org/index.php?title=$1')
self.assertEqual(len(self.passes), 12)
self.assertEqual(len(self.failures), 0)
self.assertEqual(len(self.errors), 0)
def test_detect_failure(self):
"""Test detection failure for MediaWiki sites with an API."""
self.assertNoSite('https://en.wikifur.com/wiki/$1')
# api.php is not available
self.assertNoSite('http://wiki.animutationportal.com/index.php/$1')
# API is disabled
self.assertNoSite('http://wiki.linuxquestions.org/wiki/$1')
# offline
self.assertNoSite('http://seattlewiki.org/wiki/$1')
self.assertEqual(len(self.errors), 4)
def test_pre_api_sites(self):
"""Test detection of MediaWiki sites prior to the API."""
self.assertNoSite('http://www.wikif1.org/$1')
self.assertNoSite('http://www.thelemapedia.org/index.php/$1')
self.assertNoSite('http://esperanto.blahus.cz/cxej/vikio/index.php/$1')
self.assertNoSite('http://www.werelate.org/wiki/$1')
self.assertNoSite('http://www.otterstedt.de/wiki/index.php/$1')
self.assertNoSite('http://kb.mozillazine.org/$1')
self.assertEqual(len(self.errors), 6)
def test_detect_nosite(self):
"""Test detection of non-wiki sites."""
self.assertNoSite('http://bluwiki.com/go/$1')
self.assertNoSite('http://www.imdb.com/name/nm$1/')
self.assertNoSite('http://www.ecyrd.com/JSPWiki/Wiki.jsp?page=$1')
self.assertNoSite('http://operawiki.info/$1')
self.assertNoSite(
'http://www.tvtropes.org/pmwiki/pmwiki.php/Main/$1')
self.assertNoSite('http://c2.com/cgi/wiki?$1')
self.assertNoSite('https://phabricator.wikimedia.org/$1')
self.assertNoSite(
'http://www.merriam-webster.com/cgi-bin/dictionary?book=Dictionary&va=$1')
self.assertNoSite('http://arxiv.org/abs/$1')
self.assertNoSite('http://musicbrainz.org/doc/$1')
self.assertNoSite('http://wiki.animutationportal.com/index.php/$1')
self.assertEqual(len(self.errors), 11)
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
|
Python
| 0.000025
|
@@ -1731,16 +1731,73 @@
tem%5B0%5D)%0A
+ width = max(len(item%5B1%5D) for item in sorted_all)%0A
@@ -1847,16 +1847,20 @@
'%7B0%7D %7B1
+:%7B3%7D
%7D : %7B2%7D'
@@ -1893,16 +1893,23 @@
1%5D, i%5B2%5D
+, width
)%0A
|
a99e92756a10529ca1e52d1d351bc43fea067b35
|
Add fabfile for API
|
packaging/radar-api/fabfile.py
|
packaging/radar-api/fabfile.py
|
Python
| 0
|
@@ -0,0 +1,892 @@
+import os%0Aimport re%0A%0Afrom fabric.api import task, put, run, cd%0A%0A%0A@task%0Adef deploy(archive=None, name='radar-api'):%0A if archive is None:%0A # Use the latest archive by default%0A archive = sorted(x for x in os.listdir('.') if x.endswith('.tar.gz'))%5B-1%5D%0A%0A version = re.search('-(%5B%5E-%5D+-%5B%5E-%5D+)%5C.tar%5C.gz$', archive).group(1)%0A%0A tmp_archive_path = '/tmp/%25s.tar.gz' %25 name%0A put(archive, tmp_archive_path)%0A%0A tmp_path = '/tmp/%25s' %25 name%0A run('rm -rf %7Bpath%7D && mkdir -p %7Bpath%7D'.format(path=tmp_path))%0A%0A current_version = '/srv/%7Bname%7D/current'.format(name=name)%0A new_version = '/srv/%7Bname%7D/%7Bversion%7D'.format(name=name, version=version)%0A%0A with cd(tmp_path):%0A run('tar --strip-components=1 -xzf %25s' %25 tmp_archive_path)%0A run('./install.sh %25s' %25 new_version)%0A%0A run('ln -sf %25s %25s' %25 (new_version, current_version))%0A run('rm -rf %25s' %25 tmp_archive_path)%0A
|
|
246ed2ba33f21b696af2a00793a521bc77da2a45
|
add excel-sheet-column-title
|
vol4/excel-sheet-column-title/excel-sheet-column-title.py
|
vol4/excel-sheet-column-title/excel-sheet-column-title.py
|
Python
| 0.000386
|
@@ -0,0 +1,314 @@
+import string%0A%0Aclass Solution(object):%0A def convertToTitle(self, n):%0A %22%22%22%0A :type n: int%0A :rtype: str%0A %22%22%22%0A alphabet = string.uppercase%0A ret = ''%0A while n %3E 0:%0A ret = alphabet%5B(n - 1) %25 26%5D + ret%0A n = (n - 1) / 26%0A return ret%0A
|
|
2674b886b786086ec62a18b953e80ec6fceaa59d
|
Bump subminor version (2.0.5 -> 2.0.6)
|
endpoints/__init__.py
|
endpoints/__init__.py
|
#!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Endpoints module."""
# pylint: disable=wildcard-import
from api_config import api
from api_config import API_EXPLORER_CLIENT_ID
from api_config import AUTH_LEVEL
from api_config import EMAIL_SCOPE
from api_config import Issuer
from api_config import method
from api_exceptions import *
from apiserving import *
from endpoints_dispatcher import *
import message_parser
from resource_container import ResourceContainer
from users_id_token import get_current_user
from users_id_token import InvalidGetUserCall
from users_id_token import SKIP_CLIENT_ID_CHECK
__version__ = '2.0.5'
|
Python
| 0
|
@@ -1208,7 +1208,7 @@
2.0.
-5
+6
'%0A
|
0ca298f6706706637dccd4f27c56eed6e91c98ba
|
Rename new test class correctly and flesh out first passing tests
|
tests/runners.py
|
tests/runners.py
|
from spec import Spec
class Runner_(Spec):
class run:
def out_stream_defaults_to_sys_stdout(self):
"out_stream defaults to sys.stdout"
def err_stream_defaults_to_sys_stderr(self):
"err_stream defaults to sys.stderr"
def out_stream_can_be_overridden(self):
"out_stream can be overridden"
def err_stream_can_be_overridden(self):
"err_stream can be overridden"
|
Python
| 0.000002
|
@@ -1,8 +1,20 @@
+import sys%0A%0A
from spe
@@ -26,16 +26,98 @@
ort Spec
+, trap, eq_%0A%0Afrom invoke import Local, Context%0A%0Afrom _utils import mock_subprocess
%0A%0A%0Aclass
@@ -121,14 +121,13 @@
ass
-Runner
+Local
_(Sp
@@ -146,16 +146,66 @@
ss run:%0A
+ @trap%0A @mock_subprocess(out=%22sup%22)%0A
@@ -297,16 +297,156 @@
stdout%22%0A
+ Local(Context()).run(%22command%22)%0A eq_(sys.stdout.getvalue(), %22sup%22)%0A%0A @trap%0A @mock_subprocess(err=%22sup%22)
%0A
@@ -538,16 +538,106 @@
.stderr%22
+%0A Local(Context()).run(%22command%22)%0A eq_(sys.stderr.getvalue(), %22sup%22)
%0A%0A
|
f6bd8bcf45d182e3aa8edd3cf0fef5aa35125e31
|
create ccc.py
|
src/ccc.py
|
src/ccc.py
|
Python
| 0
|
@@ -0,0 +1,46 @@
+#%0A# ccc.py%0A# Created by pira on 2017/07/28.%0A#%0A
|
|
0843eba3476809e833ea52611d9e193bf0872dbd
|
Add Back Builder Code
|
tools/Builder.py
|
tools/Builder.py
|
Python
| 0
|
@@ -0,0 +1,1828 @@
+import os%0Aimport hashlib%0Aimport RandomIO%0A%0A%0A# config vars%0Amy_address = %221CutsncbjcCtZKeRfvQ7bnYFVj28zeU6fo%22%0Amy_store_path = %22C://Farm/%22%0Amy_shard_size = 1024*1024*128 # 128 MB%0Amy_max_size = 1024*1024*640 # 640 MB%0A%0A%0Aclass Builder:%0A def __init__(self, address, shard_size, max_size):%0A self.address = address%0A self.shard_size = shard_size%0A self.max_size = max_size%0A%0A @staticmethod%0A def sha256(content):%0A %22%22%22Finds the SHA-256 hash of the content.%22%22%22%0A content = content.encode('utf-8')%0A return hashlib.sha256(content).hexdigest()%0A%0A def build_seed(self, height):%0A %22%22%22Deterministically build a seed.%22%22%22%0A seed = self.sha256(self.address)%0A for i in range(height):%0A seed = self.sha256(seed)%0A return seed%0A%0A def generate_shard(self, seed, store_path, cleanup=False):%0A %22%22%22Save a shard, and return its SHA-256 hash.%22%22%22%0A tmp_file = RandomIO.RandomIO(seed).read(self.shard_size) # temporarily generate file%0A file_hash = hashlib.sha256(tmp_file).hexdigest() # get SHA-256 hash%0A RandomIO.RandomIO(seed).genfile(self.shard_size, store_path+file_hash) # save the shard%0A if cleanup:%0A os.remove(store_path+file_hash)%0A return file_hash%0A%0A def build(self, store_path, debug=False, cleanup=False):%0A %22%22%22Fill the farmer with data up to their max.%22%22%22%0A for shard_num in range(int(self.max_size/self.shard_size)):%0A seed = self.build_seed(shard_num)%0A file_hash = self.generate_shard(seed, store_path, cleanup)%0A%0A if debug:%0A print(%22Saving seed %7B0%7D with SHA-256 hash %7B1%7D.%22.format(seed, file_hash))%0A%0A%0Aif __name__ == %22__main__%22: # pragma: no cover%0A bucket = Builder(my_address, my_shard_size, my_max_size)%0A bucket.build(my_store_path, True)%0A
|
|
8b7d1ffb2461e12b5cbce6873e51ca14f9d8cf90
|
Revert "Accidentally deleted manage.py"
|
SnookR/manage.py
|
SnookR/manage.py
|
Python
| 0
|
@@ -0,0 +1,804 @@
+#!/usr/bin/env python%0Aimport os%0Aimport sys%0A%0Aif __name__ == %22__main__%22:%0A os.environ.setdefault(%22DJANGO_SETTINGS_MODULE%22, %22SnookR.settings%22)%0A try:%0A from django.core.management import execute_from_command_line%0A except ImportError:%0A # The above import may fail for some other reason. Ensure that the%0A # issue is really that Django is missing to avoid masking other%0A # exceptions on Python 2.%0A try:%0A import django%0A except ImportError:%0A raise ImportError(%0A %22Couldn't import Django. Are you sure it's installed and %22%0A %22available on your PYTHONPATH environment variable? Did you %22%0A %22forget to activate a virtual environment?%22%0A )%0A raise%0A execute_from_command_line(sys.argv)%0A
|
|
202c8472298780cbf80bf3253550e4277236c0c9
|
add simple noise fitting code
|
kid_readout/analysis/noise_fit.py
|
kid_readout/analysis/noise_fit.py
|
Python
| 0
|
@@ -0,0 +1,918 @@
+import numpy as np%0Aimport lmfit%0Afrom kid_readout.analysis import fitter%0Aprint %22k%22%0Adef lorenz(f,fc,a):%0A return a/(1+(f/fc)**2)%0A%0Adef simple_noise_model(params,f):%0A A = params%5B'A'%5D.value%0A fc = params%5B'fc'%5D.value%0A nw = params%5B'nw'%5D.value%0A return lorenz(f,fc,A) + nw%0A%0Adef simple_noise_guess(f,S):%0A params = lmfit.Parameters()%0A params.add('A',(S.max()-S.min()),min=0,max=S.max())%0A params.add('nw',(S.max()+S.min())/2.0,min=S.min()/2,max=S.max())%0A params.add('fc',500.0,min=10,max=1e4)%0A return params%0A%0A%0Aclass SingleLorenzModel(fitter.Fitter):%0A def __init__(self, f, data, model=simple_noise_model, guess=simple_noise_guess, functions=%7B%7D,%0A mask=None, errors=None, weight_by_errors=True):%0A super(SingleLorenzModel,self).__init__(f,data,model=model,guess=guess,functions=functions,mask=mask,%0A errors=errors,weight_by_errors=weight_by_errors)%0A
|
|
5879e59f34e31707f207c588143711dbdf18ee8b
|
remove login_required for register page
|
app/main/views/index.py
|
app/main/views/index.py
|
from flask import render_template
from flask_login import login_required
from app.main import main
@main.route('/')
def index():
return render_template('signedout.html')
@main.route("/govuk")
def govuk():
return render_template('govuk_template.html')
@main.route("/register")
@login_required
def register():
return render_template('register.html')
@main.route("/register-from-invite")
@login_required
def registerfrominvite():
return render_template('register-from-invite.html')
@main.route("/verify")
@login_required
def verify():
return render_template('verify.html')
@main.route("/verify-mobile")
@login_required
def verifymobile():
return render_template('verify-mobile.html')
@main.route("/text-not-received-2")
def textnotreceived2():
return render_template('text-not-received-2.html')
@main.route("/dashboard")
@login_required
def dashboard():
return render_template('dashboard.html')
@main.route("/add-service")
@login_required
def addservice():
return render_template('add-service.html')
@main.route("/two-factor")
@login_required
def twofactor():
return render_template('two-factor.html')
@main.route("/send-sms")
def sendsms():
return render_template('send-sms.html')
@main.route("/check-sms")
def checksms():
return render_template('check-sms.html')
@main.route("/email-not-received")
def emailnotreceived():
return render_template('email-not-received.html')
@main.route("/text-not-received")
def textnotreceived():
return render_template('text-not-received.html')
@main.route("/send-email")
def sendemail():
return render_template('send-email.html')
@main.route("/check-email")
def checkemail():
return render_template('check-email.html')
@main.route("/jobs")
def showjobs():
return render_template('jobs.html')
@main.route("/jobs/job")
def showjob():
return render_template('job.html')
@main.route("/jobs/job/notification")
def shownotification():
return render_template('notification.html')
@main.route("/forgot-password")
def forgotpassword():
return render_template('forgot-password.html')
@main.route("/new-password")
def newpassword():
return render_template('new-password.html')
@main.route("/user-profile")
def userprofile():
return render_template('user-profile.html')
@main.route("/manage-users")
def manageusers():
return render_template('manage-users.html')
@main.route("/service-settings")
def servicesettings():
return render_template('service-settings.html')
@main.route("/api-keys")
def apikeys():
return render_template('api-keys.html')
@main.route("/verification-not-received")
def verificationnotreceived():
return render_template('verification-not-received.html')
|
Python
| 0
|
@@ -284,32 +284,16 @@
ister%22)%0A
-@login_required%0A
def regi
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.