commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
a0bd114b8caf75d28bc52a3aba10494660e6735a
|
Add lc0138_copy_list_with_random_pointer.py
|
lc0138_copy_list_with_random_pointer.py
|
lc0138_copy_list_with_random_pointer.py
|
Python
| 0.000001
|
@@ -0,0 +1,1060 @@
+%22%22%22Leetcode 138. Copy List with Random Pointer%0AMedium%0A%0AURL: https://leetcode.com/problems/copy-list-with-random-pointer/%0A%0AA linked list is given such that each node contains an additional %0Arandom pointer which could point to any node in the list or null.%0A%0AReturn a deep copy of the list. %0A%0AExample 1:%0AInput:%0A%7B%22$id%22:%221%22,%22next%22:%7B%22$id%22:%222%22,%22next%22:null,%22random%22:%7B%22$ref%22:%222%22%7D,%22val%22:2%7D,%0A %22random%22:%7B%22$ref%22:%222%22%7D,%22val%22:1%7D%0AExplanation:%0ANode 1's value is 1, both of its next and random pointer points to Node 2.%0ANode 2's value is 2, its next pointer points to null and %0Aits random pointer points to itself.%0A %0ANote:%0AYou must return the copy of the given head as a reference to the cloned list.%0A%22%22%22%0A%0A# Definition for a Node.%0Aclass Node(object):%0A def __init__(self, val, next, random):%0A self.val = val%0A self.next = next%0A self.random = random%0A%0A%0Aclass Solution(object):%0A def copyRandomList(self, head):%0A %22%22%22%0A :type head: Node%0A :rtype: Node%0A %22%22%22%0A pass%0A%0A%0Adef main():%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
f8bde0dd523a46d81a64f9b3bd8633be0bf6676d
|
Create an exception to throw when a user tries to tag a bad commit
|
webserver/codemanagement/exceptions.py
|
webserver/codemanagement/exceptions.py
|
Python
| 0.000001
|
@@ -0,0 +1,51 @@
+class CodeManagementException(Exception):%0A pass%0A
|
|
1120f16569e1bc9c9675a9cefd782b09266cd82c
|
Add initial k-means algo
|
avocado/stats/kmeans.py
|
avocado/stats/kmeans.py
|
Python
| 0.999978
|
@@ -0,0 +1,1384 @@
+import math%0Afrom random import random%0Afrom collections import namedtuple%0A%0APoint = namedtuple('Point', ('coords', 'n', 'ct'))%0ACluster = namedtuple('Cluster', ('points', 'center', 'n'))%0A%0A%0Adef euclidean(p1, p2):%0A return math.sqrt(sum(%5B%0A (p1.coords%5Bi%5D - p2.coords%5Bi%5D) ** 2 for i in range(p1.n)%0A %5D))%0A%0Adef calculate_center(points, n):%0A vals = %5B0.0 for i in range(n)%5D%0A plen = 0%0A for p in points:%0A plen += p.ct%0A for i in range(n):%0A vals%5Bi%5D += (p.coords%5Bi%5D * p.ct)%0A return Point(%5B(v / plen) for v in vals%5D, n, 1)%0A%0Adef kmeans(points, k, min_diff):%0A clusters = %5BCluster(%5Bp%5D, p, p.n) for p in random.sample(points, k)%5D%0A%0A while True:%0A plists = %5B%5B%5D for i in range(k)%5D%0A%0A for p in points:%0A smallest_distance = float('Inf')%0A for i in range(k):%0A distance = euclidean(p, clusters%5Bi%5D.center)%0A if distance %3C smallest_distance:%0A smallest_distance = distance%0A idx = i%0A plists%5Bidx%5D.append(p)%0A%0A diff = 0%0A for i in range(k):%0A old = clusters%5Bi%5D%0A center = calculate_center(plists%5Bi%5D, old.n)%0A new = Cluster(plists%5Bi%5D, center, old.n)%0A clusters%5Bi%5D = new%0A diff = max(diff, euclidean(old.center, new.center))%0A%0A if diff %3C min_diff:%0A break%0A%0A return clusters%0A
|
|
abf2fa18769470d93adf9c0c4113b0d13c55836d
|
Fixed a typo.
|
tensorflow/python/profiler/profiler_client.py
|
tensorflow/python/profiler/profiler_client.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Profiler client APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import errors
from tensorflow.python.profiler.internal import _pywrap_profiler
from tensorflow.python.util.tf_export import tf_export
_GRPC_PREFIX = 'grpc://'
@tf_export('profiler.experimental.client.trace', v1=[])
def trace(service_addr,
logdir,
duration_ms,
worker_list='',
num_tracing_attempts=3,
options=None):
"""Sends gRPC requests to one or more profiler servers to perform on-demand profiling.
This method will block the calling thread until it receives responses from all
servers or until deadline expiration. Both single host and multiple host
profiling are supported on CPU, GPU, and TPU.
The profiled results will be saved by each server to the specified TensorBoard
log directory (i.e. the directory you save your model checkpoints). Use the
TensorBoard profile plugin to view the visualization and analysis results.
Args:
service_addr: A comma delimited string of gRPC addresses of the workers to
profile.
e.g. service_addr='grpc://localhost:6009'
service_addr='grpc://10.0.0.2:8466,grpc://10.0.0.3:8466'
service_addr='grpc://localhost:12345,grpc://localhost:23456'
logdir: Path to save profile data to, typically a TensorBoard log directory.
This path must be accessible to both the client and server.
e.g. logdir='gs://your_tb_dir'
duration_ms: Duration of tracing or monitoring in mliiseconds. Must be
greater than zero.
worker_list: An optional TPU only configuration. The list of workers to
profile in the current session.
num_tracing_attempts: Optional. Automatically retry N times when no trace
event is collected (default 3).
options: profiler.experimental.ProfilerOptions namedtuple for miscellaneous
profiler options.
Raises:
InvalidArgumentError: For when arguments fail validation checks.
UnavailableError: If no trace event was collected.
Example usage (CPU/GPU):
# Start a profiler server before your model runs.
```python
tf.profiler.experimental.server.start(6009)
# (Model code goes here).
# Send gRPC request to the profiler server to collect a trace of your model.
```python
tf.profiler.experimental.client.trace('grpc://localhost:6009',
'/nfs/tb_log', 2000)
Example usage (Multiple GPUs):
# E.g. your worker IP addresses are 10.0.0.2, 10.0.0.3, 10.0.0.4, and you
# would like to schedule start of profiling 1 second from now, for a duration
# of 2 seconds.
options['delay_ms'] = 1000
tf.profiler.experimental.client.trace(
'grpc://10.0.0.2:8466,grpc://10.0.0.3:8466,grpc://10.0.0.4:8466',
'gs://your_tb_dir',
2000,
options=options)
Example usage (TPU):
# Send gRPC request to a TPU worker to collect a trace of your model. A
# profiler service has been started in the TPU worker at port 8466.
```python
# E.g. your TPU IP address is 10.0.0.2 and you want to profile for 2 seconds.
tf.profiler.experimental.client.trace('grpc://10.0.0.2:8466',
'gs://your_tb_dir', 2000)
Example usage (Multiple TPUs):
# Send gRPC request to a TPU pod to collect a trace of your model on multiple
# TPUs. A profiler service has been started in all the TPU workers at the
# port 8466.
```python
# E.g. your TPU IP addresses are 10.0.0.2, 10.0.0.3, 10.0.0.4, and you want to
# profile for 2 seconds.
tf.profiler.experimental.client.trace('grpc://10.0.0.2:8466',
'gs://your_tb_dir',
2000, '10.0.0.2,10.0.0.3,10.0.0.4')
Launch TensorBoard and point it to the same logdir you provided to this API.
$ tensorboard --logdir=/tmp/tb_log (or gs://your_tb_dir in the above examples)
Open your browser and go to localhost:6006/#profile to view profiling results.
"""
if duration_ms <= 0:
raise errors.InvalidArgumentError(None, None,
'duration_ms must be greater than zero.')
opts = dict(options._asdict()) if options is not None else {}
_pywrap_profiler.trace(
_strip_addresses(service_addr, _GRPC_PREFIX), logdir, worker_list, True,
duration_ms, num_tracing_attempts, opts)
@tf_export('profiler.experimental.client.monitor', v1=[])
def monitor(service_addr, duration_ms, level=1):
"""Sends grpc requests to profiler server to perform on-demand monitoring.
The monitoring result is a light weight performance summary of your model
execution. This method will block the caller thread until it receives the
monitoring result. This method currently supports Cloud TPU only.
Args:
service_addr: gRPC address of profiler service e.g. grpc://10.0.0.2:8466.
duration_ms: Duration of monitoring in ms.
level: Choose a monitoring level between 1 and 2 to monitor your job. Level
2 is more verbose than level 1 and shows more metrics.
Returns:
A string of monitoring output.
Example usage:
# Continuously send gRPC requests to the Cloud TPU to monitor the model
# execution.
```python
for query in range(0, 100):
print(tf.profiler.experimental.client.monitor('grpc://10.0.0.2:8466', 1000))
"""
return _pywrap_profiler.monitor(
_strip_prefix(service_addr, _GRPC_PREFIX), duration_ms, level, True)
def _strip_prefix(s, prefix):
return s[len(prefix):] if s.startswith(prefix) else s
def _strip_addresses(addresses, prefix):
return ','.join([_strip_prefix(s, prefix) for s in addresses.split(',')])
|
Python
| 0.999721
|
@@ -2283,18 +2283,19 @@
ing in m
-l
i
+ll
iseconds
|
f1cfe335dce4c57778d28f0cbf0be48447b60805
|
normalize pressure and radius, according to python-for-android / API doc
|
kivy/input/providers/androidjoystick.py
|
kivy/input/providers/androidjoystick.py
|
# pylint: disable=W0611
__all__ = ('AndroidMotionEventProvider', )
import os
try:
import android
except ImportError:
if 'KIVY_DOC' not in os.environ:
raise Exception('android lib not found.')
from kivy.logger import Logger
from kivy.input.provider import MotionEventProvider
from kivy.input.factory import MotionEventFactory
from kivy.input.shape import ShapeRect
from kivy.input.motionevent import MotionEvent
import pygame.joystick
class AndroidMotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
self.profile = ['pos', 'pressure', 'shape']
self.sx, self.sy, self.pressure, radius = args
self.shape = ShapeRect()
self.shape.width = radius
self.shape.height = radius
super(AndroidMotionEvent, self).depack(args)
class AndroidMotionEventProvider(MotionEventProvider):
def __init__(self, device, args):
super(AndroidMotionEventProvider, self).__init__(device, args)
self.joysticks = []
self.touches = {}
self.uid = 0
self.window = None
def create_joystick(self, index):
Logger.info('Android: create joystick <%d>' % index)
js = pygame.joystick.Joystick(index)
js.init()
if js.get_numbuttons() == 0:
Logger.info('Android: discard joystick <%d> cause no button' %
index)
return
self.joysticks.append(js)
def start(self):
pygame.joystick.init()
Logger.info('Android: found %d joystick' % pygame.joystick.get_count())
for i in xrange(pygame.joystick.get_count()):
self.create_joystick(i)
def stop(self):
self.joysticks = []
def update(self, dispatch_fn):
if not self.window:
from kivy.core.window import Window
self.window = Window
w, h = self.window.system_size
touches = self.touches
for joy in self.joysticks:
jid = joy.get_id()
pressed = joy.get_button(0)
x = joy.get_axis(0) * 32768. / w
y = 1. - (joy.get_axis(1) * 32768. / h)
pressure = joy.get_axis(2)
radius = joy.get_axis(3)
# new touche ?
if pressed and jid not in touches:
self.uid += 1
touch = AndroidMotionEvent(self.device, self.uid,
[x, y, pressure, radius])
touches[jid] = touch
dispatch_fn('begin', touch)
# update touch
elif pressed:
touch = touches[jid]
# avoid same touch position
if touch.sx == x and touch.sy == y \
and touch.pressure == pressure:
#print 'avoid moving.', touch.uid, x, y, pressure, radius
continue
touch.move([x, y, pressure, radius])
dispatch_fn('update', touch)
# disapear
elif not pressed and jid in touches:
touch = touches[jid]
touch.move([x, y, pressure, radius])
touch.update_time_end()
dispatch_fn('end', touch)
touches.pop(jid)
MotionEventFactory.register('android', AndroidMotionEventProvider)
|
Python
| 0
|
@@ -2167,45 +2167,125 @@
s(2)
-%0A radius = joy.get_axis(3)
+ / 1000. # python for android do * 1000.%0A radius = joy.get_axis(3) / 1000. # python for android do * 1000.
%0A%0A
|
1de5cfe8714c140a79358e4287645f21095abad7
|
Create sarafu.py
|
sarafu.py
|
sarafu.py
|
Python
| 0.000002
|
@@ -0,0 +1,34 @@
+def (sarafu)%0A print (%22sarafu%22)%0A
|
|
d8878fdfae5e4ca61de8b980bbc753f9e86ac655
|
Add botaddnitf plugin
|
hangupsbot/plugins/botaddnotif.py
|
hangupsbot/plugins/botaddnotif.py
|
Python
| 0.000001
|
@@ -0,0 +1,1046 @@
+%22%22%22%0APlugin for monitoring if bot is added to a HO and report it to the bot admins.%0AAdd a %22botaddnotif_enable%22: true parameter in the config.json file.%0A%0AAuthor: @cd334%0A%22%22%22%0A%0Aimport asyncio %0Aimport logging%0Aimport hangups%0Aimport plugins%0A%0Alogger = logging.getLogger(__name__)%0A%0Adef _initialise(bot):%0A plugins.register_handler(_handle_join_notify, type=%22membership%22)%0A%0A@asyncio.coroutine%0Adef _handle_join_notify(bot, event, command):%0A if not event.conv_event.type_ == hangups.MembershipChangeType.JOIN:%0A return%0A %0A bot_id = bot._user_list._self_user.id_%0A %0A if not bot_id in event.conv_event.participant_ids:%0A return%0A%0A enable = bot.get_config_option(%22botaddnotif_enable%22)%0A%0A if not enable == True :%0A return%0A%0A name = hangups.ui.utils.get_conv_name(event.conv, truncate=False)%0A%0A message = u'%3Cb%3E%25s%3C/b%3E has added me to Hangout: %3Cb%3E%25s%3C/b%3E' %25 (event.user.full_name, name)%0A%0A admin_list=bot.get_config_option('admins')%0A for admin_id in admin_list:%0A yield from bot.coro_send_to_user(admin_id, message)
|
|
c2207ed347ab4d804cb7ea966eace6ea93a41326
|
Create sensor.py
|
sensor.py
|
sensor.py
|
Python
| 0.000002
|
@@ -0,0 +1,790 @@
+import RPi.GPIO as gpio%0Aimport time%0A%0Adef distance(measure='cm'):%0A try:%0A gpio.setmode(gpio.BOARD)%0A gpio.setup(12, gpio.OUT)%0A gpio.setup(16, gpio.IN)%0A %0A gpio.output(12, False)%0A while gpio.input(16) == 0:%0A nosig = time.time()%0A%0A while gpio.input(16) == 1:%0A sig = time.time()%0A%0A tl = sig - nosig%0A%0A if measure == 'cm':%0A distance = tl / 0.000058%0A elif measure == 'in':%0A distance = tl / 0.000148%0A else:%0A print('improper choice of measurement: in or cm')%0A distance = None%0A%0A gpio.cleanup()%0A return distance%0A except:%0A distance = 100%0A gpio.cleanup()%0A return distance%0A%0Aif __name__ == %22__main__%22:%0A print(distance(%22cm%22))%0A
|
|
5ac2e849fbf4857f7b449cdd39608ba053bddf6e
|
add main func
|
ExtractLevelDomain.py
|
ExtractLevelDomain.py
|
Python
| 0.001334
|
@@ -0,0 +1,3002 @@
+#coding=utf-8%0Aimport re%0Afrom urlparse import urlparse%0A%0Aclass ExtractLevelDomain():%0A%0A def __init__(self):%0A self.topHostPostfix = (%0A '.com','.la','.io',%0A '.co', '.cn','.info',%0A '.net', '.org','.me',%0A '.mobi', '.us', '.biz',%0A '.xxx', '.ca', '.co.jp',%0A '.com.cn', '.net.cn', '.org.cn',%0A '.mx','.tv', '.ws',%0A '.ag', '.com.ag', '.net.ag',%0A '.org.ag','.am','.asia',%0A '.at', '.be', '.com.br',%0A '.net.br',%0A '.bz',%0A '.com.bz',%0A '.net.bz',%0A '.cc',%0A '.com.co',%0A '.net.co',%0A '.nom.co',%0A '.de',%0A '.es',%0A '.com.es',%0A '.nom.es',%0A '.org.es',%0A '.eu',%0A '.fm',%0A '.fr',%0A '.gs',%0A '.in',%0A '.co.in',%0A '.firm.in',%0A '.gen.in',%0A '.ind.in',%0A '.net.in',%0A '.org.in',%0A '.it',%0A '.jobs',%0A '.jp',%0A '.ms',%0A '.com.mx',%0A '.nl','.nu','.co.nz','.net.nz',%0A '.org.nz',%0A '.se',%0A '.tc',%0A '.tk',%0A '.tw',%0A '.com.tw',%0A '.idv.tw',%0A '.org.tw',%0A '.hk',%0A '.co.uk',%0A '.me.uk',%0A '.org.uk',%0A '.vg')%0A %0A self.extractPattern = r'%5B%5C.%5D('+'%7C'.join(%5Bh.replace('.',r'%5C.') for h in self.topHostPostfix%5D)+')$'%0A self.pattern = re.compile(self.extractPattern,re.IGNORECASE)%0A self.level = %22*%22%0A%0A def parse_url(self,url):%0A parts = urlparse(url)%0A host = parts.netloc%0A m = self.pattern.search(host)%0A return m.group() if m else host%0A%0A def parse_url_level(self,url,level=%22*%22):%0A extractRule = self._parse_regex(level)%0A parts = urlparse(url)%0A host = parts.netloc%0A pattern = re.compile(extractRule,re.IGNORECASE)%0A m = pattern.search(host)%0A self.level = level%0A return m.group() if m else host%0A %0A def set_level(self,level):%0A extractRule = self._parse_regex(level)%0A self.extractPattern = extractRule%0A self.pattern = re.compile(self.extractPattern,re.IGNORECASE)%0A self.level = level%0A%0A def _parse_regex(self,level):%0A extractRule = r'(%5Cw*%5C.?)%25s('+'%7C'.join(%5Bh.replace('.',r'%5C.') for h in self.topHostPostfix%5D)+')$'%0A level = level if level == %22*%22 else 
%22%7B%25s%7D%22%25level%0A extractRule = extractRule%25(level)%0A return extractRule%0A %0Aif __name__ == %22__main__%22:%0A filter = ExtractLevelDomain()%0A print filter.level%0A print filter.parse_url('http://dmp.301.xiaorui.cc/redirect/xiaorui.cc')%0A print filter.parse_url_level('http://dmp.301.xiaorui.cc/redirect/xiaorui.cc',level=2)%0A filter.set_level(1)%0A print filter.parse_url_level('http://dmp.301.xiaorui.cc/redirect/xiaorui.cc',level=1)%0A print filter.level%0A
|
|
2350045ae48f0f5b7fcf5162c1dc82c198fc4db2
|
add note about default Options in tracker
|
ForgeTracker/forgetracker/widgets/admin_custom_fields.py
|
ForgeTracker/forgetracker/widgets/admin_custom_fields.py
|
import ew as ew_core
import ew.jinja2_ew as ew
from allura.lib.widgets import form_fields as ffw
from allura.lib.widgets import forms as f
from pylons import c
from forgetracker import model
from formencode import validators as fev
class MilestonesAdmin(ffw.SortableTable):
defaults=dict(
ffw.SortableTable.defaults,
button=ffw.AdminField(field=ew.InputField(
css_class='add', field_type='button',
value='New Milestone')),
empty_msg='No milestones have been created.',
nonempty_msg='Drag and drop the milestones to reorder.',
repetitions=0)
fields = [
ew.HiddenField(name='old_name'),
ew.Checkbox(name='complete', show_label=True, suppress_label=True),
ew.TextField(name='name',
attrs={'style':'width: 80px'}),
ffw.DateField(name='due_date',
attrs={'style':'width: 80px'}),
ffw.AutoResizeTextarea(
name='description',
attrs={'style':'height:1em; width: 150px'}),
ew.InputField(
label='Delete',
field_type='button',
attrs={'class':'delete', 'value':'Delete'}),
]
button = ew.InputField(
css_class='add', field_type='button', value='New Milestone')
def prepare_context(self, context):
response = super(MilestonesAdmin, self).prepare_context(context)
if 'value' in response:
for milestone_data in response['value']:
if 'name' in milestone_data:
milestone_data['old_name'] = milestone_data['name']
return response
def resources(self):
for r in super(MilestonesAdmin, self).resources(): yield r
yield ew.CSSScript('''div.state-field table{ width: 700px; }''')
class CustomFieldAdminDetail(ffw.StateField):
template='jinja:forgetracker:templates/tracker_widgets/custom_field_admin_detail.html'
defaults=dict(
ffw.StateField.defaults,
selector=ffw.AdminField(field=ew.SingleSelectField(
name='type',
options=[
ew.Option(py_value='string', label='Text'),
ew.Option(py_value='number', label='Number'),
ew.Option(py_value='boolean', label='Boolean'),
ew.Option(py_value='select', label='Select'),
ew.Option(py_value='milestone', label='Milestone'),
ew.Option(py_value='user', label='User'),
],
)),
states=dict(
select=ffw.FieldCluster(
fields=[
ffw.AdminField(field=ew.TextField(name='options')) ],
show_labels=False),
milestone=ffw.FieldCluster(
# name='milestones',
fields=[ MilestonesAdmin(name='milestones') ])
))
class CustomFieldAdmin(ew.CompoundField):
template='jinja:forgetracker:templates/tracker_widgets/custom_field_admin.html'
def resources(self):
for r in super(CustomFieldAdmin, self).resources():
yield r
yield ew.JSLink('tracker_js/custom-fields.js')
fields = [
ew.HiddenField(name='name'),
ew.TextField(name='label'),
ew.Checkbox(
name='show_in_search',
label='Show in search',
show_label=True,
suppress_label=True),
CustomFieldAdminDetail() ]
class TrackerFieldAdmin(f.ForgeForm):
submit_text=None
class fields(ew_core.NameList):
open_status_names = ew.TextField(label='Open Statuses')
closed_status_names = ew.TextField(label='Closed Statuses')
custom_fields = ffw.SortableRepeatedField(field=CustomFieldAdmin())
class buttons(ew_core.NameList):
save = ew.SubmitButton(label='Save')
cancel = ew.SubmitButton(
label="Cancel",
css_class='cancel', attrs=dict(
onclick='window.location.reload(); return false;'))
def resources(self):
for rr in self.fields['custom_fields'].resources():
yield rr
class CustomFieldDisplay(ew.CompoundField):
template='jinja:forgetracker:templates/tracker_widgets/custom_field_display.html'
class CustomFieldsDisplay(ew.RepeatedField):
template='jinja:forgetracker:templates/tracker_widgets/custom_fields_display.html'
class TrackerFieldDisplay(f.ForgeForm):
class fields(ew_core.NameList):
milestone_names = ew.TextField()
open_status_names = ew.TextField(label='Open Statuses')
closed_status_names = ew.TextField(label='Open Statuses')
custom_fields = CustomFieldsDisplay()
def resources(self):
for rr in self.fields['custom_fields'].resources():
yield rr
|
Python
| 0
|
@@ -2671,16 +2671,138 @@
options'
+,%0A label='Options (separate with spaces; prefix with * to set a default)',%0A
)) %5D,%0A
|
fd0e09e11d41d1d7b64f7bf592b788fda8b86e3e
|
Create archive_identifier.py
|
identifiers/archive_identifier.py
|
identifiers/archive_identifier.py
|
Python
| 0.000001
|
@@ -0,0 +1,206 @@
+from identifier import Result%0A%09%0ACAB_PATTERNS = %5B%0A%09'4D 53 43 46'%0A%5D%0A%09%0Aclass CabResolver:%0A%09def identify(self, stream):%0A%09%09return Result('CAB')%0A%09%0Adef load(hound):%0A%09hound.add_matches(CAB_PATTERNS, CabResolver())%0A
|
|
bb619aa09132dbd970d2b78e23fecbd78ee774de
|
Create new package. (#6191)
|
var/spack/repos/builtin/packages/r-aneufinder/package.py
|
var/spack/repos/builtin/packages/r-aneufinder/package.py
|
Python
| 0
|
@@ -0,0 +1,2813 @@
+##############################################################################%0A# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/spack/spack%0A# Please also see the NOTICE and LICENSE files for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass RAneufinder(RPackage):%0A %22%22%22This package implements functions for CNV calling, plotting,%0A export and analysis from whole-genome single cell sequencing data.%22%22%22%0A%0A homepage = %22https://www.bioconductor.org/packages/AneuFinder/%22%0A url = %22https://git.bioconductor.org/packages/AneuFinder%22%0A%0A version('1.4.0', git='https://git.bioconductor.org/packages/AneuFinder', commit='e5bdf4d5e4f84ee5680986826ffed636ed853b8e')%0A%0A depends_on('r@3.4.0:3.4.9', when='@1.4.0')%0A depends_on('r-genomicranges', type=('build', 'run'))%0A depends_on('r-cowplot', type=('build', 'run'))%0A depends_on('r-aneufinderdata', type=('build', 'run'))%0A depends_on('r-foreach', 
type=('build', 'run'))%0A depends_on('r-doparallel', type=('build', 'run'))%0A depends_on('r-biocgenerics', type=('build', 'run'))%0A depends_on('r-s4vectors', type=('build', 'run'))%0A depends_on('r-genomeinfodb', type=('build', 'run'))%0A depends_on('r-iranges', type=('build', 'run'))%0A depends_on('r-rsamtools', type=('build', 'run'))%0A depends_on('r-bamsignals', type=('build', 'run'))%0A depends_on('r-dnacopy', type=('build', 'run'))%0A depends_on('r-biostrings', type=('build', 'run'))%0A depends_on('r-genomicalignments', type=('build', 'run'))%0A depends_on('r-ggplot2', type=('build', 'run'))%0A depends_on('r-reshape2', type=('build', 'run'))%0A depends_on('r-ggdendro', type=('build', 'run'))%0A depends_on('r-reordercluster', type=('build', 'run'))%0A depends_on('r-mclust', type=('build', 'run'))%0A depends_on('r-ggrepel', type=('build', 'run'))%0A
|
|
d604429f1d27f8753b8d9665a55111a3b90f0699
|
Add homebrew version of fluentd logger
|
scrapi/util/logging.py
|
scrapi/util/logging.py
|
Python
| 0
|
@@ -0,0 +1,1152 @@
+import logging%0Afrom datetime import datetime%0A%0Afrom fluent import sender%0A%0A%0Aclass FluentHandler(logging.Handler):%0A '''%0A Logging Handler for fluent.%0A '''%0A def __init__(self,%0A tag,%0A host='localhost',%0A port=24224,%0A timeout=3.0,%0A verbose=False):%0A%0A self.tag = tag%0A self.sender = sender.FluentSender(tag,%0A host=host, port=port,%0A timeout=timeout, verbose=verbose)%0A logging.Handler.__init__(self)%0A%0A def emit(self, record):%0A data = self.format(record)%0A data = %7B%0A 'level': record.levelname,%0A 'message': record.msg,%0A 'source': record.name,%0A 'date': datetime.fromtimestamp(record.created).isoformat(),%0A 'fullPath': record.pathname,%0A 'uptime': record.relativeCreated%0A %7D%0A self.sender.emit(None, data)%0A%0A def close(self):%0A self.acquire()%0A try:%0A self.sender._close()%0A logging.Handler.close(self)%0A finally:%0A self.release()%0A
|
|
cfeb4fbfa44b597772f1eb63d828e605f9e39396
|
Add server
|
server.py
|
server.py
|
Python
| 0.000001
|
@@ -0,0 +1,1836 @@
+import argparse%0Aimport os%0Aimport signal%0Aimport subprocess%0A%0Adef parse_args():%0A parser = argparse.ArgumentParser()%0A parser.add_argument('operation',%0A choices=%5B'start', 'stop', 'restart'%5D,%0A help='start/stop/restart the server')%0A return parser.parse_args()%0A%0A%0Adef find_server_process(components):%0A proc = subprocess.Popen(%5B'ps', '-e', '-o', 'pid,command'%5D,%0A stdout=subprocess.PIPE)%0A (out, dummy) = proc.communicate()%0A %0A processes = %5Bi.strip() for i in str(out).split('%5Cn')%5B1:%5D if i%5D%0A%0A if len(processes) %3E 0:%0A for line in processes:%0A fields = line.split(None, 1)%0A if fields%5B1%5D == ' '.join(components):%0A return int(fields%5B0%5D)%0A%0A return None%0A%0Adef get_absolute_filename(relative_name):%0A return os.path.abspath(os.path.join(os.path.dirname(__file__),%0A relative_name))%0A%0Adef start_service():%0A proc = subprocess.Popen(launch_command)%0A print('Heathergraph service started - PID=%7B%7D'.format(proc.pid))%0A%0Adef stop_service(pid):%0A if pid:%0A print('Stopping Heathergraph service - PID=%7B%7D'.format(pid))%0A os.kill(pid, signal.SIGTERM) %0A else:%0A print('No Heathergraph service was found to be running')%0A %0A%0Adef main():%0A args = parse_args()%0A launch_command = %5B'python',%0A get_absolute_filename('heathergraph.py')%5D%0A pid = find_server_process(launch_command)%0A%0A if args.operation == 'start':%0A if not pid:%0A start_service()%0A else:%0A print('Heathergraph service is already running')%0A%0A elif args.operation == 'stop':%0A stop_service(pid)%0A elif args.operation == 'restart':%0A stop_service(pid)%0A start_service()%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
8fda2e1330277e98b62d3286e5c208d320fc07db
|
Add simple api to redis with flask
|
server.py
|
server.py
|
Python
| 0
|
@@ -0,0 +1,788 @@
+from flask import Flask%0Afrom flask import json%0Afrom flask import Response%0Aimport redis%0A%0Aapp = Flask(__name__)%0Ar = redis.StrictRedis(host='localhost', port=6379, db=0)%0A%0A@app.route(%22/%22)%0Adef hello():%0A return %22Hello ld!%22%0A%0A@app.route('/properties/')%0Adef show_properties():%0A props = r.smembers('daftpunk:properties')%0A%0A data = %5B%5D%0A for n in props:%0A %09data.append(%7B%22id%22:n, %22address%22: r.get('daftpunk:%25s:address' %25 n)%7D)%0A%0A resp = Response(json.dumps(data), status=200, mimetype='application/json')%0A return resp%0A%0A@app.route('/property/%3Cid%3E')%0Adef show_property(id):%0A%0A data = %7B%22id%22:id, %22address%22: r.get('daftpunk:%25s:address' %25 id)%7D%0A%0A resp = Response(json.dumps(data), status=200, mimetype='application/json')%0A return resp%0A%0Aif __name__ == %22__main__%22:%0A app.run(debug=True)
|
|
ef108d756ae91925ca0afec280151974eae4a696
|
add import script to load existing data in influxdb, optional
|
scripts/influxdb_import.py
|
scripts/influxdb_import.py
|
Python
| 0
|
@@ -0,0 +1,2289 @@
+'''%0AImport biomaj banks statistics in Influxdb if never done before.....%0A'''%0Afrom influxdb import InfluxDBClient%0Afrom biomaj.bank import Bank%0Afrom biomaj_core.config import BiomajConfig%0Aimport sys%0A%0Aif len(sys.argv) != 1:%0A print('Usage: influxdb_import.py path_to_global.properties')%0A sys.exit(1)%0A%0ABiomajConfig.load_config(config_file=sys.argv%5B1%5D)%0A%0Ainfluxdb = None%0Atry:%0A influxdb = InfluxDBClient(host='biomaj-influxdb', database='biomaj')%0Aexcept Exception as e:%0A print('Failed to connect to influxdb, check configuration in global.properties: ' + str(e))%0A sys.exit(1)%0A%0Ares = influxdb.query('select last(%22value%22) from %22biomaj.banks.quantity%22')%0Aif res:%0A print('Found data in influxdb, update info....')%0A%0Abanks = Bank.list()%0Anb_banks = 0%0Ametrics = %5B%5D%0Afor bank in banks:%0A productions = bank%5B'production'%5D%0A total_size = 0%0A latest_size = 0%0A if not productions:%0A continue%0A nb_banks += 1%0A latest_size = productions%5Blen(productions) - 1%5D%5B'size'%5D%0A for production in productions:%0A if 'size' in production:%0A total_size += production%5B'size'%5D%0A%0A influx_metric = %7B%0A %22measurement%22: 'biomaj.production.size.total',%0A %22fields%22: %7B%0A %22value%22: float(total_size)%0A %7D,%0A %22tags%22: %7B%0A %22bank%22: bank%5B'name'%5D%0A %7D,%0A %22time%22: int(production%5B'session'%5D)%0A %7D%0A metrics.append(influx_metric)%0A influx_metric = %7B%0A %22measurement%22: 'biomaj.production.size.latest',%0A %22fields%22: %7B%0A %22value%22: float(latest_size)%0A %7D,%0A %22tags%22: %7B%0A %22bank%22: bank%5B'name'%5D%0A %7D,%0A %22time%22: int(production%5B'session'%5D)%0A %7D%0A metrics.append(influx_metric)%0A influx_metric = %7B%0A %22measurement%22: 'biomaj.bank.update.new',%0A %22fields%22: %7B%0A %22value%22: 1%0A %7D,%0A %22tags%22: %7B%0A %22bank%22: bank%5B'name'%5D%0A %7D,%0A %22time%22: int(production%5B'session'%5D)%0A %7D%0A metrics.append(influx_metric)%0A%0Ainflux_metric = 
%7B%0A %22measurement%22: 'biomaj.banks.quantity',%0A %22fields%22: %7B%0A %22value%22: nb_banks%0A %7D%0A%7D%0Ametrics.append(influx_metric)%0A%0Ainfluxdb.write_points(metrics, time_precision=%22s%22)%0A
|
|
aeea3331adc97209d3e3099baa9847b6f7646316
|
Fix python-2.6 support
|
powerline/lint/markedjson/markedvalue.py
|
powerline/lint/markedjson/markedvalue.py
|
__all__ = ['gen_marked_value', 'MarkedValue']
try:
from __builtin__ import unicode
except ImportError:
unicode = str
def gen_new(cls):
def __new__(arg_cls, value, mark):
r = super(arg_cls, arg_cls).__new__(arg_cls, value)
r.mark = mark
r.value = value
return r
return __new__
def gen_init(cls):
def __init__(self, value, mark):
return cls.__init__(self, value)
return __init__
def gen_getnewargs(cls):
def __getnewargs__(self):
return (self.value, self.mark)
return __getnewargs__
class MarkedUnicode(unicode):
	"""Unicode string subclass that also carries a parse ``mark``."""
	__new__ = gen_new(unicode)
	__getnewargs__ = gen_getnewargs(unicode)
	def _proc_partition(self, part_result):
		# Rebuild the (head, sep, tail) triple from str.partition /
		# str.rpartition so each piece is a MarkedUnicode whose mark is
		# advanced to that piece's offset inside the original string.
		pointdiff = 1
		r = []
		for s in part_result:
			mark = self.mark.copy()
			# XXX Does not work properly with escaped strings, but this requires
			# saving much more information in mark.
			mark.column += pointdiff
			mark.pointer += pointdiff
			r.append(MarkedUnicode(s, mark))
			pointdiff += len(s)
		return tuple(r)
	def rpartition(self, sep):
		# Same as str.rpartition, but results keep position marks.
		return self._proc_partition(super(MarkedUnicode, self).rpartition(sep))
	def partition(self, sep):
		# Same as str.partition, but results keep position marks.
		return self._proc_partition(super(MarkedUnicode, self).partition(sep))
class MarkedInt(int):
	"""Integer subclass that also carries a parse ``mark``."""
	__new__ = gen_new(int)
	__getnewargs__ = gen_getnewargs(int)
class MarkedFloat(float):
	"""Float subclass that also carries a parse ``mark``."""
	__new__ = gen_new(float)
	__getnewargs__ = gen_getnewargs(float)
class MarkedDict(dict):
	"""Dict subclass that tracks the parse marks of its keys.

	``keydict`` maps each key to the (marked) key object originally used,
	so a key's mark can be recovered even though dict lookups compare
	keys only by equality.
	"""
	__new__ = gen_new(dict)
	__getnewargs__ = gen_getnewargs(dict)
	def __init__(self, value, mark):
		dict.__init__(self, value)
		self.keydict = dict(((key, key) for key in self))
	def __setitem__(self, key, value):
		dict.__setitem__(self, key, value)
		# Remember the exact (possibly marked) key object that was used.
		self.keydict[key] = key
	def update(self, *args, **kwargs):
		dict.update(self, *args, **kwargs)
		# Rebuild the key registry after a bulk update.
		self.keydict = dict(((key, key) for key in self))
	def copy(self):
		return MarkedDict(super(MarkedDict, self).copy(), self.mark)
class MarkedList(list):
	"""List subclass that also carries a parse ``mark``."""
	__new__ = gen_new(list)
	__init__ = gen_init(list)
	__getnewargs__ = gen_getnewargs(list)
class MarkedValue:
	"""Generic wrapper pairing an arbitrary ``value`` with its parse ``mark``.

	Used (via dynamically created subclasses in :func:`gen_marked_value`)
	for values whose types cannot be subclassed directly.
	"""
	def __init__(self, value, mark):
		self.mark = mark
		self.value = value
	# Support the copy/pickle protocol for old-style-class instances.
	__getinitargs__ = gen_getnewargs(None)
# Mapping of plain types to their dedicated marked subclasses.
specialclasses = {
	unicode: MarkedUnicode,
	int: MarkedInt,
	float: MarkedFloat,
	dict: MarkedDict,
	list: MarkedList,
}
# Cache of dynamically generated MarkedValue proxy subclasses, keyed by
# the wrapped value's class (see gen_marked_value).
classcache = {}
def gen_marked_value(value, mark, use_special_classes=True):
	"""Wrap *value* in a marked equivalent that carries *mark*.

	For common types (unicode/int/float/dict/list) a dedicated Marked*
	subclass is used; for anything else a proxy class derived from
	MarkedValue is generated on the fly (cached per wrapped class) that
	forwards almost every method to the wrapped value.
	"""
	if use_special_classes and value.__class__ in specialclasses:
		Marked = specialclasses[value.__class__]
	elif value.__class__ in classcache:
		Marked = classcache[value.__class__]
	else:
		class Marked(MarkedValue):
			# Forward each method of the wrapped class to self.value,
			# except constructors and attribute access machinery.
			for func in value.__class__.__dict__:
				if func == 'copy':
					def copy(self):
						return self.__class__(self.value.copy(), self.mark)
				elif func not in set(('__init__', '__new__', '__getattribute__')):
					if func in set(('__eq__',)):
						# HACK to make marked dictionaries always work
						exec ((
							'def {0}(self, *args):\n'
							'	return self.value.{0}(*[arg.value if isinstance(arg, MarkedValue) else arg for arg in args])'
						).format(func))
					else:
						exec ((
							'def {0}(self, *args, **kwargs):\n'
							'	return self.value.{0}(*args, **kwargs)\n'
						).format(func))
		classcache[value.__class__] = Marked
	return Marked(value, mark)
|
Python
| 0
|
@@ -1381,27 +1381,28 @@
t(dict):%0A%09__
-new
+init
__ = gen_new
@@ -1398,19 +1398,20 @@
_ = gen_
-new
+init
(dict)%0A%09
@@ -1452,35 +1452,37 @@
ct)%0A%0A%09def __
-init__(self
+new__(arg_cls
, value, mar
@@ -1491,41 +1491,97 @@
:%0A%09%09
-dict.__init__(self, value)%0A%09%09self
+r = super(arg_cls, arg_cls).__new__(arg_cls, value)%0A%09%09r.mark = mark%0A%09%09r.value = value%0A%09%09r
.key
@@ -1611,30 +1611,38 @@
for key in
-self))
+r))%0A%09%09return r
%0A%0A%09def __set
|
540f913d6b9402512bc2b507504f77f709c17eca
|
add exec example
|
examples/exec.py
|
examples/exec.py
|
Python
| 0
|
@@ -0,0 +1,2112 @@
+%22%22%22%0AExample uses of exec.%0A%0Aexec is a special form which takes 1, 2, or 3 arguments.%0Aexec(expr, globals, locals)%0Alocals and globals are optional.%0Aexpr is a string to be executed as code.%0Aglobals is a dictionary from symbol names to values.%0Alocals is a dictionary from symbol names to values.%0A%22%22%22%0A%0Aimport inspect%0Aimport numpy as np%0A%0Adef exec_verbose(expr, globalsd=None, localsd=None):%0A %22%22%22Wraps exec() and prints some stuff.%0A Behaves just like exec with the following exceptions:%0A - Prints the expr to be exec'd.%0A - Catches and reports exceptions but does not throw.%0A %22%22%22%0A # This line prints expr and whether or not global and locals exist.%0A print %22exec%22 + (%22 (g)%22 if globalsd != None else %22%22) + (%22 (l)%22 if localsd != None else %22%22) + %22: %22 + expr%0A try:%0A if (globalsd == None) and (localsd == None):%0A exec(expr)%0A elif (globalsd != None) and (localsd == None):%0A exec(expr, globalsd)%0A elif (globalsd != None) and (localsd != None):%0A exec(expr, globalsd, localsd)%0A else:%0A raise RuntimeError(%22bad exec_verbose args%22)%0A except Exception as ex:%0A print %22exec failed:%22, type(ex).__name__, ex%0A%0Aa = 1%0Ab = 2%0Aprint %22Exec with implicit globals and locals.%22%0Aexec_verbose(%22print a, b%22)%0A# 1 2%0Aprint%0A%0Aprint %22With empty globals.%22%0Aexec_verbose(%22print a, b%22, %7B%7D)%0A# exec failed: NameError name 'a' is not defined%0Aprint%0A%0Aprint %22With custom globals.%22%0Aexec_verbose(%22print a, b%22, %7B%22a%22: 3, %22b%22: 4%7D)%0A# 3 4%0Aprint%0A%0Aprint %22With shadowing of globals by locals.%22%0Aexec_verbose(%22print a, b%22, %7B%22a%22: 3, %22b%22: 4%7D, %7B%22b%22: 5%7D)%0A# 3 5%0Aprint%0A%0Aprint %22Refer to imports within this file.%22%0Aexec_verbose(%22print np.sin(0)%22)%0A# 0.0%0Aprint%0A%0Aprint %22Supplying globals kills imports.%22%0Aexec_verbose(%22print np.sin(0)%22, %7B%7D)%0A# exec failed: NameError name 'np' is not defined%0Aprint%0A%0Aprint 
%22You can simulate them.%22%0Aexec_verbose(%22print np.sin(0)%22, %7B%22np%22: np%7D)%0A# 0.0%0Aprint%0A%0Aprint %22And do dirty tricks.%22%0Aclass FakeNumpy(object):%0A def sin(self, x):%0A return -1%0Aprint inspect.getsource(FakeNumpy).strip()%0Aexec_verbose(%22print np.sin(0)%22, %7B%22np%22: FakeNumpy()%7D)%0A# -1%0Aprint%0A
|
|
c48b6ea55969adff7e0662c551a529161a4d0b94
|
add kattis/stockprices
|
Kattis/stockprices.py
|
Kattis/stockprices.py
|
Python
| 0.000001
|
@@ -0,0 +1,2154 @@
+%22%22%22%0D%0AProblem: stockprices%0D%0ALink: https://open.kattis.com/problems/stockprices%0D%0ASource: NWERC 2010%0D%0A%22%22%22%0D%0Aimport queue%0D%0Aimport sys%0D%0A%0D%0Adef runTest():%0D%0A N = int(input())%0D%0A buyHeap = queue.PriorityQueue() # MinHeap%0D%0A sellHeap = queue.PriorityQueue() # MinHeap%0D%0A stockPrice = None%0D%0A for i in range(N):%0D%0A command, n, _, _, price = input().split()%0D%0A n = int(n)%0D%0A price = int(price)%0D%0A if command == %22buy%22:%0D%0A buyHeap.put((int(-price), n))%0D%0A else:%0D%0A sellHeap.put((int(price), n))%0D%0A%0D%0A bestBid = buyHeap.queue%5B0%5D if len(buyHeap.queue) %3E 0 else None%0D%0A bestAsk = sellHeap.queue%5B0%5D if len(sellHeap.queue) %3E 0 else None%0D%0A%0D%0A while (bestBid is not None) and (bestAsk is not None) and (-bestBid%5B0%5D %3E= bestAsk%5B0%5D):%0D%0A stockPrice = bestAsk%5B0%5D%0D%0A nBid = bestBid%5B1%5D%0D%0A nAsk = bestAsk%5B1%5D%0D%0A print(%22Best bid = %7B:%7D (%7B:%7D)%22.format(-bestBid%5B0%5D, nBid), file=sys.stderr)%0D%0A print(%22Best ask = %7B:%7D (%7B:%7D)%22.format(bestAsk%5B0%5D, nAsk), file=sys.stderr)%0D%0A if nBid %3E nAsk:%0D%0A buyHeap.get()%0D%0A sellHeap.get()%0D%0A buyHeap.put((bestBid%5B0%5D, nBid-nAsk))%0D%0A elif nBid %3C nAsk:%0D%0A buyHeap.get()%0D%0A sellHeap.get()%0D%0A sellHeap.put((bestAsk%5B0%5D, nAsk-nBid))%0D%0A else:%0D%0A buyHeap.get()%0D%0A sellHeap.get()%0D%0A%0D%0A bestBid = buyHeap.queue%5B0%5D if len(buyHeap.queue) %3E 0 else None%0D%0A bestAsk = sellHeap.queue%5B0%5D if len(sellHeap.queue) %3E 0 else None%0D%0A%0D%0A bestBid = buyHeap.queue%5B0%5D if len(buyHeap.queue) %3E 0 else None%0D%0A bestAsk = sellHeap.queue%5B0%5D if len(sellHeap.queue) %3E 0 else None%0D%0A%0D%0A if bestAsk is not None: print(%22%7B:%7D%22.format(int(bestAsk%5B0%5D)), end=%22 %22)%0D%0A else: print(%22-%22, end=%22 %22)%0D%0A%0D%0A if bestBid is not None: print(%22%7B:%7D%22.format(int(-bestBid%5B0%5D)), end=%22 %22)%0D%0A else: print(%22-%22, end=%22 %22)%0D%0A%0D%0A if 
stockPrice is not None: print(%22%7B:%7D%22.format(int(stockPrice)), end=%22%5Cn%22)%0D%0A else: print(%22-%22, end=%22%5Cn%22)%0D%0A%0D%0AT = int(input())%0D%0Afor i in range(T):%0D%0A runTest()%0D%0A
|
|
a3780ebd969d95873a589968fb778a3553c24128
|
Fix LXDContainer.container_running
|
pylxd/container.py
|
pylxd/container.py
|
# Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from pylxd import base
class LXDContainer(base.LXDBase):
    """Thin wrapper around the LXD REST API for container operations.

    Every method delegates to ``self.connection`` (supplied by
    ``base.LXDBase``) and returns either the raw ``(status, data)``
    tuple from the API or a value extracted from its JSON metadata.
    """

    # containers:
    def container_list(self):
        """Return the names of all defined containers."""
        (state, data) = self.connection.get_object('GET', '/1.0/containers')
        return [container.split('/1.0/containers/')[-1]
                for container in data['metadata']]

    def container_running(self, container):
        """Return True if *container* is in a running-like state."""
        (state, data) = self.connection.get_object(
            'GET',
            '/1.0/containers/%s/state' % container)
        data = data.get('metadata')
        container_running = False
        # Fix: 'FREEZING' and 'FROZEN' are distinct LXD states; they were
        # previously joined into the single, never-matching string
        # 'FREEZING,FROZEN'.
        if data['status'] in ['RUNNING', 'STARTING', 'FREEZING', 'FROZEN',
                              'THAWED']:
            container_running = True
        return container_running

    def container_init(self, container):
        """Create a new container from the given definition dict."""
        return self.connection.get_object('POST', '/1.0/containers',
                                          json.dumps(container))

    def container_update(self, container, config):
        """Replace the configuration of an existing container."""
        return self.connection.get_object('PUT', '/1.0/containers/%s'
                                          % container, json.dumps(config))

    def container_defined(self, container):
        """Return True if the container exists (HTTP status check only)."""
        return self.connection.get_status('GET', '/1.0/containers/%s/state'
                                          % container)

    def container_state(self, container):
        """Return the container's status string (e.g. 'RUNNING')."""
        (state, data) = self.connection.get_object(
            'GET', '/1.0/containers/%s/state' % container)
        return data['metadata']['status']

    def container_start(self, container, timeout):
        """Start the container, waiting up to *timeout* seconds."""
        action = {'action': 'start', 'timeout': timeout}
        return self.connection.get_object('PUT', '/1.0/containers/%s/state'
                                          % container,
                                          json.dumps(action))

    def container_stop(self, container, timeout):
        """Stop the container, waiting up to *timeout* seconds."""
        action = {'action': 'stop', 'timeout': timeout}
        return self.connection.get_object('PUT', '/1.0/containers/%s/state'
                                          % container,
                                          json.dumps(action))

    def container_suspend(self, container, timeout):
        """Freeze (suspend) the container."""
        action = {'action': 'freeze', 'timeout': timeout}
        return self.connection.get_object('PUT', '/1.0/containers/%s/state'
                                          % container,
                                          json.dumps(action))

    def container_resume(self, container, timeout):
        """Unfreeze (resume) a previously suspended container."""
        action = {'action': 'unfreeze', 'timeout': timeout}
        return self.connection.get_object('PUT', '/1.0/containers/%s/state'
                                          % container,
                                          json.dumps(action))

    def container_reboot(self, container, timeout):
        """Restart the container."""
        action = {'action': 'restart', 'timeout': timeout}
        return self.connection.get_object('PUT', '/1.0/containers/%s/state'
                                          % container,
                                          json.dumps(action))

    def container_destroy(self, container):
        """Delete the container."""
        return self.connection.get_object('DELETE', '/1.0/containers/%s'
                                          % container)

    def get_container_log(self, container):
        """Return the container's log text."""
        (state, data) = self.connection.get_object(
            'GET', '/1.0/containers/%s?log=true' % container)
        return data['metadata']['log']

    # file operations
    def get_container_file(self, container, filename):
        """Fetch a file from inside the container (raw response)."""
        return self.connection.get_raw(
            'GET',
            '/1.0/containers/%s/files?path=%s' % (container, filename))

    def container_publish(self, container):
        """Publish the container as an image."""
        return self.connection.get_object('POST', '/1.0/images',
                                          json.dumps(container))

    # misc operations
    def run_command(self, container, args, interactive, web_sockets, env):
        """Execute a command inside the container via the exec endpoint."""
        env = env or {}
        data = {'command': args,
                'interactive': interactive,
                'wait-for-websocket': web_sockets,
                'environment': env}
        return self.connection.get_object('POST', '/1.0/containers/%s/exec'
                                          % container, json.dumps(data))

    # snapshots
    def snapshot_list(self, container):
        """Return the names of all snapshots of *container*."""
        (state, data) = self.connection.get_object(
            'GET',
            '/1.0/containers/%s/snapshots' % container)
        return [snapshot.split('/1.0/containers/%s/snapshots/%s/'
                               % (container, container))[-1]
                for snapshot in data['metadata']]

    def snapshot_create(self, container, config):
        """Create a snapshot from the given config dict."""
        return self.connection.get_object('POST',
                                          '/1.0/containers/%s/snapshots'
                                          % container,
                                          json.dumps(config))

    def snapshot_info(self, container, snapshot):
        """Return details for a single snapshot.

        Fixed two typos from the original: ``self.conncetion`` ->
        ``self.connection`` and ``/snapshsots/`` -> ``/snapshots/``
        (the old code raised AttributeError and used a bogus URL).
        """
        return self.connection.get_object('GET',
                                          '/1.0/containers/%s/snapshots/%s'
                                          % (container, snapshot))

    def snapshot_rename(self, container, snapshot, config):
        """Rename a snapshot."""
        return self.connection.get_object('POST',
                                          '/1.0/containers/%s/snapshots/%s'
                                          % (container, snapshot),
                                          json.dumps(config))

    def snapshot_delete(self, container, snapshot):
        """Delete a snapshot."""
        return self.connection.get_object('DELETE',
                                          '/1.0/containers/%s/snapshots/%s'
                                          % (container, snapshot))
|
Python
| 0.002618
|
@@ -1208,17 +1208,20 @@
FREEZING
-,
+', '
FROZEN',
|
b01445701c2974c0f69c9a43208111f0b80a167f
|
Create helloWorld.py
|
helloWorld.py
|
helloWorld.py
|
Python
| 0.999992
|
@@ -0,0 +1,21 @@
+print %22Hello World!%22%0A
|
|
9ad2a898298667aa6adfbf0c4e786e431c9a96b1
|
test test test
|
python-ver/test.py
|
python-ver/test.py
|
Python
| 0.000008
|
@@ -0,0 +1,23 @@
+print 'blah blah blah'%0A
|
|
27575c3fd6bdc55748b808a98c0b19e3edfb17af
|
Create ShakeBoussole.py
|
sense-hat/ShakeBoussole.py
|
sense-hat/ShakeBoussole.py
|
Python
| 0
|
@@ -0,0 +1,1064 @@
+from sense_hat import SenseHat%0Aimport time%0Aimport sys%0A%0Asense = SenseHat()%0A%0Aled_loop = %5B4, 5, 6, 7, 15, 23, 31, 39, 47, 55, 63, 62, 61, 60, 59, 58, 57, 56, 48, 40, 32, 24, 16, 8, 0, 1, 2, 3%5D%0A%0Asense = SenseHat()%0Asense.set_rotation(0)%0Asense.clear()%0A%0Aprev_x = 0%0Aprev_y = 0%0A%0Aled_degree_ratio = len(led_loop) / 360.0%0A%0Awhile True:%0A x, y, z = sense.get_accelerometer_raw().values()%0A%0A x = abs(x)%0A y = abs(y)%0A z = abs(z)%0A%0A if x %3E 1 or y %3E 1 or z %3E 1:%0A%09%09%0A while True:%0A dir = sense.get_compass()%0A dir_inverted = 180 - dir # So LED appears to follow North%0A led_index = int(led_degree_ratio * dir_inverted)%0A offset = led_loop%5Bled_index%5D%0A y = offset // 8 # row%0A x = offset %25 8 # column%0A if x != prev_x or y != prev_y:%0A sense.set_pixel(prev_x, prev_y, 0, 0, 0)%0A%0A sense.set_pixel(x, y, 0, 0, 255)%0A%0A prev_x = x%0A prev_y = y%0A%0A else:%0A sense.clear()%0A time.sleep(2)%0A%0Asense.stick.direction_middle = sense.clear()%0A
|
|
fb96906301515b268d56bb7a494360f794883223
|
include migration for uniq
|
metaci/release/migrations/0002_auto_20180815_2248.py
|
metaci/release/migrations/0002_auto_20180815_2248.py
|
Python
| 0
|
@@ -0,0 +1,476 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.10 on 2018-08-15 22:48%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('repository', '0005_repository_release_tag_regex'),%0A ('release', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterUniqueTogether(%0A name='release',%0A unique_together=set(%5B('repo', 'git_tag')%5D),%0A ),%0A %5D%0A
|
|
c503471f3d7318a2519b486b8be74ec1d1f2e235
|
Add an admin interface for xmlrpc tokens
|
linaro_django_xmlrpc/admin.py
|
linaro_django_xmlrpc/admin.py
|
Python
| 0.000002
|
@@ -0,0 +1,247 @@
+from django.contrib import admin%0A%0Afrom linaro_django_xmlrpc.models import AuthToken%0A%0A%0Aclass AuthTokenAdmin(admin.ModelAdmin):%0A list_display = ('user', 'description', 'created_on', 'last_used_on')%0A%0Aadmin.site.register(AuthToken, AuthTokenAdmin)%0A
|
|
a0702b8ac74c4976cf747880bdfeb86088a16715
|
CREATE new Syft Message structure
|
packages/syft/src/syft/core/node/common/node_service/generic_payload/syft_message.py
|
packages/syft/src/syft/core/node/common/node_service/generic_payload/syft_message.py
|
Python
| 0
|
@@ -0,0 +1,975 @@
+# stdlib%0Afrom typing import Any%0Afrom typing import Dict%0Afrom typing import Optional%0A%0A# third party%0Afrom nacl.signing import VerifyKey%0A%0A# relative%0Afrom .....common.message import ImmediateSyftMessage, SignedMessage%0Afrom .....common.uid import UID%0Afrom .....io.address import Address%0Afrom ....abstract.node_service_interface import NodeServiceInterface%0A%0Aclass SyftMessage(ImmediateSyftMessage):%0A __attr_allowlist__ = %5B%22id%22, %22payload%22, %22address%22, %22reply_to%22, %22msg_id%22, %22kwargs%22%5D%0A%0A signed_type = SignedMessage%0A%0A def __init__(%0A self,%0A address: Address,%0A kwargs: Dict%5Bstr,Any%5D = %7B%7D,%0A msg_id: Optional%5BUID%5D = None,%0A reply_to: Optional%5BAddress%5D = None) -%3E None:%0A super().__init__(address=address, msg_id=msg_id)%0A self.reply_to = reply_to%0A self.kwargs = kwargs %0A %0A def run(self, node: NodeServiceInterface, verify_key: Optional%5BVerifyKey%5D = None) -%3E ImmediateSyftMessage:%0A raise NotImplementedError%0A
|
|
b6b65e1a26a45d45d98e5f270f6704d286335605
|
Split run method into __init__ and treat method
|
scripts/unusedfiles.py
|
scripts/unusedfiles.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This bot appends some text to all unused images and notifies uploaders.
Parameters:
-always Don't be asked every time.
"""
#
# (C) Leonardo Gregianin, 2007
# (C) Filnik, 2008
# (c) xqt, 2011-2014
# (C) Pywikibot team, 2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import pywikibot
from pywikibot import i18n, pagegenerators, Bot
# Localized template appended to the unused file's description page.
template_to_the_image = {
    'it': u'{{immagine orfana}}',
    'fa': u'{{تصاویر بدون استفاده}}',
}

# This template message should use subst:
template_to_the_user = {
    'fa': u'\n\n{{جا:اخطار به کاربر برای تصاویر بدون استفاده|%(title)s}}--~~~~',
}
class UnusedFilesBot(Bot):

    """Unused files bot.

    Tags each unused local file with a localized template and leaves a
    notification on the uploader's talk page.
    """

    def __init__(self, site, **kwargs):
        """Constructor."""
        super(UnusedFilesBot, self).__init__(**kwargs)
        self.site = site

    def run(self):
        """Start the bot."""
        # Localized template to place on the file description page.
        template_image = i18n.translate(self.site,
                                        template_to_the_image)
        # Localized message (subst:) for the uploader's talk page.
        template_user = i18n.translate(self.site,
                                       template_to_the_user)
        self.summary = i18n.twtranslate(self.site, 'unusedfiles-comment')
        # Both templates are required; bail out on untranslated wikis.
        if not all([template_image, template_user]):
            raise pywikibot.Error(u'This script is not localized for %s site.'
                                  % self.site)
        generator = pagegenerators.UnusedFilesGenerator(site=self.site)
        generator = pagegenerators.PreloadingGenerator(generator)
        for image in generator:
            if not image.exists():
                pywikibot.output("File '%s' does not exist (see bug T71133)."
                                 % image.title())
                continue
            # Use fileUrl() and fileIsShared() to confirm it is local media
            # rather than a local page with the same name as shared media.
            if (image.fileUrl() and not image.fileIsShared() and
                    u'http://' not in image.text):
                if template_image in image.text:
                    # Skip files already tagged on a previous run.
                    pywikibot.output(u"%s done already"
                                     % image.title(asLink=True))
                    continue
                self.append_text(image, u"\n\n" + template_image)
                # First history entry is the most recent uploader.
                uploader = image.getFileVersionHistory().pop(0)['user']
                user = pywikibot.User(image.site, uploader)
                usertalkpage = user.getUserTalkPage()
                msg2uploader = template_user % {'title': image.title()}
                self.append_text(usertalkpage, msg2uploader)

    def append_text(self, page, apptext):
        """Append apptext to the page."""
        # Follow redirects so the text lands on the actual target page.
        if page.isRedirectPage():
            page = page.getRedirectTarget()
        if page.exists():
            text = page.text
        else:
            if page.isTalkPage():
                # A missing talk page may simply be created empty.
                text = u''
            else:
                raise pywikibot.NoPage(page)
        oldtext = text
        text += apptext
        self.userPut(page, oldtext, text, summary=self.summary)
def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    local_args = pywikibot.handle_args(args)
    # The only recognized bot-specific option is '-always'.
    options = {'always': True} if '-always' in local_args else {}
    bot = UnusedFilesBot(pywikibot.Site(), **options)
    try:
        bot.run()
    except pywikibot.Error as error:
        pywikibot.bot.suggest_help(exception=error)
        return False
    return True
# Run the bot only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
Python
| 0.000715
|
@@ -934,56 +934,8 @@
te%0A%0A
- def run(self):%0A %22%22%22Start the bot.%22%22%22%0A
@@ -973,32 +973,32 @@
late(self.site,%0A
+
@@ -1400,32 +1400,122 @@
%25 self.site)%0A
+%0A self.template_image = template_image%0A self.template_user = template_user%0A%0A
generato
@@ -1636,24 +1636,25 @@
erator)%0A
+%0A
for imag
@@ -1649,36 +1649,102 @@
-for image in generator:%0A
+self.generator = generator%0A%0A def treat(self, image):%0A %22%22%22Process one image page.%22%22%22%0A
@@ -1762,36 +1762,32 @@
image.exists():%0A
-
pywi
@@ -1865,36 +1865,32 @@
-
%25 image.title())
@@ -1902,33 +1902,23 @@
- continue%0A
+return%0A
@@ -1989,20 +1989,16 @@
-
# rather
@@ -2060,20 +2060,16 @@
-
if (imag
@@ -2113,20 +2113,16 @@
d() and%0A
-
@@ -2172,23 +2172,24 @@
- if
+if self.
template
@@ -2218,36 +2218,32 @@
-
-
pywikibot.output
@@ -2262,20 +2262,16 @@
lready%22%0A
-
@@ -2343,25 +2343,16 @@
- continue%0A
+return%0A%0A
@@ -2387,18 +2387,22 @@
ge,
-u%22
+'
%5Cn%5Cn
-%22
+'
+
+self.
temp
@@ -2409,28 +2409,25 @@
late_image)%0A
-
+%0A
@@ -2490,28 +2490,24 @@
-
-
user = pywik
@@ -2546,28 +2546,24 @@
-
usertalkpage
@@ -2600,20 +2600,16 @@
-
-
msg2uplo
@@ -2611,24 +2611,29 @@
2uploader =
+self.
template_use
@@ -2661,20 +2661,16 @@
itle()%7D%0A
-
|
af61a720abe964c2295d8f0aa555fed9bb67372a
|
add 4
|
004.py
|
004.py
|
Python
| 0.999998
|
@@ -0,0 +1,204 @@
+def pal(value):%0A return str(value) == str(value)%5B::-1%5D%0A%0A%0Ap = 1%0A%0Afor i in xrange(999, 99, -1):%0A for j in xrange(999, 99, -1):%0A n = i * j%0A if n %3E p and pal(n):%0A p = n%0A%0Aprint p
|
|
ee1e049a4cbe47ce106824612a69d738562eceb3
|
add simple test for testing that view change messages are checked on the receiver side
|
plenum/test/view_change/test_instance_change_msg_checking.py
|
plenum/test/view_change/test_instance_change_msg_checking.py
|
Python
| 0
|
@@ -0,0 +1,348 @@
+import pytest%0Aimport types%0Afrom plenum.common.types import InstanceChange%0A%0Adef test_instance_change_msg_type_checking(nodeSet, looper, up):%0A nodeA = nodeSet.Alpha%0A nodeB = nodeSet.Beta%0A %0A ridBetta = nodeA.nodestack.getRemote(nodeB.name).uid%0A badViewNo = %22BAD%22%0A nodeA.send(InstanceChange(badViewNo), ridBetta)%0A looper.runFor(.2)
|
|
e1b47df9fadb888dafc32abc8018b15477d74feb
|
Add python test unit.
|
fpsgame/tests.py
|
fpsgame/tests.py
|
Python
| 0
|
@@ -0,0 +1,247 @@
+from ctypes import *%0Aimport sys%0Aimport os%0Aimport xml.etree.ElementTree as ET%0A%0Abinaries = '../../../binaries'%0A%0A# Work out the platform-dependent library filename%0Adll_filename = %7B%0A%09'posix': './libCollada_dbg.so',%0A%09'nt': 'Collada_dbg.dll',%0A%7D%5Bos.name%5D
|
|
412d27b31dc5644c84ac90179fe74669ce8a406c
|
change description & goal from varchar(100) to text
|
talkoohakemisto/migrations/versions/221e6ee3f6c9_adjust_goal_and_description_size.py
|
talkoohakemisto/migrations/versions/221e6ee3f6c9_adjust_goal_and_description_size.py
|
Python
| 0
|
@@ -0,0 +1,862 @@
+%22%22%22adjust goal and description size%0A%0ARevision ID: 221e6ee3f6c9%0ARevises: 27f12bb68b12%0ACreate Date: 2014-04-12 17:04:16.750942%0A%0A%22%22%22%0A%0A# revision identifiers, used by Alembic.%0Arevision = '221e6ee3f6c9'%0Adown_revision = '27f12bb68b12'%0A%0Afrom alembic import op%0Aimport sqlalchemy as sa%0A%0A%0A%0Adef upgrade():%0A op.execute(%0A '''%0A ALTER TABLE voluntary_work%0A ALTER COLUMN description%0A TYPE text%0A '''%0A )%0A op.execute(%0A '''%0A ALTER TABLE voluntary_work%0A ALTER COLUMN goal%0A TYPE text%0A '''%0A )%0A pass%0A%0A%0Adef downgrade():%0A op.execute(%0A '''%0A ALTER TABLE voluntary_work%0A ALTER COLUMN description%0A TYPE varchar(100)%0A '''%0A )%0A op.execute(%0A '''%0A ALTER TABLE voluntary_work%0A ALTER COLUMN goal%0A TYPE varchar(100)%0A '''%0A )%0A pass%0A
|
|
8c1f1c0728b261526b19c46dbd459bbc0f4e97a8
|
add leetcode Maximum Depth of Binary Tree
|
leetcode/MaximumDepthOfBinaryTree/solution.py
|
leetcode/MaximumDepthOfBinaryTree/solution.py
|
Python
| 0.00001
|
@@ -0,0 +1,428 @@
+# -*- coding:utf-8 -*-%0A# Definition for a binary tree node%0A# class TreeNode:%0A# def __init__(self, x):%0A# self.val = x%0A# self.left = None%0A# self.right = None%0A%0Aclass Solution:%0A # @param root, a tree node%0A # @return an integer%0A def maxDepth(self, root):%0A if not root:%0A return 0%0A hight = max(self.maxDepth(root.left), self.maxDepth(root.right))%0A return 1 + hight%0A
|
|
558c68060903608abe0bbe15303f192eacf529eb
|
Add way to call converters/harvesters in 0.2.0
|
proto_main.py
|
proto_main.py
|
Python
| 0
|
@@ -0,0 +1,1152 @@
+from importlib import import_module%0A%0A%0Adef call_harvester(source_name, **kwargs):%0A harvester = import_module(%22mdf_indexers.harvesters.%22 + source_name + %22_harvester%22)%0A harvester.harvest(**kwargs)%0A%0A%0Adef call_converter(sources, input_path=None, metadata=None, verbose=False):%0A if type(sources) is not list:%0A sources = %5Bsources%5D%0A if verbose:%0A print(%22CONVERTING THE FOLLOWING DATASETS:%22, sources)%0A for source_name in sources:%0A if verbose:%0A print(%22%5CnCONVERTER FOR%22, source_name, %22%5Cn%22)%0A converter = import_module(%22mdf_indexers.converters.%22 + source_name + %22_converter%22)%0A if not input_path:%0A # Relative path is from calling function, not sub-function: paths.datasets will be wrong%0A # Use %22mdf_indexers/datasets/X%22 instead%0A input_path = %22mdf_indexers/datasets/%22 + source_name + %22/%22%0A converter.convert(input_path=input_path, metadata=metadata, verbose=verbose)%0A if verbose:%0A print(%22%5CnALL CONVERTING COMPLETE%22)%0A%0A%0Aif __name__ == %22__main__%22:%0A import sys%0A if len(sys.argv) %3E 1:%0A call_converter(*sys.argv%5B1:%5D)%0A else:%0A call_converter()%0A%0A
|
|
ebe0b558d80ca7b7e5e7be50cc7c053020dca9fe
|
create list app skeleton
|
app.py
|
app.py
|
Python
| 0.000002
|
@@ -0,0 +1,701 @@
+#!/usr/bin/env python%0Afrom flask import Flask%0A%0Aapp = Flask(__name__)%0A%0A# define a list item class%0Aclass ListItem(Model):%0A %0A@app.route('/add', methods=%5B'POST'%5D)%0Adef add_item():%0A ''' add items to the list '''%0A return %22stub%22%0A%0A@app.route('/view')%0Adef view_items():%0A ''' view items in the list '''%0A return %22stub%22%0A%0A@app.route('/delete/%3Cthis_id%3E')%0Adef delete_item(this_id):%0A ''' delete items from the list '''%0A return %22stub%22%0A%0A@app.route('/strike/%3Cthis_id%3E')%0Adef strike(this_id):%0A ''' move items to and from deletion staging area '''%0A%0A@app.route('/')%0Adef home():%0A ''' applicaiton root '''%0A return %22stub%22%0A%0Aif __name__ == '__main__':%0A app.run(debug=True, host='0.0.0.0', port=3000)
|
|
a23eb3f9a921676a3b91ff48b073f9cf4d15cfaa
|
Create bot.py
|
bot.py
|
bot.py
|
Python
| 0.000001
|
@@ -0,0 +1,1125 @@
+from twython import Twython, TwythonError%0Afrom PIL import Image%0Aimport os, random, statistics, time%0A%0AAPP_KEY = ''%0AAPP_SECRET = ''%0AOAUTH_TOKEN = ''%0AOAUTH_TOKEN_SECRET = ''%0A%0Abrightness_threshold = 35%0Aseconds_between_tweets = 600%0A%0Adef tweet():%0A%09twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)%0A%0A%09acceptable = False%0A%09while not acceptable:%0A%09%09fn = '.'%0A%09%09while fn.startswith('.'):%0A%09%09%09fn = random.choice(os.listdir('img/'))%0A%09%09%09acceptable = image_acceptable('img/' + fn)%0A%0A%09photo = open('img/' + fn, 'rb')%0A%09response = twitter.upload_media(media=photo)%0A%0A%09try:%0A%09%09twitter.update_status(status='#FutureDiaryBot #MiraiNikki #FutureDiary' + ' ', media_ids=%5Bresponse%5B'media_id'%5D%5D)%0A%09%09print 'tweeted!'%0A%09except TwythonError as error:%0A%09%09print error%0A%0Adef main():%0A%09while True:%0A%09%09tweet()%0A%09%09time.sleep(seconds_between_tweets)%0A%0Adef image_acceptable(path):%0A%0A%09img = Image.open(path)%0A%09all_rgb = %5B%5D%0A%0A%09pix = img.load()%0A%09for x in range(0, img.size%5B0%5D):%0A%09%09for y in range(0, img.size%5B1%5D):%0A%09%09%09all_rgb.append(pix%5Bx,y%5D)%0A%0A%09all_brightness = map(sum, all_rgb)%0A%09sd = statistics.stdev(all_brightness)%0A%0A%09return sd %3E brightness_threshold%0A%0Amain()%0A
|
|
3bb4f078f2a03b334c2b44378be2b01e54fb7b37
|
Add command load beginner categories
|
datasets/management/commands/load_beginner_categories.py
|
datasets/management/commands/load_beginner_categories.py
|
Python
| 0.000001
|
@@ -0,0 +1,983 @@
+from django.core.management.base import BaseCommand%0Aimport json%0Afrom datasets.models import Dataset, TaxonomyNode%0A%0A%0Aclass Command(BaseCommand):%0A help = 'Load field easy categories from json taxonomy file. ' %5C%0A 'Use it as python manage.py load_beginner_categories.py ' %5C%0A 'DATASET_ID PATH/TO/TAOXNOMY_FILE.json'%0A%0A def add_arguments(self, parser):%0A parser.add_argument('dataset_id', type=int)%0A parser.add_argument('taxonomy_file', type=str)%0A%0A def handle(self, *args, **options):%0A file_location = options%5B'taxonomy_file'%5D%0A dataset_id = options%5B'dataset_id'%5D%0A%0A ds = Dataset.objects.get(id=dataset_id)%0A taxonomy = ds.taxonomy%0A data = json.load(open(file_location))%0A%0A for d in data:%0A node = taxonomy.get_element_at_id(d%5B'id'%5D)%0A if d%5B'beginner_category'%5D:%0A node.beginner_task = True%0A else:%0A node.beginner_task = False%0A node.save()%0A
|
|
a06995a686c0509f50a481e7d7d41bb35ffe8f19
|
add simple improved Sieve Of Eratosthenes Algorithm (#1412)
|
maths/prime_sieve_eratosthenes.py
|
maths/prime_sieve_eratosthenes.py
|
Python
| 0
|
@@ -0,0 +1,801 @@
+'''%0ASieve of Eratosthenes%0A%0AInput : n =10%0AOutput : 2 3 5 7 %0A%0AInput : n = 20 %0AOutput: 2 3 5 7 11 13 17 19%0A%0Ayou can read in detail about this at %0Ahttps://en.wikipedia.org/wiki/Sieve_of_Eratosthenes%0A'''%0A%0Adef prime_sieve_eratosthenes(num):%0A %22%22%22%0A print the prime numbers upto n%0A %0A %3E%3E%3E prime_sieve_eratosthenes(10)%0A 2 3 5 7 %0A %3E%3E%3E prime_sieve_eratosthenes(20)%0A 2 3 5 7 11 13 17 19 %0A %22%22%22%0A %0A %0A primes = %5BTrue for i in range(num + 1)%5D%0A p = 2%0A %0A while p * p %3C= num:%0A if primes%5Bp%5D == True:%0A for i in range(p*p, num+1, p):%0A primes%5Bi%5D = False%0A p+=1%0A%0A for prime in range(2, num+1):%0A if primes%5Bprime%5D:%0A print(prime, end=%22 %22)%0A%0Aif __name__ == %22__main__%22:%0A num = int(input())%0A %0A prime_sieve_eratosthenes(num)%0A
|
|
e7ab5b39042f3a224a150257dd1aa845e7bb1adc
|
Add back a missing import
|
common/djangoapps/xblock_django/models.py
|
common/djangoapps/xblock_django/models.py
|
"""
Models.
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from config_models.models import ConfigurationModel
class XBlockDisableConfig(ConfigurationModel):
    """
    Configuration for disabling and deprecating XBlocks.
    """

    class Meta(ConfigurationModel.Meta):
        app_label = 'xblock_django'

    # Fix: bare ``TextField`` is not imported in this module and raised
    # NameError at import time; reference it via the ``models`` module
    # that is already imported.
    disabled_blocks = models.TextField(
        default='', blank=True,
        help_text=_('Space-separated list of XBlocks which should not render.')
    )

    disabled_create_blocks = models.TextField(
        default='', blank=True,
        help_text=_(
            "Space-separated list of XBlock types whose creation to disable in Studio."
        )
    )

    @classmethod
    def is_block_type_disabled(cls, block_type):
        """ Return True if block_type is disabled. """
        config = cls.current()
        if not config.enabled:
            return False
        return block_type in config.disabled_blocks.split()

    def __unicode__(self):
        """Return a human-readable summary of the disabled block list."""
        config = XBlockDisableConfig.current()
        return u"Disabled xblocks = {disabled_xblocks}".format(
            disabled_xblocks=config.disabled_blocks
        )
class XBlockConfiguration(ConfigurationModel):
    """
    XBlock configuration used by both LMS and Studio, and not specific to a particular template.
    """

    KEY_FIELDS = ('name',)  # xblock name is unique

    class Meta(ConfigurationModel.Meta):
        app_label = 'xblock_django'

    # boolean field 'enabled' inherited from parent ConfigurationModel
    name = models.CharField(max_length=255, null=False, db_index=True)
    deprecated = models.BooleanField(
        default=False,
        verbose_name=_('show deprecation messaging in Studio')
    )

    def __unicode__(self):
        """Return a debugging-friendly representation of this config row."""
        return (
            "XBlockConfiguration(name={}, enabled={}, deprecated={})"
        ).format(self.name, self.enabled, self.deprecated)
class XBlockStudioConfigurationFlag(ConfigurationModel):
    """
    Enables site-wide Studio configuration for XBlocks.
    """

    class Meta(object):
        app_label = "xblock_django"

    # boolean field 'enabled' inherited from parent ConfigurationModel
    def __unicode__(self):
        """Return a debugging-friendly representation of this flag."""
        return "XBlockStudioConfigurationFlag(enabled={})".format(self.enabled)
class XBlockStudioConfiguration(ConfigurationModel):
    """
    Studio editing configuration for a specific XBlock/template combination.
    """

    KEY_FIELDS = ('name', 'template')  # xblock name/template combination is unique

    # Support-level codes stored in the database (see SUPPORT_CHOICES).
    FULL_SUPPORT = 'fs'
    PROVISIONAL_SUPPORT = 'ps'
    UNSUPPORTED = 'us'

    SUPPORT_CHOICES = (
        (FULL_SUPPORT, _('Fully Supported')),
        (PROVISIONAL_SUPPORT, _('Provisionally Supported')),
        (UNSUPPORTED, _('Unsupported'))
    )

    # boolean field 'enabled' inherited from parent ConfigurationModel
    name = models.CharField(max_length=255, null=False, db_index=True)
    template = models.CharField(max_length=255, blank=True, default='')
    support_level = models.CharField(max_length=2, choices=SUPPORT_CHOICES, default=UNSUPPORTED)

    class Meta(object):
        app_label = "xblock_django"

    def __unicode__(self):
        """Return a debugging-friendly representation of this config row."""
        return (
            "XBlockStudioConfiguration(name={}, template={}, enabled={}, support_level={})"
        ).format(self.name, self.template, self.enabled, self.support_level)
|
Python
| 0.000081
|
@@ -39,16 +39,55 @@
models%0A
+from django.db.models import TextField%0A
from dja
|
b92625f1fa381f079d81e67b34d9f03e9c8a2282
|
Update help for nbgrader autograde
|
nbgrader/apps/autogradeapp.py
|
nbgrader/apps/autogradeapp.py
|
from textwrap import dedent
from IPython.config.loader import Config
from IPython.utils.traitlets import Unicode, Bool, Dict
from IPython.nbconvert.preprocessors import ClearOutputPreprocessor
from nbgrader.apps.baseapp import (
BaseNbConvertApp, nbconvert_aliases, nbconvert_flags)
from nbgrader.preprocessors import (
FindStudentID, SaveAutoGrades, OverwriteGradeCells, Execute)
# Command-line aliases/flags: start from nbconvert's defaults and layer on
# the autograde-specific entries.
aliases = dict(nbconvert_aliases)
aliases.update({
    'regexp': 'FindStudentID.regexp',
    'assignment': 'AssignmentExporter.assignment_id',
    'student': 'AssignmentExporter.student_id',
    'db': 'AssignmentExporter.db_url'
})

flags = dict(nbconvert_flags)
flags.update({
    'overwrite-cells': (
        {'AutogradeApp': {'overwrite_cells': True}},
        "Overwrite grade cells from the database."
    )
})
class AutogradeApp(BaseNbConvertApp):
    """nbconvert-based application that autogrades a notebook by running it.

    The grading pipeline (assembled in ``build_extra_config``) identifies
    the student, clears any existing output, optionally restores grade
    cells from the database, executes the notebook, and saves the
    resulting grades.
    """

    # Application identity used by the traitlets/IPython app framework.
    name = Unicode(u'nbgrader-autograde')
    description = Unicode(u'Autograde a notebook by running it')

    # Command-line aliases/flags built at module level from the nbconvert
    # defaults plus the autograde-specific entries.
    aliases = Dict(aliases)
    flags = Dict(flags)

    # NOTE(review): the examples below mention --build-dir, --files and
    # --relpath, which are not defined in this module's alias/flag tables --
    # presumably they come from the shared nbconvert aliases/flags; confirm
    # the examples are still accurate.
    examples = Unicode(dedent(
        """
        Running `nbgrader autograde` on a file by itself will produce a student
        version of that file in the same directory. In this case, it will produce
        "Problem 1.nbconvert.ipynb":
        > nbgrader autograde "Problem 1.ipynb"
        If you want to override the .nbconvert part of the filename, you can use
        the --output flag:
        > nbgrader autograde "Problem 1.ipynb" --output "Problem 1.graded.ipynb"
        Or, you can put the graded version in a different directory. In the
        following example, there will be a file "graded/Problem 1.ipynb" after
        running `nbgrader autograde`:
        > nbgrader autograde "Problem 1.ipynb" --build-dir=graded
        You can also use shell globs, and copy files from one directory to another:
        > nbgrader autograde submitted/*.ipynb --build-dir=graded
        If you need to copy dependent files over as well, you can do this with
        the --files and --relpath flags. In the following example, all the .jpg
        files in the teacher/images/ folder will be linked to the student/images/
        folder (without the --relpath flag, they would be in student/teacher/images/):
        > nbgrader autograde submitted/*.ipynb --build-dir=graded --files='["submitted/images/*.jpg"]' --relpath=submitted
        """
    ))

    # NOTE(review): student_id is not referenced within this class; it may be
    # consumed by preprocessors via the shared config -- confirm before
    # removing.
    student_id = Unicode(u'', config=True)
    overwrite_cells = Bool(False, config=True, help="Overwrite grade cells from the database")

    def _classes_default(self):
        """This has to be in a method, for TerminalIPythonApp to be available."""
        classes = super(AutogradeApp, self)._classes_default()
        # Expose the grading preprocessors so their configurable traits are
        # discoverable by the application.
        classes.extend([
            FindStudentID,
            ClearOutputPreprocessor,
            OverwriteGradeCells,
            Execute,
            SaveAutoGrades
        ])
        return classes

    def build_extra_config(self):
        """Assemble the preprocessor pipeline and merge it into the config.

        Order matters: the student id is extracted and outputs are cleared
        before (optionally) overwriting grade cells, executing the notebook,
        and finally saving the computed grades.
        """
        self.extra_config = Config()
        self.extra_config.Exporter.preprocessors = [
            'nbgrader.preprocessors.FindStudentID',
            'IPython.nbconvert.preprocessors.ClearOutputPreprocessor'
        ]
        if self.overwrite_cells:
            # Restore the canonical grade-cell source/metadata from the
            # database before execution.
            self.extra_config.Exporter.preprocessors.append(
                'nbgrader.preprocessors.OverwriteGradeCells'
            )
        self.extra_config.Exporter.preprocessors.extend([
            'nbgrader.preprocessors.Execute',
            'nbgrader.preprocessors.SaveAutoGrades'
        ])
        self.config.merge(self.extra_config)
|
Python
| 0
|
@@ -453,46 +453,8 @@
e(%7B%0A
- 'regexp': 'FindStudentID.regexp',%0A
@@ -1227,16 +1227,95 @@
t.ipynb%22
+ (note that you need to specify the assignment%0A name and the student id)
:%0A
@@ -1359,24 +1359,72 @@
lem 1.ipynb%22
+ --assignment=%22Problem Set 1%22 --student=student1
%0A%0A If
@@ -1602,16 +1602,64 @@
d.ipynb%22
+ --assignment=%22Problem Set 1%22 --student=student1
%0A%0A
@@ -1903,32 +1903,80 @@
build-dir=graded
+ --assignment=%22Problem Set 1%22 --student=student1
%0A%0A You ca
@@ -2115,336 +2115,268 @@
aded
-%0A%0A If you need to copy dependent files over as well, you can do this with%0A the --files and --relpath flags. In the following example, all the .jpg%0A files in the teacher/images/ folder will be linked to the student/images/%0A folder (without the --relpath flag, they would be in student/teacher/images/)
+ --assignment=%22Problem Set 1%22 --student=student1%0A%0A If you want to overwrite grade cells with the source and metadata that%0A was stored in the database when running %60nbgrader assign%60 with --save-cells,%0A you can use the --overwrite-cells flag
:%0A%0A
@@ -2407,101 +2407,91 @@
ade
-submitted/*
+%22Problem 1
.ipynb
+%22
--
-build-dir=graded --files='%5B%22submitted/images/*.jpg%22%5D' --relpath=submitted
+assignment=%22Problem Set 1%22 --student=student1 --overwrite-cells
%0A%0A
|
e6e92fb3afff0403091c221328f9023e0e391b0b
|
Add eventlisten script to watch events on the master and minion
|
tests/eventlisten.py
|
tests/eventlisten.py
|
Python
| 0
|
@@ -0,0 +1,1651 @@
+'''%0AUse this script to dump the event data out to the terminal. It needs to know%0Awhat the sock_dir is.%0A%0AThis script is a generic tool to test event output%0A'''%0A%0A# Import Python libs%0Aimport optparse%0Aimport pprint%0Aimport os%0Aimport time%0Aimport tempfile%0A%0A# Import Salt libs%0Aimport salt.utils.event%0A%0Adef parse():%0A '''%0A Parse the script command line inputs%0A '''%0A parser = optparse.OptionParser()%0A%0A parser.add_option('-s',%0A '--sock-dir',%0A dest='sock_dir',%0A default=os.path.join(tempfile.gettempdir(), '.salt-unix'),%0A help=('Staticly define the directory holding the salt unix '%0A 'sockets for communication'))%0A parser.add_option('-n',%0A '--node',%0A dest='node',%0A default='master',%0A help=('State if this listener will attach to a master or a '%0A 'minion daemon, pass %22master%22 or %22minion%22'))%0A%0A options, args = parser.parse_args()%0A%0A opts = %7B%7D%0A %0A for k, v in options.__dict__.items():%0A if v is not None:%0A opts%5Bk%5D = v%0A%0A return opts%0A%0A%0Adef listen(sock_dir, node):%0A '''%0A Attach to the pub socket and grab messages%0A '''%0A event = salt.utils.event.SaltEvent(%0A sock_dir,%0A node%0A )%0A while True:%0A ret = event.get_event(full=True)%0A if ret is None:%0A continue%0A print('Event fired at %7B0%7D'.format(time.asctime()))%0A print('*'*25)%0A print('Tag: %7B0%7D'.format(ret%5B'tag'%5D))%0A print('Data:')%0A pprint.pprint(ret%5B'data'%5D)%0A%0A%0Aif __name__ == '__main__':%0A opts = parse()%0A listen(opts%5B'sock_dir'%5D, opts%5B'node'%5D)%0A
|
|
703d97150de1c74b7c1a62b59c1ff7081dec8256
|
Add an example of resolving a known service by service name
|
examples/resolver.py
|
examples/resolver.py
|
Python
| 0.998661
|
@@ -0,0 +1,537 @@
+#!/usr/bin/env python3%0A%0A%22%22%22 Example of resolving a service with a known name %22%22%22%0A%0Aimport logging%0Aimport sys%0A%0Afrom zeroconf import Zeroconf%0A%0ATYPE = '_test._tcp.local.'%0ANAME = 'My Service Name'%0A%0Aif __name__ == '__main__':%0A logging.basicConfig(level=logging.DEBUG)%0A if len(sys.argv) %3E 1:%0A assert sys.argv%5B1:%5D == %5B'--debug'%5D%0A logging.getLogger('zeroconf').setLevel(logging.DEBUG)%0A%0A zeroconf = Zeroconf()%0A%0A try:%0A print(zeroconf.get_service_info(TYPE, NAME + '.' + TYPE))%0A finally:%0A zeroconf.close()%0A
|
|
b95ad416428333b949a2e89eec9f18f45fb82e19
|
Add OmegaTFT to strategies list
|
axelrod/strategies/_strategies.py
|
axelrod/strategies/_strategies.py
|
from __future__ import absolute_import
from .alternator import Alternator
from .apavlov import APavlov2006, APavlov2011
from .appeaser import Appeaser
from .averagecopier import AverageCopier, NiceAverageCopier
from .axelrod_first import (Davis, RevisedDowning, Feld, Grofman, Nydegger,
Joss, Shubik, Tullock, UnnamedStrategy)
from .axelrod_second import Champion, Eatherley, Tester
from .backstabber import BackStabber, DoubleCrosser
from .calculator import Calculator
from .cooperator import Cooperator, TrickyCooperator
from .cycler import AntiCycler, Cycler, CyclerCCD, CyclerCCCD, CyclerCCCCCD
from .darwin import Darwin
from .defector import Defector, TrickyDefector
from .forgiver import Forgiver, ForgivingTitForTat
from .geller import Geller, GellerCooperator, GellerDefector
from .gobymajority import (
GoByMajority, GoByMajority10, GoByMajority20, GoByMajority40,
GoByMajority5)
from .grudger import Grudger, ForgetfulGrudger, OppositeGrudger, Aggravater
from .grumpy import Grumpy
from .hunter import (
DefectorHunter, CooperatorHunter, CycleHunter, AlternatorHunter,
MathConstantHunter, RandomHunter, EventualCycleHunter)
from .inverse import Inverse
from .mathematicalconstants import Golden, Pi, e
from .memoryone import (
WinStayLoseShift, GTFT, StochasticCooperator, StochasticWSLS, ZDGTFT2,
ZDExtort2, SoftJoss, MemoryOnePlayer)
from .mindcontrol import MindController, MindWarper, MindBender
from .mindreader import MindReader, ProtectedMindReader, MirrorMindReader
from .oncebitten import OnceBitten, FoolMeOnce, ForgetfulFoolMeOnce, FoolMeForever
from .prober import Prober, Prober2, Prober3, HardProber
from .punisher import Punisher, InversePunisher
from .qlearner import RiskyQLearner, ArrogantQLearner, HesitantQLearner, CautiousQLearner
from .rand import Random
from .retaliate import (
Retaliate, Retaliate2, Retaliate3, LimitedRetaliate, LimitedRetaliate2,
LimitedRetaliate3)
from .titfortat import (
TitForTat, TitFor2Tats, TwoTitsForTat, Bully, SneakyTitForTat,
SuspiciousTitForTat, AntiTitForTat, HardTitForTat, HardTitFor2Tats,
OmegaTFT)
# Note: Meta* strategies are handled in .__init__.py

# Registry of playable strategies, kept in alphabetical order.  OmegaTFT is
# imported above but was missing from this list; it is now registered.
# NOTE(review): a few other imported names (e.g. StochasticCooperator,
# RevisedDowning, Cycler, MemoryOnePlayer, UnnamedStrategy) are also absent --
# presumably intentional (bases/parametrized classes); confirm.
strategies = [
    Aggravater,
    Alternator,
    AlternatorHunter,
    AntiCycler,
    AntiTitForTat,
    APavlov2006,
    APavlov2011,
    Appeaser,
    ArrogantQLearner,
    AverageCopier,
    BackStabber,
    Bully,
    Calculator,
    CautiousQLearner,
    Champion,
    Cooperator,
    CooperatorHunter,
    CycleHunter,
    CyclerCCCCCD,
    CyclerCCCD,
    CyclerCCD,
    Darwin,
    Davis,
    Defector,
    DefectorHunter,
    DoubleCrosser,
    Eatherley,
    EventualCycleHunter,
    Feld,
    FoolMeForever,
    FoolMeOnce,
    ForgetfulFoolMeOnce,
    ForgetfulGrudger,
    Forgiver,
    ForgivingTitForTat,
    GTFT,
    Geller,
    GellerCooperator,
    GellerDefector,
    GoByMajority,
    GoByMajority10,
    GoByMajority20,
    GoByMajority40,
    GoByMajority5,
    Golden,
    Grofman,
    Grudger,
    Grumpy,
    HardProber,
    HardTitFor2Tats,
    HardTitForTat,
    HesitantQLearner,
    Inverse,
    InversePunisher,
    Joss,
    LimitedRetaliate,
    LimitedRetaliate2,
    LimitedRetaliate3,
    MathConstantHunter,
    MindBender,
    MindController,
    MindReader,
    MindWarper,
    MirrorMindReader,
    NiceAverageCopier,
    Nydegger,
    OmegaTFT,
    OnceBitten,
    OppositeGrudger,
    Pi,
    Prober,
    Prober2,
    Prober3,
    ProtectedMindReader,
    Punisher,
    Random,
    RandomHunter,
    Retaliate,
    Retaliate2,
    Retaliate3,
    RiskyQLearner,
    Shubik,
    SneakyTitForTat,
    SoftJoss,
    StochasticWSLS,
    SuspiciousTitForTat,
    Tester,
    TitForTat,
    TitFor2Tats,
    TrickyCooperator,
    TrickyDefector,
    Tullock,
    TwoTitsForTat,
    WinStayLoseShift,
    ZDExtort2,
    ZDGTFT2,
    e,
]
|
Python
| 0
|
@@ -3363,24 +3363,38 @@
Nydegger,%0A
+ OmegaTFT,%0A
OnceBitt
|
039a07bde5975cb6ce40edc43bbd3d931ac5cc92
|
Test borda ranking and spearman.
|
exp/influence2/test/RankAggregatorTest.py
|
exp/influence2/test/RankAggregatorTest.py
|
Python
| 0
|
@@ -0,0 +1,1261 @@
+import numpy %0Aimport unittest%0Aimport logging%0Afrom exp.influence2.RankAggregator import RankAggregator %0Aimport scipy.stats.mstats %0Aimport numpy.testing as nptst %0A%0A%0Aclass RankAggregatorTest(unittest.TestCase):%0A def setUp(self): %0A numpy.random.seed(22) %0A %0A %0A def testSpearmanFootrule(self): %0A list1 = %5B5, 4, 3, 2, 1, 0%5D%0A list2 = %5B5, 4, 3, 2, 1, 0%5D%0A %0A dist = RankAggregator.spearmanFootrule(list1, list2)%0A %0A self.assertEquals(dist, 0)%0A %0A list2 = %5B5, 4, 3, 2, 0, 1%5D%0A dist = RankAggregator.spearmanFootrule(list1, list2)%0A %0A self.assertEquals(dist, 1.0/9)%0A %0A list2 = %5B0, 1, 2, 3, 4, 5%5D%0A dist = RankAggregator.spearmanFootrule(list1, list2)%0A self.assertEquals(dist, 1.0)%0A %0A def testBorda(self): %0A list1 = %5B5, 4, 3, 2, 1, 0%5D%0A list2 = %5B5, 4, 3, 2, 1, 0%5D %0A %0A outList = RankAggregator.borda(list1, list2)%0A %0A nptst.assert_array_equal(outList, numpy.array(%5B5,4,3,2,1,0%5D))%0A %0A list2 = %5B4, 3, 2, 5, 1, 0%5D%0A outList = RankAggregator.borda(list1, list2)%0A nptst.assert_array_equal(outList, numpy.array(%5B4,5,3,2,1,0%5D))%0A %0A%0Aif __name__ == '__main__':%0A unittest.main()%0A%0A
|
|
afec3bf59fd454d61d5bc0024516610acfcb5704
|
Add 1D data viewer.
|
examples/viewer1D.py
|
examples/viewer1D.py
|
Python
| 0
|
@@ -0,0 +1,1522 @@
+from __future__ import print_function%0A%0Aimport numpy as np%0Afrom viewer import Viewer%0A%0Afrom enthought.traits.api import Array%0Afrom enthought.chaco.api import Plot, ArrayPlotData, HPlotContainer, gray%0A%0Aimport lulu%0A%0Aclass Viewer1D(Viewer):%0A image = Array%0A result = Array%0A%0A def _reconstruction_default(self):%0A rows, cols = self.image.shape%5B:2%5D%0A self.plot_data = ArrayPlotData(original=self.image%5B0%5D,%0A reconstruction=self.result%5B0%5D)%0A%0A aspect = cols/float(rows)%0A%0A old = Plot(self.plot_data)%0A old.plot('original', )%0A old.title = 'Old'%0A%0A self.new = Plot(self.plot_data)%0A self.new.plot('reconstruction')%0A self.new.title = 'New'%0A%0A container = HPlotContainer(bgcolor='none')%0A container.add(old)%0A container.add(self.new)%0A%0A return container%0A%0A def update_plot(self):%0A self.plot_data.set_data('reconstruction', self.result%5B0%5D)%0A self.new.request_redraw()%0A%0A%0A%0Aif __name__ == %22__main__%22:%0A import sys%0A if len(sys.argv) %3E= 2 and '-UL' in sys.argv:%0A operator = 'UL'%0A sys.argv.remove('-UL')%0A else:%0A operator = 'LU'%0A%0A image = (np.random.random((1, 100)) * 255).astype(int)%0A%0A print(%22Decomposing using the %25s operator.%22 %25 operator)%0A if operator == 'LU':%0A print(%22Use the '-UL' flag to switch to UL.%22)%0A%0A print()%0A pulses = lulu.decompose(image, operator=operator)%0A%0A viewer = Viewer1D(pulses=pulses, image=image)%0A viewer.configure_traits()%0A
|
|
d9ff51c74c4b41128bc8e2fe61811dba53e7da17
|
Create test_client.py
|
tests/test_client.py
|
tests/test_client.py
|
Python
| 0.000003
|
@@ -0,0 +1,668 @@
+import unittest%0Afrom app import create_app, db%0Afrom app.models import User, Role%0A%0Aclass FlaskClientTestCase(unittest.TestCase):%0A def setUp(self):%0A self.app = create_app('testing')%0A self.app_context = self.app.app_context()%0A self.app_context.push()%0A db.create_all()%0A Role.insert_roles()%0A self.client = self.app.test_client(use_cookies=True)%0A %0A def teardown(self):%0A db.session.remove()%0A db.drop_all()%0A self.app_context.pop()%0A %0A def test_home_page(self):%0A response = self.client.get(url_for('main.index'))%0A self.assertTrue('Stranger' in response.get_data(as_text=True))%0A %0A
|
|
d37d831e54fbaebab427c9a5b88cb7eb358b31af
|
transform data to website via Post & Get
|
WebScraping/4.py
|
WebScraping/4.py
|
Python
| 0
|
@@ -0,0 +1,304 @@
+#!/usr/bin/python%0A# encoding:utf-8%0A%0Aimport sys%0Aimport urllib, urllib2%0Aimport re%0A%0Adic = %7B'hostname': 'n2', 'ip': '2.2.2.2'%7D%0A%0A# url = 'http://127.0.0.1:8000/db/' + '?' + urllib.urlencode(dic)%0Aurl = 'http://127.0.0.1:8000/db/'%0A%0A%0Aresponse = urllib2.urlopen(url, urllib.urlencode(dict))%0Aprint response.read()%0A
|
|
35a9576dce86c9c3d32c6cc32effb7a8f6c2b706
|
Test DjangoAMQPConnection if Django is installed. Closes #10.
|
tests/test_django.py
|
tests/test_django.py
|
Python
| 0
|
@@ -0,0 +1,1794 @@
+import os%0Aimport sys%0Aimport unittest%0Aimport pickle%0Aimport time%0Asys.path.insert(0, os.pardir)%0Asys.path.append(os.getcwd())%0A%0Afrom tests.utils import AMQP_HOST, AMQP_PORT, AMQP_VHOST, %5C%0A AMQP_USER, AMQP_PASSWORD%0Afrom carrot.connection import DjangoAMQPConnection, AMQPConnection%0Afrom UserDict import UserDict%0A%0A%0Aclass DictWrapper(UserDict):%0A%0A def __init__(self, data):%0A self.data = data%0A%0A def __getattr__(self, key):%0A return self.data%5Bkey%5D%0A%0A%0Adef configured_or_configure(settings, **conf):%0A if settings.configured:%0A for conf_name, conf_value in conf.items():%0A setattr(settings, conf_name, conf_value)%0A else:%0A settings.configure(default_settings=DictWrapper(conf))%0A%0A%0Aclass TestDjangoSpecific(unittest.TestCase):%0A%0A def test_DjangoAMQPConnection(self):%0A try:%0A from django.conf import settings%0A except ImportError:%0A sys.stderr.write(%0A %22Django is not installed. %5C%0A Not testing django specific features.%5Cn%22)%0A return%0A configured_or_configure(settings,%0A AMQP_SERVER=AMQP_HOST,%0A AMQP_PORT=AMQP_PORT,%0A AMQP_VHOST=AMQP_VHOST,%0A AMQP_USER=AMQP_USER,%0A AMQP_PASSWORD=AMQP_PASSWORD)%0A%0A expected_values = %7B%0A %22hostname%22: AMQP_HOST,%0A %22port%22: AMQP_PORT,%0A %22virtual_host%22: AMQP_VHOST,%0A %22userid%22: AMQP_USER,%0A %22password%22: AMQP_PASSWORD%7D%0A%0A conn = DjangoAMQPConnection()%0A self.assertTrue(isinstance(conn, AMQPConnection))%0A self.assertTrue(getattr(conn, %22connection%22, None))%0A%0A for val_name, val_value in expected_values.items():%0A self.assertEquals(getattr(conn, val_name, None), val_value)%0A
|
|
0390209498c2a604efabe13595e3f69f7dcbd577
|
Add script for path init.
|
tools/init.py
|
tools/init.py
|
Python
| 0
|
@@ -0,0 +1,715 @@
+#!/usr/bin/env python%0A%0A%22%22%22Setup paths for TPN%22%22%22%0A%0A%0Aimport os.path as osp%0Aimport sys%0A%0Adef add_path(path):%0A if path not in sys.path:%0A sys.path.insert(0, path)%0A%0Athis_dir = osp.dirname(__file__)%0Aext_dir = osp.join(this_dir, '..', 'external')%0A%0A# Add py-faster-rcnn paths to PYTHONPATH%0Afrcn_dir = osp.join(this_dir, '..', 'external', 'py-faster-rcnn')%0Aadd_path(osp.join(frcn_dir, 'lib'))%0A# caffe_path = osp.join('/Volumes/Research/ECCV2016/Code/External/fast-rcnn-VID-test', 'caffe-fast-rcnn', 'python')%0Aadd_path(osp.join(frcn_dir, 'caffe-fast-rcnn', 'python'))%0A%0A# Add vdetlib to PYTHONPATH%0Alib_path = ext_dir%0Aadd_path(lib_path)%0A%0A# tpn related modules%0Asrc_dir = osp.join(this_dir, '..', 'src')%0Aadd_path(src_dir)
|
|
bcfd9808377878f440cc030178b33e76eb4f031c
|
Add presubmit check to catch use of PRODUCT_NAME in resources.
|
chrome/app/PRESUBMIT.py
|
chrome/app/PRESUBMIT.py
|
Python
| 0.00004
|
@@ -0,0 +1,1770 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.%0A# Use of this source code is governed by a BSD-style license that can be%0A# found in the LICENSE file.%0A%0A%22%22%22Presubmit script for changes affecting chrome/app/%0A%0ASee http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts%0Afor more details about the presubmit API built into gcl.%0A%22%22%22%0A%0Aimport os%0A%0Adef _CheckNoProductNameInGeneratedResources(input_api, output_api):%0A %22%22%22Check that no PRODUCT_NAME placeholders are found in resources files.%0A%0A These kinds of strings prevent proper localization in some languages. For%0A more information, see the following chromium-dev thread:%0A https://groups.google.com/a/chromium.org/forum/#!msg/chromium-dev/PBs5JfR0Aoc/NOcIHII9u14J%0A %22%22%22%0A%0A problems = %5B%5D%0A filename_filter = lambda x: x.LocalPath().endswith('.grd')%0A%0A for f, line_num, line in input_api.RightHandSideLines(filename_filter):%0A if 'PRODUCT_NAME' in line:%0A problems.append('%25s:%25d' %25 (f.LocalPath(), line_num))%0A%0A if problems:%0A return %5Boutput_api.PresubmitPromptWarning(%0A %22Don't use PRODUCT_NAME placeholders in string resources. Instead, add %22%0A %22separate strings to google_chrome_strings.grd and %22%0A %22chromium_strings.grd. See http://goo.gl/6614MQ for more information.%22%0A %22Problems with this check? Contact dubroy@chromium.org.%22,%0A items=problems)%5D%0A return %5B%5D%0A%0Adef _CommonChecks(input_api, output_api):%0A %22%22%22Checks common to both upload and commit.%22%22%22%0A results = %5B%5D%0A results.extend(_CheckNoProductNameInGeneratedResources(input_api, output_api))%0A return results%0A%0Adef CheckChangeOnUpload(input_api, output_api):%0A return _CommonChecks(input_api, output_api)%0A%0Adef CheckChangeOnCommit(input_api, output_api):%0A return _CommonChecks(input_api, output_api)%0A
|
|
f2807e7505598abdc0f11c8d593655c3dc61c323
|
add legislative.apps
|
opencivicdata/legislative/apps.py
|
opencivicdata/legislative/apps.py
|
Python
| 0.000025
|
@@ -0,0 +1,202 @@
+from django.apps import AppConfig%0Aimport os%0A%0A%0Aclass BaseConfig(AppConfig):%0A name = 'opencivicdata.legislative'%0A verbose_name = 'Open Civic Data - Legislative'%0A path = os.path.dirname(__file__)%0A
|
|
cd7364467de45d63e89eab4e745e29dff9906f69
|
Add crawler for 'dogsofckennel'
|
comics/comics/dogsofckennel.py
|
comics/comics/dogsofckennel.py
|
Python
| 0.998466
|
@@ -0,0 +1,541 @@
+from comics.aggregator.crawler import CreatorsCrawlerBase%0Afrom comics.core.comic_data import ComicDataBase%0A%0A%0Aclass ComicData(ComicDataBase):%0A name = 'Dogs of C-Kennel'%0A language = 'en'%0A url = 'https://www.creators.com/read/dogs-of-c-kennel'%0A rights = 'Mason Mastroianni, Mick Mastroianni, Johnny Hart'%0A%0A%0Aclass Crawler(CreatorsCrawlerBase):%0A history_capable_date = '2007-02-12'%0A schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'%0A time_zone = 'US/Pacific'%0A%0A def crawl(self, pub_date):%0A return self.crawl_helper('179', pub_date)%0A
|
|
0c1ae2fb40e5af5cf732e7ec8e10d2e145be2eb2
|
add run.py
|
run.py
|
run.py
|
Python
| 0.000003
|
@@ -0,0 +1,108 @@
+%22%22%22Simple Migrator.%22%22%22%0A__author__ = 'bkzhn'%0A%0A%0Aif __name__ == '__main__':%0A print('== Simple Migrator ==')%0A
|
|
0d6a31ade487bea9f0b75b1c3e295176fb3a7555
|
Add savecpython script
|
tools/savecpython.py
|
tools/savecpython.py
|
Python
| 0.000001
|
@@ -0,0 +1,2206 @@
+# -*- coding: utf-8 -*-%0Aimport urllib, urllib2%0Afrom datetime import datetime%0A%0ASPEEDURL = 'http://127.0.0.1:8000/'#'http://speed.pypy.org/'%0AHOST = %22bigdog%22%0A%0Adef save(project, revision, results, options, branch, executable, int_options, testing=False):%0A testparams = %5B%5D%0A #Parse data%0A data = %7B%7D%0A current_date = datetime.today()%0A if branch == %22%22: return 1%0A %0A for b in results:%0A bench_name = b%5B0%5D%0A res_type = b%5B1%5D%0A results = b%5B2%5D%0A value = 0%0A if res_type == %22SimpleComparisonResult%22:%0A value = results%5B'base_time'%5D%0A elif res_type == %22ComparisonResult%22:%0A value = results%5B'avg_base'%5D%0A else:%0A print(%22ERROR: result type unknown %22 + b%5B1%5D)%0A return 1%0A data = %7B%0A 'commitid': revision,%0A 'project': project,%0A 'branch': branch,%0A 'executable_name': executable,%0A 'executable_coptions': int_options,%0A 'benchmark': bench_name,%0A 'environment': HOST,%0A 'result_value': value,%0A 'result_date': current_date,%0A %7D%0A if res_type == %22ComparisonResult%22:%0A data%5B'std_dev'%5D = results%5B'std_changed'%5D%0A if testing: testparams.append(data)%0A else: send(data)%0A if testing: return testparams%0A else: return 0%0A %0Adef send(data):%0A #save results%0A params = urllib.urlencode(data)%0A f = None%0A response = %22None%22%0A info = str(datetime.today()) + %22: Saving result for %22 + data%5B'executable_name'%5D + %22 revision %22%0A info += str(data%5B'commitid'%5D) + %22, benchmark %22 + data%5B'benchmark'%5D%0A print(info)%0A try:%0A f = urllib2.urlopen(SPEEDURL + 'result/add/', params)%0A response = f.read()%0A f.close()%0A except urllib2.URLError, e:%0A if hasattr(e, 'reason'):%0A response = '%5Cn We failed to reach a server%5Cn'%0A response += ' Reason: ' + str(e.reason)%0A elif hasattr(e, 'code'):%0A response = '%5Cn The server couldn%5C't fulfill the request%5Cn'%0A response += ' Error code: ' + str(e)%0A print(%22Server (%25s) response: %25s%5Cn%22 %25 (SPEEDURL, 
response))%0A return 1%0A print %22saved correctly!%5Cn%22%0A return 0%0A
|
|
91773cb6a09f710002e5be03ab9ec0c19b2d6ea3
|
Add script to extract rows from terms.
|
src/Scripts/show-term-convert.py
|
src/Scripts/show-term-convert.py
|
Python
| 0
|
@@ -0,0 +1,547 @@
+# Convert from show term to list of rows associated with each term.%0A%0Aimport re%0Aterm_regex = re.compile(%22Term%5C(%5C%22(%5CS+)%5C%22%5C)%22)%0Arowid_regex = re.compile(%22%5Cs+RowId%5C((%5CS+),%5Cs+(%5CS+)%5C)%22)%0A%0Athis_term = %22%22%0Awith open(%22/tmp/show.results.txt%22) as f:%0A for line in f:%0A rowid_match = rowid_regex.match(line)%0A if rowid_match:%0A this_term += %22,%22 + rowid_match.group(1) + %22-%22 + rowid_match.group(2)%0A%0A term_match = term_regex.match(line)%0A if term_match:%0A print(this_term)%0A this_term = term_match.group(1)%0A
|
|
9d6a838463c3b771bdd2cb304f378f85abc5dc95
|
Remove forward slashes from episode titles
|
tvrenamr/cli/core.py
|
tvrenamr/cli/core.py
|
#!/usr/bin/env python
from __future__ import absolute_import
import functools
import logging
import sys
import click
from tvrenamr import errors
from tvrenamr.cli.helpers import (build_file_list, get_config, start_dry_run,
stop_dry_run)
from tvrenamr.logs import start_logging
from tvrenamr.main import File, TvRenamr
# Module-level logger shared by the CLI entry point below.
log = logging.getLogger('CLI')
@click.command()
@click.option('--config', type=click.Path(), help='Select a location for your config file. If the path is invalid the default locations will be used.')  # noqa
@click.option('-c', '--canonical', help='Set the show\'s canonical name to use when performing the online lookup.')  # noqa
@click.option('--debug', is_flag=True)
@click.option('-d', '--dry-run', is_flag=True, help='Dry run your renaming.')
@click.option('-e', '--episode', type=int, help='Set the episode number. Currently this will cause errors when working with more than one file.')  # noqa
@click.option('--ignore-filelist', type=tuple, default=())
# Note: --log-file was previously declared twice; the duplicate is removed.
@click.option('--log-file', type=click.Path(exists=True), help='Set the log file location.')
@click.option('-l', '--log-level', help='Set the log level. Options: short, minimal, info and debug.')  # noqa
@click.option('-n', '--name', help="Set the episode's name.")
@click.option('--no-cache', is_flag=True, help='Force all renames to ignore the cache.')
@click.option('-o', '--output-format', help='Set the output format for the episodes being renamed.')
@click.option('--organise/--no-organise', default=True, help='Organise renamed files into folders based on their show name and season number. Can be explicitly disabled.')  # noqa
@click.option('-p', '--partial', is_flag=True, help='Allow partial regex matching of the filename.')
@click.option('-q', '--quiet', is_flag=True, help="Don't output logs to the command line")
@click.option('-r', '--recursive', is_flag=True, help='Recursively lookup files in a given directory')  # noqa
@click.option('--rename-dir', type=click.Path(), help='The directory to move renamed files to, if not specified the working directory is used.')  # noqa
@click.option('--regex', help='The regular expression to use when extracting information from files.')  # noqa
@click.option('-s', '--season', help='Set the season number.')
@click.option('--show', help="Set the show's name (will search for this name).")
@click.option('--show-override', help="Override the show's name (only replaces the show's name in the final file)")  # noqa
@click.option('--specials', help='Set the show\'s specials folder (defaults to "Season 0")')
@click.option('-t', '--the', is_flag=True, help="Set the position of 'The' in a show's name to the end of the show name")  # noqa
@click.argument('paths', nargs=-1, required=False, type=click.Path(exists=True))
def rename(config, canonical, debug, dry_run, episode,  # pylint: disable-msg=too-many-arguments
           ignore_filelist, log_file, log_level, name,  # pylint: disable-msg=too-many-arguments
           no_cache, output_format, organise, partial,  # pylint: disable-msg=too-many-arguments
           quiet, recursive, rename_dir, regex, season,  # pylint: disable-msg=too-many-arguments
           show, show_override, specials, the, paths):  # pylint: disable-msg=too-many-arguments
    """Rename TV episode files found in *paths* using looked-up metadata.

    Each file is parsed, its episode titles are retrieved, and the file is
    moved/renamed according to the configured output format.  Per-file
    failures are skipped so the rest of the batch can proceed; network
    failures abort the run.
    """
    if debug:
        log_level = 10
    start_logging(log_file, log_level, quiet)
    logger = functools.partial(log.log, 26)

    if dry_run or debug:
        start_dry_run(logger)

    for current_dir, filename in build_file_list(paths, recursive, ignore_filelist):
        try:
            tv = TvRenamr(current_dir, debug, dry_run, no_cache)

            _file = File(**tv.extract_details_from_file(
                filename,
                user_regex=regex,
                partial=partial,
            ))
            # TODO: Warn setting season & episode will override *all* episodes
            _file.user_overrides(show, season, episode)
            _file.safety_check()

            conf = get_config(config)

            for ep in _file.episodes:
                # Resolve the per-episode canonical name into a *local* so the
                # CLI-supplied `canonical` override is not clobbered (the old
                # code reassigned the parameter, so later episodes and files
                # saw the previous lookup's result as their override).
                canonical_name = conf.get(
                    'canonical',
                    _file.show_name,
                    default=ep.file_.show_name,
                    override=canonical
                )

                # TODO: Warn setting name will override *all* episodes
                ep.title = tv.retrieve_episode_title(
                    ep,
                    canonical=canonical_name,
                    override=name,
                )
                # Forward slashes are path separators and must not leak into
                # the output filename built by tv.build_path below.
                ep.title = ep.title.replace('/', '-')

            # Use locals here for the same reason as `canonical_name` above:
            # the CLI parameters must keep their original values across files.
            show_name = conf.get_output(_file.show_name, override=show_override)
            move_the = conf.get('the', show=_file.show_name, override=the)
            _file.show_name = tv.format_show_name(show_name, the=move_the)

            _file.set_output_format(conf.get(
                'format',
                _file.show_name,
                default=_file.output_format,
                override=output_format
            ))

            organise_files = conf.get(
                'organise',
                _file.show_name,
                default=False,
                override=organise
            )
            dest_dir = conf.get(
                'renamed',
                _file.show_name,
                default=current_dir,
                override=rename_dir
            )
            specials_folder = conf.get(
                'specials_folder',
                _file.show_name,
                default='Season 0',
                override=specials,
            )
            path = tv.build_path(
                _file,
                rename_dir=dest_dir,
                organise=organise_files,
                specials_folder=specials_folder,
            )

            tv.rename(filename, path)
        except errors.NetworkException:
            if dry_run or debug:
                stop_dry_run(logger)
            sys.exit(1)
        except (AttributeError,
                errors.EmptyEpisodeTitleException,
                errors.EpisodeNotFoundException,
                errors.IncorrectRegExpException,
                errors.InvalidXMLException,
                errors.MissingInformationException,
                errors.OutputFormatMissingSyntaxException,
                errors.PathExistsException,
                errors.ShowNotFoundException,
                errors.UnexpectedFormatException):
            # NOTE(review): these per-file errors are skipped without logging
            # here -- presumably they are reported where they are raised;
            # confirm against the errors module.
            continue
        except Exception as e:
            if debug:
                # In debug mode, show the full traceback.
                raise
            for msg in e.args:
                log.critical('Error: %s', msg)
            sys.exit(1)

        # if we're not doing a dry run add a blank line for clarity
        if not (debug and dry_run):
            log.info('')

    if dry_run or debug:
        stop_dry_run(logger)
|
Python
| 0.000001
|
@@ -4586,32 +4586,150 @@
)%0A%0A
+ # TODO: make this a sanitisation method on ep?%0A ep.title = ep.title.replace('/', '-')%0A%0A
show
|
710f6ed188b6139f6469d61775da4fb752bac754
|
Create __init__.py
|
mopidy_ampache/__init__.py
|
mopidy_ampache/__init__.py
|
Python
| 0.000429
|
@@ -0,0 +1,883 @@
+from __future__ import unicode_literals%0A%0Aimport os%0A%0Afrom mopidy import ext, config%0A%0A__version__ = '1.0.0'%0A%0A%0Aclass AmpacheExtension(ext.Extension):%0A%0A dist_name = 'Mopidy-Ampache'%0A ext_name = 'ampache'%0A version = __version__%0A%0A def get_default_config(self):%0A conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')%0A return config.read(conf_file)%0A%0A def get_config_schema(self):%0A schema = super(AmpacheExtension, self).get_config_schema()%0A schema%5B'hostname'%5D = config.Hostname()%0A schema%5B'port'%5D = config.Port()%0A schema%5B'username'%5D = config.String()%0A schema%5B'password'%5D = config.Secret()%0A schema%5B'ssl'%5D = config.Boolean()%0A schema%5B'context'%5D = config.String()%0A return schema%0A%0A def setup(self, registry):%0A from .actor import AmpacheBackend%0A registry.add('backend', AmpacheBackend)%0A
|
|
2cd081a6a7c13b40b5db8f667d03e93353630830
|
Create leetcode-78.py
|
python_practice/leetCode/leetcode-78.py
|
python_practice/leetCode/leetcode-78.py
|
Python
| 0.000003
|
@@ -0,0 +1,321 @@
+class Solution:%0A def subsets(self, nums: List%5Bint%5D) -%3E List%5BList%5Bint%5D%5D:%0A if nums == %5B%5D:%0A return %5B%5B%5D%5D%0A %0A sub = self.subsets(nums%5B1:%5D)%0A newSub = %5B%5D%0A for i in sub:%0A newI = i + %5Bnums%5B0%5D%5D%0A newSub.append(newI)%0A sub.extend(newSub)%0A return sub%0A
|
|
56f8dd435981a28bcb026da0edb395aabd515c29
|
Add a frist test for create_random_data
|
opal/tests/test_command_create_random_data.py
|
opal/tests/test_command_create_random_data.py
|
Python
| 0.000006
|
@@ -0,0 +1,520 @@
+%22%22%22%0AUnittests for opal.management.commands.create_random_data%0A%22%22%22%0Afrom mock import patch, MagicMock%0A%0Afrom opal.core.test import OpalTestCase%0A%0Afrom opal.management.commands import create_random_data as crd%0A%0Aclass StringGeneratorTestCase(OpalTestCase):%0A def test_string_generator(self):%0A mock_field = MagicMock(name='Mock Field')%0A mock_field.max_length = 30%0A frist, last = crd.string_generator(mock_field).split()%0A self.assertIn(frist, crd.adjectives)%0A self.assertIn(last, crd.nouns)%0A
|
|
826c3e3b2787e25d040b3ddf7c4bdabde3da4158
|
Add tasks.py the new and improved celery_tasks.py
|
scrapi/tasks.py
|
scrapi/tasks.py
|
Python
| 0.004637
|
@@ -0,0 +1,1744 @@
+import os%0Aimport logging%0Aimport importlib%0Afrom datetime import datetime%0A%0Afrom celery import Celery%0A%0Aimport settings%0A%0A%0Aapp = Celery()%0Aapp.config_from_object(settings)%0A%0Alogger = logging.getLogger(__name__)%0A%0A%0Adef import_consumer(consumer_name):%0A return importlib.import_module('scrapi.consumers.%7B%7D'.format(consumer_name))%0A%0A%0A@app.task%0Adef run_consumer(consumer_name):%0A logger.info('Runing consumer %22%7B%7D%22'.format(consumer_name))%0A # Form and start a celery chain%0A chain = (consume.si(consumer_name) %7C begin_normalization.s(consumer_name))%0A chain.apply_async()%0A%0A%0A@app.task%0Adef begin_normalization(raw_docs, consumer_name):%0A logger.info('Normalizing %7B%7D documents for consumer %22%7B%7D%22'%0A .format(len(raw_docs), consumer_name))%0A%0A for raw in raw_docs:%0A timestamp = datetime.now()%0A%0A process_raw.si(raw, timestamp).apply_async()%0A%0A chain = (normalize.si(raw, timestamp, consumer_name) %7C process_normalized.s())%0A chain.apply_async()%0A%0A%0A@app.task%0Adef consume(consumer_name):%0A logger.info('Consumer %22%7B%7D%22 has begun consumption'.format(consumer_name))%0A%0A consumer = import_consumer(consumer_name)%0A result = consumer.consume()%0A%0A logger.info('Consumer %22%7B%7D%22 has finished consumption'.format(consumer_name))%0A%0A return result%0A%0A%0A@app.task%0Adef normalize(raw_doc, timestamp, consumer_name):%0A consumer = import_consumer(consumer_name)%0A normalized = consumer.normalize(raw_doc, timestamp)%0A # Do other things here%0A return normalized%0A%0A%0A@app.task%0Adef process_raw(raw_doc, timestamp):%0A pass%0A%0A%0A@app.task%0Adef process_normalized(normalized_doc):%0A pass%0A%0A%0A@app.task%0Adef check_archive():%0A pass%0A%0A%0A@app.task%0Adef tar_archive():%0A os.system('tar -czvf website/static/archive.tar.gz archive/')%0A
|
|
3f1663f7cf32b590affb7a306bcc2711b17af296
|
Add a monitor example.
|
example/user_stream_monitor.py
|
example/user_stream_monitor.py
|
Python
| 0.00024
|
@@ -0,0 +1,1391 @@
+#!/usr/bin/env python%0A#%0A# Copyright (c) 2012 Ralph Meijer %3Cralphm@ik.nu%3E%0A# See LICENSE.txt for details%0A%0A%22%22%22%0APrint Tweets on a user's timeline in real time.%0A%0AThis connects to the Twitter User Stream API endpoint with the given OAuth%0Acredentials and prints out all Tweets of the associated user and of the%0Aaccounts the user follows. This is equivalent to the user's time line.%0A%0AThe arguments, in order, are: consumer key, consumer secret, access token key,%0Aaccess token secret.%0A%0AThis is mostly the same as the C%7Buser_stream.py%7D example, except that this%0Auses L%7Btwittytwisted.streaming.TwitterMonitor%7D. It will reconnect in the%0Aface of disconnections or explicit reconnects to change the API request%0Aparameters (e.g. changing the track keywords).%0A%22%22%22%0A%0Aimport sys%0A%0Afrom twisted.internet import reactor%0A%0Afrom oauth import oauth%0A%0Afrom twittytwister import twitter%0A%0Adef cb(entry):%0A print '%25s: %25s' %25 (entry.user.screen_name.encode('utf-8'),%0A entry.text.encode('utf-8'))%0A%0Adef change(monitor):%0A monitor.args = %7B%7D%0A monitor.connect(forceReconnect=True)%0A%0Aconsumer = oauth.OAuthConsumer(sys.argv%5B1%5D, sys.argv%5B2%5D)%0Atoken = oauth.OAuthToken(sys.argv%5B3%5D, sys.argv%5B4%5D)%0A%0Afeed = twitter.TwitterFeed(consumer=consumer, token=token)%0Amonitor = twitter.TwitterMonitor(feed.user, cb, %7B'with': 'followings'%7D)%0A%0Amonitor.startService()%0Areactor.callLater(30, change, monitor)%0A%0Areactor.run()%0A
|
|
ac3cd54b93aa6d5cddaac89016d09b9e6747a301
|
allow bazel 0.7.x (#1467)
|
check_bazel_version.bzl
|
check_bazel_version.bzl
|
def _parse_bazel_version(bazel_version):
# Remove commit from version.
version = bazel_version.split(" ", 1)[0]
# Split into (release, date) parts and only return the release
# as a tuple of integers.
parts = version.split("-", 1)
# Turn "release" into a tuple of strings
version_tuple = ()
for number in parts[0].split("."):
version_tuple += (int(number),)
return version_tuple
# acceptable min_version <= version <= max_version
def check_version():
check_bazel_version("0.5.4", "0.6.1")
# acceptable min_version <= version <= max_version
def check_bazel_version(min_version, max_version):
if "bazel_version" not in dir(native):
fail("\nCurrent Bazel version is lower than 0.2.1, expected at least %s\n" %
min_version)
elif not native.bazel_version:
print("\nCurrent Bazel is not a release version, cannot check for " +
"compatibility.")
print("Make sure that you are running at least Bazel %s.\n" % min_version)
else:
_version = _parse_bazel_version(native.bazel_version)
_min_version = _parse_bazel_version(min_version)
_max_version = _parse_bazel_version(max_version)
if _version < _min_version:
fail("\nCurrent Bazel version {} is too old, expected at least {}\n".format(
native.bazel_version, min_version))
if _version > _max_version:
fail("\nCurrent Bazel version {} is too new, expected at most {}\n".format(
native.bazel_version, max_version))
|
Python
| 0
|
@@ -531,11 +531,12 @@
%220.
-6.1
+7.99
%22)%0A%0A
|
b40d064ac5b4e01f11cdb1f6b7ce7f1a0a968be5
|
Create set_memory_example.py
|
examples/set_memory_example.py
|
examples/set_memory_example.py
|
Python
| 0.000189
|
@@ -0,0 +1,615 @@
+from chatbot import Chat, register_call%0Aimport os%0Aimport warnings%0Awarnings.filterwarnings(%22ignore%22)%0A%0A%0A@register_call(%22increment_count%22)%0Adef memory_get_set_example(session, query):%0A name=query.strip().lower()%0A # Get memory%0A old_count = session.memory.get(name, '0')%0A new_count = int(old_count) + 1%0A # Set memory%0A session.memory%5Bname%5D=str(new_count)%0A return f%22count %7Bnew_count%7D%22%0A%0A%0Achat = Chat(os.path.join(os.path.dirname(os.path.abspath(__file__)), %22example.template%22))%0Achat.converse(%22%22%22%0AMemory get and set example%0A%0AUsage:%0Aincrement %3Cname%3E%0Ashow %3Cname%3E%0A%0Aexample:%0Aincrement mango%0Ashow mango%0A%22%22%22)%0A
|
|
12b7d2b2296b934675f2cca0f35d059a67f58e7f
|
Create ComputeDailyWage.py
|
ComputeDailyWage.py
|
ComputeDailyWage.py
|
Python
| 0.000011
|
@@ -0,0 +1,2102 @@
+################################ Compute daily wage%0A%0Adef computepay ( w , m , e , g ):%0A total = float(wage) - (float(mileage)/float(gas)) - float(expenses)%0A return total%0A%0Atry:%0A input = raw_input('Enter Wages: ')%0A wage = float(input)%0A input = raw_input('Enter Miles: ')%0A mileage = float(input)%0A input = raw_input('Enter Expenses: ')%0A expenses = float(input)%0A input = raw_input('Enter Price of Gas: ')%0A gas = float(input)%0A%0Aexcept:%0A print %22Not a number!%22%0A quit()%0A%0Aprint 'Total: ', computepay ( wage , mileage , expenses , gas )%0A%0Ahours = raw_input('Enter Hours: ')%0AhourlyWage = computepay ( wage , mileage , expenses , gas ) / float(hours)%0A%0Aprint 'Hourly Wage: ', hourlyWage%0A%0A################################## Compute weekly wage%0A%0Adef computepay ( w, m , e , g ):%0A total = float(wage) - (float(mileage)/float(gas)) - float(expenses)%0A return total%0A%0A#date = raw_input(%22Enter Beginning and End dates of the week: %22)%0A%0A#input = raw_input(%22How many days were worked: %22)%0A#days = int(input)%0A%0A############## Day 1 #######################%0A%0Atry:%0A input = raw_input('Enter Wages: ')%0A wage = float(input)%0A input = raw_input('Enter Miles: ')%0A mileage = float(input)%0A input = raw_input('Enter Expenses: ')%0A expenses = float(input)%0A input = raw_input('Enter Price of Gas: ')%0A gas = float(input)%0A%0Aexcept:%0A print %22Not a number!%22%0A quit()%0A%0Aprint 'Day 1 Total: ', computepay ( wage, mileage , expenses , gas )%0A%0Aday1 = computepay ( wage , mileage , expenses , gas )%0A%0A############## Day 2 ########################%0A%0Atry:%0A input = raw_input('Enter Wages: ')%0A wage = float(input)%0A input = raw_input('Enter Miles: ')%0A mileage = float(input)%0A input = raw_input('Enter Expenses: ')%0A expenses = float(input)%0A input = raw_input('Enter Price of Gas: ')%0A gas = float(input)%0A%0Aexcept:%0A print %22Not a number!%22%0A quit()%0A%0Aprint 'Day 2 Total: ', computepay ( wage , mileage 
, expenses , gas )%0A%0Aday2 = computepay ( wage , mileage , expenses , gas )%0A%0Aweekwage = day1 + day2%0A%0Aprint %22Your weekly wage for %22, date , %22is: %22 , weekwage , %22dollars.%22%0A
|
|
2429c0bdf5c2db5c2b40dc43d0a4c277e20d72fa
|
add 0001
|
Jaccorot/0001/0001.py
|
Jaccorot/0001/0001.py
|
Python
| 0.999999
|
@@ -0,0 +1,484 @@
+#!/usr/local/bin/python%0A#coding=utf-8%0A%0A#%E7%AC%AC 0001 %E9%A2%98%EF%BC%9A%E5%81%9A%E4%B8%BA Apple Store App %E7%8B%AC%E7%AB%8B%E5%BC%80%E5%8F%91%E8%80%85%EF%BC%8C%E4%BD%A0%E8%A6%81%E6%90%9E%E9%99%90%E6%97%B6%E4%BF%83%E9%94%80%EF%BC%8C%E4%B8%BA%E4%BD%A0%E7%9A%84%E5%BA%94%E7%94%A8%E7%94%9F%E6%88%90%E6%BF%80%E6%B4%BB%E7%A0%81%EF%BC%88%E6%88%96%E8%80%85%E4%BC%98%E6%83%A0%E5%88%B8%EF%BC%89%EF%BC%8C%0A#%E4%BD%BF%E7%94%A8 Python %E5%A6%82%E4%BD%95%E7%94%9F%E6%88%90 200 %E4%B8%AA%E6%BF%80%E6%B4%BB%E7%A0%81%EF%BC%88%E6%88%96%E8%80%85%E4%BC%98%E6%83%A0%E5%88%B8%EF%BC%89%EF%BC%9F%0A%0Aimport uuid%0A%0A%0Adef create_code(num, length):%0A#%E7%94%9F%E6%88%90%E2%80%9Dnum%E2%80%9C%E4%B8%AA%E6%BF%80%E6%B4%BB%E7%A0%81%EF%BC%8C%E6%AF%8F%E4%B8%AA%E6%BF%80%E6%B4%BB%E7%A0%81%E5%90%AB%E6%9C%89%E2%80%9Dlength%E2%80%9C%E4%BD%8D%0A result = %5B%5D%0A while True:%0A uuid_id = uuid.uuid1()%0A temp = str(uuid_id).replace('-', '')%5B:length%5D%0A if not temp in result:%0A result.append(temp)%0A if len(result) == num:%0A break%0A return result%0A%0Aprint create_code(200, 20)%0A
|
|
6913674358d226953c1090ab7c8f5674dac1816c
|
add 0007
|
Jaccorot/0007/0007.py
|
Jaccorot/0007/0007.py
|
Python
| 0.99999
|
@@ -0,0 +1,1175 @@
+#!/usr/bin/python%0A#coding:utf-8%0A%0A%22%22%22%0A%E7%AC%AC 0007 %E9%A2%98%EF%BC%9A%E6%9C%89%E4%B8%AA%E7%9B%AE%E5%BD%95%EF%BC%8C%E9%87%8C%E9%9D%A2%E6%98%AF%E4%BD%A0%E8%87%AA%E5%B7%B1%E5%86%99%E8%BF%87%E7%9A%84%E7%A8%8B%E5%BA%8F%EF%BC%8C%E7%BB%9F%E8%AE%A1%E4%B8%80%E4%B8%8B%E4%BD%A0%E5%86%99%E8%BF%87%E5%A4%9A%E5%B0%91%E8%A1%8C%E4%BB%A3%E7%A0%81%E3%80%82%E5%8C%85%E6%8B%AC%E7%A9%BA%E8%A1%8C%E5%92%8C%E6%B3%A8%E9%87%8A%EF%BC%8C%E4%BD%86%E6%98%AF%E8%A6%81%E5%88%86%E5%88%AB%E5%88%97%E5%87%BA%E6%9D%A5%E3%80%82%0A%22%22%22%0A%0Aimport os%0A%0Adef walk_dir(path):%0A file_path = %5B%5D%0A for root, dirs, files in os.walk(path):%0A for f in files:%0A if f.lower().endswith('py'):%0A file_path.append(os.path.join(root, f))%0A return file_path%0A%0A%0Adef count_the_code(path):%0A file_name = os.path.basename(path)%0A note_flag = False%0A line_num = 0%0A empty_line_num = 0%0A note_num = 0%0A with open(path) as f:%0A for line in f.read().split('%5Cn'):%0A line_num += 1%0A if line.strip().startswith('%5C%22%5C%22%5C%22') and not note_flag:%0A note_flag =True%0A note_num += 1%0A continue%0A%0A if line.strip().startswith('%5C%22%5C%22%5C%22'):%0A note_flag = False%0A note_num += 1%0A%0A if line.strip().startswith('#') or note_flag:%0A note_num += 1%0A%0A if len(line) == 0:%0A empty_line_num += 1%0A%0A print u%22%E5%9C%A8%25s%E4%B8%AD%EF%BC%8C%E5%85%B1%E6%9C%89%25s%E8%A1%8C%E4%BB%A3%E7%A0%81%EF%BC%8C%E5%85%B6%E4%B8%AD%E6%9C%89%25s%E7%A9%BA%E8%A1%8C%EF%BC%8C%E6%9C%89%25s%E6%B3%A8%E9%87%8A%22%25 (file_name, line_num, empty_line_num, note_num)%0A%0A%0Aif __name__ == '__main__':%0A for f in walk_dir('.'):%0A count_the_code(f)%0A
|
|
f8093f59b77e481231aeca49ef057a4602d21b2e
|
add tests for appconfig
|
src/archivematicaCommon/tests/test_appconfig.py
|
src/archivematicaCommon/tests/test_appconfig.py
|
Python
| 0
|
@@ -0,0 +1,2528 @@
+from __future__ import absolute_import%0Aimport os%0Aimport StringIO%0A%0Afrom django.core.exceptions import ImproperlyConfigured%0Aimport pytest%0A%0Afrom appconfig import Config%0A%0A%0ACONFIG_MAPPING = %7B%0A 'search_enabled': %5B%0A %7B'section': 'Dashboard', 'option': 'disable_search_indexing', 'type': 'iboolean'%7D,%0A %7B'section': 'Dashboard', 'option': 'search_enabled', 'type': 'boolean'%7D,%0A %5D,%0A%7D%0A%0A%0A@pytest.mark.parametrize('option, value, expect', %5B%0A ('search_enabled', 'true', True),%0A ('search_enabled', 'false', False),%0A ('disable_search_indexing', 'true', False),%0A ('disable_search_indexing', 'false', True),%0A%5D)%0Adef test_mapping_list_config_file(option, value, expect):%0A config = Config(env_prefix='ARCHIVEMATICA_DASHBOARD', attrs=CONFIG_MAPPING)%0A config.read_defaults(StringIO.StringIO(%0A '%5BDashboard%5D%5Cn'%0A '%7Boption%7D = %7Bvalue%7D'.format(option=option, value=value)))%0A assert config.get('search_enabled') is expect%0A%0A%0A@pytest.mark.parametrize('envvars, expect', %5B%0A (%7B'ARCHIVEMATICA_DASHBOARD_DASHBOARD_SEARCH_ENABLED': 'true'%7D, True),%0A (%7B'ARCHIVEMATICA_DASHBOARD_DASHBOARD_SEARCH_ENABLED': 'false'%7D, False),%0A (%7B'ARCHIVEMATICA_DASHBOARD_SEARCH_ENABLED': 'true'%7D, True),%0A (%7B'ARCHIVEMATICA_DASHBOARD_SEARCH_ENABLED': 'false'%7D, False),%0A (%7B'ARCHIVEMATICA_DASHBOARD_DASHBOARD_DISABLE_SEARCH_INDEXING': 'true'%7D,%0A False),%0A (%7B'ARCHIVEMATICA_DASHBOARD_DASHBOARD_DISABLE_SEARCH_INDEXING': 'false'%7D,%0A True),%0A (%7B'ARCHIVEMATICA_DASHBOARD_DISABLE_SEARCH_INDEXING': 'true'%7D, False),%0A (%7B'ARCHIVEMATICA_DASHBOARD_DISABLE_SEARCH_INDEXING': 'false'%7D, True),%0A (%7B%7D, ImproperlyConfigured),%0A # Following two show that the DISABLE env var overrides the ENABLE one%0A # because of the ordering in CONFIG_MAPPING.%0A (%7B'ARCHIVEMATICA_DASHBOARD_DASHBOARD_SEARCH_ENABLED': 'true',%0A 'ARCHIVEMATICA_DASHBOARD_DASHBOARD_DISABLE_SEARCH_INDEXING': 'true'%7D,%0A False),%0A 
(%7B'ARCHIVEMATICA_DASHBOARD_DASHBOARD_SEARCH_ENABLED': 'false',%0A 'ARCHIVEMATICA_DASHBOARD_DASHBOARD_DISABLE_SEARCH_INDEXING': 'false'%7D,%0A True),%0A%5D)%0Adef test_mapping_list_env_var(envvars, expect):%0A for var, val in envvars.items():%0A os.environ%5Bvar%5D = val%0A config = Config(env_prefix='ARCHIVEMATICA_DASHBOARD', attrs=CONFIG_MAPPING)%0A if bool(expect) is expect:%0A search_enabled = config.get('search_enabled')%0A assert search_enabled is expect%0A else:%0A with pytest.raises(expect):%0A config.get('search_enabled')%0A for var in envvars:%0A del os.environ%5Bvar%5D%0A
|
|
06dd6ed476549d832159b1dbfe4d415579b4d067
|
add wrapper
|
scripts/fontify.py
|
scripts/fontify.py
|
Python
| 0.000002
|
@@ -0,0 +1,1320 @@
+#!/usr/bin/env python2%0Aimport argparse%0Aimport tempfile%0Aimport shutil%0Aimport os%0Aimport crop_image%0A%0A%0Adef check_input(image):%0A if not os.path.isfile(image):%0A raise FileNotFoundError%0A _, ext = os.path.splitext(image)%0A if ext.lower() not in %5B%22.jpg%22, %22.png%22%5D:%0A raise ValueError(%22Unrecognized image extension%22)%0A%0A%0Adef setup_work_dir(image):%0A tmpdir = tempfile.mkdtemp(prefix=%22fontify%22)%0A _, ext = os.path.splitext(image)%0A shutil.copyfile(image, os.path.join(tmpdir, 'input' + ext))%0A return tmpdir%0A%0A%0Adef process(image, font_name):%0A crop_image.crop()%0A%0A%0Adef tear_down(tmpdir, output):%0A if output == %22%22:%0A output = %22fontify.ttf%22%0A shutil.copyfile(os.path.join(tmpdir, 'fontify.ttf'), output)%0A shutil.rmtree(tmpdir)%0A%0A%0Aif __name__ == %22__main__%22:%0A parser = argparse.ArgumentParser()%0A parser.add_argument(%0A %22image%22, help=%22input image (JPG or PNG)%22%0A )%0A parser.add_argument(%0A %22-n%22, %22--name%22, default=%22Fontify%22, help=%22font name (default: Fontify)%22%0A )%0A parser.add_argument(%0A %22-o%22, metavar=%22OUTPUT%22, default=%22%22,%0A help=%22output font file (default to fontify.ttf in current directory)%22%0A )%0A args = parser.parse_args()%0A check_input(args.image)%0A tmpdir = setup_work_dir()%0A process(tmpdir, args.name)%0A tear_down(tmpdir, args.output)%0A
|
|
6ca1d9f4a3b8a518661409166a9918a20eb61655
|
fix wansu cdn url
|
src/streamlink/plugins/app17.py
|
src/streamlink/plugins/app17.py
|
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, useragents
from streamlink.stream import HLSStream, RTMPStream, HTTPStream
API_URL = "https://api-dsa.17app.co/api/v1/liveStreams/getLiveStreamInfo"
_url_re = re.compile(r"https://17.live/live/(?P<channel>[^/&?]+)")
_status_re = re.compile(r'\\"closeBy\\":\\"\\"')
_rtmp_re = re.compile(r'\\"url\\"\s*:\s*\\"(.+?)\\"')
class App17(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_streams(self):
match = _url_re.match(self.url)
channel = match.group("channel")
http.headers.update({'User-Agent': useragents.CHROME})
payload = '{"liveStreamID": "%s"}' % (channel)
res = http.post(API_URL, data=payload)
status = _status_re.search(res.text)
if not status:
self.logger.info("Stream currently unavailable.")
return
http_url = _rtmp_re.search(res.text).group(1)
if 'pull-rtmp' in http_url:
http_url = http_url.replace("http:", "https:")
yield "live", HTTPStream(self.session, http_url)
if 'pull-rtmp' in http_url:
url = http_url.replace("https:", "rtmp:").replace(".flv", "")
stream = RTMPStream(self.session, {
"rtmp": url,
"live": True
})
yield "live", stream
if 'wansu-global-pull-rtmp' in http_url:
url = http_url.replace(".flv", "/playlist.m3u8")
for stream in HLSStream.parse_variant_playlist(self.session, url).items():
yield stream
else:
url = http_url.replace(".flv", ".m3u8")
yield "live", HLSStream(self.session, url)
__plugin__ = App17
|
Python
| 0.000002
|
@@ -995,48 +995,8 @@
(1)%0A
- if 'pull-rtmp' in http_url:%0A
@@ -1407,24 +1407,8 @@
nsu-
-global-pull-rtmp
' in
|
7f51b7a1b6a319595df5c360bae0264386e590e9
|
add support for tucao.cc
|
src/you_get/extractors/tucao.py
|
src/you_get/extractors/tucao.py
|
Python
| 0
|
@@ -0,0 +1,2116 @@
+#!/usr/bin/env python%0A%0A__all__ = %5B'tucao_download'%5D%0Afrom ..common import *%0A# import re%0Aimport random%0Aimport time%0Afrom xml.dom import minidom%0A%0A#1. %3Cli%3Etype=tudou&vid=199687639%3C/li%3E%0A#2. %3Cli%3Etype=tudou&vid=199506910%7C%3C/li%3E%0A#3. %3Cli%3Etype=video&file=http://xiaoshen140731.qiniudn.com/lovestage04.flv%7C%3C/li%3E%0A#4 may ? %3Cli%3Etype=video&file=http://xiaoshen140731.qiniudn.com/lovestage04.flv%7Cxx**type=&vid=?%3C/li%3E%0A#5. %3Cli%3Etype=tudou&vid=200003098%7C07**type=tudou&vid=200000350%7C08%3C/li%3E%0A%0A# re_pattern=re.compile(r%22(type=(.+?)&(vid%7Cfile)=(.*?))%5B%5C%7C%3C%5D%22)%0A%0Adef tucao_single_download(type_link, title, output_dir=%22.%22, merge=True, info_only=False):%0A if %22file%22 in type_link:%0A url=type_link%5Btype_link.find(%22file=%22)+5:%5D%0A vtype, ext, size=url_info(url)%0A print_info(site_info, title, vtype, size)%0A if not info_only:%0A download_urls(%5Burl%5D, title, ext, size, output_dir)%0A else:%0A u=%22http://www.tucao.cc/api/playurl.php?%7B%7D&key=tucao%7B:07x%7D.cc&r=%7B%7D%22.format(type_link,random.getrandbits(28),int(time.time()*1000))%0A xml=minidom.parseString(get_content(u))%0A urls=%5B%5D%0A size=0%0A for i in xml.getElementsByTagName(%22url%22):%0A urls.append(i.firstChild.nodeValue)%0A vtype, ext, _size=url_info(i.firstChild.nodeValue)%0A size+=_size%0A print_info(site_info, title, vtype, size)%0A if not info_only:%0A download_urls(urls, title, ext, size, output_dir) %0A%0Adef tucao_download(url, output_dir=%22.%22, merge=True, info_only=False):%0A html=get_content(url)%0A title=match1(html,r'%3Ch1 class=%22show_title%22%3E(.*?)%3C%5Cw')%0A raw_list=match1(html,r%22%3Cli%3E(type=.+?)%3C/li%3E%22)%0A raw_l=raw_list.split(%22**%22)%0A if len(raw_l)==1:%0A format_link=raw_l%5B0%5D%5B:-1%5D if raw_l%5B0%5D.endswith(%22%7C%22) else raw_l%5B0%5D%0A tucao_single_download(format_link,title,output_dir,merge,info_only)%0A else:%0A for i in raw_l:%0A 
format_link,sub_title=i.split(%22%7C%22)%0A tucao_single_download(format_link,title+%22-%22+sub_title,output_dir,merge,info_only)%0A%0A%0Asite_info = %22tucao.cc%22%0Adownload = tucao_download%0Adownload_playlist = playlist_not_supported(%22tucao%22)%0A
|
|
617e6741a06fd63f22ec9b28090e39c120061a84
|
Add the `vulnerability_tickets.py` sample security plugin to deny access to tickets with "security" or "vulnerability" in the `keywords` or `summary` fields.
|
sample-plugins/vulnerability_tickets.py
|
sample-plugins/vulnerability_tickets.py
|
Python
| 0.000038
|
@@ -0,0 +1,1232 @@
+from trac.core import *%0Afrom trac.config import ListOption%0Afrom trac.perm import IPermissionPolicy, IPermissionRequestor, PermissionSystem%0Afrom trac.ticket.model import Ticket%0A%0A%0Aclass SecurityTicketsPolicy(Component):%0A %22%22%22%0A Require the VULNERABILITY_VIEW permission to view any ticket with the words%0A %22security%22 or %22vulnerability%22 in the summary or keywords fields.%0A %22%22%22%0A implements(IPermissionPolicy, IPermissionRequestor)%0A%0A # IPermissionPolicy methods%0A def check_permission(self, username, action, context):%0A # Find available ticket context%0A while context.parent:%0A if context.realm == 'ticket':%0A break%0A context = context.parent%0A%0A if context.realm == 'ticket' and context.id is not None:%0A ticket = Ticket(self.env, context.id)%0A fields = (ticket%5B'keywords'%5D + ticket%5B'summary'%5D).lower()%0A if 'security' in fields or 'vulnerability' in fields:%0A perms = PermissionSystem(self.env).get_user_permissions(username)%0A if 'VULNERABILITY_VIEW' not in perms:%0A return False%0A%0A # IPermissionRequestor methods%0A def get_permission_actions(self):%0A yield 'VULNERABILITY_VIEW'%0A
|
|
b8ab0280ffd76419b7418c39a9f0b9d8131a9d39
|
Add merge migration
|
corehq/apps/users/migrations/0022_merge_20200814_2045.py
|
corehq/apps/users/migrations/0022_merge_20200814_2045.py
|
Python
| 0.000001
|
@@ -0,0 +1,281 @@
+# Generated by Django 2.2.13 on 2020-08-14 20:45%0A%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('users', '0021_add_view_apps_permission'),%0A ('users', '0021_invitation_email_status'),%0A %5D%0A%0A operations = %5B%0A %5D%0A
|
|
cff300eecbbf6189fe7fc9fe4fafd718b414c80e
|
add command to create root
|
src/python/expedient/clearinghouse/commands/management/commands/create_default_root.py
|
src/python/expedient/clearinghouse/commands/management/commands/create_default_root.py
|
Python
| 0.000001
|
@@ -0,0 +1,1477 @@
+'''Command to create default administrators.%0ACreated on Aug 26, 2010%0A%0A@author: jnaous%0A'''%0A%0Afrom django.core.management.base import NoArgsCommand%0Afrom django.conf import settings%0Afrom django.contrib.auth.models import User%0A%0Aclass Command(NoArgsCommand):%0A help = %22Creates the default root user specified by %22 %5C%0A %22settings.ROOT_USERNAME (%25s), settings.ROOT_PASSWORD (%25s), %22 %5C%0A %22settings.ROOT_EMAIL (%25s). See %22 %5C%0A %22the 'defaultsettings.admins' module documentation. If the user %22 %5C%0A %22already exists, the user's password will be reset, and the user %22 %5C%0A %22will be promoted to superuser.%22 %25 (%0A settings.ROOT_USERNAME, settings.ROOT_PASSWORD)%0A%0A def handle_noargs(self, **options):%0A try:%0A u = User.objects.get(username=settings.ROOT_USERNAME)%0A except User.objects.DoesNotExist:%0A User.objects.create_superuser(%0A settings.ROOT_USERNAME,%0A settings.ROOT_EMAIL,%0A settings.ROOT_PASSWORD,%0A )%0A print %22Created superuser %25s with password %25s.%22 %25 (%0A settings.ROOT_USERNAME, settings.ROOT_PASSWORD)%0A else:%0A u.set_password(settings.ROOT_PASSWORD)%0A u.is_superuser = True%0A u.is_staff = True%0A u.save()%0A print %22Reset user %25s's password to %25s and promoted the user %22 %5C%0A %22to superuser.%22 %25 (%0A settings.ROOT_USERNAME, settings.ROOT_PASSWORD)%0A
|
|
7067413504454051ede2b9b2b0c223019282dc84
|
Fix notification testcase
|
monasca/tests/microservice/test_notification_processor.py
|
monasca/tests/microservice/test_notification_processor.py
|
# Copyright 2015 Carnegie Mellon University
#
# Author: Han Chen <hanc@andrew.cmu.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import json
import mock
from monasca.common import email_sender
from monasca.microservice import notification_processor
from monasca.openstack.common import log
from monasca import tests
LOG = log.getLogger(__name__)
response_str = """
{
"hits":{
"hits":[
{
"_score":1.0,
"_type":"notification_methods",
"_id":"c60ec47e-5038-4bf1-9f95-4046c6e9a719",
"_source":{
"type":"EMAIL",
"id":"c60ec47e-5038-4bf1-9f95-4046c6e9a719",
"name":"NotificationMethod",
"address":"hanc@andrew.cmu.edu"
},
"_index":"admin"
}
],
"total":1,
"max_score":1.0
},
"_shards":{
"successful":5,
"failed":0,
"total":5
},
"took":2
}
"""
class Msg:
class message:
value = 'message content'
class Es_conn:
def get_message_by_id(self, id):
return response_str
class TestNotificationProcessor(tests.BaseTestCase):
def setUp(self):
super(TestNotificationProcessor, self).setUp()
def test_handle_alarm_msg(self):
_es_conn = Es_conn()
msg = Msg()
email_sender.EmailSender = mock.Mock()
r = ("{'metrics': {'timestamp': 1432672915.409,"
"'name': 'biz', 'value': 1500,"
"'dimensions': {'key2': 'value2', 'key1': 'value1'}},"
"'state_updated_timestamp': 1432672915,"
"'state': 'ALARM',"
"'alarm_definition':"
"{'alarm_actions': ['c60ec47e-5038-4bf1-9f95-4046c6e9a759'],"
"'undetermined_actions': "
"['c60ec47e-5038-4bf1-9f95-4046c6e9a759'],"
"'name': 'Average CPU percent greater than 10',"
"'match_by': ['hostname'],"
"'severity': 'LOW',"
"'ok_actions': ['c60ec47e-5038-4bf1-9f95-4046c6e9a759'],"
"'expression': 'max(foo{hostname=mini-mon,mu=na}, 120)"
"> 1100 and max(bar { asd = asd} )>1200 or avg(biz)>1300',"
"'id': 'c60ec47e-5038-4bf1-9f95-4046c6e91111',"
"'description': 'The average CPU percent is greater than 10'}}")
with mock.patch.object(email_sender.EmailSender, 'send_emails',
return_value=""):
with mock.patch.object(notification_processor.
NotificationProcessor,
"_get_notification_method_response",
return_value=json.loads(response_str).
get("hits")):
with mock.patch.object(ast, 'literal_eval',
return_value=ast.literal_eval(r)):
np = notification_processor.NotificationProcessor()
np.handle_alarm_msg(_es_conn, msg)
self.assertEqual(np.email_addresses[0],
"hanc@andrew.cmu.edu")
|
Python
| 0.000011
|
@@ -3474,26 +3474,20 @@
ect(
-ast, 'literal_eval
+json, 'loads
',%0A
|
787f956539eb5e41467e04b8239ae571fad60da7
|
Implement code to return how many characters to delete to make 2 strings into an anagram
|
all-domains/tutorials/cracking-the-coding-interview/strings-making-anagrams/solution.py
|
all-domains/tutorials/cracking-the-coding-interview/strings-making-anagrams/solution.py
|
Python
| 0.998758
|
@@ -0,0 +1,823 @@
+# https://www.hackerrank.com/challenges/ctci-making-anagrams%0A# Python 3%0A%0Adef delete_char_at(s, i):%0A return s%5B:i%5D + s%5Bi+1:%5D%0A%0Adef number_needed(a, b):%0A counter = 0%0A loop_over, reference = (a, b) if len(a) %3E len(b) else (b, a)%0A%0A for character in loop_over:%0A index = reference.find(character)%0A if index == -1:%0A counter += 1%0A else:%0A # Remove the character from the reference string%0A reference = delete_char_at(reference, index)%0A%0A # If there are remaining characters in reference string, add those to count%0A counter += len(reference)%0A%0A return counter%0A%0Aa = input().strip()%0Ab = input().strip()%0A%0A# TEST answer should be 3%0A# betas and beast are anagrams...so the trailing bz and a should be removed%0A# a = 'betasbz'%0A# b = 'beasta'%0A%0Aprint(number_needed(a, b))%0A
|
|
b906082034822a825ec2963864b32d6619cf938a
|
Add testing functions for join and relabel
|
skimage/segmentation/tests/test_join.py
|
skimage/segmentation/tests/test_join.py
|
Python
| 0
|
@@ -0,0 +1,1493 @@
+import numpy as np%0Afrom numpy.testing import assert_array_equal, assert_raises%0Afrom skimage.segmentation import join_segmentations, relabel_from_one%0A%0Adef test_join_segmentations():%0A s1 = np.array(%5B%5B0, 0, 1, 1%5D,%0A %5B0, 2, 1, 1%5D,%0A %5B2, 2, 2, 1%5D%5D)%0A s2 = np.array(%5B%5B0, 1, 1, 0%5D,%0A %5B0, 1, 1, 0%5D,%0A %5B0, 1, 1, 1%5D%5D)%0A%0A # test correct join%0A # NOTE: technically, equality to j_ref is not required, only that there%0A # is a one-to-one mapping between j and j_ref. I don't know of an easy way%0A # to check this (i.e. not as error-prone as the function being tested)%0A j = join_segmentations(s1, s2)%0A j_ref = np.array(%5B%5B0, 1, 3, 2%5D,%0A %5B0, 5, 3, 2%5D,%0A %5B4, 5, 5, 3%5D%5D)%0A assert_array_equal(j, j_ref)%0A%0A # test correct exception when arrays are different shapes%0A s3 = np.array(%5B%5B0, 0, 1, 1%5D, %5B0, 2, 2, 1%5D%5D)%0A assert_raises(ValueError, join_segmentations, s1, s3)%0A%0Adef test_relabel_from_one():%0A ar = np.array(%5B1, 1, 5, 5, 8, 99, 42%5D)%0A ar_relab, fw, inv = relabel_from_one(ar)%0A ar_relab_ref = np.array(%5B1, 1, 2, 2, 3, 5, 4%5D)%0A assert_array_equal(ar_relab, ar_relab_ref)%0A fw_ref = np.zeros(100, int)%0A fw_ref%5B1%5D = 1; fw_ref%5B5%5D = 2; fw_ref%5B8%5D = 3; fw_ref%5B42%5D = 4; fw_ref%5B99%5D = 5%0A assert_array_equal(fw, fw_ref)%0A inv_ref = np.array(%5B0, 1, 5, 8, 42, 99%5D)%0A assert_array_equal(inv, inv_ref)%0A%0A%0Aif __name__ == %22__main__%22:%0A np.testing.run_module_suite()%0A
|
|
6b38f963cf555576157f063e9c026a94814f93a2
|
Fix all target for managed install.
|
build/android/tests/multiple_proguards/multiple_proguards.gyp
|
build/android/tests/multiple_proguards/multiple_proguards.gyp
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
'package_name': 'multiple_proguard',
},
'targets': [
{
'target_name': 'multiple_proguards_test_apk',
'type': 'none',
'variables': {
'app_manifest_version_name%': '<(android_app_version_name)',
'java_in_dir': '.',
'proguard_enabled': 'true',
'proguard_flags_paths': [
'proguard1.flags',
'proguard2.flags',
],
'R_package': 'dummy',
'R_package_relpath': 'dummy',
'apk_name': 'MultipleProguards',
},
'dependencies': [
# guava has references to objects using reflection which
# should be ignored in proguard step.
'../../../../third_party/guava/guava.gyp:guava_javalib',
],
'includes': [ '../../../../build/java_apk.gypi' ],
},
],
}
|
Python
| 0.000209
|
@@ -705,24 +705,123 @@
Proguards',%0A
+ # This is a build-only test. There's nothing to install.%0A 'gyp_managed_install': 0,%0A
%7D,%0A
|
46be3829264ab1ac59f26d4dbd9b1427405f71dc
|
Write colors to console on Windows with correct background, not always black.
|
src/robot/output/highlighting.py
|
src/robot/output/highlighting.py
|
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Windows highlighting code adapted from color_console.py. It is copyright
# Andre Burgaud, licensed under the MIT License, and available here:
# http://www.burgaud.com/bring-colors-to-the-windows-console-with-python/
import os
import sys
try:
from ctypes import windll, Structure, c_short, c_ushort, byref
except ImportError: # Not on Windows or using Jython
windll = None
def Highlighter(stream):
    """Return a highlighter appropriate for the current platform.

    Unix-like systems (path separator '/') get ANSI escape codes. On
    Windows the Win32 console API is used when ctypes gave us `windll`;
    otherwise highlighting is disabled entirely.
    """
    if os.sep == '/':
        return UnixHighlighter(stream)
    if windll:
        return DosHighlighter(stream)
    return NoHighlighting(stream)
class UnixHighlighter(object):
    """Colors console output by writing ANSI escape sequences.

    Suitable for terminals that understand ANSI codes (Unix-like systems).
    """

    _ANSI_GREEN = '\033[32m'
    _ANSI_RED = '\033[31m'
    _ANSI_YELLOW = '\033[33m'
    _ANSI_RESET = '\033[0m'

    def __init__(self, stream):
        """`stream` is the file-like object the escape codes are written to."""
        self._stream = stream

    def green(self):
        """Switch subsequent output to green."""
        self._set_color(self._ANSI_GREEN)

    def red(self):
        """Switch subsequent output to red."""
        self._set_color(self._ANSI_RED)

    def yellow(self):
        """Switch subsequent output to yellow."""
        self._set_color(self._ANSI_YELLOW)

    def reset(self):
        """Restore the terminal's default color."""
        self._set_color(self._ANSI_RESET)

    def _set_color(self, color):
        # Single write point; subclasses override this to disable coloring.
        self._stream.write(color)
class NoHighlighting(UnixHighlighter):
    """Highlighter that silently drops every color request.

    Used on Windows when the Win32 console API is unavailable.
    """

    def _set_color(self, color):
        # Intentionally a no-op: output stays uncolored.
        pass
class DosHighlighter(object):
    """Colors console output via the Win32 console API.

    The console stores colors as one attribute word per character cell:
    foreground color in the low nibble, background color in the next
    nibble. The original `_set_colors` wrote only foreground bits, which
    zeroed the background nibble and forced a black background on consoles
    with a non-default background. The background bits captured at
    construction time are now merged into every foreground change.
    """

    _FOREGROUND_GREEN = 0x2
    _FOREGROUND_RED = 0x4
    _FOREGROUND_YELLOW = 0x6
    _FOREGROUND_GREY = 0x7
    _FOREGROUND_INTENSITY = 0x8
    _BACKGROUND_MASK = 0xF0  # background color bits of the attribute word
    _STDOUT_HANDLE = -11
    _STDERR_HANDLE = -12

    def __init__(self, stream):
        """`stream` must be sys.__stdout__ or sys.__stderr__."""
        self._handle = self._get_std_handle(stream)
        self._orig_colors = self._get_colors()
        # Remember the current background so highlighting preserves it
        # instead of resetting it to black.
        self._background = self._orig_colors & self._BACKGROUND_MASK

    def green(self):
        """Switch subsequent output to green."""
        self._set_foreground_colors(self._FOREGROUND_GREEN)

    def red(self):
        """Switch subsequent output to red."""
        self._set_foreground_colors(self._FOREGROUND_RED)

    def yellow(self):
        """Switch subsequent output to yellow."""
        self._set_foreground_colors(self._FOREGROUND_YELLOW)

    def reset(self):
        """Restore the exact attributes captured at construction time."""
        self._set_colors(self._orig_colors)

    def _get_std_handle(self, stream):
        # Map the Python stream object to the corresponding Win32 handle id.
        handle = self._STDOUT_HANDLE \
            if stream is sys.__stdout__ else self._STDERR_HANDLE
        return windll.kernel32.GetStdHandle(handle)

    def _get_colors(self):
        # Read the current attribute word from the console buffer info.
        csbi = _CONSOLE_SCREEN_BUFFER_INFO()
        ok = windll.kernel32.GetConsoleScreenBufferInfo(self._handle, byref(csbi))
        if not ok:  # Call failed, return default console colors (gray on black)
            return self._FOREGROUND_GREY
        return csbi.wAttributes

    def _set_foreground_colors(self, colors):
        # Merge the requested foreground with intensity and the preserved
        # background nibble before writing the attribute word.
        self._set_colors(colors | self._FOREGROUND_INTENSITY | self._background)

    def _set_colors(self, colors):
        windll.kernel32.SetConsoleTextAttribute(self._handle, colors)
if windll:
    # ctypes mirrors of the Win32 structures consumed by
    # GetConsoleScreenBufferInfo. Field names, types and order must match
    # the native layouts exactly, so they are only defined when ctypes
    # imported successfully (i.e. on Windows/CPython).
    class _COORD(Structure):
        # Win32 COORD: a character-cell position in the console buffer.
        _fields_ = [("X", c_short),
                    ("Y", c_short)]

    class _SMALL_RECT(Structure):
        # Win32 SMALL_RECT: the edges of the visible console window.
        _fields_ = [("Left", c_short),
                    ("Top", c_short),
                    ("Right", c_short),
                    ("Bottom", c_short)]

    class _CONSOLE_SCREEN_BUFFER_INFO(Structure):
        # Win32 CONSOLE_SCREEN_BUFFER_INFO; wAttributes holds the packed
        # foreground/background color attribute word that DosHighlighter
        # reads in _get_colors().
        _fields_ = [("dwSize", _COORD),
                    ("dwCursorPosition", _COORD),
                    ("wAttributes", c_ushort),
                    ("srWindow", _SMALL_RECT),
                    ("dwMaximumWindowSize", _COORD)]
|
Python
| 0
|
@@ -1938,16 +1938,44 @@
Y = 0x8%0A
+ _BACKGROUND_MASK = 0xF0%0A
_STD
@@ -2147,16 +2147,85 @@
colors()
+%0A self._background = self._orig_colors & self._BACKGROUND_MASK
%0A%0A de
@@ -2249,32 +2249,43 @@
self._set_
+foreground_
colors(self._FOR
@@ -2329,32 +2329,43 @@
self._set_
+foreground_
colors(self._FOR
@@ -2410,32 +2410,43 @@
self._set_
+foreground_
colors(self._FOR
@@ -2529,23 +2529,8 @@
lors
-, intense=False
)%0A%0A
@@ -2942,16 +2942,33 @@
le color
+s (gray on black)
%0A
@@ -3043,24 +3043,35 @@
def _set_
+foreground_
colors(self,
@@ -3081,66 +3081,36 @@
lors
-, intense=True):%0A if intense:%0A
+):%0A self._set_
colors
- =
+(
colo
@@ -3140,16 +3140,72 @@
NTENSITY
+ %7C self._background)%0A%0A def _set_colors(self, colors):
%0A
|
c8bcdf4d586277df940b8fd9f977cd72305b5e85
|
add StatusReportView
|
skrill/views.py
|
skrill/views.py
|
Python
| 0
|
@@ -0,0 +1,1765 @@
+from django import http%0Afrom django.views.generic.base import View%0A%0Afrom skrill.models import PaymentRequest, StatusReport%0A%0A%0Aclass StatusReportView(View):%0A def post(self, request, *args, **kwargs):%0A payment_request = PaymentRequest.objects.get(pk=request.POST%5B'transaction_id'%5D)%0A report = StatusReport()%0A report.payment_request = payment_request%0A report.pay_to_email = request.POST%5B'pay_to_email'%5D%0A report.pay_from_email = request.POST%5B'pay_from_email'%5D%0A report.merchant_id = request.POST%5B'merchant_id'%5D%0A report.customer_id = request.POST.get('customer_id', None)%0A report.transaction_id = request.POST%5B'transaction_id'%5D%0A report.mb_transaction_id = request.POST%5B'mb_transaction_id'%5D%0A report.mb_amount = request.POST%5B'mb_amount'%5D%0A report.mb_currency = request.POST%5B'mb_currency'%5D%0A report.status = request.POST%5B'status'%5D%0A report.failed_reason_code = request.POST.get('failed_reason_code', None)%0A report.md5sig = request.POST%5B'md5sig'%5D%0A report.sha2sig = request.POST.get('sha2sig', None)%0A report.amount = request.POST%5B'amount'%5D%0A report.currency = request.POST%5B'currency'%5D%0A report.payment_type = request.POST.get('payment_type', None)%0A report.custom_field_1 = request.POST.get('custom_field_1', None)%0A report.custom_field_2 = request.POST.get('custom_field_2', None)%0A report.custom_field_3 = request.POST.get('custom_field_3', None)%0A report.custom_field_4 = request.POST.get('custom_field_4', None)%0A report.custom_field_5 = request.POST.get('custom_field_5', None)%0A report.save()%0A report.validate_md5sig()%0A report.valid = True%0A report.save()%0A return http.HttpResponse()%0A%0A
|
|
12c3ded4ed05e34a0a44163abd5ae08ab0289c4c
|
Create Score-Calculator.py
|
Score-Calculator.py
|
Score-Calculator.py
|
Python
| 0
|
@@ -0,0 +1,308 @@
+midterm = float(input())%0Aif midterm %3E= 0:%0A if midterm %3C= 60:%0A final = float(input())%0A if final %3E= 0:%0A if final %3C= 60:%0A total = midterm + final%0A avg = total/2%0A print('Total: ' + str(total))%0A print('Average: ' + str(avg))%0A
|
|
3b0fdecb60b9c5e8a104564d5703c85c97c10f27
|
Introduce an ExtruderStack class
|
cura/Settings/ExtruderStack.py
|
cura/Settings/ExtruderStack.py
|
Python
| 0
|
@@ -0,0 +1,723 @@
+# Copyright (c) 2017 Ultimaker B.V.%0A# Cura is released under the terms of the AGPLv3 or higher.%0A%0Afrom UM.MimeTypeDatabase import MimeType, MimeTypeDatabase%0Afrom UM.Settings.ContainerStack import ContainerStack%0Afrom UM.Settings.ContainerRegistry import ContainerRegistry%0A%0Aclass ExtruderStack(ContainerStack):%0A def __init__(self, container_id, *args, **kwargs):%0A super().__init__(container_id, *args, **kwargs)%0A%0Aextruder_stack_mime = MimeType(%0A name = %22application/x-cura-extruderstack%22,%0A comment = %22Cura Extruder Stack%22,%0A suffixes = %5B %22extruder.cfg%22 %5D%0A)%0A%0AMimeTypeDatabase.addMimeType(extruder_stack_mime)%0AContainerRegistry.addContainerTypeByName(ExtruderStack, %22extruder_stack%22, extruder_stack_mime.name)%0A
|
|
1e996e3cf1c8e067bbbb8bf23f93b34202b4cd44
|
add 401
|
leetcode/1.Array_String/401.BinaryWatch.py
|
leetcode/1.Array_String/401.BinaryWatch.py
|
Python
| 0.001659
|
@@ -0,0 +1,2498 @@
+# 401. Binary Watch %0A%0A# A binary watch has 4 LEDs on the top which represent the hours (0-11), and the 6 LEDs on the bottom represent the minutes (0-59).%0A%0A# Each LED represents a zero or one, with the least significant bit on the right.%0A%0A# off off on on%0A# off on on off off on %0A%0A# For example, the above binary watch reads %223:25%22.%0A%0A# Given a non-negative integer n which represents the number of LEDs that are currently on, return all possible times the watch could represent.%0A%0A# Example:%0A%0A# Input: n = 1%0A# Return: %5B%221:00%22, %222:00%22, %224:00%22, %228:00%22, %220:01%22, %220:02%22, %220:04%22, %220:08%22, %220:16%22, %220:32%22%5D%0A%0A# Note:%0A%0A# The order of output does not matter.%0A# The hour must not contain a leading zero, for example %2201:00%22 is not valid, it should be %221:00%22.%0A# The minute must be consist of two digits and may contain a leading zero, for example %2210:2%22 is not valid, it should be %2210:02%22.%0A%0A%0Aclass Solution(object):%0A def readBinaryWatch(self, num):%0A %22%22%22%0A :type num: int%0A :rtype: List%5Bstr%5D%0A The bin() method%0A %22%22%22%0A return_list = %5B%5D%0A for hour in range(12):%0A for minute in range(60):%0A if sum(map(lambda number: int(bin(number)%5B2:%5D.count('1')), %5Bhour, minute%5D)) == num:%0A return_list += %5Bstr(hour) + %22:%22 + str(minute).zfill(2)%5D%0A return return_list%0A%0Aclass Solution1(object):%0A def readBinaryWatch(self, num):%0A %22%22%22%0A :type num: int%0A :rtype: List%5Bstr%5D%0A x = x & (x - 1): turn off the rightmost 1%0A %22%22%22%0A %0A def bit_count(binnum):%0A count = 0%0A while binnum:%0A binnum &= binnum - 1%0A count += 1%0A return count%0A %0A return_list = %5B%5D%0A for hour in range(12):%0A for minute in range(60):%0A if bit_count(hour) + bit_count(minute) == num:%0A return_list += %5B'%7B%7D:%7B%7D'.format(str(hour), str(minute).zfill(2))%5D%0A return return_list%0A%0Aclass Solution2(object):%0A def readBinaryWatch(self, num):%0A 
%22%22%22%0A :type num: int%0A :rtype: List%5Bstr%5D%0A %22%22%22%0A %0A def bit_count(binnum):%0A count = 0%0A while binnum:%0A binnum &= binnum - 1%0A count += 1%0A return count%0A return %5B'%7B%7D:%7B%7D'.format(str(hour), str(minute).zfill(2)) for hour in range(12) for minute in range(60) if bit_count(hour) + bit_count(minute) == num%5D
|
|
5712da6095594360be9010b0fe6b85606ec1e2d0
|
Add regression test for #891
|
spacy/tests/regression/test_issue891.py
|
spacy/tests/regression/test_issue891.py
|
Python
| 0.000001
|
@@ -0,0 +1,321 @@
+# coding: utf8%0Afrom __future__ import unicode_literals%0A%0Aimport pytest%0A%0A@pytest.mark.xfail%0A@pytest.mark.parametrize('text', %5B%22want/need%22%5D)%0Adef test_issue891(en_tokenizer, text):%0A %22%22%22Test that / infixes are split correctly.%22%22%22%0A tokens = en_tokenizer(text)%0A assert len(tokens) == 3%0A assert tokens%5B1%5D.text == %22/%22%0A
|
|
d11ac35410252c108dcd7e8d2ae03df2abc4697b
|
add statsquid cli util
|
statsquid/statsquid.py
|
statsquid/statsquid.py
|
Python
| 0
|
@@ -0,0 +1,2743 @@
+#!/usr/bin/env python%0A%0Aimport os,sys,logging,signal%0Afrom argparse import ArgumentParser%0A#from . import __version__%0Afrom listener import StatListener%0Afrom collector import StatCollector%0A%0A__version__ = 'alpha'%0Alog = logging.getLogger('statsquid')%0A%0Aclass StatSquid(object):%0A %22%22%22%0A StatSquid %0A params:%0A - role(str): Role of this statsquid instance. Either master or agent.%0A - options(dict): dictionary of options to start instance with%0A %22%22%22%0A #TODO: improve graceful exiting, fix signal catching%0A def __init__(self,role,options):%0A self.role = role%0A signal.signal(signal.SIGTERM, self.sig_handler)%0A if self.role == 'master':%0A self.instance = self.start_master(options)%0A if self.role == 'agent':%0A self.instance = self.start_agent(options)%0A %0A def start_master(self,opts):%0A return StatListener(redis_host=opts%5B'redis_host'%5D,%0A redis_port=opts%5B'redis_port'%5D)%0A%0A def start_agent(self,opts):%0A #format docker url%0A docker_url = %22tcp://%22 + opts%5B'docker_host'%5D + %5C%0A %22:%22 + str(opts%5B'docker_port'%5D)%0A%0A return StatCollector(docker_url,%0A redis_host=opts%5B'redis_host'%5D,%0A redis_port=opts%5B'redis_port'%5D)%0A%0A def sig_handler(self,signal,frame):%0A print('signal caught, exiting')%0A self.instance.stop()%0A sys.exit(0)%0A%0Adef main():%0A commands = %5B 'agent', 'master' %5D%0A parser = ArgumentParser(description='statsquid %25s' %25 __version__)%0A parser.add_argument('--docker-host',%0A dest='docker_host',%0A help='docker host to connect to (default: 127.0.0.1)',%0A default='127.0.0.1')%0A parser.add_argument('--docker-port',%0A dest='docker_port',%0A help='docker port to connect on (default: 4243)',%0A default=4243)%0A parser.add_argument('--redis-host',%0A dest='redis_host',%0A help='redis host to connect to (default: 127.0.0.1)',%0A default='127.0.0.1')%0A parser.add_argument('--redis-port',%0A dest='redis_port',%0A help='redis port to connect on (default: 6379)',%0A 
default='6379')%0A parser.add_argument('command',%0A help='Mode to run as or command to run (%25s)' %25 %5C%0A ','.join(commands))%0A%0A args = parser.parse_args()%0A%0A if args.command not in commands:%0A log.error('Unknown command %25s' %25 args.command)%0A exit(1)%0A%0A s = StatSquid(args.command,args.__dict__)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
59160eeb24f6311dafce2db34a40f8ba879fd516
|
Add test showing taint for attr store
|
python/ql/test/experimental/dataflow/tainttracking/defaultAdditionalTaintStep/test_attr.py
|
python/ql/test/experimental/dataflow/tainttracking/defaultAdditionalTaintStep/test_attr.py
|
Python
| 0.000111
|
@@ -0,0 +1,1240 @@
+# Add taintlib to PATH so it can be imported during runtime without any hassle%0Aimport sys; import os; sys.path.append(os.path.dirname(os.path.dirname((__file__))))%0Afrom taintlib import *%0A%0A# This has no runtime impact, but allows autocomplete to work%0Afrom typing import TYPE_CHECKING%0Aif TYPE_CHECKING:%0A from ..taintlib import *%0A%0A%0A# Actual tests%0A%0Aclass Foo:%0A def __init__(self, arg):%0A self.arg = arg%0A self.other_arg = %22other_arg%22%0A%0A%0Adef test_tainted_attr():%0A # The following demonstrates how tainting an attribute affected the taintedness of%0A # the object.%0A #%0A # Previously we would (wrongly) treat the object as tainted if we noticed a write of%0A # a tainted value to any of its' attributes. This lead to FP, highlighted in%0A # https://github.com/github/codeql/issues/7786%0A%0A f = Foo(TAINTED_STRING)%0A ensure_not_tainted(f) # $ SPURIOUS: tainted%0A ensure_tainted(f.arg) # $ tainted%0A ensure_not_tainted(f.other_arg)%0A%0A%0A x = Foo(%22x%22)%0A ensure_not_tainted(x, x.arg, x.other_arg)%0A%0A x.arg = TAINTED_STRING%0A ensure_not_tainted(x) # $ SPURIOUS: tainted%0A ensure_tainted(x.arg) # $ tainted%0A ensure_not_tainted(f.other_arg)%0A%0A%0A b = Foo(%22bar%22)%0A ensure_not_tainted(b, b.arg, b.other_arg)%0A
|
|
1f92af62d1a58e496c2ce4251676fca3b571e8f1
|
Add missing specification model tests
|
django_project/localities/tests/test_model_Specification.py
|
django_project/localities/tests/test_model_Specification.py
|
Python
| 0.000001
|
@@ -0,0 +1,804 @@
+# -*- coding: utf-8 -*-%0Afrom django.test import TestCase%0A%0Afrom django.db import IntegrityError%0A%0Afrom .model_factories import (%0A SpecificationF,%0A DomainF,%0A AttributeF%0A)%0A%0A%0Aclass TestModelSpecification(TestCase):%0A def test_model_repr(self):%0A dom = DomainF.create(id=1, name='A domain')%0A attr = AttributeF.create(key='An attribute')%0A spec = SpecificationF.create(domain=dom, attribute=attr)%0A%0A self.assertEqual(unicode(spec), 'A domain an_attribute')%0A%0A def test_model_uniqueness(self):%0A dom = DomainF.create(id=1)%0A attr = AttributeF.create(id=1, key='An attribute')%0A SpecificationF.create(domain=dom, attribute=attr)%0A%0A self.assertRaises(%0A IntegrityError, SpecificationF.create,%0A domain=dom, attribute=attr%0A )%0A
|
|
15298dd59aabd817b3b160910b423d3448c9e189
|
Test for overriding __import__.
|
tests/import/import_override.py
|
tests/import/import_override.py
|
Python
| 0.001211
|
@@ -0,0 +1,1006 @@
+import import1b%0A%0Aassert import1b.var == 123%0A%0Aimport builtins%0A%0Aorg_import = builtins.__import__%0A%0A%0Adef my_import(*args):%0A # MicroPython currently doesn't pass globals/locals, so don't print them%0A # CPython3.5 and lower for %22from pkg.mod import foo%22 appear to call%0A # __import__ twice - once with 5 args, and once with 1 (for %22pkg%22)%0A # CPython3.5 and MicroPython doesn't have such an artifact. So, to make%0A # test pass on CPython3.5-, just don't print calls with less than 5 args%0A if len(args) == 5:%0A print(%22overriden import:%22, args%5B0%5D, args%5B3%5D, args%5B4%5D)%0A return org_import(*args)%0A%0A%0Atry:%0A builtins.__import__ = my_import%0Aexcept AttributeError:%0A print(%22SKIP%22)%0A raise SystemExit%0A%0A# __import__ is called unconditionally on import, even if module is already%0A# imported (actually, runtime doesn't know or care if module is already%0A# imported, sys.modules caching is completely on the level of __import__%0A# itself).%0Aimport import1b%0A%0Aprint(import1b.var)%0A%0Afrom pkg.mod import foo%0A
|
|
02bf100a05ed6267ab3fb618c52150fc2d4884f2
|
Add some basic tests around contact parsing
|
tests/test_contact_parsing.py
|
tests/test_contact_parsing.py
|
Python
| 0
|
@@ -0,0 +1,1296 @@
+import aiosip%0A%0A%0Adef test_simple_header():%0A header = aiosip.Contact.from_header('%3Csip:pytest@127.0.0.1:7000%3E')%0A assert not header%5B'name'%5D%0A assert dict(header%5B'params'%5D) == %7B%7D%0A assert dict(header%5B'uri'%5D) == %7B'scheme': 'sip',%0A 'user': 'pytest',%0A 'password': None,%0A 'host': '127.0.0.1',%0A 'port': 7000,%0A 'params': None,%0A 'headers': None%7D%0A%0A%0Adef test_header_with_name():%0A header = aiosip.Contact.from_header('%22Pytest%22 %3Csip:pytest@127.0.0.1:7000%3E')%0A assert header%5B'name'%5D == %22Pytest%22%0A assert dict(header%5B'params'%5D) == %7B%7D%0A assert dict(header%5B'uri'%5D) == %7B'scheme': 'sip',%0A 'user': 'pytest',%0A 'password': None,%0A 'host': '127.0.0.1',%0A 'port': 7000,%0A 'params': None,%0A 'headers': None%7D%0A%0A%0Adef test_add_tag():%0A header = aiosip.Contact.from_header('%3Csip:pytest@127.0.0.1:7000%3E')%0A assert dict(header%5B'params'%5D) == %7B%7D%0A%0A header.add_tag()%0A assert 'tag' in header%5B'params'%5D%0A
|
|
f5720f2609bcb19ffca308a3589c8e6171d1f8b7
|
Add test cases for removepunctuation
|
tests/test_removepunctuation.py
|
tests/test_removepunctuation.py
|
Python
| 0.000018
|
@@ -0,0 +1,931 @@
+#%0A%0Aimport pytest%0Afrom sdsc.textutil import removepunctuation%0A%0A%0A@pytest.mark.parametrize(%22end%22, %5BTrue, False%5D)%0A@pytest.mark.parametrize(%22start%22, %5BTrue, False%5D)%0A@pytest.mark.parametrize(%22data%22, %5B%0A # 0 - no quotes%0A 'word',%0A # 1 - single quote at the start%0A '%C2%B8word',%0A # 2 - single quote at the end%0A 'word%5C'',%0A # 3 - single quotes at both ends%0A '%5C'word%5C'',%0A # 4 - double quotes at the start%0A %22%5C%22word%22,%0A # 5 - double quotes at the end%0A 'word%22',%0A # 6 - double quotes at both ends%0A '%22word%22',%0A%5D)%0Adef test_removepunctuation(data, start, end):%0A%0A result = %22word%22%0A # For the time being, we check with .isalpha() to cover%0A # all punctuation. However, %22@%22.isalpha() would return False%0A if start and not data%5B0%5D.isalpha():%0A result = data%5B0%5D + result%0A%0A if end and not data%5B-1%5D.isalpha():%0A result = result + data%5B-1%5D%0A%0A removepunctuation(data, start, end) == result%0A
|
|
e8309903b54598358efc20092760fe933cbd8ce7
|
check if a string is a permutation of anohter string
|
CrackingCodingInterview/1.3_string_permutation.py
|
CrackingCodingInterview/1.3_string_permutation.py
|
Python
| 0.999858
|
@@ -0,0 +1,121 @@
+%22%22%22%0Acheck if a string is a permutation of anohter string%0A%22%22%22%0A%0A#utalize sorted, perhaps check length first to make faster%0A
|
|
6dde05fc401ff615b44dc101bfb7775c65535e79
|
Create 2.6_circularlinkedlist.py
|
CrackingCodingInterview/2.6_circularlinkedlist.py
|
CrackingCodingInterview/2.6_circularlinkedlist.py
|
Python
| 0.000019
|
@@ -0,0 +1,61 @@
+%22%22%22%0Areturn node at begining of a cricularly linked list%0A%22%22%22%0A%0A
|
|
cb2deafae258625f0c4ec8bb68713b391129a27c
|
add migration of help text changes
|
isi_mip/climatemodels/migrations/0085_auto_20180215_1105.py
|
isi_mip/climatemodels/migrations/0085_auto_20180215_1105.py
|
Python
| 0.000001
|
@@ -0,0 +1,1584 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.10.8 on 2018-02-15 10:05%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('climatemodels', '0084_inputdata_protocol_relation'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='impactmodel',%0A name='version',%0A field=models.CharField(blank=True, help_text='The model version with which these simulations were run. Please indicate if the model version used for ISIMIP2b can be evaluated based on comparison of the ISIMIP2a runs with observed impacts.', max_length=500, null=True, verbose_name='Model version'),%0A ),%0A migrations.AlterField(%0A model_name='inputdata',%0A name='protocol_relation',%0A field=models.CharField(choices=%5B('P', 'Protocol'), ('S', 'Supplementary')%5D, default='P', max_length=1),%0A ),%0A migrations.AlterField(%0A model_name='inputdata',%0A name='variables',%0A field=models.ManyToManyField(blank=True, help_text='The variables are filtered based on the data type. To see variables of a different data type, please change and save data type first.', to='climatemodels.ClimateVariable'),%0A ),%0A migrations.AlterField(%0A model_name='outputdata',%0A name='model',%0A field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='climatemodels.ImpactModel'),%0A ),%0A %5D%0A
|
|
5fa3fc6ba78c3e6cf12a25bddb835e9d885bcbd3
|
Create 0035_auto_20190712_2015.py
|
src/submission/migrations/0035_auto_20190712_2015.py
|
src/submission/migrations/0035_auto_20190712_2015.py
|
Python
| 0.000001
|
@@ -0,0 +1,1437 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.21 on 2019-07-12 19:15%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('submission', '0034_auto_20190416_1009'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='article',%0A name='abstract',%0A field=models.TextField(blank=True, help_text='Please avoid pasting content from word processors as they can add unwanted styling to the abstract. You can retype the abstract here or copy and paste it into notepad/a plain text editor before pasting here.', null=True),%0A ),%0A migrations.AlterField(%0A model_name='article',%0A name='stage',%0A field=models.CharField(choices=%5B('Unsubmitted', 'Unsubmitted'), ('Unassigned', 'Unassigned'), ('Assigned', 'Assigned to Editor'), ('Under Review', 'Peer Review'), ('Under Revision', 'Revision'), ('Rejected', 'Rejected'), ('Accepted', 'Accepted'), ('Editor Copyediting', 'Editor Copyediting'), ('Author Copyediting', 'Author Copyediting'), ('Final Copyediting', 'Final Copyediting'), ('Typesetting', 'Typesetting'), ('Proofing', 'Proofing'), ('pre_publication', 'Pre Publication'), ('Published', 'Published'), ('preprint_review', 'Preprint Review'), ('preprint_published', 'Preprint Published')%5D, default='Unsubmitted', max_length=200),%0A ),%0A %5D%0A
|
|
e0229179b01805ca7f7e23d3094737a4f366e162
|
Add missing files for d8af78447f286ad07ad0736d4202e0becd0dd319
|
board/migrations/0001_initial.py
|
board/migrations/0001_initial.py
|
Python
| 0.000006
|
@@ -0,0 +1,735 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0Afrom django.conf import settings%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A migrations.swappable_dependency(settings.AUTH_USER_MODEL),%0A %5D%0A%0A operations = %5B%0A migrations.CreateModel(%0A name='UserProfile',%0A fields=%5B%0A ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),%0A ('nick', models.CharField(max_length=16)),%0A ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),%0A %5D,%0A options=%7B%0A %7D,%0A bases=(models.Model,),%0A ),%0A %5D%0A
|
|
2a502236de5c28d4f4e6626317565c7bb60ebb13
|
Create NumberofDigitOne_001.py
|
leetcode/233-Number-of-Digit-One/NumberofDigitOne_001.py
|
leetcode/233-Number-of-Digit-One/NumberofDigitOne_001.py
|
Python
| 0.000057
|
@@ -0,0 +1,389 @@
+class Solution:%0A # @param %7Binteger%7D n%0A # @return %7Binteger%7D%0A def countDigitOne(self, n):%0A res, d = 0, 10%0A while 10 * n %3E= d:%0A t = d / 10%0A r = n %25 d%0A res += n / d * t%0A if t - 1 %3C r %3C 2 * t - 1:%0A res += r - t + 1%0A elif 2 * t - 1 %3C= r:%0A res += t%0A d *= 10%0A return res%0A
|
|
052dbe05c0e1d3e2821857a035e469be2a1055ae
|
Add "what is my purpose in life" plugin
|
plugins/pass_the_butter.py
|
plugins/pass_the_butter.py
|
Python
| 0.000021
|
@@ -0,0 +1,250 @@
+from espresso.main import robot%0A%0A@robot.respond(r%22(?i)pass the butter%22)%0Adef pass_the_butter(res):%0A res.reply(res.msg.user, %22What is my purpose in life?%22)%0A%0A@robot.respond(r%22(?i)you pass butter%22)%0Adef you_pass_butter(res):%0A res.send(%22Oh my god.%22)%0A
|
|
5b57686868b595fb4e7b431822fe4c7bf2de6cfb
|
Add unittests for title handling methods
|
test/test_uploadbot.py
|
test/test_uploadbot.py
|
Python
| 0
|
@@ -0,0 +1,1323 @@
+#!/usr/bin/env python%0A# -*- coding: latin-1 -*-%0A%0A%22%22%22Unit tests.%22%22%22%0A%0Aimport unittest%0Afrom uploadlibrary.UploadBot import _cut_title%0A%0A%0Aclass TestUploadBot(unittest.TestCase):%0A%0A %22%22%22Testing UploadBot methods.%22%22%22%0A%0A def test_cut_title_witout_cutting(self):%0A %22%22%22Test _cut_title() without cutting%22%22%22%0A inputs = %5B(%22%22, %22ABC%22, %22%22),%0A (%22%22, %22ABC%22, %22 123456789%22),%0A (%221234 %22, %22ABC%22, %22%22),%0A (%221234 %22, %22ABC%22, %22 123456789%22)%5D%0A outputs = %5B_cut_title(*x, MAX_LENGTH=25) for x in inputs%5D%0A expected_results = %5B'ABC',%0A 'ABC 123456789',%0A '1234 ABC',%0A '1234 ABC 123456789'%5D%0A self.assertListEqual(outputs, expected_results)%0A%0A def test_cut_title_with_cutting(self):%0A %22%22%22Test _cut_title() with cutting.%22%22%22%0A inputs = %5B(%221234 %22, %22ABC DEF G H%22, %22 123456789%22),%0A (%221234 %22, %22ABC DE FG H%22, %22 123456789%22),%0A (%221234 %22, %22ABC D E FG H%22, %22 123456789%22)%5D%0A outputs = %5B_cut_title(*x, MAX_LENGTH=25) for x in inputs%5D%0A expected_results = %5B'1234 ABC DEF... 123456789',%0A '1234 ABC DE... 123456789',%0A '1234 ABC D E... 123456789'%5D%0A self.assertListEqual(outputs, expected_results)
|
|
b15c7c044b0c514285bcb8c29b7bcfc8cf777c8b
|
Add tests for the signals
|
ormcache/tests/test_signals.py
|
ormcache/tests/test_signals.py
|
Python
| 0.000002
|
@@ -0,0 +1,1327 @@
+from django.core.cache import cache%0Afrom django.test import SimpleTestCase%0A%0Afrom ormcache.signals import cache_hit, cache_missed, cache_invalidated%0Afrom ormcache.tests.testapp.models import CachedDummyModel%0A%0A%0Aclass SignalsTestCase(SimpleTestCase):%0A%0A def setUp(self):%0A self.signal_called = False%0A self.instance_pk = CachedDummyModel.objects.create().pk%0A cache.clear()%0A%0A def _signal_callback(self, sender, signal):%0A self.signal_called = True%0A%0A def test_cache_hit_signal(self):%0A cache_hit.connect(self._signal_callback)%0A%0A CachedDummyModel.objects.get(pk=self.instance_pk) # miss%0A self.assertFalse(self.signal_called)%0A CachedDummyModel.objects.get(pk=self.instance_pk) # hit%0A self.assertTrue(self.signal_called)%0A%0A def test_cache_missed_signal(self):%0A cache_missed.connect(self._signal_callback)%0A%0A CachedDummyModel.objects.get(pk=self.instance_pk) # miss%0A self.assertTrue(self.signal_called)%0A%0A def test_cache_invalidated_signal(self):%0A cache_invalidated.connect(self._signal_callback)%0A%0A instance = CachedDummyModel.objects.get(pk=self.instance_pk) # miss%0A self.assertFalse(self.signal_called)%0A instance.title = %22hello%22%0A instance.save() # invalidate%0A self.assertTrue(self.signal_called)%0A
|
|
f07cdf5bd22dd352122d679a6e8c4cc213aad013
|
Create multiarm_selector.py
|
multiarm_selector.py
|
multiarm_selector.py
|
Python
| 0.000001
|
@@ -0,0 +1,2066 @@
+from __future__ import division%0Aimport random%0A%0A%0Aclass MultiarmSelector(object):%0A def __init__(self):%0A self.versions_served = %5B%5D%0A self.clicks = 0%0A self.missed = 0%0A%0A self.success_count = %7B%0A %22A%22: 0,%0A %22B%22: 0%0A %7D%0A self.total_count = %7B%0A %22A%22: 0,%0A %22B%22: 0%0A %7D%0A%0A def handle_response_from_new_user(self, user_data):%0A selection, not_selected = self._get_selection()%0A self.versions_served.append(selection)%0A self._update_success_and_total(selection, user_data)%0A if user_data%5Bselection%5D:%0A self.clicks += 1%0A return%0A if user_data%5Bnot_selected%5D:%0A self.missed += 1%0A return%0A%0A def prepare_report(self):%0A return self.clicks, self.missed%0A%0A def versions_served(self):%0A return self.versions_served%0A%0A def did_give_correct_answer(self):%0A %22We are assuming for test that B is always better than A%22%0A expected_reward_A = self.success_count%5B%22A%22%5D / self.total_count%5B%22A%22%5D%0A expected_reward_B = self.success_count%5B%22B%22%5D / self.total_count%5B%22B%22%5D%0A if expected_reward_B %3E expected_reward_A:%0A return 1%0A else:%0A return 0%0A%0A def _update_success_and_total(self, selection, user_data):%0A self.total_count%5Bselection%5D += 1%0A if user_data%5Bselection%5D:%0A self.success_count%5Bselection%5D += 1%0A%0A def _get_selection(self):%0A if random.random() %3C 0.1:%0A return self._get_random_selection()%0A%0A if self.total_count%5B%22A%22%5D == 0 or self.total_count%5B%22B%22%5D == 0:%0A return self._get_random_selection()%0A%0A expected_reward_A = self.success_count%5B%22A%22%5D / self.total_count%5B%22A%22%5D%0A expected_reward_B = self.success_count%5B%22B%22%5D / self.total_count%5B%22B%22%5D%0A%0A if expected_reward_B %3E expected_reward_A:%0A return %22B%22, %22A%22%0A else:%0A return %22A%22, %22B%22%0A%0A def _get_random_selection(self):%0A if random.random() %3C 0.5:%0A return %22A%22, %22B%22%0A else:%0A return %22B%22, %22A%22%0A
|
|
bf2cc99162389c6b5c18051f01756e17d9d11ce6
|
Add a test for rename.
|
tests/integration/test_rename.py
|
tests/integration/test_rename.py
|
Python
| 0.000004
|
@@ -0,0 +1,1154 @@
+%22%22%22%0ATest 'rename'.%0A%22%22%22%0A%0Aimport subprocess%0Aimport unittest%0A%0Afrom ._constants import _CLI%0A%0Afrom ._misc import Service%0A%0A%0A@unittest.skip(%22Wating for Rename%22)%0Aclass Rename1TestCase(unittest.TestCase):%0A %22%22%22%0A Test 'rename' when pool is non-existant.%0A %22%22%22%0A _MENU = %5B'rename'%5D%0A _POOLNAME = 'deadpool'%0A _NEW_POOLNAME = 'livepool'%0A%0A def setUp(self):%0A %22%22%22%0A Start the stratisd daemon with the simulator.%0A %22%22%22%0A self._service = Service()%0A self._service.setUp()%0A%0A def tearDown(self):%0A %22%22%22%0A Stop the stratisd simulator and daemon.%0A %22%22%22%0A self._service.tearDown()%0A%0A def testRename(self):%0A %22%22%22%0A This should fail because original name does not exist.%0A %22%22%22%0A try:%0A command_line = %5C%0A %5B'python', _CLI%5D + %5C%0A self._MENU + %5C%0A %5Bself._POOLNAME%5D + %5C%0A %5Bself._NEW_POOLNAME%5D%0A subprocess.check_call(command_line)%0A self.fail(%0A %22Should have failed because %25s does not exist.%22 %25 self._POOLNAME%0A )%0A except subprocess.CalledProcessError:%0A pass%0A
|
|
4d661b0fcb6f4b130370c010d16a2afec2449456
|
Create mergesort.py
|
aids/sorting_and_searching/mergesort.py
|
aids/sorting_and_searching/mergesort.py
|
Python
| 0.000001
|
@@ -0,0 +1,194 @@
+'''%0AIn this module, we implement merge sort%0ATime complexity: O(n * log n)%0A'''%0A%0A%0Adef mergesort(arr):%0A '''%0A Sort array using mergesort%0A %0A '''%0A pass%0A %0A%0Adef _merge(arr):%0A pass%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.