commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
6fb3c87f0f9b238eab71df6880568005e3a2b461
|
add DuckDuckGo Instant Answer plugin
|
plugins/ddg.py
|
plugins/ddg.py
|
Python
| 0.000002
|
@@ -0,0 +1,1516 @@
+# coding=utf-8%0A%0Aimport json%0Aimport urllib.request%0A%0A@yui.threaded%0A@yui.command('duckduckgo', 'ddg')%0Adef ddg(argv):%0A '''Returns the Instant Answer for a given query. Usage: ddg -lang %3Cquery%3E'''%0A lang = 'en_US'%0A if len(argv) %3C 1:%0A return%0A%0A # check if a language was given%0A argv = argv%5B1:%5D%0A if len(argv) %3E 1 and argv%5B0%5D.startswith('-'):%0A lang = argv%5B0%5D%5B1:%5D%0A argv = argv%5B1:%5D%0A%0A q = urllib.request.quote(' '.join(argv).encode('utf-8'))%0A url = f'https://api.duckduckgo.com/?q=%7Bq%7D&format=json&no_html=1&skip_disambig=1&no_redirect=1'%0A h = %7B 'Accept-Language' : lang %7D%0A req = urllib.request.Request(url, headers=h)%0A%0A with urllib.request.urlopen(req) as r:%0A js = json.loads(r.read().decode('utf-8'))%0A%0A Type = js.get('Type')%0A AbstractText = js.get('AbstractText')%0A AbstractURL = js.get('AbstractURL')%0A Heading = js.get('Heading')%0A Answer = js.get('Answer')%0A Redirect = js.get('Redirect')%0A%0A reply = 'No results.'%0A%0A if Type == 'D' or Type == 'C': # disambiguation or category%0A reply = f'%7BHeading%7D: %7BAbstractURL%7D'%0A elif Type == 'A': # article%0A reply = f'%7BHeading%7D: %7BAbstractText%7D - %7BAbstractURL%7D'%0A elif Type == 'E': # exclusive, e.g. calc/conversion and redirects%0A if type(Answer) is str and Answer != '':%0A reply = Answer%0A elif type(Answer) is dict and 'result' in Answer:%0A reply = Answer%5B'result'%5D%0A elif Redirect != '':%0A reply = f'Redirect: %7BRedirect%7D'%0A%0A return reply%0A
|
|
176940b006df7aeb759c3a985bd636887955926a
|
fix for issue 524
|
pyglet/app/cocoa.py
|
pyglet/app/cocoa.py
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
from __future__ import with_statement
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
from pyglet.app.base import PlatformEventLoop
from pyglet.libs.darwin import *
class CocoaEventLoop(PlatformEventLoop):
def __init__(self):
super(CocoaEventLoop, self).__init__()
# Prepare the default application.
NSApplication.sharedApplication()
# Create an autorelease pool for menu creation and finishLaunching
pool = NSAutoreleasePool.alloc().init()
self._create_application_menu()
NSApp().finishLaunching()
NSApp().activateIgnoringOtherApps_(True)
# Then get rid of the pool when we're done.
del pool
def _create_application_menu(self):
# Sets up a menu and installs a "quit" item so that we can use
# Command-Q to exit the application.
# See http://cocoawithlove.com/2010/09/minimalist-cocoa-programming.html
# This could also be done much more easily with a NIB.
menubar = NSMenu.alloc().init()
appMenuItem = NSMenuItem.alloc().init()
menubar.addItem_(appMenuItem)
NSApp().setMainMenu_(menubar)
appMenu = NSMenu.alloc().init()
processName = NSProcessInfo.processInfo().processName()
hideItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
"Hide " + processName, "hide:", "h")
appMenu.addItem_(hideItem)
appMenu.addItem_(NSMenuItem.separatorItem())
quitItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
"Quit " + processName, "terminate:", "q")
appMenu.addItem_(quitItem)
appMenuItem.setSubmenu_(appMenu)
def start(self):
pass
def step(self, timeout=None):
# Create an autorelease pool for this iteration.
pool = NSAutoreleasePool.alloc().init()
# Determine the timeout date.
if timeout is None:
# Using distantFuture as untilDate means that nextEventMatchingMask
# will wait until the next event comes along.
timeout_date = NSDate.distantFuture()
else:
timeout_date = NSDate.dateWithTimeIntervalSinceNow_(timeout)
# Retrieve the next event (if any). We wait for an event to show up
# and then process it, or if timeout_date expires we simply return.
# We only process one event per call of step().
self._is_running.set()
event = NSApp().nextEventMatchingMask_untilDate_inMode_dequeue_(
NSAnyEventMask, timeout_date, NSDefaultRunLoopMode, True)
# Dispatch the event (if any).
if event is not None:
event_type = event.type()
if event_type != NSApplicationDefined:
# Send out event as normal. Responders will still receive
# keyUp:, keyDown:, and flagsChanged: events.
NSApp().sendEvent_(event)
# Resend key events as special pyglet-specific messages
# which supplant the keyDown:, keyUp:, and flagsChanged: messages
# because NSApplication translates multiple key presses into key
# equivalents before sending them on, which means that some keyUp:
# messages are never sent for individual keys. Our pyglet-specific
# replacements ensure that we see all the raw key presses & releases.
# We also filter out key-down repeats since pyglet only sends one
# on_key_press event per key press.
if event_type == NSKeyDown and not event.isARepeat():
NSApp().sendAction_to_from_("pygletKeyDown:", None, event)
elif event_type == NSKeyUp:
NSApp().sendAction_to_from_("pygletKeyUp:", None, event)
elif event_type == NSFlagsChanged:
NSApp().sendAction_to_from_("pygletFlagsChanged:", None, event)
NSApp().updateWindows()
did_time_out = False
else:
did_time_out = True
self._is_running.clear()
# Destroy the autorelease pool used for this step.
del pool
return did_time_out
def stop(self):
pass
def notify(self):
pool = NSAutoreleasePool.alloc().init()
notifyEvent = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
NSApplicationDefined, # type
NSPoint(0.0, 0.0), # location
0, # modifierFlags
0, # timestamp
0, # windowNumber
None, # graphicsContext
0, # subtype
0, # data1
0, # data2
)
NSApp().postEvent_atStart_(notifyEvent, False)
del pool
|
Python
| 0
|
@@ -2259,16 +2259,74 @@
_menu()%0A
+ NSApp().setActivationPolicy_(0) # Mac OS X 10.6 %0A
|
e8576b67f8a3778c43a85e24d1cbc0e1985fe2ca
|
Add float.is_integer() example
|
trypython/basic/builtinfunc/float01.py
|
trypython/basic/builtinfunc/float01.py
|
Python
| 0.002001
|
@@ -0,0 +1,549 @@
+%22%22%22%0A%E7%B5%84%E3%81%BF%E8%BE%BC%E3%81%BF%E3%82%AF%E3%83%A9%E3%82%B9 float %E3%81%AE%E3%82%B5%E3%83%B3%E3%83%97%E3%83%AB%E3%81%A7%E3%81%99.%0A%0Afloat.is_integer() %E3%81%AB%E3%81%A4%E3%81%84%E3%81%A6%0A%22%22%22%0Afrom trypython.common.commoncls import SampleBase%0Afrom trypython.common.commonfunc import pr%0A%0A%0Aclass Sample(SampleBase):%0A def exec(self):%0A num = 1.00%0A pr('type(num)', type(num))%0A pr('is_integer', num.is_integer()) # ==%3E True (%E6%95%B4%E6%95%B0%E3%81%AB%E5%87%BA%E6%9D%A5%E3%82%8B%E3%81%AE%E3%81%A7)%0A pr('int()', int(num))%0A%0A num = 1.05%0A pr('is_integer', num.is_integer()) # ==%3E False (%E6%95%B4%E6%95%B0%E3%81%AB%E5%87%BA%E6%9D%A5%E3%81%AA%E3%81%84%E3%81%AE%E3%81%A7)%0A pr('int()', int(num))%0A%0A%0Adef go():%0A obj = Sample()%0A obj.exec()%0A%0A%0Aif __name__ == '__main__':%0A go()%0A
|
|
3b41e3bcc721d47ce5a7314af2ac4d9006598164
|
add word class
|
pythonfiles/word.py
|
pythonfiles/word.py
|
Python
| 0.006846
|
@@ -0,0 +1,961 @@
+%0A#Comtains models for storing words and data returned from parsing websites%0A#%0Aclass word(object):%0A def __init__(self,name=None,part_of_speech=None,definition=None,context=None,subwords=None,count=None,rating=None):%0A self.name=name%0A self.part_of_speech=part_of_speech%0A self.definition=definition%0A self.context=context%0A%0A self.subwords=subwords%0A %0A self.count=count%0A self.rating=rating%0A %0A def __str__(self):%0A return self.name%0A def get_name(self):%0A return self.name%0A def get_part_of_speech(self):%0A return self.part_of_speech%0A def get_definition(self):%0A return self.definition%0A def get_context(self):%0A return self.context%0A %0A def get_forms(self):%0A return self.forms%0A %0A def get_count(self):%0A return self.count%0A def get_rating(self):%0A return self.rating%0A%0A def set_count(self,value):%0A self.count = value%0A def incr_count(self,value):%0A self.count += value%0A
|
|
83f6febfc051c8d7d3462e9a60f4eae036fa0529
|
guessing the number genetic algorithm
|
numberGuesser.py
|
numberGuesser.py
|
Python
| 0.999916
|
@@ -0,0 +1,1168 @@
+import random%0Aimport time%0A%0A%0Adef go(lower_bound, upper_bound, bots):%0A iterations = 1%0A target = random.randint(lower_bound, upper_bound)%0A%0A while True:%0A print(f%22Target number: %7Btarget%7D%22)%0A answers = %5Brandom.randint(lower_bound, upper_bound)%0A for _ in range(bots)%5D%0A scores = %5Banswer - target for answer in answers%5D%0A curr_low = -upper_bound%0A curr_high = upper_bound%0A for score in scores:%0A answer = answers%5Bscores.index(score)%5D%0A%0A if score == 0:%0A print(%0A f%22Answer found: %7Banswers%5Bscores.index(score)%5D%7D%5CnIterations required: %7Biterations%7D %22)%0A return answer%0A%0A if score %3C 0 and answer %3E lower_bound:%0A lower_bound = answer%0A print(f%22Shifting lower bound to %7Blower_bound%7D%22)%0A%0A elif score %3E 0 and answer %3C upper_bound:%0A upper_bound = answer%0A print(f%22Shifting upper bound to %7Bupper_bound%7D%22)%0A%0A time.sleep(0.5)%0A%0A print(f%22Searching in new range %7Blower_bound%7D to %7Bupper_bound%7D%22)%0A iterations += 1%0A%0A%0Aif __name__ == %22__main__%22:%0A go(1, 100, 5)%0A
|
|
6ac6202837cade41b1dad5af9a474be171255993
|
Check that Constant is given Moments instance, not class
|
bayespy/inference/vmp/nodes/constant.py
|
bayespy/inference/vmp/nodes/constant.py
|
################################################################################
# Copyright (C) 2011-2012,2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
import numpy as np
from .node import Node
class Constant(Node):
r"""
Node for presenting constant values.
The node wraps arrays into proper node type.
"""
def __init__(self, moments, x, **kwargs):
self._moments = moments
x = np.asanyarray(x)
# Compute moments
self.u = self._moments.compute_fixed_moments(x)
# Dimensions of the moments
dims = self._moments.compute_dims_from_values(x)
# Resolve plates
D = len(dims[0])
if D > 0:
plates = np.shape(self.u[0])[:-D]
else:
plates = np.shape(self.u[0])
# Parent constructor
super().__init__(dims=dims, plates=plates, **kwargs)
def _get_id_list(self):
"""
Returns the stochastic ID list.
This method is used to check that same stochastic nodes are not direct
parents of a node several times. It is only valid if there are
intermediate stochastic nodes.
To put it another way: each ID corresponds to one factor q(..) in the
posterior approximation. Different IDs mean different factors, thus they
mean independence. The parents must have independent factors.
Stochastic nodes should return their unique ID. Deterministic nodes
should return the IDs of their parents. Constant nodes should return
empty list of IDs.
"""
return []
def get_moments(self):
return self.u
def set_value(self, x):
x = np.asanyarray(x)
shapes = [np.shape(ui) for ui in self.u]
self.u = self._moments.compute_fixed_moments(x)
for (i, shape) in enumerate(shapes):
if np.shape(self.u[i]) != shape:
raise ValueError("Incorrect shape for the array")
def lower_bound_contribution(self, gradient=False, **kwargs):
# Deterministic functions are delta distributions so the lower bound
# contribuion is zero.
return 0
|
Python
| 0.998847
|
@@ -295,16 +295,25 @@
ort Node
+, Moments
%0A%0Aclass
@@ -475,32 +475,196 @@
, x, **kwargs):%0A
+ if not isinstance(moments, Moments) and issubclass(moments, Moments):%0A raise ValueError(%22Give moments as an object instance instead of a class%22)%0A
self._mo
|
435004cebce00510db3bf36ae21b2cbf37020f32
|
add petpvc specs file
|
nipype/interfaces/tests/test_auto_PETPVC.py
|
nipype/interfaces/tests/test_auto_PETPVC.py
|
Python
| 0
|
@@ -0,0 +1,1545 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT%0Afrom ...testing import assert_equal%0Afrom ..petpvc import PETPVC%0A%0A%0Adef test_PETPVC_inputs():%0A input_map = dict(alpha=dict(argstr='-a %25.4f',%0A ),%0A args=dict(argstr='%25s',%0A ),%0A debug=dict(argstr='-d',%0A usedefault=True,%0A ),%0A environ=dict(nohash=True,%0A usedefault=True,%0A ),%0A fwhm_x=dict(argstr='-x %25.4f',%0A mandatory=True,%0A ),%0A fwhm_y=dict(argstr='-y %25.4f',%0A mandatory=True,%0A ),%0A fwhm_z=dict(argstr='-z %25.4f',%0A mandatory=True,%0A ),%0A ignore_exception=dict(nohash=True,%0A usedefault=True,%0A ),%0A in_file=dict(argstr='-i %25s',%0A mandatory=True,%0A ),%0A mask_file=dict(argstr='-m %25s',%0A mandatory=True,%0A ),%0A n_deconv=dict(argstr='-k %25d',%0A ),%0A n_iter=dict(argstr='-n %25d',%0A ),%0A out_file=dict(argstr='-o %25s',%0A genfile=True,%0A hash_files=False,%0A ),%0A pvc=dict(argstr='-p %25s',%0A mandatory=True,%0A ),%0A stop_crit=dict(argstr='-a %25.4f',%0A ),%0A terminal_output=dict(nohash=True,%0A ),%0A )%0A inputs = PETPVC.input_spec()%0A%0A for key, metadata in list(input_map.items()):%0A for metakey, value in list(metadata.items()):%0A yield assert_equal, getattr(inputs.traits()%5Bkey%5D, metakey), value%0A%0A%0Adef test_PETPVC_outputs():%0A output_map = dict(out_file=dict(),%0A )%0A outputs = PETPVC.output_spec()%0A%0A for key, metadata in list(output_map.items()):%0A for metakey, value in list(metadata.items()):%0A yield assert_equal, getattr(outputs.traits()%5Bkey%5D, metakey), value%0A
|
|
99e531ec0e86d7c1d34de154cc49584821f85904
|
Make all metavars uppercased
|
powerline/commands/config.py
|
powerline/commands/config.py
|
# vim:fileencoding=utf-8:noet
from __future__ import (division, absolute_import, print_function)
import argparse
import powerline.bindings.config as config
class StrFunction(object):
def __init__(self, function, name=None):
self.name = name or function.__name__
self.function = function
def __call__(self, *args, **kwargs):
self.function(*args, **kwargs)
def __str__(self):
return self.name
TMUX_ACTIONS = {
'source': StrFunction(config.source_tmux_files, 'source'),
}
SHELL_ACTIONS = {
'command': StrFunction(config.shell_command, 'command'),
'uses': StrFunction(config.uses),
}
class ConfigArgParser(argparse.ArgumentParser):
def parse_args(self, *args, **kwargs):
ret = super(ConfigArgParser, self).parse_args(*args, **kwargs)
if not hasattr(ret, 'function'):
# In Python-3* `powerline-config` (without arguments) raises
# AttributeError. I have not found any standard way to display same
# error message as in Python-2*.
self.error('too few arguments')
return ret
def get_argparser(ArgumentParser=ConfigArgParser):
parser = ArgumentParser(description='Script used to obtain powerline configuration.')
subparsers = parser.add_subparsers()
tmux_parser = subparsers.add_parser('tmux', help='Tmux-specific commands')
tmux_parser.add_argument(
'function',
choices=tuple(TMUX_ACTIONS.values()),
metavar='action',
type=(lambda v: TMUX_ACTIONS.get(v)),
help='If action is `source\' then version-specific tmux configuration files are sourced.'
)
shell_parser = subparsers.add_parser('shell', help='Shell-specific commands')
shell_parser.add_argument(
'function',
choices=tuple(SHELL_ACTIONS.values()),
type=(lambda v: SHELL_ACTIONS.get(v)),
metavar='action',
help='If action is `command\' then preferred powerline command is output, if it is `uses\' then powerline-config script will exit with 1 if specified component is disabled and 0 otherwise.',
)
shell_parser.add_argument(
'component',
nargs='?',
choices=('tmux', 'prompt'),
metavar='component',
)
shell_parser.add_argument(
'-s', '--shell',
metavar='SHELL',
help='Shell for which query is run',
)
return parser
|
Python
| 0.999999
|
@@ -1350,30 +1350,30 @@
%0A%09%09metavar='
-action
+ACTION
',%0A%09%09type=(l
@@ -1713,22 +1713,22 @@
etavar='
-action
+ACTION
',%0A%09%09hel
@@ -2015,25 +2015,25 @@
etavar='
-component
+COMPONENT
',%0A%09)%0A%09s
|
e3211df0a22571c9bd95f51d98dcc69dce7847aa
|
convert CT_GraphicalObjectData to xmlchemy
|
pptx/oxml/shapes/graphfrm.py
|
pptx/oxml/shapes/graphfrm.py
|
# encoding: utf-8
"""
lxml custom element class for CT_GraphicalObjectFrame XML element.
"""
from __future__ import absolute_import
from .. import parse_xml
from ..ns import nsdecls, qn
from .shared import BaseShapeElement
from .table import CT_Table
from ..xmlchemy import BaseOxmlElement, OneAndOnlyOne
class CT_GraphicalObject(BaseOxmlElement):
"""
``<a:graphic>`` element, which is the container for the reference to or
definition of the framed graphical object.
"""
graphicData = OneAndOnlyOne('a:graphicData')
class CT_GraphicalObjectData(BaseShapeElement):
"""
``<p:graphicData>`` element, the direct container for a table, a chart,
or another graphical object.
"""
@property
def tbl(self):
"""
The contained table object, or |None| if not present.
"""
return self.find(qn('a:tbl'))
class CT_GraphicalObjectFrame(BaseShapeElement):
"""
``<p:graphicFrame>`` element, which is a container for a table, a chart,
or another graphical object.
"""
nvGraphicFramePr = OneAndOnlyOne('p:nvGraphicFramePr')
graphic = OneAndOnlyOne('a:graphic')
DATATYPE_TABLE = 'http://schemas.openxmlformats.org/drawingml/2006/table'
_graphicFrame_tmpl = (
'<p:graphicFrame %s>\n'
' <p:nvGraphicFramePr>\n'
' <p:cNvPr id="%s" name="%s"/>\n'
' <p:cNvGraphicFramePr>\n'
' <a:graphicFrameLocks noGrp="1"/>\n'
' </p:cNvGraphicFramePr>\n'
' <p:nvPr/>\n'
' </p:nvGraphicFramePr>\n'
' <p:xfrm>\n'
' <a:off x="%s" y="%s"/>\n'
' <a:ext cx="%s" cy="%s"/>\n'
' </p:xfrm>\n'
' <a:graphic>\n'
' <a:graphicData/>\n'
' </a:graphic>\n'
'</p:graphicFrame>' %
(nsdecls('a', 'p'), '%d', '%s', '%d', '%d', '%d', '%d')
)
def get_or_add_xfrm(self):
"""
Return the required ``<p:xfrm>`` child element. Overrides version on
BaseShapeElement.
"""
return self.xfrm
@property
def has_table(self):
"""
True if graphicFrame contains a table, False otherwise.
"""
datatype = self.graphic.graphicData.get('uri')
if datatype == CT_GraphicalObjectFrame.DATATYPE_TABLE:
return True
return False
@staticmethod
def new_graphicFrame(id_, name, left, top, width, height):
"""
Return a new ``<p:graphicFrame>`` element tree suitable for containing
a table or chart. Note that a graphicFrame element is not a valid
shape until it contains a graphical object such as a table.
"""
xml = CT_GraphicalObjectFrame._graphicFrame_tmpl % (
id_, name, left, top, width, height)
graphicFrame = parse_xml(xml)
return graphicFrame
@staticmethod
def new_table(id_, name, rows, cols, left, top, width, height):
"""
Return a ``<p:graphicFrame>`` element tree populated with a table
element.
"""
graphicFrame = CT_GraphicalObjectFrame.new_graphicFrame(
id_, name, left, top, width, height)
# set type of contained graphic to table
graphicData = graphicFrame.graphic.graphicData
graphicData.set('uri', CT_GraphicalObjectFrame.DATATYPE_TABLE)
# add tbl element tree
tbl = CT_Table.new_tbl(rows, cols, width, height)
graphicData.append(tbl)
return graphicFrame
@property
def xfrm(self):
"""
The required ``<p:xfrm>`` child element
"""
return self.find(qn('p:xfrm'))
class CT_GraphicalObjectFrameNonVisual(BaseOxmlElement):
"""
``<p:nvGraphicFramePr>`` element, container for the non-visual properties
of a graphic frame, such as name, id, etc.
"""
cNvPr = OneAndOnlyOne('p:cNvPr')
|
Python
| 0.999995
|
@@ -300,16 +300,27 @@
dOnlyOne
+, ZeroOrOne
%0A%0A%0Aclass
@@ -728,150 +728,23 @@
-@property%0A def tbl(self):%0A %22%22%22%0A The contained table object, or %7CNone%7C if not present.%0A %22%22%22%0A return self.find(qn
+tbl = ZeroOrOne
('a:
@@ -748,17 +748,16 @@
'a:tbl')
-)
%0A%0A%0Aclass
|
c94653382a5a8aa2ec9af94f3aaa02ed5cae9b4c
|
Create test_send_sms.py
|
test_send_sms.py
|
test_send_sms.py
|
Python
| 0.000002
|
@@ -0,0 +1,1216 @@
+import serial%0Afrom curses import ascii%0A# since we need ascii code from CTRL-Z%0Aimport time%0A%0A# here we are testing sending an SMS via virtual serial port ttyUSB0 that was created by a USB serial modem%0A%0Aphonenumber = #enter phone number to send SMS to e.g. %22+441234123123%22%0ASMS = %22here's your SMS!%22%0Aser = serial.Serial('/dev/ttyUSB0', 460800, timeout=1)%0A# 460800 is baud rate, ttyUSB0 is virtual serial port we are sending to%0Aser.write(%22AT%5Cr%5Cn%22)%0A# send AT to the ttyUSB0 virtual serial port%0Aline = ser.readline()%0Aprint(line)%0A# what did we get back from AT command? Should be OK%0Aser.write(%22AT+CMGF=1%5Cr%5Cn%22)%0A# send AT+CMGF=1 so setting up for SMS followed by CR %0Aline = ser.readline()%0Aprint(line)%0A# what did we get back from that AT command?%0Aser.write('AT+CMGS=%22%25s%22%5Cr%5Cn' %25phonenumber)%0A# send AT+CMGS then CR, then phonenumber variable%0Aser.write(SMS)%0A# send the SMS variable after we sent the CR%0Aser.write(ascii.ctrl('z'))%0A# send a CTRL-Z after the SMS variable using ascii library%0Atime.sleep(10)%0A# wait 10 seconds%0Aprint ser.readline()%0Aprint ser.readline()%0Aprint ser.readline()%0Aprint ser.readline()%0A# what did we get back after we tried AT_CMGS=phonenumber followed%0A# by %3CCR%3E , then SMS variable, then %3CCTRL-Z%3E ascii code??%0A
|
|
c4f0d10e2112a391f716f58ffa449a490549fac6
|
find kth smallest
|
Python/kthsmallest.py
|
Python/kthsmallest.py
|
Python
| 0.999975
|
@@ -0,0 +1,1078 @@
+'''%0AFind the k-th smallest element in a union of two given sorted arrays,%0Aa and b%0A'''%0A%0A# O(k) implementation%0Adef findkth(a, b, k):%0A %0A i = 0%0A j = 0%0A found = 1%0A while i %3C len(a) and j%3C len(b):%0A if a%5Bi%5D %3C b%5Bj%5D:%0A smaller = a%5Bi%5D%0A i += 1%0A else:%0A smaller = b%5Bj%5D%0A j += 1%0A kth_smallest = smaller%0A if found == k:%0A return kth_smallest%0A else:%0A found += 1%0A %0A if i %3C len(a):%0A for num in a%5Bi:%5D:%0A kth_smallest = num%0A if found == k:%0A return kth_smallest%0A else:%0A found += 1%0A if j %3C len(b):%0A for num in b%5Bj:%5D:%0A kth_smallest = num%0A if found == k:%0A return kth_smallest%0A else:%0A found += 1%0A %0Aassert findkth(%5B1, 2, 4%5D, %5B3, 7, 11%5D, 3) == 3%0Aassert findkth(%5B1, 2, 4, 5%5D, %5B3, 7, 11%5D, 5) == 5%0Aassert findkth(%5B1, 2%5D, %5B3, 7, 11%5D, 4) == 7%0Aassert findkth(%5B1, 2%5D, %5B3, 7, 11%5D, 5) == 11%0Aassert findkth(%5B1, 2%5D, %5B-1, 7, 11%5D, 1) == -1%0A%0A
|
|
1dad9de4cf94d7f321176cf5b339b1a68f4ac6d2
|
Add template to LanguageAdmin; #551
|
judge/admin/runtime.py
|
judge/admin/runtime.py
|
from django.db.models import TextField
from django.forms import TextInput, ModelForm, ModelMultipleChoiceField
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from reversion.admin import VersionAdmin
from judge.models import Problem
from judge.widgets import AdminPagedownWidget, HeavySelect2MultipleWidget
class LanguageForm(ModelForm):
problems = ModelMultipleChoiceField(
label=_('Disallowed problems'),
queryset=Problem.objects.all(),
required=False,
help_text=_('These problems are NOT allowed to be submitted in this language'),
widget=HeavySelect2MultipleWidget(data_view='problem_select2'))
class LanguageAdmin(VersionAdmin):
fields = ('key', 'name', 'short_name', 'common_name', 'ace', 'pygments', 'info', 'description', 'problems')
list_display = ('key', 'name', 'common_name', 'info')
form = LanguageForm
if AdminPagedownWidget is not None:
formfield_overrides = {
TextField: {'widget': AdminPagedownWidget},
}
def save_model(self, request, obj, form, change):
super(LanguageAdmin, self).save_model(request, obj, form, change)
obj.problem_set = Problem.objects.exclude(id__in=form.cleaned_data['problems'].values('id'))
def get_form(self, request, obj=None, **kwargs):
self.form.base_fields['problems'].initial = \
Problem.objects.exclude(id__in=obj.problem_set.values('id')).values_list('pk', flat=True) if obj else []
return super(LanguageAdmin, self).get_form(request, obj, **kwargs)
class GenerateKeyTextInput(TextInput):
def render(self, name, value, attrs=None):
text = super(TextInput, self).render(name, value, attrs)
return mark_safe(text + format_html(
'''\
<a href="#" onclick="return false;" class="button" id="id_{0}_regen">Regenerate</a>
<script type="text/javascript">
(function ($) {{
$(document).ready(function () {{
$('#id_{0}_regen').click(function () {{
var length = 100,
charset = "abcdefghijklnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789`~!@#$%^&*()_+-=|[]{{}};:,<>./?",
key = "";
for (var i = 0, n = charset.length; i < length; ++i) {{
key += charset.charAt(Math.floor(Math.random() * n));
}}
$('#id_{0}').val(key);
}});
}});
}})(django.jQuery);
</script>
''', name))
class JudgeAdminForm(ModelForm):
class Meta:
widgets = {'auth_key': GenerateKeyTextInput}
if AdminPagedownWidget is not None:
widgets['description'] = AdminPagedownWidget
class JudgeAdmin(VersionAdmin):
form = JudgeAdminForm
readonly_fields = ('created', 'online', 'start_time', 'ping', 'load', 'last_ip', 'runtimes', 'problems')
fieldsets = (
(None, {'fields': ('name', 'auth_key')}),
(_('Description'), {'fields': ('description',)}),
(_('Information'), {'fields': ('created', 'online', 'last_ip', 'start_time', 'ping', 'load')}),
(_('Capabilities'), {'fields': ('runtimes', 'problems')}),
)
list_display = ('name', 'online', 'start_time', 'ping', 'load', 'last_ip')
ordering = ['-online', 'name']
def get_readonly_fields(self, request, obj=None):
if obj is not None and obj.online:
return self.readonly_fields + ('name',)
return self.readonly_fields
def has_delete_permission(self, request, obj=None):
result = super(JudgeAdmin, self).has_delete_permission(request, obj)
if result and obj is not None:
return not obj.online
return result
if AdminPagedownWidget is not None:
formfield_overrides = {
TextField: {'widget': AdminPagedownWidget},
}
|
Python
| 0
|
@@ -871,16 +871,28 @@
iption',
+ 'template',
'proble
|
e262d176ecd7d8871a9e06ebc542cf473acf0925
|
Add migration for transnational weights
|
reports/migrations/0004_transnational_weights.py
|
reports/migrations/0004_transnational_weights.py
|
Python
| 0.000016
|
@@ -0,0 +1,1040 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0Afrom django_countries import countries%0A%0Adef populate_weights(apps, schema_editor):%0A Weights = apps.get_model(%22reports%22, %22Weights%22)%0A db_alias = schema_editor.connection.alias%0A%0A for item in COUNTRY_WEIGHTS:%0A country = item%5B'Country'%5D%0A item.pop('Country')%0A for media_type, weight in item.iteritems():%0A w = Weights.objects.using(db_alias).create(%0A country=country,%0A media_type=media_type,%0A weight=weight)%0A w.save()%0A%0Adef backwards(apps, schema_editor):%0A pass%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('reports', '0003_indonesia-weights'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(%0A populate_weights,%0A backwards,%0A ),%0A %5D%0A%0ACOUNTRY_WEIGHTS= %5B%0A%7B'Country': 'T1',%0A 'Internet': '1',%0A 'Print': '1',%0A 'Radio': '1',%0A 'Television': '1',%0A 'Twitter': '1'%7D%5D%0A
|
|
6ff99aa939bf07d18595507efeca6ada7fc267a5
|
add a test thing for wilk
|
tools/ast-env.py
|
tools/ast-env.py
|
Python
| 0.000003
|
@@ -0,0 +1,2003 @@
+#!/usr/bin/env python%0A%0Aif __name__ == '__main__':%0A import os%0A import sys%0A import copy%0A import json%0A%0A sys.path.append(os.path.dirname(__file__))%0A%0A import logging%0A%0A logging.basicConfig(level=logging.DEBUG)%0A%0A import roush.db.database%0A from roush.db import api as db_api%0A from roush.db.database import init_db%0A%0A from sqlalchemy.orm import sessionmaker, create_session, scoped_session%0A from sqlalchemy.ext.declarative import declarative_base%0A%0A from roushclient.client import RoushEndpoint%0A%0A from roush.webapp.ast import FilterBuilder, FilterTokenizer%0A from roush.webapp.solver import Solver%0A%0A ep = RoushEndpoint()%0A%0A init_db('sqlite:///roush.db')%0A db_session = scoped_session(lambda: create_session(autocommit=False,%0A autoflush=False,%0A bind=engine))%0A%0A Base = declarative_base()%0A Base.query = db_session.query_property()%0A%0A ##########################%0A%0A ast_logger = logging.getLogger('roush.webapp.ast')%0A ast_logger.setLevel(logging.WARNING)%0A%0A expr1 = 'facts.woof = %22goober%22'%0A expr2 = 'facts.arf = %22woof%22'%0A%0A api = db_api.api_from_models()%0A%0A solver = Solver(api, 4, %5B'facts.ostype=%22hi%22'%5D)%0A solved, requires_input, plan = solver.solve()%0A%0A print 'Solver plan: %25s' %25 plan%0A%0A solver_from_plan = Solver.from_plan(api, 4,%0A %5B'facts.ostype=%22hi%22'%5D,%0A plan)%0A%0A new_plan = solver_from_plan.plan()%0A%0A print 'Solver plan: %25s' %25 new_plan%0A%0A print 'plans identical: %25s' %25 new_plan == plan%0A%0A print plan%0A print new_plan%0A%0A print json.dumps(solver_from_plan.adventure(), sort_keys=True, indent=4)%0A%0A%0A # foo = FilterBuilder(FilterTokenizer(),%0A # 'nodes: %22test%22 in union(facts.blah, %22test%22)')%0A # root_node = foo.build()%0A # print 'expression: %25s' %25 root_node.to_s()%0A # print 'inverted: %25s' %25 root_node.invert()%0A
|
|
b0577ce3b8b162ce3702430b189905f9beaae8d5
|
Add script to clean up all FD phone and fax numbers.
|
firecares/firestation/management/commands/cleanup_phonenumbers.py
|
firecares/firestation/management/commands/cleanup_phonenumbers.py
|
Python
| 0
|
@@ -0,0 +1,1445 @@
+from django.core.management.base import BaseCommand%0Afrom firecares.firestation.models import FireDepartment%0Afrom phonenumber_field.modelfields import PhoneNumber%0Aimport re%0A%0A%22%22%22%0AThis command is for cleaning up every phone and fax number in the%0Adatabase. It removes all non-numeric characters, such as parenthesis,%0Ahyphens, spaces, etc. It also removes prefixed 1s These numbers should%0Abe made human-readable on the client side.%0A%22%22%22%0A%0Adef cleanNumber(no1):%0A no2 = re.sub('%5B%5E0-9%5D','', no1)%0A if no2.startswith(%221%22):%0A no2 = no2%5B1:%5D%0A return no2%0A%0Aclass Command(BaseCommand):%0A%0A def handle(self, *args, **kwargs):%0A print(%22Don't worry, it always takes this long.%22)%0A%0A for fd in FireDepartment.objects.all():%0A # If the FD has a phone number, clean it up%0A if fd.headquarters_phone and not fd.headquarters_phone.raw_input == %22Invalid Input%22:%0A newPhone = cleanNumber(fd.headquarters_phone.raw_input)%0A print(newPhone)%0A fd.headquarters_phone = newPhone%0A # If the FD has a fax number, clean it up%0A if fd.headquarters_fax and not fd.headquarters_fax.raw_input == %22Invalid Input%22:%0A newFax = cleanNumber(fd.headquarters_fax.raw_input)%0A print(newFax)%0A fd.headquarters_fax = newFax%0A # Save and continue to the next FD (if any)%0A fd.save()%0A%0A print(%22Completed successfully!%22)%0A
|
|
370fa8682b09de76af36129817e412270c8206c8
|
add a new mgmt command for data analysis: compute the intersections of every pair of boundaries
|
boundaries/management/commands/compute_intersections.py
|
boundaries/management/commands/compute_intersections.py
|
Python
| 0.000158
|
@@ -0,0 +1,1548 @@
+from optparse import make_option%0A%0Afrom django.conf import settings%0Afrom django.core.management.base import BaseCommand%0Afrom django.contrib.gis.geos import MultiPolygon, Polygon%0A%0Afrom boundaries.models import BoundarySet, Boundary%0A%0Aclass Command(BaseCommand):%0A%09help = 'Create a report of the area of intersection of every pair of boundaries from two boundary sets specified by their slug.'%0A%09args = 'boundaryset1 boundaryset1'%0A%0A%09def handle(self, *args, **options):%0A%09%09if len(args) %3C 2:%0A%09%09%09print %22Specify two boundaryset slugs.%22%0A%09%09%09return%0A%09%09%09%0A%09%09bset_a = BoundarySet.objects.get(slug=args%5B0%5D)%0A%09%09bset_b = BoundarySet.objects.get(slug=args%5B1%5D)%0A%09%09%0A%09%09print bset_a.slug, %22area_1%22, bset_b.slug, %22area_2%22, %22area_intersection%22, %22pct_of_1%22, %22pct_of_2%22%0A%09%09%0A%09%09# For each boundary in the first set...%0A%09%09for a_slug in bset_a.boundaries.order_by(%22slug%22).values_list('slug', flat=True):%0A%09%09%09a_bdry = bset_a.boundaries.get(slug=a_slug)%0A%09%09%09a_area = a_bdry.shape.area%0A%09%09%09%0A%09%09%09# Find each intersecting boundary in the second set...%0A%09%09%09for b_bdry in bset_b.boundaries%5C%0A%09%09%09%09.filter(shape__intersects=a_bdry.shape):%0A%09%09%09%09%09%0A%09%09%09%09geometry = a_bdry.shape.intersection(b_bdry.shape)%0A%09%09%09%09int_area = geometry.area%09%09%09%09%0A%09%09%09%09if geometry.empty: continue%0A%09%09%09%09%0A%09%09%09%09b_area = b_bdry.shape.area%0A%09%09%09%09%0A%09%09%09%09# Skip overlaps that are less than .1%25 of the area of either of the shapes.%0A%09%09%09%09# These are probably not true overlaps.%0A%09%09%09%09if int_area/a_area %3C .001 or int_area/b_area %3C .001:%0A%09%09%09%09%09continue%0A%09%09%09%09%0A%09%09%09%09print a_slug, a_area, b_bdry.slug, b_area, int_area, int_area/a_area, int_area/b_area%0A%0A%09%09%0A%0A
|
|
66306f46417e8b0caa6b4e6d13d72e5ae6b226ef
|
include builds
|
corehq/apps/app_manager/management/commands/migrate_to_conditional_case_update.py
|
corehq/apps/app_manager/management/commands/migrate_to_conditional_case_update.py
|
from datetime import datetime
import traceback
from corehq.apps.app_manager.dbaccessors import wrap_app
from corehq.apps.app_manager.management.commands.helpers import AppMigrationCommandBase
from corehq.apps.domain.models import Domain
import random
def get_new_case_update_json(name_path):
    """Build the ConditionalCaseUpdate JSON blob for a case-name question.

    The 'always' update mode mirrors the legacy behavior of saving the
    value on every form submission.
    """
    case_update = {
        'question_path': name_path,
        'update_mode': 'always',
    }
    return case_update
class Command(AppMigrationCommandBase):
    help = """
    One-time migration to transition form action models to use ConditionalCaseUpdate as part of the new
    "save only if edited" feature: https://github.com/dimagi/commcare-hq/pull/30910.
    """
    include_linked_apps = True
    include_builds = False
    chunk_size = 5
    # Checkpoint files so an interrupted run can be resumed where it left off.
    DOMAIN_LIST_FILENAME = "migrate_to_cond_case_update_cmd_domain_list.txt"
    DOMAIN_PROGRESS_NUMBER_FILENAME = "migrate_to_cond_case_update_cmd_domain_progress.txt"
    APP_WRAPPING_ERRORS_LOG = "migrate_to_cond_case_update_wrapping_errors.txt"

    def add_arguments(self, parser):
        super().add_arguments(parser)
        # Used for a dry run on 1000 domains to get a taste of how long a full migration would take.
        parser.add_argument(
            '--num-domains-test',
            action='store',
            default=None,
            help='''For a dry run, use this argument to test on X number of domains. Dry run flag must be
            included and domain flag cannot be included.''',
        )

    def _has_been_migrated(self, app_doc):
        """Decide from the raw app doc whether it already uses ConditionalCaseUpdate.

        Returns True when an open-case action carries the new ``name_update``
        field (and no legacy ``name_path``); False when a legacy ``name_path``
        is found or nothing decisive is seen (migrate by default).
        """
        for module in app_doc['modules']:
            if module['module_type'] == "basic":
                for form in module['forms']:
                    actions = form.get('actions', '')
                    if actions:
                        open_case_action = actions.get('open_case', '')
                        if open_case_action:
                            if (open_case_action.get('name_update', '')
                                    and not open_case_action.get('name_path', '')):
                                return True
                            if open_case_action.get('name_path', ''):
                                return False
            elif module['module_type'] == "advanced":
                # Fix: scan the module's forms once. The previous version
                # re-iterated the full form list inside an outer loop over
                # the same forms (a shadowed loop variable), doing redundant
                # quadratic work with identical results.
                for form in module['forms']:
                    if form['form_type'] == 'advanced_form':
                        actions = form.get('actions', '')
                        if actions:
                            open_case_action = actions.get('open_cases', '')[0] \
                                if actions.get('open_cases', '') else None
                            if open_case_action:
                                if (open_case_action.get('name_update', '')
                                        and not open_case_action.get('name_path', '')):
                                    return True
                                if open_case_action.get('name_path', ''):
                                    return False
        # Catch-all; if it's all surveys or something else strange, migrate it by default
        return False

    def migrate_app(self, app_doc):
        """Return the wrapped app when it still needs migrating, else None.

        Wrapping the doc triggers the model-level conversion; wrapping
        failures are logged to APP_WRAPPING_ERRORS_LOG and the app skipped.
        """
        if self._has_been_migrated(app_doc):
            return None
        try:
            return wrap_app(app_doc)
        except Exception as e:
            print(e)
            self.log_error(app_doc)
            return None

    @property
    def num_domains_test(self):
        # Number of domains to sample for a sized dry run, or None for all.
        return self.options.get('num_domains_test', None)

    def get_domains(self):
        """Return the domains to migrate: a random sample for a sized dry
        run, otherwise every domain."""
        if self.is_dry_run and self.num_domains_test:
            print(self.num_domains_test)
            all_domain_names = Domain.get_all_names()
            random.shuffle(all_domain_names)
            return all_domain_names[:int(self.num_domains_test)]
        else:
            return Domain.get_all_names()

    def log_error(self, app_doc):
        """Append the active exception's traceback, tagged with domain and
        app id, to the wrapping-errors log file."""
        with open(self.APP_WRAPPING_ERRORS_LOG, 'a') as f:
            error_string = (f"{datetime.now()}\nOn domain: {app_doc['domain']}, "
                            f"App ID: {app_doc['_id']}\n{traceback.format_exc().strip()}\n")
            f.write(error_string)
|
Python
| 0
|
@@ -681,20 +681,19 @@
uilds =
-Fals
+Tru
e%0A ch
|
7a75174716ecbc0757c84f29ebbb3fd309521936
|
add management command to fire all repeaters in a domain
|
corehq/apps/cleanup/management/commands/fire_repeaters.py
|
corehq/apps/cleanup/management/commands/fire_repeaters.py
|
Python
| 0
|
@@ -0,0 +1,808 @@
+import datetime%0Afrom django.core.management.base import BaseCommand, CommandError%0Afrom corehq.apps.receiverwrapper.models import RepeatRecord%0Afrom dimagi.utils.post import simple_post%0A%0A%0Aclass Command(BaseCommand):%0A args = '%3Cdomain%3E'%0A help = 'Fire all repeaters in a domain.'%0A%0A def handle(self, *args, **options):%0A if len(args) == 1:%0A domain = args%5B0%5D%0A else:%0A raise CommandError('Usage: %25s%5Cn%25s' %25 (self.args, self.help))%0A%0A next_year = datetime.datetime.now() + datetime.timedelta(days=365)%0A records = RepeatRecord.all(domain=domain, due_before=next_year)%0A for record in records:%0A record.fire(post_fn=simple_post)%0A record.save()%0A print '%7B%7D %7B%7D'.format(record._id, 'successful' if record.succeeded else 'failed')%0A
|
|
d5d3fcfb331c1486acbfb004705b94b1923a0db8
|
Add code to dump features into libsvm file format
|
Codes/SuperEdge/SuperEdge/dump_libsvm.py
|
Codes/SuperEdge/SuperEdge/dump_libsvm.py
|
Python
| 0
|
@@ -0,0 +1,738 @@
+import numpy as np%0Afrom datetime import datetime%0Afrom sklearn.datasets import dump_svmlight_file%0Aimport os.path as path%0A%0Adef main():%0A cache_path = 'largecache/'%0A feat_name = 'feat.dat'%0A lbl_name = 'lbl.dat'%0A feat_len = 4224 #1088%0A now = datetime.now()%0A lbl_memmap = np.memmap(path.join(cache_path, lbl_name), dtype='uint8', mode='r')%0A feat_memmap = np.memmap(path.join(cache_path, feat_name), dtype='float32', mode='r', shape=(lbl_memmap.shape%5B0%5D, feat_len))%0A print 'loading dataset took ', (datetime.now() - now)%0A now = datetime.now()%0A print 'starting dumping feature files to libsvm format'%0A dump_svmlight_file(feat_memmap, lbl_memmap, 'largecache/data.train.txt')%0A%0Aif __name__ == '__main__':%0A main()
|
|
269f1b743583609ecdc7658e35073db9b985634c
|
rename http.py
|
examples/http_example.py
|
examples/http_example.py
|
Python
| 0.00223
|
@@ -0,0 +1,350 @@
+from poyonga.client import Groonga%0A%0Ag = Groonga()%0A%0Acmds = %5B(%22status%22, %7B%7D),%0A (%22log_level%22, %7B%22level%22: %22warning%22%7D),%0A #(%22table_create%22, %7B%22name%22: %22Site%22, %22flags%22: %22TABLE_HASH_KEY%22%7D),%0A (%22select%22, %7B%22table%22: %22Site%22%7D)%5D%0Afor cmd, kwargs in cmds:%0A ret = g.call(cmd, **kwargs)%0A print(ret.status)%0A print(ret.body)%0A print(%22*%22 * 40)%0A
|
|
3d976002db5de89c23071f32e0df2663ab7fa657
|
Fix bug in OpenCage geocoder.
|
geopy/geocoders/opencage.py
|
geopy/geocoders/opencage.py
|
"""
:class:`.OpenCage` is the Opencagedata geocoder.
"""
from geopy.compat import urlencode
from geopy.geocoders.base import Geocoder, DEFAULT_TIMEOUT, DEFAULT_SCHEME
from geopy.exc import (
GeocoderQueryError,
GeocoderQuotaExceeded,
)
from geopy.location import Location
from geopy.util import logger
__all__ = ("OpenCage", )
class OpenCage(Geocoder):
    """
    Geocoder using the Open Cage Data API. Documentation at:
        http://geocoder.opencagedata.com/api.html

    ..versionadded:: 1.1.0
    """

    def __init__(
            self,
            api_key,
            domain='api.opencagedata.com',
            scheme=DEFAULT_SCHEME,
            timeout=DEFAULT_TIMEOUT,
            proxies=None,
        ):  # pylint: disable=R0913
        """
        Initialize a customized Open Cage Data geocoder.

        :param string api_key: The API key required by Open Cage Data
            to perform geocoding requests. You can get your key here:
            https://developer.opencagedata.com/

        :param string domain: Currently it is 'api.opencagedata.com', can
            be changed for testing purposes.

        :param string scheme: Use 'https' or 'http' as the API URL's scheme.
            Default is https. Note that SSL connections' certificates are not
            verified.

        :param dict proxies: If specified, routes this geocoder's requests
            through the specified proxy. E.g., {"https": "192.0.2.0"}. For
            more information, see documentation on
            :class:`urllib2.ProxyHandler`.
        """
        super(OpenCage, self).__init__(
            scheme=scheme, timeout=timeout, proxies=proxies
        )

        self.api_key = api_key
        self.domain = domain.strip('/')
        self.scheme = scheme
        self.api = '%s://%s/geocode/v1/json' % (self.scheme, self.domain)

    def geocode(
            self,
            query,
            bounds=None,
            country=None,
            language=None,
            exactly_one=True,
            timeout=None,
        ):  # pylint: disable=W0221,R0913
        """
        Geocode a location query.

        :param string query: The query string to be geocoded; this must
            be URL encoded.

        :param string language: an IETF format language code (such as `es`
            for Spanish or pt-BR for Brazilian Portuguese); if this is
            omitted a code of `en` (English) will be assumed by the remote
            service.

        :param string bounds: Provides the geocoder with a hint to the region
            that the query resides in. This value will help the geocoder
            but will not restrict the possible results to the supplied
            region. The bounds parameter should be specified as 4
            coordinate points forming the south-west and north-east
            corners of a bounding box. For example,
            `bounds=-0.563160,51.280430,0.278970,51.683979`.

        :param string country: Provides the geocoder with a hint to the
            country that the query resides in. This value will help the
            geocoder but will not restrict the possible results to the
            supplied country. The country code is a 3 character code as
            defined by the ISO 3166-1 Alpha 3 standard.

        :param bool exactly_one: Return one result or a list of results, if
            available.

        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call
            only, the value set during the geocoder's initialization.
        """
        params = {
            'key': self.api_key,
            'q': self.format_string % query,
        }
        if bounds:
            params['bounds'] = bounds
        # Bug fix: these two previously re-tested `bounds`, so the language
        # and country hints were only sent when bounds was also given --
        # and then with None values.
        if language:
            params['language'] = language
        if country:
            params['country'] = country

        url = "?".join((self.api, urlencode(params)))
        logger.debug("%s.geocode: %s", self.__class__.__name__, url)
        return self._parse_json(
            self._call_geocoder(url, timeout=timeout), exactly_one
        )

    def reverse(
            self,
            query,
            language=None,
            exactly_one=False,
            timeout=None,
        ):  # pylint: disable=W0221,R0913
        """
        Given a point, find an address.

        :param query: The coordinates for which you wish to obtain the
            closest human-readable addresses.
        :type query: :class:`geopy.point.Point`, list or tuple of (latitude,
            longitude), or string as "%(latitude)s, %(longitude)s"

        :param string language: The language in which to return results.

        :param boolean exactly_one: Return one result or a list of results, if
            available.

        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call
            only, the value set during the geocoder's initialization.
        """
        params = {
            'key': self.api_key,
            'q': self._coerce_point_to_string(query),
        }
        if language:
            params['language'] = language

        url = "?".join((self.api, urlencode(params)))
        logger.debug("%s.reverse: %s", self.__class__.__name__, url)
        return self._parse_json(
            self._call_geocoder(url, timeout=timeout), exactly_one
        )

    def _parse_json(self, page, exactly_one=True):
        '''Returns location, (latitude, longitude) from json feed.'''
        places = page.get('results', [])
        if not len(places):
            self._check_status(page.get('status'))
            return None

        def parse_place(place):
            '''Get the location, lat, lng from a single json place.'''
            location = place.get('formatted')
            latitude = place['geometry']['lat']
            longitude = place['geometry']['lng']
            return Location(location, (latitude, longitude), place)

        if exactly_one:
            return parse_place(places[0])
        else:
            return [parse_place(place) for place in places]

    @staticmethod
    def _check_status(status):
        """
        Validates error statuses: raises on quota/denial errors, returns
        silently on a 200 with no results.
        """
        status_code = status['code']
        if status_code == 429:
            # Rate limit exceeded
            raise GeocoderQuotaExceeded(
                'The given key has gone over the requests limit in the 24'
                ' hour period or has submitted too many requests in too'
                ' short a period of time.'
            )
        if status_code == 200:
            # When there are no results, just return.
            return

        if status_code == 403:
            raise GeocoderQueryError(
                'Your request was denied.'
            )
        else:
            raise GeocoderQueryError('Unknown error.')
|
Python
| 0
|
@@ -3857,38 +3857,40 @@
unds%0A if
-bounds
+language
:%0A pa
@@ -3920,38 +3920,39 @@
uage%0A if
-bounds
+country
:%0A pa
|
95b304d2f0a9dc851926506795310f96c3312682
|
Add SQL example.
|
examples/sql_rockstar.py
|
examples/sql_rockstar.py
|
Python
| 0.000001
|
@@ -0,0 +1,177 @@
+from RockStar import RockStar%0A%0Asql_code = %22SELECT 'Hello World!';%22%0Arock_it_bro = RockStar(days=400, file_name='hello_world.sql', code=sql_code)%0Arock_it_bro.make_me_a_rockstar()%0A
|
|
d5cf05e40b638afbf12fd95cf721a22ad0f3281d
|
Create appointments.py
|
backend/appointments.py
|
backend/appointments.py
|
Python
| 0.000001
|
@@ -0,0 +1 @@
+%0A
|
|
dfd35c528004476e1ea534561835bcc167338c72
|
Add common javascript mimetypes to be detected as TextResponses, also improve mimetype detection to avoid querying twice classes dict
|
scrapy/core/downloader/responsetypes/__init__.py
|
scrapy/core/downloader/responsetypes/__init__.py
|
"""
This module implements a class which returns the appropiate Response class
based on different criterias.
"""
from os.path import abspath, dirname, join
from mimetypes import MimeTypes
from scrapy.http import Response
from scrapy.utils.misc import load_object
from scrapy.utils.python import isbinarytext
from scrapy.conf import settings
class ResponseTypes(object):
    """Choose the most appropriate Response class for a response based on
    mimetype, HTTP headers, URL, filename or body content.
    """

    # Default mimetype -> Response class mapping. The bare 'text' key is a
    # fallback for any unlisted text/* mimetype (see from_mimetype).
    CLASSES = {
        'text/html': 'scrapy.http.HtmlResponse',
        'application/atom+xml': 'scrapy.http.XmlResponse',
        'application/rdf+xml': 'scrapy.http.XmlResponse',
        'application/rss+xml': 'scrapy.http.XmlResponse',
        'application/xhtml+xml': 'scrapy.http.HtmlResponse',
        'application/xml': 'scrapy.http.XmlResponse',
        'text/xml': 'scrapy.http.XmlResponse',
        'text': 'scrapy.http.TextResponse',
    }

    def __init__(self):
        # Bug fix: merge user overrides into a copy instead of mutating the
        # shared class-level CLASSES dict, which leaked per-instance settings
        # into every other instance (and future defaults).
        class_map = dict(self.CLASSES)
        class_map.update(settings.get('RESPONSE_CLASSES', {}))
        self.classes = {}
        mimefile = join(abspath(dirname(__file__)), 'mime.types')
        self.mimetypes = MimeTypes([mimefile])
        for mimetype, cls in class_map.iteritems():
            self.classes[mimetype] = load_object(cls)

    def from_mimetype(self, mimetype):
        """Return the most appropriate Response class for the given mimetype"""
        if mimetype is not None:
            # Fall back to the base type (e.g. 'text') and finally Response.
            return self.classes.get(mimetype, self.classes.get(mimetype.split('/')[0], Response))
        else:
            return Response

    def from_content_type(self, content_type):
        """Return the most appropriate Response class from an HTTP Content-Type
        header """
        mimetype = content_type.split(';')[0].strip().lower()
        return self.from_mimetype(mimetype)

    def from_content_disposition(self, content_disposition):
        """Guess from the filename in a Content-Disposition header; fall back
        to the generic Response when the header has no filename part."""
        try:
            filename = content_disposition.split(';')[1].split('=')[1]
            filename = filename.strip('"\'')
            return self.from_filename(filename)
        except IndexError:
            return Response

    def from_headers(self, headers):
        """Return the most appropriate Response class by looking at the HTTP
        headers"""
        cls = Response
        if 'Content-Type' in headers:
            cls = self.from_content_type(headers['Content-type'])
        if cls is Response and 'Content-Disposition' in headers:
            cls = self.from_content_disposition(headers['Content-Disposition'])
        return cls

    def from_filename(self, filename):
        """Return the most appropriate Response class from a file name"""
        mimetype, encoding = self.mimetypes.guess_type(filename)
        # An encoding (e.g. gzip) means the payload is compressed binary.
        if mimetype and not encoding:
            return self.from_mimetype(mimetype)
        else:
            return Response

    def from_url(self, url):
        """Return the most appropriate Response class from a URL"""
        return self.from_mimetype(self.mimetypes.guess_type(url)[0])

    def from_body(self, body):
        """Try to guess the appropriate response based on the body content.
        This method is a bit magic and could be improved in the future, but
        it's not meant to be used except for special cases where response types
        cannot be guessed using more straightforward methods."""
        chunk = body[:5000]
        if isbinarytext(chunk):
            return self.from_mimetype('application/octet-stream')
        elif "<html>" in chunk.lower():
            return self.from_mimetype('text/html')
        elif "<?xml" in chunk.lower():
            return self.from_mimetype('text/xml')
        else:
            return self.from_mimetype('text')

    def from_args(self, headers=None, url=None, filename=None, body=None):
        """Guess the most appropriate Response class based on the given
        arguments, tried in decreasing order of reliability."""
        cls = Response
        if headers is not None:
            cls = self.from_headers(headers)
        if cls is Response and url is not None:
            cls = self.from_url(url)
        if cls is Response and filename is not None:
            cls = self.from_filename(filename)
        if cls is Response and body is not None:
            cls = self.from_body(body)
        return cls
responsetypes = ResponseTypes()
|
Python
| 0
|
@@ -715,32 +715,214 @@
p.XmlResponse',%0A
+ 'application/json': 'scrapy.http.TextResponse',%0A 'application/javascript': 'scrapy.http.TextResponse',%0A 'application/x-javascript': 'scrapy.http.TextResponse',%0A
'text/xm
@@ -965,16 +965,18 @@
'text
+/*
': 'scra
@@ -1481,28 +1481,24 @@
mimetype is
-not
None:%0A
@@ -1514,33 +1514,38 @@
urn
-self.classes.get(
+Response%0A elif
mimetype
, se
@@ -1540,17 +1540,19 @@
mimetype
-,
+ in
self.cl
@@ -1560,80 +1560,166 @@
sses
-.get(mimetype.split('/')%5B0%5D, Response))%0A else:%0A return
+:%0A return self.classes%5Bmimetype%5D%0A else:%0A basetype = %22%25s/*%22 %25 mimetype.split('/')%5B0%5D%0A return self.classes.get(basetype,
Res
@@ -1715,32 +1715,33 @@
setype, Response
+)
%0A%0A def from_c
|
b017c43c9bf46cd2e1fd7380904a1b022b6930ba
|
Create __init__.py
|
beprof/__init__.py
|
beprof/__init__.py
|
Python
| 0.000006
|
@@ -0,0 +1,5 @@
+pass%0A
|
|
6ef879ffec5b182c62c3673be155a1911291aef6
|
fix #563: split prog parts before formating in item's name
|
beetsplug/duplicates.py
|
beetsplug/duplicates.py
|
# This file is part of beets.
# Copyright 2013, Pedro Silva.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""List duplicate tracks or albums.
"""
import shlex
import logging
from beets.plugins import BeetsPlugin
from beets.ui import decargs, print_obj, vararg_callback, Subcommand, UserError
from beets.util import command_output, displayable_path, subprocess
# Plugin name, used to prefix error and log messages.
PLUGIN = 'duplicates'
log = logging.getLogger('beets')
def _process_item(item, lib, copy=False, move=False, delete=False,
                  tag=False, format=None):
    """Process Item `item` in `lib`.

    Depending on the flags: copy or move the underlying file, delete the
    item, and/or set a `k=v` flexible attribute; finally print the item
    using `format`.
    """
    if copy:
        item.move(basedir=copy, copy=True)
        item.store()
    if move:
        item.move(basedir=move, copy=False)
        item.store()
    if delete:
        item.remove(delete=True)
    if tag:
        try:
            k, v = tag.split('=')
        except ValueError:
            # Narrowed from a bare except: only a malformed k=v string.
            raise UserError('%s: can\'t parse k=v tag: %s' % (PLUGIN, tag))
        # Bug fix: setattr takes (object, name, value); the original called
        # setattr(k, v), which is a TypeError and never set the attribute.
        setattr(item, k, v)
        item.store()
    print_obj(item, lib, fmt=format)
def _checksum(item, prog):
    """Run external `prog` on file path associated with `item`, cache
    output as flexattr on a key that is the name of the program, and
    return the key, checksum tuple.
    """
    # Bug fix (#563): split the command template *before* substituting the
    # path so that file names containing spaces or shell-special characters
    # stay a single argument instead of being re-tokenized.
    args = [p.format(file=item.path) for p in shlex.split(prog)]
    key = args[0]
    checksum = getattr(item, key, False)
    if not checksum:
        log.debug('%s: key %s on item %s not cached: computing checksum',
                  PLUGIN, key, displayable_path(item.path))
        try:
            checksum = command_output(args)
            setattr(item, key, checksum)
            item.store()
            log.debug('%s: computed checksum for %s using %s',
                      PLUGIN, item.title, key)
        except subprocess.CalledProcessError as e:
            log.debug('%s: failed to checksum %s: %s',
                      PLUGIN, displayable_path(item.path), e)
    else:
        log.debug('%s: key %s on item %s cached: not computing checksum',
                  PLUGIN, key, displayable_path(item.path))
    return key, checksum
def _group_by(objs, keys):
"""Return a dictionary with keys arbitrary concatenations of attributes and
values lists of objects (Albums or Items) with those keys.
"""
import collections
counts = collections.defaultdict(list)
for obj in objs:
key = '\001'.join(repr(getattr(obj, k, '')) for k in keys)
counts[key].append(obj)
return counts
def _duplicates(objs, keys, full):
    """Yield (key, duplicate count, constituent objects) triples for every
    group of `objs` sharing the same `keys`. When `full` is false the
    first member of each group is treated as the original and skipped.
    """
    skip = 0 if full else 1
    for group_key, members in _group_by(objs, keys).iteritems():
        if len(members) > 1:
            yield (group_key, len(members) - skip, members[skip:])
class DuplicatesPlugin(BeetsPlugin):
    """List duplicate tracks or albums
    """
    def __init__(self):
        super(DuplicatesPlugin, self).__init__()

        # Config defaults; each option below has a matching CLI flag.
        self.config.add({
            'format': '',
            'count': False,
            'album': False,
            'full': False,
            'path': False,
            'keys': ['mb_trackid', 'mb_albumid'],
            'checksum': None,
            'copy': False,
            'move': False,
            'delete': False,
            'tag': False,
        })

        self._command = Subcommand('duplicates',
                                   help=__doc__,
                                   aliases=['dup'])

        # CLI flags mirror the config options declared above.
        self._command.parser.add_option('-f', '--format', dest='format',
                                        action='store', type='string',
                                        help='print with custom format',
                                        metavar='FMT')

        self._command.parser.add_option('-a', '--album', dest='album',
                                        action='store_true',
                                        help='show duplicate albums instead of'
                                        ' tracks')

        self._command.parser.add_option('-c', '--count', dest='count',
                                        action='store_true',
                                        help='show duplicate counts')

        self._command.parser.add_option('-C', '--checksum', dest='checksum',
                                        action='store', metavar='PROG',
                                        help='report duplicates based on'
                                        ' arbitrary command')

        self._command.parser.add_option('-d', '--delete', dest='delete',
                                        action='store_true',
                                        help='delete items from library and '
                                        'disk')

        self._command.parser.add_option('-F', '--full', dest='full',
                                        action='store_true',
                                        help='show all versions of duplicate'
                                        ' tracks or albums')

        self._command.parser.add_option('-k', '--keys', dest='keys',
                                        action='callback', metavar='KEY1 KEY2',
                                        callback=vararg_callback,
                                        help='report duplicates based on keys')

        self._command.parser.add_option('-m', '--move', dest='move',
                                        action='store', metavar='DEST',
                                        help='move items to dest')

        self._command.parser.add_option('-o', '--copy', dest='copy',
                                        action='store', metavar='DEST',
                                        help='copy items to dest')

        self._command.parser.add_option('-p', '--path', dest='path',
                                        action='store_true',
                                        help='print paths for matched items or'
                                        ' albums')

        self._command.parser.add_option('-t', '--tag', dest='tag',
                                        action='store',
                                        help='tag matched items with \'k=v\''
                                        ' attribute')

    def commands(self):
        # The subcommand body: resolve options (CLI overrides config), pick
        # the objects and grouping keys, then act on each duplicate group.
        def _dup(lib, opts, args):
            self.config.set_args(opts)
            fmt = self.config['format'].get()
            album = self.config['album'].get(bool)
            full = self.config['full'].get(bool)
            keys = self.config['keys'].get()
            checksum = self.config['checksum'].get()
            copy = self.config['copy'].get()
            move = self.config['move'].get()
            delete = self.config['delete'].get(bool)
            tag = self.config['tag'].get()

            if album:
                keys = ['mb_albumid']
                items = lib.albums(decargs(args))
            else:
                items = lib.items(decargs(args))

            if self.config['path']:
                fmt = '$path'

            # Default format string for count mode.
            if self.config['count'] and not fmt:
                if album:
                    fmt = '$albumartist - $album'
                else:
                    fmt = '$albumartist - $album - $title'
                fmt += ': {0}'

            # With -C, group by the external program's cached output instead.
            if checksum:
                for i in items:
                    k, _ = _checksum(i, checksum)
                keys = [k]

            for obj_id, obj_count, objs in _duplicates(items,
                                                       keys=keys,
                                                       full=full):
                if obj_id:  # Skip empty IDs.
                    for o in objs:
                        _process_item(o, lib,
                                      copy=copy,
                                      move=move,
                                      delete=delete,
                                      tag=tag,
                                      format=fmt.format(obj_count))

        self._command.func = _dup
        return [self._command]
|
Python
| 0
|
@@ -1766,24 +1766,10 @@
s =
-shlex.split(prog
+%5Bp
.for
@@ -1783,25 +1783,67 @@
e=item.path)
-)
+ for p in shlex.split(prog)%5D%0A print args
%0A key = a
|
753388550e4ea7a8b09ddb22189021be3585a5e5
|
Clean PlexUpdate plugin
|
beetsplug/plexupdate.py
|
beetsplug/plexupdate.py
|
"""Updates an Plex library whenever the beets library is changed.
Put something like the following in your config.yaml to configure:
plex:
host: localhost
port: 32400
"""
import requests
from urlparse import urljoin
import xml.etree.ElementTree as ET
from beets import config
from beets.plugins import BeetsPlugin
# Module-level flag set by the database-change listener so that the Plex
# refresh is run at most once, right before beets exits.
database_changed = False
def get_music_section(host, port):
    """Ask the Plex server for the section key of the 'Music' library.

    Returns None implicitly when no music section is present.
    """
    sections_url = urljoin('http://{0}:{1}'.format(host, port),
                           'library/sections')
    response = requests.get(sections_url)

    # Walk the XML section listing and pick out the music library's key.
    root = ET.fromstring(response.text)
    for directory in root.findall('Directory'):
        if directory.get('title') == 'Music':
            return directory.get('key')
def update_plex(host, port):
    """Trigger a refresh of the Plex music library and return the
    requests response object.
    """
    # The refresh endpoint is scoped to the music section's key.
    refresh_path = 'library/sections/{0}/refresh'.format(
        get_music_section(host, port))
    refresh_url = urljoin('http://{0}:{1}'.format(host, port), refresh_path)
    return requests.get(refresh_url)
class PlexUpdate(BeetsPlugin):
    def __init__(self):
        super(PlexUpdate, self).__init__()

        # Fall back to a Plex server on the local default port when the
        # user has not configured one.
        defaults = {
            u'host': u'localhost',
            u'port': 32400}
        config['plex'].add(defaults)
@PlexUpdate.listen('database_change')
def listen_for_db_change(lib=None):
    """Record that the beets database changed, so that the Plex refresh
    fires exactly once when the CLI exits.
    """
    global database_changed
    database_changed = True
@PlexUpdate.listen('cli_exit')
def update(lib=None):
    """On CLI exit, ask the Plex server to refresh its library -- but only
    if the beets database actually changed during this run.
    """
    if not database_changed:
        return
    print('Updating Plex library...')

    # Best effort: report a failure instead of crashing on network errors.
    try:
        update_plex(
            config['plex']['host'].get(),
            config['plex']['port'].get())
    except requests.exceptions.RequestException:
        print('Update failed.')
    else:
        print('... started.')
|
Python
| 0
|
@@ -334,142 +334,8 @@
n%0A%0A%0A
-# Global variable to detect if database is changed that the update%0A# is only run once before beets exists.%0Adatabase_changed = False%0A%0A%0A
def
@@ -1441,35 +1441,46 @@
2400%7D)%0A%0A
-%0A@PlexUpdate.
+ self.register_
listen
+er
('databa
@@ -1489,18 +1489,50 @@
_change'
-)%0A
+, self.listen_for_db_change)%0A%0A
def list
@@ -1548,27 +1548,32 @@
_change(
-lib=None):%0A
+self, lib):%0A
%22%22%22L
@@ -1607,142 +1607,73 @@
and
-set global database_changed%0A variable to True.%0A %22%22%22%0A global database_changed%0A database_changed = True%0A%0A%0A@PlexUpdate.
+register the update for the end%22%22%22%0A self.register_
listen
+er
('cl
@@ -1683,114 +1683,88 @@
xit'
-)%0Adef update(lib=None):%0A %22%22%22When the client exists and the database_changed variable is True%0A trying
+, self.update)%0A%0A def update(self, lib):%0A %22%22%22When the client exists try
to
@@ -1808,50 +1808,38 @@
-%22%22%22%0A if database_changed:%0A print
+ %22%22%22%0A self._log.info
('Up
@@ -2044,21 +2044,30 @@
-print
+self._log.info
('... st
@@ -2145,13 +2145,25 @@
-print
+self._log.warning
('Up
|
e5bdbfb075bf95b7834f2db8c7d6ffa69cb03bc0
|
Create convnet_basic.py
|
Classifying_datasets/statoil/convnet_basic.py
|
Classifying_datasets/statoil/convnet_basic.py
|
Python
| 0.000004
|
@@ -0,0 +1,4 @@
+adi%0A
|
|
6f3bb73dd387fd73022a20c3a45adf05213339cf
|
add new package (#20109)
|
var/spack/repos/builtin/packages/py-rosinstall-generator/package.py
|
var/spack/repos/builtin/packages/py-rosinstall-generator/package.py
|
Python
| 0
|
@@ -0,0 +1,840 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0A%0Aclass PyRosinstallGenerator(PythonPackage):%0A %22%22%22A tool for generating rosinstall files.%22%22%22%0A%0A homepage = %22https://wiki.ros.org/rosinstall_generator%22%0A url = %22https://pypi.io/packages/source/r/rosinstall-generator/rosinstall_generator-0.1.22.tar.gz%22%0A%0A version('0.1.22', sha256='22d22599cd3f08a1f77fb2b1d9464cc8062ede50752a75564d459fcf5447b8c5')%0A%0A depends_on('py-catkin-pkg@0.1.28:', type=('build', 'run'))%0A depends_on('py-rosdistro@0.7.3:', type=('build', 'run'))%0A depends_on('py-rospkg', type=('build', 'run'))%0A depends_on('py-pyyaml', type=('build', 'run'))%0A depends_on('py-setuptools', type=('build', 'run'))%0A
|
|
a2516d28c86fd23efcb893e59de42b33526bfe6f
|
Add a Python Tkinter example showing how to map a scale widget.
|
swig/tkgui.py
|
swig/tkgui.py
|
Python
| 0
|
@@ -0,0 +1,805 @@
+#!/usr/bin/env python%0A%0Aimport Tkinter%0Aimport sys%0Aimport mapper%0A%0Adef on_gui_change(x):%0A# print 'on_gui_change',x,x.__class__%0A sig_out.update_scalar(int(x))%0A%0Adef on_mapper_change(sig, x):%0A# print 'on_mapper_change', x, x.__class__%0A w.set(int(x))%0A%0Adev = mapper.device(%22tkgui%22, 9000)%0A%0Asig_in = mapper.signal(1, %22/signal0%22, None, 'f', on_mapper_change)%0Adev.register_input(sig_in)%0A%0Asig_out = mapper.signal(1, %22/signal0%22, None, 'f', lambda x: x)%0Adev.register_output(sig_out)%0A%0Amaster = Tkinter.Tk()%0Amaster.title(%22libmapper Python GUI demo%22)%0A%0Aw = Tkinter.Scale(master, from_=0, to=100, label='signal0',%0A orient=Tkinter.HORIZONTAL, length=300,%0A command=on_gui_change)%0A%0Aw.pack()%0A%0Adef do_poll():%0A dev.poll(20)%0A master.after(5, do_poll)%0A%0Ado_poll()%0Amaster.mainloop()%0A
|
|
99578401585435c08aed6f29e090fbde955423fd
|
Create good2d.py
|
rocksetta-examples/good2d.py
|
rocksetta-examples/good2d.py
|
Python
| 0.000004
|
@@ -0,0 +1,949 @@
+import tensorflow as tf%0Aimport numpy as np%0A%0A# Create 100 phony x, y data points in NumPy, y = x * 0.1 + 0.3%0Ax_data = np.random.rand(100).astype(%22float32%22)%0Ay_data = x_data * 0.1 + 0.3%0A%0A# Try to find values for W and b that compute y_data = W * x_data + b%0A# (We know that W should be 0.1 and b 0.3, but Tensorflow will%0A# figure that out for us.)%0AW = tf.Variable(tf.random_uniform(%5B1%5D, -1.0, 1.0))%0Ab = tf.Variable(tf.zeros(%5B1%5D))%0Ay = W * x_data + b%0A%0A# Minimize the mean squared errors.%0Aloss = tf.reduce_mean(tf.square(y - y_data))%0Aoptimizer = tf.train.GradientDescentOptimizer(0.5)%0Atrain = optimizer.minimize(loss)%0A%0A# Before starting, initialize the variables. We will 'run' this first.%0Ainit = tf.initialize_all_variables()%0A%0A# Launch the graph.%0Asess = tf.Session()%0Asess.run(init)%0A%0A# Fit the line.%0Afor step in xrange(201):%0A sess.run(train)%0A if step %25 20 == 0:%0A print(step, sess.run(W), sess.run(b))%0A%0A# Learns best fit is W: %5B0.1%5D, b: %5B0.3%5D%0A
|
|
35e51f55adfbe8383bfb3d34688eb7aee89a8351
|
add shuffle_by_year script, to toss messages into yearly folders
|
shuffle_by_year.py
|
shuffle_by_year.py
|
Python
| 0
|
@@ -0,0 +1,1881 @@
+#!/usr/bin/env python%0A%0A# Q&D script to sort mail into subfolders by year.%0A# Reduces the burden upon the filesystem gnomes.%0A%0ADIRPATH = %22/stor0/backups/imapbak/rtucker/Fastmail-rey_fmgirl_com%22%0A%0Aimport email%0Aimport mailbox%0Aimport imap2maildir%0Aimport sys%0Aimport time%0Aimport os%0A%0Adef main():%0A db = imap2maildir.open_sql_session(DIRPATH + %22/.imap2maildir.sqlite%22)%0A mbox = mailbox.Maildir(DIRPATH, False)%0A%0A try:%0A%0A counter = 0%0A c = db.cursor()%0A%0A for result in db.execute(%22select mailfile,folder from seenmessages where folder is null or folder = ''%22):%0A key = result%5B0%5D%0A msg = mbox.get_message(key)%0A%0A year = None%0A%0A if 'Date' in msg:%0A ttup = email.utils.parsedate(msg%5B'Date'%5D)%0A if ttup:%0A year = ttup%5B0%5D%0A%0A if year is None:%0A tstamp = msg.get_date()%0A year = time.gmtime(tstamp).tm_year%0A print(key + %22 has no valid Date header; going with %22 + str(year))%0A%0A ybox = mbox.add_folder(str(year))%0A%0A ybox.lock()%0A newkey = ybox.add(msg)%0A ybox.flush()%0A ybox.unlock()%0A%0A c.execute(%22update seenmessages set mailfile = ?, folder = ? where mailfile = ?%22, (newkey, year, key))%0A%0A mbox.lock()%0A mbox.discard(key)%0A mbox.flush()%0A mbox.unlock()%0A%0A print(%22moved %22 + key + %22 to %22 + str(year) + %22/%22 + newkey)%0A%0A counter += 1%0A%0A if counter %25 25 == 0:%0A print(%22committing db%22)%0A db.commit()%0A sys.stdout.flush()%0A%0A if os.path.exists(%22.STOP%22):%0A print(%22stop requested%22)%0A os.unlink(%22.STOP%22)%0A break%0A%0A finally:%0A mbox.unlock()%0A db.commit()%0A%0Aif __name__ == %22__main__%22:%0A main()%0A%0A
|
|
7423de8d2222e81693fe380a6b4c8638a96a9db8
|
Create urls.py
|
urls.py
|
urls.py
|
Python
| 0.000017
|
@@ -0,0 +1,551 @@
+from django.conf.urls import patterns, include, url%0A%0A# Uncomment the next two lines to enable the admin:%0Afrom django.contrib import admin%0A%0Afrom pysoldev import settings%0A%0Aadmin.autodiscover()%0A%0Aurlpatterns = patterns('',%0A url(r'%5Eadmin/', include(admin.site.urls)),%0A url(r'%5E$', 'pysoldev.views.home', name='home'),%0A url(r'%5E$', 'pysoldev.app.views.index', name='index'),%0A%0A)%0A%0Aif settings.DEBUG:%0A urlpatterns += patterns('',%0A (r'%5Emedia/(?P%3Cpath%3E.*)$', 'django.views.static.serve', %0A %7B'document_root': settings.MEDIA_ROOT%7D),%0A )%0A
|
|
0079676729fa8023bea93fcf03bb48c4ff24a495
|
add partition
|
resource-4/combinatorics/integer-partitions/partition1.py
|
resource-4/combinatorics/integer-partitions/partition1.py
|
Python
| 0.000007
|
@@ -0,0 +1,271 @@
+# counting partitions%0Adef partition1(n,k=-1):%0A%09if (k == -1):%0A%09%09return sum(%5Bpartition1(n,i) for i in range(1,n+1)%5D)%0A%09if (n %3C k):%0A%09%09return 0%0A%09if((n==0) or (n==1)):%0A%09%09return 1%0A%09if((k==1) or (n==k)):%0A%09%09return 1%0A%09return sum(%5Bpartition1(n-k,i) for i in range(1,min(k,n-k)+1)%5D)%0A
|
|
a787ceea91abf1c0fbeb3b97e063d3ec1aa61b57
|
Create implement-rand10-using-rand7.py
|
Python/implement-rand10-using-rand7.py
|
Python/implement-rand10-using-rand7.py
|
Python
| 0.000022
|
@@ -0,0 +1,954 @@
+# Time: O(1)%0A# Space: O(1)%0A%0A# Given a function rand7 which generates a uniform random integer in the range 1 to 7,%0A# write a function rand10 which generates a uniform random integer in the range 1 to 10.%0A#%0A# Do NOT use system's Math.random().%0A#%0A# Example 1:%0A#%0A# Input: 1%0A# Output: %5B7%5D%0A# Example 2:%0A#%0A# Input: 2%0A# Output: %5B8,4%5D%0A# Example 3:%0A#%0A# Input: 3%0A# Output: %5B8,1,10%5D%0A#%0A# Note:%0A#%0A# rand7 is predefined.%0A# Each testcase has one argument: n, the number of times that rand10 is called.%0A#%0A# Follow up:%0A# - What is the expected value for the number of calls to rand7() function?%0A# - Could you minimize the number of calls to rand7()?%0A#%0A# The rand7() API is already defined for you.%0A# def rand7():%0A# @return a random integer in the range 1 to 7%0A%0Aclass Solution(object):%0A def rand10(self):%0A %22%22%22%0A :rtype: int%0A %22%22%22%0A while True:%0A x = (rand7()-1)*7 + (rand7()-1)%0A if x %3C 40:%0A return x%2510 + 1%0A
|
|
b659dd572bd92e10bde8899540792bdb26529a45
|
add qdb_test
|
scripts/qdb_test.py
|
scripts/qdb_test.py
|
Python
| 0.000001
|
@@ -0,0 +1,367 @@
+from qdb import set_trace, RemoteCommandManager%0A%0Adef f():%0A in_f = True%0A return 'getting out of f'%0A%0Adef main():%0A set_trace(%0A uuid='qdb',%0A host='localhost',%0A port=8001,%0A cmd_manager=RemoteCommandManager(),%0A )%0A mutable_object = %7B%7D%0A print 'Hello world!'%0A f()%0A print mutable_object%0A%0Aif __name__ == '__main__':%0A main()
|
|
5c02d7ccb9721e0b02cdd31edc36be095a4568e2
|
test the Meta and the MetaMixin classes
|
tests/meta.py
|
tests/meta.py
|
Python
| 0
|
@@ -0,0 +1,1907 @@
+import unittest%0Aimport slumber%0A%0A%0Aclass MetaTestCase(unittest.TestCase):%0A%0A def test_init_kwargs_to_attributes(self):%0A m = slumber.Meta(item1=%22test%22, item2=41, item3=%22example%22)%0A%0A self.assertEqual(m.item1, %22test%22)%0A self.assertEqual(m.item2, 41)%0A self.assertEqual(m.item3, %22example%22)%0A%0A%0Aclass MetaMixinTestCase(unittest.TestCase):%0A%0A def test_init_kwargs_to_meta(self):%0A class MetaMixinTest(slumber.MetaMixin, object):%0A class Meta:%0A item1 = None%0A item2 = None%0A item3 = None%0A%0A mmt = MetaMixinTest(item1=%22test%22, item2=41, item3=%22example%22)%0A%0A self.assertTrue(hasattr(mmt, %22_meta%22))%0A self.assertTrue(isinstance(mmt._meta, slumber.Meta))%0A%0A self.assertEqual(mmt._meta.item1, %22test%22)%0A self.assertEqual(mmt._meta.item2, 41)%0A self.assertEqual(mmt._meta.item3, %22example%22)%0A%0A def test_meta_to_meta_defaults(self):%0A class MetaMixinTest(slumber.MetaMixin, object):%0A class Meta:%0A item1 = None%0A item2 = None%0A item3 = None%0A%0A mmt = MetaMixinTest()%0A%0A self.assertTrue(hasattr(mmt, %22_meta%22))%0A self.assertTrue(isinstance(mmt._meta, slumber.Meta))%0A%0A self.assertEqual(mmt._meta.item1, None)%0A self.assertEqual(mmt._meta.item2, None)%0A self.assertEqual(mmt._meta.item3, None)%0A%0A def test_meta_to_meta_defaults_with_init_kwargs(self):%0A class MetaMixinTest(slumber.MetaMixin, object):%0A class Meta:%0A item1 = None%0A item2 = None%0A item3 = None%0A%0A mmt = MetaMixinTest(item2=41)%0A%0A self.assertTrue(hasattr(mmt, %22_meta%22))%0A self.assertTrue(isinstance(mmt._meta, slumber.Meta))%0A%0A self.assertEqual(mmt._meta.item1, None)%0A self.assertEqual(mmt._meta.item2, 41)%0A self.assertEqual(mmt._meta.item3, None)%0A
|
|
4c3c9c6929ebc3f439ccf3bb7d3696f484b154bc
|
Add missing noop-migrations for PositiveIntegerField
|
karspexet/ticket/migrations/0017_positive_integers_20180322_2056.py
|
karspexet/ticket/migrations/0017_positive_integers_20180322_2056.py
|
Python
| 0.00008
|
@@ -0,0 +1,1241 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.10.1 on 2018-03-22 19:56%0Afrom __future__ import unicode_literals%0A%0Aimport django.core.validators%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('ticket', '0016_add_voucher_note_20180213_2307'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='discount',%0A name='amount',%0A field=models.PositiveIntegerField(validators=%5Bdjango.core.validators.MinValueValidator(100), django.core.validators.MaxValueValidator(5000)%5D),%0A ),%0A migrations.AlterField(%0A model_name='reservation',%0A name='ticket_price',%0A field=models.PositiveIntegerField(),%0A ),%0A migrations.AlterField(%0A model_name='reservation',%0A name='total',%0A field=models.PositiveIntegerField(),%0A ),%0A migrations.AlterField(%0A model_name='ticket',%0A name='price',%0A field=models.PositiveIntegerField(),%0A ),%0A migrations.AlterField(%0A model_name='voucher',%0A name='amount',%0A field=models.PositiveIntegerField(help_text='Rabatt i SEK'),%0A ),%0A %5D%0A
|
|
af508daaf016b824c7518a36f9b92f571f0f65af
|
Implement management command for creating demo records of balance history (NC-842)
|
nodeconductor/structure/management/commands/init_balance_history.py
|
nodeconductor/structure/management/commands/init_balance_history.py
|
Python
| 0
|
@@ -0,0 +1,791 @@
+from datetime import timedelta%0A%0Afrom django.core.management.base import BaseCommand%0Afrom django.utils import timezone%0A%0Afrom nodeconductor.structure.models import BalanceHistory%0Afrom nodeconductor.structure.models import Customer%0A%0A%0Aclass Command(BaseCommand):%0A help = %22%22%22 Initialize demo records of balance history %22%22%22%0A%0A def handle(self, *args, **options):%0A self.stdout.write('Creating demo records of balance history for all customers')%0A for customer in Customer.objects.all():%0A for i in range(10):%0A BalanceHistory.objects.create(customer=customer,%0A created=timezone.now() - timedelta(days=i),%0A amount=100 + i * 10)%0A%0A self.stdout.write('... Done')%0A
|
|
7f1883275e9aa0b489de99947db7daf87f616ed4
|
solve k58
|
chap06/k58.py
|
chap06/k58.py
|
Python
| 0.999919
|
@@ -0,0 +1,910 @@
+#%0A# usage: python k58.py %7Bfile name%7D %7Bnumber%7D%0A#%0A%0Aimport sys%0Afrom xml.etree import ElementTree as ET%0A%0Adef get_tuple(nll, dll):%0A return %5B%5Bp, %5Bn%5B1%5D for n in nl if n%5B0%5D == p%5D%5B0%5D,%0A %5Bd%5B1%5D for d in dl if d%5B0%5D == p%5D%5B0%5D%5D%0A for nl, dl in zip(nll, dll)%0A for p in list(%7Bn%5B0%5D for n in nl%7D & %7Bd%5B0%5D for d in dl%7D)%5D%0A %0Aif __name__ == '__main__':%0A fn = sys.argv%5B1%5D%0A%0A root = ET.parse(fn).getroot()%0A cdl = %5Bd for d in root.findall('document/sentences/*/dependencies')%0A if d.get('type') == 'collapsed-dependencies'%5D%0A nll = %5B%5B%5Bn.find('governor').text, n.find('dependent').text%5D%0A for n in e.findall('*%5B@type=%22nsubj%22%5D')%5D%0A for e in cdl%5D%0A dll = %5B%5B%5Bd.find('governor').text, d.find('dependent').text%5D%0A for d in e.findall('*%5B@type=%22dobj%22%5D')%5D%0A for e in cdl%5D%0A%0A for t in get_tuple(nll, dll):%0A print('%5Ct'.join(t))%0A
|
|
a9d0a3dcc5221adbca5142a0cd7548ef874afd45
|
add script to make matrix
|
matrix/build_maxrix.py
|
matrix/build_maxrix.py
|
Python
| 0.000001
|
@@ -0,0 +1,840 @@
+WIDTH = 10%0AHEIGHT = 10%0AHORIZONTAL_NEXT = %22hnext%22%0AVERTICAL_NEXT = %22vnext%22%0ABASE = %22Cell%22%0A%0Aprint %22%22%22%0Aabstract sig %25(BASE)s %7B%0A %25(HORIZONTAL_NEXT)s: lone %25(BASE)s,%0A %25(VERTICAL_NEXT)s: lone %25(BASE)s%0A%7D%0A%22%22%22 %25 globals()%0A%0Afor x in range(WIDTH):%0A for y in range(HEIGHT):%0A print %22one sig Cell_%25d_%25d extends %25s %7B%7D%22 %25 (x, y, BASE)%0A%0A# fact%0Aprint %22fact matrix_adj %7B%22%0Afor x in range(WIDTH):%0A for y in range(HEIGHT - 1):%0A next = y + 1%0A print %22 Cell_%25(x)d_%25(y)d.%25(VERTICAL_NEXT)s = Cell_%25(x)d_%25(next)d%22 %25 globals()%0A print %22 no Cell_%25(x)d_%25(next)d.%25(VERTICAL_NEXT)s%22 %25 globals()%0A%0Afor y in range(HEIGHT):%0A for x in range(WIDTH - 1):%0A next = x + 1%0A print %22 Cell_%25(x)d_%25(y)d.%25(HORIZONTAL_NEXT)s = Cell_%25(next)d_%25(y)d%22 %25 globals()%0A print %22 no Cell_%25(next)d_%25(y)d.%25(HORIZONTAL_NEXT)s%22 %25 globals()%0A%0A%0Aprint %22%7D%22%0A
|
|
9efc48eadfb896a70197f8963caa6034d9b4856d
|
store res_log in database instead of memory, add index
|
bin/addons/base/res/res_log.py
|
bin/addons/base/res/res_log.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
import tools
import time
class res_log(osv.osv_memory):
_name = 'res.log'
_columns = {
'name': fields.char('Message', size=128, help='The logging message.', required=True),
'user_id': fields.many2one('res.users','User', required=True),
'res_model': fields.char('Object', size=128),
'context': fields.char('Context', size=250),
'res_id': fields.integer('Object ID'),
'secondary': fields.boolean('Secondary Log', help='Do not display this log if it belongs to the same object the user is working on'),
'create_date': fields.datetime('Created Date', readonly=True),
'read': fields.boolean('Read', help="If this log item has been read, get() should not send it to the client")
}
_defaults = {
'user_id': lambda self,cr,uid,ctx: uid,
'create_date': fields.datetime.now,
'context': "{}",
'read': False
}
_order='create_date desc'
# TODO: do not return secondary log if same object than in the model (but unlink it)
def get(self, cr, uid, context=None):
unread_log_ids = self.search(cr, uid, [('user_id','=',uid),
('read', '=', False)],
context=context)
unread_logs = self.read(cr, uid, unread_log_ids,
['name','res_model','res_id'],
context=context)
self.write(cr, uid, unread_log_ids, {'read': True}, context=context)
return unread_logs
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
res = []
log_ids = super(res_log, self).search(cr, uid, args, offset, limit, order, context, count)
logs = {}
for log in self.browse(cr, uid, log_ids, context=context):
res_dict = logs.get(log.res_model, {})
res_dict.update({log.res_id: log.id})
logs.update({log.res_model: res_dict})
res = map(lambda x: x.values(), logs.values())
return tools.flatten(res)
res_log()
|
Python
| 0
|
@@ -1051,15 +1051,8 @@
.osv
-_memory
):%0A
@@ -1815,52 +1815,8 @@
id,%0A
- 'create_date': fields.datetime.now,%0A
@@ -1895,16 +1895,396 @@
desc'%0A%0A
+ _index_name = 'res_log_uid_read'%0A def _auto_init(self, cr, context=%7B%7D):%0A super(res_log, self)._auto_init(cr, context)%0A cr.execute('SELECT 1 FROM pg_indexes WHERE indexname=%25s',%0A (self._index_name,))%0A if not cr.fetchone():%0A cr.execute('CREATE INDEX %25s ON res_log (user_id, read)' %25%0A self._index_name)%0A%0A
# TO
|
98cbb29d008fc7abf1a066d9ecf7b3399395aefe
|
add users api
|
mcp/interface/users.py
|
mcp/interface/users.py
|
Python
| 0.000001
|
@@ -0,0 +1,885 @@
+import json%0A%0Afrom mcp import users%0Afrom mcp.interface import common%0A%0Aclass UsersHandler(common.AuthorizedHandler):%0A%09def forbidden(self):%0A%09%09return True%0A%0A%09def do_get(self):%0A%09%09return 200, json.dumps(list(iter(users.user_db)))%0A%0Aclass UserHandler(common.AuthorizedHandler):%0A%09def __init__(self, request, response, groups):%0A%09%09common.AuthorizedHandler.__init__(self, request, response, groups)%0A%09%09self.userentry = users.get(self.groups%5B0%5D)%0A%0A%09def forbidden(self):%0A%09%09return self.user.name != self.userentry.name%0A%0Aclass UserInfoHandler(UserHandler):%0A%09def do_get(self):%0A%09%09return 200, json.dumps(%7B'name': self.userentry.name, 'key': self.userentry.key, 'admin': self.userentry.admin, 'active': self.userentry.active, 'servers': self.userentry.servers%7D)%0A%0Ausers_base = '/users/'%0Auser_base = users_base + '(' + users.users_allowed + ')'%0A%0Aroutes = %7Busers_base: UsersHandler, user_base: UserInfoHandler%7D%0A
|
|
6e165165974f99a30ba6dce528d97c03152eeab6
|
Add tracegraph script
|
tracegraph.py
|
tracegraph.py
|
Python
| 0.000001
|
@@ -0,0 +1,2102 @@
+#!/usr/bin/python%0Aimport sys%0Aimport re%0A%0Aclass Host(object):%0A def __init__(self, step, name, ip):%0A self.step = step%0A self.name = name%0A self.ip = ip%0A self.pings = %5B%5D%0A%0A def add_ping(self, ping):%0A self.pings.append(ping)%0A%0A def max_ping(self):%0A return max(self.pings) if self.pings else 0%0A%0A def min_ping(self):%0A return min(self.pings) if self.pings else float('inf')%0A%0Aclass Step(object):%0A def __init__(self):%0A self.hosts = %5B%5D%0A%0Adef to_host(line):%0A parts = re.split(r'%5Cs+', line.strip())%0A%0A step = ''%0A if parts%5B0%5D.isdigit():%0A step = int(parts%5B0%5D)%0A parts = parts%5B1:%5D%0A%0A x = 0%0A while x %3C len(parts) and parts%5Bx%5D == '*':%0A x += 1%0A%0A name = '?'%0A ip = '?'%0A%0A if x %3C len(parts):%0A name = parts%5Bx%5D%0A x += 1%0A%0A if x %3C len(parts):%0A ip = parts%5Bx%5D%0A x += 1%0A%0A pings = %5Bfloat(t) for t in parts%5Bx:%5D if t != 'ms' and t != '*'%5D%0A%0A host = Host(step, name, ip)%0A for ping in pings:%0A host.add_ping(ping)%0A return host%0A%0Adef barplot(host, scale):%0A if not host.pings:%0A return ''%0A%0A p0 = int(host.min_ping() * scale)%0A p1 = int(host.max_ping() * scale)%0A%0A return (max(0, p0 - 1) * ' '%0A + 'o'%0A + (p1 - p0 - 1) * '-'%0A + ('o' if p1 %3E p0 else ''))%0A%0Adef rchop_to(s, l):%0A if len(s) %3C= l:%0A return s%0A return '...' 
+ s%5B-l+3:%5D%0A%0Aif len(sys.argv) %3E 1:%0A # Pass arguments to traceroute%0A import subprocess%0A #lines = subprocess.check_output(%5B'traceroute'%5D + sys.argv%5B1:%5D).splitlines()%0A p = subprocess.Popen(%5B'traceroute'%5D + sys.argv%5B1:%5D, stdout=subprocess.PIPE, stderr=None)%0A%0A lines = %5B%5D%0A for line in iter(p.stdout.readline,''):%0A lines.append(line.rstrip())%0A print line.rstrip()%0Aelse:%0A print 'Reading from stdin.'%0A lines = sys.stdin.read().splitlines()%0A print 'traceroute:'%0A print '%5Cn'.join(lines)%0A%0Aprint ''%0Aprint 'tracegraph:'%0A%0Ahosts = list(to_host(line) for line in lines)%0Amax_ping = max(h.max_ping() for h in hosts)%0A%0AWIDTH = 60%0A%0Ascale = float(WIDTH) / max_ping%0A%0Afor host in hosts:%0A print '%252s %25-20s %7C %25s' %25 (host.step, rchop_to(host.name, 20), barplot(host, scale))%0A%0Aprint 25 * ' ' + ' 0ms' + (WIDTH - 8) * ' ' + ' %25.1fms' %25 max_ping%0A
|
|
8bc3c2c82b1b8f7e4582fe401c05c4c4f34cc345
|
create structure for python
|
src/boat_servo_sail/setup.py
|
src/boat_servo_sail/setup.py
|
Python
| 0.000261
|
@@ -0,0 +1,469 @@
+## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD%0A## http://docs.ros.org/api/catkin/html/howto/format2/installing_python.html%0A## pdf download of the page in sources folder:%0A## docs-ros_installing_python.pdf%0Afrom distutils.core import setup%0Afrom catkin_pkg.python_setup import generate_distutils_setup%0A%0A# fetch values from package.xml%0Asetup_args = generate_distutils_setup(%0A packages=%5B'boat_servo_sail'%5D,%0A package_dir=%7B'': 'src'%7D)%0A%0Asetup(**setup_args)%0A
|
|
dc3df810c3c6ffea429e43ec0f8e6f006a9c1c6f
|
Create tensorCase.py
|
tensorCase.py
|
tensorCase.py
|
Python
| 0.000002
|
@@ -0,0 +1,553 @@
+import tensorflow as tf%0A%0Asess = tf.InteractiveSession()%0Ax = tf.random_uniform(%5B%5D)%0Ay = tf.random_uniform(%5B%5D)%0Aout1 = tf.cond(tf.greater(x,y), lambda:tf.add(x,y), lambda:(tf.subtract(x,y)))%0Aprint(x.eval(), y.eval(), out1.eval())%0A%0Ax = tf.random_uniform(%5B%5D,-1,1)%0Ay = tf.random_uniform(%5B%5D,-1,1)%0Adef f1(): return tf.cast(tf.add(x,y), tf.float32)%0Adef f2(): return tf.cast(tf.subtract(x,y), tf.float32)%0Adef f3(): return tf.cast(tf.constant(0), tf.float32)%0Aout2 = tf.case(%7Btf.less(x, y):f2, tf.greater(x,y):f1%7D, default=f3)%0Aprint(x.eval(), y.eval(), out2.eval())%0A
|
|
6584d9e71c82097f65f316bf85b2f019350cfa58
|
Add run_aiotest.py
|
run_aiotest.py
|
run_aiotest.py
|
Python
| 0.000003
|
@@ -0,0 +1,199 @@
+import aioeventlet%0Aimport aiotest.run%0Aimport eventlet%0A%0Aconfig = aiotest.TestConfig()%0Aconfig.new_event_pool_policy = aioeventlet.EventLoopPolicy%0Aconfig.sleep = eventlet.sleep%0Aaiotest.run.main(config)%0A
|
|
50983c0a6d18e1ec8fcaed076f3c82b5935fe913
|
Solve problem 23
|
problem023.py
|
problem023.py
|
Python
| 0.999965
|
@@ -0,0 +1,458 @@
+#!/usr/bin/env python3%0A%0Afrom itertools import *%0A%0Adef divisors(n):%0A for d in takewhile(lambda d: d * d %3C= n, count(1)):%0A if n %25 d == 0:%0A yield d%0A if n // d != d:%0A yield n // d%0A%0Adef is_abundant_number(n):%0A return n %3C sum(divisors(n)) - n%0A%0Aall_abundant = set(filter(is_abundant_number, range(1, 28123+1)))%0Aprint(sum(n for n in range(1, 28123+1)%0A if not any(n-d in all_abundant for d in all_abundant)))%0A
|
|
8db65dc2a6a99a0e6287b12f7bfdcd423a62e515
|
Add test running harness
|
run_harness.py
|
run_harness.py
|
Python
| 0
|
@@ -0,0 +1,3077 @@
+import sys%0Aimport os%0Aimport subprocess%0Aimport shutil%0Aimport shlex%0Aimport collections%0Afrom babi_train import TrainExitStatus%0Afrom graceful_interrupt import GracefulInterruptHandler%0A%0ATaskSpec = collections.namedtuple(%22TaskSpec%22, %5B%22task_name%22, %22variant_name%22, %22run_params%22%5D)%0A%0Adef run(tasks_dir, output_dir, base_params, specs):%0A base_params_split = shlex.split(base_params)%0A for spec in specs:%0A print(%22### Task %7B%7D (%7B%7D) ###%22.format(spec.task_name, spec.variant_name))%0A run_params_split = shlex.split(spec.run_params)%0A%0A task_folder_train = os.path.join(tasks_dir, %22%7B%7D_train%22.format(spec.task_name))%0A if not os.path.isdir(task_folder_train):%0A print(%22Train directory doesn't exist. Parsing text file...%22)%0A textfile = task_folder_train + %22.txt%22%0A subprocess.run(%5B%22python3%22,%22babi_graph_parse.py%22,textfile%5D, check=True)%0A%0A task_folder_valid = os.path.join(tasks_dir, %22%7B%7D_valid%22.format(spec.task_name))%0A if not os.path.isdir(task_folder_valid):%0A print(%22Validation directory doesn't exist. Parsing text file...%22)%0A textfile = task_folder_valid + %22.txt%22%0A subprocess.run(%5B%22python3%22,%22babi_graph_parse.py%22,textfile%5D, check=True)%0A%0A task_output_dir = os.path.join(output_dir, spec.task_name, spec.variant_name)%0A if not os.path.isdir(task_output_dir):%0A os.makedirs(task_output_dir)%0A%0A completed_file = os.path.join(task_output_dir, %22completed.txt%22)%0A if os.path.exists(completed_file):%0A print(%22Task is already completed! 
Skipping...%22)%0A continue%0A%0A stdout_fn = os.path.join(task_output_dir, %22stdout.txt%22)%0A%0A all_params = %5Btask_folder_train%5D + run_params_split + base_params_split%0A all_params.extend(%5B%22--outputdir%22, task_output_dir%5D)%0A all_params.extend(%5B%22--validation%22, task_folder_valid%5D)%0A all_params.extend(%5B%22--set-exit-status%22%5D)%0A all_params.extend(%5B%22--resume-auto%22%5D)%0A with open(stdout_fn, 'a', 1) as stdout_file:%0A proc = subprocess.Popen(all_params, stdout=stdout_file, stderr=subprocess.STDOUT)%0A with GracefulInterruptHandler():%0A returncode = proc.wait()%0A task_status = TrainExitStatus(returncode)%0A%0A if task_status == TrainExitStatus.accuracy_success:%0A print(%22SUCCESS! Reached desired accuracy.%22)%0A with open(completed_file,'w') as f:%0A f.write(%22SUCCESS%5Cn%22)%0A elif task_status == TrainExitStatus.reached_update_limit:%0A print(%22FAIL! Reached update limit without attaining desired accuracy.%22)%0A with open(completed_file,'w') as f:%0A f.write(%22FAIL_UPDATE_LIMIT%5Cn%22)%0A elif task_status == TrainExitStatus.error:%0A print(%22Got an error; skipping for now. See %7B%7D for details.%22.format(stdout_fn))%0A elif task_status == TrainExitStatus.nan_loss:%0A print(%22NaN loss detected; skipping for now.%22)%0A elif task_status == TrainExitStatus.interrupted:%0A print(%22Process was interrupted! Stopping now%22)%0A break%0A
|
|
a423b0eb6277c260a16dc11f37088e496964cc0c
|
Create count-neighbours.py
|
home/count-neighbours.py
|
home/count-neighbours.py
|
Python
| 0.000009
|
@@ -0,0 +1,830 @@
+def checkio ( data ) :%0A %0A if len( data ) %3C 10 : return False%0A %0A if not any( c in data for c in %22abcdefghijklmnopqrstuvwxyz%22 ) : return False%0A if not any( c in data for c in %22ABCDEFGHIJKLMNOPQRSTUVWXYZ%22 ) : return False%0A if not any( c in data for c in %221234567890%22 ) : return False%0A %0A return True%0A%0A#Some hints%0A#Just check all conditions%0A%0A%0Aif __name__ == '__main__':%0A #These %22asserts%22 using only for self-checking and not necessary for auto-testing%0A assert checkio('A1213pokl') == False, %221st example%22%0A assert checkio('bAse730onE4') == True, %222nd example%22%0A assert checkio('asasasasasasasaas') == False, %223rd example%22%0A assert checkio('QWERTYqwerty') == False, %224th example%22%0A assert checkio('123456123456') == False, %225th example%22%0A assert checkio('QwErTy911poqqqq') == True, %226th example%22%0A
|
|
557d21ffbbbf5cb8a452d6bc0b4c013daf8eabdc
|
Add new migration
|
server/store/migrations/0003_alter_testimonial_reviewer_location.py
|
server/store/migrations/0003_alter_testimonial_reviewer_location.py
|
Python
| 0
|
@@ -0,0 +1,390 @@
+# Generated by Django 4.1.3 on 2022-11-14 02:09%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A (%22store%22, %220002_initial%22),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name=%22testimonial%22,%0A name=%22reviewer_location%22,%0A field=models.CharField(max_length=100),%0A ),%0A %5D%0A
|
|
0df48d7159841f66cc4e8dac8e9a52727c69091e
|
add testcase for settings()
|
test/model.py
|
test/model.py
|
Python
| 0
|
@@ -0,0 +1,495 @@
+# -*- coding: utf-8 -*-%0A%0Aimport unittest%0Afrom cliez.conf import settings, Settings%0A%0A%0Aclass Model(object):%0A config_none = settings()%0A%0A def __init__(self):%0A self.config = settings()%0A pass%0A%0A pass%0A%0A%0Aclass ModelTestCase(unittest.TestCase):%0A def setUp(self):%0A Settings.bind('cliez.conf')%0A pass%0A%0A def test_ok(self):%0A a = Model()%0A self.assertEqual(None, a.config_none)%0A self.assertEqual(None, a.config.PACKAGE_ROOT)%0A pass%0A%0A pass%0A
|
|
b1c405950ce5be535796d6d31a329b5083dc91a9
|
adding test_hello
|
test_hello.py
|
test_hello.py
|
Python
| 0.999096
|
@@ -0,0 +1,18 @@
+print %22Hello All%22%0A
|
|
bb0e8032d325d2fd015a53d4513d632d12e8afb3
|
Create pset2part3.py
|
pset2part3.py
|
pset2part3.py
|
Python
| 0.000003
|
@@ -0,0 +1,1010 @@
+balance = 270472%0AannualInterestRate = 0.21%0A# test case 1%0A# balance = 320000%0A# annualInterestRate = 0.2%0A# Lowest Payment: 29157.09%0A# test case 2%0A# balance = 999999%0A# annualInterestRate = 0.18%0A# Lowest Payment: 90325.03%0A%0Aepsilon = 0.01%0Alower = balance / 12%0Aupper = balance * ((1 + annualInterestRate / 12.0) ** 12) / 12.0%0Aans = (lower + upper) / 2.0%0A%0Adef yearEndBalance(monthlyPayment):%0A '''%0A Calculates year end balance given a monthly payment%0A as an argument. monthlyPayment can be int or float '''%0A myBalance = balance%0A for m in range(12):%0A interest = (myBalance - monthlyPayment) * annualInterestRate / 12.0%0A myBalance = myBalance + interest - monthlyPayment%0A return myBalance%0A%0Awhile abs(yearEndBalance(ans)) %3E= epsilon:%0A # print(%22lower = %22 + str(lower) + %22 upper = %22 + str(upper) + %22 ans = %22 + str(ans))%0A if yearEndBalance(ans) %3C 0:%0A upper = ans%0A else:%0A lower = ans%0A ans = (lower + upper) / 2.0%0A %0Aprint (%22Lowest Payment: %22 + str(round(ans, 2)))%0A%0A
|
|
3f64c94f51698ab69fc1996d73a04bde58da9052
|
Update build script.
|
client/scripts/build-plugin.py
|
client/scripts/build-plugin.py
|
import os, sys
import ue4config
import ue4util, gitutil, shutil, uploadutil
plugin_version = gitutil.get_short_version('.')
plugin_output_folder = os.path.abspath('./unrealcv-%s' % plugin_version)
def build_plugin():
UAT_script = ue4config.conf['UATScript']
if not os.path.isfile(UAT_script):
print('Can not find Automation Script of UE4 %s' % UAT_script)
print('Please set UnrealEnginePath in ue4config.py correctly first')
return False
else:
if gitutil.is_dirty(os.path.abspath('.')):
print 'Error: uncommited changes of this repo exist'
return False
plugin_file = os.path.abspath('../../UnrealCV.uplugin')
plugin_file = ue4util.get_real_path(plugin_file)
UAT_script = UAT_script.replace(' ', '\ ')
cmd = '%s BuildPlugin -plugin=%s -package=%s -rocket -targetplatforms=Win64+Linux' % (UAT_script, plugin_file, plugin_output_folder)
print(cmd)
os.system(cmd)
# Clean up intermediate files
intermediate_folder = os.path.join(plugin_output_folder, 'Intermediate')
print 'Delete intermediate folder %s' % intermediate_folder
shutil.rmtree(intermediate_folder)
return True
def output_plugin(output_conf):
type = output_conf['Type']
upload_handlers = dict(
scp = uploadutil.upload_scp,
s3 = uploadutil.upload_s3,
)
upload_handlers[type](output_conf, [plugin_output_folder], '.')
if __name__ == '__main__':
if build_plugin():
output_confs = ue4config.conf['PluginOutput']
for conf in output_confs:
print conf['Type']
output_plugin(conf)
|
Python
| 0
|
@@ -190,16 +190,83 @@
version)
+%0Aplugin_output_folder = ue4util.get_real_path(plugin_output_folder)
%0A%0Adef bu
|
1765ac3a12ea2a56b4e25e05cf1f1b531de5b2cf
|
Add External Temperature Probe from OpenWeather
|
pyexternal.py
|
pyexternal.py
|
Python
| 0
|
@@ -0,0 +1,1900 @@
+#!/usr/bin/env python%0A# Get External Temperature from OpenWeatherMap%0A# External informations are :%0A# - temperature%0A# - humidity%0A# - pressure%0A# - precipitation volume (each 3h)%0A%0Aimport urllib.request%0Aimport json%0Aimport pyowm%0Afrom datetime import datetime%0A%0Afrom pyserial import pySerial%0Afrom imports.pyTemperature import pyTemperature%0ABASE_URL = %22http://api.openweathermap.org/data/2.5/weather?q=%22%0ADEFAULT_CITY = %22Meyreuil, France%22%0AAPI_KEY = %224ca5e2bebb63f72d4cc5564300cf68d5%22%0A%0A%0Aclass py_external(object):%0A def __init__(self):%0A super(py_external, self).__init__()%0A self.pyTemperature = None%0A%0A%0A def getDataAPI(self):%0A owm = pyowm.OWM(API_KEY)%0A #observation = owm.weather_at_place(DEFAULT_CITY,'accurate')%0A observation = owm.weather_at_id(2994068)%0A print(observation)%0A if observation is not None:%0A%0A w = observation.get_weather()%0A%0A w_temp = w.get_temperature(unit='celsius')%0A w_hum = w.get_humidity()%0A w_pres = w.get_pressure()%0A w_prec = w.get_rain()%0A #print(w_prec)%0A l = observation.get_location()%0A #print(l.get_ID())%0A #print(l.get_name())%0A #print(l.get_lat())%0A #print(l.get_lon())%0A%0A #pyTemperature Constructor (self, date = datetime.now(), temp=None,pressure=None,humidity=None,precicipationVol=None):%0A dateNow = datetime.now()%0A self.pyTemperature = pyTemperature(dateNow,w_temp%5B'temp'%5D,w_pres%5B'press'%5D,w_hum)%0A #print(%22Temperature at pyExternal%22)%0A #self.pyTemperature.printTemperature()%0A%0A%0A%0A def getPyTemperature(self):%0A return self.pyTemperature%0A%0A def setDate(self,newDate):%0A self.date = newDate%0A%0A def setPressure(self,newPressure):%0A self.pressure = newPressure%0A%0A def setHumidity(self,newHumidity):%0A self.humidity = newHumidity%0A%0A%0A%0A
|
|
9a37f573aa985b3bd876c2df4c3f83670fddab42
|
add script to set script permissions
|
chmod.py
|
chmod.py
|
Python
| 0.000001
|
@@ -0,0 +1,391 @@
+import os, stat, sublime, sublime_plugin%0A%0A# Configuration%0Apkg = 'NSIS'%0Ascript = sublime.packages_path() + '/' + pkg + '/nsis_build.sh'%0A%0Adef plugin_loaded():%0A from package_control import events%0A%0A # chmod +x %3Cscript%3E%0A if (events.install(pkg) or events.post_upgrade(pkg)) and os.name is 'posix' or 'mac':%0A st = os.stat(script)%0A os.chmod(script, st.st_mode %7C stat.S_IEXEC)
|
|
7cd1c65b77eb474f67f1e194ceeb7bcde2d2bdb9
|
Create wsgi.py
|
NGeO/NGeO/wsgi.py
|
NGeO/NGeO/wsgi.py
|
Python
| 0.000001
|
@@ -0,0 +1,383 @@
+%22%22%22%0AWSGI config for NGeO project.%0A%0AIt exposes the WSGI callable as a module-level variable named %60%60application%60%60.%0A%0AFor more information on this file, see%0Ahttps://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/%0A%22%22%22%0A%0Aimport os%0Aos.environ.setdefault(%22DJANGO_SETTINGS_MODULE%22, %22NGeO.settings%22)%0A%0Afrom django.core.wsgi import get_wsgi_application%0Aapplication = get_wsgi_application()%0A
|
|
a7f1565efbdfa20d4d97d90a688b78da51533113
|
Add new package: ycsb (#17788)
|
var/spack/repos/builtin/packages/ycsb/package.py
|
var/spack/repos/builtin/packages/ycsb/package.py
|
Python
| 0
|
@@ -0,0 +1,1654 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Ycsb(Package):%0A %22%22%22Yahoo! Cloud Serving Benchmark.%22%22%22%0A%0A homepage = %22https://research.yahoo.com/news/yahoo-cloud-serving-benchmark/%22%0A url = %22https://github.com/brianfrankcooper/YCSB/archive/0.17.0.tar.gz%22%0A git = %22https://github.com/brianfrankcooper/YCSB.git%22%0A%0A version('0.17.0', sha256='5dd1a3d4dd7ac336eadccc83b097c811e142cfe1b23fc278f247054a1892c0e0')%0A version('0.16.0', sha256='4296fd5e90d7d6d7dfcbad90039ddf16e785706a07f99c1c8a06e6ee06440f71')%0A version('0.15.0', sha256='50b83c11f1a2f19f45e3cc6781f952c69944d1221dfec72169c3587802fc7fbb')%0A version('0.14.0', sha256='456bcc9fa3d5d66d76fffa9cec34afd4528d9f02aa8a8d1135f511650516d5cb')%0A version('0.13.0', sha256='21cb8078a0fe2d8d909145744ca15848dbb6757e98a7fdc97fb4049f82f4afbc')%0A%0A depends_on('maven', type='build')%0A depends_on('java@8', type=('build', 'run'))%0A depends_on('mongodb-async-driver', type='build')%0A%0A def install(self, spec, prefix):%0A mvn = which('mvn')%0A jar_name = 'target/mongodb-async-driver-' + %5C%0A spec%5B'mongodb-async-driver'%5D.version.string + '.jar'%0A path = join_path(self.spec%5B'mongodb-async-driver'%5D.prefix, jar_name)%0A mvn('install:install-file', '-Dfile=%7B0%7D'.format(path),%0A '-DgroupId=com.allanbank', '-DartifactId=mongodb-async-driver',%0A '-Dversion=2.0.1', '-Dpackaging=jar')%0A mvn('package', '-DskipTests')%0A install_tree('.', prefix)%0A
|
|
b7baf1e53f24bb96a0b09e9305f5f1e562cf3547
|
Create analog_tester.py
|
home/moz4r/analog_tester.py
|
home/moz4r/analog_tester.py
|
Python
| 0.000009
|
@@ -0,0 +1,418 @@
+arduino = Runtime.createAndStart(%22arduino%22,%22Arduino%22)%0Aarduino.setBoardNano()%0Aarduino.connect(%22COM6%22)%0Aarduino.setAref(%22DEFAULT%22)%0Adef publishPin(pins):%09 %0A%09for pin in range(0, len(pins)):print(pins%5Bpin%5D.value)%0Aarduino.addListener(%22publishPinArray%22,%22python%22,%22publishPin%22)%0A#arduino.enablePin(pinAddress, rate)%0A#analog pin range are 14-18 on uno, 54-70 on mega%0A#rate is the number of polling / sec%0Aarduino.enablePin(14, 1)%0A
|
|
5e6cfc84a4b34a292281ea466bf11facb680e72b
|
initialize radix sort file
|
radix_sort.py
|
radix_sort.py
|
Python
| 0.000001
|
@@ -0,0 +1,178 @@
+def radix_sort(array):%0A %22%22%22%0A Sorts an array of numbers using the least signficant digit radix algorithm.%0A %22%22%22%0A%0A%0Aif __name__ == '__main__':%0A print radix_sort.func_doc%0A
|
|
3e54e311a747b1e032384c7a74a8ed9aeafe1e8d
|
Fix constructor for Bernoulli node
|
bayespy/inference/vmp/nodes/bernoulli.py
|
bayespy/inference/vmp/nodes/bernoulli.py
|
######################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
A module for the Bernoulli distribution node
"""
import numpy as np
from .binomial import (BinomialMoments,
BinomialDistribution,
Binomial)
class BernoulliMoments(BinomialMoments):
"""
Class for the moments of Bernoulli variables.
"""
def __init__(self):
super().__init__(1)
class BernoulliDistribution(BinomialDistribution):
"""
Class for the VMP formulas of Bernoulli variables.
"""
def __init__(self):
super().__init__(1)
class Bernoulli(Binomial):
"""
Node for Bernoulli random variables.
"""
_moments = BernoulliMoments()
_distribution = BernoulliDistribution()
def __init__(self, p, **kwargs):
super().__init__(1, p, **kwargs)
def show(self):
"""
Print the distribution using standard parameterization.
"""
p = 1 / (1 + np.exp(-self.phi[0]))
print("%s ~ Bernoulli(p)" % self.name)
print(" p = ")
print(p)
|
Python
| 0
|
@@ -1737,79 +1737,544 @@
-def __init__(self, p, **kwargs):%0A super().__init__(1, p, **kwarg
+@classmethod%0A def _constructor(cls, p, **kwargs):%0A %22%22%22%0A Constructs distribution and moments objects.%0A %22%22%22%0A p = cls._ensure_moments(p, cls._parent_moments%5B0%5D)%0A parents = %5Bp%5D%0A return ( parents,%0A kwargs,%0A ( (), ),%0A cls._total_plates(kwargs.get('plates'),%0A cls._distribution.plates_from_parent(0, p.plates)),%0A cls._distribution, %0A cls._moments, %0A cls._parent_moment
s)%0A%0A
|
0a16a2002e1247ad87a877de6aa85bb0844dc9c4
|
tag tweaks
|
boards/NRF51TAG.py
|
boards/NRF51TAG.py
|
#!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# This file contains information for a specific board - the available pins, and where LEDs,
# Buttons, and other in-built peripherals are. It is used to build documentation as well
# as various source and header files for Espruino.
# ----------------------------------------------------------------------------------------
import pinutils;
info = {
'name' : "nRF51 Tag",
'link' : [ "" ],
'default_console' : "EV_SERIAL1",
'default_console_tx' : "D15",
'default_console_rx' : "D17",
'default_console_baudrate' : "9600",
'variables' : 310,
'binary_name' : 'espruino_%v_nrf51tag.bin',
'build' : {
'defines' : [
'USE_BLUETOOTH'
]
}
};
chip = {
'part' : "NRF51822",
'family' : "NRF51",
'package' : "QFN48",
'ram' : 16,
'flash' : 256,
'speed' : 16,
'usart' : 1,
'spi' : 1,
'i2c' : 1,
'adc' : 1,
'dac' : 0,
# If using DFU bootloader, it sits at 0x3C000 - 0x40000 (0x40000 is end of flash)
# Might want to change 256 -> 240 in the code below
'saved_code' : {
'address' : ((256 - 3) * 1024),
'page_size' : 1024,
'pages' : 3,
'flash_available' : (256 - 108 - 16) # total flash pages - softdevice - bootloader
}
};
devices = {
# 'LED1' : { 'pin' : 'D22' },
# 'LED2' : { 'pin' : 'D21' },
# 'LED3' : { 'pin' : 'D23' }
};
def get_pins():
pins = pinutils.generate_pins(0,31) # 32 General Purpose I/O Pins.
pinutils.findpin(pins, "PD27", True)["functions"]["XL1"]=0;
pinutils.findpin(pins, "PD26", True)["functions"]["XL2"]=0;
#The boot/reset button will function as a reset button in normal operation. Pin reset on PD21 needs to be enabled on the nRF52832 device for this to work.
return pins
|
Python
| 0.000001
|
@@ -856,18 +856,21 @@
%22EV_
-SERIAL1
+BLUETOOTH
%22,%0A
+#
'de
@@ -896,16 +896,17 @@
%22D15%22,%0A
+#
'defaul
@@ -928,16 +928,17 @@
%22D17%22,%0A
+#
'defaul
@@ -982,18 +982,18 @@
bles' :
-3
1
+5
0,%0A 'bin
@@ -1469,16 +1469,21 @@
(256 - 3
+ - 16
) * 1024
@@ -1565,16 +1565,20 @@
108 - 16
+ - 3
) # tota
@@ -1616,16 +1616,29 @@
otloader
+ - saved code
%0A %7D%0A%7D;%0A
|
e96832e16a6e5746faeaf647c6cd681f1d2f9bca
|
Create break_fracmorse.py
|
break_fracmorse.py
|
break_fracmorse.py
|
Python
| 0.998165
|
@@ -0,0 +1,2687 @@
+# usage: python break_fracmorse.py 'CIPHERTEXTMESSAGE'%0A# ideally you'll want 200 or so characters to reliably decrypt, shorter will often work but not as reliably.%0A%0Aimport random%0Afrom ngram_score import ngram_score%0Aimport re%0Aimport sys%0Afrom pycipher import FracMorse%0A%0A#ctext = FracMorse('PQRSTUVWXYZABCDEFGHIJKLMNO').encipher(%22He has not been returned to sea because of his affection for caregivers.The waitress pointed to the lunch menu, but the oldest living ex-major leaguer had no use for it%22)%0Afitness = ngram_score('fmorse_quadgrams.txt') # load our quadgram model%0A%0A# helper function, converts an integer 0-25 into a character%0Adef i2a(i): return 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'%5Bi%2526%5D%0A%0A# decipher a piece of text using the substitution cipher and a certain key %0Adef sub_decipher(text,key):%0A invkey = %5Bi2a(key.index(i)) for i in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'%5D%0A ret = '' %0A for c in text:%0A if c.isalpha(): ret += invkey%5Bord(c.upper())-ord('A')%5D%0A else: ret += c%0A return ret%0A%0A# This code is just the simple substitution cipher cracking code, it works perfectly for fractionated morse as%0A# long as you use fractioned morse statistics instead of english statistics.%0Adef break_simplesub(ctext,startkey=None):%0A ''' perform hill-climbing with a single start. This function may have to be called many times%0A to break a substitution cipher. 
'''%0A # make sure ciphertext has all spacing/punc removed and is uppercase%0A ctext = re.sub('%5B%5EA-Z%5D','',ctext.upper())%0A parentkey,parentscore = startkey or list('ABCDEFGHIJKLMNOPQRSTUVWXYZ'),-99e99%0A if not startkey: random.shuffle(parentkey)%0A parentscore = fitness.score(sub_decipher(ctext,parentkey))%0A count = 0%0A while count %3C 1000:%0A a = random.randint(0,25)%0A b = random.randint(0,25)%0A child = parentkey%5B:%5D%0A # swap two characters in the child%0A child%5Ba%5D,child%5Bb%5D = child%5Bb%5D,child%5Ba%5D%0A score = fitness.score(sub_decipher(ctext,child))%0A # if the child was better, replace the parent with it%0A if score %3E parentscore:%0A parentscore, parentkey = score, child%5B:%5D%0A count = 0 # reset the counter%0A count += 1%0A return parentscore, parentkey%0A%0Actext = sys.argv%5B1%5D%0Actext = re.sub(r'%5B%5EA-Z %5D','',ctext.upper())%0Amaxscore, maxkey = break_simplesub(ctext,list('ABCDEFGHIJKLMNOPQRSTUVWXYZ'))%0Aprint str(maxscore),'simplesub key:',''.join(maxkey), 'decrypt: ',sub_decipher(ctext,maxkey)%0Afor i in range(1000):%0A score, key = break_simplesub(ctext)%0A if score %3E maxscore:%0A maxscore,maxkey = score,key%5B:%5D%0A print str(maxscore),'FractionatedMorse key:',''.join(maxkey), 'decrypt: ',FracMorse(maxkey).decipher(ctext)%0A%0A
|
|
decf4b1916a421fe996a31feb131b7ed9e4e3c36
|
Add a simple benchmark script
|
numpy-benchmark-one.py
|
numpy-benchmark-one.py
|
Python
| 0.000001
|
@@ -0,0 +1,441 @@
+import timeit%0A%0Anormal_py_sec = timeit.timeit('sum (x*x for x in xrange(1000))',number = 10000)%0Anaive_np_sec = timeit.timeit('sum(na*na)',setup='import numpy as np; na=np.arange(1000)', number = 10000)%0Agood_np_sec = timeit.timeit('na.dot(na)',setup='import numpy as np; na=np.arange(1000)', number = 10000)%0A%0Aprint(%22Normal Python: %25f sec%22%25normal_py_sec)%0Aprint(%22Naive Numpy : %25f sec%22%25naive_np_sec)%0Aprint(%22Good Numpy : %25f sec%22%25good_np_sec)%0A%0A%0A
|
|
cb517a2cd1dea12fadf4f72147fecf0105cbd717
|
include missing Message
|
palaverapi/message.py
|
palaverapi/message.py
|
Python
| 0.000027
|
@@ -0,0 +1,1385 @@
+# Adapted from https://github.com/kylef/irctk/blob/master/irctk/message.py%0A%0Afrom typing import List, Optional%0A%0A%0Aclass Message:%0A @classmethod%0A def parse(cls, string: str) -%3E 'Message':%0A prefix = None%0A parameters = %5B%5D%0A%0A if string.startswith('@'):%0A _, string = string%5B1:%5D.split(' ', 1)%0A%0A if string.startswith(':'):%0A prefix, string = string.split(' ', 1)%0A prefix = prefix%5B1:%5D%0A%0A if ' ' in string:%0A command, string = string.split(' ', 1)%0A else:%0A command = string%0A string = ''%0A%0A while len(string) != 0:%0A if string%5B0%5D == ':':%0A parameters.append(string%5B1:%5D)%0A string = ''%0A elif ' ' in string:%0A parameter, string = string.split(' ', 1)%0A parameters.append(parameter)%0A else:%0A parameters.append(string)%0A string = ''%0A%0A return cls(prefix, command, parameters)%0A%0A def __init__(%0A self,%0A prefix: str = None,%0A command: str = '',%0A parameters: List%5Bstr%5D = None,%0A ):%0A self.prefix = prefix%0A self.command = command%0A self.parameters = parameters or %5B%5D%0A%0A def get(self, index: int) -%3E Optional%5Bstr%5D:%0A if index %3E= len(self.parameters):%0A return None%0A%0A return self.parameters%5Bindex%5D%0A
|
|
6db9688d7c078c8cf8d1b17305e89bb680a46e53
|
Create lc1001.py
|
LeetCode/lc1001.py
|
LeetCode/lc1001.py
|
Python
| 0.000003
|
@@ -0,0 +1,1698 @@
+xs = %7B%7D%0Ays = %7B%7D%0Afs = %7B%7D%0Ass = %7B%7D%0Agrid = %7B%7D%0A%0Adef checkdic(xs, x):%0A if x not in xs:%0A xs%5Bx%5D = set()%0A%0Adef checkempty(xs, x):%0A if x not in xs:%0A return 1%0A if len(xs%5Bx%5D) == 0:%0A return 1%0A return 0%0A%0Adef remove(x, y):%0A if x not in grid:%0A return%0A if y not in grid%5Bx%5D:%0A return%0A grid%5Bx%5D.remove(y)%0A xs%5Bx%5D.remove((x,y))%0A ys%5By%5D.remove((x,y))%0A fs%5Bx+y%5D.remove((x,y))%0A ss%5Bx-y%5D.remove((x,y))%0A%0Aclass Solution:%0A def gridIllumination(self, N: int, lamps: List%5BList%5Bint%5D%5D, queries: List%5BList%5Bint%5D%5D) -%3E List%5Bint%5D:%0A global xs, ys, fs, ss, grid%0A xs = %7B%7D%0A ys = %7B%7D%0A fs = %7B%7D%0A ss = %7B%7D%0A grid = %7B%7D%0A n = len(lamps)%0A for l in lamps:%0A x = l%5B0%5D%0A y = l%5B1%5D%0A if x not in grid:%0A grid%5Bx%5D = %7By%7D%0A else:%0A grid%5Bx%5D.add(y)%0A checkdic(xs, x)%0A checkdic(ys, y)%0A checkdic(fs, x + y)%0A checkdic(ss, x - y)%0A xs%5Bx%5D.add((x,y))%0A ys%5By%5D.add((x,y))%0A fs%5Bx+y%5D.add((x,y))%0A ss%5Bx-y%5D.add((x,y))%0A ans = %5B%5D%0A for q in queries:%0A x = q%5B0%5D%0A y = q%5B1%5D%0A tmp = checkempty(xs, x) and checkempty(ys, y) and checkempty(fs, x+y) and checkempty(ss, x-y)%0A if tmp:%0A ans.append(0)%0A else:%0A ans.append(1)%0A remove(x, y-1)%0A remove(x, y)%0A remove(x, y+1)%0A remove(x-1, y-1)%0A remove(x-1, y)%0A remove(x-1, y+1)%0A remove(x+1, y-1)%0A remove(x+1, y)%0A remove(x+1, y+1)%0A %0A return ans%0A %0A
|
|
98aee2af9aa3f7dcc75969f1ec3118c40539793e
|
Add clone of Haskell version
|
pandoc-include-code.py
|
pandoc-include-code.py
|
Python
| 0
|
@@ -0,0 +1,2179 @@
+#! /usr/bin/env python3%0A%0Afrom sys import stdout, stderr, exit%0Aimport json%0A%0A%0Adef walktransform(tree):%0A if isinstance(tree, list):%0A return %5Bwalktransform(subtree)%0A for subtree%0A in tree%5D%0A elif not isinstance(tree, dict):%0A exit('Unsupported AST node', type(tree))%0A elif isinstance(tree, dict):%0A if tree.get('t') == 'CodeBlock':%0A (_, _, meta, *_), code = tree.get('c', %5B%5BNone, None, None%5D, ''%5D)%0A if code.strip():%0A breakpoint()%0A exit('Code in block:', code, sep='%5Cn')%0A includes = %5Bv for k, v in meta if k == 'include'%5D%0A if len(includes) %3E 1:%0A exit('Too many includes', *includes)%0A elif not includes:%0A exit('No file to include', meta)%0A else:%0A with open(includes%5B0%5D) as fp:%0A code = fp.read()%0A return %7B%0A 't': 'CodeBlock',%0A 'c': %5B%0A %5B%0A '',%0A %5B%5D,%0A %5B%0A # TODO: file type%0A %5D,%0A %5D,%0A code%0A %5D,%0A %7D%0A%0A # TODO: https://github.com/owickstrom/pandoc-include-code#snippets%0A # TODO: https://github.com/owickstrom/pandoc-include-code#ranges%0A # TODO: https://github.com/owickstrom/pandoc-include-code#dedent%0A # TODO: https://github.com/owickstrom/pandoc-include-code#adding-base-url-for-all-codeblock-links # noqa%0A%0A%0Aif __name__ == '__main__':%0A from argparse import ArgumentParser, FileType%0A argument_parser = ArgumentParser()%0A argument_parser.add_argument('ast', type=FileType('r'), default='-')%0A args = argument_parser.parse_args()%0A%0A ast = json.load(args.ast)%0A if ast%5B'pandoc-api-version'%5D != (1, 22):%0A print('Unsupported Pandoc API version',%0A '.'.join(map(str, ast%5B'pandoc-api-version'%5D)) + '.',%0A 'Use at own risk.',%0A file=stderr)%0A json.dump(walktransform(ast%5B'blocks'%5D), stdout)%0A
|
|
70a6553d9323b3522e492c414b67e76111519368
|
Add file to create all files to school census.
|
scripts/data_download/school_census/create_all_files.py
|
scripts/data_download/school_census/create_all_files.py
|
Python
| 0
|
@@ -0,0 +1,784 @@
+import os%0Aimport commands%0Aimport time%0Aimport logging%0Aimport sys%0A%0Aif len(sys.argv) != 3 or (sys.argv%5B1:%5D%5B0%5D not in %5B'pt', 'en'%5D):%0A print %22ERROR! Use:%5Cn python scripts/data_download/school_census/create_files.py en/pt output_path%5Cn%22%0A exit()%0A%0Alogging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv%5B2%5D,str(sys.argv%5B0%5D.split('/')%5B2%5D) + '-all-data-download.log' )),level=logging.DEBUG)%0A%0Afor year in range(2007, 2016):%0A logging.info(%22python scripts/data_download/higher_education/create_files.py %22+str(sys.argv%5B1%5D)+%22 %22+str(sys.argv%5B2%5D)+%22 %22+ str(year) + %22%5Cn%22)%0A ret = commands.getoutput(%22python scripts/data_download/school_census/create_files.py %22+str(sys.argv%5B1%5D)+%22 %22+str(sys.argv%5B2%5D)+%22 %22+ str(year))%0A logging.info(str(ret) + %22%5CnYear: %22 + str(year) + %22 ok =D%5Cn%5Cn%22)
|
|
e8d596ffc074dea152a2d714e9f68bb9e5c75eaa
|
change serviceClass from CharField to ForeignKey
|
planetstack/core/models/slice.py
|
planetstack/core/models/slice.py
|
import os
from django.db import models
from core.models import PlCoreBase
from core.models import Site
from core.models import User
from core.models import Role
from core.models import DeploymentNetwork
# Create your models here.
class Slice(PlCoreBase):
tenant_id = models.CharField(max_length=200, help_text="Keystone tenant id")
name = models.CharField(unique=True, help_text="The Name of the Slice", max_length=80)
enabled = models.BooleanField(default=True, help_text="Status for this Slice")
omf_friendly = models.BooleanField()
description=models.TextField(blank=True,help_text="High level description of the slice and expected activities", max_length=1024)
slice_url = models.URLField(blank=True, max_length=512)
site = models.ForeignKey(Site, related_name='slices', help_text="The Site this Node belongs too")
network_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum network")
router_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum router id")
subnet_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum subnet id")
SVC_CLASS_CHOICES = (('besteffort', 'Best Effort'), ('silver', 'Silver'), ('gold','Gold'))
serviceClass = models.CharField(verbose_name="Service Class",default="besteffort",help_text="The Service Class of this slice", max_length=30, choices=SVC_CLASS_CHOICES)
def __unicode__(self): return u'%s' % (self.name)
def save(self, *args, **kwds):
if not hasattr(self, 'os_manager'):
from openstack.manager import OpenStackManager
setattr(self, 'os_manager', OpenStackManager())
self.os_manager.save_slice(self)
super(Slice, self).save(*args, **kwds)
def delete(self, *args, **kwds):
if not hasattr(self, 'os_manager'):
from openstack.manager import OpenStackManager
setattr(self, 'os_manager', OpenStackManager())
self.os_manager.delete_slice(self)
super(Slice, self).delete(*args, **kwds)
class SliceMembership(PlCoreBase):
user = models.ForeignKey('User', related_name='slice_memberships')
slice = models.ForeignKey('Slice', related_name='slice_memberships')
role = models.ForeignKey('Role')
def __unicode__(self): return u'%s %s %s' % (self.slice, self.user, self.role)
def save(self, *args, **kwds):
if not hasattr(self, 'os_manager'):
from openstack.manager import OpenStackManager
setattr(self, 'os_manager', OpenStackManager())
if self.os_manager.driver:
self.os_manager.driver.add_user_role(self.user.kuser_id, self.slice.tenant_id, self.role.role_type)
super(SliceMembership, self).save(*args, **kwds)
def delete(self, *args, **kwds):
if not hasattr(self, 'os_manager'):
from openstack.manager import OpenStackManager
setattr(self, 'os_manager', OpenStackManager())
if self.os_manager.driver:
self.os_manager.driver.delete_user_role(self.user.kuser_id, self.slice.tenant_id, self.role.role_type)
super(SliceMembership, self).delete(*args, **kwds)
|
Python
| 0
|
@@ -195,16 +195,53 @@
tNetwork
+%0Afrom core.models import ServiceClass
%0A%0A# Crea
@@ -1196,272 +1196,124 @@
-SVC_CLASS_CHOICES = (('besteffort', 'Best Effort'), ('silver', 'Silver'), ('gold','Gold'))%0A serviceClass = models.CharField(verbose_name=%22
+serviceClass = models.ForeignKey(
Service
-
Class
-%22,default=%22besteffort%22,help_text=%22The
+, related_name = %22slices%22, null=True, default=
Service
-
Class
- of this slice%22, max_length=30, choices=SVC_CLASS_CHOICES)%0A
+.get_default)
%0A%0A
@@ -1393,32 +1393,336 @@
*args, **kwds):%0A
+ if self.serviceClass is None:%0A # We allowed None=True for serviceClass because Django evolution%0A # will fail unless it is allowed. But, we we really don't want it to%0A # ever save None, so fix it up here.%0A self.serviceClass = ServiceClass.get_default()%0A
if not h
|
91b58112f1c83048511fdab09f9aad58351eb991
|
add new package (#23573)
|
var/spack/repos/builtin/packages/py-pycocotools/package.py
|
var/spack/repos/builtin/packages/py-pycocotools/package.py
|
Python
| 0
|
@@ -0,0 +1,792 @@
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass PyPycocotools(PythonPackage):%0A %22%22%22Official APIs for the MS-COCO dataset.%22%22%22%0A%0A homepage = %22https://github.com/cocodataset/cocoapi%22%0A pypi = %22pycocotools/pycocotools-2.0.2.tar.gz%22%0A%0A version('2.0.2', sha256='24717a12799b4471c2e54aa210d642e6cd4028826a1d49fcc2b0e3497e041f1a')%0A%0A depends_on('python', type=('build', 'link', 'run'))%0A depends_on('py-setuptools@18.0:', type='build')%0A depends_on('py-cython@0.27.3:', type='build')%0A depends_on('py-numpy', type=('build', 'link', 'run'))%0A depends_on('py-matplotlib@2.1.0:', type=('build', 'run'))%0A
|
|
ba169e32c41a1ea531526c80e4652ede4e7621e4
|
Allow type inference tracking with dynamic loading.
|
Wrapping/Generators/Python/itkTypes.py
|
Wrapping/Generators/Python/itkTypes.py
|
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
from typing import Dict, Any
# noinspection PyPep8Naming
class itkCType:
__c_types__: Dict[str, Any] = {}
__c_types_for_dtype__: Dict[str, Any] = {}
def __init__(self, name: str, short_name: str, np_dtype=None):
self.name = name
self.short_name = short_name
self.dtype = np_dtype
itkCType.__c_types__[self.name] = self
if np_dtype:
itkCType.__c_types_for_dtype__[np_dtype] = self
def __del__(self):
try:
del itkCType.__c_types__[self.name]
except KeyError:
pass
def __repr__(self):
return "<itkCType %s>" % self.name
@staticmethod
def GetCType(name):
"""Get the type corresponding to the provided C primitive type name."""
aliases: Dict[str, str] = {
"short": "signed short",
"int": "signed int",
"long": "signed long",
"long long": "signed long long",
}
desired_name: str = aliases.get(name, name)
try:
return itkCType.__c_types__[desired_name]
except KeyError:
return None
@staticmethod
def GetCTypeForDType(np_dtype):
"""Get the type corresponding to the provided numpy.dtype."""
try:
return itkCType.__c_types_for_dtype__[np_dtype]
except KeyError:
return None
@staticmethod
def initialize_c_types_once():
"""
This function is intended to be run only one time
"""
import os
import numpy as np
_F = itkCType("float", "F", np.float32)
_D = itkCType("double", "D", np.float64)
_UC = itkCType("unsigned char", "UC", np.uint8)
_US = itkCType("unsigned short", "US", np.uint16)
_UI = itkCType("unsigned int", "UI", np.uint32)
if os.name == "nt":
_UL = itkCType("unsigned long", "UL", np.uint32)
_SL = itkCType("signed long", "SL", np.int32)
_LD = itkCType("long double", "LD")
else:
_UL = itkCType("unsigned long", "UL", np.uint64)
_SL = itkCType("signed long", "SL", np.int64)
_LD = itkCType("long double", "LD", np.float128)
_ULL = itkCType("unsigned long long", "ULL", np.uint64)
_SC = itkCType("signed char", "SC", np.int8)
_SS = itkCType("signed short", "SS", np.int16)
_SI = itkCType("signed int", "SI", np.int32)
_SLL = itkCType("signed long long", "SLL", np.int64)
_B = itkCType("bool", "B", np.bool)
return _F, _D, _UC, _US, _UI, _UL, _SL, _LD, _ULL, _SC, _SS, _SI, _SLL, _B
(
F,
D,
UC,
US,
UI,
UL,
SL,
LD,
ULL,
SC,
SS,
SI,
SLL,
B,
) = itkCType.initialize_c_types_once()
del Dict, Any
|
Python
| 0
|
@@ -752,38 +752,8 @@
*/%0A%0A
-from typing import Dict, Any%0A%0A
%0A# n
@@ -789,24 +789,128 @@
s itkCType:%0A
+ # import locally to facilitate dynamic loading in itk/__init__.py%0A from typing import Dict, Any%0A%0A
__c_type
@@ -1504,24 +1504,131 @@
Type(name):%0A
+ # import locally to facilitate dynamic loading in itk/__init__.py%0A from typing import Dict%0A%0A
%22%22%22G
@@ -3715,19 +3715,4 @@
e()%0A
-%0Adel Dict, Any%0A
|
638ee09f0f2958a955fbad42368ffc6bb2a2688a
|
Add minimal REST API script based on flask
|
pipeline/scripts/bb_pipeline_api.py
|
pipeline/scripts/bb_pipeline_api.py
|
Python
| 0
|
@@ -0,0 +1,1462 @@
+#!/usr/bin/env python3%0A%0Afrom tempfile import NamedTemporaryFile%0Aimport json%0Afrom threading import Lock%0A%0Aimport numpy as np%0Afrom flask import Flask, request%0Afrom scipy.misc import imread%0Afrom pipeline import Pipeline%0Afrom pipeline.objects import Image, Candidates, Saliencies, IDs%0Afrom pipeline.pipeline import get_auto_config%0A%0Aapp = Flask(__name__)%0A%0A%0Adef init_pipeline():%0A pipeline = Pipeline(%5BImage%5D,%0A %5BCandidates, Saliencies, IDs%5D,%0A **get_auto_config())%0A return pipeline%0A%0Apipeline = init_pipeline()%0Apipeline_lock = Lock()%0A%0A%0Adef jsonify(instance):%0A if isinstance(instance, np.ndarray):%0A return instance.tolist()%0A return instance%0A%0A%0Adef process_image(image):%0A with pipeline_lock:%0A results = pipeline(%5Bimage%5D)%0A return json.dumps(dict(%5B(k.__name__, jsonify(v)) for k, v in%0A results.items()%5D), ensure_ascii=False)%0A%0A%0A@app.route('/process', methods=%5B'POST'%5D)%0Adef api_message():%0A print('Retrieving process request')%0A if request.headers%5B'Content-Type'%5D == 'application/octet-stream':%0A try:%0A with NamedTemporaryFile(delete=True) as f:%0A f.write(request.data)%0A image = imread(f)%0A return process_image(image)%0A except Exception as err:%0A return '%7B%7D'.format(err)%0A else:%0A return %22415 Unsupported Media Type%22%0A%0Aif __name__ == '__main__':%0A app.run(host='0.0.0.0', port=10000)%0A
|
|
427caaa998ea03bf80a00aaf90833eb910cf909d
|
Add migration file
|
webapp/apps/taxbrain/migrations/0061_auto_20171220_1859.py
|
webapp/apps/taxbrain/migrations/0061_auto_20171220_1859.py
|
Python
| 0.000001
|
@@ -0,0 +1,1125 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0Aimport webapp.apps.taxbrain.models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('taxbrain', '0060_auto_20171219_2153'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='taxsaveinputs',%0A name='ID_RealEstate_crt',%0A field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),%0A ),%0A migrations.AddField(%0A model_name='taxsaveinputs',%0A name='ID_RealEstate_crt_cpi',%0A field=models.NullBooleanField(default=None),%0A ),%0A migrations.AddField(%0A model_name='taxsaveinputs',%0A name='ID_StateLocalTax_crt',%0A field=webapp.apps.taxbrain.models.CommaSeparatedField(default=None, max_length=200, null=True, blank=True),%0A ),%0A migrations.AddField(%0A model_name='taxsaveinputs',%0A name='ID_StateLocalTax_crt_cpi',%0A field=models.NullBooleanField(default=None),%0A ),%0A %5D%0A
|
|
80d75bad57c8be1b08fbb2129bb0511c633446e2
|
Create CertPaIT.py
|
plugins/feeds/public/CertPaIT.py
|
plugins/feeds/public/CertPaIT.py
|
Python
| 0
|
@@ -0,0 +1,1131 @@
+import logging%0Afrom datetime import datetime, timedelta%0Afrom core.observables import Hash%0Afrom core.feed import Feed%0Afrom core.errors import ObservableValidationError%0A%0Aclass CertPaIt(Feed):%0A%0A default_values = %7B%0A %22frequency%22: timedelta(minutes=30),%0A %22name%22: %22CertPaIT%22,%0A %22source%22 : %22https://infosec.cert-pa.it/analyze/submission.rss%22,%0A %22description%22: %22This feed contains data from infosec.cert-pa.it%22,%0A %7D%0A%0A def update(self):%0A for item in self.update_xml('item', %5B%22title%22, %22link%22%5D):%0A self.analyze(item)%0A%0A def analyze(self, item):%0A md5 = item%5B'title'%5D.replace(%22MD5: %22, %22%22)%0A context = %7B%7D%0A context%5B'date_added'%5D = datetime.now()%0A context%5B'source'%5D = self.name%0A context%5B'url'%5D = item%5B'link'%5D%0A%0A try:%0A if md5:%0A hash_data = Hash.get_or_create(value=md5)%0A if hash_data.new is True or self.name not in hash_data.sources:%0A hash_data.add_context(context)%0A hash_data.add_source(self.name)%0A%0A except ObservableValidationError as e:%0A logging.error(e)%0A
|
|
90a467a849bb05cd0922ca0808279bf009657150
|
Create reverse_words.py
|
Google_Code_Jam/2010_Africa/Qualification_Round/B/reverse_words.py
|
Google_Code_Jam/2010_Africa/Qualification_Round/B/reverse_words.py
|
Python
| 0.999848
|
@@ -0,0 +1,690 @@
+#!/usr/bin/python -tt%0A%0A%22%22%22Solves problem B from Google Code Jam Qualification Round Africa 2010%0A(https://code.google.com/codejam/contest/351101/dashboard#s=p1)%0A%22Reverse Words%22%0A%22%22%22%0A%0Aimport sys%0A%0Adef main():%0A %22%22%22Reads problem data from stdin and prints answers to stdout.%0A%0A Args:%0A None%0A%0A Returns:%0A Nothing%0A %22%22%22%0A%0A lines = sys.stdin.read().splitlines()%0A num_test_cases = int(lines%5B0%5D)%0A test_cases = lines%5B1:%5D%0A assert len(test_cases) == num_test_cases%0A i = 1%0A for test_case in test_cases:%0A words = test_case.split()%0A words.reverse()%0A print 'Case #%25d:' %25 (i,), ' '.join(words)%0A i += 1%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
3155a8ab725c1b1535a99229f31008587ceb3e64
|
Add continuous finite test
|
HARK/ConsumptionSaving/tests/test_ConsRiskyContribModel.py
|
HARK/ConsumptionSaving/tests/test_ConsRiskyContribModel.py
|
Python
| 0.000178
|
@@ -0,0 +1,2008 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0ACreated on Tue Jan 26 10:06:51 2021%0A%0A@author: Mateo%0A%22%22%22%0A%0Aimport unittest%0Afrom copy import copy%0Aimport numpy as np%0Afrom HARK.ConsumptionSaving.ConsRiskyAssetModel import (%0A RiskyContribConsumerType,%0A init_riskyContrib%0A)%0A%0Aclass test_(unittest.TestCase):%0A %0A def setUp(self):%0A %0A # A set of finite parameters%0A self.par_finite = init_riskyContrib.copy()%0A%0A # Four period model%0A self.par_finite%5B'PermGroFac'%5D = %5B2.0, 1.0, 0.1, 1.0%5D%0A self.par_finite%5B'PermShkStd'%5D = %5B0.1, 0.1, 0.0, 0.0%5D%0A self.par_finite%5B'TranShkStd'%5D = %5B0.2, 0.2, 0.0, 0.0%5D%0A self.par_finite%5B'AdjustPrb'%5D = %5B0.5, 0.5, 1.0, 1.0%5D%0A self.par_finite%5B'tau'%5D = %5B0.1, 0.1, 0.0, 0.0%5D%0A self.par_finite%5B'LivPrb'%5D = %5B1.0, 1.0, 1.0, 1.0%5D%0A self.par_finite%5B'T_cycle'%5D = 4%0A self.par_finite%5B'T_retire'%5D = 0%0A self.par_finite%5B'T_age'%5D = 4%0A %0A # Adjust discounting and returns distribution so that they make sense in a %0A # 4-period model%0A self.par_finite%5B'DiscFac'%5D = 0.95**15%0A self.par_finite%5B'Rfree'%5D = 1.03**15%0A self.par_finite%5B'RiskyAvg'%5D = 1.08**15 # Average return of the risky asset%0A self.par_finite%5B'RiskyStd'%5D = 0.20*np.sqrt(15) # Standard deviation of (log) risky returns%0A %0A def test_finite_cont_share(self):%0A %0A cont_params = copy(self.par_finite)%0A cont_params%5B'DiscreteShareBool'%5D = False%0A cont_params%5B'vFuncBool'%5D = False%0A %0A fin_cont_agent = RiskyContribConsumerType(**cont_params)%0A fin_cont_agent.solve()%0A %0A self.assertAlmostEqual(%0A fin_cont_agent.solution%5B0%5D.stageSols%5B'Reb'%5D.DFuncAdj(3,4), -0.87757204%0A )%0A self.assertAlmostEqual(%0A fin_cont_agent.solution%5B0%5D.stageSols%5B'Sha'%5D.ShareFuncAdj(5,0.1), 0.10846904%0A )%0A self.assertAlmostEqual(%0A fin_cont_agent.solution%5B0%5D.stageSols%5B'Cns'%5D.cFunc(3,4,0.1), 2.46055802%0A )%0A
|
|
0f6961c10def1f1343c6c31d117e5ca87cefd4b7
|
add openvas_vulns migration
|
alembic/versions/506c8e35ba7c_create_openvas_vuln_table.py
|
alembic/versions/506c8e35ba7c_create_openvas_vuln_table.py
|
Python
| 0
|
@@ -0,0 +1,740 @@
+%22%22%22create openvas_vuln table%0A%0ARevision ID: 506c8e35ba7c%0ARevises: 13b7c3d4c802%0ACreate Date: 2017-07-21 12:19:35.711173%0A%0A%22%22%22%0Afrom alembic import op%0Aimport sqlalchemy as sa%0Aimport datetime%0A%0A%0Adef _get_date():%0A return datetime.datetime.now()%0A%0A# revision identifiers, used by Alembic.%0Arevision = '506c8e35ba7c'%0Adown_revision = '13b7c3d4c802'%0Abranch_labels = None%0Adepends_on = None%0A%0A%0Adef upgrade():%0A op.create_table('openvas_vulns',%0A sa.Column('id', sa.Integer, primary_key=True, nullable=False),%0A sa.Column('name', sa.Text, unique=True, nullable=False),%0A sa.Column('created_at', sa.TIMESTAMP(timezone=False), default=_get_date))%0A%0A%0Adef downgrade():%0A op.drop_table('openvas_vulns')%0A
|
|
6ce0d934cfe8b9e93a833ff1d31915ffd14c643d
|
add new package (#25526)
|
var/spack/repos/builtin/packages/py-pydantic/package.py
|
var/spack/repos/builtin/packages/py-pydantic/package.py
|
Python
| 0
|
@@ -0,0 +1,789 @@
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass PyPydantic(PythonPackage):%0A %22%22%22Data validation and settings management using Python type hinting.%22%22%22%0A%0A homepage = %22https://github.com/samuelcolvin/pydantic%22%0A pypi = %22pydantic/pydantic-1.8.2.tar.gz%22%0A%0A version('1.8.2', sha256='26464e57ccaafe72b7ad156fdaa4e9b9ef051f69e175dbbb463283000c05ab7b')%0A%0A depends_on('python@3.6.1:', type=('build', 'run'))%0A depends_on('py-setuptools', type='build')%0A depends_on('py-dataclasses@0.6:', when='%5Epython@:3.6', type=('build', 'run'))%0A depends_on('py-typing-extensions@3.7.4.3:', type=('build', 'run'))%0A
|
|
f363864f7f6ad9da45cb3053816d500838821a27
|
add new package (#27093)
|
var/spack/repos/builtin/packages/r-posterior/package.py
|
var/spack/repos/builtin/packages/r-posterior/package.py
|
Python
| 0
|
@@ -0,0 +1,1685 @@
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass RPosterior(RPackage):%0A %22%22%22Tools for Working with Posterior Distributions.%0A%0A Provides useful tools for both users and developers of packages for%0A fitting Bayesian models or working with output from Bayesian models. The%0A primary goals of the package are to: (a) Efficiently convert between many%0A different useful formats of draws (samples) from posterior or prior%0A distributions. (b) Provide consistent methods for operations commonly%0A performed on draws, for example, subsetting, binding, or mutating draws.%0A (c) Provide various summaries of draws in convenient formats. (d) Provide%0A lightweight implementations of state of the art posterior inference%0A diagnostics. References: Vehtari et al. (2021) %3Cdoi:10.1214/20-BA1221%3E.%22%22%22%0A%0A homepage = %22https://mc-stan.org/posterior/%22%0A cran = %22posterior%22%0A%0A version('1.1.0', sha256='eff6262dbcc1bf18337f535b0c75ba2fe360322e8b170c466e24ed3ee76cf4d2')%0A%0A depends_on('r@3.2.0:', type=('build', 'run'))%0A depends_on('r-abind', type=('build', 'run'))%0A depends_on('r-checkmate', type=('build', 'run'))%0A depends_on('r-rlang@0.4.7:', type=('build', 'run'))%0A depends_on('r-tibble@3.0.0:', type=('build', 'run'))%0A depends_on('r-vctrs', type=('build', 'run'))%0A depends_on('r-tensora', type=('build', 'run'))%0A depends_on('r-pillar', type=('build', 'run'))%0A depends_on('r-distributional', type=('build', 'run'))%0A depends_on('r-matrixstats', type=('build', 'run'))%0A
|
|
1596d091183d89c703e67555e81f24722dc0d8a2
|
add import script for Chelmsford
|
polling_stations/apps/data_collection/management/commands/import_chelmsford.py
|
polling_stations/apps/data_collection/management/commands/import_chelmsford.py
|
Python
| 0
|
@@ -0,0 +1,374 @@
+from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter%0A%0Aclass Command(BaseXpressDemocracyClubCsvImporter):%0A council_id = 'E07000070'%0A addresses_name = 'Democracy_Club__04May2017 (1).tsv'%0A stations_name = 'Democracy_Club__04May2017 (1).tsv'%0A elections = %5B'local.essex.2017-05-04'%5D%0A csv_delimiter = '%5Ct'%0A csv_encoding = 'latin-1'%0A
|
|
94e83a48d3700cdc7c9bb6bd9a14860d2665c655
|
Add custom roster module
|
_modules/roster.py
|
_modules/roster.py
|
Python
| 0
|
@@ -0,0 +1,833 @@
+# import python libraries%0Aimport logging%0A%0A# import salt libraries%0Aimport salt.utils.files%0Aimport salt.utils.yaml%0A%0Alog = logging.getLogger(__name__)%0A%0Adef remove(roster, name):%0A '''%0A remove an entry from the salt-ssh roster%0A '''%0A with salt.utils.files.fopen(roster, 'r') as conf:%0A roster_txt = conf.read()%0A roster_yaml = salt.utils.yaml.safe_load(roster_txt)%0A try:%0A del roster_yaml%5Bname%5D%0A except KeyError:%0A log.error('%7B0%7D does not exist in roster file %7B1%7D'.format(name, roster))%0A return False%0A%0A try:%0A with salt.utils.files.fopen(roster, 'w+') as conf:%0A salt.utils.yaml.safe_dump(roster_yaml, conf, default_flow_style=False)%0A except (IOError, OSError):%0A log.error('Unable to delete %7B0%7D from roster file %7B1%7D'.format(name, roster))%0A return False%0A%0A
|
|
8a043a2d3a9517c5eb84aea3e9916419f6136e23
|
Add tests for IndexAbstractor.
|
test/lib/indexabstractor.py
|
test/lib/indexabstractor.py
|
Python
| 0
|
@@ -0,0 +1,2324 @@
+%22%22%22 The PyUnit test framework for the indexabstractor. %22%22%22%0A%0Aimport unittest%0A%0Afrom lib import parser%0Afrom lib.indexabstractor import *%0A%0Aclass indexTestCase(unittest.TestCase):%0A def setUp(self):%0A self.sumo = parser.Ontology('data/Merge.kif', name='SUMO')%0A self.kif = parser.kifparse(self.sumo)%0A self.indexabstractor = IndexAbstractor()%0A%0A def test0Normalize(self):%0A self.assertEqual(normalize('t.erm '), 'term')%0A self.assertEqual(normalize(' TeRM '), 'term')%0A self.assertNotEqual(normalize('t erm '), 'term')%0A%0A def test1BuildIndex(self):%0A self.indexabstractor.update_index(self.kif)%0A self.assertEqual(self.indexabstractor.ontologies, %7Bself.sumo%7D)%0A self.assertEqual(self.indexabstractor.root, self.kif)%0A assert self.sumo in self.indexabstractor.index%0A%0A def test2Search(self):%0A self.maxDiff = None%0A self.indexabstractor.update_index(self.kif)%0A self.assertEqual(self.indexabstractor.search('Plasma'),%0A self.indexabstractor.search('plasma'))%0A self.assertEqual(self.indexabstractor.search('ValidDeductiveArgument'),%0A self.indexabstractor.search(' valIddedUctiVeargument '))%0A self.assertNotEqual(self.indexabstractor.search('ValidDeductiveArgument'),%0A self.indexabstractor.search('InvalidDeductiveArgument'))%0A result = self.indexabstractor.search(' ContentbearingObJect')%0A assert self.sumo in result%0A definition = result%5Bself.sumo%5D%0A self.assertEqual(sorted(definition),%0A sorted(%5B'( relatedInternalConcept ContentBearingObject containsInformation )',%0A '( subclass ContentBearingObject CorpuscularObject )',%0A '( subclass ContentBearingObject ContentBearingPhysical )',%0A '( documentation ContentBearingObject EnglishLanguage %22Any &%25SelfConnectedObject that expressescontent. This content may be a &%25Proposition, e.g. 
when the &%25ContentBearingObjectis a &%25Sentence or &%25Text, or it may be a representation of an abstract orphysical object, as with an &%25Icon, a &%25Word or a &%25Phrase.%22 )'%5D))%0A%0AindexTestSuit = unittest.makeSuite(indexTestCase, 'test')%0A%0Aif __name__ == %22__main__%22:%0A runner = unittest.TextTestRunner()%0A runner.run(indexTestSuit)%0A
|
|
ee1358dd0e08e6a21d6431ab37858ef8a6c774dd
|
Add `[link to issue]` support to Jira-Github integration (#21376)
|
Utils/github_workflow_scripts/jira_integration_scripts/link_pr_to_jira_issue.py
|
Utils/github_workflow_scripts/jira_integration_scripts/link_pr_to_jira_issue.py
|
import argparse
import re
import sys
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Regex fragments used to recognise Jira issue links inside a PR description.
# Raw strings are used so that "\s" and "\/" reach the regex engine verbatim
# instead of being invalid Python string escapes (a DeprecationWarning today,
# a SyntaxError in future Python versions). The patterns themselves are
# unchanged.
JIRA_HOST_FOR_REGEX = r"https:\/\/jira-hq.paloaltonetworks.local\/browse\/"
# Captures the issue key, e.g. "ABC-123". NOTE: the extra ")" deliberately
# closes the link-capturing group opened in the two patterns below, so each
# match yields a (full link, issue key) tuple from re.findall.
JIRA_KEY_REGEX = r"([A-Z][A-Z0-9]+-[0-9]+))\s?"
# "Fixes: <link>" marks an issue the PR closes; "Relates: <link>" only links it.
JIRA_FIXED_ISSUE_REGEX = rf"[fF]ixes:\s?({JIRA_HOST_FOR_REGEX}{JIRA_KEY_REGEX}"
JIRA_RELATED_ISSUE_REGEX = rf"[rR]elates:\s?({JIRA_HOST_FOR_REGEX}{JIRA_KEY_REGEX}"
# Name of the Generic Webhook integration instance triggered on Content Gold.
GENERIC_WEBHOOK_NAME = "GenericWebhook_link_pr_to_jira"
def arguments_handler():
    """Build and parse the CLI arguments for linking a GitHub PR to Jira.

    Returns:
        argparse.Namespace: the parsed arguments (all values are strings).
    """
    arg_parser = argparse.ArgumentParser(
        description='Linking GitHub PR to Jira Issue.')
    # (short flag, long flag, help text) for every supported option.
    option_specs = (
        ('-l', '--pr_link', 'The PR url.'),
        ('-n', '--pr_num', 'The PR number.'),
        ('-t', '--pr_title', 'The PR Title.'),
        ('-b', '--pr_body', 'The content of the PR description.'),
        ('-m', '--is_merged', 'Boolean. Whether the PR was merged or not.'),
        ('-u', '--username', 'The instance username.'),
        ('-s', '--password', 'The instance password.'),
        ('-url', '--url', 'The instance url.'),
    )
    for short_flag, long_flag, help_text in option_specs:
        arg_parser.add_argument(short_flag, long_flag, help=help_text)
    return arg_parser.parse_args()
def find_fixed_issue_in_body(body_text, is_merged):
    """Collect the Jira issues referenced in a PR description.

    Issues written as ``Fixes: <jira url>`` are always returned; issues
    written as ``Relates: <jira url>`` are only returned while the PR is
    still open. Once the PR is merged, only the issues it should close are
    reported (the related links are assumed to have been attached while the
    PR was being edited).

    Returns:
        list of ``{"link": <jira url>, "id": <issue key>}`` dicts.
    """
    def _as_records(matches):
        # re.findall yields one (link, issue_id) tuple per match because the
        # patterns contain exactly two capture groups.
        return [{"link": match_link, "id": match_id}
                for match_link, match_id in matches]

    issues = _as_records(re.findall(JIRA_FIXED_ISSUE_REGEX, body_text))
    if not is_merged:
        # Open PR: also report the merely-related issues so they get linked.
        issues += _as_records(re.findall(JIRA_RELATED_ISSUE_REGEX, body_text))
    return issues
def trigger_generic_webhook(options):
    """Posts the PR's linked Jira issues to the Content Gold generic webhook.

    Reads the PR metadata from the parsed CLI `options`, extracts the Jira
    issues referenced in the PR body, and POSTs a payload to the webhook at
    `options.url` (basic-auth with `options.username`/`options.password`).
    Exits the process with status 1 when the webhook returns non-200.
    Prints progress throughout (this runs inside a GitHub workflow log).
    """
    pr_link = options.pr_link
    pr_title = options.pr_title
    pr_body = options.pr_body
    is_merged = options.is_merged
    pr_num = options.pr_num
    username = options.username
    password = options.password
    instance_url = options.url

    print(f"Detected Pr: {pr_title=}, {pr_link=}, {pr_body=}")

    issues_in_pr = find_fixed_issue_in_body(pr_body, is_merged)
    if not issues_in_pr:
        # Nothing to link: not an error, just report and stop.
        print("No linked issues were found in PR. Make sure you correctly linked issues.")
        return
    print(f"found issues in PR: {issues_in_pr}")

    # Payload shape expected by the Generic Webhook integration on Gold.
    body = {
        "name": GENERIC_WEBHOOK_NAME,
        "raw_json": {
            "PullRequestNum": pr_num,
            "closeIssue": is_merged,  # whether to close the fixed issue in Jira
            "PullRequestLink": pr_link,  # will be used to add to jira issue's fields
            "PullRequestTitle": f"{pr_title} ({pr_link})",  # will be used in comment of attaching jira issue.
            "JiraIssues": issues_in_pr
        },
    }
    print(body)
    # post to Content Gold
    res = requests.post(instance_url, json=body, auth=(username, password))
    if res.status_code != 200:
        print(
            f"Trigger playbook for Linking GitHub PR to Jira Issue failed. Post request to Content"
            f" Gold has status code of {res.status_code}")
        sys.exit(1)

    # On success Gold answers with a list whose first entry describes the
    # created investigation; surface its id in the workflow log.
    res_json = res.json()
    if res_json and isinstance(res_json, list):
        res_json_response_data = res.json()[0]
        if res_json_response_data:
            investigation_id = res_json_response_data.get("id")
            print(f'{investigation_id=}')
def main():
    """Script entry point: parse the CLI arguments and fire the webhook."""
    trigger_generic_webhook(arguments_handler())


if __name__ == "__main__":
    main()
|
Python
| 0
|
@@ -61,16 +61,48 @@
urllib3%0A
+from blessings import Terminal%0A%0A
%0Aurllib3
@@ -320,24 +320,26 @@
%5BfF%5Dixes:%5Cs?
+.*
(%7BJIRA_HOST_
@@ -409,16 +409,18 @@
ates:%5Cs?
+.*
(%7BJIRA_H
@@ -2713,15 +2713,53 @@
-print(%22
+t = Terminal()%0A print(f%22%7Bt.red%7DERROR:
No l
@@ -2827,27 +2827,38 @@
issues.
+%7Bt.normal%7D
%22)%0A
+%0A
return%0A%0A
@@ -2849,22 +2849,27 @@
-return
+sys.exit(1)
%0A%0A pr
|
58de77d09564a43ae1934e2cee4543bc028c25c6
|
Make android bot configuration more consistent.
|
build/android/pylib/device_settings.py
|
build/android/pylib/device_settings.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from pylib import content_settings
def ConfigureContentSettingsDict(device, desired_settings):
  """Applies a dictionary of content settings to a device.

  Many settings are documented at:
    http://developer.android.com/reference/android/provider/Settings.Global.html
    http://developer.android.com/reference/android/provider/Settings.Secure.html
    http://developer.android.com/reference/android/provider/Settings.System.html

  Many others are undocumented.

  Args:
    device: A DeviceUtils instance for the device to configure.
    desired_settings: A dict of {table: {key: value}} for all
      settings to configure.
  """
  try:
    sdk_version = int(device.old_interface.system_properties[
        'ro.build.version.sdk'])
  except ValueError:
    # Unparseable SDK version string: bail out rather than write settings
    # that may not apply to this OS build.
    logging.error('Skipping content settings configuration, unknown sdk %s',
                  device.old_interface.system_properties[
                      'ro.build.version.sdk'])
    return

  if sdk_version < 16:
    # The content-settings tables used below only exist on JB (API 16)+.
    logging.error('Skipping content settings configuration due to outdated sdk')
    return

  for table in sorted(desired_settings):
    settings = content_settings.ContentSettings(table, device)
    # Write every requested key, then log the table's resulting contents.
    for key, value in desired_settings[table].iteritems():
      settings[key] = value
    logging.info('\n%s %s', table, (80 - len(table)) * '-')
    for key, value in sorted(settings.iteritems()):
      logging.info('\t%s: %s', key, value)
# Baseline {table: {key: value}} settings applied to every bot device so test
# runs behave deterministically (no dialogs, rotation, or time drift).
# Fed to ConfigureContentSettingsDict above.
DETERMINISTIC_DEVICE_SETTINGS = {
    'com.google.settings/partner': {
        'use_location_for_services': 1,
    },

    'settings/global': {
        # Disable "auto time" and "auto time zone" to avoid network-provided time
        # to overwrite the device's datetime and timezone synchronized from host
        # when running tests later. See b/6569849.
        'auto_time': 0,
        'auto_time_zone': 0,

        # Keep the device awake on any charger (3 = AC | USB).
        'stay_on_while_plugged_in': 3,

        # Skip the "verify apps over ADB" dialog on install.
        'verifier_verify_adb_installs' : 0,
    },

    'settings/secure': {
        # Ensure that we never get random dialogs like "Unfortunately the process
        # android.process.acore has stopped", which steal the focus, and make our
        # automation fail (because the dialog steals the focus then mistakenly
        # receives the injected user input events).
        'anr_show_background': 0,

        # Ensure Geolocation is enabled and allowed for tests.
        'location_providers_allowed': 'gps,network',

        'lockscreen.disabled': 1,

        'screensaver_enabled': 0,
    },

    'settings/system': {
        # Don't want devices to accidentally rotate the screen as that could
        # affect performance measurements.
        'accelerometer_rotation': 0,

        'lockscreen.disabled': 1,

        # Turn down brightness and disable auto-adjust so that devices run cooler.
        'screen_brightness': 5,
        'screen_brightness_mode': 0,

        'user_rotation': 0,
    },
}

# Extra settings applied when the bot should run with networking disabled.
NETWORK_DISABLED_SETTINGS = {
    'settings/global': {
        'airplane_mode_on': 1,
    },
}
|
Python
| 0.000423
|
@@ -1666,24 +1666,58 @@
partner': %7B%0A
+ 'network_location_opt_in': 0,%0A
'use_loc
@@ -1764,24 +1764,56 @@
/global': %7B%0A
+ 'assisted_gps_enabled': 0,%0A%0A
# Disabl
@@ -2044,24 +2044,348 @@
_zone': 0,%0A%0A
+ 'development_settings_enabled': 1,%0A%0A # Flag for allowing ActivityManagerService to send ACTION_APP_ERROR intents%0A # on application crashes and ANRs. If this is disabled, the crash/ANR dialog%0A # will never display the %22Report%22 button.%0A # Type: int ( 0 = disallow, 1 = allow )%0A 'send_action_app_error': 0,%0A%0A
'stay_on
@@ -2472,24 +2472,117 @@
/secure': %7B%0A
+ 'allowed_geolocation_origins':%0A 'http://www.google.co.uk http://www.google.com',%0A%0A
# Ensure
@@ -3504,19 +3504,37 @@
ode_on': 1,%0A
+ 'wifi_on': 0,%0A
%7D,%0A%7D%0A
|
859d1031bc61cd4466953cbc7a5e282abff35e50
|
Create database.py
|
database.py
|
database.py
|
Python
| 0.000001
|
@@ -0,0 +1 @@
+%0A
|
|
c8d57138240e87c802b84cf0b2b01efd01c80e41
|
Create solution.py
|
hackerrank/algorithms/implementation/easy/angry_professor/py/solution.py
|
hackerrank/algorithms/implementation/easy/angry_professor/py/solution.py
|
Python
| 0.000018
|
@@ -0,0 +1,464 @@
+#!/bin/python3%0A%0Aimport sys%0A%0Adef isClassCancelled(arrivalTimes, cancellationThreshold):%0A count = 0%0A %0A for arrivalTime in arrivalTimes: %0A if arrivalTime %3C= 0:%0A count += 1%0A %0A return count %3C cancellationThreshold%0A%0At = int(input())%0A%0Afor a0 in range(t):%0A n, k = map(int, input().split())%0A a = tuple(map(int, input().split()))%0A %0A if isClassCancelled(a, k):%0A print('YES')%0A else:%0A print('NO')%0A
|
|
02183bdcd1b3e4109568f5077a6074573bbd8bf9
|
Add send_unsent.py.
|
send_unsent.py
|
send_unsent.py
|
Python
| 0
|
@@ -0,0 +1,636 @@
+#!/usr/bin/env python2%0A# -*- coding: utf8 -*-%0Aimport smtplib%0Aimport enseigner.model as model%0Aimport enseigner.emails as emails%0A%0Amails = model.Mail.all_unsent()%0A%0Ayesno = raw_input(u'Envoyer %25d mails ? ' %25 len(mails))%0A%0Aif yesno != 'yes':%0A exit(0)%0A%0Asender = emails.Sender()%0Aerrors = %5B%5D%0Afor mail in mails:%0A try:%0A sender.send(mail.recipient, mail.subject, mail.content)%0A except smtplib.SMTPException as e:%0A errors.append((mail, e))%0A else:%0A mail.set_sent()%0A%0Aprint(repr(errors))%0Awith open('/tmp/enseigner_errors.txt', 'a') as fd:%0A for error in errors:%0A fd.write('%5Cn%5Cn')%0A fd.write(repr(error))%0A
|
|
eee8b3e96f7b0c9f24e7c43483bb6d74bd8a490a
|
add proto
|
servy/proto.py
|
servy/proto.py
|
Python
| 0.000006
|
@@ -0,0 +1,592 @@
+import json%0A%0Aclass Response(object):%0A @classmethod%0A def encode(cls, content):%0A return json.dumps(content)%0A%0A @classmethod%0A def decode(cls, content):%0A return json.loads(content)%0A%0A%0Aclass Request(object):%0A @classmethod%0A def encode(cls, proc, args, kw):%0A return json.dumps(%7B%0A 'proc': proc,%0A 'args': args,%0A 'kw': kw,%0A %7D)%0A%0A @classmethod%0A def decode(cls, content):%0A content = json.loads(content)%0A return (%0A content%5B'proc'%5D,%0A content%5B'args'%5D,%0A content%5B'kw'%5D,%0A )%0A
|
|
6f8d2e724f4aafb6b8295b8b0a1f915d5f21fa38
|
fix script
|
tests/scripts/makereport.py
|
tests/scripts/makereport.py
|
# accepts 0,1, or 2 arguments. If a string starting with a number is handed in, it is assumed to be a subdirectory of the current directory to run on. If not specified, the newest build is used. Any other string is taken as the branch name for this test (or treated as mainline). Order of the arguments does not matter.
# for questions: Timo Heister
import xml.etree.ElementTree as ET
import glob
import sys
from datetime import datetime
import subprocess
class Group:
    """Pass/fail bookkeeping for one group of tests (first path component)."""

    def __init__(self, name):
        """Create an empty group called *name* with zeroed counters."""
        self.name = name
        # Totals are incremented as test results are tallied.
        self.n_tests = 0
        self.n_fail = 0
        # Names of failing tests, and per-test failure output keyed by name.
        self.fail = []
        self.fail_text = {}
class Revision:
    """Aggregated test results for a single repository revision."""

    def __init__(self):
        """Initialise an empty revision record with sentinel values."""
        # Group objects keyed by group name.
        self.groups = {}
        # -1 / '' mark "not yet parsed from the build report".
        self.number = -1
        self.name = ''
        self.n_tests = 0
        self.n_fail = 0
# Top-level script body (Python 2): parse CLI args, read the CTest XML report
# in the chosen build directory, and print one status line per test.
branch=''
args=sys.argv
args.pop(0)
dirname=""
# Arguments may appear in any order: a token starting with "20" is the build
# directory (timestamp-named), anything else is the branch name.
while len(args)>0:
    if args[0].startswith("20"): #I hope this script is not used in the year 2100
        dirname=args[0].replace('/','')
    else:
        branch=args[0].replace('/','')+'/'
    args.pop(0)

if len(glob.glob(dirname+'/Update.xml'))>0:
    #new format
    tree = ET.parse(dirname+'/Update.xml')
    name = tree.getroot().find('BuildName').text
    number = tree.getroot().find('Revision').text
    date = datetime.strptime(dirname,"%Y%m%d-%H%M")
else:
    #old format
    # Older reports keep the revision number as the trailing component of the
    # build name (with a one-character prefix stripped below).
    tree = ET.parse(dirname+'/Notes.xml')
    name = tree.getroot().attrib['BuildName']
    number = name.split('-')[-1]
    number = number[1:]
    date = datetime.strptime(dirname,"%Y%m%d-%H%M")

# Assemble the report header: revision, date, and "user@host" identity.
header = "Revision: %s"%number + "\n"
header += "Date: %s"%(date.strftime("%Y %j %F %U-%w")) + '\n'
id = subprocess.check_output(["id","-un"])+'@'+subprocess.check_output(["hostname"])
id=id.replace('\n','')
header += "Id: %s"%id

#now Test.xml:
tree = ET.parse(dirname+'/Test.xml')
root = tree.getroot()
testing = root.find('Testing')

# Bucket one report line per test, keyed by group (first path component).
# "3" marks a failure, "+" a pass.
tests={}
for test in testing.findall("Test"):
    status = test.attrib['Status']
    fail=False
    if status=="failed": fail=True
    name = test.find('Name').text
    group = name.split('/')[0]
    if fail:
        line = "%s 3 %s%s"%(date,branch,name)
    else:
        line = "%s + %s%s"%(date,branch,name)
    if group not in tests: tests[group]=[]
    tests[group].append( line )

# Emit the header followed by each group's lines, groups in sorted order.
for g in sorted(tests):
    group = tests[g]
    print header
    for l in group:
        print l
|
Python
| 0.000003
|
@@ -1042,16 +1042,133 @@
pop(0)%0A%0A
+if dirname==%22%22:%0A n=glob.glob(%22*/Build.xml%22)%0A n.sort(reverse=True)%0A dirname = n%5B0%5D.replace('/Build.xml','')%0A%0A
if len(g
@@ -1633,16 +1633,17 @@
%25H%25M%22)%0A%0A
+%0A
header =
|
afb6b73c92227a735f5ec8d97fba5ad959871202
|
Check text instead of data-page attribute
|
test/browser_testing/features/pages/closing_disclosure.py
|
test/browser_testing/features/pages/closing_disclosure.py
|
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium import webdriver
from pages.base import Base
import re
import time
class ClosingDisclosure(Base):
    """Page object for the Closing Disclosure explainer page.

    Wraps the Selenium driver held by `Base` with helpers for the tab
    navigation, the annotated disclosure image, and the expandable
    form-explainer widgets. Fixes two defects from the previous version:
    a bare ``next`` that was a no-op where ``continue`` was intended, and
    unquoted dict keys in ``_element_size`` that raised NameError.
    """

    def __init__(self, logger, directory, base_url=r'http://localhost/',
                 driver=None, driver_wait=10, delay_secs=0):
        super(ClosingDisclosure, self).__init__(logger, directory, base_url,
                                                driver, driver_wait,
                                                delay_secs)

    # check that element(s) is on the page
    # @return: element or elements if there are many
    def _element_found(self, css_selector):
        """Look up css_selector on the page.

        Returns the single matching element when exactly one matches,
        a list of elements when several match, and [] when none do.
        """
        try:
            elements = self.driver.find_elements_by_css_selector(css_selector)
            if len(elements) == 1:
                return elements[0]
            else:
                return elements
        except NoSuchElementException:
            return []

    def tab_is_found(self, tab_name):
        """Return True when the tab link for tab_name exists on the page."""
        tab_css_selector = '.tab-link__%s' % tab_name.lower()
        if self._element_found(tab_css_selector):
            return True
        return False

    def content_image_is_loaded(self):
        """Return True when a displayed disclosure image has rendered at a
        plausible size (both dimensions above 200px)."""
        # NOTE(review): _element_found returns a bare element when exactly
        # one matches; iterating that would raise — presumably pages always
        # have 0 or 2+ images. TODO confirm.
        images = self._element_found('.image-map_image')
        for image in images:
            if not image.is_displayed():
                # Bug fix: was a bare ``next`` (a no-op expression);
                # ``continue`` is the intended skip of hidden images.
                continue
            size = image.size
            if size['width'] > 200 and size['height'] > 200:
                return True
        return False

    def resize_to_mobile_size(self):
        """Shrink the browser window to a typical mobile viewport."""
        self.driver.set_window_size(360, 640)

    def expandable_explainers_are_loaded(self, tab_name):
        """Count the displayed expandable explainers for tab_name whose
        content pane actually toggles when clicked."""
        parent_css_selector = ('div.expandable__form-explainer-%s'
                               % tab_name.lower())
        elements = self._element_found(parent_css_selector)
        good_elements = 0
        for element in elements:
            # The content pane lives under the explainer's own id.
            content_css_selector = ('#%s .expandable_content'
                                    % element.get_attribute('id'))
            if (element.is_displayed() and
                    self._expandable_explainer_content_is_loaded(
                        content_css_selector, element)):
                good_elements += 1
        return good_elements

    def _expandable_explainer_content_is_loaded(self, css_selector,
                                                parent_element):
        """Click parent_element and report whether the pane at css_selector
        toggled its visibility.

        Header-only explainers have a <span> instead of a clickable button;
        their click raises WebDriverException and they count as loaded.
        """
        element = self._element_found(css_selector)
        if not element:
            return False
        original_visibility = element.is_displayed()
        # Bring the explainer into view before interacting with it.
        ActionChains(self.driver).move_to_element(parent_element).perform()
        try:
            parent_element.click()
            new_visibility = element.is_displayed()
            return original_visibility != new_visibility
        except WebDriverException:
            # sometimes elements don't have content, just the header
            # in that case the header elements are span and not buttons
            # and so are not clickable
            return True

    def _click_tab(self, tab_name):
        """Click the tab link for tab_name."""
        css_selector = '.tab-link__%s' % tab_name.lower()
        self.driver.find_element_by_css_selector(css_selector).click()

    def _element_size(self, css_selector):
        """Return the size dict of the first displayed element matching
        css_selector, or a zero-size dict when nothing usable is found."""
        element = self._element_found(css_selector)
        item = element
        if type(element) is list:
            for el in element:
                if el.is_displayed():
                    item = el
        if item and item.size:
            return item.size
        # Bug fix: keys were previously the bare names ``width``/``height``,
        # which raised NameError whenever this fallback was reached.
        return {'width': 0, 'height': 0}

    def hover_an_overlay(self):
        """Hover every displayed image-map overlay and count those whose
        target explainer does not receive the ``has-attention`` class."""
        bad_elements = 0
        elements = self._element_found('a.image-map_overlay')
        # Give the page a moment to settle before hovering.
        time.sleep(1)
        for element in elements:
            if element.is_displayed():
                ActionChains(self.driver).move_to_element(element).perform()
                anchor = element.get_attribute('href')
                # Reduce the href to its fragment, e.g. "#explainer-12".
                anchor = re.sub('^[^#]*', '', anchor)
                explainer_element = self._element_found(anchor)
                if not explainer_element:
                    bad_elements += 1
                else:
                    classes = filter(lambda x: x,
                                     explainer_element.get_attribute('class').split(' '))
                    if 'has-attention' not in classes:
                        bad_elements += 1
        return bad_elements

    def click_page(self, page_num):
        """Scroll the explainer page link whose ``data-page`` attribute
        equals page_num (a string) into view and click it."""
        elements = self.driver.find_elements_by_class_name(
            'form-explainer_page-link')
        for element in elements:
            if element.get_attribute('data-page') == page_num:
                script = "arguments[0].scrollIntoView(true);"
                self.driver.execute_script(script, element)
                element.click()
                break

    def current_page(self):
        """Return the ``data-page`` attribute of the current page marker."""
        element = self.driver.find_element_by_class_name('current-page')
        return element.get_attribute('data-page')

    def _click_nav_button(self, current_num, direction):
        """Wait for page current_num's nav button ('next' or 'prev') to be
        visible, then click it. Raises TimeoutException with a descriptive
        message when it never appears."""
        element_css = "#explain_page-" + current_num + " ." + direction + ".btn"
        msg = "Element " + element_css + " NOT found!"
        element = WebDriverWait(self.driver, self.driver_wait)\
            .until(EC.visibility_of_element_located(
                (By.CSS_SELECTOR, element_css)), msg)
        element.click()

    def click_next_page(self, current_num):
        """Advance to the next explainer page from page current_num."""
        self._click_nav_button(current_num, 'next')

    def click_prev_page(self, current_num):
        """Go back to the previous explainer page from page current_num."""
        self._click_nav_button(current_num, 'prev')
|
Python
| 0.000001
|
@@ -5142,34 +5142,12 @@
ent.
-get_attribute('data-page')
+text
%0A%0A
|
339bb5cd325c7b9c08b8a43994f55bbe1756fbde
|
validate redirect
|
signup/auth.py
|
signup/auth.py
|
Python
| 0.000001
|
@@ -0,0 +1,1930 @@
+# Copyright (c) 2014, Fortylines LLC%0A# All rights reserved.%0A#%0A# Redistribution and use in source and binary forms, with or without%0A# modification, are permitted provided that the following conditions are met:%0A#%0A# * Redistributions of source code must retain the above copyright notice,%0A# this list of conditions and the following disclaimer.%0A# * Redistributions in binary form must reproduce the above copyright notice,%0A# this list of conditions and the following disclaimer in the documentation%0A# and/or other materials provided with the distribution.%0A#%0A# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS %22AS IS%22%0A# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,%0A# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR%0A# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR%0A# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,%0A# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,%0A# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;%0A# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,%0A# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR%0A# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF%0A# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.%0A%0Aimport urlparse%0A%0Afrom django.conf import settings%0Afrom django.http.request import split_domain_port, validate_host%0A%0Adef validate_redirect_url(next_url):%0A %22%22%22%0A Returns the next_url path if next_url matches allowed hosts.%0A %22%22%22%0A if not next_url:%0A return None%0A parts = urlparse.urlparse(next_url)%0A if parts.netloc:%0A domain, port = split_domain_port(parts.netloc)%0A allowed_hosts = %5B'*'%5D if settings.DEBUG else settings.ALLOWED_HOSTS%0A if not (domain and validate_host(domain, allowed_hosts)):%0A return None%0A return parts.path%0A%0A
|
|
c6e85e35a090c33bc1d6813dce959c8d47588ae8
|
send an email with current IP address
|
send_Email.py
|
send_Email.py
|
Python
| 0
|
@@ -0,0 +1,1039 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0ACreated on Fri Jun 16 12:14:51 2017%0A%0A@author: smrak%0A%22%22%22%0Aimport requests%0Aimport urllib3%0Afrom datetime import datetime%0A%0Adef getIP():%0A %22%22%22%0A Sebastijan Mrak:%0A get & reteurn a public IP address%0A %22%22%22%0A http = urllib3.PoolManager()%0A r = http.request('GET', 'http://ip.42.pl/raw')%0A my_ip = r.data.decode('utf-8')%0A return my_ip%0A%0Adef send_simple_message(dev_name, ip):%0A dt = datetime.utcnow()%0A time = dt.strftime(%22%25d-%25m %25H:%25M%22)%0A return requests.post(%0A %22https://api.mailgun.net/v3/sandbox1b5516af304e4d3bbb4ce505c254cbca.mailgun.org/messages%22,%0A auth=(%22api%22, %22key-6e8d2a811ff2ea98114574c72dc988f6%22),%0A data=%7B%22from%22: %22Mailgun Sandbox %3Cpostmaster@sandbox1b5516af304e4d3bbb4ce505c254cbca.mailgun.org%3E%22,%0A %22to%22: %22Sebastijan %3Csebastijan.mrak@gmail.com%3E%22,%0A %22subject%22: %22Current IP address for device: %22 + dev_name,%0A %22text%22: %22IP address at a time %22+str(time)+%22 is: %22+str(ip)%7D)%0A %0Aip = getIP()%0A#print (ip)%0Asend_simple_message('PC1', ip)
|
|
c13968125383581e67804e11bc430391d355145a
|
Create DataStreamasDisjointIntervals.py
|
leetcode/352-Data-Stream-as-Disjoint-Intervals/DataStreamasDisjointIntervals.py
|
leetcode/352-Data-Stream-as-Disjoint-Intervals/DataStreamasDisjointIntervals.py
|
Python
| 0
|
@@ -0,0 +1,1450 @@
+# Definition for an interval.%0A# class Interval(object):%0A# def __init__(self, s=0, e=0):%0A# self.start = s%0A# self.end = e%0A%0Aclass SummaryRanges(object):%0A%0A def __init__(self):%0A %22%22%22%0A Initialize your data structure here.%0A %22%22%22%0A self.intervals = %5B%5D%0A %0A%0A def addNum(self, val):%0A %22%22%22%0A :type val: int%0A :rtype: void%0A %22%22%22%0A intv = self.intervals%0A l, r = 0, len(self.intervals) - 1%0A print val%0A %0A while l %3C= r:%0A m = l + (r - l) / 2%0A if val %3C intv%5Bm%5D.start:%0A r = m - 1%0A elif val %3C= intv%5Bm%5D.end:%0A break%0A else:%0A l = m + 1%0A %0A if l %3E r:%0A if 1 %3C= l %3C len(intv) and intv%5Bl - 1%5D.end + 1 == val and intv%5Bl%5D.start - 1 == val:%0A intv%5Bl - 1%5D.end = intv%5Bl%5D.end%0A intv.pop(l)%0A elif len(intv) %3E l and intv%5Bl%5D.start - 1 == val:%0A intv%5Bl%5D.start = val%0A elif l %3E 0 and intv%5Bl - 1%5D.end + 1 == val:%0A intv%5Bl - 1%5D.end = val%0A else:%0A intv.insert(l, Interval(val, val))%0A %0A%0A def getIntervals(self):%0A %22%22%22%0A :rtype: List%5BInterval%5D%0A %22%22%22%0A return self.intervals%0A %0A%0A%0A# Your SummaryRanges object will be instantiated and called as such:%0A# obj = SummaryRanges()%0A# obj.addNum(val)%0A# param_2 = obj.getIntervals()%0A
|
|
435220dda7eb928d9d959594d7986136f17da973
|
Add actual url patter for #239
|
popit/urls/rooturls.py
|
popit/urls/rooturls.py
|
Python
| 0.000002
|
@@ -0,0 +1,170 @@
+from django.conf.urls import url%0Afrom popit.views import *%0A%0A%0Aurlpatterns = %5B%0A url(r'%5E(?P%3Clanguage%3E%5Cw%7B2%7D)', api_root, name=%22api-root%22),%0A url(r'%5E$', api_root_all),%0A%5D%0A
|
|
57297540c6b6be2f956fc27b175e15cde6b5aa76
|
Add test for admin #4, VATINField in django.contrib.admin
|
vies/tests.py
|
vies/tests.py
|
from django.utils import unittest
from django.db.models import Model, CharField
from django.forms import Form, ModelForm
from vies import fields, VATIN
from vies import models
# Known-valid VAT identification number used as the happy-path fixture
# throughout these tests; VALID_VIES is the country code and national
# number concatenated.
VALID_VIES = 'DE284754038'
VALID_VIES_COUNTRY_CODE = 'DE'
VALID_VIES_NUMBER = '284754038'
class VIESModel(Model):
    # Minimal model exercising a required VATINField.
    vat = models.VATINField()
class EmptyVIESModel(Model):
    """Model with an optional VATINField, used to test blank/null handling."""
    # max_length must be an int, not the string '50' (Django CharField option).
    name = CharField(default='John Doe', max_length=50)
    vat = models.VATINField(blank=True, null=True)
class VIESModelForm(ModelForm):
    # ModelForm over VIESModel; ``fields`` is omitted, so all model fields
    # are included (old-style Meta).
    class Meta:
        model = VIESModel
class EmptyVIESModelForm(ModelForm):
    # ModelForm over EmptyVIESModel; ``fields`` is omitted, so all model
    # fields are included (old-style Meta).
    class Meta:
        model = EmptyVIESModel
class VIESForm(Form):
    # Plain form with a required VATINField.
    vat = fields.VATINField()
class EmptyVIESForm(Form):
    # Plain form where the VATINField may be left empty.
    vat = fields.VATINField(required=False)
class VIESTestCase(unittest.TestCase):
    """Unit tests for the VATIN value object itself."""

    def test_creation(self):
        """A valid country code/number pair constructs without raising."""
        try:
            VATIN(VALID_VIES_COUNTRY_CODE, VALID_VIES_NUMBER)
        except Exception as e:
            # str(e) instead of the deprecated BaseException.message
            # attribute; the ``as`` syntax is valid on Python 2.6+.
            self.fail(str(e))

    def test_verified(self):
        """An unknown country code is rejected with ValueError."""
        with self.assertRaises(ValueError):
            VATIN('xx', VALID_VIES_NUMBER)

    def test_country_code_getter(self):
        """country_code is returned in upper case even for lower-case input."""
        v = VATIN(VALID_VIES_COUNTRY_CODE.lower(), VALID_VIES_NUMBER)
        self.assertEqual(v.country_code, VALID_VIES_COUNTRY_CODE)
class ModelTestCase(unittest.TestCase):
    """Persistence tests for VIESModel."""

    def setUp(self):
        pass

    def test_create(self):
        """Object is correctly created."""
        instance = VIESModel.objects.create(vat=VALID_VIES)
        self.assertNotEqual(VIESModel.objects.count(), 0)
        self.assertEqual(instance.vat, VALID_VIES)

    def test_save(self):
        """Object is correctly saved."""
        unsaved = VIESModel()
        unsaved.vat = VALID_VIES
        unsaved.save()
        fetched = VIESModel.objects.get(pk=unsaved.pk)
        self.assertNotEqual(VIESModel.objects.count(), 0)
        self.assertEqual(fetched.vat, VALID_VIES)
class ModelFormTestCase(unittest.TestCase):
    """Validation and persistence tests for the VATIN model forms."""

    @staticmethod
    def _bound_form(country, number):
        # Bind a VIESModelForm to the split country/number inputs of the
        # VATIN field (vat_0 / vat_1 — presumably a MultiWidget; confirm).
        return VIESModelForm({'vat_0': country, 'vat_1': number})

    def test_is_valid(self):
        """Form is valid"""
        form = self._bound_form(VALID_VIES_COUNTRY_CODE, VALID_VIES_NUMBER)
        self.assertTrue(form.is_valid())
        saved = form.save()
        self.assertEqual(saved.vat, VALID_VIES)

    def test_is_not_valid_country(self):
        """Invalid country"""
        self.assertFalse(self._bound_form('xx', VALID_VIES_NUMBER).is_valid())

    def test_is_not_valid_numbers(self):
        """Invalid number"""
        self.assertFalse(
            self._bound_form(VALID_VIES_COUNTRY_CODE, 'xx123+-').is_valid())

    def test_is_not_valid(self):
        """Invalid number"""
        self.assertFalse(self._bound_form('GB', '000000000').is_valid())

    def test_save(self):
        """Form is saved"""
        form = self._bound_form(VALID_VIES_COUNTRY_CODE, VALID_VIES_NUMBER)
        self.assertTrue(form.is_valid())
        saved = form.save()
        fetched = VIESModel.objects.get(pk=saved.pk)
        self.assertEqual(fetched, saved)
        self.assertNotEqual(VIESModel.objects.count(), 0)
        self.assertEqual(fetched.vat, VALID_VIES)

    def test_empty(self):
        form = EmptyVIESModelForm({'name': 'Eva'})
        self.assertTrue(form.is_valid())
|
Python
| 0
|
@@ -9,29 +9,96 @@
ngo.
-utils import un
+contrib.admin.options import ModelAdmin%0Afrom django.contrib.admin.s
it
-t
es
-t
+ import AdminSite
%0Afro
@@ -180,16 +180,50 @@
odelForm
+%0Afrom django.utils import unittest
%0A%0Afrom v
@@ -3453,28 +3453,419 @@
.assertTrue(form.is_valid())
+%0A%0A%0A%0Aclass MockRequest(object):%0A pass%0A%0Arequest = MockRequest()%0A%0Aclass AdminTestCase(unittest.TestCase):%0A%0A def setUp(self):%0A self.site = AdminSite()%0A%0A def test_VATINField_admin(self):%0A %22%22%22Admin form is generated%22%22%22%0A ma = ModelAdmin(VIESModel, self.site)%0A%0A try:%0A ma.get_form(request)%0A except Exception, e:%0A self.fail(e.message)%0A
|
3f6ebda543542a8b32b592ceaba62d8eee1fab5e
|
Version bump to s25.1
|
src/ggrc/settings/default.py
|
src/ggrc/settings/default.py
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
# Core Flask flags; this module holds the defaults overridden per deployment.
DEBUG = False
TESTING = False
# Flask-SQLAlchemy fix to be less than `wait_time` in /etc/mysql/my.cnf
SQLALCHEMY_POOL_RECYCLE = 120
# Settings in app.py
AUTOBUILD_ASSETS = False
ENABLE_JASMINE = False
DEBUG_ASSETS = False
# Pluggable hooks; left unset (None) in the defaults — presumably supplied
# by deployment-specific settings (confirm against the settings loader).
FULLTEXT_INDEXER = None
USER_PERMISSIONS_PROVIDER = None
# Extension module list and exported names; empty by default.
EXTENSIONS = []
exports = []
# Deployment-specific variables
COMPANY = "Company, Inc."
COMPANY_LOGO_TEXT = "Company GRC"
COPYRIGHT = u"Confidential. Copyright " + u"\u00A9"  # \u00A9 is the (c) symbol
VERSION = "s25"
# Initialize from environment if present
import os
SQLALCHEMY_DATABASE_URI = os.environ.get('GGRC_DATABASE_URI', '')
SECRET_KEY = os.environ.get('GGRC_SECRET_KEY', 'Replace-with-something-secret')
|
Python
| 0
|
@@ -727,16 +727,18 @@
N = %22s25
+.1
%22%0A%0A# Ini
|
f04f7555b06f5087100b336f3127b19e252b3794
|
Fix issue 137
|
pages/http.py
|
pages/http.py
|
"""Page CMS functions related to the request object."""
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import loader, Context, RequestContext
from django.core.urlresolvers import reverse
from pages import settings
def get_request_mock():
    """Build a request mock that can be used for testing.

    The mock runs through the real request middleware chain (except
    LocaleMiddleware) so it behaves like a request Django produced itself.
    """
    bh = BaseHandler()
    bh.load_middleware()
    request = WSGIRequest({
        'REQUEST_METHOD': 'GET',
        'SERVER_NAME': 'test',
        'SERVER_PORT': '8000',
        # HTTP_HOST is needed so code calling request.get_host() on the
        # mock does not blow up (issue 137).
        'HTTP_HOST': 'testhost',
    })
    # Apply request middleware
    for middleware_method in bh._request_middleware:
        # LocaleMiddleware should never be applied a second time because
        # it would break the current real request language
        if 'LocaleMiddleware' not in str(middleware_method.im_class):
            # Return value deliberately ignored: a middleware short-circuit
            # response is meaningless for a mock.
            middleware_method(request)

    return request
class AutoRenderHttpError(Exception):
    """Raised by :func:`auto_render` when the caller asked for a context
    dictionary (``only_context=True``) but the wrapped view returned an
    ``HttpResponse`` instead of a ``(template_name, context)`` tuple."""
    pass
def auto_render(func):
    """
    View decorator that automatically calls the ``render_to_response``
    shortcut. A view using this decorator should return a
    ``(template_name, context)`` tuple instead of an ``HttpResponse``.

    Two extra keyword arguments are recognised (and stripped) when the
    wrapped view is called:

    * ``template_name`` -- overrides the template name chosen by the view;
    * ``only_context`` -- return the context dictionary instead of a
      rendered response (raises :class:`AutoRenderHttpError` if the view
      returned an ``HttpResponse``).
    """
    def _dec(request, *args, **kwargs):
        template_override = kwargs.pop('template_name', None)
        only_context = kwargs.pop('only_context', False)
        # Call the view exactly once; the original code duplicated the
        # call-and-unpack logic in both branches.
        response = func(request, *args, **kwargs)
        if isinstance(response, HttpResponse):
            if only_context:
                # An HttpResponse cannot be unpacked into a context dict.
                raise AutoRenderHttpError
            return response
        (template_name, context) = response
        if only_context:
            return context
        t = context['template_name'] = template_override or template_name
        return render_to_response(t, context,
            context_instance=RequestContext(request))
    return _dec
def get_slug_and_relative_path(path):
    """Return the page's slug, its path relative to the pages root, and the
    language prefix (or None when language prefixes are disabled)."""
    root_url = reverse('pages-root')
    if path.startswith(root_url):
        path = path[len(root_url):]
    # Drop a single trailing slash, if any.
    if path.endswith('/'):
        path = path[:-1]
    segments = path.split("/")
    slug = segments[-1]
    lang = None
    if settings.PAGE_USE_LANGUAGE_PREFIX:
        lang = segments[0]
        path = path[len(lang) + 1:]
    return slug, path, lang
def get_template_from_request(request, page=None):
    """
    Get a valid template from different sources (request parameter, the
    page itself) or fall back to the default template.
    """
    default = settings.DEFAULT_PAGE_TEMPLATE
    if settings.PAGE_TEMPLATES is None:
        return default
    requested = request.REQUEST.get('template', None)
    if requested is not None:
        # Only honour the request parameter when it names a known template.
        if requested in dict(settings.PAGE_TEMPLATES) or requested == default:
            return requested
    if page is not None:
        return page.get_template()
    return default
def get_language_from_request(request):
    """Return the most obvious language according to the request."""
    # An explicit ?language= parameter wins.
    explicit = request.GET.get('language', None)
    if explicit:
        return explicit
    # Use a sentinel so an existing-but-falsy LANGUAGE_CODE attribute is
    # still routed through the mapping, exactly like hasattr() did.
    _missing = object()
    code = getattr(request, 'LANGUAGE_CODE', _missing)
    if code is _missing:
        return settings.PAGE_DEFAULT_LANGUAGE
    return settings.PAGE_LANGUAGE_MAPPING(str(code))
|
Python
| 0
|
@@ -645,16 +645,49 @@
'8000',%0A
+ 'HTTP_HOST': 'testhost',%0A
%7D)%0A
|
99f454b3fa62cffac922a7b3431e0024e6dfde3d
|
add data migration script
|
dataactcore/scripts/migrateDataBroker.py
|
dataactcore/scripts/migrateDataBroker.py
|
Python
| 0.000001
|
@@ -0,0 +1,2106 @@
+# migrate data using pg_dump and pg_restore%0A# data copied from tables:%0A# error_data:%0A# error_metadata%0A# file%0A# job_tracker:%0A# job%0A# submission%0A# job_dependency%0A# user_manager;%0A# users%0A# email_token%0A# validator:%0A# appropriation%0A# award_financial%0A# award_financial_assistance%0A# object_class_program_activity%0A%0A# run on command line: python migrateDataBroker.py%0A%0Afrom dataactcore.config import CONFIG_DB%0Aimport subprocess%0A%0Ac = 'postgresql://%7B%7D:%7B%7D@%7B%7D/'.format(%0A CONFIG_DB%5B'username'%5D, CONFIG_DB%5B'password'%5D, CONFIG_DB%5B'host'%5D)%0Atarget = '%7B%7Ddata_broker'.format(c)%0A%0A# error_data%0Adb = 'error_data'%0Asource = '%7B%7D%7B%7D'.format(c, db)%0Aprint('migrating %7B%7D'.format(db))%0Acmd = 'pg_dump -d %7B%7D -t error_metadata -t file --data-only --format=c %7C ' %5C%0A 'pg_restore -d %7B%7D --data-only'.format(source, target)%0Ap = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)%0Aprint('return code = %7B%7D%5Cn'.format(p))%0A%0A# job_tracker%0Adb = 'job_tracker'%0Asource = '%7B%7D%7B%7D'.format(c, db)%0Aprint('migrating %7B%7D'.format(db))%0Acmd = 'pg_dump -d %7B%7D -t job_dependency -t job -t submission --data-only --format=c %7C ' %5C%0A 'pg_restore -d %7B%7D --data-only'.format(source, target)%0Ap = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)%0Aprint('return code = %7B%7D%5Cn'.format(p))%0A%0A# user_manager%0Adb = 'user_manager'%0Asource = '%7B%7D%7B%7D'.format(c, db)%0Aprint('migrating %7B%7D'.format(db))%0Acmd = 'pg_dump -d %7B%7D -t users -t email_token --data-only --format=c %7C ' %5C%0A 'pg_restore -d %7B%7D --data-only'.format(source, target)%0Ap = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)%0Aprint('return code = %7B%7D%5Cn'.format(p))%0A%0A# validation - these tables are larger, so do individually%0Adb = 'validation'%0Asource = '%7B%7D%7B%7D'.format(c, db)%0Atables = %5B'appropriation', 'object_class_program_activity',%0A 'award_financial', 
'award_financial_assistance'%5D%0A%0Afor t in tables:%0A print('migrating %7B%7D: %7B%7D'.format(db, t))%0A cmd = 'pg_dump -d %7B%7D -t %7B%7D --data-only --format=c %7C ' %5C%0A 'pg_restore -d %7B%7D --data-only'.format(source, t, target)%0A p = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)%0A print('return code = %7B%7D%5Cn'.format(p))%0A%0A%0A
|
|
17bbd6d44ec7edd1a079b12a44c283a358b11b92
|
add import script for Teignbridge (closes #865)
|
polling_stations/apps/data_collection/management/commands/import_teignbridge.py
|
polling_stations/apps/data_collection/management/commands/import_teignbridge.py
|
Python
| 0
|
@@ -0,0 +1,384 @@
+from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter%0A%0Aclass Command(BaseXpressDemocracyClubCsvImporter):%0A council_id = 'E07000045'%0A addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.tsv'%0A stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.tsv'%0A elections = %5B'parl.2017-06-08'%5D%0A csv_delimiter = '%5Ct'%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.