| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
from django.db.backends.postgresql_psycopg2 import base
from transaction_hooks.mixin import TransactionHooksDatabaseWrapperMixin
class DatabaseWrapper(TransactionHooksDatabaseWrapperMixin,
base.DatabaseWrapper):
pass
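# Illustrative sketch (not part of the original file): this backend is selected
# through Django's DATABASES setting. The settings values below are assumptions
# for demonstration; only the ENGINE dotted path comes from this repository.
#
# DATABASES = {
#     'default': {
#         'ENGINE': 'transaction_hooks.backends.postgresql_psycopg2',
#         'NAME': 'mydb',       # hypothetical database name
#         'USER': 'myuser',     # hypothetical credentials
#     }
# }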
|
{
"content_hash": "53188abbaa66a0d1d925a858068e6da1",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 72,
"avg_line_length": 30.75,
"alnum_prop": 0.7886178861788617,
"repo_name": "carljm/django-transaction-hooks",
"id": "0ae316897c9b26ac06bb8ac7ba2fcd5acc6edf1f",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transaction_hooks/backends/postgresql_psycopg2/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "42"
},
{
"name": "Python",
"bytes": "14470"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_rename_stream
from zerver.lib.str_utils import force_text
from zerver.models import Realm, Service, UserProfile, get_realm
import sys
class Command(BaseCommand):
help = """Given an existing bot, converts it into an outgoing webhook bot."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('string_id', metavar='<string_id>', type=str,
help='subdomain or string_id of bot')
parser.add_argument('bot_email', metavar='<bot_email>', type=str,
help='email of bot')
parser.add_argument('service_name', metavar='<service_name>', type=str,
help='name of Service object to create')
parser.add_argument('base_url', metavar='<base_url>', type=str,
help='base url of outgoing webhook')
# TODO: Add token and interface as arguments once OutgoingWebhookWorker
# uses these fields on the Service object.
def handle(self, *args, **options):
# type: (*Any, **str) -> None
string_id = options['string_id']
bot_email = options['bot_email']
service_name = options['service_name']
base_url = options['base_url']
encoding = sys.getfilesystemencoding()
realm = get_realm(force_text(string_id, encoding))
if realm is None:
print('Unknown subdomain or string_id %s' % (string_id,))
exit(1)
if not bot_email:
print('Email of existing bot must be provided')
exit(1)
if not service_name:
print('Name for Service object must be provided')
exit(1)
if not base_url:
print('Base URL of outgoing webhook must be provided')
exit(1)
# TODO: Normalize email?
bot_profile = UserProfile.objects.get(email=bot_email)
if not bot_profile:
print('User %s does not exist' % (bot_email,))
exit(1)
if not bot_profile.is_bot:
print('User %s is not a bot' % (bot_email,))
exit(1)
if bot_profile.is_outgoing_webhook_bot:
print('%s is already marked as an outgoing webhook bot' % (bot_email,))
exit(1)
Service.objects.create(name=service_name,
user_profile=bot_profile,
base_url=base_url,
token='',
interface=1)
bot_profile.bot_type = UserProfile.OUTGOING_WEBHOOK_BOT
bot_profile.save()
print('Successfully converted %s into an outgoing webhook bot' % (bot_email,))
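# Illustrative sketch (not part of the original file): the command is meant to be
# invoked through Django's manage.py. The realm, email and URL below are made up.
#
#   ./manage.py convert_bot_to_outgoing_webhook \
#       zulip outgoing-bot@example.com my-service https://hooks.example.com/zulip
#
# On success it creates a Service row and flips the bot's bot_type to
# UserProfile.OUTGOING_WEBHOOK_BOT, as implemented in handle() above.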
|
{
"content_hash": "82918b3a270731eb70a44bd86d91ea11",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 86,
"avg_line_length": 37.48717948717949,
"alnum_prop": 0.5807113543091655,
"repo_name": "jrowan/zulip",
"id": "1cc11bd2b9459c2bbfa304646dd441f03576280c",
"size": "2924",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zerver/management/commands/convert_bot_to_outgoing_webhook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "400886"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "470981"
},
{
"name": "JavaScript",
"bytes": "2070164"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "87465"
},
{
"name": "Python",
"bytes": "3500902"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "38344"
}
],
"symlink_target": ""
}
|
__version__=''' $Id: textobject.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__="""
PDFTextObject is an efficient way to add text to a Canvas. Do not
instantiate directly, obtain one from the Canvas instead.
Progress Reports:
8.83, 2000-01-13, gmcm: created from pdfgen.py
"""
import sys
import string
from types import *
from reportlab.lib.colors import Color, CMYKColor, CMYKColorSep, toColor, black, white, _CMYK_black, _CMYK_white
from reportlab.lib.utils import fp_str, isStrType
from reportlab.pdfbase import pdfmetrics
class _PDFColorSetter:
'''Abstracts the color setting operations; used in Canvas and Textobject;
assumes we have a _code object'''
def _checkSeparation(self,cmyk):
if isinstance(cmyk,CMYKColorSep):
name,sname = self._doc.addColor(cmyk)
if name not in self._colorsUsed:
self._colorsUsed[name] = sname
return name
#if this is set to a callable(color) --> color it can be used to check color setting
#see eg _enforceCMYK/_enforceRGB
_enforceColorSpace = None
def setFillColorCMYK(self, c, m, y, k, alpha=None):
"""set the fill color useing negative color values
(cyan, magenta, yellow and darkness value).
Takes 4 arguments between 0.0 and 1.0"""
self.setFillColor((c,m,y,k),alpha=alpha)
def setStrokeColorCMYK(self, c, m, y, k, alpha=None):
"""set the stroke color useing negative color values
(cyan, magenta, yellow and darkness value).
Takes 4 arguments between 0.0 and 1.0"""
self.setStrokeColor((c,m,y,k),alpha=alpha)
def setFillColorRGB(self, r, g, b, alpha=None):
"""Set the fill color using positive color description
(Red,Green,Blue). Takes 3 arguments between 0.0 and 1.0"""
self.setFillColor((r,g,b),alpha=alpha)
def setStrokeColorRGB(self, r, g, b, alpha=None):
"""Set the stroke color using positive color description
(Red,Green,Blue). Takes 3 arguments between 0.0 and 1.0"""
self.setStrokeColor((r,g,b),alpha=alpha)
def setFillColor(self, aColor, alpha=None):
"""Takes a color object, allowing colors to be referred to by name"""
if self._enforceColorSpace:
aColor = self._enforceColorSpace(aColor)
if isinstance(aColor, CMYKColor):
d = aColor.density
c,m,y,k = (d*aColor.cyan, d*aColor.magenta, d*aColor.yellow, d*aColor.black)
self._fillColorObj = aColor
name = self._checkSeparation(aColor)
if name:
self._code.append('/%s cs %s scn' % (name,fp_str(d)))
else:
self._code.append('%s k' % fp_str(c, m, y, k))
elif isinstance(aColor, Color):
rgb = (aColor.red, aColor.green, aColor.blue)
self._fillColorObj = aColor
self._code.append('%s rg' % fp_str(rgb) )
elif isinstance(aColor,(tuple,list)):
l = len(aColor)
if l==3:
self._fillColorObj = aColor
self._code.append('%s rg' % fp_str(aColor) )
elif l==4:
self._fillColorObj = aColor
self._code.append('%s k' % fp_str(aColor))
else:
raise ValueError('Unknown color %r' % aColor)
elif isStrType(aColor):
self.setFillColor(toColor(aColor))
else:
raise ValueError('Unknown color %r' % aColor)
if alpha is not None:
self.setFillAlpha(alpha)
elif getattr(aColor, 'alpha', None) is not None:
self.setFillAlpha(aColor.alpha)
def setStrokeColor(self, aColor, alpha=None):
"""Takes a color object, allowing colors to be referred to by name"""
if self._enforceColorSpace:
aColor = self._enforceColorSpace(aColor)
if isinstance(aColor, CMYKColor):
d = aColor.density
c,m,y,k = (d*aColor.cyan, d*aColor.magenta, d*aColor.yellow, d*aColor.black)
self._strokeColorObj = aColor
name = self._checkSeparation(aColor)
if name:
self._code.append('/%s CS %s SCN' % (name,fp_str(d)))
else:
self._code.append('%s K' % fp_str(c, m, y, k))
elif isinstance(aColor, Color):
rgb = (aColor.red, aColor.green, aColor.blue)
self._strokeColorObj = aColor
self._code.append('%s RG' % fp_str(rgb) )
elif isinstance(aColor,(tuple,list)):
l = len(aColor)
if l==3:
self._strokeColorObj = aColor
self._code.append('%s RG' % fp_str(aColor) )
elif l==4:
self._strokeColorObj = aColor
self._code.append('%s K' % fp_str(aColor))
else:
raise ValueError('Unknown color %r' % aColor)
elif isStrType(aColor):
self.setStrokeColor(toColor(aColor))
else:
raise ValueError('Unknown color %r' % aColor)
if alpha is not None:
self.setStrokeAlpha(alpha)
elif getattr(aColor, 'alpha', None) is not None:
self.setStrokeAlpha(aColor.alpha)
def setFillGray(self, gray, alpha=None):
"""Sets the gray level; 0.0=black, 1.0=white"""
self._fillColorObj = (gray, gray, gray)
self._code.append('%s g' % fp_str(gray))
if alpha is not None:
self.setFillAlpha(alpha)
def setStrokeGray(self, gray, alpha=None):
"""Sets the gray level; 0.0=black, 1.0=white"""
self._strokeColorObj = (gray, gray, gray)
self._code.append('%s G' % fp_str(gray))
if alpha is not None:
self.setStrokeAlpha(alpha)
def setStrokeAlpha(self,a):
if not (isinstance(a,(float,int)) and 0<=a<=1):
raise ValueError('setStrokeAlpha invalid value %r' % a)
getattr(self,'_setStrokeAlpha',lambda x: None)(a)
def setFillAlpha(self,a):
if not (isinstance(a,(float,int)) and 0<=a<=1):
raise ValueError('setFillAlpha invalid value %r' % a)
getattr(self,'_setFillAlpha',lambda x: None)(a)
def setStrokeOverprint(self,a):
getattr(self,'_setStrokeOverprint',lambda x: None)(a)
def setFillOverprint(self,a):
getattr(self,'_setFillOverprint',lambda x: None)(a)
def setOverprintMask(self,a):
getattr(self,'_setOverprintMask',lambda x: None)(a)
class PDFTextObject(_PDFColorSetter):
"""PDF logically separates text and graphics drawing; text
operations need to be bracketed between BT (Begin text) and
ET operators. This class ensures text operations are
properly encapsulated. Ask the canvas for a text object
with beginText(x, y). Do not construct one directly.
Do not use multiple text objects in parallel; PDF is
not multi-threaded!
It keeps track of x and y coordinates relative to its origin."""
def __init__(self, canvas, x=0,y=0):
self._code = ['BT'] #no point in [] then append RGB
self._canvas = canvas #canvas sets this so it has access to size info
self._fontname = self._canvas._fontname
self._fontsize = self._canvas._fontsize
self._leading = self._canvas._leading
self._doc = self._canvas._doc
self._colorsUsed = self._canvas._colorsUsed
self._enforceColorSpace = getattr(canvas,'_enforceColorSpace',None)
font = pdfmetrics.getFont(self._fontname)
self._curSubset = -1
self.setTextOrigin(x, y)
self._textRenderMode = 0
self._clipping = 0
def getCode(self):
"pack onto one line; used internally"
self._code.append('ET')
if self._clipping:
self._code.append('%d Tr' % (self._textRenderMode^4))
return ' '.join(self._code)
def setTextOrigin(self, x, y):
if self._canvas.bottomup:
self._code.append('1 0 0 1 %s Tm' % fp_str(x, y)) #bottom up
else:
self._code.append('1 0 0 -1 %s Tm' % fp_str(x, y)) #top down
# The current cursor position is at the text origin
self._x0 = self._x = x
self._y0 = self._y = y
def setTextTransform(self, a, b, c, d, e, f):
"Like setTextOrigin, but does rotation, scaling etc."
if not self._canvas.bottomup:
c = -c #reverse bottom row of the 2D Transform
d = -d
self._code.append('%s Tm' % fp_str(a, b, c, d, e, f))
# The current cursor position is at the text origin Note that
# we aren't keeping track of all the transform on these
# coordinates: they are relative to the rotations/shears
# defined in the matrix.
self._x0 = self._x = e
self._y0 = self._y = f
def moveCursor(self, dx, dy):
"""Starts a new line at an offset dx,dy from the start of the
current line. This does not move the cursor relative to the
current position, and it changes the current offset of every
future line drawn (i.e. if you next do a textLine() call, it
will move the cursor to a position one line lower than the
position specified in this call. """
# Check if we have a previous move cursor call, and combine
# them if possible.
if self._code and self._code[-1][-3:]==' Td':
L = self._code[-1].split()
if len(L)==3:
del self._code[-1]
else:
self._code[-1] = ''.join(L[:-4])
# Work out the last movement
lastDx = float(L[-3])
lastDy = float(L[-2])
# Combine the two movements
dx += lastDx
dy -= lastDy
# We will soon add the movement to the line origin, so if
# we've already done this for lastDx, lastDy, remove it
# first (so it will be right when added back again).
self._x0 -= lastDx
self._y0 -= lastDy
# Output the move text cursor call.
self._code.append('%s Td' % fp_str(dx, -dy))
# Keep track of the new line offsets and the cursor position
self._x0 += dx
self._y0 += dy
self._x = self._x0
self._y = self._y0
def setXPos(self, dx):
"""Starts a new line dx away from the start of the
current line - NOT from the current point! So if
you call it in mid-sentence, watch out."""
self.moveCursor(dx,0)
def getCursor(self):
"""Returns current text position relative to the last origin."""
return (self._x, self._y)
def getStartOfLine(self):
"""Returns a tuple giving the text position of the start of the
current line."""
return (self._x0, self._y0)
def getX(self):
"""Returns current x position relative to the last origin."""
return self._x
def getY(self):
"""Returns current y position relative to the last origin."""
return self._y
def _setFont(self, psfontname, size):
"""Sets the font and fontSize
Raises a readable exception if an illegal font
is supplied. Font names are case-sensitive! Keeps track
of font name and size for metrics."""
self._fontname = psfontname
self._fontsize = size
font = pdfmetrics.getFont(self._fontname)
if font._dynamicFont:
self._curSubset = -1
else:
pdffontname = self._canvas._doc.getInternalFontName(psfontname)
self._code.append('%s %s Tf' % (pdffontname, fp_str(size)))
def setFont(self, psfontname, size, leading = None):
"""Sets the font. If leading not specified, defaults to 1.2 x
font size. Raises a readable exception if an illegal font
is supplied. Font names are case-sensitive! Keeps track
of font name and size for metrics."""
self._fontname = psfontname
self._fontsize = size
if leading is None:
leading = size * 1.2
self._leading = leading
font = pdfmetrics.getFont(self._fontname)
if font._dynamicFont:
self._curSubset = -1
else:
pdffontname = self._canvas._doc.getInternalFontName(psfontname)
self._code.append('%s %s Tf %s TL' % (pdffontname, fp_str(size), fp_str(leading)))
def setCharSpace(self, charSpace):
"""Adjusts inter-character spacing"""
self._charSpace = charSpace
self._code.append('%s Tc' % fp_str(charSpace))
def setWordSpace(self, wordSpace):
"""Adjust inter-word spacing. This can be used
to flush-justify text - you get the width of the
words, and add some space between them."""
self._wordSpace = wordSpace
self._code.append('%s Tw' % fp_str(wordSpace))
def setHorizScale(self, horizScale):
"Stretches text out horizontally"
self._horizScale = 100 + horizScale
self._code.append('%s Tz' % fp_str(horizScale))
def setLeading(self, leading):
"How far to move down at the end of a line."
self._leading = leading
self._code.append('%s TL' % fp_str(leading))
def setTextRenderMode(self, mode):
"""Set the text rendering mode.
0 = Fill text
1 = Stroke text
2 = Fill then stroke
3 = Invisible
4 = Fill text and add to clipping path
5 = Stroke text and add to clipping path
6 = Fill then stroke and add to clipping path
7 = Add to clipping path
after we start clipping we mustn't change the mode back until after the ET
"""
assert mode in (0,1,2,3,4,5,6,7), "mode must be in (0,1,2,3,4,5,6,7)"
if (mode & 4)!=self._clipping:
mode |= 4
self._clipping = mode & 4
if self._textRenderMode!=mode:
self._textRenderMode = mode
self._code.append('%d Tr' % mode)
def setRise(self, rise):
"Move text baseline up or down to allow superscrip/subscripts"
self._rise = rise
self._y = self._y - rise # + ? _textLineMatrix?
self._code.append('%s Ts' % fp_str(rise))
def _formatText(self, text):
"Generates PDF text output operator(s)"
canv = self._canvas
font = pdfmetrics.getFont(self._fontname)
R = []
if font._dynamicFont:
#it's a truetype font and should be utf8. If an error is raised,
for subset, t in font.splitString(text, canv._doc):
if subset!=self._curSubset:
pdffontname = font.getSubsetInternalName(subset, canv._doc)
R.append("%s %s Tf %s TL" % (pdffontname, fp_str(self._fontsize), fp_str(self._leading)))
self._curSubset = subset
R.append("(%s) Tj" % canv._escape(t))
elif font._multiByte:
#all the fonts should really work like this - let them know more about PDF...
R.append("%s %s Tf %s TL" % (
canv._doc.getInternalFontName(font.fontName),
fp_str(self._fontsize),
fp_str(self._leading)
))
R.append("(%s) Tj" % font.formatForPdf(text))
else:
#convert to T1 coding
fc = font
if sys.version_info[0] < 3 and not isinstance(text,unicode):
try:
text = text.decode('utf8')
except UnicodeDecodeError as e:
i,j = e.args[2:4]
raise UnicodeDecodeError(*(e.args[:4]+('%s\n%s-->%s<--%s' % (e.args[4],text[max(i-10,0):i],text[i:j],text[j:j+10]),)))
for f, t in pdfmetrics.unicode2T1(text,[font]+font.substitutionFonts):
if f!=fc:
R.append("%s %s Tf %s TL" % (canv._doc.getInternalFontName(f.fontName), fp_str(self._fontsize), fp_str(self._leading)))
fc = f
R.append("(%s) Tj" % canv._escape(t))
if font!=fc:
R.append("%s %s Tf %s TL" % (canv._doc.getInternalFontName(self._fontname), fp_str(self._fontsize), fp_str(self._leading)))
return ' '.join(R)
def _textOut(self, text, TStar=0):
"prints string at current point, ignores text cursor"
self._code.append('%s%s' % (self._formatText(text), (TStar and ' T*' or '')))
def textOut(self, text):
"""prints string at current point, text cursor moves across."""
self._x = self._x + self._canvas.stringWidth(text, self._fontname, self._fontsize)
self._code.append(self._formatText(text))
def textLine(self, text=''):
"""prints string at current point, text cursor moves down.
Can work with no argument to simply move the cursor down."""
# Update the coordinates of the cursor
self._x = self._x0
if self._canvas.bottomup:
self._y = self._y - self._leading
else:
self._y = self._y + self._leading
# Update the location of the start of the line
# self._x0 is unchanged
self._y0 = self._y
# Output the text followed by a PDF newline command
self._code.append('%s T*' % self._formatText(text))
def textLines(self, stuff, trim=1):
"""prints multi-line or newlined strings, moving down. One
common use is to quote a multi-line block in your Python code;
since this may be indented, by default it trims whitespace
off each line and from the beginning; set trim=0 to preserve
whitespace."""
if isStrType(stuff):
lines = stuff.strip().split('\n')
if trim==1:
lines = [s.strip() for s in lines]
elif isinstance(stuff,(tuple,list)):
lines = stuff
else:
assert 1==0, "argument to textlines must be string,, list or tuple"
# Output each line one at a time. This used to be a long-hand
# copy of the textLine code, now called as a method.
for line in lines:
self.textLine(line)
def __nonzero__(self):
'PDFTextObject is true if it has something done after the init'
return self._code != ['BT']
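# Illustrative sketch (not part of the original module): minimal use of
# PDFTextObject obtained through the Canvas, as the class docstring prescribes.
# The output filename and text are placeholders.
if __name__ == '__main__':
    from reportlab.pdfgen.canvas import Canvas
    c = Canvas("textobject_demo.pdf")
    t = c.beginText(72, 720)             # returns a PDFTextObject
    t.setFont("Helvetica", 12, leading=14)
    t.setFillColorRGB(0.2, 0.2, 0.8)
    t.textLine("Hello from PDFTextObject")
    t.textLines("Multiple lines\nare moved down by the leading.")
    c.drawText(t)                        # emits the accumulated BT ... ET code
    c.save()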
|
{
"content_hash": "7356bc3d5e8cd6d1ad23619b06da7bc5",
"timestamp": "",
"source": "github",
"line_count": 448,
"max_line_length": 139,
"avg_line_length": 40.761160714285715,
"alnum_prop": 0.5801434751656536,
"repo_name": "nakagami/reportlab",
"id": "8326a5c139b2b0fa4d57b4d57804746d8b93e28a",
"size": "18451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/reportlab/pdfgen/textobject.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "754561"
},
{
"name": "C++",
"bytes": "1351"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "3439804"
},
{
"name": "Shell",
"bytes": "1673"
}
],
"symlink_target": ""
}
|
import numpy as np
import cv2
def drawMatches(img1, kp1, img2, kp2, matches):
"""
My own implementation of cv2.drawMatches, as OpenCV 2.4.9
does not have this function available but it is supported in
OpenCV 3.0.0.
This function takes in two images with their associated
keypoints, as well as a list of DMatch data structure (matches)
that contains which keypoints matched in which images.
An image will be produced where a montage is shown with
the first image followed by the second image beside it.
Keypoints are delineated with circles, while lines are connected
between matching keypoints.
img1,img2 - Grayscale images
kp1,kp2 - Detected list of keypoints through any of the OpenCV keypoint
detection algorithms
matches - A list of matches of corresponding keypoints through any
OpenCV keypoint matching algorithm
"""
# Create a new output image that concatenates the two images together
# (a.k.a) a montage
rows1 = img1.shape[0]
cols1 = img1.shape[1]
rows2 = img2.shape[0]
cols2 = img2.shape[1]
out = np.zeros((max([rows1,rows2]),cols1+cols2,3), dtype='uint8')
# Place the first image to the left
out[:rows1,:cols1] = np.dstack([img1, img1, img1])
# Place the next image to the right of it
out[:rows2,cols1:] = np.dstack([img2, img2, img2])
# For each pair of points we have between both images
# draw circles, then connect a line between them
for mat in matches:
# Get the matching keypoints for each of the images
img1_idx = mat.queryIdx
img2_idx = mat.trainIdx
# x - columns
# y - rows
(x1,y1) = kp1[img1_idx].pt
(x2,y2) = kp2[img2_idx].pt
# Draw a small circle at both co-ordinates
# radius 4
# colour blue
# thickness = 1
cv2.circle(out, (int(x1),int(y1)), 4, (255, 0, 0), 1)
cv2.circle(out, (int(x2)+cols1,int(y2)), 4, (255, 0, 0), 1)
# Draw a line in between the two points
# thickness = 1
# colour blue
cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), (255, 0, 0), 1)
# Show the image
cv2.imshow('Matched Features', out)
cv2.waitKey(0)
cv2.destroyWindow('Matched Features')
# Also return the image if you'd like a copy
return out
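# Illustrative sketch (not part of the original file): one way this helper might
# be driven with the OpenCV 2.4-era API. The image paths are placeholders, and
# ORB / BFMatcher are only one possible detector/matcher pairing.
if __name__ == '__main__':
    img1 = cv2.imread('left.png', 0)     # hypothetical grayscale inputs
    img2 = cv2.imread('right.png', 0)
    orb = cv2.ORB()                      # OpenCV 2.4-style constructor
    kp1 = orb.detect(img1, None)
    kp1, des1 = orb.compute(img1, kp1)
    kp2 = orb.detect(img2, None)
    kp2, des2 = orb.compute(img2, kp2)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)
    drawMatches(img1, kp1, img2, kp2, matches[:25])  # show the 25 best matches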
|
{
"content_hash": "814d51abbd59eae9f0011b0af7f8f9c7",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 81,
"avg_line_length": 32.067567567567565,
"alnum_prop": 0.6367467340918669,
"repo_name": "NataliaDymnikova/camera-position-by-line",
"id": "dcb0ec0d0d2841d039c09afd3912fa9b4bff6ce9",
"size": "2373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/drawMatches.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "287"
},
{
"name": "C++",
"bytes": "12568"
},
{
"name": "CMake",
"bytes": "11834"
},
{
"name": "M",
"bytes": "3076"
},
{
"name": "Matlab",
"bytes": "2556562"
},
{
"name": "Python",
"bytes": "7631"
}
],
"symlink_target": ""
}
|
import os
import numpy as np
import pycuda.driver as drv
from pycuda.tools import context_dependent_memoize
from struct import unpack_from
from pytools import memoize, memoize_method
from float_ew import call_compound_kernel
from layers import DataLayer, FullLayer, ConvLayer, PoolLayer, _get_sm_count
class GPUTensor(object):
def __init__(self, shape,
dtype = np.float16,
allocator = drv.mem_alloc,
base = None,
gpudata = None,
strides = None,
is_trans = False,
name = None,
rounding = 0):
# supported dtypes
assert dtype in (np.float16, np.float32, np.uint8, np.int8)
dtype = np.dtype(dtype)
try:
size = 1
for dim in shape:
size *= dim
except TypeError:
assert isinstance(shape, (int, long, np.integer))
size = shape
shape = (shape,)
if isinstance(size, np.integer):
size = np.asscalar(size)
# only support C ordering for now.
if strides is None:
self.strides = _contiguous_strides(dtype.itemsize, shape)
else:
self.strides = tuple(strides)
self.base = base
self.shape = shape
self.size = size
self.dtype = dtype
self.nbytes = dtype.itemsize * size
self.allocator = allocator
self.is_trans = is_trans
self.name = name
self.rounding = rounding
if gpudata is None:
if size:
#print drv.mem_get_info()
self.gpudata = allocator(self.nbytes)
else:
self.gpudata = None
assert base is None
else:
self.gpudata = gpudata
def __str__(self):
return ("Array(0x%x) name:%s dtype:%s shape:%s strides:%s "
" is_trans:%s" % (self.gpudata, self.name, self.dtype,
self.shape, self.strides, self.is_trans))
def __getstate__(self):
"""
Defines what and how we go about serializing an instance of this class.
Returns:
numpy.ndarray: Representation of the underlying
`cudanet.CUDAMatrix` tensor
"""
statedict = {'numpydata': self.asnumpyarray(),
'shape': self.shape,
'dtype': self.dtype,
'strides': self.strides,
'is_trans': self.is_trans,
'name': self.name}
return statedict
def __setstate__(self, statedict):
"""
Defines how we go about deserializing into an instance of this class.
Arguments:
state (numpy.ndarray): Serialized representation of the underlying
`cudanet.CUDAMatrix` tensor to be unpacked.
"""
kwargs = {x: statedict[x] for x in statedict.keys()
if x not in ('shape', 'numpydata')}
import pycuda.autoinit # TODO: Only create if it does not exist
self.__init__(statedict['shape'], dtype=np.float32)
self.fill(statedict['numpydata'])
def __repr__(self):
return self.__str__()
def __int__(self):
return int(self.gpudata)
def __len__(self):
"""Return the size of the leading dimension of self."""
if len(self.shape):
return self.shape[0]
else:
return 0
@property
@memoize_method
def is_contiguous(self):
return self.strides == _contiguous_strides(self.dtype.itemsize, self.shape)
def set(self, ary, device=None):
"""
copy host array to device.
Arguments:
ary: host array, needs to be contiguous
device: device id, if not the one attached to current context
Returns:
self
"""
assert ary.size == self.size
assert self.is_contiguous, "Array in set() must be contiguous"
if ary.dtype is not self.dtype:
ary = ary.astype(self.dtype)
assert ary.strides == self.strides
if device is None:
drv.memcpy_htod(self.gpudata, ary)
else:
# with multithreaded datasets, make a context before copying
# and destroy it again once done.
ctx = drv.Device(device).make_context()
drv.memcpy_htod(self.gpudata, ary)
ctx.pop()
del ctx
return self
def get(self):
"""
copy device array to host.
Returns:
the host numpy array
"""
assert self.is_contiguous, "Array in get() must be contiguous"
ary = np.empty(self.shape, self.dtype)
drv.memcpy_dtoh(ary, self.gpudata)
return ary
def asnumpyarray(self):
"""
asnumpyarray is an alias of get(), needed for MOP compatibility
"""
return self.get()
def asbuffer(self):
"""
asbuffer returns buffer interface to gpu data
"""
return self.gpudata.as_buffer(self.nbytes)
def __getitem__(self, index):
"""
return a sliced view of an array
"""
if not isinstance(index, tuple):
index = (index,)
new_shape = []
new_offset = 0
new_strides = []
seen_ellipsis = False
index_axis = 0
array_axis = 0
while index_axis < len(index):
index_entry = index[index_axis]
if array_axis > len(self.shape):
raise IndexError("too many axes in index")
if isinstance(index_entry, slice):
start, stop, idx_stride = index_entry.indices(
self.shape[array_axis])
array_stride = self.strides[array_axis]
new_shape.append((stop-start)//idx_stride)
new_strides.append(idx_stride*array_stride)
new_offset += array_stride*start
index_axis += 1
array_axis += 1
elif isinstance(index_entry, (int, np.integer)):
array_shape = self.shape[array_axis]
if index_entry < 0:
index_entry += array_shape
if not (0 <= index_entry < array_shape):
raise IndexError(
"subindex in axis %d out of range" % index_axis)
new_offset += self.strides[array_axis]*index_entry
index_axis += 1
array_axis += 1
elif index_entry is Ellipsis:
index_axis += 1
remaining_index_count = len(index) - index_axis
new_array_axis = len(self.shape) - remaining_index_count
if new_array_axis < array_axis:
raise IndexError("invalid use of ellipsis in index")
while array_axis < new_array_axis:
new_shape.append(self.shape[array_axis])
new_strides.append(self.strides[array_axis])
array_axis += 1
if seen_ellipsis:
raise IndexError(
"more than one ellipsis not allowed in index")
seen_ellipsis = True
else:
raise IndexError("invalid subindex in axis %d" % index_axis)
while array_axis < len(self.shape):
new_shape.append(self.shape[array_axis])
new_strides.append(self.strides[array_axis])
array_axis += 1
return self.__class__(
shape = tuple(new_shape),
dtype = self.dtype,
allocator = self.allocator,
base = self,
gpudata = int(self.gpudata)+new_offset,
strides = tuple(new_strides),
name = self.name,
rounding = self.rounding)
def _assign(self, value):
if isinstance(value, (int, float)):
# if we have a contiguous array, then use the speedy driver kernel
if self.is_contiguous:
value = self.dtype.type(value)
if self.dtype.itemsize == 1:
drv.memset_d8( self.gpudata,
unpack_from('B', value)[0],
self.size)
elif self.dtype.itemsize == 2:
drv.memset_d16(self.gpudata,
unpack_from('H', value)[0],
self.size)
else:
drv.memset_d32(self.gpudata,
unpack_from('I', value)[0],
self.size)
# otherwise use our copy kernel
else:
OpTreeNode.build("assign", self, value)
elif isinstance(value, GPUTensor):
# TODO: add an is_binary_compat like function
if self.is_contiguous and value.is_contiguous and self.dtype == value.dtype:
drv.memcpy_dtod(self.gpudata, value.gpudata, self.nbytes)
else:
OpTreeNode.build("assign", self, value)
# collapse and execute an op tree as a kernel
elif isinstance(value, OpTreeNode):
OpTreeNode.build("assign", self, value)
# assign to numpy array (same as set())
elif isinstance(value, np.ndarray):
self.set(value)
else:
raise TypeError("Invalid type for assignment: %s" % type(value))
return self
def __setitem__(self, index, value):
self.__getitem__(index)._assign(value)
def fill(self, value):
return self._assign(value)
def copy(self, a):
return self._assign(a)
def copy_from(self, a):
""" alias of copy"""
return self.set(a)
def reshape(self, *shape):
"""
return a reshaped view
"""
if isinstance(shape[0], tuple) or isinstance(shape[0], list):
shape = tuple(shape[0])
if shape == self.shape:
return self
size = reduce(lambda x, y: x * y, shape, 1)
if size != self.size:
raise ValueError("total size of new array must be unchanged")
if not self.is_contiguous:
raise TypeError("reshaping of non-contigous "
"arrays is not yet supported")
return self.__class__(
shape = shape,
dtype = self.dtype,
allocator = self.allocator,
base = self,
gpudata = self.gpudata,
strides = _contiguous_strides(self.dtype.itemsize, shape),
name = self.name,
rounding = self.rounding)
def share(self, shape, dtype=None, name=None):
"""
return a view: ary, where ary.size <= self.size
Allows easy sharing of temporary memory
"""
size = reduce(lambda x, y: x * y, shape, 1)
if size > self.size:
raise ValueError("total size of new array must <= size of parent")
if not self.is_contiguous:
raise TypeError("sharing of non-contigous "
"arrays is not yet supported")
if dtype is None:
dtype = self.dtype
else:
dtype = np.dtype(dtype)
return self.__class__(
shape = shape,
dtype = dtype,
allocator = self.allocator,
base = self,
gpudata = self.gpudata,
strides = _contiguous_strides(dtype.itemsize, shape),
name = name,
rounding = self.rounding)
@property
def T(self):
"""
return a transposed view
"""
return self.__class__(
shape = self.shape[::-1],
dtype = self.dtype,
allocator = self.allocator,
base = self,
gpudata = self.gpudata,
strides = self.strides[::-1],
is_trans = not self.is_trans,
name = self.name,
rounding = self.rounding)
def __add__ (self, other): return OpTreeNode.build("add", self, other)
def __sub__ (self, other): return OpTreeNode.build("sub", self, other)
def __mul__ (self, other): return OpTreeNode.build("mul", self, other)
def __div__ (self, other): return OpTreeNode.build("div", self, other)
def __truediv__ (self, other): return OpTreeNode.build("div", self, other)
def __pow__ (self, other): return OpTreeNode.build("pow", self, other)
def __radd__ (self, other): return OpTreeNode.build("add", other, self)
def __rsub__ (self, other): return OpTreeNode.build("sub", other, self)
def __rmul__ (self, other): return OpTreeNode.build("mul", other, self)
def __rdiv__ (self, other): return OpTreeNode.build("div", other, self)
def __rtruediv__ (self, other): return OpTreeNode.build("div", other, self)
def __rpow__ (self, other): return OpTreeNode.build("pow", other, self)
def __eq__ (self, other): return OpTreeNode.build("eq", self, other)
def __ne__ (self, other): return OpTreeNode.build("ne", self, other)
def __lt__ (self, other): return OpTreeNode.build("lt", self, other)
def __le__ (self, other): return OpTreeNode.build("le", self, other)
def __gt__ (self, other): return OpTreeNode.build("gt", self, other)
def __ge__ (self, other): return OpTreeNode.build("ge", self, other)
def __abs__ (self): return OpTreeNode.build("abs", self, None)
def __neg__ (self): return OpTreeNode.build("neg", self, None)
def __iadd__ (self, other): return OpTreeNode.build("add", self, other, out=self)
def __isub__ (self, other): return OpTreeNode.build("sub", self, other, out=self)
def __imul__ (self, other): return OpTreeNode.build("mul", self, other, out=self)
def __idiv__ (self, other): return OpTreeNode.build("div", self, other, out=self)
def __itruediv__ (self, other): return OpTreeNode.build("div", self, other, out=self)
def __ipow__ (self, other): return OpTreeNode.build("pow", self, other, out=self)
class NervanaGPU(object):
def __init__(self, stochastic_round=False, bench=False,
cubin_path=os.path.join("kernels", "cubin")):
self.round_mode = 1 if stochastic_round else 0
self.cubin_path = os.path.join(os.path.dirname(__file__), cubin_path)
self.bench = bench
def empty(self, shape, dtype=np.float16, name=None, allocator=drv.mem_alloc):
"""
allocate the space for a GPUTensor
"""
return GPUTensor(shape, dtype, allocator=allocator,
name=name, rounding=self.round_mode)
def array(self, ary, dtype=np.float16, name=None, allocator=drv.mem_alloc):
"""
converts a numpy array to a GPUTensor
"""
return GPUTensor(ary.shape, dtype, allocator=allocator,
name=name, rounding=self.round_mode).set(ary)
def zeros(self, shape, dtype=np.float16, name=None, allocator=drv.mem_alloc):
"""
Returns an array of the given shape and dtype filled with 0's.
"""
return GPUTensor(shape, dtype, allocator=allocator,
name=name, rounding=self.round_mode)._assign(0)
def ones(self, shape, dtype=np.float16, name=None, allocator=drv.mem_alloc):
"""
Returns an array of the given shape and dtype filled with 1's.
"""
return GPUTensor(shape, dtype, allocator,
name=name, rounding=self.round_mode)._assign(1)
def empty_like(self, other_ary, name=None):
"""
Returns an array with the same params as another
"""
return GPUTensor(other_ary.shape, other_ary.dtype, other_ary.allocator,
name=name, rounding=self.round_mode)
def conv_layer(self, dtype,
N, C, K,
D=1, H=1, W=1,
T=1, R=1, S=1,
pad_d=0, pad_h=0, pad_w=0,
str_d=1, str_h=1, str_w=1,
grid_P=0, grid_Q=0, update_size=None):
"""
Create a new ConvLayer parameter object.
This then is passed as an argument to all the convolution operations.
N: Number of images in mini-batch
C: Number of input feature maps
K: Number of output feature maps
D: Depth of input image
H: Height of input image
W: Width of input image
T: Depth of filter kernel
R: Height of filter kernel
S: Width of filter kernel
padding: amount of zero-padding around the given edge
strides: factor to step the filters by in a given direction
grid_P, grid_Q: For the update operation define the size of the grid
to distribute the work across SMs. The smaller the grid, the deeper the
MM and hence more accumulation is done in fp32. The bigger the grid,
the more the work can be evenly spanned across the SMs, at the cost of
needing more fp16 accumulation operations and increased error.
Set to 1,1 for full fp32 accuracy
Set to P,Q for maximal distribution of work across SMs
Set to 0,0 for automatically calculated optimal balance (recommended).
Tweaking these params can have a large impact on performance as the
L2 cache utilization is greatly affected by them.
update_size: override kernel size selection for update.
"C64_K64" (fp16 only)
"C128_K64" (fp32 only)
"C128_K128" (both)
dtype: need to know dtype to setup proper kernels and params.
Maximum utilization is achieved when N, K and C*R*S*T are each
a multiple of 64
"""
return ConvLayer(self, dtype, N, C, K, D, H, W, T, R, S,
pad_d, pad_h, pad_w, str_d, str_h, str_w, grid_P, grid_Q, update_size)
def fprop_conv(self, layer, I, F, O, alpha=1.0, relu=False, repeat=1):
assert layer.sizeI == I.size
assert layer.sizeF == F.size
assert layer.sizeO == O.size
return self._execute_conv(
layer, "fprop", layer.fprop_size,
layer.fprop_grid, layer.fprop_block, layer.kernel_args, layer.lut_size,
I, F, O, alpha, relu, False, repeat)
def bprop_conv(self, layer, F, E, grad_I, alpha=1.0, repeat=1):
assert layer.sizeF == F.size
assert layer.sizeO == E.size
assert layer.sizeI == grad_I.size
return self._execute_conv(
layer, "bprop", layer.bprop_size,
layer.bprop_grid, layer.bprop_block, layer.kernel_args, layer.lut_size,
F, E, grad_I, alpha, False, True, repeat)
def update_conv(self, layer, I, E, grad_F, alpha=1.0, repeat=1):
assert layer.sizeI == I.size
assert layer.sizeO == E.size
assert layer.sizeF == grad_F.size
return self._execute_conv(
layer, "updat", layer.updat_size,
layer.updat_grid, layer.updat_block, layer.update_args, 0,
I, E, grad_F, alpha, False, True, repeat)
def _execute_conv(self, layer, op, size, grid, block, args, shared, A, B, C, alpha, relu, zero, repeat):
assert B.dtype == C.dtype
clss = "hconv" if C.dtype.type is np.float16 else "sconv"
if A.dtype.type is np.uint8: op += '_u8'
elif A.dtype.type is np.int8: op += '_s8'
flags = 0
if C.rounding: flags |= 1
if relu: flags |= 2
kernel = _get_conv_kernel(self.cubin_path, clss, op, size)
params = [grid, block, _get_rand_state(),
C.gpudata, A.gpudata, B.gpudata,
alpha, flags ]
params.extend(args)
# Warmup
if repeat > 1:
for r in range(max(repeat // 10, 1)):
kernel.prepared_call(*params, shared_size=shared)
if self.bench or repeat > 1:
start, end = _get_events()
start.record()
for r in range(repeat):
if zero: C.fill(0.0)
kernel.prepared_call(*params, shared_size=shared)
if self.bench or repeat > 1:
end.record()
end.synchronize()
msecs = end.time_since(start) / repeat
gflops = layer.flops / (msecs * 1000000.0)
print "%7.3f msecs %8.3f gflops (%s: %s) size:%s grid:%s" \
"" % (msecs, gflops, op, layer, size, grid)
def pool_layer(self, dtype,
op, N, C,
D=1, H=1, W=1,
J=1, T=1, R=1, S=1,
pad_j=0, pad_d=0, pad_h=0, pad_w=0,
str_j=None, str_d=None, str_h=None, str_w=None):
"""
Create a new PoolLayer parameter object.
This then is passed as an argument to all pooling kernels.
op: max, avg, l2 pooling
N: Number of images in mini-batch
C: Number of input feature maps
D: Depth of input image
H: Height of input image
W: Width of input image
J: Size of feature map pooling window (maxout n_pieces)
T: Depth of pooling window
R: Height of pooling window
S: Width of pooling window
padding: amount of zero-padding around the given image or feature map edge
strides: factor to step the window by in a given direction (overlap allowed)
Leave spatial dimensions at 1 to allow feature map pooling in the fc layers.
"""
# default to non-overlapping
if str_j is None: str_j = J
if str_d is None: str_d = T
if str_h is None: str_h = R
if str_w is None: str_w = S
return PoolLayer(self, dtype, op, N, C, D, H, W, J, T, R, S,
pad_j, pad_d, pad_h, pad_w, str_j, str_d, str_h, str_w)
def fprop_pool(self, layer, I, O, repeat=1):
assert layer.sizeI == I.size
assert layer.sizeO == O.size
return self._execute_pool(layer, I, O, None, 0, repeat)
def bprop_pool(self, layer, I, E, grad_I, repeat=1):
assert layer.sizeI == I.size
assert layer.sizeO == E.size
assert layer.sizeI == grad_I.size
assert I.dtype == grad_I.dtype
return self._execute_pool(layer, I, E, grad_I, 1, repeat)
def _execute_pool(self, layer, I, O, B, mode, repeat):
assert I.dtype == O.dtype
clss = "hpool" if I.dtype.type is np.float16 else "spool"
b_data = 0 if B is None else B.gpudata
kernel = _get_pool_kernel(self.cubin_path, clss, layer.op)
params = [layer.grid, layer.block, I.gpudata, O.gpudata, b_data, mode]
params.extend(layer.kernel_args)
# Warmup
if repeat > 1:
for r in range(max(repeat // 10, 1)):
kernel.prepared_call(*params, shared_size=layer.lut_size)
if self.bench or repeat > 1:
start, end = _get_events()
start.record()
for r in range(repeat):
if mode: B.fill(0)
kernel.prepared_call(*params, shared_size=layer.lut_size)
if self.bench or repeat > 1:
end.record()
end.synchronize()
msecs = end.time_since(start) / repeat
print "%7.3f msecs (%s) grid:%s" % (msecs, layer, layer.grid)
def dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False, repeat=1, size=None):
"""
C = alpha * A * B + beta * C
C = alpha * A.T * B + beta * C
C = alpha * A * B.T + beta * C
relu: if true applied before output (and prior to beta addition)
size: one of 32, 64, 128. Sometimes the fastest tiling isn't chosen for you.
"""
assert A.dtype == B.dtype == C.dtype
itemsize = C.dtype.itemsize
# one dimension must be contiguous
assert min(A.strides) == itemsize
assert min(B.strides) == itemsize
assert min(C.strides) == itemsize
lda = max(A.strides) // itemsize
ldb = max(B.strides) // itemsize
ldc = max(C.strides) // itemsize
opA = 't' if A.is_trans else 'n'
opB = 't' if B.is_trans else 'n'
op = opA + opB
assert op != "tt"
m = A.shape[0]
n = B.shape[1]
k = A.shape[1]
assert m == C.shape[0]
assert n == C.shape[1]
assert k == B.shape[0]
gridA = m // 128 + (m % 128 != 0)
if op == "nt":
size = 128
# Some basic tile size selection.
# Your best bet is to benchmark your code with all 3 sizes
# and manually fine tune the selection for each layer.
if size is None:
if n < 384-16:
n128 = n % 128
if 0 < n128 < 112:
if 48 < n128 <= 64:
n64 = n // 64
n64 *= gridA // _get_sm_count()
# nn_64 is only faster than nn_32 when occupancy is
# more than 1 warp per scheduler.
if n64 > 1 or op == "tn":
size = 64
else:
size = 32
else:
size = 32
else:
size = 128
# There's a large regime where 64 is faster, but it's hard to characterize
else:
size = 128
gridB = n // size + (n % size != 0)
threads = 256 if size == 128 else 128
size = "128x%d" % size
# nt and nn are more efficient with k%16==0
if C.dtype.type is np.float16:
clss = "hgemm"
if op == "tn" and m % 8 == 0 and n % 8 == 0 or \
op == "nn" and k % 16 == 0 and n % 8 == 0 or \
op == "nt" and k % 16 == 0:
op += "_vec"
else:
clss = "sgemm"
if op == "tn" and m % 4 == 0 and n % 4 == 0 or \
op == "nn" and k % 8 == 0 and n % 4 == 0 or \
op == "nt" and k % 16 == 0:
op += "_vec"
flags = 0
if C.rounding: flags |= 1
if relu: flags |= 2
kernel = _get_gemm_kernel(self.cubin_path, clss, op, size)
params = [
(gridA,gridB,1), (threads,1,1), _get_rand_state(),
A.gpudata, B.gpudata, C.gpudata,
lda, ldb, ldc, m, n, k,
alpha, beta, flags ]
# Warmup
if repeat > 1:
for r in range(max(repeat // 10, 1)):
kernel.prepared_call(*params)
if self.bench or repeat > 1:
start, end = _get_events()
start.record()
for r in range(repeat):
kernel.prepared_call(*params)
if self.bench or repeat > 1:
end.record()
end.synchronize()
msecs = end.time_since(start) / repeat
gflops = (m * n * k * 2.0) / (msecs * 1000000.0)
print "%7.3f msecs %4.0f gflops (%s_%s: %d,%d,%d) size:%s grid:(%d,%d)" % \
(msecs,gflops,clss,op,m,n,k, size,gridA,gridB)
if repeat > 1:
return gflops
return C
def add (self, a, b, out=None): return OpTreeNode.build("add", a, b, out=out)
def subtract (self, a, b, out=None): return OpTreeNode.build("sub", a, b, out=out)
def multiply (self, a, b, out=None): return OpTreeNode.build("mul", a, b, out=out)
def divide (self, a, b, out=None): return OpTreeNode.build("div", a, b, out=out)
def true_divide (self, a, b, out=None): return OpTreeNode.build("div", a, b, out=out)
def power (self, a, b, out=None): return OpTreeNode.build("pow", a, b, out=out)
def reciprocal (self, a, out=None): return OpTreeNode.build("div", 1, a, out=out)
def negative (self, a, out=None): return OpTreeNode.build("neg", a, None, out=out)
def absolute (self, a, out=None): return OpTreeNode.build("abs", a, None, out=out)
def fabs (self, a, out=None): return OpTreeNode.build("abs", a, None, out=out)
def sqrt (self, a, out=None): return OpTreeNode.build("sqrt", a, None, out=out)
def square (self, a, out=None): return OpTreeNode.build("sqr", a, None, out=out)
def exp (self, a, out=None): return OpTreeNode.build("exp", a, None, out=out)
def exp2 (self, a, out=None): return OpTreeNode.build("exp2", a, None, out=out)
def log (self, a, out=None): return OpTreeNode.build("log", a, None, out=out)
def log2 (self, a, out=None): return OpTreeNode.build("log2", a, None, out=out)
def sig (self, a, out=None): return OpTreeNode.build("sig", a, None, out=out)
def sig2 (self, a, out=None): return OpTreeNode.build("sig2", a, None, out=out)
def tanh (self, a, out=None): return OpTreeNode.build("tanh", a, None, out=out)
def tanh2 (self, a, out=None): return OpTreeNode.build("tanh2",a, None, out=out)
def finite (self, a, out=None): return OpTreeNode.build("finite", a, None, out=out)
def equal (self, a, b, out=None): return OpTreeNode.build("eq", a, b, out=out)
def not_equal (self, a, b, out=None): return OpTreeNode.build("ne", a, b, out=out)
def less (self, a, b, out=None): return OpTreeNode.build("lt", a, b, out=out)
def less_equal (self, a, b, out=None): return OpTreeNode.build("le", a, b, out=out)
def greater (self, a, b, out=None): return OpTreeNode.build("gt", a, b, out=out)
def greater_equal (self, a, b, out=None): return OpTreeNode.build("ge", a, b, out=out)
def maximum(self, a, b, out=None): return OpTreeNode.build("maximum", a, b, out=out)
def minimum(self, a, b, out=None): return OpTreeNode.build("minimum", a, b, out=out)
def clip(self, a, a_min, a_max, out=None):
return self.minimum(self.maximum(a, a_min), a_max, out=out)
def sum(self, a, axis=None, partial=None, out=None, keepdims=True):
if axis is None:
assert partial is not None
return self.sum(self.sum(a, axis=1, out=partial), axis=0, out=out)
return OpTreeNode.build("sum", a, None, axis=axis, out=out)
def max(self, a, axis=None, partial=None, out=None, keepdims=True):
if axis is None:
assert partial is not None
return self.max(self.max(a, axis=1, out=partial), axis=0, out=out)
return OpTreeNode.build("max", a, None, axis=axis, out=out)
def min(self, a, axis=None, partial=None, out=None, keepdims=True):
if axis is None:
assert partial is not None
return self.min(self.min(a, axis=1, out=partial), axis=0, out=out)
return OpTreeNode.build("min", a, None, axis=axis, out=out)
def argmax(self, a, axis=1, out=None, keepdims=True):
return OpTreeNode.build("argmax", a, None, axis=axis, out=out)
def argmin(self, a, axis=1, out=None, keepdims=True):
return OpTreeNode.build("argmin", a, None, axis=axis, out=out)
def mean(self, a, axis=None, partial=None, out=None, keepdims=True):
shape = OpTreeNode.shape(a)
if axis is None:
assert partial is not None
return self.multiply(
self.sum(self.sum(a, axis=1, out=partial), axis=0),
1.0/(shape[0]*shape[1]),
out=out)
return self.multiply(self.sum(a, axis=axis), 1.0/shape[axis], out=out)
def var(self, a, axis=None, partial=None, out=None, keepdims=True):
if axis is None:
assert partial is not None
return self.mean(
self.square(a - self.mean(a, axis=axis, partial=partial, out=partial[0:1,0:1])),
axis=axis, partial=partial, out=out)
return self.mean(self.square(a - self.mean(a, axis=axis)), axis=axis, out=out)
def std(self, a, axis=None, partial=None, out=None, keepdims=True):
return self.sqrt(self.var(a, axis=axis, partial=partial, out=out))
def rand(self, out=None): return OpTreeNode.build("rand", None, None, out=out)
def dropout(self, keep=0.5, out=None):
return self.less_equal(self.rand(), keep, out=out)
# For constructing an op tree used in lazy evaluation
class OpTreeNode(tuple):
def __new__(cls, *args):
return tuple.__new__(cls, args)
@staticmethod
def build(op, a, b, out=None, **kwargs):
for arg in (a,b):
if not isinstance(arg, (int, float, GPUTensor, OpTreeNode, type(None))):
return NotImplemented
op_dict = { "op" : op }
op_dict.update(kwargs)
node = OpTreeNode(op_dict, a, b)
# execute explicit assignment
if op == "assign":
return node.execute()
# passing in an out value counts as assignment
if out is not None:
return OpTreeNode({ "op" : "assign" }, out, node).execute()
# delay execution until assignment
return node
def execute(self):
stack = self.traverse(list())
return call_compound_kernel(_get_rand_state(), *stack)
# post order walk op tree and produce postfix stack
def traverse(self, stack):
# Left
if type(self[1]) is OpTreeNode:
self[1].traverse(stack)
elif self[1] is not None:
stack.append(self[1])
# Right
if type(self[2]) is OpTreeNode:
self[2].traverse(stack)
elif self[2] is not None:
stack.append(self[2])
stack.append(self[0])
return stack
@staticmethod
def shape(node):
if type(node) is GPUTensor:
return node.shape
if type(node) is OpTreeNode:
max_shape = [1,1]
stack = node.traverse(list())
for item in stack:
if type(item) is GPUTensor:
for i in range(2):
max_shape[i] = max(max_shape[i], item.shape[i])
return tuple(max_shape)
#scalar
return (1,1)
def __add__ (self, other): return self.build("add", self, other)
def __sub__ (self, other): return self.build("sub", self, other)
def __mul__ (self, other): return self.build("mul", self, other)
def __div__ (self, other): return self.build("div", self, other)
def __truediv__ (self, other): return self.build("div", self, other)
def __pow__ (self, other): return self.build("pow", self, other)
def __radd__ (self, other): return self.build("add", other, self)
def __rsub__ (self, other): return self.build("sub", other, self)
def __rmul__ (self, other): return self.build("mul", other, self)
def __rdiv__ (self, other): return self.build("div", other, self)
def __rtruediv__ (self, other): return self.build("div", other, self)
def __rpow__ (self, other): return self.build("pow", other, self)
def __eq__ (self, other): return self.build("eq", self, other)
def __ne__ (self, other): return self.build("ne", self, other)
def __lt__ (self, other): return self.build("lt", self, other)
def __le__ (self, other): return self.build("le", self, other)
def __gt__ (self, other): return self.build("gt", self, other)
def __ge__ (self, other): return self.build("ge", self, other)
def __abs__ (self): return self.build("abs", self, None)
def __neg__ (self): return self.build("neg", self, None)
def _contiguous_strides(itemsize, shape):
if shape:
strides = [itemsize]
for s in shape[:0:-1]:
strides.append(strides[-1]*s)
return tuple(strides[::-1])
else:
return ()
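# Worked example (added for illustration): for a C-ordered float32 array of
# shape (3, 4), itemsize is 4 bytes, so _contiguous_strides(4, (3, 4)) walks the
# shape from the last axis backwards and returns (16, 4): stepping one row moves
# 4 elements * 4 bytes, stepping one column moves 4 bytes.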
@context_dependent_memoize
def _get_rand_state():
# initialize our common pool of randomness (1/4 MB):
# MAX_THREADS_PER_MULTIPROCESSOR * 32 SMs (32 to be somewhat future proof
# and power of two). This size is currently hardcoded in the kernels,
# to be parameterized ...
rand_init = np.random.random_integers(0,2**32-1,(2048*32,)).astype(np.uint32)
rand_state = drv.mem_alloc(rand_init.nbytes)
drv.memcpy_htod(rand_state, rand_init)
return rand_state
@context_dependent_memoize
def _get_events():
return (drv.Event(), drv.Event())
@context_dependent_memoize
def _get_module(path, clss, op, size=None):
size = "" if size is None else "_" + size
cubin = "{0}_{1}{2}.cubin".format(clss, op, size)
return drv.module_from_file(os.path.join(path, cubin))
@context_dependent_memoize
def _get_gemm_kernel(path, clss, op, size):
module = _get_module(path, clss, op, size)
kernel = "{0}_{1}_{2}".format(clss, op, size)
func = module.get_function(kernel)
func.prepare("PPPPIIIIIIffI")
#print "Loaded: ", kernel
return func
@context_dependent_memoize
def _get_conv_kernel(path, clss, op, size):
module = _get_module(path, clss, op, size)
kernel = "{0}_{1}_{2}".format(clss, op, size)
func = module.get_function(kernel)
func.prepare("PPPPfIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII")
#print "Loaded: ", kernel
return func
@context_dependent_memoize
def _get_pool_kernel(path, clss, op):
module = _get_module(path, clss, op)
kernel = "{0}_{1}".format(clss, op)
func = module.get_function(kernel)
func.prepare("PPPIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII")
#print "Loaded: ", kernel
return func
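# Illustrative sketch (not part of the original module): the intended flow, under
# the assumption that a CUDA context exists (e.g. via pycuda.autoinit) and that
# the precompiled Maxwell cubins are available next to this package.
if __name__ == '__main__':
    import pycuda.autoinit  # noqa: F401 - creates the CUDA context
    ng = NervanaGPU(stochastic_round=False, bench=True)
    A = ng.array(np.random.randn(128, 256), dtype=np.float16)
    B = ng.array(np.random.randn(256, 64), dtype=np.float16)
    C = ng.empty((128, 64), dtype=np.float16)
    ng.dot(A, B, C, alpha=1.0, beta=0.0)     # C = A * B on the GPU
    D = ng.empty_like(C)
    D[:] = ng.maximum(C, 0)                  # lazy op tree, executed on assignment
    print D.get()                            # copy back to host (Python 2 print)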
|
{
"content_hash": "9a6ec1bb644a183b162b25d859d47043",
"timestamp": "",
"source": "github",
"line_count": 1029,
"max_line_length": 108,
"avg_line_length": 36.97959183673469,
"alnum_prop": 0.5418112057184905,
"repo_name": "baidu-research/nervanagpu",
"id": "612b3a0239166b1881aefa630ff96f9c95ec4aab",
"size": "38657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nervanagpu/nervanagpu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4594"
},
{
"name": "CSS",
"bytes": "508747"
},
{
"name": "Cuda",
"bytes": "80249"
},
{
"name": "Makefile",
"bytes": "5462"
},
{
"name": "Python",
"bytes": "152492"
}
],
"symlink_target": ""
}
|
import sys, string, hashlib
from Crypto.PublicKey import RSA
from OP_RETURN import *
CN = raw_input("Which is the primary Host Name of your website (Common Name - CN)?") #Common Name
O = raw_input("Which is the Organizations name?") #Organization
O2 = O.replace(" ", "_") #Replace the spaces of the Organization with underscores
ans = raw_input("Do you have a key pair? [Y]es [N]o, default: [Y]")
if ans == "Y" or ans == "y" or ans == "" or ans == " ":
PK = raw_input("Which is your public key?") #Public Key of the owner
else:
#pip install pycryptodome
print "Public/Private key pair creation:" #RSA Public/Private key pair creation with PyCryptodome
print "Warning: This is a pseudo-random generation."
print "Warning: If you want complete randomness consider other ways of Public/Private key pair generation."
secret_code = raw_input("Give an unguessable passphrase: ")
key = RSA.generate(2048)
encrypted_key = key.exportKey(passphrase=secret_code, pkcs=8, protection="scryptAndAES128-CBC")
file_out = open("rsa_key.bin", "wb") #Save the keys to the rsa_key.bin
file_out.write(encrypted_key)
print key.publickey().exportKey()
PK = key.publickey().exportKey()
PK2 = PK.replace("-----BEGIN RSA PUBLIC KEY-----", "") #Remove header
PK3 = PK2.replace("-----END RSA PUBLIC KEY-----", "") #Remove footer
FS = CN + ";" + O2 + ";" + PK3 + ";" #Final String - concatenation
m = hashlib.sha256()
m.update(FS)
print "\nThe string for hashing is %s" %FS
print "\nYour Certificate is: ", m.hexdigest()
ans2 = raw_input("Do you want to send your certificate to the blockchain? [Y]es [N]o, default: [Y]")
if ans2 == "Y" or ans2 == "y" or ans2 == "" or ans2 == " ":
send_address = raw_input("Give your bitcoin address: ") # Transaction to the same bitcoin address
send_amount = 0.00009 # Minimum amount of bitcoin transaction fee
metadata = m.hexdigest() # metadata equals the SHA256 hash that was previously generated
metadata_from_hex=OP_RETURN_hex_to_bin(metadata)
if metadata_from_hex is not None:
metadata=metadata_from_hex
result=OP_RETURN_send(send_address, float(send_amount), metadata)
if 'error' in result:
print('Error: '+result['error'])
else:
print('TxID: '+result['txid']+'\nWait a few seconds then check on: http://coinsecrets.org/')
else:
sys.exit()
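# Illustrative sketch (not part of the original script), kept as comments so it
# does not change the script's control flow: the certificate is simply the
# SHA-256 of "CN;Organization_with_underscores;public_key_body;". For made-up
# values the same digest can be reproduced with:
#
#   import hashlib
#   final_string = "example.com;Example_Org;<public key body without header/footer>;"
#   print hashlib.sha256(final_string).hexdigest()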
|
{
"content_hash": "485ad1f7546dce8477b1dc37e0834d94",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 111,
"avg_line_length": 47.38,
"alnum_prop": 0.6758125791473195,
"repo_name": "Cr0wTom/Block-SSL",
"id": "9fd75b8a1e8571beb1c5ef18a4b39d5a32304d3a",
"size": "2388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Legacy/Block_SSL_0.1.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85592"
}
],
"symlink_target": ""
}
|
import numpy as np
import pdb
import math
from FoldResult import FoldResult
import Maybe
import errors
class CvResult(object):
'''Cross-validation result.
Created by running main program cv-cell.py
Contains results from n-fold cross validation.
'''
def __init__(self):
self.fold_results = {}
def save_FoldResult(self, fr):
self.fold_results[len(self.fold_results)] = fr
def __str__(self):
s = 'CvResult(%s folds)' % len(self.fold_results)
return s
def __getitem__(self, key):
return self.fold_results[key]
def _reduce_fold_errors(self,
summarize_fold_accuracy,
reduce_fold_summary_to_number):
'''Return Maybe(a number, summarizing accuracy across folds).
'''
# 1: summarize actuals and estimates from each fold by a number
fold = np.full(len(self.fold_results), np.nan)
for index, fr in self.fold_results.iteritems():
maybe_fold_errors = fr.maybe_errors()
if maybe_fold_errors.has_value:
# squash large value that will fail on error * error
errors = maybe_fold_errors.value
errors[np.abs(errors) > 1e100] = 1e100
fold[index] = summarize_fold_accuracy(errors)
# now fold[fold_index] is a number, possibly NaN, summarizing fold
# estimation accuracy
# 2: reduce fold summaries to a number
if np.isnan(fold).all():
return Maybe.NoValue()
else:
known_fold_values = fold[~np.isnan(fold)] # drop NaNs
return Maybe.Maybe(reduce_fold_summary_to_number(known_fold_values))
def mean_of_root_mean_squared_errors(self):
# return Maybe(mean of root mean squared errors)
return self._reduce_fold_errors(errors.root_mean_squared_error,
errors.mean_error)
def median_of_root_median_squared_errors(self):
# return Maybe(median of root median squared errors)
return self._reduce_fold_errors(errors.root_median_squared_error,
errors.median_error)
def mean_of_fraction_wi10(self):
# return Maybe(mean of fraction of estimates within 10 percent)
# approach
# 1. Determine for each fold the fraction within 10 percent
# 2. Take the mean of these fractions
fold = np.full(len(self.fold_results), np.nan)
for index, fr in self.fold_results.iteritems():
abs_errors = np.abs(fr.actuals - fr.estimates)
abs_errors_is_less = (abs_errors / fr.actuals) < 0.10
            fold[index] = np.mean(abs_errors_is_less)  # fraction within 10 percent
result = np.mean(fold)
return Maybe.Maybe(result)
if __name__ == '__main__':
import unittest
if False:
pdb.set_trace()
class Test(unittest.TestCase):
def setUp(self):
def make_fold_result(actuals, estimates):
fr = FoldResult()
fr.extend(np.array(actuals), np.array(estimates))
return fr
def make_cv_result(*fr_list):
cvresult = CvResult()
for fr in fr_list:
cvresult.save_FoldResult(fr)
return cvresult
fr1 = make_fold_result([1, 2, -3],
[10, 20, -30])
fr2 = make_fold_result([-100, 200],
[-10, 20])
fr3 = make_fold_result([1, np.nan],
[np.nan, 2])
fr4 = make_fold_result([0],
[1e200])
self.cv1 = make_cv_result(fr1, fr2, fr3)
self.cv2 = make_cv_result(fr3)
self.cv3 = make_cv_result(fr4)
def test_mean_of_root_mean_squared_errors_cv1(self):
fr1 = math.sqrt((9 * 9 + 18 * 18 + 27 * 27) / 3.0)
fr2 = math.sqrt((90 * 90 + 180 * 180) / 2.0)
expected = (fr1 + fr2) / 2.0
cv = self.cv1.mean_of_root_mean_squared_errors()
self.assertAlmostEqual(cv.value, expected)
pass
def test_mean_of_root_mean_squared_errors_cv2(self):
cv = self.cv2.mean_of_root_mean_squared_errors()
self.assertTrue(not cv.has_value)
pass
def test_mean_of_root_mean_squared_errors_cv3(self):
cv = self.cv3.mean_of_root_mean_squared_errors()
self.assertTrue(cv.has_value)
pass
def test_median_of_root_median_squared_errors_cv1(self):
f1 = math.sqrt(18 * 18)
f2 = math.sqrt(.5 * (90 * 90 + 180 * 180))
expected = .5 * (f1 + f2)
cv = self.cv1.median_of_root_median_squared_errors()
self.assertAlmostEqual(cv.value, expected)
def test_median_of_root_median_squared_errors_cv2(self):
cv = self.cv2.median_of_root_median_squared_errors()
self.assertTrue(not cv.has_value)
unittest.main()
|
{
"content_hash": "46ea37a73829357a995fd16793fed5c6",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 80,
"avg_line_length": 35.87323943661972,
"alnum_prop": 0.5520219866509619,
"repo_name": "rlowrance/re-local-linear",
"id": "fb91e610f485cf019e430df6e4aec7b53c77bf44",
"size": "5094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CvResult.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "11544"
},
{
"name": "Python",
"bytes": "312571"
},
{
"name": "R",
"bytes": "140007"
},
{
"name": "Shell",
"bytes": "226"
}
],
"symlink_target": ""
}
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import dateparser
STYLES_DICT = {
'black': '',
'green': 'primary',
'red': 'danger'
}
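# Maps the optional color suffix of an option ('<text>#<color>') to a Slack button style.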
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
def create_blocks(text: str, entitlement: str, options: list, reply: str) -> list:
value = json.dumps({
'entitlement': entitlement,
'reply': reply
})
blocks: list = [{
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': text
}
}]
elements = []
for option in options:
element = {
'type': 'button',
'text': {
'type': 'plain_text',
'emoji': True,
'text': option['text']
},
'value': value
}
if 'style' in option:
element['style'] = option['style']
elements.append(element)
if elements:
actions = {
'type': 'actions',
'elements': elements
}
blocks.append(actions)
return blocks
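# Shape of the list that create_blocks returns (sketch; the literal values here are
# illustrative only):
#     [{'type': 'section', 'text': {'type': 'mrkdwn', 'text': 'Approve?'}},
#      {'type': 'actions', 'elements': [
#          {'type': 'button',
#           'text': {'type': 'plain_text', 'emoji': True, 'text': 'Yes'},
#           'style': 'primary',
#           'value': '{"entitlement": "...", "reply": "..."}'}]}]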
def main():
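    # Flow: register an entitlement for the eventual reply, build either a threaded
    # question or interactive buttons from the supplied options, then deliver the
    # message through the SlackV2 send-notification command.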
res = demisto.executeCommand('addEntitlement', {'persistent': demisto.get(demisto.args(), 'persistent'),
'replyEntriesTag': demisto.get(demisto.args(), 'replyEntriesTag')})
if isError(res[0]):
demisto.results(res)
sys.exit(0)
entitlement = demisto.get(res[0], 'Contents')
option1 = demisto.get(demisto.args(), 'option1')
option2 = demisto.get(demisto.args(), 'option2')
extra_options = argToList(demisto.args().get('additionalOptions', ''))
reply = demisto.get(demisto.args(), 'reply')
response_type = demisto.get(demisto.args(), 'responseType')
lifetime = demisto.get(demisto.args(), 'lifetime')
try:
parsed_date = dateparser.parse('in ' + lifetime, settings={'TIMEZONE': 'UTC'})
assert parsed_date is not None, f'could not parse in {lifetime}'
expiry = datetime.strftime(parsed_date,
DATE_FORMAT)
except Exception:
parsed_date = dateparser.parse('in 1 day', settings={'TIMEZONE': 'UTC'})
assert parsed_date is not None
expiry = datetime.strftime(parsed_date,
DATE_FORMAT)
default_response = demisto.get(demisto.args(), 'defaultResponse')
entitlement_string = entitlement + '@' + demisto.investigation()['id']
if demisto.get(demisto.args(), 'task'):
entitlement_string += '|' + demisto.get(demisto.args(), 'task')
args = {
'ignoreAddURL': 'true',
'using-brand': 'SlackV2'
}
user_options = [option1, option2]
options = []
if extra_options:
user_options += extra_options
if response_type == 'thread':
for option in user_options:
options.append(option.split('#')[0])
string_options = ' or '.join(list(map(lambda o: '`{}`'.format(o), options)))
message = '{} - Please reply to this thread with {}.'.format(demisto.args()['message'], string_options)
args['message'] = json.dumps({
'message': message,
'entitlement': entitlement_string,
'reply': reply,
'expiry': expiry,
'default_response': default_response
})
else:
for option in user_options:
option = option.split('#')
button = {
'text': option[0]
}
if len(option) > 1:
style = STYLES_DICT.get(option[1])
if style:
button['style'] = style
options.append(button)
blocks = json.dumps(create_blocks(demisto.args()['message'], entitlement_string, options, reply))
args['blocks'] = json.dumps({
'blocks': blocks,
'entitlement': entitlement_string,
'reply': reply,
'expiry': expiry,
'default_response': default_response
})
args['message'] = demisto.args()['message']
to = demisto.get(demisto.args(), 'user')
channel = demisto.get(demisto.args(), 'channel')
if to:
args['to'] = to
elif channel:
args['channel'] = channel
else:
return_error('Either a user or a channel must be provided.')
try:
demisto.results(demisto.executeCommand('send-notification', args))
except ValueError as e:
if 'Unsupported Command' in str(e):
return_error('The command is unsupported by any integration instance. If you have SlackV3 or above enabled, '
'please use SlackAskV2 instead.')
else:
return_error('An error has occurred while executing the send-notification command',
error=e)
if __name__ in ('__builtin__', 'builtins', '__main__'):
main()
|
{
"content_hash": "b269eb29115c9386ab06ce194c9cb2e6",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 121,
"avg_line_length": 33.895833333333336,
"alnum_prop": 0.5449702929727515,
"repo_name": "VirusTotal/content",
"id": "13a9e5e1330131387096df61b15532af20afda70",
"size": "4881",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/Slack/Scripts/SlackAsk/SlackAsk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
}
|
from debug_mode_logging import DebugModeLoggingMiddleware
|
{
"content_hash": "89d204e404f57e226c798a0aef847ce2",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 57,
"avg_line_length": 58,
"alnum_prop": 0.896551724137931,
"repo_name": "prior/djextras",
"id": "819f2abf594edaf7d31d12e607626ec8201bd60b",
"size": "58",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djextras/middleware/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4804"
}
],
"symlink_target": ""
}
|
import os
HAIL_USE_FULL_QUERY = os.environ.get('HAIL_USE_FULL_QUERY') is not None
|
{
"content_hash": "11d049eab1aaaf66b40d1f3f9928d96a",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 71,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.7349397590361446,
"repo_name": "danking/hail",
"id": "f1163e1a73f3628e8baad5cc4f0e2384a5e7adbe",
"size": "83",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "monitoring/monitoring/configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "289"
},
{
"name": "C++",
"bytes": "171899"
},
{
"name": "CSS",
"bytes": "29124"
},
{
"name": "Dockerfile",
"bytes": "13073"
},
{
"name": "Emacs Lisp",
"bytes": "252"
},
{
"name": "HTML",
"bytes": "151709"
},
{
"name": "Java",
"bytes": "32302"
},
{
"name": "JavaScript",
"bytes": "3309"
},
{
"name": "Jupyter Notebook",
"bytes": "162395"
},
{
"name": "Makefile",
"bytes": "73914"
},
{
"name": "Python",
"bytes": "4149266"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "SCSS",
"bytes": "9075"
},
{
"name": "Scala",
"bytes": "4426573"
},
{
"name": "Shell",
"bytes": "49103"
},
{
"name": "TeX",
"bytes": "7125"
},
{
"name": "XSLT",
"bytes": "5748"
}
],
"symlink_target": ""
}
|
"""
Common fabric utilities used for different tasks such as manager setup, child
management, AMI creation and so on.
"""
from fabric.api import put, get, sudo, run
from cStringIO import StringIO
__all__ = ['use_vpc_sources', 'update_hosts']
def use_vpc_sources():
"""
Updates the APT sources for instances deployed as part of a VPC.
    VPC instances cannot connect to other AWS instances running outside of the
    cloud using private IP addressing, and as the URL of the internal AWS
    mirror resolves to a private IP address, it can't be used.
"""
SOURCES = """
deb http://archive.ubuntu.com/ubuntu/ lucid main universe
deb-src http://archive.ubuntu.com/ubuntu/ lucid main universe
deb http://archive.ubuntu.com/ubuntu/ lucid-updates main universe
deb-src http://archive.ubuntu.com/ubuntu/ lucid-updates main universe
deb http://security.ubuntu.com/ubuntu lucid-security main universe
deb-src http://security.ubuntu.com/ubuntu lucid-security main universe
"""
put(StringIO(SOURCES), '/etc/apt/sources.list', use_sudo=True, mode=0644)
sudo('chown root:root /etc/apt/sources.list')
sudo('apt-get -y update')
def update_hosts():
"""
Updates the /etc/hosts file to include a loopback entry for the currently
defined hostname. This prevents the sudo command to print warnings about
unresolved hosts for the local machine.
"""
hosts = StringIO()
hostname = run('hostname')
get('/etc/hosts', hosts)
hosts = StringIO(hosts.getvalue().replace(
'127.0.0.1 localhost',
'127.0.0.1 localhost\n127.0.0.1 {0}'.format(hostname)
))
    put(hosts, '/etc/hosts', use_sudo=True, mode=0644)
sudo('chown root:root /etc/hosts')
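# Usage sketch (illustrative; assumes these helpers are imported into a fabfile and a
# host list is configured):
#     from poputils.fabfiles.common import use_vpc_sources, update_hosts
#     # then run, e.g.: fab -H <host> use_vpc_sources update_hosts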
|
{
"content_hash": "eeae1d28782a1d4135172273bf693718",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 32.79245283018868,
"alnum_prop": 0.6904487917146145,
"repo_name": "GaretJax/pop-utils",
"id": "faf3f516d306e33d3766f3cd6a385534d059a141",
"size": "1738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poputils/fabfiles/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "189513"
},
{
"name": "C++",
"bytes": "1389566"
},
{
"name": "Perl",
"bytes": "53030"
},
{
"name": "Python",
"bytes": "75382"
},
{
"name": "Shell",
"bytes": "246824"
}
],
"symlink_target": ""
}
|
"""Tests for `tf.data.Iterator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
class IteratorTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.graph_only_combinations())
def testNoGradients(self):
component = constant_op.constant([1.])
side = constant_op.constant(0.)
add = lambda x: x + side
dataset = dataset_ops.Dataset.from_tensor_slices(component).map(add)
value = dataset_ops.make_one_shot_iterator(dataset).get_next()
self.assertIsNone(gradients_impl.gradients(value, component)[0])
self.assertIsNone(gradients_impl.gradients(value, side)[0])
self.assertIsNone(gradients_impl.gradients(value, [component, side])[0])
@combinations.generate(test_base.graph_only_combinations())
def testCapturingStateInOneShotRaisesException(self):
var = variables.Variable(37.0, name="myvar")
dataset = (
dataset_ops.Dataset.from_tensor_slices([0.0, 1.0, 2.0])
.map(lambda x: x + var))
with self.assertRaisesRegex(
ValueError, r"`Dataset.make_one_shot_iterator\(\)` does not support "
"datasets that capture stateful objects.+myvar"):
dataset_ops.make_one_shot_iterator(dataset)
@combinations.generate(test_base.graph_only_combinations())
def testOneShotIterator(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(14))
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.cached_session() as sess:
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(test_base.graph_only_combinations())
def testOneShotIteratorCaptureByValue(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
tensor_components = tuple([ops.convert_to_tensor(c) for c in components])
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensor_slices(tensor_components)
.map(_map_fn).repeat(14))
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.cached_session() as sess:
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(test_base.default_test_combinations())
def testOneShotIteratorInsideContainer(self):
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
def within_container():
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensor_slices(components)
.map(_map_fn).repeat(14))
return iterator.get_next()
server = server_lib.Server.create_local_server()
# Create two iterators within unique containers, and run them to
# make sure that the resources aren't shared.
#
# The test below would fail if cname were the same across both
# sessions.
for j in range(2):
with session.Session(server.target) as sess:
cname = "iteration%d" % j
with ops.container(cname):
get_next = within_container()
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(test_base.graph_only_combinations())
def testOneShotIteratorNonBlocking(self):
dataset = dataset_ops.Dataset.from_tensors([1, 2, 3]).map(lambda x: x * x)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
# Create a session with a single thread to ensure that the
# one-shot iterator initializer does not deadlock.
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=1, use_per_session_threads=True)
with session.Session(config=config) as sess:
self.assertAllEqual([1, 4, 9], sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
# Test with multiple threads invoking the one-shot iterator concurrently.
with session.Session(config=config) as sess:
results = []
def consumer_thread():
try:
results.append(sess.run(next_element))
except errors.OutOfRangeError:
results.append(None)
num_threads = 8
threads = [
self.checkedThread(consumer_thread) for _ in range(num_threads)
]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertLen(results, num_threads)
self.assertLen([None for r in results if r is None], num_threads - 1)
self.assertAllEqual([[1, 4, 9]], [r for r in results if r is not None])
@combinations.generate(test_base.graph_only_combinations())
def testOneShotIteratorInitializerFails(self):
# Define a dataset whose initialization will always fail.
dataset = dataset_ops.Dataset.from_tensors(array_ops.gather([0], [4]))
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
with self.cached_session() as sess:
with self.assertRaisesRegex(errors.InvalidArgumentError, ""):
sess.run(next_element)
# Test that subsequent attempts to use the iterator also fail.
with self.assertRaisesRegex(errors.InvalidArgumentError, ""):
sess.run(next_element)
with self.cached_session() as sess:
def consumer_thread():
with self.assertRaisesRegex(errors.InvalidArgumentError, ""):
sess.run(next_element)
num_threads = 8
threads = [
self.checkedThread(consumer_thread) for _ in range(num_threads)
]
for t in threads:
t.start()
for t in threads:
t.join()
@combinations.generate(test_base.graph_only_combinations())
def testSimpleSharedResource(self):
components = (np.array(1, dtype=np.int64),
np.array([1, 2, 3], dtype=np.int64),
np.array(37.0, dtype=np.float64))
server = server_lib.Server.create_local_server()
# Create two non-overlapping sessions that share the same iterator
# resource on the same server, and verify that an action of the
# first session (initializing the iterator) is visible in the
# second session.
with ops.Graph().as_default():
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensors(
components).map(lambda x, y, z: (x, y, z)),
shared_name="shared_iterator")
init_op = iterator.initializer
get_next = iterator.get_next()
with session.Session(server.target) as sess:
sess.run(init_op)
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Re-initialize the iterator in the first session.
sess.run(init_op)
with ops.Graph().as_default():
# Re-define the iterator manually, without defining any of the
# functions in this graph, to ensure that we are not
# accidentally redefining functions with the same names in the
# new graph.
iterator = iterator_ops.Iterator.from_structure(
shared_name="shared_iterator",
output_types=(dtypes.int64, dtypes.int64, dtypes.float64),
output_shapes=([], [3], []))
get_next = iterator.get_next()
with session.Session(server.target) as sess:
# Use the iterator without re-initializing in the second session.
results = sess.run(get_next)
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(test_base.graph_only_combinations())
def testNotInitializedError(self):
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.from_tensors(components))
get_next = iterator.get_next()
with self.cached_session() as sess:
with self.assertRaisesRegex(errors.FailedPreconditionError,
"iterator has not been initialized"):
sess.run(get_next)
@combinations.generate(test_base.graph_only_combinations())
def testReinitializableIterator(self):
dataset_3 = dataset_ops.Dataset.from_tensors(
constant_op.constant([1, 2, 3]))
dataset_4 = dataset_ops.Dataset.from_tensors(
constant_op.constant([4, 5, 6, 7]))
iterator = iterator_ops.Iterator.from_structure(
dataset_ops.get_legacy_output_types(dataset_3), [None])
dataset_3_init_op = iterator.make_initializer(dataset_3)
dataset_4_init_op = iterator.make_initializer(dataset_4)
get_next = iterator.get_next()
self.assertEqual(
dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_types(iterator))
self.assertEqual(
dataset_ops.get_legacy_output_types(dataset_4),
dataset_ops.get_legacy_output_types(iterator))
self.assertEqual(
[None], dataset_ops.get_legacy_output_shapes(iterator).as_list())
with self.cached_session() as sess:
# The iterator is initially uninitialized.
with self.assertRaises(errors.FailedPreconditionError):
sess.run(get_next)
# Initialize with one dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Initialize with a different dataset.
sess.run(dataset_4_init_op)
self.assertAllEqual([4, 5, 6, 7], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Reinitialize with the first dataset.
sess.run(dataset_3_init_op)
self.assertAllEqual([1, 2, 3], sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(test_base.graph_only_combinations())
def testReinitializableIteratorWithFunctions(self):
def g():
for i in range(10):
yield i
iterator = iterator_ops.Iterator.from_structure(dtypes.int64, [])
next_element = iterator.get_next()
with self.cached_session() as sess:
dataset_1 = dataset_ops.Dataset.from_generator(
g, output_types=dtypes.int64)
sess.run(iterator.make_initializer(dataset_1))
for expected in range(10):
self.assertEqual(expected, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
dataset_2 = dataset_ops.Dataset.from_generator(
g, output_types=dtypes.int64)
sess.run(iterator.make_initializer(dataset_2))
for expected in range(10):
self.assertEqual(expected, sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
@combinations.generate(test_base.default_test_combinations())
def testReinitializableIteratorStaticErrors(self):
# Non-matching structure for types and shapes.
with self.assertRaises(TypeError):
iterator = iterator_ops.Iterator.from_structure(
(dtypes.int64, dtypes.float64), [None])
# Test validation of dataset argument.
iterator = iterator_ops.Iterator.from_structure((dtypes.int64,
dtypes.float64))
# Incompatible structure.
with self.assertRaises(ValueError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(((constant_op.constant(
[1, 2, 3], dtype=dtypes.int64),), (constant_op.constant(
[4., 5., 6., 7.], dtype=dtypes.float64),))))
# Incompatible types.
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(
(constant_op.constant([1, 2, 3], dtype=dtypes.int32),
constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float32))))
# Incompatible shapes.
iterator = iterator_ops.Iterator.from_structure(
(dtypes.int64, dtypes.float64), ([None], []))
with self.assertRaises(TypeError):
iterator.make_initializer(
dataset_ops.Dataset.from_tensors(
(constant_op.constant([1, 2, 3], dtype=dtypes.int64),
constant_op.constant([4., 5., 6., 7.], dtype=dtypes.float64))))
@combinations.generate(test_base.graph_only_combinations())
def testIteratorStringHandle(self):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_4 = dataset_ops.make_one_shot_iterator(dataset_4)
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_iterator = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
next_element = feedable_iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(dataset_3),
dataset_ops.get_structure(feedable_iterator)))
with self.cached_session() as sess:
iterator_3_handle = sess.run(iterator_3.string_handle())
iterator_4_handle = sess.run(iterator_4.string_handle())
self.assertEqual(10,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(1,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(20,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(2,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(30,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(3,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(40,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_3_handle})
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_4_handle})
@combinations.generate(test_base.graph_only_combinations())
def testIteratorStringHandleFuture(self):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
dataset_4 = dataset_ops.Dataset.from_tensor_slices([10, 20, 30, 40])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_4 = dataset_ops.make_one_shot_iterator(dataset_4)
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_iterator = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
next_element = feedable_iterator.get_next()
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(dataset_3),
dataset_ops.get_structure(feedable_iterator)))
with self.cached_session() as sess:
iterator_3_handle = sess.run(iterator_3.string_handle())
iterator_4_handle = sess.run(iterator_4.string_handle())
self.assertEqual(
10,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
1,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
20,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
2,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
30,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
self.assertEqual(
3,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_3_handle}))
self.assertEqual(
40,
sess.run(
next_element,
feed_dict={handle_placeholder: iterator_4_handle}))
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_3_handle})
with self.assertRaises(errors.OutOfRangeError):
sess.run(
next_element, feed_dict={handle_placeholder: iterator_4_handle})
@combinations.generate(test_base.graph_only_combinations())
def testIteratorStringHandleReuseTensorObject(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
one_shot_iterator = dataset_ops.make_one_shot_iterator(dataset)
initializable_iterator = dataset_ops.make_initializable_iterator(dataset)
structure_iterator = iterator_ops.Iterator.from_structure(
dataset_ops.get_legacy_output_types(dataset))
created_ops = len(ops.get_default_graph().get_operations())
self.assertIs(one_shot_iterator.string_handle(),
one_shot_iterator.string_handle())
self.assertIs(initializable_iterator.string_handle(),
initializable_iterator.string_handle())
self.assertIs(structure_iterator.string_handle(),
structure_iterator.string_handle())
# Assert that getting the (default) string handle creates no ops.
self.assertEqual(created_ops, len(ops.get_default_graph().get_operations()))
# Specifying an explicit name will create a new op.
handle_with_name = one_shot_iterator.string_handle(name="foo")
self.assertEqual("foo", handle_with_name.op.name)
self.assertIsNot(one_shot_iterator.string_handle(), handle_with_name)
handle_with_same_name = one_shot_iterator.string_handle(name="foo")
self.assertEqual("foo_1", handle_with_same_name.op.name)
self.assertIsNot(handle_with_name, handle_with_same_name)
@combinations.generate(test_base.graph_only_combinations())
def testIteratorStringHandleError(self):
dataset_int_scalar = (
dataset_ops.Dataset.from_tensor_slices([1, 2, 3]).repeat())
dataset_float_vector = (dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0]))
handle_placeholder = array_ops.placeholder(dtypes.string, shape=[])
feedable_int_scalar = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32, [])
feedable_int_vector = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32, [None])
feedable_int_any = iterator_ops.Iterator.from_string_handle(
handle_placeholder, dtypes.int32)
with self.cached_session() as sess:
handle_int_scalar = sess.run(dataset_ops.make_one_shot_iterator(
dataset_int_scalar).string_handle())
handle_float_vector = sess.run(dataset_ops.make_one_shot_iterator(
dataset_float_vector).string_handle())
self.assertEqual(1,
sess.run(
feedable_int_scalar.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
self.assertEqual(2,
sess.run(
feedable_int_any.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
with self.assertRaises(errors.InvalidArgumentError):
print(sess.run(
feedable_int_vector.get_next(),
feed_dict={handle_placeholder: handle_int_scalar}))
with self.assertRaises(errors.InvalidArgumentError):
print(sess.run(
feedable_int_vector.get_next(),
feed_dict={handle_placeholder: handle_float_vector}))
@combinations.generate(test_base.graph_only_combinations())
def testRemoteIteratorUsingRemoteCallOpDirectSession(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 3
with ops.device("/job:localhost/replica:0/task:0/cpu:1"):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_3_handle = iterator_3.string_handle()
@function.Defun(dtypes.string)
def _remote_fn(h):
remote_iterator = iterator_ops.Iterator.from_string_handle(
h, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
return remote_iterator.get_next()
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
remote_op = functional_ops.remote_call(
args=[iterator_3_handle],
Tout=[dtypes.int32],
f=_remote_fn,
target=target_placeholder)
with self.session(config=worker_config) as sess:
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [1])
# Fails when target is cpu:2 where the resource is not located.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:2"
})
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [2])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
self.assertEqual(elem, [3])
with self.assertRaises(errors.OutOfRangeError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:1"
})
@combinations.generate(test_base.graph_only_combinations())
def testRemoteIteratorUsingRemoteCallOpMultiWorkers(self):
s1 = server_lib.Server.create_local_server()
s2 = server_lib.Server.create_local_server()
s3 = server_lib.Server.create_local_server()
cluster_def = cluster_pb2.ClusterDef()
workers = cluster_def.job.add()
workers.name = "worker"
workers.tasks[0] = s1.target[len("grpc://"):]
workers.tasks[1] = s2.target[len("grpc://"):]
client = cluster_def.job.add()
client.name = "client"
client.tasks[0] = s3.target[len("grpc://"):]
config = config_pb2.ConfigProto(cluster_def=cluster_def)
worker_devices = [
"/job:worker/replica:0/task:%d/cpu:0" % i for i in range(2)
]
itr_handles = []
for device in worker_devices:
with ops.device(device):
src = dataset_ops.Dataset.from_tensor_slices([device])
itr = dataset_ops.make_one_shot_iterator(src)
itr_handles.append(itr.string_handle())
targets = dataset_ops.Dataset.from_tensor_slices(worker_devices)
handles = dataset_ops.Dataset.from_tensor_slices(itr_handles)
@function.Defun(dtypes.string)
def loading_func(h):
remote_itr = iterator_ops.Iterator.from_string_handle(
h, dataset_ops.get_legacy_output_types(itr),
dataset_ops.get_legacy_output_shapes(itr))
return remote_itr.get_next()
def map_fn(target, handle):
return functional_ops.remote_call(
args=[handle], Tout=[dtypes.string], f=loading_func, target=target)
with ops.device("/job:client"):
client_dataset = dataset_ops.Dataset.zip((targets, handles)).map(map_fn)
itr = dataset_ops.make_initializable_iterator(client_dataset)
n = itr.get_next()
with session.Session(s3.target, config=config) as sess:
sess.run(itr.initializer)
expected_values = worker_devices
for expected in expected_values:
self.assertEqual((compat.as_bytes(expected),), sess.run(n))
with self.assertRaises(errors.OutOfRangeError):
sess.run(n)
@combinations.generate(test_base.graph_only_combinations())
def testRemoteIteratorUsingRemoteCallOpDirectSessionGPUCPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
dataset_3 = dataset_ops.Dataset.from_tensor_slices([1, 2, 3])
iterator_3 = dataset_ops.make_one_shot_iterator(dataset_3)
iterator_3_handle = iterator_3.string_handle()
def _encode_raw(byte_array):
return bytes(bytearray(byte_array))
@function.Defun(dtypes.uint8)
def _remote_fn(h):
handle = script_ops.py_func(_encode_raw, [h], dtypes.string)
remote_iterator = iterator_ops.Iterator.from_string_handle(
handle, dataset_ops.get_legacy_output_types(dataset_3),
dataset_ops.get_legacy_output_shapes(dataset_3))
return remote_iterator.get_next()
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
target_placeholder = array_ops.placeholder(dtypes.string, shape=[])
iterator_3_handle_uint8 = parsing_ops.decode_raw(
input_bytes=iterator_3_handle, out_type=dtypes.uint8)
remote_op = functional_ops.remote_call(
args=[iterator_3_handle_uint8],
Tout=[dtypes.int32],
f=_remote_fn,
target=target_placeholder)
with self.cached_session() as sess:
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [1])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [2])
elem = sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
self.assertEqual(elem, [3])
with self.assertRaises(errors.OutOfRangeError):
sess.run(
remote_op,
feed_dict={
target_placeholder: "/job:localhost/replica:0/task:0/cpu:0"
})
@combinations.generate(test_base.graph_only_combinations())
def testRepeatedGetNextWarning(self):
iterator = dataset_ops.make_one_shot_iterator(dataset_ops.Dataset.range(10))
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
for _ in range(100):
iterator.get_next()
self.assertEqual(100 - iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD, len(w))
for warning in w:
self.assertIn(
iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE, str(warning.message))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
expected_element_structure=tensor_spec.TensorSpec([],
dtypes.float32),
expected_output_classes=ops.Tensor,
expected_output_types=dtypes.float32,
expected_output_shapes=[[]])))
def testTensorIteratorStructure(self, expected_element_structure,
expected_output_classes,
expected_output_types,
expected_output_shapes):
tf_value_fn = lambda: constant_op.constant(37.0)
tf_value = tf_value_fn()
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensors(tf_value))
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(iterator), expected_element_structure))
self.assertEqual(expected_output_classes,
dataset_ops.get_legacy_output_classes(iterator))
self.assertEqual(expected_output_types,
dataset_ops.get_legacy_output_types(iterator))
self.assertEqual(expected_output_shapes,
dataset_ops.get_legacy_output_shapes(iterator))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
expected_element_structure=sparse_tensor.SparseTensorSpec(
[1], dtypes.int32),
expected_output_classes=sparse_tensor.SparseTensor,
expected_output_types=dtypes.int32,
expected_output_shapes=[[1]])))
def testSparseTensorIteratorStructure(self, expected_element_structure,
expected_output_classes,
expected_output_types,
expected_output_shapes):
def tf_value_fn():
return sparse_tensor.SparseTensor(
indices=[[0]],
values=constant_op.constant([0], dtype=dtypes.int32),
dense_shape=[1])
tf_value = tf_value_fn()
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensors(tf_value))
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(iterator), expected_element_structure))
self.assertEqual(expected_output_classes,
dataset_ops.get_legacy_output_classes(iterator))
self.assertEqual(expected_output_types,
dataset_ops.get_legacy_output_types(iterator))
self.assertEqual(expected_output_shapes,
dataset_ops.get_legacy_output_shapes(iterator))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
expected_element_structure={
"a":
tensor_spec.TensorSpec([], dtypes.float32),
"b": (tensor_spec.TensorSpec([1], dtypes.string),
tensor_spec.TensorSpec([], dtypes.string))
},
expected_output_classes={
"a": ops.Tensor,
"b": (ops.Tensor, ops.Tensor)
},
expected_output_types={
"a": dtypes.float32,
"b": (dtypes.string, dtypes.string)
},
expected_output_shapes={
"a": [],
"b": ([1], [])
})))
def testNestedTensorIteratorStructure(self, expected_element_structure,
expected_output_classes,
expected_output_types,
expected_output_shapes):
def tf_value_fn():
return {
"a": constant_op.constant(37.0),
"b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))
}
tf_value = tf_value_fn()
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensors(tf_value))
self.assertTrue(
structure.are_compatible(
dataset_ops.get_structure(iterator), expected_element_structure))
self.assertEqual(expected_output_classes,
dataset_ops.get_legacy_output_classes(iterator))
self.assertEqual(expected_output_types,
dataset_ops.get_legacy_output_types(iterator))
self.assertEqual(expected_output_shapes,
dataset_ops.get_legacy_output_shapes(iterator))
@combinations.generate(test_base.default_test_combinations())
def testIteratorGetNextName(self):
with ops.Graph().as_default():
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.from_tensors(37.0))
next_element = iterator.get_next(name="overridden_name")
self.assertEqual("overridden_name", next_element.op.name)
@combinations.generate(
combinations.combine(
tf_api_version=[1, 2],
mode="eager",
execution_mode=[context.ASYNC, context.SYNC]))
def testIteratorEagerIteration(self, execution_mode):
with context.eager_mode(), context.execution_mode(execution_mode):
val = 0
dataset = dataset_ops.Dataset.range(10)
iterator = iter(dataset)
for foo in iterator:
self.assertEqual(val, foo.numpy())
val += 1
@combinations.generate(test_base.eager_only_combinations())
def testOwnedIteratorFunction(self):
queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
@def_function.function
def fn():
dataset = dataset_ops.Dataset.range(10)
iterator = iter(dataset)
for _ in range(10):
queue.enqueue(next(iterator))
fn()
for i in range(10):
self.assertEqual(queue.dequeue().numpy(), i)
@combinations.generate(test_base.eager_only_combinations())
def testOwnedIteratorFunctionError(self):
# In this test we verify that a function that raises an error ends up
# properly deallocating the iterator resource.
queue = data_flow_ops.FIFOQueue(10, dtypes.int64)
queue.enqueue(0)
def init_fn(n):
return n
def next_fn(_):
ds = dataset_ops.Dataset.range(0)
return next(iter(ds))
def finalize_fn(n):
queue.enqueue(0)
return n
@def_function.function
def fn():
output_signature = tensor_spec.TensorSpec((), dtypes.int64)
dataset = dataset_ops._GeneratorDataset(1, init_fn, next_fn, finalize_fn,
output_signature)
iterator = iter(dataset)
next(iterator)
with self.assertRaises(errors.OutOfRangeError):
fn()
self.assertEqual(queue.size().numpy(), 2)
@combinations.generate(test_base.eager_only_combinations())
def testLimitedRetracing(self):
trace_count = [0]
@def_function.function
def f(iterator):
trace_count[0] += 1
counter = np.int64(0)
for elem in iterator:
counter += elem
return counter
dataset = dataset_ops.Dataset.range(5)
dataset2 = dataset_ops.Dataset.range(10)
for _ in range(10):
self.assertEqual(self.evaluate(f(iter(dataset))), 10)
self.assertEqual(self.evaluate(f(iter(dataset2))), 45)
self.assertEqual(trace_count[0], 1)
@combinations.generate(test_base.eager_only_combinations())
def testNestedFunctionsIteratorResource(self):
@def_function.function
def sum_dataset(ds):
it = iter(ds)
@def_function.function
def next_element(it):
return next(it)
total = 0
for _ in range(10):
total += next_element(it)
return total
ds = dataset_ops.Dataset.range(10)
self.assertEqual(sum_dataset(ds).numpy(), 45)
self.assertEqual(sum_dataset(ds).numpy(), 45)
@combinations.generate(test_base.default_test_combinations())
def testNestedAutomaticControlDependencies(self):
counter_var = variables.Variable(0)
def map_fn(x):
counter_var.assign_add(1)
return x
def dataset_fn():
return dataset_ops.Dataset.range(10).map(map_fn)
@def_function.function
def fn():
it = iter(dataset_fn())
for _ in range(10):
_ = next(it)
return counter_var
self.evaluate(counter_var.initializer)
self.assertEqual(self.evaluate(fn()), 10)
if __name__ == "__main__":
test.main()
|
{
"content_hash": "5f78c6a2f0f32e1c53d292d0adccfd89",
"timestamp": "",
"source": "github",
"line_count": 1009,
"max_line_length": 80,
"avg_line_length": 38.69970267591675,
"alnum_prop": 0.6361145257119443,
"repo_name": "aam-at/tensorflow",
"id": "a59c64d50b2469ee0d19aa79278aa8455b5a5856",
"size": "39737",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/kernel_tests/iterator_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "16049"
},
{
"name": "C",
"bytes": "784149"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "69481042"
},
{
"name": "CMake",
"bytes": "204596"
},
{
"name": "Dockerfile",
"bytes": "73667"
},
{
"name": "Go",
"bytes": "1670128"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "844222"
},
{
"name": "Jupyter Notebook",
"bytes": "1665601"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "101287"
},
{
"name": "Objective-C",
"bytes": "104023"
},
{
"name": "Objective-C++",
"bytes": "182460"
},
{
"name": "PHP",
"bytes": "17733"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "49451363"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4697"
},
{
"name": "Shell",
"bytes": "495434"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
},
{
"name": "TSQL",
"bytes": "921"
}
],
"symlink_target": ""
}
|
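# Echo loop: keep prompting and printing the input in upper case until the user types 'stop'.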
while True:
reply = input('Enter text: ')
if reply == 'stop': break
print(reply.upper())
|
{
"content_hash": "77cdfe1620e2f3e1a368542d912b8042",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 33,
"avg_line_length": 25.25,
"alnum_prop": 0.5841584158415841,
"repo_name": "eroicaleo/LearningPython",
"id": "7b22891cacfb2413cbc8e7927332f0a556d81c57",
"size": "129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ch10/interact1.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18342"
},
{
"name": "HTML",
"bytes": "95429"
},
{
"name": "Java",
"bytes": "5182"
},
{
"name": "JavaScript",
"bytes": "31062"
},
{
"name": "Jupyter Notebook",
"bytes": "439846"
},
{
"name": "Makefile",
"bytes": "39"
},
{
"name": "Python",
"bytes": "1489221"
},
{
"name": "TeX",
"bytes": "795"
}
],
"symlink_target": ""
}
|
"""
code that tests the classes defined in simple_graph.py
can be run with py.test
"""
import pytest # used for the exception testing
import unittest
from simple_graph import Node, Edge, IterNode, LinkedList
class MyFuncTestCase(unittest.TestCase):
# g.nodes(): return a list of all nodes in the graph
# g.edges(): return a list of all edges in the graph
# g.add_node(n): adds a new node 'n' to the graph
# g.add_edge(n1, n2): adds a new edge to the graph connecting 'n1' and 'n2', if either n1 or n2 are not already present in the graph, they should be added.
# g.del_node(n): deletes the node 'n' from the graph, raises an error if no such node exists
# g.del_edge(n1, n2): deletes the edge connecting 'n1' and 'n2' from the graph, raises an error if no such edge exists
# g.has_node(n): True if node 'n' is contained in the graph, False if not.
# g.neighbors(n): returns the list of all nodes connected to 'n' by edges, raises an error if n is not in g
# g.adjacent(n1, n2): returns True if there is an edge connecting n1 and n2, False if not, raises an error if either of the supplied nodes are not in g
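    # Usage sketch of the API described above (illustrative; assumes simple_graph also
    # defines a SimpleGraph class, which the tests below do not yet exercise):
    #     g = SimpleGraph()
    #     g.add_edge('a', 'b')
    #     assert g.has_node('a')
    #     assert g.adjacent('a', 'b')
    #     assert 'b' in g.neighbors('a')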
def test_Node___init__(self):
Node0 = Node(0)
self.assertEqual(Node0.val, 0)
self.assertIsNone(Node0.link)
def test_IterNode(self):
i1 = IterNode(None)
self.assertIsNone(i1.cur)
with self.assertRaises(StopIteration):
i1.next()
Node0 = Node(0)
i2 = IterNode(Node0)
self.assertIsNotNone(i2.cur)
Node1 = i2.next()
self.assertEqual(Node0, Node1)
with self.assertRaises(StopIteration):
i2.next()
def test_LinkedList___init__(self):
l1 = LinkedList()
self.assertIsNone(l1.head)
def test_LinkedList_insert(self):
l1 = LinkedList()
l1.insert(0)
# ensure Node(0) added to list
Node0 = l1.head
self.assertEqual(Node0.val, 0)
self.assertIsNone(Node0.link)
# ensure Node(1) added to front of list
l1.insert(1)
Node1 = l1.head
self.assertEqual(Node1.val, 1)
self.assertIsNotNone(Node1.link)
# ensure Node(0) is next in list
Node0 = Node1.link
self.assertEqual(Node0.val, 0)
self.assertIsNone(Node0.link)
# ensure adding 'None' adds something
sz1 = l1.size()
l1.insert(None)
self.assertEqual(sz1+1, l1.size())
# ensure adding recursively adds nothing
sz1 = l1.size()
l1.insert(l1)
self.assertEqual(sz1, l1.size())
def test_LinkedList_append(self):
l1 = LinkedList()
l1.append(0)
# ensure Node(0) added to list
Node0 = l1.head
self.assertEqual(Node0.val, 0)
self.assertIsNone(Node0.link)
# ensure Node(1) appended to back of list
l1.append(1)
Node0 = l1.head
self.assertEqual(Node0.val, 0)
self.assertIsNotNone(Node0.link)
# ensure Node(0) is next in list
Node1 = Node0.link
self.assertEqual(Node1.val, 1)
self.assertIsNone(Node1.link)
# ensure adding 'None' adds something
sz1 = l1.size()
l1.append(None)
self.assertEqual(sz1+1, l1.size())
# ensure adding recursively adds nothing
sz1 = l1.size()
l1.append(l1)
self.assertEqual(sz1, l1.size())
def test_LinkedList_pop(self):
l1 = LinkedList()
l1.insert(0)
self.assertEqual(l1.size(), 1)
l1.insert(1)
self.assertEqual(l1.size(), 2)
# ensure Node(0) added to list
Val1 = l1.pop()
self.assertEqual(l1.size(), 1)
self.assertEqual(Val1, 1)
Val0 = l1.pop()
self.assertEqual(l1.size(), 0)
self.assertEqual(Val0, 0)
# if the list is empty and you pop, raise ValueError
with self.assertRaises(ValueError):
ValNone = l1.pop()
def test_LinkedList_size(self):
l1 = LinkedList()
self.assertEqual(l1.size(), 0)
l1.insert(0)
self.assertEqual(l1.size(), 1)
l1.insert(1)
self.assertEqual(l1.size(), 2)
l1.insert(2)
self.assertEqual(l1.size(), 3)
l1.remove_val(2)
self.assertEqual(l1.size(), 2)
l1.remove_val(0)
self.assertEqual(l1.size(), 1)
l1.remove_val(1)
self.assertEqual(l1.size(), 0)
def test_LinkedList_search(self):
l1 = LinkedList()
l1.insert(0)
l1.insert(1)
l1.insert(2)
Node2 = l1.search(2)
self.assertEqual(Node2.val, 2)
Node0 = l1.search(0)
self.assertEqual(Node0.val, 0)
Node1 = l1.search(1)
self.assertEqual(Node1.val, 1)
def test_LinkedList_remove(self):
l1 = LinkedList()
l1.insert(0)
l1.insert(1)
l1.insert(2)
Node3 = Node(3)
with self.assertRaises(ValueError):
l1.remove(Node3)
l1.remove_val(3)
self.assertEqual(l1.size(), 3)
Node2 = l1.search(2)
l1.remove(Node2)
self.assertEqual(l1.size(), 2)
Node0 = l1.search(0)
l1.remove(Node0)
self.assertEqual(l1.size(), 1)
Node1 = l1.search(1)
l1.remove(Node1)
self.assertEqual(l1.size(), 0)
with self.assertRaises(ValueError):
l1.remove(Node1)
l1.remove(Node0)
l1.remove(Node2)
l1.remove_val(1)
l1.remove_val(0)
l1.remove_val(2)
def test_LinkedList___repr__(self):
l1 = LinkedList()
l1.insert(0)
l1.insert(1)
l1.insert(2)
        self.assertEqual(l1.__repr__(), "(2, 1, 0)")
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "50f1a7218cf3b8d35ff1ceffb95e21f4",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 159,
"avg_line_length": 29.426395939086294,
"alnum_prop": 0.5778851129894773,
"repo_name": "CharlesGust/data_structures",
"id": "06c767a4bdcadf1de4c7510e6cbda63c1ce0e810",
"size": "5820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_graph/test_simple_graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76735"
}
],
"symlink_target": ""
}
|
import os
import sys
from setuptools import setup, Command, find_packages
# Pull version from source without importing
# since we can't import something we haven't built yet :)
exec(open('kafka/version.py').read())
class Tox(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
@classmethod
def run(cls):
import tox
sys.exit(tox.cmdline([]))
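# Registered below via cmdclass={"test": Tox}, so `python setup.py test` delegates to tox.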
test_require = ['tox', 'mock']
if sys.version_info < (2, 7):
test_require.append('unittest2')
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst')) as f:
README = f.read()
setup(
name="kafka-python",
version=__version__,
tests_require=test_require,
cmdclass={"test": Tox},
packages=find_packages(exclude=['test']),
author="Distsys Streaming",
author_email="distsys-streaming@yelp.com",
url="https://github.com/Yelp/kafka-python",
license="Apache License 2.0",
description="Pure Python client for Apache Kafka",
long_description=README,
keywords="apache kafka",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
]
)
|
{
"content_hash": "94c7a0e5552ea2f5979bc8ff6b29e54d",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 71,
"avg_line_length": 28.34375,
"alnum_prop": 0.6273428886438809,
"repo_name": "Yelp/kafka-python",
"id": "f611c3cfc241917935168b316c401eea741bdf84",
"size": "1814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1365"
},
{
"name": "Makefile",
"bytes": "2730"
},
{
"name": "Python",
"bytes": "1239943"
},
{
"name": "Shell",
"bytes": "3333"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'fabric_package_management'
copyright = u'2015, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'fabric_package_managementdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'fabric_package_management.tex', u'fabric\\_package\\_management Documentation',
u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'fabric_package_management', u'fabric_package_management Documentation',
[u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'fabric_package_management', u'fabric_package_management Documentation',
u'Author', 'fabric_package_management', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'fabric_package_management'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2015, Author'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'fabric_package_management'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
{
"content_hash": "b54d14afc57f9d2173cf6d882af16a16",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 92,
"avg_line_length": 31.4748427672956,
"alnum_prop": 0.7082625636926766,
"repo_name": "andrewsomething/fabric-package-management",
"id": "5481cb09903f03a8cb62bbfe434d0828fe15bcf6",
"size": "10447",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18351"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
import warnings
from ._compat import string_types, with_metaclass
from ._typing import TYPE_CHECKING
from .utils import canonicalize_version
from .version import Version, LegacyVersion, parse
if TYPE_CHECKING: # pragma: no cover
from typing import List, Dict, Union, Iterable, Iterator, Optional, Callable, Tuple
ParsedVersion = Union[Version, LegacyVersion]
UnparsedVersion = Union[Version, LegacyVersion, str]
CallableOperator = Callable[[ParsedVersion, str], bool]
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found, users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): # type: ignore
@abc.abstractmethod
def __str__(self):
# type: () -> str
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
# type: () -> int
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other):
# type: (object) -> bool
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
# type: (object) -> bool
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
# type: () -> Optional[bool]
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
# type: (bool) -> None
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
# type: (str, Optional[bool]) -> bool
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
# type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion]
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {} # type: Dict[str, str]
def __init__(self, spec="", prereleases=None):
# type: (str, Optional[bool]) -> None
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (
match.group("operator").strip(),
match.group("version").strip(),
) # type: Tuple[str, str]
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
# type: () -> str
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(self.__class__.__name__, str(self), pre)
def __str__(self):
# type: () -> str
return "{0}{1}".format(*self._spec)
@property
def _canonical_spec(self):
# type: () -> Tuple[str, Union[Version, str]]
return self._spec[0], canonicalize_version(self._spec[1])
def __hash__(self):
# type: () -> int
return hash(self._canonical_spec)
def __eq__(self, other):
# type: (object) -> bool
if isinstance(other, string_types):
try:
other = self.__class__(str(other))
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._canonical_spec == other._canonical_spec
def __ne__(self, other):
# type: (object) -> bool
if isinstance(other, string_types):
try:
other = self.__class__(str(other))
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
# type: (str) -> CallableOperator
operator_callable = getattr(
self, "_compare_{0}".format(self._operators[op])
) # type: CallableOperator
return operator_callable
def _coerce_version(self, version):
# type: (UnparsedVersion) -> ParsedVersion
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def operator(self):
# type: () -> str
return self._spec[0]
@property
def version(self):
# type: () -> str
return self._spec[1]
@property
def prereleases(self):
# type: () -> Optional[bool]
return self._prereleases
@prereleases.setter
def prereleases(self, value):
# type: (bool) -> None
self._prereleases = value
def __contains__(self, item):
# type: (str) -> bool
return self.contains(item)
def contains(self, item, prereleases=None):
# type: (UnparsedVersion, Optional[bool]) -> bool
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion, this allows us to have
        # a shortcut for ``"2.0" in Specifier(">=2")``
normalized_item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
        # or not; if we do not support prereleases, then we can short circuit
        # the logic if this version is a prerelease.
if normalized_item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
operator_callable = self._get_operator(self.operator) # type: CallableOperator
return operator_callable(normalized_item, self.version)
def filter(self, iterable, prereleases=None):
# type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion]
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if parsed_version.is_prerelease and not (
prereleases or self.prereleases
):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
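# Illustrative behaviour of the pre-release fallback above (a sketch, not part
# of the vendored module): a specifier that does not itself allow pre-releases
# only falls back to them when nothing else matched.
#
#     >>> list(Specifier(">=0.9").filter(["0.9", "1.0a1"]))
#     ['0.9']
#     >>> list(Specifier(">=0.9").filter(["0.8", "1.0a1"]))
#     ['1.0a1']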
class LegacySpecifier(_IndividualSpecifier):
_regex_str = r"""
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^,;\s)]* # Since this is a "legacy" specifier, and the version
# string can be just about anything, we match everything
# except for whitespace, a semi-colon for marker support,
# a closing paren since versions can be enclosed in
# them, and a comma since it's a version separator.
)
"""
_regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def __init__(self, spec="", prereleases=None):
# type: (str, Optional[bool]) -> None
super(LegacySpecifier, self).__init__(spec, prereleases)
warnings.warn(
"Creating a LegacyVersion has been deprecated and will be "
"removed in the next major release",
DeprecationWarning,
)
def _coerce_version(self, version):
# type: (Union[ParsedVersion, str]) -> LegacyVersion
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
# type: (LegacyVersion, str) -> bool
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
# type: (LegacyVersion, str) -> bool
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
# type: (LegacyVersion, str) -> bool
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
# type: (LegacyVersion, str) -> bool
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
# type: (LegacyVersion, str) -> bool
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
# type: (LegacyVersion, str) -> bool
return prospective > self._coerce_version(spec)
def _require_version_compare(
fn, # type: (Callable[[Specifier, ParsedVersion, str], bool])
):
# type: (...) -> Callable[[Specifier, ParsedVersion, str], bool]
@functools.wraps(fn)
def wrapped(self, prospective, spec):
# type: (Specifier, ParsedVersion, str) -> bool
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
class Specifier(_IndividualSpecifier):
_regex_str = r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
            # All other operators only allow a subset of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
_regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
# type: (ParsedVersion, str) -> bool
        # Compatible releases have an equivalent combination of >= and ==. That
        # is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
        # We want everything but the last item in the version, but we want to
        # ignore post and dev releases and we want to treat the pre-release as
        # its own separate segment.
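        # Worked example (illustrative only): for the spec "2.2.post3",
        # _version_split gives ["2", "2", "post3"]; takewhile stops at the
        # post-release, leaving ["2", "2"], and dropping the final item yields
        # the prefix "2", so "~=2.2.post3" is evaluated as ">=2.2.post3"
        # combined with "==2.*".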
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("post") and not x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
prospective, prefix
)
@_require_version_compare
def _compare_equal(self, prospective, spec):
# type: (ParsedVersion, str) -> bool
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
prospective = Version(prospective.public)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
split_spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
split_prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
shortened_prospective = split_prospective[: len(split_spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
padded_spec, padded_prospective = _pad_version(
split_spec, shortened_prospective
)
return padded_prospective == padded_spec
else:
# Convert our spec string into a Version
spec_version = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec_version.local:
prospective = Version(prospective.public)
return prospective == spec_version
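    # Illustrative prefix-matching behaviour of the method above (a sketch,
    # not part of the vendored module):
    #
    #     >>> Specifier("==2.2.*").contains("2.2.1")
    #     True
    #     >>> Specifier("==2.2.*").contains("2.3.0")
    #     False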
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
# type: (ParsedVersion, str) -> bool
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
# type: (ParsedVersion, str) -> bool
# NB: Local version identifiers are NOT permitted in the version
# specifier, so local version labels can be universally removed from
# the prospective version.
return Version(prospective.public) <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
# type: (ParsedVersion, str) -> bool
# NB: Local version identifiers are NOT permitted in the version
# specifier, so local version labels can be universally removed from
# the prospective version.
return Version(prospective.public) >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec_str):
# type: (ParsedVersion, str) -> bool
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec_str)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
        # This special case is here so that, unless the specifier itself
        # includes a pre-release version, we do not accept pre-release
# versions for the version mentioned in the specifier (e.g. <3.1 should
# not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
        # If we've gotten to here, it means that the prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
@_require_version_compare
def _compare_greater_than(self, prospective, spec_str):
# type: (ParsedVersion, str) -> bool
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec_str)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
        # This special case is here so that, unless the specifier itself
        # includes a post-release version, we do not accept
# post-release versions for the version mentioned in the specifier
# (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
# in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
        # If we've gotten to here, it means that the prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective, spec):
# type: (Version, str) -> bool
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# type: () -> bool
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
            # The == specifier can include a trailing .*, if it does we
            # want to remove it before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
            # Parse the version, and if it is a pre-release then this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value):
# type: (bool) -> None
self._prereleases = value
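# Illustrative behaviour of Specifier.prereleases (a sketch, not part of the
# vendored module): an inclusive operator whose version is itself a
# pre-release implies that pre-releases are acceptable.
#
#     >>> Specifier(">=1.0").prereleases
#     False
#     >>> Specifier(">=1.0a1").prereleases
#     True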
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
# type: (str) -> List[str]
result = [] # type: List[str]
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
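# For example (illustrative only), _version_split("1.0rc1") returns
# ["1", "0", "rc1"]: the pre-release marker is split off into its own segment
# by _prefix_regex.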
def _pad_version(left, right):
# type: (List[str], List[str]) -> Tuple[List[str], List[str]]
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]) :])
right_split.append(right[len(right_split[0]) :])
# Insert our padding
left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
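# For example (illustrative only):
#
#     >>> _pad_version(["1", "2"], ["1", "2", "3"])
#     (['1', '2', '0'], ['1', '2', '3'])
#
# The shorter release segment is zero-padded so both sides compare
# element-for-element.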
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
# type: (str, Optional[bool]) -> None
        # Split on , to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
        # Parse each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in split_specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
# type: () -> str
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
# type: () -> str
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
# type: () -> int
return hash(self._specs)
def __and__(self, other):
# type: (Union[SpecifierSet, str]) -> SpecifierSet
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other):
# type: (object) -> bool
if isinstance(other, (string_types, _IndividualSpecifier)):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
# type: (object) -> bool
if isinstance(other, (string_types, _IndividualSpecifier)):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
def __len__(self):
# type: () -> int
return len(self._specs)
def __iter__(self):
# type: () -> Iterator[_IndividualSpecifier]
return iter(self._specs)
@property
def prereleases(self):
# type: () -> Optional[bool]
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
# type: (bool) -> None
self._prereleases = value
def __contains__(self, item):
# type: (Union[ParsedVersion, str]) -> bool
return self.contains(item)
def contains(self, item, prereleases=None):
# type: (Union[ParsedVersion, str], Optional[bool]) -> bool
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
        # like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(s.contains(item, prereleases=prereleases) for s in self._specs)
def filter(
self,
iterable, # type: Iterable[Union[ParsedVersion, str]]
prereleases=None, # type: Optional[bool]
):
# type: (...) -> Iterable[Union[ParsedVersion, str]]
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = [] # type: List[Union[ParsedVersion, str]]
found_prereleases = [] # type: List[Union[ParsedVersion, str]]
for item in iterable:
                # Ensure that we have some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
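# Minimal usage sketch for SpecifierSet (illustrative only, not part of the
# vendored module):
#
#     >>> specs = SpecifierSet(">=1.0,<2.0")
#     >>> "1.4" in specs
#     True
#     >>> list(specs.filter(["0.9", "1.2", "2.1"]))
#     ['1.2']
#     >>> str(specs & ">=1.1")
#     '<2.0,>=1.0,>=1.1'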
|
{
"content_hash": "f9631db96d39e67ebdcd7aa9145a92f3",
"timestamp": "",
"source": "github",
"line_count": 861,
"max_line_length": 88,
"avg_line_length": 37.198606271777,
"alnum_prop": 0.5650680654427376,
"repo_name": "kimjinyong/i2nsf-framework",
"id": "a42cbfef3325fc2425c765ddf12c296053d53f48",
"size": "32208",
"binary": false,
"copies": "20",
"ref": "refs/heads/master",
"path": "Hackathon-112/analyzer/.local/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4396520"
},
{
"name": "C++",
"bytes": "9389"
},
{
"name": "CSS",
"bytes": "51736"
},
{
"name": "Dockerfile",
"bytes": "3839"
},
{
"name": "Emacs Lisp",
"bytes": "24812"
},
{
"name": "Erlang",
"bytes": "1364078"
},
{
"name": "HTML",
"bytes": "42486541"
},
{
"name": "Hack",
"bytes": "6349"
},
{
"name": "Java",
"bytes": "7976"
},
{
"name": "JavaScript",
"bytes": "533000"
},
{
"name": "Makefile",
"bytes": "401170"
},
{
"name": "PHP",
"bytes": "164007"
},
{
"name": "Perl",
"bytes": "2188"
},
{
"name": "Python",
"bytes": "3004949"
},
{
"name": "QMake",
"bytes": "360"
},
{
"name": "Roff",
"bytes": "3906372"
},
{
"name": "Shell",
"bytes": "83872"
},
{
"name": "XSLT",
"bytes": "167018"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.apps import AppConfig
class OccurrenceConfig(AppConfig):
name = 'occurrence'
|
{
"content_hash": "38f2370063dd13cd34101c9064d2a613",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 39,
"avg_line_length": 19.428571428571427,
"alnum_prop": 0.7647058823529411,
"repo_name": "parksandwildlife/wastd",
"id": "3037b7ce35fe55c47949ffd9150b133015630b93",
"size": "160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "occurrence/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9150"
},
{
"name": "HTML",
"bytes": "60851"
},
{
"name": "JavaScript",
"bytes": "18966"
},
{
"name": "Python",
"bytes": "853568"
},
{
"name": "Shell",
"bytes": "4200"
},
{
"name": "TeX",
"bytes": "16951"
}
],
"symlink_target": ""
}
|
"""This module contains an object that represents a Encrypted PassportFile."""
from typing import TYPE_CHECKING, List, Optional
from telegram._telegramobject import TelegramObject
from telegram._utils.defaultvalue import DEFAULT_NONE
from telegram._utils.types import JSONDict, ODVInput
if TYPE_CHECKING:
from telegram import Bot, File, FileCredentials
class PassportFile(TelegramObject):
"""
This object represents a file uploaded to Telegram Passport. Currently all Telegram Passport
files are in JPEG format when decrypted and don't exceed 10MB.
Objects of this class are comparable in terms of equality. Two objects of this class are
considered equal, if their :attr:`file_unique_id` is equal.
Args:
file_id (:obj:`str`): Identifier for this file, which can be used to download
or reuse the file.
file_unique_id (:obj:`str`): Unique identifier for this file, which
is supposed to be the same over time and for different bots.
Can't be used to download or reuse the file.
file_size (:obj:`int`): File size in bytes.
file_date (:obj:`int`): Unix time when the file was uploaded.
Attributes:
file_id (:obj:`str`): Identifier for this file.
file_unique_id (:obj:`str`): Unique identifier for this file, which
is supposed to be the same over time and for different bots.
Can't be used to download or reuse the file.
file_size (:obj:`int`): File size in bytes.
file_date (:obj:`int`): Unix time when the file was uploaded.
"""
__slots__ = (
"file_date",
"file_id",
"file_size",
"_credentials",
"file_unique_id",
)
def __init__(
self,
file_id: str,
file_unique_id: str,
file_date: int,
file_size: int,
credentials: "FileCredentials" = None,
*,
api_kwargs: JSONDict = None,
):
super().__init__(api_kwargs=api_kwargs)
# Required
self.file_id = file_id
self.file_unique_id = file_unique_id
self.file_size = file_size
self.file_date = file_date
# Optionals
self._credentials = credentials
self._id_attrs = (self.file_unique_id,)
@classmethod
def de_json_decrypted(
cls, data: Optional[JSONDict], bot: "Bot", credentials: "FileCredentials"
) -> Optional["PassportFile"]:
"""Variant of :meth:`telegram.TelegramObject.de_json` that also takes into account
passport credentials.
Args:
data (Dict[:obj:`str`, ...]): The JSON data.
bot (:class:`telegram.Bot`): The bot associated with this object.
credentials (:class:`telegram.FileCredentials`): The credentials
Returns:
:class:`telegram.PassportFile`:
"""
data = cls._parse_data(data)
if not data:
return None
data["credentials"] = credentials
return super().de_json(data=data, bot=bot)
@classmethod
def de_list_decrypted(
cls, data: Optional[List[JSONDict]], bot: "Bot", credentials: List["FileCredentials"]
) -> List[Optional["PassportFile"]]:
"""Variant of :meth:`telegram.TelegramObject.de_list` that also takes into account
passport credentials.
Args:
data (Dict[:obj:`str`, ...]): The JSON data.
bot (:class:`telegram.Bot`): The bot associated with these objects.
credentials (:class:`telegram.FileCredentials`): The credentials
Returns:
List[:class:`telegram.PassportFile`]:
"""
if not data:
return []
return [
cls.de_json_decrypted(passport_file, bot, credentials[i])
for i, passport_file in enumerate(data)
]
async def get_file(
self,
*,
read_timeout: ODVInput[float] = DEFAULT_NONE,
write_timeout: ODVInput[float] = DEFAULT_NONE,
connect_timeout: ODVInput[float] = DEFAULT_NONE,
pool_timeout: ODVInput[float] = DEFAULT_NONE,
api_kwargs: JSONDict = None,
) -> "File":
"""
Wrapper over :attr:`telegram.Bot.get_file`. Will automatically assign the correct
credentials to the returned :class:`telegram.File` if originating from
:obj:`telegram.PassportData.decrypted_data`.
For the documentation of the arguments, please see :meth:`telegram.Bot.get_file`.
Returns:
:class:`telegram.File`
Raises:
:class:`telegram.error.TelegramError`
"""
file = await self.get_bot().get_file(
file_id=self.file_id,
read_timeout=read_timeout,
write_timeout=write_timeout,
connect_timeout=connect_timeout,
pool_timeout=pool_timeout,
api_kwargs=api_kwargs,
)
file.set_credentials(self._credentials)
return file
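# Minimal usage sketch (illustrative only; assumes a PassportFile coming from
# decrypted passport data and an initialized Bot; File.download_to_drive is
# the download helper available in recent python-telegram-bot releases):
#
#     async def save_passport_scan(passport_file: "PassportFile") -> None:
#         file = await passport_file.get_file()
#         await file.download_to_drive("passport_scan.jpg")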
|
{
"content_hash": "3e69ab696833f86ea14263a74eed9d23",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 96,
"avg_line_length": 32.64705882352941,
"alnum_prop": 0.6048048048048048,
"repo_name": "tzpBingo/github-trending",
"id": "4158648367074576a34eceb5e94cab488254f633",
"size": "5804",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "codespace/python/telegram/_passport/passportfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
}
|
import json
import unittest
import webapp2
import webtest
from google.appengine.ext import ndb
from dashboard import group_report
from dashboard import short_uri
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import bug_data
from dashboard.models import page_state
from dashboard.models import sheriff
class GroupReportTest(testing_common.TestCase):
def setUp(self):
super(GroupReportTest, self).setUp()
app = webapp2.WSGIApplication(
[('/group_report', group_report.GroupReportHandler)])
self.testapp = webtest.TestApp(app)
def _AddAnomalyEntities(
self, revision_ranges, test_key, sheriff_key, bug_id=None):
"""Adds a group of Anomaly entities to the datastore."""
urlsafe_keys = []
for start_rev, end_rev in revision_ranges:
anomaly_key = anomaly.Anomaly(
start_revision=start_rev, end_revision=end_rev,
test=test_key, bug_id=bug_id, sheriff=sheriff_key,
median_before_anomaly=100, median_after_anomaly=200).put()
urlsafe_keys.append(anomaly_key.urlsafe())
return urlsafe_keys
def _AddTests(self):
"""Adds sample TestMetadata entities and returns their keys."""
testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
'scrolling-benchmark': {
'first_paint': {},
'mean_frame_time': {},
}
})
keys = [
utils.TestKey(
'ChromiumGPU/linux-release/scrolling-benchmark/first_paint'),
utils.TestKey(
'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time'),
]
# By default, all TestMetadata entities have an improvement_direction of
# UNKNOWN, meaning that neither direction is considered an improvement.
# Here we set the improvement direction so that some anomalies are
# considered improvements.
for test_key in keys:
test = test_key.get()
test.improvement_direction = anomaly.DOWN
test.put()
return keys
def _AddSheriff(self):
"""Adds a Sheriff entity and returns the key."""
return sheriff.Sheriff(
id='Chromium Perf Sheriff', email='sullivan@google.com').put()
def testGet(self):
response = self.testapp.get('/group_report')
self.assertEqual('text/html', response.content_type)
self.assertIn('Chrome Performance Dashboard', response.body)
def testPost_WithAnomalyKeys_ShowsSelectedAndOverlapping(self):
sheriff_key = self._AddSheriff()
test_keys = self._AddTests()
selected_ranges = [(400, 900), (200, 700)]
overlapping_ranges = [(300, 500), (500, 600), (600, 800)]
non_overlapping_ranges = [(100, 200)]
selected_keys = self._AddAnomalyEntities(
selected_ranges, test_keys[0], sheriff_key)
self._AddAnomalyEntities(
overlapping_ranges, test_keys[0], sheriff_key)
self._AddAnomalyEntities(
non_overlapping_ranges, test_keys[0], sheriff_key)
response = self.testapp.post(
'/group_report?keys=%s' % ','.join(selected_keys))
alert_list = self.GetJsonValue(response, 'alert_list')
# Confirm the first N keys are the selected keys.
first_keys = [alert_list[i]['key'] for i in xrange(len(selected_keys))]
self.assertSetEqual(set(selected_keys), set(first_keys))
# Expect selected alerts + overlapping alerts,
# but not the non-overlapping alert.
self.assertEqual(5, len(alert_list))
def testPost_WithInvalidSidParameter_ShowsError(self):
response = self.testapp.post('/group_report?sid=foobar')
error = self.GetJsonValue(response, 'error')
self.assertIn('No anomalies specified', error)
def testPost_WithValidSidParameter(self):
sheriff_key = self._AddSheriff()
test_keys = self._AddTests()
selected_ranges = [(400, 900), (200, 700)]
selected_keys = self._AddAnomalyEntities(
selected_ranges, test_keys[0], sheriff_key)
json_keys = json.dumps(selected_keys)
state_id = short_uri.GenerateHash(','.join(selected_keys))
page_state.PageState(id=state_id, value=json_keys).put()
response = self.testapp.post('/group_report?sid=%s' % state_id)
alert_list = self.GetJsonValue(response, 'alert_list')
# Confirm the first N keys are the selected keys.
first_keys = [alert_list[i]['key'] for i in xrange(len(selected_keys))]
self.assertSetEqual(set(selected_keys), set(first_keys))
self.assertEqual(2, len(alert_list))
def testPost_WithKeyOfNonExistentAlert_ShowsError(self):
key = ndb.Key('Anomaly', 123)
response = self.testapp.post('/group_report?keys=%s' % key.urlsafe())
error = self.GetJsonValue(response, 'error')
self.assertEqual('No Anomaly found for key %s.' % key.urlsafe(), error)
def testPost_WithInvalidKeyParameter_ShowsError(self):
response = self.testapp.post('/group_report?keys=foobar')
error = self.GetJsonValue(response, 'error')
self.assertIn('Invalid Anomaly key', error)
def testPost_WithRevParameter(self):
# If the rev parameter is given, then all alerts whose revision range
# includes the given revision should be included.
sheriff_key = self._AddSheriff()
test_keys = self._AddTests()
self._AddAnomalyEntities(
[(190, 210), (200, 300), (100, 200), (400, 500)],
test_keys[0], sheriff_key)
response = self.testapp.post('/group_report?rev=200')
alert_list = self.GetJsonValue(response, 'alert_list')
self.assertEqual(3, len(alert_list))
def testPost_WithInvalidRevParameter_ShowsError(self):
response = self.testapp.post('/group_report?rev=foo')
error = self.GetJsonValue(response, 'error')
self.assertEqual('Invalid rev "foo".', error)
def testPost_WithBugIdParameter(self):
sheriff_key = self._AddSheriff()
test_keys = self._AddTests()
bug_data.Bug(id=123).put()
self._AddAnomalyEntities(
[(200, 300), (100, 200), (400, 500)],
test_keys[0], sheriff_key, bug_id=123)
self._AddAnomalyEntities(
[(150, 250)], test_keys[0], sheriff_key)
response = self.testapp.post('/group_report?bug_id=123')
alert_list = self.GetJsonValue(response, 'alert_list')
self.assertEqual(3, len(alert_list))
def testPost_WithInvalidBugIdParameter_ShowsError(self):
response = self.testapp.post('/group_report?bug_id=foo')
alert_list = self.GetJsonValue(response, 'alert_list')
self.assertIsNone(alert_list)
error = self.GetJsonValue(response, 'error')
self.assertEqual('Invalid bug ID "foo".', error)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "823b758116c77f9bc980baff93b3cd3b",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 77,
"avg_line_length": 38.23837209302326,
"alnum_prop": 0.6842025239470884,
"repo_name": "catapult-project/catapult-csm",
"id": "4aa48091fb0d1e9d97aad90fef5c4aa1cf722d1f",
"size": "6740",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dashboard/dashboard/group_report_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43728"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "80325"
},
{
"name": "HTML",
"bytes": "11817766"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6207634"
},
{
"name": "Shell",
"bytes": "2558"
}
],
"symlink_target": ""
}
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class lsp_generation(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/global/timers/lsp-generation. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS LSP Generation.
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "lsp-generation"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"global",
"timers",
"lsp-generation",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/lsp_generation/config (container)
YANG Description: This container defines ISIS LSP Generation timers
configuration.
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/lsp_generation/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: This container defines ISIS LSP Generation timers
configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/lsp_generation/state (container)
YANG Description: This container defines state information for ISIS LSP Generation
timers.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/lsp_generation/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container defines state information for ISIS LSP Generation
timers.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
from . import config
from . import state
class lsp_generation(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/global/timers/lsp-generation. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS LSP Generation.
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "lsp-generation"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"global",
"timers",
"lsp-generation",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/lsp_generation/config (container)
YANG Description: This container defines ISIS LSP Generation timers
configuration.
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/lsp_generation/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: This container defines ISIS LSP Generation timers
configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/lsp_generation/state (container)
YANG Description: This container defines state information for ISIS LSP Generation
timers.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/timers/lsp_generation/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: This container defines state information for ISIS LSP Generation
timers.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
|
{
"content_hash": "549b632b821ae0272082140cd9cdd2d0",
"timestamp": "",
"source": "github",
"line_count": 478,
"max_line_length": 377,
"avg_line_length": 38.38075313807531,
"alnum_prop": 0.5805080126458083,
"repo_name": "napalm-automation/napalm-yang",
"id": "d4cc4b0651cf5f5fd4a4fae5d5b49347444b30a6",
"size": "18370",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/global_/timers/lsp_generation/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
}
|
import json
import datetime
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.template import loader, Context
from django.contrib.sites.models import Site
from django.contrib.auth.decorators import login_required
from pycon.tutorials.models import PyConTutorialProposal
from pycon.tutorials.utils import process_tutorial_request
from symposion.schedule.forms import SlotEditForm
from symposion.schedule.models import Schedule, Day, Slot, Presentation
from symposion.schedule.timetable import TimeTable
def fetch_schedule(slug):
qs = Schedule.objects.all()
if slug is None:
if qs.count() > 1:
raise Http404()
schedule = next(iter(qs), None)
if schedule is None:
raise Http404()
else:
schedule = get_object_or_404(qs, section__slug=slug)
return schedule
def schedule_conference(request):
days = Day.objects.filter(schedule__published=True)
days = days.select_related('schedule')
days = days.prefetch_related('schedule__section')
days = days.order_by('date')
timetables = [TimeTable(day) for day in days]
return render(request, "schedule/schedule_conference.html", {
"timetables": timetables,
})
def schedule_detail(request, slug=None):
schedule = fetch_schedule(slug)
if not schedule.published and not request.user.is_staff:
raise Http404()
days = Day.objects.filter(schedule=schedule)
days = days.select_related('schedule')
days = days.prefetch_related('schedule__section')
days = days.order_by('date')
timetables = [TimeTable(day) for day in days]
return render(request, "schedule/schedule_detail.html", {
"schedule": schedule,
"timetables": timetables,
})
def schedule_list(request, slug=None):
schedule = fetch_schedule(slug)
presentations = Presentation.objects.filter(section=schedule.section)
presentations = presentations.exclude(cancelled=True).order_by('title')
ctx = {
"schedule": schedule,
"presentations": presentations,
}
return render(request, "schedule/schedule_list.html", ctx)
def schedule_list_csv(request, slug=None):
schedule = fetch_schedule(slug)
presentations = Presentation.objects.filter(section=schedule.section)
presentations = presentations.exclude(cancelled=True).order_by("id")
response = HttpResponse(mimetype="text/csv")
if slug:
file_slug = slug
else:
file_slug = "presentations"
response["Content-Disposition"] = 'attachment; filename="%s.csv"' % file_slug
response.write(loader.get_template("schedule/schedule_list.csv").render(Context({
"presentations": presentations,
})))
return response
@login_required
def schedule_edit(request, slug=None):
if not request.user.is_staff:
raise Http404()
schedule = fetch_schedule(slug)
days = Day.objects.filter(schedule=schedule)
days = days.select_related('schedule')
days = days.prefetch_related('schedule__section')
days = days.order_by('date')
timetables = [TimeTable(day) for day in days]
return render(request, "schedule/schedule_edit.html", {
"schedule": schedule,
"timetables": timetables,
})
@login_required
def schedule_slot_edit(request, slug, slot_pk):
if not request.user.is_staff:
raise Http404()
slot = get_object_or_404(Slot, day__schedule__section__slug=slug, pk=slot_pk)
if request.method == "POST":
form = SlotEditForm(request.POST, slot=slot)
if form.is_valid():
save = False
if "content_override" in form.cleaned_data:
slot.content_override = form.cleaned_data["content_override"]
save = True
if "presentation" in form.cleaned_data:
presentation = form.cleaned_data["presentation"]
if presentation is None:
slot.unassign()
else:
slot.assign(presentation)
if save:
slot.save()
return redirect("schedule_edit", slug)
else:
form = SlotEditForm(slot=slot)
ctx = {
"slug": slug,
"form": form,
"slot": slot,
}
return render(request, "schedule/_slot_edit.html", ctx)
def schedule_presentation_detail(request, pk):
presentation = get_object_or_404(Presentation, pk=pk)
# Tutorials allow for communication between instructor/attendee(s).
# Offload the logic to its utility
if isinstance(presentation.proposal, PyConTutorialProposal) and \
request.method == 'POST':
return process_tutorial_request(request, presentation)
if presentation.slot:
schedule = presentation.slot.day.schedule
else:
schedule = None
ctx = {
"presentation": presentation,
"proposal": presentation.proposal,
"speakers": presentation.proposal.speakers,
"schedule": schedule,
}
return render(request, "schedule/presentation_detail.html", ctx)
def json_serializer(obj):
if isinstance(obj, datetime.time):
return obj.strftime("%H:%M")
raise TypeError
def schedule_json(request):
"""
Returns information about the schedule.
*No authentication required.*
URL: /<YEAR>/schedule/conference.json
The data returned is in JSON format, and looks like::
[ <slot>, <slot>, ..., <poster>, <poster> ...]
where a slot represents a talk, tutorial, or plenary and looks like::
{
"kind": "talk"|"tutorial"|"plenary",
"name": "Title of talk",
"room": "roomname1, roomname2, ..., roomnameN",
"start": "HH:MM:SS", # ISO format
"end": "HH:MM:SS", # ISO format
"duration": 30, # minutes
"authors" ["author name 1", "author name 2", ..., "author name N"],
"abstract": "Lorem ipsum and so forth and so on",
"description: "Lorem ipsum and so forth and so on",
"conf_key": 27,
"conf_url": "https://conference_domain/path/to/talk",
"video_url": "https://somehost/path/to/video_of_talk",
"slides_url": "https://somehost/path/to/slides_of_talk",
"assets_url": "https://somehost/path/to/assets_for_talk",
"tags": "tag1, tag2, ..., tagN",
"recording_release": true
}
and a poster looks like::
{
"kind": "poster",
"name": "Title of poster",
"authors" ["author name 1", "author name 2", ..., "author name N"],
"abstract": "Lorem ipsum and so forth and so on",
"description: "Lorem ipsum and so forth and so on",
"room": "roomname1, roomname2, ..., roomnameN",
"start": "HH:MM:SS", # Provided but meaningless, ignore...
"end": "HH:MM:SS", # Provided but meaningless, ignore...
"conf_key": 1227,
"conf_url": "https://conference_domain/path/to/page/about/talk"
}
"""
slots = Slot.objects.all().order_by("start")
data = []
for slot in slots:
if slot.kind.label in ["talk", "tutorial", "plenary"] and slot.content:
slot_data = {
"name": slot.content.title,
"room": ", ".join(room["name"] for room in slot.rooms.values()),
"start": slot.start_date.isoformat(),
"end": slot.end_date.isoformat(),
"duration": slot.duration,
"authors": [s.name for s in slot.content.speakers()],
"abstract": getattr(slot.content.abstract, 'raw', slot.content.abstract),
"description": getattr(slot.content.description, 'raw', slot.content.description),
"conf_key": slot.pk,
"conf_url": "https://%s%s" % (
Site.objects.get_current().domain,
reverse("schedule_presentation_detail", args=[slot.content.pk])
),
"kind": slot.kind.label,
"video_url": slot.content.video_url,
"slides_url": slot.content.slides_url,
"assets_url": slot.content.assets_url,
"tags": "",
"recording_release": slot.content.proposal.recording_release if hasattr(slot.content.proposal, 'recording_release') else None,
}
else:
continue
data.append(slot_data)
for poster in Presentation.objects.filter(section__slug="posters", cancelled=False):
poster_data = {
"name": poster.title,
"authors": [s.name for s in poster.speakers()],
"description": getattr(poster.description, 'raw', poster.description),
"abstract": getattr(poster.abstract, 'raw', poster.abstract),
"room": "Poster Room",
"start": datetime.datetime(2014, 03, 17, 10).isoformat(),
"end": datetime.datetime(2014, 03, 17, 13, 10).isoformat(),
"conf_key": 1000 + poster.pk,
"conf_url": "https://%s%s" % (
Site.objects.get_current().domain,
reverse("schedule_presentation_detail", args=[poster.pk])
),
"kind": "poster",
}
data.append(poster_data)
return HttpResponse(
json.dumps(data, default=json_serializer),
content_type="application/json"
)
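# Hedged helper (added for illustration; not part of the original module): one
# way a consumer of the list produced by schedule_json above could filter it.
def _example_filter_talks(schedule_data):
    """Return (name, room, start) tuples for every slot of kind 'talk'."""
    return [(item["name"], item["room"], item["start"])
            for item in schedule_data if item.get("kind") == "talk"]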
|
{
"content_hash": "16ef312783a5d7fd9990312d6226cab1",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 142,
"avg_line_length": 34.581227436823106,
"alnum_prop": 0.6015241674496294,
"repo_name": "PyCon/pycon",
"id": "66c69821dfc84649173cabd590255a4859c1c1a2",
"size": "9579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "symposion/schedule/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "80909"
},
{
"name": "Dockerfile",
"bytes": "163"
},
{
"name": "HTML",
"bytes": "313093"
},
{
"name": "JavaScript",
"bytes": "161207"
},
{
"name": "Makefile",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "993540"
},
{
"name": "Shell",
"bytes": "14094"
},
{
"name": "Smarty",
"bytes": "7379"
}
],
"symlink_target": ""
}
|
import simplejson as json
from twisted.web import resource, static
from twisted.internet import defer
from coherence import log
class JsonInterface(resource.Resource, log.Loggable):
logCategory = 'json'
#isLeaf = False
def __init__(self, controlpoint):
resource.Resource.__init__(self)
log.Loggable.__init__(self)
self.controlpoint = controlpoint
self.controlpoint.coherence.add_web_resource('json',
self)
self.children = {}
def render_GET(self, request):
d = defer.maybeDeferred(self.do_the_render, request)
return d
def render_POST(self, request):
d = defer.maybeDeferred(self.do_the_render, request)
return d
def getChildWithDefault(self, path, request):
self.info('getChildWithDefault, %s, %s, %s %s %r', request.method, path, request.uri, request.client, request.args)
#return self.do_the_render(request)
d = defer.maybeDeferred(self.do_the_render, request)
return d
def do_the_render(self, request):
self.warning('do_the_render, %s, %s, %s %r %s', request.method, request.path, request.uri, request.args, request.client)
msg = "Houston, we've got a problem"
path = request.path.split('/')
path = path[2:]
self.warning('path %r', path)
if request.method in ('GET', 'POST'):
request.postpath = None
if request.method == 'GET':
if path[0] == 'devices':
return self.list_devices(request)
else:
device = self.controlpoint.get_device_with_id(path[0])
if device != None:
service = device.get_service_by_type(path[1])
if service != None:
action = service.get_action(path[2])
if action != None:
return self.call_action(action, request)
else:
msg = "action %r on service type %r for device %r not found" % (path[2], path[1], path[0])
else:
msg = "service type %r for device %r not found" % (path[1], path[0])
else:
msg = "device with id %r not found" % path[0]
request.setResponseCode(404, message=msg)
return static.Data("<html><p>%s</p></html>" % msg, 'text/html')
def list_devices(self, request):
devices = []
for device in self.controlpoint.get_devices():
devices.append(device.as_dict())
return static.Data(json.dumps(devices), 'application/json')
def call_action(self, action, request):
kwargs = {}
for entry, value_list in request.args.items():
kwargs[entry] = unicode(value_list[0])
def to_json(result):
self.warning("to_json")
return static.Data(json.dumps(result), 'application/json')
def fail(f):
request.setResponseCode(404)
return static.Data("<html><p>Houston, we've got a problem</p></html>", 'text/html')
d = action.call(**kwargs)
d.addCallback(to_json)
d.addErrback(fail)
return d
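# Added descriptive note (not in the original source): do_the_render above
# dispatches on the request path after dropping the leading '/json' segment:
#   GET /json/devices                              -> JSON list of known devices
#   GET /json/<device-id>/<service-type>/<action>  -> call_action with query args
# The device id, service type and action name are placeholders for whatever
# the control point has discovered.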
|
{
"content_hash": "d1907d78d850a222914d05ebfd8696bf",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 128,
"avg_line_length": 38.616279069767444,
"alnum_prop": 0.5459199036434809,
"repo_name": "furbrain/Coherence",
"id": "1190aabef5757af330104d5610f8f748ca5e7930",
"size": "3429",
"binary": false,
"copies": "3",
"ref": "refs/heads/maintain/0.6.x",
"path": "coherence/json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "712"
},
{
"name": "Python",
"bytes": "1322063"
},
{
"name": "Shell",
"bytes": "1569"
}
],
"symlink_target": ""
}
|
import os
import bb  # bitbake's 'bb' package, assumed importable at build time (used throughout below)
class NotFoundError(Exception):
def __init__(self, path):
self.path = path
def __str__(self):
return "Error: %s not found." % self.path
class CmdError(Exception):
def __init__(self, exitstatus, output):
self.status = exitstatus
self.output = output
def __str__(self):
return "Command Error: exit status: %d Output:\n%s" % (self.status, self.output)
def runcmd(args, dir = None):
import commands
if dir:
olddir = os.path.abspath(os.curdir)
if not os.path.exists(dir):
raise NotFoundError(dir)
os.chdir(dir)
# print("cwd: %s -> %s" % (olddir, dir))
try:
args = [ commands.mkarg(str(arg)) for arg in args ]
cmd = " ".join(args)
# print("cmd: %s" % cmd)
(exitstatus, output) = commands.getstatusoutput(cmd)
if exitstatus != 0:
raise CmdError(exitstatus >> 8, output)
return output
finally:
if dir:
os.chdir(olddir)
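# Added note (illustrative, not part of the original file): runcmd shell-quotes
# each argument, runs the command (optionally inside 'dir'), raises CmdError on
# a non-zero exit status and returns the combined output, e.g.
#   applied = runcmd(["quilt", "applied"], patchdir)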
class PatchError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "Patch Error: %s" % self.msg
class PatchSet(object):
defaults = {
"strippath": 1
}
def __init__(self, dir, d):
self.dir = dir
self.d = d
self.patches = []
self._current = None
def current(self):
return self._current
def Clean(self):
"""
Clean out the patch set. Generally includes unapplying all
patches and wiping out all associated metadata.
"""
raise NotImplementedError()
def Import(self, patch, force):
if not patch.get("file"):
if not patch.get("remote"):
raise PatchError("Patch file must be specified in patch import.")
else:
patch["file"] = bb.fetch.localpath(patch["remote"], self.d)
for param in PatchSet.defaults:
if not patch.get(param):
patch[param] = PatchSet.defaults[param]
if patch.get("remote"):
patch["file"] = bb.data.expand(bb.fetch.localpath(patch["remote"], self.d), self.d)
patch["filemd5"] = bb.utils.md5_file(patch["file"])
def Push(self, force):
raise NotImplementedError()
def Pop(self, force):
raise NotImplementedError()
def Refresh(self, remote = None, all = None):
raise NotImplementedError()
class PatchTree(PatchSet):
def __init__(self, dir, d):
PatchSet.__init__(self, dir, d)
def Import(self, patch, force = None):
""""""
PatchSet.Import(self, patch, force)
if self._current is not None:
i = self._current + 1
else:
i = 0
self.patches.insert(i, patch)
def _applypatch(self, patch, force = False, reverse = False, run = True):
shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
if reverse:
shellcmd.append('-R')
if not run:
return "sh" + "-c" + " ".join(shellcmd)
if not force:
shellcmd.append('--dry-run')
output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
if force:
return
shellcmd.pop(len(shellcmd) - 1)
output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
return output
def Push(self, force = False, all = False, run = True):
bb.note("self._current is %s" % self._current)
bb.note("patches is %s" % self.patches)
if all:
for i in self.patches:
if self._current is not None:
self._current = self._current + 1
else:
self._current = 0
bb.note("applying patch %s" % i)
self._applypatch(i, force)
else:
if self._current is not None:
self._current = self._current + 1
else:
self._current = 0
bb.note("applying patch %s" % self.patches[self._current])
return self._applypatch(self.patches[self._current], force)
def Pop(self, force = None, all = None):
if all:
for i in self.patches:
self._applypatch(i, force, True)
else:
self._applypatch(self.patches[self._current], force, True)
def Clean(self):
""""""
class GitApplyTree(PatchTree):
def __init__(self, dir, d):
PatchTree.__init__(self, dir, d)
def _applypatch(self, patch, force = False, reverse = False, run = True):
shellcmd = ["git", "--git-dir=.", "apply", "-p%s" % patch['strippath']]
if reverse:
shellcmd.append('-R')
shellcmd.append(patch['file'])
if not run:
return "sh" + "-c" + " ".join(shellcmd)
return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
class QuiltTree(PatchSet):
def _runcmd(self, args, run = True):
quiltrc = bb.data.getVar('QUILTRCFILE', self.d, 1)
if not run:
return ["quilt"] + ["--quiltrc"] + [quiltrc] + args
runcmd(["quilt"] + ["--quiltrc"] + [quiltrc] + args, self.dir)
def _quiltpatchpath(self, file):
return os.path.join(self.dir, "patches", os.path.basename(file))
def __init__(self, dir, d):
PatchSet.__init__(self, dir, d)
self.initialized = False
p = os.path.join(self.dir, 'patches')
if not os.path.exists(p):
os.makedirs(p)
def Clean(self):
try:
self._runcmd(["pop", "-a", "-f"])
except Exception:
pass
self.initialized = True
def InitFromDir(self):
# read series -> self.patches
seriespath = os.path.join(self.dir, 'patches', 'series')
if not os.path.exists(self.dir):
raise Exception("Error: %s does not exist." % self.dir)
if os.path.exists(seriespath):
series = file(seriespath, 'r')
for line in series.readlines():
patch = {}
parts = line.strip().split()
patch["quiltfile"] = self._quiltpatchpath(parts[0])
patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
if len(parts) > 1:
patch["strippath"] = parts[1][2:]
self.patches.append(patch)
series.close()
# determine which patches are applied -> self._current
try:
output = runcmd(["quilt", "applied"], self.dir)
except CmdError:
import sys
if sys.exc_value.output.strip() == "No patches applied":
return
else:
raise sys.exc_value
output = [val for val in output.split('\n') if not val.startswith('#')]
for patch in self.patches:
if os.path.basename(patch["quiltfile"]) == output[-1]:
self._current = self.patches.index(patch)
self.initialized = True
def Import(self, patch, force = None):
if not self.initialized:
self.InitFromDir()
PatchSet.Import(self, patch, force)
args = ["import", "-p", patch["strippath"]]
if force:
args.append("-f")
args.append("-dn")
args.append(patch["file"])
self._runcmd(args)
patch["quiltfile"] = self._quiltpatchpath(patch["file"])
patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
# TODO: determine if the file being imported:
# 1) is already imported, and is the same
# 2) is already imported, but differs
self.patches.insert(self._current or 0, patch)
def Push(self, force = False, all = False, run = True):
# quilt push [-f]
args = ["push"]
if force:
args.append("-f")
if all:
args.append("-a")
if not run:
return self._runcmd(args, run)
self._runcmd(args)
if self._current is not None:
self._current = self._current + 1
else:
self._current = 0
def Pop(self, force = None, all = None):
# quilt pop [-f]
args = ["pop"]
if force:
args.append("-f")
if all:
args.append("-a")
self._runcmd(args)
if self._current == 0:
self._current = None
if self._current is not None:
self._current = self._current - 1
def Refresh(self, **kwargs):
if kwargs.get("remote"):
patch = self.patches[kwargs["patch"]]
if not patch:
raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
(type, host, path, user, pswd, parm) = bb.decodeurl(patch["remote"])
if type == "file":
import shutil
if not patch.get("file") and patch.get("remote"):
patch["file"] = bb.fetch.localpath(patch["remote"], self.d)
shutil.copyfile(patch["quiltfile"], patch["file"])
else:
raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type))
else:
# quilt refresh
args = ["refresh"]
if kwargs.get("quiltfile"):
args.append(os.path.basename(kwargs["quiltfile"]))
elif kwargs.get("patch"):
args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
self._runcmd(args)
class Resolver(object):
def __init__(self, patchset):
raise NotImplementedError()
def Resolve(self):
raise NotImplementedError()
def Revert(self):
raise NotImplementedError()
def Finalize(self):
raise NotImplementedError()
class NOOPResolver(Resolver):
def __init__(self, patchset):
self.patchset = patchset
def Resolve(self):
olddir = os.path.abspath(os.curdir)
os.chdir(self.patchset.dir)
try:
self.patchset.Push()
except Exception:
import sys
os.chdir(olddir)
raise sys.exc_value
# Patch resolver which relies on the user doing all the work involved in the
# resolution, with the exception of refreshing the remote copy of the patch
# files (the urls).
class UserResolver(Resolver):
def __init__(self, patchset):
self.patchset = patchset
# Force a push in the patchset, then drop to a shell for the user to
# resolve any rejected hunks
def Resolve(self):
olddir = os.path.abspath(os.curdir)
os.chdir(self.patchset.dir)
try:
self.patchset.Push(False)
except CmdError, v:
# Patch application failed
patchcmd = self.patchset.Push(True, False, False)
            t = bb.data.getVar('T', self.patchset.d, 1)
if not t:
bb.msg.fatal(bb.msg.domain.Build, "T not set")
bb.mkdirhier(t)
import random
rcfile = "%s/bashrc.%s.%s" % (t, str(os.getpid()), random.random())
f = open(rcfile, "w")
f.write("echo '*** Manual patch resolution mode ***'\n")
f.write("echo 'Dropping to a shell, so patch rejects can be fixed manually.'\n")
f.write("echo 'Run \"quilt refresh\" when patch is corrected, press CTRL+D to exit.'\n")
f.write("echo ''\n")
f.write(" ".join(patchcmd) + "\n")
f.write("#" + bb.data.getVar('TERMCMDRUN', d, 1))
f.close()
os.chmod(rcfile, 0775)
os.environ['TERMWINDOWTITLE'] = "Bitbake: Please fix patch rejects manually"
os.environ['TERMRCFILE'] = rcfile
            rc = os.system(bb.data.getVar('TERMCMDRUN', self.patchset.d, 1))
if os.WIFEXITED(rc) and os.WEXITSTATUS(rc) != 0:
bb.msg.fatal(bb.msg.domain.Build, ("Cannot proceed with manual patch resolution - '%s' not found. " \
+ "Check TERMCMDRUN variable.") % bb.data.getVar('TERMCMDRUN', d, 1))
# Construct a new PatchSet after the user's changes, compare the
# sets, checking patches for modifications, and doing a remote
# refresh on each.
oldpatchset = self.patchset
self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)
for patch in self.patchset.patches:
oldpatch = None
for opatch in oldpatchset.patches:
if opatch["quiltfile"] == patch["quiltfile"]:
oldpatch = opatch
if oldpatch:
patch["remote"] = oldpatch["remote"]
if patch["quiltfile"] == oldpatch["quiltfile"]:
if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
# user change? remote refresh
self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
else:
# User did not fix the problem. Abort.
raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
except Exception:
os.chdir(olddir)
raise
os.chdir(olddir)
|
{
"content_hash": "86c5cb0198e55ab938da9549d566c685",
"timestamp": "",
"source": "github",
"line_count": 407,
"max_line_length": 153,
"avg_line_length": 33.434889434889435,
"alnum_prop": 0.531305114638448,
"repo_name": "bticino/openembedded",
"id": "54c12bac2ad07af4c5296ed3d9dbb8c9f179edac",
"size": "13608",
"binary": false,
"copies": "1",
"ref": "refs/heads/bt-overlay",
"path": "lib/oe/patch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4997735"
},
{
"name": "C++",
"bytes": "181067"
},
{
"name": "D",
"bytes": "5030"
},
{
"name": "JavaScript",
"bytes": "13087"
},
{
"name": "PHP",
"bytes": "9265"
},
{
"name": "Perl",
"bytes": "662"
},
{
"name": "Python",
"bytes": "338056"
},
{
"name": "Shell",
"bytes": "317785"
}
],
"symlink_target": ""
}
|
import getpass
import sys
import argparse
# In the main directory of the repo where you are developing with PyGithub,
# type:
# git submodule add git://github.com/jacquev6/PyGithub.git PyGithub
# git submodule init
# git submodule update
#
# That will populate a PyGithub subdirectory with a clone of PyGithub
# Then, to add it to your Python path, you can do:
sys.path.append("./PyGithub");
from github import Github
from github import GithubException
parser = argparse.ArgumentParser(description='List all repos for an org')
parser.add_argument('orgName',help='github Organization name')
args = parser.parse_args()
username = raw_input("Github Username:")
pw = getpass.getpass()
g = Github(username, pw)
print("All repos for organization: ",args.orgName)
org = g.get_organization(args.orgName)
## TODO: Add some error checking code here to see whether
## the lookup was successful. Do we try/except or check the return value?
repos = org.get_repos()
for repo in repos:
print (repo.name)
def main():
pass
if __name__ == '__main__':
main()
|
{
"content_hash": "7005d2264b4a08530324d4339ef3d029",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 75,
"avg_line_length": 23.91111111111111,
"alnum_prop": 0.7267657992565055,
"repo_name": "SLAC-Lab/svn_to_git",
"id": "807002f5a2e329088e0f1b543a318c6e361006a2",
"size": "1135",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "repolist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4403"
}
],
"symlink_target": ""
}
|
from toscaparser.common.exception import ExceptionCollector
from toscaparser.common.exception import UnknownFieldError
from toscaparser.elements.capabilitytype import CapabilityTypeDef
import toscaparser.elements.interfaces as ifaces
from toscaparser.elements.interfaces import InterfacesDef
from toscaparser.elements.relationshiptype import RelationshipType
from toscaparser.elements.statefulentitytype import StatefulEntityType
class NodeType(StatefulEntityType):
'''TOSCA built-in node type.'''
SECTIONS = (DERIVED_FROM, METADATA, PROPERTIES, VERSION,
DESCRIPTION, ATTRIBUTES, REQUIREMENTS, CAPABILITIES,
INTERFACES, ARTIFACTS) = \
('derived_from', 'metadata', 'properties', 'version',
'description', 'attributes', 'requirements', 'capabilities',
'interfaces', 'artifacts')
def __init__(self, ntype, custom_def=None):
super(NodeType, self).__init__(ntype, self.NODE_PREFIX, custom_def)
self.ntype = ntype
self.custom_def = custom_def
self._validate_keys()
@property
def parent_type(self):
'''Return a node this node is derived from.'''
if not hasattr(self, 'defs'):
return None
pnode = self.derived_from(self.defs)
if pnode:
return NodeType(pnode, self.custom_def)
@property
def relationship(self):
'''Return a dictionary of relationships to other node types.
This method returns a dictionary of named relationships that nodes
of the current node type (self) can have to other nodes (of specific
types) in a TOSCA template.
'''
relationship = {}
requires = self.get_all_requirements()
if requires:
# NOTE(sdmonov): Check if requires is a dict.
# If it is a dict convert it to a list of dicts.
# This is needed because currently the code below supports only
# lists as requirements definition. The following check will
# make sure if a map (dict) was provided it will be converted to
# a list before proceeding to the parsing.
if isinstance(requires, dict):
requires = [{key: value} for key, value in requires.items()]
keyword = None
node_type = None
for require in requires:
for key, req in require.items():
if 'relationship' in req:
relation = req.get('relationship')
if 'type' in relation:
relation = relation.get('type')
node_type = req.get('node')
value = req
if node_type:
keyword = 'node'
else:
# If value is a dict and has a type key
# we need to lookup the node type using
# the capability type
value = req
if isinstance(value, dict):
captype = value['capability']
value = (self.
_get_node_type_by_cap(captype))
keyword = key
node_type = value
rtype = RelationshipType(relation, keyword,
self.custom_def)
relatednode = NodeType(node_type, self.custom_def)
relationship[rtype] = relatednode
return relationship
def _get_node_type_by_cap(self, cap):
'''Find the node type that has the provided capability
This method will lookup all node types if they have the
provided capability.
'''
# Filter the node types
node_types = [node_type for node_type in self.TOSCA_DEF.keys()
if node_type.startswith(self.NODE_PREFIX) and
node_type != 'tosca.nodes.Root']
custom_node_types = [node_type for node_type in self.custom_def.keys()
if node_type.startswith(self.NODE_PREFIX) and
node_type != 'tosca.nodes.Root']
for node_type in node_types + custom_node_types:
if node_type in self.TOSCA_DEF:
node_def = self.TOSCA_DEF[node_type]
else:
node_def = self.custom_def[node_type]
if isinstance(node_def, dict) and 'capabilities' in node_def:
node_caps = node_def['capabilities']
for value in node_caps.values():
if isinstance(value, dict) and \
'type' in value and value['type'] == cap:
return node_type
def _get_relation(self, key, ndtype):
relation = None
ntype = NodeType(ndtype, self.custom_def)
caps = ntype.get_capabilities()
if caps and key in caps.keys():
c = caps[key]
for r in self.RELATIONSHIP_TYPE:
rtypedef = ntype.TOSCA_DEF[r]
for properties in rtypedef.values():
if c.type in properties:
relation = r
break
if relation:
break
else:
for properties in rtypedef.values():
if c.parent_type in properties:
relation = r
break
return relation
def get_capabilities_objects(self):
'''Return a list of capability objects.'''
typecapabilities = []
caps = self.get_value(self.CAPABILITIES, None, True)
if caps:
# 'name' is symbolic name of the capability
# 'value' is a dict { 'type': <capability type name> }
for name, value in caps.items():
ctype = value.get('type')
cap = CapabilityTypeDef(name, ctype, self.type,
self.custom_def)
typecapabilities.append(cap)
return typecapabilities
def get_capabilities(self):
'''Return a dictionary of capability name-objects pairs.'''
return {cap.name: cap
for cap in self.get_capabilities_objects()}
@property
def requirements(self):
return self.get_value(self.REQUIREMENTS, None, True)
def get_all_requirements(self):
return self.requirements
@property
def interfaces(self):
interfaces = self.get_value(self.INTERFACES)
if self.parent_type is not None:
if self.parent_type.interfaces is not None:
import copy
parent_interfaces = copy.deepcopy(self.parent_type.interfaces)
parent_interfaces.pop(ifaces.LIFECYCLE, None)
parent_interfaces.pop(ifaces.CONFIGURE, None)
parent_interfaces.pop(ifaces.LIFECYCLE_SHORTNAME, None)
parent_interfaces.pop(ifaces.CONFIGURE_SHORTNAME, None)
if parent_interfaces:
if interfaces:
parent_interfaces.update(interfaces)
interfaces = parent_interfaces
return interfaces
@property
def lifecycle_inputs(self):
'''Return inputs to life cycle operations if found.'''
inputs = []
interfaces = self.interfaces
if interfaces:
for name, value in interfaces.items():
if name == ifaces.LIFECYCLE:
for x, y in value.items():
if x == 'inputs':
for i in y.iterkeys():
inputs.append(i)
return inputs
@property
def lifecycle_operations(self):
'''Return available life cycle operations if found.'''
ops = None
interfaces = self.interfaces
if interfaces:
i = InterfacesDef(self.type, ifaces.LIFECYCLE)
ops = i.lifecycle_ops
return ops
def get_capability(self, name):
caps = self.get_capabilities()
if caps and name in caps.keys():
return caps[name].value
def get_capability_type(self, name):
captype = self.get_capability(name)
if captype and name in captype.keys():
return captype[name].value
def _validate_keys(self):
if self.defs:
for key in self.defs.keys():
if key not in self.SECTIONS:
ExceptionCollector.appendException(
UnknownFieldError(what='Nodetype"%s"' % self.ntype,
field=key))
|
{
"content_hash": "429e7edd697f2f0d8c4f2a88ab57507b",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 78,
"avg_line_length": 40.75688073394495,
"alnum_prop": 0.537647720877884,
"repo_name": "openstack/tosca-parser",
"id": "07551e7a34493b2270e5f7631296f9b2dc441d92",
"size": "9458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toscaparser/elements/nodetype.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "572583"
},
{
"name": "Shell",
"bytes": "14077"
}
],
"symlink_target": ""
}
|
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from iniconfig import IniConfig
class Config(IniConfig):
# file section
file_useCsvWizard = IniConfig.iniproperty('file', 'useCsvWizard', True)
file_owner = IniConfig.iniproperty('file', 'owner', {'name':'david'})
file_address = IniConfig.iniproperty('file', 'address', 'my house')
file_files = IniConfig.iniproperty('file', 'files', [])
# tools section
tools_searches = IniConfig.iniproperty('tools', 'searches', ['primate', 'wolf'])
tools_matchMode = IniConfig.iniproperty('tools', 'matchMode', 0)
tools_matchCase = IniConfig.iniproperty('tools', 'matchCase', False)
tools_weight = IniConfig.iniproperty('tools', 'weight', False)
# config section
config_restore = IniConfig.iniproperty('config', 'restore', True)
config = Config(filename='demo.ini')
print('\nbefore')
print('file/useCsvWizard:', config.file_useCsvWizard)
print('file/owner:', config.file_owner)
print('file/files:', config.file_files)
print('file/address:', config.file_address)
print('tools/searches:', config.tools_searches)
config.file_useCsvWizard = False
config.file_files = ['myfile.py']
config.file_owner['surname'] = 'miro'
config.file_address = 'my office'
print(config.file_owner.__class__.__name__)
print('\nafter')
print('file/useCsvWizard:', config.file_useCsvWizard)
print('file/owner:', config.file_owner)
print('file/files:', config.file_files)
print('file/address:', config.file_address)
print('tools/searches:', config.tools_searches)
|
{
"content_hash": "d99f71958fc2fc0ea3c6edf4725f5d17",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 84,
"avg_line_length": 33,
"alnum_prop": 0.7108585858585859,
"repo_name": "dmiro/iniconfig",
"id": "e165dd06930227abc75394b533c90d61766e4f7e",
"size": "1633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9920"
}
],
"symlink_target": ""
}
|
from Primitives.RockPrimitives import *
from Planners.RockTestPlanner import *
numLocs = 2
numRocks = 2
rockLocs = [0, 0, 1, 1]
robotX = 0
robotY = 1
rockSampled = [0, 0]
rockQuality = [0.5, 0.5]
initial_state = RockState(numLocs, numRocks, rockLocs, rockQuality, rockSampled, robotX, robotY)
goal_state = initial_state
p = RockTestPlanner()
print p.next_action(initial_state, goal_state, None)
print p.next_action(initial_state, goal_state, 'ogood')
print p.next_action(initial_state, goal_state, 'ogood')
print p.next_action(initial_state, goal_state, 'obad')
print p.next_action(initial_state, goal_state, 'ogood')
print p.next_action(initial_state, goal_state, 'obad')
print p.next_action(initial_state, goal_state, 'ogood')
print p.next_action(initial_state, goal_state, 'ogood')
print p.next_action(initial_state, goal_state, 'obad')
|
{
"content_hash": "ac5eb94652afd01a7504a820f154d8d8",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 96,
"avg_line_length": 32.5,
"alnum_prop": 0.7420118343195267,
"repo_name": "ciarakamahele/sasy",
"id": "fc804d207fa3b7dfe7b78c9890962e8b00d2eac4",
"size": "1437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simulator/Tests/RockTestPlannerTest.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "128735"
},
{
"name": "Makefile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "79176"
}
],
"symlink_target": ""
}
|
'''
Created on Dec 11, 2016
@author: safdar
'''
from basetrainer import BaseTrainer
from keras.layers.core import Flatten, Dropout, Dense, Lambda, Activation
from keras.layers.advanced_activations import ELU
from keras.layers.convolutional import Convolution2D
from keras.models import Sequential
from keras.optimizers import Adam, SGD
from keras.layers.pooling import MaxPooling2D
class Custom2Trainer(BaseTrainer):
def __init__(self, model_name, overwrite):
BaseTrainer.__init__(self, model_name, overwrite=overwrite)
def __create_model__(self):
row, col, ch = 64, 64, 3 # camera format
model = Sequential()
model.add(Convolution2D(16, 3, 3, border_mode="same", input_shape=(row, col, ch)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(ELU())
model.add(Convolution2D(32, 3, 3, border_mode="same"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.50))
model.add(ELU())
model.add(Flatten())
model.add(Dense(200))
# model.add(Dropout(.2))
model.add(ELU())
model.add(Dense(100))
model.add(Dropout(0.5))
model.add(ELU())
model.add(Dense(1))
# model.add(Activation('tanh')) # To limit the output to between -1 and 1
return model
def __compile_model__(self):
# opt = SGD(lr=0.1, momentum=0.1, decay=0.1, nesterov=True)
opt = Adam(lr=0.001)
self.__model__.compile(loss='mse', optimizer=opt, metrics=['accuracy'])
|
{
"content_hash": "6dcd42805859c24a76d309896d47978f",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 90,
"avg_line_length": 30.529411764705884,
"alnum_prop": 0.6255619781631342,
"repo_name": "safdark/behavioral-cloning",
"id": "c0ef3eaa1f56c98ed6fc5d2f615aac054216d616",
"size": "1557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/custom2trainer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "179216"
},
{
"name": "Python",
"bytes": "20565"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import copy
import inspect
import os
import pydoc
__all__ = [
'settings',
]
DEFAULT = {
'AUTH_COLLECTION': '',
'CORS': {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET,POST,PUT,PATCH,DELETE,OPTIONS',
'Access-Control-Allow-Headers': 'Accept,Accept-Encoding,Authorization,'
'Content-Length,Content-Type,Origin,'
'User-Agent,X-CSRFToken,'
'X-Requested-With',
'Access-Control-Allow-Credentials': 'true',
},
'MIDDLEWARES': [],
'MONGODB': {
'URI': '',
'USERNAME': '',
'PASSWORD': '',
'HOST': 'localhost',
'HOSTS': [],
'PORT': 27017,
'PORTS': [],
'DATABASE': 'mongorest',
'OPTIONS': [],
},
'RETRY_LIMIT': 5,
'BASE_RETRY_TIME': 2,
'LINEAR_RETRIES': False,
'SESSION_STORE': '',
}
class Settings(object):
"""
Settings Class
Will be responsible for loading the DEFAULT settings and USER settings
Uses the environment variable MONGOREST_SETTINGS_MODULE to find where the
Settings are store
"""
_settings = copy.deepcopy(DEFAULT)
_settings_module = None
def __getattr__(self, name):
settings_module = os.environ.get('MONGOREST_SETTINGS_MODULE')
if not settings_module:
self._settings_module = None
self._settings = copy.deepcopy(DEFAULT)
if settings_module and self._settings_module != settings_module:
self._settings_module = settings_module
self._settings = dict(
self._settings, **dict(
(name, setting) for (name, setting) in inspect.getmembers(
pydoc.locate(settings_module)
) if name.isupper()
)
)
try:
return self._settings[name]
except KeyError:
raise AttributeError('Invalid setting: \'{0}\''.format(name))
settings = Settings()
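# Hedged usage sketch (added for illustration; not part of the original module).
# 'myproject.custom_settings' is a hypothetical module path: any importable
# module whose UPPERCASE attributes should override the DEFAULT values will do.
def _example_override_settings(module_path='myproject.custom_settings'):
    """Point MONGOREST_SETTINGS_MODULE at a module, then read a merged setting."""
    os.environ['MONGOREST_SETTINGS_MODULE'] = module_path
    return settings.RETRY_LIMIT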
|
{
"content_hash": "6d7377a9757f5636b667c2c0f480467f",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 79,
"avg_line_length": 28.210526315789473,
"alnum_prop": 0.5377798507462687,
"repo_name": "lvieirajr/mongorest",
"id": "13cd1a9a8cf35383560273bcffa4cdac90050e59",
"size": "2170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongorest/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "147328"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
"""Assorted utilities shared between parts of apitools."""
import collections
import os
import random
from protorpc import messages
import six
from six.moves import http_client
import six.moves.urllib.error as urllib_error
import six.moves.urllib.parse as urllib_parse
import six.moves.urllib.request as urllib_request
from apitools.base.py import encoding
from apitools.base.py import exceptions
__all__ = [
'DetectGae',
'DetectGce',
]
_RESERVED_URI_CHARS = r":/?#[]@!$&'()*+,;="
def DetectGae():
"""Determine whether or not we're running on GAE.
This is based on:
https://developers.google.com/appengine/docs/python/#The_Environment
Returns:
True iff we're running on GAE.
"""
server_software = os.environ.get('SERVER_SOFTWARE', '')
return (server_software.startswith('Development/') or
server_software.startswith('Google App Engine/'))
def DetectGce():
"""Determine whether or not we're running on GCE.
This is based on:
https://cloud.google.com/compute/docs/metadata#runninggce
Returns:
True iff we're running on a GCE instance.
"""
try:
o = urllib_request.build_opener(urllib_request.ProxyHandler({})).open(
urllib_request.Request('http://metadata.google.internal'))
except urllib_error.URLError:
return False
return (o.getcode() == http_client.OK and
o.headers.get('metadata-flavor') == 'Google')
def NormalizeScopes(scope_spec):
"""Normalize scope_spec to a set of strings."""
if isinstance(scope_spec, six.string_types):
return set(scope_spec.split(' '))
elif isinstance(scope_spec, collections.Iterable):
return set(scope_spec)
raise exceptions.TypecheckError(
'NormalizeScopes expected string or iterable, found %s' % (
type(scope_spec),))
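# Added example (illustrative, not part of the original module): both spellings
# normalize to the same set, e.g.
#   NormalizeScopes('email profile') == NormalizeScopes(['profile', 'email'])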
def Typecheck(arg, arg_type, msg=None):
if not isinstance(arg, arg_type):
if msg is None:
if isinstance(arg_type, tuple):
msg = 'Type of arg is "%s", not one of %r' % (type(arg), arg_type)
else:
msg = 'Type of arg is "%s", not "%s"' % (type(arg), arg_type)
raise exceptions.TypecheckError(msg)
return arg
def ExpandRelativePath(method_config, params, relative_path=None):
"""Determine the relative path for request."""
path = relative_path or method_config.relative_path or ''
for param in method_config.path_params:
param_template = '{%s}' % param
# For more details about "reserved word expansion", see:
# http://tools.ietf.org/html/rfc6570#section-3.2.2
reserved_chars = ''
reserved_template = '{+%s}' % param
if reserved_template in path:
reserved_chars = _RESERVED_URI_CHARS
path = path.replace(reserved_template, param_template)
if param_template not in path:
raise exceptions.InvalidUserInputError(
'Missing path parameter %s' % param)
try:
# TODO(craigcitro): Do we want to support some sophisticated
# mapping here?
value = params[param]
except KeyError:
raise exceptions.InvalidUserInputError(
'Request missing required parameter %s' % param)
if value is None:
raise exceptions.InvalidUserInputError(
'Request missing required parameter %s' % param)
try:
if not isinstance(value, six.string_types):
value = str(value)
path = path.replace(param_template,
urllib_parse.quote(value.encode('utf_8'),
reserved_chars))
except TypeError as e:
raise exceptions.InvalidUserInputError(
'Error setting required parameter %s to value %s: %s' % (
param, value, e))
return path
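# Hedged illustration (added; not part of the original module): how
# ExpandRelativePath fills a URI template. _FakeMethodConfig is a stand-in for
# the generated method config object and exists only for this example.
_FakeMethodConfig = collections.namedtuple(
    '_FakeMethodConfig', ['relative_path', 'path_params'])
def _ExampleExpandRelativePath():
    config = _FakeMethodConfig(
        relative_path='projects/{project}/objects/{+name}',
        path_params=['project', 'name'])
    # '{+name}' is a reserved expansion, so the '/' in the value is preserved,
    # giving 'projects/p1/objects/a/b'.
    return ExpandRelativePath(config, {'project': 'p1', 'name': 'a/b'})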
def CalculateWaitForRetry(retry_attempt, max_wait=60):
"""Calculates amount of time to wait before a retry attempt.
Wait time grows exponentially with the number of attempts.
A random amount of jitter is added to spread out retry attempts from different
clients.
Args:
retry_attempt: Retry attempt counter.
max_wait: Upper bound for wait time.
Returns:
Amount of time to wait before retrying request.
"""
wait_time = 2 ** retry_attempt
# randrange requires a nonzero interval, so we want to drop it if
# the range is too small for jitter.
if retry_attempt:
max_jitter = (2 ** retry_attempt) / 2
wait_time += random.randrange(-max_jitter, max_jitter)
return min(wait_time, max_wait)
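# Added illustration (not in the original module): sample the backoff schedule.
# Values vary between runs because of the random jitter added above.
def _ExampleBackoffSchedule(max_attempts=5):
    """Return one sampled wait time per retry attempt."""
    return [CalculateWaitForRetry(attempt) for attempt in range(max_attempts)]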
def AcceptableMimeType(accept_patterns, mime_type):
"""Return True iff mime_type is acceptable for one of accept_patterns.
Note that this function assumes that all patterns in accept_patterns
will be simple types of the form "type/subtype", where one or both
of these can be "*". We do not support parameters (i.e. "; q=") in
patterns.
Args:
accept_patterns: list of acceptable MIME types.
mime_type: the mime type we would like to match.
Returns:
Whether or not mime_type matches (at least) one of these patterns.
"""
unsupported_patterns = [p for p in accept_patterns if ';' in p]
if unsupported_patterns:
raise exceptions.GeneratedClientError(
'MIME patterns with parameter unsupported: "%s"' % ', '.join(
unsupported_patterns))
def MimeTypeMatches(pattern, mime_type):
"""Return True iff mime_type is acceptable for pattern."""
# Some systems use a single '*' instead of '*/*'.
if pattern == '*':
pattern = '*/*'
return all(accept in ('*', provided) for accept, provided
in zip(pattern.split('/'), mime_type.split('/')))
return any(MimeTypeMatches(pattern, mime_type) for pattern in accept_patterns)
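# Added examples (illustrative, not part of the original module): expected
# behaviour of AcceptableMimeType for a few simple patterns.
def _ExampleMimeMatching():
    assert AcceptableMimeType(['*/*'], 'application/json')
    assert AcceptableMimeType(['application/*'], 'application/json')
    assert not AcceptableMimeType(['image/png'], 'application/json')
    return True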
def MapParamNames(params, request_type):
"""Reverse parameter remappings for URL construction."""
return [encoding.GetCustomJsonFieldMapping(request_type, json_name=p) or p
for p in params]
def MapRequestParams(params, request_type):
"""Perform any renames/remappings needed for URL construction.
Currently, we have several ways to customize JSON encoding, in
particular of field names and enums. This works fine for JSON
bodies, but also needs to be applied for path and query parameters
in the URL.
This function takes a dictionary from param names to values, and
performs any registered mappings. We also need the request type (to
look up the mappings).
Args:
params: (dict) Map from param names to values
request_type: (protorpc.messages.Message) request type for this API call
Returns:
A new dict of the same size, with all registered mappings applied.
"""
new_params = dict(params)
for param_name, value in params.items():
field_remapping = encoding.GetCustomJsonFieldMapping(
request_type, python_name=param_name)
if field_remapping is not None:
new_params[field_remapping] = new_params.pop(param_name)
if isinstance(value, messages.Enum):
new_params[param_name] = encoding.GetCustomJsonEnumMapping(
type(value), python_name=str(value)) or str(value)
return new_params
|
{
"content_hash": "325bd65942f4cfe00d5e41c6dce2b42a",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 80,
"avg_line_length": 33.65384615384615,
"alnum_prop": 0.684,
"repo_name": "craigcitro/original-apitools",
"id": "5158bfa4eb069fda9e61ca5904db8c6dbaded7f5",
"size": "7000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apitools/base/py/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "315950"
}
],
"symlink_target": ""
}
|
"""momentsinfo_convroll4_doublescale_fs5"""
import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
import data
import load
import nn_plankton
import dihedral
import tmp_dnn
import tta
batch_size = 128
chunk_size = 32768
num_chunks_train = 240
momentum = 0.9
learning_rate_schedule = {
0: 0.001,
100: 0.0001,
200: 0.00001,
}
validate_every = 40
save_every = 40
sdir = "/mnt/storage/users/avdnoord/git/kaggle-plankton/predictions/"
train_pred_file = sdir+"train--pl_blend5_convroll4_fastpool_1024_weightdecay_no_dropout_33_66--pl_blend5_convroll4_fastpool_1024_weightdecay_no_dropout_33_66-geit-20150313-181138--avg-probs.npy"
valid_pred_file = sdir+"valid--pl_blend5_convroll4_fastpool_1024_weightdecay_no_dropout_33_66--pl_blend5_convroll4_fastpool_1024_weightdecay_no_dropout_33_66-geit-20150313-181138--avg-probs.npy"
test_pred_file = sdir+"test--pl_blend5_convroll4_fastpool_1024_weightdecay_no_dropout_33_66--pl_blend5_convroll4_fastpool_1024_weightdecay_no_dropout_33_66-geit-20150313-181138--avg-probs.npy"
data_loader = load.PredictionsWithMomentsDataLoader(train_pred_file=train_pred_file, valid_pred_file=valid_pred_file, test_pred_file=test_pred_file,
num_chunks_train=num_chunks_train, chunk_size=chunk_size)
create_train_gen = lambda: data_loader.create_random_gen()
create_eval_train_gen = lambda: data_loader.create_fixed_gen("train")
create_eval_valid_gen = lambda: data_loader.create_fixed_gen("valid")
create_eval_test_gen = lambda: data_loader.create_fixed_gen("test")
def build_model():
l0 = nn.layers.InputLayer((batch_size, data.num_classes))
l0_size = nn.layers.InputLayer((batch_size, 7))
l1_size = nn.layers.DenseLayer(l0_size, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
l2_size = nn.layers.DenseLayer(l1_size, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
l3_size = nn.layers.DenseLayer(l2_size, num_units=data.num_classes, W=nn_plankton.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=None)
l1 = nn_plankton.NonlinLayer(l0, T.log)
ltot = nn.layers.ElemwiseSumLayer([l1, l3_size])
# norm_by_sum = lambda x: x / x.sum(1).dimshuffle(0, "x")
lout = nn_plankton.NonlinLayer(ltot, nonlinearity=T.nnet.softmax)
return [l0, l0_size], lout
def build_objective(l_ins, l_out):
print "regu"
lambda_reg = 0.002
# lambda_reg = 0.005
params = nn.layers.get_all_non_bias_params(l_out)
reg_term = sum(T.sum(p**2) for p in params)
def loss(y, t):
return nn_plankton.log_loss(y, t) + lambda_reg * reg_term
return nn.objectives.Objective(l_out, loss_function=loss)
# L2 0.0005 0.5646362
# L2 0.001 0.560494
# L2 0.002 0.559762
# L2 0.01 0.560949
# L2 0.05 0.563861
# 0.559762
# 1 layer 64
|
{
"content_hash": "49f736e76c30df7ae5565e78fb669651",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 194,
"avg_line_length": 33.116279069767444,
"alnum_prop": 0.7082162921348315,
"repo_name": "yejingxin/kaggle-ndsb",
"id": "e1afd1502e71875b0c5127ae06055466d176f79b",
"size": "2849",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "configurations/featmomentsinfo_pl_blend5_convroll4_fastpool_1024_weightdecay_no_dropout_33_66.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "581526"
}
],
"symlink_target": ""
}
|
import marshmallow
from molecule import logger
from molecule.model import base
LOG = logger.get_logger(__name__)
class AnsibleSchema(base.Base):
config_file = marshmallow.fields.Str()
playbook = marshmallow.fields.Str()
raw_env_vars = marshmallow.fields.Dict()
extra_vars = marshmallow.fields.Str()
verbose = marshmallow.fields.Bool()
become = marshmallow.fields.Bool()
tags = marshmallow.fields.Str()
class DriverSchema(base.Base):
name = marshmallow.fields.Str()
class PlatformSchema(base.Base):
name = marshmallow.fields.Str()
box = marshmallow.fields.Str()
box_version = marshmallow.fields.Str()
box_url = marshmallow.fields.Str()
class ProviderOptionsSchema(base.Base):
memory = marshmallow.fields.Int()
cpus = marshmallow.fields.Int()
class ProviderSchema(base.Base):
name = marshmallow.fields.Str()
type = marshmallow.fields.Str()
options = marshmallow.fields.Nested(ProviderOptionsSchema())
class InterfaceSchema(base.Base):
network_name = marshmallow.fields.Str()
type = marshmallow.fields.Str()
auto_config = marshmallow.fields.Bool()
ip = marshmallow.fields.Str()
class InstanceOptionsSchema(base.Base):
append_platform_to_hostname = marshmallow.fields.Bool()
class InstanceSchema(base.Base):
name = marshmallow.fields.Str()
ansible_groups = marshmallow.fields.List(marshmallow.fields.Str())
interfaces = marshmallow.fields.List(
marshmallow.fields.Nested(InterfaceSchema()))
raw_config_args = marshmallow.fields.List(marshmallow.fields.Str())
options = marshmallow.fields.Nested(InstanceOptionsSchema())
class VagrantSchema(base.Base):
platforms = marshmallow.fields.List(
marshmallow.fields.Nested(PlatformSchema()))
providers = marshmallow.fields.List(
marshmallow.fields.Nested(ProviderSchema()))
instances = marshmallow.fields.List(
marshmallow.fields.Nested(InstanceSchema()))
class VerifierOptionsSchema(base.Base):
sudo = marshmallow.fields.Bool()
class VerifierSchema(base.Base):
name = marshmallow.fields.Str()
options = marshmallow.fields.Nested(VerifierOptionsSchema())
class MoleculeSchema(marshmallow.Schema):
ansible = marshmallow.fields.Nested(AnsibleSchema())
vagrant = marshmallow.fields.Nested(VagrantSchema())
driver = marshmallow.fields.Nested(DriverSchema())
verifier = marshmallow.fields.Nested(VerifierSchema())
def validate(c):
schema = MoleculeSchema(strict=True)
return schema.load(c)
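# Hedged usage sketch (added; not part of the original module): validating a
# minimal config dict against the v1 schema defined above. The values are
# illustrative only.
def example_validate():
    return validate({
        'driver': {'name': 'vagrant'},
        'verifier': {'name': 'testinfra', 'options': {'sudo': True}},
    })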
|
{
"content_hash": "2f966ada2deb5f64db4d6aa71b18f94a",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 71,
"avg_line_length": 28.53932584269663,
"alnum_prop": 0.728740157480315,
"repo_name": "kireledan/molecule",
"id": "98cfc46f6b6976e5dddfabfdb0ebf31feab2147e",
"size": "3660",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "molecule/model/schema_v1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "577315"
}
],
"symlink_target": ""
}
|
import socket
import Queue
import time
import select
def plugin_main(parameter, queue):
host = 'localhost'
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = int(parameter)
sock.bind(('localhost', port))
sock.listen(5)
insock = [sock]
while True:
        inready, outready, exceptready = select.select(insock, [], [])
for s in inready:
if s == sock:
client, addr = sock.accept()
insock.append(client)
else:
data = s.recv(1024)
queue.put(data)
time.sleep(5)
|
{
"content_hash": "c5f42ea1669fa623285534948f9e32b2",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 70,
"avg_line_length": 23.88888888888889,
"alnum_prop": 0.5224806201550387,
"repo_name": "Pathangi-Jatinshravan/py_shipper",
"id": "fa8626ade2bf959faf958e52227262e580a68a2c",
"size": "645",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plugins/inputs/network_port.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7899"
},
{
"name": "Shell",
"bytes": "168"
}
],
"symlink_target": ""
}
|
"""This API defines FeatureColumn for sequential input.
NOTE: This API is a work in progress and will likely be changing frequently.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.feature_column import feature_column as fc_old
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope
# pylint: disable=protected-access
def sequence_input_layer(
features,
feature_columns,
weight_collections=None,
trainable=True):
""""Builds input layer for sequence input.
All `feature_columns` must be sequence dense columns with the same
`sequence_length`. The output of this method can be fed into sequence
networks, such as RNN.
The output of this method is a 3D `Tensor` of shape `[batch_size, T, D]`.
`T` is the maximum sequence length for this batch, which could differ from
batch to batch.
If multiple `feature_columns` are given with `Di` `num_elements` each, their
outputs are concatenated. So, the final `Tensor` has shape
`[batch_size, T, D0 + D1 + ... + Dn]`.
Example:
```python
rating = sequence_numeric_column('rating')
watches = sequence_categorical_column_with_identity(
'watches', num_buckets=1000)
watches_embedding = embedding_column(watches, dimension=10)
columns = [rating, watches]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
features: A dict mapping keys to tensors.
feature_columns: An iterable of dense sequence columns. Valid columns are
- `embedding_column` that wraps a `sequence_categorical_column_with_*`
- `sequence_numeric_column`.
weight_collections: A list of collection names to which the Variable will be
added. Note that variables will also be added to collections
`tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`.
Returns:
An `(input_layer, sequence_length)` tuple where:
- input_layer: A float `Tensor` of shape `[batch_size, T, D]`.
`T` is the maximum sequence length for this batch, which could differ
from batch to batch. `D` is the sum of `num_elements` for all
`feature_columns`.
- sequence_length: An int `Tensor` of shape `[batch_size]`. The sequence
length for each example.
Raises:
ValueError: If any of the `feature_columns` is the wrong type.
"""
feature_columns = fc_old._normalize_feature_columns(feature_columns)
for c in feature_columns:
if not isinstance(c, fc_old._SequenceDenseColumn):
raise ValueError(
'All feature_columns must be of type _SequenceDenseColumn. '
'You can wrap a sequence_categorical_column with an embedding_column '
'or indicator_column. '
'Given (type {}): {}'.format(type(c), c))
with variable_scope.variable_scope(
None, default_name='sequence_input_layer', values=features.values()):
builder = fc_old._LazyBuilder(features)
output_tensors = []
sequence_lengths = []
ordered_columns = []
for column in sorted(feature_columns, key=lambda x: x.name):
ordered_columns.append(column)
with variable_scope.variable_scope(
None, default_name=column._var_scope_name):
dense_tensor, sequence_length = column._get_sequence_dense_tensor(
builder,
weight_collections=weight_collections,
trainable=trainable)
# Flattens the final dimension to produce a 3D Tensor.
num_elements = column._variable_shape.num_elements()
shape = array_ops.shape(dense_tensor)
target_shape = [shape[0], shape[1], num_elements]
output_tensors.append(
array_ops.reshape(dense_tensor, shape=target_shape))
sequence_lengths.append(sequence_length)
fc_old._verify_static_batch_size_equality(output_tensors, ordered_columns)
fc_old._verify_static_batch_size_equality(sequence_lengths, ordered_columns)
sequence_length = _assert_all_equal_and_return(sequence_lengths)
return array_ops.concat(output_tensors, -1), sequence_length
def concatenate_context_input(context_input, sequence_input):
"""Replicates `context_input` across all timesteps of `sequence_input`.
Expands dimension 1 of `context_input` then tiles it `sequence_length` times.
This value is appended to `sequence_input` on dimension 2 and the result is
returned.
Args:
context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.
sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,
padded_length, d0]`.
Returns:
A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,
d0 + d1]`.
Raises:
ValueError: If `sequence_input` does not have rank 3 or `context_input` does
not have rank 2.
"""
seq_rank_check = check_ops.assert_rank(
sequence_input,
3,
message='sequence_input must have rank 3',
data=[array_ops.shape(sequence_input)])
seq_type_check = check_ops.assert_type(
sequence_input,
dtypes.float32,
message='sequence_input must have dtype float32; got {}.'.format(
sequence_input.dtype))
ctx_rank_check = check_ops.assert_rank(
context_input,
2,
message='context_input must have rank 2',
data=[array_ops.shape(context_input)])
ctx_type_check = check_ops.assert_type(
context_input,
dtypes.float32,
message='context_input must have dtype float32; got {}.'.format(
context_input.dtype))
with ops.control_dependencies(
[seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):
padded_length = array_ops.shape(sequence_input)[1]
tiled_context_input = array_ops.tile(
array_ops.expand_dims(context_input, 1),
array_ops.concat([[1], [padded_length], [1]], 0))
return array_ops.concat([sequence_input, tiled_context_input], 2)
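# --- Illustrative shape example (added for clarity; not from the original
# file, and `tf` below refers to the public TensorFlow namespace). With a
# [batch_size, d1] context and a [batch_size, T, d0] sequence, the result has
# shape [batch_size, T, d0 + d1]:
#
#   context = tf.ones([2, 3])      # [batch_size=2, d1=3]
#   sequence = tf.ones([2, 5, 4])  # [batch_size=2, T=5, d0=4]
#   combined = concatenate_context_input(context, sequence)
#   # combined has shape [2, 5, 7]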
def sequence_categorical_column_with_identity(
key, num_buckets, default_value=None):
"""Returns a feature column that represents sequences of integers.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
watches = sequence_categorical_column_with_identity(
'watches', num_buckets=1000)
watches_embedding = embedding_column(watches, dimension=10)
columns = [watches_embedding]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
key: A unique string identifying the input feature.
num_buckets: Range of inputs. Namely, inputs are expected to be in the
range `[0, num_buckets)`.
default_value: If `None`, this column's graph operations will fail for
out-of-range inputs. Otherwise, this value must be in the range
`[0, num_buckets)`, and will replace out-of-range inputs.
Returns:
A `_SequenceCategoricalColumn`.
Raises:
ValueError: if `num_buckets` is less than one.
ValueError: if `default_value` is not in range `[0, num_buckets)`.
"""
return fc_old._SequenceCategoricalColumn(
fc_old.categorical_column_with_identity(
key=key,
num_buckets=num_buckets,
default_value=default_value))
def sequence_categorical_column_with_hash_bucket(
key, hash_bucket_size, dtype=dtypes.string):
"""A sequence of categorical terms where ids are set by hashing.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
tokens = sequence_categorical_column_with_hash_bucket(
'tokens', hash_bucket_size=1000)
tokens_embedding = embedding_column(tokens, dimension=10)
columns = [tokens_embedding]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
key: A unique string identifying the input feature.
hash_bucket_size: An int > 1. The number of buckets.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `_SequenceCategoricalColumn`.
Raises:
ValueError: `hash_bucket_size` is not greater than 1.
ValueError: `dtype` is neither string nor integer.
"""
return fc_old._SequenceCategoricalColumn(
fc_old.categorical_column_with_hash_bucket(
key=key,
hash_bucket_size=hash_bucket_size,
dtype=dtype))
def sequence_categorical_column_with_vocabulary_file(
key, vocabulary_file, vocabulary_size=None, num_oov_buckets=0,
default_value=None, dtype=dtypes.string):
"""A sequence of categorical terms where ids use a vocabulary file.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
states = sequence_categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
states_embedding = embedding_column(states, dimension=10)
columns = [states_embedding]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
key: A unique string identifying the input feature.
vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of elements in the vocabulary. This must be no
      greater than the length of `vocabulary_file`; if it is less, later
      values are ignored. If None, it is set to the length of `vocabulary_file`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` can not be specified with
`default_value`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `_SequenceCategoricalColumn`.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
return fc_old._SequenceCategoricalColumn(
fc_old.categorical_column_with_vocabulary_file(
key=key,
vocabulary_file=vocabulary_file,
vocabulary_size=vocabulary_size,
num_oov_buckets=num_oov_buckets,
default_value=default_value,
dtype=dtype))
def sequence_categorical_column_with_vocabulary_list(
key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0):
"""A sequence of categorical terms where ids use an in-memory list.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
colors = sequence_categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
num_oov_buckets=2)
colors_embedding = embedding_column(colors, dimension=3)
columns = [colors_embedding]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
input_layer, sequence_length = sequence_input_layer(features, columns)
rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
key: A unique string identifying the input feature.
vocabulary_list: An ordered iterable defining the vocabulary. Each feature
is mapped to the index of its value (if present) in `vocabulary_list`.
Must be castable to `dtype`.
dtype: The type of features. Only string and integer types are supported.
If `None`, it will be inferred from `vocabulary_list`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
hash of the input value. A positive `num_oov_buckets` can not be specified
with `default_value`.
Returns:
A `_SequenceCategoricalColumn`.
Raises:
ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: if `dtype` is not integer or string.
"""
return fc_old._SequenceCategoricalColumn(
fc_old.categorical_column_with_vocabulary_list(
key=key,
vocabulary_list=vocabulary_list,
dtype=dtype,
default_value=default_value,
num_oov_buckets=num_oov_buckets))
def sequence_numeric_column(
key,
shape=(1,),
default_value=0.,
dtype=dtypes.float32,
normalizer_fn=None):
"""Returns a feature column that represents sequences of numeric data.
Example:
```python
temperature = sequence_numeric_column('temperature')
columns = [temperature]
features = tf.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatureLayer(columns)
input_layer, sequence_length = sequence_feature_layer(features)
rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
outputs, state = tf.nn.dynamic_rnn(
rnn_cell, inputs=input_layer, sequence_length=sequence_length)
```
Args:
key: A unique string identifying the input features.
shape: The shape of the input data per sequence id. E.g. if `shape=(2,)`,
each example must contain `2 * sequence_length` values.
default_value: A single value compatible with `dtype` that is used for
padding the sparse data into a dense `Tensor`.
dtype: The type of values.
normalizer_fn: If not `None`, a function that can be used to normalize the
value of the tensor after `default_value` is applied for parsing.
Normalizer function takes the input `Tensor` as its argument, and returns
      the output `Tensor` (e.g. lambda x: (x - 3.0) / 4.2). Please note that
      even though the most common use case of this function is normalization, it
      can be used for any kind of TensorFlow transformation.
Returns:
A `SequenceNumericColumn`.
Raises:
TypeError: if any dimension in shape is not an int.
ValueError: if any dimension in shape is not a positive integer.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
shape = fc._check_shape(shape=shape, key=key)
if not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype must be convertible to float. '
'dtype: {}, key: {}'.format(dtype, key))
if normalizer_fn is not None and not callable(normalizer_fn):
raise TypeError(
'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
return SequenceNumericColumn(
key,
shape=shape,
default_value=default_value,
dtype=dtype,
normalizer_fn=normalizer_fn)
def _assert_all_equal_and_return(tensors, name=None):
"""Asserts that all tensors are equal and returns the first one."""
with ops.name_scope(name, 'assert_all_equal', values=tensors):
if len(tensors) == 1:
return tensors[0]
assert_equal_ops = []
for t in tensors[1:]:
assert_equal_ops.append(check_ops.assert_equal(tensors[0], t))
with ops.control_dependencies(assert_equal_ops):
return array_ops.identity(tensors[0])
class SequenceNumericColumn(
fc.SequenceDenseColumn,
collections.namedtuple(
'SequenceNumericColumn',
('key', 'shape', 'default_value', 'dtype', 'normalizer_fn'))):
"""Represents sequences of numeric data."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class.
In this case, we apply the `normalizer_fn` to the input tensor.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Normalized input tensor.
"""
input_tensor = transformation_cache.get(self.key, state_manager)
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return input_tensor
@property
def variable_shape(self):
"""Returns a `TensorShape` representing the shape of sequence input."""
return tensor_shape.TensorShape(self.shape)
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""Returns a `TensorSequenceLengthPair`.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
"""
sp_tensor = transformation_cache.get(self, state_manager)
dense_tensor = sparse_ops.sparse_tensor_to_dense(
sp_tensor, default_value=self.default_value)
# Reshape into [batch_size, T, variable_shape].
dense_shape = array_ops.concat(
[array_ops.shape(dense_tensor)[:1], [-1], self.variable_shape],
axis=0)
dense_tensor = array_ops.reshape(dense_tensor, shape=dense_shape)
# Get the number of timesteps per example
# For the 2D case, the raw values are grouped according to num_elements;
# for the 3D case, the grouping happens in the third dimension, and
# sequence length is not affected.
num_elements = (self.variable_shape.num_elements()
if sp_tensor.shape.ndims == 2 else 1)
seq_length = fc_old._sequence_length_from_sparse_tensor(
sp_tensor, num_elements=num_elements)
return fc.SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=seq_length)
# TODO(b/119409767): Implement parents, _{get,from}_config.
@property
def parents(self):
"""See 'FeatureColumn` base class."""
raise NotImplementedError()
def _get_config(self):
"""See 'FeatureColumn` base class."""
raise NotImplementedError()
@classmethod
def _from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
raise NotImplementedError()
# pylint: enable=protected-access
|
{
"content_hash": "265bb4ba602642e6ddcba7d628bfc96a",
"timestamp": "",
"source": "github",
"line_count": 548,
"max_line_length": 80,
"avg_line_length": 37.589416058394164,
"alnum_prop": 0.6989174231758823,
"repo_name": "brchiu/tensorflow",
"id": "67ffb939663358b5e356b3b626978db959c1bac9",
"size": "21288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/feature_column/python/feature_column/sequence_feature_column_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "473950"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "51674376"
},
{
"name": "CMake",
"bytes": "199085"
},
{
"name": "Dockerfile",
"bytes": "36908"
},
{
"name": "Go",
"bytes": "1285435"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "875500"
},
{
"name": "Jupyter Notebook",
"bytes": "2623054"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "63390"
},
{
"name": "Objective-C",
"bytes": "15634"
},
{
"name": "Objective-C++",
"bytes": "101475"
},
{
"name": "PHP",
"bytes": "5191"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41718475"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "490100"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
"""Clipper Management Utilities"""
from __future__ import print_function, with_statement, absolute_import
from fabric.api import *
from fabric.contrib.files import append
import os
import requests
import json
import yaml
import pprint
import subprocess32 as subprocess
import shutil
from sklearn import base
from sklearn.externals import joblib
from cStringIO import StringIO
import sys
from .cloudpickle import CloudPickler
import time
import re
__all__ = ['Clipper']
cur_dir = os.path.dirname(os.path.abspath(__file__))
MODEL_REPO = "/tmp/clipper-models"
DOCKER_NW = "clipper_nw"
CONTAINER_CONDA_PLATFORM = 'linux-64'
REDIS_STATE_DB_NUM = 1
REDIS_MODEL_DB_NUM = 2
REDIS_CONTAINER_DB_NUM = 3
REDIS_RESOURCE_DB_NUM = 4
REDIS_APPLICATION_DB_NUM = 5
DEFAULT_REDIS_IP = "redis"
DEFAULT_REDIS_PORT = 6379
CLIPPER_QUERY_PORT = 1337
CLIPPER_MANAGEMENT_PORT = 1338
CLIPPER_RPC_PORT = 7000
CLIPPER_LOGS_PATH = "/tmp/clipper-logs"
CLIPPER_DOCKER_LABEL = "ai.clipper.container.label"
CLIPPER_MODEL_CONTAINER_LABEL = "ai.clipper.model_container.model_version"
DEFAULT_LABEL = ["DEFAULT"]
aws_cli_config = """
[default]
region = us-east-1
aws_access_key_id = {access_key}
aws_secret_access_key = {secret_key}
"""
LOCAL_HOST_NAMES = ["local", "localhost", "127.0.0.1"]
EXTERNALLY_MANAGED_MODEL = "EXTERNAL"
class ClipperManagerException(Exception):
pass
class Clipper:
"""
Connection to a Clipper instance for administrative purposes.
Sets up the machine for running Clipper. This includes verifying
SSH credentials and initializing Docker.
    Docker and docker-compose must already be installed on the machine
    before connecting to it.
Parameters
----------
host : str
The hostname of the machine to start Clipper on. The machine
should allow passwordless SSH access.
user : str, optional
The SSH username. This field must be specified if `host` is not local.
key_path : str, optional.
The path to the SSH private key. This field must be specified if `host` is not local.
sudo : bool, optional.
Specifies level of execution for docker commands (sudo if true, standard if false).
ssh_port : int, optional
The SSH port to use. Default is port 22.
check_for_docker : bool, optional
If True, checks that Docker is running on the host machine. Default is True.
redis_port : int, optional
The port to use for connecting to redis. Default is port 6379.
redis_ip : string, optional
The ip address of the redis instance that Clipper should use.
If unspecified, a docker container running redis will be started
on `host` at the port specified by `redis_port`.
redis_persistence_path : string, optional
The directory path to which redis data should be persisted. The directory
should not already exist. If unspecified, redis will not persist data to disk.
restart_containers : bool, optional
If true, containers will restart on failure. If false, containers
will not restart automatically.
"""
def __init__(self,
host,
user=None,
key_path=None,
sudo=False,
ssh_port=22,
check_for_docker=True,
redis_ip=DEFAULT_REDIS_IP,
redis_port=DEFAULT_REDIS_PORT,
redis_persistence_path=None,
restart_containers=True):
self.redis_ip = redis_ip
self.redis_port = redis_port
self.docker_compost_dict = {
'networks': {
'default': {
'external': {
'name': DOCKER_NW
}
}
},
'services': {
'mgmt_frontend': {
'command': [
'--redis_ip=%s' % self.redis_ip,
'--redis_port=%d' % self.redis_port
],
'image':
'clipper/management_frontend:latest',
'ports': [
'%d:%d' % (CLIPPER_MANAGEMENT_PORT,
CLIPPER_MANAGEMENT_PORT)
],
'labels': {
CLIPPER_DOCKER_LABEL: ""
}
},
'query_frontend': {
'command': [
'--redis_ip=%s' % self.redis_ip,
'--redis_port=%d' % self.redis_port
],
'depends_on': ['mgmt_frontend'],
'image':
'clipper/query_frontend:latest',
'ports': [
'%d:%d' % (CLIPPER_RPC_PORT, CLIPPER_RPC_PORT),
'%d:%d' % (CLIPPER_QUERY_PORT, CLIPPER_QUERY_PORT)
],
'labels': {
CLIPPER_DOCKER_LABEL: ""
}
}
},
'version': '2'
}
start_redis = (self.redis_ip == DEFAULT_REDIS_IP)
if start_redis:
self.docker_compost_dict['services']['redis'] = {
'image': 'redis:alpine',
'ports': ['%d:%d' % (self.redis_port, self.redis_port)],
'command': "redis-server --port %d" % self.redis_port,
'labels': {
CLIPPER_DOCKER_LABEL: ""
}
}
self.docker_compost_dict['services']['mgmt_frontend'][
'depends_on'] = ['redis']
self.docker_compost_dict['services']['query_frontend'][
'depends_on'].append('redis')
if redis_persistence_path:
if not os.path.exists(redis_persistence_path):
self.docker_compost_dict['services']['redis'][
'volumes'] = ['%s:/data' % redis_persistence_path]
else:
print(
"The directory specified by the redis persistence path already exists"
)
raise ClipperManagerException(
"The directory specified by the redis persistence path already exists"
)
self.restart_containers = restart_containers
if self.restart_containers:
self.docker_compost_dict['services']['mgmt_frontend'][
'restart'] = 'always'
self.docker_compost_dict['services']['query_frontend'][
'restart'] = 'always'
if start_redis:
self.docker_compost_dict['services']['redis'][
'restart'] = 'always'
self.sudo = sudo
self.host = host
if self._host_is_local():
self.host = "localhost"
env.host_string = self.host
else:
if not user or not key_path:
print(
"user and key_path must be specified when instantiating Clipper with a nonlocal host"
)
raise ClipperManagerException(
"user and key_path must be specified when instantiating Clipper with a nonlocal host"
)
env.user = user
env.key_filename = key_path
env.host_string = "%s:%d" % (host, ssh_port)
if check_for_docker:
# Make sure docker is running on cluster
self._start_docker_if_necessary()
def _host_is_local(self):
return self.host in LOCAL_HOST_NAMES
def _start_docker_if_necessary(self):
with hide("warnings", "output", "running"):
print("Checking if Docker is running...")
self._execute_root("docker ps")
dc_installed = self._execute_root(
"docker-compose --version", warn_only=True)
if dc_installed.return_code != 0:
print("docker-compose not installed on host.")
raise ClipperManagerException(
"docker-compose not installed on host.")
nw_create_command = ("docker network create --driver bridge {nw}"
.format(nw=DOCKER_NW))
self._execute_root(nw_create_command, warn_only=True)
self._execute_standard(
"mkdir -p {model_repo}".format(model_repo=MODEL_REPO))
def _execute_root(self, *args, **kwargs):
if not self.sudo:
return self._execute_standard(*args, **kwargs)
elif self._host_is_local():
return self._execute_local(True, *args, **kwargs)
else:
return sudo(*args, **kwargs)
def _execute_standard(self, *args, **kwargs):
if self._host_is_local():
return self._execute_local(False, *args, **kwargs)
else:
return run(*args, **kwargs)
def _execute_local(self, as_root, *args, **kwargs):
if self.sudo and as_root:
root_args = list(args)
root_args[0] = "sudo %s" % root_args[0]
args = tuple(root_args)
# local is not currently capable of simultaneously printing and
# capturing output, as run/sudo do. The capture kwarg allows you to
# switch between printing and capturing as necessary, and defaults to
# False. In this case, we need to capture the output and return it.
if "capture" not in kwargs:
kwargs["capture"] = True
# fabric.local() does not accept the "warn_only"
# key word argument, so we must remove it before
# calling
if "warn_only" in kwargs:
del kwargs["warn_only"]
# Forces execution to continue in the face of an error,
# just like warn_only=True
with warn_only():
result = local(*args, **kwargs)
else:
result = local(*args, **kwargs)
return result
def _execute_append(self, filename, text, **kwargs):
if self._host_is_local():
file = open(filename, "a+")
# As with fabric.append(), we should only
# append the text if it is not already
# present within the file
if text not in file.read():
file.write(text)
file.close()
else:
append(filename, text, **kwargs)
def _execute_put(self, local_path, remote_path, *args, **kwargs):
if self._host_is_local():
# We should only copy data if the paths are different
if local_path != remote_path:
if os.path.isdir(local_path):
remote_path = os.path.join(remote_path,
os.path.basename(local_path))
# if remote_path exists, delete it because shutil.copytree requires
# that the dst path doesn't exist
if os.path.exists(remote_path):
shutil.rmtree(remote_path)
shutil.copytree(local_path, remote_path)
else:
shutil.copy2(local_path, remote_path)
else:
put(
local_path=local_path,
remote_path=remote_path,
*args,
**kwargs)
def start(self):
"""Start a Clipper instance.
"""
with hide("output", "warnings", "running"):
self._execute_standard("rm -f docker-compose.yml")
self._execute_append("docker-compose.yml",
yaml.dump(
self.docker_compost_dict,
default_flow_style=False))
print(
"Note: Docker must download the Clipper Docker images if they are not already cached. This may take awhile."
)
self._execute_root("docker-compose up -d query_frontend")
print("Clipper is running")
def register_application(self, name, model, input_type, default_output,
slo_micros):
"""Register a new Clipper application.
Parameters
----------
name : str
The name of the application.
model : str
The name of the model this application will query.
input_type : str
One of "integers", "floats", "doubles", "bytes", or "strings".
default_output : string
The default prediction to use if the model does not return a prediction
by the end of the latency objective.
slo_micros : int
The query latency objective for the application in microseconds.
This is the processing latency between Clipper receiving a request
and sending a response. It does not account for network latencies
before a request is received or after a response is sent.
If Clipper cannot process a query within the latency objective,
the default output is returned. Therefore, it is recommended that
the objective not be set aggressively low unless absolutely necessary.
40000 (40ms) is a good starting value, but the optimal latency objective
will vary depending on the application.
"""
url = "http://%s:%d/admin/add_app" % (self.host,
CLIPPER_MANAGEMENT_PORT)
req_json = json.dumps({
"name": name,
"candidate_model_names": [model],
"input_type": input_type,
"default_output": default_output,
"latency_slo_micros": slo_micros
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
print(r.text)
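    # --- Illustrative usage sketch (added for clarity; not part of the
    # original class). The application name, model name, input type, default
    # output, and SLO below are assumptions chosen for the example:
    #
    #   clipper = Clipper("localhost")
    #   clipper.register_application(
    #       name="example_app",
    #       model="example_model",
    #       input_type="doubles",
    #       default_output="0.0",
    #       slo_micros=40000)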
def get_all_apps(self, verbose=False):
"""Gets information about all applications registered with Clipper.
Parameters
----------
verbose : bool
If set to False, the returned list contains the apps' names.
If set to True, the list contains application info dictionaries.
These dictionaries have the same attribute name-value pairs that were
provided to `register_application`.
Returns
-------
list
Returns a list of information about all apps registered to Clipper.
If no apps are registered with Clipper, an empty list is returned.
"""
url = "http://%s:1338/admin/get_all_applications" % self.host
req_json = json.dumps({"verbose": verbose})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
if r.status_code == requests.codes.ok:
return r.json()
else:
print(r.text)
return None
def get_app_info(self, name):
"""Gets detailed information about a registered application.
Parameters
----------
name : str
The name of the application to look up
Returns
-------
dict
Returns a dictionary with the specified application's info. This
will contain the attribute name-value pairs that were provided to
`register_application`. If no application with name `name` is
registered with Clipper, None is returned.
"""
url = "http://%s:1338/admin/get_application" % self.host
req_json = json.dumps({"name": name})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
if r.status_code == requests.codes.ok:
app_info = r.json()
if len(app_info) == 0:
return None
return app_info
else:
print(r.text)
return None
def deploy_model(self,
name,
version,
model_data,
container_name,
input_type,
labels=DEFAULT_LABEL,
num_containers=1):
"""Registers a model with Clipper and deploys instances of it in containers.
Parameters
----------
name : str
The name to assign this model.
version : int
The version to assign this model.
model_data : str or BaseEstimator
The trained model to add to Clipper. This can either be a
Scikit-Learn trained model object (an instance of BaseEstimator),
or a path to a serialized model. Note that many model serialization
formats split the model across multiple files (e.g. definition file
and weights file or files). If this is the case, `model_data` must be a path
to the root of a directory tree containing ALL the needed files.
Depending on the model serialization library you use, this may or may not
be the path you provided to the serialize method call.
container_name : str
The Docker container image to use to run this model container.
input_type : str
One of "integers", "floats", "doubles", "bytes", or "strings".
labels : list of str, optional
A list of strings annotating the model
num_containers : int, optional
The number of replicas of the model to create. More replicas can be
created later as well. Defaults to 1.
"""
with hide("warnings", "output", "running"):
if isinstance(model_data, base.BaseEstimator):
fname = name.replace("/", "_")
pkl_path = '/tmp/%s/%s.pkl' % (fname, fname)
model_data_path = "/tmp/%s" % fname
try:
os.mkdir(model_data_path)
except OSError:
pass
joblib.dump(model_data, pkl_path)
elif isinstance(model_data, str):
# assume that model_data is a path to the serialized model
model_data_path = model_data
print("model_data_path is: %s" % model_data_path)
else:
warn("%s is invalid model format" % str(type(model_data)))
return False
vol = "{model_repo}/{name}/{version}".format(
model_repo=MODEL_REPO, name=name, version=version)
# publish model to Clipper and verify success before copying model
# parameters to Clipper and starting containers
if not self._publish_new_model(
name, version, labels, input_type, container_name,
os.path.join(vol, os.path.basename(model_data_path))):
return False
print("Published model to Clipper")
if (not self._put_container_on_host(container_name)):
return False
# Put model parameter data on host
with hide("warnings", "output", "running"):
self._execute_standard("mkdir -p {vol}".format(vol=vol))
with cd(vol):
with hide("warnings", "output", "running"):
self._execute_put(model_data_path, vol)
print("Copied model data to host")
# aggregate results of starting all containers
return all([
self.add_container(name, version)
for r in range(num_containers)
])
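    # --- Illustrative usage sketch (added for clarity; not part of the
    # original class). Deploys a trained scikit-learn estimator; the model
    # name, version, and container image name are placeholders chosen for the
    # example rather than values mandated by Clipper:
    #
    #   model = LogisticRegression().fit(train_x, train_y)
    #   clipper.deploy_model(
    #       name="example_model",
    #       version=1,
    #       model_data=model,
    #       container_name="<your-sklearn-container-image>",
    #       input_type="doubles")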
def register_external_model(self,
name,
version,
input_type,
labels=DEFAULT_LABEL):
"""Registers a model with Clipper without deploying it in any containers.
Parameters
----------
name : str
The name to assign this model.
version : int
The version to assign this model.
input_type : str
One of "integers", "floats", "doubles", "bytes", or "strings".
labels : list of str, optional
A list of strings annotating the model.
"""
return self._publish_new_model(name, version, labels, input_type,
EXTERNALLY_MANAGED_MODEL,
EXTERNALLY_MANAGED_MODEL)
def _save_python_function(self, name, predict_function):
relative_base_serializations_dir = "predict_serializations"
predict_fname = "predict_func.pkl"
environment_fname = "environment.yml"
conda_dep_fname = "conda_dependencies.txt"
pip_dep_fname = "pip_dependencies.txt"
# Serialize function
s = StringIO()
c = CloudPickler(s, 2)
c.dump(predict_function)
serialized_prediction_function = s.getvalue()
# Set up serialization directory
serialization_dir = os.path.join(
'/tmp', relative_base_serializations_dir, name)
if not os.path.exists(serialization_dir):
os.makedirs(serialization_dir)
# Export Anaconda environment
environment_file_abs_path = os.path.join(serialization_dir,
environment_fname)
conda_env_exported = self._export_conda_env(environment_file_abs_path)
if conda_env_exported:
print("Anaconda environment found. Verifying packages.")
# Confirm that packages installed through conda are solvable
# Write out conda and pip dependency files to be supplied to container
if not (self._check_and_write_dependencies(
environment_file_abs_path, serialization_dir,
conda_dep_fname, pip_dep_fname)):
return False
print("Supplied environment details")
else:
print(
"Warning: Anaconda environment was either not found or exporting the environment "
"failed. Your function will still be serialized and deployed, but may fail due to "
"missing dependencies. In this case, please re-run inside an Anaconda environment. "
"See http://clipper.ai/documentation/python_model_deployment/ for more information."
)
# Write out function serialization
func_file_path = os.path.join(serialization_dir, predict_fname)
with open(func_file_path, "w") as serialized_function_file:
serialized_function_file.write(serialized_prediction_function)
print("Serialized and supplied predict function")
return serialization_dir
def deploy_pyspark_model(self,
name,
version,
predict_function,
pyspark_model,
sc,
input_type,
labels=DEFAULT_LABEL,
num_containers=1):
"""Deploy a Spark MLLib model to Clipper.
Parameters
----------
name : str
The name to assign this model.
version : int
The version to assign this model.
predict_function : function
A function that takes three arguments, a SparkContext, the ``model`` parameter and
a list of inputs of the type specified by the ``input_type`` argument.
Any state associated with the function other than the Spark model should
be captured via closure capture. Note that the function must not capture
the SparkContext or the model implicitly, as these objects are not pickleable
and therefore will prevent the ``predict_function`` from being serialized.
pyspark_model : pyspark.mllib.util.Saveable
An object that mixes in the pyspark Saveable mixin. Generally this
is either an mllib model or transformer. This model will be loaded
into the Clipper model container and provided as an argument to the
predict function each time it is called.
sc : SparkContext
The SparkContext associated with the model. This is needed
to save the model for pyspark.mllib models.
input_type : str
One of "integers", "floats", "doubles", "bytes", or "strings".
labels : list of str, optional
A set of strings annotating the model
num_containers : int, optional
The number of replicas of the model to create. More replicas can be
created later as well. Defaults to 1.
Returns
-------
bool
True if the model was successfully deployed. False otherwise.
"""
model_class = re.search("pyspark.*'",
str(type(pyspark_model))).group(0).strip("'")
if model_class is None:
raise ClipperManagerException(
"pyspark_model argument was not a pyspark object")
# save predict function
serialization_dir = self._save_python_function(name, predict_function)
# save Spark model
spark_model_save_loc = os.path.join(serialization_dir,
"pyspark_model_data")
try:
# we only import pyspark here so that if the caller of the library does
# not want to use this function, clipper_manager does not have a dependency
# on pyspark
import pyspark
if isinstance(pyspark_model, pyspark.ml.pipeline.PipelineModel):
pyspark_model.save(spark_model_save_loc)
else:
pyspark_model.save(sc, spark_model_save_loc)
except Exception as e:
print("Error saving spark model: %s" % e)
raise e
pyspark_container = "clipper/pyspark-container"
# extract the pyspark class name. This will be something like
# pyspark.mllib.classification.LogisticRegressionModel
with open(os.path.join(serialization_dir, "metadata.json"),
"w") as metadata_file:
json.dump({"model_class": model_class}, metadata_file)
print("Spark model saved")
# Deploy model
deploy_result = self.deploy_model(name, version, serialization_dir,
pyspark_container, input_type,
labels, num_containers)
# Remove temp files
shutil.rmtree(serialization_dir)
return deploy_result
def deploy_predict_function(self,
name,
version,
predict_function,
input_type,
labels=DEFAULT_LABEL,
num_containers=1):
"""Deploy an arbitrary Python function to Clipper.
The function should take a list of inputs of the type specified by `input_type` and
return a Python or numpy array of predictions as strings. All dependencies for the function
must be installed with Anaconda or Pip and this function must be called from within an Anaconda
environment.
Parameters
----------
name : str
The name to assign this model.
version : int
The version to assign this model.
predict_function : function
The prediction function. Any state associated with the function should be
captured via closure capture.
input_type : str
One of "integers", "floats", "doubles", "bytes", or "strings".
labels : list of str, optional
A list of strings annotating the model
num_containers : int, optional
The number of replicas of the model to create. More replicas can be
created later as well. Defaults to 1.
Returns
-------
bool
True if the model was successfully deployed. False otherwise.
Example
-------
Define a feature function ``center()`` and train a model on the featurized input::
def center(xs):
means = np.mean(xs, axis=0)
return xs - means
centered_xs = center(xs)
model = sklearn.linear_model.LogisticRegression()
model.fit(centered_xs, ys)
def centered_predict(inputs):
centered_inputs = center(inputs)
return model.predict(centered_inputs)
clipper.deploy_predict_function(
"example_model",
1,
centered_predict,
"doubles",
num_containers=1)
"""
default_python_container = "clipper/python-container"
serialization_dir = self._save_python_function(name, predict_function)
# Deploy function
deploy_result = self.deploy_model(name, version, serialization_dir,
default_python_container, input_type,
labels, num_containers)
# Remove temp files
shutil.rmtree(serialization_dir)
return deploy_result
def get_all_models(self, verbose=False):
"""Gets information about all models registered with Clipper.
Parameters
----------
verbose : bool
If set to False, the returned list contains the models' names.
If set to True, the list contains model info dictionaries.
Returns
-------
list
            Returns a list of information about all models registered with Clipper.
If no models are registered with Clipper, an empty list is returned.
"""
url = "http://%s:1338/admin/get_all_models" % self.host
req_json = json.dumps({"verbose": verbose})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
if r.status_code == requests.codes.ok:
return r.json()
else:
print(r.text)
return None
def get_model_info(self, model_name, model_version):
"""Gets detailed information about a registered model.
Parameters
----------
model_name : str
The name of the model to look up
model_version : int
The version of the model to look up
Returns
-------
dict
Returns a dictionary with the specified model's info.
If no model with name `model_name@model_version` is
registered with Clipper, None is returned.
"""
url = "http://%s:1338/admin/get_model" % self.host
req_json = json.dumps({
"model_name": model_name,
"model_version": model_version
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
if r.status_code == requests.codes.ok:
app_info = r.json()
if len(app_info) == 0:
return None
return app_info
else:
print(r.text)
return None
def get_all_containers(self, verbose=False):
"""Gets information about all containers registered with Clipper.
Parameters
----------
verbose : bool
            If set to False, the returned list contains the containers' names.
If set to True, the list contains container info dictionaries.
Returns
-------
list
            Returns a list of information about all containers registered with Clipper.
            If no containers are registered with Clipper, an empty list is returned.
"""
url = "http://%s:1338/admin/get_all_containers" % self.host
req_json = json.dumps({"verbose": verbose})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
if r.status_code == requests.codes.ok:
return r.json()
else:
print(r.text)
return None
def get_container_info(self, model_name, model_version, replica_id):
"""Gets detailed information about a registered container.
Parameters
----------
model_name : str
The name of the container to look up
model_version : int
The version of the container to look up
replica_id : int
The container replica to look up
Returns
-------
dict
A dictionary with the specified container's info.
If no corresponding container is registered with Clipper, None is returned.
"""
url = "http://%s:1338/admin/get_container" % self.host
req_json = json.dumps({
"model_name": model_name,
"model_version": model_version,
"replica_id": replica_id,
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
if r.status_code == requests.codes.ok:
app_info = r.json()
if len(app_info) == 0:
return None
return app_info
else:
print(r.text)
return None
def _inspect_selection_policy(self, app_name, uid):
# NOTE: This method is private (it's still functional, but it won't be documented)
# until Clipper supports different selection policies
"""Fetches a human-readable string with the current selection policy state.
Parameters
----------
app_name : str
The application whose policy state should be inspected.
uid : int
The user whose policy state should be inspected. The convention
in Clipper is to use 0 as the default user ID, but this may be
application specific.
Returns
-------
str
The string describing the selection state. Note that if the
policy state was not found, this string may contain an error
message from Clipper describing the problem.
"""
url = "http://%s:%d/admin/get_state" % (self.host,
CLIPPER_MANAGEMENT_PORT)
req_json = json.dumps({
"app_name": app_name,
"uid": uid,
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
return r.text
def _export_conda_env(self, environment_file_abs_path):
"""Returns true if attempt to export the current conda environment is successful
Parameters
----------
environment_file_abs_path : str
The desired absolute path for the exported conda environment file
"""
process = subprocess.Popen(
"PIP_FORMAT=legacy conda env export >> {environment_file_abs_path}".
format(environment_file_abs_path=environment_file_abs_path),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
process.wait()
return process.returncode == 0
def _check_and_write_dependencies(self, environment_path, directory,
conda_dep_fname, pip_dep_fname):
"""Returns true if the provided conda environment is compatible with the container os.
If packages listed in specified conda environment file have conflicting dependencies,
this function will warn the user and return False.
If there are no conflicting package dependencies, existence of the packages in the
container conda channel is tested. The user is warned about any missing packages.
All existing conda packages are written out to `conda_dep_fname` and pip packages
to `pip_dep_fname` in the given `directory`. This function then returns True.
Parameters
----------
environment_path : str
The path to the input conda environment file
directory : str
            The path to the directory containing the environment file
conda_dep_fname : str
The name of the output conda dependency file
pip_dep_fname : str
The name of the output pip dependency file
Returns
-------
bool
Returns True if the packages specified in `environment_fname` are compatible with conda
on the container os. Otherwise returns False.
"""
if "CONDA_PREFIX" not in os.environ:
print("No Anaconda environment found")
return False
root_prefix = os.environ["CONDA_PREFIX"].split("envs")[0]
py_path = os.path.join(root_prefix, "bin", "python")
process = subprocess.Popen(
"{py_path} {cur_dir}/check_and_write_deps.py {environment_path} {directory} {platform} {conda_dep_fname} {pip_dep_fname}".
format(
py_path=py_path,
cur_dir=cur_dir,
environment_path=environment_path,
directory=directory,
platform=CONTAINER_CONDA_PLATFORM,
conda_dep_fname=conda_dep_fname,
pip_dep_fname=pip_dep_fname),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
out, err = process.communicate()
print(out)
print(err)
return process.returncode == 0
def add_container(self, model_name, model_version):
"""Create a new container for an existing model.
Starts a new container for a model that has already been added to
Clipper. Note that models are uniquely identified by both name
and version, so this method will fail if you have not already called
`Clipper.deploy_model()` for the specified name and version.
Parameters
----------
model_name : str
The name of the model
model_version : int
The version of the model
Returns
----------
bool
True if the container was added successfully and False
if the container could not be added.
"""
with hide("warnings", "output", "running"):
# Look up model info in Redis
if self.redis_ip == DEFAULT_REDIS_IP:
redis_host = self.host
else:
redis_host = self.redis_ip
model_key = "{mn}:{mv}".format(mn=model_name, mv=model_version)
result = local(
"redis-cli -h {host} -p {redis_port} -n {db} hgetall {key}".
format(
host=redis_host,
redis_port=self.redis_port,
key=model_key,
db=REDIS_MODEL_DB_NUM),
capture=True)
print(result)
if "empty list or set" in result.stdout:
# Model not found
warn("Trying to add container but model {mn}:{mv} not in "
"Redis".format(mn=model_name, mv=model_version))
return False
splits = result.stdout.split("\n")
model_metadata = dict([(splits[i].strip(), splits[i + 1].strip())
for i in range(0, len(splits), 2)])
image_name = model_metadata["container_name"]
model_data_path = model_metadata["model_data_path"]
model_input_type = model_metadata["input_type"]
restart_policy = 'always' if self.restart_containers else 'no'
if image_name != EXTERNALLY_MANAGED_MODEL:
# Start container
add_container_cmd = (
"docker run -d --network={nw} --restart={restart_policy} -v {path}:/model:ro "
"-e \"CLIPPER_MODEL_NAME={mn}\" -e \"CLIPPER_MODEL_VERSION={mv}\" "
"-e \"CLIPPER_IP=query_frontend\" -e \"CLIPPER_INPUT_TYPE={mip}\" -l \"{clipper_label}\" -l \"{mv_label}\" "
"{image}".format(
path=model_data_path,
nw=DOCKER_NW,
image=image_name,
mn=model_name,
mv=model_version,
mip=model_input_type,
clipper_label=CLIPPER_DOCKER_LABEL,
mv_label="%s=%s:%d" % (CLIPPER_MODEL_CONTAINER_LABEL,
model_name, model_version),
restart_policy=restart_policy))
result = self._execute_root(add_container_cmd)
return result.return_code == 0
else:
print("Cannot start containers for externally managed model %s"
% model_name)
return False
def get_clipper_logs(self):
"""Copies the logs from all Docker containers running on the host machine
that have been tagged with the Clipper label (ai.clipper.container.label) into
the local filesystem.
Returns
-------
list(str)
Returns a list of local filenames containing the Docker container log snapshots.
"""
container_ids = self._get_clipper_container_ids()
cur_time_logs_path = os.path.join(CLIPPER_LOGS_PATH,
time.strftime("%Y%m%d-%H%M%S"))
if not os.path.exists(cur_time_logs_path):
os.makedirs(cur_time_logs_path)
log_file_names = []
for container in container_ids:
output = self._execute_root(
"docker logs {container}".format(container=container))
cur_log_fname = os.path.join(cur_time_logs_path,
"%s-container.log" % container)
with open(cur_log_fname, "w") as f:
f.write(output)
log_file_names.append(cur_log_fname)
return log_file_names
def _get_clipper_container_ids(self):
"""
Gets the container IDs of all containers labeled with the clipper label
"""
containers = self._execute_root(
"docker ps -aq --filter label={clipper_label}".format(
clipper_label=CLIPPER_DOCKER_LABEL))
ids = [l.strip() for l in containers.split("\n")]
print("Clipper container IDS found: %s" % str(ids))
return ids
def inspect_instance(self):
"""Fetches metrics from the running Clipper instance.
Returns
-------
str
The JSON string containing the current set of metrics
for this instance. On error, the string will be an error message
(not JSON formatted).
"""
url = "http://%s:%d/metrics" % (self.host, CLIPPER_QUERY_PORT)
r = requests.get(url)
try:
s = r.json()
except TypeError:
s = r.text
return s
def set_model_version(self, model_name, model_version, num_containers=0):
"""Changes the current model version to `model_version`.
This method can be used to do model rollback and rollforward to
any previously deployed version of the model. Note that model
versions automatically get updated when `deploy_model()` is
called, so there is no need to manually update the version as well.
Parameters
----------
model_name : str
The name of the model
model_version : int
The version of the model. Note that `model_version`
must be a model version that has already been deployed.
num_containers : int
The number of new containers to start with the newly
selected model version.
"""
url = "http://%s:%d/admin/set_model_version" % (
self.host, CLIPPER_MANAGEMENT_PORT)
req_json = json.dumps({
"model_name": model_name,
"model_version": model_version
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
print(r.text)
for r in range(num_containers):
self.add_container(model_name, model_version)
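    # --- Illustrative usage sketch (added for clarity; not part of the
    # original class). Rolls "example_model" back to a previously deployed
    # version and starts one replica of it; the names are placeholders:
    #
    #   clipper.set_model_version("example_model", 1, num_containers=1)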
def remove_inactive_containers(self, model_name):
"""Removes all containers serving stale versions of the specified model.
Parameters
----------
model_name : str
The name of the model whose old containers you want to clean.
"""
# Get all Docker containers tagged as model containers
num_containers_removed = 0
with hide("output", "warnings", "running"):
containers = self._execute_root(
"docker ps -aq --filter label={model_container_label}".format(
model_container_label=CLIPPER_MODEL_CONTAINER_LABEL))
if len(containers) > 0:
container_ids = [l.strip() for l in containers.split("\n")]
for container in container_ids:
# returns a string formatted as "<model_name>:<model_version>"
container_model_name_and_version = self._execute_root(
"docker inspect --format \"{{ index .Config.Labels \\\"%s\\\"}}\" %s"
% (CLIPPER_MODEL_CONTAINER_LABEL, container))
splits = container_model_name_and_version.split(":")
container_model_name = splits[0]
container_model_version = int(splits[1])
if container_model_name == model_name:
# check if container_model_version is the currently deployed version
model_info = self.get_model_info(
container_model_name, container_model_version)
                        if model_info is None or not model_info["is_current_version"]:
self._execute_root("docker stop {container}".
format(container=container))
self._execute_root("docker rm {container}".format(
container=container))
num_containers_removed += 1
print("Removed %d inactive containers for model %s" %
(num_containers_removed, model_name))
return num_containers_removed
def stop_all(self):
"""Stops and removes all Clipper Docker containers on the host.
"""
print("Stopping Clipper and all running models...")
with hide("output", "warnings", "running"):
container_ids = self._get_clipper_container_ids()
container_id_str = " ".join(container_ids)
self._execute_root(
"docker stop {ids}".format(ids=container_id_str),
warn_only=True)
self._execute_root(
"docker rm {ids}".format(ids=container_id_str), warn_only=True)
def _publish_new_model(self, name, version, labels, input_type,
container_name, model_data_path):
url = "http://%s:%d/admin/add_model" % (self.host,
CLIPPER_MANAGEMENT_PORT)
req_json = json.dumps({
"model_name": name,
"model_version": version,
"labels": labels,
"input_type": input_type,
"container_name": container_name,
"model_data_path": model_data_path
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
if r.status_code == requests.codes.ok:
return True
else:
print("Error publishing model: %s" % r.text)
return False
def _put_container_on_host(self, container_name):
"""Puts the provided container on the host.
Parameters
        ----------
container_name : str
The name of the container.
Notes
-----
This method will first check the host, then Docker Hub, then the local
machine to find the container.
This method is safe to call multiple times with the same container name.
Subsequent calls will detect that the container is already present on
the host and do nothing.
"""
with hide("output", "warnings", "running"):
# first see if container is already present on host
host_result = self._execute_root(
"docker images -q {cn}".format(cn=container_name))
if len(host_result.stdout) > 0:
print("Found %s on host" % container_name)
return True
# now try to pull from Docker Hub
hub_result = self._execute_root(
"docker pull {cn}".format(cn=container_name), warn_only=True)
if hub_result.return_code == 0:
print("Found %s in Docker hub" % container_name)
return True
# assume container_name refers to a local container and
# copy it to host
local_result = local(
"docker images -q {cn}".format(cn=container_name))
if len(local_result.stdout) > 0:
saved_fname = container_name.replace("/", "_")
subprocess.call("docker save -o /tmp/{fn}.tar {cn}".format(
fn=saved_fname, cn=container_name))
tar_loc = "/tmp/{fn}.tar".format(fn=saved_fname)
self._execute_put(tar_loc, tar_loc)
self._execute_root("docker load -i {loc}".format(loc=tar_loc))
# self._execute_root("docker tag {image_id} {cn}".format(
# image_id=image_id, cn=cn))
# now check to make sure we can access it
host_result = self._execute_root(
"docker images -q {cn}".format(cn=container_name))
if len(host_result.stdout) > 0:
print("Successfuly copied %s to host" % container_name)
return True
else:
warn("Problem copying container %s to host" %
container_name)
return False
# out of options
warn("Could not find %s, please try with a valid "
"container docker image")
return False
|
{
"content_hash": "d2ccb7b2811da70e29581501395b1358",
"timestamp": "",
"source": "github",
"line_count": 1257,
"max_line_length": 134,
"avg_line_length": 40.517899761336515,
"alnum_prop": 0.5491940075788813,
"repo_name": "rmdort/clipper",
"id": "3df0663604baa2ab0f10420056f5028127f632fd",
"size": "50931",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "clipper_admin/clipper_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "531447"
},
{
"name": "CMake",
"bytes": "118952"
},
{
"name": "Java",
"bytes": "40503"
},
{
"name": "Python",
"bytes": "153639"
},
{
"name": "Scala",
"bytes": "50751"
},
{
"name": "Shell",
"bytes": "17173"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import theano
import theano.tensor as T
import numpy as np
from six.moves import range
epsilon = 1.0e-9
def mean_squared_error(y_true, y_pred):
return T.sqr(y_pred - y_true).mean()
def mean_absolute_error(y_true, y_pred):
return T.abs_(y_pred - y_true).mean()
def squared_hinge(y_true, y_pred):
return T.sqr(T.maximum(1. - y_true * y_pred, 0.)).mean()
def hinge(y_true, y_pred):
return T.maximum(1. - y_true * y_pred, 0.).mean()
def categorical_crossentropy(y_true, y_pred):
'''Expects a binary class matrix instead of a vector of scalar classes
'''
y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
# scale preds so that the class probas of each sample sum to 1
y_pred /= y_pred.sum(axis=1, keepdims=True)
cce = T.nnet.categorical_crossentropy(y_pred, y_true)
return cce.mean()
def binary_crossentropy(y_true, y_pred):
y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
bce = T.nnet.binary_crossentropy(y_pred, y_true)
return bce.mean()
# aliases
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
from .utils.generic_utils import get_from_module
def get(identifier):
return get_from_module(identifier, globals(), 'objective')
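# Illustrative, hedged usage sketch (not part of the original module): compiles
# categorical_crossentropy into a theano function and evaluates it on a tiny,
# made-up one-hot example.
if __name__ == '__main__':
    _y_true = T.matrix('y_true')
    _y_pred = T.matrix('y_pred')
    _loss_fn = theano.function([_y_true, _y_pred],
                               categorical_crossentropy(_y_true, _y_pred),
                               allow_input_downcast=True)
    # two samples, three classes; targets are one-hot rows
    print(_loss_fn([[1., 0., 0.], [0., 1., 0.]],
                   [[.8, .1, .1], [.2, .7, .1]]))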
|
{
"content_hash": "49db7d42251a124c18c66a0760246f40",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 74,
"avg_line_length": 30.317073170731707,
"alnum_prop": 0.6798069187449718,
"repo_name": "jmportilla/keras",
"id": "4388406b805eb6dba51eb5dd556ec6006c8c5d91",
"size": "1243",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "keras/objectives.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182620"
}
],
"symlink_target": ""
}
|
import unittest
import requests
import json
class TestBasicAPI(unittest.TestCase):
def setUp(self):
self.data = {"text" : "Example sentences, which can be tokenized. We can also split the sentences. We can extract scores like an agatston score of 432."}
self.headers = {'Content-Type': 'application/json'}
self.base_url = "http://127.0.0.1:5000/api/"
def test_accepted_method(self):
tasks = ('simple/tokenize','simple/sentence_split','extract/health_scores')
for task in tasks:
url = self.base_url + task
r = requests.get(url, data=json.dumps(self.data), headers=self.headers)
result = r.json()
# check status code (405)
self.assertEquals(result['status'], 405)
def test_endpoint_not_found(self):
url = self.base_url + "non_existing"
r = requests.post(url, data=json.dumps(self.data), headers=self.headers)
result = r.json()
# check status code (404)
self.assertEquals(result['status'], 404)
def test_content_type(self):
tasks = ('simple/tokenize','simple/sentence_split','extract/health_scores')
self.headers['Content-Type'] = "application/xml"
for task in tasks:
url = self.base_url + task
r = requests.post(url, data=json.dumps(self.data), headers=self.headers)
result = r.json()
# check status code (415)
self.assertEquals(result['status'], 415)
def test_missing_text(self):
tasks = ('simple/tokenize','simple/sentence_split','extract/health_scores')
self.data = {"incorrect_label" : "Example sentences, which can be tokenized. We can also split the sentences. We can extract scores like an agatston score of 432."}
for task in tasks:
url = self.base_url + task
r = requests.post(url, data=json.dumps(self.data), headers=self.headers)
result = r.json()
            # check status code (400)
self.assertEquals(result['status'], 400)
class TestSimpleFunctions(unittest.TestCase):
def setUp(self):
self.data = {"text" : "Example sentences, which can be tokenized. We can also split the sentences. We can extract scores like an agatston score of 432."}
self.headers = {'Content-Type': 'application/json'}
self.base_url = "http://127.0.0.1:5000/api/simple/"
def test_tokenize(self):
url = self.base_url + "tokenize"
r = requests.post(url, data=json.dumps(self.data), headers=self.headers)
result = r.json()
# check status code (200)
self.assertEquals(result['status'], 200)
# check if result exists in json
self.assertTrue('result' in result)
        # check if length of the list matches number of tokens
        self.assertEquals(len(result['result']), 26)
def test_sentence_split(self):
url = self.base_url + "sentence_split"
r = requests.post(url, data=json.dumps(self.data), headers=self.headers)
result = r.json()
# check status code (200)
self.assertEquals(result['status'], 200)
# check if result exists in json
self.assertTrue('result' in result)
        # check if length of the list matches number of sentences
        self.assertEquals(len(result['result']), 3)
class TestExtractionFunctions(unittest.TestCase):
def setUp(self):
self.data = {"text" : "Example sentences, which can be tokenized met pizza brood voor unknownValue test. The first agatston score is 432 432 (mesa percentiel 22). We can also split the sentences. We can extract scores like an agatston score of 612 which is the 23e MESA percentiel. Other agatston scores are -9 which is too low. Also, for agatston 8,6 is incorrect. Finally, an agatston score of 40000 is too high to be possible. The following agatston is too far away to be considered useful 430."}
self.health_scores = {
'agatston' : {
'synonyms' : [
'agatston',
'agatston-score',
'agatstonscore',
'kalkscore',
'calciumscore'
],
'values' : {
'type' : "int",
'min' : 0,
'max' : 9999,
'format' : "(\-*\d+)((,|\.)\d+)?",
'group' : 0,
'position' : {
'before' : 0,
'after' : 40
}
}
},
'unknownValue' : {
'synonyms' : ["pizza", "brood"],
'values' : {}
},
'mesa' : {
'synonyms' : [
'MESA'
],
'values' : {
'type' : "int",
'min' : 0,
'max' : 100,
'format' : "\D(\d+)\D",
'group' : 1,
'position' : {
'before' : -10,
'after' : 35
}
}
}
}
self.headers = {'Content-Type': 'application/json'}
self.base_url = "http://127.0.0.1:5000/api/extract/health_scores"
def test_missing_health_scores(self):
r = requests.post(self.base_url, data=json.dumps(self.data), headers=self.headers)
result = r.json()
        # check status code (400)
self.assertEquals(result['status'], 400)
def test_multiple_values_strict(self):
self.data['health_scores'] = self.health_scores
r = requests.post(self.base_url, data=json.dumps(self.data), headers=self.headers)
result = r.json()
# assert status
self.assertEquals(result['status'], 200)
#assert existence of json result
#print result
self.assertTrue('result' in result)
# assert existence of agatston score in result
self.assertTrue('agatston' in result['result']['findings'])
self.assertTrue('mesa' in result['result']['findings'])
# assert correct value of agatston
self.assertEquals(len(result['result']['findings']['agatston']), 6)
# First 2 are 'good' findings
self.assertEquals(432, result['result']['findings']['agatston'][0]['value'])
self.assertEquals(612, result['result']['findings']['agatston'][1]['value'])
# Last 4 should be warnings (but still included for highlight / 'incorrect' value display)
self.assertTrue(-9 in [x['value'] for x in result['result']['findings']['agatston'][2]['optional'] ])
# assert correct value of mesa
self.assertEquals(len(result['result']['findings']['mesa']), 2)
self.assertEquals(22, result['result']['findings']['mesa'][0]['value'])
self.assertEquals(23, result['result']['findings']['mesa'][1]['value'])
def test_multiple_values_fuzzy(self):
self.data['health_scores'] = {
'agatston' : {
'synonyms' : [
'agatston',
'agatston-score',
'agatstonscore',
'kalkscore',
'calciumscore'
],
'values' : {}
}
}
r = requests.post(self.base_url, data=json.dumps(self.data), headers=self.headers)
result = r.json()
# assert status
self.assertEquals(result['status'], 200)
#assert existence of json result
self.assertTrue('result' in result)
# assert existence of agatston score in result
self.assertTrue('agatston' in result['result']['findings'])
# assert correct value of agatston
self.assertEquals(432, result['result']['findings']['agatston'][0]['value'])
self.assertEquals(612, result['result']['findings']['agatston'][1]['value'])
## Following values are not in the results, since 'hack' default values
# Too low
self.assertTrue(-9 in [x['value'] for x in result['result']['findings']['agatston'][2]['optional']])
# It is a float, but agatston accepts int, so give warning
self.assertTrue("8.6" in [x['value'] for x in result['result']['findings']['agatston'][3]['optional']])
# Too high
self.assertTrue(40000 in [x['value'] for x in result['result']['findings']['agatston'][4]['optional']])
def test_nonexistent_type(self):
self.data['health_scores'] = self.health_scores
self.data['health_scores']['unknownValue']['values']['type'] = "string"
r = requests.post(self.base_url, data=json.dumps(self.data), headers=self.headers)
result = r.json()
# assert status
self.assertEquals(result['status'], 400)
def test_types(self):
types = ('float','int')
self.data['health_scores'] = self.health_scores
for t in types:
self.data['health_scores']['agatston']['values']['type'] = t
r = requests.post(self.base_url, data=json.dumps(self.data), headers=self.headers)
result = r.json()
# assert status
self.assertEquals(result['status'], 200)
class TestResultFormatting(unittest.TestCase):
def setUp(self):
self.data = {"text" : "RA (rheumatoid antigen) can be positive in several different conditions beyond rheumatoid arthritis. It can also be incidentally positive without disease. If it is specifically your muscles hurting and not your joints then rheumatoid arthritis seems a bit less likely for you."}
self.health_scores = {
'rheuma' : {
'synonyms' : [
"rheumatoid disease",
"atrofische artritis",
"rheumatoid arthritis"
]
}
}
self.headers = {'Content-Type': 'application/json'}
self.base_url = "http://127.0.0.1:5000/api/extract/health_scores"
def test_without_valueDescription(self):
self.data['health_scores'] = self.health_scores
r = requests.post(self.base_url, data=json.dumps(self.data), headers=self.headers)
result = r.json()
# Check if keys are properly returned
self.assertTrue('sentences' in result['result'])
self.assertTrue('findings' in result['result'])
self.assertTrue(isinstance(result['result']['findings']['rheuma'], list))
self.assertEquals(result['status'], 200)
def test_empty_valueDescription(self):
self.data['health_scores'] = self.health_scores
self.data['health_scores']['rheuma']['values'] = {}
r = requests.post(self.base_url, data=json.dumps(self.data), headers=self.headers)
result = r.json()
self.assertTrue('sentences' in result['result'])
self.assertTrue('findings' in result['result'])
self.assertTrue(isinstance(result['result']['findings']['rheuma'], list))
self.assertEquals(result['status'], 200)
# If nothing is found it should return something
def test_empty_synonyms(self):
import copy
tempData = copy.copy(self.data)
tempData['health_scores'] = {
"rheuma" : {
"synonyms" : []
}
}
r = requests.post(self.base_url, data=json.dumps(tempData), headers=self.headers)
result = r.json()
self.assertTrue('sentences' in result['result'])
self.assertTrue('findings' in result['result'])
self.assertTrue(isinstance(result['result']['findings']['rheuma'], list))
self.assertEquals(len(result['result']['findings']['rheuma']), 0)
self.assertEquals(result['status'], 200)
def test_unfound_synonyms(self):
import copy
tempData = copy.copy(self.data)
tempData['health_scores'] = {
"rheuma" : {
"synonyms" : ["924989ufhusadfhjehrewrwer9ae"]
}
}
r = requests.post(self.base_url, data=json.dumps(tempData), headers=self.headers)
result = r.json()
self.assertTrue('sentences' in result['result'])
self.assertTrue('findings' in result['result'])
self.assertTrue(isinstance(result['result']['findings']['rheuma'], list))
self.assertEquals(len(result['result']['findings']['rheuma']), 0)
self.assertEquals(result['status'], 200)
if __name__ == '__main__':
print "Testing basic API functionality\n"
suite = unittest.TestLoader().loadTestsFromTestCase(TestBasicAPI)
unittest.TextTestRunner(verbosity=2).run(suite)
print "\n\n"
print "Testing simple text analysis functionality\n"
suite = unittest.TestLoader().loadTestsFromTestCase(TestSimpleFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
print "\n\n"
print "Testing result format\n"
suite = unittest.TestLoader().loadTestsFromTestCase(TestResultFormatting)
unittest.TextTestRunner(verbosity=2).run(suite)
print "\n\n"
print "Testing extraction functionality\n"
suite = unittest.TestLoader().loadTestsFromTestCase(TestExtractionFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
print "\n\n"
|
{
"content_hash": "4995ace8757d3496286816cb56b28aae",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 501,
"avg_line_length": 32.02521008403362,
"alnum_prop": 0.6746260823930726,
"repo_name": "904labs/ctTrakr",
"id": "4943878db4301533f5ae83497b00793f7fcccc64",
"size": "11433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21670"
}
],
"symlink_target": ""
}
|
"""User models."""
import datetime
from flask_login import UserMixin, AnonymousUserMixin
from oneiNote.database import Column, Model, SurrogatePK, db, \
reference_col, relationship
from oneiNote.extensions import bcrypt
class Role(SurrogatePK, Model):
"""A role for a user."""
__tablename__ = 'roles'
name = Column(db.String(80), unique=True, nullable=False)
user_id = reference_col('users', nullable=True)
user = relationship('User', backref='roles')
def __init__(self, name="", **kwargs):
"""Create instance."""
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
"""Represent instsance as a unique string."""
return '<Role({name})>'.format(name=self.name)
class User(UserMixin, SurrogatePK, Model):
"""A user of the app."""
__tablename__ = 'users'
username = Column(db.String(80), unique=True, nullable=False)
email = Column(db.String(80), unique=True, nullable=False)
#: The hashed password
password = Column(db.Binary(60), nullable=True)
created_at = Column(db.DateTime, nullable=False,
default=datetime.datetime.utcnow)
first_name = Column(db.String(30), nullable=True)
last_name = Column(db.String(30), nullable=True)
active = Column(db.Boolean(), default=False)
is_admin = Column(db.Boolean(), default=False)
def __init__(self, username="", email="", password=None, **kwargs):
"""Create instance."""
db.Model.__init__(self, username=username, email=email, **kwargs)
if password:
self.set_password(password)
else:
self.password = None
def __str__(self):
"""String representation of the user. Shows the users email address."""
return self.email
def set_password(self, password):
"""Set password"""
self.password = bcrypt.generate_password_hash(password)
def check_password(self, value):
"""Check password."""
return bcrypt.check_password_hash(self.password, value)
def get_id(self):
"""Return the email address to satisfy Flask-Login's requirements"""
return self.id
@property
def full_name(self):
"""Full user name."""
return "{0} {1}".format(self.first_name, self.last_name)
@property
def is_active(self):
"""Active or non active user (required by flask-login)"""
return self.active
@property
def is_authenticated(self):
"""Return True if the user is authenticated."""
if isinstance(self, AnonymousUserMixin):
return False
else:
return True
@property
def is_anonymous(self):
"""False, as anonymous users aren't supported."""
return False
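    # Illustrative, hedged usage sketch (not part of the original module);
    # assumes an application context with the database initialised:
    #
    #     user = User(username='alice', email='alice@example.com',
    #                 password='s3cret')
    #     assert user.check_password('s3cret')
    #     print(user.full_name)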
|
{
"content_hash": "9b35442ae5d0a91eddef9dfba4271345",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 30.88888888888889,
"alnum_prop": 0.6190647482014389,
"repo_name": "on3iro/oneiNote",
"id": "8a03ce67c738d0405341f6e90985ac348048050b",
"size": "2804",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "oneiNote/users/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5659"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "51226"
}
],
"symlink_target": ""
}
|
from webapp2 import RequestHandler
from src.handlers import BaseHandler
from src.settings import JINJA_ENVIRONMENT, DEBUG
from src.main.scraper import Scraper
class Index(RequestHandler):
def get(self):
template_values = {
'nav': 'home',
}
template = JINJA_ENVIRONMENT.get_template('main/templates/index.html')
self.response.write(template.render(template_values))
class Scrape(RequestHandler):
def get(self):
scraper = Scraper()
scraper.run()
self.response.write('OK')
|
{
"content_hash": "0ce30ace5698c16b636c77d696382572",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 78,
"avg_line_length": 27.45,
"alnum_prop": 0.6757741347905283,
"repo_name": "Tjorriemorrie/housing",
"id": "c77fdb678cfbfb312a2222c5af7e65b26f565381",
"size": "549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6292"
}
],
"symlink_target": ""
}
|
import re
from babel.numbers import format_currency
from muntjac.demo.sampler.ExampleUtil import ExampleUtil
from muntjac.api import VerticalLayout, Table
from muntjac.ui.window import Notification
from muntjac.ui.table import IHeaderClickListener, IFooterClickListener
from muntjac.util import defaultLocale
class TableClickListenersExample(VerticalLayout):
CURRENCY_PATTERN = u'([\u00A3\u0024\u20AC])(\d+(?:\.\d{2})?)'
def __init__(self):
super(TableClickListenersExample, self).__init__()
# Create our data source
dataSource = ExampleUtil.getOrderContainer()
# Calculate total sum
totalSum = 0.0
for i in range(len(dataSource)):
item = dataSource.getItem(dataSource.getIdByIndex(i))
value = item.getItemProperty(
ExampleUtil.ORDER_ITEMPRICE_PROPERTY_ID).getValue()
match = re.search(self.CURRENCY_PATTERN, str(value))
if match is not None:
amount = match.groups()[1]
totalSum += float(amount)
# Create table
table = Table('', ExampleUtil.getOrderContainer())
table.setColumnExpandRatio(ExampleUtil.ORDER_DESCRIPTION_PROPERTY_ID, 1)
table.setSortDisabled(True)
table.setWidth('100%')
table.setPageLength(6)
table.setFooterVisible(True)
table.setImmediate(True)
# Add some total sum and description to footer
table.setColumnFooter(ExampleUtil.ORDER_DESCRIPTION_PROPERTY_ID,
'Total Price')
l = defaultLocale()
fc = format_currency(totalSum, currency='USD', locale=l).encode('utf-8')
table.setColumnFooter(ExampleUtil.ORDER_ITEMPRICE_PROPERTY_ID, fc)
# Add a header click handler
table.addListener(HeaderListener(self), IHeaderClickListener)
# Add a footer click handler
table.addListener(FooterListener(self), IFooterClickListener)
self.addComponent(table)
def showHeaderHelpText(self, column):
"""Shows some help text when clicking on the header
@param column
"""
notification = None
# Description
if column == ExampleUtil.ORDER_DESCRIPTION_PROPERTY_ID:
notification = Notification(str(column) + '<br>',
'The description describes the type of product '
'that has been ordered.')
# Item price
elif column == ExampleUtil.ORDER_ITEMPRICE_PROPERTY_ID:
notification = Notification(str(column) + '<br>',
'The item price is calculated by multiplying '
'the unit price with the quantity.')
# Quantity
elif column == ExampleUtil.ORDER_QUANTITY_PROPERTY_ID:
notification = Notification(str(column) + '<br>',
'The quantity describes how many items has been ordered.')
# Unit price
elif column == ExampleUtil.ORDER_UNITPRICE_PROPERTY_ID:
notification = Notification(str(column) + '<br>',
'The unit price is how much a single items costs. '
'Taxes included.')
else:
return
self.getWindow().showNotification(notification)
def showFooterHelpText(self, column):
"""Shows a footer help text
@param column
"""
notification = Notification('Total Price<br>',
'The total price is calculated by summing every items '
'item price together.')
self.getWindow().showNotification(notification)
class HeaderListener(IHeaderClickListener):
def __init__(self, c):
self._c = c
def headerClick(self, event):
# Show a notification help text when the user clicks on a
# column header
self._c.showHeaderHelpText(event.getPropertyId())
class FooterListener(IFooterClickListener):
def __init__(self, c):
self._c = c
def footerClick(self, event):
# Show a notification help text when the user clicks on a
# column footer
self._c.showFooterHelpText(event.getPropertyId())
|
{
"content_hash": "c336b5ec611ce3444b65544f8e6e487f",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 80,
"avg_line_length": 34.31404958677686,
"alnum_prop": 0.6274084778420038,
"repo_name": "rwl/muntjac",
"id": "15f82014c48cdc6366dd6c6a926e732f3fc33203",
"size": "4153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "muntjac/demo/sampler/features/table/TableClickListenersExample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8602"
},
{
"name": "Java",
"bytes": "2243"
},
{
"name": "JavaScript",
"bytes": "32438"
},
{
"name": "Python",
"bytes": "3212361"
}
],
"symlink_target": ""
}
|
import logging
import random
import time
from config import get_self_fqdn
from redis_manager import healthy_root_redis
from topology import get_best_source
def health_check_leaf(local_rd, info):
# There is a chance that cascade.py has restarted and last time we self-selected to be
# a branch, but this time we're not. If we're in the list of branches, then we should remove
# ourselves.
local_branches = local_rd.smembers('cascade:branches')
root_rd = None
if get_self_fqdn() in local_branches:
root_rd, _ = healthy_root_redis(local_rd)
if root_rd:
logging.warning('Leaf node removing myself from list of branches.')
root_rd.srem('cascade:branches', get_self_fqdn())
# If we have slaves, update the root so it can count our clients.
if info.get('connected_slaves', 0) > 0:
if not root_rd:
root_rd, _ = healthy_root_redis(local_rd)
if root_rd:
root_rd.sadd('cascade:draining-branches', get_self_fqdn())
# Following from the above case, if our branch is no longer in the branches list, we could
# nicely choose to move off of them. We don't, however, because then we can cause rebalance
# storms when cascade gets rolled globally (and self-selected branches all move).
#
# Instead, let the periodic rebalance take care of the problem, since it won't pick leaf nodes
# next time.
# Periodic Rebalance:
#
# With some percentage chance (this fires every 1 second) we want to pick a new branch and
# move to it. This provides dumb but probably effective rebalancing for the tree.
if random.random() <= 0.0017: # 1 in 600 (10 minute average)
leaf_rebalance(local_rd, info)
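    # (For reference, not an original comment: with p = 0.0017 ~= 1/600 per
    #  one-second tick, the expected gap between rebalances is about 600 s,
    #  matching the "10 minute average" noted above.)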
def leaf_rebalance(local_rd, info):
branch_host, branch_ip = get_best_source(local_rd)
if not branch_host:
logging.error('Periodic rebalance found no available, healthy branches!')
return
if branch_ip == info['master_host']:
# If we manage to select our own branch again, let's just pretend this didn't happen.
# This rebalance was not meant to be.
return
logging.info('Periodic rebalance, picked new branch: %s(%s).' % (branch_host, branch_ip))
local_rd.slaveof(host=branch_ip, port=2578)
time.sleep(3) # Give a little extra time to start the sync.
|
{
"content_hash": "9a2c812d17cca9d41e06f0746230f8cd",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 98,
"avg_line_length": 42.81818181818182,
"alnum_prop": 0.6764331210191082,
"repo_name": "zorkian/cascade",
"id": "970016a227b48bbd0cf63b8962064b295f9ce23a",
"size": "2355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cascade/leaf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24637"
}
],
"symlink_target": ""
}
|
''' Nif User Interface, connects custom properties from properties.py into Blender's UI'''
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright © 2005-2015, NIF File Format Library and Tools contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
import bpy
from bpy.types import Panel
class CollisionBoundsPanel(Panel):
bl_label = "Collision Bounds"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "physics"
'''
@classmethod
def poll(cls, context):
'''
def draw_header(self, context):
game = context.active_object.game
self.layout.prop(game, "use_collision_bounds", text="")
def draw(self, context):
layout = self.layout
game = context.active_object.game
col_setting = context.active_object.nifcollision
layout.active = game.use_collision_bounds
layout.prop(game, "collision_bounds_type", text="Bounds Type")
layout.prop(game, "radius", text="Radius")
layout.prop(game, "velocity_max", text="Velocity Max")
box = layout.box()
box.active = game.use_collision_bounds
box.prop(col_setting, "col_filter", text='Col Filter') # col filter prop
box.prop(col_setting, "deactivator_type", text='Deactivator Type') # motion dactivation prop
box.prop(col_setting, "solver_deactivation", text='Solver Deactivator') # motion dactivation prop
box.prop(col_setting, "quality_type", text='Quality Type') # quality type prop
box.prop(col_setting, "oblivion_layer", text='Oblivion Layer') # oblivion layer prop
box.prop(col_setting, "max_linear_velocity", text='Max Linear Velocity') # oblivion layer prop
box.prop(col_setting, "max_angular_velocity", text='Max Angular Velocity') # oblivion layer prop
box.prop(col_setting, "motion_system", text='Motion System') # motion system prop
box.prop(col_setting, "havok_material", text='Havok Material') # havok material prop
con_setting = context.active_object.niftools_constraint
box.prop(con_setting, "LHMaxFriction", text='LHMaxFriction')
box.prop(con_setting, "tau", text='tau')
box.prop(con_setting, "damping", text='Damping')
|
{
"content_hash": "153bf98d98190facaccd799f01d3cf0f",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 105,
"avg_line_length": 43.73863636363637,
"alnum_prop": 0.6923876331514679,
"repo_name": "nightstrike/blender_nif_plugin",
"id": "5b5c7e11613ae02b44fb1a0b4799094fe925594f",
"size": "3850",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "io_scene_nif/ui/collision.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3668"
},
{
"name": "Python",
"bytes": "1119385"
},
{
"name": "Shell",
"bytes": "2692"
}
],
"symlink_target": ""
}
|
import os
_basedir = os.path.abspath(os.path.dirname(__file__))
class DefaultConfig(object):
DEBUG = False
TESTING = False
SQLALCHEMY_DATABASE_URI = 'sqlite:////var/lib/pony-express/ponyexpress.db'
REQUEST_LOG = '/var/log/ponyexpress.log'
class ProductionConfig(DefaultConfig):
SQLALCHEMY_DATABASE_URI = 'sqlite:////var/lib/pony-express/ponyexpress.db'
class DevelopmentConfig(DefaultConfig):
#SQLALCHEMY_ECHO = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(_basedir, '../../', 'ponyexpress.db')
REQUEST_LOG = os.path.join(_basedir, '../../ponyexpress.log')
class TestingConfig(DefaultConfig):
TESTING = True
SQLALCHEMY_ECHO = False
SQLALCHEMY_DATABASE_URI = 'sqlite://'
REQUEST_LOG = os.path.join(_basedir, 'ponyexpress.log')
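# Illustrative, hedged usage sketch (not part of the original module); the
# dotted import path below assumes this repository's package layout:
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.config.from_object(
#         'ponyexpress.config.configuration.DevelopmentConfig')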
|
{
"content_hash": "d854326bd1d2b0f7dfe84dc738c30913",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 95,
"avg_line_length": 29.071428571428573,
"alnum_prop": 0.6855036855036855,
"repo_name": "TelekomCloud/pony-express",
"id": "a5162cb0e75f103921280bc4ff7dc93ebc143b1a",
"size": "814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ponyexpress/config/configuration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "79350"
}
],
"symlink_target": ""
}
|
""" Define and register a listing directive using the existing CodeBlock """
from __future__ import unicode_literals
from codecs import open as codecs_open # for patching purposes
try:
from urlparse import urlunsplit
except ImportError:
from urllib.parse import urlunsplit # NOQA
from docutils import core
from docutils.parsers.rst import directives
try:
from docutils.parsers.rst.directives.body import CodeBlock
except ImportError: # docutils < 0.9 (Debian Sid For The Loss)
from dummy import CodeBlock # NOQA
import os
class Listing(CodeBlock):
""" listing directive: create a CodeBlock from file
Usage:
.. listing:: nikola.py python
:number-lines:
"""
has_content = False
required_arguments = 1
optional_arguments = 1
option_spec = {
'start-at': directives.unchanged,
'end-at': directives.unchanged,
'start-after': directives.unchanged,
'end-before': directives.unchanged,
}
def run(self):
fname = self.arguments.pop(0)
with codecs_open(os.path.join('listings', fname), 'rb+', 'utf8') as fileobject:
self.content = fileobject.read().splitlines()
self.trim_content()
target = urlunsplit(("link", 'listing', fname, '', ''))
generated_nodes = (
[core.publish_doctree('`{0} <{1}>`_'.format(fname, target))[0]])
generated_nodes += self.get_code_from_file(fileobject)
return generated_nodes
def trim_content(self):
"""Cut the contents based in options."""
start = 0
end = len(self.content)
if 'start-at' in self.options:
for start, l in enumerate(self.content):
if self.options['start-at'] in l:
break
else:
start = 0
elif 'start-before' in self.options:
for start, l in enumerate(self.content):
if self.options['start-before'] in l:
if start > 0:
start -= 1
break
else:
start = 0
if 'end-at' in self.options:
for end, l in enumerate(self.content):
if self.options['end-at'] in l:
break
else:
end = len(self.content)
elif 'end-before' in self.options:
for end, l in enumerate(self.content):
if self.options['end-before'] in l:
end -= 1
break
else:
end = len(self.content)
self.content = self.content[start:end]
def get_code_from_file(self, data):
""" Create CodeBlock nodes from file object content """
return super(Listing, self).run()
def assert_has_content(self):
""" Listing has no content, override check from superclass """
pass
directives.register_directive('listing', Listing)
|
{
"content_hash": "fbc7af3ba0353930a6e188b2cb220100",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 87,
"avg_line_length": 31.189473684210526,
"alnum_prop": 0.5693553830577118,
"repo_name": "kotnik/nikola",
"id": "eb60055e5e6fae27ca3bd25cdc4b09d1dec6a0c1",
"size": "4106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nikola/plugins/compile_rest/listing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "243896"
},
{
"name": "JavaScript",
"bytes": "191598"
},
{
"name": "Python",
"bytes": "442653"
},
{
"name": "Shell",
"bytes": "663"
}
],
"symlink_target": ""
}
|
import json
import argparse
import Queue
import socket
import datetime
import time
import os
import mosquitto
import logging
import logging.handlers
import math
from mqttrpc.client import TMQTTRPCClient
from jsonrpc.exceptions import JSONRPCError
logger = logging.getLogger('')
handler = logging.handlers.SysLogHandler(address = '/dev/log')
formatter = logging.Formatter('sensor-tools-client: %(levelname)s:%(name)s:%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
ch = logging.StreamHandler()
ch_formatter = logging.Formatter('%(asctime)s - %(name)s:%(levelname)s:%(message)s')
ch.setFormatter(ch_formatter)
logger.addHandler(ch)
logging.getLogger('').setLevel(logging.WARNING)
class TServerConnection(object):
MAX_RECONNECT_INTERVAL = 60
MIN_RECONNECT_INTERVAL = 0.1
RESPONSE_OK = '@OK'
RESPONSE_FAIL = '@FAIL'
POLLING_INTERVAL = 60
DEFAULT_SAVING_INTERVAL = 300
def __init__(self, host, port, timeout=1, saving_interval = None):
self.host = host
self.port = port
self.timeout = timeout
self.sock = None
self.fd = None
self.saving_interval = saving_interval or self.DEFAULT_SAVING_INTERVAL
def reconnect(self):
if self.sock:
try:
self.sock.close()
except:
pass
reconnect_interval = self.MIN_RECONNECT_INTERVAL
while True:
try:
self.connect()
except socket.error, err:
logging.exception("connect failed")
time.sleep(reconnect_interval)
reconnect_interval = min(self.MAX_RECONNECT_INTERVAL, reconnect_interval * 2)
else:
return
def connect(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
self.sock.settimeout(self.timeout)
self.fd = self.sock.makefile()
def _escape(self, var):
if isinstance(var, unicode):
var = var.encode('utf-8', 'ignore')
if not isinstance(var, str):
var = str(var)
return var.replace(':', '').replace(';','').replace('#','')
def format_request(self, client_id, timestamp, live, channels):
parts = [ ("id", self._escape(client_id)),
('datetime', timestamp.strftime('%Y%m%d%H%M%S')),
('saving_interval' , self.saving_interval),
('live' , int(live)),
]
for k, v in channels.iteritems():
try:
_v = float(v)
except ValueError:
pass
else:
if not math.isnan(_v) and not math.isinf(_v):
parts.append(('t%d' % k, v))
var_data = ";".join( "%s:%s" % (k, self._escape(v)) for k, v in parts)
return "".join( ("#", var_data, "#"))
def do_request(self, req):
""" sends request and returns response (without trailing newline).
if socket is closed, returns None
throws socket.timeout on timeout """
logging.debug("Sending request: '%s'" % req)
self.fd.write(req)
self.fd.write('\n')
self.fd.flush()
response = self.fd.readline()
if response:
# strip \r\n
return response[:-2]
else:
return None
class TSensorToolsClient(object):
DEFAULT_TCP_TIMEOUT = 20
DEFAULT_QUEUE_SIZE = 100
DEFAULT_RPC_TIMEOUT = 10
RPC_MAX_ROWS_PER_QUERY = 100
SERVER_FAIL_RETRY_TIMEOUT = 10
SERVER_FAIL_RETRY_ATTEMPTS = 5
RPC_ERROR_RETRY_TIMEOUT = 10
def _ensure_config_var(self, obj, var):
if var not in obj:
raise RuntimeError('Missing mandatory option %s in config' % var)
def load_last_success_item(self):
if os.path.exists(self.last_success_item_fname):
try:
parts = open(self.last_success_item_fname).read().strip().split()
self.last_success_timestamp = float(parts[0])
self.last_success_uid = int(parts[1])
return
except:
logging.warning("can't load last sucessfull item data from file")
def set_last_success_item(self, timestamp, uid = None):
self.last_success_timestamp = timestamp
if uid is not None:
self.last_success_uid = uid
if not os.path.exists(os.path.dirname(self.last_success_item_fname)):
os.makedirs(os.path.dirname(self.last_success_item_fname))
open(self.last_success_item_fname, 'wt').write("%f %d\n" % (self.last_success_timestamp, self.last_success_uid))
def __init__(self, config):
self.mqtt_client = mosquitto.Mosquitto()
self.config = config
self.live_mode = False
self.live_queue = Queue.Queue(maxsize=int(self.config.get('queue_size', self.DEFAULT_QUEUE_SIZE)))
self._ensure_config_var(self.config, 'server_host')
self._ensure_config_var(self.config, 'server_port')
self._ensure_config_var(self.config, 'channels')
self.scada_client_id = self.config['client_id']
self.last_success_timestamp = 0
self.last_success_uid = -1
self.last_success_item_fname = self.config.get('last_success_file', '/var/lib/sensor-tools/last_success_item.dat')
self.load_last_success_item()
self.channel_map = {}
for channel_id, channel in self.config['channels'].iteritems():
device_id, control_id = channel
self.channel_map[(str(device_id), str(control_id))] = int(channel_id)
self.scada_conn = TServerConnection(self.config['server_host'], self.config['server_port'],
self.config.get('tcp_timeout', self.DEFAULT_TCP_TIMEOUT),
saving_interval = self.config.get('saving_interval_prop'))
if self.config.get('mqtt_username'):
self.mqtt_client.username_pw_set(self.config['mqtt_username'], self.config.get('mqtt_password', ''))
self.rpc_client = TMQTTRPCClient(self.mqtt_client)
self.rpc_timeout = float(self.config.get('rpc_timeout', self.DEFAULT_RPC_TIMEOUT))
self.mqtt_client.on_message = self.on_mqtt_message
self.mqtt_client.on_connect = self.on_mqtt_connect
self.mqtt_client.connect(self.config.get('mqtt_host', 'localhost'), self.config.get('mqtt_port', 1883))
self.mqtt_client.loop_start()
def on_mqtt_message(self, mosq, obj, msg):
if self.rpc_client.on_mqtt_message(mosq, obj, msg):
return
if not self.live_mode:
return
if not mosquitto.topic_matches_sub('/devices/+/controls/+', msg.topic):
return
parts = msg.topic.split('/')
device_id = parts[2]
control_id = parts[4]
channel_id = self.channel_map.get((device_id, control_id))
if channel_id:
try:
self.live_queue.put_nowait((datetime.datetime.now(), channel_id, msg.payload))
except Queue.Full, exc:
logging.info("Setting live_mode to false")
self.live_mode = False
# do not call Queue methods inside 'with' block!
# Queue.join() won't work after clearing in this way
with self.live_queue.mutex:
self.live_queue.queue.clear()
def on_mqtt_connect(self, mosq, obj, rc):
for device_id, control_id in self.channel_map.iterkeys():
topic = "/devices/%s/controls/%s" % (device_id, control_id)
self.mqtt_client.subscribe(topic)
def process_data_item(self, channel_id, dt, value, live_mode):
req = self.scada_conn.format_request(self.scada_client_id, dt, live=live_mode, channels={channel_id : value})
        # try to send the line to the server; on a timeout keep retrying for as long as we are in live mode
        # if the server answers FAIL, (for now) do the same, but without reconnecting
server_fail_number = 0
while True:
if live_mode:
if not self.live_mode:
return False
try:
resp = self.scada_conn.do_request(req)
except (socket.timeout, socket.error):
logging.warning("request timeout, reconnecting")
self.scada_conn.reconnect()
else:
if resp == self.scada_conn.RESPONSE_OK:
return True
elif resp == self.scada_conn.RESPONSE_FAIL:
server_fail_number += 1
if server_fail_number >= self.SERVER_FAIL_RETRY_ATTEMPTS:
logging.warning("server returned @FAIL %d times, giving up" % server_fail_number)
return False
else:
logging.warning("server returned @FAIL, trying again")
time.sleep(self.SERVER_FAIL_RETRY_TIMEOUT)
elif resp is None:
logging.warning("server closed the connection")
self.scada_conn.reconnect()
else:
logging.warning("unexpected answer from server: '%s'" % resp)
self.scada_conn.reconnect()
def do_archive_mode_work(self):
""" return True if there is no more data to send"""
resp = self.rpc_client.call('db_logger', 'history', 'get_values', {
'channels': self.channel_map.keys(),
'timestamp' : {
'gt': self.last_success_timestamp,
},
'uid' : {
'gt': self.last_success_uid,
},
'limit' : self.RPC_MAX_ROWS_PER_QUERY,
}, self.rpc_timeout)
for row in resp.get(u'values', ()):
channel_id = self.channel_map.get((str(row[u'device']),str(row[u'control'])))
assert channel_id is not None
value = str(row[u'value'])
dt = datetime.datetime.fromtimestamp(row[u'timestamp'])
if self.process_data_item(channel_id, dt, value, live_mode=False):
self.set_last_success_item(row[u'timestamp'], row[u'uid'])
if not resp.get(u'has_more'):
return True
def loop(self):
""" main loop"""
self.scada_conn.reconnect()
while 1:
if self.live_mode:
dt, channel_id, value = self.live_queue.get(timeout=1E100)
if self.process_data_item(channel_id, dt, value, live_mode=True):
self.set_last_success_item(time.mktime(dt.timetuple()))
else:
logging.debug("not in live mode")
try:
done = self.do_archive_mode_work()
except AssertionError:
raise
except:
logging.exception("error in archive mode handler")
time.sleep(self.RPC_ERROR_RETRY_TIMEOUT)
else:
if done:
logging.debug("going back to live mode")
self.live_mode = True
def main():
parser = argparse.ArgumentParser(description='Sensor-Tools TCP SCADA client', add_help=False)
parser.add_argument('config_file', type=str,
help='Config file location')
parser.add_argument('-d', '--debug', dest='debug', action='store_true',
help='debug')
args = parser.parse_args()
config = json.load(open(args.config_file))
if args.debug or config.get('debug', None):
logging.getLogger().setLevel(logging.DEBUG)
scada_client = TSensorToolsClient(config)
scada_client.loop()
if __name__ == "__main__":
main()
|
{
"content_hash": "be34a02c645e9ebef3af0111cf78489f",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 122,
"avg_line_length": 33.29234972677595,
"alnum_prop": 0.5533032416906032,
"repo_name": "contactless/sensor-tools-scada-client",
"id": "8027663f72f9d2db5a4dd7f8d5c0d6f734a80860",
"size": "12333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sensor-tools-client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "202"
},
{
"name": "Python",
"bytes": "12828"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Localisation.logo_image'
db.add_column(u'cms_localisation', 'logo_image',
self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'Localisation.logo_image_height'
db.add_column(u'cms_localisation', 'logo_image_height',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'Localisation.logo_image_width'
db.add_column(u'cms_localisation', 'logo_image_width',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
# Adding field 'Localisation.logo_text'
db.add_column(u'cms_localisation', 'logo_text',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'Localisation.logo_description'
db.add_column(u'cms_localisation', 'logo_description',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Localisation.logo_image'
db.delete_column(u'cms_localisation', 'logo_image')
# Deleting field 'Localisation.logo_image_height'
db.delete_column(u'cms_localisation', 'logo_image_height')
# Deleting field 'Localisation.logo_image_width'
db.delete_column(u'cms_localisation', 'logo_image_width')
# Deleting field 'Localisation.logo_text'
db.delete_column(u'cms_localisation', 'logo_text')
# Deleting field 'Localisation.logo_description'
db.delete_column(u'cms_localisation', 'logo_description')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'cms.category': {
'Meta': {'ordering': "('position', 'title')", 'object_name': 'Category'},
'featured_in_navbar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'image_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'image_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'last_author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'category_last_author'", 'null': 'True', 'to': u"orm['auth.User']"}),
'localisation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cms.Localisation']", 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cms.Category']", 'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'cms.contentrepository': {
'Meta': {'object_name': 'ContentRepository'},
'custom_license_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'custom_license_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'default': "'CC-BY-NC-ND-4.0'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'targets': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cms.PublishingTarget']", 'symmetrical': 'False'}),
'url': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'cms.localisation': {
'Meta': {'object_name': 'Localisation'},
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'image_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'image_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'logo_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'logo_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_image_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'logo_image_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'logo_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'cms.post': {
'Meta': {'ordering': "('position', '-created_at')", 'object_name': 'Post'},
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_in_category': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'image_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'image_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'last_author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'post_last_author'", 'null': 'True', 'to': u"orm['auth.User']"}),
'localisation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cms.Localisation']", 'null': 'True', 'blank': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'primary_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'primary_modelbase_set'", 'null': 'True', 'to': u"orm['cms.Category']"}),
'related_posts': ('sortedm2m.fields.SortedManyToManyField', [], {'symmetrical': 'False', 'related_name': "'related_posts_set'", 'blank': 'True', 'to': u"orm['cms.Post']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cms.Post']", 'null': 'True', 'blank': 'True'}),
'subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'cms.publishingtarget': {
'Meta': {'object_name': 'PublishingTarget'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cms']
|
{
"content_hash": "689781bf659fe241925101221cae1849",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 195,
"avg_line_length": 77,
"alnum_prop": 0.560285393618727,
"repo_name": "universalcore/unicore-cms-django",
"id": "ec6a7a1da824cda05402aabea1005a11e139a7c4",
"size": "12498",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms/migrations/0025_auto__add_field_localisation_logo_image__add_field_localisation_logo_i.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "779"
},
{
"name": "HTML",
"bytes": "20961"
},
{
"name": "Python",
"bytes": "392191"
},
{
"name": "Shell",
"bytes": "2043"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import re
import six
from six import reraise
from six.moves import range
from impala.util import _escape
from impala.error import (Error, Warning, InterfaceError, DatabaseError,
InternalError, OperationalError, ProgrammingError,
IntegrityError, DataError, NotSupportedError)
class Connection(object):
# PEP 249
# Connection objects are associated with a TCLIService.Client thrift
# service
# it's instantiated with an alive TCLIService.Client
def close(self):
# PEP 249
raise NotImplementedError
def commit(self):
# PEP 249
raise NotImplementedError
def rollback(self):
# PEP 249
raise NotImplementedError
def cursor(self, session_handle=None, user=None, configuration=None):
# PEP 249
raise NotImplementedError
def reconnect(self):
raise NotImplementedError
def kerberized(self):
# returns bool whether underlying service is kerberized or not
from thrift_sasl import TSaslClientTransport
if isinstance(self.service._iprot.trans, TSaslClientTransport):
if self.service._iprot.trans.mechanism == 'GSSAPI':
return True
return False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
if exc_type is not None:
reraise(exc_type, exc_val, exc_tb)
# optional DB API addition to make the errors attributes of Connection
Error = Error
Warning = Warning
InterfaceError = InterfaceError
DatabaseError = DatabaseError
InternalError = InternalError
OperationalError = OperationalError
ProgrammingError = ProgrammingError
IntegrityError = IntegrityError
DataError = DataError
NotSupportedError = NotSupportedError
class Cursor(object):
"""Abstract representation of Cursor"""
def description(self):
raise NotImplementedError
def rowcount(self):
raise NotImplementedError
def query_string(self):
raise NotImplementedError
def get_arraysize(self):
raise NotImplementedError
def set_arraysize(self, arraysize):
raise NotImplementedError
def buffersize(self):
raise NotImplementedError
def has_result_set(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
def close_operation(self):
raise NotImplementedError
def execute(self, operation, parameters=None, configuration=None):
raise NotImplementedError
def _execute_sync(self, operation_fn):
raise NotImplementedError
def _reset_state(self):
raise NotImplementedError
def _wait_to_finish(self):
raise NotImplementedError
def executemany(self, operation, seq_of_parameters):
raise NotImplementedError
def fetchone(self):
raise NotImplementedError
def fetchmany(self, size=None):
raise NotImplementedError
def fetchall(self):
raise NotImplementedError
def setinputsizes(self, sizes):
raise NotImplementedError
def setoutputsize(self, size, column=None):
raise NotImplementedError
def __iter__(self):
raise NotImplementedError
def __next__(self):
raise NotImplementedError
def next(self):
# for py2 compat
return self.__next__()
def ping(self):
raise NotImplementedError
def get_log(self):
raise NotImplementedError
def get_profile(self):
raise NotImplementedError
def get_summary(self):
raise NotImplementedError
def build_summary_table(self, summary, idx, is_fragment_root, indent_level,
output):
raise NotImplementedError
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
if exc_type is not None:
reraise(exc_type, exc_val, exc_tb)
def _replace_numeric_markers(operation, string_parameters):
"""
    Replaces qmark and numeric markers in the given operation, from
the string_parameters list.
Raises ProgrammingError on wrong number of parameters or bindings
when using qmark. There is no error checking on numeric parameters.
"""
# replace qmark parameters
param_count = len(string_parameters)
qmark_index = 0
while operation.find('?') > -1:
if qmark_index < param_count:
operation = operation.replace('?',
string_parameters[qmark_index],
1)
qmark_index += 1
else:
raise ProgrammingError("Incorrect number of bindings "+
"supplied. The current statement uses "
"%d or more, and there are %d supplied." %
(qmark_index+1, param_count))
if qmark_index != 0 and qmark_index != param_count:
raise ProgrammingError("Incorrect number of bindings "+
"supplied. The current statement uses "
"%d or more, and there are %d supplied." %
(qmark_index+1, param_count))
# replace numbered parameters
# Go through them backwards so smaller numbers don't replace
# parts of larger ones
for index in range(param_count, 0, -1):
operation = operation.replace(':' + str(index),
string_parameters[index-1])
return operation
def _bind_parameters_list(operation, parameters):
string_parameters = []
for value in parameters:
if value is None:
string_parameters.append('NULL')
elif isinstance(value, six.string_types):
string_parameters.append("'" + _escape(value) + "'")
else:
string_parameters.append(str(value))
# replace qmark and numeric parameters
return _replace_numeric_markers(operation, string_parameters)
def _bind_parameters_dict(operation, parameters):
string_parameters = {}
for (name, value) in six.iteritems(parameters):
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, six.string_types):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
# replace named parameters by their pyformat equivalents
    operation = re.sub(r":([^\d\W]\w*)", r"%(\g<1>)s", operation)
# replace pyformat parameters
return operation % string_parameters
def _bind_parameters(operation, parameters):
# If parameters is a list, assume either qmark or numeric
# format. If not, assume either named or pyformat parameters
if isinstance(parameters, (list, tuple)):
return _bind_parameters_list(operation, parameters)
elif isinstance(parameters, dict):
return _bind_parameters_dict(operation, parameters)
else:
raise ProgrammingError("Query parameters argument should be a "+
"list, tuple, or dict object")
|
{
"content_hash": "5ccbee9d1cda4c44970b96bd38169f42",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 79,
"avg_line_length": 30.43404255319149,
"alnum_prop": 0.6387024608501118,
"repo_name": "mariusvniekerk/impyla",
"id": "c629ddd10b7f45403ed34366c9b2b2a2782a18ca",
"size": "7730",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "impala/dbapi/interface.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "30000"
},
{
"name": "Makefile",
"bytes": "1226"
},
{
"name": "Python",
"bytes": "2535355"
},
{
"name": "Shell",
"bytes": "7014"
},
{
"name": "Thrift",
"bytes": "87254"
}
],
"symlink_target": ""
}
|
"""
Load the cornell movie dialog corpus.
Available from here:
http://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html
"""
class CornellData:
"""
"""
def __init__(self, dirName):
"""
Args:
dirName (string): directory where to load the corpus
"""
self.lines = {}
self.conversations = []
MOVIE_LINES_FIELDS = ["lineID","characterID","movieID","character","text"]
MOVIE_CONVERSATIONS_FIELDS = ["character1ID","character2ID","movieID","utteranceIDs"]
self.lines = self.loadLines(dirName + "movie_lines.txt", MOVIE_LINES_FIELDS)
self.conversations = self.loadConversations(dirName + "movie_conversations.txt", MOVIE_CONVERSATIONS_FIELDS)
# TODO: Cleaner program (merge copy-paste) !!
def loadLines(self, fileName, fields):
"""
Args:
fileName (str): file to load
            fields (list<str>): fields to extract
Return:
dict<dict<str>>: the extracted fields for each line
"""
lines = {}
with open(fileName, 'r', encoding='iso-8859-1') as f: # TODO: Solve Iso encoding pb !
for line in f:
values = line.split(" +++$+++ ")
# Extract fields
lineObj = {}
for i, field in enumerate(fields):
lineObj[field] = values[i]
lines[lineObj['lineID']] = lineObj
return lines
def loadConversations(self, fileName, fields):
"""
Args:
fileName (str): file to load
            fields (list<str>): fields to extract
Return:
            list<dict>: the extracted conversations, each including its lines
"""
conversations = []
with open(fileName, 'r', encoding='iso-8859-1') as f: # TODO: Solve Iso encoding pb !
for line in f:
values = line.split(" +++$+++ ")
# Extract fields
convObj = {}
for i, field in enumerate(fields):
convObj[field] = values[i]
lineIds = convObj["utteranceIDs"][2:-3].split("', '")
#print(convObj["utteranceIDs"])
#for lineId in lineIds:
#print(lineId, end=' ')
#print()
# Reassemble lines
convObj["lines"] = []
for lineId in lineIds:
convObj["lines"].append(self.lines[lineId])
conversations.append(convObj)
return conversations
def getConversations(self):
return self.conversations
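# Illustrative usage sketch (not part of the class); the directory path is an
# assumption. Note that dirName must end with a path separator, because the
# corpus file names are appended to it directly.
#
#   data = CornellData("data/cornell/")
#   conversations = data.getConversations()
#   first_utterance = conversations[0]["lines"][0]["text"]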
|
{
"content_hash": "ec2531e1be61f62f5a83e03a90c9757b",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 116,
"avg_line_length": 29.786516853932586,
"alnum_prop": 0.5224443606186345,
"repo_name": "Fogapod/ChatBot",
"id": "1248ab27dfc41e2769e3a16507926ebedfc5f857",
"size": "3333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chatbot/cornelldata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "95568"
}
],
"symlink_target": ""
}
|
import cStringIO
import textwrap
__all__ = ["print_nested_list"]
def tokenize_list(obj):
if isinstance(obj, list):
yield "("
for item in obj:
for elem in tokenize_list(item):
yield elem
yield ")"
else:
yield obj
def wrap_lines(lines):
for line in lines:
indent = " " * (len(line) - len(line.lstrip()) + 4)
line = line.replace("-", "_") # textwrap breaks on "-", but not "_"
line = textwrap.fill(line, subsequent_indent=indent, break_long_words=False)
yield line.replace("_", "-")
def print_nested_list(nested_list):
stream = cStringIO.StringIO()
indent = 0
startofline = True
pendingspace = False
for token in tokenize_list(nested_list):
if token == "(":
if not startofline:
stream.write("\n")
stream.write("%s(" % (" " * indent))
indent += 2
startofline = False
pendingspace = False
elif token == ")":
indent -= 2
stream.write(")")
startofline = False
pendingspace = False
else:
if startofline:
stream.write(" " * indent)
if pendingspace:
stream.write(" ")
stream.write(token)
startofline = False
pendingspace = True
for line in wrap_lines(stream.getvalue().splitlines()):
print line
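# Illustrative usage sketch (not part of the module); the nested list below is an
# assumption chosen to show the s-expression style output:
#
#   print_nested_list(["define", ["action", "move"], ["parameters", "?x", "?y"]])
#   # prints:
#   # (define
#   #   (action move)
#   #   (parameters ?x ?y))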
|
{
"content_hash": "19df31289a7f62fde2c1daabe0a280d1",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 84,
"avg_line_length": 28.745098039215687,
"alnum_prop": 0.5163710777626194,
"repo_name": "amelim/RIP-2013",
"id": "2b515699ce186f0c1ac408a2fae30483d1e065f1",
"size": "1466",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "solvers/lama-2011/src/translate/pddl/pretty_print.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1126811"
},
{
"name": "C++",
"bytes": "2510509"
},
{
"name": "Objective-C",
"bytes": "703"
},
{
"name": "Perl",
"bytes": "13696"
},
{
"name": "Python",
"bytes": "226529"
},
{
"name": "Shell",
"bytes": "89876"
}
],
"symlink_target": ""
}
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Find-GPOLocation',
'Author': ['@harmj0y'],
'Description': ('Takes a user/group name and optional domain, and determines the computers in the domain the user/group has local admin (or RDP) rights to. Part of PowerView.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/PowerShellEmpire/PowerTools/tree/master/PowerView'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'UserName' : {
                'Description'   :   'A (single) user name to query for access.',
'Required' : False,
'Value' : ''
},
'GroupName' : {
                'Description'   :   'A (single) group name to query for access.',
'Required' : False,
'Value' : ''
},
'Domain' : {
'Description' : 'The domain to use for the query, defaults to the current domain.',
'Required' : False,
'Value' : ''
},
'DomainController' : {
'Description' : 'Domain controller to reflect LDAP queries through.',
'Required' : False,
'Value' : ''
},
'LocalGroup' : {
'Description' : 'The local group to check access against, "Administrators", "RDP/Remote Desktop Users", or a custom SID. Defaults to "Administrators".',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.generate_dynamic_powershell_script(moduleCode, moduleName)
script += moduleName + " "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
return script
|
{
"content_hash": "29183cba886e334aa9d35875f1519aed",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 189,
"avg_line_length": 35.888888888888886,
"alnum_prop": 0.47678018575851394,
"repo_name": "rvazarkar/Empire",
"id": "33b5c7adc2f9b5d6d9265da1aa317293df08aef7",
"size": "3876",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/modules/situational_awareness/network/powerview/find_gpo_location.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PHP",
"bytes": "2563"
},
{
"name": "PowerShell",
"bytes": "4057571"
},
{
"name": "Python",
"bytes": "640506"
},
{
"name": "Shell",
"bytes": "977"
}
],
"symlink_target": ""
}
|
extensions = ["sphinx.ext.autodoc"]
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "doc8"
# pylint: disable=redefined-builtin
copyright = "2013, OpenStack Foundation"
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = "sphinx_rtd_theme"
|
{
"content_hash": "ae8822cf12a381640c3ccf24b2576c89",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 79,
"avg_line_length": 30.944444444444443,
"alnum_prop": 0.6732495511669659,
"repo_name": "stackforge/doc8",
"id": "94bdea9d945caefc0aad94716604725f7618c05b",
"size": "1330",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "40957"
}
],
"symlink_target": ""
}
|
import time
from typing import Any, Callable, Dict, Iterable, Iterator, Optional, TypeVar
from pynamodb.constants import (CAMEL_COUNT, ITEMS, LAST_EVALUATED_KEY, SCANNED_COUNT,
CONSUMED_CAPACITY, TOTAL, CAPACITY_UNITS)
from pynamodb.settings import OperationSettings
_T = TypeVar('_T')
class RateLimiter:
"""
RateLimiter limits operations to a pre-set rate of units/seconds
Example:
Initialize a RateLimiter with the desired rate
rate_limiter = RateLimiter(rate_limit)
Now, every time before calling an operation, call acquire()
rate_limiter.acquire()
And after an operation, update the number of units consumed
rate_limiter.consume(units)
"""
def __init__(self, rate_limit: float, time_module: Optional[Any] = None) -> None:
"""
Initializes a RateLimiter object
:param rate_limit: The desired rate
:param time_module: Optional: the module responsible for calculating time. Intended to be used for testing purposes.
"""
if rate_limit <= 0:
raise ValueError("rate_limit must be greater than zero")
self._rate_limit = rate_limit
self._consumed = 0
self._time_of_last_acquire = 0.0
self._time_module: Any = time_module or time
def consume(self, units: int) -> None:
"""
Records the amount of units consumed.
:param units: Number of units consumed
:return: None
"""
self._consumed += units
def acquire(self) -> None:
"""
Sleeps the appropriate amount of time to follow the rate limit restriction
:return: None
"""
self._time_module.sleep(max(0, self._consumed/float(self.rate_limit) - (self._time_module.time()-self._time_of_last_acquire)))
self._consumed = 0
self._time_of_last_acquire = self._time_module.time()
@property
def rate_limit(self) -> float:
"""
A limit of units per seconds
"""
return self._rate_limit
@rate_limit.setter
def rate_limit(self, rate_limit: float):
if rate_limit <= 0:
raise ValueError("rate_limit must be greater than zero")
self._rate_limit = rate_limit
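# Illustrative sketch (not part of the module): the acquire/consume pattern from
# the RateLimiter docstring. `have_work()` and `do_operation()` are hypothetical;
# the latter is assumed to return the capacity units the request consumed.
#
#   rate_limiter = RateLimiter(rate_limit=25.0)   # at most ~25 units per second
#   while have_work():
#       rate_limiter.acquire()                    # sleeps if over budget
#       units = do_operation()                    # hypothetical operation
#       rate_limiter.consume(units)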
class PageIterator(Iterator[_T]):
"""
PageIterator handles Query and Scan result pagination.
http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination
http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination
"""
def __init__(
self,
operation: Callable,
args: Any,
kwargs: Dict[str, Any],
rate_limit: Optional[float] = None,
settings: OperationSettings = OperationSettings.default,
) -> None:
self._operation = operation
self._args = args
self._kwargs = kwargs
self._last_evaluated_key = kwargs.get('exclusive_start_key')
self._is_last_page = False
self._total_scanned_count = 0
self._rate_limiter = None
if rate_limit:
self._rate_limiter = RateLimiter(rate_limit)
self._settings = settings
def __iter__(self) -> Iterator[_T]:
return self
def __next__(self) -> _T:
if self._is_last_page:
raise StopIteration()
self._kwargs['exclusive_start_key'] = self._last_evaluated_key
if self._rate_limiter:
self._rate_limiter.acquire()
self._kwargs['return_consumed_capacity'] = TOTAL
page = self._operation(*self._args, settings=self._settings, **self._kwargs)
self._last_evaluated_key = page.get(LAST_EVALUATED_KEY)
self._is_last_page = self._last_evaluated_key is None
self._total_scanned_count += page[SCANNED_COUNT]
if self._rate_limiter:
consumed_capacity = page.get(CONSUMED_CAPACITY, {}).get(CAPACITY_UNITS, 0)
self._rate_limiter.consume(consumed_capacity)
return page
def next(self) -> _T:
return self.__next__()
@property
def key_names(self) -> Iterable[str]:
# If the current page has a last_evaluated_key, use it to determine key attributes
if self._last_evaluated_key:
return self._last_evaluated_key.keys()
# Use the table meta data to determine the key attributes
table_meta = self._operation.__self__.get_meta_table() # type: ignore
return table_meta.get_key_names(self._kwargs.get('index_name'))
@property
def page_size(self) -> Optional[int]:
return self._kwargs.get('limit')
@page_size.setter
def page_size(self, page_size: int) -> None:
self._kwargs['limit'] = page_size
@property
def last_evaluated_key(self) -> Optional[Dict[str, Dict[str, Any]]]:
return self._last_evaluated_key
@property
def total_scanned_count(self) -> int:
return self._total_scanned_count
class ResultIterator(Iterator[_T]):
"""
ResultIterator handles Query and Scan item pagination.
http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination
http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination
"""
def __init__(
self,
operation: Callable,
args: Any,
kwargs: Dict[str, Any],
map_fn: Optional[Callable] = None,
limit: Optional[int] = None,
rate_limit: Optional[float] = None,
settings: OperationSettings = OperationSettings.default,
) -> None:
self.page_iter: PageIterator = PageIterator(operation, args, kwargs, rate_limit, settings)
self._map_fn = map_fn
self._limit = limit
self._total_count = 0
self._index = 0
self._count = 0
def _get_next_page(self) -> None:
page = next(self.page_iter)
self._count = page[CAMEL_COUNT]
self._items = page.get(ITEMS) # not returned if 'Select' is set to 'COUNT'
self._index = 0 if self._items else self._count
self._total_count += self._count
def __iter__(self) -> Iterator[_T]:
return self
def __next__(self) -> _T:
if self._limit == 0:
raise StopIteration
while self._index == self._count:
self._get_next_page()
item = self._items[self._index]
self._index += 1
if self._limit is not None:
self._limit -= 1
if self._map_fn:
item = self._map_fn(item)
return item
def next(self) -> _T:
return self.__next__()
@property
def last_evaluated_key(self) -> Optional[Dict[str, Dict[str, Any]]]:
if self._index == self._count:
# Not started iterating yet: return `exclusive_start_key` if set, otherwise expect None; or,
# Entire page has been consumed: last_evaluated_key is whatever DynamoDB returned
# It may correspond to the current item, or it may correspond to an item evaluated but not returned.
return self.page_iter.last_evaluated_key
# In the middle of a page of results: reconstruct a last_evaluated_key from the current item
# The operation should be resumed starting at the last item returned, not the last item evaluated.
# This can occur if the 'limit' is reached in the middle of a page.
item = self._items[self._index - 1]
return {key: item[key] for key in self.page_iter.key_names}
@property
def total_count(self) -> int:
return self._total_count
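# Illustrative sketch (not part of the module): resuming iteration from
# last_evaluated_key. The `connection.scan` operation, table name, and kwargs are
# assumptions for demonstration.
#
#   results = ResultIterator(connection.scan, ('my_table',), {'limit': 50}, limit=50)
#   items = list(results)
#   # later, continue where the previous pass stopped:
#   resumed = ResultIterator(
#       connection.scan, ('my_table',),
#       {'limit': 50, 'exclusive_start_key': results.last_evaluated_key},
#       limit=50,
#   )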
|
{
"content_hash": "1f12cce463d83e3a62d084311b59c90d",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 134,
"avg_line_length": 34.28378378378378,
"alnum_prop": 0.613060044672185,
"repo_name": "pynamodb/PynamoDB",
"id": "0f19c360c8f25e415d2cf4ca59ca2963b189c9d9",
"size": "7611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pynamodb/pagination.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "629971"
}
],
"symlink_target": ""
}
|
import requests
from lib.base import OpscenterAction
class DecommissionNode(OpscenterAction):
def run(self, node_ip, cluster_id=None):
if not cluster_id:
cluster_id = self.cluster_id
url = self._get_full_url([cluster_id, 'ops', 'decommission', node_ip])
return requests.post(url).json()
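# Illustrative note (not part of the action): the call above issues roughly
#   POST <opscenter_base_url>/<cluster_id>/ops/decommission/<node_ip>
# with the exact base URL assembled by OpscenterAction._get_full_url in lib/base.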
|
{
"content_hash": "0e95edcd9a4a8b9480afac95a2cd7d23",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 25.53846153846154,
"alnum_prop": 0.6596385542168675,
"repo_name": "pidah/st2contrib",
"id": "692422461672e7e5aedaae65846ed1589d51c77d",
"size": "332",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "packs/opscenter/actions/decommission_node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "Makefile",
"bytes": "4592"
},
{
"name": "Python",
"bytes": "665076"
},
{
"name": "Ruby",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "15738"
}
],
"symlink_target": ""
}
|
"""Trains a Bayesian neural network to classify MNIST digits.
The architecture is LeNet-5 [1].
#### References
[1]: Yann LeCun, Leon Bottou, Yoshua Bengio, and Patrick Haffner.
Gradient-based learning applied to document recognition.
_Proceedings of the IEEE_, 1998.
http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from trainer.tfgfile_wrapper import tfgfile_wrapper
import os
import warnings
# Dependency imports
from absl import flags
flags.DEFINE_string(name="job-dir", default="/tmp", help="AI Platform Training passes this to the training script.")
import matplotlib
matplotlib.use("Agg")
from matplotlib import figure # pylint: disable=g-import-not-at-top
from matplotlib.backends import backend_agg
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.contrib.learn.python.learn.datasets import mnist
# TODO(b/78137893): Integration tests currently fail with seaborn imports.
warnings.simplefilter(action="ignore")
try:
import seaborn as sns # pylint: disable=g-import-not-at-top
HAS_SEABORN = True
except ImportError:
HAS_SEABORN = False
tfd = tfp.distributions
IMAGE_SHAPE = [28, 28, 1]
flags.DEFINE_float("learning_rate",
default=0.001,
help="Initial learning rate.")
flags.DEFINE_integer("max_steps",
default=6000,
help="Number of training steps to run.")
flags.DEFINE_integer("batch_size",
default=128,
help="Batch size.")
flags.DEFINE_string("data_dir",
default=os.path.join(os.getenv("TEST_TMPDIR", "/tmp"),
"bayesian_neural_network/data"),
help="Directory where data is stored (if using real data).")
flags.DEFINE_string(
"model_dir",
default=os.path.join(os.getenv("TEST_TMPDIR", "/tmp"),
"bayesian_neural_network/"),
help="Directory to put the model's fit.")
flags.DEFINE_integer("viz_steps",
default=400,
help="Frequency at which save visualizations.")
flags.DEFINE_integer("num_monte_carlo",
default=50,
help="Network draws to compute predictive probabilities.")
flags.DEFINE_bool("fake_data",
default=None,
help="If true, uses fake data. Defaults to real data.")
FLAGS = flags.FLAGS
@tfgfile_wrapper
def plot_weight_posteriors(names, qm_vals, qs_vals, fname):
"""Save a PNG plot with histograms of weight means and stddevs.
Args:
names: A Python `iterable` of `str` variable names.
qm_vals: A Python `iterable`, the same length as `names`,
whose elements are Numpy `array`s, of any shape, containing
      posterior means of weight variables.
qs_vals: A Python `iterable`, the same length as `names`,
whose elements are Numpy `array`s, of any shape, containing
      posterior standard deviations of weight variables.
fname: Python `str` filename to save the plot to.
"""
fig = figure.Figure(figsize=(6, 3))
canvas = backend_agg.FigureCanvasAgg(fig)
ax = fig.add_subplot(1, 2, 1)
for n, qm in zip(names, qm_vals):
sns.distplot(qm.flatten(), ax=ax, label=n)
ax.set_title("weight means")
ax.set_xlim([-1.5, 1.5])
ax.legend()
ax = fig.add_subplot(1, 2, 2)
for n, qs in zip(names, qs_vals):
sns.distplot(qs.flatten(), ax=ax)
ax.set_title("weight stddevs")
ax.set_xlim([0, 1.])
fig.tight_layout()
canvas.print_figure(fname, format="png")
print("saved {}".format(fname))
@tfgfile_wrapper
def plot_heldout_prediction(input_vals, probs,
fname, n=10, title=""):
"""Save a PNG plot visualizing posterior uncertainty on heldout data.
Args:
input_vals: A `float`-like Numpy `array` of shape
`[num_heldout] + IMAGE_SHAPE`, containing heldout input images.
probs: A `float`-like Numpy array of shape `[num_monte_carlo,
num_heldout, num_classes]` containing Monte Carlo samples of
class probabilities for each heldout sample.
fname: Python `str` filename to save the plot to.
    n: Python `int` number of datapoints to visualize.
title: Python `str` title for the plot.
"""
fig = figure.Figure(figsize=(9, 3*n))
canvas = backend_agg.FigureCanvasAgg(fig)
for i in range(n):
ax = fig.add_subplot(n, 3, 3*i + 1)
ax.imshow(input_vals[i, :].reshape(IMAGE_SHAPE[:-1]), interpolation="None")
ax = fig.add_subplot(n, 3, 3*i + 2)
for prob_sample in probs:
sns.barplot(np.arange(10), prob_sample[i, :], alpha=0.1, ax=ax)
ax.set_ylim([0, 1])
ax.set_title("posterior samples")
ax = fig.add_subplot(n, 3, 3*i + 3)
sns.barplot(np.arange(10), np.mean(probs[:, i, :], axis=0), ax=ax)
ax.set_ylim([0, 1])
ax.set_title("predictive probs")
fig.suptitle(title)
fig.tight_layout()
canvas.print_figure(fname, format="png")
print("saved {}".format(fname))
def build_input_pipeline(mnist_data, batch_size, heldout_size):
"""Build an Iterator switching between train and heldout data."""
# Build an iterator over training batches.
training_dataset = tf.data.Dataset.from_tensor_slices(
(mnist_data.train.images, np.int32(mnist_data.train.labels)))
training_batches = training_dataset.shuffle(
50000, reshuffle_each_iteration=True).repeat().batch(batch_size)
training_iterator = tf.compat.v1.data.make_one_shot_iterator(training_batches)
  # Build an iterator over the heldout set with batch_size=heldout_size,
# i.e., return the entire heldout set as a constant.
heldout_dataset = tf.data.Dataset.from_tensor_slices(
(mnist_data.validation.images,
np.int32(mnist_data.validation.labels)))
heldout_frozen = (heldout_dataset.take(heldout_size).
repeat().batch(heldout_size))
heldout_iterator = tf.compat.v1.data.make_one_shot_iterator(heldout_frozen)
# Combine these into a feedable iterator that can switch between training
# and validation inputs.
handle = tf.compat.v1.placeholder(tf.string, shape=[])
feedable_iterator = tf.compat.v1.data.Iterator.from_string_handle(
handle, training_batches.output_types, training_batches.output_shapes)
images, labels = feedable_iterator.get_next()
return images, labels, handle, training_iterator, heldout_iterator
def build_fake_data(num_examples=10):
"""Build fake MNIST-style data for unit testing."""
class Dummy(object):
pass
mnist_data = Dummy()
mnist_data.train = Dummy()
mnist_data.train.images = np.float32(np.random.randn(
num_examples, *IMAGE_SHAPE))
mnist_data.train.labels = np.int32(np.random.permutation(
np.arange(num_examples)))
mnist_data.train.num_examples = num_examples
mnist_data.validation = Dummy()
mnist_data.validation.images = np.float32(np.random.randn(
num_examples, *IMAGE_SHAPE))
mnist_data.validation.labels = np.int32(np.random.permutation(
np.arange(num_examples)))
mnist_data.validation.num_examples = num_examples
return mnist_data
def main(argv):
del argv # unused
if tf.io.gfile.exists(FLAGS.model_dir):
tf.compat.v1.logging.warning(
"Warning: deleting old log directory at {}".format(FLAGS.model_dir))
tf.io.gfile.rmtree(FLAGS.model_dir)
tf.io.gfile.makedirs(FLAGS.model_dir)
if FLAGS.fake_data:
mnist_data = build_fake_data()
else:
mnist_data = mnist.read_data_sets(FLAGS.data_dir, reshape=False)
(images, labels, handle,
training_iterator, heldout_iterator) = build_input_pipeline(
mnist_data, FLAGS.batch_size, mnist_data.validation.num_examples)
# Build a Bayesian LeNet5 network. We use the Flipout Monte Carlo estimator
# for the convolution and fully-connected layers: this enables lower
# variance stochastic gradients than naive reparameterization.
with tf.compat.v1.name_scope("bayesian_neural_net", values=[images]):
neural_net = tf.keras.Sequential([
tfp.layers.Convolution2DFlipout(6,
kernel_size=5,
padding="SAME",
activation=tf.nn.relu),
tf.keras.layers.MaxPooling2D(pool_size=[2, 2],
strides=[2, 2],
padding="SAME"),
tfp.layers.Convolution2DFlipout(16,
kernel_size=5,
padding="SAME",
activation=tf.nn.relu),
tf.keras.layers.MaxPooling2D(pool_size=[2, 2],
strides=[2, 2],
padding="SAME"),
tfp.layers.Convolution2DFlipout(120,
kernel_size=5,
padding="SAME",
activation=tf.nn.relu),
tf.keras.layers.Flatten(),
tfp.layers.DenseFlipout(84, activation=tf.nn.relu),
tfp.layers.DenseFlipout(10)
])
logits = neural_net(images)
labels_distribution = tfd.Categorical(logits=logits)
# Compute the -ELBO as the loss, averaged over the batch size.
neg_log_likelihood = -tf.reduce_mean(
input_tensor=labels_distribution.log_prob(labels))
kl = sum(neural_net.losses) / mnist_data.train.num_examples
elbo_loss = neg_log_likelihood + kl
# Build metrics for evaluation. Predictions are formed from a single forward
# pass of the probabilistic layers. They are cheap but noisy predictions.
predictions = tf.argmax(input=logits, axis=1)
accuracy, accuracy_update_op = tf.compat.v1.metrics.accuracy(
labels=labels, predictions=predictions)
# Extract weight posterior statistics for layers with weight distributions
# for later visualization.
names = []
qmeans = []
qstds = []
for i, layer in enumerate(neural_net.layers):
try:
q = layer.kernel_posterior
except AttributeError:
continue
names.append("Layer {}".format(i))
qmeans.append(q.mean())
qstds.append(q.stddev())
with tf.compat.v1.name_scope("train"):
optimizer = tf.compat.v1.train.AdamOptimizer(
learning_rate=FLAGS.learning_rate)
train_op = optimizer.minimize(elbo_loss)
init_op = tf.group(tf.compat.v1.global_variables_initializer(),
tf.compat.v1.local_variables_initializer())
with tf.compat.v1.Session() as sess:
sess.run(init_op)
# Run the training loop.
train_handle = sess.run(training_iterator.string_handle())
heldout_handle = sess.run(heldout_iterator.string_handle())
for step in range(FLAGS.max_steps):
_ = sess.run([train_op, accuracy_update_op],
feed_dict={handle: train_handle})
if step % 100 == 0:
loss_value, accuracy_value = sess.run(
[elbo_loss, accuracy], feed_dict={handle: train_handle})
print("Step: {:>3d} Loss: {:.3f} Accuracy: {:.3f}".format(
step, loss_value, accuracy_value))
if (step+1) % FLAGS.viz_steps == 0:
# Compute log prob of heldout set by averaging draws from the model:
# p(heldout | train) = int_model p(heldout|model) p(model|train)
# ~= 1/n * sum_{i=1}^n p(heldout | model_i)
# where model_i is a draw from the posterior p(model|train).
probs = np.asarray([sess.run((labels_distribution.probs),
feed_dict={handle: heldout_handle})
for _ in range(FLAGS.num_monte_carlo)])
mean_probs = np.mean(probs, axis=0)
image_vals, label_vals = sess.run((images, labels),
feed_dict={handle: heldout_handle})
heldout_lp = np.mean(np.log(mean_probs[np.arange(mean_probs.shape[0]),
label_vals.flatten()]))
print(" ... Held-out nats: {:.3f}".format(heldout_lp))
qm_vals, qs_vals = sess.run((qmeans, qstds))
if HAS_SEABORN:
plot_weight_posteriors(names, qm_vals, qs_vals,
fname=os.path.join(
FLAGS.model_dir,
"step{:05d}_weights.png".format(step)))
plot_heldout_prediction(image_vals, probs,
fname=os.path.join(
FLAGS.model_dir,
"step{:05d}_pred.png".format(step)),
title="mean heldout logprob {:.2f}"
.format(heldout_lp))
if __name__ == "__main__":
tf.compat.v1.app.run()
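# Illustrative invocation sketch (not part of the script); the module path is an
# assumption and the flags map to the DEFINE_* declarations above:
#
#   python -m trainer.bayesian_neural_network --fake_data=true --max_steps=400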
|
{
"content_hash": "ddde451e9c4feb01b7881e70d9ef2275",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 116,
"avg_line_length": 38.687687687687685,
"alnum_prop": 0.6202747807187767,
"repo_name": "GoogleCloudPlatform/ml-on-gcp",
"id": "a5b8e56e32dd64d9a43c71a91558ab2966465135",
"size": "13561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_zoo/tensorflow/probability/bayesian_neural_network/trainer/bayesian_neural_network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2829"
},
{
"name": "HTML",
"bytes": "2609"
},
{
"name": "Jupyter Notebook",
"bytes": "485638"
},
{
"name": "Python",
"bytes": "909187"
},
{
"name": "R",
"bytes": "9425"
},
{
"name": "Shell",
"bytes": "72356"
}
],
"symlink_target": ""
}
|
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import urllib
import urlparse
import re
import base64
from resources.lib.modules import cache
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['hd-streams.org']
self.base_link = 'https://hd-streams.org'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases), year)
return urllib.urlencode({'url': url}) if url else None
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'tvshowtitle': tvshowtitle, 'localtvshowtitle': localtvshowtitle, 'aliases': aliases}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
tvshowtitle = data['tvshowtitle']
localtvshowtitle = data['localtvshowtitle']
aliases = source_utils.aliases_to_array(eval(data['aliases']))
year = re.findall('(\d{4})', premiered)
year = year[0] if year else data['year']
url = self.__search([localtvshowtitle] + aliases, year, season)
if not url and tvshowtitle != localtvshowtitle: url = self.__search([tvshowtitle] + aliases,year, season)
if not url: return
return urllib.urlencode({'url': url, 'episode': episode})
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
url = urlparse.urljoin(self.base_link, data.get('url'))
episode = data.get('episode')
r = client.request(url)
aj = self.__get_ajax_object(r)
b = dom_parser.parse_dom(r, 'img', attrs={'class': 'dgvaup'}, req='data-img')[0].attrs['data-img']
if episode:
r = dom_parser.parse_dom(r, 'a', attrs={'class': 'btn-stream-ep', 'data-episode': episode}, req=['data-episode', 'data-server'])
else:
r = dom_parser.parse_dom(r, 'div', attrs={'id': 'lang-de'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie'})
r = dom_parser.parse_dom(r, 'a', attrs={'class': 'btn-stream'}, req=['data-episode', 'data-server'])
r = [(i.attrs['data-episode'], i.attrs['data-server']) for i in r]
for epi, server in r:
try:
x = {'action': aj.get('load_episodes'), 'episode': epi, 'pid': aj.get('postid'), 'server': server, 'nonce': aj.get('nonce'), 'b': b}
x = client.request(aj.get('ajax_url'), post=x, XHR=True, referer=url)
x = json.loads(x)
q = source_utils.label_to_quality(x.get('q'))
x = json.loads(base64.decodestring(x.get('u')))
u = source_utils.evp_decode(x.get('ct'), base64.decodestring(b), x.get('s').decode("hex"))
u = u.replace('\/', '/').strip('"')
valid, host = source_utils.is_host_valid(u, hostDict)
if not valid: continue
sources.append({'source': host, 'quality': q, 'language': 'de', 'url': u, 'direct': False, 'debridonly': False, 'checkquality': True})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles, year, season='0'):
try:
aj = cache.get(self.__get_ajax_object, 24)
t = [cleantitle.get(i) for i in set(titles) if i]
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(aj.get('ajax_url'), post={'action': aj.get('search'), 'nonce': aj.get('snonce'), 'query': cleantitle.query(titles[0])})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'search-result'})
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'search-item-content'})
r = dom_parser.parse_dom(r, 'a', req='href')
r = [(i.attrs['href'], i.content.lower()) for i in r if i]
r = [(i[0], i[1], re.findall('(.+?) \(*(\d{4})', i[1])) for i in r]
r = [(i[0], i[2][0][0] if len(i[2]) > 0 else i[1], i[2][0][1] if len(i[2]) > 0 else '0') for i in r]
r = [(i[0], i[1], i[2], re.findall('(.+?)\s+(?:staf+el|s)\s+(\d+)', i[1])) for i in r]
r = [(i[0], i[3][0][0] if len(i[3]) > 0 else i[1], i[2], i[3][0][1] if len(i[3]) > 0 else '0') for i in r]
r = [(i[0], i[1].replace(' hd', ''), i[2], '1' if int(season) > 0 and i[3] == '0' else i[3]) for i in r]
r = sorted(r, key=lambda i: int(i[2]), reverse=True) # with year > no year
r = [i[0] for i in r if cleantitle.get(i[1]) in t and i[2] in y and int(i[3]) == int(season)][0]
return source_utils.strip_domain(r)
except:
return
def __get_ajax_object(self, html=None):
try:
r = client.request(self.base_link) if not html else html
r = re.findall('ajax_object\s*=\s*({.*?});', r)[0]
r = json.loads(r)
return r
except:
return {}
|
{
"content_hash": "742e8790d590c82ba02a0e6168573c76",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 154,
"avg_line_length": 42.0125,
"alnum_prop": 0.5415055043141922,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "7207c27aa9fc6b2844fae3327aaf7ce3ac242621",
"size": "6747",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "script.module.uncoded/lib/resources/lib/sources/de/hdstreams.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
from schematics import version_info
# Building on Read the Docs?
on_rtd = os.environ.get('READTHEDOCS') == 'True'
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
doctest_path = [os.path.abspath('..')]
doctest_test_doctest_blocks = 'default'
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Schematics'
copyright = u'2013 j2labs'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '{0}.{1}'.format(*version_info)
# The full version, including alpha/beta/rc tags.
release = '{0}.{1}.{2}'.format(*version_info)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Schematicsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Schematics.tex', u'Schematics Documentation',
u'j2labs \\& Plain Vanilla Games', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'schematics', u'Schematics Documentation',
[u'j2labs & Plain Vanilla Games'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Schematics', u'Schematics Documentation',
u'j2labs & Plain Vanilla Games', 'Schematics', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
{
"content_hash": "4e56f0793e4bc2d6213ba8ea889b5268",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 87,
"avg_line_length": 32.64876033057851,
"alnum_prop": 0.6991520060751804,
"repo_name": "openprocurement/schematics",
"id": "cf674f9a8948059a6a5a46c7f9f2016143cd1516",
"size": "8322",
"binary": false,
"copies": "4",
"ref": "refs/heads/development",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "197427"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import pytest
import scitbx.matrix
from cctbx import sgtbx
from cctbx.sgtbx import bravais_types
from dxtbx.model import Crystal, Experiment, ExperimentList
from dials.algorithms.indexing import assign_indices, non_primitive_basis
from dials.array_family import flex
@pytest.mark.parametrize("space_group_symbol", bravais_types.acentric)
def test_detect(space_group_symbol):
sgi = sgtbx.space_group_info(space_group_symbol)
cs = sgi.any_compatible_crystal_symmetry(volume=1000)
ms = cs.build_miller_set(anomalous_flag=True, d_min=1).expand_to_p1()
result = non_primitive_basis.detect(ms.indices())
if sgi.group().conventional_centring_type_symbol() != "P":
assert result is not None
assert isinstance(result, scitbx.matrix.sqr)
assert result.n == (3, 3)
else:
assert result is None
@pytest.mark.parametrize("space_group_symbol", bravais_types.acentric)
def test_correct(space_group_symbol):
sgi = sgtbx.space_group_info(space_group_symbol)
cs = sgi.any_compatible_crystal_symmetry(volume=1000)
ms = cs.build_miller_set(anomalous_flag=True, d_min=1).expand_to_p1()
# the reciprocal matrix
B = scitbx.matrix.sqr(cs.unit_cell().fractionalization_matrix()).transpose()
crystal = Crystal(B, sgtbx.space_group())
expts = ExperimentList([Experiment(crystal=crystal)])
refl = flex.reflection_table()
refl["miller_index"] = ms.indices()
refl["rlp"] = B.elems * ms.indices().as_vec3_double()
refl["imageset_id"] = flex.int(len(refl))
refl["xyzobs.mm.value"] = flex.vec3_double(len(refl))
non_primitive_basis.correct(expts, refl, assign_indices.AssignIndicesGlobal())
cs_corrected = expts.crystals()[0].get_crystal_symmetry()
assert cs_corrected.change_of_basis_op_to_primitive_setting().is_identity_op()
assert (
cs.change_of_basis_op_to_primitive_setting().apply(ms.indices())
== refl["miller_index"]
)
|
{
"content_hash": "1fd0e6b7e0f4add6775200eafd5018f9",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 82,
"avg_line_length": 37.37735849056604,
"alnum_prop": 0.7062089853609288,
"repo_name": "dials/dials",
"id": "6e124cebe34a406900580793ae0094d72683c583",
"size": "1981",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/algorithms/indexing/test_non_primitive_basis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "379"
},
{
"name": "C++",
"bytes": "1758129"
},
{
"name": "CMake",
"bytes": "34388"
},
{
"name": "Dockerfile",
"bytes": "329"
},
{
"name": "Gherkin",
"bytes": "400"
},
{
"name": "HTML",
"bytes": "25033"
},
{
"name": "Makefile",
"bytes": "76"
},
{
"name": "Python",
"bytes": "6147100"
},
{
"name": "Shell",
"bytes": "6419"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from xml.etree import ElementTree as et
import os, sys
def reportfordir(path, reportfilename):
"""
Usage: python exemplarlistreport.py directorypath reportfile.txt
Output will be appended to existing reportfile.txt
"""
with open(reportfilename, mode="a", encoding='utf-8') as ofile:
chars = ofile.write("\n\nReport for " + path)
try:
for file in sorted(os.listdir(path)):
cf = os.path.join(path, file)
chars = ofile.write("\n\n" + file)
r = et.parse(cf).getroot()
e = set(r.findall(".//exemplarCharacters[@draft='generated']"))
if len(e) == 0:
chars = ofile.write("\nNo generated exemplar lists found")
continue
d = set(r.findall(".//exemplarCharacters[@draft='generated'][@type]"))
m = list(e-d)
if len(m) == 0:
chars = ofile.write("\nNo generated main exemplar list found")
continue
else:
chars = ofile.write("\nGenerated main exemplar list: " + m[0].text)
a = r.findall(".//exemplarCharacters[@draft='generated'][@type='auxiliary']")
if len(a) == 0:
pass
# chars = ofile.write("\nNo generated auxiliary list found")
else:
chars = ofile.write("\nGenerated auxiliary exemplar list: " + a[0].text)
except OSError:
chars = ofile.write("\nDirectory not found or empty")
if __name__ == '__main__':
if len(sys.argv) > 2:
reportfordir(sys.argv[1], sys.argv[2])
else:
print(reportfordir.__doc__)
|
{
"content_hash": "58667cf04cfee2a510a1a585982ffb86",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 93,
"avg_line_length": 42.285714285714285,
"alnum_prop": 0.5281531531531531,
"repo_name": "silnrsi/sldr",
"id": "f8049224eb6b8da3dc0ed2d12d4bffd12c55f213",
"size": "1823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/exemplarlistreport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "100863"
},
{
"name": "JavaScript",
"bytes": "99270"
},
{
"name": "Less",
"bytes": "486"
},
{
"name": "Makefile",
"bytes": "224"
},
{
"name": "Python",
"bytes": "28201"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
}
|
"""Copyright 2011 The University of Michigan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors - Jie Yu (jieyu@umich.edu)
"""
import os
import shutil
import socket
import subprocess
import threading
import time
import psutil
from maple.core import config
from maple.core import logging
from maple.core import testing
def __client_0(test):
subprocess.call([test.mysql(), '-u', 'root', '-D', 'test'], stdin=file(test.insert_sql))
class Client(threading.Thread):
def __init__(self, client_idx, test):
threading.Thread.__init__(self)
self.client_idx = client_idx
self.test = test
def run(self):
eval('__client_%d(self.test)' % self.client_idx)
class Test(testing.ServerTest):
def __init__(self, input_idx):
testing.ServerTest.__init__(self, input_idx)
self.add_input([0])
def setup(self):
# Make sure we are running the script using root permission.
assert os.getuid() == 0
# Prepare SQL files.
self.create_sql = self.home() + '/create.sql'
f = open(self.create_sql, 'w')
f.write('create table t1 (id int);')
f.close()
self.insert_sql = self.home() + '/insert.sql'
f = open(self.insert_sql, 'w')
for i in range(50):
f.write('insert into t1 values (%d);' % i)
f.close()
self.flush_sql = self.home() + '/flush.sql'
f = open(self.flush_sql, 'w')
f.write('flush logs;')
f.close()
# Setup test database.
shutil.rmtree(self.var(), ignore_errors=True)
subprocess.call([self.mysql_install_db()])
self.server = subprocess.Popen([self.mysqld_safe(), '--user=root', '--log-bin'])
self.wait_for_idle()
subprocess.call([self.mysql(), '-u', 'root', '-D', 'test'], stdin=file(self.create_sql))
self.wait_for_idle()
subprocess.call([self.mysqladmin(), '--user=root', 'shutdown'])
self.server.wait()
# Wrap the mysqld.
self.wrap_mysqld()
def tear_down(self):
# Remove SQL files.
os.remove(self.create_sql)
os.remove(self.insert_sql)
os.remove(self.flush_sql)
# Unwrap the mysqld.
self.unwrap_mysqld()
def start(self):
logging.msg('starting server for mysql_bug_791_training\n')
self.server = subprocess.Popen([self.mysqld_safe(), '--user=root', '--log-bin'])
self.wait_for_idle()
def stop(self):
self.wait_for_idle()
logging.msg('stopping server for mysql_bug_791_training\n')
time.sleep(2)
subprocess.call([self.mysqladmin(), '--user=root', 'shutdown'])
self.server.wait()
self.server = None
def issue(self):
clients = []
ipt = self.input()
for idx in range(len(ipt)):
clients.append(Client(ipt[idx], self))
logging.msg('issuing requests for mysql_bug_791_training\n')
for i in range(len(clients)):
clients[i].start()
for i in range(len(clients)):
clients[i].join()
def wrap_mysqld(self):
cmd = []
if self.prefix != None:
cmd.extend(self.prefix)
cmd.append(self.mysqld_real())
cmd.append('$*')
os.rename(self.mysqld(), self.mysqld_real())
script = open(self.mysqld(), 'w')
script.write('#!/bin/sh\n\n')
for c in cmd:
script.write('%s ' % c)
script.write('\n\n')
script.close()
os.chmod(self.mysqld(), 0755)
def unwrap_mysqld(self):
os.remove(self.mysqld())
os.rename(self.mysqld_real(), self.mysqld())
def wait_for_idle(self):
while not os.path.exists(self.pid_file()):
time.sleep(0.1)
p = psutil.Process(self.server.pid)
while True:
if p.get_cpu_percent() < 10.0:
break
def home(self):
return config.benchmark_home('mysql_bug_791')
def pid_file(self):
return self.home() + '/var/' + socket.gethostname() + '.pid'
def mysql_install_db(self):
return self.home() + '/bin/mysql_install_db'
def mysqld_safe(self):
return self.home() + '/bin/mysqld_safe'
def mysql(self):
return self.home() + '/bin/mysql'
def mysqladmin(self):
return self.home() + '/bin/mysqladmin'
def var(self):
return self.home() + '/var'
def mysqld(self):
return self.home() + '/libexec/mysqld'
def mysqld_real(self):
return self.home() + '/libexec/mysqld.real'
def get_test(input_idx='default'):
return Test(input_idx)
|
{
"content_hash": "0b0a55fd05cf0a507daf0e9aa1aa7cac",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 96,
"avg_line_length": 35.04827586206896,
"alnum_prop": 0.5956316410861865,
"repo_name": "jieyu/maple",
"id": "75e021b25f37217e3bc1fa479a74bd4be4d8efcd",
"size": "5082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "script/maple/benchmark/mysql_bug_791_training.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3636"
},
{
"name": "C++",
"bytes": "1407098"
},
{
"name": "Makefile",
"bytes": "14594"
},
{
"name": "Protocol Buffer",
"bytes": "8613"
},
{
"name": "Python",
"bytes": "401005"
},
{
"name": "Shell",
"bytes": "1733"
}
],
"symlink_target": ""
}
|
from typing import Optional
from fastapi import FastAPI, Security
from fastapi.security import OAuth2AuthorizationCodeBearer
from fastapi.testclient import TestClient
app = FastAPI()
oauth2_scheme = OAuth2AuthorizationCodeBearer(
authorizationUrl="authorize",
tokenUrl="token",
description="OAuth2 Code Bearer",
auto_error=True,
)
@app.get("/items/")
async def read_items(token: Optional[str] = Security(oauth2_scheme)):
return {"token": token}
client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Items",
"operationId": "read_items_items__get",
"security": [{"OAuth2AuthorizationCodeBearer": []}],
}
}
},
"components": {
"securitySchemes": {
"OAuth2AuthorizationCodeBearer": {
"type": "oauth2",
"flows": {
"authorizationCode": {
"authorizationUrl": "authorize",
"tokenUrl": "token",
"scopes": {},
}
},
"description": "OAuth2 Code Bearer",
}
}
},
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == openapi_schema
def test_no_token():
response = client.get("/items")
assert response.status_code == 401, response.text
assert response.json() == {"detail": "Not authenticated"}
def test_incorrect_token():
response = client.get("/items", headers={"Authorization": "Non-existent testtoken"})
assert response.status_code == 401, response.text
assert response.json() == {"detail": "Not authenticated"}
def test_token():
response = client.get("/items", headers={"Authorization": "Bearer testtoken"})
assert response.status_code == 200, response.text
assert response.json() == {"token": "testtoken"}
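# Illustrative note (not part of the test module): these tests are meant to be
# collected and run by pytest, e.g.
#   pytest tests/test_security_oauth2_authorization_code_bearer_description.py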
|
{
"content_hash": "f373f386c2e1687952db598bb8d5306e",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 88,
"avg_line_length": 28.925925925925927,
"alnum_prop": 0.5458813486982501,
"repo_name": "tiangolo/fastapi",
"id": "bdaa543fc343a2d3bad999f2d7a70e95f7b4f898",
"size": "2343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_security_oauth2_authorization_code_bearer_description.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "187"
},
{
"name": "Python",
"bytes": "1928986"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
}
|
import os
import ctypes
import time
import threading
import traceback
import cv2
import numpy as np
from pynq.drivers import video
from pynq.drivers.video import HDMI
from cffi import FFI
from ikalog.utils import *
from ikalog.inputs import VideoInput
class PynqCapture(VideoInput):
# override
def _enumerate_sources_func(self):
if IkaUtils.isWindows():
return self._videoinput_wrapper.get_device_list()
return ['Device Enumeration not supported']
# override
def _initialize_driver_func(self):
# OpenCV File doesn't need pre-initialization.
self._cleanup_driver_func()
# override
def _cleanup_driver_func(self):
self.lock.acquire()
try:
if self.ffi is not None:
self.ffi = None
if self.framebuffer is not None:
for fb in self.framebuffer:
del fb
if self.hdmi_out is not None:
self.hdmi_out.stop()
self.hdmi_out = None
if self.hdmi_in is not None:
self.hdmi_in.stop()
self.hdmi_in = None
self.reset()
finally:
self.lock.release()
# override
def _is_active_func(self):
return (self.hdmi_in is not None)
# override
def _select_device_by_index_func(self, source):
self._cleanup_driver_func()
self.lock.acquire()
try:
self.ffi = FFI()
self.hdmi_in = HDMI('in', init_timeout=10)
self.hdmi_in.start()
# TODO: under development
if False and self._enable_output:
self.hdmi_out = HDMI('out', frame_list=self.hdmi_in.frame_list)
mode = self._select_output_mode(self.hdmi_in.frame_width(), self.hdmi_in.frame_height())
self.hdmi_out.mode(mode)
time.sleep(1)
if self.hdmi_out is not None:
self.hdmi_out.start()
self.hdmi_in_geom = \
(self.hdmi_in.frame_width(), self.hdmi_in.frame_height())
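            # Map each VDMA frame store into a numpy array view via cffi so frames can be read without copying.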
self.framebuffer = []
for i in range(video.VDMA_DICT['NUM_FSTORES']):
pointer = self.ffi.cast('uint8_t *', self.hdmi_in.frame_addr(i))
#buffer_size = video.MAX_FRAME_WIDTH * video.MAX_FRAME_HEIGHT * 3 # 3 == sizeof(RGB)
buffer_size = self.hdmi_in_geom[0] * self.hdmi_in_geom[1] * 3
_bf = self.ffi.buffer(pointer, buffer_size)
                bf = np.frombuffer(_bf, np.uint8).reshape(self.hdmi_in_geom[1], self.hdmi_in_geom[0], 3)
#self.framebuffer.append(bf[:self.hdmi_in_geom[1],:self.hdmi_in_geom[0],:])
self.framebuffer.append(bf)
IkaUtils.dprint('%s: resolution %dx%d' % (self, self.hdmi_in_geom[0], self.hdmi_in_geom[1]))
except:
print(traceback.format_exc())
self.hdmi_in = None
self.hdmi_out = None
if self.framebuffer is not None:
for fb in self.framebuffer:
del fb
self.ffi = None
finally:
self.lock.release()
self.systime_base = time.time()
return self.is_active()
# override
def _select_device_by_name_func(self, source):
IkaUtils.dprint('%s: Select device by name "%s"' % (self, source))
try:
index = self.enumerate_sources().index(source)
except ValueError:
IkaUtils.dprint('%s: Input "%s" not found' % (self, source))
return False
IkaUtils.dprint('%s: "%s" -> %d' % (self, source, index))
self._select_device_by_index_func(index)
# override
def _get_current_timestamp_func(self):
return int((time.time() - self.systime_base) * 1000)
# override
def _read_frame_func(self):
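        # mode 1: fast capture via the patched PYNQ frame_raw2() API;
        # mode 2: read the current VDMA frame store through the pre-mapped numpy views;
        # otherwise: fall back to the slow frame_raw() copy, cropped from 1080p to 720p.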
t1 = time.time()
if self._mode == 1 and hasattr(self.hdmi_in, 'frame_raw2'):
# Modified version of PYNQ library has faster capture function.
frame = self.hdmi_in.frame_raw2()
elif self._mode == 2:
index = self.hdmi_in.frame_index()
self.hdmi_in.frame_index_next()
frame = self.framebuffer[index]
else:
# This function is supported in original version, but 10X slow.
frame_raw = self.hdmi_in.frame_raw()
frame = np.frombuffer(frame_raw, dtype=np.uint8)
frame = frame.reshape(1080, 1920, 3)
frame = frame[0:720, 0:1280, :]
t2 = time.time()
if self._debug:
print('read_frame_func: %6.6f' % (t2 - t1))
return frame
def _select_output_mode(self, width, height):
if width == 640 and height == 480:
return 0
if width == 800 and height == 600:
return 1
if width == 1280 and height == 720:
return 2
if width == 1280 and height == 1024:
return 3
if width == 1920 and height == 1080:
return 4
raise Exception("Specific output frame size not supported: %dx%d"%(width,height))
def __init__(self, enable_output=False, debug=False, mode=2):
self.hdmi_in = None
self.hdmi_out = None
self.ffi = None
self.framebuffer = None
self._enable_output = enable_output
self._debug = debug
self._mode = mode
IkaUtils.dprint(
'%s: debug %s enable_output %s mode %s' %
(self, self._debug, self._enable_output, self._mode))
super(PynqCapture, self).__init__()
if __name__ == "__main__":
from PIL import Image
obj = PynqCapture(debug=True,enable_output=False)
obj.select_source(0)
time.sleep(1)
k = 0
t = time.time()
while (time.time() - t) < 100:
frame = obj.read_frame()
Image.frombytes("RGB",(frame.shape[1],frame.shape[0]),bytes(frame[:,:,::-1])).save("dump/test_%d.jpg"%k)
k = k+1
|
{
"content_hash": "2502124c9eb6c8fcb088da64ac22128e",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 112,
"avg_line_length": 32.53260869565217,
"alnum_prop": 0.5512863347811561,
"repo_name": "deathmetalland/IkaLog",
"id": "e7d86c7b0ccb358bf700e9793d1e828470797d08",
"size": "6651",
"binary": false,
"copies": "3",
"ref": "refs/heads/youtube_sample",
"path": "ikalog/inputs/pynq_capture.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37511"
},
{
"name": "Makefile",
"bytes": "1057"
},
{
"name": "Python",
"bytes": "760091"
},
{
"name": "Shell",
"bytes": "4625"
}
],
"symlink_target": ""
}
|
import copy
import mock
from openstackclient.common import exceptions
from openstackclient.network import utils as network_utils
from openstackclient.network.v2 import security_group_rule
from openstackclient.tests.compute.v2 import fakes as compute_fakes
from openstackclient.tests import fakes
from openstackclient.tests.identity.v3 import fakes as identity_fakes
from openstackclient.tests.network.v2 import fakes as network_fakes
from openstackclient.tests import utils as tests_utils
class TestSecurityGroupRuleNetwork(network_fakes.TestNetworkV2):
def setUp(self):
super(TestSecurityGroupRuleNetwork, self).setUp()
# Get a shortcut to the network client
self.network = self.app.client_manager.network
class TestSecurityGroupRuleCompute(compute_fakes.TestComputev2):
def setUp(self):
super(TestSecurityGroupRuleCompute, self).setUp()
# Get a shortcut to the network client
self.compute = self.app.client_manager.compute
class TestCreateSecurityGroupRuleNetwork(TestSecurityGroupRuleNetwork):
# The security group rule to be created.
_security_group_rule = None
# The security group that will contain the rule created.
_security_group = \
network_fakes.FakeSecurityGroup.create_one_security_group()
expected_columns = (
'direction',
'ethertype',
'id',
'port_range_max',
'port_range_min',
'project_id',
'protocol',
'remote_group_id',
'remote_ip_prefix',
'security_group_id',
)
expected_data = None
def _setup_security_group_rule(self, attrs=None):
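        # Build a fake rule with the given attributes, stub the network client's create call,
        # and precompute the data tuple the command is expected to display.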
self._security_group_rule = \
network_fakes.FakeSecurityGroupRule.create_one_security_group_rule(
attrs)
self.network.create_security_group_rule = mock.Mock(
return_value=self._security_group_rule)
self.expected_data = (
self._security_group_rule.direction,
self._security_group_rule.ethertype,
self._security_group_rule.id,
self._security_group_rule.port_range_max,
self._security_group_rule.port_range_min,
self._security_group_rule.project_id,
self._security_group_rule.protocol,
self._security_group_rule.remote_group_id,
self._security_group_rule.remote_ip_prefix,
self._security_group_rule.security_group_id,
)
def setUp(self):
super(TestCreateSecurityGroupRuleNetwork, self).setUp()
self.network.find_security_group = mock.Mock(
return_value=self._security_group)
# Set identity client v3. And get a shortcut to Identity client.
identity_client = identity_fakes.FakeIdentityv3Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
self.app.client_manager.identity = identity_client
self.identity = self.app.client_manager.identity
# Get a shortcut to the ProjectManager Mock
self.projects_mock = self.identity.projects
self.projects_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.PROJECT),
loaded=True,
)
# Get a shortcut to the DomainManager Mock
self.domains_mock = self.identity.domains
self.domains_mock.get.return_value = fakes.FakeResource(
None,
copy.deepcopy(identity_fakes.DOMAIN),
loaded=True,
)
# Get the command object to test
self.cmd = security_group_rule.CreateSecurityGroupRule(
self.app, self.namespace)
def test_create_no_options(self):
self.assertRaises(tests_utils.ParserException,
self.check_parser, self.cmd, [], [])
def test_create_all_source_options(self):
arglist = [
'--src-ip', '10.10.0.0/24',
'--src-group', self._security_group.id,
self._security_group.id,
]
self.assertRaises(tests_utils.ParserException,
self.check_parser, self.cmd, arglist, [])
def test_create_bad_ethertype(self):
arglist = [
'--ethertype', 'foo',
self._security_group.id,
]
self.assertRaises(tests_utils.ParserException,
self.check_parser, self.cmd, arglist, [])
def test_create_all_protocol_options(self):
arglist = [
'--protocol', 'tcp',
'--proto', 'tcp',
self._security_group.id,
]
self.assertRaises(tests_utils.ParserException,
self.check_parser, self.cmd, arglist, [])
def test_create_all_port_range_options(self):
arglist = [
'--dst-port', '80:80',
'--icmp-type', '3',
'--icmp-code', '1',
self._security_group.id,
]
verifylist = [
('dst_port', (80, 80)),
('icmp_type', 3),
('icmp_code', 1),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(exceptions.CommandError, self.cmd.take_action,
parsed_args)
def test_create_default_rule(self):
self._setup_security_group_rule({
'port_range_max': 443,
'port_range_min': 443,
})
arglist = [
'--dst-port', str(self._security_group_rule.port_range_min),
self._security_group.id,
]
verifylist = [
('dst_port', (self._security_group_rule.port_range_min,
self._security_group_rule.port_range_max)),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.create_security_group_rule.assert_called_once_with(**{
'direction': self._security_group_rule.direction,
'ethertype': self._security_group_rule.ethertype,
'port_range_max': self._security_group_rule.port_range_max,
'port_range_min': self._security_group_rule.port_range_min,
'protocol': self._security_group_rule.protocol,
'remote_ip_prefix': self._security_group_rule.remote_ip_prefix,
'security_group_id': self._security_group.id,
})
self.assertEqual(self.expected_columns, columns)
self.assertEqual(self.expected_data, data)
def test_create_proto_option(self):
self._setup_security_group_rule({
'protocol': 'icmp',
'remote_ip_prefix': '10.0.2.0/24',
})
arglist = [
'--proto', self._security_group_rule.protocol,
'--src-ip', self._security_group_rule.remote_ip_prefix,
self._security_group.id,
]
verifylist = [
('proto', self._security_group_rule.protocol),
('protocol', None),
('src_ip', self._security_group_rule.remote_ip_prefix),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.create_security_group_rule.assert_called_once_with(**{
'direction': self._security_group_rule.direction,
'ethertype': self._security_group_rule.ethertype,
'protocol': self._security_group_rule.protocol,
'remote_ip_prefix': self._security_group_rule.remote_ip_prefix,
'security_group_id': self._security_group.id,
})
self.assertEqual(self.expected_columns, columns)
self.assertEqual(self.expected_data, data)
def test_create_source_group(self):
self._setup_security_group_rule({
'port_range_max': 22,
'port_range_min': 22,
'remote_group_id': self._security_group.id,
})
arglist = [
'--dst-port', str(self._security_group_rule.port_range_min),
'--ingress',
'--src-group', self._security_group.name,
self._security_group.id,
]
verifylist = [
('dst_port', (self._security_group_rule.port_range_min,
self._security_group_rule.port_range_max)),
('ingress', True),
('src_group', self._security_group.name),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.create_security_group_rule.assert_called_once_with(**{
'direction': self._security_group_rule.direction,
'ethertype': self._security_group_rule.ethertype,
'port_range_max': self._security_group_rule.port_range_max,
'port_range_min': self._security_group_rule.port_range_min,
'protocol': self._security_group_rule.protocol,
'remote_group_id': self._security_group_rule.remote_group_id,
'security_group_id': self._security_group.id,
})
self.assertEqual(self.expected_columns, columns)
self.assertEqual(self.expected_data, data)
def test_create_source_ip(self):
self._setup_security_group_rule({
'protocol': 'icmp',
'remote_ip_prefix': '10.0.2.0/24',
})
arglist = [
'--protocol', self._security_group_rule.protocol,
'--src-ip', self._security_group_rule.remote_ip_prefix,
self._security_group.id,
]
verifylist = [
('protocol', self._security_group_rule.protocol),
('src_ip', self._security_group_rule.remote_ip_prefix),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.create_security_group_rule.assert_called_once_with(**{
'direction': self._security_group_rule.direction,
'ethertype': self._security_group_rule.ethertype,
'protocol': self._security_group_rule.protocol,
'remote_ip_prefix': self._security_group_rule.remote_ip_prefix,
'security_group_id': self._security_group.id,
})
self.assertEqual(self.expected_columns, columns)
self.assertEqual(self.expected_data, data)
def test_create_network_options(self):
self._setup_security_group_rule({
'direction': 'egress',
'ethertype': 'IPv6',
'port_range_max': 443,
'port_range_min': 443,
'protocol': '6',
'remote_group_id': None,
'remote_ip_prefix': None,
})
arglist = [
'--dst-port', str(self._security_group_rule.port_range_min),
'--egress',
'--ethertype', self._security_group_rule.ethertype,
'--project', identity_fakes.project_name,
'--project-domain', identity_fakes.domain_name,
'--protocol', self._security_group_rule.protocol,
self._security_group.id,
]
verifylist = [
('dst_port', (self._security_group_rule.port_range_min,
self._security_group_rule.port_range_max)),
('egress', True),
('ethertype', self._security_group_rule.ethertype),
('project', identity_fakes.project_name),
('project_domain', identity_fakes.domain_name),
('protocol', self._security_group_rule.protocol),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.create_security_group_rule.assert_called_once_with(**{
'direction': self._security_group_rule.direction,
'ethertype': self._security_group_rule.ethertype,
'port_range_max': self._security_group_rule.port_range_max,
'port_range_min': self._security_group_rule.port_range_min,
'protocol': self._security_group_rule.protocol,
'security_group_id': self._security_group.id,
'tenant_id': identity_fakes.project_id,
})
self.assertEqual(self.expected_columns, columns)
self.assertEqual(self.expected_data, data)
def test_create_tcp_with_icmp_type(self):
arglist = [
'--protocol', 'tcp',
'--icmp-type', '15',
self._security_group.id,
]
verifylist = [
('protocol', 'tcp'),
('icmp_type', 15),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(exceptions.CommandError, self.cmd.take_action,
parsed_args)
def test_create_icmp_code(self):
arglist = [
'--protocol', '1',
'--icmp-code', '1',
self._security_group.id,
]
verifylist = [
('protocol', '1'),
('icmp_code', 1),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(exceptions.CommandError, self.cmd.take_action,
parsed_args)
def test_create_icmp_type(self):
self._setup_security_group_rule({
'port_range_min': 15,
'protocol': 'icmp',
'remote_ip_prefix': '0.0.0.0/0',
})
arglist = [
'--icmp-type', str(self._security_group_rule.port_range_min),
'--protocol', self._security_group_rule.protocol,
self._security_group.id,
]
verifylist = [
('dst_port', None),
('icmp_type', self._security_group_rule.port_range_min),
('icmp_code', None),
('protocol', self._security_group_rule.protocol),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.create_security_group_rule.assert_called_once_with(**{
'direction': self._security_group_rule.direction,
'ethertype': self._security_group_rule.ethertype,
'port_range_min': self._security_group_rule.port_range_min,
'protocol': self._security_group_rule.protocol,
'remote_ip_prefix': self._security_group_rule.remote_ip_prefix,
'security_group_id': self._security_group.id,
})
self.assertEqual(self.expected_columns, columns)
self.assertEqual(self.expected_data, data)
def test_create_ipv6_icmp_type_code(self):
self._setup_security_group_rule({
'ethertype': 'IPv6',
'port_range_min': 139,
'port_range_max': 2,
'protocol': 'ipv6-icmp',
})
arglist = [
'--icmp-type', str(self._security_group_rule.port_range_min),
'--icmp-code', str(self._security_group_rule.port_range_max),
'--protocol', self._security_group_rule.protocol,
self._security_group.id,
]
verifylist = [
('dst_port', None),
('icmp_type', self._security_group_rule.port_range_min),
('icmp_code', self._security_group_rule.port_range_max),
('protocol', self._security_group_rule.protocol),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.create_security_group_rule.assert_called_once_with(**{
'direction': self._security_group_rule.direction,
'ethertype': self._security_group_rule.ethertype,
'port_range_min': self._security_group_rule.port_range_min,
'port_range_max': self._security_group_rule.port_range_max,
'protocol': self._security_group_rule.protocol,
'security_group_id': self._security_group.id,
})
self.assertEqual(self.expected_columns, columns)
self.assertEqual(self.expected_data, data)
def test_create_icmpv6_type(self):
self._setup_security_group_rule({
'ethertype': 'IPv6',
'port_range_min': 139,
'protocol': 'icmpv6',
})
arglist = [
'--icmp-type', str(self._security_group_rule.port_range_min),
'--protocol', self._security_group_rule.protocol,
self._security_group.id,
]
verifylist = [
('dst_port', None),
('icmp_type', self._security_group_rule.port_range_min),
('icmp_code', None),
('protocol', self._security_group_rule.protocol),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.create_security_group_rule.assert_called_once_with(**{
'direction': self._security_group_rule.direction,
'ethertype': self._security_group_rule.ethertype,
'port_range_min': self._security_group_rule.port_range_min,
'protocol': self._security_group_rule.protocol,
'security_group_id': self._security_group.id,
})
self.assertEqual(self.expected_columns, columns)
self.assertEqual(self.expected_data, data)
class TestCreateSecurityGroupRuleCompute(TestSecurityGroupRuleCompute):
# The security group rule to be created.
_security_group_rule = None
# The security group that will contain the rule created.
_security_group = \
compute_fakes.FakeSecurityGroup.create_one_security_group()
def _setup_security_group_rule(self, attrs=None):
self._security_group_rule = \
compute_fakes.FakeSecurityGroupRule.create_one_security_group_rule(
attrs)
self.compute.security_group_rules.create.return_value = \
self._security_group_rule
expected_columns, expected_data = \
security_group_rule._format_security_group_rule_show(
self._security_group_rule._info)
return expected_columns, expected_data
def setUp(self):
super(TestCreateSecurityGroupRuleCompute, self).setUp()
self.app.client_manager.network_endpoint_enabled = False
self.compute.security_groups.get.return_value = self._security_group
# Get the command object to test
self.cmd = security_group_rule.CreateSecurityGroupRule(self.app, None)
def test_create_no_options(self):
self.assertRaises(tests_utils.ParserException,
self.check_parser, self.cmd, [], [])
def test_create_all_source_options(self):
arglist = [
'--src-ip', '10.10.0.0/24',
'--src-group', self._security_group.id,
self._security_group.id,
]
self.assertRaises(tests_utils.ParserException,
self.check_parser, self.cmd, arglist, [])
def test_create_bad_protocol(self):
arglist = [
'--protocol', 'foo',
self._security_group.id,
]
self.assertRaises(tests_utils.ParserException,
self.check_parser, self.cmd, arglist, [])
def test_create_all_protocol_options(self):
arglist = [
'--protocol', 'tcp',
'--proto', 'tcp',
self._security_group.id,
]
self.assertRaises(tests_utils.ParserException,
self.check_parser, self.cmd, arglist, [])
def test_create_network_options(self):
arglist = [
'--ingress',
'--ethertype', 'IPv4',
'--icmp-type', '3',
'--icmp-code', '11',
'--project', identity_fakes.project_name,
'--project-domain', identity_fakes.domain_name,
self._security_group.id,
]
self.assertRaises(tests_utils.ParserException,
self.check_parser, self.cmd, arglist, [])
def test_create_default_rule(self):
expected_columns, expected_data = self._setup_security_group_rule()
dst_port = str(self._security_group_rule.from_port) + ':' + \
str(self._security_group_rule.to_port)
arglist = [
'--dst-port', dst_port,
self._security_group.id,
]
verifylist = [
('dst_port', (self._security_group_rule.from_port,
self._security_group_rule.to_port)),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.compute.security_group_rules.create.assert_called_once_with(
self._security_group.id,
self._security_group_rule.ip_protocol,
self._security_group_rule.from_port,
self._security_group_rule.to_port,
self._security_group_rule.ip_range['cidr'],
None,
)
self.assertEqual(expected_columns, columns)
self.assertEqual(expected_data, data)
def test_create_source_group(self):
expected_columns, expected_data = self._setup_security_group_rule({
'from_port': 22,
'to_port': 22,
'group': {'name': self._security_group.name},
})
arglist = [
'--dst-port', str(self._security_group_rule.from_port),
'--src-group', self._security_group.name,
self._security_group.id,
]
verifylist = [
('dst_port', (self._security_group_rule.from_port,
self._security_group_rule.to_port)),
('src_group', self._security_group.name),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.compute.security_group_rules.create.assert_called_once_with(
self._security_group.id,
self._security_group_rule.ip_protocol,
self._security_group_rule.from_port,
self._security_group_rule.to_port,
self._security_group_rule.ip_range['cidr'],
self._security_group.id,
)
self.assertEqual(expected_columns, columns)
self.assertEqual(expected_data, data)
def test_create_source_ip(self):
expected_columns, expected_data = self._setup_security_group_rule({
'ip_protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'ip_range': {'cidr': '10.0.2.0/24'},
})
arglist = [
'--protocol', self._security_group_rule.ip_protocol,
'--src-ip', self._security_group_rule.ip_range['cidr'],
self._security_group.id,
]
verifylist = [
('protocol', self._security_group_rule.ip_protocol),
('src_ip', self._security_group_rule.ip_range['cidr']),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.compute.security_group_rules.create.assert_called_once_with(
self._security_group.id,
self._security_group_rule.ip_protocol,
self._security_group_rule.from_port,
self._security_group_rule.to_port,
self._security_group_rule.ip_range['cidr'],
None,
)
self.assertEqual(expected_columns, columns)
self.assertEqual(expected_data, data)
def test_create_proto_option(self):
expected_columns, expected_data = self._setup_security_group_rule({
'ip_protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'ip_range': {'cidr': '10.0.2.0/24'},
})
arglist = [
'--proto', self._security_group_rule.ip_protocol,
'--src-ip', self._security_group_rule.ip_range['cidr'],
self._security_group.id,
]
verifylist = [
('proto', self._security_group_rule.ip_protocol),
('protocol', None),
('src_ip', self._security_group_rule.ip_range['cidr']),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.compute.security_group_rules.create.assert_called_once_with(
self._security_group.id,
self._security_group_rule.ip_protocol,
self._security_group_rule.from_port,
self._security_group_rule.to_port,
self._security_group_rule.ip_range['cidr'],
None,
)
self.assertEqual(expected_columns, columns)
self.assertEqual(expected_data, data)
class TestDeleteSecurityGroupRuleNetwork(TestSecurityGroupRuleNetwork):
# The security group rule to be deleted.
_security_group_rule = \
network_fakes.FakeSecurityGroupRule.create_one_security_group_rule()
def setUp(self):
super(TestDeleteSecurityGroupRuleNetwork, self).setUp()
self.network.delete_security_group_rule = mock.Mock(return_value=None)
self.network.find_security_group_rule = mock.Mock(
return_value=self._security_group_rule)
# Get the command object to test
self.cmd = security_group_rule.DeleteSecurityGroupRule(
self.app, self.namespace)
def test_security_group_rule_delete(self):
arglist = [
self._security_group_rule.id,
]
verifylist = [
('rule', self._security_group_rule.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.network.delete_security_group_rule.assert_called_once_with(
self._security_group_rule)
self.assertIsNone(result)
class TestDeleteSecurityGroupRuleCompute(TestSecurityGroupRuleCompute):
# The security group rule to be deleted.
_security_group_rule = \
compute_fakes.FakeSecurityGroupRule.create_one_security_group_rule()
def setUp(self):
super(TestDeleteSecurityGroupRuleCompute, self).setUp()
self.app.client_manager.network_endpoint_enabled = False
# Get the command object to test
self.cmd = security_group_rule.DeleteSecurityGroupRule(self.app, None)
def test_security_group_rule_delete(self):
arglist = [
self._security_group_rule.id,
]
verifylist = [
('rule', self._security_group_rule.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.compute.security_group_rules.delete.assert_called_once_with(
self._security_group_rule.id)
self.assertIsNone(result)
class TestListSecurityGroupRuleNetwork(TestSecurityGroupRuleNetwork):
# The security group to hold the rules.
_security_group = \
network_fakes.FakeSecurityGroup.create_one_security_group()
# The security group rule to be listed.
_security_group_rule_tcp = \
network_fakes.FakeSecurityGroupRule.create_one_security_group_rule({
'protocol': 'tcp',
'port_range_max': 80,
'port_range_min': 80,
'security_group_id': _security_group.id,
})
_security_group_rule_icmp = \
network_fakes.FakeSecurityGroupRule.create_one_security_group_rule({
'protocol': 'icmp',
'remote_ip_prefix': '10.0.2.0/24',
'security_group_id': _security_group.id,
})
_security_group.security_group_rules = [_security_group_rule_tcp._info,
_security_group_rule_icmp._info]
_security_group_rules = [_security_group_rule_tcp,
_security_group_rule_icmp]
expected_columns_with_group_and_long = (
'ID',
'IP Protocol',
'IP Range',
'Port Range',
'Direction',
'Ethertype',
'Remote Security Group',
)
expected_columns_no_group = (
'ID',
'IP Protocol',
'IP Range',
'Port Range',
'Remote Security Group',
'Security Group',
)
expected_data_with_group_and_long = []
expected_data_no_group = []
for _security_group_rule in _security_group_rules:
expected_data_with_group_and_long.append((
_security_group_rule.id,
_security_group_rule.protocol,
_security_group_rule.remote_ip_prefix,
security_group_rule._format_network_port_range(
_security_group_rule),
_security_group_rule.direction,
_security_group_rule.ethertype,
_security_group_rule.remote_group_id,
))
expected_data_no_group.append((
_security_group_rule.id,
_security_group_rule.protocol,
_security_group_rule.remote_ip_prefix,
security_group_rule._format_network_port_range(
_security_group_rule),
_security_group_rule.remote_group_id,
_security_group_rule.security_group_id,
))
def setUp(self):
super(TestListSecurityGroupRuleNetwork, self).setUp()
self.network.find_security_group = mock.Mock(
return_value=self._security_group)
self.network.security_group_rules = mock.Mock(
return_value=self._security_group_rules)
# Get the command object to test
self.cmd = security_group_rule.ListSecurityGroupRule(
self.app, self.namespace)
def test_list_default(self):
self._security_group_rule_tcp.port_range_min = 80
parsed_args = self.check_parser(self.cmd, [], [])
columns, data = self.cmd.take_action(parsed_args)
self.network.security_group_rules.assert_called_once_with(**{})
self.assertEqual(self.expected_columns_no_group, columns)
self.assertEqual(self.expected_data_no_group, list(data))
def test_list_with_group_and_long(self):
self._security_group_rule_tcp.port_range_min = 80
arglist = [
'--long',
self._security_group.id,
]
verifylist = [
('long', True),
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.security_group_rules.assert_called_once_with(**{
'security_group_id': self._security_group.id,
})
self.assertEqual(self.expected_columns_with_group_and_long, columns)
self.assertEqual(self.expected_data_with_group_and_long, list(data))
def test_list_with_ignored_options(self):
self._security_group_rule_tcp.port_range_min = 80
arglist = [
'--all-projects',
]
verifylist = [
('all_projects', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.security_group_rules.assert_called_once_with(**{})
self.assertEqual(self.expected_columns_no_group, columns)
self.assertEqual(self.expected_data_no_group, list(data))
class TestListSecurityGroupRuleCompute(TestSecurityGroupRuleCompute):
# The security group to hold the rules.
_security_group = \
compute_fakes.FakeSecurityGroup.create_one_security_group()
# The security group rule to be listed.
_security_group_rule_tcp = \
compute_fakes.FakeSecurityGroupRule.create_one_security_group_rule({
'ip_protocol': 'tcp',
'from_port': 80,
'to_port': 80,
'group': {'name': _security_group.name},
})
_security_group_rule_icmp = \
compute_fakes.FakeSecurityGroupRule.create_one_security_group_rule({
'ip_protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'ip_range': {'cidr': '10.0.2.0/24'},
'group': {'name': _security_group.name},
})
_security_group.rules = [_security_group_rule_tcp._info,
_security_group_rule_icmp._info]
expected_columns_with_group = (
'ID',
'IP Protocol',
'IP Range',
'Port Range',
'Remote Security Group',
)
expected_columns_no_group = \
expected_columns_with_group + ('Security Group',)
expected_data_with_group = []
expected_data_no_group = []
for _security_group_rule in _security_group.rules:
rule = network_utils.transform_compute_security_group_rule(
_security_group_rule
)
expected_rule_with_group = (
rule['id'],
rule['ip_protocol'],
rule['ip_range'],
rule['port_range'],
rule['remote_security_group'],
)
expected_rule_no_group = expected_rule_with_group + \
(_security_group_rule['parent_group_id'],)
expected_data_with_group.append(expected_rule_with_group)
expected_data_no_group.append(expected_rule_no_group)
def setUp(self):
super(TestListSecurityGroupRuleCompute, self).setUp()
self.app.client_manager.network_endpoint_enabled = False
self.compute.security_groups.get.return_value = \
self._security_group
self.compute.security_groups.list.return_value = \
[self._security_group]
# Get the command object to test
self.cmd = security_group_rule.ListSecurityGroupRule(self.app, None)
def test_list_default(self):
parsed_args = self.check_parser(self.cmd, [], [])
columns, data = self.cmd.take_action(parsed_args)
self.compute.security_groups.list.assert_called_once_with(
search_opts={'all_tenants': False}
)
self.assertEqual(self.expected_columns_no_group, columns)
self.assertEqual(self.expected_data_no_group, list(data))
def test_list_with_group(self):
arglist = [
self._security_group.id,
]
verifylist = [
('group', self._security_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.compute.security_groups.get.assert_called_once_with(
self._security_group.id
)
self.assertEqual(self.expected_columns_with_group, columns)
self.assertEqual(self.expected_data_with_group, list(data))
def test_list_all_projects(self):
arglist = [
'--all-projects',
]
verifylist = [
('all_projects', True),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.compute.security_groups.list.assert_called_once_with(
search_opts={'all_tenants': True}
)
self.assertEqual(self.expected_columns_no_group, columns)
self.assertEqual(self.expected_data_no_group, list(data))
def test_list_with_ignored_options(self):
arglist = [
'--long',
]
verifylist = [
('long', False),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.compute.security_groups.list.assert_called_once_with(
search_opts={'all_tenants': False}
)
self.assertEqual(self.expected_columns_no_group, columns)
self.assertEqual(self.expected_data_no_group, list(data))
class TestShowSecurityGroupRuleNetwork(TestSecurityGroupRuleNetwork):
# The security group rule to be shown.
_security_group_rule = \
network_fakes.FakeSecurityGroupRule.create_one_security_group_rule()
columns = (
'direction',
'ethertype',
'id',
'port_range_max',
'port_range_min',
'project_id',
'protocol',
'remote_group_id',
'remote_ip_prefix',
'security_group_id',
)
data = (
_security_group_rule.direction,
_security_group_rule.ethertype,
_security_group_rule.id,
_security_group_rule.port_range_max,
_security_group_rule.port_range_min,
_security_group_rule.project_id,
_security_group_rule.protocol,
_security_group_rule.remote_group_id,
_security_group_rule.remote_ip_prefix,
_security_group_rule.security_group_id,
)
def setUp(self):
super(TestShowSecurityGroupRuleNetwork, self).setUp()
self.network.find_security_group_rule = mock.Mock(
return_value=self._security_group_rule)
# Get the command object to test
self.cmd = security_group_rule.ShowSecurityGroupRule(
self.app, self.namespace)
def test_show_no_options(self):
self.assertRaises(tests_utils.ParserException,
self.check_parser, self.cmd, [], [])
def test_show_all_options(self):
arglist = [
self._security_group_rule.id,
]
verifylist = [
('rule', self._security_group_rule.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.network.find_security_group_rule.assert_called_once_with(
self._security_group_rule.id, ignore_missing=False)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
class TestShowSecurityGroupRuleCompute(TestSecurityGroupRuleCompute):
# The security group rule to be shown.
_security_group_rule = \
compute_fakes.FakeSecurityGroupRule.create_one_security_group_rule()
columns, data = \
security_group_rule._format_security_group_rule_show(
_security_group_rule._info)
def setUp(self):
super(TestShowSecurityGroupRuleCompute, self).setUp()
self.app.client_manager.network_endpoint_enabled = False
# Build a security group fake customized for this test.
security_group_rules = [self._security_group_rule._info]
security_group = fakes.FakeResource(
info=copy.deepcopy({'rules': security_group_rules}),
loaded=True)
security_group.rules = security_group_rules
self.compute.security_groups.list.return_value = [security_group]
# Get the command object to test
self.cmd = security_group_rule.ShowSecurityGroupRule(self.app, None)
def test_show_no_options(self):
self.assertRaises(tests_utils.ParserException,
self.check_parser, self.cmd, [], [])
def test_show_all_options(self):
arglist = [
self._security_group_rule.id,
]
verifylist = [
('rule', self._security_group_rule.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.compute.security_groups.list.assert_called_once_with()
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
|
{
"content_hash": "4d6c93248d287acb44ee59e62afbdf24",
"timestamp": "",
"source": "github",
"line_count": 1075,
"max_line_length": 79,
"avg_line_length": 37.32372093023256,
"alnum_prop": 0.5876679211424869,
"repo_name": "redhat-openstack/python-openstackclient",
"id": "2a64b88442f6ff48229e20e74e9f832f5c990161",
"size": "40689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master-patches",
"path": "openstackclient/tests/network/v2/test_security_group_rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2229284"
},
{
"name": "Shell",
"bytes": "591"
}
],
"symlink_target": ""
}
|
import pygame
import time
import argparse
from pygame.locals import *
from numpy import *
from naoqi import ALProxy
joy = None
hat_up = None
hat_down = None
hat_left = None
hat_right = None
motion = None
posture = None
speech = None
memory = None
leds = None
is_nao_walking = None
is_nao_headmoving = None
is_nao_turning = None
def global_init(robotIP, PORT):
    global joy, hat_up, hat_down, hat_left, hat_right
    global motion, posture, speech, memory, leds
    global is_nao_walking, is_nao_headmoving, is_nao_turning
    #### Init joystick (pygame)
pygame.init()
pygame.joystick.init()
joy = pygame.joystick.Joystick(0)
joy.init()
print joy.get_name()
print joy.get_init()
    #### Hat switch state flags
hat_up = False
hat_down = False
hat_left = False
hat_right = False
    ##### Init NAO proxies, posture and motion flags
is_nao_walking = False
is_nao_headmoving = False
is_nao_turning = False
try:
motion = ALProxy("ALMotion", robotIP, PORT)
except Exception, e:
print "Could not create proxy to ALMotion"
print "Error was: ",e
try:
posture = ALProxy("ALRobotPosture",robotIP, PORT)
except Exception, e:
print "Could not create proxy to ALRobotPosture"
print "Error was: ",e
try:
speech = ALProxy("ALTextToSpeech", robotIP, PORT)
except Exception, e:
print "Could not create proxy to ALTextToSpeech"
print "Error was: ",e
try:
memory = ALProxy("ALMemory", robotIP, PORT)
except Exception, e:
print "Could not create proxy to ALMemory"
print "Error was: ",e
try:
leds = ALProxy("ALLeds", robotIP, PORT)
except Exception, e:
print "Could not create proxy to ALLeds"
print "Error was: ",e
def nao_init_pos():
if motion:
motion.setStiffnesses("Body", 1.0)
nao_go_posture("StandInit")
## Enable arms control by Motion algorithm
if motion:
motion.setMoveArmsEnabled(True, True)
## Enable head to move
motion.wbEnableEffectorControl("Head", True)
def nao_go_posture(posture_name):
if posture_name != "Rest":
posture.goToPosture(posture_name, 0.65)
else:
motion.rest()
print "rest !"
#####
# If you just want a shortcut to reach the posture quickly when manipulating the robot
# you can use ALRobotPostureProxy::applyPosture() (you will have to help the robot)
#####
    ##### AVAILABLE POSTURES
#
#Crouch,
#LyingBack,
#LyingBelly,
#Sit,
#SitRelax,
#Stand,
#StandInit,
#StandZero
##############
def nao_update_walk(X, Y, Theta, Speed):
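    # Drive the omnidirectional walk: X, Y and Theta are normalized velocities and Speed sets
    # the step frequency; a near-zero speed stops the walk.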
if Speed > 0.01 :
Frequency = abs(Speed)
try:
#motion.moveToward( X, Y, Theta, [["Frequency", Frequency]])
motion.setWalkTargetVelocity( X, Y, Theta, Frequency)
except Exception, errorMsg:
print str(errorMsg)
print " not allowed to walk "
else:
motion.moveToward(0,0,0)
#motion.stopMove()
#nao_go_posture("StandInit")
########### MEMO ######
# motion.stopMove()
# motionProxy.moveTo(x, y, theta,
# [ ["MaxStepX", 0.02], # step of 2 cm in front
# ["MaxStepY", 0.16], # default value
# ["MaxStepTheta", 0.4], # default value
# ["MaxStepFrequency", 0.0], # low frequency
# ["StepHeight", 0.01], # step height of 1 cm
# ["TorsoWx", 0.0], # default value
# ["TorsoWy", 0.1] ]) # torso bend 0.1 rad in front
#################
def nao_move_head(yaw,pitch):
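    # Convert joystick angles (degrees) to radians and drive HeadYaw/HeadPitch at a low fraction of max speed.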
global is_nao_headmoving
if(not(is_nao_headmoving) and abs(yaw * pitch)>0):
motion.stiffnessInterpolation("Head", 1.0, 0.1)
is_nao_headmoving = True
fractionMaxSpeed = 0.2
motion.setAngles("HeadYaw",yaw*3.14/180.0, fractionMaxSpeed);
motion.setAngles("HeadPitch",pitch*3.14/180.0, fractionMaxSpeed);
if(not(is_nao_headmoving) and (yaw*pitch==0.0)):
motion.stiffnessInterpolation("Head", 0.0, 0.4)
#timeLists = [[0.2], [0.2]]
#motion.angleInterpolationBezier(names, timeLists, angleLists)
def nao_move_hands(isLeftOpen, isRightOpen):
stiffness = 1.0
time = 1.0
motion.stiffnessInterpolation("Hand", stiffness, time)
if isLeftOpen:
motion.openHand("LHAND")
else:
motion.closeHand("LHAND")
if isRightOpen:
motion.openHand("RHAND")
else:
motion.closeHand("RHAND")
def nao_memoryEvent(name, num):
memory.raiseEvent(name, num)
def nao_leds(name, value):
if name == "ear" :
if value > 0 :
leds.on("EarLeds")
else:
leds.off("EarLeds")
if name == "rotate" :
leds.rotateEyes(0x00FF0000, 0.5, value)
if name == "rasta":
leds.rasta(value)
def nao_say(toSay):
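    # NOTE: the toSay argument is currently ignored; a fixed phrase is spoken instead.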
speech.say("Dance, dance, dance! How are You?")
def joystick_update():
global is_nao_headmoving, is_nao_walking, is_nao_turning
global hat_up, hat_down, hat_left, hat_right
for event in pygame.event.get():
if pygame.joystick.get_count() > 0:
if event.type == pygame.locals.JOYBUTTONDOWN:
                ###### CONTROLLER BUTTONS #########
if joy.get_button(5):
print "RB tourner à gauche"
nao_move_hands(False, False)
if joy.get_button(4):
print "LB tourner à droite"
nao_move_hands(True, True)
if joy.get_button(3):
if hat_up:
print "Y up Led couleur 1"
nao_leds("ear", 1 )
elif hat_down:
print " Y down Se lever"
nao_go_posture("Stand")
elif hat_left:
print " Y left Bras gauche "
elif hat_right:
print "Y right Bras droite"
else :
print "Y dire quelque chose"
nao_say("bijour les amis")
if joy.get_button(2):
if hat_up:
print "X up Led couleur 2"
nao_leds("rotate", 2)
elif hat_down:
print " X down position Crouch"
nao_go_posture("Rest")
elif hat_left:
print " X left Bras gauche"
elif hat_right:
print "X right Bras droit"
else :
print "X animation 1"
nao_memoryEvent("anim", 1)
if joy.get_button(1):
if hat_up:
print "B up Led couleur 3"
nao_leds("rasta", 2)
elif hat_down:
print " B down position stand init"
nao_go_posture("StandInit")
elif hat_left:
print " B left Bras gauche"
elif hat_right:
print "B right Bras droit"
else :
print "B animation 2"
nao_memoryEvent("anim", 2)
if joy.get_button(0):
if hat_up:
print "A up Led couleur 4"
nao_leds("ear",0 )
elif hat_down:
print " A down s'assoir"
nao_go_posture("LyingBack")
elif hat_left:
print " A left Bras gauche"
elif hat_right:
print "A right Bras droit"
else :
print "A animation 3"
nao_memoryEvent("anim", 3)
if event.type == pygame.locals.JOYAXISMOTION:
                ###### WALK JOYSTICK #########
if abs(joy.get_axis(1)) + abs( joy.get_axis(0)) < 0.18 :
if is_nao_walking :
print "stop"
is_nao_walking = False
nao_update_walk(0.0, 0.0, 0.0, 0.0)
else:
ax1= joy.get_axis(1)
ax0= joy.get_axis(0)
print "avance "+str(ax1)+ " - "+str(ax0)
is_nao_walking = True
                    # Guard against division by zero when only the lateral axis is deflected.
                    forward = 0.0 if ax1 == 0 else abs(ax1) / (-ax1)
                    nao_update_walk(forward, 0.0, -ax0 * 0.75, abs(ax1))
                    # axis 1: forward (-1) / backward (1); axis 0: left (-1) / right (1)
                ###### LT - RT ROBOT ROTATION #########
if abs(joy.get_axis(2))>0.1:
is_nao_turning = True
ax2 = joy.get_axis(2)
if(ax2 > 0 ) :
print"LT "+str(ax2)
nao_update_walk(0.0,0.0,0.9 , ax2)
else:
print"RT "+str(joy.get_axis(2))
nao_update_walk(0.0,0.0,-0.9 , abs(ax2))
else :
if is_nao_turning:
is_nao_turning = False
print("Stop turning")
nao_update_walk(0.0, 0.0, 0.0, 0.0)
                ###### HEAD JOYSTICK #########
if abs(joy.get_axis(3)) + abs(joy.get_axis(4)) < 0.18 :
if is_nao_headmoving :
print "stop move head"
is_nao_headmoving = False
nao_move_head(0.0,0.0)
else:
ax3 = joy.get_axis(3)
ax4 = joy.get_axis(4)
print "move head "+str(ax3)+" - "+str(ax4)
nao_move_head(-ax4*85.5, -ax3*29.5)
                    # axis 3: up (-1) / down (1); axis 4: left (-1) / right (1)
###### JOYSTICK HAT #########
if event.type == pygame.locals.JOYHATMOTION:
(a,b) = joy.get_hat(0)
if a > 0.2 :
#print "hat right"
hat_right = True
else:
hat_right = False
if a < -0.2 :
#print "hat left"
hat_left = True
else:
hat_left = False
if b > 0.2 :
#print "hat up"
hat_up = True
else :
hat_up = False
if b < -0.2 :
#print "hat down"
hat_down = True
else:
hat_down = False
def main(robotIP, PORT=9559):
global_init(robotIP, PORT)
nao_init_pos()
time.sleep(2)
while 1:
joystick_update()
time.sleep(0.02)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="10.0.1.12",
help="Robot ip address")
parser.add_argument("--port", type=int, default=9559,
help="Robot port number")
args = parser.parse_args()
main(args.ip, args.port)
|
{
"content_hash": "bb796541b71a9306d4afde756dbb58b0",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 90,
"avg_line_length": 29.404040404040405,
"alnum_prop": 0.45980762624527655,
"repo_name": "ThotAlion/FIRE",
"id": "86950e2ad32132eb5105cb74b43577ce03e882e2",
"size": "11669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NAO/control_joystick/1/testJoystick1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8438"
},
{
"name": "HTML",
"bytes": "78508"
},
{
"name": "JavaScript",
"bytes": "13736"
},
{
"name": "Promela",
"bytes": "38100"
},
{
"name": "Python",
"bytes": "528544"
}
],
"symlink_target": ""
}
|
from django.core.paginator import Paginator
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.translation import gettext as _
from django.views.generic.base import View
from wagtail.admin.auth import PermissionPolicyChecker
from wagtail.admin.forms.search import SearchForm
from wagtail.admin.modal_workflow import render_modal_workflow
from wagtail.admin.ui.tables import Column, DateColumn, Table, TitleColumn
from wagtail.core import hooks
from wagtail.documents import get_document_model
from wagtail.documents.forms import get_document_form
from wagtail.documents.permissions import permission_policy
from wagtail.search import index as search_index
permission_checker = PermissionPolicyChecker(permission_policy)
def get_document_chosen_response(request, document):
"""
helper function: given a document, return the modal workflow response that returns that
document back to the calling page
"""
return render_modal_workflow(
request, None, None,
None, json_data={'step': 'document_chosen', 'result': {
'id': document.id,
'title': document.title,
'url': document.url,
'filename': document.filename,
'edit_link': reverse('wagtaildocs:edit', args=(document.id,)),
}}
)
class DownloadColumn(Column):
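    # Table column whose cells render a download link, exposing the document's file URL as download_url.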
cell_template_name = "wagtaildocs/tables/download_cell.html"
def get_cell_context_data(self, instance, parent_context):
context = super().get_cell_context_data(instance, parent_context)
context['download_url'] = instance.url
return context
class BaseChooseView(View):
def get(self, request):
Document = get_document_model()
if permission_policy.user_has_permission(request.user, 'add'):
DocumentForm = get_document_form(Document)
self.uploadform = DocumentForm(user=request.user, prefix='document-chooser-upload')
else:
self.uploadform = None
documents = permission_policy.instances_user_has_any_permission_for(
request.user, ['choose']
)
# allow hooks to modify the queryset
for hook in hooks.get_hooks('construct_document_chooser_queryset'):
documents = hook(documents, request)
self.q = None
self.is_searching = False
self.collection_id = request.GET.get('collection_id')
if self.collection_id:
documents = documents.filter(collection=self.collection_id)
self.documents_exist = documents.exists()
if 'q' in request.GET:
self.searchform = SearchForm(request.GET)
if self.searchform.is_valid():
self.q = self.searchform.cleaned_data['q']
documents = documents.search(self.q)
self.is_searching = True
else:
self.searchform = SearchForm()
if not self.is_searching:
documents = documents.order_by('-created_at')
paginator = Paginator(documents, per_page=10)
self.documents = paginator.get_page(request.GET.get('p'))
self.collections = permission_policy.collections_user_has_permission_for(
request.user, 'choose'
)
if len(self.collections) < 2:
self.collections = None
columns = [
TitleColumn(
'title', label=_("Title"), url_name='wagtaildocs:document_chosen', link_classname='document-choice'
),
DownloadColumn('filename', label=_("File")),
DateColumn('created_at', label=_("Created"), width='16%'),
]
if self.collections:
columns.insert(2, Column('collection', label=_("Collection")))
self.table = Table(columns, self.documents)
return self.render_to_response()
def get_context_data(self):
return {
'documents': self.documents,
'documents_exist': self.documents_exist,
'table': self.table,
'uploadform': self.uploadform,
'query_string': self.q,
'searchform': self.searchform,
'collections': self.collections,
'is_searching': self.is_searching,
'collection_id': self.collection_id,
}
def render_to_response(self):
raise NotImplementedError()
class ChooseView(BaseChooseView):
def render_to_response(self):
return render_modal_workflow(
self.request, 'wagtaildocs/chooser/chooser.html', None, self.get_context_data(),
json_data={
'step': 'chooser',
'error_label': _("Server Error"),
'error_message': _("Report this error to your website administrator with the following information:"),
'tag_autocomplete_url': reverse('wagtailadmin_tag_autocomplete'),
}
)
class ChooseResultsView(BaseChooseView):
def render_to_response(self):
return TemplateResponse(self.request, "wagtaildocs/chooser/results.html", self.get_context_data())
def document_chosen(request, document_id):
document = get_object_or_404(get_document_model(), id=document_id)
return get_document_chosen_response(request, document)
@permission_checker.require('add')
def chooser_upload(request):
Document = get_document_model()
DocumentForm = get_document_form(Document)
if request.method == 'POST':
document = Document(uploaded_by_user=request.user)
form = DocumentForm(
request.POST, request.FILES, instance=document, user=request.user, prefix='document-chooser-upload'
)
if form.is_valid():
document.file_size = document.file.size
# Set new document file hash
document.file.seek(0)
document._set_file_hash(document.file.read())
document.file.seek(0)
form.save()
# Reindex the document to make sure all tags are indexed
search_index.insert_or_update_object(document)
return get_document_chosen_response(request, document)
else:
form = DocumentForm(user=request.user, prefix='document-chooser-upload')
return render_modal_workflow(
request, None, None,
None, json_data={
'step': 'reshow_upload_form',
'htmlFragment': render_to_string('wagtaildocs/chooser/upload_form.html', {'form': form}, request),
}
)
|
{
"content_hash": "82877db80afd7f17c226fefacc84abe1",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 118,
"avg_line_length": 35.57837837837838,
"alnum_prop": 0.6399270738377393,
"repo_name": "mixxorz/wagtail",
"id": "ebb72306e0168691f901420bd749f29d656feb30",
"size": "6582",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wagtail/documents/views/chooser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3390"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "560244"
},
{
"name": "JavaScript",
"bytes": "508189"
},
{
"name": "Makefile",
"bytes": "1014"
},
{
"name": "Python",
"bytes": "5487927"
},
{
"name": "SCSS",
"bytes": "246385"
},
{
"name": "Shell",
"bytes": "6688"
},
{
"name": "TypeScript",
"bytes": "237634"
}
],
"symlink_target": ""
}
|
"""First version
Revision ID: 4c23f9943036
Revises: None
Create Date: 2013-04-18 12:37:52.535777
Apply all the changes added since 3.9.0, if they were not already applied.
"""
# revision identifiers, used by Alembic.
revision = '4c23f9943036'
down_revision = None
# TODO: add here all the missing migrators from:
# ../admin/cli/migrate_db_40m1_to_50/migrate.py
####################################################################
#
# Take into account that we are actively supporting SQLite. SQLite
# does not support operations such as ALTER TABLE Foo DROP COLUMN
# or so, so many methods, such as "op.drop_column", will not work, and
# a work arounds (locating all the data in a new table, drop the other
# and rename) will be required.
#
from alembic import op
import sqlalchemy as sa
from sqlalchemy.schema import MetaData
def add_priority_to_permission_parameter(m):
pass
def add_initialization_in_accounting_to_permission_parameter(m):
pass
def add_access_forward_to_permission(m):
pass
def add_federation_role(m):
pass
def add_access_forward_to_federated(m):
pass
def add_admin_panel_to_administrators(m):
pass
def add_reservation_id_to_user_used_experiment(m):
pass
def add_finish_reason_to_user_experiment(m):
pass
def add_max_error_in_millis_to_user_used_experiment(m):
pass
def add_permission_id_to_user_used_experiment(m):
pass
def remove_external_entity_from_permission_type(m):
pass
def remove_applicable_permission_types(m):
# if 'UserApplicablePermissionType' in m.tables:
# op.add_column('UserPermission', 'permission_type_id', sa.Integer)
#
# results = op.execute(
# op.tables['UserPermission'].
# )
#
# op.drop_table('UserApplicablePermissionType')
pass
def remove_external_entity_is_member_of(m):
pass
def remove_external_entity_permission_parameter(m):
pass
def remove_external_entity_permission(m):
pass
def remove_external_entity_command(m):
pass
def remove_external_entity_file(m):
pass
def remove_external_entity_user_used_experiment(m):
pass
def remove_external_entity_aplicable_permission_type(m):
pass
def migrate_user_permissions(m):
pass
def migrate_group_permissions(m):
pass
def migrate_role_permissions(m):
pass
def remove_permission_type_parameter(m):
pass
def remove_permission_type(m):
pass
def upgrade():
m = MetaData()
m.reflect(op.get_bind())
# remove_applicable_permission_types(m)
def downgrade():
# TODO
pass
|
{
"content_hash": "3dd4759b835771a5f595f4ad97e2ec55",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 75,
"avg_line_length": 21.275,
"alnum_prop": 0.690560125342734,
"repo_name": "ganeshgore/myremolab",
"id": "2dc2409545af674248960d4a924a116e82772164",
"size": "2553",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server/src/weblab/db/upgrade/regular/versions/4c23f9943036_first.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "4785"
},
{
"name": "C#",
"bytes": "265761"
},
{
"name": "CSS",
"bytes": "39653"
},
{
"name": "Java",
"bytes": "689284"
},
{
"name": "JavaScript",
"bytes": "74198"
},
{
"name": "PHP",
"bytes": "97324"
},
{
"name": "Python",
"bytes": "5335681"
},
{
"name": "Shell",
"bytes": "794"
},
{
"name": "VHDL",
"bytes": "1372"
}
],
"symlink_target": ""
}
|
from itertools import groupby
from operator import itemgetter
import web
import pysolr
import string
import json
from collections import defaultdict
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from sner import Ner
urls = (
'/', 'SimpleIndexSearchPage',
'/entityAwareSearchPage', 'EntityAwareSearch',
'/searchSimpleIndex', 'SearchSimpleIndex',
'/searchEntityAwareIndex', 'SearchEntityAwareIndex',
'/searchEntityAwareWithEfficientQuery', 'SearchEntityAwareWithEfficientQuery'
)
CATEGORY = {'b': 'Business', 'e': 'Entertainment', 't': 'Science and Technology', 'm': 'Health'}
render = web.template.render('templates/', base='layout')
SOLR_SIMPLEINDEX = pysolr.Solr('http://localhost:8983/solr/simpleindex')
SOLR_ENTITYAWAREINDEX = pysolr.Solr('http://localhost:8983/solr/entityawareindex')
STANFORD_NER_SERVER = Ner(host='localhost', port=9199)
def get_web_input(web_input):
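    # Extract the DataTables request parameters: draw counter, search term (defaulting to match-all),
    # paging offset and page size.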
draw = web_input['draw']
query = web_input['search[value]']
if len(query) == 0:
query = '*:*'
offset = web_input['start']
count = web_input['length']
return draw, query, offset, count
def search_simple_index(query, offset, count, draw):
"""
This function is responsible for hitting the solr endpoint
and returning the results back.
"""
results = SOLR_SIMPLEINDEX.search(q=query, **{
'start': int(offset),
'rows': int(count)
})
print("Saw {0} result(s) for query {1}.".format(len(results), query))
formatted_hits = []
for hit in results.docs:
formatted_hits.append(
[hit['_news_title'], hit['_news_publisher'], CATEGORY[hit['_news_category'][0]], hit['_news_url']])
response = {'draw': draw,
'recordsFiltered': results.hits,
'data': formatted_hits}
web.header('Content-Type', 'application/json')
return json.dumps(response)
def search_entity_aware_index(query, offset, count, draw, qf):
"""
This function is responsible for hitting the solr endpoint
and returning the results back.
"""
results = SOLR_ENTITYAWAREINDEX.search(q=query, **{
'start': int(offset),
'rows': int(count),
'qf': qf
})
print("Saw {0} result(s) for query {1}.".format(len(results), query))
formatted_hits = []
for hit in results.docs:
formatted_hits.append(
[hit['_news_title'], hit['_news_publisher'], CATEGORY[hit['_news_category'][0]], hit['_news_url']])
response = {'draw': draw,
'recordsFiltered': results.hits,
'data': formatted_hits}
web.header('Content-Type', 'application/json')
return json.dumps(response)
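# The `qf` argument above is forwarded to Solr as the (e)DisMax query-fields
# parameter, which boosts matches per field. A hypothetical sketch of a call
# (the field names and weights are assumptions, not taken from the actual
# index schema):
#
#     search_entity_aware_index(query, offset=0, count=10, draw=draw,
#                               qf='person_text^3 location_text^2 _text_^1')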
class SimpleIndexSearchPage:
def GET(self):
return render.simpleIndexSearchPage()
class EntityAwareSearch:
def GET(self):
return render.entityAwareSearchPage()
class SearchSimpleIndex:
def GET(self):
draw, query, offset, count = get_web_input(web_input=web.input())
if query == '*:*':
return search_simple_index(query=query, offset=offset, count=count, draw=draw)
clauses = []
for token in word_tokenize(query):
clauses.append("+_text_:%s" % token)
query = " AND ".join(clauses)
return search_simple_index(query=query, offset=offset, count=count, draw=draw)
class SearchEntityAwareIndex:
def accumulate_tags(self, list_of_tuples):
tokens, entities = zip(*list_of_tuples)
recognised = defaultdict(set)
duplicates = defaultdict(list)
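        # Group consecutive token indices that share the same entity tag so
        # that multi-token entities (e.g. "New York") are reassembled into
        # single strings. The `x[0] - x[1]` grouping key below is the classic
        # itertools.groupby trick: the difference between enumeration position
        # and index value stays constant over a run of consecutive indices.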
for i, item in enumerate(entities):
duplicates[item].append(i)
for key, value in duplicates.items():
for k, g in groupby(enumerate(value), lambda x: x[0] - x[1]):
indices = list(map(itemgetter(1), g))
recognised[key].add(' '.join(tokens[index] for index in indices))
# recognised.pop('O', None)
recognised = dict(recognised)
ner_info = {}
        for key, value in recognised.items():
ner_info[key] = list(value)
return ner_info
def GET(self):
draw, query, offset, count = get_web_input(web_input=web.input())
# TODO: Write code for handling the empty query (no keywords)
# TODO: Write code for tokenizing the search query
# TODO: Use the Stanford NER server to get NER tags for the query
# TODO: Write out the candidate query with NER index fields
# TODO: Define the boosting parameters for different NER index field matches
return None
if __name__ == "__main__":
app = web.application(urls, globals())
app.run()
|
{
"content_hash": "3757d76aae3b01ca9164511513131d2e",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 115,
"avg_line_length": 33.936619718309856,
"alnum_prop": 0.6158954139863042,
"repo_name": "candidate-selection-tutorial-sigir2017/candidate-selection-tutorial",
"id": "38d9be2503d14f4f6b6f7f37f37f77171e675828",
"size": "4819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assignments/assignment2/excercise/src/frontend/app.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "15478"
},
{
"name": "Python",
"bytes": "28923"
},
{
"name": "Shell",
"bytes": "4054"
}
],
"symlink_target": ""
}
|
from smsgw.core import db
from datetime import datetime
from sqlalchemy.sql.expression import text
from sqlalchemy.ext.declarative import AbstractConcreteBase
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
class BaseModel(AbstractConcreteBase, db.Model):
__table_args__ = {'mysql_engine': 'InnoDB', 'mysql_charset': 'utf8'}
def to_dict(self, properties=None):
        return {key: getattr(self, key) for key in (properties or [])}
def update(self, data):
for key in data:
if hasattr(self, key):
setattr(self, key, data[key])
@classmethod
def get_one(cls, **kwargs):
"""
Find item by kwargs
"""
try:
instance = cls.query.filter_by(**kwargs).one()
except (NoResultFound, MultipleResultsFound):
instance = None
return instance
@classmethod
def get_or_create(cls, _latest=True, **kwargs):
"""
Get or create row in DB
"""
instance = cls.query.filter_by(**kwargs).first()
if instance is None:
instance = cls(**kwargs)
db.session.add(instance)
return instance
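# Usage sketch for BaseModel.get_or_create (the model and field are
# hypothetical, and committing the session is left to the caller):
#
#     contact = Contact.get_or_create(phone='+420123456789')
#     db.session.commit()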
class DateMixin(object):
created = db.Column(db.TIMESTAMP, default=datetime.utcnow,
server_default=text('CURRENT_TIMESTAMP'))
updated = db.Column(db.TIMESTAMP, default=datetime.utcnow,
onupdate=datetime.utcnow)
|
{
"content_hash": "1adf20954a55690ce456f023b070c2f2",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 72,
"avg_line_length": 29.04,
"alnum_prop": 0.6150137741046832,
"repo_name": "VojtechBartos/smsgw",
"id": "f9a6a40d0336e78da53c5bbb0ee7ea4324f17716",
"size": "1542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smsgw/models/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34680"
},
{
"name": "HTML",
"bytes": "1155"
},
{
"name": "JavaScript",
"bytes": "143152"
},
{
"name": "Makefile",
"bytes": "212"
},
{
"name": "Mako",
"bytes": "526"
},
{
"name": "Python",
"bytes": "189569"
},
{
"name": "Shell",
"bytes": "464"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0025_auto_20191006_1746'),
]
operations = [
migrations.AddField(
model_name='product',
name='price',
field=models.PositiveIntegerField(blank=True, default=0, verbose_name='price'),
preserve_default=False,
),
]
|
{
"content_hash": "4b41ac1692fcdbb3d7567ff9d01e6afd",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 91,
"avg_line_length": 24.294117647058822,
"alnum_prop": 0.5907990314769975,
"repo_name": "flavoi/diventi",
"id": "c0a193cc23b0edebda7fdfaacd6ea9423bcd4271",
"size": "462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diventi/products/migrations/0026_product_price.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "385265"
},
{
"name": "Procfile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "826530"
}
],
"symlink_target": ""
}
|
import pipes
import tempfile
from behave import given, then
from gppylib.db import dbconn
from gppylib.gparray import GpArray
from test.behave_utils.utils import run_cmd
class Tablespace:
def __init__(self, name):
self.name = name
self.path = tempfile.mkdtemp()
self.dbname = 'tablespace_db_%s' % name
self.table_counter = 0
self.initial_data = None
gparray = GpArray.initFromCatalog(dbconn.DbURL())
for host in gparray.getHostList():
run_cmd('ssh %s mkdir -p %s' % (pipes.quote(host), pipes.quote(self.path)))
conn = dbconn.connect(dbconn.DbURL(), unsetSearchPath=False)
dbconn.execSQL(conn, "CREATE TABLESPACE %s LOCATION '%s'" % (self.name, self.path))
dbconn.execSQL(conn, "CREATE DATABASE %s TABLESPACE %s" % (self.dbname, self.name))
conn.close()
conn = dbconn.connect(dbconn.DbURL(dbname=self.dbname), unsetSearchPath=False)
dbconn.execSQL(conn, "CREATE TABLE tbl (i int) DISTRIBUTED RANDOMLY")
dbconn.execSQL(conn, "INSERT INTO tbl VALUES (GENERATE_SERIES(0, 25))")
# save the distributed data for later verification
self.initial_data = dbconn.query(conn, "SELECT gp_segment_id, i FROM tbl").fetchall()
conn.close()
def cleanup(self):
conn = dbconn.connect(dbconn.DbURL(dbname="postgres"), unsetSearchPath=False)
dbconn.execSQL(conn, "DROP DATABASE IF EXISTS %s" % self.dbname)
dbconn.execSQL(conn, "DROP TABLESPACE IF EXISTS %s" % self.name)
# Without synchronous_commit = 'remote_apply' introduced in 9.6, there
# is no guarantee that the mirrors have removed their tablespace
# directories by the time the DROP TABLESPACE command returns.
# We need those directories to no longer be in use by the mirrors
# before removing them below.
_checkpoint_and_wait_for_replication_replay(conn)
conn.close()
gparray = GpArray.initFromCatalog(dbconn.DbURL())
for host in gparray.getHostList():
run_cmd('ssh %s rm -rf %s' % (pipes.quote(host), pipes.quote(self.path)))
def verify(self, hostname=None, port=0):
"""
Verify tablespace functionality by ensuring the tablespace can be
written to, read from, and the initial data is still correctly
distributed.
"""
url = dbconn.DbURL(hostname=hostname, port=port, dbname=self.dbname)
with dbconn.connect(url, unsetSearchPath=False) as conn:
data = dbconn.query(conn, "SELECT gp_segment_id, i FROM tbl").fetchall()
# verify that we can still write to the tablespace
self.table_counter += 1
dbconn.execSQL(conn, "CREATE TABLE tbl_%s (i int) DISTRIBUTED RANDOMLY" % self.table_counter)
dbconn.execSQL(conn, "INSERT INTO tbl_%s VALUES (GENERATE_SERIES(0, 25))" % self.table_counter)
conn.close()
if sorted(data) != sorted(self.initial_data):
raise Exception("Tablespace data is not identically distributed. Expected:\n%r\n but found:\n%r" % (
sorted(self.initial_data), sorted(data)))
def verify_for_gpexpand(self, hostname=None, port=0):
"""
        For gpexpand, we need to make sure that:
        1. the data is the same after redistribution has finished
2. the table's numsegments is enlarged to the new cluster size
"""
url = dbconn.DbURL(hostname=hostname, port=port, dbname=self.dbname)
with dbconn.connect(url, unsetSearchPath=False) as conn:
data = dbconn.query(conn, "SELECT gp_segment_id, i FROM tbl").fetchall()
tbl_numsegments = dbconn.querySingleton(conn,
"SELECT numsegments FROM gp_distribution_policy "
"WHERE localoid = 'tbl'::regclass::oid")
num_segments = dbconn.querySingleton(conn,
"SELECT COUNT(DISTINCT(content)) - 1 FROM gp_segment_configuration")
conn.close()
if tbl_numsegments != num_segments:
raise Exception("After gpexpand the numsegments for tablespace table 'tbl' %d does not match "
"the number of segments in the cluster %d." % (tbl_numsegments, num_segments))
initial_data = [i for _, i in self.initial_data]
data_without_segid = [i for _, i in data]
if sorted(data_without_segid) != sorted(initial_data):
raise Exception("Tablespace data is not identically distributed after running gp_expand. "
"Expected pre-gpexpand data:\n%\n but found post-gpexpand data:\n%r" % (
sorted(self.initial_data), sorted(data)))
def _checkpoint_and_wait_for_replication_replay(conn):
"""
Taken from src/test/walrep/sql/missing_xlog.sql
"""
dbconn.execSQL(conn, """
-- checkpoint to ensure clean xlog replication before bring down mirror
create or replace function checkpoint_and_wait_for_replication_replay (retries int) returns bool as
$$
declare
i int;
checkpoint_locs pg_lsn[];
replay_locs pg_lsn[];
failed_for_segment text[];
r record;
all_caught_up bool;
begin
i := 0;
-- Issue a checkpoint.
checkpoint;
-- Get the WAL positions after the checkpoint records on every segment.
for r in select gp_segment_id, pg_current_xlog_location() as loc from gp_dist_random('gp_id') loop
checkpoint_locs[r.gp_segment_id] = r.loc;
end loop;
-- and the QD, too.
checkpoint_locs[-1] = pg_current_xlog_location();
-- Force some WAL activity, to nudge the mirrors to replay past the
-- checkpoint location. There are some cases where a non-transactional
-- WAL record is created right after the checkpoint record, which
-- doesn't get replayed on the mirror until something else forces it
-- out.
drop table if exists dummy;
create temp table dummy (id int4) distributed randomly;
-- Wait until all mirrors have replayed up to the location we
-- memorized above.
loop
all_caught_up = true;
for r in select gp_segment_id, replay_location as loc from gp_stat_replication loop
replay_locs[r.gp_segment_id] = r.loc;
if r.loc < checkpoint_locs[r.gp_segment_id] then
all_caught_up = false;
failed_for_segment[r.gp_segment_id] = 1;
else
failed_for_segment[r.gp_segment_id] = 0;
end if;
end loop;
if all_caught_up then
return true;
end if;
if i >= retries then
RAISE INFO 'checkpoint_locs: %', checkpoint_locs;
RAISE INFO 'replay_locs: %', replay_locs;
RAISE INFO 'failed_for_segment: %', failed_for_segment;
return false;
end if;
perform pg_sleep(0.1);
i := i + 1;
end loop;
end;
$$ language plpgsql;
SELECT checkpoint_and_wait_for_replication_replay(0);
DROP FUNCTION checkpoint_and_wait_for_replication_replay(int);
""")
@given('a tablespace is created with data')
def impl(context):
_create_tablespace_with_data(context, "outerspace")
@given('another tablespace is created with data')
def impl(context):
_create_tablespace_with_data(context, "myspace")
def _create_tablespace_with_data(context, name):
if 'tablespaces' not in context:
context.tablespaces = {}
context.tablespaces[name] = Tablespace(name)
@then('the tablespace is valid')
def impl(context):
context.tablespaces["outerspace"].verify()
@then('the tablespace is valid on the standby master')
def impl(context):
context.tablespaces["outerspace"].verify(context.standby_hostname, context.standby_port)
@then('the other tablespace is valid')
def impl(context):
context.tablespaces["myspace"].verify()
@then('the tablespace is valid after gpexpand')
def impl(context):
for _, tbs in context.tablespaces.items():
tbs.verify_for_gpexpand()
@then('all tablespaces are dropped')
def impl(context):
for tablespace in context.tablespaces.values():
tablespace.cleanup()
context.tablespaces = {}
|
{
"content_hash": "65ba6423dccb5f1415a052f0a1e4e7ae",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 121,
"avg_line_length": 38.018957345971565,
"alnum_prop": 0.6546995761655447,
"repo_name": "jmcatamney/gpdb",
"id": "ded17ff5763f6a485e958936503a219cda8cdeb8",
"size": "8022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpMgmt/test/behave/mgmt_utils/steps/tablespace_mgmt_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3724"
},
{
"name": "Awk",
"bytes": "836"
},
{
"name": "Batchfile",
"bytes": "12854"
},
{
"name": "C",
"bytes": "42498841"
},
{
"name": "C++",
"bytes": "14366259"
},
{
"name": "CMake",
"bytes": "38452"
},
{
"name": "Csound Score",
"bytes": "223"
},
{
"name": "DTrace",
"bytes": "3873"
},
{
"name": "Dockerfile",
"bytes": "11932"
},
{
"name": "Emacs Lisp",
"bytes": "3488"
},
{
"name": "Fortran",
"bytes": "14863"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "335208"
},
{
"name": "HTML",
"bytes": "53484"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "229556"
},
{
"name": "M4",
"bytes": "111147"
},
{
"name": "Makefile",
"bytes": "496239"
},
{
"name": "Objective-C",
"bytes": "38376"
},
{
"name": "PLpgSQL",
"bytes": "8009512"
},
{
"name": "Perl",
"bytes": "798767"
},
{
"name": "PowerShell",
"bytes": "422"
},
{
"name": "Python",
"bytes": "3000118"
},
{
"name": "Raku",
"bytes": "698"
},
{
"name": "Roff",
"bytes": "32437"
},
{
"name": "Ruby",
"bytes": "77585"
},
{
"name": "SCSS",
"bytes": "339"
},
{
"name": "Shell",
"bytes": "451713"
},
{
"name": "XS",
"bytes": "6983"
},
{
"name": "Yacc",
"bytes": "674092"
},
{
"name": "sed",
"bytes": "1231"
}
],
"symlink_target": ""
}
|
" File manager models"
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.template.loader import render_to_string
from django.template.defaultfilters import slugify
from django.conf import settings
from mptt.models import MPTTModel
import os
import shutil
# A directory is a node of our library file tree.
# It can contain other directories and files.
class Directory(MPTTModel):
parent = models.ForeignKey('self',
related_name='children',
db_index=True,
null=True,
blank=True,
help_text=_(u"Select the parent directory for this directory."))
name = models.CharField(_(u"Folder Name"),
max_length=100,
help_text=_(u"Name of the folder"))
def add_directory(self, name):
new_dir = Directory.objects.create(name=name)
new_dir.parent = self
new_dir.save()
def get_absolute_path(self):
if self.parent:
return self.parent.get_absolute_path() + "/" + self.name
else:
return self.filemanager.get().get_absolute_path()
def size(self):
calculated_size = 0
for folder in self.children.all():
calculated_size += folder.size()
for file in self.files.all():
if file and file.file():
try:
calculated_size += file.file().file.size
except IOError:
pass
return calculated_size
def rename(self, new_name):
self.name = new_name
self.save()
class Meta:
verbose_name = _(u"Directory")
verbose_name_plural = _(u'Directories')
def __unicode__(self):
return u'Directory'
def delete(self, *args, **kwargs):
if self.parent:
for dir_child in self.children.all():
dir_child.delete()
for file_inside in self.files.all():
file_inside.delete()
super(Directory, self).delete(*args, **kwargs)
# FileManager has some methods to manipulate data at a high level.
class FileManager(models.Model):
root = models.ForeignKey(Directory,
related_name="filemanager",
default=None,
blank=True,
null=True)
def delete(self, *args, **kwargs):
shutil.rmtree(self.website.get().media_root())
super(FileManager, self).delete(*args, **kwargs)
def get_absolute_path(self):
return self.website.get().media_root()
def size(self):
return self.root.size()
class Meta:
verbose_name = _(u"Files Library")
verbose_name_plural = _(u'Files Libraries')
class File(models.Model):
dir = models.ForeignKey(Directory,
related_name="files",
blank=False,
null=False)
name = models.CharField(_(u'File name'), max_length=256, editable=False)
type = models.CharField(_(u'Type'), max_length=50, editable=False)
date_creation = models.DateField(_(u'Creation date'), auto_now_add=True, editable=False)
date_modification = models.DateField(_(u'Modification date'), auto_now=True, editable=False)
file_type = models.ForeignKey(ContentType,
related_name="file_file",
limit_choices_to = {'model__startswith': 'file_manager_'},
verbose_name=_("file_type"))
file_id = models.PositiveIntegerField(editable=False)
file_object = generic.GenericForeignKey('file_type', 'file_id')
def file(self):
try:
tmp_fs_file_object = self.file_object.file.file
return self.file_object.file
except IOError:
return None
def get_icon(self):
return self.file_object.get_icon()
def get_thumbnail(self):
website = self.dir.get_root().filemanager.get().website.get()
return self.file_object.get_thumbnail(website)
def get_thumbnail_icon(self):
website = self.dir.get_root().filemanager.get().website.get()
return self.file_object.get_thumbnail_icon(website)
def get_absolute_url(self):
return self.file_object.get_absolute_url()
def get_selector_button(self):
return self.file_object.get_selector_button()
def rename(self, new_name):
file_field_object = self.file()
if not file_field_object:
return None
root, ext = os.path.splitext(new_name)
new_filename = "%s_%d%s" % (slugify(root), self.id, ext)
old_path = file_field_object.path
new_path = "%s%s" % (os.path.join(file_field_object.path.strip(file_field_object.path.split("/")[-1])), new_filename)
media_root = self.dir.get_root().filemanager.get().website.get().media_root()
shutil.move(old_path, new_path)
self.name = "%s%s" % (slugify(root), ext)
file_field_object.name = os.path.join(media_root, new_filename)
self.file_object.save()
self.save()
self.file_object.rename(old_path)
        # Renaming of thumbnails is handled by the file_object.rename call above.
def delete(self, *args, **kwargs):
"""
Delete a file and all thumbnail associated
"""
# Delete file
self.file_object.delete()
super(File, self).delete(*args, **kwargs)
class AbstractFile(models.Model):
file = models.FileField(upload_to="file_manager/", max_length=255)
def get_icon(self):
return '<i class="icon-file icon-white"></i>'
def get_thumbnail(self, website):
return "<p>Aucun apercu disponible</p>"
def get_thumbnail_icon(self, website):
return '<img src="%sadmin/images/glyphicons/313_white.png" alt="%s" />' % (settings.STATIC_URL, self.file.name)
def get_absolute_url(self):
return os.path.join(settings.MEDIA_URL, self.file.name)
def delete(self, *args, **kwargs):
self.file.delete()
super(AbstractFile, self).delete(*args, **kwargs)
def get_selector_button(self):
"""
Return selector button with multiple action in some case
"""
html = render_to_string('administration/file_manager/selector/selector_button.html',
{'absolute_url': self.get_absolute_url(),})
return html
def rename(self, old_path):
"""
No need to do any rename in a classic file type.
"""
pass
class Meta:
abstract = True
class Image(AbstractFile):
#
def get_icon(self):
return '<i class="icon-picture icon-white"></i>'
#
def get_thumbnail(self, website):
html = render_to_string('administration/file_manager/thumbnail/image.html',
{'file': self.file,
'website': website})
return html
def get_thumbnail_icon(self, website):
html = render_to_string('administration/file_manager/thumbnail/image_icon.html',
{'file': self.file,
'website': website})
return html
def delete(self, *args, **kwargs):
# Delete Thumbnail
root, ext = os.path.splitext(self.file.path)
image_list = []
for label in settings.VERSIONS:
image_list.append("%s_%s%s" % (root, label, ext))
for file in image_list:
try:
os.remove(file)
except:
pass
self.file.delete()
super(AbstractFile, self).delete(*args, **kwargs)
def rename(self, old_path):
"""
Rename all thumbnail linked to this image
"""
old_root, old_ext = os.path.splitext(old_path)
new_root, new_ext = os.path.splitext(self.file.path)
for label in settings.VERSIONS:
try:
shutil.move("%s_%s%s" % (old_root, label, old_ext), "%s_%s%s" % (new_root, label, new_ext))
except Exception,e:
pass
def get_selector_button(self):
"""
Return selector button with multiple action in some case
"""
image_list = []
for version in settings.VERSIONS:
image_list.append([version, settings.VERSIONS[version]['verbose_name']])
html = render_to_string('administration/file_manager/selector/selector_button_images.html',
{'absolute_url': self.get_absolute_url(),
'versions': image_list,})
return html
class Meta:
verbose_name = _(u"Image")
verbose_name_plural = _(u'Images')
class Document(AbstractFile):
#
class Meta:
verbose_name = _(u"Document")
verbose_name_plural = _(u'Documents')
class Audio(AbstractFile):
#
def get_icon(self):
return '<i class="icon-music icon-white"></i>'
class Meta:
verbose_name = _(u"Audio")
verbose_name_plural = _(u'Audios')
#
class Archive(AbstractFile):
#
class Meta:
verbose_name = _(u"Archive")
verbose_name_plural = _(u'Archives')
#
class Other(AbstractFile):
#
class Meta:
verbose_name = _(u"Other")
verbose_name_plural = _(u'Others')
|
{
"content_hash": "9389d3afb079df123a403e56ca07b006",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 125,
"avg_line_length": 31.61198738170347,
"alnum_prop": 0.5425606226923461,
"repo_name": "makinacorpus/ionyweb",
"id": "5be27c21c830d90b029e69648e704a4d50c44f4c",
"size": "10045",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ionyweb/file_manager/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "124754"
},
{
"name": "JavaScript",
"bytes": "260880"
},
{
"name": "Python",
"bytes": "1024305"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2014, Philipp Krähenbühl
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Stanford University nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY Philipp Krähenbühl ''AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL Philipp Krähenbühl BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from pylab import *
from gop import *
import numpy as np
from util import *
from sys import argv
from time import time
#prop = proposals.Proposal( setupBaseline( 130, 5, 0.8 ) )
#prop = proposals.Proposal( setupBaseline( 150, 7, 0.85 ) )
prop = proposals.Proposal( setupLearned( 140, 4, 0.8 ) )
#prop = proposals.Proposal( setupLearned( 160, 6, 0.85 ) )
detector = contour.MultiScaleStructuredForest()
detector.load( "../data/sf.dat" )
for im in argv[1:]:
t0 = time()
s = segmentation.geodesicKMeans( imgproc.imread( im ), detector, 1000 )
t1 = time()
b = prop.propose( s )
t2 = time()
# If you just want the boxes use
boxes = s.maskToBox( b )
print( "Generated %d proposals in %0.2fs (OverSeg: %0.2fs, Prop: %0.2fs)"%(b.shape[0],t2-t0,t1-t0,t2-t1) )
figure()
for i in range(min(20,b.shape[0])):
im = np.array( s.image )
im[ b[i,s.s] ] = (255,0,0)
ax = subplot( 4, 5, i+1 )
ax.imshow( im )
# Draw the bounding box
from matplotlib.patches import FancyBboxPatch
ax.add_patch( FancyBboxPatch( (boxes[i,0],boxes[i,1]), boxes[i,2]-boxes[i,0], boxes[i,3]-boxes[i,1], boxstyle="square,pad=0.", ec="b", fc="none", lw=2) )
show()
|
{
"content_hash": "2ce5836651544edd5760ffd30b6facb0",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 155,
"avg_line_length": 45.36666666666667,
"alnum_prop": 0.7134459955914768,
"repo_name": "LiZimo/FuncFlow",
"id": "eaa00fdfa48e231cd63e480ef8b90552742155a2",
"size": "2750",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "external/gop_1.3/src/example.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Matlab",
"bytes": "115858"
}
],
"symlink_target": ""
}
|
import os
import time
import pandas as pd
import numpy as np
import tsam.timeseriesaggregation as tsam
def test_hierarchical():
raw = pd.read_csv(
os.path.join(os.path.dirname(__file__), "..", "examples", "testdata.csv"),
index_col=0,
)
orig_raw = pd.read_csv(
os.path.join(
os.path.dirname(__file__),
"..",
"examples",
"results",
"testperiods_hierarchical.csv",
),
index_col=[0, 1],
)
starttime = time.time()
aggregation = tsam.TimeSeriesAggregation(
raw,
noTypicalPeriods=8,
hoursPerPeriod=24,
clusterMethod="hierarchical",
extremePeriodMethod="new_cluster_center",
addPeakMin=["T"],
addPeakMax=["Load"],
)
typPeriods = aggregation.createTypicalPeriods()
print("Clustering took " + str(time.time() - starttime))
    # sort the typical days to avoid assertion errors caused by a different ordering
sortedDaysOrig = orig_raw.sum(axis=0, level=0).sort_values("GHI").index
sortedDaysTest = typPeriods.sum(axis=0, level=0).sort_values("GHI").index
# rearange their order
orig = orig_raw[typPeriods.columns].unstack().loc[sortedDaysOrig, :].stack()
test = typPeriods.unstack().loc[sortedDaysTest, :].stack()
np.testing.assert_array_almost_equal(orig.values, test.values, decimal=4)
if __name__ == "__main__":
test_hierarchical()
|
{
"content_hash": "d6b1162ad9fce10f60b47332a12ca9c5",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 84,
"avg_line_length": 26.071428571428573,
"alnum_prop": 0.6143835616438356,
"repo_name": "FZJ-IEK3-VSA/tsam",
"id": "7025ce3b32f7632aaf08939304343d895d3d2648",
"size": "1460",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_hierarchical.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "159851"
}
],
"symlink_target": ""
}
|
"""
A Python package for recipe parsing and management.
"""
import yaml
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from .exceptions import LoadError, ParseError, PyprikaError, FieldError # noqa
from .ingredient import Ingredient # noqa
from .quantity import Quantity # noqa
from .recipe import Recipe
from .version import __author__, __version__ # noqa
def load(fp, loader=None, **kw):
""" Load ``fp``, a file-like object
The file is assumed to be a pyprika-compliant YAML document. If the
document contains a sequence, a list of ``Recipe`` objects will be
returned. Otherwise, a single ``Recipe`` object should be returned.
Note that this function wraps the underlying exceptions thrown by
:meth:`Recipe.from_dict` under the assumption it is due to a malformed
document, but the original traceback is preserved.
:param file-like fp: the file-like object containing the document to load
:param callable loader: takes one positional argument and optional
arguments and returns a dict (defaults to
``yaml.load``)
:param kw: passed through to loader
:raises LoadError: if there was an error in the loading of the document,
usually indicative of a syntax error
:returns: the recipe data contained in the stream
:rtype: :class:`Recipe` or list of :class:`Recipe`
"""
loader = loader or yaml.load
try:
        d = loader(fp, **kw)
if isinstance(d, (tuple, list)):
return [Recipe.from_dict(x) for x in d]
elif isinstance(d, dict):
return Recipe.from_dict(d)
else:
raise LoadError('Recipe did not decode as expected (got %s)' %
type(d).__name__)
except PyprikaError as e:
raise LoadError(*e.args, cause=e)
def loads(data, loader=None, **kw):
""" Load recipe from string data.
This wraps ``data`` in a :class:`cString.StringIO` and calls :func:`load`
on it.
See :func:`load` for more information.
:param str data: recipe document data
:returns: the recipe data contained in ``data``
:rtype: :class:`Recipe` or list of :class:`Recipe`
"""
return load(StringIO(data), loader=loader, **kw)
def dump(recipe, fp, dumper=None, **kw):
""" Dump recipe to a file-like object
:param Recipe recipe: the recipe to dump
:param file-like fp: the file stream to dump to
:param callable dumper: a callable which takes two positional arguments,
the first a dict and the second a file stream, and
optional keyword arguments and encodes the recipe
to the file stream (defaults to yaml.dump)
:param kw: passed through to dumper
"""
dumper = dumper or yaml.dump
d = recipe.to_dict(serialize=True)
dumper(d, fp, **kw)
def dumps(recipe, dumper=None, **kw):
""" Dump recipe object as a string.
This is a convenience method to dump to a StringIO object.
See :func:`dump` for parameter details.
:returns: recipe encoded as a string
:rtype: str
"""
fp = StringIO()
dump(recipe, fp, dumper=dumper, **kw)
return fp.getvalue()
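# Round-trip sketch using only the helpers above (the file name is
# hypothetical):
#
#     recipe = load(open('pancakes.yaml'))
#     text = dumps(recipe)
#     recipe_again = loads(text)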
|
{
"content_hash": "43dd9705057d7e73818490ed3910dc6b",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 79,
"avg_line_length": 34.260416666666664,
"alnum_prop": 0.6397081179689875,
"repo_name": "OEP/pyprika",
"id": "88fcbd1db9da3e509077f5b7cc6ecfe05a708ea8",
"size": "3289",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyprika/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6870"
},
{
"name": "Python",
"bytes": "41734"
}
],
"symlink_target": ""
}
|
from unittest import mock
import pytest
from praw.exceptions import ClientException, RedditAPIException
from praw.models import Rule
from ... import IntegrationTest
class TestRule(IntegrationTest):
@property
def subreddit(self):
return self.reddit.subreddit(pytest.placeholders.test_subreddit)
def test_add_rule(self):
self.reddit.read_only = False
with self.use_cassette():
rule = self.subreddit.rules.mod.add(
short_name="PRAW Test",
kind="all",
description="Test by PRAW",
violation_reason="PTest",
)
assert rule.short_name == "PRAW Test"
assert rule.kind == "all"
assert rule.description == "Test by PRAW"
assert rule.violation_reason == "PTest"
def test_add_rule_without_violation_reason(self):
self.reddit.read_only = False
with self.use_cassette():
rule = self.subreddit.rules.mod.add(
short_name="PRAW Test 2", kind="comment"
)
assert rule.short_name == "PRAW Test 2"
assert rule.kind == "comment"
assert rule.description == ""
assert rule.violation_reason == "PRAW Test 2"
@mock.patch("time.sleep", return_value=None)
def test_delete_rule(self, _):
self.reddit.read_only = False
with self.use_cassette():
rules = list(self.subreddit.rules)
rule = rules[-1]
rule.mod.delete()
assert len(list(self.subreddit.rules)) == (len(rules) - 1)
def test_iter_rules(self):
with self.use_cassette():
for rule in self.subreddit.rules:
assert isinstance(rule, Rule)
@pytest.mark.filterwarnings("ignore", category=DeprecationWarning)
def test_iter_call(self):
with self.use_cassette():
assert self.subreddit.rules()["rules"][0]["short_name"] == "Test post 12"
def test_iter_rule_string(self):
with self.use_cassette("TestRule.test_iter_rules"):
rule = self.subreddit.rules["PRAW Test"]
assert isinstance(rule, Rule)
rule._fetch()
assert rule.kind
def test_iter_rule_invalid(self):
with self.use_cassette("TestRule.test_iter_rules"):
rule = self.subreddit.rules["fake rule"]
with pytest.raises(ClientException) as excinfo:
rule.kind
assert (
excinfo.value.args[0]
== f"Subreddit {self.subreddit} does not have the rule fake rule"
)
def test_iter_rule_int(self):
with self.use_cassette("TestRule.test_iter_rules"):
assert isinstance(self.subreddit.rules[0], Rule)
def test_iter_rule_negative_int(self):
with self.use_cassette("TestRule.test_iter_rules"):
assert isinstance(self.subreddit.rules[-1], Rule)
def test_iter_rule_slice(self):
with self.use_cassette("TestRule.test_iter_rules"):
rules = self.subreddit.rules[-3:]
assert len(rules) == 3
for rule in rules:
assert isinstance(rule, Rule)
@mock.patch("time.sleep", return_value=None)
def test_reorder_rules(self, _):
self.reddit.read_only = False
with self.use_cassette():
rule_list = list(self.subreddit.rules)
reordered = rule_list[2:3] + rule_list[0:2] + rule_list[3:]
rule_info = {rule.short_name: rule for rule in rule_list}
self.subreddit.rules.mod.reorder(reordered)
new_rules = list(self.subreddit.rules)
assert new_rules != rule_list
for rule in new_rules:
assert rule_info[rule.short_name] == rule
@mock.patch("time.sleep", return_value=None)
def test_reorder_rules_double(self, _):
self.reddit.read_only = False
with self.use_cassette():
rule_list = list(self.subreddit.rules)
with pytest.raises(RedditAPIException):
self.subreddit.rules.mod.reorder(rule_list + rule_list[0:1])
@mock.patch("time.sleep", return_value=None)
def test_reorder_rules_empty(self, _):
self.reddit.read_only = False
with self.use_cassette():
with pytest.raises(RedditAPIException):
self.subreddit.rules.mod.reorder([])
@mock.patch("time.sleep", return_value=None)
def test_reorder_rules_no_reorder(self, _):
self.reddit.read_only = False
with self.use_cassette():
rule_list = list(self.subreddit.rules)
assert self.subreddit.rules.mod.reorder(rule_list) == rule_list
@mock.patch("time.sleep", return_value=None)
def test_reorder_rules_omit(self, _):
self.reddit.read_only = False
with self.use_cassette():
rule_list = list(self.subreddit.rules)
with pytest.raises(RedditAPIException):
self.subreddit.rules.mod.reorder(rule_list[:-1])
@mock.patch("time.sleep", return_value=None)
def test_update_rule(self, _):
self.reddit.read_only = False
with self.use_cassette():
rule = self.subreddit.rules[0]
rule2 = rule.mod.update(
description="Updated rule",
kind="link",
violation_reason="PUpdate",
)
assert rule.description != rule2.description
assert rule2.description == "Updated rule"
assert rule.kind != rule2.kind
assert rule2.kind == "link"
assert rule.violation_reason != rule2.violation_reason
assert rule2.violation_reason == "PUpdate"
@mock.patch("time.sleep", return_value=None)
def test_update_rule_short_name(self, _):
self.reddit.read_only = False
with self.use_cassette():
rule = self.subreddit.rules[1]
rule2 = rule.mod.update(
short_name="PRAW Update",
description="Updated rule",
kind="comment",
violation_reason="PUpdate",
)
assert rule != rule2
assert rule2.short_name == "PRAW Update"
assert rule.description != rule2.description
assert rule2.description == "Updated rule"
assert rule.kind != rule2.kind
assert rule2.kind == "comment"
assert rule.violation_reason != rule2.violation_reason
assert rule2.violation_reason == "PUpdate"
for new_rule in self.subreddit.rules:
assert new_rule.short_name != rule.short_name
@mock.patch("time.sleep", return_value=None)
def test_update_rule_no_params(self, _):
self.reddit.read_only = False
with self.use_cassette():
rule = self.subreddit.rules[1]
rule2 = rule.mod.update()
for attr in (
"created_utc",
"description",
"kind",
"priority",
"short_name",
"subreddit",
"violation_reason",
):
assert getattr(rule, attr) == getattr(rule2, attr)
|
{
"content_hash": "47106c1fb4b5eee85796951c0445c377",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 85,
"avg_line_length": 38.17989417989418,
"alnum_prop": 0.575110864745011,
"repo_name": "praw-dev/praw",
"id": "f189b2c7db29a2fd4c86f06ba858b5a6678900a4",
"size": "7216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/models/reddit/test_rules.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1145273"
},
{
"name": "Shell",
"bytes": "189"
}
],
"symlink_target": ""
}
|
"""
Tests for the zipline.finance package
"""
import itertools
import operator
import pytz
from unittest import TestCase
from datetime import datetime, timedelta
import numpy as np
from nose.tools import timed
from six.moves import range
import zipline.protocol
from zipline.protocol import Event, DATASOURCE_TYPE
import zipline.utils.factory as factory
import zipline.utils.simfactory as simfactory
from zipline.finance.blotter import Blotter
from zipline.gens.composites import date_sorted_sources
from zipline.finance import trading
from zipline.finance.trading import TradingEnvironment
from zipline.finance.execution import MarketOrder, LimitOrder
from zipline.finance.trading import SimulationParameters
from zipline.finance.performance import PerformanceTracker
from zipline.utils.test_utils import(
setup_logger,
teardown_logger,
assert_single_position
)
DEFAULT_TIMEOUT = 15 # seconds
EXTENDED_TIMEOUT = 90
class FinanceTestCase(TestCase):
def setUp(self):
self.zipline_test_config = {
'sid': 133,
}
setup_logger(self)
def tearDown(self):
teardown_logger(self)
@timed(DEFAULT_TIMEOUT)
def test_factory_daily(self):
sim_params = factory.create_simulation_parameters()
trade_source = factory.create_daily_trade_source(
[133],
200,
sim_params
)
prev = None
for trade in trade_source:
if prev:
self.assertTrue(trade.dt > prev.dt)
prev = trade
@timed(EXTENDED_TIMEOUT)
def test_full_zipline(self):
# provide enough trades to ensure all orders are filled.
self.zipline_test_config['order_count'] = 100
# making a small order amount, so that each order is filled
# in a single transaction, and txn_count == order_count.
self.zipline_test_config['order_amount'] = 25
# No transactions can be filled on the first trade, so
# we have one extra trade to ensure all orders are filled.
self.zipline_test_config['trade_count'] = 101
full_zipline = simfactory.create_test_zipline(
**self.zipline_test_config)
assert_single_position(self, full_zipline)
# TODO: write tests for short sales
# TODO: write a test to do massive buying or shorting.
@timed(DEFAULT_TIMEOUT)
def test_partially_filled_orders(self):
# create a scenario where order size and trade size are equal
# so that orders must be spread out over several trades.
params = {
'trade_count': 360,
'trade_amount': 100,
'trade_interval': timedelta(minutes=1),
'order_count': 2,
'order_amount': 100,
'order_interval': timedelta(minutes=1),
            # because we placed orders for 100 shares each, and the volume of
            # each trade is 100, the simulator should spread every order
            # across 4 transactions of 25 shares, 8 transactions in total.
'expected_txn_count': 8,
'expected_txn_volume': 2 * 100
}
self.transaction_sim(**params)
# same scenario, but with short sales
params2 = {
'trade_count': 360,
'trade_amount': 100,
'trade_interval': timedelta(minutes=1),
'order_count': 2,
'order_amount': -100,
'order_interval': timedelta(minutes=1),
'expected_txn_count': 8,
'expected_txn_volume': 2 * -100
}
self.transaction_sim(**params2)
@timed(DEFAULT_TIMEOUT)
def test_collapsing_orders(self):
# create a scenario where order.amount <<< trade.volume
# to test that several orders can be covered properly by one trade,
# but are represented by multiple transactions.
params1 = {
'trade_count': 6,
'trade_amount': 100,
'trade_interval': timedelta(hours=1),
'order_count': 24,
'order_amount': 1,
'order_interval': timedelta(minutes=1),
            # because each 1-share order is tiny relative to the trade volume,
            # every order should fill completely in a single transaction,
            # giving 24 transactions in total.
'expected_txn_count': 24,
'expected_txn_volume': 24
}
self.transaction_sim(**params1)
# second verse, same as the first. except short!
params2 = {
'trade_count': 6,
'trade_amount': 100,
'trade_interval': timedelta(hours=1),
'order_count': 24,
'order_amount': -1,
'order_interval': timedelta(minutes=1),
'expected_txn_count': 24,
'expected_txn_volume': -24
}
self.transaction_sim(**params2)
# Runs the collapsed trades over daily trade intervals.
# Ensuring that our delay works for daily intervals as well.
params3 = {
'trade_count': 6,
'trade_amount': 100,
'trade_interval': timedelta(days=1),
'order_count': 24,
'order_amount': 1,
'order_interval': timedelta(minutes=1),
'expected_txn_count': 24,
'expected_txn_volume': 24
}
self.transaction_sim(**params3)
@timed(DEFAULT_TIMEOUT)
def test_alternating_long_short(self):
# create a scenario where we alternate buys and sells
params1 = {
'trade_count': int(6.5 * 60 * 4),
'trade_amount': 100,
'trade_interval': timedelta(minutes=1),
'order_count': 4,
'order_amount': 10,
'order_interval': timedelta(hours=24),
'alternate': True,
'complete_fill': True,
'expected_txn_count': 4,
'expected_txn_volume': 0 # equal buys and sells
}
self.transaction_sim(**params1)
def transaction_sim(self, **params):
""" This is a utility method that asserts expected
results for conversion of orders to transactions given a
trade history"""
trade_count = params['trade_count']
trade_interval = params['trade_interval']
order_count = params['order_count']
order_amount = params['order_amount']
order_interval = params['order_interval']
expected_txn_count = params['expected_txn_count']
expected_txn_volume = params['expected_txn_volume']
# optional parameters
# ---------------------
# if present, alternate between long and short sales
alternate = params.get('alternate')
# if present, expect transaction amounts to match orders exactly.
complete_fill = params.get('complete_fill')
sid = 1
sim_params = factory.create_simulation_parameters()
blotter = Blotter()
price = [10.1] * trade_count
volume = [100] * trade_count
start_date = sim_params.first_open
generated_trades = factory.create_trade_history(
sid,
price,
volume,
trade_interval,
sim_params
)
if alternate:
alternator = -1
else:
alternator = 1
order_date = start_date
for i in range(order_count):
blotter.set_date(order_date)
blotter.order(sid, order_amount * alternator ** i, MarketOrder())
order_date = order_date + order_interval
            # move after-market orders to just after the next market open.
if order_date.hour >= 21:
if order_date.minute >= 00:
order_date = order_date + timedelta(days=1)
order_date = order_date.replace(hour=14, minute=30)
# there should now be one open order list stored under the sid
oo = blotter.open_orders
self.assertEqual(len(oo), 1)
self.assertTrue(sid in oo)
order_list = oo[sid]
self.assertEqual(order_count, len(order_list))
for i in range(order_count):
order = order_list[i]
self.assertEqual(order.sid, sid)
self.assertEqual(order.amount, order_amount * alternator ** i)
tracker = PerformanceTracker(sim_params)
benchmark_returns = [
Event({'dt': dt,
'returns': ret,
'type':
zipline.protocol.DATASOURCE_TYPE.BENCHMARK,
'source_id': 'benchmarks'})
for dt, ret in trading.environment.benchmark_returns.iterkv()
if dt.date() >= sim_params.period_start.date()
and dt.date() <= sim_params.period_end.date()
]
generated_events = date_sorted_sources(generated_trades,
benchmark_returns)
# this approximates the loop inside TradingSimulationClient
transactions = []
for dt, events in itertools.groupby(generated_events,
operator.attrgetter('dt')):
for event in events:
if event.type == DATASOURCE_TYPE.TRADE:
for txn, order in blotter.process_trade(event):
transactions.append(txn)
tracker.process_event(txn)
tracker.process_event(event)
if complete_fill:
self.assertEqual(len(transactions), len(order_list))
total_volume = 0
for i in range(len(transactions)):
txn = transactions[i]
total_volume += txn.amount
if complete_fill:
order = order_list[i]
self.assertEqual(order.amount, txn.amount)
self.assertEqual(total_volume, expected_txn_volume)
self.assertEqual(len(transactions), expected_txn_count)
cumulative_pos = tracker.cumulative_performance.positions[sid]
self.assertEqual(total_volume, cumulative_pos.amount)
# the open orders should now be empty
oo = blotter.open_orders
self.assertTrue(sid in oo)
order_list = oo[sid]
self.assertEqual(0, len(order_list))
def test_blotter_processes_splits(self):
sim_params = factory.create_simulation_parameters()
blotter = Blotter()
blotter.set_date(sim_params.period_start)
# set up two open limit orders with very low limit prices,
# one for sid 1 and one for sid 2
blotter.order(1, 100, LimitOrder(10))
blotter.order(2, 100, LimitOrder(10))
# send in a split for sid 2
split_event = factory.create_split(2, 0.33333,
sim_params.period_start +
timedelta(days=1))
blotter.process_split(split_event)
for sid in [1, 2]:
order_lists = blotter.open_orders[sid]
self.assertIsNotNone(order_lists)
self.assertEqual(1, len(order_lists))
aapl_order = blotter.open_orders[1][0].to_dict()
fls_order = blotter.open_orders[2][0].to_dict()
# make sure the aapl order didn't change
self.assertEqual(100, aapl_order['amount'])
self.assertEqual(10, aapl_order['limit'])
self.assertEqual(1, aapl_order['sid'])
# make sure the fls order did change
# to 300 shares at 3.33
self.assertEqual(300, fls_order['amount'])
self.assertEqual(3.33, fls_order['limit'])
self.assertEqual(2, fls_order['sid'])
class TradingEnvironmentTestCase(TestCase):
"""
Tests for date management utilities in zipline.finance.trading.
"""
def setUp(self):
setup_logger(self)
def tearDown(self):
teardown_logger(self)
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment()
@timed(DEFAULT_TIMEOUT)
def test_is_trading_day(self):
# holidays taken from: http://www.nyse.com/press/1191407641943.html
new_years = datetime(2008, 1, 1, tzinfo=pytz.utc)
mlk_day = datetime(2008, 1, 21, tzinfo=pytz.utc)
presidents = datetime(2008, 2, 18, tzinfo=pytz.utc)
good_friday = datetime(2008, 3, 21, tzinfo=pytz.utc)
memorial_day = datetime(2008, 5, 26, tzinfo=pytz.utc)
july_4th = datetime(2008, 7, 4, tzinfo=pytz.utc)
labor_day = datetime(2008, 9, 1, tzinfo=pytz.utc)
tgiving = datetime(2008, 11, 27, tzinfo=pytz.utc)
        christmas = datetime(2008, 12, 25, tzinfo=pytz.utc)
a_saturday = datetime(2008, 8, 2, tzinfo=pytz.utc)
a_sunday = datetime(2008, 10, 12, tzinfo=pytz.utc)
holidays = [
new_years,
mlk_day,
presidents,
good_friday,
memorial_day,
july_4th,
labor_day,
tgiving,
christmas,
a_saturday,
a_sunday
]
for holiday in holidays:
self.assertTrue(not self.env.is_trading_day(holiday))
first_trading_day = datetime(2008, 1, 2, tzinfo=pytz.utc)
last_trading_day = datetime(2008, 12, 31, tzinfo=pytz.utc)
workdays = [first_trading_day, last_trading_day]
for workday in workdays:
self.assertTrue(self.env.is_trading_day(workday))
def test_simulation_parameters(self):
env = SimulationParameters(
period_start=datetime(2008, 1, 1, tzinfo=pytz.utc),
period_end=datetime(2008, 12, 31, tzinfo=pytz.utc),
capital_base=100000,
)
self.assertTrue(env.last_close.month == 12)
self.assertTrue(env.last_close.day == 31)
@timed(DEFAULT_TIMEOUT)
def test_sim_params_days_in_period(self):
# January 2008
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5
# 6 7 8 9 10 11 12
# 13 14 15 16 17 18 19
# 20 21 22 23 24 25 26
# 27 28 29 30 31
env = SimulationParameters(
period_start=datetime(2007, 12, 31, tzinfo=pytz.utc),
period_end=datetime(2008, 1, 7, tzinfo=pytz.utc),
capital_base=100000,
)
expected_trading_days = (
datetime(2007, 12, 31, tzinfo=pytz.utc),
# Skip new years
# holidays taken from: http://www.nyse.com/press/1191407641943.html
datetime(2008, 1, 2, tzinfo=pytz.utc),
datetime(2008, 1, 3, tzinfo=pytz.utc),
datetime(2008, 1, 4, tzinfo=pytz.utc),
# Skip Saturday
# Skip Sunday
datetime(2008, 1, 7, tzinfo=pytz.utc)
)
num_expected_trading_days = 5
self.assertEquals(num_expected_trading_days, env.days_in_period)
np.testing.assert_array_equal(expected_trading_days,
env.trading_days.tolist())
@timed(DEFAULT_TIMEOUT)
def test_market_minute_window(self):
# January 2008
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5
# 6 7 8 9 10 11 12
# 13 14 15 16 17 18 19
# 20 21 22 23 24 25 26
# 27 28 29 30 31
us_east = pytz.timezone('US/Eastern')
utc = pytz.utc
# 10:01 AM Eastern on January 7th..
start = us_east.localize(datetime(2008, 1, 7, 10, 1))
utc_start = start.astimezone(utc)
# Get the next 10 minutes
minutes = self.env.market_minute_window(
utc_start, 10,
)
self.assertEqual(len(minutes), 10)
for i in range(10):
self.assertEqual(minutes[i], utc_start + timedelta(minutes=i))
# Get the previous 10 minutes.
minutes = self.env.market_minute_window(
utc_start, 10, step=-1,
)
self.assertEqual(len(minutes), 10)
for i in range(10):
self.assertEqual(minutes[i], utc_start + timedelta(minutes=-i))
# Get the next 900 minutes, including utc_start, rolling over into the
# next two days.
# Should include:
# Today: 10:01 AM -> 4:00 PM (360 minutes)
# Tomorrow: 9:31 AM -> 4:00 PM (390 minutes, 750 total)
# Last Day: 9:31 AM -> 12:00 PM (150 minutes, 900 total)
minutes = self.env.market_minute_window(
utc_start, 900,
)
today = self.env.market_minutes_for_day(start)[30:]
tomorrow = self.env.market_minutes_for_day(
start + timedelta(days=1)
)
last_day = self.env.market_minutes_for_day(
start + timedelta(days=2))[:150]
self.assertEqual(len(minutes), 900)
self.assertEqual(minutes[0], utc_start)
self.assertTrue(all(today == minutes[:360]))
self.assertTrue(all(tomorrow == minutes[360:750]))
self.assertTrue(all(last_day == minutes[750:]))
# Get the previous 801 minutes, including utc_start, rolling over into
# Friday the 4th and Thursday the 3rd.
# Should include:
# Today: 10:01 AM -> 9:31 AM (31 minutes)
# Friday: 4:00 PM -> 9:31 AM (390 minutes, 421 total)
# Thursday: 4:00 PM -> 9:41 AM (380 minutes, 801 total)
minutes = self.env.market_minute_window(
utc_start, 801, step=-1,
)
today = self.env.market_minutes_for_day(start)[30::-1]
# minus an extra two days from each of these to account for the two
# weekend days we skipped
friday = self.env.market_minutes_for_day(
start + timedelta(days=-3),
)[::-1]
thursday = self.env.market_minutes_for_day(
start + timedelta(days=-4),
)[:9:-1]
self.assertEqual(len(minutes), 801)
self.assertEqual(minutes[0], utc_start)
self.assertTrue(all(today == minutes[:31]))
self.assertTrue(all(friday == minutes[31:421]))
self.assertTrue(all(thursday == minutes[421:]))
|
{
"content_hash": "54dcd8af8a5dc597fbca5f32c76ec223",
"timestamp": "",
"source": "github",
"line_count": 515,
"max_line_length": 79,
"avg_line_length": 34.84660194174757,
"alnum_prop": 0.5743898361751922,
"repo_name": "wavelets/zipline",
"id": "a9fa724d19c4ffa46fd098718080772ed1019052",
"size": "18529",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_finance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import os
from vee.commands.main import command, argument, group
from vee.environment import Environment
from vee.cli import style, style_note
from vee import log
from vee.package import Package
def describe(pkg, cache, depth=0):
print (' ' * depth) + style(pkg.name, 'blue'), pkg.id, style(
'***' if pkg.id in cache else str(pkg), faint=True)
if pkg.id in cache:
return
cache[pkg.id] = pkg
for dep in pkg.dependencies:
dep.resolve_existing()
describe(dep, cache, depth + 1)
@command(
argument('-e', '--environments', action='store_true'),
name='list',
)
def list_(args):
if args.environments:
list_environments(args)
return
else:
list_packages(args)
def list_environments(args):
home = args.assert_home()
con = home.db.connect()
cache = {}
for env in con.execute('SELECT * from environments ORDER by created_at ASC'):
print env['created_at'], style(env['name'], 'blue'), env['id']
# print list(row)
def list_packages(args):
home = args.assert_home()
con = home.db.connect()
cache = {}
for row in con.execute('SELECT * from packages ORDER by created_at DESC'):
if row['id'] in cache:
continue
pkg = Package(url='junk', home=home)
pkg.restore_from_row(row)
pkg._load_dependencies()
describe(pkg, cache)
|
{
"content_hash": "1f2ba230421b6dfaf68ee55b8881af40",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 81,
"avg_line_length": 20.565217391304348,
"alnum_prop": 0.6088794926004228,
"repo_name": "westernx/vee",
"id": "2268d821d0cf16d8ac5d5c87e58b82940d3faf0a",
"size": "1419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vee/commands/list.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "590"
},
{
"name": "Makefile",
"bytes": "75"
},
{
"name": "Python",
"bytes": "334426"
},
{
"name": "Ruby",
"bytes": "479"
},
{
"name": "Shell",
"bytes": "1027"
},
{
"name": "Smarty",
"bytes": "810"
}
],
"symlink_target": ""
}
|
"""
Example to show sending, receiving and parsing amqp annotated message(s) to Event Hubs.
"""
import os
from azure.eventhub import EventHubProducerClient, EventHubConsumerClient
from azure.eventhub.amqp import AmqpAnnotatedMessage, AmqpMessageBodyType
CONNECTION_STR = os.environ['EVENT_HUB_CONN_STR']
EVENTHUB_NAME = os.environ['EVENT_HUB_NAME']
def send_data_message(producer):
data_body = [b'aa', b'bb', b'cc']
application_properties = {"body_type": "data"}
delivery_annotations = {"delivery_annotation_key": "value"}
data_message = AmqpAnnotatedMessage(
data_body=data_body,
delivery_annotations=delivery_annotations,
application_properties=application_properties
)
batch = producer.create_batch()
batch.add(data_message)
producer.send_batch(batch)
print("Message of data body sent.")
def send_sequence_message(producer):
sequence_body = [b'message', 123.456, True]
footer = {'footer_key': 'footer_value'}
properties = {"subject": "sequence"}
application_properties = {"body_type": "sequence"}
sequence_message = AmqpAnnotatedMessage(
sequence_body=sequence_body,
footer=footer,
properties=properties,
application_properties=application_properties
)
producer.send_batch([sequence_message])
print("Message of sequence body sent.")
def send_value_message(producer):
value_body = {b"key": [-123, b'data', False]}
header = {"priority": 10}
annotations = {"annotation_key": "value"}
application_properties = {"body_type": "value"}
value_message = AmqpAnnotatedMessage(
value_body=value_body,
header=header,
annotations=annotations,
application_properties=application_properties
)
producer.send_batch([value_message])
print("Message of value body sent.")
def on_event(partition_context, event):
# Put your code here.
    # If the operation is I/O intensive, using multiple threads will give better performance.
print("Received event from partition: {}".format(partition_context.partition_id))
raw_amqp_message = event.raw_amqp_message
if raw_amqp_message.body_type == AmqpMessageBodyType.DATA:
print("Message of data body received. Body is:")
for data_section in raw_amqp_message.body:
print(data_section)
elif raw_amqp_message.body_type == AmqpMessageBodyType.SEQUENCE:
print("Message of sequence body received. Body is:")
for sequence_section in raw_amqp_message.body:
print(sequence_section)
elif raw_amqp_message.body_type == AmqpMessageBodyType.VALUE:
print("Message of value body received. Body is:")
print(raw_amqp_message.body)
def receive_and_parse_message(consumer):
with consumer:
try:
consumer.receive(
on_event=on_event,
starting_position="-1", # "-1" is from the beginning of the partition.
)
except KeyboardInterrupt:
print('Stopped receiving.')
producer = EventHubProducerClient.from_connection_string(
conn_str=CONNECTION_STR,
eventhub_name=EVENTHUB_NAME
)
with producer:
send_data_message(producer)
send_sequence_message(producer)
send_value_message(producer)
consumer = EventHubConsumerClient.from_connection_string(
conn_str=CONNECTION_STR,
consumer_group='$Default',
eventhub_name=EVENTHUB_NAME,
)
receive_and_parse_message(consumer)
|
{
"content_hash": "55bf002c282df2bc4e44e2285b0501e9",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 87,
"avg_line_length": 33.98039215686274,
"alnum_prop": 0.6846508944027697,
"repo_name": "Azure/azure-sdk-for-python",
"id": "3ce87b62ece2d4d8e5da4185b48344735f9432f7",
"size": "3835",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/eventhub/azure-eventhub/samples/sync_samples/send_and_receive_amqp_annotated_message.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from operator import eq
from rpython.rlib import jit, objectmodel
from rpython.rlib.rarithmetic import r_int, r_uint, intmask
MASK_32 = r_uint(0xFFFFFFFF)
def mask(hash, shift):
return (hash >> shift) & 0x01f
def bitpos(hash, shift):
return (1 << mask(hash, shift)) & MASK_32
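# Illustrative note (not in the original source): the hash is consumed five bits at
# a time, so every trie level branches 32 ways. mask() picks the 5-bit chunk for the
# current level and bitpos() turns it into a one-hot bit in a 32-bit bitmap, e.g.
# mask(0b10110, 0) == 22 and bitpos(0b10110, 0) == 1 << 22.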
def bit_count(i):
# TODO: See about implementing this via the POPCNT instruction on
# supporting architectures
assert isinstance(i, r_uint)
i = i - ((i >> 1) & r_uint(0x55555555))
i = (i & r_uint(0x33333333)) + ((i >> 2) & r_uint(0x33333333))
return (((i + (i >> 4) & r_uint(0xF0F0F0F)) * r_uint(0x1010101)) & r_uint(0xffffffff)) >> 24
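# The bit twiddling above is the standard SWAR popcount (sum pairs, then nibbles,
# then multiply to collect the byte sums into the top byte); it is what maps a
# one-hot bitmap bit to a compact array index below, e.g. bit_count(r_uint(0b101101)) == 4.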
def validate_persistent_hash(ht):
"NOT RPYTHON"
root = ht._root
assert (root is None and ht._cnt == 0) or (root is not None and ht._cnt == root._size)
if root is not None:
validate_nodes(root)
def validate_nodes(root):
"NOT RPYTHON"
subnodes = root._subnodes()
entries = root._entries()
subnode_count = sum((node._size for node in subnodes))
total = subnode_count + len(entries)
assert root._size == total
for node in subnodes:
validate_nodes(node)
class Box(object):
_attrs_ = ['_val']
_settled_ = True
def __init__(self):
self._val = False
@objectmodel.always_inline
def reset(self):
self._val = False
@objectmodel.always_inline
def add_leaf(self):
self._val = True
@objectmodel.always_inline
def adjust_size(self, size):
return size + int(self._val)
def make_persistent_hash_type(
super = object,
base = None,
keytype = None,
valtype = None,
name = "PersistentHashMap",
hashfun = hash,
equal = eq):
if base is None:
base = super
if keytype is None:
keytype = super
if valtype is None:
valtype = super
@objectmodel.always_inline
def restrict_key_type(k):
if keytype is not None:
assert k is None or isinstance(k, keytype)
return k
@objectmodel.always_inline
def restrict_val_type(v):
if valtype is not None:
assert v is None or isinstance(v, valtype)
return v
@objectmodel.always_inline
def restrict_types(k, v):
return restrict_key_type(k), restrict_val_type(v)
class Missing(valtype):
def __init__(self):
pass
MISSING = Missing()
class INode(super):
_attrs_ = ['_size']
_immutable_fields_ = ['_size']
_settled_ = True
def __init__(self, size):
self._size = size
def assoc_inode(self, shift, hash_val, key, val, added_leaf):
pass
def without(self, shift, hash, key):
pass
def _get_item_node(self, index):
pass
def _validate_node(self):
"NOT RPYTHON"
pass
def _entries(self):
"NOT RPYTHON"
pass
def _subnodes(self):
"NOT RPYTHON"
pass
INode.__name__ = "INode(%s)" % name
class BitmapIndexedNode(INode):
_attrs_ = ['_bitmap', '_array']
_immutable_fields_ = ['_bitmap', '_array[*]']
_settled_ = True
def __init__(self, bitmap, array, size):
INode.__init__(self, size)
self._bitmap = bitmap
self._array = array
def _entries(self):
"NOT RPYTHON"
entries = []
for x in range(len(self._array) / 2):
key_or_none, val_or_node = self.entry(x)
if key_or_none is not None or val_or_node is None:
entries.append((key_or_none, val_or_node))
return entries
def _subnodes(self):
"NOT RPYTHON"
subnodes = []
for x in range(len(self._array) / 2):
key_or_none, val_or_node = self.entry(x)
if key_or_none is None and val_or_node is not None:
assert isinstance(val_or_node, INode)
subnodes.append(val_or_node)
return subnodes
@objectmodel.always_inline
def _get_item_node(self, index):
for x in range(len(self._array) / 2):
key_or_none, val_or_node = self.entry(x)
if key_or_none is None and val_or_node is not None:
assert isinstance(val_or_node, INode)
size = val_or_node._size
if index < size:
return index, key_or_none, val_or_node
index -= size
else:
if index == 0:
return -1, key_or_none, val_or_node
index -= 1
assert False
def index(self, bit):
return bit_count(self._bitmap & (bit - 1))
@objectmodel.always_inline
def entry(self, index):
""" Helper function to extract the ith key/value pair """
base = index * 2
key = self._array[base]
val = self._array[base + 1]
return key, val
@jit.dont_look_inside
def assoc_inode(self, shift, hash_val, key, val, added_leaf):
bit = bitpos(hash_val, shift)
idx = self.index(bit)
if (self._bitmap & bit) != 0:
key_or_null, val_or_node = self.entry(idx)
if key_or_null is None:
assert isinstance(val_or_node, INode)
n = val_or_node.assoc_inode(shift + 5, hash_val & MASK_32, key, val, added_leaf)
if n is val_or_node:
return self
new_array = clone_and_set(self._array, 2 * idx + 1, n)
newsize = added_leaf.adjust_size(self._size)
return BitmapIndexedNode(self._bitmap, new_array, newsize)
if equal(key, key_or_null):
# Entry already exists for this key
if val is val_or_node:
return self
new_array = clone_and_set(self._array, 2 * idx + 1, val)
return BitmapIndexedNode(self._bitmap, new_array, self._size)
added_leaf.add_leaf()
subnode = create_node(shift + 5, key_or_null, val_or_node, hash_val, key, val)
new_array = clone_and_set2(self._array, 2 * idx, None, 2 * idx + 1, subnode)
return BitmapIndexedNode(self._bitmap, new_array, self._size + 1)
else:
n = bit_count(self._bitmap)
if n >= 16:
nodes = [None] * 32
jdx = mask(hash_val, shift)
nodes[jdx] = EmptyNode.assoc_inode(shift + 5, hash_val, key, val, added_leaf)
j = 0
for i in range(32):
if (self._bitmap >> i) & 1 != 0:
if self._array[j] is None:
# Just copy the subnode
nodes[i] = self._array[j + 1]
else:
# Otherwise we have a key/value pair
nodes[i] = EmptyNode.assoc_inode(shift + 5, hashfun(self._array[j]),
self._array[j], self._array[j + 1], added_leaf)
j += 2
newsize = added_leaf.adjust_size(self._size)
return ArrayNode(n + 1, nodes, newsize)
else:
new_array = [None] * (2 * (n + 1))
list_copy(self._array, 0, new_array, 0, 2 * idx)
new_array[2 * idx] = key
new_array[2 * idx + 1] = val
added_leaf.add_leaf()
list_copy(self._array, 2 * idx, new_array, 2 * (idx + 1), 2 * (n - idx))
return BitmapIndexedNode(self._bitmap | bit, new_array, self._size + 1)
@objectmodel.always_inline
def find_step(self, shift, hash_val, key, not_found):
bit = bitpos(hash_val, shift)
if (self._bitmap & bit) == 0:
return not_found
idx = self.index(bit)
key_or_null, val_or_node = self.entry(idx)
if key_or_null is None:
return val_or_node
if equal(key, key_or_null):
return val_or_node
return not_found
@jit.dont_look_inside
def without_inode(self, shift, hash, key):
bit = bitpos(hash, shift)
if self._bitmap & bit == 0:
return self
idx = self.index(bit)
key_or_none, val_or_node = self.entry(idx)
if key_or_none is None:
assert isinstance(val_or_node, INode)
n = val_or_node.without_inode(shift + 5, hash, key)
if n is val_or_node:
return self
if n is not None:
new_array = clone_and_set(self._array, 2 * idx + 1, n)
return BitmapIndexedNode(self._bitmap, new_array, self._size - 1)
if self._bitmap == bit:
return None
new_array = remove_pair(self._array, idx)
return BitmapIndexedNode(self._bitmap ^ bit, new_array, self._size - 1)
if equal(key, key_or_none):
new_array = remove_pair(self._array, idx)
return BitmapIndexedNode(self._bitmap ^ bit, new_array, self._size - 1)
return self
BitmapIndexedNode.__name__ = "BitmapIndexedNode(%s)" % name
EmptyNode = BitmapIndexedNode(r_uint(0), [], 0)
class ArrayNode(INode):
_attrs_ = ['_cnt', '_array']
_immutable_fields_ = ['_cnt', '_array[*]']
_settled_ = True
def __init__(self, cnt, array, size):
INode.__init__(self, size)
self._cnt = cnt
self._array = array
def _entries(self):
"NOT RPYTHON"
return []
def _subnodes(self):
"NOT RPYTHON"
return [node for node in self._array if node is not None]
@objectmodel.always_inline
def _get_item_node(self, index):
for node in self._array:
if node is None:
continue
assert isinstance(node, INode)
if index < node._size:
return index, None, node
index -= node._size
assert False
@jit.dont_look_inside
def assoc_inode(self, shift, hash_val, key, val, added_leaf):
idx = mask(hash_val, shift)
node = self._array[idx]
if node is None:
subnode = EmptyNode.assoc_inode(shift + 5, hash_val, key, val, added_leaf)
cloned = clone_and_set(self._array, idx, subnode)
newsize = added_leaf.adjust_size(self._size)
return ArrayNode(self._cnt + 1, cloned, newsize)
assert isinstance(node, INode)
n = node.assoc_inode(shift + 5, hash_val, key, val, added_leaf)
if n is node:
return self
new_array = clone_and_set(self._array, idx, n)
newsize = added_leaf.adjust_size(self._size)
return ArrayNode(self._cnt, new_array, newsize)
@jit.dont_look_inside
def without_inode(self, shift, hash_val, key):
idx = r_uint(mask(hash_val, shift))
node = self._array[idx]
if node is None:
return self
n = node.without_inode(shift + 5, hash_val, key)
if n is node:
return self
if n is None:
if self._cnt <= 8: # shrink
return self.pack(idx)
new_array = clone_and_set(self._array, idx, n)
return ArrayNode(self._cnt - 1, new_array, self._size - 1)
else:
new_array = clone_and_set(self._array, idx, n)
return ArrayNode(self._cnt, new_array, self._size - 1)
def pack(self, idx):
new_array = [None] * (2 * (self._cnt - 1))
j = r_uint(1)
bitmap = r_uint(0)
i = r_uint(0)
while i < idx:
if self._array[i] is not None:
new_array[j] = self._array[i]
bitmap |= r_uint(1) << i
j += 2
i += 1
i = r_uint(idx) + 1
while i < len(self._array):
if self._array[i] is not None:
new_array[j] = self._array[i]
bitmap |= r_uint(1) << i
j += 2
i += 1
return BitmapIndexedNode(bitmap, new_array, self._size - 1)
@objectmodel.always_inline
def find_step(self, shift, hash_val, key, not_found):
idx = mask(hash_val, shift)
node = self._array[idx]
if node is None:
return not_found
return node
ArrayNode.__name__ = "ArrayNode(%s)" % name
class HashCollisionNode(INode):
_attrs_ = ['_hash', '_array']
_immutable_fields_ = ['_hash', '_array[*]']
_settled_ = True
def __init__(self, hash, array, size):
INode.__init__(self, size)
self._hash = hash
self._array = array
def entry_count(self):
return len(self._array) / 2
@objectmodel.always_inline
def entry(self, index):
""" Helper function to extract the ith key/value pair """
base = index * 2
key = self._array[base]
val = self._array[base + 1]
return key, val
@objectmodel.always_inline
def keyat(self, index):
return self._array[index * 2]
@objectmodel.always_inline
def valat(self, index):
return self._array[index * 2 + 1]
def _entries(self):
"NOT RPYTHON"
entries = []
for x in range(len(self._array) / 2):
                key_or_none = self.keyat(x)
if key_or_none is None:
continue
val = self.valat(x)
entries.append((key_or_none, val))
return entries
def _subnodes(self):
"NOT RPYTHON"
return []
@objectmodel.always_inline
def _get_item_node(self, index):
for x in range(len(self._array) / 2):
key_or_none = self.keyat(x)
if key_or_none is None:
continue
if index == 0:
val_or_node = self.valat(x)
return -1, key_or_none, val_or_node
index -= 1
assert False
@jit.dont_look_inside
def assoc_inode(self, shift, hash_val, key, val, added_leaf):
if hash_val == self._hash:
idx = self.find_index(key)
if idx != -1:
if self.valat(idx) == val:
return self
new_array = clone_and_set(self._array, r_uint(idx * 2 + 1), val)
return HashCollisionNode(hash_val, new_array, self._size)
count = len(self._array)
new_array = [None] * (count + 2)
list_copy(self._array, 0, new_array, 0, count)
new_array[count] = key
added_leaf.add_leaf()
new_array[count + 1] = val
return HashCollisionNode(self._hash, new_array, self._size + 1)
new_array = [None, self]
edit = bitpos(self._hash, shift)
return BitmapIndexedNode(edit, new_array, self._size) \
.assoc_inode(shift, hash_val, key, val, added_leaf)
@objectmodel.always_inline
def find_step(self, shift, hash_val, key, not_found):
for x in range(0, len(self._array), 2):
key_or_nil = self._array[x]
if key_or_nil is not None and equal(key_or_nil, key):
return self._array[x + 1]
return not_found
def find_index(self, key):
i = r_int(0)
while i < len(self._array):
if equal(key, self._array[i]):
return i / 2
i += 2
return r_int(-1)
@jit.dont_look_inside
def without_inode(self, shift, hash, key):
idx = self.find_index(key)
if idx == -1:
return self
            if len(self._array) == 2:
                # removing the only remaining pair empties this collision node
                return None
new_array = remove_pair(self._array, r_uint(idx))
return HashCollisionNode(self._hash, new_array, self._size - 1)
HashCollisionNode.__name__ = "HashCollisionNode(%s)" % name
class PersistentHashMap(base):
_attrs_ = ['_cnt', '_root']
_immutable_fields_ = ['_cnt', '_root']
_settled_ = True
def __init__(self, cnt, root):
assert root is None or isinstance(root, INode)
self._cnt = cnt
self._root = root
def __len__(self):
return self._cnt
def haskey(self, key):
return self.val_at(key, MISSING) is not MISSING
__contains__ = haskey
def union(self, other):
"""
Performs a right biased union via iterated insertion. This could be
made faster at the cost of me figuring out how to actually implement
a proper union operation.
This skews the asymptotics a little since the implementation of
iteration is O(n lg n) as is insertion.
"""
assert isinstance(other, PersistentHashMap)
if not self._cnt:
return other
if not other._cnt:
return self
# Iterate over the smaller of the two maps
if other._cnt < self._cnt:
self, other = other, self
count = self._cnt
root = self._root
assert root is not None
added_leaf = Box()
for key, val in other.iteritems():
hash = hashfun(key) & MASK_32
root = root.assoc_inode(r_uint(0), hash, key, val, added_leaf)
count = added_leaf.adjust_size(count)
added_leaf.reset()
if root is self._root:
return self
return PersistentHashMap(count, root)
__add__ = union
def iteritems(self):
for i in range(self._cnt):
yield self.get_item(i)
def __iter__(self):
for i in range(self._cnt):
yield self.get_item(i)[0]
def keys(self):
keys = [None] * self._cnt
for i in range(self._cnt):
keys[i] = self.get_item(i)[0]
return keys
def vals(self):
vals = [None] * self._cnt
for i in range(self._cnt):
vals[i] = self.get_item(i)[1]
return vals
def hash_items(self):
return [(k,v) for k, v in self.iteritems()]
@jit.dont_look_inside
def assoc(self, key, val):
key = restrict_key_type(key)
val = restrict_val_type(val)
added_leaf = Box()
root = EmptyNode if self._root is None else self._root
hash = hashfun(key) & MASK_32
new_root = root.assoc_inode(r_uint(0), hash, key, val, added_leaf)
if new_root is self._root:
return self
newcnt = added_leaf.adjust_size(self._cnt)
return PersistentHashMap(newcnt, new_root)
def val_at(self, key, not_found):
key = restrict_key_type(key)
not_found = restrict_val_type(not_found)
if self._root is None:
return not_found
hashval = hashfun(key) & MASK_32
shift = r_uint(0)
val_or_node = self._root
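            # The dispatch on the concrete node class below (instead of one virtual
            # call) is presumably there so the RPython annotator/JIT sees monomorphic
            # calls; every branch invokes the same find_step protocol.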
while True:
t = type(val_or_node)
if t is BitmapIndexedNode:
val_or_node = val_or_node.find_step(shift, hashval, key, not_found)
elif t is ArrayNode:
val_or_node = val_or_node.find_step(shift, hashval, key, not_found)
elif t is HashCollisionNode:
val_or_node = val_or_node.find_step(shift, hashval, key, not_found)
else:
return restrict_val_type(val_or_node)
shift += 5
@jit.dont_look_inside
def without(self, key):
key = restrict_key_type(key)
if self._root is None:
return self
new_root = self._root.without_inode(0, hashfun(key) & MASK_32, key)
if new_root is self._root:
return self
return PersistentHashMap(self._cnt - 1, new_root)
@jit.dont_look_inside
def without_many(self, keys):
root = self._root
count = self._cnt
if root is None:
return PersistentHashMap.EMPTY()
for key in keys:
if root is None:
return PersistentHashMap.EMPTY()
key = restrict_key_type(key)
new_root = root.without_inode(0, hashfun(key) & MASK_32, key)
if new_root is not root:
root = new_root
count -= 1
return PersistentHashMap(count, root)
def get_item(self, index):
return self._elidable_get_item(index)
@jit.elidable
def _elidable_get_item(self, index):
if not (0 <= index < self._cnt):
raise IndexError
assert self._root is not None
key_or_none = None
val_or_node = self._root
while index != -1:
t = type(val_or_node)
if t is BitmapIndexedNode:
index, key_or_none, val_or_node = val_or_node._get_item_node(index)
elif t is ArrayNode:
index, key_or_none, val_or_node = val_or_node._get_item_node(index)
elif t is HashCollisionNode:
index, key_or_none, val_or_node = val_or_node._get_item_node(index)
else:
assert False
if not objectmodel.we_are_translated():
assert not isinstance(val_or_node, INode)
assert key_or_none is not None
return restrict_types(key_or_none, val_or_node)
@staticmethod
def EMPTY():
return PersistentHashMap(0, None)
@staticmethod
def singleton(key, val=None):
return PersistentHashMap.EMPTY().assoc(key, val)
def make_copy(self):
return PersistentHashMap(self._cnt, self._root)
PersistentHashMap.__name__ = name
PersistentHashMap.INode = INode
PersistentHashMap.BitmapIndexedNode = BitmapIndexedNode
PersistentHashMap.ArrayNode = ArrayNode
PersistentHashMap.HashCollisionNode = HashCollisionNode
#PersistentHashMap.EMPTY = PersistentHashMap(0, None)
def create_node(shift, key1, val1, key2hash, key2, val2):
key1hash = hashfun(key1) & MASK_32
if key1hash == key2hash:
return HashCollisionNode(key1hash, [key1, val1, key2, val2], 2)
added_leaf = Box()
return EmptyNode.assoc_inode(shift, key1hash, key1, val1, added_leaf) \
.assoc_inode(shift, key2hash, key2, val2, added_leaf)
def list_copy(from_lst, from_loc, to_list, to_loc, count):
from_loc = r_uint(from_loc)
to_loc = r_uint(to_loc)
count = r_uint(count)
i = r_uint(0)
while i < count:
to_list[to_loc+i] = from_lst[from_loc+i]
i += 1
return to_list
def clone_and_set(array, i, a):
clone = array[:]
clone[i] = a
return clone
def clone_and_set2(array, i, a, j, b):
clone = array[:]
clone[i] = a
clone[j] = b
return clone
def remove_pair(array, i):
new_array = [None] * (len(array) - 2)
list_copy(array, 0, new_array, 0, 2 * i)
list_copy(array, 2 * (i + 1), new_array, 2 * i, len(new_array) - (2 * i))
return new_array
return PersistentHashMap
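# A minimal usage sketch (illustrative, not part of the original module; it assumes
# an RPython checkout on the path so the rlib imports above resolve):
#
#     ObjMap = make_persistent_hash_type(name="ObjMap")
#     m = ObjMap.EMPTY().assoc("a", 1).assoc("b", 2)
#     assert m.val_at("a", None) == 1 and len(m) == 2
#     assert len(m.without("a")) == 1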
|
{
"content_hash": "74674744bd7d632adad920931691975f",
"timestamp": "",
"source": "github",
"line_count": 724,
"max_line_length": 112,
"avg_line_length": 34.24309392265194,
"alnum_prop": 0.49390932558889966,
"repo_name": "pycket/pycket",
"id": "94f56861331f9b92e348892ad70b72f543b7b054",
"size": "24793",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pycket/hash/persistent_hash_map.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "654"
},
{
"name": "Makefile",
"bytes": "8663"
},
{
"name": "Python",
"bytes": "1476360"
},
{
"name": "Racket",
"bytes": "995994"
},
{
"name": "Scheme",
"bytes": "378681"
},
{
"name": "Shell",
"bytes": "10154"
}
],
"symlink_target": ""
}
|
import logging
import sys
from collections import defaultdict
from typing import Any, Callable, Set
log = logging.getLogger(__name__)
class Tool:
"""
Tool represents a program that runs over source code. It returns a nested
dictionary structure like:
{'relative_filename': {'line_number': [error1, error2]}}
eg: {'imhotep/app.py': {'103': ['line too long']}}
Line numbers are indexed from 1, with the value 0 signifying a file-level
linting violation.
"""
def __init__(self, command_executor: Callable, filenames: Set[Any] = set()) -> None:
self.executor = command_executor
self.filenames = filenames
def get_configs(self):
return list()
def invoke(self, dirname, filenames=set(), linter_configs=set()):
"""
Main entrypoint for all plugins.
Returns results in the format of:
{'filename': {
'line_number': [
'error1',
'error2'
]
}
}
"""
retval = defaultdict(lambda: defaultdict(list))
if len(filenames):
extensions = [e.lstrip(".") for e in self.get_file_extensions()]
filenames = [f for f in filenames if f.split(".")[-1] in extensions]
if not filenames:
                # A specific set of files was requested, but none had the right
                # extension. This is different from the else-case below.
return {}
to_find = " -o ".join(['-samefile "%s"' % f for f in filenames])
else:
to_find = " -o ".join(
['-name "*%s"' % ext for ext in self.get_file_extensions()]
)
cmd = 'find {} -path "*/{}" | xargs {}'.format(
dirname,
to_find,
self.get_command(dirname, linter_configs=linter_configs),
)
result = self.executor(cmd)
if type(result) is bytes:
result = result.decode(sys.getdefaultencoding())
for line in result.split("\n"):
output = self.process_line(dirname, line)
if output is not None:
filename, lineno, messages = output
if filename.startswith(dirname):
filename = filename[len(dirname) + 1 :]
retval[filename][lineno].append(messages)
return retval
def process_line(self, dirname, line):
"""
        Processes a line and returns a 3-element tuple representing (filename,
line_number, error_messages) or None to indicate no error.
:param: dirname - directory the code is running in
        For the default implementation, the regex in `self.response_format` is expected
to have the capture groups `filename`, `line`, `message` in order. If
not, override this method.
"""
if not hasattr(self, "response_format"):
raise NotImplementedError()
match = self.response_format.search(line)
if match is not None:
if len(self.filenames) != 0:
if match.group("filename") not in self.filenames:
return
filename, line, messages = match.groups()
return filename, line, messages
def get_file_extensions(self):
"""
Returns a list of file extensions this tool should run against.
eg: ['.py', '.js']
"""
if not self.file_extensions:
raise NotImplementedError()
return self.file_extensions
def get_command(self, dirname, linter_configs=set()):
"""
Returns the command to run for linting. It is piped a list of files to
run on over stdin.
"""
raise NotImplementedError()
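# Illustrative sketch only (not part of imhotep): the command string and regex are
# assumptions that show the shape of a concrete Tool subclass; a real plugin would
# also need `import re` at module scope.
#
#     class Flake8Tool(Tool):
#         file_extensions = [".py"]
#         response_format = re.compile(
#             r"(?P<filename>.*?):(?P<line>\d+):\d+: (?P<message>.*)")
#
#         def get_command(self, dirname, linter_configs=set()):
#             return "flake8"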
|
{
"content_hash": "4fa563b20a0edfb62b9618ff6cb67224",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 88,
"avg_line_length": 32.9646017699115,
"alnum_prop": 0.5653691275167785,
"repo_name": "justinabrahms/imhotep",
"id": "d1c04b866d0fb8519fd057a405c201d9acd7719e",
"size": "3725",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "imhotep/tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "248"
},
{
"name": "Python",
"bytes": "75873"
},
{
"name": "Shell",
"bytes": "131"
}
],
"symlink_target": ""
}
|
import unittest
from google.datacatalog_connectors.commons_test import utils
class UtilsTestCase(unittest.TestCase):
def test_instance_should_succeed(self):
utils.Utils()
|
{
"content_hash": "b7ef0a8c4d7d2783102cdab7c220b199",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 60,
"avg_line_length": 20.77777777777778,
"alnum_prop": 0.7647058823529411,
"repo_name": "GoogleCloudPlatform/datacatalog-connectors",
"id": "5b2f8ec22c015716d0e19960417116797ed6d585",
"size": "783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google-datacatalog-connectors-commons-test/tests/google/datacatalog_connectors/commons_test/utils/utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "994"
},
{
"name": "Python",
"bytes": "180712"
},
{
"name": "Shell",
"bytes": "5300"
}
],
"symlink_target": ""
}
|
from .hashable import Hashable, HashableManager, HashableQueryMixin
from .lazy_attributes import LazyAttributes, LazyAttributesMixin
from .queryset import SearchableQuerySetMixin
__all__ = [
# .hashable
'Hashable',
'HashableManager',
'HashableQueryMixin',
# .lazy_attributes
'LazyAttributes',
'LazyAttributesMixin',
# .queryset
'SearchableQuerySetMixin',
]
|
{
"content_hash": "b05746b7f61c72eaa1fe307e67215d8b",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 67,
"avg_line_length": 26.266666666666666,
"alnum_prop": 0.7284263959390863,
"repo_name": "gamernetwork/gn-django",
"id": "6aaa9674a3e8db427ad2fe115ef426ca8ef7d9cd",
"size": "394",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "gn_django/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "317"
},
{
"name": "Dockerfile",
"bytes": "361"
},
{
"name": "HTML",
"bytes": "454"
},
{
"name": "JavaScript",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "153563"
},
{
"name": "Shell",
"bytes": "5024"
}
],
"symlink_target": ""
}
|
from bitfinex.client import Client, TradeClient
|
{
"content_hash": "ead43ca4390078f208ff9d2ed65f54e6",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 47,
"avg_line_length": 47,
"alnum_prop": 0.8723404255319149,
"repo_name": "chris-ch/coinarb",
"id": "f1108e6acafeb1f2ce87cdb63b4346dc38e7fbbb",
"size": "47",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/resources/bitfinex/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "3562"
},
{
"name": "Python",
"bytes": "74078"
},
{
"name": "Shell",
"bytes": "213"
}
],
"symlink_target": ""
}
|
import scrapy
class Subject(scrapy.Item):
id = scrapy.Field()
campus = scrapy.Field()
semester = scrapy.Field()
name = scrapy.Field()
hours = scrapy.Field()
classes = scrapy.Field()
|
{
"content_hash": "21c65a842bc1e8272b076f35202f33ac",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 29,
"avg_line_length": 20.8,
"alnum_prop": 0.6346153846153846,
"repo_name": "ranisalt/moita-ufsc-crawler",
"id": "58d3a1c4661edac41a591354387cd7b53cdcc8ff",
"size": "360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moita/items.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9716"
}
],
"symlink_target": ""
}
|
from nesdoc.utils import *
from nesdoc.generators import *
from nesdoc.html import *
__all__ = [ "generate_interface" ]
# A list of all functions with their short description, with links to the
# long description
def generate_fnlist_short(ht, name, fns):
if len(fns) > 0:
ht.tag("p")
ht.heading(name)
for fn in fns:
ht.func_sig_start();
ht.pfnsig(fn, lambda (name): '<a href="#%s">%s</a>' % (name, name))
doc = nd_doc_short(fn)
if doc != None:
ht.push("menu")
ht.pln(doc)
ht.popln()
ht.func_sig_stop();
# A list of all functions with their long description
def generate_fnlist_long(ht, name, fns):
if len(fns) > 0:
ht.tag("p")
ht.heading(name + " - Details")
first = True
for fn in fns:
if not first:
ht.tag("hr")
ht.startline()
name = fn.getAttribute("name")
ht.pln('<a name="%s"></a>' % name)
ht.push("h4"); ht.p(name); ht.popln()
ht.pfnsig(fn, lambda (name): '<b>%s</b>' % name)
doc = nd_doc_long(fn)
if doc:
ht.startline(); ht.tag("p")
ht.pushln("menu")
ht.pdoc(doc)
ht.popln()
first = False
def generate_interface(intf):
nicename = intf.getAttribute("nicename")
    ht = Html("ihtml/%s.html" % nicename)
ht.title("Interface: " + nicename)
ht.body()
ht.push("h2");
ht.pq("Interface: " + nicename)
ht.pop()
ht.startline()
ht.push("b")
parameters = xml_tag(intf, "parameters")
ht.p("interface " + intf.getAttribute("qname"))
if parameters:
ht.p("<" + parameter_str(parameters)[1:-1] + ">")
ht.pop()
idoc = nd_doc_long(intf)
if idoc != None:
ht.tag("p")
ht.pdoc(idoc)
ht.tag("p")
functions = intf.getElementsByTagName("function")
commands = filter(lambda (x): x.hasAttribute("command"), functions)
events = filter(lambda (x): x.hasAttribute("event"), functions)
commands.sort(lambda x, y: cmp(x.getAttribute("name").lower(), y.getAttribute("name").lower()));
events.sort(lambda x, y: cmp(x.getAttribute("name").lower(), y.getAttribute("name").lower()));
generate_fnlist_short(ht, "Commands", commands)
generate_fnlist_short(ht, "Events", events)
generate_fnlist_long(ht, "Commands", commands)
generate_fnlist_long(ht, "Events", events)
ht.close()
|
{
"content_hash": "291a8c96b739a038b875cb7be270475c",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 98,
"avg_line_length": 30.68,
"alnum_prop": 0.6053889613211647,
"repo_name": "barriquello/iotstack",
"id": "b299f1cbd98b81b4ff730a1491716ada730b8929",
"size": "2678",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openwsn-fw-work/firmware/openos/bootloader/telosb/lib/nesdoc/interfaces.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "878548"
},
{
"name": "C",
"bytes": "22726436"
},
{
"name": "C++",
"bytes": "3610288"
},
{
"name": "CSS",
"bytes": "1022"
},
{
"name": "Objective-C",
"bytes": "27834"
},
{
"name": "Python",
"bytes": "378566"
},
{
"name": "Shell",
"bytes": "1092"
},
{
"name": "Tcl",
"bytes": "68688"
}
],
"symlink_target": ""
}
|
"""
Should specifically use:
touchtechnology.common.backends.auth.UserSubclassBackend
touchtechnology.common.backends.auth.EmailUserSubclassBackend
"""
|
{
"content_hash": "53396fe6f53958cda4bfbb3b989a5032",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 63,
"avg_line_length": 26.166666666666668,
"alnum_prop": 0.8280254777070064,
"repo_name": "goodtune/vitriolic",
"id": "f5860e9bafdaad426155b3f5615b64d9c7c2b4b4",
"size": "157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "touchtechnology/common/backends/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "307509"
},
{
"name": "HTML",
"bytes": "273967"
},
{
"name": "JavaScript",
"bytes": "626908"
},
{
"name": "Less",
"bytes": "1373"
},
{
"name": "Makefile",
"bytes": "369"
},
{
"name": "Python",
"bytes": "962353"
},
{
"name": "Shell",
"bytes": "1490"
},
{
"name": "XSLT",
"bytes": "3510"
}
],
"symlink_target": ""
}
|
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.aiplatform_v1beta1.types import model
from google.cloud.aiplatform_v1beta1.types import model_evaluation
from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice
from google.cloud.aiplatform_v1beta1.types import model_service
class ListModelsPager:
"""A pager for iterating through ``list_models`` requests.
This class thinly wraps an initial
:class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` object, and
provides an ``__iter__`` method to iterate through its
``models`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListModels`` requests and continue to iterate
through the ``models`` field on the
corresponding responses.
All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., model_service.ListModelsResponse],
request: model_service.ListModelsRequest,
response: model_service.ListModelsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.aiplatform_v1beta1.types.ListModelsRequest):
The initial request object.
response (google.cloud.aiplatform_v1beta1.types.ListModelsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = model_service.ListModelsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[model_service.ListModelsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[model.Model]:
for page in self.pages:
yield from page.models
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListModelsAsyncPager:
"""A pager for iterating through ``list_models`` requests.
This class thinly wraps an initial
:class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``models`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListModels`` requests and continue to iterate
through the ``models`` field on the
corresponding responses.
All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[model_service.ListModelsResponse]],
request: model_service.ListModelsRequest,
response: model_service.ListModelsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.aiplatform_v1beta1.types.ListModelsRequest):
The initial request object.
response (google.cloud.aiplatform_v1beta1.types.ListModelsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = model_service.ListModelsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[model_service.ListModelsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[model.Model]:
async def async_generator():
async for page in self.pages:
for response in page.models:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListModelEvaluationsPager:
"""A pager for iterating through ``list_model_evaluations`` requests.
This class thinly wraps an initial
:class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` object, and
provides an ``__iter__`` method to iterate through its
``model_evaluations`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListModelEvaluations`` requests and continue to iterate
through the ``model_evaluations`` field on the
corresponding responses.
All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., model_service.ListModelEvaluationsResponse],
request: model_service.ListModelEvaluationsRequest,
response: model_service.ListModelEvaluationsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest):
The initial request object.
response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = model_service.ListModelEvaluationsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[model_service.ListModelEvaluationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[model_evaluation.ModelEvaluation]:
for page in self.pages:
yield from page.model_evaluations
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListModelEvaluationsAsyncPager:
"""A pager for iterating through ``list_model_evaluations`` requests.
This class thinly wraps an initial
:class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``model_evaluations`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListModelEvaluations`` requests and continue to iterate
through the ``model_evaluations`` field on the
corresponding responses.
All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[model_service.ListModelEvaluationsResponse]],
request: model_service.ListModelEvaluationsRequest,
response: model_service.ListModelEvaluationsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest):
The initial request object.
response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = model_service.ListModelEvaluationsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[model_service.ListModelEvaluationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[model_evaluation.ModelEvaluation]:
async def async_generator():
async for page in self.pages:
for response in page.model_evaluations:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListModelEvaluationSlicesPager:
"""A pager for iterating through ``list_model_evaluation_slices`` requests.
This class thinly wraps an initial
:class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` object, and
provides an ``__iter__`` method to iterate through its
``model_evaluation_slices`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListModelEvaluationSlices`` requests and continue to iterate
through the ``model_evaluation_slices`` field on the
corresponding responses.
All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., model_service.ListModelEvaluationSlicesResponse],
request: model_service.ListModelEvaluationSlicesRequest,
response: model_service.ListModelEvaluationSlicesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest):
The initial request object.
response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = model_service.ListModelEvaluationSlicesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[model_service.ListModelEvaluationSlicesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[model_evaluation_slice.ModelEvaluationSlice]:
for page in self.pages:
yield from page.model_evaluation_slices
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListModelEvaluationSlicesAsyncPager:
"""A pager for iterating through ``list_model_evaluation_slices`` requests.
This class thinly wraps an initial
:class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``model_evaluation_slices`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListModelEvaluationSlices`` requests and continue to iterate
through the ``model_evaluation_slices`` field on the
corresponding responses.
All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[
..., Awaitable[model_service.ListModelEvaluationSlicesResponse]
],
request: model_service.ListModelEvaluationSlicesRequest,
response: model_service.ListModelEvaluationSlicesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest):
The initial request object.
response (google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = model_service.ListModelEvaluationSlicesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(
self,
) -> AsyncIterable[model_service.ListModelEvaluationSlicesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[model_evaluation_slice.ModelEvaluationSlice]:
async def async_generator():
async for page in self.pages:
for response in page.model_evaluation_slices:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
|
{
"content_hash": "38864c1f0fe1d8b41bcc9769899ef20d",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 98,
"avg_line_length": 40.04218362282879,
"alnum_prop": 0.6569994422755159,
"repo_name": "sasha-gitg/python-aiplatform",
"id": "c1c5e47d21c48f319c481368eb95d5e5ba9e457c",
"size": "16737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google/cloud/aiplatform_v1beta1/services/model_service/pagers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "11216304"
},
{
"name": "Shell",
"bytes": "30838"
}
],
"symlink_target": ""
}
|
from setuptools import setup
# Version info -- read without importing
_locals = {}
with open("releases/_version.py") as fp:
exec(fp.read(), None, _locals)
version = _locals["__version__"]
setup(
name="releases",
version=version,
description="A Sphinx extension for changelog manipulation",
long_description=open("README.rst").read(),
author="Jeff Forcier",
author_email="jeff@bitprophet.org",
url="https://github.com/bitprophet/releases",
project_urls={
"Docs": "https://releases.readthedocs.io",
"Source": "https://github.com/bitprophet/releases",
"Changelog": "https://releases.readthedocs.io/en/latest/changelog.html", # noqa
"CI": "https://app.circleci.com/pipelines/github/bitprophet/releases",
},
packages=["releases"],
# NOTE: seems to work OK with Sphinx 1.8 still even tho we only "really"
# support 4.x. May as well not artificially limit folks for now.
install_requires=["semantic_version<2.7", "sphinx>=1.8"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Unix",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Software Development",
"Topic :: Software Development :: Documentation",
"Topic :: Documentation",
"Topic :: Documentation :: Sphinx",
],
)
|
{
"content_hash": "75c35e27497e758920011a019fd56262",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 88,
"avg_line_length": 39.69565217391305,
"alnum_prop": 0.6215772179627601,
"repo_name": "bitprophet/releases",
"id": "f4cae3c15798154ae10f8204515248436f79ce0d",
"size": "1849",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "91332"
}
],
"symlink_target": ""
}
|
import pytest
import cv2
from plantcv.plantcv import spatial_clustering
@pytest.mark.parametrize("alg, min_size, max_size", [['DBSCAN', 10, None],
['OPTICS', 100, 5000]]
)
def test_spatial_clustering(alg, min_size, max_size, test_data):
"""Test for PlantCV."""
img = cv2.imread(test_data.multi_bin_img, -1)
spmask = spatial_clustering(img, algorithm=alg, min_cluster_size=min_size, max_distance=max_size)
assert len(spmask[1]) == 2
def test_spatial_clustering_badinput(test_data):
"""Test for PlantCV."""
img = cv2.imread(test_data.small_bin_img)
with pytest.raises(NameError):
_ = spatial_clustering(img, algorithm="Hydra", min_cluster_size=5, max_distance=100)
|
{
"content_hash": "b2ae5abe14b26e1444dc844cd9690c89",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 101,
"avg_line_length": 39.05,
"alnum_prop": 0.6235595390524968,
"repo_name": "danforthcenter/plantcv",
"id": "d63dd92713b13db92146cd6cf204f3cb0747e1a1",
"size": "781",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/plantcv/test_spatial_clustering.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1112"
},
{
"name": "Python",
"bytes": "898011"
},
{
"name": "R",
"bytes": "1327"
},
{
"name": "Shell",
"bytes": "3348"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
from amaascore.corporate_actions.corporate_action import CorporateAction
class Notification(CorporateAction):
def __init__(self, asset_manager_id, corporate_action_id, record_date, corporate_action_status='Open',
asset_id=None, party_id=None, declared_date=None, settlement_date=None, elective=False, message=None,
description='', references=None, *args, **kwargs):
super(Notification, self).__init__(asset_manager_id=asset_manager_id, corporate_action_id=corporate_action_id,
record_date=record_date, corporate_action_status=corporate_action_status,
asset_id=asset_id, party_id=party_id, declared_date=declared_date,
settlement_date=settlement_date, elective=elective, message=message,
description=description, references=references, *args, **kwargs)
|
{
"content_hash": "bea6c5617014054e489d424a08d9e61e",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 118,
"avg_line_length": 70.46666666666667,
"alnum_prop": 0.630085146641438,
"repo_name": "nedlowe/amaas-core-sdk-python",
"id": "3020192121538c0cc1555520347e38260798a1a0",
"size": "1057",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "amaascore/corporate_actions/notification.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "618"
},
{
"name": "Python",
"bytes": "440693"
},
{
"name": "Shell",
"bytes": "42"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.core.urlresolvers import reverse
from .models import Movie
from django.contrib.auth import logout
from .forms import *
from django.contrib import messages
from django.core.mail import send_mail
# Create your views here.
from django.shortcuts import render
def advanced(request):
form = SearchForm(request.POST or None)
if form.is_valid():
f1 = form.cleaned_data['title'].lower()
f2 = form.cleaned_data['actors']
min_rating = form.cleaned_data['minr']
genres = form.cleaned_data['genres']
m2 = [x.pk for x in Movie.objects.all() if x.average_rating >= min_rating]
movies = (Movie.objects.filter(title__icontains=f1) | Movie.objects.filter(actors__in=f2)).filter(
genre__in=genres).filter(pk__in=m2)
context = {'movies': movies}
return render(request, 'search_results.html', context)
context = {
'form': form
}
return render(request, "advanced_search.html", context)
def search(request):
error = False
if 'q' in request.GET:
q = request.GET['q']
if not q:
error = True
else:
movies = Movie.objects.filter(title__icontains=q)
return render(request, 'search_results.html',
{'movies': movies, 'query': q})
if error is True:
messages.error(request, "Enter something!")
return HttpResponseRedirect('/')
def register_page(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
            username = form.cleaned_data['username']
            password = form.cleaned_data['password1']
User.objects.create_user(username=username, password=password, email=email,
first_name=form.cleaned_data['name'])
send_mail(subject='Welcome to LMDB', message='Greetings!', from_email='lab.movie.database@gmail.com',
recipient_list=[email])
return HttpResponseRedirect('/')
else:
form = RegistrationForm()
return render(request, 'registration/register.html', ({'form': form}))
def logout_page(request):
logout(request)
return HttpResponseRedirect('/')
def base(request):
movies = Movie.objects.all()
context = {
'movies': movies,
'user': request.user
}
return render(request, "index.html", context)
def movie_page(request, pk):
movie = get_object_or_404(Movie, id=pk)
# avg_stars = movie.objects.annotate(Avg('rating__stars'))
context = {
'movie': movie,
# 'stars':avg_stars
}
return render(request, "movie.html", context)
def list_comments(request, pk):
movie = get_object_or_404(Movie, id=pk)
# avg_stars = movie.objects.annotate(Avg('rating__stars'))
context = {
'movie': movie,
# 'stars':avg_stars
}
return render(request, "all_comments.html", context)
def add_comment_to_movie(request, id):
movie = get_object_or_404(Movie, id=id)
x = Comment.objects.filter(movie=movie, author=request.user)
if len(x) > 0:
messages.error(request, "User already commented!")
return HttpResponseRedirect('/movie/%d/' % movie.id)
if request.method == "POST":
form = CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.movie = movie
comment.author = request.user
comment.save()
return HttpResponseRedirect('/movie/%d/' % movie.id)
else:
form = CommentForm()
return render(request, 'add_comment_to_movie.html', {'form': form})
|
{
"content_hash": "047ac0f014bf8db01f7a2bcafe821243",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 113,
"avg_line_length": 30.59375,
"alnum_prop": 0.625638406537283,
"repo_name": "huzaifafaruqui/Movies-Website",
"id": "cd29c32f55cde4675bc2637999a04f018447e8e9",
"size": "3916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lmdb/home/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "60303"
},
{
"name": "HTML",
"bytes": "34536"
},
{
"name": "JavaScript",
"bytes": "96495"
},
{
"name": "Python",
"bytes": "23213"
}
],
"symlink_target": ""
}
|
import os
import discord
import re
import time
from wow import *
from util import *
from settings import *
client = discord.Client()
@client.event
async def on_message(message):
"""Listens for specific user messages."""
# Current time (Used for cache busting character thumbnails).
epoch_time = int(time.time())
# If the author is the bot do nothing.
if message.author == client.user:
return
if message.content.startswith("!armory token"):
split = split_query(message.content, "wow_token")
region = split[0]
info = await wow_token_price(region)
# Returns a message to the channel if there's an error fetching.
if info == "not_found":
msg = GOLD_ERROR.format(message)
await client.send_message(message.channel, msg)
elif info == "connection_error":
msg = CONNECTION_ERROR.format(message)
await client.send_message(message.channel, msg)
elif info == "credential_error":
msg = CREDENTIAL_ERROR.format(message)
await client.send_message(message.channel, msg)
else:
msg = (
"`The current price of a WoW Token on %s realms is %s gold.` :moneybag:"
% (region, info)
)
await client.send_message(message.channel, msg)
if message.content.startswith("!armory pve"):
split = split_query(message.content, "pve")
# Assigns the 3rd index in the split to the region
region = split[3]
# Sends the returned data to the character_info function to build a character sheet.
info = await character_info(split[0], split[1], split[2], region)
# Returns a message to the channel if there's an error fetching.
if info == "not_found":
msg = NOT_FOUND_ERROR.format(message)
await client.send_message(message.channel, msg)
elif info == "connection_error":
msg = CONNECTION_ERROR.format(message)
await client.send_message(message.channel, msg)
elif info == "credential_error":
msg = CREDENTIAL_ERROR.format(message)
await client.send_message(message.channel, msg)
elif info == "unknown_error":
msg = UNKNOWN_ERROR.format(message)
await client.send_message(message.channel, msg)
else:
# Format the AOTC/CE strings if they exist.
ud_feat = ""
bod_feat = ""
cos_feat = ""
tep_feat = ""
nya_feat = ""
if info["ud_feat"] != "":
ud_feat = "**`%s`**" % (info["ud_feat"])
if info["bod_feat"] != "":
bod_feat = "**`%s`**" % (info["bod_feat"])
if info["cos_feat"] != "":
cos_feat = "**`%s`**" % (info["cos_feat"])
if info["tep_feat"] != "":
tep_feat = "**`%s`**" % (info["tep_feat"])
if info["nya_feat"] != "":
nya_feat = "**`%s`**" % (info["nya_feat"])
msg = discord.Embed(
title="%s" % (info["name"]),
colour=discord.Colour(info["class_colour"]),
url="%s" % (info["armory"]),
description="%s %s %s %s"
% (info["level"], info["faction"], info["spec"], info["class_type"]),
)
msg.set_thumbnail(
url="https://render-%s.worldofwarcraft.com/character/%s?_%s"
% (region, info["thumb"], epoch_time)
)
msg.set_footer(
text="!armory help | Feedback: https://github.com/JamesIves/discord-wow-armory-bot/issues",
icon_url="https://raw.githubusercontent.com/JamesIves/discord-wow-armory-bot/master/assets/icon.png",
)
msg.add_field(
name="Character",
value="**`Name`:** `%s`\n**`Realm`:** `%s (%s)`\n**`Item Level`:** `%s`"
% (info["name"], info["realm"], region.upper(), info["ilvl"]),
inline=True,
)
msg.add_field(
name="Keystone Achievements (Season 3)",
value="**`Conqueror (+10)`: ** `%s`\n**`Master (+15)`: ** `%s` \n"
% (info["keystone_season_conqueror"], info["keystone_season_master"]),
inline=True,
)
msg.add_field(
name="Uldir",
value="**`Normal`:** `%s/%s`\n**`Heroic`:** `%s/%s`\n**`Mythic`:** `%s/%s`\n%s"
% (
info["uldir"]["normal"],
info["uldir"]["bosses"],
info["uldir"]["heroic"],
info["uldir"]["bosses"],
info["uldir"]["mythic"],
info["uldir"]["bosses"],
ud_feat,
),
inline=True,
)
msg.add_field(
name="Battle of Dazar'alor",
value="**`Normal`:** `%s/%s`\n**`Heroic`:** `%s/%s`\n**`Mythic`:** `%s/%s`\n%s"
% (
info["battle_of_dazaralor"]["normal"],
info["battle_of_dazaralor"]["bosses"],
info["battle_of_dazaralor"]["heroic"],
info["battle_of_dazaralor"]["bosses"],
info["battle_of_dazaralor"]["mythic"],
info["battle_of_dazaralor"]["bosses"],
bod_feat,
),
inline=True,
)
msg.add_field(
name="Crucible of Storms",
value="**`Normal`:** `%s/%s`\n**`Heroic`:** `%s/%s`\n**`Mythic`:** `%s/%s`\n%s"
% (
info["crucible_of_storms"]["normal"],
info["crucible_of_storms"]["bosses"],
info["crucible_of_storms"]["heroic"],
info["crucible_of_storms"]["bosses"],
info["crucible_of_storms"]["mythic"],
info["crucible_of_storms"]["bosses"],
cos_feat,
),
inline=True,
)
msg.add_field(
name="The Eternal Palace",
value="**`Normal`:** `%s/%s`\n**`Heroic`:** `%s/%s`\n**`Mythic`:** `%s/%s`\n%s"
% (
info["the_eternal_palace"]["normal"],
info["the_eternal_palace"]["bosses"],
info["the_eternal_palace"]["heroic"],
info["the_eternal_palace"]["bosses"],
info["the_eternal_palace"]["mythic"],
info["the_eternal_palace"]["bosses"],
tep_feat,
),
inline=True,
)
msg.add_field(
name="Ny'alotha, the Waking City",
value="**`Normal`:** `%s/%s`\n**`Heroic`:** `%s/%s`\n**`Mythic`:** `%s/%s`\n%s"
% (
info["nyalotha"]["normal"],
info["nyalotha"]["bosses"],
info["nyalotha"]["heroic"],
info["nyalotha"]["bosses"],
info["nyalotha"]["mythic"],
info["nyalotha"]["bosses"],
nya_feat,
),
inline=True,
)
await client.send_message(message.channel, embed=msg)
# Same as before, except this time it's building data for PVP.
if message.content.startswith("!armory pvp"):
split = split_query(message.content, "pvp")
region = split[3]
info = await character_info(split[0], split[1], split[2], region)
if info == "not_found":
msg = NOT_FOUND_ERROR.format(message)
await client.send_message(message.channel, msg)
elif info == "connection_error":
msg = CONNECTION_ERROR.format(message)
await client.send_message(message.channel, msg)
elif info == "credential_error":
msg = CREDENTIAL_ERROR.format(message)
await client.send_message(message.channel, msg)
elif info == "unknown_error":
msg = UNKNOWN_ERROR.format(message)
await client.send_message(message.channel, msg)
else:
msg = discord.Embed(
title="%s" % (info["name"]),
colour=discord.Colour(info["class_colour"]),
url="%s" % (info["armory"]),
description="%s %s %s %s (BFA)"
% (info["level"], info["faction"], info["spec"], info["class_type"]),
)
msg.set_thumbnail(
url="https://render-%s.worldofwarcraft.com/character/%s?_%s"
% (region, info["thumb"], epoch_time)
)
msg.set_footer(
text="!armory help | Feedback: https://github.com/JamesIves/discord-wow-armory-bot/issues",
icon_url="https://github.com/JamesIves/discord-wow-armory-bot/blob/master/assets/icon.png?raw=true",
)
msg.add_field(
name="Character",
value="**`Name`:** `%s`\n**`Realm`:** `%s (%s)`\n**`Battlegroup`:** `%s`\n**`Item Level`:** `%s`"
% (
info["name"],
info["realm"],
region.upper(),
info["battlegroup"],
info["ilvl"],
),
inline=True,
)
msg.add_field(
name="Arena Achievements",
value="**`Challenger`:** `%s`\n**`Rival`:** `%s`\n**`Duelist`:** `%s`\n**`Gladiator`:** `%s`"
% (
info["arena_challenger"],
info["arena_rival"],
info["arena_duelist"],
info["arena_gladiator"],
),
inline=True,
)
msg.add_field(
name="RBG Achievements",
value="**`%s`:** `%s`\n**`%s`:** `%s`\n**`%s`:** `%s`"
% (
info["rbg_2400_name"],
info["rbg_2400"],
info["rbg_2000_name"],
info["rbg_2000"],
info["rbg_1500_name"],
info["rbg_1500"],
),
inline=True,
)
msg.add_field(
name="Rated 2v2",
value="**`Rating`:** `%s`" % (info["2v2"]),
inline=True,
)
msg.add_field(
name="Rated 3v3",
value="**`Rating`:** `%s`" % (info["3v3"]),
inline=True,
)
msg.add_field(
name="Rated Battlegrounds",
value="**`Rating`:** `%s`" % (info["rbg"]),
inline=True,
)
msg.add_field(
name="Skirmish 2v2",
value="**`Rating`:** `%s`" % (info["2v2s"]),
inline=True,
)
msg.add_field(
name="Lifetime Honorable Kills",
value="`%s`" % (info["kills"]),
inline=True,
)
await client.send_message(message.channel, embed=msg)
# Display a list of available commands and a set of credits.
if message.content.startswith("!armory help"):
msg = """The following commands can be entered:
```
# Displays a player's PVE progression, dungeon kills, keystone achievements, etc.
!armory pve <name> <realm>
!armory pve <armory-link>
# Displays a player's PVP progression, arena ratings, honorable kills, etc.
!armory pvp <name> <realm>
!armory pvp <armory-link>
# Displays the WoW token price
!armory token
# You can also provide an optional region with each query to display players from WoW regions other than the bot's default, for example EU or US.
!armory pve <name> <realm> <region>
!armory pvp <armory-link> <region>
!armory token <region>
```
• Bot created by James Ives (https://jamesiv.es)
• Feedback, Issues and Source: https://github.com/JamesIves/discord-wow-armory-bot/issues
"""
msg = "%s".format(message) % re.sub(
r"(^[ \t]+|[ \t]+(?=:))", "", msg, flags=re.M
)
await client.send_message(message.channel, msg)
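# Startup hook: validate the required configuration values before the bot starts listening for commands.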
@client.event
async def on_ready():
if (
WOW_CLIENT_ID is None
or WOW_CLIENT_ID == ""
or WOW_CLIENT_SECRET is None
or WOW_CLIENT_SECRET == ""
):
print(
"Missing World of Warcraft Client ID/Secret. Please refer to https://github.com/JamesIves/discord-wow-armory-bot#configuration for more details"
)
quit()
if WOW_REGION is None or WOW_REGION == "":
print(
"Missing World of Warcraft player region. Please refer to https://github.com/JamesIves/discord-wow-armory-bot#configuration for more details"
)
quit()
if LOCALE is None or LOCALE == "":
print(
"Missing locale. Please refer to https://github.com/JamesIves/discord-wow-armory-bot#configuration for more details"
)
quit()
else:
print("Launch Succesful! The bot is now listening for commands...")
if DISCORD_BOT_TOKEN is None or DISCORD_BOT_TOKEN == "":
print(
"Missing Discord bot token. Please refer to https://github.com/JamesIves/discord-wow-armory-bot#configuration for more details"
)
quit()
else:
client.run(DISCORD_BOT_TOKEN)
|
{
"content_hash": "6d7af1a43e5dc01b2649fb7204ecd39a",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 162,
"avg_line_length": 37.743169398907106,
"alnum_prop": 0.4723468944549008,
"repo_name": "JamesIves/discord-wow-armory-bot",
"id": "b6019d725878fede494d5e1a19a370bf64385537",
"size": "13861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53415"
}
],
"symlink_target": ""
}
|
from keystone.policy import controllers
from keystone.policy.core import *
from keystone.policy import routers
|
{
"content_hash": "5f7e03f9a613ce5fce21a601097d8fa7",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 39,
"avg_line_length": 37,
"alnum_prop": 0.8468468468468469,
"repo_name": "paypal/keystone",
"id": "121905e4644ec0d831980f7cac0d835ccdd89e47",
"size": "735",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "keystone/policy/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from jinja2 import Template
from IPython.display import IFrame, HTML
import os
import json
from .base_plotter import IPlotter
class ChartistPlotter(IPlotter):
"""Class for creating chartist.js charts in ipython notebook."""
head = """
<!-- Load Chartist.js -->
<link rel='stylesheet' href='https://cdn.jsdelivr.net/chartist.js/latest/chartist.min.css'>
<script src='https://cdn.jsdelivr.net/chartist.js/latest/chartist.min.js'></script>
"""
template = """
<div id={{div_id}} class='ct-chart' style='width: 100%; height: 100%' ></div>
<script>
new Chartist.{{chart_type}}('#{{div_id}}', {{data}}, {{options}});
</script>
"""
def __init__(self):
super(ChartistPlotter, self).__init__()
def render(self, data, chart_type, options=None, div_id="chart", head=""):
"""Render the data in HTML template."""
if not self.is_valid_name(div_id):
raise ValueError(
"Name {} is invalid. Only letters, numbers, '_', and '-' are permitted ".format(
div_id))
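# Escape existing single quotes, then swap double quotes for single quotes so the JSON literal embeds cleanly in the JS template.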
return Template(head + self.template).render(
div_id=div_id.replace(" ", "_"),
data=json.dumps(
data, indent=4).replace("'", "\\'").replace('"', "'"),
chart_type=chart_type,
options=json.dumps(
options, indent=4).replace("'", "\\'").replace('"', "'"))
def plot_and_save(self,
data,
chart_type,
options=None,
w=800,
h=420,
filename='chart',
overwrite=True):
"""Save the rendered html to a file and return an IFrame to display the plot in the notebook."""
self.save(data, chart_type, options, filename, overwrite)
return IFrame(filename + '.html', w, h)
def plot(self, data, chart_type, options=None, w=800, h=420):
"""Output an iframe containing the plot in the notebook without saving."""
return HTML(
self.iframe.format(
source=self.render(
data=data,
options=options,
chart_type=chart_type,
head=self.head),
w=w,
h=h))
def save(self,
data,
chart_type,
options=None,
filename='chart',
overwrite=True):
"""Save the rendered html to a file in the same directory as the notebook."""
html = self.render(
data=data,
chart_type=chart_type,
options=options,
div_id=filename,
head=self.head)
if overwrite:
with open(filename.replace(" ", "_") + '.html', 'w') as f:
f.write(html)
else:
if not os.path.exists(filename.replace(" ", "_") + '.html'):
with open(filename.replace(" ", "_") + '.html', 'w') as f:
f.write(html)
else:
raise IOError('File Already Exists!')
|
{
"content_hash": "aecaef600ca0acbc0dae7abf702aa18c",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 104,
"avg_line_length": 35.96590909090909,
"alnum_prop": 0.5001579778830963,
"repo_name": "niloch/iplotter",
"id": "277b47787bb8091bc48a941c54a5d7de2e94dd53",
"size": "3165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iplotter/chartist_plotter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20250"
}
],
"symlink_target": ""
}
|
import pandas as pd
import numpy as np
from enum import IntEnum
from . import Utils as tsutil
from . import TimeSeries_Cutting as tscut
from . import DateTime_Functions as dtfunc
class cTimeInfo:
# class data
def __init__(self):
self.mSignalFrame = None
self.mTimeMin = None;
self.mTimeMax = None;
self.mTimeMinMaxDiff = None;
self.mTimeDelta = None;
self.mHorizon = None;
self.mResolution = dtfunc.eTimeResolution.NONE
self.mSplit = None
def info(self):
lStr2 = "TimeVariable='" + self.mTime +"'";
lStr2 += " TimeMin=" + str(self.mTimeMin) +"";
lStr2 += " TimeMax=" + str(self.mTimeMax) +"";
lStr2 += " TimeDelta=" + str(self.mTimeDelta) +"";
lStr2 += " Horizon=" + str(self.mHorizon) +"";
return lStr2;
def to_dict(self):
dict1 = {};
dict1["TimeVariable"] = self.mTime;
dict1["TimeMinMax"] = [str(self.mSignalFrame[self.mTime].min()) ,
str(self.mSignalFrame[self.mTime].max())];
dict1["Horizon"] = self.mHorizon;
return dict1;
def addVars(self, df):
df[self.mRowNumberColumn] = self.mSignalFrame[self.mRowNumberColumn]
df[self.mTime] = self.mSignalFrame[self.mTime]
df[self.mNormalizedTimeColumn] = self.mSignalFrame[self.mNormalizedTimeColumn]
df[self.mSignal] = self.mSignalFrame[self.mSignal]
df[self.mOriginalSignal] = self.mSignalFrame[self.mOriginalSignal]
def get_time_dtype(self):
# print(self.mTimeMax, type(self.mTimeMax))
lType = self.mSignalFrame[self.mTime].dtype;
return lType;
def checkDateTypesForNewDataset(self, df):
if(self.mTimeMax is not None):
lType1 = self.get_time_dtype();
lType2 = df[self.mTime].dtype
if(lType1.kind != lType2.kind):
raise tsutil.PyAF_Error('Incompatible Time Column Type expected=' + str(lType1) + ' got: ' + str(lType2) + "'");
pass
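# Appends one extra row to the dataset: time advanced by one delta, signal left as NaN, row number and normalized time filled in.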
def transformDataset(self, df):
self.checkDateTypesForNewDataset(df);
# new row
lLastRow = df.tail(1).copy();
lNextTime = self.nextTime(df, 1)
lLastRow[self.mTime] = lNextTime
lLastRow[self.mSignal] = np.nan
if(self.mNormalizedTimeColumn in df.columns):
lLastRow[self.mNormalizedTimeColumn] = self.normalizeTime(lNextTime)
lLastRow[self.mRowNumberColumn] = lLastRow[self.mRowNumberColumn].max() + 1
# print(lLastRow.columns , df.columns)
assert(str(lLastRow.columns) == str(df.columns))
df = pd.concat([df, lLastRow], ignore_index=True, verify_integrity = True, sort=False);
if(self.mNormalizedTimeColumn not in df.columns):
df[self.mRowNumberColumn] = np.arange(0, df.shape[0]);
df[self.mNormalizedTimeColumn] = self.compute_normalized_date_column(df[self.mTime])
# print(df.tail());
return df;
def isPhysicalTime(self):
lHelper = dtfunc.cDateTime_Helper()
return lHelper.isPhysicalTime(self.mSignalFrame[self.mTime])
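# Guess the time resolution of the series from the estimation split; only meaningful for physical date/time columns.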
def analyzeSeasonals(self):
if(not self.isPhysicalTime()):
return;
lEstim = self.mSplit.getEstimPart(self.mSignalFrame);
lEstimTime = lEstim[self.mTime]
lHelper = dtfunc.cDateTime_Helper()
self.mResolution = lHelper.guess_time_resolution(lEstimTime);
def checkDateTypes(self):
# print(self.mSignalFrame.info());
type1 = self.mSignalFrame[self.mTime].dtype
if(type1.kind == 'O'):
raise tsutil.PyAF_Error('Invalid Time Column Type ' + self.mTime + '[' + str(type1) + ']');
def adaptTimeDeltaToTimeResolution(self):
if(not self.isPhysicalTime()):
return;
lHelper = dtfunc.cDateTime_Helper()
self.mTimeDelta = lHelper.adaptTimeDeltaToTimeResolution(self.mResolution , self.mTimeDelta);
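# Estimate the time step on the estimation split: user-supplied value, mean of consecutive differences, or their most frequent value.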
def computeTimeDelta(self):
#print(self.mSignalFrame.columns);
# print(self.mSignalFrame[self.mTime].head());
lEstim = self.mSplit.getEstimPart(self.mSignalFrame)
lTimeBefore = lEstim[self.mTime].shift(1);
# lTimeBefore.fillna(self.mTimeMin, inplace=True)
N = lEstim.shape[0];
if(N == 1):
if(self.isPhysicalTime()):
self.mTimeDelta = np.timedelta64(1,'D');
else:
self.mTimeDelta = 1
return
#print(self.mSignal, self.mTime, N);
#print(lEstim[self.mTime].head());
#print(lTimeBefore.head());
lDiffs = lEstim[self.mTime][1:N] - lTimeBefore[1:N]
if(self.mOptions.mTimeDeltaComputationMethod == "USER"):
self.mTimeDelta = self.mOptions.mUserTimeDelta;
if(self.mOptions.mTimeDeltaComputationMethod == "AVG"):
self.mTimeDelta = np.mean(lDiffs);
type1 = self.mSignalFrame[self.mTime].dtype
if(type1.kind == 'i' or type1.kind == 'u'):
self.mTimeDelta = int(self.mTimeDelta)
if(self.mOptions.mTimeDeltaComputationMethod == "MODE"):
delta_counts = pd.DataFrame(lDiffs.value_counts());
self.mTimeDelta = delta_counts[self.mTime].argmax();
self.adaptTimeDeltaToTimeResolution();
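# Main entry point: validates the time column, records min/max, derives the time delta and adds the normalized time column.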
def estimate(self):
#print(self.mSignalFrame.columns);
#print(self.mSignalFrame[self.mTime].head());
self.checkDateTypes();
self.mRowNumberColumn = "row_number"
self.mNormalizedTimeColumn = self.mTime + "_Normalized";
self.analyzeSeasonals();
lEstim = self.mSplit.getEstimPart(self.mSignalFrame)
self.mTimeMin = lEstim[self.mTime].min();
self.mTimeMax = lEstim[self.mTime].max();
if(self.isPhysicalTime()):
self.mTimeMin = np.datetime64(self.mTimeMin.to_pydatetime());
self.mTimeMax = np.datetime64(self.mTimeMax.to_pydatetime());
self.mTimeMinMaxDiff = self.mTimeMax - self.mTimeMin;
self.mEstimCount = lEstim.shape[0]
# print(self.mTimeMin, self.mTimeMax , self.mTimeMinMaxDiff , (self.mTimeMax - self.mTimeMin)/self.mTimeMinMaxDiff)
self.computeTimeDelta();
self.mSignalFrame[self.mNormalizedTimeColumn] = self.compute_normalized_date_column(self.mSignalFrame[self.mTime])
self.dump();
def dump(self):
time_info = self.info();
def compute_normalized_date_column(self, idate_column):
if(self.mEstimCount == 1):
return 0.0;
return self.normalizeTime(idate_column)
def normalizeTime(self , iTime):
if(self.mEstimCount == 1):
return 0.0;
output = ( iTime- self.mTimeMin) / self.mTimeMinMaxDiff
return output
def cast_to_time_dtype(self, iTimeValue):
lType1 = self.get_time_dtype();
lTimeValue = np.array([iTimeValue]).astype(lType1)[0];
return lTimeValue;
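# Compute the timestamp iSteps deltas after the last observed time, cast back to the time column's dtype.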
def nextTime(self, df, iSteps):
#print(df.tail(1)[self.mTime]);
lLastTime = df[self.mTime].values[-1]
if(self.isPhysicalTime()):
lLastTime = pd.Timestamp(lLastTime)
# print("NEXT_TIME" , lLastTime, iSteps, self.mTimeDelta);
lNextTime = lLastTime + iSteps * self.mTimeDelta;
lNextTime = self.cast_to_time_dtype(lNextTime.to_datetime64())
else:
lNextTime = lLastTime + iSteps * self.mTimeDelta;
lNextTime = self.cast_to_time_dtype(lNextTime)
return lNextTime;
|
{
"content_hash": "b68720f2cb16494f185d1435e663f2ea",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 128,
"avg_line_length": 38.611111111111114,
"alnum_prop": 0.608763897972531,
"repo_name": "antoinecarme/pyaf",
"id": "19895d1b96fb8695a9c8174a2665856155eec3d7",
"size": "7872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyaf/TS/Time.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import os, sys
import time
from test.testlib import *
from git import *
class TestRepo(object):
def setup(self):
self.repo = Repo(GIT_REPO)
@raises(InvalidGitRepositoryError)
def test_new_should_raise_on_invalid_repo_location(self):
if sys.platform == "win32":
Repo("C:\\WINDOWS\\Temp")
else:
Repo("/tmp")
@raises(NoSuchPathError)
def test_new_should_raise_on_non_existant_path(self):
Repo("repos/foobar")
def test_description(self):
txt = "Test repository"
self.repo.description = txt
assert_equal(self.repo.description, txt)
def test_heads_should_return_array_of_head_objects(self):
for head in self.repo.heads:
assert_equal(Head, head.__class__)
@patch_object(Git, '_call_process')
def test_heads_should_populate_head_data(self, git):
git.return_value = fixture('for_each_ref')
head = self.repo.heads[0]
assert_equal('master', head.name)
assert_equal('634396b2f541a9f2d58b00be1a07f0c358b999b3', head.commit.id)
assert_true(git.called)
assert_equal(git.call_args, (('for_each_ref', 'refs/heads'), {'sort': 'committerdate', 'format': '%(refname)%00%(objectname)'}))
@patch_object(Git, '_call_process')
def test_commits(self, git):
git.return_value = fixture('rev_list')
commits = self.repo.commits('master', max_count=10)
c = commits[0]
assert_equal('4c8124ffcf4039d292442eeccabdeca5af5c5017', c.id)
assert_equal(["634396b2f541a9f2d58b00be1a07f0c358b999b3"], [p.id for p in c.parents])
assert_equal("672eca9b7f9e09c22dcb128c283e8c3c8d7697a4", c.tree.id)
assert_equal("Tom Preston-Werner", c.author.name)
assert_equal("tom@mojombo.com", c.author.email)
assert_equal(time.gmtime(1191999972), c.authored_date)
assert_equal("Tom Preston-Werner", c.committer.name)
assert_equal("tom@mojombo.com", c.committer.email)
assert_equal(time.gmtime(1191999972), c.committed_date)
assert_equal("implement Grit#heads", c.message)
c = commits[1]
assert_equal([], c.parents)
c = commits[2]
assert_equal(["6e64c55896aabb9a7d8e9f8f296f426d21a78c2c", "7f874954efb9ba35210445be456c74e037ba6af2"], map(lambda p: p.id, c.parents))
assert_equal("Merge branch 'site'", c.summary)
assert_true(git.called)
assert_equal(git.call_args, (('rev_list', 'master', '--', ''), {'skip': 0, 'pretty': 'raw', 'max_count': 10}))
@patch_object(Git, '_call_process')
def test_commit_count(self, git):
git.return_value = fixture('rev_list_count')
assert_equal(655, self.repo.commit_count('master'))
assert_true(git.called)
assert_equal(git.call_args, (('rev_list', 'master', '--', ''), {}))
@patch_object(Git, '_call_process')
def test_commit(self, git):
git.return_value = fixture('rev_list_single')
commit = self.repo.commit('4c8124ffcf4039d292442eeccabdeca5af5c5017')
assert_equal("4c8124ffcf4039d292442eeccabdeca5af5c5017", commit.id)
assert_true(git.called)
assert_equal(git.call_args, (('rev_list', '4c8124ffcf4039d292442eeccabdeca5af5c5017', '--', ''), {'pretty': 'raw', 'max_count': 1}))
@patch_object(Git, '_call_process')
def test_tree(self, git):
git.return_value = fixture('ls_tree_a')
tree = self.repo.tree('master')
assert_equal(4, len([c for c in tree.values() if isinstance(c, Blob)]))
assert_equal(3, len([c for c in tree.values() if isinstance(c, Tree)]))
assert_true(git.called)
assert_equal(git.call_args, (('ls_tree', 'master'), {}))
@patch_object(Git, '_call_process')
def test_blob(self, git):
git.return_value = fixture('cat_file_blob')
blob = self.repo.blob("abc")
assert_equal("Hello world", blob.data)
assert_true(git.called)
assert_equal(git.call_args, (('cat_file', 'abc'), {'p': True, 'with_raw_output': True}))
@patch_object(Repo, '__init__')
@patch_object(Git, '_call_process')
def test_init_bare(self, git, repo):
git.return_value = True
repo.return_value = None
Repo.init_bare("repos/foo/bar.git")
assert_true(git.called)
assert_equal(git.call_args, (('init', '--bare'), {}))
assert_true(repo.called)
assert_equal(repo.call_args, (('repos/foo/bar.git',), {}))
@patch_object(Repo, '__init__')
@patch_object(Git, '_call_process')
def test_init_bare_with_options(self, git, repo):
git.return_value = True
repo.return_value = None
Repo.init_bare("repos/foo/bar.git", **{'template': "/baz/sweet"})
assert_true(git.called)
assert_equal(git.call_args, (('init', '--bare'), {'template': '/baz/sweet'}))
assert_true(repo.called)
assert_equal(repo.call_args, (('repos/foo/bar.git',), {}))
@patch_object(Repo, '__init__')
@patch_object(Git, '_call_process')
def test_fork_bare(self, git, repo):
git.return_value = None
repo.return_value = None
self.repo.fork_bare("repos/foo/bar.git")
assert_true(git.called)
path = os.path.join(absolute_project_path(), '.git')
assert_equal(git.call_args, (('clone', path, 'repos/foo/bar.git'), {'bare': True}))
assert_true(repo.called)
@patch_object(Repo, '__init__')
@patch_object(Git, '_call_process')
def test_fork_bare_with_options(self, git, repo):
git.return_value = None
repo.return_value = None
self.repo.fork_bare("repos/foo/bar.git", **{'template': '/awesome'})
assert_true(git.called)
path = os.path.join(absolute_project_path(), '.git')
assert_equal(git.call_args, (('clone', path, 'repos/foo/bar.git'),
{'bare': True, 'template': '/awesome'}))
assert_true(repo.called)
@patch_object(Git, '_call_process')
def test_diff(self, git):
self.repo.diff('master^', 'master')
assert_true(git.called)
assert_equal(git.call_args, (('diff', 'master^', 'master', '--'), {}))
self.repo.diff('master^', 'master', 'foo/bar')
assert_true(git.called)
assert_equal(git.call_args, (('diff', 'master^', 'master', '--', 'foo/bar'), {}))
self.repo.diff('master^', 'master', 'foo/bar', 'foo/baz')
assert_true(git.called)
assert_equal(git.call_args, (('diff', 'master^', 'master', '--', 'foo/bar', 'foo/baz'), {}))
@patch_object(Git, '_call_process')
def test_diff_with_parents(self, git):
git.return_value = fixture('diff_p')
diffs = self.repo.commit_diff('master')
assert_equal(15, len(diffs))
assert_true(git.called)
def test_archive_tar(self):
self.repo.archive_tar()
def test_archive_tar_gz(self):
self.repo.archive_tar_gz()
def test_disable_daemon_export(self):
prev_value = self.repo.daemon_export
self.repo.daemon_export = not prev_value
assert_equal(self.repo.daemon_export, not prev_value)
self.repo.daemon_export = prev_value
assert_equal(self.repo.daemon_export, prev_value)
def test_alternates(self):
cur_alternates = self.repo.alternates
# empty alternates
self.repo.alternates = []
assert self.repo.alternates == []
alts = [ "other/location", "this/location" ]
self.repo.alternates = alts
assert alts == self.repo.alternates
self.repo.alternates = cur_alternates
def test_repr(self):
path = os.path.join(os.path.abspath(GIT_REPO), '.git')
assert_equal('<git.Repo "%s">' % path, repr(self.repo))
@patch_object(Git, '_call_process')
def test_log(self, git):
git.return_value = fixture('rev_list')
assert_equal('4c8124ffcf4039d292442eeccabdeca5af5c5017', self.repo.log()[0].id)
assert_equal('ab25fd8483882c3bda8a458ad2965d2248654335', self.repo.log()[-1].id)
assert_true(git.called)
assert_equal(git.call_count, 2)
assert_equal(git.call_args, (('log', 'master', '--'), {'pretty': 'raw'}))
@patch_object(Git, '_call_process')
def test_log_with_path_and_options(self, git):
git.return_value = fixture('rev_list')
self.repo.log('master', 'file.rb', **{'max_count': 1})
assert_true(git.called)
assert_equal(git.call_args, (('log', 'master', '--', 'file.rb'), {'pretty': 'raw', 'max_count': 1}))
def test_is_dirty_with_bare_repository(self):
self.repo.bare = True
assert_false(self.repo.is_dirty)
@patch_object(Git, '_call_process')
def test_is_dirty_with_clean_working_dir(self, git):
self.repo.bare = False
git.return_value = ''
assert_false(self.repo.is_dirty)
assert_equal(git.call_args, (('diff', 'HEAD', '--'), {}))
@patch_object(Git, '_call_process')
def test_is_dirty_with_dirty_working_dir(self, git):
self.repo.bare = False
git.return_value = '''-aaa\n+bbb'''
assert_true(self.repo.is_dirty)
assert_equal(git.call_args, (('diff', 'HEAD', '--'), {}))
@patch_object(Git, '_call_process')
def test_active_branch(self, git):
git.return_value = 'refs/heads/major-refactoring'
assert_equal(self.repo.active_branch, 'major-refactoring')
assert_equal(git.call_args, (('symbolic_ref', 'HEAD'), {}))
|
{
"content_hash": "92eaa235216c421aeacde1c78b7bf6f6",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 142,
"avg_line_length": 37.531496062992126,
"alnum_prop": 0.603272841707752,
"repo_name": "kergoth/git-origin",
"id": "abf17be866ee552b65dc01b1d3bb6ada40db547b",
"size": "9755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "git-python/test/git/test_repo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "128684"
}
],
"symlink_target": ""
}
|
"""Test the importprunedfunds and removeprunedfunds RPCs."""
from decimal import Decimal
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class ImportPrunedFundsTest(DigiByteTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(101)
self.sync_all()
# address
address1 = self.nodes[0].getnewaddress()
# pubkey
address2 = self.nodes[0].getnewaddress()
# privkey
address3 = self.nodes[0].getnewaddress()
address3_privkey = self.nodes[0].dumpprivkey(address3) # Using privkey
# Check only one address
address_info = self.nodes[0].getaddressinfo(address1)
assert_equal(address_info['ismine'], True)
self.sync_all()
# Node 1 sync test
assert_equal(self.nodes[1].getblockcount(), 101)
# Address Test - before import
address_info = self.nodes[1].getaddressinfo(address1)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].getaddressinfo(address2)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].getaddressinfo(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# Send funds to self
txnid1 = self.nodes[0].sendtoaddress(address1, 0.1)
self.nodes[0].generate(1)
rawtxn1 = self.nodes[0].gettransaction(txnid1)['hex']
proof1 = self.nodes[0].gettxoutproof([txnid1])
txnid2 = self.nodes[0].sendtoaddress(address2, 0.05)
self.nodes[0].generate(1)
rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex']
proof2 = self.nodes[0].gettxoutproof([txnid2])
txnid3 = self.nodes[0].sendtoaddress(address3, 0.025)
self.nodes[0].generate(1)
rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex']
proof3 = self.nodes[0].gettxoutproof([txnid3])
self.sync_all()
# Import with no affiliated address
assert_raises_rpc_error(-5, "No addresses", self.nodes[1].importprunedfunds, rawtxn1, proof1)
balance1 = self.nodes[1].getbalance()
assert_equal(balance1, Decimal(0))
# Import with affiliated address with no rescan
self.nodes[1].importaddress(address=address2, rescan=False)
self.nodes[1].importprunedfunds(rawtransaction=rawtxn2, txoutproof=proof2)
assert [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid2]
# Import with private key with no rescan
self.nodes[1].importprivkey(privkey=address3_privkey, rescan=False)
self.nodes[1].importprunedfunds(rawtxn3, proof3)
assert [tx for tx in self.nodes[1].listtransactions() if tx['txid'] == txnid3]
balance3 = self.nodes[1].getbalance()
assert_equal(balance3, Decimal('0.025'))
# Addresses Test - after import
address_info = self.nodes[1].getaddressinfo(address1)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].getaddressinfo(address2)
assert_equal(address_info['iswatchonly'], True)
assert_equal(address_info['ismine'], False)
address_info = self.nodes[1].getaddressinfo(address3)
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], True)
# Remove transactions
assert_raises_rpc_error(-8, "Transaction does not exist in wallet.", self.nodes[1].removeprunedfunds, txnid1)
assert not [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid1]
self.nodes[1].removeprunedfunds(txnid2)
assert not [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid2]
self.nodes[1].removeprunedfunds(txnid3)
assert not [tx for tx in self.nodes[1].listtransactions(include_watchonly=True) if tx['txid'] == txnid3]
if __name__ == '__main__':
ImportPrunedFundsTest().main()
|
{
"content_hash": "1d18a32c49ad9e59d7e16b0b00248f19",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 117,
"avg_line_length": 40.151785714285715,
"alnum_prop": 0.6559928841449856,
"repo_name": "aurarad/auroracoin",
"id": "d665b37c3a3cd4cf39c412889cc0c815ad98cd5f",
"size": "4766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/wallet_importprunedfunds.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "721707"
},
{
"name": "C++",
"bytes": "3060648"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18860"
},
{
"name": "HTML",
"bytes": "50620"
},
{
"name": "Makefile",
"bytes": "31933"
},
{
"name": "Objective-C",
"bytes": "1052"
},
{
"name": "Objective-C++",
"bytes": "6330"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "110348"
},
{
"name": "QMake",
"bytes": "2022"
},
{
"name": "Shell",
"bytes": "51195"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2016 - Sean Bailey - All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
{
"content_hash": "6a7a7b6cba1f16dd2f815dcd843b494a",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 72,
"avg_line_length": 39.2,
"alnum_prop": 0.7772108843537415,
"repo_name": "LuckehPickle/chat",
"id": "d8034515473bedab96a477c1f3331c073920d790",
"size": "588",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "comet/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "35522"
},
{
"name": "HTML",
"bytes": "20099"
},
{
"name": "JavaScript",
"bytes": "7635"
},
{
"name": "Python",
"bytes": "36925"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ploghubapp', '0011_auto_20170816_1114'),
]
operations = [
migrations.AlterField(
model_name='historicaluserprofile',
name='about',
field=models.CharField(default='[empty]', max_length=1000),
),
migrations.AlterField(
model_name='userprofile',
name='about',
field=models.CharField(default='[empty]', max_length=1000),
),
]
|
{
"content_hash": "e76f773d2b0f0068d750036f69ac7942",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 25.82608695652174,
"alnum_prop": 0.5858585858585859,
"repo_name": "ploggingdev/ploghub",
"id": "0223670c05cbfe6339b815a228446be9a66f6bcd",
"size": "667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ploghubapp/migrations/0012_auto_20170818_0854.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "38789"
},
{
"name": "JavaScript",
"bytes": "8540"
},
{
"name": "Python",
"bytes": "71833"
}
],
"symlink_target": ""
}
|
"""gl publish - Publish commits upstream."""
from . import helpers, pprint
def parser(subparsers, _):
"""Adds the publish parser to the given subparsers object."""
desc = 'publish commits upstream'
publish_parser = subparsers.add_parser(
'publish', help=desc, description=desc.capitalize(), aliases=['pb'])
publish_parser.add_argument(
'dst', nargs='?', help='the branch where to publish commits')
publish_parser.set_defaults(func=main)
def main(args, repo):
current_b = repo.current_branch
dst_b = helpers.get_branch_or_use_upstream(args.dst, 'dst', repo)
current_b.publish(dst_b)
pprint.ok(
'Publish of commits from branch {0} to branch {1} succeeded'.format(
current_b, dst_b))
return True
|
{
"content_hash": "7af8a88e394e632789689efffd9ace58",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 74,
"avg_line_length": 31.041666666666668,
"alnum_prop": 0.6859060402684564,
"repo_name": "sdg-mit/gitless",
"id": "19b62273b9a25724dab84b2fcad86dacdfb31234",
"size": "848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gitless/cli/gl_publish.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "178366"
},
{
"name": "Shell",
"bytes": "264"
}
],
"symlink_target": ""
}
|