repo_name
stringlengths 5
104
| path
stringlengths 4
248
| content
stringlengths 102
99.9k
|
|---|---|---|
MarcoBuster/OrarioTreniBot
|
src/updates/global_messages.py
|
# Copyright (c) 2016-2017 The OrarioTreniBot Authors (see AUTHORS)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import time
import botogram
import progressbar
import redis
import config
# Shared Redis connection; user state is stored under "user:<chat_id>" hashes.
r = redis.StrictRedis(host=config.REDIS_HOST, port=config.REDIS_PORT, db=config.REDIS_DB, password=config.REDIS_PASSWORD)
# Standalone botogram bot used for raw Telegram API calls during broadcasts.
bot = botogram.create(config.BOT_TOKEN)
def post(text, parse_mode="HTML", reply_markup=None, disable_web_page_preview=True, message=None):
    """Broadcast a message to every known bot user.

    text: message body to send.
    parse_mode: Telegram parse mode for the text (HTML by default).
    reply_markup: optional inline-keyboard dict, JSON-encoded for the API.
    disable_web_page_preview: suppress link previews in the sent message.
    message: optional botogram message edited in place with progress updates.

    Users that can no longer be reached are flagged inactive in Redis.
    """
    # Redis keys look like "user:<chat_id>"; strip the 5-char prefix.
    users = [int(user[5:]) for user in r.keys("user:*")]
    print("Sending global message...")
    print("> Text", text, sep=": ")
    print("> Reply Markup", reply_markup, sep=": ")
    print("> Parse mode", parse_mode, sep=": ")
    print("> Disable web page preview", disable_web_page_preview, sep=": ")
    bar = progressbar.ProgressBar()
    for user in bar(users):
        if message:
            message.edit(
                "<b>Sending global message...</b>"
                "\n<b>{value}/{max_value}</b> ({percentage}%)"
                .format(value=bar.value, max_value=bar.max_value, percentage=round(bar.percentage, 1))
            )
            time.sleep(0.1)
        user_hash = "user:" + str(user)
        # Probe the chat first; users who blocked the bot get marked inactive.
        try:
            bot.chat(user)
        except botogram.APIError:
            r.hset(user_hash, "active", False)
            continue
        try:
            active = r.hget(user_hash, "active")
            # BUG FIX: hget() returns None when the field is missing; the old
            # unconditional .decode() raised AttributeError and aborted the
            # entire broadcast mid-way.
            if active is not None and active.decode("utf-8") == "False":
                continue
            bot.api.call("sendMessage", {
                "chat_id": user, "text": text, "parse_mode": parse_mode,
                "disable_web_page_preview": disable_web_page_preview,
                "reply_markup": json.dumps(reply_markup) if reply_markup else ""
            })
        except botogram.APIError:
            r.hset(user_hash, "active", False)
        finally:
            # Throttle to stay under Telegram's send-rate limits.
            time.sleep(0.5)
    if message:
        message.edit(
            "<b>Sending global message...</b>"
            "\n<b>{value}/{max_value}</b> ({percentage}%)"
            .format(value=bar.value, max_value=bar.max_value, percentage=round(bar.percentage, 1))
        )
        time.sleep(0.1)
|
IronJew/mc-pop-log
|
poplog.py
|
import sys, datetime, os, time
from mcstatus import MinecraftServer
# GLOBALS
# List containing all the mcstatus server objects
serverList = []
# appendLog() polls the server for population data and appends it to the log
# In: The hostname and population of a server, as well as the timestamp for its retrieval time
# Out: Nada.
def appendLog(host, time, pop):
    """Append one (time, pop) sample to the per-server CSV log file.

    host: server hostname; dots are stripped to form the file name.
    time: ISO-format timestamp string for the sample.
    pop:  player count at that time.
    """
    log_name = host.replace(".", "") + ".csv"
    with open(log_name, "a") as handle:
        # A brand-new (empty) file gets the CSV header row first.
        if os.stat(log_name).st_size == 0:
            handle.write('"time","pop"')
        # Each sample goes on its own line: "<time>",<pop>
        handle.write("\n" + '"' + time + '",' + str(pop))
# getPopulations() Gets all the populations of the servers and calls appendLog() for each
# In: the list of servers
# Out: Nada
def getPopulations():
    """Poll every tracked server and append its current player count to its log."""
    global serverList
    print("Oy")
    # Loop through all the servers
    for serverObject in serverList:
        # NOTE(review): status() raises on network errors, so one unreachable
        # server currently aborts the whole polling pass — confirm intended.
        status = serverObject.status()
        pop = status.players.online
        # Shadows the imported `time` module locally (harmless here).
        time = datetime.datetime.now().isoformat()
        appendLog(serverObject.host, time, pop)
# Entry point.
def main():
    """Parse server URLs from argv, then poll populations every 300 seconds forever."""
    global serverList
    print("[mpl] Minecraft Population Logger")
    print("[mpl] Version 0.1.0")
    # No server URLs?
    if len(sys.argv[1:]) < 1:
        print("[mpl] Error: At least one URL must be specified.")
        return 0
    # Populate server list
    for url in sys.argv[1:]:
        serverObject = MinecraftServer.lookup(url)
        # If the server was found
        if serverObject != None:
            print("[mpl]", url, "was found!")
            serverList.append(serverObject)
        else:
            print("[mpl]", url, "not found! It will not be tracked.")
    # Infinite polling loop; fixed 5-minute interval between passes.
    while 1:
        getPopulations()
        time.sleep(300)

# Run the main.
main()
|
dotKom/onlineweb4
|
apps/profiles/filters.py
|
import django_filters
from django.contrib.auth.models import Group
from apps.authentication.models import OnlineUser as User
class PublicProfileFilter(django_filters.FilterSet):
    """Filter set for public user profiles by graduation year and group name."""
    year = django_filters.NumberFilter(field_name="year", method="filter_year")
    group = django_filters.CharFilter(method="filter_group")

    class Meta():
        model = User
        fields = ("year", "group")

    def filter_year(self, queryset, name, value):
        # `user.year` is computed in Python (not a DB column), so the whole
        # queryset is materialized and filtered in memory, then re-queried
        # by primary key. NOTE(review): O(n) over all users per request.
        user_ids = [user.id for user in queryset.all() if user.year == value]
        return User.objects.filter(pk__in=user_ids)

    def filter_group(self, queryset, name, value):
        # Exact group-name match; an unknown name yields an empty queryset.
        group = Group.objects.filter(name=value)
        return queryset.filter(groups__in=group)
|
luispedro/imread
|
imread/ijrois.py
|
# Copyright: Luis Pedro Coelho <luis@luispedro.org>, 2012-2018
# License: MIT
import numpy as np
def read_roi(fileobj):
    '''
    points = read_roi(fileobj)
    Read ImageJ's ROI format
    Parameters
    ----------
    fileobj: should be a file-like object
    Returns
    -------
    points: a list of points
    '''
    # This is based on:
    # http://rsbweb.nih.gov/ij/developer/source/ij/io/RoiDecoder.java.html
    # http://rsbweb.nih.gov/ij/developer/source/ij/io/RoiEncoder.java.html

    # Option-flag bit masks from the ImageJ ROI header.
    SPLINE_FIT = 1
    DOUBLE_HEADED = 2
    OUTLINE = 4
    OVERLAY_LABELS = 8
    OVERLAY_NAMES = 16
    OVERLAY_BACKGROUNDS = 32
    OVERLAY_BOLD = 64
    SUB_PIXEL_RESOLUTION = 128
    DRAW_OFFSET = 256

    # Byte offset within the stream; starts at 4 because the magic
    # number is read directly below with fileobj.read(4).
    pos = [4]

    def get8():
        # Read one unsigned byte, advancing the position counter.
        pos[0] += 1
        s = fileobj.read(1)
        if not s:
            raise IOError('readroi: Unexpected EOF')
        return ord(s)

    def get16():
        # Big-endian unsigned 16-bit integer.
        b0 = get8()
        b1 = get8()
        return (b0 << 8) | b1

    def get32():
        # Big-endian unsigned 32-bit integer.
        s0 = get16()
        s1 = get16()
        return (s0 << 16) | s1

    def getfloat():
        # Reinterpret the 32-bit word as an IEEE-754 float.
        v = np.int32(get32())
        return v.view(np.float32)

    magic = fileobj.read(4)
    if magic != b'Iout':
        raise IOError('Magic number not found')
    version = get16()
    # It seems that the roi type field occupies 2 Bytes, but only one is used
    roi_type = get8()
    # Discard second Byte:
    get8()
    # NOTE(review): the first range check is unreachable in effect — any
    # type other than 7 (freehand) is rejected by the second check anyway.
    if not (0 <= roi_type < 11):
        raise ValueError('roireader: ROI type %s not supported' % roi_type)
    if roi_type != 7:
        raise ValueError('roireader: ROI type %s not supported (!= 7)' % roi_type)
    # Fixed-layout header fields; most are read only to advance the stream.
    top = get16()
    left = get16()
    bottom = get16()
    right = get16()
    n_coordinates = get16()
    x1 = getfloat()
    y1 = getfloat()
    x2 = getfloat()
    y2 = getfloat()
    stroke_width = get16()
    shape_roi_size = get32()
    stroke_color = get32()
    fill_color = get32()
    subtype = get16()
    if subtype != 0:
        raise ValueError('roireader: ROI subtype {} not supported (!= 0)'.format(subtype))
    options = get16()
    arrow_style = get8()
    arrow_head_size = get8()
    rect_arc_size = get16()
    position = get32()
    header2offset = get32()

    # Coordinates are either sub-pixel floats or 16-bit ints depending on
    # the SUB_PIXEL_RESOLUTION option flag.
    if options & SUB_PIXEL_RESOLUTION:
        getc = getfloat
        points = np.empty((n_coordinates, 2), dtype=np.float32)
    else:
        getc = get16
        points = np.empty((n_coordinates, 2), dtype=np.int16)
    # The file stores all x coordinates, then all y coordinates; offsets are
    # relative to the bounding box, so add left/top to get absolute values.
    points[:,1] = [getc() for i in range(n_coordinates)]
    points[:,0] = [getc() for i in range(n_coordinates)]
    points[:,1] += left
    points[:,0] += top
    # Convert from ImageJ's 1-based to 0-based pixel coordinates.
    points -= 1
    return points
def read_roi_zip(fname):
    '''
    Reads all ROIs in a ZIP file
    Parameters
    ----------
    fname : str
        Input filename
    Returns
    -------
    rois: list of ROIs
        Each ROI is a vector of 2D points
    See Also
    --------
    read_roi: function, reads a single ROI
    '''
    import zipfile
    with zipfile.ZipFile(fname) as archive:
        rois = []
        # Decode every member of the archive as an individual ROI stream.
        for member in archive.namelist():
            rois.append(read_roi(archive.open(member)))
        return rois
|
ByStudent666/XsCrypto
|
XsCrypto/zhanzhuanxc.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'ByStudent'
def zhanzhuanxc(p, q, e):
    """Return the RSA private exponent d = e^-1 mod (p-1)*(q-1).

    p, q: the two RSA primes.
    e: the public exponent; must be coprime to (p-1)*(q-1).

    Raises ValueError when the modular inverse does not exist.
    """
    def egcd(a, b):
        # Iterative extended Euclidean algorithm.
        # Returns (g, x, y) where g == gcd(a, b) and x is the Bezout
        # coefficient of b needed to invert a modulo b.
        x, y, u, v = 0, 1, 1, 0
        while a != 0:
            q, r = b // a, b % a
            m, n = x - u * q, y - v * q
            b, a, x, y, u, v = a, r, u, v, m, n
        return b, x, y

    def modinv(a, m):
        # Modular inverse of a mod m via the extended Euclid above.
        gcd, x, y = egcd(a, m)
        if gcd != 1:
            # BUG FIX: previously returned None here, which made the int(d)
            # call below fail with an uninformative TypeError.
            raise ValueError("modular inverse does not exist")
        return x % m

    phi_n = (p - 1) * (q - 1)
    d = modinv(e, phi_n)
    return int(d)
# print zhanzhuanxc(18443,49891,19)
|
rbardaji/oceanobs
|
mooda/waterframe/plot/plot_timebar.py
|
""" Implementation of WaterFrame.plot_bar(key, ax=None, average_time=None)"""
import datetime
def plot_timebar(self, keys, ax=None, time_interval_mean=None):
    """
    Make a bar plot of the input keys.
    The bars are positioned at x with date/time. Their dimensions are given by height.
    Parameters
    ----------
        keys: list of str
            keys of self.data to plot.
        ax: matplotlib.axes object, optional (ax = None)
            It is used to add the plot to an input axes object.
        time_interval_mean: str, optional (time_interval_mean = None)
            It calculates an average value of a time interval. You can find
            all of the resample options here:
            https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
    Returns
    -------
        ax: matplotlib.AxesSubplot
            Axes of the plot.
    """
    def format_year(x):
        # Tick labels come back as full timestamp strings; keep the year only.
        return datetime.datetime.\
            strptime(x, '%Y-%m-%d %H:%M:%S').strftime('%Y')

    def format_day(x):
        # Keep the date part of the tick label.
        return datetime.datetime.\
            strptime(x, '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d')

    # Extract data and index it by time.
    # NOTE(review): assumes self.data has a 'TIME' column/level — confirm.
    df = self.data[keys].dropna().reset_index().set_index('TIME')
    df.index.rename("Date", inplace=True)

    # Resample data when an averaging interval was requested.
    if time_interval_mean is not None:
        df = df.resample(time_interval_mean).mean()

    # A list of keys gets a legend; a single key does not.
    if isinstance(keys, list):
        ax = df[keys].plot.bar(ax=ax, legend=True)
    else:
        ax = df[keys].plot.bar(ax=ax)

    # Write axes
    try:
        ax.set_ylabel(self.vocabulary[keys]['units'])
    except KeyError:
        # BUG FIX: the warning told users to edit self.meaning, but the units
        # are actually read from self.vocabulary (see the try block above).
        print("Warning: We don't know the units of", keys,
              "Please, add info into self.vocabulary[", keys, "['units']")

    # Shorten tick labels for annual ('A') or daily ('D') resampling.
    if time_interval_mean == 'A':
        ax.set_xticklabels([format_year(x.get_text())
                            for x in ax.get_xticklabels()], rotation=60)
    elif time_interval_mean == 'D':
        ax.set_xticklabels([format_day(x.get_text())
                            for x in ax.get_xticklabels()], rotation=60)
    return ax
|
eiri/nixie
|
tests/test_nixie_errors.py
|
import unittest, uuid
from nixie.core import Nixie, KeyError
class NixieErrorsTestCase(unittest.TestCase):
    """Error-path tests for the Nixie counter store.

    NOTE(review): KeyError here is nixie.core.KeyError (imported above),
    which shadows the builtin — presumably a project-defined exception;
    verify in nixie.core.
    """

    def test_read_missing(self):
        # Reading an unknown key is not an error; it yields None.
        nx = Nixie()
        self.assertIsNone(nx.read('missing'))

    def test_update_missing(self):
        # Updating an unknown key raises.
        nx = Nixie()
        with self.assertRaises(KeyError):
            nx.update('missing')

    def test_update_with_wrong_value(self):
        # Counter values must be numeric; strings are rejected.
        nx = Nixie()
        key = nx.create()
        with self.assertRaises(ValueError):
            nx.update(key, 'a')

    def test_delete_missing(self):
        nx = Nixie()
        with self.assertRaises(KeyError):
            nx.delete('missing')
|
kagenZhao/cnBeta
|
CnbetaApi/CnbetaApi/urls.py
|
#!/usr/bin/env python3
"""CnbetaApi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# Route the Django admin UI and delegate everything under /apis/ to the
# CnbetaApis application's URLconf.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^apis/', include('CnbetaApis.urls')),
]
|
Eylesis/Botfriend
|
Cogs/GameTime.py
|
import discord
from discord.ext import commands
import time
import datetime
import pytz
class GameTime(commands.Cog):
    """Discord cog that reports the current in-game (campaign calendar) time."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(pass_context=True)
    async def time(self, ctx):
        """Displays current game time."""
        # ServerInfo record is expected to hold the configured city name.
        # NOTE(review): assumes the stored value is a dict with 'CityName'.
        locationName = self.bot.db.get_val("ServerInfo", "")
        embed = discord.Embed(title="Current time in {}".format(locationName['CityName']),
                              description=get_gametime())
        await ctx.send(embed=embed)
        # BUG FIX: discord.py Message has no delete_message(); the correct
        # call is Message.delete(). Also removed leftover debug prints.
        await ctx.message.delete()
def suffix(d):
    """Return the English ordinal suffix ('st', 'nd', 'rd', 'th') for day *d*."""
    # 11th-13th are irregular; otherwise the last digit decides.
    if 11 <= d <= 13:
        return 'th'
    return {1: 'st', 2: 'nd', 3: 'rd'}.get(d % 10, 'th')
def get_rawtime():
    """Return the current real-world time as a timezone-aware UTC datetime."""
    return datetime.datetime.now(pytz.timezone('UTC'))
def get_gametime():
    """Return the current in-game time as a formatted string.

    The campaign calendar runs 3 in-game days per real day (plus one extra
    day per 8-hour block of the current real day), anchored to the real
    date 2020-10-18.
    """
    months = [
        "Hammer",
        "Alturiak",
        "Ches",
        "Tarsakh",
        "Mirtul",
        "Kythorn",
        "Flamerule",
        "Eleasis",
        "Eleint",
        "Marpenoth",
        "Uktar",
        "Nightal"]
    # BUG FIX: the file imports the *module* `datetime`, so the bare
    # `datetime(...)`, `datetime.now(...)` and `timedelta(...)` calls below
    # raised NameError/TypeError; qualify them as datetime.datetime /
    # datetime.timedelta.
    aDate = datetime.datetime(2020, 10, 18, tzinfo=pytz.timezone('UTC'))
    bDate = datetime.datetime.now(pytz.timezone('UTC'))
    delta = bDate - aDate
    gametime = (datetime.datetime(2020, 10, 18, bDate.hour, bDate.minute, bDate.second)
                + datetime.timedelta(days=delta.days * 3)
                + datetime.timedelta(days=(bDate.hour // 8 - 2)))
    # 12-hour clock conversion.
    if gametime.hour == 0:
        gametime_hour = 12
        time_decor = "AM"
    else:
        gametime_hour = gametime.hour - 12 if gametime.hour > 12 else gametime.hour
        # BUG FIX: noon (hour == 12) was rendered "12 AM"; use >= 12 for PM.
        time_decor = "PM" if gametime.hour >= 12 else "AM"
    gametime_minute = "0{}".format(gametime.minute) if gametime.minute < 10 else gametime.minute
    return "{}:{} {} UTC | {}{} of {}".format(gametime_hour, gametime_minute, time_decor, gametime.day, suffix(gametime.day), months[gametime.month-1])
def setup(bot):
    # discord.py extension entry point: register the cog with the bot.
    bot.add_cog(GameTime(bot))
|
mhmurray/cloaca
|
cloaca/test/tests.py
|
#!/usr/bin/env python
import unittest
import logging
import logging.config
import sys
import argparse
DESCRIPTION="""
Harness for tests in the cloaca/tests/ directory.
Run all tests with '--all' or provide a list dotted names
of specific tests (eg. legionary.TestLegionary.test_legionary).
"""
# Set up logging. See logging.json for config
def setup_logging(
        default_path='test_logging.json',
        default_level=logging.INFO):
    """Configure logging from a JSON config file when present.

    Falls back to logging.basicConfig(default_level) when *default_path*
    does not exist.
    """
    import sys, os, json
    if os.path.exists(default_path):
        # Dict-style configuration loaded straight from the JSON file.
        with open(default_path, 'rt') as handle:
            logging.config.dictConfig(json.load(handle))
    else:
        logging.basicConfig(level=default_level)
def main():
    """Parse CLI arguments, configure logging, and run the selected test suites."""
    parser = argparse.ArgumentParser(
        description=DESCRIPTION,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--all', action='store_true',
                        help='Run all tests instead of matching pattern.')
    parser.add_argument('pattern', nargs='*',
                        help=('pattern(s) to match against, eg. "buildings" or '
                              '"architect.TestArchitect.test_lead_architect".'))
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Use verbose test result reporting.')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help=('Suppress individual test result reporting. Still reports '
                              'summary information. Overrides --verbose.'))
    parser.add_argument('--log-level', default='WARNING',
                        help=('Set app log level during tests. Valid arguments are: '
                              'DEBUG, INFO, WARNING, ERROR, CRITICAL. See logging module '
                              'documentation.'))
    args = parser.parse_args()
    setup_logging()
    # Map the textual log level to its numeric value; reject unknown names.
    numeric_level = getattr(logging, args.log_level.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: {0!s}'.format(args.log_level))
    # This catches the children loggers like cloaca.game
    logging.getLogger('cloaca').setLevel(numeric_level)
    loader = unittest.defaultTestLoader
    if args.all:
        sys.stderr.write('Running all tests.\n')
        suites = loader.discover('.', pattern='*.py')
    else:
        # Without --all, at least one dotted test pattern is required.
        if len(args.pattern) == 0:
            sys.stderr.write('ERROR: No tests specified.\n\n')
            parser.print_help(file=sys.stderr)
            return
        sys.stderr.write('Running all tests matching the patterns ('
                         + ', '.join(args.pattern) + ')\n')
        suites = loader.loadTestsFromNames(args.pattern)
    test_suite = unittest.TestSuite(suites)
    # TextTestRunner takes verbosity that can be 0 (quiet), 1 (default),
    # or 2 (verbose). Quiet overrides verbose.
    if args.quiet:
        verbosity = 0
    elif args.verbose:
        verbosity = 2
    else:
        verbosity = 1
    test_runner = unittest.TextTestRunner(verbosity=verbosity).run(test_suite)

if __name__ == '__main__':
    main()
|
rajarahulray/iDetector
|
tests_and_ References/table_view_text_box.py
|
import tkinter as tk
class ExampleApp(tk.Tk):
    """Demo window showing a SimpleTable with one customized cell."""
    def __init__(self):
        tk.Tk.__init__(self)
        # Build a 10-row by 2-column table and stretch it across the top.
        t = SimpleTable(self, 10, 2)
        t.pack(side="top", fill="x")
        t.set(0, 0, "Hello, world")
class SimpleTable(tk.Frame):
    """A grid of tk.Label widgets acting as a simple read/write table."""
    def __init__(self, parent, rows=10, columns=2):
        # use black background so it "peeks through" to
        # form grid lines
        tk.Frame.__init__(self, parent, background="black")
        # 2-D list of Labels, indexed as [row][column].
        self._widgets = []
        for row in range(rows):
            current_row = []
            for column in range(columns):
                label = tk.Label(self, text="%s/%s" % (row, column),
                                 borderwidth=0, width=10, height=10)
                # 1-pixel padding lets the black frame show as grid lines.
                label.grid(row=row, column=column, sticky="nsew", padx=1, pady=1)
                current_row.append(label)
            self._widgets.append(current_row)
        # Let every column stretch equally with the window.
        for column in range(columns):
            self.grid_columnconfigure(column, weight=1)

    def set(self, row, column, value):
        """Put *value* into the cell at (row, column)."""
        widget = self._widgets[row][column]
        widget.configure(text=value)
if __name__ == "__main__":
app = ExampleApp()
app.mainloop()
|
zayfod/pyfranca
|
pyfranca/ast.py
|
"""
Franca abstract syntax tree representation.
"""
from abc import ABCMeta
from collections import OrderedDict
class ASTException(Exception):
    """Base exception for Franca AST construction errors."""
    def __init__(self, message):
        # Keep the human-readable message for __str__.
        super(ASTException, self).__init__()
        self.message = message

    def __str__(self):
        return self.message
class Package(object):
    """
    AST representation of a Franca package.
    """
    def __init__(self, name, file_name=None, imports=None,
                 interfaces=None, typecollections=None, comments=None):
        """
        Constructs a new Package.
        """
        self.name = name
        # A package may span several .fidl files; collect their names here.
        self.files = [file_name] if file_name else []
        self.imports = imports if imports else []
        self.interfaces = interfaces if interfaces else OrderedDict()
        self.typecollections = typecollections if typecollections else \
            OrderedDict()
        self.comments = comments if comments else OrderedDict()
        # Back-link every contained namespace to this package.
        for item in self.interfaces.values():
            item.package = self
        for item in self.typecollections.values():
            item.package = self

    def __contains__(self, namespace):
        # True when *namespace* names a contained type collection or interface.
        if not isinstance(namespace, str):
            raise TypeError
        res = namespace in self.typecollections or namespace in self.interfaces
        return res

    def __getitem__(self, namespace):
        # Type collections take precedence over interfaces on a name clash.
        if not isinstance(namespace, str):
            raise TypeError
        elif namespace in self.typecollections:
            return self.typecollections[namespace]
        elif namespace in self.interfaces:
            return self.interfaces[namespace]
        else:
            raise KeyError

    def __iadd__(self, package):
        # Merge another package's files, imports and members into this one.
        if not isinstance(package, Package):
            raise TypeError
        # Ignore the name
        self.files += package.files
        for item in package.imports:
            self.imports.append(item)
        for item in package.interfaces.values():
            if item.name in self:
                raise ASTException("Interface member defined more than"
                                   " once '{}'.".format(item.name))
            self.interfaces[item.name] = item
            item.package = self
        for item in package.typecollections.values():
            if item.name in self:
                raise ASTException("Type collection member defined more than"
                                   " once '{}'.".format(item.name))
            self.typecollections[item.name] = item
            item.package = self
        return self
class Import(object):
    """AST node for a Franca import statement."""
    def __init__(self, file_name, namespace=None):
        # Name of the imported .fidl file.
        self.file = file_name
        # Imported namespace; None means a whole-model import.
        self.namespace = namespace
        # Resolved links, filled in later by the processor.
        self.package_reference = None
        self.namespace_reference = None
class Namespace(object):
    """Abstract base for named scopes (type collections, interfaces) that own
    user-defined types, constants and an optional version."""
    __metaclass__ = ABCMeta

    def __init__(self, name, flags=None, members=None, comments=None):
        """Construct the namespace and register any provided members."""
        self.package = None
        self.name = name
        self.flags = flags if flags else []     # Unused
        self.version = None
        self.typedefs = OrderedDict()
        self.enumerations = OrderedDict()
        self.structs = OrderedDict()
        self.arrays = OrderedDict()
        self.maps = OrderedDict()
        self.constants = OrderedDict()
        self.comments = comments if comments else OrderedDict()
        if members:
            for member in members:
                self._add_member(member)

    def __contains__(self, name):
        # True when *name* is defined in any member category.
        if not isinstance(name, str):
            raise TypeError
        res = name in self.typedefs or \
            name in self.enumerations or \
            name in self.structs or \
            name in self.arrays or \
            name in self.maps or \
            name in self.constants
        return res

    def __getitem__(self, name):
        # Look *name* up across all member categories; KeyError when absent.
        if not isinstance(name, str):
            raise TypeError
        elif name in self.typedefs:
            return self.typedefs[name]
        elif name in self.enumerations:
            return self.enumerations[name]
        elif name in self.structs:
            return self.structs[name]
        elif name in self.arrays:
            return self.arrays[name]
        elif name in self.maps:
            return self.maps[name]
        elif name in self.constants:
            # BUG FIX: was "name in self.constants[name]", which raised
            # TypeError/KeyError instead of returning the constant.
            return self.constants[name]
        else:
            raise KeyError

    def _add_member(self, member):
        # Dispatch *member* into the correct category dict, rejecting
        # duplicates and unknown kinds.
        if isinstance(member, Version):
            if not self.version:
                self.version = member
            else:
                raise ASTException("Multiple version definitions.")
        elif isinstance(member, Type):
            if member.name in self:
                raise ASTException(
                    "Duplicate namespace member '{}'.".format(member.name))
            if isinstance(member, Typedef):
                self.typedefs[member.name] = member
                # Handle anonymous array special case.
                if isinstance(member.type, Array):
                    member.type.namespace = self
            elif isinstance(member, Enumeration):
                self.enumerations[member.name] = member
            elif isinstance(member, Struct):
                self.structs[member.name] = member
                # Handle anonymous array special case.
                for field in member.fields.values():
                    if isinstance(field.type, Array):
                        field.type.namespace = self
            elif isinstance(member, Array):
                self.arrays[member.name] = member
                # Handle anonymous array special case.
                if isinstance(member.type, Array):
                    member.type.namespace = self
            elif isinstance(member, Map):
                self.maps[member.name] = member
                # Handle anonymous array special case.
                if isinstance(member.key_type, Array):
                    member.key_type.namespace = self
                if isinstance(member.value_type, Array):
                    member.value_type.namespace = self
            elif isinstance(member, Constant):
                self.constants[member.name] = member
            else:
                raise ASTException("Unexpected namespace member type.")
            member.namespace = self
        else:
            raise ValueError("Unexpected namespace member type.")
class TypeCollection(Namespace):
    """A Franca type collection: a namespace holding only type definitions."""
    def __init__(self, name, flags=None, members=None, comments=None):
        super(TypeCollection, self).__init__(name, flags=flags,
                                             members=members, comments=comments)

class Type(object):
    """Abstract base for all Franca type AST nodes."""
    __metaclass__ = ABCMeta
    def __init__(self, name=None, comments=None):
        # Owning namespace; set when the type is registered via _add_member.
        self.namespace = None
        # Anonymous types default to their class name.
        self.name = name if name else self.__class__.__name__
        self.comments = comments if comments else OrderedDict()
class Typedef(Type):
    """A named alias for another type."""
    def __init__(self, name, base_type, comments=None):
        super(Typedef, self).__init__(name, comments)
        self.type = base_type

class PrimitiveType(Type):
    """Abstract base for Franca's built-in primitive types."""
    __metaclass__ = ABCMeta
    def __init__(self):
        super(PrimitiveType, self).__init__()

class Int8(PrimitiveType):
    def __init__(self):
        super(Int8, self).__init__()

class Int16(PrimitiveType):
    def __init__(self):
        super(Int16, self).__init__()

class Int32(PrimitiveType):
    def __init__(self):
        super(Int32, self).__init__()

class Int64(PrimitiveType):
    def __init__(self):
        super(Int64, self).__init__()

class UInt8(PrimitiveType):
    def __init__(self):
        super(UInt8, self).__init__()

class UInt16(PrimitiveType):
    def __init__(self):
        super(UInt16, self).__init__()

class UInt32(PrimitiveType):
    def __init__(self):
        super(UInt32, self).__init__()

class UInt64(PrimitiveType):
    def __init__(self):
        super(UInt64, self).__init__()

class Boolean(PrimitiveType):
    def __init__(self):
        super(Boolean, self).__init__()

class Float(PrimitiveType):
    def __init__(self):
        super(Float, self).__init__()

class Double(PrimitiveType):
    def __init__(self):
        super(Double, self).__init__()

class String(PrimitiveType):
    def __init__(self):
        super(String, self).__init__()

class ByteBuffer(PrimitiveType):
    def __init__(self):
        super(ByteBuffer, self).__init__()

class ComplexType(Type):
    """Abstract base for user-defined (non-primitive) types."""
    __metaclass__ = ABCMeta
    def __init__(self, comments=None):
        super(ComplexType, self).__init__(comments=comments)
class Value(Type):
    """Abstract base for typed constant values."""
    # BUG FIX: was "_metaclass__" (missing leading underscore), which just
    # created a meaningless class attribute; every other abstract class in
    # this module spells it "__metaclass__".
    __metaclass__ = ABCMeta

    def __init__(self, value, value_type=None):
        # The node name defaults to the concrete subclass name.
        super(Value, self).__init__(value_type if value_type else self.__class__.__name__)
        self.value = value
class IntegerValue(Value):
    """An integer literal; *base* records the source notation."""
    BINARY = 2
    DECIMAL = 10
    HEXADECIMAL = 16
    def __init__(self, value, base=DECIMAL):
        super(IntegerValue, self).__init__(value)
        self.base = base

class BooleanValue(Value):
    def __init__(self, value):
        super(BooleanValue, self).__init__(value)

class FloatValue(Value):
    def __init__(self, value):
        super(FloatValue, self).__init__(value)

class DoubleValue(Value):
    def __init__(self, value):
        super(DoubleValue, self).__init__(value)

class StringValue(Value):
    def __init__(self, value):
        super(StringValue, self).__init__(value)
class Enumeration(ComplexType):
    """A Franca enumeration, optionally extending another enumeration."""
    def __init__(self, name, enumerators=None, extends=None, flags=None, comments=None):
        super(Enumeration, self).__init__(comments=comments)
        self.name = name
        self.enumerators = enumerators if enumerators else OrderedDict()
        self.extends = extends          # Name of the extended enumeration.
        self.reference = None           # Resolved later to the actual node.
        self.flags = flags if flags else []     # Unused

class Enumerator(object):
    """A single named enumeration member with an optional explicit value."""
    def __init__(self, name, value=None, comments=None):
        self.name = name
        self.value = value
        self.comments = comments if comments else OrderedDict()

class Struct(ComplexType):
    """A Franca struct, optionally extending another struct."""
    def __init__(self, name, fields=None, extends=None, flags=None, comments=None):
        super(Struct, self).__init__(comments=comments)
        self.name = name
        self.fields = fields if fields else OrderedDict()
        self.extends = extends
        self.reference = None
        self.flags = flags if flags else []

class StructField(object):
    """A named, typed field of a Struct."""
    def __init__(self, name, field_type, comments=None):
        self.name = name
        self.type = field_type
        self.comments = comments if comments else OrderedDict()

class Array(ComplexType):
    """An array type; name is None for anonymous (inline) arrays."""
    def __init__(self, name, element_type, comments=None):
        super(Array, self).__init__(comments=comments)
        self.name = name    # None for implicit arrays.
        self.type = element_type

class Map(ComplexType):
    """A key/value map type."""
    def __init__(self, name, key_type, value_type, comments=None):
        super(Map, self).__init__(comments=comments)
        self.name = name
        self.key_type = key_type
        self.value_type = value_type

class Constant(ComplexType):
    """A named, typed constant with a literal value."""
    def __init__(self, name, element_type, element_value, comments=None):
        super(Constant, self).__init__(comments=comments)
        self.name = name
        self.type = element_type
        self.value = element_value

class Reference(Type):
    """A by-name reference to a type defined elsewhere; resolved later."""
    def __init__(self, name):
        super(Reference, self).__init__()
        self.name = name
        self.reference = None
class Interface(Namespace):
    """A Franca interface: a namespace that additionally owns attributes,
    methods and broadcasts, and may extend another interface."""
    def __init__(self, name, flags=None, members=None, extends=None, comments=None):
        # Members are intentionally NOT passed to the base constructor so the
        # overridden _add_member() below sees interface-specific kinds too.
        super(Interface, self).__init__(name=name, flags=flags, members=None, comments=comments)
        self.attributes = OrderedDict()
        self.methods = OrderedDict()
        self.broadcasts = OrderedDict()
        self.extends = extends          # Name of the extended interface.
        self.reference = None           # Resolved later to the Interface node.
        if members:
            for member in members:
                self._add_member(member)

    def __contains__(self, name):
        # Checks the base categories plus attributes, methods and broadcasts.
        if not isinstance(name, str):
            raise TypeError
        res = super(Interface, self).__contains__(name) or \
            name in self.attributes or \
            name in self.methods or \
            name in self.broadcasts
        return res

    def __getitem__(self, name):
        # Interface-specific categories first, then fall back to the base.
        if not isinstance(name, str):
            raise TypeError
        elif name in self.attributes:
            return self.attributes[name]
        elif name in self.methods:
            return self.methods[name]
        elif name in self.broadcasts:
            return self.broadcasts[name]
        else:
            return super(Interface, self).__getitem__(name)

    def _add_member(self, member):
        # Route interface-specific members locally; delegate the rest.
        if isinstance(member, Type):
            if member.name in self:
                raise ASTException(
                    "Duplicate namespace member '{}'.".format(member.name))
            if isinstance(member, Attribute):
                self.attributes[member.name] = member
                # Handle anonymous array special case.
                if isinstance(member.type, Array):
                    member.type.namespace = self
            elif isinstance(member, Method):
                self.methods[member.name] = member
                # Handle anonymous array special case.
                for arg in member.in_args.values():
                    if isinstance(arg.type, Array):
                        arg.type.namespace = self
                for arg in member.out_args.values():
                    if isinstance(arg.type, Array):
                        arg.type.namespace = self
            elif isinstance(member, Broadcast):
                self.broadcasts[member.name] = member
                # Handle anonymous array special case.
                for arg in member.out_args.values():
                    if isinstance(arg.type, Array):
                        arg.type.namespace = self
            else:
                super(Interface, self)._add_member(member)
            member.namespace = self
        else:
            super(Interface, self)._add_member(member)
class Version(object):
    """An interface/type-collection version (major.minor)."""
    def __init__(self, major, minor):
        self.major = major
        self.minor = minor

    def __str__(self):
        return "{}.{}".format(self.major, self.minor)

class Attribute(Type):
    """A typed interface attribute."""
    def __init__(self, name, attr_type, flags=None, comments=None):
        super(Attribute, self).__init__(name, comments)
        self.type = attr_type
        self.flags = flags if flags else []

class Method(Type):
    """An interface method with input/output arguments and optional errors."""
    def __init__(self, name, flags=None,
                 in_args=None, out_args=None, errors=None, comments=None):
        super(Method, self).__init__(name, comments)
        self.flags = flags if flags else []
        self.in_args = in_args if in_args else OrderedDict()
        self.out_args = out_args if out_args else OrderedDict()
        # Errors can be an OrderedDict() or a Reference to an enumeration.
        self.errors = errors if errors else OrderedDict()

class Broadcast(Type):
    """An interface broadcast (server-to-client notification)."""
    def __init__(self, name, flags=None, out_args=None, comments=None):
        super(Broadcast, self).__init__(name, comments)
        self.flags = flags if flags else []
        self.out_args = out_args if out_args else OrderedDict()

class Argument(object):
    """A named, typed method/broadcast argument."""
    def __init__(self, name, arg_type, comments=None):
        self.name = name
        self.type = arg_type
        self.comments = comments if comments else OrderedDict()
|
sampathweb/game_app
|
card_games/test/test_blackjack.py
|
#!/usr/bin/env python
"""
Test code for blackjack game. Tests can be run with py.test or nosetests
"""
from __future__ import print_function
from unittest import TestCase
from card_games import blackjack
from card_games.blackjack import BlackJack
print(blackjack.__file__)
class TestRule(TestCase):
    """Black-box tests of the BlackJack game rules."""

    def setUp(self):
        # No shared fixtures; each test builds its own game.
        pass

    def tearDown(self):
        pass

    def test_init(self):
        # A new game deals two cards to each side.
        mygame = BlackJack()
        self.assertEqual(len(mygame.player_hand), 2)  # Initial hand for Player
        self.assertEqual(len(mygame.dealer_hand), 2)  # Initial hand for Dealer

    def test_player_bust(self):
        mygame = BlackJack()
        for cnt in range(10):  # Draw 10 cards - Sure to loose
            mygame.draw_card_player()
        self.assertEqual(len(mygame.player_hand), 12)  # Twelve cards in Player's hand
        self.assertEqual(mygame.game_result(), 'bust')  # Definitely a bust
|
WyllVern/AutomaBot
|
setup.py
|
"""Automabot bot for Discord."""
from os import path
from setuptools import setup, find_packages
# Resolve paths relative to this setup.py so builds work from any CWD.
here = path.abspath(path.dirname(__file__))
# The long description shown on PyPI is taken verbatim from the README.
with open(path.join(here, 'README.rst'), 'r', encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='automabot',
    version='0.0.1.dev20170604',  # see PEP-0440
    python_requires='>=3.6',
    author='Maël Pedretti & Chea Dany',
    author_email='mael.pedretti@he-arc.ch & dany.chea@he-arc.ch',
    url='https://github.com/73VW/AutomaBot',
    license='https://opensource.org/licenses/BSD-3-Clause',
    # The module docstring doubles as the short description.
    description=__doc__,
    long_description=long_description,
    packages=find_packages(exclude=('contrib', 'docs', 'tests')),
    keywords='discord asyncio bot',
    classifiers=(
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Education',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Home Automation'
    ),
    install_requires=(
        'discord.py>=0.16.8',
        'aiohttp>=1.0.0,<1.1.0',
        'pyfiglet>=0.7.5',
        'toml>=0.9.2'
    ),
    extras_require={
        'fast': ('cchardet', 'aiodns'),  # making it faster (recommended)
        'qa': ('flake8', 'isort', 'pycodestyle', 'pydocstyle', 'rstcheck'),
        'docs': ('Sphinx>=1.6.0', 'sphinxcontrib-trio')
    },
)
|
unbracketed/tipi
|
tipi/commands/base.py
|
"""
Base classes for writing management commands (named commands which can
be executed through ``tipi.py``).
"""
import os
import sys
from ConfigParser import ConfigParser
from optparse import make_option, OptionParser
from virtualenv import resolve_interpreter
class CommandError(Exception):
    """
    Exception class indicating a problem while executing a management
    command.
    If this exception is raised during the execution of a management
    command, it will be caught and turned into a nicely-printed error
    message to the appropriate output stream (i.e., stderr); as a
    result, raising this exception (with a sensible description of the
    error) is the preferred way to indicate that something has gone
    wrong in the execution of a command.
    """
    # Marker exception only; no extra state or behavior needed.
    pass
class BaseCommand(object):
    """
    The base class from which all management commands ultimately
    derive.

    Use this class if you want access to all of the mechanisms which
    parse the command-line arguments and work out what code to call in
    response; if you don't need to change any of that behavior,
    consider using one of the subclasses defined in this file.

    If you are interested in overriding/customizing various aspects of
    the command-parsing and -execution behavior, the normal flow works
    as follows:

    1. ``tipi.py`` loads the command class
       and calls its ``run_from_argv()`` method.

    2. The ``run_from_argv()`` method calls ``create_parser()`` to get
       an ``OptionParser`` for the arguments, parses them, performs
       any environment changes requested by options like
       ``pythonpath``, and then calls the ``execute()`` method,
       passing the parsed arguments.

    3. The ``execute()`` method attempts to carry out the command by
       calling the ``handle()`` method with the parsed arguments; any
       output produced by ``handle()`` will be printed to standard
       output.

    4. If ``handle()`` raised a ``CommandError``, ``execute()`` will
       instead print an error message to ``stderr``.

    Thus, the ``handle()`` method is typically the starting point for
    subclasses; many built-in commands and command types either place
    all of their logic in ``handle()``, or perform some additional
    parsing work in ``handle()`` and then delegate from it to more
    specialized methods as needed.

    Several attributes affect behavior at various steps along the way:

    ``args``
        A string listing the arguments accepted by the command,
        suitable for use in help messages; e.g., a command which takes
        a list of application names might set this to '<appname
        appname ...>'.

    ``help``
        A short description of the command, which will be printed in
        help messages.

    ``option_list``
        This is the list of ``optparse`` options which will be fed
        into the command's ``OptionParser`` for parsing arguments.
    """
    # Metadata about this command: the options shared by every command.
    option_list = (
        make_option('-v', '--verbose', action='store', dest='verbose', default='1',
            type='choice', choices=['0', '1', '2'],
            help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
        make_option('-p', '--python',
            help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 '
            'interpreter to create the new environment. The default is the interpreter that '
            'virtualenv was installed with (%s)' % sys.executable),
        make_option('--traceback', action='store_true',
            help='Print traceback on exception'),
    )
    help = ''
    args = ''

    #TODO syntax coloring support
    #def __init__(self):
    #    #self.style = color_style()
    #    try:
    #        home = os.getenv('USERPROFILE') or os.getenv('HOME')
    #        config = ConfigParser(open(os.path.join(home, '.tipirc')))
    #    except IOError:
    #        pass
    #    except:
    #        pass
    #
    #    self._interpreter = resolve_interpreter('python')
    #
    #@property
    #def python_interpreter(self):
    #    return self._interpreter

    def get_version(self):
        """
        Return the version reported by ``--version``. This is a
        hard-coded placeholder for now; user-supplied commands should
        override this method.
        """
        #TODO placeholder
        return (0, 1, 0,)

    def usage(self, subcommand):
        """
        Return a brief description of how to use this command, by
        default from the attribute ``self.help``.
        """
        # %%prog escapes the literal %prog placeholder that optparse expands.
        usage = '%%prog %s [options] %s' % (subcommand, self.args)
        if self.help:
            return '%s\n\n%s' % (usage, self.help)
        else:
            return usage

    def create_parser(self, prog_name, subcommand):
        """
        Create and return the ``OptionParser`` which will be used to
        parse the arguments to this command.
        """
        return OptionParser(prog=prog_name,
                            usage=self.usage(subcommand),
                            version=str(self.get_version()),
                            option_list=self.option_list)

    def print_help(self, prog_name, subcommand):
        """
        Print the help message for this command, derived from
        ``self.usage()``.
        """
        parser = self.create_parser(prog_name, subcommand)
        parser.print_help()

    def run_from_argv(self, argv):
        """
        Set up any environment changes requested, then run this command.
        argv[0] is the program name, argv[1] the subcommand, the rest
        are the subcommand's own arguments.
        """
        parser = self.create_parser(argv[0], argv[1])
        options, args = parser.parse_args(argv[2:])
        # Forward parsed option values as keyword arguments to handle().
        self.execute(*args, **options.__dict__)

    def execute(self, *args, **options):
        """
        Try to execute this command. If the command raises a
        ``CommandError``, intercept it and print it sensibly to
        stderr.
        """
        try:
            #output = self.handle(*args, **options)
            # NOTE: Python 2 print statement -- handle()'s return value
            # is written straight to stdout.
            print self.handle(*args, **options)
            #if output:
            #    print output
        except CommandError, e:
            #sys.stderr.write(self.style.ERROR(str('Error: %s\n' % e)))
            sys.stderr.write(str('Error: %s\n' % e))
            sys.exit(1)

    def handle(self, *args, **options):
        """
        The actual logic of the command. Subclasses must implement
        this method.
        """
        raise NotImplementedError()
#class AppCommand(BaseCommand):
# """
# A management command which takes one or more installed application
# names as arguments, and does something with each of them.
#
# Rather than implementing ``handle()``, subclasses must implement
# ``handle_app()``, which will be called once for each application.
#
# """
# args = '<appname appname ...>'
#
# def handle(self, *app_labels, **options):
# from django.db import models
# if not app_labels:
# raise CommandError('Enter at least one appname.')
# try:
# app_list = [models.get_app(app_label) for app_label in app_labels]
# except (ImproperlyConfigured, ImportError), e:
# raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
# output = []
# for app in app_list:
# app_output = self.handle_app(app, **options)
# if app_output:
# output.append(app_output)
# return '\n'.join(output)
#
# def handle_app(self, app, **options):
# """
# Perform the command's actions for ``app``, which will be the
# Python module corresponding to an application name given on
# the command line.
#
# """
# raise NotImplementedError()
class LabelCommand(BaseCommand):
    """
    A management command which takes one or more arbitrary arguments
    (labels) on the command line and processes each of them.

    Rather than implementing ``handle()``, subclasses must implement
    ``handle_label()``, which will be called once for each label.

    If the arguments should be names of installed applications, use
    ``AppCommand`` instead.
    """
    args = '<label label ...>'
    label = 'label'

    def handle(self, *labels, **options):
        if not labels:
            raise CommandError('Enter at least one %s.' % self.label)
        # Run every label through handle_label(), keep only non-empty
        # results, and join them into a single output string.
        results = [self.handle_label(lbl, **options) for lbl in labels]
        return '\n'.join(piece for piece in results if piece)

    def handle_label(self, label, **options):
        """
        Perform the command's actions for ``label``, which will be the
        string as given on the command line.
        """
        raise NotImplementedError()
#class NoArgsCommand(BaseCommand):
# """
# A command which takes no arguments on the command line.
#
# Rather than implementing ``handle()``, subclasses must implement
# ``handle_noargs()``; ``handle()`` itself is overridden to ensure
# no arguments are passed to the command.
#
# Attempting to pass arguments will raise ``CommandError``.
#
# """
# args = ''
#
# def handle(self, *args, **options):
# if args:
# raise CommandError("Command doesn't accept any arguments")
# return self.handle_noargs(**options)
#
# def handle_noargs(self, **options):
# """
# Perform this command's actions.
#
# """
# raise NotImplementedError()
#def copy_helper(style, app_or_project, name, directory, other_name=''):
# """
# Copies either a Django application layout template or a Django project
# layout template into the specified directory.
#
# """
# # style -- A color style object (see django.core.management.color).
# # app_or_project -- The string 'app' or 'project'.
# # name -- The name of the application or project.
# # directory -- The directory to which the layout template should be copied.
# # other_name -- When copying an application layout, this should be the name
# # of the project.
# import re
# import shutil
# other = {'project': 'app', 'app': 'project'}[app_or_project]
# if not re.search(r'^[_a-zA-Z]\w*$', name): # If it's not a valid directory name.
# # Provide a smart error message, depending on the error.
# if not re.search(r'^[_a-zA-Z]', name):
# message = 'make sure the name begins with a letter or underscore'
# else:
# message = 'use only numbers, letters and underscores'
# raise CommandError("%r is not a valid %s name. Please %s." % (name, app_or_project, message))
# top_dir = os.path.join(directory, name)
# try:
# os.mkdir(top_dir)
# except OSError, e:
# raise CommandError(e)
#
# # Determine where the app or project templates are. Use
# # django.__path__[0] because we don't know into which directory
# # django has been installed.
# template_dir = os.path.join(django.__path__[0], 'conf', '%s_template' % app_or_project)
#
# for d, subdirs, files in os.walk(template_dir):
# relative_dir = d[len(template_dir)+1:].replace('%s_name' % app_or_project, name)
# if relative_dir:
# os.mkdir(os.path.join(top_dir, relative_dir))
# for i, subdir in enumerate(subdirs):
# if subdir.startswith('.'):
# del subdirs[i]
# for f in files:
# if not f.endswith('.py'):
# # Ignore .pyc, .pyo, .py.class etc, as they cause various
# # breakages.
# continue
# path_old = os.path.join(d, f)
# path_new = os.path.join(top_dir, relative_dir, f.replace('%s_name' % app_or_project, name))
# fp_old = open(path_old, 'r')
# fp_new = open(path_new, 'w')
# fp_new.write(fp_old.read().replace('{{ %s_name }}' % app_or_project, name).replace('{{ %s_name }}' % other, other_name))
# fp_old.close()
# fp_new.close()
# try:
# shutil.copymode(path_old, path_new)
# _make_writeable(path_new)
# except OSError:
# sys.stderr.write(style.NOTICE("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new))
#
#def _make_writeable(filename):
# """
# Make sure that the file is writeable. Useful if our source is
# read-only.
#
# """
# import stat
# if sys.platform.startswith('java'):
# # On Jython there is no os.access()
# return
# if not os.access(filename, os.W_OK):
# st = os.stat(filename)
# new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
# os.chmod(filename, new_permissions)
|
adamtiger/NNSharp
|
PythonUtils/LSTM.py
|
from keras.models import Sequential
from keras.layers import LSTM
import numpy as np

# Reference script: builds a tiny deterministic LSTM and prints its
# prediction, presumably so the output can be compared against another
# implementation (NNSharp) -- TODO confirm against consumer.
# One stateless LSTM layer: 2 units, tanh activation, ReLU recurrent
# activation, implementation mode 1, fixed input batch (5, 3, 3).
model = Sequential()
ly = LSTM(2, activation='tanh', recurrent_activation='relu',implementation = 1, stateful=False, batch_input_shape=(5, 3, 3))
model.add(ly)
model.compile(optimizer='sgd', loss='mse')

# Deterministic weights: input kernel (3x8), recurrent kernel (2x8) and
# bias (8,) -- the 8 columns are the 4 LSTM gates x 2 units.
kernel = np.ones((3, 8))
rec_kernel = np.ones((2, 8))
bias = np.array([1, 2, -1, 0, 3, 4, 5, -2])/10
# Fill both kernels with a repeating pattern from {-0.2, -0.1, 0, 0.1, 0.2}.
k = 0
for h in range(0, 3):
    for w in range(0, 8):
        k += 1
        kernel[h, w] = (k % 5 - 2)/10
k = 0
for h in range(0, 2):
    for w in range(0, 8):
        k += 1
        rec_kernel[h, w] = (k % 5 - 2)/10
parameters = [kernel, rec_kernel, bias]
model.set_weights(parameters)

# Deterministic input batch with values cycling through 0.1 .. 0.5.
data = np.ndarray((5, 3, 3))
l = 0
for b in range(0, 5):
    for h in range(0, 3):
        for c in range(0, 3):
            l += 1
            data[b, h, c] = (l % 5 + 1)/10

output = model.predict(data, batch_size=5) # the batch_size has no impact on the result here
print(output)
print(model.summary())
print(model.get_config())
print(model.get_weights())
|
DayGitH/Python-Challenges
|
DailyProgrammer/DP20150401B.py
|
"""
[2015-04-01] Challenge #208 [Intermediate] ASCII Gradient Generator
https://www.reddit.com/r/dailyprogrammer/comments/3104wu/20150401_challenge_208_intermediate_ascii/
# [](#IntermediateIcon) _(Intermediate)_: ASCII Gradient Generator
A linear colour gradient is where an image transitions through a range of colours, [like
this](http://i.imgur.com/IPwnI8X.png). A gradient doesn't need to be directly horizontal or vertical - it can be
[diagonal](http://i.imgur.com/D4trkEk.png) too, or only be [longer or shorter](http://i.imgur.com/8CHx95i.png) than
usual. It can also cycle through [as many colours as you like](http://i.imgur.com/Br3xwXM.png).
A radial colour gradient is a similar concept, except the colours move [radially outwards like
this](http://i.imgur.com/C6SE6m3.png), rather than linearly across. Radial gradients can also be in [different
positions or with different colours](http://i.imgur.com/S19EOu3.png).
To describe a gradient, you need two things - the colours in it, and its location. Describing the location of a radial
gradient is easy: for a radial gradient [like this](http://i.imgur.com/dTvfj7f.png), you only need to know the center
of the gradient (the red dot), and the radius from the center at which the gradient finishes (`r`). To locate a linear
gradient [like this](http://i.imgur.com/kyZTQnK.png), you need to know two points - the start (red) and end (green)
location. The gradient colours run perpendicular to the line joining the start and end points.
Today, we won't be dealing with colours. Instead, we'll be dealing with characters on the screen. You'll accept the
parameters of a gradient, and you'll output the displayed gradient.
# Formal Inputs and Outputs
## Input Description
You will first accept the size of the output display, as a width and height in characters, like this:
40 30
This corresponds to a grid 40 across and 30 down, like this:
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
........................................
The grid follows **screen space**, so the **top-left** corner is position **(0, 0)**.
Next, you will accept the characters that make up the gradient 'colours', from start to finish (or from inside to
outside, for a radial gradient), like this: (note the space at the start)
.,:;xX&@
Any points outside the gradient will have the first/last character, depending on which side of the gradient they're on.
After this, you will accept the parameters of the gradient. This may take one of two forms:
* For a **radial** gradient, the next line will look like this:
`radial x y r`
Where **(`x`, `y`)** is the center of the gradient, and **`r`** is the radius of the gradient, both in pixels.
* For a **linear** gradient, the next line will look like this:
`linear x1 y1 x2 y2`
Where **(`x1`, `y1`)** is the start point of the gradient, and **(`x2`, `y2`)** is the end point of the gradient,
both in pixel measure.
## Output Description
You are to display the given gradient on a grid with the given size, like this:
@@@@@@@@@@@&&&&&XXXXXXXXX&&&&&@@@@@@@@@@
@@@@@@@@@@&&&&XXXXXXXXXXXXX&&&&@@@@@@@@@
@@@@@@@@&&&&XXXXXXxxxxxXXXXXX&&&&@@@@@@@
@@@@@@@&&&&XXXXxxxxxxxxxxxXXXX&&&&@@@@@@
@@@@@@@&&&XXXxxxxxx;;;xxxxxxXXX&&&@@@@@@
@@@@@@&&&XXXxxxx;;;;;;;;;xxxxXXX&&&@@@@@
@@@@@&&&XXXxxx;;;;;;;;;;;;;xxxXXX&&&@@@@
@@@@@&&XXXxxx;;;;:::::::;;;;xxxXXX&&@@@@
@@@@&&&XXxxx;;;:::::::::::;;;xxxXX&&&@@@
@@@@&&XXXxx;;;::::,,,,,::::;;;xxXXX&&@@@
@@@&&&XXxxx;;:::,,,,,,,,,:::;;xxxXX&&&@@
@@@&&XXXxx;;;::,,,,...,,,,::;;;xxXXX&&@@
@@@&&XXXxx;;:::,,.......,,:::;;xxXXX&&@@
@@@&&XXxxx;;::,,,... ...,,,::;;xxxXX&&@@
@@@&&XXxx;;;::,,... ...,,::;;;xxXX&&@@
@@@&&XXxx;;;::,,.. ..,,::;;;xxXX&&@@
@@@&&XXxx;;;::,,... ...,,::;;;xxXX&&@@
@@@&&XXxxx;;::,,,... ...,,,::;;xxxXX&&@@
@@@&&XXXxx;;:::,,.......,,:::;;xxXXX&&@@
@@@&&XXXxx;;;::,,,,...,,,,::;;;xxXXX&&@@
@@@&&&XXxxx;;:::,,,,,,,,,:::;;xxxXX&&&@@
@@@@&&XXXxx;;;::::,,,,,::::;;;xxXXX&&@@@
@@@@&&&XXxxx;;;:::::::::::;;;xxxXX&&&@@@
@@@@@&&XXXxxx;;;;:::::::;;;;xxxXXX&&@@@@
@@@@@&&&XXXxxx;;;;;;;;;;;;;xxxXXX&&&@@@@
@@@@@@&&&XXXxxxx;;;;;;;;;xxxxXXX&&&@@@@@
@@@@@@@&&&XXXxxxxxx;;;xxxxxxXXX&&&@@@@@@
@@@@@@@&&&&XXXXxxxxxxxxxxxXXXX&&&&@@@@@@
@@@@@@@@&&&&XXXXXXxxxxxXXXXXX&&&&@@@@@@@
@@@@@@@@@@&&&&XXXXXXXXXXXXX&&&&@@@@@@@@@
# Sample Inputs and Outputs
## Gradient 1
### Input
40 30
.,:;xX&@
radial 20 15 20
### Output
(shown above, in **Output Description**)
## Gradient 2
Notice how the colours appear in the reverse order, as the end point is to the *left* of the start point.
### Input
60 30
'"^+$
linear 30 30 0 0
### Output
$$$$$$$$$$$++++++++++^^^^^^^^^^""""""""""'''''''''
$$$$$$$$$$++++++++++^^^^^^^^^^""""""""""'''''''''
$$$$$$$$$++++++++++^^^^^^^^^^""""""""""'''''''''
$$$$$$$$++++++++++^^^^^^^^^^""""""""""'''''''''
$$$$$$$++++++++++^^^^^^^^^^""""""""""'''''''''
$$$$$$++++++++++^^^^^^^^^^""""""""""'''''''''
$$$$$++++++++++^^^^^^^^^^""""""""""'''''''''
$$$$++++++++++^^^^^^^^^^""""""""""'''''''''
$$$++++++++++^^^^^^^^^^""""""""""'''''''''
$$++++++++++^^^^^^^^^^""""""""""'''''''''
$++++++++++^^^^^^^^^^""""""""""'''''''''
++++++++++^^^^^^^^^^""""""""""'''''''''
+++++++++^^^^^^^^^^""""""""""'''''''''
++++++++^^^^^^^^^^""""""""""'''''''''
+++++++^^^^^^^^^^""""""""""'''''''''
++++++^^^^^^^^^^""""""""""'''''''''
+++++^^^^^^^^^^""""""""""'''''''''
++++^^^^^^^^^^""""""""""'''''''''
+++^^^^^^^^^^""""""""""'''''''''
++^^^^^^^^^^""""""""""'''''''''
+^^^^^^^^^^""""""""""'''''''''
^^^^^^^^^^""""""""""'''''''''
^^^^^^^^^""""""""""'''''''''
^^^^^^^^""""""""""'''''''''
^^^^^^^""""""""""'''''''''
^^^^^^""""""""""'''''''''
^^^^^""""""""""'''''''''
^^^^""""""""""'''''''''
^^^""""""""""'''''''''
^^""""""""""'''''''''
## Gradient 3
The gradient start/end/centre points don't have to be inside the grid!
### Input
40 40
aaabcccdeeefggg
radial -10 20 60
### Output
ccccccccccdddddeeeeeeeeeeeeeeeffffgggggg
cccccccccccdddddeeeeeeeeeeeeeefffffggggg
ccccccccccccdddddeeeeeeeeeeeeeeffffggggg
cccccccccccccdddddeeeeeeeeeeeeeffffggggg
cccccccccccccdddddeeeeeeeeeeeeefffffgggg
ccccccccccccccdddddeeeeeeeeeeeeeffffgggg
cccccccccccccccddddeeeeeeeeeeeeeffffgggg
cccccccccccccccdddddeeeeeeeeeeeeeffffggg
bcccccccccccccccddddeeeeeeeeeeeeeffffggg
bbccccccccccccccdddddeeeeeeeeeeeeffffggg
bbbccccccccccccccddddeeeeeeeeeeeeffffggg
bbbbcccccccccccccddddeeeeeeeeeeeeeffffgg
bbbbcccccccccccccddddeeeeeeeeeeeeeffffgg
bbbbbcccccccccccccddddeeeeeeeeeeeeffffgg
abbbbcccccccccccccddddeeeeeeeeeeeeffffgg
abbbbbccccccccccccddddeeeeeeeeeeeeffffgg
aabbbbccccccccccccddddeeeeeeeeeeeeffffgg
aabbbbccccccccccccddddeeeeeeeeeeeeffffgg
aabbbbccccccccccccddddeeeeeeeeeeeeffffgg
aabbbbccccccccccccddddeeeeeeeeeeeeffffgg
aabbbbccccccccccccddddeeeeeeeeeeeeffffgg
aabbbbccccccccccccddddeeeeeeeeeeeeffffgg
aabbbbccccccccccccddddeeeeeeeeeeeeffffgg
aabbbbccccccccccccddddeeeeeeeeeeeeffffgg
aabbbbccccccccccccddddeeeeeeeeeeeeffffgg
abbbbbccccccccccccddddeeeeeeeeeeeeffffgg
abbbbcccccccccccccddddeeeeeeeeeeeeffffgg
bbbbbcccccccccccccddddeeeeeeeeeeeeffffgg
bbbbcccccccccccccddddeeeeeeeeeeeeeffffgg
bbbbcccccccccccccddddeeeeeeeeeeeeeffffgg
bbbccccccccccccccddddeeeeeeeeeeeeffffggg
bbccccccccccccccdddddeeeeeeeeeeeeffffggg
bcccccccccccccccddddeeeeeeeeeeeeeffffggg
cccccccccccccccdddddeeeeeeeeeeeeeffffggg
cccccccccccccccddddeeeeeeeeeeeeeffffgggg
ccccccccccccccdddddeeeeeeeeeeeeeffffgggg
cccccccccccccdddddeeeeeeeeeeeeefffffgggg
cccccccccccccdddddeeeeeeeeeeeeeffffggggg
ccccccccccccdddddeeeeeeeeeeeeeeffffggggg
cccccccccccdddddeeeeeeeeeeeeeefffffggggg
# Notes
Got any cool challenge ideas? Submit them to /r/DailyProgrammer_Ideas!
"""
def main():
    """Entry point placeholder; the challenge solution is not implemented."""
    return None


if __name__ == "__main__":
    main()
|
convexengineering/gplibrary
|
gpkitmodels/SP/aircraft/tail/tail_boom_flex.py
|
" tail boom flexibility "
from numpy import pi
from gpkit import Model, parse_variables, SignomialsEnabled
# NOTE: gpkit's @parse_variables parses this class's docstring to declare
# the model variables -- the "Variables" table below is executable
# metadata, not free-form prose; edit it with care.
class TailBoomFlexibility(Model):
    """ Tail Boom Flexibility Model

    Variables
    ---------
    Fne                     [-]     tail boom flexibility factor
    deda                    [-]     wing downwash derivative
    SMcorr      0.55        [-]     corrected static margin
    sph1                    [-]     flexibility helper variable 1
    sph2                    [-]     flexibility helper variable 2

    LaTex Strings
    -------------
    Fne         F_{\mathrm{NE}}
    deda        d\\epsilon/d\\alpha
    SMcorr      SM_{\\mathrm{corr}}

    """
    @parse_variables(__doc__, globals())
    def setup(self, htail, hbending, wing):
        # Short aliases for the coupled submodels' variables.
        mh = htail.mh
        mw = wing.mw
        Vh = htail.Vh
        th = hbending.th
        CLhmin = htail.CLhmin
        CLwmax = wing.planform.CLmax
        Sw = wing.planform.S
        bw = wing.planform.b
        lh = htail.lh
        CM = wing.planform.CM

        constraints = [
            Fne >= 1 + mh*th,
            sph1*(mw*Fne/mh/Vh) + deda <= 1,
            sph2 <= Vh*CLhmin/CLwmax,
            # (sph1 + sph2).mono_lower_bound({"sph1": .48, "sph2": .52}) >= (
            #     SMcorr + wing["C_M"]/wing["C_{L_{max}}"]),
            deda >= mw*Sw/bw/4/pi/lh]

        # The static-margin constraint has a sum on the left, so it is
        # signomial and must be declared inside SignomialsEnabled().
        with SignomialsEnabled():
            constraints.extend([sph1 + sph2 >= SMcorr + CM/CLwmax])
        return constraints
|
MacHu-GWU/elementary_math-project
|
start-a-project/init_project.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script can generate automate scripts for open source python project.
Scroll to ``if __name__ == "__main__":`` for more info.
"""
from __future__ import print_function
import sys
import datetime
from os import walk, mkdir
from os.path import join, abspath, dirname, basename
def write(s, path, encoding="utf-8"):
    """Encode *s* with *encoding* and write the bytes to *path*."""
    encoded = s.encode(encoding)
    with open(path, "wb") as fout:
        fout.write(encoded)
def read(path, encoding="utf-8"):
    """Return the content of *path* decoded from *encoding*."""
    with open(path, "rb") as fin:
        raw = fin.read()
    return raw.decode(encoding)
def initiate_project(
    package_name,
    repo_name,
    python_version,
    github_username,
    author_name,
    author_email,
    maintainer_name,
    maintainer_email,
    year,
    s3_bucket,
):
    """Generate project start files.

    Walks the ``template`` directory next to this script, mirrors it to
    a new ``<package_name>-project`` directory, renames any
    ``__package__`` directory to *package_name*, and substitutes every
    ``{{ placeholder }}`` token in the copied file contents.

    All parameters are strings and map one-to-one onto template tokens.
    Returns None; progress is reported via print().
    """
    print("Initate '%s-project' from template ..." % package_name)
    template_dir = join(dirname(abspath(__file__)), "template")
    output_dir = join(dirname(abspath(__file__)), "%s-project" % package_name)

    # One token -> value table instead of a long chain of .replace() calls;
    # the substitutions are independent, so order does not matter.
    substitutions = {
        "{{ package_name }}": package_name,
        "{{ repo_name }}": repo_name,
        "{{ python_version }}": python_version,
        "{{ github_username }}": github_username,
        "{{ author_name }}": author_name,
        "{{ author_email }}": author_email,
        "{{ maintainer_name }}": maintainer_name,
        "{{ maintainer_email }}": maintainer_email,
        "{{ year }}": year,
        "{{ s3_bucket }}": s3_bucket,
    }

    for src_dir, dir_list, file_list in walk(template_dir):
        # Mirror the source directory under the output tree.
        dst_dir = src_dir.replace(template_dir, output_dir, 1)
        if basename(dst_dir) == "__package__":
            dst_dir = join(dirname(dst_dir), package_name)
        try:
            print("  Create '%s' ..." % dst_dir)
            mkdir(dst_dir)
        except OSError:
            # Directory already exists (or cannot be created). The
            # original used a bare ``except:``, which also swallowed
            # programming errors; OSError keeps only the intended case.
            pass
        # Copy each file, substituting the template tokens.
        for filename in file_list:
            src = join(src_dir, filename)
            dst = join(dst_dir, filename)
            content = read(src)
            for token, value in substitutions.items():
                content = content.replace(token, value)
            print("  Create '%s' ..." % dst)
            write(content, dst)
    print("  Complete!")
if __name__ == "__main__":
    # --- EDIT THESE VARIABLES based on your own situation ---
    package_name = "picage" # IMPORTANT
    repo_name = "{package_name}-project".format(package_name=package_name)
    # e.g. "python36" -- derived from the interpreter running this script.
    python_version = "python%s%s" % (
        sys.version_info.major, sys.version_info.minor)
    github_username = "MacHu-GWU" # IMPORTANT
    author_name = "Sanhe Hu" # IMPORTANT
    author_email = "husanhe@gmail.com" # IMPORTANT
    maintainer_name = author_name
    maintainer_email = author_email
    year = str(datetime.datetime.utcnow().year)
    s3_bucket = "www.wbh-doc.com" # IMPORTANT
    initiate_project(
        package_name,
        repo_name,
        python_version,
        github_username,
        author_name,
        author_email,
        maintainer_name,
        maintainer_email,
        year,
        s3_bucket,
    )
|
cgomezfandino/Project_PTX
|
API_Connection_Oanda/PTX_oandaInfo.py
|
from configparser import ConfigParser
import v20

# Read the Oanda credentials from the local configuration file.
config = ConfigParser()
config.read("../API_Connection_Oanda/pyalgo.cfg")

# Connect to the Oanda v20 practice endpoint with the stored token.
ctx = v20.Context(
    'api-fxpractice.oanda.com',
    443,
    True,
    application = 'sample_code',
    token = config['oanda_v20']['access_token'],
    datetime_format = 'RFC3339')
# class oanda_info():
def get_Id_Account():
    """Fetch the account list visible to this token and print each account."""
    response = ctx.account.list()
    # Ask for the Oanda accounts attached to the token.
    accounts = response.get('accounts')
    # Show each account (Python 2 print statement).
    for account in accounts:
        # account('Account: %s' %account)
        print account
def get_instruments():
    """Print display name and code of every instrument for the account."""
    account_id = config['oanda_v20']['account_id']
    response = ctx.account.instruments(account_id)
    for instrument in response.get('instruments'):
        details = instrument.dict()
        print('%20s | %10s' % (details['displayName'],
                               details['name']))
|
mtskelton/huawei-4g-stats
|
stats/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the ``Stat`` model."""

    # First migration in the app -- nothing to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Stat',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Timestamp set automatically when the row is inserted.
                ('date', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): presumably upload/download counters and
                # modem uptime reported by the router -- confirm against
                # the collecting code.
                ('up', models.BigIntegerField()),
                ('down', models.BigIntegerField()),
                ('live_time', models.BigIntegerField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
ggilestro/majordomo
|
listeners/pipe.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# pipe.py
#
# Copyright 2014 Giorgio Gilestro <gg@kozak>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Listen from pipefile
# e.g.: echo "TEST COMMAND" > /tmp/pipefile
import os, tempfile
import logging
import threading
class pipe():
    """Listens on a named FIFO pipe and forwards each message to a queue.

    Example: ``echo "TEST COMMAND" > /tmp/pipefile`` makes the
    listening thread read ``TEST COMMAND`` and put the tuple
    ``("pipe", "TEST COMMAND")`` on the shared queue.
    """

    def __init__(self, pipefile, queue, actions):
        """
        Reads from a pipe.

        :param pipefile: filesystem path of the FIFO to create and listen on
        :param queue: shared queue receiving ("pipe", message) tuples
        :param actions: shared actions registry; this listener registers
                        an empty "pipe" entry in it
        """
        self.pipefile = pipefile
        self.queue = queue
        actions["pipe"] = {}
        self.__makefifo()
        # Background thread that blocks on the FIFO and relays messages.
        self.listening_thread = threading.Thread(target=self.listen_from_pipe)
        #self.listening_thread.daemon = True
        self.isListening = True
        self.listening_thread.start()

    def transmit(self, received):
        """Put the received text on the queue, tagged with this listener's name."""
        cmd = ("pipe", received)
        self.queue.put(cmd)

    def __makefifo(self):
        """Create the FIFO file; return True on success, False otherwise."""
        try:
            os.mkfifo(self.pipefile)
            logging.debug("Listening to FIFO Pipe at %s" % self.pipefile)
            return True
        except OSError:
            # Was a bare ``except:`` -- narrowed to OSError (what mkfifo
            # raises, e.g. when the path already exists) so genuine
            # programming errors are no longer silently swallowed.
            logging.debug("Error creating FIFO Pipe %s. File already existing?" % self.pipefile)
            return False

    def listen_from_pipe(self):
        """Block on the FIFO and relay each stripped message while listening."""
        while self.isListening:
            logging.debug("Listening from PIPE %s" % self.pipefile)
            # Opening a FIFO for reading blocks until a writer connects.
            with open(self.pipefile) as fifo:
                self.transmit(fifo.read().strip())
if __name__ == '__main__':
    # pipe() takes (pipefile, queue, actions); the original call passed
    # only two arguments and therefore raised TypeError on startup.
    p = pipe("pipefile", "none", {})
|
azer/jsbuild
|
jsbuild/manifest.py
|
from jsbuild.attrdict import AttrDict
from time import strftime
class Manifest(AttrDict):
    """A nested, attribute-style manifest dictionary.

    String values are %-interpolated against the root manifest's
    ``_dict_`` mapping when they are read back (see ``__getitem__``).
    """

    def __init__(self,*args,**kwargs):
        # NOTE(review): super(AttrDict, self) skips AttrDict.__init__ and
        # dispatches to AttrDict's own base class instead -- looks
        # deliberate, but confirm against AttrDict's implementation.
        super(AttrDict, self).__init__(*args,**kwargs)
        self._buffer_ = None    # not used in this class; set by collaborators?
        self._parent_ = None    # linked lazily on access via a parent Manifest
        if not self.__contains__('_dict_'):
            self['_dict_'] = {}
        # Stamp the build time so values can interpolate %(timestamp)s.
        self['_dict_']['timestamp'] = int(strftime("%Y%m%d%H%M"))

    def __getitem__(self,name):
        item = super(Manifest,self).__getitem__(name)
        if isinstance(item,Manifest) and not item._parent_:
            # First access through a parent: remember the parent link.
            item._parent_ = self
        elif isinstance(item,str):
            # Interpolate string values against the ROOT manifest's _dict_.
            root = self
            while root._parent_: root = root._parent_
            item = item%root._dict_
        return item
|
Tunous/StringSheet
|
setup.py
|
import os
from io import open
from setuptools import setup
about = {}
here = os.path.abspath(os.path.dirname(__file__))

# Pull dunder metadata lines (__title__ = '...') out of the package's
# __init__.py without importing the package.
with open(os.path.join(here, 'stringsheet', '__init__.py'), encoding='utf-8') as f:
    for line in f:
        if line.startswith('__'):
            # Split on the FIRST '=' only, so values that themselves
            # contain '=' are not truncated/mis-parsed.
            (key, value) = line.split('=', 1)
            about[key.strip()] = value.strip().strip('\'')

# Long description for PyPI comes from the README.
with open('README.rst', encoding='utf-8') as f:
    readme = f.read()
# All metadata values come from the `about` dict parsed out of
# stringsheet/__init__.py above; `readme` holds README.rst.
setup(
    name=about['__title__'],
    version=about['__version__'],
    description=about['__description__'],
    long_description=readme,
    author=about['__author__'],
    author_email=about['__author_email__'],
    url=about['__url__'],
    license=about['__license__'],
    packages=['stringsheet'],
    install_requires=[
        'httplib2',
        'apiclient',
        'lxml',
        'google-api-python-client'
    ],
    entry_points={
        # Installs the `stringsheet` console command.
        'console_scripts': [
            'stringsheet = stringsheet.cli:main'
        ]
    }
)
|
jdevesa/gists
|
gists/gists.py
|
# Copyright (c) 2012 <Jaume Devesa (jaumedevesa@gmail.com)>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
gists.gists
~~~~~~~~~~~
This single-function module defines the input parameters and the subparsers,
and coordinates the 'handlers'->'actions'->'formatters' execution workflow
"""
import argparse
from actions import (list_gists, show, get, post, delete, update, authorize,
fork, star, unstar)
from handlers import (handle_list, handle_show, handle_update,
handle_authorize, handle_get, handle_post, handle_delete,
handle_fork, handle_star)
from formatters import (format_list, format_post, format_update,
format_get, format_show, format_delete,
format_authorize, format_star)
from version import VERSION
USER_MSG = ("github username. Use this user instead of the defined one in "
"the configuration file. If action demands authentication, a "
"password request will be prompt")
GIST_ID_MSG = ("identifier of the Gist. Execute 'gists list' to know Gists "
"identifiers")
def run(*args, **kwargs):
    """Entry point: build the CLI parser, dispatch the action, print the result.

    Each subcommand registers three callables via ``set_defaults``:
    ``handle_args`` (turns the parsed namespace into positional
    parameters), ``func`` (executes the action), and ``formatter``
    (renders the action's result for output).
    """
    # Initialize argument's parser
    description = 'Manage Github gists from CLI'
    parser = argparse.ArgumentParser(description=description,
                                     epilog="Happy Gisting!")
    # Define subparsers to handle each action
    subparsers = parser.add_subparsers(help="Available commands.")
    # Add the subparsers
    __add_list_parser(subparsers)
    __add_show_parser(subparsers)
    __add_get_parser(subparsers)
    __add_create_parser(subparsers)
    __add_update_parser(subparsers)
    __add_delete_parser(subparsers)
    __add_authorize_parser(subparsers)
    __add_version_parser(subparsers)
    __add_fork_parser(subparsers)
    __add_star_parser(subparsers)
    __add_unstar_parser(subparsers)
    # Parse the arguments
    args = parser.parse_args()
    # Calling the handle_args function defined, parsing the args and return
    # and object with the needed values to execute the function
    parameters = args.handle_args(args)
    # Passing the 'parameters' object as array of parameters
    result = args.func(*parameters)
    # Parsing the 'result' object to be output formatted.
    # (that must be a single object)
    result_formatted = args.formatter(result)
    # Print the formatted output (Python 2 print statement)
    print result_formatted
def __add_list_parser(subparsers):
    """ Define the subparser to handle the 'list' functionality.
    :param subparsers: the subparser entity
    """
    # Add the subparser to handle the list of gists
    parser_list = subparsers.add_parser("list", help="list a user's Gists")
    parser_list.add_argument("-u", "--user", help=USER_MSG)
    # '-p' and '-s' are mutually exclusive: either include private gists
    # in the listing, or list only the starred ones
    group1 = parser_list.add_mutually_exclusive_group()
    group1.add_argument("-p", "--private", help="""return the private gists
                        besides the public ones. Needs authentication""",
                        action="store_true")
    group1.add_argument("-s", "--starred", help="""return ONLY the starred
                        gists. Needs authentication""", action="store_true")
    # handler extracts args -> action calls the API -> formatter renders
    parser_list.set_defaults(handle_args=handle_list,
                             func=list_gists, formatter=format_list)
def __add_show_parser(subparsers):
    """ Define the subparser to handle with the 'show' functionallity.
    :param subparsers: the subparser entity
    """
    # Add the subparser to handle the 'show' action
    parser_show = subparsers.add_parser("show", help="""show a Gist. Shows
                                        Gist metadata by default.
                                        With '-f' (--filename) option, shows
                                        the content of one of the Gist files
                                        """)
    parser_show.add_argument("gist_id", help=GIST_ID_MSG)
    parser_show.add_argument("-f", "--filename", help="gist file to show")
    # handler extracts args -> action calls the API -> formatter renders
    parser_show.set_defaults(handle_args=handle_show, func=show,
                             formatter=format_show)
def __add_get_parser(subparsers):
    """ Define the subparser to handle the 'get' functionality.
    :param subparsers: the subparser entity
    """
    # Add the subparser to handle the 'get' action
    parser_get = subparsers.add_parser("get", help="""download a single gist
                                       file. If the gist has just a single
                                       file, argument '-f' (--filename) is not
                                       needed""")
    parser_get.add_argument("gist_id", help=GIST_ID_MSG)
    parser_get.add_argument("-f", "--filename", help="file to download")
    parser_get.add_argument("-o", "--output_dir", help="destination directory",
                            default=".")
    # handler extracts args -> action calls the API -> formatter renders
    parser_get.set_defaults(handle_args=handle_get, func=get,
                            formatter=format_get)
def __add_create_parser(subparsers):
    """ Define the subparser to handle the 'create' functionality.
    :param subparsers: the subparser entity
    """
    # Add the subparser to handle the 'create' action
    parser_post = subparsers.add_parser("create", help="""create a new gist.
                                        Needs authentication""")
    parser_post.add_argument("-u", "--user", help=USER_MSG)
    parser_post.add_argument("-f", "--filenames", nargs='+', help="""specify
                             files to upload with Gist creation""",
                             required=True)
    parser_post.add_argument("-p", "--private", help="""private Gist? ('false'
                             by default)""", action="store_true")
    parser_post.add_argument("-i", "--input_dir", help="""input directory where
                             the source files are""")
    parser_post.add_argument("-d", "--description", help="""description for
                             the Gist to create""")
    # handler extracts args -> action calls the API -> formatter renders
    parser_post.set_defaults(handle_args=handle_post, func=post,
                             formatter=format_post)
def __add_update_parser(subparsers):
    """ Define the subparser to handle the 'update' functionality.
    :param subparsers: the subparser entity
    """
    # Add the subparser to handle the 'update' action
    parser_update = subparsers.add_parser("update", help="""update a gist.
                                          Needs authentication""")
    parser_update.add_argument("gist_id", help=GIST_ID_MSG)
    parser_update.add_argument("-u", "--user", help=USER_MSG)
    # File-related options are grouped separately from metadata options
    group1 = parser_update.add_argument_group("file options",
                                              "update Gist files")
    group1.add_argument("-f", "--filenames", nargs='+',
                        help="Gist files to update")
    # '-n' and '-r' are mutually exclusive: files are either added to or
    # removed from the Gist, never both in one invocation
    group11 = group1.add_mutually_exclusive_group()
    group11.add_argument("-n", "--new", action="store_true", help="""files
                         supplied are new for the Gist. '-f' (--filenames)
                         argument needed""",
                         default=False)
    group11.add_argument("-r", "--remove", action="store_true",
                         help="""files supplied will be removed from the Gist.
                         '-f' (--filenames) argument needed""", default=False)
    group1.add_argument("-i", "--input_dir", help="""directory where the files
                        are. Current directory by default""")
    group2 = parser_update.add_argument_group('metadata options',
                                              "update Gist metadata")
    group2.add_argument("-d", "--description", help="update Gist description")
    # handler extracts args -> action calls the API -> formatter renders
    parser_update.set_defaults(handle_args=handle_update, func=update,
                               formatter=format_update)
def __add_delete_parser(subparsers):
    """ Define the subparser to handle the 'delete' functionality.
    :param subparsers: the subparser entity
    """
    # Add the subparser to handle the 'delete' action
    parser_delete = subparsers.add_parser("delete", help="""delete a Gist.
                                          Needs authentication""")
    parser_delete.add_argument("gist_id", help=GIST_ID_MSG)
    parser_delete.add_argument("-u", "--user", help=USER_MSG)
    # handler extracts args -> action calls the API -> formatter renders
    parser_delete.set_defaults(handle_args=handle_delete, func=delete,
                               formatter=format_delete)
def __add_authorize_parser(subparsers):
    """ Define the subparser to handle the 'authorize' functionallity.
    :param subparsers: the subparser entity
    """
    # Add the subparser to handle the 'authorize' action.
    parser_authorize = subparsers.add_parser("authorize", help="""authorize
                                             this project in github""")
    parser_authorize.add_argument("-u", "--user", help="""your github user
                                  . Needed to generate the auth token. """,
                                  required=True)
    # handler extracts args -> action calls the API -> formatter renders
    parser_authorize.set_defaults(handle_args=handle_authorize, func=authorize,
                                  formatter=format_authorize)
def __add_version_parser(subparsers):
    """ Define the subparser to handle 'version' functionallity.
    :param subparsers: the subparser entity
    """
    parser_version = subparsers.add_parser("version", help="""print the version
                                           of the release""")
    # No handler/action needed: the formatter just returns the constant
    # VERSION string; the lambdas satisfy the common pipeline interface.
    parser_version.set_defaults(handle_args=lambda x: (None,),
                                func=lambda x: None,
                                formatter=lambda x: VERSION)
def __add_fork_parser(subparsers):
    """ Define the subparser to handle 'fork' functionallity.
    :param subparsers: the subparser entity
    """
    parser_fork = subparsers.add_parser("fork", help="""fork another users'
                                        Gists""")
    parser_fork.add_argument("gist_id", help=GIST_ID_MSG)
    parser_fork.add_argument("-u", "--user", help=USER_MSG)
    # A fork returns the newly created Gist, so the 'create' formatter
    # (format_post) is reused here.
    parser_fork.set_defaults(handle_args=handle_fork, func=fork,
                             formatter=format_post)
def __add_star_parser(subparsers):
    """ Define the subparser to handle 'star' functionallity.
    :param subparsers: the subparser entity
    """
    parser_star = subparsers.add_parser("star", help="star a Gist")
    parser_star.add_argument("gist_id", help=GIST_ID_MSG)
    parser_star.add_argument("-u", "--user", help=USER_MSG)
    # handler extracts args -> action calls the API -> formatter renders
    parser_star.set_defaults(handle_args=handle_star, func=star,
                             formatter=format_star)
def __add_unstar_parser(subparsers):
    """ Define the subparser to handle 'unstar' functionallity.
    :param subparsers: the subparser entity
    """
    parser_unstar = subparsers.add_parser("unstar", help="unstar a Gist")
    parser_unstar.add_argument("gist_id", help=GIST_ID_MSG)
    parser_unstar.add_argument("-u", "--user", help=USER_MSG)
    # 'unstar' takes the same arguments as 'star', so handle_star is
    # reused; only the action (unstar) differs.
    parser_unstar.set_defaults(handle_args=handle_star, func=unstar,
                               formatter=format_star)
|
lycheng/leetcode
|
tests/test_linked_list.py
|
# -*- coding: utf-8 -*-
import unittest
from linked_list import (delete_node, list_cycle, remove_elements,
reverse_list)
from public import ListNode
class TestLinkedList(unittest.TestCase):
    """Unit tests for the linked-list LeetCode solutions."""
    def test_delete_node(self):
        # deleteNode removes the given (non-tail) node in place;
        # after deleting node '2' the list is 1 -> 3 -> 4.
        so = delete_node.Solution()
        head = ListNode(1)
        head.next = ListNode(2)
        head.next.next = ListNode(3)
        head.next.next.next = ListNode(4)
        so.deleteNode(head.next)
        self.assertEqual(head.next.val, 3)
    def test_has_cycle(self):
        so = list_cycle.Solution()
        # Empty list and single node without a loop: no cycle
        self.assertFalse(so.hasCycle(None))
        head = ListNode(1)
        self.assertFalse(so.hasCycle(head))
        # Self-loop is the smallest possible cycle
        head.next = head
        self.assertTrue(so.hasCycle(head))
        # Straight 3-node list: no cycle
        head.next = ListNode(2)
        head.next.next = ListNode(3)
        self.assertFalse(so.hasCycle(head))
        # Tail pointing back to head closes the cycle
        head.next.next.next = head
        self.assertTrue(so.hasCycle(head))
    def test_detect_cycle(self):
        # detectCycle must return the node where the cycle begins,
        # or a falsy value when there is none.
        so = list_cycle.Solution()
        head = ListNode(1)
        self.assertFalse(so.detectCycle(head))
        self.assertFalse(so.detectCycle(None))
        head.next = ListNode(2)
        self.assertFalse(so.detectCycle(head))
        # Build 1 -> 2 -> cross(3) -> 4 -> 5 -> cross: cycle starts at 'cross'
        cross = ListNode(3)
        head.next.next = cross
        head.next.next.next = ListNode(4)
        head.next.next.next.next = ListNode(5)
        head.next.next.next.next.next = cross
        self.assertEqual(so.detectCycle(head), cross)
    def test_remove_elements(self):
        so = remove_elements.Solution()
        self.assertFalse(so.removeElements(None, 0))
        # List 1 -> 2 -> 2 -> 3 -> 4; removals may change the head node
        head = ListNode(1)
        head.next = ListNode(2)
        head.next.next = ListNode(2)
        head.next.next.next = ListNode(3)
        head.next.next.next.next = ListNode(4)
        head = so.removeElements(head, 1)
        self.assertEqual(head.val, 2)
        # Both '2' nodes must be removed in one call
        head = so.removeElements(head, 2)
        self.assertEqual(head.val, 3)
        head = so.removeElements(head, 4)
        self.assertFalse(head.next)
    def test_reverse_linked_list(self):
        # Both the iterative and the recursive variant must reverse
        # 1 -> 2 -> 3 into 3 -> 2 -> 1 and handle the empty list.
        so = reverse_list.Solution()
        self.assertFalse(so.reverseList_iteratively(None))
        head = ListNode(1)
        head.next = ListNode(2)
        head.next.next = ListNode(3)
        self.assertEqual(so.reverseList_iteratively(head).val, 3)
        self.assertFalse(so.reverseList_recursively(None))
        head = ListNode(1)
        head.next = ListNode(2)
        head.next.next = ListNode(3)
        self.assertEqual(so.reverseList_recursively(head).val, 3)
|
pascalgutjahr/Praktikum-1
|
Fourier/rechteck.py
|
# Fourier analysis of a square wave: fit the decay of the measured
# Fourier coefficients b_n versus harmonic order n on a log-log scale.
import numpy as np
import uncertainties.unumpy as unp
from uncertainties.unumpy import (nominal_values as noms, std_devs as stds)
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.optimize import curve_fit
# Global plot appearance
plt.rcParams['figure.figsize'] = (12, 8)
plt.rcParams['font.size'] = 13
plt.rcParams['lines.linewidth'] = 1
csfont = {'fontname': 'Times New Roman'}  # NOTE(review): defined but unused below
# U_mess = b_n  (measured voltage corresponds to the coefficient b_n)
# Two header rows are skipped in the data file
n, b_n = np.genfromtxt('rechteck.txt', unpack=True, skip_header=2)
# Log-transform both axes so the power law b_n ~ n^a becomes linear
x = np.log(n)
y = np.log(b_n)
def f(x, a, b):
    """Linear model for the log-log fit: slope a, intercept b."""
    return b + a * x
# Least-squares fit of the linear model; parameter errors are the
# square roots of the covariance diagonal.
params, covariance = curve_fit(f, x, y)
errors = np.sqrt(np.diag(covariance))
print('a =', params[0], '+-', errors[0])
print('b =', params[1], '+-', errors[1])
# Previous fit results, kept for reference:
# a =-1.19204784746 +- 0.0898910034039
# b = 0.326461420388 +- 0.123423011824
# with odd harmonics only:
# a = -0.909247906044 +- 0.0770070259187
# b = 0.409244475522 +- 0.144772240047
# Plot the fitted line over the measured points and save as PDF
x_plot = np.linspace(min(x), max(x))
plt.plot(x_plot, f(x_plot, *params), 'b-', label='linearer Fit')
plt.plot(x, y, 'rx', label='Messwerte')
plt.ylabel(r'$\mathrm{log(b_n)}$')
plt.xlabel(r'$\mathrm{log(n)}$')
# plt.title('Messungen')
plt.grid()
plt.legend()
plt.tight_layout()
plt.savefig('bilder/rechteck.pdf')
plt.show()
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/virtual_network_gateway_connection_list_entity_py3.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class VirtualNetworkGatewayConnectionListEntity(Resource):
    """A common class for general resource information.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    All required parameters must be populated in order to send to Azure.
    :param id: Resource ID.
    :type id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param authorization_key: The authorizationKey.
    :type authorization_key: str
    :param virtual_network_gateway1: Required. The reference to virtual
     network gateway resource.
    :type virtual_network_gateway1:
     ~azure.mgmt.network.v2017_11_01.models.VirtualNetworkConnectionGatewayReference
    :param virtual_network_gateway2: The reference to virtual network gateway
     resource.
    :type virtual_network_gateway2:
     ~azure.mgmt.network.v2017_11_01.models.VirtualNetworkConnectionGatewayReference
    :param local_network_gateway2: The reference to local network gateway
     resource.
    :type local_network_gateway2:
     ~azure.mgmt.network.v2017_11_01.models.VirtualNetworkConnectionGatewayReference
    :param connection_type: Required. Gateway connection type. Possible values
     are: 'Ipsec','Vnet2Vnet','ExpressRoute', and 'VPNClient. Possible values
     include: 'IPsec', 'Vnet2Vnet', 'ExpressRoute', 'VPNClient'
    :type connection_type: str or
     ~azure.mgmt.network.v2017_11_01.models.VirtualNetworkGatewayConnectionType
    :param routing_weight: The routing weight.
    :type routing_weight: int
    :param shared_key: The IPSec shared key.
    :type shared_key: str
    :ivar connection_status: Virtual network Gateway connection status.
     Possible values are 'Unknown', 'Connecting', 'Connected' and
     'NotConnected'. Possible values include: 'Unknown', 'Connecting',
     'Connected', 'NotConnected'
    :vartype connection_status: str or
     ~azure.mgmt.network.v2017_11_01.models.VirtualNetworkGatewayConnectionStatus
    :ivar tunnel_connection_status: Collection of all tunnels' connection
     health status.
    :vartype tunnel_connection_status:
     list[~azure.mgmt.network.v2017_11_01.models.TunnelConnectionHealth]
    :ivar egress_bytes_transferred: The egress bytes transferred in this
     connection.
    :vartype egress_bytes_transferred: long
    :ivar ingress_bytes_transferred: The ingress bytes transferred in this
     connection.
    :vartype ingress_bytes_transferred: long
    :param peer: The reference to peerings resource.
    :type peer: ~azure.mgmt.network.v2017_11_01.models.SubResource
    :param enable_bgp: EnableBgp flag
    :type enable_bgp: bool
    :param use_policy_based_traffic_selectors: Enable policy-based traffic
     selectors.
    :type use_policy_based_traffic_selectors: bool
    :param ipsec_policies: The IPSec Policies to be considered by this
     connection.
    :type ipsec_policies:
     list[~azure.mgmt.network.v2017_11_01.models.IpsecPolicy]
    :param resource_guid: The resource GUID property of the
     VirtualNetworkGatewayConnection resource.
    :type resource_guid: str
    :ivar provisioning_state: The provisioning state of the
     VirtualNetworkGatewayConnection resource. Possible values are: 'Updating',
     'Deleting', and 'Failed'.
    :vartype provisioning_state: str
    :param etag: Gets a unique read-only string that changes whenever the
     resource is updated.
    :type etag: str
    """
    # Fields marked readonly are populated by the server; the serializer
    # rejects client-supplied values for them.
    _validation = {
        'name': {'readonly': True},
        'type': {'readonly': True},
        'virtual_network_gateway1': {'required': True},
        'connection_type': {'required': True},
        'connection_status': {'readonly': True},
        'tunnel_connection_status': {'readonly': True},
        'egress_bytes_transferred': {'readonly': True},
        'ingress_bytes_transferred': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    # Maps Python attribute names to wire-format JSON paths and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'authorization_key': {'key': 'properties.authorizationKey', 'type': 'str'},
        'virtual_network_gateway1': {'key': 'properties.virtualNetworkGateway1', 'type': 'VirtualNetworkConnectionGatewayReference'},
        'virtual_network_gateway2': {'key': 'properties.virtualNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'},
        'local_network_gateway2': {'key': 'properties.localNetworkGateway2', 'type': 'VirtualNetworkConnectionGatewayReference'},
        'connection_type': {'key': 'properties.connectionType', 'type': 'str'},
        'routing_weight': {'key': 'properties.routingWeight', 'type': 'int'},
        'shared_key': {'key': 'properties.sharedKey', 'type': 'str'},
        'connection_status': {'key': 'properties.connectionStatus', 'type': 'str'},
        'tunnel_connection_status': {'key': 'properties.tunnelConnectionStatus', 'type': '[TunnelConnectionHealth]'},
        'egress_bytes_transferred': {'key': 'properties.egressBytesTransferred', 'type': 'long'},
        'ingress_bytes_transferred': {'key': 'properties.ingressBytesTransferred', 'type': 'long'},
        'peer': {'key': 'properties.peer', 'type': 'SubResource'},
        'enable_bgp': {'key': 'properties.enableBgp', 'type': 'bool'},
        'use_policy_based_traffic_selectors': {'key': 'properties.usePolicyBasedTrafficSelectors', 'type': 'bool'},
        'ipsec_policies': {'key': 'properties.ipsecPolicies', 'type': '[IpsecPolicy]'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }
    def __init__(self, *, virtual_network_gateway1, connection_type, id: str=None, location: str=None, tags=None, authorization_key: str=None, virtual_network_gateway2=None, local_network_gateway2=None, routing_weight: int=None, shared_key: str=None, peer=None, enable_bgp: bool=None, use_policy_based_traffic_selectors: bool=None, ipsec_policies=None, resource_guid: str=None, etag: str=None, **kwargs) -> None:
        super(VirtualNetworkGatewayConnectionListEntity, self).__init__(id=id, location=location, tags=tags, **kwargs)
        self.authorization_key = authorization_key
        self.virtual_network_gateway1 = virtual_network_gateway1
        self.virtual_network_gateway2 = virtual_network_gateway2
        self.local_network_gateway2 = local_network_gateway2
        self.connection_type = connection_type
        self.routing_weight = routing_weight
        self.shared_key = shared_key
        # Read-only, server-populated fields start as None on the client.
        self.connection_status = None
        self.tunnel_connection_status = None
        self.egress_bytes_transferred = None
        self.ingress_bytes_transferred = None
        self.peer = peer
        self.enable_bgp = enable_bgp
        self.use_policy_based_traffic_selectors = use_policy_based_traffic_selectors
        self.ipsec_policies = ipsec_policies
        self.resource_guid = resource_guid
        self.provisioning_state = None
        self.etag = etag
|
1065865483/0python_script
|
test/imag_test.py
|
# -*- coding: utf-8 -*-
# Log in to a captcha-protected site with selenium + tesseract OCR.
#
import re
import requests
import pytesseract
from selenium import webdriver
# BUG FIX: the original read "from PIL import Image,Image" (duplicate);
# contrast enhancement lives in PIL.ImageEnhance, not PIL.Image.
from PIL import Image, ImageEnhance
import time
#
driver = webdriver.Chrome()
driver.maximize_window()
driver.get("https://higo.flycua.com/hp/html/login.html")
driver.implicitly_wait(30)
# Real credentials are hidden for privacy.
driver.find_element_by_name('memberId').send_keys('xxxxxx')
driver.find_element_by_name('password').send_keys('xxxxxx')
# OCR rarely succeeds on the first attempt: keep retrying until the
# login cookie ('tokenId') appears.
while True:
    # Clear any previously entered (wrong) captcha text.
    driver.find_element_by_name("verificationCode").clear()
    # Path where the full-page screenshot / cropped captcha is saved.
    screenImg = "H:\screenImg.png"
    # Screenshot of the whole browser page.
    driver.get_screenshot_as_file(screenImg)
    # Locate the captcha element on the page.
    location = driver.find_element_by_name('authImage').location
    size = driver.find_element_by_name('authImage').size
    # Manual pixel offsets compensate for the difference between element
    # coordinates and screenshot pixels; tune them via the saved screenshot.
    left = location['x'] + 530
    top = location['y'] + 175
    right = location['x'] + size['width'] + 553
    bottom = location['y'] + size['height'] + 200
    # Crop the captcha out of the full screenshot and re-save it.
    img = Image.open(screenImg).crop((left, top, right, bottom))
    # Pre-processing to improve OCR accuracy.
    img = img.convert('RGBA')  # normalize mode
    img = img.convert('L')     # greyscale
    # BUG FIX: was Image.Contrast(img); PIL.Image has no Contrast --
    # ImageEnhance.Contrast is the contrast-enhancer factory.
    img = ImageEnhance.Contrast(img)
    img = img.enhance(2.0)     # boost contrast
    img.save(screenImg)
    # Re-read the processed captcha and run OCR on it.
    img = Image.open(screenImg)
    code = pytesseract.image_to_string(img)
    # Keep only ASCII alphanumeric characters from the OCR output
    # (tesseract often emits stray punctuation/whitespace).
    b = ''.join(re.findall(r'[a-zA-Z0-9]', code.strip()))
    print(b)
    # Submit the recognized captcha.
    driver.find_element_by_name("verificationCode").send_keys(b)
    driver.find_element_by_class_name('login-form-btn-submit').click()
    # Give the page time to react (error message / cookies).
    time.sleep(5)
    # Stringify the cookies so a simple text search works.
    cookie1 = str(driver.get_cookies())
    print(cookie1)
    # Login succeeded iff the cookie string contains 'tokenId'.
    matchObj = re.search(r'tokenId', cookie1, re.M | re.I)
    if matchObj:
        print(matchObj.group())
        break
    else:
        print("No match!!")
print('结束')
|
alphagov/backdropsend
|
backdropsend/argumentsparser.py
|
import argparse
import select
def no_piped_input(arguments):
    """Return True when no data is waiting to be read on arguments.file."""
    # Zero-timeout select: poll the stream for readability without blocking.
    readable = select.select([arguments.file], [], [], 0)[0]
    return len(readable) == 0
def parse_args(args, input):
    """Parse the backdrop-send command line.

    :param args: argument list (typically sys.argv[1:])
    :param input: fallback file object used when no 'file' positional
        argument is supplied (typically sys.stdin)
    :returns: the parsed argparse namespace; errors exit via parser.error
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--url', help="URL of the target data-set",
                        required=True)
    parser.add_argument('--token', help="Bearer token for the target data-set",
                        required=True)
    parser.add_argument('--timeout', help="Request timeout. Default: 5 seconds",
                        required=False, default=5, type=float)
    parser.add_argument('--attempts', help="Number of times to attempt sending data. Default: 3",
                        required=False, default=3, type=int)
    parser.add_argument('--failfast', help="Don't retry sending data",
                        required=False, default=False, action='store_true')
    # Hidden knob (help suppressed): delay between retries.
    parser.add_argument('--sleep', help=argparse.SUPPRESS,
                        required=False, default=3, type=int)
    parser.add_argument('file', help="File containing JSON to send", nargs='?',
                        type=argparse.FileType('r'),
                        default=input)
    arguments = parser.parse_args(args)
    # --failfast overrides --attempts: exactly one try.
    if arguments.failfast:
        arguments.attempts = 1
    # Refuse to run with no data available on the chosen stream.
    if no_piped_input(arguments):
        parser.error("No input provided")
    return arguments
|
giantas/minor-python-tests
|
Operate List/operate_list.py
|
# Define a function sum() and a function multiply()
# that sums and multiplies (respectively) all the numbers in a list of numbers.
# For example, sum([1, 2, 3, 4]) should return 10,
# and multiply([1, 2, 3, 4]) should return 24.
def check_list(num_list):
    """Return True when *num_list* is a non-empty collection of ints.

    Stray bracket/comma characters ('[', ']', ',') are ignored before the
    type check, preserving the original tolerance for loosely tokenised
    input. Falsy inputs (None, empty list, etc.) return False without
    raising, which also fixes the TypeError the original len() call threw
    on non-sized values.
    """
    if not num_list:
        return False
    # Drop the punctuation tokens the original filtered out.
    values = [item for item in num_list if item not in ('[', ']', ',')]
    # type(...) is int (not isinstance) deliberately excludes bools,
    # matching the original `type(x) != int` behaviour.
    return all(type(value) is int for value in values)
def sum(num_list):
    """Return the sum of the list values, or False for invalid input."""
    # Guard clause: reject anything check_list does not accept.
    if not check_list(num_list):
        return False
    total = 0
    for value in num_list:
        total += value
    return total
def multiply(num_list):
    """Return the product of the list values, or False for invalid input."""
    # Guard clause: reject anything check_list does not accept.
    if not check_list(num_list):
        return False
    product = 1
    for value in num_list:
        product *= value
    return product
def main():
    """Prompt for a list and print its sum and product.

    NOTE(review): on Python 2, input() eval's the typed text so entering
    "[1, 2, 3]" yields a real list; on Python 3 it stays a string and
    both operations return False -- confirm intended interpreter.
    """
    get_list = input("Enter list: ")
    operations = [sum, multiply]
    # print() with an explicit list() prints the same "[s, p]" output on
    # Python 2 (where the original Py2-only `print map(...)` ran) and 3.
    print(list(map(lambda op: op(get_list), operations)))


if __name__ == "__main__":
    main()
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/IPython/core/completerlib.py
|
# encoding: utf-8
"""Implementations for various useful completers.
These are all loaded by default by IPython.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import glob
import inspect
import os
import re
import sys
try:
# Python >= 3.3
from importlib.machinery import all_suffixes
_suffixes = all_suffixes()
except ImportError:
from imp import get_suffixes
_suffixes = [ s[0] for s in get_suffixes() ]
# Third-party imports
from time import time
from zipimport import zipimporter
# Our own imports
from IPython.core.completer import expand_user, compress_user
from IPython.core.error import TryNext
from IPython.utils._process_common import arg_split
from IPython.utils.py3compat import string_types
# FIXME: this should be pulled in with the right call via the component system
from IPython import get_ipython
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Time in seconds after which the rootmodules will be stored permanently in the
# ipython ip.db database (kept in the user's .ipython dir).
TIMEOUT_STORAGE = 2
# Time in seconds after which we give up
TIMEOUT_GIVEUP = 20
# Regular expression for the python import statement
import_re = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*?)'
r'(?P<package>[/\\]__init__)?'
r'(?P<suffix>%s)$' %
r'|'.join(re.escape(s) for s in _suffixes))
# RE for the ipython %run command (python + ipython scripts)
magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$')
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
def module_list(path):
    """
    Return the list containing the names of the modules available in the given
    folder.

    *path* may be a filesystem directory or a zip archive on sys.path; in
    the latter case the archive's file table is consulted instead of the
    filesystem. Names are deduplicated via a set before returning.
    """
    # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
    if path == '':
        path = '.'
    # A few local constants to be used in loops below
    pjoin = os.path.join
    if os.path.isdir(path):
        # Build a list of all files in the directory and all files
        # in its subdirectories. For performance reasons, do not
        # recurse more than one level into subdirectories.
        files = []
        for root, dirs, nondirs in os.walk(path, followlinks=True):
            subdir = root[len(path)+1:]
            if subdir:
                files.extend(pjoin(subdir, f) for f in nondirs)
                dirs[:] = [] # Do not recurse into additional subdirectories.
            else:
                files.extend(nondirs)
    else:
        try:
            # Zip archive on sys.path: read its internal file table.
            # _files is a private zipimporter attribute -- may break on
            # future Python versions.
            files = list(zipimporter(path)._files.keys())
        except:
            files = []
    # Build a list of modules which match the import_re regex.
    # import_re captures the module name before a known import suffix
    # (.py, .so, ...) or a package's __init__ file.
    modules = []
    for f in files:
        m = import_re.match(f)
        if m:
            modules.append(m.group('name'))
    return list(set(modules))
def get_root_modules():
    """
    Returns a list containing the names of all the modules available in the
    folders of the pythonpath.
    ip.db['rootmodules_cache'] maps sys.path entries to list of modules.

    Scanning large sys.path entries can be slow, so results are cached in
    the IPython database once scanning exceeds TIMEOUT_STORAGE seconds,
    and the whole scan is abandoned after TIMEOUT_GIVEUP seconds.
    """
    ip = get_ipython()
    rootmodules_cache = ip.db.get('rootmodules_cache', {})
    # Builtin modules are always available regardless of sys.path.
    rootmodules = list(sys.builtin_module_names)
    start_time = time()
    store = False
    for path in sys.path:
        try:
            # Cache hit: reuse the previously scanned module list.
            modules = rootmodules_cache[path]
        except KeyError:
            modules = module_list(path)
            try:
                modules.remove('__init__')
            except ValueError:
                pass
            if path not in ('', '.'): # cwd modules should not be cached
                rootmodules_cache[path] = modules
            if time() - start_time > TIMEOUT_STORAGE and not store:
                store = True
                print("\nCaching the list of root modules, please wait!")
                print("(This will only be done once - type '%rehashx' to "
                      "reset cache!)\n")
                sys.stdout.flush()
            if time() - start_time > TIMEOUT_GIVEUP:
                print("This is taking too long, we give up.\n")
                return []
        rootmodules.extend(modules)
    if store:
        # Persist the cache so the next session skips the slow scan.
        ip.db['rootmodules_cache'] = rootmodules_cache
    rootmodules = list(set(rootmodules))
    return rootmodules
def is_importable(module, attr, only_modules):
    """Decide whether *attr* of *module* should be offered as a completion.

    With only_modules=True, only submodules qualify; otherwise every
    attribute except dunder names does.
    """
    if only_modules:
        return inspect.ismodule(getattr(module, attr))
    is_dunder = attr.startswith('__') and attr.endswith('__')
    return not is_dunder
def try_import(mod, only_modules=False):
    """Import dotted module path *mod* and return its completable names.

    Returns a list of attribute names (and, for packages, submodule
    names). Any import failure yields an empty list rather than raising.
    With only_modules=True, only module-valued attributes are offered
    (unless *mod* is a package __init__, whose contents are still listed).
    """
    try:
        m = __import__(mod)
    except:
        return []
    # __import__('a.b.c') returns package 'a'; walk down to the leaf.
    mods = mod.split('.')
    for module in mods[1:]:
        m = getattr(m, module)
    m_is_init = hasattr(m, '__file__') and '__init__' in m.__file__
    completions = []
    if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
        completions.extend( [attr for attr in dir(m) if
                             is_importable(m, attr, only_modules)])
    # Names a module explicitly exports, even if not yet imported.
    completions.extend(getattr(m, '__all__', []))
    if m_is_init:
        # Packages also complete to the modules in their directory.
        completions.extend(module_list(os.path.dirname(m.__file__)))
    completions = {c for c in completions if isinstance(c, string_types)}
    completions.discard('__init__')
    return list(completions)
#-----------------------------------------------------------------------------
# Completion-related functions.
#-----------------------------------------------------------------------------
def quick_completer(cmd, completions):
    """ Easily create a trivial completer for a command.
    Takes either a list of completions, or all completions in string (that will
    be split on whitespace).
    Example::
        [d:\ipython]|1> import ipy_completers
        [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
        [d:\ipython]|3> foo b<TAB>
        bar baz
        [d:\ipython]|3> foo ba
    """
    if isinstance(completions, string_types):
        completions = completions.split()
    # The closure captures `completions`; the hook ignores the event and
    # always offers the same fixed list.
    def do_complete(self, event):
        return completions
    get_ipython().set_hook('complete_command',do_complete, str_key = cmd)
def module_completion(line):
    """
    Returns a list containing the completion possibilities for an import line.
    The line looks like this :
    'import xml.d'
    'from xml.dom import'

    Falls through (implicitly returning None) when the line does not
    match any of the recognised import forms.
    """
    words = line.split(' ')
    nwords = len(words)
    # from whatever <tab> -> 'import '
    if nwords == 3 and words[0] == 'from':
        return ['import ']
    # 'from xy<tab>' or 'import xy<tab>'
    if nwords < 3 and (words[0] in {'%aimport', 'import', 'from'}) :
        if nwords == 1:
            return get_root_modules()
        mod = words[1].split('.')
        if len(mod) < 2:
            return get_root_modules()
        # Complete the last dotted segment against the parent package.
        completion_list = try_import('.'.join(mod[:-1]), True)
        return ['.'.join(mod[:-1] + [el]) for el in completion_list]
    # 'from xyz import abc<tab>'
    if nwords >= 3 and words[0] == 'from':
        mod = words[1]
        return try_import(mod)
#-----------------------------------------------------------------------------
# Completers
#-----------------------------------------------------------------------------
# These all have the func(self, event) signature to be used as custom
# completers
def module_completer(self,event):
    """Give completions after user has typed 'import ...' or 'from ...'"""
    # This works in all versions of python. While 2.5 has
    # pkgutil.walk_packages(), that particular routine is fairly dangerous,
    # since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
    # of possibly problematic side effects.
    # This search the folders in the sys.path for available modules.
    # Thin hook wrapper: all logic lives in module_completion().
    return module_completion(event.line)
# FIXME: there's a lot of logic common to the run, cd and builtin file
# completers, that is currently reimplemented in each.
def magic_run_completer(self, event):
    """Complete files that end in .py or .ipy or .ipynb for the %run command.
    """
    comps = arg_split(event.line, strict=False)
    # relpath should be the current token that we need to complete.
    if (len(comps) > 1) and (not event.line.endswith(' ')):
        relpath = comps[-1].strip("'\"")
    else:
        relpath = ''
    #print("\nev=", event)  # dbg
    #print("rp=", relpath)  # dbg
    #print('comps=', comps)  # dbg
    lglob = glob.glob
    isdir = os.path.isdir
    relpath, tilde_expand, tilde_val = expand_user(relpath)
    # Find if the user has already typed the first filename, after which we
    # should complete on all files, since after the first one other files may
    # be arguments to the input script.
    if any(magic_run_re.match(c) for c in comps):
        # Script already chosen: offer any file/dir as a script argument.
        matches = [f.replace('\\','/') + ('/' if isdir(f) else '')
                   for f in lglob(relpath+'*')]
    else:
        # Still choosing the script: offer directories plus runnable files.
        dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]
        pys = [f.replace('\\','/')
               for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
               lglob(relpath+'*.ipynb') + lglob(relpath + '*.pyw')]
        matches = dirs + pys
    #print('run comp:', dirs+pys) # dbg
    # Re-apply the user's original ~ spelling to the results.
    return [compress_user(p, tilde_expand, tilde_val) for p in matches]
def cd_completer(self, event):
    """Completer function for cd, which only returns directories."""
    ip = get_ipython()
    relpath = event.symbol
    #print(event) # dbg
    if event.line.endswith('-b') or ' -b ' in event.line:
        # return only bookmark completions
        bkms = self.db.get('bookmarks', None)
        if bkms:
            return bkms.keys()
        else:
            return []
    if event.symbol == '-':
        # Width needed to zero-pad the directory-history indices.
        width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
        # jump in directory history by number
        fmt = '-%0' + width_dh +'d [%s]'
        ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
        if len(ents) > 1:
            return ents
        return []
    if event.symbol.startswith('--'):
        # '--<tab>': complete against directory-history basenames.
        return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
    # Expand ~ in path and normalize directory separators.
    relpath, tilde_expand, tilde_val = expand_user(relpath)
    relpath = relpath.replace('\\','/')
    found = []
    for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
              if os.path.isdir(f)]:
        if ' ' in d:
            # we don't want to deal with any of that, complex code
            # for this is elsewhere
            raise TryNext
        found.append(d)
    if not found:
        if os.path.isdir(relpath):
            return [compress_user(relpath, tilde_expand, tilde_val)]
        # if no completions so far, try bookmarks
        bks = self.db.get('bookmarks',{})
        bkmatches = [s for s in bks if s.startswith(event.symbol)]
        if bkmatches:
            return bkmatches
        # Nothing matched: let the next completer in the chain try.
        raise TryNext
    return [compress_user(p, tilde_expand, tilde_val) for p in found]
def reset_completer(self, event):
    """A completer for %reset magic"""
    # Fixed option list: flags plus the resettable namespaces.
    return ["-f", "-s", "in", "out", "array", "dhist"]
|
osmanbaskaya/text-entail
|
run/entail_utils.py
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Osman Baskaya"
"""
Some utility functions for entailment project
"""
from collections import defaultdict as dd
from metrics import *
def get_eval_metric(metric_name):
    """Return the entailment scoring function registered under metric_name.

    Args:
        metric_name: one of "jaccard", "1", "2", "3".

    Returns:
        The corresponding metric function from the metrics module.

    Raises:
        ValueError: if metric_name is not a known metric. (The original
            silently returned None here, which deferred the failure to the
            call site; failing loudly makes typos in CLI args obvious.)
    """
    if metric_name == "jaccard":
        return jaccard_index
    elif metric_name == "1":
        return entail_score1
    elif metric_name == "2":
        return entail_score2
    elif metric_name == "3":
        return entail_score3
    raise ValueError("unknown evaluation metric: %s" % metric_name)
def get_test_pairs(test_pairs):
    """Read whitespace-separated (word1, word2, tag) triples from a file.

    Args:
        test_pairs: path to a file with one "w1 w2 tag" triple per line.

    Returns:
        List of (w1, w2, tag) string tuples, in file order.

    Raises:
        ValueError: if a line does not contain exactly three fields.
    """
    pairs = []
    # Context manager closes the handle deterministically; the original
    # left the file object to the garbage collector.
    with open(test_pairs) as f:
        for line in f:
            w1, w2, tag = line.split()
            pairs.append((w1, w2, tag))
    return pairs
def get_contexts_above_threshold(test_set, subs_file, threshold):
    """Collect, per test word, the line numbers where it occurs as a
    substitute with probability >= threshold.

    Args:
        test_set: collection of target words to track (membership-tested).
        subs_file: iterable of lines shaped
            "<target> <word1> <prob1> <word2> <prob2> ...".
        threshold: minimum substitute probability to record an occurrence.

    Returns:
        (words, num_lines): words maps word -> set of 0-based line numbers;
        num_lines is the total number of lines consumed.
    """
    words = dd(set)
    # Initialize so an empty subs_file yields num_lines == 0 instead of
    # a NameError at the return statement (bug in the original).
    line_num = -1
    for line_num, line in enumerate(subs_file):
        tokens = line.split()
        # tokens[0] is the target word; (word, prob) pairs follow.
        for i in range(1, len(tokens) - 1, 2):  # range, not xrange: works on Py2 and Py3
            word = tokens[i]
            if word in test_set:
                prob = float(tokens[i + 1])
                if prob >= threshold:
                    words[word].add(line_num)
    return words, line_num + 1
|
Tianyi94/EC601Project_Somatic-Parkour-Game-based-on-OpenCV
|
Old Code/ControlPart/FaceDetection+BackgroundReduction.py
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
# Haar-cascade face detector (frontal faces); the XML path is machine-specific.
face_cascade = cv2.CascadeClassifier('/home/tianyiz/user/601project/c/haarcascade_frontalface_alt.xml')
# Capture frames from the default webcam (device 0).
cap = cv2.VideoCapture(0)
# MOG2 background subtractor: isolates the moving foreground per frame.
fgbg = cv2.createBackgroundSubtractorMOG2()
while 1:
    # NOTE(review): ret is never checked — a failed read leaves img as None
    # and cvtColor would raise.
    ret, img = cap.read()
    # Cascade detection operates on grayscale.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    #Background reduce
    fgmask = fgbg.apply(img)
    cv2.imshow('Reduce',fgmask)
    for (x,y,w,h) in faces:
        print(x,y,w,h)
        # Draw a blue box around each detected face on the color frame.
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        # Face regions of interest (currently unused beyond extraction).
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
    cv2.imshow('img',img)
    # Exit the loop when ESC (key code 27) is pressed.
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
|
Fogapod/VKBot
|
bot/plugins/plugin_stop.py
|
# coding:utf8
class Plugin(object):
    # __doc__ doubles as user-facing help text (Russian) that the bot later
    # .format()s with the {protection}/{keywords}/{keyword} placeholders —
    # behavioral text, do not translate. English gist: "Plugin stops the bot.
    # Requires access level {protection} or higher. Keywords: [{keywords}].
    # Usage/example: {keyword}".
    __doc__ = '''Плагин предназначен для остановки бота.
Для использования необходимо иметь уровень доступа {protection} или выше
Ключевые слова: [{keywords}]
Использование: {keyword}
Пример: {keyword}'''

    # Registry name of the plugin and the chat keywords that trigger it.
    name = 'stop'
    keywords = (u'стоп', name, '!')
    # Minimum user access level required to run this command.
    protection = 3
    # The stop command takes no arguments.
    argument_required = False

    def respond(self, msg, rsp, utils, *args, **kwargs):
        """Stop the bot and reply with a farewell message (Russian,
        user-facing — kept verbatim)."""
        utils.stop_bot()
        rsp.text = u'Завершаю работу. Удачного времени суток!'
        return rsp
|
IgniparousTempest/py-minutiae-viewer
|
pyminutiaeviewer/gui_editor.py
|
import math
from pathlib import Path
from tkinter import W, N, E, StringVar, PhotoImage
from tkinter.ttk import Button, Label, LabelFrame
from overrides import overrides
from pyminutiaeviewer.gui_common import NotebookTabBase
from pyminutiaeviewer.minutia import Minutia, MinutiaType
class MinutiaeEditorFrame(NotebookTabBase):
    """Notebook tab that lets the user add, orient and delete minutiae on the
    fingerprint image with the mouse (click to place, drag to set angle,
    right-click to delete)."""
    # TODO: I'd like to remove the <minutiae> parameter
    def __init__(self, parent, load_fingerprint_func, load_minutiae_func, save_minutiae_file):
        super(self.__class__, self).__init__(parent, load_fingerprint_func)
        self.root = parent
        # Backs the "Minutiae: N" label shown in the info panel.
        self.minutiae_count = StringVar()
        self._update_minutiae_count()
        # ((x, y), MinutiaType) of the minutia currently being placed, or None
        # when no placement is in progress.
        self.current_minutiae = None
        self.load_minutiae_btn = Button(self, text="Load Minutiae", command=load_minutiae_func)
        self.load_minutiae_btn.grid(row=1, column=0, sticky=N + W + E)
        self.export_minutiae_btn = Button(self, text="Export Minutiae", command=save_minutiae_file)
        self.export_minutiae_btn.grid(row=2, column=0, sticky=N + W + E)
        self.info_frame = InfoFrame(self, "Info", self.minutiae_count)
        self.info_frame.grid(row=3, column=0, padx=4, sticky=N + W + E)

    @overrides
    def load_fingerprint_image(self, image):
        # Refresh the count label when the host loads a new fingerprint.
        self._update_minutiae_count()

    @overrides
    def load_minutiae_file(self):
        self._update_minutiae_count()

    def _update_minutiae_count(self):
        """Sync the count label with the host window's minutiae list."""
        self.minutiae_count.set("Minutiae: {}".format(self.root.number_of_minutiae()))

    @overrides
    def on_canvas_mouse_left_click(self, event):
        """
        Starts placing a new minutia (RIDGE_ENDING) at the mouse click.

        NOTE(review): the original docstring said "bifurcation" and the
        InfoFrame legend labels LMB as "Bifurcation", but the code stores
        RIDGE_ENDING — confirm which mapping is intended.
        """
        x, y = event.x, event.y
        if not self.root.is_point_in_canvas_image(x, y):
            return
        self.current_minutiae = ((x, y), MinutiaType.RIDGE_ENDING)

    @overrides
    def on_canvas_ctrl_mouse_left_click(self, event):
        """
        Starts placing a new minutia (BIFURCATION) at the mouse click.

        NOTE(review): the original docstring said "ridge ending" — see the
        note on on_canvas_mouse_left_click; the two handlers may be swapped.
        """
        x, y = event.x, event.y
        if not self.root.is_point_in_canvas_image(x, y):
            return
        self.current_minutiae = ((x, y), MinutiaType.BIFURCATION)

    @overrides
    def on_canvas_mouse_right_click(self, event):
        """
        Removes the minutia closest to the mouse click (within distance 10,
        in image coordinates).
        """
        x, y = event.x, event.y
        if not self.root.is_point_in_canvas_image(x, y):
            return
        # Convert canvas coordinates to image coordinates.
        scale_factor = self.root.canvas_image_scale_factor()
        x, y = x * scale_factor, y * scale_factor
        possible_minutiae = []
        for i in range(self.root.number_of_minutiae()):
            m = self.root.minutiae[i]
            # Manhattan distance — cheap and adequate for a small radius.
            dist = abs(m.x - x) + abs(m.y - y)
            if dist < 10:
                possible_minutiae.append((dist, i))
        # Sort ascending, in-place.
        possible_minutiae.sort(key=lambda tup: tup[0])
        if len(possible_minutiae) == 0:
            return
        else:
            # Delete the nearest candidate by its index in the host list.
            del self.root.minutiae[possible_minutiae[0][1]]
        self.root.draw_minutiae()
        self._update_minutiae_count()

    @overrides
    def on_canvas_mouse_left_drag(self, event):
        """
        Sets the angle of the minutiae being placed.

        NOTE(review): assumes a left-click already started a placement; if
        current_minutiae is None (e.g. the click began outside the image),
        the unpacking below raises TypeError.
        """
        x, y = event.x, event.y
        ((sx, sy), minutiae_type) = self.current_minutiae
        # Angle of the drag vector; +90 shifts atan2's reference axis —
        # presumably to the minutia angle convention (TODO confirm).
        angle = math.degrees(math.atan2(y - sy, x - sx)) + 90
        minutia = Minutia(round(sx), round(sy), angle, minutiae_type, 1.0)
        # Preview only: draw the single minutia without committing it.
        self.root.draw_single_minutia(minutia)

    @overrides
    def on_canvas_mouse_left_release(self, event):
        """
        Places the minutiae currently being edited..
        """
        x, y = event.x, event.y
        scale_factor = self.root.canvas_image_scale_factor()
        ((px, py), minutiae_type) = self.current_minutiae
        angle = math.degrees(math.atan2(y - py, x - px)) + 90
        # Commit in image coordinates (canvas position times scale factor).
        self.root.minutiae.append(Minutia(round(px * scale_factor), round(py * scale_factor), angle, minutiae_type, 1.0))
        self.current_minutiae = None
        self.root.draw_minutiae()
        self._update_minutiae_count()
class InfoFrame(LabelFrame):
    """Side panel showing the live minutiae count and the mouse-button legend.

    Args:
        parent: containing widget.
        title: LabelFrame caption.
        minutiae_count: shared StringVar driving the count label.
    """
    def __init__(self, parent, title, minutiae_count):
        super(self.__class__, self).__init__(parent, text=title)
        self.current_number_minutiae_label = Label(self, textvariable=minutiae_count)
        self.current_number_minutiae_label.grid(row=0, column=0, sticky=N + W + E)
        self.bifurcation_label = Label(self, text="Bifurcation (LMB):")
        self.bifurcation_label.grid(row=1, column=0, sticky=W)
        # Images are kept as instance attributes so Tkinter does not
        # garbage-collect them while the labels still display them.
        self.bifurcation_image = PhotoImage(file=Path(__file__).resolve().parent / 'images' / 'bifurcation.png')
        self.bifurcation_image_label = Label(self, image=self.bifurcation_image)
        self.bifurcation_image_label.grid(row=2, column=0, sticky=W)
        self.ridge_ending_label = Label(self, text="Ridge Ending (CTRL + LMB):")
        self.ridge_ending_label.grid(row=3, column=0, sticky=W)
        self.ridge_ending_image = PhotoImage(file=Path(__file__).resolve().parent / 'images' / 'ridge_ending.png')
        self.ridge_ending_image_label = Label(self, image=self.ridge_ending_image)
        self.ridge_ending_image_label.grid(row=4, column=0, sticky=W)
|
p-morais/rl
|
rl/utils/plotting.py
|
"""This screws up visualize.py"""
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
from torch.autograd import Variable as Var
from torch import Tensor
class RealtimePlot():
def __init__(self, style='ggplot'):
plt.style.use(style)
plt.ion()
self.fig, self.ax = plt.subplots()
self.xlim = 0
self.yvals = []
self.line = Line2D([], [])
self.ax.add_line(self.line)
def config(self, ylabel, xlabel):
self.ax.set_ylabel(ylabel)
self.ax.set_xlabel(xlabel)
self.fig.tight_layout()
def plot(self, y):
self.yvals.append(y)
self.line.set_data(np.arange(len(self.yvals)), self.yvals)
self.ax.relim()
self.ax.autoscale_view()
self.ax.set_xlim(0, self.xlim)
self.xlim += 1
self.fig.canvas.flush_events()
def done(self):
plt.ioff()
plt.show()
def policyplot(env, policy, trj_len):
obs_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
y = np.zeros((trj_len, action_dim))
X = np.zeros((trj_len, obs_dim))
obs = env.reset()
for t in range(trj_len):
X[t, :] = obs
action = policy(Var(Tensor(obs[None, :]))).data.numpy()[0]
y[t, :] = action
obs = env.step(action)[0]
fig, axes = plt.subplots(1, action_dim)
for a in range(action_dim):
axes[a].plot(np.arange(trj_len), y[:, a])
plt.show()
"""
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-relay/tests/test_azure_mgmt_wcfrelay.py
|
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import time
from msrestazure.azure_exceptions import CloudError
import azure.mgmt.relay.models
from azure.mgmt.relay.models import RelayNamespace, Sku, SkuTier, Relaytype, AuthorizationRule, AccessRights, AccessKeys, WcfRelay, ErrorResponseException, ErrorResponse
from azure.common.credentials import ServicePrincipalCredentials
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
class MgmtWcfRelayTest(AzureMgmtTestCase):
    """CRUD round-trip test for Azure Relay: creates a namespace and a WCF
    relay, exercises the relay's authorization rules and keys, then deletes
    everything. Runs against a live (or recorded) Azure subscription."""

    def setUp(self):
        super(MgmtWcfRelayTest, self).setUp()
        self.relay_client = self.create_mgmt_client(
            azure.mgmt.relay.RelayManagementClient
        )

    @ResourceGroupPreparer()
    def test_wcfrelay_curd(self, resource_group, location):
        resource_group_name = resource_group.name
        #Create a Namespace
        namespace_name = "testingpythontestcaseeventhubnamespaceEventhub"
        # RelayNamespace(location, tags, sku) — positional construction.
        namespaceparameter = RelayNamespace(location, {'tag1': 'value1', 'tag2': 'value2'}, Sku(SkuTier.standard))
        # create_or_update returns a poller; .result() waits for provisioning.
        creatednamespace = self.relay_client.namespaces.create_or_update(resource_group_name, namespace_name, namespaceparameter).result()
        self.assertEqual(creatednamespace.name, namespace_name)
        #
        # # Get created Namespace
        #
        getnamespaceresponse = self.relay_client.namespaces.get(resource_group_name, namespace_name)
        self.assertEqual(getnamespaceresponse.name, namespace_name)
        # Create a WcfRelay
        wcfrelay_name = "testingpythontestcasewcfrelay"
        wcfrelayparameter = WcfRelay(
            relay_type=Relaytype.net_tcp,
            requires_client_authorization=True,
            requires_transport_security=True,
            user_metadata="User data for WcfRelay"
        )
        createdwcfrelayresponse = self.relay_client.wcf_relays.create_or_update(resource_group_name, namespace_name, wcfrelay_name, wcfrelayparameter)
        self.assertEqual(createdwcfrelayresponse.name, wcfrelay_name)
        self.assertEqual(createdwcfrelayresponse.requires_client_authorization, True)
        #Get the created wcfRelay
        geteventhubresponse = self.relay_client.wcf_relays.get(resource_group_name, namespace_name, wcfrelay_name)
        self.assertEqual(geteventhubresponse.name, wcfrelay_name)
        self.assertEqual(geteventhubresponse.requires_transport_security, True)
        self.assertEqual(geteventhubresponse.user_metadata, "User data for WcfRelay")
        #Get the List of wcfRealy by namespaces
        getlistbynamespacewcfrelayresponse = list(self.relay_client.wcf_relays.list_by_namespace(resource_group_name, namespace_name))
        self.assertGreater(len(getlistbynamespacewcfrelayresponse), 0)
        # update the Created eventhub
        wcfrelayupdateparameter = WcfRelay(
            relay_type=Relaytype.net_tcp,
            user_metadata="User data for WcfRelay updated"
        )
        updatewcfrelayresponse = self.relay_client.wcf_relays.create_or_update(resource_group_name, namespace_name,
                                                                              wcfrelay_name, wcfrelayupdateparameter)
        self.assertEqual(updatewcfrelayresponse.name, wcfrelay_name)
        self.assertEqual(updatewcfrelayresponse.requires_transport_security, True)
        self.assertEqual(updatewcfrelayresponse.requires_client_authorization, True)
        self.assertEqual(updatewcfrelayresponse.user_metadata, "User data for WcfRelay updated")
        # Create a new authorizationrule
        authoRule_name = "testingauthrulepy"
        createwcfrelayauthorule = self.relay_client.wcf_relays.create_or_update_authorization_rule(resource_group_name, namespace_name, wcfrelay_name, authoRule_name,[AccessRights('Send'),AccessRights('Listen')])
        self.assertEqual(createwcfrelayauthorule.name, authoRule_name, "Authorization rule name not as created - create_or_update_authorization_rule ")
        self.assertEqual(len(createwcfrelayauthorule.rights), 2)
        # Get the created authorizationrule
        getwcfrelayauthorule = self.relay_client.wcf_relays.get_authorization_rule(resource_group_name, namespace_name, wcfrelay_name, authoRule_name)
        self.assertEqual(getwcfrelayauthorule.name, authoRule_name, "Authorization rule name not as passed as parameter - get_authorization_rule ")
        self.assertEqual(len(getwcfrelayauthorule.rights), 2, "Access rights mis match as created - get_authorization_rule ")
        # update the rights of the authorizatiorule
        getwcfrelayauthorule.rights.append('Manage')
        updatewcfrelayauthorule = self.relay_client.wcf_relays.create_or_update_authorization_rule(resource_group_name, namespace_name, wcfrelay_name, authoRule_name, getwcfrelayauthorule.rights)
        self.assertEqual(updatewcfrelayauthorule.name, authoRule_name, "Authorization rule name not as passed as parameter for update call - create_or_update_authorization_rule ")
        self.assertEqual(len(updatewcfrelayauthorule.rights), 3, "Access rights mis match as updated - create_or_update_authorization_rule ")
        #list all the authorization ruels for the given namespace
        wcfrelayauthorulelist = list(self.relay_client.wcf_relays.list_authorization_rules(resource_group_name, namespace_name, wcfrelay_name))
        # NOTE(review): the failure message speaks of "created + default = 2"
        # but the assertion expects 1 — confirm the intended count.
        self.assertEqual(len(wcfrelayauthorulelist), 1, "number of authorization rule mismatch with the created + default = 2 - list_authorization_rules")
        #List keys for the authorization rule
        listkeysauthorizationrule = self.relay_client.wcf_relays.list_keys(resource_group_name, namespace_name, wcfrelay_name, authoRule_name)
        self.assertIsNotNone(listkeysauthorizationrule)
        # regenerate Keys for authorizationrule - Primary
        regenratePrimarykeyauthorizationrule = self.relay_client.wcf_relays.regenerate_keys(resource_group_name, namespace_name, wcfrelay_name, authoRule_name, 'PrimaryKey')
        self.assertNotEqual(listkeysauthorizationrule.primary_key,regenratePrimarykeyauthorizationrule.primary_key)
        # regenerate Keys for authorizationrule - Primary
        regenrateSecondarykeyauthorizationrule = self.relay_client.wcf_relays.regenerate_keys(resource_group_name,namespace_name, wcfrelay_name, authoRule_name, 'SecondaryKey')
        self.assertNotEqual(listkeysauthorizationrule.secondary_key, regenrateSecondarykeyauthorizationrule.secondary_key)
        # delete the authorizationrule
        self.relay_client.wcf_relays.delete_authorization_rule(resource_group_name, namespace_name, wcfrelay_name, authoRule_name)
        # Delete the created WcfRelay
        getwcfrelayresponse = self.relay_client.wcf_relays.delete(resource_group_name, namespace_name, wcfrelay_name)
        # Delete the create namespace
        self.relay_client.namespaces.delete(resource_group_name, namespace_name).result()
# ------------------------------------------------------------------------------
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
|
ACBL-Bridge/Bridge-Application
|
Home Files/LoginandSignupV10.py
|
from tkinter import *
import mysql.connector as mysql
from MySQLdb import dbConnect
from HomeOOP import *
import datetime
from PIL import Image, ImageTk
class MainMenu(Frame):
    """First screen of the bridge app: title, Login, Sign up and Quit buttons.

    Login/sign-up sub-windows talk to a MySQL backend via module-level Entry
    widgets shared through `global` statements.
    """
    def __init__(self, parent): #The very first screen of the web app
        Frame.__init__(self, parent)
        w, h = parent.winfo_screenwidth(), parent.winfo_screenheight()
        #parent.overrideredirect(1)
        parent.geometry("%dx%d+0+0" % (w, h))
        # NOTE(review): .place() returns None, so `frame` is None and the
        # widgets below fall back to the default master window.
        frame = Frame(parent, width=w, height=h).place(x=350, y=450)
        # frame.pack(expand=True)
        # canvas = Canvas(parent, width=w, height=h)
        # scale_width = w / 3900
        # scale_height = h / 2613
        # Background image is fetched from GitHub at startup.
        web = "https://raw.githubusercontent.com/ACBL-Bridge/Bridge-Application/master/Login/"
        URL = "login_background_resized.jpg"
        # NOTE(review): urlopen/BytesIO are not in this module's visible
        # imports — presumably re-exported by `from HomeOOP import *`; confirm.
        u = urlopen(web + URL)
        raw_data = u.read()
        u.close()
        im = Image.open(BytesIO(raw_data))
        bckgrd = ImageTk.PhotoImage(im)
        login_bckgrd = Label(frame, image=bckgrd)
        # Keep a reference so Tk does not garbage-collect the image.
        login_bckgrd.image = bckgrd
        login_bckgrd.place(x=0, y=0, relwidth=1, relheight=1)
        titleLabel = Label(frame, text="LET'S PLAY BRIDGE", fg="black", font='Arial 36')
        titleLabel.pack(side="top", pady=150)
        loginButton = Button(frame, text="Existing User", fg="blue", font="Arial 14", command=lambda: self.LoginScreen(parent))
        loginButton.pack(side='top')
        signupButton = Button(frame, text="Sign up", fg="blue", font="Arial 14", command=self.SignupScreen)
        signupButton.pack(side="top")
        # NOTE(review): the Quit button is wired to SignupScreen, not to quit.
        quitButton = Button(frame, text="Quit", font="Arial 14", command=self.SignupScreen)
        quitButton.pack(side="top")

    ####################################Login - GUI ###########################
    def LoginScreen(self,parent):
        """Modal login window; credentials are checked against MySQL."""
        # Entry widgets are shared module-wide for the nested DB helpers.
        global entry_user
        global entry_pass
        top = Toplevel(self)
        top.title("Log In - ABCL")
        w, h = top.winfo_screenwidth(), top.winfo_screenheight()
        top.overrideredirect(1)
        top.geometry("550x400+%d+%d" % (w/2-275, h/2-125)) #250
        #top.configure(background = 'white')
        quitButton = Button(top, text="Go Back", font="Arial 14", command= top.destroy).pack(side="bottom", padx=20)
        #entry_user = StringVar()
        #entry_pass = StringVar()
        # Frames to divide the window into three parts.. makes it easier to organize the widgets
        topFrame = Frame(top)
        topFrame.pack()
        middleFrame = Frame(top)
        middleFrame.pack(pady=50)
        bottomFrame = Frame(top)
        bottomFrame.pack(side=BOTTOM)
        # Widgets and which frame they are in
        #label = Label(topFrame, text="LET'S PLAY BRIDGE")
        userLabel = Label(middleFrame, text='Username:', font="Arial 14")
        passLabel = Label(middleFrame, text='Password:', font="Arial 14")
        entry_user = Entry(middleFrame) # For DB
        entry_pass = Entry(middleFrame, show ='*') # For DB
        b = Button(bottomFrame, text="Log In",fg ="blue", font ="Arial 14", command=lambda: get_Login_input(self, parent))
        #Location of the Widgets in their frames
        #label.pack(side="top", fill="both", expand=True, padx=20, pady=20)
        userLabel.grid(row=10, column=0, sticky=W, padx=20)
        entry_user.grid(row=10, column=1, padx=20)
        passLabel.grid(row=11, column=0, sticky=W, padx=20)
        entry_pass.grid(row=11, column=1, padx=20)
        b.grid(row=12, columnspan=2)

        ###############################################DATABASE Check Login!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        def go_to_HomePage(user):
            # Launch the main Home window for the authenticated user.
            root = Tk()
            app = Home(root,user)
            root.mainloop()

        def get_Login_input(self, parent):
            # SECURITY(review): queries are built with string interpolation —
            # SQL injection risk; use parameterized queries (cur.execute with
            # placeholder args) instead. Passwords are also compared in plain
            # text.
            var = dbConnect()
            dbconn = mysql.connect(host=var.host, user=var.user, password=var.password, db=var.db)
            cur = dbconn.cursor() # Cursor object - required to execute all queries
            cur.execute("SELECT username FROM playerinfo WHERE username = '%s' AND password = '%s'" % (entry_user.get(), entry_pass.get()))
            rows = cur.fetchall()
            if rows:
                cur.execute("SELECT firstname, lastname, username FROM playerinfo WHERE username = '%s' AND password = '%s'" % (entry_user.get(), entry_pass.get()))
                for namerow in cur.fetchall(): # print all the first cell
                    fn = namerow[0] #store firstname
                    ln = namerow[1] #store lastname
                    user = namerow[2]
                # Tear down the login and menu windows, then open Home.
                self.destroy()
                parent.destroy()
                go_to_HomePage(user)
                # The string below is dead (disabled) code kept by the author.
                '''top = Toplevel(self)
w, h = top.winfo_screenwidth(), top.winfo_screenheight()
top.overrideredirect(1)
top.geometry("%dx%d+0+0" % (w, h))
# Frames to divide the window into three parts.. makes it easier to organize the widgets
topFrame = Frame(top)
topFrame.pack()
middleFrame = Frame(top)
middleFrame.pack(pady=250)
bottomFrame = Frame(top)
bottomFrame.pack(side=BOTTOM)
myProfileButton = Button(middleFrame, text="My Profile", fg="blue", font="Arial 14", command=self.myProfileScreen)
myProfileButton.pack()
quitButton = Button(top, text="Log Out", font="Arial 14", command=top.destroy).pack(side="bottom", padx=20)
#top.title(':D')
#top.geometry('250x200')
#get first name and last name of current player
cur.execute("SELECT firstname, lastname FROM playerinfo WHERE username = '%s' AND password = '%s'" % (entry_user.get(), entry_pass.get()))
for namerow in cur.fetchall(): # print all the first cell
    fn = namerow[0] #store firstname
    ln = namerow[1] #store lastname
rlb1 = Label(middleFrame, text='\nWelcome %s %s\n' % (fn, ln), font="Arial 14")
rlb1.pack()
rlb2 = Label(middleFrame, text='\nUserName: %s' % entry_user.get(), font="Arial 14")
rlb2.pack()
top.mainloop()
self.destroy()
parent.destroy()
go_to_HomePage()'''
            else:
                # Invalid credentials: small pop-up window.
                r = Tk()
                r.title(':D')
                r.geometry('150x150')
                rlbl = Label(r, text='\n[!] Invalid Login')
                rlbl.pack()
                r.mainloop()
            dbconn.close()

    ########################################## SIGN UP SCREEN - GUI ####################################################
    def SignupScreen(self):
        """Modal sign-up window; inserts the new player into MySQL."""
        global entry_fname
        global entry_lname
        global entry_user
        global entry_pass
        global entry_repass
        global entry_email
        global entry_ACBL
        global entry_disID
        top = Toplevel(self)
        w, h = top.winfo_screenwidth(), top.winfo_screenheight()
        top.overrideredirect(1)
        top.geometry("550x450+%d+%d" % (w / 2 - 275, h / 2 - 140)) # 250
        #top.configure(background='white')
        quitButton = Button(top, text="Go Back", font="Arial 14", command= top.destroy).pack(side="bottom", padx=20)
        #topFrame = Frame(top)
        #topFrame.pack()
        middleFrame = Frame(top)
        middleFrame.pack(pady=50)
        bottomFrame = Frame(top)
        bottomFrame.pack(side=BOTTOM)
        # Widgets and which frame they are in
        #label = Label(topFrame, text="LET'S PLAY BRIDGE")
        fnameLabel = Label(middleFrame,text = 'First Name:',font="Arial 14")
        lnameLabel = Label(middleFrame, text='Last Name:',font="Arial 14")
        userLabel = Label(middleFrame, text='Username:',font="Arial 14")
        passLabel = Label(middleFrame, text='Password:',font="Arial 14")
        repassLabel = Label(middleFrame, text='Re-Enter Password:',font="Arial 14")
        emailLabel = Label(middleFrame, text='Email(optional):',font="Arial 14")
        ACBLnumLabel = Label(middleFrame, text='ACBLnum(optional):',font="Arial 14")
        disIDLabel = Label(middleFrame, text='DistrictID(optional):',font="Arial 14")
        entry_fname = Entry(middleFrame) #For DB
        entry_lname = Entry(middleFrame) #For DB
        entry_user = Entry(middleFrame)#For DB
        entry_pass = Entry(middleFrame, show = '*')#For DB
        entry_repass = Entry(middleFrame, show = '*')#For DB
        entry_email = Entry(middleFrame)#For DB
        entry_ACBL = Entry(middleFrame)#For DB
        entry_disID = Entry(middleFrame)#For DB
        b = Button(bottomFrame, text="Sign up", font="Arial 14", command=lambda : combined_Functions(self))
        # Location of the Widgets in their frames
        #label.pack(side="top", fill="both", expand=True, padx=20, pady=20)
        fnameLabel.grid(row=1, column=0, sticky=W)
        entry_fname.grid(row=1, column=1)
        lnameLabel.grid(row=2, column=0, sticky=W)
        entry_lname.grid(row=2, column=1)
        userLabel.grid(row=3, column=0, sticky=W)
        entry_user.grid(row=3, column=1)
        passLabel.grid(row=4, column=0, sticky=W)
        entry_pass.grid(row=4, column=1)
        repassLabel.grid(row=5, column=0, sticky=W)
        entry_repass.grid(row=5, column=1)
        emailLabel.grid(row=6, column=0, sticky=W)
        entry_email.grid(row=6, column=1, padx=20, sticky= W)
        ACBLnumLabel.grid(row=7, column=0, sticky=W)
        entry_ACBL.grid(row=7, column=1, padx=20)
        disIDLabel.grid(row=8, column=0, sticky=W)
        entry_disID.grid(row=8, column=1)
        b.grid(row=10, columnspan=2)

        ####################################DATABASE Check if Username is available, check if passwords Match -> if so SIGN UP!!!!!!!!!!!!!!!
        def get_Signup_input():
            # SECURITY(review): string-interpolated SQL (injection risk) and
            # plain-text password storage — use parameterized queries and a
            # password hash.
            var = dbConnect()
            dbconn = mysql.connect(host=var.host, user=var.user, password=var.password, db=var.db)
            cur = dbconn.cursor() # Cursor object - required to execute all queries
            cur.execute("SELECT username FROM playerinfo WHERE username = '%s'" % entry_user.get())
            rows = cur.fetchall()
            if not rows:
                # print(userInput + " is available")
                if (entry_pass.get() == entry_repass.get()) and (entry_pass.get()!= "") and (entry_repass.get()!= ""):
                    # print("passwords match, good job brotha")
                    # INSERT new player ... playerinfo check
                    todaysdate = datetime.datetime.today().strftime('%Y-%m-%d') # current date
                    cur.execute("INSERT INTO playerinfo(username, password, signUpDate, firstname, lastname, email, ACLnum, districtID) VALUES('%s','%s','%s','%s','%s','%s','%s','%s')" % (
                        entry_user.get(), entry_pass.get(), todaysdate, entry_fname.get(), entry_lname.get(), entry_email.get(),entry_ACBL.get(), entry_disID.get()))
                    #get new player's ID
                    cur.execute("SELECT ID FROM playerinfo WHERE username='%s'" % entry_user.get())
                    for namerow in cur.fetchall(): # print all the first cell
                        idNum = namerow[0] # store ID number
                    # new player's...playerstats inserted by ID
                    cur.execute("INSERT INTO playerstats(ID) VALUES('%s')" % idNum)
                    dbconn.commit() #database commit aka save
                    r = Tk()
                    r.title(':D')
                    r.geometry('150x150')
                    rlbl = Label(r, text='\n[+] Signed Up!')
                    rlbl.pack()
                    r.mainloop()
                else:
                    # print("passwords don't match bruh or are NULL")
                    r = Tk()
                    r.title(':D')
                    r.geometry('150x150')
                    rlbl = Label(r, text='\n[!] Retype your passwords')
                    rlbl.pack()
                    r.mainloop()
            else:
                r = Tk()
                r.title(':D')
                r.geometry('150x150')
                rlbl = Label(r, text='\n[!] Username Not Available ')
                rlbl.pack()
                r.mainloop()
            dbconn.close()

        def go_to_Tutorial():
            # Placeholder tutorial window (currently not wired to the button).
            window = Toplevel()
            window.geometry("600x500")
            quitButton = Button(window, text="Cancel", font="Arial 14", command= window.destroy).pack(side="bottom", padx=20)
            top_Frame = Frame(window)
            top_Frame.pack()
            tLabel = Label(top_Frame, text="TUTORIAL", font="Arial 36").pack(side="top", fill="both", expand=True, padx=20, pady=20)

        def combined_Functions(self): # for the Sign Up button - store data, exits Sign Up screen, goes to Tutorial screen
            get_Signup_input()
            # top.destroy()
            #go_to_Tutorial()

    #####################################My Profile - GUI #########################################
    def myProfileScreen(self):
        """Full-screen profile view populated from playerinfo/playerstats."""
        top = Toplevel(self)
        w, h = top.winfo_screenwidth(), top.winfo_screenheight()
        top.overrideredirect(1)
        w, h = self.winfo_screenwidth(), self.winfo_screenheight()
        top.overrideredirect(1)
        top.geometry("%dx%d+0+0" % (w, h))
        topFrame = Frame(top)
        topFrame.pack()
        bottomFrame = Frame(top)
        bottomFrame.pack(side=BOTTOM)
        rightFrame = Frame(top)
        rightFrame.pack(side= RIGHT)
        leftFrame = Frame(top)
        leftFrame.pack(side=LEFT)
        #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@DB stuff@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        #entry_user.get() //username
        # SECURITY(review): string-interpolated SQL below — injection risk.
        var = dbConnect()
        dbconn = mysql.connect(host=var.host, user=var.user, password=var.password, db=var.db)
        cur = dbconn.cursor() # Cursor object - required to execute all queries
        # `data` is declared global and reset but not used in this method.
        global data
        data=[]
        # get all info from playerinfo and playerstats using current username
        cur.execute(
            "SELECT playerinfo.firstname, playerinfo.lastname, playerinfo.username, playerinfo.email, playerinfo.signUpDate, playerinfo.districtID, playerinfo.ACLnum, playerstats.dealsplayed, playerstats.level, playerstats.exp, playerstats.coins, playerstats.tournys FROM playerstats INNER JOIN playerinfo ON playerinfo.ID=playerstats.ID AND playerinfo.username='%s'" % entry_user.get())
        # NOTE(review): if the query returns no rows, the variables below stay
        # unbound and the Label construction raises NameError.
        for namerow in cur.fetchall(): # print all info
            fn = namerow[0] # firstname
            ln = namerow[1] # lastname
            un = namerow[2] #username
            em = namerow[3] # email
            sData = namerow[4] # signUpDate
            districtID = namerow[5] # District ID
            acblNumba = namerow[6] # ACBL Number
            dPlay = namerow[7] #deals played
            lvl = namerow[8] # level
            exp = namerow[9] # experience
            coins = namerow[10] # coins
            tornys = namerow[11] # tournaments
        dbconn.close() #close db connection
        #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        label = Label(topFrame, text="LET'S PLAY BRIDGE",font =('Coralva', 42)).pack(side="top", fill="both", expand=True)
        mpLabel = Label(rightFrame, text='My Profile: ', font = ('Comic Sans MS',24)).grid(ipadx = 200, columnspan = 2)
        nameLabel = Label(rightFrame, text="Name: %s %s" % (fn, ln), font = ('Comic Sans MS',14)).grid(row=1, column=0, sticky = W)
        userLabel = Label(rightFrame, text='Username: %s' % un, font = ('Comic Sans MS',14)).grid(row=2, column=0, sticky = W)
        emailLabel = Label (rightFrame, text='Email: %s' % em, font = ('Comic Sans MS',14)).grid(row=3, column=0, sticky = W)
        sLabel = Label(rightFrame, text='Signup Date: %s' %sData, font = ('Comic Sans MS',14)).grid(row=4, column=0, sticky = W)
        disIDLabel = Label(rightFrame, text='DistrictID: %s' % districtID , font = ('Comic Sans MS',14)).grid(row=5, column=0, sticky = W)
        ACBLnumLabel = Label(rightFrame, text='ACBL #: %s' % acblNumba, font = ('Comic Sans MS',14)).grid(row=6, column=0, sticky = W)
        nothing = Label(rightFrame).grid(row=7, column=0)
        msLabel= Label(rightFrame, text='My Stats', font = ('Comic Sans MS',14, 'bold')).grid(row=8, column=0, sticky = W)
        dpLabel = Label(rightFrame, text='Deals Played: %s' %dPlay, font = ('Comic Sans MS',14)).grid(row=9, column=0, sticky = W)
        levelLabel = Label(rightFrame, text='Level: %s' % lvl, font = ('Comic Sans MS',14)).grid(row=10, column=0, sticky = W)
        expLabel = Label(rightFrame, text='Experience: %s' % exp, font = ('Comic Sans MS',14)).grid(row=11, column=0, sticky = W)
        coinsLabel = Label(rightFrame, text='Coins: %s' % coins, font = ('Comic Sans MS',14)).grid(row=12, column=0, sticky = W)
        tourLabel = Label(rightFrame, text='Tournaments: %s' % tornys, font = ('Comic Sans MS',14)).grid(row=13, column=0, sticky = W)
        #b = Button(bottomFrame, text="HOME",font = 'Arial 12').pack(side=LEFT) #FIND A IMAGE OF A HOUSE
        quitButton = Button(bottomFrame, text="Go Back", command=top.destroy, font = 'Arial 12').pack(side = RIGHT)
# Application entry point: build the root window and show the main menu.
root = Tk()
MainMenu(root).pack(fill="both", expand=True)
root.mainloop()
|
devopshq/youtrack
|
youtrack/import_helper.py
|
# -*- coding: utf-8 -*-
from youtrack import YouTrackException
def utf8encode(source):
    """Return *source* UTF-8 encoded when it is a str; otherwise unchanged."""
    if not isinstance(source, str):
        return source
    return source.encode('utf-8')
def _create_custom_field_prototype(connection, cf_type, cf_name, auto_attached=False, additional_params=None):
    """Create a custom-field prototype unless one named *cf_name* exists.

    Args:
        connection: An opened Connection instance.
        cf_type: Expected/created type of the custom field.
        cf_name: Name of the custom field.
        auto_attached: Whether the field should be auto-attached.
        additional_params: Extra detailed-creation parameters (dict).

    Raises:
        LogicException: if a field with this name exists with another type.
            NOTE(review): LogicException is not imported in this module's
            visible imports — if it is not provided elsewhere, raising here
            would itself fail with NameError; confirm.
    """
    if additional_params is None:
        additional_params = {}  # plain literal instead of dict([])
    field = _get_custom_field(connection, cf_name)
    if field is None:
        connection.create_custom_field_detailed(cf_name, cf_type, False, True, auto_attached, additional_params)
        return
    if field.type != cf_type:
        msg = "Custom field with name [ %s ] already exists. It has type [ %s ] instead of [ %s ]" % \
              (utf8encode(cf_name), field.type, cf_type)
        raise LogicException(msg)
def _get_custom_field(connection, cf_name):
    """Return the existing custom field named *cf_name* (case-insensitive
    comparison on the UTF-8-encoded names), or None when absent."""
    wanted = utf8encode(cf_name).lower()
    for item in connection.get_custom_fields():
        if utf8encode(item.name).lower() == wanted:
            return item
    return None
def create_custom_field(connection, cf_type, cf_name, auto_attached, value_names=None, bundle_policy="0"):
    """
    Creates custom field prototype(if not exist) and sets default values bundle if needed
    Args:
        connection: An opened Connection instance.
        cf_type: Type of custom field to be created
        cf_name: Name of custom field that should be created (if not exists)
        auto_attached: If this field should be auto attached or not.
        value_names: Values, that should be attached with this cf by default.
            If None, no bundle is created to this field, if empty, empty bundle is created.
        bundle_policy: ???
    Raises:
        LogicException: If custom field already exists, but has wrong type.
        YouTrackException: If something is wrong with queries.
    """
    # Simple case: no values requested and the field is either not
    # auto-attached or not a multi-value ("[...]"-suffixed) type — just make
    # sure the prototype exists, no bundle needed.
    if (value_names is None) and (not auto_attached or "[" not in cf_type):
        _create_custom_field_prototype(connection, cf_type, cf_name, auto_attached)
        return
    if value_names is None:
        value_names = set([])
    else:
        value_names = set(value_names)
    field = _get_custom_field(connection, cf_name)
    if field is not None:
        if hasattr(field, "defaultBundle"):
            # Reuse the bundle already configured as this field's default.
            bundle = connection.get_bundle(field.type, field.defaultBundle)
        elif field.autoAttached:
            # Auto-attached field without a default bundle: nothing to add.
            return
        else:
            bundle = create_bundle_safe(connection, cf_name + "_bundle", cf_type)
    else:
        # New field: create its bundle first, then the prototype wired to it.
        bundle = create_bundle_safe(connection, cf_name + "_bundle", cf_type)
        _create_custom_field_prototype(connection, cf_type, cf_name, auto_attached,
                                       {"defaultBundle": bundle.name,
                                        "attachBundlePolicy": bundle_policy})
    for value_name in value_names:
        try:
            connection.add_value_to_bundle(bundle, value_name)
        except YouTrackException:
            # Best-effort add: the value most likely already exists.
            pass
#
# values_to_add = calculate_missing_value_names(bundle, value_names)
# [connection.addValueToBundle(bundle, name) for name in values_to_add]
# if field is None:
# bundle_name = cf_name + "_bundle"
# _create_bundle_safe(connection, bundle_name, cf_type)
# bundle = connection.getBundle(cf_type, bundle_name)
# values_to_add = calculate_missing_value_names(bundle, value_names)
#
#
# for value in values_to_add:
# connection.addValueToBundle(bundle, value)
#
#
def process_custom_field(connection, project_id, cf_type, cf_name, value_names=None):
    """
    Create a custom field and attach it to the project. If the custom field
    already exists with type cf_type it is attached to the project; if it has
    another type, LogicException is raised. If the project field already
    exists, its bundle is reused; otherwise a project field and a bundle named
    <cf_name>_bundle_<project_id> are created. Missing value_names are then
    added to the bundle.
    Args:
        connection: An opened Connection instance.
        project_id: Id of the project to attach CF to.
        cf_type: Type of cf to be created.
        cf_name: Name of cf that should be created (if not exists) and attached
            to the project (if not yet attached).
        value_names: Values, that cf must have. If None, does not create any
            bundle for the field. If empty list, creates bundle, but does not
            create any value_names in it. Only values not already present in
            the bundle are added.
    Raises:
        LogicException: If custom field already exists, but has wrong type.
        YouTrackException: If something is wrong with queries.
    """
    _create_custom_field_prototype(connection, cf_type, cf_name)
    # Field types without a trailing "[n]"/"[*]" bundle marker cannot carry
    # enumerated values at all.
    if cf_type[0:-3] not in connection.bundle_types:
        value_names = None
    elif value_names is None:
        value_names = []
    project_fields = [f for f in connection.getProjectCustomFields(project_id)
                      if utf8encode(f.name) == cf_name]
    if project_fields:
        if value_names is None:
            return
        bundle = connection.getBundle(cf_type, project_fields[0].bundle)
    else:
        if value_names is None:
            # Bundle-less field: just attach it with an empty-value label.
            connection.createProjectCustomFieldDetailed(project_id, cf_name, "No " + cf_name)
            return
        bundle = create_bundle_safe(connection, cf_name + "_bundle_" + project_id, cf_type)
        connection.createProjectCustomFieldDetailed(project_id, cf_name, "No " + cf_name,
                                                    params={"bundle": bundle.name})
    for missing in calculate_missing_value_names(bundle, value_names):
        connection.addValueToBundle(bundle, bundle.createElement(missing))
def add_values_to_bundle_safe(connection, bundle, values):
    """
    Add values to the specified bundle, skipping any value that is already
    present (a 409 conflict from the server).
    Args:
        connection: An opened Connection instance.
        bundle: Bundle instance to add values in.
        values: Values, that should be added in bundle.
    Raises:
        YouTrackException: if something is wrong with queries (anything other
            than a 409 "already exists" response).
    """
    for candidate in values:
        try:
            connection.addValueToBundle(bundle, candidate)
        except YouTrackException as e:
            # 409 means the value already exists; report and carry on.
            if e.response.status != 409:
                raise
            print("Value with name [ %s ] already exists in bundle [ %s ]" %
                  (utf8encode(candidate.name), utf8encode(bundle.name)))
def create_bundle_safe(connection, bundle_name, bundle_type):
    """Create a bundle named *bundle_name* of *bundle_type*, tolerating an
    already-existing bundle, and return the (possibly pre-existing) bundle
    fetched back from the server.
    """
    new_bundle = connection.bundle_types[bundle_type[0:-3]](None, None)
    new_bundle.name = bundle_name
    try:
        connection.createBundle(new_bundle)
    except YouTrackException as e:
        # A 409 conflict simply means the bundle exists; reuse it.
        if e.response.status != 409:
            raise
        print("Bundle with name [ %s ] already exists" % bundle_name)
    return connection.getBundle(bundle_type, bundle_name)
def calculate_missing_value_names(bundle, value_names):
    """Return the entries of *value_names* not yet present in *bundle*,
    comparing element names case-insensitively and preserving input order."""
    existing = {element.name.lower() for element in bundle.values}
    return [name for name in value_names if name.lower() not in existing]
class LogicException(Exception):
    """Raised when an existing custom field conflicts with the requested
    configuration (e.g. it already exists with a different type)."""

    def __init__(self, msg):
        super(LogicException, self).__init__(msg)
|
ianlini/flatten-dict
|
src/flatten_dict/flatten_dict.py
|
import inspect
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
import six
from .reducers import tuple_reducer, path_reducer, dot_reducer, underscore_reducer
from .splitters import tuple_splitter, path_splitter, dot_splitter, underscore_splitter
# Built-in key-joining strategies selectable by name in `flatten`.
REDUCER_DICT = {
    "tuple": tuple_reducer,
    "path": path_reducer,
    "dot": dot_reducer,
    "underscore": underscore_reducer,
}
# Built-in key-splitting strategies selectable by name in `unflatten`.
SPLITTER_DICT = {
    "tuple": tuple_splitter,
    "path": path_splitter,
    "dot": dot_splitter,
    "underscore": underscore_splitter,
}
def flatten(
    d,
    reducer="tuple",
    inverse=False,
    max_flatten_depth=None,
    enumerate_types=(),
    keep_empty_types=(),
):
    """Flatten `Mapping` object.

    Parameters
    ----------
    d : dict-like object
        The dict that will be flattened.
    reducer : {'tuple', 'path', 'underscore', 'dot', Callable}
        The key joining method. If a `Callable` is given, the `Callable` will be
        used to reduce.
        'tuple': The resulting key will be tuple of the original keys.
        'path': Use `os.path.join` to join keys.
        'underscore': Use underscores to join keys.
        'dot': Use dots to join keys.
    inverse : bool
        Whether you want invert the resulting key and value.
    max_flatten_depth : Optional[int]
        Maximum depth to merge.
    enumerate_types : Sequence[type]
        Flatten these types using `enumerate`.
        For example, if we set `enumerate_types` to ``(list,)``,
        `list` indices become keys: ``{'a': ['b', 'c']}`` -> ``{('a', 0): 'b', ('a', 1): 'c'}``.
    keep_empty_types : Sequence[type]
        By default, ``flatten({1: 2, 3: {}})`` will give you ``{(1,): 2}``, that is, the key ``3``
        will disappear.
        This is also applied for the types in `enumerate_types`, that is,
        ``flatten({1: 2, 3: []}, enumerate_types=(list,))`` will give you ``{(1,): 2}``.
        If you want to keep those empty values, you can specify the types in `keep_empty_types`:

        >>> flatten({1: 2, 3: {}}, keep_empty_types=(dict,))
        {(1,): 2, (3,): {}}

    Returns
    -------
    flat_dict : dict
    """
    enumerate_types = tuple(enumerate_types)
    flattenable_types = (Mapping,) + enumerate_types
    if not isinstance(d, flattenable_types):
        raise ValueError(
            # Fixed typo in the error message: "flattenalbe" -> "flattenable".
            "argument type %s is not in the flattenable types %s"
            % (type(d), flattenable_types)
        )
    # check max_flatten_depth
    if max_flatten_depth is not None and max_flatten_depth < 1:
        raise ValueError("max_flatten_depth should not be less than 1.")
    if isinstance(reducer, str):
        reducer = REDUCER_DICT[reducer]
    try:
        # Python 3: reducers may optionally take the parent object as a
        # third parameter.
        reducer_accepts_parent_obj = len(inspect.signature(reducer).parameters) == 3
    except AttributeError:
        # Python 2
        reducer_accepts_parent_obj = len(inspect.getargspec(reducer)[0]) == 3
    flat_dict = {}

    def _flatten(_d, depth, parent=None):
        # Returns True when _d yielded at least one item, so the caller can
        # decide whether the (empty) value should be kept via keep_empty_types.
        key_value_iterable = (
            enumerate(_d) if isinstance(_d, enumerate_types) else six.viewitems(_d)
        )
        has_item = False
        for key, value in key_value_iterable:
            has_item = True
            if reducer_accepts_parent_obj:
                flat_key = reducer(parent, key, _d)
            else:
                flat_key = reducer(parent, key)
            if isinstance(value, flattenable_types) and (
                max_flatten_depth is None or depth < max_flatten_depth
            ):
                # recursively build the result
                has_child = _flatten(value, depth=depth + 1, parent=flat_key)
                if has_child or not isinstance(value, keep_empty_types):
                    # ignore the key in this level because it already has child key
                    # or its value is empty
                    continue
            # add an item to the result
            if inverse:
                flat_key, value = value, flat_key
            if flat_key in flat_dict:
                raise ValueError("duplicated key '{}'".format(flat_key))
            flat_dict[flat_key] = value
        return has_item

    _flatten(d, depth=1)
    return flat_dict
def nested_set_dict(d, keys, value):
    """Set *value* inside nested dict *d* at the position given by *keys*.

    Intermediate dicts are created via ``setdefault`` as needed. A
    ValueError is raised when the final key is already present.

    Parameters
    ----------
    d : Mapping
    keys : Sequence[str]
    value : Any
    """
    assert keys
    # Walk (and create) the intermediate levels iteratively.
    for parent_key in keys[:-1]:
        d = d.setdefault(parent_key, {})
    leaf = keys[-1]
    if leaf in d:
        raise ValueError("duplicated key '{}'".format(leaf))
    d[leaf] = value
def unflatten(d, splitter="tuple", inverse=False):
    """Unflatten dict-like object.

    Parameters
    ----------
    d : dict-like object
        The dict that will be unflattened.
    splitter : {'tuple', 'path', 'underscore', 'dot', Callable}
        The key splitting method. If a Callable is given, the Callable will be
        used to split `d`.
        'tuple': Use each element in the tuple key as the key of the unflattened dict.
        'path': Use `pathlib.Path.parts` to split keys.
        'underscore': Use underscores to split keys.
        'dot': Use dots to split keys.
    inverse : bool
        Whether you want to invert the key and value before flattening.

    Returns
    -------
    unflattened_dict : dict
    """
    if isinstance(splitter, str):
        # Resolve a named strategy to its splitter function.
        splitter = SPLITTER_DICT[splitter]
    unflattened_dict = {}
    for flat_key, value in six.viewitems(d):
        if inverse:
            # Swap key and value before rebuilding the nested structure.
            flat_key, value = value, flat_key
        key_tuple = splitter(flat_key)
        # Raises ValueError on duplicated leaf keys.
        nested_set_dict(unflattened_dict, key_tuple, value)
    return unflattened_dict
|
bumshakabum/Kim_CSCI2270_FinalProject
|
websiteParser.py
|
from IPython.display import HTML
from bs4 import BeautifulSoup
import urllib

# Scrape the Super Smash Bros. 4 tier list from eventhubs.com and write
# one "tier,character" line per character to chars.txt.
r = urllib.urlopen('http://www.eventhubs.com/tiers/ssb4/').read()
soup = BeautifulSoup(r, "lxml")
characters = soup.find_all("td", class_="tierstdnorm")

tier = None
tierCharList = []
# Each table row spans 12 cells: cell 1 holds the tier score, cell 2 the
# character name, and the rest are ignored. (The original code special-cased
# count == 1 and count == 2, but those are already covered by the modulo
# conditions, so the branches are merged here.)
for count, element in enumerate(characters, start=1):
    if count % 12 == 1:
        tier = element.get_text()
    elif count % 12 == 2:
        character = element.get_text()
        tierCharList.append(tier + "," + character)

# The final collected entry is a spurious trailing row; drop it.
tierCharList.pop()

# Use a context manager so the file is flushed and closed even on error
# (the original left the handle open).
with open('chars.txt', 'w') as f:
    for entry in tierCharList:
        f.write(entry)
        f.write("\n")
|
beetbox/beets
|
beets/util/__init__.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Miscellaneous utility functions."""
import os
import sys
import errno
import locale
import re
import tempfile
import shutil
import fnmatch
import functools
from collections import Counter, namedtuple
from multiprocessing.pool import ThreadPool
import traceback
import subprocess
import platform
import shlex
from beets.util import hidden
from unidecode import unidecode
from enum import Enum
# Longest allowed length for a single path component; enforced by
# `truncate_path` and `legalize_path`.
MAX_FILENAME_LENGTH = 200
# Windows extended-length path prefix (\\?\); added and stripped by
# `syspath` and `bytestring_path`.
WINDOWS_MAGIC_PREFIX = '\\\\?\\'
class HumanReadableException(Exception):
    """An Exception carrying a human-readable error message that can be
    logged without a traceback, while optionally preserving one for
    debugging.

    Has at least two fields: `reason`, the underlying exception or a
    string describing the problem; and `verb`, the action being
    performed during the error.

    If `tb` is provided, it is a string containing a traceback for the
    associated exception. (Note that this is not necessary in Python 3.x
    and should be removed when we make the transition.)
    """
    # Human-readable description of error type.
    error_kind = 'Error'

    def __init__(self, reason, verb, tb=None):
        self.reason = reason
        self.verb = verb
        self.tb = tb
        super().__init__(self.get_message())

    def _gerund(self):
        """Generate a (likely) gerund form of the English verb."""
        # Multi-word verbs are used as-is.
        if ' ' in self.verb:
            return self.verb
        stem = self.verb[:-1] if self.verb.endswith('e') else self.verb
        return stem + 'ing'

    def _reasonstr(self):
        """Get the reason as a string."""
        if isinstance(self.reason, str):
            return self.reason
        if isinstance(self.reason, bytes):
            return self.reason.decode('utf-8', 'ignore')
        if hasattr(self.reason, 'strerror'):  # i.e., EnvironmentError
            return self.reason.strerror
        return '"{}"'.format(str(self.reason))

    def get_message(self):
        """Create the human-readable description of the error, sans
        introduction. Subclasses must override this.
        """
        raise NotImplementedError

    def log(self, logger):
        """Log to the provided `logger` a human-readable message as an
        error and a verbose traceback as a debug message.
        """
        if self.tb:
            logger.debug(self.tb)
        logger.error('{0}: {1}', self.error_kind, self.args[0])
class FilesystemError(HumanReadableException):
    """An error that occurred while performing a filesystem manipulation
    via a function in this module. The `paths` field is a sequence of
    pathnames involved in the operation.
    """

    def __init__(self, reason, verb, paths, tb=None):
        self.paths = paths
        super().__init__(reason, verb, tb)

    def get_message(self):
        # Pick an English phrasing appropriate for the specific verb.
        gerund = self._gerund()
        if self.verb in ('move', 'copy', 'rename'):
            # Two-path operations: "while moving A to B".
            clause = 'while {} {} to {}'.format(
                gerund,
                displayable_path(self.paths[0]),
                displayable_path(self.paths[1])
            )
        elif self.verb in ('delete', 'write', 'create', 'read'):
            # Single-path operations: "while deleting A".
            clause = 'while {} {}'.format(
                gerund,
                displayable_path(self.paths[0])
            )
        else:
            # Generic fallback listing all paths.
            clause = 'during {} of paths {}'.format(
                self.verb, ', '.join(displayable_path(p) for p in self.paths)
            )
        return f'{self._reasonstr()} {clause}'
class MoveOperation(Enum):
    """The file operations that e.g. various move functions can carry out.

    The names mirror the module-level helpers (`move`, `copy`, `link`,
    `hardlink`, `reflink`).
    """
    MOVE = 0
    COPY = 1
    LINK = 2
    HARDLINK = 3
    REFLINK = 4
    # NOTE(review): presumably `reflink` with fallback-to-copy enabled --
    # confirm at the call sites that dispatch on this enum.
    REFLINK_AUTO = 5
def normpath(path):
    """Provide the canonical form of the path suitable for storing in
    the database.
    """
    # Strip any OS-specific prefix, expand "~", absolutize, normalize, and
    # finally return the result as a bytestring.
    expanded = os.path.expanduser(syspath(path, prefix=False))
    return bytestring_path(os.path.normpath(os.path.abspath(expanded)))
def ancestry(path):
    """Return a list consisting of path's parent directory, its
    grandparent, and so on. For instance:

       >>> ancestry('/a/b/c')
       ['/', '/a', '/a/b']

    The argument should *not* be the result of a call to `syspath`.
    """
    ancestors = []
    previous = None
    current = os.path.dirname(path)
    # Walk upward until dirname stops changing (filesystem root) or
    # becomes empty (relative path exhausted).
    while current and current != previous:
        ancestors.append(current)
        previous = current
        current = os.path.dirname(current)
    ancestors.reverse()
    return ancestors
def sorted_walk(path, ignore=(), ignore_hidden=False, logger=None):
    """Like `os.walk`, but yields things in case-insensitive sorted,
    breadth-first order. Directory and file names matching any glob
    pattern in `ignore` are skipped. If `logger` is provided, then
    warning messages are logged there when a directory cannot be listed.

    Yields (path, dirs, files) triples where all three elements are
    bytestrings.
    """
    # Make sure the pathes aren't Unicode strings.
    path = bytestring_path(path)
    ignore = [bytestring_path(i) for i in ignore]
    # Get all the directories and files at this level.
    try:
        contents = os.listdir(syspath(path))
    except OSError as exc:
        # Unlistable directory: warn (when possible) and yield nothing.
        if logger:
            logger.warning('could not list directory {}: {}'.format(
                displayable_path(path), exc.strerror
            ))
        return
    dirs = []
    files = []
    for base in contents:
        base = bytestring_path(base)
        # Skip ignored filenames.
        skip = False
        for pat in ignore:
            if fnmatch.fnmatch(base, pat):
                if logger:
                    logger.debug('ignoring {} due to ignore rule {}'.format(
                        base, pat
                    ))
                skip = True
                break
        if skip:
            continue
        # Add to output as either a file or a directory.
        cur = os.path.join(path, base)
        # Hidden entries are only filtered when ignore_hidden is set.
        if (ignore_hidden and not hidden.is_hidden(cur)) or not ignore_hidden:
            if os.path.isdir(syspath(cur)):
                dirs.append(base)
            else:
                files.append(base)
    # Sort lists (case-insensitive) and yield the current level.
    dirs.sort(key=bytes.lower)
    files.sort(key=bytes.lower)
    yield (path, dirs, files)
    # Recurse into directories.
    for base in dirs:
        cur = os.path.join(path, base)
        # yield from sorted_walk(...)
        yield from sorted_walk(cur, ignore, ignore_hidden, logger)
def path_as_posix(path):
    """Return the string representation of the path with forward (/)
    slashes.

    `path` is a bytestring.
    """
    # Single-pass byte translation of backslashes to forward slashes.
    return path.translate(bytes.maketrans(b'\\', b'/'))
def mkdirall(path):
    """Make all the enclosing directories of path (like mkdir -p on the
    parent).

    Raises FilesystemError if any directory cannot be created.
    """
    for ancestor in ancestry(path):
        sys_ancestor = syspath(ancestor)
        if os.path.isdir(sys_ancestor):
            continue
        try:
            os.mkdir(sys_ancestor)
        except OSError as exc:
            raise FilesystemError(exc, 'create', (ancestor,),
                                  traceback.format_exc())
def fnmatch_all(names, patterns):
    """Determine whether all strings in `names` match at least one of
    the `patterns`, which should be shell glob expressions.
    """
    # Vacuously true for an empty `names`; false as soon as one name has
    # no matching pattern (including when `patterns` is empty).
    return all(
        any(fnmatch.fnmatch(name, pattern) for pattern in patterns)
        for name in names
    )
def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
    """If path is an empty directory, then remove it. Recursively remove
    path's ancestry up to root (which is never removed) where there are
    empty directories. If path is not contained in root, then nothing is
    removed. Glob patterns in clutter are ignored when determining
    emptiness. If root is not provided, then only path may be removed
    (i.e., no recursive removal).
    """
    path = normpath(path)
    if root is not None:
        root = normpath(root)

    ancestors = ancestry(path)
    if root is None:
        # Only remove the top directory.
        ancestors = []
    elif root in ancestors:
        # Only remove directories below the root.
        ancestors = ancestors[ancestors.index(root) + 1:]
    else:
        # Remove nothing.
        return

    # Convert the clutter patterns to bytestrings once. (This conversion
    # is loop-invariant; the old code redundantly redid it on every
    # iteration of the loop below.)
    clutter = [bytestring_path(c) for c in clutter]

    # Traverse upward from path.
    ancestors.append(path)
    ancestors.reverse()
    for directory in ancestors:
        directory = syspath(directory)
        if not os.path.exists(directory):
            # Directory gone already.
            continue
        match_paths = [bytestring_path(d) for d in os.listdir(directory)]
        try:
            if fnmatch_all(match_paths, clutter):
                # Directory contains only clutter (or nothing).
                shutil.rmtree(directory)
            else:
                # Stop as soon as a non-empty ancestor is found.
                break
        except OSError:
            break
def components(path):
    """Return a list of the path components in path. For instance:

       >>> components('/a/b/c')
       ['a', 'b', 'c']

    The argument should *not* be the result of a call to `syspath`.
    """
    parts = []
    for ancestor in ancestry(path):
        base = os.path.basename(ancestor)
        # The filesystem root has an empty basename; keep the root itself.
        parts.append(base if base else ancestor)
    leaf = os.path.basename(path)
    if leaf:
        parts.append(leaf)
    return parts
def arg_encoding():
    """Get the encoding for command-line arguments (and other OS
    locale-sensitive strings).
    """
    try:
        encoding = locale.getdefaultlocale()[1]
    except ValueError:
        # Invalid locale environment variable setting. To avoid
        # failing entirely for no good reason, assume UTF-8.
        return 'utf-8'
    # The locale may legitimately report no encoding; default to UTF-8.
    return encoding or 'utf-8'
def _fsencoding():
"""Get the system's filesystem encoding. On Windows, this is always
UTF-8 (not MBCS).
"""
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
if encoding == 'mbcs':
# On Windows, a broken encoding known to Python as "MBCS" is
# used for the filesystem. However, we only use the Unicode API
# for Windows paths, so the encoding is actually immaterial so
# we can avoid dealing with this nastiness. We arbitrarily
# choose UTF-8.
encoding = 'utf-8'
return encoding
def bytestring_path(path):
    """Given a path, which is either a bytes or a unicode, returns a str
    path (ensuring that we never deal with Unicode pathnames).

    NOTE(review): despite the wording above, the value returned on
    Python 3 is a ``bytes`` path.
    """
    # Pass through bytestrings.
    if isinstance(path, bytes):
        return path
    # On Windows, remove the magic prefix added by `syspath`. This makes
    # ``bytestring_path(syspath(X)) == X``, i.e., we can safely
    # round-trip through `syspath`.
    if os.path.__name__ == 'ntpath' and path.startswith(WINDOWS_MAGIC_PREFIX):
        path = path[len(WINDOWS_MAGIC_PREFIX):]
    # Try to encode with default encodings, but fall back to utf-8.
    try:
        return path.encode(_fsencoding())
    except (UnicodeError, LookupError):
        # LookupError covers an unrecognized codec name from the environment.
        return path.encode('utf-8')
# The OS path separator as a bytestring (b'/' on POSIX, b'\\' on Windows).
PATH_SEP = bytestring_path(os.sep)
def displayable_path(path, separator='; '):
    """Attempts to decode a bytestring path to a unicode object for the
    purpose of displaying it to the user. If the `path` argument is a
    list or a tuple, the elements are joined with `separator`.
    """
    if isinstance(path, (list, tuple)):
        # Render each element recursively and join.
        return separator.join(displayable_path(p) for p in path)
    if isinstance(path, str):
        return path
    if not isinstance(path, bytes):
        # A non-string object: just get its unicode representation.
        return str(path)
    # A bytestring: decode leniently, preferring the filesystem encoding.
    try:
        return path.decode(_fsencoding(), 'ignore')
    except (UnicodeError, LookupError):
        return path.decode('utf-8', 'ignore')
def syspath(path, prefix=True):
    """Convert a path for use by the operating system. In particular,
    paths on Windows must receive a magic prefix and must be converted
    to Unicode before they are sent to the OS. To disable the magic
    prefix on Windows, set `prefix` to False---but only do this if you
    *really* know what you're doing.

    On non-Windows platforms the path is returned unchanged.
    """
    # Don't do anything if we're not on windows
    if os.path.__name__ != 'ntpath':
        return path
    if not isinstance(path, str):
        # Beets currently represents Windows paths internally with UTF-8
        # arbitrarily. But earlier versions used MBCS because it is
        # reported as the FS encoding by Windows. Try both.
        try:
            path = path.decode('utf-8')
        except UnicodeError:
            # The encoding should always be MBCS, Windows' broken
            # Unicode representation.
            encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
            path = path.decode(encoding, 'replace')
    # Add the magic prefix if it isn't already there.
    # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
    if prefix and not path.startswith(WINDOWS_MAGIC_PREFIX):
        if path.startswith('\\\\'):
            # UNC path. Final path should look like \\?\UNC\...
            path = 'UNC' + path[1:]
        path = WINDOWS_MAGIC_PREFIX + path
    return path
def samefile(p1, p2):
    """Safer equality for paths."""
    # Identical path values are trivially the same file; otherwise defer
    # to shutil's device/inode comparison on the OS-ready paths.
    return p1 == p2 or shutil._samefile(syspath(p1), syspath(p2))
def remove(path, soft=True):
    """Remove the file. If `soft`, then no error will be raised if the
    file does not exist.

    Raises FilesystemError if the deletion fails.
    """
    target = syspath(path)
    # In soft mode a missing file is silently tolerated.
    if not soft or os.path.exists(target):
        try:
            os.remove(target)
        except OSError as exc:
            raise FilesystemError(exc, 'delete', (target,),
                                  traceback.format_exc())
def copy(path, dest, replace=False):
    """Copy a plain file. Permissions are not copied. If `dest` already
    exists, raises a FilesystemError unless `replace` is True. Has no
    effect if `path` is the same as `dest`. Paths are translated to
    system paths before the syscall.
    """
    if samefile(path, dest):
        return
    sys_path = syspath(path)
    sys_dest = syspath(dest)
    if not replace and os.path.exists(sys_dest):
        raise FilesystemError('file exists', 'copy', (sys_path, sys_dest))
    try:
        # copyfile (not copy2) deliberately skips metadata/permissions.
        shutil.copyfile(sys_path, sys_dest)
    except OSError as exc:
        raise FilesystemError(exc, 'copy', (sys_path, sys_dest),
                              traceback.format_exc())
def move(path, dest, replace=False):
    """Rename a file. `dest` may not be a directory. If `dest` already
    exists, raises an OSError unless `replace` is True. Has no effect if
    `path` is the same as `dest`. If the paths are on different
    filesystems (or the rename otherwise fails), a copy is attempted
    instead, in which case metadata will *not* be preserved. Paths are
    translated to system paths.

    Raises FilesystemError on failure.
    """
    if os.path.isdir(syspath(path)):
        raise FilesystemError(u'source is directory', 'move', (path, dest))
    if os.path.isdir(syspath(dest)):
        raise FilesystemError(u'destination is directory', 'move',
                              (path, dest))
    if samefile(path, dest):
        return
    if os.path.exists(syspath(dest)) and not replace:
        raise FilesystemError('file exists', 'rename', (path, dest))

    # First, try renaming the file.
    try:
        os.replace(syspath(path), syspath(dest))
    except OSError:
        # Copy the file to a temporary destination (same directory as the
        # target, so the final rename below stays on one filesystem).
        basename = os.path.basename(bytestring_path(dest))
        dirname = os.path.dirname(bytestring_path(dest))
        tmp = tempfile.NamedTemporaryFile(
            suffix=syspath(b'.beets', prefix=False),
            prefix=syspath(b'.' + basename, prefix=False),
            dir=syspath(dirname),
            delete=False,
        )
        try:
            with open(syspath(path), 'rb') as f:
                shutil.copyfileobj(f, tmp)
        finally:
            tmp.close()
        # Move the copied file into place.
        try:
            os.replace(tmp.name, syspath(dest))
            tmp = None
            os.remove(syspath(path))
        except OSError as exc:
            raise FilesystemError(exc, 'move', (path, dest),
                                  traceback.format_exc())
        finally:
            if tmp is not None:
                # Clean up the stray temporary file. BUG FIX: the old
                # code called os.remove(tmp), passing the file *object*
                # instead of its path; os.remove needs tmp.name.
                os.remove(tmp.name)
def link(path, dest, replace=False):
    """Create a symbolic link from path to `dest`. Raises an OSError if
    `dest` already exists, unless `replace` is True. Does nothing if
    `path` == `dest`.
    """
    if samefile(path, dest):
        return
    if os.path.exists(syspath(dest)) and not replace:
        raise FilesystemError('file exists', 'rename', (path, dest))
    try:
        os.symlink(syspath(path), syspath(dest))
    except NotImplementedError:
        # raised on python >= 3.2 and Windows versions before Vista
        # BUG FIX: a missing comma previously concatenated the message
        # with the 'link' verb string, shifting every later argument of
        # FilesystemError by one position.
        raise FilesystemError('OS does not support symbolic links.',
                              'link', (path, dest), traceback.format_exc())
    except OSError as exc:
        # TODO: Windows version checks can be removed for python 3
        # BUG FIX: hasattr was previously called on the *string* 'sys'
        # rather than the sys module, so this branch could never fire.
        if hasattr(sys, 'getwindowsversion'):
            if sys.getwindowsversion()[0] < 6:  # is before Vista
                exc = 'OS does not support symbolic links.'
        raise FilesystemError(exc, 'link', (path, dest),
                              traceback.format_exc())
def hardlink(path, dest, replace=False):
    """Create a hard link from path to `dest`. Raises an OSError if
    `dest` already exists, unless `replace` is True. Does nothing if
    `path` == `dest`.
    """
    if samefile(path, dest):
        return
    if os.path.exists(syspath(dest)) and not replace:
        raise FilesystemError('file exists', 'rename', (path, dest))
    try:
        os.link(syspath(path), syspath(dest))
    except NotImplementedError:
        # BUG FIX: a missing comma previously fused this message with the
        # 'link' verb string, shifting the remaining FilesystemError
        # arguments by one position.
        raise FilesystemError('OS does not support hard links.',
                              'link', (path, dest), traceback.format_exc())
    except OSError as exc:
        if exc.errno == errno.EXDEV:
            # Hard links cannot span filesystems/devices.
            # BUG FIX: same missing-comma problem as above.
            raise FilesystemError('Cannot hard link across devices.',
                                  'link', (path, dest),
                                  traceback.format_exc())
        else:
            raise FilesystemError(exc, 'link', (path, dest),
                                  traceback.format_exc())
def reflink(path, dest, replace=False, fallback=False):
    """Create a reflink from `dest` to `path`.

    Raise an `OSError` if `dest` already exists, unless `replace` is
    True. If `path` == `dest`, then do nothing.

    If reflinking fails and `fallback` is enabled, try copying the file
    instead. Otherwise, raise an error without trying a plain copy.

    May raise an `ImportError` if the `reflink` module is not available.
    """
    # Imported lazily so the third-party dependency is only needed when
    # reflinks are actually requested.
    import reflink as pyreflink
    if samefile(path, dest):
        return
    if os.path.exists(syspath(dest)) and not replace:
        raise FilesystemError('file exists', 'rename', (path, dest))
    try:
        pyreflink.reflink(path, dest)
    except (NotImplementedError, pyreflink.ReflinkImpossibleError):
        # OS or filesystem cannot clone; fall back to a plain copy only
        # when the caller opted in.
        if fallback:
            copy(path, dest, replace)
        else:
            raise FilesystemError('OS/filesystem does not support reflinks.',
                                  'link', (path, dest), traceback.format_exc())
def unique_path(path):
    """Returns a version of ``path`` that does not exist on the
    filesystem. Specifically, if ``path`` itself already exists, then
    something unique is appended to the path.

    ``path`` is a bytestring path.
    """
    if not os.path.exists(syspath(path)):
        return path

    base, ext = os.path.splitext(path)
    match = re.search(br'\.(\d+)$', base)
    if match:
        # Continue numbering from an existing ``.N`` suffix. BUG FIX: the
        # old pattern ``\.(\d)+$`` captured only the *last* digit of a
        # multi-digit counter (e.g. ``.12`` yielded 2, not 12), which
        # restarted the numbering.
        num = int(match.group(1))
        base = base[:match.start()]
    else:
        num = 0
    while True:
        num += 1
        suffix = f'.{num}'.encode() + ext
        new_path = base + suffix
        if not os.path.exists(new_path):
            return new_path
# Note: The Windows "reserved characters" are, of course, allowed on
# Unix. They are forbidden here because they cause problems on Samba
# shares, which are sufficiently common as to cause frequent problems.
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
# Each entry pairs a compiled pattern with its replacement string; they are
# applied in order to every path component by `sanitize_path`.
CHAR_REPLACE = [
    (re.compile(r'[\\/]'), '_'),  # / and \ -- forbidden everywhere.
    (re.compile(r'^\.'), '_'),  # Leading dot (hidden files on Unix).
    (re.compile(r'[\x00-\x1f]'), ''),  # Control characters.
    (re.compile(r'[<>:"\?\*\|]'), '_'),  # Windows "reserved characters".
    (re.compile(r'\.$'), '_'),  # Trailing dots.
    (re.compile(r'\s+$'), ''),  # Trailing whitespace.
]
def sanitize_path(path, replacements=None):
    """Takes a path (as a Unicode string) and makes sure that it is
    legal. Returns a new path. Only works with fragments; won't work
    reliably on Windows when a path begins with a drive letter. Path
    separators (including altsep!) should already be cleaned from the
    path components. If replacements is specified, it is used *instead*
    of the default set of replacements; it must be a list of (compiled
    regex, replacement string) pairs.
    """
    rules = replacements or CHAR_REPLACE
    comps = components(path)
    if not comps:
        return ''

    def _clean(component):
        # Apply every (regex, replacement) rule in order to one component.
        for regex, repl in rules:
            component = regex.sub(repl, component)
        return component

    return os.path.join(*[_clean(comp) for comp in comps])
def truncate_path(path, length=MAX_FILENAME_LENGTH):
    """Given a bytestring path or a Unicode path fragment, truncate the
    components to a legal length. In the last component, the extension
    is preserved.
    """
    comps = components(path)
    truncated = [comp[:length] for comp in comps]
    # Re-truncate the final component so its extension survives intact.
    stem, ext = os.path.splitext(comps[-1])
    if ext:
        truncated[-1] = stem[:length - len(ext)] + ext
    return os.path.join(*truncated)
def _legalize_stage(path, replacements, length, extension, fragment):
    """Perform a single round of path legalization steps
    (sanitation/replacement, encoding from Unicode to bytes,
    extension-appending, and truncation). Return the path (Unicode if
    `fragment` is set, `bytes` otherwise) and whether truncation was
    required.
    """
    # Perform an initial sanitization including user replacements.
    legalized = sanitize_path(path, replacements)
    # Encode for the filesystem, unless a Unicode fragment was requested.
    if not fragment:
        legalized = bytestring_path(legalized)
    # Preserve extension.
    legalized += extension.lower()
    # Truncate too-long components; report whether anything was cut.
    truncated = truncate_path(legalized, length)
    return truncated, truncated != legalized
def legalize_path(path, replacements, length, extension, fragment):
    """Given a path-like Unicode string, produce a legal path. Return
    the path and a flag indicating whether some replacements had to be
    ignored (see below).

    The legalization process (see `_legalize_stage`) consists of
    applying the sanitation rules in `replacements`, encoding the string
    to bytes (unless `fragment` is set), truncating components to
    `length`, appending the `extension`.

    This function performs up to three calls to `_legalize_stage` in
    case truncation conflicts with replacements (as can happen when
    truncation creates whitespace at the end of the string, for
    example). The limited number of iterations iterations avoids the
    possibility of an infinite loop of sanitation and truncation
    operations, which could be caused by replacement rules that make the
    string longer. The flag returned from this function indicates that
    the path has to be truncated twice (indicating that replacements
    made the string longer again after it was truncated); the
    application should probably log some sort of warning.
    """
    if fragment:
        # Outputting Unicode.
        extension = extension.decode('utf-8', 'ignore')
    first_stage_path, _ = _legalize_stage(
        path, replacements, length, extension, fragment
    )
    # Convert back to Unicode with extension removed.
    first_stage_path, _ = os.path.splitext(displayable_path(first_stage_path))
    # Re-sanitize following truncation (including user replacements).
    second_stage_path, retruncated = _legalize_stage(
        first_stage_path, replacements, length, extension, fragment
    )
    # If the path was once again truncated, discard user replacements
    # and run through one last legalization stage.
    if retruncated:
        second_stage_path, _ = _legalize_stage(
            first_stage_path, None, length, extension, fragment
        )
    return second_stage_path, retruncated
def py3_path(path):
    """Convert a bytestring path to Unicode on Python 3 only. On Python
    2, return the bytestring path unchanged.

    This helps deal with APIs on Python 3 that *only* accept Unicode
    (i.e., `str` objects). I philosophically disagree with this
    decision, because paths are sadly bytes on Unix, but that's the way
    it is. So this function helps us "smuggle" the true bytes data
    through APIs that took Python 3's Unicode mandate too seriously.
    """
    if isinstance(path, bytes):
        # fsdecode round-trips undecodable bytes via surrogateescape.
        return os.fsdecode(path)
    assert isinstance(path, str)
    return path
def str2bool(value):
    """Returns a boolean reflecting a human-entered string."""
    # Case-insensitive membership in the accepted "truthy" spellings.
    return value.lower() in {'yes', '1', 'true', 't', 'y'}
def as_string(value):
    """Convert a value to a Unicode object for matching with a query.
    None becomes the empty string. Bytestrings are silently decoded.
    """
    if value is None:
        return ''
    # Normalize memoryviews to bytes, then decode any byte string.
    if isinstance(value, memoryview):
        value = bytes(value)
    if isinstance(value, bytes):
        return value.decode('utf-8', 'ignore')
    return str(value)
def text_string(value, encoding='utf-8'):
    """Convert a string, which can either be bytes or unicode, to
    unicode.

    Text (unicode) is left untouched; bytes are decoded. This is useful
    to convert from a "native string" (bytes on Python 2, str on Python
    3) to a consistently unicode value.
    """
    return value.decode(encoding) if isinstance(value, bytes) else value
def plurality(objs):
    """Given a sequence of hashable objects, return a ``(object, count)``
    pair for the object that occurs most often.

    Raises:
        ValueError: if the sequence is empty.
    """
    counts = Counter(objs)
    if not counts:
        raise ValueError('sequence must be non-empty')
    # most_common(1) yields a single (object, count) pair.
    (winner, freq), = counts.most_common(1)
    return winner, freq
def cpu_count():
    """Return the number of hardware thread contexts (cores or SMT
    threads) in the system.

    Falls back to 1 whenever the platform-specific probe fails or
    reports a nonsensical value.
    """
    # Adapted from the soundconverter project:
    # https://github.com/kassoulet/soundconverter
    if sys.platform == 'win32':
        # Windows publishes the count in an environment variable.
        try:
            num = int(os.environ['NUMBER_OF_PROCESSORS'])
        except (ValueError, KeyError):
            num = 0
    elif sys.platform == 'darwin':
        # macOS: ask sysctl for the hardware CPU count.
        try:
            num = int(command_output([
                '/usr/sbin/sysctl',
                '-n',
                'hw.ncpu',
            ]).stdout)
        except (ValueError, OSError, subprocess.CalledProcessError):
            num = 0
    else:
        # Other POSIX: processors currently online.
        try:
            num = os.sysconf('SC_NPROCESSORS_ONLN')
        except (ValueError, OSError, AttributeError):
            num = 0
    if num >= 1:
        return num
    else:
        # The probe failed; assume a single CPU.
        return 1
def convert_command_args(args):
    """Convert command arguments to bytestrings on Python 2 and
    surrogate-escaped strings on Python 3.
    """
    assert isinstance(args, list)
    def _as_text(arg):
        # Decode raw bytes with the argument encoding; text passes through.
        if isinstance(arg, bytes):
            return arg.decode(arg_encoding(), 'surrogateescape')
        return arg
    return [_as_text(arg) for arg in args]
# stdout and stderr as bytes, captured from a finished subprocess.
CommandOutput = namedtuple("CommandOutput", ("stdout", "stderr"))
def command_output(cmd, shell=False):
    """Runs the command and returns its output after it has exited.
    Returns a CommandOutput. The attributes ``stdout`` and ``stderr`` contain
    byte strings of the respective output streams.
    ``cmd`` is a list of arguments starting with the command names. The
    arguments are bytes on Unix and strings on Windows.
    If ``shell`` is true, ``cmd`` is assumed to be a string and passed to a
    shell to execute.
    If the process exits with a non-zero return code
    ``subprocess.CalledProcessError`` is raised. May also raise
    ``OSError``.
    This replaces `subprocess.check_output` which can have problems if lots of
    output is sent to stderr.
    """
    cmd = convert_command_args(cmd)
    try:  # python >= 3.3
        devnull = subprocess.DEVNULL
    except AttributeError:
        # Older Pythons: emulate DEVNULL with a handle on os.devnull.
        devnull = open(os.devnull, 'r+b')
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=devnull,
        # Closing inherited fds is unsupported/unneeded on Windows.
        close_fds=platform.system() != 'Windows',
        shell=shell
    )
    # communicate() drains both pipes concurrently, avoiding the
    # deadlock check_output can hit with large stderr output.
    stdout, stderr = proc.communicate()
    if proc.returncode:
        raise subprocess.CalledProcessError(
            returncode=proc.returncode,
            cmd=' '.join(cmd),
            output=stdout + stderr,
        )
    return CommandOutput(stdout, stderr)
def max_filename_length(path, limit=MAX_FILENAME_LENGTH):
    """Attempt to determine the maximum filename length for the
    filesystem containing `path`. If the value is greater than `limit`,
    then `limit` is used instead (to prevent errors when a filesystem
    misreports its capacity). If it cannot be determined (e.g., on
    Windows), return `limit`.
    """
    if not hasattr(os, 'statvfs'):
        # No statvfs() on this platform (e.g. Windows): use the cap.
        return limit
    try:
        stats = os.statvfs(path)
    except OSError:
        return limit
    # Index 9 of the statvfs result is f_namemax.
    return min(stats[9], limit)
def open_anything():
    """Return the system command that dispatches execution to the correct
    program.
    """
    launchers = {'Darwin': 'open', 'Windows': 'start'}
    # Any other platform is assumed to be a Unix with xdg-utils.
    return launchers.get(platform.system(), 'xdg-open')
def editor_command():
    """Get a command for opening a text file.

    Use the `EDITOR` environment variable by default, falling back to
    `open_anything()`, the platform-specific tool for opening files in
    general, when the variable is unset or empty.
    """
    return os.environ.get('EDITOR') or open_anything()
def interactive_open(targets, command):
    """Open the files in `targets` by `exec`ing a new `command`, given
    as a Unicode string. (The new program takes over, and Python
    execution ends: this does not fork a subprocess.)
    Can raise `OSError`.
    """
    assert command
    # Split the command string into its arguments.
    try:
        args = shlex.split(command)
    except ValueError:  # Malformed shell tokens.
        args = [command]
    # execlp expects the program name twice: once as the file to look
    # up on PATH, and once more as argv[0] of the new process.
    args.insert(0, args[0])  # for argv[0]
    args += targets
    return os.execlp(*args)
def _windows_long_path_name(short_path):
    """Use Windows' `GetLongPathNameW` via ctypes to get the canonical,
    long path given a short filename.
    """
    if not isinstance(short_path, str):
        # The W-suffixed API requires a Unicode string.
        short_path = short_path.decode(_fsencoding())
    import ctypes
    # 260 == MAX_PATH, the classic Windows path-length limit.
    buf = ctypes.create_unicode_buffer(260)
    get_long_path_name_w = ctypes.windll.kernel32.GetLongPathNameW
    return_value = get_long_path_name_w(short_path, buf, 260)
    if return_value == 0 or return_value > 260:
        # An error occurred (0) or the buffer was too small (> 260);
        # fall back to the input unchanged.
        return short_path
    else:
        long_path = buf.value
        # GetLongPathNameW does not change the case of the drive
        # letter.
        if len(long_path) > 1 and long_path[1] == ':':
            long_path = long_path[0].upper() + long_path[1:]
        return long_path
def case_sensitive(path):
    """Check whether the filesystem at the given path is case sensitive.
    To work best, the path should point to a file or a directory. If the path
    does not exist, assume a case sensitive file system on every platform
    except Windows.

    Returns a bool.
    """
    # A fallback in case the path does not exist.
    if not os.path.exists(syspath(path)):
        # By default, the case sensitivity depends on the platform.
        return platform.system() != 'Windows'
    # If an upper-case version of the path exists but a lower-case
    # version does not, then the filesystem must be case-sensitive.
    # (Otherwise, we have more work to do.)
    if not (os.path.exists(syspath(path.lower())) and
            os.path.exists(syspath(path.upper()))):
        return True
    # Both versions of the path exist on the file system. Check whether
    # they refer to different files by their inodes. Alas,
    # `os.path.samefile` is only available on Unix systems on Python 2.
    if platform.system() != 'Windows':
        return not os.path.samefile(syspath(path.lower()),
                                    syspath(path.upper()))
    # On Windows, we check whether the canonical, long filenames for the
    # files are the same.
    lower = _windows_long_path_name(path.lower())
    upper = _windows_long_path_name(path.upper())
    return lower != upper
def raw_seconds_short(string):
    """Formats a human-readable M:SS string as a float (number of seconds).
    Raises ValueError if the conversion cannot take place due to `string` not
    being in the right format.
    """
    parsed = re.match(r'^(\d+):([0-5]\d)$', string)
    if parsed is None:
        raise ValueError('String not in M:SS format')
    minutes = int(parsed.group(1))
    seconds = int(parsed.group(2))
    return float(60 * minutes + seconds)
def asciify_path(path, sep_replace):
    """Decodes all unicode characters in a path into ASCII equivalents.

    Substitutions are provided by the unidecode module. Path separators in the
    input are preserved.

    Keyword arguments:
    path -- The path to be asciified.
    sep_replace -- the string to be used to replace extraneous path separators.
    """
    # If this platform has an os.altsep, normalize it to os.sep so the
    # split below only has to deal with one separator style.
    if os.altsep:
        path = path.replace(os.altsep, os.sep)
    path_components = path.split(os.sep)
    for index, item in enumerate(path_components):
        # unidecode may transliterate a character into a separator (for
        # example a fullwidth solidus becoming "/"); replace both
        # separator styles inside the component. The original code
        # overwrote the os.sep replacement whenever os.altsep was set,
        # discarding it -- chain the two replacements instead.
        cleaned = unidecode(item).replace(os.sep, sep_replace)
        if os.altsep:
            cleaned = cleaned.replace(os.altsep, sep_replace)
        path_components[index] = cleaned
    return os.sep.join(path_components)
def par_map(transform, items):
    """Apply the function `transform` to every element of the iterable
    `items`, discarding the return values (like `map` used only for its
    side effects). The calls may run concurrently on a thread pool, so
    this is only useful for IO-bound `transform`s.
    """
    workers = ThreadPool()
    workers.map(transform, items)
    # Wait for all work to finish before returning.
    workers.close()
    workers.join()
def lazy_property(func):
    """A decorator that creates a lazily evaluated property. On first access,
    the property is assigned the return value of `func`. This first value is
    stored, so that future accesses do not have to evaluate `func` again.
    This behaviour is useful when `func` is expensive to evaluate, and it is
    not certain that the result will be needed.
    """
    cache_attr = '_' + func.__name__
    @property
    @functools.wraps(func)
    def getter(self):
        # Compute and memoize on the first access only.
        try:
            return getattr(self, cache_attr)
        except AttributeError:
            value = func(self)
            setattr(self, cache_attr, value)
            return value
    return getter
def decode_commandline_path(path):
    """Prepare a path for substitution into commandline template.
    On Python 3, we need to construct the subprocess commands to invoke as a
    Unicode string. On Unix, this is a little unfortunate---the OS is
    expecting bytes---so we use surrogate escaping and decode with the
    argument encoding, which is the same encoding that will then be
    *reversed* to recover the same bytes before invoking the OS. On
    Windows, we want to preserve the Unicode filename "as is."

    `path` is a bytestring; the return value is `str`.
    """
    # On Python 3, the template is a Unicode string, which only supports
    # substitution of Unicode variables.
    if platform.system() == 'Windows':
        # Decode with the filesystem encoding to keep the name intact.
        return path.decode(_fsencoding())
    else:
        # Round-trippable decoding: surrogates preserve the raw bytes.
        return path.decode(arg_encoding(), 'surrogateescape')
|
CSC591ADBI-TeamProjects/Product-Search-Relevance
|
build_tfidf.py
|
import pandas as pd
import numpy as np
import re
from gensim import corpora, models, similarities
from gensim.parsing.preprocessing import STOPWORDS
def split(text):
    '''
    Tokenize `text`: lowercase it, split on punctuation/whitespace
    delimiters, and drop stopwords and empty strings.
    '''
    delimiters = (".", ",", ";", ":", "-", "(", ")", " ", "\t")
    pattern = '|'.join(re.escape(delim) for delim in delimiters)
    tokens = re.split(pattern, text.lower())
    return [token for token in tokens if token and token not in STOPWORDS]
def main():
    """Build and persist a gensim dictionary, bag-of-words corpus and
    TF-IDF model from the Home Depot product-search CSV data sets."""
    # Load data
    df_train = pd.read_csv('data/train.csv', encoding="ISO-8859-1")
    df_desc = pd.read_csv('data/product_descriptions.csv', encoding="ISO-8859-1")
    df_attr = pd.read_csv('data/attributes_combined.csv', encoding="ISO-8859-1")
    # split the texts
    titles = [split(line) for line in df_train["product_title"]]
    descs = [split(line) for line in df_desc["product_description"]]
    # NaN attribute values arrive as floats; wrap them as a one-token
    # document instead of passing a float to split().
    attrs = [[str(line)] if isinstance(line, float) else split(line) for line in df_attr["attr_value"]]
    queries = [split(line) for line in df_train["search_term"]]
    texts = np.concatenate((titles, descs, attrs, queries))
    # remove infrequent words (appearing at most twice across the corpus)
    from collections import defaultdict
    frequency = defaultdict(int)
    for text in texts:
        for token in text:
            frequency[token] += 1
    texts = [[token for token in text if frequency[token] > 2] for text in texts]
    # build dictionary (token -> integer id mapping)
    dictionary = corpora.Dictionary(texts)
    dictionary.save('homedepot.dict')
    print dictionary
    # actually build a bag-of-words corpus
    corpus = [dictionary.doc2bow(text) for text in texts]
    corpora.MmCorpus.serialize('homedepot.mm', corpus)
    # build Tf-idf model
    tfidf = models.TfidfModel(corpus)
    tfidf.save('homedepot.tfidf')
# Standard script entry point.
if __name__ == "__main__":
    main()
|
holdlg/PythonScript
|
Python2/bak_2014/sys3_process.py
|
# -*- coding: utf-8 -*-
import win32process
import win32api
import win32con
import ctypes
import os, sys, string
TH32CS_SNAPPROCESS = 0x00000002
class PROCESSENTRY32(ctypes.Structure):
    """ctypes mirror of the Win32 PROCESSENTRY32 record produced by the
    Toolhelp32 snapshot APIs (Process32First/Process32Next)."""
    _fields_ = [("dwSize", ctypes.c_ulong),        # must be set to sizeof() before use
                ("cntUsage", ctypes.c_ulong),
                ("th32ProcessID", ctypes.c_ulong),  # process ID (PID)
                ("th32DefaultHeapID", ctypes.c_ulong),
                ("th32ModuleID", ctypes.c_ulong),
                ("cntThreads", ctypes.c_ulong),     # thread count
                ("th32ParentProcessID", ctypes.c_ulong),  # parent PID
                ("pcPriClassBase", ctypes.c_ulong),
                ("dwFlags", ctypes.c_ulong),
                ("szExeFile", ctypes.c_char * 260)]  # executable name (MAX_PATH)
def getProcList():
    """Generator yielding a PROCESSENTRY32 for each running process,
    enumerated from a Toolhelp32 snapshot.

    NOTE(review): the same PROCESSENTRY32 instance is yielded every
    iteration (it is mutated in place by Process32Next), and the
    snapshot handle is closed only if the generator is exhausted.
    """
    CreateToolhelp32Snapshot = ctypes.windll.kernel32.CreateToolhelp32Snapshot
    Process32First = ctypes.windll.kernel32.Process32First
    Process32Next = ctypes.windll.kernel32.Process32Next
    CloseHandle = ctypes.windll.kernel32.CloseHandle
    hProcessSnap = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
    pe32 = PROCESSENTRY32()
    # dwSize must be initialised before Process32First is called.
    pe32.dwSize = ctypes.sizeof(PROCESSENTRY32)
    if Process32First(hProcessSnap,ctypes.byref(pe32)) == False:
        return
    while True:
        yield pe32
        if Process32Next(hProcessSnap,ctypes.byref(pe32)) == False:
            break
    CloseHandle(hProcessSnap)
def GetProcessModules( pid ):
    """Return [[base_address_hex, module_path], ...] for every module
    loaded in process `pid`. Requires PROCESS_ALL_ACCESS rights and
    raises a pywin32 error when the process cannot be opened."""
    handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, False, pid )
    hModule = win32process.EnumProcessModules(handle)
    temp=[]
    for i in hModule:
        # debugfile() canonicalises the path of a few core system processes.
        temp.append([hex(i),debugfile(win32process.GetModuleFileNameEx(handle,i))])
    win32api.CloseHandle(handle)
    return temp
def CloseProcess( pid ):
    """Forcibly terminate process `pid`.

    NOTE(review): GetExitCodeProcess on a running process yields
    STILL_ACTIVE (259), which then becomes the exit code handed to
    TerminateProcess -- confirm this is the intended behavior.
    """
    handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, False, pid )
    exitcode = win32process.GetExitCodeProcess( handle )
    win32api.TerminateProcess(handle, exitcode)
    win32api.CloseHandle(handle)
def debugfile(file):
    """Canonicalise the module path of a few core Windows processes.

    For smss.exe, csrss.exe and winlogon.exe the reported path is
    rewritten to the well-known C:\\WINDOWS\\system32 location; every
    other path is returned unchanged. The original implementation
    repeated the same branch three times; a single membership test is
    equivalent and easier to extend.
    """
    basename = file.split("\\")[-1]
    if basename in ("smss.exe", "csrss.exe", "winlogon.exe"):
        return "C:\\WINDOWS\\system32\\" + basename
    return file
if __name__ =='__main__':
    # Load InjectAssist.dll and use its privilege-elevation helpers to
    # grant this process debug rights before enumerating processes.
    procupdll=ctypes.cdll.LoadLibrary("InjectAssist.dll")
    self_pid = procupdll.GetPIDbyName('services.exe')
    print self_pid
    if procupdll.EnableOpenprocPriv()==0:
        print "提权失败"
    count = 0
    procList = getProcList()
    for proc in procList:
        count+=1
        print("name=%s\tfather=%d\tid=%d" % (proc.szExeFile, proc.th32ParentProcessID, proc.th32ProcessID))
        try:
            TempGet=GetProcessModules(proc.th32ProcessID)
        except Exception, e:
            # Protected/system processes cannot be opened; skip them.
            print "pid:%d can't read"%(proc.th32ProcessID)
            continue
        # TempGet[0][1].split("\\")[-1] is the last component of the path
        #'''
        # Enumerate every module loaded by the process.
        for tempnum in range(0,len(TempGet)):
            try:
                print TempGet
            except Exception,e:
                print e
        #'''
    print "进程数:%d"%(count)
|
Zertifica/evosnap
|
evosnap/merchant_applications/pos_device.py
|
from evosnap import constants
class POSDevice:
    """Merchant-application POS device section: device type,
    connection, colour and quantity."""
    def __init__(self,**kwargs):
        # Serialization order of the lowerCamelCase field names.
        self.__order = [
            'posDeviceType', 'posDeviceConnection', 'posDeviceColour', 'posDeviceQuantity',
        ]
        # Fields rendered in lowerCamelCase when serialized.
        self.__lower_camelcase = constants.ALL_FIELDS
        for field_name in ('pos_device_type', 'pos_device_connection',
                           'pos_device_colour', 'pos_device_quantity'):
            setattr(self, field_name, kwargs.get(field_name))
    @property
    def hash_str(self):
        """Concatenation of the stripped string form of every non-None
        field, in declaration order."""
        fragments = []
        for field_name in ('pos_device_type', 'pos_device_connection',
                           'pos_device_colour', 'pos_device_quantity'):
            value = getattr(self, field_name)
            if value is not None:
                fragments.append(str(value).strip())
        return ''.join(fragments)
|
andycasey/snob
|
sandbox_mixture_slf.py
|
import numpy as np
from snob import mixture_slf as slf
# Synthetic-data configuration: 1000 samples, 50 features, 6 clusters,
# a rank-1 latent factor, and noise scale sigma.
n_samples, n_features, n_clusters, rank = 1000, 50, 6, 1
sigma = 0.5
true_homo_specific_variances = sigma**2 * np.ones((1, n_features))
# Fixed seed so the sandbox run is reproducible.
rng = np.random.RandomState(321)
# Orthonormal factor loads taken from the SVD of a random matrix.
U, _, _ = np.linalg.svd(rng.randn(n_features, n_features))
true_factor_loads = U[:, :rank].T
true_factor_scores = rng.randn(n_samples, rank)
X = np.dot(true_factor_scores, true_factor_loads)
# Assign objects to different clusters.
indices = rng.randint(0, n_clusters, size=n_samples)
true_weights = np.zeros(n_clusters)
true_means = rng.randn(n_clusters, n_features)
for index in range(n_clusters):
    X[indices==index] += true_means[index]
    true_weights[index] = (indices==index).sum()
true_weights = true_weights/n_samples
# Adding homoscedastic noise
bar = rng.randn(n_samples, n_features)
X_homo = X + sigma * bar
# Adding heteroscedastic noise (per-feature noise scales)
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
true_hetero_specific_variances = sigmas**2
# Fit the latent-factor mixture model to the heteroscedastic data.
data = X_hetero
model = slf.SLFGMM(n_clusters)
model.fit(data)
def scatter_common(x, y, title=None):
    """Scatter *y* against *x* with an identity line and equal axis
    limits, for eyeballing true vs. inferred quantities.

    NOTE(review): relies on `plt` (matplotlib.pyplot), which is never
    imported in this script -- confirm the import before running.
    """
    fig, ax = plt.subplots()
    ax.scatter(x,y)
    ax.set_title(title or "")
    # Square up the axes and draw the y == x reference line.
    limits = np.array([ax.get_xlim(), ax.get_ylim()])
    limits = (limits.min(), limits.max())
    ax.plot(limits, limits, c="#666666", linestyle=":", linewidth=0.5, zorder=-1)
    ax.set_xlim(limits)
    ax.set_ylim(limits)
    return fig
# Compare recovered parameters against the ground truth.
scatter_common(true_factor_loads, model.factor_loads, "factor loads")
scatter_common(true_factor_scores, model.factor_scores, "factor scores")
scatter_common(true_homo_specific_variances, model.specific_variances, "specific variances")
# means
# This one is tricky because the indices are not necessarily the same.
# So just take whichever is closest.
idx = np.zeros(n_clusters, dtype=int)
for index, true_mean in enumerate(true_means):
    distance = np.sum(np.abs(model._means - true_mean), axis=1) \
             + np.abs(model.weights.flatten()[index] - true_weights)
    idx[index] = np.argmin(distance)
# Each true cluster must map to a distinct inferred cluster.
assert len(idx) == len(set(idx))
true = true_means.flatten()
inferred = model._means[idx].flatten()
scatter_common(true, inferred, "means")
# Plot some data...
fig, ax = plt.subplots()
ax.scatter(data[:, 0], data[:, 1], facecolor="g")
# NOTE(review): `raise a` aborts the script here with a NameError --
# apparently a deliberate debugging stop; everything below is dead code.
raise a
# factor scores
ax = axes[1]
true = true_factor_scores.flatten()
inferred = model._factor_scores.flatten()
ax.scatter(true, inferred)
# factor loads
ax = axes[2]
true = true_factor_loads.flatten()
inferred = model._factor_loads.flatten()
ax.scatter(true, inferred)
raise a
true = np.hstack([each.flatten() for each in (true_means, true_factor_scores, true_factor_loads, true_specific_variances)])
inferred = np.hstack([each.flatten() for each in (model.means, model.factor_scores, model.factor_loads, model.specific_variances)])
fig, ax = plt.subplots()
ax.scatter(true, inferred, alpha=0.5)
raise a
|
kapadia/geoblend
|
setup.py
|
import os
import shutil
from codecs import open as codecs_open
import numpy as np
from setuptools import setup, find_packages
from distutils.core import Distribution, Extension
from distutils.command.build_ext import build_ext
from distutils import errors
from Cython.Build import cythonize
from Cython.Compiler.Errors import CompileError
def check_for_openmp():
    """
    There does not seem to be a cross platform and standard way to check for
    OpenMP support. Attempt to compile a test script. Proceed with OpenMP
    implementation if it works.

    Raises a distutils/Cython compile or link error when -fopenmp is
    unusable; the caller catches that to fall back to the serial build.
    """
    distribution = Distribution()
    ext_options = {
        'extra_compile_args': ['-fopenmp'],
        'extra_link_args': ['-fopenmp']
    }
    extensions = [
        Extension('geoblend.openmp_check', ['geoblend/openmp_check.pyx'], **ext_options)
    ]
    # Compile the probe extension in-process; failure propagates as an
    # exception rather than a return value.
    build_extension = build_ext(distribution)
    build_extension.finalize_options()
    build_extension.extensions = cythonize(extensions, force=True)
    build_extension.run()
# Shared build options for the Cython extensions (NumPy headers).
ext_options = {
    'include_dirs': [ np.get_include() ]
}
extensions = [
    Extension('geoblend.vector', ['geoblend/vector.pyx'], **ext_options),
    Extension('geoblend.convolve', ['geoblend/convolve.pyx'], **ext_options)
]
pkg_dir = os.path.dirname(os.path.realpath(__file__))
dst = os.path.join(pkg_dir, 'geoblend', 'coefficients.pyx')
# Select the OpenMP-enabled coefficients source when the toolchain
# supports -fopenmp, otherwise fall back to the serial implementation.
try:
    check_for_openmp()
    ext_options['extra_compile_args'] = ['-fopenmp']
    ext_options['extra_link_args'] = ['-fopenmp']
    src = os.path.join(pkg_dir, 'geoblend', '_coefficients_omp.pyx')
except (errors.LinkError, errors.CompileError, CompileError):
    src = os.path.join(pkg_dir, 'geoblend', '_coefficients.pyx')
shutil.copy(src, dst)
extensions.append(
    Extension('geoblend.coefficients', ['geoblend/coefficients.pyx'], **ext_options),
)
# Get the long description from the relevant file
with codecs_open('README.rst', encoding='utf-8') as f:
    long_description = f.read()
setup(name='geoblend',
      version='0.2.3',
      description=u"Geo-aware poisson blending.",
      long_description=long_description,
      classifiers=[],
      keywords='',
      author=u"Amit Kapadia",
      author_email='amit@planet.com',
      url='https://github.com/kapadia/geoblend',
      license='MIT',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      ext_modules=cythonize(extensions),
      zip_safe=False,
      install_requires=[
          'click',
          # 'rasterio',
          'pyamg',
          'scipy',
          'scikit-image'
      ],
      extras_require={
          'test': ['pytest'],
          'development': [
              'cython>=0.23.0',
              'benchmark'
          ]
      },
      entry_points="""
      [console_scripts]
      geoblend=geoblend.scripts.cli:geoblend
      """
      )
|
msoulier/tftpy
|
tftpy/TftpContexts.py
|
# vim: ts=4 sw=4 et ai:
"""This module implements all contexts for state handling during uploads and
downloads, the main interface to which being the TftpContext base class.
The concept is simple. Each context object represents a single upload or
download, and the state object in the context object represents the current
state of that transfer. The state object has a handle() method that expects
the next packet in the transfer, and returns a state object until the transfer
is complete, at which point it returns None. That is, unless there is a fatal
error, in which case a TftpException is returned instead."""
import logging
import os
import socket
import sys
import time
from .TftpPacketFactory import TftpPacketFactory
from .TftpPacketTypes import *
from .TftpShared import *
from .TftpStates import *
log = logging.getLogger("tftpy.TftpContext")
###############################################################################
# Utility classes
###############################################################################
class TftpMetrics:
    """A class representing metrics of the transfer: byte counts,
    duplicate-packet bookkeeping, timing and derived throughput."""
    def __init__(self):
        # Bytes transferred
        self.bytes = 0
        # Bytes re-sent
        self.resent_bytes = 0
        # Duplicate packets received, keyed by the packet's string form
        self.dups = {}
        self.dupcount = 0
        # Times
        self.start_time = 0
        self.end_time = 0
        self.duration = 0
        # Rates
        self.bps = 0
        self.kbps = 0
        # Generic errors
        self.errors = 0
    def compute(self):
        """Derive duration, throughput and the duplicate total once the
        transfer has finished."""
        self.duration = self.end_time - self.start_time
        if not self.duration:
            # Guard against division by zero on sub-resolution transfers.
            self.duration = 1
        log.debug("TftpMetrics.compute: duration is %s", self.duration)
        self.bps = (self.bytes * 8.0) / self.duration
        self.kbps = self.bps / 1024.0
        log.debug("TftpMetrics.compute: kbps is %s", self.kbps)
        self.dupcount += sum(self.dups.values())
    def add_dup(self, pkt):
        """This method adds a dup for a packet to the metrics."""
        log.debug("Recording a dup of %s", pkt)
        key = str(pkt)
        self.dups[key] = self.dups.get(key, 0) + 1
        tftpassert(self.dups[key] < MAX_DUPS, "Max duplicates reached")
###############################################################################
# Context classes
###############################################################################
class TftpContext:
    """The base class of the contexts.

    A context represents one transfer (upload or download) and owns the
    UDP socket, the current state object and the transfer metrics."""
    def __init__(self, host, port, timeout, retries=DEF_TIMEOUT_RETRIES, localip=""):
        """Constructor for the base context, setting shared instance
        variables."""
        self.file_to_transfer = None
        self.fileobj = None
        self.options = None
        self.packethook = None
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        if localip != "":
            self.sock.bind((localip, 0))
        self.sock.settimeout(timeout)
        self.timeout = timeout
        self.retries = retries
        self.state = None
        self.next_block = 0
        self.factory = TftpPacketFactory()
        # Note, setting the host will also set self.address, as it's a property.
        self.host = host
        self.port = port
        # The port associated with the TID
        self.tidport = None
        # Metrics
        self.metrics = TftpMetrics()
        # Flag when the transfer is pending completion.
        self.pending_complete = False
        # Time when this context last received any traffic.
        # FIXME: does this belong in metrics?
        self.last_update = 0
        # The last packet we sent, if applicable, to make resending easy.
        self.last_pkt = None
        # Count the number of retry attempts.
        self.retry_count = 0
    def getBlocksize(self):
        """Fetch the current blocksize for this session."""
        return int(self.options.get("blksize", 512))
    def __del__(self):
        """Simple destructor to try to call housekeeping in the end method if
        not called explicitly. Leaking file descriptors is not a good
        thing."""
        self.end()
    def checkTimeout(self, now):
        """Compare current time with last_update time, and raise an exception
        if we're over the timeout time."""
        log.debug("checking for timeout on session %s", self)
        if now - self.last_update > self.timeout:
            raise TftpTimeout("Timeout waiting for traffic")
    def start(self):
        """Begin the transfer; concrete subclasses must implement this."""
        raise NotImplementedError("Abstract method")
    def end(self, close_fileobj=True):
        """Perform session cleanup, since the end method should always be
        called explicitly by the calling code, this works better than the
        destructor.
        Set close_fileobj to False so fileobj can be returned open."""
        log.debug("in TftpContext.end - closing socket")
        self.sock.close()
        if close_fileobj and self.fileobj is not None and not self.fileobj.closed:
            log.debug("self.fileobj is open - closing")
            self.fileobj.close()
    def gethost(self):
        """
        Simple getter method for use in a property.
        """
        return self.__host
    def sethost(self, host):
        """
        Setter method that also sets the address property as a result
        of the host that is set.
        """
        self.__host = host
        self.address = socket.gethostbyname(host)
    host = property(gethost, sethost)
    def setNextBlock(self, block):
        """Set the expected block number, wrapping back to 0 when the
        16-bit TFTP block counter overflows."""
        if block >= 2 ** 16:
            log.debug("Block number rollover to 0 again")
            block = 0
        self.__eblock = block
    def getNextBlock(self):
        """Return the expected block number."""
        return self.__eblock
    next_block = property(getNextBlock, setNextBlock)
    def cycle(self):
        """
        Here we wait for a response from the server after sending it
        something, and dispatch appropriate action to that response.
        """
        try:
            (buffer, (raddress, rport)) = self.sock.recvfrom(MAX_BLKSIZE)
        except socket.timeout:
            log.warning("Timeout waiting for traffic, retrying...")
            raise TftpTimeout("Timed-out waiting for traffic")
        # Ok, we've received a packet. Log it.
        log.debug("Received %d bytes from %s:%s", len(buffer), raddress, rport)
        # And update our last updated time.
        self.last_update = time.time()
        # Decode it.
        recvpkt = self.factory.parse(buffer)
        # Check for known "connection".
        # NOTE(review): despite the "Discarding" wording in these log
        # messages, the packet is still processed below.
        if raddress != self.address:
            log.warning(
                "Received traffic from %s, expected host %s. Discarding"
                % (raddress, self.host)
            )
        if self.tidport and self.tidport != rport:
            log.warning(
                "Received traffic from %s:%s but we're "
                "connected to %s:%s. Discarding."
                % (raddress, rport, self.host, self.tidport)
            )
        # If there is a packethook defined, call it. We unconditionally
        # pass all packets, it's up to the client to screen out different
        # kinds of packets. This way, the client is privy to things like
        # negotiated options.
        if self.packethook:
            self.packethook(recvpkt)
        # And handle it, possibly changing state.
        self.state = self.state.handle(recvpkt, raddress, rport)
        # If we didn't throw any exceptions here, reset the retry_count to
        # zero.
        self.retry_count = 0
class TftpContextServer(TftpContext):
    """The context for the server.

    One instance is created per client session; the direction of the
    transfer is decided by the start state from the first packet."""
    def __init__(
        self,
        host,
        port,
        timeout,
        root,
        dyn_file_func=None,
        upload_open=None,
        retries=DEF_TIMEOUT_RETRIES,
    ):
        """host/port identify the client; root is the filesystem root
        for transfers; dyn_file_func and upload_open optionally
        customize how download sources and upload sinks are opened."""
        TftpContext.__init__(self, host, port, timeout, retries)
        # At this point we have no idea if this is a download or an upload. We
        # need to let the start state determine that.
        self.state = TftpStateServerStart(self)
        self.root = root
        self.dyn_file_func = dyn_file_func
        self.upload_open = upload_open
    def __str__(self):
        return f"{self.host}:{self.port} {self.state}"
    def start(self, buffer):
        """
        Start the state cycle. Note that the server context receives an
        initial packet in its start method. Also note that the server does not
        loop on cycle(), as it expects the TftpServer object to manage
        that.
        """
        log.debug("In TftpContextServer.start")
        self.metrics.start_time = time.time()
        log.debug("Set metrics.start_time to %s", self.metrics.start_time)
        # And update our last updated time.
        self.last_update = time.time()
        pkt = self.factory.parse(buffer)
        log.debug("TftpContextServer.start() - factory returned a %s", pkt)
        # Call handle once with the initial packet. This should put us into
        # the download or the upload state.
        self.state = self.state.handle(pkt, self.host, self.port)
    def end(self):
        """Finish up the context."""
        TftpContext.end(self)
        self.metrics.end_time = time.time()
        log.debug("Set metrics.end_time to %s", self.metrics.end_time)
        self.metrics.compute()
class TftpContextClientUpload(TftpContext):
    """The upload context for the client during an upload.
    Note: If input is a hyphen, then we will use stdin."""
    def __init__(
        self,
        host,
        port,
        filename,
        input,
        options,
        packethook,
        timeout,
        retries=DEF_TIMEOUT_RETRIES,
        localip="",
    ):
        """`input` may be a file-like object (anything with read()), the
        string "-" for stdin, or a local path to open in binary mode."""
        TftpContext.__init__(self, host, port, timeout, retries, localip)
        self.file_to_transfer = filename
        self.options = options
        self.packethook = packethook
        # If the input object has a read() function,
        # assume it is file-like.
        if hasattr(input, "read"):
            self.fileobj = input
        elif input == "-":
            self.fileobj = sys.stdin.buffer
        else:
            self.fileobj = open(input, "rb")
        log.debug("TftpContextClientUpload.__init__()")
        log.debug(
            "file_to_transfer = %s, options = %s"
            % (self.file_to_transfer, self.options)
        )
    def __str__(self):
        return f"{self.host}:{self.port} {self.state}"
    def start(self):
        """Send the WRQ and drive the state machine until the transfer
        completes or the retry budget is exhausted."""
        log.info("Sending tftp upload request to %s" % self.host)
        log.info("    filename -> %s" % self.file_to_transfer)
        log.info("    options -> %s" % self.options)
        self.metrics.start_time = time.time()
        log.debug("Set metrics.start_time to %s" % self.metrics.start_time)
        # FIXME: put this in a sendWRQ method?
        pkt = TftpPacketWRQ()
        pkt.filename = self.file_to_transfer
        pkt.mode = "octet"  # FIXME - shouldn't hardcode this
        pkt.options = self.options
        self.sock.sendto(pkt.encode().buffer, (self.host, self.port))
        self.next_block = 1
        self.last_pkt = pkt
        # FIXME: should we centralize sendto operations so we can refactor all
        # saving of the packet to the last_pkt field?
        self.state = TftpStateSentWRQ(self)
        while self.state:
            try:
                log.debug("State is %s" % self.state)
                self.cycle()
            except TftpTimeout as err:
                # On timeout, resend the last packet until retries run out.
                log.error(str(err))
                self.retry_count += 1
                if self.retry_count >= self.retries:
                    log.debug("hit max retries, giving up")
                    raise
                else:
                    log.warning("resending last packet")
                    self.state.resendLast()
    def end(self):
        """Finish up the context."""
        TftpContext.end(self)
        self.metrics.end_time = time.time()
        log.debug("Set metrics.end_time to %s" % self.metrics.end_time)
        self.metrics.compute()
class TftpContextClientDownload(TftpContext):
    """The download context for the client during a download.
    Note: If output is a hyphen, then the output will be sent to stdout."""
    def __init__(
        self,
        host,
        port,
        filename,
        output,
        options,
        packethook,
        timeout,
        retries=DEF_TIMEOUT_RETRIES,
        localip="",
    ):
        """`output` may be a file-like object (anything with write()),
        the string "-" for stdout, or a local path opened in binary
        write mode."""
        TftpContext.__init__(self, host, port, timeout, retries, localip)
        # FIXME: should we refactor setting of these params?
        self.file_to_transfer = filename
        self.options = options
        self.packethook = packethook
        # Tracks whether we own the file object (and should close/unlink it).
        self.filelike_fileobj = False
        # If the output object has a write() function,
        # assume it is file-like.
        if hasattr(output, "write"):
            self.fileobj = output
            self.filelike_fileobj = True
        # If the output filename is -, then use stdout
        elif output == "-":
            self.fileobj = sys.stdout
            self.filelike_fileobj = True
        else:
            self.fileobj = open(output, "wb")
        log.debug("TftpContextClientDownload.__init__()")
        log.debug(
            "file_to_transfer = %s, options = %s"
            % (self.file_to_transfer, self.options)
        )
    def __str__(self):
        return f"{self.host}:{self.port} {self.state}"
    def start(self):
        """Initiate the download."""
        log.info("Sending tftp download request to %s" % self.host)
        log.info("    filename -> %s" % self.file_to_transfer)
        log.info("    options -> %s" % self.options)
        self.metrics.start_time = time.time()
        log.debug("Set metrics.start_time to %s" % self.metrics.start_time)
        # FIXME: put this in a sendRRQ method?
        pkt = TftpPacketRRQ()
        pkt.filename = self.file_to_transfer
        pkt.mode = "octet"  # FIXME - shouldn't hardcode this
        pkt.options = self.options
        self.sock.sendto(pkt.encode().buffer, (self.host, self.port))
        self.next_block = 1
        self.last_pkt = pkt
        self.state = TftpStateSentRRQ(self)
        while self.state:
            try:
                log.debug("State is %s" % self.state)
                self.cycle()
            except TftpTimeout as err:
                # On timeout, resend the last packet until retries run out.
                log.error(str(err))
                self.retry_count += 1
                if self.retry_count >= self.retries:
                    log.debug("hit max retries, giving up")
                    raise
                else:
                    log.warning("resending last packet")
                    self.state.resendLast()
            except TftpFileNotFoundError as err:
                # If we received file not found, then we should not save the open
                # output file or we'll be left with a size zero file. Delete it,
                # if it exists.
                log.error("Received File not found error")
                if self.fileobj is not None and not self.filelike_fileobj:
                    if os.path.exists(self.fileobj.name):
                        log.debug("unlinking output file of %s", self.fileobj.name)
                        os.unlink(self.fileobj.name)
                raise
    def end(self):
        """Finish up the context."""
        # Keep caller-supplied file objects open; close only files we opened.
        TftpContext.end(self, not self.filelike_fileobj)
        self.metrics.end_time = time.time()
        log.debug("Set metrics.end_time to %s" % self.metrics.end_time)
        self.metrics.compute()
|
arek125/remote-GPIO-control-server
|
tsl2561.py
|
#!/usr/bin/python
# Code sourced from AdaFruit discussion board: https://www.adafruit.com/forums/viewtopic.php?f=8&t=34922 and https://github.com/seanbechhofer/raspberrypi/blob/master/python/TSL2561.py
import sys
import time
import re
import smbus
class Adafruit_I2C(object):
    @staticmethod
    def getPiRevision():
        "Gets the version number of the Raspberry Pi board"
        # Revision list available at: http://elinux.org/RPi_HardwareHistory#Board_Revision_History
        try:
            with open('/proc/cpuinfo', 'r') as infile:
                for line in infile:
                    # Match a line of the form "Revision : 0002" while ignoring extra
                    # info in front of the revision (like 1000 when the Pi was over-volted).
                    match = re.match('Revision\s+:\s+.*(\w{4})$', line)
                    if match and match.group(1) in ['0000', '0002', '0003']:
                        # Return revision 1 if revision ends with 0000, 0002 or 0003.
                        return 1
                    elif match:
                        # Assume revision 2 if revision ends with any other 4 chars.
                        return 2
            # Couldn't find the revision, assume revision 0 like older code for compatibility.
            return 0
        except:
            # Best-effort probe: any failure (e.g. missing /proc) maps to 0.
            return 0
@staticmethod
def getPiI2CBusNumber():
# Gets the I2C bus number /dev/i2c#
return 1 if Adafruit_I2C.getPiRevision() > 1 else 0
def __init__(self, address, busnum=-1, debug=False):
self.address = address
# By default, the correct I2C bus is auto-detected using /proc/cpuinfo
# Alternatively, you can hard-code the bus version below:
# self.bus = smbus.SMBus(0); # Force I2C0 (early 256MB Pi's)
# self.bus = smbus.SMBus(1); # Force I2C1 (512MB Pi's)
self.bus = smbus.SMBus(busnum if busnum >= 0 else Adafruit_I2C.getPiI2CBusNumber())
self.debug = debug
def reverseByteOrder(self, data):
"Reverses the byte order of an int (16-bit) or long (32-bit) value"
# Courtesy Vishal Sapre
byteCount = len(hex(data)[2:].replace('L','')[::2])
val = 0
for i in range(byteCount):
val = (val << 8) | (data & 0xff)
data >>= 8
return val
def errMsg(self):
print "Error accessing 0x%02X: Check your I2C address" % self.address
return -1
def write8(self, reg, value):
"Writes an 8-bit value to the specified register/address"
try:
self.bus.write_byte_data(self.address, reg, value)
if self.debug:
print "I2C: Wrote 0x%02X to register 0x%02X" % (value, reg)
except IOError, err:
return self.errMsg()
def write16(self, reg, value):
"Writes a 16-bit value to the specified register/address pair"
try:
self.bus.write_word_data(self.address, reg, value)
if self.debug:
print ("I2C: Wrote 0x%02X to register pair 0x%02X,0x%02X" %
(value, reg, reg+1))
except IOError, err:
return self.errMsg()
def writeRaw8(self, value):
"Writes an 8-bit value on the bus"
try:
self.bus.write_byte(self.address, value)
if self.debug:
print "I2C: Wrote 0x%02X" % value
except IOError, err:
return self.errMsg()
def writeList(self, reg, list):
"Writes an array of bytes using I2C format"
try:
if self.debug:
print "I2C: Writing list to register 0x%02X:" % reg
print list
self.bus.write_i2c_block_data(self.address, reg, list)
except IOError, err:
return self.errMsg()
def readList(self, reg, length):
"Read a list of bytes from the I2C device"
try:
results = self.bus.read_i2c_block_data(self.address, reg, length)
if self.debug:
print ("I2C: Device 0x%02X returned the following from reg 0x%02X" %
(self.address, reg))
print results
return results
except IOError, err:
return self.errMsg()
def readU8(self, reg):
"Read an unsigned byte from the I2C device"
try:
result = self.bus.read_byte_data(self.address, reg)
if self.debug:
print ("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
(self.address, result & 0xFF, reg))
return result
except IOError, err:
return self.errMsg()
def readS8(self, reg):
"Reads a signed byte from the I2C device"
try:
result = self.bus.read_byte_data(self.address, reg)
if result > 127: result -= 256
if self.debug:
print ("I2C: Device 0x%02X returned 0x%02X from reg 0x%02X" %
(self.address, result & 0xFF, reg))
return result
except IOError, err:
return self.errMsg()
def readU16(self, reg, little_endian=True):
"Reads an unsigned 16-bit value from the I2C device"
try:
result = self.bus.read_word_data(self.address,reg)
# Swap bytes if using big endian because read_word_data assumes little
# endian on ARM (little endian) systems.
if not little_endian:
result = ((result << 8) & 0xFF00) + (result >> 8)
if (self.debug):
print "I2C: Device 0x%02X returned 0x%04X from reg 0x%02X" % (self.address, result & 0xFFFF, reg)
return result
except IOError, err:
return self.errMsg()
def readS16(self, reg, little_endian=True):
"Reads a signed 16-bit value from the I2C device"
try:
result = self.readU16(reg,little_endian)
if result > 32767: result -= 65536
return result
except IOError, err:
return self.errMsg()
class TSL2561:
    # Driver for the TAOS TSL2561 luminosity sensor over I2C.
    # Register addresses used below carry the 0x80 "command" bit
    # (e.g. 0x80 = control, 0x81 = timing, 0x8C/0x8E = data channels).
    i2c = None
    def __init__(self, address=0x39, debug=0, pause=0.8, gain=0):
        # pause: seconds to wait after a gain change so an integration
        # cycle completes; gain: 0 = autorange, 1 = 1x, 16 = 16x.
        self.i2c = Adafruit_I2C(address)
        self.address = address
        self.pause = pause
        self.debug = debug
        self.gain = gain
        self.i2c.write8(0x80, 0x03) # enable the device
    def setGain(self,gain=1):
        """ Set the gain """
        # Only touches the hardware when the gain actually changes, then
        # sleeps so the next reading comes from a full integration cycle.
        if (gain != self.gain):
            if (gain==1):
                self.i2c.write8(0x81, 0x02) # set gain = 1X and timing = 402 mSec
                if (self.debug):
                    print "Setting low gain"
            else:
                self.i2c.write8(0x81, 0x12) # set gain = 16X and timing = 402 mSec
                if (self.debug):
                    print "Setting high gain"
            self.gain=gain # safe gain for calculation
            time.sleep(self.pause) # pause for integration (self.pause must be bigger than integration time)
    def readWord(self, reg):
        """Reads a word from the I2C device"""
        # Returns the byte-swapped 16-bit value, or -1 on I/O error.
        try:
            wordval = self.i2c.readU16(reg)
            newval = self.i2c.reverseByteOrder(wordval)
            if (self.debug):
                print("I2C: Device 0x%02X returned 0x%04X from reg 0x%02X" % (self.address, wordval & 0xFFFF, reg))
            return newval
        except IOError:
            print("Error accessing 0x%02X: Check your I2C address" % self.address)
            return -1
    def readFull(self, reg=0x8C):
        """Reads visible+IR diode from the I2C device"""
        return self.readWord(reg)
    def readIR(self, reg=0x8E):
        """Reads IR only diode from the I2C device"""
        return self.readWord(reg)
    def readLux(self, gain = 0):
        """Grabs a lux reading either with autoranging (gain=0) or with a specified gain (1, 16)"""
        # NOTE(review): a gain value outside {0, 1, 16} leaves 'ambient'
        # and 'IR' unassigned and raises NameError below — confirm callers
        # only ever pass 0, 1 or 16 (readLuxRetry passes self.gain).
        if (gain == 1 or gain == 16):
            self.setGain(gain) # low/highGain
            ambient = self.readFull()
            IR = self.readIR()
        elif (gain==0): # auto gain
            self.setGain(16) # first try highGain
            ambient = self.readFull()
            if (ambient < 65535):
                IR = self.readIR()
            # Short-circuit: if ambient saturated, IR was never read and
            # is not evaluated here.
            if (ambient >= 65535 or IR >= 65535): # value(s) exeed(s) datarange
                self.setGain(1) # set lowGain
                ambient = self.readFull()
                IR = self.readIR()
        if (self.gain==1):
            ambient *= 16 # scale 1x to 16x
            IR *= 16 # scale 1x to 16x
        if ambient != 0: ratio = (IR / float(ambient)) # changed to make it run under python 2
        else: ratio = 0
        if (self.debug):
            print "IR Result", IR
            print "Ambient Result", ambient
        # Piecewise lux approximation by IR/ambient ratio band.
        if ((ratio >= 0) & (ratio <= 0.52)):
            lux = (0.0315 * ambient) - (0.0593 * ambient * (ratio**1.4))
        elif (ratio <= 0.65):
            lux = (0.0229 * ambient) - (0.0291 * IR)
        elif (ratio <= 0.80):
            lux = (0.0157 * ambient) - (0.018 * IR)
        elif (ratio <= 1.3):
            lux = (0.00338 * ambient) - (0.0026 * IR)
        elif (ratio > 1.3):
            lux = 0
        return lux
    def readLuxRetry(self):
        # Retry up to 10 times (2 s apart) until a non-zero lux reading is
        # obtained; returns 0 if every attempt read zero.
        test_count = 0
        lux = 0
        while test_count < 10:
            lux = self.readLux(self.gain)
            test_count += 1
            if lux != 0:
                lux = float(round(lux,2))
                break
            time.sleep(2)
        return lux
# if __name__ == "__main__":
# tsl=TSL2561(debug=True)
# tsl2=TSL2561(debug=True,gain=16)
# tsl3=TSL2561(debug=True,gain=1)
# print "LUX HIGH GAIN ", tsl2.readLuxRetry()
# print "LUX LOW GAIN ", tsl3.readLuxRetry()
# print "LUX AUTO GAIN ", tsl.readLuxRetry()
|
graingert/alembic
|
alembic/operations/ops.py
|
from .. import util
from ..util import sqla_compat
from . import schemaobj
from sqlalchemy.types import NULLTYPE
from .base import Operations, BatchOperations
import re
class MigrateOperation(object):
    """Common base for migration command and organization objects.

    This system is part of the operation extensibility API.

    .. versionadded:: 0.8.0

    .. seealso::

        :ref:`operation_objects`

        :ref:`operation_plugins`

        :ref:`customizing_revision`

    """

    @util.memoized_property
    def info(self):
        """Arbitrary user storage attached to this
        :class:`.MigrateOperation`; memoized, so every access returns
        the same dictionary.

        """
        return dict()
class AddConstraintOp(MigrateOperation):
    """Base for operations which add a constraint to a table."""

    @property
    def constraint_type(self):
        # Concrete subclasses provide the constraint-type string.
        raise NotImplementedError()

    @classmethod
    def from_constraint(cls, constraint):
        """Build the appropriate Create*Op from a SQLAlchemy constraint,
        dispatching on its ``__visit_name__``."""
        dispatch = {
            "unique_constraint": CreateUniqueConstraintOp.from_constraint,
            "foreign_key_constraint": CreateForeignKeyOp.from_constraint,
            "primary_key_constraint": CreatePrimaryKeyOp.from_constraint,
            "check_constraint": CreateCheckConstraintOp.from_constraint,
            "column_check_constraint": CreateCheckConstraintOp.from_constraint,
        }
        converter = dispatch[constraint.__visit_name__]
        return converter(constraint)

    def reverse(self):
        """Invert this "add constraint" into the matching drop."""
        constraint = self.to_constraint()
        return DropConstraintOp.from_constraint(constraint)

    def to_diff_tuple(self):
        """Render this operation as an autogenerate diff tuple."""
        return ("add_constraint", self.to_constraint())
@Operations.register_operation("drop_constraint")
@BatchOperations.register_operation("drop_constraint", "batch_drop_constraint")
class DropConstraintOp(MigrateOperation):
    """Represent a drop constraint operation."""
    def __init__(
            self,
            constraint_name, table_name, type_=None, schema=None,
            _orig_constraint=None):
        self.constraint_name = constraint_name
        self.table_name = table_name
        # Instance attribute holding the constraint-type string
        # ('unique', 'foreignkey', 'primary' or 'check'), or None.
        self.constraint_type = type_
        self.schema = schema
        # Original Constraint object, if available; required by reverse().
        self._orig_constraint = _orig_constraint
    def reverse(self):
        """Invert into the matching "add constraint" operation.

        Only possible when the original Constraint object was captured.
        """
        if self._orig_constraint is None:
            raise ValueError(
                "operation is not reversible; "
                "original constraint is not present")
        return AddConstraintOp.from_constraint(self._orig_constraint)
    def to_diff_tuple(self):
        # Foreign keys get their own diff tag for autogenerate.
        if self.constraint_type == "foreignkey":
            return ("remove_fk", self.to_constraint())
        else:
            return ("remove_constraint", self.to_constraint())
    @classmethod
    def from_constraint(cls, constraint):
        """Build a :class:`.DropConstraintOp` from a SQLAlchemy constraint."""
        # Map SQLAlchemy visit names to our constraint-type strings.
        types = {
            "unique_constraint": "unique",
            "foreign_key_constraint": "foreignkey",
            "primary_key_constraint": "primary",
            "check_constraint": "check",
            "column_check_constraint": "check",
        }
        constraint_table = sqla_compat._table_for_constraint(constraint)
        return cls(
            constraint.name,
            constraint_table.name,
            schema=constraint_table.schema,
            type_=types[constraint.__visit_name__],
            _orig_constraint=constraint
        )
    def to_constraint(self):
        # A drop cannot synthesize a constraint from names alone; only the
        # captured original can be returned.
        if self._orig_constraint is not None:
            return self._orig_constraint
        else:
            raise ValueError(
                "constraint cannot be produced; "
                "original constraint is not present")
    @classmethod
    @util._with_legacy_names([
        ("type", "type_"),
        ("name", "constraint_name"),
    ])
    def drop_constraint(
            cls, operations, constraint_name, table_name,
            type_=None, schema=None):
        """Drop a constraint of the given name, typically via DROP CONSTRAINT.
        :param constraint_name: name of the constraint.
        :param table_name: table name.
        :param ``type_``: optional, required on MySQL.  can be
         'foreignkey', 'primary', 'unique', or 'check'.
        :param schema: Optional schema name to operate within.  To control
         quoting of the schema outside of the default behavior, use
         the SQLAlchemy construct
         :class:`~sqlalchemy.sql.elements.quoted_name`.
         .. versionadded:: 0.7.0 'schema' can now accept a
            :class:`~sqlalchemy.sql.elements.quoted_name` construct.
        .. versionchanged:: 0.8.0 The following positional argument names
           have been changed:
           * name -> constraint_name
        """
        op = cls(constraint_name, table_name, type_=type_, schema=schema)
        return operations.invoke(op)
    @classmethod
    def batch_drop_constraint(cls, operations, constraint_name, type_=None):
        """Issue a "drop constraint" instruction using the
        current batch migration context.
        The batch form of this call omits the ``table_name`` and ``schema``
        arguments from the call.
        .. seealso::
            :meth:`.Operations.drop_constraint`
        .. versionchanged:: 0.8.0 The following positional argument names
           have been changed:
           * name -> constraint_name
        """
        # table_name/schema come from the batch context rather than the call.
        op = cls(
            constraint_name, operations.impl.table_name,
            type_=type_, schema=operations.impl.schema
        )
        return operations.invoke(op)
@Operations.register_operation("create_primary_key")
@BatchOperations.register_operation(
    "create_primary_key", "batch_create_primary_key")
class CreatePrimaryKeyOp(AddConstraintOp):
    """Represent a create primary key operation."""
    constraint_type = "primarykey"
    def __init__(
            self, constraint_name, table_name, columns,
            schema=None, _orig_constraint=None, **kw):
        self.constraint_name = constraint_name
        self.table_name = table_name
        self.columns = columns
        self.schema = schema
        # Original PrimaryKeyConstraint, when built from a live object.
        self._orig_constraint = _orig_constraint
        self.kw = kw
    @classmethod
    def from_constraint(cls, constraint):
        """Build this op from an existing PrimaryKeyConstraint."""
        constraint_table = sqla_compat._table_for_constraint(constraint)
        return cls(
            constraint.name,
            constraint_table.name,
            constraint.columns,
            schema=constraint_table.schema,
            _orig_constraint=constraint
        )
    def to_constraint(self, migration_context=None):
        """Return a PrimaryKeyConstraint, preferring the captured original."""
        if self._orig_constraint is not None:
            return self._orig_constraint
        schema_obj = schemaobj.SchemaObjects(migration_context)
        return schema_obj.primary_key_constraint(
            self.constraint_name, self.table_name,
            self.columns, schema=self.schema)
    @classmethod
    @util._with_legacy_names([
        ('name', 'constraint_name'),
        ('cols', 'columns')
    ])
    def create_primary_key(
            cls, operations,
            constraint_name, table_name, columns, schema=None):
        """Issue a "create primary key" instruction using the current
        migration context.
        e.g.::
            from alembic import op
            op.create_primary_key(
                        "pk_my_table", "my_table",
                        ["id", "version"]
                    )
        This internally generates a :class:`~sqlalchemy.schema.Table` object
        containing the necessary columns, then generates a new
        :class:`~sqlalchemy.schema.PrimaryKeyConstraint`
        object which it then associates with the
        :class:`~sqlalchemy.schema.Table`.
        Any event listeners associated with this action will be fired
        off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
        construct is ultimately used to generate the ALTER statement.
        :param name: Name of the primary key constraint.  The name is necessary
         so that an ALTER statement can be emitted.  For setups that
         use an automated naming scheme such as that described at
         :ref:`sqla:constraint_naming_conventions`
         ``name`` here can be ``None``, as the event listener will
         apply the name to the constraint object when it is associated
         with the table.
        :param table_name: String name of the target table.
        :param columns: a list of string column names to be applied to the
         primary key constraint.
        :param schema: Optional schema name to operate within.  To control
         quoting of the schema outside of the default behavior, use
         the SQLAlchemy construct
         :class:`~sqlalchemy.sql.elements.quoted_name`.
         .. versionadded:: 0.7.0 'schema' can now accept a
            :class:`~sqlalchemy.sql.elements.quoted_name` construct.
        .. versionchanged:: 0.8.0 The following positional argument names
           have been changed:
           * name -> constraint_name
           * cols -> columns
        """
        # 'schema' is passed positionally into the 4th __init__ parameter.
        op = cls(constraint_name, table_name, columns, schema)
        return operations.invoke(op)
    @classmethod
    def batch_create_primary_key(cls, operations, constraint_name, columns):
        """Issue a "create primary key" instruction using the
        current batch migration context.
        The batch form of this call omits the ``table_name`` and ``schema``
        arguments from the call.
        .. seealso::
            :meth:`.Operations.create_primary_key`
        """
        # table_name/schema come from the batch context rather than the call.
        op = cls(
            constraint_name, operations.impl.table_name, columns,
            schema=operations.impl.schema
        )
        return operations.invoke(op)
@Operations.register_operation("create_unique_constraint")
@BatchOperations.register_operation(
    "create_unique_constraint", "batch_create_unique_constraint")
class CreateUniqueConstraintOp(AddConstraintOp):
    """Represent a create unique constraint operation."""
    constraint_type = "unique"
    def __init__(
            self, constraint_name, table_name,
            columns, schema=None, _orig_constraint=None, **kw):
        self.constraint_name = constraint_name
        self.table_name = table_name
        self.columns = columns
        self.schema = schema
        # Original UniqueConstraint, when built from a live object.
        self._orig_constraint = _orig_constraint
        self.kw = kw
    @classmethod
    def from_constraint(cls, constraint):
        """Build this op from an existing UniqueConstraint."""
        constraint_table = sqla_compat._table_for_constraint(constraint)
        # Only forward deferrable/initially when set, so kw stays minimal.
        kw = {}
        if constraint.deferrable:
            kw['deferrable'] = constraint.deferrable
        if constraint.initially:
            kw['initially'] = constraint.initially
        return cls(
            constraint.name,
            constraint_table.name,
            [c.name for c in constraint.columns],
            schema=constraint_table.schema,
            _orig_constraint=constraint,
            **kw
        )
    def to_constraint(self, migration_context=None):
        """Return a UniqueConstraint, preferring the captured original."""
        if self._orig_constraint is not None:
            return self._orig_constraint
        schema_obj = schemaobj.SchemaObjects(migration_context)
        return schema_obj.unique_constraint(
            self.constraint_name, self.table_name, self.columns,
            schema=self.schema, **self.kw)
    @classmethod
    @util._with_legacy_names([
        ('name', 'constraint_name'),
        ('source', 'table_name'),
        ('local_cols', 'columns'),
    ])
    def create_unique_constraint(
            cls, operations, constraint_name, table_name, columns,
            schema=None, **kw):
        """Issue a "create unique constraint" instruction using the
        current migration context.
        e.g.::
            from alembic import op
            op.create_unique_constraint("uq_user_name", "user", ["name"])
        This internally generates a :class:`~sqlalchemy.schema.Table` object
        containing the necessary columns, then generates a new
        :class:`~sqlalchemy.schema.UniqueConstraint`
        object which it then associates with the
        :class:`~sqlalchemy.schema.Table`.
        Any event listeners associated with this action will be fired
        off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
        construct is ultimately used to generate the ALTER statement.
        :param name: Name of the unique constraint.  The name is necessary
         so that an ALTER statement can be emitted.  For setups that
         use an automated naming scheme such as that described at
         :ref:`sqla:constraint_naming_conventions`,
         ``name`` here can be ``None``, as the event listener will
         apply the name to the constraint object when it is associated
         with the table.
        :param table_name: String name of the source table.
        :param columns: a list of string column names in the
         source table.
        :param deferrable: optional bool. If set, emit DEFERRABLE or
         NOT DEFERRABLE when issuing DDL for this constraint.
        :param initially: optional string. If set, emit INITIALLY <value>
         when issuing DDL for this constraint.
        :param schema: Optional schema name to operate within.  To control
         quoting of the schema outside of the default behavior, use
         the SQLAlchemy construct
         :class:`~sqlalchemy.sql.elements.quoted_name`.
         .. versionadded:: 0.7.0 'schema' can now accept a
            :class:`~sqlalchemy.sql.elements.quoted_name` construct.
        .. versionchanged:: 0.8.0 The following positional argument names
           have been changed:
           * name -> constraint_name
           * source -> table_name
           * local_cols -> columns
        """
        op = cls(
            constraint_name, table_name, columns,
            schema=schema, **kw
        )
        return operations.invoke(op)
    @classmethod
    @util._with_legacy_names([('name', 'constraint_name')])
    def batch_create_unique_constraint(
            cls, operations, constraint_name, columns, **kw):
        """Issue a "create unique constraint" instruction using the
        current batch migration context.
        The batch form of this call omits the ``source`` and ``schema``
        arguments from the call.
        .. seealso::
            :meth:`.Operations.create_unique_constraint`
        .. versionchanged:: 0.8.0 The following positional argument names
           have been changed:
           * name -> constraint_name
        """
        # schema is injected from the batch context via kw.
        kw['schema'] = operations.impl.schema
        op = cls(
            constraint_name, operations.impl.table_name, columns,
            **kw
        )
        return operations.invoke(op)
@Operations.register_operation("create_foreign_key")
@BatchOperations.register_operation(
    "create_foreign_key", "batch_create_foreign_key")
class CreateForeignKeyOp(AddConstraintOp):
    """Represent a create foreign key constraint operation."""
    constraint_type = "foreignkey"
    def __init__(
            self, constraint_name, source_table, referent_table, local_cols,
            remote_cols, _orig_constraint=None, **kw):
        self.constraint_name = constraint_name
        self.source_table = source_table
        self.referent_table = referent_table
        self.local_cols = local_cols
        self.remote_cols = remote_cols
        # Original ForeignKeyConstraint, when built from a live object.
        self._orig_constraint = _orig_constraint
        self.kw = kw
    def to_diff_tuple(self):
        return ("add_fk", self.to_constraint())
    @classmethod
    def from_constraint(cls, constraint):
        """Build this op from an existing ForeignKeyConstraint."""
        # Only forward options that are actually set, so kw stays minimal.
        kw = {}
        if constraint.onupdate:
            kw['onupdate'] = constraint.onupdate
        if constraint.ondelete:
            kw['ondelete'] = constraint.ondelete
        if constraint.initially:
            kw['initially'] = constraint.initially
        if constraint.deferrable:
            kw['deferrable'] = constraint.deferrable
        if constraint.use_alter:
            kw['use_alter'] = constraint.use_alter
        # _fk_spec decomposes the constraint into both sides' schema,
        # table and column names in one call.
        source_schema, source_table, \
            source_columns, target_schema, \
            target_table, target_columns = sqla_compat._fk_spec(constraint)
        kw['source_schema'] = source_schema
        kw['referent_schema'] = target_schema
        return cls(
            constraint.name,
            source_table,
            target_table,
            source_columns,
            target_columns,
            _orig_constraint=constraint,
            **kw
        )
    def to_constraint(self, migration_context=None):
        """Return a ForeignKeyConstraint, preferring the captured original."""
        if self._orig_constraint is not None:
            return self._orig_constraint
        schema_obj = schemaobj.SchemaObjects(migration_context)
        return schema_obj.foreign_key_constraint(
            self.constraint_name,
            self.source_table, self.referent_table,
            self.local_cols, self.remote_cols,
            **self.kw)
    @classmethod
    @util._with_legacy_names([
        ('name', 'constraint_name'),
        ('source', 'source_table'),
        ('referent', 'referent_table'),
    ])
    def create_foreign_key(cls, operations, constraint_name,
                           source_table, referent_table, local_cols,
                           remote_cols, onupdate=None, ondelete=None,
                           deferrable=None, initially=None, match=None,
                           source_schema=None, referent_schema=None,
                           **dialect_kw):
        """Issue a "create foreign key" instruction using the
        current migration context.
        e.g.::
            from alembic import op
            op.create_foreign_key(
                        "fk_user_address", "address",
                        "user", ["user_id"], ["id"])
        This internally generates a :class:`~sqlalchemy.schema.Table` object
        containing the necessary columns, then generates a new
        :class:`~sqlalchemy.schema.ForeignKeyConstraint`
        object which it then associates with the
        :class:`~sqlalchemy.schema.Table`.
        Any event listeners associated with this action will be fired
        off normally.   The :class:`~sqlalchemy.schema.AddConstraint`
        construct is ultimately used to generate the ALTER statement.
        :param name: Name of the foreign key constraint.  The name is necessary
         so that an ALTER statement can be emitted.  For setups that
         use an automated naming scheme such as that described at
         :ref:`sqla:constraint_naming_conventions`,
         ``name`` here can be ``None``, as the event listener will
         apply the name to the constraint object when it is associated
         with the table.
        :param source_table: String name of the source table.
        :param referent_table: String name of the destination table.
        :param local_cols: a list of string column names in the
         source table.
        :param remote_cols: a list of string column names in the
         remote table.
        :param onupdate: Optional string. If set, emit ON UPDATE <value> when
         issuing DDL for this constraint. Typical values include CASCADE,
         DELETE and RESTRICT.
        :param ondelete: Optional string. If set, emit ON DELETE <value> when
         issuing DDL for this constraint. Typical values include CASCADE,
         DELETE and RESTRICT.
        :param deferrable: optional bool. If set, emit DEFERRABLE or NOT
         DEFERRABLE when issuing DDL for this constraint.
        :param source_schema: Optional schema name of the source table.
        :param referent_schema: Optional schema name of the destination table.
        .. versionchanged:: 0.8.0 The following positional argument names
           have been changed:
           * name -> constraint_name
           * source -> source_table
           * referent -> referent_table
        """
        op = cls(
            constraint_name,
            source_table, referent_table,
            local_cols, remote_cols,
            onupdate=onupdate, ondelete=ondelete,
            deferrable=deferrable,
            source_schema=source_schema,
            referent_schema=referent_schema,
            initially=initially, match=match,
            **dialect_kw
        )
        return operations.invoke(op)
    @classmethod
    @util._with_legacy_names([
        ('name', 'constraint_name'),
        ('referent', 'referent_table')
    ])
    def batch_create_foreign_key(
            cls, operations, constraint_name, referent_table,
            local_cols, remote_cols,
            referent_schema=None,
            onupdate=None, ondelete=None,
            deferrable=None, initially=None, match=None,
            **dialect_kw):
        """Issue a "create foreign key" instruction using the
        current batch migration context.
        The batch form of this call omits the ``source`` and ``source_schema``
        arguments from the call.
        e.g.::
            with batch_alter_table("address") as batch_op:
                batch_op.create_foreign_key(
                            "fk_user_address",
                            "user", ["user_id"], ["id"])
        .. seealso::
            :meth:`.Operations.create_foreign_key`
        .. versionchanged:: 0.8.0 The following positional argument names
           have been changed:
           * name -> constraint_name
           * referent -> referent_table
        """
        # Source table/schema come from the batch context rather than the call.
        op = cls(
            constraint_name,
            operations.impl.table_name, referent_table,
            local_cols, remote_cols,
            onupdate=onupdate, ondelete=ondelete,
            deferrable=deferrable,
            source_schema=operations.impl.schema,
            referent_schema=referent_schema,
            initially=initially, match=match,
            **dialect_kw
        )
        return operations.invoke(op)
@Operations.register_operation("create_check_constraint")
@BatchOperations.register_operation(
    "create_check_constraint", "batch_create_check_constraint")
class CreateCheckConstraintOp(AddConstraintOp):
    """Represent a create check constraint operation."""
    constraint_type = "check"
    def __init__(
            self, constraint_name, table_name,
            condition, schema=None, _orig_constraint=None, **kw):
        self.constraint_name = constraint_name
        self.table_name = table_name
        # SQL expression (string or SQLAlchemy construct) for the CHECK.
        self.condition = condition
        self.schema = schema
        # Original CheckConstraint, when built from a live object.
        self._orig_constraint = _orig_constraint
        self.kw = kw
    @classmethod
    def from_constraint(cls, constraint):
        """Build this op from an existing CheckConstraint."""
        constraint_table = sqla_compat._table_for_constraint(constraint)
        return cls(
            constraint.name,
            constraint_table.name,
            constraint.sqltext,
            schema=constraint_table.schema,
            _orig_constraint=constraint
        )
    def to_constraint(self, migration_context=None):
        """Return a CheckConstraint, preferring the captured original."""
        if self._orig_constraint is not None:
            return self._orig_constraint
        schema_obj = schemaobj.SchemaObjects(migration_context)
        return schema_obj.check_constraint(
            self.constraint_name, self.table_name,
            self.condition, schema=self.schema, **self.kw)
    @classmethod
    @util._with_legacy_names([
        ('name', 'constraint_name'),
        ('source', 'table_name')
    ])
    def create_check_constraint(
            cls, operations,
            constraint_name, table_name, condition,
            schema=None, **kw):
        """Issue a "create check constraint" instruction using the
        current migration context.
        e.g.::
            from alembic import op
            from sqlalchemy.sql import column, func
            op.create_check_constraint(
                "ck_user_name_len",
                "user",
                func.len(column('name')) > 5
            )
        CHECK constraints are usually against a SQL expression, so ad-hoc
        table metadata is usually needed.   The function will convert the given
        arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound
        to an anonymous table in order to emit the CREATE statement.
        :param name: Name of the check constraint.  The name is necessary
         so that an ALTER statement can be emitted.  For setups that
         use an automated naming scheme such as that described at
         :ref:`sqla:constraint_naming_conventions`,
         ``name`` here can be ``None``, as the event listener will
         apply the name to the constraint object when it is associated
         with the table.
        :param table_name: String name of the source table.
        :param condition: SQL expression that's the condition of the
         constraint. Can be a string or SQLAlchemy expression language
         structure.
        :param deferrable: optional bool. If set, emit DEFERRABLE or
         NOT DEFERRABLE when issuing DDL for this constraint.
        :param initially: optional string. If set, emit INITIALLY <value>
         when issuing DDL for this constraint.
        :param schema: Optional schema name to operate within.  To control
         quoting of the schema outside of the default behavior, use
         the SQLAlchemy construct
         :class:`~sqlalchemy.sql.elements.quoted_name`.
         .. versionadded:: 0.7.0 'schema' can now accept a
            :class:`~sqlalchemy.sql.elements.quoted_name` construct.
        .. versionchanged:: 0.8.0 The following positional argument names
           have been changed:
           * name -> constraint_name
           * source -> table_name
        """
        op = cls(constraint_name, table_name, condition, schema=schema, **kw)
        return operations.invoke(op)
    @classmethod
    @util._with_legacy_names([('name', 'constraint_name')])
    def batch_create_check_constraint(
            cls, operations, constraint_name, condition, **kw):
        """Issue a "create check constraint" instruction using the
        current batch migration context.
        The batch form of this call omits the ``source`` and ``schema``
        arguments from the call.
        .. seealso::
            :meth:`.Operations.create_check_constraint`
        .. versionchanged:: 0.8.0 The following positional argument names
           have been changed:
           * name -> constraint_name
        """
        # table_name/schema come from the batch context rather than the call.
        op = cls(
            constraint_name, operations.impl.table_name,
            condition, schema=operations.impl.schema, **kw)
        return operations.invoke(op)
@Operations.register_operation("create_index")
@BatchOperations.register_operation("create_index", "batch_create_index")
class CreateIndexOp(MigrateOperation):
    """Represent a create index operation."""
    def __init__(
            self, index_name, table_name, columns, schema=None,
            unique=False, _orig_index=None, **kw):
        self.index_name = index_name
        self.table_name = table_name
        self.columns = columns
        self.schema = schema
        self.unique = unique
        self.kw = kw
        # Original Index object, when built from a live object.
        self._orig_index = _orig_index
    def reverse(self):
        """Invert into the matching "drop index" operation."""
        return DropIndexOp.from_index(self.to_index())
    def to_diff_tuple(self):
        return ("add_index", self.to_index())
    @classmethod
    def from_index(cls, index):
        """Build this op from an existing Index."""
        return cls(
            index.name,
            index.table.name,
            sqla_compat._get_index_expressions(index),
            schema=index.table.schema,
            unique=index.unique,
            _orig_index=index,
            **index.kwargs
        )
    def to_index(self, migration_context=None):
        """Return an Index, preferring the captured original."""
        # NOTE: truthiness test (not "is not None"), matching upstream.
        if self._orig_index:
            return self._orig_index
        schema_obj = schemaobj.SchemaObjects(migration_context)
        return schema_obj.index(
            self.index_name, self.table_name, self.columns, schema=self.schema,
            unique=self.unique, **self.kw)
    @classmethod
    @util._with_legacy_names([('name', 'index_name')])
    def create_index(
            cls, operations,
            index_name, table_name, columns, schema=None,
            unique=False, **kw):
        """Issue a "create index" instruction using the current
        migration context.
        e.g.::
            from alembic import op
            op.create_index('ik_test', 't1', ['foo', 'bar'])
        Functional indexes can be produced by using the
        :func:`sqlalchemy.sql.expression.text` construct::
            from alembic import op
            from sqlalchemy import text
            op.create_index('ik_test', 't1', [text('lower(foo)')])
        .. versionadded:: 0.6.7 support for making use of the
           :func:`~sqlalchemy.sql.expression.text` construct in
           conjunction with
           :meth:`.Operations.create_index` in
           order to produce functional expressions within CREATE INDEX.
        :param index_name: name of the index.
        :param table_name: name of the owning table.
        :param columns: a list consisting of string column names and/or
         :func:`~sqlalchemy.sql.expression.text` constructs.
        :param schema: Optional schema name to operate within.  To control
         quoting of the schema outside of the default behavior, use
         the SQLAlchemy construct
         :class:`~sqlalchemy.sql.elements.quoted_name`.
         .. versionadded:: 0.7.0 'schema' can now accept a
            :class:`~sqlalchemy.sql.elements.quoted_name` construct.
        :param unique: If True, create a unique index.
        :param quote:
            Force quoting of this column's name on or off, corresponding
            to ``True`` or ``False``. When left at its default
            of ``None``, the column identifier will be quoted according to
            whether the name is case sensitive (identifiers with at least one
            upper case character are treated as case sensitive), or if it's a
            reserved word. This flag is only needed to force quoting of a
            reserved word which is not known by the SQLAlchemy dialect.
        :param \**kw: Additional keyword arguments not mentioned above are
            dialect specific, and passed in the form
            ``<dialectname>_<argname>``.
            See the documentation regarding an individual dialect at
            :ref:`dialect_toplevel` for detail on documented arguments.
        .. versionchanged:: 0.8.0 The following positional argument names
           have been changed:
           * name -> index_name
        """
        op = cls(
            index_name, table_name, columns, schema=schema,
            unique=unique, **kw
        )
        return operations.invoke(op)
    @classmethod
    def batch_create_index(cls, operations, index_name, columns, **kw):
        """Issue a "create index" instruction using the
        current batch migration context.
        .. seealso::
            :meth:`.Operations.create_index`
        """
        # table_name/schema come from the batch context rather than the call.
        op = cls(
            index_name, operations.impl.table_name, columns,
            schema=operations.impl.schema, **kw
        )
        return operations.invoke(op)
@Operations.register_operation("drop_index")
@BatchOperations.register_operation("drop_index", "batch_drop_index")
class DropIndexOp(MigrateOperation):
    """Represent a drop index operation."""

    def __init__(
            self, index_name, table_name=None, schema=None, _orig_index=None):
        self.index_name = index_name
        self.table_name = table_name
        self.schema = schema
        # the original Index object, when known; required for reverse()
        self._orig_index = _orig_index

    def to_diff_tuple(self):
        # autogenerate diff format: ("remove_index", Index)
        return ("remove_index", self.to_index())

    def reverse(self):
        """Return the inverse operation (re-create the same index)."""
        if self._orig_index is not None:
            return CreateIndexOp.from_index(self._orig_index)
        raise ValueError(
            "operation is not reversible; "
            "original index is not present")

    @classmethod
    def from_index(cls, index):
        """Build a :class:`.DropIndexOp` from an existing Index object."""
        return cls(
            index.name,
            index.table.name,
            schema=index.table.schema,
            _orig_index=index,
        )

    def to_index(self, migration_context=None):
        """Return an Index object representing this operation."""
        if self._orig_index is None:
            schema_obj = schemaobj.SchemaObjects(migration_context)
            # need a dummy column name here since SQLAlchemy
            # 0.7.6 and further raises on Index with no columns
            return schema_obj.index(
                self.index_name, self.table_name, ['x'], schema=self.schema)
        return self._orig_index

    @classmethod
    @util._with_legacy_names([
        ('name', 'index_name'),
        ('tablename', 'table_name')
    ])
    def drop_index(cls, operations, index_name, table_name=None, schema=None):
        """Issue a "drop index" instruction using the current
        migration context.

        e.g.::

            drop_index("accounts")

        :param index_name: name of the index.
        :param table_name: name of the owning table.  Some
         backends such as Microsoft SQL Server require this.
        :param schema: Optional schema name to operate within.  To control
         quoting of the schema outside of the default behavior, use
         the SQLAlchemy construct
         :class:`~sqlalchemy.sql.elements.quoted_name`.

         .. versionadded:: 0.7.0 'schema' can now accept a
            :class:`~sqlalchemy.sql.elements.quoted_name` construct.

        .. versionchanged:: 0.8.0 The following positional argument names
           have been changed:

           * name -> index_name

        """
        return operations.invoke(
            cls(index_name, table_name=table_name, schema=schema))

    @classmethod
    @util._with_legacy_names([('name', 'index_name')])
    def batch_drop_index(cls, operations, index_name, **kw):
        """Issue a "drop index" instruction using the
        current batch migration context.

        .. seealso::

            :meth:`.Operations.drop_index`

        .. versionchanged:: 0.8.0 The following positional argument names
           have been changed:

           * name -> index_name

        """
        # table name and schema come from the batch context
        return operations.invoke(
            cls(
                index_name,
                table_name=operations.impl.table_name,
                schema=operations.impl.schema,
            )
        )
@Operations.register_operation("create_table")
class CreateTableOp(MigrateOperation):
    """Represent a create table operation."""

    def __init__(
            self, table_name, columns, schema=None, _orig_table=None, **kw):
        # name of the table to create
        self.table_name = table_name
        # collection of Column and Constraint objects making up the table
        self.columns = columns
        # optional schema name (None -> default schema)
        self.schema = schema
        # extra keyword arguments forwarded to the Table construct
        self.kw = kw
        # original Table object, when this op was derived from one;
        # lets to_table() return it without rebuilding
        self._orig_table = _orig_table

    def reverse(self):
        # the inverse of CREATE TABLE is DROP TABLE of the same table
        return DropTableOp.from_table(self.to_table())

    def to_diff_tuple(self):
        # autogenerate diff format: ("add_table", Table)
        return ("add_table", self.to_table())

    @classmethod
    def from_table(cls, table):
        """Build a :class:`.CreateTableOp` from an existing Table object."""
        return cls(
            table.name,
            # capture both the columns and the table-level constraints
            list(table.c) + list(table.constraints),
            schema=table.schema,
            _orig_table=table,
            **table.kwargs
        )

    def to_table(self, migration_context=None):
        """Return a Table object representing this operation."""
        if self._orig_table is not None:
            return self._orig_table
        schema_obj = schemaobj.SchemaObjects(migration_context)
        return schema_obj.table(
            self.table_name, *self.columns, schema=self.schema, **self.kw
        )

    @classmethod
    @util._with_legacy_names([('name', 'table_name')])
    def create_table(cls, operations, table_name, *columns, **kw):
        r"""Issue a "create table" instruction using the current migration
        context.

        This directive receives an argument list similar to that of the
        traditional :class:`sqlalchemy.schema.Table` construct, but without the
        metadata::

            from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column
            from alembic import op

            op.create_table(
                'account',
                Column('id', INTEGER, primary_key=True),
                Column('name', VARCHAR(50), nullable=False),
                Column('description', NVARCHAR(200)),
                Column('timestamp', TIMESTAMP, server_default=func.now())
            )

        Note that :meth:`.create_table` accepts
        :class:`~sqlalchemy.schema.Column`
        constructs directly from the SQLAlchemy library.  In particular,
        default values to be created on the database side are
        specified using the ``server_default`` parameter, and not
        ``default`` which only specifies Python-side defaults::

            from alembic import op
            from sqlalchemy import Column, TIMESTAMP, func

            # specify "DEFAULT NOW" along with the "timestamp" column
            op.create_table('account',
                Column('id', INTEGER, primary_key=True),
                Column('timestamp', TIMESTAMP, server_default=func.now())
            )

        The function also returns a newly created
        :class:`~sqlalchemy.schema.Table` object, corresponding to the table
        specification given, which is suitable for
        immediate SQL operations, in particular
        :meth:`.Operations.bulk_insert`::

            from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column
            from alembic import op

            account_table = op.create_table(
                'account',
                Column('id', INTEGER, primary_key=True),
                Column('name', VARCHAR(50), nullable=False),
                Column('description', NVARCHAR(200)),
                Column('timestamp', TIMESTAMP, server_default=func.now())
            )

            op.bulk_insert(
                account_table,
                [
                    {"name": "A1", "description": "account 1"},
                    {"name": "A2", "description": "account 2"},
                ]
            )

        .. versionadded:: 0.7.0

        :param table_name: Name of the table
        :param \*columns: collection of :class:`~sqlalchemy.schema.Column`
         objects within
         the table, as well as optional :class:`~sqlalchemy.schema.Constraint`
         objects
         and :class:`~.sqlalchemy.schema.Index` objects.
        :param schema: Optional schema name to operate within.  To control
         quoting of the schema outside of the default behavior, use
         the SQLAlchemy construct
         :class:`~sqlalchemy.sql.elements.quoted_name`.

         .. versionadded:: 0.7.0 'schema' can now accept a
            :class:`~sqlalchemy.sql.elements.quoted_name` construct.

        :param \**kw: Other keyword arguments are passed to the underlying
         :class:`sqlalchemy.schema.Table` object created for the command.

        :return: the :class:`~sqlalchemy.schema.Table` object corresponding
         to the parameters given.

        .. versionadded:: 0.7.0 - the :class:`~sqlalchemy.schema.Table`
           object is returned.

        .. versionchanged:: 0.8.0 The following positional argument names
           have been changed:

           * name -> table_name

        """
        op = cls(table_name, columns, **kw)
        return operations.invoke(op)
@Operations.register_operation("drop_table")
class DropTableOp(MigrateOperation):
    """Represent a drop table operation."""

    def __init__(
            self, table_name, schema=None, table_kw=None, _orig_table=None):
        # name of the table to drop
        self.table_name = table_name
        # optional schema name (None -> default schema)
        self.schema = schema
        # keyword args forwarded to the Table construct in to_table()
        self.table_kw = table_kw or {}
        # original Table object, kept so the operation can be reversed
        self._orig_table = _orig_table

    def to_diff_tuple(self):
        # autogenerate diff format: ("remove_table", Table)
        return ("remove_table", self.to_table())

    def reverse(self):
        """Return the inverse operation (re-create the same table).

        Raises ``ValueError`` when the original Table was not captured,
        since the full column set is then unknown.
        """
        if self._orig_table is None:
            raise ValueError(
                "operation is not reversible; "
                "original table is not present")
        return CreateTableOp.from_table(self._orig_table)

    @classmethod
    def from_table(cls, table):
        """Build a :class:`.DropTableOp` from an existing Table object."""
        return cls(table.name, schema=table.schema, _orig_table=table)

    def to_table(self, migration_context=None):
        """Return a Table object representing this operation."""
        if self._orig_table is not None:
            return self._orig_table
        schema_obj = schemaobj.SchemaObjects(migration_context)
        return schema_obj.table(
            self.table_name,
            schema=self.schema,
            **self.table_kw)

    @classmethod
    @util._with_legacy_names([('name', 'table_name')])
    def drop_table(cls, operations, table_name, schema=None, **kw):
        # NOTE: raw docstring -- "\**kw" is reST markup, not an escape
        # sequence; a non-raw string here produces an invalid-escape
        # warning on modern Python.
        r"""Issue a "drop table" instruction using the current
        migration context.

        e.g.::

            drop_table("accounts")

        :param table_name: Name of the table
        :param schema: Optional schema name to operate within.  To control
         quoting of the schema outside of the default behavior, use
         the SQLAlchemy construct
         :class:`~sqlalchemy.sql.elements.quoted_name`.

         .. versionadded:: 0.7.0 'schema' can now accept a
            :class:`~sqlalchemy.sql.elements.quoted_name` construct.

        :param \**kw: Other keyword arguments are passed to the underlying
         :class:`sqlalchemy.schema.Table` object created for the command.

        .. versionchanged:: 0.8.0 The following positional argument names
           have been changed:

           * name -> table_name

        """
        op = cls(table_name, schema=schema, table_kw=kw)
        # return the invoke() result for consistency with the other
        # operation directives (previously the result was discarded)
        return operations.invoke(op)
class AlterTableOp(MigrateOperation):
    """Represent an alter table operation.

    Common base for operations that target a single existing table,
    holding the table name and optional schema name.
    """

    def __init__(self, table_name, schema=None):
        # name of the table being altered
        self.table_name = table_name
        # optional schema name (None -> default schema)
        self.schema = schema
@Operations.register_operation("rename_table")
class RenameTableOp(AlterTableOp):
    """Represent a rename table operation."""

    def __init__(self, old_table_name, new_table_name, schema=None):
        # the base class stores the *old* name as table_name
        super(RenameTableOp, self).__init__(old_table_name, schema=schema)
        # the name the table is being renamed to
        self.new_table_name = new_table_name

    @classmethod
    def rename_table(
            cls, operations, old_table_name, new_table_name, schema=None):
        """Emit an ALTER TABLE to rename a table.

        :param old_table_name: old name.
        :param new_table_name: new name.
        :param schema: Optional schema name to operate within.  To control
         quoting of the schema outside of the default behavior, use
         the SQLAlchemy construct
         :class:`~sqlalchemy.sql.elements.quoted_name`.

         .. versionadded:: 0.7.0 'schema' can now accept a
            :class:`~sqlalchemy.sql.elements.quoted_name` construct.

        """
        op = cls(old_table_name, new_table_name, schema=schema)
        return operations.invoke(op)
@Operations.register_operation("alter_column")
@BatchOperations.register_operation("alter_column", "batch_alter_column")
class AlterColumnOp(AlterTableOp):
    """Represent an alter column operation."""

    def __init__(
            self, table_name, column_name, schema=None,
            existing_type=None,
            existing_server_default=False,
            existing_nullable=None,
            modify_nullable=None,
            modify_server_default=False,
            modify_name=None,
            modify_type=None,
            **kw
    ):
        super(AlterColumnOp, self).__init__(table_name, schema=schema)
        # column being altered, as named before the operation
        self.column_name = column_name
        # existing_* describe the column's state before the change;
        # modify_* describe what is being changed.
        # NOTE: ``False`` (not None) is the "unspecified" sentinel for the
        # server_default fields, since None is a legal default value --
        # see the ``is not False`` tests below.
        self.existing_type = existing_type
        self.existing_server_default = existing_server_default
        self.existing_nullable = existing_nullable
        self.modify_nullable = modify_nullable
        self.modify_server_default = modify_server_default
        self.modify_name = modify_name
        self.modify_type = modify_type
        # additional dialect-specific or future modify_/existing_ args
        self.kw = kw

    def to_diff_tuple(self):
        """Return this op as a list of autogenerate-style diff tuples,
        one per kind of modification present."""
        col_diff = []
        schema, tname, cname = self.schema, self.table_name, self.column_name

        if self.modify_type is not None:
            col_diff.append(
                ("modify_type", schema, tname, cname,
                 {
                     "existing_nullable": self.existing_nullable,
                     "existing_server_default": self.existing_server_default,
                 },
                 self.existing_type,
                 self.modify_type)
            )

        if self.modify_nullable is not None:
            col_diff.append(
                ("modify_nullable", schema, tname, cname,
                 {
                     "existing_type": self.existing_type,
                     "existing_server_default": self.existing_server_default
                 },
                 self.existing_nullable,
                 self.modify_nullable)
            )

        if self.modify_server_default is not False:
            col_diff.append(
                ("modify_default", schema, tname, cname,
                 {
                     "existing_nullable": self.existing_nullable,
                     "existing_type": self.existing_type
                 },
                 self.existing_server_default,
                 self.modify_server_default)
            )

        return col_diff

    def has_changes(self):
        """Return True if this op actually modifies anything."""
        hc1 = self.modify_nullable is not None or \
            self.modify_server_default is not False or \
            self.modify_type is not None
        if hc1:
            return True
        # also treat any extra ``modify_*`` keyword as a change
        for kw in self.kw:
            if kw.startswith('modify_'):
                return True
        else:
            # for/else: reached only when the loop finds no modify_* key
            return False

    def reverse(self):
        """Return the inverse operation, produced by swapping each
        ``existing_<x>`` value with its ``modify_<x>`` counterpart."""

        kw = self.kw.copy()
        kw['existing_type'] = self.existing_type
        kw['existing_nullable'] = self.existing_nullable
        kw['existing_server_default'] = self.existing_server_default
        if self.modify_type is not None:
            kw['modify_type'] = self.modify_type
        if self.modify_nullable is not None:
            kw['modify_nullable'] = self.modify_nullable
        if self.modify_server_default is not False:
            kw['modify_server_default'] = self.modify_server_default

        # TODO: make this a little simpler
        # collect the attribute names that appear with either prefix
        all_keys = set(m.group(1) for m in [
            re.match(r'^(?:existing_|modify_)(.+)$', k)
            for k in kw
        ] if m)

        # swap existing_<k> <-> modify_<k> for every modified attribute
        for k in all_keys:
            if 'modify_%s' % k in kw:
                swap = kw['existing_%s' % k]
                kw['existing_%s' % k] = kw['modify_%s' % k]
                kw['modify_%s' % k] = swap

        return self.__class__(
            self.table_name, self.column_name, schema=self.schema,
            **kw
        )

    @classmethod
    @util._with_legacy_names([('name', 'new_column_name')])
    def alter_column(
            cls, operations, table_name, column_name,
            nullable=None,
            server_default=False,
            new_column_name=None,
            type_=None,
            existing_type=None,
            existing_server_default=False,
            existing_nullable=None,
            schema=None, **kw
    ):
        """Issue an "alter column" instruction using the
        current migration context.

        Generally, only that aspect of the column which
        is being changed, i.e. name, type, nullability,
        default, needs to be specified.  Multiple changes
        can also be specified at once and the backend should
        "do the right thing", emitting each change either
        separately or together as the backend allows.

        MySQL has special requirements here, since MySQL
        cannot ALTER a column without a full specification.
        When producing MySQL-compatible migration files,
        it is recommended that the ``existing_type``,
        ``existing_server_default``, and ``existing_nullable``
        parameters be present, if not being altered.

        Type changes which are against the SQLAlchemy
        "schema" types :class:`~sqlalchemy.types.Boolean`
        and  :class:`~sqlalchemy.types.Enum` may also
        add or drop constraints which accompany those
        types on backends that don't support them natively.
        The ``existing_server_default`` argument is
        used in this case as well to remove a previous
        constraint.

        :param table_name: string name of the target table.
        :param column_name: string name of the target column,
         as it exists before the operation begins.
        :param nullable: Optional; specify ``True`` or ``False``
         to alter the column's nullability.
        :param server_default: Optional; specify a string
         SQL expression, :func:`~sqlalchemy.sql.expression.text`,
         or :class:`~sqlalchemy.schema.DefaultClause` to indicate
         an alteration to the column's default value.
         Set to ``None`` to have the default removed.
        :param new_column_name: Optional; specify a string name here to
         indicate the new name within a column rename operation.
        :param ``type_``: Optional; a :class:`~sqlalchemy.types.TypeEngine`
         type object to specify a change to the column's type.
         For SQLAlchemy types that also indicate a constraint (i.e.
         :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`),
         the constraint is also generated.
        :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column;
         currently understood by the MySQL dialect.
        :param existing_type: Optional; a
         :class:`~sqlalchemy.types.TypeEngine`
         type object to specify the previous type.   This
         is required for all MySQL column alter operations that
         don't otherwise specify a new type, as well as for
         when nullability is being changed on a SQL Server
         column.  It is also used if the type is a so-called
         SQLlchemy "schema" type which may define a constraint (i.e.
         :class:`~sqlalchemy.types.Boolean`,
         :class:`~sqlalchemy.types.Enum`),
         so that the constraint can be dropped.
        :param existing_server_default: Optional; The existing
         default value of the column.   Required on MySQL if
         an existing default is not being changed; else MySQL
         removes the default.
        :param existing_nullable: Optional; the existing nullability
         of the column.  Required on MySQL if the existing nullability
         is not being changed; else MySQL sets this to NULL.
        :param existing_autoincrement: Optional; the existing autoincrement
         of the column.  Used for MySQL's system of altering a column
         that specifies ``AUTO_INCREMENT``.
        :param schema: Optional schema name to operate within.  To control
         quoting of the schema outside of the default behavior, use
         the SQLAlchemy construct
         :class:`~sqlalchemy.sql.elements.quoted_name`.

         .. versionadded:: 0.7.0 'schema' can now accept a
            :class:`~sqlalchemy.sql.elements.quoted_name` construct.

        """

        alt = cls(
            table_name, column_name, schema=schema,
            existing_type=existing_type,
            existing_server_default=existing_server_default,
            existing_nullable=existing_nullable,
            modify_name=new_column_name,
            modify_type=type_,
            modify_server_default=server_default,
            modify_nullable=nullable,
            **kw
        )

        return operations.invoke(alt)

    @classmethod
    def batch_alter_column(
            cls, operations, column_name,
            nullable=None,
            server_default=False,
            new_column_name=None,
            type_=None,
            existing_type=None,
            existing_server_default=False,
            existing_nullable=None,
            **kw
    ):
        """Issue an "alter column" instruction using the current
        batch migration context.

        .. seealso::

            :meth:`.Operations.alter_column`

        """
        alt = cls(
            operations.impl.table_name, column_name,
            schema=operations.impl.schema,
            existing_type=existing_type,
            existing_server_default=existing_server_default,
            existing_nullable=existing_nullable,
            modify_name=new_column_name,
            modify_type=type_,
            modify_server_default=server_default,
            modify_nullable=nullable,
            **kw
        )

        return operations.invoke(alt)
@Operations.register_operation("add_column")
@BatchOperations.register_operation("add_column", "batch_add_column")
class AddColumnOp(AlterTableOp):
    """Represent an add column operation."""

    def __init__(self, table_name, column, schema=None):
        super(AddColumnOp, self).__init__(table_name, schema=schema)
        # the Column object to be added
        self.column = column

    def reverse(self):
        # the inverse of ADD COLUMN is DROP COLUMN of the same column
        return DropColumnOp.from_column_and_tablename(
            self.schema, self.table_name, self.column)

    def to_diff_tuple(self):
        # autogenerate diff format: ("add_column", schema, table, Column)
        return ("add_column", self.schema, self.table_name, self.column)

    def to_column(self):
        """Return the Column object represented by this operation."""
        return self.column

    @classmethod
    def from_column(cls, col):
        """Build an :class:`.AddColumnOp` from a Column already attached
        to a Table (table name/schema are read off the column)."""
        return cls(col.table.name, col, schema=col.table.schema)

    @classmethod
    def from_column_and_tablename(cls, schema, tname, col):
        """Build an :class:`.AddColumnOp` from an explicit schema/table
        name plus a Column object."""
        return cls(tname, col, schema=schema)

    @classmethod
    def add_column(cls, operations, table_name, column, schema=None):
        """Issue an "add column" instruction using the current
        migration context.

        e.g.::

            from alembic import op
            from sqlalchemy import Column, String

            op.add_column('organization',
                Column('name', String())
            )

        The provided :class:`~sqlalchemy.schema.Column` object can also
        specify a :class:`~sqlalchemy.schema.ForeignKey`, referencing
        a remote table name.  Alembic will automatically generate a stub
        "referenced" table and emit a second ALTER statement in order
        to add the constraint separately::

            from alembic import op
            from sqlalchemy import Column, INTEGER, ForeignKey

            op.add_column('organization',
                Column('account_id', INTEGER, ForeignKey('accounts.id'))
            )

        Note that this statement uses the :class:`~sqlalchemy.schema.Column`
        construct as is from the SQLAlchemy library.  In particular,
        default values to be created on the database side are
        specified using the ``server_default`` parameter, and not
        ``default`` which only specifies Python-side defaults::

            from alembic import op
            from sqlalchemy import Column, TIMESTAMP, func

            # specify "DEFAULT NOW" along with the column add
            op.add_column('account',
                Column('timestamp', TIMESTAMP, server_default=func.now())
            )

        :param table_name: String name of the parent table.
        :param column: a :class:`sqlalchemy.schema.Column` object
         representing the new column.
        :param schema: Optional schema name to operate within.  To control
         quoting of the schema outside of the default behavior, use
         the SQLAlchemy construct
         :class:`~sqlalchemy.sql.elements.quoted_name`.

         .. versionadded:: 0.7.0 'schema' can now accept a
            :class:`~sqlalchemy.sql.elements.quoted_name` construct.

        """
        op = cls(table_name, column, schema=schema)
        return operations.invoke(op)

    @classmethod
    def batch_add_column(cls, operations, column):
        """Issue an "add column" instruction using the current
        batch migration context.

        .. seealso::

            :meth:`.Operations.add_column`

        """
        # table name and schema come from the batch context
        op = cls(
            operations.impl.table_name, column,
            schema=operations.impl.schema
        )
        return operations.invoke(op)
@Operations.register_operation("drop_column")
@BatchOperations.register_operation("drop_column", "batch_drop_column")
class DropColumnOp(AlterTableOp):
    """Represent a drop column operation."""

    def __init__(
            self, table_name, column_name, schema=None,
            _orig_column=None, **kw):
        super(DropColumnOp, self).__init__(table_name, schema=schema)
        # name of the column to drop
        self.column_name = column_name
        # dialect-specific keyword arguments (e.g. mssql_drop_check)
        self.kw = kw
        # original Column object, kept so the operation can be reversed
        self._orig_column = _orig_column

    def to_diff_tuple(self):
        # autogenerate diff format: ("remove_column", schema, table, Column)
        return (
            "remove_column", self.schema, self.table_name, self.to_column())

    def reverse(self):
        """Return the inverse operation (re-add the same column).

        Raises ``ValueError`` when the original Column was not captured,
        since its full definition is then unknown.
        """
        if self._orig_column is None:
            raise ValueError(
                "operation is not reversible; "
                "original column is not present")

        return AddColumnOp.from_column_and_tablename(
            self.schema, self.table_name, self._orig_column)

    @classmethod
    def from_column_and_tablename(cls, schema, tname, col):
        """Build a :class:`.DropColumnOp` from an explicit schema/table
        name plus the Column object being removed."""
        return cls(tname, col.name, schema=schema, _orig_column=col)

    def to_column(self, migration_context=None):
        """Return a Column object representing this operation.

        When the original column is unknown, a stub column of NULLTYPE
        is produced -- only the name is meaningful for a drop.
        """
        if self._orig_column is not None:
            return self._orig_column
        schema_obj = schemaobj.SchemaObjects(migration_context)
        return schema_obj.column(self.column_name, NULLTYPE)

    @classmethod
    def drop_column(
            cls, operations, table_name, column_name, schema=None, **kw):
        """Issue a "drop column" instruction using the current
        migration context.

        e.g.::

            drop_column('organization', 'account_id')

        :param table_name: name of table
        :param column_name: name of column
        :param schema: Optional schema name to operate within.  To control
         quoting of the schema outside of the default behavior, use
         the SQLAlchemy construct
         :class:`~sqlalchemy.sql.elements.quoted_name`.

         .. versionadded:: 0.7.0 'schema' can now accept a
            :class:`~sqlalchemy.sql.elements.quoted_name` construct.

        :param mssql_drop_check: Optional boolean.  When ``True``, on
         Microsoft SQL Server only, first
         drop the CHECK constraint on the column using a
         SQL-script-compatible
         block that selects into a @variable from sys.check_constraints,
         then exec's a separate DROP CONSTRAINT for that constraint.
        :param mssql_drop_default: Optional boolean.  When ``True``, on
         Microsoft SQL Server only, first
         drop the DEFAULT constraint on the column using a
         SQL-script-compatible
         block that selects into a @variable from sys.default_constraints,
         then exec's a separate DROP CONSTRAINT for that default.
        :param mssql_drop_foreign_key: Optional boolean.  When ``True``, on
         Microsoft SQL Server only, first
         drop a single FOREIGN KEY constraint on the column using a
         SQL-script-compatible
         block that selects into a @variable from
         sys.foreign_keys/sys.foreign_key_columns,
         then exec's a separate DROP CONSTRAINT for that default.  Only
         works if the column has exactly one FK constraint which refers to
         it, at the moment.

         .. versionadded:: 0.6.2

        """
        op = cls(table_name, column_name, schema=schema, **kw)
        return operations.invoke(op)

    @classmethod
    def batch_drop_column(cls, operations, column_name):
        """Issue a "drop column" instruction using the current
        batch migration context.

        .. seealso::

            :meth:`.Operations.drop_column`

        """
        # table name and schema come from the batch context
        op = cls(
            operations.impl.table_name, column_name,
            schema=operations.impl.schema)
        return operations.invoke(op)
@Operations.register_operation("bulk_insert")
class BulkInsertOp(MigrateOperation):
    """Represent a bulk insert operation."""

    def __init__(self, table, rows, multiinsert=True):
        # target Table (or TableClause) of the INSERT
        self.table = table
        # list of dictionaries, one per row
        self.rows = rows
        # when True, use executemany()-style insert; see bulk_insert()
        self.multiinsert = multiinsert

    @classmethod
    def bulk_insert(cls, operations, table, rows, multiinsert=True):
        """Issue a "bulk insert" operation using the current
        migration context.

        This provides a means of representing an INSERT of multiple rows
        which works equally well in the context of executing on a live
        connection as well as that of generating a SQL script.   In the
        case of a SQL script, the values are rendered inline into the
        statement.

        e.g.::

            from alembic import op
            from datetime import date
            from sqlalchemy.sql import table, column
            from sqlalchemy import String, Integer, Date

            # Create an ad-hoc table to use for the insert statement.
            accounts_table = table('account',
                column('id', Integer),
                column('name', String),
                column('create_date', Date)
            )

            op.bulk_insert(accounts_table,
                [
                    {'id':1, 'name':'John Smith',
                            'create_date':date(2010, 10, 5)},
                    {'id':2, 'name':'Ed Williams',
                            'create_date':date(2007, 5, 27)},
                    {'id':3, 'name':'Wendy Jones',
                            'create_date':date(2008, 8, 15)},
                ]
            )

        When using --sql mode, some datatypes may not render inline
        automatically, such as dates and other special types.   When this
        issue is present, :meth:`.Operations.inline_literal` may be used::

            op.bulk_insert(accounts_table,
                [
                    {'id':1, 'name':'John Smith',
                            'create_date':op.inline_literal("2010-10-05")},
                    {'id':2, 'name':'Ed Williams',
                            'create_date':op.inline_literal("2007-05-27")},
                    {'id':3, 'name':'Wendy Jones',
                            'create_date':op.inline_literal("2008-08-15")},
                ],
                multiinsert=False
            )

        When using :meth:`.Operations.inline_literal` in conjunction with
        :meth:`.Operations.bulk_insert`, in order for the statement to work
        in "online" (e.g. non --sql) mode, the
        :paramref:`~.Operations.bulk_insert.multiinsert`
        flag should be set to ``False``, which will have the effect of
        individual INSERT statements being emitted to the database, each
        with a distinct VALUES clause, so that the "inline" values can
        still be rendered, rather than attempting to pass the values
        as bound parameters.

        .. versionadded:: 0.6.4 :meth:`.Operations.inline_literal` can now
           be used with :meth:`.Operations.bulk_insert`, and the
           :paramref:`~.Operations.bulk_insert.multiinsert` flag has
           been added to assist in this usage when running in "online"
           mode.

        :param table: a table object which represents the target of the INSERT.
        :param rows: a list of dictionaries indicating rows.
        :param multiinsert: when at its default of True and --sql mode is not
           enabled, the INSERT statement will be executed using
           "executemany()" style, where all elements in the list of
           dictionaries are passed as bound parameters in a single
           list.   Setting this to False results in individual INSERT
           statements being emitted per parameter set, and is needed
           in those cases where non-literal values are present in the
           parameter sets.

           .. versionadded:: 0.6.4

        """
        op = cls(table, rows, multiinsert=multiinsert)
        operations.invoke(op)
@Operations.register_operation("execute")
class ExecuteSQLOp(MigrateOperation):
    """Represent an execute SQL operation."""

    def __init__(self, sqltext, execution_options=None):
        # the SQL to execute: a string or any SQLAlchemy executable
        self.sqltext = sqltext
        # optional dict forwarded to Connection.execution_options()
        self.execution_options = execution_options

    @classmethod
    def execute(cls, operations, sqltext, execution_options=None):
        """Execute the given SQL using the current migration context.

        In a SQL script context, the statement is emitted directly to the
        output stream.   There is *no* return result, however, as this
        function is oriented towards generating a change script
        that can run in "offline" mode.  For full interaction
        with a connected database, use the "bind" available
        from the context::

            from alembic import op
            connection = op.get_bind()

        Also note that any parameterized statement here *will not work*
        in offline mode - INSERT, UPDATE and DELETE statements which refer
        to literal values would need to render
        inline expressions.   For simple use cases, the
        :meth:`.inline_literal` function can be used for **rudimentary**
        quoting of string values.  For "bulk" inserts, consider using
        :meth:`.bulk_insert`.

        For example, to emit an UPDATE statement which is equally
        compatible with both online and offline mode::

            from sqlalchemy.sql import table, column
            from sqlalchemy import String
            from alembic import op

            account = table('account',
                column('name', String)
            )
            op.execute(
                account.update().\\
                    where(account.c.name==op.inline_literal('account 1')).\\
                    values({'name':op.inline_literal('account 2')})
                    )

        Note above we also used the SQLAlchemy
        :func:`sqlalchemy.sql.expression.table`
        and :func:`sqlalchemy.sql.expression.column` constructs to
        make a brief, ad-hoc table construct just for our UPDATE
        statement.  A full :class:`~sqlalchemy.schema.Table` construct
        of course works perfectly fine as well, though note it's a
        recommended practice to at least ensure the definition of a
        table is self-contained within the migration script, rather
        than imported from a module that may break compatibility with
        older migrations.

        :param sql: Any legal SQLAlchemy expression, including:

        * a string
        * a :func:`sqlalchemy.sql.expression.text` construct.
        * a :func:`sqlalchemy.sql.expression.insert` construct.
        * a :func:`sqlalchemy.sql.expression.update`,
          :func:`sqlalchemy.sql.expression.insert`,
          or :func:`sqlalchemy.sql.expression.delete`  construct.
        * Pretty much anything that's "executable" as described
          in :ref:`sqlexpression_toplevel`.

        :param execution_options: Optional dictionary of
         execution options, will be passed to
         :meth:`sqlalchemy.engine.Connection.execution_options`.

        """
        op = cls(sqltext, execution_options=execution_options)
        return operations.invoke(op)
class OpContainer(MigrateOperation):
    """Represent a sequence of child migration operations."""

    def __init__(self, ops=()):
        self.ops = ops

    def is_empty(self):
        """Return True when this container holds no operations."""
        return not self.ops

    def as_diffs(self):
        """Return the contained operations as a flat list of
        autogenerate-style diff tuples."""
        return list(OpContainer._ops_as_diffs(self))

    @classmethod
    def _ops_as_diffs(cls, migrations):
        # depth-first flattening: nested containers (duck-typed via an
        # 'ops' attribute) are recursed into, leaves yield diff tuples
        for op in migrations.ops:
            if not hasattr(op, 'ops'):
                yield op.to_diff_tuple()
            else:
                for sub_op in cls._ops_as_diffs(op):
                    yield sub_op
class ModifyTableOps(OpContainer):
    """Contains a sequence of operations that all apply to a single Table."""

    def __init__(self, table_name, ops, schema=None):
        super(ModifyTableOps, self).__init__(ops)
        self.table_name = table_name
        self.schema = schema

    def reverse(self):
        """Return a new container with each child reversed, in
        reverse order."""
        reversed_ops = [op.reverse() for op in reversed(self.ops)]
        return ModifyTableOps(
            self.table_name,
            ops=reversed_ops,
            schema=self.schema,
        )
class UpgradeOps(OpContainer):
    """contains a sequence of operations that would apply to the
    'upgrade' stream of a script.

    .. seealso::

        :ref:`customizing_revision`

    """

    def reverse_into(self, downgrade_ops):
        """Fill the given :class:`.DowngradeOps` in place with the
        reversal of this container's operations and return it."""
        downgrade_ops.ops[:] = [
            op.reverse() for op in reversed(self.ops)
        ]
        return downgrade_ops

    def reverse(self):
        return self.reverse_into(DowngradeOps(ops=[]))
class DowngradeOps(OpContainer):
    """contains a sequence of operations that would apply to the
    'downgrade' stream of a script.

    .. seealso::

        :ref:`customizing_revision`

    """

    def reverse(self):
        """Return an :class:`.UpgradeOps` with each child reversed,
        in reverse order."""
        reversed_ops = [op.reverse() for op in reversed(self.ops)]
        return UpgradeOps(ops=reversed_ops)
class MigrationScript(MigrateOperation):
    """represents a migration script.

    E.g. when autogenerate encounters this object, this corresponds to the
    production of an actual script file.

    A normal :class:`.MigrationScript` object would contain a single
    :class:`.UpgradeOps` and a single :class:`.DowngradeOps` directive.

    .. seealso::

        :ref:`customizing_revision`

    """

    def __init__(
            self, rev_id, upgrade_ops, downgrade_ops,
            message=None,
            imports=None, head=None, splice=None,
            branch_label=None, version_path=None, depends_on=None):
        # revision identifier for the new script
        self.rev_id = rev_id
        # human-readable revision message
        self.message = message
        # extra import statements to render into the script
        self.imports = imports
        # head revision this script extends, if any
        self.head = head
        # splice flag for creating a new branch point
        self.splice = splice
        # optional branch label(s) for the revision
        self.branch_label = branch_label
        # directory in which the script is to be written
        self.version_path = version_path
        # revision(s) this script depends on
        self.depends_on = depends_on
        # UpgradeOps container for the 'upgrade' stream
        self.upgrade_ops = upgrade_ops
        # DowngradeOps container for the 'downgrade' stream
        self.downgrade_ops = downgrade_ops
|
APSL/puput-demo
|
config/settings/common.py
|
# -*- coding: utf-8 -*-
"""
Django settings for puput_demo project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
from puput import PUPUT_APPS
import environ
# Project root: three path segments above this file (config/settings/common.py).
ROOT_DIR = environ.Path(__file__) - 3  # (/a/b/myfile.py - 3 = /)
# Directory holding project-level assets (fixtures, templates, media).
APPS_DIR = ROOT_DIR.path('puput-demo')
# Reader for configuration taken from environment variables.
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
    # Default Django apps:
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Admin
    'django.contrib.admin',
)
# PUPUT_APPS (imported above) supplies puput's own apps plus its dependencies.
INSTALLED_APPS = DJANGO_APPS + PUPUT_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
# NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name, and the
# 'wagtail.wagtailcore' / 'wagtail.wagtailredirects' module paths belong to
# Wagtail < 2.0 -- keep in sync with the pinned Django/Wagtail versions.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # Wagtail: site resolution and redirect handling for page requests.
    'wagtail.wagtailcore.middleware.SiteMiddleware',
    'wagtail.wagtailredirects.middleware.RedirectMiddleware'
)
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
# Off unless DJANGO_DEBUG is set truthy in the environment.
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
    str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    # Falls back to the local "puput-demo" Postgres database when
    # DATABASE_URL is not set in the environment.
    'default': env.db("DATABASE_URL", default="postgres:///puput-demo"),
}
# Wrap every request in a database transaction on the default connection.
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
# Store and handle datetimes as timezone-aware values.
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
    {
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
        # Project-level template overrides live in puput-demo/templates.
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'OPTIONS': {
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
            'debug': DEBUG,
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
            # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                # Your stuff: custom template context processors go here
            ],
        },
    },
]
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
# Target directory that ``collectstatic`` writes into.
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
# User-uploaded files (e.g. blog images) are stored here.
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        # BUG FIX: this logger was previously nested under a bogus extra
        # 'loggers' key *inside* 'loggers', so dictConfig never applied it
        # (it configured a logger literally named "loggers" instead).
        'django.security.DisallowedHost': {
            'level': 'ERROR',
            'handlers': ['console', 'mail_admins'],
            'propagate': True,
        },
    },
}
# Site name displayed by Wagtail's admin.
WAGTAIL_SITE_NAME = 'Demo'
|
semk/iDiscover
|
idiscover/discover.py
|
# -*- coding: utf-8 -*-
#
# Discover the target host types in the subnet
#
# @author: Sreejith Kesavan <sreejithemk@gmail.com>
import arp
import oui
import ipcalc
import sys
class Discovery(object):
    """Find out the host types in the IP range (CIDR).

    NOTE: This finds mac addresses only within the subnet.
    It doesn't fetch mac addresses for routed network ip's.
    """

    def __init__(self):
        # ARP table reader and OUI (vendor prefix) database.
        self.__arp = arp.ARP()
        self.__oui = oui.OUI()

    def discover(self, address):
        """Traverse the IP subnet and yield (ip, manufacturer) tuples."""
        network = ipcalc.Network(address)
        for ip in network:
            ip = str(ip)
            # Ignore broadcast IP Addresses
            if '/' in address and ip == str(network.broadcast()):
                print('Ignoring broadcast ip: {broadcast}'.format(broadcast=str(network.broadcast())))
                continue
            mac = self.__arp.find_mac(ip)
            if mac:
                # BUG FIX: zero-pad *every* octet, not just the first one.
                # ARP tables may report e.g. 'a4:5:78:0d:1c:53'; without
                # padding all octets, the 6-hex-digit OUI prefix extracted
                # below would be built from the wrong characters.
                mac = ':'.join(octet.zfill(2) for octet in mac.split(':'))
                # First three octets (6 hex digits) form the vendor OUI.
                manuf_str = mac.replace(':', '')[:6].upper()
                manuf = self.__oui.find_manuf(manuf_str)
                if manuf:
                    yield (ip, manuf)
def run():
    """Entry point for the ``idiscover`` console script (Python 2 syntax)."""
    if len(sys.argv) < 2:
        # No address given: print usage and examples.
        print
        print 'Usage:\t\tidiscover <ip-address/cidr>'
        print 'Examples:'
        print '\t\tidiscover 10.73.19.0'
        print '\t\tidiscover 10.74.215/24'
        print
    else:
        addrs = sys.argv[1:]
        d = Discovery()
        try:
            for addr in addrs:
                for ip, manuf in d.discover(addr):
                    print 'IP Address: {ip} Manufacturer: {manuf}'.format(ip=ip, manuf=manuf)
        except KeyboardInterrupt:
            # Allow Ctrl-C to end a long scan gracefully.
            print 'Exiting...'
if __name__ == '__main__':
    run()
|
denverfoundation/storybase
|
apps/storybase_geo/migrations/0004_auto.py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add database indexes on Place.place_id and Location.location_id."""

    def forwards(self, orm):
        """Apply: create the two UUID lookup indexes."""
        # Adding index on 'Place', fields ['place_id']
        db.create_index('storybase_geo_place', ['place_id'])
        # Adding index on 'Location', fields ['location_id']
        db.create_index('storybase_geo_location', ['location_id'])

    def backwards(self, orm):
        """Revert: drop the indexes in reverse order."""
        # Removing index on 'Location', fields ['location_id']
        db.delete_index('storybase_geo_location', ['location_id'])
        # Removing index on 'Place', fields ['place_id']
        db.delete_index('storybase_geo_place', ['place_id'])

    # Frozen ORM model state captured by South when this migration was
    # generated -- auto-generated; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'storybase_geo.geolevel': {
            'Meta': {'object_name': 'GeoLevel'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'storybase_geo.location': {
            'Meta': {'object_name': 'Location'},
            'address': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
            'address2': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'lng': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'location_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
            'name': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': "orm['auth.User']"}),
            'point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
            'postcode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'raw': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        'storybase_geo.place': {
            'Meta': {'object_name': 'Place'},
            'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'null': 'True', 'blank': 'True'}),
            'children': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['storybase_geo.Place']", 'null': 'True', 'through': "orm['storybase_geo.PlaceRelation']", 'blank': 'True'}),
            'geolevel': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'places'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('storybase.fields.ShortTextField', [], {}),
            'place_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'})
        },
        'storybase_geo.placerelation': {
            'Meta': {'unique_together': "(('parent', 'child'),)", 'object_name': 'PlaceRelation'},
            'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_parent'", 'to': "orm['storybase_geo.Place']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_child'", 'to': "orm['storybase_geo.Place']"})
        }
    }

    complete_apps = ['storybase_geo']
|
EvaErzin/DragonHack
|
DragonHack/management/commands/dolocanjeGostisc.py
|
import csv
#import datetime
import numpy as np
import HackApp.models as database
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    """Derive probable eating/drinking venues ("gostisca") from receipt data.

    Reads ``dragon_dump.csv``, keeps businesses whose receipts look like a
    pub/restaurant (a minimum share issued after 21:00 and a minimum share
    priced 3-50), saves each as a ``Lokacija``, spreads its influence over a
    longitude/latitude grid and stores per-venue boundary polygons as
    ``Prikaz`` rows.
    """

    def handle(self, *args, **options):
        # Minimum share of receipts after 21:00 / priced 3-50 a business
        # needs to qualify as a venue.
        percentageAfterNine = 0.05
        percentage3to50 = 0.7
        # Geographic bounding box (longitude / latitude) covering Slovenia.
        longBound = [13.375556, 16.610556]
        latBound = [45.421944, 46.876667]
        # Grid resolution along the longitude axis; cell size derived below.
        longN = 2000
        # Influence decay per grid cell step.
        padec = 0.005

        def razporedi(grid, power, padec, x, y, davcna, poslovalnica):
            """Flood-fill one venue's decaying influence onto the grid.

            A cell is claimed when the incoming power beats the power already
            stored there; each cell holds [tax_no, branch, power, color].
            """
            obiskani = set()

            def rekurzija(power, x, y):
                # NOTE(review): recursion depth grows with power/padec and
                # may exceed Python's default recursion limit for strong
                # venues -- confirm against real data.
                if (x, y) in obiskani:
                    return
                else:
                    obiskani.add((x, y))
                try:
                    if grid[x][y][2] > power or power < 3:
                        return
                    else:
                        grid[x][y][2] = power
                        grid[x][y][0] = davcna
                        grid[x][y][1] = poslovalnica
                        rekurzija(power - padec, x - 1, y)
                        rekurzija(power - padec, x + 1, y)
                        rekurzija(power - padec, x, y - 1)
                        rekurzija(power - padec, x, y + 1)
                except IndexError:
                    # Fell off the grid edge; stop expanding in this direction.
                    return
            rekurzija(power, x, y)
            return grid

        def dolociBarvoInMeje(grid, squareSize, longBound, latBound):
            """Assign a display color to each claimed region and persist its
            boundary polygon as a ``Prikaz`` row."""
            obiskani = set()
            novoObiskani = set()
            mejneTocke = []
            mejneBarve = set()

            def rekurzija(x, y):
                # Walk one region (cells sharing tax_no + branch); collect its
                # boundary points and the colors of neighbouring regions.
                if (x, y) in obiskani or (x, y) in novoObiskani:
                    return
                else:
                    novoObiskani.add((x, y))
                try:
                    if grid[x][y][0] == 0:
                        return
                    if grid[x + 1][y][0] != grid[x][y][0] or grid[x + 1][y][1] != grid[x][y][1]:
                        mejneTocke.append(str(longBound[0] + squareSize * (x + 1)) + "," + str(latBound[1] - squareSize * y))
                        mejneTocke.append(str(longBound[0] + squareSize * (x + 1)) + "," + str(latBound[1] - squareSize * (y + 1)))
                        mejneBarve.add(grid[x + 1][y][3])
                    else:
                        rekurzija(x + 1, y)
                    if grid[x - 1][y][0] != grid[x][y][0] or grid[x - 1][y][1] != grid[x][y][1]:
                        mejneTocke.append(str(longBound[0] + squareSize * (x)) + "," + str(latBound[1] - squareSize * y))
                        mejneTocke.append(str(longBound[0] + squareSize * (x)) + "," + str(latBound[1] - squareSize * (y + 1)))
                        mejneBarve.add(grid[x - 1][y][3])
                    else:
                        rekurzija(x - 1, y)
                    if grid[x][y + 1][0] != grid[x][y][0] or grid[x][y + 1][1] != grid[x][y][1]:
                        mejneTocke.append(str(longBound[0] + squareSize * x) + "," + str(latBound[1] - squareSize * (y + 1)))
                        mejneTocke.append(
                            str(longBound[0] + squareSize * (x + 1)) + "," + str(latBound[1] - squareSize * (y + 1)))
                        mejneBarve.add(grid[x][y + 1][3])
                    else:
                        rekurzija(x, y + 1)
                    if grid[x][y - 1][0] != grid[x][y][0] or grid[x][y - 1][1] != grid[x][y][1]:
                        mejneTocke.append(str(longBound[0] + squareSize * x) + "," + str(latBound[1] - squareSize * y))
                        mejneTocke.append(str(longBound[0] + squareSize * (x + 1)) + "," + str(latBound[1] - squareSize * y))
                        mejneBarve.add(grid[x][y - 1][3])
                    else:
                        rekurzija(x, y - 1)
                except IndexError:
                    return

            for i in range(len(grid)):
                for j in range(len(grid[i])):
                    novoObiskani = set()
                    mejneBarve = set()
                    mejneTocke = []
                    davcna = 0
                    poslovalnica = 0
                    rekurzija(i, j)
                    if mejneTocke == []:
                        continue
                    # n: the smallest color not used by any neighbouring region.
                    n = 1
                    while True:
                        if n not in mejneBarve:
                            setData = True
                            for k in novoObiskani:
                                grid[k[0]][k[1]][3] = n
                                if setData:
                                    # Remember the region's owner from its
                                    # first cell (all cells share the values).
                                    davcna = grid[k[0]][k[1]][0]
                                    poslovalnica = grid[k[0]][k[1]][1]
                                    setData = False
                            break
                        else:
                            n += 1
                    # Parse "long,lat" strings into a 2 x N array.
                    koordinate = []
                    for l in mejneTocke:
                        koordinate.append(list(map(float, l.split(","))))
                    koordinate = np.array(koordinate).transpose()
                    # BUG FIX: two debug plt.plot(...) calls were removed here;
                    # matplotlib was never imported, so the first discovered
                    # region boundary raised NameError.
                    # Order the boundary points by angle so they trace a
                    # polygon outline.  NOTE(review): the angle is taken about
                    # the origin, not the polygon centroid -- confirm intended.
                    kot = np.arctan2(koordinate[1], koordinate[0])
                    ureditev = np.argsort(kot)
                    koordinate = koordinate.transpose()[ureditev]
                    mejneTocke = []
                    for l in koordinate:
                        mejneTocke.append(",".join(list(map(str, l))))
                    prikaz = database.Prikaz(lokacija=database.Lokacija.objects.get(poslovniProstor=hex(int(poslovalnica))[2:].upper(), davcnaStevilka = str(int(davcna))))
                    prikaz.koordinate = mejneTocke
                    prikaz.barva = str(n)
                    prikaz.save()
                    obiskani = obiskani | novoObiskani
            return

        # Aggregate the receipt dump per business branch.
        with open("dragon_dump.csv") as file:
            data = csv.reader(file, delimiter = ",")
            n = 0
            firme = {}
            for i in data:
                n += 1
                if n % 100000 == 0:
                    print(n)
                    # NOTE(review): processing is capped at the first 100000
                    # rows -- looks like a debugging leftover; confirm.
                    break
                identifier = i[1] + i[3]
                if identifier not in firme:
                    # [tax_no, branch(hex), n_receipts, total_value,
                    #  n_receipts_3_50, n_receipts_after_21, long, lat]
                    firme[identifier] = [i[1], int(i[3], 16), 1, float(i[7]), 0, 0, i[15], i[14]]
                else:
                    # NOTE(review): the first receipt of each business never
                    # reaches the price/time counters below -- confirm intended.
                    firme[identifier][2] += 1
                    firme[identifier][3] += float(i[7])
                    timeArray = i[2].split(' ')
                    cas = list(map(int, timeArray[1].split(':')))
                    if float(i[7]) > 3 and float(i[7]) < 50:
                        firme[identifier][4] += 1
                    if cas[0] >= 21:
                        firme[identifier][5] += 1
        kandidati = []
        for i in firme:
            kandidati.append(list(map(float, firme[i])))
        # Columns: tax_no branch n_receipts total_value n_3_50 n_after_21 long lat
        kandidati = np.array(kandidati)
        # Keep only businesses passing both receipt-pattern thresholds.
        transKand = kandidati.transpose()
        kandidati = kandidati[transKand[5] >= percentageAfterNine * transKand[2]]
        transKand = kandidati.transpose()
        kandidati = kandidati[transKand[4] >= percentage3to50 * transKand[2]]
        squareSize = (longBound[1] - longBound[0]) / longN
        latN = int(np.ceil((latBound[1] - latBound[0]) / squareSize))
        # Grid anchored at the top (north) edge.
        # Cell payload: [tax_no, branch, power, color]
        grid = np.zeros((longN, latN, 4))
        # NOTE(review): hard-coded Tip primary key; confirm fixture id 45635.
        tip = database.Tip.objects.get(id=45635)
        for i in kandidati:
            lokacija = database.Lokacija(poslovniProstor=hex(int(i[1]))[2:].upper(), davcnaStevilka = str(int(i[0])))
            lokacija.long = i[6]
            lokacija.lat = i[7]
            lokacija.tip = tip
            lokacija.save()
            # Grid cell containing the venue (x: west->east, y: north->south).
            x = int((i[6] - longBound[0]) // squareSize)
            y = int((latBound[1] - i[7]) // squareSize)
            grid = razporedi(grid, i[3], i[3] * padec, x, y, i[0], i[1])
        dolociBarvoInMeje(grid, squareSize, longBound, latBound)
|
wagoodman/bridgy
|
tests/test_inventory_set.py
|
import os
import mock
import pytest
import bridgy.inventory
from bridgy.inventory import InventorySet, Instance
from bridgy.inventory.aws import AwsInventory
from bridgy.config import Config
def get_aws_inventory(name):
    """Return an AwsInventory wired to the canned API responses in tests/aws_stubs."""
    stub_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'aws_stubs')
    return AwsInventory(name=name,
                        cache_dir=stub_dir,
                        access_key_id='access_key_id',
                        secret_access_key='secret_access_key',
                        session_token='session_token',
                        region='region')
def test_inventory_set(mocker):
    """Adding the same inventory source twice yields every instance twice."""
    test_dir = os.path.dirname(os.path.abspath(__file__))
    cache_dir = os.path.join(test_dir, 'aws_stubs')
    aws_obj = get_aws_inventory(name='aws')
    inventorySet = InventorySet()
    # Same backing inventory registered twice -> duplicated instances expected.
    inventorySet.add(aws_obj)
    inventorySet.add(aws_obj)
    print(aws_obj.instances())
    all_instances = inventorySet.instances()
    # Expected contents of the tests/aws_stubs fixture for the 'aws' source.
    aws_instances = [
        Instance(name=u'test-forms', address=u'devbox', aliases=(u'devbox', u'ip-172-31-8-185.us-west-2.compute.internal', u'i-e54cbaeb'), source='aws (aws)', container_id=None, type='VM'),
        Instance(name=u'devlab-forms', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-138.us-west-2.compute.internal', u'i-f7d726f9'), source='aws (aws)', container_id=None, type='VM'),
        Instance(name=u'test-account-svc', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-139.us-west-2.compute.internal', u'i-f4d726fa'), source='aws (aws)', container_id=None, type='VM'),
        Instance(name=u'devlab-pubsrv', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-142.us-west-2.compute.internal', u'i-f5d726fb'), source='aws (aws)', container_id=None, type='VM'),
        Instance(name=u'devlab-game-svc', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-140.us-west-2.compute.internal', u'i-f2d726fc'), source='aws (aws)', container_id=None, type='VM'),
        Instance(name=u'test-game-svc', address=u'devbox', aliases=(u'devbox', u'ip-172-31-0-141.us-west-2.compute.internal', u'i-f3d726fd'), source='aws (aws)', container_id=None, type='VM'),
        Instance(name=u'test-pubsrv', address=u'devbox', aliases=(u'devbox', u'ip-172-31-2-38.us-west-2.compute.internal', u'i-0f500447384e95942'), source='aws (aws)', container_id=None, type='VM'),
        Instance(name=u'test-pubsrv', address=u'devbox', aliases=(u'devbox', u'ip-172-31-2-39.us-west-2.compute.internal', u'i-0f500447384e95943'), source='aws (aws)', container_id=None, type='VM')
    ]
    expected_instances = aws_instances + aws_instances
    assert len(all_instances) == len(expected_instances)
    assert set(all_instances) == set(expected_instances)
def test_inventory_set_filter_sources(mocker):
    """filter_sources selects only the matching source (and nothing for bogus names)."""
    test_dir = os.path.dirname(os.path.abspath(__file__))
    cache_dir = os.path.join(test_dir, 'aws_stubs')
    inventorySet = InventorySet()
    inventorySet.add(get_aws_inventory(name='aws'))
    inventorySet.add(get_aws_inventory(name='awesome'))
    print(inventorySet.instances())
    all_instances = inventorySet.instances(filter_sources='awesome')
    # Expected contents of tests/aws_stubs, attributed to the 'awesome' source.
    awesome_instances = [
        Instance(name='test-forms', address='devbox', aliases=('devbox', 'ip-172-31-8-185.us-west-2.compute.internal', 'i-e54cbaeb'), source='awesome (aws)', container_id=None, type='VM'),
        Instance(name='devlab-forms', address='devbox', aliases=('devbox', 'ip-172-31-0-138.us-west-2.compute.internal', 'i-f7d726f9'), source='awesome (aws)', container_id=None, type='VM'),
        Instance(name='test-account-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-139.us-west-2.compute.internal', 'i-f4d726fa'), source='awesome (aws)', container_id=None, type='VM'),
        Instance(name='devlab-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-0-142.us-west-2.compute.internal', 'i-f5d726fb'), source='awesome (aws)', container_id=None, type='VM'),
        Instance(name='devlab-game-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-140.us-west-2.compute.internal', 'i-f2d726fc'), source='awesome (aws)', container_id=None, type='VM'),
        Instance(name='test-game-svc', address='devbox', aliases=('devbox', 'ip-172-31-0-141.us-west-2.compute.internal', 'i-f3d726fd'), source='awesome (aws)', container_id=None, type='VM'),
        Instance(name='test-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-2-38.us-west-2.compute.internal', 'i-0f500447384e95942'), source='awesome (aws)', container_id=None, type='VM'),
        Instance(name='test-pubsrv', address='devbox', aliases=('devbox', 'ip-172-31-2-39.us-west-2.compute.internal', 'i-0f500447384e95943'), source='awesome (aws)', container_id=None, type='VM')
    ]
    assert len(all_instances) == len(awesome_instances)
    assert set(all_instances) == set(awesome_instances)
    # An unknown source name matches nothing.
    all_instances = inventorySet.instances(filter_sources='bogus')
    assert len(all_instances) == 0
|
ac769/continuum_technologies
|
software/ble_live_read_graphical.py
|
from bluepy.btle import *
import time
import serial
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
# Wall-clock reference for the elapsed-time axis.
start_time = time.time()
data = []
data2 = []   # raw decoded chunks received over BLE
data3 = []   # concatenation of all chunks received so far
data4 = []   # the concatenation split back into complete lines
angles = []  # computed elbow angles, in degrees
# White background / black foreground plot styling.
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
pen = pg.mkPen('k', width=8)
app = QtGui.QApplication([])
plotWidget = pg.plot(title='biomechanics')
plotWidget.setWindowTitle('elbow angle')
plotWidget.setLabels(left=('angle', 'degrees'))
plotWidget.plotItem.getAxis('left').setPen(pen)
plotWidget.plotItem.getAxis('bottom').setPen(pen)
curve = plotWidget.plot(pen=pen)
plotWidget.setYRange(20, 210)
# NOTE(review): `data` is re-bound here, discarding the empty list above.
data = [0]
# NOTE(review): serial port is opened but never referenced again in this
# script -- confirm whether it is still needed.
ser = serial.Serial("/dev/rfcomm0", 9600, timeout=0.5)
t = [0]
# Raw sensor readings captured during calibration (straight vs fully bent arm).
arm_straight = 957
arm_bent = 987
class MyDelegate(DefaultDelegate):
    """BLE notification delegate: accumulates samples and converts them to angles."""

    def __init__(self):
        DefaultDelegate.__init__(self)

    def handleNotification(self, cHandle, data):
        # `data` here is the notification payload (bytes) and shadows the
        # module-level `data` list.
        global data2, data3, data4, angle
        if cHandle == 37:
            # presumably handle 37 is the sensor's TX characteristic -- TODO confirm
            data = data.decode("utf-8")
            data2.append(data)
            data3 = ''.join(data2)
            data4 = data3.splitlines()
            # Map the raw reading linearly onto degrees using the calibration
            # constants: arm_straight -> 180 deg, arm_bent -> 45 deg.
            angle = 180 - (float(data4[-1]) - arm_straight) / (arm_bent - arm_straight) * 135
            print(data4[-1])
            angles.append(angle)
            # print(data4[-1], angle)
        else:
            print('received an unexpected handle')
print('Attempting to connect...')
# MAC addresses of the two sensors.
mac1 = 'a4:d5:78:0d:1c:53'
mac2 = 'a4:d5:78:0d:2e:fc'  # NOTE(review): second sensor, currently unused
# Connect to the first sensor and install the notification handler.
per = Peripheral(mac1, "public")
per.setDelegate(MyDelegate())
print("Connected")
def update():
    """Poll for one BLE notification and redraw the most recent angle samples."""
    global curve, data, angles2
    if per.waitForNotifications(1):
        t.append(time.time() - start_time)
        x = list(range(0, len(angles), 1))
        angles2 = [float(i) for i in angles]
        # Plot a sliding window of roughly the last 50 samples.
        curve.setData(x[-50:-1], angles2[-50:-1])
    app.processEvents()
# Drive update() continuously from the Qt event loop.
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
if __name__ == '__main__':
    import sys
    # Start the Qt event loop only when not running in an interactive shell.
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
|
XKNX/xknx
|
xknx/io/gateway_scanner.py
|
"""
GatewayScanner is an abstraction for searching for KNX/IP devices on the local network.
* It walks through all network interfaces
* and sends UDP multicast search requests
* it returns the first found device
"""
from __future__ import annotations
import asyncio
from functools import partial
import logging
from typing import TYPE_CHECKING
import netifaces
from xknx.knxip import (
DIB,
HPAI,
DIBDeviceInformation,
DIBServiceFamily,
DIBSuppSVCFamilies,
KNXIPFrame,
KNXIPServiceType,
SearchRequest,
SearchResponse,
)
from xknx.telegram import IndividualAddress
from .transport import UDPTransport
if TYPE_CHECKING:
from xknx.xknx import XKNX
logger = logging.getLogger("xknx.log")
class GatewayDescriptor:
    """Used to return information about the discovered gateways."""

    def __init__(
        self,
        ip_addr: str,
        port: int,
        local_ip: str = "",
        local_interface: str = "",
        name: str = "UNKNOWN",
        supports_routing: bool = False,
        supports_tunnelling: bool = False,
        supports_tunnelling_tcp: bool = False,
        individual_address: IndividualAddress | None = None,
    ):
        """Initialize GatewayDescriptor class."""
        self.name = name
        self.ip_addr = ip_addr
        self.port = port
        self.local_interface = local_interface
        self.local_ip = local_ip
        self.supports_routing = supports_routing
        self.supports_tunnelling = supports_tunnelling
        self.supports_tunnelling_tcp = supports_tunnelling_tcp
        self.individual_address = individual_address

    def parse_dibs(self, dibs: list[DIB]) -> None:
        """Parse DIBs for gateway information."""
        for entry in dibs:
            if isinstance(entry, DIBSuppSVCFamilies):
                self.supports_routing = entry.supports(DIBServiceFamily.ROUTING)
                if entry.supports(DIBServiceFamily.TUNNELING):
                    self.supports_tunnelling = True
                    # Tunnelling over TCP is advertised as TUNNELING version 2.
                    self.supports_tunnelling_tcp = entry.supports(
                        DIBServiceFamily.TUNNELING, version=2
                    )
            elif isinstance(entry, DIBDeviceInformation):
                self.name = entry.name
                self.individual_address = entry.individual_address

    def __repr__(self) -> str:
        """Return object as representation string."""
        attrs = (
            "name",
            "ip_addr",
            "port",
            "local_interface",
            "local_ip",
            "supports_routing",
            "supports_tunnelling",
            "supports_tunnelling_tcp",
            "individual_address",
        )
        body = ",\n".join(f"    {attr}={getattr(self, attr)}" for attr in attrs)
        return f"GatewayDescriptor(\n{body}\n)"

    def __str__(self) -> str:
        """Return object as readable string."""
        return f"{self.individual_address} - {self.name} @ {self.ip_addr}:{self.port}"
class GatewayScanFilter:
    """Filter to limit gateway scan attempts.

    If `tunnelling` and `routing` are set it is treated as AND.
    KNX/IP devices that don't support `tunnelling` or `routing` aren't matched.
    """

    def __init__(
        self,
        name: str | None = None,
        tunnelling: bool | None = None,
        tunnelling_tcp: bool | None = None,
        routing: bool | None = None,
    ):
        """Initialize GatewayScanFilter class."""
        self.name = name
        self.tunnelling = tunnelling
        self.tunnelling_tcp = tunnelling_tcp
        self.routing = routing

    def match(self, gateway: GatewayDescriptor) -> bool:
        """Check whether the device is a gateway and given GatewayDescriptor matches the filter."""
        # Pair each configured criterion with the gateway's actual capability;
        # `None` means "don't care".
        criteria = (
            (self.name, gateway.name),
            (self.tunnelling, gateway.supports_tunnelling),
            (self.tunnelling_tcp, gateway.supports_tunnelling_tcp),
            (self.routing, gateway.supports_routing),
        )
        if any(wanted is not None and wanted != actual for wanted, actual in criteria):
            return False
        # A device offering none of the services is not a gateway at all.
        return (
            gateway.supports_tunnelling
            or gateway.supports_tunnelling_tcp
            or gateway.supports_routing
        )
class GatewayScanner:
    """Class for searching KNX/IP devices."""

    def __init__(
        self,
        xknx: XKNX,
        timeout_in_seconds: float = 4.0,
        stop_on_found: int | None = 1,
        scan_filter: GatewayScanFilter | None = None,
    ):
        """Initialize GatewayScanner class.

        ``scan_filter`` defaults to a fresh ``GatewayScanFilter()`` per
        instance. (The previous signature evaluated ``GatewayScanFilter()``
        once at import time — a mutable default argument shared by every
        scanner, so mutating one scanner's default filter silently affected
        all others.)
        """
        self.xknx = xknx
        self.timeout_in_seconds = timeout_in_seconds
        self.stop_on_found = stop_on_found
        self.scan_filter = scan_filter if scan_filter is not None else GatewayScanFilter()
        self.found_gateways: list[GatewayDescriptor] = []
        self._udp_transports: list[UDPTransport] = []
        self._response_received_event = asyncio.Event()
        # Clean value of self.stop_on_found; recomputed when ``scan`` is called.
        self._count_upper_bound = 0

    async def scan(self) -> list[GatewayDescriptor]:
        """Scan and return a list of GatewayDescriptors on success."""
        if self.stop_on_found is None:
            self._count_upper_bound = 0
        else:
            self._count_upper_bound = max(0, self.stop_on_found)
        await self._send_search_requests()
        try:
            # Woken early by _add_found_gateway once enough gateways were
            # found; otherwise runs into the timeout.
            await asyncio.wait_for(
                self._response_received_event.wait(),
                timeout=self.timeout_in_seconds,
            )
        except asyncio.TimeoutError:
            pass
        finally:
            self._stop()
        return self.found_gateways

    def _stop(self) -> None:
        """Stop tearing down udp_transport."""
        for udp_transport in self._udp_transports:
            udp_transport.stop()

    async def _send_search_requests(self) -> None:
        """Find all interfaces with active IPv4 connection to search for gateways."""
        for interface in netifaces.interfaces():
            try:
                af_inet = netifaces.ifaddresses(interface)[netifaces.AF_INET]
                ip_addr = af_inet[0]["addr"]
            except KeyError:
                logger.debug("No IPv4 address found on %s", interface)
                continue
            except ValueError as err:
                # rare case when an interface disappears during search initialisation
                logger.debug("Invalid interface %s: %s", interface, err)
                continue
            else:
                await self._search_interface(interface, ip_addr)

    async def _search_interface(self, interface: str, ip_addr: str) -> None:
        """Send a search request on a specific interface."""
        logger.debug("Searching on %s / %s", interface, ip_addr)
        udp_transport = UDPTransport(
            self.xknx,
            (ip_addr, 0),
            (self.xknx.multicast_group, self.xknx.multicast_port),
            multicast=True,
        )
        # Responses arrive asynchronously; the interface name is bound into
        # the callback so the gateway can report where it was found.
        udp_transport.register_callback(
            partial(self._response_rec_callback, interface=interface),
            [KNXIPServiceType.SEARCH_RESPONSE],
        )
        await udp_transport.connect()
        self._udp_transports.append(udp_transport)

        discovery_endpoint = HPAI(
            ip_addr=self.xknx.multicast_group, port=self.xknx.multicast_port
        )
        search_request = SearchRequest(self.xknx, discovery_endpoint=discovery_endpoint)
        udp_transport.send(KNXIPFrame.init_from_body(search_request))

    def _response_rec_callback(
        self,
        knx_ip_frame: KNXIPFrame,
        source: HPAI,
        udp_transport: UDPTransport,
        interface: str = "",
    ) -> None:
        """Verify and handle knxipframe. Callback from internal udp_transport."""
        if not isinstance(knx_ip_frame.body, SearchResponse):
            logger.warning("Could not understand knxipframe")
            return
        gateway = GatewayDescriptor(
            ip_addr=knx_ip_frame.body.control_endpoint.ip_addr,
            port=knx_ip_frame.body.control_endpoint.port,
            local_ip=udp_transport.local_addr[0],
            local_interface=interface,
        )
        gateway.parse_dibs(knx_ip_frame.body.dibs)
        logger.debug("Found KNX/IP device at %s: %s", source, repr(gateway))
        self._add_found_gateway(gateway)

    def _add_found_gateway(self, gateway: GatewayDescriptor) -> None:
        # Deduplicate by individual address and honour the scan filter.
        if self.scan_filter.match(gateway) and not any(
            _gateway.individual_address == gateway.individual_address
            for _gateway in self.found_gateways
        ):
            self.found_gateways.append(gateway)
            if 0 < self._count_upper_bound <= len(self.found_gateways):
                # Enough results: wake up the waiting ``scan`` call.
                self._response_received_event.set()
|
ciechowoj/minion
|
output.py
|
import sublime, sublime_plugin
def clean_layout(layout):
    """Drop row/column boundaries no cell references and renumber the cells.

    Mutates *layout* in place and returns it.
    """
    # Collect every boundary index actually used by some cell.
    row_keep = sorted({edge for cell in layout["cells"] for edge in (cell[1], cell[3])})
    col_keep = sorted({edge for cell in layout["cells"] for edge in (cell[0], cell[2])})

    layout["rows"] = [row for idx, row in enumerate(layout["rows"]) if idx in row_keep]
    layout["cols"] = [col for idx, col in enumerate(layout["cols"]) if idx in col_keep]

    # Old boundary index -> compacted index.
    remap_row = {old: new for new, old in enumerate(row_keep)}
    remap_col = {old: new for new, old in enumerate(col_keep)}
    layout["cells"] = [
        [remap_col[x0], remap_row[y0], remap_col[x1], remap_row[y1]]
        for x0, y0, x1, y1 in layout["cells"]
    ]
    return layout
def collapse_group(group):
    """Remove *group* from the active window's layout.

    Cells whose bottom edge touches the removed group's top edge (and that
    lie horizontally within its span) are stretched down to absorb the freed
    space; all other cells are kept unchanged.
    """
    # Cell layout is [left, top, right, bottom] in boundary indices.
    LEFT = 0
    TOP = 1
    RIGHT = 2
    BOTTOM = 3
    window = sublime.active_window()
    layout = window.get_layout()
    cells = layout["cells"]
    new_cells = []
    group_cell = cells[group]
    # Drop the collapsed group's own cell before rebuilding the list.
    cells = cells[:group] + cells[group + 1:]
    for cell in cells:
        if cell[BOTTOM] == group_cell[TOP] and cell[LEFT] >= group_cell[LEFT] and cell[RIGHT] <= group_cell[RIGHT]:
            new_cells.append([
                cell[LEFT],
                cell[TOP],
                cell[RIGHT],
                group_cell[BOTTOM]
            ])
        elif cell != group_cell:
            # NOTE(review): group_cell was already removed above, so this
            # comparison only filters out other cells with *identical
            # geometry* — presumably intentional; confirm before simplifying.
            new_cells.append(cell)
    layout["cells"] = new_cells
    window.set_layout(clean_layout(layout))
class OutputView:
    """Wrapper around the shared scratch "Output" view.

    Content, scroll position and view id are stored on the class so they
    survive the underlying Sublime view being closed and re-created.
    """

    content = ""    # full text ever appended; replayed into re-created views
    position = 0.0  # last known vertical scroll offset
    id = None       # sublime view id of the current output view, if any

    def __init__(self, view):
        self.view = view

    def __getattr__(self, name):
        # Called only for attributes missing on this wrapper: delegate to
        # the wrapped view, re-resolving it first in case the output view
        # was closed/re-created since this wrapper was built.
        # BUG FIX: the original compared against the *builtin* ``id``
        # function (always unequal for an int view id), so it re-resolved on
        # every delegated call; compare with the stored OutputView.id.
        if self.view.id() != OutputView.id:
            output = OutputView.find_view()
            if output:
                self.view = output.view
        return getattr(self.view, name)

    def clear(self):
        """Wipe both the cached content and the view's buffer."""
        OutputView.content = ""
        self.run_command("output_view_clear")

    def append(self, text):
        """Append *text*, mirroring it into the class-level cache."""
        OutputView.content += text
        self.run_command("output_view_append", {"text": text})

    def append_finish_message(self, command, working_dir, return_code, elapsed_time):
        """Append an exec-style footer describing a finished command."""
        if return_code != 0:
            templ = "[Finished in {:.2f}s with exit code {}]\n"
            self.append(templ.format(elapsed_time, return_code))
            self.append("[cmd: {}]\n".format(command))
            self.append("[dir: {}]\n".format(working_dir))
        else:
            self.append("[Finished in {:.2f}s]\n".format(elapsed_time))

    def _collapse(self, group):
        # Collapse the group when the output view is (about to be) its only view.
        window = sublime.active_window()
        views = window.views_in_group(group)
        if (len(views) == 0 or len(views) == 1 and
            views[0].id() == self.view.id()):
            collapse_group(group)

    def _close(self):
        window = sublime.active_window()
        group, index = window.get_view_index(self.view)
        window.run_command("close_by_index", {"group": group, "index": index})
        self._collapse(group)
        OutputView.id = None

    @staticmethod
    def close():
        """Close every scratch "Output" view in the active window."""
        window = sublime.active_window()
        for view in window.views():
            if view.is_scratch() and view.name() == "Output":
                OutputView(view)._close()

    @staticmethod
    def find_view():
        """Return a wrapper for the existing output view, or None."""
        window = sublime.active_window()
        for view in window.views():
            if view.is_scratch() and view.name() == "Output":
                return OutputView(view)
        return None

    @staticmethod
    def create():
        """Return a cleared output view, creating it if necessary."""
        view = OutputView.request()
        view.clear()
        return view

    @staticmethod
    def request():
        """Return the output view, creating it (and a bottom pane) if needed."""
        window = sublime.active_window()
        num_groups = window.num_groups()
        if num_groups < 3:
            layout = window.get_layout()
            num_rows = len(layout["rows"]) - 1
            num_cols = len(layout["cols"]) - 1
            if len(layout["rows"]) < 3:
                # Split the bottom row roughly 2:1 to make room for the pane.
                begin = layout["rows"][-2]
                end = layout["rows"][-1]
                layout["rows"] = layout["rows"][:-1] + [begin * 0.33 + end * 0.66, layout["rows"][-1]]
            cells = []
            new_num_rows = len(layout["rows"]) - 1
            for cell in layout["cells"]:
                # Shrink cells that touched the old bottom edge (except the
                # right-most column) so the new bottom-right cell fits.
                if cell[3] == num_rows and cell[2] != num_cols:
                    cells.append([cell[0], cell[1], cell[2], new_num_rows])
                else:
                    cells.append(cell)
            cells.append([num_cols - 1, new_num_rows - 1, num_cols, new_num_rows])
            layout["cells"] = cells
            window.set_layout(layout)
            num_groups = window.num_groups()
        views = window.views_in_group(num_groups - 1)
        output = None
        for view in views:
            if view.name() == "Output" and view.is_scratch():
                output = view
        if output is None:  # was `== None`
            active = window.active_view()
            output = window.new_file()
            output.settings().set("line_numbers", False)
            output.settings().set("scroll_past_end", False)
            output.settings().set("scroll_speed", 0.0)
            output.settings().set("gutter", False)
            output.settings().set("spell_check", False)
            output.set_scratch(True)
            output.set_name("Output")
            # Replay previously appended content into the fresh view.
            output.run_command("output_view_append", {"text": OutputView.content})

            def update():
                # Restore the remembered scroll offset once the view is laid out.
                output.set_viewport_position((0, OutputView.position), False)
            sublime.set_timeout(update, 0.0)
            OutputView.id = output.id()
            window.set_view_index(output, num_groups - 1, len(views))
            window.focus_view(active)
        return OutputView(output)
class OutputViewClearCommand(sublime_plugin.TextCommand):
    """Text command: erase the whole contents of the output view."""

    def run(self, edit):
        whole_buffer = sublime.Region(0, self.view.size())
        self.view.erase(edit, whole_buffer)
class OutputViewAppendCommand(sublime_plugin.TextCommand):
    """Text command: append text, auto-scrolling when the tail is visible."""

    def run(self, edit, text):
        view = self.view
        # Only follow the output if the user was already looking at the end.
        tail_visible = view.visible_region().end() == view.size()
        view.insert(edit, view.size(), text)
        if tail_visible:
            extent = view.viewport_extent()
            tail_layout = view.text_to_layout(view.size())
            view.set_viewport_position((0, tail_layout[1] - extent[1]), False)
class OpenOutputCommand(sublime_plugin.WindowCommand):
    """Window command: show (or create) the shared output view."""
    def run(self):
        OutputView.request()
class CloseOutputCommand(sublime_plugin.ApplicationCommand):
    """Application command: close every output view in the active window."""
    def run(self):
        OutputView.close()
class OutputEventListener(sublime_plugin.EventListener):
    """Wire the output view into Sublime's context/close events."""

    def on_query_context(self, view, key, operator, operand, match_all):
        """Expose an "output_visible" context for key bindings.

        BUG FIX: removed a leftover debug ``print(key)`` that fired on every
        context query for every key binding.
        """
        if key == "output_visible":
            return OutputView.find_view() is not None
        else:
            # None tells Sublime this listener doesn't handle the context.
            return None

    def on_close(self, view):
        # Remember the scroll offset so a re-created output view can restore it.
        if view.is_scratch() and view.name() == "Output":
            OutputView.position = view.viewport_position()[1]
|
yeleman/uninond
|
uninond/tools.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import re
import unicodedata
import datetime
import subprocess
from py3compat import string_types, text_type
from django.utils import timezone
from django.conf import settings
from uninond.models.SMSMessages import SMSMessage
# Default country calling code (Mali, +223) unless overridden in settings.
COUNTRY_PREFIX = getattr(settings, 'COUNTRY_PREFIX', 223)
# Known international calling codes, used to split an indicator off a number.
ALL_COUNTRY_CODES = [1242, 1246, 1264, 1268, 1284, 1340, 1345, 1441, 1473,
                     1599, 1649, 1664, 1670, 1671, 1684, 1758, 1767, 1784,
                     1809, 1868, 1869, 1876, 1, 20, 212, 213, 216, 218, 220,
                     221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231,
                     232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
                     243, 244, 245, 248, 249, 250, 251, 252, 253, 254, 255,
                     256, 257, 258, 260, 261, 262, 263, 264, 265, 266, 267,
                     268, 269, 27, 290, 291, 297, 298, 299, 30, 31, 32, 33,
                     34, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359,
                     36, 370, 371, 372, 373, 374, 375, 376, 377, 378, 380,
                     381, 382, 385, 386, 387, 389, 39, 40, 41, 420, 421, 423,
                     43, 44, 45, 46, 47, 48, 49, 500, 501, 502, 503, 504,
                     505, 506, 507, 508, 509, 51, 52, 53, 54, 55, 56, 57, 58,
                     590, 591, 592, 593, 595, 597, 598, 599, 60, 61, 62, 63,
                     64, 65, 66, 670, 672, 673, 674, 675, 676, 677, 678, 679,
                     680, 681, 682, 683, 685, 686, 687, 688, 689, 690, 691,
                     692, 7, 81, 82, 84, 850, 852, 853, 855, 856, 86, 870,
                     880, 886, 90, 91, 92, 93, 94, 95, 960, 961, 962, 963,
                     964, 965, 966, 967, 968, 970, 971, 972, 973, 974, 975,
                     976, 977, 98, 992, 993, 994, 995, 996, 998]
# Single-letter month codes used by date_to_ident/ident_to_date.
MONTHS = ['J', 'F', 'M', 'A', 'Y', 'U', 'L', 'G', 'S', 'O', 'N', 'D']
# Alphabet used to encode month indices in date_to_ident/ident_to_date.
ALPHA = 'abcdefghijklmnopqrstuvwxyz'
def phonenumber_isint(number):
    ''' whether number is in international format '''
    # Either an explicit "+"/"(" prefix, or a "<code>.<number>" float-ish form.
    starts_intl = re.match(r'^[+|(]', number) is not None
    dotted_code = re.match(r'^\d{1,4}\.\d+$', number) is not None
    return starts_intl or dotted_code
def phonenumber_indicator(number):
    ''' extract indicator from number or "" '''
    for code in ALL_COUNTRY_CODES:
        # startswith accepts a tuple: either prefix form matches.
        if number.startswith(("%{}".format(code), "+{}".format(code))):
            return str(code)
    return ""
def phonenumber_cleaned(number):
    ''' return (indicator, number) cleaned of space and other '''
    if not isinstance(number, string_types):
        number = number.__str__()
    # Strip everything except digits and '+'.
    digits = re.sub(r'[^\d\+]', '', number)
    if not phonenumber_isint(digits):
        return (None, digits)
    # partition("") leaves the whole string in the tail, so an unknown
    # indicator yields ("", digits) — same as before.
    _, matched, remainder = digits.partition(phonenumber_indicator(digits))
    return (matched, remainder)
def join_phonenumber(prefix, number, force_intl=True):
    """Assemble '+<prefix><number>'; returns None for an empty number."""
    if not number:
        return None
    if force_intl and not prefix:
        # Fall back to the configured default country prefix.
        prefix = COUNTRY_PREFIX
    return "+{prefix}{number}".format(prefix=prefix, number=number)
def phonenumber_repr(number, skip_indicator=str(COUNTRY_PREFIX)):
    ''' properly formated for visualization: (xxx) xx xx xx xx

    The indicator is shown in parentheses unless it equals *skip_indicator*
    (the local country code by default, evaluated once at import time).
    '''
    def format(number):
        # Group digits by 2 (even length) or 3 (odd length).
        # NOTE: shadows the builtin `format` inside this function only.
        if len(number) % 2 == 0:
            span = 2
        else:
            span = 3
        # use NBSP
        # NOTE(review): a plain ASCII space is joined here despite the NBSP
        # comment above — confirm which was intended.
        return " ".join(["".join(number[i:i + span])
                         for i in range(0, len(number), span)])
    indicator, clean_number = phonenumber_cleaned(number)
    # string-only identity goes into indicator
    if indicator is None and not clean_number:
        return number.strip()
    if indicator and indicator != skip_indicator:
        return "(%(ind)s) %(num)s" \
            % {'ind': indicator,
               'num': format(clean_number)}
    return format(clean_number)
def normalized_phonenumber(number_text):
    """Return '+<indicator><number>' for any phone-ish input, or None if blank."""
    if number_text is None:
        return None
    if not number_text.strip():
        return None
    indicator, local_part = phonenumber_cleaned(number_text)
    return join_phonenumber(indicator, local_part)
def operator_from_malinumber(number, default=settings.FOREIGN):
    ''' ORANGE or MALITEL based on the number prefix '''
    indicator, local_part = phonenumber_cleaned(
        normalized_phonenumber(number))
    # Foreign indicator -> not a local operator.
    if indicator is not None and indicator != str(COUNTRY_PREFIX):
        return default
    for operator, details in settings.OPERATORS.items():
        # details[1] holds the operator's known number prefixes.
        if any(local_part.startswith(str(prefix)) for prefix in details[1]):
            return operator
    return default
def send_sms(to, text):
    """Queue an outgoing SMS by persisting an SMSMessage row."""
    fields = {
        'direction': SMSMessage.OUTGOING,
        'identity': to,
        'event_on': timezone.now(),
        'text': text,
    }
    return SMSMessage.objects.create(**fields)
def fake_message(to, text):
    """Create an outgoing SMS already flagged as handled (never delivered)."""
    message = send_sms(to, text)
    message.handled = True
    message.save()
    return message
def to_ascii(text):
    """Strip accents and return the ASCII bytes of *text*, whitespace-trimmed.

    BUG FIX: the original called the Python-2-only ``unicode()`` builtin,
    which raises NameError under Python 3; use ``text_type`` from py3compat
    (already imported at module top) as the rest of this module does.
    """
    return unicodedata.normalize('NFKD', text_type(text)) \
                      .encode('ASCII', 'ignore').strip()
def date_to_ident(adate):
    """Pack a date into a short ident: <year digit><month letter><hex day>.

    NOTE(review): for day > 16 the day is halved before hex-encoding and the
    month letter index is doubled to signal it; odd days therefore round
    down when decoded by ``ident_to_date`` — presumably an accepted loss to
    keep the day to one hex digit; confirm.
    """
    year, month, day = adate.timetuple()[0:3]
    # Only the last digit of the year is kept (decode assumes the 201x decade).
    hyear = text_type(year)[-1]
    if day > 16:
        hmonth = ALPHA[month * 2]
        hday = hex(day // 2)[2:]
    else:
        hmonth = ALPHA[month]
        hday = hex(day)[2:]
    return "{y}{m}{d}".format(m=hmonth, d=hday, y=hyear)
def ident_to_date(ident):
    """Unpack an ident produced by ``date_to_ident`` back into a date.

    Days above 16 were halved on encode, so odd days are reconstructed one
    day early (lossy round trip).
    """
    hyear, hmonth, hday = ident[0], ident[1], ident[2:]
    # Decade is hard-coded to 201x.
    year = int('201{}'.format(hyear))
    day = int(hday, 16)
    month = ALPHA.index(hmonth)
    if month > 12:
        # A doubled month letter signals that the day was halved on encode.
        month //= 2
        day *= 2
    return datetime.date(year, month, day)
def dispatch_sms(text, roles, root):
    """Send *text* to every ancestor contact of *root* holding one of *roles*."""
    recipients = root.ancestors_contacts(roles, identies_only=True)
    return [send_sms(identity, text) for identity in recipients]
def datetime_repr(adatetime):
return ("{date} à {time}"
.format(date=adatetime.strftime("%A %-d"),
time=adatetime.strftime("%Hh%M")).lower())
def exec_cmd(command):
    """Run *command* through the shell and return its exit code.

    BUG FIX: uses ``communicate()`` instead of ``wait()`` — with
    ``stdout=PIPE`` a bare ``wait()`` deadlocks once the child fills the
    pipe buffer, because nobody drains it.

    WARNING: ``shell=True`` executes *command* via the shell; never pass
    untrusted input here.
    """
    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    process.communicate()
    return process.returncode
|
marceloomens/appointments
|
appointments/apps/common/views.py
|
from django.conf import settings
from django.contrib import messages
from django.forms import Form
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from django.utils.translation import ugettext as _
import dateutil.parser, json
from itsdangerous import BadSignature
from appointments.apps.timeslots.models import Action, Constraint
from appointments.apps.timeslots.utils import strfdate, strftime, strptime, is_available
from .forms import ReminderForm
from .models import Appointment, User
from .utils import get_logger, get_serializer, send_confirmation, send_receipt, send_reminder
# Create your views here.
def book(request):
    """AJAX endpoint creating an Appointment from a JSON POST body.

    Non-AJAX POSTs are rejected; GET renders the booking page. Unknown
    e-mails create an inactive User on the fly. Every validation failure is
    logged and answered with a plain 400.
    """
    logger = get_logger(__name__, request)
    # NOTE(review): request.is_ajax() is deprecated in modern Django —
    # confirm the Django version pinned by this project still provides it.
    if 'POST' == request.method and request.is_ajax():
        fields = json.loads(request.body)
        try:
            user = User.objects.get(email__iexact=fields['email'])
        except KeyError:
            # This is an error; time to log, then fail
            logger.warning("Bad form submission: KeyError (email)")
            return HttpResponseBadRequest()
        except User.DoesNotExist:
            # First booking for this address: create an inactive account.
            user = User(email=fields['email'], is_active=False)
            user.save()
            logger.info("New user %s" % (str(user)))
        try:
            action = Action.objects.get(slug=fields['action'])
        except (KeyError, Action.DoesNotExist):
            logger.warning("Bad form submission: KeyError (action) or Action.DoesNotExist")
            # This is an error; time to log, then fail
            return HttpResponseBadRequest()
        try:
            constraint = Constraint.objects.get(slug=fields['constraint'])
        except (KeyError, Constraint.DoesNotExist):
            # This is an error; time to log, then fail
            logger.warning("Bad form submission: KeyError (constraint) or Constraint.DoesNotExist")
            return HttpResponseBadRequest()
        if action not in constraint.actions.all():
            # This is an error; time to log, then fail
            logger.warning("Bad form submission: bad constraint/action combination")
            return HttpResponseBadRequest()
        # Ignore timezone to prevent one-off problems
        try:
            date = dateutil.parser.parse(fields['date'], ignoretz=True).date()
            time = strptime(fields['time'])
        except KeyError:
            # This is an error; time to log, then fail
            logger.warning("Bad form submission: KeyError (date and/or time)")
            return HttpResponseBadRequest()
        # Check if timeslot is available
        if not is_available(constraint, date, time):
            # Return some meaningful JSON to say that time is not available
            logger.warning("Bad form submission: timeslot not available")
            return HttpResponseBadRequest()
        # Preprocess sex to ensure it's a valid value ('M'/'F' or empty).
        sex = fields['sex'][0].upper() if fields.get('sex', None) else None
        if sex not in ['M', 'F']:
            sex = ''
        appointment = Appointment(
            user=user,
            action=action,
            constraint=constraint,
            date=date,
            time=time,
            # Optional fields...
            first_name=fields.get('first_name',''),
            last_name=fields.get('last_name',''),
            nationality = fields.get('nationality',''),
            sex=sex,
            # See if this works without any changes...
            identity_number=fields.get('identity_number',''),
            document_number=fields.get('document_number',''),
            phone_number=fields.get('phone_number',''),
            mobile_number=fields.get('mobile_number',''),
            comment=fields.get('comment',''),
        )
        # Save the appointment; then log it
        appointment.save()
        logger.info("New appointment by %s in %s/%s on %s at %s" % (
            str(appointment.user),
            appointment.constraint.key.slug,
            appointment.constraint.slug,
            strfdate(appointment.date),
            strftime(appointment.time),
            )
        )
        send_receipt(appointment)
        messages.success(request, _("We've send you an e-mail receipt. Please confirm your appointment by following the instructions."))
        # Return some JSON...
        return HttpResponse("Ok")
    elif 'POST' == request.method:
        logger.warning("XMLHttpRequest header not set on POST request")
        return HttpResponseBadRequest("XMLHttpRequest (AJAX) form submissions only please!")
    return render(request, 'book.html')
def cancel(request, payload):
    """Let a user cancel the appointment identified by a signed *payload*.

    BUG FIX: the original did ``return Http404`` — handing the exception
    *class* back as a response object, which is not a valid HttpResponse.
    ``Http404`` must be raised. Also dropped the redundant local
    ``from itsdangerous import BadSignature`` (already imported at module top).
    """
    s = get_serializer()
    try:
        appointment_id = s.loads(payload)
    except BadSignature:
        # Tampered/invalid token: treat as not found.
        raise Http404
    appointment = get_object_or_404(Appointment, pk=appointment_id)
    if appointment.is_cancelled():
        messages.warning(request, _("You've already cancelled this appointment."))
        return redirect('finish')
    if 'POST' == request.method:
        form = Form(request.POST)
        if form.is_valid():
            appointment.cancel()
            messages.info(request, _("You successfully cancelled your appointment."))
            return redirect('finish')
        # An empty Form only fails CSRF/management validation; keep the
        # original not-found semantics but actually raise it.
        raise Http404
    form = Form()
    return render(request, 'cancel.html', {'form': form})
def confirm(request, payload):
    """Confirm the appointment identified by a signed *payload* token.

    BUG FIX: ``return Http404`` returned the exception class as if it were a
    response; ``Http404`` must be raised for Django to render a 404.
    """
    s = get_serializer()
    try:
        appointment_id = s.loads(payload)
    except BadSignature:
        raise Http404
    appointment = get_object_or_404(Appointment, pk=appointment_id)
    if appointment.is_cancelled():
        messages.error(request, _("You cannot reconfirm a cancelled appointment. Please book again."))
    elif appointment.is_confirmed():
        messages.warning(request, _("Thank you, no need to reconfirm."))
    else:
        appointment.confirm()
        # Confirming also verifies the user's e-mail address.
        appointment.user.verify()
        send_confirmation(appointment)
        messages.success(request, _("Thank you for confirming your appointment."))
    return redirect('finish')
def reminder(request):
    """E-mail a user the list of their upcoming appointments."""
    if 'POST' == request.method:
        form = ReminderForm(request.POST)
        if form.is_valid():
            email = form.cleaned_data['email']
            try:
                user = User.objects.get(email=email)
                date = timezone.now().date()
                # Only appointments from today onwards.
                appointments = user.appointments.filter(date__gte=date)
                send_reminder(user, appointments)
            except User.DoesNotExist:
                # Unknown address is silently ignored and the success message
                # shown anyway — presumably deliberate, so the form cannot be
                # used to probe which e-mails have accounts; confirm.
                pass
            messages.success(request, _("We'll send you an e-mail with all your appointments."))
            return redirect('finish')
    else:
        form = ReminderForm()
    return render(request, 'reminder.html', {'form': form})
# Custom error views
def handler404(request):
    """Custom 404 error view.

    BUG FIX: pass ``status=404`` explicitly — ``render`` defaults to 200,
    which made the error page look like a success to clients and crawlers.
    """
    return render(request, '404.html', status=404)
|
the-blue-alliance/the-blue-alliance
|
src/backend/common/queries/dict_converters/event_details_converter.py
|
from collections import defaultdict
from typing import cast, Dict, List, NewType
from backend.common.consts.api_version import ApiMajorVersion
from backend.common.models.event_details import EventDetails
from backend.common.models.keys import TeamKey
from backend.common.queries.dict_converters.converter_base import ConverterBase
EventDetailsDict = NewType("EventDetailsDict", Dict)
class EventDetailsConverter(ConverterBase):
    """Convert EventDetails models into versioned API dictionaries."""

    # Increment every time a change to the dict is made
    SUBVERSIONS = {
        ApiMajorVersion.API_V3: 3,
    }

    @classmethod
    def _convert_list(cls, model_list: List[EventDetails], version: ApiMajorVersion):
        """Dispatch the list conversion for the requested API version."""
        converters = {
            3: cls.eventsDetailsConverter_v3,
        }
        return converters[version](model_list)

    @classmethod
    def eventsDetailsConverter_v3(cls, event_details: List[EventDetails]):
        """Convert every EventDetails in the list to its v3 dict."""
        return [cls.eventDetailsConverter_v3(details) for details in event_details]

    @classmethod
    def eventDetailsConverter_v3(cls, event_details: EventDetails) -> EventDetailsDict:
        """Build the v3 API dict for one (possibly None) EventDetails."""
        normalized_oprs = defaultdict(dict)
        if event_details and event_details.matchstats:
            for stat_type, stats in event_details.matchstats.items():
                if stat_type not in {"oprs", "dprs", "ccwms"}:
                    continue
                for team, value in cast(Dict[TeamKey, float], stats).items():
                    # Normalize bare team numbers to "frcXXXX" keys.
                    key = team if "frc" in team else "frc{}".format(team)
                    normalized_oprs[stat_type][key] = value

        if event_details:
            rankings = event_details.renderable_rankings
        else:
            rankings = {
                "extra_stats_info": [],
                "rankings": [],
                "sort_order_info": None,
            }

        payload = {
            "alliances": event_details.alliance_selections if event_details else [],
            "district_points": event_details.district_points if event_details else {},
            "insights": event_details.insights
            if event_details
            else {"qual": {}, "playoff": {}},
            "oprs": normalized_oprs if normalized_oprs else {},  # OPRs, DPRs, CCWMs
            "predictions": event_details.predictions if event_details else {},
            "rankings": rankings,
        }
        return EventDetailsDict(payload)
|
jbradberry/django-diplomacy
|
setup.py
|
import setuptools
with open("README.rst") as f:
    long_description = f.read()


setuptools.setup(
    name='django-diplomacy',
    version="0.8.0",
    author='Jeff Bradberry',
    author_email='jeff.bradberry@gmail.com',
    description='A play-by-web app for Diplomacy',
    long_description=long_description,
    # BUG FIX: was 'test/x-rst' — not a valid content type, so PyPI would
    # reject or misrender the long description; must be 'text/x-rst'.
    long_description_content_type='text/x-rst',
    url='http://github.com/jbradberry/django-diplomacy',
    packages=setuptools.find_packages(),
    entry_points={
        'turngeneration.plugins': ['diplomacy = diplomacy.plugins:TurnGeneration'],
    },
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Games/Entertainment :: Turn Based Strategy'
    ],
)
|
brunosmmm/hdltools
|
hdltools/patterns/__init__.py
|
"""Signal pattern matching."""
import re
from typing import Union
class PatternError(Exception):
    """Raised when a signal pattern string/bytes value is malformed."""
class Pattern:
    """Signal pattern representation.

    A pattern is a string or bytes of '0', '1' and don't-care 'x'/'X'
    characters; string patterns may carry a '0b' prefix or be given as hex
    with a trailing 'h' (hex digits 'x'/'X' are don't-care nibbles).
    """

    PATTERN_REGEX = re.compile(r"[01xX]+")
    PATTERN_REGEX_BYTES = re.compile(b"[01xX]+")

    def __init__(self, pattern: Union[str, bytes]):
        """Initialize.

        Fixes vs. the previous version:
        * validation uses ``fullmatch`` — ``match`` only anchored at the
          start, so garbage after a valid prefix was silently accepted;
        * the dead ``isinstance(pattern, int)`` branch was removed (ints
          were already rejected by the TypeError above it, so it could
          never run).
        """
        if not isinstance(pattern, (str, bytes)):
            raise TypeError("pattern must be a string or bytes")
        # tolerate some variations
        if isinstance(pattern, str):
            if pattern.endswith("h"):
                pattern = self.hex_to_bin(pattern)
            if pattern.startswith("0b"):
                pattern = pattern[2:]
            m = self.PATTERN_REGEX.fullmatch(pattern)
        else:
            m = self.PATTERN_REGEX_BYTES.fullmatch(pattern)
        if m is None:
            raise PatternError(f"pattern is invalid: {pattern}")
        self._pattern = pattern

    @property
    def pattern(self):
        """Get pattern."""
        return self._pattern

    def __repr__(self):
        """Get representation."""
        return self.pattern

    def __len__(self):
        """Get length."""
        return len(self._pattern)

    def match(self, value: Union[str, bytes]) -> bool:
        """Match against value (the shorter operand is zero-extended).

        Fixes vs. the previous version: bytes values/patterns now use a
        bytes zero-pad (str padding raised TypeError on length mismatch)
        and bytes don't-care digits are honoured (iterating bytes yields
        ints, so the old ``in ("x", "X")`` test never fired).
        """
        if not isinstance(value, (str, bytes)):
            raise TypeError(
                f"value must be string or bytes, got {type(value)}"
            )
        if type(value) != type(self._pattern):
            raise TypeError("incompatible types for value and pattern")
        is_str = isinstance(value, str)
        zero = "0" if is_str else b"0"
        dont_care = ("x", "X") if is_str else (ord("x"), ord("X"))
        pattern = self._pattern
        if len(value) < len(pattern):
            # zero-extend incoming value
            value = zero * (len(pattern) - len(value)) + value
        elif len(value) > len(pattern):
            # zero-extend pattern
            pattern = zero * (len(value) - len(pattern)) + pattern
        for value_bit, expected_bit in zip(value, pattern):
            if expected_bit in dont_care:
                # don't care
                continue
            if expected_bit != value_bit:
                return False
        return True

    @staticmethod
    def hex_to_bin(hexstr):
        """Convert hex to binary including don't cares.

        Every hex digit expands to exactly 4 bits (leading zeros kept) and
        every 'x'/'X' digit expands to "xxxx". Fixes vs. the previous
        version: ``bin()`` left "0b" prefixes inside the result and dropped
        leading zeros, and the don't-care nibble *between* two hex fragments
        vanished entirely (``split`` discards the separator).
        """
        if hexstr.endswith("h"):
            hexstr = hexstr[:-1]
        hexstr = hexstr.replace("x", "X")
        nibbles = []
        for fragment in hexstr.split("X"):
            if fragment:
                nibbles.append(format(int(fragment, 16), "0{}b".format(4 * len(fragment))))
            else:
                nibbles.append("")
        # Joining with "xxxx" restores one don't-care nibble per 'X' digit.
        return "xxxx".join(nibbles)
|
rawrgulmuffins/presentation_notes
|
pycon2016/tutorials/measure_dont_guess/handout/pi/numpy_pi.py
|
# file: numpy_pi.py
"""Calculating pi with Monte Carlo Method and NumPy.
"""
from __future__ import print_function
import numpy #1
@profile  # injected by line_profiler (`kernprof -l`); NameError if run plainly
def pi_numpy(total): #2
    """Compute pi.

    Monte Carlo estimate: the fraction of `total` uniform points in the
    unit square that land inside the unit quarter-circle, times 4.
    """
    x = numpy.random.rand(total) #3  x coordinates in [0, 1)
    y = numpy.random.rand(total) #4  y coordinates in [0, 1)
    dist = numpy.sqrt(x * x + y * y) #5  distance from the origin
    count_inside = len(dist[dist < 1]) #6  points inside the quarter-circle
    return 4.0 * count_inside / total
if __name__ == '__main__':
    def test():
        """Time the execution.

        Wall-clock timing of a single 1e6-sample run via timeit.default_timer.
        """
        import timeit
        start = timeit.default_timer()
        pi_numpy(int(1e6))
        print('run time', timeit.default_timer() - start)
    test()
|
passuf/WunderHabit
|
wunderlist/migrations/0012_auto_20151230_1853.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-30 17:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: make Connection.owner optional (blank/null FK, CASCADE)."""

    dependencies = [
        ('wunderlist', '0011_auto_20151230_1843'),
    ]

    operations = [
        migrations.AlterField(
            model_name='connection',
            name='owner',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='connections', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
rohitkyadav/blog-api
|
src/posts/api/serializers.py
|
from rest_framework.serializers import (
HyperlinkedIdentityField,
ModelSerializer,
SerializerMethodField,
)
from comments.api.serializers import CommentSerializer
from accounts.api.serializers import UserDetailSerializer
from comments.models import Comment
from posts.models import Post
class PostCreateUpdateSerializer(ModelSerializer):
    """Writable serializer for creating/updating posts (id/slug are server-set)."""
    class Meta:
        model = Post
        fields = [
            #'id',
            'title',
            #'slug',
            'content',
            'publish',
        ]
# Shared hyperlink field pointing at the post detail endpoint (slug lookup);
# reused as `url` by the detail and list serializers below.
post_detail_url = HyperlinkedIdentityField(
        view_name = 'posts-api:detail',
        lookup_field = 'slug',
        )
class PostDetailSerializer(ModelSerializer):
    """Full read serializer for one Post: rendered HTML, image URL, comments."""
    url = post_detail_url
    user = UserDetailSerializer(read_only=True)
    image = SerializerMethodField()
    html = SerializerMethodField()
    comments = SerializerMethodField()

    class Meta:
        model = Post
        fields = [
            'url',
            'id',
            'user',
            'title',
            'slug',
            'content',
            'html',
            'publish',
            'image',
            'comments',
        ]

    def get_html(self, obj):
        # Markdown-rendered post body.
        return obj.get_markdown()

    def get_image(self, obj):
        # BUG FIX: narrowed the bare `except:` (which swallowed even
        # KeyboardInterrupt). `.url` raises ValueError when no file is
        # attached; AttributeError covers a missing/None image field.
        try:
            image = obj.image.url
        except (ValueError, AttributeError):
            image = None
        return image

    def get_comments(self, obj):
        # Comments hang off a generic relation; fetch those for this post.
        c_qs = Comment.objects.filter_by_instance(obj)
        comments = CommentSerializer(c_qs, many=True).data
        return comments
class PostListSerializer(ModelSerializer):
    """Compact read serializer for post listings (no html/image/comments)."""
    url = post_detail_url
    user = UserDetailSerializer(read_only=True)
    class Meta:
        model = Post
        fields = [
            'url',
            'user',
            'title',
            'slug',
            'content',
            'publish',
        ]
|
bogdal/freepacktbook
|
freepacktbook/slack.py
|
import json
import requests
class SlackNotification(object):
    """Post the daily free-ebook announcement to a Slack channel."""

    icon_url = "https://github-bogdal.s3.amazonaws.com/freepacktbook/icon.png"

    def __init__(self, slack_url, channel):
        self.slack_url = slack_url
        self.channel = channel
        # Slack channels are addressed with a leading '#'.
        if not self.channel.startswith("#"):
            self.channel = "#%s" % (self.channel,)

    def notify(self, data):
        """Send the webhook; silently a no-op when url/channel are missing."""
        if not (self.slack_url and self.channel):
            return
        attachment = {
            "fallback": "Today's Free eBook: %s" % data["title"],
            "pretext": "Today's Free eBook:",
            "title": data["title"],
            "title_link": data["book_url"],
            "color": "#ff7f00",
            "text": "%s\n%s" % (data["description"], data.get("url", "")),
            "thumb_url": data["image_url"].replace(" ", "%20"),
        }
        payload = {
            "channel": self.channel,
            "username": "PacktPub Free Learning",
            "icon_url": self.icon_url,
            "attachments": [attachment],
        }
        requests.post(self.slack_url, data={"payload": json.dumps(payload)})
|
memento7/KINCluster
|
KINCluster/__init__.py
|
"""
KINCluster is clustering like KIN.
release note:
- version 0.1.6
fix settings
update pipeline
delete unused arguments
fix convention by pylint
now logging
- version 0.1.5.5
fix using custom settings
support both moudle and dict
- version 0.1.5.4
Update tokenizer, remove stopwords eff
- version 0.1.5.3
now custom setting available.
see settings.py
- version 0.1.5.2
change item, extractor, pipeline module
now, pipeline.dress_item pass just item(extractor.dump)
fix prev versions error (too many value to unpack)
"""
# Package version; bump together with the release notes in the module docstring.
__version__ = '0.1.6'
# Names re-exported by `from KINCluster import *` (see imports below).
__all__ = ['KINCluster',
           'Cluster', 'Extractor', 'Item', 'Pipeline',
           'tokenizer', 'stopwords']
from KINCluster.KINCluster import KINCluster
from KINCluster.core.cluster import Cluster
from KINCluster.core.extractor import Extractor
from KINCluster.core.item import Item
from KINCluster.core.pipeline import Pipeline
from KINCluster.lib.tokenizer import tokenizer
from KINCluster.lib.stopwords import stopwords
|
app-git-hub/SendTo
|
examples/save.py
|
import sublime, sublime_plugin
class SaveAllExistingFilesCommand(sublime_plugin.ApplicationCommand):
    """Save every dirty view that is backed by a file, across all windows."""

    def run(self):
        for window in sublime.windows():
            self._save_files_in_window(window)

    def _save_files_in_window(self, window):
        for view in window.views():
            self._save_existing_file_in_view(view)

    def _save_existing_file_in_view(self, view):
        # Only views that already exist on disk and have unsaved changes.
        if view.file_name() and view.is_dirty():
            view.run_command("save")
# NOTE(review): leftover search-notes kept as a no-op string literal; it has
# no runtime effect and could be removed.
r"""
append to file sublime plugin OR api
sublime save dirty file plugin stackoverflow
"""
|
gnozell/Yar-Ha-Har
|
lib/riotwatcher/riotwatcher.py
|
from collections import deque
import time
import requests
# Constants
# Region slugs accepted by every public RiotWatcher method.
BRAZIL = 'br'
EUROPE_NORDIC_EAST = 'eune'
EUROPE_WEST = 'euw'
KOREA = 'kr'
LATIN_AMERICA_NORTH = 'lan'
LATIN_AMERICA_SOUTH = 'las'
NORTH_AMERICA = 'na'
OCEANIA = 'oce'
RUSSIA = 'ru'
TURKEY = 'tr'
# Platforms
# Maps a region slug to the platform id used by the observer-mode endpoints.
platforms = {
    BRAZIL: 'BR1',
    EUROPE_NORDIC_EAST: 'EUN1',
    EUROPE_WEST: 'EUW1',
    KOREA: 'KR',
    LATIN_AMERICA_NORTH: 'LA1',
    LATIN_AMERICA_SOUTH: 'LA2',
    NORTH_AMERICA: 'NA1',
    OCEANIA: 'OC1',
    RUSSIA: 'RU',
    TURKEY: 'TR1'
}
# Queue identifiers as returned/accepted by the API.
queue_types = [
    'CUSTOM',  # Custom games
    'NORMAL_5x5_BLIND',  # Normal 5v5 blind pick
    'BOT_5x5',  # Historical Summoners Rift coop vs AI games
    'BOT_5x5_INTRO',  # Summoners Rift Intro bots
    'BOT_5x5_BEGINNER',  # Summoner's Rift Coop vs AI Beginner Bot games
    'BOT_5x5_INTERMEDIATE',  # Historical Summoner's Rift Coop vs AI Intermediate Bot games
    'NORMAL_3x3',  # Normal 3v3 games
    'NORMAL_5x5_DRAFT',  # Normal 5v5 Draft Pick games
    'ODIN_5x5_BLIND',  # Dominion 5v5 Blind Pick games
    'ODIN_5x5_DRAFT',  # Dominion 5v5 Draft Pick games
    'BOT_ODIN_5x5',  # Dominion Coop vs AI games
    'RANKED_SOLO_5x5',  # Ranked Solo 5v5 games
    'RANKED_PREMADE_3x3',  # Ranked Premade 3v3 games
    'RANKED_PREMADE_5x5',  # Ranked Premade 5v5 games
    'RANKED_TEAM_3x3',  # Ranked Team 3v3 games
    'RANKED_TEAM_5x5',  # Ranked Team 5v5 games
    'BOT_TT_3x3',  # Twisted Treeline Coop vs AI games
    'GROUP_FINDER_5x5',  # Team Builder games
    'ARAM_5x5',  # ARAM games
    'ONEFORALL_5x5',  # One for All games
    'FIRSTBLOOD_1x1',  # Snowdown Showdown 1v1 games
    'FIRSTBLOOD_2x2',  # Snowdown Showdown 2v2 games
    'SR_6x6',  # Hexakill games
    'URF_5x5',  # Ultra Rapid Fire games
    'BOT_URF_5x5',  # Ultra Rapid Fire games played against AI games
    'NIGHTMARE_BOT_5x5_RANK1',  # Doom Bots Rank 1 games
    'NIGHTMARE_BOT_5x5_RANK2',  # Doom Bots Rank 2 games
    'NIGHTMARE_BOT_5x5_RANK5',  # Doom Bots Rank 5 games
    'ASCENSION_5x5',  # Ascension games
    'HEXAKILL',  # 6v6 games on twisted treeline
    'KING_PORO_5x5',  # King Poro game games
    'COUNTER_PICK',  # Nemesis games,
    'BILGEWATER_5x5',  # Black Market Brawlers games
]
# Static map metadata keyed by the numeric map id the API returns.
game_maps = [
    {'map_id': 1, 'name': "Summoner's Rift", 'notes': "Summer Variant"},
    {'map_id': 2, 'name': "Summoner's Rift", 'notes': "Autumn Variant"},
    {'map_id': 3, 'name': "The Proving Grounds", 'notes': "Tutorial Map"},
    {'map_id': 4, 'name': "Twisted Treeline", 'notes': "Original Version"},
    {'map_id': 8, 'name': "The Crystal Scar", 'notes': "Dominion Map"},
    {'map_id': 10, 'name': "Twisted Treeline", 'notes': "Current Version"},
    {'map_id': 11, 'name': "Summoner's Rift", 'notes': "Current Version"},
    {'map_id': 12, 'name': "Howling Abyss", 'notes': "ARAM Map"},
    {'map_id': 14, 'name': "Butcher's Bridge", 'notes': "ARAM Map"},
]
game_modes = [
    'CLASSIC',  # Classic Summoner's Rift and Twisted Treeline games
    'ODIN',  # Dominion/Crystal Scar games
    'ARAM',  # ARAM games
    'TUTORIAL',  # Tutorial games
    'ONEFORALL',  # One for All games
    'ASCENSION',  # Ascension games
    'FIRSTBLOOD',  # Snowdown Showdown games
    'KINGPORO',  # King Poro games
]
game_types = [
    'CUSTOM_GAME',  # Custom games
    'TUTORIAL_GAME',  # Tutorial games
    'MATCHED_GAME',  # All other games
]
sub_types = [
    'NONE',  # Custom games
    'NORMAL',  # Summoner's Rift unranked games
    'NORMAL_3x3',  # Twisted Treeline unranked games
    'ODIN_UNRANKED',  # Dominion/Crystal Scar games
    'ARAM_UNRANKED_5v5',  # ARAM / Howling Abyss games
    'BOT',  # Summoner's Rift and Crystal Scar games played against AI
    'BOT_3x3',  # Twisted Treeline games played against AI
    'RANKED_SOLO_5x5',  # Summoner's Rift ranked solo queue games
    'RANKED_TEAM_3x3',  # Twisted Treeline ranked team games
    'RANKED_TEAM_5x5',  # Summoner's Rift ranked team games
    'ONEFORALL_5x5',  # One for All games
    'FIRSTBLOOD_1x1',  # Snowdown Showdown 1x1 games
    'FIRSTBLOOD_2x2',  # Snowdown Showdown 2x2 games
    'SR_6x6',  # Hexakill games
    'CAP_5x5',  # Team Builder games
    'URF',  # Ultra Rapid Fire games
    'URF_BOT',  # Ultra Rapid Fire games against AI
    'NIGHTMARE_BOT',  # Nightmare bots
    'ASCENSION',  # Ascension games
    'HEXAKILL',  # Twisted Treeline 6x6 Hexakill
    'KING_PORO',  # King Poro games
    'COUNTER_PICK',  # Nemesis games
    'BILGEWATER',  # Black Market Brawlers games
]
player_stat_summary_types = [
    'Unranked',  # Summoner's Rift unranked games
    'Unranked3x3',  # Twisted Treeline unranked games
    'OdinUnranked',  # Dominion/Crystal Scar games
    'AramUnranked5x5',  # ARAM / Howling Abyss games
    'CoopVsAI',  # Summoner's Rift and Crystal Scar games played against AI
    'CoopVsAI3x3',  # Twisted Treeline games played against AI
    'RankedSolo5x5',  # Summoner's Rift ranked solo queue games
    'RankedTeams3x3',  # Twisted Treeline ranked team games
    'RankedTeams5x5',  # Summoner's Rift ranked team games
    'OneForAll5x5',  # One for All games
    'FirstBlood1x1',  # Snowdown Showdown 1x1 games
    'FirstBlood2x2',  # Snowdown Showdown 2x2 games
    'SummonersRift6x6',  # Hexakill games
    'CAP5x5',  # Team Builder games
    'URF',  # Ultra Rapid Fire games
    'URFBots',  # Ultra Rapid Fire games played against AI
    'NightmareBot',  # Summoner's Rift games played against Nightmare AI
    'Hexakill',  # Twisted Treeline 6x6 Hexakill games
    'KingPoro',  # King Poro games
    'CounterPick',  # Nemesis games
    'Bilgewater',  # Black Market Brawlers games
]
# Convenience aliases for the three ranked queue names used by get_challenger/get_master.
solo_queue, ranked_5s, ranked_3s = 'RANKED_SOLO_5x5', 'RANKED_TEAM_5x5', 'RANKED_TEAM_3x3'
# API family version numbers that are baked into request URLs.
api_versions = {
    'champion': 1.2,
    'current-game': 1.0,
    'featured-games': 1.0,
    'game': 1.3,
    'league': 2.5,
    'lol-static-data': 1.2,
    'lol-status': 1.0,
    'match': 2.2,
    'matchhistory': 2.2,
    'matchlist': 2.2,
    'stats': 1.3,
    'summoner': 1.4,
    'team': 2.4
}
class LoLException(Exception):
    """Raised when the Riot API answers with a known HTTP error status."""

    def __init__(self, error, response):
        self.error = error
        self.response = response

    def __str__(self):
        return self.error


error_400 = "Bad request"
error_401 = "Unauthorized"
error_404 = "Game data not found"
error_429 = "Too many requests"
error_500 = "Internal server error"
error_503 = "Service unavailable"


def raise_status(response):
    """Raise LoLException for the API's known error codes.

    Any other non-success status is delegated to
    ``response.raise_for_status()``; a success status returns silently.
    """
    known = {
        400: error_400,
        401: error_401,
        404: error_404,
        429: error_429,
        500: error_500,
        503: error_503,
    }
    message = known.get(response.status_code)
    if message is not None:
        raise LoLException(message, response)
    response.raise_for_status()
class RateLimit:
    """Sliding-window rate limiter: at most ``allowed_requests`` per ``seconds``.

    Each recorded request is stored as its expiry timestamp; expired entries
    are purged lazily before every availability check.
    """

    def __init__(self, allowed_requests, seconds):
        self.allowed_requests = allowed_requests
        self.seconds = seconds
        self.made_requests = deque()

    def __reload(self):
        # Drop timestamps whose window has already elapsed.
        now = time.time()
        while self.made_requests and self.made_requests[0] < now:
            self.made_requests.popleft()

    def add_request(self):
        # Record the moment this request stops counting against the limit.
        self.made_requests.append(time.time() + self.seconds)

    def request_available(self):
        self.__reload()
        return len(self.made_requests) < self.allowed_requests
class RiotWatcher:
    """Thin client for the (legacy) Riot Games REST API.

    Every public method accepts an optional ``region`` slug (falling back to
    ``default_region``) and returns the decoded JSON body of the response.
    Known HTTP error codes are raised as ``LoLException`` by ``raise_status``.
    Non-static requests are counted against each configured ``RateLimit``.
    """

    def __init__(self, key, default_region=NORTH_AMERICA, limits=(RateLimit(10, 10), RateLimit(500, 600), )):
        # key: the Riot developer API key appended to every request.
        self.key = key
        self.default_region = default_region
        self.limits = limits

    def can_make_request(self):
        """Return True only if every configured rate limit has capacity left."""
        for lim in self.limits:
            if not lim.request_available():
                return False
        return True

    def base_request(self, url, region, static=False, **kwargs):
        """GET against the main API.

        static=True routes through the 'global' proxy (static-data endpoints)
        and does not count against the rate limits. None-valued kwargs are
        dropped from the query string.
        """
        if region is None:
            region = self.default_region
        args = {'api_key': self.key}
        for k in kwargs:
            if kwargs[k] is not None:
                args[k] = kwargs[k]
        r = requests.get(
            'https://{proxy}.api.pvp.net/api/lol/{static}{region}/{url}'.format(
                proxy='global' if static else region,
                static='static-data/' if static else '',
                region=region,
                url=url
            ),
            params=args
        )
        if not static:
            for lim in self.limits:
                lim.add_request()
        raise_status(r)
        return r.json()

    def _observer_mode_request(self, url, proxy=None, **kwargs):
        """GET against the observer-mode endpoints (spectator/featured games)."""
        if proxy is None:
            proxy = self.default_region
        args = {'api_key': self.key}
        for k in kwargs:
            if kwargs[k] is not None:
                args[k] = kwargs[k]
        r = requests.get(
            'https://{proxy}.api.pvp.net/observer-mode/rest/{url}'.format(
                proxy=proxy,
                url=url
            ),
            params=args
        )
        for lim in self.limits:
            lim.add_request()
        raise_status(r)
        return r.json()

    @staticmethod
    def sanitized_name(name):
        """Normalize a summoner name the way the by-name endpoint expects."""
        return name.replace(' ', '').lower()

    # champion-v1.2
    def _champion_request(self, end_url, region, **kwargs):
        return self.base_request(
            'v{version}/champion/{end_url}'.format(
                version=api_versions['champion'],
                end_url=end_url
            ),
            region,
            **kwargs
        )

    def get_all_champions(self, region=None, free_to_play=False):
        return self._champion_request('', region, freeToPlay=free_to_play)

    def get_champion(self, champion_id, region=None):
        return self._champion_request('{id}'.format(id=champion_id), region)

    # current-game-v1.0
    def get_current_game(self, summoner_id, platform_id=None, region=None):
        if platform_id is None:
            platform_id = platforms[self.default_region]
        return self._observer_mode_request(
            'consumer/getSpectatorGameInfo/{platform}/{summoner_id}'.format(
                platform=platform_id,
                summoner_id=summoner_id
            ),
            region
        )

    # featured-game-v1.0
    def get_featured_games(self, proxy=None):
        return self._observer_mode_request('featured', proxy)

    # game-v1.3
    def _game_request(self, end_url, region, **kwargs):
        return self.base_request(
            'v{version}/game/{end_url}'.format(
                version=api_versions['game'],
                end_url=end_url
            ),
            region,
            **kwargs
        )

    def get_recent_games(self, summoner_id, region=None):
        return self._game_request('by-summoner/{summoner_id}/recent'.format(summoner_id=summoner_id), region)

    # league-v2.5
    def _league_request(self, end_url, region, **kwargs):
        return self.base_request(
            'v{version}/league/{end_url}'.format(
                version=api_versions['league'],
                end_url=end_url
            ),
            region,
            **kwargs
        )

    def get_league(self, summoner_ids=None, team_ids=None, region=None):
        """summoner_ids and team_ids arguments must be iterable, only one should be specified, not both.

        Returns None (implicitly) when both or neither are given.
        """
        if (summoner_ids is None) != (team_ids is None):
            if summoner_ids is not None:
                return self._league_request(
                    'by-summoner/{summoner_ids}'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
                    region
                )
            else:
                return self._league_request(
                    'by-team/{team_ids}'.format(team_ids=','.join([str(t) for t in team_ids])),
                    region
                )

    def get_league_entry(self, summoner_ids=None, team_ids=None, region=None):
        """summoner_ids and team_ids arguments must be iterable, only one should be specified, not both"""
        if (summoner_ids is None) != (team_ids is None):
            if summoner_ids is not None:
                return self._league_request(
                    'by-summoner/{summoner_ids}/entry'.format(
                        summoner_ids=','.join([str(s) for s in summoner_ids])
                    ),
                    region
                )
            else:
                return self._league_request(
                    'by-team/{team_ids}/entry'.format(team_ids=','.join([str(t) for t in team_ids])),
                    region
                )

    def get_challenger(self, region=None, queue=solo_queue):
        return self._league_request('challenger', region, type=queue)

    def get_master(self, region=None, queue=solo_queue):
        return self._league_request('master', region, type=queue)

    # lol-static-data-v1.2
    def _static_request(self, end_url, region, **kwargs):
        # Static data goes through the global proxy and is not rate limited.
        return self.base_request(
            'v{version}/{end_url}'.format(
                version=api_versions['lol-static-data'],
                end_url=end_url
            ),
            region,
            static=True,
            **kwargs
        )

    def static_get_champion_list(self, region=None, locale=None, version=None, data_by_id=None, champ_data=None):
        return self._static_request(
            'champion',
            region,
            locale=locale,
            version=version,
            dataById=data_by_id,
            champData=champ_data
        )

    def static_get_champion(self, champ_id, region=None, locale=None, version=None, champ_data=None):
        return self._static_request(
            'champion/{id}'.format(id=champ_id),
            region,
            locale=locale,
            version=version,
            champData=champ_data
        )

    def static_get_item_list(self, region=None, locale=None, version=None, item_list_data=None):
        return self._static_request('item', region, locale=locale, version=version, itemListData=item_list_data)

    def static_get_item(self, item_id, region=None, locale=None, version=None, item_data=None):
        return self._static_request(
            'item/{id}'.format(id=item_id),
            region,
            locale=locale,
            version=version,
            itemData=item_data
        )

    def static_get_mastery_list(self, region=None, locale=None, version=None, mastery_list_data=None):
        return self._static_request(
            'mastery',
            region,
            locale=locale,
            version=version,
            masteryListData=mastery_list_data
        )

    def static_get_mastery(self, mastery_id, region=None, locale=None, version=None, mastery_data=None):
        return self._static_request(
            'mastery/{id}'.format(id=mastery_id),
            region,
            locale=locale,
            version=version,
            masteryData=mastery_data
        )

    def static_get_realm(self, region=None):
        return self._static_request('realm', region)

    def static_get_rune_list(self, region=None, locale=None, version=None, rune_list_data=None):
        return self._static_request('rune', region, locale=locale, version=version, runeListData=rune_list_data)

    def static_get_rune(self, rune_id, region=None, locale=None, version=None, rune_data=None):
        return self._static_request(
            'rune/{id}'.format(id=rune_id),
            region,
            locale=locale,
            version=version,
            runeData=rune_data
        )

    def static_get_summoner_spell_list(self, region=None, locale=None, version=None, data_by_id=None, spell_data=None):
        return self._static_request(
            'summoner-spell',
            region,
            locale=locale,
            version=version,
            dataById=data_by_id,
            spellData=spell_data
        )

    def static_get_summoner_spell(self, spell_id, region=None, locale=None, version=None, spell_data=None):
        return self._static_request(
            'summoner-spell/{id}'.format(id=spell_id),
            region,
            locale=locale,
            version=version,
            spellData=spell_data
        )

    def static_get_versions(self, region=None):
        return self._static_request('versions', region)

    # match-v2.2
    def _match_request(self, end_url, region, **kwargs):
        return self.base_request(
            'v{version}/match/{end_url}'.format(
                version=api_versions['match'],
                end_url=end_url
            ),
            region,
            **kwargs
        )

    def get_match(self, match_id, region=None, include_timeline=False):
        return self._match_request(
            '{match_id}'.format(match_id=match_id),
            region,
            includeTimeline=include_timeline
        )

    # lol-status-v1.0
    @staticmethod
    def get_server_status(region=None):
        """Shard status is served from a separate host, without an API key."""
        if region is None:
            url = 'shards'
        else:
            url = 'shards/{region}'.format(region=region)
        r = requests.get('http://status.leagueoflegends.com/{url}'.format(url=url))
        raise_status(r)
        return r.json()

    # match history-v2.2
    def _match_history_request(self, end_url, region, **kwargs):
        return self.base_request(
            'v{version}/matchhistory/{end_url}'.format(
                version=api_versions['matchhistory'],
                end_url=end_url
            ),
            region,
            **kwargs
        )

    def get_match_history(self, summoner_id, region=None, champion_ids=None, ranked_queues=None, begin_index=None,
                          end_index=None):
        return self._match_history_request(
            '{summoner_id}'.format(summoner_id=summoner_id),
            region,
            championIds=champion_ids,
            rankedQueues=ranked_queues,
            beginIndex=begin_index,
            endIndex=end_index
        )

    # match list-v2.2
    def _match_list_request(self, end_url, region, **kwargs):
        return self.base_request(
            'v{version}/matchlist/by-summoner/{end_url}'.format(
                version=api_versions['matchlist'],
                end_url=end_url,
            ),
            region,
            **kwargs
        )

    def get_match_list(self, summoner_id, region=None, champion_ids=None, ranked_queues=None, seasons=None,
                       begin_time=None, end_time=None, begin_index=None, end_index=None):
        return self._match_list_request(
            '{summoner_id}'.format(summoner_id=summoner_id),
            region,
            # Fix: the matchlist endpoint expects 'championIds' (as used by
            # get_match_history); the old 'championsIds' key was silently
            # ignored, so filtering by champion never took effect.
            championIds=champion_ids,
            rankedQueues=ranked_queues,
            seasons=seasons,
            beginTime=begin_time,
            endTime=end_time,
            beginIndex=begin_index,
            endIndex=end_index
        )

    # stats-v1.3
    def _stats_request(self, end_url, region, **kwargs):
        return self.base_request(
            'v{version}/stats/{end_url}'.format(
                version=api_versions['stats'],
                end_url=end_url
            ),
            region,
            **kwargs
        )

    def get_stat_summary(self, summoner_id, region=None, season=None):
        return self._stats_request(
            'by-summoner/{summoner_id}/summary'.format(summoner_id=summoner_id),
            region,
            season='SEASON{}'.format(season) if season is not None else None)

    def get_ranked_stats(self, summoner_id, region=None, season=None):
        return self._stats_request(
            'by-summoner/{summoner_id}/ranked'.format(summoner_id=summoner_id),
            region,
            season='SEASON{}'.format(season) if season is not None else None
        )

    # summoner-v1.4
    def _summoner_request(self, end_url, region, **kwargs):
        return self.base_request(
            'v{version}/summoner/{end_url}'.format(
                version=api_versions['summoner'],
                end_url=end_url
            ),
            region,
            **kwargs
        )

    def get_mastery_pages(self, summoner_ids, region=None):
        return self._summoner_request(
            '{summoner_ids}/masteries'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
            region
        )

    def get_rune_pages(self, summoner_ids, region=None):
        return self._summoner_request(
            '{summoner_ids}/runes'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
            region
        )

    def get_summoners(self, names=None, ids=None, region=None):
        """Look up summoners by names OR ids (exactly one); None otherwise."""
        if (names is None) != (ids is None):
            return self._summoner_request(
                'by-name/{summoner_names}'.format(
                    summoner_names=','.join([self.sanitized_name(n) for n in names])) if names is not None
                else '{summoner_ids}'.format(summoner_ids=','.join([str(i) for i in ids])),
                region
            )
        else:
            return None

    def get_summoner(self, name=None, _id=None, region=None):
        """Single-summoner convenience wrapper around get_summoners."""
        if (name is None) != (_id is None):
            if name is not None:
                name = self.sanitized_name(name)
                return self.get_summoners(names=[name, ], region=region)[name]
            else:
                return self.get_summoners(ids=[_id, ], region=region)[str(_id)]
        return None

    def get_summoner_name(self, summoner_ids, region=None):
        return self._summoner_request(
            '{summoner_ids}/name'.format(summoner_ids=','.join([str(s) for s in summoner_ids])),
            region
        )

    # team-v2.4
    def _team_request(self, end_url, region, **kwargs):
        return self.base_request(
            'v{version}/team/{end_url}'.format(
                version=api_versions['team'],
                end_url=end_url
            ),
            region,
            **kwargs
        )

    def get_teams_for_summoner(self, summoner_id, region=None):
        return self.get_teams_for_summoners([summoner_id, ], region=region)[str(summoner_id)]

    def get_teams_for_summoners(self, summoner_ids, region=None):
        return self._team_request(
            'by-summoner/{summoner_id}'.format(summoner_id=','.join([str(s) for s in summoner_ids])),
            region
        )

    def get_team(self, team_id, region=None):
        return self.get_teams([team_id, ], region=region)[str(team_id)]

    def get_teams(self, team_ids, region=None):
        return self._team_request('{team_ids}'.format(team_ids=','.join(str(t) for t in team_ids)), region)
|
gomjellie/SoongSiri
|
legacy_codes/app/managers.py
|
from .message import *
from functools import wraps
import datetime
import pymongo
import re
from app import session
class Singleton(type):
    """Metaclass that caches the first instance created for each class.

    The instance is stored on the class itself (``cls.instance``), so each
    class using this metaclass gets its own singleton.
    """

    instance = None

    def __call__(cls, *args, **kwargs):
        # Fix: compare against None explicitly. The old truthiness test
        # (`if not cls.instance`) silently re-created the singleton whenever
        # the cached instance happened to be falsy (e.g. an empty container).
        if cls.instance is None:
            cls.instance = super(Singleton, cls).__call__(*args, **kwargs)
        return cls.instance
class APIManager(metaclass=Singleton):
    """Routes incoming chatbot requests to the appropriate Message class.

    STATELESS_PROCESS maps a single keyword directly to a Message class.
    PROCESS describes multi-turn dialogues as a list of per-step dictionaries;
    '*' matches any free-form keyboard input at that step.
    """

    STATELESS_PROCESS = {
        '오늘의 식단': FoodMessage,
        '운영시간': TimeTableMessage,
        '학식': PupilFoodMessage,
        '교식': FacultyFoodMessage,
        # '기식': DormFoodMessage,
        '푸드코트': FoodCourtMessage,
        '스낵코너': SnackCornerMessage,
        '더 키친': TheKitchenMessage,
        '버스': BusMessage,
        '정문(20166)': BusFrontMessage,
        '베라 앞(20165)': BusBeraMessage,
        '중문(20169)': BusMiddleMessage,
        '지하철': SubMessage,
        '도서관': LibMessage,
    }
    PROCESS = {
        '내일의 식단': [
            {
                '내일의 식단': TomorrowFoodMessage,
            },
            {
                '학식': TomorrowPupilFoodMessage,
                '교식': TomorrowFacultyFoodMessage,
                # '기식': TomorrowDormFoodMessage,
                '푸드코트': TomorrowFoodCourtMessage,
                '스낵코너': TomorrowSnackCornerMessage,
                '더 키친': TomorrowTheKitchenMessage,
            },
        ],
        # '도서관': [
        #     {
        #         '도서관': LibMessage,
        #     },
        #     {
        #         # treated as a special case for now
        #         '*': OnGoingMessage,
        #     }
        # ],
        '식단 리뷰': [
            {
                '식단 리뷰': ReviewInitMessage,
            },
            {
                '리뷰 보기': ReviewBrowseMessage,
                '리뷰 남기기': ReviewPostMessage,
                '리뷰 삭제하기': OnGoingMessage,
            },
            {
                # After '리뷰 남기기' the dialogue reaches step 3; the keyboard
                # input can be an arbitrary string, so match anything.
                '*': OnGoingMessage,
            }
        ],
    }

    def handle_process(self, process, user_key, content):
        """Handle items that require a multi-turn dialogue.

        :return: Message object for the current step; failures reset the session.
        """
        if process == '도서관':
            if '열람실' in content:
                # '1 열람실 (이용률: 9.11%)'[0] extracts just the room number.
                room = content[0]
                msg = LibStatMessage(room=room)
                UserSessionAdmin.delete(user_key)
            else:
                UserSessionAdmin.delete(user_key)
                return FailMessage('도서관 process에서 문제가 발생하였습니다 해당 세션을 초기화합니다.')
            return msg
        elif process == '식단 리뷰':
            if content in self.PROCESS[process][1]:
                new_msg = self.PROCESS[process][1][content]
                # NOTE(review): step 2 registers '리뷰 삭제하기', but this check
                # looks for '리뷰 삭제', so that branch can never match here —
                # confirm the intended keyword before changing behavior.
                if content in ['리뷰 보기', '리뷰 삭제']:
                    UserSessionAdmin.delete(user_key)
                return new_msg()
            else:
                UserSessionAdmin.delete(user_key)
                return ReviewPostSuccess(user_key, content)
        elif process == '내일의 식단':
            if content in self.PROCESS[process][1]:
                new_msg = self.PROCESS[process][1][content]
                UserSessionAdmin.delete(user_key)
            else:
                UserSessionAdmin.delete(user_key)
                return FailMessage('내일의 식단 process에서 문제가 발생하였습니다 해당 세션을 초기화합니다.')
            return new_msg()
        return FailMessage('Unhandled process {}'.format(process))

    def handle_stateless_process(self, user_key, content):
        """Handle one-shot items that need no prior session state.

        A keyword that starts a multi-turn PROCESS initializes the session
        and returns that process's first-step message instead.

        :param user_key: chat user identifier
        :param content: incoming message text
        :return: Message object
        """
        if content in self.PROCESS:
            UserSessionAdmin.init_process(user_key, content)
            new_msg = self.PROCESS[content][0][content]
            return new_msg()
        else:
            new_msg = self.STATELESS_PROCESS[content]
            return new_msg()

    def get_msg(self, user_key, content):
        """Dispatch a message: resume an active process or handle statelessly."""
        has_session = UserSessionAdmin.check_user_key(user_key)
        process = UserSessionAdmin.get_process(user_key)
        if not has_session:
            UserSessionAdmin.init(user_key, content)
        if content == '취소':
            # '취소' (cancel) always aborts the session.
            UserSessionAdmin.delete(user_key)
            return CancelMessage()
        UserSessionAdmin.add_history(user_key, content)
        if process:
            return self.handle_process(process, user_key, content)
        else:
            return self.handle_stateless_process(user_key, content)

    def process(self, stat, req=None):
        """Top-level entry point keyed by the request kind in `stat`."""
        # Fix: compare strings with `==`, not `is`. Identity comparison of
        # string literals only works by CPython interning accident and breaks
        # for dynamically built or non-interned strings.
        if stat == 'home':
            home_message = HomeMessage()
            return home_message
        elif stat == 'message':
            content = req['content']
            user_key = req['user_key']
            return self.get_msg(user_key, content)
        elif stat == 'fail':
            log = req['log']
            user_key = req['user_key']
            fail_message = FailMessage('파악할수 없는 에러가 발생하여 해당 세션을 초기화 합니다\n{}'.format(log))
            UserSessionAdmin.delete(user_key)
            return fail_message
        elif stat == 'etc':
            return SuccessMessage()
        elif stat == "scheduler":
            return CronUpdateMessage()
        elif stat == "refresh_tomorrow":
            return CronUpdateTomorrowMessage()
        else:
            return FailMessage("stat not in list('home', 'message', 'fail')")
class SessionManager(metaclass=Singleton):
    """Tracks per-user dialogue state in the MongoDB `session` collection.

    Documents have the shape {'user_key': ..., 'history': [...], 'process': ...}
    (see `init`); `process` names the multi-turn dialogue the user is in.
    """

    @staticmethod
    def check_user_key(user_key):
        # True if a session document already exists for this user.
        if session.find_one({'user_key': user_key}):
            return True
        else:
            return False

    def verify_session(func):
        # Decorator defined in the class body (applied before the class object
        # exists): returns False instead of calling the wrapped method when
        # the user (positional arg 1) has no session document.
        @wraps(func)
        def session_wrapper(*args, **kwargs):
            user_key = args[1]
            if session.find_one({'user_key': user_key}):
                return func(*args, **kwargs)
            else:
                return False
        return session_wrapper

    def init(self, user_key, content=None, process=None):
        # Create a fresh session; history starts with the first message text.
        session.insert_one({
            'user_key': user_key,
            'history': [content],
            'process': process,
        })

    @verify_session
    def delete(self, user_key):
        # NOTE(review): Collection.remove/save were removed in PyMongo 4 —
        # presumably this targets an older PyMongo; confirm before upgrading.
        session.remove({'user_key': user_key})

    @verify_session
    def add_history(self, user_key, content):
        # Append the message to the stored history and write the doc back.
        user = session.find_one({'user_key': user_key})
        history = user['history']
        history.append(content)
        user.update({'history': history})  # dict.update on the fetched document
        session.save(user)

    @verify_session
    def get_history(self, user_key):
        user = session.find_one({'user_key': user_key})
        history = user['history']
        return history[:]  # return a copy so callers cannot mutate the stored list

    @verify_session
    def init_process(self, user_key, process):
        # Mark the user as having entered the named multi-turn process.
        user = session.find_one({'user_key': user_key})
        user.update({'process': process})
        session.save(user)

    @verify_session
    def expire_process(self, user_key):
        # Clear the process marker while keeping the session and history.
        user = session.find_one({'user_key': user_key})
        user.update({'process': None})
        session.save(user)

    @verify_session
    def get_process(self, user_key):
        user = session.find_one({'user_key': user_key})
        return user['process']
class DBManager:
    """Wraps the MongoDB collections for menus (hakusiku), reviews and bans."""

    def __init__(self):
        _conn = pymongo.MongoClient()
        _food_db = _conn.food_db
        self.hakusiku = _food_db.hakusiku
        self.review = _food_db.review
        self.ban_list = _food_db.ban_list
        # Seed the singleton ban-list document on first run.
        if self._get_black_list() is None:
            self.ban_list.insert_one({'black_list': []})

    def get_hakusiku_data(self, date=None):
        """Return the stored menu document for `date` (defaults to today)."""
        date = date or datetime.date.today()
        date_str = date.__str__()
        data = self.hakusiku.find_one({'날짜': date_str})
        return data

    def set_hakusiku_data(self, data, date=None):
        """Insert the menu document for `date`, or replace an existing one."""
        date = date or datetime.date.today()
        date_str = date.__str__()
        if self.get_hakusiku_data(date=date_str) is None:
            self.hakusiku.insert_one(data)
        else:
            self.hakusiku.replace_one({"날짜": date_str}, data)

    def is_banned_user(self, user_key):
        """True if `user_key` appears on the persisted ban list."""
        black_list = self._get_black_list()
        return black_list is not None and user_key in black_list

    def _get_black_list(self):
        # Fix: unwrap the document and return the list itself (None when the
        # seed document is missing). Previously the raw document dict was
        # returned, so `in` tested the field names ('black_list') instead of
        # user keys, and ban_user tried to append to a dict.
        doc = self.ban_list.find_one({}, {'_id': 0, 'black_list': 1})
        if doc is None:
            return None
        return doc.get('black_list')

    def ban_user(self, user_key):
        """Add `user_key` to the ban list and persist it."""
        # Fix: the old code appended to a local copy only, so bans were never
        # saved; push the key into the stored document instead.
        self.ban_list.update_one({}, {'$push': {'black_list': user_key}})

    def get_review(self):
        """Return today's review document, creating an empty one when missing."""
        date = datetime.date.today().__str__()
        data = self.review.find_one({'날짜': date}) or self.init_review()
        return data

    def init_review(self):
        """Create an empty review document for today and return it."""
        date = datetime.date.today().__str__()
        self.review.insert_one({
            '날짜': date,
            '리뷰': [],
        })
        return self.get_review()

    def append_review(self, user_key: str, new_review: str):
        """Append a sanitized review for `user_key` (max 5 per user per day)."""
        def count_user_key(lst):
            # TODO: replace with a MongoDB-side count instead of this loop.
            s = 0
            for i in lst:
                if i.get('user_key') == user_key:
                    s += 1
            return s

        def remove_special_char(src):
            return re.sub("[!@#$%^&*()]", "", src)

        review = self.get_review()
        if count_user_key(review['리뷰']) < 5:
            review['리뷰'].append({'user_key': user_key, 'content': remove_special_char(new_review)})
            self.review.find_one_and_replace({'날짜': datetime.date.today().__str__()}, review)
        else:
            raise Exception('5회 이상 작성하셨습니다.')
# Module-level singleton facades used throughout the app's request handlers.
APIAdmin = APIManager()
UserSessionAdmin = SessionManager()
DBAdmin = DBManager()
|
natemara/aloft.py
|
setup.py
|
from setuptools import setup
# Runtime dependencies are read from requirements.txt so the package
# metadata and the pinned requirements file cannot drift apart.
with open('requirements.txt') as f:
    required = f.read().splitlines()
setup(
    name="aloft.py",
    version="0.0.4",
    author="Nate Mara",
    author_email="natemara@gmail.com",
    description="A simple API for getting winds aloft data from NOAA",
    license="MIT",
    test_suite="tests",
    keywords="aviation weather winds aloft",
    url="https://github.com/natemara/aloft.py",
    packages=['aloft'],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Utilities",
        "License :: OSI Approved :: MIT License",
    ],
    install_requires=required,
)
|
DailyActie/Surrogate-Model
|
01-codes/scipy-master/scipy/sparse/linalg/__init__.py
|
"""
==================================================
Sparse linear algebra (:mod:`scipy.sparse.linalg`)
==================================================
.. currentmodule:: scipy.sparse.linalg
Abstract linear operators
-------------------------
.. autosummary::
:toctree: generated/
LinearOperator -- abstract representation of a linear operator
aslinearoperator -- convert an object to an abstract linear operator
Matrix Operations
-----------------
.. autosummary::
:toctree: generated/
inv -- compute the sparse matrix inverse
expm -- compute the sparse matrix exponential
expm_multiply -- compute the product of a matrix exponential and a matrix
Matrix norms
------------
.. autosummary::
:toctree: generated/
norm -- Norm of a sparse matrix
onenormest -- Estimate the 1-norm of a sparse matrix
Solving linear problems
-----------------------
Direct methods for linear equation systems:
.. autosummary::
:toctree: generated/
spsolve -- Solve the sparse linear system Ax=b
factorized -- Pre-factorize matrix to a function solving a linear system
MatrixRankWarning -- Warning on exactly singular matrices
use_solver -- Select direct solver to use
Iterative methods for linear equation systems:
.. autosummary::
:toctree: generated/
bicg -- Use BIConjugate Gradient iteration to solve A x = b
bicgstab -- Use BIConjugate Gradient STABilized iteration to solve A x = b
cg -- Use Conjugate Gradient iteration to solve A x = b
cgs -- Use Conjugate Gradient Squared iteration to solve A x = b
gmres -- Use Generalized Minimal RESidual iteration to solve A x = b
lgmres -- Solve a matrix equation using the LGMRES algorithm
minres -- Use MINimum RESidual iteration to solve Ax = b
qmr -- Use Quasi-Minimal Residual iteration to solve A x = b
Iterative methods for least-squares problems:
.. autosummary::
:toctree: generated/
lsqr -- Find the least-squares solution to a sparse linear equation system
lsmr -- Find the least-squares solution to a sparse linear equation system
Matrix factorizations
---------------------
Eigenvalue problems:
.. autosummary::
:toctree: generated/
eigs -- Find k eigenvalues and eigenvectors of the square matrix A
eigsh -- Find k eigenvalues and eigenvectors of a symmetric matrix
lobpcg -- Solve symmetric partial eigenproblems with optional preconditioning
Singular values problems:
.. autosummary::
:toctree: generated/
svds -- Compute k singular values/vectors for a sparse matrix
Complete or incomplete LU factorizations
.. autosummary::
:toctree: generated/
splu -- Compute a LU decomposition for a sparse matrix
spilu -- Compute an incomplete LU decomposition for a sparse matrix
SuperLU -- Object representing an LU factorization
Exceptions
----------
.. autosummary::
:toctree: generated/
ArpackNoConvergence
ArpackError
"""
from __future__ import division, print_function, absolute_import
# NOTE(review): __all__ is computed from dir() at this point, so it only
# exports names already bound above this line — presumably the upstream file
# star-imports the solver submodules first; confirm against upstream scipy.
__all__ = [s for s in dir() if not s.startswith('_')]
# numpy.testing.Tester provides the module-level `test` runner entry point.
from numpy.testing import Tester
test = Tester().test
|
ngageoint/geoq
|
geoq/agents/admin.py
|
# -*- coding: utf-8 -*-
# This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from reversion.admin import VersionAdmin
from django.contrib.gis import admin
from .models import Feedback,Topic
@admin.register(Feedback)
class FeedbackAdmin(VersionAdmin, admin.ModelAdmin):
    # Reversion-enabled admin for user feedback; listed columns mirror the
    # Feedback model's fields, sorted by topic.
    model = Feedback
    list_display = ['name', 'email', 'topic', 'message']
    save_as = True  # allow "save as new" to clone an existing entry
    ordering = ['topic']
@admin.register(Topic)
class TopicAdmin(VersionAdmin, admin.ModelAdmin):
    # Reversion-enabled admin for feedback topics, sorted alphabetically.
    model = Topic
    list_display = ['name']
    save_as = True  # allow "save as new" to clone an existing entry
    ordering = ['name']
|
oblique-labs/pyVM
|
rpython/jit/metainterp/test/test_recursive.py
|
import py
from rpython.rlib.jit import JitDriver, hint, set_param
from rpython.rlib.jit import unroll_safe, dont_look_inside, promote
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib.debug import fatalerror
from rpython.jit.metainterp.test.support import LLJitMixin
from rpython.jit.codewriter.policy import StopAtXPolicy
from rpython.rtyper.annlowlevel import hlstr
from rpython.jit.metainterp.warmspot import get_stats
from rpython.jit.backend.llsupport import codemap
class RecursiveTests:
def test_simple_recursion(self):
    """Mutual recursion between f() and main() must trace cleanly; the
    recorded history must contain no residual (non-inlined) calls."""
    myjitdriver = JitDriver(greens=[], reds=['n', 'm'])

    def f(n):
        # Counts n down until it meets m (= original n - 2), then recurses
        # through main() with the smaller value.
        m = n - 2
        while True:
            myjitdriver.jit_merge_point(n=n, m=m)
            n -= 1
            if m == n:
                return main(n) * 2
            myjitdriver.can_enter_jit(n=n, m=m)

    def main(n):
        if n > 0:
            return f(n+1)
        else:
            return 1

    res = self.meta_interp(main, [20], enable_opts='')
    assert res == main(20)
    self.check_history(call_i=0)
def test_simple_recursion_with_exc(self):
    """Same recursion shape as test_simple_recursion, but an exception is
    raised at n == 10 and caught around the recursive call."""
    myjitdriver = JitDriver(greens=[], reds=['n', 'm'])

    class Error(Exception):
        pass

    def f(n):
        m = n - 2
        while True:
            myjitdriver.jit_merge_point(n=n, m=m)
            n -= 1
            if n == 10:
                raise Error
            if m == n:
                try:
                    return main(n) * 2
                except Error:
                    return 2
            myjitdriver.can_enter_jit(n=n, m=m)

    def main(n):
        if n > 0:
            return f(n+1)
        else:
            return 1

    res = self.meta_interp(main, [20], enable_opts='')
    assert res == main(20)
def test_recursion_three_times(self):
    """Each loop iteration recurses via main(n); the number of tracer
    entries must stay bounded (at most 11 for input 10)."""
    myjitdriver = JitDriver(greens=[], reds=['n', 'm', 'total'])

    def f(n):
        m = n - 3
        total = 0
        while True:
            myjitdriver.jit_merge_point(n=n, m=m, total=total)
            n -= 1
            total += main(n)
            if m == n:
                return total + 5
            myjitdriver.can_enter_jit(n=n, m=m, total=total)

    def main(n):
        if n > 0:
            return f(n)
        else:
            return 1

    # Python 2 print statements (this is RPython test code): dump the raw
    # values of f() for small inputs before meta-interpreting.
    print
    for i in range(1, 11):
        print '%3d %9d' % (i, f(i))
    res = self.meta_interp(main, [10], enable_opts='')
    assert res == main(10)
    self.check_enter_count_at_most(11)
def test_bug_1(self):
    """Regression test: an opaque (residual) call that itself re-enters the
    jitted function 20 times must not confuse the tracer."""
    myjitdriver = JitDriver(greens=[], reds=['n', 'i', 'stack'])

    def opaque(n, i):
        # Kept out of the JIT via StopAtXPolicy; on the last iteration it
        # recurses into f() repeatedly from outside traced code.
        if n == 1 and i == 19:
            for j in range(20):
                res = f(0)      # recurse repeatedly, 20 times
                assert res == 0

    def f(n):
        stack = [n]
        i = 0
        while i < 20:
            myjitdriver.can_enter_jit(n=n, i=i, stack=stack)
            myjitdriver.jit_merge_point(n=n, i=i, stack=stack)
            opaque(n, i)
            i += 1
        return stack.pop()

    res = self.meta_interp(f, [1], enable_opts='', repeat=2,
                           policy=StopAtXPolicy(opaque))
    assert res == 1
def get_interpreter(self, codes):
    """Build a tiny bytecode interpreter over `codes` for the inlining tests.

    Opcodes: '0' ADD increments n; '1' JUMP_BACK jumps back 2 positions
    until n > 20 (then returns 42); '2' CALL recurses into codes[1];
    '3' EXIT returns n.
    """
    ADD = "0"
    JUMP_BACK = "1"
    CALL = "2"
    EXIT = "3"

    def getloc(i, code):
        # Green-key pretty-printer used in JIT logs.
        return 'code="%s", i=%d' % (code, i)

    jitdriver = JitDriver(greens = ['i', 'code'], reds = ['n'],
                          get_printable_location = getloc)

    def interpret(codenum, n, i):
        code = codes[codenum]
        while i < len(code):
            jitdriver.jit_merge_point(n=n, i=i, code=code)
            op = code[i]
            if op == ADD:
                n += 1
                i += 1
            elif op == CALL:
                # Always calls into codes[1], starting at pc 1.
                n = interpret(1, n, 1)
                i += 1
            elif op == JUMP_BACK:
                if n > 20:
                    return 42
                i -= 2
                jitdriver.can_enter_jit(n=n, i=i, code=code)
            elif op == EXIT:
                return n
            else:
                raise NotImplementedError
        return n

    return interpret
    def test_inline(self):
        """Without inlining, the CALL op shows up as one residual
        call_may_force; with inline=True it disappears and both
        int_adds from the subcode appear in the trace."""
        code = "021"
        subcode = "00"
        codes = [code, subcode]
        f = self.get_interpreter(codes)
        assert self.meta_interp(f, [0, 0, 0], enable_opts='') == 42
        self.check_resops(call_may_force_i=1, int_add=1, call=0)
        assert self.meta_interp(f, [0, 0, 0], enable_opts='',
                                inline=True) == 42
        self.check_resops(call=0, int_add=2, call_may_force_i=0,
                          guard_no_exception=0)
    def test_inline_jitdriver_check(self):
        """Subcode begins with JUMP_BACK, but the recursive call enters it
        at pc 1, so the call is still fully inlined."""
        code = "021"
        subcode = "100"
        codes = [code, subcode]
        f = self.get_interpreter(codes)
        assert self.meta_interp(f, [0, 0, 0], enable_opts='',
                                inline=True) == 42
        # the call is fully inlined, because we jump to subcode[1], thus
        # skipping completely the JUMP_BACK in subcode[0]
        self.check_resops(call=0, call_may_force=0, call_assembler=0)
def test_guard_failure_in_inlined_function(self):
def p(pc, code):
code = hlstr(code)
return "%s %d %s" % (code, pc, code[pc])
myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'],
get_printable_location=p)
def f(code, n):
pc = 0
while pc < len(code):
myjitdriver.jit_merge_point(n=n, code=code, pc=pc)
op = code[pc]
if op == "-":
n -= 1
elif op == "c":
n = f("---i---", n)
elif op == "i":
if n % 5 == 1:
return n
elif op == "l":
if n > 0:
myjitdriver.can_enter_jit(n=n, code=code, pc=0)
pc = 0
continue
else:
assert 0
pc += 1
return n
def main(n):
return f("c-l", n)
print main(100)
res = self.meta_interp(main, [100], enable_opts='', inline=True)
assert res == 0
    def test_guard_failure_and_then_exception_in_inlined_function(self):
        """First a guard fails in the inlined call ('i' sets flag when
        n < 200), then an exception is raised ('r'); the caller's
        except-clause must see it and return correctly."""
        def p(pc, code):
            code = hlstr(code)
            return "%s %d %s" % (code, pc, code[pc])
        myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n', 'flag'],
                                get_printable_location=p)
        def f(code, n):
            pc = 0
            flag = False
            while pc < len(code):
                myjitdriver.jit_merge_point(n=n, code=code, pc=pc, flag=flag)
                op = code[pc]
                if op == "-":
                    n -= 1
                elif op == "c":
                    try:
                        n = f("---ir---", n)
                    except Exception:
                        return n
                elif op == "i":
                    if n < 200:
                        flag = True
                elif op == "r":
                    if flag:
                        raise Exception
                elif op == "l":
                    if n > 0:
                        myjitdriver.can_enter_jit(n=n, code=code, pc=0, flag=flag)
                        pc = 0
                        continue
                else:
                    assert 0
                pc += 1
            return n
        def main(n):
            return f("c-l", n)
        print main(1000)
        res = self.meta_interp(main, [1000], enable_opts='', inline=True)
        assert res == main(1000)
    def test_exception_in_inlined_function(self):
        """The inlined recursive call raises Exc ('i' op) which the
        caller's 'c' op swallows; JIT result must equal the plain run."""
        def p(pc, code):
            code = hlstr(code)
            return "%s %d %s" % (code, pc, code[pc])
        myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'],
                                get_printable_location=p)
        class Exc(Exception):
            pass
        def f(code, n):
            pc = 0
            while pc < len(code):
                myjitdriver.jit_merge_point(n=n, code=code, pc=pc)
                op = code[pc]
                if op == "-":
                    n -= 1
                elif op == "c":
                    try:
                        n = f("---i---", n)
                    except Exc:
                        pass
                elif op == "i":
                    if n % 5 == 1:
                        raise Exc
                elif op == "l":
                    if n > 0:
                        myjitdriver.can_enter_jit(n=n, code=code, pc=0)
                        pc = 0
                        continue
                else:
                    assert 0
                pc += 1
            return n
        def main(n):
            return f("c-l", n)
        res = self.meta_interp(main, [100], enable_opts='', inline=True)
        assert res == main(100)
    def test_recurse_during_blackholing(self):
        # this passes, if the blackholing shortcut for calls is turned off
        # it fails, it is very delicate in terms of parameters,
        # bridge/loop creation order
        """Recursion hit while blackholing: 'c' recurses into f("--", n)
        only for some n, with low threshold/trace_eagerness so bridges
        and loops are created in a specific, fragile order."""
        def p(pc, code):
            code = hlstr(code)
            return "%s %d %s" % (code, pc, code[pc])
        myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'],
                                get_printable_location=p)
        def f(code, n):
            pc = 0
            while pc < len(code):
                myjitdriver.jit_merge_point(n=n, code=code, pc=pc)
                op = code[pc]
                if op == "-":
                    n -= 1
                elif op == "c":
                    if n < 70 and n % 3 == 1:
                        n = f("--", n)
                elif op == "l":
                    if n > 0:
                        myjitdriver.can_enter_jit(n=n, code=code, pc=0)
                        pc = 0
                        continue
                else:
                    assert 0
                pc += 1
            return n
        def main(n):
            set_param(None, 'threshold', 3)
            set_param(None, 'trace_eagerness', 5)
            return f("c-l", n)
        expected = main(100)
        res = self.meta_interp(main, [100], enable_opts='', inline=True)
        assert res == expected
    def check_max_trace_length(self, length):
        """Assert every compiled loop (and the suboperations attached to
        its guards) stays within `length` operations, with a +5 slack
        because the limit is only checked once per metainterp bytecode."""
        for loop in get_stats().loops:
            assert len(loop.operations) <= length + 5 # because we only check once per metainterp bytecode
            for op in loop.operations:
                if op.is_guard() and hasattr(op.getdescr(), '_debug_suboperations'):
                    assert len(op.getdescr()._debug_suboperations) <= length + 5
    def test_inline_trace_limit(self):
        """Inlining a deep recursion must abort once the trace limit is
        reached, rather than producing an over-long trace."""
        myjitdriver = JitDriver(greens=[], reds=['n'])
        def recursive(n):
            if n > 0:
                return recursive(n - 1) + 1
            return 0
        def loop(n):
            set_param(myjitdriver, "threshold", 10)
            pc = 0     # NOTE(review): unused local, kept byte-identical
            while n:
                myjitdriver.can_enter_jit(n=n)
                myjitdriver.jit_merge_point(n=n)
                n = recursive(n)
                n -= 1
            return n
        TRACE_LIMIT = 66
        res = self.meta_interp(loop, [100], enable_opts='', inline=True, trace_limit=TRACE_LIMIT)
        assert res == 0
        self.check_max_trace_length(TRACE_LIMIT)
        self.check_enter_count_at_most(10) # maybe
        self.check_aborted_count(6)
    def test_trace_limit_bridge(self):
        """Trace limit hit while tracing a bridge that tries to inline
        recursive(); tracing must abort within bounds."""
        def recursive(n):
            if n > 0:
                return recursive(n - 1) + 1
            return 0
        myjitdriver = JitDriver(greens=[], reds=['n'])
        def loop(n):
            set_param(None, "threshold", 4)
            set_param(None, "trace_eagerness", 2)
            while n:
                myjitdriver.can_enter_jit(n=n)
                myjitdriver.jit_merge_point(n=n)
                if n % 5 == 0:
                    n -= 1
                if n < 50:
                    n = recursive(n)
                n -= 1
            return n
        TRACE_LIMIT = 20
        res = self.meta_interp(loop, [100], enable_opts='', inline=True, trace_limit=TRACE_LIMIT)
        self.check_max_trace_length(TRACE_LIMIT)
        self.check_aborted_count(8)
        self.check_enter_count_at_most(30)
    def test_trace_limit_with_exception_bug(self):
        """Aborting on trace-limit while an exception is pending must not
        re-raise it through the blackhole interpreter (see inline note)."""
        myjitdriver = JitDriver(greens=[], reds=['n'])
        @unroll_safe
        def do_stuff(n):
            while n > 0:
                n -= 1
            raise ValueError
        def loop(n):
            pc = 0     # NOTE(review): unused local, kept byte-identical
            while n > 80:
                myjitdriver.can_enter_jit(n=n)
                myjitdriver.jit_merge_point(n=n)
                try:
                    do_stuff(n)
                except ValueError:
                    # the trace limit is checked when we arrive here, and we
                    # have the exception still in last_exc_value_box at this
                    # point -- so when we abort because of a trace too long,
                    # the exception is passed to the blackhole interp and
                    # incorrectly re-raised from here
                    pass
                n -= 1
            return n
        TRACE_LIMIT = 66
        res = self.meta_interp(loop, [100], trace_limit=TRACE_LIMIT)
        assert res == 80
    def test_max_failure_args(self):
        """Tracing must abort when a guard would need more failargs than
        FAILARGS_LIMIT (the A instance carries 10 live fields)."""
        FAILARGS_LIMIT = 10
        jitdriver = JitDriver(greens = [], reds = ['i', 'n', 'o'])
        class A(object):
            def __init__(self, i0, i1, i2, i3, i4, i5, i6, i7, i8, i9):
                self.i0 = i0
                self.i1 = i1
                self.i2 = i2
                self.i3 = i3
                self.i4 = i4
                self.i5 = i5
                self.i6 = i6
                self.i7 = i7
                self.i8 = i8
                self.i9 = i9
        def loop(n):
            i = 0
            o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
            while i < n:
                jitdriver.can_enter_jit(o=o, i=i, n=n)
                jitdriver.jit_merge_point(o=o, i=i, n=n)
                o = A(i, i + 1, i + 2, i + 3, i + 4, i + 5,
                      i + 6, i + 7, i + 8, i + 9)
                i += 1
            return o
        res = self.meta_interp(loop, [20], failargs_limit=FAILARGS_LIMIT,
                               listops=True)
        self.check_aborted_count(4)
    def test_max_failure_args_exc(self):
        """Same failargs-limit abort as test_max_failure_args, but the
        loop exits by raising ValueError instead of returning."""
        FAILARGS_LIMIT = 10
        jitdriver = JitDriver(greens = [], reds = ['i', 'n', 'o'])
        class A(object):
            def __init__(self, i0, i1, i2, i3, i4, i5, i6, i7, i8, i9):
                self.i0 = i0
                self.i1 = i1
                self.i2 = i2
                self.i3 = i3
                self.i4 = i4
                self.i5 = i5
                self.i6 = i6
                self.i7 = i7
                self.i8 = i8
                self.i9 = i9
        def loop(n):
            i = 0
            o = A(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)
            while i < n:
                jitdriver.can_enter_jit(o=o, i=i, n=n)
                jitdriver.jit_merge_point(o=o, i=i, n=n)
                o = A(i, i + 1, i + 2, i + 3, i + 4, i + 5,
                      i + 6, i + 7, i + 8, i + 9)
                i += 1
            raise ValueError
        def main(n):
            try:
                loop(n)
                return 1
            except ValueError:
                return 0
        res = self.meta_interp(main, [20], failargs_limit=FAILARGS_LIMIT,
                               listops=True)
        assert not res
        self.check_aborted_count(4)
    def test_set_param_inlining(self):
        """set_param(None, 'inlining', ...) at runtime decides whether the
        recursive loop(10, True) call is inlined (no call ops) or left as
        a residual call_may_force."""
        myjitdriver = JitDriver(greens=[], reds=['n', 'recurse'])
        def loop(n, recurse=False):
            while n:
                myjitdriver.jit_merge_point(n=n, recurse=recurse)
                n -= 1
                if not recurse:
                    loop(10, True)
                    myjitdriver.can_enter_jit(n=n, recurse=recurse)
            return n
        TRACE_LIMIT = 66
        def main(inline):
            set_param(None, "threshold", 10)
            set_param(None, 'function_threshold', 60)
            if inline:
                set_param(None, 'inlining', True)
            else:
                set_param(None, 'inlining', False)
            return loop(100)
        res = self.meta_interp(main, [0], enable_opts='', trace_limit=TRACE_LIMIT)
        self.check_resops(call=0, call_may_force_i=1)
        res = self.meta_interp(main, [1], enable_opts='', trace_limit=TRACE_LIMIT)
        self.check_resops(call=0, call_may_force=0)
def test_trace_from_start(self):
def p(pc, code):
code = hlstr(code)
return "'%s' at %d: %s" % (code, pc, code[pc])
myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'],
get_printable_location=p)
def f(code, n):
pc = 0
while pc < len(code):
myjitdriver.jit_merge_point(n=n, code=code, pc=pc)
op = code[pc]
if op == "+":
n += 7
elif op == "-":
n -= 1
elif op == "c":
n = f('---', n)
elif op == "l":
if n > 0:
myjitdriver.can_enter_jit(n=n, code=code, pc=1)
pc = 1
continue
else:
assert 0
pc += 1
return n
def g(m):
if m > 1000000:
f('', 0)
result = 0
for i in range(m):
result += f('+-cl--', i)
res = self.meta_interp(g, [50], backendopt=True)
assert res == g(50)
py.test.skip("tracing from start is by now only longer enabled "
"if a trace gets too big")
self.check_tree_loop_count(3)
self.check_history(int_add=1)
    def test_dont_inline_huge_stuff(self):
        """With a carefully chosen trace_limit (40), the inner recursive
        call is too big to inline and must become a call_assembler
        instead (see inline comment in g)."""
        def p(pc, code):
            code = hlstr(code)
            return "%s %d %s" % (code, pc, code[pc])
        myjitdriver = JitDriver(greens=['pc', 'code'], reds=['n'],
                                get_printable_location=p,
                                is_recursive=True)
        def f(code, n):
            pc = 0
            while pc < len(code):
                myjitdriver.jit_merge_point(n=n, code=code, pc=pc)
                op = code[pc]
                if op == "-":
                    n -= 1
                elif op == "c":
                    f('--------------------', n)
                elif op == "l":
                    if n > 0:
                        myjitdriver.can_enter_jit(n=n, code=code, pc=0)
                        pc = 0
                        continue
                else:
                    assert 0
                pc += 1
            return n
        def g(m):
            set_param(None, 'inlining', True)
            # carefully chosen threshold to make sure that the inner function
            # cannot be inlined, but the inner function on its own is small
            # enough
            set_param(None, 'trace_limit', 40)
            if m > 1000000:
                f('', 0)
            result = 0
            # NOTE(review): g never returns `result`; harmless here since
            # meta_interp's result is not asserted -- confirm vs upstream.
            for i in range(m):
                result += f('-c-----------l-', i+100)
        self.meta_interp(g, [10], backendopt=True)
        self.check_aborted_count(1)
        self.check_resops(call=0, call_assembler_i=2)
        self.check_jitcell_token_count(2)
    def test_directly_call_assembler(self):
        """With inline=True, the recursive portal(1) call from the codeno==2
        loop is compiled as a direct call_assembler operation."""
        driver = JitDriver(greens = ['codeno'], reds = ['i'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno):
            i = 0
            while i < 10:
                driver.can_enter_jit(codeno = codeno, i = i)
                driver.jit_merge_point(codeno = codeno, i = i)
                if codeno == 2:
                    portal(1)
                i += 1
        self.meta_interp(portal, [2], inline=True)
        self.check_history(call_assembler_n=1)
    def test_recursion_cant_call_assembler_directly(self):
        """When the recursion target is not yet compiled, a temporary
        callback (compile_tmp_callback) must be made once and later
        redirected to the real assembler."""
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'j'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno, j):
            i = 1
            while 1:
                driver.jit_merge_point(codeno=codeno, i=i, j=j)
                if (i >> 1) == 1:
                    if j == 0:
                        return
                    portal(2, j - 1)
                elif i == 5:
                    return
                i += 1
                driver.can_enter_jit(codeno=codeno, i=i, j=j)
        portal(2, 5)
        from rpython.jit.metainterp import compile, pyjitpl
        pyjitpl._warmrunnerdesc = None
        trace = []
        def my_ctc(*args):
            # wrap compile_tmp_callback to record every looptoken it makes
            looptoken = original_ctc(*args)
            trace.append(looptoken)
            return looptoken
        original_ctc = compile.compile_tmp_callback
        try:
            compile.compile_tmp_callback = my_ctc
            self.meta_interp(portal, [2, 5], inline=True)
            self.check_resops(call_may_force=0, call_assembler_n=2)
        finally:
            compile.compile_tmp_callback = original_ctc
        # check that we made a temporary callback
        assert len(trace) == 1
        # and that we later redirected it to something else
        try:
            redirected = pyjitpl._warmrunnerdesc.cpu._redirected_call_assembler
        except AttributeError:
            pass    # not the llgraph backend
        else:
            print redirected
            assert redirected.keys() == trace
    def test_recursion_cant_call_assembler_directly_with_virtualizable(self):
        # exactly the same logic as the previous test, but with 'frame.j'
        # instead of just 'j'
        class Frame(object):
            _virtualizable_ = ['j']
            def __init__(self, j):
                self.j = j
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'],
                           virtualizables = ['frame'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno, frame):
            i = 1
            while 1:
                driver.jit_merge_point(codeno=codeno, i=i, frame=frame)
                if (i >> 1) == 1:
                    if frame.j == 0:
                        return
                    portal(2, Frame(frame.j - 1))
                elif i == 5:
                    return
                i += 1
                driver.can_enter_jit(codeno=codeno, i=i, frame=frame)
        def main(codeno, j):
            portal(codeno, Frame(j))
        main(2, 5)
        from rpython.jit.metainterp import compile, pyjitpl
        pyjitpl._warmrunnerdesc = None
        trace = []
        def my_ctc(*args):
            # wrap compile_tmp_callback to record every looptoken it makes
            looptoken = original_ctc(*args)
            trace.append(looptoken)
            return looptoken
        original_ctc = compile.compile_tmp_callback
        try:
            compile.compile_tmp_callback = my_ctc
            self.meta_interp(main, [2, 5], inline=True)
            self.check_resops(call_may_force=0, call_assembler_n=2)
        finally:
            compile.compile_tmp_callback = original_ctc
        # check that we made a temporary callback
        assert len(trace) == 1
        # and that we later redirected it to something else
        try:
            redirected = pyjitpl._warmrunnerdesc.cpu._redirected_call_assembler
        except AttributeError:
            pass    # not the llgraph backend
        else:
            print redirected
            assert redirected.keys() == trace
    def test_directly_call_assembler_return(self):
        """Like test_directly_call_assembler, but the recursive call's
        return value is used, so call_assembler_i must appear."""
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno):
            i = 0
            k = codeno
            while i < 10:
                driver.can_enter_jit(codeno = codeno, i = i, k = k)
                driver.jit_merge_point(codeno = codeno, i = i, k = k)
                if codeno == 2:
                    k = portal(1)
                i += 1
            return k
        self.meta_interp(portal, [2], inline=True)
        self.check_history(call_assembler_i=1)
    def test_directly_call_assembler_raise(self):
        """The recursive portal(1) call exits by raising MyException,
        which the codeno==2 level catches; still one call_assembler."""
        class MyException(Exception):
            def __init__(self, x):
                self.x = x
        driver = JitDriver(greens = ['codeno'], reds = ['i'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno):
            i = 0
            while i < 10:
                driver.can_enter_jit(codeno = codeno, i = i)
                driver.jit_merge_point(codeno = codeno, i = i)
                if codeno == 2:
                    try:
                        portal(1)
                    except MyException as me:
                        i += me.x
                i += 1
                if codeno == 1:
                    raise MyException(1)
        self.meta_interp(portal, [2], inline=True)
        self.check_history(call_assembler_n=1)
    def test_directly_call_assembler_fail_guard(self):
        """A guard fails inside the directly-called assembler (the i % 2
        branch once k > 40); final value must match the plain run."""
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno, k):
            i = 0
            while i < 10:
                driver.can_enter_jit(codeno=codeno, i=i, k=k)
                driver.jit_merge_point(codeno=codeno, i=i, k=k)
                if codeno == 2:
                    k += portal(1, k)
                elif k > 40:
                    if i % 2:
                        k += 1
                    else:
                        k += 2
                k += 1
                i += 1
            return k
        res = self.meta_interp(portal, [2, 0], inline=True)
        assert res == 13542
    def test_directly_call_assembler_virtualizable(self):
        """Direct assembler call where each level has its own virtualizable
        Frame; the caller also reads the subframe after the call."""
        class Thing(object):
            def __init__(self, val):
                self.val = val
        class Frame(object):
            _virtualizable_ = ['thing']
        driver = JitDriver(greens = ['codeno'], reds = ['i', 's', 'frame'],
                           virtualizables = ['frame'],
                           get_printable_location = lambda codeno : str(codeno))
        def main(codeno):
            frame = Frame()
            frame.thing = Thing(0)
            result = portal(codeno, frame)
            return result
        def portal(codeno, frame):
            i = 0
            s = 0
            while i < 10:
                driver.can_enter_jit(frame=frame, codeno=codeno, i=i, s=s)
                driver.jit_merge_point(frame=frame, codeno=codeno, i=i, s=s)
                nextval = frame.thing.val
                if codeno == 0:
                    subframe = Frame()
                    subframe.thing = Thing(nextval)
                    nextval = portal(1, subframe)
                    s += subframe.thing.val
                frame.thing = Thing(nextval + 1)
                i += 1
            return frame.thing.val + s
        res = self.meta_interp(main, [0], inline=True)
        self.check_resops(call=0, cond_call=2)
        assert res == main(0)
    def test_directly_call_assembler_virtualizable_reset_token(self):
        """Skipped upstream: checked that vable_token was reset to
        TOKEN_NONE on subframes across three recursion levels."""
        py.test.skip("not applicable any more, I think")
        from rpython.rtyper.lltypesystem import lltype
        from rpython.rlib.debug import llinterpcall
        class Thing(object):
            def __init__(self, val):
                self.val = val
        class Frame(object):
            _virtualizable_ = ['thing']
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'],
                           virtualizables = ['frame'],
                           get_printable_location = lambda codeno : str(codeno))
        @dont_look_inside
        def check_frame(subframe):
            if we_are_translated():
                llinterpcall(lltype.Void, check_ll_frame, subframe)
        def check_ll_frame(ll_subframe):
            # This is called with the low-level Struct that is the frame.
            # Check that the vable_token was correctly reset to zero.
            # Note that in order for that test to catch failures, it needs
            # three levels of recursion: the vable_token of the subframe
            # at the level 2 is set to a non-zero value when doing the
            # call to the level 3 only.  This used to fail when the test
            # is run via rpython.jit.backend.x86.test.test_recursive.
            from rpython.jit.metainterp.virtualizable import TOKEN_NONE
            assert ll_subframe.vable_token == TOKEN_NONE
        def main(codeno):
            frame = Frame()
            frame.thing = Thing(0)
            portal(codeno, frame)
            return frame.thing.val
        def portal(codeno, frame):
            i = 0
            while i < 5:
                driver.can_enter_jit(frame=frame, codeno=codeno, i=i)
                driver.jit_merge_point(frame=frame, codeno=codeno, i=i)
                nextval = frame.thing.val
                if codeno < 2:
                    subframe = Frame()
                    subframe.thing = Thing(nextval)
                    nextval = portal(codeno + 1, subframe)
                    check_frame(subframe)
                frame.thing = Thing(nextval + 1)
                i += 1
            return frame.thing.val
        res = self.meta_interp(main, [0], inline=True)
        assert res == main(0)
    def test_directly_call_assembler_virtualizable_force1(self):
        """The virtualizable of the outer frame is forced from outside
        (change(), kept opaque via StopAtXPolicy) while the inner
        assembler call is running."""
        class Thing(object):
            def __init__(self, val):
                self.val = val
        class Frame(object):
            _virtualizable_ = ['thing']
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'],
                           virtualizables = ['frame'],
                           get_printable_location = lambda codeno : str(codeno))
        class SomewhereElse(object):
            pass
        somewhere_else = SomewhereElse()
        def change(newthing):
            somewhere_else.frame.thing = newthing
        def main(codeno):
            frame = Frame()
            somewhere_else.frame = frame
            frame.thing = Thing(0)
            portal(codeno, frame)
            return frame.thing.val
        def portal(codeno, frame):
            print 'ENTER:', codeno, frame.thing.val
            i = 0
            while i < 10:
                driver.can_enter_jit(frame=frame, codeno=codeno, i=i)
                driver.jit_merge_point(frame=frame, codeno=codeno, i=i)
                nextval = frame.thing.val
                if codeno == 0:
                    subframe = Frame()
                    subframe.thing = Thing(nextval)
                    nextval = portal(1, subframe)
                elif codeno == 1:
                    if frame.thing.val > 40:
                        change(Thing(13))
                        nextval = 13
                else:
                    fatalerror("bad codeno = " + str(codeno))
                frame.thing = Thing(nextval + 1)
                i += 1
            print 'LEAVE:', codeno, frame.thing.val
            return frame.thing.val
        res = self.meta_interp(main, [0], inline=True,
                               policy=StopAtXPolicy(change))
        assert res == main(0)
    def test_directly_call_assembler_virtualizable_with_array(self):
        """Virtualizable with an array field ('l[*]') across a direct
        assembler call; frame.s indexes into frame.l on both sides."""
        myjitdriver = JitDriver(greens = ['codeno'], reds = ['n', 'x', 'frame'],
                                virtualizables = ['frame'])
        class Frame(object):
            _virtualizable_ = ['l[*]', 's']
            def __init__(self, l, s):
                self = hint(self, access_directly=True,
                            fresh_virtualizable=True)
                self.l = l
                self.s = s
        def main(codeno, n, a):
            frame = Frame([a, a+1, a+2, a+3], 0)
            return f(codeno, n, a, frame)
        def f(codeno, n, a, frame):
            x = 0
            while n > 0:
                myjitdriver.can_enter_jit(codeno=codeno, frame=frame, n=n, x=x)
                myjitdriver.jit_merge_point(codeno=codeno, frame=frame, n=n,
                                            x=x)
                frame.s = promote(frame.s)
                n -= 1
                s = frame.s
                assert s >= 0
                x += frame.l[s]
                frame.s += 1
                if codeno == 0:
                    subframe = Frame([n, n+1, n+2, n+3], 0)
                    x += f(1, 10, 1, subframe)
                s = frame.s
                assert s >= 0
                x += frame.l[s]
                x += len(frame.l)
                frame.s -= 1
            return x
        res = self.meta_interp(main, [0, 10, 1], listops=True, inline=True)
        assert res == main(0, 10, 1)
    def test_directly_call_assembler_virtualizable_force_blackhole(self):
        """The outer virtualizable is forced (via change(), kept opaque)
        while blackholing through the directly-called assembler."""
        class Thing(object):
            def __init__(self, val):
                self.val = val
        class Frame(object):
            _virtualizable_ = ['thing']
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'frame'],
                           virtualizables = ['frame'],
                           get_printable_location = lambda codeno : str(codeno))
        class SomewhereElse(object):
            pass
        somewhere_else = SomewhereElse()
        def change(newthing, arg):
            print arg
            if arg > 30:
                somewhere_else.frame.thing = newthing
                arg = 13
            return arg
        def main(codeno):
            frame = Frame()
            somewhere_else.frame = frame
            frame.thing = Thing(0)
            portal(codeno, frame)
            return frame.thing.val
        def portal(codeno, frame):
            i = 0
            while i < 10:
                driver.can_enter_jit(frame=frame, codeno=codeno, i=i)
                driver.jit_merge_point(frame=frame, codeno=codeno, i=i)
                nextval = frame.thing.val
                if codeno == 0:
                    subframe = Frame()
                    subframe.thing = Thing(nextval)
                    nextval = portal(1, subframe)
                else:
                    nextval = change(Thing(13), frame.thing.val)
                frame.thing = Thing(nextval + 1)
                i += 1
            return frame.thing.val
        res = self.meta_interp(main, [0], inline=True,
                               policy=StopAtXPolicy(change))
        assert res == main(0)
    def test_assembler_call_red_args(self):
        """Assembler calls whose target codeno is computed at runtime
        (residual(), kept opaque), so red args flow into call_assembler."""
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'],
                           get_printable_location = lambda codeno : str(codeno))
        def residual(k):
            if k > 150:
                return 0
            return 1
        def portal(codeno, k):
            i = 0
            while i < 15:
                driver.can_enter_jit(codeno=codeno, i=i, k=k)
                driver.jit_merge_point(codeno=codeno, i=i, k=k)
                if codeno == 2:
                    k += portal(residual(k), k)
                if codeno == 0:
                    k += 2
                elif codeno == 1:
                    k += 1
                i += 1
            return k
        res = self.meta_interp(portal, [2, 0], inline=True,
                               policy=StopAtXPolicy(residual))
        assert res == portal(2, 0)
        self.check_resops(call_assembler_i=4)
    def test_inline_without_hitting_the_loop(self):
        """portal(20) recursions return immediately (codeno > 10) without
        reaching can_enter_jit, so they inline with no call_assembler."""
        driver = JitDriver(greens = ['codeno'], reds = ['i'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno):
            i = 0
            while True:
                driver.jit_merge_point(codeno=codeno, i=i)
                if codeno < 10:
                    i += portal(20)
                    codeno += 1
                elif codeno == 10:
                    if i > 63:
                        return i
                    codeno = 0
                    driver.can_enter_jit(codeno=codeno, i=i)
                else:
                    return 1
        assert portal(0) == 70
        res = self.meta_interp(portal, [0], inline=True)
        assert res == 70
        self.check_resops(call_assembler=0)
    def test_inline_with_hitting_the_loop_sometimes(self):
        """Some recursive calls do reach the loop header (depending on k),
        so a mix of inlining and call_assembler_i is produced."""
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'],
                           get_printable_location = lambda codeno : str(codeno))
        def portal(codeno, k):
            if k > 2:
                return 1
            i = 0
            while True:
                driver.jit_merge_point(codeno=codeno, i=i, k=k)
                if codeno < 10:
                    i += portal(codeno + 5, k+1)
                    codeno += 1
                elif codeno == 10:
                    if i > [-1, 2000, 63][k]:
                        return i
                    codeno = 0
                    driver.can_enter_jit(codeno=codeno, i=i, k=k)
                else:
                    return 1
        assert portal(0, 1) == 2095
        res = self.meta_interp(portal, [0, 1], inline=True)
        assert res == 2095
        self.check_resops(call_assembler_i=12)
    def test_inline_with_hitting_the_loop_sometimes_exc(self):
        """Same as the previous test, but every result is communicated by
        raising GotValue instead of returning."""
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'k'],
                           get_printable_location = lambda codeno : str(codeno))
        class GotValue(Exception):
            def __init__(self, result):
                self.result = result
        def portal(codeno, k):
            if k > 2:
                raise GotValue(1)
            i = 0
            while True:
                driver.jit_merge_point(codeno=codeno, i=i, k=k)
                if codeno < 10:
                    try:
                        portal(codeno + 5, k+1)
                    except GotValue as e:
                        i += e.result
                    codeno += 1
                elif codeno == 10:
                    if i > [-1, 2000, 63][k]:
                        raise GotValue(i)
                    codeno = 0
                    driver.can_enter_jit(codeno=codeno, i=i, k=k)
                else:
                    raise GotValue(1)
        def main(codeno, k):
            try:
                portal(codeno, k)
            except GotValue as e:
                return e.result
        assert main(0, 1) == 2095
        res = self.meta_interp(main, [0, 1], inline=True)
        assert res == 2095
        self.check_resops(call_assembler_n=12)
    def test_inline_recursion_limit(self):
        """max_unroll_recursion=10 bounds how deep a self-recursion is
        inlined: threshold 10 forces call_assembler, threshold 9 stays
        fully inlined."""
        driver = JitDriver(greens = ["threshold", "loop"], reds=["i"])
        @dont_look_inside
        def f():
            set_param(driver, "max_unroll_recursion", 10)
        def portal(threshold, loop, i):
            f()
            if i > threshold:
                return i
            while True:
                driver.jit_merge_point(threshold=threshold, loop=loop, i=i)
                if loop:
                    portal(threshold, False, 0)
                else:
                    portal(threshold, False, i + 1)
                    return i
                if i > 10:
                    return 1
                i += 1
                driver.can_enter_jit(threshold=threshold, loop=loop, i=i)
        res1 = portal(10, True, 0)
        res2 = self.meta_interp(portal, [10, True, 0], inline=True)
        assert res1 == res2
        self.check_resops(call_assembler_i=2)
        res1 = portal(9, True, 0)
        res2 = self.meta_interp(portal, [9, True, 0], inline=True)
        assert res1 == res2
        self.check_resops(call_assembler=0)
    def test_handle_jitexception_in_portal(self):
        # a test for _handle_jitexception_in_portal in blackhole.py
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'str'],
                           get_printable_location = lambda codeno: str(codeno))
        def do_can_enter_jit(codeno, i, str):
            i = (i+1)-1    # some operations
            driver.can_enter_jit(codeno=codeno, i=i, str=str)
        def intermediate(codeno, i, str):
            if i == 9:
                do_can_enter_jit(codeno, i, str)
        def portal(codeno, str):
            i = value.initial
            while i < 10:
                intermediate(codeno, i, str)
                driver.jit_merge_point(codeno=codeno, i=i, str=str)
                i += 1
                if codeno == 64 and i == 10:
                    str = portal(96, str)
                str += chr(codeno+i)
            return str
        class Value:
            initial = -1
        value = Value()
        def main():
            # 'value.initial' set at runtime so the loop bound is not constant
            value.initial = 0
            return (portal(64, '') +
                    portal(64, '') +
                    portal(64, '') +
                    portal(64, '') +
                    portal(64, ''))
        assert main() == 'ABCDEFGHIabcdefghijJ' * 5
        for tlimit in [95, 90, 102]:
            print 'tlimit =', tlimit
            res = self.meta_interp(main, [], inline=True, trace_limit=tlimit)
            assert ''.join(res.chars) == 'ABCDEFGHIabcdefghijJ' * 5
    def test_handle_jitexception_in_portal_returns_void(self):
        # a test for _handle_jitexception_in_portal in blackhole.py
        # (same as the previous test, but portal() returns nothing)
        driver = JitDriver(greens = ['codeno'], reds = ['i', 'str'],
                           get_printable_location = lambda codeno: str(codeno))
        def do_can_enter_jit(codeno, i, str):
            i = (i+1)-1    # some operations
            driver.can_enter_jit(codeno=codeno, i=i, str=str)
        def intermediate(codeno, i, str):
            if i == 9:
                do_can_enter_jit(codeno, i, str)
        def portal(codeno, str):
            i = value.initial
            while i < 10:
                intermediate(codeno, i, str)
                driver.jit_merge_point(codeno=codeno, i=i, str=str)
                i += 1
                if codeno == 64 and i == 10:
                    portal(96, str)
                str += chr(codeno+i)
        class Value:
            initial = -1
        value = Value()
        def main():
            # 'value.initial' set at runtime so the loop bound is not constant
            value.initial = 0
            portal(64, '')
            portal(64, '')
            portal(64, '')
            portal(64, '')
            portal(64, '')
        main()
        for tlimit in [95, 90, 102]:
            print 'tlimit =', tlimit
            self.meta_interp(main, [], inline=True, trace_limit=tlimit)
    def test_no_duplicates_bug(self):
        """Regression test: recursing with a runtime-varying green value
        (portal(i, i)) must not create duplicate entries."""
        driver = JitDriver(greens = ['codeno'], reds = ['i'],
                           get_printable_location = lambda codeno: str(codeno))
        def portal(codeno, i):
            while i > 0:
                driver.can_enter_jit(codeno=codeno, i=i)
                driver.jit_merge_point(codeno=codeno, i=i)
                if codeno > 0:
                    break
                portal(i, i)
                i -= 1
        self.meta_interp(portal, [0, 10], inline=True)
    def test_trace_from_start_always(self):
        """With function_threshold=0 and v=False the can_enter_jit path is
        never taken, so no jitcell tokens or traces are produced."""
        from rpython.rlib.nonconst import NonConstant
        driver = JitDriver(greens = ['c'], reds = ['i', 'v'])
        def portal(c, i, v):
            while i > 0:
                driver.jit_merge_point(c=c, i=i, v=v)
                portal(c, i - 1, v)
                if v:
                    driver.can_enter_jit(c=c, i=i, v=v)
                break
        def main(c, i, _set_param, v):
            if _set_param:
                set_param(driver, 'function_threshold', 0)
            portal(c, i, v)
        self.meta_interp(main, [10, 10, False, False], inline=True)
        self.check_jitcell_token_count(1)
        self.check_trace_count(1)
        self.meta_interp(main, [3, 10, True, False], inline=True)
        self.check_jitcell_token_count(0)
        self.check_trace_count(0)
    def test_trace_from_start_does_not_prevent_inlining(self):
        """The bc==1 recursion returns immediately; even though tracing can
        start from the function entry, the call must still be inlined."""
        driver = JitDriver(greens = ['c', 'bc'], reds = ['i'])
        def portal(bc, c, i):
            while True:
                driver.jit_merge_point(c=c, bc=bc, i=i)
                if bc == 0:
                    portal(1, 8, 0)
                    c += 1
                else:
                    return
                if c == 10:    # bc == 0
                    c = 0
                    if i >= 100:
                        return
                    driver.can_enter_jit(c=c, bc=bc, i=i)
                i += 1
        self.meta_interp(portal, [0, 0, 0], inline=True)
        self.check_resops(call_may_force=0, call=0)
    def test_dont_repeatedly_trace_from_the_same_guard(self):
        """Each recursion level fails the same guard; the JIT must not
        retrace it for every level (at most 2 traces, not ~24)."""
        driver = JitDriver(greens = [], reds = ['level', 'i'])
        def portal(level):
            if level == 0:
                i = -10
            else:
                i = 0
            #
            while True:
                driver.jit_merge_point(level=level, i=i)
                if level == 25:
                    return 42
                i += 1
                if i <= 0:      # <- guard
                    continue    # first make a loop
                else:
                    # then we fail the guard above, doing a recursive call,
                    # which will itself fail the same guard above, and so on
                    return portal(level + 1)
        self.meta_interp(portal, [0])
        self.check_trace_count_at_most(2)   # and not, e.g., 24
    def test_get_unique_id(self):
        """Monkeypatch CodemapStorage.register_codemap to record the
        (start, size) ranges registered when a recursive driver with
        get_unique_id is compiled.  Uses Python 2 tuple parameters."""
        lst = []
        def reg_codemap(self, (start, size, l)):
            lst.append((start, size))
            old_reg_codemap(self, (start, size, l))
        old_reg_codemap = codemap.CodemapStorage.register_codemap
        try:
            codemap.CodemapStorage.register_codemap = reg_codemap
            def get_unique_id(pc, code):
                return (code + 1) * 2
            driver = JitDriver(greens=["pc", "code"], reds='auto',
                               get_unique_id=get_unique_id, is_recursive=True)
            def f(pc, code):
                i = 0
                while i < 10:
                    driver.jit_merge_point(pc=pc, code=code)
                    pc += 1
                    if pc == 3:
                        if code == 1:
                            f(0, 0)
                        pc = 0
                        i += 1
            self.meta_interp(f, [0, 1], inline=True)
            self.check_get_unique_id(lst) # overloaded on assembler backends
        finally:
            codemap.CodemapStorage.register_codemap = old_reg_codemap
    def check_get_unique_id(self, lst):
        """Hook for backends: default does nothing (see comment at the
        call site -- overloaded on assembler backends)."""
        pass
class TestLLtype(RecursiveTests, LLJitMixin):
    """Run the recursion test suite with the lltype LLJitMixin backend."""
    pass
|
wevote/WeVoteServer
|
follow/models.py
|
# follow/models.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from datetime import datetime, timedelta
from django.db import models
from election.models import ElectionManager
from exception.models import handle_exception, handle_record_found_more_than_one_exception,\
handle_record_not_found_exception, handle_record_not_saved_exception, print_to_log
from issue.models import IssueManager
from organization.models import OrganizationManager
import pytz
import wevote_functions.admin
from wevote_functions.functions import positive_value_exists
from voter.models import VoterManager
# Possible values of a follow record's following_status field.
FOLLOWING = 'FOLLOWING'
STOP_FOLLOWING = 'STOP_FOLLOWING'
FOLLOW_IGNORE = 'FOLLOW_IGNORE'
STOP_IGNORING = 'STOP_IGNORING'
# Django choices tuple pairing each status with its display label.
FOLLOWING_CHOICES = (
    (FOLLOWING,         'Following'),
    (STOP_FOLLOWING,    'Not Following'),
    (FOLLOW_IGNORE,     'Ignoring'),
    (STOP_IGNORING,     'Not Ignoring'),
)

# Kinds of lists of suggested organization
UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW = 'UPDATE_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW'
UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW = 'UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW'
UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW_ON_TWITTER = \
    'UPDATE_SUGGESTIONS_FROM_WHAT_FRIENDS_FOLLOW_ON_TWITTER'
UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS = 'UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS'
UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS_ON_TWITTER = \
    'UPDATE_SUGGESTIONS_FROM_WHAT_FRIEND_FOLLOWS_ON_TWITTER'
UPDATE_SUGGESTIONS_ALL = 'UPDATE_SUGGESTIONS_ALL'

FOLLOW_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW = 'FOLLOW_SUGGESTIONS_FROM_TWITTER_IDS_I_FOLLOW'
FOLLOW_SUGGESTIONS_FROM_FRIENDS = 'FOLLOW_SUGGESTIONS_FROM_FRIENDS'
FOLLOW_SUGGESTIONS_FROM_FRIENDS_ON_TWITTER = 'FOLLOW_SUGGESTIONS_FROM_FRIENDS_ON_TWITTER'

# Module-level logger, named after this module.
logger = wevote_functions.admin.get_logger(__name__)
class FollowCampaignX(models.Model):
    """One voter's follow link to a CampaignX (campaign), identified by
    we_vote_ids; timestamped automatically on every save."""
    # Voter doing the following; indexed for per-voter lookups.
    voter_we_vote_id = models.CharField(max_length=255, null=True, blank=True, unique=False, db_index=True)
    # Organization linked to the voter -- presumably the voter's org; verify against callers.
    organization_we_vote_id = models.CharField(max_length=255, null=True, blank=True, unique=False)
    # Campaign being followed, by local id and by we_vote_id.
    campaignx_id = models.PositiveIntegerField(null=True, blank=True)
    campaignx_we_vote_id = models.CharField(max_length=255, null=True, blank=True, unique=False)
    # auto_now=True: updated on every save, no manual assignment needed.
    date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True, db_index=True)
class FollowCampaignXManager(models.Manager):
    def __unicode__(self):
        """Printable name of this manager (Python 2 __unicode__ hook)."""
        return "FollowCampaignXManager"
def toggle_on_follow_campaignx(self, voter_we_vote_id, issue_id, issue_we_vote_id, following_status):
follow_campaignx_on_stage_found = False
follow_campaignx_changed = False
follow_campaignx_on_stage_id = 0
follow_campaignx_on_stage = FollowIssue()
status = ''
issue_identifier_exists = positive_value_exists(issue_we_vote_id) or positive_value_exists(issue_id)
if not positive_value_exists(voter_we_vote_id) and not issue_identifier_exists:
results = {
'success': True if follow_campaignx_on_stage_found else False,
'status': 'Insufficient inputs to toggle issue link, try passing ids for voter and issue ',
'follow_campaignx_found': follow_campaignx_on_stage_found,
'follow_campaignx_id': follow_campaignx_on_stage_id,
'follow_campaignx': follow_campaignx_on_stage,
}
return results
# Does a follow_campaignx entry exist from this voter already exist?
follow_campaignx_manager = FollowIssueManager()
follow_campaignx_id = 0
results = follow_campaignx_manager.retrieve_follow_campaignx(follow_campaignx_id, voter_we_vote_id, issue_id,
issue_we_vote_id)
if results['MultipleObjectsReturned']:
status += 'TOGGLE_FOLLOWING_ISSUE MultipleObjectsReturned ' + following_status
delete_results = follow_campaignx_manager.delete_follow_campaignx(
follow_campaignx_id, voter_we_vote_id, issue_id, issue_we_vote_id)
status += delete_results['status']
results = follow_campaignx_manager.retrieve_follow_campaignx(follow_campaignx_id, voter_we_vote_id, issue_id,
issue_we_vote_id)
if results['follow_campaignx_found']:
follow_campaignx_on_stage = results['follow_campaignx']
# Update this follow_campaignx entry with new values - we do not delete because we might be able to use
try:
follow_campaignx_on_stage.following_status = following_status
# We don't need to update here because set set auto_now=True in the field
# follow_campaignx_on_stage.date_last_changed =
follow_campaignx_on_stage.save()
follow_campaignx_changed = True
follow_campaignx_on_stage_id = follow_campaignx_on_stage.id
follow_campaignx_on_stage_found = True
status += 'FOLLOW_STATUS_UPDATED_AS ' + following_status
except Exception as e:
status += 'FAILED_TO_UPDATE ' + following_status
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
elif results['DoesNotExist']:
try:
# Create new follow_campaignx entry
# First make sure that issue_id is for a valid issue
issue_manager = IssueManager()
if positive_value_exists(issue_id):
results = issue_manager.retrieve_issue(issue_id)
else:
results = issue_manager.retrieve_issue(0, issue_we_vote_id)
if results['issue_found']:
issue = results['issue']
follow_campaignx_on_stage = FollowIssue(
voter_we_vote_id=voter_we_vote_id,
issue_id=issue.id,
issue_we_vote_id=issue.we_vote_id,
following_status=following_status,
)
# if auto_followed_from_twitter_suggestion:
# follow_campaignx_on_stage.auto_followed_from_twitter_suggestion = True
follow_campaignx_on_stage.save()
follow_campaignx_changed = True
follow_campaignx_on_stage_id = follow_campaignx_on_stage.id
follow_campaignx_on_stage_found = True
status += 'CREATE ' + following_status
else:
status = 'ISSUE_NOT_FOUND_ON_CREATE ' + following_status
except Exception as e:
status += 'FAILED_TO_UPDATE ' + following_status
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
status += results['status']
results = {
'success': True if follow_campaignx_on_stage_found else False,
'status': status,
'follow_campaignx_found': follow_campaignx_on_stage_found,
'follow_campaignx_id': follow_campaignx_on_stage_id,
'follow_campaignx': follow_campaignx_on_stage,
}
return results
def retrieve_follow_campaignx(self, follow_campaignx_id, voter_we_vote_id, issue_id, issue_we_vote_id):
"""
follow_campaignx_id is the identifier for records stored in this table (it is NOT the issue_id)
"""
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
follow_campaignx_on_stage = FollowIssue()
follow_campaignx_on_stage_id = 0
try:
if positive_value_exists(follow_campaignx_id):
follow_campaignx_on_stage = FollowIssue.objects.get(id=follow_campaignx_id)
follow_campaignx_on_stage_id = issue_id.id
success = True
status = 'FOLLOW_ISSUE_FOUND_WITH_ID'
elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_id):
follow_campaignx_on_stage = FollowIssue.objects.get(
voter_we_vote_id__iexact=voter_we_vote_id,
issue_id=issue_id)
follow_campaignx_on_stage_id = follow_campaignx_on_stage.id
success = True
status = 'FOLLOW_ISSUE_FOUND_WITH_VOTER_WE_VOTE_ID_AND_ISSUE_ID'
elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_we_vote_id):
follow_campaignx_on_stage = FollowIssue.objects.get(
voter_we_vote_id__iexact=voter_we_vote_id,
issue_we_vote_id__iexact=issue_we_vote_id)
follow_campaignx_on_stage_id = follow_campaignx_on_stage.id
success = True
status = 'FOLLOW_ISSUE_FOUND_WITH_VOTER_WE_VOTE_ID_AND_ISSUE_WE_VOTE_ID'
else:
success = False
status = 'FOLLOW_ISSUE_MISSING_REQUIRED_VARIABLES'
except FollowIssue.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
exception_multiple_object_returned = True
success = False
status = 'FOLLOW_ISSUE_NOT_FOUND_MultipleObjectsReturned'
except FollowIssue.DoesNotExist:
error_result = False
exception_does_not_exist = True
success = True
status = 'FOLLOW_ISSUE_NOT_FOUND_DoesNotExist'
if positive_value_exists(follow_campaignx_on_stage_id):
follow_campaignx_on_stage_found = True
is_following = follow_campaignx_on_stage.is_following()
is_not_following = follow_campaignx_on_stage.is_not_following()
is_ignoring = follow_campaignx_on_stage.is_ignoring()
else:
follow_campaignx_on_stage_found = False
is_following = False
is_not_following = True
is_ignoring = False
results = {
'status': status,
'success': success,
'follow_campaignx_found': follow_campaignx_on_stage_found,
'follow_campaignx_id': follow_campaignx_on_stage_id,
'follow_campaignx': follow_campaignx_on_stage,
'is_following': is_following,
'is_not_following': is_not_following,
'is_ignoring': is_ignoring,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
}
return results
def delete_follow_campaignx(self, follow_campaignx_id, voter_we_vote_id, issue_id, issue_we_vote_id):
"""
Remove any follow issue entries (we may have duplicate entries)
"""
follow_campaignx_deleted = False
status = ''
try:
if positive_value_exists(follow_campaignx_id):
follow_campaignx_on_stage = FollowIssue.objects.get(id=follow_campaignx_id)
follow_campaignx_on_stage.delete()
follow_campaignx_deleted = True
success = True
status += 'FOLLOW_ISSUE_DELETED_BY_ID '
elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_id):
follow_campaignx_query = FollowIssue.objects.filter(
voter_we_vote_id__iexact=voter_we_vote_id,
issue_id=issue_id)
follow_campaignx_list = list(follow_campaignx_query)
for one_follow_campaignx in follow_campaignx_list:
one_follow_campaignx.delete()
follow_campaignx_deleted = True
success = True
status += 'FOLLOW_ISSUE_DELETED_BY_VOTER_WE_VOTE_ID_AND_ISSUE_ID '
elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_we_vote_id):
follow_campaignx_query = FollowIssue.objects.filter(
voter_we_vote_id__iexact=voter_we_vote_id,
issue_we_vote_id__iexact=issue_we_vote_id)
follow_campaignx_list = list(follow_campaignx_query)
for one_follow_campaignx in follow_campaignx_list:
one_follow_campaignx.delete()
follow_campaignx_deleted = True
success = True
status += 'FOLLOW_ISSUE_DELETE_BY_VOTER_WE_VOTE_ID_AND_ISSUE_WE_VOTE_ID '
else:
success = False
status += 'FOLLOW_ISSUE_DELETE_MISSING_REQUIRED_VARIABLES '
except FollowIssue.DoesNotExist:
success = True
status = 'FOLLOW_ISSUE_DELETE_NOT_FOUND_DoesNotExist '
results = {
'status': status,
'success': success,
'follow_campaignx_deleted': follow_campaignx_deleted,
}
return results
class FollowIssue(models.Model):
    """One voter's relationship (following / stopped / ignoring) to one issue."""
    # We are relying on built-in Python id field
    # The voter following the issue
    voter_we_vote_id = models.CharField(
        verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False, db_index=True)
    organization_we_vote_id = models.CharField(
        verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False)
    # The issue being followed
    issue_id = models.PositiveIntegerField(null=True, blank=True)
    # This is used when we want to export the issues that are being following
    issue_we_vote_id = models.CharField(
        verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False)
    # Is this person following, not following, or ignoring this issue?
    following_status = models.CharField(max_length=15, choices=FOLLOWING_CHOICES, default=FOLLOWING, db_index=True)
    # Is the fact that this issue is being followed visible to the public (if linked to organization)?
    is_follow_visible_publicly = models.BooleanField(verbose_name='', default=False)
    # The date the voter followed or stopped following this issue
    date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True, db_index=True)

    def __unicode__(self):
        return self.issue_we_vote_id

    def is_following(self):
        """Whether the stored status is FOLLOWING."""
        return self.following_status == FOLLOWING

    def is_not_following(self):
        """Whether the stored status is STOP_FOLLOWING."""
        return self.following_status == STOP_FOLLOWING

    def is_ignoring(self):
        """Whether the stored status is FOLLOW_IGNORE."""
        return self.following_status == FOLLOW_IGNORE
class FollowIssueManager(models.Manager):
    """Creates, retrieves and deletes FollowIssue entries, plus suggested-issue helpers."""

    def __unicode__(self):
        return "FollowIssueManager"

    def toggle_on_voter_following_issue(self, voter_we_vote_id, issue_id, issue_we_vote_id):
        """Set this voter's status for the issue to FOLLOWING."""
        following_status = FOLLOWING
        follow_issue_manager = FollowIssueManager()
        return follow_issue_manager.toggle_following_issue(voter_we_vote_id, issue_id, issue_we_vote_id,
                                                           following_status)

    def toggle_off_voter_following_issue(self, voter_we_vote_id, issue_id, issue_we_vote_id):
        """Set this voter's status for the issue to STOP_FOLLOWING."""
        following_status = STOP_FOLLOWING
        follow_issue_manager = FollowIssueManager()
        return follow_issue_manager.toggle_following_issue(voter_we_vote_id, issue_id, issue_we_vote_id,
                                                           following_status)

    def toggle_ignore_voter_following_issue(self, voter_we_vote_id, issue_id, issue_we_vote_id):
        """Set this voter's status for the issue to FOLLOW_IGNORE."""
        following_status = FOLLOW_IGNORE
        follow_issue_manager = FollowIssueManager()
        return follow_issue_manager.toggle_following_issue(voter_we_vote_id, issue_id, issue_we_vote_id,
                                                           following_status)

    def toggle_following_issue(self, voter_we_vote_id, issue_id, issue_we_vote_id, following_status):
        """Create or update the FollowIssue entry for this voter/issue.

        Returns a dict with keys 'success', 'status', 'follow_issue_found',
        'follow_issue_id' and 'follow_issue'.
        """
        follow_issue_on_stage_found = False
        follow_issue_changed = False
        follow_issue_on_stage_id = 0
        follow_issue_on_stage = FollowIssue()
        status = ''
        issue_identifier_exists = positive_value_exists(issue_we_vote_id) or positive_value_exists(issue_id)
        # Guard clause: we need a voter id or an issue identifier to do anything
        if not positive_value_exists(voter_we_vote_id) and not issue_identifier_exists:
            results = {
                'success': True if follow_issue_on_stage_found else False,
                'status': 'Insufficient inputs to toggle issue link, try passing ids for voter and issue ',
                'follow_issue_found': follow_issue_on_stage_found,
                'follow_issue_id': follow_issue_on_stage_id,
                'follow_issue': follow_issue_on_stage,
            }
            return results

        # Does a follow_issue entry exist from this voter already exist?
        follow_issue_manager = FollowIssueManager()
        follow_issue_id = 0
        results = follow_issue_manager.retrieve_follow_issue(follow_issue_id, voter_we_vote_id, issue_id,
                                                             issue_we_vote_id)

        if results['MultipleObjectsReturned']:
            # Duplicate rows found: delete them all, then fall through to re-create below
            status += 'TOGGLE_FOLLOWING_ISSUE MultipleObjectsReturned ' + following_status
            delete_results = follow_issue_manager.delete_follow_issue(
                follow_issue_id, voter_we_vote_id, issue_id, issue_we_vote_id)
            status += delete_results['status']
            results = follow_issue_manager.retrieve_follow_issue(follow_issue_id, voter_we_vote_id, issue_id,
                                                                 issue_we_vote_id)

        if results['follow_issue_found']:
            follow_issue_on_stage = results['follow_issue']
            # Update this follow_issue entry with new values - we do not delete because we might be able to use
            try:
                follow_issue_on_stage.following_status = following_status
                # date_last_changed updates itself because auto_now=True is set on the field
                follow_issue_on_stage.save()
                follow_issue_changed = True
                follow_issue_on_stage_id = follow_issue_on_stage.id
                follow_issue_on_stage_found = True
                status += 'FOLLOW_STATUS_UPDATED_AS ' + following_status
            except Exception as e:
                status += 'FAILED_TO_UPDATE ' + following_status
                handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
        elif results['DoesNotExist']:
            try:
                # Create new follow_issue entry
                # First make sure that issue_id is for a valid issue
                issue_manager = IssueManager()
                if positive_value_exists(issue_id):
                    results = issue_manager.retrieve_issue(issue_id)
                else:
                    results = issue_manager.retrieve_issue(0, issue_we_vote_id)
                if results['issue_found']:
                    issue = results['issue']
                    follow_issue_on_stage = FollowIssue(
                        voter_we_vote_id=voter_we_vote_id,
                        issue_id=issue.id,
                        issue_we_vote_id=issue.we_vote_id,
                        following_status=following_status,
                    )
                    follow_issue_on_stage.save()
                    follow_issue_changed = True
                    follow_issue_on_stage_id = follow_issue_on_stage.id
                    follow_issue_on_stage_found = True
                    status += 'CREATE ' + following_status
                else:
                    status = 'ISSUE_NOT_FOUND_ON_CREATE ' + following_status
            except Exception as e:
                status += 'FAILED_TO_UPDATE ' + following_status
                handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
        else:
            status += results['status']

        results = {
            'success': True if follow_issue_on_stage_found else False,
            'status': status,
            'follow_issue_found': follow_issue_on_stage_found,
            'follow_issue_id': follow_issue_on_stage_id,
            'follow_issue': follow_issue_on_stage,
        }
        return results

    def retrieve_follow_issue(self, follow_issue_id, voter_we_vote_id, issue_id, issue_we_vote_id):
        """
        follow_issue_id is the identifier for records stored in this table (it is NOT the issue_id)
        """
        error_result = False
        exception_does_not_exist = False
        exception_multiple_object_returned = False
        follow_issue_on_stage = FollowIssue()
        follow_issue_on_stage_id = 0
        try:
            if positive_value_exists(follow_issue_id):
                follow_issue_on_stage = FollowIssue.objects.get(id=follow_issue_id)
                # BUG FIX: was "issue_id.id" — issue_id is a plain integer (or None),
                # so accessing .id raised AttributeError on this path.
                follow_issue_on_stage_id = follow_issue_on_stage.id
                success = True
                status = 'FOLLOW_ISSUE_FOUND_WITH_ID'
            elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_id):
                follow_issue_on_stage = FollowIssue.objects.get(
                    voter_we_vote_id__iexact=voter_we_vote_id,
                    issue_id=issue_id)
                follow_issue_on_stage_id = follow_issue_on_stage.id
                success = True
                status = 'FOLLOW_ISSUE_FOUND_WITH_VOTER_WE_VOTE_ID_AND_ISSUE_ID'
            elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_we_vote_id):
                follow_issue_on_stage = FollowIssue.objects.get(
                    voter_we_vote_id__iexact=voter_we_vote_id,
                    issue_we_vote_id__iexact=issue_we_vote_id)
                follow_issue_on_stage_id = follow_issue_on_stage.id
                success = True
                status = 'FOLLOW_ISSUE_FOUND_WITH_VOTER_WE_VOTE_ID_AND_ISSUE_WE_VOTE_ID'
            else:
                success = False
                status = 'FOLLOW_ISSUE_MISSING_REQUIRED_VARIABLES'
        except FollowIssue.MultipleObjectsReturned as e:
            handle_record_found_more_than_one_exception(e, logger=logger)
            error_result = True
            exception_multiple_object_returned = True
            success = False
            status = 'FOLLOW_ISSUE_NOT_FOUND_MultipleObjectsReturned'
        except FollowIssue.DoesNotExist:
            # "Not found" is an expected outcome, so success stays True
            error_result = False
            exception_does_not_exist = True
            success = True
            status = 'FOLLOW_ISSUE_NOT_FOUND_DoesNotExist'

        if positive_value_exists(follow_issue_on_stage_id):
            follow_issue_on_stage_found = True
            is_following = follow_issue_on_stage.is_following()
            is_not_following = follow_issue_on_stage.is_not_following()
            is_ignoring = follow_issue_on_stage.is_ignoring()
        else:
            follow_issue_on_stage_found = False
            is_following = False
            is_not_following = True
            is_ignoring = False
        results = {
            'status': status,
            'success': success,
            'follow_issue_found': follow_issue_on_stage_found,
            'follow_issue_id': follow_issue_on_stage_id,
            'follow_issue': follow_issue_on_stage,
            'is_following': is_following,
            'is_not_following': is_not_following,
            'is_ignoring': is_ignoring,
            'error_result': error_result,
            'DoesNotExist': exception_does_not_exist,
            'MultipleObjectsReturned': exception_multiple_object_returned,
        }
        return results

    def delete_follow_issue(self, follow_issue_id, voter_we_vote_id, issue_id, issue_we_vote_id):
        """
        Remove any follow issue entries (we may have duplicate entries)
        """
        follow_issue_deleted = False
        status = ''
        try:
            if positive_value_exists(follow_issue_id):
                follow_issue_on_stage = FollowIssue.objects.get(id=follow_issue_id)
                follow_issue_on_stage.delete()
                follow_issue_deleted = True
                success = True
                status += 'FOLLOW_ISSUE_DELETED_BY_ID '
            elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_id):
                # filter() (not get()) so every duplicate row is removed
                follow_issue_query = FollowIssue.objects.filter(
                    voter_we_vote_id__iexact=voter_we_vote_id,
                    issue_id=issue_id)
                follow_issue_list = list(follow_issue_query)
                for one_follow_issue in follow_issue_list:
                    one_follow_issue.delete()
                    follow_issue_deleted = True
                success = True
                status += 'FOLLOW_ISSUE_DELETED_BY_VOTER_WE_VOTE_ID_AND_ISSUE_ID '
            elif positive_value_exists(voter_we_vote_id) and positive_value_exists(issue_we_vote_id):
                follow_issue_query = FollowIssue.objects.filter(
                    voter_we_vote_id__iexact=voter_we_vote_id,
                    issue_we_vote_id__iexact=issue_we_vote_id)
                follow_issue_list = list(follow_issue_query)
                for one_follow_issue in follow_issue_list:
                    one_follow_issue.delete()
                    follow_issue_deleted = True
                success = True
                status += 'FOLLOW_ISSUE_DELETE_BY_VOTER_WE_VOTE_ID_AND_ISSUE_WE_VOTE_ID '
            else:
                success = False
                status += 'FOLLOW_ISSUE_DELETE_MISSING_REQUIRED_VARIABLES '
        except FollowIssue.DoesNotExist:
            success = True
            status = 'FOLLOW_ISSUE_DELETE_NOT_FOUND_DoesNotExist '

        results = {
            'status': status,
            'success': success,
            'follow_issue_deleted': follow_issue_deleted,
        }
        return results

    def update_or_create_suggested_issue_to_follow(self, viewer_voter_we_vote_id, issue_we_vote_id,
                                                   from_twitter=False):
        """
        Create or update the SuggestedIssueToFollow table with suggested issues from twitter ids i follow
        or issue of my friends follow.
        :param viewer_voter_we_vote_id:
        :param issue_we_vote_id:
        :param from_twitter:
        :return:
        """
        status = ''
        try:
            suggested_issue_to_follow, created = SuggestedIssueToFollow.objects.update_or_create(
                viewer_voter_we_vote_id=viewer_voter_we_vote_id,
                issue_we_vote_id=issue_we_vote_id,
                defaults={
                    'viewer_voter_we_vote_id': viewer_voter_we_vote_id,
                    'issue_we_vote_id': issue_we_vote_id,
                    'from_twitter': from_twitter
                }
            )
            suggested_issue_to_follow_saved = True
            success = True
            status += "SUGGESTED_ISSUE_TO_FOLLOW_UPDATED "
        except Exception as e:
            suggested_issue_to_follow_saved = False
            suggested_issue_to_follow = SuggestedIssueToFollow()
            success = False
            status += "SUGGESTED_ISSUE_TO_FOLLOW_NOT_UPDATED " + str(e) + ' '
        results = {
            'success': success,
            'status': status,
            'suggested_issue_to_follow_saved': suggested_issue_to_follow_saved,
            'suggested_issue_to_follow': suggested_issue_to_follow,
        }
        return results

    def retrieve_suggested_issue_to_follow_list(self, viewer_voter_we_vote_id, from_twitter=False):
        """
        Retrieving suggested issues who i follow from SuggestedOrganizationToFollow table.
        :param viewer_voter_we_vote_id:
        :param from_twitter:
        :return:
        """
        suggested_issue_to_follow_list = []
        status = ''
        try:
            suggested_issue_to_follow_queryset = SuggestedIssueToFollow.objects.all()
            suggested_issue_to_follow_list = suggested_issue_to_follow_queryset.filter(
                viewer_voter_we_vote_id__iexact=viewer_voter_we_vote_id,
                from_twitter=from_twitter)
            if len(suggested_issue_to_follow_list):
                success = True
                suggested_issue_to_follow_list_found = True
                status += "SUGGESTED_ISSUE_TO_FOLLOW_RETRIEVED "
            else:
                success = True
                suggested_issue_to_follow_list_found = False
                status += "NO_SUGGESTED_ISSUE_TO_FOLLOW_LIST_RETRIEVED "
        except SuggestedIssueToFollow.DoesNotExist:
            # No data found. Try again below
            success = True
            suggested_issue_to_follow_list_found = False
            status = 'NO_SUGGESTED_ISSUE_TO_FOLLOW_LIST_RETRIEVED_DoesNotExist '
        except Exception as e:
            success = False
            suggested_issue_to_follow_list_found = False
            status += "SUGGESTED_ISSUE_TO_FOLLOW_LIST_NOT_RETRIEVED " + str(e) + ' '
        results = {
            'success': success,
            'status': status,
            'suggested_issue_to_follow_list_found': suggested_issue_to_follow_list_found,
            'suggested_issue_to_follow_list': suggested_issue_to_follow_list,
        }
        return results
class FollowMetricsManager(models.Manager):
    """Read-only counting queries over the follow tables, for metrics/analytics."""

    def __unicode__(self):
        return "FollowMetricsManager"

    def fetch_organization_followers(self, organization_we_vote_id, google_civic_election_id=0):
        # Count the distinct voters FOLLOWING this organization.  When an election id
        # is supplied, only count follows created on or before the day after that
        # election's date.  Returns None if the query raised (best-effort metric);
        # returns 0 when an election was requested but its date could not be found.
        count_result = None
        try:
            count_query = FollowOrganization.objects.using('readonly').all()
            count_query = count_query.filter(organization_we_vote_id__iexact=organization_we_vote_id)
            count_query = count_query.filter(following_status=FOLLOWING)
            # Distinct voter ids so duplicate follow rows don't inflate the count
            count_query = count_query.values("voter_id").distinct()
            if positive_value_exists(google_civic_election_id):
                election_manager = ElectionManager()
                election_result = election_manager.retrieve_election(google_civic_election_id)
                if election_result['election_found']:
                    election = election_result['election']
                    if positive_value_exists(election.election_day_text):
                        # Election dates are naive "YYYY-MM-DD" strings interpreted as Pacific time
                        timezone = pytz.timezone("America/Los_Angeles")
                        date_of_election = timezone.localize(datetime.strptime(election.election_day_text, "%Y-%m-%d"))
                        date_of_election += timedelta(days=1)  # Add one day, to catch the entire election day
                        # Find all of the follow entries before or on the day of the election
                        count_query = count_query.filter(date_last_changed__lte=date_of_election)
                    else:
                        # Failed retrieving date, so we return 0
                        return 0
            count_result = count_query.count()
        except Exception as e:
            # Best-effort: swallow errors and fall through to return None
            pass
        return count_result

    def fetch_issues_followed(self, voter_we_vote_id='',
                              limit_to_one_date_as_integer=0, count_through_this_date_as_integer=0):
        # Count FollowIssue rows with FOLLOWING status, optionally for one voter, and
        # optionally limited by date (dates supplied as YYYYMMDD integers, interpreted
        # as Pacific time).  Returns None if the query raised.
        timezone = pytz.timezone("America/Los_Angeles")
        # NOTE: limit_to_one_date / count_through_this_date are only defined when their
        # corresponding integer argument is truthy; the same guards below keep this safe.
        if positive_value_exists(limit_to_one_date_as_integer):
            one_date_string = str(limit_to_one_date_as_integer)
            limit_to_one_date = timezone.localize(datetime.strptime(one_date_string, "%Y%m%d"))
        if positive_value_exists(count_through_this_date_as_integer):
            count_through_date_string = str(count_through_this_date_as_integer)
            count_through_this_date = timezone.localize(datetime.strptime(count_through_date_string, "%Y%m%d"))
        count_result = None
        try:
            count_query = FollowIssue.objects.using('readonly').all()
            if positive_value_exists(voter_we_vote_id):
                count_query = count_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
            count_query = count_query.filter(following_status=FOLLOWING)
            if positive_value_exists(limit_to_one_date_as_integer):
                # TODO DALE THIS NEEDS WORK TO FIND ALL ENTRIES ON ONE DAY
                count_query = count_query.filter(date_last_changed=limit_to_one_date)
            elif positive_value_exists(count_through_this_date_as_integer):
                count_query = count_query.filter(date_last_changed__lte=count_through_this_date)
            count_result = count_query.count()
        except Exception as e:
            # Best-effort: swallow errors and fall through to return None
            pass
        return count_result

    def fetch_voter_organizations_followed(self, voter_id):
        # Count how many organizations this voter (by local id) is FOLLOWING.
        # Returns None if the query raised.
        count_result = None
        try:
            count_query = FollowOrganization.objects.using('readonly').all()
            count_query = count_query.filter(voter_id=voter_id)
            count_query = count_query.filter(following_status=FOLLOWING)
            count_result = count_query.count()
        except Exception as e:
            # Best-effort: swallow errors and fall through to return None
            pass
        return count_result
class FollowIssueList(models.Model):
    """
    A way to retrieve all of the follow_issue information
    """
    # NOTE(review): this helper subclasses models.Model yet declares no fields —
    # it behaves purely as a query helper; confirm whether the base class is intentional.

    def fetch_follow_issue_count_by_issue_we_vote_id(self, issue_we_vote_id):
        # Count FOLLOWING rows for one issue (read-only DB); 0 on error.
        follow_issue_list_length = 0
        try:
            follow_issue_list_query = FollowIssue.objects.using('readonly').all()
            follow_issue_list_query = follow_issue_list_query.filter(issue_we_vote_id__iexact=issue_we_vote_id)
            follow_issue_list_query = follow_issue_list_query.filter(following_status=FOLLOWING)
            follow_issue_list_length = follow_issue_list_query.count()
        except Exception as e:
            handle_record_not_found_exception(e, logger=logger)
        return follow_issue_list_length

    def fetch_follow_issue_count_by_voter_we_vote_id(self, voter_we_vote_id, following_status=None):
        # Count this voter's follow rows in the given status (default FOLLOWING); 0 on error.
        if following_status is None:
            following_status = FOLLOWING
        follow_issue_list_length = 0
        try:
            follow_issue_list_query = FollowIssue.objects.using('readonly').all()
            follow_issue_list_query = follow_issue_list_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
            follow_issue_list_query = follow_issue_list_query.filter(following_status=following_status)
            follow_issue_list_length = follow_issue_list_query.count()
        except Exception as e:
            handle_record_not_found_exception(e, logger=logger)
        return follow_issue_list_length

    def retrieve_follow_issue_list_by_voter_we_vote_id(self, voter_we_vote_id, following_status=None, read_only=True):
        """
        Retrieve a list of follow_issue entries for this voter
        :param voter_we_vote_id:
        :param following_status:
        :param read_only:
        :return: a list of follow_issue objects for the voter_we_vote_id
        """
        follow_issue_list_found = False
        if following_status is None:
            following_status = FOLLOWING
        follow_issue_list = {}
        try:
            if positive_value_exists(read_only):
                follow_issue_list_query = FollowIssue.objects.using('readonly').all()
            else:
                follow_issue_list_query = FollowIssue.objects.all()
            follow_issue_list_query = follow_issue_list_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
            # NOTE: if a caller explicitly passes a falsy following_status, the query is
            # never evaluated and an empty dict is returned
            if positive_value_exists(following_status):
                follow_issue_list = follow_issue_list_query.filter(following_status=following_status)
            if len(follow_issue_list):
                follow_issue_list_found = True
        except Exception as e:
            handle_record_not_found_exception(e, logger=logger)
        if follow_issue_list_found:
            return follow_issue_list
        else:
            # Return an empty container when nothing was found or the query failed
            follow_issue_list = {}
            return follow_issue_list

    def retrieve_follow_issue_we_vote_id_list_by_voter_we_vote_id(self, voter_we_vote_id, following_status=None):
        # Return the distinct issue_we_vote_id strings this voter has in the given
        # status (default FOLLOWING); empty list on error.
        follow_issue_we_vote_id_list = []
        follow_issue_we_vote_id_list_result = []
        if following_status is None:
            following_status = FOLLOWING
        try:
            follow_issue_list_query = FollowIssue.objects.using('readonly').all()
            follow_issue_list_query = follow_issue_list_query.filter(voter_we_vote_id__iexact=voter_we_vote_id)
            if positive_value_exists(following_status):
                follow_issue_list_query = follow_issue_list_query.filter(following_status=following_status)
            follow_issue_list_query = follow_issue_list_query.values("issue_we_vote_id").distinct()
            follow_issue_we_vote_id_list_result = list(follow_issue_list_query)
        except Exception as e:
            handle_record_not_found_exception(e, logger=logger)
        # Flatten the list of {'issue_we_vote_id': ...} dicts into plain strings
        for query in follow_issue_we_vote_id_list_result:
            follow_issue_we_vote_id_list.append(query["issue_we_vote_id"])
        return follow_issue_we_vote_id_list

    def fetch_follow_issue_following_count_by_voter_we_vote_id(self, voter_we_vote_id):
        # Convenience wrapper: count of FOLLOWING rows for this voter.
        following_status = FOLLOWING
        return self.fetch_follow_issue_count_by_voter_we_vote_id(voter_we_vote_id, following_status)

    def fetch_follow_issue_ignore_count_by_voter_we_vote_id(self, voter_we_vote_id):
        # Convenience wrapper: count of FOLLOW_IGNORE rows for this voter.
        following_status = FOLLOW_IGNORE
        return self.fetch_follow_issue_count_by_voter_we_vote_id(voter_we_vote_id, following_status)

    def retrieve_follow_issue_ignore_list_by_voter_we_vote_id(self, voter_we_vote_id):
        # Convenience wrapper: follow entries this voter is ignoring.
        following_status = FOLLOW_IGNORE
        return self.retrieve_follow_issue_list_by_voter_we_vote_id(voter_we_vote_id, following_status)

    def retrieve_follow_issue_following_we_vote_id_list_by_voter_we_vote_id(self, voter_we_vote_id):
        # Convenience wrapper: issue we_vote_ids this voter is following.
        following_status = FOLLOWING
        return self.retrieve_follow_issue_we_vote_id_list_by_voter_we_vote_id(voter_we_vote_id, following_status)

    def retrieve_follow_issue_ignore_we_vote_id_list_by_voter_we_vote_id(self, voter_we_vote_id):
        # Convenience wrapper: issue we_vote_ids this voter is ignoring.
        following_status = FOLLOW_IGNORE
        return self.retrieve_follow_issue_we_vote_id_list_by_voter_we_vote_id(voter_we_vote_id, following_status)

    def retrieve_follow_issue_list_by_issue_id(self, issue_id):
        # Convenience wrapper: FOLLOWING entries for an issue identified by local id.
        issue_we_vote_id = None
        following_status = FOLLOWING
        return self.retrieve_follow_issue_list(issue_id, issue_we_vote_id, following_status)

    def retrieve_follow_issue_following_list_by_issue_we_vote_id(self, issue_we_vote_id):
        # Convenience wrapper: FOLLOWING entries for an issue identified by we_vote_id.
        issue_id = None
        following_status = FOLLOWING
        return self.retrieve_follow_issue_list(issue_id, issue_we_vote_id, following_status)

    def retrieve_follow_issue_list(self, issue_id, issue_we_vote_id, following_status):
        # Retrieve follow entries for one issue (by local id, falling back to
        # we_vote_id), optionally filtered by status.  Returns the queryset when
        # non-empty, otherwise an empty dict.
        follow_issue_list_found = False
        follow_issue_list = {}
        try:
            follow_issue_list = FollowIssue.objects.using('readonly').all()
            if positive_value_exists(issue_id):
                follow_issue_list = follow_issue_list.filter(issue_id=issue_id)
            else:
                follow_issue_list = follow_issue_list.filter(issue_we_vote_id__iexact=issue_we_vote_id)
            if positive_value_exists(following_status):
                follow_issue_list = follow_issue_list.filter(following_status=following_status)
            if len(follow_issue_list):
                follow_issue_list_found = True
        except Exception as e:
            # Best-effort: swallow errors and return an empty container below
            pass
        if follow_issue_list_found:
            return follow_issue_list
        else:
            follow_issue_list = {}
            return follow_issue_list
class FollowOrganization(models.Model):
    """One voter's relationship (following / stopped / ignoring) to one organization."""
    # We are relying on built-in Python id field
    # The voter following the organization
    voter_id = models.BigIntegerField(null=True, blank=True, db_index=True)
    # The organization being followed
    organization_id = models.BigIntegerField(null=True, blank=True, db_index=True)
    voter_linked_organization_we_vote_id = models.CharField(
        verbose_name="organization we vote permanent id",
        max_length=255, null=True, blank=True, unique=False, db_index=True)
    # This is used when we want to export the organizations that a voter is following
    organization_we_vote_id = models.CharField(
        verbose_name="we vote permanent id", max_length=255, null=True, blank=True, unique=False, db_index=True)
    # Is this person following or ignoring this organization?
    following_status = models.CharField(max_length=15, choices=FOLLOWING_CHOICES, default=FOLLOWING, db_index=True)
    # Is this person automatically following the suggested twitter organization?
    auto_followed_from_twitter_suggestion = models.BooleanField(verbose_name='', default=False)
    # Is the fact that this organization is being followed by voter visible to the public?
    is_follow_visible_publicly = models.BooleanField(verbose_name='', default=False)
    # The date the voter followed or stopped following this organization
    date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)

    # This is used when we want to export the organizations that a voter is following
    def voter_we_vote_id(self):
        # Look up the voter's permanent we_vote id from the local voter_id
        voter_manager = VoterManager()
        return voter_manager.fetch_we_vote_id_from_local_id(self.voter_id)

    def __unicode__(self):
        return self.organization_id

    def is_following(self):
        # True when the stored status is FOLLOWING
        if self.following_status == FOLLOWING:
            return True
        return False

    def is_not_following(self):
        # True when the stored status is STOP_FOLLOWING
        if self.following_status == STOP_FOLLOWING:
            return True
        return False

    def is_ignoring(self):
        # True when the stored status is FOLLOW_IGNORE
        if self.following_status == FOLLOW_IGNORE:
            return True
        return False
class FollowOrganizationManager(models.Manager):
    def __unicode__(self):
        # Human-readable identifier for this manager
        return "FollowOrganizationManager"
def fetch_number_of_organizations_followed(self, voter_id):
number_of_organizations_followed = 0
try:
if positive_value_exists(voter_id):
follow_organization_query = FollowOrganization.objects.filter(
voter_id=voter_id,
following_status=FOLLOWING
)
number_of_organizations_followed = follow_organization_query.count()
except Exception as e:
pass
return number_of_organizations_followed
def toggle_on_voter_following_organization(self, voter_id, organization_id, organization_we_vote_id,
voter_linked_organization_we_vote_id,
auto_followed_from_twitter_suggestion=False):
following_status = FOLLOWING
follow_organization_manager = FollowOrganizationManager()
return follow_organization_manager.toggle_voter_following_organization(
voter_id, organization_id, organization_we_vote_id, voter_linked_organization_we_vote_id, following_status,
auto_followed_from_twitter_suggestion)
def toggle_off_voter_following_organization(self, voter_id, organization_id, organization_we_vote_id,
voter_linked_organization_we_vote_id):
following_status = STOP_FOLLOWING
follow_organization_manager = FollowOrganizationManager()
return follow_organization_manager.toggle_voter_following_organization(
voter_id, organization_id, organization_we_vote_id, voter_linked_organization_we_vote_id, following_status)
def toggle_ignore_voter_following_organization(self, voter_id, organization_id, organization_we_vote_id,
voter_linked_organization_we_vote_id):
following_status = FOLLOW_IGNORE
follow_organization_manager = FollowOrganizationManager()
return follow_organization_manager.toggle_voter_following_organization(
voter_id, organization_id, organization_we_vote_id, voter_linked_organization_we_vote_id, following_status)
def toggle_off_voter_ignoring_organization(self, voter_id, organization_id, organization_we_vote_id,
voter_linked_organization_we_vote_id):
following_status = STOP_FOLLOWING # STOP_IGNORING (We don't actually store STOP_IGNORING in the database
follow_organization_manager = FollowOrganizationManager()
return follow_organization_manager.toggle_voter_following_organization(
voter_id, organization_id, organization_we_vote_id, voter_linked_organization_we_vote_id, following_status)
def toggle_voter_following_organization(self, voter_id, organization_id, organization_we_vote_id,
voter_linked_organization_we_vote_id, following_status,
auto_followed_from_twitter_suggestion=False):
status = ""
# Does a follow_organization entry exist from this voter already exist?
follow_organization_manager = FollowOrganizationManager()
results = follow_organization_manager.retrieve_follow_organization(0, voter_id,
organization_id, organization_we_vote_id)
follow_organization_on_stage_found = False
follow_organization_on_stage_id = 0
follow_organization_on_stage = FollowOrganization()
if results['follow_organization_found']:
follow_organization_on_stage = results['follow_organization']
# Update this follow_organization entry with new values - we do not delete because we might be able to use
try:
if auto_followed_from_twitter_suggestion:
# If here we are auto-following because the voter follows this organization on Twitter
if follow_organization_on_stage.following_status == "STOP_FOLLOWING" or \
follow_organization_on_stage.following_status == "FOLLOW_IGNORE":
# Do not follow again
pass
else:
follow_organization_on_stage.following_status = following_status
else:
follow_organization_on_stage.following_status = following_status
follow_organization_on_stage.auto_followed_from_twitter_suggestion = False
follow_organization_on_stage.voter_linked_organization_we_vote_id = voter_linked_organization_we_vote_id
# We don't need to update here because set set auto_now=True in the field
# follow_organization_on_stage.date_last_changed =
follow_organization_on_stage.save()
follow_organization_on_stage_id = follow_organization_on_stage.id
follow_organization_on_stage_found = True
status += 'UPDATE ' + following_status
except Exception as e:
status += 'FAILED_TO_UPDATE ' + following_status + ' '
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
elif results['MultipleObjectsReturned']:
logger.warning("follow_organization: delete all but one and take it over?")
status += 'TOGGLE_FOLLOWING_ORGANIZATION MultipleObjectsReturned ' + following_status + ' '
elif results['DoesNotExist']:
try:
# Create new follow_organization entry
# First make sure that organization_id is for a valid organization
organization_manager = OrganizationManager()
if positive_value_exists(organization_id):
results = organization_manager.retrieve_organization(organization_id)
else:
results = organization_manager.retrieve_organization(0, organization_we_vote_id)
if results['organization_found']:
organization = results['organization']
follow_organization_on_stage = FollowOrganization(
voter_id=voter_id,
organization_id=organization.id,
organization_we_vote_id=organization.we_vote_id,
voter_linked_organization_we_vote_id=voter_linked_organization_we_vote_id,
following_status=following_status,
)
if auto_followed_from_twitter_suggestion:
follow_organization_on_stage.auto_followed_from_twitter_suggestion = True
follow_organization_on_stage.save()
follow_organization_on_stage_id = follow_organization_on_stage.id
follow_organization_on_stage_found = True
status += 'CREATE ' + following_status + ' '
else:
status += 'ORGANIZATION_NOT_FOUND_ON_CREATE ' + following_status + ' '
except Exception as e:
status += 'FAILED_TO_UPDATE ' + following_status + ' '
handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
else:
status += results['status']
results = {
'success': True if follow_organization_on_stage_found else False,
'status': status,
'follow_organization_found': follow_organization_on_stage_found,
'follow_organization_id': follow_organization_on_stage_id,
'follow_organization': follow_organization_on_stage,
'voter_linked_organization_we_vote_id': voter_linked_organization_we_vote_id,
}
return results
def retrieve_follow_organization(self, follow_organization_id, voter_id, organization_id, organization_we_vote_id,
read_only=False):
"""
follow_organization_id is the identifier for records stored in this table (it is NOT the organization_id)
"""
error_result = False
exception_does_not_exist = False
exception_multiple_object_returned = False
follow_organization_on_stage = FollowOrganization()
follow_organization_on_stage_id = 0
status = ""
try:
if positive_value_exists(follow_organization_id):
if read_only:
follow_organization_on_stage = FollowOrganization.objects.using('readonly').get(
id=follow_organization_id)
else:
follow_organization_on_stage = FollowOrganization.objects.get(id=follow_organization_id)
follow_organization_on_stage_id = organization_id.id
success = True
status += 'FOLLOW_ORGANIZATION_FOUND_WITH_ID '
elif positive_value_exists(voter_id) and positive_value_exists(organization_id):
if read_only:
follow_organization_on_stage = FollowOrganization.objects.using('readonly').get(
voter_id=voter_id, organization_id=organization_id)
else:
follow_organization_on_stage = FollowOrganization.objects.get(
voter_id=voter_id, organization_id=organization_id)
follow_organization_on_stage_id = follow_organization_on_stage.id
success = True
status += 'FOLLOW_ORGANIZATION_FOUND_WITH_VOTER_ID_AND_ORGANIZATION_ID '
elif positive_value_exists(voter_id) and positive_value_exists(organization_we_vote_id):
if read_only:
follow_organization_on_stage = FollowOrganization.objects.using('readonly').get(
voter_id=voter_id, organization_we_vote_id=organization_we_vote_id)
else:
follow_organization_on_stage = FollowOrganization.objects.get(
voter_id=voter_id, organization_we_vote_id=organization_we_vote_id)
follow_organization_on_stage_id = follow_organization_on_stage.id
success = True
status += 'FOLLOW_ORGANIZATION_FOUND_WITH_VOTER_ID_AND_ORGANIZATION_WE_VOTE_ID '
else:
success = False
status += 'FOLLOW_ORGANIZATION_MISSING_REQUIRED_VARIABLES '
except FollowOrganization.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
error_result = True
exception_multiple_object_returned = True
success = False
status += 'FOLLOW_ORGANIZATION_NOT_FOUND_MultipleObjectsReturned '
follow_organization_list_found = False
follow_organization_list = []
# Delete the oldest values and retrieve the correct one
try:
if positive_value_exists(voter_id) and positive_value_exists(organization_id):
follow_organization_query = FollowOrganization.objects.all()
follow_organization_query = follow_organization_query.filter(
voter_id=voter_id, organization_id=organization_id)
follow_organization_query = follow_organization_query.order_by('id')
follow_organization_list = list(follow_organization_query)
follow_organization_list_found = positive_value_exists(len(follow_organization_list))
success = True
status += 'FOLLOW_ORGANIZATION_FOUND_WITH_VOTER_ID_AND_ORGANIZATION_ID '
elif positive_value_exists(voter_id) and positive_value_exists(organization_we_vote_id):
follow_organization_query = FollowOrganization.objects.all()
follow_organization_query = follow_organization_query.filter(
voter_id=voter_id, organization_we_vote_id=organization_we_vote_id)
follow_organization_query = follow_organization_query.order_by('id')
follow_organization_list = list(follow_organization_query)
follow_organization_list_found = positive_value_exists(len(follow_organization_list))
success = True
status += 'FOLLOW_ORGANIZATION_FOUND_WITH_VOTER_ID_AND_ORGANIZATION_WE_VOTE_ID '
if follow_organization_list_found:
follow_organization_on_stage = follow_organization_list.pop()
follow_organization_on_stage_id = follow_organization_on_stage.id
# Now cycle through remaining list and delete
for one_follow_organization in follow_organization_list:
one_follow_organization.delete()
print_to_log(logger, exception_message_optional="FollowOrganization duplicates removed.")
except Exception as e:
handle_exception(e, logger,
exception_message="Error trying to delete duplicate FollowOrganization entries.")
except FollowOrganization.DoesNotExist:
error_result = False
exception_does_not_exist = True
success = True
status += 'FOLLOW_ORGANIZATION_NOT_FOUND_DoesNotExist '
if positive_value_exists(follow_organization_on_stage_id):
follow_organization_on_stage_found = True
is_following = follow_organization_on_stage.is_following()
is_not_following = follow_organization_on_stage.is_not_following()
is_ignoring = follow_organization_on_stage.is_ignoring()
else:
follow_organization_on_stage_found = False
is_following = False
is_not_following = True
is_ignoring = False
results = {
'status': status,
'success': success,
'follow_organization_found': follow_organization_on_stage_found,
'follow_organization_id': follow_organization_on_stage_id,
'follow_organization': follow_organization_on_stage,
'is_following': is_following,
'is_not_following': is_not_following,
'is_ignoring': is_ignoring,
'error_result': error_result,
'DoesNotExist': exception_does_not_exist,
'MultipleObjectsReturned': exception_multiple_object_returned,
}
return results
def retrieve_voter_following_org_status(self, voter_id, voter_we_vote_id,
organization_id, organization_we_vote_id, read_only=False):
"""
Retrieve one follow entry so we can see if a voter is following or ignoring a particular org
"""
if not positive_value_exists(voter_id) and positive_value_exists(voter_we_vote_id):
# We need voter_id to call retrieve_follow_organization
voter_manager = VoterManager()
voter_id = voter_manager.fetch_local_id_from_we_vote_id(voter_we_vote_id)
if not positive_value_exists(voter_id) and \
not (positive_value_exists(organization_id) or positive_value_exists(organization_we_vote_id)):
results = {
'status': 'RETRIEVE_VOTER_FOLLOWING_MISSING_VARIABLES',
'success': False,
'follow_organization_found': False,
'follow_organization_id': 0,
'follow_organization': FollowOrganization(),
'is_following': False,
'is_not_following': True,
'is_ignoring': False,
'error_result': True,
'DoesNotExist': False,
'MultipleObjectsReturned': False,
}
return results
return self.retrieve_follow_organization(
0, voter_id, organization_id, organization_we_vote_id, read_only=read_only)
def update_or_create_suggested_organization_to_follow(self, viewer_voter_we_vote_id, organization_we_vote_id,
from_twitter=False):
"""
Create or update the SuggestedOrganizationToFollow table with suggested organizations from twitter ids i follow
or organization of my friends follow.
:param viewer_voter_we_vote_id:
:param organization_we_vote_id:
:param from_twitter:
:return:
"""
status = ''
try:
suggested_organization_to_follow, created = SuggestedOrganizationToFollow.objects.update_or_create(
viewer_voter_we_vote_id=viewer_voter_we_vote_id,
organization_we_vote_id=organization_we_vote_id,
defaults={
'viewer_voter_we_vote_id': viewer_voter_we_vote_id,
'organization_we_vote_id': organization_we_vote_id,
'from_twitter': from_twitter
}
)
suggested_organization_to_follow_saved = True
success = True
status += "SUGGESTED_ORGANIZATION_TO_FOLLOW_UPDATED "
except Exception as e:
suggested_organization_to_follow_saved = False
suggested_organization_to_follow = SuggestedOrganizationToFollow()
success = False
status += "SUGGESTED_ORGANIZATION_TO_FOLLOW_NOT_UPDATED " + str(e) + ' '
results = {
'success': success,
'status': status,
'suggested_organization_to_follow_saved': suggested_organization_to_follow_saved,
'suggested_organization_to_follow': suggested_organization_to_follow,
}
return results
def retrieve_suggested_organization_to_follow_list(self, viewer_voter_we_vote_id, from_twitter=False):
"""
Retrieving suggested organizations who i follow from SuggestedOrganizationToFollow table.
:param viewer_voter_we_vote_id:
:param from_twitter:
:return:
"""
suggested_organization_to_follow_list = []
status = ''
try:
suggested_organization_to_follow_queryset = SuggestedOrganizationToFollow.objects.all()
suggested_organization_to_follow_list = suggested_organization_to_follow_queryset.filter(
viewer_voter_we_vote_id__iexact=viewer_voter_we_vote_id,
from_twitter=from_twitter)
if len(suggested_organization_to_follow_list):
success = True
suggested_organization_to_follow_list_found = True
status += "SUGGESTED_ORGANIZATION_TO_FOLLOW_RETRIEVED "
else:
success = True
suggested_organization_to_follow_list_found = False
status += "NO_SUGGESTED_ORGANIZATION_TO_FOLLOW_LIST_RETRIEVED "
except SuggestedOrganizationToFollow.DoesNotExist:
# No data found. Try again below
success = True
suggested_organization_to_follow_list_found = False
status += 'NO_SUGGESTED_ORGANIZATION_TO_FOLLOW_LIST_RETRIEVED_DoesNotExist '
except Exception as e:
success = False
suggested_organization_to_follow_list_found = False
status += "SUGGESTED_ORGANIZATION_TO_FOLLOW_LIST_NOT_RETRIEVED " + str(e) + ' '
results = {
'success': success,
'status': status,
'suggested_organization_to_follow_list_found': suggested_organization_to_follow_list_found,
'suggested_organization_to_follow_list': suggested_organization_to_follow_list,
}
return results
class FollowOrganizationList(models.Model):
    """
    A way to retrieve all of the follow_organization information
    """

    def fetch_follow_organization_by_voter_id_count(self, voter_id):
        """Return how many organizations this voter follows."""
        follow_organization_list = self.retrieve_follow_organization_by_voter_id(voter_id)
        return len(follow_organization_list)

    def retrieve_follow_organization_by_voter_id(self, voter_id, auto_followed_from_twitter_suggestion=False,
                                                 read_only=False):
        """Retrieve a list of follow_organization entries (FOLLOWING) for this voter."""
        follow_organization_list_found = False
        following_status = FOLLOWING
        follow_organization_list = {}
        try:
            # Should not default to 'readonly' since we sometimes save the results of this call
            if read_only:
                follow_organization_list = FollowOrganization.objects.using('readonly').all()
            else:
                follow_organization_list = FollowOrganization.objects.all()
            follow_organization_list = follow_organization_list.filter(voter_id=voter_id)
            follow_organization_list = follow_organization_list.filter(following_status=following_status)
            if auto_followed_from_twitter_suggestion:
                follow_organization_list = follow_organization_list.filter(
                    auto_followed_from_twitter_suggestion=auto_followed_from_twitter_suggestion)
            if len(follow_organization_list):
                follow_organization_list_found = True
        except Exception as e:
            handle_record_not_found_exception(e, logger=logger)

        if follow_organization_list_found:
            return follow_organization_list
        else:
            follow_organization_list = {}
            return follow_organization_list

    def retrieve_follow_organization_by_own_organization_we_vote_id(self, organization_we_vote_id,
                                                                    auto_followed_from_twitter_suggestion=False):
        """Retrieve followed organization entries by voter_linked_organization_we_vote_id, for voter guides."""
        follow_organization_list_found = False
        following_status = FOLLOWING
        follow_organization_list = []
        try:
            follow_organization_list = FollowOrganization.objects.all()
            follow_organization_list = follow_organization_list.filter(
                voter_linked_organization_we_vote_id=organization_we_vote_id)
            follow_organization_list = follow_organization_list.filter(following_status=following_status)
            if auto_followed_from_twitter_suggestion:
                follow_organization_list = follow_organization_list.filter(
                    auto_followed_from_twitter_suggestion=auto_followed_from_twitter_suggestion)
            if len(follow_organization_list):
                follow_organization_list_found = True
        except Exception as e:
            handle_record_not_found_exception(e, logger=logger)

        if follow_organization_list_found:
            return follow_organization_list
        else:
            follow_organization_list = []
            return follow_organization_list

    def retrieve_ignore_organization_by_voter_id(self, voter_id, read_only=False):
        """Retrieve a list of follow_organization entries (FOLLOW_IGNORE) for this voter."""
        follow_organization_list_found = False
        following_status = FOLLOW_IGNORE
        follow_organization_list = {}
        try:
            if positive_value_exists(read_only):
                follow_organization_list = FollowOrganization.objects.using('readonly').all()
            else:
                follow_organization_list = FollowOrganization.objects.all()
            follow_organization_list = follow_organization_list.filter(voter_id=voter_id)
            follow_organization_list = follow_organization_list.filter(following_status=following_status)
            if len(follow_organization_list):
                follow_organization_list_found = True
        except Exception as e:
            handle_record_not_found_exception(e, logger=logger)

        if follow_organization_list_found:
            return follow_organization_list
        else:
            follow_organization_list = {}
            return follow_organization_list

    def retrieve_follow_organization_by_voter_id_simple_id_array(self, voter_id, return_we_vote_id=False,
                                                                 auto_followed_from_twitter_suggestion=False,
                                                                 read_only=False):
        """Return a flat list of organization ids (or we_vote_ids) this voter follows.

        When not read_only, also heals each entry's voter_linked_organization_we_vote_id.
        """
        follow_organization_list_manager = FollowOrganizationList()
        follow_organization_list = \
            follow_organization_list_manager.retrieve_follow_organization_by_voter_id(
                voter_id, auto_followed_from_twitter_suggestion, read_only=read_only)
        follow_organization_list_simple_array = []
        if len(follow_organization_list):
            voter_manager = VoterManager()
            voter_linked_organization_we_vote_id = \
                voter_manager.fetch_linked_organization_we_vote_id_from_local_id(voter_id)
            for follow_organization in follow_organization_list:
                if not read_only:
                    # Heal the data by making sure the voter's linked_organization_we_vote_id exists and is accurate
                    if positive_value_exists(voter_linked_organization_we_vote_id) \
                            and voter_linked_organization_we_vote_id != \
                            follow_organization.voter_linked_organization_we_vote_id:
                        try:
                            follow_organization.voter_linked_organization_we_vote_id = \
                                voter_linked_organization_we_vote_id
                            follow_organization.save()
                        except Exception as e:
                            # Bug fix: status previously said FOLLOW_ISSUE, but this updates FollowOrganization
                            status = 'FAILED_TO_UPDATE_FOLLOW_ORGANIZATION-voter_id ' + str(voter_id)
                            handle_record_not_saved_exception(e, logger=logger, exception_message_optional=status)
                if return_we_vote_id:
                    follow_organization_list_simple_array.append(follow_organization.organization_we_vote_id)
                else:
                    follow_organization_list_simple_array.append(follow_organization.organization_id)
        return follow_organization_list_simple_array

    def retrieve_followed_organization_by_organization_we_vote_id_simple_id_array(
            self, organization_we_vote_id, return_we_vote_id=False,
            auto_followed_from_twitter_suggestion=False):
        """Return a flat list of organization ids (or we_vote_ids) followed by this organization's voter."""
        follow_organization_list_manager = FollowOrganizationList()
        follow_organization_list = \
            follow_organization_list_manager.retrieve_follow_organization_by_own_organization_we_vote_id(
                organization_we_vote_id, auto_followed_from_twitter_suggestion)
        follow_organization_list_simple_array = []
        if len(follow_organization_list):
            for follow_organization in follow_organization_list:
                if return_we_vote_id:
                    follow_organization_list_simple_array.append(follow_organization.organization_we_vote_id)
                else:
                    follow_organization_list_simple_array.append(follow_organization.organization_id)
        return follow_organization_list_simple_array

    def fetch_followers_list_by_organization_we_vote_id(
            self, organization_we_vote_id, return_voter_we_vote_id=False):
        """
        Fetch a list of the voter_id or voter_we_vote_id of followers of organization_we_vote_id.
        :param organization_we_vote_id:
        :param return_voter_we_vote_id:
        :return:
        """
        follow_organization_list_manager = FollowOrganizationList()
        followers_list = \
            follow_organization_list_manager.retrieve_follow_organization_by_organization_we_vote_id(
                organization_we_vote_id)
        followers_list_simple_array = []
        if len(followers_list):
            voter_manager = VoterManager()
            for follow_organization in followers_list:
                if return_voter_we_vote_id:
                    voter_we_vote_id = voter_manager.fetch_we_vote_id_from_local_id(follow_organization.voter_id)
                    if positive_value_exists(voter_we_vote_id):
                        followers_list_simple_array.append(voter_we_vote_id)
                else:
                    if positive_value_exists(follow_organization.voter_id):
                        followers_list_simple_array.append(follow_organization.voter_id)
        return followers_list_simple_array

    def retrieve_followers_organization_by_organization_we_vote_id_simple_id_array(
            self, organization_we_vote_id, return_we_vote_id=False,
            auto_followed_from_twitter_suggestion=False):
        """
        Retrieve the organization_id (or organization_we_vote_id) for each voter that follows organization_we_vote_id.
        :param organization_we_vote_id:
        :param return_we_vote_id:
        :param auto_followed_from_twitter_suggestion:
        :return:
        """
        follow_organization_list_manager = FollowOrganizationList()
        followers_organization_list = \
            follow_organization_list_manager.retrieve_follow_organization_by_organization_we_vote_id(
                organization_we_vote_id)
        followers_organization_list_simple_array = []
        if len(followers_organization_list):
            for follow_organization in followers_organization_list:
                if return_we_vote_id:
                    if positive_value_exists(follow_organization.voter_linked_organization_we_vote_id):
                        followers_organization_list_simple_array.append(
                            follow_organization.voter_linked_organization_we_vote_id)
                else:
                    followers_organization_list_simple_array.append(follow_organization.organization_id)
        return followers_organization_list_simple_array

    def retrieve_ignore_organization_by_voter_id_simple_id_array(
            self, voter_id, return_we_vote_id=False, read_only=False):
        """Return a flat list of organization ids (or we_vote_ids) this voter is ignoring."""
        follow_organization_list_manager = FollowOrganizationList()
        ignore_organization_list = \
            follow_organization_list_manager.retrieve_ignore_organization_by_voter_id(voter_id, read_only=read_only)
        ignore_organization_list_simple_array = []
        if len(ignore_organization_list):
            for ignore_organization in ignore_organization_list:
                if return_we_vote_id:
                    ignore_organization_list_simple_array.append(ignore_organization.organization_we_vote_id)
                else:
                    ignore_organization_list_simple_array.append(ignore_organization.organization_id)
        return ignore_organization_list_simple_array

    def retrieve_follow_organization_by_organization_id(self, organization_id):
        """Retrieve a list of follow_organization entries (FOLLOWING) for this organization."""
        follow_organization_list_found = False
        following_status = FOLLOWING
        follow_organization_list = {}
        try:
            follow_organization_list = FollowOrganization.objects.all()
            follow_organization_list = follow_organization_list.filter(organization_id=organization_id)
            follow_organization_list = follow_organization_list.filter(following_status=following_status)
            if len(follow_organization_list):
                follow_organization_list_found = True
        except Exception as e:
            # Best-effort retrieval: any database error yields an empty result
            pass

        if follow_organization_list_found:
            return follow_organization_list
        else:
            follow_organization_list = {}
            return follow_organization_list

    def retrieve_follow_organization_by_organization_we_vote_id(self, organization_we_vote_id):
        """Retrieve a list of follow_organization entries (FOLLOWING) for this organization, by we_vote_id."""
        follow_organization_list_found = False
        following_status = FOLLOWING
        follow_organization_list = {}
        try:
            follow_organization_list = FollowOrganization.objects.all()
            follow_organization_list = follow_organization_list.filter(organization_we_vote_id=organization_we_vote_id)
            follow_organization_list = follow_organization_list.filter(following_status=following_status)
            if len(follow_organization_list):
                follow_organization_list_found = True
        except Exception as e:
            # Best-effort retrieval: any database error yields an empty result
            pass

        if follow_organization_list_found:
            return follow_organization_list
        else:
            follow_organization_list = {}
            return follow_organization_list
class SuggestedIssueToFollow(models.Model):
    """
    This table stores possible suggested issues to follow
    """
    # we_vote_id of the voter who receives the suggestion
    viewer_voter_we_vote_id = models.CharField(
        verbose_name="voter we vote id", max_length=255, null=True, blank=True, unique=False)
    # we_vote_id of the suggested issue
    issue_we_vote_id = models.CharField(
        verbose_name="issue we vote id", max_length=255, null=True, blank=True, unique=False)
    # organization_we_vote_id_making_suggestion = models.CharField(
    #     verbose_name="organization we vote id making decision", max_length=255, null=True, blank=True, unique=False)
    # from_twitter = models.BooleanField(verbose_name="from twitter", default=False)
    # Updated automatically on every save (auto_now=True)
    date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)

    # def fetch_other_organization_we_vote_id(self, one_we_vote_id):
    #     if one_we_vote_id == self.viewer_voter_we_vote_id:
    #         return self.viewee_voter_we_vote_id
    #     else:
    #         # If the we_vote_id passed in wasn't found, don't return another we_vote_id
    #         return ""
class SuggestedOrganizationToFollow(models.Model):
    """
    This table stores possible suggested organization from twitter ids i follow or organization of my friends follow.
    """
    # we_vote_id of the voter who receives the suggestion
    viewer_voter_we_vote_id = models.CharField(
        verbose_name="voter we vote id person 1", max_length=255, null=True, blank=True, unique=False)
    # we_vote_id of the suggested organization
    organization_we_vote_id = models.CharField(
        verbose_name="organization we vote id person 2", max_length=255, null=True, blank=True, unique=False)
    # organization_we_vote_id_making_suggestion = models.CharField(
    #     verbose_name="organization we vote id making decision", max_length=255, null=True, blank=True, unique=False)
    # True when the suggestion came from the voter's Twitter follows
    from_twitter = models.BooleanField(verbose_name="from twitter", default=False)
    # Updated automatically on every save (auto_now=True)
    date_last_changed = models.DateTimeField(verbose_name='date last changed', null=True, auto_now=True)

    def fetch_other_organization_we_vote_id(self, one_we_vote_id):
        """Given one we_vote_id of the pair, return the other; '' when not matched.

        Bug fix: previously returned self.viewee_voter_we_vote_id, an attribute
        that does not exist on this model (AttributeError whenever the branch
        was taken). The "other" id of the pair is organization_we_vote_id.
        """
        if one_we_vote_id == self.viewer_voter_we_vote_id:
            return self.organization_we_vote_id
        else:
            # If the we_vote_id passed in wasn't found, don't return another we_vote_id
            return ""
|
whtsky/parguments
|
parguments/cli.py
|
# Copyright (c) 2010 by Dan Jacob.
#
# Some rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * The names of the contributors may not be used to endorse or
# promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import getpass

# Python 2/3 compatibility: on Python 3, raw_input was renamed to input.
# A bare name reference (rather than `assert raw_input`) is used so the shim
# still works when assertions are stripped with `python -O`.
try:
    raw_input
except NameError:
    raw_input = input
def prompt(name, default=None):
    """
    Grab user input from command line.

    :param name: prompt text
    :param default: default value if no input provided.
    """
    hint = ' [%s]' % default if default else ''
    suffix = ' ' if name.endswith('?') else ': '
    text = name + hint + suffix
    while True:
        answer = raw_input(text)
        if answer:
            return answer
        if default is not None:
            return default
def prompt_pass(name, default=None):
    """
    Grabs hidden (password) input from command line.

    :param name: prompt text
    :param default: default value if no input provided.
    """
    hint = ' [%s]' % default if default else ''
    suffix = ' ' if name.endswith('?') else ': '
    text = name + hint + suffix
    while True:
        answer = getpass.getpass(text)
        if answer:
            return answer
        if default is not None:
            return default
def prompt_bool(name, default=False, yes_choices=None, no_choices=None):
    """
    Grabs user input from command line and converts to boolean
    value.

    :param name: prompt text
    :param default: default value if no input provided.
    :param yes_choices: default 'y', 'yes', '1', 'on', 'true', 't'
    :param no_choices: default 'n', 'no', '0', 'off', 'false', 'f'
    """
    yes_choices = yes_choices or ('y', 'yes', '1', 'on', 'true', 't')
    no_choices = no_choices or ('n', 'no', '0', 'off', 'false', 'f')

    shown_default = yes_choices[0] if default else no_choices[0]
    while True:
        answer = prompt(name + '?', shown_default).lower()
        if answer in yes_choices:
            return True
        if answer in no_choices:
            return False
def prompt_choices(name, choices, default=None, no_choice=('none',)):
    """
    Grabs user input from command line from set of provided choices.

    :param name: prompt text
    :param choices: list or tuple of available choices.
    :param default: default value if no input provided.
    :param no_choice: acceptable list of strings for "null choice"
    """
    options = list(choices)
    text = '%s? - (%s)' % (name, ', '.join(options))
    while True:
        answer = prompt(text, default).lower()
        if answer in no_choice:
            return None
        if answer in options:
            return answer
|
GeneralizedLearningUtilities/SuperGLU
|
python_module/stomp/test/p3_backward_test.py
|
import unittest
from stomp import backward3
class TestBackward3(unittest.TestCase):
    """Tests for the Python 3 helpers in stomp.backward3 (pack/encode/decode)."""

    def test_pack_mixed_string_and_bytes(self):
        # The same frame built with a text tail and a bytes tail must encode identically.
        expected = b'SEND\nheader1:test\xe6\x9d\xb1'
        for tail in ('\u6771', b'\xe6\x9d\xb1'):
            frame = ['SEND', '\n', 'header1:test', tail]
            self.assertEqual(backward3.encode(backward3.pack(frame)), expected)

    def test_decode(self):
        self.assertIsNone(backward3.decode(None))
        self.assertEqual('test', backward3.decode(b'test'))

    def test_encode(self):
        self.assertEqual(b'test', backward3.encode('test'))
        self.assertEqual(b'test', backward3.encode(b'test'))
        self.assertRaises(TypeError, backward3.encode, None)
|
Bjornkjohnson/makeChangePython
|
Change.py
|
import collections
class Change(object):
    """Make change for an amount in cents using US coins, greedily."""

    # Denominations in cents, largest first, keyed by their symbol:
    # half-dollar, quarter, dime, nickel, penny.
    _DENOMINATIONS = (('h', 50), ('q', 25), ('d', 10), ('n', 5), ('p', 1))

    def makeChange(self, change):
        """Return a dict mapping coin symbol -> count for `change` cents.

        Uses divmod per denomination instead of the original repeated
        subtraction loop (O(denominations) instead of O(amount)).
        Symbols for unused denominations are omitted, so makeChange(0) == {}
        and non-positive amounts yield {} (matching the original loop).

        :param change: amount in cents (non-negative int expected)
        :return: dict of coin symbol -> count
        """
        coins = {}
        remaining = change
        for symbol, value in self._DENOMINATIONS:
            # Guard keeps behavior identical to the original for change <= 0
            if remaining >= value:
                count, remaining = divmod(remaining, value)
                coins[symbol] = count
        return coins
|
fanout/webhookinbox
|
api/views.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from base64 import b64encode, b64decode
import datetime
import copy
import json
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseNotAllowed
from gripcontrol import Channel, HttpResponseFormat, HttpStreamFormat
from django_grip import set_hold_longpoll, set_hold_stream, publish
import redis_ops
def _setting(name, default):
    """Read a Django setting by name, falling back to `default` when unset or None."""
    value = getattr(settings, name, None)
    return default if value is None else value
# Module-level configuration: Redis connection plus settings-driven options.
db = redis_ops.RedisOps()
grip_prefix = _setting('WHINBOX_GRIP_PREFIX', 'wi-')
# When True, incoming headers are expected with a magic prefix (see _req_to_item)
orig_headers = _setting('WHINBOX_ORIG_HEADERS', False)

# useful list derived from requestbin
# Headers stripped from stored requests (infrastructure noise, not payload);
# the leading/trailing empty split entries are dropped by the [1:-1] slice.
ignore_headers = """
X-Varnish
X-Forwarded-For
X-Heroku-Dynos-In-Use
X-Request-Start
X-Heroku-Queue-Wait-Time
X-Heroku-Queue-Depth
X-Real-Ip
X-Forwarded-Proto
X-Via
X-Forwarded-Port
Grip-Sig
Grip-Feature
Grip-Last
""".split("\n")[1:-1]
def _ignore_header(name):
    """Return True when `name` matches an entry of ignore_headers, case-insensitively."""
    lowered = name.lower()
    return any(lowered == h.lower() for h in ignore_headers)
def _convert_header_name(name):
out = ''
word_start = True
for c in name:
if c == '_':
out += '-'
word_start = True
elif word_start:
out += c.upper()
word_start = False
else:
out += c.lower()
return out
def _req_to_item(req):
    """Serialize a Django request into a JSON-storable dict with method,
    path, query, headers, body (text or base64) and client IP.

    Python 2 only: uses dict.iteritems() and b64encode on a str body.
    """
    item = dict()
    item['method'] = req.method
    item['path'] = req.path
    query = req.META.get('QUERY_STRING')
    if query:
        item['query'] = query
    raw_headers = list()
    # CONTENT_LENGTH/CONTENT_TYPE live in META without the HTTP_ prefix,
    # so they are collected explicitly.
    content_length = req.META.get('CONTENT_LENGTH')
    if content_length:
        raw_headers.append(('CONTENT_LENGTH', content_length))
    content_type = req.META.get('CONTENT_TYPE')
    if content_type:
        raw_headers.append(('CONTENT_TYPE', content_type))
    for k, v in req.META.iteritems():
        if k.startswith('HTTP_'):
            raw_headers.append((k[5:], v))
    # undjangoify the header names
    headers = list()
    for h in raw_headers:
        headers.append((_convert_header_name(h[0]), h[1]))
    if orig_headers:
        # if this option is set, then we assume the exact headers are magic prefixed
        tmp = list()
        for h in headers:
            if h[0].lower().startswith('eb9bf0f5-'):
                tmp.append((h[0][9:], h[1]))  # 9 == len('eb9bf0f5-')
        headers = tmp
    else:
        # otherwise, use the blacklist to clean things up
        tmp = list()
        for h in headers:
            if not _ignore_header(h[0]):
                tmp.append(h)
        headers = tmp
    item['headers'] = headers
    if len(req.body) > 0:
        try:
            # if the body is valid utf-8, then store as text
            item['body'] = req.body.decode('utf-8')
        except:
            # else, store as binary
            item['body-bin'] = b64encode(req.body)
    # Trust the first hop of X-Forwarded-For when present; else peer address.
    forwardedfor = req.META.get('HTTP_X_FORWARDED_FOR')
    if forwardedfor:
        ip_address = forwardedfor.split(',')[0].strip()
    else:
        ip_address = req.META['REMOTE_ADDR']
    item['ip_address'] = ip_address
    return item
def _convert_item(item, responded=False):
out = copy.deepcopy(item)
created = datetime.datetime.fromtimestamp(item['created']).isoformat()
if len(created) > 0 and created[-1] != 'Z':
created += 'Z'
out['created'] = created
if responded:
out['state'] = 'responded'
else:
out['state'] = 'response-pending'
return out
def root(req):
    """Catch-all root view: the API exposes no root resource."""
    return HttpResponseNotFound('Not Found\n')
def create(req):
    """POST: create an inbox.

    Form fields: optional 'id' (max 64 chars), 'ttl' (seconds, default
    3600) and 'response_mode' ('auto' | 'wait-verify' | 'wait', default
    'auto'). Returns the inbox metadata as JSON; 409 if the id exists.
    """
    if req.method == 'POST':
        host = req.META.get('HTTP_HOST')
        if not host:
            return HttpResponseBadRequest('Bad Request: No \'Host\' header\n')
        inbox_id = req.POST.get('id')
        if inbox_id is not None and len(inbox_id) > 64:
            return HttpResponseBadRequest('Bad Request: Id length must not exceed 64\n')
        ttl = req.POST.get('ttl')
        if ttl is not None:
            ttl = int(ttl)
        if ttl is None:
            ttl = 3600
        response_mode = req.POST.get('response_mode')
        if not response_mode:
            response_mode = 'auto'
        if response_mode not in ('auto', 'wait-verify', 'wait'):
            return HttpResponseBadRequest('Bad Request: response_mode must be "auto", "wait-verify", or "wait"\n')
        try:
            # db may generate an id when inbox_id is None
            inbox_id = db.inbox_create(inbox_id, ttl, response_mode)
        except redis_ops.InvalidId:
            return HttpResponseBadRequest('Bad Request: Invalid id\n')
        except redis_ops.ObjectExists:
            return HttpResponse('Conflict: Inbox already exists\n', status=409)
        except:
            return HttpResponse('Service Unavailable\n', status=503)
        out = dict()
        out['id'] = inbox_id
        out['base_url'] = 'http://' + host + '/i/' + inbox_id + '/'
        out['ttl'] = ttl
        out['response_mode'] = response_mode
        return HttpResponse(json.dumps(out) + '\n', content_type='application/json')
    else:
        return HttpResponseNotAllowed(['POST'])
def inbox(req, inbox_id):
    """GET inbox metadata as JSON, or DELETE the inbox (pushing a 404 to
    any connected long-poll listeners)."""
    if req.method == 'GET':
        host = req.META.get('HTTP_HOST')
        if not host:
            return HttpResponseBadRequest('Bad Request: No \'Host\' header\n')
        try:
            inbox = db.inbox_get(inbox_id)
        except redis_ops.InvalidId:
            return HttpResponseBadRequest('Bad Request: Invalid id\n')
        except redis_ops.ObjectDoesNotExist:
            return HttpResponseNotFound('Not Found\n')
        except:
            return HttpResponse('Service Unavailable\n', status=503)
        out = dict()
        out['id'] = inbox_id
        out['base_url'] = 'http://' + host + '/i/' + inbox_id + '/'
        out['ttl'] = inbox['ttl']
        # older records may lack response_mode; default it
        response_mode = inbox.get('response_mode')
        if not response_mode:
            response_mode = 'auto'
        out['response_mode'] = response_mode
        return HttpResponse(json.dumps(out) + '\n', content_type='application/json')
    elif req.method == 'DELETE':
        try:
            db.inbox_delete(inbox_id)
        except redis_ops.InvalidId:
            return HttpResponseBadRequest('Bad Request: Invalid id\n')
        except redis_ops.ObjectDoesNotExist:
            return HttpResponseNotFound('Not Found\n')
        except:
            return HttpResponse('Service Unavailable\n', status=503)
        # we'll push a 404 to any long polls because we're that cool
        publish(grip_prefix + 'inbox-%s' % inbox_id, HttpResponseFormat(code=404, headers={'Content-Type': 'text/html'}, body='Not Found\n'))
        return HttpResponse('Deleted\n')
    else:
        return HttpResponseNotAllowed(['GET', 'DELETE'])
def refresh(req, inbox_id):
    """POST: extend an inbox's lifetime. Optional 'ttl' form field
    (seconds); None lets the storage layer apply its default."""
    if req.method == 'POST':
        ttl = req.POST.get('ttl')
        if ttl is not None:
            ttl = int(ttl)
        try:
            db.inbox_refresh(inbox_id, ttl)
        except redis_ops.InvalidId:
            return HttpResponseBadRequest('Bad Request: Invalid id\n')
        except redis_ops.ObjectDoesNotExist:
            return HttpResponseNotFound('Not Found\n')
        except:
            return HttpResponse('Service Unavailable\n', status=503)
        return HttpResponse('Refreshed\n')
    else:
        return HttpResponseNotAllowed(['POST'])
def respond(req, inbox_id, item_id):
    """POST: supply a user-crafted HTTP response for a held webhook request.

    JSON body fields: optional 'code' (default 200), 'reason', 'headers',
    and 'body' (text) or 'body-bin' (base64). Publishing on the wait
    channel releases the held long-poll.
    """
    if req.method == 'POST':
        try:
            content = json.loads(req.body)
        except:
            return HttpResponseBadRequest('Bad Request: Body must be valid JSON\n')
        try:
            code = content.get('code')
            if code is not None:
                code = int(code)
            else:
                code = 200
            reason = content.get('reason')
            headers = content.get('headers')
            # binary body takes precedence over text body
            if 'body-bin' in content:
                body = b64decode(content['body-bin'])
            elif 'body' in content:
                body = content['body']
            else:
                body = ''
        except:
            return HttpResponseBadRequest('Bad Request: Bad format of response\n')
        try:
            db.request_remove_pending(inbox_id, item_id)
        except redis_ops.InvalidId:
            return HttpResponseBadRequest('Bad Request: Invalid id\n')
        except redis_ops.ObjectDoesNotExist:
            return HttpResponseNotFound('Not Found\n')
        except:
            return HttpResponse('Service Unavailable\n', status=503)
        publish(grip_prefix + 'wait-%s-%s' % (inbox_id, item_id), HttpResponseFormat(code=code, reason=reason, headers=headers, body=body), id='1', prev_id='0')
        return HttpResponse('Ok\n')
    else:
        return HttpResponseNotAllowed(['POST'])
def hit(req, inbox_id):
    """Record an incoming webhook request into the inbox.

    Depending on the inbox's response_mode (and whether this is a
    PubSubHubbub verification, signalled by 'hub.challenge'), either
    respond immediately or hold the request open (GRIP long-poll) until a
    user responds via respond(). Python 2 only (iteritems).
    """
    if len(req.grip.last) > 0:
        # GRIP retry of a held request: re-hold on the channel/cursor the
        # proxy last saw (grab the first — in practice only — entry).
        for channel, last_id in req.grip.last.iteritems():
            break
        set_hold_longpoll(req, Channel(channel, last_id))
        return HttpResponse('Service Unavailable\n', status=503, content_type='text/html')
    try:
        inbox = db.inbox_get(inbox_id)
    except redis_ops.InvalidId:
        return HttpResponseBadRequest('Bad Request: Invalid id\n')
    except redis_ops.ObjectDoesNotExist:
        return HttpResponseNotFound('Not Found\n')
    except:
        return HttpResponse('Service Unavailable\n', status=503)
    response_mode = inbox.get('response_mode')
    if not response_mode:
        response_mode = 'auto'
    # pubsubhubbub verify request?
    hub_challenge = req.GET.get('hub.challenge')
    if response_mode == 'wait' or (response_mode == 'wait-verify' and hub_challenge):
        respond_now = False
    else:
        respond_now = True
    item = _req_to_item(req)
    if hub_challenge:
        item['type'] = 'hub-verify'
    else:
        item['type'] = 'normal'
    try:
        item_id, prev_id, item_created = db.inbox_append_item(inbox_id, item)
        db.inbox_clear_expired_items(inbox_id)
    except redis_ops.InvalidId:
        return HttpResponseBadRequest('Bad Request: Invalid id\n')
    except redis_ops.ObjectDoesNotExist:
        return HttpResponseNotFound('Not Found\n')
    except:
        return HttpResponse('Service Unavailable\n', status=503)
    item['id'] = item_id
    item['created'] = item_created
    item = _convert_item(item, respond_now)
    # Fan the new item out to both long-poll (response format) and
    # streaming (stream format) listeners on the inbox channel.
    hr_headers = dict()
    hr_headers['Content-Type'] = 'application/json'
    hr = dict()
    hr['last_cursor'] = item_id
    hr['items'] = [item]
    hr_body = json.dumps(hr) + '\n'
    hs_body = json.dumps(item) + '\n'
    formats = list()
    formats.append(HttpResponseFormat(headers=hr_headers, body=hr_body))
    formats.append(HttpStreamFormat(hs_body))
    publish(grip_prefix + 'inbox-%s' % inbox_id, formats, id=item_id, prev_id=prev_id)
    if respond_now:
        if hub_challenge:
            # echo the challenge back per the PubSubHubbub spec
            return HttpResponse(hub_challenge)
        else:
            return HttpResponse('Ok\n')
    else:
        # wait for the user to respond
        db.request_add_pending(inbox_id, item_id)
        set_hold_longpoll(req, Channel(grip_prefix + 'wait-%s-%s' % (inbox_id, item_id), '0'))
        return HttpResponse('Service Unavailable\n', status=503, content_type='text/html')
def items(req, inbox_id):
    """GET: list an inbox's items as JSON.

    Query params: 'order' ('created' ascending, default, or '-created'),
    'max' (1..50, default/cap 50), 'since' ('id:<x>' or 'cursor:<x>').
    Ascending order with no results becomes a GRIP long-poll on the inbox
    channel. Fetching also refreshes the inbox TTL.
    """
    if req.method == 'GET':
        try:
            db.inbox_refresh(inbox_id)
        except redis_ops.InvalidId:
            return HttpResponseBadRequest('Bad Request: Invalid id\n')
        except redis_ops.ObjectDoesNotExist:
            return HttpResponseNotFound('Not Found\n')
        except:
            return HttpResponse('Service Unavailable\n', status=503)
        order = req.GET.get('order')
        if order and order not in ('created', '-created'):
            return HttpResponseBadRequest('Bad Request: Invalid order value\n')
        if not order:
            order = 'created'
        imax = req.GET.get('max')
        if imax:
            try:
                imax = int(imax)
                if imax < 1:
                    raise ValueError('max too small')
            except:
                return HttpResponseBadRequest('Bad Request: Invalid max value\n')
        if not imax or imax > 50:
            imax = 50
        since = req.GET.get('since')
        since_id = None
        since_cursor = None
        if since:
            if since.startswith('id:'):
                since_id = since[3:]
            elif since.startswith('cursor:'):
                since_cursor = since[7:]
            else:
                return HttpResponseBadRequest('Bad Request: Invalid since value\n')
        # at the moment, cursor is identical to id
        item_id = None
        if since_id:
            item_id = since_id
        elif since_cursor:
            item_id = since_cursor
        if order == 'created':
            try:
                items, last_id = db.inbox_get_items_after(inbox_id, item_id, imax)
            except redis_ops.InvalidId:
                return HttpResponseBadRequest('Bad Request: Invalid id\n')
            except redis_ops.ObjectDoesNotExist:
                return HttpResponseNotFound('Not Found\n')
            except:
                return HttpResponse('Service Unavailable\n', status=503)
            out = dict()
            out['last_cursor'] = last_id
            out_items = list()
            for i in items:
                out_items.append(_convert_item(i, not db.request_is_pending(inbox_id, i['id'])))
            out['items'] = out_items
            if len(out_items) == 0:
                # nothing new: hold the request open until something arrives
                set_hold_longpoll(req, Channel(grip_prefix + 'inbox-%s' % inbox_id, last_id))
            return HttpResponse(json.dumps(out) + '\n', content_type='application/json')
        else: # -created
            try:
                items, last_id, eof = db.inbox_get_items_before(inbox_id, item_id, imax)
            except redis_ops.InvalidId:
                return HttpResponseBadRequest('Bad Request: Invalid id\n')
            except redis_ops.ObjectDoesNotExist:
                return HttpResponseNotFound('Not Found\n')
            except:
                return HttpResponse('Service Unavailable\n', status=503)
            out = dict()
            # omit the cursor at end-of-feed so clients know to stop paging
            if not eof and last_id:
                out['last_cursor'] = last_id
            out_items = list()
            for i in items:
                out_items.append(_convert_item(i, not db.request_is_pending(inbox_id, i['id'])))
            out['items'] = out_items
            return HttpResponse(json.dumps(out) + '\n', content_type='application/json')
    else:
        return HttpResponseNotAllowed(['GET'])
def stream(req, inbox_id):
    """GET: open a GRIP HTTP stream of new items for this inbox.
    The inbox_get call is only a validation/existence check."""
    if req.method == 'GET':
        try:
            db.inbox_get(inbox_id)
        except redis_ops.InvalidId:
            return HttpResponseBadRequest('Bad Request: Invalid id\n')
        except redis_ops.ObjectDoesNotExist:
            return HttpResponseNotFound('Not Found\n')
        except:
            return HttpResponse('Service Unavailable\n', status=503)
        set_hold_stream(req, grip_prefix + 'inbox-%s' % inbox_id)
        return HttpResponse('[opened]\n', content_type='text/plain')
    else:
        return HttpResponseNotAllowed(['GET'])
|
mitdbg/modeldb
|
client/verta/verta/_swagger/_public/uac/model/UacGetOrganizationByIdResponse.py
|
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class UacGetOrganizationByIdResponse(BaseType):
    """Swagger-generated model wrapping an optional `organization` payload."""
    def __init__(self, organization=None):
        # No attribute here is marked required, so the loop below never raises
        # for this model; it is shared boilerplate across generated models.
        required = {
          "organization": False,
        }
        self.organization = organization

        for k, v in required.items():
            # NOTE(review): self[k] assumes BaseType provides item access over
            # attributes — confirm against verta._swagger.base_type.
            if self[k] is None and v:
                raise ValueError('attribute {} is required'.format(k))

    @staticmethod
    def from_json(d):
        """Build an instance from a decoded JSON dict, converting the nested
        'organization' dict into a UacOrganization. Mutates `d` in place."""
        from .UacOrganization import UacOrganization

        tmp = d.get('organization', None)
        if tmp is not None:
            d['organization'] = UacOrganization.from_json(tmp)

        return UacGetOrganizationByIdResponse(**d)
|
CospanDesign/python
|
yaml/example1.py
|
#! /usr/bin/python
import yaml
def main():
#f = open("data.yaml", "r")
f = open("data2.yaml", "r")
yd = yaml.load(f)
#print "YAML Data: %s" % str(yd)
for key in yd:
print "%s" % key
print "Type: %s" % str(type(yd[key]))
print str(yd[key])
print ""
def yaml_test():
#f = open("data.yaml", "r")
f = open("data2.yaml", "r")
yd = yaml.load(f)
#print "YAML Data: %s" % str(yd)
for key in yd:
print "%s" % key
print "Type: %s" % str(type(yd[key]))
print str(yd[key])
print ""
# Script entry point.
if __name__ == "__main__":
    main()
|
disenone/zsync
|
test/parallel_task/ventilator.py
|
# -*- coding: utf-8 -*-
import zmq
import random
import time
def run():
    """Task ventilator: PUSH 100 random workloads to workers and signal the
    sink that a batch is starting. Python 2 script (print statement,
    raw_input, xrange)."""
    context = zmq.Context()
    # socket to distribute tasks to workers
    sender = context.socket(zmq.PUSH)
    sender.bind('tcp://*:5557')
    # socket to tell the sink a new batch is starting
    sink = context.socket(zmq.PUSH)
    sink.connect('tcp://localhost:5558')
    print 'Press Enter when the workers are ready: '
    _ = raw_input()
    print('sending tasks to workders...')
    # batch-start marker for the sink
    sink.send(b'0')
    random.seed()
    total_msec = 0
    for task_nbr in xrange(100):
        # simulated workload, 1..100 msec
        workload = random.randint(1, 100)
        total_msec += workload
        sender.send_string(u'%i' % workload)
    print 'Total expected cost: %s msec' % total_msec
    # give 0MQ time to flush before the context is torn down at exit
    time.sleep(1)
# Script entry point.
if __name__ == '__main__':
    run()
|
tdickers/mitmproxy
|
mitmproxy/console/grideditor.py
|
from __future__ import absolute_import, print_function, division
import copy
import os
import re
import urwid
from mitmproxy import filt
from mitmproxy import script
from mitmproxy import utils
from mitmproxy.console import common
from mitmproxy.console import signals
from netlib.http import cookies
from netlib.http import user_agents
# Status-bar hints shown below the grid editor in browsing mode.
FOOTER = [
    ('heading_key', "enter"), ":edit ",
    ('heading_key', "q"), ":back ",
]
# Status-bar hints shown while a cell is being edited inline.
FOOTER_EDITING = [
    ('heading_key', "esc"), ":stop editing ",
]
class TextColumn:
    """Grid column holding a single escaped text value."""
    # no nested editor for plain text columns
    subeditor = None

    def __init__(self, heading):
        self.heading = heading

    def text(self, obj):
        return SEscaped(obj or "")

    def blank(self):
        return ""

    def keypress(self, key, editor):
        """Column-level key handling: r = read file into cell, R = read
        unescaped, e = spawn external editor, enter = inline edit.
        Unhandled keys are returned for the caller to process."""
        if key == "r":
            if editor.walker.get_current_value() is not None:
                signals.status_prompt_path.send(
                    self,
                    prompt = "Read file",
                    callback = editor.read_file
                )
        elif key == "R":
            if editor.walker.get_current_value() is not None:
                signals.status_prompt_path.send(
                    editor,
                    prompt = "Read unescaped file",
                    callback = editor.read_file,
                    args = (True,)
                )
        elif key == "e":
            o = editor.walker.get_current_value()
            if o is not None:
                # py2-only "string-escape" codec
                n = editor.master.spawn_editor(o.encode("string-escape"))
                n = utils.clean_hanging_newline(n)
                editor.walker.set_current_value(n, False)
                editor.walker._modified()
        elif key in ["enter"]:
            editor.walker.start_edit()
        else:
            return key
class SubgridColumn:
    """Grid column whose value is itself a grid, edited in a sub-editor."""
    def __init__(self, heading, subeditor):
        self.heading = heading
        self.subeditor = subeditor

    def text(self, obj):
        p = cookies._format_pairs(obj, sep="\n")
        return urwid.Text(p)

    def blank(self):
        return []

    def keypress(self, key, editor):
        """enter opens the sub-editor; r/R/e are rejected with a hint.
        Unhandled keys are returned for the caller to process."""
        if key in "rRe":
            signals.status_message.send(
                self,
                message = "Press enter to edit this field.",
                expire = 1000
            )
            return
        elif key in ["enter"]:
            editor.master.view_grideditor(
                self.subeditor(
                    editor.master,
                    editor.walker.get_current_value(),
                    editor.set_subeditor_value,
                    editor.walker.focus,
                    editor.walker.focus_col
                )
            )
        else:
            return key
class SEscaped(urwid.WidgetWrap):
    """Read-only text widget displaying its value Python-string-escaped."""
    def __init__(self, txt):
        txt = txt.encode("string-escape")  # py2-only escape codec
        w = urwid.Text(txt, wrap="any")
        urwid.WidgetWrap.__init__(self, w)

    def get_text(self):
        return self._w.get_text()[0]

    def keypress(self, size, key):
        # read-only: pass every key through
        return key

    def selectable(self):
        return True
class SEdit(urwid.WidgetWrap):
    """Multiline edit widget whose initial content is Python-string-escaped."""
    def __init__(self, txt):
        txt = txt.encode("string-escape")  # py2-only escape codec
        w = urwid.Edit(edit_text=txt, wrap="any", multiline=True)
        w = urwid.AttrWrap(w, "editfield")
        urwid.WidgetWrap.__init__(self, w)

    def get_text(self):
        return self._w.get_text()[0].strip()

    def selectable(self):
        return True
class GridRow(urwid.WidgetWrap):
    """One rendered grid row: static text cells, with the focused cell
    replaced by an SEdit widget when `editing` is set."""
    def __init__(self, focused, editing, editor, values):
        # values is a (row-values, error-offset-set) pair from GridWalker
        self.focused, self.editing, self.editor = focused, editing, editor
        errors = values[1]
        self.fields = []
        for i, v in enumerate(values[0]):
            if focused == i and editing:
                self.editing = SEdit(v)
                self.fields.append(self.editing)
            else:
                w = self.editor.columns[i].text(v)
                if focused == i:
                    if i in errors:
                        w = urwid.AttrWrap(w, "focusfield_error")
                    else:
                        w = urwid.AttrWrap(w, "focusfield")
                elif i in errors:
                    w = urwid.AttrWrap(w, "field_error")
                self.fields.append(w)
        fspecs = self.fields[:]
        if len(self.fields) > 1:
            # first column gets a fixed width (+2 for the divider)
            fspecs[0] = ("fixed", self.editor.first_width + 2, fspecs[0])
        w = urwid.Columns(
            fspecs,
            dividechars = 2
        )
        if focused is not None:
            w.set_focus_column(focused)
        urwid.WidgetWrap.__init__(self, w)

    def get_edit_value(self):
        return self.editing.get_text()

    def keypress(self, s, k):
        # while editing, route keys to the edit widget sized to its column
        if self.editing:
            w = self._w.column_widths(s)[self.focused]
            k = self.editing.keypress((w,), k)
        return k

    def selectable(self):
        return True
class GridWalker(urwid.ListWalker):
    """
    Stores rows as a list of (rows, errors) tuples, where rows is a list
    and errors is a set with an entry of each offset in rows that is an
    error.
    """
    def __init__(self, lst, editor):
        self.lst = [(i, set([])) for i in lst]
        self.editor = editor
        self.focus = 0        # focused row index
        self.focus_col = 0    # focused column index
        self.editing = False  # False, or the GridRow currently in edit mode

    def _modified(self):
        self.editor.show_empty_msg()
        return urwid.ListWalker._modified(self)

    def add_value(self, lst):
        # copy the caller's list so later mutation can't corrupt our state
        self.lst.append((lst[:], set([])))
        self._modified()

    def get_current_value(self):
        # returns None when the grid is empty
        if self.lst:
            return self.lst[self.focus][0][self.focus_col]

    def set_current_value(self, val, unescaped):
        """Validate and store val into the focused cell. Unless `unescaped`,
        val is first decoded as a Python-escaped string."""
        if not unescaped:
            try:
                val = val.decode("string-escape")  # py2-only codec
            except ValueError:
                signals.status_message.send(
                    self,
                    message = "Invalid Python-style string encoding.",
                    expire = 1000
                )
                return
        errors = self.lst[self.focus][1]
        emsg = self.editor.is_error(self.focus_col, val)
        if emsg:
            signals.status_message.send(message = emsg, expire = 1)
            errors.add(self.focus_col)
        else:
            errors.discard(self.focus_col)
        self.set_value(val, self.focus, self.focus_col, errors)

    def set_value(self, val, focus, focus_col, errors=None):
        if not errors:
            errors = set([])
        row = list(self.lst[focus][0])
        row[focus_col] = val
        self.lst[focus] = [tuple(row), errors]
        self._modified()

    def delete_focus(self):
        if self.lst:
            del self.lst[self.focus]
            self.focus = min(len(self.lst) - 1, self.focus)
            self._modified()

    def _insert(self, pos):
        """Insert a blank row at pos and immediately start editing it."""
        self.focus = pos
        self.lst.insert(
            self.focus,
            [
                [c.blank() for c in self.editor.columns], set([])
            ]
        )
        self.focus_col = 0
        self.start_edit()

    def insert(self):
        return self._insert(self.focus)

    def add(self):
        return self._insert(min(self.focus + 1, len(self.lst)))

    def start_edit(self):
        # subgrid columns are edited through their own editor, not inline
        col = self.editor.columns[self.focus_col]
        if self.lst and not col.subeditor:
            self.editing = GridRow(
                self.focus_col, True, self.editor, self.lst[self.focus]
            )
            self.editor.master.loop.widget.footer.update(FOOTER_EDITING)
            self._modified()

    def stop_edit(self):
        if self.editing:
            self.editor.master.loop.widget.footer.update(FOOTER)
            self.set_current_value(self.editing.get_edit_value(), False)
            self.editing = False
            self._modified()

    def left(self):
        self.focus_col = max(self.focus_col - 1, 0)
        self._modified()

    def right(self):
        self.focus_col = min(self.focus_col + 1, len(self.editor.columns) - 1)
        self._modified()

    def tab_next(self):
        """Advance to the next cell, wrapping to the next row's first
        column; stops at the last cell of the last row."""
        self.stop_edit()
        if self.focus_col < len(self.editor.columns) - 1:
            self.focus_col += 1
        elif self.focus != len(self.lst) - 1:
            self.focus_col = 0
            self.focus += 1
        self._modified()

    def get_focus(self):
        if self.editing:
            return self.editing, self.focus
        elif self.lst:
            return GridRow(
                self.focus_col,
                False,
                self.editor,
                self.lst[self.focus]
            ), self.focus
        else:
            return None, None

    def set_focus(self, focus):
        self.stop_edit()
        self.focus = focus
        self._modified()

    def get_next(self, pos):
        if pos + 1 >= len(self.lst):
            return None, None
        return GridRow(None, False, self.editor, self.lst[pos + 1]), pos + 1

    def get_prev(self, pos):
        if pos - 1 < 0:
            return None, None
        return GridRow(None, False, self.editor, self.lst[pos - 1]), pos - 1
class GridListBox(urwid.ListBox):
    """ListBox over a GridWalker; exists mainly for naming clarity."""
    def __init__(self, lw):
        urwid.ListBox.__init__(self, lw)
# Bounds (in terminal columns) for the first grid column's rendered width.
# NOTE(review): FIRST_WIDTH_MIN is defined but not referenced in this file.
FIRST_WIDTH_MAX = 40
FIRST_WIDTH_MIN = 20
class GridEditor(urwid.WidgetWrap):
    """Base full-screen editor for tabular values.

    Subclasses set `title` and `columns`, and may override data_in /
    data_out / is_error / handle_key. The edited rows are passed to
    `callback` (after data_out) when the user exits with q/esc.
    """
    title = None
    columns = None

    def __init__(self, master, value, callback, *cb_args, **cb_kwargs):
        value = self.data_in(copy.deepcopy(value))
        self.master, self.value, self.callback = master, value, callback
        self.cb_args, self.cb_kwargs = cb_args, cb_kwargs
        first_width = 20
        if value:
            for r in value:
                assert len(r) == len(self.columns)
                first_width = max(len(r), first_width)
        self.first_width = min(first_width, FIRST_WIDTH_MAX)
        title = urwid.Text(self.title)
        title = urwid.Padding(title, align="left", width=("relative", 100))
        title = urwid.AttrWrap(title, "heading")
        headings = []
        for i, col in enumerate(self.columns):
            c = urwid.Text(col.heading)
            if i == 0 and len(self.columns) > 1:
                headings.append(("fixed", first_width + 2, c))
            else:
                headings.append(c)
        h = urwid.Columns(
            headings,
            dividechars = 2
        )
        h = urwid.AttrWrap(h, "heading")
        self.walker = GridWalker(self.value, self)
        self.lb = GridListBox(self.walker)
        self._w = urwid.Frame(
            self.lb,
            header = urwid.Pile([title, h])
        )
        self.master.loop.widget.footer.update("")
        self.show_empty_msg()

    def show_empty_msg(self):
        # footer hint shown when the grid has no rows
        if self.walker.lst:
            self._w.set_footer(None)
        else:
            self._w.set_footer(
                urwid.Text(
                    [
                        ("highlight", "No values. Press "),
                        ("key", "a"),
                        ("highlight", " to add some."),
                    ]
                )
            )

    def encode(self, s):
        # NOTE(review): self.encoding is not defined in this class or any
        # subclass visible here — presumably set externally; confirm.
        if not self.encoding:
            return s
        try:
            return s.encode(self.encoding)
        except ValueError:
            return None

    def read_file(self, p, unescaped=False):
        """Load file contents into the focused cell; returns an error
        string on IOError (consumed by the prompt machinery)."""
        if p:
            try:
                p = os.path.expanduser(p)
                d = file(p, "rb").read()  # py2 builtin file()
                self.walker.set_current_value(d, unescaped)
                self.walker._modified()
            except IOError as v:
                return str(v)

    def set_subeditor_value(self, val, focus, focus_col):
        self.walker.set_value(val, focus, focus_col)

    def keypress(self, size, key):
        """Top-level key dispatch: edit-mode keys first, then global
        shortcuts, then column-specific and subclass handling."""
        if self.walker.editing:
            if key in ["esc"]:
                self.walker.stop_edit()
            elif key == "tab":
                pf, pfc = self.walker.focus, self.walker.focus_col
                self.walker.tab_next()
                # re-enter edit mode only when tab stayed on the same row
                if self.walker.focus == pf and self.walker.focus_col != pfc:
                    self.walker.start_edit()
            else:
                self._w.keypress(size, key)
            return None
        key = common.shortcuts(key)
        column = self.columns[self.walker.focus_col]
        if key in ["q", "esc"]:
            # exit: keep only error-free, non-empty rows and hand them back
            res = []
            for i in self.walker.lst:
                if not i[1] and any([x for x in i[0]]):
                    res.append(i[0])
            self.callback(self.data_out(res), *self.cb_args, **self.cb_kwargs)
            signals.pop_view_state.send(self)
        elif key == "g":
            self.walker.set_focus(0)
        elif key == "G":
            self.walker.set_focus(len(self.walker.lst) - 1)
        elif key in ["h", "left"]:
            self.walker.left()
        elif key in ["l", "right"]:
            self.walker.right()
        elif key == "tab":
            self.walker.tab_next()
        elif key == "a":
            self.walker.add()
        elif key == "A":
            self.walker.insert()
        elif key == "d":
            self.walker.delete_focus()
        elif column.keypress(key, self) and not self.handle_key(key):
            return self._w.keypress(size, key)

    def data_out(self, data):
        """
        Called on raw list data, before data is returned through the
        callback.
        """
        return data

    def data_in(self, data):
        """
        Called to prepare provided data.
        """
        return data

    def is_error(self, col, val):
        """
        Return False, or a string error message.
        """
        return False

    def handle_key(self, key):
        # subclass hook; return True when the key was consumed
        return False

    def make_help(self):
        """Build the help screen widget list for the editor controls."""
        text = []
        text.append(urwid.Text([("text", "Editor control:\n")]))
        keys = [
            ("A", "insert row before cursor"),
            ("a", "add row after cursor"),
            ("d", "delete row"),
            ("e", "spawn external editor on current field"),
            ("q", "save changes and exit editor"),
            ("r", "read value from file"),
            ("R", "read unescaped value from file"),
            ("esc", "save changes and exit editor"),
            ("tab", "next field"),
            ("enter", "edit field"),
        ]
        text.extend(
            common.format_keyvals(keys, key="key", val="text", indent=4)
        )
        text.append(
            urwid.Text(
                [
                    "\n",
                    ("text", "Values are escaped Python-style strings.\n"),
                ]
            )
        )
        return text
class QueryEditor(GridEditor):
    """Key/value editor for URL query parameters."""
    title = "Editing query"
    columns = [
        TextColumn("Key"),
        TextColumn("Value")
    ]
class HeaderEditor(GridEditor):
    """Key/value editor for HTTP headers, with a U shortcut for inserting
    a User-Agent header from the known-agent list."""
    title = "Editing headers"
    columns = [
        TextColumn("Key"),
        TextColumn("Value")
    ]

    def make_help(self):
        # prepend the special-key section to the base help text
        h = GridEditor.make_help(self)
        text = []
        text.append(urwid.Text([("text", "Special keys:\n")]))
        keys = [
            ("U", "add User-Agent header"),
        ]
        text.extend(
            common.format_keyvals(keys, key="key", val="text", indent=4)
        )
        text.append(urwid.Text([("text", "\n")]))
        text.extend(h)
        return text

    def set_user_agent(self, k):
        ua = user_agents.get_by_shortcut(k)
        if ua:
            # ua[2] is the full User-Agent string for the shortcut
            self.walker.add_value(
                [
                    "User-Agent",
                    ua[2]
                ]
            )

    def handle_key(self, key):
        if key == "U":
            signals.status_prompt_onekey.send(
                prompt = "Add User-Agent header:",
                keys = [(i[0], i[1]) for i in user_agents.UASTRINGS],
                callback = self.set_user_agent,
            )
            return True
class URLEncodedFormEditor(GridEditor):
    """Key/value editor for URL-encoded form bodies."""
    title = "Editing URL-encoded form"
    columns = [
        TextColumn("Key"),
        TextColumn("Value")
    ]
class ReplaceEditor(GridEditor):
    """Editor for replacement rules: (filter, regex, replacement)."""
    title = "Editing replacement patterns"
    columns = [
        TextColumn("Filter"),
        TextColumn("Regex"),
        TextColumn("Replacement"),
    ]

    def is_error(self, col, val):
        # validate the filter (col 0) and the regex (col 1); the
        # replacement text (col 2) is always accepted
        if col == 0:
            if not filt.parse(val):
                return "Invalid filter specification."
        elif col == 1:
            try:
                re.compile(val)
            except re.error:
                return "Invalid regular expression."
        return False
class SetHeadersEditor(GridEditor):
    """Editor for header-set rules: (filter, header, value), with a U
    shortcut for inserting a User-Agent row matching all flows."""
    title = "Editing header set patterns"
    columns = [
        TextColumn("Filter"),
        TextColumn("Header"),
        TextColumn("Value"),
    ]

    def is_error(self, col, val):
        # only the filter column is validated
        if col == 0:
            if not filt.parse(val):
                return "Invalid filter specification"
        return False

    def make_help(self):
        # prepend the special-key section to the base help text
        h = GridEditor.make_help(self)
        text = []
        text.append(urwid.Text([("text", "Special keys:\n")]))
        keys = [
            ("U", "add User-Agent header"),
        ]
        text.extend(
            common.format_keyvals(keys, key="key", val="text", indent=4)
        )
        text.append(urwid.Text([("text", "\n")]))
        text.extend(h)
        return text

    def set_user_agent(self, k):
        ua = user_agents.get_by_shortcut(k)
        if ua:
            # ".*" filter: apply to every flow; ua[2] is the UA string
            self.walker.add_value(
                [
                    ".*",
                    "User-Agent",
                    ua[2]
                ]
            )

    def handle_key(self, key):
        if key == "U":
            signals.status_prompt_onekey.send(
                prompt = "Add User-Agent header:",
                keys = [(i[0], i[1]) for i in user_agents.UASTRINGS],
                callback = self.set_user_agent,
            )
            return True
class PathEditor(GridEditor):
    """Single-column editor for URL path components."""
    title = "Editing URL path components"
    columns = [
        TextColumn("Component"),
    ]

    def data_in(self, data):
        # wrap each component in a one-element row for the grid
        return [[i] for i in data]

    def data_out(self, data):
        # unwrap the one-element rows back to a flat list
        return [i[0] for i in data]
class ScriptEditor(GridEditor):
    """Single-column editor for script commands."""
    title = "Editing scripts"
    columns = [
        TextColumn("Command"),
    ]

    def is_error(self, col, val):
        # NOTE(review): returns None (falsy) on success rather than False
        # like the sibling editors — works, but inconsistent.
        try:
            script.Script.parse_command(val)
        except script.ScriptException as e:
            return str(e)
class HostPatternEditor(GridEditor):
    """Single-column editor for host regexes (matched case-insensitively)."""
    title = "Editing host patterns"
    columns = [
        TextColumn("Regex (matched on hostname:port / ip:port)")
    ]

    def is_error(self, col, val):
        try:
            re.compile(val, re.IGNORECASE)
        except re.error as e:
            return "Invalid regex: %s" % str(e)

    def data_in(self, data):
        # wrap each pattern in a one-element row for the grid
        return [[i] for i in data]

    def data_out(self, data):
        # unwrap the one-element rows back to a flat list
        return [i[0] for i in data]
class CookieEditor(GridEditor):
    """Name/value editor for request Cookie header pairs."""
    title = "Editing request Cookie header"
    columns = [
        TextColumn("Name"),
        TextColumn("Value"),
    ]
class CookieAttributeEditor(GridEditor):
    """Name/value editor for Set-Cookie attributes; empty values are
    normalized to None on the way out (valueless attributes like Secure)."""
    title = "Editing Set-Cookie attributes"
    columns = [
        TextColumn("Name"),
        TextColumn("Value"),
    ]

    def data_out(self, data):
        ret = []
        for i in data:
            if not i[1]:
                ret.append([i[0], None])
            else:
                ret.append(i)
        return ret
class SetCookieEditor(GridEditor):
    """Editor for response Set-Cookie headers: name, value and a subgrid
    of attributes edited via CookieAttributeEditor."""
    title = "Editing response SetCookie header"
    columns = [
        TextColumn("Name"),
        TextColumn("Value"),
        SubgridColumn("Attributes", CookieAttributeEditor),
    ]

    def data_in(self, data):
        # flatten [key, (value, attrs)] into [key, value, attr-pairs];
        # attrs.items(multi=True) suggests a multidict — TODO confirm type
        flattened = []
        for key, (value, attrs) in data:
            flattened.append([key, value, attrs.items(multi=True)])
        return flattened

    def data_out(self, data):
        # rebuild the nested [key, (value, attrs)] structure
        vals = []
        for key, value, attrs in data:
            vals.append(
                [
                    key,
                    (value, attrs)
                ]
            )
        return vals
|
wolrah/arris_stats
|
arris_scraper.py
|
#!/usr/bin/env python
# A library to scrape statistics from Arris CM820 and similar cable modems
# Inspired by https://gist.github.com/berg/2651577
import BeautifulSoup
import requests
import time
cm_time_format = '%a %Y-%m-%d %H:%M:%S'
def get_status(baseurl):
    """Scrape <baseurl>status_cgi and return a dict of modem statistics:
    {'timestamp', 'status', 'downstream', 'upstream', 'interfaces'}.

    Python 2 / BeautifulSoup 3 code (the `BeautifulSoup` module import).
    """
    # Retrieve and process the page from the modem
    url = baseurl + 'status_cgi'
    pagedata = requests.get(url).content
    timestamp = time.time() # Get the time immediately after retrieval
    bs = BeautifulSoup.BeautifulSoup(pagedata)
    # Fixed table/row offsets on the status page — brittle if the modem
    # firmware changes its HTML layout.
    downstream_table = bs.findAll('table')[1].findAll('tr')[1:]
    upstream_table = bs.findAll('table')[3].findAll('tr')[2:]
    status_table = bs.findAll('table')[5].findAll('tr')
    interface_table = bs.findAll('table')[7].findAll('tr')[1:]
    downstream_stats = []
    for row in downstream_table:
        cols = row.findAll('td')
        modem_channel = int(cols[0].string.strip()[-1])
        docsis_channel = int(cols[1].string.strip())
        frequency = float(cols[2].string.strip().split()[0])
        if cols[3].string.strip() == '----':
            # Channel row present but carrying no measurements.
            # NOTE(review): channel_available is assigned but never used
            # or stored in the returned stats.
            channel_available = False
            power = None
            snr = None
            modulation = None
            octets = None
            corrected_errors = None
            uncorrectable_errors = None
        else:
            power = float(cols[3].string.strip().split()[0])
            snr = float(cols[4].string.strip().split()[0])
            modulation = cols[5].string.strip()
            octets = int(cols[6].string.strip())
            corrected_errors = int(cols[7].string.strip())
            uncorrectable_errors = int(cols[8].string.strip())
        channelstats = {'modem_channel': modem_channel,
                        'dcid': docsis_channel,
                        'frequency': frequency,
                        'power': power,
                        'snr': snr,
                        'modulation': modulation,
                        'octets': octets,
                        'corrected_errors': corrected_errors,
                        'uncorrectable_errors': uncorrectable_errors}
        downstream_stats.append(channelstats)
    upstream_stats = []
    for row in upstream_table:
        cols = row.findAll('td')
        modem_channel = int(cols[0].string.strip()[-1])
        docsis_channel = int(cols[1].string.strip())
        frequency = float(cols[2].string.strip().split()[0])
        power = float(cols[3].string.strip().split()[0])
        channel_type = cols[4].string.strip()
        # *1000: page presumably reports ksym/s — confirm against a live page
        symbol_rate = int(cols[5].string.strip().split()[0]) * 1000
        modulation = cols[6].string.strip()
        channelstats = {'modem_channel': modem_channel,
                        'ucid': docsis_channel,
                        'frequency': frequency,
                        'power': power,
                        'channel_type': channel_type,
                        'symbol_rate': symbol_rate,
                        'modulation': modulation}
        upstream_stats.append(channelstats)
    # Uptime cell is colon-separated day/hour/minute fields; fold to seconds.
    uptime_split = status_table[0].findAll('td')[1].string.strip().split(':')
    uptime_days = int(uptime_split[0].strip().split()[0])
    uptime_hours = int(uptime_split[1].strip().split()[0])
    uptime_minutes = int(uptime_split[2].strip().split()[0])
    uptime = ((((uptime_days * 24) + uptime_hours) * 60) + uptime_minutes) * 60
    # CPE cell looks like "<type>CPE(<count>), ..." — parse type/count pairs.
    cpe_split = status_table[1].findAll('td')[1].string.strip().split(',')
    cpelist = {}
    for entry in cpe_split:
        entrystripped = entry.strip()
        entrysplit = entrystripped.split('CPE')
        cpe_type = entrysplit[0]
        cpe_count = int(entrysplit[1].strip('()'))
        cpelist[cpe_type] = cpe_count
    cm_status = status_table[2].findAll('td')[1].string.strip()
    cm_time_string = status_table[3].findAll('td')[1].string.strip()
    cm_time = time.mktime(time.strptime(cm_time_string, cm_time_format))
    modem_status = {'uptime': uptime,
                    'cpe': cpelist,
                    'cm_status': cm_status,
                    'cm_time': cm_time}
    interfaces = []
    for row in interface_table:
        cols = row.findAll('td')
        interface_name = cols[0].string.strip()
        provisioning_state = cols[1].string.strip()
        interface_state = cols[2].string.strip()
        interface_speed = cols[3].string.strip()
        mac = cols[4].string.strip()
        interface_data = {'name': interface_name,
                          'provisioned': provisioning_state,
                          'state': interface_state,
                          'speed': interface_speed,
                          'mac': mac}
        interfaces.append(interface_data)
    status = {'timestamp': timestamp,
              'status': modem_status,
              'downstream': downstream_stats,
              'upstream': upstream_stats,
              'interfaces': interfaces}
    return status
def get_versions(baseurl):
    """Not yet implemented: scrape the modem's version info page."""
    raise NotImplementedError()

def get_eventlog(baseurl):
    """Not yet implemented: scrape the modem's event log."""
    raise NotImplementedError()

def get_cmstate(baseurl):
    """Not yet implemented: scrape the CM state page."""
    raise NotImplementedError()

def get_productdetails(baseurl):
    """Not yet implemented: scrape the product details page."""
    raise NotImplementedError()

def get_dhcpparams(baseurl):
    """Not yet implemented: scrape the DHCP parameters page."""
    raise NotImplementedError()

def get_qos(url):
    """Not yet implemented: scrape the QoS page."""
    raise NotImplementedError()

def get_config(url):
    """Not yet implemented: scrape the config page."""
    raise NotImplementedError()
|
Tjorriemorrie/trading
|
18_theoryofruns/app_old/src/main/main.py
|
import logging
from src.settings import JINJA_ENVIRONMENT
from src.base import BaseHandler
from src.main.models import Torrent, UserTorrent
from google.appengine.ext import ndb
from google.appengine.api import users
import arrow
from time import sleep
class IndexPage(BaseHandler):
    """Landing page: new movies, new series, and (for the signed-in user)
    new episodes of series they are watching."""
    def get(self):
        # new movies
        self.template_values['movies'] = Torrent.query(Torrent.category_code == 207, Torrent.uploader == 'YIFY', Torrent.resolution == 720).order(-Torrent.uploaded_at).fetch(30)
        # new series
        self.template_values['series_new'] = Torrent.query(Torrent.category_code == 205, Torrent.series_episode == 1).order(-Torrent.uploaded_at).fetch(15)
        episodes_new = []
        series_watching = []
        # watching series
        uts = UserTorrent.query(UserTorrent.user == users.get_current_user(), UserTorrent.category_code == 205).fetch()
        if uts:
            series_watching = set()
            # ut.torrent.get() is fetched twice per entry — N+1 datastore reads
            for ut in [ut for ut in uts if ut.torrent.get().series_title]:
                series_watching.add(ut.torrent.get().series_title)
            logging.info('{0} series being watched by user'.format(len(uts)))
            # new episodes
            if series_watching:
                # episodes uploaded in the last 14 days for watched series
                cutoff = arrow.utcnow().replace(days=-14).datetime
                episodes_new = Torrent.query(Torrent.series_title.IN(series_watching), Torrent.uploaded_at > cutoff, Torrent.category_code == 205).order(-Torrent.uploaded_at).fetch()
                logging.info('{0} episodes fetched for watched series'.format(len(episodes_new)))
        self.template_values['series_watching'] = series_watching
        self.template_values['episodes_new'] = episodes_new
        # logging.info('{0}'.format(self.template_values))
        template = JINJA_ENVIRONMENT.get_template('main/templates/index.html')
        self.response.write(template.render(self.template_values))
class CategoryPage(BaseHandler):
    """Lists every torrent in one category, newest uploads first."""

    def get(self, cat):
        logging.info('cat {0}'.format(cat))
        category_code = int(cat)
        self.template_values['cat'] = category_code

        # Fetch all torrents of this category, newest first.
        torrents = Torrent.query(
            Torrent.category_code == category_code
        ).order(-Torrent.uploaded_at).fetch()
        self.template_values['torrents'] = torrents
        logging.info('torrents {0}'.format(len(torrents)))

        template = JINJA_ENVIRONMENT.get_template('main/templates/category.html')
        self.response.write(template.render(self.template_values))
class DownloadPage(BaseHandler):
    """Toggles a UserTorrent bookmark for the torrent given by its urlsafe key."""

    def get(self, key):
        logging.info('download {0}'.format(key))
        logging.info('user {0}'.format(self.user))

        torrent = ndb.Key(urlsafe=key).get()
        logging.info('torrent {0}'.format(torrent))

        user_torrent = UserTorrent.query(
            UserTorrent.user == self.user,
            UserTorrent.torrent == torrent.key,
        ).get()
        if user_torrent:
            # Already bookmarked: remove the bookmark.
            user_torrent.key.delete()
            logging.info('User Torrent deleted')
        else:
            # Not bookmarked yet: create the bookmark.
            user_torrent = UserTorrent(user=self.user, torrent=torrent.key,
                                       category_code=torrent.category_code)
            user_torrent.put()
            logging.info('User Torrent saved')
        logging.info('User Torrent {0}'.format(user_torrent))

        self.response.status = '200 OK'
|
seanbell/opensurfaces
|
server/photos/management/commands/add_special.py
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from photos.models import PhotoSceneCategory
from photos.add import add_photo
#from licenses.models import License
class Command(BaseCommand):
args = '<flickr_dir>'
help = 'Adds photos from flickr'
def handle(self, *args, **options):
admin_user = User.objects.get_or_create(
username='admin')[0].get_profile()
print 'user:', admin_user
name = 'kitchen'
scene_category, _ = PhotoSceneCategory.objects \
.get_or_create(name=name)
path = args[0]
if not path:
print 'No path'
return
try:
photo = add_photo(
path=path,
user=admin_user,
scene_category=scene_category,
flickr_user=None,
flickr_id=None,
license=None,
exif='',
fov=None,
)
except Exception as e:
print '\nNot adding photo:', e
else:
print '\nAdded photo:', path
photo.synthetic = True
photo.save()
|
sauloal/cnidaria
|
scripts/venv/lib/python2.7/site-packages/pandas/core/format.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable=W0141
import sys
from pandas.core.base import PandasObject
from pandas.core.common import adjoin, notnull
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas import compat
from pandas.compat import(StringIO, lzip, range, map, zip, reduce, u,
OrderedDict)
from pandas.util.terminal import get_terminal_size
from pandas.core.config import get_option, set_option
import pandas.core.common as com
import pandas.lib as lib
from pandas.tslib import iNaT, Timestamp, Timedelta, format_array_from_datetime
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex
import numpy as np
import itertools
import csv
docstring_to_string = """
Parameters
----------
frame : DataFrame
object to render
buf : StringIO-like, optional
buffer to write to
columns : sequence, optional
the subset of columns to write; default None writes all columns
col_space : int, optional
the minimum width of each column
header : bool, optional
whether to print column labels, default True
index : bool, optional
whether to print index (row) labels, default True
na_rep : string, optional
string representation of NAN to use, default 'NaN'
formatters : list or dict of one-parameter functions, optional
formatter functions to apply to columns' elements by position or name,
default None. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats,
default None. The result of this function must be a unicode string.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every
multiindex key at each row, default True
justify : {'left', 'right'}, default None
Left or right-justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box.
index_names : bool, optional
Prints the names of the indexes, default True
force_unicode : bool, default False
Always return a unicode result. Deprecated in v0.10.0 as string
formatting is now rendered to unicode by default.
Returns
-------
formatted : string (or unicode, depending on data and options)"""
class CategoricalFormatter(object):
    """Render a Categorical as a console-friendly string: the formatted
    values in brackets, plus an optional name/length/levels footer."""

    def __init__(self, categorical, buf=None, length=True,
                 na_rep='NaN', name=False, footer=True):
        self.categorical = categorical
        # Default to an in-memory unicode buffer when none was supplied.
        self.buf = buf if buf is not None else StringIO(u(""))
        self.name = name
        self.na_rep = na_rep
        self.length = length
        self.footer = footer

    def _get_footer(self):
        """Build the footer text: name and length, then the category levels."""
        footer = ''

        if self.name:
            name = com.pprint_thing(self.categorical.name,
                                    escape_chars=('\t', '\r', '\n'))
            footer += ('Name: %s' % name if self.categorical.name is not None
                       else '')

        if self.length:
            if footer:
                footer += ', '
            footer += "Length: %d" % len(self.categorical)

        level_info = self.categorical._repr_categories_info()

        # Levels are added in a newline
        if footer:
            footer += '\n'
        footer += level_info

        return compat.text_type(footer)

    def _get_formatted_values(self):
        # Element formatting (including NaN replacement) is delegated to
        # the module-level format_array helper.
        return format_array(self.categorical.get_values(), None,
                            float_format=None,
                            na_rep=self.na_rep)

    def to_string(self):
        """Return the full formatted representation as a single string."""
        categorical = self.categorical

        if len(categorical) == 0:
            # Empty categorical: nothing but the (optional) footer.
            if self.footer:
                return self._get_footer()
            else:
                return u('')

        fmt_values = self._get_formatted_values()

        result = ['%s' % i for i in fmt_values]
        result = [i.strip() for i in result]
        result = u(', ').join(result)
        result = [u('[')+result+u(']')]
        if self.footer:
            footer = self._get_footer()
            if footer:
                result.append(footer)

        return compat.text_type(u('\n').join(result))
class SeriesFormatter(object):
    """Render a Series as a console-friendly string (header, values, footer),
    truncating to ``max_rows`` with a '...' separator row when needed."""

    def __init__(self, series, buf=None, length=True, header=True,
                 na_rep='NaN', name=False, float_format=None, dtype=True,
                 max_rows=None):
        self.series = series
        self.buf = buf if buf is not None else StringIO()
        self.name = name
        self.na_rep = na_rep
        self.header = header
        self.length = length
        self.max_rows = max_rows

        if float_format is None:
            # Fall back to the globally configured float formatter.
            float_format = get_option("display.float_format")
        self.float_format = float_format
        self.dtype = dtype

        self._chk_truncate()

    def _chk_truncate(self):
        """Slice the series to at most ``max_rows`` rows (head + tail) and
        remember where the '...' separator belongs (``tr_row_num``)."""
        from pandas.tools.merge import concat
        max_rows = self.max_rows
        truncate_v = max_rows and (len(self.series) > max_rows)
        series = self.series
        if truncate_v:
            if max_rows == 1:
                row_num = max_rows
                series = series.iloc[:max_rows]
            else:
                # Keep an equal number of rows from the head and the tail.
                row_num = max_rows // 2
                series = concat((series.iloc[:row_num], series.iloc[-row_num:]))
            self.tr_row_num = row_num
        self.tr_series = series
        self.truncate_v = truncate_v

    def _get_footer(self):
        """Build the footer line: freq, name, length, dtype, and (for
        categorical data) the levels info on a new line."""
        name = self.series.name
        footer = u('')

        if getattr(self.series.index, 'freq', None) is not None:
            footer += 'Freq: %s' % self.series.index.freqstr

        if self.name is not False and name is not None:
            if footer:
                footer += ', '

            series_name = com.pprint_thing(name,
                                           escape_chars=('\t', '\r', '\n'))
            footer += ("Name: %s" %
                       series_name) if name is not None else ""

        if self.length:
            if footer:
                footer += ', '
            footer += 'Length: %d' % len(self.series)

        if self.dtype is not False and self.dtype is not None:
            # NOTE: rebinds ``name`` to the dtype name from here on.
            name = getattr(self.tr_series.dtype, 'name', None)
            if name:
                if footer:
                    footer += ', '
                footer += 'dtype: %s' % com.pprint_thing(name)

        # level infos are added to the end and in a new line, like it is done
        # for Categoricals. Only added when we request a name.
        if name and com.is_categorical_dtype(self.tr_series.dtype):
            level_info = self.tr_series.values._repr_categories_info()
            if footer:
                footer += "\n"
            footer += level_info

        return compat.text_type(footer)

    def _get_formatted_index(self):
        """Return (formatted index labels, whether an index header exists)."""
        index = self.tr_series.index
        is_multi = isinstance(index, MultiIndex)

        if is_multi:
            have_header = any(name for name in index.names)
            fmt_index = index.format(names=True)
        else:
            have_header = index.name is not None
            fmt_index = index.format(name=True)
        return fmt_index, have_header

    def _get_formatted_values(self):
        # Element formatting delegated to the module-level format_array helper.
        return format_array(self.tr_series.get_values(), None,
                            float_format=self.float_format,
                            na_rep=self.na_rep)

    def to_string(self):
        """Return the formatted series as a single string."""
        series = self.tr_series
        footer = self._get_footer()

        if len(series) == 0:
            return 'Series([], ' + footer + ')'

        fmt_index, have_header = self._get_formatted_index()
        fmt_values = self._get_formatted_values()

        maxlen = max(len(x) for x in fmt_index)  # max index len
        pad_space = min(maxlen, 60)

        if self.truncate_v:
            # Insert the '...' separator row between head and tail.
            n_header_rows = 0
            row_num = self.tr_row_num
            width = len(fmt_values[row_num-1])
            if width > 3:
                dot_str = '...'
            else:
                dot_str = '..'
            dot_str = dot_str.center(width)
            fmt_values.insert(row_num + n_header_rows, dot_str)
            fmt_index.insert(row_num + 1, '')

        result = adjoin(3, *[fmt_index[1:], fmt_values])

        if self.header and have_header:
            result = fmt_index[0] + '\n' + result

        if footer:
            result += '\n' + footer

        return compat.text_type(u('').join(result))
def _strlen_func():
    """Return a callable that measures a string's display length.

    On Python 3 this is just ``len``; on Python 2, byte strings are first
    decoded with the configured display encoding so the count is in
    characters, not bytes.
    """
    if compat.PY3:  # pragma: no cover
        return len

    encoding = get_option("display.encoding")

    def _strlen(x):
        # Fall back to the raw byte length when the bytes do not decode.
        try:
            return len(x.decode(encoding))
        except UnicodeError:
            return len(x)

    return _strlen
class TableFormatter(object):
    """Shared behaviour for the tabular formatters (string/HTML renderers)."""

    is_truncated = False
    show_dimensions = None

    @property
    def should_show_dimensions(self):
        """Whether the '[n rows x m columns]' footer should be emitted."""
        if self.show_dimensions is True:
            return True
        # 'truncate' means: show dimensions only when the output was cut.
        return self.show_dimensions == 'truncate' and self.is_truncated

    def _get_formatter(self, i):
        """Return the per-column formatter for column ``i`` (by position or
        by label), or None when no formatter applies."""
        if isinstance(self.formatters, (list, tuple)):
            # Positional formatters: only an integer index can match.
            return self.formatters[i] if com.is_integer(i) else None
        # Dict-like formatters: translate a positional index into its label,
        # unless the integer is itself one of the column labels.
        if com.is_integer(i) and i not in self.columns:
            i = self.columns[i]
        return self.formatters.get(i, None)
class DataFrameFormatter(TableFormatter):
    """
    Render a DataFrame

    self.to_string() : console-friendly tabular output
    self.to_html()   : html table
    self.to_latex()  : LaTeX tabular environment table
    """
    # Append the shared parameter documentation defined at module level.
    __doc__ = __doc__ if __doc__ else ''
    __doc__ += docstring_to_string

    def __init__(self, frame, buf=None, columns=None, col_space=None,
                 header=True, index=True, na_rep='NaN', formatters=None,
                 justify=None, float_format=None, sparsify=None,
                 index_names=True, line_width=None, max_rows=None,
                 max_cols=None, show_dimensions=False, **kwds):
        self.frame = frame
        self.buf = buf if buf is not None else StringIO()
        self.show_index_names = index_names

        if sparsify is None:
            sparsify = get_option("display.multi_sparse")

        self.sparsify = sparsify

        self.float_format = float_format
        self.formatters = formatters if formatters is not None else {}
        self.na_rep = na_rep
        self.col_space = col_space
        self.header = header
        self.index = index
        self.line_width = line_width
        self.max_rows = max_rows
        self.max_cols = max_cols
        self.max_rows_displayed = min(max_rows or len(self.frame),
                                      len(self.frame))
        self.show_dimensions = show_dimensions

        if justify is None:
            self.justify = get_option("display.colheader_justify")
        else:
            self.justify = justify

        self.kwds = kwds

        if columns is not None:
            # Restrict the frame to the requested column subset.
            self.columns = _ensure_index(columns)
            self.frame = self.frame[self.columns]
        else:
            self.columns = frame.columns

        self._chk_truncate()

    def _chk_truncate(self):
        '''
        Checks whether the frame should be truncated. If so, slices
        the frame up.
        '''
        from pandas.tools.merge import concat

        # Column of which first element is used to determine width of a dot col
        self.tr_size_col = -1

        # Cut the data to the information actually printed
        max_cols = self.max_cols
        max_rows = self.max_rows

        if max_cols == 0 or max_rows == 0:  # assume we are in the terminal (why else = 0)
            (w, h) = get_terminal_size()
            self.w = w
            self.h = h
            if self.max_rows == 0:
                dot_row = 1
                prompt_row = 1
                if self.show_dimensions:
                    show_dimension_rows = 3
                # NOTE(review): show_dimension_rows is unbound here when
                # show_dimensions is falsy -- confirm callers always pass it.
                n_add_rows = self.header + dot_row + show_dimension_rows + prompt_row
                max_rows_adj = self.h - n_add_rows  # rows available to fill with actual data
                self.max_rows_adj = max_rows_adj

            # Format only rows and columns that could potentially fit the screen
            if max_cols == 0 and len(self.frame.columns) > w:
                max_cols = w

            if max_rows == 0 and len(self.frame) > h:
                max_rows = h

        if not hasattr(self, 'max_rows_adj'):
            self.max_rows_adj = max_rows
        if not hasattr(self, 'max_cols_adj'):
            self.max_cols_adj = max_cols

        max_cols_adj = self.max_cols_adj
        max_rows_adj = self.max_rows_adj

        truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
        truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)

        frame = self.frame
        if truncate_h:
            if max_cols_adj == 0:
                col_num = len(frame.columns)
            elif max_cols_adj == 1:
                frame = frame.iloc[:, :max_cols]
                col_num = max_cols
            else:
                # Keep an equal number of columns from the left and the right.
                col_num = (max_cols_adj // 2)
                frame = concat((frame.iloc[:, :col_num], frame.iloc[:, -col_num:]), axis=1)
            self.tr_col_num = col_num
        if truncate_v:
            if max_rows_adj == 0:
                row_num = len(frame)
            # NOTE(review): plain ``if`` (not ``elif``) -- when max_rows_adj
            # is 0 the else branch below overwrites row_num/frame; confirm
            # this matches the intended behavior.
            if max_rows_adj == 1:
                row_num = max_rows
                frame = frame.iloc[:max_rows, :]
            else:
                # Keep an equal number of rows from the head and the tail.
                row_num = max_rows_adj // 2
                frame = concat((frame.iloc[:row_num, :], frame.iloc[-row_num:, :]))
            self.tr_row_num = row_num

        self.tr_frame = frame
        self.truncate_h = truncate_h
        self.truncate_v = truncate_v
        self.is_truncated = self.truncate_h or self.truncate_v

    def _to_str_columns(self):
        """
        Render a DataFrame to a list of columns (as lists of strings).
        """
        _strlen = _strlen_func()
        frame = self.tr_frame

        # may include levels names also
        str_index = self._get_formatted_index(frame)
        str_columns = self._get_formatted_column_labels(frame)

        if self.header:
            stringified = []
            for i, c in enumerate(frame):
                cheader = str_columns[i]
                # Column must be at least as wide as its widest header line.
                max_colwidth = max(self.col_space or 0,
                                   *(_strlen(x) for x in cheader))
                fmt_values = self._format_col(i)
                fmt_values = _make_fixed_width(fmt_values, self.justify,
                                               minimum=max_colwidth)

                max_len = max(np.max([_strlen(x) for x in fmt_values]),
                              max_colwidth)
                if self.justify == 'left':
                    cheader = [x.ljust(max_len) for x in cheader]
                else:
                    cheader = [x.rjust(max_len) for x in cheader]

                stringified.append(cheader + fmt_values)
        else:
            stringified = []
            for i, c in enumerate(frame):
                fmt_values = self._format_col(i)
                fmt_values = _make_fixed_width(fmt_values, self.justify,
                                               minimum=(self.col_space or 0))

                stringified.append(fmt_values)

        strcols = stringified
        if self.index:
            strcols.insert(0, str_index)

        # Add ... to signal truncated
        truncate_h = self.truncate_h
        truncate_v = self.truncate_v

        if truncate_h:
            col_num = self.tr_col_num
            col_width = len(strcols[self.tr_size_col][0])  # infer from column header
            strcols.insert(self.tr_col_num + 1, ['...'.center(col_width)] * (len(str_index)))
        if truncate_v:
            n_header_rows = len(str_index) - len(frame)
            row_num = self.tr_row_num
            for ix, col in enumerate(strcols):
                cwidth = len(strcols[ix][row_num])  # infer from above row
                is_dot_col = False
                if truncate_h:
                    is_dot_col = ix == col_num + 1
                if cwidth > 3 or is_dot_col:
                    my_str = '...'
                else:
                    my_str = '..'

                if ix == 0:
                    # Index column: left-align the dots.
                    dot_str = my_str.ljust(cwidth)
                elif is_dot_col:
                    cwidth = len(strcols[self.tr_size_col][0])
                    dot_str = my_str.center(cwidth)
                else:
                    dot_str = my_str.rjust(cwidth)

                strcols[ix].insert(row_num + n_header_rows, dot_str)
        return strcols

    def to_string(self):
        """
        Render a DataFrame to a console-friendly tabular output.
        """
        from pandas import Series

        frame = self.frame

        if len(frame.columns) == 0 or len(frame.index) == 0:
            info_line = (u('Empty %s\nColumns: %s\nIndex: %s')
                         % (type(self.frame).__name__,
                            com.pprint_thing(frame.columns),
                            com.pprint_thing(frame.index)))
            text = info_line
        else:
            strcols = self._to_str_columns()
            if self.line_width is None:  # no need to wrap around just print the whole frame
                text = adjoin(1, *strcols)
            elif not isinstance(self.max_cols, int) or self.max_cols > 0:  # need to wrap around
                text = self._join_multiline(*strcols)
            else:  # max_cols == 0. Try to fit frame to terminal
                text = adjoin(1, *strcols).split('\n')
                row_lens = Series(text).apply(len)
                max_len_col_ix = np.argmax(row_lens)
                max_len = row_lens[max_len_col_ix]
                headers = [ele[0] for ele in strcols]
                # Size of last col determines dot col size. See `self._to_str_columns
                size_tr_col = len(headers[self.tr_size_col])
                max_len += size_tr_col  # Need to make space for largest row plus truncate dot col
                dif = max_len - self.w
                adj_dif = dif
                col_lens = Series([Series(ele).apply(len).max() for ele in strcols])
                n_cols = len(col_lens)
                counter = 0
                # Drop middle columns until the frame fits the terminal width.
                while adj_dif > 0 and n_cols > 1:
                    counter += 1
                    mid = int(round(n_cols / 2.))
                    mid_ix = col_lens.index[mid]
                    col_len = col_lens[mid_ix]
                    adj_dif -= (col_len + 1)  # adjoin adds one
                    col_lens = col_lens.drop(mid_ix)
                    n_cols = len(col_lens)
                max_cols_adj = n_cols - self.index  # subtract index column
                self.max_cols_adj = max_cols_adj

                # Call again _chk_truncate to cut frame appropriately
                # and then generate string representation
                self._chk_truncate()
                strcols = self._to_str_columns()
                text = adjoin(1, *strcols)

        self.buf.writelines(text)

        if self.should_show_dimensions:
            self.buf.write("\n\n[%d rows x %d columns]"
                           % (len(frame), len(frame.columns)))

    def _join_multiline(self, *strcols):
        """Wrap the rendered columns into chunks no wider than line_width,
        joined with blank lines and trailing backslashes."""
        lwidth = self.line_width
        adjoin_width = 1
        strcols = list(strcols)
        if self.index:
            idx = strcols.pop(0)
            # Budget the index column into every chunk.
            lwidth -= np.array([len(x) for x in idx]).max() + adjoin_width

        col_widths = [np.array([len(x) for x in col]).max()
                      if len(col) > 0 else 0
                      for col in strcols]
        col_bins = _binify(col_widths, lwidth)
        nbins = len(col_bins)

        if self.truncate_v:
            nrows = self.max_rows_adj + 1
        else:
            nrows = len(self.frame)
        str_lst = []
        st = 0
        for i, ed in enumerate(col_bins):
            row = strcols[st:ed]
            row.insert(0, idx)
            if nbins > 1:
                # Continuation marker on every chunk but the last.
                if ed <= len(strcols) and i < nbins - 1:
                    row.append([' \\'] + [' '] * (nrows - 1))
                else:
                    row.append([' '] * nrows)
            str_lst.append(adjoin(adjoin_width, *row))
            st = ed
        return '\n\n'.join(str_lst)

    def to_latex(self, column_format=None, longtable=False):
        """
        Render a DataFrame to a LaTeX tabular/longtable environment output.
        """
        self.escape = self.kwds.get('escape', True)
        # TODO: column_format is not settable in df.to_latex

        def get_col_type(dtype):
            # Right-align numeric columns, left-align everything else.
            if issubclass(dtype.type, np.number):
                return 'r'
            else:
                return 'l'

        frame = self.frame

        if len(frame.columns) == 0 or len(frame.index) == 0:
            info_line = (u('Empty %s\nColumns: %s\nIndex: %s')
                         % (type(self.frame).__name__,
                            frame.columns, frame.index))
            strcols = [[info_line]]
        else:
            strcols = self._to_str_columns()

        if self.index and isinstance(self.frame.index, MultiIndex):
            # Expand the MultiIndex into one string column per level.
            clevels = self.frame.columns.nlevels
            strcols.pop(0)
            name = any(self.frame.columns.names)
            for i, lev in enumerate(self.frame.index.levels):
                lev2 = lev.format(name=name)
                blank = ' ' * len(lev2[0])
                lev3 = [blank] * clevels
                for level_idx, group in itertools.groupby(
                        self.frame.index.labels[i]):
                    count = len(list(group))
                    lev3.extend([lev2[level_idx]] + [blank] * (count - 1))
                strcols.insert(i, lev3)

        if column_format is None:
            dtypes = self.frame.dtypes.values
            column_format = ''.join(map(get_col_type, dtypes))
            if self.index:
                index_format = 'l' * self.frame.index.nlevels
                column_format = index_format + column_format
        elif not isinstance(column_format,
                            compat.string_types):  # pragma: no cover
            raise AssertionError('column_format must be str or unicode, not %s'
                                 % type(column_format))

        def write(buf, frame, column_format, strcols, longtable=False):
            """Write the LaTeX table body to ``buf``."""
            if not longtable:
                buf.write('\\begin{tabular}{%s}\n' % column_format)
                buf.write('\\toprule\n')
            else:
                buf.write('\\begin{longtable}{%s}\n' % column_format)
                buf.write('\\toprule\n')

            nlevels = frame.columns.nlevels
            for i, row in enumerate(zip(*strcols)):
                if i == nlevels:
                    buf.write('\\midrule\n')  # End of header
                    if longtable:
                        buf.write('\\endhead\n')
                        buf.write('\\midrule\n')
                        buf.write('\\multicolumn{3}{r}{{Continued on next '
                                  'page}} \\\\\n')
                        buf.write('\midrule\n')
                        buf.write('\endfoot\n\n')
                        buf.write('\\bottomrule\n')
                        buf.write('\\endlastfoot\n')
                if self.escape:
                    crow = [(x.replace('\\', '\\textbackslash')  # escape backslashes first
                             .replace('_', '\\_')
                             .replace('%', '\\%')
                             .replace('$', '\\$')
                             .replace('#', '\\#')
                             .replace('{', '\\{')
                             .replace('}', '\\}')
                             .replace('~', '\\textasciitilde')
                             .replace('^', '\\textasciicircum')
                             .replace('&', '\\&') if x else '{}') for x in row]
                else:
                    crow = [x if x else '{}' for x in row]
                buf.write(' & '.join(crow))
                buf.write(' \\\\\n')

            if not longtable:
                buf.write('\\bottomrule\n')
                buf.write('\\end{tabular}\n')
            else:
                buf.write('\\end{longtable}\n')

        if hasattr(self.buf, 'write'):
            write(self.buf, frame, column_format, strcols, longtable)
        elif isinstance(self.buf, compat.string_types):
            with open(self.buf, 'w') as f:
                write(f, frame, column_format, strcols, longtable)
        else:
            raise TypeError('buf is not a file name and it has no write '
                            'method')

    def _format_col(self, i):
        """Format column ``i`` of the truncated frame into a list of strings."""
        frame = self.tr_frame
        formatter = self._get_formatter(i)
        return format_array(
            (frame.iloc[:, i]).get_values(),
            formatter, float_format=self.float_format, na_rep=self.na_rep,
            space=self.col_space
        )

    def to_html(self, classes=None):
        """
        Render a DataFrame to a html table.
        """
        html_renderer = HTMLFormatter(self, classes=classes,
                                      max_rows=self.max_rows,
                                      max_cols=self.max_cols)
        if hasattr(self.buf, 'write'):
            html_renderer.write_result(self.buf)
        elif isinstance(self.buf, compat.string_types):
            with open(self.buf, 'w') as f:
                html_renderer.write_result(f)
        else:
            raise TypeError('buf is not a file name and it has no write '
                            ' method')

    def _get_formatted_column_labels(self, frame):
        """Format the column labels, adding a leading space in front of
        numeric columns so values line up with their headers."""
        from pandas.core.index import _sparsify

        def is_numeric_dtype(dtype):
            return issubclass(dtype.type, np.number)

        columns = frame.columns

        if isinstance(columns, MultiIndex):
            fmt_columns = columns.format(sparsify=False, adjoin=False)
            fmt_columns = lzip(*fmt_columns)
            dtypes = self.frame.dtypes.values

            # if we have a Float level, they don't use leading space at all
            restrict_formatting = any([l.is_floating for l in columns.levels])
            need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))

            def space_format(x, y):
                if y not in self.formatters and need_leadsp[x] and not restrict_formatting:
                    return ' ' + y
                return y

            str_columns = list(zip(*[[space_format(x, y) for y in x] for x in fmt_columns]))
            if self.sparsify:
                str_columns = _sparsify(str_columns)

            str_columns = [list(x) for x in zip(*str_columns)]
        else:
            fmt_columns = columns.format()
            dtypes = self.frame.dtypes
            need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
            str_columns = [[' ' + x
                            if not self._get_formatter(i) and need_leadsp[x]
                            else x]
                           for i, (col, x) in
                           enumerate(zip(columns, fmt_columns))]

        if self.show_index_names and self.has_index_names:
            for x in str_columns:
                x.append('')

        # self.str_columns = str_columns
        return str_columns

    @property
    def has_index_names(self):
        return _has_names(self.frame.index)

    @property
    def has_column_names(self):
        return _has_names(self.frame.columns)

    def _get_formatted_index(self, frame):
        # Note: this is only used by to_string() and to_latex(), not by
        # to_html().
        index = frame.index
        columns = frame.columns

        show_index_names = self.show_index_names and self.has_index_names
        show_col_names = (self.show_index_names and self.has_column_names)

        fmt = self._get_formatter('__index__')

        if isinstance(index, MultiIndex):
            fmt_index = index.format(sparsify=self.sparsify, adjoin=False,
                                     names=show_index_names,
                                     formatter=fmt)
        else:
            fmt_index = [index.format(name=show_index_names, formatter=fmt)]

        fmt_index = [tuple(_make_fixed_width(
            list(x), justify='left', minimum=(self.col_space or 0)))
            for x in fmt_index]

        adjoined = adjoin(1, *fmt_index).split('\n')

        # empty space for columns
        if show_col_names:
            col_header = ['%s' % x for x in self._get_column_name_list()]
        else:
            col_header = [''] * columns.nlevels

        if self.header:
            return col_header + adjoined
        else:
            return adjoined

    def _get_column_name_list(self):
        """Return the column index's level names, with None rendered as ''."""
        names = []
        columns = self.frame.columns
        if isinstance(columns, MultiIndex):
            names.extend('' if name is None else name
                         for name in columns.names)
        else:
            names.append('' if columns.name is None else columns.name)
        return names
class HTMLFormatter(TableFormatter):
    """Render a DataFrameFormatter's (possibly truncated) frame as an HTML
    table; used by DataFrame.to_html via DataFrameFormatter.to_html."""

    # Number of spaces each nesting level adds to the emitted HTML.
    indent_delta = 2

    def __init__(self, formatter, classes=None, max_rows=None, max_cols=None):
        self.fmt = formatter
        self.classes = classes

        self.frame = self.fmt.frame
        # Columns of the already-truncated frame, not the full one.
        self.columns = self.fmt.tr_frame.columns
        # Output lines are accumulated here and flushed in write_result().
        self.elements = []
        self.bold_rows = self.fmt.kwds.get('bold_rows', False)
        self.escape = self.fmt.kwds.get('escape', True)

        # Default the limits to "everything" when not given.
        self.max_rows = max_rows or len(self.fmt.frame)
        self.max_cols = max_cols or len(self.fmt.columns)
        self.show_dimensions = self.fmt.show_dimensions
        self.is_truncated = (self.max_rows < len(self.fmt.frame) or
                             self.max_cols < len(self.fmt.columns))
    def write(self, s, indent=0):
        """Append one indented line of output to the element buffer."""
        rs = com.pprint_thing(s)
        self.elements.append(' ' * indent + rs)
    def write_th(self, s, indent=0, tags=None):
        """Write a <th> cell, adding a min-width style when col_space is set."""
        if (self.fmt.col_space is not None
                and self.fmt.col_space > 0):
            tags = (tags or "")
            # NOTE(review): no separating space is inserted when ``tags``
            # already has content -- confirm callers never pass both.
            tags += 'style="min-width: %s;"' % self.fmt.col_space

        return self._write_cell(s, kind='th', indent=indent, tags=tags)
    def write_td(self, s, indent=0, tags=None):
        """Write a <td> cell."""
        return self._write_cell(s, kind='td', indent=indent, tags=tags)
def _write_cell(self, s, kind='td', indent=0, tags=None):
if tags is not None:
start_tag = '<%s %s>' % (kind, tags)
else:
start_tag = '<%s>' % kind
if self.escape:
# escape & first to prevent double escaping of &
esc = OrderedDict(
[('&', r'&'), ('<', r'<'), ('>', r'>')]
)
else:
esc = {}
rs = com.pprint_thing(s, escape_chars=esc).strip()
self.write(
'%s%s</%s>' % (start_tag, rs, kind), indent)
    def write_tr(self, line, indent=0, indent_delta=4, header=False,
                 align=None, tags=None, nindex_levels=0):
        """Write one table row; the first ``nindex_levels`` cells (and all
        header cells) become <th>, the rest <td>.  ``tags`` maps cell
        position to extra attribute text."""
        if tags is None:
            tags = {}

        if align is None:
            self.write('<tr>', indent)
        else:
            self.write('<tr style="text-align: %s;">' % align, indent)
        indent += indent_delta

        for i, s in enumerate(line):
            val_tag = tags.get(i, None)
            if header or (self.bold_rows and i < nindex_levels):
                self.write_th(s, indent, tags=val_tag)
            else:
                self.write_td(s, indent, tags=val_tag)

        indent -= indent_delta
        self.write('</tr>', indent)
    def write_result(self, buf):
        """Render the whole table (<table>, header, body, optional dimensions
        footer) into the element buffer, then flush it to ``buf``."""
        indent = 0
        frame = self.frame

        _classes = ['dataframe']  # Default class.
        if self.classes is not None:
            if isinstance(self.classes, str):
                self.classes = self.classes.split()
            if not isinstance(self.classes, (list, tuple)):
                raise AssertionError(('classes must be list or tuple, '
                                      'not %s') % type(self.classes))
            _classes.extend(self.classes)

        self.write('<table border="1" class="%s">' % ' '.join(_classes),
                   indent)

        indent += self.indent_delta
        indent = self._write_header(indent)
        indent = self._write_body(indent)

        self.write('</table>', indent)
        if self.should_show_dimensions:
            # Multiplication sign; unichr is the Python 2 spelling.
            by = chr(215) if compat.PY3 else unichr(215)  # ×
            self.write(u('<p>%d rows %s %d columns</p>') %
                       (len(frame), by, len(frame.columns)))
        _put_lines(buf, self.elements)
    def _write_header(self, indent):
        """Write the <thead> section, inserting '...' marker columns when the
        output is horizontally truncated.  Returns the updated indent."""
        truncate_h = self.fmt.truncate_h
        row_levels = self.frame.index.nlevels
        if not self.fmt.header:
            # write nothing
            return indent

        def _column_header():
            # Build the flat (non-MultiIndex) header row.
            if self.fmt.index:
                row = [''] * (self.frame.index.nlevels - 1)
            else:
                row = []

            if isinstance(self.columns, MultiIndex):
                if self.fmt.has_column_names and self.fmt.index:
                    row.append(single_column_table(self.columns.names))
                else:
                    row.append('')
                style = "text-align: %s;" % self.fmt.justify
                row.extend([single_column_table(c, self.fmt.justify, style) for
                            c in self.columns])
            else:
                if self.fmt.index:
                    row.append(self.columns.name or '')
                row.extend(self.columns)
            return row

        self.write('<thead>', indent)
        row = []

        indent += self.indent_delta

        if isinstance(self.columns, MultiIndex):
            template = 'colspan="%d" halign="left"'

            if self.fmt.sparsify:
                # GH3547
                sentinel = com.sentinel_factory()
            else:
                sentinel = None
            levels = self.columns.format(sparsify=sentinel,
                                         adjoin=False, names=False)
            level_lengths = _get_level_lengths(levels, sentinel)
            inner_lvl = len(level_lengths) - 1
            for lnum, (records, values) in enumerate(zip(level_lengths,
                                                         levels)):
                if truncate_h:
                    # modify the header lines
                    ins_col = self.fmt.tr_col_num
                    if self.fmt.sparsify:
                        recs_new = {}
                        # Increment tags after ... col.
                        for tag, span in list(records.items()):
                            if tag >= ins_col:
                                recs_new[tag + 1] = span
                            elif tag + span > ins_col:
                                # The ... column splits this span: widen it.
                                recs_new[tag] = span + 1
                                if lnum == inner_lvl:
                                    values = values[:ins_col] + (u('...'),) + \
                                        values[ins_col:]
                                else:  # sparse col headers do not receive a ...
                                    values = (values[:ins_col] +
                                              (values[ins_col - 1],) +
                                              values[ins_col:])
                            else:
                                recs_new[tag] = span
                            # if ins_col lies between tags, all col headers get ...
                            if tag + span == ins_col:
                                recs_new[ins_col] = 1
                                values = values[:ins_col] + (u('...'),) + \
                                    values[ins_col:]
                        records = recs_new
                        inner_lvl = len(level_lengths) - 1
                        if lnum == inner_lvl:
                            records[ins_col] = 1
                    else:
                        recs_new = {}
                        for tag, span in list(records.items()):
                            if tag >= ins_col:
                                recs_new[tag + 1] = span
                            else:
                                recs_new[tag] = span
                        recs_new[ins_col] = 1
                        records = recs_new
                        values = values[:ins_col] + [u('...')] + values[ins_col:]

                name = self.columns.names[lnum]
                row = [''] * (row_levels - 1) + ['' if name is None
                                                else com.pprint_thing(name)]

                if row == [""] and self.fmt.index is False:
                    row = []

                tags = {}
                j = len(row)
                for i, v in enumerate(values):
                    if i in records:
                        if records[i] > 1:
                            tags[j] = template % records[i]
                    else:
                        # Cell absorbed by a preceding colspan: skip it.
                        continue
                    j += 1
                    row.append(v)
                self.write_tr(row, indent, self.indent_delta, tags=tags,
                              header=True)
        else:
            col_row = _column_header()
            align = self.fmt.justify

            if truncate_h:
                ins_col = row_levels + self.fmt.tr_col_num
                col_row.insert(ins_col, '...')

            self.write_tr(col_row, indent, self.indent_delta, header=True,
                          align=align)

        if self.fmt.has_index_names:
            # Extra header row carrying the index level names.
            row = [
                x if x is not None else '' for x in self.frame.index.names
            ] + [''] * min(len(self.columns), self.max_cols)
            if truncate_h:
                ins_col = row_levels + self.fmt.tr_col_num
                row.insert(ins_col, '')
            self.write_tr(row, indent, self.indent_delta, header=True)

        indent -= self.indent_delta
        self.write('</thead>', indent)

        return indent
    def _write_body(self, indent):
        """Write the <tbody> section; dispatches on index type.  Returns the
        updated indent."""
        self.write('<tbody>', indent)
        indent += self.indent_delta

        # Pre-format every displayed column once.
        fmt_values = {}
        for i in range(min(len(self.columns), self.max_cols)):
            fmt_values[i] = self.fmt._format_col(i)

        # write values
        if self.fmt.index:
            if isinstance(self.frame.index, MultiIndex):
                self._write_hierarchical_rows(fmt_values, indent)
            else:
                self._write_regular_rows(fmt_values, indent)
        else:
            # No index column: emit plain value rows.
            for i in range(len(self.frame)):
                row = [fmt_values[j][i] for j in range(len(self.columns))]
                self.write_tr(row, indent, self.indent_delta, tags=None)

        indent -= self.indent_delta
        self.write('</tbody>', indent)
        indent -= self.indent_delta

        return indent
    def _write_regular_rows(self, fmt_values, indent):
        """Write body rows for a flat (non-MultiIndex) index, inserting '...'
        separator rows/cells where the frame was truncated."""
        truncate_h = self.fmt.truncate_h
        truncate_v = self.fmt.truncate_v

        ncols = len(self.fmt.tr_frame.columns)
        nrows = len(self.fmt.tr_frame)
        fmt = self.fmt._get_formatter('__index__')
        if fmt is not None:
            index_values = self.fmt.tr_frame.index.map(fmt)
        else:
            index_values = self.fmt.tr_frame.index.format()

        row = []
        for i in range(nrows):

            if truncate_v and i == (self.fmt.tr_row_num):
                # Separator row of '...' between head and tail rows.
                str_sep_row = ['...' for ele in row]
                self.write_tr(str_sep_row, indent, self.indent_delta, tags=None,
                              nindex_levels=1)

            row = []
            row.append(index_values[i])
            row.extend(fmt_values[j][i] for j in range(ncols))

            if truncate_h:
                # '...' cell marking the removed middle columns.
                dot_col_ix = self.fmt.tr_col_num + 1
                row.insert(dot_col_ix, '...')
            self.write_tr(row, indent, self.indent_delta, tags=None,
                          nindex_levels=1)
    def _write_hierarchical_rows(self, fmt_values, indent):
        """Write <tr> rows for a MultiIndex, optionally merging repeated
        index labels into rowspan cells (sparsify) and inserting '...'
        rows/columns when the frame is truncated."""
        template = 'rowspan="%d" valign="top"'
        truncate_h = self.fmt.truncate_h
        truncate_v = self.fmt.truncate_v
        frame = self.fmt.tr_frame
        ncols = len(frame.columns)
        nrows = len(frame)
        row_levels = self.frame.index.nlevels
        idx_values = frame.index.format(sparsify=False, adjoin=False, names=False)
        # transpose: one tuple of level labels per row
        idx_values = lzip(*idx_values)
        if self.fmt.sparsify:
            # GH3547
            sentinel = com.sentinel_factory()
            levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False)
            level_lengths = _get_level_lengths(levels, sentinel)
            inner_lvl = len(level_lengths) - 1
            if truncate_v:
                # Insert ... row and adjust idx_values and
                # level_lengths to take this into account.
                ins_row = self.fmt.tr_row_num
                for lnum, records in enumerate(level_lengths):
                    rec_new = {}
                    for tag, span in list(records.items()):
                        if tag >= ins_row:
                            # group starts at/after the cut: shift it down one
                            rec_new[tag + 1] = span
                        elif tag + span > ins_row:
                            # group straddles the cut: widen it by one row
                            rec_new[tag] = span + 1
                            dot_row = list(idx_values[ins_row - 1])
                            dot_row[-1] = u('...')
                            idx_values.insert(ins_row, tuple(dot_row))
                        else:
                            rec_new[tag] = span
                        # If ins_row lies between tags, all cols idx cols receive ...
                        if tag + span == ins_row:
                            rec_new[ins_row] = 1
                            if lnum == 0:
                                idx_values.insert(ins_row, tuple([u('...')]*len(level_lengths)))
                    level_lengths[lnum] = rec_new
                level_lengths[inner_lvl][ins_row] = 1
                for ix_col in range(len(fmt_values)):
                    fmt_values[ix_col].insert(ins_row, '...')
                nrows += 1
            for i in range(nrows):
                row = []
                tags = {}
                # number of index cells suppressed (covered by a rowspan above)
                sparse_offset = 0
                j = 0
                for records, v in zip(level_lengths, idx_values[i]):
                    if i in records:
                        if records[i] > 1:
                            tags[j] = template % records[i]
                    else:
                        sparse_offset += 1
                        continue
                    j += 1
                    row.append(v)
                row.extend(fmt_values[j][i] for j in range(ncols))
                if truncate_h:
                    row.insert(row_levels - sparse_offset + self.fmt.tr_col_num, '...')
                self.write_tr(row, indent, self.indent_delta, tags=tags,
                              nindex_levels=len(levels) - sparse_offset)
        else:
            # non-sparsified: every row repeats all of its index labels
            for i in range(len(frame)):
                idx_values = list(zip(*frame.index.format(sparsify=False,
                                                          adjoin=False,
                                                          names=False)))
                row = []
                row.extend(idx_values[i])
                row.extend(fmt_values[j][i] for j in range(ncols))
                if truncate_h:
                    row.insert(row_levels + self.fmt.tr_col_num, '...')
                self.write_tr(row, indent, self.indent_delta, tags=None,
                              nindex_levels=frame.index.nlevels)
def _get_level_lengths(levels, sentinel=''):
from itertools import groupby
def _make_grouper():
record = {'count': 0}
def grouper(x):
if x != sentinel:
record['count'] += 1
return record['count']
return grouper
result = []
for lev in levels:
i = 0
f = _make_grouper()
recs = {}
for key, gpr in groupby(lev, f):
values = list(gpr)
recs[i] = len(values)
i += len(values)
result.append(recs)
return result
class CSVFormatter(object):
    """Write a DataFrame out as CSV.

    Handles quoting, encoding, (Multi)Index labels and chunked writing.
    The deprecated pure-python writer path (``engine='python'``) lives in
    ``_helper_csv``; the default path goes through ``_save`` /
    ``_save_chunk`` and ``lib.write_csv_rows``.
    """
    def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None,
                 cols=None, header=True, index=True, index_label=None,
                 mode='w', nanRep=None, encoding=None, quoting=None,
                 line_terminator='\n', chunksize=None, engine=None,
                 tupleize_cols=False, quotechar='"', date_format=None,
                 doublequote=True, escapechar=None, decimal='.'):
        self.engine = engine  # remove for 0.13
        self.obj = obj
        # default to an in-memory buffer when no target was supplied
        if path_or_buf is None:
            path_or_buf = StringIO()
        self.path_or_buf = path_or_buf
        self.sep = sep
        self.na_rep = na_rep
        self.float_format = float_format
        self.decimal = decimal
        self.header = header
        self.index = index
        self.index_label = index_label
        self.mode = mode
        self.encoding = encoding
        if quoting is None:
            quoting = csv.QUOTE_MINIMAL
        self.quoting = quoting
        if quoting == csv.QUOTE_NONE:
            # prevents crash in _csv
            quotechar = None
        self.quotechar = quotechar
        self.doublequote = doublequote
        self.escapechar = escapechar
        self.line_terminator = line_terminator
        self.date_format = date_format
        # GH3457
        if not self.obj.columns.is_unique and engine == 'python':
            raise NotImplementedError("columns.is_unique == False not "
                                      "supported with engine='python'")
        self.tupleize_cols = tupleize_cols
        self.has_mi_columns = isinstance(obj.columns, MultiIndex
                                         ) and not self.tupleize_cols
        # validate mi options
        if self.has_mi_columns:
            if cols is not None:
                raise TypeError("cannot specify cols with a MultiIndex on the "
                                "columns")
        # column subset requested: stringify labels and slice the frame
        if cols is not None:
            if isinstance(cols, Index):
                cols = cols.to_native_types(na_rep=na_rep,
                                            float_format=float_format,
                                            date_format=date_format,
                                            quoting=self.quoting)
            else:
                cols = np.asarray(list(cols))
            self.obj = self.obj.loc[:, cols]
        # update columns to include possible multiplicity of dupes
        # and make sure sure cols is just a list of labels
        cols = self.obj.columns
        if isinstance(cols, Index):
            cols = cols.to_native_types(na_rep=na_rep,
                                        float_format=float_format,
                                        date_format=date_format,
                                        quoting=self.quoting)
        else:
            cols = np.asarray(list(cols))
        # save it
        self.cols = cols
        # preallocate data 2d list
        self.blocks = self.obj._data.blocks
        ncols = sum(b.shape[0] for b in self.blocks)
        self.data = [None] * ncols
        # aim for roughly 100k cells per chunk when not specified
        if chunksize is None:
            chunksize = (100000 // (len(self.cols) or 1)) or 1
        self.chunksize = int(chunksize)
        self.data_index = obj.index
        if isinstance(obj.index, PeriodIndex):
            self.data_index = obj.index.to_timestamp()
        # pre-render datetime index labels when a date_format is given
        if (isinstance(self.data_index, DatetimeIndex) and
                date_format is not None):
            self.data_index = Index([x.strftime(date_format)
                                     if notnull(x) else ''
                                     for x in self.data_index])
        self.nlevels = getattr(self.data_index, 'nlevels', 1)
        if not index:
            self.nlevels = 0
    # original python implem. of df.to_csv
    # invoked by df.to_csv(engine=python)
    def _helper_csv(self, writer, na_rep=None, cols=None,
                    header=True, index=True,
                    index_label=None, float_format=None, date_format=None):
        """Deprecated row-by-row pure-python writer (``engine='python'``)."""
        if cols is None:
            cols = self.columns
        has_aliases = isinstance(header, (tuple, list, np.ndarray, Index))
        if has_aliases or header:
            if index:
                # should write something for index label
                if index_label is not False:
                    if index_label is None:
                        if isinstance(self.obj.index, MultiIndex):
                            index_label = []
                            for i, name in enumerate(self.obj.index.names):
                                if name is None:
                                    name = ''
                                index_label.append(name)
                        else:
                            index_label = self.obj.index.name
                            if index_label is None:
                                index_label = ['']
                            else:
                                index_label = [index_label]
                    elif not isinstance(index_label,
                                        (list, tuple, np.ndarray, Index)):
                        # given a string for a DF with Index
                        index_label = [index_label]
                    encoded_labels = list(index_label)
                else:
                    encoded_labels = []
                if has_aliases:
                    if len(header) != len(cols):
                        raise ValueError(('Writing %d cols but got %d aliases'
                                          % (len(cols), len(header))))
                    else:
                        write_cols = header
                else:
                    write_cols = cols
                encoded_cols = list(write_cols)
                writer.writerow(encoded_labels + encoded_cols)
            else:
                encoded_cols = list(cols)
                writer.writerow(encoded_cols)
        if date_format is None:
            date_formatter = lambda x: Timestamp(x)._repr_base
        else:
            # NOTE: returns None (written as empty) for null timestamps
            def strftime_with_nulls(x):
                x = Timestamp(x)
                if notnull(x):
                    return x.strftime(date_format)
            date_formatter = lambda x: strftime_with_nulls(x)
        data_index = self.obj.index
        if isinstance(self.obj.index, PeriodIndex):
            data_index = self.obj.index.to_timestamp()
        if isinstance(data_index, DatetimeIndex) and date_format is not None:
            data_index = Index([date_formatter(x) for x in data_index])
        values = self.obj.copy()
        values.index = data_index
        values.columns = values.columns.to_native_types(
            na_rep=na_rep,
            float_format=float_format,
            date_format=date_format,
            quoting=self.quoting)
        values = values[cols]
        series = {}
        for k, v in compat.iteritems(values._series):
            series[k] = v.values
        nlevels = getattr(data_index, 'nlevels', 1)
        for j, idx in enumerate(data_index):
            row_fields = []
            if index:
                if nlevels == 1:
                    row_fields = [idx]
                else:  # handle MultiIndex
                    row_fields = list(idx)
            for i, col in enumerate(cols):
                val = series[col][j]
                if lib.checknull(val):
                    val = na_rep
                if float_format is not None and com.is_float(val):
                    val = float_format % val
                elif isinstance(val, (np.datetime64, Timestamp)):
                    val = date_formatter(val)
                row_fields.append(val)
            writer.writerow(row_fields)
    def save(self):
        """Open the output handle if needed, build the csv writer and
        dispatch to the python or native save path."""
        # create the writer & save
        if hasattr(self.path_or_buf, 'write'):
            f = self.path_or_buf
            close = False
        else:
            f = com._get_handle(self.path_or_buf, self.mode,
                                encoding=self.encoding)
            close = True
        try:
            writer_kwargs = dict(lineterminator=self.line_terminator,
                                 delimiter=self.sep, quoting=self.quoting,
                                 doublequote=self.doublequote,
                                 escapechar=self.escapechar,
                                 quotechar=self.quotechar)
            if self.encoding is not None:
                writer_kwargs['encoding'] = self.encoding
                self.writer = com.UnicodeWriter(f, **writer_kwargs)
            else:
                self.writer = csv.writer(f, **writer_kwargs)
            if self.engine == 'python':
                # to be removed in 0.13
                self._helper_csv(self.writer, na_rep=self.na_rep,
                                 float_format=self.float_format,
                                 cols=self.cols, header=self.header,
                                 index=self.index,
                                 index_label=self.index_label,
                                 date_format=self.date_format)
            else:
                self._save()
        finally:
            # only close handles we opened ourselves
            if close:
                f.close()
    def _save_header(self):
        """Write the header row(s): index labels plus column names (one
        row per level when the columns are a MultiIndex)."""
        writer = self.writer
        obj = self.obj
        index_label = self.index_label
        cols = self.cols
        has_mi_columns = self.has_mi_columns
        header = self.header
        encoded_labels = []
        has_aliases = isinstance(header, (tuple, list, np.ndarray, Index))
        if not (has_aliases or self.header):
            return
        if has_aliases:
            if len(header) != len(cols):
                raise ValueError(('Writing %d cols but got %d aliases'
                                  % (len(cols), len(header))))
            else:
                write_cols = header
        else:
            write_cols = cols
        if self.index:
            # should write something for index label
            if index_label is not False:
                if index_label is None:
                    if isinstance(obj.index, MultiIndex):
                        index_label = []
                        for i, name in enumerate(obj.index.names):
                            if name is None:
                                name = ''
                            index_label.append(name)
                    else:
                        index_label = obj.index.name
                        if index_label is None:
                            index_label = ['']
                        else:
                            index_label = [index_label]
                elif not isinstance(index_label, (list, tuple, np.ndarray, Index)):
                    # given a string for a DF with Index
                    index_label = [index_label]
                encoded_labels = list(index_label)
            else:
                encoded_labels = []
        if not has_mi_columns:
            encoded_labels += list(write_cols)
        # write out the mi
        if has_mi_columns:
            columns = obj.columns
            # write out the names for each level, then ALL of the values for
            # each level
            for i in range(columns.nlevels):
                # we need at least 1 index column to write our col names
                col_line = []
                if self.index:
                    # name is the first column
                    col_line.append(columns.names[i])
                    if isinstance(index_label, list) and len(index_label) > 1:
                        col_line.extend([''] * (len(index_label) - 1))
                col_line.extend(columns.get_level_values(i))
                writer.writerow(col_line)
            # add blanks for the columns, so that we
            # have consistent seps
            encoded_labels.extend([''] * len(columns))
        # write out the index label line
        writer.writerow(encoded_labels)
    def _save(self):
        """Write the header, then the body in chunksize-row bites."""
        self._save_header()
        nrows = len(self.data_index)
        # write in chunksize bites
        chunksize = self.chunksize
        chunks = int(nrows / chunksize) + 1
        for i in range(chunks):
            start_i = i * chunksize
            end_i = min((i + 1) * chunksize, nrows)
            if start_i >= end_i:
                break
            self._save_chunk(start_i, end_i)
    def _save_chunk(self, start_i, end_i):
        """Render rows [start_i, end_i) to native (string) types, column by
        block, and hand them to the C row writer."""
        data_index = self.data_index
        # create the data for a chunk
        slicer = slice(start_i, end_i)
        for i in range(len(self.blocks)):
            b = self.blocks[i]
            d = b.to_native_types(slicer=slicer,
                                  na_rep=self.na_rep,
                                  float_format=self.float_format,
                                  decimal=self.decimal,
                                  date_format=self.date_format,
                                  quoting=self.quoting)
            for col_loc, col in zip(b.mgr_locs, d):
                # self.data is a preallocated list
                self.data[col_loc] = col
        ix = data_index.to_native_types(slicer=slicer,
                                        na_rep=self.na_rep,
                                        float_format=self.float_format,
                                        date_format=self.date_format,
                                        quoting=self.quoting)
        lib.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer)
# from collections import namedtuple
# ExcelCell = namedtuple("ExcelCell",
# 'row, col, val, style, mergestart, mergeend')
class ExcelCell(object):
    """One spreadsheet cell: position (row/col), value, optional style dict
    and an optional merge range (mergestart/mergeend endpoints)."""
    __fields__ = ('row', 'col', 'val', 'style', 'mergestart', 'mergeend')
    __slots__ = __fields__

    def __init__(self, row, col, val,
                 style=None, mergestart=None, mergeend=None):
        # assign every slot in declaration order
        supplied = (row, col, val, style, mergestart, mergeend)
        for attr, value in zip(self.__fields__, supplied):
            setattr(self, attr, value)
# Default style applied to header cells in Excel output: bold text, thin
# borders on all four sides, horizontally centered and top-aligned.
header_style = {"font": {"bold": True},
                "borders": {"top": "thin",
                            "right": "thin",
                            "bottom": "thin",
                            "left": "thin"},
                "alignment": {"horizontal": "center", "vertical": "top"}}
class ExcelFormatter(object):
    """
    Class for formatting a DataFrame to a list of ExcelCells.

    Parameters
    ----------
    df : dataframe
    na_rep : na representation
    float_format : string, default None
        Format string for floating point numbers
    cols : sequence, optional
        Columns to write
    header : boolean or list of string, default True
        Write out column names. If a list of string is given it is
        assumed to be aliases for the column names
    index : boolean, default True
        output row names (index)
    index_label : string or sequence, default None
        Column label for index column(s) if desired. If None is given, and
        `header` and `index` are True, then the index names are used. A
        sequence should be given if the DataFrame uses MultiIndex.
    merge_cells : boolean, default False
        Format MultiIndex and Hierarchical Rows as merged cells.
    inf_rep : string, default `'inf'`
        representation for np.inf values (which aren't representable in Excel)
        A `'-'` sign will be added in front of -inf.
    """
    def __init__(self, df, na_rep='', float_format=None, cols=None,
                 header=True, index=True, index_label=None, merge_cells=False,
                 inf_rep='inf'):
        self.df = df
        # running output row; advanced as header/body rows are generated
        self.rowcounter = 0
        self.na_rep = na_rep
        self.columns = cols
        if cols is None:
            self.columns = df.columns
        self.float_format = float_format
        self.index = index
        self.index_label = index_label
        self.header = header
        self.merge_cells = merge_cells
        self.inf_rep = inf_rep
    def _format_value(self, val):
        """Map a raw cell value to what Excel should store: na_rep for
        nulls, inf_rep for infinities, float_format applied to floats."""
        if lib.checknull(val):
            val = self.na_rep
        elif com.is_float(val):
            if np.isposinf(val):
                val = self.inf_rep
            elif np.isneginf(val):
                val = '-%s' % self.inf_rep
            elif self.float_format is not None:
                val = float(self.float_format % val)
        return val
    def _format_header_mi(self):
        """Yield header cells for MultiIndex columns, either as merged
        cells per label group or dot-joined in the legacy style."""
        has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
        if not(has_aliases or self.header):
            return
        columns = self.columns
        level_strs = columns.format(sparsify=True, adjoin=False, names=False)
        level_lengths = _get_level_lengths(level_strs)
        coloffset = 0
        lnum = 0
        # leave room for the (multi-)index columns on the left
        if self.index and isinstance(self.df.index, MultiIndex):
            coloffset = len(self.df.index[0]) - 1
        if self.merge_cells:
            # Format multi-index as a merged cells.
            for lnum in range(len(level_lengths)):
                name = columns.names[lnum]
                yield ExcelCell(lnum, coloffset, name, header_style)
            for lnum, (spans, levels, labels) in enumerate(zip(level_lengths,
                                                               columns.levels,
                                                               columns.labels)
                                                           ):
                values = levels.take(labels)
                for i in spans:
                    if spans[i] > 1:
                        yield ExcelCell(lnum,
                                        coloffset + i + 1,
                                        values[i],
                                        header_style,
                                        lnum,
                                        coloffset + i + spans[i])
                    else:
                        yield ExcelCell(lnum,
                                        coloffset + i + 1,
                                        values[i],
                                        header_style)
        else:
            # Format in legacy format with dots to indicate levels.
            for i, values in enumerate(zip(*level_strs)):
                v = ".".join(map(com.pprint_thing, values))
                yield ExcelCell(lnum, coloffset + i + 1, v, header_style)
        self.rowcounter = lnum
    def _format_header_regular(self):
        """Yield one header cell per column for flat columns."""
        has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
        if has_aliases or self.header:
            coloffset = 0
            if self.index:
                coloffset = 1
                if isinstance(self.df.index, MultiIndex):
                    coloffset = len(self.df.index[0])
            colnames = self.columns
            if has_aliases:
                if len(self.header) != len(self.columns):
                    raise ValueError(('Writing %d cols but got %d aliases'
                                      % (len(self.columns), len(self.header))))
                else:
                    colnames = self.header
            for colindex, colname in enumerate(colnames):
                yield ExcelCell(self.rowcounter, colindex + coloffset, colname,
                                header_style)
    def _format_header(self):
        """Chain the column-header cells with an optional index-names row
        (emitted only when every entry of that row is non-empty)."""
        if isinstance(self.columns, MultiIndex):
            gen = self._format_header_mi()
        else:
            gen = self._format_header_regular()
        gen2 = ()
        if self.df.index.names:
            row = [x if x is not None else ''
                   for x in self.df.index.names] + [''] * len(self.columns)
            if reduce(lambda x, y: x and y, map(lambda x: x != '', row)):
                gen2 = (ExcelCell(self.rowcounter, colindex, val, header_style)
                        for colindex, val in enumerate(row))
                self.rowcounter += 1
        return itertools.chain(gen, gen2)
    def _format_body(self):
        """Dispatch body formatting on the index type."""
        if isinstance(self.df.index, MultiIndex):
            return self._format_hierarchical_rows()
        else:
            return self._format_regular_rows()
    def _format_regular_rows(self):
        """Yield body cells (index column first, then data columns) for a
        flat index."""
        has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
        if has_aliases or self.header:
            self.rowcounter += 1
        coloffset = 0
        # output index and index_label?
        if self.index:
            # chek aliases
            # if list only take first as this is not a MultiIndex
            if self.index_label and isinstance(self.index_label,
                                               (list, tuple, np.ndarray, Index)):
                index_label = self.index_label[0]
            # if string good to go
            elif self.index_label and isinstance(self.index_label, str):
                index_label = self.index_label
            else:
                index_label = self.df.index.names[0]
            if index_label and self.header is not False:
                if self.merge_cells:
                    yield ExcelCell(self.rowcounter,
                                    0,
                                    index_label,
                                    header_style)
                    self.rowcounter += 1
                else:
                    yield ExcelCell(self.rowcounter - 1,
                                    0,
                                    index_label,
                                    header_style)
            # write index_values
            index_values = self.df.index
            if isinstance(self.df.index, PeriodIndex):
                index_values = self.df.index.to_timestamp()
            coloffset = 1
            for idx, idxval in enumerate(index_values):
                yield ExcelCell(self.rowcounter + idx, 0, idxval, header_style)
        # Get a frame that will account for any duplicates in the column names.
        col_mapped_frame = self.df.loc[:, self.columns]
        # Write the body of the frame data series by series.
        for colidx in range(len(self.columns)):
            series = col_mapped_frame.iloc[:, colidx]
            for i, val in enumerate(series):
                yield ExcelCell(self.rowcounter + i, colidx + coloffset, val)
    def _format_hierarchical_rows(self):
        """Yield body cells for a MultiIndex, merging repeated index labels
        into rowspans when merge_cells is on."""
        has_aliases = isinstance(self.header, (tuple, list, np.ndarray, Index))
        if has_aliases or self.header:
            self.rowcounter += 1
        gcolidx = 0
        if self.index:
            index_labels = self.df.index.names
            # check for aliases
            if self.index_label and isinstance(self.index_label,
                                               (list, tuple, np.ndarray, Index)):
                index_labels = self.index_label
            # if index labels are not empty go ahead and dump
            if (any(x is not None for x in index_labels)
                    and self.header is not False):
                if not self.merge_cells:
                    self.rowcounter -= 1
                for cidx, name in enumerate(index_labels):
                    yield ExcelCell(self.rowcounter,
                                    cidx,
                                    name,
                                    header_style)
                self.rowcounter += 1
            if self.merge_cells:
                # Format hierarchical rows as merged cells.
                level_strs = self.df.index.format(sparsify=True, adjoin=False,
                                                  names=False)
                level_lengths = _get_level_lengths(level_strs)
                for spans, levels, labels in zip(level_lengths,
                                                 self.df.index.levels,
                                                 self.df.index.labels):
                    values = levels.take(labels)
                    for i in spans:
                        if spans[i] > 1:
                            yield ExcelCell(self.rowcounter + i,
                                            gcolidx,
                                            values[i],
                                            header_style,
                                            self.rowcounter + i + spans[i] - 1,
                                            gcolidx)
                        else:
                            yield ExcelCell(self.rowcounter + i,
                                            gcolidx,
                                            values[i],
                                            header_style)
                    gcolidx += 1
            else:
                # Format hierarchical rows with non-merged values.
                for indexcolvals in zip(*self.df.index):
                    for idx, indexcolval in enumerate(indexcolvals):
                        yield ExcelCell(self.rowcounter + idx,
                                        gcolidx,
                                        indexcolval,
                                        header_style)
                    gcolidx += 1
        # Get a frame that will account for any duplicates in the column names.
        col_mapped_frame = self.df.loc[:, self.columns]
        # Write the body of the frame data series by series.
        for colidx in range(len(self.columns)):
            series = col_mapped_frame.iloc[:, colidx]
            for i, val in enumerate(series):
                yield ExcelCell(self.rowcounter + i, gcolidx + colidx, val)
    def get_formatted_cells(self):
        """Yield every header and body cell with its value normalized
        through _format_value."""
        for cell in itertools.chain(self._format_header(),
                                    self._format_body()):
            cell.val = self._format_value(cell.val)
            yield cell
# ----------------------------------------------------------------------
# Array formatters
def format_array(values, formatter, float_format=None, na_rep='NaN',
                 digits=None, space=None, justify='right'):
    """Format an array of values for console display.

    Picks a dtype-appropriate formatter class (first match wins, in the
    same order as before: float, integer, datetime64, timedelta64, then
    generic) and fills any unspecified options from the display config.
    """
    dispatch = ((com.is_float_dtype, FloatArrayFormatter),
                (com.is_integer_dtype, IntArrayFormatter),
                (com.is_datetime64_dtype, Datetime64Formatter),
                (com.is_timedelta64_dtype, Timedelta64Formatter))
    fmt_klass = GenericArrayFormatter
    for check, klass in dispatch:
        if check(values.dtype):
            fmt_klass = klass
            break

    # defaults come from the global display options
    if space is None:
        space = get_option("display.column_space")
    if float_format is None:
        float_format = get_option("display.float_format")
    if digits is None:
        digits = get_option("display.precision")

    fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep,
                        float_format=float_format, formatter=formatter,
                        space=space, justify=justify)
    return fmt_obj.get_result()
class GenericArrayFormatter(object):
    """Base array formatter: renders values to a list of equal-width
    strings. Subclasses override ``_format_strings`` for dtype-specific
    behavior.
    """
    def __init__(self, values, digits=7, formatter=None, na_rep='NaN',
                 space=12, float_format=None, justify='right'):
        self.values = values
        self.digits = digits
        self.na_rep = na_rep
        self.space = space
        self.formatter = formatter
        self.float_format = float_format
        self.justify = justify
    def get_result(self):
        """Format the values and pad them to a common fixed width."""
        fmt_values = self._format_strings()
        return _make_fixed_width(fmt_values, self.justify)
    def _format_strings(self):
        """Return one string per value. Floats use float_format; all other
        values get a leading space so they line up with signed floats."""
        if self.float_format is None:
            float_format = get_option("display.float_format")
            if float_format is None:
                fmt_str = '%% .%dg' % get_option("display.precision")
                float_format = lambda x: fmt_str % x
        else:
            float_format = self.float_format
        formatter = self.formatter if self.formatter is not None else \
            (lambda x: com.pprint_thing(x, escape_chars=('\t', '\r', '\n')))
        def _format(x):
            if self.na_rep is not None and lib.checknull(x):
                # None is shown literally; other nulls use na_rep
                if x is None:
                    return 'None'
                return self.na_rep
            elif isinstance(x, PandasObject):
                return '%s' % x
            else:
                # object dtype
                return '%s' % formatter(x)
        vals = self.values
        # elementwise mask: which entries are actual (non-null) floats?
        is_float = lib.map_infer(vals, com.is_float) & notnull(vals)
        leading_space = is_float.any()
        fmt_values = []
        for i, v in enumerate(vals):
            if not is_float[i] and leading_space:
                fmt_values.append(' %s' % _format(v))
            elif is_float[i]:
                fmt_values.append(float_format(v))
            else:
                fmt_values.append(' %s' % _format(v))
        return fmt_values
class FloatArrayFormatter(GenericArrayFormatter):
    """Formatter for float arrays: fixed-point at the configured precision,
    falling back to scientific notation when values are too wide or too
    small to render legibly.
    """
    def __init__(self, *args, **kwargs):
        GenericArrayFormatter.__init__(self, *args, **kwargs)
        # a user-supplied float_format doubles as the per-element formatter
        if self.float_format is not None and self.formatter is None:
            self.formatter = self.float_format
    def _format_with(self, fmt_str):
        """Apply *fmt_str* to every value, chopping magnitudes at or below
        display.chop_threshold to zero, then trim trailing zeros."""
        def _val(x, threshold):
            if notnull(x):
                if (threshold is None or
                    abs(x) > get_option("display.chop_threshold")):
                    return fmt_str % x
                else:
                    if fmt_str.endswith("e"):  # engineering format
                        return "0"
                    else:
                        return fmt_str % 0
            else:
                return self.na_rep
        threshold = get_option("display.chop_threshold")
        fmt_values = [_val(x, threshold) for x in self.values]
        return _trim_zeros(fmt_values, self.na_rep)
    def _format_strings(self):
        """Fixed-point first; switch to scientific notation if the result
        is too wide (with large values present) or values underflow the
        precision."""
        if self.formatter is not None:
            fmt_values = [self.formatter(x) for x in self.values]
        else:
            fmt_str = '%% .%df' % (self.digits - 1)
            fmt_values = self._format_with(fmt_str)
            if len(fmt_values) > 0:
                maxlen = max(len(x) for x in fmt_values)
            else:
                maxlen = 0
            too_long = maxlen > self.digits + 5
            abs_vals = np.abs(self.values)
            # this is pretty arbitrary for now
            has_large_values = (abs_vals > 1e8).any()
            has_small_values = ((abs_vals < 10 ** (-self.digits+1)) &
                                (abs_vals > 0)).any()
            if too_long and has_large_values:
                fmt_str = '%% .%de' % (self.digits - 1)
                fmt_values = self._format_with(fmt_str)
            elif has_small_values:
                fmt_str = '%% .%de' % (self.digits - 1)
                fmt_values = self._format_with(fmt_str)
        return fmt_values
class IntArrayFormatter(GenericArrayFormatter):
    """Formatter for integer arrays: '% d' keeps a sign slot so columns
    align with negative values."""

    def _format_strings(self):
        render = self.formatter or (lambda v: '% d' % v)
        return [render(v) for v in self.values]
class Datetime64Formatter(GenericArrayFormatter):
    """Formatter for datetime64 arrays.

    Parameters
    ----------
    values : array-like of datetime64 values (or a DatetimeIndex)
    nat_rep : string used to represent NaT entries, default 'NaT'
    date_format : optional strftime format applied to each timestamp
    """
    def __init__(self, values, nat_rep='NaT', date_format=None, **kwargs):
        super(Datetime64Formatter, self).__init__(values, **kwargs)
        self.nat_rep = nat_rep
        self.date_format = date_format
    def _format_strings(self):
        """Return the list of formatted strings for ``self.values``."""
        # we may have a tz, if so, then need to process element-by-element
        # when DatetimeBlockWithTimezones is a reality this could be fixed
        values = self.values
        if not isinstance(values, DatetimeIndex):
            values = DatetimeIndex(values)
        if values.tz is None:
            # tz-naive: fast vectorized path
            fmt_values = format_array_from_datetime(values.asi8.ravel(),
                            format=_get_format_datetime64_from_values(values, self.date_format),
                            na_rep=self.nat_rep).reshape(values.shape)
            fmt_values = fmt_values.tolist()
        else:
            values = values.asobject
            is_dates_only = _is_dates_only(values)
            # BUG FIX: `values` used to be passed positionally into the
            # `nat_rep` parameter of _get_format_datetime64 (see its
            # signature), so NaT entries in tz-aware data were rendered as
            # the repr of the whole array instead of self.nat_rep.
            formatter = (self.formatter or
                         _get_format_datetime64(is_dates_only,
                                                nat_rep=self.nat_rep,
                                                date_format=self.date_format))
            fmt_values = [formatter(x) for x in self.values]
        return fmt_values
def _is_dates_only(values):
# return a boolean if we are only dates (and don't have a timezone)
values = DatetimeIndex(values)
if values.tz is not None:
return False
values_int = values.asi8
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0
if even_days:
return True
return False
def _format_datetime64(x, tz=None, nat_rep='NaT'):
    """Render one datetime value as a full Timestamp string, or *nat_rep*
    when the value is missing."""
    if x is None or lib.checknull(x):
        return nat_rep
    # localize / box anything that is not already a Timestamp
    needs_boxing = tz is not None or not isinstance(x, Timestamp)
    if needs_boxing:
        x = Timestamp(x, tz=tz)
    return str(x)
def _format_datetime64_dateonly(x, nat_rep='NaT', date_format=None):
    """Render one datetime value as a date-only string (no time part),
    or *nat_rep* when the value is missing."""
    if x is None or lib.checknull(x):
        return nat_rep
    ts = x if isinstance(x, Timestamp) else Timestamp(x)
    # a caller-supplied strftime pattern wins over the plain date repr
    return ts.strftime(date_format) if date_format else ts._date_repr
def _get_format_datetime64(is_dates_only, nat_rep='NaT', date_format=None):
    """Return a per-element datetime formatter: date-only rendering when
    *is_dates_only*, otherwise the full timestamp renderer."""
    if is_dates_only:
        def _fmt(x, tz=None):
            return _format_datetime64_dateonly(x, nat_rep=nat_rep,
                                               date_format=date_format)
    else:
        def _fmt(x, tz=None):
            return _format_datetime64(x, tz=tz, nat_rep=nat_rep)
    return _fmt
def _get_format_datetime64_from_values(values, date_format):
    """Given values and a date_format, return the strftime string to use:
    the supplied format (or ISO date) for date-only data, else None."""
    if _is_dates_only(values):
        return date_format or "%Y-%m-%d"
    return None
class Timedelta64Formatter(GenericArrayFormatter):
    """Formatter for timedelta64 arrays.

    nat_rep : string used for NaT entries
    box : when True, wrap each formatted value in single quotes
    """

    def __init__(self, values, nat_rep='NaT', box=False, **kwargs):
        super(Timedelta64Formatter, self).__init__(values, **kwargs)
        self.nat_rep = nat_rep
        self.box = box

    def _format_strings(self):
        render = self.formatter or _get_format_timedelta64(
            self.values, nat_rep=self.nat_rep, box=self.box)
        return [render(x) for x in self.values]
def _get_format_timedelta64(values, nat_rep='NaT', box=False):
    """
    Return a formatter function for a range of timedeltas.
    These will all have the same format argument.

    If box, then show the return in quotes.
    """
    as_int = values.astype(np.int64)
    valid = as_int != iNaT
    nanos_per_day = 86400 * 1e9

    # choose one repr granularity for the whole array
    has_intraday = np.logical_and(valid, as_int % nanos_per_day != 0).sum() > 0
    has_full_day = np.logical_and(valid, np.abs(as_int) >= nanos_per_day).sum() > 0
    if not has_intraday:
        td_format = 'even_day'
    elif not has_full_day:
        td_format = 'sub_day'
    else:
        td_format = 'long'

    def _formatter(x):
        if x is None or lib.checknull(x):
            return nat_rep
        if not isinstance(x, Timedelta):
            x = Timedelta(x)
        result = x._repr_base(format=td_format)
        if box:
            result = "'{0}'".format(result)
        return result

    return _formatter
def _make_fixed_width(strings, justify='right', minimum=None):
    """Pad *strings* to a common width; beyond display.max_colwidth the
    overflow is truncated with a trailing '...'."""
    if len(strings) == 0 or justify == 'all':
        return strings

    _strlen = _strlen_func()
    width = np.max([_strlen(s) for s in strings])
    if minimum is not None:
        width = max(minimum, width)

    # cap the column width at the configured maximum, if one is set
    conf_max = get_option("display.max_colwidth")
    if conf_max is not None and width > conf_max:
        width = conf_max

    if justify == 'left':
        pad = lambda s, n: s.ljust(n)
    else:
        pad = lambda s, n: s.rjust(n)

    def _fit(s):
        # truncate only when a sane cap exists and the string overflows it
        if conf_max is not None and conf_max > 3 and _strlen(s) > width:
            s = s[:width - 3] + '...'
        return pad(s, width)

    return [_fit(s) for s in strings]
def _trim_zeros(str_floats, na_rep='NaN'):
"""
Trims zeros and decimal points.
"""
trimmed = str_floats
def _cond(values):
non_na = [x for x in values if x != na_rep]
return (len(non_na) > 0 and all([x.endswith('0') for x in non_na]) and
not(any([('e' in x) or ('E' in x) for x in non_na])))
while _cond(trimmed):
trimmed = [x[:-1] if x != na_rep else x for x in trimmed]
# trim decimal points
return [x[:-1] if x.endswith('.') and x != na_rep else x for x in trimmed]
def single_column_table(column, align=None, style=None):
    """Render *column* as a one-column HTML table, one value per row;
    optional align/style become attributes on the <table> tag."""
    attrs = ''
    if align is not None:
        attrs += (' align="%s"' % align)
    if style is not None:
        attrs += (' style="%s"' % style)
    rows = ''.join('<tr><td>%s</td></tr>' % str(item) for item in column)
    return '<table%s><tbody>%s</tbody></table>' % (attrs, rows)
def single_row_table(row):  # pragma: no cover
    """Render *row* as a one-row HTML table, one value per cell."""
    cells = ''.join('<td>%s</td>' % str(item) for item in row)
    return '<table><tbody><tr>%s</tr></tbody></table>' % cells
def _has_names(index):
if isinstance(index, MultiIndex):
return any([x is not None for x in index.names])
else:
return index.name is not None
# ------------------------------------------------------------------------------
# Global formatting options

# sys.getdefaultencoding() as first observed by detect_console_encoding();
# recorded because MPL backends may change it later (GH3360) -- debugging aid.
_initial_defencoding = None
def detect_console_encoding():
    """
    Try to find the most capable encoding supported by the console.
    Slightly modified from the way IPython handles the same issue.
    """
    import locale
    global _initial_defencoding

    def _unusable(enc):
        # a missing or ascii-ish encoding means "keep looking"
        return not enc or 'ascii' in enc.lower()

    encoding = None
    try:
        encoding = sys.stdout.encoding or sys.stdin.encoding
    except AttributeError:
        pass

    if _unusable(encoding):
        # the streams gave us nothing useful -- ask the locale
        try:
            encoding = locale.getpreferredencoding()
        except Exception:
            pass

    if _unusable(encoding):
        # when all else fails, this will usually be "ascii"
        encoding = sys.getdefaultencoding()

    # GH3360, save the reported defencoding at import time
    # MPL backends may change it. Make available for debugging.
    if not _initial_defencoding:
        _initial_defencoding = sys.getdefaultencoding()

    return encoding
def get_console_size():
    """Return console size as tuple = (width, height).

    Returns (None,None) in non-interactive session.
    """
    display_width = get_option('display.width')
    # deprecated.
    display_height = get_option('display.height', silent=True)
    # Consider
    # interactive shell terminal, can detect term size
    # interactive non-shell terminal (ipnb/ipqtconsole), cannot detect term
    # size non-interactive script, should disregard term size
    # in addition
    # width,height have default values, but setting to 'None' signals
    # should use Auto-Detection, But only in interactive shell-terminal.
    # Simple. yeah.
    if com.in_interactive_session():
        if com.in_ipython_frontend():
            # sane defaults for interactive non-shell terminal
            # match default for width,height in config_init
            from pandas.core.config import get_default_val
            terminal_width = get_default_val('display.width')
            terminal_height = get_default_val('display.height')
        else:
            # pure terminal
            terminal_width, terminal_height = get_terminal_size()
    else:
        terminal_width, terminal_height = None, None
    # Note if the User sets width/Height to None (auto-detection)
    # and we're in a script (non-inter), this will return (None,None)
    # caller needs to deal.
    return (display_width or terminal_width, display_height or terminal_height)
class EngFormatter(object):
    """
    Formats float values according to engineering format.

    Based on matplotlib.ticker.EngFormatter
    """

    # The SI engineering prefixes
    ENG_PREFIXES = {
        -24: "y",
        -21: "z",
        -18: "a",
        -15: "f",
        -12: "p",
        -9: "n",
        -6: "u",
        -3: "m",
        0: "",
        3: "k",
        6: "M",
        9: "G",
        12: "T",
        15: "P",
        18: "E",
        21: "Z",
        24: "Y"
    }

    def __init__(self, accuracy=None, use_eng_prefix=False):
        self.accuracy = accuracy
        self.use_eng_prefix = use_eng_prefix

    def __call__(self, num):
        """ Formats a number in engineering notation, appending a letter
        representing the power of 1000 of the original number. Some examples:

        >>> format_eng(0)       # for self.accuracy = 0
        ' 0'

        >>> format_eng(1000000) # for self.accuracy = 1,
                                #     self.use_eng_prefix = True
        ' 1.0M'

        >>> format_eng("-1e-6") # for self.accuracy = 2
                                #     self.use_eng_prefix = False
        '-1.00E-06'

        @param num: the value to represent
        @type num: either a numeric value or a string that can be converted to
                   a numeric value (as per decimal.Decimal constructor)

        @return: engineering formatted string
        """
        import decimal
        import math

        magnitude = decimal.Decimal(str(num))

        # work on the absolute value, remembering the sign separately
        sign = 1
        if magnitude < 0:  # pragma: no cover
            sign = -1
            magnitude = -magnitude

        if magnitude != 0:
            pow10 = decimal.Decimal(int(math.floor(magnitude.log10() / 3) * 3))
        else:
            pow10 = decimal.Decimal(0)

        # clamp the exponent into the range covered by the SI prefix table
        pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
        pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
        int_pow10 = int(pow10)

        if self.use_eng_prefix:
            prefix = self.ENG_PREFIXES[int_pow10]
        elif int_pow10 < 0:
            prefix = 'E-%02d' % (-int_pow10)
        else:
            prefix = 'E+%02d' % int_pow10

        mant = sign * magnitude / (10 ** pow10)

        if self.accuracy is None:  # pragma: no cover
            format_str = u("% g%s")
        else:
            format_str = (u("%% .%if%%s") % self.accuracy)

        # intentionally not stripped
        return format_str % (mant, prefix)
def set_eng_float_format(accuracy=3, use_eng_prefix=False):
    """
    Alter default behavior on how float is formatted in DataFrame:
    switch to engineering notation with `accuracy` decimal digits,
    optionally using SI letter prefixes.

    See also EngFormatter.
    """
    formatter = EngFormatter(accuracy, use_eng_prefix)
    set_option("display.float_format", formatter)
    # Widen columns enough to fit the formatted values.
    set_option("display.column_space", max(12, accuracy + 9))
def _put_lines(buf, lines):
    # Promote every line to text if any already is, so the join below
    # never mixes bytes and text types.
    if any(isinstance(line, compat.text_type) for line in lines):
        lines = [compat.text_type(line) for line in lines]
    buf.write('\n'.join(lines))
def _binify(cols, line_width):
adjoin_width = 1
bins = []
curr_width = 0
i_last_column = len(cols) - 1
for i, w in enumerate(cols):
w_adjoined = w + adjoin_width
curr_width += w_adjoined
if i_last_column == i:
wrap = curr_width + 1 > line_width and i > 0
else:
wrap = curr_width + 2 > line_width and i > 0
if wrap:
bins.append(i)
curr_width = w_adjoined
bins.append(len(cols))
return bins
if __name__ == '__main__':
    # Ad-hoc manual smoke test for FloatArrayFormatter; prints the formatted
    # output for a sample array.  Not exercised by the test suite.
    arr = np.array([746.03, 0.00, 5620.00, 1592.36])
    # arr = np.array([11111111.1, 1.55])
    # arr = [314200.0034, 1.4125678]
    # NOTE(review): this second assignment overwrites the array above.
    arr = np.array([327763.3119, 345040.9076, 364460.9915, 398226.8688,
                    383800.5172, 433442.9262, 539415.0568, 568590.4108,
                    599502.4276, 620921.8593, 620898.5294, 552427.1093,
                    555221.2193, 519639.7059, 388175.7, 379199.5854,
                    614898.25, 504833.3333, 560600., 941214.2857,
                    1134250., 1219550., 855736.85, 1042615.4286,
                    722621.3043, 698167.1818, 803750.])
    fmt = FloatArrayFormatter(arr, digits=7)
    print(fmt.get_result())
|
JensTimmerman/radical.pilot
|
docs/source/conf.py
|
# -*- coding: utf-8 -*-
#
# RADICAL-Pilot documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 3 21:55:42 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import glob
import imp
import sys
import os
import radical.utils as ru
import pprint
import subprocess as sp
# Absolute directory containing this conf.py; used to anchor generated-file
# paths so the docs build does not depend on the current working directory.
script_dir = os.path.dirname(os.path.realpath(__file__))

################################################################################
# Map the current git branch name onto a documentation tag:
#   readthedocs.tutorial -> tutorial, readthedocs -> release.
cmd = "git branch | grep '*' | cut -f 2 -d \ " \
    + " | sed -e 's/readthedocs.tutorial/tutorial/g' " \
    + " | sed -e 's/readthedocs/release/g'"
mytag = sp.Popen(cmd, shell=True, stdout=sp.PIPE).stdout.read().strip()
# A detached HEAD (e.g. how ReadTheDocs checks out a build) shows up as
# "* (detached from <ref>)" -- extract the ref name instead.
if 'detached' in mytag :
    cmd = "git branch | grep '*' | cut -f 2 -d '/' | cut -f 1 -d ')'" \
        + " | sed -e 's/readthedocs.tutorial/tutorial/g' " \
        + " | sed -e 's/readthedocs/release/g'"
    mytag = sp.Popen(cmd, shell=True, stdout=sp.PIPE).stdout.read().strip()
# NOTE(review): `tags` is not defined in this file -- presumably the
# Sphinx-injected build-tags object available inside conf.py; confirm.
tags.add (mytag)
################################################################################
##
print "* Generating resource configuration docs: resources.rst"
print "* using tag: %s" % mytag
try:
os.remove("{0}/resources.rst".format(script_dir))
except OSError:
pass
with open("{0}/resources.rst".format(script_dir), "w") as resources_rst:
resources_rst.write("""
.. _chapter_resources:
List of Pre-Configured Resources
================================
""")
configs = os.listdir("{0}/../../src/radical/pilot/configs/".format(script_dir))
for config in configs:
if config.endswith(".json") is False:
continue # skip all non-python files
if config.startswith("aliases") is True:
continue # skip alias files
print " * %s" % config
try:
json_data = ru.read_json_str("../../src/radical/pilot/configs/%s" % config)
except Exception, ex:
print " * JSON PARSING ERROR: %s" % str(ex)
continue
resources_rst.write("{0}\n".format(config[:-5].upper()))
resources_rst.write("{0}\n\n".format("="*len(config[:-5])))
for host_key, resource_config in json_data.iteritems():
resource_key = "%s.%s" % (config[:-5], host_key)
print " * %s" % resource_key
try:
default_queue = resource_config["default_queue"]
except Exception, ex:
default_queue = None
try:
working_dir = resource_config["default_remote_workdir"]
except Exception, ex:
working_dir = "$HOME"
try:
python_interpreter = resource_config["python_interpreter"]
except Exception, ex:
python_interpreter = None
try:
access_schemas = resource_config["schemas"]
except Exception, ex:
access_schemas = ['n/a']
resources_rst.write("{0}\n".format(host_key.upper()))
resources_rst.write("{0}\n\n".format("*"*len(host_key)))
resources_rst.write("{0}\n\n".format(resource_config["description"]))
resources_rst.write("* **Resource label** : ``{0}``\n".format(resource_key))
resources_rst.write("* **Raw config** : :download:`{0} <../../src/radical/pilot/configs/{0}>`\n".format(config))
if resource_config["notes"] != "None":
resources_rst.write("* **Note** : {0}\n".format(resource_config["notes"]))
resources_rst.write("* **Default values** for ComputePilotDescription attributes:\n\n")
resources_rst.write(" * ``queue : {0}``\n".format(default_queue))
resources_rst.write(" * ``sandbox : {0}``\n".format(working_dir))
resources_rst.write(" * ``access_schema : {0}``\n\n".format(access_schemas[0]))
resources_rst.write("* **Available schemas** : ``{0}``\n".format(', '.join(access_schemas)))
resources_rst.write("\n")
##
################################################################################
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here.  Made absolute so autodoc can import
# the package regardless of where sphinx-build is invoked from.
sys.path.insert(0, os.path.abspath('../../src/'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Sphinx extension module names: builtin ('sphinx.ext.*') or custom.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.intersphinx',
              'sphinx.ext.todo',
              'sphinx.ext.coverage',
              'sphinx.ext.pngmath',
              'sphinx.ext.mathjax',
              'sphinx.ext.ifconfig',
              'sphinx.ext.viewcode',
              'sphinx.ext.extlinks']

# BUGFIX: a stray `[extensions]` expression used to sit here; it built a
# throwaway one-element list and had no effect, so it was removed.

# Render `.. todo::` directives in the generated output.
todo_include_todos = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

rst_epilog = """
"""

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'RADICAL-Pilot'
copyright = u'2014, The RADICAL Group at Rutgers University'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Import the package itself to pick up the live version string; fall back to a
# placeholder when the package is not importable in the build environment.
try:
    import radical.pilot
    version = radical.pilot.version
    release = radical.pilot.version
except Exception as e:
    print 'Could not determine version: %s' % e
    version = "UNKNOWN"
    release = "UNKNOWN"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_themes"]
# -- General configuration (continued) ------------------------------------------

# Pygments syntax-highlighting style.
pygments_style = 'sphinx'

# Shorthand roles: :issue:`123` links to the GitHub issue tracker.
extlinks = {'issue': ('https://github.com/radical-cybertools/radical.pilot/issues/%s',
                      'issue ')}

# -- Options for HTML output ---------------------------------------------------

# Use the bundled "armstrong" theme from _themes, with a collapsible sidebar.
html_theme = "armstrong"
html_theme_path = ["_themes", ]
html_theme_options = {
    "collapsiblesidebar" : "true",
}

# Custom static files; copied after the builtin static files, so a local
# "default.css" would override the theme's.
html_static_path = ['_static']

# Convert quotes and dashes to typographically correct entities.
html_use_smartypants = True

# Keep the "Created using Sphinx" and copyright lines in the HTML footer.
html_show_sphinx = True
html_show_copyright = True

# Output file base name for the HTML help builder.
htmlhelp_basename = 'radical.pilot.doc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
    # The font size ('10pt', '11pt' or '12pt').
    'pointsize': '10pt',
}

# Grouping the document tree into LaTeX files:
# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'RADICAL-Pilot.tex', u'RADICAL-Pilot Documentation',
     u'The RADICAL Group at Rutgers University', 'manual'),
]

# Show page references after internal links.
latex_show_pagerefs = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'radical.pilot', u'RADICAL-Pilot Documentation',
     [u'The RADICAL Group at Rutgers University'], 1)
]

# -- Options for Texinfo output ------------------------------------------------

# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'RADICAL-Pilot', u'RADICAL-Pilot Documentation',
     u'The RADICAL Group at Rutgers University', 'RADICAL-Pilot', 'One line description of project.',
     'Miscellaneous'),
]

# Cross-reference the Python standard library via intersphinx.
intersphinx_mapping = {'http://docs.python.org/': None}

# Document members in source order, and include members by default.
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members']  # , 'undoc-members', 'show-inheritance'
|
swangui/ggrid
|
foobar.py
|
import os
import webapp2
from mako.template import Template
from mako.lookup import TemplateLookup
class MainHandler(webapp2.RequestHandler):
    """Renders the foobar Mako template with a couple of sample values."""

    def get(self):
        context = {
            'some_foo': 'foo',
            'some_bar': 'bar'
        }
        # The template file lives in the GAE app directory, next to this code.
        template_path = os.path.join(os.path.dirname(__file__),
                                     'templates/foobar.tmpl')
        page = Template(filename=template_path)
        # Expand the context dict into keyword arguments for rendering.
        self.response.out.write(page.render(**context))


app = webapp2.WSGIApplication([('/foobar', MainHandler)], debug=True)
|
glwagner/py2Periodic
|
tests/twoLayerQG/testTwoLayerQG.py
|
import time, sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append('../../')
from py2Periodic.physics import twoLayerQG
from numpy import pi
# Model parameters, passed straight through as keyword arguments to
# twoLayerQG.model below (presumably SI units -- see the py2Periodic
# twoLayerQG documentation to confirm).
params = {
    'f0'         : 1.0e-4,
    'Lx'         : 1.0e6,
    'beta'       : 1.5e-11,
    'defRadius'  : 1.5e4,
    'H1'         : 500.0,
    'H2'         : 2000.0,
    'U1'         : 2.5e-2,
    'U2'         : 0.0,
    'bottomDrag' : 1.0e-7,
    'nx'         : 128,
    'dt'         : 1.0e3,
    'visc'       : 2.0e8,
    'viscOrder'  : 4.0,
    'timeStepper': 'AB3',
    'nThreads'   : 4,
    'useFilter'  : False,
}

# Create the two-layer model
qg = twoLayerQG.model(**params)
qg.describe_model()

# Initial condition:
# random noise in both layer vorticities, scaled by Ro*f0 (Ro acts as a
# Rossby-number-like amplitude -- TODO confirm against the model docs).
Ro = 1.0e-3
f0 = 1.0e-4
q1 = Ro*f0*np.random.standard_normal(qg.physVarShape)
q2 = Ro*f0*np.random.standard_normal(qg.physVarShape)
qg.set_q1_and_q2(q1, q2)

# Run a loop
# NOTE(review): nt is a float (1e3); step_nSteps presumably accepts that.
nt = 1e3
for ii in np.arange(0, 1e3):
    qg.step_nSteps(nSteps=nt, dnLog=nt)
    qg.update_state_variables()
    # Refresh a 2x2 panel: layer vorticities on top, magnitudes of the
    # lower half of the (spectral) solution array below.
    fig = plt.figure('Perturbation vorticity', figsize=(8, 8)); plt.clf()
    plt.subplot(221); plt.imshow(qg.q1)
    plt.subplot(222); plt.imshow(qg.q2)
    plt.subplot(223); plt.imshow(np.abs(qg.soln[0:qg.ny//2, :, 0]))
    plt.subplot(224); plt.imshow(np.abs(qg.soln[0:qg.ny//2, :, 1]))
    plt.pause(0.01), plt.draw()

print("Close the plot to end the program")
plt.show()
|
reviewboard/rbpkg
|
rbpkg/package_manager/tests/test_dep_graph.py
|
from __future__ import unicode_literals
from rbpkg.package_manager.dep_graph import DependencyGraph
from rbpkg.testing.testcases import TestCase
class DependencyGraphTests(TestCase):
    """Unit tests for rbpkg.package_manager.dep_graph.DependencyGraph."""

    def _build(self, edges):
        # Helper: populate a fresh graph from (node, dependencies) pairs.
        graph = DependencyGraph()
        for node, deps in edges:
            graph.add(node, deps)
        return graph

    def test_iter_sorted_simple(self):
        """Testing DependencyGraph.iter_sorted in simple case"""
        graph = self._build([(3, [2]), (2, [1]), (1, [])])
        self.assertEqual(list(graph.iter_sorted()), [1, 2, 3])

    def test_iter_sorted_complex(self):
        """Testing DependencyGraph.iter_sorted with complex dependencies"""
        graph = self._build([
            (5, [9]),
            (12, [9, 6, 15]),
            (15, [9, 2]),
            (9, [14, 20]),
            (6, [14, 2]),
        ])
        self.assertEqual(list(graph.iter_sorted()),
                         [14, 20, 9, 5, 2, 6, 15, 12])

    def test_iter_sorted_circular_ref(self):
        """Testing DependencyGraph.iter_sorted with circular reference"""
        graph = self._build([(1, [2]), (2, [1])])
        self.assertEqual(list(graph.iter_sorted()), [2, 1])
|
mvanveen/cargo
|
cargo/dock.py
|
import docker
from cargo.container import Container
from cargo.image import Image
# this is a hack to get `__getattribute__` working for a few reserved properties
RESERVED_METHODS = ['containers', '_client', 'images', 'info', 'start', 'stop']
class Dock(object):
    """Wrapper class for `docker-py` Client instances.

    Unknown, non-reserved attribute lookups are proxied to the wrapped
    client; the names in RESERVED_METHODS are handled by this class.
    """

    def __init__(self, *args, **kw):
        super(Dock, self).__init__()
        # All arguments are forwarded untouched to docker.Client.
        self._client = docker.Client(*args, **kw)

    def __repr__(self):
        return '<Dock [%s] (%s)>' % (self.base_url, self.version().get('Version'))

    def __getattribute__(self, x):
        client = super(Dock, self).__getattribute__('_client')
        # Proxy to the client unless the attribute is a magic method or one
        # of the reserved names overridden on this class.
        legal = not x.startswith('_') and not(x in RESERVED_METHODS)
        if hasattr(client, x) and legal:
            return client.__getattribute__(x)
        return super(Dock, self).__getattribute__(x)

    @property
    def containers(self):
        """Containers wrapped in Container objects."""
        return [Container(x) for x in self._client.containers()]

    @property
    def _containers(self):
        """Raw container dicts as returned by docker-py."""
        return [x for x in self._client.containers()]

    @property
    def images(self):
        """Images wrapped in Image objects."""
        return [Image(x) for x in self._client.images()]

    @property
    def _images(self):
        """Raw image dicts as returned by docker-py."""
        return [x for x in self._client.images()]

    @property
    def info(self):
        """Raw `docker info` dict."""
        return self._client.info()

    @property
    def total_num_containers(self):
        info = self.info
        return int(info.get('Containers'))

    @property
    def total_num_images(self):
        info = self.info
        return int(info.get('Images'))

    @property
    def total_num_goroutines(self):
        info = self.info
        return int(info.get('NGoroutines'))

    @property
    def memory_limit(self):
        info = self.info
        return info.get('MemoryLimit')

    @property
    def debug(self):
        info = self.info
        return info.get('Debug')

    def running(self, container):
        """Returns True if dock is running container, else False.

        Accepts container id's and Container objects.
        """
        container_ids = [x.container_id for x in self.containers]
        if isinstance(container, Container):
            # BUGFIX: this previously referenced a misspelled name
            # (`containder_ids`) and raised NameError.
            return container.container_id in container_ids
        elif isinstance(container, basestring):
            return container in container_ids
        raise TypeError('expected container id as string or Container object.')

    def _resolve_id(self, container):
        """Return the container id for a Container object or an id string."""
        if isinstance(container, Container):
            return container.container_id
        elif isinstance(container, basestring):
            return container
        # BUGFIX: start()/stop() previously fell through with `cid` unbound,
        # raising UnboundLocalError; raise the same TypeError as running().
        raise TypeError('expected container id as string or Container object.')

    def start(self, container, *args, **kw):
        """Start `container`; extra arguments go to docker-py's start()."""
        return self._client.start(self._resolve_id(container), *args, **kw)

    def stop(self, container, *args, **kw):
        """Stop `container`; extra arguments go to docker-py's stop()."""
        return self._client.stop(self._resolve_id(container), *args, **kw)
|
artnez/faceoff
|
faceoff/helpers/decorators.py
|
"""
Copyright: (c) 2012-2014 Artem Nezvigin <artem@artnez.com>
License: MIT, see LICENSE for details
"""
from functools import wraps
from flask import g, request, session, render_template, url_for, redirect
from faceoff.models.user import find_user
def templated(template_name=None):
    """
    Decorator factory: renders a template named after the current endpoint,
    or `template_name` when given, using the view's returned dict as the
    template context.  None becomes an empty context; any non-dict return
    value passes through untouched.
    """
    def closure(f):
        @wraps(f)
        def decorator(*args, **kwargs):
            result = f(*args, **kwargs)
            if result is None:
                result = {}
            elif not isinstance(result, dict):
                # Not a context dict (e.g. a redirect/response) -- pass through.
                return result
            name = template_name
            if name is None:
                name = '%s.html' % request.endpoint
            return render_template(name, **result)
        return decorator
    return closure
def authenticated(f):
    """
    Decorator that requires a logged-in user: resolves the session's user
    id to a user record, stashes it on `g.current_user`, and redirects to
    the 'gate' endpoint when either step fails.
    """
    @wraps(f)
    def decorator(*args, **kwargs):
        user = None
        user_id = session.get('user_id')
        if user_id is not None:
            user = find_user(id=user_id)
        if user is None:
            # No session, or the session points at a vanished user.
            return redirect(url_for('gate'))
        g.current_user = user
        return f(*args, **kwargs)
    return decorator
|
holmes-app/holmes-alf
|
holmesalf/wrapper.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of holmesalf.
# https://github.com/holmes-app/holmes-alf
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2014 Pablo Aguiar scorphus@gmail.com
from holmesalf import BaseAuthNZWrapper
from alf.client import Client as AlfSyncClient
from tornadoalf.client import Client as AlfAsyncClient
class AlfAuthNZWrapper(BaseAuthNZWrapper):
    """Gathers authentication and authorization for some of the services
    used by Holmes: lazily-built sync and async OAuth 2.0 Bearer clients
    sharing one set of credentials from the config."""

    def __init__(self, config):
        self.config = config
        self._sync_client = None
        self._async_client = None

    def _credentials(self):
        # OAuth settings shared by both client flavours.
        return {
            'token_endpoint': self.config.get('OAUTH_TOKEN_ENDPOINT'),
            'client_id': self.config.get('OAUTH_CLIENT_ID'),
            'client_secret': self.config.get('OAUTH_CLIENT_SECRET'),
        }

    @property
    def sync_client(self):
        """Synchronous OAuth 2.0 Bearer client (created on first access)."""
        if not self._sync_client:
            self._sync_client = AlfSyncClient(**self._credentials())
        return self._sync_client

    @property
    def async_client(self):
        """Asynchronous OAuth 2.0 Bearer client (created on first access)."""
        if not self._async_client:
            self._async_client = AlfAsyncClient(**self._credentials())
        return self._async_client
|
alexwaters/python-readability-api
|
examples/read-bookmarks.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
read-bookmark.py
~~~~~~~~~~~~~~~~
This module is an example of how to harness the Readability API w/ oAuth.
This module expects the following environment variables to be set:
- READABILITY_CONSUMER_KEY
- READABILITY_CONSUMER_SECRET
- READABILITY_ACCESS_TOKEN
- READABILITY_ACCESS_SECRET
Once you have your consumer keys setup, run the following to get your
access tokens::
$ ./login-xauth.py <username> <password>
"""
import sys
from HTMLParser import HTMLParser
from ext import setup_rdd
class MLStripper(HTMLParser):
    """HTMLParser w/ overrides for stripping text out."""

    def __init__(self):
        # NOTE(review): calls self.reset() instead of HTMLParser.__init__;
        # in the Py2 HTMLParser reset() performs the initialization -- confirm
        # before porting to Python 3.
        self.reset()
        # Accumulates the raw text chunks seen between tags.
        self.fed = []

    def handle_data(self, d):
        # Callback invoked by HTMLParser for each run of text outside tags.
        self.fed.append(d)

    def get_data(self):
        # Collected text, joined with single spaces.
        return ' '.join(self.fed)
def strip_tags(html):
    """A super low-tech and debatably irresponsible attempt to turn HTML
    into plain text."""
    stripper = MLStripper()
    stripper.feed(html)
    text = stripper.get_data()

    # Drop runs of newlines and tabs, then every remaining space.
    for pattern in ('\n\n\n\n\n', '\n\n\n\n', '\n\n\n', '\n', '\t'):
        text = text.replace(pattern, '')
    return text.replace(' ', '')
def main():
rdd = setup_rdd()
bookmarks = rdd.get_me().bookmarks(limit=10)
print 'Recent Bookmarks'
print '----------------\n'
for i, mark in enumerate(bookmarks):
print '%01d: %s (%s)' % (i, mark.article.title, mark.article.domain)
try:
selection = raw_input('\nRead Article (0-9)? ')
selection = int(selection)
assert (selection < 10) and (selection >= 0)
except (ValueError, AssertionError):
print >> sys.stderr, '\nEnter a number within 0-9, if you don\'t mind.'
except KeyboardInterrupt:
print >> sys.stderr, '\nWell, fine.'
sys.exit()
article = bookmarks[selection].article
article = rdd.get_article(article.id)
print article.title
print '-' * len(article.title) + '\n'
print strip_tags(article.content)
if __name__ == '__main__':
main()
|
Fizzadar/pyinfra
|
tests/test_api/test_api.py
|
from unittest import TestCase
from paramiko import SSHException
from pyinfra.api import Config, State
from pyinfra.api.connect import connect_all
from pyinfra.api.exceptions import NoGroupError, NoHostError, PyinfraError
from ..paramiko_util import PatchSSHTestCase
from ..util import make_inventory
class TestInventoryApi(TestCase):
    """Exercises inventory construction and host/group lookup."""

    def test_inventory_creation(self):
        inventory = make_inventory()

        # The default fixture yields two hosts
        assert len(inventory.hosts) == 2

        # Host lookup exposes per-host data
        target = inventory.get_host('somehost')
        assert target.data.ssh_user == 'vagrant'

        # Group data round-trips unchanged
        assert inventory.get_group_data('test_group') == {
            'group_data': 'hello world',
        }

    def test_tuple_host_group_inventory_creation(self):
        inventory = make_inventory(
            hosts=[
                ('somehost', {'some_data': 'hello'}),
            ],
            tuple_group=([
                ('somehost', {'another_data': 'world'}),
            ], {
                'tuple_group_data': 'word',
            }),
        )
        target = inventory.get_host('somehost')

        # Host-level, group-member-level and group-level data all land on
        # the host's data object
        assert target.data.some_data == 'hello'
        assert target.data.another_data == 'world'
        assert target.data.tuple_group_data == 'word'

    def test_host_and_group_errors(self):
        inventory = make_inventory()

        # Unknown names raise the dedicated exception types
        with self.assertRaises(NoHostError):
            inventory.get_host('i-dont-exist')
        with self.assertRaises(NoGroupError):
            inventory.get_group('i-dont-exist')
class TestStateApi(PatchSSHTestCase):
    """Exercises State construction with a failure-percentage limit."""

    def test_fail_percent(self):
        # One of the three hosts is rigged to raise SSHException on connect
        inventory = make_inventory((
            'somehost',
            ('thinghost', {'ssh_hostname': SSHException}),
            'anotherhost',
        ))
        state = State(inventory, Config(FAIL_PERCENT=1))

        # A 33% failure rate must trip the 1% limit
        with self.assertRaises(PyinfraError) as context:
            connect_all(state)
        assert context.exception.args[0] == 'Over 1% of hosts failed (33%)'

        # The two healthy hosts still connected
        assert len(state.active_hosts) == 2
|
sigopt/sigopt-python
|
test/cli/test_cli_config.py
|
import click
import mock
import pytest
from click.testing import CliRunner
from sigopt.cli import cli
class TestRunCli(object):
    """Tests for the `sigopt config` CLI command."""

    # Cover all four combinations of the two opt-in flags.
    @pytest.mark.parametrize('opt_into_log_collection', [False, True])
    @pytest.mark.parametrize('opt_into_cell_tracking', [False, True])
    def test_config_command(self, opt_into_log_collection, opt_into_cell_tracking):
        runner = CliRunner()
        log_collection_arg = '--enable-log-collection' if opt_into_log_collection else '--no-enable-log-collection'
        cell_tracking_arg = '--enable-cell-tracking' if opt_into_cell_tracking else '--no-enable-cell-tracking'
        # Stub out persistence so no config file is actually written.
        with mock.patch('sigopt.cli.commands.config._config.persist_configuration_options') as persist_configuration_options:
            result = runner.invoke(cli, [
                'config',
                '--api-token=some_test_token',
                log_collection_arg,
                cell_tracking_arg,
            ])
            # The flags must be forwarded verbatim into the persisted options.
            persist_configuration_options.assert_called_once_with({
                'api_token': 'some_test_token',
                'code_tracking_enabled': opt_into_cell_tracking,
                'log_collection_enabled': opt_into_log_collection,
            })
        assert result.exit_code == 0
        assert result.output == ''
|
cmancone/ezgal
|
tests/zf_grid/test_get_rest_mags.py
|
import unittest
import ezgal.zf_grid
import numpy as np
import math
# I put the test data for the zf_grid tests in
# tests.zf_grid instead of in tests because
# there is a lot of data but it is all
# specific for this test.
import tests.zf_grid
class test_get_rest_mags(tests.zf_grid.test_zf_grid):
    """Tests for zf_grid.get_rest_mags (the fixture grid/redshifts come
    from the tests.zf_grid base class)."""

    def test_get_rest_mags(self):
        # Magnitudes at the fixture redshifts; 1e-4 is np.allclose's rtol.
        self.assertTrue(np.allclose(
            self.zf_grid.get_rest_mags(tests.zf_grid.test_zs),
            [0.275, 0.75, 1.25, 1.75, 2.25, 2.778], 1e-4))

    def test_get_rest_mags_lower_bound(self):
        # if we go lower than our lowest grided z then
        # we should get a nan
        vals = self.zf_grid.get_rest_mags([-1])
        self.assertTrue(math.isnan(vals[0]))

    def test_get_rest_mags_upper_bound(self):
        # likewise, above the highest grided z we should get a nan
        vals = self.zf_grid.get_rest_mags([4])
        self.assertTrue(math.isnan(vals[0]))

if __name__ == '__main__':
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.