text stringlengths 0 1.05M | meta dict |
|---|---|
# A small SOCP formulation of minimizing the p-norm of a vector subject to
# constraints (in this trivial case, maximize the two-norm subject to the
# entire vector being fixed). This example originally caused the ninth SOCP
# IPM KKT system to be excessively ill-conditioned and was reported by
# Steven Diamond via CVXPY. The original script was:
#
# from cvxpy import *
# x = Variable(3)
# prob = Problem(Maximize(pnorm(x,.5)),[x==[1.1,2,.1]])
# prob.solve(verbose=True, solver=ELEMENTAL)
#
# which should have a solution of roughly 7.72423. The problem data used
# in the following script can be found by then running
#
# data = prob.get_problem_data(ELEMENTAL)
# print( data['A'] )
# print( data['G'] )
# print( data['b'] )
# print( data['c'] )
# print( data['h'] )
#
# which has three SOCs of size three.
import El

# Start Elemental's (MPI-backed) runtime before creating distributed objects.
El.Initialize()

# Cone metadata for three second-order cones, each of size three:
#   orders[i]    - size of the cone containing entry i
#   firstInds[i] - index of the first entry of that cone
#   labels[i]    - ordinal label of the cone containing entry i
orders = El.DistMultiVec(El.iTag)
firstInds = El.DistMultiVec(El.iTag)
labels = El.DistMultiVec(El.iTag)
orders.Resize(9,1)
firstInds.Resize(9,1)
labels.Resize(9,1)
for c in xrange(0,3):  # NOTE: xrange makes this script Python-2 only
    for i in xrange(0,3):
        orders.Set(3*c+i,0,3)
        firstInds.Set(3*c+i,0,3*c)
        labels.Set(3*c+i,0,c)

# Equality-constraint matrix A (4 x 7) with 7 nonzeros (from CVXPY, see above).
A = El.DistSparseMatrix(El.dTag)
A.Resize(4,7)
A.Reserve(7)
A.QueueUpdate( 1, 0, 1 )
A.QueueUpdate( 2, 1, 1 )
A.QueueUpdate( 3, 2, 1 )
A.QueueUpdate( 0, 3, -1 )
A.QueueUpdate( 0, 4, 1 )
A.QueueUpdate( 0, 5, 1 )
A.QueueUpdate( 0, 6, 1 )
A.ProcessLocalQueues()

# Cone-constraint matrix G (9 x 7) with 15 nonzeros.
G = El.DistSparseMatrix(El.dTag)
G.Resize(9,7)
G.Reserve(15)
G.QueueUpdate( 0, 0, -1 )
G.QueueUpdate( 1, 0, -1 )
G.QueueUpdate( 3, 1, -1 )
G.QueueUpdate( 4, 1, -1 )
G.QueueUpdate( 6, 2, -1 )
G.QueueUpdate( 7, 2, -1 )
G.QueueUpdate( 0, 3, -1 )
G.QueueUpdate( 1, 3, 1 )
G.QueueUpdate( 3, 3, -1 )
G.QueueUpdate( 4, 3, 1 )
G.QueueUpdate( 6, 3, -1 )
G.QueueUpdate( 7, 3, 1 )
G.QueueUpdate( 2, 4, -2 )
G.QueueUpdate( 5, 5, -2 )
G.QueueUpdate( 8, 6, -2 )
G.ProcessLocalQueues()

# Equality right-hand side b = [0, 1.1, 2, 0.1] (fixes x to the given vector).
b = El.DistMultiVec(El.dTag)
b.Resize(4,1)
b.Set(0,0,-0.0)
b.Set(1,0, 1.1)
b.Set(2,0, 2.0)
b.Set(3,0, 0.1)

# Objective vector c: only the epigraph variable (index 3) is minimized.
c = El.DistMultiVec(El.dTag)
c.Resize(7,1)
c.Set(0,0, 0)
c.Set(1,0, 0)
c.Set(2,0, 0)
c.Set(3,0,-1)
c.Set(4,0, 0)
c.Set(5,0, 0)
c.Set(6,0, 0)

# Cone right-hand side h = 0.
h = El.DistMultiVec(El.dTag)
El.Zeros( h, 9, 1 )

# Primal/dual solution vectors, filled in by the solver.
x = El.DistMultiVec()
y = El.DistMultiVec()
z = El.DistMultiVec()
s = El.DistMultiVec()

# Verbose IPM settings so the (historically ill-conditioned) KKT solves
# can be observed.
ctrl = El.SOCPAffineCtrl_d()
ctrl.mehrotraCtrl.qsdCtrl.progress = True
ctrl.mehrotraCtrl.progress = True
ctrl.mehrotraCtrl.outerEquil = True
ctrl.mehrotraCtrl.time = True

# Solve the affine-form SOCP; the optimum should be roughly 7.72423.
El.SOCPAffine(A,G,b,c,h,orders,firstInds,labels,x,y,z,s,ctrl)
El.Finalize()
| {
"repo_name": "justusc/Elemental",
"path": "examples/interface/PNorm.py",
"copies": "1",
"size": "2536",
"license": "bsd-3-clause",
"hash": 3366034296351202000,
"line_mean": 23.862745098,
"line_max": 75,
"alpha_frac": 0.6612776025,
"autogenerated": false,
"ratio": 2.2245614035087717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8172738236868978,
"avg_score": 0.042620153827958675,
"num_lines": 102
} |
# A small, stupid benchmark for friendlydb.
#
# v2.0.0
# ------
# Generating 10000 took 0.11811709404
# Building 1000000 relations took 178.619861126
# Checking 1000 users followers took 1.66441106796
# mean: 0.00166352272034
# min: 0.00112891197205
# max: 0.00374412536621
#
# v0.3.1
# ------
#
# Generating 10000 took 0.103417158127
# Building 1000000 relations took 455.301289082
# Checking 1000 users followers took 0.428857803345
# mean: 0.000428096532822
# min: 0.000310897827148
# max: 0.000933885574341
#
# v0.2.1
# ------
#
# Generating 10000 took 0.106270074844
# Building 1000000 relations took 439.715919018
# Checking 1000 users followers took 0.83282494545
# mean: 0.000831272602081
# min: 0.000524997711182
# max: 0.000524997711182 # FAIL - fixed in v0.3.1
#
from __future__ import print_function
import random
import time
from friendlydb.db import FriendlyDB
# Config.
all_chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
host = 'localhost'
port = 6379
db = 15
user_count = 10000
relation_count = 1000000
followers_check = 1000

# Go go go!
chars = list(all_chars)
users = []
fdb = FriendlyDB(host=host, port=port, db=db)
def time_taken(func):
    """Run ``func`` once and return the elapsed wall-clock seconds."""
    started = time.time()
    func()
    return time.time() - started
def generate_users():
    """Fill the global ``users`` list with random 6-12 character names."""
    for _ in range(0, user_count):
        name = ''.join(random.sample(chars, random.randint(6, 12)))
        users.append(name)
def build_relations():
    """Create random follow/unfollow edges between random user pairs."""
    for _ in range(0, relation_count):
        follower_name = random.choice(users)
        target_name = random.choice(users)
        follower = fdb[follower_name]
        # randint(0, 4) < 3 hits 3 of 5 values: ~60% follows, ~40% unfollows.
        if random.randint(0, 4) < 3:
            follower.follow(target_name)
        else:
            follower.unfollow(target_name)
def check_followers():
    """Time ``followers()`` lookups for a random sample of users.

    Returns a dict with ``overall_time``, ``mean_time``, ``min_time``
    and ``max_time`` (all in seconds).
    """
    durations = []
    results = {}
    batch_started = time.time()
    for _ in range(0, followers_check):
        lookup_started = time.time()
        fdb[random.choice(users)].followers()
        durations.append(time.time() - lookup_started)
    results['overall_time'] = time.time() - batch_started
    # Calculate mean/min/max.
    results['mean_time'] = sum(durations) / len(durations)
    results['min_time'] = min(durations)
    results['max_time'] = max(durations)
    return results
# Run the full benchmark end-to-end: generate users, build relations,
# then sample follower lookups. The DB is cleared before and after.
if __name__ == '__main__':
    fdb.clear()
    print('Running benchmark...')
    print(' User Count: %s' % user_count)
    print(' Relation Count: %s' % relation_count)
    print(' Followers Check Count: %s' % followers_check)
    print('')
    print('')
    print('')
    print('Generating users...')
    print("Generating %s took %s" % (user_count, time_taken(generate_users)))
    print('')
    print('Building relations...')
    print("Building %s relations took %s" % (relation_count, time_taken(build_relations)))
    print('')
    print('Checking followers...')
    results = check_followers()
    print("Checking %s users followers took %s" % (followers_check, results['overall_time']))
    print(" mean: %s" % results['mean_time'])
    print(" min: %s" % results['min_time'])
    print(" max: %s" % results['max_time'])
    # Leave the database empty so repeated runs start from the same state.
    fdb.clear()
| {
"repo_name": "toastdriven/friendlydb",
"path": "benchmark.py",
"copies": "1",
"size": "3150",
"license": "bsd-3-clause",
"hash": -4462421401826307000,
"line_mean": 23.609375,
"line_max": 93,
"alpha_frac": 0.6406349206,
"autogenerated": false,
"ratio": 3.237410071942446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9377200175790049,
"avg_score": 0.00016896335047930793,
"num_lines": 128
} |
"""A small tweeter bot."""
from twitter import Twitter, OAuth
from random import choice
from config import consumer_key, consumer_secret, \
access_token, access_token_secret
# Word-list files: one candidate word per line, read relative to the CWD.
adjective = "adjective.txt"
animal = "animal.txt"
friend = "friend.txt"
def adjectiveanimalfriend():
    """Build a random "adjective animal friend" phrase from the word files."""
    with open(adjective) as adjective_file:
        adjective_list = adjective_file.readlines()
    with open(animal) as animal_file:
        animal_list = animal_file.readlines()
    with open(friend) as friend_file:
        friend_list = friend_file.readlines()
    picks = (choice(adjective_list), choice(animal_list), choice(friend_list))
    return "{} {} {}".format(*(word.rstrip() for word in picks))
def main():
    """Authenticate against Twitter and post one generated phrase."""
    credentials = OAuth(access_token, access_token_secret, consumer_key,
                        consumer_secret)
    client = Twitter(auth=credentials)
    client.statuses.update(status=adjectiveanimalfriend())
# Post a single tweet when run as a script.
if __name__ == "__main__":
    main()
| {
"repo_name": "fennekki/adjectiveanimalfriend",
"path": "main.py",
"copies": "1",
"size": "1029",
"license": "unlicense",
"hash": -7042552571555963000,
"line_mean": 30.1818181818,
"line_max": 75,
"alpha_frac": 0.5850340136,
"autogenerated": false,
"ratio": 4.051181102362205,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5136215115962205,
"avg_score": null,
"num_lines": null
} |
"""A small wrapper to grab the data from the pubg website
more functions and features will be added as pubg adds more API calls"""
import json
import requests
import constants
class APIException(Exception):
    """Generic exception class for raising errors."""
class PUBGAPI:
    """Object that will represent the player unknown tracker api."""
    def __init__(self, api_key, platform='pc'):
        # API key issued by pubgtracker.com; sent via the trn-api-key header.
        self.api_key = api_key
        self.platform = platform
        self.pubg_url = "https://pubgtracker.com/api/profile/{}/".format(self.platform)
        # NOTE(review): the trailing "/" ends up directly after the steamId
        # query value -- confirm the tracker API accepts it.
        self.pubg_url_steam = "https://pubgtracker.com/api/search?steamId={}/"
        self.headers = {
            'content-type': "application/json",
            'trn-api-key': api_key,
        }
    def _get_player_profile(self, player_handle):
        """Returns pubg player profile from PUBG api, no filtering
        :param player_handle: player PUBG profile name
        :type player_handle: str
        :return: return json from PUBG API
        :rtype: dict
        """
        url = self.pubg_url + player_handle
        response = requests.request("GET", url, headers=self.headers)
        data = json.loads(response.text)
        return data
    def player(self, player_handle):
        """Returns the full set of data on a player, no filtering"""
        try:
            # Delegate to the shared helper instead of duplicating the
            # request/parse logic inline.
            return self._get_player_profile(player_handle)
        except BaseException as error:
            # Deliberately broad: report anything unexpected, then re-raise.
            print('Unhandled exception: ' + str(error))
            raise
    def player_s(self, sid):
        """Returns the full set of data on a player looked up by Steam id."""
        try:
            url = self.pubg_url_steam.format(str(sid))
            response = requests.request("GET", url, headers=self.headers)
            return json.loads(response.text)
        except BaseException as error:
            print('Unhandled exception: ' + str(error))
            raise
    def player_mode_stats(self, player_handle, game_mode=constants.GAME_MODE_WILDCARD, game_region=constants.GAME_REGION_WILDCARD):
        """Returns the stats for a particular mode of play,
        accepts solo, duo and squad. Will return both regional
        and global stats. Default gamemode is solo
        by Zac: Add parameter game_region to extract player stats by region directly
        """
        if game_mode not in constants.GAME_MODES:
            raise APIException("game_mode must be one of: solo, duo, squad, all")
        if game_region not in constants.GAME_REGIONS:
            raise APIException("game_region must be one of: as, na, agg, sea, eu, oc, sa, all")
        try:
            data = self._get_player_profile(player_handle)
            data = self._filter_gameplay_stats(data, game_mode, game_region)
            return data
        except BaseException as error:
            print('Unhandled exception: ' + str(error))
            raise
    def _filter_gameplay_stats(self, data, game_mode, game_region):
        """Returns gameplay stats that are filtered by game_mode and game_region.
        :param data: Json of gameplay stats.
        :type data: dict
        :param game_mode: Target game mode.
        :type game_mode: str
        :param game_region: Target game region.
        :type game_region: str
        :return: return list of gameplay stats with target game mode and region.
        :rtype: list
        """
        return_data = []
        for stat in data['Stats']:
            if self._is_target_game_mode(stat, game_mode) and self._is_target_region(stat, game_region):
                return_data.append(stat)
        return return_data
    def _is_target_game_mode(self, stat, game_mode):
        """Returns if the stat matches target game mode.
        :param stat: Json of gameplay stat.
        :type stat: dict
        :param game_mode: Target game mode.
        :type game_mode: str
        :return: return does the stat matches target game mode.
        :rtype: bool
        """
        if game_mode == constants.GAME_MODE_WILDCARD:
            return True
        return stat['Match'] == game_mode
    def _is_target_region(self, stat, game_region):
        """Returns if the stat matches target game region.
        :param stat: Json of gameplay stat.
        :type stat: dict
        :param game_region: Target game region.
        :type game_region: str
        :return: return does the stat matches target game region.
        :rtype: bool
        """
        if game_region == constants.GAME_REGION_WILDCARD:
            return True
        return stat['Region'] == game_region
    def player_skill(self, player_handle, game_mode='solo'):
        """Returns the current skill rating of the player for a specified gamemode,
        default gamemode is solo"""
        if game_mode not in constants.GAME_MODES:
            raise APIException("game_mode must be one of: solo, duo, squad, all")
        try:
            data = self._get_player_profile(player_handle)
            # Map region -> rating for every stats block of the given mode.
            # (An unused ``return_data`` accumulator was removed here.)
            player_stats = {}
            for stat in data['Stats']:
                if stat['Match'] == game_mode:
                    for datas in stat['Stats']:
                        if datas['label'] == 'Rating':
                            player_stats[stat['Region']] = datas['value']
            return player_stats
        except BaseException as error:
            print('Unhandled exception: ' + str(error))
            raise
| {
"repo_name": "lbrictson/pypubg",
"path": "pypubg/core.py",
"copies": "1",
"size": "5759",
"license": "mit",
"hash": 79198762550422000,
"line_mean": 39.4316546763,
"line_max": 131,
"alpha_frac": 0.5750998437,
"autogenerated": false,
"ratio": 4.262768319763138,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012082022866147391,
"num_lines": 139
} |
"""A smart choices interface for Django apps."""
__all__ = ['Choice', 'Choices']
def _smart_namify(name):
"""Translate the name into a more user friendly name."""
name = name.replace('_', ' ').title()
return name
class Choice(object):
    """A representation of a single choice.

    ``Choice.order`` is a class-level counter used to auto-assign
    ascending values to choices declared without an explicit value.
    """
    order = 0
    def __init__(self, value=None, name=None):
        # Use an explicit ``is not None`` test so a caller-supplied value
        # of 0 is honoured (the old truthiness check silently ignored it).
        if value is not None:
            self.value = Choice.order = value
        else:
            self.value = Choice.order
            Choice.order += 1
        self.name = name
class ChoicesMeta(type):
    """A metaclass for Choices."""
    def __getitem__(self, key):
        """Returns the value of the choice by its human-readable name.
        For example:
        JayZProblems['FOES_THAT_WANNA_MAKE_SURE_MY_CASKETS_CLOSED']
        will return 0.
        """
        # A list comprehension works on Python 2 and 3 (the old
        # ``filter(...)[0]`` indexing only worked on Python 2, where
        # filter returned a list).
        matches = [choice for choice in self.choices if choice[1] == key]
        try:
            # matches[0] is a (value, name) tuple; return the value.
            return matches[0][0]
        except IndexError:
            raise KeyError(key)
    def __new__(cls, name, bases, attrs):
        Choice.order = 0  # Reset Choice.order for every new Choices class.
        choices = []
        # Normally we'd want to iterate through the base classes too. We
        # deliberately don't, so Choices won't support inheritance.
        Meta = attrs.get('Meta')
        # getattr guards against a Meta class without ``smart_names``.
        smart_names = getattr(Meta, 'smart_names', False)
        # Snapshot the items so entries can be overwritten while iterating
        # (also keeps this Python-3 compatible, replacing iteritems()).
        for key, val in list(attrs.items()):
            if not isinstance(val, Choice):
                continue
            if val.name:
                choices.append((val.value, val.name))
            elif smart_names:
                choices.append((val.value, _smart_namify(key)))
            else:
                choices.append((val.value, key))
            # Expose the plain value on the class instead of the Choice.
            attrs[key] = val.value
        attrs['choices'] = tuple(sorted(choices, key=lambda c: c[0]))
        return super(ChoicesMeta, cls).__new__(cls, name, bases, attrs)
class Choices(object):
    """A representation of a collection of Choices."""
    # Python 2 metaclass hook; under Python 3 this attribute is ignored.
    __metaclass__ = ChoicesMeta
    class Meta:
        # When True, attribute names are prettified via _smart_namify.
        smart_names = False
| {
"repo_name": "storybird/smartchoices",
"path": "smartchoices.py",
"copies": "1",
"size": "2441",
"license": "mit",
"hash": -7826773288257334000,
"line_mean": 30.2948717949,
"line_max": 87,
"alpha_frac": 0.5497746825,
"autogenerated": false,
"ratio": 4.208620689655173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5258395372155172,
"avg_score": null,
"num_lines": null
} |
"""A SmartInspect Python Library.
Port of SmartInspect.pas (from SmartInspect Professional v2.3.3.7025)
NOTE: Many things in here have not yet been tested at all. This code is alpha
quality in a very real sense. You have been warned.
Not all the features have been implemented yet. In particular, the file-based
protocols are missing.
Other Compatibility Notes:
* Identifiers have been changed to conform with the Python style guide. In
some instances it made sense to slightly modify implementations, say
using a property instead of a simple get-method.
* Due to the dynamic nature of Python the number of Log*, Watch* etc.
methods could be drastically reduced. In particular, we are taking
advantage of Python's optional keyword arguments, simple string
formatting and loose-typing.
* The events of the ``SmartInspect`` class (OnWatch, OnLogEntry, ...) do
not exist. If needed, implementing such callbacks could be considered.
* OnError has also been removed and deserves a special mention - instead,
exceptions raised in methods like ``load_connections`` are passed through
to the caller directly.
* While the ``ViewerContext`` class exists to allow custom contexts, most
of its default subclasses are not implemented, as it is unclear what
they are good for - their respective log functions seem to do their own
view formatting.
* Instead of the ``BufferedStream`` class we use the builtin buffering of
Python's ``open()`` function, but apparently whether it works or not
depends partly on the underlying OS implementation (see the comments
on the now-fixed Python bug #603724).
* The High-resolution clock is not supported on any platform.
* ``TrackMethod()`` is replaced by Python decorators (hurray for
first-class functions!)
* Comments are largely missing so far.
"""
from __future__ import with_statement
import os, sys
import re
import socket
import struct
import threading, thread
import datetime, time
import StringIO
try:
from pyutils.xtypes import ValueEnum as Enum
except:
from __enum import ValueEnum as Enum
__all__ = (
'SmartInspect', 'Session',
'Level',
)
################################################################################
## Global constants.
################################################################################
# Library version, and the banner sent to the SI console on connect.
VERSION = '0.01 for 2.3.3.7025'
TCP_CLIENT_BANNER = 'SmartInspect Python Library v' + VERSION
# Default log-entry color tuple (clWindow / COLOR_WINDOW).
DEFAULT_COLOR = (0x05, 0x00, 0x00, 0xff,)
MAGIC_LOG_STRING = 'SILF'
DEFAULT_TEXT_PATTERN = '[%timestamp%] %level%: %title%'
# User-facing error messages.
CAPTION_NOT_FOUND_MSG = 'No protocol could be found with the specified caption'
SOCKET_CLOSED_MSG = 'Connection has been unexpectedly closed'
INVALID_SOCKET_MSG = 'Invalid socket handle'
CONNECTION_ESTABLISHED_MSG = 'Connection already established'
TIMEOUT_MSG = 'Timed out while trying to connect'
NO_CONNECTION_MSG = 'No socket connection established'
PROTOCOL_CONNECTED_MSG = 'This protocol is currently connected. ' +\
                         'Please disconnect before you change any ' +\
                         'protocol specific options.'
################################################################################
## Exception classes.
################################################################################
class SmartInspectError(Exception):
    """Base class for all SmartInspect library errors."""
class LoadConnectionsError(SmartInspectError):
    """Raised when a connections file cannot be loaded.

    The offending filename is available as ``filename``.
    """
    def __init__(self, filename):
        # Forward the filename to the base class so str(err) is not
        # empty (the original discarded the message entirely).
        super(LoadConnectionsError, self).__init__(filename)
        self.filename = filename
class LoadConfigurationsError(SmartInspectError):
    """Raised when a configuration file cannot be loaded.

    The offending filename is available as ``filename``.
    """
    def __init__(self, filename):
        # Forward the filename to the base class so str(err) is not
        # empty (the original discarded the message entirely).
        super(LoadConfigurationsError, self).__init__(filename)
        self.filename = filename
class ProtocolError(SmartInspectError):
    """Error raised in Protocol-related code.
    References the ``Protocol`` instance directly, instead of storing
    its name and options string, as in the Delphi implementation.
    """
    def __init__(self, message, protocol, *args, **kwargs):
        # Keep a reference to the failing protocol for the caller.
        self.protocol = protocol
        super(ProtocolError, self).__init__(message, *args, **kwargs)
# Raised for syntactically invalid connection strings.
class InvalidConnectionsError(SmartInspectError): pass
################################################################################
## Enumerations.
################################################################################
# Wire-level packet type IDs written into each packet header.
class PacketType(Enum):
    """IDs as expected by SI console."""
    LogEntry = 4
    ControlCommand = 1
    Watch = 5
    ProcessFlow = 6
# Log-entry subtypes; 0-3 are structural, 100+ message-like, 200+ data-bearing.
class LogEntryType(Enum):
    """IDs as expected by SI console."""
    Separator = 0
    EnterMethod = 1
    LeaveMethod = 2
    ResetCallstack = 3
    Message = 100
    Warning = 101
    Error = 102
    InternalError = 103
    Comment = 104
    VariableValue = 105
    Checkpoint = 106
    Debug = 107
    Verbose = 108
    Fatal = 109
    Conditional = 110
    Assert = 111
    Text = 200
    Binary = 201
    Graphic = 202
    Source = 203
    Object = 204
    WebContent = 205
    System = 206
    MemoryStatistic = 207
    DatabaseResult = 208
    DatabaseStructure = 209
# Console viewer IDs; grouped by hundreds (text viewers, source viewers
# at 300+, graphics at 400+).
class ViewerId(Enum):
    """IDs as expected by SI console."""
    None_ = -1
    Title = 0
    Data = 1
    List = 2
    ValueList = 3
    Inspector = 4
    Table = 5
    Web = 100
    Binary = 200
    HtmlSource = 300
    JavaScriptSource = 301
    VbScriptSource = 302
    PerlSource = 303
    SqlSource = 304
    IniSource = 305
    # 306, not 206: the source viewers are numbered consecutively from
    # 300 (the original value broke the sequence with an obvious typo).
    PythonSource = 306
    XmlSource = 307
    Bitmap = 400
    Jpeg = 401
    Icon = 402
    Metafile = 403
class SourceId(Enum):
    """Identifiers for the supported source-code syntaxes."""
    Html = 1
    Javascript = 2
    VbScript = 3
    Perl = 4
    Sql = 5
    Ini = 6
    Python = 7
    Xml = 8
class GraphicId(Enum):
    """Identifiers for the supported graphic formats."""
    Bitmap = 0
    Jpeg = 1
    Icon = 2
    Metafile = 3
class ControlCommandType(Enum):
    """Console-side actions triggerable via a ControlCommand packet."""
    ClearLog = 0
    ClearWatches = 1
    ClearAutoViews = 2
    ClearAll = 3
    ClearProcessFlow = 4
class WatchType(Enum):
    """Value-type tags for Watch packets."""
    Char = 0
    String = 1
    Integer = 2
    Float = 3
    Boolean = 4
    Address = 5
    Timestamp = 6
    Object = 7
class ProcessFlowType(Enum):
    """Enter/leave events for methods, threads and processes."""
    EnterMethod = 0
    LeaveMethod = 1
    EnterThread = 2
    LeaveThread = 3
    EnterProcess = 4
    LeaveProcess = 5
class Level(Enum):
    """Log levels in ascending severity; Control is internal-only."""
    Debug = 0
    Verbose = 1
    Message = 2
    Warning = 3
    Error = 4
    Fatal = 5
    Control = 6
class FileRotate(Enum):
    """Rotation cadences for file-based protocols."""
    None_ = 0
    Hourly = 1
    Daily = 2
    Weekly = 3
    Monthly = 4
################################################################################
## Core classes (packet & packet types).
################################################################################
class Packet(object):
    """Base class for all packets.

    Subclasses must implement ``_get_size`` and ``_get_packet_type``.
    """
    def __init__(self):
        self.level = Level.Message
        # Optional binary payload attached to the packet.
        self._data = StringIO.StringIO()
    def _get_size(self):
        raise NotImplementedError()
    size = property(lambda s: s._get_size())
    def _get_packet_type(self):
        raise NotImplementedError()
    packet_type = property(lambda s: s._get_packet_type())
    def _get_data(self):
        return self._data
    def _set_data(self, value):
        if value:
            # Copy the stream contents into a fresh stream. The original
            # called an undefined ``copy()`` here, which raised NameError.
            self._data = StringIO.StringIO(value.getvalue())
        else:
            # Clear the payload.
            self._data.seek(0)
            self._data.truncate(0)
    data = property(_get_data, _set_data)
    @property
    def has_data(self):
        return self._data and self._data.len > 0
class PacketQueue(object):
    """A FIFO of packets that trims itself to a maximum total byte size."""
    def __init__(self, backlog=None):
        self._data = []
        self._backlog = backlog  # maximum total size, or None for unbounded
        self._size = 0
    def _set_backlog(self, value):
        self._backlog = value
        self._resize()
    backlog = property(lambda s: s._backlog, _set_backlog)
    def _resize(self):
        # Drop oldest packets until the queue fits within the backlog.
        while self._backlog and self._size > self._backlog:
            self.pop()
    def clear(self):
        while self._data:
            self.pop()
    def push(self, packet):
        self._data.append(packet)
        self._size += packet.size
        self._resize()
    def pop(self):
        if not self._data:
            return None
        oldest = self._data.pop(0)
        self._size -= oldest.size
        return oldest
class LogEntry(Packet):
    """A log-entry packet; callers assign session_name/title/appname/hostname
    and timestamp before it is compiled."""
    def __init__(self, log_entry_type, viewer_id):
        # The base class already creates self._data; the original
        # redundantly created a second StringIO here.
        super(LogEntry, self).__init__()
        self.log_entry_type = log_entry_type
        self.viewer_id = viewer_id
        self.color = DEFAULT_COLOR
        self.thread_id = thread.get_ident()
        self.process_id = os.getpid()
    def _get_size(self):
        # 48-byte fixed header plus the variable-length string fields.
        result = (48 + # header
                  len(self.session_name) +
                  len(self.title) +
                  len(self.appname) +
                  len(self.hostname))
        if self.has_data:
            result += self.data.len
        return result
    def _get_packet_type(self):
        return PacketType.LogEntry
class ControlCommand(Packet):
    """A console control command (clear log, clear watches, ...)."""
    def __init__(self, control_command_type):
        super(ControlCommand, self).__init__()
        self.control_command_type = control_command_type
        # Control commands always use the special internal Control level.
        self.level = Level.Control
    def _get_size(self):
        # 8-byte fixed header plus any attached data.
        result = 8 # header
        if self.has_data:
            result += self.data.len
        return result
    def _get_packet_type(self):
        return PacketType.ControlCommand
class Watch(Packet):
    """A named watch value shown in the console's watches panel."""
    def __init__(self, watch_type):
        super(Watch, self).__init__()
        self.watch_type = watch_type
    def _get_size(self):
        # 20-byte fixed header plus the name/value strings.
        # NOTE(review): ``name`` and ``value`` are assigned by callers
        # before sizing -- confirm all call sites set them.
        result = (20 + # header
                  len(self.name) +
                  len(self.value))
        return result
    def _get_packet_type(self):
        return PacketType.Watch
class ProcessFlow(Packet):
    """A process-flow packet (enter/leave method, thread or process)."""
    def __init__(self, process_flow_type):
        super(ProcessFlow, self).__init__()
        self.process_flow_type = process_flow_type
        self.thread_id = thread.get_ident()
        self.process_id = os.getpid()
    def _get_size(self):
        # 28-byte fixed header plus the title/hostname strings.
        # NOTE(review): ``title`` and ``hostname`` are assigned by callers
        # before sizing -- confirm all call sites set them.
        result = (28 + # header
                  len(self.title) +
                  len(self.hostname))
        return result
    def _get_packet_type(self):
        return PacketType.ProcessFlow
################################################################################
## Protocols and related classes.
################################################################################
class Formatter(object):
    """Formatters are responsible for formatting and writing a packet."""
    def format(self, packet, stream):
        # Template method: subclasses implement compile() and write().
        self.compile(packet)
        self.write(stream)
class BinaryFormatter(Formatter):
    """Stores log data in a fast binary format.

    All multi-byte integers are written little-endian with fixed widths
    (matching the Delphi/SI console wire format), so every struct format
    uses the explicit '<' prefix: bare 'l' is native-endian and
    native-sized, which packs 8 bytes on LP64 platforms and would corrupt
    the stream.
    """
    max_capacity = 10 * 1024 * 1024
    def __init__(self):
        self._stream = StringIO.StringIO()
        self._size = 0
    def _reset_stream(self):
        if self._size > BinaryFormatter.max_capacity:
            # Reset the stream capacity if the previous packet
            # was very big. This ensures that the amount of memory
            # can shrink again after a big packet has been sent.
            self._stream.seek(0)
            self._stream.truncate(0)
        else:
            # Only reset the position. This should ensure better
            # performance since no reallocations are necessary.
            self._stream.seek(0)
    def _write_string(self, s):
        if isinstance(s, unicode):
            s = s.encode('utf-8')
        self._stream.write(s)
    def _write_long(self, i):
        # store as Delphi Integer (32bit signed, little endian)
        self._stream.write(struct.pack('<i', i))
    def _write_ulong(self, i):
        # store as Delphi Cardinal (32bit unsigned, little endian);
        # the original packed a *signed* native long here.
        self._stream.write(struct.pack('<I', i))
    def _write_word(self, i):
        # store as Delphi Word (16bit unsigned, little endian)
        self._stream.write(struct.pack('<H', i))
    def _write_datetime(self, d):
        # Delphi TDatetime is an 8-byte double counting days; 25569.0 is
        # the Unix epoch expressed on that scale.
        tdatetime = time.mktime(d.timetuple()) / 86400 + 25569.0
        self._stream.write(struct.pack('<d', tdatetime))
    def _write_color(self, c):
        # c is a 3 or 4-tuple; see module doc section on colors for more info
        e = (list(c) + [0])[:4]
        if len(e) != 4: raise SmartInspectError('Not a valid color: %s'%c)
        self._write_string("".join(map(lambda n: struct.pack('B', n), e)))
    def write(self, stream):
        """Writes a previously compiled packet to the supplied stream."""
        if self._size > 0:
            # hack: swap in the target stream so the _write_* helpers
            # emit the packet header directly to the output
            compiled = self._stream
            self._stream = stream
            # write packet header (type word + body length) to output
            self._write_word(self._packet.packet_type)
            self._write_long(self._size)
            # copy local, compiled data to output stream as packet body
            compiled.seek(0)
            # read max size as stream might not have been reset!
            stream.write(compiled.read(self._size))
            # switch streams back
            self._stream = compiled
    def compile(self, packet):
        """Serialize ``packet`` into the internal stream; returns the
        total on-wire size including the 6-byte header."""
        self._reset_stream()
        self._packet = packet
        if packet.packet_type == PacketType.LogEntry:
            self._compile_log_entry()
        elif packet.packet_type == PacketType.Watch:
            self._compile_watch()
        elif packet.packet_type == PacketType.ControlCommand:
            self._compile_control_command()
        elif packet.packet_type == PacketType.ProcessFlow:
            self._compile_process_flow()
        self._size = self._stream.tell()
        return self._size + 6 # packet header size: 6 bytes
    def _compile_control_command(self):
        control_command = self._packet
        stream = self._stream
        # header
        self._write_long(control_command.control_command_type)
        if control_command.has_data: self._write_long(control_command.data.len)
        else: self._write_long(0)
        # values
        if control_command.has_data:
            stream.write(control_command.data.read())
            control_command.data.pos = 0
    def _compile_log_entry(self):
        log_entry = self._packet
        stream = self._stream
        # header
        self._write_long(log_entry.log_entry_type)
        self._write_long(log_entry.viewer_id)
        self._write_long(len(log_entry.appname))
        self._write_long(len(log_entry.session_name))
        self._write_long(len(log_entry.title))
        self._write_long(len(log_entry.hostname))
        if log_entry.has_data: self._write_long(log_entry.data.len)
        else: self._write_long(0)
        self._write_ulong(log_entry.thread_id)
        self._write_ulong(log_entry.process_id)
        self._write_datetime(log_entry.timestamp)
        self._write_color(log_entry.color)
        # values
        self._write_string(log_entry.appname)
        self._write_string(log_entry.session_name)
        self._write_string(log_entry.title)
        self._write_string(log_entry.hostname)
        if log_entry.has_data:
            stream.write(log_entry.data.read())
            log_entry.data.pos = 0
    def _compile_process_flow(self):
        process_flow = self._packet
        stream = self._stream
        # header
        self._write_long(process_flow.process_flow_type)
        self._write_long(len(process_flow.title))
        self._write_long(len(process_flow.hostname))
        self._write_ulong(process_flow.process_id)
        self._write_ulong(process_flow.thread_id)
        self._write_datetime(process_flow.timestamp)
        # values
        self._write_string(process_flow.title)
        self._write_string(process_flow.hostname)
    def _compile_watch(self):
        watch = self._packet
        stream = self._stream
        # header
        self._write_long(len(watch.name))
        self._write_long(len(watch.value))
        self._write_long(watch.watch_type)
        self._write_datetime(watch.timestamp)
        # values
        self._write_string(watch.name)
        self._write_string(watch.value)
class TextFormatter(Formatter):
    """Formats log-entry packets as single pattern-expanded text lines."""
    def __init__(self):
        self._parser = PatternParser()
    def compile(self, packet):
        if packet.packet_type == PacketType.LogEntry:
            # Append a real CRLF: the original appended the literal text
            # "#13#10", a Delphi character-escape that leaked into Python.
            self.line = (self._parser.expand(packet) + "\r\n").encode('utf-8')
            # Was ``len(line)`` -- a NameError, since only self.line exists.
            return len(self.line)
        else:
            self.line = ''
            return 0
    def write(self, stream):
        if self.line:
            # Was ``stream.write(line)`` -- same NameError as above.
            stream.write(self.line)
    def _get_indent(self):
        return self._parser.indent
    def _set_indent(self, value):
        self._parser.indent = value
    # Setters added: MemoryProtocol assigns formatter.indent/pattern, which
    # failed against the original read-only properties.
    indent = property(_get_indent, _set_indent)
    def _get_pattern(self):
        # The original returned self._parser.indent here (copy/paste bug).
        return self._parser.pattern
    def _set_pattern(self, value):
        self._parser.pattern = value
    pattern = property(_get_pattern, _set_pattern)
class ProtocolCommand(object):
    """Value object pairing a dispatch action with its associated state."""
    def __init__(self, action, state):
        self.action = action
        self.state = state
class ProtocolOptions(object):
    """Manages a set of options for a protocol instance. The instance is
    expected to store its copy of the options in an attribute named
    ``_options``.
    This replaces the BuildOptions/LoadOptions methods on ``Protocol``
    classes in the Delphi implementation.
    """
    def __init__(self, onchange=None):
        self._options = {}
        # Optional callback invoked (with the protocol) after any change.
        self.onchange = onchange
    def __get__(self, instance, owner):
        # options property is read, return an options instance linking to
        # the protocol object this was accessed by (``instance``).
        return ProtocolOptions._OptionsImpl(instance, self.onchange)
    def __set__(self, instance, value):
        # something is assigned to the options property
        if isinstance(value, dict):
            pass
        elif value is None:
            value = {}
        elif isinstance(value, ProtocolOptions):
            value = value._options
        else:
            # (The original used Delphi-style 'Can''t', which in Python is
            # string concatenation and rendered as "Cant".)
            raise SmartInspectError(
                "Can't assign a \"%s\" to protocol options." % type(value))
        # update our own values
        for k, v in value.items():
            setattr(instance.options, k, v)
    class _OptionsImpl(object):
        # The actual options "attribute access" implementation. The outer
        # descriptor creates an instance of this with a link to the correct
        # ``Protocol`` instance on "get".
        def __init__(self, protocol, onchange):
            self._onchange = onchange
            self.__dict__['_protocol'] = protocol # __setattr__ already requires this
            if not hasattr(protocol, '_options'):
                protocol._options = {}
        def _validate_option(self, option):
            if not option in self._protocol.valid_options.keys():
                raise SmartInspectError(u'Option "%s" not available for protocol "%s"' % \
                    (option, self._protocol.name,))
        def __setattr__(self, key, value):
            if key.startswith('_'):
                return object.__setattr__(self, key, value)
            else:
                with self._protocol._lock:
                    if self._protocol.connected:
                        raise SmartInspectError(PROTOCOL_CONNECTED_MSG)
                    self._validate_option(key)
                    if value != self._protocol._options.get(key, None):
                        self._protocol._options[key] = value
                        # note this is only called on an actual change
                        if self._onchange:
                            self._onchange(self._protocol)
        def __getattr__(self, key):
            with self._protocol._lock:
                self._validate_option(key)
                # Fall back to the default from valid_options; only changed
                # values are stored in _options.
                return self._protocol._options.get(
                    key, self._protocol.valid_options[key])
        def __iter__(self):
            for option in self._protocol.valid_options.keys():
                yield option
        def reset(self):
            """Reset to default values."""
            # We only store changed values, so clearing the backing dict
            # restores every default. The original assigned
            # ``self._protocol.options = {}``, which routes through the
            # descriptor's __set__ and is a no-op for an empty dict, so
            # nothing was ever actually reset.
            self._protocol._options = {}
            if self._onchange:
                self._onchange(self._protocol)
class Protocol(object):
"""A protocol is responsible for the transport of packets."""
valid_options = {'level': Level.Debug,
'backlog': 0,
'flushon': Level.Error,
'reconnect': False,
'keepopen': True, \
'caption': None}
def _options_changed(self):
# default ``caption`` option to protocol name
if (self.options.caption is None) and (type(self).name is not None):
self.options.caption = type(self).name
if self.options.backlog <= 0:
self.options.keepopen = True
options = ProtocolOptions(lambda s: s._options_changed())
def __init__(self):
self._lock = threading.RLock()
self._queue = PacketQueue()
self.connected = False
self.options.reset() # will cause a validation
def _internal_reconnect(self):
self._internal_connect()
def _internal_dispatch(self, command):
# empty by default
pass
def connect(self):
with self._lock:
if not self.connected and self.options.keepopen:
try:
self._internal_connect()
self.connected = True
except Exception, e:
self.reset()
raise e
def reconnect(self):
try:
self._internal_reconnect()
self.connected = True
except:
# ignore reconnect exceptions
pass
def disconnect(self):
with self._lock:
if self.connected:
try:
self.reset()
except Exception, e:
raise ProtocolError(str(e), self)
def reset(self):
self._queue.clear()
self._connected = False
self._internal_disconnect()
def dispatch(self, command):
with self._lock:
if self.connected:
try:
self._internal_dispatch(command)
except Exception, e:
raise ProtocolError(str(e), self)
def forward_packet(self, packet, disconnect):
if not self.connected:
if not self.options.keepopen:
self._internal_connect()
connected = True
else:
self.reconnect()
if self.connected:
self._internal_write_packet(packet)
if disconnect:
self.connected = False
self._internal_disconnect()
def write_packet(self, packet):
with self._lock:
if packet.level < self.options.level:
return
if self.connected or self.options.reconnect or \
not self.options.keepopen:
try:
skip = False
if self.options.backlog > 0:
if packet.level >= self.options.flushon and \
packet.level <> PacketLevel.Control:
p = self._queue.pop()
while p:
try:
self.forward_packet(p, False)
finally:
p.release()
p = self._queue.pop()
else:
self._queue.push(packet)
skip = True
if not skip:
self.forward_packet(packet, not self.options.keepopen)
except Exception, e:
self.reset()
raise ProtocolError(str(e), self)
class MemoryProtocol(Protocol):
    """Protocol that collects packets in an in-memory queue and writes them
    all to a stream when dispatched."""

    name = "mem"

    valid_options = dict(Protocol.valid_options)
    valid_options.update({
        'astext': False,
        'maxsize': 2048,
        'pattern': DEFAULT_TEXT_PATTERN,
        'indent': False})

    def _options_changed(self):
        super(MemoryProtocol, self)._options_changed()
        # pick the formatter that matches the ``astext`` selection
        if not self.options.astext:
            self.formatter = BinaryFormatter()
        else:
            formatter = TextFormatter()
            formatter.pattern = self.options.pattern
            formatter.indent = self.options.indent
            self.formatter = formatter

    def _internal_connect(self):
        # "connecting" just means allocating the bounded packet queue
        queue = PacketQueue()
        queue.size = self.options.maxsize
        self.queue = queue

    def _internal_disconnect(self):
        self.queue.clear()

    def _internal_dispatch(self, command):
        """Drain the queue, formatting every packet into the stream passed
        in ``command.state`` (anything with a ``write`` method)."""
        if not command:
            return
        stream = command.state
        if not hasattr(stream, 'write'):
            return
        # header: BOM for text output, magic marker for binary
        if self.options.astext:
            stream.write('\xef\xbb\xbf')
        else:
            stream.write(MAGIC_LOG_STRING)
        packet = self.queue.pop()
        while packet:
            self.formatter.format(packet, stream)
            packet = self.queue.pop()

    def _internal_write_packet(self, packet):
        self.queue.push(packet)
class TcpProtocol(Protocol):
    """Protocol that ships binary-formatted packets to the SmartInspect
    console over a TCP socket."""

    name = "tcp"

    valid_options = dict(Protocol.valid_options)
    valid_options.update({'host': '127.0.0.1',
                          'port': 4228,
                          'timeout': 30000})   # milliseconds

    def __init__(self):
        super(TcpProtocol, self).__init__()
        self.formatter = BinaryFormatter()

    def _internal_connect(self):
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.settimeout(self.options.timeout/1000.0)  # ms -> s
        self._socket.connect((self.options.host, self.options.port,))
        self._buffer = self._socket.makefile('rw', bufsize=0x2000)
        # exchange banners: read the server's, then send ours
        self._buffer.readline()
        self._buffer.write("%s\n" % TCP_CLIENT_BANNER)

    def _internal_write_packet(self, packet):
        # let the formatter write directly to the tcp stream
        self.formatter.format(packet, self._buffer)
        self._buffer.flush()
        # read (and wait for) the two-byte server answer
        answer = self._buffer.read(2)
        if len(answer) != 2:
            # We couldn't read the entire answer from the server, but the
            # read didn't raise an exception either. This means the socket
            # connection was closed normally by the server, which shouldn't
            # occur while we are waiting for the answer.
            # was: ``raise ESmartInspectError.Create(...)`` -- an untranslated
            # Delphi leftover; neither name exists here (NameError at runtime)
            raise SmartInspectError(SOCKET_CLOSED_MSG)

    def _internal_disconnect(self):
        self._socket.close()
class Protocols(object):
    """Globally manage a list of available protocols.

    Thread-safe.

    Replaces the ProtocolFactory class of the Delphi implementation.
    """
    _table = {
        'mem': MemoryProtocol,
        'tcp': TcpProtocol
        # Not yet supported
        #    ('file', FileProtocol)
        #    ('text', TextProtocol)
    }
    _lock = threading.RLock()

    @classmethod
    def get(cls, name, options=None):
        """Instantiate the protocol registered under ``name``, optionally
        applying an ``options`` dict; raise ``SmartInspectError`` for
        unknown names."""
        with cls._lock:
            # was: ``cls._table.get(name, False)()`` -- calling ``False()``
            # raised a TypeError instead of the intended SmartInspectError
            klass = cls._table.get(name)
            if klass is None:
                raise SmartInspectError(u'Protocol "%s" not found'%name)
            result = klass()
            if options:
                result.options = options
            return result

    @classmethod
    def register(cls, name, klass):
        """Make protocol class ``klass`` available under ``name``."""
        if name and klass:
            with cls._lock:
                cls._table[name] = klass
################################################################################
## Public interface clasess
################################################################################
def default_level_to_parent(func):
    """Decorator used by log methods that have an optional ``level`` argument
    which should fall back to the instance's default if missing. For use
    within ``Session``.

    The fallback has to happen this early (rather than later, in a common
    send method) because ``is_on()`` checks and the packet itself must use
    the very same level, and some methods send multiple packets.

    Note: Currently this requires ``level`` to be passed as a keyword
    argument. Using introspection we could make it work for positional
    arguments as well.
    """
    from functools import wraps

    @wraps(func)   # preserve the wrapped method's name and docstring
    def wrapper(self, *args, **kwargs):
        if kwargs.get('level', None) is None:
            kwargs['level'] = self.parent.defaultlevel
        return func(self, *args, **kwargs)
    return wrapper
def if_is_on(func):
    """Only runs the decorated method after passing the ``level`` keyword
    argument successfully through ``is_on``. For use within ``Session``."""
    from functools import wraps

    @wraps(func)   # preserve the wrapped method's name and docstring
    def wrapper(self, *args, **kwargs):
        if self.is_on(kwargs.get('level')):
            return func(self, *args, **kwargs)
    return wrapper
class Session(object):
    """A named logging channel.

    Builds packets (log entries, watches, process flow and control
    commands) and hands them to the parent ``SmartInspect`` instance for
    dispatching to the configured protocols.
    """

    def __init__(self, parent, name):
        self._namelock = threading.RLock()
        self._checkpointlock = threading.RLock()
        self._counterlock = threading.RLock()
        self.parent = parent
        self.name = name
        self._checkpointcounter = 0
        self.active = True
        # set to True by SmartInspect.add_session(store=True); initialized
        # here so set_name() doesn't hit an AttributeError on unstored ones
        self.is_stored = False
        self._counters = {}

    def set_name(self, value):
        """Rename the session, keeping the parent's registry in sync."""
        with self._namelock:
            if value != self.name:
                if self.is_stored:
                    # was: ``self.parent.uddate_esssion`` -- a typo; the
                    # parent's registry updater is ``_update_session``
                    self.parent._update_session(self, value, self.name)
                self.name = value

    def is_on(self, level=None):
        """
        Returns ``True`` if information would be logged for a certain log
        level or ``False`` if not. If ``level`` is not passed the result
        can still vary depending on whether the session is active, or the
        parent ``SmartInspect`` instance enabled.

        Python port note: The Delphi version uses two overloaded versions
        of this method here, which we can replace by defaulting the
        ``level`` argument to ``None``.
        """
        return self.active and self.parent.enabled and (
            level >= self.parent.level or level is None)

    def send_log_entry(self, level, title, log_entry_type, viewer_id,
                       color=None, data=None):
        """Build a ``LogEntry`` packet and pass it to the parent."""
        entry = LogEntry(log_entry_type, viewer_id)
        entry.timestamp = self.parent.now()
        entry.level = level
        entry.title = title
        entry.color = color or DEFAULT_COLOR
        entry.session_name = self.name
        entry.data = data
        self.parent.send_log_entry(entry)

    def send_process_flow(self, level, title, process_flow_type):
        """Build a ``ProcessFlow`` packet and pass it to the parent."""
        process_flow = ProcessFlow(process_flow_type)
        process_flow.timestamp = self.parent.now()
        process_flow.level = level
        process_flow.title = title
        self.parent.send_process_flow(process_flow)

    def send_watch(self, level, name, value, watch_type):
        """Build a ``Watch`` packet and pass it to the parent."""
        watch = Watch(watch_type)
        watch.timestamp = self.parent.now()
        watch.level = level
        watch.name = name
        watch.value = value
        self.parent.send_watch(watch)

    def send_control_command(self, control_command_type, data=None):
        """Build a ``ControlCommand`` packet and pass it to the parent."""
        control_command = ControlCommand(control_command_type)
        control_command.level = Level.Control
        control_command.data = data
        self.parent.send_control_command(control_command)

    def send_context(self):
        # was missing ``self``
        # TODO: just send_log_entry with a viewer instance as the data part
        raise NotImplementedError()

    @default_level_to_parent
    @if_is_on
    def log(self, title, level=None, color=None,
            entry_type=LogEntryType.Message):
        """
        A generic log method that allows full customization of all
        parameters involved.

        While you can directly call this method if you want, it is
        recommended that you use the level- or type-specific methods like
        ``log_debug`` or ``log_value`` instead whenever possible. In
        particular, those will also take care of sending the appropriate
        entry type for each specific log level (while the level determines
        whether an entry is shown, the entry type determines *how* it is
        displayed, e.g. icon).
        """
        self.send_log_entry(level, "%s"%title, entry_type,
                            ViewerId.Title, color=color)

    # The level-specific helpers force their level/entry type; previously
    # they raised a TypeError (duplicate keyword) if a caller passed one of
    # those keywords explicitly -- now the forced value simply wins.
    def log_debug(self, *args, **kwargs):
        kwargs.update(level=Level.Debug, entry_type=LogEntryType.Debug)
        self.log(*args, **kwargs)

    def log_verbose(self, *args, **kwargs):
        kwargs.update(level=Level.Verbose, entry_type=LogEntryType.Verbose)
        self.log(*args, **kwargs)

    def log_message(self, *args, **kwargs):
        kwargs.update(level=Level.Message, entry_type=LogEntryType.Message)
        self.log(*args, **kwargs)

    def log_warning(self, *args, **kwargs):
        kwargs.update(level=Level.Warning, entry_type=LogEntryType.Warning)
        self.log(*args, **kwargs)

    def log_error(self, *args, **kwargs):
        kwargs.update(level=Level.Error, entry_type=LogEntryType.Error)
        self.log(*args, **kwargs)

    def log_fatal(self, *args, **kwargs):
        kwargs.update(level=Level.Fatal, entry_type=LogEntryType.Fatal)
        self.log(*args, **kwargs)

    def log_value(self, name, value, level=None, *args, **kwargs):
        """Log a ``name = value`` pair; string values are quoted."""
        if isinstance(value, basestring):
            title = "%s = '%s'" % (name, value)
        else:
            title = "%s = %s" % (name, value)
        self.log(title, level=level,
                 entry_type=LogEntryType.VariableValue,
                 *args, **kwargs)

    @default_level_to_parent
    @if_is_on
    def log_separator(self, level=None):
        self.send_log_entry(level, '', LogEntryType.Separator, ViewerId.None_)

    def log_assert(self, condition, title):
        """Log ``title`` as an assertion failure if ``condition`` is false."""
        if self.is_on(Level.Error):
            if not condition:
                self.send_log_entry(Level.Error, title, LogEntryType.Assert,
                                    ViewerId.Title)

    def log_custom_file(self):
        raise NotImplementedError()   # TODO

    def log_custom_stream(self):
        raise NotImplementedError()   # TODO

    def log_object(self):
        raise NotImplementedError()   # TODO: RTTI/dir

    def log_memory_statistic(self):
        raise NotImplementedError()   # TODO

    def log_system(self):
        raise NotImplementedError()   # TODO

    def log_internal_error(self, title):
        self.log(title, level=Level.Error, entry_type=LogEntryType.InternalError)

    def log_exception(self, title='', exception=None, *args, **kwargs):
        """Log the exception given in ``exception`` argument, or the one
        currently handled, if any.
        """
        if self.is_on(Level.Error):
            if not exception:
                exception = sys.exc_info()[1]
            if exception is None:
                self.log_internal_error('log_exception: No exception is currently handled')
                return
            # TODO: this is dangerous, as we don't know what str() returns;
            # especially non-ascii error messages will create problems.
            # find a better way to convert to a string, e.g. see:
            # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/444746
            msg = (title and title+': ' or '') + str(exception)
            self.log_error(msg, *args, **kwargs)

    @default_level_to_parent
    def reset_callstack(self, level=None):
        # was called without a ``viewer_id`` argument, which made every call
        # fail with a TypeError; ViewerId.None_ matches log_separator's usage
        self.send_log_entry(level, '', LogEntryType.ResetCallstack,
                            ViewerId.None_)

    @default_level_to_parent
    @if_is_on
    def enter_method(self, name, level=None, instance=None):
        """Log entering a method: one log entry plus one process flow entry."""
        if instance:
            name = "%s.%s" % (type(instance).__name__, name)
        self.send_log_entry(level, name, LogEntryType.EnterMethod,
                            ViewerId.Title)
        self.send_process_flow(level, name, ProcessFlowType.EnterMethod)

    @default_level_to_parent
    @if_is_on
    def leave_method(self, name, level=None, instance=None):
        """Log leaving a method: one log entry plus one process flow entry."""
        if instance:
            name = "%s.%s" % (type(instance).__name__, name)
        self.send_log_entry(level, name, LogEntryType.LeaveMethod,
                            ViewerId.Title)
        self.send_process_flow(level, name, ProcessFlowType.LeaveMethod)

    @default_level_to_parent
    @if_is_on
    def enter_thread(self, name, level=None):
        self.send_process_flow(level, name, ProcessFlowType.EnterThread)

    @default_level_to_parent
    @if_is_on
    def leave_thread(self, name, level=None):
        self.send_process_flow(level, name, ProcessFlowType.LeaveThread)

    @default_level_to_parent
    @if_is_on
    def enter_process(self, name=None, level=None):
        self.send_process_flow(level, name or self.parent.appname,
                               ProcessFlowType.EnterProcess)
        self.send_process_flow(level, 'Main Thread', ProcessFlowType.EnterThread)

    @default_level_to_parent
    @if_is_on
    def leave_process(self, name=None, level=None):
        self.send_process_flow(level, 'Main Thread', ProcessFlowType.LeaveThread)
        self.send_process_flow(level, name or self.parent.appname,
                               ProcessFlowType.LeaveProcess)

    def track(self, func):
        """Decorator to add process flow tracking around the wrapped function.

        Python lib specific (replaces ``TrackMethod`` utilities in Delphi).
        """
        def wrapped(*args, **kwargs):
            self.enter_method(func.__name__)
            try:
                return func(*args, **kwargs)
            finally:
                self.leave_method(func.__name__)
        return wrapped

    @default_level_to_parent
    @if_is_on
    def watch(self, name, value, level=None, watch_type=None):
        """Log a named watch; the watch type and value formatting are derived
        from the value's type unless overridden via ``watch_type``."""
        if watch_type == WatchType.Address:
            tp = watch_type
            title = "%s" % id(value)
        # wtObject: currently unused by SmartInspect.pas
        elif isinstance(value, bool):   # must precede the int check
            tp = WatchType.Boolean
            title = value and 'True' or 'False'
        elif isinstance(value, int):
            tp = WatchType.Integer
            title = u"%s" % value
        elif isinstance(value, float):
            tp = WatchType.Float
            title = u"%s" % value
        elif isinstance(value, datetime.datetime):
            tp = WatchType.Float
            title = u"%s" % value   # TODO: use better format?
        else:
            tp = WatchType.String
            title = u"%s" % value
        self.send_watch(level, name, title, tp)

    @default_level_to_parent
    def add_checkpoint(self, level=None):
        """Log an auto-numbered checkpoint entry."""
        with self._checkpointlock:
            self._checkpointcounter += 1
            counter = self._checkpointcounter
        if self.is_on(level):
            # use the snapshot taken under the lock, not the live counter
            title = 'Checkpoint #%d' % counter
            self.send_log_entry(level, title, LogEntryType.Checkpoint,
                                ViewerId.Title)

    def reset_checkpoint(self):
        with self._checkpointlock:
            self._checkpointcounter = 0

    @default_level_to_parent
    @if_is_on
    def inc_counter(self, name, level=None):
        """Increment the named counter (implicitly starting at 0) and log it."""
        with self._counterlock:
            # was: ``self.counters`` -- the attribute is ``_counters``, and an
            # unknown name must start from zero rather than raise KeyError
            self._counters[name] = self._counters.get(name, 0) + 1
            value = self._counters[name]
        self.send_watch(level, name, value, WatchType.Integer)

    @default_level_to_parent
    @if_is_on
    def dec_counter(self, name, level=None):
        """Decrement the named counter (implicitly starting at 0) and log it."""
        with self._counterlock:
            self._counters[name] = self._counters.get(name, 0) - 1
            value = self._counters[name]
        self.send_watch(level, name, value, WatchType.Integer)

    @default_level_to_parent
    @if_is_on
    def reset_counter(self, name, level=None):
        # was: ``del counters[name]`` (NameError); also accepts ``level`` now
        # so the decorators can forward it without a TypeError
        with self._counterlock:
            self._counters.pop(name, None)

    # was missing ``self`` on all four custom-send stubs
    def send_custom_control_command(self):
        raise NotImplementedError()   # TODO

    def send_custom_log_entry(self):
        raise NotImplementedError()   # TODO

    def send_custom_process_flow(self):
        raise NotImplementedError()   # TODO

    def send_custom_watch(self):
        raise NotImplementedError()   # TODO

    @if_is_on
    def clear_all(self):
        self.send_control_command(ControlCommandType.ClearAll)

    @if_is_on
    def clear_auto_views(self):
        self.send_control_command(ControlCommandType.ClearAutoViews)

    @if_is_on
    def clear_watches(self):
        self.send_control_command(ControlCommandType.ClearWatches)

    @if_is_on
    def clear_log(self):
        self.send_control_command(ControlCommandType.ClearLog)

    @if_is_on
    def clear_process_flow(self):
        self.send_control_command(ControlCommandType.ClearProcessFlow)
def parse_connections(connection_str):
    """Helper utility generator; parses a connection string with
    protocol/options and yields ``Protocol`` instances.

    Combines the ConnectionsParser and OptionsParser classes from
    the Delphi implementation.

    Here's a regex that would match a full connection string; for parsing,
    we need to break it up to parse out the option list separately.

        ([a-z]+) \( # protocol name
        ( # multiple options
            ([a-z]+) = # option name =
            # option value (escape with ")
            ((?:"[^"]*(?:"")?[^"]*")|\w+)
            # option seperator or end
            (?:\s*(,\s*|(?=\))))
        )*
        \)
        \s*(,|$) # protocol separator
    """
    re_connections = re.compile(r'([a-z]+)\((.*?)\)(?:,|$)')
    re_options = re.compile(r'([a-z]+)=((?:"[^"]*(?:"")?[^"]*")|\w+)\s*(?:,\s*|$)')

    connections = []
    while connection_str:
        m_connection = re_connections.search(connection_str)
        if not m_connection:
            raise InvalidConnectionsError('Invalid connection string: "%s"'%connection_str)
        protocol_name, option_str = m_connection.groups()
        connection_str = connection_str[m_connection.end():]
        connection = Protocols.get(protocol_name)
        options = {}
        while option_str:
            m_option = re_options.search(option_str)
            if not m_option:
                raise InvalidConnectionsError('Invalid option string: "%s"'%option_str)
            option_name, option_raw_value = m_option.groups()
            option_str = option_str[m_option.end():]
            # try to convert the value string to the right type
            # (note: '0' is caught by isdigit() and stored as the int 0)
            if option_raw_value.isdigit():
                option_value = int(option_raw_value)
            elif option_raw_value in ['yes', 'true']:
                option_value = True
            elif option_raw_value in ['0', 'no', 'false']:
                option_value = False
            elif option_raw_value.startswith('"') and\
                 option_raw_value.endswith('"') and\
                 len(option_raw_value)>1:
                option_value = option_raw_value[1:-1]
            else:
                # otherwise, this must be an enum identifer - try each enum
                # in turn; the original logic here was inverted (it stopped
                # on the first *miss* and raised on a *hit*), so every enum
                # option ended up as None
                option_value = None
                for check in [Level, FileRotate]:
                    option_value = check.by_name(option_raw_value)
                    if option_value is not None:
                        break
                if option_value is None:
                    raise InvalidConnectionsError(option_raw_value)
            options[option_name] = option_value
        # finally, add the found connection to the list
        connection.options = options
        connections.append(connection)

    # Return what we have found; note that we have first parsed the whole
    # string - possible errors have already been caught. E.g. this function
    # will either succeed and return all connections, or fail fully (no
    # partial results). This affects the caller's exception handling.
    for c in connections:
        yield c
def make_connection_string(connection_list):
"""Serialize a list of ``Protocol`` instances into a string (their
options, to be exact. This is the opposite of ``parse_connections``.
In Delphi, this is implemented via the ConnectionsBuilder class.
"""
def fmtopt(option):
if option is None:
return "none"
elif isinstance(option, bool):
return option and 'true' or 'false'
elif isinstance(option, type(Level.Debug)): # EnumInstance
return option.name(short=True).lower()
elif isinstance(option, int):
return '%s' % option
else: # elif isinstance(option, basestring)
return '"%s"' % option
return ",".join(["%s(%s)" % (connection.name,
",".join([
"%s=%s" % (option, fmtopt(getattr(connection.options, option)))
for option in connection.options
]))
for connection in connection_list
])
class SmartInspect(object):
    """Main entry point; Manages a list of ``Session``s"""
    version = VERSION

    def __init__(self, appname):
        self.level = Level.Debug
        self.defaultlevel = Level.Message
        self.appname = appname
        self.hostname = socket.gethostname()
        self._enabled = False
        self._eventlock = threading.RLock()
        self._mainlock = threading.RLock()
        self._sessionlock = threading.RLock()
        self._sessions = {}
        self._connections = []

    def _set_connections(self, value):
        with self._mainlock:
            # disconnect first, and reset connection list
            self.disconnect()
            self._connections = []
            for connection in parse_connections(value):
                self._connections.append(connection)
            # if we're currently enabled,
            # then connect right away.
            if self.enabled:
                self.connect()

    def _get_connections(self):
        return make_connection_string(self._connections)

    connections = property(_get_connections, _set_connections)

    def load_connections(self):   # TODO
        """Read the connections string from a file."""
        raise NotImplementedError()

    def load_configuration(self, filename):
        """Load configuration from a file."""
        with self._mainlock:
            config = {}
            f = open(filename)
            with f:
                for line in f:
                    line = line.strip()
                    # was: ``line[1] == ';'`` -- that tested the *second*
                    # character; comment lines start with ';'
                    if line and not line.startswith(';'):
                        # split only on the first '=' so values may contain it
                        key, value = line.split("=", 1)
                        config[key.strip()] = value.strip()
            self.appname = config.get('appname', self.appname)
            self.level = Level.by_name(config.get('level', None), self.level)
            self.defaultlevel = Level.by_name(config.get('defaultlevel', None),
                                              self.defaultlevel)
            # applying connections is tougher; make sure that depending on the
            # value of "enabled" we set the new connections at the right point
            # in time, so that enabling will work with the new connections, but
            # disabling still uses the old ones.
            # (was: referenced an undefined ``configuration`` name and the
            # leftover call ``tryconnections(connections)``)
            connections = config.get('connections', '')
            if 'enabled' in config:
                # the file stores a string; treat the usual spellings as true
                enabled = config['enabled'].strip().lower() in ('1', 'yes', 'true')
                if enabled:
                    self.connections = connections
                    self.enable()
                else:
                    self.disable()
                    self.connections = connections
            else:
                self.connections = connections

    def connect(self):
        for connection in self._connections:
            connection.connect()

    def disconnect(self):
        for connection in self._connections:
            connection.disconnect()

    def add_session(self, name, store=False):
        """Create a new ``Session``; with ``store`` it is also kept in the
        instance's registry for later lookup by name."""
        result = Session(self, name)
        with self._sessionlock:
            if store:
                self._sessions[name] = result
                result.is_stored = True
        return result

    def _update_session(self, session, to, from_):
        """Re-key a stored session from name ``from_`` to ``to``."""
        with self._sessionlock:
            s = self._sessions[from_]
            if s == session:
                del self._sessions[from_]
            self._sessions[to] = session

    def get_session(self, name):
        with self._sessionlock:
            return self._sessions[name]

    def delete_session(self, session):
        """
        ``session`` can be a ``Session`` instance or the name of a stored
        session.
        """
        # was missing ``self``, misspelled ``basestring`` and deleted an
        # undefined ``name`` key
        with self._sessionlock:
            if isinstance(session, basestring):
                del self._sessions[session]
            else:
                for key, value in self._sessions.items():
                    if value == session:
                        del self._sessions[key]
                        break

    def find_protocol(self, caption):
        """Find a protocol by it's ``caption`` option, or ``None`` if not
        found.

        Python port note: This method is protected in Delphi, but is very
        useful as a public functionality, for example in our tests where we
        need to check if a dispatch() call will work, or fail.
        """
        for connection in self._connections:
            if connection.options.caption == caption:
                return connection
        return None

    def dispatch(self, caption, action, state):
        with self._mainlock:
            protocol = self.find_protocol(caption)
            if not protocol:
                raise SmartInspectError(CAPTION_NOT_FOUND_MSG)
            command = ProtocolCommand(action, state)
            protocol.dispatch(command)

    def filter(self, packet):
        """Return False to allow the packet to pass."""
        # TODO: allow callback to user
        return False

    def now(self):
        # return datetime.datetime.utcfromtimestamp(time.time())
        return datetime.datetime.now()

    def process_packet(self, packet):
        with self._mainlock:
            for connection in self._connections:
                connection.write_packet(packet)

    def send_control_command(self, control_command):
        if not self.filter(control_command):
            self.process_packet(control_command)

    def send_log_entry(self, log_entry):
        log_entry.appname = self.appname
        log_entry.hostname = self.hostname
        if not self.filter(log_entry):
            self.process_packet(log_entry)

    def send_process_flow(self, process_flow):
        process_flow.hostname = self.hostname
        if not self.filter(process_flow):
            self.process_packet(process_flow)

    def send_watch(self, watch):
        if not self.filter(watch):
            self.process_packet(watch)

    def _set_enabled(self, value):
        # was the obscure ``value and [self.enable()] or [self.disable()]``
        if value:
            self.enable()
        else:
            self.disable()

    enabled = property(lambda s: s._enabled, _set_enabled)

    def disable(self):
        if self._enabled:
            self._enabled = False
            self.disconnect()
    def enable(self):
        # First transition to enabled also opens all configured connections.
        if not self._enabled:
            self._enabled = True
            self.connect() | {
"repo_name": "miracle2k/python-smartinspect",
"path": "smartinspect/smartinspect.py",
"copies": "2",
"size": "54644",
"license": "bsd-2-clause",
"hash": -8688558772469361000,
"line_mean": 32.8285532186,
"line_max": 95,
"alpha_frac": 0.5603725935,
"autogenerated": false,
"ratio": 4.399323725947991,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.002760493282425192,
"num_lines": 1569
} |
# A smart string is one with characters in strictly increasing or decreasing order.
# Given a string M, we need to determine the minimum number of contiguous substrings in which M can be broken so that each substring is smart.
# Examples: Input: abcdcba
# Output: 2
# partitioned into abc and dcba
# Input: ffdhbbbdeeggbb
# Output:4
# grad state: 1 = uphill, -1 = downhill, 0 = ambiguous/undecided
def smartStrings(s: str):
    """Return the minimum number of contiguous pieces of ``s`` such that
    each piece is "smart" (a monotone run), computed in a single greedy
    pass.

    ``direction`` tracks the current piece: 0 while the slope is still
    undecided, 1 while rising, -1 while falling. Equal neighbours leave the
    state untouched.
    """
    pieces, direction = 1, 0
    for prev, cur in zip(s, s[1:]):
        if cur == prev:
            continue
        step = 1 if cur > prev else -1
        if direction == 0:
            direction = step          # first decided slope of this piece
        elif direction != step:
            direction = 0             # slope reversed: cut a new piece
            pieces += 1
    return pieces
# smoke checks against the worked examples documented above
print(smartStrings("abcdcba"))
print(smartStrings("ffdhbbbdeeggbb"))
print(smartStrings("abcdabcdcbaaaa"))
print(smartStrings("dcbaaaabcdddddddccccbbbaaaa")) | {
"repo_name": "saisankargochhayat/algo_quest",
"path": "Company-Based/google/smart_strings.py",
"copies": "1",
"size": "1097",
"license": "apache-2.0",
"hash": -7891037780157796000,
"line_mean": 27.1538461538,
"line_max": 142,
"alpha_frac": 0.5843208751,
"autogenerated": false,
"ratio": 3.6085526315789473,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46928735066789473,
"avg_score": null,
"num_lines": null
} |
# ASN.1 "character string" types
from pyasn1.type import univ, tag
# Restricted character string types (ITU-T X.680). Each is an OCTET STRING
# subtype that carries its own implicit universal tag number; the ones with
# an ``encoding`` attribute decode/encode their octets with that codec.
class NumericString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 18)
    )
class PrintableString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 19)
    )
class TeletexString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 20)
    )
# historical alias for TeletexString
class T61String(TeletexString): pass
class VideotexString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 21)
    )
class IA5String(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 22)
    )
class GraphicString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 25)
    )
class VisibleString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 26)
    )
# historical alias for VisibleString
class ISO646String(VisibleString): pass
class GeneralString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 27)
    )
class UniversalString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 28)
    )
    # four-octet (UCS-4) characters
    encoding = "utf-32-be"
class BMPString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 30)
    )
    # two-octet (BMP) characters
    encoding = "utf-16-be"
class UTF8String(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
    )
    encoding = "utf-8"
| {
"repo_name": "mith1979/ansible_automation",
"path": "applied_python/applied_python/lib/python2.7/site-packages/pyasn1/type/char.py",
"copies": "172",
"size": "2043",
"license": "apache-2.0",
"hash": 2133283242103077600,
"line_mean": 30.921875,
"line_max": 63,
"alpha_frac": 0.7151248164,
"autogenerated": false,
"ratio": 3.4568527918781724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# ASN.1 "character string" types
from pyasn1.type import univ, tag
# Restricted character string types (ITU-T X.680); a second bundled copy of
# pyasn1's ``char`` module. Each class is an OCTET STRING subtype with its
# own implicit universal tag number.
class UTF8String(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
    )
    encoding = "utf-8"
class NumericString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 18)
    )
class PrintableString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 19)
    )
class TeletexString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 20)
    )
class VideotexString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 21)
    )
class IA5String(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 22)
    )
class GraphicString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 25)
    )
class VisibleString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 26)
    )
class GeneralString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 27)
    )
class UniversalString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 28)
    )
    # four-octet (UCS-4) characters
    encoding = "utf-32-be"
class BMPString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 30)
    )
    # two-octet (BMP) characters
    encoding = "utf-16-be"
| {
"repo_name": "zlsun/XX-Net",
"path": "code/default/python27/1.0/lib/noarch/pyasn1/type/char.py",
"copies": "10",
"size": "1965",
"license": "bsd-2-clause",
"hash": -6364153715361140000,
"line_mean": 31.2131147541,
"line_max": 63,
"alpha_frac": 0.71043257,
"autogenerated": false,
"ratio": 3.4656084656084656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00757223362181379,
"num_lines": 61
} |
# ASN.1 "character string" types
from pyasn1.type import univ, tag
# Restricted character string types (ITU-T X.680); a third bundled copy of
# pyasn1's ``char`` module. Each class is an OCTET STRING subtype with its
# own implicit universal tag number.
class UTF8String(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
    )
    encoding = "utf-8"
class NumericString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 18)
    )
class PrintableString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 19)
    )
class TeletexString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 20)
    )
class VideotexString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 21)
    )
class IA5String(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 22)
    )
class GraphicString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 25)
    )
class VisibleString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 26)
    )
class GeneralString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 27)
    )
class UniversalString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 28)
    )
    # four-octet (UCS-4) characters
    encoding = "utf-32-be"
class BMPString(univ.OctetString):
    tagSet = univ.OctetString.tagSet.tagImplicitly(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 30)
    )
    # two-octet (BMP) characters
    encoding = "utf-16-be"
| {
"repo_name": "itielshwartz/BackendApi",
"path": "lib/pyasn1/type/char.py",
"copies": "1",
"size": "1931",
"license": "apache-2.0",
"hash": 1552037105156359200,
"line_mean": 26.1971830986,
"line_max": 63,
"alpha_frac": 0.7229414811,
"autogenerated": false,
"ratio": 3.4056437389770724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4628585220077072,
"avg_score": null,
"num_lines": null
} |
"""ASN.1 constraints checker.
"""
import string
from copy import copy
from . import ConstraintsError
from . import add_error_location
from . import compiler
from . import format_or
from .permitted_alphabet import NUMERIC_STRING
from .permitted_alphabet import PRINTABLE_STRING
from .permitted_alphabet import IA5_STRING
from .permitted_alphabet import BMP_STRING
from .permitted_alphabet import VISIBLE_STRING
# ASN.1 type names handled by the generic String constraint checker
# (size and, where supplied, permitted-alphabet checks only).
STRING_TYPES = [
    'OBJECT IDENTIFIER',
    'TeletexString',
    'NumericString',
    'PrintableString',
    'IA5String',
    'VisibleString',
    'GeneralString',
    'UTF8String',
    'BMPString',
    'GraphicString',
    'UniversalString',
    'ObjectDescriptor'
]

# ASN.1 time type names; these compile to the Time checker, which
# applies no constraints.
TIME_TYPES = [
    'UTCTime',
    'GeneralizedTime',
    'DATE',
    'TIME-OF-DAY',
    'DATE-TIME'
]
class Type(object):
    """Base class for constraint-checked types.

    Keeps an inclusive [minimum, maximum] range, where the string
    sentinels 'MIN' and 'MAX' mean that side is unbounded.
    """

    def __init__(self, name):
        self.name = name
        self.minimum = 'MIN'
        self.maximum = 'MAX'

    def set_range(self, minimum, maximum, has_extension_marker):
        # An extension marker makes the constraint extensible, so no
        # range is enforced at all.
        if has_extension_marker:
            return

        self.minimum = 'MIN' if minimum is None else minimum
        self.maximum = 'MAX' if maximum is None else maximum

    def set_size_range(self, minimum, maximum, has_extension_marker):
        self.set_range(minimum, maximum, has_extension_marker)

    def set_restricted_to_range(self,
                                minimum,
                                maximum,
                                has_extension_marker):
        self.set_range(minimum, maximum, has_extension_marker)

    def is_in_range(self, value):
        """Return True if value satisfies both (present) bounds."""
        if self.has_lower_bound() and value < self.minimum:
            return False

        if self.has_upper_bound() and value > self.maximum:
            return False

        return True

    def set_default(self, value):
        # Defaults are irrelevant to constraint checking.
        pass

    def has_lower_bound(self):
        return self.minimum != 'MIN'

    def has_upper_bound(self):
        return self.maximum != 'MAX'

    def is_bound(self):
        return self.has_lower_bound() and self.has_upper_bound()

    def encode(self, data):
        raise NotImplementedError('To be implemented by subclasses.')
class String(Type):
    """Size- and alphabet-constrained character string."""

    # Subclasses override this with their type's default alphabet.
    PERMITTED_ALPHABET = None

    def __init__(self,
                 name,
                 permitted_alphabet,
                 minimum,
                 maximum,
                 has_extension_marker):
        super(String, self).__init__(name)
        self.set_size_range(minimum, maximum, has_extension_marker)

        # An explicit FROM constraint wins over the class default.
        if permitted_alphabet is None:
            permitted_alphabet = self.PERMITTED_ALPHABET

        self.permitted_alphabet = permitted_alphabet

    @add_error_location
    def encode(self, data):
        # Length constraint first, then per-character alphabet check.
        length = len(data)

        if not self.is_in_range(length):
            raise ConstraintsError(
                'Expected between {} and {} characters, but got {}.'.format(
                    self.minimum,
                    self.maximum,
                    length))

        if self.permitted_alphabet is None:
            return

        for character in data:
            if character not in self.permitted_alphabet:
                # string.printable[:-5] drops the trailing whitespace
                # characters so the alphabet prints on one line;
                # unprintable characters are shown as '.'.
                raise ConstraintsError(
                    "Expected a character in '{}', but got '{}' (0x{:02x}).".format(
                        ''.join([c if c in string.printable[:-5] else '.'
                                 for c in self.permitted_alphabet]),
                        character if character in string.printable else '.',
                        ord(character)))
class Boolean(Type):
    def encode(self, data):
        # BOOLEAN values carry no constraints.
        pass


class Integer(Type):
    def __init__(self, name):
        super(Integer, self).__init__(name)

    @add_error_location
    def encode(self, data):
        # Enforce the (optional) value-range constraint.
        if not self.is_in_range(data):
            raise ConstraintsError(
                'Expected an integer between {} and {}, but got {}.'.format(
                    self.minimum,
                    self.maximum,
                    data))


class Float(Type):
    def encode(self, data):
        # REAL values are not constraint checked.
        pass


class Null(Type):
    def encode(self, data):
        # NULL carries no data to check.
        pass
class BitString(Type):
    """BIT STRING with an optional size (number of bits) constraint."""

    def __init__(self, name, minimum, maximum, has_extension_marker):
        super(BitString, self).__init__(name)
        self.set_size_range(minimum, maximum, has_extension_marker)

    @add_error_location
    def encode(self, data):
        # data is a (bytes, number_of_bits) pair; only the bit count is
        # constrained here.
        number_of_bits = data[1]

        if not self.is_in_range(number_of_bits):
            raise ConstraintsError(
                'Expected between {} and {} bits, but got {}.'.format(
                    self.minimum,
                    self.maximum,
                    number_of_bits))
class Enumerated(Type):
    def encode(self, data):
        # ENUMERATED values carry no constraints.
        pass


class Bytes(Type):
    """OCTET STRING with an optional size (byte count) constraint."""

    def __init__(self, name, minimum, maximum, has_extension_marker):
        super(Bytes, self).__init__(name)
        self.set_size_range(minimum, maximum, has_extension_marker)

    @add_error_location
    def encode(self, data):
        length = len(data)

        if not self.is_in_range(length):
            raise ConstraintsError(
                'Expected between {} and {} bytes, but got {}.'.format(
                    self.minimum,
                    self.maximum,
                    length))
class Dict(Type):
    """SEQUENCE/SET: checks each present member against its own type."""

    def __init__(self, name, members):
        super(Dict, self).__init__(name)
        self.members = members

    @add_error_location
    def encode(self, data):
        # Members absent from the value dict (optional/defaulted ones)
        # are simply skipped.
        for member in self.members:
            name = member.name

            if name in data:
                member.encode(data[name])
class List(Type):
    """SEQUENCE OF/SET OF: size constraint plus per-element checking."""

    def __init__(self, name, element_type, minimum, maximum, has_extension_marker):
        super(List, self).__init__(name)
        self.element_type = element_type
        self.set_size_range(minimum, maximum, has_extension_marker)

    @add_error_location
    def encode(self, data):
        length = len(data)

        if not self.is_in_range(length):
            raise ConstraintsError(
                'Expected a list of between {} and {} elements, but got {}.'.format(
                    self.minimum,
                    self.maximum,
                    length))

        # Recurse into each element with the element's own checker.
        for entry in data:
            self.element_type.encode(entry)
class Choice(Type):
    """CHOICE: checks the selected alternative against its member type."""

    def __init__(self, name, members, has_extension_marker):
        super(Choice, self).__init__(name)
        self.members = members
        self.name_to_member = {member.name: member for member in self.members}
        self.has_extension_marker = has_extension_marker

    def format_names(self):
        return format_or(sorted(self.name_to_member))

    @add_error_location
    def encode(self, data):
        # data is a (alternative_name, value) pair.
        value = data[0]

        if value in self.name_to_member:
            member = self.name_to_member[value]
        elif self.has_extension_marker:
            # Unknown alternatives are accepted for extensible choices.
            return
        else:
            raise ConstraintsError(
                "Expected choice {}, but got '{}'.".format(
                    self.format_names(),
                    value))

        member.encode(data[1])
# Restricted character string types with their default permitted
# alphabets (see .permitted_alphabet module).
class NumericString(String):
    PERMITTED_ALPHABET = NUMERIC_STRING


class PrintableString(String):
    PERMITTED_ALPHABET = PRINTABLE_STRING


class IA5String(String):
    PERMITTED_ALPHABET = IA5_STRING


class BMPString(String):
    PERMITTED_ALPHABET = BMP_STRING


class VisibleString(String):
    PERMITTED_ALPHABET = VISIBLE_STRING


class Time(Type):
    def encode(self, data):
        # Time values are not constraint checked.
        pass


class Skip(Type):
    def encode(self, data):
        # Placeholder for ANY/open types; nothing to check.
        pass
class Recursive(Type, compiler.Recursive):
    """Indirection node used to break recursive type definitions."""

    def __init__(self, name, type_name, module_name):
        super(Recursive, self).__init__(name)
        self.type_name = type_name
        self.module_name = module_name
        # Filled in by set_inner_type() once the referenced type has
        # been compiled.
        self.inner = None

    def set_inner_type(self, inner):
        self.inner = copy(inner)

    @add_error_location
    def encode(self, data):
        self.inner.encode(data)


class CompiledType(compiler.CompiledType):
    def encode(self, data):
        # Delegate to the compiled root checker.
        self._type.encode(data)
class Compiler(compiler.Compiler):
    """Compiles a type specification into constraint-checker objects."""

    def process_type(self, type_name, type_descriptor, module_name):
        compiled_type = self.compile_type(type_name,
                                          type_descriptor,
                                          module_name)

        return CompiledType(compiled_type)

    def compile_type(self, name, type_descriptor, module_name):
        """Map one type descriptor to its checker instance (recursively)."""
        # A descriptor may override the module it was defined in.
        module_name = type_descriptor.get('module-name', module_name)
        type_name = type_descriptor['type']

        if type_name in ['SEQUENCE', 'SET']:
            members, _ = self.compile_members(type_descriptor['members'],
                                              module_name)
            compiled = Dict(name, members)
        elif type_name in ['SEQUENCE OF', 'SET OF']:
            element_type = self.compile_type('',
                                             type_descriptor['element'],
                                             module_name)
            compiled = List(name,
                            element_type,
                            *self.get_size_range(
                                type_descriptor,
                                module_name))
        elif type_name == 'CHOICE':
            compiled = Choice(name,
                              *self.compile_members(type_descriptor['members'],
                                                    module_name))
        elif type_name == 'INTEGER':
            compiled = Integer(name)
        elif type_name == 'REAL':
            compiled = Float(name)
        elif type_name == 'BOOLEAN':
            compiled = Boolean(name)
        elif type_name == 'OCTET STRING':
            compiled = Bytes(name,
                             *self.get_size_range(type_descriptor,
                                                  module_name))
        elif type_name == 'ENUMERATED':
            compiled = Enumerated(name)
        elif type_name in TIME_TYPES:
            compiled = Time(name)
        elif type_name == 'BIT STRING':
            compiled = BitString(name,
                                 *self.get_size_range(type_descriptor,
                                                      module_name))
        elif type_name == 'NumericString':
            compiled = NumericString(name,
                                     self.get_permitted_alphabet(type_descriptor),
                                     *self.get_size_range(type_descriptor,
                                                          module_name))
        elif type_name == 'PrintableString':
            compiled = PrintableString(name,
                                       self.get_permitted_alphabet(type_descriptor),
                                       *self.get_size_range(type_descriptor,
                                                            module_name))
        elif type_name == 'IA5String':
            compiled = IA5String(name,
                                 self.get_permitted_alphabet(type_descriptor),
                                 *self.get_size_range(type_descriptor,
                                                      module_name))
        elif type_name == 'BMPString':
            compiled = BMPString(name,
                                 self.get_permitted_alphabet(type_descriptor),
                                 *self.get_size_range(type_descriptor,
                                                      module_name))
        elif type_name == 'VisibleString':
            compiled = VisibleString(name,
                                     self.get_permitted_alphabet(type_descriptor),
                                     *self.get_size_range(type_descriptor,
                                                          module_name))
        elif type_name in STRING_TYPES:
            # Remaining string-like types: no default alphabet.
            compiled = String(name,
                              self.get_permitted_alphabet(type_descriptor),
                              *self.get_size_range(type_descriptor,
                                                   module_name))
        elif type_name in ['ANY', 'ANY DEFINED BY', 'OpenType']:
            compiled = Skip(name)
        elif type_name == 'NULL':
            compiled = Null(name)
        elif type_name == 'EXTERNAL':
            members, _ = self.compile_members(
                self.external_type_descriptor()['members'],
                module_name)
            compiled = Dict(name, members)
        else:
            # User-defined type. Break self-reference cycles with a
            # Recursive stub that is resolved after compilation.
            if type_name in self.types_backtrace:
                compiled = Recursive(name,
                                     type_name,
                                     module_name)
                self.recursive_types.append(compiled)
            else:
                compiled = self.compile_user_type(name,
                                                  type_name,
                                                  module_name)

        if 'restricted-to' in type_descriptor:
            compiled = self.set_compiled_restricted_to(compiled,
                                                       type_descriptor,
                                                       module_name)

        return compiled

    def get_permitted_alphabet(self, type_descriptor):
        """Flatten a FROM constraint into a sorted string, or None."""
        def char_range(begin, end):
            # Inclusive character range, e.g. ('a', 'z') -> 'ab...z'.
            return ''.join([chr(char)
                            for char in range(ord(begin), ord(end) + 1)])

        if 'from' not in type_descriptor:
            return

        permitted_alphabet = type_descriptor['from']
        value = ''

        for item in permitted_alphabet:
            if isinstance(item, tuple):
                value += char_range(item[0], item[1])
            else:
                value += item

        return ''.join(sorted(value))
def compile_dict(specification, numeric_enums=False):
    """Compile a parsed specification into a constraints checker."""
    return Compiler(specification, numeric_enums).process()
| {
"repo_name": "eerimoq/asn1tools",
"path": "asn1tools/codecs/constraints_checker.py",
"copies": "1",
"size": "13905",
"license": "mit",
"hash": -6163410316042081000,
"line_mean": 29.1626898048,
"line_max": 84,
"alpha_frac": 0.5171521036,
"autogenerated": false,
"ratio": 4.519012024699382,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00015465350345767766,
"num_lines": 461
} |
'''ASN.1 definitions.
Not all ASN.1-handling code use these definitions, but when it does, they should be here.
'''
from pyasn1.type import univ, namedtype, tag
class PubKeyHeader(univ.Sequence):
    """AlgorithmIdentifier: algorithm OID plus NULL parameters."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('oid', univ.ObjectIdentifier()),
        namedtype.NamedType('parameters', univ.Null()),
    )


class OpenSSLPubKey(univ.Sequence):
    """Outer public key structure as emitted by OpenSSL."""
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('header', PubKeyHeader()),
        # This little hack (the implicit tag) allows us to get a Bit String as Octet String
        namedtype.NamedType('key', univ.OctetString().subtype(
            implicitTag=tag.Tag(tagClass=0, tagFormat=0, tagId=3))),
    )


class AsnPubKey(univ.Sequence):
    '''ASN.1 contents of DER encoded public key:

    RSAPublicKey ::= SEQUENCE {
        modulus           INTEGER,  -- n
        publicExponent    INTEGER,  -- e
    '''
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('modulus', univ.Integer()),
        namedtype.NamedType('publicExponent', univ.Integer()),
    )
| {
"repo_name": "itielshwartz/BackendApi",
"path": "lib/rsa/asn1.py",
"copies": "1",
"size": "1111",
"license": "apache-2.0",
"hash": 6756827686237756000,
"line_mean": 29.027027027,
"line_max": 91,
"alpha_frac": 0.6597659766,
"autogenerated": false,
"ratio": 4.084558823529412,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005999477738608173,
"num_lines": 37
} |
"""ASN1 encoding helpers for converting between PKCS1 and PKCS8.
Required by rsa_backend but not cryptography_backend.
"""
from pyasn1.codec.der import decoder, encoder
from pyasn1.type import namedtype, univ
# OID identifying the rsaEncryption algorithm.
RSA_ENCRYPTION_ASN1_OID = "1.2.840.113549.1.1.1"


class RsaAlgorithmIdentifier(univ.Sequence):
    """ASN1 structure for recording RSA PrivateKeyAlgorithm identifiers."""

    componentType = namedtype.NamedTypes(
        namedtype.NamedType("rsaEncryption", univ.ObjectIdentifier()), namedtype.NamedType("parameters", univ.Null())
    )


class PKCS8PrivateKey(univ.Sequence):
    """ASN1 structure for recording PKCS8 private keys."""

    componentType = namedtype.NamedTypes(
        namedtype.NamedType("version", univ.Integer()),
        namedtype.NamedType("privateKeyAlgorithm", RsaAlgorithmIdentifier()),
        namedtype.NamedType("privateKey", univ.OctetString()),
    )


class PublicKeyInfo(univ.Sequence):
    """ASN1 structure for recording PKCS8 public keys."""

    componentType = namedtype.NamedTypes(
        namedtype.NamedType("algorithm", RsaAlgorithmIdentifier()), namedtype.NamedType("publicKey", univ.BitString())
    )
def rsa_private_key_pkcs8_to_pkcs1(pkcs8_key):
    """Convert a PKCS8-encoded RSA private key to PKCS1.

    Raises ValueError when the DER input does not decode to a
    PKCS8PrivateKey.
    """
    decoded_values = decoder.decode(pkcs8_key, asn1Spec=PKCS8PrivateKey())

    try:
        decoded_key = decoded_values[0]
    except IndexError:
        raise ValueError("Invalid private key encoding")

    return decoded_key["privateKey"]


def rsa_private_key_pkcs1_to_pkcs8(pkcs1_key):
    """Convert a PKCS1-encoded RSA private key to PKCS8."""
    algorithm = RsaAlgorithmIdentifier()
    algorithm["rsaEncryption"] = RSA_ENCRYPTION_ASN1_OID

    pkcs8_key = PKCS8PrivateKey()
    pkcs8_key["version"] = 0
    pkcs8_key["privateKeyAlgorithm"] = algorithm
    # The PKCS1 DER blob is embedded verbatim as the privateKey octets.
    pkcs8_key["privateKey"] = pkcs1_key

    return encoder.encode(pkcs8_key)
def rsa_public_key_pkcs1_to_pkcs8(pkcs1_key):
    """Convert a PKCS1-encoded RSA public key to PKCS8."""
    algorithm = RsaAlgorithmIdentifier()
    algorithm["rsaEncryption"] = RSA_ENCRYPTION_ASN1_OID

    pkcs8_key = PublicKeyInfo()
    pkcs8_key["algorithm"] = algorithm
    # The PKCS1 DER blob becomes the subjectPublicKey bit string.
    pkcs8_key["publicKey"] = univ.BitString.fromOctetString(pkcs1_key)

    return encoder.encode(pkcs8_key)


def rsa_public_key_pkcs8_to_pkcs1(pkcs8_key):
    """Convert a PKCS8-encoded RSA public key to PKCS1.

    Raises ValueError when the input does not decode as PublicKeyInfo.
    """
    decoded_values = decoder.decode(pkcs8_key, asn1Spec=PublicKeyInfo())

    try:
        decoded_key = decoded_values[0]
    except IndexError:
        raise ValueError("Invalid public key encoding.")

    return decoded_key["publicKey"].asOctets()
| {
"repo_name": "mpdavis/python-jose",
"path": "jose/backends/_asn1.py",
"copies": "1",
"size": "2655",
"license": "mit",
"hash": -5183161161527873000,
"line_mean": 30.9879518072,
"line_max": 118,
"alpha_frac": 0.7122410546,
"autogenerated": false,
"ratio": 3.73943661971831,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9950660931088446,
"avg_score": 0.0002033486459725814,
"num_lines": 83
} |
# ASN.1 named integers
from pyasn1 import error
__all__ = [ 'NamedValues' ]
class NamedValues:
    """Ordered bidirectional name<->value mapping for ASN.1 named integers.

    Arguments may be plain names, which receive an automatic value equal
    to their 1-based position, or explicit (name, value) tuples.
    """

    def __init__(self, *namedValues):
        self.nameToValIdx = {}
        self.valToNameIdx = {}
        self.namedValues = ()

        # The automatic counter tracks the argument position, advancing
        # even when an explicit (name, value) tuple is supplied.
        for position, item in enumerate(namedValues, 1):
            if isinstance(item, tuple):
                name, val = item
            else:
                name, val = item, position

            if name in self.nameToValIdx:
                raise error.PyAsn1Error('Duplicate name %s' % (name,))
            self.nameToValIdx[name] = val

            if val in self.valToNameIdx:
                raise error.PyAsn1Error('Duplicate value %s=%s' % (name, val))
            self.valToNameIdx[val] = name

            self.namedValues += ((name, val),)

    def __str__(self):
        return str(self.namedValues)

    def getName(self, value):
        # None is returned for unknown values.
        return self.valToNameIdx.get(value)

    def getValue(self, name):
        # None is returned for unknown names.
        return self.nameToValIdx.get(name)

    def __getitem__(self, i):
        return self.namedValues[i]

    def __len__(self):
        return len(self.namedValues)

    def __add__(self, namedValues):
        # The right operand must be a tuple of named values.
        return self.__class__(*self.namedValues + namedValues)

    def __radd__(self, namedValues):
        return self.__class__(*namedValues + tuple(self))

    def clone(self, *namedValues):
        return self.__class__(*tuple(self) + namedValues)
# XXX clone/subtype?
| {
"repo_name": "qqzwc/XX-Net",
"path": "code/default/python27/1.0/lib/noarch/pyasn1/type/namedval.py",
"copies": "8",
"size": "1581",
"license": "bsd-2-clause",
"hash": -3622864098095066600,
"line_mean": 33.3695652174,
"line_max": 78,
"alpha_frac": 0.586970272,
"autogenerated": false,
"ratio": 3.8655256723716382,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01578101446522499,
"num_lines": 46
} |
# ASN.1 named integers
from pyasn1 import error
__all__ = ['NamedValues']
class NamedValues:
    """Ordered bidirectional name<->value mapping for ASN.1 named integers.

    Arguments may be plain names (assigned an automatic value equal to
    their 1-based position) or explicit (name, value) tuples.
    """

    def __init__(self, *namedValues):
        self.nameToValIdx = {};
        self.valToNameIdx = {}
        self.namedValues = ()
        automaticVal = 1
        for namedValue in namedValues:
            if isinstance(namedValue, tuple):
                name, val = namedValue
            else:
                name = namedValue
                val = automaticVal
            if name in self.nameToValIdx:
                raise error.PyAsn1Error('Duplicate name %s' % (name,))
            self.nameToValIdx[name] = val
            if val in self.valToNameIdx:
                raise error.PyAsn1Error('Duplicate value %s=%s' % (name, val))
            self.valToNameIdx[val] = name
            self.namedValues = self.namedValues + ((name, val),)
            # The automatic counter advances once per argument, even for
            # explicit (name, value) tuples.
            automaticVal = automaticVal + 1

    def __str__(self):
        return str(self.namedValues)

    def getName(self, value):
        # Returns None for unknown values.
        if value in self.valToNameIdx:
            return self.valToNameIdx[value]

    def getValue(self, name):
        # Returns None for unknown names.
        if name in self.nameToValIdx:
            return self.nameToValIdx[name]

    def __getitem__(self, i):
        return self.namedValues[i]

    def __len__(self):
        return len(self.namedValues)

    def __add__(self, namedValues):
        # The right operand must be a tuple of named values.
        return self.__class__(*self.namedValues + namedValues)

    def __radd__(self, namedValues):
        return self.__class__(*namedValues + tuple(self))

    def clone(self, *namedValues):
        return self.__class__(*tuple(self) + namedValues)

# XXX clone/subtype?
| {
"repo_name": "itielshwartz/BackendApi",
"path": "lib/pyasn1/type/namedval.py",
"copies": "1",
"size": "1615",
"license": "apache-2.0",
"hash": 1281108558821286000,
"line_mean": 28.9074074074,
"line_max": 78,
"alpha_frac": 0.5746130031,
"autogenerated": false,
"ratio": 3.929440389294404,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5004053392394404,
"avg_score": null,
"num_lines": null
} |
"""ASN.1 object identifiers. This module contains a dictionary of known OIDs.
See http://luca.ntop.org/Teaching/Appunti/asn1.html for a good introduction
to ASN.1.
"""
from ct.crypto import error
from ct.crypto.asn1 import tag
from ct.crypto.asn1 import types
@types.Universal(6, tag.PRIMITIVE)
class ObjectIdentifier(types.Simple):
    """Object identifier."""

    def _name(self, dict_idx):
        # dict_idx selects the long (0) or short (1) name from the
        # table of known OIDs.
        try:
            return _OID_NAME_DICT[self][dict_idx]
        except KeyError:
            # fall back to OID
            return ".".join(map(str, self._value))

    @property
    def short_name(self):
        """Return the short name representation of an OID."""
        return self._name(1)

    @property
    def long_name(self):
        """Return the long name representation of an OID."""
        return self._name(0)

    def __str__(self):
        return self.short_name

    @property
    def value(self):
        """The value of an OID is a tuple of integers."""
        return self._value

    @staticmethod
    def _encode_component(value):
        """Encode an OID component as a bytearray.

        Args:
            value: an integer component value

        Returns:
            a bytearray representing the encoded component.
        """
        int_bytes = bytearray()
        # Encode in base-128.
        # All bytes apart from the lsb have the high bit set.
        int_bytes.append(value & 0x7f)
        value >>= 7
        while value:
            int_bytes.append(value & 0x7f | 0x80)
            value >>= 7
        int_bytes.reverse()
        return int_bytes

    @classmethod
    def _read_component(cls, int_bytes):
        """Parse a single component from a non-empty bytearray.

        Args:
            int_bytes: a non-empty bytearray.

        Returns:
            a (component, rest) tuple with the decoded integer and the
            remaining bytes of the bytearray.
        """
        ret = 0
        i = 0
        while int_bytes[i] & 0x80:
            num = int_bytes[i] & 0x7f
            if not ret and not num:
                # The component must be encoded with as few digits as possible,
                # i.e., leading zeroes are not allowed. Since ASN.1 libraries
                # interpret leading 0x80-octets differently, this may be
                # indicative of an attempt to trick a browser into accepting a
                # certificate it shouldn't. See page 7 of
                # www.cosic.esat.kuleuven.be/publications/article-1432.pdf
                raise error.ASN1Error("Leading 0x80 octets in the base-128 "
                                      "encoding of OID component")
            ret |= num
            ret <<= 7
            i += 1
        ret |= int_bytes[i]
        return ret, int_bytes[i+1:]

    def _encode_value(self):
        int_bytes = bytearray()
        # ASN.1 specifies that the first two components are encoded together
        # as c0*40 + c1.
        int_bytes += self._encode_component(self._value[0]*40 + self._value[1])
        for v in self._value[2:]:
            int_bytes += self._encode_component(v)
        # NOTE(review): str(bytearray) returning the raw bytes is
        # Python 2 behaviour; under Python 3 this would yield a repr.
        return str(int_bytes)

    @classmethod
    def _convert_value(cls, value):
        # Accept another ObjectIdentifier, a dotted string, or an
        # iterable of ints; normalize to a tuple of ints.
        if isinstance(value, ObjectIdentifier):
            return value.value
        else:
            if isinstance(value, str):
                value = [int(v) for v in value.split(".")]
            if len(value) < 2:
                raise ValueError("OID must have at least 2 components")
            if not all([v >= 0 for v in value]):
                raise ValueError("OID cannot have negative components")
            if value[0] > 2:
                raise ValueError("First OID component must be 0, 1 or 2, "
                                 "got %d" % value[0])
            if value[0] <= 1 and value[1] > 39:
                raise ValueError("Second OID component must be <= 39 if "
                                 "first component is <= 1; got %d, %d" %
                                 (value[0], value[1]))
            return tuple(value)

    @classmethod
    def _decode_value(cls, buf, strict=True):
        """Decode from a string or string buffer."""
        # Fast path: known OIDs come from a precomputed table.
        if buf in _OID_DECODING_DICT:
            return _OID_DECODING_DICT[buf]
        if not buf:
            raise error.ASN1Error("Invalid encoding")
        int_bytes = bytearray(buf)
        # Last byte can't have the high bit set.
        if int_bytes[-1] & 0x80:
            raise error.ASN1Error("Invalid encoding")
        components = []
        first, int_bytes = cls._read_component(int_bytes)
        # The first encoded component packs c0*40 + c1; the thresholds
        # 40/80 recover c0 in {0, 1, 2}.
        if first < 40:
            components += [0, first]
        elif first < 80:
            components += [1, first - 40]
        else:
            components += [2, first - 80]
        while int_bytes:
            component, int_bytes = cls._read_component(int_bytes)
            components.append(component)
        return tuple(components)
###############################################################################
# Known object identifiers #
# #
# If you add a new OID, make sure to also add its commonly known alias to #
# _OID_NAME_DICT. #
###############################################################################
# Signature and public key algorithms
# RFC 3279
RSA_ENCRYPTION = ObjectIdentifier(value="1.2.840.113549.1.1.1")
MD2_WITH_RSA_ENCRYPTION = ObjectIdentifier(value="1.2.840.113549.1.1.2")
MD5_WITH_RSA_ENCRYPTION = ObjectIdentifier(value="1.2.840.113549.1.1.4")
SHA1_WITH_RSA_ENCRYPTION = ObjectIdentifier(value="1.2.840.113549.1.1.5")
ID_DSA = ObjectIdentifier(value="1.2.840.10040.4.1")
ID_DSA_WITH_SHA1 = ObjectIdentifier(value="1.2.840.10040.4.3")
ID_EC_PUBLICKEY = ObjectIdentifier(value="1.2.840.10045.2.1")
ECDSA_WITH_SHA1 = ObjectIdentifier(value="1.2.840.10045.4.1")
# RFC 5758
ECDSA_WITH_SHA224 = ObjectIdentifier(value="1.2.840.10045.4.3.1")
ECDSA_WITH_SHA256 = ObjectIdentifier(value="1.2.840.10045.4.3.2")
ECDSA_WITH_SHA384 = ObjectIdentifier(value="1.2.840.10045.4.3.3")
ECDSA_WITH_SHA512 = ObjectIdentifier(value="1.2.840.10045.4.3.4")
# RFC 4055
ID_RSASSA_PSS = ObjectIdentifier(value="1.2.840.113549.1.1.10")
ID_SHA256_WITH_RSA_ENCRYPTION = ObjectIdentifier(value="1.2.840.113549.1.1.11")
ID_SHA384_WITH_RSA_ENCRYPTION = ObjectIdentifier(value="1.2.840.113549.1.1.12")
ID_SHA512_WITH_RSA_ENCRYPTION = ObjectIdentifier(value="1.2.840.113549.1.1.13")
ID_SHA224_WITH_RSA_ENCRYPTION = ObjectIdentifier(value="1.2.840.113549.1.1.14")
# RFC 4491
ID_GOSTR3411_94_WITH_GOSTR3410_94 = ObjectIdentifier(value="1.2.643.2.2.4")
ID_GOSTR3411_94_WITH_GOSTR3410_2001 = ObjectIdentifier(value="1.2.643.2.2.3")
# RFC 5758
ID_DSA_WITH_SHA224 = ObjectIdentifier(value="2.16.840.1.101.3.4.3.1")
ID_DSA_WITH_SHA256 = ObjectIdentifier(value="2.16.840.1.101.3.4.3.2")

# Naming attributes (RFC 5280)
ID_AT_NAME = ObjectIdentifier(value="2.5.4.41")
ID_AT_SURNAME = ObjectIdentifier(value="2.5.4.4")
ID_AT_GIVEN_NAME = ObjectIdentifier(value="2.5.4.42")
ID_AT_INITIALS = ObjectIdentifier(value="2.5.4.43")
ID_AT_GENERATION_QUALIFIER = ObjectIdentifier(value="2.5.4.44")
ID_AT_COMMON_NAME = ObjectIdentifier(value="2.5.4.3")
ID_AT_LOCALITY_NAME = ObjectIdentifier(value="2.5.4.7")
ID_AT_STATE_OR_PROVINCE_NAME = ObjectIdentifier(value="2.5.4.8")
ID_AT_ORGANIZATION_NAME = ObjectIdentifier(value="2.5.4.10")
ID_AT_ORGANIZATIONAL_UNIT_NAME = ObjectIdentifier(value="2.5.4.11")
ID_AT_TITLE = ObjectIdentifier(value="2.5.4.12")
ID_AT_DN_QUALIFIER = ObjectIdentifier(value="2.5.4.46")
ID_AT_COUNTRY_NAME = ObjectIdentifier(value="2.5.4.6")
ID_AT_SERIAL_NUMBER = ObjectIdentifier(value="2.5.4.5")
ID_AT_PSEUDONYM = ObjectIdentifier(value="2.5.4.65")
ID_DOMAIN_COMPONENT = ObjectIdentifier(value="0.9.2342.19200300.100.1.25")
ID_EMAIL_ADDRESS = ObjectIdentifier(value="1.2.840.113549.1.9.1")

# Other naming attributes commonly found in certs
ID_AT_STREET_ADDRESS = ObjectIdentifier(value="2.5.4.9")
ID_AT_DESCRIPTION = ObjectIdentifier(value="2.5.4.13")
ID_AT_BUSINESS_CATEGORY = ObjectIdentifier(value="2.5.4.15")
ID_AT_POSTAL_CODE = ObjectIdentifier(value="2.5.4.17")
ID_AT_POST_OFFICE_BOX = ObjectIdentifier(value="2.5.4.18")
# Standard X509v3 certificate extensions
ID_CE_AUTHORITY_KEY_IDENTIFIER = ObjectIdentifier(value="2.5.29.35")
ID_CE_SUBJECT_KEY_IDENTIFIER = ObjectIdentifier(value="2.5.29.14")
ID_CE_KEY_USAGE = ObjectIdentifier(value="2.5.29.15")
ID_CE_PRIVATE_KEY_USAGE_PERIOD = ObjectIdentifier(value="2.5.29.16")
ID_CE_CERTIFICATE_POLICIES = ObjectIdentifier(value="2.5.29.32")
ID_CE_SUBJECT_ALT_NAME = ObjectIdentifier(value="2.5.29.17")
ID_CE_ISSUER_ALT_NAME = ObjectIdentifier(value="2.5.29.18")
ID_CE_SUBJECT_DIRECTORY_ATTRIBUTES = ObjectIdentifier(value="2.5.29.9")
ID_CE_BASIC_CONSTRAINTS = ObjectIdentifier(value="2.5.29.19")
ID_CE_NAME_CONSTRAINTS = ObjectIdentifier(value="2.5.29.30")
# Bug fix: this was "2.5.29.30", a copy-paste duplicate of the
# nameConstraints OID above, which also made the two constants collide
# as _OID_NAME_DICT keys. RFC 5280 (section 4.2.1.11) defines
# id-ce-policyConstraints as { id-ce 36 }.
ID_CE_POLICY_CONSTRAINTS = ObjectIdentifier(value="2.5.29.36")
ID_CE_EXT_KEY_USAGE = ObjectIdentifier(value="2.5.29.37")
ID_CE_CRL_DISTRIBUTION_POINTS = ObjectIdentifier(value="2.5.29.31")
ID_CE_INHIBIT_ANY_POLICY = ObjectIdentifier(value="2.5.29.54")
ID_PE_AUTHORITY_INFO_ACCESS = ObjectIdentifier(value="1.3.6.1.5.5.7.1.1")
ID_PE_SUBJECT_INFO_ACCESS = ObjectIdentifier(value="1.3.6.1.5.5.7.1.11")

# RFC 3280 - Used in ExtendedKeyUsage extension
ID_KP_SERVER_AUTH = ObjectIdentifier(value="1.3.6.1.5.5.7.3.1")
ID_KP_CLIENT_AUTH = ObjectIdentifier(value="1.3.6.1.5.5.7.3.2")
ID_KP_CODE_SIGNING = ObjectIdentifier(value="1.3.6.1.5.5.7.3.3")
ID_KP_EMAIL_PROTECTION = ObjectIdentifier(value="1.3.6.1.5.5.7.3.4")
ID_KP_TIME_STAMPING = ObjectIdentifier(value="1.3.6.1.5.5.7.3.8")
ID_KP_OCSP_SIGNING = ObjectIdentifier(value="1.3.6.1.5.5.7.3.9")

# RFC 3280 - Used in Authority Info Access extension
ID_AD_OCSP = ObjectIdentifier(value="1.3.6.1.5.5.7.48.1")
ID_AD_CA_ISSUERS = ObjectIdentifier(value="1.3.6.1.5.5.7.48.2")

# Certificate Policy related OIDs
ID_QT_CPS = ObjectIdentifier(value="1.3.6.1.5.5.7.2.1")
ID_QT_UNOTICE = ObjectIdentifier(value="1.3.6.1.5.5.7.2.2")
ANY_POLICY = ObjectIdentifier(value="2.5.29.32.0")

# CT Specific
CT_EMBEDDED_SCT_LIST = ObjectIdentifier(value="1.3.6.1.4.1.11129.2.4.2")
CT_POISON = ObjectIdentifier(value="1.3.6.1.4.1.11129.2.4.3")
CT_PRECERTIFICATE_SIGNING = ObjectIdentifier(value="1.3.6.1.4.1.11129.2.4.4")
# Maps ObjectIdentifier -> (long name, short name).
_OID_NAME_DICT = {
    # Object identifier long names taken verbatim from the RFCs.
    # Short names are colloquial.
    RSA_ENCRYPTION: ("rsaEncryption", "RSA"),
    MD2_WITH_RSA_ENCRYPTION: ("md2WithRSAEncryption", "RSA-MD2"),
    MD5_WITH_RSA_ENCRYPTION: ("md5WithRSAEncryption", "RSA-MD5"),
    SHA1_WITH_RSA_ENCRYPTION: ("sha-1WithRSAEncryption", "RSA-SHA1"),
    ID_DSA: ("id-dsa", "DSA"),
    ID_DSA_WITH_SHA1: ("id-dsa-with-sha1", "DSA-SHA1"),
    ID_EC_PUBLICKEY: ("id-ecPublicKey", "EC-PUBKEY"),
    ECDSA_WITH_SHA1: ("ecdsa-with-SHA1", "ECDSA-SHA1"),
    ECDSA_WITH_SHA224: ("ecdsa-with-SHA224", "ECDSA-SHA224"),
    ECDSA_WITH_SHA256: ("ecdsa-with-SHA256", "ECDSA-SHA256"),
    ECDSA_WITH_SHA384: ("ecdsa-with-SHA384", "ECDSA-SHA384"),
    ECDSA_WITH_SHA512: ("ecdsa-with-SHA512", "ECDSA-SHA512"),
    ID_RSASSA_PSS: ("id-RSASSA-PSS", "RSASSA-PSS"),
    ID_GOSTR3411_94_WITH_GOSTR3410_94: ("id-GostR3411-94-with-GostR3410-94",
                                        "GOST94"),
    ID_GOSTR3411_94_WITH_GOSTR3410_2001: ("id-GostR3411-94-with-GostR3410-2001",
                                          "GOST2001"),
    ID_SHA256_WITH_RSA_ENCRYPTION: ("sha256WithRSAEncryption", "RSA-SHA256"),
    ID_SHA384_WITH_RSA_ENCRYPTION: ("sha384WithRSAEncryption", "RSA-SHA384"),
    ID_SHA512_WITH_RSA_ENCRYPTION: ("sha512WithRSAEncryption", "RSA-SHA512"),
    ID_SHA224_WITH_RSA_ENCRYPTION: ("sha224WithRSAEncryption", "RSA-SHA224"),
    ID_DSA_WITH_SHA224: ("id-dsa-with-sha224", "DSA-SHA224"),
    ID_DSA_WITH_SHA256: ("id-dsa-with-sha256", "DSA-SHA256"),
    ID_AT_NAME: ("id-at-name", "name"),
    ID_AT_SURNAME: ("id-at-surname", "surname"),
    ID_AT_GIVEN_NAME: ("id-at-givenName", "givenName"),
    ID_AT_INITIALS: ("id-at-initials", "initials"),
    ID_AT_GENERATION_QUALIFIER: ("id-at-generationQualifier", "genQualifier"),
    ID_AT_COMMON_NAME: ("id-at-commonName", "CN"),
    ID_AT_LOCALITY_NAME: ("id-at-localityName", "L"),
    ID_AT_STATE_OR_PROVINCE_NAME: ("id-at-stateOrProvinceName", "ST"),
    ID_AT_ORGANIZATION_NAME: ("id-at-organizationName", "O"),
    ID_AT_ORGANIZATIONAL_UNIT_NAME: ("id-at-organizationalUnitName", "OU"),
    ID_AT_TITLE: ("id-at-title", "title"),
    ID_AT_DN_QUALIFIER: ("id-at-dnQualifier", "dnQualifier"),
    ID_AT_COUNTRY_NAME: ("id-at-countryName", "C"),
    ID_AT_SERIAL_NUMBER: ("id-at-serialNumber", "serialNumber"),
    ID_AT_PSEUDONYM: ("id-at-pseudonym", "pseudonym"),
    ID_DOMAIN_COMPONENT: ("id-domainComponent", "domainComponent"),
    ID_EMAIL_ADDRESS: ("id-emailAddress", "email"),
    ID_AT_STREET_ADDRESS: ("id-at-streetAddress", "streetAddress"),
    ID_AT_DESCRIPTION: ("id-at-description", "description"),
    ID_AT_BUSINESS_CATEGORY: ("id-at-businessCategory", "businessCategory"),
    ID_AT_POSTAL_CODE: ("id-at-postalCode", "postalCode"),
    ID_AT_POST_OFFICE_BOX: ("id-at-postOfficeBox", "postOfficeBox"),
    ID_CE_AUTHORITY_KEY_IDENTIFIER: ("id-ce-authorityKeyIdentifier",
                                     "authorityKeyIdentifier"),
    ID_CE_SUBJECT_KEY_IDENTIFIER: ("id-ce-subjectKeyIdentifier",
                                   "subjectKeyIdentifier"),
    ID_CE_KEY_USAGE: ("id-ce-keyUsage", "keyUsage"),
    ID_CE_PRIVATE_KEY_USAGE_PERIOD: ("id-ce-privateKeyUsagePeriod",
                                     "privateKeyUsagePeriod"),
    ID_CE_CERTIFICATE_POLICIES: ("id-ce-certificatePolicies",
                                 "certificatePolicies"),
    ID_CE_SUBJECT_ALT_NAME: ("id-ce-subjectAltName", "subjectAltName"),
    ID_CE_ISSUER_ALT_NAME: ("id-ce-issuerAltName", "issuerAltName"),
    ID_CE_SUBJECT_DIRECTORY_ATTRIBUTES: ("id-ce-subjectDirectoryAttributes",
                                         "subjectDirectoryAttributes"),
    ID_CE_BASIC_CONSTRAINTS: ("id-ce-basicConstraints", "basicConstraints"),
    ID_CE_NAME_CONSTRAINTS: ("id-ce-nameConstraints", "nameConstraints"),
    ID_CE_POLICY_CONSTRAINTS: ("id-ce-policyConstraints", "policyConstraints"),
    ID_CE_EXT_KEY_USAGE: ("id-ce-extKeyUsage", "extendedKeyUsage"),
    ID_CE_CRL_DISTRIBUTION_POINTS: ("id-ce-cRLDistributionPoints",
                                    "CRLDistributionPoints"),
    ID_CE_INHIBIT_ANY_POLICY: ("id-ce-inhibitAnyPolicy", "inhibitAnyPolicy"),
    ID_PE_AUTHORITY_INFO_ACCESS: ("id-pe-authorityInfoAccess",
                                  "authorityInformationAccess"),
    ID_PE_SUBJECT_INFO_ACCESS: ("id-pe-subjectInfoAccess",
                                "subjectInformationAccess"),
    ID_KP_SERVER_AUTH: ("id-kp-serverAuth", "serverAuth"),
    ID_KP_CLIENT_AUTH: ("id-kp-clientAuth", "clientAuth"),
    ID_KP_CODE_SIGNING: ("id-kp-codeSigning", "codeSigning"),
    ID_KP_EMAIL_PROTECTION: ("id-kp-emailProtection", "emailProtection"),
    ID_KP_TIME_STAMPING: ("id-kp-timeStamping", "timeStamping"),
    ID_KP_OCSP_SIGNING: ("id-kp-OCSPSigning", "OCSPSigning"),
    ID_AD_OCSP: ("id-ad-ocsp", "OCSP"),
    ID_AD_CA_ISSUERS: ("id-ad-caIssuers", "caIssuers"),
    ID_QT_CPS: ("id-qt-cps", "CPS"),
    ID_QT_UNOTICE: ("id-qt-unotice", "UserNotice"),
    ANY_POLICY: ("anyPolicy", "anyPolicy"),
    CT_EMBEDDED_SCT_LIST: ("ctEmbeddedSCT", "ctEmbeddedSCT"),
    CT_POISON: ("ctPoison", "ctPoison"),
    CT_PRECERTIFICATE_SIGNING: ("ctPrecertificateSigningCert", "ctPrecertificateSigningCert")
}

# Precomputed encoded-value -> component-tuple table used by
# ObjectIdentifier._decode_value for fast decoding of known OIDs.
_OID_DECODING_DICT = {}
for oid in _OID_NAME_DICT:
    _OID_DECODING_DICT[oid._encode_value()] = oid._value
| {
"repo_name": "kyprizel/certificate-transparency",
"path": "python/ct/crypto/asn1/oid.py",
"copies": "34",
"size": "15864",
"license": "apache-2.0",
"hash": -2977350829034045000,
"line_mean": 44.9826086957,
"line_max": 93,
"alpha_frac": 0.6265758951,
"autogenerated": false,
"ratio": 3.042577675489068,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00016461100019276605,
"num_lines": 345
} |
"""ASN.1 specification for X509 extensions."""
from ct.crypto.asn1 import named_value
from ct.crypto.asn1 import oid
from ct.crypto.asn1 import tag
from ct.crypto.asn1 import types
from ct.crypto.asn1 import x509_common
from ct.crypto.asn1 import x509_name
# Standard extensions from RFC 5280.
class BasicConstraints(types.Sequence):
    """BasicConstraints extension value (RFC 5280, Section 4.2.1.9)."""
    print_delimiter = ", "
    components = (
        (types.Component("cA", types.Boolean, default=False)),
        (types.Component("pathLenConstraint", types.Integer, optional=True))
    )
class SubjectAlternativeNames(types.SequenceOf):
    """SubjectAltName extension value: a sequence of GeneralNames."""
    print_delimiter = ", "
    component = x509_name.GeneralName
class KeyUsage(types.NamedBitList):
    """KeyUsage extension value (RFC 5280, Section 4.2.1.3).

    A bit string; each named bit grants one use of the certified key.
    """
    DIGITAL_SIGNATURE = named_value.NamedValue("digitalSignature", 0)
    NON_REPUDIATION = named_value.NamedValue("nonRepudiation", 1)
    KEY_ENCIPHERMENT = named_value.NamedValue("keyEncipherment", 2)
    DATA_ENCIPHERMENT = named_value.NamedValue("dataEncipherment", 3)
    KEY_AGREEMENT = named_value.NamedValue("keyAgreement", 4)
    KEY_CERT_SIGN = named_value.NamedValue("keyCertSign", 5)
    CRL_SIGN = named_value.NamedValue("cRLSign", 6)
    ENCIPHER_ONLY = named_value.NamedValue("encipherOnly", 7)
    DECIPHER_ONLY = named_value.NamedValue("decipherOnly", 8)
    # Order matches the bit positions above.
    named_bit_list = (DIGITAL_SIGNATURE, NON_REPUDIATION, KEY_ENCIPHERMENT,
                      DATA_ENCIPHERMENT, KEY_AGREEMENT, KEY_CERT_SIGN,
                      CRL_SIGN, ENCIPHER_ONLY, DECIPHER_ONLY)
class KeyPurposeID(oid.ObjectIdentifier):
    """OID naming a key purpose inside ExtendedKeyUsage (RFC 5280, 4.2.1.12)."""
    pass
class ExtendedKeyUsage(types.SequenceOf):
    """ExtendedKeyUsage extension value: a sequence of KeyPurposeIDs."""
    print_delimiter = ", "
    print_labels = False
    component = KeyPurposeID
class KeyIdentifier(types.OctetString):
    """Opaque key identifier octet string."""
    pass
class SubjectKeyIdentifier(KeyIdentifier):
    """SubjectKeyIdentifier extension value (RFC 5280, Section 4.2.1.2)."""
    pass
# Component keys of the AuthorityKeyIdentifier sequence below.
KEY_IDENTIFIER = "keyIdentifier"
AUTHORITY_CERT_ISSUER = "authorityCertIssuer"
AUTHORITY_CERT_SERIAL_NUMBER = "authorityCertSerialNumber"
class AuthorityKeyIdentifier(types.Sequence):
    """AuthorityKeyIdentifier extension value (RFC 5280, Section 4.2.1.1)."""
    components = (
        types.Component(KEY_IDENTIFIER, KeyIdentifier.implicit(0), optional=True),
        types.Component(AUTHORITY_CERT_ISSUER, x509_name.GeneralNames.implicit(1),
                        optional=True),
        types.Component(AUTHORITY_CERT_SERIAL_NUMBER,
                        x509_common.CertificateSerialNumber.implicit(2),
                        optional=True)
    )
class DisplayText(types.Choice):
    """DisplayText for certificate policy qualifiers (RFC 5280, 4.2.1.4)."""
    components = {
        "ia5String": types.IA5String,
        "visibleString": types.VisibleString,
        "bmpString": types.BMPString,
        "utf8String": types.UTF8String
    }
class NoticeNumbers(types.SequenceOf):
    """Sequence of notice numbers within a NoticeReference."""
    component = types.Integer
class NoticeReference(types.Sequence):
    """Reference to an organization's textual notice(s)."""
    components = (
        types.Component("organization", DisplayText),
        types.Component("noticeNumbers", NoticeNumbers)
    )
# Component keys of the UserNotice sequence below.
NOTICE_REF = "noticeRef"
EXPLICIT_TEXT = "explicitText"
class UserNotice(types.Sequence):
    """UserNotice policy qualifier (RFC 5280, Section 4.2.1.4)."""
    components = (
        types.Component(NOTICE_REF, NoticeReference, optional=True),
        types.Component(EXPLICIT_TEXT, DisplayText, optional=True)
    )
class CPSuri(types.IA5String):
    """URI pointing to a Certification Practice Statement."""
    pass
# Maps policy qualifier OIDs to the ASN.1 type of the qualifier value.
_POLICY_QUALIFIER_DICT = {
    oid.ID_QT_CPS: CPSuri,
    oid.ID_QT_UNOTICE: UserNotice
}
# Component keys of the PolicyQualifierInfo sequence below.
POLICY_QUALIFIER_ID = "policyQualifierId"
QUALIFIER = "qualifier"
class PolicyQualifierInfo(types.Sequence):
    """A policy qualifier: an OID plus a value whose type the OID selects."""
    print_labels = False
    print_delimiter = ": "
    components = (
        types.Component(POLICY_QUALIFIER_ID, oid.ObjectIdentifier),
        # The qualifier is decoded according to policyQualifierId via lookup.
        types.Component(QUALIFIER, types.Any, defined_by="policyQualifierId",
                        lookup=_POLICY_QUALIFIER_DICT)
    )
class PolicyQualifiers(types.SequenceOf):
    """Sequence of PolicyQualifierInfo entries."""
    print_labels = False
    component = PolicyQualifierInfo
# Component keys of the PolicyInformation sequence below.
POLICY_IDENTIFIER = "policyIdentifier"
POLICY_QUALIFIERS = "policyQualifiers"
class PolicyInformation(types.Sequence):
    """One certificate policy: its OID plus optional qualifiers."""
    components = (
        types.Component(POLICY_IDENTIFIER, oid.ObjectIdentifier),
        types.Component(POLICY_QUALIFIERS, PolicyQualifiers, optional=True)
    )
class CertificatePolicies(types.SequenceOf):
    """CertificatePolicies extension value (RFC 5280, Section 4.2.1.4)."""
    component = PolicyInformation
# Component keys of the DistributionPointName choice below.
FULL_NAME = "fullName"
RELATIVE_NAME = "nameRelativetoCRLIssuer"
class DistributionPointName(types.Choice):
    """CRL distribution point name: full GeneralNames or a relative RDN."""
    components = {
        FULL_NAME: x509_name.GeneralNames.implicit(0),
        RELATIVE_NAME: x509_name.RelativeDistinguishedName.implicit(1)
    }
class ReasonFlags(types.NamedBitList):
    """CRL distribution point reason flags (RFC 5280, Section 4.2.1.13).

    Bug fix: CA_COMPROMISE previously ended with a stray trailing comma,
    which bound it to the 1-tuple ``(NamedValue,)`` instead of the
    NamedValue itself, so named_bit_list contained a tuple where every
    other entry is a NamedValue.
    """
    UNUSED = named_value.NamedValue("unused", 0)
    KEY_COMPROMISE = named_value.NamedValue("keyCompromise", 1)
    CA_COMPROMISE = named_value.NamedValue("cACompromise", 2)
    AFFILIATION_CHANGED = named_value.NamedValue("affiliationChanged", 3)
    SUPERSEDED = named_value.NamedValue("superseded", 4)
    CESSATION_OF_OPERATION = named_value.NamedValue("cessationOfOperation", 5)
    CERTIFICATE_HOLD = named_value.NamedValue("certificateHold", 6)
    PRIVILEGE_WITHDRAWN = named_value.NamedValue("privilegeWithdrawn", 7)
    AA_COMPROMISE = named_value.NamedValue("aACompromise", 8)
    # Order matches the bit positions above.
    named_bit_list = (UNUSED, KEY_COMPROMISE, CA_COMPROMISE,
                      AFFILIATION_CHANGED, SUPERSEDED, CESSATION_OF_OPERATION,
                      CERTIFICATE_HOLD, PRIVILEGE_WITHDRAWN, AA_COMPROMISE)
# Component keys of the DistributionPoint sequence below.
DISTRIBUTION_POINT = "distributionPoint"
REASONS = "reasons"
CRL_ISSUER = "cRLIssuer"
class DistributionPoint(types.Sequence):
    """One CRL distribution point (RFC 5280, Section 4.2.1.13)."""
    components = (
        types.Component(DISTRIBUTION_POINT, DistributionPointName.explicit(0),
                        optional=True),
        types.Component(REASONS, ReasonFlags.implicit(1), optional=True),
        types.Component(CRL_ISSUER, x509_name.GeneralNames.implicit(2),
                        optional=True)
    )
class CRLDistributionPoints(types.SequenceOf):
    """CRLDistributionPoints extension value: a sequence of points."""
    component = DistributionPoint
# Component keys of the AccessDescription sequence below.
ACCESS_METHOD = "accessMethod"
ACCESS_LOCATION = "accessLocation"
class AccessDescription(types.Sequence):
    """One access method/location pair (RFC 5280, Section 4.2.2.1)."""
    print_labels = False
    print_delimiter = ": "
    components = (
        types.Component(ACCESS_METHOD, oid.ObjectIdentifier),
        types.Component(ACCESS_LOCATION, x509_name.GeneralName)
    )
# Called AuthorityInfoAccessSyntax in RFC 5280.
class AuthorityInfoAccess(types.SequenceOf):
    """AuthorityInfoAccess extension value: a sequence of AccessDescriptions."""
    component = AccessDescription
# Hack! This is not a valid ASN.1 definition but it works: an extension
# value is defined as a DER-encoded value wrapped in an OctetString.
# This is functionally equivalent to an Any type that is tagged with the
# OctetString tag.
@types.Universal(4, tag.PRIMITIVE)
class ExtensionValue(types.Any):
    """Any value carried inside an extension's OCTET STRING wrapper."""
    pass
# Maps extension OIDs to the ASN.1 type used to decode the extension value.
# Extensions with OIDs not listed here keep their raw ExtensionValue.
_EXTENSION_DICT = {
    oid.ID_CE_BASIC_CONSTRAINTS: BasicConstraints,
    oid.ID_CE_SUBJECT_ALT_NAME: SubjectAlternativeNames,
    oid.ID_CE_KEY_USAGE: KeyUsage,
    oid.ID_CE_EXT_KEY_USAGE: ExtendedKeyUsage,
    oid.ID_CE_SUBJECT_KEY_IDENTIFIER: SubjectKeyIdentifier,
    oid.ID_CE_AUTHORITY_KEY_IDENTIFIER: AuthorityKeyIdentifier,
    oid.ID_CE_CERTIFICATE_POLICIES: CertificatePolicies,
    oid.ID_CE_CRL_DISTRIBUTION_POINTS: CRLDistributionPoints,
    oid.ID_PE_AUTHORITY_INFO_ACCESS: AuthorityInfoAccess
}
class Extension(types.Sequence):
    """A single X.509 v3 extension (RFC 5280, Section 4.2)."""
    print_delimiter = ", "
    components = (
        types.Component("extnID", oid.ObjectIdentifier),
        types.Component("critical", types.Boolean, default=False),
        # The value is decoded according to extnID via the lookup table.
        types.Component("extnValue", ExtensionValue, defined_by="extnID",
                        lookup=_EXTENSION_DICT)
    )
class Extensions(types.SequenceOf):
    """The certificate's extensions sequence."""
    component = Extension
| {
"repo_name": "rep/certificate-transparency",
"path": "python/ct/crypto/asn1/x509_extension.py",
"copies": "1",
"size": "7387",
"license": "apache-2.0",
"hash": -9024353478338749000,
"line_mean": 29.1510204082,
"line_max": 80,
"alpha_frac": 0.7071883038,
"autogenerated": false,
"ratio": 3.5772397094430994,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47844280132430994,
"avg_score": null,
"num_lines": null
} |
"""ASN.1 specification for X509 name types."""
from ct.crypto import error
from ct.crypto.asn1 import oid
from ct.crypto.asn1 import types
class AttributeType(oid.ObjectIdentifier):
    """OID identifying a name attribute (e.g., commonName)."""
    pass
class AttributeValue(types.Any):
    """Attribute value; the concrete type is selected by the AttributeType."""
    pass
class DirectoryString(types.Choice):
    """X.520 DirectoryString, extended with IA5String for leniency."""
    components = {
        "teletexString": types.TeletexString,
        "printableString": types.PrintableString,
        "universalString": types.UniversalString,
        "utf8String": types.UTF8String,
        "bmpString": types.BMPString,
        # Does not really belong here.
        "ia5String": types.IA5String
    }
# Maps attribute-type OIDs to the ASN.1 type used to decode their values.
_ATTRIBUTE_DICT = {
    # Note: this mapping does not conform to the RFCs, as some of the OIDs
    # have more restricted values. But real certificates do not conform either,
    # so we try to be lenient and accept all strings that we can recognize.
    oid.ID_AT_NAME: DirectoryString,
    oid.ID_AT_SURNAME: DirectoryString,
    oid.ID_AT_GIVEN_NAME: DirectoryString,
    oid.ID_AT_INITIALS: DirectoryString,
    oid.ID_AT_GENERATION_QUALIFIER: DirectoryString,
    oid.ID_AT_COMMON_NAME: DirectoryString,
    oid.ID_AT_LOCALITY_NAME: DirectoryString,
    oid.ID_AT_STATE_OR_PROVINCE_NAME: DirectoryString,
    oid.ID_AT_ORGANIZATION_NAME: DirectoryString,
    oid.ID_AT_ORGANIZATIONAL_UNIT_NAME: DirectoryString,
    oid.ID_AT_TITLE: DirectoryString,
    oid.ID_AT_DN_QUALIFIER: DirectoryString,  # PrintableString
    oid.ID_AT_COUNTRY_NAME: DirectoryString,  # PrintableString
    oid.ID_AT_SERIAL_NUMBER: DirectoryString,  # PrintableString
    oid.ID_AT_PSEUDONYM: DirectoryString,
    oid.ID_DOMAIN_COMPONENT: DirectoryString,  # IA5String
    oid.ID_EMAIL_ADDRESS: DirectoryString,  # IA5String
    oid.ID_AT_STREET_ADDRESS: DirectoryString,
    oid.ID_AT_DESCRIPTION: DirectoryString,
    oid.ID_AT_BUSINESS_CATEGORY: DirectoryString,
    oid.ID_AT_POSTAL_CODE: DirectoryString,
    oid.ID_AT_POST_OFFICE_BOX: DirectoryString,
    }
class AttributeTypeAndValue(types.Sequence):
    """One name attribute: a (type OID, value) pair, printed as type=value."""
    print_labels = False
    print_delimiter = "="
    components = (
        (types.Component("type", AttributeType)),
        # The value is decoded according to the type OID via the lookup table.
        (types.Component("value", AttributeValue, defined_by="type",
                         lookup=_ATTRIBUTE_DICT))
    )
class RelativeDistinguishedName(types.SetOf):
    """An unordered set of AttributeTypeAndValue pairs (one RDN)."""
    print_labels = False
    print_delimiter = ", "
    component = AttributeTypeAndValue
class RDNSequence(types.SequenceOf):
    """A sequence of RelativeDistinguishedNames, printed with "/" separators."""
    print_labels = False
    print_delimiter = "/"
    component = RelativeDistinguishedName

    # See http://tools.ietf.org/html/rfc6125 for context.
    def flatten(self):
        """Get a flat list of AttributeTypeAndValue pairs in an RDNSequence.

        The hierarchical (Relative) information is not used in all contexts,
        so we provide a way of discarding that information and flattening
        the structure.
        """
        flat = []
        for rdn in self:
            flat.extend(rdn)
        return flat

    def attributes(self, attr_type):
        """Get a flat list of attribute values of the given type.

        Returns:
            a list of attributes.

        Raises:
            error.ASN1Error: corrupt attribute value.
        """
        matching = [pair["value"].decoded_value for pair in self.flatten()
                    if pair["type"] == attr_type]
        if any(val is None for val in matching):
            raise error.ASN1Error("Corrupt name attribute")
        # A subject name attribute is always a DirectoryString (a Choice),
        # so we need to take its value.
        return [decoded.component_value() for decoded in matching]
# Bypass the CHOICE indirection since exactly one option is specified.
# class Name(types.Choice):
#     components = {"rdnSequence": RDNSequence}
class Name(RDNSequence):
    """X.501 Name, modeled directly as an RDNSequence (the sole CHOICE arm)."""
    pass
class OtherName(types.Sequence):
    """GeneralName alternative: a (type OID, explicitly tagged value) pair."""
    print_delimiter = ", "
    components = (
        (types.Component("type-id", oid.ObjectIdentifier)),
        (types.Component("value", types.Any.explicit(0)))
    )
class EDIPartyName(types.Sequence):
    """GeneralName alternative for EDI party names."""
    print_delimiter = ", "
    components = (
        # Definition here: http://tools.ietf.org/html/rfc5280#section-4.2.1.6
        # Note: this definition suggests that the tagging is implicit.
        # However, implicit tagging of a CHOICE type is ambiguous, so this is
        # in practice interpreted as an explicit tag.
        (types.Component("nameAssigner", DirectoryString.explicit(0),
                         optional=True)),
        (types.Component("partyName", DirectoryString.explicit(1)))
    )
# Partially defined ORAddress: we've not come across any certs that contain it
# but this should be enough to allow the decoder to continue without blowing up.
class BuiltInDomainDefinedAttributes(types.SequenceOf):
    """Opaque domain-defined attributes; contents are not decoded further."""
    component = types.Any
class ExtensionAttributes(types.SetOf):
    """Opaque extension attributes; contents are not decoded further."""
    component = types.Any
class ORAddress(types.Sequence):
    """Partially modeled X.400 ORAddress (components kept opaque)."""
    components = (
        (types.Component("builtInStandardAttributes", types.Any)),
        (types.Component("builtInDomainDefinedAttributes",
                         BuiltInDomainDefinedAttributes, optional=True)),
        (types.Component("extensionAttributes",
                         ExtensionAttributes, optional=True))
    )
# Component keys of the GeneralName CHOICE below.
OTHER_NAME = "otherName"
RFC822_NAME = "rfc822Name"
DNS_NAME = "dNSName"
X400_ADDRESS_NAME = "x400Address"
DIRECTORY_NAME = "directoryName"
EDI_PARTY_NAME = "ediPartyName"
URI_NAME = "uniformResourceIdentifier"
IP_ADDRESS_NAME = "iPAddress"
REGISTERED_ID_NAME = "registeredID"
class IPAddress(types.OctetString):
    """IPv4 (4 octets) or IPv6 (16 octets) address octet string.

    NOTE(review): relies on Python 2 byte-string semantics (ord() over str
    characters, str.encode("hex")); not portable to Python 3 as written.
    """
    def __init__(self, value=None, serialized_value=None, strict=True):
        """Initialize from a value or serialized buffer.

        Raises:
            error.ASN1Error: in strict mode, when the value is neither
                4 nor 16 bytes long.
        """
        super(IPAddress, self).__init__(value=value,
                                        serialized_value=serialized_value,
                                        strict=strict)
        # Only 4-byte (IPv4) and 16-byte (IPv6) values are accepted.
        if strict and len(self._value) != 4 and len(self._value) != 16:
            raise error.ASN1Error("%s is not a valid IP address" %
                                  self.value.encode("hex"))
    def as_octets(self):
        """Return the address as a tuple of integer octets."""
        return tuple([ord(b) for b in self._value])
    def __str__(self):
        # Dotted-quad for IPv4, colon-separated hex pairs for IPv6; anything
        # else (only possible in non-strict mode) falls back to the base repr.
        if len(self._value) == 4:
            return ".".join([str(ord(c)) for c in self._value])
        if len(self._value) == 16:
            return ":".join([self._value[i:i+2].encode("hex")
                             for i in range(0, len(self._value), 2)])
        return super(IPAddress, self).__str__()
class GeneralName(types.Choice):
    """GeneralName CHOICE (RFC 5280, Section 4.2.1.6)."""
    print_labels = True  # Print component type.
    # Definition here: http://tools.ietf.org/html/rfc5280#section-4.2.1.6
    components = {
        OTHER_NAME: OtherName.implicit(0),
        RFC822_NAME: types.IA5String.implicit(1),
        DNS_NAME: types.IA5String.implicit(2),
        X400_ADDRESS_NAME: ORAddress.implicit(3),
        # Implicit CHOICE tag is converted to an explicit one.
        DIRECTORY_NAME: Name.explicit(4),
        EDI_PARTY_NAME: EDIPartyName.implicit(5),
        URI_NAME: types.IA5String.implicit(6),
        IP_ADDRESS_NAME: IPAddress.implicit(7),
        REGISTERED_ID_NAME: oid.ObjectIdentifier.implicit(8)
    }
class GeneralNames(types.SequenceOf):
    """A sequence of GeneralName values."""
    component = GeneralName
| {
"repo_name": "nhr/origin",
"path": "vendor/github.com/google/certificate-transparency/python/ct/crypto/asn1/x509_name.py",
"copies": "35",
"size": "7180",
"license": "apache-2.0",
"hash": 6338756929624510000,
"line_mean": 34.1960784314,
"line_max": 80,
"alpha_frac": 0.6544568245,
"autogenerated": false,
"ratio": 3.81104033970276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005546735434889767,
"num_lines": 204
} |
"""ASN.1 tagging."""
from ct.crypto import error
# Tag class bits (the high two bits of the identifier octet, X.690 8.1.2).
UNIVERSAL = 0x00
APPLICATION = 0x40
CONTEXT_SPECIFIC = 0x80
PRIVATE = 0xc0
# Encoding bit (bit 6 of the identifier octet).
PRIMITIVE = 0x00
CONSTRUCTED = 0x20
# Constants for better readability.
IMPLICIT, EXPLICIT = range(2)
class Tag(object):
    """An ASN.1 tag."""
    # Bit masks for decomposing the identifier octet (X.690 8.1.2).
    _CLASS_MASK = 0xc0
    _ENCODING_MASK = 0x20
    _NUMBER_MASK = 0x1f
    # 0x1f in the number bits marks the high-tag-number (multi-octet) form.
    _HIGH = 0x1f
    # In the multi-octet form, each octet carries 7 payload bits; the top bit
    # is set on every octet except the last.
    _FULL_SUB_OCTET = 0x7f
    _LAST_OCTET = 0x80
    def __init__(self, number, tag_class, encoding):
        """ASN.1 tag.
        Initialize a tag from its number, class and encoding.
        Args:
            number: the numeric value of the tag.
            tag_class: must be one of UNIVERSAL, APPLICATION, CONTEXT_SPECIFIC
                or PRIVATE.
            encoding: must be one of PRIMITIVE or CONSTRUCTED.
        Raises:
            ValueError: invalid initializers.
        """
        if tag_class not in (UNIVERSAL, APPLICATION, CONTEXT_SPECIFIC, PRIVATE):
            raise ValueError("Invalid tag class %s" % tag_class)
        if encoding not in (PRIMITIVE, CONSTRUCTED):
            raise ValueError("Invalid encoding %s" % encoding)
        # Public just for lightweight access. Do not modify directly.
        self.number = number
        self.tag_class = tag_class
        self.encoding = encoding
        if number <= 30:
            # Low-tag-number form: everything fits in one identifier octet.
            self.value = chr(tag_class | encoding | number)
        else:
            # High-tag-number form: leading octet with all number bits set,
            # then the number in base-128, big-endian, continuation bit set
            # on all but the final octet.
            res = [tag_class | encoding | self._HIGH]
            tmp = []
            while number > 0:
                tmp.append((number & self._FULL_SUB_OCTET) | self._LAST_OCTET)
                number >>= 7
            # tmp is little-endian here, so tmp[0] is the LAST octet emitted:
            # clear its continuation bit before reversing.
            tmp[0] -= self._LAST_OCTET
            tmp.reverse()
            res += tmp
            self.value = ''.join([chr(byte) for byte in res])
    def __repr__(self):
        return ("%s(%r, %r, %r)" % (self.__class__.__name__, self.number,
                                    self.tag_class, self.encoding))
    def __str__(self):
        return "[%s %d]" % (self.class_name(), self.number)
    def __len__(self):
        # Length of the serialized identifier octets.
        return len(self.value)
    def class_name(self):
        # Human-readable name of the tag class.
        if self.tag_class == UNIVERSAL:
            return "UNIVERSAL"
        elif self.tag_class == APPLICATION:
            return "APPLICATION"
        elif self.tag_class == CONTEXT_SPECIFIC:
            return "CONTEXT-SPECIFIC"
        elif self.tag_class == PRIVATE:
            return "PRIVATE"
        else:
            raise ValueError("Invalid tag class %x" % self.tag_class)
    # Tags hash and compare by their serialized value, which fully
    # determines (number, class, encoding).
    def __hash__(self):
        return hash(self.value)
    def __eq__(self, other):
        if not isinstance(other, Tag):
            return NotImplemented
        return self.value == other.value
    def __ne__(self, other):
        if not isinstance(other, Tag):
            return NotImplemented
        return self.value != other.value
    @classmethod
    def read(cls, buf):
        """Read from the beginning of a string or buffer.
        Args:
            buf: a binary string or string buffer containing an ASN.1 object.
        Returns:
            an tuple consisting of an instance of the class and the remaining
            buffer/string.
        """
        if not buf:
            raise error.ASN1TagError("Ran out of bytes while decoding")
        # tag_bytes counts identifier octets consumed beyond the first.
        tag_bytes = 0
        id_byte = ord(buf[tag_bytes])
        tag_class = id_byte & cls._CLASS_MASK
        encoding = id_byte & cls._ENCODING_MASK
        number = id_byte & cls._NUMBER_MASK
        if number == cls._HIGH:
            # High-tag-number form: accumulate base-128 continuation octets.
            number = 0
            tag_bytes += 1
            success = False
            for i in range(1, len(buf)):
                number <<= 7
                id_byte = ord(buf[i])
                number |= (id_byte & cls._FULL_SUB_OCTET)
                tag_bytes += 1
                if id_byte & cls._LAST_OCTET == 0:
                    # Continuation bit clear: this was the final octet.
                    success = True
                    break
            if not success:
                raise error.ASN1TagError("Ran out of bytes while decoding")
            if tag_bytes - 1 > 5:
                raise error.ASN1TagError("Base 128 integer too large")
            # Compensate for the double-counted first continuation octet.
            tag_bytes -= 1
        tag = cls(number, tag_class, encoding)
        return tag, buf[tag_bytes + 1:]
| {
"repo_name": "RJPercival/certificate-transparency",
"path": "python/ct/crypto/asn1/tag.py",
"copies": "35",
"size": "4183",
"license": "apache-2.0",
"hash": 6140881235332341000,
"line_mean": 30.2164179104,
"line_max": 80,
"alpha_frac": 0.5395649056,
"autogenerated": false,
"ratio": 3.957426679280984,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""ASN.1 types.
Spec: http://www.itu.int/ITU-T/studygroups/com17/languages/X.690-0207.pdf
See also http://luca.ntop.org/Teaching/Appunti/asn1.html for a good introduction
to ASN.1.
This module implements a restricted encoder/decoder for a subset of ASN.1 types.
The decoder has a strict and non-strict mode. Non-strict mode tolerates selected
non-fatal DER decoding errors. The encoder is DER-only.
Generic decoding is not supported: objects can only be decoded against a
predefined ASN.1 type. However, applications can derive arbitrary custom ASN.1
type specifications from the supported base types.
Constraints (e.g., on the length of an ASN.1 string value) are not supported,
and should be checked at application level, where necessary.
"""
import abc
import collections
import functools
import re
from ct.crypto import error
from ct.crypto.asn1 import print_util
from ct.crypto.asn1 import tag
# Single-octet encodings of 0 and -1, and the end-of-contents marker used
# by indefinite length encoding (Python 2 byte strings).
_ZERO = "\x00"
_MINUS_ONE = "\xff"
_EOC = "\x00\x00"
def encode_int(value, signed=True):
    """Encode an integer.

    Args:
        value: an integral value.
        signed: if True, encode in two's complement form. If False, encode as
            an unsigned integer.

    Raises:
        ValueError: attempt to encode a negative integer as unsigned.

    Returns:
        a variable-length string representing the encoded integer.
    """
    if value < 0 and not signed:
        raise ValueError("Unsigned integer cannot be negative")
    # Single-octet shortcuts.
    if not value:
        return _ZERO
    if value == -1:
        return _MINUS_ONE
    # Peel off octets least-significant first; for negative values the
    # arithmetic shift converges to -1 rather than 0.
    octets = bytearray()
    remaining = value
    while remaining != 0 and remaining != -1:
        octets.append(remaining & 0xff)
        remaining >>= 8
    if signed:
        # In two's complement form, negative values have the most significant
        # bit set; pad with one extra octet whenever the natural encoding
        # would carry the wrong sign bit.
        if remaining == -1 and octets[-1] <= 127:
            octets.append(0xff)
        elif remaining == 0 and octets[-1] > 127:
            octets.append(0)
    octets.reverse()
    return str(octets)
def decode_int(buf, signed=True, strict=True):
    """Decode an integer.

    Args:
        buf: a string or string buffer.
        signed: if True, decode in two's complement form. If False, decode as
            an unsigned integer.
        strict: if True, reject non-minimal encodings (extra leading 0x00 or
            0xff octets).

    Raises:
        ASN1Error.

    Returns:
        an integer.
    """
    if not buf:
        raise error.ASN1Error("Invalid integer encoding: empty value")
    # NOTE(review): Python 2 semantics — ord() over a str octet plus
    # bytearray(str); this function is not Python 3 compatible as written.
    leading = ord(buf[0])
    int_bytes = bytearray(buf[1:])
    if int_bytes:
        # DER requires minimal-length encodings; reject redundant padding.
        if strict and leading == 0 and int_bytes[0] < 128:
            # 0x00 0x42 == 0x42
            raise error.ASN1Error("Extra leading 0-bytes in integer "
                                  "encoding")
        elif strict and signed and leading == 0xff and int_bytes[0] >= 128:
            # 0xff 0x82 == 0x82
            raise error.ASN1Error("Extra leading 0xff-bytes in negative "
                                  "integer encoding")
    # Interpret the leading octet's sign, then accumulate big-endian octets.
    if signed and leading > 127:
        leading -= 256
    for b in int_bytes:
        leading <<= 8
        leading += b
    return leading
# DER definite-length form (X.690 8.1.3):
# Lengths between 0 and 127 are encoded as a single byte.
# Lengths greater than 127 are encoded as follows:
# * MSB of first byte is 1 and remaining bits encode the number of
# additional bytes.
# * Remaining bytes encode the length.
_MULTIBYTE_LENGTH = 0x80
_MULTIBYTE_LENGTH_MASK = 0x7f
def encode_length(length):
    """Encode the length of an ASN.1 object.

    Args:
        length: a non-negative integral value.

    Returns:
        a string holding the DER definite-length encoding.
    """
    # Short form: one octet carries the length directly.
    if length <= 127:
        return chr(length)
    # Long form: a count octet, then the length as a big-endian unsigned int.
    length_octets = encode_int(length, signed=False)
    return chr(_MULTIBYTE_LENGTH | len(length_octets)) + length_octets
def read_length(buf, strict=True):
    """Read an ASN.1 object length from the beginning of the buffer.

    Args:
        buf: a string or string buffer.
        strict: if false, accept indefinite length encoding.

    Raises:
        ASN1Error.

    Returns:
        a (length, rest) tuple consisting of a non-negative integer
        representing the length of an ASN.1 object, and the remaining bytes.
        For indefinite length, returns (-1, rest).
    """
    if not buf:
        raise error.ASN1Error("Invalid length encoding: empty value")
    first = ord(buf[0])
    remainder = buf[1:]
    # Short form: the single octet is the length itself.
    if first <= 127:
        return first, remainder
    # 0x80 == ASN.1 indefinite length
    if first == 128:
        if strict:
            raise error.ASN1Error("Indefinite length encoding")
        return -1, remainder
    # Long form: the low bits give the number of length octets that follow.
    num_octets = first & _MULTIBYTE_LENGTH_MASK
    if len(remainder) < num_octets:
        raise error.ASN1Error("Invalid length encoding")
    # strict=True: let's hope that at least ASN.1 lengths are properly encoded.
    decoded = decode_int(remainder[:num_octets], signed=False, strict=True)
    return decoded, remainder[num_octets:]
class Universal(object):
    """Apply a universal tag to the class.

    Can be used as a callable, or a decorator:

        Integer = Universal(2, tag.PRIMITIVE)(Abstract)

    is the same as

        @Universal(2, tag.PRIMITIVE)
        class Integer(Abstract):
            pass

    and defines a type with an ASN.1 integer tag.
    """

    def __init__(self, number, encoding):
        """Setup the tag.

        Args:
            number: the tag number.
            encoding: the encoding. One of tag.PRIMITIVE or tag.CONSTRUCTED.
        """
        self.tag = tag.Tag(number, tag.UNIVERSAL, encoding)

    def __call__(self, cls):
        """Apply the universal tag.

        Args:
            cls: class to modify. The class must have an empty 'tags'
                attribute.

        Returns:
            the class with a modified 'tags' attribute.

        Raises:
            TypeError: invalid application of the tag.
        """
        # A universal tag may only serve as the innermost (first) tag.
        if not cls.tags:
            cls.tags = (self.tag,)
            return cls
        raise TypeError("Cannot apply a UNIVERSAL tag to a tagged type.")
class Explicit(object):
    """Apply an explicit tag to the class.

    Can be used as a callable, or a decorator:

        MyInteger = Explicit(0, tag.APPLICATION)(Integer)

    is the same as

        @Explicit(0, tag.APPLICATION)
        class MyInteger(Integer):
            pass

    and results in a MyInteger type that is explicitly tagged with an
    application-class 0-tag.
    """

    def __init__(self, number, tag_class=tag.CONTEXT_SPECIFIC):
        """Setup the tag.

        Args:
            number: the tag number.
            tag_class: the tag class. One of tag.CONTEXT_SPECIFIC,
                tag.APPLICATION or tag.PRIVATE.

        Raises:
            TypeError: invalid application of the tag.
        """
        if tag_class == tag.UNIVERSAL:
            raise TypeError("Cannot tag with a UNIVERSAL tag")
        # Explicit tagging always results in constructed encoding.
        self._tag = tag.Tag(number, tag_class, tag.CONSTRUCTED)

    def __call__(self, cls):
        """Apply the explicit tag.

        Args:
            cls: class to modify. The class must have an iterable 'tags'
                attribute.

        Returns:
            the class with a modified 'tags' attribute.
        """
        # An explicit tag wraps whatever tags the type already has.
        cls.tags = tuple(cls.tags) + (self._tag,)
        return cls
class Implicit(object):
    """Apply an implicit tag to the class.

    Can be used as a callable, or a decorator:

        MyInteger = Implicit(0, tag.APPLICATION)(Integer)

    is the same as

        @Implicit(0, tag.APPLICATION)
        class MyInteger(Integer):
            pass

    and results in a MyInteger type whose tag is implicitly replaced with an
    application-class 0-tag.
    """

    def __init__(self, number, tag_class=tag.CONTEXT_SPECIFIC):
        """Setup the tag.

        Args:
            number: the tag number.
            tag_class: the tag class. One of tag.CONTEXT_SPECIFIC,
                tag.APPLICATION or tag.PRIVATE.

        Raises:
            TypeError: invalid application of the tag.
        """
        if tag_class == tag.UNIVERSAL:
            raise TypeError("Cannot tag with a UNIVERSAL tag")
        # We cannot precompute the tag because the encoding depends
        # on the existing tags.
        self._number = number
        self._tag_class = tag_class

    def __call__(self, cls):
        """Apply the implicit tag.

        Args:
            cls: class to modify. The class must have an iterable 'tags'
                attribute.

        Returns:
            the class with a modified 'tags' attribute.

        Raises:
            TypeError: invalid application of the tag.
        """
        if not cls.tags:
            raise TypeError("Cannot implicitly tag an untagged type")
        new_tags = list(cls.tags)
        # Only simple types and simple types derived via implicit tagging have
        # a primitive encoding, so the last tag determines the encoding type.
        new_tags[-1] = tag.Tag(self._number, self._tag_class,
                               new_tags[-1].encoding)
        cls.tags = tuple(new_tags)
        return cls
class Abstract(object):
    """Abstract base class."""
    # NOTE(review): Python 2 metaclass syntax; ignored under Python 3.
    __metaclass__ = abc.ABCMeta
    # Serialized tags, innermost first; () for untagged types (CHOICE, ANY).
    tags = ()
    @classmethod
    def explicit(cls, number, tag_class=tag.CONTEXT_SPECIFIC):
        """Dynamically create a new tagged type.
        Args:
            number: tag number.
            tag_class: tag class.
        Returns:
            a subtype of cls with the given explicit tag.
        """
        name = "%s.explicit(%d, %d)" % (cls.__name__, number, tag_class)
        # TODO(ekasper): the metaclass could register created types so we
        # return the _same_ type when called more than once with the same
        # arguments.
        mcs = cls.__metaclass__
        return_class = mcs(name, (cls,), {})
        return Explicit(number, tag_class)(return_class)
    @classmethod
    def implicit(cls, number, tag_class=tag.CONTEXT_SPECIFIC):
        """Dynamically create a new tagged type.
        Args:
            number: tag number.
            tag_class: tag class.
        Returns:
            a subtype of cls with the given implicit tag.
        """
        name = "%s.implicit(%d, %d)" % (cls.__name__, number, tag_class)
        mcs = cls.__metaclass__
        return_class = mcs(name, (cls,), {})
        return Implicit(number, tag_class)(return_class)
    def __init__(self, value=None, serialized_value=None, strict=True):
        """Initialize from a value or serialized buffer.
        Args:
            value: initializing value of an appropriate type. If the
                serialized_value is not set, the initializing value must be set.
            serialized_value: serialized inner value (with tags and lengths
                stripped).
            strict: if False, tolerate some non-fatal decoding errors.
        Raises:
            error.ASN1Error: decoding the serialized value failed.
            TypeError: invalid initializer.
        """
        # serialized_value takes precedence when both initializers are given.
        if serialized_value is not None:
            self._value = self._decode_value(serialized_value, strict=strict)
        elif value is not None:
            self._value = self._convert_value(value)
        else:
            raise TypeError("Cannot initialize from None")
        # Cached for re-encoding; None-ed out once the object is modified.
        self._serialized_value = serialized_value
    @classmethod
    def _convert_value(cls, value):
        """Convert initializer to an appropriate value."""
        raise NotImplementedError
    @abc.abstractmethod
    def _decode_value(self, buf, strict=True):
        """Decode the initializer value from a buffer.
        Args:
            buf: a string or string buffer.
            strict: if False, tolerate some non-fatal decoding errors.
        Returns:
            the value of the object.
        """
        pass
    @property
    def value(self):
        """Get the value of the object.
        An ASN.1 object can always be reconstructed from its value.
        """
        # Usually either the immutable value, or a shallow copy of
        # the mutable value.
        raise NotImplementedError
    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.value)
    def __str__(self):
        return str(self.value)
    @abc.abstractmethod
    def _encode_value(self):
        """Encode the contents, excluding length and tags.
        Returns:
            a string representing the encoded value.
        """
        pass
    # Implemented by Choice and Any.
    # Used when the type is untagged so that read() does not reveal a length.
    @classmethod
    def _read(cls, buf, strict=True):
        """Read the value from the beginning of a string or buffer."""
        raise NotImplementedError
    # Only applicable where indefinite length encoding is possible, i.e., for
    # simple string types using constructed encoding and structured types
    # (Sequence, Set, SequenceOf, SetOf) only. Currently, it's only
    # implemented for structured types; constructed encoding of simple string
    # types is not supported. Only applicable in non-strict mode.
    @classmethod
    def _read_indefinite_value(cls, buf):
        """Read the inner value from the beginning of a string or buffer."""
        raise NotImplementedError
    def encode(self):
        """Encode oneself.
        Returns:
            a string representing the encoded object.
        """
        # If we have a read-only object that we created from a serialized value
        # and never modified since, use the original cached value.
        #
        # This ensures that objects decoded in non-strict mode will retain their
        # original encoding.
        #
        # BUG: we do not cache tag and length encoding, so reencoding is broken
        # for objects that use indefinite length encoding.
        if self._serialized_value and not self.modified():
            encoded_value = self._serialized_value
        else:
            # We can only use the cached value if the object has never been
            # modified after birth. Since mutable objects cannot track when
            # their recursive subcomponents are modified, the modified flag,
            # once set, can never be unset.
            self._serialized_value = None
            encoded_value = self._encode_value()
        # Wrap the value in each tag, innermost first (TLV nesting).
        for t in self.tags:
            encoded_length = encode_length(len(encoded_value))
            encoded_value = t.value + encoded_length + encoded_value
        return encoded_value
    @classmethod
    def read(cls, buf, strict=True):
        """Read from a string or buffer.
        Args:
            buf: a string or string buffer.
            strict: if False, tolerate some non-fatal decoding errors.
        Returns:
            a tuple consisting of an instance of the class and the remaining
            bytes.
        """
        if cls.tags:
            # Each indefinite length must be closed with the EOC (\x00\x00)
            # octet.
            # If we have multiple tags (i.e., explicit tagging is used) and the
            # outer tags use indefinite length, each such encoding adds an EOC
            # to the end (while a regular tag adds nothing). Therefore, we first
            # read all tags, then the value, and finally strip the EOC octets of
            # the explicit tags.
            indefinite = 0
            # Tags are stored innermost-first, so consume them in reverse.
            for t in reversed(cls.tags):
                if buf[:len(t)] != t.value:
                    raise error.ASN1TagError(
                        "Invalid tag: expected %s, got %s while decoding %s" %
                        (t, buf[:len(t.value)], cls.__name__))
                # Logging statements are really expensive in the recursion even
                # if debug-level logging itself is disabled.
                # logging.debug("%s: read tag %s", cls.__name__, t)
                buf = buf[len(t):]
                # Only permit indefinite length for constructed types.
                decoded_length, buf = read_length(buf, strict=(
                    strict or t.encoding != tag.CONSTRUCTED))
                if decoded_length == -1:
                    indefinite += 1
                # logging.debug("%s: read length %d", cls.__name__,
                #               decoded_length)
                elif len(buf) < decoded_length:
                    raise error.ASN1Error("Invalid length encoding in %s: "
                                          "read length %d, remaining bytes %d" %
                                          (cls.__name__, decoded_length,
                                           len(buf)))
            # The last tag had definite length.
            if decoded_length != -1:
                value, rest = (cls(serialized_value=buf[:decoded_length],
                                   strict=strict), buf[decoded_length:])
            else:
                decoded, rest = cls._read_indefinite_value(buf)
                value = cls(value=decoded)
                # _read_indefinite_value will strip the inner EOC.
                indefinite -= 1
            # Remove EOC octets corresponding to outer explicit tags.
            if indefinite:
                if rest[:indefinite*2] != _EOC*indefinite:
                    raise error.ASN1Error("Missing EOC octets")
                rest = rest[indefinite*2:]
        else:
            # Untagged CHOICE and ANY; no outer tags to determine the length.
            value, rest = cls._read(buf, strict=strict)
        # logging.debug("%s: decoded value %s", cls.__name__, value)
        # logging.debug("Remaining bytes: %d", len(rest))
        return value, rest
    @classmethod
    def decode(cls, buf, strict=True):
        """Decode from a string or buffer.
        Args:
            buf: a string or string buffer.
            strict: if False, tolerate some non-fatal decoding errors.
        Returns:
            an instance of the class.
        """
        value, rest = cls.read(buf, strict=strict)
        if rest:
            raise error.ASN1Error("Invalid encoding: leftover bytes when "
                                  "decoding %s" % cls.__name__)
        return value
    # Compare by value.
    # Note this means objects with equal values do not necessarily have
    # equal encodings.
    # NOTE(review): __eq__ is defined here without __hash__; hashability is
    # provided by subclasses (e.g. Simple) — confirm for mutable subclasses.
    def __eq__(self, other):
        return self.value == other
    def __ne__(self, other):
        return self.value != other
    @abc.abstractmethod
    def human_readable_lines(self, wrap=80, label=""):
        """A pretty human readable representation of the object.
        Args:
            wrap: maximum number of characters per line. 0 or negative wrap
                means no limit. Should be chosen long enough to comfortably fit
                formatted data; otherwise it is simply ignored and output may
                look funny.
            label: a label prefix.
        Returns:
            a list of line strings of at most |wrap| characters each.
        """
        pass
    def human_readable(self, wrap=80, label=""):
        """A pretty human readable representation of the object.
        Args:
            wrap: maximum number of characters per line. 0 or negative wrap
                means no limit. Should be chosen long enough to comfortably fit
                formatted data; otherwise it is simply ignored and output may
                look funny.
            label: a label prefix.
        Returns:
            a multi-line string of at most |wrap| characters per line.
        """
        return ("\n").join(self.human_readable_lines(wrap=wrap, label=label))
# Boilerplate code for some simple types whose value directly corresponds to a
# basic immutable type.
@functools.total_ordering
class Simple(Abstract):
    """Base class for Boolean, Integer, and string types.

    Wraps a basic immutable Python value; hashing, truthiness and
    comparisons all delegate to that value. functools.total_ordering
    derives the remaining rich comparisons from __eq__ and __lt__.
    """
    # Pretty-printed character length.
    # OctetString and BitString use this to nicely format hex bytes.
    char_wrap = 1
    @property
    def value(self):
        # The underlying immutable Python value.
        return self._value
    def __hash__(self):
        return hash(self.value)
    def __lt__(self, other):
        # Compare against the raw value so instances order correctly
        # against plain Python values as well as other Simple objects.
        return self.value < other
    def __bool__(self):
        return bool(self.value)
    def __int__(self):
        return int(self.value)
    def __nonzero__(self):
        # Python 2 spelling of __bool__.
        return bool(self.value)
    def modified(self):
        """Returns True if the object has been modified after creation."""
        # Simple values are immutable, so they are never modified.
        return False
    @classmethod
    def wrap_lines(cls, long_string, wrap):
        """Split long lines into multiple chunks according to the wrap limit.
        Derived classes can override char_wrap if they wish to, e.g., not split
        hex bytes.
        Args:
            long_string: a string_value() representation of the object
            wrap: maximum number of characters per line. 0 or negative wrap
              means no limit. Should be chosen long enough to comfortably fit
              formatted data; otherwise it is simply ignored and output may
              look funny.
        Returns:
            long_string split into lines of at most |wrap| characters each.
        """
        # Round the limit down to a multiple of char_wrap so multi-character
        # display units (e.g. "xx:" hex groups) are never split mid-unit.
        wrap -= wrap % cls.char_wrap
        return print_util.wrap_lines(long_string, wrap)
    def human_readable_lines(self, wrap=80, label=""):
        """A pretty human readable representation of the object.
        Args:
            wrap: maximum number of characters per line. 0 or negative wrap
              means no limit. Should be chosen long enough to comfortably fit
              formatted data; otherwise it is simply ignored and output may
              look funny.
            label: a label prefix.
        Returns:
            a list of line strings of at most |wrap| characters each.
        """
        to_print = str(self)
        formatted_label = label + ": " if label else ""
        if (to_print.find("\n") == -1 and
            (wrap <= 0 or len(to_print) + len(formatted_label) <= wrap)):
            # Fits on one line, like this:
            # label: value
            return [formatted_label + to_print]
        else:
            # Multiline output:
            # label:
            #   firstlongvalueline
            #   secondvalueline
            ret = []
            indent = 2
            if label:
                ret += print_util.wrap_lines(label + ":", wrap)
            return ret + [" " * indent + x for x in
                          self.wrap_lines(to_print, wrap-indent)]
@Universal(1, tag.PRIMITIVE)
class Boolean(Simple):
    """ASN.1 Boolean."""
    # DER content octets: TRUE is 0xff, FALSE is 0x00.
    _TRUE = "\xff"
    _FALSE = "\x00"
    def _encode_value(self):
        if self._value:
            return self._TRUE
        return self._FALSE
    @classmethod
    def _convert_value(cls, value):
        # Any Python value is accepted; only its truthiness is stored.
        return bool(value)
    @classmethod
    def _decode_value(cls, buf, strict=True):
        """Decode a single content octet.

        In strict (DER) mode only 0x00 and 0xff are accepted; otherwise
        any non-zero octet decodes to True (BER).
        """
        if len(buf) != 1:
            raise error.ASN1Error("Invalid encoding")
        octet = buf[0]
        # Continuing here breaks re-encoding.
        if strict and octet != cls._TRUE and octet != cls._FALSE:
            raise error.ASN1Error("BER encoding of Boolean value: %s" %
                                  octet)
        return octet != cls._FALSE
@Universal(2, tag.PRIMITIVE)
class Integer(Simple):
    """Integer."""
    def _encode_value(self):
        # Delegate to the module-level integer encoder.
        return encode_int(self._value)
    @classmethod
    def _convert_value(cls, value):
        # Accept anything int() accepts.
        return int(value)
    @classmethod
    def _decode_value(cls, buf, strict=True):
        # strict is forwarded to the module-level decoder; its exact
        # checks live there.
        return decode_int(buf, strict=strict)
@Universal(5, tag.PRIMITIVE)
class Null(Simple):
    """Null."""
    def _encode_value(self):
        # NULL has empty contents.
        return ""
    @classmethod
    def _convert_value(cls, value):
        # Whatever the initializer, the value of a Null is always None.
        return None
    @classmethod
    def _decode_value(cls, buf, strict=True):
        # NOTE(review): buf is not verified to be empty here, even in
        # strict mode.
        return None
class ASN1String(Simple):
    """Base class for string types."""
    def _encode_value(self):
        # The raw string is its own content encoding.
        return self._value
    @classmethod
    def _convert_value(cls, value):
        if isinstance(value, (str, buffer)):
            converted = str(value)
        elif isinstance(value, ASN1String):
            converted = value.value
        else:
            raise TypeError("Cannot convert %s to %s" %
                            (type(value), cls.__name__))
        cls._check_for_illegal_characters(converted)
        return converted
    @classmethod
    def _check_for_illegal_characters(cls, buf):
        """Raises if there are any illegal characters in string.
        Args:
            buf: string which will be checked for illegal characters
        Raises:
            ASN1Error.
        """
        # The base class accepts any character; subclasses restrict the
        # alphabet.
        pass
    @classmethod
    def _decode_value(cls, buf, strict=True):
        # Character-set violations are only rejected in strict mode.
        if strict:
            cls._check_for_illegal_characters(buf)
        return buf
# Based on https://www.itu.int/rec/T-REC-X.208-198811-W/en
# and http://kikaku.itscj.ipsj.or.jp/ISO-IR/overview.htm
@Universal(19, tag.PRIMITIVE)
class PrintableString(ASN1String):
    """PrintableString."""
    # Matches any character outside the PrintableString alphabet.
    NOT_ACCEPTABLE = re.compile("[^a-zA-Z0-9 '()+,\-./:=?]")
    @classmethod
    def _check_for_illegal_characters(cls, buf):
        """Raise ASN1IllegalCharacter on the first disallowed character."""
        match = PrintableString.NOT_ACCEPTABLE.search(buf)
        if match is None:
            return
        raise error.ASN1IllegalCharacter(
            "Illegal character in PrintableString", buf, match.start())
@Universal(20, tag.PRIMITIVE)
class TeletexString(ASN1String):
    """TeletexString (aka T61String)."""
    # No character-set validation is performed for this type.
    pass
@Universal(22, tag.PRIMITIVE)
class IA5String(ASN1String):
    """IA5String (International Alphabet No. 5, i.e. 7-bit characters)."""
    @classmethod
    def _check_for_illegal_characters(cls, buf):
        """Raise if |buf| contains a character outside the 7-bit range.

        Args:
            buf: string which will be checked for illegal characters.

        Raises:
            error.ASN1IllegalCharacter: a character with ord() > 127 was
                found.
        """
        # Fix: the first parameter of a classmethod is conventionally
        # named cls; the previous name |self| was misleading.
        for index, character in enumerate(buf):
            if ord(character) > 127:
                raise error.ASN1IllegalCharacter(
                    "Illegal character in IA5String", buf, index)
@Universal(26, tag.PRIMITIVE)
class VisibleString(ASN1String):
    """VisibleString (aka ISO646String)."""
    @classmethod
    def _check_for_illegal_characters(cls, buf):
        """Raise if |buf| contains a non-printable character.

        Args:
            buf: string which will be checked for illegal characters.

        Raises:
            error.ASN1IllegalCharacter: a character outside the visible
                range 32..126 was found.
        """
        # Fix: the first parameter of a classmethod is conventionally
        # named cls; the previous name |self| was misleading.
        for index, character in enumerate(buf):
            if ord(character) < 32 or ord(character) > 126:
                raise error.ASN1IllegalCharacter(
                    "Illegal character in VisibleString", buf, index)
@Universal(30, tag.PRIMITIVE)
class BMPString(ASN1String):
    """BMPString."""
    # No character-set validation is performed for this type.
    pass
@Universal(12, tag.PRIMITIVE)
class UTF8String(ASN1String):
    """UTF8String."""
    # NOTE(review): the raw bytes are stored as-is; no UTF-8 validation
    # is performed here.
    pass
@Universal(28, tag.PRIMITIVE)
class UniversalString(ASN1String):
    """UniversalString."""
    # No character-set validation is performed for this type.
    pass
@Universal(4, tag.PRIMITIVE)
class OctetString(ASN1String):
    """Octet string."""
    # Printed as hex bytes; char_wrap=3 keeps each "xx:" group intact
    # when Simple.wrap_lines splits long output.
    char_wrap = 3
    def __str__(self):
        return print_util.bytes_to_hex(self._value)
@Universal(3, tag.PRIMITIVE)
class BitString(Simple):
    """Bit string, held as a Python string of '0' and '1' characters."""
    char_wrap = 3
    def __str__(self):
        return print_util.bits_to_hex(self._value)
    def _encode_value(self):
        # The first content octet records how many padding bits were
        # appended to fill the final octet.
        bits = self._value
        pad = (8 - len(bits) % 8) % 8
        bits = bits + "0" * pad
        encoded = bytearray([pad])
        for start in range(0, len(bits), 8):
            encoded.append(int(bits[start:start + 8], 2))
        return str(encoded)
    def _convert_value(self, value):
        """The value of a BitString is a string of '0's and '1's."""
        if isinstance(value, BitString):
            return value.value
        if not isinstance(value, str):
            raise TypeError("Cannot initialize a BitString from %s"
                            % type(value))
        # Must be a string of '0's and '1's.
        if any(c != "0" and c != "1" for c in value):
            raise ValueError("Cannot initialize a BitString from %s:"
                             "string must consist of 0s and 1s" % value)
        return value
    @classmethod
    def _decode_value(cls, buf, strict=True):
        if not buf:
            raise error.ASN1Error("Invalid encoding: empty %s value" %
                                  cls.__name__)
        octets = bytearray(buf)
        pad = octets[0]
        if pad > 7:
            raise error.ASN1Error("Invalid padding %d in %s" %
                                  (pad, cls.__name__))
        bits = "".join(format(octet, "08b") for octet in octets[1:])
        if pad:
            # Padding without content octets is illegal, and padding
            # bits must all be zero.
            if not bits or "1" in bits[-pad:]:
                raise error.ASN1Error("Invalid padding")
            bits = bits[:-pad]
        return bits
class NamedBitList(BitString):
    """A bit string with named bits."""
    # To use the NamedBitList ASN.1 construct, set named_bit_list to a
    # tuple of NamedValue instances. The name of each NamedValue is the
    # identifier; its value is the number of the distinguished bit
    # ("number"/"DefinedValue" in ASN.1, see
    # http://www.itu.int/ITU-T/studygroups/com17/languages/X.680-0207.pdf).
    named_bit_list = None
    char_wrap = 1
    def __str__(self):
        return ", ".join("%s" % bit.name for bit in self.bits_set())
    def has_bit_set(self, number):
        """Test if the given bit is set.
        Args:
            number: the number of the ASN.1 bit. Bit numbering follows ASN.1
                conventions, i.e., bit number 0 is the "leading bit".
        Returns:
            True: the bit is 1.
            False: the bit is 0, or the BitString is not long enough.
        """
        # Per X.680 (21.6/21.7) a NamedBitList does not constrain the bit
        # string: values may contain 1 bits other than the named ones, and
        # encoders may add or strip trailing 0 bits. Hence a bit beyond
        # the stored length simply reads as unset.
        bits = self._value
        return len(bits) > number and bits[number] == "1"
    def bits_set(self):
        """List the named_bit_list elements whose bit is set."""
        return [named for named in self.named_bit_list
                if self.has_bit_set(named.value)]
class Any(ASN1String):
    """Any.
    Any is a container for an arbitrary value. An Any type can be tagged with
    explicit tags like any other type: those tags will be applied to the
    underlying value. Implicit tagging of Any types is not supported.
    The value of an Any is an undecoded raw string. In addition, Any can hold
    the decoded value of the object.
    """
    char_wrap = 3
    def __init__(self, value=None, serialized_value=None, strict=True):
        # A plain string initializer is treated as an already-serialized
        # value; anything else is kept as the decoded object as well.
        if isinstance(value, str):
            super(Any, self).__init__(value=None, serialized_value=value,
                                      strict=strict)
            self._decoded_value = None
        else:
            super(Any, self).__init__(value=value,
                                      serialized_value=serialized_value,
                                      strict=strict)
            self._decoded_value = value
    def __repr__(self):
        # Prefer the decoded object when available.
        if self._decoded_value is not None:
            return "%s(%r)" % (self.__class__.__name__, self._decoded_value)
        return "%s(%r)" % (self.__class__.__name__, self._value)
    def __str__(self):
        if self._decoded_value is not None:
            return str(self._decoded_value)
        return print_util.bytes_to_hex(self._value)
    def human_readable_lines(self, wrap=80, label=""):
        """A pretty human readable representation of the object.
        Args:
            wrap: maximum number of characters per line. 0 or negative wrap
              means no limit. Should be chosen long enough to comfortably fit
              formatted data; otherwise it is simply ignored and output may
              look funny.
            label: a label prefix.
        Returns:
            a list of line strings of at most |wrap| characters each.
        """
        # Delegate to the decoded object when we have one; otherwise fall
        # back to hex output via the ASN1String implementation.
        if self._decoded_value is not None:
            return self._decoded_value.human_readable_lines(wrap=wrap,
                                                            label=label)
        return super(Any, self).human_readable_lines(wrap=wrap, label=label)
    def modified(self):
        if self._decoded_value is not None:
            return self._decoded_value.modified()
        return False
    def _encode_value(self):
        # Re-encode only if the decoded object was mutated; else reuse the
        # original bytes verbatim.
        if self._decoded_value is not None and self._decoded_value.modified():
            return self._decoded_value.encode()
        return self._value
    @property
    def decoded(self):
        # True iff the inner value has been decoded (see decode_inner).
        return self._decoded_value is not None
    @property
    def decoded_value(self):
        return self._decoded_value
    @classmethod
    def _read(cls, buf, strict=True):
        # Read ahead through the inner tag and length so we know how many
        # bytes of |buf| belong to this value; the stored value keeps the
        # tag and length bytes.
        readahead_tag, rest = tag.Tag.read(buf)
        length, rest = read_length(rest, strict=(
            strict or readahead_tag.encoding != tag.CONSTRUCTED))
        if length == -1:
            # Not sure if this even makes any sense.
            raise NotImplementedError("Indefinite length encoding of ANY types "
                                      "is not supported")
        if len(rest) < length:
            raise error.ASN1Error("Invalid length encoding")
        decoded_length = len(buf) - len(rest) + length
        return cls(serialized_value=buf[:decoded_length],
                   strict=strict), buf[decoded_length:]
    @classmethod
    def _convert_value(cls, value):
        """The value of an Any is the undecoded value."""
        # Always return the undecoded value for consistency; the
        # decoded/decoded_value properties can be used to retrieve the
        # decoded contents.
        if isinstance(value, Any):
            # This gets ambiguous real fast (do we keep the original tags or
            # replace with our own tags?) so we ban it.
            raise TypeError("Instantiating Any from another Any is illegal")
        elif isinstance(value, Abstract):
            return value.encode()
        else:
            raise TypeError("Cannot convert %s to %s" % (type(value),
                                                         cls.__name__))
    @classmethod
    def _decode_value(cls, buf, strict=True):
        # The raw bytes are the value; no interpretation at this level.
        return buf
    def decode_inner(self, value_type, strict=True):
        """Decode the undecoded contents according to a given specification.
        Args:
            value_type: an ASN.1 type.
            strict: if False, tolerate some non-fatal decoding errors.
        Raises:
            ASN1Error: decoding failed.
            RuntimeError: value already decoded.
        """
        self._decoded_value = value_type.decode(self._value, strict=strict)
class Constructed(Abstract):
    """Constructed types.

    Base class for Choice, Sequence and Repeated; subclasses expose their
    components through iteritems().
    """
    # Whether human_readable_lines prefixes each component with its key.
    print_labels = True
    # Separator printed between components.
    print_delimiter = "\n"
    def __init__(self, value=None, serialized_value=None, strict=True):
        """Initialize from a value or serialized buffer.
        Args:
            value: initializing value of an appropriate type. If the
                serialized_value is not set, the initializing value must be set.
            serialized_value: serialized inner value (with tags and lengths
                stripped).
            strict: if False, tolerate some non-fatal decoding errors.
        Raises:
            error.ASN1Error: decoding the serialized value failed.
            TypeError: invalid initializer.
        """
        super(Constructed, self).__init__(value=value,
                                          serialized_value=serialized_value,
                                          strict=strict)
        # All methods that mutate the object must set this to True.
        self._modified = False
    def modified(self):
        # Modified if this container was mutated directly, or any present
        # component reports itself modified.
        return self._modified or any([v and v.modified()
                                      for _, v in self.iteritems()])
    def human_readable_lines(self, wrap=80, label=""):
        """A pretty human readable representation of the object.
        Args:
            wrap: maximum number of characters per line. 0 or negative wrap
              means no limit. Should be chosen long enough to comfortably fit
              formatted data; otherwise it is simply ignored and output may
              look funny.
            label: a label prefix.
        Returns:
            a list of line strings of at most |wrap| characters each.
        """
        # A "\n" becomes ["", ""] which magically starts a new line when we call
        # append_lines() on it. Things like "\n-----\n" work, too.
        delimiter = (print_util.wrap_lines(self.print_delimiter, wrap=wrap))
        lines = []
        # Component count. Needed so we can print "<no components>" when none
        # are found.
        count = 0
        # Whether the next component should start on a new line. Set to true
        # when the previous component was multiline. For example, a mix of short
        # and long components with a ", " delimiter is thus printed as
        # short1, short2, short3,
        # myextremelylongcomponentth
        # atspansmultiplelines
        # short4, short5
        newline = False
        if label:
            lines += print_util.wrap_lines(label + ":", wrap)
            # If the delimiter is multiline, then output looks prettier if the
            # label is also on a separate line.
            if len(delimiter) > 1:
                newline = True
            elif len(lines[-1]) < wrap:
                # Else add a whitespace so we get "label: value"
                lines[-1] += " "
        indent = 2
        for key, value in self.iteritems():
            # Missing optional components are skipped entirely.
            if value is None:
                continue
            label = str(key) if self.print_labels else ""
            print_component = value.human_readable_lines(wrap=wrap-indent,
                                                         label=label)
            if not print_component:
                continue
            if count:
                print_util.append_lines(delimiter, wrap, lines)
            count += 1
            # Make multiline components a separate block on a new line, unless
            # we already are on a new line.
            if (newline or len(print_component) > 1) and lines and lines[-1]:
                lines += print_component
            else:
                print_util.append_lines(print_component, wrap, lines)
            newline = len(print_component) > 1
        if not count:
            print_util.append_lines(["<no components>"], wrap, lines)
        # Indent everything apart from the first line.
        return [lines[0]] + [" " + x for x in lines[1:]]
class MetaChoice(abc.ABCMeta):
    """Metaclass for building a Choice type.

    Reads the class's |components| dict and precomputes |tag_map|, the
    outermost-tag -> component-name map used by the decoder.
    """
    def __new__(mcs, name, bases, dic):
        # Build a tag -> component_name map for the decoder.
        components = dic.get("components", {})
        if components:
            tag_map = {}
            keys_seen = set()
            for key, spec in components.iteritems():
                # NOTE(review): with a plain dict this duplicate-key check
                # can never fire (dict keys are unique); it only matters if
                # |components| is a custom mapping whose iteritems() can
                # repeat keys.
                if key in keys_seen:
                    raise TypeError("Duplicate name in Choice specification")
                keys_seen.add(key)
                if not spec.tags:
                    raise TypeError("Choice type cannot have untagged "
                                    "components")
                # The outermost tag (last in the list) must uniquely
                # identify the chosen component.
                if spec.tags[-1] in tag_map:
                    raise TypeError("Duplicate outer tag in a Choice "
                                    "specification")
                tag_map[spec.tags[-1]] = key
            dic["tag_map"] = tag_map
        return super(MetaChoice, mcs).__new__(mcs, name, bases, dic)
class Choice(Constructed, collections.MutableMapping):
    """Choice.

    Behaves as a mapping with at most one key set at any time; the key
    identifies the chosen alternative.
    """
    __metaclass__ = MetaChoice
    # There is only ever one component anyway.
    print_delimiter = ""
    print_labels = False
    def __init__(self, value=None, serialized_value=None,
                 readahead_tag=None, readahead_value=None, strict=True):
        """Initialize fully or partially.
        Args:
            value: if present, should be a dictionary with one entry
                representing the chosen key and value.
            serialized_value: if present, the serialized contents (with tags
                and lengths stripped).
            readahead_tag: if present, the first tag in serialized_value
            readahead_value: if present, the value wrapped by the first tag in
                serialized value.
            strict: if False, tolerate some non-fatal decoding errors.
        Raises:
            ValueError: invalid initializer value.
        """
        if readahead_tag is not None:
            # Fast path used by _read(): the outermost tag and its contents
            # are already parsed, so skip the generic base-class decoding.
            self._value = self._decode_readahead_value(
                serialized_value, readahead_tag, readahead_value,
                strict=strict)
            self._serialized_value = serialized_value
            self._modified = False
        else:
            super(Choice, self).__init__(value=value,
                                         serialized_value=serialized_value,
                                         strict=strict)
    def __getitem__(self, key):
        value = self._value.get(key, None)
        if value is not None:
            return value
        elif key in self.components:
            # Valid but unselected components read as None.
            return None
        raise KeyError("Invalid key %s for %s" % (key, self.__class__.__name__))
    def __setitem__(self, key, value):
        # Setting any component replaces the previous choice entirely.
        spec = self.components[key]
        if value is None:
            self._value = {}
        elif type(value) is spec:
            self._value = {key: value}
        # If the supplied value is not of the exact same type then we try to
        # construct one.
        else:
            self._value = {key: spec(value)}
        self._modified = True
    def __delitem__(self, key):
        if key in self._value:
            self._value = {}
        # Raise if the key is invalid; else do nothing.
        elif key not in self.components:
            raise KeyError("Invalid key %s" % key)
        self._modified = True
    def __iter__(self):
        return iter(self._value)
    def __len__(self):
        # 0 when unset, 1 when a component is chosen.
        return len(self._value)
    @property
    def value(self):
        # A shallow copy; mutating it does not affect the object.
        return dict(self._value)
    def component_key(self):
        # The name of the chosen alternative, or None when unset.
        if not self._value:
            return None
        return self._value.keys()[0]
    # A slightly unfortunate overload of the term "value"...
    def component_value(self):
        if not self._value:
            return None
        return self._value.values()[0]
    def _encode_value(self):
        if not self._value:
            raise error.ASN1Error("Choice component not set")
        # Encode the single component.
        return self._value.values()[0].encode()
    @classmethod
    def _read(cls, buf, strict=True):
        # Read ahead through the outermost tag and length so the component
        # can be identified without re-parsing.
        readahead_tag, rest = tag.Tag.read(buf)
        length, rest = read_length(rest, strict=(
            strict or readahead_tag.encoding != tag.CONSTRUCTED))
        if length == -1:
            raise NotImplementedError("Indefinite length encoding of CHOICE "
                                      "type is not supported")
        if len(rest) < length:
            raise error.ASN1Error("Invalid length encoding")
        decoded_length = len(buf) - len(rest) + length
        return (cls(serialized_value=buf[:decoded_length],
                    readahead_tag=readahead_tag, readahead_value=rest[:length],
                    strict=strict),
                buf[decoded_length:])
    @classmethod
    def _convert_value(cls, value):
        if not value:
            return dict()
        if len(value) != 1:
            raise ValueError("Choice must have at most one component set")
        key, value = value.iteritems().next()
        if value is None:
            return {}
        try:
            spec = cls.components[key]
        except KeyError:
            raise ValueError("Invalid Choice key %s" % key)
        if type(value) is spec:
            return {key: value}
        # If the supplied value is not of the exact same type then we try to
        # construct one.
        else:
            return {key: spec(value)}
    @classmethod
    def _decode_readahead_value(cls, buf, readahead_tag, readahead_value,
                                strict=True):
        """Decode using additional information about the outermost tag."""
        try:
            key = cls.tag_map[readahead_tag]
        except KeyError:
            raise error.ASN1TagError("Tag %s is not a valid tag for a "
                                     "component of %s" %
                                     (readahead_tag, cls.__name__))
        if len(cls.components[key].tags) == 1:
            # Shortcut: we already know the tag and length, so directly get
            # the value.
            value = cls.components[key](serialized_value=readahead_value,
                                        strict=strict)
        else:
            # Component has multiple tags but the readahead only read the
            # outermost tag, so read everything again.
            value, rest = cls.components[key].read(buf, strict=strict)
            if rest:
                raise error.ASN1Error("Invalid encoding: leftover bytes when "
                                      "decoding %s" % cls.__name__)
        return {key: value}
    @classmethod
    def _decode_value(cls, buf, strict=True):
        readahead_tag, rest = tag.Tag.read(buf)
        length, rest = read_length(rest, strict=strict)
        if length == -1:
            if readahead_tag.encoding != tag.CONSTRUCTED:
                raise error.ASN1Error("Indefinite length encoding in primitive "
                                      "type")
            raise NotImplementedError("Indefinite length encoding of CHOICE "
                                      "type is not supported")
        if len(rest) != length:
            raise error.ASN1Error("Invalid length encoding")
        return cls._decode_readahead_value(buf, readahead_tag, rest,
                                           strict=strict)
class Repeated(Constructed, collections.MutableSequence):
    """Base class for SetOf and SequenceOf."""
    def __getitem__(self, index):
        return self._value[index]
    def __setitem__(self, index, value):
        # We are required to support both single-value as well as slice
        # assignment.
        if isinstance(index, slice):
            self._value[index] = self._convert_value(value)
        else:
            if type(value) is not self.component:
                value = self.component(value)
            self._value[index] = value
        self._modified = True
    def __delitem__(self, index):
        del self._value[index]
        self._modified = True
    def __len__(self):
        return len(self._value)
    def iteritems(self):
        # Expose components keyed by index, mirroring the mapping-style
        # iteration of the other Constructed types.
        return enumerate(self._value)
    def insert(self, index, value):
        # Coerce to the component type, as in __setitem__.
        if type(value) is not self.component:
            value = self.component(value)
        self._value.insert(index, value)
        self._modified = True
    @property
    def value(self):
        # A shallow copy; mutating it does not affect the object.
        return list(self._value)
    @classmethod
    def _convert_value(cls, value):
        component = cls.component
        return [item if type(item) is component else component(item)
                for item in value]
@Universal(16, tag.CONSTRUCTED)
class SequenceOf(Repeated):
    """Sequence Of."""
    def _encode_value(self):
        # Components are concatenated in order.
        return "".join(component.encode() for component in self._value)
    @classmethod
    def _decode_value(cls, buf, strict=True):
        decoded = []
        while buf:
            value, buf = cls.component.read(buf, strict=strict)
            decoded.append(value)
        return decoded
    @classmethod
    def _read_indefinite_value(cls, buf):
        # BER indefinite length: keep reading components until the EOC
        # (end-of-contents) marker.
        decoded = []
        while len(buf) >= 2:
            if buf[:2] == _EOC:
                return decoded, buf[2:]
            value, buf = cls.component.read(buf, strict=False)
            decoded.append(value)
        raise error.ASN1Error("Missing EOC octets")
# We cannot use a real set to represent SetOf because
# (a) our components are mutable and thus not hashable and
# (b) ASN.1 allows duplicates: {1} and {1, 1} are distinct sets.
# Note that this means that eq-comparison is order-dependent.
@Universal(17, tag.CONSTRUCTED)
class SetOf(Repeated):
    """Set Of."""
    def _encode_value(self):
        # DER requires the encoded components in ascending byte order.
        return "".join(sorted(component.encode()
                              for component in self._value))
    @classmethod
    def _decode_value(cls, buf, strict=True):
        decoded = []
        while buf:
            value, buf = cls.component.read(buf, strict=strict)
            decoded.append(value)
        # TODO(ekasper): reject BER encodings in strict mode, i.e.,
        # verify sort order.
        return decoded
    @classmethod
    def _read_indefinite_value(cls, buf):
        # BER indefinite length: keep reading components until the EOC
        # (end-of-contents) marker.
        decoded = []
        while len(buf) >= 2:
            if buf[:2] == _EOC:
                # TODO(ekasper): reject BER encodings in strict mode, i.e.,
                # verify sort order.
                return decoded, buf[2:]
            value, buf = cls.component.read(buf, strict=False)
            decoded.append(value)
        raise error.ASN1Error("Missing EOC octets")
class Component(object):
    """Sequence component specification."""
    def __init__(self, name, value_type, optional=False, default=None,
                 defined_by=None, lookup=None):
        """Define a sequence component.
        Args:
            name: component name. Must be unique within a sequence.
            value_type: the ASN.1 type.
            optional: if True, the component is optional.
            default: default value of the component.
            defined_by: for Any types, this specifies the component
                that defines the type.
            lookup: the lookup dictionary for Any types.
        """
        self.name = name
        self.value_type = value_type
        # Normalize the default to an instance of value_type.
        if default is not None and type(default) is not value_type:
            default = value_type(default)
        self.default = default
        # Cache the encoded default so encoders can compare encodings
        # rather than values.
        self.encoded_default = (default.encode() if default is not None
                                else None)
        # A component with a default is implicitly optional.
        self.optional = optional or (self.default is not None)
        self.defined_by = defined_by
        self.lookup = lookup
class MetaSequence(abc.ABCMeta):
    """Metaclass for building Sequence types.

    Reads the class's |components| tuple and precomputes |key_map|, a
    name -> component-spec map used when setting values.
    """
    def __new__(mcs, name, bases, dic):
        component_specs = dic.get("components", ())
        if component_specs:
            key_map = {}
            for spec in component_specs:
                # Component names must be unique within the sequence.
                if spec.name in key_map:
                    raise TypeError("Duplicate name in Sequence specification")
                key_map[spec.name] = spec
            dic["key_map"] = key_map
        return super(MetaSequence, mcs).__new__(mcs, name, bases, dic)
@Universal(16, tag.CONSTRUCTED)
class Sequence(Constructed, collections.MutableMapping):
    """Sequence.

    Behaves as a mapping from component name to component value; the
    component order is fixed by the class-level |components| spec.
    """
    __metaclass__ = MetaSequence
    def __getitem__(self, key):
        return self._value[key]
    def __setitem__(self, key, value):
        # key_map is built by MetaSequence; raises KeyError for unknown keys.
        component = self.key_map[key]
        value = self._convert_single_value(component, value)
        self._value[key] = value
        self._modified = True
    def __delitem__(self, key):
        if key not in self.key_map:
            raise KeyError("Invalid key %s" % key)
        # Deleting resets to the component default (via __setitem__ with
        # None), rather than removing the key.
        self[key] = None
        self._modified = True
    def __iter__(self):
        """Iterate component names in order."""
        for component in self.components:
            yield component.name
    def __len__(self):
        """Missing optional components are counted in the length."""
        return len(self.components)
    @property
    def value(self):
        # Note that this does not preserve the component order.
        # However an order is encoded in the type spec, so we can still
        # recreate the original object from this value.
        return dict(self._value)
    def _encode_value(self):
        ret = []
        for component in self.components:
            value = self._value[component.name]
            if value is None:
                # Only optional components may be absent.
                if not component.optional:
                    raise error.ASN1Error("Missing %s value in %s" %
                                          (component.name,
                                           self.__class__.__name__))
            else:
                # Value is not None.
                # We could compare by value for most types, but for "set" types
                # different values may yield the same encoding, so we compare
                # directly by encoding.
                # (Even though I haven't seen a defaulted set type in practice.)
                encoded_value = value.encode()
                # DER: components equal to their default are omitted.
                if component.encoded_default != encoded_value:
                    ret.append(encoded_value)
        return "".join(ret)
    @classmethod
    def _convert_single_value(cls, component, value):
        # If value is None, we store the default if it is different from None.
        if value is None:
            return component.default
        elif type(value) is component.value_type:
            return value
        # If the supplied value is not of the exact same type then we discard
        # the tag information and try to construct from scratch.
        else:
            # TODO(ekasper): verify defined_by constraints here.
            return component.value_type(value)
    @classmethod
    def _convert_value(cls, value):
        ret = {}
        value = value or {}
        if not all([key in cls.key_map for key in value]):
            raise ValueError("Invalid keys in initializer")
        for component in cls.components:
            ret[component.name] = cls._convert_single_value(
                component, value.get(component.name, None))
        return ret
    @classmethod
    def _read_value(cls, buf, strict=True):
        ret = dict()
        # First pass: decode components in specification order.
        for component in cls.components:
            try:
                value, buf = component.value_type.read(buf, strict=strict)
            except error.ASN1TagError:
                # If the component was optional and we got a tag mismatch,
                # assume decoding failed because the component was missing,
                # and carry on.
                # TODO(ekasper): since we let errors fall through recursively,
                # not all of the tag errors can be reasonably explained by
                # missing optional components. We could tighten this to match by
                # outermost tag only, and have metaclass verify the uniqueness
                # of component tags. Meanwhile, the worst that can happen is
                # that we retry in vain and don't return the most helpful error
                # message when we do finally fail.
                if not component.optional:
                    raise
                else:
                    ret[component.name] = component.default
            else:
                ret[component.name] = value
        # Second pass for decoding ANY.
        for component in cls.components:
            if component.defined_by is not None:
                # The defining component's decoded value selects the inner
                # type from the component's lookup table.
                value_type = component.lookup.get(
                    ret[component.defined_by], None)
                if value_type is not None:
                    try:
                        ret[component.name].decode_inner(value_type,
                                                         strict=strict)
                    except error.ASN1Error:
                        # In lax mode, leave the ANY undecoded on failure.
                        if strict:
                            raise
        return ret, buf
    @classmethod
    def _decode_value(cls, buf, strict=True):
        ret, buf = cls._read_value(buf, strict=strict)
        if buf:
            raise error.ASN1Error("Invalid encoding")
        return ret
    @classmethod
    def _read_indefinite_value(cls, buf):
        # We must be in strict=False mode by definition.
        ret, buf = cls._read_value(buf, strict=False)
        if buf[:2] != _EOC:
            raise error.ASN1Error("Missing EOC octets")
        return ret, buf[2:]
| {
"repo_name": "elyscape/origin",
"path": "vendor/github.com/google/certificate-transparency/python/ct/crypto/asn1/types.py",
"copies": "34",
"size": "56328",
"license": "apache-2.0",
"hash": -8156575416197773000,
"line_mean": 33.4513761468,
"line_max": 80,
"alpha_frac": 0.5748828291,
"autogenerated": false,
"ratio": 4.362791418170552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# ASN.1 types tags
from operator import getitem
from pyasn1 import error
# Tag class values (high bits of the identifier octet).
tagClassUniversal = 0x00
tagClassApplication = 0x40
tagClassContext = 0x80
tagClassPrivate = 0xC0
# Tag format bit: primitive (simple) vs constructed encoding.
tagFormatSimple = 0x00
tagFormatConstructed = 0x20
# Tagging mode flags used by the type machinery.
tagCategoryImplicit = 0x01
tagCategoryExplicit = 0x02
tagCategoryUntagged = 0x04
class Tag:
    """An ASN.1 tag: an immutable (tagClass, tagFormat, tagId) triple.

    Comparisons and hashing use only (tagClass, tagId) -- the format bit
    does not take part, mirroring the public |uniq| attribute.
    """
    def __init__(self, tagClass, tagFormat, tagId):
        """Initialize from the three tag components.

        Raises:
            error.PyAsn1Error: tagId is negative.
        """
        if tagId < 0:
            raise error.PyAsn1Error(
                'Negative tag ID (%s) not allowed' % (tagId,)
            )
        self.__tag = (tagClass, tagFormat, tagId)
        self.uniq = (tagClass, tagId)
        self.__hashedUniqTag = hash(self.uniq)
    def __repr__(self):
        return '%s(tagClass=%s, tagFormat=%s, tagId=%s)' % (
            (self.__class__.__name__,) + self.__tag
        )
    # These is really a hotspot -- expose public "uniq" attribute to save on
    # function calls
    def __eq__(self, other): return self.uniq == other.uniq
    def __ne__(self, other): return self.uniq != other.uniq
    def __lt__(self, other): return self.uniq < other.uniq
    def __le__(self, other): return self.uniq <= other.uniq
    def __gt__(self, other): return self.uniq > other.uniq
    def __ge__(self, other): return self.uniq >= other.uniq
    def __hash__(self): return self.__hashedUniqTag
    def __getitem__(self, idx): return self.__tag[idx]
    def __and__(self, otherTag):
        # Bug fix: the original AND-ed the whole self.__tag tuple with each
        # integer component (a TypeError at runtime); index the components
        # individually, as __or__ already does.
        (tagClass, tagFormat, tagId) = otherTag
        return self.__class__(
            self.__tag[0] & tagClass,
            self.__tag[1] & tagFormat,
            self.__tag[2] & tagId
        )
    def __or__(self, otherTag):
        (tagClass, tagFormat, tagId) = otherTag
        return self.__class__(
            self.__tag[0]|tagClass,
            self.__tag[1]|tagFormat,
            self.__tag[2]|tagId
        )
    def asTuple(self): return self.__tag  # __getitem__() is slow
class TagSet:
    """An ordered collection of Tags applied to a type.

    |baseTag| is the type's own tag; |superTags| lists all tags from
    innermost to outermost as produced by explicit/implicit tagging.
    """
    def __init__(self, baseTag=(), *superTags):
        self.__baseTag = baseTag
        self.__superTags = superTags
        self.__hashedSuperTags = hash(superTags)
        # Concatenate the (class, id) pairs of all tags; like Tag.uniq,
        # this is the comparison key and is exposed publicly as a hotspot
        # optimization.
        uniq = ()
        for superTag in superTags:
            uniq += superTag.uniq
        self.uniq = uniq
        self.__lenOfSuperTags = len(superTags)
    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join(repr(superTag) for superTag in self.__superTags)
        )
    def __add__(self, superTag):
        # Append an outer tag, producing a new TagSet.
        return self.__class__(
            self.__baseTag, *self.__superTags + (superTag,)
        )
    def __radd__(self, superTag):
        # Prepend an inner tag, producing a new TagSet.
        return self.__class__(
            self.__baseTag, *(superTag,) + self.__superTags
        )
    def tagExplicitly(self, superTag):
        """Return a new TagSet with |superTag| added as an explicit tag."""
        tagClass, tagFormat, tagId = superTag
        if tagClass == tagClassUniversal:
            raise error.PyAsn1Error(
                "Can't tag with UNIVERSAL-class tag"
            )
        # An explicit tag always wraps, hence is always constructed.
        if tagFormat != tagFormatConstructed:
            superTag = Tag(tagClass, tagFormatConstructed, tagId)
        return self + superTag
    def tagImplicitly(self, superTag):
        """Return a new TagSet with the outermost tag replaced."""
        tagClass, tagFormat, tagId = superTag
        if self.__superTags:
            # Keep the format of the tag being overridden.
            superTag = Tag(tagClass, self.__superTags[-1][1], tagId)
        return self[:-1] + superTag
    def getBaseTag(self): return self.__baseTag
    def __getitem__(self, idx):
        if isinstance(idx, slice):
            return self.__class__(
                self.__baseTag, *self.__superTags[idx]
            )
        return self.__superTags[idx]
    def __eq__(self, other): return self.uniq == other.uniq
    def __ne__(self, other): return self.uniq != other.uniq
    def __lt__(self, other): return self.uniq < other.uniq
    def __le__(self, other): return self.uniq <= other.uniq
    def __gt__(self, other): return self.uniq > other.uniq
    def __ge__(self, other): return self.uniq >= other.uniq
    def __hash__(self): return self.__hashedSuperTags
    def __len__(self): return self.__lenOfSuperTags
    def isSuperTagSetOf(self, tagSet):
        """Return 1 if all of our tags prefix-match |tagSet|, else None."""
        if len(tagSet) < self.__lenOfSuperTags:
            return
        for idx in range(self.__lenOfSuperTags):
            if self.__superTags[idx] != tagSet[idx]:
                return
        return 1
def initTagSet(tag): return TagSet(tag, tag)
| {
"repo_name": "qqzwc/XX-Net",
"path": "code/default/python27/1.0/lib/noarch/pyasn1/type/tag.py",
"copies": "8",
"size": "4295",
"license": "bsd-2-clause",
"hash": -5603237265099525000,
"line_mean": 34.2049180328,
"line_max": 76,
"alpha_frac": 0.5694994179,
"autogenerated": false,
"ratio": 3.7609457092819616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0040324209713071815,
"num_lines": 122
} |
# ASN.1 types tags
from operator import getitem
from pyasn1 import error
# Tag class component values (high bits of the identifier octet)
tagClassUniversal = 0x00
tagClassApplication = 0x40
tagClassContext = 0x80
tagClassPrivate = 0xC0
# Tag format component: primitive ("simple") vs. constructed encoding
tagFormatSimple = 0x00
tagFormatConstructed = 0x20
# Tagging mode markers used by higher layers of the library
tagCategoryImplicit = 0x01
tagCategoryExplicit = 0x02
tagCategoryUntagged = 0x04
class Tag:
    """Immutable ASN.1 tag: a (tagClass, tagFormat, tagId) triple.

    Equality, ordering and hashing deliberately ignore tagFormat — the
    public ``uniq`` attribute holds only (tagClass, tagId) and is exposed
    directly (rather than behind accessors) because comparisons are a
    hotspot and attribute access is cheaper than a function call.
    """
    def __init__(self, tagClass, tagFormat, tagId):
        if tagId < 0:
            raise error.PyAsn1Error(
                'Negative tag ID (%s) not allowed' % (tagId,)
                )
        self.__tag = (tagClass, tagFormat, tagId)
        self.uniq = (tagClass, tagId)
        self.__hashedUniqTag = hash(self.uniq)

    def __repr__(self):
        return '%s(tagClass=%s, tagFormat=%s, tagId=%s)' % (
            (self.__class__.__name__,) + self.__tag
            )

    # Comparison protocol keys on "uniq" (tagFormat excluded).
    def __eq__(self, other): return self.uniq == other.uniq
    def __ne__(self, other): return self.uniq != other.uniq
    def __lt__(self, other): return self.uniq < other.uniq
    def __le__(self, other): return self.uniq <= other.uniq
    def __gt__(self, other): return self.uniq > other.uniq
    def __ge__(self, other): return self.uniq >= other.uniq
    def __hash__(self): return self.__hashedUniqTag
    def __getitem__(self, idx): return self.__tag[idx]

    def __and__(self, otherTag):
        """Component-wise bitwise AND with another (class, format, id) triple.

        Bug fix: previously this AND-ed the whole ``__tag`` tuple against
        each integer component (``tuple & int`` raises TypeError); combine
        the components individually, mirroring ``__or__``.
        """
        (tagClass, tagFormat, tagId) = otherTag
        return self.__class__(
            self.__tag[0] & tagClass,
            self.__tag[1] & tagFormat,
            self.__tag[2] & tagId
            )

    def __or__(self, otherTag):
        """Component-wise bitwise OR with another (class, format, id) triple."""
        (tagClass, tagFormat, tagId) = otherTag
        return self.__class__(
            self.__tag[0] | tagClass,
            self.__tag[1] | tagFormat,
            self.__tag[2] | tagId
            )

    def asTuple(self): return self.__tag  # __getitem__() is slow
class TagSet:
    """An ordered, immutable collection of tags attached to an ASN.1 type.

    ``baseTag`` records the type's own tag; ``superTags`` accumulate any
    implicit/explicit tagging applied on top.  The public ``uniq``
    attribute concatenates the ``uniq`` tuples of all super tags and is
    the sole comparison key.
    """
    def __init__(self, baseTag=(), *superTags):
        self.__baseTag = baseTag
        self.__superTags = superTags
        self.__hashedSuperTags = hash(superTags)
        _uniq = ()
        for t in superTags:
            _uniq = _uniq + t.uniq
        self.uniq = _uniq
        # Cached length, avoids repeated len() in hot comparison paths
        self.__lenOfSuperTags = len(superTags)
    def __repr__(self):
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join([repr(x) for x in self.__superTags])
            )
    def __add__(self, superTag):
        # Append one more super tag, producing a fresh TagSet.
        return self.__class__(
            self.__baseTag, *self.__superTags + (superTag,)
            )
    def __radd__(self, superTag):
        # Prepend a super tag, producing a fresh TagSet.
        return self.__class__(
            self.__baseTag, *(superTag,) + self.__superTags
            )
    def tagExplicitly(self, superTag):
        """Apply EXPLICIT tagging: append *superTag*, forced constructed."""
        tagClass, tagFormat, tagId = superTag
        if tagClass == tagClassUniversal:
            raise error.PyAsn1Error(
                'Can\'t tag with UNIVERSAL-class tag'
                )
        if tagFormat != tagFormatConstructed:
            superTag = Tag(tagClass, tagFormatConstructed, tagId)
        return self + superTag
    def tagImplicitly(self, superTag):
        """Apply IMPLICIT tagging: replace the outermost tag, keeping its
        format component."""
        tagClass, tagFormat, tagId = superTag
        if self.__superTags:
            superTag = Tag(tagClass, self.__superTags[-1][1], tagId)
        return self[:-1] + superTag
    def getBaseTag(self):
        return self.__baseTag
    def __getitem__(self, idx):
        # Slices yield a new TagSet; scalar indices yield a single tag.
        if isinstance(idx, slice):
            return self.__class__(
                self.__baseTag, *getitem(self.__superTags, idx)
                )
        return self.__superTags[idx]
    # Comparisons key on the concatenated "uniq" tuples only.
    def __eq__(self, other):
        return self.uniq == other.uniq
    def __ne__(self, other):
        return self.uniq != other.uniq
    def __lt__(self, other):
        return self.uniq < other.uniq
    def __le__(self, other):
        return self.uniq <= other.uniq
    def __gt__(self, other):
        return self.uniq > other.uniq
    def __ge__(self, other):
        return self.uniq >= other.uniq
    def __hash__(self):
        return self.__hashedSuperTags
    def __len__(self):
        return self.__lenOfSuperTags
    def isSuperTagSetOf(self, tagSet):
        """Return 1 if *tagSet* starts with all of our super tags, else None."""
        if len(tagSet) < self.__lenOfSuperTags:
            return
        idx = self.__lenOfSuperTags - 1
        while idx >= 0:
            if self.__superTags[idx] != tagSet[idx]:
                return
            idx = idx - 1
        return 1
def initTagSet(tag): return TagSet(tag, tag)
| {
"repo_name": "itielshwartz/BackendApi",
"path": "lib/pyasn1/type/tag.py",
"copies": "1",
"size": "4375",
"license": "apache-2.0",
"hash": -8513442430148157000,
"line_mean": 26.6898734177,
"line_max": 77,
"alpha_frac": 0.5590857143,
"autogenerated": false,
"ratio": 3.7748058671268336,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9833891581426834,
"avg_score": 0,
"num_lines": 158
} |
# ASN.1 "universal" data types
import operator
import sys
from pyasn1.type import base, tag, constraint, namedtype, namedval, tagmap
from pyasn1.codec.ber import eoo
from pyasn1.compat import octets
from pyasn1 import error
# "Simple" ASN.1 types (yet incomplete)
class Integer(base.AbstractSimpleAsn1Item):
    """ASN.1 INTEGER type.

    Wraps a Python integer in ``self._value`` and mirrors the numeric
    protocol, so instances participate in arithmetic like plain ints;
    every operation returns a clone carrying the result.  Optional
    ``namedValues`` map symbolic names to numbers.
    """
    # Universal tag 0x02 (INTEGER)
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
        )
    namedValues = namedval.NamedValues()
    def __init__(self, value=None, tagSet=None, subtypeSpec=None,
                 namedValues=None):
        if namedValues is None:
            self.__namedValues = self.namedValues
        else:
            self.__namedValues = namedValues
        base.AbstractSimpleAsn1Item.__init__(
            self, value, tagSet, subtypeSpec
            )
    # --- numeric protocol: each op clones with the computed value ---
    def __and__(self, value):
        return self.clone(self._value & value)
    def __rand__(self, value):
        return self.clone(value & self._value)
    def __or__(self, value):
        return self.clone(self._value | value)
    def __ror__(self, value):
        return self.clone(value | self._value)
    def __xor__(self, value):
        return self.clone(self._value ^ value)
    def __rxor__(self, value):
        return self.clone(value ^ self._value)
    def __lshift__(self, value):
        return self.clone(self._value << value)
    def __rshift__(self, value):
        return self.clone(self._value >> value)
    def __add__(self, value):
        return self.clone(self._value + value)
    def __radd__(self, value):
        return self.clone(value + self._value)
    def __sub__(self, value):
        return self.clone(self._value - value)
    def __rsub__(self, value):
        return self.clone(value - self._value)
    def __mul__(self, value):
        return self.clone(self._value * value)
    def __rmul__(self, value):
        return self.clone(value * self._value)
    def __mod__(self, value):
        return self.clone(self._value % value)
    def __rmod__(self, value):
        return self.clone(value % self._value)
    def __pow__(self, value, modulo=None):
        return self.clone(pow(self._value, value, modulo))
    def __rpow__(self, value):
        return self.clone(pow(value, self._value))
    # Division differs between Python 2 (__div__) and 3 (__truediv__ etc.)
    if sys.version_info[0] <= 2:
        def __div__(self, value):
            return self.clone(self._value // value)
        def __rdiv__(self, value):
            return self.clone(value // self._value)
    else:
        def __truediv__(self, value):
            return self.clone(self._value / value)
        def __rtruediv__(self, value):
            return self.clone(value / self._value)
        # NOTE(review): __divmod__/__rdivmod__ return only the floor-division
        # quotient, not a (quotient, remainder) pair as the builtin divmod
        # contract specifies -- confirm before relying on divmod() here.
        def __divmod__(self, value):
            return self.clone(self._value // value)
        def __rdivmod__(self, value):
            return self.clone(value // self._value)
    # Defining __eq__ suppresses inherited hashing on py3; restore it.
    __hash__ = base.AbstractSimpleAsn1Item.__hash__
    def __int__(self):
        return int(self._value)
    if sys.version_info[0] <= 2:
        def __long__(self): return long(self._value)
    def __float__(self):
        return float(self._value)
    def __abs__(self):
        return abs(self._value)
    def __index__(self):
        return int(self._value)
    # Comparisons delegate to the underlying integer value.
    def __lt__(self, value):
        return self._value < value
    def __le__(self, value):
        return self._value <= value
    def __eq__(self, value):
        return self._value == value
    def __ne__(self, value):
        return self._value != value
    def __gt__(self, value):
        return self._value > value
    def __ge__(self, value):
        return self._value >= value
    def prettyIn(self, value):
        """Coerce *value* (int-like or named-value string) to a plain int."""
        if not isinstance(value, str):
            try:
                return int(value)
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.
            except:
                raise error.PyAsn1Error(
                    'Can\'t coerce %s into integer: %s' % (value, sys.exc_info()[1])
                    )
        # Strings are first looked up among named values, then parsed as int.
        r = self.__namedValues.getValue(value)
        if r is not None:
            return r
        try:
            return int(value)
        except:
            raise error.PyAsn1Error(
                'Can\'t coerce %s into integer: %s' % (value, sys.exc_info()[1])
                )
    def prettyOut(self, value):
        # Prefer the symbolic name when one is registered for this value.
        r = self.__namedValues.getName(value)
        return r is None and str(value) or repr(r)
    def getNamedValues(self):
        return self.__namedValues
    def clone(self, value=None, tagSet=None, subtypeSpec=None,
              namedValues=None):
        """Copy this object, overriding any subset of its properties."""
        if value is None and tagSet is None and subtypeSpec is None \
           and namedValues is None:
            return self
        if value is None:
            value = self._value
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        if namedValues is None:
            namedValues = self.__namedValues
        return self.__class__(value, tagSet, subtypeSpec, namedValues)
    def subtype(self, value=None, implicitTag=None, explicitTag=None,
                subtypeSpec=None, namedValues=None):
        """Derive a subtype: tags are applied, constraints/values are merged."""
        if value is None:
            value = self._value
        if implicitTag is not None:
            tagSet = self._tagSet.tagImplicitly(implicitTag)
        elif explicitTag is not None:
            tagSet = self._tagSet.tagExplicitly(explicitTag)
        else:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        else:
            subtypeSpec = subtypeSpec + self._subtypeSpec
        if namedValues is None:
            namedValues = self.__namedValues
        else:
            namedValues = namedValues + self.__namedValues
        return self.__class__(value, tagSet, subtypeSpec, namedValues)
class Boolean(Integer):
    """ASN.1 BOOLEAN type: an Integer constrained to 0 (False) or 1 (True)."""
    # Universal tag 0x01 (BOOLEAN)
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01),
        )
    subtypeSpec = Integer.subtypeSpec + constraint.SingleValueConstraint(0, 1)
    namedValues = Integer.namedValues.clone(('False', 0), ('True', 1))
class BitString(base.AbstractSimpleAsn1Item):
    """ASN.1 BIT STRING type, held internally as a tuple of 0/1 integers.

    Accepts binary ('101'B) and hex ('A98A'H) quoted notations, lists of
    named bits, sequences of 0/1, or another BitString.
    """
    # Universal tag 0x03 (BIT STRING)
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
        )
    namedValues = namedval.NamedValues()
    def __init__(self, value=None, tagSet=None, subtypeSpec=None,
                 namedValues=None):
        if namedValues is None:
            self.__namedValues = self.namedValues
        else:
            self.__namedValues = namedValues
        base.AbstractSimpleAsn1Item.__init__(
            self, value, tagSet, subtypeSpec
            )
    def clone(self, value=None, tagSet=None, subtypeSpec=None,
              namedValues=None):
        """Copy this object, overriding any subset of its properties."""
        if value is None and tagSet is None and subtypeSpec is None \
           and namedValues is None:
            return self
        if value is None:
            value = self._value
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        if namedValues is None:
            namedValues = self.__namedValues
        return self.__class__(value, tagSet, subtypeSpec, namedValues)
    def subtype(self, value=None, implicitTag=None, explicitTag=None,
                subtypeSpec=None, namedValues=None):
        """Derive a subtype: tags are applied, constraints/values are merged."""
        if value is None:
            value = self._value
        if implicitTag is not None:
            tagSet = self._tagSet.tagImplicitly(implicitTag)
        elif explicitTag is not None:
            tagSet = self._tagSet.tagExplicitly(explicitTag)
        else:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        else:
            subtypeSpec = subtypeSpec + self._subtypeSpec
        if namedValues is None:
            namedValues = self.__namedValues
        else:
            namedValues = namedValues + self.__namedValues
        return self.__class__(value, tagSet, subtypeSpec, namedValues)
    def __str__(self):
        return str(tuple(self))
    # Immutable sequence object protocol
    def __len__(self):
        # _len caches the length after the first call
        if self._len is None:
            self._len = len(self._value)
        return self._len
    def __getitem__(self, i):
        if isinstance(i, slice):
            return self.clone(operator.getitem(self._value, i))
        else:
            return self._value[i]
    def __add__(self, value):
        return self.clone(self._value + value)
    def __radd__(self, value):
        return self.clone(value + self._value)
    def __mul__(self, value):
        return self.clone(self._value * value)
    def __rmul__(self, value):
        return self * value
    def prettyIn(self, value):
        """Coerce any accepted initializer notation into a tuple of 0/1."""
        r = []
        if not value:
            return ()
        elif isinstance(value, str):
            if value[0] == '\'':
                # Quoted notation: '1010'B (binary) or 'A98A'H (hex)
                if value[-2:] == '\'B':
                    for v in value[1:-2]:
                        if v == '0':
                            r.append(0)
                        elif v == '1':
                            r.append(1)
                        else:
                            raise error.PyAsn1Error(
                                'Non-binary BIT STRING initializer %s' % (v,)
                                )
                    return tuple(r)
                elif value[-2:] == '\'H':
                    # Each hex digit expands to four bits, MSB first
                    for v in value[1:-2]:
                        i = 4
                        v = int(v, 16)
                        while i:
                            i = i - 1
                            r.append((v >> i) & 0x01)
                    return tuple(r)
                else:
                    raise error.PyAsn1Error(
                        'Bad BIT STRING value notation %s' % (value,)
                        )
            else:
                # Comma-separated list of named bits; positions come from
                # the NamedValues registry
                for i in value.split(','):
                    j = self.__namedValues.getValue(i)
                    if j is None:
                        raise error.PyAsn1Error(
                            'Unknown bit identifier \'%s\'' % (i,)
                            )
                    if j >= len(r):
                        r.extend([0] * (j - len(r) + 1))
                    r[j] = 1
                return tuple(r)
        elif isinstance(value, (tuple, list)):
            r = tuple(value)
            for b in r:
                if b and b != 1:
                    raise error.PyAsn1Error(
                        'Non-binary BitString initializer \'%s\'' % (r,)
                        )
            return r
        elif isinstance(value, BitString):
            return tuple(value)
        else:
            raise error.PyAsn1Error(
                'Bad BitString initializer type \'%s\'' % (value,)
                )
    def prettyOut(self, value):
        # Render in quoted-binary notation, e.g. "'1010'B"
        return '\"\'%s\'B\"' % ''.join([str(x) for x in value])
class OctetString(base.AbstractSimpleAsn1Item):
    """ASN.1 OCTET STRING type.

    Holds a byte string (py3 ``bytes``, py2 ``str``); text input is
    encoded with ``self._encoding``.  Alternative initializers accept
    quoted binary (``binValue``) or hex (``hexValue``) digit strings.
    """
    # Universal tag 0x04 (OCTET STRING)
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
        )
    defaultBinValue = defaultHexValue = base.noValue
    encoding = 'us-ascii'
    def __init__(self, value=None, tagSet=None, subtypeSpec=None,
                 encoding=None, binValue=None, hexValue=None):
        if encoding is None:
            self._encoding = self.encoding
        else:
            self._encoding = encoding
        # binValue/hexValue take precedence over value if supplied
        if binValue is not None:
            value = self.fromBinaryString(binValue)
        if hexValue is not None:
            value = self.fromHexString(hexValue)
        if value is None or value is base.noValue:
            value = self.defaultHexValue
        if value is None or value is base.noValue:
            value = self.defaultBinValue
        self.__intValue = None  # lazy cache for asNumbers()
        base.AbstractSimpleAsn1Item.__init__(self, value, tagSet, subtypeSpec)
    def clone(self, value=None, tagSet=None, subtypeSpec=None,
              encoding=None, binValue=None, hexValue=None):
        """Copy this object, overriding any subset of its properties."""
        if value is None and tagSet is None and subtypeSpec is None and \
           encoding is None and binValue is None and hexValue is None:
            return self
        if value is None and binValue is None and hexValue is None:
            value = self._value
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        if encoding is None:
            encoding = self._encoding
        return self.__class__(
            value, tagSet, subtypeSpec, encoding, binValue, hexValue
            )
    # Value coercion differs between Python 2 (str) and 3 (bytes)
    if sys.version_info[0] <= 2:
        def prettyIn(self, value):
            if isinstance(value, str):
                return value
            elif isinstance(value, (tuple, list)):
                try:
                    return ''.join([chr(x) for x in value])
                except ValueError:
                    raise error.PyAsn1Error(
                        'Bad OctetString initializer \'%s\'' % (value,)
                        )
            else:
                return str(value)
    else:
        def prettyIn(self, value):
            if isinstance(value, bytes):
                return value
            elif isinstance(value, OctetString):
                return value.asOctets()
            elif isinstance(value, (tuple, list, map)):
                try:
                    return bytes(value)
                except ValueError:
                    raise error.PyAsn1Error(
                        'Bad OctetString initializer \'%s\'' % (value,)
                        )
            else:
                try:
                    return str(value).encode(self._encoding)
                except UnicodeEncodeError:
                    raise error.PyAsn1Error(
                        'Can\'t encode string \'%s\' with \'%s\' codec' % (value, self._encoding)
                        )
    def fromBinaryString(self, value):
        """Convert a string of '0'/'1' digits into octets (MSB first)."""
        bitNo = 8;
        byte = 0;
        r = ()
        for v in value:
            if bitNo:
                bitNo = bitNo - 1
            else:
                # Byte complete -- flush and start the next one
                bitNo = 7
                r = r + (byte,)
                byte = 0
            if v == '0':
                v = 0
            elif v == '1':
                v = 1
            else:
                raise error.PyAsn1Error(
                    'Non-binary OCTET STRING initializer %s' % (v,)
                    )
            byte = byte | (v << bitNo)
        return octets.ints2octs(r + (byte,))
    def fromHexString(self, value):
        """Convert a string of hex digits into octets, two digits per byte."""
        r = p = ()
        for v in value:
            if p:
                r = r + (int(p + v, 16),)
                p = ()
            else:
                p = v
        if p:
            # Odd number of digits: pad the final nibble with zero
            r = r + (int(p + '0', 16),)
        return octets.ints2octs(r)
    def prettyOut(self, value):
        # Hex dump for non-printable content, plain text otherwise
        if sys.version_info[0] <= 2:
            numbers = tuple([ord(x) for x in value])
        else:
            numbers = tuple(value)
        if [x for x in numbers if x < 32 or x > 126]:
            return '0x' + ''.join(['%.2x' % x for x in numbers])
        else:
            return str(value)
    def __repr__(self):
        if self._value is base.noValue:
            return self.__class__.__name__ + '()'
        if [x for x in self.asNumbers() if x < 32 or x > 126]:
            return self.__class__.__name__ + '(hexValue=\'' + ''.join(['%.2x' % x for x in self.asNumbers()]) + '\')'
        else:
            return self.__class__.__name__ + '(\'' + self.prettyOut(self._value) + '\')'
    # str/bytes conversions differ between Python 2 and 3
    if sys.version_info[0] <= 2:
        def __str__(self):
            return str(self._value)
        def __unicode__(self):
            return self._value.decode(self._encoding, 'ignore')
        def asOctets(self):
            return self._value
        def asNumbers(self):
            # Cached tuple of integer byte values
            if self.__intValue is None:
                self.__intValue = tuple([ord(x) for x in self._value])
            return self.__intValue
    else:
        def __str__(self):
            return self._value.decode(self._encoding, 'ignore')
        def __bytes__(self):
            return self._value
        def asOctets(self):
            return self._value
        def asNumbers(self):
            # Cached tuple of integer byte values
            if self.__intValue is None:
                self.__intValue = tuple(self._value)
            return self.__intValue
    # Immutable sequence object protocol
    def __len__(self):
        # _len caches the length after the first call
        if self._len is None:
            self._len = len(self._value)
        return self._len
    def __getitem__(self, i):
        if isinstance(i, slice):
            return self.clone(operator.getitem(self._value, i))
        else:
            return self._value[i]
    def __add__(self, value):
        return self.clone(self._value + self.prettyIn(value))
    def __radd__(self, value):
        return self.clone(self.prettyIn(value) + self._value)
    def __mul__(self, value):
        return self.clone(self._value * value)
    def __rmul__(self, value):
        return self * value
class Null(OctetString):
    """ASN.1 NULL type: an OctetString constrained to the empty value."""
    defaultValue = ''.encode()  # This is tightly constrained
    # Universal tag 0x05 (NULL)
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
        )
    subtypeSpec = OctetString.subtypeSpec + constraint.SingleValueConstraint(''.encode())
# Integer types differ across major Python versions: Python 2 has both
# int and long, Python 3 only int.  Used with isinstance() below.
if sys.version_info[0] <= 2:
    intTypes = (int, long)
else:
    intTypes = int
class ObjectIdentifier(base.AbstractSimpleAsn1Item):
    """ASN.1 OBJECT IDENTIFIER type, held as a tuple of non-negative ints."""
    # Universal tag 0x06 (OBJECT IDENTIFIER)
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
        )
    def __add__(self, other):
        return self.clone(self._value + other)
    def __radd__(self, other):
        return self.clone(other + self._value)
    def asTuple(self):
        return self._value
    # Sequence object protocol
    def __len__(self):
        # _len caches the length after the first call
        if self._len is None:
            self._len = len(self._value)
        return self._len
    def __getitem__(self, i):
        if isinstance(i, slice):
            return self.clone(
                operator.getitem(self._value, i)
                )
        else:
            return self._value[i]
    def __str__(self):
        return self.prettyPrint()
    def index(self, suboid):
        return self._value.index(suboid)
    def isPrefixOf(self, value):
        """Returns true if argument OID resides deeper in the OID tree"""
        l = len(self)
        if l <= len(value):
            if self._value[:l] == value[:l]:
                return 1
        return 0
    def prettyIn(self, value):
        """Dotted -> tuple of numerics OID converter"""
        if isinstance(value, tuple):
            pass
        elif isinstance(value, ObjectIdentifier):
            return tuple(value)
        elif isinstance(value, str):
            r = []
            # Empty components (consecutive dots) are silently dropped
            for element in [x for x in value.split('.') if x != '']:
                try:
                    # base 0 also accepts 0x.. / 0o.. prefixed components
                    r.append(int(element, 0))
                except ValueError:
                    raise error.PyAsn1Error(
                        'Malformed Object ID %s at %s: %s' %
                        (str(value), self.__class__.__name__, sys.exc_info()[1])
                        )
            value = tuple(r)
        else:
            try:
                value = tuple(value)
            except TypeError:
                raise error.PyAsn1Error(
                    'Malformed Object ID %s at %s: %s' %
                    (str(value), self.__class__.__name__, sys.exc_info()[1])
                    )
        # Every sub-identifier must be a non-negative integer
        for x in value:
            if not isinstance(x, intTypes) or x < 0:
                raise error.PyAsn1Error(
                    'Invalid sub-ID in %s at %s' % (value, self.__class__.__name__)
                    )
        return value
    def prettyOut(self, value):
        return '.'.join([str(x) for x in value])
class Real(base.AbstractSimpleAsn1Item):
    """ASN.1 REAL type.

    Finite values are held as a normalized (mantissa, base, exponent)
    tuple with base 2 or 10; infinities (when the platform supports them)
    are held as float('inf') / float('-inf').
    """
    try:
        _plusInf = float('inf')
        _minusInf = float('-inf')
        _inf = (_plusInf, _minusInf)
    except ValueError:
        # Infinity support is platform and Python dependent
        _plusInf = _minusInf = None
        _inf = ()
    # Universal tag 0x09 (REAL)
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
        )
    def __normalizeBase10(self, value):
        # Move trailing factors of ten from the mantissa into the exponent.
        m, b, e = value
        while m and m % 10 == 0:
            # Bug fix: floor division keeps the mantissa an int on Python 3
            # ("/" would silently produce a float); m % 10 == 0 guarantees
            # the division is exact, so py2 behavior is unchanged.
            m = m // 10
            e = e + 1
        return m, b, e
    def prettyIn(self, value):
        """Coerce tuple/int/float/Real/str input into the internal form."""
        if isinstance(value, tuple) and len(value) == 3:
            for d in value:
                if not isinstance(d, intTypes):
                    raise error.PyAsn1Error(
                        'Lame Real value syntax: %s' % (value,)
                        )
            if value[1] not in (2, 10):
                raise error.PyAsn1Error(
                    'Prohibited base for Real value: %s' % (value[1],)
                    )
            if value[1] == 10:
                value = self.__normalizeBase10(value)
            return value
        elif isinstance(value, intTypes):
            return self.__normalizeBase10((value, 10, 0))
        elif isinstance(value, float):
            if self._inf and value in self._inf:
                return value
            else:
                # Scale the fraction away, tracking the decimal exponent
                e = 0
                while int(value) != value:
                    value = value * 10
                    e = e - 1
                return self.__normalizeBase10((int(value), 10, e))
        elif isinstance(value, Real):
            return tuple(value)
        elif isinstance(value, str):  # handle infinite literal
            try:
                return float(value)
            except ValueError:
                pass
        raise error.PyAsn1Error(
            'Bad real value syntax: %s' % (value,)
            )
    def prettyOut(self, value):
        if value in self._inf:
            return '\'%s\'' % value
        else:
            return str(value)
    def isPlusInfinity(self):
        return self._value == self._plusInf
    def isMinusInfinity(self):
        return self._value == self._minusInf
    def isInfinity(self):
        return self._value in self._inf
    def __str__(self):
        return str(float(self))
    # --- numeric protocol: operate on the float form, clone the result ---
    def __add__(self, value):
        return self.clone(float(self) + value)
    def __radd__(self, value):
        return self + value
    def __mul__(self, value):
        return self.clone(float(self) * value)
    def __rmul__(self, value):
        return self * value
    def __sub__(self, value):
        return self.clone(float(self) - value)
    def __rsub__(self, value):
        return self.clone(value - float(self))
    def __mod__(self, value):
        return self.clone(float(self) % value)
    def __rmod__(self, value):
        return self.clone(value % float(self))
    def __pow__(self, value, modulo=None):
        return self.clone(pow(float(self), value, modulo))
    def __rpow__(self, value):
        return self.clone(pow(value, float(self)))
    if sys.version_info[0] <= 2:
        def __div__(self, value):
            return self.clone(float(self) / value)
        def __rdiv__(self, value):
            return self.clone(value / float(self))
    else:
        def __truediv__(self, value):
            return self.clone(float(self) / value)
        def __rtruediv__(self, value):
            return self.clone(value / float(self))
        # NOTE(review): __divmod__/__rdivmod__ return only the quotient, not
        # a (quotient, remainder) pair -- mirrors Integer above; confirm
        # before relying on divmod() here.
        def __divmod__(self, value):
            return self.clone(float(self) // value)
        def __rdivmod__(self, value):
            return self.clone(value // float(self))
    def __int__(self):
        return int(float(self))
    if sys.version_info[0] <= 2:
        def __long__(self): return long(float(self))
    def __float__(self):
        if self._value in self._inf:
            return self._value
        else:
            return float(
                self._value[0] * pow(self._value[1], self._value[2])
                )
    def __abs__(self):
        return abs(float(self))
    # Comparisons go through the float form
    def __lt__(self, value):
        return float(self) < value
    def __le__(self, value):
        return float(self) <= value
    def __eq__(self, value):
        return float(self) == value
    def __ne__(self, value):
        return float(self) != value
    def __gt__(self, value):
        return float(self) > value
    def __ge__(self, value):
        return float(self) >= value
    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            return bool(float(self))
    else:
        def __bool__(self):
            return bool(float(self))
    # Defining __eq__ suppresses inherited hashing on py3; restore it.
    __hash__ = base.AbstractSimpleAsn1Item.__hash__
    def __getitem__(self, idx):
        # Expose the (mantissa, base, exponent) components; meaningless
        # for infinities.
        if self._value in self._inf:
            raise error.PyAsn1Error('Invalid infinite value operation')
        else:
            return self._value[idx]
class Enumerated(Integer):
    """ASN.1 ENUMERATED type: an Integer with its own universal tag 0x0A."""
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x0A)
        )
# "Structured" ASN.1 types
class SetOf(base.AbstractConstructedAsn1Item):
    """ASN.1 SET OF type: a homogeneous collection of components, all of
    the (optional) ``componentType``."""
    componentType = None
    # Universal tag 0x11 (SET), constructed
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
        )
    typeId = 1
    def _cloneComponentValues(self, myClone, cloneValueFlag):
        # Deep-copy stored components into *myClone*
        idx = 0;
        l = len(self._componentValues)
        while idx < l:
            c = self._componentValues[idx]
            if c is not None:
                if isinstance(c, base.AbstractConstructedAsn1Item):
                    myClone.setComponentByPosition(
                        idx, c.clone(cloneValueFlag=cloneValueFlag)
                        )
                else:
                    myClone.setComponentByPosition(idx, c.clone())
            idx = idx + 1
    def _verifyComponent(self, idx, value):
        # Reject values incompatible with the declared component type
        if self._componentType is not None and \
           not self._componentType.isSuperTypeOf(value):
            raise error.PyAsn1Error('Component type error %s' % (value,))
    def getComponentByPosition(self, idx):
        return self._componentValues[idx]
    def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
        """Store *value* at *idx*, growing the holder as needed.

        value=None instantiates a default component of componentType.
        Returns self to allow chaining.
        """
        l = len(self._componentValues)
        if idx >= l:
            self._componentValues = self._componentValues + (idx - l + 1) * [None]
        if value is None:
            if self._componentValues[idx] is None:
                if self._componentType is None:
                    raise error.PyAsn1Error('Component type not defined')
                self._componentValues[idx] = self._componentType.clone()
                self._componentValuesSet = self._componentValuesSet + 1
            return self
        elif not isinstance(value, base.Asn1Item):
            # Coerce plain Python values through the component type
            if self._componentType is None:
                raise error.PyAsn1Error('Component type not defined')
            if isinstance(self._componentType, base.AbstractSimpleAsn1Item):
                value = self._componentType.clone(value=value)
            else:
                raise error.PyAsn1Error('Instance value required')
        if verifyConstraints:
            if self._componentType is not None:
                self._verifyComponent(idx, value)
            self._verifySubtypeSpec(value, idx)
        if self._componentValues[idx] is None:
            self._componentValuesSet = self._componentValuesSet + 1
        self._componentValues[idx] = value
        return self
    def getComponentTagMap(self):
        if self._componentType is not None:
            return self._componentType.getTagMap()
    def prettyPrint(self, scope=0):
        # Indented, recursive dump of all components
        scope = scope + 1
        r = self.__class__.__name__ + ':\n'
        for idx in range(len(self._componentValues)):
            r = r + ' ' * scope
            if self._componentValues[idx] is None:
                r = r + '<empty>'
            else:
                r = r + self._componentValues[idx].prettyPrint(scope)
        return r
class SequenceOf(SetOf):
    """ASN.1 SEQUENCE OF type: SetOf semantics under the SEQUENCE tag."""
    # Universal tag 0x10 (SEQUENCE), constructed
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
        )
    typeId = 2
class SequenceAndSetBase(base.AbstractConstructedAsn1Item):
    """Common machinery for SEQUENCE and SET: heterogeneous components
    addressed by position or by name via the NamedTypes componentType."""
    componentType = namedtype.NamedTypes()
    def __init__(self, componentType=None, tagSet=None,
                 subtypeSpec=None, sizeSpec=None):
        base.AbstractConstructedAsn1Item.__init__(
            self, componentType, tagSet, subtypeSpec, sizeSpec
            )
        # Cache the component count for cheap range checks
        if self._componentType is None:
            self._componentTypeLen = 0
        else:
            self._componentTypeLen = len(self._componentType)
    def __getitem__(self, idx):
        # String subscripts address components by name, ints by position
        if isinstance(idx, str):
            return self.getComponentByName(idx)
        else:
            return base.AbstractConstructedAsn1Item.__getitem__(self, idx)
    def __setitem__(self, idx, value):
        if isinstance(idx, str):
            self.setComponentByName(idx, value)
        else:
            base.AbstractConstructedAsn1Item.__setitem__(self, idx, value)
    def _cloneComponentValues(self, myClone, cloneValueFlag):
        # Deep-copy stored components into *myClone*
        idx = 0;
        l = len(self._componentValues)
        while idx < l:
            c = self._componentValues[idx]
            if c is not None:
                if isinstance(c, base.AbstractConstructedAsn1Item):
                    myClone.setComponentByPosition(
                        idx, c.clone(cloneValueFlag=cloneValueFlag)
                        )
                else:
                    myClone.setComponentByPosition(idx, c.clone())
            idx = idx + 1
    def _verifyComponent(self, idx, value):
        # Position must exist and value must match the declared type
        if idx >= self._componentTypeLen:
            raise error.PyAsn1Error(
                'Component type error out of range'
                )
        t = self._componentType[idx].getType()
        if not t.isSuperTypeOf(value):
            raise error.PyAsn1Error('Component type error %r vs %r' % (t, value))
    def getComponentByName(self, name):
        return self.getComponentByPosition(
            self._componentType.getPositionByName(name)
            )
    def setComponentByName(self, name, value=None, verifyConstraints=True):
        return self.setComponentByPosition(
            self._componentType.getPositionByName(name), value,
            verifyConstraints
            )
    def getComponentByPosition(self, idx):
        try:
            return self._componentValues[idx]
        except IndexError:
            # In-range but not-yet-set components read as None
            if idx < self._componentTypeLen:
                return
            raise
    def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
        """Store *value* at *idx*, growing the holder as needed.

        value=None instantiates a default component of the declared type.
        Returns self to allow chaining.
        """
        l = len(self._componentValues)
        if idx >= l:
            self._componentValues = self._componentValues + (idx - l + 1) * [None]
        if value is None:
            if self._componentValues[idx] is None:
                self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
                self._componentValuesSet = self._componentValuesSet + 1
            return self
        elif not isinstance(value, base.Asn1Item):
            # Coerce plain Python values through the declared component type
            t = self._componentType.getTypeByPosition(idx)
            if isinstance(t, base.AbstractSimpleAsn1Item):
                value = t.clone(value=value)
            else:
                raise error.PyAsn1Error('Instance value required')
        if verifyConstraints:
            if self._componentTypeLen:
                self._verifyComponent(idx, value)
            self._verifySubtypeSpec(value, idx)
        if self._componentValues[idx] is None:
            self._componentValuesSet = self._componentValuesSet + 1
        self._componentValues[idx] = value
        return self
    def getNameByPosition(self, idx):
        if self._componentTypeLen:
            return self._componentType.getNameByPosition(idx)
    def getDefaultComponentByPosition(self, idx):
        if self._componentTypeLen and self._componentType[idx].isDefaulted:
            return self._componentType[idx].getType()
    def getComponentType(self):
        if self._componentTypeLen:
            return self._componentType
    def setDefaultComponents(self):
        """Fill defaulted components; fail on missing mandatory ones."""
        if self._componentTypeLen == self._componentValuesSet:
            return
        idx = self._componentTypeLen
        while idx:
            idx = idx - 1
            if self._componentType[idx].isDefaulted:
                if self.getComponentByPosition(idx) is None:
                    self.setComponentByPosition(idx)
            elif not self._componentType[idx].isOptional:
                if self.getComponentByPosition(idx) is None:
                    raise error.PyAsn1Error(
                        'Uninitialized component #%s at %r' % (idx, self)
                        )
    def prettyPrint(self, scope=0):
        # Indented, recursive name=value dump of set components
        scope = scope + 1
        r = self.__class__.__name__ + ':\n'
        for idx in range(len(self._componentValues)):
            if self._componentValues[idx] is not None:
                r = r + ' ' * scope
                componentType = self.getComponentType()
                if componentType is None:
                    r = r + '<no-name>'
                else:
                    r = r + componentType.getNameByPosition(idx)
                r = '%s=%s\n' % (
                    r, self._componentValues[idx].prettyPrint(scope)
                    )
        return r
class Sequence(SequenceAndSetBase):
    """ASN.1 SEQUENCE type: ordered, heterogeneous components."""
    # Universal tag 0x10 (SEQUENCE), constructed
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
        )
    typeId = 3
    def getComponentTagMapNearPosition(self, idx):
        if self._componentType:
            return self._componentType.getTagMapNearPosition(idx)
    def getComponentPositionNearType(self, tagSet, idx):
        if self._componentType:
            return self._componentType.getPositionNearType(tagSet, idx)
        else:
            # Without a declared component type, positions pass through
            return idx
class Set(SequenceAndSetBase):
    """ASN.1 SET type: unordered components, addressable by tag set."""
    # Universal tag 0x11 (SET), constructed
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
        )
    typeId = 4
    def getComponent(self, innerFlag=0):
        return self
    def getComponentByType(self, tagSet, innerFlag=0):
        """Fetch a component by its tag set; innerFlag recurses into
        nested Set components."""
        c = self.getComponentByPosition(
            self._componentType.getPositionByType(tagSet)
            )
        if innerFlag and isinstance(c, Set):
            # get inner component by inner tagSet
            return c.getComponent(1)
        else:
            # get outer component by inner tagSet
            return c
    def setComponentByType(self, tagSet, value=None, innerFlag=0,
                           verifyConstraints=True):
        """Store a component located by its tag set; innerFlag recurses
        into untagged nested components."""
        idx = self._componentType.getPositionByType(tagSet)
        t = self._componentType.getTypeByPosition(idx)
        if innerFlag:  # set inner component by inner tagSet
            if t.getTagSet():
                return self.setComponentByPosition(
                    idx, value, verifyConstraints
                    )
            else:
                # Untagged holder: instantiate it, then recurse inside
                t = self.setComponentByPosition(idx).getComponentByPosition(idx)
                return t.setComponentByType(
                    tagSet, value, innerFlag, verifyConstraints
                    )
        else:  # set outer component by inner tagSet
            return self.setComponentByPosition(
                idx, value, verifyConstraints
                )
    def getComponentTagMap(self):
        if self._componentType:
            return self._componentType.getTagMap(True)
    def getComponentPositionByType(self, tagSet):
        if self._componentType:
            return self._componentType.getPositionByType(tagSet)
class Choice(Set):
    """ASN.1 CHOICE type: exactly one alternative component is in effect.

    The chosen alternative is tracked via ``_currentIdx``; comparisons,
    truthiness and length all reflect the currently chosen component.
    """
    tagSet = baseTagSet = tag.TagSet() # untagged
    # CHOICE always holds exactly one value.
    sizeSpec = constraint.ConstraintsIntersection(
        constraint.ValueSizeConstraint(1, 1)
    )
    typeId = 5
    # Index of the chosen alternative; None until one is set.
    _currentIdx = None
    # Rich comparisons delegate to the currently chosen component value.
    def __eq__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] == other
        return NotImplemented
    def __ne__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] != other
        return NotImplemented
    def __lt__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] < other
        return NotImplemented
    def __le__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] <= other
        return NotImplemented
    def __gt__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] > other
        return NotImplemented
    def __ge__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] >= other
        return NotImplemented
    if sys.version_info[0] <= 2:
        def __nonzero__(self):
            return bool(self._componentValues)
    else:
        def __bool__(self):
            return bool(self._componentValues)
    def __len__(self):
        # 1 when an alternative has been chosen, else 0.
        return self._currentIdx is not None and 1 or 0
    def verifySizeSpec(self):
        if self._currentIdx is None:
            raise error.PyAsn1Error('Component not chosen')
        else:
            # A dummy one-element value is fed to the size constraint
            # (ValueSizeConstraint(1, 1)) — presumably only its length
            # matters here; verify against constraint implementation.
            self._sizeSpec(' ')
    def _cloneComponentValues(self, myClone, cloneValueFlag):
        # Copy the chosen component (if any) into myClone, matching it
        # by effective tag set.
        try:
            c = self.getComponent()
        except error.PyAsn1Error:
            pass
        else:
            if isinstance(c, Choice):
                tagSet = c.getEffectiveTagSet()
            else:
                tagSet = c.getTagSet()
            if isinstance(c, base.AbstractConstructedAsn1Item):
                myClone.setComponentByType(
                    tagSet, c.clone(cloneValueFlag=cloneValueFlag)
                )
            else:
                myClone.setComponentByType(tagSet, c.clone())
    def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
        # Choose alternative idx; any previously chosen alternative is
        # dropped first (CHOICE holds at most one value).
        l = len(self._componentValues)
        if idx >= l:
            self._componentValues = self._componentValues + (idx - l + 1) * [None]
        if self._currentIdx is not None:
            self._componentValues[self._currentIdx] = None
        if value is None:
            # No value given: instantiate the component's declared type.
            if self._componentValues[idx] is None:
                self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
            self._componentValuesSet = 1
            self._currentIdx = idx
            return self
        elif not isinstance(value, base.Asn1Item):
            # Plain Python value: coerce through the declared type.
            value = self._componentType.getTypeByPosition(idx).clone(
                value=value
            )
        if verifyConstraints:
            if self._componentTypeLen:
                self._verifyComponent(idx, value)
            self._verifySubtypeSpec(value, idx)
        self._componentValues[idx] = value
        self._currentIdx = idx
        self._componentValuesSet = 1
        return self
    def getMinTagSet(self):
        if self._tagSet:
            return self._tagSet
        else:
            return self._componentType.genMinTagSet()
    def getEffectiveTagSet(self):
        # An untagged CHOICE takes the tag set of its chosen component.
        if self._tagSet:
            return self._tagSet
        else:
            c = self.getComponent()
            if isinstance(c, Choice):
                return c.getEffectiveTagSet()
            else:
                return c.getTagSet()
    def getTagMap(self):
        # Tagged CHOICE behaves like a Set; untagged CHOICE exposes the
        # union of its alternatives' tag maps.
        if self._tagSet:
            return Set.getTagMap(self)
        else:
            return Set.getComponentTagMap(self)
    def getComponent(self, innerFlag=0):
        # Return the chosen component; with innerFlag, unwrap nested Choices.
        if self._currentIdx is None:
            raise error.PyAsn1Error('Component not chosen')
        else:
            c = self._componentValues[self._currentIdx]
            if innerFlag and isinstance(c, Choice):
                return c.getComponent(innerFlag)
            else:
                return c
    def getName(self, innerFlag=0):
        # Name of the chosen alternative per the component type spec.
        if self._currentIdx is None:
            raise error.PyAsn1Error('Component not chosen')
        else:
            if innerFlag:
                c = self._componentValues[self._currentIdx]
                if isinstance(c, Choice):
                    return c.getName(innerFlag)
            return self._componentType.getNameByPosition(self._currentIdx)
    def setDefaultComponents(self):
        # CHOICE has no defaultable components.
        pass
class Any(OctetString):
    """ASN.1 ANY type: an untagged octet-string carrier for arbitrary
    pre-encoded content."""
    tagSet = baseTagSet = tag.TagSet()  # untagged
    typeId = 6
    def getTagMap(self):
        """Build a tag map that routes any tag to this object, with the
        end-of-octets sentinel registered as a negative match."""
        positiveMap = {self.getTagSet(): self}
        negativeMap = {eoo.endOfOctets.getTagSet(): eoo.endOfOctets}
        return tagmap.TagMap(positiveMap, negativeMap, self)
# XXX
# coercion rules?
| {
"repo_name": "itielshwartz/BackendApi",
"path": "lib/pyasn1/type/univ.py",
"copies": "1",
"size": "40225",
"license": "apache-2.0",
"hash": 4607418034862288400,
"line_mean": 31.4919224556,
"line_max": 117,
"alpha_frac": 0.5433685519,
"autogenerated": false,
"ratio": 4.220438568880495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006107909553629148,
"num_lines": 1238
} |
# ASN.1 "universal" data types
import operator, sys, math
from pyasn1.type import base, tag, constraint, namedtype, namedval, tagmap
from pyasn1.codec.ber import eoo
from pyasn1.compat import octets
from pyasn1 import error
# "Simple" ASN.1 types (yet incomplete)
class Integer(base.AbstractSimpleAsn1Item):
    """ASN.1 INTEGER type.

    Wraps a Python integer, mirroring the full numeric protocol; arithmetic
    operators return new cloned Integer instances.  Optional *namedValues*
    map symbolic names to numbers for prettyIn()/prettyOut().
    """
    # Universal tag 0x02 (INTEGER), simple (primitive) encoding form.
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
    )
    namedValues = namedval.NamedValues()
    def __init__(self, value=None, tagSet=None, subtypeSpec=None,
                 namedValues=None):
        # Fall back to the class-level name->value map when none is given.
        if namedValues is None:
            self.__namedValues = self.namedValues
        else:
            self.__namedValues = namedValues
        base.AbstractSimpleAsn1Item.__init__(
            self, value, tagSet, subtypeSpec
        )
    def __repr__(self):
        # Append the namedValues only when they differ from the class default.
        if self.__namedValues is not self.namedValues:
            return '%s, %r)' % (base.AbstractSimpleAsn1Item.__repr__(self)[:-1], self.__namedValues)
        else:
            return base.AbstractSimpleAsn1Item.__repr__(self)
    # Arithmetic/bitwise operators: each returns a clone holding the result.
    def __and__(self, value): return self.clone(self._value & value)
    def __rand__(self, value): return self.clone(value & self._value)
    def __or__(self, value): return self.clone(self._value | value)
    def __ror__(self, value): return self.clone(value | self._value)
    def __xor__(self, value): return self.clone(self._value ^ value)
    def __rxor__(self, value): return self.clone(value ^ self._value)
    def __lshift__(self, value): return self.clone(self._value << value)
    def __rshift__(self, value): return self.clone(self._value >> value)
    def __add__(self, value): return self.clone(self._value + value)
    def __radd__(self, value): return self.clone(value + self._value)
    def __sub__(self, value): return self.clone(self._value - value)
    def __rsub__(self, value): return self.clone(value - self._value)
    def __mul__(self, value): return self.clone(self._value * value)
    def __rmul__(self, value): return self.clone(value * self._value)
    def __mod__(self, value): return self.clone(self._value % value)
    def __rmod__(self, value): return self.clone(value % self._value)
    def __pow__(self, value, modulo=None): return self.clone(pow(self._value, value, modulo))
    def __rpow__(self, value): return self.clone(pow(value, self._value))
    if sys.version_info[0] <= 2:
        def __div__(self, value): return self.clone(self._value // value)
        def __rdiv__(self, value): return self.clone(value // self._value)
    else:
        def __truediv__(self, value): return self.clone(self._value / value)
        def __rtruediv__(self, value): return self.clone(value / self._value)
        # NOTE(review): unlike builtin divmod(), these yield only the floor
        # quotient, not a (quotient, remainder) pair — kept as-is for
        # backward compatibility with existing callers.
        def __divmod__(self, value): return self.clone(self._value // value)
        def __rdivmod__(self, value): return self.clone(value // self._value)
        __hash__ = base.AbstractSimpleAsn1Item.__hash__
    def __int__(self): return int(self._value)
    if sys.version_info[0] <= 2:
        def __long__(self): return long(self._value)
    def __float__(self): return float(self._value)
    def __abs__(self): return self.clone(abs(self._value))
    def __index__(self): return int(self._value)
    def __pos__(self): return self.clone(+self._value)
    def __neg__(self): return self.clone(-self._value)
    def __invert__(self): return self.clone(~self._value)
    def __round__(self, n=0):
        r = round(self._value, n)
        if n:
            return self.clone(r)
        else:
            return r
    def __floor__(self): return math.floor(self._value)
    def __ceil__(self): return math.ceil(self._value)
    if sys.version_info[0:2] > (2, 5):
        def __trunc__(self): return self.clone(math.trunc(self._value))
    # Comparisons operate on the raw value (no clone).
    def __lt__(self, value): return self._value < value
    def __le__(self, value): return self._value <= value
    def __eq__(self, value): return self._value == value
    def __ne__(self, value): return self._value != value
    def __gt__(self, value): return self._value > value
    def __ge__(self, value): return self._value >= value
    def prettyIn(self, value):
        """Coerce *value* (an int-like object or a symbolic name string)
        into a plain Python integer.

        Raises PyAsn1Error if the value cannot be converted.
        """
        if not isinstance(value, str):
            try:
                return int(value)
            # Narrowed from a bare 'except:' so that SystemExit and
            # KeyboardInterrupt are not swallowed and reported as a
            # coercion failure.
            except Exception:
                raise error.PyAsn1Error(
                    'Can\'t coerce %r into integer: %s' % (value, sys.exc_info()[1])
                )
        # Strings are first resolved against the symbolic name map.
        r = self.__namedValues.getValue(value)
        if r is not None:
            return r
        try:
            return int(value)
        except Exception:
            raise error.PyAsn1Error(
                'Can\'t coerce %r into integer: %s' % (value, sys.exc_info()[1])
            )
    def prettyOut(self, value):
        # Prefer the symbolic name when one is registered for the value.
        r = self.__namedValues.getName(value)
        return r is None and str(value) or repr(r)
    def getNamedValues(self): return self.__namedValues
    def clone(self, value=None, tagSet=None, subtypeSpec=None,
              namedValues=None):
        """Return a copy, optionally overriding value/tagSet/subtypeSpec/
        namedValues; returns self unchanged when nothing is overridden."""
        if value is None and tagSet is None and subtypeSpec is None \
                and namedValues is None:
            return self
        if value is None:
            value = self._value
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        if namedValues is None:
            namedValues = self.__namedValues
        return self.__class__(value, tagSet, subtypeSpec, namedValues)
    def subtype(self, value=None, implicitTag=None, explicitTag=None,
                subtypeSpec=None, namedValues=None):
        """Derive a subtype: tags are applied implicitly/explicitly and
        constraints/namedValues are merged with (not replaced by) the
        current ones."""
        if value is None:
            value = self._value
        if implicitTag is not None:
            tagSet = self._tagSet.tagImplicitly(implicitTag)
        elif explicitTag is not None:
            tagSet = self._tagSet.tagExplicitly(explicitTag)
        else:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        else:
            subtypeSpec = subtypeSpec + self._subtypeSpec
        if namedValues is None:
            namedValues = self.__namedValues
        else:
            namedValues = namedValues + self.__namedValues
        return self.__class__(value, tagSet, subtypeSpec, namedValues)
class Boolean(Integer):
    """ASN.1 BOOLEAN type, modelled as an Integer restricted to 0 and 1."""
    # Universal tag 0x01 (BOOLEAN), simple (primitive) encoding form.
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01),
    )
    # Only the values 0 and 1 are admitted.
    subtypeSpec = Integer.subtypeSpec+constraint.SingleValueConstraint(0,1)
    # Symbolic names usable with prettyIn()/prettyOut().
    namedValues = Integer.namedValues.clone(('False', 0), ('True', 1))
class BitString(base.AbstractSimpleAsn1Item):
    """ASN.1 BIT STRING type.

    The value is held as a tuple of 0/1 integers.  prettyIn() accepts
    binary ('...'B) and hex ('...'H) notation, comma-separated named bits,
    sequences of 0/1, or another BitString.
    """
    # Universal tag 0x03 (BIT STRING), simple (primitive) encoding form.
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
    )
    namedValues = namedval.NamedValues()
    def __init__(self, value=None, tagSet=None, subtypeSpec=None,
                 namedValues=None):
        # Fall back to the class-level named-bit map when none is given.
        if namedValues is None:
            self.__namedValues = self.namedValues
        else:
            self.__namedValues = namedValues
        base.AbstractSimpleAsn1Item.__init__(
            self, value, tagSet, subtypeSpec
        )
    def clone(self, value=None, tagSet=None, subtypeSpec=None,
              namedValues=None):
        # Returns self unchanged when nothing is overridden.
        if value is None and tagSet is None and subtypeSpec is None \
               and namedValues is None:
            return self
        if value is None:
            value = self._value
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        if namedValues is None:
            namedValues = self.__namedValues
        return self.__class__(value, tagSet, subtypeSpec, namedValues)
    def subtype(self, value=None, implicitTag=None, explicitTag=None,
                subtypeSpec=None, namedValues=None):
        # Derive a subtype: tags applied, constraints/namedValues merged.
        if value is None:
            value = self._value
        if implicitTag is not None:
            tagSet = self._tagSet.tagImplicitly(implicitTag)
        elif explicitTag is not None:
            tagSet = self._tagSet.tagExplicitly(explicitTag)
        else:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        else:
            subtypeSpec = subtypeSpec + self._subtypeSpec
        if namedValues is None:
            namedValues = self.__namedValues
        else:
            namedValues = namedValues + self.__namedValues
        return self.__class__(value, tagSet, subtypeSpec, namedValues)
    def __str__(self): return str(tuple(self))
    # Immutable sequence object protocol
    def __len__(self):
        # Length is cached in self._len after first use.
        if self._len is None:
            self._len = len(self._value)
        return self._len
    def __getitem__(self, i):
        # Slices yield a BitString clone; single indices yield 0/1 ints.
        if isinstance(i, slice):
            return self.clone(operator.getitem(self._value, i))
        else:
            return self._value[i]
    def __add__(self, value): return self.clone(self._value + value)
    def __radd__(self, value): return self.clone(value + self._value)
    def __mul__(self, value): return self.clone(self._value * value)
    def __rmul__(self, value): return self * value
    def prettyIn(self, value):
        """Coerce *value* into a tuple of 0/1 bits (see class docstring
        for the accepted notations).  Raises PyAsn1Error on bad input."""
        r = []
        if not value:
            return ()
        elif isinstance(value, str):
            if value[0] == '\'':
                # Quoted notation: '0101'B (binary) or 'A0'H (hex).
                if value[-2:] == '\'B':
                    for v in value[1:-2]:
                        if v == '0':
                            r.append(0)
                        elif v == '1':
                            r.append(1)
                        else:
                            raise error.PyAsn1Error(
                                'Non-binary BIT STRING initializer %s' % (v,)
                            )
                    return tuple(r)
                elif value[-2:] == '\'H':
                    # Expand each hex digit into four bits, MSB first.
                    for v in value[1:-2]:
                        i = 4
                        v = int(v, 16)
                        while i:
                            i = i - 1
                            r.append((v>>i)&0x01)
                    return tuple(r)
                else:
                    raise error.PyAsn1Error(
                        'Bad BIT STRING value notation %s' % (value,)
                    )
            else:
                # Comma-separated named bits, resolved via namedValues;
                # the result is zero-padded up to the highest bit position.
                for i in value.split(','):
                    j = self.__namedValues.getValue(i)
                    if j is None:
                        raise error.PyAsn1Error(
                            'Unknown bit identifier \'%s\'' % (i,)
                        )
                    if j >= len(r):
                        r.extend([0]*(j-len(r)+1))
                    r[j] = 1
                return tuple(r)
        elif isinstance(value, (tuple, list)):
            # Sequence of bits: validate each element is 0 or 1.
            r = tuple(value)
            for b in r:
                if b and b != 1:
                    raise error.PyAsn1Error(
                        'Non-binary BitString initializer \'%s\'' % (r,)
                    )
            return r
        elif isinstance(value, BitString):
            return tuple(value)
        else:
            raise error.PyAsn1Error(
                'Bad BitString initializer type \'%s\'' % (value,)
            )
    def prettyOut(self, value):
        # Render in quoted binary notation, e.g. "'0101'B".
        return '\"\'%s\'B\"' % ''.join([str(x) for x in value])
class OctetString(base.AbstractSimpleAsn1Item):
    """ASN.1 OCTET STRING type.

    Holds raw bytes (Python 2 str / Python 3 bytes).  Values may also be
    given in binary ('binValue') or hex ('hexValue') string notation, or
    as text which is encoded using *encoding* (default 'us-ascii').
    """
    # Universal tag 0x04 (OCTET STRING), simple (primitive) encoding form.
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
    )
    defaultBinValue = defaultHexValue = base.noValue
    encoding = 'us-ascii'
    def __init__(self, value=None, tagSet=None, subtypeSpec=None,
                 encoding=None, binValue=None, hexValue=None):
        if encoding is None:
            self._encoding = self.encoding
        else:
            self._encoding = encoding
        # binValue/hexValue, when given, take precedence over value.
        if binValue is not None:
            value = self.fromBinaryString(binValue)
        if hexValue is not None:
            value = self.fromHexString(hexValue)
        # Fall back to class-level defaults (hex first, then binary).
        if value is None or value is base.noValue:
            value = self.defaultHexValue
        if value is None or value is base.noValue:
            value = self.defaultBinValue
        self.__asNumbersCache = None
        base.AbstractSimpleAsn1Item.__init__(self, value, tagSet, subtypeSpec)
    def clone(self, value=None, tagSet=None, subtypeSpec=None,
              encoding=None, binValue=None, hexValue=None):
        # Returns self unchanged when nothing is overridden.
        if value is None and tagSet is None and subtypeSpec is None and \
               encoding is None and binValue is None and hexValue is None:
            return self
        if value is None and binValue is None and hexValue is None:
            value = self._value
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        if encoding is None:
            encoding = self._encoding
        return self.__class__(
            value, tagSet, subtypeSpec, encoding, binValue, hexValue
        )
    if sys.version_info[0] <= 2:
        def prettyIn(self, value):
            # Python 2: normalize to a byte string (str).
            if isinstance(value, str):
                return value
            elif isinstance(value, unicode):
                try:
                    return value.encode(self._encoding)
                except UnicodeEncodeError:
                    raise error.PyAsn1Error(
                        'Can\'t encode string \'%s\' with \'%s\' codec' % (value, self._encoding)
                    )
            elif isinstance(value, (tuple, list)):
                # Sequence of byte ordinals.
                try:
                    return ''.join([ chr(x) for x in value ])
                except ValueError:
                    raise error.PyAsn1Error(
                        'Bad OctetString initializer \'%s\'' % (value,)
                    )
            else:
                return str(value)
    else:
        def prettyIn(self, value):
            # Python 3: normalize to bytes.
            if isinstance(value, bytes):
                return value
            elif isinstance(value, str):
                try:
                    return value.encode(self._encoding)
                except UnicodeEncodeError:
                    raise error.PyAsn1Error(
                        'Can\'t encode string \'%s\' with \'%s\' codec' % (value, self._encoding)
                    )
            elif isinstance(value, OctetString):
                return value.asOctets()
            elif isinstance(value, (tuple, list, map)):
                # Iterable of byte ordinals.
                try:
                    return bytes(value)
                except ValueError:
                    raise error.PyAsn1Error(
                        'Bad OctetString initializer \'%s\'' % (value,)
                    )
            else:
                # Last resort: stringify, then encode.
                try:
                    return str(value).encode(self._encoding)
                except UnicodeEncodeError:
                    raise error.PyAsn1Error(
                        'Can\'t encode string \'%s\' with \'%s\' codec' % (value, self._encoding)
                    )
    def fromBinaryString(self, value):
        """Convert a '0'/'1' character string into octets (MSB first)."""
        bitNo = 8; byte = 0; r = ()
        for v in value:
            if bitNo:
                bitNo = bitNo - 1
            else:
                # Byte boundary: flush accumulated byte and start a new one.
                bitNo = 7
                r = r + (byte,)
                byte = 0
            if v == '0':
                v = 0
            elif v == '1':
                v = 1
            else:
                raise error.PyAsn1Error(
                    'Non-binary OCTET STRING initializer %s' % (v,)
                )
            byte = byte | (v << bitNo)
        # NOTE(review): the trailing byte is always emitted, so an empty
        # input yields one zero byte — preserved as-is.
        return octets.ints2octs(r + (byte,))
    def fromHexString(self, value):
        """Convert a hex-digit string into octets; an odd trailing digit
        is treated as the high nibble (padded with '0')."""
        r = p = ()
        for v in value:
            if p:
                r = r + (int(p+v, 16),)
                p = ()
            else:
                p = v
        if p:
            r = r + (int(p+'0', 16),)
        return octets.ints2octs(r)
    def prettyOut(self, value):
        # Printable ASCII renders as text; anything else as 0x... hex.
        if sys.version_info[0] <= 2:
            numbers = tuple([ ord(x) for x in value ])
        else:
            numbers = tuple(value)
        if [ x for x in numbers if x < 32 or x > 126 ]:
            return '0x' + ''.join([ '%.2x' % x for x in numbers ])
        else:
            return str(value)
    def __repr__(self):
        # Emit only the attributes differing from class defaults; values
        # with non-printable bytes are shown via hexValue=.
        r = []
        doHex = False
        if self._value is not self.defaultValue:
            for x in self.asNumbers():
                if x < 32 or x > 126:
                    doHex = True
                    break
            if not doHex:
                r.append('%r' % (self._value,))
        if self._tagSet is not self.tagSet:
            r.append('tagSet=%r' % (self._tagSet,))
        if self._subtypeSpec is not self.subtypeSpec:
            r.append('subtypeSpec=%r' % (self._subtypeSpec,))
        if self.encoding is not self._encoding:
            r.append('encoding=%r' % (self._encoding,))
        if doHex:
            r.append('hexValue=%r' % ''.join([ '%.2x' % x for x in self.asNumbers() ]))
        return '%s(%s)' % (self.__class__.__name__, ', '.join(r))
    if sys.version_info[0] <= 2:
        def __str__(self): return str(self._value)
        def __unicode__(self):
            return self._value.decode(self._encoding, 'ignore')
        def asOctets(self): return self._value
        def asNumbers(self):
            # Tuple-of-ints view, computed once and cached.
            if self.__asNumbersCache is None:
                self.__asNumbersCache = tuple([ ord(x) for x in self._value ])
            return self.__asNumbersCache
    else:
        def __str__(self): return self._value.decode(self._encoding, 'ignore')
        def __bytes__(self): return self._value
        def asOctets(self): return self._value
        def asNumbers(self):
            # Tuple-of-ints view, computed once and cached.
            if self.__asNumbersCache is None:
                self.__asNumbersCache = tuple(self._value)
            return self.__asNumbersCache
    # Immutable sequence object protocol
    def __len__(self):
        if self._len is None:
            self._len = len(self._value)
        return self._len
    def __getitem__(self, i):
        # Slices yield an OctetString clone; single indices yield the
        # native element type (str char on Py2, int on Py3).
        if isinstance(i, slice):
            return self.clone(operator.getitem(self._value, i))
        else:
            return self._value[i]
    def __add__(self, value): return self.clone(self._value + self.prettyIn(value))
    def __radd__(self, value): return self.clone(self.prettyIn(value) + self._value)
    def __mul__(self, value): return self.clone(self._value * value)
    def __rmul__(self, value): return self * value
    def __int__(self): return int(self._value)
    def __float__(self): return float(self._value)
class Null(OctetString):
    """ASN.1 NULL type: an OctetString constrained to the empty value."""
    defaultValue = ''.encode() # This is tightly constrained
    # Universal tag 0x05 (NULL), simple (primitive) encoding form.
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
    )
    # Only the empty byte string is a legal NULL value.
    subtypeSpec = OctetString.subtypeSpec+constraint.SingleValueConstraint(''.encode())
# Integer types differ between Python 2 (int and long) and Python 3 (int
# only); these tuples are used for isinstance() checks below.
if sys.version_info[0] <= 2:
    intTypes = (int, long)
else:
    intTypes = (int,)
numericTypes = intTypes + (float,)
class ObjectIdentifier(base.AbstractSimpleAsn1Item):
    """ASN.1 OBJECT IDENTIFIER type.

    The value is a tuple of non-negative integer sub-identifiers;
    prettyIn() also accepts dotted-string notation like '1.3.6.1'.
    """
    # Universal tag 0x06 (OBJECT IDENTIFIER), simple (primitive) form.
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
    )
    # Concatenation yields a new (cloned) ObjectIdentifier.
    def __add__(self, other): return self.clone(self._value + other)
    def __radd__(self, other): return self.clone(other + self._value)
    def asTuple(self): return self._value
    # Sequence object protocol
    def __len__(self):
        # Length is cached in self._len after first use.
        if self._len is None:
            self._len = len(self._value)
        return self._len
    def __getitem__(self, i):
        # Slices yield an ObjectIdentifier clone; indices yield plain ints.
        if isinstance(i, slice):
            return self.clone(
                operator.getitem(self._value, i)
            )
        else:
            return self._value[i]
    def __str__(self): return self.prettyPrint()
    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.prettyPrint())
    def index(self, suboid): return self._value.index(suboid)
    def isPrefixOf(self, value):
        """Returns true if argument OID resides deeper in the OID tree"""
        l = len(self)
        if l <= len(value):
            if self._value[:l] == value[:l]:
                return 1
        return 0
    def prettyIn(self, value):
        """Dotted -> tuple of numerics OID converter"""
        if isinstance(value, tuple):
            pass
        elif isinstance(value, ObjectIdentifier):
            return tuple(value)
        elif isinstance(value, str):
            r = []
            # Empty segments (e.g. leading/trailing dots) are skipped;
            # base prefixes like 0x are honoured via int(x, 0).
            for element in [ x for x in value.split('.') if x != '' ]:
                try:
                    r.append(int(element, 0))
                except ValueError:
                    raise error.PyAsn1Error(
                        'Malformed Object ID %s at %s: %s' %
                        (str(value), self.__class__.__name__, sys.exc_info()[1])
                    )
            value = tuple(r)
        else:
            # Any other iterable of sub-identifiers.
            try:
                value = tuple(value)
            except TypeError:
                raise error.PyAsn1Error(
                    'Malformed Object ID %s at %s: %s' %
                    (str(value), self.__class__.__name__,sys.exc_info()[1])
                )
        # Every sub-identifier must be a non-negative integer.
        for x in value:
            if not isinstance(x, intTypes) or x < 0:
                raise error.PyAsn1Error(
                    'Invalid sub-ID in %s at %s' % (value, self.__class__.__name__)
                )
        return value
    def prettyOut(self, value): return '.'.join([ str(x) for x in value ])
class Real(base.AbstractSimpleAsn1Item):
    """ASN.1 REAL type.

    The value is either a (mantissa, base, exponent) tuple with base 2 or
    10, or a float infinity.  prettyIn() also accepts ints, floats and
    numeric strings.  Arithmetic operates through float() and returns
    cloned Real instances.
    """
    binEncBase = None # binEncBase = 16 is recommended for large numbers
    try:
        _plusInf = float('inf')
        _minusInf = float('-inf')
        _inf = (_plusInf, _minusInf)
    except ValueError:
        # Infinity support is platform and Python dependent
        _plusInf = _minusInf = None
        _inf = ()
    # Universal tag 0x09 (REAL), simple (primitive) encoding form.
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
    )
    def __normalizeBase10(self, value):
        """Strip trailing decimal zeros from the mantissa of (m, 10, e),
        bumping the exponent accordingly."""
        m, b, e = value
        while m and m % 10 == 0:
            # Floor division keeps the mantissa an integer: plain '/'
            # would produce a float under Python 3 true division.
            m = m // 10
            e = e + 1
        return m, b, e
    def prettyIn(self, value):
        """Coerce *value* into the canonical (mantissa, base, exponent)
        tuple (or a float infinity).  Raises PyAsn1Error on bad input."""
        if isinstance(value, tuple) and len(value) == 3:
            if not isinstance(value[0], numericTypes) or \
                    not isinstance(value[1], intTypes) or \
                    not isinstance(value[2], intTypes):
                raise error.PyAsn1Error('Lame Real value syntax: %s' % (value,))
            if isinstance(value[0], float) and \
                    self._inf and value[0] in self._inf:
                return value[0]
            if value[1] not in (2, 10):
                raise error.PyAsn1Error(
                    'Prohibited base for Real value: %s' % (value[1],)
                )
            if value[1] == 10:
                value = self.__normalizeBase10(value)
            return value
        elif isinstance(value, intTypes):
            return self.__normalizeBase10((value, 10, 0))
        elif isinstance(value, (str, float)):
            if isinstance(value, str):
                try:
                    value = float(value)
                except ValueError:
                    raise error.PyAsn1Error(
                        'Bad real value syntax: %s' % (value,)
                    )
            if self._inf and value in self._inf:
                return value
            else:
                # Scale the float up until it is integral, tracking the
                # decimal exponent.
                e = 0
                while int(value) != value:
                    value = value * 10
                    e = e - 1
                return self.__normalizeBase10((int(value), 10, e))
        elif isinstance(value, Real):
            return tuple(value)
        raise error.PyAsn1Error(
            'Bad real value syntax: %s' % (value,)
        )
    def prettyOut(self, value):
        # Infinities render quoted ('inf'/'-inf'); tuples render as-is.
        if value in self._inf:
            return '\'%s\'' % value
        else:
            return str(value)
    def isPlusInfinity(self): return self._value == self._plusInf
    def isMinusInfinity(self): return self._value == self._minusInf
    def isInfinity(self): return self._value in self._inf
    def __str__(self): return str(float(self))
    # Arithmetic goes through float() and returns a clone of the result.
    def __add__(self, value): return self.clone(float(self) + value)
    def __radd__(self, value): return self + value
    def __mul__(self, value): return self.clone(float(self) * value)
    def __rmul__(self, value): return self * value
    def __sub__(self, value): return self.clone(float(self) - value)
    def __rsub__(self, value): return self.clone(value - float(self))
    def __mod__(self, value): return self.clone(float(self) % value)
    def __rmod__(self, value): return self.clone(value % float(self))
    def __pow__(self, value, modulo=None): return self.clone(pow(float(self), value, modulo))
    def __rpow__(self, value): return self.clone(pow(value, float(self)))
    if sys.version_info[0] <= 2:
        def __div__(self, value): return self.clone(float(self) / value)
        def __rdiv__(self, value): return self.clone(value / float(self))
    else:
        def __truediv__(self, value): return self.clone(float(self) / value)
        def __rtruediv__(self, value): return self.clone(value / float(self))
        # NOTE(review): unlike builtin divmod(), these yield only the
        # floor quotient, not a (quotient, remainder) pair — kept as-is
        # for backward compatibility.
        def __divmod__(self, value): return self.clone(float(self) // value)
        def __rdivmod__(self, value): return self.clone(value // float(self))
    def __int__(self): return int(float(self))
    if sys.version_info[0] <= 2:
        def __long__(self): return long(float(self))
    def __float__(self):
        # Infinities are stored as floats directly; otherwise evaluate
        # mantissa * base ** exponent.
        if self._value in self._inf:
            return self._value
        else:
            return float(
                self._value[0] * pow(self._value[1], self._value[2])
            )
    def __abs__(self): return self.clone(abs(float(self)))
    def __pos__(self): return self.clone(+float(self))
    def __neg__(self): return self.clone(-float(self))
    def __round__(self, n=0):
        r = round(float(self), n)
        if n:
            return self.clone(r)
        else:
            return r
    def __floor__(self): return self.clone(math.floor(float(self)))
    def __ceil__(self): return self.clone(math.ceil(float(self)))
    if sys.version_info[0:2] > (2, 5):
        def __trunc__(self): return self.clone(math.trunc(float(self)))
    # Comparisons operate on the float value.
    def __lt__(self, value): return float(self) < value
    def __le__(self, value): return float(self) <= value
    def __eq__(self, value): return float(self) == value
    def __ne__(self, value): return float(self) != value
    def __gt__(self, value): return float(self) > value
    def __ge__(self, value): return float(self) >= value
    if sys.version_info[0] <= 2:
        def __nonzero__(self): return bool(float(self))
    else:
        def __bool__(self): return bool(float(self))
        __hash__ = base.AbstractSimpleAsn1Item.__hash__
    def __getitem__(self, idx):
        # Index into the (mantissa, base, exponent) tuple; meaningless
        # (and rejected) for infinities.
        if self._value in self._inf:
            raise error.PyAsn1Error('Invalid infinite value operation')
        else:
            return self._value[idx]
class Enumerated(Integer):
    """ASN.1 ENUMERATED type: an Integer with its own universal tag."""
    # Universal tag 0x0A (ENUMERATED), simple (primitive) encoding form.
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x0A)
    )
# "Structured" ASN.1 types
class SetOf(base.AbstractConstructedAsn1Item):
    """ASN.1 SET OF type: a homogeneous collection of components, all of
    the single (optional) *componentType*."""
    componentType = None
    # Universal tag 0x11 (SET), constructed encoding form.
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
    )
    typeId = 1
    # When True, component constraints are verified in addition to tags.
    strictConstraints = False
    def _cloneComponentValues(self, myClone, cloneValueFlag):
        # Deep-copy each present component into myClone, preserving
        # positions; constructed components propagate cloneValueFlag.
        idx = 0; l = len(self._componentValues)
        while idx < l:
            c = self._componentValues[idx]
            if c is not None:
                if isinstance(c, base.AbstractConstructedAsn1Item):
                    myClone.setComponentByPosition(
                        idx, c.clone(cloneValueFlag=cloneValueFlag)
                    )
                else:
                    myClone.setComponentByPosition(idx, c.clone())
            idx = idx + 1
    def _verifyComponent(self, idx, value):
        # Check the candidate value against the declared component type
        # (tags always, constraints only in strict mode).
        t = self._componentType
        if t is None:
            return
        if not t.isSameTypeWith(value,matchConstraints=self.strictConstraints):
            raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, t))
        if self.strictConstraints and \
               not t.isSuperTypeOf(value, matchTags=False):
            raise error.PyAsn1Error('Component value is constraints-incompatible: %r vs %r' % (value, t))
    def getComponentByPosition(self, idx): return self._componentValues[idx]
    def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
        """Set component *idx*; with no value, instantiate the declared
        component type.  Plain Python values are coerced through it.
        The internal list grows as needed."""
        l = len(self._componentValues)
        if idx >= l:
            self._componentValues = self._componentValues + (idx-l+1)*[None]
        if value is None:
            if self._componentValues[idx] is None:
                if self._componentType is None:
                    raise error.PyAsn1Error('Component type not defined')
                self._componentValues[idx] = self._componentType.clone()
                self._componentValuesSet = self._componentValuesSet + 1
            return self
        elif not isinstance(value, base.Asn1Item):
            if self._componentType is None:
                raise error.PyAsn1Error('Component type not defined')
            if isinstance(self._componentType, base.AbstractSimpleAsn1Item):
                value = self._componentType.clone(value=value)
            else:
                raise error.PyAsn1Error('Instance value required')
        if verifyConstraints:
            if self._componentType is not None:
                self._verifyComponent(idx, value)
            self._verifySubtypeSpec(value, idx)
        # Track how many slots have been populated.
        if self._componentValues[idx] is None:
            self._componentValuesSet = self._componentValuesSet + 1
        self._componentValues[idx] = value
        return self
    def getComponentTagMap(self):
        # Tag map of the single component type (None if untyped).
        if self._componentType is not None:
            return self._componentType.getTagMap()
    def prettyPrint(self, scope=0):
        # Human-readable dump; scope controls indentation depth.
        scope = scope + 1
        r = self.__class__.__name__ + ':\n'
        for idx in range(len(self._componentValues)):
            r = r + ' '*scope
            if self._componentValues[idx] is None:
                r = r + '<empty>'
            else:
                r = r + self._componentValues[idx].prettyPrint(scope)
        return r
    def prettyPrintType(self, scope=0):
        # Human-readable dump of the type (not value) structure.
        scope = scope + 1
        r = '%s -> %s {\n' % (self.getTagSet(), self.__class__.__name__)
        if self._componentType is not None:
            r = r + ' '*scope
            r = r + self._componentType.prettyPrintType(scope)
        return r + '\n' + ' '*(scope-1) + '}'
class SequenceOf(SetOf):
    """ASN.1 SEQUENCE OF type: like SET OF, but ordered (tag 0x10)."""
    # Universal tag 0x10 (SEQUENCE), constructed encoding form.
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
    )
    typeId = 2
class SequenceAndSetBase(base.AbstractConstructedAsn1Item):
    """Common machinery for SEQUENCE and SET: heterogeneous components
    described by a NamedTypes specification, addressable by position
    or by name."""
    componentType = namedtype.NamedTypes()
    # When True, component constraints are verified in addition to tags.
    strictConstraints = False
    def __init__(self, componentType=None, tagSet=None,
                 subtypeSpec=None, sizeSpec=None):
        if componentType is None:
            componentType = self.componentType
        # A clone of the component spec is stored so instances do not
        # share mutable state with the class-level specification.
        base.AbstractConstructedAsn1Item.__init__(
            self, componentType.clone(), tagSet, subtypeSpec, sizeSpec
        )
        self._componentTypeLen = len(self._componentType)
    def __getitem__(self, idx):
        # String keys address components by name, ints by position.
        if isinstance(idx, str):
            return self.getComponentByName(idx)
        else:
            return base.AbstractConstructedAsn1Item.__getitem__(self, idx)
    def __setitem__(self, idx, value):
        if isinstance(idx, str):
            self.setComponentByName(idx, value)
        else:
            base.AbstractConstructedAsn1Item.__setitem__(self, idx, value)
    def _cloneComponentValues(self, myClone, cloneValueFlag):
        # Deep-copy each present component into myClone, preserving
        # positions; constructed components propagate cloneValueFlag.
        idx = 0; l = len(self._componentValues)
        while idx < l:
            c = self._componentValues[idx]
            if c is not None:
                if isinstance(c, base.AbstractConstructedAsn1Item):
                    myClone.setComponentByPosition(
                        idx, c.clone(cloneValueFlag=cloneValueFlag)
                    )
                else:
                    myClone.setComponentByPosition(idx, c.clone())
            idx = idx + 1
    def _verifyComponent(self, idx, value):
        # Check the candidate value against the type declared at idx
        # (tags always, constraints only in strict mode).
        if idx >= self._componentTypeLen:
            raise error.PyAsn1Error(
                'Component type error out of range'
            )
        t = self._componentType[idx].getType()
        if not t.isSameTypeWith(value,matchConstraints=self.strictConstraints):
            raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, t))
        if self.strictConstraints and \
               not t.isSuperTypeOf(value, matchTags=False):
            raise error.PyAsn1Error('Component value is constraints-incompatible: %r vs %r' % (value, t))
    def getComponentByName(self, name):
        return self.getComponentByPosition(
            self._componentType.getPositionByName(name)
        )
    def setComponentByName(self, name, value=None, verifyConstraints=True):
        return self.setComponentByPosition(
            self._componentType.getPositionByName(name),value,verifyConstraints
        )
    def getComponentByPosition(self, idx):
        # Unset-but-declared components read as None rather than raising.
        try:
            return self._componentValues[idx]
        except IndexError:
            if idx < self._componentTypeLen:
                return
            raise
    def setComponentByPosition(self, idx, value=None,
                               verifyConstraints=True,
                               exactTypes=False,
                               matchTags=True,
                               matchConstraints=True):
        """Set component *idx*; with no value, instantiate the declared
        type at that position.  Plain Python values are coerced through
        the declared type.  The internal list grows as needed."""
        l = len(self._componentValues)
        if idx >= l:
            self._componentValues = self._componentValues + (idx-l+1)*[None]
        if value is None:
            if self._componentValues[idx] is None:
                self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
                self._componentValuesSet = self._componentValuesSet + 1
            return self
        elif not isinstance(value, base.Asn1Item):
            t = self._componentType.getTypeByPosition(idx)
            if isinstance(t, base.AbstractSimpleAsn1Item):
                value = t.clone(value=value)
            else:
                raise error.PyAsn1Error('Instance value required')
        if verifyConstraints:
            if self._componentTypeLen:
                self._verifyComponent(idx, value)
            self._verifySubtypeSpec(value, idx)
        # Track how many slots have been populated.
        if self._componentValues[idx] is None:
            self._componentValuesSet = self._componentValuesSet + 1
        self._componentValues[idx] = value
        return self
    def getNameByPosition(self, idx):
        if self._componentTypeLen:
            return self._componentType.getNameByPosition(idx)
    def getDefaultComponentByPosition(self, idx):
        # Declared default value for position idx, if one exists.
        if self._componentTypeLen and self._componentType[idx].isDefaulted:
            return self._componentType[idx].getType()
    def getComponentType(self):
        if self._componentTypeLen:
            return self._componentType
    def setDefaultComponents(self):
        """Instantiate defaulted components that are still unset and
        verify that no mandatory component is missing."""
        if self._componentTypeLen == self._componentValuesSet:
            return
        idx = self._componentTypeLen
        while idx:
            idx = idx - 1
            if self._componentType[idx].isDefaulted:
                if self.getComponentByPosition(idx) is None:
                    self.setComponentByPosition(idx)
            elif not self._componentType[idx].isOptional:
                if self.getComponentByPosition(idx) is None:
                    raise error.PyAsn1Error(
                        'Uninitialized component #%s at %r' % (idx, self)
                    )
    def prettyPrint(self, scope=0):
        """Return a human-readable dump of set components; *scope*
        controls indentation depth."""
        scope = scope + 1
        r = self.__class__.__name__ + ':\n'
        for idx in range(len(self._componentValues)):
            if self._componentValues[idx] is not None:
                r = r + ' '*scope
                componentType = self.getComponentType()
                if componentType is None:
                    r = r + '<no-name>'
                else:
                    r = r + componentType.getNameByPosition(idx)
                r = '%s=%s\n' % (
                    r, self._componentValues[idx].prettyPrint(scope)
                )
        return r
    def prettyPrintType(self, scope=0):
        # Human-readable dump of the type (not value) structure.
        scope = scope + 1
        r = '%s -> %s {\n' % (self.getTagSet(), self.__class__.__name__)
        for idx in range(len(self.componentType)):
            r = r + ' '*scope
            r = r + '"%s"' % self.componentType.getNameByPosition(idx)
            r = '%s = %s\n' % (
                r, self._componentType.getTypeByPosition(idx).prettyPrintType(scope)
            )
        return r + '\n' + ' '*(scope-1) + '}'
class Sequence(SequenceAndSetBase):
    """ASN.1 SEQUENCE type: ordered, named, typed components."""
    # Universal tag 0x10 (SEQUENCE), constructed encoding form.
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
    )
    typeId = 3
    def getComponentTagMapNearPosition(self, idx):
        # Tag map of types acceptable at or after position idx
        # (None when no component type specification exists).
        if self._componentType:
            return self._componentType.getTagMapNearPosition(idx)
    def getComponentPositionNearType(self, tagSet, idx):
        # Resolve tagSet to a component position at or after idx;
        # without a component type specification, idx itself is assumed.
        if self._componentType:
            return self._componentType.getPositionNearType(tagSet, idx)
        else:
            return idx
class Set(SequenceAndSetBase):
    """ASN.1 SET type: unordered collection of named, typed components.

    Adds tagSet-based component access on top of SequenceAndSetBase,
    since SET components are identified by tag rather than by position.
    """
    # Universal tag 0x11 (SET), constructed encoding form.
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
    )
    typeId = 4
    # A SET is its own component (contrast with Choice).
    def getComponent(self, innerFlag=0): return self
    def getComponentByType(self, tagSet, innerFlag=0):
        # Look the component up by its tag set; with innerFlag set,
        # descend recursively into nested Set/Choice components.
        c = self.getComponentByPosition(
            self._componentType.getPositionByType(tagSet)
        )
        if innerFlag and isinstance(c, Set):
            # get inner component by inner tagSet
            return c.getComponent(1)
        else:
            # get outer component by inner tagSet
            return c
    def setComponentByType(self, tagSet, value=None, innerFlag=0,
                           verifyConstraints=True):
        # Assign a component identified by tag set rather than position.
        idx = self._componentType.getPositionByType(tagSet)
        t = self._componentType.getTypeByPosition(idx)
        if innerFlag: # set inner component by inner tagSet
            if t.getTagSet():
                return self.setComponentByPosition(
                    idx, value, verifyConstraints
                )
            else:
                # Untagged (open) inner type: instantiate it, then recurse
                # so the tagSet is resolved against the nested component.
                t = self.setComponentByPosition(idx).getComponentByPosition(idx)
                return t.setComponentByType(
                    tagSet, value, innerFlag, verifyConstraints
                )
        else: # set outer component by inner tagSet
            return self.setComponentByPosition(
                idx, value, verifyConstraints
            )
    def getComponentTagMap(self):
        # Tag map over all component types (True -> unique-tags mode).
        if self._componentType:
            return self._componentType.getTagMap(True)
    def getComponentPositionByType(self, tagSet):
        # Position of the component matching tagSet, or None if untyped.
        if self._componentType:
            return self._componentType.getPositionByType(tagSet)
class Choice(Set):
    """ASN.1 CHOICE: exactly one alternative is active at any time.

    The active alternative's position is tracked in ``_currentIdx``;
    comparisons and truth value delegate to the chosen component.
    """
    tagSet = baseTagSet = tag.TagSet() # untagged
    # Exactly one component may be present at a time
    sizeSpec = constraint.ConstraintsIntersection(
        constraint.ValueSizeConstraint(1, 1)
        )
    typeId = 5
    _currentIdx = None
    def __eq__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] == other
        return NotImplemented
    def __ne__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] != other
        return NotImplemented
    def __lt__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] < other
        return NotImplemented
    def __le__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] <= other
        return NotImplemented
    def __gt__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] > other
        return NotImplemented
    def __ge__(self, other):
        if self._componentValues:
            return self._componentValues[self._currentIdx] >= other
        return NotImplemented
    if sys.version_info[0] <= 2:
        def __nonzero__(self): return bool(self._componentValues)
    else:
        def __bool__(self): return bool(self._componentValues)
    def __len__(self): return self._currentIdx is not None and 1 or 0
    def verifySizeSpec(self):
        # A CHOICE is valid only once an alternative has been chosen
        if self._currentIdx is None:
            raise error.PyAsn1Error('Component not chosen')
        else:
            self._sizeSpec(' ')
    def _cloneComponentValues(self, myClone, cloneValueFlag):
        """Copy the chosen component (if any) into *myClone*."""
        try:
            c = self.getComponent()
        except error.PyAsn1Error:
            pass  # nothing chosen yet -- nothing to clone
        else:
            if isinstance(c, Choice):
                tagSet = c.getEffectiveTagSet()
            else:
                tagSet = c.getTagSet()
            if isinstance(c, base.AbstractConstructedAsn1Item):
                myClone.setComponentByType(
                    tagSet, c.clone(cloneValueFlag=cloneValueFlag)
                    )
            else:
                myClone.setComponentByType(tagSet, c.clone())
    def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
        """Choose alternative *idx*, clearing any previously chosen one.

        value=None instantiates the alternative's declared type.
        """
        l = len(self._componentValues)
        if idx >= l:
            self._componentValues = self._componentValues + (idx-l+1)*[None]
        if self._currentIdx is not None:
            # only one alternative may be set -- drop the previous one
            self._componentValues[self._currentIdx] = None
        if value is None:
            if self._componentValues[idx] is None:
                self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
            self._componentValuesSet = 1
            self._currentIdx = idx
            return self
        elif not isinstance(value, base.Asn1Item):
            value = self._componentType.getTypeByPosition(idx).clone(
                value=value
                )
        if verifyConstraints:
            if self._componentTypeLen:
                self._verifyComponent(idx, value)
            self._verifySubtypeSpec(value, idx)
        self._componentValues[idx] = value
        self._currentIdx = idx
        self._componentValuesSet = 1
        return self
    def getMinTagSet(self):
        if self._tagSet:
            return self._tagSet
        else:
            return self._componentType.genMinTagSet()
    def getEffectiveTagSet(self):
        # An explicitly tagged CHOICE carries its own tags; otherwise
        # the effective tags are those of the chosen component
        if self._tagSet:
            return self._tagSet
        else:
            c = self.getComponent()
            if isinstance(c, Choice):
                return c.getEffectiveTagSet()
            else:
                return c.getTagSet()
    def getTagMap(self):
        if self._tagSet:
            return Set.getTagMap(self)
        else:
            return Set.getComponentTagMap(self)
    def getComponent(self, innerFlag=0):
        """Return the chosen component; raises if none is chosen."""
        if self._currentIdx is None:
            raise error.PyAsn1Error('Component not chosen')
        else:
            c = self._componentValues[self._currentIdx]
            if innerFlag and isinstance(c, Choice):
                return c.getComponent(innerFlag)
            else:
                return c
    def getName(self, innerFlag=0):
        """Return the chosen alternative's declared name."""
        if self._currentIdx is None:
            raise error.PyAsn1Error('Component not chosen')
        else:
            if innerFlag:
                c = self._componentValues[self._currentIdx]
                if isinstance(c, Choice):
                    return c.getName(innerFlag)
            return self._componentType.getNameByPosition(self._currentIdx)
    def setDefaultComponents(self): pass
class Any(OctetString):
    """ASN.1 ANY: opaque octets of an arbitrary, untagged value."""
    tagSet = baseTagSet = tag.TagSet() # untagged
    typeId = 6
    def getTagMap(self):
        # Map our own (empty) tag set onto self and keep the
        # end-of-octets sentinel explicit for indefinite-length decoding
        return tagmap.TagMap(
            { self.getTagSet(): self },
            { eoo.endOfOctets.getTagSet(): eoo.endOfOctets },
            self
            )
# TODO: define and document value-coercion rules for Any
# (it currently inherits OctetString's coercion behavior unchanged).
| {
"repo_name": "coruus/pyasn1",
"path": "pyasn1/type/univ.py",
"copies": "1",
"size": "44262",
"license": "bsd-2-clause",
"hash": -8172155834446786000,
"line_mean": 37.7922874671,
"line_max": 105,
"alpha_frac": 0.5488906963,
"autogenerated": false,
"ratio": 4.174478921060078,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006251993080119104,
"num_lines": 1141
} |
# ASN.1 "universal" data types
import operator, sys
from pyasn1.type import base, tag, constraint, namedtype, namedval, tagmap
from pyasn1.codec.ber import eoo
from pyasn1.compat import octets
from pyasn1 import error
# "Simple" ASN.1 types (yet incomplete)
class Integer(base.AbstractSimpleAsn1Item):
    """ASN.1 INTEGER type with optional symbolic (named) values.

    Arithmetic, bitwise and shift operations return a clone() so the
    result keeps this object's tagging and constraints; comparisons
    compare the underlying Python integer directly.
    """
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
        )
    namedValues = namedval.NamedValues()
    def __init__(self, value=None, tagSet=None, subtypeSpec=None,
                 namedValues=None):
        # Per-instance named values fall back to the class-level map
        if namedValues is None:
            self.__namedValues = self.namedValues
        else:
            self.__namedValues = namedValues
        base.AbstractSimpleAsn1Item.__init__(
            self, value, tagSet, subtypeSpec
            )
    def __and__(self, value): return self.clone(self._value & value)
    def __rand__(self, value): return self.clone(value & self._value)
    def __or__(self, value): return self.clone(self._value | value)
    def __ror__(self, value): return self.clone(value | self._value)
    def __xor__(self, value): return self.clone(self._value ^ value)
    def __rxor__(self, value): return self.clone(value ^ self._value)
    def __lshift__(self, value): return self.clone(self._value << value)
    def __rshift__(self, value): return self.clone(self._value >> value)
    def __add__(self, value): return self.clone(self._value + value)
    def __radd__(self, value): return self.clone(value + self._value)
    def __sub__(self, value): return self.clone(self._value - value)
    def __rsub__(self, value): return self.clone(value - self._value)
    def __mul__(self, value): return self.clone(self._value * value)
    def __rmul__(self, value): return self.clone(value * self._value)
    def __mod__(self, value): return self.clone(self._value % value)
    def __rmod__(self, value): return self.clone(value % self._value)
    def __pow__(self, value, modulo=None): return self.clone(pow(self._value, value, modulo))
    def __rpow__(self, value): return self.clone(pow(value, self._value))
    if sys.version_info[0] <= 2:
        def __div__(self, value): return self.clone(self._value // value)
        def __rdiv__(self, value): return self.clone(value // self._value)
    else:
        def __truediv__(self, value): return self.clone(self._value / value)
        def __rtruediv__(self, value): return self.clone(value / self._value)
        # NOTE(review): divmod() conventionally returns a (quotient,
        # remainder) pair; these return only the floor quotient.
        # Preserved as-is -- confirm against callers before changing.
        def __divmod__(self, value): return self.clone(self._value // value)
        def __rdivmod__(self, value): return self.clone(value // self._value)
        __hash__ = base.AbstractSimpleAsn1Item.__hash__
    def __int__(self): return int(self._value)
    if sys.version_info[0] <= 2:
        def __long__(self): return long(self._value)
    def __float__(self): return float(self._value)
    def __abs__(self): return abs(self._value)
    def __index__(self): return int(self._value)
    def __lt__(self, value): return self._value < value
    def __le__(self, value): return self._value <= value
    def __eq__(self, value): return self._value == value
    def __ne__(self, value): return self._value != value
    def __gt__(self, value): return self._value > value
    def __ge__(self, value): return self._value >= value
    def prettyIn(self, value):
        """Coerce *value* to int; strings may also name a NamedValue."""
        if not isinstance(value, str):
            try:
                return int(value)
            except:
                raise error.PyAsn1Error(
                    'Can\'t coerce %s into integer: %s' % (value, sys.exc_info()[1])
                    )
        r = self.__namedValues.getValue(value)
        if r is not None:
            return r
        try:
            return int(value)
        except:
            raise error.PyAsn1Error(
                'Can\'t coerce %s into integer: %s' % (value, sys.exc_info()[1])
                )
    def prettyOut(self, value):
        # Prefer the symbolic name when one is registered for this value
        r = self.__namedValues.getName(value)
        return r is None and str(value) or repr(r)
    def getNamedValues(self): return self.__namedValues
    def clone(self, value=None, tagSet=None, subtypeSpec=None,
              namedValues=None):
        """Return a copy, substituting any explicitly given attributes.

        With no arguments, returns self unchanged (cheap no-op clone).
        """
        if value is None and tagSet is None and subtypeSpec is None \
               and namedValues is None:
            return self
        if value is None:
            value = self._value
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        if namedValues is None:
            namedValues = self.__namedValues
        return self.__class__(value, tagSet, subtypeSpec, namedValues)
    def subtype(self, value=None, implicitTag=None, explicitTag=None,
                subtypeSpec=None, namedValues=None):
        """Derive a subtype: tags are applied, constraints and named
        values are merged (not replaced)."""
        if value is None:
            value = self._value
        if implicitTag is not None:
            tagSet = self._tagSet.tagImplicitly(implicitTag)
        elif explicitTag is not None:
            tagSet = self._tagSet.tagExplicitly(explicitTag)
        else:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        else:
            subtypeSpec = subtypeSpec + self._subtypeSpec
        if namedValues is None:
            namedValues = self.__namedValues
        else:
            namedValues = namedValues + self.__namedValues
        return self.__class__(value, tagSet, subtypeSpec, namedValues)
class Boolean(Integer):
    """ASN.1 BOOLEAN: an Integer restricted to 0/1 with tag number 1."""
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01),
        )
    # Only 0 (False) and 1 (True) are legal values
    subtypeSpec = Integer.subtypeSpec+constraint.SingleValueConstraint(0,1)
    namedValues = Integer.namedValues.clone(('False', 0), ('True', 1))
class BitString(base.AbstractSimpleAsn1Item):
    """ASN.1 BIT STRING: stored as a tuple of 0/1 integers.

    String initializers accept three notations (see prettyIn):
    binary "'0101'B", hex "'AF'H", or a comma-separated list of
    named-bit identifiers.
    """
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
        )
    namedValues = namedval.NamedValues()
    def __init__(self, value=None, tagSet=None, subtypeSpec=None,
                 namedValues=None):
        # Per-instance named bits fall back to the class-level map
        if namedValues is None:
            self.__namedValues = self.namedValues
        else:
            self.__namedValues = namedValues
        base.AbstractSimpleAsn1Item.__init__(
            self, value, tagSet, subtypeSpec
            )
    def clone(self, value=None, tagSet=None, subtypeSpec=None,
              namedValues=None):
        """Return a copy, substituting any explicitly given attributes."""
        if value is None and tagSet is None and subtypeSpec is None \
               and namedValues is None:
            return self
        if value is None:
            value = self._value
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        if namedValues is None:
            namedValues = self.__namedValues
        return self.__class__(value, tagSet, subtypeSpec, namedValues)
    def subtype(self, value=None, implicitTag=None, explicitTag=None,
                subtypeSpec=None, namedValues=None):
        """Derive a subtype: tags applied, constraints/named bits merged."""
        if value is None:
            value = self._value
        if implicitTag is not None:
            tagSet = self._tagSet.tagImplicitly(implicitTag)
        elif explicitTag is not None:
            tagSet = self._tagSet.tagExplicitly(explicitTag)
        else:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        else:
            subtypeSpec = subtypeSpec + self._subtypeSpec
        if namedValues is None:
            namedValues = self.__namedValues
        else:
            namedValues = namedValues + self.__namedValues
        return self.__class__(value, tagSet, subtypeSpec, namedValues)
    def __str__(self): return str(tuple(self))
    # Immutable sequence object protocol
    def __len__(self):
        # _len caches the tuple length after first use
        if self._len is None:
            self._len = len(self._value)
        return self._len
    def __getitem__(self, i):
        if isinstance(i, slice):
            return self.clone(operator.getitem(self._value, i))
        else:
            return self._value[i]
    def __add__(self, value): return self.clone(self._value + value)
    def __radd__(self, value): return self.clone(value + self._value)
    def __mul__(self, value): return self.clone(self._value * value)
    def __rmul__(self, value): return self * value
    def prettyIn(self, value):
        """Coerce *value* to a tuple of 0/1 bits (see class docstring)."""
        r = []
        if not value:
            return ()
        elif isinstance(value, str):
            if value[0] == '\'':
                # Quoted notation: '....'B (binary) or '....'H (hex)
                if value[-2:] == '\'B':
                    for v in value[1:-2]:
                        if v == '0':
                            r.append(0)
                        elif v == '1':
                            r.append(1)
                        else:
                            raise error.PyAsn1Error(
                                'Non-binary BIT STRING initializer %s' % (v,)
                                )
                    return tuple(r)
                elif value[-2:] == '\'H':
                    # Each hex digit expands to four bits, MSB first
                    for v in value[1:-2]:
                        i = 4
                        v = int(v, 16)
                        while i:
                            i = i - 1
                            r.append((v>>i)&0x01)
                    return tuple(r)
                else:
                    raise error.PyAsn1Error(
                        'Bad BIT STRING value notation %s' % (value,)
                        )
            else:
                # Comma-separated named bits; positions come from the map
                for i in value.split(','):
                    j = self.__namedValues.getValue(i)
                    if j is None:
                        raise error.PyAsn1Error(
                            'Unknown bit identifier \'%s\'' % (i,)
                            )
                    if j >= len(r):
                        r.extend([0]*(j-len(r)+1))
                    r[j] = 1
                return tuple(r)
        elif isinstance(value, (tuple, list)):
            r = tuple(value)
            for b in r:
                if b and b != 1:
                    raise error.PyAsn1Error(
                        'Non-binary BitString initializer \'%s\'' % (r,)
                        )
            return r
        elif isinstance(value, BitString):
            return tuple(value)
        else:
            raise error.PyAsn1Error(
                'Bad BitString initializer type \'%s\'' % (value,)
                )
    def prettyOut(self, value):
        return '\"\'%s\'B\"' % ''.join([str(x) for x in value])
class OctetString(base.AbstractSimpleAsn1Item):
    """ASN.1 OCTET STRING: a sequence of 8-bit bytes.

    Internally the value is a native byte string (str on Python 2,
    bytes on Python 3); text input is encoded with self._encoding.
    Alternative initializers: binValue ('10110...') and hexValue
    ('DEADBEEF').
    """
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
        )
    defaultBinValue = defaultHexValue = base.noValue
    encoding = 'us-ascii'
    def __init__(self, value=None, tagSet=None, subtypeSpec=None,
                 encoding=None, binValue=None, hexValue=None):
        if encoding is None:
            self._encoding = self.encoding
        else:
            self._encoding = encoding
        # binValue/hexValue take precedence over value; class-level
        # defaults apply when nothing was given
        if binValue is not None:
            value = self.fromBinaryString(binValue)
        if hexValue is not None:
            value = self.fromHexString(hexValue)
        if value is None or value is base.noValue:
            value = self.defaultHexValue
        if value is None or value is base.noValue:
            value = self.defaultBinValue
        # lazy cache for asNumbers()
        self.__intValue = None
        base.AbstractSimpleAsn1Item.__init__(self, value, tagSet, subtypeSpec)
    def clone(self, value=None, tagSet=None, subtypeSpec=None,
              encoding=None, binValue=None, hexValue=None):
        """Return a copy, substituting any explicitly given attributes."""
        if value is None and tagSet is None and subtypeSpec is None and \
               encoding is None and binValue is None and hexValue is None:
            return self
        if value is None and binValue is None and hexValue is None:
            value = self._value
        if tagSet is None:
            tagSet = self._tagSet
        if subtypeSpec is None:
            subtypeSpec = self._subtypeSpec
        if encoding is None:
            encoding = self._encoding
        return self.__class__(
            value, tagSet, subtypeSpec, encoding, binValue, hexValue
            )
    if sys.version_info[0] <= 2:
        def prettyIn(self, value):
            # Py2: native str passes through; int sequences are packed
            if isinstance(value, str):
                return value
            elif isinstance(value, (tuple, list)):
                try:
                    return ''.join([ chr(x) for x in value ])
                except ValueError:
                    raise error.PyAsn1Error(
                        'Bad OctetString initializer \'%s\'' % (value,)
                        )
            else:
                return str(value)
    else:
        def prettyIn(self, value):
            # Py3: bytes pass through; text is encoded with _encoding
            if isinstance(value, bytes):
                return value
            elif isinstance(value, OctetString):
                return value.asOctets()
            elif isinstance(value, (tuple, list, map)):
                try:
                    return bytes(value)
                except ValueError:
                    raise error.PyAsn1Error(
                        'Bad OctetString initializer \'%s\'' % (value,)
                        )
            else:
                try:
                    return str(value).encode(self._encoding)
                except UnicodeEncodeError:
                    raise error.PyAsn1Error(
                        'Can\'t encode string \'%s\' with \'%s\' codec' % (value, self._encoding)
                        )
    def fromBinaryString(self, value):
        """Pack a '0'/'1' string into bytes, MSB first per octet."""
        bitNo = 8; byte = 0; r = ()
        for v in value:
            if bitNo:
                bitNo = bitNo - 1
            else:
                # octet complete -- flush and start the next one
                bitNo = 7
                r = r + (byte,)
                byte = 0
            if v == '0':
                v = 0
            elif v == '1':
                v = 1
            else:
                raise error.PyAsn1Error(
                    'Non-binary OCTET STRING initializer %s' % (v,)
                    )
            byte = byte | (v << bitNo)
        return octets.ints2octs(r + (byte,))
    def fromHexString(self, value):
        """Pack a hex-digit string into bytes, two digits per octet.

        An odd trailing digit is padded with a low '0' nibble.
        """
        r = p = ()
        for v in value:
            if p:
                r = r + (int(p+v, 16),)
                p = ()
            else:
                p = v
        if p:
            r = r + (int(p+'0', 16),)
        return octets.ints2octs(r)
    def prettyOut(self, value):
        if sys.version_info[0] <= 2:
            numbers = tuple([ ord(x) for x in value ])
        else:
            numbers = tuple(value)
        # Non-printable content is rendered as hex
        if [ x for x in numbers if x < 32 or x > 126 ]:
            return '0x' + ''.join([ '%.2x' % x for x in numbers ])
        else:
            return str(value)
    def __repr__(self):
        if self._value is base.noValue:
            return self.__class__.__name__ + '()'
        if [ x for x in self.asNumbers() if x < 32 or x > 126 ]:
            return self.__class__.__name__ + '(hexValue=\'' + ''.join([ '%.2x' % x for x in self.asNumbers() ])+'\')'
        else:
            return self.__class__.__name__ + '(\'' + self.prettyOut(self._value) + '\')'
    if sys.version_info[0] <= 2:
        def __str__(self): return str(self._value)
        def __unicode__(self):
            return self._value.decode(self._encoding, 'ignore')
        def asOctets(self): return self._value
        def asNumbers(self):
            # cached tuple-of-ints view of the octets
            if self.__intValue is None:
                self.__intValue = tuple([ ord(x) for x in self._value ])
            return self.__intValue
    else:
        def __str__(self): return self._value.decode(self._encoding, 'ignore')
        def __bytes__(self): return self._value
        def asOctets(self): return self._value
        def asNumbers(self):
            # cached tuple-of-ints view of the octets
            if self.__intValue is None:
                self.__intValue = tuple(self._value)
            return self.__intValue
    # Immutable sequence object protocol
    def __len__(self):
        if self._len is None:
            self._len = len(self._value)
        return self._len
    def __getitem__(self, i):
        if isinstance(i, slice):
            return self.clone(operator.getitem(self._value, i))
        else:
            return self._value[i]
    def __add__(self, value): return self.clone(self._value + self.prettyIn(value))
    def __radd__(self, value): return self.clone(self.prettyIn(value) + self._value)
    def __mul__(self, value): return self.clone(self._value * value)
    def __rmul__(self, value): return self * value
class Null(OctetString):
    """ASN.1 NULL: carries no information; only the empty value is legal."""
    defaultValue = ''.encode() # This is tightly constrained
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
        )
    subtypeSpec = OctetString.subtypeSpec+constraint.SingleValueConstraint(''.encode())
# Integer type(s) accepted in isinstance() checks on this runtime:
# Python 2 distinguishes int and long, Python 3 unified them into int.
# The (int, long) tuple is only evaluated on Python 2, so no NameError.
intTypes = (int, long) if sys.version_info[0] <= 2 else int
class ObjectIdentifier(base.AbstractSimpleAsn1Item):
    """ASN.1 OBJECT IDENTIFIER: a tuple of non-negative sub-IDs."""
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
        )
    def __add__(self, other): return self.clone(self._value + other)
    def __radd__(self, other): return self.clone(other + self._value)
    def asTuple(self): return self._value
    # Sequence object protocol
    def __len__(self):
        # _len caches the tuple length after first use
        if self._len is None:
            self._len = len(self._value)
        return self._len
    def __getitem__(self, i):
        if isinstance(i, slice):
            return self.clone(
                operator.getitem(self._value, i)
                )
        else:
            return self._value[i]
    def __str__(self): return self.prettyPrint()
    def index(self, suboid): return self._value.index(suboid)
    def isPrefixOf(self, value):
        """Returns true if argument OID resides deeper in the OID tree"""
        l = len(self)
        if l <= len(value):
            if self._value[:l] == value[:l]:
                return 1
        return 0
    def prettyIn(self, value):
        """Dotted -> tuple of numerics OID converter"""
        if isinstance(value, tuple):
            pass
        elif isinstance(value, ObjectIdentifier):
            return tuple(value)
        elif isinstance(value, str):
            r = []
            # base 0 lets int() honor 0x/0o prefixes in sub-IDs
            for element in [ x for x in value.split('.') if x != '' ]:
                try:
                    r.append(int(element, 0))
                except ValueError:
                    raise error.PyAsn1Error(
                        'Malformed Object ID %s at %s: %s' %
                        (str(value), self.__class__.__name__, sys.exc_info()[1])
                        )
            value = tuple(r)
        else:
            try:
                value = tuple(value)
            except TypeError:
                raise error.PyAsn1Error(
                    'Malformed Object ID %s at %s: %s' %
                    (str(value), self.__class__.__name__,sys.exc_info()[1])
                    )
        # every sub-ID must be a non-negative integer
        for x in value:
            if not isinstance(x, intTypes) or x < 0:
                raise error.PyAsn1Error(
                    'Invalid sub-ID in %s at %s' % (value, self.__class__.__name__)
                    )
        return value
    def prettyOut(self, value): return '.'.join([ str(x) for x in value ])
class Real(base.AbstractSimpleAsn1Item):
    """ASN.1 REAL type.

    The internal value is either a (mantissa, base, exponent) triplet
    of integers (base 2 or 10) or one of the IEEE infinities (stored
    as floats). Base-10 values are normalized so the mantissa carries
    no trailing zero decimal digits.
    """
    try:
        _plusInf = float('inf')
        _minusInf = float('-inf')
        _inf = (_plusInf, _minusInf)
    except ValueError:
        # Infinity support is platform and Python dependent
        _plusInf = _minusInf = None
        _inf = ()
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
        )
    def __normalizeBase10(self, value):
        # Strip trailing zero digits off the mantissa, bumping the
        # exponent accordingly. Floor division (//) keeps the mantissa
        # an int on Python 3; the previous true division (/) silently
        # turned it into a float.
        m, b, e = value
        while m and m % 10 == 0:
            m = m // 10
            e = e + 1
        return m, b, e
    def prettyIn(self, value):
        """Coerce *value* into the internal representation.

        Accepts a 3-tuple (m, b, e), int, float, Real or a numeric
        string (including infinity literals); raises PyAsn1Error on
        anything else.
        """
        if isinstance(value, tuple) and len(value) == 3:
            for d in value:
                if not isinstance(d, intTypes):
                    raise error.PyAsn1Error(
                        'Lame Real value syntax: %s' % (value,)
                        )
            if value[1] not in (2, 10):
                raise error.PyAsn1Error(
                    'Prohibited base for Real value: %s' % (value[1],)
                    )
            if value[1] == 10:
                value = self.__normalizeBase10(value)
            return value
        elif isinstance(value, intTypes):
            return self.__normalizeBase10((value, 10, 0))
        elif isinstance(value, float):
            if self._inf and value in self._inf:
                return value
            else:
                # Scale the fraction away, then normalize
                e = 0
                while int(value) != value:
                    value = value * 10
                    e = e - 1
                return self.__normalizeBase10((int(value), 10, e))
        elif isinstance(value, Real):
            return tuple(value)
        elif isinstance(value, str): # handle infinite literal
            try:
                # Route through the float branch so finite strings become
                # normalized (m, 10, e) triplets. Returning the raw float
                # (as before) left _value as a float, which later broke
                # __float__() and __getitem__() on finite values.
                return self.prettyIn(float(value))
            except ValueError:
                pass
        raise error.PyAsn1Error(
            'Bad real value syntax: %s' % (value,)
            )
    def prettyOut(self, value):
        if value in self._inf:
            return '\'%s\'' % value
        else:
            return str(value)
    def isPlusInfinity(self): return self._value == self._plusInf
    def isMinusInfinity(self): return self._value == self._minusInf
    def isInfinity(self): return self._value in self._inf
    def __str__(self): return str(float(self))
    # Arithmetic converts to float, computes, then re-wraps via clone()
    def __add__(self, value): return self.clone(float(self) + value)
    def __radd__(self, value): return self + value
    def __mul__(self, value): return self.clone(float(self) * value)
    def __rmul__(self, value): return self * value
    def __sub__(self, value): return self.clone(float(self) - value)
    def __rsub__(self, value): return self.clone(value - float(self))
    def __mod__(self, value): return self.clone(float(self) % value)
    def __rmod__(self, value): return self.clone(value % float(self))
    def __pow__(self, value, modulo=None): return self.clone(pow(float(self), value, modulo))
    def __rpow__(self, value): return self.clone(pow(value, float(self)))
    if sys.version_info[0] <= 2:
        def __div__(self, value): return self.clone(float(self) / value)
        def __rdiv__(self, value): return self.clone(value / float(self))
    else:
        def __truediv__(self, value): return self.clone(float(self) / value)
        def __rtruediv__(self, value): return self.clone(value / float(self))
        def __divmod__(self, value): return self.clone(float(self) // value)
        def __rdivmod__(self, value): return self.clone(value // float(self))
    def __int__(self): return int(float(self))
    if sys.version_info[0] <= 2:
        def __long__(self): return long(float(self))
    def __float__(self):
        if self._value in self._inf:
            return self._value
        else:
            # m * b**e
            return float(
                self._value[0] * pow(self._value[1], self._value[2])
                )
    def __abs__(self): return abs(float(self))
    def __lt__(self, value): return float(self) < value
    def __le__(self, value): return float(self) <= value
    def __eq__(self, value): return float(self) == value
    def __ne__(self, value): return float(self) != value
    def __gt__(self, value): return float(self) > value
    def __ge__(self, value): return float(self) >= value
    if sys.version_info[0] <= 2:
        def __nonzero__(self): return bool(float(self))
    else:
        def __bool__(self): return bool(float(self))
        __hash__ = base.AbstractSimpleAsn1Item.__hash__
    def __getitem__(self, idx):
        # Only the (m, b, e) triplet form is subscriptable
        if self._value in self._inf:
            raise error.PyAsn1Error('Invalid infinite value operation')
        else:
            return self._value[idx]
class Enumerated(Integer):
    """ASN.1 ENUMERATED: an Integer with its own universal tag (10)."""
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x0A)
        )
# "Structured" ASN.1 types
class SetOf(base.AbstractConstructedAsn1Item):
    """ASN.1 SET OF: homogeneous collection of one component type."""
    componentType = None
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
        )
    typeId = 1
    def _cloneComponentValues(self, myClone, cloneValueFlag):
        # Copy every set component into myClone, preserving positions
        idx = 0; l = len(self._componentValues)
        while idx < l:
            c = self._componentValues[idx]
            if c is not None:
                if isinstance(c, base.AbstractConstructedAsn1Item):
                    myClone.setComponentByPosition(
                        idx, c.clone(cloneValueFlag=cloneValueFlag)
                        )
                else:
                    myClone.setComponentByPosition(idx, c.clone())
            idx = idx + 1
    def _verifyComponent(self, idx, value):
        if self._componentType is not None and \
               not self._componentType.isSuperTypeOf(value):
            raise error.PyAsn1Error('Component type error %s' % (value,))
    def getComponentByPosition(self, idx): return self._componentValues[idx]
    def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
        """Assign *value* at position *idx*, growing the value list.

        value=None instantiates the declared component type; plain
        Python values are coerced through the component type's clone().
        """
        l = len(self._componentValues)
        if idx >= l:
            self._componentValues = self._componentValues + (idx-l+1)*[None]
        if value is None:
            if self._componentValues[idx] is None:
                if self._componentType is None:
                    raise error.PyAsn1Error('Component type not defined')
                self._componentValues[idx] = self._componentType.clone()
                self._componentValuesSet = self._componentValuesSet + 1
            return self
        elif not isinstance(value, base.Asn1Item):
            if self._componentType is None:
                raise error.PyAsn1Error('Component type not defined')
            if isinstance(self._componentType, base.AbstractSimpleAsn1Item):
                value = self._componentType.clone(value=value)
            else:
                raise error.PyAsn1Error('Instance value required')
        if verifyConstraints:
            if self._componentType is not None:
                self._verifyComponent(idx, value)
            self._verifySubtypeSpec(value, idx)
        if self._componentValues[idx] is None:
            self._componentValuesSet = self._componentValuesSet + 1
        self._componentValues[idx] = value
        return self
    def getComponentTagMap(self):
        if self._componentType is not None:
            return self._componentType.getTagMap()
    def prettyPrint(self, scope=0):
        """Indented dump of component values; None slots show <empty>."""
        scope = scope + 1
        r = self.__class__.__name__ + ':\n'
        for idx in range(len(self._componentValues)):
            r = r + ' '*scope
            if self._componentValues[idx] is None:
                r = r + '<empty>'
            else:
                r = r + self._componentValues[idx].prettyPrint(scope)
        return r
class SequenceOf(SetOf):
    """ASN.1 SEQUENCE OF: ordered homogeneous collection (tag 16)."""
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
        )
    typeId = 2
class SequenceAndSetBase(base.AbstractConstructedAsn1Item):
    """Shared machinery for SEQUENCE and SET: positionally stored
    components described by a NamedTypes map."""
    componentType = namedtype.NamedTypes()
    def __init__(self, componentType=None, tagSet=None,
                 subtypeSpec=None, sizeSpec=None):
        base.AbstractConstructedAsn1Item.__init__(
            self, componentType, tagSet, subtypeSpec, sizeSpec
            )
        # cache the component count; 0 when untyped
        if self._componentType is None:
            self._componentTypeLen = 0
        else:
            self._componentTypeLen = len(self._componentType)
    def __getitem__(self, idx):
        # string index -> lookup by component name, else by position
        if isinstance(idx, str):
            return self.getComponentByName(idx)
        else:
            return base.AbstractConstructedAsn1Item.__getitem__(self, idx)
    def __setitem__(self, idx, value):
        if isinstance(idx, str):
            self.setComponentByName(idx, value)
        else:
            base.AbstractConstructedAsn1Item.__setitem__(self, idx, value)
    def _cloneComponentValues(self, myClone, cloneValueFlag):
        # Copy every component into myClone, preserving positions
        idx = 0; l = len(self._componentValues)
        while idx < l:
            c = self._componentValues[idx]
            if c is not None:
                if isinstance(c, base.AbstractConstructedAsn1Item):
                    myClone.setComponentByPosition(
                        idx, c.clone(cloneValueFlag=cloneValueFlag)
                        )
                else:
                    myClone.setComponentByPosition(idx, c.clone())
            idx = idx + 1
    def _verifyComponent(self, idx, value):
        if idx >= self._componentTypeLen:
            raise error.PyAsn1Error(
                'Component type error out of range'
                )
        t = self._componentType[idx].getType()
        if not t.isSuperTypeOf(value):
            raise error.PyAsn1Error('Component type error %r vs %r' % (t, value))
    def getComponentByName(self, name):
        return self.getComponentByPosition(
            self._componentType.getPositionByName(name)
            )
    def setComponentByName(self, name, value=None, verifyConstraints=True):
        return self.setComponentByPosition(
            self._componentType.getPositionByName(name), value,
            verifyConstraints
            )
    def getComponentByPosition(self, idx):
        try:
            return self._componentValues[idx]
        except IndexError:
            # In-range but not-yet-set components read as None;
            # truly out-of-range positions still raise
            if idx < self._componentTypeLen:
                return
            raise
    def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
        """Assign *value* at position *idx*, growing the value list.

        value=None instantiates the declared type at that position;
        plain Python values are coerced through the declared type.
        """
        l = len(self._componentValues)
        if idx >= l:
            self._componentValues = self._componentValues + (idx-l+1)*[None]
        if value is None:
            if self._componentValues[idx] is None:
                self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
                self._componentValuesSet = self._componentValuesSet + 1
            return self
        elif not isinstance(value, base.Asn1Item):
            t = self._componentType.getTypeByPosition(idx)
            if isinstance(t, base.AbstractSimpleAsn1Item):
                value = t.clone(value=value)
            else:
                raise error.PyAsn1Error('Instance value required')
        if verifyConstraints:
            if self._componentTypeLen:
                self._verifyComponent(idx, value)
            self._verifySubtypeSpec(value, idx)
        if self._componentValues[idx] is None:
            self._componentValuesSet = self._componentValuesSet + 1
        self._componentValues[idx] = value
        return self
    def getNameByPosition(self, idx):
        if self._componentTypeLen:
            return self._componentType.getNameByPosition(idx)
    def getDefaultComponentByPosition(self, idx):
        # Only defaulted components expose a default type object
        if self._componentTypeLen and self._componentType[idx].isDefaulted:
            return self._componentType[idx].getType()
    def getComponentType(self):
        if self._componentTypeLen:
            return self._componentType
    def setDefaultComponents(self):
        """Instantiate defaulted components; raise on missing mandatory
        (non-optional, non-defaulted) ones."""
        if self._componentTypeLen == self._componentValuesSet:
            return
        idx = self._componentTypeLen
        while idx:
            idx = idx - 1
            if self._componentType[idx].isDefaulted:
                if self.getComponentByPosition(idx) is None:
                    self.setComponentByPosition(idx)
            elif not self._componentType[idx].isOptional:
                if self.getComponentByPosition(idx) is None:
                    raise error.PyAsn1Error(
                        'Uninitialized component #%s at %r' % (idx, self)
                        )
    def prettyPrint(self, scope=0):
        """Indented name=value dump of the set components."""
        scope = scope + 1
        r = self.__class__.__name__ + ':\n'
        for idx in range(len(self._componentValues)):
            if self._componentValues[idx] is not None:
                r = r + ' '*scope
                componentType = self.getComponentType()
                if componentType is None:
                    r = r + '<no-name>'
                else:
                    r = r + componentType.getNameByPosition(idx)
                r = '%s=%s\n' % (
                    r, self._componentValues[idx].prettyPrint(scope)
                    )
        return r
class Sequence(SequenceAndSetBase):
    """ASN.1 SEQUENCE: ordered collection of named, typed components."""
    # Universal class, constructed form, tag number 16 (0x10)
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
        )
    typeId = 3
    def getComponentTagMapNearPosition(self, idx):
        """Return the tag map applicable at/after *idx*, or None when
        no component types are configured."""
        componentType = self._componentType
        if not componentType:
            return None
        return componentType.getTagMapNearPosition(idx)
    def getComponentPositionNearType(self, tagSet, idx):
        """Resolve *tagSet* to a component position at/after *idx*;
        without component types the given position is used as-is."""
        componentType = self._componentType
        if not componentType:
            return idx
        return componentType.getPositionNearType(tagSet, idx)
class Set(SequenceAndSetBase):
    """ASN.1 SET: unordered collection of named, typed components,
    addressed by tag set rather than by position."""
    # Universal class, constructed form, tag number 17 (0x11)
    tagSet = baseTagSet = tag.initTagSet(
        tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
        )
    typeId = 4
    def getComponent(self, innerFlag=0): return self
    def getComponentByType(self, tagSet, innerFlag=0):
        """Fetch a component by its tag set; descend into nested Sets
        when innerFlag is true."""
        c = self.getComponentByPosition(
            self._componentType.getPositionByType(tagSet)
            )
        if innerFlag and isinstance(c, Set):
            # get inner component by inner tagSet
            return c.getComponent(1)
        else:
            # get outer component by inner tagSet
            return c
    def setComponentByType(self, tagSet, value=None, innerFlag=0,
                           verifyConstraints=True):
        """Assign *value* to the component identified by *tagSet*."""
        idx = self._componentType.getPositionByType(tagSet)
        t = self._componentType.getTypeByPosition(idx)
        if innerFlag: # set inner component by inner tagSet
            if t.getTagSet():
                return self.setComponentByPosition(
                    idx, value, verifyConstraints
                    )
            else:
                # Untagged inner type: instantiate it in place, then
                # recurse so the tag set resolves inside it
                t = self.setComponentByPosition(idx).getComponentByPosition(idx)
                return t.setComponentByType(
                    tagSet, value, innerFlag, verifyConstraints
                    )
        else: # set outer component by inner tagSet
            return self.setComponentByPosition(
                idx, value, verifyConstraints
                )
    def getComponentTagMap(self):
        # Unique-tags map (hence the True) over all component types
        if self._componentType:
            return self._componentType.getTagMap(True)
    def getComponentPositionByType(self, tagSet):
        if self._componentType:
            return self._componentType.getPositionByType(tagSet)
class Choice(Set):
tagSet = baseTagSet = tag.TagSet() # untagged
sizeSpec = constraint.ConstraintsIntersection(
constraint.ValueSizeConstraint(1, 1)
)
typeId = 5
_currentIdx = None
    def __eq__(self, other):
        # Equality is delegated to the chosen component, if any
        if self._componentValues:
            return self._componentValues[self._currentIdx] == other
        return NotImplemented
    def __ne__(self, other):
        # Inequality is delegated to the chosen component, if any
        if self._componentValues:
            return self._componentValues[self._currentIdx] != other
        return NotImplemented
    def __lt__(self, other):
        # Ordering is delegated to the chosen component, if any
        if self._componentValues:
            return self._componentValues[self._currentIdx] < other
        return NotImplemented
    def __le__(self, other):
        # Ordering is delegated to the chosen component, if any
        if self._componentValues:
            return self._componentValues[self._currentIdx] <= other
        return NotImplemented
    def __gt__(self, other):
        # Ordering is delegated to the chosen component, if any
        if self._componentValues:
            return self._componentValues[self._currentIdx] > other
        return NotImplemented
def __ge__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] >= other
return NotImplemented
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._componentValues)
else:
def __bool__(self): return bool(self._componentValues)
def __len__(self): return self._currentIdx is not None and 1 or 0
def verifySizeSpec(self):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
self._sizeSpec(' ')
def _cloneComponentValues(self, myClone, cloneValueFlag):
try:
c = self.getComponent()
except error.PyAsn1Error:
pass
else:
if isinstance(c, Choice):
tagSet = c.getEffectiveTagSet()
else:
tagSet = c.getTagSet()
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByType(
tagSet, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByType(tagSet, c.clone())
def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if self._currentIdx is not None:
self._componentValues[self._currentIdx] = None
if value is None:
if self._componentValues[idx] is None:
self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
self._componentValuesSet = 1
self._currentIdx = idx
return self
elif not isinstance(value, base.Asn1Item):
value = self._componentType.getTypeByPosition(idx).clone(
value=value
)
if verifyConstraints:
if self._componentTypeLen:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
self._componentValues[idx] = value
self._currentIdx = idx
self._componentValuesSet = 1
return self
def getMinTagSet(self):
if self._tagSet:
return self._tagSet
else:
return self._componentType.genMinTagSet()
def getEffectiveTagSet(self):
if self._tagSet:
return self._tagSet
else:
c = self.getComponent()
if isinstance(c, Choice):
return c.getEffectiveTagSet()
else:
return c.getTagSet()
def getTagMap(self):
if self._tagSet:
return Set.getTagMap(self)
else:
return Set.getComponentTagMap(self)
def getComponent(self, innerFlag=0):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
c = self._componentValues[self._currentIdx]
if innerFlag and isinstance(c, Choice):
return c.getComponent(innerFlag)
else:
return c
def getName(self, innerFlag=0):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
if innerFlag:
c = self._componentValues[self._currentIdx]
if isinstance(c, Choice):
return c.getName(innerFlag)
return self._componentType.getNameByPosition(self._currentIdx)
def setDefaultComponents(self): pass
class Any(OctetString):
    """ASN.1 ANY: an untagged holder for arbitrary encoded content."""
    tagSet = baseTagSet = tag.TagSet()  # untagged
    typeId = 6

    def getTagMap(self):
        # Maps its own (empty) tag set plus end-of-octets, so the decoder
        # accepts any content, including indefinite-length terminators.
        return tagmap.TagMap(
            { self.getTagSet(): self },
            { eoo.endOfOctets.getTagSet(): eoo.endOfOctets },
            self
        )
# XXX
# coercion rules?
| {
"repo_name": "viger/docker",
"path": "proxy/proxy/code/default/python27/1.0/lib/noarch/pyasn1/type/univ.py",
"copies": "8",
"size": "39459",
"license": "mit",
"hash": -2490643430241797000,
"line_mean": 36.8685220729,
"line_max": 117,
"alpha_frac": 0.5537646671,
"autogenerated": false,
"ratio": 4.1835241730279895,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0021335415291157877,
"num_lines": 1042
} |
"""ASN.1 UTCTime and GeneralizedTime, as understood by RFC 5280."""
import abc
import time
from ct.crypto import error
from ct.crypto.asn1 import tag
from ct.crypto.asn1 import types
class BaseTime(types.ASN1String):
    """Common behavior for the ASN.1 time types (UTCTime, GeneralizedTime)."""

    def __init__(self, value=None, serialized_value=None, strict=True):
        super(BaseTime, self).__init__(value=value,
                                       serialized_value=serialized_value,
                                       strict=strict)
        self._gmtime = self._decode_gmtime()
        # Lenient "strict": anything the subclass managed to decode is
        # accepted even if not fully standard-conformant; pure garbage
        # (decode returned None) raises.
        if strict and self._gmtime is None:
            raise error.ASN1Error("Corrupt time: %s" % self._value)

    def gmtime(self):
        """Return the decoded GMT time.

        Returns:
            a time.struct_time struct.
        Raises:
            error.ASN1Error: the ASN.1 string does not represent a valid time.
        """
        decoded = self._gmtime
        if decoded is None:
            raise error.ASN1Error("Corrupt time: %s" % self._value)
        return decoded

    @abc.abstractmethod
    def _decode_gmtime(self):
        """Subclass hook: decode self's value to struct_time, or None."""

    def __str__(self):
        try:
            decoded = self.gmtime()
        except error.ASN1Error:
            return str(self.value)
        return time.strftime("%c GMT", decoded)
@types.Universal(23, tag.PRIMITIVE)
class UTCTime(BaseTime):
    """UTCTime, as understood by RFC 5280."""
    # YYMMDDHHMMSSZ (the only form RFC 5280 actually allows)
    _ASN1_LENGTH = 13
    # YYMMDDHHMMZ (older certificates omitting seconds)
    _UTC_NO_SECONDS_LENGTH = 11
    # YYMMDDHHMMSS+HHMM (non-standard explicit timezone offset)
    _UTC_TZ_OFFSET_LENGTH = 17
    # YYMMDDHHMMSS
    _UTC_NO_Z_LENGTH = 12

    def _decode_gmtime(self):
        """GMT time.

        Returns:
            a time.struct_time struct, or None if the string does not
            represent a valid time.
        """
        # RFC 5280 mandates Zulu time with seconds (YYMMDDHHMMSSZ) and the
        # two-digit-year window: YY >= 50 -> 19YY, YY < 50 -> 20YY.  In
        # practice older certificates also omit seconds (e.g. 0001010000Z)
        # or use an explicit offset (e.g. 360526194526+0000), handled below.
        string_time = self.value
        if len(string_time) == self._ASN1_LENGTH and string_time[-1] == "Z":
            format = "%Y%m%d%H%M%S%Z"
        elif (len(string_time) == self._UTC_NO_SECONDS_LENGTH and
              string_time[-1] == "Z"):
            format = "%Y%m%d%H%M%Z"
        elif (len(string_time) == self._UTC_TZ_OFFSET_LENGTH and
              string_time[self._UTC_NO_Z_LENGTH] in ('+','-')):
            # "%z" is not supported on all platforms (see the time module
            # docs), and this form is rare and non-standard, so the offset
            # is deliberately ignored for now.  TBD: parse it correctly by
            # converting +HHMM to a timedelta; "+0000" could also be
            # special-cased as plain GMT.
            format = "%Y%m%d%H%M%S%Z"
            # Truncate to 13 chars, keeping "YYMMDDHHMMSS+"; the trailing
            # sign character is stripped by string_time[:-1] below.
            string_time = string_time[0:self._ASN1_LENGTH]
        else:
            return None
        try:
            year = int(string_time[:2])
        except ValueError:
            return None
        if 0 <= year < 50:
            century = "20"
        elif 50 <= year <= 99:
            century = "19"
        else:
            return None
        try:
            # Adding GMT clears the daylight saving flag.
            return time.strptime(century + string_time[:-1] + "GMT", format)
        except ValueError:
            return None
@types.Universal(24, tag.PRIMITIVE)
class GeneralizedTime(BaseTime):
    """Generalized time, as understood by RFC 5280."""
    # RFC 5280 mandates exactly YYYYMMDDHHMMSSZ: Zulu, with seconds,
    # no fractional seconds.
    _ASN1_LENGTH = 15

    def _decode_gmtime(self):
        """GMT time.

        Returns:
            a time.struct_time struct, or None if the string does not
            represent a valid time.
        """
        raw = self._value
        if len(raw) != self._ASN1_LENGTH or raw[-1] != "Z":
            return None
        # Appending GMT clears the daylight-saving flag in the result.
        try:
            return time.strptime(raw[:-1] + "GMT", "%Y%m%d%H%M%S%Z")
        except ValueError:
            return None
class Time(types.Choice):
    """X.509 Time: a CHOICE between UTCTime and GeneralizedTime."""
    print_labels = False
    components = {"utcTime": UTCTime,
                  "generalTime": GeneralizedTime}
class Validity(types.Sequence):
    """X.509 Validity: the notBefore/notAfter window of a certificate."""
    components = (
        (types.Component("notBefore", Time)),
        (types.Component("notAfter", Time))
    )
| {
"repo_name": "kyprizel/certificate-transparency",
"path": "python/ct/crypto/asn1/x509_time.py",
"copies": "6",
"size": "5505",
"license": "apache-2.0",
"hash": 7206818316295418000,
"line_mean": 32.3636363636,
"line_max": 79,
"alpha_frac": 0.5714804723,
"autogenerated": false,
"ratio": 3.8550420168067228,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7426522489106723,
"avg_score": null,
"num_lines": null
} |
"""ASN.1 X509 specification."""
from ct.crypto.asn1 import types
from ct.crypto.asn1 import x509_common
from ct.crypto.asn1 import x509_extension
from ct.crypto.asn1 import x509_name
from ct.crypto.asn1 import x509_time
class TBSCertificate(types.Sequence):
    """RFC 5280 TBSCertificate: the to-be-signed body of a certificate."""
    components = (
        (types.Component("version", x509_common.Version.explicit(0),
                         default=0)),
        (types.Component("serialNumber", x509_common.CertificateSerialNumber)),
        (types.Component("signature", x509_common.AlgorithmIdentifier)),
        (types.Component("issuer", x509_name.Name)),
        (types.Component("validity", x509_time.Validity)),
        (types.Component("subject", x509_name.Name)),
        (types.Component("subjectPublicKeyInfo",
                         x509_common.SubjectPublicKeyInfo)),
        # issuer/subject unique IDs are deprecated and rarely present.
        (types.Component("issuerUniqueID",
                         x509_common.UniqueIdentifier.implicit(1),
                         optional=True)),
        (types.Component("subjectUniqueID",
                         x509_common.UniqueIdentifier.implicit(2),
                         optional=True)),
        (types.Component("extensions",
                         x509_extension.Extensions.explicit(3), optional=True))
    )
class Certificate(types.Sequence):
    """RFC 5280 Certificate: TBS body, signature algorithm, and signature."""
    components = (
        (types.Component("tbsCertificate", TBSCertificate)),
        (types.Component("signatureAlgorithm", x509_common.AlgorithmIdentifier)),
        (types.Component("signatureValue", types.BitString))
    )
| {
"repo_name": "AlCutter/certificate-transparency",
"path": "python/ct/crypto/asn1/x509.py",
"copies": "35",
"size": "1520",
"license": "apache-2.0",
"hash": 6837186720075873000,
"line_mean": 40.0810810811,
"line_max": 81,
"alpha_frac": 0.6269736842,
"autogenerated": false,
"ratio": 4.257703081232493,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
#A snapshot of a conversation segment
class Snapshot:
    """A snapshot of a conversation segment.

    Holds an ordered list of dialog strings; the last entry is the
    "finale" (the response), the preceding entries are its context.
    """

    def __init__(self):
        self.dialog = []
        self.uses = 0  # Access count of the finale; used for pruning.

    def append(self, s):
        """Add one dialog line to the snapshot."""
        self.dialog.append(s)

    def getFinale(self):
        """Return the final dialog line, counting the access for pruning."""
        self.uses += 1
        return self.dialog[-1]

    def getScore(self, sin):
        """Score how well *sin* matches this snapshot's context lines.

        Lines later in the dialog weigh more heavily: the j-th context
        line contributes 2 * points ** (j + 1).
        """
        score = 0
        # The range stops before the last entry: the finale itself is
        # never matched against the input.
        for j in range(0, len(self.dialog) - 1):
            mp = self.getStringPoints(self.dialog[j], sin)
            # BUG FIX: the original computed "2*mp^i" -- in Python "^" is
            # bitwise XOR, not exponentiation -- so the intended recency
            # weighting never happened.  Use ** for the power.
            score += 2 * mp ** (j + 1)
        return score

    def getStringPoints(self, s0, s1):
        """Count how many (cleaned) words of *s0* also occur in *s1*.

        Duplicate words in *s0* are counted each time, matching the
        original behavior.
        """
        words0 = self.cleanString(s0).split()
        words1 = self.cleanString(s1).split()
        return sum(1 for word in words0 if word in words1)

    def cleanString(self, s0):
        """Lowercase *s0*, strip punctuation, and collapse whitespace."""
        punc = "\"\'.,;()[]{}\\/!?~`"
        s0 = s0.lower()
        for char in punc:
            s0 = s0.replace(char, '')
        # split()/join normalizes any run of whitespace to single spaces
        # and trims the ends in one step.
        return ' '.join(s0.split())
| {
"repo_name": "henfredemars/python-personal-projects",
"path": "ChatBot/Snapshot.py",
"copies": "1",
"size": "1123",
"license": "mit",
"hash": -1501657738977634000,
"line_mean": 21.8936170213,
"line_max": 45,
"alpha_frac": 0.4452359751,
"autogenerated": false,
"ratio": 3.509375,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44546109751,
"avg_score": null,
"num_lines": null
} |
# as noted in the individual source code files.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import gavl
from gavl import relalg, constants
from gavl.nodes import PreNodeVisitor, PostNodeVisitor, Node
from gavl.parser.visitors import ActiveFieldResolver
import pandas as pd
from pandas.core.common import is_timedelta64_dtype
import numpy as np
import sqlalchemy as sa
# Comparison operators the filter layer understands, mapped to opcodes.
SUPPORTED_FILTER_OPERATORS = {
    "==": constants.OpCodes.EQ,
    "<": constants.OpCodes.LT,
    "<=": constants.OpCodes.LTE,
    ">": constants.OpCodes.GT,
    ">=": constants.OpCodes.GTE,
}
def create_sa_db(conn_string, echo=False):
    """Create a SQLAlchemy engine for *conn_string*.

    sslmode 'prefer' is a PostgreSQL-specific connect argument: try SSL,
    fall back to a plain connection.
    """
    db = sa.create_engine(conn_string, connect_args={'sslmode': 'prefer'},
                          echo=echo)
    return db
class Relation(object):
    """Marker base class for queryable relations."""
    pass
class Attribute(object):
    """Marker base class for relation attributes."""
    pass
class SAAttribute(Attribute):
    """An attribute backed by a SQLAlchemy column."""
    def __init__(self, parent, sa_column):
        self.parent = parent  # owning SARelation
        self.sa_column = sa_column
    def get_sa_column(self):
        # The underlying SQLAlchemy column object.
        return self.sa_column
class SARelation(Relation):
    """A relation backed by a SQLAlchemy table.

    The table is wrapped in an alias (so it can join against itself) and
    each column is exposed as an SAAttribute, reachable both by attribute
    access and through ``attributes``.
    """

    def __init__(self, db, table, schema='public'):
        self.db = db
        self.schema = schema
        self.attributes = {}
        self.table_clause = table.__table__.alias()
        for attr in self.table_clause.c:
            self.add_attribute(
                attr.key,
                SAAttribute(self, attr)
            )

    def __getattr__(self, name):
        # BUG FIX: the original raised KeyError for unknown names, which
        # breaks the attribute protocol -- hasattr() and
        # getattr(obj, name, default) only handle AttributeError.
        try:
            return self.attributes[name]
        except KeyError:
            raise AttributeError(name)

    def add_attribute(self, name, attr):
        self.attributes[name] = attr
class Engine(object):
    """Registry of relations, join links and saved symbols, plus the
    top-level query entry point."""

    def __init__(self, db):
        self.relations = {}
        self.links = []
        self.symbol_table = {}
        self.db = db

    def get_relation(self, name, default=None):
        """Look up a registered relation by name."""
        return self.relations.get(name, default)

    def add_relation(self, name, relation):
        """Register *relation* under *name* and return it."""
        self.relations[name] = relation
        return relation

    def get_symbol(self, name, default=None):
        """Look up a saved symbol (pre-planned relational algebra)."""
        return self.symbol_table.get(name, default)

    def add_symbol(self, name, symbol):
        self.symbol_table[name] = symbol

    def link(self, a, b):
        """Record a join link between two attributes."""
        self.links.append((a, b))

    def find_links_between(self, from_relation, to_relation):
        """Return all links (a, b) with a owned by *from_relation* and b
        owned by *to_relation*, in registration order."""
        return [(a, b) for a, b in self.links
                if a.parent == from_relation and b.parent == to_relation]

    def query(self, query, groupby=[]):
        """Parse, plan and execute *query*; returns a pandas DataFrame
        with the active field renamed to "result"."""
        split_groupby = [tuple(entry.split('.')) for entry in groupby]
        ast = gavl.parse(query)
        ast = VariableSaver(self).visit(ast)
        plan = gavl.plan(ast)
        plan = VariableReplacer(self).visit(plan)
        physical = QueryPlanner(self).visit(plan)
        frame = QueryExecutor(self, split_groupby).visit(physical)
        active_field = list(ActiveFieldResolver().visit(plan))
        frame.rename(columns={active_field[0]: "result"}, inplace=True)
        return frame
class VariableSaver(PreNodeVisitor):
    """Pre-order visitor that records assignments in the engine's symbol
    table as planned relational algebra."""
    def __init__(self, engine):
        self.engine = engine
    def visit_assign(self, node):
        # node is a (variable-name, relation-AST) pair.
        var_name, relation = node
        relalg = gavl.plan(relation)
        self.engine.add_symbol(var_name, relalg)
        return node
class VariableReplacer(PostNodeVisitor):
    """Post-order visitor that resolves relation references to either a
    previously saved symbol or a concrete RelationNode."""

    def __init__(self, engine):
        self.engine = engine

    def visit_relation(self, node):
        symbol = self.engine.get_symbol(node.name)
        if symbol:
            # BUG FIX: the original visited the undefined name "relation"
            # here (a guaranteed NameError on this branch); the saved
            # symbol is what must be recursively expanded.
            return VariableReplacer(self.engine).visit(symbol)
        relation = self.engine.get_relation(node.name)
        if relation:
            return relalg.RelationNode(node.name)
        raise Exception("Cannot find relation or symbol "
                        "'{}'".format(node.name))
PlanNode = Node
SAQuery = PlanNode('sa_query', 'query db')
PandasArith = PlanNode('pandas_arith', 'df out_field left_col right_col op_code')
PandasMerge = PlanNode('pandas_merge', 'left right')
class DataSource(object):
    """Marker base class for data sources."""
    pass

class SADataSource(DataSource):
    """Marker class for SQLAlchemy-backed data sources."""
    pass
class SASelectBuilder(PostNodeVisitor):
    """Post-order visitor building the SELECT column list for a
    relational-algebra subtree."""

    def __init__(self, engine):
        self.engine = engine

    def visit_constant(self, node):
        # Literal value rendered as a labeled SQL literal column.
        return [sa.sql.expression.literal_column(str(node.value))
                .label(node.field)]

    def visit_relation(self, node):
        sa_relation = self.engine.get_relation(node.name)
        return sa_relation.table_clause.columns

    def visit_project(self, node):
        projected = [c for c in node.relation if c.name in node.fields]
        # Every requested field must resolve to exactly one column.
        assert len(projected) == len(node.fields), node
        assert len(projected) > 0
        return projected

    def visit_join(self, node):
        # BUG FIX (dead code removed): the original returned here and left
        # an unreachable name-deduplication loop below the return.
        # Behavior is preserved (plain concatenation); if duplicate column
        # names should instead be dropped, restore the dedup in place of
        # this return.
        return list(node.left) + list(node.right)

    def visit_arithmetic(self, node):
        left_field = [c for c in node.relation if c.name == node.left_field]
        right_field = [c for c in node.relation if c.name == node.right_field]
        assert len(left_field) == 1, left_field
        assert len(right_field) == 1, right_field
        f = constants.PYTHON_OPERATORS[node.op_code]
        return [f(left_field[0], right_field[0]).label(node.out_field)]

    def visit_agg(self, node):
        # UNIQUE maps to COUNT(DISTINCT ...); everything else is assumed
        # to name a SQL aggregate function directly.
        if node.func.name == "UNIQUE":
            agg_func = lambda x: sa.func.COUNT(sa.distinct(x))
        else:
            agg_func = getattr(sa.func, node.func.name)
        agg_col = [c for c in node.relation if c.name == node.field]
        assert len(agg_col) == 1, str(agg_col)
        agg_col = agg_col[0]
        return [agg_func(agg_col).label(node.out_field)]

    def visit_select(self, node):
        # Selection (filtering) does not change the column list.
        return node.relation

    def visit_bool_op(self, node):
        assert len(node.left) == 1, node
        assert len(node.right) == 1, node
        if node.op_code == constants.OpCodes.AND:
            f = sa.and_
        elif node.op_code == constants.OpCodes.OR:
            f = sa.or_
        else:
            f = constants.PYTHON_OPERATORS[node.op_code]
        return [f(node.left[0], node.right[0])]

    def visit_bool_constant(self, node):
        return [sa.sql.expression.literal(node.value)]
class SAFromBuilder(PostNodeVisitor):
    """Post-order visitor collecting the relations a subtree reads from."""
    def __init__(self, engine):
        self.engine = engine
    def visit_constant(self, node):
        # Literals read from no relation.
        return []
    def visit_relation(self, node):
        sa_relation = self.engine.get_relation(node.name)
        return [sa_relation]
    def visit_join(self, node):
        # De-duplicated union of both sides (order is not preserved).
        return list(set(node.left).union(set(node.right)))
    def visit_select(self, node):
        return node.relation
    def visit_project(self, node):
        return node.relation
    def visit_rename(self, node):
        return node.relation
    def visit_arithmetic(self, node):
        return node.relation
    def visit_agg(self, node):
        return node.relation
class SAWhereBuilder(PostNodeVisitor):
    """Post-order visitor collecting WHERE-clause expressions: each select
    node appends its boolean expression to the accumulated list."""
    def __init__(self, engine):
        self.engine = engine
    def visit_constant(self, node):
        return []
    def visit_relation(self, node):
        return []
    def visit_project(self, node):
        return node.relation
    def visit_join(self, node):
        return node.left + node.right
    def visit_select(self, node):
        # The select's predicate becomes one more WHERE condition.
        return node.relation + [node.bool_expr]
    def visit_rename(self, node):
        return node.relation
    def visit_arithmetic(self, node):
        return node.relation
    def visit_agg(self, node):
        return node.relation
class SABoolBuilder(PreNodeVisitor):
    """Pre-order pass that compiles each select node's boolean AST into a
    single SQLAlchemy expression before the WHERE collection pass runs."""
    def __init__(self, engine):
        self.engine = engine
    def visit_select(self, node):
        bool_expr = SASelectBuilder(self.engine).visit(node.bool_expr)[0]
        return relalg.SelectNode(
            node.relation,
            bool_expr
        )
class PandasBuilder(PostNodeVisitor):
    """Placeholder for a planned pandas-based plan builder."""
    pass

class DataSourceFinder(PostNodeVisitor):
    """Placeholder for a planned visitor that finds a subtree's sources."""
    pass
class QueryPlanner(PreNodeVisitor):
    """Turns a relational-algebra tree into a physical plan.

    Currently everything is pushed into a single SQLAlchemy query, except
    arithmetic over two aggregates, which becomes a pandas merge followed
    by a pandas arithmetic step.
    """

    def __init__(self, engine):
        self.engine = engine

    def visit_arithmetic(self, node):
        # Arithmetic between two aggregated relations cannot be expressed
        # by the single-query shortcut below; plan it in pandas.
        if isinstance(node.relation, relalg.JoinNode):
            if (isinstance(node.relation.left, relalg.AggNode) and
                    isinstance(node.relation.right, relalg.AggNode)):
                return PandasArith(
                    PandasMerge(
                        node.relation.left,
                        node.relation.right),
                    node.out_field, node.left_field, node.right_field, node.op_code
                )
        return self.default_visit(node)

    def visit_pandas_arith(self, node):
        # Already a physical node; pass through untouched.
        return node

    def visit_pandas_merge(self, node):
        return node

    def default_visit(self, node):
        # Shortcut for now: compile the whole subtree into one SA query.
        selects = SASelectBuilder(self.engine).visit(node)
        froms = SAFromBuilder(self.engine).visit(node)
        wheres = SAWhereBuilder(self.engine).visit(
            SABoolBuilder(self.engine).visit(node)
        )
        # Build the link graph between the participating relations.
        nodes = {}
        for f in froms:
            for g in froms:
                nodes.setdefault(f, [])
                nodes[f].extend([x[1] for x in
                                 self.engine.find_links_between(f, g)])
        relations = []
        # Order relations along their links (DFS topological order) so
        # every join has a predecessor to join against; cycles abort.
        # Note: _visit is redefined per call, so the mutable default is
        # not shared across default_visit invocations.
        def _visit(node, visited=[]):
            if node in visited:
                raise Exception("Circular Dependency")
            if node not in relations:
                for r in nodes:
                    edges = self.engine.find_links_between(node, r)
                    if edges:
                        _visit(r, visited + [node])
                relations.insert(0, node)
        for n in nodes:
            if n not in relations:
                _visit(n)
        joins = relations[0].table_clause
        for f in relations[1:]:
            links = []
            for x in relations:
                links = self.engine.find_links_between(x, f)
                if links:
                    break
            columns = [(a.get_sa_column(), b.get_sa_column())
                       for a, b in links]
            if columns:
                join_cond = functools.reduce(sa.and_,
                                             [a == b for a, b in columns])
            else:
                # No link columns: degrade to a cross join.
                join_cond = sa.sql.expression.literal(True)
            joins = joins.join(f.table_clause, join_cond)
        query = sa.select(selects).select_from(joins)
        for where in wheres:
            query = query.where(where)
        return SAQuery(query, self.engine.db)
        # BUG FIX (dead code removed): the original continued past this
        # return with an unreachable multi-source branch containing
        # NameErrors ("source = source[0]", undefined "SADataStore" and
        # "SAQueryBuilder"); the unused "first_from = froms[0]" and an
        # overwritten first "query = sa.select(selects)" were dropped too.
class QueryExecutor(PostNodeVisitor):
    """
    In: physical plan nodes
    Out: pandas DataFrames
    """

    def __init__(self, engine, group_by=()):
        # BUG FIX: the original default was a mutable (and wrongly-typed)
        # {}; group_by is actually a sequence of (relation, column) pairs,
        # so an empty tuple is the safe, equivalent default.
        self.engine = engine
        self.group_by = group_by

    def visit_sa_query(self, node):
        """Run the SQLAlchemy query (with any group-by columns appended)
        and return the result as a DataFrame."""
        query = node.query
        for k, v in self.group_by:
            rel = self.engine.get_relation(k)
            col = getattr(rel.table_clause.c, v)
            query = query.column(col)
            query = query.group_by(col).order_by(col)
        connection = node.db.connect()
        try:
            return pd.read_sql_query(query, connection)
        finally:
            # BUG FIX: always release the connection -- the original
            # leaked it when read_sql_query raised.
            connection.close()

    def visit_pandas_merge(self, node):
        if len(self.group_by) == 0:
            # Ungrouped aggregates are single-row frames: align on index.
            return pd.merge(node.left, node.right, left_index=True,
                            right_index=True)
        else:
            return pd.merge(node.left, node.right)

    def visit_pandas_arith(self, node):
        """Apply the arithmetic op column-wise, keeping only the group-by
        columns plus the output field."""
        f = constants.PYTHON_OPERATORS[node.op_code]
        result = node.df.copy()
        result[node.out_field] = f(node.df[node.left_col], node.df[node.right_col])
        group_cols = [v for k, v in self.group_by] + [node.out_field]
        return result[group_cols]

    def visit_pandas(self, node):
        pass
| {
"repo_name": "enderlabs/gavl",
"path": "gavl/engine.py",
"copies": "1",
"size": "12900",
"license": "apache-2.0",
"hash": -291037946808374700,
"line_mean": 27.9237668161,
"line_max": 83,
"alpha_frac": 0.5905426357,
"autogenerated": false,
"ratio": 3.8313038313038312,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4921846467003831,
"avg_score": null,
"num_lines": null
} |
# as_nrf_test.py Test script for as_nrf_stream
# (C) Peter Hinch 2020
# Released under the MIT licence
import uasyncio as asyncio
import ujson
import time
from as_nrf_stream import Master, Slave
from asconfig import config_master, config_slave # Hardware configuration
try:
    from pyb import LED
except ImportError:  # Non-pyboard platform: dummy no-op LED
    class LED:
        # BUG FIX: accept the LED-number argument.  Without an __init__
        # the fallback calls LED(1)/LED(2) below raise TypeError, so the
        # script crashed at import on every non-pyboard platform.
        def __init__(self, n=None):
            pass
        on = lambda _ : None
        off = lambda _ : None
        toggle = lambda _ : None

led = LED(1)  # Red lit during an outage.
green = LED(2)  # Message received
missed = 0  # Non-sequential records
outages = 0
tstart = 0
# Generator produces variable length strings: test for issues mapping onto
# nRF24l01 fixed size 32 byte records.
def gen_str(maxlen=65):
    """Yield an endless stream of strings 'a', 'ab', 'abc', ... growing to
    *maxlen* characters, then starting over.  Letters cycle a..z.  Used to
    exercise mapping of variable-length payloads onto the nRF24L01's fixed
    32-byte records."""
    first, last = ord('a'), ord('z')
    while True:
        code = first
        text = ''
        while len(text) < maxlen:
            text += chr(code)
            yield text
            code = code + 1 if code < last else first
async def sender(device, interval):
    """Every *interval* ms, send one newline-terminated JSON record of the
    form [seq, local_missed, stats, text] over *device*."""
    gs = gen_str()
    ds = [0, 0, [], '']  # Data object for transmission
    swriter = asyncio.StreamWriter(device, {})
    while True:
        s = ''.join((ujson.dumps(ds), '\n'))
        swriter.write(s.encode())  # convert to bytes
        await swriter.drain()
        await asyncio.sleep_ms(interval)
        ds[0] += 1  # Record number
        ds[1] = missed  # Send local missed record count to remote
        ds[2] = device.stats()
        ds[3] = next(gs)  # Range of possible string lengths
async def receiver(device):
    """Consume newline-terminated JSON records from *device*, track missed
    (non-sequential) record numbers, and print statistics every 20th
    record."""
    global missed
    msg = 'Missed record count: {:3d} (local) {:3d} (remote).'
    tmsg = 'Uptime: {:7.2f}hrs Outages: {:3d}'
    smsg = '{} statistics. Timeouts: RX {} TX {} Received packets: All {} Non-duplicate data {}'
    sreader = asyncio.StreamReader(device)
    x = 0  # modulo-20 record counter driving the periodic stats printout
    last = None  # Record no. of last received data
    while True:
        res = await sreader.readline()  # Can return b''
        if res:
            green.toggle()
            try:
                dat = ujson.loads(res)
            except ValueError:  # Extremely rare case of data corruption. See docs.
                print('JSON error', res)
            else:
                print('Received record no: {:5d} text: {:s}'.format(dat[0], dat[3]))
                # A gap in record numbers means at least one record was lost.
                if last is not None and (last + 1) != dat[0]:
                    missed += 1
                last = dat[0]
                x += 1
                x %= 20
                if not x:
                    print(msg.format(missed, dat[1]))
                    print(tmsg.format((time.time() - tstart)/3600, outages))
                    if isinstance(dat[2], list):
                        print(smsg.format('Remote', *dat[2]))
                    local_stats = device.stats()
                    if isinstance(local_stats, list):
                        print(smsg.format('Local', *local_stats))
async def fail_detect(device):
    """Monitor link health: if nothing has been received for >5s, count an
    outage and light the red LED until traffic resumes."""
    global outages
    while True:
        if device.t_last_ms() > 5000:
            outages += 1
            print('Remote outage')
            led.on()
            # Wait for traffic to resume before re-arming detection.
            while device.t_last_ms() > 5000:
                await asyncio.sleep(1)
            print('Remote has reconnected')
            led.off()
        await asyncio.sleep(1)
async def main(master):
    """Create the Master or Slave radio device and run sender, receiver
    and outage monitor concurrently."""
    global tstart
    tstart = time.time()
    # This line is the only *necessary* difference between master and slave:
    device = Master(config_master) if master else Slave(config_slave)
    # But the script uses different send periods to test for timing issues:
    asyncio.create_task(sender(device, 2000 if master else 1777))
    asyncio.create_task(receiver(device))
    await fail_detect(device)
def test(master):
    """Entry point: run the test as master (True) or slave (False)."""
    try:
        asyncio.run(main(master))
    finally:  # Reset uasyncio in case of KeyboardInterrupt
        asyncio.new_event_loop()
# Usage banner printed when the module is imported on the device.
msg = '''Test script for as_nrf_stream driver for nRF24l01 radios.
On master issue
as_nrf_test.test(True)
On slave issue
as_nrf_test.test(False)
'''
print(msg)
| {
"repo_name": "peterhinch/micropython-radio",
"path": "async/as_nrf_test.py",
"copies": "1",
"size": "3926",
"license": "mit",
"hash": -5562622274729315000,
"line_mean": 31.9915966387,
"line_max": 96,
"alpha_frac": 0.5748853795,
"autogenerated": false,
"ratio": 3.6725912067352664,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47474765862352664,
"avg_score": null,
"num_lines": null
} |
"""A socket for sending and receiving s-expressions"""
from socket import *
_socket =socket
from pisces import spkilib
def socket(domain, type, proto=None):
    """Create a raw socket wrapped in a SexpSocket.

    Drop-in replacement for socket.socket; 'type' intentionally shadows
    the builtin to mirror the stdlib signature.
    """
    if proto is None:
        sock = _socket(domain, type)
    else:
        sock = _socket(domain, type, proto)
    return SexpSocket(sock)
class SexpSocket:
    """Wraps a raw socket with s-expression framing (canonical encoding)."""
    SEXP_BUF_SIZE = 1024  # recv chunk size
    VERBOSE = 0  # set non-zero to pretty-print traffic for debugging

    def __init__(self, sock):
        self.sock = sock
        self.buf = ''  # unparsed bytes carried over between reads

    def send_sexp(self, obj):
        """Encode a SPKI object and send it over the socket"""
        if self.VERBOSE:
            print "send"
            print spkilib.sexp.pprint(obj.sexp())
            print
        self.sock.send(obj.sexp().encode_canonical())

    def recv_sexp(self):
        """Read a full sexp from the socket if possible"""
        # Try the buffered leftover first before touching the socket.
        if self.buf:
            sexp, self.buf = parseSexp(self.buf)
            if sexp:
                return sexp
        buf = self.recv(self.SEXP_BUF_SIZE)
        if buf == '':
            raise EOFError
        self.buf = self.buf + buf
        sexp, self.buf = parseSexp(self.buf)
        return sexp

    def read_sexp(self):
        """Read a full sexp from the socket, blocking if necessary"""
        sexp = None
        try:
            while sexp is None:
                sexp = self.recv_sexp()
        except EOFError:
            return None
        if self.VERBOSE:
            print "read"
            print spkilib.sexp.pprint(sexp)
            print
        return sexp

    def __getattr__(self, attr):
        # Delegate everything else (recv, close, ...) to the raw socket.
        return getattr(self.sock, attr)
class ExtractableSexp(spkilib.sexp.SExp):
    """A subclass that makes it easier to read sexps off a socket

    The standard sexp parser assumes that it will be given a chunk of
    data that exactly contains an sexp.  It will either parse it or
    raise an exception, and it hides any data left over after the sexp
    is parsed.  None of this is very helpful when reading data off a
    socket, because there is no guarantee that any particular recv call
    contains exactly a complete sexp.

    It would probably be most helpful to have a parser that can be
    called with a partial sexp and then called again with more input.
    That's a little harder, so it is punted on for now.
    """
    def _parse_canonical(self, canon):
        spkilib.sexp.SExp._parse_canonical(self, canon)
        # Stash whatever followed the parsed sexp for the caller.
        # NOTE(review): the "+ 1" assumes _consumed is the index of the
        # last consumed byte rather than a count -- confirm against
        # spkilib.sexp.SExp.
        self.__unparsed = canon[self._consumed + 1:]
    def getUnparsedData(self):
        # Leftover bytes that were not part of the parsed sexp.
        return self.__unparsed
def parseSexp(buf):
    """Returns an Sexp object and a string of leftover data

    Tries to parse the buffer.  If it doesn't contain a full sexp, it
    will return None, buf.
    """
    try:
        s = ExtractableSexp(buf)
    except spkilib.sexp.ParseError:
        # Incomplete (or invalid) input: hand the buffer back untouched.
        return None, buf
    else:
        return s, s.getUnparsedData()
| {
"repo_name": "danieljohnlewis/pisces",
"path": "pisces/spkilib/sexpsocket.py",
"copies": "1",
"size": "2794",
"license": "mit",
"hash": -4869357104469675000,
"line_mean": 28.1041666667,
"line_max": 69,
"alpha_frac": 0.6202576951,
"autogenerated": false,
"ratio": 3.7203728362183757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48406305313183756,
"avg_score": null,
"num_lines": null
} |
"""A Socket subclass that adds some serialization methods."""
import zlib
import pickle
import numpy
import zmq
class SerializingSocket(zmq.Socket):
    """A zmq.Socket with extra serialization helpers.

    send_zipped_pickle is just like send_pyobj, but zlib-compresses the
    pickle stream before sending.  send_array sends numpy arrays together
    with the metadata (dtype, shape) needed to reconstruct them on the
    other side.
    """

    def send_zipped_pickle(self, obj, flags=0, protocol=-1):
        """pack and compress an object with pickle and zlib."""
        compressed = zlib.compress(pickle.dumps(obj, protocol))
        print('zipped pickle is %i bytes' % len(compressed))
        return self.send(compressed, flags=flags)

    def recv_zipped_pickle(self, flags=0):
        """reconstruct a Python object sent with zipped_pickle"""
        return pickle.loads(zlib.decompress(self.recv(flags)))

    def send_array(self, A, flags=0, copy=True, track=False):
        """send a numpy array with metadata"""
        header = {'dtype': str(A.dtype), 'shape': A.shape}
        # Header travels as a JSON frame ahead of the raw array bytes.
        self.send_json(header, flags | zmq.SNDMORE)
        return self.send(A, flags, copy=copy, track=track)

    def recv_array(self, flags=0, copy=True, track=False):
        """recv a numpy array"""
        header = self.recv_json(flags=flags)
        payload = self.recv(flags=flags, copy=copy, track=track)
        flat = numpy.frombuffer(payload, dtype=header['dtype'])
        return flat.reshape(header['shape'])
class SerializingContext(zmq.Context):
    # Sockets created from this context are SerializingSockets.
    _socket_class = SerializingSocket
def main():
    """Self-test: round-trip a numpy array over an inproc REQ/REP pair
    using both serialization schemes and report the results."""
    ctx = SerializingContext()
    req = ctx.socket(zmq.REQ)
    rep = ctx.socket(zmq.REP)
    rep.bind('inproc://a')
    req.connect('inproc://a')
    A = numpy.ones((1024,1024))
    print ("Array is %i bytes" % (A.size * A.itemsize))
    # send/recv with pickle+zip
    req.send_zipped_pickle(A)
    B = rep.recv_zipped_pickle()
    # now try non-copying version
    rep.send_array(A, copy=False)
    C = req.recv_array(copy=False)
    print ("Checking zipped pickle...")
    print ("Okay" if (A==B).all() else "Failed")
    print ("Checking send_array...")
    print ("Okay" if (C==B).all() else "Failed")
if __name__ == '__main__':
main()
| {
"repo_name": "Mustard-Systems-Ltd/pyzmq",
"path": "examples/serialization/serialsocket.py",
"copies": "6",
"size": "2319",
"license": "bsd-3-clause",
"hash": -4013049703060465700,
"line_mean": 30.3378378378,
"line_max": 65,
"alpha_frac": 0.6188012074,
"autogenerated": false,
"ratio": 3.5458715596330275,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02304504625772627,
"num_lines": 74
} |
# a socks5 proxy with rule-based forwarding
import logging
import sys
import os
import re
from urlparse import urlparse
from sys import platform as _platform
if _platform == "linux" or _platform == "linux2":
os.environ['GEVENT_RESOLVER'] = "ares"
from smart_relay import SmartRelayFactory, RESocksMatcher, ForwardDestination
from server import SocksServer
def usage(f):
    # Print usage help to file object *f* (Python 2 print-to-file syntax).
    print >> f, """
Usage: python smartproxy.py localip localport
"""
def main():
    """Parse argv, configure logging, and run a rule-based SOCKS5 proxy
    that forwards matching hosts through an upstream SOCKS5 server."""
    if len(sys.argv) < 3:
        usage(f=sys.stderr)
        sys.exit(-1)
    logging.basicConfig(
        format='[%(asctime)s][%(name)s][%(levelname)s] - %(message)s',
        datefmt='%Y-%d-%m %H:%M:%S',
        level=logging.DEBUG,
    )
    localip = sys.argv[1]
    localport = int(sys.argv[2])
    # Forward *.whereisip.net and *.google.com through the local SOCKS5
    # upstream at 127.0.0.1:1080; each rule key is a (host, ip, port)
    # regex triple.
    dst = ForwardDestination("socks5", urlparse('socks5://127.0.0.1:1080/'))
    rules = {
        (re.compile(r'.*\.whereisip\.net$'), re.compile(r'.*'), re.compile(r'.*')): dst,
        (re.compile(r'.*\.google\.com$'), re.compile(r'.*'), re.compile(r'.*')): dst,
    }
    matcher = RESocksMatcher(rules)
    relay = SmartRelayFactory(matcher)
    socks = SocksServer(localip, localport, relay)
    socks.run()
if __name__ == '__main__':
main()
| {
"repo_name": "Jonavin/firefly-proxy",
"path": "gsocks/smartproxy.py",
"copies": "6",
"size": "1251",
"license": "bsd-2-clause",
"hash": -3410202184097716000,
"line_mean": 26.2173913043,
"line_max": 88,
"alpha_frac": 0.6059152678,
"autogenerated": false,
"ratio": 3.3449197860962565,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.029787355638856986,
"num_lines": 46
} |
# a socks proxy forwarding to a remote socks through meek (i.e., HTTP transport)
import logging
import os
import sys
from sys import platform as _platform
if _platform == "linux" or _platform == "linux2":
os.environ['GEVENT_RESOLVER'] = "ares"
from server import SocksServer
from relay import MeekRelayFactory, Relay
def usage(f):
    """Write the command-line usage text to the stream *f*.

    Fix: the original used the Python-2-only ``print >> f`` chevron syntax;
    a plain ``f.write`` is equivalent (same bytes, including the trailing
    newline ``print`` appended) and works on both Python 2 and 3.
    """
    f.write("""
Usage: python meeksocks-client.py localip localport relays
Format of relays:
    url1,host1,property1:property2|url2,host2,|...
Example:
    https://www.google.com/,firefly-relay-1.appspot.com,stream
""" + "\n")
def main():
    """Entry point: run a local SOCKS server that relays through meek.

    argv: localip localport relays, where ``relays`` is a '|'-separated list
    of ``fronturl,hostname,prop1:prop2`` entries (see usage()).
    """
    if len(sys.argv) < 4:
        usage(f = sys.stderr)
        sys.exit(-1)
    localip = sys.argv[1]
    localport = int(sys.argv[2])
    relays = []
    # Parse each relay entry: fronting URL, real hostname, and properties.
    for entry in sys.argv[3].split("|"):
        fronturl, hostname, properties = entry.split(",")
        properties = properties.split(":")
        relays.append(Relay(
            fronturl=fronturl, hostname=hostname, properties=properties, failure=0))
    logging.basicConfig(
        format='[%(asctime)s][%(name)s][%(levelname)s] - %(message)s',
        # NOTE(review): '%Y-%d-%m' puts the day before the month -- confirm
        # this ordering is intentional.
        datefmt='%Y-%d-%m %H:%M:%S',
        level=logging.DEBUG,
    )
    meekfactory = MeekRelayFactory(relays)
    socks = SocksServer(localip, localport, meekfactory)
    # Blocks serving the proxy until interrupted.
    socks.run()
if __name__ == '__main__':
    main()
| {
"repo_name": "yinghuocho/meeksocks-py",
"path": "client.py",
"copies": "6",
"size": "1354",
"license": "bsd-2-clause",
"hash": -2168233039744343800,
"line_mean": 27.2083333333,
"line_max": 84,
"alpha_frac": 0.6196454948,
"autogenerated": false,
"ratio": 3.359801488833747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6979446983633747,
"avg_score": null,
"num_lines": null
} |
#As of 09-14-2016, twitch API now requires Client-ID with all requests. You will need to create one from here:
# https://www.twitch.tv/settings/connections
# complete twitch.py rewrite coming soon™
import requests
import sopel
import re
from sopel.tools import SopelMemory
import datetime
# TODO: Make these config options c:
# Twitch API credential (required for all kraken requests since 2016-09-14).
twitchclientid = "YOUR_API_KEY"
# Channel where go-live announcements are posted.
announce_chan = "#pony.ql"
logchannel = "#willie-testing" #used for live logging certain issues that plague this module
# Twitch channels polled by monitor_streamers and the default for .twitch.
# NOTE(review): "quakecon" appears twice in this list.
streamers = [
    "qlrankstv",
    "phixxion",
    "hc_mikle",
    "beatheadstv",
    "ijustwantagf",
    "phgp_tv",
    "zlive",
    "cooller",
    "h3h3productions",
    "cypheronline",
    "quakecon",
    "dreamhackql",
    "k1llsen_",
    "runterfallnoob",
    "flyfunnyb",
    "strenx_",
    "gamesdonequick",
    "mikletv",
    "freddurst",
    "carkasjak",
    "blizzard",
    "scglive",
    "linustech",
    "quakecon",
    "ryuquezacotl",
    "lofihiphop",
    "shane_rapha",
    "awesomehardware",
    "deadmau5",
    "ridelore",
    "luke_lafr",
    "quakechampions",
    "warcraft",
    "iverass",
    "esamarathon"
]
# Smashcast (formerly hitbox) channels; placeholder keeps the list non-empty.
hstreamers = [
    'mister_placeholder_3000'
]
#end config
# URL patterns for links pasted into channels; the (?!.*\/v\/) guard skips VODs.
twitchregex = re.compile('(?!.*\/v\/).*https?:\/\/(?:www\.)?twitch.tv\/(.*?)\/?(?:(?=[\s])|$)')
mixerregex = re.compile('(?!.*\/v\/).*https?:\/\/(?:www\.)?mixer.com\/(.*?)\/?(?:(?=[\s])|$)')
def setup(bot):
    """Register the twitch/mixer URL patterns with sopel's URL-callback table."""
    if not bot.memory.contains('url_callbacks'):
        bot.memory['url_callbacks'] = SopelMemory()
    callbacks = bot.memory['url_callbacks']
    callbacks[twitchregex] = twitchirc
    callbacks[mixerregex] = mixerirc
def shutdown(bot):
    """Unregister the URL callbacks installed by setup()."""
    for pattern in (twitchregex, mixerregex):
        del bot.memory['url_callbacks'][pattern]
# Channels currently known to be live, keyed by channel name; each value is
# (game, state-dict).  Used to announce a go-live event only once.
currently_streaming = {}
currently_hstreaming = {}
currently_ystreaming = {}
@sopel.module.interval(20)
def monitor_streamers(bot):
    """Poll Twitch and Smashcast every 20s and announce channels going live.

    Live channels are tracked in the module-level currently_streaming /
    currently_hstreaming dicts so each go-live is announced only once; a
    channel that disappears from the API is dropped from tracking after its
    cooldown counter exceeds 130.
    """
    streaming_names = []
    try:
        streaming = requests.get('https://api.twitch.tv/kraken/streams', params={"channel": ",".join(streamers)}, headers={"Client-ID":twitchclientid}).json()
    except Exception as e:
        return print("There was an error reading twitch API {0}".format(e))
    results = []
    if streaming.get("streams"):
        for streamer in streaming["streams"]:
            streamer_name = streamer["channel"]["name"]
            streamer_game = streamer["channel"]["game"]
            streamer_url = streamer["channel"]["url"]
            streamer_status = streamer["channel"]["status"]
            streamer_starttime = datetime.datetime.strptime(streamer['created_at'], '%Y-%m-%dT%H:%M:%SZ')
            streamer_viewers = streamer["viewers"]
            # Announce only channels we haven't already seen live.
            if streamer_name not in currently_streaming:
                currently_streaming[streamer_name] = streamer_game, {'cooldown': 0, 'starttime': streamer_starttime}
                results.append("%s just went live playing %s! %s - %s" % (streamer_name,streamer_game,streamer_status,streamer_url))
            streaming_names.append(streamer_name)
    if results:
        bot.msg(announce_chan, ", ".join(results))
    # Remove people who stopped streaming
    for streamer in list(currently_streaming):
        if streamer not in streaming_names:
            currently_streaming[streamer][1]['cooldown'] += 10
            if currently_streaming[streamer][1]['cooldown'] > 130:
                bot.msg(logchannel,'{0} was removed from currently_streaming for reaching a cooldown of {1}'.format(streamer,currently_streaming[streamer][1]['cooldown']))
                del currently_streaming[streamer]
    hstreaming_names = []
    hs = ",".join(hstreamers)
    try:
        testingtimeout = datetime.datetime.now()
        hstreaming = requests.get('http://api.smashcast.tv/media/live/{0}'.format(hs),timeout=(1.5,1.5)).json()
    except requests.exceptions.ConnectionError:
        return bot.msg(logchannel,"timeout time: {}".format((datetime.datetime.now() - testingtimeout).total_seconds()))
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.
        return bot.msg(logchannel,"error with smashcast api")
    hresults = []
    if hstreaming.get("livestream"):
        for hstreamer in hstreaming["livestream"]:
            # BUG FIX: was `is "1"` -- an identity check that only appears to
            # compare string values thanks to CPython interning; strings
            # parsed from JSON are not guaranteed to be interned.
            if hstreamer["media_is_live"] == "1":
                hstreamer_name = hstreamer["media_user_name"]
                hstreamer_game = hstreamer["category_name"]
                hstreamer_url = hstreamer["channel"]["channel_link"]
                hstreamer_viewers = hstreamer["media_views"]
                if hstreamer_name not in currently_hstreaming:
                    currently_hstreaming[hstreamer_name] = hstreamer_game, {'cooldown': 0}
                    hresults.append("%s just went live playing %s! %s - %s viewer%s" % (hstreamer_name,hstreamer_game,hstreamer_url,hstreamer_viewers,"s" if hstreamer_viewers != 1 else ""))
                hstreaming_names.append(hstreamer_name)
    if hresults:
        bot.msg(announce_chan, ", ".join(hresults))
    for hstreamer in list(currently_hstreaming):
        if hstreamer not in hstreaming_names:
            currently_hstreaming[hstreamer][1]['cooldown'] += 10
            if currently_hstreaming[hstreamer][1]['cooldown'] > 130:
                del currently_hstreaming[hstreamer]
@sopel.module.commands('twitchtv','twitch')
@sopel.module.example('.twitchtv or .twitch username')
def streamer_status(bot, trigger):
    """Report live status for the given channels (or the default list)."""
    requested = trigger.group(2)
    channels = streamers if requested is None else requested.split(" ")
    payload = requests.get('https://api.twitch.tv/kraken/streams',
                           params={"channel": ",".join(channels)},
                           headers={"Client-ID": twitchclientid}).json()
    lines = []
    for stream in payload.get("streams") or []:
        chan = stream["channel"]
        viewers = stream["viewers"]
        lines.append("%s is playing %s %s - %s - %s viewer%s" % (
            chan["name"], chan["game"], chan["url"], chan["status"],
            viewers, "s" if viewers != 1 else ""))
    if lines:
        bot.say(", ".join(lines))
    else:
        bot.say("Nobody is currently streaming.")
@sopel.module.commands('sc','smashcast')
@sopel.module.example('.sc or .sc username')
def hstreamer_status(bot, trigger):
    """Report which Smashcast channels (default list or given names) are live."""
    hstreamer_name = trigger.group(2)
    query = ",".join(hstreamers) if hstreamer_name is None else hstreamer_name
    hstreaming = requests.get('http://api.smashcast.tv/media/live/{0}'.format(query)).json()
    hresults = []
    # Robustness: use .get so a response without a "livestream" key doesn't
    # raise KeyError (the other handlers in this module already guard so).
    for hstreamer in hstreaming.get("livestream") or []:
        # BUG FIX: was `is "1"` -- identity, not value, comparison; strings
        # decoded from JSON are not guaranteed to be interned.
        if hstreamer["media_is_live"] == "1":
            hstreamer_name = hstreamer["media_user_name"]
            hstreamer_game = hstreamer["category_name"]
            hstreamer_url = hstreamer["channel"]["channel_link"]
            hstreamer_viewers = hstreamer["media_views"]
            hresults.append("%s is playing %s %s - %s viewer%s" % (hstreamer_name,
                                                                   hstreamer_game,
                                                                   hstreamer_url,
                                                                   hstreamer_viewers,
                                                                   "s" if hstreamer_viewers != 1 else "" ))
    if hresults:
        bot.say(", ".join(hresults))
    else:
        bot.say("Nobody is currently streaming.")
@sopel.module.commands('tv')
@sopel.module.example('.tv')
def allstreamer_status(bot, trigger):
    """Report live channels from both Twitch and Smashcast in one reply."""
    streamer_name = trigger.group(2)
    query = streamers if streamer_name is None else streamer_name.split(" ")
    streaming = requests.get('https://api.twitch.tv/kraken/streams', params={"channel": ",".join(query)}, headers={"Client-ID":twitchclientid}).json()
    results = []
    if streaming.get("streams"):
        for streamer in streaming["streams"]:
            streamer_name = streamer["channel"]["name"]
            streamer_game = streamer["channel"]["game"]
            streamer_url = streamer["channel"]["url"]
            streamer_viewers = streamer["viewers"]
            results.append("%s is playing %s %s - %s viewer%s" % (streamer_name,
                                                                  streamer_game,
                                                                  streamer_url,
                                                                  streamer_viewers,
                                                                  "s" if streamer_viewers != 1 else "" ))
    query = ",".join(hstreamers)
    hstreaming = requests.get('http://api.smashcast.tv/media/live/{0}'.format(query)).json()
    # (Removed the unused `hresults` local; Smashcast lines go into `results`.)
    if hstreaming.get("livestream"):
        for hstreamer in hstreaming["livestream"]:
            # BUG FIX: was `is "1"` -- identity, not value, comparison; strings
            # decoded from JSON are not guaranteed to be interned.
            if hstreamer["media_is_live"] == "1":
                hstreamer_name = hstreamer["media_user_name"]
                hstreamer_game = hstreamer["category_name"]
                hstreamer_url = hstreamer["channel"]["channel_link"]
                hstreamer_viewers = hstreamer["media_views"]
                results.append("%s is playing %s %s - %s viewer%s" % (hstreamer_name,
                                                                      hstreamer_game,
                                                                      hstreamer_url,
                                                                      hstreamer_viewers,
                                                                      "s" if hstreamer_viewers != 1 else "" ))
    if results:
        bot.say(", ".join(results), max_messages=3)
    else:
        bot.say("Nobody is currently streaming.")
@sopel.module.rule('(?!.*\/v\/).*https?:\/\/(?:www\.)?twitch.tv\/(.*?)\/?(?:(?=[\s])|$)')
def twitchirc(bot, trigger, match = None):
    """Announce stream details when a twitch.tv channel link is posted."""
    match = match or trigger
    name = match.group(1)
    channels = streamers if name is None else name.split(" ")
    payload = requests.get('https://api.twitch.tv/kraken/streams',
                           params={"channel": ",".join(channels)},
                           headers={"Client-ID": twitchclientid}).json()
    lines = []
    for stream in payload.get("streams") or []:
        chan = stream["channel"]
        viewers = stream["viewers"]
        lines.append("%s is playing %s [%s] - %s viewer%s" % (
            chan["name"], chan["game"], chan["status"],
            viewers, "s" if viewers != 1 else ""))
    # Stay silent when nobody is live (a pasted link is not a status query).
    if lines:
        bot.say(", ".join(lines))
@sopel.module.rule('(?!.*\/v\/).*https?:\/\/(?:www\.)?mixer.com\/(.*?)\/?(?:(?=[\s])|$)')
def mixerirc(bot, trigger, match = None):
    """Announce stream details when a mixer.com channel link is posted."""
    match = match or trigger
    channel = match.group(1)
    data = requests.get('https://mixer.com/api/v1/channels/{}'.format(channel)).json()
    if not data:
        # Empty/falsy response: stay silent, as for an unknown channel.
        return
    game = data["type"]["name"] if data.get("type") else "a game"
    viewers = data["viewersCurrent"]
    bot.say("%s is playing %s [%s] - %s viewer%s" % (
        data["token"], game, data["name"],
        viewers, "s" if viewers != 1 else ""))
@sopel.module.commands('debugtv')
def debug(bot, trigger):
    """Dump the live-channel tracking dicts to the log channel."""
    streaming_list = ", ".join(currently_streaming)
    hstreaming_list = ", ".join(currently_hstreaming)
    bot.msg(logchannel, "currently_streaming: {}".format(streaming_list))
    bot.msg(logchannel, "currently_hstreaming: {}".format(hstreaming_list))
| {
"repo_name": "ridelore/sopel-modules",
"path": "twitch.py",
"copies": "2",
"size": "11859",
"license": "apache-2.0",
"hash": -4951539943785539000,
"line_mean": 40.3135888502,
"line_max": 179,
"alpha_frac": 0.5800792781,
"autogenerated": false,
"ratio": 3.6404666871354006,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.52205459652354,
"avg_score": null,
"num_lines": null
} |
# A solution to the British Informatics Olympiad 2011 Question 2
# Scores 24/24
from __future__ import print_function
# Python 2/3 compatibility shim: on Python 2, make input() behave like
# raw_input().
# BUG FIX: this was wrapped in `while True:`, which looped forever at import
# time; the loop is removed, and the bare `except:` is narrowed to NameError
# (what Python 3 raises, since raw_input no longer exists there).
try:
    input = raw_input
except NameError:
    pass
from collections import deque
suits = ['C', 'H', 'S', 'D']
values = ['A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K']
def make_deck(suits, values):
    """Yield every (suit, value) card, grouped by suit in the given order."""
    return ((suit, value) for suit in suits for value in values)
def nice_card(card):
    """Render a (suit, value) card as a compact string, value first: 'AC'."""
    suit, value = card
    return value + suit
def shuffle(deck, shuffle_numbers):
    """Yield the deck in shuffled order.

    For each card dealt, rotate the deque left (shuffle_numbers[k] - 1)
    times, then deal the front card; k cycles through the six numbers.
    """
    assert len(shuffle_numbers) == 6
    idx = 0
    while deck:
        for _ in range(shuffle_numbers[idx] - 1):
            deck.append(deck.popleft())
        yield deck.popleft()
        idx = (idx + 1) % 6
def make_row(shuffled_deck):
    """Lay out the starting row: each card becomes its own one-card pile."""
    return ([card] for card in shuffled_deck)
def can_sit_on(a, b):
    """A pile may move onto another when the top cards share a suit or value."""
    top_suit, top_value = a[0]
    dest_suit, dest_value = b[0]
    return top_suit == dest_suit or top_value == dest_value
def legal_moves(row):
    """Count the legal moves in the row: a pile may move one or three piles
    to its left when can_sit_on() allows it."""
    count = 0
    for i in range(len(row) - 1, 0, -1):
        for offset in (1, 3):
            if i - offset < 0:
                continue
            if can_sit_on(row[i], row[i - offset]):
                count += 1
    return count
def score(row, fr, to):
    """Apply the move pile-fr-onto-pile-to (mutating row) and return the
    number of legal moves in the resulting position."""
    moved = row.pop(fr)
    row[to] = moved + row[to]
    return legal_moves(row)
def strategy_1(row):
    """Apply _strategy_1 until no move remains, then print piles left and the
    top card of the first pile."""
    while _strategy_1(row):
        pass
    print(len(row), nice_card(row[0][0]))
def _strategy_1(row):
    """Make the first legal move found scanning right-to-left (preferring the
    one-pile move over the three-pile move).  Return True if a move was made."""
    for i in range(len(row) - 1, 0, -1):
        for offset in (1, 3):
            if i - offset < 0:
                continue
            if can_sit_on(row[i], row[i - offset]):
                row[i - offset] = row.pop(i) + row[i - offset]
                return True
    return False
def strategy_2(row):
    """Apply _strategy_2 until no move remains, then print piles left and the
    top card of the first pile."""
    while _strategy_2(row):
        pass
    print(len(row), nice_card(row[0][0]))
def _strategy_2(row):
    """Make the legal move that merges the two largest piles (ties resolved in
    the same scan order as _strategy_1).  Return True if a move was made."""
    best = None  # (combined_size, fr, to)
    for i in range(len(row) - 1, 0, -1):
        for offset in (1, 3):
            if i - offset < 0:
                continue
            if can_sit_on(row[i], row[i - offset]):
                combined = len(row[i]) + len(row[i - offset])
                # Strict > keeps the first candidate on ties, as before.
                if best is None or combined > best[0]:
                    best = (combined, i, i - offset)
    if best is None:
        return False
    _, fr, to = best
    row[to] = row.pop(fr) + row[to]
    return True
def strategy_3(row):
    """Apply _strategy_3 until no move remains, then print piles left and the
    top card of the first pile."""
    while _strategy_3(row):
        pass
    print(len(row), nice_card(row[0][0]))
def _strategy_3(row):
    """Make the legal move whose resulting position offers the most legal
    moves (one-ply lookahead via score() on a shallow copy).  Return True if
    a move was made."""
    best = None  # (lookahead_score, fr, to)
    for i in range(len(row) - 1, 0, -1):
        for offset in (1, 3):
            if i - offset < 0:
                continue
            if can_sit_on(row[i], row[i - offset]):
                lookahead = score(list(row), i, i - offset)
                # Strict > keeps the first candidate on ties, as before.
                if best is None or lookahead > best[0]:
                    best = (lookahead, i, i - offset)
    if best is None:
        return False
    _, fr, to = best
    row[to] = row.pop(fr) + row[to]
    return True
# Read the six shuffle numbers from a single stdin line, e.g. "1 2 3 4 5 6".
shuffle_numbers = tuple(int(x) for x in input().split())
deck = list(shuffle(deque(make_deck(suits, values)), shuffle_numbers))
# Report the first and last card of the shuffled deck.
print(nice_card(deck[0]), nice_card(deck[-1]))
row = list(make_row(deck))
# Each strategy gets its own shallow copy of the starting row; the piles
# themselves are rebuilt by concatenation, so the copies stay independent.
# play strategy 1
strategy_1(list(row))
# play strategy 2
strategy_2(list(row))
# play strategy 3
strategy_3(list(row))
| {
"repo_name": "matthewelse/british-informatics-olympiad",
"path": "2011/q2-accordionpatience.py",
"copies": "1",
"size": "4646",
"license": "mit",
"hash": -418890924998253630,
"line_mean": 23.0725388601,
"line_max": 78,
"alpha_frac": 0.4246663797,
"autogenerated": false,
"ratio": 3.5959752321981426,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9517249063168807,
"avg_score": 0.0006785097458672587,
"num_lines": 193
} |
# A solution to the British Informatics Olympiad 2011 Question 3
# Scores 24/24
from __future__ import print_function
# Python 2/3 compatibility shim: on Python 2, make input() behave like
# raw_input().
# Fix: the bare `except:` (which also swallowed SystemExit/KeyboardInterrupt)
# is narrowed to NameError, the exception Python 3 actually raises here.
try:
    input = raw_input
except NameError:
    pass
def number_with_n_digits(n):
    """Return how many upside-down numbers have exactly n digits: the first
    half of the digits (9 choices each) determines the rest."""
    half = n // 2
    return 9 ** half
def nth_with_n_digits(number_of_digits, n):
    """Return the n-th (1-based) upside-down number with the given digit count.

    Odd lengths are the matching even-length number with a '5' inserted in
    the middle; even lengths are built pairwise from the base-9 digits of
    n - 1, each outer digit d paired with its complement 10 - d.
    """
    if number_of_digits == 0:
        return ""
    if number_of_digits % 2:
        inner = nth_with_n_digits(number_of_digits - 1, n)
        half = len(inner) // 2
        return inner[:half] + "5" + inner[half:]
    remaining = n - 1  # switch to 0-based for the base-9 expansion
    digits = ""
    for _ in range(number_of_digits // 2):
        remaining, d = divmod(remaining, 9)
        digits = str(d + 1) + digits + str(9 - d)
    return digits
# Read the target index (1-based) from stdin.
nth = int(input())
#nth = 11
# Find the digit count whose cumulative total first reaches nth.
sum_so_far = 0
number_of_digits = 0
while sum_so_far < nth:
    number_of_digits += 1
    sum_so_far += number_with_n_digits(number_of_digits)
#print("The number has %i digits!" % number_of_digits)
#print(nth - sum_so_far + number_with_n_digits(number_of_digits))
# Convert nth to its rank within that digit count and print the answer.
print(nth_with_n_digits(number_of_digits, nth - sum_so_far + number_with_n_digits(number_of_digits)))
| {
"repo_name": "matthewelse/british-informatics-olympiad",
"path": "2011/q3-upsidedown.py",
"copies": "1",
"size": "1050",
"license": "mit",
"hash": -6917340826105736000,
"line_mean": 22.3333333333,
"line_max": 101,
"alpha_frac": 0.6514285714,
"autogenerated": false,
"ratio": 2.5609756097560976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37124041811560976,
"avg_score": null,
"num_lines": null
} |
# A solution to the British Informatics Olympiad 2012 Question 2
# Scores 19/23 Marks
class Station(object):
    """A junction with an 'up' exit and two lower connections (left/right).

    The points select which lower side a train arriving from above exits by.
    Lazy points settle toward whichever lower side a train last entered from;
    flip-flop (lazy=False) points toggle on every entry from below.
    """
    def __init__(self, name, up, left, right, lazy=True):
        self.name = name
        self.up = up
        self.left = left
        self.right = right
        assert isinstance(lazy, bool), "Lazy must be boolean"
        self.lazy = lazy
        self.downto = self.left
    def _switch(self, entered_from):
        # Lazy points follow the entered side; flip-flop points alternate.
        if self.lazy:
            self.downto = entered_from
        elif self.downto == self.left:
            self.downto = self.right
        else:
            self.downto = self.left
    def enter_left(self):
        self._switch(self.left)
        return self.up
    def enter_right(self):
        self._switch(self.right)
        return self.up
    def enter_front(self):
        # A train from above exits by whichever side the points select.
        return self.downto
    def __str__(self):
        return self.name
# The track layout: each station's up exit and left/right lower connections,
# keyed by station name.
stations = {
    'A': Station("A", "D", "E", "F"),
    'B': Station("B", "C", "G", "H"),
    'C': Station("C", "B", "I", "J"),
    'D': Station("D", "A", "K", "L"),
    'E': Station("E", "A", "M", "N"),
    'F': Station("F", "A", "N", "O"),
    'G': Station("G", "B", "O", "P"),
    'H': Station("H", "B", "P", "Q"),
    'I': Station("I", "C", "Q", "R"),
    'J': Station("J", "C", "R", "S"),
    'K': Station("K", "D", "S", "T"),
    'L': Station("L", "D", "T", "M"),
    'M': Station("M", "U", "L", "E"),
    'N': Station("N", "U", "E", "F"),
    'O': Station("O", "V", "F", "G"),
    'P': Station("P", "V", "G", "H"),
    'Q': Station("Q", "W", "H", "I"),
    'R': Station("R", "W", "I", "J"),
    'S': Station("S", "X", "J", "K"),
    'T': Station("T", "X", "K", "L"),
    'U': Station("U", "V", "M", "N"),
    'V': Station("V", "U", "O", "P"),
    'W': Station("W", "X", "Q", "R"),
    'X': Station("X", "W", "S", "T")
}
# Line 1 of input: the stations whose points are flip-flop rather than lazy.
flipflop = str(raw_input())
for c in flipflop:
    stations[c].lazy = False
del flipflop
# Line 2: the starting position as two station names (previous, next).
direction = str(raw_input())
current = direction[0]
_next = direction[1]
# Line 3: how many station-to-station steps to simulate.
iterations = int(raw_input())
for i in xrange(iterations):
    # Work out where we are entering _next...
    if stations[_next].left == str(current):
        current = _next
        _next = stations[current].enter_left()
    elif stations[_next].right == str(current):
        current = _next
        _next = stations[current].enter_right()
    else:
        # Not a lower connection, so we must be arriving from above.
        current = _next
        _next = stations[current].enter_front()
print current
print _next
| {
"repo_name": "matthewelse/british-informatics-olympiad",
"path": "2012/q2-ontherighttrack.py",
"copies": "1",
"size": "2674",
"license": "mit",
"hash": 8159926709352365000,
"line_mean": 27.1473684211,
"line_max": 64,
"alpha_frac": 0.4569932685,
"autogenerated": false,
"ratio": 2.9910514541387023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8933469823853277,
"avg_score": 0.0029149797570850204,
"num_lines": 95
} |
# A solution to the problem.txt in Python with test cases :)
# To try this on your own simply delete the contents of string_compression and fill it in yourself
# Enter any assumptions you're making about the problem or any notes you want taken into account here
# - Currently this method assumes that you will get a string of letters (no spaces or numbers or other
# characters) as input
# This method requires a string. It returns a compressed string version of the input following the
# spec of the problem in problem.txt
# This method requires a string. It returns a compressed string version of the
# input following the spec of the problem in problem.txt
def string_compression(string):
    """Run-length encode *string*, e.g. 'aaaabbb' -> 'a4b3'.

    Assumes the input contains only letters (no spaces, digits, or other
    characters).  Returns '' for an empty input.
    """
    # BUG FIX: the original tested `len(string) is 0` -- an identity check on
    # an int that only works by CPython small-int caching; use truthiness.
    if not string:
        return ""
    # Collect "letterN" chunks in a list and join once at the end, instead of
    # repeated string concatenation (which is quadratic).
    parts = []
    previous_letter = string[0]
    letter_count = 1  # we have already seen the first letter
    for letter in string[1:]:
        if letter == previous_letter:
            letter_count += 1
        else:
            parts.append(previous_letter + str(letter_count))
            previous_letter = letter
            letter_count = 1
    # Flush the final run before returning.
    parts.append(previous_letter + str(letter_count))
    return "".join(parts)
def run_single_test(test_number, input, expected):
    """Run one compression case and print a PASSED/FAILED line for it."""
    actual = string_compression(input)
    label = "Test " + str(test_number)
    if actual == expected:
        print(label + " PASSED")
    else:
        print(label + " FAILED: expected " + expected + " but got " + actual)
def run_tests():
    """Run the whole compression suite, one numbered case at a time."""
    cases = (
        ("aaaabbb", "a4b3"),                # simple string
        ("", ""),                           # empty string
        ("a", "a1"),                        # single character
        ("abcdefg", "a1b1c1d1e1f1g1"),      # multiple single characters
        ("aaaaaaaabcdddeeeeefeeeeeefeeeeeggggggabc",
         "a8b1c1d3e5f1e6f1e5g6a1b1c1"),     # a more complex string
    )
    for test_number, (given, expected) in enumerate(cases, start=1):
        run_single_test(test_number, given, expected)
# Finally run the tests to see how we did
run_tests() | {
"repo_name": "riggspc/exercises",
"path": "strings/string_compression/solution.py",
"copies": "1",
"size": "2362",
"license": "mit",
"hash": 9042891775661324000,
"line_mean": 37.737704918,
"line_max": 112,
"alpha_frac": 0.6769686706,
"autogenerated": false,
"ratio": 3.8096774193548386,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49866460899548387,
"avg_score": null,
"num_lines": null
} |
"""A solver that alternates between deductive and hypothetical phases:
Deductive phase:
* Generate a list of all possible lines for each row and column.
* For each list, mark or unmark any cell that is in common between all of
the possible lines.
* Iterate to fixity.
Hypothetical phase:
* Find the cell with the fewest possible row and possible column lines.
* Explore each possible value in turn.
"""
import rules.nonogram as rules
# TODO ggould figure out why pycharm dislikes doing these as local imports.
from solver.solver_utils import all_legal_lines
from solver.solver_coroutine import SolverCoroutine, SolutionNotFound
class BackwardChainSolver(SolverCoroutine):
    """A solver (see solver_coroutine.py for API details) that uses
    alternating deductive and recursive phases."""
    def __init__(self, puzzle, initial_solution=None):
        super(BackwardChainSolver, self).__init__(puzzle, initial_solution)
        # Cached state: the current partial solution plus, for each row and
        # column, the list of complete lines still consistent with it.
        self.partial_solution = None
        self.partial_solution_legal_rows = None
        self.partial_solution_legal_cols = None
        self.update_partials(self.initial_solution.clone())
    def update_partials(self, new_partial):
        """Update the solver with a new partial solution, and causes
        regeneration of the cached legal rows/columns."""
        self.partial_solution = new_partial
        # Recompute which complete lines each row/column could still become.
        self.partial_solution_legal_rows = [
            list(all_legal_lines(self.puzzle.row_run_counts[y],
                                 self.partial_solution.row(y)))
            for y in range(self.puzzle.height)]
        self.partial_solution_legal_cols = [
            list(all_legal_lines(self.puzzle.col_run_counts[x],
                                 self.partial_solution.column(x)))
            for x in range(self.puzzle.width)]
    def deduce(self):
        """Change UNKNOWN cells to MARKED or UNMARKED in self.partial_solution
        where that can be inferred from commonalities between the possible
        lines.  Return True if changes were made, to allow the caller to
        iterate this method to fixity."""
        new_partial_solution = self.partial_solution.clone()
        changed = False
        # Column pass: a cell whose value agrees across every legal column
        # line is forced to that value.
        for (x, col) in enumerate(self.partial_solution.columns):
            possible_lines = self.partial_solution_legal_cols[x]
            for y in range(self.puzzle.height):
                if new_partial_solution.cells[x][y] != rules.UNKNOWN:
                    continue
                if len(set(line[y] for line in possible_lines)) == 1:
                    changed = True
                    new_partial_solution.cells[x][y] = possible_lines[0][y]
        # Row pass: same deduction along rows.
        for (y, row) in enumerate(self.partial_solution.rows):
            possible_lines = self.partial_solution_legal_rows[y]
            for x in range(self.puzzle.width):
                if new_partial_solution.cells[x][y] != rules.UNKNOWN:
                    continue
                if len(set(line[x] for line in possible_lines)) == 1:
                    changed = True
                    new_partial_solution.cells[x][y] = possible_lines[0][x]
        if changed:
            self.update_partials(new_partial_solution)
        return changed
    def solve(self):
        """Yield a partial solution from each iteration of deduction, then
        delegate to the solve() coroutines of hypotheses on a chosen cell.
        Returns after a complete solution or after it proves that the
        initial_solution is impossible."""
        yield self.initial_solution
        # Iterate deduction to fixity.
        while self.deduce():
            if not self.partial_solution.correct():
                raise SolutionNotFound("Deduction forced a contradiction")
            if any(len(rows) == 0
                   for rows in self.partial_solution_legal_rows):
                # Deduction created an impossible row.
                raise SolutionNotFound("Deduction created an impossible row")
            if any(len(cols) == 0
                   for cols in self.partial_solution_legal_cols):
                # Deduction created an impossible column.
                raise SolutionNotFound(
                    "Deduction created an impossible column")
            yield self.partial_solution
        # Identify a cell to hypothesize about.
        unknowns = self.partial_solution.unknown_cell_coordinates()
        if not unknowns:
            # Deduction produced a complete solution; we win.
            return
        # Sort unknowns to prefer cases where hypotheses are likely to
        # generate cascading inferences.
        _, speculation_coords = min(
            ((len(self.partial_solution_legal_rows[y]) +
              len(self.partial_solution_legal_cols[x])),
             (x, y))
            for (x, y) in unknowns)
        # Hypothesize a cell value; delegate to a new solver for that
        # hypothesis.
        hypothetical_solvers = []
        # TODO ggould Trying unmarking first on the hunch that unmarks can
        # sometimes get big splitting leverage.  This is a half-baked idea;
        # needs any theoretical or even empirical justification.
        for fn in (rules.NonogramSolution.unmark,
                   rules.NonogramSolution.mark):
            solver = BackwardChainSolver(
                self.puzzle, initial_solution=self.partial_solution.clone())
            partial = solver.partial_solution.clone()
            fn(partial, speculation_coords)
            solver.update_partials(partial)
            hypothetical_solvers.append(solver)
        # TODO ggould Can we sort these solvers sensibly?
        # TODO ggould Is there a way around Global Interpreter Locking to
        # get multicore leverage on this?
        for solver in hypothetical_solvers:
            try:
                yield from solver.solve()
                # Victory! This hypothesis found a correct solution.
                # (A sub-solve that fails raises SolutionNotFound instead of
                # returning, so falling through here means it succeeded.)
                return
            except SolutionNotFound as _:
                pass  # Ignore this and move on to the next.
        raise SolutionNotFound("All hypotheses at %s failed",
                               speculation_coords)
| {
"repo_name": "ggould256/nonogram",
"path": "src/solver/backward_chain_solver.py",
"copies": "1",
"size": "6110",
"license": "apache-2.0",
"hash": 6569717767358210000,
"line_mean": 44.9398496241,
"line_max": 78,
"alpha_frac": 0.6220949264,
"autogenerated": false,
"ratio": 4.3241330502477,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 133
} |
"A sort of application framework for the Mac"
DEBUG=0
from warnings import warnpy3k
warnpy3k("In 3.x, the FrameWork module is removed.", stacklevel=2)
import MacOS
import traceback
from Carbon.AE import *
from Carbon.AppleEvents import *
from Carbon.Ctl import *
from Carbon.Controls import *
from Carbon.Dlg import *
from Carbon.Dialogs import *
from Carbon.Evt import *
from Carbon.Events import *
from Carbon.Help import *
from Carbon.Menu import *
from Carbon.Menus import *
from Carbon.Qd import *
from Carbon.QuickDraw import *
#from Carbon.Res import *
#from Carbon.Resources import *
#from Carbon.Snd import *
#from Carbon.Sound import *
from Carbon.Win import *
from Carbon.Windows import *
import types
import EasyDialogs
try:
MyFrontWindow = FrontNonFloatingWindow
except NameError:
MyFrontWindow = FrontWindow
kHighLevelEvent = 23 # Don't know what header file this should come from
SCROLLBARWIDTH = 16 # Again, not a clue...
# Trick to forestall a set of SIOUX menus being added to our menubar
SIOUX_APPLEMENU_ID=32000
# Map event 'what' field to strings
eventname = {}
eventname[1] = 'mouseDown'
eventname[2] = 'mouseUp'
eventname[3] = 'keyDown'
eventname[4] = 'keyUp'
eventname[5] = 'autoKey'
eventname[6] = 'updateEvt'
eventname[7] = 'diskEvt'
eventname[8] = 'activateEvt'
eventname[15] = 'osEvt'
eventname[23] = 'kHighLevelEvent'
# Map part codes returned by WhichWindow() to strings
partname = {}
partname[0] = 'inDesk'
partname[1] = 'inMenuBar'
partname[2] = 'inSysWindow'
partname[3] = 'inContent'
partname[4] = 'inDrag'
partname[5] = 'inGrow'
partname[6] = 'inGoAway'
partname[7] = 'inZoomIn'
partname[8] = 'inZoomOut'
#
# The useable portion of the screen
# ## but what happens with multiple screens? jvr
screenbounds = GetQDGlobalsScreenBits().bounds
screenbounds = screenbounds[0]+4, screenbounds[1]+4, \
screenbounds[2]-4, screenbounds[3]-4
next_window_x = 16 # jvr
next_window_y = 44 # jvr
def windowbounds(width, height):
    """Return sensible (l, t, r, b) bounds for a new width x height window,
    staggering successive windows and wrapping back when the screen edge is
    reached."""
    global next_window_x, next_window_y
    # Wrap each axis independently if the window would run off the screen.
    if next_window_x + width > screenbounds[2]:
        next_window_x = 16
    if next_window_y + height > screenbounds[3]:
        next_window_y = 44
    left, top = next_window_x, next_window_y
    right, bottom = left + width, top + height
    # Stagger the next window down and to the right.
    next_window_x += 8
    next_window_y += 20
    return left, top, right, bottom
# Lazily-loaded watch ("busy") cursor data, cached after the first lookup.
_watch = None
def setwatchcursor():
    """Show the watch cursor, fetching cursor resource 4 on first use."""
    global _watch
    if _watch is None:
        _watch = GetCursor(4).data
    SetCursor(_watch)
def setarrowcursor():
    """Restore the standard arrow cursor."""
    SetCursor(GetQDGlobalsArrow())
class Application:
"Application framework -- your application should be a derived class"
def __init__(self, nomenubar=0):
self._doing_asyncevents = 0
self.quitting = 0
self.needmenubarredraw = 0
self._windows = {}
self._helpmenu = None
if nomenubar:
self.menubar = None
else:
self.makemenubar()
def __del__(self):
if self._doing_asyncevents:
self._doing_asyncevents = 0
MacOS.SetEventHandler()
def makemenubar(self):
self.menubar = MenuBar(self)
AppleMenu(self.menubar, self.getabouttext(), self.do_about)
self.makeusermenus()
def makeusermenus(self):
self.filemenu = m = Menu(self.menubar, "File")
self._quititem = MenuItem(m, "Quit", "Q", self._quit)
def gethelpmenu(self):
if self._helpmenu is None:
self._helpmenu = HelpMenu(self.menubar)
return self._helpmenu
def _quit(self, *args):
self.quitting = 1
def cleanup(self):
for w in self._windows.values():
w.do_close()
return self._windows == {}
def appendwindow(self, wid, window):
self._windows[wid] = window
def removewindow(self, wid):
del self._windows[wid]
def getabouttext(self):
return "About %s..." % self.__class__.__name__
def do_about(self, id, item, window, event):
EasyDialogs.Message("Hello, world!" + "\015(%s)" % self.__class__.__name__)
# The main event loop is broken up in several simple steps.
# This is done so you can override each individual part,
# if you have a need to do extra processing independent of the
# event type.
# Normally, however, you'd just define handlers for individual
# events.
schedparams = (0, 0) # By default disable Python's event handling
default_wait = None # By default we wait GetCaretTime in WaitNextEvent
def mainloop(self, mask = everyEvent, wait = None):
self.quitting = 0
if hasattr(MacOS, 'SchedParams'):
saveparams = MacOS.SchedParams(*self.schedparams)
try:
while not self.quitting:
try:
self.do1event(mask, wait)
except (Application, SystemExit):
# Note: the raising of "self" is old-fashioned idiom to
# exit the mainloop. Calling _quit() is better for new
# applications.
break
finally:
if hasattr(MacOS, 'SchedParams'):
MacOS.SchedParams(*saveparams)
def dopendingevents(self, mask = everyEvent):
"""dopendingevents - Handle all pending events"""
while self.do1event(mask, wait=0):
pass
def do1event(self, mask = everyEvent, wait = None):
ok, event = self.getevent(mask, wait)
if IsDialogEvent(event):
if self.do_dialogevent(event):
return
if ok:
self.dispatch(event)
else:
self.idle(event)
def idle(self, event):
pass
def getevent(self, mask = everyEvent, wait = None):
    """Wait for the next event, redrawing the menu bar first if needed.

    Returns the (ok, event) pair from WaitNextEvent.
    """
    if self.needmenubarredraw:
        DrawMenuBar()
        self.needmenubarredraw = 0
    if wait is None:
        wait = self.default_wait
        if wait is None:
            wait = GetCaretTime()
    return WaitNextEvent(mask, wait)
def dispatch(self, event):
    """Look up and call the do_<eventname> handler for *event*."""
    # The dialog check duplicates do1event's, but is required when this
    # method is installed directly as an asynchronous event handler.
    if IsDialogEvent(event):
        if self.do_dialogevent(event):
            return
    what = event[0]
    if what in eventname:
        name = "do_" + eventname[what]
    else:
        name = "do_%d" % what
    handler = getattr(self, name, self.do_unknownevent)
    handler(event)
def asyncevents(self, onoff):
    """asyncevents - Set asynchronous event handling on or off.

    Returns the previous setting.  Raises RuntimeError under MachO
    Python, where asynchronous toolbox event handling is unsupported.
    """
    if MacOS.runtimemodel == 'macho':
        # Was a string exception, which itself raises TypeError on
        # Python 2.6+; use a real exception object instead.
        raise RuntimeError('Unsupported in MachoPython')
    old = self._doing_asyncevents
    if old:
        # Uninstall the current handler and restore scheduler settings.
        MacOS.SetEventHandler()
        MacOS.SchedParams(*self.schedparams)
    if onoff:
        MacOS.SetEventHandler(self.dispatch)
        doint, dummymask, benice, howoften, bgyield = \
            self.schedparams
        # Listen for every event while the async handler is installed.
        MacOS.SchedParams(doint, everyEvent, benice,
            howoften, bgyield)
    self._doing_asyncevents = onoff
    return old
def do_dialogevent(self, event):
gotone, dlg, item = DialogSelect(event)
if gotone:
window = dlg.GetDialogWindow()
if self._windows.has_key(window):
self._windows[window].do_itemhit(item, event)
else:
print 'Dialog event for unknown dialog'
return 1
return 0
def do_mouseDown(self, event):
    """Route a mouse-down event to the right window/part handler."""
    (what, message, when, where, modifiers) = event
    partcode, wid = FindWindow(where)
    #
    # Map the part code onto a handler name like 'do_inContent'.
    #
    if partname.has_key(partcode):
        name = "do_" + partname[partcode]
    else:
        name = "do_%d" % partcode
    if wid is None:
        # No window, or a non-python window
        try:
            handler = getattr(self, name)
        except AttributeError:
            # Not menubar or something, so assume someone
            # else's window
            if hasattr(MacOS, 'HandleEvent'):
                MacOS.HandleEvent(event)
            return
    elif self._windows.has_key(wid):
        # It is a window. Hand off to correct window.
        window = self._windows[wid]
        try:
            handler = getattr(window, name)
        except AttributeError:
            handler = self.do_unknownpartcode
    else:
        # It is a python-toolbox window, but not ours.
        handler = self.do_unknownwindow
    handler(partcode, wid, event)
def do_inSysWindow(self, partcode, window, event):
    """Click in a system window: let the toolbox handle it."""
    if hasattr(MacOS, 'HandleEvent'):
        MacOS.HandleEvent(event)
def do_inDesk(self, partcode, window, event):
    """Click on the desktop: let the toolbox handle it."""
    if hasattr(MacOS, 'HandleEvent'):
        MacOS.HandleEvent(event)
def do_inMenuBar(self, partcode, window, event):
    """Handle a click in the menu bar by running menu selection."""
    if not self.menubar:
        # No Python menu bar: let the toolbox have the event.
        if hasattr(MacOS, 'HandleEvent'):
            MacOS.HandleEvent(event)
        return
    where = event[3]
    result = MenuSelect(where)
    id = (result >> 16) & 0xffff    # high word: menu id
    item = result & 0xffff          # low word: item number
    if id >= 0x8000:
        id = id - 0x10000           # sign-extend negative (system) menu ids
    self.do_rawmenu(id, item, window, event)
def do_rawmenu(self, id, item, window, event):
    """Dispatch a menu selection, always unhighlighting the menu after."""
    try:
        self.do_menu(id, item, window, event)
    finally:
        HiliteMenu(0)
def do_menu(self, id, item, window, event):
    """Hand a menu selection off to the menu bar object."""
    if hasattr(MacOS, 'OutputSeen'):
        MacOS.OutputSeen()
    self.menubar.dispatch(id, item, window, event)
def do_unknownpartcode(self, partcode, window, event):
    """Mouse-down in a part we have no handler for: punt to the toolbox."""
    (what, message, when, where, modifiers) = event
    if DEBUG: print "Mouse down at global:", where
    if DEBUG: print "\tUnknown part code:", partcode
    if DEBUG: print "\tEvent:", self.printevent(event)
    if hasattr(MacOS, 'HandleEvent'):
        MacOS.HandleEvent(event)
def do_unknownwindow(self, partcode, window, event):
    """Mouse-down in a toolbox window that isn't ours: punt to the toolbox."""
    if DEBUG: print 'Unknown window:', window
    if hasattr(MacOS, 'HandleEvent'):
        MacOS.HandleEvent(event)
def do_keyDown(self, event):
    """Key pressed: delegate to the common key handler."""
    self.do_key(event)
def do_autoKey(self, event):
    """Auto-repeat key: handle unless a command key is held down."""
    if not event[-1] & cmdKey:
        self.do_key(event)
def do_key(self, event):
    """Handle a key-down: menu equivalents first, then the front window."""
    (what, message, when, where, modifiers) = event
    c = chr(message & charCodeMask)
    if self.menubar:
        result = MenuEvent(event)
        id = (result>>16) & 0xffff # Hi word
        item = result & 0xffff # Lo word
        if id:
            self.do_rawmenu(id, item, None, event)
            return
        # Otherwise we fall-through
    if modifiers & cmdKey:
        if c == '.':
            # Cmd-.  aborts: raising self exits the mainloop (old idiom).
            raise self
        else:
            if not self.menubar:
                if hasattr(MacOS, 'HandleEvent'):
                    MacOS.HandleEvent(event)
            return
    else:
        # See whether the front window wants it
        w = MyFrontWindow()
        if w and self._windows.has_key(w):
            window = self._windows[w]
            try:
                do_char = window.do_char
            except AttributeError:
                do_char = self.do_char
            do_char(c, event)
        # else it wasn't for us, sigh...
def do_char(self, c, event):
    """Default character handler: report the key in debug mode."""
    if DEBUG: print "Character", repr(c)
def do_updateEvt(self, event):
    """Dispatch an update event to the window it belongs to."""
    wid = WhichWindow(event[1])     # the message field holds the WindowRef
    if wid and self._windows.has_key(wid):
        self._windows[wid].do_rawupdate(wid, event)
    elif hasattr(MacOS, 'HandleEvent'):
        MacOS.HandleEvent(event)
def do_activateEvt(self, event):
    """Dispatch an activate/deactivate event to the target window."""
    wid = WhichWindow(event[1])     # the message field holds the WindowRef
    if wid and self._windows.has_key(wid):
        window = self._windows[wid]
        # Bit 0 of the modifiers distinguishes activate from deactivate.
        window.do_activate(event[-1] & 1, event)
    elif hasattr(MacOS, 'HandleEvent'):
        MacOS.HandleEvent(event)
def do_osEvt(self, event):
(what, message, when, where, modifiers) = event
which = (message >> 24) & 0xff
if which == 1: # suspend/resume
self.do_suspendresume(event)
else:
if DEBUG:
print 'unknown osEvt:',
self.printevent(event)
def do_suspendresume(self, event):
    """Forward suspend/resume to the front window as (de)activate."""
    message = event[1]
    wid = MyFrontWindow()
    if wid and self._windows.has_key(wid):
        # Bit 0 of the message: 1 = resume, 0 = suspend.
        self._windows[wid].do_activate(message & 1, event)
def do_kHighLevelEvent(self, event):
    """Forward a high-level (Apple) event to the Apple Event Manager."""
    (what, message, when, where, modifiers) = event
    if DEBUG:
        print "High Level Event:",
        self.printevent(event)
    try:
        AEProcessAppleEvent(event)
    except:
        # Best-effort: unhandled Apple events are deliberately ignored.
        pass
        #print "AEProcessAppleEvent error:"
        #traceback.print_exc()
def do_unknownevent(self, event):
    """Fallback for events with no specific do_<name> handler."""
    if DEBUG:
        print "Unhandled event:",
        self.printevent(event)
def printevent(self, event):
    """Print a human-readable description of *event* (debug helper)."""
    (what, message, when, where, modifiers) = event
    nicewhat = repr(what)
    if eventname.has_key(what):
        nicewhat = eventname[what]
    print nicewhat,
    if what == kHighLevelEvent:
        # For high-level events 'where' encodes an OSType, not a point.
        h, v = where
        print repr(ostypecode(message)), hex(when), repr(ostypecode(h | (v<<16))),
    else:
        print hex(message), hex(when), where,
    print hex(modifiers)
class MenuBar:
"""Represent a set of menus in a menu bar.
Interface:
- (constructor)
- (destructor)
- addmenu
- addpopup (normally used internally)
- dispatch (called from Application)
"""
nextid = 1 # Necessarily a class variable
def getnextid(self):
id = MenuBar.nextid
MenuBar.nextid = id+1
return id
def __init__(self, parent=None):
self.parent = parent
ClearMenuBar()
self.bar = GetMenuBar()
self.menus = {}
# XXX necessary?
def close(self):
self.parent = None
self.bar = None
self.menus = None
def addmenu(self, title, after = 0, id=None):
if id is None:
id = self.getnextid()
if DEBUG: print 'Newmenu', title, id # XXXX
m = NewMenu(id, title)
m.InsertMenu(after)
if after >= 0:
if self.parent:
self.parent.needmenubarredraw = 1
else:
DrawMenuBar()
return id, m
def delmenu(self, id):
if DEBUG: print 'Delmenu', id # XXXX
DeleteMenu(id)
def addpopup(self, title = ''):
return self.addmenu(title, -1)
# Useless:
# def install(self):
# if not self.bar: return
# SetMenuBar(self.bar)
# if self.parent:
# self.parent.needmenubarredraw = 1
# else:
# DrawMenuBar()
def fixmenudimstate(self):
for m in self.menus.keys():
menu = self.menus[m]
if menu.__class__ == FrameWork.AppleMenu:
continue
for i in range(len(menu.items)):
label, shortcut, callback, kind = menu.items[i]
if type(callback) == types.StringType:
wid = MyFrontWindow()
if wid and self.parent._windows.has_key(wid):
window = self.parent._windows[wid]
if hasattr(window, "domenu_" + callback):
menu.menu.EnableMenuItem(i + 1)
elif hasattr(self.parent, "domenu_" + callback):
menu.menu.EnableMenuItem(i + 1)
else:
menu.menu.DisableMenuItem(i + 1)
elif hasattr(self.parent, "domenu_" + callback):
menu.menu.EnableMenuItem(i + 1)
else:
menu.menu.DisableMenuItem(i + 1)
elif callback:
pass
def dispatch(self, id, item, window, event):
if self.menus.has_key(id):
self.menus[id].dispatch(id, item, window, event)
else:
if DEBUG: print "MenuBar.dispatch(%d, %d, %s, %s)" % \
(id, item, window, event)
# XXX Need a way to get menus as resources and bind them to callbacks
class Menu:
    "One menu."
    def __init__(self, bar, title, after=0, id=None):
        self.bar = bar
        self.id, self.menu = self.bar.addmenu(title, after, id)
        bar.menus[self.id] = self
        self.items = []
        self._parent = None
    def delete(self):
        """Remove the menu from its bar and drop all references."""
        self.bar.delmenu(self.id)
        del self.bar.menus[self.id]
        self.menu.DisposeMenu()
        del self.bar
        del self.items
        del self.menu
        del self.id
        del self._parent
    def additem(self, label, shortcut=None, callback=None, kind=None):
        """Append an item; shortcut is a char or (modifiers, char[, glyph]).

        Returns the 1-based item number.
        """
        self.menu.AppendMenu('x') # add a dummy string
        self.items.append((label, shortcut, callback, kind))
        item = len(self.items)
        if isinstance(label, unicode):
            self.menu.SetMenuItemTextWithCFString(item, label)
        else:
            self.menu.SetMenuItemText(item, label)
        if shortcut and isinstance(shortcut, tuple):
            # Fully specified: (modifiers, char) or (modifiers, char, glyph).
            modifiers, char = shortcut[:2]
            self.menu.SetItemCmd(item, ord(char))
            self.menu.SetMenuItemModifiers(item, modifiers)
            if len(shortcut) > 2:
                self.menu.SetMenuItemKeyGlyph(item, shortcut[2])
        elif shortcut:
            self.menu.SetItemCmd(item, ord(shortcut))
        return item
    def delitem(self, item):
        """Delete an item; only the last item of a menu may be removed."""
        if item != len(self.items):
            # Was a string exception, which itself raises TypeError on
            # Python 2.6+; use a real exception object instead.
            raise ValueError('Can only delete last item of a menu')
        self.menu.DeleteMenuItem(item)
        del self.items[item-1]
    def addcheck(self, label, shortcut=None, callback=None):
        """Append a checkable item."""
        return self.additem(label, shortcut, callback, 'check')
    def addradio(self, label, shortcut=None, callback=None):
        """Append a radio item."""
        return self.additem(label, shortcut, callback, 'radio')
    def addseparator(self):
        """Append a separator line."""
        self.menu.AppendMenu('(-')
        self.items.append(('', None, None, 'separator'))
    def addsubmenu(self, label, title=''):
        """Append an item that opens a new submenu; return the submenu."""
        sub = Menu(self.bar, title, -1)
        item = self.additem(label, '\x1B', None, 'submenu')
        self.menu.SetItemMark(item, sub.id)
        sub._parent = self
        sub._parent_item = item
        return sub
    def dispatch(self, id, item, window, event):
        """Invoke the callback for *item*; strings name domenu_* methods."""
        title, shortcut, callback, mtype = self.items[item-1]
        if callback:
            # '<>' replaced with '!=' for consistency with the rest of
            # this class; the operator is deprecated.
            if not self.bar.parent or type(callback) != types.StringType:
                menuhandler = callback
            else:
                # callback is string
                wid = MyFrontWindow()
                if wid and self.bar.parent._windows.has_key(wid):
                    window = self.bar.parent._windows[wid]
                    if hasattr(window, "domenu_" + callback):
                        menuhandler = getattr(window, "domenu_" + callback)
                    elif hasattr(self.bar.parent, "domenu_" + callback):
                        menuhandler = getattr(self.bar.parent, "domenu_" + callback)
                    else:
                        # nothing we can do. we shouldn't have come this far
                        # since the menu item should have been disabled...
                        return
                elif hasattr(self.bar.parent, "domenu_" + callback):
                    menuhandler = getattr(self.bar.parent, "domenu_" + callback)
                else:
                    # nothing we can do. we shouldn't have come this far
                    # since the menu item should have been disabled...
                    return
            menuhandler(id, item, window, event)
    def enable(self, onoff):
        """Enable or disable the whole menu (and its parent item)."""
        if onoff:
            self.menu.EnableMenuItem(0)
            if self._parent:
                self._parent.menu.EnableMenuItem(self._parent_item)
        else:
            self.menu.DisableMenuItem(0)
            if self._parent:
                self._parent.menu.DisableMenuItem(self._parent_item)
        if self.bar and self.bar.parent:
            self.bar.parent.needmenubarredraw = 1
class PopupMenu(Menu):
    """A menu that lives outside the menu bar and pops up on request."""
    def __init__(self, bar):
        Menu.__init__(self, bar, '(popup)', -1)
    def popup(self, x, y, event, default=1, window=None):
        """Pop up at global (x, y) and dispatch the user's selection.

        x/y should normally be the top-left corner of the clicked
        control (not the mouse position) so the popup overlaps it.
        """
        selection = self.menu.PopUpMenuSelect(x, y, default)
        if not selection:
            return
        id = (selection >> 16) & 0xffff
        item = selection & 0xffff
        if not window:
            front = MyFrontWindow()
            try:
                window = self.bar.parent._windows[front]
            except:
                pass # If we can't find the window we pass None
        self.dispatch(id, item, window, event)
class MenuItem:
    """A single item in a Menu; wraps the per-item toolbox operations."""
    def __init__(self, menu, title, shortcut=None, callback=None, kind=None):
        self.item = menu.additem(title, shortcut, callback)
        self.menu = menu
    def delete(self):
        """Remove this item from its menu and drop all references."""
        self.menu.delitem(self.item)
        del self.menu
        del self.item
    def check(self, onoff):
        """Set or clear the check mark in front of the item."""
        self.menu.menu.CheckMenuItem(self.item, onoff)
    def enable(self, onoff):
        """Enable the item when onoff is true, otherwise disable it."""
        toolbox_menu = self.menu.menu
        if onoff:
            toolbox_menu.EnableMenuItem(self.item)
        else:
            toolbox_menu.DisableMenuItem(self.item)
    def settext(self, text):
        """Change the item's label."""
        self.menu.menu.SetMenuItemText(self.item, text)
    def setstyle(self, style):
        """Set the item's text style."""
        self.menu.menu.SetItemStyle(self.item, style)
    def seticon(self, icon):
        """Attach an icon to the item."""
        self.menu.menu.SetItemIcon(self.item, icon)
    def setcmd(self, cmd):
        """Set the item's command-key equivalent."""
        self.menu.menu.SetItemCmd(self.item, cmd)
    def setmark(self, cmd):
        """Set the item's mark character."""
        self.menu.menu.SetItemMark(self.item, cmd)
class RadioItem(MenuItem):
    """Convenience subclass: a MenuItem created with kind 'radio'."""
    def __init__(self, menu, title, shortcut=None, callback=None):
        MenuItem.__init__(self, menu, title, shortcut, callback, 'radio')
class CheckItem(MenuItem):
    """Convenience subclass: a MenuItem created with kind 'check'."""
    def __init__(self, menu, title, shortcut=None, callback=None):
        MenuItem.__init__(self, menu, title, shortcut, callback, 'check')
def Separator(menu):
    """Append a separator line to *menu*."""
    menu.addseparator()
def SubMenu(menu, label, title=''):
    """Create and return a submenu of *menu* labelled *label*."""
    return menu.addsubmenu(label, title)
class AppleMenu(Menu):
    """The Apple (leftmost) menu, holding the About item."""
    def __init__(self, bar, abouttext="About me...", aboutcallback=None):
        # "\024" is the Apple-logo title character; SIOUX_APPLEMENU_ID
        # keeps SIOUX from installing its own Apple menu.
        Menu.__init__(self, bar, "\024", id=SIOUX_APPLEMENU_ID)
        if MacOS.runtimemodel == 'ppc':
            self.additem(abouttext, None, aboutcallback)
            self.addseparator()
            self.menu.AppendResMenu('DRVR')
        else:
            # Additem()'s tricks do not work for "apple" menu under Carbon
            self.menu.InsertMenuItem(abouttext, 0)
            self.items.append((abouttext, None, aboutcallback, None))
    def dispatch(self, id, item, window, event):
        """Item 1 is About; other items are desk accessories (classic only)."""
        if item == 1:
            Menu.dispatch(self, id, item, window, event)
        elif MacOS.runtimemodel == 'ppc':
            name = self.menu.GetMenuItemText(item)
            OpenDeskAcc(name)
class HelpMenu(Menu):
    """Wrapper for the system-provided Help menu."""
    def __init__(self, bar):
        # Note we don't call Menu.__init__, we do the necessary things by hand
        self.bar = bar
        self.menu, index = HMGetHelpMenu()
        self.id = self.menu.GetMenuID()
        bar.menus[self.id] = self
        # The next line caters for the entries the system already handles for us
        self.items = [None]*(index-1)
        self._parent = None
class Window:
    """A single window belonging to an application"""
    def __init__(self, parent):
        self.wid = None         # toolbox WindowRef, set by open()
        self.parent = parent    # owning Application
    def open(self, bounds=(40, 40, 400, 400), resid=None):
        """Create the toolbox window, from a WIND resource if resid is given."""
        if resid <> None:
            self.wid = GetNewWindow(resid, -1)
        else:
            self.wid = NewWindow(bounds, self.__class__.__name__, 1,
                8, -1, 1, 0)    # changed to proc id 8 to include zoom box. jvr
        self.do_postopen()
    def do_postopen(self):
        """Tell our parent we exist"""
        self.parent.appendwindow(self.wid, self)
    def close(self):
        self.do_postclose()
    def do_postclose(self):
        """Unregister from the parent and drop references."""
        self.parent.removewindow(self.wid)
        self.parent = None
        self.wid = None
    def SetPort(self):
        # Convinience method
        SetPort(self.wid)
    def GetWindow(self):
        """Return the toolbox WindowRef."""
        return self.wid
    def do_inDrag(self, partcode, window, event):
        """Title-bar drag: let the toolbox move the window."""
        where = event[3]
        window.DragWindow(where, self.draglimit)
    draglimit = screenbounds
    def do_inGoAway(self, partcode, window, event):
        """Close-box click: close if the user completes the click."""
        where = event[3]
        if window.TrackGoAway(where):
            self.close()
    def do_inZoom(self, partcode, window, event):
        """Zoom-box click: zoom and propagate the resulting resize."""
        (what, message, when, where, modifiers) = event
        if window.TrackBox(where, partcode):
            window.ZoomWindow(partcode, 1)
            rect = window.GetWindowUserState() # so that zoom really works... jvr
            self.do_postresize(rect[2] - rect[0], rect[3] - rect[1], window) # jvr
    def do_inZoomIn(self, partcode, window, event):
        SetPort(window) # !!!
        self.do_inZoom(partcode, window, event)
    def do_inZoomOut(self, partcode, window, event):
        SetPort(window) # !!!
        self.do_inZoom(partcode, window, event)
    def do_inGrow(self, partcode, window, event):
        """Grow-box drag: resize the window within self.growlimit."""
        (what, message, when, where, modifiers) = event
        result = window.GrowWindow(where, self.growlimit)
        if result:
            height = (result>>16) & 0xffff # Hi word
            width = result & 0xffff # Lo word
            self.do_resize(width, height, window)
    growlimit = (50, 50, screenbounds[2] - screenbounds[0], screenbounds[3] - screenbounds[1]) # jvr
    def do_resize(self, width, height, window):
        """Apply a new size, invalidating the grow-icon area first."""
        l, t, r, b = self.wid.GetWindowPort().GetPortBounds() # jvr, forGrowIcon
        self.SetPort() # jvr
        self.wid.InvalWindowRect((r - SCROLLBARWIDTH + 1, b - SCROLLBARWIDTH + 1, r, b)) # jvr
        window.SizeWindow(width, height, 1) # changed updateFlag to true jvr
        self.do_postresize(width, height, window)
    def do_postresize(self, width, height, window):
        """Hook run after a resize; default invalidates the whole window."""
        SetPort(window)
        self.wid.InvalWindowRect(window.GetWindowPort().GetPortBounds())
    def do_inContent(self, partcode, window, event):
        #
        # If we're not frontmost, select ourselves and wait for
        # the activate event.
        #
        if MyFrontWindow() <> window:
            window.SelectWindow()
            return
        # We are. Handle the event.
        (what, message, when, where, modifiers) = event
        SetPort(window)
        local = GlobalToLocal(where)
        self.do_contentclick(local, modifiers, event)
    def do_contentclick(self, local, modifiers, event):
        """Hook for clicks in the content area; default only reports."""
        if DEBUG:
            print 'Click in contents at %s, modifiers %s'%(local, modifiers)
    def do_rawupdate(self, window, event):
        """Bracket do_update() in BeginUpdate/EndUpdate."""
        if DEBUG: print "raw update for", window
        SetPort(window)
        window.BeginUpdate()
        self.do_update(window, event)
        window.EndUpdate()
    def do_update(self, window, event):
        """Redraw the window; default erases (with a flashy debug mode)."""
        if DEBUG:
            import time
            for i in range(8):
                time.sleep(0.1)
                InvertRgn(window.GetWindowPort().visRgn)
            FillRgn(window.GetWindowPort().visRgn, GetQDGlobalsGray())
        else:
            EraseRgn(window.GetWindowPort().visRgn)
    def do_activate(self, activate, event):
        """Hook for activate/deactivate; default only reports."""
        if DEBUG: print 'Activate %d for %s'%(activate, self.wid)
class ControlsWindow(Window):
    """A Window that hosts toolbox controls and routes clicks to them."""
    def do_rawupdate(self, window, event):
        """Update, additionally redrawing controls and the grow icon."""
        if DEBUG: print "raw update for", window
        SetPort(window)
        window.BeginUpdate()
        self.do_update(window, event)
        #DrawControls(window) # jvr
        UpdateControls(window, window.GetWindowPort().visRgn) # jvr
        window.DrawGrowIcon()
        window.EndUpdate()
    def do_controlhit(self, window, control, pcode, event):
        """Hook for a completed control click; default only reports."""
        if DEBUG: print "control hit in", window, "on", control, "; pcode =", pcode
    def do_inContent(self, partcode, window, event):
        """Content click: hand to a control if one is hit, else fall back."""
        if MyFrontWindow() <> window:
            window.SelectWindow()
            return
        (what, message, when, where, modifiers) = event
        SetPort(window) # XXXX Needed?
        local = GlobalToLocal(where)
        pcode, control = FindControl(local, window)
        if pcode and control:
            self.do_rawcontrolhit(window, control, pcode, local, event)
        else:
            if DEBUG: print "FindControl(%s, %s) -> (%s, %s)" % \
                (local, window, pcode, control)
            self.do_contentclick(local, modifiers, event)
    def do_rawcontrolhit(self, window, control, pcode, local, event):
        """Track the control click and report it if completed."""
        pcode = control.TrackControl(local)
        if pcode:
            self.do_controlhit(window, control, pcode, event)
class ScrolledWindow(ControlsWindow):
    """A ControlsWindow with optional horizontal/vertical scrollbars."""
    def __init__(self, parent):
        self.barx = self.bary = None            # scrollbar ControlRefs
        self.barx_enabled = self.bary_enabled = 1
        self.activated = 1
        ControlsWindow.__init__(self, parent)
    def scrollbars(self, wantx=1, wanty=1):
        """Create the requested scrollbars along the window edges."""
        SetPort(self.wid)
        self.barx = self.bary = None
        self.barx_enabled = self.bary_enabled = 1
        x0, y0, x1, y1 = self.wid.GetWindowPort().GetPortBounds()
        vx, vy = self.getscrollbarvalues()
        # None means "no scrolling possible": create the bar disabled.
        if vx is None: self.barx_enabled, vx = 0, 0
        if vy is None: self.bary_enabled, vy = 0, 0
        if wantx:
            rect = x0-1, y1-(SCROLLBARWIDTH-1), x1-(SCROLLBARWIDTH-2), y1+1
            self.barx = NewControl(self.wid, rect, "", 1, vx, 0, 32767, 16, 0)
            if not self.barx_enabled: self.barx.HiliteControl(255)
##            self.wid.InvalWindowRect(rect)
        if wanty:
            rect = x1-(SCROLLBARWIDTH-1), y0-1, x1+1, y1-(SCROLLBARWIDTH-2)
            self.bary = NewControl(self.wid, rect, "", 1, vy, 0, 32767, 16, 0)
            if not self.bary_enabled: self.bary.HiliteControl(255)
##            self.wid.InvalWindowRect(rect)
    def do_postclose(self):
        """Drop scrollbar references before the generic teardown."""
        self.barx = self.bary = None
        ControlsWindow.do_postclose(self)
    def do_activate(self, onoff, event):
        """Show scrollbars on activate, hide them on deactivate."""
        self.activated = onoff
        if onoff:
            if self.barx and self.barx_enabled:
                self.barx.ShowControl() # jvr
            if self.bary and self.bary_enabled:
                self.bary.ShowControl() # jvr
        else:
            if self.barx:
                self.barx.HideControl() # jvr; An inactive window should have *hidden*
                                        # scrollbars, not just dimmed (no matter what
                                        # BBEdit does... look at the Finder)
            if self.bary:
                self.bary.HideControl() # jvr
        self.wid.DrawGrowIcon() # jvr
    def do_postresize(self, width, height, window):
        """Move/resize the scrollbars to hug the new window edges."""
        l, t, r, b = self.wid.GetWindowPort().GetPortBounds()
        self.SetPort()
        # Hide while moving to avoid drawing artifacts, then re-show and
        # validate the freshly drawn strips.
        if self.barx:
            self.barx.HideControl() # jvr
            self.barx.MoveControl(l-1, b-(SCROLLBARWIDTH-1))
            self.barx.SizeControl((r-l)-(SCROLLBARWIDTH-3), SCROLLBARWIDTH) # jvr
        if self.bary:
            self.bary.HideControl() # jvr
            self.bary.MoveControl(r-(SCROLLBARWIDTH-1), t-1)
            self.bary.SizeControl(SCROLLBARWIDTH, (b-t)-(SCROLLBARWIDTH-3)) # jvr
        if self.barx:
            self.barx.ShowControl() # jvr
            self.wid.ValidWindowRect((l, b - SCROLLBARWIDTH + 1, r - SCROLLBARWIDTH + 2, b)) # jvr
        if self.bary:
            self.bary.ShowControl() # jvr
            self.wid.ValidWindowRect((r - SCROLLBARWIDTH + 1, t, r, b - SCROLLBARWIDTH + 2)) # jvr
        self.wid.InvalWindowRect((r - SCROLLBARWIDTH + 1, b - SCROLLBARWIDTH + 1, r, b)) # jvr, growicon
    def do_rawcontrolhit(self, window, control, pcode, local, event):
        """Handle a click in one of our scrollbars; return 1 if ours."""
        if control == self.barx:
            which = 'x'
        elif control == self.bary:
            which = 'y'
        else:
            return 0
        if pcode in (inUpButton, inDownButton, inPageUp, inPageDown):
            # We do the work for the buttons and grey area in the tracker
            dummy = control.TrackControl(local, self.do_controltrack)
        else:
            # but the thumb is handled here
            pcode = control.TrackControl(local)
            if pcode == inThumb:
                value = control.GetControlValue()
                print 'setbars', which, value #DBG
                self.scrollbar_callback(which, 'set', value)
                self.updatescrollbars()
            else:
                print 'funny part', pcode #DBG
        return 1
    def do_controltrack(self, control, pcode):
        """Tracker callback: translate button/page parts into +/- steps."""
        if control == self.barx:
            which = 'x'
        elif control == self.bary:
            which = 'y'
        else:
            return
        if pcode == inUpButton:
            what = '-'
        elif pcode == inDownButton:
            what = '+'
        elif pcode == inPageUp:
            what = '--'
        elif pcode == inPageDown:
            what = '++'
        else:
            return
        self.scrollbar_callback(which, what, None)
        self.updatescrollbars()
    def updatescrollbars(self):
        """Refresh both bars' enabled state and values from the client."""
        SetPort(self.wid)
        vx, vy = self.getscrollbarvalues()
        if self.barx:
            if vx is None:
                self.barx.HiliteControl(255)
                self.barx_enabled = 0
            else:
                if not self.barx_enabled:
                    self.barx_enabled = 1
                    if self.activated:
                        self.barx.HiliteControl(0)
                self.barx.SetControlValue(vx)
        if self.bary:
            if vy is None:
                self.bary.HiliteControl(255)
                self.bary_enabled = 0
            else:
                if not self.bary_enabled:
                    self.bary_enabled = 1
                    if self.activated:
                        self.bary.HiliteControl(0)
                self.bary.SetControlValue(vy)
    # Auxiliary function: convert standard text/image/etc coordinate
    # to something palatable as getscrollbarvalues() return
    def scalebarvalue(self, absmin, absmax, curmin, curmax):
        """Map a visible range within an absolute range onto 0..32767."""
        if curmin <= absmin and curmax >= absmax:
            return None
        if curmin <= absmin:
            return 0
        if curmax >= absmax:
            return 32767
        perc = float(curmin-absmin)/float(absmax-absmin)
        return int(perc*32767)
    # To be overridden:
    def getscrollbarvalues(self):
        """Override: return (x, y) positions in 0..32767, or None for off."""
        return 0, 0
    def scrollbar_callback(self, which, what, value):
        """Override: react to scroll requests; default only reports."""
        print 'scroll', which, what, value
class DialogWindow(Window):
    """A modeless dialog window"""
    def open(self, resid):
        # Load the DLOG resource and register under its WindowRef.
        self.dlg = GetNewDialog(resid, -1)
        self.wid = self.dlg.GetDialogWindow()
        self.do_postopen()
    def close(self):
        self.do_postclose()
    def do_postclose(self):
        # Drop the dialog object before the generic window teardown.
        self.dlg = None
        Window.do_postclose(self)
    def do_itemhit(self, item, event):
        # Default handler: subclasses override to react to item clicks.
        print 'Dialog %s, item %d hit'%(self.dlg, item)
    def do_rawupdate(self, window, event):
        # The Dialog Manager redraws dialogs for us; nothing to do.
        pass
def ostypecode(x):
    "Convert a long int to the 4-character code it really is"
    chars = []
    for _ in range(4):
        x, low_byte = divmod(x, 256)
        chars.append(chr(low_byte))
    chars.reverse()
    return ''.join(chars)
class TestApp(Application):
    "This class is used by the test() function"
    def makeusermenus(self):
        """Build a File menu exercising items, checks, submenus and help."""
        self.filemenu = m = Menu(self.menubar, "File")
        self.saveitem = MenuItem(m, "Save", "S", self.save)
        Separator(m)
        self.optionsmenu = mm = SubMenu(m, "Options")
        self.opt1 = CheckItem(mm, "Arguments", "A")
        # Tuple shortcuts: (modifiers, char) for non-command equivalents.
        self.opt2 = CheckItem(mm, "Being hit on the head lessons", (kMenuOptionModifier, "A"))
        self.opt3 = CheckItem(mm, "Complaints", (kMenuOptionModifier|kMenuNoCommandModifier, "A"))
        Separator(m)
        self.itemeh = MenuItem(m, "Enable Help", None, self.enablehelp)
        self.itemdbg = MenuItem(m, "Debug", None, self.debug)
        Separator(m)
        self.quititem = MenuItem(m, "Quit", "Q", self.quit)
    def save(self, *args):
        print "Save"
    def quit(self, *args):
        # Old-fashioned idiom: raising self exits mainloop().
        raise self
    def enablehelp(self, *args):
        """Add a throw-away item to the system Help menu."""
        hm = self.gethelpmenu()
        self.nohelpitem = MenuItem(hm, "There isn't any", None, self.nohelp)
    def nohelp(self, *args):
        print "I told you there isn't any!"
    def debug(self, *args):
        import pdb
        pdb.set_trace()
def test():
    "Test program"
    app = TestApp()
    app.mainloop()
if __name__ == '__main__':
    test()
| {
"repo_name": "Lh4cKg/sl4a",
"path": "python/src/Lib/plat-mac/FrameWork.py",
"copies": "33",
"size": "37669",
"license": "apache-2.0",
"hash": -3665134559612246500,
"line_mean": 32.4538188277,
"line_max": 107,
"alpha_frac": 0.566725955,
"autogenerated": false,
"ratio": 3.7477862899214007,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0034749789636932555,
"num_lines": 1126
} |
"A sort of application framework for the Mac"
DEBUG=0
import MacOS
import traceback
from Carbon.AE import *
from Carbon.AppleEvents import *
from Carbon.Ctl import *
from Carbon.Controls import *
from Carbon.Dlg import *
from Carbon.Dialogs import *
from Carbon.Evt import *
from Carbon.Events import *
from Carbon.Help import *
from Carbon.Menu import *
from Carbon.Menus import *
from Carbon.Qd import *
from Carbon.QuickDraw import *
#from Carbon.Res import *
#from Carbon.Resources import *
#from Carbon.Snd import *
#from Carbon.Sound import *
from Carbon.Win import *
from Carbon.Windows import *
import types
import EasyDialogs
try:
MyFrontWindow = FrontNonFloatingWindow
except NameError:
MyFrontWindow = FrontWindow
kHighLevelEvent = 23 # Don't know what header file this should come from
SCROLLBARWIDTH = 16 # Again, not a clue...
# Trick to forestall a set of SIOUX menus being added to our menubar
SIOUX_APPLEMENU_ID=32000
# Map event 'what' field to strings
eventname = {}
eventname[1] = 'mouseDown'
eventname[2] = 'mouseUp'
eventname[3] = 'keyDown'
eventname[4] = 'keyUp'
eventname[5] = 'autoKey'
eventname[6] = 'updateEvt'
eventname[7] = 'diskEvt'
eventname[8] = 'activateEvt'
eventname[15] = 'osEvt'
eventname[23] = 'kHighLevelEvent'
# Map part codes returned by WhichWindow() to strings
partname = {}
partname[0] = 'inDesk'
partname[1] = 'inMenuBar'
partname[2] = 'inSysWindow'
partname[3] = 'inContent'
partname[4] = 'inDrag'
partname[5] = 'inGrow'
partname[6] = 'inGoAway'
partname[7] = 'inZoomIn'
partname[8] = 'inZoomOut'
#
# The useable portion of the screen
# ## but what happens with multiple screens? jvr
screenbounds = GetQDGlobalsScreenBits().bounds
screenbounds = screenbounds[0]+4, screenbounds[1]+4, \
screenbounds[2]-4, screenbounds[3]-4
next_window_x = 16 # jvr
next_window_y = 44 # jvr
def windowbounds(width, height):
    """Return sensible (l, t, r, b) bounds, staggering successive windows."""
    global next_window_x, next_window_y
    # Wrap back toward the top-left when the window would fall off-screen.
    if next_window_x + width > screenbounds[2]:
        next_window_x = 16
    if next_window_y + height > screenbounds[3]:
        next_window_y = 44
    l, t = next_window_x, next_window_y
    r, b = l + width, t + height
    # Offset the next window a little, terminal-app style. # jvr
    next_window_x += 8
    next_window_y += 20
    return l, t, r, b
_watch = None   # cached CURS resource data for the watch cursor
def setwatchcursor():
    """Set the watch (busy) cursor, loading and caching it on first use."""
    global _watch
    # Identity comparison with None per PEP 8 (was '== None').
    if _watch is None:
        _watch = GetCursor(4).data
    SetCursor(_watch)
def setarrowcursor():
    """Reset the cursor to the standard arrow."""
    SetCursor(GetQDGlobalsArrow())
class Application:
"Application framework -- your application should be a derived class"
def __init__(self, nomenubar=0):
    """Set up application state; build the menu bar unless suppressed."""
    self._doing_asyncevents = 0
    self.quitting = 0
    self.needmenubarredraw = 0
    self._windows = {}          # maps toolbox window ids to Window objects
    self._helpmenu = None       # created lazily by gethelpmenu()
    if nomenubar:
        self.menubar = None
    else:
        self.makemenubar()
def __del__(self):
    """Make sure the asynchronous event handler is uninstalled."""
    if self._doing_asyncevents:
        self._doing_asyncevents = 0
        MacOS.SetEventHandler()
def makemenubar(self):
    """Create the menu bar with the Apple menu and the user menus."""
    self.menubar = MenuBar(self)
    AppleMenu(self.menubar, self.getabouttext(), self.do_about)
    self.makeusermenus()
def makeusermenus(self):
    """Build the default user menus: a File menu holding only Quit."""
    file_menu = Menu(self.menubar, "File")
    self.filemenu = file_menu
    self._quititem = MenuItem(file_menu, "Quit", "Q", self._quit)
def gethelpmenu(self):
    """Return the application's Help menu, creating it lazily.

    Uses identity comparison with None per PEP 8 (was '== None',
    which can invoke a custom __eq__); matches the newer revision
    of this framework.
    """
    if self._helpmenu is None:
        self._helpmenu = HelpMenu(self.menubar)
    return self._helpmenu
def _quit(self, *args):
    """Menu callback: ask the main event loop to exit."""
    self.quitting = 1
def cleanup(self):
    """Close every window; return true when all windows are gone."""
    for w in self._windows.values():
        w.do_close()
    return self._windows == {}
def appendwindow(self, wid, window):
    """Register a Window object under its toolbox window id."""
    self._windows[wid] = window
def removewindow(self, wid):
    """Forget the Window registered under the given window id."""
    del self._windows[wid]
def getabouttext(self):
    """Return the label for the 'About' Apple-menu item."""
    return "About %s..." % self.__class__.__name__
def do_about(self, id, item, window, event):
    """Default About handler: show a trivial message dialog."""
    EasyDialogs.Message("Hello, world!" + "\015(%s)" % self.__class__.__name__)
# The main event loop is broken up in several simple steps.
# This is done so you can override each individual part,
# if you have a need to do extra processing independent of the
# event type.
# Normally, however, you'd just define handlers for individual
# events.
schedparams = (0, 0)    # By default disable Python's event handling
default_wait = None     # By default we wait GetCaretTime in WaitNextEvent
def mainloop(self, mask = everyEvent, wait = None):
    """Run the event loop until _quit() is called or 'self' is raised."""
    self.quitting = 0
    if hasattr(MacOS, 'SchedParams'):
        # Remember the interpreter's scheduler settings for restoration.
        saveparams = MacOS.SchedParams(*self.schedparams)
    try:
        while not self.quitting:
            try:
                self.do1event(mask, wait)
            except (Application, SystemExit):
                # Note: the raising of "self" is old-fashioned idiom to
                # exit the mainloop. Calling _quit() is better for new
                # applications.
                break
    finally:
        if hasattr(MacOS, 'SchedParams'):
            MacOS.SchedParams(*saveparams)
def dopendingevents(self, mask = everyEvent):
    """dopendingevents - Handle all pending events"""
    while 1:
        if not self.do1event(mask, wait=0):
            return
def do1event(self, mask = everyEvent, wait = None):
    """Fetch one event and route it to dialog, dispatch or idle handling."""
    got_one, event = self.getevent(mask, wait)
    # Modeless dialogs get first refusal on any event.
    if IsDialogEvent(event) and self.do_dialogevent(event):
        return
    if got_one:
        self.dispatch(event)
    else:
        self.idle(event)
def idle(self, event):
    """Hook called when no event is pending; default does nothing."""
    pass
def getevent(self, mask = everyEvent, wait = None):
    """Wait for the next event; redraw the menu bar first if scheduled."""
    if self.needmenubarredraw:
        DrawMenuBar()
        self.needmenubarredraw = 0
    if wait is None:
        wait = self.default_wait
        if wait is None:
            wait = GetCaretTime()
    return WaitNextEvent(mask, wait)
def dispatch(self, event):
    """Look up and call the do_<eventname> handler for *event*."""
    # The following appears to be double work (already done in do1event)
    # but we need it for asynchronous event handling
    if IsDialogEvent(event):
        if self.do_dialogevent(event):
            return
    (what, message, when, where, modifiers) = event
    if eventname.has_key(what):
        name = "do_" + eventname[what]
    else:
        name = "do_%d" % what
    try:
        handler = getattr(self, name)
    except AttributeError:
        handler = self.do_unknownevent
    handler(event)
def asyncevents(self, onoff):
    """asyncevents - Set asynchronous event handling on or off.

    Returns the previous setting.  Raises RuntimeError under MachO
    Python, where asynchronous toolbox event handling is unsupported.
    """
    if MacOS.runtimemodel == 'macho':
        # Was a string exception, which itself raises TypeError on
        # Python 2.6+; use a real exception object instead.
        raise RuntimeError('Unsupported in MachoPython')
    old = self._doing_asyncevents
    if old:
        # Uninstall the current handler and restore scheduler settings.
        MacOS.SetEventHandler()
        MacOS.SchedParams(*self.schedparams)
    if onoff:
        MacOS.SetEventHandler(self.dispatch)
        doint, dummymask, benice, howoften, bgyield = \
            self.schedparams
        # Listen for every event while the async handler is installed.
        MacOS.SchedParams(doint, everyEvent, benice,
            howoften, bgyield)
    self._doing_asyncevents = onoff
    return old
def do_dialogevent(self, event):
gotone, dlg, item = DialogSelect(event)
if gotone:
window = dlg.GetDialogWindow()
if self._windows.has_key(window):
self._windows[window].do_itemhit(item, event)
else:
print 'Dialog event for unknown dialog'
return 1
return 0
def do_mouseDown(self, event):
    """Route a mouse-down event to the right window/part handler."""
    (what, message, when, where, modifiers) = event
    partcode, wid = FindWindow(where)
    #
    # Map the part code onto a handler name like 'do_inContent'.
    #
    if partname.has_key(partcode):
        name = "do_" + partname[partcode]
    else:
        name = "do_%d" % partcode
    # Identity comparison with None per PEP 8 (was '== None'); matches
    # the newer revision of this framework.
    if wid is None:
        # No window, or a non-python window
        try:
            handler = getattr(self, name)
        except AttributeError:
            # Not menubar or something, so assume someone
            # else's window
            if hasattr(MacOS, 'HandleEvent'):
                MacOS.HandleEvent(event)
            return
    elif self._windows.has_key(wid):
        # It is a window. Hand off to correct window.
        window = self._windows[wid]
        try:
            handler = getattr(window, name)
        except AttributeError:
            handler = self.do_unknownpartcode
    else:
        # It is a python-toolbox window, but not ours.
        handler = self.do_unknownwindow
    handler(partcode, wid, event)
def do_inSysWindow(self, partcode, window, event):
if hasattr(MacOS, 'HandleEvent'):
MacOS.HandleEvent(event)
def do_inDesk(self, partcode, window, event):
if hasattr(MacOS, 'HandleEvent'):
MacOS.HandleEvent(event)
    def do_inMenuBar(self, partcode, window, event):
        """Click in the menu bar: run MenuSelect and dispatch the choice."""
        if not self.menubar:
            if hasattr(MacOS, 'HandleEvent'):
                MacOS.HandleEvent(event)
            return
        (what, message, when, where, modifiers) = event
        result = MenuSelect(where)
        id = (result>>16) & 0xffff      # Hi word
        # Menu ids are signed 16-bit values: sign-extend by hand
        if id >= 0x8000:
            id = -65536 + id
        item = result & 0xffff      # Lo word
        self.do_rawmenu(id, item, window, event)
    def do_rawmenu(self, id, item, window, event):
        """Dispatch a menu selection, always unhighlighting the menu title."""
        try:
            self.do_menu(id, item, window, event)
        finally:
            HiliteMenu(0)
    def do_menu(self, id, item, window, event):
        """Forward a menu selection to the menubar object."""
        if hasattr(MacOS, 'OutputSeen'):
            MacOS.OutputSeen()
        self.menubar.dispatch(id, item, window, event)
    def do_unknownpartcode(self, partcode, window, event):
        """Fallback for clicks in window parts we have no handler for."""
        (what, message, when, where, modifiers) = event
        if DEBUG: print "Mouse down at global:", where
        if DEBUG: print "\tUnknown part code:", partcode
        if DEBUG: print "\tEvent:", self.printevent(event)
        if hasattr(MacOS, 'HandleEvent'):
            MacOS.HandleEvent(event)
    def do_unknownwindow(self, partcode, window, event):
        """Fallback for clicks in python-toolbox windows we do not own."""
        if DEBUG: print 'Unknown window:', window
        if hasattr(MacOS, 'HandleEvent'):
            MacOS.HandleEvent(event)
    def do_keyDown(self, event):
        """Key pressed: handled like any other key event."""
        self.do_key(event)
    def do_autoKey(self, event):
        """Auto-repeat key: ignored when the command key is held down."""
        if not event[-1] & cmdKey:
            self.do_key(event)
    def do_key(self, event):
        """Decode a key event: try it as a menu shortcut first, then as a
        character destined for the frontmost window."""
        (what, message, when, where, modifiers) = event
        c = chr(message & charCodeMask)
        if self.menubar:
            result = MenuEvent(event)
            id = (result>>16) & 0xffff      # Hi word
            item = result & 0xffff          # Lo word
            if id:
                self.do_rawmenu(id, item, None, event)
                return
            # Otherwise we fall-through
        if modifiers & cmdKey:
            if c == '.':
                # Cmd-. aborts: raising the app object ends the mainloop
                raise self
            else:
                if not self.menubar:
                    if hasattr(MacOS, 'HandleEvent'):
                        MacOS.HandleEvent(event)
                    return
        else:
            # See whether the front window wants it
            w = MyFrontWindow()
            if w and self._windows.has_key(w):
                window = self._windows[w]
                try:
                    do_char = window.do_char
                except AttributeError:
                    do_char = self.do_char
                do_char(c, event)
            # else it wasn't for us, sigh...
    def do_char(self, c, event):
        """Default character handler; subclasses override this."""
        if DEBUG: print "Character", `c`
    def do_updateEvt(self, event):
        """Update event: forward to the owning window's do_rawupdate."""
        (what, message, when, where, modifiers) = event
        wid = WhichWindow(message)
        if wid and self._windows.has_key(wid):
            window = self._windows[wid]
            window.do_rawupdate(wid, event)
        else:
            if hasattr(MacOS, 'HandleEvent'):
                MacOS.HandleEvent(event)
    def do_activateEvt(self, event):
        """Activate event: bit 0 of modifiers says activate/deactivate."""
        (what, message, when, where, modifiers) = event
        wid = WhichWindow(message)
        if wid and self._windows.has_key(wid):
            window = self._windows[wid]
            window.do_activate(modifiers & 1, event)
        else:
            if hasattr(MacOS, 'HandleEvent'):
                MacOS.HandleEvent(event)
    def do_osEvt(self, event):
        """OS event: the high byte of message selects the subtype."""
        (what, message, when, where, modifiers) = event
        which = (message >> 24) & 0xff
        if which == 1:  # suspend/resume
            self.do_suspendresume(event)
        else:
            if DEBUG:
                print 'unknown osEvt:',
                self.printevent(event)
    def do_suspendresume(self, event):
        """Suspend/resume: treated as (de)activation of the front window."""
        (what, message, when, where, modifiers) = event
        wid = MyFrontWindow()
        if wid and self._windows.has_key(wid):
            window = self._windows[wid]
            window.do_activate(message & 1, event)
    def do_kHighLevelEvent(self, event):
        """High level (Apple) event: pass to the Apple Event Manager.

        Errors are deliberately swallowed; enable the commented lines
        below to debug failing Apple events.
        """
        (what, message, when, where, modifiers) = event
        if DEBUG:
            print "High Level Event:",
            self.printevent(event)
        try:
            AEProcessAppleEvent(event)
        except:
            pass
            #print "AEProcessAppleEvent error:"
            #traceback.print_exc()
    def do_unknownevent(self, event):
        """Fallback for events with no dedicated do_* handler."""
        if DEBUG:
            print "Unhandled event:",
            self.printevent(event)
    def printevent(self, event):
        """Print a human-readable one-line description of an event tuple."""
        (what, message, when, where, modifiers) = event
        nicewhat = `what`
        if eventname.has_key(what):
            nicewhat = eventname[what]
        print nicewhat,
        if what == kHighLevelEvent:
            # For Apple events, message/where encode the event class/id
            h, v = where
            print `ostypecode(message)`, hex(when), `ostypecode(h | (v<<16))`,
        else:
            print hex(message), hex(when), where,
        print hex(modifiers)
class MenuBar:
    """Represent a set of menus in a menu bar.
    Interface:
    - (constructor)
    - (destructor)
    - addmenu
    - addpopup (normally used internally)
    - dispatch (called from Application)
    """
    nextid = 1          # Necessarily a class variable
    def getnextid(self):
        """Return a fresh process-wide unique menu id."""
        id = MenuBar.nextid
        MenuBar.nextid = id+1
        return id
    def __init__(self, parent=None):
        self.parent = parent
        ClearMenuBar()
        self.bar = GetMenuBar()
        self.menus = {}
    # XXX necessary?
    def close(self):
        """Drop all references held by the bar."""
        self.parent = None
        self.bar = None
        self.menus = None
    def addmenu(self, title, after = 0, id=None):
        """Create and insert a menu; returns (id, toolbox menu)."""
        if id == None:
            id = self.getnextid()
        if DEBUG: print 'Newmenu', title, id # XXXX
        m = NewMenu(id, title)
        m.InsertMenu(after)
        if after >= 0:
            # Visible in the bar: schedule (or perform) a redraw
            if self.parent:
                self.parent.needmenubarredraw = 1
            else:
                DrawMenuBar()
        return id, m
    def delmenu(self, id):
        """Remove the menu with the given id from the bar."""
        if DEBUG: print 'Delmenu', id # XXXX
        DeleteMenu(id)
    def addpopup(self, title = ''):
        """Create a popup/hierarchical menu; after=-1 keeps it off the bar."""
        return self.addmenu(title, -1)
    # Useless:
    # def install(self):
    #     if not self.bar: return
    #     SetMenuBar(self.bar)
    #     if self.parent:
    #         self.parent.needmenubarredraw = 1
    #     else:
    #         DrawMenuBar()
    def fixmenudimstate(self):
        """Enable/disable items with string callbacks depending on whether
        a matching domenu_<name> handler currently exists."""
        for m in self.menus.keys():
            menu = self.menus[m]
            if menu.__class__ == FrameWork.AppleMenu:
                continue
            for i in range(len(menu.items)):
                label, shortcut, callback, kind = menu.items[i]
                if type(callback) == types.StringType:
                    wid = MyFrontWindow()
                    if wid and self.parent._windows.has_key(wid):
                        window = self.parent._windows[wid]
                        if hasattr(window, "domenu_" + callback):
                            menu.menu.EnableMenuItem(i + 1)
                        elif hasattr(self.parent, "domenu_" + callback):
                            menu.menu.EnableMenuItem(i + 1)
                        else:
                            menu.menu.DisableMenuItem(i + 1)
                    elif hasattr(self.parent, "domenu_" + callback):
                        menu.menu.EnableMenuItem(i + 1)
                    else:
                        menu.menu.DisableMenuItem(i + 1)
                elif callback:
                    pass
    def dispatch(self, id, item, window, event):
        """Forward a selection to the Menu object registered under id."""
        if self.menus.has_key(id):
            self.menus[id].dispatch(id, item, window, event)
        else:
            if DEBUG: print "MenuBar.dispatch(%d, %d, %s, %s)" % \
                (id, item, window, event)
class Menu:
    "One menu."
    def __init__(self, bar, title, after=0, id=None):
        self.bar = bar
        self.id, self.menu = self.bar.addmenu(title, after, id)
        bar.menus[self.id] = self
        self.items = []
        self._parent = None
    def delete(self):
        """Remove this menu from the bar and break all references."""
        self.bar.delmenu(self.id)
        del self.bar.menus[self.id]
        self.menu.DisposeMenu()
        del self.bar
        del self.items
        del self.menu
        del self.id
        del self._parent
    def additem(self, label, shortcut=None, callback=None, kind=None):
        """Append an item and return its 1-based index.

        shortcut is either a command-key character or a tuple
        (modifiers, char[, keyglyph]).
        """
        self.menu.AppendMenu('x') # add a dummy string
        self.items.append((label, shortcut, callback, kind))
        item = len(self.items)
        if isinstance(label, unicode):
            self.menu.SetMenuItemTextWithCFString(item, label)
        else:
            self.menu.SetMenuItemText(item, label)
        if shortcut and type(shortcut) == type(()):
            modifiers, char = shortcut[:2]
            self.menu.SetItemCmd(item, ord(char))
            self.menu.SetMenuItemModifiers(item, modifiers)
            if len(shortcut) > 2:
                self.menu.SetMenuItemKeyGlyph(item, shortcut[2])
        elif shortcut:
            self.menu.SetItemCmd(item, ord(shortcut))
        return item
    def delitem(self, item):
        """Delete an item; only the last item of the menu may be removed."""
        if item != len(self.items):
            raise 'Can only delete last item of a menu'
        self.menu.DeleteMenuItem(item)
        del self.items[item-1]
    def addcheck(self, label, shortcut=None, callback=None):
        """Append a checkmark item."""
        return self.additem(label, shortcut, callback, 'check')
    def addradio(self, label, shortcut=None, callback=None):
        """Append a radio-button item."""
        return self.additem(label, shortcut, callback, 'radio')
    def addseparator(self):
        """Append a separator line."""
        self.menu.AppendMenu('(-')
        self.items.append(('', None, None, 'separator'))
    def addsubmenu(self, label, title=''):
        """Append a hierarchical submenu; returns the new Menu object."""
        sub = Menu(self.bar, title, -1)
        item = self.additem(label, '\x1B', None, 'submenu')
        self.menu.SetItemMark(item, sub.id)
        sub._parent = self
        sub._parent_item = item
        return sub
    def dispatch(self, id, item, window, event):
        """Invoke an item's callback.  String callbacks are resolved as
        domenu_<name> on the front window first, then on the application."""
        title, shortcut, callback, mtype = self.items[item-1]
        if callback:
            if not self.bar.parent or type(callback) <> types.StringType:
                menuhandler = callback
            else:
                # callback is string
                wid = MyFrontWindow()
                if wid and self.bar.parent._windows.has_key(wid):
                    window = self.bar.parent._windows[wid]
                    if hasattr(window, "domenu_" + callback):
                        menuhandler = getattr(window, "domenu_" + callback)
                    elif hasattr(self.bar.parent, "domenu_" + callback):
                        menuhandler = getattr(self.bar.parent, "domenu_" + callback)
                    else:
                        # nothing we can do. we shouldn't have come this far
                        # since the menu item should have been disabled...
                        return
                elif hasattr(self.bar.parent, "domenu_" + callback):
                    menuhandler = getattr(self.bar.parent, "domenu_" + callback)
                else:
                    # nothing we can do. we shouldn't have come this far
                    # since the menu item should have been disabled...
                    return
            menuhandler(id, item, window, event)
    def enable(self, onoff):
        """Enable/disable the whole menu (and its parent submenu item)."""
        if onoff:
            self.menu.EnableMenuItem(0)
            if self._parent:
                self._parent.menu.EnableMenuItem(self._parent_item)
        else:
            self.menu.DisableMenuItem(0)
            if self._parent:
                self._parent.menu.DisableMenuItem(self._parent_item)
        if self.bar and self.bar.parent:
            self.bar.parent.needmenubarredraw = 1
class PopupMenu(Menu):
    """A menu that is shown on demand rather than living in the bar."""
    def __init__(self, bar):
        Menu.__init__(self, bar, '(popup)', -1)
    def popup(self, x, y, event, default=1, window=None):
        """Pop up the menu at (x, y) and dispatch the chosen item."""
        # NOTE that x and y are global coordinates, and they should probably
        # be topleft of the button the user clicked (not mouse-coordinates),
        # so the popup nicely overlaps.
        reply = self.menu.PopUpMenuSelect(x, y, default)
        if not reply:
            return
        id = (reply >> 16) & 0xffff
        item = reply & 0xffff
        if not window:
            wid = MyFrontWindow()
            try:
                window = self.bar.parent._windows[wid]
            except:
                pass # If we can't find the window we pass None
        self.dispatch(id, item, window, event)
class MenuItem:
    """A single item in a Menu, identified by its 1-based index."""
    def __init__(self, menu, title, shortcut=None, callback=None, kind=None):
        # NOTE(review): kind is accepted but not forwarded to additem(),
        # so subclasses' 'radio'/'check' kinds are dropped — verify intent.
        self.item = menu.additem(title, shortcut, callback)
        self.menu = menu
    def delete(self):
        self.menu.delitem(self.item)
        del self.menu
        del self.item
    def check(self, onoff):
        """Set or clear the item's checkmark."""
        self.menu.menu.CheckMenuItem(self.item, onoff)
    def enable(self, onoff):
        """Enable or disable the item."""
        if onoff:
            self.menu.menu.EnableMenuItem(self.item)
        else:
            self.menu.menu.DisableMenuItem(self.item)
    def settext(self, text):
        self.menu.menu.SetMenuItemText(self.item, text)
    def setstyle(self, style):
        self.menu.menu.SetItemStyle(self.item, style)
    def seticon(self, icon):
        self.menu.menu.SetItemIcon(self.item, icon)
    def setcmd(self, cmd):
        self.menu.menu.SetItemCmd(self.item, cmd)
    def setmark(self, cmd):
        self.menu.menu.SetItemMark(self.item, cmd)
class RadioItem(MenuItem):
    """MenuItem of kind 'radio'."""
    def __init__(self, menu, title, shortcut=None, callback=None):
        MenuItem.__init__(self, menu, title, shortcut, callback, 'radio')
class CheckItem(MenuItem):
    """MenuItem of kind 'check'."""
    def __init__(self, menu, title, shortcut=None, callback=None):
        MenuItem.__init__(self, menu, title, shortcut, callback, 'check')
def Separator(menu):
    """Convenience function: append a separator line to menu."""
    menu.addseparator()
def SubMenu(menu, label, title=''):
    """Convenience function: append and return a submenu of menu."""
    return menu.addsubmenu(label, title)
class AppleMenu(Menu):
    """The special 'apple' menu holding the About item."""
    def __init__(self, bar, abouttext="About me...", aboutcallback=None):
        Menu.__init__(self, bar, "\024", id=SIOUX_APPLEMENU_ID)
        if MacOS.runtimemodel == 'ppc':
            self.additem(abouttext, None, aboutcallback)
            self.addseparator()
            self.menu.AppendResMenu('DRVR')
        else:
            # Additem()'s tricks do not work for "apple" menu under Carbon
            self.menu.InsertMenuItem(abouttext, 0)
            self.items.append((abouttext, None, aboutcallback, None))
    def dispatch(self, id, item, window, event):
        """Item 1 is About; other items open desk accessories (ppc only)."""
        if item == 1:
            Menu.dispatch(self, id, item, window, event)
        elif MacOS.runtimemodel == 'ppc':
            name = self.menu.GetMenuItemText(item)
            OpenDeskAcc(name)
class HelpMenu(Menu):
    """Wrapper around the system-provided Help menu."""
    def __init__(self, bar):
        # Note we don't call Menu.__init__, we do the necessary things by hand
        self.bar = bar
        self.menu, index = HMGetHelpMenu()
        self.id = self.menu.GetMenuID()
        bar.menus[self.id] = self
        # The next line caters for the entries the system already handles for us
        self.items = [None]*(index-1)
        self._parent = None
class Window:
    """A single window belonging to an application"""
    def __init__(self, parent):
        self.wid = None
        self.parent = parent
    def open(self, bounds=(40, 40, 400, 400), resid=None):
        """Create the toolbox window, from a WIND resource if resid given."""
        if resid <> None:
            self.wid = GetNewWindow(resid, -1)
        else:
            self.wid = NewWindow(bounds, self.__class__.__name__, 1,
                                 8, -1, 1, 0)   # changed to proc id 8 to include zoom box. jvr
        self.do_postopen()
    def do_postopen(self):
        """Tell our parent we exist"""
        self.parent.appendwindow(self.wid, self)
    def close(self):
        self.do_postclose()
    def do_postclose(self):
        """Deregister from the parent and drop toolbox references."""
        self.parent.removewindow(self.wid)
        self.parent = None
        self.wid = None
    def SetPort(self):
        # Convinience method
        SetPort(self.wid)
    def GetWindow(self):
        """Return the underlying toolbox window object."""
        return self.wid
    def do_inDrag(self, partcode, window, event):
        """Title-bar drag: move the window, constrained by draglimit."""
        where = event[3]
        window.DragWindow(where, self.draglimit)
    draglimit = screenbounds
    def do_inGoAway(self, partcode, window, event):
        """Close-box click: close if the user completes the click."""
        where = event[3]
        if window.TrackGoAway(where):
            self.close()
    def do_inZoom(self, partcode, window, event):
        """Zoom-box click: zoom and report the resulting size."""
        (what, message, when, where, modifiers) = event
        if window.TrackBox(where, partcode):
            window.ZoomWindow(partcode, 1)
            rect = window.GetWindowUserState()   # so that zoom really works... jvr
            self.do_postresize(rect[2] - rect[0], rect[3] - rect[1], window)  # jvr
    def do_inZoomIn(self, partcode, window, event):
        SetPort(window) # !!!
        self.do_inZoom(partcode, window, event)
    def do_inZoomOut(self, partcode, window, event):
        SetPort(window) # !!!
        self.do_inZoom(partcode, window, event)
    def do_inGrow(self, partcode, window, event):
        """Grow-box drag: resize within growlimit."""
        (what, message, when, where, modifiers) = event
        result = window.GrowWindow(where, self.growlimit)
        if result:
            height = (result>>16) & 0xffff      # Hi word
            width = result & 0xffff             # Lo word
            self.do_resize(width, height, window)
    growlimit = (50, 50, screenbounds[2] - screenbounds[0], screenbounds[3] - screenbounds[1]) # jvr
    def do_resize(self, width, height, window):
        """Apply a new size; invalidates the grow-icon area first."""
        l, t, r, b = self.wid.GetWindowPort().GetPortBounds()   # jvr, forGrowIcon
        self.SetPort()                                          # jvr
        self.wid.InvalWindowRect((r - SCROLLBARWIDTH + 1, b - SCROLLBARWIDTH + 1, r, b))    # jvr
        window.SizeWindow(width, height, 1)     # changed updateFlag to true jvr
        self.do_postresize(width, height, window)
    def do_postresize(self, width, height, window):
        """Hook called after a resize; default invalidates everything."""
        SetPort(window)
        self.wid.InvalWindowRect(window.GetWindowPort().GetPortBounds())
    def do_inContent(self, partcode, window, event):
        #
        # If we're not frontmost, select ourselves and wait for
        # the activate event.
        #
        if MyFrontWindow() <> window:
            window.SelectWindow()
            return
        # We are. Handle the event.
        (what, message, when, where, modifiers) = event
        SetPort(window)
        local = GlobalToLocal(where)
        self.do_contentclick(local, modifiers, event)
    def do_contentclick(self, local, modifiers, event):
        """Hook: click in the content area, in local coordinates."""
        if DEBUG:
            print 'Click in contents at %s, modifiers %s'%(local, modifiers)
    def do_rawupdate(self, window, event):
        """Bracket do_update() with BeginUpdate/EndUpdate."""
        if DEBUG: print "raw update for", window
        SetPort(window)
        window.BeginUpdate()
        self.do_update(window, event)
        window.EndUpdate()
    def do_update(self, window, event):
        """Hook: redraw the window; default erases (flashes in DEBUG)."""
        if DEBUG:
            import time
            for i in range(8):
                time.sleep(0.1)
                InvertRgn(window.GetWindowPort().visRgn)
            FillRgn(window.GetWindowPort().visRgn, GetQDGlobalsGray())
        else:
            EraseRgn(window.GetWindowPort().visRgn)
    def do_activate(self, activate, event):
        """Hook: window became active (1) or inactive (0)."""
        if DEBUG: print 'Activate %d for %s'%(activate, self.wid)
class ControlsWindow(Window):
    """A Window that hosts toolbox controls."""
    def do_rawupdate(self, window, event):
        """Update that also redraws controls and the grow icon."""
        if DEBUG: print "raw update for", window
        SetPort(window)
        window.BeginUpdate()
        self.do_update(window, event)
        #DrawControls(window)                   # jvr
        UpdateControls(window, window.GetWindowPort().visRgn) # jvr
        window.DrawGrowIcon()
        window.EndUpdate()
    def do_controlhit(self, window, control, pcode, event):
        """Hook: a control was hit; default only logs in DEBUG mode."""
        if DEBUG: print "control hit in", window, "on", control, "; pcode =", pcode
    def do_inContent(self, partcode, window, event):
        """Content click: route to a control if one was hit, otherwise
        fall back to do_contentclick."""
        if MyFrontWindow() <> window:
            window.SelectWindow()
            return
        (what, message, when, where, modifiers) = event
        SetPort(window)         # XXXX Needed?
        local = GlobalToLocal(where)
        pcode, control = FindControl(local, window)
        if pcode and control:
            self.do_rawcontrolhit(window, control, pcode, local, event)
        else:
            if DEBUG: print "FindControl(%s, %s) -> (%s, %s)" % \
                (local, window, pcode, control)
            self.do_contentclick(local, modifiers, event)
    def do_rawcontrolhit(self, window, control, pcode, local, event):
        """Track the control; report the final part code if nonzero."""
        pcode = control.TrackControl(local)
        if pcode:
            self.do_controlhit(window, control, pcode, event)
class ScrolledWindow(ControlsWindow):
    """A ControlsWindow with optional horizontal/vertical scrollbars."""
    def __init__(self, parent):
        self.barx = self.bary = None
        self.barx_enabled = self.bary_enabled = 1
        self.activated = 1
        ControlsWindow.__init__(self, parent)
    def scrollbars(self, wantx=1, wanty=1):
        """Create the scrollbar controls; call after open()."""
        SetPort(self.wid)
        self.barx = self.bary = None
        self.barx_enabled = self.bary_enabled = 1
        x0, y0, x1, y1 = self.wid.GetWindowPort().GetPortBounds()
        vx, vy = self.getscrollbarvalues()
        if vx == None: self.barx_enabled, vx = 0, 0
        if vy == None: self.bary_enabled, vy = 0, 0
        if wantx:
            rect = x0-1, y1-(SCROLLBARWIDTH-1), x1-(SCROLLBARWIDTH-2), y1+1
            self.barx = NewControl(self.wid, rect, "", 1, vx, 0, 32767, 16, 0)
            if not self.barx_enabled: self.barx.HiliteControl(255)
##            self.wid.InvalWindowRect(rect)
        if wanty:
            rect = x1-(SCROLLBARWIDTH-1), y0-1, x1+1, y1-(SCROLLBARWIDTH-2)
            self.bary = NewControl(self.wid, rect, "", 1, vy, 0, 32767, 16, 0)
            if not self.bary_enabled: self.bary.HiliteControl(255)
##            self.wid.InvalWindowRect(rect)
    def do_postclose(self):
        """Drop scrollbar references before the regular close."""
        self.barx = self.bary = None
        ControlsWindow.do_postclose(self)
    def do_activate(self, onoff, event):
        """Show scrollbars when active, hide them when inactive."""
        self.activated = onoff
        if onoff:
            if self.barx and self.barx_enabled:
                self.barx.ShowControl() # jvr
            if self.bary and self.bary_enabled:
                self.bary.ShowControl() # jvr
        else:
            if self.barx:
                self.barx.HideControl() # jvr; An inactive window should have *hidden*
                                        # scrollbars, not just dimmed (no matter what
                                        # BBEdit does... look at the Finder)
            if self.bary:
                self.bary.HideControl() # jvr
        self.wid.DrawGrowIcon() # jvr
    def do_postresize(self, width, height, window):
        """Move/resize the scrollbars to hug the new window edges."""
        l, t, r, b = self.wid.GetWindowPort().GetPortBounds()
        self.SetPort()
        if self.barx:
            self.barx.HideControl() # jvr
            self.barx.MoveControl(l-1, b-(SCROLLBARWIDTH-1))
            self.barx.SizeControl((r-l)-(SCROLLBARWIDTH-3), SCROLLBARWIDTH) # jvr
        if self.bary:
            self.bary.HideControl() # jvr
            self.bary.MoveControl(r-(SCROLLBARWIDTH-1), t-1)
            self.bary.SizeControl(SCROLLBARWIDTH, (b-t)-(SCROLLBARWIDTH-3)) # jvr
        if self.barx:
            self.barx.ShowControl() # jvr
            self.wid.ValidWindowRect((l, b - SCROLLBARWIDTH + 1, r - SCROLLBARWIDTH + 2, b)) # jvr
        if self.bary:
            self.bary.ShowControl() # jvr
            self.wid.ValidWindowRect((r - SCROLLBARWIDTH + 1, t, r, b - SCROLLBARWIDTH + 2)) # jvr
        self.wid.InvalWindowRect((r - SCROLLBARWIDTH + 1, b - SCROLLBARWIDTH + 1, r, b))    # jvr, growicon
    def do_rawcontrolhit(self, window, control, pcode, local, event):
        """Handle clicks in our scrollbars; return 1 if the control was ours."""
        if control == self.barx:
            which = 'x'
        elif control == self.bary:
            which = 'y'
        else:
            return 0
        if pcode in (inUpButton, inDownButton, inPageUp, inPageDown):
            # We do the work for the buttons and grey area in the tracker
            dummy = control.TrackControl(local, self.do_controltrack)
        else:
            # but the thumb is handled here
            pcode = control.TrackControl(local)
            if pcode == inThumb:
                value = control.GetControlValue()
                print 'setbars', which, value #DBG
                self.scrollbar_callback(which, 'set', value)
                self.updatescrollbars()
            else:
                print 'funny part', pcode #DBG
        return 1
    def do_controltrack(self, control, pcode):
        """TrackControl action proc for the arrow and page regions."""
        if control == self.barx:
            which = 'x'
        elif control == self.bary:
            which = 'y'
        else:
            return
        if pcode == inUpButton:
            what = '-'
        elif pcode == inDownButton:
            what = '+'
        elif pcode == inPageUp:
            what = '--'
        elif pcode == inPageDown:
            what = '++'
        else:
            return
        self.scrollbar_callback(which, what, None)
        self.updatescrollbars()
    def updatescrollbars(self):
        """Refresh thumb positions/enabled state from getscrollbarvalues()."""
        SetPort(self.wid)
        vx, vy = self.getscrollbarvalues()
        if self.barx:
            if vx == None:
                self.barx.HiliteControl(255)
                self.barx_enabled = 0
            else:
                if not self.barx_enabled:
                    self.barx_enabled = 1
                    if self.activated:
                        self.barx.HiliteControl(0)
                self.barx.SetControlValue(vx)
        if self.bary:
            if vy == None:
                self.bary.HiliteControl(255)
                self.bary_enabled = 0
            else:
                if not self.bary_enabled:
                    self.bary_enabled = 1
                    if self.activated:
                        self.bary.HiliteControl(0)
                self.bary.SetControlValue(vy)
    # Auxiliary function: convert standard text/image/etc coordinate
    # to something palatable as getscrollbarvalues() return
    def scalebarvalue(self, absmin, absmax, curmin, curmax):
        """Scale a document range into the 0..32767 control range."""
        if curmin <= absmin and curmax >= absmax:
            return None
        if curmin <= absmin:
            return 0
        if curmax >= absmax:
            return 32767
        perc = float(curmin-absmin)/float(absmax-absmin)
        return int(perc*32767)
    # To be overridden:
    def getscrollbarvalues(self):
        """Override: return (vx, vy) thumb values, or None to disable a bar."""
        return 0, 0
    def scrollbar_callback(self, which, what, value):
        """Override: which is 'x'/'y'; what is '-', '+', '--', '++' or 'set'."""
        print 'scroll', which, what, value
class DialogWindow(Window):
    """A modeless dialog window"""
    def open(self, resid):
        """Load the DLOG resource and register the window."""
        self.dlg = GetNewDialog(resid, -1)
        self.wid = self.dlg.GetDialogWindow()
        self.do_postopen()
    def close(self):
        self.do_postclose()
    def do_postclose(self):
        self.dlg = None
        Window.do_postclose(self)
    def do_itemhit(self, item, event):
        """Hook: a dialog item was hit (called from do_dialogevent)."""
        print 'Dialog %s, item %d hit'%(self.dlg, item)
    def do_rawupdate(self, window, event):
        # Dialog updates are handled by DialogSelect itself
        pass
def ostypecode(x):
    """Convert a long integer to the 4-character OSType code it encodes."""
    chars = []
    for _ in range(4):
        x, byte = divmod(x, 256)
        chars.append(chr(byte))
    chars.reverse()
    return ''.join(chars)
class TestApp(Application):
    "This class is used by the test() function"
    def makeusermenus(self):
        """Build the File menu exercised by the test program."""
        self.filemenu = m = Menu(self.menubar, "File")
        self.saveitem = MenuItem(m, "Save", "S", self.save)
        Separator(m)
        self.optionsmenu = mm = SubMenu(m, "Options")
        self.opt1 = CheckItem(mm, "Arguments", "A")
        self.opt2 = CheckItem(mm, "Being hit on the head lessons", (kMenuOptionModifier, "A"))
        self.opt3 = CheckItem(mm, "Complaints", (kMenuOptionModifier|kMenuNoCommandModifier, "A"))
        Separator(m)
        self.itemeh = MenuItem(m, "Enable Help", None, self.enablehelp)
        self.itemdbg = MenuItem(m, "Debug", None, self.debug)
        Separator(m)
        self.quititem = MenuItem(m, "Quit", "Q", self.quit)
    def save(self, *args):
        print "Save"
    def quit(self, *args):
        # Raising the application object terminates the main loop
        raise self
    def enablehelp(self, *args):
        hm = self.gethelpmenu()
        self.nohelpitem = MenuItem(hm, "There isn't any", None, self.nohelp)
    def nohelp(self, *args):
        print "I told you there isn't any!"
    def debug(self, *args):
        import pdb
        pdb.set_trace()
def test():
    "Test program"
    # mainloop() blocks until the Quit menu item raises the app object
    app = TestApp()
    app.mainloop()
if __name__ == '__main__':
test()
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-2.3/Lib/plat-mac/FrameWork.py",
"copies": "1",
"size": "37559",
"license": "mit",
"hash": 7710305092704972000,
"line_mean": 32.4452359751,
"line_max": 107,
"alpha_frac": 0.5655102639,
"autogenerated": false,
"ratio": 3.7495258061295798,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9796179928080293,
"avg_score": 0.0037712283898574347,
"num_lines": 1123
} |
"""ASOS comparisons"""
import datetime
import calendar
import numpy as np
from pandas.io.sql import read_sql
from pyiem.plot import get_cmap
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
# Maps the "varname" option value to the human-readable label shown in the UI.
PDICT = {
    "low": "Morning Low (midnight to 8 AM)",
    "high": "Afternoon High (noon to 8 PM)",
}
def get_description():
    """ Return a dict describing how to call this plotter """
    desc = dict()
    desc["data"] = True
    desc["cache"] = 86400  # cache lifetime in seconds (one day)
    desc[
        "description"
    ] = """This application plots the difference in morning
    low or afternoon high temperature between two sites of your choice.
    The morning is
    defined as the period between midnight and 8 AM local time. The afternoon
    high is defined as the period between noon and 8 PM. If any difference
    is greater than 25 degrees, it is omitted from this analysis. This app
    may take a while to generate a plot, so please be patient!"""
    desc["arguments"] = [
        dict(
            type="zstation",
            name="zstation1",
            default="ALO",
            network="IA_ASOS",
            label="Select Station 1:",
        ),
        dict(
            type="zstation",
            name="zstation2",
            default="OLZ",
            network="AWOS",
            label="Select Station 2:",
        ),
        dict(
            type="select",
            name="varname",
            default="low",
            options=PDICT,
            label="Select Comparison",
        ),
        dict(type="cmap", name="cmap", default="Greens", label="Color Ramp:"),
    ]
    return desc
def plotter(fdict):
    """ Go """
    pgconn = get_dbconn("asos")
    ctx = get_autoplot_context(fdict, get_description())
    station1 = ctx["zstation1"]
    station2 = ctx["zstation2"]
    varname = ctx["varname"]
    # Aggregate function and local-hour window depend on low vs high mode
    aggfunc = "min"
    tlimit = "0 and 8"
    if varname == "high":
        aggfunc = "max"
        tlimit = "12 and 20"
    # aggfunc/tlimit are f-string interpolated into the SQL, but both come
    # from the closed set of literals above, so no injection risk here.
    df = read_sql(
        f"""
    WITH one as (
        SELECT date(valid), {aggfunc}(tmpf::int) as d, avg(sknt)
        from alldata where station = %s
        and extract(hour from valid at time zone %s) between {tlimit}
        and tmpf between -70 and 140 GROUP by date),
    two as (
        SELECT date(valid), {aggfunc}(tmpf::int) as d, avg(sknt)
        from alldata where station = %s
        and extract(hour from valid at time zone %s) between {tlimit}
        and tmpf between -70 and 140 GROUP by date)
    SELECT one.date as day,
    extract(week from one.date) as week,
    one.d - two.d as delta,
    one.avg as sknt,
    two.avg as sknt2
    from one JOIN two on (one.date = two.date) WHERE one.avg >= 0
    and one.d - two.d between -25 and 25
    """,
        pgconn,
        params=(
            station1,
            ctx["_nt1"].sts[station1]["tzname"],
            station2,
            ctx["_nt2"].sts[station2]["tzname"],
        ),
        index_col=None,
    )
    if df.empty:
        raise NoDataFound("No Data Found.")
    # Day-of-year positions for month tick labels (2012 is arbitrary)
    sts = datetime.datetime(2012, 1, 1)
    xticks = []
    for i in range(1, 13):
        ts = sts.replace(month=i)
        xticks.append(int(ts.strftime("%j")))
    (fig, ax) = plt.subplots(2, 1, figsize=(8, 6))
    ax[0].set_title(
        ("[%s] %s minus [%s] %s\n" "%s Temp Difference Period: %s - %s")
        % (
            station1,
            ctx["_nt1"].sts[station1]["name"],
            station2,
            ctx["_nt2"].sts[station2]["name"],
            "Mid - 8 AM Low" if varname == "low" else "Noon - 8 PM High",
            df["day"].min(),
            df["day"].max(),
        )
    )
    # Top panel: 2D histogram of temperature difference by week of year
    bins = np.arange(-20.5, 20.5, 1)
    H, xedges, yedges = np.histogram2d(
        df["week"].values, df["delta"].values, [range(0, 54), bins]
    )
    H = np.ma.array(H)
    # Mask empty cells so they render transparent
    H.mask = np.ma.where(H < 1, True, False)
    ax[0].pcolormesh(
        (xedges - 1) * 7, yedges, H.transpose(), cmap=get_cmap(ctx["cmap"])
    )
    ax[0].set_xticks(xticks)
    ax[0].set_xticklabels(calendar.month_abbr[1:])
    ax[0].set_xlim(0, 366)
    # Weighted mean difference per week, overplotted as a black/white line
    y = []
    for i in range(np.shape(H)[0]):
        y.append(np.ma.sum(H[i, :] * (bins[:-1] + 0.5)) / np.ma.sum(H[i, :]))
    ax[0].plot(xedges[:-1] * 7, y, zorder=3, lw=3, color="k")
    ax[0].plot(xedges[:-1] * 7, y, zorder=3, lw=1, color="w")
    # Symmetric y-range, capped at +/- 12 degrees
    rng = min([max([df["delta"].max(), 0 - df["delta"].min()]), 12])
    ax[0].set_ylim(0 - rng - 2, rng + 2)
    ax[0].grid(True)
    ax[0].set_ylabel(
        ("%s Temp Diff " r"$^\circ$F")
        % ("Low" if varname == "low" else "High",)
    )
    ax[0].text(
        -0.01,
        1.02,
        "%s\nWarmer" % (station1,),
        transform=ax[0].transAxes,
        va="top",
        ha="right",
        fontsize=8,
    )
    ax[0].text(
        -0.01,
        -0.02,
        "%s\nColder" % (station1,),
        transform=ax[0].transAxes,
        va="bottom",
        ha="right",
        fontsize=8,
    )
    # Bottom panel: difference binned by station1 average wind speed
    H, xedges, yedges = np.histogram2d(
        df["sknt"].values, df["delta"].values, [range(0, 31), bins]
    )
    H = np.ma.array(H)
    H.mask = np.where(H < 1, True, False)
    ax[1].pcolormesh(
        (xedges - 0.5), yedges, H.transpose(), cmap=get_cmap(ctx["cmap"])
    )
    # Weighted mean difference per wind-speed bin, skipping empty bins
    y = []
    x = []
    for i in range(np.shape(H)[0]):
        _ = np.ma.sum(H[i, :] * (bins[:-1] + 0.5)) / np.ma.sum(H[i, :])
        if not np.ma.is_masked(_):
            x.append(xedges[i])
            y.append(_)
    ax[1].plot(x, y, zorder=3, lw=3, color="k")
    ax[1].plot(x, y, zorder=3, lw=1, color="w")
    ax[1].set_ylim(0 - rng - 2, rng + 2)
    ax[1].grid(True)
    ax[1].set_xlim(left=-0.25)
    ax[1].set_xlabel("Average Wind Speed [kts] for %s" % (station1,))
    ax[1].set_ylabel(
        ("%s Temp Diff " r"$^\circ$F")
        % ("Low" if varname == "low" else "High",)
    )
    ax[1].text(
        -0.01,
        1.02,
        "%s\nWarmer" % (station1,),
        transform=ax[1].transAxes,
        va="top",
        ha="right",
        fontsize=8,
    )
    ax[1].text(
        -0.01,
        -0.02,
        "%s\nColder" % (station1,),
        transform=ax[1].transAxes,
        va="bottom",
        ha="right",
        fontsize=8,
    )
    return fig, df
if __name__ == "__main__":
plotter(dict())
| {
"repo_name": "akrherz/iem",
"path": "htdocs/plotting/auto/scripts/p54.py",
"copies": "1",
"size": "6270",
"license": "mit",
"hash": 7147107645008939000,
"line_mean": 27.1165919283,
"line_max": 78,
"alpha_frac": 0.5189792663,
"autogenerated": false,
"ratio": 3.1365682841420712,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9155547550442071,
"avg_score": 0,
"num_lines": 223
} |
""" ASOS Daily Summary Message Parser ingestor """
# 3rd Party
import pytz
from twisted.internet import reactor
from pyiem.nws.products.dsm import parser
from pyiem.util import get_dbconn, LOG
# Local
from pywwa import common
from pywwa.ldm import bridge
from pywwa.database import get_database
# database timezones to pytz cache
TIMEZONES = dict()
STATIONS = dict()
def load_stations(txn):
    """Populate the STATIONS xref (4-char id -> tzinfo) from the database.

    Timezone objects are memoized in TIMEZONES; unknown timezone names
    fall back to UTC after logging.
    """
    txn.execute(
        "SELECT id, tzname from stations "
        "where network ~* 'ASOS' or network = 'AWOS'"
    )
    for sid, tzname in txn:
        # we need four char station IDs
        if len(sid) != 4:
            sid = "K" + sid
        if tzname not in TIMEZONES:
            try:
                TIMEZONES[tzname] = pytz.timezone(tzname)
            except Exception as exp:
                LOG.info("pytz does not like tzname: %s %s", tzname, exp)
                TIMEZONES[tzname] = pytz.UTC
        STATIONS[sid] = TIMEZONES[tzname]
def real_parser(txn, data):
    """Please process some data

    Parses a DSM product, localizes its timestamps via the STATIONS
    timezone xref, persists it when database writes are enabled, and
    emails any parser warnings along with the raw product text.
    """
    prod = parser(data, utcnow=common.utcnow())
    prod.tzlocalize(STATIONS)
    if common.dbwrite_enabled():
        prod.sql(txn)
    if prod.warnings:
        common.email_error("\n".join(prod.warnings), data)
def main():
    """build things up."""
    common.main(with_jabber=False)
    # sync: load the station -> timezone xref once at startup
    pgconn = get_dbconn("mesosite")
    cursor = pgconn.cursor()
    load_stations(cursor)
    pgconn.close()
    # Hook the LDM bridge to the parser with an async db pool, then run
    bridge(real_parser, dbpool=get_database("iem"))
    reactor.run()
if __name__ == "__main__":
main()
| {
"repo_name": "akrherz/pyWWA",
"path": "parsers/pywwa/workflows/dsm_parser.py",
"copies": "1",
"size": "1611",
"license": "mit",
"hash": 3383250952382785000,
"line_mean": 25.4098360656,
"line_max": 73,
"alpha_frac": 0.6238361266,
"autogenerated": false,
"ratio": 3.427659574468085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9551495701068085,
"avg_score": 0,
"num_lines": 61
} |
"""A sound file (a wav file) undergoing analysis.
Loads the data from the sound file on disk, and provides methods for accessing
the sound data and, if it exists, associated textgrid annotation information.
"""
# Licensed under Apache v2 (see LICENSE)
from __future__ import division
import math
import os
import numpy as np
from scipy.signal import resample
from scipy.io import wavfile
from opensauce.helpers import wavread
from opensauce.textgrid import TextGrid, IntervalTier
class SoundFile(object):
    def __init__(self, wavpath, tgdir=None, tgfn=None, resample_freq=None):
        """Load sound data from wavpath and TextGrid from tgdir+tgfn. If
        resample_freq is specified, then also resample the sound data and
        save the resampled data, in addition to the original data.
        Assume that input wav files are 16-bit PCM (integers between -32767
        and 32767). Output resampled wav files are also written as 16-bit
        PCM.
        If tgdir is not specified look for the TextGrid in the same directory
        as the sound file. if tgfn is not specified, look for a file with
        the same name as the sound file and an extension of 'TextGrid'.
        The returned SoundFile object has the following useful attributes:
        wavpath             The original path specified in the
                            constructor.
        wavfn               The filename component of wavpath.
        wavdata             An ndarray of wavfile samples (float)
        wavdata_int         An ndarray of wavfile sample (16-bit int)
        fs                  The number of samples per second
        ns                  Total number of samples
        wavpath_rs          Path for wav file corresponding to
                            resampled data
        wavdata_rs          An ndarray of wavfile float samples after
                            resampling (None if resample_freq = None)
        wavdata_rs_int      An ndarray of wavfile 16-bit int samples after
                            resampling (None if resample_freq = None)
        fs_rs               The number of samples per second after
                            resampling (None if resample_freq = None)
        ns_rs               Total number of samples after resampling
                            (None if resample_freq = None)
        tgpath              Full path to the textgrid file.
        textgrid            A TextGrid object loaded from tgpath if a
                            file exists at tgpath, else None.
        textgrid_intervals  A list of three tuples of the form
                            (label, start, stop), where label is a
                            text interval label and start and stop
                            are floating point numbers of seconds
                            from the start of the file of the
                            beginning and end of the interval. The
                            list is a concatenation of all TextGrid
                            tiers of type 'intervaltier', in the
                            order they occur in the TextGrid.
        The textgrid_intervals attribute exists if and only if the TextGrid
        file exists.
        """
        open(wavpath).close() # Generate an error if the file doesn't exist.
        self.wavpath = wavpath
        self.wavfn = os.path.basename(self.wavpath)
        if tgfn is None:
            tgfn = os.path.splitext(os.path.basename(wavpath))[0] + '.TextGrid'
        if tgdir is None:
            tgdir = os.path.dirname(wavpath)
        self.tgpath = os.path.join(tgdir, tgfn)
        # Check that resample_freq has valid value
        if resample_freq is not None:
            if not isinstance(resample_freq, int):
                raise ValueError('Resample frequency must be an integer')
            if resample_freq <= 0:
                raise ValueError('Resample frequency must be positive')
        # fs_rs doubles as the "resampling requested" flag (None = disabled)
        self.fs_rs = resample_freq
@property
def wavdata(self):
return self._wavdata()[0]
@property
def wavdata_int(self):
return self._wavdata()[1]
@property
def fs(self):
return self._wavdata()[2]
@property
def ns(self):
return len(self.wavdata)
    def _wavdata(self):
        """Read the wav file and return (float_data, int_data, sample_rate).

        NOTE(review): the ``self.__dict__`` assignments below look like an
        attempt to memoize ``wavdata`` and ``fs``, but both names are
        properties (data descriptors), so attribute lookup bypasses the
        instance dict and the getters re-run on every access — the cache
        entries are never consulted.
        """
        data, data_int, fs = wavread(self.wavpath)
        self.__dict__['wavdata'], self.__dict__['fs'] = data, fs
        return data, data_int, fs
@property
def wavpath_rs(self):
return self._wavdata_rs()[0]
@property
def wavdata_rs(self):
return self._wavdata_rs()[1]
@property
def wavdata_rs_int(self):
return self._wavdata_rs()[2]
@property
def ns_rs(self):
return self._wavdata_rs()[3]
def _wavdata_rs(self):
if self.fs_rs is not None:
# Number of points in resample
ns_rs = np.int_(np.ceil(self.ns * self.fs_rs / self.fs))
# Do resample
# XXX: Tried using a Hamming window as a low pass filter, but it
# didn't seem to make a big difference, so it's not used
# here.
data_rs = resample(self.wavdata, ns_rs)
wavpath_rs = self.wavpath.split('.')[0] + '-resample-' + str(self.fs_rs) + 'Hz.wav'
# Write resampled data to wav file
# Convert data from 32-bit floating point to 16-bit PCM
data_rs_int = np.int16(data_rs * 32768)
wavfile.write(wavpath_rs, self.fs_rs, data_rs_int)
# XXX: Was worried that Python might continue executing code
# before the file write is finished, but it seems like it's
# not an issue.
return wavpath_rs, data_rs, data_rs_int, ns_rs
else:
return None, None, None, None
    @property
    def ms_len(self):
        """Duration of the sound file in whole milliseconds (floored).

        NOTE(review): the ``self.__dict__`` assignment looks like intended
        caching, but a property is a data descriptor, so the instance-dict
        entry is never consulted and this getter recomputes on every access.
        """
        ms_len = int(math.floor(len(self.wavdata) / self.fs * 1000))
        self.__dict__['ms_len'] = ms_len
        return ms_len
    @property
    def textgrid(self):
        """TextGrid object parsed from ``self.tgpath``, or None when no file
        exists at that path.

        NOTE(review): the ``self.__dict__`` assignment does not actually
        cache anything — properties are data descriptors, so this getter
        (and the file parse) runs on every access.
        """
        if os.path.exists(self.tgpath):
            res = TextGrid.fromFile(self.tgpath)
        else:
            res = None
        self.__dict__['textgrid'] = res
        return res
@property
def textgrid_intervals(self):
if self.textgrid is None:
raise ValueError("Textgrid file {!r} not found".format(self.tgpath))
res = []
for tier in self.textgrid.tiers:
if tier.__class__ != IntervalTier:
continue
for i in tier.intervals:
res.append((i.mark, float(i.minTime), float(i.maxTime)))
self.__dict__['textgrid_intervals'] = res
return res
| {
"repo_name": "voicesauce/opensauce-python",
"path": "opensauce/soundfile.py",
"copies": "1",
"size": "6905",
"license": "apache-2.0",
"hash": 1154700737670910200,
"line_mean": 38.683908046,
"line_max": 95,
"alpha_frac": 0.5551049964,
"autogenerated": false,
"ratio": 4.315625,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.53707299964,
"avg_score": null,
"num_lines": null
} |
"""A source object that computes stress tensors on a volume.
The tensors are computed from the application of a point load on a
semi-infinite domain.
"""
# Authors: KK Rai (kk.rai [at] iitb.ac.in)
# R. Ambareesha (ambareesha [at] iitb.ac.in)
# Prabhu Ramachandran (prabhu [at] aero.iitb.ac.in)
# Enthought library imports.
from traits.api import Instance
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
# Local imports
from mayavi.core.source import Source
from mayavi.core.pipeline_info import PipelineInfo
######################################################################
# `PointLoad` class.
######################################################################
class PointLoad(Source):
    """Mayavi pipeline source wrapping ``tvtk.PointLoad``, which computes
    the stress tensors induced in a volume by the application of a point
    load on a semi-infinite domain.  Produces ``image_data`` output.
    """
    # The version of this class. Used for persistence.
    __version__ = 0
    # The wrapped TVTK object that does the actual computation; recorded
    # so trait changes are captured for persistence/scripting.
    point_load = Instance(tvtk.PointLoad, args=(), allow_none=False,
                          record=True)
    # Information about what this object can produce.
    output_info = PipelineInfo(datasets=['image_data'],
                               attribute_types=['any'],
                               attributes=['any'])
    # Create the UI for the traits.
    view = View(Group(Item(name='point_load',
                           style='custom',
                           resizable=True),
                      label='PointLoad',
                      show_labels=False),
                resizable=True)
    ######################################################################
    # `object` interface
    ######################################################################
    def __init__(self, **traits):
        """Create the source, wire re-rendering to trait changes, and
        expose the tvtk.PointLoad output on the pipeline."""
        # Call parent class' init.
        super(PointLoad, self).__init__(**traits)
        # Call render everytime source traits change.
        self.point_load.on_trait_change(self.render)
        # Setup the outputs.
        self.outputs = [self.point_load.output]
    def has_output_port(self):
        """ Return True as the point load has output port."""
        return True
    def get_output_object(self):
        """ Return the point load output port."""
        return self.point_load.output_port
| {
"repo_name": "alexandreleroux/mayavi",
"path": "mayavi/sources/point_load.py",
"copies": "3",
"size": "2160",
"license": "bsd-3-clause",
"hash": 3936292239228518400,
"line_mean": 33.8387096774,
"line_max": 74,
"alpha_frac": 0.5175925926,
"autogenerated": false,
"ratio": 4.509394572025053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002986857825567503,
"num_lines": 62
} |
"""A source of audio signal that connects to the
:py:class:`~pyfmodex.channel_group.ChannelGroup` mixing
hierarchy.
"""
from ctypes import *
from .channel_control import ChannelControl
from .fmodobject import _dll
from .globalvars import get_class
from .utils import check_type, ckresult
class Channel(ChannelControl):
    """A source of audio signal that connects to the
    :py:class:`~pyfmodex.channel_group.ChannelGroup` mixing hierarchy.

    Created with :py:meth:`~pyfmodex.system.System.play_sound` or
    :py:meth:`~pyfmodex.system.System.play_dsp`.
    """

    @property
    def channel_group(self):
        """The ChannelGroup this object outputs to.

        A :py:class:`~pyfmodex.channel_group.ChannelGroup` may contain many
        :py:class:`Channels <~pyfmodex.channel.Channel>`.
        :py:class:`Channels <~pyfmodex.channel.Channel>` may only output to a
        single :py:class:`~pyfmodex.channel_group.ChannelGroup`. Setting this
        will remove it from the previous group first.

        :type: ChannelGroup
        """
        grp_ptr = c_void_p()
        ckresult(_dll.FMOD_Channel_GetChannelGroup(self._ptr, byref(grp_ptr)))
        return get_class("ChannelGroup")(grp_ptr)

    @channel_group.setter
    def channel_group(self, group):
        check_type(group, get_class("ChannelGroup"))
        ckresult(_dll.FMOD_Channel_SetChannelGroup(self._ptr, group._ptr))

    @property
    def current_sound(self):
        """The currently playing Sound.

        May be None if no :py:class:`~pyfmodex.sound.Sound` is playing.

        :type: Sound
        """
        snd_ptr = c_void_p()
        ckresult(_dll.FMOD_Channel_GetCurrentSound(self._ptr, byref(snd_ptr)))
        return get_class("Sound")(snd_ptr)

    @property
    def frequency(self):
        """The playback frequency or playback rate.

        :type: float
        """
        freq = c_float()
        ckresult(_dll.FMOD_Channel_GetFrequency(self._ptr, byref(freq)))
        return freq.value

    @frequency.setter
    def frequency(self, freq):
        ckresult(_dll.FMOD_Channel_SetFrequency(self._ptr, c_float(freq)))

    @property
    def index(self):
        """The index of this object in the :py:class:`~pyfmodex.system.System`
        :py:class:`~pyfmodex.channel.Channel` pool.

        :type: int
        """
        idx = c_int()
        self._call_fmod("FMOD_Channel_GetIndex", byref(idx))
        return idx.value

    @property
    def loop_count(self):
        """The number of times to loop before stopping.

        Times to loop before stopping, with:

        - 0: oneshot
        - 1: loop once then stop
        - -1: loop forever

        This is the current loop countdown value that will decrement as it
        plays until reaching 0. It can be reset by setting this property.

        The :py:class:`~pyfmodex.flags.MODE` flags of the
        :py:class:`~pyfmodex.sound.Sound` or
        :py:class:`~pyfmodex.channel.Channel` must include LOOP_NORMAL or
        LOOP_BIDI for this function to work.

        :type: int
        """
        loopcount = c_int()
        ckresult(_dll.FMOD_Channel_GetLoopCount(self._ptr, byref(loopcount)))
        return loopcount.value

    @loop_count.setter
    def loop_count(self, count):
        ckresult(_dll.FMOD_Channel_SetLoopCount(self._ptr, c_int(count)))

    def get_loop_points(self, startunit, endunit):
        """Retrieve the loop start and end points.

        Valid TIMEUNIT types are PCM, MS and PCMBYTES. Any other time units
        return :py:attr:`~pyfmodex.enums.RESULT.FORMAT`.

        If MS or PCMBYTES are used, the value is internally converted from PCM,
        so the retrieved value may not exactly match the set value.

        :param TIMEUNIT startunit: Time units for loop start point.
        :param TIMEUNIT endunit: Time units for loop end point.
        :returns: Loop start point and loop end point.
        :rtype: two-tuple of ints
        """
        start = c_uint()
        end = c_uint()
        ckresult(
            _dll.FMOD_Channel_GetLoopPoints(
                self._ptr, byref(start), startunit.value, byref(end), endunit.value
            )
        )
        return start.value, end.value

    def set_loop_points(self, start, startunit, end, endunit):
        """Set the loop start and end points.

        Loop points may only be set on a :py:class:`~pyfmodex.channel.Channel`
        playing a :py:class:`~pyfmodex.sound.Sound`, not a
        :py:class:`~pyfmodex.channel.Channel` playing a
        :py:class:`~pyfmodex.dsp.DSP` (see
        :py:meth:`~pyfmodex.system.System.play_dsp`).

        Valid TIMEUNIT types are PCM, MS and PCMBYTES. Any other time units
        return :py:attr:`~pyfmodex.enums.RESULT.FORMAT`. If MS or PCMBYTES are
        used, the value is internally converted to PCM.

        :param int start: Loop start point.
        :param TIMEUNIT startunit: Time units for `start`.
        :param int end: Loop end point.
        :param TIMEUNIT endunit: Time units for `end`.
        """
        ckresult(
            _dll.FMOD_Channel_SetLoopPoints(
                self._ptr, c_uint(start), startunit.value, c_uint(end), endunit.value
            )
        )

    def get_position(self, unit):
        """Retrieve the current playback position.

        Certain TIMEUNIT types are always available: PCM, PCMBYTES and MS. The
        others are format specific such as MODORDER / MODROW / MODPATTERN which
        is specific to files of type MOD / S3M / XM / IT.

        If MS or PCMBYTES are used, the value is internally converted from PCM,
        so the retrieved value may not exactly match the set value.

        :param TIMEUNIT unit: Time units for position.
        :returns: Playback position.
        :rtype: int
        """
        pos = c_uint()
        ckresult(_dll.FMOD_Channel_GetPosition(self._ptr, byref(pos), unit.value))
        return pos.value

    def set_position(self, pos, unit):
        """Set the current playback position.

        Certain TIMEUNIT types are always available: PCM, PCMBYTES and MS. The
        others are format specific such as MODORDER / MODROW / MODPATTERN which
        is specific to files of type MOD / S3M / XM / IT.

        If playing a :py:class:`~pyfmodex.sound.Sound` created with
        :py:meth:`~pyfmodex.system.System.create_stream` or the
        :py:class:`~pyfmodex.flags.MODE` flag CREATESTREAM changing the
        position may cause a slow reflush operation while the file seek and
        decode occurs. You can avoid this by creating the stream with the
        :py:class:`~pyfmodex.flags.MODE` flag NONBLOCKING. This will cause the
        stream to go into :py:attr:`~pyfmodex.enums.OPENSTATE.SETPOSITION`
        state (see :py:attr:`~pyfmodex.sound.Sound.open_state`) and Sound
        commands will return :py:attr:`~pyfmodex.enums.RESULT.NOTREADY`.
        :py:meth:`get_position` will also not update until this non-blocking
        set position operation has completed.

        Using a VBR source that does not have an associated seek table or seek
        information (such as MP3 or MOD/S3M/XM/IT) may cause inaccurate seeking
        if you specify MS or PCM. If you want FMOD to create a PCM vs bytes
        seek table so that seeking is accurate, you will have to specify the
        :py:class:`~pyfmodex.flags.MODE` flag ACCURATETIME when loading or
        opening the sound. This means there is a slight delay as FMOD scans the
        whole file when loading the sound to create this table.

        :param int pos: Playback position.
        :param TIMEUNIT unit: Time units for `pos`.
        """
        # BUG FIX: previously passed the TIMEUNIT enum object itself as the
        # third argument; the C API expects the raw integer flag, consistent
        # with get_position / get_loop_points / set_loop_points above.
        ckresult(_dll.FMOD_Channel_SetPosition(self._ptr, c_uint(pos), unit.value))

    @property
    def priority(self):
        """The priority used for virtual Channel ordering, where 0 represents
        most important and 256 represents least important.

        Priority is used as a coarse grain control for the virtual
        :py:class:`~pyfmodex.channel.Channel` system, lower priority
        :py:class:`Channels <~pyfmodex.channel.Channel>` will always be stolen
        before higher. For channels of equal priority, those with the quietest
        :py:attr:`~pyfmodex.channel_control.ChannelControl.audibility` value
        will be stolen first.

        :type: int
        """
        pri = c_int()
        ckresult(_dll.FMOD_Channel_GetPriority(self._ptr, byref(pri)))
        return pri.value

    @priority.setter
    def priority(self, pri):
        ckresult(_dll.FMOD_Channel_SetPriority(self._ptr, pri))

    @property
    def is_virtual(self):
        """Whether the Channel is being emulated by the virtual Channel system.

        - True: silent / emulated
        - False: audible / real

        :type: bool
        """
        virtual_state = c_bool()
        ckresult(_dll.FMOD_Channel_IsVirtual(self._ptr, byref(virtual_state)))
        return virtual_state.value
| {
"repo_name": "tyrylu/pyfmodex",
"path": "pyfmodex/channel.py",
"copies": "1",
"size": "8896",
"license": "mit",
"hash": 7559459173687925000,
"line_mean": 36.6949152542,
"line_max": 85,
"alpha_frac": 0.6435476619,
"autogenerated": false,
"ratio": 3.775891341256367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4919439003156367,
"avg_score": null,
"num_lines": null
} |
""" A sparse matrix in COOrdinate or 'triplet' format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['coo_matrix', 'isspmatrix_coo']
from warnings import warn
import numpy as np
from scipy.lib.six import xrange, zip as izip
from ._sparsetools import coo_tocsr, coo_todense, coo_matvec
from .base import isspmatrix
from .data import _data_matrix, _minmax_mixin
from .sputils import (upcast, upcast_char, to_native, isshape, getdtype,
isintlike, get_index_dtype, downcast_intp_index, _compat_bincount)
class coo_matrix(_data_matrix, _minmax_mixin):
    """
    A sparse matrix in COOrdinate format.

    Also known as the 'ijv' or 'triplet' format.

    This can be instantiated in several ways:
        coo_matrix(D)
            with a dense matrix D

        coo_matrix(S)
            with another sparse matrix S (equivalent to S.tocoo())

        coo_matrix((M, N), [dtype])
            to construct an empty matrix with shape (M, N)
            dtype is optional, defaulting to dtype='d'.

        coo_matrix((data, (i, j)), [shape=(M, N)])
            to construct from three arrays:
                1. data[:]   the entries of the matrix, in any order
                2. i[:]      the row indices of the matrix entries
                3. j[:]      the column indices of the matrix entries

            Where ``A[i[k], j[k]] = data[k]``.  When shape is not
            specified, it is inferred from the index arrays

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
        Number of nonzero elements
    data
        COO format data array of the matrix
    row
        COO format row index array of the matrix
    col
        COO format column index array of the matrix

    Notes
    -----
    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    Advantages of the COO format
        - facilitates fast conversion among sparse formats
        - permits duplicate entries (see example)
        - very fast conversion to and from CSR/CSC formats

    Disadvantages of the COO format
        - does not directly support:
            + arithmetic operations
            + slicing

    Intended Usage
        - COO is a fast format for constructing sparse matrices
        - Once a matrix has been constructed, convert to CSR or
          CSC format for fast arithmetic and matrix vector operations
        - By default when converting to CSR or CSC format, duplicate (i,j)
          entries will be summed together.  This facilitates efficient
          construction of finite element matrices and the like. (see example)

    Examples
    --------
    >>> from scipy.sparse import coo_matrix
    >>> coo_matrix((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> row  = np.array([0, 3, 1, 0])
    >>> col  = np.array([0, 3, 1, 2])
    >>> data = np.array([4, 5, 7, 9])
    >>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray()
    array([[4, 0, 9, 0],
           [0, 7, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 5]])

    >>> # example with duplicates
    >>> row  = np.array([0, 0, 1, 3, 1, 0, 0])
    >>> col  = np.array([0, 2, 1, 3, 1, 0, 0])
    >>> data = np.array([1, 1, 1, 1, 1, 1, 1])
    >>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray()
    array([[3, 0, 1, 0],
           [0, 2, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 1]])
    """

    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        _data_matrix.__init__(self)

        if isinstance(arg1, tuple):
            if isshape(arg1):
                # (M, N) shape tuple: build an empty matrix.
                M, N = arg1
                self.shape = (M, N)
                idx_dtype = get_index_dtype(maxval=max(M, N))
                self.row = np.array([], dtype=idx_dtype)
                self.col = np.array([], dtype=idx_dtype)
                self.data = np.array([], getdtype(dtype, default=float))
                self.has_canonical_format = True
            else:
                # (data, (i, j)) triplet form.
                try:
                    obj, ij = arg1
                except Exception:
                    raise TypeError('invalid input format')

                try:
                    if len(ij) != 2:
                        raise TypeError
                except TypeError:
                    raise TypeError('invalid input format')

                self.row = np.array(ij[0], copy=copy)
                self.col = np.array(ij[1], copy=copy)
                self.data = np.array(obj, copy=copy)

                if shape is None:
                    if len(self.row) == 0 or len(self.col) == 0:
                        raise ValueError('cannot infer dimensions from zero '
                                         'sized index arrays')
                    M = self.row.max() + 1
                    N = self.col.max() + 1
                    self.shape = (M, N)
                else:
                    # Use 2 steps to ensure shape has length 2.
                    M, N = shape
                    self.shape = (M, N)

                idx_dtype = get_index_dtype(maxval=max(self.shape))
                self.row = self.row.astype(idx_dtype)
                self.col = self.col.astype(idx_dtype)
                self.has_canonical_format = False

        elif arg1 is None:
            # Initialize an empty matrix (deprecated spelling).
            if not isinstance(shape, tuple) or not isintlike(shape[0]):
                raise TypeError('dimensions not understood')
            warn('coo_matrix(None, shape=(M,N)) is deprecated, '
                 'use coo_matrix( (M,N) ) instead', DeprecationWarning)
            # BUG FIX: this branch previously computed
            # get_index_dtype(maxval=max(M, N)), but M and N are never bound
            # on this code path, raising NameError; derive from `shape`.
            idx_dtype = get_index_dtype(maxval=max(shape))
            self.shape = shape
            self.data = np.array([], getdtype(dtype, default=float))
            self.row = np.array([], dtype=idx_dtype)
            self.col = np.array([], dtype=idx_dtype)
            self.has_canonical_format = True
        else:
            if isspmatrix(arg1):
                if isspmatrix_coo(arg1) and copy:
                    self.row = arg1.row.copy()
                    self.col = arg1.col.copy()
                    self.data = arg1.data.copy()
                    self.shape = arg1.shape
                else:
                    coo = arg1.tocoo()
                    self.row = coo.row
                    self.col = coo.col
                    self.data = coo.data
                    self.shape = coo.shape
                self.has_canonical_format = False
            else:
                # dense argument
                try:
                    M = np.atleast_2d(np.asarray(arg1))
                except Exception:
                    raise TypeError('invalid input format')

                if M.ndim != 2:
                    raise TypeError('expected dimension <= 2 array or matrix')
                else:
                    self.shape = M.shape

                self.row, self.col = M.nonzero()
                self.data = M[self.row, self.col]
                self.has_canonical_format = True

        if dtype is not None:
            self.data = self.data.astype(dtype)

        self._check()

    def getnnz(self, axis=None):
        """Get the count of explicitly-stored values (nonzeros)

        Parameters
        ----------
        axis : None, 0, or 1
            Select between the number of values across the whole matrix, in
            each column, or in each row.
        """
        if axis is None:
            nnz = len(self.data)
            if nnz != len(self.row) or nnz != len(self.col):
                raise ValueError('row, column, and data array must all be the '
                                 'same length')

            if self.data.ndim != 1 or self.row.ndim != 1 or \
                    self.col.ndim != 1:
                raise ValueError('row, column, and data arrays must be 1-D')

            return int(nnz)

        if axis < 0:
            axis += 2
        if axis == 0:
            return _compat_bincount(downcast_intp_index(self.col),
                                    minlength=self.shape[1])
        elif axis == 1:
            return _compat_bincount(downcast_intp_index(self.row),
                                    minlength=self.shape[0])
        else:
            raise ValueError('axis out of bounds')

    nnz = property(fget=getnnz)

    def _check(self):
        """ Checks data structure for consistency """
        nnz = self.nnz

        # index arrays should have integer data types
        if self.row.dtype.kind != 'i':
            warn("row index array has non-integer dtype (%s) "
                 % self.row.dtype.name)
        if self.col.dtype.kind != 'i':
            warn("col index array has non-integer dtype (%s) "
                 % self.col.dtype.name)

        idx_dtype = get_index_dtype(maxval=max(self.shape))
        self.row = np.asarray(self.row, dtype=idx_dtype)
        self.col = np.asarray(self.col, dtype=idx_dtype)
        self.data = to_native(self.data)

        if nnz > 0:
            if self.row.max() >= self.shape[0]:
                raise ValueError('row index exceeds matrix dimensions')
            if self.col.max() >= self.shape[1]:
                raise ValueError('column index exceeds matrix dimensions')
            if self.row.min() < 0:
                raise ValueError('negative row index found')
            if self.col.min() < 0:
                raise ValueError('negative column index found')

    def transpose(self, copy=False):
        """Return the transpose as a coo_matrix (rows and columns swapped)."""
        M, N = self.shape
        return coo_matrix((self.data, (self.col, self.row)), shape=(N, M), copy=copy)

    def toarray(self, order=None, out=None):
        """See the docstring for `spmatrix.toarray`."""
        B = self._process_toarray_args(order, out)
        fortran = int(B.flags.f_contiguous)
        if not fortran and not B.flags.c_contiguous:
            raise ValueError("Output array must be C or F contiguous")
        M, N = self.shape
        # coo_todense accumulates duplicate (i, j) entries into B in place.
        coo_todense(M, N, self.nnz, self.row, self.col, self.data,
                    B.ravel('A'), fortran)
        return B

    def tocsc(self):
        """Return a copy of this matrix in Compressed Sparse Column format

        Duplicate entries will be summed together.

        Examples
        --------
        >>> from numpy import array
        >>> from scipy.sparse import coo_matrix
        >>> row  = array([0, 0, 1, 3, 1, 0, 0])
        >>> col  = array([0, 2, 1, 3, 1, 0, 0])
        >>> data = array([1, 1, 1, 1, 1, 1, 1])
        >>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsc()
        >>> A.toarray()
        array([[3, 0, 1, 0],
               [0, 2, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 1]])

        """
        from .csc import csc_matrix
        if self.nnz == 0:
            return csc_matrix(self.shape, dtype=self.dtype)
        else:
            M, N = self.shape
            idx_dtype = get_index_dtype((self.col, self.row),
                                        maxval=max(self.nnz, M))
            indptr = np.empty(N + 1, dtype=idx_dtype)
            indices = np.empty(self.nnz, dtype=idx_dtype)
            data = np.empty(self.nnz, dtype=upcast(self.dtype))

            # CSC is CSR of the transpose: pass the dimensions and the
            # row/col arrays swapped into coo_tocsr.
            coo_tocsr(N, M, self.nnz,
                      self.col.astype(idx_dtype),
                      self.row.astype(idx_dtype),
                      self.data,
                      indptr, indices, data)

            A = csc_matrix((data, indices, indptr), shape=self.shape)
            A.sum_duplicates()

            return A

    def tocsr(self):
        """Return a copy of this matrix in Compressed Sparse Row format

        Duplicate entries will be summed together.

        Examples
        --------
        >>> from numpy import array
        >>> from scipy.sparse import coo_matrix
        >>> row  = array([0, 0, 1, 3, 1, 0, 0])
        >>> col  = array([0, 2, 1, 3, 1, 0, 0])
        >>> data = array([1, 1, 1, 1, 1, 1, 1])
        >>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsr()
        >>> A.toarray()
        array([[3, 0, 1, 0],
               [0, 2, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 1]])

        """
        from .csr import csr_matrix
        if self.nnz == 0:
            return csr_matrix(self.shape, dtype=self.dtype)
        else:
            M, N = self.shape
            idx_dtype = get_index_dtype((self.row, self.col),
                                        maxval=max(self.nnz, N))
            indptr = np.empty(M + 1, dtype=idx_dtype)
            indices = np.empty(self.nnz, dtype=idx_dtype)
            data = np.empty(self.nnz, dtype=upcast(self.dtype))

            coo_tocsr(M, N, self.nnz,
                      self.row.astype(idx_dtype),
                      self.col.astype(idx_dtype),
                      self.data,
                      indptr,
                      indices,
                      data)

            A = csr_matrix((data, indices, indptr), shape=self.shape)
            A.sum_duplicates()

            return A

    def tocoo(self, copy=False):
        """Return this matrix (optionally a copy); already COO format."""
        if copy:
            return self.copy()
        else:
            return self

    def todia(self):
        """Convert to DIAgonal format; one stored diagonal per distinct
        offset ``col - row``."""
        from .dia import dia_matrix

        ks = self.col - self.row  # the diagonal for each nonzero
        diags = np.unique(ks)

        if len(diags) > 100:
            # probably undesired, should we do something?
            # should todia() have a maxdiags parameter?
            pass

        # initialize and fill in data array
        if self.data.size == 0:
            data = np.zeros((0, 0), dtype=self.dtype)
        else:
            data = np.zeros((len(diags), self.col.max()+1), dtype=self.dtype)
            data[np.searchsorted(diags, ks), self.col] = self.data

        return dia_matrix((data, diags), shape=self.shape)

    def todok(self):
        """Convert to Dictionary Of Keys format (duplicates summed first)."""
        from .dok import dok_matrix

        self.sum_duplicates()
        dok = dok_matrix((self.shape), dtype=self.dtype)
        dok.update(izip(izip(self.row, self.col), self.data))

        return dok

    def diagonal(self):
        # Could be rewritten without the python loop.
        # Data entries at the same (row, col) are summed.
        n = min(self.shape)
        ndata = self.data.shape[0]
        d = np.zeros(n, dtype=self.dtype)
        for i in xrange(ndata):
            r = self.row[i]
            if r == self.col[i]:
                d[r] += self.data[i]
        return d
    diagonal.__doc__ = _data_matrix.diagonal.__doc__

    def _setdiag(self, values, k):
        """Set diagonal k from `values` (scalar or 1-D), keeping all
        off-diagonal triples and appending fresh diagonal triples."""
        M, N = self.shape
        if values.ndim and not len(values):
            return
        idx_dtype = self.row.dtype

        # Determine which triples to keep and where to put the new ones.
        full_keep = self.col - self.row != k
        if k < 0:
            max_index = min(M+k, N)
            if values.ndim:
                max_index = min(max_index, len(values))
            keep = np.logical_or(full_keep, self.col >= max_index)
            new_row = np.arange(-k, -k + max_index, dtype=idx_dtype)
            new_col = np.arange(max_index, dtype=idx_dtype)
        else:
            max_index = min(M, N-k)
            if values.ndim:
                max_index = min(max_index, len(values))
            keep = np.logical_or(full_keep, self.row >= max_index)
            new_row = np.arange(max_index, dtype=idx_dtype)
            new_col = np.arange(k, k + max_index, dtype=idx_dtype)

        # Define the array of data consisting of the entries to be added.
        if values.ndim:
            new_data = values[:max_index]
        else:
            new_data = np.empty(max_index, dtype=self.dtype)
            new_data[:] = values

        # Update the internal structure.
        self.row = np.concatenate((self.row[keep], new_row))
        self.col = np.concatenate((self.col[keep], new_col))
        self.data = np.concatenate((self.data[keep], new_data))
        self.has_canonical_format = False

    # needed by _data_matrix
    def _with_data(self, data, copy=True):
        """Returns a matrix with the same sparsity structure as self,
        but with different data.  By default the index arrays
        (i.e. .row and .col) are copied.
        """
        if copy:
            return coo_matrix((data, (self.row.copy(), self.col.copy())),
                              shape=self.shape, dtype=data.dtype)
        else:
            return coo_matrix((data, (self.row, self.col)),
                              shape=self.shape, dtype=data.dtype)

    def sum_duplicates(self):
        """Eliminate duplicate matrix entries by adding them together

        This is an *in place* operation
        """
        if self.has_canonical_format or len(self.data) == 0:
            return
        # Sort so duplicate (row, col) pairs become adjacent.
        order = np.lexsort((self.row, self.col))
        self.row = self.row[order]
        self.col = self.col[order]
        self.data = self.data[order]
        unique_mask = ((self.row[1:] != self.row[:-1]) |
                       (self.col[1:] != self.col[:-1]))
        unique_mask = np.append(True, unique_mask)
        self.row = self.row[unique_mask]
        self.col = self.col[unique_mask]
        unique_inds, = np.nonzero(unique_mask)
        # Sum each run of duplicates into the first entry of the run.
        self.data = np.add.reduceat(self.data, unique_inds, dtype=self.dtype)
        self.has_canonical_format = True

    ###########################
    # Multiplication handlers #
    ###########################

    def _mul_vector(self, other):
        # output array
        result = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
                                                           other.dtype.char))
        coo_matvec(self.nnz, self.row, self.col, self.data, other, result)
        return result

    def _mul_multivector(self, other):
        return np.hstack([self._mul_vector(col).reshape(-1, 1) for col in other.T])
def isspmatrix_coo(x):
    """Return True if ``x`` is an instance of ``coo_matrix``."""
    return isinstance(x, coo_matrix)
| {
"repo_name": "jshiv/turntable",
"path": "test/lib/python2.7/site-packages/scipy/sparse/coo.py",
"copies": "7",
"size": "18062",
"license": "mit",
"hash": -646700355547197600,
"line_mean": 34.9800796813,
"line_max": 84,
"alpha_frac": 0.5133982948,
"autogenerated": false,
"ratio": 3.856929318812727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001381529281138739,
"num_lines": 502
} |
""" A sparse matrix in COOrdinate or 'triplet' format"""
__docformat__ = "restructuredtext en"
__all__ = ['coo_matrix', 'isspmatrix_coo']
from warnings import warn
import numpy as np
from sparsetools import coo_tocsr, coo_todense, coo_matvec
from base import isspmatrix
from data import _data_matrix
from sputils import upcast, to_native, isshape, getdtype, isintlike
class coo_matrix(_data_matrix):
"""
A sparse matrix in COOrdinate format.
Also known as the 'ijv' or 'triplet' format.
This can be instantiated in several ways:
coo_matrix(D)
with a dense matrix D
coo_matrix(S)
with another sparse matrix S (equivalent to S.tocoo())
coo_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
coo_matrix((data, ij), [shape=(M, N)])
The arguments 'data' and 'ij' represent three arrays:
1. data[:] the entries of the matrix, in any order
2. ij[0][:] the row indices of the matrix entries
3. ij[1][:] the column indices of the matrix entries
Where ``A[ij[0][k], ij[1][k] = data[k]``. When shape is
not specified, it is inferred from the index arrays
Notes
-----
Advantages of the COO format
- facilitates fast conversion among sparse formats
- permits duplicate entries (see example)
- very fast conversion to and from CSR/CSC formats
Disadvantages of the COO format
- does not directly support:
+ arithmetic operations
+ slicing
Intended Usage
- COO is a fast format for constructing sparse matrices
- Once a matrix has been constructed, convert to CSR or
CSC format for fast arithmetic and matrix vector operations
- By default when converting to CSR or CSC format, duplicate (i,j)
entries will be summed together. This facilitates efficient
construction of finite element matrices and the like. (see example)
Examples
--------
>>> from scipy.sparse import *
>>> from scipy import *
>>> coo_matrix( (3,4), dtype=int8 ).todense()
matrix([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = array([0,3,1,0])
>>> col = array([0,3,1,2])
>>> data = array([4,5,7,9])
>>> coo_matrix( (data,(row,col)), shape=(4,4) ).todense()
matrix([[4, 0, 9, 0],
[0, 7, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 5]])
>>> # example with duplicates
>>> row = array([0,0,1,3,1,0,0])
>>> col = array([0,2,1,3,1,0,0])
>>> data = array([1,1,1,1,1,1,1])
>>> coo_matrix( (data,(row,col)), shape=(4,4)).todense()
matrix([[3, 0, 1, 0],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
"""
    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        """Build a COO matrix from a shape tuple, a (data, ij) triplet,
        another sparse matrix, a dense array, or (deprecated) None."""
        _data_matrix.__init__(self)
        if isinstance(arg1, tuple):
            if isshape(arg1):
                # (M, N) shape tuple: build an empty matrix.
                M, N = arg1
                self.shape = (M,N)
                self.row = np.array([], dtype=np.intc)
                self.col = np.array([], dtype=np.intc)
                self.data = np.array([], getdtype(dtype, default=float))
            else:
                # (data, ij) triplet form.
                try:
                    obj, ij = arg1
                except:
                    raise TypeError('invalid input format')
                try:
                    if len(ij) != 2:
                        raise TypeError
                except TypeError:
                    raise TypeError('invalid input format')
                self.row = np.array(ij[0], copy=copy, dtype=np.intc)
                self.col = np.array(ij[1], copy=copy, dtype=np.intc)
                self.data = np.array( obj, copy=copy)
                if shape is None:
                    # Infer dimensions from the largest indices present.
                    if len(self.row) == 0 or len(self.col) == 0:
                        raise ValueError('cannot infer dimensions from zero sized index arrays')
                    M = self.row.max() + 1
                    N = self.col.max() + 1
                    self.shape = (M, N)
                else:
                    # Use 2 steps to ensure shape has length 2.
                    M, N = shape
                    self.shape = (M, N)
        elif arg1 is None:
            # Initialize an empty matrix (deprecated spelling).
            if not isinstance(shape, tuple) or not isintlike(shape[0]):
                raise TypeError('dimensions not understood')
            warn('coo_matrix(None, shape=(M,N)) is deprecated, ' \
                    'use coo_matrix( (M,N) ) instead', DeprecationWarning)
            self.shape = shape
            self.data = np.array([], getdtype(dtype, default=float))
            self.row = np.array([], dtype=np.intc)
            self.col = np.array([], dtype=np.intc)
        else:
            if isspmatrix(arg1):
                if isspmatrix_coo(arg1) and copy:
                    # Already COO: just copy the three arrays.
                    self.row = arg1.row.copy()
                    self.col = arg1.col.copy()
                    self.data = arg1.data.copy()
                    self.shape = arg1.shape
                else:
                    # Other sparse format: convert via its tocoo().
                    coo = arg1.tocoo()
                    self.row = coo.row
                    self.col = coo.col
                    self.data = coo.data
                    self.shape = coo.shape
            else:
                #dense argument
                # NOTE(review): np.rank is deprecated/removed in modern
                # NumPy (use ndarray.ndim); left as-is in this legacy code.
                try:
                    M = np.atleast_2d(np.asarray(arg1))
                except:
                    raise TypeError('invalid input format')
                if np.rank(M) != 2:
                    raise TypeError('expected rank <= 2 array or matrix')
                self.shape = M.shape
                self.row,self.col = (M != 0).nonzero()
                self.data = M[self.row,self.col]
        if dtype is not None:
            self.data = self.data.astype(dtype)
        self._check()
    def getnnz(self):
        """Return the number of stored entries, validating that the row,
        column, and data arrays are 1-D and all the same length."""
        nnz = len(self.data)
        if nnz != len(self.row) or nnz != len(self.col):
            raise ValueError('row, column, and data array must all be the same length')
        # NOTE(review): np.rank is deprecated/removed in modern NumPy
        # (use ndarray.ndim); left as-is in this legacy code.
        if np.rank(self.data) != 1 or np.rank(self.row) != 1 or np.rank(self.col) != 1:
            raise ValueError('row, column, and data arrays must have rank 1')
        return nnz
    # Expose getnnz as the read-only .nnz attribute.
    nnz = property(fget=getnnz)
def _check(self):
""" Checks data structure for consistency """
nnz = self.nnz
# index arrays should have integer data types
if self.row.dtype.kind != 'i':
warn("row index array has non-integer dtype (%s) " \
% self.row.dtype.name )
if self.col.dtype.kind != 'i':
warn("col index array has non-integer dtype (%s) " \
% self.col.dtype.name )
# only support 32-bit ints for now
self.row = np.asarray(self.row, dtype=np.intc)
self.col = np.asarray(self.col, dtype=np.intc)
self.data = to_native(self.data)
if nnz > 0:
if self.row.max() >= self.shape[0]:
raise ValueError('row index exceedes matrix dimensions')
if self.col.max() >= self.shape[1]:
raise ValueError('column index exceedes matrix dimensions')
if self.row.min() < 0:
raise ValueError('negative row index found')
if self.col.min() < 0:
raise ValueError('negative column index found')
def transpose(self, copy=False):
M,N = self.shape
return coo_matrix((self.data, (self.col, self.row)), shape=(N,M), copy=copy)
def toarray(self):
B = np.zeros(self.shape, dtype=self.dtype)
M,N = self.shape
coo_todense(M, N, self.nnz, self.row, self.col, self.data, B.ravel())
return B
def tocsc(self):
"""Return a copy of this matrix in Compressed Sparse Column format
Duplicate entries will be summed together.
Example
-------
>>> from numpy import array
>>> from scipy.sparse import coo_matrix
>>> row = array([0,0,1,3,1,0,0])
>>> col = array([0,2,1,3,1,0,0])
>>> data = array([1,1,1,1,1,1,1])
>>> A = coo_matrix( (data,(row,col)), shape=(4,4)).tocsc()
>>> A.todense()
matrix([[3, 0, 1, 0],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
"""
from csc import csc_matrix
if self.nnz == 0:
return csc_matrix(self.shape, dtype=self.dtype)
else:
M,N = self.shape
indptr = np.empty(N + 1, dtype=np.intc)
indices = np.empty(self.nnz, dtype=np.intc)
data = np.empty(self.nnz, dtype=upcast(self.dtype))
coo_tocsr(N, M, self.nnz, \
self.col, self.row, self.data, \
indptr, indices, data)
A = csc_matrix((data, indices, indptr), shape=self.shape)
A.sum_duplicates()
return A
def tocsr(self):
"""Return a copy of this matrix in Compressed Sparse Row format
Duplicate entries will be summed together.
Example
-------
>>> from numpy import array
>>> from scipy.sparse import coo_matrix
>>> row = array([0,0,1,3,1,0,0])
>>> col = array([0,2,1,3,1,0,0])
>>> data = array([1,1,1,1,1,1,1])
>>> A = coo_matrix( (data,(row,col)), shape=(4,4)).tocsr()
>>> A.todense()
matrix([[3, 0, 1, 0],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
"""
from csr import csr_matrix
if self.nnz == 0:
return csr_matrix(self.shape, dtype=self.dtype)
else:
M,N = self.shape
indptr = np.empty(M + 1, dtype=np.intc)
indices = np.empty(self.nnz, dtype=np.intc)
data = np.empty(self.nnz, dtype=upcast(self.dtype))
coo_tocsr(M, N, self.nnz, \
self.row, self.col, self.data, \
indptr, indices, data)
A = csr_matrix((data, indices, indptr), shape=self.shape)
A.sum_duplicates()
return A
def tocoo(self, copy=False):
if copy:
return self.copy()
else:
return self
def todia(self):
from dia import dia_matrix
ks = self.col - self.row #the diagonal for each nonzero
diags = np.unique(ks)
if len(diags) > 100:
#probably undesired, should we do something?
#should todia() have a maxdiags parameter?
pass
#initialize and fill in data array
data = np.zeros( (len(diags), self.col.max()+1), dtype=self.dtype)
data[ np.searchsorted(diags,ks), self.col ] = self.data
return dia_matrix((data,diags), shape=self.shape)
def todok(self):
from itertools import izip
from dok import dok_matrix
dok = dok_matrix((self.shape), dtype=self.dtype)
dok.update( izip(izip(self.row,self.col),self.data) )
return dok
# needed by _data_matrix
def _with_data(self,data,copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the index arrays
(i.e. .row and .col) are copied.
"""
if copy:
return coo_matrix( (data, (self.row.copy(), self.col.copy()) ), \
shape=self.shape, dtype=data.dtype)
else:
return coo_matrix( (data, (self.row, self.col) ), \
shape=self.shape, dtype=data.dtype)
###########################
# Multiplication handlers #
###########################
def _mul_vector(self, other):
#output array
result = np.zeros( self.shape[0], dtype=upcast(self.dtype,other.dtype) )
coo_matvec(self.nnz, self.row, self.col, self.data, other, result)
return result
def _mul_multivector(self, other):
return np.hstack( [ self._mul_vector(col).reshape(-1,1) for col in other.T ] )
from sputils import _isinstance
def isspmatrix_coo( x ):
    """Return True if ``x`` is a coo_matrix instance."""
    return _isinstance(x, coo_matrix)
| {
"repo_name": "jasonmccampbell/scipy-refactor",
"path": "scipy/sparse/coo.py",
"copies": "1",
"size": "12356",
"license": "bsd-3-clause",
"hash": -2725816645268759000,
"line_mean": 33.0385674931,
"line_max": 96,
"alpha_frac": 0.5101165426,
"autogenerated": false,
"ratio": 3.7105105105105105,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9694648049058662,
"avg_score": 0.005195800810369695,
"num_lines": 363
} |
""" A sparse matrix in COOrdinate or 'triplet' format"""
__docformat__ = "restructuredtext en"
__all__ = ['coo_matrix', 'isspmatrix_coo']
from warnings import warn
import numpy as np
from ._sparsetools import coo_tocsr, coo_todense, coo_matvec
from .base import isspmatrix, SparseEfficiencyWarning, spmatrix
from .data import _data_matrix, _minmax_mixin
from .sputils import (upcast, upcast_char, to_native, isshape, getdtype,
getdata, get_index_dtype, downcast_intp_index,
check_shape, check_reshape_kwargs, matrix)
import operator
class coo_matrix(_data_matrix, _minmax_mixin):
"""
A sparse matrix in COOrdinate format.
Also known as the 'ijv' or 'triplet' format.
This can be instantiated in several ways:
coo_matrix(D)
with a dense matrix D
coo_matrix(S)
with another sparse matrix S (equivalent to S.tocoo())
coo_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
coo_matrix((data, (i, j)), [shape=(M, N)])
to construct from three arrays:
1. data[:] the entries of the matrix, in any order
2. i[:] the row indices of the matrix entries
3. j[:] the column indices of the matrix entries
Where ``A[i[k], j[k]] = data[k]``. When shape is not
specified, it is inferred from the index arrays
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of stored values, including explicit zeros
data
COO format data array of the matrix
row
COO format row index array of the matrix
col
COO format column index array of the matrix
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the COO format
- facilitates fast conversion among sparse formats
- permits duplicate entries (see example)
- very fast conversion to and from CSR/CSC formats
Disadvantages of the COO format
- does not directly support:
+ arithmetic operations
+ slicing
Intended Usage
- COO is a fast format for constructing sparse matrices
- Once a matrix has been constructed, convert to CSR or
CSC format for fast arithmetic and matrix vector operations
- By default when converting to CSR or CSC format, duplicate (i,j)
entries will be summed together. This facilitates efficient
construction of finite element matrices and the like. (see example)
Examples
--------
>>> # Constructing an empty matrix
>>> from scipy.sparse import coo_matrix
>>> coo_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> # Constructing a matrix using ijv format
>>> row = np.array([0, 3, 1, 0])
>>> col = np.array([0, 3, 1, 2])
>>> data = np.array([4, 5, 7, 9])
>>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray()
array([[4, 0, 9, 0],
[0, 7, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 5]])
>>> # Constructing a matrix with duplicate indices
>>> row = np.array([0, 0, 1, 3, 1, 0, 0])
>>> col = np.array([0, 2, 1, 3, 1, 0, 0])
>>> data = np.array([1, 1, 1, 1, 1, 1, 1])
>>> coo = coo_matrix((data, (row, col)), shape=(4, 4))
>>> # Duplicate indices are maintained until implicitly or explicitly summed
>>> np.max(coo.data)
1
>>> coo.toarray()
array([[3, 0, 1, 0],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
"""
format = 'coo'
    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        """Build a COO matrix from a shape tuple, a (data, (i, j)) triple,
        another sparse matrix, or a dense array-like (see class docstring).
        """
        _data_matrix.__init__(self)
        if isinstance(arg1, tuple):
            if isshape(arg1):
                # (M, N): an empty matrix of the given shape.
                M, N = arg1
                self._shape = check_shape((M, N))
                idx_dtype = get_index_dtype(maxval=max(M, N))
                data_dtype = getdtype(dtype, default=float)
                self.row = np.array([], dtype=idx_dtype)
                self.col = np.array([], dtype=idx_dtype)
                self.data = np.array([], dtype=data_dtype)
                self.has_canonical_format = True
            else:
                # (data, (i, j)) triplet form.
                try:
                    obj, (row, col) = arg1
                except (TypeError, ValueError) as e:
                    raise TypeError('invalid input format') from e
                if shape is None:
                    # Infer shape from the largest indices present.
                    if len(row) == 0 or len(col) == 0:
                        raise ValueError('cannot infer dimensions from zero '
                                         'sized index arrays')
                    M = operator.index(np.max(row)) + 1
                    N = operator.index(np.max(col)) + 1
                    self._shape = check_shape((M, N))
                else:
                    # Use 2 steps to ensure shape has length 2.
                    M, N = shape
                    self._shape = check_shape((M, N))
                idx_dtype = get_index_dtype(maxval=max(self.shape))
                self.row = np.array(row, copy=copy, dtype=idx_dtype)
                self.col = np.array(col, copy=copy, dtype=idx_dtype)
                self.data = getdata(obj, copy=copy, dtype=dtype)
                self.has_canonical_format = False
        else:
            if isspmatrix(arg1):
                if isspmatrix_coo(arg1) and copy:
                    self.row = arg1.row.copy()
                    self.col = arg1.col.copy()
                    self.data = arg1.data.copy()
                    self._shape = check_shape(arg1.shape)
                else:
                    coo = arg1.tocoo()
                    self.row = coo.row
                    self.col = coo.col
                    self.data = coo.data
                    self._shape = check_shape(coo.shape)
                self.has_canonical_format = False
            else:
                #dense argument
                M = np.atleast_2d(np.asarray(arg1))
                if M.ndim != 2:
                    raise TypeError('expected dimension <= 2 array or matrix')
                self._shape = check_shape(M.shape)
                if shape is not None:
                    if check_shape(shape) != self._shape:
                        raise ValueError('inconsistent shapes: %s != %s' %
                                         (shape, self._shape))
                # nonzero() yields indices of the nonzero entries only.
                self.row, self.col = M.nonzero()
                self.data = M[self.row, self.col]
                self.has_canonical_format = True
        if dtype is not None:
            self.data = self.data.astype(dtype, copy=False)
        self._check()
    def reshape(self, *args, **kwargs):
        shape = check_shape(args, self.shape)
        order, copy = check_reshape_kwargs(kwargs)
        # Return early if reshape is not required
        if shape == self.shape:
            if copy:
                return self.copy()
            else:
                return self
        nrows, ncols = self.shape
        if order == 'C':
            # Row-major: linearize (row, col) -> flat, then re-split.
            # Upcast to avoid overflows: the coo_matrix constructor
            # below will downcast the results to a smaller dtype, if
            # possible.
            dtype = get_index_dtype(maxval=(ncols * max(0, nrows - 1) + max(0, ncols - 1)))
            flat_indices = np.multiply(ncols, self.row, dtype=dtype) + self.col
            new_row, new_col = divmod(flat_indices, shape[1])
        elif order == 'F':
            # Column-major: linearize by columns instead of rows.
            dtype = get_index_dtype(maxval=(nrows * max(0, ncols - 1) + max(0, nrows - 1)))
            flat_indices = np.multiply(nrows, self.col, dtype=dtype) + self.row
            new_col, new_row = divmod(flat_indices, shape[0])
        else:
            raise ValueError("'order' must be 'C' or 'F'")
        # Handle copy here rather than passing on to the constructor so that no
        # copy will be made of new_row and new_col regardless
        if copy:
            new_data = self.data.copy()
        else:
            new_data = self.data
        return coo_matrix((new_data, (new_row, new_col)),
                          shape=shape, copy=False)
    reshape.__doc__ = spmatrix.reshape.__doc__
    def getnnz(self, axis=None):
        if axis is None:
            # Whole-matrix count; also validates the triplet arrays.
            nnz = len(self.data)
            if nnz != len(self.row) or nnz != len(self.col):
                raise ValueError('row, column, and data array must all be the '
                                 'same length')
            if self.data.ndim != 1 or self.row.ndim != 1 or \
                    self.col.ndim != 1:
                raise ValueError('row, column, and data arrays must be 1-D')
            return int(nnz)
        if axis < 0:
            # Normalize a negative axis (matrices have exactly 2 dims).
            axis += 2
        if axis == 0:
            # Per-column counts.
            return np.bincount(downcast_intp_index(self.col),
                               minlength=self.shape[1])
        elif axis == 1:
            # Per-row counts.
            return np.bincount(downcast_intp_index(self.row),
                               minlength=self.shape[0])
        else:
            raise ValueError('axis out of bounds')
    getnnz.__doc__ = spmatrix.getnnz.__doc__
    def _check(self):
        """ Checks data structure for consistency """
        # index arrays should have integer data types
        if self.row.dtype.kind != 'i':
            warn("row index array has non-integer dtype (%s) "
                 % self.row.dtype.name)
        if self.col.dtype.kind != 'i':
            warn("col index array has non-integer dtype (%s) "
                 % self.col.dtype.name)
        # Re-cast indices to the smallest dtype that can address the shape.
        idx_dtype = get_index_dtype(maxval=max(self.shape))
        self.row = np.asarray(self.row, dtype=idx_dtype)
        self.col = np.asarray(self.col, dtype=idx_dtype)
        self.data = to_native(self.data)
        if self.nnz > 0:
            # Bounds-check all indices against the declared shape.
            if self.row.max() >= self.shape[0]:
                raise ValueError('row index exceeds matrix dimensions')
            if self.col.max() >= self.shape[1]:
                raise ValueError('column index exceeds matrix dimensions')
            if self.row.min() < 0:
                raise ValueError('negative row index found')
            if self.col.min() < 0:
                raise ValueError('negative column index found')
def transpose(self, axes=None, copy=False):
if axes is not None:
raise ValueError(("Sparse matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation."))
M, N = self.shape
return coo_matrix((self.data, (self.col, self.row)),
shape=(N, M), copy=copy)
transpose.__doc__ = spmatrix.transpose.__doc__
def resize(self, *shape):
shape = check_shape(shape)
new_M, new_N = shape
M, N = self.shape
if new_M < M or new_N < N:
mask = np.logical_and(self.row < new_M, self.col < new_N)
if not mask.all():
self.row = self.row[mask]
self.col = self.col[mask]
self.data = self.data[mask]
self._shape = shape
resize.__doc__ = spmatrix.resize.__doc__
    def toarray(self, order=None, out=None):
        """See the docstring for `spmatrix.toarray`."""
        B = self._process_toarray_args(order, out)
        fortran = int(B.flags.f_contiguous)
        if not fortran and not B.flags.c_contiguous:
            raise ValueError("Output array must be C or F contiguous")
        M,N = self.shape
        # ravel('A') preserves B's own (C or Fortran) memory layout; the
        # kernel is told which one via the `fortran` flag.
        coo_todense(M, N, self.nnz, self.row, self.col, self.data,
                    B.ravel('A'), fortran)
        return B
    def tocsc(self, copy=False):
        """Convert this matrix to Compressed Sparse Column format

        Duplicate entries will be summed together.

        Examples
        --------
        >>> from numpy import array
        >>> from scipy.sparse import coo_matrix
        >>> row = array([0, 0, 1, 3, 1, 0, 0])
        >>> col = array([0, 2, 1, 3, 1, 0, 0])
        >>> data = array([1, 1, 1, 1, 1, 1, 1])
        >>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsc()
        >>> A.toarray()
        array([[3, 0, 1, 0],
               [0, 2, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 1]])
        """
        from .csc import csc_matrix
        if self.nnz == 0:
            return csc_matrix(self.shape, dtype=self.dtype)
        else:
            M,N = self.shape
            idx_dtype = get_index_dtype((self.col, self.row),
                                        maxval=max(self.nnz, M))
            row = self.row.astype(idx_dtype, copy=False)
            col = self.col.astype(idx_dtype, copy=False)
            indptr = np.empty(N + 1, dtype=idx_dtype)
            indices = np.empty_like(row, dtype=idx_dtype)
            data = np.empty_like(self.data, dtype=upcast(self.dtype))
            # CSC(A) == CSR(A.T): call the CSR kernel with rows/cols swapped.
            coo_tocsr(N, M, self.nnz, col, row, self.data,
                      indptr, indices, data)
            x = csc_matrix((data, indices, indptr), shape=self.shape)
            if not self.has_canonical_format:
                x.sum_duplicates()
            return x
    def tocsr(self, copy=False):
        """Convert this matrix to Compressed Sparse Row format

        Duplicate entries will be summed together.

        Examples
        --------
        >>> from numpy import array
        >>> from scipy.sparse import coo_matrix
        >>> row = array([0, 0, 1, 3, 1, 0, 0])
        >>> col = array([0, 2, 1, 3, 1, 0, 0])
        >>> data = array([1, 1, 1, 1, 1, 1, 1])
        >>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsr()
        >>> A.toarray()
        array([[3, 0, 1, 0],
               [0, 2, 0, 0],
               [0, 0, 0, 0],
               [0, 0, 0, 1]])
        """
        from .csr import csr_matrix
        if self.nnz == 0:
            return csr_matrix(self.shape, dtype=self.dtype)
        else:
            M,N = self.shape
            # Pick an index dtype wide enough for both nnz and the columns.
            idx_dtype = get_index_dtype((self.row, self.col),
                                        maxval=max(self.nnz, N))
            row = self.row.astype(idx_dtype, copy=False)
            col = self.col.astype(idx_dtype, copy=False)
            indptr = np.empty(M + 1, dtype=idx_dtype)
            indices = np.empty_like(col, dtype=idx_dtype)
            data = np.empty_like(self.data, dtype=upcast(self.dtype))
            coo_tocsr(M, N, self.nnz, row, col, self.data,
                      indptr, indices, data)
            x = csr_matrix((data, indices, indptr), shape=self.shape)
            if not self.has_canonical_format:
                x.sum_duplicates()
            return x
def tocoo(self, copy=False):
if copy:
return self.copy()
else:
return self
tocoo.__doc__ = spmatrix.tocoo.__doc__
    def todia(self, copy=False):
        from .dia import dia_matrix
        # Canonicalize so each (row, col) pair appears exactly once.
        self.sum_duplicates()
        ks = self.col - self.row  # the diagonal for each nonzero
        diags, diag_idx = np.unique(ks, return_inverse=True)
        if len(diags) > 100:
            # probably undesired, should todia() have a maxdiags parameter?
            warn("Constructing a DIA matrix with %d diagonals "
                 "is inefficient" % len(diags), SparseEfficiencyWarning)
        #initialize and fill in data array
        if self.data.size == 0:
            data = np.zeros((0, 0), dtype=self.dtype)
        else:
            data = np.zeros((len(diags), self.col.max()+1), dtype=self.dtype)
            data[diag_idx, self.col] = self.data
        return dia_matrix((data,diags), shape=self.shape)
    todia.__doc__ = spmatrix.todia.__doc__
def todok(self, copy=False):
from .dok import dok_matrix
self.sum_duplicates()
dok = dok_matrix((self.shape), dtype=self.dtype)
dok._update(zip(zip(self.row,self.col),self.data))
return dok
todok.__doc__ = spmatrix.todok.__doc__
    def diagonal(self, k=0):
        rows, cols = self.shape
        if k <= -rows or k >= cols:
            # Requested diagonal lies entirely outside the matrix.
            return np.empty(0, dtype=self.data.dtype)
        diag = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),
                        dtype=self.dtype)
        diag_mask = (self.row + k) == self.col
        if self.has_canonical_format:
            row = self.row[diag_mask]
            data = self.data[diag_mask]
        else:
            # Duplicates on the diagonal must be summed before extraction.
            row, _, data = self._sum_duplicates(self.row[diag_mask],
                                                self.col[diag_mask],
                                                self.data[diag_mask])
        diag[row + min(k, 0)] = data
        return diag
    diagonal.__doc__ = _data_matrix.diagonal.__doc__
    def _setdiag(self, values, k):
        """Overwrite diagonal ``k`` with ``values`` (scalar or 1-D), in place."""
        M, N = self.shape
        if values.ndim and not len(values):
            # Empty array of values: nothing to do.
            return
        idx_dtype = self.row.dtype
        # Determine which triples to keep and where to put the new ones.
        full_keep = self.col - self.row != k
        if k < 0:
            max_index = min(M+k, N)
            if values.ndim:
                max_index = min(max_index, len(values))
            keep = np.logical_or(full_keep, self.col >= max_index)
            new_row = np.arange(-k, -k + max_index, dtype=idx_dtype)
            new_col = np.arange(max_index, dtype=idx_dtype)
        else:
            max_index = min(M, N-k)
            if values.ndim:
                max_index = min(max_index, len(values))
            keep = np.logical_or(full_keep, self.row >= max_index)
            new_row = np.arange(max_index, dtype=idx_dtype)
            new_col = np.arange(k, k + max_index, dtype=idx_dtype)
        # Define the array of data consisting of the entries to be added.
        if values.ndim:
            new_data = values[:max_index]
        else:
            # Scalar value: broadcast it along the whole diagonal.
            new_data = np.empty(max_index, dtype=self.dtype)
            new_data[:] = values
        # Update the internal structure.
        self.row = np.concatenate((self.row[keep], new_row))
        self.col = np.concatenate((self.col[keep], new_col))
        self.data = np.concatenate((self.data[keep], new_data))
        self.has_canonical_format = False
# needed by _data_matrix
def _with_data(self,data,copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the index arrays
(i.e. .row and .col) are copied.
"""
if copy:
return coo_matrix((data, (self.row.copy(), self.col.copy())),
shape=self.shape, dtype=data.dtype)
else:
return coo_matrix((data, (self.row, self.col)),
shape=self.shape, dtype=data.dtype)
def sum_duplicates(self):
"""Eliminate duplicate matrix entries by adding them together
This is an *in place* operation
"""
if self.has_canonical_format:
return
summed = self._sum_duplicates(self.row, self.col, self.data)
self.row, self.col, self.data = summed
self.has_canonical_format = True
    def _sum_duplicates(self, row, col, data):
        # Assumes (data, row, col) not in canonical format.
        if len(data) == 0:
            return row, col, data
        # Sort by (col, row) so duplicate coordinates become adjacent.
        order = np.lexsort((row, col))
        row = row[order]
        col = col[order]
        data = data[order]
        # True at the first occurrence of each distinct (row, col) pair.
        unique_mask = ((row[1:] != row[:-1]) |
                       (col[1:] != col[:-1]))
        unique_mask = np.append(True, unique_mask)
        row = row[unique_mask]
        col = col[unique_mask]
        unique_inds, = np.nonzero(unique_mask)
        # Sum each run of duplicates in one C-level reduceat pass.
        data = np.add.reduceat(data, unique_inds, dtype=self.dtype)
        return row, col, data
def eliminate_zeros(self):
"""Remove zero entries from the matrix
This is an *in place* operation
"""
mask = self.data != 0
self.data = self.data[mask]
self.row = self.row[mask]
self.col = self.col[mask]
#######################
# Arithmetic handlers #
#######################
    def _add_dense(self, other):
        if other.shape != self.shape:
            raise ValueError('Incompatible shapes ({} and {})'
                             .format(self.shape, other.shape))
        dtype = upcast_char(self.dtype.char, other.dtype.char)
        result = np.array(other, dtype=dtype, copy=True)
        fortran = int(result.flags.f_contiguous)
        M, N = self.shape
        # Scatter-add our entries directly into the dense copy.
        coo_todense(M, N, self.nnz, self.row, self.col, self.data,
                    result.ravel('A'), fortran)
        return matrix(result, copy=False)
def _mul_vector(self, other):
#output array
result = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
other.dtype.char))
coo_matvec(self.nnz, self.row, self.col, self.data, other, result)
return result
def _mul_multivector(self, other):
result = np.zeros((other.shape[1], self.shape[0]),
dtype=upcast_char(self.dtype.char, other.dtype.char))
for i, col in enumerate(other.T):
coo_matvec(self.nnz, self.row, self.col, self.data, col, result[i])
return result.T.view(type=type(other))
def isspmatrix_coo(x):
    """Is x of coo_matrix type?

    Parameters
    ----------
    x
        object to check for being a coo matrix

    Returns
    -------
    bool
        True if x is a coo matrix, False otherwise

    Examples
    --------
    >>> from scipy.sparse import coo_matrix, isspmatrix_coo
    >>> isspmatrix_coo(coo_matrix([[5]]))
    True

    >>> from scipy.sparse import coo_matrix, csr_matrix, isspmatrix_coo
    >>> isspmatrix_coo(csr_matrix([[5]]))
    False
    """
    # Plain isinstance check — subclasses of coo_matrix also match.
    return isinstance(x, coo_matrix)
| {
"repo_name": "endolith/scipy",
"path": "scipy/sparse/coo.py",
"copies": "12",
"size": "22145",
"license": "bsd-3-clause",
"hash": 2635325659697761000,
"line_mean": 34.8333333333,
"line_max": 91,
"alpha_frac": 0.5230977647,
"autogenerated": false,
"ratio": 3.8227170723286727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A Spawner for JupyterHub to allow the Hub to be run as non-root.
This spawns a mediator process with sudo, which then takes actions on behalf of the user.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import shutil
import sys
import os
import warnings
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.process import Subprocess
from traitlets import List, Unicode, Bool
from jupyterhub.spawner import LocalProcessSpawner
from jupyterhub.utils import random_port
class SudoSpawner(LocalProcessSpawner):
    """Spawner that delegates privileged actions to a mediator process.

    The mediator is launched via ``sudo`` as the target user, so the Hub
    itself never needs root privileges.
    """
    sudospawner_path = Unicode(shutil.which('sudospawner') or 'sudospawner', config=True,
        help="Path to sudospawner script"
    )
    sudo_args = List(['-nH'], config=True,
        help="Extra args to pass to sudo"
    )
    mediator_log_level = Unicode("INFO", config=True,
        help="Log level for the mediator process",
    )
    # TODO: deprecated in > 0.5.2
    debug_mediator = Bool(False, config=True,
        help="Extra log output from the mediator process for debugging",
    )

    @gen.coroutine
    def relog_stderr(self, stderr):
        """Forward the mediator's stderr to our own stderr, line by line."""
        while not stderr.closed():
            try:
                line = yield stderr.read_until(b'\n')
            except StreamClosedError:
                return
            else:
                # TODO: log instead of write to stderr directly?
                # If we do that, will get huge double-prefix messages:
                # [I date JupyterHub] [W date SingleUser] msg...
                sys.stderr.write(line.decode('utf8', 'replace'))

    def make_preexec_fn(self):
        # The mediator drops privileges itself, so no preexec_fn is needed.
        return None

    @gen.coroutine
    def do(self, action, **kwargs):
        """Instruct the mediator process to take a given action"""
        kwargs['action'] = action
        if kwargs.pop('_skip_sudo', False):
            # mock testing by skipping the sudo part
            cmd = []
        else:
            cmd = ['sudo', '-u', self.user.name]
            cmd.extend(self.sudo_args)
        cmd.append(self.sudospawner_path)
        if self.debug_mediator:
            self.mediator_log_level = 'DEBUG'
            warnings.warn("debug_mediator is deprecated in favor of mediator_log_level", DeprecationWarning)
        if self.mediator_log_level:
            cmd.append('--logging={}'.format(self.mediator_log_level))
        self.log.debug("Spawning %s", cmd)
        p = Subprocess(cmd, stdin=Subprocess.STREAM, stdout=Subprocess.STREAM, stderr=Subprocess.STREAM, preexec_fn=self.make_preexec_fn())
        stderr_future = self.relog_stderr(p.stderr)
        # hand the stderr future to the IOLoop so it isn't orphaned,
        # even though we aren't going to wait for it unless there's an error
        IOLoop.current().add_callback(lambda : stderr_future)
        yield p.stdin.write(json.dumps(kwargs).encode('utf8'))
        p.stdin.close()
        data = yield p.stdout.read_until_close()
        if p.returncode:
            yield stderr_future
            raise RuntimeError("sudospawner subprocess failed with exit code: %r" % p.returncode)
        data_str = data.decode('utf8', 'replace')
        try:
            # Extract only the JSON payload; sudo/mediator noise may surround it.
            data_str = data_str[data_str.index('{'):data_str.rindex('}')+1]
            response = json.loads(data_str)
        except ValueError:
            self.log.error("Failed to get JSON result from mediator: %r" % data_str)
            raise
        return response

    @gen.coroutine
    def start(self):
        """Ask the mediator to spawn the single-user server.

        Returns ``(ip, port)``, as JupyterHub >= 0.7 expects.
        """
        self.port = random_port()
        # only args, not the base command
        reply = yield self.do(action='spawn', args=self.get_args(), env=self.get_env())
        self.pid = reply['pid']
        # (removed a stray debug ``print(self.ip)`` left over from development)
        # 0.7 expects ip, port to be returned
        return (self.ip or '127.0.0.1', self.port)

    @gen.coroutine
    def _signal(self, sig):
        """Send ``sig`` to the spawned process; return True if it exists."""
        if sig == 0:
            # short-circuit existence check without invoking sudo
            try:
                os.kill(self.pid, sig)
            except ProcessLookupError:
                # No such process
                return False
            except PermissionError:
                # When running hub with reduced permissions,
                # we won't have permission to send signals, even 0.
                # PermissionError means process exists.
                pass
            return True
        reply = yield self.do('kill', pid=self.pid, signal=sig)
        return reply['alive']
| {
"repo_name": "jupyter/sudospawner",
"path": "sudospawner/spawner.py",
"copies": "1",
"size": "4510",
"license": "bsd-3-clause",
"hash": -1383084342336814000,
"line_mean": 34.7936507937,
"line_max": 139,
"alpha_frac": 0.6133037694,
"autogenerated": false,
"ratio": 3.994685562444641,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5107989331844641,
"avg_score": null,
"num_lines": null
} |
""" A Spawner for JupyterHub to allow users to execute Jupyter
without involving root, sudo, or local system accounts. """
# Copyright (c) Christopher H Smith <chris@binc.jp>
# Distributed under the terms of the Modified BSD License.
import os
import errno
import signal
import pipes
import shutil
from tornado import gen
from subprocess import Popen
from jupyterhub.spawner import Spawner
from jupyterhub.utils import random_port
from traitlets import (Unicode, Integer, Instance, validate)
class RootlessSpawner(Spawner):
    """Spawn single-user servers as plain subprocesses of the Hub's own
    user — no root, sudo, or per-user system accounts involved.
    """
    INTERRUPT_TIMEOUT = Integer(10,
        help="Seconds to wait for process to halt after SIGINT before proceeding to SIGTERM"
    ).tag(config=True)
    TERM_TIMEOUT = Integer(5,
        help="Seconds to wait for process to halt after SIGTERM before proceeding to SIGKILL"
    ).tag(config=True)
    KILL_TIMEOUT = Integer(5,
        help="Seconds to wait for process to halt after SIGKILL before giving up"
    ).tag(config=True)
    shared_dir = Unicode(
        config=True,
        help="The path to a directory shared by all users"
    )
    # Popen handle for a process started in this Hub session (None after resume).
    proc = Instance(Popen, allow_none=True)
    # PID is persisted via get_state() so a restarted Hub can resume control.
    pid = Integer(0)
    @validate('notebook_dir')
    def _notebook_dir_validate(self, p):
        # Expands templates, normalizes the path, creates the directory and
        # the 'Shared' symlink as a side effect of validation.
        value = self.format_string(p['value'])
        # Strip any trailing slashes
        # *except* if it's root
        _, path = os.path.splitdrive(value)
        if path == os.sep:
            return value
        value = value.rstrip(os.sep)
        if not os.path.isabs(value):
            # If we receive a non-absolute path, make it absolute.
            value = os.path.abspath(value)
        if not os.path.isdir(value):
            os.mkdir(value, mode=0o755)
        if self.shared_dir and not os.path.islink(value + '/Shared'):
            # Create a symlink to the shared directory
            os.symlink(self.shared_dir, value + '/Shared')
        return value
    def load_state(self, state):
        """load pid from state"""
        super(RootlessSpawner, self).load_state(state)
        if 'pid' in state:
            self.pid = state['pid']
    def get_state(self):
        """add pid to state"""
        state = super(RootlessSpawner, self).get_state()
        if self.pid:
            state['pid'] = self.pid
        return state
    def clear_state(self):
        """clear pid state"""
        super(RootlessSpawner, self).clear_state()
        self.pid = 0
    def get_env(self):
        """Add user environment variables"""
        # Currently a pass-through; hook point for future customization.
        env = super().get_env()
        return env
    @gen.coroutine
    def start(self):
        """Start the process"""
        self.port = random_port()
        cmd = []
        env = self.get_env()
        cmd.extend(self.cmd)
        cmd.extend(self.get_args())
        self.log.info("Spawning %s", ' '.join(pipes.quote(s) for s in cmd))
        try:
            self.proc = Popen(cmd, env=env,
                              start_new_session=True, # don't forward signals
                              )
        except PermissionError:
            # use which to get abspath
            script = shutil.which(cmd[0]) or cmd[0]
            self.log.error("Permission denied trying to run %r. Does %s have access to this file?",
                           script, self.user.name,
                           )
            raise
        self.pid = self.proc.pid
        return (self.ip or '127.0.0.1', self.port)
    @gen.coroutine
    def poll(self):
        """Poll the process"""
        # if we started the process, poll with Popen
        if self.proc is not None:
            status = self.proc.poll()
            if status is not None:
                # clear state if the process is done
                self.clear_state()
            return status
        # if we resumed from stored state,
        # we don't have the Popen handle anymore, so rely on self.pid
        if not self.pid:
            # no pid, not running
            self.clear_state()
            return 0
        # send signal 0 to check if PID exists
        # this doesn't work on Windows, but that's okay because we don't support Windows.
        alive = yield self._signal(0)
        if not alive:
            self.clear_state()
            return 0
        else:
            return None
    @gen.coroutine
    def _signal(self, sig):
        # Returns False when the process no longer exists (ESRCH).
        try:
            os.kill(self.pid, sig)
        except OSError as e:
            if e.errno == errno.ESRCH:
                return False # process is gone
            else:
                raise
        return True # process exists
    @gen.coroutine
    def stop(self, now=False):
        """stop the subprocess

        if `now`, skip waiting for clean shutdown
        """
        # Escalation ladder: SIGINT -> SIGTERM -> SIGKILL, polling between.
        if not now:
            status = yield self.poll()
            if status is not None:
                return
            self.log.debug("Interrupting %i", self.pid)
            yield self._signal(signal.SIGINT)
            yield self.wait_for_death(self.INTERRUPT_TIMEOUT)
            # clean shutdown failed, use TERM
            status = yield self.poll()
            if status is not None:
                return
            self.log.debug("Terminating %i", self.pid)
            yield self._signal(signal.SIGTERM)
            yield self.wait_for_death(self.TERM_TIMEOUT)
        # TERM failed, use KILL
        status = yield self.poll()
        if status is not None:
            return
        self.log.debug("Killing %i", self.pid)
        yield self._signal(signal.SIGKILL)
        yield self.wait_for_death(self.KILL_TIMEOUT)
        status = yield self.poll()
        if status is None:
            # it all failed, zombie process
            self.log.warning("Process %i never died", self.pid)
| {
"repo_name": "chris-x86-64/rootlessspawner",
"path": "rootlessspawner/spawner.py",
"copies": "1",
"size": "5614",
"license": "bsd-3-clause",
"hash": 8141347024322774000,
"line_mean": 29.8461538462,
"line_max": 99,
"alpha_frac": 0.5774848593,
"autogenerated": false,
"ratio": 4.02725968436155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0027328509634058427,
"num_lines": 182
} |
"""A spec and code to implement the spec.
The spec defines a generic mapper for types to other types,
which can be used for validation, code generation and more.
NOTE: The below imports are always wrapped in a try/catch block
to ensure that users can optionally use/install one or more without having
to install them all.
"""
from decimal import Decimal
from datetime import date
from datetime import datetime
try:
    # enum entered the stdlib in Python 3.4; absent on Python 2.
    from enum import Enum
except ImportError:
    # Fall back to None so lookups below still resolve.
    Enum = None
try:
    # Py3 uses unicode for str.
    unicode
except NameError:
    # Python 3: alias ``unicode`` to ``str`` so the mappings below work.
    unicode = str
# Default mapping: generic type-name strings -> plain Python types.
vanilla = {
    # Numbers
    'int': int,
    'integer': int,
    'decimal': Decimal,
    'float': float,
    'number': int,
    'num': int,
    'int32': int,
    'int64': int,
    'uint32': int,
    'uint64': int,
    'double': float,
    'long': float,
    # Strings
    'str': str,
    'string': str,
    'unicode': unicode,
    # Booleans
    'true': bool,
    'false': bool,
    'bool': bool,
    # Dates
    'time': datetime,
    'datetime': datetime,
    'date': date,
    # Binary
    'file': bytes,
    'blob': bytes,
    'binary': bytes,
    'bytes': bytes,
    # Password
    'password': str,
    # Multi-choice
    'enum': Enum,
    # Configs/serialized formats - TODO: how best to map these.
    'pickle': None,
    'pkl': None,
    'json': None,
    'yaml': None,
    # Data structures - list
    'list': list,
    'array': list,
    # Data structures - dictionary
    'dict': dict,
    'dictionary': dict,
    # Data structures - tuple
    'tuple': tuple,
}
try:
    import sqlalchemy
    # SQLAlchemy column-type mapping; empty when sqlalchemy isn't installed.
    sqlalchemy_mapper = {
        # Numbers
        'int': sqlalchemy.Integer,
        'integer': sqlalchemy.Integer,
        'decimal': sqlalchemy.Float,
        'float': sqlalchemy.Float,
        'number': sqlalchemy.Integer,
        'num': sqlalchemy.Integer,
        'int32': sqlalchemy.SmallInteger,
        'int64': sqlalchemy.BigInteger,
        'uint32': sqlalchemy.SmallInteger,
        'uint64': sqlalchemy.BigInteger,
        'double': sqlalchemy.Float,
        'long': sqlalchemy.BigInteger,
        # Strings
        'str': sqlalchemy.String,
        'string': sqlalchemy.String,
        'unicode': sqlalchemy.Unicode,
        # Booleans
        'true': sqlalchemy.Boolean,
        'false': sqlalchemy.Boolean,
        'bool': sqlalchemy.Boolean,
        # Dates
        'time': sqlalchemy.Time,
        'datetime': sqlalchemy.DateTime,
        'date': sqlalchemy.Date,
        # Binary
        'file': sqlalchemy.BLOB,
        'blob': sqlalchemy.BLOB,
        # NOTE(review): sqlalchemy.Binary is deprecated (removed in 1.4) in
        # favor of LargeBinary — confirm the SQLAlchemy version targeted here.
        'binary': sqlalchemy.Binary,
        'bytes': sqlalchemy.Binary,
        # Password
        'password': sqlalchemy.String,
        # Multi-choice
        'enum': sqlalchemy.Enum,
        # Configs/serialized formats
        'pickle': sqlalchemy.PickleType,
        'pkl': sqlalchemy.PickleType,
        'json': sqlalchemy.types.JSON,
        'yaml': None,
        # Data structures - list
        'list': sqlalchemy.types.ARRAY,
        'array': sqlalchemy.types.ARRAY,
        # Data structures - dictionary
        'dict': None,
        'dictionary': None,
        # Data structures - tuple
        'tuple': None,
    }
except ImportError:
    sqlalchemy_mapper = {}
try:
    import factory.fuzzy
    # Mapper: spec type names -> factory_boy fuzzy attribute classes
    # (None where no sensible fuzzer exists).
    factoryboy_mapper = {
        # Numbers
        'int': factory.fuzzy.FuzzyInteger,
        'integer': factory.fuzzy.FuzzyInteger,
        'decimal': factory.fuzzy.FuzzyDecimal,
        'float': factory.fuzzy.FuzzyFloat,
        'number': factory.fuzzy.FuzzyInteger,
        'num': factory.fuzzy.FuzzyInteger,
        'int32': factory.fuzzy.FuzzyInteger,
        'int64': factory.fuzzy.FuzzyInteger,
        'uint32': factory.fuzzy.FuzzyInteger,
        'uint64': factory.fuzzy.FuzzyInteger,
        'double': factory.fuzzy.FuzzyFloat,
        'long': factory.fuzzy.FuzzyFloat,
        # Strings
        'str': factory.fuzzy.FuzzyText,
        'string': factory.fuzzy.FuzzyText,
        'unicode': factory.fuzzy.FuzzyText,
        # Booleans
        'true': None,
        'false': None,
        'bool': None,
        # Dates
        'time': factory.fuzzy.FuzzyDateTime,
        'datetime': factory.fuzzy.FuzzyDateTime,
        'date': factory.fuzzy.FuzzyDate,
        # Binary
        'file': None,
        'blob': None,
        'binary': None,
        'bytes': None,
        # Password
        'password': str,
        # Multi-choice
        'enum': factory.fuzzy.FuzzyChoice,
        # Configs/serialized formats
        'pickle': None,
        'pkl': None,
        'json': None,
        'yaml': None,
        # Data structures - list
        'list': factory.fuzzy.FuzzyChoice,
        'array': factory.fuzzy.FuzzyChoice,
        # Data structures - dictionary
        'dict': None,
        'dictionary': None,
        # Data structures - tuple
        'tuple': None,
    }
except ImportError:
    # factory_boy not installed: mapper intentionally left empty.
    factoryboy_mapper = {}
try:
    import wtforms
    # Mapper: spec type names -> WTForms field classes
    # (None where no form field makes sense).
    wtform_mapper = {
        # Numbers
        'int': wtforms.IntegerField,
        'integer': wtforms.IntegerField,
        'decimal': wtforms.DecimalField,
        'float': wtforms.DecimalField,  # Not using floatfield atm
        'number': wtforms.IntegerField,
        'num': wtforms.IntegerField,
        'int32': wtforms.IntegerField,
        'int64': wtforms.IntegerField,
        'uint32': wtforms.IntegerField,
        'uint64': wtforms.DecimalField,
        'double': wtforms.IntegerField,
        'long': wtforms.IntegerField,
        # Strings
        'str': wtforms.TextField,
        'string': wtforms.TextField,
        'unicode': wtforms.TextField,
        # Booleans
        'true': wtforms.BooleanField,
        'false': wtforms.BooleanField,
        'bool': wtforms.BooleanField,
        # Dates
        'time': wtforms.DateTimeField,
        'datetime': wtforms.DateTimeField,
        'date': wtforms.DateField,
        # Binary
        'file': wtforms.FileField,
        'blob': wtforms.FileField,
        'binary': wtforms.FileField,
        'bytes': wtforms.FileField,
        # Password
        'password': wtforms.PasswordField,
        # Multi-choice
        'enum': wtforms.SelectField,
        # Configs/serialized formats
        'pickle': wtforms.TextAreaField,
        'pkl': wtforms.TextAreaField,
        'json': wtforms.TextAreaField,
        'yaml': wtforms.TextAreaField,
        # Data structures - list
        'list': wtforms.SelectMultipleField,
        'array': wtforms.SelectMultipleField,
        # Data structures - dictionary
        'dict': None,
        'dictionary': None,
        # Data structures - tuple
        'tuple': None,
    }
except ImportError:
    # WTForms not installed: mapper intentionally left empty.
    wtform_mapper = {}
# Registry of every mapping context, keyed by the name that
# get_context_field() accepts as its ``context`` argument.
all_maps = {
    'vanilla': vanilla,
    'wtforms': wtform_mapper,
    'sqlalchemy': sqlalchemy_mapper,
    'factoryboy': factoryboy_mapper,
}
def get_context_field(fieldtype, context, fallback=None):
    """Look up the type mapped to ``fieldtype`` within a mapping context.

    Args:
        fieldtype: The field type name (case-insensitive).
        context: The mapping context, a key of ``all_maps``.
        fallback: Optional name of a fallback field type to try when the
            primary one maps to None (default: {None})

    Returns:
        The mapped type (usually a callable), or None when either the
        context or the field name is unknown.

    Raises:
        NotImplementedError: If the field (and any fallback) maps to None.
    """
    fieldtype = fieldtype.lower()
    no_type_error = ('Type: "{}" for "{}" '
                     'has not been implemented'.format(fieldtype, context))
    try:
        mapping = all_maps[context]
        resolved = mapping[fieldtype]
    except KeyError:
        # Unknown context or field name.
        return None
    if resolved is not None:
        return resolved
    # The field exists but has no mapping; try the fallback, if any.
    if fallback is None:
        raise NotImplementedError(no_type_error)
    try:
        resolved = mapping[fallback]
    except KeyError:
        return None
    if resolved is None:
        raise NotImplementedError(no_type_error)
    return resolved
| {
"repo_name": "christabor/type_mappers",
"path": "type_mappers/__init__.py",
"copies": "1",
"size": "7832",
"license": "mit",
"hash": 54866169894168890,
"line_mean": 23.3987538941,
"line_max": 75,
"alpha_frac": 0.5820990807,
"autogenerated": false,
"ratio": 4.087682672233821,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 321
} |
"""A specfile interface.
Used mainly by transformer and tests to represent
a conventional specfile as a structure of sections.
"""
try:
from functools import reduce
except ImportError:
pass # we're on Python 2 => ok
import re
from spec2scl import settings
class Specfile(object):
    """A conventional specfile viewed as an ordered list of sections."""

    def __init__(self, specfile):
        # Accept either one string or an iterable of string chunks.
        if not isinstance(specfile, str):
            specfile = ''.join(specfile)
        self.specfile = specfile
        self.sections = self.split_sections()

    def split_sections(self):
        """Split a specfile into sections.

        Return:
            list of (section name, section text)
        """
        patterns = [re.compile('^' + name + '\\b', re.M)
                    for name in settings.SPECFILE_SECTIONS]
        starts = sorted(match.start()
                        for pattern in patterns
                        for match in pattern.finditer(self.specfile))
        # Everything before the first recognized header is the preamble;
        # with no headers at all (mainly in tests) the whole file is.
        preamble_end = starts[0] if starts else len(self.specfile)
        sections = [('%header', self.specfile[:preamble_end])]
        for idx, begin in enumerate(starts):
            end = starts[idx + 1] if idx + 1 < len(starts) else None
            chunk = self.specfile[begin:end]
            for pattern in patterns:
                if pattern.match(chunk):
                    # Recover the section name by stripping the leading
                    # '^' and trailing '\b' from the pattern source.
                    sections.append((pattern.pattern[1:-2], chunk))
        return sections

    def __contains__(self, what):
        return any(what in text for _name, text in self.sections)

    def __str__(self):
        # Empty sections (e.g. an empty preamble in tests) are skipped so
        # the output does not start with stray newlines.
        return '\n\n'.join(text for _name, text in self.sections if text)
| {
"repo_name": "sclorg/spec2scl",
"path": "spec2scl/specfile.py",
"copies": "2",
"size": "2041",
"license": "mit",
"hash": 8684009288473769000,
"line_mean": 33.0166666667,
"line_max": 93,
"alpha_frac": 0.6080352768,
"autogenerated": false,
"ratio": 4.041584158415842,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001124630982931643,
"num_lines": 60
} |
"""A special directive for including a matplotlib plot.
Given a path to a .py file, it includes the source code inline, then:
- On HTML, will include a .png with a link to a high-res .png.
- On LaTeX, will include a .pdf
This directive supports all of the options of the `image` directive,
except for `target` (since plot will add its own target).
Additionally, if the :include-source: option is provided, the literal
source will be included inline, as well as a link to the source.
"""
import sys, os, glob, shutil
from docutils.parsers.rst import directives
try:
    # docutils 0.4
    from docutils.parsers.rst.directives.images import align
except ImportError:
    # docutils 0.5 moved the alignment validator onto the Image class.
    from docutils.parsers.rst.directives.images import Image
    align = Image.align

import matplotlib
import IPython.Shell

# Select the non-interactive Agg backend before pyplot is imported so the
# plot scripts render off-screen during the docs build.
matplotlib.use('Agg')
import matplotlib.pyplot as plt

# Shared IPython shell used to execute the plot scripts (via magic_run).
mplshell = IPython.Shell.MatplotlibShell('mpl')
options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': align,
'class': directives.class_option,
'include-source': directives.flag }
template = """
.. htmlonly::
[`source code <../%(srcdir)s/%(basename)s.py>`__,
`png <../%(srcdir)s/%(basename)s.hires.png>`__,
`pdf <../%(srcdir)s/%(basename)s.pdf>`__]
.. image:: ../%(srcdir)s/%(basename)s.png
%(options)s
.. latexonly::
.. image:: ../%(srcdir)s/%(basename)s.pdf
%(options)s
"""
def makefig(fullpath, outdir):
    """
    run a pyplot script and save the low and high res PNGs and a PDF in _static

    Skips the work entirely when all three outputs already exist (acts as
    a simple build cache). Note this is Python 2 code (print statements).
    """
    fullpath = str(fullpath)  # todo, why is unicode breaking this
    # (output extension, DPI to render at)
    formats = [('png', 100),
               ('hires.png', 200),
               ('pdf', 72),
               ]
    basedir, fname = os.path.split(fullpath)
    basename, ext = os.path.splitext(fname)
    all_exists = True
    # Copy the source script next to the outputs so the docs can link it.
    if basedir != outdir:
        shutil.copyfile(fullpath, os.path.join(outdir, fname))
    # Check whether every rendered format is already present.
    for format, dpi in formats:
        outname = os.path.join(outdir, '%s.%s' % (basename, format))
        if not os.path.exists(outname):
            all_exists = False
            break
    if all_exists:
        print ' already have %s'%fullpath
        return
    print ' building %s'%fullpath
    plt.close('all')    # we need to clear between runs
    matplotlib.rcdefaults()
    # Execute the script in the embedded matplotlib shell; the figures it
    # creates stay open for the savefig loop below.
    mplshell.magic_run(fullpath)
    for format, dpi in formats:
        outname = os.path.join(outdir, '%s.%s' % (basename, format))
        if os.path.exists(outname): continue
        plt.savefig(outname, dpi=dpi)
def run(arguments, options, state_machine, lineno):
    """Shared implementation of the plot directive.

    Renders the referenced pyplot script via makefig(), then splices the
    generated reStructuredText (optionally preceded by the literal
    source) into the document at the directive's position.
    """
    reference = directives.uri(arguments[0])
    basedir, fname = os.path.split(reference)
    basename, ext = os.path.splitext(fname)
    # todo - should we be using the _static dir for the outdir, I am
    # not sure we want to corrupt that dir with autogenerated files
    # since it also has permanent files in it which makes it difficult
    # to clean (save an rm -rf followed by and svn up)
    srcdir = 'pyplots'
    makefig(os.path.join(srcdir, reference), srcdir)
    # todo: it is not great design to assume the makefile is putting
    # the figs into the right place, so we may want to do that here instead.
    # NOTE: dict.has_key() is Python 2 only.
    if options.has_key('include-source'):
        lines = ['.. literalinclude:: ../pyplots/%(reference)s' % locals()]
        del options['include-source']
    else:
        lines = []
    # Render the remaining options as directive fields (":key: value").
    options = [' :%s: %s' % (key, val) for key, val in
               options.items()]
    options = "\n".join(options)
    lines.extend((template % locals()).split('\n'))
    state_machine.insert_input(
        lines, state_machine.input_lines.source(0))
    return []
try:
    from docutils.parsers.rst import Directive
except ImportError:
    # Old docutils (pre-0.5): register a function-based directive via the
    # private _directives registry.
    from docutils.parsers.rst.directives import _directives

    def plot_directive(name, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
        return run(arguments, options, state_machine, lineno)
    plot_directive.__doc__ = __doc__
    plot_directive.arguments = (1, 0, 1)
    plot_directive.options = options
    _directives['plot'] = plot_directive
else:
    # Modern docutils: subclass Directive and register it officially.
    class plot_directive(Directive):
        required_arguments = 1
        optional_arguments = 0
        final_argument_whitespace = True
        option_spec = options
        def run(self):
            return run(self.arguments, self.options,
                       self.state_machine, self.lineno)
    plot_directive.__doc__ = __doc__

    directives.register_directive('plot', plot_directive)
| {
"repo_name": "FrankBian/kuma",
"path": "vendor/packages/ipython/docs/sphinxext/plot_directive.py",
"copies": "5",
"size": "4718",
"license": "mpl-2.0",
"hash": -1913020632091823000,
"line_mean": 29.4387096774,
"line_max": 79,
"alpha_frac": 0.637134379,
"autogenerated": false,
"ratio": 3.8264395782643956,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6963573957264395,
"avg_score": null,
"num_lines": null
} |
"""A specialization of RO.Wdg.StripChart that adds methods to trace keyVars
History:
2010-10-01 ROwen
2010-12-23 Backward-incompatible changes:
- Modified for backward-incompatible RO.Wdg.StripChartWdg
- plotKeyVar no longer takes a "name" argument; use label if you want a name that shows up in legends.
2012-05-31 Return line from plotKeyVar.
2015-11-03 ROwen Replace "== None" with "is None" and "!= None" with "is not None" to modernize the code.
"""
import RO.Wdg.StripChartWdg
TimeConverter = RO.Wdg.StripChartWdg.TimeConverter
class StripChartWdg(RO.Wdg.StripChartWdg.StripChartWdg):
    """A strip chart that can trace opscore keyword variables."""

    def plotKeyVar(self, subplotInd, keyVar, keyInd=0, func=None, **kargs):
        """Add a line that tracks one indexed value of a keyword variable.

        Inputs:
        - subplotInd: index of line on Subplot
        - keyVar: keyword variable to plot
        - keyInd: index of keyword variable to plot
        - func: optional transform applied to each value; it never
            receives None; if func is None the data is not transformed
        **kargs: keyword arguments for StripChartWdg.addLine

        Returns the new line.
        """
        if func is None:
            func = lambda x: x
        newLine = self.addLine(subplotInd=subplotInd, **kargs)

        def doAddPoint(keyVar, line=newLine, keyInd=keyInd, func=func):
            # Skip stale or substituted keyword data and missing values.
            if not keyVar.isCurrent or not keyVar.isGenuine:
                return
            value = keyVar[keyInd]
            if value is not None:
                line.addPoint(func(value))

        keyVar.addCallback(doAddPoint, callNow=False)
        return newLine
| {
"repo_name": "r-owen/stui",
"path": "TUI/Base/StripChartWdg.py",
"copies": "1",
"size": "1617",
"license": "bsd-3-clause",
"hash": 7166198364515294000,
"line_mean": 38.4390243902,
"line_max": 114,
"alpha_frac": 0.6406926407,
"autogenerated": false,
"ratio": 3.887019230769231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.502771187146923,
"avg_score": null,
"num_lines": null
} |
"""A specialization of RO.Wdg.StripChart that adds methods to trace keyVars
History:
2010-10-01 ROwen
2010-12-23 Backward-incompatible changes:
- Modified for backward-incompatible RO.Wdg.StripChartWdg
- plotKeyVar no longer takes a "name" argument; use label if you want a name that shows up in legends.
"""
import RO.Wdg.StripChartWdg
TimeConverter = RO.Wdg.StripChartWdg.TimeConverter
class StripChartWdg(RO.Wdg.StripChartWdg.StripChartWdg):
    """A strip chart that can trace keyword variables (old-style TUI API)."""

    def plotKeyVar(self, subplotInd, keyVar, keyInd=0, func=None, **kargs):
        """Plot one value of one keyVar

        Inputs:
        - subplotInd: index of line on Subplot
        - keyVar: keyword variable to plot
        - keyInd: index of keyword variable to plot
        - func: function to transform the value; note that func will never receive None;
            if func is None then the data is not transformed
        **kargs: keyword arguments for StripChartWdg.addLine
        """
        line = self.addLine(subplotInd=subplotInd, **kargs)
        if func is None:
            func = lambda x: x
        # Callback uses the old-style signature (valList, isCurrent, keyVar).
        # NOTE(review): keyVar defaults to None, so keyVar.isGenuine() would
        # raise if the dispatcher ever omits it -- confirm against caller.
        def callFunc(valList, isCurrent, keyVar=None, line=line, keyInd=keyInd, func=func):
            if not isCurrent or not keyVar.isGenuine():
                return
            val = valList[keyInd]
            if val is None:
                return
            line.addPoint(func(val))
        keyVar.addCallback(callFunc, callNow=False)
"repo_name": "r-owen/TUI",
"path": "TUI/Base/StripChartWdg.py",
"copies": "1",
"size": "1467",
"license": "bsd-3-clause",
"hash": 8897508130497432000,
"line_mean": 37.6052631579,
"line_max": 114,
"alpha_frac": 0.6387184731,
"autogenerated": false,
"ratio": 4.019178082191781,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.515789655529178,
"avg_score": null,
"num_lines": null
} |
"""A specialized report"""
from io import StringIO
import datetime
from pyiem.util import get_dbconn
def averageTemp(db, hi="high", lo="low"):
    """Return the mean high and low temperature over all days in ``db``.

    Args:
        db: dict mapping day -> record dict containing the *hi*/*lo* keys
        hi: key holding each day's high value
        lo: key holding each day's low value

    Returns:
        tuple of (average high, average low) as floats
    """
    ndays = float(len(db))
    total_hi = sum(rec[hi] for rec in db.values())
    total_lo = sum(rec[lo] for rec in db.values())
    return total_hi / ndays, total_lo / ndays
def hdd(db, hi="high", lo="low"):
    """Total heating degree days (base 65F) over all days in ``db``.

    Each day contributes ``65 - mean`` when its mean temperature
    (average of *hi* and *lo*) is below 65, else nothing.
    """
    total = 0
    for rec in db.values():
        mean = (rec[hi] + rec[lo]) / 2.00
        if mean < 65:
            total += 65.0 - mean
    return total
def cdd(db, hi="high", lo="low"):
    """Total cooling degree days (base 65F) over all days in ``db``.

    Each day contributes ``mean - 65`` when its mean temperature
    (average of *hi* and *lo*) is above 65, else nothing.
    """
    total = 0
    for rec in db.values():
        mean = (rec[hi] + rec[lo]) / 2.00
        if mean > 65:
            total += mean - 65.0
    return total
def application(_environ, start_response):
    """WSGI app: render the Orange City climate summary as plain text.

    Pulls roughly the last month of observations (the 18th of last month
    through the 17th of this one), lines them up against climatology,
    and appends HDD/CDD and wind-speed summaries.
    """
    # One cursor per database consulted below.
    COOP = get_dbconn("coop")
    ccursor = COOP.cursor()
    IEM = get_dbconn("iem")
    icursor = IEM.cursor()
    ASOS = get_dbconn("asos")
    acursor = ASOS.cursor()
    # Offset applied to the SUX temperatures (currently none).
    ADJUSTMENT = 0
    now = datetime.datetime.now()
    # Reporting window: 18th of the previous month through the 17th.
    e = now.replace(day=17)
    s = (e - datetime.timedelta(days=31)).replace(day=18)
    # db maps "MMDD" -> daily record; "M" marks missing data.
    # NOTE(review): a day left at "M" would crash the %6i formatting
    # below -- confirm the upstream data is always complete.
    db = {}
    now = s
    while now <= e:
        db[now.strftime("%m%d")] = {
            "high": "M",
            "low": "M",
            "avg_high": "M",
            "avg_low": "M",
        }
        now += datetime.timedelta(days=1)
    # Get Sioux City data
    # NOTE(review): dates are %-interpolated into the SQL; values are
    # internal datetimes, but parameterized queries would be safer.
    icursor.execute(
        """SELECT day, max_tmpf, min_tmpf from
        summary s JOIN stations t ON (t.iemid = s.iemid)
        WHERE t.id = 'SUX' and day >= '%s' and
        day <= '%s' """
        % (s.strftime("%Y-%m-%d"), e.strftime("%Y-%m-%d"))
    )
    for row in icursor:
        db[row[0].strftime("%m%d")]["high"] = row[1] + ADJUSTMENT
        db[row[0].strftime("%m%d")]["low"] = row[2] + ADJUSTMENT
    # Lemars climatology supplies the "average" comparison columns.
    ccursor.execute(
        """SELECT high, low, valid from climate
        WHERE station = 'IA4735'"""
    )
    for row in ccursor:
        if row[2].strftime("%m%d") not in db:
            continue
        db[row[2].strftime("%m%d")]["avg_high"] = row[0]
        db[row[2].strftime("%m%d")]["avg_low"] = row[1]
    # Compute Average wind speed
    acursor.execute(
        """
        SELECT station, avg(sknt) from alldata where station in ('SHL', 'ORC')
        and valid BETWEEN '%s' and '%s' and sknt >= 0
        GROUP by station ORDER by station DESC
        """
        % (s.strftime("%Y-%m-%d %H:%M"), e.strftime("%Y-%m-%d %H:%M"))
    )
    # Only the first station (sorted DESC) is used for the wind average.
    row = acursor.fetchone()
    awind = row[1]
    headers = [("Content-type", "text/plain")]
    start_response("200 OK", headers)
    sio = StringIO()
    sio.write(" Orange City Climate Summary\n")
    # NOTE(review): the header names 3 columns while each row below
    # prints 5 (observed high/low plus climatology high/low).
    sio.write("%15s %6s %6s\n" % ("DATE", "HIGH", "LOW"))
    now = s
    while now <= e:
        sio.write(
            ("%15s %6i %6i %6i %6i\n")
            % (
                now.strftime("%Y-%m-%d"),
                db[now.strftime("%m%d")]["high"],
                db[now.strftime("%m%d")]["low"],
                db[now.strftime("%m%d")]["avg_high"],
                db[now.strftime("%m%d")]["avg_low"],
            )
        )
        now += datetime.timedelta(days=1)
    # Aggregate observed vs climatological statistics.
    h, low = averageTemp(db)
    ch, cl = averageTemp(db, "avg_high", "avg_low")
    l_hdd = hdd(db)
    c_hdd = hdd(db, "avg_high", "avg_low")
    l_cdd = cdd(db)
    c_cdd = cdd(db, "avg_high", "avg_low")
    sio.write(
        """
Summary Information [%s - %s]
-------------------
Observed | Climate | Diff
High %4.1f %4.1f %4.1f
Low %4.1f %4.1f %4.1f
HDD(base65) %4.0f %4.0f %4.0f
CDD(base65) %4.0f %4.0f %4.0f
Wind[MPH] %4.1f M M
"""
        % (
            s.strftime("%d %B %Y"),
            e.strftime("%d %B %Y"),
            h,
            ch,
            h - ch,
            low,
            cl,
            low - cl,
            l_hdd,
            c_hdd,
            l_hdd - c_hdd,
            l_cdd,
            c_cdd,
            l_cdd - c_cdd,
            awind,
        )
    )
    return [sio.getvalue().encode("ascii")]
| {
"repo_name": "akrherz/iem",
"path": "cgi-bin/climate/orc.py",
"copies": "1",
"size": "4250",
"license": "mit",
"hash": 5682048764897902000,
"line_mean": 25.397515528,
"line_max": 76,
"alpha_frac": 0.4567058824,
"autogenerated": false,
"ratio": 3.0422333571939872,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8998939239593987,
"avg_score": 0,
"num_lines": 161
} |
""" A special-purpose statusbar for Kura """
# Ancient-Python compatibility shim: define False/True before they became
# builtins. (Assigning to them is a syntax error on Python 3.)
False = 0
True = 1
import os.path
from qt import QStatusBar, QLabel, QSizePolicy, QFont
from kuragui.guiconfig import guiConf
from kuragui import guiconfig
from kuralib import kuraapp
class KuraPanel(QLabel):
    """A framed, padded label used as a status bar panel."""

    def __init__(self, *args):
        QLabel.__init__(self, *args)
        # Panel frame plus a horizontally stretchable size policy.
        self.setFrameShape(QLabel.Panel)
        policy = QSizePolicy(1, 0, self.sizePolicy().hasHeightForWidth())
        self.setSizePolicy(policy)
        self.setFont(QFont(guiConf.widgetfontfamily, guiConf.widgetfontsize))

    def setText(self, text):
        # Pad the text with one space on each side for readability.
        QLabel.setText(self, " " + text + " ")
class KuraStatusBar(QStatusBar):
    """Kura's status bar: shows the current backend connection, with
    (currently disabled) panels for user, project and language."""

    def __init__(self, *args):
        QStatusBar.__init__(self, *args)
        self.lblConnection = KuraPanel(self)
        self.addWidget(self.lblConnection, 0, False)
        # NOTE: the user/project/language panels are disabled; the
        # set*Label methods below expect them to exist when re-enabled.
##        self.lblUser=KuraPanel(self)
##        self.addWidget(self.lblUser, 0, False)
##        self.lblProject=KuraPanel(self)
##        self.addWidget(self.lblProject, 0, False)
##        self.lblLanguage=KuraPanel(self)
##        self.addWidget(self.lblLanguage, 0, False)
        self.reset()

    def reset(self):
        """Refresh the connection panel from the current configuration."""
        if guiConf.backend == guiconfig.FILE:
            self.lblConnection.setText(os.path.join(guiConf.filepath,
                                                    guiConf.datastore))
        else:
            self.lblConnection.setText(guiConf.username + "@" +
                                       guiConf.hostname + ":" +
                                       guiConf.database)
##        self.setUserLabel(guiConf.usernr)
##        self.setProjectLabel(guiConf.projectnr)
##        self.setlanguageLabel(guiConf.languagenr)

    def setUserLabel(self, usernr):
        """Show the name of user *usernr* (requires self.lblUser)."""
        try:
            r = kuraapp.app.getObject("lng_user", usernr = usernr)
            username = r.getFieldValue("name")
        except Exception:
            # Bug fix: was a bare except, which also swallowed
            # SystemExit/KeyboardInterrupt.
            username = "No current user"
        self.lblUser.setText(username)

    def setProjectLabel(self, projectnr):
        """Show the description of project *projectnr* (requires self.lblProject)."""
        try:
            r = kuraapp.app.getObject("lng_project", projectnr = projectnr)
            project = r.getFieldValue("description")
        except Exception:
            # Bug fix: was a bare except (see setUserLabel).
            project = "No current project"
        self.lblProject.setText(project)

    def setlanguageLabel(self, languagenr):
        """Show the name of language *languagenr* (requires self.lblLanguage)."""
        try:
            language = "Language: %s" % \
                kuraapp.app.getObject("lng_language",
                    languagenr=languagenr).getFieldValue("language")
        except Exception:
            # Bug fix: was a bare except (see setUserLabel).
            language = "No current language"
        self.lblLanguage.setText(language)
__copyright__="""
copyright : (C) 2002 by Boudewijn Rempt
see copyright notice for license
email : boud@valdyas.org
"""
__revision__="""$Revision: 1.4 $"""[11:-2]
| {
"repo_name": "boudewijnrempt/kura",
"path": "kuraclient/kurastatusbar.py",
"copies": "1",
"size": "2886",
"license": "bsd-2-clause",
"hash": -6744461611099576000,
"line_mean": 30.0322580645,
"line_max": 89,
"alpha_frac": 0.5637560638,
"autogenerated": false,
"ratio": 3.9,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9820933886881635,
"avg_score": 0.028564435383672853,
"num_lines": 93
} |
# Import system modules
from __future__ import absolute_import
import arcpy
import math
import sys
from arcpy.sa import Aspect, Sin, Cos
# local imports
from . import utils
from . import config
# Check out any necessary licenses
arcpy.CheckOutExtension("Spatial")
def main(bathy=None, out_sin_raster=None, out_cos_raster=None):
    """
    Compute the statistical aspect of a bathymetric raster.

    Aspect is circular (0 and 359.9 degrees are immediately adjacent),
    so it is decomposed into sin(aspect) and cos(aspect) rasters; those
    two continuous variables preserve distances between directions and
    can be used in ordinary statistics.
    """
    try:
        arcpy.env.compression = "LZW"
        arcpy.env.rasterStatistics = "STATISTICS"
        utils.msg("Calculating aspect...")
        # "Aspect is expressed in positive degrees from 0 to 359.9,
        # measured clockwise from north." Sin/Cos expect radians, so
        # convert before applying them.
        aspect_degrees = Aspect(bathy)
        aspect_radians = aspect_degrees * (math.pi / 180)
        sin_raster = Sin(aspect_radians)
        cos_raster = Cos(aspect_radians)
        # Validate destinations, then persist both component rasters.
        out_sin_raster = utils.validate_path(out_sin_raster)
        out_cos_raster = utils.validate_path(out_cos_raster)
        arcpy.CopyRaster_management(sin_raster, out_sin_raster)
        arcpy.CopyRaster_management(cos_raster, out_cos_raster)
    except Exception as err:
        utils.msg(err, mtype='error')
# When executing as a standalone script get parameters from sys.
# Positional args: input bathymetry, sin(aspect) output, cos(aspect) output.
if __name__ == '__main__':
    config.mode = 'script'
    main(bathy=sys.argv[1],
         out_sin_raster=sys.argv[2],
         out_cos_raster=sys.argv[3])
| {
"repo_name": "EsriOceans/btm",
"path": "Install/toolbox/scripts/aspect.py",
"copies": "1",
"size": "2017",
"license": "mpl-2.0",
"hash": 7761680052133755000,
"line_mean": 32.775862069,
"line_max": 78,
"alpha_frac": 0.6643529995,
"autogenerated": false,
"ratio": 3.7771535580524342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4941506557552434,
"avg_score": null,
"num_lines": null
} |
"""A speedometer to measure the speed of a ball."""
from mpf.core.system_wide_device import SystemWideDevice
class Speedometer(SystemWideDevice):
    """A device which represents a tachometer."""

    config_section = 'speedometers'
    collection = 'speedometers'
    class_label = 'speedometer'

    __slots__ = ["time_start"]

    def __init__(self, machine, name):
        """Initialise speedometer."""
        super().__init__(machine, name)
        # Timestamp of the most recent start-switch hit; None while idle.
        self.time_start = None

    async def device_added_system_wide(self):
        """Register switch handlers on load."""
        await super().device_added_system_wide()
        handler_map = (('start_switch', self._handle_start_switch),
                       ('stop_switch', self._handle_stop_switch))
        for switch_key, handler in handler_map:
            self.machine.switch_controller.add_switch_handler_obj(
                self.config[switch_key], handler, 1)

    def _handle_start_switch(self, **kwargs):
        del kwargs
        self.time_start = self.config['start_switch'].last_change

    def _handle_stop_switch(self, **kwargs):
        del kwargs
        if self.time_start is None:
            return
        elapsed = self.config['stop_switch'].last_change - self.time_start
        self.time_start = None
        print(elapsed)
        # TODO: post event
| {
"repo_name": "missionpinball/mpf",
"path": "mpf/devices/speedometer.py",
"copies": "1",
"size": "1298",
"license": "mit",
"hash": 5540917065144658000,
"line_mean": 31.45,
"line_max": 76,
"alpha_frac": 0.626348228,
"autogenerated": false,
"ratio": 3.851632047477745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49779802754777447,
"avg_score": null,
"num_lines": null
} |
'''A Sphinx extension to automatically document CKAN's crazy plugins toolkit,
autodoc-style.
Sphinx's autodoc extension can document modules or classes, but although it
masquerades as a module CKAN's plugins toolkit is actually neither a module nor
a class, it's an object-instance of a class, and it's an object with weird
__getattr__ behavior too. Autodoc can't handle it, so we have this custom
Sphinx extension to automate documenting it instead.
This extension plugs into the reading phase of the Sphinx build. It intercepts
the 'toolkit' document (extensions/plugins-toolkit.rst) after Sphinx has read
the reStructuredText source from file. It modifies the source, adding in Sphinx
directives for everything in the plugins toolkit, and then the Sphinx build
continues as normal (just as if the generated reStructuredText had been entered
into plugins-toolkit.rst manually before running Sphinx).
'''
import types
import inspect
import ckan.plugins.toolkit as toolkit
def setup(app):
    '''Setup this Sphinx extension. Called once when initializing Sphinx.

    '''
    # Register source_read() on Sphinx's source-read event so it runs
    # after each source file has been read from disk.
    app.connect('source-read', source_read)
def format_function(name, function, docstring=None):
    '''Return a Sphinx .. function:: directive for the given function.

    The directive includes the function's docstring if it has one.

    :param name: the name to give to the function in the directive,
        eg. 'get_converter'
    :type name: string

    :param function: the function itself
    :type function: function

    :param docstring: if given, use this instead of introspecting the function
        to find its actual docstring
    :type docstring: string

    :returns: a Sphinx .. function:: directive for the function
    :rtype: string

    '''
    # The template we'll use to render the Sphinx function directive.
    template = ('.. py:function:: ckan.plugins.toolkit.{function}{args}\n'
                '\n'
                '{docstring}\n'
                '\n')

    # Get the arguments of the function, as a string like:
    # "(foo, bar=None, ...)"
    # Bug fix: inspect.getargspec()/formatargspec() were removed in
    # Python 3.11 (and choke on keyword-only args); prefer
    # inspect.signature() and fall back for old interpreters.
    try:
        argstring = str(inspect.signature(function))
    except AttributeError:
        argstring = inspect.formatargspec(*inspect.getargspec(function))

    docstring = docstring or inspect.getdoc(function)
    if docstring is None:
        docstring = ''
    else:
        # Indent the docstring by 3 spaces, as needed for the Sphinx directive.
        docstring = '\n'.join(['   ' + line for line in docstring.split('\n')])

    return template.format(function=name, args=argstring, docstring=docstring)
def format_class(name, class_, docstring=None):
    '''Return a Sphinx .. class:: directive for the given class.

    The directive includes the class's docstring if it has one.

    :param name: the name to give to the class in the directive,
        eg. 'DefaultDatasetForm'
    :type name: string

    :param class_: the class itself
    :type class_: class

    :param docstring: if given, use this instead of introspecting the class
        to find its actual docstring
    :type docstring: string

    :returns: a Sphinx .. class:: directive for the class
    :rtype: string

    '''
    # Template rendered into the Sphinx class directive.
    directive = ('.. py:class:: ckan.plugins.toolkit.{cls}\n'
                 '\n'
                 '{docstring}\n'
                 '\n')
    doc = docstring or inspect.getdoc(class_)
    if doc is None:
        body = ''
    else:
        # Sphinx directive content must be indented by 3 spaces.
        body = '\n'.join('   ' + line for line in doc.split('\n'))
    return directive.format(cls=name, docstring=body)
def format_object(name, object_, docstring=None):
    '''Return a Sphinx .. attribute:: directive for the given object.

    The directive includes the object's class's docstring if it has one.

    :param name: the name to give to the object in the directive,
        eg. 'request'
    :type name: string

    :param object_: the object itself
    :type object_: object

    :param docstring: if given, use this instead of introspecting the object
        to find its actual docstring
    :type docstring: string

    :returns: a Sphinx .. attribute:: directive for the object
    :rtype: string

    '''
    # Template rendered into the Sphinx attribute directive.
    directive = ('.. py:attribute:: ckan.plugins.toolkit.{obj}\n'
                 '\n'
                 '{docstring}\n'
                 '\n')
    doc = docstring or inspect.getdoc(object_)
    if doc is None:
        body = ''
    else:
        # Sphinx directive content must be indented by 3 spaces.
        body = '\n'.join('   ' + line for line in doc.split('\n'))
    return directive.format(obj=name, docstring=body)
def source_read(app, docname, source):
    '''Transform the contents of plugins-toolkit.rst to contain reference docs.

    Appends an autogenerated Sphinx directive for every member of the
    plugins toolkit to the in-memory reStructuredText source.
    '''
    # We're only interested in the 'plugins-toolkit' doc (plugins-toolkit.rst).
    if docname != 'extensions/plugins-toolkit':
        return

    source_ = ''
    for name, thing in inspect.getmembers(toolkit):

        # The plugins toolkit can override the docstrings of some of its
        # members (e.g. things that are imported from third-party libraries)
        # by putting custom docstrings in this docstring_overrides dict.
        custom_docstring = toolkit.docstring_overrides.get(name)

        if inspect.isfunction(thing):
            source_ += format_function(name, thing, docstring=custom_docstring)
        elif inspect.ismethod(thing):
            # We document plugins toolkit methods as if they're functions. This
            # is correct because the class ckan.plugins.toolkit._Toolkit
            # actually masquerades as a module ckan.plugins.toolkit, and you
            # call its methods as if they were functions.
            source_ += format_function(name, thing, docstring=custom_docstring)
        elif inspect.isclass(thing):
            source_ += format_class(name, thing, docstring=custom_docstring)
        # NOTE(review): types.ObjectType exists on Python 2 only (it is just
        # ``object``, so this branch is effectively a catch-all there); under
        # Python 3 this line raises NameError -- confirm target interpreter.
        elif isinstance(thing, types.ObjectType):
            source_ += format_object(name, thing, docstring=custom_docstring)
        else:
            assert False, ("Someone added {name}:{thing} to the plugins "
                           "toolkit and this Sphinx extension doesn't know "
                           "how to document that yet. If you're that someone, "
                           "you need to add a new format_*() function for it "
                           "here or the docs won't build.".format(
                               name=name, thing=thing))

    source[0] += source_

    # This is useful for debugging the generated RST.
    #open('/tmp/source', 'w').write(source[0])
| {
"repo_name": "WilJoey/tn_ckan",
"path": "ckan/plugins/toolkit_sphinx_extension.py",
"copies": "1",
"size": "6849",
"license": "mit",
"hash": 5073529053988026000,
"line_mean": 36.0216216216,
"line_max": 79,
"alpha_frac": 0.6579062637,
"autogenerated": false,
"ratio": 4.413015463917525,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00011500862564692352,
"num_lines": 185
} |
"""A sphnix-doc plugin to build mxnet docs"""
import subprocess
import re
import os
import json
from recommonmark import transform
import pypandoc
# start or end of a code block
# start or end of a code block (captures the indent and the language tag)
_CODE_MARK = re.compile('^([ ]*)```([\w]*)')

# language names and the according file extensions and comment symbol
_LANGS = {'python' : ('py', '#'),
          'r' : ('R','#'),
          'scala' : ('scala', '#'),
          'julia' : ('jl', '#'),
          'perl' : ('pl', '#'),
          'cpp' : ('cc', '//'),
          'bash' : ('sh', '#')}

# placeholder markers in the docs replaced with generated buttons
_LANG_SELECTION_MARK = 'INSERT SELECTION BUTTONS'
_SRC_DOWNLOAD_MARK = 'INSERT SOURCE DOWNLOAD BUTTONS'
def _run_cmd(cmds):
    """Execute *cmds* in a shell; re-raise on a non-zero exit status.

    ``cmds`` may be a single command string or an iterable of string
    fragments, which are concatenated (with no separator) first.
    """
    command = cmds if isinstance(cmds, str) else "".join(cmds)
    print("Execute \"%s\"" % command)
    try:
        subprocess.check_call(command, shell=True)
    except subprocess.CalledProcessError as err:
        print(err)
        raise err
def generate_doxygen(app):
    """Build the doxygen docs via make and copy the HTML into the output dir."""
    source_parent = "%s/.." % app.builder.srcdir
    _run_cmd("cd %s && make doxygen" % source_parent)
    _run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir)
def build_mxnet(app):
    """Compile the mxnet shared library (debug build) from the repo root."""
    build_cmd = ("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) DEBUG=1"
                 % app.builder.srcdir)
    _run_cmd(build_cmd)
def build_r_docs(app):
    """Build the R reference-manual PDF and move it under the output dir.

    Fix: the original referenced an undefined name ``root_path`` and raised
    NameError as soon as the hook ran.  Derive the repository root from the
    builder's source dir, matching the ``cd %s/..`` convention used by the
    sibling build hooks.
    """
    r_root = app.builder.srcdir + '/../R-package'
    root_path = app.builder.srcdir + '/..'
    pdf_path = root_path + '/docs/api/r/mxnet-r-reference-manual.pdf'
    _run_cmd('cd ' + r_root +
             '; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path)
    dest_path = app.builder.outdir + '/api/r/'
    _run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path)
def build_scala_docs(app):
    """Generate scaladoc output and move it under api/scala/docs."""
    scala_path = app.builder.srcdir + '/../scala-package/core/src/main/scala/ml/dmlc/mxnet'
    # scaladoc fails on some APIs, so force exit 0 so the build keeps going
    _run_cmd('cd ' + scala_path + '; scaladoc `find . | grep .*scala`; exit 0')
    dest_path = app.builder.outdir + '/api/scala/docs'
    _run_cmd('rm -rf ' + dest_path)
    _run_cmd('mkdir -p ' + dest_path)
    for doc_file in ('index', 'index.html', 'ml', 'lib', 'index.js', 'package.html'):
        _run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path)
def _convert_md_table_to_rst(table):
    """Convert a markdown table (list of '|'-delimited rows) to an rst list-table.

    Returns '' when *table* is not well-formed: fewer than three rows,
    ragged column counts, or a malformed separator row.

    Fix: the separator-row check used ``len(c) is not 0`` — an identity
    comparison against an int literal, which relies on CPython's small-int
    caching and triggers a SyntaxWarning on modern interpreters.  Use ``!=``.
    """
    if len(table) < 3:
        return ''
    out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n'
    for i, row in enumerate(table):
        cols = row.split('|')[1:-1]
        if i == 0:
            ncol = len(cols)
        elif len(cols) != ncol:
            # ragged table: give up
            return ''
        if i == 1:
            # separator row: every cell must be empty or a '---' rule
            for c in cols:
                if len(c) != 0 and '---' not in c:
                    return ''
        else:
            for j, c in enumerate(cols):
                out += ' * - ' if j == 0 else ' - '
                out += pypandoc.convert_text(
                    c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n'
    out += '```\n'
    return out
def convert_table(app, docname, source):
    """Sphinx 'source-read' hook: convert markdown tables in *source* to rst.

    Rewrites each chunk of *source* in place; table rows (lines starting
    with '|') are buffered and converted when the table ends.

    Fix: the failure check used ``converted is ''`` — string *identity*
    rather than equality, which is implementation-defined; compare with
    ``==`` instead.
    NOTE(review): a table that runs to the very end of a chunk is dropped
    (no trailing flush) — preserved from the original behavior.
    """
    num_tables = 0
    for i, chunk in enumerate(source):
        table = []
        output = ''
        in_table = False
        for l in chunk.split('\n'):
            r = l.strip()
            if r.startswith('|'):
                table.append(r)
                in_table = True
            else:
                if in_table:
                    converted = _convert_md_table_to_rst(table)
                    if converted == '':
                        print("Failed to convert the markdown table")
                        print(table)
                    else:
                        num_tables += 1
                        output += converted
                    in_table = False
                    table = []
                output += l + '\n'
        source[i] = output
    if num_tables > 0:
        print('Converted %d tables in %s' % (num_tables, docname))
def _parse_code_lines(lines):
    """Iterate over *lines*, tagging each with fenced-code-block state.

    Yields
    ------
    (line, in_code, lang, indent) where
    - line: the raw line
    - in_code: whether the line belongs to a code block (fences included)
    - lang: language of the current block, or None
    - indent: indent (spaces) of the opening fence, or None
    """
    in_code, lang, indent = False, None, None
    for line in lines:
        fence = _CODE_MARK.match(line)
        if fence is None:
            yield (line, in_code, lang, indent)
            continue
        if in_code:
            # closing fence still counts as part of the block; reset after
            yield (line, in_code, lang, indent)
            in_code, lang, indent = False, None, None
        else:
            fence_indent, fence_lang = fence.groups()
            if fence_lang.lower() in _LANGS:
                lang = fence_lang.lower()
                indent = len(fence_indent)
                in_code = True
            yield (line, in_code, lang, indent)
def _get_lang_selection_btn(langs):
    """Build the HTML button group used to switch between languages.

    The first entry in *langs* is rendered as the active button.
    """
    parts = ['<div class="text-center">\n<div class="btn-group opt-group" role="group">']
    for idx, name in enumerate(langs):
        css_state = 'active' if idx == 0 else ''
        label = name[0].upper() + name[1:].lower()
        parts.append('<button type="button" class="btn btn-default opt %s">%s</button>\n'
                     % (css_state, label))
    parts.append('</div>\n</div> <script type="text/javascript" src="../../_static/js/options.js"></script>')
    return ''.join(parts)
def _get_blocks(lang, lines):
    """Split *lines* into alternating (is_code, block_lines) runs for *lang*.

    Code blocks of other languages are treated as plain text.  Fence lines
    and blank lines framing a code block are stripped before yielding.
    """
    pending = []
    prev_state = None
    for line, is_code, block_lang, _ in _parse_code_lines(lines):
        if is_code and block_lang != lang:
            is_code = False
        if is_code != prev_state:
            if prev_state and len(pending) >= 2:
                # drop the ``` fences, then strip blank lines at both ends
                pending = pending[1:-1]
                while pending and not pending[0]:
                    pending.pop(0)
                while pending and not pending[-1]:
                    pending.pop()
            if pending:
                yield (prev_state, pending)
            pending = []
        pending.append(line)
        prev_state = is_code
    if pending:
        yield (prev_state, pending)
def _get_jupyter_notebook(lang, lines):
    """Assemble a Jupyter notebook (nbformat 4) dict for *lang* from markdown lines."""
    cells = []
    for is_code, block in _get_blocks(lang, lines):
        cell = {
            "cell_type": "code" if is_code else "markdown",
            "metadata": {},
            "source": '\n'.join(block),
        }
        if is_code:
            cell["outputs"] = []
            cell["execution_count"] = None
        cells.append(cell)
    return {
        "nbformat": 4,
        "nbformat_minor": 2,
        "metadata": {"language": lang, "display_name": '', "name": ''},
        "cells": cells,
    }
def _get_source(lang, lines):
    """Render markdown *lines* as a plain source listing for *lang*.

    Code blocks pass through verbatim, padded by a blank line on each side;
    prose lines get the language's comment prefix; HTML wrapper tags and
    matplotlib magics are dropped.
    """
    comment = _LANGS[lang][1] + ' '
    out = []
    for is_code, block in _get_blocks(lang, lines):
        if is_code:
            out.append('')
            out.extend(line for line in block if '%matplotlib' not in line)
            out.append('')
        else:
            for line in block:
                if ('<div>' in line or '</div>' in line or
                        '<script>' in line or '</script>' in line or
                        '<!--' in line or '-->' in line or
                        '%matplotlib' in line):
                    continue
                out.append(comment + line)
    return out
def _get_src_download_btn(out_prefix, langs, lines):
    """Write one .ipynb per language and return HTML download buttons for them."""
    parts = ['<div class="btn-group" role="group">\n']
    for lang in langs:
        ipynb = '%s_%s.ipynb' % (out_prefix, lang)
        with open(ipynb, 'w') as f:
            json.dump(_get_jupyter_notebook(lang, lines), f)
        basename = ipynb.split('/')[-1]
        parts.append('<div class="download_btn"><a href="%s" download="%s">'
                     '<span class="glyphicon glyphicon-download-alt"></span> %s</a></div>'
                     % (basename, basename, basename))
    parts.append('</div>\n')
    return ''.join(parts)
def add_buttons(app, docname, source):
    """Sphinx 'source-read' hook: replace marker lines with generated buttons.

    Writes per-language notebooks next to the built doc and splices download
    and language-selection button HTML into each source chunk in place.
    """
    out_prefix = app.builder.outdir + '/' + docname
    out_dir = os.path.dirname(out_prefix)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    for idx, chunk in enumerate(source):
        lines = chunk.split('\n')
        langs = set(l for (_, _, l, _) in _parse_code_lines(lines)
                    if l is not None and l in _LANGS)
        # download buttons first (they need the unmodified lines)
        for k, line in enumerate(lines):
            if _SRC_DOWNLOAD_MARK in line:
                lines[k] = _get_src_download_btn(out_prefix, langs, lines)
        # then the language-selection buttons
        for k, line in enumerate(lines):
            if _LANG_SELECTION_MARK in line:
                lines[k] = _get_lang_selection_btn(langs)
        source[idx] = '\n'.join(lines)
def setup(app):
    """Sphinx extension entry point: register build hooks and markdown passes."""
    for handler in (build_mxnet, generate_doxygen, build_scala_docs):
        app.connect("builder-inited", handler)
    # Building the R docs needs a LaTeX install, so it stays disabled.
    # app.connect("builder-inited", build_r_docs)
    app.connect('source-read', convert_table)
    app.connect('source-read', add_buttons)
    app.add_config_value('recommonmark_config', {
        'url_resolver': lambda url: 'http://mxnet.io/' + url,
        'enable_eval_rst': True,
    }, True)
    app.add_transform(transform.AutoStructify)
| {
"repo_name": "pluskid/mxnet",
"path": "docs/mxdoc.py",
"copies": "8",
"size": "9690",
"license": "apache-2.0",
"hash": 8571601984516879000,
"line_mean": 34.1086956522,
"line_max": 111,
"alpha_frac": 0.5034055728,
"autogenerated": false,
"ratio": 3.524918152055293,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0056571682397628656,
"num_lines": 276
} |
"""A sphnix-doc plugin to build mxnet docs"""
import subprocess
import re
import os
import json
import sys
from recommonmark import transform
import pypandoc
import StringIO
import contextlib
# white list to evaluate the code block output, such as ['tutorials/gluon']
# NOTE(review): the name keeps the original "WHILTELIST" spelling; it may be
# referenced from other modules, so it is not renamed here.
_EVAL_WHILTELIST = []
# Start or end of a fenced code block: captures the leading indent and the
# (possibly empty) language tag.
# Fix: the pattern was a plain string containing ``\w`` — an invalid escape
# sequence in a non-raw string (DeprecationWarning on modern CPython); use a
# raw string as regex patterns always should be.
_CODE_MARK = re.compile(r'^([ ]*)```([\w]*)')
# language names and the according file extensions and comment symbol
_LANGS = {'python' : ('py', '#'),
          'r' : ('R','#'),
          'scala' : ('scala', '#'),
          'julia' : ('jl', '#'),
          'perl' : ('pl', '#'),
          'cpp' : ('cc', '//'),
          'bash' : ('sh', '#')}
# Marker lines replaced with generated HTML by add_buttons().
_LANG_SELECTION_MARK = 'INSERT SELECTION BUTTONS'
_SRC_DOWNLOAD_MARK = 'INSERT SOURCE DOWNLOAD BUTTONS'
def _run_cmd(cmds):
    """Execute *cmds* in a shell; re-raise on a non-zero exit status.

    ``cmds`` may be a single command string or an iterable of string
    fragments, which are concatenated (with no separator) first.
    """
    command = cmds if isinstance(cmds, str) else "".join(cmds)
    print("Execute \"%s\"" % command)
    try:
        subprocess.check_call(command, shell=True)
    except subprocess.CalledProcessError as err:
        print(err)
        raise err
def generate_doxygen(app):
    """Build the doxygen docs via make and copy the HTML into the output dir."""
    source_parent = "%s/.." % app.builder.srcdir
    _run_cmd("cd %s && make doxygen" % source_parent)
    _run_cmd("cp -rf doxygen/html %s/doxygen" % app.builder.outdir)
def build_mxnet(app):
    """Compile the mxnet shared library (debug build) from the repo root."""
    build_cmd = ("cd %s/.. && cp make/config.mk config.mk && make -j$(nproc) DEBUG=1"
                 % app.builder.srcdir)
    _run_cmd(build_cmd)
def build_r_docs(app):
    """Build the R reference-manual PDF and move it under the output dir.

    Fix: the original referenced an undefined name ``root_path`` and raised
    NameError as soon as the hook ran.  Derive the repository root from the
    builder's source dir, matching the ``cd %s/..`` convention used by the
    sibling build hooks.
    """
    r_root = app.builder.srcdir + '/../R-package'
    root_path = app.builder.srcdir + '/..'
    pdf_path = root_path + '/docs/api/r/mxnet-r-reference-manual.pdf'
    _run_cmd('cd ' + r_root +
             '; R -e "roxygen2::roxygenize()"; R CMD Rd2pdf . --no-preview -o ' + pdf_path)
    dest_path = app.builder.outdir + '/api/r/'
    _run_cmd('mkdir -p ' + dest_path + '; mv ' + pdf_path + ' ' + dest_path)
def build_scala_docs(app):
    """Generate scaladoc output and move it under api/scala/docs."""
    scala_path = app.builder.srcdir + '/../scala-package/core/src/main/scala/ml/dmlc/mxnet'
    # scaladoc fails on some APIs, so force exit 0 so the build keeps going
    _run_cmd('cd ' + scala_path + '; scaladoc `find . | grep .*scala`; exit 0')
    dest_path = app.builder.outdir + '/api/scala/docs'
    _run_cmd('rm -rf ' + dest_path)
    _run_cmd('mkdir -p ' + dest_path)
    for doc_file in ('index', 'index.html', 'ml', 'lib', 'index.js', 'package.html'):
        _run_cmd('cd ' + scala_path + ' && mv -f ' + doc_file + ' ' + dest_path)
def _convert_md_table_to_rst(table):
    """Convert a markdown table (list of '|'-delimited rows) to an rst list-table.

    Returns '' when *table* is not well-formed: fewer than three rows,
    ragged column counts, or a malformed separator row.

    Fix: the separator-row check used ``len(c) is not 0`` — an identity
    comparison against an int literal, which relies on CPython's small-int
    caching and triggers a SyntaxWarning on modern interpreters.  Use ``!=``.
    """
    if len(table) < 3:
        return ''
    out = '```eval_rst\n.. list-table::\n :header-rows: 1\n\n'
    for i, row in enumerate(table):
        cols = row.split('|')[1:-1]
        if i == 0:
            ncol = len(cols)
        elif len(cols) != ncol:
            # ragged table: give up
            return ''
        if i == 1:
            # separator row: every cell must be empty or a '---' rule
            for c in cols:
                if len(c) != 0 and '---' not in c:
                    return ''
        else:
            for j, c in enumerate(cols):
                out += ' * - ' if j == 0 else ' - '
                out += pypandoc.convert_text(
                    c, 'rst', format='md').replace('\n', ' ').replace('\r', '') + '\n'
    out += '```\n'
    return out
def convert_table(app, docname, source):
    """Sphinx 'source-read' hook: convert markdown tables in *source* to rst.

    Rewrites each chunk of *source* in place; table rows (lines starting
    with '|') are buffered and converted when the table ends.

    Fix: the failure check used ``converted is ''`` — string *identity*
    rather than equality, which is implementation-defined; compare with
    ``==`` instead.
    NOTE(review): a table that runs to the very end of a chunk is dropped
    (no trailing flush) — preserved from the original behavior.
    """
    num_tables = 0
    for i, chunk in enumerate(source):
        table = []
        output = ''
        in_table = False
        for l in chunk.split('\n'):
            r = l.strip()
            if r.startswith('|'):
                table.append(r)
                in_table = True
            else:
                if in_table:
                    converted = _convert_md_table_to_rst(table)
                    if converted == '':
                        print("Failed to convert the markdown table")
                        print(table)
                    else:
                        num_tables += 1
                        output += converted
                    in_table = False
                    table = []
                output += l + '\n'
        source[i] = output
    if num_tables > 0:
        print('Converted %d tables in %s' % (num_tables, docname))
def _parse_code_lines(lines):
    """Iterate over *lines*, tagging each with fenced-code-block state.

    Yields
    ------
    (line, in_code, lang, indent) where
    - line: the raw line
    - in_code: whether the line belongs to a code block (fences included)
    - lang: language of the current block, or None
    - indent: indent (spaces) of the opening fence, or None
    """
    in_code, lang, indent = False, None, None
    for line in lines:
        fence = _CODE_MARK.match(line)
        if fence is None:
            yield (line, in_code, lang, indent)
            continue
        if in_code:
            # closing fence still counts as part of the block; reset after
            yield (line, in_code, lang, indent)
            in_code, lang, indent = False, None, None
        else:
            fence_indent, fence_lang = fence.groups()
            if fence_lang.lower() in _LANGS:
                lang = fence_lang.lower()
                indent = len(fence_indent)
                in_code = True
            yield (line, in_code, lang, indent)
def _get_lang_selection_btn(langs):
    """Build the HTML button group used to switch between languages.

    The first entry in *langs* is rendered as the active button.
    """
    parts = ['<div class="text-center">\n<div class="btn-group opt-group" role="group">']
    for idx, name in enumerate(langs):
        css_state = 'active' if idx == 0 else ''
        label = name[0].upper() + name[1:].lower()
        parts.append('<button type="button" class="btn btn-default opt %s">%s</button>\n'
                     % (css_state, label))
    parts.append('</div>\n</div> <script type="text/javascript" src="../../_static/js/options.js"></script>')
    return ''.join(parts)
def _get_blocks(lines):
    """Split *lines* into alternating code and non-code blocks.

    Returns
    -------
    iterator of (bool, str, list of str)
        - whether the block is code
        - the block's source language (None for prose)
        - the block's lines (fences and framing blank lines stripped)
    """
    pending = []
    prev_lang = None
    prev_state = None
    for line, is_code, cur_lang, _ in _parse_code_lines(lines):
        if is_code != prev_state:
            if prev_state and len(pending) >= 2:
                # drop the ``` fences, then strip blank lines at both ends
                pending = pending[1:-1]
                while pending and not pending[0]:
                    pending.pop(0)
                while pending and not pending[-1]:
                    pending.pop()
            if pending:
                yield (prev_state, prev_lang, pending)
            pending = []
        pending.append(line)
        prev_lang = cur_lang
        prev_state = is_code
    if pending:
        yield (prev_state, prev_lang, pending)
def _get_mk_code_block(src, lang):
    """Wrap *src* in a markdown fenced code block tagged with *lang*.

    E.g. ``_get_mk_code_block('import mxnet', 'python')`` yields::

        ```python
        import mxnet
        ```
    """
    tag = '' if lang is None else lang
    body = src.rstrip()
    return '```' + tag + '\n' + body + '\n' + '```\n'
@contextlib.contextmanager
def _string_io():
    """Context manager that redirects stdout/stderr into a shared StringIO.

    Yields the StringIO buffer capturing both streams.

    Fix: the original had no try/finally around the yield, so an exception
    propagating out of the managed body (e.g. KeyboardInterrupt/SystemExit,
    which the caller's ``except Exception`` does not catch) left stdout and
    stderr permanently redirected.  Restore them in a ``finally`` clause.
    """
    oldout = sys.stdout
    olderr = sys.stderr
    strio = StringIO.StringIO()
    sys.stdout = strio
    sys.stderr = strio
    try:
        yield strio
    finally:
        sys.stdout = oldout
        sys.stderr = olderr
def _get_python_block_output(src, global_dict, local_dict):
    """Evaluate python source code, capturing stdout/stderr.

    Returns
    -------
    (bool, str):
        - True on success
        - captured output (plus the error message on failure)

    NOTE(review): *local_dict* is accepted but unused — ``exec`` runs with
    *global_dict* as both namespaces; behavior preserved as-is.
    """
    cleaned = '\n'.join(line for line in src.split('\n')
                        if not line.startswith('%') and 'plt.show()' not in line)
    ok = True
    err = ''
    with _string_io() as captured:
        try:
            exec(cleaned, global_dict, global_dict)
        except Exception as e:
            err = str(e)
            ok = False
    return (ok, captured.getvalue() + err)
def _get_jupyter_notebook(lang, lines):
    """Assemble a Jupyter notebook (nbformat 4) dict for *lang*.

    Code blocks written in other languages are demoted to markdown cells.
    """
    cells = []
    for is_code, blk_lang, blk in _get_blocks(lines):
        if blk_lang != lang:
            is_code = False
        cell = {
            "cell_type": "code" if is_code else "markdown",
            "metadata": {},
            "source": '\n'.join(blk),
        }
        if is_code:
            cell["outputs"] = []
            cell["execution_count"] = None
        cells.append(cell)
    return {
        "nbformat": 4,
        "nbformat_minor": 2,
        "metadata": {"language": lang, "display_name": '', "name": ''},
        "cells": cells,
    }
def _get_source(lang, lines):
    """Render markdown *lines* as a plain source listing for *lang*.

    Code blocks pass through verbatim, padded by a blank line on each side;
    prose lines get the language's comment prefix; HTML wrapper tags and
    matplotlib magics are dropped.

    Fix: this file's ``_get_blocks`` takes a single ``lines`` argument and
    yields ``(in_code, lang, lines)`` triples, but this function still used
    the old two-argument call and two-value unpacking — a TypeError as soon
    as it ran.  Adapt to the new generator, demoting other-language code
    blocks to prose exactly as ``_get_jupyter_notebook`` does.
    """
    cmt = _LANGS[lang][1] + ' '
    out = []
    for in_code, blk_lang, blk in _get_blocks(lines):
        if blk_lang != lang:
            in_code = False
        if in_code:
            out.append('')
        for l in blk:
            if in_code:
                if '%matplotlib' not in l:
                    out.append(l)
            else:
                if ('<div>' in l or '</div>' in l or
                        '<script>' in l or '</script>' in l or
                        '<!--' in l or '-->' in l or
                        '%matplotlib' in l):
                    continue
                out.append(cmt + l)
        if in_code:
            out.append('')
    return out
def _get_src_download_btn(out_prefix, langs, lines):
    """Write one .ipynb per language and return HTML download buttons.

    The python notebook gets the bare prefix name; other languages get a
    ``_<lang>`` suffix.
    """
    parts = ['<div class="btn-group" role="group">\n']
    for lang in langs:
        suffix = '.ipynb' if lang == 'python' else '_' + lang + '.ipynb'
        ipynb = out_prefix + suffix
        with open(ipynb, 'w') as f:
            json.dump(_get_jupyter_notebook(lang, lines), f)
        basename = ipynb.split('/')[-1]
        parts.append('<div class="download_btn"><a href="%s" download="%s">'
                     '<span class="glyphicon glyphicon-download-alt"></span> %s</a></div>'
                     % (basename, basename, basename))
    parts.append('</div>\n')
    return ''.join(parts)
def add_buttons(app, docname, source):
    """Sphinx 'source-read' hook: insert download buttons and (for
    whitelisted docs) evaluated python-block output into each chunk.

    Writes per-language notebooks next to the built doc, then re-renders the
    chunk block-by-block so python output can be appended after its block.
    """
    out_prefix = app.builder.outdir + '/' + docname
    dirname = os.path.dirname(out_prefix)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    for i,j in enumerate(source):
        # fresh namespaces per chunk for evaluated python blocks
        local_dict = {}
        global_dict = {}
        lines = j.split('\n')
        langs = set([l for (_, _, l, _) in _parse_code_lines(lines)
                     if l is not None and l in _LANGS])
        # first convert
        for k,l in enumerate(lines):
            if _SRC_DOWNLOAD_MARK in l:
                lines[k] = _get_src_download_btn(
                    out_prefix, langs, lines)
        # Language-selection buttons deliberately disabled in this version.
        # # then add lang buttons
        # for k,l in enumerate(lines):
        #     if _LANG_SELECTION_MARK in l:
        #         lines[k] = _get_lang_selection_btn(langs)
        output = ''
        # NOTE(review): the loop target rebinds both `lang` and `lines`;
        # _get_blocks already holds a reference to the original list, so
        # iteration is unaffected, but `lines` is clobbered afterwards —
        # fragile if code below this loop ever uses it again.
        for in_code, lang, lines in _get_blocks(lines):
            src = '\n'.join(lines)+'\n'
            if in_code:
                output += _get_mk_code_block(src, lang)
                # only evaluate python blocks for docs on the whitelist
                if lang == 'python' and any([w in docname for w in _EVAL_WHILTELIST]):
                    status, blk_out = _get_python_block_output(src, global_dict, local_dict)
                    if len(blk_out):
                        output += '<div class=\"cell-results-header\">Output:</div>\n\n'
                        output += _get_mk_code_block(blk_out, 'results')
            else:
                output += src
        source[i] = output
def setup(app):
    """Sphinx extension entry point: register build hooks and markdown passes."""
    for handler in (build_mxnet, generate_doxygen, build_scala_docs):
        app.connect("builder-inited", handler)
    # Building the R docs needs a LaTeX install, so it stays disabled.
    # app.connect("builder-inited", build_r_docs)
    app.connect('source-read', convert_table)
    app.connect('source-read', add_buttons)
    app.add_config_value('recommonmark_config', {
        'url_resolver': lambda url: 'http://mxnet.io/' + url,
        'enable_eval_rst': True,
    }, True)
    app.add_transform(transform.AutoStructify)
| {
"repo_name": "hesseltuinhof/mxnet",
"path": "docs/mxdoc.py",
"copies": "4",
"size": "11916",
"license": "apache-2.0",
"hash": -354918375070730940,
"line_mean": 32.3781512605,
"line_max": 111,
"alpha_frac": 0.5061262169,
"autogenerated": false,
"ratio": 3.5517138599105813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6057840076810581,
"avg_score": null,
"num_lines": null
} |
# Project Euler problem 86:
# A spider, S, sits in one corner of a cuboid room, measuring
# 6 by 5 by 3, and a fly, F, sits in the opposite corner. By
# travelling on the surfaces of the room the shortest "straight
# line" distance from S to F is 10 and the path is shown on the
# diagram.
# [../img/p086.gif]
# However, there are up to three "shortest" path candidates for
# any given cuboid and the shortest route doesn't always have
# integer length.
# It can be shown that there are exactly 2060 distinct cuboids,
# ignoring rotations, with integer dimensions, up to a maximum
# size of M by M by M, for which the shortest route has integer
# length when M = 100. This is the least value of M for which
# the number of solutions first exceeds two thousand; the number
# of solutions when M = 99 is 1975.
# Find the least value of M such that the number of solutions
# first exceeds one million.
from math import sqrt
# NOTE: this is Python 2 code (`print M` statement, integer `/` division).
# For a cuboid a x b x M (a <= b <= M, longest side M) the unfolded shortest
# path is sqrt(M^2 + (a + b)^2); L below iterates over the possible sums
# a + b.  When that path length is an integer, the expression added to
# `count` is — presumably — the number of (a, b) splits of L with
# a <= b <= M (TODO confirm against the problem's published analysis).
M = 0
limit = 1000000
count = 0
while count <= limit:
    M += 1
    for L in range(2, M * 2 + 1):
        tmp = sqrt(M * M + L * L)
        if tmp == int(tmp):
            count += L / 2 if L <= M else M - (L - 1) / 2
print M
| {
"repo_name": "cloudzfy/euler",
"path": "src/86.py",
"copies": "1",
"size": "1101",
"license": "mit",
"hash": 4113555804418402300,
"line_mean": 29.5833333333,
"line_max": 64,
"alpha_frac": 0.6939146231,
"autogenerated": false,
"ratio": 3.238235294117647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4432149917217647,
"avg_score": null,
"num_lines": null
} |
""" A spiral matrix is a square matrix of size n × n. It contains all the integers in range from 1 to n * n so
that number 1 is written in the bottom right corner, and all other numbers are written in increasing order
spirally in the counterclockwise direction.
Given the size of the matrix n, your task is to create a spiral matrix.
Example
For n = 3, the output should be
createSpiralMatrix(n) = [[5, 4, 3],
[6, 9, 2],
[7, 8, 1]]
"""
def createSpiralMatrix(n):
    """Return an n x n counterclockwise spiral matrix.

    Number 1 sits in the bottom-right corner and values increase spirally
    counterclockwise, e.g. createSpiralMatrix(3) ==
    [[5, 4, 3], [6, 9, 2], [7, 8, 1]].
    """
    moves = [(-1, 0), (0, -1), (1, 0), (0, 1)]  # up, left, down, right
    direction = 0
    row, col = n - 1, n - 1
    grid = [[0] * n for _ in range(n)]
    for value in range(1, n * n + 1):
        grid[row][col] = value
        nr, nc = row + moves[direction][0], col + moves[direction][1]
        # turn when the next cell is off the grid or already filled
        if not (0 <= nr < n and 0 <= nc < n and grid[nr][nc] == 0):
            direction = (direction + 1) % 4
            nr, nc = row + moves[direction][0], col + moves[direction][1]
        row, col = nr, nc
    return grid
| {
"repo_name": "ntthuy11/CodeFights",
"path": "Arcade/04_Python/05_ComplexityOfComprehension/createSpiralMatrix.py",
"copies": "1",
"size": "1127",
"license": "mit",
"hash": -7177934518818029000,
"line_mean": 36.5333333333,
"line_max": 111,
"alpha_frac": 0.5426287744,
"autogenerated": false,
"ratio": 3.1019283746556474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41445571490556476,
"avg_score": null,
"num_lines": null
} |
"""A Sportszone client.
Given a Sportszone URL, the client will scrape the site for team and scheduling
information.
"""
import collections
import httplib
import time
import urlparse
from lxml import html
# Immutable record describing one scheduled game.
Game = collections.namedtuple(
    'Game', 'game_datetime arena home_away opponent')
class SportszoneException(Exception):
  """Raised by the Sportszone client when a page cannot be retrieved."""
class Sportszone(object):
  """A Sportszone client (Python 2: uses httplib/urlparse)."""

  def __init__(self, base_url, league_id):
    """Creates a new Sportszone client.

    Args:
      base_url: The base URL.
      league_id: The league ID.
    """
    self._base_url = base_url
    self._league_id = league_id

  def get_schedule(self, team_id, season_id):
    """Gets a team schedule from Sportszone for a given season.

    Args:
      team_id: The team ID.
      season_id: The season ID.

    Returns:
      A list of games posted on a team's schedule.

    Raises:
      SportszoneException: Raised when there is an error reading the Sportszone
        schedule.
    """
    # Build the schedule-page URL and parse it so we can pick HTTP vs HTTPS.
    parsed = urlparse.urlparse(
        '%s?LeagueID=%d&TeamID=%d&SeasonID=%d&Page=Teams&Section=Schedule'
        % (self._base_url, self._league_id, team_id, season_id))
    if parsed.scheme == 'http':
      http = httplib.HTTPConnection(parsed.netloc)
    else:
      http = httplib.HTTPSConnection(parsed.netloc)
    http.request('GET', '%s?%s' % (parsed.path, parsed.query))
    response = http.getresponse()
    if response.status != 200:
      http.close()
      raise SportszoneException('Error retreiving page.')
    tree = html.fromstring(response.read())
    http.close()
    result = []
    # Schedule rows live in the site's "text11" table.
    rows = tree.xpath('//table[@class="text11"]/tbody/tr')
    for row in rows:
      # The expected structure is:
      #   0. Game number
      #   1. Day
      #   2. Date (e.g. May 14, 2015)
      #   3. Time (e.g. 8:10 PM)
      #   4. Arena
      #   5. Home/Away
      #   6. Opponent
      #   7. Score
      #   8. Result
      #   9. Boxscore
      # Skip malformed rows and the header row (whose first cell is '#').
      if len(row) != 10 or row[0][0].text == '#':
        continue
      game_datetime = time.strptime(
          '%s %s' % (row[2].text, row[3].text), '%b %d, %Y %I:%M %p')
      # NOTE(review): the nested [0] indexing below assumes fixed markup
      # (e.g. a link inside the arena cell) — brittle if the site changes.
      arena = row[4][0][0].text
      home_away = row[5][0].text
      opponent = row[6][0].text
      game = Game(game_datetime, arena, home_away, opponent)
      result.append(game)
    return result
| {
"repo_name": "kjiwa/sportszone-exporter",
"path": "sportszone.py",
"copies": "1",
"size": "2393",
"license": "mit",
"hash": 1515149513282930400,
"line_mean": 24.4574468085,
"line_max": 79,
"alpha_frac": 0.6021730046,
"autogenerated": false,
"ratio": 3.399147727272727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4501320731872727,
"avg_score": null,
"num_lines": null
} |
''' As practice with using classes, I'm going to define a class corresponding to three-vectors
(named, appropriately, ThreeVec).
ThreeVec will support vector addition and subtraction, scalar multiplication and division, dot
products (which, perhaps confusingly, will /also/ be represented by *, so that for three vectors
v1 and v2 and a scalar k, scalar multiplication will be represented as k * v1, and dot products
will be represented as v1 * v2), magnitudes, the creation of unit vectors, component-by-component
comparison, and parity. '''
class ThreeVec(object):
    '''A three-component vector.

    Supports vector addition and subtraction (+, -), scalar multiplication
    (k * v or v * k), dot products (v1 * v2), cross products (v1 ^ v2),
    magnitudes (abs(v) / v.mag()), unit vectors, component-by-component
    equality testing, and parity.  Ordering/equality comparison operators
    are deliberately unsupported and raise TypeError; use
    equal_components() instead.

    Fix: unit() and __abs__() called self.mag(), but mag() was never
    defined (AttributeError at runtime); it is now implemented.  __rmul__
    and __div__ are aliased to __mul__ / __truediv__ to remove duplicated
    bodies — behavior is unchanged.
    '''

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def __repr__(self):
        return str([self.x, self.y, self.z])

    def __add__(self, vec2):
        if type(vec2) == ThreeVec:
            return ThreeVec(self.x + vec2.x, self.y + vec2.y, self.z + vec2.z)
        else:
            raise TypeError("+ with type 'ThreeVec' supports only operand type 'ThreeVec'")

    def __sub__(self, vec2):
        if type(vec2) == ThreeVec:
            return ThreeVec(self.x - vec2.x, self.y - vec2.y, self.z - vec2.z)
        else:
            raise TypeError("- with type 'ThreeVec' supports only operand type 'ThreeVec'")

    # Because + and - only accept ThreeVec operands, __radd__/__rsub__ are
    # unnecessary: v1 + v2 always dispatches to the left operand.

    def __mul__(self, other):
        # scalar * vector -> scaled vector; vector * vector -> dot product
        if type(other) == float or type(other) == int:
            return ThreeVec(other * self.x, other * self.y, other * self.z)
        elif type(other) == ThreeVec:
            return (self.x * other.x) + (self.y * other.y) + (self.z * other.z)
        else:
            raise TypeError("* with type 'ThreeVec' supports only operand types 'float', 'int', and 'ThreeVec'")

    # Both supported multiplications are commutative, so reuse __mul__.
    __rmul__ = __mul__

    def __truediv__(self, other):
        if type(other) == float or type(other) == int:
            return ThreeVec(self.x / other, self.y / other, self.z / other)
        else:
            raise TypeError("/ with type 'ThreeVec' supports only operand types 'float' and 'int'")

    # Python 2 classic-division hook; identical behavior.
    __div__ = __truediv__

    def __xor__(self, vec2):
        # v1 ^ v2 is the cross product.
        if type(vec2) == ThreeVec:
            return ThreeVec((self.y * vec2.z) - (self.z * vec2.y),
                            (self.z * vec2.x) - (self.x * vec2.z),
                            (self.x * vec2.y) - (self.y * vec2.x))
        else:
            raise TypeError("^ with type 'ThreeVec' supports only operand type 'ThreeVec'")

    def mag(self):
        '''Return the Euclidean magnitude of the vector.'''
        return (self.x ** 2 + self.y ** 2 + self.z ** 2) ** 0.5

    def __abs__(self):
        return self.mag()

    # Comparison operators are intentionally disabled: component-wise
    # comparison of vectors is ambiguous, so raise instead of guessing.
    def __lt__(self, other):
        raise TypeError("unsupported operand type for '<': 'ThreeVec'")

    def __le__(self, other):
        raise TypeError("unsupported operand type for '<=': 'ThreeVec'")

    def __eq__(self, other):
        raise TypeError("unsupported operand type for '==': 'ThreeVec'")

    def __ne__(self, other):
        raise TypeError("unsupported operand type for '!=': 'ThreeVec'")

    def __gt__(self, other):
        raise TypeError("unsupported operand type for '>': 'ThreeVec'")

    def __ge__(self, other):
        raise TypeError("unsupported operand type for '>=': 'ThreeVec'")

    def unit(self):
        '''Return the unit vector pointing in this vector's direction.'''
        mag = self.mag()
        return ThreeVec(self.x / mag, self.y / mag, self.z / mag)

    def equal_components(self, vec2):
        '''Component-wise equality test (== itself is disabled above).'''
        if type(vec2) == ThreeVec:
            return self.x == vec2.x and self.y == vec2.y and self.z == vec2.z
        else:
            raise TypeError("function 'equal_components' requires type 'ThreeVec'")

    def parity(self):
        '''Return the vector with every component negated.'''
        return ThreeVec(-self.x, -self.y, -self.z)
# Module-level demo: build two example vectors and dump one's attribute dict.
# NOTE: Python 2 `print` statement below.
E_field = ThreeVec(12, 4, 5)
B_field = ThreeVec (5, 3, 5)
print E_field.__dict__
| {
"repo_name": "k-shukla/misc_exercises-py",
"path": "Shukla - Three Vec.py",
"copies": "1",
"size": "4847",
"license": "mit",
"hash": -1041949717208874100,
"line_mean": 39.7844827586,
"line_max": 144,
"alpha_frac": 0.5597276666,
"autogenerated": false,
"ratio": 3.711332312404288,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4771059979004288,
"avg_score": null,
"num_lines": null
} |
# A sprite class derivative that implements integer z values (larger negatives are more distant)
import pyglet
from pyglet.gl import *
from pyglet import graphics
from pyglet import image
from pyglet import sprite
from pyglet import clock
"""
NB. You will need to set up z-buffering and your near and far clipping planes properly.
eg. In your window class:
def __init__(self, etc)
...
glClearDepth(1.0);
...
def on_resize(self, width, height):
# Based on the default with more useful clipping planes
gl.glViewport(0, 0, width, height)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
gl.glOrtho(0, width, 0, height, 0.5, 1000)
gl.glMatrixMode(gl.GL_MODELVIEW)
"""
class ZSpriteGroup(sprite.SpriteGroup):
    '''Shared sprite rendering group with z-coordinate support.

    The group is automatically coalesced with other sprite groups sharing the
    same parent group, texture and blend parameters.
    '''
    def __init__(self, alpha_test_val, *args):
        super(ZSpriteGroup, self).__init__(*args)
        # Alpha-test threshold used in set_state().  Per the original author,
        # this value is a bit of a hack: ideally it would be zero, but if you
        # have any values that are close to zero, you probably want to catch
        # them too.
        self._alpha_test_val = alpha_test_val

    def set_state(self):
        # GL state is pushed/configured in a specific order; do not reorder.
        glEnable(self.texture.target)
        glBindTexture(self.texture.target, self.texture.id)
        glPushAttrib(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        # Normal alpha blending mode
        glEnable(GL_BLEND)
        glBlendFunc(self.blend_src, self.blend_dest)
        # ...plus depth testing to allow proper occlusion
        glEnable(GL_DEPTH_TEST)
        glDepthFunc(GL_LESS)
        # ...and alpha testing so that nearby objects that contain transparent
        # areas don't conceal more distant objects behind the transparency
        glEnable(GL_ALPHA_TEST)
        glAlphaFunc(GL_GREATER, self._alpha_test_val)
        # NOTE(review): no unset_state() override here — per the original
        # author, the parent class's unset state is assumed to clean up
        # correctly ("Bugs will happen if I am wrong").
class ZSprite(pyglet.sprite.Sprite):
def __init__(self,
img, x=0, y=0, z=0,
blend_src=GL_SRC_ALPHA,
blend_dest=GL_ONE_MINUS_SRC_ALPHA,
batch=None,
group=None,
usage='dynamic',
subpixel=False,
alpha_test_val=0.5):
'''Create a sprite with z-coordinate support.
:Parameters:
`img` : `AbstractImage` or `Animation`
Image or animation to display.
`x` : int
X coordinate of the sprite.
`y` : int
Y coordinate of the sprite.
`z` : int
Z coordinate of the sprite.
`blend_src` : int
OpenGL blend source mode. The default is suitable for
compositing sprites drawn from back-to-front.
`blend_dest` : int
OpenGL blend destination mode. The default is suitable for
compositing sprites drawn from back-to-front.
`batch` : `Batch`
Optional batch to add the sprite to.
`group` : `Group`
Optional parent group of the sprite.
`usage` : str
Vertex buffer object usage hint, one of ``"none"`` (default),
``"stream"``, ``"dynamic"`` or ``"static"``. Applies
only to vertex data.
'''
if batch is not None:
self._batch = batch
self._x = x
self._y = y
self._z = z
self._alpha_test_val = alpha_test_val
if isinstance(img, image.Animation):
self._animation = img
self._frame_index = 0
self._texture = img.frames[0].image.get_texture()
self._next_dt = img.frames[0].duration
if self._next_dt:
clock.schedule_once(self._animate, self._next_dt)
else:
self._texture = img.get_texture()
# Must use the ZSpriteGroup to be able to enable depth testing
self._group = ZSpriteGroup(self._alpha_test_val, self._texture, blend_src, blend_dest, group)
self._usage = usage
self._subpixel = subpixel
self._create_vertex_list()
def _set_group(self, group):
if self._group.parent == group:
return
# Use ZSpriteGroup to enable depth testing
self._group = ZSpriteGroup(self._alpha_test_val,
self._texture,
self._group.blend_src,
self._group.blend_dest,
group)
if self._batch is not None:
self._batch.migrate( self._vertex_list, GL_QUADS, self._group,
self._batch)
def _get_group(self):
return self._group.parent
# If we don't reimplement this property, it appears to defer to the
# parent class's implementation of _get_group and _set_group, which is no good
group = property(_get_group, _set_group,
doc='''Parent graphics group.
The sprite can change its rendering group, however this can be an
expensive operation.
:type: `Group`
''')
def _set_texture(self, texture):
# Again, mostly copy and paste from the parent class
if texture.id is not self._texture.id:
self._group = ZSpriteGroup(self._alpha_test_val,
texture,
self._group.blend_src,
self._group.blend_dest,
self._group.parent)
if self._batch is None:
self._vertex_list.tex_coords[:] = texture.tex_coords
else:
self._vertex_list.delete()
self._texture = texture
self._create_vertex_list()
else:
self._vertex_list.tex_coords[:] = texture.tex_coords
self._texture = texture
def _create_vertex_list(self):
# Slightly changed from the parent, in that it passes v3f instead of v2i
if self._subpixel:
vertex_format = 'v3f/%s' % self._usage
else:
vertex_format = 'v3i/%s' % self._usage
if self._batch is None:
self._vertex_list = graphics.vertex_list(4,
vertex_format,
'c4B', ('t3f', self._texture.tex_coords))
else:
self._vertex_list = self._batch.add(4, GL_QUADS, self._group,
vertex_format,
'c4B', ('t3f', self._texture.tex_coords))
self._update_position()
self._update_color()
    def _update_position(self):
        # Just differs from parent in the addition of extra z vertices:
        # every vertex is (x, y, z) instead of (x, y), so the quad is
        # 12 floats instead of 8.
        img = self._texture
        if not self._visible:
            # Degenerate quad at the origin — nothing is drawn.
            self._vertex_list.vertices[:] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        elif self._rotation:
            # Rotate the four scaled corners around the anchor, then
            # translate to the sprite position. z is constant per sprite.
            x1 = -img.anchor_x * self._scale
            y1 = -img.anchor_y * self._scale
            x2 = x1 + img.width * self._scale
            y2 = y1 + img.height * self._scale
            x = self._x
            y = self._y
            z = self._z
            r = -math.radians(self._rotation)
            cr = math.cos(r)
            sr = math.sin(r)
            ax = x1 * cr - y1 * sr + x
            ay = x1 * sr + y1 * cr + y
            bx = x2 * cr - y1 * sr + x
            by = x2 * sr + y1 * cr + y
            cx = x2 * cr - y2 * sr + x
            cy = x2 * sr + y2 * cr + y
            dx = x1 * cr - y2 * sr + x
            dy = x1 * sr + y2 * cr + y
            self._vertex_list.vertices[:] = [ax, ay, z, bx, by, z, cx, cy, z, dx, dy, z]
        elif self._scale != 1.0:
            # Axis-aligned but scaled quad.
            x1 = self._x - img.anchor_x * self._scale
            y1 = self._y - img.anchor_y * self._scale
            x2 = x1 + img.width * self._scale
            y2 = y1 + img.height * self._scale
            z = self._z
            self._vertex_list.vertices[:]= [x1, y1, z, x2, y1, z, x2, y2, z, x1, y2, z]
        else:
            # Fast path: unrotated, unscaled quad.
            x1 = self._x - img.anchor_x
            y1 = self._y - img.anchor_y
            x2 = x1 + img.width
            y2 = y1 + img.height
            z = self._z
            self._vertex_list.vertices[:] = [x1, y1, z, x2, y1, z, x2, y2, z, x1, y2, z]
        if not self._subpixel:
            # Integer vertex format (v3i) — truncate any float results.
            self._vertex_list.vertices[:] = [
                int(v) for v in self._vertex_list.vertices[:]
            ]
        # self._vertex_list.vertices[:] = vertices
def set_position(self, x, y, z):
'''Set the X, Y, and Z coordinates of the sprite simultaneously.
:Parameters:
`x` : int
X coordinate of the sprite.
`y` : int
Y coordinate of the sprite.
`z` : int
Z coordinate of the sprite.
'''
self._x = x
self._y = y
self._z = z
self._update_position()
position = property(lambda self: (self._x, self._y, self._z),
lambda self, t: self.set_position(*t),
doc='''The (x, y, z) coordinates of the sprite.
:type: (int, int, int)
''')
    def _set_z(self, z):
        # Setter for the ``z`` property: store the depth and reposition
        # the quad's vertices.
        self._z = z
        self._update_position()
    z = property(lambda self: self._z, _set_z,
                 doc='''Z coordinate of the sprite.
    :type: int
    ''')
| {
"repo_name": "NiclasEriksen/rpg_procgen",
"path": "zsprite.py",
"copies": "1",
"size": "9692",
"license": "cc0-1.0",
"hash": -3163968723735719400,
"line_mean": 34.5054945055,
"line_max": 101,
"alpha_frac": 0.5159925712,
"autogenerated": false,
"ratio": 3.9286582894203486,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9903726847910239,
"avg_score": 0.00818480254202201,
"num_lines": 273
} |
# Asq's setup.py
from distutils.core import setup
import io
import os
import re
from asq import __version__ as version
def read(*names, **kwargs):
    """Return the text of the file at *names*, joined onto this script's
    directory, decoded with ``encoding`` (default utf8)."""
    encoding = kwargs.get("encoding", "utf8")
    path = os.path.join(os.path.dirname(__file__), *names)
    with io.open(path, encoding=encoding) as fp:
        return fp.read()
def find_version(*file_paths):
    """Extract the ``__version__ = '...'`` value from the given file."""
    contents = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                      contents, re.M)
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the relevant file.
# Use io.open rather than the builtin open(): the ``encoding`` argument to
# open() is Python-3-only, while the classifiers below still advertise
# Python 2.6/2.7 support (``io`` is already imported for read() above).
with io.open(os.path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name = "asq",
    packages = ["asq"],
    version = find_version("asq/version.py"),
    description = "LINQ-for-objects style queries for Python iterables.",
    author = "Robert Smallshire",
    author_email = "robert@smallshire.org.uk",
    url = "https://github.com/rob-smallshire/asq",
    keywords = ["Python", "LINQ"],
    license="MIT License",
    classifiers = [
        "Development Status :: 5 - Production/Stable",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Environment :: Other Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Utilities",
        ],
    long_description = long_description
)
| {
"repo_name": "sherzberg/asq",
"path": "setup.py",
"copies": "1",
"size": "1788",
"license": "mit",
"hash": -5424735152461003000,
"line_mean": 30.9285714286,
"line_max": 73,
"alpha_frac": 0.6096196868,
"autogenerated": false,
"ratio": 3.804255319148936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.985419649859413,
"avg_score": 0.011935701470960996,
"num_lines": 56
} |
# Asq's setup.py
from distutils.core import setup
from asq import __version__ as version
# README.txt provides the long description shown on PyPI.
with open('README.txt', 'r') as readme:
    long_description = readme.read()
setup(
    name = "asq",
    packages = ["asq"],
    # Version is taken from asq.__version__ (imported at the top).
    version = "{version}".format(version=version),
    description = "LINQ-for-objects style queries for Python iterables.",
    author = "Robert Smallshire",
    author_email = "robert@smallshire.org.uk",
    url = "http://code.google.com/p/asq/",
    download_url="http://code.google.com/p/asq/downloads/detail?name=asq-{version}.tar.gz".format(version=version),
    keywords = ["Python", "LINQ"],
    license="MIT License",
    classifiers = [
        "Development Status :: 5 - Production/Stable",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Environment :: Other Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Utilities",
        ],
    # ordereddict backport for pre-2.7 interpreters.
    requires = ['ordereddict'],
    long_description = long_description
)
| {
"repo_name": "flying-circus/asq",
"path": "setup.py",
"copies": "4",
"size": "1278",
"license": "mit",
"hash": 3985569163965589000,
"line_mean": 34.5,
"line_max": 115,
"alpha_frac": 0.6244131455,
"autogenerated": false,
"ratio": 3.9444444444444446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01907886259311428,
"num_lines": 36
} |
# Asq's setup.py
from setuptools import setup
import io
import os
import re
def read(*names, **kwargs):
    """Read and return the contents of a file relative to this script.

    ``encoding`` may be given as a keyword argument (default utf8).
    """
    here = os.path.dirname(__file__)
    with io.open(os.path.join(here, *names),
                 encoding=kwargs.get("encoding", "utf8")) as fp:
        return fp.read()
def find_version(*file_paths):
    """Pull the __version__ string out of the named source file."""
    source = read(*file_paths)
    m = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", source, re.M)
    if m:
        return m.group(1)
    raise RuntimeError("Unable to find version string.")
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the relevant file
with io.open(os.path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name="asq",
    packages=["asq"],
    version=find_version("asq/version.py"),
    description="LINQ-for-objects style queries for Python iterables.",
    author="Sixty North AS",
    # NOTE(review): "sixty-norty.com" looks like a typo for
    # "sixty-north.com" — confirm before changing, as this is shipped
    # package metadata.
    author_email="rob@sixty-norty.com",
    url="https://github.com/sixty-north/asq",
    keywords=["Python", "LINQ"],
    license="MIT License",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Environment :: Other Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Utilities",
        ],
    long_description=long_description
)
| {
"repo_name": "rob-smallshire/asq",
"path": "setup.py",
"copies": "1",
"size": "1864",
"license": "mit",
"hash": -1774069633655595800,
"line_mean": 31.1379310345,
"line_max": 75,
"alpha_frac": 0.6056866953,
"autogenerated": false,
"ratio": 3.8592132505175982,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9963175807886564,
"avg_score": 0.0003448275862068966,
"num_lines": 58
} |
# as_rwGPS.py Asynchronous device driver for GPS devices using a UART.
# Supports a limited subset of the PMTK command packets employed by the
# widely used MTK3329/MTK3339 chip.
# Sentence parsing based on MicropyGPS by Michael Calvin McCoy
# https://github.com/inmcm/micropyGPS
# Copyright (c) 2018 Peter Hinch
# Released under the MIT License (MIT) - see LICENSE file
import as_drivers.as_GPS as as_GPS
try:
from micropython import const
except ImportError:
const = lambda x : x
HOT_START = const(1)
WARM_START = const(2)
COLD_START = const(3)
FULL_COLD_START = const(4)
STANDBY = const(5)
DEFAULT_SENTENCES = const(6)
VERSION = const(7)
ENABLE = const(8)
ANTENNA = const(9)
NO_ANTENNA = const(10)
# Return CRC of a bytearray.
def _crc(sentence):
x = 1
crc = 0
while sentence[x] != ord('*'):
crc ^= sentence[x]
x += 1
return crc # integer
class GPS(as_GPS.AS_GPS):
    """Read/write GPS driver.

    Extends the read-only AS_GPS driver with the ability to send PMTK
    command packets to the device via *swriter*, and parses the PMTK
    responses in addition to the standard NMEA sentences.
    """

    # Canned command packets (checksums precomputed), keyed by the command
    # constants defined above.
    fixed_commands = {HOT_START: b'$PMTK101*32\r\n',
                      WARM_START: b'$PMTK102*31\r\n',
                      COLD_START: b'$PMTK103*30\r\n',
                      FULL_COLD_START: b'$PMTK104*37\r\n',
                      STANDBY: b'$PMTK161,0*28\r\n',
                      DEFAULT_SENTENCES: b'$PMTK314,-1*04\r\n',
                      VERSION: b'$PMTK605*31\r\n',
                      ENABLE: b'$PMTK414*33\r\n',
                      ANTENNA: b'$PGCMD,33,1*6C',
                      NO_ANTENNA: b'$PGCMD,33,0*6D',
                      }

    def __init__(self, sreader, swriter, local_offset=0,
                 fix_cb=lambda *_ : None, cb_mask=as_GPS.RMC, fix_cb_args=(),
                 msg_cb=lambda *_ : None, msg_cb_args=()):
        super().__init__(sreader, local_offset, fix_cb, cb_mask, fix_cb_args)
        self._swriter = swriter
        self.version = None  # Response to VERSION query
        self.enabled = None  # Response to ENABLE query
        self.antenna = 0  # Response to ANTENNA.
        self._msg_cb = msg_cb
        self._msg_cb_args = msg_cb_args

    async def _send(self, sentence):
        # Create a bytes object containing hex CRC.
        # Fix: use '{:02x}' so the checksum is always two hex digits.
        # The previous '{:2x}' space-pads checksums below 0x10 (e.g. ' f'),
        # which produces an invalid NMEA checksum field.
        bcrc = '{:02x}'.format(_crc(sentence)).encode()
        sentence[-4] = bcrc[0]  # Fix up CRC bytes
        sentence[-3] = bcrc[1]
        await self._swriter.awrite(sentence)

    async def baudrate(self, value=9600):
        """Request the device switch its UART to *value* baud."""
        if value not in (4800, 9600, 14400, 19200, 38400, 57600, 115200):
            raise ValueError('Invalid baudrate {:d}.'.format(value))
        sentence = bytearray('$PMTK251,{:d}*00\r\n'.format(value))
        await self._send(sentence)

    async def update_interval(self, ms=1000):
        """Request a new position-update interval in milliseconds."""
        if ms < 100 or ms > 10000:
            raise ValueError('Invalid update interval {:d}ms.'.format(ms))
        sentence = bytearray('$PMTK220,{:d}*00\r\n'.format(ms))
        await self._send(sentence)
        self._update_ms = ms  # Save for timing driver

    async def enable(self, *, gll=0, rmc=1, vtg=1, gga=1, gsa=1, gsv=5, chan=0):
        """Select which sentence types (and rates) the device emits."""
        fstr = '$PMTK314,{:d},{:d},{:d},{:d},{:d},{:d},0,0,0,0,0,0,0,0,0,0,0,0,{:d}*00\r\n'
        sentence = bytearray(fstr.format(gll, rmc, vtg, gga, gsa, gsv, chan))
        await self._send(sentence)

    async def command(self, cmd):
        """Send one of the canned ``fixed_commands`` packets."""
        if cmd not in self.fixed_commands:
            raise ValueError('Invalid command {:s}.'.format(cmd))
        await self._swriter.awrite(self.fixed_commands[cmd])

    # Should get 705 from VERSION 514 from ENABLE
    def parse(self, segs):
        """Handle PMTK/PGTOP response sentences; return True if consumed."""
        if segs[0] == 'PMTK705':  # Version response
            self.version = segs[1:]
            segs[0] = 'version'
            self._msg_cb(self, segs, *self._msg_cb_args)
            return True
        if segs[0] == 'PMTK514':
            # NOTE(review): debug print left in place to preserve behaviour.
            print('enabled segs', segs)
            self.enabled = {'gll': segs[1], 'rmc': segs[2], 'vtg': segs[3],
                            'gga': segs[4], 'gsa': segs[5], 'gsv': segs[6],
                            'chan': segs[19]}
            segs = ['enabled', self.enabled]
            self._msg_cb(self, segs, *self._msg_cb_args)
            return True
        if segs[0] == 'PGTOP':
            self.antenna = segs[2]
            segs = ['antenna', self.antenna]
            self._msg_cb(self, segs, *self._msg_cb_args)
            return True
        if segs[0][:4] == 'PMTK':
            # Any other PMTK acknowledgement: forward to the message callback.
            self._msg_cb(self, segs, *self._msg_cb_args)
            return True
        return False
| {
"repo_name": "peterhinch/micropython-async",
"path": "v3/as_drivers/as_GPS/as_rwGPS.py",
"copies": "1",
"size": "4383",
"license": "mit",
"hash": -2223760741929690000,
"line_mean": 36.1440677966,
"line_max": 91,
"alpha_frac": 0.5544147844,
"autogenerated": false,
"ratio": 3.0522284122562673,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9073964374465153,
"avg_score": 0.006535764438222625,
"num_lines": 118
} |
# Ass7.2
# Liu Li
# 18 Nov, 2015
'''
Write a program that prompts for a file name, then opens that file and reads through the file, looking for lines of the form:
X-DSPAM-Confidence: 0.8475
Count these lines and extract the floating point values from each of the lines and compute the average of those values and produce an output as shown below.
You can download the sample data at http://www.pythonlearn.com/code/mbox-short.txt when you are testing below enter mbox-short.txt as the file name.
'''
# Use the file name mbox-short.txt as the file name
# fname = raw_input("Enter file name: ")
import re
fname = "mbox-short.txt"
with open(fname, 'r') as fh:
    accumulator = 0  # running sum of extracted confidence values
    count = 0        # number of X-DSPAM-Confidence lines seen
    for line in fh:
        if line.startswith("X-DSPAM-Confidence:"):
            # First run of digits/dots on the line is the float value.
            str_num = re.findall('[0-9.]+', line)[0]
            accumulator += float(str_num)
            count += 1
print "Desired Output:\nAverage spam confidence: 0.750718518519\n"
print "My Output: "
print "Average spam confidence: " + str(accumulator/count)
| {
"repo_name": "ll0816/PythonForEverybody",
"path": "Python-Data-Structure/Ass7.2.py",
"copies": "1",
"size": "1031",
"license": "mit",
"hash": -2842668874437314000,
"line_mean": 38.6538461538,
"line_max": 156,
"alpha_frac": 0.694471387,
"autogenerated": false,
"ratio": 3.4713804713804715,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46658518583804715,
"avg_score": null,
"num_lines": null
} |
"""assassins URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth.views import logout
from . import views
urlpatterns = [
    # Site landing page.
    url(r'^$', views.index, name='index'),
    # Auth: project-local login view, stock Django logout view.
    url(r'^login$', views.login, {'template_name':'registration/login.html'}, name='login'),
    url(r'^logout$', logout, {'next_page':'index'}, name='logout'),
    # App URLconfs, each mounted under its own namespace.
    url(r'^user/', include('users.urls', namespace='users')),
    url(r'^game/', include('games.urls', namespace='games')),
    url(r'^admin/', admin.site.urls),
]
| {
"repo_name": "bobandbetty/assassins",
"path": "assassins/assassins/urls.py",
"copies": "2",
"size": "1209",
"license": "mit",
"hash": -1494126348087691800,
"line_mean": 39.3,
"line_max": 89,
"alpha_frac": 0.6906534326,
"autogenerated": false,
"ratio": 3.4152542372881354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0066877482711559435,
"num_lines": 30
} |
"""Assay information class"""
import os
import yaml
import pandas as pd
from .example_filetype_format import FileTypeFormat
from . import process_functions
class Assayinfo(FileTypeFormat):
    """Assay information file type.

    Validates and processes ``assay_information.yaml`` submissions and
    pushes the processed table into the assay-information Synapse database.
    """
    _fileType = "assayinfo"

    _process_kwargs = ["newPath", "databaseSynId"]

    def _validateFilename(self, filepath_list):
        """Validate assay information filename"""
        assert os.path.basename(filepath_list[0]) == "assay_information.yaml"

    def process_steps(self, assay_info_df, newPath, databaseSynId):
        """
        Process bed input and update bed database

        Args:
            assay_info_df: Assay information dataframe
            newPath: Path to processed assay information
            databaseSynId: assay information database synapse id

        Returns:
            path to assay information dataframe
        """
        # Must pass in a list
        process_assay_info_df = self._process(assay_info_df)
        process_functions.updateData(self.syn, databaseSynId,
                                     process_assay_info_df, self.center,
                                     filterByColumn="CENTER",
                                     toDelete=True)
        process_assay_info_df.to_csv(newPath, sep="\t", index=False)
        return newPath

    def _process(self, df):
        """
        Process assay_information.yaml. Standardizes SEQ_ASSAY_ID,
        default 10 for gene_padding, and fills in variant_classifications

        Args:
            df: Assay information dataframe

        Returns:
            dataframe: Processed dataframe
        """
        # Normalize assay ids: upper case, underscores become dashes.
        seq_assay_ids = [assay.upper().replace('_', '-')
                         for assay in df['SEQ_ASSAY_ID']]
        df['SEQ_ASSAY_ID'] = seq_assay_ids
        df['SEQ_PIPELINE_ID'] = [assay.upper().replace('_', '-')
                                 for assay in df['SEQ_PIPELINE_ID']]
        if process_functions.checkColExist(df, "gene_padding"):
            df['gene_padding'] = df['gene_padding'].fillna(10)
            df['gene_padding'] = df['gene_padding'].astype(int)
        else:
            # Default padding when the column was omitted entirely.
            df['gene_padding'] = 10
        if not process_functions.checkColExist(df, "variant_classifications"):
            df['variant_classifications'] = float('nan')
        df['CENTER'] = self.center
        return df

    def _get_dataframe(self, filepath_list):
        """Take in yaml file, returns dataframe"""
        filepath = filepath_list[0]
        try:
            with open(filepath, 'r') as yamlfile:
                # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
                # Must add this because yaml load deprecation
                assay_info_dict = yaml.load(yamlfile, Loader=yaml.FullLoader)
        except Exception:
            raise ValueError(
                "assay_information.yaml: Can't read in your file. "
                "Please make sure the file is a correctly formatted yaml")
        assay_infodf = pd.DataFrame(assay_info_dict)
        assay_info_transposeddf = assay_infodf.transpose()

        # Flatten each assay's ``assay_specific_info`` entries into rows,
        # duplicating the assay-level columns onto every row.
        # Fix: DataFrame.append was deprecated and removed in pandas 2.0;
        # use pd.concat instead.
        all_frames = []
        for assay in assay_info_dict:
            assay_specific_info = assay_info_dict[assay]['assay_specific_info']
            assay_specific_infodf = pd.DataFrame(assay_specific_info)

            seq_assay_id_infodf = assay_info_transposeddf.loc[[assay]]
            # Repeat the assay-level row once per assay_specific_info entry.
            to_appenddf = [seq_assay_id_infodf]*(len(assay_specific_info) - 1)
            if to_appenddf:
                seq_assay_id_infodf = pd.concat(
                    [seq_assay_id_infodf] + to_appenddf)
            seq_assay_id_infodf.reset_index(drop=True, inplace=True)

            assay_finaldf = pd.concat(
                [assay_specific_infodf, seq_assay_id_infodf], axis=1)
            del assay_finaldf['assay_specific_info']
            # Columns of lists are serialized as ';'-joined strings.
            columns_containing_lists = ['variant_classifications',
                                        'alteration_types',
                                        'preservation_technique', 'coverage']
            for col in columns_containing_lists:
                if assay_finaldf.get(col) is not None:
                    assay_finaldf[col] = [";".join(row)
                                          for row in assay_finaldf[col]]
            assay_finaldf['SEQ_PIPELINE_ID'] = assay
            all_frames.append(assay_finaldf)
        if all_frames:
            return pd.concat(all_frames)
        return pd.DataFrame()

    def _validate(self, assay_info_df):
        """
        Validates the values of assay information file

        Args:
            assay_info_df: assay information dataframe

        Returns:
            tuple: error and warning
        """
        total_error = ""
        warning = ""
        if process_functions.checkColExist(assay_info_df, "SEQ_ASSAY_ID"):
            all_seq_assays = assay_info_df.SEQ_ASSAY_ID.unique()
            if not all([assay.startswith(self.center)
                        for assay in all_seq_assays]):
                total_error += \
                    ("Assay_information.yaml: Please make sure your all your "
                     "SEQ_ASSAY_IDs start with your center abbreviation.\n")
        else:
            total_error += \
                "Assay_information.yaml: Must have SEQ_ASSAY_ID column.\n"

        # Pull the allowed enumerations from the GDC read_group dictionary.
        read_group_dict = process_functions.get_gdc_data_dictionary(
            "read_group")
        read_group_headers = read_group_dict['properties']
        warn, error = process_functions.check_col_and_values(
            assay_info_df,
            'is_paired_end',
            [True, False],
            filename="Assay_information.yaml",
            required=True)
        warning += warn
        total_error += error

        warn, error = process_functions.check_col_and_values(
            assay_info_df, 'library_selection',
            read_group_headers['library_selection']['enum'],
            filename="Assay_information.yaml",
            required=True)
        warning += warn
        total_error += error

        warn, error = process_functions.check_col_and_values(
            assay_info_df,
            'library_strategy',
            read_group_headers['library_strategy']['enum'],
            filename="Assay_information.yaml",
            required=True)
        warning += warn
        total_error += error

        warn, error = process_functions.check_col_and_values(
            assay_info_df,
            'platform',
            read_group_headers['platform']['enum'],
            filename="Assay_information.yaml",
            required=True)
        warning += warn
        total_error += error

        # instrument_model may legitimately be null.
        instrument_model = read_group_headers['instrument_model']['enum']
        instrument_model.append(None)
        warn, error = process_functions.check_col_and_values(
            assay_info_df,
            'instrument_model',
            instrument_model,
            filename="Assay_information.yaml",
            required=True)
        warning += warn
        total_error += error

        # target_capture_kit = read_group_headers['target_capture_kit']['enum']
        # warn, error = process_functions.check_col_and_values(
        #     assay_info_df,
        #     'target_capture_kit',
        #     target_capture_kit,
        #     filename="Assay_information.yaml",
        #     required=True)
        # warning += warn
        # total_error += error
        if not process_functions.checkColExist(assay_info_df,
                                               "target_capture_kit"):
            total_error += ("Assay_information.yaml: "
                            "Must have target_capture_kit column.\n")

        variant_classes = ['Splice_Site', 'Nonsense_Mutation',
                           'Frame_Shift_Del', 'Frame_Shift_Ins',
                           'Nonstop_Mutation', 'Translation_Start_Site',
                           'In_Frame_Ins', 'In_Frame_Del',
                           'Missense_Mutation', 'Intron', 'Splice_Region',
                           'Silent', 'RNA', "5'UTR", "3'UTR", 'IGR',
                           "5'Flank", "3'Flank", None]
        warn, error = process_functions.check_col_and_values(
            assay_info_df,
            'variant_classifications',
            variant_classes,
            filename="Assay_information.yaml",
            na_allowed=True,
            sep=";")
        warning += warn
        total_error += error

        if process_functions.checkColExist(assay_info_df, "read_length"):
            if not all([process_functions.checkInt(i)
                        for i in assay_info_df["read_length"]
                        if i is not None and not pd.isnull(i)]):
                total_error += \
                    ("Assay_information.yaml: "
                     "Please double check your read_length. "
                     "It must be an integer or null.\n")
        else:
            total_error += \
                ("Assay_information.yaml: "
                 "Must have read_length column.\n")

        if process_functions.checkColExist(assay_info_df, "number_of_genes"):
            if not all([process_functions.checkInt(i)
                        for i in assay_info_df["number_of_genes"]]):
                total_error += \
                    ("Assay_information.yaml: "
                     "Please double check your number_of_genes. "
                     "It must be an integer.\n")
        else:
            total_error += \
                ("Assay_information.yaml: "
                 "Must have number_of_genes column.\n")

        if process_functions.checkColExist(assay_info_df, "gene_padding"):
            if not all([process_functions.checkInt(i)
                        for i in assay_info_df["gene_padding"]
                        if i is not None and not pd.isnull(i)]):
                total_error += \
                    ("Assay_information.yaml: "
                     "Please double check your gene_padding. "
                     "It must be an integer or blank.\n")
        else:
            warning += \
                ("Assay_information.yaml: "
                 "gene_padding is by default 10 if not specified.\n")

        warn, error = process_functions.check_col_and_values(
            assay_info_df, 'calling_strategy', ['tumor_only', 'tumor_normal'],
            filename="Assay_information.yaml", required=True)
        warning += warn
        total_error += error

        if process_functions.checkColExist(assay_info_df, "specimen_tumor_cellularity"):
            if not all([i.startswith(">") and i.endswith("%")
                        for i in assay_info_df["specimen_tumor_cellularity"]]):
                total_error += \
                    ("Assay_information.yaml: "
                     "Please double check your specimen_tumor_cellularity. "
                     "It must in this format >(num)%. ie. >10%\n")
        else:
            total_error += \
                ("Assay_information.yaml: "
                 "Must have specimen_tumor_cellularity column.\n")

        alteration_types = ["snv", "small_indels", "gene_level_cna",
                            "intragenic_cna", "structural_variants"]
        warn, error = process_functions.check_col_and_values(
            assay_info_df, 'alteration_types', alteration_types,
            filename="Assay_information.yaml", required=True, sep=";")
        warning += warn
        total_error += error

        preservation_technique = ["FFPE", 'fresh_frozen']
        warn, error = process_functions.check_col_and_values(
            assay_info_df, 'preservation_technique', preservation_technique,
            filename="Assay_information.yaml", required=True, sep=";")
        warning += warn
        total_error += error

        coverage = ['hotspot_regions', 'coding_exons', 'introns', 'promoters']
        warn, error = process_functions.check_col_and_values(
            assay_info_df, 'coverage', coverage,
            filename="Assay_information.yaml", required=True, sep=";")
        warning += warn
        total_error += error

        return total_error, warning
| {
"repo_name": "thomasyu888/Genie",
"path": "genie/assay.py",
"copies": "1",
"size": "12289",
"license": "mit",
"hash": 3083143859711158300,
"line_mean": 39.8272425249,
"line_max": 89,
"alpha_frac": 0.5457726422,
"autogenerated": false,
"ratio": 4.038448899112717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5084221541312718,
"avg_score": null,
"num_lines": null
} |
# Assemble Arybo IR into ASM thanks to LLVM
# Map symbol names to register
# Warning: this tries to do its best to save modified temporary registers.
# There might be errors while doing this. The idea is not to regenerate clean
# binaries, but to help the reverser!
try:
import llvmlite.ir as ll
import llvmlite.binding as llvm
import ctypes
llvmlite_available = True
__llvm_initialized = False
except ImportError:
llvmlite_available = False
import six
import collections
import arybo.lib.mba_exprs as EX
from arybo.lib.exprs_passes import lower_rol_ror, CachePass
def IntType(n):
    # Convenience wrapper: build an LLVM integer type of n bits. The int()
    # cast normalizes non-int widths (presumably e.g. numpy integers) before
    # handing them to llvmlite — TODO confirm which callers need it.
    return ll.IntType(int(n))
class ToLLVMIr(CachePass):
    """Expression visitor that lowers Arybo expressions to LLVM IR.

    Each visit_* method emits instructions through *IRB* (an
    ``llvmlite.ir.IRBuilder``) and returns the resulting LLVM value.
    ``sym_to_value`` maps Arybo bit-vector names to pre-existing LLVM
    values (function arguments or loaded registers).
    """
    def __init__(self, sym_to_value, IRB):
        super(ToLLVMIr,self).__init__()
        self.IRB = IRB
        self.sym_to_value = sym_to_value
        self.values = {}

    def visit_wrapper(self, e, cb):
        # Normalize every visit result to a (value, basic_block) pair; the
        # block matters for visit_Cond, which needs to know where each
        # branch ended for the phi node.
        ret = super(ToLLVMIr, self).visit_wrapper(e, cb)
        if not isinstance(ret, tuple):
            return (ret,self.IRB.block)
        else:
            return ret

    def visit_value(self, e):
        # Visit *e* and return only the LLVM value (drop the block).
        return EX.visit(e, self)[0]

    def visit_Cst(self, e):
        return ll.Constant(IntType(e.nbits), e.n)

    def visit_BV(self, e):
        # Resolve a named bit-vector through the caller-provided mapping.
        name = e.v.name
        value = self.sym_to_value.get(name, None)
        if value is None:
            raise ValueError("unable to map BV name '%s' to an LLVM value!" % name)
        # TODO: check value bit-size
        #ret,nbits = value
        #if e.nbits != nbits:
        #    raise ValueError("bit-vector is %d bits, expected %d bits" % (e.nbits, nbits))
        return value

    def visit_Not(self, e):
        return self.IRB.not_(self.visit_value(e.arg))

    def visit_ZX(self, e):
        return self.IRB.zext(self.visit_value(e.arg), IntType(e.n))

    def visit_SX(self, e):
        return self.IRB.sext(self.visit_value(e.arg), IntType(e.n))

    def visit_Concat(self, e):
        # Generate a suite of OR + shifts
        # TODO: pass that lowers concat
        # args[0] occupies the low bits; each following argument is
        # zero-extended, shifted up past the bits emitted so far, and OR'd in.
        arg0 = e.args[0]
        ret = self.visit_value(arg0)
        type_ = IntType(e.nbits)
        ret = self.IRB.zext(ret, type_)
        cur_bits = arg0.nbits
        for a in e.args[1:]:
            cur_arg = self.IRB.zext(self.visit_value(a), type_)
            ret = self.IRB.or_(ret,
                self.IRB.shl(cur_arg, ll.Constant(type_, cur_bits)))
            cur_bits += a.nbits
        return ret

    def visit_Slice(self, e):
        # TODO: pass that lowers slice
        # Lowered as a logical right shift (to drop bits below the slice)
        # followed by a truncation to the slice width.
        ret = self.visit_value(e.arg)
        idxes = e.idxes
        # Support only sorted indxes for now
        if idxes != list(range(idxes[0], idxes[-1]+1)):
            raise ValueError("slice indexes must be continuous and sorted")
        if idxes[0] != 0:
            ret = self.IRB.lshr(ret, ll.Constant(IntType(e.arg.nbits), idxes[0]))
        return self.IRB.trunc(ret, IntType(len(idxes)))

    def visit_Broadcast(self, e):
        # TODO: pass that lowers broadcast
        # left-shift to get the idx as the MSB, and them use an arithmetic
        # right shift of nbits-1 (sign-replication fills every bit with
        # bit ``idx`` of the argument).
        type_ = IntType(e.nbits)
        ret = self.visit_value(e.arg)
        ret = self.IRB.zext(ret, type_)
        ret = self.IRB.shl(ret, ll.Constant(type_, e.nbits-e.idx-1))
        return self.IRB.ashr(ret, ll.Constant(type_, e.nbits-1))

    def visit_nary_args(self, e, op):
        # Visit every operand and apply the IRBuilder op to them.
        return op(*(self.visit_value(a) for a in e.args))

    def visit_BinaryOp(self, e):
        # Dispatch arithmetic/shift nodes to the matching IRBuilder method.
        ops = {
            EX.ExprAdd: self.IRB.add,
            EX.ExprSub: self.IRB.sub,
            EX.ExprMul: self.IRB.mul,
            EX.ExprShl: self.IRB.shl,
            EX.ExprLShr: self.IRB.lshr,
            EX.ExprAShr: self.IRB.ashr
        }
        op = ops[type(e)]
        return self.visit_nary_args(e, op)

    def visit_Div(self, e):
        return self.visit_nary_args(e, self.IRB.sdiv if e.is_signed else self.IRB.udiv)

    def visit_Rem(self, e):
        return self.visit_nary_args(e, self.IRB.srem if e.is_signed else self.IRB.urem)

    def visit_NaryOp(self, e):
        # Dispatch bitwise nodes to the matching IRBuilder method.
        ops = {
            EX.ExprXor: self.IRB.xor,
            EX.ExprAnd: self.IRB.and_,
            EX.ExprOr: self.IRB.or_,
        }
        op = ops[type(e)]
        return self.visit_nary_args(e, op)

    def visit_Cmp(self, e):
        # Signedness picks the icmp flavor; the op enum maps to llvmlite's
        # textual comparison operators.
        f = self.IRB.icmp_signed if e.is_signed else self.IRB.icmp_unsigned
        cmp_op = {
            EX.ExprCmp.OpEq: '==',
            EX.ExprCmp.OpNeq: '!=',
            EX.ExprCmp.OpLt: '<',
            EX.ExprCmp.OpLte: '<=',
            EX.ExprCmp.OpGt: '>',
            EX.ExprCmp.OpGte: '>='
        }
        return f(cmp_op[e.op], self.visit_value(e.X), self.visit_value(e.Y))

    def visit_Cond(self, e):
        # Lower a conditional as an if/else diamond with a phi merge.
        cond = self.visit_value(e.cond)
        bb_name = self.IRB.basic_block.name
        ifb = self.IRB.append_basic_block(bb_name + ".if")
        elseb = self.IRB.append_basic_block(bb_name + ".else")
        endb = self.IRB.append_basic_block(bb_name + ".endif")
        self.IRB.cbranch(cond, ifb, elseb)
        self.IRB.position_at_end(ifb)
        # Keep the block each branch actually ends in: nested conditionals
        # may leave the builder in a different block than ifb/elseb.
        ifv,ifb = EX.visit(e.a, self)
        self.IRB.branch(endb)
        self.IRB.position_at_end(elseb)
        elsev,elseb = EX.visit(e.b, self)
        self.IRB.branch(endb)
        self.IRB.position_at_end(endb)
        ret = self.IRB.phi(IntType(e.nbits))
        ret.add_incoming(ifv, ifb)
        ret.add_incoming(elsev, elseb)
        return ret,endb
def llvm_get_target(triple_or_target=None):
    # Resolve *triple_or_target* to an llvm.Target, lazily initializing the
    # LLVM binding (targets + asm printers) on first use. Accepts an
    # existing Target, a triple string, or None for the host default.
    global __llvm_initialized
    if not __llvm_initialized:
        # Lazy initialisation
        llvm.initialize()
        llvm.initialize_all_targets()
        llvm.initialize_all_asmprinters()
        __llvm_initialized = True
    if isinstance(triple_or_target, llvm.Target):
        return triple_or_target
    if triple_or_target is None:
        return llvm.Target.from_default_triple()
    return llvm.Target.from_triple(triple_or_target)
def _create_execution_engine(M, target):
    # Build an MCJIT engine for module *M* on *target* (helper, unused in
    # this chunk's public entry points).
    target_machine = target.create_target_machine()
    engine = llvm.create_mcjit_compiler(M, target_machine)
    return engine
def to_llvm_ir(exprs, sym_to_value, IRB):
    """Lower *exprs* to LLVM IR through *IRB*, returning the value of the
    last expression. A single expression may be passed instead of a list."""
    if not llvmlite_available:
        raise RuntimeError("llvmlite module unavailable! can't assemble to LLVM IR...")
    if not isinstance(exprs, collections.abc.Iterable):
        exprs = (exprs,)
    visitor = ToLLVMIr(sym_to_value, IRB)
    ret = None
    for expr in exprs:
        # rol/ror have no direct LLVM instruction: lower them first.
        ret = visitor.visit_value(lower_rol_ror(expr))
    return ret
def to_llvm_function(exprs, vars_, name="__arybo"):
    # Build a standalone LLVM module with one function *name* taking the
    # variables in *vars_* as integer arguments and returning the value of
    # the last expression in *exprs*.
    if not llvmlite_available:
        raise RuntimeError("llvmlite module unavailable! can't assemble to LLVM IR...")
    if not isinstance(exprs, collections.abc.Iterable):
        exprs = (exprs,)
    M = ll.Module()
    args_types = [IntType(v.nbits) for v in vars_]
    # Return type is taken from the last expression.
    fntype = ll.FunctionType(IntType(exprs[-1].nbits), args_types)
    func = ll.Function(M, fntype, name=name)
    func.attributes.add("nounwind")
    BB = func.append_basic_block()
    IRB = ll.IRBuilder()
    IRB.position_at_end(BB)
    # Map each Arybo variable name to the corresponding function argument.
    sym_to_value = {}
    for i,v in enumerate(vars_):
        arg = func.args[i]
        arg.name = v.name
        sym_to_value[v.name] = arg
    ret = to_llvm_ir(exprs, sym_to_value, IRB)
    IRB.ret(ret)
    return M
def asm_module(exprs, dst_reg, sym_to_reg, triple_or_target=None):
    '''
    Generate an LLVM module for a list of expressions

    Arguments:
      * See :meth:`arybo.lib.exprs_asm.asm_binary` for a description of the list of arguments

    Output:
      * An LLVM module with one function named "__arybo", containing the
        translated expression.

    See :meth:`arybo.lib.exprs_asm.asm_binary` for an usage example.
    '''
    if not llvmlite_available:
        raise RuntimeError("llvmlite module unavailable! can't assemble...")
    # Called for its side effect of (lazily) initializing LLVM; the
    # returned target itself is not used here.
    target = llvm_get_target(triple_or_target)

    M = ll.Module()
    fntype = ll.FunctionType(ll.VoidType(), [])
    func = ll.Function(M, fntype, name='__arybo')
    # naked + nounwind: no prologue/epilogue, so the emitted .text is
    # exactly the translated expression.
    func.attributes.add("naked")
    func.attributes.add("nounwind")
    BB = func.append_basic_block()
    IRB = ll.IRBuilder()
    IRB.position_at_end(BB)
    # Load each source register into an LLVM value, compute, then store
    # the result into the destination register.
    sym_to_value = {sym: IRB.load_reg(IntType(reg[1]), reg[0], reg[0]) for sym,reg in six.iteritems(sym_to_reg)}
    ret = to_llvm_ir(exprs, sym_to_value, IRB)
    IRB.store_reg(ret, IntType(dst_reg[1]), dst_reg[0])
    # See https://llvm.org/bugs/show_bug.cgi?id=15806
    IRB.unreachable()
    return M
def asm_binary(exprs, dst_reg, sym_to_reg, triple_or_target=None):
    '''
    Compile and assemble an expression for a given architecture.

    Arguments:
      * *exprs*: list of expressions to convert. This can represent a graph of
        expressions.
      * *dst_reg*: final register on which to store the result of the last
        expression. This is represented by a tuple ("reg_name", reg_size_bits).
        Example: ("rax", 64)
      * *sym_to_reg*: a dictionnary that maps Arybo variable name to registers
        (described as tuple, see *dst_reg*). Example: {"x": ("rdi",64), "y": ("rsi", 64)}
      * *triple_or_target*: LLVM architecture triple to use. Use by default the
        host architecture. Example: "x86_64-unknown-unknown"

    Output:
      * binary stream of the assembled expression for the given target

    Here is an example that will compile and assemble "x+y" for x86_64::

        from arybo.lib import MBA
        from arybo.lib import mba_exprs
        from arybo.lib.exprs_asm import asm_binary
        mba = MBA(64)
        x = mba.var("x")
        y = mba.var("y")
        e = mba_exprs.ExprBV(x) + mba_exprs.ExprBV(y)
        code = asm_binary([e], ("rax", 64), {"x": ("rdi", 64), "y": ("rsi", 64)}, "x86_64-unknown-unknown")
        print(code.hex())

    which outputs ``488d0437`` (which is equivalent to ``lea rax,[rdi+rsi*1]``).
    '''
    if not llvmlite_available:
        raise RuntimeError("llvmlite module unavailable! can't assemble...")
    target = llvm_get_target(triple_or_target)
    M = asm_module(exprs, dst_reg, sym_to_reg, target)
    # Use LLVM to compile the '__arybo' function. As the function is naked and
    # is the only, we just got to dump the .text section to get the binary
    # assembly.
    # No need for keystone or whatever hype stuff. llvmlite does the job.
    M = llvm.parse_assembly(str(M))
    M.verify()
    target_machine = target.create_target_machine()
    obj_bin = target_machine.emit_object(M)
    obj = llvm.ObjectFileRef.from_data(obj_bin)
    # Scan the object file's sections for the text section and return its
    # raw bytes.
    for s in obj.sections():
        if s.is_text():
            return s.data()
    raise RuntimeError("unable to get the assembled binary!")
| {
"repo_name": "quarkslab/arybo",
"path": "arybo/lib/exprs_asm.py",
"copies": "1",
"size": "10636",
"license": "bsd-3-clause",
"hash": -5456864958629841000,
"line_mean": 32.872611465,
"line_max": 112,
"alpha_frac": 0.6078412937,
"autogenerated": false,
"ratio": 3.1863391252246855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4294180418924685,
"avg_score": null,
"num_lines": null
} |
# Assembler - main class
#
# Input: a single assembly source file
# Output: an object file with the assembled source and other
# linkage information (relocation table, list of unresolved
# and exported labels).
#
# Luz micro-controller assembler
# Eli Bendersky (C) 2008-2010
#
import pprint, os, sys
from collections import defaultdict
from ..commonlib.utils import word2bytes, num_fits_in_nbits, unpack_bytes
from .asmparser import (
AsmParser, Instruction, Directive, Id, Number, String)
from .asm_common_types import (
SegAddr, ExportEntry, ImportEntry, RelocEntry)
from .asm_instructions import (
InstructionError, instruction_exists, instruction_length,
assemble_instruction)
from .objectfile import ObjectFile
class ArgumentError(Exception):
    """Raised when the assembler API is called with invalid arguments."""
class AssemblyError(Exception):
    """Raised when the assembly source being processed contains an error."""
class Assembler(object):
    """ Two-pass assembler for Luz assembly source.

        Create an Assembler and call assemble() with either a source
        string or a filename; an ObjectFile instance is returned.
    """
    def __init__(self):
        self.parser = AsmParser()
    def assemble(self, str=None, filename=None):
        """ Assembles the code given in a string or a file.

            Returns an ObjectFile instance with the results of the
            assembly process.

            Provide the code as a string in str. If str is None
            the code is assumed to reside in a file named by
            filename.
        """
        if str is None:
            if filename is None:
                raise ArgumentError('provide str or filename')
            # 'rU' is universal-newlines mode -- presumably kept for
            # Python 2 compatibility (deprecated in modern Python 3)
            with open(filename, 'rU') as file:
                str = file.read()
        # 1. Parse assembly code into an intermediate format
        #
        imf = self._parse(str)
        # 2. First pass
        #
        symtab, addr_imf = self._compute_addresses(imf)
        # 3. Second pass
        #
        return self._assemble_code(symtab, addr_imf)
    ######################-- PRIVATE --######################
    def _parse(self, str):
        # Delegate to AsmParser; returns the intermediate form (IMF),
        # a list of parsed lines.
        return self.parser.parse(str)
    def _compute_addresses(self, imf):
        """ First pass of the assembler.

            Takes an intermediate form representation of the
            parsed assembly code (imf).

            Builds a symbol table mapping labels defined in the
            code into addresses. The symbol table maps labels
            to SegAddr objects.

            Returns a pair: symbol table and an addressed IMF,
            a list of (addr, imf_line) pairs, with addr being the
            address into which the line is to be assembled.
            Note: .segment directives are removed from this list,
            since after the first pass they're no longer required.

            Does the minimal amount of work to infer the address
            of each label:

            * Recognizes directives and takes into account those
              that allocate storage, and those that specify
              segments
            * Recognizes instructions and takes into account their
              lengths

            Therefore, the error checking performed here is only
            of the constructs required for construction the symbol
            table:

            * Recognition of unknown instructions
            * Validation of only those directives that affect
              address computations.
            * Duplicated label definitions
            * The arguments of directives like .word and .byte
              are not fully verified, but only counted.
        """
        # Symbol table
        # Maps label names to SegAddr objects
        #
        symtab = {}
        # Addressed imf
        # Stores pairs (seg_addr, imf_line) for each line in the
        # intermediate form. seg_addr is a SegAddr object
        #
        addr_imf = []
        # Stores the current address for each segment. All start
        # at 0 when the segment is first defined.
        #
        seg_addr = {}
        # The current segment
        #
        cur_seg = None
        for line in imf:
            # If there's no "current segment", this line can only
            # be a .segment definition (unlabeled, because this
            # label can have no segment to reference, yet)
            #
            if not cur_seg:
                if not (isinstance(line, Directive) and
                        line.name == '.segment' and
                        not line.label):
                    self._no_segment_error(line.lineno)
            else:
                # Current segment and its running address
                saddr = SegAddr(cur_seg, seg_addr[cur_seg])
                # If there's a label, make sure it's not duplicated
                # and add it to the symbol table
                #
                if line.label:
                    if line.label in symtab:
                        self._assembly_error("label '%s' duplicated" % line.label, line.lineno)
                    symtab[line.label] = saddr
            # Account for the line's effect on the running address of
            # the current segment
            if isinstance(line, Instruction):
                if line.name is None:
                    # Labels without instructions don't affect
                    # the address
                    # They don't have to be added to addr_imf
                    # either
                    #
                    pass
                elif instruction_exists(line.name):
                    addr_imf.append((saddr, line))
                    seg_addr[cur_seg] += instruction_length(line.name)
                else:
                    self._assembly_error("unknown instruction '%s'" % line.name, line.lineno)
            elif isinstance(line, Directive):
                if cur_seg and line.name != '.segment':
                    addr_imf.append((saddr, line))
                if line.name == '.segment':
                    # Switch to the segment named in the
                    # directive. If it's a new segment, its
                    # address count starts at 0.
                    #
                    self._validate_args(line, [Id])
                    cur_seg = line.args[0].id
                    if not cur_seg in seg_addr:
                        seg_addr[cur_seg] = 0
                elif line.name == '.word':
                    # Each word occupies 4 bytes
                    nargs = len(line.args)
                    seg_addr[cur_seg] += nargs * 4
                elif line.name == '.byte':
                    # Bytes are packed, then padded to a word boundary
                    nargs = len(line.args)
                    addr = seg_addr[cur_seg] + nargs
                    seg_addr[cur_seg] = self._align_at_next_word(addr)
                elif line.name == '.alloc':
                    self._validate_args(line, [Number])
                    addr = seg_addr[cur_seg] + int(line.args[0].val)
                    seg_addr[cur_seg] = self._align_at_next_word(addr)
                elif line.name == '.string':
                    self._validate_args(line, [String])
                    # +1 for the zero termination that will be
                    # inserted when the string is allocated in
                    # the second pass
                    #
                    addr = seg_addr[cur_seg] + len(line.args[0].val) + 1
                    seg_addr[cur_seg] = self._align_at_next_word(addr)
            else:
                self._assembly_error("bad assembly", line.lineno)
        return symtab, addr_imf
    def _assemble_code(self, symtab, addr_imf):
        """ Second pass of the assembler.

            Utilizes the symbol table and the addressed IMF lines
            pre-computed in the first pass to assemble the code
            into object data.
        """
        # Holds constants defined with the .define directive
        #
        defines = {}
        # The assembled data as a list of bytes, per segment
        #
        seg_data = defaultdict(list)
        export_table = []
        import_table = []
        reloc_table = []
        # addr: a SegAddr object
        # line: the parsed IMF for a line of assembly
        #
        for addr, line in addr_imf:
            if isinstance(line, Instruction):
                # Sanity check: are we shoving the instruction(s)
                # in the right place?
                #
                assert len(seg_data[addr.segment]) == addr.offset
                # Assemble the instruction. This returns a list
                # of AssembledInstruction objects.
                #
                try:
                    asm_instrs = assemble_instruction(
                        line.name, line.args, addr,
                        symtab, defines)
                except InstructionError:
                    # sys.exc_info()[1] instead of "except ... as e" --
                    # presumably kept for old-Python compatibility
                    err = sys.exc_info()[1]
                    self._assembly_error(err, line.lineno)
                for asm_instr in asm_instrs:
                    # The offset in the segment this instruction
                    # will be placed into
                    #
                    offset = len(seg_data[addr.segment])
                    if asm_instr.import_req:
                        type, symbol = asm_instr.import_req
                        # note that we're using offset, and not
                        # addr.offset here, because this could be
                        # a pseudo-instruction assembling to two
                        # instructions, and addr.offset points
                        # only to the first one.
                        #
                        import_table.append(ImportEntry(
                            import_symbol=symbol,
                            type=type,
                            addr=SegAddr(addr.segment, offset)))
                    if asm_instr.reloc_req:
                        type, segment = asm_instr.reloc_req
                        reloc_table.append(RelocEntry(
                            reloc_segment=segment,
                            type=type,
                            addr=SegAddr(addr.segment, offset)))
                    # Add the assembled instruction into the
                    # segment data
                    #
                    seg_data[addr.segment].extend(word2bytes(asm_instr.op))
            elif isinstance(line, Directive):
                if line.name == '.define':
                    # Add the defined symbol to the defines table,
                    # possibly overriding a previous definition
                    #
                    self._validate_args(line, [Id, Number])
                    defines[line.args[0].id] = line.args[1].val
                elif line.name == '.global':
                    self._validate_args(line, [Id])
                    symbol_name = line.args[0].id
                    # The exported symbol must be present in the
                    # symbol table collected by the first pass
                    #
                    if symbol_name in symtab:
                        export_table.append(ExportEntry(
                            export_symbol=symbol_name,
                            addr=symtab[symbol_name]))
                    else:
                        self._assembly_error('.global defines an unknown label %s' % symbol_name, line.lineno)
                elif line.name == '.alloc':
                    # The arguments of .alloc directives were
                    # validated in the first pass
                    #
                    num = self._align_at_next_word(line.args[0].val)
                    seg_data[addr.segment].extend([0] * num)
                elif line.name == '.byte':
                    data = []
                    for i, byte_arg in enumerate(line.args):
                        if (isinstance(byte_arg, Number) and
                            num_fits_in_nbits(byte_arg.val, 8)
                            ):
                            data.append(byte_arg.val)
                        else:
                            self._assembly_error('.byte -- argument %s not a valid byte' % (i + 1,), line.lineno)
                    # Zero-pad the bytes up to a word boundary (matches the
                    # alignment done in the first pass)
                    leftover = len(data) % 4
                    if leftover:
                        data.extend([0] * (4 - leftover))
                    seg_data[addr.segment].extend(data)
                elif line.name == '.word':
                    data = []
                    for i, word_arg in enumerate(line.args):
                        if (isinstance(word_arg, Number) and
                            num_fits_in_nbits(word_arg.val, 32)
                            ):
                            data.extend(word2bytes(word_arg.val))
                        else:
                            self._assembly_error('.word -- argument %s not a valid word' % (i + 1,), line.lineno)
                    seg_data[addr.segment].extend(data)
                elif line.name == '.string':
                    # Append the string bytes plus a zero terminator,
                    # padded to a word boundary
                    data = unpack_bytes(line.args[0].val + '\x00')
                    leftover = len(data) % 4
                    if leftover:
                        data.extend([0] * (4 - leftover))
                    seg_data[addr.segment].extend(data)
                else:
                    # .segment directives should not be passed
                    # here by the first pass
                    #
                    if line.name == '.segment':
                        assert 0
                    self._assembly_error('unknown directive %s' % line.name, line.lineno)
            else:
                self._assembly_error("bad assembly", line.lineno)
        return ObjectFile.from_assembler(
            seg_data=seg_data,
            export_table=export_table,
            import_table=import_table,
            reloc_table=reloc_table)
    def _validate_args(self, line, exp_args):
        """ Validates that the arguments of the
            directive stored in 'line' are of the correct amount
            and types.
        """
        if len(exp_args) != len(line.args):
            self._assembly_error("%s -- %s argument(s) expected" % (line.name, len(exp_args)), line.lineno)
        for i, exp_type in enumerate(exp_args):
            if not isinstance(line.args[i], exp_type):
                self._assembly_error("%s -- argument '%s' of unexpected type" % (line.name, line.args[i]), line.lineno)
    def _no_segment_error(self, lineno):
        # Report an address-affecting line seen before any .segment
        self._assembly_error("A segment must be defined before this line", lineno)
    def _assembly_error(self, msg, lineno):
        # Always raises; callers rely on this never returning
        raise AssemblyError("%s (at line %s)" % (msg, lineno))
    def _align_at_next_word(self, addr):
        """ Make sure an address is aligned at a word (4-byte)
            boundary. If it isn't, align it at the next word.
        """
        offset = addr % 4;
        if offset == 0:
            return addr
        else:
            return addr + (4 - offset)
| {
"repo_name": "eliben/luz-cpu",
"path": "luz_asm_sim/lib/asmlib/assembler.py",
"copies": "1",
"size": "14448",
"license": "unlicense",
"hash": -8956863696156704000,
"line_mean": 38.0486486486,
"line_max": 119,
"alpha_frac": 0.4986157254,
"autogenerated": false,
"ratio": 4.703125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5701740725400001,
"avg_score": null,
"num_lines": null
} |
# Assembler - main class
#
# Input: a single assembly source file
# Output: an object file with the assembled source and other
# linkage information (relocation table, list of unresolved
# and exported labels).
#
# Luz micro-controller assembler
# Eli Bendersky (C) 2008-2010
#
import pprint, os, sys
from collections import defaultdict
from ..commonlib.utils import (
word2bytes, num_fits_in_nbits,
unpack_bytes)
from .asmparser import (
AsmParser, Instruction, Directive, Id, Number, String)
from .asm_common_types import (
SegAddr, ExportEntry, ImportEntry, RelocEntry)
from .asm_instructions import (
InstructionError, instruction_exists, instruction_length,
assemble_instruction)
from .objectfile import ObjectFile
class ArgumentError(Exception):
    """Raised when the assembler API is called with invalid arguments."""
class AssemblyError(Exception):
    """Raised when the assembly source being processed contains an error."""
class Assembler(object):
    """ Two-pass assembler for Luz assembly source.

        Create an Assembler and call assemble() with either a source
        string or a filename; an ObjectFile instance is returned.
    """
    def __init__(self):
        self.parser = AsmParser()
    def assemble(self, str=None, filename=None):
        """ Assembles the code given in a string or a file.

            Returns an ObjectFile instance with the results of the
            assembly process.

            Provide the code as a string in str. If str is None
            the code is assumed to reside in a file named by
            filename.
        """
        if str is None:
            if filename is None:
                raise ArgumentError('provide str or filename')
            # 'rU' is universal-newlines mode -- presumably kept for
            # Python 2 compatibility (deprecated in modern Python 3)
            with open(filename, 'rU') as file:
                str = file.read()
        # 1. Parse assembly code into an intermediate format
        #
        imf = self._parse(str)
        # 2. First pass
        #
        symtab, addr_imf = self._compute_addresses(imf)
        # 3. Second pass
        #
        return self._assemble_code(symtab, addr_imf)
    ######################-- PRIVATE --######################
    def _parse(self, str):
        # Delegate to AsmParser; returns the intermediate form (IMF),
        # a list of parsed lines.
        return self.parser.parse(str)
    def _compute_addresses(self, imf):
        """ First pass of the assembler.

            Takes an intermediate form representation of the
            parsed assembly code (imf).

            Builds a symbol table mapping labels defined in the
            code into addresses. The symbol table maps labels
            to SegAddr objects.

            Returns a pair: symbol table and an addressed IMF,
            a list of (addr, imf_line) pairs, with addr being the
            address into which the line is to be assembled.
            Note: .segment directives are removed from this list,
            since after the first pass they're no longer required.

            Does the minimal amount of work to infer the address
            of each label:

            * Recognizes directives and takes into account those
              that allocate storage, and those that specify
              segments
            * Recognizes instructions and takes into account their
              lengths

            Therefore, the error checking performed here is only
            of the constructs required for construction the symbol
            table:

            * Recognition of unknown instructions
            * Validation of only those directives that affect
              address computations.
            * Duplicated label definitions
            * The arguments of directives like .word and .byte
              are not fully verified, but only counted.
        """
        # Symbol table
        # Maps label names to SegAddr objects
        #
        symtab = {}
        # Addressed imf
        # Stores pairs (seg_addr, imf_line) for each line in the
        # intermediate form. seg_addr is a SegAddr object
        #
        addr_imf = []
        # Stores the current address for each segment. All start
        # at 0 when the segment is first defined.
        #
        seg_addr = {}
        # The current segment
        #
        cur_seg = None
        for line in imf:
            # If there's no "current segment", this line can only
            # be a .segment definition (unlabeled, because this
            # label can have no segment to reference, yet)
            #
            if not cur_seg:
                if not (isinstance(line, Directive) and
                        line.name == '.segment' and
                        not line.label):
                    self._no_segment_error(line.lineno)
            else:
                # Current segment and its running address
                saddr = SegAddr(cur_seg, seg_addr[cur_seg])
                # If there's a label, make sure it's not duplicated
                # and add it to the symbol table
                #
                if line.label:
                    if line.label in symtab:
                        self._assembly_error("label '%s' duplicated" % line.label, line.lineno)
                    symtab[line.label] = saddr
            # Account for the line's effect on the running address of
            # the current segment
            if isinstance(line, Instruction):
                if line.name is None:
                    # Labels without instructions don't affect
                    # the address
                    # They don't have to be added to addr_imf
                    # either
                    #
                    pass
                elif instruction_exists(line.name):
                    addr_imf.append((saddr, line))
                    seg_addr[cur_seg] += instruction_length(line.name)
                else:
                    self._assembly_error("unknown instruction '%s'" % line.name, line.lineno)
            elif isinstance(line, Directive):
                if cur_seg and line.name != '.segment':
                    addr_imf.append((saddr, line))
                if line.name == '.segment':
                    # Switch to the segment named in the
                    # directive. If it's a new segment, its
                    # address count starts at 0.
                    #
                    self._validate_args(line, [Id])
                    cur_seg = line.args[0].id
                    if not cur_seg in seg_addr:
                        seg_addr[cur_seg] = 0
                elif line.name == '.word':
                    # Each word occupies 4 bytes
                    nargs = len(line.args)
                    seg_addr[cur_seg] += nargs * 4
                elif line.name == '.byte':
                    # Bytes are packed, then padded to a word boundary
                    nargs = len(line.args)
                    addr = seg_addr[cur_seg] + nargs
                    seg_addr[cur_seg] = self._align_at_next_word(addr)
                elif line.name == '.alloc':
                    self._validate_args(line, [Number])
                    addr = seg_addr[cur_seg] + int(line.args[0].val)
                    seg_addr[cur_seg] = self._align_at_next_word(addr)
                elif line.name == '.string':
                    self._validate_args(line, [String])
                    # +1 for the zero termination that will be
                    # inserted when the string is allocated in
                    # the second pass
                    #
                    addr = seg_addr[cur_seg] + len(line.args[0].val) + 1
                    seg_addr[cur_seg] = self._align_at_next_word(addr)
            else:
                self._assembly_error("bad assembly", line.lineno)
        return symtab, addr_imf
    def _assemble_code(self, symtab, addr_imf):
        """ Second pass of the assembler.

            Utilizes the symbol table and the addressed IMF lines
            pre-computed in the first pass to assemble the code
            into object data.
        """
        # Holds constants defined with the .define directive
        #
        defines = {}
        # The assembled data as a list of bytes, per segment
        #
        seg_data = defaultdict(list)
        export_table = []
        import_table = []
        reloc_table = []
        # addr: a SegAddr object
        # line: the parsed IMF for a line of assembly
        #
        for addr, line in addr_imf:
            if isinstance(line, Instruction):
                # Sanity check: are we shoving the instruction(s)
                # in the right place?
                #
                assert len(seg_data[addr.segment]) == addr.offset
                # Assemble the instruction. This returns a list
                # of AssembledInstruction objects.
                #
                try:
                    asm_instrs = assemble_instruction(
                        line.name, line.args, addr,
                        symtab, defines)
                except InstructionError:
                    # sys.exc_info()[1] instead of "except ... as e" --
                    # presumably kept for old-Python compatibility
                    err = sys.exc_info()[1]
                    self._assembly_error(err, line.lineno)
                for asm_instr in asm_instrs:
                    # The offset in the segment this instruction
                    # will be placed into
                    #
                    offset = len(seg_data[addr.segment])
                    if asm_instr.import_req:
                        type, symbol = asm_instr.import_req
                        # note that we're using offset, and not
                        # addr.offset here, because this could be
                        # a pseudo-instruction assembling to two
                        # instructions, and addr.offset points
                        # only to the first one.
                        #
                        import_table.append(ImportEntry(
                            import_symbol=symbol,
                            type=type,
                            addr=SegAddr(addr.segment, offset)))
                    if asm_instr.reloc_req:
                        type, segment = asm_instr.reloc_req
                        reloc_table.append(RelocEntry(
                            reloc_segment=segment,
                            type=type,
                            addr=SegAddr(addr.segment, offset)))
                    # Add the assembled instruction into the
                    # segment data
                    #
                    seg_data[addr.segment].extend(word2bytes(asm_instr.op))
            elif isinstance(line, Directive):
                if line.name == '.define':
                    # Add the defined symbol to the defines table,
                    # possibly overriding a previous definition
                    #
                    self._validate_args(line, [Id, Number])
                    defines[line.args[0].id] = line.args[1].val
                elif line.name == '.global':
                    self._validate_args(line, [Id])
                    symbol_name = line.args[0].id
                    # The exported symbol must be present in the
                    # symbol table collected by the first pass
                    #
                    if symbol_name in symtab:
                        export_table.append(ExportEntry(
                            export_symbol=symbol_name,
                            addr=symtab[symbol_name]))
                    else:
                        self._assembly_error('.global defines an unknown label %s' % symbol_name, line.lineno)
                elif line.name == '.alloc':
                    # The arguments of .alloc directives were
                    # validated in the first pass
                    #
                    num = self._align_at_next_word(line.args[0].val)
                    seg_data[addr.segment].extend([0] * num)
                elif line.name == '.byte':
                    data = []
                    for i, byte_arg in enumerate(line.args):
                        if (isinstance(byte_arg, Number) and
                            num_fits_in_nbits(byte_arg.val, 8)
                            ):
                            data.append(byte_arg.val)
                        else:
                            self._assembly_error('.byte -- argument %s not a valid byte' % (i + 1,), line.lineno)
                    # Zero-pad the bytes up to a word boundary (matches the
                    # alignment done in the first pass)
                    leftover = len(data) % 4
                    if leftover:
                        data.extend([0] * (4 - leftover))
                    seg_data[addr.segment].extend(data)
                elif line.name == '.word':
                    data = []
                    for i, word_arg in enumerate(line.args):
                        if (isinstance(word_arg, Number) and
                            num_fits_in_nbits(word_arg.val, 32)
                            ):
                            data.extend(word2bytes(word_arg.val))
                        else:
                            self._assembly_error('.word -- argument %s not a valid word' % (i + 1,), line.lineno)
                    seg_data[addr.segment].extend(data)
                elif line.name == '.string':
                    # Append the string bytes plus a zero terminator,
                    # padded to a word boundary
                    data = unpack_bytes(line.args[0].val + '\x00')
                    leftover = len(data) % 4
                    if leftover:
                        data.extend([0] * (4 - leftover))
                    seg_data[addr.segment].extend(data)
                else:
                    # .segment directives should not be passed
                    # here by the first pass
                    #
                    if line.name == '.segment':
                        assert 0
                    self._assembly_error('unknown directive %s' % line.name, line.lineno)
            else:
                self._assembly_error("bad assembly", line.lineno)
        return ObjectFile.from_assembler(
            seg_data=seg_data,
            export_table=export_table,
            import_table=import_table,
            reloc_table=reloc_table)
    def _validate_args(self, line, exp_args):
        """ Validates that the arguments of the
            directive stored in 'line' are of the correct amount
            and types.
        """
        if len(exp_args) != len(line.args):
            self._assembly_error("%s -- %s argument(s) expected" % (line.name, len(exp_args)), line.lineno)
        for i, exp_type in enumerate(exp_args):
            if not isinstance(line.args[i], exp_type):
                self._assembly_error("%s -- argument '%s' of unexpected type" % (line.name, line.args[i]), line.lineno)
    def _no_segment_error(self, lineno):
        # Report an address-affecting line seen before any .segment
        self._assembly_error("A segment must be defined before this line", lineno)
    def _assembly_error(self, msg, lineno):
        # Always raises; callers rely on this never returning
        raise AssemblyError("%s (at line %s)" % (msg, lineno))
    def _align_at_next_word(self, addr):
        """ Make sure an address is aligned at a word (4-byte)
            boundary. If it isn't, align it at the next word.
        """
        offset = addr % 4;
        if offset == 0:
            return addr
        else:
            return addr + (4 - offset)
| {
"repo_name": "8l/luz-cpu",
"path": "luz_asm_sim/lib/asmlib/assembler.py",
"copies": "1",
"size": "15393",
"license": "unlicense",
"hash": 4126728296630606300,
"line_mean": 39.3002680965,
"line_max": 119,
"alpha_frac": 0.4680049373,
"autogenerated": false,
"ratio": 4.906917437041759,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.012774798361152305,
"num_lines": 373
} |
# assembler.py 03/11/2014 D.J.Whale
#
# Read a file and assemble it into a numeric representation
import parser
import instruction
import symtab
import io
# Extension mechanism
import extarch
# Set to True if you want addresses prefixed into the output file
PREFIX_ADDR = False
def trace(msg):
    """Print a trace/debug message (any value; coerced to str)."""
    text = str(msg)
    print(text)
def parse(filename, memory=None, startaddr=0):
    """Parse a whole assembly source file, storing each instruction in memory.

    Arguments:
      filename  -- path of the assembly source file to read
      memory    -- optional dict mapping address -> built instruction;
                   a new dict is created when None
      startaddr -- address at which the first instruction is stored

    Returns the memory dict (the one passed in, or the newly created one).
    """
    addr = startaddr
    if memory is None:
        memory = {}
    # 'with' guarantees the source file is closed even if parsing fails
    with open(filename, "rt") as f:
        for line in f.readlines():
            line = line.strip()
            label, operator, operand, labelref = parser.parseLine(line)
            #trace("parse:" + line + "=" + str((label, operator, operand, labelref)))
            if line == "" or (label is None and operator is None):
                # must be a comment
                continue # go round to next line
            instr = instruction.build(operator, operand)
            #trace("  created:" + str(instr))
            # dump any collected labels
            if label is not None:
                symtab.define(label, addr)
            if labelref is not None:
                addrref = symtab.use(labelref, addr)
                if addrref is not None:
                    # address of label already known, so fixup instruction operand now
                    #trace("info: Fixing label reference:" + labelref + " to:" + str(addrref))
                    instr = instruction.setOperand(instr, addrref)
            # Store in memory
            memory[addr] = instr
            # Move to next memory location
            addr += 1
    return memory
def write(memory, filename):
    """Write the contents of memory to the file, one encoded value per call.

    memory is a dict mapping contiguous addresses -> instruction values;
    output starts at the lowest address present.
    NOTE(review): 'io' here is the project-local encoder module imported at
    the top of this file (it accepts a file= keyword), not the stdlib io.
    """
    size = len(memory)
    startaddr = min(memory)
    # 'with' guarantees the output file is closed even if encoding fails
    with open(filename, "wt") as f:
        for addr in range(startaddr, startaddr+size):
            # Optional address-prefix feature, currently disabled:
            #if PREFIX_ADDR:
            #    io.write(addr, file=f)
            io.write(memory[addr], file=f)
def main():
    # Command-line entry point: assemble argv[1] into argv[2], and write a
    # symbol-table dump (labels and fixups) to "<output>.sym".
    import sys
    IN_NAME = sys.argv[1] #TODO if - or not present, use stdin
    OUT_NAME = sys.argv[2] #TODO if - or not present, use stdout
    SYM_NAME = OUT_NAME + ".sym"
    m = parse(IN_NAME)
    # presumably resolves label references recorded during parse --
    # verify against symtab.fixup
    symtab.fixup(m)
    sym_f = open(SYM_NAME, "w")
    symtab.dumpLabels(sym_f)
    symtab.dumpFixups(sym_f)
    sym_f.close()
    ##loader.showmem(m)
    ##disassembler.disassemble(m)
    write(m, OUT_NAME)
# Only run the assembler when executed directly (not when imported).
if __name__ == "__main__":
    #TODO#### get encoder settings from command line args, use io.configure()
    main()
# END
| {
"repo_name": "whaleygeek/MyLittleComputer",
"path": "src/python/assembler.py",
"copies": "1",
"size": "2189",
"license": "mit",
"hash": 2115840577182450400,
"line_mean": 20.2524271845,
"line_max": 78,
"alpha_frac": 0.6697121974,
"autogenerated": false,
"ratio": 3.015151515151515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41848637125515153,
"avg_score": null,
"num_lines": null
} |
# assembles and executes example files and compares expected and actual output
import subprocess
import sys
# prep input to make it consumable by the pixie executable
def prep(s):
    """Join the items of *s* with newlines, append a trailing newline,
    and ASCII-encode the result so it can be fed to the pixie executable.
    """
    joined = '\n'.join(s)
    return (joined + '\n').encode('ascii')
# Maps example program name -> list of (stdin, expected stdout) pairs that
# are run against the compiled .pixie binary in the loop below.
test_cases = {
    '42':
    [('', b'42')],
    'add':
    [(prep('11'), b'2'),
     (prep('56'), b'11')],
    'countdown':
    [(b'', b'10987654321')],
    'stack':
    [(prep('1234560'), b'0654321')],
    'brainfuck':
    # 2 + 5 in brainfuck
    [(prep([str(ord(c)) for c in '++>+++++[<+>-]<.'] + ['0']), b'7'),
     # Hello world in brainfuck
     (prep([str(ord(c)) for c in '++++++++[>++++[>++>+++>+++>+<<<<-'
                                 ']>+>+>->>+[<]<-]>>.>---.+++++++..+'
                                 '++.>>.<-.<.+++.------.--------.>>+'
                                 '.>++.'] + ['0']),
      ''.join([str(ord(c)) for c in 'Hello World!\n']).encode('ascii'))]
}
failures = []
for program in test_cases:
    # compile program
    # (Windows-style relative paths: the dust compiler and the pixie VM
    # binary live in sibling directories of this test folder)
    subprocess.call(['python',
                     '..\\dust\\dust.py',
                     f'..\\examples\\{program}.dust',
                     f'{program}.pixie'])
    for program_in, program_out in test_cases[program]:
        # Run the compiled program under the pixie VM, feeding it the
        # prepared stdin and capturing its stdout
        p = subprocess.Popen(['..\\pixie\\vs\\Debug\\pixie.exe',
                              f'{program}.pixie'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        actual, _ = p.communicate(program_in)
        if actual == program_out:
            # progress dot per passing case
            print('.', end='')
        else:
            failures.append({'program': program,
                             'input': program_in,
                             'expected output': program_out,
                             'actual output': actual})
# Summary: PASS, or one line per failing case
print()
print('PASS' if not failures else f'{len(failures)} failures:')
for failure in failures:
    print(failure)
| {
"repo_name": "vladris/pixie",
"path": "test/run.py",
"copies": "1",
"size": "1838",
"license": "mit",
"hash": 5293416439142903000,
"line_mean": 28.6451612903,
"line_max": 82,
"alpha_frac": 0.4504896627,
"autogenerated": false,
"ratio": 3.6252465483234713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4575736211023471,
"avg_score": null,
"num_lines": null
} |
''' Assemble websocket wire message fragments into complete Bokeh Server
message objects that can be processed.
'''
from __future__ import absolute_import
import six
from tornado.concurrent import return_future
from ..exceptions import ValidationError
import logging
log = logging.getLogger(__name__)
class Receiver(object):
    '''
    Assemble websocket wire message fragments into complete Bokeh server
    message objects.

    On MessageError or ValidationError, the receiver will reset its state
    and attempt to consume a new message.

    NOTE: the *fragment* received can be either bytes or unicode, depending
    on the transport's semantics (WebSocket allows both).

    The expected fragment sequence for a single message is:

    [
        # these are required
        b'{header}',        # serialized header dict
        b'{metadata}',      # serialized metadata dict
        b'{content},        # serialized content dict

        # these are optional, and come in pairs; header contains num_buffers
        b'{buf_header}',    # serialized buffer header dict
        b'array'            # raw buffer payload data
        ...
    ]
    '''
    def __init__(self, protocol):
        # protocol assembles the collected fragments (via protocol.assemble)
        self._protocol = protocol
        # _current_consumer is the current state of the fragment state
        # machine: one of the _HEADER/_METADATA/_CONTENT/_BUFFER_* methods
        self._current_consumer = self._HEADER
        self._message = None
        self._buf_header = None
    @return_future
    def consume(self, fragment, callback=None):
        ''' Feed one fragment to the current state of the state machine,
        then report the assembled message -- or None while the message is
        still incomplete -- through *callback* (supplied by the
        ``return_future`` decorator).
        '''
        self._current_consumer(fragment)
        callback(self._message)
    def _HEADER(self, fragment):
        # Start of a new message: reset partial state, stash the header
        self._assume_text(fragment)
        self._message = None
        self._partial = None
        self._fragments = [fragment]
        self._current_consumer = self._METADATA
    def _METADATA(self, fragment):
        self._assume_text(fragment)
        self._fragments.append(fragment)
        self._current_consumer = self._CONTENT
    def _CONTENT(self, fragment):
        # Third required fragment: assemble the partial message now
        self._assume_text(fragment)
        self._fragments.append(fragment)
        header_json, metadata_json, content_json = self._fragments[:3]
        self._partial = self._protocol.assemble(header_json, metadata_json, content_json)
        self._check_complete()
    def _BUFFER_HEADER(self, fragment):
        # Buffer fragments arrive in (header, payload) pairs
        self._assume_text(fragment)
        self._buf_header = fragment
        self._current_consumer = self._BUFFER_PAYLOAD
    def _BUFFER_PAYLOAD(self, fragment):
        self._assume_binary(fragment)
        self._partial.assemble_buffer(self._buf_header, fragment)
        self._check_complete()
    def _check_complete(self):
        # Either publish the finished message and await the next header, or
        # keep consuming buffer pairs until the message is complete
        if self._partial.complete:
            self._message = self._partial
            self._current_consumer = self._HEADER
        else:
            self._current_consumer = self._BUFFER_HEADER
    def _assume_text(self, fragment):
        # Guard: this state only accepts text (unicode) frames
        if not isinstance(fragment, six.text_type):
            raise ValidationError("expected text fragment but received binary fragment for %s" % (self._current_consumer.__name__))
    def _assume_binary(self, fragment):
        # Guard: this state only accepts binary frames
        if not isinstance(fragment, six.binary_type):
            raise ValidationError("expected binary fragment but received text fragment for %s" % (self._current_consumer.__name__))
| {
"repo_name": "msarahan/bokeh",
"path": "bokeh/server/protocol/receiver.py",
"copies": "17",
"size": "3054",
"license": "bsd-3-clause",
"hash": 8954305614819286000,
"line_mean": 30.1632653061,
"line_max": 131,
"alpha_frac": 0.6365422397,
"autogenerated": false,
"ratio": 4.511078286558345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
''' Assemble WebSocket wire message fragments into complete Bokeh Server
message objects that can be processed.
'''
from __future__ import absolute_import
import six
from tornado.concurrent import return_future
from .exceptions import ValidationError
import logging
log = logging.getLogger(__name__)
class Receiver(object):
''' Receive wire message fragments and assemble complete Bokeh server
message objects.
On ``MessageError`` or ``ValidationError``, the receiver will reset its
state and attempt to consume a new message.
The *fragment* received can be either bytes or unicode, depending on
the transport's semantics (WebSocket allows both).
.. code-block:: python
[
# these are required
b'{header}', # serialized header dict
b'{metadata}', # serialized metadata dict
b'{content}, # serialized content dict
# these are optional, and come in pairs; header contains num_buffers
b'{buf_header}', # serialized buffer header dict
b'array' # raw buffer payload data
...
]
The ``header`` fragment will have the form:
.. code-block:: python
header = {
# these are required
'msgid' : <str> # a unique id for the message
'msgtype' : <str> # a message type, e.g. 'ACK', 'PATCH-DOC', etc
# these are optional
'num_buffers' : <int> # the number of additional buffers, if any
}
The ``metadata`` fragment may contain any arbitrary information. It is not
processed by Bokeh for any purpose, but may be useful for external
monitoring or instrumentation tools.
The ``content`` fragment is defined by the specific message type.
'''
def __init__(self, protocol):
''' Configure a Receiver with a specific Bokeh protocol version.
Args:
protocol (Protocol) :
A Bokeh protocol object to use to assemble colleted message
fragments.
'''
self._protocol = protocol
self._current_consumer = self._HEADER
self._message = None
self._buf_header = None
@return_future
def consume(self, fragment, callback=None):
''' Consume individual protocol message fragments.
Args:
fragment (``JSON``) :
A message fragment to assemble. When a complete message is
assembled, the receiver state will reset to begin consuming a
new message.
callback (callable, optional)
Argument required by ``return_future`` decorator
'''
self._current_consumer(fragment)
callback(self._message)
def _HEADER(self, fragment):
self._assume_text(fragment)
self._message = None
self._partial = None
self._fragments = [fragment]
self._current_consumer = self._METADATA
def _METADATA(self, fragment):
self._assume_text(fragment)
self._fragments.append(fragment)
self._current_consumer = self._CONTENT
def _CONTENT(self, fragment):
self._assume_text(fragment)
self._fragments.append(fragment)
header_json, metadata_json, content_json = self._fragments[:3]
self._partial = self._protocol.assemble(header_json, metadata_json, content_json)
self._check_complete()
def _BUFFER_HEADER(self, fragment):
    ''' Hold a buffer header until its binary payload arrives. '''
    self._assume_text(fragment)
    self._buf_header = fragment
    self._current_consumer = self._BUFFER_PAYLOAD
def _BUFFER_PAYLOAD(self, fragment):
    ''' Attach a binary payload to the previously seen buffer header. '''
    self._assume_binary(fragment)
    self._partial.assemble_buffer(self._buf_header, fragment)
    self._check_complete()
def _check_complete(self):
    ''' Publish the message if complete, otherwise expect more buffers. '''
    if not self._partial.complete:
        self._current_consumer = self._BUFFER_HEADER
        return
    self._message = self._partial
    self._current_consumer = self._HEADER
def _assume_text(self, fragment):
    ''' Raise ValidationError unless the fragment is text. '''
    if isinstance(fragment, six.text_type):
        return
    raise ValidationError(
        "expected text fragment but received binary fragment for %s"
        % (self._current_consumer.__name__))
def _assume_binary(self, fragment):
    ''' Raise ValidationError unless the fragment is binary. '''
    if isinstance(fragment, six.binary_type):
        return
    raise ValidationError(
        "expected binary fragment but received text fragment for %s"
        % (self._current_consumer.__name__))
| {
"repo_name": "DuCorey/bokeh",
"path": "bokeh/protocol/receiver.py",
"copies": "5",
"size": "4492",
"license": "bsd-3-clause",
"hash": 401028304959828540,
"line_mean": 32.0294117647,
"line_max": 131,
"alpha_frac": 0.6188780053,
"autogenerated": false,
"ratio": 4.718487394957983,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006810341981910609,
"num_lines": 136
} |
''' Assemble WebSocket wire message fragments into complete Bokeh Server
message objects that can be processed.
'''
from __future__ import absolute_import
import six
from tornado import gen
from .exceptions import ValidationError
import logging
log = logging.getLogger(__name__)
class Receiver(object):
    ''' Assemble complete Bokeh server message objects out of the
    individual wire fragments received over a transport.

    On ``MessageError`` or ``ValidationError`` the receiver resets its
    state and starts consuming a new message.

    Fragments may arrive as either bytes or unicode, depending on the
    transport's semantics (WebSocket allows both). A full message is a
    sequence of fragments::

        [
            # these are required
            b'{header}',        # serialized header dict
            b'{metadata}',      # serialized metadata dict
            b'{content}',       # serialized content dict

            # optional, in pairs; the header carries num_buffers
            b'{buf_header}',    # serialized buffer header dict
            b'array'            # raw buffer payload data
            ...
        ]

    The ``header`` fragment has the form::

        header = {
            # these are required
            'msgid'       : <str>  # a unique id for the message
            'msgtype'     : <str>  # a message type, e.g. 'ACK', 'PATCH-DOC'

            # these are optional
            'num_buffers' : <int>  # number of additional buffers, if any
        }

    The ``metadata`` fragment may contain arbitrary information. Bokeh
    does not process it, but external monitoring or instrumentation
    tools may find it useful.

    The ``content`` fragment is defined by each specific message type.

    '''

    def __init__(self, protocol):
        ''' Configure a Receiver with a specific Bokeh protocol version.

        Args:
            protocol (Protocol) :
                A Bokeh protocol object used to assemble collected
                message fragments.

        '''
        self._protocol = protocol
        self._message = None
        self._buf_header = None
        # the consumer attribute is the assembly state machine's state
        self._current_consumer = self._HEADER

    @gen.coroutine
    def consume(self, fragment):
        ''' Consume an individual protocol message fragment.

        Args:
            fragment (``JSON``) :
                A message fragment to assemble. When a complete message
                has been assembled, receiver state resets so a new
                message can be consumed.

        '''
        self._current_consumer(fragment)
        raise gen.Return(self._message)

    def _HEADER(self, fragment):
        # first fragment of a new message: reset all partial state
        self._assume_text(fragment)
        self._partial = None
        self._message = None
        self._fragments = [fragment]
        self._current_consumer = self._METADATA

    def _METADATA(self, fragment):
        self._assume_text(fragment)
        self._fragments += [fragment]
        self._current_consumer = self._CONTENT

    def _CONTENT(self, fragment):
        self._assume_text(fragment)
        self._fragments += [fragment]
        header_json, metadata_json, content_json = self._fragments[:3]
        self._partial = self._protocol.assemble(
            header_json, metadata_json, content_json)
        self._check_complete()

    def _BUFFER_HEADER(self, fragment):
        self._assume_text(fragment)
        self._buf_header = fragment
        self._current_consumer = self._BUFFER_PAYLOAD

    def _BUFFER_PAYLOAD(self, fragment):
        self._assume_binary(fragment)
        self._partial.assemble_buffer(self._buf_header, fragment)
        self._check_complete()

    def _check_complete(self):
        # publish the message if done, otherwise wait for more buffers
        if not self._partial.complete:
            self._current_consumer = self._BUFFER_HEADER
        else:
            self._message = self._partial
            self._current_consumer = self._HEADER

    def _assume_text(self, fragment):
        if isinstance(fragment, six.text_type):
            return
        raise ValidationError(
            "expected text fragment but received binary fragment for %s"
            % (self._current_consumer.__name__))

    def _assume_binary(self, fragment):
        if isinstance(fragment, six.binary_type):
            return
        raise ValidationError(
            "expected binary fragment but received text fragment for %s"
            % (self._current_consumer.__name__))
| {
"repo_name": "mindriot101/bokeh",
"path": "bokeh/protocol/receiver.py",
"copies": "3",
"size": "4356",
"license": "bsd-3-clause",
"hash": -4086375689410751500,
"line_mean": 31.7518796992,
"line_max": 131,
"alpha_frac": 0.6179981635,
"autogenerated": false,
"ratio": 4.683870967741935,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6801869131241935,
"avg_score": null,
"num_lines": null
} |
'''Assemble what we have and try it out
Sadly we get an error. But at n=6 so might be easy to fix. In
winter 2014-15 calculations we got past 6.
>>> doit_G(2, 3)
array([[[1, 0, 0, 0, 0, 1, 0, 1],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 1]],
<BLANKLINE>
[[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1]]])
>>> doit_G(3, 3)
array([[[1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0]],
<BLANKLINE>
[[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 2, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0]],
<BLANKLINE>
[[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]])
>>> for n in range(2, 11):
... for m in range(1, n):
... if 2 * m <= n:
... print(n, m, stats(n - m, m))
...
2 1 [(1, 2)]
3 1 [(0, 3), (1, 3)]
4 1 [(0, 9), (1, 6)]
4 2 [(0, 14), (1, 6)]
5 1 [(0, 31), (1, 9)]
5 2 [(0, 37), (1, 11)]
6 1 [(0, 88), (1, 16)]
6 2 [(0, 112), (1, 18)]
6 3 [(0, 92), (1, 24), (2, 1)]
7 1 [(0, 247), (1, 26)]
7 2 [(0, 305), (1, 31)]
7 3 [(0, 273), (1, 40), (2, 2)]
8 1 [(0, 671), (1, 43)]
8 2 [(0, 831), (1, 53)]
8 3 [(-1, 2), (0, 735), (1, 75), (2, 4)]
8 4 [(-2, 1), (-1, 2), (0, 772), (1, 69), (2, 6)]
9 1 [(0, 1798), (1, 72)]
9 2 [(0, 2223), (1, 87)]
9 3 [(-1, 4), (0, 1998), (1, 136), (2, 6), (3, 1)]
9 4 [(-2, 2), (-1, 9), (0, 2045), (1, 132), (2, 12)]
10 1 [(0, 4778), (1, 117)]
10 2 [(0, 5902), (1, 150)]
10 3 [(-1, 12), (0, 5345), (1, 237), (2, 11), (3, 2)]
10 4 [(-2, 4), (-1, 24), (0, 5479), (1, 254), (2, 22), (3, 2)]
10 5 [(-2, 8), (-1, 29), (0, 5374), (1, 257), (2, 28)]
# Product in CDR is very nice - no negatives, many zeros, all entries
# small.
>>> for n in range(2, 11):
... for m in range(1, n):
... if 2 * m <= n:
... print(n, m, stats_CDR(n - m, m))
...
2 1 [(1, 2)]
3 1 [(0, 3), (1, 3)]
4 1 [(0, 10), (1, 5)]
4 2 [(0, 14), (1, 6)]
5 1 [(0, 32), (1, 8)]
5 2 [(0, 39), (1, 9)]
6 1 [(0, 91), (1, 13)]
6 2 [(0, 115), (1, 15)]
6 3 [(0, 101), (1, 15), (2, 1)]
7 1 [(0, 252), (1, 21)]
7 2 [(0, 312), (1, 24)]
7 3 [(0, 288), (1, 26), (2, 1)]
8 1 [(0, 680), (1, 34)]
8 2 [(0, 845), (1, 39)]
8 3 [(0, 769), (1, 45), (2, 2)]
8 4 [(0, 803), (1, 44), (2, 3)]
9 1 [(0, 1815), (1, 55)]
9 2 [(0, 2247), (1, 63)]
9 3 [(0, 2066), (1, 76), (2, 2), (3, 1)]
9 4 [(0, 2116), (1, 80), (2, 4)]
10 1 [(0, 4806), (1, 89)]
10 2 [(0, 5950), (1, 102)]
10 3 [(0, 5472), (1, 129), (2, 5), (3, 1)]
10 4 [(0, 5639), (1, 139), (2, 6), (3, 1)]
10 5 [(0, 5542), (1, 144), (2, 10)]
# Cone in CDR has some negatives, otherwise many zeros and the rest
# ones.
# Number of negatives is probably Fibonacci numbers - 1 (A000071).
# Number of ones is probably Lucas numbers (A000032).
>>> for d in range(11):
... counter = Counter(C_in_CDR[d].flatten())
... print(d, sorted(counter.items()))
0 [(1, 1)]
1 [(0, 1), (1, 1)]
2 [(0, 3), (1, 3)]
3 [(-1, 1), (0, 10), (1, 4)]
4 [(-1, 2), (0, 31), (1, 7)]
5 [(-1, 4), (0, 89), (1, 11)]
6 [(-1, 7), (0, 248), (1, 18)]
7 [(-1, 12), (0, 673), (1, 29)]
8 [(-1, 20), (0, 1803), (1, 47)]
9 [(-1, 33), (0, 4786), (1, 76)]
10 [(-1, 54), (0, 12639), (1, 123)]
Now try CDRv2. Get some negatives in the product. But not bad for
simplest thing that could possibly work. All seems good, up to error
in (6, 3). Investigate here?
>>> for n in range(2, 11):
... for m in range(1, n):
... if 2 * m <= n:
... print(n, m, stats_CDRv2(n - m, m))
2 1 [(1, 2)]
3 1 [(0, 3), (1, 3)]
4 1 [(0, 9), (1, 6)]
4 2 [(0, 14), (1, 6)]
5 1 [(0, 31), (1, 9)]
5 2 [(0, 37), (1, 11)]
6 1 [(0, 88), (1, 16)]
6 2 [(0, 112), (1, 18)]
6 3 [(-2, 1), (0, 92), (1, 23), (2, 1)]
7 1 [(0, 248), (1, 25)]
7 2 [(0, 305), (1, 31)]
7 3 [(-2, 3), (0, 272), (1, 38), (2, 2)]
8 1 [(0, 672), (1, 42)]
8 2 [(0, 834), (1, 50)]
8 3 [(-2, 7), (-1, 2), (0, 732), (1, 71), (2, 4)]
8 4 [(-4, 1), (-2, 8), (-1, 1), (0, 768), (1, 64), (2, 8)]
9 1 [(0, 1803), (1, 67)]
9 2 [(0, 2227), (1, 83)]
9 3 [(-2, 15), (-1, 6), (0, 1997), (1, 119), (2, 7), (3, 1)]
9 4 [(-4, 2), (-2, 19), (-1, 9), (0, 2031), (1, 122), (2, 17)]
10 1 [(0, 4785), (1, 110)]
10 2 [(0, 5918), (1, 134)]
10 3 [(-2, 30), (-1, 16), (0, 5336), (1, 208), (2, 15), (3, 2)]
10 4 [(-4, 4), (-2, 45), (-1, 25), (0, 5457), (1, 215), (2, 38), (3, 1)]
10 5 [(-4, 5), (-2, 46), (-1, 35), (0, 5332), (1, 236), (2, 42)]
# The C rule is still good, however.
>>> for d in range(11):
... counter = Counter(C_in_CDRv2[d].flatten())
... print(d, sorted(counter.items()))
0 [(1, 1)]
1 [(0, 1), (1, 1)]
2 [(0, 3), (1, 3)]
3 [(0, 11), (1, 4)]
4 [(0, 33), (1, 7)]
5 [(0, 93), (1, 11)]
6 [(0, 255), (1, 18)]
7 [(0, 685), (1, 29)]
8 [(0, 1823), (1, 47)]
9 [(0, 4819), (1, 76)]
10 [(0, 12693), (1, 123)]
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__metaclass__ = type
import itertools
import numpy
from collections import Counter
from .matrices import G_from_FLAG
from .matrices import IC_from_G
from .matrices import CDR_from_FLAG
from .matrices import C_in_CDR
from .matrices import IC_from_CDR
from .matrices import CDRv2_from_FLAG
from .matrices import C_in_CDRv2
from .matrices import IC_from_CDRv2
from .product import product_formula
from .product import change_product_basis
# Template for copy, paste and edit.
def doit_G(n, m):
    """Product formula for degrees (n, m), expressed in the G basis."""
    formula = product_formula(n, m)
    return change_product_basis(formula, IC_from_G[n], IC_from_G[m],
                                G_from_FLAG[n + m])
def stats(n, m):
    """Sorted (value, count) pairs over the entries of doit_G(n, m)."""
    return sorted(Counter(doit_G(n, m).flatten()).items())
# Copy, paste and edit of previous code.
def doit_CDR(n, m):
    """Product formula for degrees (n, m), expressed in the CDR basis."""
    formula = product_formula(n, m)
    return change_product_basis(formula, IC_from_CDR[n], IC_from_CDR[m],
                                CDR_from_FLAG[n + m])
def stats_CDR(n, m):
    """Sorted (value, count) pairs over the entries of doit_CDR(n, m)."""
    return sorted(Counter(doit_CDR(n, m).flatten()).items())
# Copy, paste and edit of previous code.
def doit_CDRv2(n, m):
    """Product formula for degrees (n, m), expressed in the CDRv2 basis."""
    formula = product_formula(n, m)
    return change_product_basis(formula, IC_from_CDRv2[n], IC_from_CDRv2[m],
                                CDRv2_from_FLAG[n + m])
def stats_CDRv2(n, m):
    """Sorted (value, count) pairs over the entries of doit_CDRv2(n, m)."""
    return sorted(Counter(doit_CDRv2(n, m).flatten()).items())
| {
"repo_name": "jfine2358/py-linhomy",
"path": "py/linhomy/work.py",
"copies": "1",
"size": "6556",
"license": "mit",
"hash": -1068034642474522000,
"line_mean": 26.2033195021,
"line_max": 72,
"alpha_frac": 0.4655277608,
"autogenerated": false,
"ratio": 2.146692861820563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.31122206226205634,
"avg_score": null,
"num_lines": null
} |
# Assembling likelihood functions for Gyrochronology, isochrones, dynamics
# and asteroseismology.
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import scipy.misc as spm
from isochrones import StarModel
from isochrones.mist import MIST_Isochrone
from simple_gibbs import gibbs
import emcee
import corner
import priors
def gc_model(ln_age, bv):
    """
    Gyrochronology model: predict a rotation period from age and colour.

    parameters:
    ----------
    ln_age: (float or array)
        Natural log of age; converted internally via exp(ln_age)*1e3
        (presumably Gyr -> Myr — confirm with callers).
    bv: (array or float)
        B-V colour.

    Returns the predicted rotation period, a * t**n * (B-V - .4)**b,
    with fixed coefficients a, b, n.
    """
    a, b, n = [.7725, .601, .5189]
    age_myr = np.exp(ln_age) * 1e3
    return a * age_myr**n * (bv - .4)**b
def gc_lnlike(params, period, period_errs, bv, bv_errs):
    """
    Gaussian log-likelihood of rotation periods given model parameters.

    parameters:
    ----------
    params: (array)
        Parameter array passed straight through to gc_model as its
        ln_age argument.
    period: (array)
        Rotation periods (days) with uncertainties period_errs.
    bv: (array)
        B-V colours with uncertainties bv_errs (bv_errs is unused here).
    """
    predicted = gc_model(params, bv)
    residuals = (period - predicted) / period_errs
    return sum(-.5 * residuals**2)
def lnprior(params):
    """Log-prior: broad box bounds on all params plus an age prior."""
    N = int(len(params[3:]) / 5)          # number of stars
    ln_age = params[3 + N:3 + 2 * N]      # slice out the ln(age) params
    age_prior = sum(np.log(priors.age_prior(np.log10(1e9 * np.exp(a))))
                    for a in ln_age)
    in_bounds = (-10 < params) * (params < 10)  # broad bounds on all params
    if sum(in_bounds) == len(in_bounds):
        return age_prior
    return -np.inf
def lnprob(params, period, period_errs, bv, bv_errs):
    """
    The joint log-probability: gyro likelihood plus prior.

    mod: (list)
        list of pre-computed star model objects.
    gyro: (bool)
        If True, the gyro likelihood will be used.
    iso: (bool)
        If True, the iso likelihood will be used.
    """
    likelihood = gc_lnlike(params, period, period_errs, bv, bv_errs)
    return likelihood + lnprior(params)
if __name__ == "__main__":
    # The parameters.
    # NOTE(review): p0 is passed straight through lnprob -> gc_lnlike ->
    # gc_model as the ln_age argument; presumably one ln(age) per star —
    # confirm against the intended parameterization.
    p0 = np.array([4.56, .5])
    periods = np.array([26., 8.3])
    period_errs = np.array([.1, .1])
    bvs = np.array([.65, .65])
    bv_errs = np.array([.01, .01])

    # Plot the data
    # xs = np.linspace(.1, 6, 100)
    # ps = gc_model(p0[:3], np.log(xs), p0[3], bvs[0])
    # plt.clf()
    # plt.plot(ages, periods, "k.")
    # plt.plot(xs, ps)
    # plt.xlabel("Age (Gyr)")
    # plt.ylabel("Period (days)")
    # plt.savefig("period_age_data")

    # test the gyro lhf
    print("gyro_lnlike = ", gc_lnlike(p0, periods, period_errs, bvs, bv_errs))

    # test the lnprob.
    print("lnprob = ", lnprob(p0, periods, period_errs, bvs, bv_errs))

    start = time.time()

    # Run emcee: walkers initialized in a tiny ball around p0.
    nwalkers, nsteps, ndim = 64, 10000, len(p0)
    p0 = [1e-4*np.random.rand(ndim) + p0 for i in range(nwalkers)]
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
                                    args=[periods, period_errs, bvs, bv_errs])
    print("burning in...")
    # Burn-in: discard the first 2000 steps, keep final walker positions.
    pos, _, _ = sampler.run_mcmc(p0, 2000)
    sampler.reset()
    print("production run...")
    sampler.run_mcmc(pos, nsteps)
    end = time.time()
    print("Time taken = ", (end - start)/60., "mins")

    # Corner plot over the flattened chain.
    flat = np.reshape(sampler.chain, (nwalkers*nsteps, ndim))
    fig = corner.corner(flat)
    fig.savefig("corner_test")

    # Plot probability trace per walker.
    plt.clf()
    plt.plot(sampler.lnprobability.T, "k")
    plt.savefig("prob_trace")

    # Plot chains, one trace figure per dimension.
    for i in range(ndim):
        plt.clf()
        plt.plot(sampler.chain[:, :, i].T, alpha=.5)
        plt.savefig("{}_trace".format(i))
| {
"repo_name": "RuthAngus/chronometer",
"path": "chronometer/age_test.py",
"copies": "1",
"size": "3725",
"license": "mit",
"hash": 9122150168786762000,
"line_mean": 27.2196969697,
"line_max": 78,
"alpha_frac": 0.5973154362,
"autogenerated": false,
"ratio": 3.0887230514096187,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41860384876096185,
"avg_score": null,
"num_lines": null
} |
# assembling
import dis
import struct
from error import *
# Map each comparison operator name (e.g. '<', '==') to its COMPARE_OP
# argument index.
compare_op = {name: index for index, name in enumerate(dis.cmp_op)}
def assemble(tuples):
    """Assemble a sequence of (opname, arg) pairs into one code string."""
    parts = [assemble_instruction(*tup) for tup in tuples]
    return ''.join(parts)
def assemble_instruction(opname, arg=None):
    """Assemble one instruction given its mnemonic and optional argument."""
    opcode = dis.opmap[opname]
    return assemble_opcode_arg(opcode, arg)
def assemble_opcode_arg(opcode, arg=None):
    """Encode one opcode (plus little-endian argument) as a code string.

    Arguments wider than 16 bits are emitted via an EXTENDED_ARG prefix
    carrying the high half. NOTE: this targets the classic 2-byte-argument
    bytecode format and Python 2 str semantics.
    """
    if opcode < dis.HAVE_ARGUMENT:
        return chr(opcode)
    if arg is None:
        raise NoodleCompilerError("op %s given without argument" %
                                  dis.opname[opcode])
    if arg > 0xffff:
        packed = struct.pack('<L', arg)
        # EXTENDED_ARG carries the high 16 bits, the real op the low 16
        return chr(dis.EXTENDED_ARG) + packed[2:] + chr(opcode) + packed[:2]
    return chr(opcode) + struct.pack('<H', arg)
# Net stack-depth effect of each opcode whose effect does not depend on
# its argument. Argument-dependent opcodes are handled by stackchange()
# below; anything missing from both defaults to 0.
stackchanges = {
    'POP_TOP': -1,
    'DUP_TOP': 1,
    'SLICE+1': -1,
    'SLICE+2': -1,
    'SLICE+3': -2,
    'STORE_SLICE+0': -2,
    'STORE_SLICE+1': -3,
    'STORE_SLICE+2': -3,
    'STORE_SLICE+3': -4,
    'DELETE_SLICE+0': -1,
    'DELETE_SLICE+1': -2,
    'DELETE_SLICE+2': -2,
    'DELETE_SLICE+3': -3,
    'STORE_SUBSCR': -3,
    'DELETE_SUBSCR': -2,
    'PRINT_EXPR': -1,
    'PRINT_ITEM': -1,
    'PRINT_ITEM_TO': -2,
    'PRINT_NEWLINE_TO': -1,
    'LOAD_LOCALS': 1,
    'RETURN_VALUE': -1,
    'YIELD_VALUE': -1,
    'IMPORT_STAR': -1,
    'EXEC_STMT': -3,
    'BUILD_CLASS': -2,
    'STORE_NAME': -1,
    'STORE_ATTR': -2,
    'DELETE_ATTR': -1,
    'STORE_GLOBAL': -1,
    'LOAD_CONST': 1,
    'LOAD_NAME': 1,
    'BUILD_MAP': 1,
    'COMPARE_OP': -1,
    'IMPORT_NAME': 0,
    'IMPORT_FROM': 1,
    'FOR_ITER': 1,
    'LOAD_GLOBAL': 1,
    'LOAD_FAST': 1,
    'STORE_FAST': -1,
    'LOAD_CLOSURE': 1,
    'LOAD_DEREF': 1,
    'STORE_DEREF': -1,
    'LIST_APPEND': -2
}
def stackchange(opname, arg):
    """Return the net stack-depth change for one instruction.

    Argument-dependent opcodes are handled explicitly; everything else
    falls back to the constant stackchanges table (default 0).
    """
    def call_change(a):
        # number of positionals + twice the number of keyword args
        return -((a & 0xff) + 2 * (a >> 8))

    if opname.startswith(('BINARY_', 'INPLACE_')):
        return -1
    if opname in ('BUILD_TUPLE', 'BUILD_LIST', 'BUILD_SLICE', 'RAISE_VARARGS'):
        return 1 - arg
    if opname == 'UNPACK_SEQUENCE':
        return arg - 1
    if opname == 'DUP_TOPX':
        return arg
    if opname == 'CALL_FUNCTION':
        return call_change(arg)
    if opname == 'MAKE_FUNCTION':
        return -arg
    if opname == 'MAKE_CLOSURE':
        # not quite correct; should also subtract number of free variables
        # in code object at TOS
        return -arg
    if opname in ('CALL_FUNCTION_VAR', 'CALL_FUNCTION_KW'):
        return call_change(arg) - 1
    if opname == 'CALL_FUNCTION_VAR_KW':
        return call_change(arg) - 2
    return stackchanges.get(opname, 0)
| {
"repo_name": "thepaul/noodle",
"path": "Noodle/assembling.py",
"copies": "1",
"size": "2854",
"license": "mit",
"hash": -6815999519838767000,
"line_mean": 26.9803921569,
"line_max": 81,
"alpha_frac": 0.5543097407,
"autogenerated": false,
"ratio": 2.9698231009365244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9013814434813752,
"avg_score": 0.0020636813645545614,
"num_lines": 102
} |
'''Assembly (byte code) utilities
'''
from __future__ import absolute_import
if __name__ == '__main__':
    # When run as a script, import this module under its package name
    # (presumably so the module executes with a single canonical
    # identity) and then stop.
    import jamenson.compiler.asm
    exit()
import byteplay as B
from .util import collect_list
from ..runtime import ore as O
def finalize_instructions(ops):
    """Fix line numbers and guarantee the code ends with RETURN_VALUE."""
    ops = fix_set_lineno(ops)
    needs_return = not ops or ops[-1][0] != B.RETURN_VALUE
    if needs_return:
        ops.append((B.RETURN_VALUE, None))
    return ops
@collect_list
def fix_set_lineno(ops):
    '''ensure SetLinenos are monotonically increasing, and remove extras
    '''
    last_lineno = None
    for op, arg in ops:
        if op is not B.SetLineno:
            yield op, arg
        elif last_lineno is None or arg > last_lineno:
            # BUG FIX: the old code relied on Python 2's `number > None`
            # being True; on Python 3 that comparison raises TypeError,
            # so the first-SetLineno case is now handled explicitly.
            last_lineno = arg
            yield op, arg
def first_lineno(ops):
    """Return the argument of the first SetLineno, or None if absent."""
    return next((arg for op, arg in ops if op is B.SetLineno), None)
#: registry of peephole passes, applied in registration order
optimizations = []

def optimization(func):
    """Decorator: register *func* as a peephole optimization pass."""
    optimizations.append(func)
    return func
def optimize_instructions(ops):
    '''peep hole optimizations on operations (no global analysis)
    '''
    for opt_pass in optimizations:
        ops = opt_pass(ops)
    return ops
@optimization
@collect_list
def dead_code_elimination(ops):
    '''eliminate anything after unconditional jumps until label
    '''
    unconditional = B.JUMP_FORWARD, B.JUMP_ABSOLUTE
    skipping = False
    for op, arg in ops:
        if skipping:
            # drop instructions until a jump target makes code reachable
            if isinstance(op, B.Label):
                skipping = False
                yield op, arg
        else:
            yield op, arg
            if op in unconditional:
                skipping = True
@optimization
def shortcut_jumps(ops):
    '''find jumps that land on other jumps and use last jump
    '''
    # Not yet implemented: pass the instruction stream through unchanged.
    return ops
# ore helper: match one bytecode operation (its type, optionally its arg)
def Op(type, arg=None):
    type_pattern = type if isinstance(type, O.MatchBase) else O.Eq(type)
    arg_pattern = O.succeed if arg is None else arg
    return O.Seq(type_pattern, arg_pattern)
def replacer(pattern):
    """Build a decorator that rewrites every match of *pattern* in ops.

    The pattern is normalized to an Or of Seq alternatives; the decorated
    function receives each match and yields replacement instructions.
    """
    if isinstance(pattern, list):
        pattern = O.Seq(*pattern)
    pattern = O.as_ore(pattern)
    if not isinstance(pattern, (O.Seq, O.Or)):
        pattern = O.Seq(pattern)
    if isinstance(pattern, O.Seq):
        pattern = O.Or(pattern)
    assert isinstance(pattern, O.Or)
    for alternative in pattern.children:
        assert isinstance(alternative, O.Seq)

    def decorate(func):
        def run(ops):
            return list(O.replace_one_of(pattern.children, func, ops))
        return run
    return decorate
# # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# reverse not jumps logic #
#-------------------------------------------------------#
# replace [NOT, JUMP_IF_FALSE] with [JUMP_IF_TRUE] and #
# [NOT, JUMP_IF_TRUE] with [JUMP_IF_FALSE] #
#-------------------------------------------------------#
# Pattern: UNARY_NOT immediately followed by a conditional jump; the
# jump opcode is captured as 'op' and its target as 'label'.
reverse_condition_jumps_ore = [
    Op(B.UNARY_NOT),
    Op(O.Save("op", O.Or(O.Eq(B.JUMP_IF_FALSE),
                         O.Eq(B.JUMP_IF_TRUE))),
       O.Save("label"))]
# Each conditional jump maps to its logical opposite.
reverse_condition_jumps_map = {B.JUMP_IF_FALSE : B.JUMP_IF_TRUE,
                               B.JUMP_IF_TRUE : B.JUMP_IF_FALSE}
@optimization
@replacer(reverse_condition_jumps_ore)
def reverse_not_jumps_logic(match):
    """Replace [NOT, JUMP_IF_x] with the single opposite conditional jump."""
    jump_op = reverse_condition_jumps_map[match.load('op')]
    yield [jump_op, match.load('label')]
# # # # # # # # # # # # # # # # # # # #
# reverse is logic #
#-------------------------------------#
# replace [NOT, IS] with [IS_NOT] and #
# [NOT, IS_NOT] with [IS] #
#-------------------------------------#
reverse_is_logic_ore = [
    Op(B.UNARY_NOT),
    Op(B.COMPARE_OP,
       O.Save("compare", O.Or(O.Eq('is'), O.Eq('isnot'))))]
# NOTE(review): byteplay spells COMPARE_OP arguments 'is' / 'is not';
# confirm that 'isnot' actually matches the emitted argument here.
reverse_is_logic_map = {'isnot' : 'is',
                        'is' : 'isnot'}
@optimization
@replacer(reverse_is_logic_ore)
def reverse_is_logic(match):
    """Replace [NOT, COMPARE_OP is/isnot] with the opposite comparison."""
    # BUG FIX: the lookup must use reverse_is_logic_map (defined above,
    # parallel to reverse_condition_jumps_map); the old code subscripted
    # the function object itself, which raised TypeError when this pass
    # ever matched.
    yield B.COMPARE_OP, reverse_is_logic_map[match.load('compare')]
# # # # # # # # # # # # # # # # # # # # # # # # # # #
# simplify_redundant_loads #
#---------------------------------------------------#
# replace [STORE x, LOAD x] with [DUPTOP, STORE x] #
#---------------------------------------------------#
# O.Load('arg') constrains the LOAD to name the same variable the
# preceding STORE (saved as 'op') just wrote; one alternative per
# matching FAST/DEREF store-load pair.
simplify_redundant_loads_ore = \
    O.Or(*(O.Seq(O.Save('op', Op(store, O.Save('arg'))),
                 Op(load, O.Eq(O.Load('arg'))))
           for store,load in [[B.STORE_FAST, B.LOAD_FAST],
                              [B.STORE_DEREF, B.LOAD_DEREF]]))
@optimization
@replacer(simplify_redundant_loads_ore)
def simplify_redundant_loads(match):
    '''replace [STORE x, LOAD x] with [DUPTOP, STORE x]
    '''
    store_op, store_arg = match.loads('op', 'arg')
    yield B.DUP_TOP, None
    yield store_op
| {
"repo_name": "matthagy/Jamenson",
"path": "jamenson/compiler/asm.py",
"copies": "1",
"size": "4647",
"license": "apache-2.0",
"hash": -5285109826822895000,
"line_mean": 27.8633540373,
"line_max": 78,
"alpha_frac": 0.5295889821,
"autogenerated": false,
"ratio": 3.3431654676258993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.932897521093014,
"avg_score": 0.008755847759151739,
"num_lines": 161
} |
"""Assembly function."""
from typing import Optional, Tuple
from hdltools.binutils.instruction import AsmInstruction
from hdltools.binutils import AsmObject
class AsmFunction(AsmObject):
    """A named, addressed assembly function and its instruction list."""

    def __init__(
        self,
        name: str,
        address: int,
        instructions: Optional[Tuple[AsmInstruction, ...]] = None,
        **kwargs
    ):
        """Initialize.

        Args:
            name: symbol name of the function.
            address: start address of the function.
            instructions: instructions making up the body; defaults to
                an empty list when omitted.
                (FIX: the annotation was ``Tuple[AsmInstruction]``,
                which means a 1-tuple; variadic ``Tuple[..., ...]`` is
                intended.)
            kwargs: forwarded to AsmObject.
        """
        super().__init__(**kwargs)
        self._name = name
        self._addr = address
        # NOTE: a caller-supplied sequence is stored as-is (possibly a
        # tuple); the default is a fresh mutable list.
        if instructions is not None:
            self._instructions = instructions
        else:
            self._instructions = []

    @property
    def name(self):
        """Get name."""
        return self._name

    @property
    def address(self):
        """Get address."""
        return self._addr

    @property
    def instructions(self):
        """Get instructions."""
        return self._instructions

    def __repr__(self):
        """Get representation, e.g. ``main @0x10``."""
        return "{} @{}".format(self.name, hex(self.address))
| {
"repo_name": "brunosmmm/hdltools",
"path": "hdltools/binutils/function.py",
"copies": "1",
"size": "1028",
"license": "mit",
"hash": 2910594776285354500,
"line_mean": 22.3636363636,
"line_max": 61,
"alpha_frac": 0.563229572,
"autogenerated": false,
"ratio": 4.589285714285714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 44
} |
"""Assembly instruction."""
from typing import Optional, Any
from enum import Enum, auto
from hdltools.binutils import AsmObject
class InstructionType(Enum):
    """Abstract instruction type."""

    # generic placeholder type; auto() values depend on member order
    UNKNOWN = auto()
class MemoryInstructionType(Enum):
    """Abstract memory instruction types."""

    # read from memory / write to memory
    LOAD = auto()
    STORE = auto()
class JumpInstructionType(Enum):
    """Abstract jump instruction types."""

    RELATIVE = auto()
    ABSOLUTE = auto()
    # call / return from a subroutine
    SUBROUTINE = auto()
    RETURN = auto()
class InstructionClass(Enum):
    """Abstract instruction classes."""

    MEMORY = auto()
    CONTROL = auto()
    ARITHMETIC = auto()
    LOGIC = auto()
    JUMP = auto()
    REGISTER = auto()
    # fallback for instructions not (yet) classified
    UNKNOWN = auto()
# Map from abstract instruction class to its class-specific type enum.
# BUG FIX: the keys were the bare names MEMORY / JUMP / UNKNOWN, which
# are undefined at module scope (they are members of InstructionClass)
# and raised NameError the moment this module was imported.
INSTRUCTION_TYPES = {
    InstructionClass.MEMORY: MemoryInstructionType,
    InstructionClass.JUMP: JumpInstructionType,
    InstructionClass.UNKNOWN: InstructionType,
}
class MetaAsmInstruction(type):
    """Metaclass exposing class-level instruction class/type accessors."""

    @property
    def instruction_type(cls):
        """Abstract type of the instruction, read from the class."""
        return cls._TYPE

    @property
    def instruction_class(cls):
        """Abstract class of the instruction, read from the class."""
        return cls._CLASS
class AsmInstruction(AsmObject, metaclass=MetaAsmInstruction):
    """One assembly instruction: address, opcode, optional disassembly."""

    _CLASS = InstructionClass.UNKNOWN
    _TYPE = InstructionType.UNKNOWN

    def __init__(
        self,
        address: int,
        opcode: int,
        asm: Optional[str] = None,
        parent: Optional[Any] = None,
        **kwargs
    ):
        """Store address, opcode, assembly text and optional parent."""
        super().__init__(**kwargs)
        self._addr = address
        self._opcode = opcode
        self._asm = asm
        self._parent = parent

    @property
    def address(self):
        """Instruction address."""
        return self._addr

    @property
    def opcode(self):
        """Numeric opcode."""
        return self._opcode

    @property
    def assembly(self):
        """Disassembled text, if available."""
        return self._asm

    @property
    def parent(self):
        """Owning object, if any."""
        return self._parent

    @property
    def instruction_class(self):
        """Abstract instruction class (instance-level accessor)."""
        return type(self)._CLASS

    @property
    def instruction_type(self):
        """Abstract instruction type (instance-level accessor)."""
        return type(self)._TYPE

    def __repr__(self):
        """Representation: ``hex(addr): hex(opcode) (assembly)``."""
        return "{}: {} ({})".format(
            hex(self.address), hex(self.opcode), self.assembly
        )
| {
"repo_name": "brunosmmm/hdltools",
"path": "hdltools/binutils/instruction.py",
"copies": "1",
"size": "2449",
"license": "mit",
"hash": -5175874070656232000,
"line_mean": 19.9316239316,
"line_max": 62,
"alpha_frac": 0.5802368314,
"autogenerated": false,
"ratio": 4.365418894830659,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5445655726230659,
"avg_score": null,
"num_lines": null
} |
"""assert functions from numpy and pandas testing
"""
import re
from distutils.version import StrictVersion, LooseVersion
import numpy as np
import numpy.testing as npt
import pandas
import pandas.util.testing as pdt
# for pandas version check
def strip_rc(version):
    """Drop a trailing release-candidate marker, e.g. '0.8.0rc1' -> '0.8.0'."""
    return re.compile(r"rc\d+$").sub("", version)
def is_pandas_min_version(min_version):
    '''check whether pandas is at least min_version
    '''
    from pandas import __version__ as pversion
    installed = LooseVersion(pversion)
    return installed >= min_version
# local copies, all unchanged
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_approx_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_equal, assert_array_less,
assert_array_max_ulp, assert_raises, assert_string_equal, assert_warns)
# adjusted functions
if not is_pandas_min_version('0.14.1'):
    def assert_equal(actual, desired, err_msg='', verbose=True, **kwds):
        """Equality assert via numpy (older pandas; **kwds ignored)."""
        # BUG FIX: forward the caller's err_msg/verbose instead of
        # always passing the literal defaults, which silently discarded
        # any custom failure message.
        npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
else:
    def assert_equal(actual, desired, err_msg='', verbose=True, **kwds):
        """Equality assert dispatching on pandas containers.

        Index/Series/DataFrame go to pandas' testing helpers (**kwds
        forwarded for Series/DataFrame); everything else to numpy's
        assert_equal with the caller's err_msg/verbose.
        """
        if isinstance(desired, pandas.Index):
            pdt.assert_index_equal(actual, desired)
        elif isinstance(desired, pandas.Series):
            pdt.assert_series_equal(actual, desired, **kwds)
        elif isinstance(desired, pandas.DataFrame):
            pdt.assert_frame_equal(actual, desired, **kwds)
        else:
            npt.assert_equal(actual, desired, err_msg=err_msg,
                             verbose=verbose)
| {
"repo_name": "kiyoto/statsmodels",
"path": "statsmodels/tools/testing.py",
"copies": "6",
"size": "1504",
"license": "bsd-3-clause",
"hash": -5062789264598751000,
"line_mean": 31.6956521739,
"line_max": 76,
"alpha_frac": 0.6934840426,
"autogenerated": false,
"ratio": 3.7044334975369457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7397917540136946,
"avg_score": null,
"num_lines": null
} |
"""assert functions from numpy and pandas testing
"""
import re
from distutils.version import StrictVersion
import numpy as np
import numpy.testing as npt
import pandas
import pandas.util.testing as pdt
# for pandas version check
def strip_rc(version):
    """Remove any trailing 'rcN' marker from a version string."""
    marker = re.search(r"rc\d+$", version)
    if marker is None:
        return version
    return version[:marker.start()]
def is_pandas_min_version(min_version):
    '''check whether pandas is at least min_version
    '''
    from pandas.version import short_version as pversion
    installed = StrictVersion(strip_rc(pversion))
    return installed >= min_version
# local copies, all unchanged
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_approx_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_array_equal, assert_array_less,
assert_array_max_ulp, assert_raises, assert_string_equal, assert_warns)
# adjusted functions
def assert_equal(actual, desired, err_msg='', verbose=True, **kwds):
    """Equality assert that understands pandas containers.

    With pandas >= 0.14.1, Index/Series/DataFrame go to pandas' testing
    helpers (**kwds forwarded for Series/DataFrame); otherwise numpy's
    assert_equal is used with the caller's err_msg/verbose.
    """
    if not is_pandas_min_version('0.14.1'):
        # BUG FIX: forward err_msg/verbose rather than the literal
        # defaults, which silently discarded custom failure messages.
        npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
    elif isinstance(desired, pandas.Index):
        pdt.assert_index_equal(actual, desired)
    elif isinstance(desired, pandas.Series):
        pdt.assert_series_equal(actual, desired, **kwds)
    elif isinstance(desired, pandas.DataFrame):
        pdt.assert_frame_equal(actual, desired, **kwds)
    else:
        npt.assert_equal(actual, desired, err_msg=err_msg, verbose=verbose)
| {
"repo_name": "rgommers/statsmodels",
"path": "statsmodels/tools/testing.py",
"copies": "2",
"size": "1442",
"license": "bsd-3-clause",
"hash": -3064190874507549700,
"line_mean": 31.0444444444,
"line_max": 76,
"alpha_frac": 0.6955617198,
"autogenerated": false,
"ratio": 3.6974358974358976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0028639046334741685,
"num_lines": 45
} |
# Fail fast at import time if the installed SQLAlchemy is older than 0.6.
# NOTE(review): this compares lists of version-component *strings*, which
# is lexicographic — e.g. ['0', '10'] < ['0', '6'] — so some valid
# versions could be misjudged; consider comparing integer tuples.
assert __import__("pkg_resources").get_distribution(
    "sqlalchemy").version.split('.') >= ['0', '6'], \
    "requires sqlalchemy version 0.6 or greater"
from builtins import next
import warnings
from datetime import datetime
from werkzeug.urls import iri_to_uri
from sqlalchemy import schema, Table, exc, util
from sqlalchemy.schema import Constraint
from sqlalchemy.sql import (text, bindparam, compiler, operators)
from sqlalchemy.sql.expression import (
BindParameter, TextClause, cast, ColumnElement)
from sqlalchemy.sql.schema import Sequence
from sqlalchemy.sql.compiler import BIND_PARAMS, BIND_PARAMS_ESC
from sqlalchemy.sql.ddl import _CreateDropBase
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy.engine import default
from sqlalchemy.sql.functions import GenericFunction
from sqlalchemy.types import (
CHAR, VARCHAR, TIME, NCHAR, NVARCHAR, DATETIME, FLOAT, String, NUMERIC,
INTEGER, SMALLINT, VARBINARY, DECIMAL, TIMESTAMP, UnicodeText, REAL,
Unicode, Text, Float, Binary, UserDefinedType, TypeDecorator)
from sqlalchemy.orm import column_property
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.sql.elements import Grouping, ClauseList
import past.builtins
import pyodbc
class VirtuosoExecutionContext(default.DefaultExecutionContext):
    """Execution context for Virtuoso: identity and sequence handling."""

    def get_lastrowid(self):
        """Fetch the last inserted identity value from the server."""
        # Experimental and unreliable alternative:
        # if self.cursor.lastserial > 0:
        #     return self.cursor.lastserial
        cursor = self.cursor
        cursor.execute("SELECT identity_value() AS lastrowid")
        return int(cursor.fetchone()[0])

    def fire_sequence(self, seq, type_):
        """Advance the named sequence and return its next value."""
        seq_name = self.dialect.identifier_preparer.format_sequence(seq)
        return self._execute_scalar(
            "select sequence_next('%s')" % seq_name, type_)
class VirtuosoSequence(Sequence):
    """Sequence with Virtuoso-specific peek and set helpers."""

    def upcoming_value(self, connection):
        """Return the next value without advancing the sequence."""
        preparer = connection.bind.dialect.identifier_preparer
        seq_name = preparer.format_sequence(self)
        rows = iter(connection.execute(
            "SELECT sequence_set('%s', 0, 1)" % (seq_name,)))
        (val,) = next(rows)
        return int(val)

    def set_value(self, value, connection):
        """Set the sequence counter to *value* without advancing it."""
        preparer = connection.bind.dialect.identifier_preparer
        seq_name = preparer.format_sequence(self)
        connection.execute(
            "SELECT sequence_set('%s', %d, 0)" % (seq_name, value))
# SQL keywords reserved by Virtuoso; identifiers that collide with one of
# these are quoted by VirtuosoIdentifierPreparer.
RESERVED_WORDS = {
    '__cost', '__elastic', '__tag', '__soap_doc', '__soap_docw',
    '__soap_header', '__soap_http', '__soap_name', '__soap_type',
    '__soap_xml_type', '__soap_fault', '__soap_dime_enc', '__soap_enc_mime',
    '__soap_options', 'ada', 'add', 'admin', 'after', 'aggregate', 'all',
    'alter', 'and', 'any', 'are', 'array', 'as', 'asc', 'assembly', 'attach',
    'attribute', 'authorization', 'autoregister', 'backup', 'before', 'begin',
    'best', 'between', 'bigint', 'binary', 'bitmap', 'breakup', 'by', 'c',
    'call', 'called', 'cascade', 'case', 'cast', 'char', 'character', 'check',
    'checked', 'checkpoint', 'close', 'cluster', 'clustered', 'clr',
    'coalesce', 'cobol', 'collate', 'column', 'commit', 'committed',
    'compress', 'constraint', 'constructor', 'contains', 'continue', 'convert',
    'corresponding', 'create', 'cross', 'cube', 'current', 'current_date',
    'current_time', 'current_timestamp', 'cursor', 'data', 'date', 'datetime',
    'decimal', 'declare', 'default', 'delete', 'desc', 'deterministic',
    'disable', 'disconnect', 'distinct', 'do', 'double', 'drop', 'dtd',
    'dynamic', 'else', 'elseif', 'enable', 'encoding', 'end', 'escape',
    'except', 'exclusive', 'execute', 'exists', 'external', 'extract', 'exit',
    'fetch', 'final', 'float', 'for', 'foreach', 'foreign', 'fortran',
    'for_vectored', 'for_rows', 'found', 'from', 'full', 'function', 'general',
    'generated', 'go', 'goto', 'grant', 'group', 'grouping', 'handler',
    'having', 'hash', 'identity', 'identified', 'if', 'in', 'incremental',
    'increment', 'index', 'index_no_fill', 'index_only', 'indicator', 'inner',
    'inout', 'input', 'insert', 'instance', 'instead', 'int', 'integer',
    'intersect', 'internal', 'interval', 'into', 'is', 'isolation', 'iri_id',
    'iri_id_8', 'java', 'join', 'key', 'keyset', 'language', 'left', 'level',
    'library', 'like', 'locator', 'log', 'long', 'loop', 'method', 'modify',
    'modifies', 'module', 'mumps', 'name', 'natural', 'nchar', 'new',
    'nonincremental', 'not', 'no', 'novalidate', 'null', 'nullif', 'numeric',
    'nvarchar', 'object_id', 'of', 'off', 'old', 'on', 'open', 'option', 'or',
    'order', 'out', 'outer', 'overriding', 'partition', 'pascal', 'password',
    'percent', 'permission_set', 'persistent', 'pli', 'position', 'precision',
    'prefetch', 'primary', 'privileges', 'procedure', 'public', 'purge',
    'quietcast', 'rdf_box', 'read', 'reads', 'real', 'ref', 'references',
    'referencing', 'remote', 'rename', 'repeatable', 'replacing',
    'replication', 'resignal', 'restrict', 'result', 'return', 'returns',
    'revoke', 'rexecute', 'right', 'rollback', 'rollup', 'role', 'safe',
    'same_as', 'uncommitted', 'unrestricted', 'schema', 'select', 'self',
    'serializable', 'set', 'sets', 'shutdown', 'smallint', 'snapshot', 'soft',
    'some', 'source', 'sparql', 'specific', 'sql', 'sqlcode', 'sqlexception',
    'sqlstate', 'sqlwarning', 'static', 'start', 'style', 'sync', 'system',
    't_cycles_only', 't_direction', 't_distinct', 't_end_flag', 't_exists',
    't_final_as', 't_in', 't_max', 't_min', 't_no_cycles', 't_no_order',
    't_out', 't_shortest_only', 'table', 'temporary', 'text', 'then', 'ties',
    'time', 'timestamp', 'to', 'top', 'type', 'transaction', 'transitive',
    'trigger', 'under', 'union', 'unique', 'update', 'use', 'user', 'using',
    'validate', 'value', 'values', 'varbinary', 'varchar', 'variable',
    'vector', 'vectored', 'view', 'when', 'whenever', 'where', 'while', 'with',
    'without', 'work', 'xml', 'xpath'}
class VirtuosoIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier quoting for Virtuoso's catalog.schema namespaces."""

    reserved_words = RESERVED_WORDS

    def quote_schema(self, schema, force=None):
        """Quote a possibly dotted ``catalog.schema`` name."""
        if '.' not in schema:
            # Virtuoso needs an extra dot to indicate absent username
            return self.quote(schema, force) + '.'
        catalog, rest = schema.split('.', 1)
        return '.'.join((self.quote(catalog, force),
                         self.quote(rest, force)))

    def format_sequence(self, sequence, use_schema=True):
        """Format a sequence name, stripping the quotes the base class adds."""
        formatted = super(VirtuosoIdentifierPreparer, self).format_sequence(
            sequence, use_schema=use_schema)
        return formatted.strip('"')
class VirtuosoSQLCompiler(compiler.SQLCompiler):
    """Statement compiler emitting Virtuoso's SQL dialect."""

    ansi_bind_rules = True

    # EXTRACT(<field> FROM x) is rewritten with these templates; '%s' is
    # replaced by the compiled source expression.
    extract_map = {
        'day': 'dayofmonth(%s)',
        'dow': 'dayofweek(%s)',
        'doy': 'dayofyear(%s)',
        'epoch': 'msec_time()',
        'hour': 'hour(%s)',
        'microseconds': '0',
        'milliseconds': 'atoi(substring(datestring(%s), 20, 6))',
        'minute': 'minute(%s)',
        'month': 'month(%s)',
        'quarter': 'quarter(%s)',
        'second': 'second(%s)',
        'timezone_hour': 'floor(timezone(%s)/60)',
        'timezone_minute': 'mod(timezone(%s),60)',
        'week': 'week(%s)',
        'year': 'year(%s)'
    }

    def get_select_precolumns(self, select, **kw):
        """Emit DISTINCT / TOP directly after SELECT.

        Virtuoso expresses LIMIT/OFFSET with TOP, which goes before the
        column list rather than at the end of the statement.
        """
        s = select._distinct and "DISTINCT " or ""
        # TODO: check if Virtuoso supports
        # bind params for FIRST / TOP
        if select._limit or select._offset:
            if select._offset:
                # TOP <count>, <skip>; NOTE(review): the '+ 1' implies the
                # skip value is 1-based here -- confirm against Virtuoso docs.
                limit = select._limit or '100000'
                s += "TOP %s, %s " % (limit, select._offset + 1)
            else:
                s += "TOP %s " % (select._limit,)
        return s

    def limit_clause(self, select):
        # Limit in virtuoso is after the select keyword (see
        # get_select_precolumns), so nothing is emitted here.
        return ""

    def visit_now_func(self, fn, **kw):
        return "GETDATE()"

    def visit_sequence(self, seq):
        # Sequences are fired inline via sequence_next().
        return "sequence_next('%s')" % self.preparer.format_sequence(seq)

    def visit_extract(self, extract, **kw):
        """Compile EXTRACT through the extract_map templates."""
        func = self.extract_map.get(extract.field)
        if not func:
            raise exc.CompileError(
                "%s is not a valid extract argument." % extract.field)
        return func % (self.process(extract.expr, **kw), )

    def visit_true(self, expr, **kw):
        # No native boolean literals; use 1/0.
        return '1'

    def visit_false(self, expr, **kw):
        return '0'

    def visit_in_op_binary(self, binary, operator, **kw):
        """ This is beyond absurd. Virtuoso gives weird results on other columns
        when doing a single-value IN clause. Avoid those. """
        if (isinstance(binary.right, Grouping)
                and isinstance(binary.right.element, ClauseList)
                and len(binary.right.element.clauses) == 1):
            # Rewrite "x IN (y)" as "x = y".
            el = binary.right.element.clauses[0]
            return "%s = %s" % (
                self.process(binary.left, **kw),
                self.process(el, **kw))
        return self._generate_generic_binary(binary, " IN ", **kw)

    def visit_binary(self, binary, **kwargs):
        # Inequality renders as ' <> '; when both sides are bind params
        # they are inlined as literals (Virtuoso mishandles "? <> ?").
        if binary.operator == operators.ne:
            if isinstance(binary.left, BindParameter)\
                    and isinstance(binary.right, BindParameter):
                kwargs['literal_binds'] = True
            return self._generate_generic_binary(
                binary, ' <> ', **kwargs)
        return super(VirtuosoSQLCompiler, self).visit_binary(binary, **kwargs)

    def render_literal_value(self, value, type_):
        # IRI ID literals render themselves as IRI_ID_Literal("...").
        if isinstance(value, IRI_ID_Literal):
            return value
        return super(VirtuosoSQLCompiler, self)\
            .render_literal_value(value, type_)

    def visit_sparqlclause(self, sparqlclause, **kw):
        """Compile a SparqlClause into a 'SPARQL ...' pass-through statement."""
        def do_bindparam(m):
            name = m.group(1)
            if name in sparqlclause._bindparams:
                # Called for its side effect of registering the bind param;
                # Virtuoso's SPARQL uses positional '??' placeholders.
                self.process(sparqlclause._bindparams[name], **kw)
            return '??'
        # un-escape any \:params
        text = BIND_PARAMS_ESC.sub(
            lambda m: m.group(1),
            BIND_PARAMS.sub(
                do_bindparam,
                self.post_process_text(sparqlclause.text))
        )
        if sparqlclause.quad_storage:
            text = 'define input:storage %s %s' % (
                sparqlclause.quad_storage, text)
        return 'SPARQL ' + text
class SparqlClause(TextClause):
    """A textual SPARQL query, optionally scoped to a quad storage."""

    __visit_name__ = 'sparqlclause'

    def __init__(self, text, bind=None, quad_storage=None):
        super(SparqlClause, self).__init__(text, bind)
        self.quad_storage = quad_storage

    def columns(self, *cols, **types):
        """Declare result columns, wrapping the clause in an alias."""
        selectable = super(SparqlClause, self).columns(*cols, **types)
        return selectable.alias()
# Virtuoso-specific "LONG" (unbounded) column types.
class LONGVARCHAR(Text):
    # Unbounded narrow-character text.
    __visit_name__ = 'LONG VARCHAR'


class LONGNVARCHAR(UnicodeText):
    # Unbounded wide-character text.
    __visit_name__ = 'LONG NVARCHAR'


class DOUBLEPRECISION(Float):
    # IEEE double precision float.
    __visit_name__ = 'DOUBLE PRECISION'


class LONGVARBINARY(Binary):
    # Unbounded binary blob.
    __visit_name__ = 'LONG VARBINARY'
class CoerceUnicode(TypeDecorator):
    """Unicode type that decodes py2 byte strings and casts binds to NVARCHAR."""
    impl = Unicode
    # Maybe TypeDecorator should delegate? Another story
    python_type = past.builtins.unicode

    def process_bind_param(self, value, dialect):
        # On py2, decode incoming byte strings using the dialect's encoding.
        if util.py2k and isinstance(value, util.binary_type):
            value = value.decode(dialect.encoding)
        return value

    def bind_expression(self, bindvalue):
        # Wrap the bind in CAST(... AS NVARCHAR) via _cast_nvarchar.
        return _cast_nvarchar(bindvalue)
class _cast_nvarchar(ColumnElement):
    # Marker element compiled to CAST(<bind> AS NVARCHAR); used by
    # CoerceUnicode.bind_expression.
    def __init__(self, bindvalue):
        self.bindvalue = bindvalue


@compiles(_cast_nvarchar)
def _compile(element, compiler, **kw):
    # Delegate to the ordinary CAST compilation.
    return compiler.process(cast(element.bindvalue, Unicode), **kw)
class dt_set_tz(GenericFunction):
    """SQL function that sets the timezone offset (minutes) on a DATETIME.

    (The previous docstring, "Convert IRI IDs to int values", was
    copy-pasted from iri_id_num and was wrong.)
    """
    type = DATETIME
    name = "dt_set_tz"

    def __init__(self, adatetime, offset, **kw):
        """Validate argument types loosely, warning on mismatch.

        ``getattr`` (instead of ``__dict__.get``) is used so that
        arguments without an instance ``__dict__`` (e.g. plain ints)
        trigger the warning instead of raising AttributeError.
        """
        if not (isinstance(adatetime, (datetime, DATETIME))
                or isinstance(getattr(adatetime, 'type', None), DATETIME)):
            warnings.warn(
                "dt_set_tz() accepts a DATETIME object as first input.")
        if not (isinstance(offset, (int, INTEGER))
                or isinstance(getattr(offset, 'type', None), INTEGER)):
            warnings.warn(
                "dt_set_tz() accepts a INTEGER object as second input.")
        super(dt_set_tz, self).__init__(adatetime, offset, **kw)
class Timestamp(TypeDecorator):
    """TIMESTAMP column read back as a timezone-adjusted DATETIME."""
    impl = TIMESTAMP
    # Maybe TypeDecorator should delegate? Another story
    python_type = datetime

    def column_expression(self, colexpr):
        # Cast to DATETIME and zero the tz offset on the way out.
        # NOTE(review): assumes stored values need offset 0 -- confirm.
        return dt_set_tz(cast(colexpr, DATETIME), 0)

    # def bind_expression(self, bindvalue):
    #     return _cast_timestamp(bindvalue)

# class _cast_timestamp(ColumnElement):
#     def __init__(self, bindvalue):
#         self.bindvalue = bindvalue

# @compiles(_cast_timestamp)
# def _compile(element, compiler, **kw):
#     return compiler.process(cast(element.bindvalue, DATETIME), **kw)
# Column types accepted by CREATE TEXT INDEX (the non-XML branch of
# VirtuosoDDLCompiler.visit_create_text_index).
TEXT_TYPES = (CHAR, VARCHAR, NCHAR, NVARCHAR, String, UnicodeText,
              Unicode, Text, LONGVARCHAR, LONGNVARCHAR, CoerceUnicode)
class IRI_ID_Literal(str):
    """An internal virtuoso IRI ID, of the form #innnnn."""

    def __str__(self):
        # Render as a Virtuoso IRI_ID_Literal() constructor expression.
        # (Concatenating a str subclass yields a plain str of its value,
        # so there is no recursion through __str__.)
        return 'IRI_ID_Literal("' + self + '")'

    def __repr__(self):
        return self.__str__()
class IRI_ID(UserDefinedType):
    """Column type mapping Virtuoso's native IRI_ID."""

    __visit_name__ = 'IRI_ID'

    def __init__(self):
        super(IRI_ID, self).__init__()

    def get_col_spec(self):
        return "IRI_ID"

    @staticmethod
    def _to_literal(value):
        # Falsy values (None, '') pass through as None.
        if value:
            return IRI_ID_Literal(value)

    def bind_processor(self, dialect):
        """Convert outgoing values to IRI_ID_Literal."""
        return self._to_literal

    def result_processor(self, dialect, coltype):
        """Convert incoming values to IRI_ID_Literal."""
        return self._to_literal
class iri_id_num(GenericFunction):
    """SQL function converting an IRI ID to its integer value."""
    type = INTEGER
    name = "iri_id_num"

    def __init__(self, iri_id, **kw):
        # getattr (vs. the previous iri_id.__dict__.get) avoids an
        # AttributeError for arguments with no instance __dict__, so the
        # intended warning is actually emitted.
        if not isinstance(iri_id, IRI_ID_Literal)\
                and not isinstance(getattr(iri_id, 'type', None), IRI_ID):
            warnings.warn("iri_id_num() accepts an IRI_ID object as input.")
        super(iri_id_num, self).__init__(iri_id, **kw)
class iri_id_from_num(GenericFunction):
    """SQL function converting a numeric IRI ID to the IRI_ID type."""
    type = IRI_ID
    name = "iri_id_from_num"

    def __init__(self, num, **kw):
        if not isinstance(num, int):
            # Fixed copy-pasted function name ("iri_id_num()") in the warning.
            warnings.warn("iri_id_from_num() accepts an Integer as input.")
        super(iri_id_from_num, self).__init__(num, **kw)
class id_to_iri(GenericFunction):
    """SQL function returning the IRI string for a given IRI ID."""
    type = String
    name = "id_to_iri"

    def __init__(self, iri_id, **kw):
        # TODO: Handle deferred.
        # getattr avoids AttributeError for arguments without a __dict__.
        if not isinstance(iri_id, IRI_ID_Literal)\
                and not isinstance(getattr(iri_id, 'type', None), IRI_ID):
            # Fixed copy-pasted function name ("iri_id_num()") in the warning.
            warnings.warn("id_to_iri() accepts an IRI_ID as input.")
        super(id_to_iri, self).__init__(iri_id, **kw)
class iri_to_id(GenericFunction):
    """SQL function returning the IRI ID for an IRI string.

    If the IRI is new to virtuoso, the IRI ID may be created on-the-fly,
    according to the second argument.
    """
    type = IRI_ID
    name = "iri_to_id"

    def __init__(self, iri, create=True, **kw):
        # py2 unicode IRIs are percent-encoded down to ASCII first.
        if isinstance(iri, past.builtins.unicode):
            iri = iri_to_uri(iri)
        if not isinstance(iri, str):
            # Fixed copy-pasted function name ("iri_id_num()") in the warning.
            warnings.warn("iri_to_id() accepts an IRI (VARCHAR) as input.")
        super(iri_to_id, self).__init__(iri, create, **kw)
def iri_property(iri_id_colname, iri_propname):
    """Class decorator to add access to an IRI_ID column as an IRI.

    The name of the IRI property will be iri_propname.
    """
    def iri_class_decorator(klass):
        # Backing column property '_<propname>' holding the IRI string.
        iri_hpropname = '_' + iri_propname
        setattr(klass, iri_hpropname,
                column_property(id_to_iri(getattr(klass, iri_id_colname))))

        def iri_accessor(self):
            # Instance-level read: the mapped '_<propname>' column property.
            return getattr(self, iri_hpropname)

        def iri_expression(klass):
            # Class-level (query) expression: id_to_iri(<id column>).
            return id_to_iri(getattr(klass, iri_id_colname))

        def iri_setter(self, val):
            # Keep the string property and the id column in sync.
            setattr(self, iri_hpropname, val)
            setattr(self, iri_id_colname, iri_to_id(val))

        def iri_deleter(self):
            setattr(self, iri_id_colname, None)

        col = getattr(klass, iri_id_colname)
        # A non-nullable id column cannot be cleared through the property.
        if not col.property.columns[0].nullable:
            iri_deleter = None
        prop = hybrid_property(
            iri_accessor, iri_setter, iri_deleter, iri_expression)
        setattr(klass, iri_propname, prop)
        return klass
    return iri_class_decorator
class XML(Text):
    # Virtuoso XML column type.
    __visit_name__ = 'XML'


class LONGXML(Text):
    # Virtuoso LONG XML column type.
    __visit_name__ = 'LONG_XML'
class VirtuosoTypeCompiler(compiler.GenericTypeCompiler):
    """Maps SQLAlchemy types to Virtuoso column type names."""

    def visit_boolean(self, type_):
        # No native boolean; stored as SMALLINT 0/1.
        return self.visit_SMALLINT(type_)

    def visit_LONGVARCHAR(self, type_):
        return 'LONG VARCHAR'

    def visit_LONGNVARCHAR(self, type_):
        return 'LONG NVARCHAR'

    def visit_DOUBLEPRECISION(self, type_):
        return 'DOUBLE PRECISION'

    def visit_BIGINT(self, type_):
        # NOTE(review): BIGINT maps to plain INTEGER -- confirm Virtuoso's
        # INTEGER is wide enough for the intended values.
        return "INTEGER"

    def visit_DATE(self, type_):
        # Dates stored as fixed-width 'YYYY-MM-DD' strings.
        return "CHAR(10)"

    def visit_CLOB(self, type_):
        return self.visit_LONGVARCHAR(type_)

    def visit_NCLOB(self, type_):
        return self.visit_LONGNVARCHAR(type_)

    def visit_TEXT(self, type_):
        return self._render_string_type(type_, "LONG VARCHAR")

    def visit_BLOB(self, type_):
        return "LONG VARBINARY"

    def visit_BINARY(self, type_):
        return self.visit_VARBINARY(type_)

    def visit_VARBINARY(self, type_):
        # Length suffix only when a length is given.
        return "VARBINARY" + (type_.length and "(%d)" % type_.length or "")

    def visit_LONGVARBINARY(self, type_):
        return 'LONG VARBINARY'

    def visit_large_binary(self, type_):
        return self.visit_LONGVARBINARY(type_)

    def visit_unicode(self, type_):
        return self.visit_NVARCHAR(type_)

    def visit_text(self, type_):
        return self.visit_TEXT(type_)

    def visit_unicode_text(self, type_):
        return self.visit_LONGNVARCHAR(type_)

    def visit_IRI_ID(self, type_):
        return "IRI_ID"

    def visit_XML(self, type_):
        return "XML"

    def visit_LONG_XML(self, type_):
        return "LONG XML"

    # def visit_user_defined(self, type_):
    #     TODO!
    #     return type_.get_col_spec()
class AddForeignKey(_CreateDropBase):
    """Represent an ALTER TABLE ADD CONSTRAINT statement."""
    __visit_name__ = "add_foreign_key"

    def __init__(self, element, *args, **kw):
        super(AddForeignKey, self).__init__(element, *args, **kw)
        # Disable the constraint's inline CREATE so it is only emitted
        # through this explicit ALTER statement.
        element._create_rule = util.portable_instancemethod(
            self._create_rule_disable)
class DropForeignKey(_CreateDropBase):
    """Represent an ALTER TABLE DROP CONSTRAINT statement."""
    __visit_name__ = "drop_foreign_key"

    def __init__(self, element, cascade=False, **kw):
        self.cascade = cascade
        super(DropForeignKey, self).__init__(element, **kw)
        # Disable the constraint's inline DDL so it is only emitted
        # through this explicit ALTER statement.
        element._create_rule = util.portable_instancemethod(
            self._create_rule_disable)
class VirtuosoDDLCompiler(compiler.DDLCompiler):
    """DDL compiler for Virtuoso-specific CREATE/ALTER/DROP syntax."""

    def get_column_specification(self, column, **kwargs):
        """Render 'name type [NOT] NULL [IDENTITY | DEFAULT ...]'."""
        colspec = (self.preparer.format_column(column) + " "
                   + self.dialect.type_compiler.process(column.type))
        if column.nullable is not None:
            # Sequence-defaulted and primary key columns are forced NOT NULL.
            if not column.nullable or column.primary_key or \
                    isinstance(column.default, schema.Sequence):
                colspec += " NOT NULL"
            else:
                colspec += " NULL"
        if column.table is None:
            raise exc.CompileError(
                "virtuoso requires Table-bound columns "
                "in order to generate DDL")
        # install an IDENTITY if we have either a sequence default
        # or an implicit autoincrement column
        if isinstance(column.default, schema.Sequence):
            if column.default.start == 0:
                start = 0
            else:
                start = column.default.start or 1
            colspec += " IDENTITY (START WITH %s)" % (start,)
        elif column is column.table._autoincrement_column:
            colspec += " IDENTITY"
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default
        return colspec

    def visit_under_constraint(self, constraint):
        """Render the UNDER <parent> subtable clause (deprecated feature)."""
        table = constraint.table
        parent_table = constraint.parent_table
        return "UNDER %s.%s " % (
            self.preparer.quote_schema(
                parent_table.schema, table.quote_schema),
            self.preparer.quote(parent_table.name, table.quote))

    def visit_drop_foreign_key(self, drop):
        # Make sure the constraint has no name, ondelete, deferrable, onupdate
        # while rendering (Virtuoso wants the bare FOREIGN KEY clause), then
        # restore the attributes afterwards.
        constraint = drop.element.constraint
        names = ("name", "ondelete", "deferrable", "onupdate")
        temp = {name: getattr(constraint, name, None) for name in names}
        for name in names:
            setattr(constraint, name, None)
        result = "ALTER TABLE %s DROP %s" % (
            self.preparer.format_table(drop.element.parent.table),
            self.visit_foreign_key_constraint(constraint),
        )
        for name in names:
            setattr(constraint, name, temp[name])
        return result

    def visit_add_foreign_key(self, create):
        """ALTER TABLE ... ADD <foreign key constraint>."""
        return "ALTER TABLE %s ADD %s" % (
            self.preparer.format_table(create.element.parent.table),
            self.visit_foreign_key_constraint(create.element.constraint),
        )

    def visit_create_text_index(self, create, include_schema=False,
                                include_table_schema=True):
        """Render CREATE TEXT INDEX with Virtuoso's optional clauses."""
        text_index = create.element
        column = text_index.column
        params = dict(table=column.table.name, column=column.name)
        # Pre-fill each optional clause with the empty string.
        for x in ('xml', 'clusters', 'key', 'with_insert', 'transform',
                  'language', 'encoding'):
            params[x] = ''
        if isinstance(column.type, (XML, LONGXML)):
            params['xml'] = 'XML'
        else:
            # Only textual column types can carry a (non-XML) text index.
            assert isinstance(column.type, TEXT_TYPES)
        if text_index.clusters:
            params['clusters'] = 'CLUSTERED WITH (' + ','.join((
                self.preparer.quote(c.name) for c in text_index.clusters)) + ')'
        if text_index.key:
            params['key'] = 'WITH KEY ' + self.preparer.quote(text_index.key.name)
        if not text_index.do_insert:
            params['with_insert'] = 'NO INSERT'
        if text_index.transform:
            params['transform'] = 'USING ' + self.preparer.quote(text_index.transform)
        if text_index.language:
            params['language'] = "LANGUAGE '" + text_index.language + "'"
        if text_index.encoding:
            params['encoding'] = 'ENCODING ' + text_index.encoding
        return ('CREATE TEXT {xml} INDEX ON "{table}" ( "{column}" ) {key} '
                '{with_insert} {clusters} {transform} {language} {encoding}'
                ).format(**params)

    def visit_drop_text_index(self, drop):
        # Virtuoso realizes a text index as a generated
        # <table>_<column>_WORDS table; dropping that table drops the index.
        text_index = drop.element
        name = "{table}_{column}_WORDS".format(
            table=text_index.column.table.name,
            column=text_index.column.name)
        return '\nDROP TABLE %s.%s' % (
            self.preparer.quote_schema(text_index.table.schema),
            self.preparer.quote(name))
# TODO: Alter is weird. Use MODIFY with full new thing. Eg:
# ALTER TABLE assembl..imported_post MODIFY body_mime_type NVARCHAR NOT NULL
# Reflection support: maps lower-cased Virtuoso type names (as reported by
# the information schema) to SQLAlchemy type classes.
ischema_names = {
    'bigint': INTEGER,
    'int': INTEGER,
    'integer': INTEGER,
    'smallint': SMALLINT,
    'tinyint': SMALLINT,
    'unsigned bigint': INTEGER,
    'unsigned int': INTEGER,
    'unsigned smallint': SMALLINT,
    'numeric': NUMERIC,
    'decimal': DECIMAL,
    'dec': DECIMAL,
    'float': FLOAT,
    'double': DOUBLEPRECISION,
    'double precision': DOUBLEPRECISION,
    'real': REAL,
    'smalldatetime': DATETIME,
    'datetime': DATETIME,
    'date': CHAR,
    'time': TIME,
    'char': CHAR,
    'character': CHAR,
    'varchar': VARCHAR,
    'character varying': VARCHAR,
    'char varying': VARCHAR,
    'nchar': NCHAR,
    'national char': NCHAR,
    'national character': NCHAR,
    'nvarchar': NVARCHAR,
    'nchar varying': NVARCHAR,
    'national char varying': NVARCHAR,
    'national character varying': NVARCHAR,
    'text': LONGVARCHAR,
    'unitext': LONGNVARCHAR,
    'binary': VARBINARY,
    'varbinary': VARBINARY,
    'long varbinary': LONGVARBINARY,
    'long varchar': LONGVARCHAR,
    'timestamp': TIMESTAMP,
}
# DO NOT USE! Deprecated in Columnar view.
class UnderConstraint(Constraint):
    """UNDER <parent> subtable constraint (deprecated in columnar views)."""
    __visit_name__ = 'under_constraint'

    def __init__(self, parent_table, **kw):
        super(UnderConstraint, self).__init__(**kw)
        # Accept a declarative class and unwrap its __table__.
        if not isinstance(parent_table, Table)\
                and parent_table.__dict__.get('__table__') is not None:
            parent_table = parent_table.__table__
        assert isinstance(parent_table, Table)
        self.parent_table = parent_table
class VirtuosoDialect(PyODBCConnector, default.DefaultDialect):
    """SQLAlchemy dialect for OpenLink Virtuoso over pyODBC."""

    name = 'virtuoso'
    execution_ctx_cls = VirtuosoExecutionContext
    preparer = VirtuosoIdentifierPreparer
    statement_compiler = VirtuosoSQLCompiler
    type_compiler = VirtuosoTypeCompiler
    ischema_names = ischema_names
    supports_unicode_statements = True
    supports_unicode_binds = True
    returns_unicode_strings = True
    supports_native_boolean = False
    ddl_compiler = VirtuosoDDLCompiler
    supports_right_nested_joins = False
    supports_multivalues_insert = False
    supports_sequences = True
    postfetch_lastrowid = True

    def connect(self, *args, **kwargs):
        """Open a pyODBC connection and configure text encodings.

        Virtuoso speaks UTF-8 for narrow chars and UTF-32LE for wide
        chars/metadata.
        """
        connection = super(VirtuosoDialect, self).connect(*args, **kwargs)
        if util.py2k:
            # `unicode` only exists on py2; this branch never runs on py3.
            connection.setdecoding(pyodbc.SQL_CHAR, 'utf-8', pyodbc.SQL_CHAR)
            connection.setdecoding(pyodbc.SQL_WCHAR, 'utf-32LE', pyodbc.SQL_WCHAR, unicode)
            connection.setdecoding(pyodbc.SQL_WMETADATA, 'utf-32LE', pyodbc.SQL_WCHAR, unicode)
            connection.setencoding(unicode, 'utf-32LE', pyodbc.SQL_WCHAR)
            connection.setencoding(str, 'utf-8', pyodbc.SQL_CHAR)
        else:
            connection.setdecoding(pyodbc.SQL_CHAR, 'utf-8', pyodbc.SQL_CHAR)
            connection.setdecoding(pyodbc.SQL_WCHAR, 'utf-32LE', pyodbc.SQL_WCHAR)
            connection.setdecoding(pyodbc.SQL_WMETADATA, 'utf-32LE', pyodbc.SQL_WCHAR)
            connection.setencoding('utf-32LE', pyodbc.SQL_WCHAR)
            connection.setencoding('utf-8', pyodbc.SQL_CHAR)
        return connection

    def _get_default_schema_name(self, connection):
        """Return 'catalog.schema' for the connected user (None if no catalog)."""
        res = connection.execute(
            'select U_DEF_QUAL, get_user() from DB.DBA.SYS_USERS where U_NAME=get_user()')
        catalog, schema = res.fetchone()
        if catalog:
            return '.'.join((catalog, schema))

    def has_table(self, connection, tablename, schema=None):
        """True when *tablename* exists in the given (or default) catalog.

        NOTE(review): only the catalog part of 'catalog.schema' is
        matched; the schema part is ignored -- confirm intended.
        """
        if schema is None:
            schema = self.default_schema_name
        if '.' not in schema:
            schema += '.'
        catalog, schema = schema.split('.', 1)
        result = connection.execute(
            text("SELECT TABLE_NAME FROM DB..TABLES WHERE "
                 "TABLE_CATALOG=:schemaname AND "
                 "TABLE_NAME=:tablename",
                 bindparams=[
                     bindparam("schemaname", catalog),
                     bindparam("tablename", tablename)
                 ])
        )
        return result.scalar() is not None

    def has_sequence(self, connection, sequence_name, schema=None):
        # sequences are auto-created in virtuoso
        return True

    def get_table_names(self, connection, schema=None, **kw):
        """List table names, optionally filtered by 'catalog.schema'."""
        if schema is None:
            schema = self.default_schema_name
        if schema is None:
            result = connection.execute(
                text("SELECT TABLE_NAME FROM DB..TABLES"))
            return [r[0] for r in result]
        if '.' not in schema:
            schema += '.'
        catalog, schema = schema.split('.', 1)
        if catalog:
            if schema:
                result = connection.execute(
                    text("SELECT TABLE_NAME FROM DB..TABLES WHERE "
                         "TABLE_CATALOG=:catalog AND TABLE_SCHEMA = :schema"),
                    catalog=catalog, schema=schema)
            else:
                # BUG FIX: a space was missing between WHERE and the column
                # name, producing invalid SQL 'WHERETABLE_CATALOG=...'.
                result = connection.execute(
                    text("SELECT TABLE_NAME FROM DB..TABLES WHERE "
                         "TABLE_CATALOG=:catalog"), catalog=catalog)
        else:
            # BUG FIX: same missing space as above ('WHERETABLE_SCHEMA=...').
            result = connection.execute(
                text("SELECT TABLE_NAME FROM DB..TABLES WHERE "
                     "TABLE_SCHEMA=:schema"), schema=schema)
        return [r[0] for r in result]
| {
"repo_name": "maparent/virtuoso-python",
"path": "virtuoso/alchemy.py",
"copies": "1",
"size": "29042",
"license": "bsd-3-clause",
"hash": -5632344684373782000,
"line_mean": 35.6691919192,
"line_max": 95,
"alpha_frac": 0.6018180566,
"autogenerated": false,
"ratio": 3.706227667177131,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4808045723777131,
"avg_score": null,
"num_lines": null
} |
"""Assertion based test cases for monotable.table.MonoTable for pytest."""
from collections import namedtuple
from os import path
import re
import pytest
import monotable
import monotable.plugin
import monotable.table
def read_file(*path_components):
    """Read a text file from the source tree into a string.

    The path is relative to the directory containing this file.
    """
    base_dir = path.dirname(path.abspath(__file__))
    target = path.join(base_dir, *path_components)
    with open(target) as f:
        return f.read()
class TestConsistentVersionStrings:
    """Verify same release version string in all places.

    Obtain the version string from various places in the source tree
    and check that they are all the same.
    This test does not prove the version is correct.
    Whitespace around the equals sign in the version statement IS significant.
    """

    auth_version = monotable.__version__    # authoritative

    def test_setup_py_version(self):
        # -------------------------------------------------------
        # setup.py
        # example: version='0.1.0',
        setup_text = read_file('..', 'setup.py')
        match = re.search(r" *version=['\"]([^'\"]*)['\"]", setup_text, re.M)
        assert match.group(1) == self.auth_version

    def test_conf_py_version_and_release(self):
        # -------------------------------------------------------
        # conf.py
        # example:
        #   # The short X.Y version.
        #   version = u'0.1.0'
        conf_text = read_file('..', 'doc', 'conf.py')
        match = re.search(r"^version = u['\"]([^'\"]*)['\"]", conf_text, re.M)
        assert match.group(1) == self.auth_version

        # conf.py
        # example:
        #   # The full version, including alpha/beta/rc tags.
        #   release = u'0.1.0'
        match = re.search(r"^release = u['\"]([^'\"]*)['\"]", conf_text, re.M)
        assert match.group(1) == self.auth_version

    def test_index_rst_version(self):
        # -------------------------------------------------------
        # index.rst
        # example:
        #   monotable version 1.0.1.
        # Note the final period is required.
        index_text = read_file('..', 'doc', 'index.rst')
        version_re = re.compile(r"monotable version (\d+\.\d+\.\d+)\.", re.M)
        match = version_re.search(index_text)
        assert match.group(1) == self.auth_version

        """make sure we properly match possible future versions."""
        v1 = 'monotable version 10.0.1.'
        m1 = version_re.search(v1)
        assert m1.group(1) == '10.0.1'

        v2 = 'monotable version 1.11.1.'
        m2 = version_re.search(v2)
        assert m2.group(1) == '1.11.1'

        v3 = 'monotable version 1.0.11.'
        m3 = version_re.search(v3)
        assert m3.group(1) == '1.0.11'

        v4 = 'monotable version 12.34.56.'
        m4 = version_re.search(v4)
        assert m4.group(1) == '12.34.56'

        # make sure we don't match bogus version strings.
        v5 = 'monotable version 12.34.56'    # no period
        m5 = version_re.search(v5)
        assert m5 is None

        v6 = 'monotable version .34.56'     # missing major version
        m6 = version_re.search(v6)
        assert m6 is None

        v7 = 'monotable version 1.Z.56'     # non numeric
        m7 = version_re.search(v7)
        assert m7 is None
#
# Test handling of empty lists and default constructor arguments.
#
def test_no_headings_no_formats_no_title_empty_cells():
    """No headings, formats, title, and no cells in the cellgrid.

    Test with both default argument values and empty lists.
    """
    tbl = monotable.table.MonoTable()    # default args for headings, formats
    text = tbl.table()
    assert text == ''
    text = tbl.table(cellgrid=[[]])
    assert text == ''
    # Column-oriented convenience function with all defaults.
    text = monotable.table.cotable()
    assert text == ''
    # Bordered variants also collapse to the empty string.
    text = tbl.bordered_table()
    assert text == ''
    text = tbl.bordered_table(cellgrid=[[]])
    assert text == ''
    # row_strings() returns the grid of formatted cells: one empty row here.
    row_strings = tbl.row_strings()
    assert row_strings == [[]]
    row_strings = tbl.row_strings(cellgrid=[[]])
    assert row_strings == [[]]
def test_empty_headings_empty_formats_empty_cells():
    """Empty headings, empty formats, and no cells in the cellgrid.

    With nothing else to render, the output is just the title text.
    """
    expected_title = 'My Title is a Good Title'
    text = monotable.table.table([], [], cellgrid=[[]], title=expected_title)
    assert text == expected_title
    text = monotable.table.bordered_table([], [], cellgrid=[[]],
                                          title=expected_title)
    assert text == expected_title
def test_empty_headings_empty_formats_empty_cells_no_title():
    """Empty headings, empty formats, no title, and no cells in cellgrid."""
    # Nothing at all to render yields the empty string.
    text = monotable.table.table([], [], cellgrid=[[]])
    assert text == ''
def test_only_title():
    """A table with only a title renders just the title text.

    Alignment prefixes ('<', '^', '>') are consumed and do not appear
    in the output.
    """
    tbl = monotable.table.MonoTable()
    text = tbl.table(cellgrid=[[]], title='Table Title')
    assert text == 'Table Title'
    text = tbl.table(title='Table Title')
    assert text == 'Table Title'
    text = tbl.table(title='<Table Title')
    assert text == 'Table Title'
    text = tbl.table(title='^Table Title')
    assert text == 'Table Title'
    text = tbl.table(title='>Table Title')
    assert text == 'Table Title'
    # repeat for bordered tables
    text = tbl.bordered_table(cellgrid=[[]], title='Table Title')
    assert text == 'Table Title'
    text = tbl.bordered_table(title='Table Title')
    assert text == 'Table Title'
    text = tbl.bordered_table(title='<Table Title')
    assert text == 'Table Title'
    text = tbl.bordered_table(title='^Table Title')
    assert text == 'Table Title'
    text = tbl.bordered_table(title='>Table Title')
    assert text == 'Table Title'
    # repeat for column oriented tables
    text = monotable.table.cotable([], 'Table Title')
    assert text == 'Table Title'
    text = monotable.table.cobordered_table([], 'Table Title')
    assert text == 'Table Title'
def test_only_wrapped_title():
    """Try to wrap a title on an empty table.

    No wrapping is done since table width is 0.
    """
    tbl = monotable.table.MonoTable()
    # '=' requests wrapping; with no columns the title passes through.
    text = tbl.table(title='=Wrapped Title')
    assert text == 'Wrapped Title'
    text = tbl.table(title='<=Wrapped Title')
    assert text == 'Wrapped Title'
    text = tbl.table(title='>=Wrapped Title')
    assert text == 'Wrapped Title'
    text = tbl.table(title='^=Wrapped Title')
    assert text == 'Wrapped Title'
    # repeat for bordered tables
    text = tbl.bordered_table(title='=Wrapped Title')
    assert text == 'Wrapped Title'
    text = tbl.bordered_table(title='<=Wrapped Title')
    assert text == 'Wrapped Title'
    text = tbl.bordered_table(title='>=Wrapped Title')
    assert text == 'Wrapped Title'
    text = tbl.bordered_table(title='^=Wrapped Title')
    assert text == 'Wrapped Title'
def test_one_column_table():
    """A single-column table renders between plain horizontal rules."""
    headings = ['Choices']
    cells = [['Spam'], ['Spam'], ['Spam'], ['Spam']]
    expected = (
        "-------\n"
        "Choices\n"
        "-------\n"
        "Spam\n"
        "Spam\n"
        "Spam\n"
        "Spam\n"
        "-------")
    assert monotable.table.table(headings, [], cells) == expected
def test_one_column_cotable():
    """Column-oriented input: one (heading, format, cells) tuple."""
    column = ('Choices', '', ['Spam', 'Spam', 'Spam', 'Spam'])
    text = monotable.table.cotable([column])
    expected = '\n'.join([
        "-------",
        "Choices",
        "-------",
        "Spam",
        "Spam",
        "Spam",
        "Spam",
        "-------",
    ])
    assert text == expected
def test_one_column_bordered_table():
    """One column with borders; the heading row is separated by '='."""
    headings = ['Choices']
    cells = [['Spam'], ['Spam'], ['Spam'], ['Spam']]
    text = monotable.table.bordered_table(headings, [], cells)
    expected = '\n'.join([
        "+---------+",
        "| Choices |",
        "+=========+",
        "| Spam    |",
        "+---------+",
        "| Spam    |",
        "+---------+",
        "| Spam    |",
        "+---------+",
        "| Spam    |",
        "+---------+",
    ])
    assert text == expected
def test_one_column_cobordered_table():
    """Column-oriented bordered table with a single column."""
    column = ('Choices', '', ['Spam', 'Spam', 'Spam', 'Spam'])
    text = monotable.table.cobordered_table([column])
    expected = '\n'.join([
        "+---------+",
        "| Choices |",
        "+=========+",
        "| Spam    |",
        "+---------+",
        "| Spam    |",
        "+---------+",
        "| Spam    |",
        "+---------+",
        "| Spam    |",
        "+---------+",
    ])
    assert text == expected
#
# Show that headings, formats, cellgrid rows, and each row
# can be a sequence or immutable sequence rather than a list.
# Show the callers args are not modified.
#
# Show that a cellgrid that is not a sequence of sequences is handled
# properly if it is an iterable of iterables.
# Show error handling for a row that can't be iterated.
#
def test_cellgrid_is_tuples():
    """Cell rows may be tuples instead of lists."""
    grid = ((1, 2, 3), (4, 5, 6), (7, 8, 9))    # all tuples
    expected = '\n'.join((
        "-------",
        "1  2  3",
        "4  5  6",
        "7  8  9",
        "-------",
    ))
    assert monotable.table.table([], [], grid) == expected
def test_column_oriented_cells_are_tuples():
    """cotable() columns may carry their cells in a tuple."""
    column0 = ('', '', (1, 4, 7))
    column1 = ('', '', (2, 5, 8))
    column2 = ('', '', (3, 6, 9))
    text = monotable.table.cotable([column0, column1, column2])
    expected = '\n'.join([
        "-------",
        "1  2  3",
        "4  5  6",
        "7  8  9",
        "-------",
    ])
    assert text == expected
def test_column_oriented_bordered_table_cells_are_tuples():
    """cobordered_table() columns may carry their cells in a tuple."""
    column0 = ('', '', (1, 4, 7))
    column1 = ('', '', (2, 5, 8))
    column2 = ('', '', (3, 6, 9))
    text = monotable.table.cobordered_table([column0, column1, column2])
    expected = '\n'.join([
        "+---+---+---+",
        "| 1 | 2 | 3 |",
        "+---+---+---+",
        "| 4 | 5 | 6 |",
        "+---+---+---+",
        "| 7 | 8 | 9 |",
        "+---+---+---+",
    ])
    assert text == expected
def test_rows_are_ranges():
    """Cell rows may be range objects (no headings, no formats)."""
    grid = (range(11, 14), range(14, 17), range(17, 20))
    expected = '\n'.join((
        "----------",
        "11  12  13",
        "14  15  16",
        "17  18  19",
        "----------",
    ))
    assert monotable.table.table([], [], grid) == expected
def test_headings_formats_cells_are_tuples_and_missing_items():
    """Short/extra headings, formats, and short rows are tolerated."""
    headings = ('a', 'b')                     # missing heading
    formats = ('', '', '', '')                # extra format
    cells = ((1, 2, 3), (4, 5, 6), (7, 8))    # missing last cell
    text = monotable.table.table(headings, formats, cells)
    expected = '\n'.join([
        "-------",
        "a  b",
        "-------",
        "1  2  3",
        "4  5  6",
        "7  8",
        "-------",
    ])
    assert text == expected
def test_cell_rows_are_named_tuples():
    """Cell rows may be namedtuples (any sequence type works)."""
    row = namedtuple('Row', ['column0', 'column1', 'column2'])
    cells = (row(column0=1, column1=2, column2=3),
             row(column0=11, column1=12, column2=13))
    text = monotable.table.table([], [], cells)
    expected = '\n'.join([
        "----------",
        " 1   2   3",
        "11  12  13",
        "----------",
    ])
    assert text == expected
def test_add_column_of_row_numbers():
    """Demonstrates a way to add a column of row numbers."""
    def transpose(grid):
        """Swap rows for columns or columns for rows."""
        return list(zip(*grid))
    headings = ['row\nnum', 'X', 'Y', 'Z']
    cells = (('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'H', 'I'))
    # Prepend the row numbers as a column by transposing, inserting the
    # numbers as the first column, and transposing back.
    row_numbers = range(1, len(cells) + 1)
    cell_columns = transpose(cells)
    transposed_row_numbered = [row_numbers] + cell_columns
    row_numbered = transpose(transposed_row_numbered)
    text = monotable.table.table(headings, [], row_numbered)
    expected = '\n'.join([
        "------------",
        "row",
        "num X Y Z",
        "------------",
        " 1 A B C",
        " 2 D E F",
        " 3 G H I",
        "------------",
        ])
    assert text == expected
def test_headings_are_iterable():
    """Headings is an iterable, but not a sequence. No len()."""
    headings = iter(('h1', 'h2', 'h3'))
    cells = (('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'H', 'I'))
    text = monotable.table.table(headings, [], cells,
                                 title='Headings\nfrom\nIterable')
    expected = '\n'.join([
        " Headings",
        " from",
        " Iterable",
        "----------",
        "h1 h2 h3",
        "----------",
        "A B C",
        "D E F",
        "G H I",
        "----------",
        ])
    assert text == expected
def test_cellgrid_iterable_of_iterable():
    """Show indirectly that cellgrid can be an iterable of iterables.
    Create types for cellgrid and the cellgrid rows that don't support
    len().
    """
    # Each row is an iterator (no len()); the grid is a generator.
    row0 = iter((1, 2, 3))
    row1 = iter((4, 5, 6))
    row2 = iter((7, 8, 9))
    def row_generator():
        yield row0
        yield row1
        yield row2
    cells = row_generator()
    text = monotable.table.table([], {}, cells)
    expected = '\n'.join([
        "-------",
        "1 2 3",
        "4 5 6",
        "7 8 9",
        "-------",
        ])
    assert text == expected
def test_forgot_outer_list_with_one_row_cellgrid():
    """Show proper error handling for a row that can't be iterated.
    Expect an AssertionError that tells the index of the offending row.
    The ints in cells are not iterable rows.
    """
    cells = [1, 2, 3]    # missing outer []
    with pytest.raises(AssertionError) as exc_info:
        _ = monotable.table.table((), (), cells)
    msg = 'If one row cellgrid, likely missing outer list.'
    assert str(exc_info.value).endswith(msg)
def test_forgot_outer_list_with_one_column_of_column_tuples():
    """Show proper error handling for missing list around column tuples."""
    # Both cotable() and cobordered_table() should raise the same message.
    msg = 'Short tuple or missing enclosing list.'
    column = ('', '', (1, 4, 5))
    with pytest.raises(AssertionError) as exc_info:
        _ = monotable.table.cotable(column)    # missing outer []
    assert str(exc_info.value).endswith(msg)
    with pytest.raises(AssertionError) as exc_info:
        _ = monotable.table.cobordered_table(column)    # missing outer []
    assert str(exc_info.value).endswith(msg)
def test_too_short_column_tuple():
    """Show proper error handling for column tuple len() != 3."""
    msg = 'Short tuple or missing enclosing list.'
    column0 = ('', '', (1, 4, 7))
    column1 = ('', '', (2,))    # well-formed 3-tuple, just one cell
    column2 = ('', (3, 6, 9))    # short tuple, missing format string
    with pytest.raises(AssertionError) as exc_info:
        _ = monotable.table.cotable([column0, column1, column2])
    assert str(exc_info.value).endswith(msg)
    with pytest.raises(AssertionError) as exc_info:
        _ = monotable.table.cobordered_table([column0, column1, column2])
    assert str(exc_info.value).endswith(msg)
def test_column_oriented_left_column_shorter():
    """A short left column is padded with empty cells at the bottom."""
    column0 = ('', '', (1, 4))
    column1 = ('', '', (2, 5, 8))
    column2 = ('', '', (3, 6, 9))
    text = monotable.table.cotable([column0, column1, column2])
    expected = '\n'.join([
        "-------",
        "1 2 3",
        "4 5 6",
        " 8 9",
        "-------",
        ])
    assert text == expected
def test_column_oriented_right_column_shorter():
    """A short right column is padded with empty cells at the bottom."""
    column0 = ('', '', (1, 4, 7))
    column1 = ('', '', (2, 5, 8))
    column2 = ('', '', (3, 6))
    text = monotable.table.cotable([column0, column1, column2])
    expected = '\n'.join([
        "-------",
        "1 2 3",
        "4 5 6",
        "7 8",
        "-------",
        ])
    assert text == expected
def test_column_oriented_middle_column_shorter():
    """A short middle column is padded with empty cells at the bottom."""
    column0 = ('', '', (1, 4, 7))
    column1 = ('', '', (2,))
    column2 = ('', '', (3, 6, 9))
    text = monotable.table.cotable([column0, column1, column2])
    expected = '\n'.join([
        "-------",
        "1 2 3",
        "4 6",
        "7 9",
        "-------",
        ])
    assert text == expected
def test_mixed_float_and_string_columns():
    """Mix float and str cells in one column; each cell formats by type.

    column1 has no format_spec so the empty string is used to format
    the string 'abc' and the default float format spec is used to
    format the two floats.
    """
    column0 = ('', '', ('row0', 'row1'))
    column1 = ('', '>', (1.5, 4.56, 'abc'))
    column_tuples = (column0, column1)
    text = monotable.table.cotable(column_tuples)
    expected = '\n'.join([
        "--------------",
        "row0 1.500000",
        "row1 4.560000",
        " abc",
        "--------------"
        ])
    # Removed leftover debug print(text); the assertion alone is the check.
    assert text == expected
def test_bordered_column_oriented_left_column_shorter():
    """Bordered table pads a short left column with an empty cell."""
    column0 = ('', '', (1, 4))
    column1 = ('', '', (2, 5, 8))
    column2 = ('', '', (3, 6, 9))
    text = monotable.table.cobordered_table([column0, column1, column2])
    expected = '\n'.join([
        "+---+---+---+",
        "| 1 | 2 | 3 |",
        "+---+---+---+",
        "| 4 | 5 | 6 |",
        "+---+---+---+",
        "| | 8 | 9 |",
        "+---+---+---+",
        ])
    assert text == expected
def test_bordered_column_oriented_right_column_shorter():
    """Bordered table pads a short right column with an empty cell."""
    column0 = ('', '', (1, 4, 7))
    column1 = ('', '', (2, 5, 8))
    column2 = ('', '', (3, 6))
    text = monotable.table.cobordered_table([column0, column1, column2])
    expected = '\n'.join([
        "+---+---+---+",
        "| 1 | 2 | 3 |",
        "+---+---+---+",
        "| 4 | 5 | 6 |",
        "+---+---+---+",
        "| 7 | 8 | |",
        "+---+---+---+",
        ])
    assert text == expected
def test_bordered_column_oriented_middle_column_shorter():
    """Bordered table pads a short middle column with empty cells."""
    column0 = ('', '', (1, 4, 7))
    column1 = ('', '', (2,))
    column2 = ('', '', (3, 6, 9))
    text = monotable.table.cobordered_table([column0, column1, column2])
    expected = '\n'.join([
        "+---+---+---+",
        "| 1 | 2 | 3 |",
        "+---+---+---+",
        "| 4 | | 6 |",
        "+---+---+---+",
        "| 7 | | 9 |",
        "+---+---+---+",
        ])
    assert text == expected
class TestMonoTableExceptionCallback:
    """Try out each of the exception callback functions."""
    # Shared fixture data: cell[1][1] is a str but its column format is
    # 'd', so formatting it raises inside the format engine.
    headings = ['column0', 'column1']
    formats = ['', 'd']    # d is bad format for the string 'label1'
    cells = [[0, 9999], [1, 'label1']]
    def test_raise_it(self):
        # Default callback re-raises as MonoTableCellError with the
        # failing cell's coordinates attached.
        with pytest.raises(monotable.table.MonoTableCellError) as exc_info:
            _ = monotable.table.table(self.headings, self.formats, self.cells)
        exc = exc_info.value
        assert exc.row == 1
        assert exc.column == 1
        assert exc.format_spec == 'd'
        # trace_text is generated by the Python installation and is subject
        # to change. Just check for presence of the text.
        assert len(exc.trace_text) > 0
        assert exc.name == 'MonoTableCellError'
        expected_str = "MonoTableCellError: cell[1][1], format_spec= d"
        assert expected_str in str(exc)
    def test_bordered_raise_it(self):
        # Same behavior through the bordered_table() entry point.
        with pytest.raises(monotable.table.MonoTableCellError) as exc_info:
            _ = monotable.table.bordered_table(self.headings, self.formats,
                                               self.cells)
        exc = exc_info.value
        assert exc.row == 1
        assert exc.column == 1
        assert exc.format_spec == 'd'
        assert len(exc.trace_text) > 0
        assert exc.name == 'MonoTableCellError'
        expected_str = "MonoTableCellError: cell[1][1], format_spec= d"
        assert expected_str in str(exc)
    def test_bordered_format_ignore_it(self):
        # ignore_it substitutes '???' for the failing cell instead of raising.
        tbl = monotable.table.MonoTable()
        tbl.format_exc_callback = monotable.plugin.ignore_it
        text = tbl.bordered_table(self.headings, self.formats, self.cells)
        # Each item in column not overridden by an align_spec
        # is aligned by type(item).
        # Since cell[1,1] = 'label1', a string, it auto-aligns to the left.
        expected = '\n'.join([
            "+---------+---------+",
            "| column0 | column1 |",
            "+=========+=========+",
            "| 0 | 9999 |",
            "+---------+---------+",
            "| 1 | ??? |",
            "+---------+---------+",
            ])
        assert text == expected
    def test_user_supplied_ignore_it(self):
        # A user callback's return value replaces the failing cell text.
        def my_ignore_it(_):
            return '!!!!!!!!!!!'
        class MyIgnoreItMonoTable(monotable.table.MonoTable):
            format_exc_callback = staticmethod(my_ignore_it)
        tbl = MyIgnoreItMonoTable()
        text = tbl.table(self.headings, self.formats, self.cells)
        expected = '\n'.join([
            "--------------------",
            "column0 column1",
            "--------------------",
            " 0 9999",
            " 1 !!!!!!!!!!!",
            "--------------------",
            ])
        assert text == expected
def test_print_it(capsys):
    """print_it() prints the cell error details and returns '???'."""
    exc = monotable.table.MonoTableCellError(777, 999, 'spec',
                                             'this is the trace text')
    value = monotable.plugin.print_it(exc)
    out, err = capsys.readouterr()
    assert out == '\n'.join([
        "MonoTableCellError: cell[777][999], format_spec= spec",
        "MonoTableCellError raised after catching:",
        "this is the trace text",
        ""])
    assert value == '???'
class TestMonoTableCatchesFormatErrors:
    """Make sure format errors are caught."""
    # Cells are namedtuples so sformat's attribute/index access can fail.
    Point = namedtuple('Point', ['x', 'y'])
    cells = [[Point(1, 91)],
             [Point(5, 95)]]
    def test_sformat_missing_attribute_error(self):
        """Callers cell object has no 'z' attribute."""
        formats = ['(sformat){.z}']    # missing attribute
        with pytest.raises(monotable.table.MonoTableCellError) as exc_info:
            _ = monotable.table.table((), formats, self.cells)
        exc = exc_info.value
        # Failure is reported at the first cell formatted with the bad spec.
        assert exc.row == 0
        assert exc.column == 0
        assert exc.format_spec == '{.z}'
        assert len(exc.trace_text) > 0
        assert exc.name == 'MonoTableCellError'
    def test_sformat_missing_index_error(self):
        """Callers cell object has no [2] index."""
        formats = ['(sformat){[2]}']    # missing index
        with pytest.raises(monotable.table.MonoTableCellError) as exc_info:
            _ = monotable.table.table((), formats, self.cells)
        exc = exc_info.value
        assert exc.row == 0
        assert exc.column == 0
        assert exc.format_spec == '{[2]}'
        assert len(exc.trace_text) > 0
        assert exc.name == 'MonoTableCellError'
def test_mformat_missing_key_error():
    """Callers dict has no value for key 'name'."""
    cells = [[dict(not_name=0)]]    # has no value for key='name'
    with pytest.raises(monotable.table.MonoTableCellError) as exc_info:
        _ = monotable.table.table([], ['(mformat)name= {name}'], cells)
    exc = exc_info.value
    assert exc.row == 0
    assert exc.column == 0
    assert exc.format_spec == 'name= {name}'
    assert len(exc.trace_text) > 0
    assert exc.name == 'MonoTableCellError'
# no test case for ArithmeticError
def test_user_defined_format_function_hides_default_format_function():
    """A plugged in format function hides default of same name."""
    def pformat(value, format_spec):
        _, _ = value, format_spec    # avoid value is not used warnings
    my_format_func_map = {'pformat': pformat}
    class MyMonoTable(monotable.table.MonoTable):
        format_func_map = my_format_func_map
    tbl = MyMonoTable()
    # The instance resolves 'pformat' to the user function, not the
    # monotable.plugin default of the same name.
    assert id(tbl.format_func_map['pformat']) == id(pformat)
    assert id(tbl.format_func_map['pformat']) != id(monotable.plugin.pformat)
def test_user_defined_format_function_raises_assertion_error():
    """User defined format function raises an assertion."""
    def user_defined_format_function(value, format_spec):
        _, _ = value, format_spec    # avoid value is not used warnings
        raise AssertionError('spam')
    my_format_func_map = {'my_format_function': user_defined_format_function}
    class MyMonoTable(monotable.table.MonoTable):
        format_func_map = my_format_func_map
    tbl = MyMonoTable()
    cells = [[1234]]
    # The AssertionError from the user function is converted to a
    # MonoTableCellError carrying the failing cell coordinates.
    with pytest.raises(monotable.table.MonoTableCellError) as exc_info:
        _ = tbl.table([], ['(my_format_function)'], cells)
    exc = exc_info.value
    assert exc.row == 0
    assert exc.column == 0
    assert exc.format_spec == ''
    assert len(exc.trace_text) > 0
    assert exc.name == 'MonoTableCellError'
def test_init_illegal_vertical_align():
    """An out-of-range cell_valign raises AssertionError when formatting."""
    # Note- Does not test entire exception message.
    msg = 'Expected a vertical align value, got:'
    with pytest.raises(AssertionError) as exc_info:
        tbl = monotable.table.MonoTable()
        tbl.cell_valign = -1
        _ = tbl.table([], [], [[]])
    assert str(exc_info.value).startswith(msg)
    with pytest.raises(AssertionError) as exc_info:
        tbl = monotable.table.MonoTable()
        tbl.cell_valign = 50
        _ = tbl.bordered_table([], [], [[]])
    assert str(exc_info.value).startswith(msg)
def test_bad_option_spec():
    """
    Check for AssertionError raised by code in _process_formats() due
    to a bad option in a valid option_spec.
    """
    # Note- Does not test entire exception message.
    tbl = monotable.table.MonoTable()
    cells = [['A']]
    with pytest.raises(AssertionError) as exc_info:
        _ = tbl.bordered_table([], ['(bad_option_spec)s'], cells)
    assert str(exc_info.value).startswith('cell column 0, format= ')
def test_override_option_spec_delimiters_bad_option_spec():
    """
    Change option_spec_delimiters on an instance.
    Use the same char for start and end delimiters.
    Check for AssertionError raised by code in _process_formats() due
    to a bad option in a valid option_spec. The option_spec is still
    valid even with new delimiters.
    """
    # Note- Does not test entire exception message.
    tbl = monotable.table.MonoTable()
    tbl.option_spec_delimiters = '!;!'    # start;separator;end
    cells = [['A']]
    with pytest.raises(AssertionError) as exc_info:
        _ = tbl.bordered_table([], ['!bad_option_spec!s'], cells)
    assert str(exc_info.value).startswith('cell column 0, format= ')
def test_no_option_spec_delimiters():
    """
    Disable option spec scanning by setting option_spec_delims to
    empty string. Format will be interpreted as a format_spec and
    cause a MonoTableCellError.
    """
    # Note- Does not test entire exception message.
    tbl = monotable.table.MonoTable()
    tbl.option_spec_delimiters = ''    # disable
    cells = [['A']]
    msg = 'MonoTableCellError: cell[0][0], format_spec= !width=10!s'
    with pytest.raises(monotable.table.MonoTableCellError) as exc_info:
        _ = tbl.table([], ['!width=10!s'], cells)
    assert str(exc_info.value).startswith(msg)
def test_override_option_spec_delimiters():
    """Test formatting with overridden option_spec_delimiters."""
    tbl = monotable.table.MonoTable()
    tbl.option_spec_delimiters = '!;!'    # start;separator;end
    cells = [['A']]
    text = tbl.table([], ['!width=10!s'], cells)
    assert text == '-\nA\n-'
def test_format_row_strings():
    """row_strings() returns aligned, padded strings; no rules/joining."""
    row0 = [9.1234567] * 4
    row1 = [88.1] * 4
    cells = [row0, row1]
    # Heading text names the format each column should use.
    headings = ['.1f', '.3f', '<.5f', 'default=.4f']
    formats = ['.1f', '<.3f', '.5f']    # 4th column falls back to default
    class Float4fMonoTable(monotable.table.MonoTable):
        default_float_format_spec = '.4f'
    t = Float4fMonoTable()
    row_strings = t.row_strings(headings, formats, cells)
    assert row_strings == [[' .1f', '.3f ', '.5f ', 'default=.4f'],
                           [' 9.1', '9.123 ', ' 9.12346', ' 9.1235'],
                           ['88.1', '88.100', '88.10000', ' 88.1000']]
def test_format_to_row_strings_stripped():
    """row_strings(strip=True) removes the alignment padding."""
    row0 = [9.1234567] * 4
    row1 = [88.1] * 4
    cells = [row0, row1]
    headings = ['.1f', '.3f', '<.5f', 'default=.4f']
    formats = ['.1f', '<.3f', '.5f']
    class Float4fMonoTable(monotable.table.MonoTable):
        default_float_format_spec = '.4f'
    t = Float4fMonoTable()
    row_strings = t.row_strings(headings, formats, cells, strip=True)
    assert row_strings == [['.1f', '.3f', '.5f', 'default=.4f'],
                           ['9.1', '9.123', '9.12346', '9.1235'],
                           ['88.1', '88.100', '88.10000', '88.1000']]
| {
"repo_name": "tmarktaylor/monotable",
"path": "test/test_monotable.py",
"copies": "1",
"size": "28338",
"license": "apache-2.0",
"hash": 4139715556106275300,
"line_mean": 30.1406593407,
"line_max": 80,
"alpha_frac": 0.535923495,
"autogenerated": false,
"ratio": 3.4749233599019007,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4510846854901901,
"avg_score": null,
"num_lines": null
} |
"""Assertion runners.
"""
import re
import verify
from .base import Assertion, is_assertion
__all__ = (
'ensure',
'expect',
)
class expect(object):
    """Pass `value` through a set of assertable functions.
    There are two styles for invoking ``expect``:
    1. Pass `value` and all `assertions` as arguments to the ``__init__``
       method of ``expect``.
    2. Pass `value` to the ``__init__`` method of ``expect`` and invoke
       assertions via method chaining.
    Examples:
        Passing `value` and `assertions` to ``expect.__init__``:
        >>> from verify import *
        >>> expect(5, Truthy(), Greater(4))
        <expect(5)>
        >>> expect(5, Falsy())
        Traceback (most recent call last):
        ...
        AssertionError...
        Using method chaining:
        >>> expect(5).Truthy().Greater(4)
        <expect(5)>
        >>> expect(5).Falsy()
        Traceback (most recent call last):
        ...
        AssertionError...
    Args:
        value (mixed): Value to test.
        *assertions (callable, optional): Callable objects that accept `value`
            as its first argument. It's expected that these callables assert
            something.
    Returns:
        self: Allows for method assertion chaining.
    Raises:
        AssertionError: If the evaluation of all assertions returns ``False``.
    Aliases:
        - ``ensure``
    .. versionadded:: 0.0.1
    .. versionchanged:: 0.1.0
        - Rename from ``Expect`` to ``expect`` and change implementation from a
          class to a function.
        - Passed in `value` is no longer called if it's a callable.
        - Return ``True`` if all assertions pass.
    .. versionchanged:: 0.6.0
        - Re-implement as class.
        - Support method chaining of assertion classes.
        - Wrap assertions that are not derived from Assertion in
          :class:`.Predicate` for consistent behavior from external assertion
          functions.
    """
    def __init__(self, value, *assertions):
        # Store the value under test; when assertions are supplied up
        # front (style 1), evaluate them immediately via __call__.
        self.value = value
        if assertions:
            self(*assertions)
    def __repr__(self):  # pragma: no cover
        return '<{0}>'.format(self)
    def __str__(self):  # pragma: no cover
        return '{0}({1})'.format(self.__class__.__name__, self.value)
    def __getattr__(self, attr):
        """Invoke assertions via attribute access. All :mod:`verify` assertions
        are available.
        """
        # Resolve the attribute name against the verify package.
        assertion = getattr(verify, attr, None)
        if not callable(assertion) and not attr.endswith('_'):
            # Alias method names not ending in underscore to their underscore
            # counterpart. This allows chaining of functions that have a name
            # conflict with builtins (e.g. "any_", "all_", etc).
            assertion = getattr(verify, attr + '_', None)
        if not is_assertion(assertion):
            raise AttributeError(('"{0}" is not a valid assertion method'
                                  .format(attr)))
        # Return a closure that applies the assertion to self.value and
        # returns self so further assertions can be chained.
        def chained_assertion(*args, **kargs):
            assertion(*args, **kargs)(self.value)
            return self
        chained_assertion.assertion = assertion
        return chained_assertion
    def __call__(self, *assertions):
        # Style 1: apply each assertion directly to the stored value.
        for assertion in assertions:
            if not is_assertion(assertion):
                # Wrap non-verify assertions in Predicate for consistent
                # behavior.
                assertion = verify.Predicate(assertion)
            assertion(self.value)
        return self
ensure = expect
| {
"repo_name": "dgilland/verify",
"path": "verify/runners.py",
"copies": "1",
"size": "3566",
"license": "mit",
"hash": -2181258280822830000,
"line_mean": 27.528,
"line_max": 79,
"alpha_frac": 0.5765563657,
"autogenerated": false,
"ratio": 4.531130876747141,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5607687242447141,
"avg_score": null,
"num_lines": null
} |
""" Assertions for testing
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2019-06-18
:Copyright: 2019, Karr Lab
:License: MIT
"""
import humanfriendly
import objsize
def assert_memory_less(obj, size, exclusive=False):
    """ Assert that the memory occupied by an object is less than a
    size
    Args:
        obj (:obj:`object`): object
        size (:obj:`int`): size in bytes
        exclusive (:obj:`bool`, optional): if :obj:`True`, check the exclusive
            memory of the object
    Raises:
        :obj:`ValueError`: if the memory occupied by the object is greater than
            or equal to :obj:`size`
    """
    # Choose the measurement function, then measure once.
    measure = (objsize.get_exclusive_deep_size if exclusive
               else objsize.get_deep_size)
    obj_size = measure(obj)
    if obj_size < size:
        return
    raise ValueError("{} memory is greater than or equal to {}".format(
        humanfriendly.format_size(obj_size),
        humanfriendly.format_size(size)))
def assert_memory_less_equal(obj, size, exclusive=False):
    """ Assert that the memory occupied by an object is less than or equal to a
    size
    Args:
        obj (:obj:`object`): object
        size (:obj:`int`): size in bytes
        exclusive (:obj:`bool`, optional): if :obj:`True`, check the exclusive
            memory of the object
    Raises:
        :obj:`ValueError`: if the memory occupied by the object is greater than
            :obj:`size`
    """
    # Choose the measurement function, then measure once.
    measure = (objsize.get_exclusive_deep_size if exclusive
               else objsize.get_deep_size)
    obj_size = measure(obj)
    if obj_size <= size:
        return
    raise ValueError("{} memory is greater than {}".format(
        humanfriendly.format_size(obj_size),
        humanfriendly.format_size(size)))
| {
"repo_name": "KarrLab/wc_utils",
"path": "wc_utils/util/testing.py",
"copies": "1",
"size": "1750",
"license": "mit",
"hash": -6553077271228284000,
"line_mean": 28.1666666667,
"line_max": 79,
"alpha_frac": 0.6154285714,
"autogenerated": false,
"ratio": 3.9149888143176734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5030417385717674,
"avg_score": null,
"num_lines": null
} |
""" Assertions for use within the test controllers.
Copyright (c) 2014 Kenn Takara
See LICENSE for details
"""
import os
import sys
def assert_equals(condition_a, condition_b):
    """ Assert that both sides are equal """
    # Frame one level up identifies the caller's file and line.
    caller = sys._getframe(1)
    if condition_a == condition_b:
        return
    raise AssertionError(
        '{2} != {3}, expected equal at {0} line {1}'.format(
            os.path.basename(caller.f_code.co_filename),
            caller.f_lineno,
            condition_a,
            condition_b))
def assert_not_equals(condition_a, condition_b):
    """ Assert that the sides are not equal """
    # Frame one level up identifies the caller's file and line.
    caller = sys._getframe(1)
    if condition_a != condition_b:
        return
    raise AssertionError(
        '{2} == {3}, expected not equal at {0} line {1}'.format(
            os.path.basename(caller.f_code.co_filename),
            caller.f_lineno,
            condition_a,
            condition_b))
def assert_is_none(condition):
    """ Assert the condition is None """
    # Frame one level up identifies the caller's file and line.
    caller = sys._getframe(1)
    if condition is None:
        return
    raise AssertionError(
        '{2} is not None, expected None at {0} line {1}'.format(
            os.path.basename(caller.f_code.co_filename),
            caller.f_lineno,
            condition))
def assert_is_not_none(condition):
    """ Assert the condition is not None """
    # Frame one level up identifies the caller's file and line.
    caller = sys._getframe(1)
    if condition is not None:
        return
    raise AssertionError(
        '{2} is None, expected not None at {0} line {1}'.format(
            os.path.basename(caller.f_code.co_filename),
            caller.f_lineno,
            condition))
def assert_true(condition):
    """ Assert the condition is True """
    # Frame one level up identifies the caller's file and line.
    # NOTE: identity check — only the exact object True passes; truthy
    # values such as 1 or 'x' raise.
    caller = sys._getframe(1)
    if condition is True:
        return
    raise AssertionError(
        '{2} is not True, expected True at {0} line {1}'.format(
            os.path.basename(caller.f_code.co_filename),
            caller.f_lineno,
            condition))
def assert_false(condition):
    """ Assert the condition is False """
    # Frame one level up identifies the caller's file and line.
    # NOTE(review): the check is `is True`, so truthy non-True values
    # (e.g. 1, 'x') pass silently — preserved as-is; confirm intent.
    caller = sys._getframe(1)
    if condition is not True:
        return
    raise AssertionError(
        '{2} is True, expected False at {0} line {1}'.format(
            os.path.basename(caller.f_code.co_filename),
            caller.f_lineno,
            condition))
def assert_tag_exists(message, tags):
    """ Check to see that the tags exist in the message """
    # Frame one level up identifies the caller's file and line.
    caller = sys._getframe(1)
    # Report the first tag (in input order) that is absent, if any.
    missing = [tag for tag in tags if tag not in message]
    if not missing:
        return
    raise AssertionError(
        '{2} not in message at {0} line {1}'.format(
            os.path.basename(caller.f_code.co_filename),
            caller.f_lineno,
            missing[0]))
def assert_tag(message, tags):
    """ Check to see that the tag and values are in the message """
    # Frame one level up identifies the caller's file and line.
    caller = sys._getframe(1)
    fname = os.path.basename(caller.f_code.co_filename)
    lineno = caller.f_lineno
    for tag in tags:
        key, expected_value = tag[0], tag[1]
        # The tag key must be present ...
        if key not in message:
            raise AssertionError(
                '{2} not in message at {0} line {1}'.format(
                    fname, lineno, key))
        # ... and must carry the expected value.
        if message[key] != expected_value:
            raise AssertionError(
                'message[{2}] is {3}, expected {4} at {0} line {1}'.format(
                    fname, lineno, key, message[key], expected_value))
| {
"repo_name": "kennt/fixtest",
"path": "fixtest/base/asserts.py",
"copies": "1",
"size": "3538",
"license": "mit",
"hash": -6776215625254238000,
"line_mean": 31.4587155963,
"line_max": 75,
"alpha_frac": 0.5353306953,
"autogenerated": false,
"ratio": 4.094907407407407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 109
} |
"""Assertions.
Generally you just use the ``ASSERT`` object defined in this module, but
if you want to raise different exception, just define an ``Assertions``
instance.
Examples:
>>> from g1.bases.assertions import ASSERT
>>> ASSERT(is_prime(x), 'expect a prime number, not {}', x)
This module is different from (and better than, in my opinion) the
assert statement in the following ways:
* You cannot turn it off (the assert statement can be turned off).
* It provides some common checks and default error messages.
* You may raise other exceptions, not just AssertionError.
"""
__all__ = [
'ASSERT',
'Assertions',
]
import builtins
import operator
from collections import abc
from functools import partialmethod
from . import functionals
def _empty(collection):
    """Return True when *collection* is a falsy Collection, else False."""
    if not isinstance(collection, abc.Collection):
        return False
    return not collection
def _not_empty(collection):
    """Return *collection* itself when it is a Collection (falsy when
    empty), or False for non-Collections — mirrors ``and`` semantics."""
    if not isinstance(collection, abc.Collection):
        return False
    return collection
def _is_none(value):
    """Return True when *value* is the None singleton."""
    return value is None
def _is_not_none(value):
    """Return True when *value* is anything other than None."""
    return value is not None
def _xor(p, q):
    """Logical exclusive-or of the truthiness of *p* and *q*."""
    return bool(p) is not bool(q)
def _in(item, container):
    """Membership test: *item* in *container*."""
    return item in container
def _not_in(item, container):
    """Negated membership test: *item* not in *container*."""
    return item not in container
def _in_range(value, bounds):
    """Half-open range test: bounds[0] <= value < bounds[1]."""
    low, high = bounds[0], bounds[1]
    return low <= value and value < high
def _only_one(items):
    """Return True when exactly one element of *items* is truthy."""
    truthy_seen = 0
    for element in items:
        if not element:
            continue
        truthy_seen += 1
        if truthy_seen > 1:
            # Two truthy elements already disqualify; stop scanning.
            break
    return truthy_seen == 1
def _unique(items):
    """Return True when *items* contains no duplicate elements."""
    seen = set()
    for element in items:
        if element in seen:
            # Duplicate found; no need to look further.
            return False
        seen.add(element)
    return True
def _issubset_proper(u, v):
    """Return True when *u* is a strict (proper) subset of *v*."""
    if not u.issubset(v):
        return False
    return u != v
def _issuperset_proper(u, v):
    """Return True when *u* is a strict (proper) superset of *v*."""
    if not u.issuperset(v):
        return False
    return u != v
# ``_method_caller(name)(obj, *args)`` is equivalent to
# ``operator.methodcaller(name, *args)(obj)``.
def _method_caller(name):
return lambda obj, *args: getattr(obj, name)(*args)
class Assertions:
    """Assertions.
    This class provides common assertion methods for asserting program
    states.
    By convention, all assertion methods return the first argument on
    success. When raising an exception, the default error message is
    composed under the convention that the second argument is what you
    expect and the first argument is the actual input.
    You may provide error message through the keyword-only ``message``
    argument. Note that messages are ``{}``-formatted, not ``%``-
    formatted. The benefits of ``{}``-formatting are that you may
    reverse or repeat the formatting arguments in the output message.
    Examples:
        >>> HTTP_ASSERT = Assertions(HttpError)
        >>> x = HTTP_ASSERT.greater(x, 0)
    """
    def __init__(self, make_exc):
        # make_exc(message, *args) builds the exception to raise.
        self._make_exc = make_exc
    def __call__(self, cond, message, *args):
        """State an assertion.
        Note that ``__call__`` function signature is slightly different
        here: The formatting arguments are passed from ``*args``, and do
        not include the condition stated by the assertion.
        Examples:
            >>> ASSERT(is_prime(x), 'expect a prime number, not {}', x)
        """
        if not cond:
            raise self._make_exc(message.format(*args), cond)
        return cond
    def unreachable(self, message, *args):
        # Unconditionally raise; marks code paths that must never run.
        raise self._make_exc(message.format(*args))
    def _assert_1(self, predicate, arg, *, message):
        # One-argument assertion core; partialmethods below bind the
        # predicate and default message.
        if not predicate(arg):
            raise self._make_exc(message.format(arg), arg)
        return arg
    true = partialmethod(
        _assert_1, bool, message='expect true-value, not {!r}'
    )
    false = partialmethod(
        _assert_1, operator.not_, message='expect false-value, not {!r}'
    )
    empty = partialmethod(
        _assert_1, _empty, message='expect empty collection, not {!r}'
    )
    not_empty = partialmethod(
        _assert_1, _not_empty, message='expect non-empty collection, not {!r}'
    )
    none = partialmethod(_assert_1, _is_none, message='expect None, not {!r}')
    not_none = partialmethod(
        _assert_1, _is_not_none, message='expect non-None value'
    )
    def predicate(self, arg, predicate, *, message='expect {1}, not {0!r}'):
        # Assert with a caller-supplied predicate function.
        if not predicate(arg):
            raise self._make_exc(message.format(arg, predicate), arg)
        return arg
    def not_predicate(
        self, arg, predicate, *, message='expect not {1}, but {0!r}'
    ):
        # Assert the caller-supplied predicate does NOT hold.
        if predicate(arg):
            raise self._make_exc(message.format(arg, predicate), arg)
        return arg
    def _assert_2(self, predicate, actual, expect, *, message):
        # Two-argument assertion core; the second argument is the
        # expected value used in the default messages.
        if not predicate(actual, expect):
            msg = message.format(actual, expect)
            raise self._make_exc(msg, actual, expect)
        return actual
    xor = partialmethod(
        _assert_2, _xor, message='expect {0!r} xor {1!r} be true'
    )
    not_xor = partialmethod(
        _assert_2,
        functionals.compose(operator.not_, _xor),
        message='expect {0!r} xor {1!r} be false',
    )
    is_ = partialmethod(
        _assert_2, operator.is_, message='expect {1!r}, not {0!r}'
    )
    is_not = partialmethod(
        _assert_2, operator.is_not, message='expect non-{!r} value'
    )
    isinstance = partialmethod(
        _assert_2,
        builtins.isinstance,
        message='expect {1}-typed value, not {0!r}',
    )
    not_isinstance = partialmethod(
        _assert_2,
        functionals.compose(operator.not_, builtins.isinstance),
        message='expect non-{1}-typed value, but {0!r}',
    )
    issubclass = partialmethod(
        _assert_2,
        builtins.issubclass,
        message='expect subclass of {1}, not {0!r}',
    )
    not_issubclass = partialmethod(
        _assert_2,
        functionals.compose(operator.not_, builtins.issubclass),
        message='expect non-subclass of {1}, but {0!r}',
    )
    in_ = partialmethod(_assert_2, _in, message='expect {0!r} in {1!r}')
    not_in = partialmethod(
        _assert_2, _not_in, message='expect {0!r} not in {1!r}'
    )
    contains = partialmethod(
        _assert_2, operator.contains, message='expect {0!r} containing {1!r}'
    )
    not_contains = partialmethod(
        _assert_2,
        functionals.compose(operator.not_, operator.contains),
        message='expect {0!r} not containing {1!r}',
    )
    def getitem(self, collection, key):
        """Shorthand for ``ASSERT.contains(collection, key)[key]``."""
        return self.contains(collection, key)[key]
    def setitem(self, collection, key, value):
        """Check before set an item."""
        self.not_contains(collection, key)[key] = value
    equal = partialmethod(
        _assert_2, operator.eq, message='expect x == {1!r}, not {0!r}'
    )
    not_equal = partialmethod(
        _assert_2, operator.ne, message='expect x != {1!r}, not {0!r}'
    )
    greater = partialmethod(
        _assert_2, operator.gt, message='expect x > {1!r}, not {0!r}'
    )
    greater_or_equal = partialmethod(
        _assert_2, operator.ge, message='expect x >= {1!r}, not {0!r}'
    )
    less = partialmethod(
        _assert_2, operator.lt, message='expect x < {1!r}, not {0!r}'
    )
    less_or_equal = partialmethod(
        _assert_2, operator.le, message='expect x <= {1!r}, not {0!r}'
    )
    in_range = partialmethod(
        _assert_2,
        _in_range,
        message='expect {1[0]!r} <= x < {1[1]!r}, not {0!r}',
    )
    not_in_range = partialmethod(
        _assert_2,
        functionals.compose(operator.not_, _in_range),
        message='expect not {1[0]!r} <= x < {1[1]!r}, not {0!r}',
    )
    startswith = partialmethod(
        _assert_2,
        _method_caller('startswith'),
        message='expect x.startswith({1!r}), not {0!r}',
    )
    not_startswith = partialmethod(
        _assert_2,
        functionals.compose(
            operator.not_,
            _method_caller('startswith'),
        ),
        message='expect not x.startswith({1!r}), not {0!r}',
    )
    isdisjoint = partialmethod(
        _assert_2,
        _method_caller('isdisjoint'),
        message='expect x.isdisjoint({1!r}), not {0!r}',
    )
    not_isdisjoint = partialmethod(
        _assert_2,
        functionals.compose(
            operator.not_,
            _method_caller('isdisjoint'),
        ),
        message='expect not x.isdisjoint({1!r}), but {0!r}',
    )
    issubset = partialmethod(
        _assert_2,
        _method_caller('issubset'),
        message='expect x.issubset({1!r}), not {0!r}',
    )
    not_issubset = partialmethod(
        _assert_2,
        functionals.compose(
            operator.not_,
            _method_caller('issubset'),
        ),
        message='expect not x.issubset({1!r}), but {0!r}',
    )
    issubset_proper = partialmethod(
        _assert_2,
        _issubset_proper,
        message='expect x is proper subset of {1!r}, not {0!r}',
    )
    not_issubset_proper = partialmethod(
        _assert_2,
        functionals.compose(
            operator.not_,
            _issubset_proper,
        ),
        message='expect x is not proper subset of {1!r}, but {0!r}',
    )
    issuperset = partialmethod(
        _assert_2,
        _method_caller('issuperset'),
        message='expect x.issuperset({1!r}), not {0!r}',
    )
    not_issuperset = partialmethod(
        _assert_2,
        functionals.compose(
            operator.not_,
            _method_caller('issuperset'),
        ),
        message='expect not x.issuperset({1!r}), but {0!r}',
    )
    issuperset_proper = partialmethod(
        _assert_2,
        _issuperset_proper,
        message='expect x is proper superset of {1!r}, not {0!r}',
    )
    not_issuperset_proper = partialmethod(
        _assert_2,
        functionals.compose(
            operator.not_,
            _issuperset_proper,
        ),
        message='expect x is not proper superset of {1!r}, but {0!r}',
    )
    def _assert_collection(
        self, predicate, collection, mapper=None, *, message
    ):
        # Collection assertion core: optionally map elements, then test
        # the mapped iterable with the bound predicate (all/any/...).
        xs = collection
        if mapper is not None:
            xs = map(mapper, xs)
        if not predicate(xs):
            msg = message.format(collection, mapper or 'true')
            raise self._make_exc(msg, collection)
        return collection
    # Given a collection of n elements, let x be the number of elements
    # that satisfies the condition, and the following assertions can be
    # expressed as...
    # Assert x = n.
    all = partialmethod(
        _assert_collection, builtins.all, message='expect all {1}, not {0!r}'
    )
    # Assert 0 <= x < n.
    not_all = partialmethod(
        _assert_collection,
        functionals.compose(operator.not_, builtins.all),
        message='expect not all {1}, not {0!r}',
    )
    # Assert 0 < x <= n.
    any = partialmethod(
        _assert_collection, builtins.any, message='expect any {1}, not {0!r}'
    )
    # Assert x = 0.
    not_any = partialmethod(
        _assert_collection,
        functionals.compose(operator.not_, builtins.any),
        message='expect not any {1}, not {0!r}',
    )
    # Assert x = 1.
    only_one = partialmethod(
        _assert_collection,
        _only_one,
        message='expect only one {1}, not {0!r}',
    )
    unique = partialmethod(
        _assert_collection,
        _unique,
        message='expect unique elements in {0!r}',
    )
    not_unique = partialmethod(
        _assert_collection,
        functionals.compose(operator.not_, _unique),
        message='expect non-unique elements in {0!r}',
    )
# Default assertion object that raises the built-in AssertionError.  The
# factory receives (message, *values); the extra values are discarded here.
ASSERT = Assertions(lambda message, *_: AssertionError(message))
| {
"repo_name": "clchiou/garage",
"path": "py/g1/bases/g1/bases/assertions.py",
"copies": "1",
"size": "11506",
"license": "mit",
"hash": -7955244830073160000,
"line_mean": 27.0634146341,
"line_max": 78,
"alpha_frac": 0.5862158874,
"autogenerated": false,
"ratio": 3.6713465220165924,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9757562409416592,
"avg_score": 0,
"num_lines": 410
} |
"""Assertions related to containers/iterables.
"""
import operator
import pydash
from .base import Assertion, Comparator, Negate
from .numbers import Between
# Public API: container/iterable assertion classes, each positive
# assertion paired with its Not* negation.
__all__ = (
    'In',
    'NotIn',
    'Contains',
    'NotContains',
    'ContainsOnly',
    'NotContainsOnly',
    'Subset',
    'NotSubset',
    'Superset',
    'NotSuperset',
    'Unique',
    'NotUnique',
    'Length',
    'NotLength',
)
class In(Comparator):
    """Assert that `value` is a member of `comparable`.

    Aliases:

    - ``to_be_in``
    - ``is_in``

    .. versionadded:: 0.0.1
    """

    #:
    reason = '{0} is not in {comparable}'

    @staticmethod
    def op(value, comparable):
        """Return ``True`` when `comparable` contains `value`."""
        try:
            found = value in comparable
        except (TypeError, ValueError):
            # `comparable` does not support membership tests at all.
            found = False
        return found


to_be_in = In
is_in = In
class NotIn(Negate, In):
    """Asserts that `value` is not in `comparable`.

    Aliases:

    - ``to_not_be_in``
    - ``is_not_in``

    .. versionadded:: 0.5.0
    """

    #:
    # Failure message shown when `value` IS found in `comparable`.
    reason = '{0} is in {comparable}'


to_not_be_in = NotIn
is_not_in = NotIn
class Contains(Comparator):
    """Assert that `value` is an iterable that contains `comparable`.

    Aliases:

    - ``to_contain``
    - ``contains``

    .. versionadded:: 0.2.0
    """

    #:
    reason = '{0} does not contain {comparable}'

    @staticmethod
    def op(value, comparable):
        """Return ``True`` when `comparable` is found inside `value`."""
        try:
            present = comparable in value
        except (TypeError, ValueError):
            # `value` does not support membership tests.
            present = False
        return present


to_contain = Contains
contains = Contains
class NotContains(Negate, Contains):
    """Asserts that `value` does not contain `comparable`.

    Aliases:

    - ``to_not_contain``
    - ``does_not_contain``

    .. versionadded:: 0.5.0
    """

    #:
    # Failure message shown when `value` DOES contain `comparable`.
    reason = '{0} contains {comparable}'


to_not_contain = NotContains
does_not_contain = NotContains
class ContainsOnly(Comparator):
    """Assert that `value` is an iterable whose items all appear in
    `comparable`.

    Aliases:

    - ``to_contain_only``
    - ``contains_only``

    .. versionadded:: 0.2.0
    """

    #:
    reason = '{0} does not only contain values in {comparable}'

    @staticmethod
    def op(value, comparable):
        """Return ``True`` when every item of `value` is in `comparable`."""
        try:
            for item in value:
                if item not in comparable:
                    return False
        except (TypeError, ValueError):
            # `value` is not iterable or membership tests are unsupported.
            return False
        return True


to_contain_only = ContainsOnly
contains_only = ContainsOnly
class NotContainsOnly(Negate, ContainsOnly):
    """Asserts that `value` does not contain only `comparable`.

    Aliases:

    - ``to_not_contain_only``
    - ``does_not_contain_only``

    .. versionadded:: 0.5.0
    """

    #:
    # Failure message shown when all items of `value` ARE in `comparable`.
    reason = '{0} contains only {comparable}'


to_not_contain_only = NotContainsOnly
does_not_contain_only = NotContainsOnly
class Subset(Comparator):
    """Asserts that `value` is a subset of `comparable`. Comparison supports
    nested ``dict``, ``list``, and ``tuple`` objects.

    Aliases:

    - ``to_be_subset``
    - ``is_subset``

    .. versionadded:: 0.3.0
    """

    #:
    reason = '{0} is not a subset of {comparable}'
    # NOTE(review): rearg(f, 1, 0) presumably swaps the first two positional
    # arguments, i.e. op(value, comparable) -> is_match(comparable, value)
    # ("is `value` a partial match of `comparable`") -- confirm against the
    # pydash.rearg documentation.
    op = pydash.rearg(pydash.is_match, 1, 0)


to_be_subset = Subset
is_subset = Subset


class NotSubset(Negate, Subset):
    """Asserts that `value` is a not a subset of `comparable`.

    Aliases:

    - ``to_not_be_subset``
    - ``is_not_subset``

    .. versionadded:: 0.5.0
    """

    #:
    # Failure message shown when `value` IS a subset of `comparable`.
    reason = '{0} is a subset of {comparable}'


to_not_be_subset = NotSubset
is_not_subset = NotSubset
class Superset(Comparator):
    """Asserts that `value` is a superset of `comparable`. Comparison supports
    nested ``dict``, ``list``, and ``tuple`` objects.

    Aliases:

    - ``to_be_superset``
    - ``is_superset``

    .. versionadded:: 0.3.0
    """

    #:
    # Fixed typo in the user-facing failure message: "supserset" -> "superset".
    reason = '{0} is not a superset of {comparable}'
    # pydash.is_match(obj, source) is True when `source` is a partial (deep)
    # match of `obj`, i.e. `value` contains everything in `comparable`.
    op = staticmethod(pydash.is_match)


to_be_superset = Superset
is_superset = Superset
class NotSuperset(Negate, Superset):
    """Asserts that `value` is a not a superset of `comparable`.

    Aliases:

    - ``to_not_be_superset``
    - ``is_not_superset``

    .. versionadded:: 0.5.0
    """

    #:
    # Failure message shown when `value` IS a superset of `comparable`.
    reason = '{0} is a superset of {comparable}'


to_not_be_superset = NotSuperset
is_not_superset = NotSuperset
class Unique(Assertion):
    """Assert that `value` contains no duplicate items.  For a ``dict``,
    uniqueness is checked over its ``values()``.

    Aliases:

    - ``to_be_unique``
    - ``is_unique``

    .. versionadded:: 0.3.0
    """

    #:
    reason = '{0} contains duplicate items'

    @staticmethod
    def op(value):
        """Return ``True`` when every item occurs at most once."""
        if isinstance(value, dict):
            value = value.values()
        # A list (not a set) is used so that unhashable items still work;
        # the scan is O(n^2) but preserves the original semantics exactly.
        seen = []
        for item in value:
            if item in seen:
                return False
            seen.append(item)
        return True


to_be_unique = Unique
is_unique = Unique
class NotUnique(Negate, Unique):
    """Asserts that `value` is a not a unique.

    Aliases:

    - ``to_not_be_unique``
    - ``is_not_unique``

    .. versionadded:: 0.5.0
    """

    #:
    # Failure message shown when `value` HAS only unique items.
    reason = '{0} is unique'


to_not_be_unique = NotUnique
is_not_unique = NotUnique
class Length(Between):
    """Asserts that `value` is an iterable with length between `min` and `max`
    inclusively.

    Examples:

        These will pass:

        >>> assert Length([1, 2, 3], min=3, max=3)  # 3 <= len(a) <= 3
        >>> assert Length([1, 2, 3, 4, 5], min=5, max=6)  # 5 <= len(a) <= 6
        >>> assert Length([1, 2, 3], max=6)  # len(a) <= 6
        >>> assert Length([1, 2, 3, 4], min=4)  # len(a) >= 4

        This will fail:

        >>> Length([1, 2, 4], max=2)  # len(a) <= 2
        Traceback (most recent call last):
        ...
        AssertionError...

    Args:
        value (mixed, optional): Value to compare.

    Keyword Args:
        min (int, optional): Minimum value that `value` must be greater than or
            equal to.
        max (int, optional): Maximum value that `value` must be less than or
            equal to.

    Aliases:

    - ``to_have_length``
    - ``has_length``

    .. versionadded:: 0.2.0

    .. versionchanged:: 0.4.0
        - Change comparison to function like :class:`Between` meaning length is
          compared to min and max values.
        - Allow keyword arguments ``min`` and ``max`` to be used in place of
          positional tuple

    .. versionchanged:: 1.0.0
        Removed positional tuple argument and only support ``min`` and ``max``
        keyword arguments.
    """

    #:
    reason = '{0} does not have length between {min} and {max}'

    @staticmethod
    def op(value, min=None, max=None):
        # `len()` raises TypeError for unsized values and Between.op may
        # raise for invalid bounds; both are kept inside the try so either
        # case simply fails the assertion instead of propagating.
        try:
            return Between.op(len(value), min=min, max=max)
        except (TypeError, ValueError):
            return False


to_have_length = Length
has_length = Length


class NotLength(Negate, Length):
    """Asserts that `value` is an iterable with length not between `min` and
    `max` inclusively.

    Aliases:

    - ``to_not_have_length``
    - ``does_not_have_length``

    .. versionadded:: 1.0.0
    """

    #:
    # Failure message shown when the length IS within [min, max].
    reason = '{0} has length between {min} and {max}'


to_not_have_length = NotLength
does_not_have_length = NotLength
| {
"repo_name": "dgilland/verify",
"path": "verify/containers.py",
"copies": "1",
"size": "7481",
"license": "mit",
"hash": 6053268354495464000,
"line_mean": 20.0732394366,
"line_max": 79,
"alpha_frac": 0.5703782917,
"autogenerated": false,
"ratio": 3.5931796349663783,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9663557926666377,
"avg_score": 0,
"num_lines": 355
} |
"""Assertions related to equality.
"""
import operator
from functools import partial
import re
import pydash
from .base import Assertion, Comparator, Negate, NotSet
# Public API: equality/identity/regex assertion classes, each positive
# assertion paired with its negation.
__all__ = (
    'Equal',
    'NotEqual',
    'Match',
    'NotMatch',
    'Is',
    'IsNot',
    'IsTrue',
    'IsNotTrue',
    'IsFalse',
    'IsNotFalse',
    'IsNotNone',
    'IsNone',
)
class Equal(Comparator):
    """Asserts that two values are equal.

    Aliases:

    - ``to_be_equal``
    - ``is_equal``

    .. versionadded:: 0.0.1
    """

    #:
    reason = '{0} is not equal to {comparable}'
    # Plain ``==`` comparison.
    op = operator.eq


to_be_equal = Equal
is_equal = Equal


class NotEqual(Negate, Equal):
    """Asserts that two values are not equal.

    Aliases:

    - ``to_not_be_equal``
    - ``is_not_equal``

    .. versionadded:: 0.5.0
    """

    #:
    # Failure message shown when the values ARE equal.
    reason = '{0} is equal to {comparable}'


to_not_be_equal = NotEqual
is_not_equal = NotEqual
class Match(Comparator):
    """Assert that `value` matches the regular expression `comparable`.

    Args:
        value (mixed, optional): Value to compare.
        comparable (str|RegExp): String or RegExp object used for matching.

    Keyword Args:
        flags (int, optional): Used when compiling regular expression when
            regular expression is a string. Defaults to ``0``.

    Aliases:

    - ``to_match``
    - ``is_match``
    - ``matches``

    .. versionadded:: 0.3.0
    """

    #:
    reason = '{0} does not match the regular expression {comparable}'

    def set_options(self, opts):
        # Pop (rather than read) so remaining opts can be handled elsewhere;
        # 0 means "no regex flags".
        self.flags = opts.pop('flags', 0)

    def compare(self, value):
        return self.op(value, self.comparable, flags=self.flags)

    @staticmethod
    def op(value, comparable, flags=0):
        """Return ``True`` when `value` matches the pattern `comparable`."""
        if pydash.is_string(comparable):
            pattern = re.compile(comparable, flags)
        else:
            # Assume a precompiled pattern object; `flags` is ignored then.
            pattern = comparable
        try:
            return bool(pattern.match(value))
        except (TypeError, ValueError):
            # Non-string `value` (or an incompatible pattern) never matches.
            return False


to_match = Match
is_match = Match
matches = Match
class NotMatch(Negate, Match):
    """Asserts that `value` does not match the regular expression `comparable`.

    Aliases:

    - ``to_not_match``
    - ``is_not_match``
    - ``does_not_match``

    .. versionadded:: 0.5.0
    """

    #:
    # Failure message shown when `value` DOES match the pattern.
    reason = '{0} matches the regular expression {comparable}'


to_not_match = NotMatch
# BUGFIX: this alias previously pointed at ``Match``, so ``is_not_match``
# asserted the exact opposite of its name.  It now aliases ``NotMatch``,
# consistent with every other negated-assertion alias in this module.
# (The docstring alias list was also corrected to the names actually
# defined here.)
is_not_match = NotMatch
does_not_match = NotMatch
class Is(Comparator):
    """Asserts that `value` is `comparable`.

    Aliases:

    - ``to_be``
    - ``is_``

    .. versionadded:: 0.0.1
    """

    #:
    reason = '{0} is not {comparable}'
    # Identity comparison (``value is comparable``), not equality.
    op = operator.is_


to_be = Is
is_ = Is


class IsNot(Negate, Is):
    """Asserts that `value` is not `comparable`.

    Aliases:

    - ``to_not_be``
    - ``is_not``

    .. versionadded:: 0.5.0
    """

    #:
    reason = '{0} is {comparable}'


to_not_be = IsNot
is_not = IsNot


class IsTrue(Assertion):
    """Asserts that `value` is ``True``.

    Aliases:

    - ``to_be_true``
    - ``is_true``

    .. versionadded:: 0.1.0
    """

    #:
    reason = '{0} is not True'
    # Identity test against the ``True`` singleton; merely truthy values
    # (e.g. ``1``) do NOT pass.
    op = partial(operator.is_, True)


to_be_true = IsTrue
is_true = IsTrue


class IsNotTrue(Negate, IsTrue):
    """Asserts that `value` is not ``True``.

    Aliases:

    - ``to_not_be_true``
    - ``is_not_true``

    .. versionadded:: 0.5.0
    """

    #:
    reason = '{0} is True'


to_not_be_true = IsNotTrue
is_not_true = IsNotTrue


class IsFalse(Assertion):
    """Asserts that `value` is ``False``.

    Aliases:

    - ``to_be_false``
    - ``is_false``

    .. versionadded:: 0.1.0
    """

    #:
    reason = '{0} is not False'
    # Identity test against the ``False`` singleton; merely falsy values
    # (e.g. ``0`` or ``''``) do NOT pass.
    op = partial(operator.is_, False)


to_be_false = IsFalse
is_false = IsFalse


class IsNotFalse(Negate, IsFalse):
    """Asserts that `value` is not ``False``.

    Aliases:

    - ``to_not_be_false``
    - ``is_not_false``

    .. versionadded:: 0.5.0
    """

    #:
    reason = '{0} is False'


to_not_be_false = IsNotFalse
is_not_false = IsNotFalse


class IsNone(Assertion):
    """Asserts that `value` is ``None``.

    Aliases:

    - ``to_be_none``
    - ``is_none``

    .. versionadded:: 0.0.1
    """

    #:
    reason = '{0} is not None'
    op = staticmethod(pydash.is_none)


to_be_none = IsNone
is_none = IsNone


class IsNotNone(Negate, IsNone):
    """Asserts that `value` is not ``None``.

    Aliases:

    - ``to_be_not_none``
    - ``is_not_none``

    .. versionadded:: 0.5.0
    """

    #:
    reason = '{0} is None'


to_not_be_none = IsNotNone
is_not_none = IsNotNone
| {
"repo_name": "dgilland/verify",
"path": "verify/equality.py",
"copies": "1",
"size": "4665",
"license": "mit",
"hash": -7240248771391749000,
"line_mean": 16.4719101124,
"line_max": 79,
"alpha_frac": 0.5530546624,
"autogenerated": false,
"ratio": 3.2968197879858656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43498744503858655,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.