text stringlengths 38 1.54M |
|---|
"""Asynchronous Python client for the AdGuard Home API."""
from __future__ import annotations
from dataclasses import dataclass
from typing import TYPE_CHECKING
from .exceptions import AdGuardHomeError
if TYPE_CHECKING:
from . import AdGuardHome
@dataclass
class AdGuardHomeSafeBrowsing:
    """Controls AdGuard Home browsing security."""

    # Client used to talk to the AdGuard Home API.
    adguard: AdGuardHome

    async def enabled(self) -> bool:
        """Return if AdGuard Home browsing security is enabled or not.

        Returns:
            The current state of the AdGuard safe browsing feature.
        """
        status = await self.adguard.request("safebrowsing/status")
        return status["enabled"]

    async def enable(self) -> None:
        """Enable AdGuard Home browsing security.

        Raises:
            AdGuardHomeError: If enabling the safe browsing didn't succeed.
        """
        try:
            await self.adguard.request("safebrowsing/enable", method="POST")
        except AdGuardHomeError as err:
            msg = "Enabling AdGuard Home safe browsing failed"
            raise AdGuardHomeError(msg) from err

    async def disable(self) -> None:
        """Disable AdGuard Home browsing security.

        Raises:
            AdGuardHomeError: If disabling the safe browsing didn't succeed.
        """
        try:
            await self.adguard.request("safebrowsing/disable", method="POST")
        except AdGuardHomeError as err:
            msg = "Disabling AdGuard Home safe browsing failed"
            raise AdGuardHomeError(msg) from err
|
"""
Six Plus Twelve
===============
"""
from ...topology_graph import Edge
from ..cage import Cage
from ..vertices import LinearVertex, NonLinearVertex
class SixPlusTwelve(Cage):
    """
    Represents a cage topology graph.

    Unoptimized construction

    .. moldoc::

        import moldoc.molecule as molecule
        import stk

        bb1 = stk.BuildingBlock(
            smiles='BrCCBr',
            functional_groups=[stk.BromoFactory()],
        )
        bb2 = stk.BuildingBlock(
            smiles='Brc1c(Br)cc(Br)c(Br)c1',
            functional_groups=[stk.BromoFactory()],
        )
        cage = stk.ConstructedMolecule(
            topology_graph=stk.cage.SixPlusTwelve((bb1, bb2)),
        )
        moldoc_display_molecule = molecule.Molecule(
            atoms=(
                molecule.Atom(
                    atomic_number=atom.get_atomic_number(),
                    position=position,
                ) for atom, position in zip(
                    cage.get_atoms(),
                    cage.get_position_matrix(),
                )
            ),
            bonds=(
                molecule.Bond(
                    atom1_id=bond.get_atom1().get_id(),
                    atom2_id=bond.get_atom2().get_id(),
                    order=bond.get_order(),
                ) for bond in cage.get_bonds()
            ),
        )

    :class:`.Collapser` optimized construction

    .. moldoc::

        import moldoc.molecule as molecule
        import stk

        bb1 = stk.BuildingBlock(
            smiles='BrCCBr',
            functional_groups=[stk.BromoFactory()],
        )
        bb2 = stk.BuildingBlock(
            smiles='Brc1c(Br)cc(Br)c(Br)c1',
            functional_groups=[stk.BromoFactory()],
        )
        cage = stk.ConstructedMolecule(
            topology_graph=stk.cage.SixPlusTwelve(
                building_blocks=(bb1, bb2),
                optimizer=stk.Collapser(),
            ),
        )
        moldoc_display_molecule = molecule.Molecule(
            atoms=(
                molecule.Atom(
                    atomic_number=atom.get_atomic_number(),
                    position=position,
                ) for atom, position in zip(
                    cage.get_atoms(),
                    cage.get_position_matrix(),
                )
            ),
            bonds=(
                molecule.Bond(
                    atom1_id=bond.get_atom1().get_id(),
                    atom2_id=bond.get_atom2().get_id(),
                    order=bond.get_order(),
                ) for bond in cage.get_bonds()
            ),
        )

    Nonlinear building blocks with four functional groups are
    required for this topology.

    Linear building blocks with two functional groups are required for
    this topology.

    When using a :class:`dict` for the `building_blocks` parameter,
    as in :ref:`cage-topology-graph-examples`:
    *Multi-Building Block Cage Construction*, a
    :class:`.BuildingBlock`, with the following number of functional
    groups, needs to be assigned to each of the following vertex ids:

    | 4-functional groups: 0 to 5
    | 2-functional groups: 6 to 17

    See :class:`.Cage` for more details and examples.
    """

    # Six 4-functional-group vertices (ids 0-5): four in the z=0 plane
    # plus one above and one below it.
    _non_linears = (
        NonLinearVertex(0, [-1, -1, 0]),
        NonLinearVertex(1, [-1, 1, 0]),
        NonLinearVertex(2, [1, -1, 0]),
        NonLinearVertex(3, [1, 1, 0]),
        NonLinearVertex(4, [0, 0, 1]),
        NonLinearVertex(5, [0, 0, -1]),
    )

    # Twelve 2-functional-group vertices (ids 6-17), each placed at the
    # midpoint between the pair of non-linear vertices it links.
    _vertex_prototypes = (
        *_non_linears,
        LinearVertex.init_at_center(
            id=6,
            vertices=(_non_linears[0], _non_linears[1]),
        ),
        LinearVertex.init_at_center(
            id=7,
            vertices=(_non_linears[1], _non_linears[3]),
        ),
        LinearVertex.init_at_center(
            id=8,
            vertices=(_non_linears[3], _non_linears[2]),
        ),
        LinearVertex.init_at_center(
            id=9,
            vertices=(_non_linears[0], _non_linears[2]),
        ),
        LinearVertex.init_at_center(
            id=10,
            vertices=(_non_linears[4], _non_linears[0]),
        ),
        LinearVertex.init_at_center(
            id=11,
            vertices=(_non_linears[4], _non_linears[1]),
        ),
        LinearVertex.init_at_center(
            id=12,
            vertices=(_non_linears[4], _non_linears[2]),
        ),
        LinearVertex.init_at_center(
            id=13,
            vertices=(_non_linears[4], _non_linears[3]),
        ),
        LinearVertex.init_at_center(
            id=14,
            vertices=(_non_linears[5], _non_linears[0]),
        ),
        LinearVertex.init_at_center(
            id=15,
            vertices=(_non_linears[5], _non_linears[1]),
        ),
        LinearVertex.init_at_center(
            id=16,
            vertices=(_non_linears[5], _non_linears[2]),
        ),
        LinearVertex.init_at_center(
            id=17,
            vertices=(_non_linears[5], _non_linears[3]),
        ),
    )

    # 24 edges: each linear vertex is connected to the two non-linear
    # vertices it sits between.
    _edge_prototypes = (
        Edge(0, _vertex_prototypes[6], _vertex_prototypes[0]),
        Edge(1, _vertex_prototypes[6], _vertex_prototypes[1]),
        Edge(2, _vertex_prototypes[7], _vertex_prototypes[1]),
        Edge(3, _vertex_prototypes[7], _vertex_prototypes[3]),
        Edge(4, _vertex_prototypes[8], _vertex_prototypes[3]),
        Edge(5, _vertex_prototypes[8], _vertex_prototypes[2]),
        Edge(6, _vertex_prototypes[9], _vertex_prototypes[0]),
        Edge(7, _vertex_prototypes[9], _vertex_prototypes[2]),
        Edge(8, _vertex_prototypes[10], _vertex_prototypes[4]),
        Edge(9, _vertex_prototypes[10], _vertex_prototypes[0]),
        Edge(10, _vertex_prototypes[11], _vertex_prototypes[4]),
        Edge(11, _vertex_prototypes[11], _vertex_prototypes[1]),
        Edge(12, _vertex_prototypes[12], _vertex_prototypes[4]),
        Edge(13, _vertex_prototypes[12], _vertex_prototypes[2]),
        Edge(14, _vertex_prototypes[13], _vertex_prototypes[4]),
        Edge(15, _vertex_prototypes[13], _vertex_prototypes[3]),
        Edge(16, _vertex_prototypes[14], _vertex_prototypes[5]),
        Edge(17, _vertex_prototypes[14], _vertex_prototypes[0]),
        Edge(18, _vertex_prototypes[15], _vertex_prototypes[5]),
        Edge(19, _vertex_prototypes[15], _vertex_prototypes[1]),
        Edge(20, _vertex_prototypes[16], _vertex_prototypes[5]),
        Edge(21, _vertex_prototypes[16], _vertex_prototypes[2]),
        Edge(22, _vertex_prototypes[17], _vertex_prototypes[5]),
        Edge(23, _vertex_prototypes[17], _vertex_prototypes[3]),
    )

    # Window metadata consumed by Cage analysis code.
    _num_windows = 8
    _num_window_types = 1
|
from uploads3 import *
from simple_camera import take_picture
import time
from flask import Flask
from flask import request
app = Flask(__name__)


@app.route('/mask')
def run_recognition():
    """Capture an image, run mask detection on it, store and return the result."""
    uploader = s3Upload()
    # take picture and name it capture.png
    uploader.upload_picture()
    # mask recognition
    labels = uploader.detect_labels()
    state = uploader.check_mask(labels)
    print("STATE is", state)
    # updating db
    uploader.update_db(state)
    print(labels)
    return state
@app.route('/stats')
def getStats():
    """Return the stats for the date given in the ``date`` query parameter."""
    print(request.args.get("date"))
    requested_date = str(request.args.get("date"))
    uploader = s3Upload()
    return uploader.get_today_stats(requested_date)
|
#! python3
import numpy
import mss
import pyautogui
from pynput.mouse import Button, Controller
import time
from constant import BLACK_PIXEL_THRESHOLD, CLICK_POSITION_LATENCY
# Log the display size and current cursor position at startup.
screen_size = pyautogui.size()
print(screen_size)
cursor_position = pyautogui.position()
print(cursor_position)
class MouseController:
    """Mouse helper: pynput for movement/clicks, pyautogui for queries."""

    # Single shared pynput mouse controller for all static methods.
    mouse = Controller()

    @staticmethod
    def queryPosition():
        """Return the current cursor position."""
        return pyautogui.position()

    @staticmethod
    def click(x, y):
        """Move the cursor to (x, y), wait briefly, then left-click once."""
        # pyautogui.click(x, y)
        mouse = MouseController.mouse
        mouse.position = (x, y)
        # give the position change time to register before clicking
        time.sleep(CLICK_POSITION_LATENCY)
        mouse.click(Button.left, 1)

    @staticmethod
    def moveTo(x, y):
        """Move the cursor to (x, y) without clicking."""
        # pyautogui.moveTo(x, y)
        MouseController.mouse.position = (x, y)
class KeyboardController:
    """Static facade over pyautogui's keyboard functions."""

    @staticmethod
    def keyDown(keyCode):
        # Press and hold a key.
        pyautogui.keyDown(keyCode)

    @staticmethod
    def keyUp(keyCode):
        # Release a previously held key.
        pyautogui.keyUp(keyCode)

    @staticmethod
    def press(keyCode):
        # Single press-and-release.
        pyautogui.press(keyCode)

    @staticmethod
    def typeWrite(text):
        # Type a whole string, one character at a time.
        pyautogui.typewrite(text)

    @staticmethod
    def hotkey(*keyCodes):
        # Press a key combination, e.g. hotkey('ctrl', 'c').
        pyautogui.hotkey(*keyCodes)
class ScreenController:
    """Screen-capture helper built on mss."""

    @staticmethod
    def screenshot(box_coordinates=None):
        """Grab the screen (or a region of it) as a numpy array.

        Args:
            box_coordinates: optional mss-style region (monitor dict or
                (left, top, right, bottom) tuple). When omitted, the full
                virtual screen is captured.

        Returns:
            numpy array of the raw BGRA screenshot data.
        """
        with mss.mss() as sct:
            if box_coordinates:
                return numpy.array(sct.grab(box_coordinates))
            # BUG FIX: mss's grab() requires a monitor argument; calling it
            # with no arguments raises TypeError. monitors[0] covers the
            # whole virtual screen.
            return numpy.array(sct.grab(sct.monitors[0]))
|
from rest_framework import serializers
from apps.utils.serializers import BtcAddressField
from .models import *
class OrganizationSerializer(serializers.ModelSerializer):
    """Full read serializer for Organization (DRF 2.x style).

    The ``serializers.Field`` entries are read-only projections of related
    or derived model values; the address fields validate coin-address
    formats (34 chars max).
    """

    category_name = serializers.Field(source='category.name')
    owner = serializers.Field(source='user.username')
    validation_state = serializers.Field(source='validation_state')
    foundation_address = serializers.Field(source='foundation_address_value')
    freicoin_address = BtcAddressField(source='freicoin_address_value', max_length=34)
    bitcoin_address = BtcAddressField(source='bitcoin_address_value', required=False, max_length=34)

    class Meta:
        model = Organization
        fields = ('id', 'name', 'website', 'email', 'category', 'category_name', 'user', 'owner',
                  'short_description', 'long_description', 'validation_state',
                  'foundation_address', 'freicoin_address', 'bitcoin_address')
class OrganizationShortSerializer(serializers.ModelSerializer):
    """Compact Organization representation used when nested in categories."""

    # NOTE(review): ``category_name`` and ``user`` are declared but not
    # listed in Meta.fields below, so they are never serialized -- confirm
    # whether they should be added to ``fields`` or removed.
    category_name = serializers.Field(source='category.name')
    user = serializers.Field(source='user.username')
    foundation_address = serializers.Field(source='foundation_address_value')

    class Meta:
        model = Organization
        fields = ('id', 'name', 'website', 'foundation_address', 'short_description')
class CategoryShortSerializer(serializers.HyperlinkedModelSerializer):
    """Minimal Category representation: id and name only."""

    class Meta:
        model = Category
        fields = ('id', 'name')
class CategorySerializer(serializers.HyperlinkedModelSerializer):
    """Category with all of its organizations, nested recursively."""

    organizations = OrganizationShortSerializer(source='organizations')
    inner_organizations = serializers.Field(source='inner_organizations')

    class Meta:
        model = Category
        fields = ('id', 'name', 'inner_organizations', 'organizations', 'child_categories')


# Recursive nesting: child categories are serialized with this same
# serializer; wired up after the class exists via DRF 2.x ``base_fields``.
CategorySerializer.base_fields['child_categories'] = CategorySerializer()
class CategoryValidatedSerializer(serializers.HyperlinkedModelSerializer):
    """Category restricted to its validated organizations, nested recursively."""

    organizations = OrganizationShortSerializer(source='validated')
    inner_organizations = serializers.Field(source='inner_validated')

    class Meta:
        model = Category
        fields = ('id', 'name', 'inner_organizations', 'organizations', 'child_categories')


# Recursive nesting (see CategorySerializer above for the pattern).
CategoryValidatedSerializer.base_fields['child_categories'] = CategoryValidatedSerializer()
class CategoryCandidatesSerializer(serializers.HyperlinkedModelSerializer):
    """Category restricted to its candidate organizations, nested recursively."""

    organizations = OrganizationShortSerializer(source='candidates')
    inner_organizations = serializers.Field(source='inner_candidates')

    class Meta:
        model = Category
        fields = ('id', 'name', 'inner_organizations', 'organizations', 'child_categories')


# Recursive nesting via DRF 2.x ``base_fields``.
CategoryCandidatesSerializer.base_fields['child_categories'] = CategoryCandidatesSerializer()
class CategoryBlockedSerializer(serializers.HyperlinkedModelSerializer):
    """Category restricted to its blocked organizations, nested recursively."""

    organizations = OrganizationShortSerializer(source='blocked')
    inner_organizations = serializers.Field(source='inner_blocked')

    class Meta:
        model = Category
        fields = ('id', 'name', 'inner_organizations', 'organizations', 'child_categories')


# Recursive nesting via DRF 2.x ``base_fields``.
CategoryBlockedSerializer.base_fields['child_categories'] = CategoryBlockedSerializer()
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 12:09:57 2017
@author: Richard
"""
import matplotlib.pyplot as plt
from math import cos, sin, atan
class Neuron():
    """A single neuron, rendered as an empty circle at (x, y)."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def draw(self, neuron_radius):
        """Add this neuron's circle outline to the current matplotlib axes."""
        outline = plt.Circle((self.x, self.y), radius=neuron_radius, fill=False)
        plt.gca().add_patch(outline)
class Layer():
    """One vertical column of neurons in the network diagram."""

    def __init__(self, network, number_of_neurons, number_of_neurons_in_widest_layer):
        # Layout constants, in plot coordinates.
        self.horizontal_distance_between_layers = 6
        self.vertical_distance_between_neurons = 2
        self.neuron_radius = 0.5
        self.number_of_neurons_in_widest_layer = number_of_neurons_in_widest_layer
        self.previous_layer = self.__get_previous_layer(network)
        self.x = self.__calculate_layer_x_position()
        self.neurons = self.__initialize_neurons(number_of_neurons)

    def __initialize_neurons(self, number_of_neurons):
        """Create this layer's neurons, vertically centered on the widest layer."""
        neurons = []
        y = self.__calculate_bottom_margin_so_layer_is_centered(number_of_neurons)
        # BUG FIX: xrange() is Python 2 only; use range() so this runs on Python 3.
        for iteration in range(number_of_neurons):
            neuron = Neuron(self.x, y)
            neurons.append(neuron)
            y += self.vertical_distance_between_neurons
        return neurons

    def __calculate_bottom_margin_so_layer_is_centered(self, number_of_neurons):
        # Half the leftover vertical space goes below the layer.
        return self.vertical_distance_between_neurons * (self.number_of_neurons_in_widest_layer - number_of_neurons) / 2

    def __calculate_layer_x_position(self):
        """Place this layer a fixed distance right of the previous one (or at 0)."""
        if self.previous_layer:
            return self.previous_layer.x + self.horizontal_distance_between_layers
        else:
            return 0

    def __get_previous_layer(self, network):
        """Return the most recently added layer, or None for the first layer."""
        if len(network.layers) > 0:
            return network.layers[-1]
        else:
            return None

    def __line_between_two_neurons(self, neuron1, neuron2):
        """Draw a connection line, trimmed so it starts/ends at circle edges."""
        angle = atan((neuron2.y - neuron1.y) / float(neuron2.x - neuron1.x))
        x_adjustment = self.neuron_radius * cos(angle)
        y_adjustment = self.neuron_radius * sin(angle)
        line = plt.Line2D((neuron1.x - x_adjustment, neuron2.x + x_adjustment),
                          (neuron1.y - y_adjustment, neuron2.y + y_adjustment))
        plt.gca().add_line(line)

    def draw(self, layerType=0):
        """Draw all neurons, connections to the previous layer, and a label.

        Args:
            layerType: 0 for the input layer, -1 for the output layer,
                otherwise the 1-based hidden-layer index.
        """
        for neuron in self.neurons:
            neuron.draw(self.neuron_radius)
            if self.previous_layer:
                for previous_layer_neuron in self.previous_layer.neurons:
                    self.__line_between_two_neurons(neuron, previous_layer_neuron)
        # Write text:
        if layerType == 0:
            plt.text(self.x, -1, 'Input Layer', fontsize=12, rotation=-45)
        elif layerType == -1:
            plt.text(self.x, -1, 'Output Layer', fontsize=12, rotation=-45)
        else:
            plt.text(self.x, -1, 'Hidden Layer %d' % layerType, fontsize=12, rotation=-45)
class NeuralNetwork():
    """Collects Layer objects and renders the whole architecture diagram."""

    def __init__(self, number_of_neurons_in_widest_layer):
        self.number_of_neurons_in_widest_layer = number_of_neurons_in_widest_layer
        self.layers = []
        self.layertypes = 0

    def add_layer(self, number_of_neurons):
        """Append a new layer with the given number of neurons."""
        layer = Layer(self, number_of_neurons, self.number_of_neurons_in_widest_layer)
        self.layers.append(layer)

    def draw(self, plot_title="Neural Network Architecture", fig=None):
        """Draw every layer into *fig* (created if None) and return the figure."""
        if fig is None:
            fig = plt.figure()
        else:
            # BUG FIX: pyplot has no set_current_figure(); selecting the
            # figure by number via plt.figure() makes it current.
            plt.figure(fig.number)
        for i in range(len(self.layers)):
            layer = self.layers[i]
            if i == len(self.layers) - 1:
                # The last layer is labelled as the output layer.
                i = -1
            layer.draw(i)
        plt.axis("tight")
        plt.axis('off')
        plt.title(plot_title, fontsize=15)
        plt.show()
        return fig
class DrawNN():
    """Convenience wrapper: DrawNN([2, 8, 1]).draw() renders the network."""

    def __init__(self, neural_network):
        # List of layer sizes, e.g. [20, 14, 8].
        self.neural_network = neural_network

    def draw(self, title="Neural Network Architecture", fig=None):
        """Build a NeuralNetwork from the layer sizes and draw it.

        Returns:
            The matplotlib figure. (BUG FIX: the original computed the
            figure but never returned it, so callers could not reuse it.)
        """
        widest_layer = max(self.neural_network)
        network = NeuralNetwork(widest_layer)
        for layer_size in self.neural_network:
            network.add_layer(layer_size)
        return network.draw(title, fig)
# Demo: render a 7-layer network when run as a script.
if __name__ == "__main__":
    network = DrawNN([20,14,8,10,10,9,15])
    network.draw()
# Fixture data for indicator (IOC) parsing tests. "GOOD" lists should be
# accepted by the matcher under test, "BAD" lists rejected. Bracketed
# forms like 'example[.]com' are defanged/neutered indicators.
TEST_DOMAINS_GOOD = [
    'cmatthewbrooks.com',
    'cmatthewbrooks[.]com',
    'google.com',
    'www.badguy.com',
    'www.badguy.xyz'
]
TEST_DOMAINS_BAD = [
    'asdfasdfasdfasdf',
    'a',
    ';klj23;4lkj2;3k4j;k'
]
TEST_IPS_GOOD = [
    '1.1.1.1',
    '8.8.8.8',
    '123.123.123.123',
    '123.123.123[.]123',
    '123[.]123[.]123[.]123',
    '123,123,123,123',
    '123.123,123.123'
]
TEST_IPS_BAD = [
    '999',
    '999.999.999.999',
    '123.123.123'
]
TEST_MD5_GOOD = [
    'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
    'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA',
    '931606baaa7a2b4ef61198406f8fc3f4',
    '931606BAAA7A2B4EF61198406F8FC3F4'
]
TEST_MD5_BAD = [
    # 31-char strings (one short of a valid MD5).
    'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
    'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA',
    # BUG FIX: a missing comma here used to implicitly concatenate the
    # next two entries into one 73-character string.
    '9999999999999999999999999999999999999999',
    'zxczxczxczxczxczxczxczxczxczxczxc'
]
TEST_SHA1_GOOD = [
    'd3a21675a8f19518d8b8f3cef0f6a21de1da6cc7',
    'D3A21675A8F19518D8B8F3CEF0F6A21DE1DA6CC7',
    'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
    'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA',
    '9999999999999999999999999999999999999999'
]
TEST_SHA1_BAD = [
    'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
    'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA',
    'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz',
    'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
]
TEST_SHA256_GOOD = [
    '0d06f9724af41b13cdacea133530b9129a48450230feef9632d53d5bbb837c8c',
    '0D06F9724AF41B13CDACEA133530B9129A48450230FEEF9632D53D5BBB837C8C',
    'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
    'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA',
    '9999999999999999999999999999999999999999999999999999999999999999'
]
TEST_SHA256_BAD = [
    'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
    'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
    'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA',
    'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA',
    'zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz',
    'ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ'
]
TEST_EMAILS_GOOD = [
    'me@cmatthewbrooks.com',
    'hilaryclinton@privatemailserver.com',
    'badguy@baguyserver.xyz'
]
TEST_EMAILS_BAD = [
    'cmatthewbrooks.com',
    'me@cmatthewbrooks'
]
TEST_NEUTERED_INDICATORS = [
    'cmatthewbrooks[.]com',
    'me[@]cmatthewbrooks[.]com',
    'me[@]cmatthewbrooks.com'
]
# Convenience aggregates of every valid / invalid indicator.
ALL_INDICATORS_GOOD = (
    TEST_DOMAINS_GOOD +
    TEST_IPS_GOOD +
    TEST_MD5_GOOD +
    TEST_SHA1_GOOD +
    TEST_SHA256_GOOD +
    TEST_EMAILS_GOOD
)
ALL_INDICATORS_BAD = (
    TEST_DOMAINS_BAD +
    TEST_IPS_BAD +
    TEST_MD5_BAD +
    TEST_SHA1_BAD +
    TEST_SHA256_BAD +
    TEST_EMAILS_BAD
)
|
import numpy as np
import matplotlib.pyplot as plt
def createClusteredData(N, k):
    """Generate ~N 2-D (income, age) points grouped into k Gaussian clusters.

    Each cluster gets a random centroid (income in [20k, 200k], age in
    [20, 70]); points are sampled normally around it. Returns (x, y) where
    x is the (N, 2) feature array and y the integer cluster labels.
    """
    points_per_cluster = float(N) / k
    features = []
    labels = []
    for cluster in range(k):
        income_centroid = np.random.uniform(20000.0, 200000.0)
        age_centroid = np.random.uniform(20.0, 70.0)
        for _ in range(int(points_per_cluster)):
            features.append([
                np.random.normal(income_centroid, 10000.0),
                np.random.normal(age_centroid, 2.0),
            ])
            labels.append(cluster)
    return np.array(features), np.array(labels)
# Generate sample data and visualize the raw clusters.
(x, y) = createClusteredData(100, 5)
plt.figure(figsize=(8, 6))
# BUG FIX: np.float was removed in NumPy 1.24; the builtin float is the
# documented replacement.
plt.scatter(x[:, 0], x[:, 1], c=y.astype(float))
plt.show()
# ----------------------------- SVM ------------------------------------------
from sklearn import svm, datasets
# NOTE(review): scikit-learn's SVC regularization parameter is uppercase
# ``C``; the commented-out call below passed lowercase ``c``, which would
# raise a TypeError. The live call relies on the default C=1.0.
c = 1.0
# svc = svm.SVC(kernel='linear', c=c).fit(x,y)
svc = svm.SVC(kernel='linear').fit(x, y)
def plotPredictions(clf):
    """Plot *clf*'s decision regions over the income/age plane.

    Evaluates the classifier on a dense grid (income 0-250k, age 10-70),
    draws the filled decision regions, and overlays the module-level
    training points x/y.
    """
    xx, yy = np.meshgrid(np.arange(0, 250000, 10), np.arange(10, 70, 0.5))
    z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    plt.figure(figsize=(8, 6))
    z = z.reshape(xx.shape)
    plt.contourf(xx, yy, z, cmap=plt.cm.Paired, alpha=0.8)
    # BUG FIX: np.float was removed in NumPy 1.24; use the builtin float.
    plt.scatter(x[:, 0], x[:, 1], c=y.astype(float))
    plt.show()
plotPredictions(svc)
# -----Make a prediction of a person who makes 200000 a year and 40 years old
# predict() expects a 2-D array: one row per sample.
pred = svc.predict([[200000, 40]])
print(pred)
from modules.utils.location import Location
from modules.utils.block import Block
from modules.kakuro import Kakuro
class Parser:
    """Parses a raw kakuro puzzle description into a Kakuro instance."""

    @staticmethod
    def parse(raw_kakuro):
        """Parse an iterable of puzzle lines.

        Line formats: ``S <rows>x<cols>`` sets the grid size, ``H`` / ``V``
        switch the orientation of subsequent blocks, and any other line is
        ``<location> <total> <length>`` describing one block.

        Raises:
            Exception: if a block appears before any H/V marker, or no
                size line was given.
        """
        blocks = []
        size = None
        is_horizontal_line = None
        for line in raw_kakuro:
            line = line.rstrip('\n')
            info = line.split(' ')
            if info[0] == 'S':
                # e.g. "S 9x9" -> (9, 9). map(int, ...) replaces the
                # redundant ``lambda x: int(x)``.
                size = tuple(map(int, info[1].split('x')))
            elif info[0] == 'H':
                is_horizontal_line = True
            elif info[0] == 'V':
                is_horizontal_line = False
            else:
                if is_horizontal_line is None:
                    raise Exception('value of "is_horizontal_line" is not assigned')
                block = Block(Location.parse(',', info[0]), int(info[1]), int(info[2]), is_horizontal_line)
                blocks.append(block)
        if size is None:
            raise Exception('value of "size" is not assigned')
        return Kakuro(size, blocks)
|
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
style.use('ggplot')

# Toy training set: two classes keyed by label (-1 / +1), each value an
# array of 2-D feature points.
data_dict = {-1: np.array([[1, 7],
                           [2, 8],
                           [3, 8], ]),
             1: np.array([[5, 1],
                          [6, -1],
                          [7, 3], ])}
class Support_Vector_Machine:
    """Skeleton of a linear SVM classifier.

    fit() is not yet implemented; predict() assumes the weight vector
    ``self.w`` and bias ``self.b`` have been set.
    """

    def __init__(self, visualization=True):
        self.visualization = visualization
        self.colors = {1: 'r', -1: 'b'}
        if self.visualization:
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(1, 1, 1)

    def fit(self, data):
        """Train the classifier (not implemented yet)."""
        pass

    def predict(self, features):
        """Classify one sample via sign(x . w + b)."""
        return np.sign(np.dot(np.array(features), self.w) + self.b)
import KNN
import numpy as np
from sklearn import preprocessing, model_selection, svm, neighbors
import pandas as pd
import sklearn
# Load the Wisconsin breast-cancer dataset, train a linear SVM, and report
# its held-out accuracy.
df = pd.read_csv('breast-cancer-wisconsin.data')
# The dataset marks missing values with '?'; replace them with a large
# outlier value so they do not match any real measurement.
df.replace('?', -9999999, inplace=True)
# BUG FIX: positional ``axis`` for DataFrame.drop was deprecated and
# removed in pandas 2.0 -- pass it by keyword.
df.drop(['id'], axis=1, inplace=True)
X = np.array(df.drop(['class'], axis=1))
Y = np.array(df['class'])
# 80/20 train/test split.
X_train, X_test, Y_train, Y_test = sklearn.model_selection.train_test_split(X, Y, test_size=0.2)
clf = svm.SVC(kernel='linear')
clf.fit(X_train, Y_train)
ACC = clf.score(X_test, Y_test)
print(ACC)
|
from pydantic import BaseModel
class BankNote(BaseModel):
    """Request/response schema for banknote measurements.

    NOTE(review): the field names match the UCI banknote-authentication
    dataset's wavelet-transform image features -- confirm against the
    model this schema feeds.
    """

    variance: float
    skewness: float
    kurtosis: float
    entropy: float
# Comments
# Comments explain code (or anything else) and are ignored when the code
# runs. They start with a `#` followed by a space, like these lines.

# Printing output
# print() writes its argument to the console. Functions themselves are
# covered in the "Python functions" tutorial.
# Expected output: Hello, Python!
print('Hello, Python!')

# Identifiers
# A variable (in Python, also called an identifier or name) is declared
# with the `=` sign. Declaration and assignment happen at the same time,
# which is why this is called binding.
a = 1
# Expected output: 1
print(a)

# Identifier rules:
# 1. An identifier may not be one of Python's reserved keywords.
# 2. ASCII or Unicode characters are allowed, although Unicode
#    identifiers are discouraged. ASCII covers 1-byte characters such as
#    digits, Latin letters and punctuation; Unicode covers multi-byte
#    scripts such as Korean, Chinese and Japanese.
# 3. An identifier may not start with a digit.
# 4. Characters that collide with Python's built-in operators are not
#    allowed (operators are covered in the "Python operators" tutorial).

# To inspect Python's reserved keywords, run the following:
import keyword

kwlist = keyword.kwlist
# Expected output (Python 3.9-era list):
# [
#     'False', 'None', 'True',
#     '__peg_parser__', 'and',
#     'as', 'assert', 'async',
#     'await', 'break', 'class',
#     'continue', 'def', 'del',
#     'elif', 'else', 'except',
#     'finally', 'for', 'from',
#     'global', 'if', 'import',
#     'in', 'is', 'lambda',
#     'nonlocal', 'not', 'or',
#     'pass', 'raise', 'return',
#     'try', 'while', 'with',
#     'yield'
# ]
print(kwlist)
# Each keyword is covered later in the tutorial series.
|
"""Provides an object that handles the registration and deregistration of the callbacks for a given non-standard event,
providing convenient methods for common tasks.
Usage: Instantiate the object when needed, perform the necessary operations, and store in state["callbackStore"] (probably
under a key such as mainActor.name which will properly identify it). When the callbacks are no longer leader, delete from the
state. If a group of callbacks may be deleted under different circumstances, it is probably best to make two objects."""
from functools import partial
import warnings
from Objs.Utilities.ArenaUtils import DictToOrderedDict
class IndividualEventHandler(object):
    """Registers callbacks into state["callbacks"] lists and removes them
    all again when this handler object is garbage-collected."""

    def __init__(self, state):
        self.state = state
        # List of tuples, (callback list name where callback is stored, callback)
        self.callbackReferences = []

    def __del__(self):
        # Deregister every callback this handler installed.
        for toRemove in self.callbackReferences:
            try:
                self.state["callbacks"][toRemove[0]].remove(toRemove[1])
            except ValueError:
                # Callback was already gone from the list; ignore.
                pass
                #warnings.warn('IndividualEventHandler: Attempted to remove invalid callback '+str(toRemove[1])+'('+toRemove[1].eventName+') from '+toRemove[0])
        self.callbackReferences = []

    def registerEvent(self, locationListName, func, front=True):
        """Insert *func* into the named callback list (at the front by
        default) and remember it for later deregistration."""
        if front:
            self.state["callbacks"][locationListName].insert(0, func)
        else:
            self.state["callbacks"][locationListName].append(func)
        self.callbackReferences.append((locationListName, func))

    def setEventWeightForSingleContestant(self, eventName, contestantName, weight, state):
        """Override the weight of one event for one contestant.

        A weight of 0 blocks the event for that contestant entirely.
        Returns the installed callback in case the caller needs it.
        """
        def func(actor, origWeight, event):
            # if we're trying to set a weight to positive but it's the wrong phase
            if weight and "phase" in event.baseProps and state["curPhase"] not in event.baseProps["phase"]:
                return (origWeight, True)
            if event.name == eventName and actor.name == contestantName:
                # if weight is 0, we almost always want this to return False and block the event entirely
                return (weight, bool(weight))
            else:
                return (origWeight, True)
        # this anonymizes func, giving a new reference each time this is called
        def anonfunc(actor, origWeight, event): return func(
            actor, origWeight, event)
        anonfunc.eventName = eventName  # Notes on the functor for debug purposes
        anonfunc.contestantName = contestantName
        # This needs to be at beginning for proper processing
        self.registerEvent("modifyIndivActorWeights", anonfunc)
        return anonfunc  # Just in case it's needed by the calling function

    def bindRoleForContestantAndEvent(self, roleName, fixedRoleList, relevantActor, eventName):
        """Force a fixed cast for one role of one contestant's event.

        Installs two callbacks: one that substitutes *fixedRoleList* into
        the role when the event fires, and one that zeroes the event's
        weight whenever anyone in the fixed list is no longer alive.
        Returns both callbacks.
        """
        anonfunc = partial(self.fixedRoleCallback, roleName,
                           fixedRoleList, relevantActor, eventName)
        anonfunc.eventName = eventName  # Notes on the functor for debug purposes
        anonfunc.relevantActor = relevantActor
        anonfunc.fixedRoleList = fixedRoleList
        self.registerEvent("overrideContestantEvent", anonfunc)
        # It must _also_ be checked that the people bound all still live. This has be done before the event is selected, to prevent the selection
        # of invalid events.
        def func(actor, origWeight, event):  # Black magic
            if event.name == eventName and actor.name == relevantActor.name:
                for person in fixedRoleList:
                    if not person.alive:
                        return (0, False)
            return (origWeight, True)
        # this anonymizes func, giving a new reference each time this is called
        def anonfunc2(actor, origWeight, event): return func(
            actor, origWeight, event)
        # This needs to be at beginning for proper processing
        self.registerEvent("modifyIndivActorWeights", anonfunc2, False)
        return anonfunc, anonfunc2  # Just in case it's needed by the calling function

    @staticmethod
    def fixedRoleCallback(roleName, fixedRoleList, relevantActor, eventName, contestantKey, thisevent, state, participants, victims, sponsorsHere):
        """Replace the contents of the named role list with *fixedRoleList*
        when the bound (event, contestant) pair matches."""
        # Avoiding eval here
        roleDict = DictToOrderedDict({"participants": participants,
                                      "victims": victims,
                                      "sponsors": sponsorsHere})
        if thisevent.name == eventName and relevantActor.name == contestantKey:
            # Have to clear the list BUT keep the reference
            del roleDict[roleName][:]
            roleDict[roleName].extend(fixedRoleList)
        return True, False

    def banEventForSingleContestant(self, eventName, contestantName, state):
        """Convenience wrapper: block an event entirely (weight 0)."""
        self.setEventWeightForSingleContestant(
            eventName, contestantName, 0, state)

    def banMurderEventsAtoB(self, cannotKill, cannotBeVictim):
        """Prevent *cannotKill* from selecting any murder event that would
        target *cannotBeVictim*. Returns the installed callback."""
        def func(contestantKey, thisevent, state, participants, victims, sponsorsHere):
            # NOTE(review): `and` binds tighter than `or` here, so this reads
            # as: victim listed, OR (no explicit victims AND victim among
            # participants). Confirm that grouping is intended.
            if "murder" in thisevent.baseProps and thisevent.baseProps["murder"] and contestantKey == str(cannotKill):
                if cannotBeVictim in victims or (not victims) and cannotBeVictim in participants:
                    return False, True
            return True, False
        def anonfunc(contestantKey, thisevent, state, participants, victims, sponsorsHere): return func(contestantKey, thisevent,
                                                                                                        state, participants, victims, sponsorsHere)  # this anonymizes func, giving a new reference each time this is called
        anonfunc.cannotKill = cannotKill  # Notes on the functor for debug purposes
        anonfunc.cannotBeVictim = cannotBeVictim
        # This needs to be at beginning for proper processing
        self.registerEvent("overrideContestantEvent", anonfunc)
        return anonfunc  # Just in case it's needed by the calling function
|
from datetime import datetime
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms import Form , BooleanField , PasswordField , HiddenField , DateTimeField , IntegerField , DecimalField , FloatField , RadioField |
from google_drive_downloader import GoogleDriveDownloader as gdd
# Warn the user up front: the archive is large and downloading can be slow.
print('\n\n!!! This may take several minutes !!!\n\n')
# Fetch the NeVOmics R-library bundle from Google Drive and unzip it
# next to this script.
gdd.download_file_from_google_drive(file_id='1u9PkZ5UWcKeOaJGjhS-VYm0WIz0vd6cP',
                                    dest_path='./Rlibrary_NeVOmics.zip',
                                    unzip=True)
|
from chryso.schema import BaseEnum
class CardType(BaseEnum):
    """Closed set of card types; each member's value is its lowercase
    string name. (Storage/serialization behavior comes from chryso's
    BaseEnum -- presumably database-backed; confirm in chryso.schema.)"""

    action = 'action'
    attack = 'attack'
    castle = 'castle'
    curse = 'curse'
    duration = 'duration'
    event = 'event'
    gathering = 'gathering'
    knight = 'knight'
    landmark = 'landmark'
    looter = 'looter'
    prize = 'prize'
    reaction = 'reaction'
    reserve = 'reserve'
    ruins = 'ruins'
    shelter = 'shelter'
    traveller = 'traveller'
    treasure = 'treasure'
    victory = 'victory'
|
import pygame
import time
import random
from tools.utils import emptyRoundRect
# Initialize pygame, the frame clock and the 1200x750 game window.
pygame.init()
pygame.font.init()
clock = pygame.time.Clock()
win = pygame.display.set_mode((1200, 750), 0, 32)

# Basic color palette.
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
BG_COLOR = (127, 127, 127)
myfont = pygame.font.SysFont('Comicsans', 30)

state = 'player_turn'  # whose turn it currently is
turn_cnt = 0  # the dice may be rolled up to three times per turn

# win = pygame.display.set_mode((SC_WIDTH, SC_HEIGHT))
# pygame.display.set_caption('Yacht Dice Game')
# Hand-ranking (score) calculation
class Strategy():
    """Yacht-dice score calculator operating on a shared scoreboard dict."""

    def __init__(self, strategies):
        # Scoreboard dict (category -> {'score', 'selected', 'done'});
        # shared with the caller and mutated in place by calculate().
        self.strategies = strategies

    def set_dices(self, dices):
        # Dice objects whose .side attribute holds each rolled face value.
        self.dices = dices

    def calculate(self):
        """Recompute the provisional score of every unfinished category
        from the current dice; returns the scoreboard dict."""
        self.sides = [dice.side for dice in self.dices]
        self.unique = set(self.sides)
        upper_score = 0
        # Upper section: 1s..6s score the sum of matching faces.
        for i in range(1, 7):
            if self.strategies['%ds' % i]['done']:
                continue
            score = self.sum_of_single(i)
            self.strategies['%ds' % i]['score'] = score
            upper_score += score
        # NOTE(review): only not-yet-'done' upper categories are summed
        # above, so locked-in scores never count toward the 63-point
        # bonus -- confirm this matches the intended rules.
        if upper_score >= 63:
            self.strategies['Bonus']['score'] = 35
        if not self.strategies['Choice']['done']:
            self.strategies['Choice']['score'] = sum(self.sides)
        if not self.strategies['4-of-a-kind']['done']:
            self.strategies['4-of-a-kind']['score'] = self.of_a_kind(4)
        if not self.strategies['Full House']['done']:
            self.strategies['Full House']['score'] = self.full_house()
        if not self.strategies['S. Straight']['done']:
            self.strategies['S. Straight']['score'] = self.small_straight()
        if not self.strategies['L. Straight']['done']:
            self.strategies['L. Straight']['score'] = self.large_straight()
        if not self.strategies['Yacht']['done']:
            self.strategies['Yacht']['score'] = self.of_a_kind(5)
        # Total: sum of all locked-in ('done') categories.
        self.strategies['Total']['score'] = 0
        for k, v in self.strategies.items():
            if v['done']:
                self.strategies['Total']['score'] += v['score']
        return self.strategies

    def count(self, number):
        # Number of dice currently showing *number*.
        return len([side for side in self.sides if side == number])

    def highest_repeated(self, min_repeats):
        # Highest face appearing at least min_repeats times (0 if none).
        repeats = [x for x in self.unique if self.count(x) >= min_repeats]
        return max(repeats) if repeats else 0

    def of_a_kind(self, n):
        """Score for n-of-a-kind: Yacht (n == 5) is a flat 50, otherwise
        n matching faces plus the remaining dice faces."""
        hr = self.highest_repeated(n)
        if hr == 0:
            return 0
        if n == 5:
            return 50
        rests = [side for side in self.sides if side != hr]
        return hr * n + sum(rests)

    def sum_of_single(self, number):
        # Upper-section score: sum of the dice showing *number*.
        return sum([x for x in self.sides if x == number])

    def full_house(self):
        """25 points for a triple plus a pair; checks both orderings
        since highest_repeated() prefers the higher face value."""
        hr = self.highest_repeated(3)
        if hr > 0:
            rests = [side for side in self.sides if side != hr]
            if len(set(rests)) == 1 and len(rests) == 2:
                return 25
        hr = self.highest_repeated(2)
        if hr > 0:
            rests = [side for side in self.sides if side != hr]
            if len(set(rests)) == 1 and len(rests) == 3:
                return 25
        return 0

    def small_straight(self):
        # 30 points for any run of four consecutive faces.
        if set([1, 2, 3, 4]).issubset(self.unique) or set([2, 3, 4, 5]).issubset(self.unique) or set(
                [3, 4, 5, 6]).issubset(self.unique):
            return 30
        return 0

    def large_straight(self):
        # 40 points for a run of five consecutive faces.
        if set([1, 2, 3, 4, 5]).issubset(self.unique) or set([2, 3, 4, 5, 6]).issubset(self.unique):
            return 40
        return 0
# Scoreboard category list
# Scoreboard categories in display order.
strategies_order = ['1s', '2s', '3s', '4s', '5s', '6s', 'Bonus', 'Choice',
                    '4-of-a-kind', 'Full House', 'S. Straight', 'L. Straight', 'Yacht', 'Total']
# One scoreboard entry per category: the provisional score, whether the
# player has selected it this turn, and whether it is locked in.
# (The original first filled this dict with bare zeroes and then
# immediately overwrote every entry in a loop whose enumerate index was
# unused; building the final shape directly is equivalent.)
strategies = {
    name: {
        'score': 0,
        'selected': False,
        'done': False
    }
    for name in strategies_order
}
# Score calculator sharing (and mutating in place) the scoreboard above.
strategy = Strategy(strategies)
class Dice(object):
    """One on-screen die: position, current face value and rolling state."""

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.side = 0            # face value; 0 until first rolled
        self.status = 'stopped'  # 'stopped' | 'rolling' | 'finalroll'
        # Home position, restored when the final roll settles.
        self.tempx = x
        self.tempy = y

    def roll(self, win):
        """Draw one animation frame of this die onto *win*."""
        pygame.draw.rect(win, WHITE, (self.x, self.y, 100, 100))
        # If currently stopped, switch into the rolling animation.
        if self.status == 'stopped':
            self.status = 'rolling'
        if self.status == 'finalroll':
            # Snap back to the home position for the settled die.
            self.x = self.tempx
            self.y = self.tempy
        # Re-roll the face each frame (gives the tumbling effect).
        # NOTE(review): indentation reconstructed -- confirm this line is
        # not meant to run only during the final roll.
        self.side = random.randint(1, 6)
        # Draw the pips for the current face.
        if self.side == 1 or self.side == 3 or self.side == 5:
            pygame.draw.circle(win, BLACK, (self.x + 50, self.y + 50), 8, 8)
        if self.side == 4 or self.side == 5 or self.side == 6:
            pygame.draw.circle(win, BLACK, (self.x + 20, self.y + 20), 8, 8)
        if self.side == 6:
            pygame.draw.circle(win, BLACK, (self.x + 20, self.y + 50), 8, 8)
        if self.side == 2 or self.side == 3 or self.side == 4 or self.side == 5 or self.side == 6:
            pygame.draw.circle(win, BLACK, (self.x + 20, self.y + 80), 8, 8)
        if self.side == 2 or self.side == 3 or self.side == 4 or self.side == 5 or self.side == 6:
            pygame.draw.circle(win, BLACK, (self.x + 80, self.y + 20), 8, 8)
        if self.side == 6:
            pygame.draw.circle(win, BLACK, (self.x + 80, self.y + 50), 8, 8)
        if self.side == 4 or self.side == 5 or self.side == 6:
            pygame.draw.circle(win, BLACK, (self.x + 80, self.y + 80), 8, 8)
        # Jitter the die's position while it is still rolling.
        if self.status == 'rolling':
            self.x += random.randint(-10, 10)
            self.y += random.randint(-10, 10)

    def getSide(self):
        # Accessor for the current face value.
        return self.side
def redrawGameWindow(dices):
    """Animate one full roll of *dices*, redrawing the board each frame.

    Runs 11 frames (0-10): frames 0-8 shake the dice, frame 9 locks in the
    final face ('finalroll'), frame 10 settles them back to 'stopped'.
    """
    for frame in range(0, 11):
        time.sleep(0.1)
        win.fill((100, 0, 0))
        emptyRoundRect(win, (255, 255, 255), (20, 100, 1150, 630), 14, 4)
        for dice in dices:
            if frame == 9:
                dice.status = 'finalroll'
            dice.roll(win)
            if frame == 10:
                dice.status = 'stopped'
            # Debug overlay: face value and animation state of each die.
            mytext1 = myfont.render(str(dice.side), 1, BLACK)
            win.blit(mytext1, (dice.x + 20, 10))
            mytext2 = myfont.render(str(dice.status), 1, BLACK)
            win.blit(mytext2, (dice.x + 20, 30))
        mytext3 = myfont.render(str(state), 1, BLACK)
        win.blit(mytext3, (20, 10))
        # Recompute the scores and draw the scoreboard.
        strategy.set_dices(dices)
        strategy.calculate()
        for row, sName in enumerate(strategies_order):
            score = strategies[sName]["score"]
            nText = myfont.render(f"{sName:<20}", 1, WHITE)
            sText = myfont.render(f"{score:>10}", 1, WHITE)
            win.blit(nText, (40, 30 * (row + 1) + 100))
            win.blit(sText, (180, 30 * (row + 1) + 100))
        pygame.display.update()
        # BUG FIX: the scoreboard loop used to rebind the outer index `i`
        # (enumerate), so this early-return guard could never fire. The loop
        # variables are now distinct and the guard works as intended.
        if frame == 10:
            return
def main(win):
    """Run the game loop: alternate player/enemy turns, three rolls per turn."""
    global state
    global turn_cnt
    while True:
        clock.tick(60)
        # Fresh dice objects each frame, lined up across the board.
        # (The original also created a redundant empty `dices = []` first.)
        dices = [
            Dice(550, 330, 90, 90),
            Dice(660, 330, 90, 90),
            Dice(770, 330, 90, 90),
            Dice(880, 330, 90, 90),
            Dice(990, 330, 90, 90)
        ]
        for event in pygame.event.get():
            # Events are only processed during an active turn (both branches
            # of the original were identical apart from the next state).
            if state not in ('player_turn', 'enemy_turn'):
                continue
            if event.type == pygame.QUIT:
                return
            if event.type == pygame.MOUSEBUTTONUP:
                # Each click rolls the dice; after three rolls the turn passes.
                turn_cnt += 1
                for dice in dices:
                    dice.status = 'stopped'
                redrawGameWindow(dices)
                if turn_cnt == 3:
                    turn_cnt = 0
                    state = 'enemy_turn' if state == 'player_turn' else 'player_turn'
|
'''
Created on 12.05.2012
@author: akiryuhin
'''
import paramiko
import scpclient
import re
from fw.helper_base import HelperBase
from testconfig import config
from requests.exceptions import SSLError
import time
import socket
from nose.tools import ok_#@UnresolvedImport
from paramiko.ssh_exception import AuthenticationException
#import struct, fcntl #FCNTL exists under Linux only
class SshHelper(HelperBase):
    """Helper wrapping paramiko/scpclient for SSH sessions, remote command
    execution and file transfer, used by the test framework. (Python 2.)"""

    def __init__(self, manager):
        super(SshHelper, self).__init__(manager)

    def open_ssh_session(self, host, user, password, port=22, timeout=30):
        """Open an SSH session to host:port, retrying up to *timeout* times
        (1s apart). Returns the connected paramiko.SSHClient, or None when
        authentication fails; raises when the retry budget is exhausted."""
        self.ssh = paramiko.SSHClient()
        paramiko.util.logging.disable(self.ssh)
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.ssh.load_system_host_keys()
#        self.ssh.connect(hostname=host, port=int(port), username=user, password=password, timeout=timeout)
        for i in range(timeout):
            try:
                self.ssh.connect(hostname=host, port=int(port), username=user, password=password, timeout=timeout)
                break
            # NOTE(review): the string below is a no-op expression, not a log
            # call -- socket errors are silently swallowed and retried.
            except socket.error as err: "SSLError caught..."
            except AuthenticationException:
                print "Authentication failed to host " + host + ":" + str(port)
                self.ssh = None
                # NOTE(review): this break also skips the for-else raise
                # below, so auth failure returns None instead of raising.
                break
            time.sleep(1)
        else: raise Exception("Couldn't establish SSH connection during" + str(timeout) + "sec")
        return self.ssh

    def close_ssh_session(self, ssh):
        """Close a previously opened SSH session."""
        ssh.close()

    def send_ssh_command(self, ssh, command):
        """Run *command* remotely; returns paramiko's (stdin, stdout, stderr)."""
        output = ssh.exec_command(command)
        return output

    def put_file(self, ssh, local_file, remote_folder):
        """Copy *local_file* into *remote_folder* over SCP."""
        scp = scpclient.Write(ssh.get_transport(), remote_folder)
        scp.send_file(local_file)

    def put_file_with_check(self, ssh, local_file, remote_folder):
        """Upload a file and retry (up to 11 attempts) until it can be read
        back on the remote side; fails the test via ok_() otherwise."""
        k = local_file.split('/')
        local_file_name = k[len(k)-1]
        file_ready = False
        i=0
        while i < 11:
            self.put_file(ssh, local_file, remote_folder)
#            scp = scpclient.Write(ssh.get_transport(), remote_folder)
#            scp.send_file(local_file)
            # Read the file back: non-empty stdout + empty stderr == success.
            cred_file = self.send_ssh_command(self.ssh, 'cat ' + remote_folder + '/' + local_file_name)
            file_content = cred_file[1].read()
            error_mes = cred_file[2].read()
            if not file_content == '' and error_mes == '':
                file_ready = True
                return
            elif file_content == '' and not error_mes == '':
                print "File " + local_file_name + " cannot be read: " + error_mes
            print "\nTrying again..."
            time.sleep(5)
            i = i + 1
        if file_ready is False:
            ok_(False, 'Sending file ' + local_file_name + ' failed.')

    def check_file_accessibility(self, remote_folder, remote_file):
        """Verify *remote_file* can be `cat`-ed remotely; fail the test via
        ok_() otherwise. Single attempt (no retry loop, unlike the upload)."""
        file_ready = False
        cred_file = self.send_ssh_command(self.ssh, 'cat ' + remote_folder + '/' + remote_file)
        file_content = cred_file[1].read()
        error_mes = cred_file[2].read()
        if not file_content == '' and error_mes == '':
            file_ready = True
            return
        elif file_content == '' and not error_mes == '':
            print "File " + remote_file + " cannot be read: " + error_mes
            time.sleep(5)
        if file_ready is False:
            ok_(False, 'File ' + remote_file + ' reading failed. Substitution failed.')

    def put_dir(self, ssh, local_dir, remote_folder):
        """Recursively copy *local_dir* into *remote_folder* over SCP."""
        scp = scpclient.WriteDir(ssh.get_transport(), remote_folder)
        scp.send_dir(local_dir)

    def substitute_file(self, ssh, local_folder, local_filename, destination_folder, remote_file, add = ''):
        """Back up the remote file (as .bak<add>) and replace it with the
        local one, renaming it to the remote name."""
        self.send_ssh_command(ssh, 'mv '+ destination_folder + '/' + remote_file + ' ' + destination_folder + '/' + remote_file + '.bak' + add)
        self.put_file(ssh, local_folder + '/' + local_filename, destination_folder)
        self.send_ssh_command(ssh, 'mv '+ destination_folder + '/' + local_filename + ' '+ destination_folder + '/' + remote_file)

    def substitute_file_with_check(self, ssh, local_folder, local_filename, destination_folder, remote_file, add = ''):
        """Same as substitute_file, but with upload retries and a final
        readability check on the substituted file."""
        self.send_ssh_command(ssh, 'mv '+ destination_folder + '/' + remote_file + ' ' + destination_folder + '/' + remote_file + '.bak' + add)
        self.put_file_with_check(ssh, local_folder + '/' + local_filename, destination_folder)
        self.send_ssh_command(ssh, 'mv '+ destination_folder + '/' + local_filename + ' '+ destination_folder + '/' + remote_file)
        self.check_file_accessibility(destination_folder, remote_file)

    def restore_substituted_file(self, ssh, destination_folder, remote_file):
        """Undo substitute_file: delete the substituted file and restore .bak."""
        self.send_ssh_command(ssh, 'rm -f '+ destination_folder + '/' + remote_file)
        self.send_ssh_command(ssh, 'mv '+ destination_folder + '/' + remote_file + '.bak ' + destination_folder + '/' + remote_file)

    def get_file(self, ssh, remote_file, local_folder):
        """Download *remote_file* to *local_folder* via SFTP. Always True."""
        #my_host = self.get_local_ip_linux()
        #print "\ncurrent IP: ", my_host
        #if my_host is None:
        #return False
        #command = "scp -v -P 10010 root@192.168.17.141:/opt/mescalero/reactor/log.txt /home/miroslav/log"
        sftp = ssh.open_sftp()
        sftp.get(remote_file, local_folder)
        sftp.close()
        #scp = scpclient.Read(ssh.get_transport(), remote_file)
        #scp.receive_file(local_folder, False, None, '/var/tmp_debug.log', None)
        return True

    def check_tunnel_to_appliance_with_ping(self, ssh, iface, remote_ip):
        """Ping *remote_ip* through *iface* on the remote host; True when the
        gawk-extracted packet-loss figure is 0%."""
        _command = "ping -c 4 -I {0} {1} | gawk '{{for(i=1;i<NF;i++) {{if($i ~/%/) {{print $i}} }} }}'".format(iface, remote_ip)
        _response = self.send_ssh_command(ssh, _command)[1].read()
        _response = _response.replace("\n","")
        _response = _response.replace("%","")
        _response = int(_response)
        if str(_response) == "0":
            return True
        else:
            return False

    def get_packet_route_with_ping(self, ssh, iface, remote_ip):
        '''
        Returns list of IP addresses and sites on the packet way to
        destination site (ping -R record-route output, comma separated).
        '''
        _command = "ping -c 4 -R -I {0} {1} | gawk -v i=0 '{{if ($0 ~/RR/ || i==1) {{if ($0 ~/RR/){{i++}}; if (NF==0) exit 0; else print; next}} }}'".format(iface, remote_ip)
        _response = self.send_ssh_command(ssh, _command)[1].read()
        _response = _response.replace("RR:","")
        _response = _response.rstrip("\n")
        _response = _response.replace("\n",",")
        return _response

    def send_rest_request_via_ssh(self, ssh, rest_url=None, request_data=None, request_type=None, user=None, password=None):
        """Issue a REST call from the remote host via curl; defaults come
        from the manager's lom_* properties. Returns raw curl output
        (headers included, because of -i)."""
        import json
        import types
        if request_type is None:
            request_type = "GET"
        if user is None:
            user = self.manager.get_property("lom_user")
        if password is None:
            password = self.manager.get_property("lom_password")
        if rest_url is None:
            rest_url = "http://{0}/rest/sys/appliances/list".format(self.manager.get_property("lom_host"))
        if request_data is not None:
            # dicts are serialized to JSON before being passed to curl -d
            if isinstance(request_data, types.DictionaryType):
                request_data = json.dumps(request_data)
        if request_type == "GET":
            # curl -k -s -i -u root:swordfish -H "Accept: application/json" -X GET http://192.168.17.141:8000/rest/sys/appliances/list
            _command = "curl -k -s -i -u {0}:{1} -H 'Accept: application/json' -X {2} {3}".format(user, password, request_type, rest_url)
        else:
            # curl -k -s -i -u root:swordfish -H "Accept: application/json" -XPOST http://192.168.17.141:8000/rest/auth/service/verify -d '{"password":"ee9casllp3cf8czh"}'
            _command = "curl -k -s -i -u {0}:{1} -H 'Accept: application/json' -X{2} {3} -d '{4}'".format(user, password, request_type, rest_url, request_data)
        _response = self.send_ssh_command(ssh, _command)[1].read()
        return _response

    #Method below gets current local IP for given interface. Works under Linux only
#    def get_local_ip_linux(self,iface = 'eth0'):
#        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#        sockfd = sock.fileno()
#        SIOCGIFADDR = 0x8915
#        ifreq = struct.pack('16sH14s', iface, socket.AF_INET, '\x00'*14)
#        try:
#            res = fcntl.ioctl(sockfd, SIOCGIFADDR, ifreq)
#        except:
#            return None
#        ip = struct.unpack('16sH2x4s8x', res)[2]
#        return socket.inet_ntoa(ip)
|
# -*- coding: utf-8 -*-
__author__ = 'gaochenchao'
class ListNode(object):
    """One node of a singly linked list: a payload and a link to the next node."""

    def __init__(self, value, next):
        # Store the link first, then the payload; the order is irrelevant.
        self.next = next
        self.value = value

    def __repr__(self):
        return "[ListNode] %s" % self.value
class ListManual(object):
def __init__(self):
self.head = ListNode(None, None)
def arrToLi(self, arr):
p = self.head
for i in arr:
p.next = ListNode(i, None)
p = p.next
return self.head
def printLi(self, head=None):
head = head if head else self.head
p = head
while p:
if p.value:
print p
p = p.next
def lenL(self):
if not self.head.next:
return 0
n = 0
p = self.head.next
while p:
n += 1
p = p.next
return n
def rotateRight(self, k):
n = self.lenL()
if k == 0:
return
if k > n:
k = n % k
if k == 0:
return
newTail = self.head
i = 0
while i < n - k:
newTail = newTail.next
i += 1
newHead = newTail.next
join = newTail
while join.next:
join = join.next
newTail.next = None
join.next = self.head.next
self.head = newHead
def isParlindrom(self):
n = self.lenL()
central = int(n/2)
rstart = self.head
i = 0
while i < central:
rstart = rstart.next
i += 1
if n % 2 != 0:
rstart = rstart.next
join = rstart
pre = rstart.next
tail = pre
p = pre.next
while p.next:
next = p.next
p.next = pre
pre = p
p = next
p.next = pre
join.next = p
tail.next = None
lstart = self.head.next
rstart = join.next
j = 1
while j <= central:
if lstart.value != rstart.value:
return False
lstart = lstart.next
rstart = rstart.next
j += 1
return True
def swapPaire(self):
begin = self.head
pre = begin.next
p = pre.next
while pre and p:
next = p.next
p.next = pre
pre.next = next
begin.next = p
if not next:
break
else:
begin = pre
pre = next
p = pre.next
def partion(self, x=3):
if not self.head.next:
return
leftLn = ListNode(None, None)
rightLn = ListNode(None, None)
leftTail = leftLn
rightTail = rightLn
p = self.head.next
while p:
if p.value < x:
leftTail.next = p
leftTail = p
else:
rightTail.next = p
rightTail = p
p = p.next
rightTail.next = None
leftTail.next = rightLn.next
self.head = leftLn
def reverse(self, begin):
if not begin or not begin.next:
return
newHead = ListNode(None, begin)
pre = newHead
p = pre.next
while p.next:
next = p.next
p.next = pre
pre = p
p = next
p.next = pre
begin.next = None
newHead.next = p
return newHead.next
def reorderLn(self):
leftStart = self.head.next
leftEnd = leftStart
n = self.lenL()
k = int(n/2)
if n % 2 != 0:
k += 1
for i in range(1, k):
leftEnd = leftEnd.next
rightStart = leftEnd.next
rightStart = self.reverse(rightStart)
flag = True
while rightStart.next:
if flag:
next = leftStart.next
leftStart.next = rightStart
leftStart = next
else:
next = rightStart.next
rightStart.next = leftStart
rightStart = next
flag = not flag
print rightStart, leftStart
# Demo driver: build a list and exercise one of the manipulations above.
lm = ListManual()
arr = [1, 2, 3, 4, 5, 6]
# arr = [1, 4, 3, 2, 5, 2]
head = lm.arrToLi(arr)
h = lm.reorderLn()  # reorderLn returns None; name kept for symmetry with reverse()
# lm.partion(3)
# h = lm.reverse(head)
# lm.printLi(h)
# lm.swapPaire()
# lm.rotateRight(3)
# lm.printLi()
# print lm.isParlindrom()
import django_tables2 as tables
from models import LibraryVisit
class PersonTypeTable(tables.Table):
    """LibraryVisit rows grouped on prsn_e_type with their counts."""

    class Meta:
        model = LibraryVisit
        fields = ("prsn_e_type", "prsn_e_type__count")
        attrs = {"class": "paleblue"}
        # NOTE(review): 'prefix' is not a documented django-tables2 Meta
        # option (it is a Table constructor argument) -- confirm it is used.
        prefix='pt_t_'
class DepartmentTable(tables.Table):
    """LibraryVisit rows grouped on dprt_n (department) with their counts."""

    class Meta:
        model = LibraryVisit
        fields = ("dprt_n", "dprt_n__count")
        attrs = {"class": "paleblue"}
        # NOTE(review): 'prefix' is not a documented Meta option -- confirm.
        prefix='dpt_t_'
class DivisionTable(tables.Table):
    """LibraryVisit rows grouped on dvsn_n (division) with their counts."""

    class Meta:
        model = LibraryVisit
        fields = ("dvsn_n", "dvsn_n__count")
        attrs = {"class": "paleblue"}
        # NOTE(review): 'prefix' is not a documented Meta option -- confirm.
        prefix='div_t_'
class ProgramTable(tables.Table):
    """LibraryVisit rows grouped on acpr_n (academic program) with counts."""

    class Meta:
        model = LibraryVisit
        fields = ("acpr_n", "acpr_n__count")
        attrs = {"class": "paleblue"}
        # NOTE(review): 'prefix' is not a documented Meta option -- confirm.
        prefix='pgm_t_'
class PlanTable(tables.Table):
    """LibraryVisit rows grouped on acpl_n (academic plan) with counts."""

    class Meta:
        model = LibraryVisit
        fields = ("acpl_n", "acpl_n__count")
        attrs = {"class": "paleblue"}
        # NOTE(review): 'prefix' is not a documented Meta option -- confirm.
        prefix='pln_t_'
class ClassTable(tables.Table):
    """LibraryVisit rows grouped on stdn_e_clas (student class) with counts."""

    class Meta:
        model = LibraryVisit
        fields = ("stdn_e_clas", "stdn_e_clas__count")
        attrs = {"class": "paleblue"}
        # NOTE(review): 'prefix' is not a documented Meta option -- confirm.
        prefix='cls_t_'
|
import os
import environ

# Project root: three directory levels up from this settings file.
BASE_DIR = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Override in production via env.
# (A bare `env = environ.Env()` used to be created here and immediately
# rebound -- dead code, removed.)
env = environ.Env(
    DATABASE_URL=(
        str,
        'postgres://pos_user:pos_passwd@localhost:5432/pos_db'
    )
)
APP_FOLDER = 'api'
env.read_env(os.path.join(BASE_DIR, '{}/.env'.format(APP_FOLDER)))
DEBUG = env('DEBUG', default=True)
# NOTE(review): fallback secret key checked into source -- must be
# overridden via the environment in production.
SECRET_KEY = env(
    'SECRET_KEY', default='p!ci1&ni8u98vvd#%18yp)aqh+m_8o565g*@!8@1wb$j#pj4d8')
# Database connection parsed from DATABASE_URL (see env defaults above).
ENV_DB = env.db()
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'HOST': ENV_DB['HOST'],
        'NAME': ENV_DB['NAME'],
        'PASSWORD': ENV_DB['PASSWORD'],
        'PORT': ENV_DB['PORT'],
        'USER': ENV_DB['USER'],
    }
}  # Env should have DATABASE_URL
# NOTE(review): MIDDLEWARE_CLASSES is the pre-Django-1.10 setting name;
# newer Django versions use MIDDLEWARE -- confirm the Django version in use.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    # CORS middleware must run before CommonMiddleware.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# Outgoing mail (SMTP over TLS on 587).
EMAIL_HOST = env('EMAIL_HOST', default='localhost')
# BUG FIX: the default used to be the integer 487 (apparently a typo echoing
# port 587 below). EMAIL_HOST_USER must be a string; an empty string makes
# Django's SMTP backend skip authentication when no user is configured.
EMAIL_HOST_USER = env('EMAIL_HOST_USER', default='')
EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD', default='notarealpassword')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Hosts this deployment may serve (leading dot = any subdomain).
ALLOWED_HOSTS = ['.healthix.co.ke', '.localhost']
INSTALLED_APPS = (
    'django.contrib.sites',
    'common',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.postgres',
    # libs
    'django_extensions',
    'django_filters',
    'djrill',
    'rest_framework',
    'rest_framework.authtoken',
    'rest_framework_swagger',
    'oauth2_provider',
    'gunicorn',
    'corsheaders',
    'reversion',
    'rest_auth',
    'rest_auth.registration',
    'allauth',
    'allauth.account',
    # our apps
    'api',
    'users',
    'pos',
)
# Project-local apps (subset of INSTALLED_APPS).
LOCAL_APPS = (
    'api',
    'common',
    'users',
    'pos'
)
SITE_ID = 1
# CORS: only whitelisted origins may call the API cross-origin.
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (
    'localhost:8012',
    '.healthix.co.ke',
    'pos-web.healthix.co.ke',
    'pos-api.healthix.co.ke',
    '52.89.181.225'
)
API_DOMAIN = 'pos-api.healthix.co.ke'
CORS_ALLOW_CREDENTIALS = True
# Preflight responses may be cached for 48 hours (seconds).
CORS_PREFLIGHT_MAX_AGE = 172800
CORS_ALLOW_METHODS = (
    'GET',
    'POST',
    'PUT',
    'PATCH',
    'DELETE',
    'OPTIONS'
)
CORS_ALLOW_HEADERS = (
    'x-requested-with',
    'content-type',
    'accept',
    'origin',
    'authorization',
    'x-csrftoken',
    'cache-control'
)
ROOT_URLCONF = 'config.urls'
WSGI_APPLICATION = 'config.wsgi.application'
AUTH_USER_MODEL = 'users.User'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
MEDIA_URL = 'http://localhost:9000/media/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
CSRF_COOKIE_HTTPONLY = False
CSRF_COOKIE_SECURE = False
SECURE_SSL_REDIRECT = False  # Turn on in production
# django-rest-swagger API explorer configuration.
SWAGGER_SETTINGS = {
    'exclude_namespaces': [],
    'api_version': '0.1',
    'api_path': '/',
    'enabled_methods': [
        'get',
        'post',
        'put',
        'patch',
        'delete'
    ],
    # NOTE(review): hard-coded API key committed to settings -- move to env.
    'api_key': '228b67fadab69d86a8d7e49dc03ac8e2206yre22',
    'is_authenticated': True,
    'is_superuser': False,
    'permission_denied_handler': 'api.views.permission_denied_handler',
    'resource_access_handler': None,
    'base_path': API_DOMAIN,
    'info': {
        'contact': 'developers@emanager.co',
        'description': 'Explore the POS v0.0.1 API',
        'title': 'POS V0.0.1 API',
    },
    'doc_expansion': 'none',
}
# Django REST framework defaults: token auth, authenticated-only,
# JSON/XML in and out, page-number pagination.
REST_FRAMEWORK = {
    'DEFAULT_THROTTLE_CLASSES': (
        'rest_framework.throttling.ScopedRateThrottle',
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_FILTER_BACKENDS': (
        'rest_framework.filters.DjangoFilterBackend',
        'rest_framework.filters.OrderingFilter',
    ),
    'DEFAULT_PARSER_CLASSES': (
        'rest_framework.parsers.JSONParser',
        'rest_framework_xml.parsers.XMLParser',
    ),
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.BrowsableAPIRenderer',
        'rest_framework.renderers.JSONRenderer',
        'rest_framework_xml.renderers.XMLRenderer'
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
    ),
    'DEFAULT_PAGINATION_CLASS': (
        'rest_framework.pagination.PageNumberPagination'),
    'PAGE_SIZE': 3,
    'PAGINATE_BY_PARAM': 'page_size',
    # Should be able to opt in to see all wards at once
    'MAX_PAGINATE_BY': 15000,
    'TEST_REQUEST_DEFAULT_FORMAT': 'json',
    'TEST_REQUEST_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
    ),
    'DATETIME_FORMAT': 'iso-8601',
    'DATE_FORMAT': 'iso-8601',
    'TIME_FORMAT': 'iso-8601'
}
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # BUG FIX: DIRS must be a list of directories, and the old
        # os.path.join(BASE_DIR, '/common/templates/') silently discarded
        # BASE_DIR because the second component was absolute.
        'DIRS': [os.path.join(BASE_DIR, 'common/templates/')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# django-allauth related settings
# some of these settings take into account that the target audience
# of this system is not super-savvy
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
)
# Users may log in with either username or email.
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'none'
ACCOUNT_EMAIL_SUBJECT_PREFIX = '[POS]'
ACCOUNT_LOGOUT_ON_GET = True
ACCOUNT_LOGOUT_REDIRECT_URL = ''
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_USER_MODEL_USERNAME_FIELD = 'email'
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/api/v1/auth/login/'
API_LOGIN_URL = '/api/v1/auth/drf/login/'
# django_rest_auth settings
OLD_PASSWORD_FIELD_ENABLED = True
REST_AUTH_SERIALIZERS = {
    'USER_DETAILS_SERIALIZER': 'users.serializers.UserSerializer',
    'TOKEN_SERIALIZER': 'users.serializers.UserAuthTokenSerializer',
    'PASSWORD_RESET_SERIALIZER': (
        'users.serializers.UserPasswordResetSerializer'),
}
# Client origin to be allowed access
CLIENT_ORIGIN = 'http://localhost:8012'
DEFAULT_FROM_EMAIL = 'POS <info@pos.co.ke>'
API_ROOT = 'http://localhost:8061/api/v1'
# NOTE(review): empty Mandrill key -- Djrill mail will fail unless this is
# overridden in the environment/production settings.
MANDRILL_API_KEY = ''
EMAIL_BACKEND = 'djrill.mail.backends.djrill.DjrillBackend'
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""=================================================
@Project -> File :PycharmProjects -> assets_servlet_test
@IDE :PyCharm
@Author :Mr. toiler
@Date :1/18/2020 3:35 PM
@Desc :
=================================================="""
import unittest
import web.servlet.assets as servlet_assets
import web.servlet.assets_add as servlet_assets_add
from utils import my_print
from web.test import *
from web.biz.constant import Const
import json
import time
from web.conf import Conf
class AssetsServletTestCase(unittest.TestCase):
    """End-to-end tests for the assets servlet: lookup, creation (with and
    without attachment), image retrieval, and borrow/return flows.

    Test methods are numbered so unittest's alphabetical ordering runs them
    as a scenario; the (Chinese) docstrings are printed at runtime via
    my_print, so they are left untouched.
    """

    # Unique asset code per run; the no-attachment code is fixed.
    assets_code = 'A{}'.format(time.time())
    assets_code_no_attach = 'B9999'

    def setUp(self):
        # Fresh request with the scenario's asset code for every test.
        self.req = get_test_request()
        self.req.parameters['code'] = AssetsServletTestCase.assets_code  # 'A8888_9'

    def test_00100_do_get_assets(self):
        """根据资产代码不能找到记录"""
        # Lookup before creation must fail.
        my_print('准备测试:' + self.test_00100_do_get_assets.__doc__)
        self.req.parameters["action"] = 'get_assets'
        b_ret = servlet_assets.do_get(self.req)
        d = dict()
        d.update(json.loads(self.req.res_body.decode('UTF-8')))
        self.assertEqual(b_ret, False, '执行成功, 没有找到记录')
        self.assertEqual(Const.OpStatus.失败.value, d.get('status'), '不能找到记录')
        self.assertEqual('没有找到 code:{} 或者 login_name:login_name_0001 的记录'.format(AssetsServletTestCase.assets_code),
                         d.get('message'), '返回消息')

    def test_00200_do_post(self):
        """成功创建资产,附件方式"""
        # Create the asset with an image attachment.
        my_print('准备测试:' + self.test_00200_do_post.__doc__)
        b_ret = servlet_assets_add.do_post(self.req)
        d = dict()
        d.update(json.loads(self.req.res_body.decode('UTF-8')))
        self.assertEqual(b_ret, True, '执行成功, 创建资产成功')
        self.assertEqual(Const.OpStatus.成功.value, d.get('status'), '创建资产成功')
        self.assertEqual('admin(login_name_0001) 添加资产 图书({})-类别一 并设置管理成功'.format(AssetsServletTestCase.assets_code),
                         d.get('message'), '返回消息')

    def test_00201_do_post(self):
        """成功创建资产,不带附件方式"""
        # Create an asset without an attachment.
        my_print('准备测试:' + self.test_00201_do_post.__doc__)
        self.req.parameters['image'] = None
        self.req.parameters['code'] = AssetsServletTestCase.assets_code_no_attach
        b_ret = servlet_assets_add.do_post(self.req)
        d = dict()
        d.update(json.loads(self.req.res_body.decode('UTF-8')))
        self.assertEqual(b_ret, True, '执行成功, 创建资产成功')
        self.assertEqual(Const.OpStatus.成功.value, d.get('status'), '创建资产成功')
        self.assertEqual('admin(login_name_0001) 添加资产 图书({})-类别一 并设置管理成功'.format(
            self.req.parameters['code']),
            d.get('message'), '返回消息')

    def test_00202_do_post(self):
        """成功创建资产,不带附件方式"""
        # BUG FIX: this log line used to print test_00201's docstring.
        my_print('准备测试:' + self.test_00202_do_post.__doc__)
        self.req.parameters['image'] = None
        self.req.parameters['code'] = 'servlet.assets.code'
        b_ret = servlet_assets_add.do_post(self.req)
        d = dict()
        d.update(json.loads(self.req.res_body.decode('UTF-8')))
        self.assertEqual(b_ret, True, '执行成功, 创建资产成功')
        self.assertEqual(Const.OpStatus.成功.value, d.get('status'), '创建资产成功')
        self.assertEqual('admin(login_name_0001) 添加资产 图书({})-类别一 并设置管理成功'.format(
            self.req.parameters['code']),
            d.get('message'), '返回消息')

    def test_00300_do_get_assets(self):
        """根据资产代码能找到记录"""
        # Lookup after creation must succeed.
        my_print('准备测试:' + self.test_00300_do_get_assets.__doc__)
        self.req.parameters["action"] = 'get_assets'
        b_ret = servlet_assets.do_get(self.req)
        d = dict()
        d.update(json.loads(self.req.res_body.decode('UTF-8')))
        self.assertEqual(b_ret, True, '执行成功,找到记录')
        self.assertEqual(Const.OpStatus.成功.value, d.get('status'), '能找到记录')
        self.assertEqual('找到 code:{} 或者 login_name:login_name_0001 的记录'.format(AssetsServletTestCase.assets_code),
                         d.get('message'), '消息')

    def test_00400_do_get_image(self):
        """根据资产代码能找到原始图片"""
        # Response body must be a JPEG (SOI/EOI markers at both ends).
        my_print('准备测试:' + self.test_00400_do_get_image.__doc__)
        self.req.parameters["action"] = 'get_image'
        b_ret = servlet_assets.do_get(self.req)
        self.assertEqual(b_ret, True, '执行成功,找到图片')
        self.assertEqual(self.req.res_body[:2], b'\xff\xd8')
        self.assertEqual(self.req.res_body[-2:], b'\xff\xd9')

    def test_00401_do_get_image(self):
        """根据资产代码能找到记录,但是没有图片"""
        # The no-attachment asset must report a missing/invalid image.
        my_print('准备测试:' + self.test_00401_do_get_image.__doc__)
        self.req.parameters["action"] = 'get_image'
        self.req.parameters['code'] = AssetsServletTestCase.assets_code_no_attach
        b_ret = servlet_assets.do_get(self.req)
        d = dict()
        d.update(json.loads(self.req.res_body.decode('UTF-8')))
        self.assertEqual(b_ret, False, '执行成功,记录没有图片')
        self.assertEqual(Const.OpStatus.成功.value, d.get('status'), '不能找到图片')
        self.assertEqual('资产代码:{0}, 没有图像记录或者图像记录格式不对'.format(AssetsServletTestCase.assets_code_no_attach),
                         d.get('message'), '消息')

    def test_00500_do_biz(self):
        """资产代码code、用户login_name不能成功借出不存在的资产"""
        # Borrowing an asset that was never created must be rejected.
        my_print('准备测试:' + self.test_00500_do_biz.__doc__)
        self.req.parameters['code'] = 'no_assets_code'
        b_ret = servlet_assets.do_post(self.req)
        d = dict()
        d.update(json.loads(self.req.res_body.decode('UTF-8')))
        self.assertEqual(b_ret, True, '没有借出成功')
        self.assertEqual(d.get('status'), Const.OpStatus.其他.value, '不能借出不存在的物品')
        self.assertEqual(d.get('message'), '资产代码: no_assets_code 还未入库, 不能借还。')

    def test_00501_do_biz(self):
        """资产代码code、用户login_name成功借出资产"""
        # Borrowing the created asset must succeed.
        my_print('准备测试:' + self.test_00501_do_biz.__doc__)
        b_ret = servlet_assets.do_post(self.req)
        d = dict()
        d.update(json.loads(self.req.res_body.decode('UTF-8')))
        self.assertEqual(b_ret, True, '借出成功')
        self.assertEqual(d.get('status'), Const.OpStatus.成功.value, '借出成功')
        self.assertEqual(d.get('message'), 'admin(18995533533)成功借出admin的图书({})'.format(self.req.parameters['code']))

    def test_00600_do_biz(self):
        """资产代码code、非管理者不能成功还资产,不是管理员"""
        # A non-manager user must not be able to return the asset.
        my_print('准备测试:' + self.test_00600_do_biz.__doc__)
        self.req.parameters['userInfo']['login_name'] = 'is_not_mng_assets_user'
        self.req.parameters['userInfo']['id'] = None
        b_ret = servlet_assets.do_post(self.req)
        d = dict()
        d.update(json.loads(self.req.res_body.decode('UTF-8')))
        self.assertEqual(b_ret, True, '没有归还成功')
        self.assertEqual(d.get('status'), Const.OpStatus.失败.value, '不能归还不属于你管理的物品')
        self.assertEqual(d.get('message'), '资产: 图书({}) 不由你管理,不能完成归还动作'.format(self.req.parameters['code']))

    def test_00601_do_biz(self):
        """资产代码code、管理者用户login_name成功还资产"""
        # The managing user returns the asset successfully.
        my_print('准备测试:' + self.test_00601_do_biz.__doc__)
        b_ret = servlet_assets.do_post(self.req)
        d = dict()
        d.update(json.loads(self.req.res_body.decode('UTF-8')))
        self.assertEqual(b_ret, True, '归还成功')
        self.assertEqual(d.get('status'), Const.OpStatus.成功.value, '成功归还属于你管理的物品')
        self.assertEqual(d.get('message'), '管理员:admin 归还了 admin 借的 admin 的 图书')

    def test_99999_show_db(self):
        """显示目前数据库的数据"""
        # Dump the database contents for manual inspection.
        my_print('准备测试:' + self.test_99999_show_db.__doc__)
        show_db(Conf.db_file_path_rw)
        if Conf.db_file_path_img != Conf.db_file_path_rw:
            show_db(Conf.db_file_path_img)
#
# if __name__ == '__main__':
# unittest.main()
|
'''
Small module for insult functionality.
Contains a function for generating a random insult, and a function
for turning that insult into a full sentence.
'''
import random
def insult_part():
'''
Concatenate an adjective with a noun and return the insult as a string.
'''
adjectives = ("aggressive", "aloof", "arrogant", "belligerent",
"big-headed", "bitchy", "boastful", "bone-idle",
"boring", "bossy", "callous", "cantankerous",
"careless", "changeable", "clinging", "compulsive",
"conservative", "cowardly", "cruel", "cunning",
"cynical", "deceitful", "detached", "dishonest",
"dogmatic", "domineering", "finicky", "flirtatious",
"foolish", "foolhardy", "fussy", "greedy",
"grumpy", "gullible", "harsh", "impatient",
"impolite", "impulsive", "inconsiderate", "inconsistent",
"indecisive", "indiscreet", "inflexible", "interfering",
"intolerant", "irresponsible", "jealous", "lazy",
"Machiavellian", "materialistic", "mean", "miserly",
"moody", "narrow-minded", "nasty", "naughty",
"nervous", "obsessive", "obstinate", "overcritical",
"overemotional", "parsimonious", "patronizing", "perverse",
"pessimistic", "pompous", "possessive", "pusillanimous",
"quarrelsome", "quick-tempered", "resentful", "rude",
"ruthless", "sarcastic", "secretive", "selfish",
"self-centred", "self-indulgent", "silly", "sneaky",
"stingy", "stubborn", "stupid", "superficial",
"tactless", "timid", "touchy", "thoughtless",
"truculent", "unkind", "unpredictable", "unreliable",
"untidy", "untrustworthy", "vague", "vain",
"vengeful", "vulgar", "weak-willed")
nouns = ("Amateur", "Animal", "Anorak", "Ape",
"Ape covered in human flesh", "Apefucker", "Arse-licker", "Ass",
"Ass-face", "Ass-hat", "Ass-kisser", "Ass-nugget",
"Ass clown", "Assaholic", "Assbutt", "Assclown",
"Assface", "Asshat", "Asshole", "Assmonkey",
"Assmunch", "Asswagon", "Assweed", "Asswipe",
"Aunt fucker", "Baby", "Backwoodsman", "Badass",
"Badgerfucker", "Bag of dicks", "Bandit", "Barbarian",
"Bastard", "Beast", "Beetlehead", "Beginner",
"Bell-end", "Berk", "Bimbo", "Birdbrain",
"Bitch", "Bitch Ass", "Bitch Ass Motherfucker", "Bitchboy",
"Bitchface", "Bitchwad", "Bitchzilla", "Blockhead",
"Blubber gut", "Bluntie", "Bogeyman", "Bonehead",
"Boob", "Booby", "Boomer", "Bootlicker",
"Boozer", "Bozo", "Bruh", "Buffoon",
"Bugger", "Bum", "Bum chum", "Butthead",
"Butthole", "Buttlicker", "Caveman", "Chauvinist",
"Chav", "Cheater", "Chicken", "Chickenfucker",
"Chode", "Chump", "Clown", "Cock",
"Cockboy", "Cockburger", "Cockfucker", "Cockhead",
"Cockholster", "Cockroach", "Con man", "Coomer",
"Cougar", "Country bumpkin", "Cow", "Coward",
"Crack whore", "Crackhead", "Craphole", "Creep",
"Cretin", "Crook", "Cuckold", "Cumstain",
"Cunt fart", "Cuntass", "Cuntbitch", "Cuntzilla",
"Degenerate", "Desperado", "Dick", "Dick mouth",
"Dick sniffer", "Dick weed", "Dickbag", "Dickbreath",
"Dickface", "Dickfucker", "Dickhead", "Dildo",
"Dimmadumbass", "Dimwit", "Ding-head", "Dingleberry",
"Dinosaur", "Dipfuck", "Dirtbag", "Dirthead",
"Dodo", "Dog", "Dolt", "Donkey",
"Donkeyfucker", "Doofus", "Dope", "Douche bag",
"Douche canoe", "Douche nozzle", "Douchelord", "Drunkard",
"Duckfucker", "Dumbass", "Dumbbell", "Dumbo",
"Dummy", "Dunce", "Duncebucket", "Earthworm",
"Edgelord", "Egghead", "Egotist", "Eunuch",
"Farmer", "Fart", "Fellow", "Fink",
"Fish", "Fishwife", "Fixer", "Flake",
"Fool", "Freak", "Fuck", "Fuck-wit",
"Fuck noggin", "Fuck nugget", "Fuckass", "Fuckbait",
"Fuckbucket", "Fucker", "Fuckhead", "Fucking bitch",
"Fuckweasel", "Fuckwhistle", "Fuckwit", "Geebag",
"Gimp", "Git", "Gobshite", "Gold digger",
"Goof", "Goon", "Goose", "Gorilla",
"Grouch", "Grumpy", "Helldog", "Hikikomori",
"Hilding", "Hillbilly", "Hippie", "Ho",
"Hobbledehoy", "Hoe", "Hooligan", "Hooplehead",
"Horse's ass", "Horse's necktie", "Hosebag", "Hypocrite",
"Idiot", "Ignoramus", "Imbecile", "Inbred",
"Intercourser", "Jackass", "Jackwagon", "Jelly",
"Jerk", "Jerkwad", "Joker", "Junkie",
"Keyboard warrior", "Lamebrain", "Landwhale", "Lard Ass",
"Lard face", "Liar", "Lobotomite", "Loser",
"Low-life", "Lunatic", "Lunkhead", "Lurdane",
"Lush", "Mackerel", "Madman", "Maggot",
"Mamzer", "Meanie", "Megadouche", "Minx",
"Mongoose", "Monkey", "Monster", "Moron",
"Motherfucker", "Mouthbreather", "Mucky pup", "Muppet",
"Mutant", "Mutt", "Ne'er-do-well", "Neanderthal",
"Neckbeard", "Nerd", "Nerf herder", "Nimrod",
"Nincompoop", "Ninny", "Nitwit", "Nobody",
"Non", "Nonce", "Noob", "Noodle",
"Numbnuts", "Numbskull", "Numpty", "Numskull",
"Oaf", "Oddball", "Ogre", "Oompa loompa",
"Orphan", "Outlaw", "Oxygen Thief", "Pack",
"Pain in the ass", "Pariah", "Peasant", "Pee Mom",
"Penchod", "Pencil dick", "Penis face", "Pervert",
"Pig", "Pigfucker", "Piggy-wiggy", "Pillock",
"Pinhead", "Pirate", "Pissface", "Pleb",
"Porno freak", "Prick", "Pseudo-intellectual", "Pube flosser",
"Puppet", "Quack", "Querulant", "Rat",
"Ratcatcher", "Ratfink", "Redneck", "Reject",
"Riff-raff", "Roaster", "Robot", "Rowdy",
"Rudesby", "Ruffian", "Runt", "Sadist",
"Saprophyte", "Sausage-masseuse", "Scumbag", "Scumhead",
"Scumlord", "Scuzzbag", "Serf", "Sewer rat",
"Shark", "Sheepfucker", "Sheepshagger", "Shill",
"Shit-eater", "Shit-for-brains", "Shit stain", "Shitass",
"Shitbucket", "Shitehawk", "Shitfuck", "Shithead",
"Shitneck", "Shitnugget", "Shitsack", "Shitter",
"Shitweasel", "Shyster", "Simp", "Simpleton",
"Skank", "Skunk", "Skunkfucker", "Slave",
"Sleeze", "Sleeze bag", "Slob", "Snail",
"Snake", "Snob", "Snollygoster", "Snot",
"Snotball", "Snowflake", "Son of a bitch", "Son of a motherless goat",
"Sphincter", "Square", "Stinker", "Stinkhole",
"Swindler", "Swine", "Sycophant", "Theatre kid",
"Thief", "Thundercunt", "Titbag", "Toad",
"Tool", "Tree hugger", "Troglodyte", "Troll",
"Trollface", "Turd", "Turdball", "Twatwaffle",
"Twerp", "Twit", "Ugly ass", "Unclefucker",
"Vagina cleaner", "Vampire", "Vandal", "Varmint",
"Vermin", "Wacko", "Wallflower", "Wank stain",
"Wanker", "Weirdo", "Whore", "Windfucker",
"Windsucker", "Worm", "Wretch", "Xenophobe",
"Yahoo", "Yes-man", "Yonker", "Zitface",
"Zounderkite")
# Concatenate the random adjectives and nouns
insult_template = (random.choice(adjectives).lower()
+ ' '
+ random.choice(nouns).lower()
+ '.')
# Return the string
return insult_template
def insult(template):
    """Prepend a randomly chosen insult opener to *template*.

    Keeping the body as a reusable fragment makes it easy to drop the
    insult into other contexts while still producing a full sentence here.
    The chosen opener contains a '{}' placeholder for the caller to format
    a name into later.
    """
    openers = ('Can it {}, you ',
               'Cool it {}, you ',
               'Shut up {}, you ',
               'Cut it out {}, you ',
               "I've heard enough out of you {}, you ",
               'Silence yourself {}, you ',
               '{}... spare me, you ',
               '{}, you sound like a ',
               '{}, quit being a ')
    return random.choice(openers) + template
|
import glob
import radiomics
import os
import numpy as np
from scipy import misc
import matplotlib.pyplot as plt
import SimpleITK as sitk
import pandas as pd
# Demo: inner-join two small feature tables on their shared 'id' column.
df1 = pd.DataFrame(
    {'id': [1, 2, 3], 'F1': [0.7, 1.7, 3.2], 'F2': [1.3, 2.4, 3.7]},
    columns=['id', 'F1', 'F2'],
)
df2 = pd.DataFrame(
    {'id': [1, 3, 4], 'F3': [1.1, 2.2, 3.3]},
    columns=['id', 'F3'],
)
# Only ids present in both frames (1 and 3) survive the inner join.
df = df1.merge(df2, how='inner', on='id')
print(df)
print(df.iloc[:, 0].values)
|
import cvlib
# Load an image and outline/annotate every contour found in it (Python 2).
img = cvlib.load("photo11.bmp")
# Fill holes, then invert so the shapes stand out for contour detection.
i = cvlib.imfill(img)
i = cvlib.bitNot(i)
contours = cvlib.findContours(i)
for cnt in contours:
    cvlib.drawContour(img, cnt)
    print cvlib.cntInfo(img, cnt), "\n"
    cvlib.plotPOI(img, cnt)
cvlib.display(img)
|
from flask import Flask
from flask import request
from flask import jsonify
import json
import os
import time
import subprocess
# Expected whitespace-separated field count of a valid haproxy log line.
LOG_LINE_SIZE = 14
# 0-based positions of the client host and backend server fields in a log line.
HOST_LABEL_POSITION = 5
SERVER_LABEL_POSITION = 8
dir_path = os.path.dirname(os.path.realpath(__file__))
nodes_file_path = dir_path + '/nodes.json'
build_log_path = dir_path + '/build_log.txt'
#log_path = "/home/gabrielf/dev/sco/assets/support-containers/load_balancer/log.test"
log_path = '/var/log/haproxy.log'
conf_path = '/etc/haproxy/haproxy.cfg'
#conf_path = "/home/gabrielf/dev/sco/assets/support-containers/load_balancer/haproxy.cfg.test"
custom_connection_block_line = "cookie SERVERID insert indirect nocache"
# Header line of the frontend section that acl/use_backend rules go under.
tcp_conf_line = "frontend sco 0.0.0.0:80"
break_line = '\n'
blank = ' '
ident = ' '  # indentation unit for generated config lines
backend_label = 'backend '
server_label = 'server '
check_label = ' check'
port_separator = ":"
backend_suffix = "_bknd"
default_port = "8000"
n_src_addresses = 0  # counter used to generate unique acl rule names
host_labels = []
lines = []
last_lines = []
app = Flask(__name__)
def read_last_line(in_file):
    """Return the last line of open file *in_file*.

    Bug fix: the original called tail() but discarded its result, so every
    caller received None.
    """
    return tail(in_file, 1)
def tail( f, lines=20 ):
    """Return the last *lines* lines of open file *f*, newline-joined.

    Reads backwards from EOF in BLOCK_SIZE chunks until enough newlines
    have been seen, then joins the chunks and slices off the wanted tail.

    NOTE(review): written for Python 2 file objects — under Python 3 the
    end-relative seek on a text-mode file and the str/bytes mixing would
    need reworking; confirm before porting.
    """
    total_lines_wanted = lines
    BLOCK_SIZE = 1024
    f.seek(0, 2)  # jump to EOF to learn the file size
    block_end_byte = f.tell()
    lines_to_go = total_lines_wanted
    block_number = -1
    blocks = [] # blocks of size BLOCK_SIZE, in reverse order starting
    # from the end of the file
    while lines_to_go > 0 and block_end_byte > 0:
        if (block_end_byte - BLOCK_SIZE > 0):
            # read the last block we haven't yet read
            f.seek(block_number*BLOCK_SIZE, 2)
            blocks.append(f.read(BLOCK_SIZE))
        else:
            # file too small, start from begining
            f.seek(0,0)
            # only read what was not read
            blocks.append(f.read(block_end_byte))
        lines_found = blocks[-1].count('\n')
        lines_to_go -= lines_found
        block_end_byte -= BLOCK_SIZE
        block_number -= 1
    all_read_text = ''.join(reversed(blocks))
    # The chunk scan may over-read; the final slice trims to exactly the
    # requested number of lines.
    return '\n'.join(all_read_text.splitlines()[-total_lines_wanted:])
def get_host_label(log_line):
    """Return the client host (address without the port) from a haproxy
    log line, or None when the line is not a recognized log line."""
    if not is_log_line(log_line):
        return None
    fields = log_line.split()
    full_address = fields[HOST_LABEL_POSITION]
    return full_address.split(":")[0]
def get_server_label(log_line):
    """Return the backend server name (text after '/') from a haproxy log
    line, or None when the line is not a recognized log line.

    Fix: the debug print used Python 2 statement syntax; the parenthesized
    single-argument form behaves identically under Python 2 and also works
    under Python 3.
    """
    if not is_log_line(log_line):
        return None
    words = log_line.split()
    full_name = words[SERVER_LABEL_POSITION]
    print("FULL SERVER NAME IS " + full_name)
    return full_name.split("/")[1]
def is_log_line(line):
    """Return True only for lines with exactly LOG_LINE_SIZE fields whose
    last field equals "0/0"."""
    words = line.split()
    return len(words) == LOG_LINE_SIZE and words[13] == "0/0"
def add_rule(server_label, lb_ip, container_ip):
    """Add routing for *container_ip* to the haproxy config and reload.

    Inserts an acl + use_backend pair after the frontend section and, when
    the backend block does not exist yet, appends one pointing at *lb_ip*.
    """
    with open(conf_path, 'r+') as conf_file:
        lines = conf_file.readlines()
        position_src = get_next_src_rule_position(lines)
        new_rules = assemble_new_rule(server_label, container_ip)
        lines.insert(position_src, '\n' + new_rules[0])
        lines.insert(position_src + 1, '\n')
        # Second insert at the same index pushes the blank line down, so the
        # use_backend line lands between the acl line and the blank line.
        lines.insert(position_src + 1, ident + new_rules[1] + '\n')
        # NOTE(review): position_bind is never used, and
        # get_next_bind_rule_position is not defined in this module — confirm.
        position_bind = get_next_bind_rule_position(lines)
        if not _lb_exists(server_label, lines):
            position_backend = get_next_backend_rule_position(lines)
            backend_rule = 'backend ' + server_label + "_bknd" + '\n' + ident + 'server ' + server_label + blank + lb_ip + " check" + '\n' + '\n'
            lines.insert(position_backend, backend_rule)
        # The config only ever grows here, so rewriting from offset 0 is safe.
        conf_file.seek(0)
        conf_file.writelines(lines)
        conf_file.close()  # redundant inside `with`, kept as-is
    args = ['/etc/init.d/haproxy', 'reload']
    subprocess.call(args)
def get_next_src_rule_position(lines):
    """Return the index just past the first blank line after the frontend
    header, i.e. where a new acl rule should be inserted.

    Returns None when no blank line follows (mirrors the original
    fall-through).
    """
    seen_frontend = False
    for position, line in enumerate(lines):
        if line.rstrip() == tcp_conf_line:
            seen_frontend = True
        if seen_frontend and line == "\n":
            return position + 1
    return None
def get_next_frontend_position(lines):
    """Return the index of the first blank line at or after the frontend
    section start — the spot where a new frontend block is inserted.

    Returns None when no blank line follows (mirrors the original
    fall-through).
    """
    in_section = False
    for position, line in enumerate(lines):
        if line.startswith(tcp_conf_line):
            in_section = True
        if in_section and line == '\n':
            return position
    return None
def get_next_backend_rule_position(lines):
    """Return the insertion index for a new backend block: just before the
    final line of the config."""
    last_index = len(lines)
    return last_index - 1
def assemble_new_rule(server_label, host_label):
    """Return an (acl line, use_backend line) pair routing *host_label*
    to *server_label*'s backend.

    Increments the module-level n_src_addresses counter so each generated
    acl gets a unique name.
    """
    global n_src_addresses
    rule_name = "rule" + str(n_src_addresses)
    src_line = "acl " + rule_name + " hdr(host) -i " + host_label + '\n'
    bind_line = "use_backend " + server_label + "_bknd" + " if " + rule_name + '\n'
    n_src_addresses += 1
    return (src_line, bind_line)
def _lb_exists(server_label, lines):
backend_line = 'backend ' + server_label + "_bknd"
for line in lines:
if line.strip() == backend_line.strip():
return True
return False
def get_image_backend_position(image_id, lines):
    """Return the index of the 'backend <image_id>_bknd' header line in
    *lines*, or None when no such backend exists."""
    wanted = ('backend ' + image_id + '_bknd').strip()
    for idx, line in enumerate(lines):
        if line.strip() == wanted:
            return idx
    return None
def cluster_line_exists(image_id, node_ip, lb_port, lines):
    """Return True if the exact server line for this node already exists.

    NOTE(review): lines read from the config end with a newline but the
    probe string built here does not, and add_to_backend writes the server
    name as image_id + node_ip (no separator) while this checks image_id
    alone — so this membership test likely never matches; confirm against
    the real config format.
    """
    server_line = ident + 'server ' + image_id + blank + node_ip + ":" + lb_port + ' check'
    if server_line in lines:
        return True
    return False
def add_frontend(cluster_port, image_id):
    """Append a tcp-mode frontend block for *image_id* to the haproxy config.

    NOTE(review): the bind address is built as '0.0.0.0:80' + str(cluster_port)
    (e.g. port 808000 for cluster_port 8000) — confirm the intended port
    scheme. Also, unlike add_to_backend, no haproxy reload is issued here.
    """
    with open(conf_path, 'r+') as conf_file:
        lines = conf_file.readlines()
        position = get_next_frontend_position(lines)
        line1 = 'frontend ' + image_id + ' 0.0.0.0:80' + str(cluster_port)
        line2 = ident + 'log 127.0.0.1 local0'
        line3 = ident + 'mode tcp'
        line4 = ident + 'default_backend ' + image_id + '_bknd'
        lines.insert(position, '\n' + line1 + '\n' + line2 + '\n' + line3 + '\n' + line4 + '\n')
        # Config only grows, so rewriting from offset 0 is safe.
        conf_file.seek(0)
        conf_file.writelines(lines)
        conf_file.close()  # redundant inside `with`, kept as-is
def add_to_backend(image_id, node_ip, lb_port):
    """Add a server line for *node_ip*:*lb_port* under *image_id*'s backend
    block (creating the block if needed), then reload haproxy.

    NOTE(review): the server name is built as image_id + node_ip with no
    separator, which does not match the format remove_from_backend searches
    for — confirm the two stay in sync.
    """
    with open(conf_path, 'r+') as conf_file:
        lines = conf_file.readlines()
        if not cluster_line_exists(image_id, node_ip, lb_port, lines):
            backend_position = get_next_backend_rule_position(lines)
            if not _lb_exists(image_id, lines):
                backend_line = 'backend ' + image_id + "_bknd" + '\n'
                lines.insert(backend_position, backend_line)
            # Insert the server line directly under the backend header.
            image_backend_position = get_image_backend_position(image_id, lines)
            server_line = ident + 'server ' + image_id + node_ip + ":" + lb_port + blank + node_ip + ":" + lb_port + ' check' + '\n'
            lines.insert(image_backend_position + 1, server_line)
            conf_file.seek(0)
            conf_file.writelines(lines)
            conf_file.close()  # redundant inside `with`, kept as-is
    args = ['/etc/init.d/haproxy', 'reload']
    subprocess.call(args)
def remove_from_backend(image_id, node_ip, lb_port):
    """Drop the server line for *node_ip* from *image_id*'s backend block.

    NOTE(review): the edited `lines` are never written back to the file and
    haproxy is not reloaded, so this function currently has no effect on
    disk. The inner `while` also loops until a match is found and will
    raise IndexError if the expected server line is missing, and the line
    it searches for does not match the format add_to_backend writes —
    confirm and fix these together.
    """
    with open(conf_path, 'r+') as conf_file:
        lines = conf_file.readlines()
        backend_line = 'backend ' + image_id + "_bknd" + '\n'
        index = 0
        for line in lines:
            if line == backend_line:
                extra_lines = 1
                server_line_found = False
                while not server_line_found:
                    if lines[index + extra_lines] == ident + 'server ' + image_id + node_ip + blank + node_ip + ":" + lb_port + ' check' + '\n':
                        del lines[index + extra_lines]
                        server_line_found = True
                    extra_lines += 1
            index += 1
@app.route('/create', methods=['POST'])
def create():
    """Create a new frontend for a cluster.

    Expects JSON: {"cluster_port": ..., "image_id": ...}.
    """
    create_data = request.get_json()
    cluster_port = create_data['cluster_port']
    image_id = create_data['image_id']
    add_frontend(cluster_port, image_id)
    return "success"
@app.route('/update', methods=['POST'])
def update():
    """Add or remove a node from an image's backend.

    Expects JSON: {"image_id": ..., "node_ip": ..., "lb_port": ...};
    a falsy lb_port means "remove" (the node is then looked up without a
    usable port value).
    """
    update_data = request.get_json()
    image_id = update_data['image_id']
    node_ip = update_data['node_ip']
    lb_port = update_data['lb_port']
    if lb_port:
        add_to_backend(image_id, node_ip, lb_port)
    else:
        remove_from_backend(image_id, node_ip, lb_port)
    return "success"
#start_up()
# Listen on all interfaces so the container's port mapping works.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
|
from typing import Callable
from time import time
def time_count(func: Callable):
    """Decorator that prints the wall-clock duration of each call to *func*.

    Returns the wrapped function's own return value unchanged.

    Fix: apply functools.wraps so the wrapper preserves the decorated
    function's __name__/__doc__ (the original lost them).
    """
    from functools import wraps  # local import keeps the module import block unchanged

    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time()
        result = func(*args, **kwargs)
        print(time() - start)
        return result

    return wrapper
|
from . import SpatialGridStruct as _SpatialGridStruct
from .transform import TransformMethodsMixin
class SpatialGridStruct(_SpatialGridStruct, TransformMethodsMixin):
    """Extension wrapper: the base SpatialGridStruct from the package root
    combined with the transform helper methods mixin. No extra behavior of
    its own."""
    pass
|
import os
import sys
import datetime
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../simple_budget'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "simple_budget.settings")
from simple_budget.settings import (BACKUP_PATH, DATABASES)
# Abort unless a valid backup directory is configured (Python 2 script).
if not BACKUP_PATH or not os.path.isdir(BACKUP_PATH):
    print "Backup Path does not exist"
    sys.exit()
sql_dump_file = 'accounts.zz50.co.uk.sql'
# VACUUM ANALYZE first so the database is freshly maintained before dumping.
# NOTE(review): the password is passed through the shell environment; it may
# be visible via `ps` while the command runs — confirm this is acceptable.
os.popen('export PGPASSWORD="%s";'
         'psql -U%s -h%s %s -c "VACUUM ANALYZE;" > /dev/null 2>&1' %
         (DATABASES['default']['PASSWORD'], DATABASES['default']['USER'],
          DATABASES['default']['HOST'], DATABASES['default']['NAME'],))
# Dump schema+data as INSERT statements into the backup directory.
os.popen('export PGPASSWORD="%s";pg_dump -U%s -h%s %s '
         '--inserts --clean > %s/%s' %
         (DATABASES['default']['PASSWORD'], DATABASES['default']['USER'],
          DATABASES['default']['HOST'], DATABASES['default']['NAME'],
          BACKUP_PATH, sql_dump_file))
if not os.path.isfile(BACKUP_PATH + '/' + sql_dump_file):
    print "Error Dumping DB"
    sys.exit()
# Archive the dump with today's date, then remove the raw .sql file.
backup_file = 'accounts.zz50.co.uk_%s.tar.gz' % (datetime.date.today())
os.popen('cd %s;tar -czf %s %s' %
         (BACKUP_PATH, backup_file, sql_dump_file))
os.popen('rm %s/%s' % (BACKUP_PATH, sql_dump_file))
# -*- coding: utf-8 -*-
import logging
from datetime import datetime
from functools import partial
# Rename WARNING's display name so printed output shows the shorter 'WARN'.
logging.addLevelName(logging.WARN, 'WARN')
def print_log(level: int, msg: str, *args) -> None:
    """Print one log line: timestamp, level name, then the message.

    The message is %-formatted with *args* only when args are supplied
    (matching the logging module's lazy-formatting convention). Fix: the
    original always evaluated ``msg % args``, which raised TypeError for
    messages containing a literal '%' when called without arguments.
    """
    text = msg % args if args else msg
    log = '%s %s %s' % (
        datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
        logging.getLevelName(level),
        text
    )
    print(log)
# Convenience level-bound log functions, mirroring the logging module API.
debug = partial(print_log, logging.DEBUG)
info = partial(print_log, logging.INFO)
warn = partial(print_log, logging.WARN)
error = partial(print_log, logging.ERROR)
|
class QueueElement:
    """Linked-list node; ``previous`` points toward the tail of the queue."""

    def __init__(self, value):
        self.previous = None
        self.value = value


class Queue:
    """FIFO queue backed by a singly linked list (head = oldest element).

    Improvements over the original: ``is None`` instead of ``== None``
    (identity check, per PEP 8) and the Polish comment translated.
    """

    def __init__(self):
        self.head = None  # next element to be popped (oldest)
        self.tail = None  # most recently added element

    def add(self, value):
        """Append *value* at the tail of the queue."""
        new_element = QueueElement(value)
        # If the queue is empty, the new node is both head and tail.
        if self.head is None:
            self.head = new_element
            self.tail = new_element
        else:
            self.tail.previous = new_element
            self.tail = new_element

    def pop(self):
        """Remove and return the oldest value.

        Raises:
            ValueError: if the queue is empty.
        """
        if self.head is None:
            raise ValueError("Queue empty")
        result = self.head
        self.head = result.previous
        return result.value

    @property
    def is_empty(self):
        """True when the queue holds no elements."""
        return self.head is None
class StackElement:
    """Linked-list node; ``next`` points toward the bottom of the stack."""

    def __init__(self, value):
        self.value = value
        self.next = None


class Stack:
    """LIFO stack backed by a singly linked list.

    Improvement over the original: ``is None`` instead of ``== None``
    (identity check, per PEP 8).
    """

    def __init__(self):
        self.stack_top = None  # most recently pushed element

    def push(self, value):
        """Push *value* onto the top of the stack."""
        new_element = StackElement(value)
        if self.stack_top is None:
            self.stack_top = new_element
        else:
            new_element.next = self.stack_top
            self.stack_top = new_element

    def pop(self):
        """Remove and return the most recently pushed value.

        Raises:
            ValueError: if the stack is empty.
        """
        if self.stack_top is None:
            raise ValueError("Stack is empty")
        result = self.stack_top
        self.stack_top = result.next
        return result.value

    @property
    def is_empty(self):
        """True when the stack holds no elements."""
        return self.stack_top is None
|
import random
# Ad-hoc experiments with the random module.
print(random.random())
x = 0
while x < 100:
    x = x + 1
    #print(random.random())
    #print(random.uniform(0, 100))
def willWeaponHit(weaponChanceToHitProcentage):
    """Roll a uniform 0-100 value and report 'hit' when the roll is
    strictly below the given chance, else 'missed'."""
    roll = random.uniform(0, 100)
    return 'hit' if roll < weaponChanceToHitProcentage else 'missed'
# Simulate 100 attacks at 50% hit chance and tally the outcomes.
x = 0
listHit = []
while x < 100:
    x += 1
    listHit.append(willWeaponHit(50))
from collections import Counter  # mid-file import kept as-is
dictionaryHit = Counter(listHit)
print(dictionaryHit)
# Roll 100 d10s.
x = 0
while x < 100:
    x += 1
    #print(random.randrange(10))
    print(random.randint(1, 10))
|
from lineups import FanDuelLineup, LineupError
class OptimizeError(Exception):
    """Raised when lineup optimization fails; wraps the LineupError payload.

    Attributes:
        errvals: the error details carried over from the LineupError.
        code: HTTP-style status code consumers use when surfacing this error.

    Fix: call Exception.__init__ so ``args``/``str(exc)`` carry the payload
    (the original skipped it, leaving str(exc) empty).
    """

    def __init__(self, errvals):
        super(OptimizeError, self).__init__(errvals)
        self.errvals = errvals
        self.code = 400
def standard(date, projection_version='', site='fd', excludes=[0], includes=[]):
    """Build and optimize a FanDuel lineup for *date*.

    Args:
        date: slate date to optimize for.
        projection_version: which projection set to use ('' = default).
        site: site key; only FanDuel is handled here (parameter currently unused).
        excludes: player ids to exclude (see defaulting note below).
        includes: player ids that must appear in the lineup.

    Raises:
        OptimizeError: wrapping the errvals of any LineupError raised.

    NOTE(review): the mutable default arguments are shared across calls;
    safe only while they are never mutated — confirm before changing them.
    """
    # in the future, if there are different rules or you have a keeper player
    # who is alreay set in a position, we can change this and still get a valid
    # lineup
    # Defaulting excludes to [0] so we always have an array with values for the query
    try:
        lineup = FanDuelLineup(date, excludes=excludes, includes=includes)
        return lineup.optimize(projection_version)
    except LineupError as err:
        raise OptimizeError(err.errvals)
|
def solution(s):
    """Return the length of the shortest run-length compression of *s*
    obtainable by splitting it into equal chunks of 1..len(s)//2 characters.

    NOTE(review): reconstructed from whitespace-mangled source — the
    try/except around the chunk slicing and the duplicated if/else branches
    look like work-in-progress; the debug print() calls should be removed
    before production use. Confirm results against known cases before
    relying on it.
    """
    answer = 0
    half_s = len(s) // 2
    count = 1          # run length of the chunk currently being counted
    compare = ''       # chunk value currently being counted
    final_count = []   # pieces of the compressed string for this chunk size
    real_count = []    # compressed length achieved at each chunk size
    if len(s) != 1:
        for i in range(1, half_s+1):
            # Split s into consecutive chunks of width i (last may be short).
            stack = []
            for j in range(len(s)):
                try:
                    if len(s[(j * i)::]) != 0:
                        stack.append(s[(j*i): (j*i)+i])
                except:
                    stack.append(s[(j*i)::])
            print(stack)
            # Run-length encode the chunk sequence.
            for q in range(len(stack)):
                if compare != stack[q]:
                    compare = stack[q]
                    if q+1 < len(stack) and compare == stack[q+1]:
                        count = count + 1
                    else:
                        if count != 1:
                            final_count.append(str(count))
                        final_count.append(compare)
                        count = 1
                else:
                    if q+1 < len(stack) and compare == stack[q+1]:
                        count = count + 1
                    else:
                        if count != 1:
                            final_count.append(str(count))
                        final_count.append(compare)
                        count = 1
            real_count.append(len("".join(final_count)))
            final_count = []
    else:
        # A single character cannot be compressed further.
        answer = 1
        return answer
    print(real_count)
    real_count.sort()
    answer = real_count[0]
    return answer
# Quick manual check of the compression routine.
s = "aaabb"
solution(s)
|
import os
import sys
import datetime
import numpy as np
import keras
from keras_metrics import precision, recall
sys.path.insert(1, os.path.join(sys.path[0], '..', '..', '..'))
sys.path.insert(1, os.path.join(sys.path[0], '..', '..'))
from settings import MODEL_DIR, USE_GPU
from model import get_model
from features.helpers.data_helpers import plot_cnn_model_statistics, \
get_train_data, write_cnn_model_statistics, string_to_list
if USE_GPU:
    # Only importable (and needed) on multi-GPU setups.
    from keras.utils import multi_gpu_model
# use if you are running on a PC with many GPU-s
# needs to be at the beginning of the file
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# the GPU id to use, usually either "0" or "1"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2"
# just disables the warning, doesn't enable AVX/FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "2"
# -------- required arguments --------
# NOTE(review): a missing argv entry raises IndexError before these checks
# run; the truthiness tests only catch empty strings — confirm intended use.
if not sys.argv[1]:
    print('Enter model name defined in settings.py')
    sys.exit()
ML_MODEL = str(sys.argv[1])
if not sys.argv[2]:
    print(
        'Enter training parameters in the following order: cnn_layers, \
num_filters, filter_size, hidden_layers'
    )
    sys.exit()
parameters = string_to_list(sys.argv[2])
if not sys.argv[3]:
    print('Pass True as an argument if you would like to save a model.')
    sys.exit()
save_model = (sys.argv[3] == 'True')
if not sys.argv[4]:
    print('Enter number of classes to be trained.')
    sys.exit()
num_classes = int(sys.argv[4])
# ---------------- END OF ARGUMENTS --------------------
# Training hyper-parameters.
batch_size = 1000
epochs = 12
x_train, y_train = get_train_data(ML_MODEL)
n_rows, n_cols = x_train.shape
# Add a trailing channel dimension for the convolution layers.
x_train = np.expand_dims(x_train, axis=3)
y_train = keras.utils.to_categorical(
    y_train, num_classes=num_classes
)
validation_split = 0.2
print(
    'Parameters cnn_layers %s, num_filters %s, filter size %s, hidden layers %s' %
    (parameters[0], parameters[1], parameters[2], parameters[3])
)
model = get_model(
    (n_cols, 1),
    num_classes,
    cnn_layers=parameters[0],
    num_filters=parameters[1],
    filter_size=parameters[2],
    hidden_layers=parameters[3]
)
if USE_GPU:
    # Replicate the model across the three visible GPUs.
    model = multi_gpu_model(model, gpus=[0, 1, 2])
model.compile(
    loss=keras.losses.categorical_crossentropy,
    optimizer=keras.optimizers.Adadelta(),
    metrics=['accuracy', precision(), recall()]
)
print('Before:')
print(datetime.datetime.now())
# NOTE(review): steps_per_epoch/validation_steps are set to batch_size —
# confirm that is intended rather than a per-step batch size.
history = model.fit(
    x=x_train,
    y=y_train,
    validation_split=validation_split,
    epochs=epochs,
    verbose=1,
    shuffle=True,
    steps_per_epoch=batch_size,
    validation_steps=batch_size,
    callbacks=[
        keras.callbacks.EarlyStopping(
            monitor='val_loss', min_delta=0.1
        )
    ]
)
print('After:')
print(datetime.datetime.now())
# list all data in history
print(history.history.keys())
# serialize model to JSON
model_json = model.to_json()
# Encode the hyper-parameters into the saved artifact name.
name = ML_MODEL
for i in range(len(parameters)):
    name += '_' + str(parameters[i])
model_path = os.path.join(MODEL_DIR, ML_MODEL, 'model_' + name)
# NOTE(review): the model is saved unconditionally; save_model is parsed
# from argv but never consulted — confirm.
with open(model_path + '.json','w') as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(model_path + '.h5')
print('Saved model to disk')
write_cnn_model_statistics(
    name,
    history.history,
    x_train.shape,
    validation_split,
    parameters,
    ML_MODEL
)
# summarize history for accuracy
plot_cnn_model_statistics(
    'accuracy',
    history.history['acc'],
    history.history['val_acc'],
    name,
    ML_MODEL
)
# summarize history for loss
plot_cnn_model_statistics(
    'loss',
    history.history['loss'],
    history.history['val_loss'],
    name,
    ML_MODEL
)
|
# Read five whitespace-separated integers and print the minimal and maximal
# sums obtainable from exactly four of them (Python 2: raw_input).
inp = raw_input()
# Bug fix: the original sorted the *strings*, so multi-digit values ordered
# lexicographically (e.g. "10" < "9") and produced wrong sums. Sort the
# parsed integers instead.
nums = sorted(int(token) for token in inp.split())
min_sum = sum(nums[:4])   # drop the largest value
max_sum = sum(nums[-4:])  # drop the smallest value
print("%s %s" % (min_sum, max_sum))
|
# -*- coding: utf-8 -*-
#
# Copyright © 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
import functools
import gdbm
import unittest
import mock
from pulp.server.managers.consumer.bind import BindManager
import web
from pulp_puppet.common import constants
from pulp_puppet.forge import releases
from pulp_puppet.forge.unit import Unit
# Factory producing a Unit with sensible defaults; tests override fields
# (e.g. version=) as needed.
unit_generator = functools.partial(
    Unit, name='me/mymodule', file='/path/to/file', db={}, repo_id='repo1',
    host='localhost', protocol='http', version='1.0.0',
    dependencies = [{'name':'you/yourmodule', 'version_requirement': '>= 2.1.0'}]
)
# Canned host/protocol mapping matching what get_host_and_protocol returns.
MOCK_HOST_PROTOCOL = {
    'host': 'localhost',
    'protocol' : 'http'
}
class TestView(unittest.TestCase):
    """Tests for releases.view: auth handling, repo/consumer routing, and
    unit-db cleanup on both success and error paths."""

    @mock.patch('web.webapi.ctx')
    def test_null_auth(self, mock_ctx):
        # Both credentials null -> unauthorized.
        self.assertRaises(
            web.Unauthorized, releases.view, constants.FORGE_NULL_AUTH_VALUE,
            constants.FORGE_NULL_AUTH_VALUE, 'foo/bar')

    @mock.patch.object(releases, 'find_newest', autospec=True)
    def test_repo_only(self, mock_find):
        result = releases.view(constants.FORGE_NULL_AUTH_VALUE, 'repo1', 'foo/bar')
        mock_find.assert_called_once_with(['repo1'], 'foo/bar')
        self.assertEqual(result, mock_find.return_value.build_dep_metadata.return_value)

    @mock.patch.object(releases, 'find_newest', autospec=True)
    def test_repo_and_consumer(self, mock_find):
        # should ignore the consumer
        releases.view('consumer1', 'repo1', 'foo/bar')
        mock_find.assert_called_once_with(['repo1'], 'foo/bar')

    @mock.patch.object(releases, 'find_newest', autospec=True)
    @mock.patch.object(releases, 'get_bound_repos', autospec=True)
    def test_consumer_only(self, mock_get_bound, mock_find):
        mock_get_bound.return_value = ['repo1', 'repo2']
        releases.view('consumer1', constants.FORGE_NULL_AUTH_VALUE, 'foo/bar')
        mock_get_bound.assert_called_once_with('consumer1')
        mock_find.assert_called_once_with(['repo1', 'repo2'], 'foo/bar')

    @mock.patch.object(releases, 'find_version', autospec=True)
    def test_with_version(self, mock_find):
        # An explicit version routes to find_version rather than find_newest.
        result = releases.view(constants.FORGE_NULL_AUTH_VALUE, 'repo1', 'foo/bar', '1.0.0')
        mock_find.assert_called_once_with(['repo1'], 'foo/bar', '1.0.0')
        self.assertEqual(result, mock_find.return_value.build_dep_metadata.return_value)

    @mock.patch.object(releases, 'find_newest', autospec=True, return_value=None)
    @mock.patch('web.NotFound', return_value=Exception())
    def test_unit_not_found(self, mock_not_found, mock_find):
        self.assertRaises(Exception, releases.view, constants.FORGE_NULL_AUTH_VALUE, 'repo1', 'foo/bar')
        mock_not_found.assert_called_once_with()

    @mock.patch.object(releases, 'find_newest', autospec=True)
    def test_close_unit_db(self, mock_find):
        result = releases.view(constants.FORGE_NULL_AUTH_VALUE, 'repo1', 'foo/bar')
        mock_find.return_value.db.close.assert_called_once_with()

    @mock.patch.object(releases, 'find_newest', autospec=True)
    def test_close_unit_db_with_error(self, mock_find):
        # The db must be closed even when metadata generation blows up.
        mock_find.return_value.build_dep_metadata.side_effect=Exception
        self.assertRaises(Exception, releases.view, constants.FORGE_NULL_AUTH_VALUE, 'repo1', 'foo/bar')
        mock_find.return_value.db.close.assert_called_once_with()
class TestGetRepoData(unittest.TestCase):
    """Tests for releases.get_repo_data: per-repo gdbm database lookup,
    publish-dir configuration, and tolerance of unopenable databases."""

    @mock.patch('web.ctx')
    @mock.patch('pulp.server.managers.repo.distributor.RepoDistributorManager.find_by_repo_list')
    @mock.patch('gdbm.open', autospec=True)
    def test_single_repo(self, mock_open, mock_find, mock_ctx):
        mock_ctx.protocol = 'http'
        mock_find.return_value = [{'repo_id':'repo1', 'config':{}}]
        result = releases.get_repo_data(['repo1'])
        self.assertTrue(isinstance(result, dict))
        self.assertEqual(result.keys(), ['repo1'])
        self.assertEqual(result['repo1']['db'], mock_open.return_value)
        # Default publish dir is used when the distributor config is empty.
        mock_open.assert_called_once_with('/var/www/pulp_puppet/http/repos/repo1/.dependency_db', 'r')

    @mock.patch('web.ctx')
    @mock.patch('pulp.server.managers.repo.distributor.RepoDistributorManager.find_by_repo_list')
    @mock.patch('gdbm.open', autospec=True)
    def test_multiple_repos(self, mock_open, mock_find, mock_ctx):
        mock_ctx.protocol = 'http'
        mock_find.return_value = [
            {'repo_id':'repo1', 'config':{}},
            {'repo_id':'repo2', 'config':{}}
        ]
        result = releases.get_repo_data(['repo1', 'repo2'])
        self.assertTrue('repo1' in result)
        self.assertTrue('repo2' in result)

    @mock.patch('web.ctx')
    @mock.patch('pulp.server.managers.repo.distributor.RepoDistributorManager.find_by_repo_list')
    @mock.patch('gdbm.open', autospec=True)
    def test_configured_publish_dir(self, mock_open, mock_find, mock_ctx):
        # A distributor-configured HTTP dir overrides the default path.
        mock_ctx.protocol = 'http'
        mock_find.return_value = [
            {'repo_id':'repo1',
             'config':{constants.CONFIG_HTTP_DIR: '/var/www/pulp_puppet/foo'}}
        ]
        result = releases.get_repo_data(['repo1'])
        mock_open.assert_called_once_with('/var/www/pulp_puppet/foo/repo1/.dependency_db', 'r')

    @mock.patch('web.ctx')
    @mock.patch('pulp.server.managers.repo.distributor.RepoDistributorManager.find_by_repo_list')
    @mock.patch('gdbm.open', autospec=True)
    def test_db_open_error(self, mock_open, mock_find, mock_ctx):
        # An unopenable dependency db yields an empty result, not an error.
        mock_ctx.protocol = 'http'
        mock_find.return_value = [{'repo_id':'repo1', 'config':{}}]
        mock_open.side_effect = gdbm.error
        result = releases.get_repo_data(['repo1'])
        self.assertEqual(result, {})
        mock_open.assert_called_once_with('/var/www/pulp_puppet/http/repos/repo1/.dependency_db', 'r')
class TestGetProtocol(unittest.TestCase):
    """Tests for releases._get_protocol_from_distributor."""

    def test_default(self):
        result = releases._get_protocol_from_distributor({'config':{}})
        # http is currently the default protocol for publishes
        self.assertEqual(result, 'http')

    def test_no_config(self):
        # if there is no config, don't return a default. This is an error.
        self.assertRaises(KeyError, releases._get_protocol_from_distributor, {})

    def test_http(self):
        distributor = {'config': {constants.CONFIG_SERVE_HTTP: True}}
        result = releases._get_protocol_from_distributor(distributor)
        self.assertEqual(result, 'http')

    def test_https(self):
        distributor = {'config': {constants.CONFIG_SERVE_HTTPS: True}}
        result = releases._get_protocol_from_distributor(distributor)
        self.assertEqual(result, 'https')
class TestFindVersion(unittest.TestCase):
    """Tests for releases.find_version: per-repo unit loading, exact-version
    selection, and db cleanup on error.

    NOTE(review): itervalues() in test_close_dbs_on_error is Python 2 only.
    """

    @mock.patch.object(releases, 'get_host_and_protocol', return_value=MOCK_HOST_PROTOCOL)
    @mock.patch('pulp_puppet.forge.unit.Unit.units_from_json')
    @mock.patch.object(releases, 'get_repo_data', autospec=True)
    def test_calls_units_from_json(self, mock_get_data, mock_units_from_json, mock_get_host_and_protocol):
        mock_get_data.return_value = {
            'repo1' : {'db': mock.MagicMock(), 'protocol': 'http'},
            'repo2' : {'db': mock.MagicMock(), 'protocol': 'http'},
        }
        mock_units_from_json.return_value = []
        result = releases.find_version(['repo1', 'repo2'], 'foo/bar', '1.0.0')
        # One load per repository, each against that repo's own db handle.
        mock_units_from_json.assert_any_call(
            'foo/bar', mock_get_data.return_value['repo1']['db'],
            'repo1', MOCK_HOST_PROTOCOL['host'], 'http'
        )
        mock_units_from_json.assert_any_call(
            'foo/bar', mock_get_data.return_value['repo2']['db'],
            'repo2', MOCK_HOST_PROTOCOL['host'], 'http'
        )

    @mock.patch.object(releases, 'get_host_and_protocol')
    @mock.patch('pulp_puppet.forge.unit.Unit.units_from_json')
    @mock.patch.object(releases, 'get_repo_data', autospec=True)
    def test_returns_version(self, mock_get_data, mock_units_from_json, mock_get_host_and_protocol):
        mock_get_data.return_value = {
            'repo1' : {'db': mock.MagicMock(), 'protocol': 'http'},
            'repo2' : {'db': mock.MagicMock(), 'protocol': 'http'},
        }
        mock_units_from_json.return_value = [
            unit_generator(version='2.1.3'),
            unit_generator(version='1.6.2'),
            unit_generator(version='2.0.3'),
            unit_generator(version='3.1.5'),
        ]
        # The exact requested version is returned, not the newest.
        result = releases.find_version(['repo1', 'repo2'], 'foo/bar', '2.0.3')
        self.assertTrue(isinstance(result, Unit))
        self.assertEqual(result.version, '2.0.3')

    @mock.patch.object(releases, 'get_host_and_protocol')
    @mock.patch('pulp_puppet.forge.unit.Unit.units_from_json')
    @mock.patch.object(releases, 'get_repo_data', autospec=True)
    def test_no_units_found(self, mock_get_data, mock_units_from_json, mock_get_host_and_protocol):
        # make sure it correctly returns None if there are no units found
        mock_get_data.return_value = {
            'repo1' : {'db': mock.MagicMock(), 'protocol': 'http'},
            'repo2' : {'db': mock.MagicMock(), 'protocol': 'http'},
        }
        mock_units_from_json.return_value = []
        result = releases.find_version(['repo1', 'repo2'], 'foo/bar', '1.0.0')
        self.assertTrue(result is None)

    @mock.patch.object(releases, 'get_host_and_protocol')
    @mock.patch('pulp_puppet.forge.unit.Unit.units_from_json', side_effect=Exception)
    @mock.patch.object(releases, 'get_repo_data', autospec=True)
    def test_close_dbs_on_error(self, mock_get_data, mock_units_from_json, mock_get_host_and_protocol):
        mock_get_data.return_value = {
            'repo1' : {'db': mock.MagicMock(), 'protocol': 'http'},
            'repo2' : {'db': mock.MagicMock(), 'protocol': 'http'},
        }
        self.assertRaises(Exception, releases.find_version, ['repo1', 'repo2'], 'foo/bar', '1.0.0')
        # Every repo db handle must be closed despite the load failure.
        for mock_data in mock_get_data.return_value.itervalues():
            mock_data['db'].close.assert_called_once_with()
class TestFindNewest(unittest.TestCase):
    """Tests for releases.find_newest: per-repo unit loading, newest-version
    selection, and db cleanup on error.

    NOTE(review): itervalues() in test_close_dbs_on_error is Python 2 only.
    """

    @mock.patch.object(releases, 'get_host_and_protocol', return_value=MOCK_HOST_PROTOCOL)
    @mock.patch('pulp_puppet.forge.unit.Unit.units_from_json')
    @mock.patch.object(releases, 'get_repo_data', autospec=True)
    def test_calls_units_from_json(self, mock_get_data, mock_units_from_json, mock_get_host_and_protocol):
        mock_get_data.return_value = {
            'repo1' : {'db': mock.MagicMock(), 'protocol': 'http'},
            'repo2' : {'db': mock.MagicMock(), 'protocol': 'http'},
        }
        mock_units_from_json.return_value = []
        result = releases.find_newest(['repo1', 'repo2'], 'foo/bar')
        # One load per repository, each against that repo's own db handle.
        mock_units_from_json.assert_any_call(
            'foo/bar', mock_get_data.return_value['repo1']['db'],
            'repo1', MOCK_HOST_PROTOCOL['host'], 'http'
        )
        mock_units_from_json.assert_any_call(
            'foo/bar', mock_get_data.return_value['repo2']['db'],
            'repo2', MOCK_HOST_PROTOCOL['host'], 'http'
        )

    @mock.patch.object(releases, 'get_host_and_protocol')
    @mock.patch('pulp_puppet.forge.unit.Unit.units_from_json')
    @mock.patch.object(releases, 'get_repo_data', autospec=True)
    def test_returns_newest(self, mock_get_data, mock_units_from_json, mock_get_host_and_protocol):
        mock_get_data.return_value = {
            'repo1' : {'db': mock.MagicMock(), 'protocol': 'http'},
            'repo2' : {'db': mock.MagicMock(), 'protocol': 'http'},
        }
        mock_units_from_json.return_value = [
            unit_generator(version='2.1.3'),
            unit_generator(version='1.6.2'),
            unit_generator(version='3.1.5'),
            unit_generator(version='2.0.3'),
        ]
        # The highest version wins regardless of list order.
        result = releases.find_newest(['repo1', 'repo2'], 'foo/bar')
        self.assertTrue(isinstance(result, Unit))
        self.assertEqual(result.version, '3.1.5')

    @mock.patch.object(releases, 'get_host_and_protocol')
    @mock.patch('pulp_puppet.forge.unit.Unit.units_from_json')
    @mock.patch.object(releases, 'get_repo_data', autospec=True)
    def test_no_units_found(self, mock_get_data, mock_units_from_json, mock_get_host_and_protocol):
        # make sure it correctly returns None if there are no units found
        mock_get_data.return_value = {
            'repo1' : {'db': mock.MagicMock(), 'protocol': 'http'},
            'repo2' : {'db': mock.MagicMock(), 'protocol': 'http'},
        }
        mock_units_from_json.return_value = []
        result = releases.find_newest(['repo1', 'repo2'], 'foo/bar')
        self.assertTrue(result is None)

    @mock.patch.object(releases, 'get_host_and_protocol')
    @mock.patch('pulp_puppet.forge.unit.Unit.units_from_json', side_effect=Exception)
    @mock.patch.object(releases, 'get_repo_data', autospec=True)
    def test_close_dbs_on_error(self, mock_get_data, mock_units_from_json, mock_get_host_and_protocol):
        mock_get_data.return_value = {
            'repo1' : {'db': mock.MagicMock(), 'protocol': 'http'},
            'repo2' : {'db': mock.MagicMock(), 'protocol': 'http'},
        }
        self.assertRaises(Exception, releases.find_newest, ['repo1', 'repo2'], 'foo/bar')
        # Every repo db handle must be closed despite the load failure.
        for mock_data in mock_get_data.return_value.itervalues():
            mock_data['db'].close.assert_called_once_with()
class TestGetHostAndProtocol(unittest.TestCase):
    """Tests for releases.get_host_and_protocol (reads web.ctx)."""

    @mock.patch('web.ctx', autospec=True)
    def test_normal(self, mock_ctx):
        mock_ctx.host = 'localhost'
        mock_ctx.protocol = 'http'
        result = releases.get_host_and_protocol()
        self.assertEqual(set(result.keys()), set(['host', 'protocol']))
        self.assertEqual(result['host'], 'localhost')
        self.assertEqual(result['protocol'], 'http')
class TestGetBoundRepos(unittest.TestCase):
    """Tests for releases.get_bound_repos: only bindings whose distributor
    is the puppet distributor type are returned."""

    @mock.patch.object(BindManager, 'find_by_consumer', spec=BindManager().find_by_consumer)
    def test_only_puppet(self, mock_find):
        bindings =[{
            'repo_id': 'repo1',
            'distributor_id' : constants.DISTRIBUTOR_TYPE_ID
        }]
        mock_find.return_value = bindings
        result = releases.get_bound_repos('consumer1')
        mock_find.assert_called_once_with('consumer1')
        self.assertEqual(result, ['repo1'])

    @mock.patch.object(BindManager, 'find_by_consumer', spec=BindManager().find_by_consumer)
    def test_only_other_type(self, mock_find):
        # Non-puppet bindings are filtered out entirely.
        bindings =[{
            'repo_id': 'repo1',
            'distributor_id': 'some_other_type'
        }]
        mock_find.return_value = bindings
        result = releases.get_bound_repos('consumer1')
        mock_find.assert_called_once_with('consumer1')
        self.assertEqual(result, [])

    @mock.patch.object(BindManager, 'find_by_consumer', spec=BindManager().find_by_consumer)
    def test_mixed_types(self, mock_find):
        bindings =[
            {
                'repo_id': 'repo1',
                'distributor_id' : constants.DISTRIBUTOR_TYPE_ID
            },
            {
                'repo_id': 'repo2',
                'distributor_id' :'some_other_type'
            },
            {
                'repo_id': 'repo3',
                'distributor_id' : constants.DISTRIBUTOR_TYPE_ID
            },
        ]
        mock_find.return_value = bindings
        result = releases.get_bound_repos('consumer1')
        mock_find.assert_called_once_with('consumer1')
        self.assertEqual(result, ['repo1', 'repo3'])
|
import numpy as np
# NOTE(review): this assignment shadows the `license` builtin installed by
# site.py; harmless in a script, but rename if interactive help() matters.
license="""
Copyright (C) 2014 James Annis
This program is free software; you can redistribute it and/or modify it
under the terms of version 3 of the GNU General Public License as
published by the Free Software Foundation.
More to the points- this code is science code: buggy, barely working,
with little or no documentation. Science code in the the alpine fast
& light style.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
def rotate (x, y, angle) :
    """Rotate 2-D point(s) (x, y) counter-clockwise by *angle* degrees.

    Works element-wise on numpy arrays as well as scalars.
    """
    theta = angle*2*np.pi/360.
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    newx = x*cos_t - y*sin_t
    newy = x*sin_t + y*cos_t
    return newx, newy
#
# x axis points at ra = 0
# y axis points at ra = 90
# z axis points at dec = 90
#
def sphericalToCartesian (ra, dec, r=1) :
    """Convert right ascension and declination (degrees, optional radius)
    into Cartesian x, y, z.

    Axis convention (per the module header): x points at ra=0, y at ra=90,
    z at dec=90.
    """
    ra_rad = ra*2*np.pi/360.
    dec_rad = dec*2*np.pi/360.
    cos_dec = np.cos(dec_rad)
    x = r * np.cos(ra_rad)*cos_dec
    y = r * np.sin(ra_rad)*cos_dec
    z = r * np.sin(dec_rad)
    return x,y,z
def cartesianToSpherical(x, y, z):
    """Convert Cartesian coordinates to (ra, dec, r), angles in degrees.

    For array inputs, ra is wrapped into [0, 360) and dec reflected into
    [-90, 90]. Scalar inputs skip the wrapping: fancy indexing raises
    TypeError on plain floats and numpy's 0-d nonzero raises ValueError —
    previously hidden behind a bare ``except:`` that also swallowed
    KeyboardInterrupt/SystemExit.
    """
    r = np.sqrt(x**2 + y**2 + z**2)
    npd = np.arccos(z / r)              # north polar distance (co-latitude)
    ra = np.degrees(np.arctan2(y, x))
    dec = 90 - np.degrees(npd)
    try:
        index = np.nonzero(ra < 0); ra[index] = ra[index] + 360
        index = np.nonzero(ra >= 360); ra[index] = ra[index] - 360
        index = np.nonzero(dec > 90); dec[index] = 90 - (dec[index] - 90)
        index = np.nonzero(dec < -90); dec[index] = -90 + (dec[index] + 90)
    except (TypeError, IndexError, ValueError):
        pass                            # scalar input: leave unwrapped
    return ra, dec, r
def rotateAboutXaxis(x, y, z, alpha, verbose=0):
    """Rotate (x, y, z) about the x-axis by alpha degrees; x is unchanged."""
    if verbose : print("\t x axis rotation of ", alpha, "given ", x[0], y[0], z[0])
    rad = alpha * 2 * np.pi / 360.
    cos_a, sin_a = np.cos(rad), np.sin(rad)
    return x, y * cos_a - z * sin_a, y * sin_a + z * cos_a
def rotateAboutYaxis(x, y, z, alpha, verbose=0):
    """Rotate (x, y, z) about the y-axis by alpha degrees; y is unchanged.

    NOTE(review): the original computed one sign convention (commented
    "correct") and then immediately overwrote it with the opposite one
    (commented "but at one point functional"). The dead first computation
    is removed here; the second, effective convention is kept, so behavior
    is identical. Whether that sign is astronomically right is worth
    confirming against test_euler.
    """
    if verbose : print("\t y axis rotation of ", alpha, "given ", x[0], y[0], z[0])
    alpha = alpha * 2 * np.pi / 360.
    xp = x * np.cos(alpha) - z * np.sin(alpha)
    yp = y
    zp = x * np.sin(alpha) + z * np.cos(alpha)
    return xp, yp, zp
def rotateAboutZaxis(x, y, z, alpha, verbose=0):
    """Rotate (x, y, z) about the z-axis by alpha degrees; z is unchanged."""
    if verbose : print("\t z axis rotation of ", alpha, "given ", x[0], y[0], z[0])
    rad = alpha * 2 * np.pi / 360.
    cos_a, sin_a = np.cos(rad), np.sin(rad)
    return x * cos_a - y * sin_a, x * sin_a + y * cos_a, z
def getEulerAngles(ra, dec):
    """Return zxz Euler angles (alpha, beta, gamma) for pointing at (ra, dec).

    alpha is identically zero in this convention; beta carries the
    declination and gamma the right ascension.
    """
    return 0, dec, ra
def test_euler(raCen, decCen):
    """Demo/diagnostic: apply the z-y-z Euler rotation for (raCen, decCen).

    Prints the Cartesian coordinates of one sample point after each of the
    three rotations, then the final ra/dec of all sample points. Output is
    for eyeballing only; nothing is returned or asserted.
    """
    alpha, beta, gamma = getEulerAngles (raCen, decCen)
    print("rotate about axes by Euler angles: ", alpha, beta, gamma)
    # sample points bracketing the frame origin
    ra = [-30, -10, -10, 0, 0, 30]
    dec = [ 0, 1, -1, 3, -3, 0]
    ra = np.array(ra); dec = np.array(dec)
    print("ra,dec pairs: ", end=' ')
    for i in range(0,len(ra)) :
        print("{:6.2f} {:6.2f} ".format(ra[i], dec[i]), end=' ')
    print("")
    x,y,z = sphericalToCartesian(ra,dec,1)
    i = 1   # the single point whose intermediate values get printed
    print("{:d} start : {:10.5f} {:10.5f} {:10.5f}".format(i,float(x[i]),float(y[i]),float(z[i])))
    x,y,z = rotateAboutZaxis(x,y,z,alpha)
    print("Z-rot done {:5.1f} deg : {:10.5f} {:10.5f} {:10.5f}".format(alpha, float(x[i]),float(y[i]),float(z[i])))
    x,y,z = rotateAboutYaxis(x,y,z,beta)
    print("Y-rot done {:5.1f} deg : {:10.5f} {:10.5f} {:10.5f}".format(beta, float(x[i]),float(y[i]),float(z[i])))
    x,y,z = rotateAboutZaxis(x,y,z,gamma)
    print("Z-rot done {:5.1f} deg : {:10.5f} {:10.5f} {:10.5f}".format(gamma, float(x[i]),float(y[i]),float(z[i])))
    ra,dec,r = cartesianToSpherical (x, y, z)
    ra = np.array(ra); dec = np.array(dec)
    print("ra,dec pairs out: ", end=' ')
    for i in range(0,len(ra)) :
        print("{:6.2f} {:6.2f} ".format(ra[i], dec[i]), end=' ')
    print("")
# definintions:
# zxz rotation conventions, following the solid body
# the ellipse is centered at the z=0, x=0, y=1 position, ra=90, dec=0
# the target is ra,dec; our aim is to place the y-axis onto it, after the three rotations
# line of nodes: this will be at ra+- 90 degrees
# thus alpha = x-x' = 0-(ra-90) = 90-ra
# then beta = rotate about x'axis by declination,
# and gamma = 0, as the X axis is coincident with the nodes
#
def getEulerAngles2(ra, dec):
    """Return zxz Euler angles (alpha, beta, gamma) for pointing at (ra, dec).

    NOTE(review): the original first computed (-90, -dec, ra+90) per the
    line-of-nodes derivation in the comment block above, then immediately
    overwrote it with (0, dec, ra). The dead first assignment is removed
    here, keeping the effective behavior identical; reconcile with the
    derivation if the mk2 test disagrees.
    """
    alpha = 0
    beta = dec
    gamma = ra
    return alpha, beta, gamma
def test_euler_mk2(raCen, decCen):
    """Demo/diagnostic: z-x-z Euler rotation variant for (raCen, decCen).

    Like test_euler, but samples points near ra=90 (shifted by -90 below)
    and uses an X-axis rotation for the middle Euler angle. Prints
    intermediate coordinates for one point and the final ra/dec of all.
    """
    alpha, beta, gamma = getEulerAngles2 (raCen, decCen)
    print("rotate about axes by Euler angles: ", alpha, beta, gamma)
    ra = [60, 80, 80, 90, 90, 90.]
    dec = [ 0, 1, -1, 3, -3, 0.]
    ra = np.array(ra); dec = np.array(dec)
    ra = ra-90   # re-center the samples on the ra=90 frame origin
    print("ra,dec pairs: ", end=' ')
    for i in range(0,len(ra)) :
        print("{:6.2f} {:6.2f} ".format(ra[i], dec[i]), end=' ')
    print("")
    x,y,z = sphericalToCartesian(ra,dec,1)
    i = 1   # the single point whose intermediate values get printed
    print("{:d} start : {:10.5f} {:10.5f} {:10.5f}".format(i,float(x[i]),float(y[i]),float(z[i])))
    x,y,z = rotateAboutZaxis(x,y,z,alpha)
    print("Z-rot done {:5.1f} deg : {:10.5f} {:10.5f} {:10.5f}".format(alpha, float(x[i]),float(y[i]),float(z[i])))
    x,y,z = rotateAboutXaxis(x,y,z,beta)
    print("X-rot done {:5.1f} deg : {:10.5f} {:10.5f} {:10.5f}".format(beta, float(x[i]),float(y[i]),float(z[i])))
    x,y,z = rotateAboutZaxis(x,y,z,gamma)
    print("Z-rot done {:5.1f} deg : {:10.5f} {:10.5f} {:10.5f}".format(gamma, float(x[i]),float(y[i]),float(z[i])))
    ra,dec,r = cartesianToSpherical (x, y, z)
    ra = np.array(ra); dec = np.array(dec)
    print("ra,dec pairs out: ", end=' ')
    for i in range(0,len(ra)) :
        print("{:6.2f} {:6.2f} ".format(ra[i], dec[i]), end=' ')
    print("")
#
# a combination of cunning and euler rotation;
# a solid body rotation of an great circle ellipse
# centered on ra, dec=0, rotated up to the dec
# of interest
#
def rotateAboutEasternPole(raCen, decCen, ra, dec):
    """Rotate points (ra, dec) up to decCen about the pole 90 deg east of raCen.

    Implements the "solid body rotation of a great circle ellipse" described
    in the comment above: map into a frame where raCen lands at ra=90
    (sphToCartesian below), rotate about the x-axis by decCen, map back.
    Returns the rotated (ra, dec); array inputs expected by cartesianToSph.
    """
    x, y, z = sphToCartesian(raCen, ra, dec)
    x, y, z = rotateAboutXaxis (x, y, z, decCen)
    ra, dec = cartesianToSph (raCen, x, y, z)
    return ra, dec
# redefine ra to be 90 away from this ra
def sphToCartesian(ra0, ra, dec, r=1):
    """Spherical-to-Cartesian with RA re-zeroed so that ra0 maps to ra=90."""
    lon = (ra - (ra0 - 90)) * 2 * np.pi / 360.
    lat = dec * 2 * np.pi / 360.
    cos_lat = np.cos(lat)
    return r * np.cos(lon) * cos_lat, r * np.sin(lon) * cos_lat, r * np.sin(lat)
def cartesianToSph(ra0, x, y, z):
    """Cartesian back to (ra, dec) degrees; inverse of sphToCartesian's RA offset.

    Expects array inputs (uses fancy indexing to wrap ra into [0, 360) and
    reflect dec into [-90, 90]).
    """
    radius = np.sqrt(x**2 + y**2 + z**2)
    colat = np.arccos(z / radius)               # north polar distance
    ra = (ra0 - 90) + np.arctan2(y, x) * 360 / (2 * np.pi)
    dec = 90 - colat * 360 / (2 * np.pi)
    sel = np.nonzero(ra < 0); ra[sel] = ra[sel] + 360
    sel = np.nonzero(ra >= 360); ra[sel] = ra[sel] - 360
    sel = np.nonzero(dec > 90); dec[sel] = 90 - (dec[sel] - 90)
    sel = np.nonzero(dec < -90); dec[sel] = -90 + (dec[sel] + 90)
    return ra, dec
|
# Visualize raw time series with 4 classes
# 'PVHA', 'PVLA', 'NVHA', 'NVLA'
from __future__ import print_function
import matplotlib
matplotlib.rcParams.update({'font.size': 16})
import matplotlib.pyplot as plt
import numpy as np
from loadData import loadData

# Importing the data files
numClasses = 4
if numClasses == 12:
    # 12-class variant: each valence/arousal class split into 3 intensities
    classNames = ['PVHA_i1', 'PVHA_i2', 'PVHA_i3', 'PVLA_i1', 'PVLA_i2', 'PVLA_i3', 'NVHA_i1', 'NVHA_i2', 'NVHA_i3',
                  'NVLA_i1', 'NVLA_i2', 'NVLA_i3']
else:
    classNames = ['PVHA', 'PVLA', 'NVHA', 'NVLA']
path = 'DATA/DATA_' + str(numClasses) + '/'
(X, y) = loadData(path, numClasses)
# Move trials to the leading axis; presumably X becomes
# (trials, frequency, time, sensors) — TODO confirm against loadData
X = np.transpose(X, [3, 0, 1, 2])
X.shape
# average over time
X1 = np.mean(X, axis=2)
# Extracting beta, lower gamma and higher gamma bands and averaging each separately
temp1 = np.mean(X[:, 13:30, :, :], axis=1)   # beta (13-30 Hz)
temp2 = np.mean(X[:, 31:50, :, :], axis=1)   # lower gamma (31-50 Hz)
temp3 = np.mean(X[:, 51:79, :, :], axis=1)   # higher gamma (51-79 Hz)
########################################################################################################################
# visualize the average activations for every frequency band
########################################################################################################################
CLASS_LABELS = ['PVHA', 'PVLA', 'NVHA', 'NVLA']
CLASS_COLORS = ['b', 'r', 'g', 'k']


def plotBandPsd(band, title, outfile):
    """Plot per-class PSD vs time for one band and save the figure.

    band: array indexed (trials, timebins, sensors); trials are averaged
    per class (y == 0..3), then sensors are averaged, giving one curve per
    class. Returns the four curves so the combined plot can reuse them.
    The original repeated this ~20-line section verbatim for each band.
    """
    curves = [np.mean(np.mean(band[y == c, :, :], axis=0), axis=1) for c in range(4)]
    plt.figure(figsize=(10, 6))
    for curve, color, label in zip(curves, CLASS_COLORS, CLASS_LABELS):
        plt.plot(curve, linewidth=2, color=color, label=label)
    plt.legend()
    plt.title(title, fontsize=15)
    plt.xlabel("timebins, 100 msec")
    # raw string: "\m" is not a valid escape sequence
    plt.ylabel(r"power spectral density $\mu V^2$/Hz")
    plt.tight_layout()
    plt.savefig(outfile)
    return curves


betaCurves = plotBandPsd(temp1, "Power spectral density averaged over sensors and trials for beta band (13-30 Hz)",
                         "FIGURES/SPECTRAL_DENSITIES/psdBeta_" + str(numClasses) + ".pdf")
lowGammaCurves = plotBandPsd(temp2, "Power spectral density averaged over sensors and trials for lower gamma band (31-50 Hz)",
                             "FIGURES/SPECTRAL_DENSITIES/psdLowGamma_" + str(numClasses) + ".pdf")
highGammaCurves = plotBandPsd(temp3, "Power spectral density averaged over sensors and trials for higher gamma band (51-79 Hz)",
                              "FIGURES/SPECTRAL_DENSITIES/psdHighGamma_" + str(numClasses) + ".pdf")

# plot all together; the legend labels now carry the band name (the original
# repeated the four class labels three times, producing 12 indistinguishable
# legend entries)
plt.figure(figsize=(10, 6))
for bandName, curves in (("beta", betaCurves), ("low gamma", lowGammaCurves), ("high gamma", highGammaCurves)):
    for curve, color, label in zip(curves, CLASS_COLORS, CLASS_LABELS):
        plt.plot(curve, linewidth=2, color=color, label=label + " " + bandName)
plt.legend()
plt.title("Power spectral density averaged over sensors and trials", fontsize=15)
plt.xlabel("timebins, 100 msec")
plt.ylabel(r"power spectral density $\mu V^2$/Hz")
plt.tight_layout()
plt.savefig("FIGURES/SPECTRAL_DENSITIES/psdAll_" + str(numClasses) + ".pdf")
########################################################################################################################
# activation depending on frequency
########################################################################################################################
# average over the sensors
X2 = np.mean(X1, axis=2)
X2.shape
########################################################################################################################
# per-class mean PSD as a function of frequency bin
tmpX_0 = np.mean(X2[y == 0, :], axis=0)
tmpX_1 = np.mean(X2[y == 1, :], axis=0)
tmpX_2 = np.mean(X2[y == 2, :], axis=0)
tmpX_3 = np.mean(X2[y == 3, :], axis=0)
ax = plt.figure(figsize=(10, 6))
plt.plot(tmpX_0, linewidth=2, color='b', label='PVHA')
plt.plot(tmpX_1, linewidth=2, color='r', label='PVLA')
plt.plot(tmpX_2, linewidth=2, color='g', label='NVHA')
plt.plot(tmpX_3, linewidth=2, color='k', label='NVLA')
plt.legend()
plt.title("Power spectral density vs frequency averaged over sensors and trials", fontsize=15)
plt.xlabel("frequency, Hz")
plt.ylabel("power spectral density $\mu V^2$/Hz")
plt.tight_layout()
plt.savefig("FIGURES/SPECTRAL_DENSITIES/psdFreq" + str(numClasses) + ".pdf")
|
# pylint: disable=C0103
# pylint: disable=C0111
import unittest
import calculator
from mock import MagicMock
class ParseTestCase(unittest.TestCase):
    """Tests for Calculator.parse: infix string -> RPN token list.

    The regex arguments of assertRaisesRegexp are raw strings so the
    "\(" / "\)" escapes are explicit; the pattern text is unchanged, but
    the non-raw form relied on Python passing unknown escapes through
    (a SyntaxWarning on modern interpreters).
    """

    def setUp(self):
        self.calculator = calculator.Calculator()

    def when_input_empty_string_should_raise_error(self):
        # assert
        with self.assertRaisesRegexp(ValueError, 'No input provided'):
            # act
            self.calculator.parse('')

    def when_parsing_expression_should_skip_spaces(self):
        # act
        result = self.calculator.parse('3 + 4 + 5')
        # assert
        self.assertEqual(result, [3, 4, '+', 5, '+'])

    def when_parsing_single_operation_should_put_numbers_first(self):
        # act
        result = self.calculator.parse('3+4')
        # assert
        self.assertEqual(result, [3, 4, '+'])

    def when_lower_precedence_operator_before_higher_should_put_lower_last(self):
        # act
        result = self.calculator.parse('3+4*5')
        # assert
        self.assertEqual(result, [3, 4, 5, '*', '+'])

    def when_higher_precedence_operator_before_lower_should_put_higher_between(self):
        # act
        result = self.calculator.parse('3*4+5')
        # assert
        self.assertEqual(result, [3, 4, '*', 5, '+'])

    def when_expression_with_parentheses_should_obey_precedence(self):
        # act
        result = self.calculator.parse('(3+4)*5')
        # assert
        self.assertEqual(result, [3, 4, '+', 5, '*'])

    def when_multidigit_expresssions_should_return_valid_rpn(self):
        # act
        result = self.calculator.parse('12 + 45 * 415')
        # assert
        self.assertEqual(result, [12, 45, 415, '*', '+'])

    def when_no_matching_opening_parentesis_should_raise_error(self):
        # assert
        with self.assertRaisesRegexp(SyntaxError, r'Could not find pair for "\)"'):
            # act
            self.calculator.parse('11 + 13) * 2')

    def when_no_matching_closing_parentesis_should_raise_error(self):
        # assert
        with self.assertRaisesRegexp(SyntaxError, r'Could not find pair for "\("'):
            # act
            self.calculator.parse('(11 + 13 * 2')

    def when_unknown_token_should_raise_error(self):
        # assert
        with self.assertRaisesRegexp(SyntaxError, 'Unknown token'):
            # act
            self.calculator.parse('11 + 13 # 2')
class ProcessRpnTestCase(unittest.TestCase):
    """Tests for Calculator.process_rpn: evaluate an RPN token list."""

    def setUp(self):
        self.calculator = calculator.Calculator()

    def when_input_empty_list_should_raise_error(self):
        # assert
        with self.assertRaisesRegexp(ValueError, 'Nothing to process'):
            # act
            self.calculator.process_rpn([])

    def when_processing_single_operation_should_return_valid_result(self):
        # act
        result = self.calculator.process_rpn([3, 4, '+'])
        # assert
        self.assertEqual(result, 7)

    def when_lower_precedence_operator_before_higher_should_process_higher_first(self):
        # act: RPN for 3 + 4*5
        result = self.calculator.process_rpn([3, 4, 5, '*', '+'])
        # assert
        self.assertEqual(result, 23)

    def when_higher_precedence_operator_before_lower_should_process_higher_first(self):
        # act: RPN for 3*4 + 5
        result = self.calculator.process_rpn([3, 4, '*', 5, '+'])
        # assert
        self.assertEqual(result, 17)

    def when_too_many_arguments_should_raise_error(self):
        # assert: four values but operators for only three
        with self.assertRaisesRegexp(SyntaxError, 'Insufficient amount of operators'):
            # act
            self.calculator.process_rpn([3, 4, 5, '*'])

    def when_too_many_operators_should_raise_error(self):
        # assert: two values but operators consuming three
        with self.assertRaisesRegexp(SyntaxError, 'Insufficient amount of arguments'):
            # act
            self.calculator.process_rpn([3, 5, '*', '+'])
class EvaluateRpnTestCase(unittest.TestCase):
    """Tests for Calculator.evaluate: parse + process_rpn composition."""

    def setUp(self):
        self.calculator = calculator.Calculator()

    def when_evaluating_expression_return_call_parse_and_process(self):
        # arrange
        self.calculator.parse = MagicMock(return_value=[2, 2, '+'])
        self.calculator.process_rpn = MagicMock(return_value=4)
        # act
        result = self.calculator.evaluate('2 + 2')
        # assert
        self.calculator.parse.assert_called_with('2 + 2')
        self.calculator.process_rpn.assert_called_with([2, 2, '+'])
        # BUG FIX: result was captured but never checked — also assert that
        # evaluate returns whatever process_rpn produced.
        self.assertEqual(result, 4)

    def when_evaluating_complex_expression_should_return_correct_result(self):
        # act
        result = self.calculator.evaluate('(15 + 7) / 2 - (65 - 61) * 2')
        # assert: 22/2 - 4*2 = 11 - 8
        self.assertEqual(result, 3)
# BUG FIX: the suite previously ran at import time; guard it so importing
# this module (e.g. from another runner or tool) no longer executes tests.
if __name__ == '__main__':
    test_loader = unittest.TestLoader()
    test_loader.testMethodPrefix = 'when'   # collect the when_* methods above
    parse_test_suite = test_loader.loadTestsFromTestCase(ParseTestCase)
    process_test_suite = test_loader.loadTestsFromTestCase(ProcessRpnTestCase)
    evaluate_test_suite = test_loader.loadTestsFromTestCase(EvaluateRpnTestCase)
    test_suite = unittest.TestSuite(
        [parse_test_suite, process_test_suite, evaluate_test_suite])
    unittest.TextTestRunner(verbosity=2).run(test_suite)
|
import curses
import os
import sys
import re
import shutil
import configparser
import json
import pkg_resources
import locale
import requests
import requests_cache
# GLOBALS!
BASEDIR = os.path.join(os.path.expanduser('~'), '.cryptop')
DATAFILE = os.path.join(BASEDIR, 'wallet.json')
CONFFILE = os.path.join(BASEDIR, 'config.ini')
CONFIG = configparser.ConfigParser()
# "SYMBOL,amount": 2-5 capital letters, a comma, an optional decimal amount.
# Raw string fixes the invalid "\d" escape (same pattern text as before).
COIN_FORMAT = re.compile(r'[A-Z]{2,5},\d{0,}\.?\d{0,}')

# Key functions for sorting the wallet rows by each display column;
# item is a (coin, (price, high, low), held) triple.
SORT_FNS = { 'coin' : lambda item: item[0],
             'price': lambda item: float(item[1][0]),
             'held' : lambda item: float(item[2]),
             'val'  : lambda item: float(item[1][0]) * float(item[2]) }
SORTS = list(SORT_FNS.keys())
COLUMN = SORTS.index('val')   # currently active sort column
ORDER = True                  # True = descending

# raw keycodes handled by the main loop
KEY_ESCAPE = 27
KEY_ZERO = 48
KEY_A = 65
KEY_Q = 81
KEY_R = 82
KEY_S = 83
KEY_C = 67
KEY_a = 97
KEY_q = 113
KEY_r = 114
KEY_s = 115
KEY_c = 99
def read_configuration(confpath):
    """Read the configuration file at given path.

    If no config exists at confpath yet, seed CONFFILE by copying the
    package's bundled default config.ini first. Returns the module-level
    CONFIG parser populated from confpath.
    """
    # copy our default config file
    if not os.path.isfile(confpath):
        defaultconf = pkg_resources.resource_filename(__name__, 'config.ini')
        shutil.copyfile(defaultconf, CONFFILE)
    CONFIG.read(confpath)
    return CONFIG
def if_coin(coin, url='https://www.cryptocompare.com/api/data/coinlist/'):
    '''Check if coin exists'''
    # membership test against the symbol list served by cryptocompare;
    # responses are cached once main() installs requests_cache
    return coin in requests.get(url).json()['Data']
def get_price(coin, curr=None):
    '''Get the data on coins.

    coin is a comma-separated symbol list; returns one (price, 24h high,
    24h low) tuple per symbol the API recognized. Exits the process on
    network or payload errors (terminal app; no caller recovers).
    '''
    curr = curr or CONFIG['api'].get('currency', 'USD')
    fmt = 'https://min-api.cryptocompare.com/data/pricemultifull?fsyms={}&tsyms={}'
    try:
        r = requests.get(fmt.format(coin, curr))
    except requests.exceptions.RequestException:
        sys.exit('Could not complete request')
    try:
        data_raw = r.json()['RAW']
        return [(float(data_raw[c][curr]['PRICE']),
                 float(data_raw[c][curr]['HIGH24HOUR']),
                 float(data_raw[c][curr]['LOW24HOUR'])) for c in coin.split(',') if c in data_raw.keys()]
    except (KeyError, TypeError, ValueError):
        # Narrowed from a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; these cover a missing 'RAW' key, malformed JSON
        # and non-numeric price fields.
        sys.exit('Could not parse data')
def get_theme_colors():
    '''Returns curses colors according to the config'''
    def to_color(name_or_value):
        # Named colors resolve to curses.COLOR_*; plain numbers (including
        # the -1 "terminal default") fall through to int().
        try:
            return getattr(curses, 'COLOR_' + name_or_value.upper())
        except AttributeError:
            return int(name_or_value)

    theme = CONFIG['theme']
    options = (('text', 'yellow'), ('banner', 'yellow'),
               ('banner_text', 'black'), ('background', -1))
    return tuple(to_color(theme.get(key, fallback)) for key, fallback in options)
def conf_scr():
    '''Configure the screen and colors/etc'''
    curses.curs_set(0)            # hide the cursor
    curses.start_color()
    curses.use_default_colors()   # allow -1 as "terminal default" background
    text, banner, banner_text, background = get_theme_colors()
    curses.init_pair(2, text, background)     # pair 2: normal table text
    curses.init_pair(3, banner_text, banner)  # pair 3: header/footer banner
    curses.halfdelay(10)          # getch() blocks at most 1 second
def str_formatter(coin, val, held):
    '''Prepare the coin strings as per ini length/decimal place values.

    val is a (price, 24h high, 24h low) tuple from get_price; held is the
    amount string from the wallet. Returns one table row:
    symbol | price | held | value | high | low.
    '''
    max_length = CONFIG['theme'].getint('field_length', 13)
    dec_place = CONFIG['theme'].getint('dec_places', 2)
    # column width for the currency fields; reads dec_places a second time —
    # presumably intentional (width tracks precision), TODO confirm
    avg_length = CONFIG['theme'].getint('dec_places', 2) + 10
    held_str = '{:>{},.8f}'.format(float(held), max_length)
    # NOTE(review): val_str is computed but never used below — dead code?
    val_str = '{:>{},.{}f}'.format(float(held) * val[0], max_length, dec_place)
    return ' {:<5} {:>{}} {} {:>{}} {:>{}} {:>{}}'.format(coin,
        locale.currency(val[0], grouping=True)[:max_length], avg_length,
        held_str[:max_length],
        locale.currency(float(held) * val[0], grouping=True)[:max_length], avg_length,
        locale.currency(val[1], grouping=True)[:max_length], avg_length,
        locale.currency(val[2], grouping=True)[:max_length], avg_length)
def write_scr(stdscr, wallet, y, x):
    '''Write text and formatting to screen.

    y, x are the current terminal dimensions; every addnstr is clipped to
    width x and guarded by a height check so small terminals still render
    what fits.
    '''
    # pads sized to line the header up with str_formatter's columns
    first_pad = '{:>{}}'.format('', CONFIG['theme'].getint('dec_places', 2) + 10 - 3)
    second_pad = ' ' * (CONFIG['theme'].getint('field_length', 13) - 2)
    third_pad = ' ' * (CONFIG['theme'].getint('field_length', 13) - 3)
    if y >= 1:
        stdscr.addnstr(0, 0, 'cryptop v0.2.0', x, curses.color_pair(2))
    if y >= 2:
        header = ' COIN{}PRICE{}HELD {}VAL{}HIGH {}LOW '.format(first_pad, second_pad, third_pad, first_pad, first_pad)
        stdscr.addnstr(1, 0, header, x, curses.color_pair(3))
    total = 0
    coinl = list(wallet.keys())
    heldl = list(wallet.values())
    if coinl:
        coinvl = get_price(','.join(coinl))   # one API request for all coins
        if y > 3:
            # sort the three parallel lists together by the active column
            s = sorted(list(zip(coinl, coinvl, heldl)), key=SORT_FNS[SORTS[COLUMN]], reverse=ORDER)
            coinl = list(x[0] for x in s)
            coinvl = list(x[1] for x in s)
            heldl = list(x[2] for x in s)
        for coin, val, held in zip(coinl, coinvl, heldl):
            if coinl.index(coin) + 2 < y:     # draw only rows that fit
                stdscr.addnstr(coinl.index(coin) + 2, 0,
                               str_formatter(coin, val, held), x, curses.color_pair(2))
            # total still counts rows that didn't fit on screen
            total += float(held) * val[0]
    if y > len(coinl) + 3:
        stdscr.addnstr(y - 2, 0, 'Total Holdings: {:10} '
                       .format(locale.currency(total, grouping=True)), x, curses.color_pair(3))
        stdscr.addnstr(y - 1, 0,
                       '[A] Add/update coin [R] Remove coin [S] Sort [C] Cycle sort [0\Q]Exit', x,
                       curses.color_pair(2))
def read_wallet():
    ''' Reads the wallet data from its json file.

    Returns the {symbol: amount} mapping; a missing or malformed wallet
    file is replaced by a fresh empty one on disk.
    '''
    try:
        with open(DATAFILE, 'r') as f:
            return json.load(f)
    except (FileNotFoundError, ValueError):
        # missing or malformed wallet: reset it so the next read succeeds
        write_wallet({})
        return {}
def write_wallet(wallet):
    ''' Write wallet data ({symbol: amount} mapping) to its json file '''
    with open(DATAFILE, 'w') as f:
        json.dump(wallet, f)
def get_string(stdscr, prompt):
    '''Requests a string from the user (blocking, up to 20 characters).'''
    curses.echo()           # show typed characters while editing
    stdscr.clear()
    stdscr.addnstr(0, 0, prompt, -1, curses.color_pair(2))
    curses.curs_set(1)      # show the cursor during input
    stdscr.refresh()
    in_str = stdscr.getstr(1, 0, 20).decode()
    # restore the normal no-echo, hidden-cursor, half-delay input mode
    curses.noecho()
    curses.curs_set(0)
    stdscr.clear()
    curses.halfdelay(10)
    return in_str
def add_coin(coin_amount, wallet):
    ''' Add a coin and its amount to the wallet '''
    entry = coin_amount.upper()
    # ignore input that isn't shaped like "SYMBOL,amount"
    if not COIN_FORMAT.match(entry):
        return wallet
    symbol, amount = entry.split(',')
    # ignore symbols the price API doesn't know about
    if if_coin(symbol):
        wallet[symbol] = amount if amount else "0"
    return wallet
def remove_coin(coin, wallet):
    ''' Remove a coin and its amount from the wallet '''
    # coin is '' when the window was resized while waiting for input; skip.
    if coin:
        wallet.pop(coin.upper(), None)
    return wallet
def mainc(stdscr):
    """Main curses loop: draw the wallet table and dispatch key commands."""
    inp = 0
    wallet = read_wallet()
    y, x = stdscr.getmaxyx()
    conf_scr()
    stdscr.bkgd(' ', curses.color_pair(2))
    stdscr.clear()
    #stdscr.nodelay(1)
    # run until 0, Esc, Q or q is pressed
    while inp not in {KEY_ZERO, KEY_ESCAPE, KEY_Q, KEY_q}:
        # redraw until a non-resize key arrives, re-measuring on each resize
        while True:
            try:
                write_scr(stdscr, wallet, y, x)
            except curses.error:
                pass   # terminal momentarily too small to draw into
            inp = stdscr.getch()
            if inp != curses.KEY_RESIZE:
                break
            stdscr.erase()
            y, x = stdscr.getmaxyx()
        if inp in {KEY_a, KEY_A}:
            if y > 2:
                data = get_string(stdscr,
                    'Enter in format Symbol,Amount e.g. BTC,10')
                wallet = add_coin(data, wallet)
                write_wallet(wallet)
        if inp in {KEY_r, KEY_R}:
            if y > 2:
                data = get_string(stdscr,
                    'Enter the symbol of coin to be removed, e.g. BTC')
                wallet = remove_coin(data, wallet)
                write_wallet(wallet)
        if inp in {KEY_s, KEY_S}:
            if y > 2:
                global ORDER
                ORDER = not ORDER                    # flip sort direction
        if inp in {KEY_c, KEY_C}:
            if y > 2:
                global COLUMN
                COLUMN = (COLUMN + 1) % len(SORTS)   # cycle sort column
def main():
    """Entry point: prepare config, locale and HTTP cache, then run curses."""
    # NOTE(review): presumably BASEDIR was a plain config *file* in older
    # versions; refuse to run until the user clears it so makedirs can
    # create the directory — confirm against release history.
    if os.path.isfile(BASEDIR):
        sys.exit('Please remove your old configuration file at {}'.format(BASEDIR))
    os.makedirs(BASEDIR, exist_ok=True)
    global CONFIG
    CONFIG = read_configuration(CONFFILE)
    locale.setlocale(locale.LC_MONETARY, CONFIG['locale'].get('monetary', ''))
    # cache API responses in memory so the refresh loop doesn't hammer the API
    requests_cache.install_cache(cache_name='api_cache', backend='memory',
        expire_after=int(CONFIG['api'].get('cache', 10)))
    curses.wrapper(mainc)

if __name__ == "__main__":
    main()
|
# Generated by Django 3.0.4 on 2020-07-25 10:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds the Cuboid model, drops the Cars model."""

    dependencies = [
        ('sales', '0008_auto_20200309_2316'),
    ]

    operations = [
        migrations.CreateModel(
            name='Cuboid',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # all three dimensions are optional integers with no default
                ('length', models.IntegerField(blank=True, default=None, null=True)),
                ('breadth', models.IntegerField(blank=True, default=None, null=True)),
                ('height', models.IntegerField(blank=True, default=None, null=True)),
            ],
        ),
        migrations.DeleteModel(
            name='Cars',
        ),
    ]
|
import csv
from pprint import pprint
from collections import defaultdict
import sys
import matplotlib.pyplot as plt
person = {}
#
# read person data
#
# BUG FIX: the file was opened in binary mode ('rb'), which csv.reader
# rejects under Python 3 (this script already uses Python 3 print calls).
# newline='' is the mode the csv module documents for reader input.
with open('xd_persons.txt', 'r', newline='') as csvfile:
    data = csv.reader(csvfile, delimiter='|')
    for entry in data:
        # pipe-separated columns: number|first|middle|last|organization
        person[entry[0]] = {
            'number': entry[0],
            'firstname': entry[1],
            'middlename': entry[2],
            'lastname': entry[3],
            'organization': entry[4]
        }
# pprint (person)
#
# count lastname, firstname and combined-name variants
#
from collections import Counter

data = {}
# name rendering styles to compare, from least to most specific
kind = ["Firstname",
        "Lastname",
        "F Lastname",
        "F M Lastname",
        "Firstname Lastname",
        "Firstname Middlename Lastname"]
#        "Firstname Middlename Lastname Org"]
for graph in kind:
    data[graph] = []
pprint (data)
for entry in person:
    # collect every rendering of this person's name
    data["Lastname"].append(person[entry]['lastname'])
    data["Firstname"].append(person[entry]['firstname'])
    data["Firstname Lastname"].append(person[entry]['firstname'] + " " + person[entry]['lastname'])
    data["Firstname Middlename Lastname"].append(person[entry]['firstname'] + " " + person[entry]['middlename'] + " " + person[entry]['lastname'])
#    data["Firstname Middlename Lastname Org"].append(person[entry]['firstname'] + " " + person[entry]['middlename'] + " " + person[entry]['lastname'] + " " + person[entry]['organization'])
    # NOTE(review): [0] assumes firstname is never empty — IndexError otherwise
    data["F Lastname"].append(person[entry]['firstname'][0] + " " + person[entry]['lastname'])
    # middlename may be empty; reduce to an initial only when present
    initial = person[entry]['middlename']
    if len(initial) > 0:
        initial = person[entry]['middlename'][0]
    data["F M Lastname"].append(person[entry]['firstname'][0] + " " + initial + " " + person[entry]['lastname'])
def find_frequency(entries):
    """Return (name, count) pairs sorted by ascending count."""
    counts = Counter(entries)
    return sorted(counts.items(), key=lambda pair: pair[1])
def get_freq_vector(freqs):
    """Extract just the counts from a list of (name, count) pairs."""
    return [count for _, count in freqs]
def add_to_plot(entries, label=None):
    """Plot counts sorted descending as one labelled rank/frequency curve."""
    plt.plot(sorted(entries, reverse=True), label=label)
# one rank/frequency curve per name-rendering style
for graph in kind:
    print(70 * "+")
    print(find_frequency(data[graph]))
    add_to_plot(get_freq_vector(find_frequency(data[graph])), label=graph)
plt.legend()
plt.title("Name Ambiguity in XSEDE Data")
plt.xlabel("Unique Name")
plt.ylabel("Frequency")
# log-log axes keep the long tail of rare names readable
plt.yscale('log')
plt.xscale('log')
plt.show()
|
#!/usr/bin/env python
"""
Add the missing vars required to run CABLE
That's all folks.
"""
__author__ = "Martin De Kauwe"
__version__ = "1.0 (11.03.2020)"
__email__ = "mdekauwe@gmail.com"
import numpy as np
import xarray as xr
import os
import sys
import netCDF4
import shutil
import xarray as xr
def main(fname, out_fname):
    """Patch CABLE-required metadata onto a met forcing file.

    Adds the Qair units attribute and pins the time encoding, then writes
    the amended dataset to out_fname.
    """
    ds = xr.open_dataset(fname)
    ds.Qair.attrs['units'] = 'kg/kg'
    ds.time.encoding['units'] = 'seconds since 1993-01-01 00:00:00'
    ds.to_netcdf(out_fname)

if __name__ == "__main__":
    fname = "raw/01_preston_metforcing_1993_2004_UTC_v1.nc"
    out_fname = "01_preston_metforcing_1993_2004_UTC_v1.nc"
    main(fname, out_fname)
|
from Bio import SeqIO
import sys
import os
from StringIO import StringIO # Python 2
#count reads
# NOTE(review): Python 2 script (print statements, raw_input); the flow
# below is left byte-identical, with review notes only.
inputfile = sys.argv[1]
chunksize = int(str(sys.argv[2]))
# index the FASTA once just to count the records
count = SeqIO.index(inputfile, "fasta")
c= len(count)
#print c
# NOTE(review): a and numchunks are computed but never used below
a= int(c)
numchunks = (c/chunksize)
#print"Number of chunks=",numchunks
#sys.stdout.write(str(numchunks))
t=0   # records written into the current chunk so far
f=1   # current chunk file number (1-based)
print"writing files"
outfile = "/OSM/HOME-MEL/all29c/scripts/tmp/ubin"+str(f)+".fna"
g=open(outfile,'w')
for i in SeqIO.parse(inputfile,"fasta"):
    t=t+1
    # start a new chunk once the current one is full
    # NOTE(review): the previous handle is never closed, and the record
    # that trips the threshold lands in the NEW file — confirm intended
    if t>=chunksize:
        f=f+1
        outfile = "/OSM/HOME-MEL/all29c/scripts/tmp/ubin"+str(f)+".fna"
        g=open(outfile,'w')
        t=0
    print"file",f,"record",t
    print i.seq
    # NOTE(review): raw_input() pauses for Enter on EVERY record —
    # presumably a debugging leftover; remove for unattended runs
    raw_input()
    SeqIO.write(i,g,"fasta")
    print
print "python files written"
"""Module to test the interface to Mongodb."""
from typing import List
from pymongo import MongoClient
from pymongo.database import Database
from ceiba.mongo_interface import (USERS_COLLECTION, DatabaseConfig,
add_users_to_db, connect_to_db,
store_dataframe_in_mongo)
from .utils_test import PATH_TEST, read_jobs
DB_NAME = "test_mutations"
COLLECTION_NAME = "candidates"
def add_candidates(mongodb: MongoClient) -> List[int]:
    """Insert the candidates CSV fixture and return the inserted ids."""
    # read data from file
    path_data = PATH_TEST / "candidates.csv"
    return store_dataframe_in_mongo(mongodb[COLLECTION_NAME], path_data)
def get_database() -> Database:
    """Return a client handle to the MongoDB test database."""
    db_config = DatabaseConfig(DB_NAME)
    return connect_to_db(db_config)
def test_many_insertions():
    """Insert the candidates fixture and check only known ids come back."""
    # Connect to the database
    mongodb = get_database()
    # ids expected from the candidates.csv fixture
    expected_ids = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 76950,
                    43380, 26717, 70, 47561, 32800, 37021, 2449, 63555, 72987}
    try:
        ids = add_candidates(mongodb)
        print("received ids: ", ids)
        assert all(index in expected_ids for index in ids)
    finally:
        # always drop the collection so reruns start from a clean slate
        collection = mongodb[COLLECTION_NAME]
        collection.drop()
def test_aggregation():
    """Test an aggregation pipeline.

    NOTE(review): as written this only exercises insert_many/drop — the
    actual aggregation assertions are commented out, so the test can never
    fail on pipeline behavior. The print(col.find()) calls show a Cursor
    object, not documents.
    """
    mongodb = get_database()
    col = mongodb["jobs_test"]
    jobs = read_jobs()
    col.insert_many(jobs)
    print(col.find())
    try:
        print(col.find())
        # large = next(get_jobs_by_size("LARGE", col))
        # assert large["_id"] == 135037
    finally:
        col.drop()
def test_add_user_to_db():
    """Check that some users are properly added in the database."""
    path_users = PATH_TEST / "users.txt"
    # BUG FIX: `db` used to be assigned inside the try block, so if
    # get_database() raised, the finally clause crashed with
    # UnboundLocalError and masked the original connection error.
    db = get_database()
    try:
        add_users_to_db(db, path_users)
    finally:
        # always remove the collection so reruns start clean
        db.drop_collection(USERS_COLLECTION)
|
from .satelliteInfo import SatelliteInfo
class SatelliteResults:
    """Decoded per-satellite measurement record from a NAV stream.

    The optional fields (timestamps, doppler, bit-change info) are present
    only when the corresponding flags in `status_info` say so; absent
    fields are stored as None.
    """

    def __init__(
        self,
        satellite_id,
        status_info,
        timestamp1,
        timestamp2,
        doppler,
        bit_change_info1,
        bit_change_info2,
    ):
        self.__satellite_id = satellite_id
        self.__status_info = status_info
        self.__timestamp1 = timestamp1
        self.__timestamp2 = timestamp2
        self.__doppler = doppler
        self.__bit_change_info1 = bit_change_info1
        self.__bit_change_info2 = bit_change_info2

    @property
    def satellite_id(self):
        # raw id as read from the stream
        return self.__satellite_id

    @property
    def satellite_name(self):
        # human-readable constellation name derived from the raw id
        return SatelliteResults.satellite_id_to_name(self.__satellite_id)

    @property
    def status_info(self):
        return self.__status_info

    @property
    def timestamp1(self):
        # None when status_info.timestamp1_exists was false
        return self.__timestamp1

    @property
    def timestamp2(self):
        # None when status_info.timestamp2_exists was false
        return self.__timestamp2

    @property
    def doppler(self):
        # signed doppler value, or None when absent
        return self.__doppler

    @property
    def bit_change_info1(self):
        # raw bytes, or None when absent
        return self.__bit_change_info1

    @property
    def bit_change_info2(self):
        # raw bytes, or None when absent
        return self.__bit_change_info2

    @staticmethod
    def from_nav_consumer(nav_consumer):
        """Parse one satellite record from a NAV field consumer.

        Optional fields are consumed only when the preceding status flags
        mark them present; consuming unconditionally would desynchronize
        the stream. Field widths (7, 19, 15, 8) follow the wire format —
        presumably bit counts, confirm against the consumer implementation.
        """
        satellite_id_raw = int.from_bytes(
            nav_consumer.consume_field(7), byteorder="little"
        )
        status_info = SatelliteInfo.from_nav_parser(nav_consumer)
        if status_info.timestamp1_exists:
            timestamp1_raw = nav_consumer.consume_field(19)
            timestamp1 = int.from_bytes(timestamp1_raw, byteorder="little")
        else:
            timestamp1 = None
        if status_info.timestamp2_exists:
            timestamp2_raw = nav_consumer.consume_field(19)
            timestamp2 = int.from_bytes(timestamp2_raw, byteorder="little")
        else:
            timestamp2 = None
        if status_info.doppler_exists:
            doppler_raw = nav_consumer.consume_field(15)
            doppler_value = SatelliteResults._doppler_value_from_raw_val(doppler_raw)
        else:
            doppler_value = None
        if status_info.bit_change_info1_exists:
            bit_change_info1_raw = nav_consumer.consume_field(8)
        else:
            bit_change_info1_raw = None
        if status_info.bit_change_info2_exists:
            bit_change_info2_raw = nav_consumer.consume_field(8)
        else:
            bit_change_info2_raw = None
        return SatelliteResults(
            satellite_id=satellite_id_raw,
            status_info=status_info,
            timestamp1=timestamp1,
            timestamp2=timestamp2,
            doppler=doppler_value,
            bit_change_info1=bit_change_info1_raw,
            bit_change_info2=bit_change_info2_raw,
        )

    @staticmethod
    def _doppler_value_from_raw_val(raw_value: bytes):
        """Decode a little-endian 15-bit two's-complement doppler value.

        When the 15-bit sign bit (0x4000) is set, bits 14-15 are filled and
        the 16-bit pattern is reinterpreted as a signed integer, i.e. the
        value is sign-extended to 16 bits.
        """
        doppler_value = int.from_bytes(raw_value, byteorder='little')
        if doppler_value & 0x4000:
            doppler_value |= 0xC000
            doppler_value = int.from_bytes(
                doppler_value.to_bytes(
                    length=2,
                    byteorder='little'
                ),
                byteorder='little',
                signed=True
            )
        return doppler_value

    @staticmethod
    def satellite_id_to_name(satellite_id: int):
        """Map a raw satellite id to a display name.

        Id allocation: 0-31 GPS (PRN = id + 1), 64-100 BeiDou
        (number = id - 63), 32-50 SBAS (PRN = id + 88); anything else is
        reserved for future use.

        NOTE(review): the original defined a table of regional SBAS system
        names (WAAS/EGNOS/GAGAN/...) here but never used it — only the
        numeric PRN form below was ever returned. The dead table has been
        removed without changing output; restore and wire it in if named
        SBAS output is actually wanted.
        """
        if 0 <= satellite_id <= 31:
            satellite_number = satellite_id + 1
            return f"GPS #{satellite_number}"
        elif 64 <= satellite_id <= 100:
            satellite_number = satellite_id - 63
            return f"BeiDou #{satellite_number}"
        elif 32 <= satellite_id <= 50:
            satellite_number = satellite_id + 88
            return f"SBAS #{satellite_number}"
        else:
            return f"RFU (sv_id: {satellite_id})"

    def __str__(self):
        return f"Sv Id: {self.satellite_id} (C/N: {self.status_info.c_n()}): timestamps {self.timestamp1}/{self.timestamp2}"
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ucs_sp_vnic_order
short_description: Configures vNIC order for service profiles and templates on Cisco UCS Manager
version_added: 2.10
description:
- Configures vNIC order for service profiles and templates on Cisco UCS Manager
options:
sp_name:
description: DN of the service profile
vnics:
description: List of vNIC order properties
suboptions:
name:
description: Name of the vNIC
required: true
admin_vcon:
description: Name of the virtual connection
choices: ["1","2","3","4","any"]
order:
description: vNIC connection order
choices: ["unspecified", "0-256"]
transport:
description: transport medium
choices: ["ethernet", "fc"]
required: true
state:
description: Desired state of the vNIC.
choices: [present, absent]
default: present
org_dn:
description: root org dn
extends_documentation_fragment:
- cisco.ucs.ucs
requirements:
- ucsmsdk
author:
- Brett Johnson (@sdbrett)
'''
EXAMPLES = r'''
- name: Configure vnic order
cisco.ucs.ucs_sp_vnic_order:
sp_name: my_sp
vnics:
- name: 'my_vnic'
admin_vcon: '1'
order: '1'
transport: 'ethernet'
hostname: 192.168.99.100
username: admin
password: password
- name: Configure vhba order
cisco.ucs.ucs_sp_vnic_order:
sp_name: my_sp
vnics:
- name: 'my_vhba'
admin_vcon: '2'
order: '1'
transport: 'fc'
hostname: 192.168.99.100
username: admin
password: password
- name: Configure vnic and vhba order
cisco.ucs.ucs_sp_vnic_order:
sp_name: my_sp
vnics:
- name: my_vhba
admin_vcon: '2'
order: '1'
transport: fc
- name: my_vnic
admin_vcon: '1'
order: '1'
transport: ethernet
hostname: 192.168.99.100
username: admin
password: password
- name: Remove vnic order configuration from my_vnic
cisco.ucs.ucs_sp_vnic_order:
sp_name: my_sp
vnics:
- name: 'my_vnic'
transport: ethernet
state: absent
hostname: 192.168.99.100
username: admin
password: password
'''
RETURN = r'''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.cisco.ucs.plugins.module_utils.ucs import UCSModule, ucs_argument_spec
def get_service_profile(handle, org_dn, sp_name):
    """Look up a service profile MO by DN; returns None when absent."""
    sp_dn = "{}/ls-{}".format(org_dn, sp_name)
    return handle.query_dn(sp_dn)
def update_vnic_assignment_order(ucs, vnic, sp):
    """Apply the requested vCon/order placement for one vNIC and commit."""
    from ucsmsdk.mometa.ls.LsVConAssign import LsVConAssign
    assignment = LsVConAssign(
        parent_mo_or_dn=sp,
        admin_vcon=vnic['admin_vcon'],
        order=vnic['order'],
        transport=vnic['transport'],
        vnic_name=vnic['name'],
    )
    ucs.login_handle.add_mo(assignment, True)
    ucs.login_handle.commit()
def remove_vnic_assignment_order(ucs, vnic, sp):
    """Reset one vNIC's placement to the UCSM defaults (any vCon, unordered)."""
    from ucsmsdk.mometa.ls.LsVConAssign import LsVConAssign
    assignment = LsVConAssign(
        parent_mo_or_dn=sp,
        admin_vcon='any',
        order='unspecified',
        transport=vnic['transport'],
        vnic_name=vnic['name'],
    )
    ucs.login_handle.add_mo(assignment, True)
    ucs.login_handle.commit()
def get_vnic(ucs, dn):
    """Return the vNIC/vHBA MO at *dn*, or None when it does not exist."""
    handle = ucs.login_handle
    return handle.query_dn(dn)
def get_vnic_dn(sp_dn, transport, name):
    """Build the DN of a vNIC ('ether-') or vHBA ('fc-') under a profile."""
    prefix = '/ether-' if transport == 'ethernet' else '/fc-'
    return sp_dn + prefix + name
def matches_existing_vnic_order(vnic, vnic_mo):
    """Return True when the MO already carries the desired placement props."""
    if vnic['state'] == 'absent':
        # Absence means "reset to defaults", so compare against those.
        expected = {'admin_vcon': 'any', 'order': 'unspecified'}
    else:
        expected = {'admin_vcon': vnic['admin_vcon'], 'order': vnic['order']}
    # UCSM stores the ethernet transport as 'ether' on the assignment object.
    expected['type'] = 'ether' if vnic['transport'] == 'ethernet' else vnic['transport']
    return vnic_mo.check_prop_match(**expected)
def main():
    """Ansible entry point: reconcile vNIC/vHBA placement on a service profile."""
    # Per-vNIC sub-options accepted under the 'vnics' list parameter.
    vnic_spec = dict(
        name=dict(type='str', required=True),
        admin_vcon=dict(type='str', choices=['1', '2', '3', '4', 'any']),
        order=dict(type='str'),
        transport=dict(type='str', required=True, choices=['ethernet', 'fc']),
        state=dict(type='str', default='present', choices=['present', 'absent']),
    )
    argument_spec = ucs_argument_spec
    argument_spec.update(
        sp_name=dict(required=True, type='str'),
        vnics=dict(required=True, type='list', elements='dict', options=vnic_spec),
        org_dn=dict(required=False, type='str', default='org-root'),
    )
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True)
    ucs = UCSModule(module)
    err = False
    changed = False
    try:
        sp_dn = dn = module.params['org_dn'] + "/ls-" + module.params['sp_name']
        sp = ucs.login_handle.query_dn(dn)
        if not sp:
            raise ValueError("SP '%s' does not exist" % sp_dn)
        # Pass 1: detect whether any vNIC deviates from the requested
        # placement.  NOTE(review): the break stops validation at the first
        # mismatch, so later vNICs are not checked for existence here.
        for vnic in module.params['vnics']:
            vnic_mo = get_vnic(ucs, (get_vnic_dn(sp_dn, vnic['transport'], vnic['name'])))
            if vnic['state'] != 'absent' and not vnic_mo:
                raise ValueError("vNIC '%s' is not assigned to service profile '%s'" % (vnic['name'], sp_dn))
            if vnic_mo:
                if not matches_existing_vnic_order(vnic, vnic_mo):
                    changed = True
                    break
        # Pass 2: apply the requested placements (skipped in check mode).
        if changed and not module.check_mode:
            for vnic in module.params['vnics']:
                vnic_mo = get_vnic(ucs, (get_vnic_dn(sp_dn, vnic['transport'], vnic['name'])))
                if vnic['state'] == 'absent' and vnic_mo:
                    remove_vnic_assignment_order(ucs, vnic, sp)
                elif not vnic_mo:
                    update_vnic_assignment_order(ucs, vnic, sp)
                elif not matches_existing_vnic_order(vnic, vnic_mo):
                    update_vnic_assignment_order(ucs, vnic, sp)
    except Exception as e:
        # Any failure is surfaced through the module result message.
        err = True
        ucs.result['msg'] = "setup error: %s " % str(e)
    ucs.result['changed'] = changed
    if err:
        module.fail_json(**ucs.result)
    module.exit_json(**ucs.result)


if __name__ == '__main__':
    main()
|
# 연결 요소의 개수 DFS
# 첫째줄: 노드갯수(N), 간선갯수(M)
# 들째줄 ~ M번째줄: 간선의 양 끝점 (u,v)
# 시간초과나서 sys.stdin.readline() 추가함
import sys
from collections import deque
def BFS(n=None, adj=None, seen=None):
    """Count connected components of an undirected graph using BFS.

    Generalized from the original global-only version so it can be reused
    and tested; calling BFS() with no arguments still reads the
    module-level N / graph / visited, so existing callers are unaffected.

    Args:
        n: number of nodes (numbered 1..n); defaults to module-level N.
        adj: adjacency list indexed 1..n; defaults to module-level graph.
        seen: 0/1 visited marks of length n+1 (mutated in place);
            defaults to the module-level visited list.

    Returns:
        The number of connected components among nodes 1..n.
    """
    if n is None:
        n = N
    if adj is None:
        adj = graph
    if seen is None:
        seen = visited
    components = 0
    queue = deque()
    # Every node that has not been reached yet starts a fresh BFS and
    # therefore a new connected component.
    for start in range(1, n + 1):
        if seen[start] == 0:
            seen[start] = 1
            queue.append(start)
            components += 1
            while queue:
                node = queue.popleft()
                for neighbor in adj[node]:
                    if seen[neighbor] == 0:
                        seen[neighbor] = 1
                        queue.append(neighbor)
    return components
if __name__ == '__main__':
    # First input line: node count N and edge count M.
    N, M = map(int, sys.stdin.readline().rstrip().split())
    graph = [[] for _ in range(N + 1)]
    visited = [0] * (N + 1)
    count = 0
    # Build the adjacency list; edges are undirected, so add both ways.
    for _ in range(M):
        u, v = map(int, sys.stdin.readline().rstrip().split())
        graph[u].append(v)
        graph[v].append(u)
    # Count connected components over nodes 1..N.
    count = BFS()
    print(count)
|
import sys
from subprocess import check_output
def test_not_py_shebang(tmpdir, monkeypatch):
    """A non-Python shebang must be left untouched by shebangtron."""
    flavor = "deadbeef"
    python = "snake"
    flavor_dir = tmpdir.mkdir(flavor)
    test_file = flavor_dir.join("mangleme")
    test_file.write("#!/bin/sh")
    # monkeypatch.setenv() requires str values; tmpdir is a py.path.local,
    # so convert it explicitly.
    monkeypatch.setenv("EUPS_PATH", str(tmpdir))
    monkeypatch.setenv("SHTRON_EUPS_FLAVOR", flavor)
    monkeypatch.setenv("SHTRON_PYTHON", python)
    print(check_output(["./shebangtron"]))
    assert test_file.read() == "#!/bin/sh"
def test_vanilla_py_shebang(tmpdir, monkeypatch):
    """A bare '#!python' shebang is rewritten to the configured interpreter."""
    flavor = "deadbeef"
    python = "snake"
    flavor_dir = tmpdir.mkdir(flavor)
    test_file = flavor_dir.join("mangleme")
    test_file.write("#!python")
    # monkeypatch.setenv() requires str values; tmpdir is a py.path.local.
    monkeypatch.setenv("EUPS_PATH", str(tmpdir))
    monkeypatch.setenv("SHTRON_EUPS_FLAVOR", flavor)
    monkeypatch.setenv("SHTRON_PYTHON", python)
    print(check_output(["./shebangtron"]))
    assert test_file.read() == "#!" + python
def test_env_shebang(tmpdir, monkeypatch):
    """An '/usr/bin/env python' shebang is left unchanged."""
    flavor = "deadbeef"
    python = "snake"
    flavor_dir = tmpdir.mkdir(flavor)
    test_file = flavor_dir.join("mangleme")
    test_file.write("#!/usr/bin/env python")
    # monkeypatch.setenv() requires str values; tmpdir is a py.path.local.
    monkeypatch.setenv("EUPS_PATH", str(tmpdir))
    monkeypatch.setenv("SHTRON_EUPS_FLAVOR", flavor)
    monkeypatch.setenv("SHTRON_PYTHON", python)
    print(check_output(["./shebangtron"]))
    assert test_file.read() == "#!/usr/bin/env python"
|
"""
Leap year
a given year number
to check if that year is a leap year
year = 1900
question: Is the year of 1900 a leap year?
366 days every 4 year
1. if the number of year can be perfectly divided by 4, year is probably a leap year.
2. if the number of year can be also per. divided by 100, that year is probably not a leap year.
3. if the number of year can be also per. divided by 400, that year is a leap year.
"""
year = 2100
if year % 4 == 0:
# print("probably a leap year")
if year % 100 == 0:
# print("probably Not a leap year")
if year % 400 ==0:
print("{} is a leap year".format(year))
else:
print("{} is not a leap year".format(year))
else:
print("{} is a leap year".format(year))
else:
print("{} is not a leap year".format(year))
|
import os
import string
# Directory holding one text file per chapter of the novel.
location = './AGameOfThrones'
# './result' lists one character name per line (first whitespace token);
# output is one per-chapter presence vector per character.
f = open('./result', 'r')
res = open('./feature_chapiter','w')
name = []
chapiter = []
for line in f:
    name.append(line.split(' ')[0])
    chapiter.append([])
f.close()
# distinguish those prefixes and suffixes
# Group each short name with the multi-word names that contain it
# (e.g. "Jon" with "Jon Snow"); names with no longer variant are dropped
# from the groups.
similar = []
for obj_1 in name:
    similar.append([obj_1])
    found = 0
    for obj_2 in name:
        if obj_1 != obj_2 and ' ' in obj_2:
            if obj_1 in obj_2.split(' '):
                similar[-1].append(obj_2)
                found = 1
    if found == 0:
        similar.pop()
def find_fullname(temp, name):
    """Collect every longer variant of *name* (full names that contain it)
    from the module-level `similar` groups into the list *temp*."""
    for group in similar:
        if name not in group:
            continue
        for candidate in group:
            if candidate != name and name in candidate:
                temp.append(candidate)
# not to mix up those prefixes or suffixes with full names
def distinguish(temp, name, chapiter, line, n):
    """Return 0 (and mark chapiter[n]) when the bare *name* occurs in
    *line* on its own, i.e. not merely as part of a longer full name."""
    # Strip every known longer variant out of the line first, then test
    # whether the short name (followed by a space) still occurs.
    stripped = line
    for variant in temp:
        stripped = stripped.replace(variant, '')
    is_substring = 0 if name + ' ' in stripped else 1
    if is_substring == 0:
        chapiter[n].append(1)
    return is_substring
# Walk every chapter file and record, per character, whether that character
# appears in the chapter (1) or not (0).
DIR = './AGameOfThrones'
f_num = len([fn for fn in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, fn))])
times = 0.
for subdir, dirs, files in os.walk(location):
    for f in files:
        # Progress indicator: fraction of chapter files processed so far.
        print str(round(times/f_num,3)*100)+'%\n'
        text = open('./AGameOfThrones/'+f,'r')
        n = 0
        for obj in name:
            found = 0
            temp = []
            # Longer variants of this name, used to avoid false positives.
            find_fullname(temp,obj)
            for line in text:
                # Drop punctuation so name matching is not broken by commas etc.
                line = "".join(l for l in line if l not in string.punctuation)
                if distinguish(temp, obj, chapiter, line, n) == 0:
                    found = 1
                    break
            if found == 0:
                chapiter[n].append(0)
            n = n + 1
            # Rewind so the next character scans the chapter from the start.
            text.seek(0)
        text.close()
        times = times + 1
# Serialize each character's chapter vector as a Python-style list literal.
for obj in chapiter:
    res.write('[')
    n = 0
    for unit in obj:
        n = n + 1
        if n != len(obj):
            res.write(str(unit)+',')
        if n == len(obj):
            res.write(str(unit)+']\n')
res.close()
from sqlalchemy import Integer, Column, String
from app import db # noqa
from .interface import FizzbarInterface
from typing import Any
class Fizzbar(db.Model):  # type: ignore
    """A snazzy Fizzbar"""

    __tablename__ = "fizzbar"

    fizzbar_id = Column(Integer(), primary_key=True)
    name = Column(String(255))
    purpose = Column(String(255))

    def update(self, changes: FizzbarInterface):
        """Apply every key/value pair in *changes* to this instance and
        return self so calls can be chained."""
        for attribute, new_value in changes.items():
            setattr(self, attribute, new_value)
        return self
|
#!/usr/bin/env python
#-*-encoding:UTF-8-*-
import sys
import datetime
import os
import Mail
from lib.common import *
class VulStats:
    """Aggregate vulnerability scan findings into summary tables.

    updateVulTypeStats() rebuilds the per-user, per-vulnerability-family
    counters in `vul_stats`; updateVulTaskStats() refreshes the per-task
    severity totals (c/h/m/l/i columns) on `task_manage`.
    """
    def __init__(self):
        try:
            # Placeholders; real handles are opened by mysqlConnect().
            self.conn = ""
            self.cursor = ""
        except Exception,e:
            logging.getLogger().error("init CountVul Exception(VulStats):" + str(e))
        #end try
    #end def
    def mysqlConnect(self):
        """Open a connection to the waf_hw schema with dict-style cursors."""
        # NOTE(review): host/user/passwd presumably come from the star
        # import of lib.common -- confirm they are defined there.
        try:
            self.conn = MySQLdb.connect(host, user, passwd , db = "waf_hw", charset = "utf8")
            self.cursor = self.conn.cursor(MySQLdb.cursors.DictCursor)
        except Exception,e:
            logging.getLogger().error("mysql connect Exception(VulStats):" + str(e))
        #end try
    #end def
    def mysqlClose(self):
        """Close cursor and connection, logging (not raising) any failure."""
        try:
            self.cursor.close()
            self.conn.close()
        except Exception,e:
            logging.getLogger().error("mysql close Exception(VulStats):" + str(e))
        #end try
    #end def
    def updateVulTypeStats(self):
        """Rebuild vul_stats from scratch: severity counts per family/user."""
        try:
            self.mysqlConnect()
            # Recompute from scratch: clear any previous aggregation.
            if table_exists('vul_stats'):
                self.cursor.execute("truncate table vul_stats")
                self.conn.commit()
            #end if
            self.cursor.execute("select `id`,`user_id`,`asset_scan_id` from `task_manage`")
            ret = self.cursor.fetchall()
            if ret and len(ret) > 0:
                for row in ret:
                    task_id = row['id']
                    user_id = row['user_id']
                    asset_scan_id = row['asset_scan_id']
                    # Each task writes results into its own suffixed tables.
                    vul_details_table = 'vul_details_' + str(task_id)
                    scan_result_table = 'scan_result_' + str(task_id)
                    weak_pwd_details_table = 'weak_pwd_details_' + str(task_id)
                    #count host_vul
                    if table_exists(vul_details_table) and table_exists(scan_result_table) and table_exists(weak_pwd_details_table):
                        # Union of host vulns, web scan hits and weak
                        # passwords (weak passwords always count as High).
                        sql = "select count(*) as c, family, risk_factor as level from %s group by concat(family,risk_factor) union all " % (vul_details_table)
                        sql += " select count(*) as c, vul_type as family, substring(level,1,1) as level from %s group by concat(vul_type,level) union all " % (scan_result_table)
                        sql += " select count(*) as c, type as family, 'H' as level from %s group by type" % (weak_pwd_details_table)
                        if asset_scan_id > 0:
                            # Narrow all three sources to a single asset scan.
                            sql = "select count(*) as c, family, risk_factor as level from %s where asset_scan_id = '%d' group by concat(family,risk_factor) union all " % (vul_details_table,asset_scan_id)
                            sql += " select count(*) as c, vul_type as family, substring(level,1,1) as level from %s where asset_scan_id = '%d' group by concat(vul_type,level) union all " % (scan_result_table,asset_scan_id)
                            sql += " select count(*) as c, type as family, 'H' as level from %s where asset_scan_id = '%d' group by type" % (weak_pwd_details_table,asset_scan_id)
                        #end if
                        self.cursor.execute(sql)
                        result = self.cursor.fetchall()
                        for item in result:
                            count = item['c']
                            family = item['family']
                            level = item['level']
                            sql = "select count(id) as c from vul_stats where vul_name = %s and user_id = %s"
                            self.cursor.execute(sql,(family,str(user_id)))
                            query = self.cursor.fetchone()
                            if query and len(query) > 0 and query['c'] > 0:
                                # Row exists: bump the matching severity column.
                                # NOTE(review): `level` is interpolated into the
                                # column name; it comes from scan data -- confirm
                                # it can only ever be C/H/M/L/I.
                                sql = "update vul_stats set "+level.lower()+" = "+level.lower()+" + "+str(count)+" where vul_name = %s and user_id = %s"
                                self.cursor.execute(sql,(family,str(user_id)))
                                self.conn.commit()
                            else:
                                # First sighting: insert a fresh row with the
                                # count placed in the right severity column.
                                c = 0
                                h = 0
                                m = 0
                                l = 0
                                i = 0
                                if level == 'C':
                                    c = count
                                elif level == 'H':
                                    h = count
                                elif level == 'M':
                                    m = count
                                elif level == 'L':
                                    l = count
                                elif level == 'I':
                                    i = count
                                #end if
                                sql = "insert into vul_stats(vul_name,c,h,m,l,i,user_id)values(%s,%s,%s,%s,%s,%s,%s)"
                                self.cursor.execute(sql,(family,str(c),str(h),str(m),str(l),str(i),str(user_id)))
                                self.conn.commit()
                            #end if
                        #end for
                    #end if
                #end for
            #end if
            self.mysqlClose()
        except Exception,e:
            logging.getLogger().error("updateVulTypeStats Exception(VulStats):" + str(e))
        #end try
    #end def
    def updateVulTaskStats(self):
        """Refresh the per-task severity totals stored on task_manage."""
        try:
            self.mysqlConnect()
            self.cursor.execute("select `id`,`asset_scan_id` from `task_manage`")
            ret = self.cursor.fetchall()
            if ret and len(ret) > 0:
                for row in ret:
                    task_id = row['id']
                    asset_scan_id = row['asset_scan_id']
                    vul_details_table = 'vul_details_' + str(task_id)
                    scan_result_table = 'scan_result_' + str(task_id)
                    weak_pwd_details_table = 'weak_pwd_details_' + str(task_id)
                    # Severity totals default to zero when no rows match.
                    c = 0
                    h = 0
                    m = 0
                    l = 0
                    i = 0
                    if table_exists(vul_details_table) and table_exists(scan_result_table) and table_exists(weak_pwd_details_table):
                        # Sum counts per severity across the three sources.
                        sql = "select sum(c) as c,a.level as level from ("
                        sql += " select count(*) as c,v.risk_factor as `level` from "+vul_details_table+" v group by v.risk_factor union all "
                        sql += " select count(*) as c,substring(s.level,1,1) as `level` from "+scan_result_table+" s group by s.level union all "
                        sql += " select count(*) as c,'H' as `level` from "+weak_pwd_details_table+" "
                        sql += " ) a group by a.level "
                        if asset_scan_id > 0:
                            # Narrowed variant for a single asset scan.
                            sql = "select sum(c) as c,a.level as level from ("
                            sql += " select count(*) as c,v.risk_factor as `level` from %s v where v.asset_scan_id = '%d' group by v.risk_factor union all " % (vul_details_table,asset_scan_id)
                            sql += " select count(*) as c,substring(s.level,1,1) as `level` from %s s where s.asset_scan_id = '%d' group by s.level union all " % (scan_result_table,asset_scan_id)
                            sql += " select count(*) as c,'H' as `level` from %s where asset_scan_id = '%d' " % (weak_pwd_details_table,asset_scan_id)
                            sql += " ) a group by a.level "
                        #end if
                        self.cursor.execute(sql)
                        result = self.cursor.fetchall()
                        if result and len(result) > 0:
                            for item in result:
                                count = item['c']
                                level = item['level']
                                if level == 'C':
                                    c = count
                                elif level == 'H':
                                    h = count
                                elif level == 'M':
                                    m = count
                                elif level == 'L':
                                    l = count
                                elif level == 'I':
                                    i = count
                                #end if
                            #end for
                        #end if
                        sql = "update task_manage set c = '%s', h = '%s', m = '%s', l = '%s', i = '%s' where id = '%s'" % (str(c),str(h),str(m),str(l),str(i),str(task_id))
                        self.cursor.execute(sql)
                        self.conn.commit()
                    #end if
                #end for
            #end if
            self.mysqlClose()
        except Exception,e:
            logging.getLogger().error("updateVulTaskStats Exception(updateVulTaskStats):" + str(e))
        #end try
    #end def
    def main(self):
        """Run both aggregation passes back to back."""
        try:
            self.updateVulTypeStats()
            self.updateVulTaskStats()
        except Exception,e:
            logging.getLogger().error("main Exception(VulStats):" + str(e))
        #end try
    #end def
#end class
if __name__ == "__main__":
init_log(logging.ERROR, logging.ERROR, "/var/log/" + os.path.split(__file__)[1].split(".")[0] + ".log")
try:
vulstats = VulStats()
vulstats.main()
except Exception,e:
logging.getLogger().error("__main__ Exception(VulStats):" + str(e))
#end try
#end if
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Author: Frederic Lepied <frederic.lepied@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from distutils.core import setup

# Package metadata for the `builder` tool.
setup(
    name='builder',
    version='0.1',
    long_description='Build a dependency graph of shell '
                     'commands like the make tool',
    keywords=['make', 'build'],
    author=u'Frédéric Lepied',
    author_email='frederic.lepied@enovance.com',
    url='https://github.com/enovance/builder',
    license='Apache',
    py_modules=['builder', ],
)
# setup.py ends here
|
from scrapy.spider import BaseSpider
from scrapy.contrib.spiders import CrawlSpider,Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.http import FormRequest
from scrapy.selector import Selector
from scrapy.http import Request
from myfilmfinder.items import MyFilmItem
import re
class ItuneSpider(CrawlSpider):
    """Crawl the iTunes GB film genre pages and yield one MyFilmItem per film."""

    count = 0
    name = 'itunesspider'
    start_urls = ['https://itunes.apple.com/gb/genre/films/id33',]
    # Follow the genre navigation and both A-Z/paging lists; film detail
    # pages (under #selectedcontent) are handed to parse_movie.
    rules = (
        Rule(SgmlLinkExtractor(restrict_xpaths=('//*[@id="genre-nav"]')),follow=True,),
        Rule(SgmlLinkExtractor(restrict_xpaths=('//*[@id="selectedgenre"]/ul[1]')),follow=True,),
        Rule(SgmlLinkExtractor(restrict_xpaths=('//*[@id="selectedgenre"]/ul[2]')),follow=True,),
        Rule(SgmlLinkExtractor(restrict_xpaths=('//*[@id="selectedcontent"]')),callback='parse_movie')
    )

    def parse_movie(self, response):
        """Extract title, id, rating, year and buy price from a film page,
        falling back to "N/A" for any field that is missing."""
        sel = Selector(response)
        self.count += 1
        TITLE_XPATH = sel.xpath('//*[@id="title"]/div[1]/h1/text()').extract()
        MOVIE_URL_XPATH = sel.xpath('/html/head/link[1]/@href').extract()[0]
        YEAR_XPATH = sel.xpath('//*[@id="left-stack"]/div[1]/ul/li[3]/text()').extract()
        RATING_XPATH = sel.xpath('//*[@id="title"]/div[1]/span/text()').extract()
        BUY_PRICE_XPATH = sel.xpath('//*[@id="left-stack"]/div[1]/ul/li[1]/span/text()').extract()
        if TITLE_XPATH:
            title = TITLE_XPATH[0]
        else:
            title = "N/A"
        if MOVIE_URL_XPATH:
            # Canonical URL ends in ".../idNNNN"; strip the 'id' prefix.
            MOVIE_SPLIT = MOVIE_URL_XPATH.split("/")
            movieid = MOVIE_SPLIT[-1].replace('id', '')
        else:
            movieid = "N/A"
        if RATING_XPATH:
            rating = RATING_XPATH[0]
        else:
            rating = "N/A"
        if YEAR_XPATH:
            year = YEAR_XPATH[0]
        else:
            year = "N/A"
        duration = "N/A"
        url = MOVIE_URL_XPATH
        if BUY_PRICE_XPATH:
            # BUG FIX: pattern was '\d+.\d+' -- the unescaped dot matched any
            # character (so e.g. '12x99' parsed as a price); escape it and
            # use a raw string so '\d' is an explicit regex escape.
            buyprice = re.compile(r'\d+\.\d+').findall(BUY_PRICE_XPATH[0])[0]
        else:
            buyprice = 'N/A'
        price = "N/A"
        pid = 1
        item = MyFilmItem(
            title=title,
            movieid=movieid,
            rating=rating,
            year=year,
            url=url,
            buyprice=buyprice,
            price=price,
            pid=pid,
            providerid='5',
            duration=duration,
        )
        yield item
# -*- coding: utf-8 -*-
# Demonstrate dict CRUD operations on a {name: [age, mark]} mapping.
studentmark = {'jack': [25, 580], 'jim': [24, 400], 'ramu': [24, 590]}
print(studentmark)
# add
print('Add raju')
studentmark['raju'] = [24, 599]
print(studentmark)
print('update raju')
studentmark['raju'] = [24, 600]
print(studentmark)
# Unpack the [age, mark] pair before printing the individual fields.
raju_age, raju_mark = studentmark['raju']
print("Raju's age is", raju_age)
print("Raju's Mark is", raju_mark)
print("Deleting Raju")
del studentmark['raju']
print(studentmark)
|
#Recieves a paresed array from the EqParse function and sends the proper operators to the CalDataType function
#Basically is the logic that will calculate the total of the entire equation
from CalDataType import calculate
from EqParse import EqParse
def eqIterator(eqArray):
    """Evaluate a parsed infix expression (a list of numbers and operator
    strings) and return the result as a float.

    Fixes two defects in the original:
      * All '*' were applied before any '/' (and all '+' before any '-'),
        mis-evaluating e.g. 8 / 2 * 4; operators of equal precedence are
        now applied left-to-right in a shared tier.
      * Each parenthesis collapse consumed one advance of the operator
        index, which could skip a whole precedence level ('^' in
        '(2)^(3)') when several groups existed; parens are now fully
        collapsed before any operator pass runs.
    """
    # Collapse every top-level parenthesized sub-expression, innermost
    # groups being handled by the recursive call.
    while '(' in eqArray:
        depth = 0
        for i, token in enumerate(eqArray):
            if token == '(':
                depth += 1
            elif token == ')':
                depth -= 1
                if depth == 0:
                    opening = eqArray.index('(')
                    # Replace "( expr )" with the recursively computed value.
                    eqArray[i] = eqIterator(eqArray[opening + 1:i])
                    del eqArray[opening:i]
                    break
    # Apply operators tier by tier; within a tier, scan left-to-right.
    for tier in (('^',), ('*', '/'), ('+', '-')):
        i = 0
        while i < len(eqArray):
            if eqArray[i] in tier:
                result = calculate(eqArray[i], eqArray[i - 1], eqArray[i + 1])
                # Replace "left op right" with its value and re-examine the
                # same position so evaluation stays left-to-right.
                eqArray[i - 1:i + 2] = [result]
            else:
                i += 1
    return float(eqArray[-1])
# x = str(input("Please enter an expression to be evaluated.\n> "))
# print(eqIterator(EqParse(x.strip())))
#use for test input
|
from selenium import webdriver
# BUG FIX: the placeholder was a bare name (NameError at import time);
# it must be a string the user replaces with the real driver location.
chrome_driver_path = "YOUR_DRIVER_PATH_HERE"
driver = webdriver.Chrome(executable_path=chrome_driver_path)
# driver.get("https://www.amazon.in/Camel-Oil-Pastel-Reusable-Plastic/dp/B00LY12TH6/ref=sr_1_1?dchild=1&keywords=oil+pastels&qid=1621483789&sr=8-1")
# price = driver.find_element_by_id("priceblock_ourprice")
# print(price.text)
driver.get("https://www.python.org/")
# search_bar = driver.find_element_by_name("q")
# print(search_bar.get_attribute("placeholder"))
# logo = driver.find_element_by_class_name("python-logo")
# print(logo.size)
# documentation_link = driver.find_element_by_css_selector(".documentation-widget a")
# print(documentation_link.text)
# bug_link = driver.find_element_by_xpath('//*[@id="site-map"]/div[2]/div/ul/li[3]/a')
# print(bug_link.text)
events_date = driver.find_elements_by_css_selector(".event-widget time")
events_name = driver.find_elements_by_css_selector(".event-widget li a")
# Pair each event time with its link text; zip() stops at the shorter list,
# so a count mismatch no longer raises IndexError.
events = {
    i: {"date": date_el.text, "name": name_el.text}
    for i, (date_el, name_el) in enumerate(zip(events_date, events_name))
}
print(events)
driver.quit()
# driver.close()
|
import numpy as np
import glob
import string
import os
import sys
import matplotlib.pyplot as plt
from scipy.fftpack import fft,fftfreq, rfft
from mpl_toolkits.basemap import Basemap, shiftgrid, addcyclic
from matplotlib.patches import Polygon
from matplotlib.colors import LogNorm
from cmath import *
import colorsys
from scipy import signal
from netCDF4 import Dataset
#Type should be Magnitude or Phase
# NOTE(review): `type` shadows the builtin; kept for byte-compatibility.
type = raw_input('mag or phase?\n')
#Period should be daily, half_annual, annual
period = raw_input('half_annual or annual?\n')
#Ask if want to save out full image, no borders
full_image = raw_input('Do you want to save out full image? Y or N?\n')
# NOTE(review): f_name is only defined for the literal answer 'Y', but the
# save branch below triggers for anything other than 'N' -- e.g. a lowercase
# 'y' would hit a NameError at savefig time; confirm intended input contract.
if full_image == 'Y':
    f_name = 'omi_%s_%s.png'%(period,type)
if type == 'mag':
    type_f = 'magnitudes'
else:
    type_f = 'phases'
#set up plot
fig =plt.figure(figsize=(20,12))
fig.patch.set_facecolor('white')
if full_image == 'Y':
    # Borderless full-bleed axes for the image-only output.
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
# Pre-computed per-gridbox values saved by an earlier processing step.
model_f = 'obs_%s/obs_%s_%s.npy'%(type_f,period,type_f)
if period == 'half_annual':
    if type_f == 'magnitudes':
        title = 'Half-Annual Magnitude'
        label = 'Concentration (ppbV)'
        types = 'mag'
    else:
        title = 'Half-Annual Phase'
        label = 'Time (Months)'
        types = 'phase'
        # Phase of the semi-annual cycle spans 6 months.
        phase_min = 0
        phase_max = 6
if period == 'annual':
    if type_f == 'magnitudes':
        title = 'Annual Magnitude'
        label = 'Concentration (ppbV)'
        types = 'mag'
    else:
        title = 'Annual Phase'
        label = ' Time (Months)'
        types = 'phase'
        # Phase of the annual cycle spans 12 months.
        phase_min = 0
        phase_max = 12
# load in model values
values = np.load(model_f)
#lat lon edges for 1x2.25 grid
lat_e = np.arange(-60.,60.5,1)
lon_e = np.arange(-180,181,1.25)
lat_c = np.arange(-59.5,60,1)
lon_c = np.arange(-179.375,180,1.25)
#get size of grid
grid_dim_c = len(lat_c)*len(lon_c)
#reshape array to plot on map
# The flat values array is row-major by latitude; stack one longitude row
# at a time into a 2-D grid (the try/except seeds the first row).
start = 0
end = len(lon_c)
for i in range(len(lat_c)):
    new_list = values[start:end]
    new_list = np.array(new_list)
    try:
        z =np.vstack((z,new_list))
    except:
        z = [new_list]
        z=np.array(z)
    start+=len(lon_c)
    end+=len(lon_c)
#setup basemap projection
m = Basemap(projection='cyl',llcrnrlat=-60,urcrnrlat=60,\
    llcrnrlon=lon_e[0],\
    urcrnrlon=lon_e[-1],\
    resolution='c')
if full_image == 'N':
    # Interactive view: draw coastlines, grid lines and axis ticks.
    m.drawcoastlines()
    m.drawmapboundary()
    parallels = np.arange(-60,61,15)
    meridians = np.arange(-180,151,30)
    plt.xticks(meridians)
    plt.yticks(parallels)
    m.drawparallels(parallels)
    m.drawmeridians(meridians)
#plot model gridboxes
if type == 'mag':
    poly = m.pcolor(lon_e, lat_e, z,vmin=0, vmax=23.5,cmap = plt.cm.coolwarm)
else:
    # Cyclic colormap for phase so month 12 wraps back toward month 1.
    poly = m.pcolor(lon_e, lat_e, z, vmin=phase_min, vmax=phase_max, cmap=plt.cm.hsv)
if full_image == 'N':
    # Colorbar with month labels for phase plots, plain numeric otherwise.
    if (period == 'half_annual') & (type == 'phase'):
        cb = plt.colorbar(poly, ticks =[0,1,2,3,4,5], ax = m.ax,shrink=0.8,orientation = 'horizontal', format='%.2f')
        cb.ax.set_xticklabels(['M1','M2','M3','M4','M5','M6'])
    elif (period == 'annual') & (type == 'phase'):
        cb = plt.colorbar(poly, ticks =[0,1.01848,1.94661,2.96509,3.95072,4.96920,5.95483,6.97331,7.99179,8.97741,9.99589,10.98152], ax = m.ax,shrink=0.8,orientation = 'horizontal', format='%.2f')
        cb.ax.set_xticklabels(['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'])
    else:
        cb = plt.colorbar(poly, ax = m.ax,shrink=0.8,orientation = 'horizontal', format='%.2f')
    cb.set_label('%s'%(label), fontsize = 16)
    plt.xlabel('Longitude',fontsize = 20)
    plt.ylabel('Latitude',fontsize = 20)
    cb.ax.tick_params(labelsize=16)
    plt.title('Tropospheric O3 OMI 1x1.25 Oct 2004 - Jan 2014 %s'%(title), fontsize = 18)
    plt.show()
else:
    # Full-image mode: save exactly the axes region, no decorations.
    extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    plt.savefig(f_name,bbox_inches=extent)
|
# Read a string, show its non-numeric (separator) characters, then split
# the string on those separators and print the integer values found.
numbers = input("Please enter a character")
separators = "".join(char for char in numbers if not char.isnumeric())
print(separators)
values = "".join(char if char not in separators else " " for char in numbers).split()
print([int(val) for val in values])
|
API_VIEWS_HEADER = '''# coding=utf-8
from rest_framework import permissions
from core.api.views import CommonViewSet
from ..models import <% ALL_MODELS %>
from . import serializers
'''
API_VIEWS_BODY = '''
class <% MODEL_NAME %>ViewSet(CommonViewSet):
""" API views for <% MODEL_NAME %> """
queryset = <% MODEL_NAME %>.objects.all()
serializer_class = serializers.<% MODEL_NAME %>Serializer
filter_fields = ['pk'] + <% fields %>
search_fields = <% fields %>
ordering_fields = <% fields %>
'''
|
from locust import HttpLocust, TaskSet, task
import queue,csv,time
from src.utils.extractor import JMESPathExtractor
class WebsiteTasks(TaskSet):
    """Locust task set that posts one pre-generated order per task run."""

    @task(1)
    def test_pay(self):
        # Each virtual user consumes a unique order number from the shared
        # queue; when the queue is drained the process exits.
        # NOTE(review): Queue.get() without block=False blocks instead of
        # raising queue.Empty -- confirm the intended end-of-data behavior.
        try:
            data = self.locust.user_data_queue.get()
        except queue.Empty:
            print('test ended.')
            exit(0)
        payload ={"merchantCode": "A00166598", "isPloy": 1, "platformType": 0, "orderNO": data['orderNO'], "amount": 1, "productId": 10000, "quantity": 100, "name": "金币", "desc": "100金币", "device": 4, "typeCode": "alipay", "extData": "", "extInfo": "", "openId": "jpSeis_Pz8_Pz8_Hzs_NzszKzcuxusyR", "appId": "", "notifyUrl": "http://www.baidu.com", "sign": "test", "signType": "RSA", "version": "1.0"}
        res = self.client.post('/pay',catch_response = True,json=payload)
        if res.status_code ==200:
            # Business-level success is signalled by code == 0 in the body.
            res_code = JMESPathExtractor().extract(query='code', body=res.text)
            if res_code == 0 :
                print('下单成功: {}'.format(data['orderNO']))
                res.success()
            else:
                print('下单失败: {}'.format(data['orderNO']))
                res.failure('fail')
                print(res.text)
        else:
            print('请求失败 with {}'.format(data['orderNO']))
            res.failure('error')
class WebsiteUser(HttpLocust):
    """docstring for WebsiteUser"""
    task_set = WebsiteTasks
    # host = "http://172.16.6.242:8066"
    # Pre-generate 2000 unique order numbers (millisecond timestamp plus a
    # 6-digit index) shared by all simulated users.
    user_data_queue = queue.Queue()
    for index in range(2000):
        data = {
            "orderNO": str(int(time.time()*1000))+"%06d" % index
        }
        user_data_queue.put_nowait(data)
    # Wait 1-3 seconds between task executions.
    min_wait = 1000
    max_wait = 3000
if __name__ == '__main__':
    import os
    # Convenience launcher; note this host overrides the commented-out one
    # on the WebsiteUser class above.
    os.system("locust -f PayOrder.py --host=http://192.168.0.166:8099")
import discord
from discord.ext import commands
from discord import FFmpegPCMAudio
from discord import Member
from discord.ext.commands import has_permissions, MissingPermissions
import json
import os
class Fun(commands.Cog):
    """Message-reaction cog: replies with a themed GIF embed whenever a
    trigger word appears in a message."""

    def __init__(self, client):
        self.client = client

    @staticmethod
    def _gif_embed(url):
        # Build a bare embed whose only content is the GIF image.
        embed = discord.Embed(title="")
        embed.set_image(url=url)
        return embed

    #listen for words
    @commands.Cog.listener()
    async def on_message(self, message):
        # Ignore the bot's own messages to avoid reply loops.
        if message.author == self.client.user:
            return
        content = message.content
        lowered = content.lower()
        # BUG FIX: the original used ("meme" or "Meme" or "MEME"), which
        # evaluates to just "meme" and never matched the other casings;
        # a case-insensitive check covers them all.
        if "meme" in lowered:
            await message.channel.send("Did someone say meme?")
            await message.channel.send(embed=self._gif_embed(
                "https://c.tenor.com/jItxRf-KWosAAAAC/jeffrey-dean-morgan-smile.gif"))
        # BUG FIX: same or-chain bug; now any Spider-Man keyword triggers.
        spiderman_keywords = ("spiderman", "peter-parker", "peter parker",
                              "spider-man", "spider man", "tobey maguire",
                              "tobey", "tobeymaguire")
        if any(keyword in lowered for keyword in spiderman_keywords):
            await message.channel.send(embed=self._gif_embed(
                "https://c.tenor.com/P8VsDwHZreYAAAAd/tobey-maguire-spider-man.gif"))
        if ("fuck") in content:
            await message.channel.send(embed=self._gif_embed(
                "https://c.tenor.com/Rt5-pEJHt04AAAAC/fuck-damn.gif"))
        if ("game") in content:
            await message.channel.send(embed=self._gif_embed(
                "https://c.tenor.com/xtp2N6fg2SwAAAAC/stickergiant-game-time.gif"))
        if ("sorry") in content:
            await message.channel.send(embed=self._gif_embed(
                "https://c.tenor.com/B1ooFF-GRUMAAAAd/no-apology-necessary-travis.gif"))
def setup(client):
    """Entry point used by discord.py's load_extension to register the cog."""
    client.add_cog(Fun(client))
|
import torch
import torch.nn as nn
from mmdet.core import bbox2result
from .. import builder
from ..registry import DETECTORS
from .base import BaseDetector
@DETECTORS.register_module
class SingleStageDetector(BaseDetector):
"""Base class for single-stage detectors.
Single-stage detectors directly and densely predict bounding boxes on the
output features of the backbone+neck.
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(SingleStageDetector, self).__init__()
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
self.bbox_head = builder.build_head(bbox_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
def init_weights(self, pretrained=None):
super(SingleStageDetector, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
self.bbox_head.init_weights()
def extract_feat(self, img):
"""Directly extract features from the backbone+neck
"""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmedetection/tools/get_flops.py`
"""
x = self.extract_feat(img)
outs = self.bbox_head(x)
return outs
def forward_export(self, img):
def reformat(feature, dim):
N = feature.shape[0]
return feature.permute(0, 2, 3, 1).contiguous().view(N, -1, dim)
x = self.extract_feat(img)
cls_scores, bbox_preds = self.bbox_head(x)
assert len(cls_scores) == len(bbox_preds)
cls_out_channels = self.bbox_head.cls_out_channels
if self.bbox_head.use_sigmoid_cls:
cls_scores = [cls_score.sigmoid() for cls_score in cls_scores]
else:
cls_scores = [cls_score.softmax(-1) for cls_score in cls_scores]
classification = torch.cat([reformat(feature, cls_out_channels) for feature in cls_scores], dim=1)
regression = torch.cat([reformat(feature, 4) for feature in bbox_preds], dim=1)
# import pdb;pdb.set_trace()
return classification, regression
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
x = self.extract_feat(img)
outs = self.bbox_head(x)
loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg)
losses = self.bbox_head.loss(
*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
return losses
def simple_test(self, img, img_meta, rescale=False):
x = self.extract_feat(img)
outs = self.bbox_head(x)
bbox_inputs = outs + (img_meta, self.test_cfg, rescale)
bbox_list = self.bbox_head.get_bboxes(*bbox_inputs)
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
return bbox_results[0]
    def aug_test(self, imgs, img_metas, rescale=False):
        """Test-time-augmentation inference is not supported by this detector."""
        raise NotImplementedError
|
# Create a function that takes in two lists and creates a single dictionary where the first list contains keys and the second values. Assume the lists will be of equal length.
# NOTE: this script uses Python 2 `print` statements; it will not run under Python 3.
name = ["Anna", "Eli", "Pariece", "Brendan", "Amy", "Shane", "Oscar"]
favorite_animal = ["horse", "cat", "spider", "giraffe", "ticks", "dolphins", "llamas"]
# Sanity check: both lists should have the same length (7 each).
print len(name)
print len(favorite_animal)
def make_dict(arr, arr2d2):
    """Build a dict mapping each element of `arr` (keys) to the element of
    `arr2d2` (values) at the same position.

    The previous implementation swapped keys and values whenever the second
    list was longer, silently inverting the mapping; per the stated contract
    ("the first list contains keys and the second values") the pairing is now
    always arr -> arr2d2. zip() truncates to the shorter list if the lengths
    ever differ.
    """
    return dict(zip(arr, arr2d2))
# Python 2 print statement: displays the resulting name -> animal mapping.
print make_dict(name, favorite_animal)
|
# -*- coding: utf-8 -*-
# File: widerdata.py
import os
import cv2
import numpy as np
import json
MINSIZE = 9
class WiderData(object):
    """Loader for WIDER FACE dataset annotations.

    Expects `basedir` to contain the standard WIDER layout:
    wider_face_split/ (annotation txt files) and WIDER_<split>/images/.
    """
    def __init__(self, basedir):
        # Dataset root; the split annotation files live in wider_face_split/.
        self.basedir = basedir
        self.anndir = os.path.join(basedir,'wider_face_split')
    def loadImgs(self,data='train',fromfile=False):
        """Parse the bbx_gt annotation file for the given split.

        Args:
            data: split name used in file paths ('train', 'val', ...).
            fromfile: if True, read cached image sizes from
                wider_wh_<split>.npy instead of opening every image with cv2.

        Returns:
            list of dicts with keys: file_name, boxes (x1,y1,x2,y2 float32),
            is_crowd, class, height, width.
        """
        print('load WIDER data info')
        labeldir = os.path.join(self.anndir,'wider_face_{}_bbx_gt.txt'.format(data))
        baseimgdir = os.path.join(self.basedir,'WIDER_{}/images'.format(data))
        # NOTE(review): this file handle is never explicitly closed.
        labelfile = open(labeldir)
        lines = labelfile.readlines()
        lindex = 0
        imgs = []
        hwarray=[]
        # Annotation format: one image-name line, one object-count line, then
        # one "x y w h ..." line per object.
        while(lindex<len(lines)):
            img = {}
            name = lines[lindex][:-1]
            imgname = os.path.join(baseimgdir,name)
            img['file_name'] = imgname
            lindex += 1
            objnum = int(lines[lindex][:-1])
            lindex += 1
            boxes = []
            delnum = 0
            for i in range(objnum):
                labels = lines[lindex+i][:-1].split(' ')
                # Convert (x, y, w, h) to (x1, y1, x2, y2).
                box = [float(labels[0]), float(labels[1]),float(labels[2])+float(labels[0]),float(labels[3]) + float(labels[1])]
                # Keep only boxes whose pixel area exceeds MINSIZE.
                if float(labels[2])*float(labels[3])>MINSIZE:
                    boxes.append(box)
                else:
                    delnum+=1
            lindex += objnum
            objnum = objnum - delnum
            img['boxes'] = np.float32(np.asarray(boxes))
            img['is_crowd'] = np.asarray([0]*objnum)
            img['class'] = np.asarray([1]*objnum)
            if fromfile:
                # Cached (height, width) pairs, indexed by position in `imgs`.
                hws = np.load('wider_wh_{}.npy'.format(data))
                img['height'] = hws[len(imgs)][0]
                img['width'] = hws[len(imgs)][1]
            else:
                imgdata = cv2.imread(imgname)
                img['height'] = imgdata.shape[0]
                img['width'] = imgdata.shape[1]
                hwarray.append([imgdata.shape[0],imgdata.shape[1]])
            imgs.append(img)
        if fromfile==False:
            # Cache the collected sizes for future fromfile=True runs.
            np.save('wider_wh_{}.npy'.format(data),hwarray)
        #res.write(str(hwdict))
        #res.close()
        return imgs
    def loadImgsfromfile(self,data='train'):
        """Load a previously exported label JSON for the given split."""
        labelfile = open('wider_label_{}.json'.format(data),'r')
        return json.load(labelfile)
|
from odoo import fields, models
class ModelName(models.Model):
    """Odoo model declaring the `crm.team.ept` table.

    NOTE(review): the Python class name is a placeholder; the actual model
    identity comes from `_name`.
    """
    _name = 'crm.team.ept'
    _description = 'CRM Lead.'
    # Display name of the team.
    name = fields.Char(string="Name ", help="Enter Name")
    # Link to the res.users record acting as team leader.
    team_leader_id = fields.Many2one(comodel_name='res.users',
                                     string="Team Leader", help="Enter Team Leader.")
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 10 16:07:46 2019
@author: Antonio
"""
# compute bitgrowth L1 norm on kernels (\infty norm on activation is 0.75)
import numpy as np
import math
# Load the saved ternary-network weights; items 0 and 5 hold the two conv
# kernels of interest (4-D arrays laid out as (kh, kw, ifmaps, ofmaps)).
weights = np.load('./saved_model/tnn_weights.npy', allow_pickle=True)
kernelc1 = weights.item(0)
kernelc2 = weights.item(5)
for kernel in [kernelc1, kernelc2]:
    x, y, w, z = kernel.shape
    print(kernel.shape)
    for l in range(z):  # output channels (ofmaps)
        # Accumulate the L1 norm over all input channels first ...
        norm = 0.0
        for k in range(w):  # input channels (ifmaps)
            norm += np.linalg.norm(kernel[:, :, k, l], ord=1)
        # ... then compute the bit growth once per output channel. The
        # original recomputed this logarithm on every inner iteration and
        # only ever used the final value; the printed output is unchanged.
        bg = np.ceil(math.log(0.75 * norm, 2))
        # Display bg for each output channel.
        print(bg)
"""
url : https://leetcode.com/problems/valid-palindrome-ii/description/
Description : Given a non-empty string s, you may delete at most one character. Judge whether you can make it a palindrome.
Ex:
Input: "aba"
Output: True
Note:
# The string will only contain lowercase characters a-z. The maximum length of the string is 50000.
"""
class Solution:
    """LeetCode 680 - Valid Palindrome II."""

    def validPalindrome(self, s):
        """Return True if `s` can be a palindrome after deleting at most one char.

        :type s: str
        :rtype: bool

        Two-pointer scan; on the first mismatch, try skipping either the left
        or the right character and verify the remainder. The original code
        carried a `deletion_made` counter that was never incremented and
        reached the same answers through convoluted pointer back-tracking;
        this form is equivalent and direct.
        """
        left, right = 0, len(s) - 1
        while left < right:
            if s[left] != s[right]:
                # One deletion allowed: drop either side and check the rest.
                return (self.valid_palindrome_string(s, left + 1, right)
                        or self.valid_palindrome_string(s, left, right - 1))
            left += 1
            right -= 1
        return True

    def valid_palindrome_string(self, s, l, r):
        """Return True if s[l..r] (inclusive bounds) is a palindrome."""
        while l <= r:
            if s[l] != s[r]:
                return False
            l += 1
            r -= 1
        return True
# Python 2 print statements exercising the solution
# (these evaluate to False and True respectively).
print Solution().validPalindrome('abcdbda')
print Solution().validPalindrome("cupucu")
|
"""
Zach Miller
Final project: Rock, Paper, Scissors
11/25/13
"""
import random
def main():
    """Play interactive rounds of Rock/Paper/Scissors against the computer.

    Mapping used by both players: 1 = rock, 2 = scissors, 3 = paper.
    On a tie nothing is asked and the loop simply repeats; the play-again
    prompt is only shown when the two choices differ.
    """
    print('Rock Paper Scissors Game!')
    print('Rock crushes Scissors!')
    print('Scissors cuts Paper!')
    print('Paper covers Rock!')
    playAgain='y'
    while (playAgain=='y' or playAgain=='Y'):
        computerChoice=random.randint(1,3)
        # NOTE(review): no validation — values outside 1..3 silently match
        # none of the branches below.
        playerChoice=int(input('Enter 1 for rock, 2 for scissors, and 3 for paper: '))
        print (playerChoice)
        print (computerChoice)
        if(playerChoice==computerChoice):
            print('You made the same choice as the computer. starting over...')
        # Player-winning combinations.
        if(playerChoice==1 and computerChoice==2):
            print('Computer chose scissors')
            print('Player chose Rock')
            print('Player wins!')
        if(playerChoice==2 and computerChoice==3):
            print('Computer chose Paper')
            print('Player chose Scissors')
            print('Player wins!')
        if(playerChoice==3 and computerChoice==1):
            print('Computer chose Rock')
            print('Player chose Paper')
            print('Player wins!')
        # Computer-winning combinations.
        if(playerChoice==1 and computerChoice==3):
            print('Computer chose Paper')
            print('Player chose Rock')
            print('Computer wins!')
        if(playerChoice==2 and computerChoice==1):
            print('Computer chose Rock')
            print('Player chose Scissors')
            print('Computer wins!')
        if(playerChoice==3 and computerChoice==2):
            print('Computer chose Scissors')
            print('Player chose Paper')
            print('Computer wins!')
        if(playerChoice!=computerChoice):
            playAgain=input('Do you want to continue? (y for yes, n for no)')
# Start the game when the module is executed (no __main__ guard in original).
main()
|
import argparse
import ast
import math
import os
import sys
from datetime import datetime, timedelta
from time import sleep
from collections.abc import Callable
from typing import Any
import demisto_client
from demisto_client.demisto_api.rest import ApiException
from urllib3.exceptions import HTTPWarning, HTTPError
from Tests.Marketplace.configure_and_install_packs import search_and_install_packs_and_their_dependencies
from Tests.configure_and_test_integration_instances import CloudBuild, get_custom_user_agent
from Tests.scripts.utils import logging_wrapper as logging
from Tests.scripts.utils.log_util import install_logging
ALREADY_IN_PROGRESS = "create / update / delete operation is already in progress (10102)"
def generic_request_with_retries(client: demisto_client,
                                 retries_message: str,
                                 exception_message: str,
                                 prior_message: str,
                                 path: str,
                                 method: str,
                                 request_timeout: int | None = None,
                                 accept: str = 'application/json',
                                 attempts_count: int = 5,
                                 sleep_interval: int = 60,
                                 should_try_handler: Callable[[], bool] | None = None,
                                 success_handler: Callable[[Any], Any] | None = None,
                                 api_exception_handler: Callable[[ApiException], Any] | None = None,
                                 http_exception_handler: Callable[[HTTPError | HTTPWarning], Any] | None = None):
    """
    Send an HTTP request to the server, retrying on failures with optional hooks.

    Args:
        client: demisto client.
        retries_message: message to print after failure when we have more attempts.
        exception_message: message to print when we get an exception that is not an API or HTTP exception.
        prior_message: message to print when a new retry is made.
        path: endpoint to send request to.
        method: HTTP method to use.
        request_timeout: request param.
        accept: request param.
        attempts_count: number of total attempts made.
        sleep_interval: sleep interval between attempts.
        should_try_handler: a method to determine if we should send the next request.
        success_handler: a method to run in case of successful request (according to the response status).
        api_exception_handler: a method to run in case of api exception.
        http_exception_handler: a method to run in case of http exception.

    Returns: a (success, payload) tuple — True if the request succeeded, and a
        status payload where a success_handler provides one (e.g. when polling
        for a process to end); (False, None) on overall failure.
    """
    try:
        # attempts_left counts down to 0; 0 means this is the final attempt.
        for attempts_left in range(attempts_count - 1, -1, -1):
            try:
                if should_try_handler and not should_try_handler():
                    # The handler exists and says we should not try again.
                    return True, None
                # should_try_handler returned True, we are trying to send the request.
                logging.info(f"{prior_message}, attempt: {attempts_count - attempts_left}/{attempts_count}")
                response, status_code, headers = demisto_client.generic_request_func(client,
                                                                                     path=path,
                                                                                     method=method,
                                                                                     accept=accept,
                                                                                     _request_timeout=request_timeout)
                if 200 <= status_code < 300 and status_code != 204:
                    if success_handler:
                        # We have a method to run as we were returned a success status code.
                        return success_handler(response)
                    # No handler, just return True.
                    return True, None
                else:
                    err = f"Got {status_code=}, {headers=}, {response=}"
                    if not attempts_left:
                        # No attempts left, raise an exception that the request failed.
                        raise Exception(err)
                    logging.warning(err)
            except ApiException as ex:
                if api_exception_handler:
                    api_exception_handler(ex)
                if not attempts_left:  # exhausted all attempts, understand what happened and exit.
                    raise Exception(f"Got status {ex.status} from server, message: {ex.body}, headers: {ex.headers}") from ex
                logging.debug(f"Process failed, got error {ex}")
            except (HTTPError, HTTPWarning) as http_ex:
                if http_exception_handler:
                    http_exception_handler(http_ex)
                if not attempts_left:  # exhausted all attempts, understand what happened and exit.
                    raise Exception("Failed to perform http request to the server") from http_ex
                logging.debug(f"Process failed, got error {http_ex}")
            # There are more attempts available, sleep and retry.
            logging.debug(f"{retries_message}, sleeping for {sleep_interval} seconds.")
            sleep(sleep_interval)
    except Exception as e:
        logging.exception(f'{exception_message}. Additional info: {str(e)}')
    return False, None
def check_if_pack_still_installed(client: demisto_client,
                                  pack_id: str,
                                  attempts_count: int = 3,
                                  sleep_interval: int = 30):
    """
    Check (with retries) whether `pack_id` is still installed on the machine.

    Args:
        client (demisto_client): The client to connect to.
        pack_id: pack id to check.
        attempts_count (int): number of request attempts.
        sleep_interval (int): seconds to sleep between attempts.
    Returns:
        True if the pack is still installed, False otherwise.
    """
    def on_success(raw_response):
        # The endpoint returns a python-literal list of pack metadata dicts.
        ids_of_installed = [pack.get('id') for pack in ast.literal_eval(raw_response)]
        return pack_id in ids_of_installed, None

    return generic_request_with_retries(
        client=client,
        success_handler=on_success,
        retries_message="Failed to get all installed packs.",
        exception_message="Failed to get installed packs.",
        prior_message=f"Checking if pack {pack_id} is still installed",
        path='/contentpacks/metadata/installed',
        method='GET',
        attempts_count=attempts_count,
        sleep_interval=sleep_interval)
def get_all_installed_packs(client: demisto_client, unremovable_packs: list):
    """
    Fetch the ids of all installed packs, excluding the unremovable ones.

    Args:
        unremovable_packs: list of packs that can't be uninstalled.
        client (demisto_client): The client to connect to.
    Returns:
        list of id's of the installed packs, or None on failure.
    """
    try:
        logging.info("Attempting to fetch all installed packs.")
        raw_response, status_code, _ = demisto_client.generic_request_func(
            client,
            path='/contentpacks/metadata/installed',
            method='GET',
            accept='application/json',
            _request_timeout=None)
        if not 200 <= status_code < 300:
            message = ast.literal_eval(raw_response).get('message', '')
            raise Exception(f'Failed to fetch installed packs - with status code {status_code}\n{message}')
        pack_ids = [pack.get('id') for pack in ast.literal_eval(raw_response)]
        logging.success('Successfully fetched all installed packs.')
        installed_packs_ids_str = ', '.join(pack_ids)
        logging.debug(
            f'The following packs are currently installed from a previous build run:\n{installed_packs_ids_str}')
        return [pack_id for pack_id in pack_ids if pack_id not in unremovable_packs]
    except Exception as e:
        logging.exception(f'The request to fetch installed packs has failed. Additional info: {str(e)}')
        return None
def uninstall_all_packs_one_by_one(client: demisto_client, hostname, unremovable_packs: list):
    """ Lists all installed packs and uninstalls them one at a time.

    Args:
        client (demisto_client): The client to connect to.
        hostname (str): cloud hostname
        unremovable_packs: list of packs that can't be uninstalled.
    Returns (bool):
        A flag that indicates if the operation succeeded or not.
    """
    packs_to_uninstall = get_all_installed_packs(client, unremovable_packs)
    if packs_to_uninstall is None:
        # Fetching the installed packs failed; previously this crashed with a
        # TypeError on len(None) in the log line below. Fail explicitly instead.
        logging.error(f'Failed to fetch installed packs from server: {hostname}')
        return False
    logging.info(f'Starting to search and uninstall packs in server: {hostname}, packs count to '
                 f'uninstall: {len(packs_to_uninstall)}')
    uninstalled_count = 0
    failed_to_uninstall = []
    start_time = datetime.utcnow()
    if packs_to_uninstall:
        for i, pack_to_uninstall in enumerate(packs_to_uninstall, 1):
            logging.info(f"{i}/{len(packs_to_uninstall)} - Attempting to uninstall a pack: {pack_to_uninstall}")
            successful_uninstall, _ = uninstall_pack(client, pack_to_uninstall)
            if successful_uninstall:
                uninstalled_count += 1
            else:
                failed_to_uninstall.append(pack_to_uninstall)
    end_time = datetime.utcnow()
    logging.info(f"Finished uninstalling - Succeeded: {uninstalled_count} out of {len(packs_to_uninstall)}, "
                 f"Took:{end_time - start_time}")
    if failed_to_uninstall:
        logging.error(f"Failed to uninstall: {','.join(failed_to_uninstall)}")
    # Success only when every pack was uninstalled.
    return uninstalled_count == len(packs_to_uninstall)
def get_updating_status(client: demisto_client,
                        attempts_count: int = 5,
                        sleep_interval: int = 60,
                        ) -> tuple[bool, bool | None]:
    """Query (with retries) whether the server is installing/updating content.

    Returns a (request_succeeded, is_updating) tuple.
    """
    def on_success(raw_response):
        # The endpoint answers with a bare boolean payload.
        is_updating = 'true' in str(raw_response).lower()
        logging.info(f"Got updating status: {is_updating}")
        return True, is_updating

    return generic_request_with_retries(
        client=client,
        success_handler=on_success,
        retries_message="Failed to get installation/update status",
        exception_message="The request to get update status has failed",
        prior_message="Getting installation/update status",
        path='/content/updating',
        method='GET',
        attempts_count=attempts_count,
        sleep_interval=sleep_interval)
def wait_until_not_updating(client: demisto_client,
                            attempts_count: int = 2,
                            sleep_interval: int = 30,
                            maximum_time_to_wait: int = 600,
                            ) -> bool:
    """
    Poll the server until it reports it is no longer installing/updating.

    Args:
        client (demisto_client): The client to connect to.
        attempts_count (int): The number of failed status queries tolerated.
        sleep_interval (int): The sleep interval, in seconds, between polls.
        maximum_time_to_wait (int): The maximum time to wait for the server to exit the updating mode, in seconds.
    Returns:
        Boolean - If the operation succeeded.
    """
    end_time = datetime.utcnow() + timedelta(seconds=maximum_time_to_wait)
    while datetime.utcnow() <= end_time:
        success, updating_status = get_updating_status(client)
        if success:
            if not updating_status:
                return True
            logging.debug(f"Server is still installation/updating status, sleeping for {sleep_interval} seconds.")
            sleep(sleep_interval)
        else:
            # Walrus decrement: a falsy result (0) means the allowed failed
            # queries are exhausted.
            if attempts_count := attempts_count - 1:
                logging.debug(f"failed to get installation/updating status, sleeping for {sleep_interval} seconds.")
                sleep(sleep_interval)
            else:
                logging.info("Exiting after exhausting all attempts")
                return False
    logging.info(f"Exiting after exhausting the allowed time:{maximum_time_to_wait} seconds")
    return False
def uninstall_pack(client: demisto_client,
                   pack_id: str,
                   attempts_count: int = 5,
                   sleep_interval: int = 60,
                   ):
    """
    Uninstall a single pack, retrying and tolerating a busy server.

    Args:
        client (demisto_client): The client to connect to.
        pack_id: packs id to uninstall
        attempts_count (int): The number of attempts to uninstall the pack.
        sleep_interval (int): The sleep interval, in seconds, between attempts.
    Returns:
        Boolean - If the operation succeeded.
    """
    def success_handler(_):
        logging.success(f'Pack: {pack_id} was successfully uninstalled from the server')
        return True, None

    def should_try_handler():
        """
        Returns: true if we should try and uninstall the pack - the pack is still installed
        """
        still_installed, _ = check_if_pack_still_installed(client=client,
                                                          pack_id=pack_id)
        return still_installed

    def api_exception_handler(ex):
        # The server rejects concurrent content operations; wait for the
        # in-flight one to finish before the retry loop fires again.
        if ALREADY_IN_PROGRESS in ex.body:
            wait_succeeded = wait_until_not_updating(client)
            if not wait_succeeded:
                raise Exception(
                    "Failed to wait for the server to exit installation/updating status"
                ) from ex

    # NOTE(review): variable name typo ("massage") kept to leave code unchanged.
    failure_massage = f'Failed to uninstall pack: {pack_id}'
    return generic_request_with_retries(client=client,
                                        retries_message=failure_massage,
                                        exception_message=failure_massage,
                                        prior_message=f'Uninstalling pack {pack_id}',
                                        path=f'/contentpacks/installed/{pack_id}',
                                        method='DELETE',
                                        attempts_count=attempts_count,
                                        sleep_interval=sleep_interval,
                                        should_try_handler=should_try_handler,
                                        success_handler=success_handler,
                                        api_exception_handler=api_exception_handler)
def uninstall_packs(client: demisto_client, pack_ids: list):
    """
    Uninstall the given packs in a single bulk request.

    Args:
        client (demisto_client): The client to connect to.
        pack_ids: packs ids to uninstall
    Returns:
        True if uninstalling succeeded False otherwise.
    """
    body = {"IDs": pack_ids}
    try:
        logging.info("Attempting to uninstall all installed packs.")
        response_data, status_code, _ = demisto_client.generic_request_func(client,
                                                                            path='/contentpacks/installed/delete',
                                                                            method='POST',
                                                                            body=body,
                                                                            accept='application/json',
                                                                            _request_timeout=None)
    except Exception as e:
        logging.exception(f'The request to uninstall packs has failed. Additional info: {str(e)}')
        return False
    # The original returned True unconditionally, masking server-side
    # rejections; verify the status code before reporting success.
    if 200 <= status_code < 300:
        return True
    logging.error(f'Failed to uninstall packs - status code {status_code}, response: {response_data}')
    return False
def uninstall_all_packs(client: demisto_client, hostname, unremovable_packs: list):
    """ Lists all installed packs and uninstalls them in one bulk request.

    Args:
        unremovable_packs: list of packs that can't be uninstalled.
        client (demisto_client): The client to connect to.
        hostname (str): cloud hostname
    Returns (bool):
        A flag that indicates if the operation succeeded or not.
    """
    logging.info(f'Starting to search and uninstall packs in server: {hostname}')
    packs_to_uninstall = get_all_installed_packs(client, unremovable_packs)
    if packs_to_uninstall is None:
        # get_all_installed_packs returns None on failure; the original fell
        # through to `return True` here, reporting success on a failed fetch.
        logging.error(f'Failed to fetch installed packs from server: {hostname}')
        return False
    if packs_to_uninstall:
        return uninstall_packs(client, packs_to_uninstall)
    logging.debug('Skipping packs uninstallation - nothing to uninstall')
    return True
def reset_core_pack_version(client: demisto_client, unremovable_packs: list):
    """
    Resets core pack version to prod version.

    Args:
        unremovable_packs: list of packs that can't be uninstalled.
        client (demisto_client): The client to connect to.
    """
    # The API host carries an 'api-' prefix that the marketplace host does not.
    marketplace_host = client.api_client.configuration.host.replace('https://api-', 'https://')  # disable-secrets-detection
    _, succeeded = search_and_install_packs_and_their_dependencies(
        pack_ids=unremovable_packs,
        client=client,
        hostname=marketplace_host,
        multithreading=False,
        production_bucket=True)
    return succeeded
def wait_for_uninstallation_to_complete(client: demisto_client, unremovable_packs: list):
    """
    Query if there are still installed packs, as it might take time to complete.

    Args:
        unremovable_packs: list of packs that can't be uninstalled.
        client (demisto_client): The client to connect to.
    Returns: True if all packs were uninstalled successfully
    """
    retry = 0
    sleep_duration = 150
    try:
        installed_packs = get_all_installed_packs(client, unremovable_packs)
        # NOTE(review): get_all_installed_packs returns None on failure, which
        # makes len() below raise TypeError; that is caught by the except
        # clause and reported as a failed wait.
        # Monitoring when uninstall packs don't work
        installed_packs_amount_history, failed_uninstall_attempt_count = len(installed_packs), 0
        # new calculation for num of retries
        retries = math.ceil(len(installed_packs) / 2)
        while len(installed_packs) > len(unremovable_packs):
            if retry > retries:
                raise Exception('Waiting time for packs to be uninstalled has passed, there are still installed '
                                'packs. Aborting.')
            if failed_uninstall_attempt_count >= 3:
                raise Exception(f'Uninstalling packs failed three times. {installed_packs=}')
            logging.info(f'The process of uninstalling all packs is not over! There are still {len(installed_packs)} '
                         f'packs installed. Sleeping for {sleep_duration} seconds.')
            sleep(sleep_duration)
            installed_packs = get_all_installed_packs(client, unremovable_packs)
            if len(installed_packs) == installed_packs_amount_history:
                # did not uninstall any pack
                failed_uninstall_attempt_count += 1
            else:  # uninstalled at least one pack
                installed_packs_amount_history = len(installed_packs)
                failed_uninstall_attempt_count = 0
            retry += 1
    except Exception as e:
        logging.exception(f'Exception while waiting for the packs to be uninstalled. The error is {e}')
        return False
    return True
def sync_marketplace(client: demisto_client,
                     attempts_count: int = 5,
                     sleep_interval: int = 60,
                     sleep_time_after_sync: int = 120,
                     hard: bool = True,
                     ):
    """
    Send a request to sync marketplace.

    Args:
        hard(bool): Whether to perform a hard sync or not.
        sleep_time_after_sync(int): The sleep interval, in seconds, after sync.
        client (demisto_client): The client to connect to.
        attempts_count (int): The number of attempts to send the sync request.
        sleep_interval (int): The sleep interval, in seconds, between attempts.
    Returns:
        Boolean - If the operation succeeded.
    """
    try:
        logging.info("Attempting to sync marketplace.")
        # `attempt` counts down to 0; 0 means this is the final attempt.
        for attempt in range(attempts_count - 1, -1, -1):
            try:
                sync_marketplace_url = (
                    f'/contentpacks/marketplace/sync?hard={str(hard).lower()}'
                )
                logging.info(f"Sent request for sync, Attempt: {attempts_count - attempt}/{attempts_count}")
                response, status_code, headers = demisto_client.generic_request_func(client,
                                                                                     path=sync_marketplace_url, method='POST')
                if 200 <= status_code < 300 and status_code != 204:
                    logging.success(f'Sent request for sync successfully, sleeping for {sleep_time_after_sync} seconds.')
                    sleep(sleep_time_after_sync)
                    break
                if not attempt:
                    raise Exception(f"Got bad status code: {status_code}, headers: {headers}")
                logging.warning(f"Got bad status code: {status_code} from the server, headers: {headers}")
            except ApiException as ex:
                # The server rejects concurrent content operations; wait for
                # the in-flight one to finish before retrying.
                if ALREADY_IN_PROGRESS in ex.body:
                    wait_succeeded = wait_until_not_updating(client)
                    if not wait_succeeded:
                        raise Exception(
                            "Failed to wait for the server to exit installation/updating status"
                        ) from ex
                if not attempt:  # exhausted all attempts, understand what happened and exit.
                    # Unknown exception reason, re-raise.
                    raise Exception(f"Got {ex.status} from server, message: {ex.body}, headers: {ex.headers}") from ex
                logging.debug(f"Failed to sync marketplace, got error {ex}")
            except (HTTPError, HTTPWarning) as http_ex:
                if not attempt:
                    raise Exception("Failed to perform http request to the server") from http_ex
                logging.debug(f"Failed to sync marketplace, got error {http_ex}")
            # There are more attempts available, sleep and retry.
            logging.debug(f"failed to sync marketplace, sleeping for {sleep_interval} seconds.")
            sleep(sleep_interval)
        return True
    except Exception as e:
        logging.exception(f'The request to sync marketplace has failed. Additional info: {str(e)}')
        return False
def options_handler():
    """
    Parse the command-line options for this script.

    Returns: options parsed from input arguments.
    """
    arg_parser = argparse.ArgumentParser(description='Utility for instantiating and testing integration instances')
    arg_parser.add_argument('--cloud_machine', help='cloud machine to use, if it is cloud build.')
    arg_parser.add_argument('--cloud_servers_path', help='Path to secret cloud server metadata file.')
    arg_parser.add_argument('--cloud_servers_api_keys', help='Path to the file with cloud Servers api keys.')
    arg_parser.add_argument('--unremovable_packs', help='List of packs that cant be removed.')
    arg_parser.add_argument('--one-by-one', help='Uninstall pack one pack at a time.', action='store_true')
    arg_parser.add_argument('--build-number', help='CI job number where the instances were created', required=True)
    return arg_parser.parse_args()
def main():
    """Entry point: clean a cloud instance by uninstalling all removable packs."""
    install_logging('cleanup_cloud_instance.log', logger=logging)

    # In Cloud, We don't use demisto username
    os.environ.pop('DEMISTO_USERNAME', None)

    options = options_handler()
    host = options.cloud_machine
    logging.info(f'Starting cleanup for CLOUD server {host}')
    api_key, _, base_url, xdr_auth_id = CloudBuild.get_cloud_configuration(options.cloud_machine,
                                                                           options.cloud_servers_path,
                                                                           options.cloud_servers_api_keys)
    client = demisto_client.configure(base_url=base_url,
                                      verify_ssl=False,
                                      api_key=api_key,
                                      auth_id=xdr_auth_id)
    client.api_client.user_agent = get_custom_user_agent(options.build_number)
    logging.debug(f'Setting user agent on client to: {client.api_client.user_agent}')
    # We are syncing marketplace since we are copying production bucket to build bucket and if packs were configured
    # in earlier builds they will appear in the bucket as it is cached.
    success = sync_marketplace(client=client)
    unremovable_packs = options.unremovable_packs.split(',')
    success &= reset_core_pack_version(client, unremovable_packs)
    if success:
        if options.one_by_one:
            success = uninstall_all_packs_one_by_one(client, host, unremovable_packs)
        else:
            success = uninstall_all_packs(client, host, unremovable_packs) and \
                wait_for_uninstallation_to_complete(client, unremovable_packs)
    # Final sync so the marketplace reflects the cleaned state.
    success &= sync_marketplace(client=client)
    if not success:
        sys.exit(2)
    logging.info('Uninstalling packs done.')
|
import os, sys, random, string
from source import simplefigloader
from source import randomloader
from source import mathloader
from time import sleep
# NOTE(review): `global` at module level is a no-op; these statements only
# advertise that the interpreter functions below mutate this shared state.
global figversion
figversion = "1.1.0"  # interpreter version string
global simplefig
simplefig = 0  # becomes 1 once the SimpleFig module has been imported
global randomimport
randomimport = 0  # becomes 1 once the Random module has been imported
global mathimport
mathimport = 0  # becomes 1 once the Math module has been imported
global inIf
inIf = False  # declared global in line() but never used in the visible code
# Interpreter variable store: name -> value (strings).
variables = {
}
def addvar(name, value):
    """Register (or overwrite) an interpreter variable in the global store."""
    variables.update({name: value})
def line(line):
    """Interpret one line of Fig source code.

    Dispatch is by substring containment, not real parsing, so the order of
    the elif chain matters (e.g. "sys.out" must be matched before "out", and
    the "sys.*" forms work without any import while the bare forms require
    SimpleFig). Mutates the module-level import flags and `variables` dict.
    NOTE(review): the parameter shadows this function's own name.
    """
    global randomimport
    global simplefig
    global mathimport
    global inIf
    # "//" anywhere marks a comment line; the bare `None` is a no-op.
    if "//" in line:
        None
    elif "from system" in line:
        # "from system import X": print a banner; the loader is not invoked.
        if "import simplefig" in line:
            if simplefig == 0:
                print(f"Import SimpleFig version {simplefigloader.sfv} from Fig System")
                simpfig = 1
                simplefig = simpfig
            else:
                print("Error: SimpleFig Already imported")
        elif 'import random' in line:
            if randomimport == 0:
                print(f"Imported Random version {randomloader.rv} from Fig System")
                randomimport = 1
            else:
                print(f"Error: Random Already imported")
        elif "import math" in line:
            if mathimport == 0:
                print(f"Imported Math version {mathloader.mv} from Fig System")
                mathimport = 1
            else:
                print("Error: Math Already imported")
    elif "import" in line:
        # Plain "import X": run the corresponding loader before setting flags.
        if "simplefig" in line:
            simpfig = simplefig
            if simpfig == 0:
                simplefigloader.load()
                simpfig = 1
                simplefig = simpfig
            else:
                print("Error: SimpleFig is already imported")
        elif "random" in line:
            imprand = randomimport
            if imprand == 0:
                randomloader.load()
                randomimport = 1
            else:
                print("Error: Random is already imported")
        elif "math" in line:
            impmath = mathimport
            if impmath == 0:
                mathloader.load()
                mathimport = 1
            else:
                # NOTE(review): says 'Random' but this is the math branch —
                # likely a copy-paste slip.
                print("Error: Random is already imported")
    # sys.out 'text'; — print a single-quoted string (trailing ';' required).
    elif "sys.out" in line:
        linefinal = line.split("'")
        if ";" in linefinal[2]:
            print(linefinal[1])
        else:
            print("error. ; (semicolon) missing.")
            exit()
    elif "out" in line:
        # Bare `out` works only after SimpleFig has been imported.
        if simplefig == 1:
            linefinal = line.split("'")
            if ";" in linefinal[2]:
                print(linefinal[1])
            else:
                print("error. ; (semicolon) missing.")
                exit()
        else:
            print("Unknown function 'out'")
    # sys.input 'prompt'; — prompt the user and echo the reply.
    elif "sys.input" in line:
        linefinal = line.split("'")
        if ";" in linefinal[2]:
            var = input(linefinal[1] + "\n")
            print(var)
        else:
            print("error. ; (semicolon) missing.")
    elif "input" in line:
        if simplefig == 1:
            linefinal = line.split("'")
            if ";" in linefinal[2]:
                var = input(linefinal[1] + "\n")
                print(var)
            else:
                print("error. ; (semicolon) missing.")
        else:
            print("Unknown function 'in'")
    # sys.wait(seconds); — sleep for the given integer number of seconds.
    elif "sys.wait" in line:
        linefinal = line.split("(")
        if ";" in linefinal[1]:
            lineerfinal = linefinal[1].split(")")
            sleep(int(lineerfinal[0]))
        else:
            print("Error. ; (semicolon) missing")
    elif "wait" in line:
        if simplefig == 1:
            linefinal = line.split("(")
            if ";" in linefinal[1]:
                lineerfinal = linefinal[1].split(")")
                sleep(int(lineerfinal[0]))
            else:
                print("Error. ; (semicolon) missing")
        else:
            print("unrecognized function 'wait'")
    # sys.setvar(name,value); — store a variable in the global dict.
    elif "sys.setvar" in line:
        linefinal = line.split("(")
        lineefinal = linefinal[1].split(",")
        lineerfinal = lineefinal[1].split(")")
        linerfinal = [lineerfinal,lineefinal[0]]
        if ";" in linerfinal[0][1]:
            addvar(str(linerfinal[1]),str(linerfinal[0][0]))
            print("Set variable",'"'+str(linerfinal[1])+'"',"to",variables[str(linerfinal[1])])
        else:
            print("Error: missing ; (semicolon)")
    elif "setvar" in line:
        if simplefig == 1:
            linefinal = line.split("(")
            lineefinal = linefinal[1].split(",")
            lineerfinal = lineefinal[1].split(")")
            linerfinal = [lineerfinal,lineefinal[0]]
            if ";" in linerfinal[0][1]:
                addvar(str(linerfinal[1]),str(linerfinal[0][0]))
                print("Set variable",'"'+str(linerfinal[1])+'"',"to",variables[str(linerfinal[1])])
            else:
                print("Error: missing ; (semicolon)")
        else:
            print("Unknown function 'setvar'")
    # sys.getvar(name); — print the stored value of a variable.
    elif "sys.getvar" in line:
        linefinal = line.split("(")
        lineefinal = linefinal[1].split(")")
        if ";" in lineefinal[1]:
            print(variables[lineefinal[0]])
        else:
            print("Error: missing ; (semicolon)")
    elif "getvar" in line:
        if simplefig == 1:
            linefinal = line.split("(")
            lineefinal = linefinal[1].split(")")
            if ";" in lineefinal[1]:
                print(variables[lineefinal[0]])
            else:
                print("Error: missing ; (semicolon)")
        else:
            print("Unknown function 'getvar'")
    # sys.vars; — dump all defined variables.
    elif "sys.vars" in line:
        if ";" in line:
            print("Defined Variables:")
            for i in variables:
                print(str(i)+": "+str(variables[i]))
            print("")
        else:
            print("Error: missing ';' (semicolon)")
    elif "vars" in line:
        if simplefig == 1:
            if ";" in line:
                print("Defined Variables:")
                for i in variables:
                    print(str(i)+": "+str(variables[i]))
                print("")
            else:
                print("Error: missing ';' (semicolon)")
        else:
            print("Unknown function 'vars'")
    # random.int; — print a random non-negative 31-bit integer.
    elif "random.int" in line:
        if randomimport == 1:
            if ";" in line:
                randomint = random.randint(0,2147483647)
                print(randomint)
        else:
            print("Unknown function 'random.int'")
    elif "int" in line:
        if randomimport == 1:
            if simplefig == 1:
                if ";" in line:
                    print(random.randint(0,2147483647))
        else:
            print("Unknown function 'int'")
    # NOTE(review): unreachable — any line containing "random.str" also
    # contains "int"? No: it matches "random.int" first only when "int" is
    # present; "random.str" lines reach here via the elif order.
    elif "random.str" in line:
        if randomimport == 1:
            if ";" in line:
                print(''.join(random.choice(string.ascii_lowercase) for i in range(12)))
        else:
            print("Unknown function 'random.str'")
    elif "str" in line:
        if randomimport == 1:
            if simplefig == 1:
                if ";" in line:
                    print(''.join(random.choice(string.ascii_lowercase) for i in range(12)))
        else:
            print("Unknown function 'str'")
    # math.<op>(a,b); — integer arithmetic on two comma-separated operands.
    elif "math.add" in line:
        if mathimport == 1:
            linefinal = line.split("(")
            if ";" in linefinal[1]:
                linerfinal = linefinal[1].split(")")
                lineerfinal = linerfinal[0].split(",")
                print(int(lineerfinal[0])+int(lineerfinal[1]))
        else:
            print("Unknown function 'math.add'")
    elif "math.sub" in line:
        if mathimport == 1:
            linefinal = line.split("(")
            if ";" in linefinal[1]:
                linerfinal = linefinal[1].split(")")
                lineerfinal = linerfinal[0].split(",")
                print(int(lineerfinal[0])-int(lineerfinal[1]))
        else:
            print("Unknown function 'math.sub'")
    elif "math.div" in line:
        if mathimport == 1:
            linefinal = line.split("(")
            if ";" in linefinal[1]:
                linerfinal = linefinal[1].split(")")
                lineerfinal = linerfinal[0].split(",")
                print(int(lineerfinal[0])/int(lineerfinal[1]))
        else:
            print("Unknown function 'math.div'")
    elif "math.mult" in line:
        if mathimport == 1:
            linefinal = line.split("(")
            if ";" in linefinal[1]:
                linerfinal = linefinal[1].split(")")
                lineerfinal = linerfinal[0].split(",")
                print(int(lineerfinal[0])*int(lineerfinal[1]))
        else:
            print("Unknown function 'math.mult'")
    # Bare math forms require both SimpleFig and Math to be imported.
    elif "mult" in line:
        if simplefig == 1:
            if mathimport == 1:
                linefinal = line.split("(")
                if ";" in linefinal[1]:
                    linerfinal = linefinal[1].split(")")
                    lineerfinal = linerfinal[0].split(",")
                    print(int(lineerfinal[0])*int(lineerfinal[1]))
            else:
                print("Unknown function 'mult'")
        else:
            print("Unknown function 'mult'")
    elif "div" in line:
        if simplefig == 1:
            if mathimport == 1:
                linefinal = line.split("(")
                if ";" in linefinal[1]:
                    linerfinal = linefinal[1].split(")")
                    lineerfinal = linerfinal[0].split(",")
                    print(int(lineerfinal[0])/int(lineerfinal[1]))
            else:
                print("Unknown function 'div'")
        else:
            print("Unknown function 'div'")
    elif "add" in line:
        if simplefig == 1:
            if mathimport == 1:
                linefinal = line.split("(")
                if ";" in linefinal[1]:
                    linerfinal = linefinal[1].split(")")
                    lineerfinal = linerfinal[0].split(",")
                    print(int(lineerfinal[0])+int(lineerfinal[1]))
            else:
                print("Unknown function 'add'")
        else:
            print("Unknown function 'add'")
    elif "sub" in line:
        if simplefig == 1:
            if mathimport == 1:
                linefinal = line.split("(")
                if ";" in linefinal[1]:
                    linerfinal = linefinal[1].split(")")
                    lineerfinal = linerfinal[0].split(",")
                    # NOTE(review): adds instead of subtracting — likely a
                    # copy-paste slip from the 'add' branch.
                    print(int(lineerfinal[0])+int(lineerfinal[1]))
            else:
                print("Unknown function 'sub'")
        else:
            print("Unknown function 'sub'")
    elif "math.pow" in line:
        if mathimport == 1:
            linefinal = line.split("(")
            if ";" in linefinal[1]:
                linerfinal = linefinal[1].split(")")
                lineerfinal = linerfinal[0].split(",")
                print(int(lineerfinal[0])**int(lineerfinal[1]))
        else:
            print("Unknown function 'math.pow'")
    elif "pow" in line:
        if simplefig == 1:
            if mathimport == 1:
                linefinal = line.split("(")
                if ";" in linefinal[1]:
                    linerfinal = linefinal[1].split(")")
                    lineerfinal = linerfinal[0].split(",")
                    print(int(lineerfinal[0])**int(lineerfinal[1]))
            else:
                print("Unknown function 'pow'")
        else:
            print("Unknown function 'pow'")
class compile:
    # NOTE(review): this "class" is used purely as a run-once namespace --
    # its body executes at definition time, feeding every non-empty line of
    # main.fig to the module-level `line()` dispatcher.  The file handle
    # from open() is never closed; a plain function with `with open(...)`
    # would be the conventional form.
    file = open("main.fig","r").read()
    file = file.split("\n")
    for i in range(len(file)):
        if(file[i] != ""):   # skip blank lines
            line(file[i])
# qutebrowser configuration.
# Keep settings made via the GUI (:set) and merge them with this file.
config.load_autoconfig()
# Search-engine keywords: typing "<prefix> <query>" in the URL bar expands
# the {} placeholder with the (URL-encoded) query.
c.url.searchengines = {
    "DEFAULT": "https://www.google.com/search?q={}",
    'red': 'https://reddit.com/r/{}',
    'tw': 'https://twitter.com/{}',
    'aw': 'https://wiki.archlinux.org/index.php?title=Special%3ASearch&search={}',
    'yt': 'https://www.youtube.com/results?search_query={}',
    # 'ytv': 'https://youtube.com/search?q={}',
}
|
#1412성준호  (author tag: "1412 Seong Jun-ho")
# Columnar transposition: for each of n test cases, a string is padded with
# 'a','b','c',... until its length is a multiple of len(k), chopped into
# rows of len(k) characters, and each row is re-ordered by the digit key k
# (each digit is a 1-based column index).  Output: "#<case> <ciphertext>".
n=int(input())
k=input()
dummy=[]
anslist=[]
for i in range(n):
    c=[]
    ans=[]
    b=97                         # next ASCII pad character, starts at 'a'
    a=list(input())
    while len(a)%len(k)!=0:      # pad until the text fills whole rows
        a.append(chr(b))
        b+=1
    d=0                          # NOTE(review): d is never used
    while len(a)!=0:
        c.append(list(a[:len(k)]))   # snapshot one row...
        for j in range(len(k)):      # ...then consume those characters
            if len(a)!=0:
                # NOTE(review): dummy is cleared right after each append,
                # so this is just an elaborate a.pop(0) loop.
                dummy.append(a.pop(0))
                dummy.clear()
            else:
                break
    for j in range(len(c)):
        for l in range(len(k)):
            # k[l] is a 1-based column index into row j
            ans.append(c[j][int(k[l])-1])
    ansint='#%d '%(i+1)
    for j in range(len(ans)):
        ansint+=ans[j]
    anslist.append(ansint)
for i in range(len(anslist)):
    print(anslist[i])
|
import sys
sys.path.insert(0,'/hiddenx/Toolkit/toolkitlogs')
import logger
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from pydrive.files import *
from googleapiclient.errors import HttpError
from collections import defaultdict
from pprint import pprint
import json
logger = logger.get_logger(__name__)
logger.info("Configuring Paths.")
config_path = "Google_Drive/config.json"
def load_mimetype_json():
    """Load the mimeType -> friendly-label table from the Drive config file."""
    with open(config_path, 'r') as handle:
        config = json.load(handle)
    return config['mimeType']
# MimeType = {"application/vnd.google-apps.folder":"folder","image/jpeg":"image",
# "application/vnd.google-apps.spreadsheet":"spreadsheet","text/plain":"text","application/pdf":"pdf"}
def exceptionWrapper(function):
    """Call *function* with no arguments, logging instead of propagating
    any exception.

    Returns:
        Whatever *function* returns, or None when it raised.  (Previously
        the result was always discarded, making the wrapper useless for
        value-producing callables.)
    """
    try:
        return function()
    except Exception as e:
        logger.error(f"{e} - Error")
        return None
def authenticate():
    """
    Authenticate to Google API.

    Runs the PyDrive OAuth flow, caching credentials in
    mycreds_googleDrive.txt so subsequent runs skip the browser step.
    Returns a ready-to-use GoogleDrive client.
    """
    logger.info("Authenticating Google Drive API.")
    gauth = GoogleAuth()
    # Try to load saved client credentials
    logger.info("Checking for credential file.")
    gauth.LoadCredentialsFile("mycreds_googleDrive.txt")
    if gauth.credentials is None:
        # Authenticate if they're not there (opens a browser window)
        logger.info("Authenticating using Local Web Server.")
        gauth.LocalWebserverAuth()
    elif gauth.access_token_expired:
        # Refresh them if expired
        logger.info("Refreshing Auth Token.")
        gauth.Refresh()
    else:
        # Initialize the saved creds
        logger.info("Authorizing Saved credentials.")
        gauth.Authorize()
    # Save the current credentials to a file for the next run
    gauth.SaveCredentialsFile("mycreds_googleDrive.txt")
    logger.info("Authorization Complete.")
    return GoogleDrive(gauth)
def get_folders(drive, parent_folder_id="root"):
    """List the non-trashed children of *parent_folder_id* on Google Drive.

    Returns the GetList() result, None when the parent folder does not
    exist; re-raises any other HTTP error.
    """
    import ast  # local: only needed to parse the error payload

    # Auto-iterate through all files in the parent folder.
    file_list = GoogleDriveFileList()
    try:
        logger.info(f"Searching Drive for {parent_folder_id}")
        file_list = drive.ListFile(
            {'q': "'{0}' in parents and trashed=false".format(parent_folder_id)}
        ).GetList()
        logger.info(f"{file_list}")
    # BUG FIX: this module imports HttpError directly; the old
    # `googleapiclient.errors.HttpError` raised NameError (the package name
    # was never bound), and `ast` was never imported at all.
    except HttpError as err:
        # Parse error message
        message = ast.literal_eval(err.content)['error']['message']
        if message == 'File not found: ':
            print(message)
            logger.info(message + "not found in drive.")
            return
        # Exit with stacktrace in case of other error
        else:
            logger.info(f"Exiting : {message}")
            raise
    return file_list
def display_folder(file_list):
    """Pretty-print a Drive file listing with a friendly mime-type label.

    Unknown mime types are forwarded to unknown_mimetype() so they get
    recorded in the config for future runs.
    """
    if file_list is None:
        # BUG FIX: end was 'n' (a literal letter), not a newline.
        print("No Folders in drive", sep=' ', end='\n', file=sys.stdout, flush=False)
    else:
        # Hoisted out of the loop: the mapping file does not change per entry,
        # so read it once instead of once per file.
        MimeType = load_mimetype_json()
        count = 1
        for file1 in file_list:
            logger.debug(f"Title : {file1['title']} , ID : {file1['id']} , mimeType : {file1['mimeType']}")
            mimetype = MimeType.get(file1['mimeType'], "Unknown")
            if mimetype == "Unknown":
                unknown_mimetype(file1['mimeType'])  # record it for later labelling
            print(f"{count}.{file1['title']} - {mimetype}", end='\n')
            count += 1
def unknown_mimetype(mimeType):
    """Record an unseen *mimeType* in the JSON config so it becomes known.

    The new type is stored mapped to itself as a placeholder label that a
    human can rename later.  File-system problems are logged; any other
    exception propagates.
    """
    try:
        with open(config_path, 'r') as in_file:
            data = json.load(in_file)
        # Map the raw mimeType to itself as a placeholder label.
        d1 = {mimeType: mimeType}
        data['mimeType'].update(d1)
        with open(config_path, mode='w') as out_file:
            out_file.write(json.dumps(data, indent=4, sort_keys=True))
    except IOError as e:
        logger.error(f"File does not exist. {e.errno}{e.strerror}")
    # (removed the trailing bare `except: raise`, which was a no-op --
    # unhandled exceptions propagate anyway)
def remove_all_duplicateFiles():
    # TODO: unimplemented.  Intended design (from the sketch below):
    # compare non-folder files by md5Checksum, delete duplicates, and keep
    # a log of what was deleted and from where.
    # if file1['mimeType'] != "application/vnd.google-apps.folder" :
    #     print(f"{file1['md5Checksum']} - {file1['title']}")
    # else :
    # and store those duplicate files list of what u deleted and from where it was deleted.
    pass
def get_folder_id(drive, parent_folder_id, parent_folder_name):
    """
    Check if destination folder exists and return it's ID.

    Exits the process when the parent folder itself is missing; re-raises
    any other HTTP error.  Returns None when no child carries the
    requested title.
    """
    import ast  # local: only needed to parse the error payload

    # Auto-iterate through all files in the parent folder.
    file_list = GoogleDriveFileList()
    try:
        file_list = drive.ListFile(
            {'q': "'{0}' in parents and trashed=false".format(parent_folder_id)}
        ).GetList()
    # BUG FIX: use the directly imported HttpError; `googleapiclient` and
    # `ast` were previously unresolved names (NameError in this handler).
    except HttpError as err:
        # Parse error message
        message = ast.literal_eval(err.content)['error']['message']
        if message == 'File not found: ':
            print(message + parent_folder_name)
            logger.info(message + parent_folder_name + "not found in drive.")
            exit(1)
        # Exit with stacktrace in case of other error
        else:
            raise
    # Find the destination folder in the parent folder's files
    for file1 in file_list:
        if file1['title'] == parent_folder_name:
            print('title: %s, id: %s' % (file1['title'], file1['id']))
            logger.info('title: %s, id: %s' % (file1['title'], file1['id']))
            return file1['id']
def create_folder(drive, folder_name, parent_folder_id):
    """
    Create folder on Google Drive.

    Builds the folder metadata, uploads it, and returns the new folder's ID.
    """
    meta = {
        'title': folder_name,
        # Define the file type as folder
        'mimeType': 'application/vnd.google-apps.folder',
        # ID of the parent folder
        'parents': [{"kind": "drive#fileLink", "id": parent_folder_id}],
    }
    new_folder = drive.CreateFile(meta)
    new_folder.Upload()
    # Return folder informations
    print('title: %s, id: %s' % (new_folder['title'], new_folder['id']))
    return new_folder['id']
def upload_files(drive, folder_id, src_folder_name):
    """
    Upload files in the local folder to Google Drive.

    Every non-empty file in *src_folder_name* is uploaded into the Drive
    folder *folder_id*; empty files are skipped with a notice.
    """
    import os  # BUG FIX: chdir/listdir/stat were used without any import

    # Enter the source folder
    try:
        os.chdir(src_folder_name)
    # Print error if source folder doesn't exist
    except OSError:
        # BUG FIX: missing space in the message, and the function used to
        # fall through and upload the *current* directory instead.
        print(src_folder_name + ' is missing')
        logger.error("Source folder doesn't exist.")
        return
    # Auto-iterate through all files in the folder.
    for file1 in os.listdir('.'):
        # Check the file's size
        statinfo = os.stat(file1)
        if statinfo.st_size > 0:
            print('Uploading ' + file1)
            # Upload file to folder.
            f = drive.CreateFile(
                {"parents": [{"kind": "drive#fileLink", "id": folder_id}]})
            f.SetContentFile(file1)
            f.Upload()
        # Skip the file if it's empty
        else:
            print('File {0} is empty'.format(file1))
            logger.info('File {0} is empty'.format(file1))
def display_files_and_folders():
    """Convenience wrapper: list and pretty-print the Drive root folder."""
    # Uses the module-level `drive` client created just below.
    return display_folder(get_folders(drive))
# Module-level side effect: authenticate on import and keep one shared client.
drive = authenticate()
|
from protean.core.exceptions import ObjectNotFoundError
from protean.globals import current_domain
from realworld.domain import domain
from realworld.model.user import User
@domain.repository(aggregate_cls=User)
class UserRepository:
    """Lookup repository for the User aggregate.

    All getters return the matching User, or None when no record exists.
    The three public methods previously duplicated the DAO/try/except
    boilerplate; they now share one private helper.
    """

    @classmethod
    def _find_one(cls, **filters):
        # Shared lookup: translate "not found" into None for callers.
        user_dao = current_domain.get_dao(User)
        try:
            return user_dao.find_by(**filters)
        except ObjectNotFoundError:
            return None

    @classmethod
    def get_by_email(cls, email: str) -> User:
        """Return the user with this email, or None."""
        return cls._find_one(email=email)

    @classmethod
    def get_by_username(cls, username: str) -> User:
        """Return the user with this username, or None."""
        return cls._find_one(username=username)

    @classmethod
    def get_by_token(cls, token: str) -> User:
        """Return the user holding this auth token, or None."""
        # FIXME Should return None if token has expired
        return cls._find_one(token=token)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 17 15:26:33 2018
@author: LENOVO
"""
from tkinter.filedialog import askopenfilename
from tkinter import *
import tkinter.messagebox
from PIL import Image
def click():
    """Flip the loaded image top-to-bottom, show it, and add a Save button."""
    im1=im.transpose(Image.FLIP_TOP_BOTTOM)   # `im` is the global set in path()
    im1.show()
    def save1():
        # Save next to the original as <name>_flip_top.<ext>
        y=[]
        y=(filename).split(".")
        im1.save(y[0]+"_flip_top."+y[1])
        tkinter.messagebox.showinfo("Saved"," Saved as "+y[0]+"_flip_top."+y[1])
    # NOTE(review): .grid() returns None, so Button18 is always None.
    Button18 = Button(window,text="Save",command=save1,font="Times 12").grid(row=7,column=1,sticky=E)
def click1():
    """Flip the loaded image left-to-right, show it, and add a Save button."""
    im1=im.transpose(Image.FLIP_LEFT_RIGHT)
    def save2():
        # Save next to the original as <name>_flip_left.<ext>
        y=[]
        y=(filename).split(".")
        im1.save(y[0]+"_flip_left."+y[1])
        tkinter.messagebox.showinfo("Saved"," Saved as "+y[0]+"_flip_left."+y[1])
    im1.show()
    Button12 = Button(window,text="Save",font="Times 12",command=save2).grid(row=8,column=1,sticky=E)
def click3():
    """Transpose the loaded image, show it, and add a Save button."""
    im1=im.transpose(Image.TRANSPOSE)
    def save3():
        # Save next to the original as <name>_transpose.<ext>
        y=[]
        y=(filename).split(".")
        im1.save(y[0]+"_transpose."+y[1])
        tkinter.messagebox.showinfo("Saved"," Saved as "+y[0]+"_transpose."+y[1])
    im1.show()
    Button13 = Button(window,text="Save",font="Times 12",command=save3).grid(row=6,column=1,sticky=E)
def click4():
    """Rotate the loaded image 90 degrees, show it, and add a Save button."""
    im1=im.transpose(Image.ROTATE_90)
    def save4():
        # Save next to the original as <name>_rotate_90.<ext>
        y=[]
        y=(filename).split(".")
        im1.save(y[0]+"_rotate_90."+y[1])
        tkinter.messagebox.showinfo("Saved"," Saved as "+y[0]+"_rotate_90."+y[1])
    im1.show()
    Button14 = Button(window,text="Save",font="Times 12",command=save4).grid(row=9,column=1,sticky=E)
def click5():
    """Rotate the loaded image 180 degrees, show it, and add a Save button."""
    im1=im.transpose(Image.ROTATE_180)
    def save5():
        # Save next to the original as <name>_rotate_180.<ext>
        y=[]
        y=(filename).split(".")
        im1.save(y[0]+"_rotate_180."+y[1])
        tkinter.messagebox.showinfo("Saved"," Saved as "+y[0]+"_rotate_180."+y[1])
    im1.show()
    Button15 = Button(window,text="Save",font="Times 12",command=save5).grid(row=10,column=1,sticky=E)
def click6():
    """Rotate the loaded image 270 degrees, show it, and add a Save button."""
    im1=im.transpose(Image.ROTATE_270)
    def save6():
        # Save next to the original as <name>_rotate_270.<ext>
        y=[]
        y=(filename).split(".")
        im1.save(y[0]+"_rotate_270."+y[1])
        tkinter.messagebox.showinfo("Saved"," Saved as "+y[0]+"_rotate_270."+y[1])
    im1.show()
    Button16 = Button(window,text="Save",font="Times 12",command=save6).grid(row=11,column=1,sticky=E)
def path1():
    """Validate the width/height entries, resize the image, and show it.

    Guards: both entries filled, numeric, and within 1..2000 pixels.
    """
    if((entry1.get()) =="" or (entry2.get()) == ""):
        tkinter.messagebox.showerror("Error",message="Please enter the values")
        return
    if((entry1.get()).isalpha() or (entry2.get()).isalpha()):
        tkinter.messagebox.showerror("Error",message="Please enter the numeric values")
        return
    if(int(entry1.get()) > 2000 or int(entry2.get()) > 2000):
        tkinter.messagebox.showerror("Error",message="Image size out of range !\nMaximum size for both width and height is 2000 pixels")
        return
    if(int(entry1.get()) < 0 or int(entry2.get()) < 0):
        tkinter.messagebox.showerror("Error",message="Image size out of range !\nMinimum size for both width and height is 1 pixels")
        return
    im1=im.resize((int(entry1.get()),int(entry2.get())))
    def save7():
        # Save next to the original as <name>_size_<w>x<h>.<ext>
        y=[]
        y=(filename).split(".")
        im1.save(y[0]+"_size_"+entry1.get()+"x"+entry2.get()+"."+y[1])
        tkinter.messagebox.showinfo("Saved"," Saved as "+y[0]+"_size_"+entry1.get()+"x"+entry2.get()+"."+y[1])
    im1.show()
    Button17 = Button(window,text="Save",font="Times 12",command=save7).grid(row=15,column=1,sticky=NE)
def click7():
    """Show the width/height entry widgets and the Done button for resizing."""
    label3 = Label(window,text="Enter the size in pixels( Width * Height )",font="Times 12").grid(row=13,column=1,columnspan=(3),sticky=W)
    # Globals so path1()/save7() can read the entered dimensions.
    global entry1,entry2
    entry1=Entry(window,width=10)
    entry1.grid(row=14,column=1,sticky=NW)
    label12=Label(window,text="X",width=6).grid(row=14,column=1,sticky=N)
    entry2=Entry(window,width=10)
    entry2.grid(row=14,column=1,sticky=NE)
    Button11 = Button(window,text="Done",command=path1,font="Times 12").grid(row=15,column=1,sticky=NW)
def loadtemplate():
    """Open a file dialog and display the chosen image path next to it."""
    # Global so path() and the save helpers can reuse the selection.
    global filename
    filename = tkinter.filedialog.askopenfilename(filetypes = (("Image files", "*.jpg;*.png;*.bmp;*.gif"),("All files", "*")))
    entry=Label(window,text=filename,width=65,font="Times 12")
    entry.grid(row=1,column=2)
def path():
    """Confirm the chosen file, load it, and render the preview + buttons."""
    global im
    answer = tkinter.messagebox.askquestion("Alert",message=" Is this the required path : - \n"+filename)
    if(answer=='no'):
        return;
    im=Image.open(filename)
    # Fixed-size preview written to a scratch PNG, because Tk's PhotoImage
    # cannot render a PIL image object directly.
    # NOTE(review): Image.ANTIALIAS was removed in Pillow >= 10 -- confirm
    # the pinned Pillow version (Image.LANCZOS is the modern spelling).
    img=im.resize((800,500),Image.ANTIALIAS)
    img.save("sample.png")
    photo= PhotoImage(file = "sample.png")
    label =Label(window,image=photo,height=500,width=800)
    label.image=photo   # keep a reference so Tk doesn't garbage-collect it
    label.grid(row=6,rowspan=(9),column=2,columnspan=3,pady=30)
    # One button per supported operation; each handler shows its own Save.
    label3 = Label(window,text="The Various operations that can be performed on the Image are : ",font="Times 12").grid(row=4,columnspan=(3),sticky=W,pady=20)
    Button2 = Button(window,text="1.Transpose",font="Times 12",command=click3).grid(row=6,column=1,sticky=W)
    Button3 = Button(window,text="2.Flip(Top-Bottom)",font="Times 12",command=click).grid(row=7,column=1,sticky=W)
    Button4 = Button(window,text="3.Flip(Left-Right)",font="Times 12",command=click1).grid(row=8,column=1,sticky=W)
    Button5 = Button(window,text="4.Rotate 90 Degree",font="Times 12",command=click4).grid(row=9,column=1,sticky=W)
    Button6 = Button(window,text="5.Rotate 180 Degree",font="Times 12",command=click5).grid(row=10,column=1,sticky=W)
    Button7 = Button(window,text="6.Rotate 270 Degree",font="Times 12",command=click6).grid(row=11,column=1,sticky=W)
    Button8 = Button(window,text="7.Resize",font="Times 12",command=click7).grid(row=12,column=1,sticky=W)
    Button9 = Button(window,text="Quit",font="Times 12",command=window.destroy).grid(row=15,column=4,sticky=SE)
# ---- Main window construction (runs at import) ----
window=Tk()
window.title("Image Processing")
window.geometry("1980x1080")
image=PhotoImage(file="Theme.png")   # background theme; the file must exist
labeli=Label(image=image)
labeli.image=image                   # keep a reference so Tk doesn't GC it
labeli.place(x=0,y=0,relwidth=1.0,relheight=1.0,anchor=NW)
label1 = Label(window,text=" Welcome to KLE TECH Image processing Page ",fg="yellow",bg="darkblue",font="Times 18 bold",anchor=CENTER).grid(row=0,columnspan=(6),sticky=N)
label2 = Label(window,text="Enter the image location",font="Times 14").grid(row=1,columnspan=(2),pady=20,sticky=W)
Label10=Label(window,width=65,text="Please Browse for the location of Image",font="Times 12").grid(row=1,column=2)
Button10 = Button(window,text="Done",command=path,font="Times 12").grid(row=1,column=5)
Button(window, text = "Browse", command = loadtemplate,font="Times 12").grid(row=1,column=4,sticky=E)
window.mainloop()
from django.shortcuts import render, get_object_or_404
# Create your views here.
def post_list_view(request):
    """Render the blog home page.

    NOTE(review): the template path starts with './' -- Django template
    loaders normally expect 'blog/post/home.html'; confirm this resolves.
    """
    return render(request, './blog/post/home.html')
#!/usr/bin/env python
import onmt
import onmt.markdown
import argparse
import torch
import subprocess
import time, datetime
from onmt.data.binarizer import Binarizer
from onmt.data.binarizer import SpeechBinarizer
from onmt.data.indexed_dataset import IndexedDatasetBuilder
import numpy as np
import warnings
import os
from os.path import dirname, abspath
import gc
warnings.filterwarnings("ignore", category=UserWarning)
# Command-line interface for preprocess.py: builds vocabularies and
# binarized datasets for training.  `opt` is consumed as a module-level
# global by the functions below.
parser = argparse.ArgumentParser(description='preprocess.py')
onmt.markdown.add_md_help_argument(parser)
# **Preprocess Options**
parser.add_argument('-multi_dataset', action='store_true',
                    help="Save each dataset separately instead of one joined dataset")
parser.add_argument('-multi_mirror', action='store_true',
                    help="Save each dataset separately instead of one joined dataset")
parser.add_argument('-resume', action='store_true',
                    help="If the dataset is created, ignored and create the next one")
parser.add_argument('-config', help="Read options from this file")
parser.add_argument('-src_type', default="text",
                    help="Type of the source input. Options are [text|img|audio].")
parser.add_argument('-sort_type', default="ascending",
                    help="Type of sorting. Options are [ascending|descending].")
parser.add_argument('-src_img_dir', default=".",
                    help="Location of source images")
parser.add_argument('-stride', type=int, default=1,
                    help="Stride on input features")
parser.add_argument('-concat', type=int, default=1,
                    help="Concate sequential audio features to decrease sequence length")
parser.add_argument('-previous_context', type=int, default=0,
                    help="Number of previous sentence for context")
parser.add_argument('-input_type', default="word",
                    help="Input type: word/char")
parser.add_argument('-data_type', default="int64",
                    help="Input type for storing text (int64|int32|int|int16) to reduce memory load")
parser.add_argument('-format', default="raw",
                    help="Save data format: binary or raw. Binary should be used to load faster")
parser.add_argument('-external_tokenizer', default="",
                    help="External tokenizer from Huggingface. Currently supports barts.")
# Input corpora (training / validation, with optional past/future context
# and auxiliary targets).
parser.add_argument('-train_src', required=True,
                    help="Path to the training source data")
parser.add_argument('-past_train_src', default="",
                    help="Path to the training source data")
parser.add_argument('-future_train_src', default="",
                    help="Path to the training source data")
parser.add_argument('-train_tgt', required=True,
                    help="Path to the training target data")
parser.add_argument('-aux_train_tgt', default="",
                    help="Path to the training source data")
parser.add_argument('-valid_src', required=True,
                    help="Path to the validation source data")
parser.add_argument('-past_valid_src', default="",
                    help="Path to the validation source data")
parser.add_argument('-future_valid_src', default="",
                    help="Path to the validation source data")
parser.add_argument('-valid_tgt', required=True,
                    help="Path to the validation target data")
parser.add_argument('-aux_valid_tgt', default="",
                    help="Path to the training source data")
# Language / attribute tags per split.
parser.add_argument('-train_src_lang', default="src",
                    help="Language(s) of the source sequences.")
parser.add_argument('-train_src_atbs', default="",
                    help="Attributes(s) of the source sequences.")
parser.add_argument('-train_tgt_lang', default="tgt",
                    help="Language(s) of the target sequences.")
parser.add_argument('-train_tgt_atbs', default="",
                    help="Attributes(s) of the source sequences.")
parser.add_argument('-valid_src_lang', default="src",
                    help="Language(s) of the source sequences.")
parser.add_argument('-valid_src_atbs', default="",
                    help="Attributes(s) of the source sequences.")
parser.add_argument('-valid_tgt_lang', default="tgt",
                    help="Language(s) of the target sequences.")
parser.add_argument('-valid_tgt_atbs', default="",
                    help="Attributes(s) of the source sequences.")
parser.add_argument('-save_data', required=True,
                    help="Output file for the prepared data")
# Vocabulary construction / loading.
parser.add_argument('-src_vocab_size', type=int, default=9999999,
                    help="Size of the source vocabulary")
parser.add_argument('-tgt_vocab_size', type=int, default=9999999,
                    help="Size of the target vocabulary")
parser.add_argument('-src_vocab',
                    help="Path to an existing source vocabulary")
parser.add_argument('-tgt_vocab',
                    help="Path to an existing target vocabulary")
parser.add_argument('-load_dict',
                    help="Path to an existing target vocabulary")
parser.add_argument('-src_seq_length', type=int, default=10000,
                    help="Maximum source sequence length")
parser.add_argument('-src_seq_length_trunc', type=int, default=0,
                    help="Truncate source sequence length.")
parser.add_argument('-tgt_seq_length', type=int, default=10000,
                    help="Maximum target sequence length to keep.")
parser.add_argument('-tgt_seq_length_trunc', type=int, default=0,
                    help="Truncate target sequence length.")
# tokens
parser.add_argument('-src_bos_token', type=str, default="<s>",
                    help='SRC BOS Token Default is <s>.')
parser.add_argument('-src_eos_token', type=str, default="</s>",
                    help='SRC BOS Token. Default is </s>.')
parser.add_argument('-src_unk_token', type=str, default="<unk>",
                    help='SRC Unk Token. Default is <unk>.')
parser.add_argument('-src_pad_token', type=str, default="<blank>",
                    help='SRC PAD Token. Default is <blank>.')
parser.add_argument('-tgt_bos_token', type=str, default="<s>",
                    help='TGT BOS Token Default is <s>.')
parser.add_argument('-tgt_eos_token', type=str, default="</s>",
                    help='TGT BOS Token. Default is </s>.')
parser.add_argument('-tgt_unk_token', type=str, default="<unk>",
                    help='TGT Unk Token. Default is <unk>.')
parser.add_argument('-tgt_pad_token', type=str, default="<blank>",
                    help='TGT PAD Token. Default is <blank>.')
parser.add_argument('-shuffle', type=int, default=1,
                    help="Shuffle data")
# ASR / LM specific options.
parser.add_argument('-asr', action='store_true',
                    help="prepare data for asr task")
parser.add_argument('-asr_format', default="h5",
                    help="Format of asr data h5 or scp")
parser.add_argument('-lm', action='store_true',
                    help="prepare data for LM task")
parser.add_argument('-fp16', action='store_true',
                    help="store ASR data in fp16")
parser.add_argument('-seed', type=int, default=3435,
                    help="Random seed")
parser.add_argument('-lower', action='store_true', help='lowercase data')
parser.add_argument('-load_bpe_voc', action='store_true', help='lowercase data')
parser.add_argument('-no_bos', action='store_true', help='not adding bos word (this is done manually in the data)')
parser.add_argument('-sort_by_target', action='store_true', help='lowercase data')
parser.add_argument('-join_vocab', action='store_true', help='Using one dictionary for both source and target')
parser.add_argument('-report_every', type=int, default=100000,
                    help="Report status every this many sentences")
parser.add_argument('-reshape_speech', type=int, default=1,
                    help="Reshaping the speech segments here. Mostly for compatibility..")
parser.add_argument('-num_threads', type=int, default=1,
                    help="Number of threads for multiprocessing")
parser.add_argument('-verbose', action='store_true',
                    help="Print out information during preprocessing")
opt = parser.parse_args()
# Seed torch for reproducible shuffling.
torch.manual_seed(opt.seed)
def make_vocab(name, filenames, size, tokenizer, num_workers=1):
if name == "source":
vocab = onmt.Dict([opt.src_pad_token, opt.src_unk_token,
opt.src_bos_token, opt.src_eos_token],
lower=opt.lower)
elif name == "target":
vocab = onmt.Dict([opt.tgt_pad_token, opt.tgt_unk_token,
opt.tgt_bos_token, opt.tgt_eos_token],
lower=opt.lower)
else:
print("Warning: check the name")
exit(-1)
for filename in filenames:
print("Generating vocabulary from file %s ... " % filename)
onmt.Dict.gen_dict_from_file(filename, vocab, tokenizer, num_workers=num_workers)
original_size = vocab.size()
vocab = vocab.prune(size)
print('Created dictionary of size %d (pruned from %d)' %
(vocab.size(), original_size))
return vocab
def init_vocab(name, data_files, vocab_file, vocab_size, tokenizer, num_workers=1):
    """Return a vocabulary: loaded from *vocab_file* when given, otherwise
    built from *data_files* via make_vocab()."""
    vocab = None
    if vocab_file is not None:
        # If given, load existing word dictionary.
        print('Reading ' + name + ' vocabulary from \'' + vocab_file + '\'...')
        if not opt.load_bpe_voc:
            vocab = onmt.Dict()
        elif name == "target":
            vocab = onmt.Dict([opt.tgt_pad_token, opt.tgt_unk_token,
                               opt.tgt_bos_token, opt.tgt_eos_token],
                              lower=opt.lower)
        elif name == "source":
            vocab = onmt.Dict([opt.src_pad_token, opt.src_unk_token,
                               opt.src_bos_token, opt.src_eos_token],
                              lower=opt.lower)
        else:
            print("Warning: name should be source or target")
            exit(-1)
        vocab.loadFile(vocab_file)
        print('Loaded ' + str(vocab.size()) + ' ' + name + ' words')
    if vocab is None:
        print('Building ' + name + ' vocabulary...')
        vocab = make_vocab(name, data_files, vocab_size, tokenizer,
                           num_workers=num_workers)
    print()
    return vocab
def save_vocabulary(name, vocab, file):
    """Persist *vocab* to *file* via the dictionary's writeFile method."""
    print('Saving ' + name + ' vocabulary to \'' + file + '\'...')
    vocab.writeFile(file)
def save_dataset(path, data, format, dicts, src_type):
    """Serialize one dataset split into directory *path*, dispatching on
    *format*.

    Each dataset is comprised of the following components:
      src: tensors for the source vectors, or the scp_path (in ASR case)
      tgt: tensors for the target vectors
      src_lang: tensors for the source language ids (simplified)
      tgt_lang: tensors for the target language ids (simplified)

    NOTE(review): the last branch tests the global opt.format rather than
    the *format* parameter, and the raw branch stores opt.src_type rather
    than the *src_type* parameter -- confirm whether that is intended.
    """
    # convert all datasets to pytorch tensors and save to .pt
    if format in ['raw', 'bin']:
        print('Saving data to ' + os.path.join(path, 'data.pt') + '...')
        save_data = {'type': opt.src_type,
                     'data': data}
        torch.save(save_data, os.path.join(path, 'data.pt'))
        print("Done")
    # for ASR only
    elif format in ['scp', 'scpmem', 'wav']:
        print('Saving target data to memory indexed data files. Source data is stored only as scp path.')
        from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
        assert opt.asr, "ASR data format is required for this memory indexed format"
        # TODO: changing this to before saving everything
        # torch.save(dicts, opt.save_data + '.dict.pt')
        # binarize the training set first
        for set_ in ['tgt', 'aux_tgt', 'src_lang', 'tgt_lang', 'src_atb', 'tgt_atb']:
            if set_ not in data or data[set_] is None:
                continue
            if opt.data_type == 'int64':
                dtype = np.int64
            else:
                dtype = np.int32
            indexed_data = MMapIndexedDatasetBuilder(os.path.join(path, "data.%s.bin" % set_), dtype=dtype)
            # add item from training data to the indexed data
            for tensor in data[set_]:
                indexed_data.add_item(tensor)
            indexed_data.finalize(os.path.join(path, "data.%s.idx" % set_))
            del indexed_data
        for set_ in ['src_sizes', 'tgt_sizes']:
            if data[set_] is not None:
                np_array = np.asarray(data[set_])
                # NOTE(review): the % substitution here is applied to the
                # *joined path* (os.path.join(...) % set_) -- it works, but
                # differs from the mmap branch below; confirm intent.
                np.save(os.path.join(path, "data.%s.npy") % set_, np_array)
            else:
                print("Training %s not found " % set_)
        # Finally save the audio path
        torch.save(data['src'], os.path.join(path, 'data.scp_path.pt'))
        if 'prev_src' in data and data['prev_src'] is not None:
            torch.save(data['prev_src'], os.path.join(path, 'data.prev_scp_path.pt'))
        print("Done")
    elif opt.format in ['mmap', 'mmem']:
        print('Saving data to memory indexed data files')
        from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
        if opt.asr:
            print("ASR data format isn't compatible with memory indexed format")
            raise AssertionError
        # save dicts in this format
        # torch.save(dicts, opt.save_data + '.dict.pt')
        # binarize the training set first
        for set_ in ['src', 'tgt', 'src_lang', 'tgt_lang', 'src_atb', 'tgt_atb']:
            if set_ not in data or data[set_] is None:
                continue
            if opt.data_type == 'int64':
                dtype = np.int64
            else:
                dtype = np.int32
            indexed_data = MMapIndexedDatasetBuilder(os.path.join(path, "data.%s.bin" % set_), dtype=dtype)
            # add item from training data to the indexed data
            for tensor in data[set_]:
                indexed_data.add_item(tensor)
            indexed_data.finalize(os.path.join(path, "data.%s.idx" % set_))
            del indexed_data
        for set_ in ['src_sizes', 'tgt_sizes']:
            if data[set_] is not None:
                np_array = np.asarray(data[set_])
                np.save(os.path.join(path, "data.%s.npy" % set_), np_array)
            else:
                print("Set %s not found " % set_)
def make_lm_data(tgt_file, tgt_dicts, max_tgt_length=1000, input_type='word', data_type='int32'):
    """Read *tgt_file* and return one long 1-D tensor of token ids for LM
    training, beginning with an EOS marker.

    NOTE(review): torch.LongTensor(1).fill_(opt.tgt_eos_token) passes the
    EOS *string* (e.g. "</s>") to fill_, which expects a number -- this
    looks like it should be the EOS index from tgt_dicts; confirm.
    NOTE(review): max_tgt_length and the `tgt`/`sizes` accumulators are
    unused, and split_line_by_char is not defined in this module.
    """
    tgt = []
    sizes = []
    count, ignored = 0, 0
    print('Processing %s ...' % (tgt_file))
    tgtf = open(tgt_file)
    eos = torch.LongTensor(1).fill_(opt.tgt_eos_token)
    # print(eos.size())
    tensors = [eos]
    # find the number of words in the sentence
    while True:
        tline = tgtf.readline()
        # normal end of file
        if tline == "":
            break
        tline = tline.strip()
        # source and/or target are empty
        if tline == "":
            print('WARNING: ignoring an empty line (' + str(count + 1) + ')')
            continue
        if input_type == 'word':
            tgt_words = tline.split()
        elif input_type == 'char':
            tgt_words = split_line_by_char(tline)
        tensor = tgt_dicts.convertToIdx(tgt_words,
                                        opt.tgt_unk_token,
                                        None,
                                        opt.tgt_eos_token,
                                        type=data_type)
        # print(tensor.size())
        tensors.append(tensor)
        count = count + 1
        if count % opt.report_every == 0:
            print('... %d sentences prepared' % count)
    tgtf.close()
    # concatenate all tensors into one
    tensor = torch.cat(tensors, dim=-1)
    return tensor
def make_translation_data(src_file, tgt_file, src_dicts, tgt_dicts, tokenizer, max_src_length=64, max_tgt_length=64,
                          add_bos=True, data_type='int64', num_workers=1, verbose=False,
                          external_tokenizer=None, src_lang=None, tgt_lang=None, lang_list=[],
                          early_save=False, savedir="", mirror=False, mirror_savedir=""):
    """Binarize a parallel (src, tgt) file pair.

    With early_save=False, returns (src, tgt, src_sizes, tgt_sizes) as
    in-memory lists.  With early_save=True, data is streamed straight into
    MMap indexed files under *savedir* (optionally symlinked into
    *mirror_savedir* with src/tgt roles swapped for the reverse direction)
    and (None, None, None, None) is returned.

    NOTE(review): lang_list=[] is a mutable default argument -- it is only
    read here, but all callers share the same list object.
    NOTE(review): when early_save is True, src is None at the final print,
    so len(src) raises TypeError -- confirm and guard (e.g. use src_len).
    """
    src, tgt = [], []
    src_sizes = []
    tgt_sizes = []
    if type(lang_list) is dict:
        lang_list = sorted(list(lang_list.keys()))
    print("[INFO] Binarizing file %s ..." % src_file)
    binarized_src = Binarizer.binarize_file(src_file, src_dicts, tokenizer,
                                            bos_word=None, eos_word=None,
                                            data_type=data_type,
                                            num_workers=num_workers, verbose=verbose,
                                            external_tokenizer=external_tokenizer,
                                            lang=src_lang, lang_list=lang_list, target=False
                                            )
    if early_save:
        os.makedirs(savedir, exist_ok=True)
        if mirror:
            os.makedirs(mirror_savedir, exist_ok=True)
        src_len = len(binarized_src['data'])
        print("Saving source data to %s .... with %d entries" % (savedir, src_len))
        if data_type == 'int64':
            dtype = np.int64
        else:
            dtype = np.int32
        from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
        indexed_data = MMapIndexedDatasetBuilder(os.path.join(savedir, "data.%s.bin" % "src"), dtype=dtype)
        # add item from training data to the indexed data
        for tensor in binarized_src['data']:
            indexed_data.add_item(tensor)
        indexed_data.finalize(os.path.join(savedir, "data.%s.idx" % "src"))
        # Free the in-memory tensors as soon as they are on disk.
        del binarized_src['data']
        gc.collect()
        np_array = np.asarray(binarized_src['sizes'])
        np.save(os.path.join(savedir, "data.%s.npy" % "src_sizes"), np_array)
        del binarized_src
        del indexed_data
        del np_array
        gc.collect()
        if mirror:
            # The mirrored dataset reuses the same binary files via
            # symlinks, with the src and tgt roles swapped.
            print("Saving mirrrored target data to %s .... with %d entries" % (mirror_savedir, src_len))
            source = os.path.join(savedir, "data.%s.bin" % "src")
            target = os.path.join(mirror_savedir, "data.%s.bin" % "tgt")
            os.symlink(os.path.abspath(source), target)
            source = os.path.join(savedir, "data.%s.idx" % "src")
            target = os.path.join(mirror_savedir, "data.%s.idx" % "tgt")
            os.symlink(os.path.abspath(source), target)
            source = os.path.join(savedir, "data.%s.npy" % "src_sizes")
            target = os.path.join(mirror_savedir, "data.%s.npy" % "tgt_sizes")
            os.symlink(os.path.abspath(source), target)
    if add_bos:
        tgt_bos_word = opt.tgt_bos_token
    else:
        tgt_bos_word = None
    print("[INFO] Binarizing file %s ..." % tgt_file)
    binarized_tgt = Binarizer.binarize_file(tgt_file, tgt_dicts, tokenizer,
                                            bos_word=tgt_bos_word, eos_word=opt.tgt_eos_token,
                                            data_type=data_type,
                                            num_workers=num_workers, verbose=verbose,
                                            external_tokenizer=external_tokenizer,
                                            lang=tgt_lang, lang_list=lang_list, target=True
                                            )
    if early_save:
        tgt_len = len(binarized_tgt['data'])
        assert tgt_len == src_len, "Number of samples doesn't match between source and target!!!"
        print("Saving target data to %s .... with %d samples" % (savedir, tgt_len))
        if data_type == 'int64':
            dtype = np.int64
        else:
            dtype = np.int32
        from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
        indexed_data = MMapIndexedDatasetBuilder(os.path.join(savedir, "data.%s.bin" % "tgt"), dtype=dtype)
        # add item from training data to the indexed data
        for tensor in binarized_tgt['data']:
            indexed_data.add_item(tensor)
        indexed_data.finalize(os.path.join(savedir, "data.%s.idx" % "tgt"))
        del binarized_tgt['data']
        del indexed_data
        gc.collect()
        np_array = np.asarray(binarized_tgt['sizes'])
        np.save(os.path.join(savedir, "data.%s.npy" % "tgt_sizes"), np_array)
        del binarized_tgt
        del np_array
        gc.collect()
        if mirror:
            print("Saving mirrrored source data to %s .... with %d entries" % (mirror_savedir, src_len))
            source = os.path.join(savedir, "data.%s.bin" % "tgt")
            target = os.path.join(mirror_savedir, "data.%s.bin" % "src")
            os.symlink(os.path.abspath(source), target)
            source = os.path.join(savedir, "data.%s.idx" % "tgt")
            target = os.path.join(mirror_savedir, "data.%s.idx" % "src")
            os.symlink(os.path.abspath(source), target)
            source = os.path.join(savedir, "data.%s.npy" % "tgt_sizes")
            target = os.path.join(mirror_savedir, "data.%s.npy" % "src_sizes")
            os.symlink(os.path.abspath(source), target)
        src, tgt, src_sizes, tgt_sizes = None, None, None, None
    else:
        src = binarized_src['data']
        src_sizes = binarized_src['sizes']
        tgt = binarized_tgt['data']
        tgt_sizes = binarized_tgt['sizes']
    # currently we don't ignore anything :D
    ignored = 0
    print(('Prepared %d sentences ' +
           '(%d ignored due to length == 0 or src len > %d or tgt len > %d)') %
          (len(src), ignored, max_src_length, max_tgt_length))
    return src, tgt, src_sizes, tgt_sizes
def make_asr_data(src_file, tgt_file, tgt_dicts, tokenizer,
                  max_src_length=64, max_tgt_length=64, add_bos=True, data_type='int64',
                  num_workers=1, verbose=False,
                  input_type='word', stride=1, concat=4, prev_context=0, fp16=False, reshape=True,
                  asr_format="scp", output_format="raw",
                  external_tokenizer=None, src_lang=None, tgt_lang=None, aux_tgt_file=None,
                  lang_list=None):
    """Binarize one speech (ASR/ST) shard: audio features on the source side,
    token ids on the (optional) target side.

    Args:
        src_file: path to the audio feature file (scp/kaldi/wav, per ``asr_format``).
        tgt_file: path to the target transcript/translation file, or None.
        tgt_dicts: target vocabulary used by the Binarizer.
        tokenizer: tokenizer instance passed through to the Binarizer.
        aux_tgt_file: optional second target file binarized the same way.
        lang_list: list of known languages (default: empty list).

    Returns:
        Tuple ``(src, tgt, src_sizes, tgt_sizes, aux_tgt, aux_tgt_sizes)``;
        the tgt/aux entries are None when the corresponding file is None.

    Note: reads the module-level ``opt`` for bos/eos tokens.
    """
    if lang_list is None:
        # avoid the shared-mutable-default pitfall of `lang_list=[]`
        lang_list = []
    src, tgt = [], []
    src_sizes = []
    tgt_sizes = []
    count, ignored = 0, 0
    n_unk_words = 0
    if add_bos:
        tgt_bos_word = opt.tgt_bos_token
    else:
        tgt_bos_word = None
    if tgt_file is not None:
        print("[INFO] Binarizing file %s ..." % tgt_file)
        binarized_tgt = Binarizer.binarize_file(tgt_file, tgt_dicts, tokenizer,
                                                bos_word=tgt_bos_word, eos_word=opt.tgt_eos_token,
                                                data_type=data_type,
                                                num_workers=num_workers, verbose=verbose,
                                                external_tokenizer=external_tokenizer,
                                                lang=tgt_lang, lang_list=lang_list, target=True)
        tgt = binarized_tgt['data']
        tgt_sizes = binarized_tgt['sizes']
        ignored = 0
    else:
        tgt = None
        tgt_sizes = None
    if aux_tgt_file is not None:
        aux_tgt = []
        print("[INFO] Binarizing auxiliary target file %s ..." % aux_tgt_file)
        aux_binarized_tgt = Binarizer.binarize_file(aux_tgt_file, tgt_dicts, tokenizer,
                                                    bos_word=tgt_bos_word, eos_word=opt.tgt_eos_token,
                                                    data_type=data_type,
                                                    num_workers=num_workers, verbose=verbose,
                                                    external_tokenizer=external_tokenizer,
                                                    lang=tgt_lang, lang_list=lang_list)
        aux_tgt = aux_binarized_tgt['data']
        aux_tgt_sizes = aux_binarized_tgt['sizes']
        ignored = 0
    else:
        aux_tgt = None
        aux_tgt_sizes = None
    print('[INFO] Processing %s ...' % src_file)
    # num_workers = num_workers if asr_format in ['scp', 'kaldi'] else 1
    # speech binarizer has to be 1 thread at the moment
    binarized_src = SpeechBinarizer.binarize_file(src_file, input_format=asr_format,
                                                  output_format=output_format, concat=concat,
                                                  stride=stride, fp16=fp16, prev_context=prev_context,
                                                  num_workers=num_workers, verbose=verbose)
    src = binarized_src['data']
    src_sizes = binarized_src['sizes']
    # BUGFIX: test tgt_file first (tgt_sizes is None when there is no target,
    # so len(tgt_sizes) would raise), and pass a *tuple* to %-formatting —
    # the original applied % to len(src_sizes) alone and crashed with
    # "not enough arguments for format string".
    if tgt_file is not None and len(src_sizes) != len(tgt_sizes):
        print("Warning: data size mismatched. Src: %d . Tgt: %d" % (len(src_sizes), len(tgt_sizes)))
    print(('Prepared %d sentences ' +
           '(%d ignored due to length == 0 or src len > %d or tgt len > %d)') %
          (len(src), ignored, max_src_length, max_tgt_length))
    return src, tgt, src_sizes, tgt_sizes, aux_tgt, aux_tgt_sizes
def main():
    """Entry point of the preprocessing pipeline.

    Builds/loads vocabularies and language/attribute dictionaries, binarizes
    the training and validation data for one of three model families
    (``opt.lm`` language model, ``opt.asr`` speech model, or translation),
    and serializes everything in the format selected by ``opt.format``
    (raw/bin, scp/scpmem/wav, or mmap/mmem). All configuration comes from
    the module-level ``opt`` namespace.
    """
    dicts = {}
    tokenizer = onmt.Tokenizer(opt.input_type, opt.lower)
    # We can load the dictionary from another project to ensure consistency
    if opt.load_dict is not None and len(opt.load_dict) > 0:
        print("[INFO] Loading dictionary from ... %s" % opt.load_dict)
        dicts = torch.load(opt.load_dict)
    # construct set of languages from the training languages
    src_langs = opt.train_src_lang.split("|")
    tgt_langs = opt.train_tgt_lang.split("|")
    langs = (src_langs + tgt_langs)
    langs = sorted(list(set(langs)))
    if len(opt.train_src_atbs) > 0:
        src_atbs = opt.train_src_atbs.split("|")
        tgt_atbs = opt.train_tgt_atbs.split("|")
        atbs = (src_atbs + tgt_atbs)
        atbs = sorted(list(set(atbs)))
    else:
        atbs = []
    if not opt.load_dict:
        # fresh dictionaries: assign consecutive ids in sorted order
        dicts['langs'] = dict()
        for lang in langs:
            idx = len(dicts['langs'])
            dicts['langs'][lang] = idx
        dicts['atbs'] = dict()
        for atb in atbs:
            idx = len(dicts['atbs'])
            dicts['atbs'][atb] = idx
    else:
        # extend the loaded dictionaries with any unseen languages/attributes
        if 'langs' not in dicts:
            dicts['langs'] = dict()
        else:
            print(dicts['langs'])
            print("Adding languages to existing dictionary ...")
        for lang in langs:
            idx = len(dicts['langs'])
            if lang not in dicts['langs']:
                dicts['langs'][lang] = idx
        if 'atbs' not in dicts:
            dicts['atbs'] = dict()
        else:
            print("Adding attributes to existing dictionary ...")
        for atb in atbs:
            idx = len(dicts['atbs'])
            if atb not in dicts['atbs']:
                dicts['atbs'][atb] = idx
    print("Languages: ", dicts['langs'])
    print("Attributes: ", dicts['atbs'])
    start = time.time()
    src_train_files = opt.train_src.split("|")
    tgt_train_files = opt.train_tgt.split("|")
    # for ASR and LM we only need to build vocab for the 'target' language
    if opt.asr or opt.lm:
        dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab,
                                  opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
    elif opt.join_vocab:
        dicts['src'] = init_vocab('source', set(src_train_files + tgt_train_files), opt.src_vocab,
                                  opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
        dicts['tgt'] = dicts['src']
    else:
        dicts['src'] = init_vocab('source', src_train_files, opt.src_vocab,
                                  opt.src_vocab_size, tokenizer, num_workers=opt.num_threads)
        dicts['tgt'] = init_vocab('target', tgt_train_files, opt.tgt_vocab,
                                  opt.tgt_vocab_size, tokenizer, num_workers=opt.num_threads)
    elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
    print("Vocabulary generated after %s" % elapse)
    if opt.lm:
        print('Preparing training language model ...')
        train = dict()
        train['tgt'] = make_lm_data(opt.train_tgt,
                                    dicts['tgt'])
        train['src'] = None
        valid = dict()
        valid['tgt'] = make_lm_data(opt.valid_tgt,
                                    dicts['tgt'])
        valid['src'] = None
        train['src_sizes'] = None
        train['tgt_sizes'] = None
        valid['src_sizes'] = None
        valid['tgt_sizes'] = None
    elif opt.asr:
        print('Preparing training acoustic model ...')
        src_input_files = opt.train_src.split("|")
        tgt_input_files = opt.train_tgt.split("|")
        src_langs = opt.train_src_lang.split("|")
        tgt_langs = opt.train_tgt_lang.split("|")
        src_atbs = opt.train_src_atbs.split("|") if len(atbs) > 0 else [None] * len(src_input_files)
        tgt_atbs = opt.train_tgt_atbs.split("|") if len(atbs) > 0 else [None] * len(tgt_input_files)
        assert len(src_input_files) == len(src_langs)
        assert len(src_input_files) == len(src_atbs)
        assert len(src_input_files) == len(tgt_input_files)
        assert len(tgt_input_files) == len(tgt_langs)
        assert len(tgt_input_files) == len(tgt_atbs)
        past_src_files = opt.past_train_src.split("|")
        idx = 0
        n_input_files = len(src_input_files)
        # Training data ###################################################################
        train = dict()
        train['src'], train['tgt'] = list(), list()
        train['src_sizes'], train['tgt_sizes'] = list(), list()
        train['src_atb'], train['tgt_atb'] = list(), list()
        train['src_lang'], train['tgt_lang'] = list(), list()
        data = dict()
        if opt.past_train_src and len(past_src_files) == len(src_input_files):
            train['past_src'] = list()
            train['past_src_sizes'] = list()
        for i, (src_file, tgt_file, src_lang, tgt_lang, src_atb, tgt_atb) in \
                enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs, src_atbs, tgt_atbs)):
            data_name = "train.%i.%s-%s" % (idx, src_lang, tgt_lang)
            dataset_path = os.path.join(dirname(opt.save_data), data_name)
            if opt.multi_dataset and opt.resume:
                if os.path.exists(dataset_path):
                    print("[INFO] Found data %s in the savedir ... Ignoring" % data_name)
                    idx = idx + 1
                    continue
            # BUGFIX: make_asr_data returns 6 values (incl. aux target);
            # the original unpacked only 4 and raised ValueError.
            src_data, tgt_data, src_sizes, tgt_sizes, _, _ = make_asr_data(
                src_file, tgt_file,
                dicts['tgt'], tokenizer,
                max_src_length=opt.src_seq_length,
                max_tgt_length=opt.tgt_seq_length,
                input_type=opt.input_type,
                stride=opt.stride, concat=opt.concat,
                prev_context=opt.previous_context,
                fp16=opt.fp16,
                add_bos=not opt.no_bos,
                asr_format=opt.asr_format,
                output_format=opt.format,
                num_workers=opt.num_threads,
                external_tokenizer=opt.external_tokenizer,
                tgt_lang=tgt_lang, verbose=opt.verbose,
                lang_list=dicts['langs'])
            n_samples = len(src_data)
            src_atb_data, tgt_atb_data = None, None
            if n_input_files == 1 or opt.multi_dataset:
                # For single-file cases we only need to have 1 language per file
                # which will be broadcasted
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
                # by default its 0
                if len(atbs) > 0:
                    src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]])]
                    tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]])]
            else:
                # each sample will have a different language id
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
                if len(atbs) > 0:
                    src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]]) for _ in range(n_samples)]
                    tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]]) for _ in range(n_samples)]
            # processing the previous segment
            if opt.past_train_src and len(past_src_files) == len(src_input_files):
                past_src_file = past_src_files[i]
                past_src_data, _, past_src_sizes, _, _, _ = make_asr_data(
                    past_src_file, None, None, None,
                    input_type=opt.input_type,
                    stride=opt.stride, concat=opt.concat,
                    prev_context=opt.previous_context,
                    add_bos=not opt.no_bos,
                    fp16=opt.fp16,
                    asr_format=opt.asr_format,
                    output_format=opt.format,
                    num_workers=opt.num_threads,
                    external_tokenizer=opt.external_tokenizer,
                    tgt_lang=tgt_lang, verbose=opt.verbose,
                    lang_list=dicts['langs'])
                if opt.multi_dataset:
                    # BUGFIX: was `prev_src_data`, an undefined name (NameError)
                    data['prev_src'] = past_src_data
                else:
                    train['past_src'] += past_src_data
                    train['past_src_sizes'] += past_src_sizes
            # Finalizing Training data ###################################################################
            if opt.multi_dataset:
                data['src'] = src_data
                data['tgt'] = tgt_data
                data['src_sizes'] = src_sizes
                data['tgt_sizes'] = tgt_sizes
                data['src_lang'] = src_lang_data
                data['tgt_lang'] = tgt_lang_data
                if len(atbs) > 0:
                    data['src_atb'] = src_atb_data
                    data['tgt_atb'] = tgt_atb_data
                print("Saving training set %i %s-%s to disk ..." % (idx, src_lang, tgt_lang))
                # take basedir from opt.save_data
                path = os.path.join(dirname(opt.save_data), "train.%i.%s-%s" % (idx, src_lang, tgt_lang))
                os.makedirs(path, exist_ok=True)
                # save data immediately
                # TODO: save the prev src as well
                save_dataset(path, data, opt.format, dicts, opt.src_type)
                idx = idx + 1
                del data
                data = dict()
            else:
                train['src'] += src_data
                train['tgt'] += tgt_data
                train['src_sizes'] += src_sizes
                train['tgt_sizes'] += tgt_sizes
                train['src_lang'] += src_lang_data
                train['tgt_lang'] += tgt_lang_data
                if len(atbs) > 0:
                    train['src_atb'] += src_atb_data
                    train['tgt_atb'] += tgt_atb_data
        # Validation data ###################################################################
        print('Preparing validation ...')
        src_input_files = opt.valid_src.split("|")
        tgt_input_files = opt.valid_tgt.split("|")
        past_src_files = opt.past_valid_src.split("|")
        src_langs = opt.valid_src_lang.split("|")
        tgt_langs = opt.valid_tgt_lang.split("|")
        src_atbs = opt.valid_src_atbs.split("|") if len(atbs) > 0 else [None] * len(src_input_files)
        tgt_atbs = opt.valid_tgt_atbs.split("|") if len(atbs) > 0 else [None] * len(tgt_input_files)
        assert len(src_input_files) == len(src_langs)
        assert len(src_input_files) == len(tgt_input_files)
        assert len(tgt_input_files) == len(tgt_langs)
        idx = 0
        n_input_files = len(src_input_files)
        data = dict()
        valid = dict()
        valid['src'], valid['tgt'] = list(), list()
        valid['src_sizes'], valid['tgt_sizes'] = list(), list()
        valid['src_lang'], valid['tgt_lang'] = list(), list()
        valid['src_atb'], valid['tgt_atb'] = list(), list()
        if opt.past_train_src and len(past_src_files) == len(src_input_files):
            valid['past_src'] = list()
            valid['past_src_sizes'] = list()
        for i, (src_file, tgt_file, src_lang, tgt_lang, src_atb, tgt_atb) in \
                enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs, src_atbs, tgt_atbs)):
            data_name = "valid.%i.%s-%s" % (idx, src_lang, tgt_lang)
            dataset_path = os.path.join(dirname(opt.save_data), data_name)
            if opt.multi_dataset and opt.resume:
                if os.path.exists(dataset_path):
                    print("[INFO] Found data %s in the savedir ... Ignoring" % data_name)
                    idx = idx + 1
                    continue
            # BUGFIX: 6-value unpack (see training loop above)
            src_data, tgt_data, src_sizes, tgt_sizes, _, _ = make_asr_data(
                src_file, tgt_file,
                dicts['tgt'], tokenizer,
                max_src_length=max(1024, opt.src_seq_length),
                max_tgt_length=max(1024, opt.tgt_seq_length),
                input_type=opt.input_type,
                stride=opt.stride, concat=opt.concat,
                prev_context=opt.previous_context,
                fp16=opt.fp16,
                add_bos=not opt.no_bos,
                asr_format=opt.asr_format,
                output_format=opt.format,
                external_tokenizer=opt.external_tokenizer,
                tgt_lang=tgt_lang, verbose=opt.verbose,
                lang_list=dicts['langs'])
            n_samples = len(src_data)
            src_atb_data, tgt_atb_data = None, None
            if n_input_files == 1 or opt.multi_dataset:
                # For single-file cases we only need to have 1 language per file
                # which will be broadcasted
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
                # by default its 0
                if len(atbs) > 0:
                    src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]])]
                    tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]])]
            else:
                # each sample will have a different language id
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
                if len(atbs) > 0:
                    src_atb_data = [torch.Tensor([dicts['atbs'][src_atb]]) for _ in range(n_samples)]
                    tgt_atb_data = [torch.Tensor([dicts['atbs'][tgt_atb]]) for _ in range(n_samples)]
            # validation past file
            if opt.past_train_src and len(past_src_files) == len(src_input_files):
                past_src_file = past_src_files[i]
                past_src_data, _, past_src_sizes, _, _, _ = make_asr_data(
                    past_src_file, None, None, None,
                    input_type=opt.input_type,
                    stride=opt.stride, concat=opt.concat,
                    prev_context=opt.previous_context,
                    fp16=opt.fp16,
                    add_bos=not opt.no_bos,
                    asr_format=opt.asr_format,
                    output_format=opt.format,
                    num_workers=opt.num_threads,
                    external_tokenizer=opt.external_tokenizer,
                    tgt_lang=tgt_lang, verbose=opt.verbose,
                    lang_list=dicts['langs'])
                valid['past_src'] += past_src_data
                valid['past_src_sizes'] += past_src_sizes
            # Finalizing Validation data ... #########################
            if opt.multi_dataset:
                data['src'] = src_data
                data['tgt'] = tgt_data
                data['src_sizes'] = src_sizes
                data['tgt_sizes'] = tgt_sizes
                data['src_lang'] = src_lang_data
                data['tgt_lang'] = tgt_lang_data
                if len(atbs) > 0:
                    data['src_atb'] = src_atb_data
                    data['tgt_atb'] = tgt_atb_data
                print("Saving validation set %i %s-%s to disk ..." % (idx, src_lang, tgt_lang))
                # take basedir from opt.save_data
                path = os.path.join(dirname(opt.save_data), "valid.%i.%s-%s" % (idx, src_lang, tgt_lang))
                os.makedirs(path, exist_ok=True)
                # save data immediately
                save_dataset(path, data, opt.format, dicts, opt.src_type)
                idx = idx + 1
                del data
                data = dict()
            else:
                valid['src'] += src_data
                valid['tgt'] += tgt_data
                valid['src_sizes'] += src_sizes
                valid['tgt_sizes'] += tgt_sizes
                valid['src_lang'] += src_lang_data
                valid['tgt_lang'] += tgt_lang_data
                if len(atbs) > 0:
                    valid['src_atb'] += src_atb_data
                    valid['tgt_atb'] += tgt_atb_data
    else:
        # Translation model: text on both sides
        src_input_files = opt.train_src.split("|")
        tgt_input_files = opt.train_tgt.split("|")
        src_langs = opt.train_src_lang.split("|")
        tgt_langs = opt.train_tgt_lang.split("|")
        assert len(src_input_files) == len(src_langs)
        assert len(src_input_files) == len(tgt_input_files)
        assert len(tgt_input_files) == len(tgt_langs)
        past_src_files = opt.past_train_src.split("|")
        n_input_files = len(src_input_files)
        idx = 0
        data = dict()
        train = dict()
        train['src'], train['tgt'] = list(), list()
        train['src_sizes'], train['tgt_sizes'] = list(), list()
        train['src_lang'], train['tgt_lang'] = list(), list()
        if opt.past_train_src and len(past_src_files) == len(src_input_files):
            train['past_src'] = list()
            train['past_src_sizes'] = list()
        start = time.time()
        print('Binarizing data to train translation models...')
        for i, (src_file, tgt_file, src_lang, tgt_lang) in \
                enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs)):
            # with multi_mirror each pair occupies two dataset slots (fwd + reversed)
            dataset_idx = idx if not opt.multi_mirror else 2 * idx
            data_name = "train.%i.%s-%s" % (dataset_idx, src_lang, tgt_lang)
            mirrored_data_name = "train.%i.%s-%s" % (dataset_idx + 1, tgt_lang, src_lang)
            dataset_path = os.path.join(dirname(opt.save_data), data_name)
            mirrored_dataset_path = os.path.join(dirname(opt.save_data), mirrored_data_name)
            if opt.multi_dataset:
                if opt.resume and os.path.exists(dataset_path):
                    print("[INFO] Found data %s in the savedir ... Ignoring" % data_name)
                    idx = idx + 1
                    continue
                else:
                    os.makedirs(dataset_path, exist_ok=True)
            src_data, tgt_data, src_sizes, tgt_sizes = make_translation_data(
                src_file, tgt_file,
                dicts['src'], dicts['tgt'], tokenizer,
                max_src_length=opt.src_seq_length,
                max_tgt_length=opt.tgt_seq_length,
                add_bos=(not opt.no_bos),
                data_type=opt.data_type,
                num_workers=opt.num_threads,
                verbose=opt.verbose,
                external_tokenizer=opt.external_tokenizer,
                src_lang=src_lang,
                tgt_lang=tgt_lang,
                lang_list=dicts['langs'],
                early_save=opt.multi_dataset,
                savedir=dataset_path,
                mirror=opt.multi_mirror,
                mirror_savedir=mirrored_dataset_path)
            if n_input_files == 1 or opt.multi_dataset:
                # For single-file cases we only need to have 1 language per file
                # which will be broadcasted
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
            else:
                assert src_data is not None
                n_samples = len(src_data)
                # each sample will have a different language id
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
            # processing the previous segment
            if opt.past_train_src and len(past_src_files) == len(src_input_files):
                past_src_file = past_src_files[i]
                past_src_data, _, past_src_sizes, _ = make_translation_data(
                    past_src_file, '/dev/null',
                    dicts['src'], dicts['src'], tokenizer,
                    max_src_length=opt.src_seq_length,
                    max_tgt_length=opt.tgt_seq_length,
                    add_bos=(not opt.no_bos),
                    data_type=opt.data_type,
                    num_workers=opt.num_threads,
                    verbose=opt.verbose,
                    external_tokenizer=opt.external_tokenizer,
                    src_lang=src_lang,
                    tgt_lang=tgt_lang,
                    lang_list=dicts['langs'])
                if opt.multi_dataset:
                    # BUGFIX: was `prev_src_data`, an undefined name (NameError)
                    data['prev_src'] = past_src_data
                else:
                    train['past_src'] += past_src_data
                    train['past_src_sizes'] += past_src_sizes
            if opt.multi_dataset:
                data['src'] = src_data
                data['tgt'] = tgt_data
                data['src_sizes'] = src_sizes
                data['tgt_sizes'] = tgt_sizes
                data['src_lang'] = src_lang_data
                data['tgt_lang'] = tgt_lang_data
                print("Saving training set %i %s-%s to disk ..." % (dataset_idx, src_lang, tgt_lang))
                # take basedir from opt.save_data
                path = dataset_path
                os.makedirs(path, exist_ok=True)
                # save data immediately
                # TODO: save the prev src as well
                save_dataset(path, data, opt.format, dicts, opt.src_type)
                if opt.multi_mirror:
                    # mirrored dataset: swap source and target sides
                    mdata = dict()
                    mdata['src'] = tgt_data
                    mdata['tgt'] = src_data
                    mdata['tgt_sizes'] = src_sizes
                    mdata['src_sizes'] = tgt_sizes
                    mdata['tgt_lang'] = src_lang_data
                    mdata['src_lang'] = tgt_lang_data
                    print("Saving training set %i %s-%s to disk ..." % (dataset_idx + 1, tgt_lang, src_lang))
                    # take basedir from opt.save_data
                    path = mirrored_dataset_path
                    os.makedirs(path, exist_ok=True)
                    # save data immediately
                    # TODO: save the prev src as well
                    save_dataset(path, mdata, opt.format, dicts, opt.src_type)
                idx = idx + 1
                del data
                data = dict()
            else:
                train['src'] += src_data
                train['tgt'] += tgt_data
                train['src_sizes'] += src_sizes
                train['tgt_sizes'] += tgt_sizes
                train['src_lang'] += src_lang_data
                train['tgt_lang'] += tgt_lang_data
        print('Preparing validation ...')
        src_input_files = opt.valid_src.split("|")
        tgt_input_files = opt.valid_tgt.split("|")
        past_src_files = opt.past_valid_src.split("|")
        src_langs = opt.valid_src_lang.split("|")
        tgt_langs = opt.valid_tgt_lang.split("|")
        assert len(src_input_files) == len(src_langs)
        assert len(src_input_files) == len(tgt_input_files)
        assert len(tgt_input_files) == len(tgt_langs)
        n_input_files = len(src_input_files)
        idx = 0
        data = dict()
        valid = dict()
        valid['src'], valid['tgt'] = list(), list()
        valid['src_sizes'], valid['tgt_sizes'] = list(), list()
        valid['src_lang'], valid['tgt_lang'] = list(), list()
        if opt.past_train_src and len(past_src_files) == len(src_input_files):
            valid['past_src'] = list()
            valid['past_src_sizes'] = list()
        # BUGFIX: enumerate so `i` is defined for past_src_files[i] below
        # (the original loop never bound `i` -> NameError with past sources)
        for i, (src_file, tgt_file, src_lang, tgt_lang) in \
                enumerate(zip(src_input_files, tgt_input_files, src_langs, tgt_langs)):
            src_data, tgt_data, src_sizes, tgt_sizes = make_translation_data(
                src_file, tgt_file,
                dicts['src'], dicts['tgt'], tokenizer,
                max_src_length=max(1024, opt.src_seq_length),
                max_tgt_length=max(1024, opt.tgt_seq_length),
                add_bos=(not opt.no_bos),
                data_type=opt.data_type,
                num_workers=opt.num_threads,
                verbose=opt.verbose,
                external_tokenizer=opt.external_tokenizer,
                src_lang=src_lang,
                tgt_lang=tgt_lang,
                lang_list=dicts['langs']
                )
            n_samples = len(src_data)
            if n_input_files == 1 or opt.multi_dataset:
                # For single-file cases we only need to have 1 language per file
                # which will be broadcasted
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]])]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]])]
            else:
                # each sample will have a different language id
                src_lang_data = [torch.Tensor([dicts['langs'][src_lang]]) for _ in range(n_samples)]
                tgt_lang_data = [torch.Tensor([dicts['langs'][tgt_lang]]) for _ in range(n_samples)]
            # validation past file
            if opt.past_train_src and len(past_src_files) == len(src_input_files):
                past_src_file = past_src_files[i]
                past_src_data, _, past_src_sizes, _ = make_translation_data(
                    past_src_file, '/dev/null',
                    dicts['src'], dicts['src'], tokenizer,
                    max_src_length=max(1024, opt.src_seq_length),
                    max_tgt_length=max(1024, opt.tgt_seq_length),
                    add_bos=(not opt.no_bos),
                    data_type=opt.data_type,
                    num_workers=opt.num_threads,
                    verbose=opt.verbose,
                    external_tokenizer=opt.external_tokenizer,
                    src_lang=src_lang,
                    tgt_lang=tgt_lang,
                    lang_list=dicts['langs'])
                valid['past_src'] += past_src_data
                valid['past_src_sizes'] += past_src_sizes
            if opt.multi_dataset:
                data['src'] = src_data
                data['tgt'] = tgt_data
                data['src_sizes'] = src_sizes
                data['tgt_sizes'] = tgt_sizes
                data['src_lang'] = src_lang_data
                data['tgt_lang'] = tgt_lang_data
                print("Saving validation set %i %s-%s to disk ..." % (idx, src_lang, tgt_lang))
                # take basedir from opt.save_data
                path = os.path.join(dirname(opt.save_data), "valid.%i.%s-%s" % (idx, src_lang, tgt_lang))
                os.makedirs(path, exist_ok=True)
                # save data immediately
                save_dataset(path, data, opt.format, dicts, opt.src_type)
                idx = idx + 1
            else:
                valid['src'] += src_data
                valid['tgt'] += tgt_data
                valid['src_sizes'] += src_sizes
                valid['tgt_sizes'] += tgt_sizes
                valid['src_lang'] += src_lang_data
                valid['tgt_lang'] += tgt_lang_data
    elapse = str(datetime.timedelta(seconds=int(time.time() - start)))
    print("Binarization finished after %s" % elapse)
    if opt.src_vocab is None and opt.asr == False and opt.lm == False:
        save_vocabulary('source', dicts['src'], opt.save_data + '.src.dict')
    if opt.tgt_vocab is None:
        save_vocabulary('target', dicts['tgt'], opt.save_data + '.tgt.dict')
    if opt.multi_dataset:
        # SAVE DATA
        print("Saving dictionary to %s" % (opt.save_data + '.dict.pt'))
        torch.save(dicts, opt.save_data + '.dict.pt')
        if opt.src_vocab is None and opt.asr == False and opt.lm == False:
            save_vocabulary('source', dicts['src'], opt.save_data + '.src.dict')
        if opt.tgt_vocab is None:
            save_vocabulary('target', dicts['tgt'], opt.save_data + '.tgt.dict')
        print("Finished.")
    else:
        if opt.format in ['raw', 'bin']:
            print('Saving data to \'' + opt.save_data + '.train.pt\'...')
            save_data = {'dicts': dicts,
                         'type': opt.src_type,
                         'train': train,
                         'valid': valid}
            torch.save(save_data, opt.save_data + '.train.pt')
            print("Done")
        elif opt.format in ['scp', 'scpmem', 'wav']:
            print('Saving target data to memory indexed data files. Source data is stored only as scp path.')
            from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
            assert opt.asr, "ASR data format is required for this memory indexed format"
            torch.save(dicts, opt.save_data + '.dict.pt')
            # binarize the training set first
            for set_ in ['tgt', 'src_lang', 'tgt_lang']:
                if train[set_] is None:
                    continue
                if opt.data_type == 'int64':
                    dtype = np.int64
                else:
                    dtype = np.int32
                train_data = MMapIndexedDatasetBuilder(opt.save_data + ".train.%s.bin" % set_, dtype=dtype)
                # add item from training data to the indexed data
                for tensor in train[set_]:
                    train_data.add_item(tensor)
                train_data.finalize(opt.save_data + ".train.%s.idx" % set_)
                del train_data
                if valid[set_] is None:
                    continue
                valid_data = MMapIndexedDatasetBuilder(opt.save_data + ".valid.%s.bin" % set_, dtype=dtype)
                # add item from validation data to the indexed data
                for tensor in valid[set_]:
                    valid_data.add_item(tensor)
                valid_data.finalize(opt.save_data + ".valid.%s.idx" % set_)
                del valid_data
            for set_ in ['src_sizes', 'tgt_sizes']:
                if train[set_] is not None:
                    np_array = np.asarray(train[set_])
                    np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
                else:
                    print("Training %s not found " % set_)
                if valid[set_] is not None:
                    np_array = np.asarray(valid[set_])
                    np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
                else:
                    print("Validation %s not found " % set_)
            if 'past_src' in train and len(train['past_src']) > 0:
                set_ = 'past_src_sizes'
                if train[set_] is not None:
                    np_array = np.asarray(train[set_])
                    np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
                else:
                    print("Training %s not found " % set_)
                # BUGFIX: .get() — the valid dict may have no past sizes when
                # past_valid_src does not line up with the validation files
                if valid.get(set_) is not None:
                    np_array = np.asarray(valid[set_])
                    np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
                else:
                    print("Validation %s not found " % set_)
            # Finally save the audio path
            save_data = {'train': train['src'],
                         'valid': valid['src']}
            # remember to take into account the past information
            if 'past_src' in train and len(train['past_src']) > 0:
                save_data['train_past'] = train['past_src']
                # BUGFIX: guard against a missing validation past_src key
                save_data['valid_past'] = valid.get('past_src', list())
            if opt.format in ['wav']:
                torch.save(save_data, opt.save_data + '.wav_path.pt')
            else:
                torch.save(save_data, opt.save_data + '.scp_path.pt')
            print("Done")
        elif opt.format in ['mmap', 'mmem']:
            print('Saving data to memory indexed data files')
            from onmt.data.mmap_indexed_dataset import MMapIndexedDatasetBuilder
            # save dicts in this format
            torch.save(dicts, opt.save_data + '.dict.pt')
            # binarize the training set first
            for set_ in ['src', 'tgt', 'src_lang', 'tgt_lang', 'past_src']:
                if set_ not in train or train[set_] is None:
                    continue
                if opt.data_type == 'int64':
                    dtype = np.int64
                else:
                    dtype = np.int32
                train_data = MMapIndexedDatasetBuilder(opt.save_data + ".train.%s.bin" % set_, dtype=dtype)
                # add item from training data to the indexed data
                for tensor in train[set_]:
                    train_data.add_item(tensor)
                train_data.finalize(opt.save_data + ".train.%s.idx" % set_)
                del train_data
                # BUGFIX: 'past_src' may be absent from valid -> guard the key
                if set_ not in valid or valid[set_] is None:
                    continue
                valid_data = MMapIndexedDatasetBuilder(opt.save_data + ".valid.%s.bin" % set_, dtype=dtype)
                # add item from validation data to the indexed data
                for tensor in valid[set_]:
                    valid_data.add_item(tensor)
                valid_data.finalize(opt.save_data + ".valid.%s.idx" % set_)
                del valid_data
            for set_ in ['src_sizes', 'tgt_sizes']:
                # BUGFIX: the original condition `set_ not in train or
                # train[set_] is not None` raised KeyError for missing keys
                if set_ in train and train[set_] is not None:
                    np_array = np.asarray(train[set_])
                    np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
                else:
                    print("Training %s not found " % set_)
                # BUGFIX: validation sizes were never written in this branch
                # (the scp/wav branch writes them) — mirror that behaviour
                if set_ in valid and valid[set_] is not None:
                    np_array = np.asarray(valid[set_])
                    np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
                else:
                    print("Validation %s not found " % set_)
            if 'past_src' in train and len(train['past_src']) > 0:
                set_ = 'past_src_sizes'
                if train[set_] is not None:
                    np_array = np.asarray(train[set_])
                    np.save(opt.save_data + ".train.%s.npy" % set_, np_array)
                else:
                    print("Training %s not found " % set_)
                if valid.get(set_) is not None:
                    np_array = np.asarray(valid[set_])
                    np.save(opt.save_data + ".valid.%s.npy" % set_, np_array)
                else:
                    print("Validation %s not found " % set_)
        else:
            raise NotImplementedError
# Script entry point: run the full preprocessing pipeline defined above.
if __name__ == "__main__":
    main()
def safe_readline(f):
    """Read one line from ``f``, tolerating a seek position that landed
    inside a multi-byte character.

    If ``readline`` raises :class:`UnicodeDecodeError`, the file position is
    stepped back one byte at a time until a decodable line boundary is found,
    then the read is retried.
    """
    pos = f.tell()
    line = None
    while line is None:
        try:
            line = f.readline()
        except UnicodeDecodeError:
            pos -= 1
            f.seek(pos)  # back up to where this character begins
    return line
|
'''module for finding individuals who are deceased'''
def print_deceased(people):
    """Return the subset of individuals that have a death record.

    Args:
        people: mapping of individual id -> record dict; a record with a
            'DEAT' key marks a deceased individual.

    Returns:
        Dict mapping each deceased individual's id to its 'DEAT' value.
        (Despite the name, this function returns the data rather than
        printing it — presumably the caller prints it.)
    """
    dead_people = {}
    # BUGFIX: dict.iterkeys() is Python 2 only (AttributeError on Python 3);
    # iterating sorted(people) is equivalent and portable.
    for key in sorted(people):
        if 'DEAT' in people[key]:
            dead_people[key] = people[key]['DEAT']
    return dead_people
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-26 22:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.9.1, 2016-01-26).

    Replaces the ``state`` field choices on the billing and mailing address
    models with the full list of Chinese provinces/regions; the mailing
    address variant additionally allows a blank value.
    """

    dependencies = [
        ('accounts', '0004_auto_20151208_2358'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userbillingaddress',
            name='state',
            field=models.CharField(choices=[('anhui', '安徽'), ('beijing', '北京'), ('chongqing', '重庆'), ('fujian', '福建'), ('gansu', '甘肃'), ('guangdong', '广东'), ('guangxi', '广西壮族自治区'), ('guizhou', '贵州'), ('hainan', '海南'), ('hebei', '河北'), ('heilongjiang', '黑龙江'), ('henan', '河南'), ('hongkong', '香港'), ('hubei', '湖北'), ('hunan', '湖南'), ('jiangsu', '江苏'), ('jiangxi', '江西'), ('jilin', '吉林'), ('liaoning', '辽宁'), ('macao', '澳门'), ('neimongol', '内蒙古自治区'), ('ningxia', '宁夏回族自治区'), ('qinghai', '青海'), ('shaanxi', '陕西'), ('shandong', '山东'), ('shanghai', '上海'), ('shanxi', '山西'), ('sichuan', '四川'), ('taiwan', '台湾'), ('tianjin', '天津'), ('xinjiang', '新疆维吾尔自治区'), ('xizang', '西藏自治区'), ('yunnan', '云南'), ('zhejiang', '浙江')], max_length=50),
        ),
        migrations.AlterField(
            model_name='usermailingaddress',
            name='state',
            field=models.CharField(blank=True, choices=[('anhui', '安徽'), ('beijing', '北京'), ('chongqing', '重庆'), ('fujian', '福建'), ('gansu', '甘肃'), ('guangdong', '广东'), ('guangxi', '广西壮族自治区'), ('guizhou', '贵州'), ('hainan', '海南'), ('hebei', '河北'), ('heilongjiang', '黑龙江'), ('henan', '河南'), ('hongkong', '香港'), ('hubei', '湖北'), ('hunan', '湖南'), ('jiangsu', '江苏'), ('jiangxi', '江西'), ('jilin', '吉林'), ('liaoning', '辽宁'), ('macao', '澳门'), ('neimongol', '内蒙古自治区'), ('ningxia', '宁夏回族自治区'), ('qinghai', '青海'), ('shaanxi', '陕西'), ('shandong', '山东'), ('shanghai', '上海'), ('shanxi', '山西'), ('sichuan', '四川'), ('taiwan', '台湾'), ('tianjin', '天津'), ('xinjiang', '新疆维吾尔自治区'), ('xizang', '西藏自治区'), ('yunnan', '云南'), ('zhejiang', '浙江')], max_length=50),
        ),
    ]
|
"""
This program create random passwords.
"""
import random
import string
import datetime
# Interactive menu: pick a password style, then generate and record it.
print("Hello in Password Creator 0.01\n\
Choose a right number: \n\
1 - simple password with 6th letters\n\
2 - password with letters (small and big) and numbers with your value of characters\n\
3 - CRAZIEST passwords - try it yourself!\n\
4 - I don't have idea of what I want to do")
program_choice = input("What number you choose?: ")
now = datetime.datetime.now()
now = now.strftime("%d-%m-%Y %X")
password = ""
if program_choice == "1":
    # fixed-length, lowercase-only password
    for i in range(6):
        password += random.choice(string.ascii_lowercase)
elif program_choice == "2":
    characters_value = input("Write how many characters your password need: ")
    for i in range(int(characters_value)):
        password += random.choice(string.ascii_letters + string.digits)
    # guarantee at least one digit by replacing the last character if needed
    if password.isalpha():
        password = password[:-1] + random.choice(string.digits)
elif program_choice == "3":
    characters_value = input("Write how many characters your password need: ")
    for i in range(int(characters_value)):
        password += random.choice(string.ascii_letters + string.digits + string.punctuation)
elif program_choice == "4":
    print("Thank you, see you next time")
else:
    print("Wrong key. Try it again.")
# BUGFIX: only record and display a password when one was actually generated;
# previously choices "4" and invalid input still appended an empty entry to
# passwords.txt and prompted for a website name.
if password:
    with open("passwords.txt", "a+") as file:
        file.write("password: {} website: {} datetime: {}\n".format(
            password, input("Write name of website or press enter: "), now))
    print(password)
|
import urllib.request, re, sys, os
from http import server, HTTPStatus
from string import ascii_uppercase
if __name__ == "__main__":
    # Scrape the local Vidal substance index pages for a range of initials
    # and build a substitution dictionary plus a per-letter report.
    if len(sys.argv) < 3:
        print("Error: Missing arguments...\nUsage: python aspirer.py [A-Z] [PORT NUMBER]\n")
    else:
        interval = sys.argv[1].upper()  # Interval, e.g. "A-Z"
        port = sys.argv[2]  # Port
        if not re.match(r'[A-Z]-[A-Z]', interval):
            print("Error: The given interval isn't valid...")
        else:
            # BUGFIX: open the output files with context managers — the
            # original never closed either handle (resource leak, and
            # buffered output could be lost on an unclean exit).
            with open('infos.txt', 'w+', encoding='utf-8') as infos, \
                    open('subst.dic', 'w+', encoding='utf-16') as dct:
                # Start aspiration
                nbtotal = 0  # Initialize total number of entries
                for c in ascii_uppercase:  # Loop through all uppercase letters
                    # interval is "X-Y": index 0 is the lower bound, index 2 the upper
                    if c >= interval[0] and c <= interval[2]:
                        print("http://localhost:%s/vidal/vidal-Sommaires-Substances-%c.htm" % (port, c))
                        url = urllib.request.urlopen(
                            "http://localhost:%s/vidal/vidal-Sommaires-Substances-%c.htm" % (port, c))
                        res = url.read().decode('utf-8')
                        fin = re.findall(r'href="Substance/.*-.*.htm">(\w*)', res)
                        inf = ",.N+subst\n".join(fin)
                        # one write call — the original iterated the string and
                        # wrote it character by character (same bytes, slower)
                        dct.write(inf)
                        infos.write("\t- Number of entries in %c: %d\n" % (c, len(fin)))
                        nbtotal += len(fin)
                # Display total number of entries
                infos.write("\n- Total number of entries: %d" % (nbtotal))
|
'''
Created on Mar 1, 2017
@author: Nathanael Mathieu, Avi Stein, Kevin McMorrow, Jesse Galganov
A can only pair with U, and only when the U is 2 away from it
U can only pair with A, and only when the A is 2 away from it
C can only pair with G, and only when the G is 2 away from it
G can only pair with C, and only when the C is 2 away from it
'''
import numpy as np
# Complement map: which base each base may pair with.
pairs = {'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G'}
def possibleBasePairs(data):
    """List every [n, j] where data[n] is complementary to data[j].

    Reproduces the original scan order exactly: for position pos, the
    candidate partner index (probe + 2) starts at max(0, pos - 1) + 2 and
    runs to the end of the sequence.
    """
    hits = []
    length = len(data)
    for pos in range(length):
        # The original loop carried its inner index across iterations,
        # which is equivalent to restarting at pos - 1 (0 for the first two).
        start = pos - 1 if pos > 0 else 0
        for probe in range(start, length - 2):
            if data[pos] == pairs[data[probe + 2]]:
                hits.append([pos, probe + 2])
    return hits
def basePairMatrixCreator(possibleBasePairs):
    """Build an n x n 0/1 matrix (n = number of pairs) marking each [x, y] pair.

    NOTE(review): the indices stored in each pair are sequence positions, so a
    position >= n raises IndexError — confirm callers guarantee this holds.
    """
    size = len(possibleBasePairs)
    grid = np.zeros(shape=(size, size))
    for row, col in possibleBasePairs:
        grid[row][col] = 1
    return grid
# The RNA sequence under test; the comment row gives the index of each base.
data = ('C', 'C', 'C', 'A', 'A', 'A', 'G', 'G', 'G', 'U', 'C', 'A')
#        0    1    2    3    4    5    6    7    8    9    10   11
# NOTE(review): this rebinds `pairs` from the complement dict to the 0/1 pair
# matrix; safe only because possibleBasePairs (which reads the dict) has
# already run before the name is rebound.
pairs = basePairMatrixCreator(possibleBasePairs(data))
# print(pairs)
# print("Pairs Matrix:")
# print(np.matrix(basePairMatrixCreator(pairs)))
paths = []  # filled by rabbithole() with every partial pairing chain
ones = []   # unused at module level; rabbithole() keeps its own local copy
# Superseded prototype of the path expansion now implemented in rabbithole():
# for i in range(0,len(bluh)):
#     for j in range(0, len(bluh)):
#         if bluh[i,j] != 0:
#             ones.append((i,j))
#
# twos = [[] for i in range(0,len(ones))]
#
# for i in range(0, len(ones)):
#     (x,y) = ones[i]
#     for j in range(x-1, -1, -1):
#         for k in range(y+1, len(bluh)):
#             tup = (j,k)
#             (twos[i]).append(tup)
#             paths.append([ones[i], tup])
def rabbithole(matrix):
    """Enumerate every pairing chain implied by `matrix` and return the best.

    matrix[i][j] == 1 marks a candidate base pair (i, j).  Starting from each
    pair, chains are repeatedly extended with any (j, k) lying strictly
    outside the last pair (j < x, k > y); every partial chain is recorded in
    the global `paths`.  Chains are then scored against the global `data`
    sequence (G/C positions score 3, others 2) and [best chain, best score]
    is returned.

    NOTE(review): extensions are never checked against matrix[j][k], so
    chains can include positions that are not actual pairs — confirm intent.
    """
    global paths
    maxList = []
    ones = []
    # Seed one single-pair chain per nonzero matrix entry.
    for i in range(0, len(matrix)):
        for j in range(0, len(matrix[0])):
            if matrix[i][j] != 0:
                ones.append([(i, j)])
                paths.append([(i, j)])
    # Breadth-first expansion: extend each chain outward until no extension fits.
    prev_list = list(ones)
    while prev_list != []:
        new_list = []
        for i in range(0, len(prev_list)):
            place = prev_list[i]
            (x, y) = place[len(place) - 1]
            for j in range(x - 1, -1, -1):
                for k in range(y + 1, len(matrix)):
                    hold = list(place)
                    hold.append((j, k))
                    new_list.append(hold)
                    paths.append(hold)
        prev_list = new_list
    # BUG FIX: maxScore was reset to 0 inside the loop, so every nonempty path
    # "beat" it and maxList ended up being simply the last path generated.
    maxScore = 0
    for i in range(0, len(paths)):
        score = 0
        temp = paths[i]
        for base in temp:
            # G-C pairs have 3 hydrogen bonds (score 3), A-U pairs 2 (score 2).
            if data[base[0]] == 'G' or data[base[0]] == 'C':
                score += 3
            else:
                score += 2
        if maxScore < score:
            maxList = paths[i]
            maxScore = score
    # BUG FIX: return the best score, not the score of the last path examined.
    return [maxList, maxScore]
# rabbithole returns [best pairing chain, its score].
maxList = rabbithole(pairs)
print("Max Pairs List {}\nWith Score Of: {}".format(maxList[0], maxList[1]))
# bottom
|
from ._base import BaseAPIDispatch, BaseAPIEndpoint
class Stars(BaseAPIDispatch):
    """Dispatcher for the Slack `stars.*` API family.

    The endpoint classes below attach themselves via ``@Stars.register(name)``.
    """
    pass
@Stars.register('add')
class StarsAdd(BaseAPIEndpoint):
    """Adds a star to an item (message, file, file comment, channel, private
    group, or DM) on behalf of the authenticated user.

    One of file, file_comment, channel, or the combination of channel and
    timestamp must be specified.  On success the response is ``{"ok": true}``
    and a star_added event is broadcast through the RTM API for the calling
    user.

    For more information see https://api.slack.com/methods/add
    """
    endpoint = 'stars.add'
    required_args = {}
    optional_args = {
        'channel',
        'file',
        'file_comment',
        'timestamp',
    }
    options = {
        'include_token': True,
    }
    # Scope Information
    scopes = {
        'all': {
            'stars:write',
        },
        'bot': set(),
        'user': set(),
    }

    def __call__(self, channel=None, file=None, file_comment=None, timestamp=None):
        """Adds a star to an item.

        :param channel: Optional. Channel to add star to, or channel of the
            message to star (used with timestamp). e.g. C1234567890
        :param file: Optional. File to add star to. e.g. F1234567890
        :param file_comment: Optional. File comment to add star to. e.g. Fc1234567890
        :param timestamp: Optional. Timestamp of the message to add star to.
            e.g. 1234567890.123456
        """
        # Forward only the arguments the caller actually supplied.
        supplied = {
            'channel': channel,
            'file': file,
            'file_comment': file_comment,
            'timestamp': timestamp,
        }
        return BaseAPIEndpoint.__call__(
            self,
            **{name: value for name, value in supplied.items() if value is not None}
        )
@Stars.register('list')
class StarsList(BaseAPIEndpoint):
    """Lists the items starred by the authed user.

    The response contains a list of starred items followed by pagination
    information.  Every item has a ``type`` property; the remaining
    properties depend on it:

    - message: a ``message`` property containing a message object
    - file: a ``file`` property containing a file object
    - file_comment: ``file`` plus a ``comment`` property
    - channel / im / group: a ``channel`` (or ``group``) property with the
      channel ID

    The ``paging`` block reports the count returned, total starred items,
    current page, and total pages.  Max count is 1000; max page is 100.

    For more information see https://api.slack.com/methods/list
    """
    endpoint = 'stars.list'
    required_args = {}
    optional_args = {
        'count',
        'page',
    }
    options = {
        'include_token': True,
    }
    # Scope Information
    scopes = {
        'all': {
            'stars:read',
        },
        'bot': set(),
        'user': set(),
    }

    def __call__(self, count=None, page=None):
        """Lists stars for a user.

        :param count: Optional, default=100. Number of items per page. e.g. 20
        :param page: Optional, default=1. Page number of results. e.g. 2
        """
        # Forward only the arguments the caller actually supplied.
        supplied = {
            'count': count,
            'page': page,
        }
        return BaseAPIEndpoint.__call__(
            self,
            **{name: value for name, value in supplied.items() if value is not None}
        )
@Stars.register('remove')
class StarsRemove(BaseAPIEndpoint):
    """Removes a star from an item (message, file, file comment, channel,
    private group, or DM) on behalf of the authenticated user.

    One of file, file_comment, channel, or the combination of channel and
    timestamp must be specified.  On success the response is ``{"ok": true}``
    and a star_removed event is broadcast through the RTM API for the calling
    user.

    For more information see https://api.slack.com/methods/remove
    """
    endpoint = 'stars.remove'
    required_args = {}
    optional_args = {
        'channel',
        'file',
        'file_comment',
        'timestamp',
    }
    options = {
        'include_token': True,
    }
    # Scope Information
    scopes = {
        'all': {
            'stars:write',
        },
        'bot': set(),
        'user': set(),
    }

    def __call__(self, channel=None, file=None, file_comment=None, timestamp=None):
        """Removes a star from an item.

        :param channel: Optional. Channel to remove star from, or channel of
            the starred message (used with timestamp). e.g. C1234567890
        :param file: Optional. File to remove star from. e.g. F1234567890
        :param file_comment: Optional. File comment to remove star from. e.g. Fc1234567890
        :param timestamp: Optional. Timestamp of the message to remove star
            from. e.g. 1234567890.123456
        """
        # Forward only the arguments the caller actually supplied.
        supplied = {
            'channel': channel,
            'file': file,
            'file_comment': file_comment,
            'timestamp': timestamp,
        }
        return BaseAPIEndpoint.__call__(
            self,
            **{name: value for name, value in supplied.items() if value is not None}
        )
|
""" Mastercard moneysend """
mastercard_key = 'iDSru7Fk2uxbzH2P-GHwLjMOUh46StrBlGSYwzc2a2e49b57!414c7161336b59306f476674736a4775695551757234303d'
mastercard_url = 'https://sandbox.api.mastercard.com/moneysend/v2/mapping/card?Format=XML'
mc_testcard = '5184680430000006'
# OAuth
oauth_consumer_key="AajmNcCn9oINP3qTATeAvS5relCzeex09tuqe8RT6e2690d9"
oauth_nonce="25400540855611"
oauth_timestamp="1286917726"
oauth_version="1.0"
oauth_body_hash="8CDsKsBpvFyMVMIdkZHi7ADckFA%3D"
oauth_signature_method="RSA-SHA1"
oauth_signature="J20bH3rKwCDOsj6Rn4Q8XhYrFgNVfInpLm3zSQFM7awCn%2B%2BDIuu4TBRC3%2F1eTGqLPgWwKDNbNUAQ0ZTLuRTsliQNVTRyXAn6N6w2Vmv%2BC3xQR9Clvpjyzb0FKmwXOQrqMaX7QxP4hywoZG6idZsVCcyrEGffVm39jJ1%2B9AK59s8%3D"
#stripe_key = "sk_test_82QQbAUIRKZ7aCldRdS6ZpmJ"
stripe_key = "sk_live_U92mDEX7Hl6NLX5KhLaigQBZ"
twilio_number = '(626) 414-5990'
twilio_auth = '878af82b65098a9f2d38b71d13387041'
twilio_sid = 'AC89cd934acd5ed8ac1485cb1ec8cd0692'
|
from flask import Flask, session, request, Response, render_template, redirect, url_for, flash
from flask_login import LoginManager, UserMixin, login_required, login_user, logout_user, current_user
from werkzeug.datastructures import CombinedMultiDict
from werkzeug.utils import secure_filename
import os
import time
import sqlite3
import datetime
# Forms...
from forms import *
class Sql:
    """Tiny convenience wrapper around a sqlite3 connection to ./db.db.

    NOTE(review): there is no commit helper — callers that write must call
    commit on the underlying connection themselves; both methods just proxy
    to cursor.execute.
    """
    def __init__(self):
        # One connection + cursor per instance; the caller must close().
        self.db = sqlite3.connect("./db.db")
        self.c = self.db.cursor()
    def query(self, query):
        # Executes raw SQL — only safe for static statements (no user input).
        return self.c.execute(query)
    def insert(self, *query):
        # Pass-through for parameterized execute(sql, params).
        return self.c.execute(*query)
    def close(self):
        self.db.close()
app = Flask(__name__)
# config
# NOTE(review): DEBUG=True and a hard-coded SECRET_KEY must not ship to
# production — load both from the environment / instance config instead.
app.config.update(
    DEBUG=True,
    SECRET_KEY='secret_xxx'
)
# flask-login
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "login"  # endpoint users are redirected to when @login_required fails
class User(UserMixin):
    """In-memory stand-in user for flask-login; no database lookup.

    NOTE(review): name and password are derived from the id, so passwords
    are predictable — acceptable only for demos.
    """
    def __init__(self, id):
        self.id = id
        self.name = "user%s" % (id,)
        self.password = "%s_secret" % (self.name,)
    def __repr__(self):
        return "%d/%s/%s" % (self.id, self.name, self.password)
@app.route('/')
@app.route('/index', methods=["POST", "GET"])
def index():
    """Render the front page with every post, newest first."""
    store = Sql()
    all_posts = store.query("SELECT * FROM posts ORDER BY ID DESC").fetchall()
    store.close()
    return render_template("index.html", all_posts=all_posts)
@app.route("/login", methods=["POST", "GET"])
def login():
form = PwdForm(request.form)
if request.method == "POST":
print("LOGIN PRESSED")
db = sqlite3.connect("./db.db")
c = db.cursor()
c.execute("SELECT * FROM users")
usr = c.fetchall()
username = form.username.data
password = form.password.data
for i in usr: # Checks password and username against DB and redirects to index of successfull.
if i[1] == username and i[3] == password:
id = i[1]
user = User(id)
login_user(user)
print("Authenticated... redirect please...")
session["logged_in"] = True
db.close()
flash('You were successfully logged in...')
return redirect(url_for("index"))
flash('Invalid password provided')
return render_template("login.html", form=form)
@app.route("/new_post", methods=["POST", "GET"])
@login_required
def new_post():
form = PostForm(CombinedMultiDict((request.files, request.form)))
if request.method == "POST":
filename = None
if form.post_image.has_file():
image = form.post_image.data
filename = secure_filename(image.filename)
image.save(os.path.join('./upload/images', filename))
title = form.title.data
text = form.body.data
date = datetime.datetime.now()
author = current_user.get_id()
post_db = sqlite3.connect("./db.db")
c = post_db.cursor()
try:
c.execute("INSERT INTO posts(title,text,date,author,image) VALUES (?,?,?,?,?)",
(title, text, date, author, filename,))
post_db.commit()
post_db.close()
print("INSERT OK")
return redirect(url_for("index"))
except Exception as error:
flash(error)
print(error)
return render_template("new_post.html", form=form)
@app.route("/register", methods=["POST", "GET"])
def register():
form = UserRegistration(CombinedMultiDict((request.files, request.form)))
if request.method == "POST":
username = form.username.data
password = form.password.data
first_name = form.first_name.data
last_name = form.last_name.data
profile_pic = form.profile_pic.data
print(profile_pic)
print(type(profile_pic))
email = form.email.data
filename = None
if form.profile_pic.has_file():
print("has file...")
image = form.profile_pic.data
filename = secure_filename(image.filename)
filename = username + filename
image.save(os.path.join('./upload/images/profile_pic', filename))
db = sqlite3.connect("./db.db")
c = db.cursor()
c.execute("INSERT INTO users(usr,pwd,first_name,last_name,profile_pic,email) VALUES (?,?,?,?,?,?)",
(username, password, first_name, last_name, filename, email,))
db.commit()
db.close()
return redirect(url_for("login"))
return render_template("register.html", form=form)
@app.route("/leaderboard", methods=["POST", "GET"])
@login_required
def leaderboard():
    """Show all bench-press entries, heaviest first.

    BUG FIX: @app.route must be the outermost (top) decorator.  It was below
    @login_required, so Flask registered the *unwrapped* view and the login
    requirement was silently ignored.
    """
    db = sqlite3.connect("./db.db")
    c = db.cursor()
    c.execute("SELECT * FROM weight_bench ORDER BY weight DESC")
    result = c.fetchall()
    db.close()
    return render_template("leaderboard.html", result=result)
@app.route("/my_weights", methods=["POST", "GET"])
@login_required
def my_weights():
    """List, add, and delete the current user's bench-press entries.

    BUG FIX: @app.route must be the outermost (top) decorator.  It was below
    @login_required, so the route was registered without the login check.
    """
    form = RegisterWeights(request.form)
    form_delete = DeleteButton(request.form)
    user = current_user.get_id()
    db = sqlite3.connect("./db.db")
    c = db.cursor()
    c.execute("SELECT * FROM weight_bench WHERE usr=(?)", (user,))
    result_weights = c.fetchall()
    db.close()
    if request.method == "POST":
        print(request.form)
        if "submit" in request.form:
            # Add a new entry for this user.
            time_date = form.time_date.data
            print(time_date)
            video = form.video.data
            weight = form.weight.data
            db = sqlite3.connect("./db.db")
            c = db.cursor()
            c.execute("INSERT INTO weight_bench(usr,weight,time_date,video) VALUES (?,?,?,?)",
                      (user, weight, time_date, video,))
            db.commit()
            db.close()
            return redirect(url_for("my_weights"))
        if "delete" in request.form:
            # NOTE(review): deletes by row id only — there is no check that
            # the row belongs to the current user; confirm this is intended.
            id_hidden = form_delete.id_hidden.data
            db = sqlite3.connect("./db.db")
            c = db.cursor()
            c.execute("DELETE from weight_bench WHERE ID=(?)", (id_hidden,))
            db.commit()
            db.close()
            return redirect(url_for("my_weights"))
    return render_template("my_weights.html", result_weights=result_weights, form=form, form_delete=form_delete)
@app.errorhandler(401)
def page_not_found(e):
    # Handles 401 Unauthorized (raised by flask-login) despite the 404-ish name.
    return Response('<p>Login failed</p>')
# callback to reload the user object
@login_manager.user_loader
def load_user(userid):
    # flask-login session callback: rebuild the User from the stored id.
    # NOTE(review): no existence check against the DB — any id deserializes.
    return User(userid)
# somewhere to logout
@app.route("/logout")
@login_required
def logout():
    # Clear our own session flag plus flask-login's state, then go home.
    session["logged_in"] = False
    logout_user()
    return redirect(url_for("index"))
if __name__ == '__main__':
    # BUG FIX: secret_key was assigned *after* app.run(), which blocks until
    # the server stops — the key was never in effect while serving.
    app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
    app.run(host="127.0.0.1", port=5000, threaded=True, debug=True)
|
# CS5 Gold, hw1pr3
# Filename: hw1pr3.py
# Name: Joe Konieczny
# Problem description: Function Frenzy!
#
# leng example from class
#
def leng(s):
    """Recursively count the elements of s (a string or a list)."""
    # Base case: an empty string or empty list has length zero.
    return 0 if s == '' or s == [] else 1 + leng(s[1:])
def mult(n, m):
    """Multiply n by m via repeated addition (recursion on m)."""
    if n == 0 or m == 0:
        return 0
    if m < 0:
        # Negative multiplier: subtract n once and move m toward zero.
        return -n + mult(n, m + 1)
    return n + mult(n, m - 1)
def dot(L, K):
    """Dot product of equal-length lists L and K; 0 on mismatch or empty input."""
    if len(L) != len(K):
        return 0
    # sum of an empty generator is 0, which covers the empty-list case.
    return sum(a * b for a, b in zip(L, K))
def ind(e, L):
    """Offset of the first element of sequence L equal to e; len(L) if absent.

    For strings, `in` is a substring test, so a multi-character e walks the
    suffixes until the substring disappears (matching the original recursion).
    """
    offset = 0
    rest = L
    while e in rest:
        if rest[0] == e:
            return offset
        rest = rest[1:]
        offset += 1
    return offset + len(rest)
def letterScore(let):
    """Return the Scrabble tile value of the one-character string let."""
    # Letter groups and their tile values; unknown characters score 0.
    for letters, value in (('AaEeIiLlNnOoRrSsTtUu', 1),
                           ('DdGg', 2),
                           ('BbCcMmPp', 3),
                           ('FfHhVvWwYy', 4),
                           ('Kk', 5),
                           ('XxJj', 8),
                           ('QqZz', 10)):
        if let in letters:
            return value
    return 0
def scrabbleScore(S):
    """Return the total Scrabble value of the string S ('' scores 0)."""
    return sum(letterScore(ch) for ch in S)
def transcribe(S):
    """Convert a DNA string into its complementary RNA string.

    Characters that are not DNA nucleotides are dropped.
    """
    return ''.join(one_dna_to_rna(ch) for ch in S)
def one_dna_to_rna(c):
    """Map one DNA nucleotide to its complementary RNA nucleotide ('' if unknown)."""
    return {'A': 'U', 'C': 'G', 'G': 'C', 'T': 'A'}.get(c, '')
def pigletLatin(s):
    """Simple 'piglet latin': vowel-initial words gain 'way'; consonant-initial
    words move their first letter to the end and gain 'ay'.

    Returns None when s starts with a non-letter (no branch matches).
    """
    if s == '':
        return ''
    head = s[0]
    if head in 'AaEeIiOoUu':
        return s + 'way'
    if head in 'BbCcDdFfGgHhJjKkLlMmNnPpQqRrSsTtVvWwXxYyZz':
        return s[1:] + head + 'ay'
def pigLatin(s):
    """Pig Latin with multi-consonant onsets.

    A leading 'y' followed by a consonant is treated as a vowel sound
    (whole word + 'way'); otherwise the leading consonant cluster moves
    behind the vowels and 'ay' is appended.
    """
    if s == '':
        return ''
    if s[0] in 'Yy' and s[1] in 'BbCcDdFfGgHhJjKkLlMmNnPpQqRrSsTtVvWwXxYyZz':
        return s + 'way'
    # Move the leading consonant cluster behind the vowel-initial suffix.
    return vowels(s) + initial_consonants(s) + 'ay'
def initial_consonants(s):
    """Return the run of consonants at the front of s (helper for pigLatin)."""
    head = s[0]
    if head in 'AaEeIiOoUu':
        return ''
    return head + initial_consonants(s[1:])
def vowels(s):
    """Return the suffix of s starting at its first vowel (helper for pigLatin)."""
    return s if s[0] in 'AaEeIiOoUu' else vowels(s[1:])
#
# I finished all of the CodingBat STRING problems.
#
#
# I finished all of the CodingBat LIST problems.
# |
"""
Given an array of strings, return another array containing all of
its longest strings.
Example:
For inputArray = ["aba", "aa", "ad", "vcd", "aba"], the output should be:
allLongestStrings(inputArray) = ["aba", "vcd", "aba"].
Input/Output:
[execution time limit] 4 seconds (py3)
[input] array.string inputArray
A non-empty array.
Guaranteed constraints:
1 ≤ inputArray.length ≤ 10,
1 ≤ inputArray[i].length ≤ 10.
[output] array.string
Array of the longest strings, stored in the same order as in the inputArray.
"""
"""
Plan:
1. Create a list to hold the results.
2. Iterate through the list of strings to get the max length
3. Append only the strings that are the max length to the results list.
4. Return the results list.
"""
def allLongestStrings(inputArray):
    """Return every string in inputArray whose length equals the maximum,
    preserving the original order.
    """
    # One pass for the maximum length, one pass to filter by it.
    longest = max(len(s) for s in inputArray)
    return [s for s in inputArray if len(s) == longest]
|
from os.path import realpath
import ppo_atari
import rainy
from rainy.envs import Atari, atari_parallel
import rainy.utils.cli as cli
def config() -> rainy.Config:
    """PPO-on-Breakout config with flickering frames and a GRU actor-critic."""
    cfg = ppo_atari.config()
    # Flickering single frames (no stacking) make the task partially
    # observable, hence the recurrent (GRU) network below.
    cfg.set_env(lambda: Atari('Breakout', flicker_frame=True, frame_stack=False))
    cfg.set_parallel_env(atari_parallel(frame_stack=False))
    cfg.set_net_fn('actor-critic', rainy.net.actor_critic.ac_conv(rnn=rainy.net.GruBlock))
    cfg.eval_env = Atari('Breakout', frame_stack=False)
    return cfg
if __name__ == '__main__':
    # Hand this config to rainy's CLI runner; script_path anchors logs/saves.
    cli.run_cli(config(), rainy.agents.PpoAgent, script_path=realpath(__file__))
|
#!/usr/bin/env python
"""
*******************************************************************************
* Ledger Blue
* (c) 2016 Ledger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
********************************************************************************
"""
from ledgerblue.comm import getDongle
from ledgerblue.commException import CommException
import argparse
import struct
# NOTE: Python 2 only — str.decode('hex') was removed in Python 3 (use
# bytes.fromhex there).  Sample serialized XRP transactions follow; exactly
# one is left uncommented as the payload to sign.
#TEST_TX = "12000022800000002400000001201B000F4D98614000000000A7D8C068400000000000000C7321ED7810474C1DBEFAB9B271346E95ECA5CF931B5B8A5CDDEDAA2E73EB97BB72D796811444CC4E9B00F8668E5F642043E2206DB9BEFC65E5831439E57D524E0AF4110DBEDB32B0A858C8896DD6D4".decode('hex')
#TEST_TX = "12000022800000002400000025201B01BF7A086140000000000186A0684000000000000C717321039E4E8FE03FFB2B732A06DA881A4705ADBA7AF723545A89DA437427D1B0C3E58F81146E0394EBAA83660BC0C0CABCBE07FA5D61345FB2831423ED3371E6B8058FD14799CFF14D155366098E54".decode('hex')
# source tag
TEST_TX = "120000228000000023ABCD12342400000001201B0012316D61400000000BEBC20068400000000000000C732102C2FB8D65461479B7F69A8945DDA0FFFB77354FF48528AEC5C558B88140FA25578114026D56EE9F14933E58DCF0DCC127E70963064F618314B420EAADA2AA969F2138268DAB6F0B858CB635C8".decode('hex')
# dest tag
#TEST_TX = "120000228000000024000000012E12345678201B0012316D61400000000BEBC20068400000000000000C732102C2FB8D65461479B7F69A8945DDA0FFFB77354FF48528AEC5C558B88140FA25578114026D56EE9F14933E58DCF0DCC127E70963064F618314B420EAADA2AA969F2138268DAB6F0B858CB635C8".decode('hex')
#src and dest tags
#TEST_TX = "120000228000000023ABCD123424000000012E12345678201B0012316D61400000000BEBC20068400000000000000C732102C2FB8D65461479B7F69A8945DDA0FFFB77354FF48528AEC5C558B88140FA25578114026D56EE9F14933E58DCF0DCC127E70963064F618314B420EAADA2AA969F2138268DAB6F0B858CB635C8".decode('hex')
def parse_bip32_path(path):
    """Encode a BIP32 path string like "44'/144'/0'" into packed big-endian
    uint32s; hardened components (trailing ') get the 0x80000000 bit set.

    NOTE: Python 2 only — concatenating the str "" with struct.pack output
    would raise TypeError on Python 3, where pack returns bytes.
    """
    if len(path) == 0:
        return ""
    result = ""
    elements = path.split('/')
    for pathElement in elements:
        # "0'" splits into ['0', ''] (hardened); "0" stays a single element.
        element = pathElement.split('\'')
        if len(element) == 1:
            result = result + struct.pack(">I", int(element[0]))
        else:
            result = result + struct.pack(">I", 0x80000000 | int(element[0]))
    return result
parser = argparse.ArgumentParser()
parser.add_argument('--path', help="BIP 32 path to retrieve")
parser.add_argument('--ed25519', help="Derive on ed25519 curve", action='store_true')
parser.add_argument("--apdu", help="Display APDU log", action='store_true')
args = parser.parse_args()
# Default derivation path differs per curve (hardened-only for ed25519).
if args.path == None:
    if args.ed25519:
        args.path = "44'/144'/0'/0'/0'"
    else:
        args.path = "44'/144'/0'/0'/0/0"
donglePath = parse_bip32_path(args.path)
# P2 byte selects the curve: "81" for ed25519, "41" for the default curve.
if args.ed25519:
    p2 = "81"
else:
    p2 = "41"
apdu = "e00400" + p2
# Payload: total length byte, number of path elements (4 bytes each), the
# packed path, then the raw transaction.  Python 2 byte-string concatenation;
# chr(... / 4) relies on py2 integer division.
apdu = apdu.decode('hex') + chr(len(donglePath) + 1 + len(TEST_TX)) + chr(len(donglePath) / 4) + donglePath + TEST_TX
dongle = getDongle(args.apdu)
result = dongle.exchange(bytes(apdu))
# Python 2 print statement: show the device's response as hex.
print str(result).encode('hex')
|
'''
current node has three components to form sum
largest from parent, largest from left child, largest from right child
if the results can be labelled as m1, m2, m3 (sorting from minimum to maximum)
then the max can only be
max(node.val, node.val + m3, node.val + m3 + m2, current_res)
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def maxPath(self, root, parentmax):
        """Recursive helper for maxPathSum.

        parentmax is the best downward path sum that ends just above `root`
        (0 on the initial call; may be negative).  Updates self.res with the
        best path bending at `root`, and returns [leftmax, rightmax] — the
        best downward path sums beginning at root's left / right child
        (0 when the child is absent).
        """
        # Best downward path ending at root that arrives via the parent;
        # a negative parent contribution is dropped.
        parentmax_cur = max(0, parentmax) + root.val
        leftmax = 0
        rightmax = 0
        if root.left:
            leftleft, leftright = self.maxPath(root.left, parentmax_cur)
            # Best downward path starting at the left child (never negative).
            leftmax = max(0, leftleft, leftright) + root.left.val
        if root.right:
            rightleft, rightright = self.maxPath(root.right, parentmax_cur)
            rightmax = max(0, rightleft, rightright) + root.right.val
        # Combine the two largest arms with root itself, per the header note:
        # max(node, node + m3, node + m3 + m2, current best).
        m1, m2, m3 = sorted([parentmax, leftmax, rightmax])
        self.res = max(self.res, root.val + m2 + m3, root.val + m3, root.val)
        return [leftmax, rightmax]
    def maxPathSum(self, root: TreeNode) -> int:
        """Return the maximum path sum anywhere in the tree."""
        self.res = -float('inf')
        self.maxPath(root, 0)
        return self.res
|
import pytest
# Each case: (kwargs for mapper_command, expected `zipped_list` output).
# Covers empty inputs, the default and custom formats, scalar strings,
# and JSON-encoded list strings.
TEST_CASES = [
    ({'list1': [], 'list2': ['a', 'b']}, []),
    ({'list1': ['a', 'b'], 'list2': []}, []),
    ({'list1': ['c', 'd'], 'list2': ['a', 'b']}, ['c-a', 'd-b']),
    ({'list1': [], 'list2': ['a', 'b'], 'format': '{2}-{1}'}, []),
    ({'list1': ['a', 'b'], 'list2': [], 'format': '{1}-{2}'}, []),
    ({'list1': ['c', 'd'], 'list2': ['a', 'b'], 'format': '{1}/{2}'}, ['c/a', 'd/b']),
    ({'list1': "aa", 'list2': "bb", 'format': '{1}/{2}'}, ['aa/bb']),
    ({'list1': "", 'list2': "", 'format': '{1}/{2}'}, []),
    ({'list1': "[\"aa\"]", 'list2': "[\"bb\"]", 'format': '{1}/{2}'}, ['aa/bb']),
]
@pytest.mark.parametrize('args, expected', TEST_CASES)
def test_mapper_command(mocker, args, expected):
    """mapper_command zips list1/list2 using the optional format template."""
    # NOTE(review): the `mocker` fixture is unused — kept so the test
    # signature stays unchanged.
    from ZipStringsArrays import mapper_command
    res = mapper_command(args)
    assert res.outputs.get('zipped_list') == expected
|
#!/usr/bin/env python
import PySimpleGUI as sg
from random import randint as randint
"""
Demo - LEDS using Text
A simple example of how you can use UNICODE characters as LED indicators in a window
Copyright 2020 PySimpleGUI.org
"""
sg.theme('Light Brown 4')
CIRCLE = '⚫'           # filled circle glyph
CIRCLE_OUTLINE = '⚪'   # hollow circle glyph (the LED's initial state)
def LED(color, key):
    """
    A "user defined element".  In this case our LED is based on a Text
    element, giving us one place to change how the LEDs look (size, etc.).
    :param color: (str) The color of the LED
    :param key: (Any) The key used to look up the element
    :return: (sg.Text) Returns a Text element that displays the circle
    """
    return sg.Text(CIRCLE_OUTLINE, text_color=color, key=key)
layout = [  [sg.Text('Status 1 '), LED('Green', '-LED0-') ],
            [sg.Text('Status 2 '), LED('blue', '-LED1-')],
            [sg.Text('Status 3 '), LED('red', '-LED2-')],
            [sg.Button('Exit')]]
window = sg.Window('Window Title', layout, font='Any 16')
while True:
    event, values = window.read(timeout=200)
    if event == sg.WIN_CLOSED or event == 'Exit':
        break
    # Update every LED each tick: ~24% of the time (randint < 25) show the
    # filled circle, otherwise the outline.
    for i in range(3):
        window[f'-LED{i}-'].update(CIRCLE if randint(1, 100) < 25 else CIRCLE_OUTLINE)
window.close()
from django.shortcuts import render
def index(request):
    # Serves the pre-built frontend's entry point.  NOTE(review): the
    # relative '../frontend/build' template path depends on the template
    # loader configuration — confirm it resolves in deployment.
    return render(request, '../frontend/build/index.html')
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# To run in terminal
# $ cd /home/james/Documents/Edoc/3Nohtyp/Python_By_Example/Example_091
# $ python3 Example_091.py
# $ python3 Example_091.py <Input.txt >Output.txt
"""
Python by Example: Learning to Program in 150 Challenges by Nichola Lacey
091
Create an array which contains
five numbers (two of which
should be repeated). Display
the whole array to the user. Ask
the user to enter one of the
numbers from the array and
then display a message saying
how many times that number
appears in the list.
"""
from array import *
print(__doc__)
# Five integers, with 3 repeated (the value the exercise counts).
int_array = array('i', [1, 2, 3, 3, 4])
print(f'Here is an array: {int_array}')
try_again = ''
while try_again != 'n' :
    a_num = int(input('Enter one of the numbers from the array: '))
    print(f'The input for a number: {a_num}')
    # NOTE(review): try_again cannot be 'n' here (the loop test above just
    # passed), so this inner check is always true — harmless but redundant.
    if try_again != 'n' :
        print(f'The number occurs {int_array.count(a_num)} times in the array.')
    try_again = input('Try again (y/n)? ')
    print()
print()
|
from django.shortcuts import render
# Create your views here.
def index(request):
    # Landing page.
    return render(request, 'index.html')
def Vista_Peticiones(request):
    # "Requests view" page (Spanish identifiers kept as-is; templates use them).
    return render(request, 'Vista_Peticiones.html')
def Datos_Personales(request):
    # "Personal data" page.
    return render(request, 'datosPersonales.html')
import tensorflow as tf
import numpy as np
import os
#import vae
import na_vib
import error_utils
import validator
import glob
import joblib
import argparse
"""parsing and configuration"""
def parse_args():
    """Build and validate the NAVIB command-line arguments.

    Returns the validated argparse.Namespace from check_args, or None when
    validation fails.
    """
    desc = "Tensorflow implementation of 'Negative Anchor Variational Information Bottleneck (NAVIB)'"
    parser = argparse.ArgumentParser(description=desc)
    #
    # negative_anchors
    #
    parser.add_argument('--lambda_param', type=float,default=0.001,
                        help='negative anchor trade-off parameter')
    parser.add_argument('--beta_param', type=float,default=0.01,
                        help='information bottleneck trade-off parameter')
    parser.add_argument('--keep_prob', type=float,default=0.9,
                        help='training keep_prob for dropout')
    parser.add_argument('--save_freq', type=int,default=5,
                        help='how often should we save parameters to file? (in epochs)')
    parser.add_argument('--param_save_path', default="params/",
                        help='save path for params')
    parser.add_argument('--experiment_name', default=None,
                        help='save prefix for params')
    parser.add_argument('--restart_epoch', type=int, default=-1,
                        help='epoch to start at, uses param_save_path/experiment_name_#.ckpt')
    parser.add_argument('--data_path',default=None,
                        help='location of data (split dependency, sorry =.= )')
    parser.add_argument('--exp_override', default=False, action="store_true",
                        help='overrides safety switch and writes over experiment files')
    parser.add_argument('--y_type', default="zero_one",
                        help='defines error metric')
    parser.add_argument('--x_type', default="zero_one",
                        help='defines error metric')
    #
    # vae
    #
    # BUG FIX: default was the *string* '20' for a type=int option; use a real
    # int (the option is required anyway, so the default is only a fallback).
    parser.add_argument('--dim_z', type=int, default=20, help='Dimension of latent vector', required = True)
    parser.add_argument('--n_hidden_xz',
                        type=int, default=64, help='Number of hidden units in MLP')
    parser.add_argument('--n_hidden_zy',
                        type=int, default=64, help='Number of hidden units in MLP')
    parser.add_argument('--learn_rate', type=float, default=1e-3, help='Learning rate for Adam optimizer')
    parser.add_argument('--num_epochs', type=int, default=20, help='The number of epochs to run')
    parser.add_argument('--batch_size', type=int, default=128, help='Batch size')
    return check_args(parser.parse_args())
"""checking arguments"""
def check_args(args):
    """Validate parsed CLI args; return them unchanged, or None on any failure.

    Side effects: creates args.param_save_path if missing and, when
    --exp_override is set, deletes any files already inside it.
    """
    # --experiment_name
    if not args.experiment_name:
        print("Experiment Needs a Name! Aborting.")
        return None
    # --param_save_path
    try:
        os.mkdir(args.param_save_path)
    except(FileExistsError):
        pass
    # delete all existing files
    files = glob.glob(args.param_save_path+'/*')
    if len(files) > 0 and not args.exp_override:
        print("Experiment params already exist! Either restart or use --exp_override flag.")
        return None
    elif args.exp_override:
        for f in files:
            os.remove(f)
    # --data_path
    if args.data_path is None:
        print("--data_path required.")
        return None
    # --dim-z
    try:
        assert args.dim_z > 0
    except:
        print('dim_z must be positive integer')
        return None
    # --n_hidden_xz
    try:
        assert args.n_hidden_xz >= 1
    except:
        print('number of hidden units must be larger than one')
        return None  # BUG FIX: previously fell through and returned invalid args
    # --n_hidden_zy
    try:
        assert args.n_hidden_zy >= 1
    except:
        print('number of hidden units must be larger than one')
        return None  # BUG FIX: previously fell through and returned invalid args
    # --learn_rate
    try:
        assert args.learn_rate > 0
    except:
        print('learning rate must be positive')
        return None  # BUG FIX: previously fell through and returned invalid args
    # --num_epochs
    try:
        assert args.num_epochs >= 1
    except:
        print('number of epochs must be larger than or equal to one')
        return None  # BUG FIX: previously fell through and returned invalid args
    # --batch_size
    try:
        assert args.batch_size >= 1
    except:
        print('batch size must be larger than or equal to one')
        return None  # BUG FIX: previously fell through and returned invalid args
    return args
"""main function"""
def main(args):
    """Train the NA-VIB model with minibatch SGD and checkpoint progress.

    Loads a joblib-pickled dataset, builds the TensorFlow (v1) graph for
    the NA-VIB loss plus an Adam train op and a separate classifier head,
    then trains for args.num_epochs epochs, optionally resuming from a
    saved checkpoint.  Every 10th epoch (and the last one) it scores the
    model on validation data (when present) and writes a "_MAP.ckpt"
    whenever the validation score improves; with no validation split it
    reports test log-loss instead.

    Args:
        args: argparse namespace from parse_args(); fields read here:
            data_path, n_hidden_xz, n_hidden_zy, batch_size, learn_rate,
            num_epochs, dim_z, beta_param, lambda_param, keep_prob,
            restart_epoch, save_freq, param_save_path, experiment_name,
            x_type, y_type.
    """
    n_hidden_xz = args.n_hidden_xz
    n_hidden_zy = args.n_hidden_zy
    # Pickle layout: train_total_data holds [features | labels | confounds]
    # columns with split_numbers marking the column boundaries; validation
    # and test splits come pre-separated.
    train_total_data, split_numbers, train_size,\
    validation_data, validation_labels, validation_confounds,\
    test_data, test_labels, test_confounds =\
        joblib.load( args.data_path )
    #maxes = np.maximum(np.maximum(\
    #    train_total_data[:,:split_numbers[0]].max(axis=0),\
    #    validation_data.max(axis=0)),\
    #    test_data.max(axis=0))
    n_samples = train_size
    n_labels = split_numbers[1] - split_numbers[0]
    n_confounds = np.shape(train_total_data)[1] - split_numbers[1]
    """ parameters """
    dim_input = split_numbers[0]
    dim_out = n_labels
    batch_size = args.batch_size
    learn_rate = args.learn_rate
    num_epochs = args.num_epochs
    dim_z = args.dim_z
    beta_param = args.beta_param
    lambda_param = args.lambda_param
    """ build graph """
    # dropout keep-probability placeholder, fed on every sess.run.
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    x_in = tf.placeholder(tf.float32, shape=[None, dim_input], name='input_data')
    y_in = tf.placeholder(tf.float32, shape=[None, n_labels], name='output_label')
    c_in = tf.placeholder(tf.float32, shape=[None, n_confounds], name="confounds")
    #TODO: generalize the output types
    # Total loss plus its components: negative x/y log-likelihoods and the
    # remaining divergence term (weighted by beta_param / lambda_param).
    loss, neg_x_like, neg_y_like, other_div = na_vib.na_vib(\
        x_in, y_in, c_in,\
        dim_input, dim_out, dim_z,\
        n_hidden_xz, n_hidden_zy,\
        keep_prob,\
        x_output_type=args.x_type, y_output_type=args.y_type,\
        beta_param=beta_param, lambda_param=lambda_param, reuse=False\
        )
    train_op = tf.train.AdamOptimizer(learn_rate).minimize(loss)
    # Classifier head used only for validation/test scoring below.
    na_vib_obj = na_vib.classifier(x_in, dim_z, dim_out, n_hidden_xz, n_hidden_zy)
    # max_to_keep=None keeps every per-epoch checkpoint on disk.
    saver = tf.train.Saver(max_to_keep=None)
    if n_samples < batch_size:
        # Shrink the batch so at least one full batch exists.
        print("[na_vib] WARNING: n_samples < batch_size")
        batch_size = n_samples
    total_batch = int(n_samples / batch_size)
    # Tracks the best validation score seen so far (MAP checkpointing).
    val_tracker = validator.Validator()
    with tf.Session() as sess:
        #TODO: modify this!
        if(args.restart_epoch < 0):
            # Fresh run: initialize all graph variables.
            start_epoch = 0
            sess.run(tf.global_variables_initializer(), feed_dict={keep_prob : args.keep_prob})
        else:
            # Resume from the checkpoint written at restart_epoch.
            start_epoch = args.restart_epoch + 1
            saver.restore(sess,
                args.param_save_path + args.experiment_name +\
                ("_epoch%i.ckpt" % args.restart_epoch))
        for epoch in range(start_epoch,num_epochs):
            # Random shuffling
            # NOTE: shuffling rows of the combined matrix keeps each row's
            # features/labels/confounds aligned before re-slicing below.
            np.random.shuffle(train_total_data)
            #TODO: Modify this to work with train confounder classes
            train_x_shuf = train_total_data[:, :dim_input]
            train_y_shuf = train_total_data[:, dim_input:split_numbers[1]]
            train_c_shuf = train_total_data[:, split_numbers[1]:]
            # Loop over all batches
            for i in range(total_batch):
                # Compute the offset of the current minibatch in the data.
                offset = (i * batch_size) % (n_samples)
                batch_x = train_x_shuf[offset:(offset + batch_size), :]
                batch_y = train_y_shuf[offset:(offset + batch_size), :]
                batch_c = train_c_shuf[offset:(offset + batch_size), :]
                _, tot_loss, Lx, Ly, Odiv =\
                    sess.run(\
                        (train_op, loss, neg_x_like, neg_y_like, other_div),\
                        feed_dict={
                            x_in: batch_x, \
                            y_in: batch_y, \
                            c_in: batch_c, \
                            keep_prob : args.keep_prob \
                        }\
                    )
            # Periodic raw-parameter checkpoint every save_freq epochs.
            if args.save_freq > 0 and epoch % args.save_freq == 0:
                saver.save(sess,\
                    args.param_save_path + args.experiment_name +\
                    ("_epoch%i.ckpt" % epoch))
            #TODO: MAP epoch output
            # Losses printed here come from the LAST minibatch of the epoch.
            print("[na_vib] epoch %d: L_tot %03.2f L_x %03.2f L_y %03.2f Div %03.2f"\
                % (epoch, tot_loss, Lx, Ly, Odiv))
            #TODO: MAP epoch output
            if (epoch % 10 == 0 or epoch == num_epochs - 1)\
                    and validation_data is not None:
                # Score the classifier head on the validation split.
                y_hat = sess.run(na_vib_obj, feed_dict = { x_in: validation_data })
                new_score = \
                    np.mean(error_utils.zero_one_abs(validation_labels,np.array(y_hat)))
                print("[na_vib] Validation L: %f" % new_score)
                # Save a MAP checkpoint whenever validation improves.
                if val_tracker.new_try(new_score):
                    saver.save(sess,\
                        args.param_save_path + args.experiment_name + "_MAP.ckpt")
                    print("New Record!")
            elif (epoch % 10 == 0 or epoch == num_epochs - 1):
                # No validation split: report test log-loss instead.
                y_hat = sess.run(na_vib_obj, feed_dict = { x_in: test_data })
                print(error_utils.log_loss(test_labels, np.array(y_hat)))
                #y_hat = np.amax(y_hat,axis=1)
                #error = np.amax(test_labels,axis=1) == y_hat
                #print( sum(np.abs(error)) / np.shape(test_data)[0] )
if __name__ == '__main__':
    # parse arguments
    args = parse_args()
    if args is None:
        # Argument validation failed: abort with a nonzero exit status.
        # (The original `exit()` exits with status 0, hiding the failure
        # from shell scripts, and relies on the site-injected exit helper.)
        raise SystemExit(1)
    # main
    main(args)
|
'''
CSC263 Winter 2019
Problem Set 3 Starter Code
University of Toronto Mississauga
'''
# Do NOT add any "import" statements
# Do NOT use Python dictionaries
def hash_func(input_list):
    '''Simple hash of adding elements of a list together.

    Pre: input_list is an iterable of numbers (e.g. a pizza 5-tuple)
    Post: return the sum of the elements

    The sum is rotation-invariant: all cyclic rotations of a tuple hash
    to the same value, so rotations of one pizza land in the same bucket.
    '''
    # Built-in sum() replaces the manual accumulation loop; same result.
    return sum(input_list)
def num_pizza_kinds(pizzas):
    '''Count the distinct kinds of pizza in `pizzas`.

    Two pizzas are the same kind when one is a cyclic rotation of the
    other (see unique()).  Uses chained hashing into len(pizzas) buckets
    keyed by the rotation-invariant hash_func().

    Pre: pizzas is a list of pizza 5-tuples
    Post: return number of kinds of pizzas
    '''
    # Fixed-size bucket table; each slot is None or a non-empty chain.
    buckets = [None] * len(pizzas)
    kinds = 0
    for pizza in pizzas:
        slot = hash_func(pizza) % len(pizzas)
        chain = buckets[slot]
        if chain is None:
            # First pizza hashing to this slot: start its chain.
            buckets[slot] = [pizza]
            kinds += 1
        elif unique(pizza, chain):
            # Collision with a genuinely different kind: extend the chain.
            chain.append(pizza)
            kinds += 1
    return kinds
def unique(pizza1, pizza_list):
    '''Return True iff pizza1 is not a cyclic rotation of any pizza in
    pizza_list (i.e. it is a new kind).

    Uses the doubling trick: every rotation of a tuple of length k is a
    length-k slice of the tuple concatenated with itself.

    Pre: pizza1 and each element of pizza_list are tuples of equal length
    Post: return False if pizza1 matches some rotation, else True
    '''
    for pizza in pizza_list:
        doubled = pizza + pizza
        # len(pizza) distinct rotations exist; the original hard-coded
        # `while offset < 7`, whose offsets 5 and 6 were redundant for a
        # 5-tuple (offset 5 repeats offset 0; offset 6 yields a short
        # slice that can never equal a 5-tuple).
        for offset in range(len(pizza)):
            if pizza1 == doubled[offset: offset + len(pizza)]:
                return False
    return True
if __name__ == '__main__':
    # some small test cases
    # Case 1: (1,2,3,4,5)/(2,3,4,5,1) are rotations of each other (kind 1);
    # (5,4,3,2,1)/(4,3,2,1,5) are rotations of each other (kind 2);
    # (20,10,2,9,1) shares the same element sum but is no rotation (kind 3).
    pizzas = [(1, 2, 3, 4, 5), (2, 3, 4, 5, 1), (5, 4, 3, 2, 1), (4, 3, 2, 1, 5), (20, 10, 2, 9, 1)]
    assert 3 == num_pizza_kinds(pizzas)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.