blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f9702743613a539b49c53c3eba4c66d8ea963505 | Python | jamiegrain/titanic | /titanic.py | UTF-8 | 2,735 | 2.703125 | 3 | [] | no_license | import numpy as np
import pandas as pd
from sklearn import linear_model, preprocessing
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import Imputer, StandardScaler
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.pipeline import Pipeline
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, GridSearchCV
df = pd.read_csv('train.csv', index_col = 0)
def fare_sqrt(fare):
    """Return the square root of a fare value."""
    return pow(fare, 0.5)
def create_new_cols(df):
    """Add engineered feature columns to ``df`` in place.

    Currently adds ``fare_sqrt`` (square root of the Fare column).
    Vectorized Series arithmetic replaces the original Python-level
    ``list(map(...))`` — same values, faster, and no sibling helper needed.
    """
    df['fare_sqrt'] = df['Fare'] ** 0.5
def separate_labels(df):
    """Split the Survived target off ``df`` (mutating it) and return (y, df)."""
    # pop() removes the column in place and hands back the Series.
    target = df.pop('Survived').values
    return target, df
def preprocess(df):
    """Clean and encode the raw Titanic frame; return a model-ready array.

    Mutates ``df`` in place (adds fare_sqrt, numeric-encodes Sex, drops
    free-text columns), one-hot encodes Embarked, then imputes missing
    values with the mode and standardizes every feature.

    Returns the transformed feature matrix as a numpy array.
    """
    create_new_cols(df)
    # Encode sex as 0/1 in place.
    df['Sex'].replace(['male', 'female'], [0, 1], inplace = True)
    # Drop sparse/free-text columns and raw Fare (superseded by fare_sqrt).
    df.drop(['Cabin', 'Name', 'Ticket', 'Fare'], axis=1, inplace=True)
    df = pd.get_dummies(df, prefix = ['Departed'], columns = ['Embarked'])
    pipeline = Pipeline([
        ('imp', Imputer(strategy = 'most_frequent')),
        ('scaler', StandardScaler())
    ])
    # NOTE(review): the original also rebuilt a DataFrame from the pipeline
    # output here, but the result was discarded — dead statement removed.
    return pipeline.fit_transform(df)
# Quick look at the linear correlation of each feature with survival.
print(df.corr()['Survived'])
y, df = separate_labels(df)
X = preprocess(df)
# Hold out 20% of the rows for validation.
X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=0.2, random_state=42)
def rbf_svc_fit(X, y):
    """Fit and return an RBF-kernel SVM (gamma=5, C=1) on X, y."""
    # sklearn estimators return self from fit(), so this can be one expression.
    return SVC(kernel='rbf', gamma = 5, C = 1).fit(X, y)
def decision_tree_fit(X, y):
    """Fit and return a decision tree (min 4 samples per leaf) on X, y."""
    return DecisionTreeClassifier(min_samples_leaf = 4).fit(X, y)
def gaussian_naive_bayes_fit(X, y):
    """Fit and return a Gaussian naive Bayes classifier on X, y."""
    return GaussianNB().fit(X, y)
def mlp_fit(X, y):
    """Fit and return a 2-hidden-layer MLP (1000, 300 units) on X, y."""
    return MLPClassifier(hidden_layer_sizes = (1000, 300), random_state = 42).fit(X, y)
def fit_all_clfs(X, y):
    """Fit every base model and return (name, fitted estimator) pairs."""
    fitters = [
        ('svc', rbf_svc_fit),
        ('dtree', decision_tree_fit),
        ('gnb', gaussian_naive_bayes_fit),
        ('mlp', mlp_fit),
    ]
    # Fit in the same order as before: svc, dtree, gnb, mlp.
    return [(label, fit(X, y)) for label, fit in fitters]
def ensemble_fit(X, y):
    """Train a hard-voting ensemble over all fitted base classifiers."""
    estimators = fit_all_clfs(X, y)
    voting_clf = VotingClassifier(estimators = estimators, voting = 'hard')
    voting_clf.fit(X, y)
    return voting_clf
# NOTE(review): the ensemble is fit on the *full* X/y, which includes the
# validation rows split off above, so this F1 score is optimistic.
final_clf = ensemble_fit(X, y)
y_pred = final_clf.predict(X_validation)
print(f1_score(y_validation, y_pred))
# Score the Kaggle test set.
df_test = pd.read_csv('test.csv', index_col = 0)
# NOTE(review): preprocess() calls create_new_cols() itself, so this call
# just recomputes the same column (harmless but redundant).
create_new_cols(df_test)
X_sub = preprocess(df_test)
y_sub =final_clf.predict(X_sub)
# Build the submission frame indexed by PassengerId.
predictions = pd.DataFrame(y_sub, index = df_test.index)
predictions.rename(columns = {0: 'Survived'}, inplace = True)
print(predictions.head())
predictions.to_csv('titanic_submission.csv')
# Sanity-check the written file by reading it back.
sub_check = pd.read_csv('titanic_submission.csv', index_col = 0)
print(sub_check.head())
| true |
e4895208cec9c83c066d6448d5e2e8f9739d12cc | Python | pchat99/Hacktoberfest2021-4 | /Convert String to Datetime | UTF-8 | 196 | 3.203125 | 3 | [
"MIT"
] | permissive | from datetime import datetime
# Parse a fixed-format timestamp string into a datetime object.
my_date_string = "Mar 11 2011 11:31AM"
# %b abbreviated month name, %I 12-hour clock, %p AM/PM marker.
datetime_object = datetime.strptime(my_date_string, '%b %d %Y %I:%M%p')
print(type(datetime_object))
print(datetime_object)
| true |
7f18295a2dae4dcb8147b580523143530d5d575b | Python | J-Spade/slackov | /runSlackov.py | UTF-8 | 2,245 | 2.65625 | 3 | [] | no_license | import threading
import os.path
import signal
import sys
from MarkovBot import MarkovBot
def main():
    """Load slackov.cfg (creating a template if missing) and start the bot.

    Python 2 script: uses print statements and reload(sys).
    """
    # Python 2 idiom: force utf-8 as the default string encoding.
    reload(sys)
    sys.setdefaultencoding('utf8')
    config_path = "slackov.cfg"
    # Template written out when no config file exists yet ("key," lines).
    config_default = "slacktoken,\n" \
                     "slackid,\n" \
                     "avatarsource,\n" \
                     "twitterconsumerkey,\n" \
                     "twitterconsumersecret,\n" \
                     "twitteraccesstoken,\n" \
                     "twitteraccesssecret,\n" \
                     "twitterid,"
    if os.path.isfile(config_path):
        # Parse "key,value" lines into a dict.
        config_vals = {}
        config_file = open(config_path, 'r')
        for line in config_file:
            try:
                (key, val) = line.split(',')
                if key:
                    key = key.strip()
                if val:
                    val = val.strip()
                config_vals[key] = val
            except ValueError:
                error_message = "Config file not properly setup. " \
                                "Config: {} missing {} value.\n"
                print error_message.format(config_path, line.replace(':', ''))
        # Group credentials for the two services.
        slack = {
            "token": config_vals['slacktoken'],
            "id": config_vals['slackid'],
            "avatarsource": config_vals['avatarsource']
        }
        twitter = {
            "consumer_key": config_vals['twitterconsumerkey'],
            "consumer_secret": config_vals['twitterconsumersecret'],
            "access_token": config_vals['twitteraccesstoken'],
            "access_token_secret": config_vals['twitteraccesssecret'],
            "id": config_vals['twitterid']
        }
        # All Slack fields are mandatory; Twitter ones are not checked here.
        if None in (slack['token'],
                    slack['id'],
                    slack['avatarsource']):
            print "Config file not properly setup. " \
                  "Config file located at {}.\n".format(config_path)
        else:
            # Run the bot and block until its worker process finishes.
            bot = MarkovBot(None, slack, twitter)
            bot.start()
            bot.process.join()
            print threading.enumerate()
    else:
        # First run: write the template config and ask the user to fill it in.
        print "Could not find a config file. " \
              "Creating a new one: {}\n".format(config_path)
        config_file = open(config_path, 'a')
        config_file.write(config_default)
        config_file.close()
if __name__ == "__main__":
    main()
| true |
e2d317ffa749924d24d0c00ad7e0e8049dcdf4f5 | Python | alindkhare/ray | /doc/kubernetes/example_scripts/job_example.py | UTF-8 | 2,256 | 2.71875 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | from collections import Counter
import os
import sys
import time
import ray
""" This script is meant to be run from a pod in the same Kubernetes namespace
as your Ray cluster.
Just below are the environment variables used to access Ray client via a
service targetting the Ray cluster's head node pod.
These environment variables are set by Kubernetes.
See https://kubernetes.io/docs/concepts/services-networking/service/#environment-variables
In the documentation examples, the head service has
"example-cluster-ray-head" and the relevant port is named "client".
Modify the environment variables as needed to match the name of the service
and port.
Note: The default head service set up by the Ray Kubernetes operator is named
<cluster-name>-ray-head,
where <cluster-name> is the metadata.name field you set in the RayCluster
custom resource.
""" # noqa
HEAD_SERVICE_IP_ENV = "EXAMPLE_CLUSTER_RAY_HEAD_SERVICE_HOST"
HEAD_SERVICE_CLIENT_PORT_ENV = "EXAMPLE_CLUSTER_RAY_HEAD_SERVICE_PORT_CLIENT"
@ray.remote
def gethostname(x):
    """Ray task: append this worker's hostname to the tuple ``x``."""
    import platform
    import time
    # Small sleep so tasks spread across the cluster instead of one node.
    time.sleep(0.01)
    return x + (platform.node(), )
def wait_for_nodes(expected):
    """Block until at least ``expected`` Ray nodes have joined the cluster."""
    while True:
        resources = ray.cluster_resources()
        # Each node contributes one "node:<ip>" resource entry.
        joined = sum(v for k, v in resources.items() if "node" in k)
        if joined >= expected:
            break
        print("{} nodes have joined so far, waiting for {} more.".format(
            joined, expected - joined))
        sys.stdout.flush()
        time.sleep(1)
def main():
    """Smoke-test the 3-node cluster by bouncing objects between workers."""
    wait_for_nodes(3)
    # Check that objects can be transferred from each node to each other node.
    for i in range(10):
        print("Iteration {}".format(i))
        # Chain two remote calls so each tuple travels between two workers.
        results = [
            gethostname.remote(gethostname.remote(())) for _ in range(100)
        ]
        print(Counter(ray.get(results)))
        sys.stdout.flush()
    print("Success!")
    sys.stdout.flush()
if __name__ == "__main__":
    # Head service address/port are injected by Kubernetes (see module docstring).
    head_service_ip = os.environ[HEAD_SERVICE_IP_ENV]
    client_port = os.environ[HEAD_SERVICE_CLIENT_PORT_ENV]
    ray.util.connect(f"{head_service_ip}:{client_port}")
    main()
| true |
f1e1ae742676cc8f299c3ce919086f1ed51d3cf1 | Python | imsardine/learning | /python/tests/test_re.py | UTF-8 | 662 | 3.515625 | 4 | [
"MIT"
] | permissive | import re
def test_named_group():
    """Named groups: (?P<name>...) captured values retrievable by name."""
    pattern = r'(?P<number>\d+(?:.\d+)?)(?P<unit>K)? Ratings'
    cases = [
        ('8723 Ratings', '8723', None),
        ('8.7K Ratings', '8.7', 'K'),
    ]
    for text, number, unit in cases:
        match = re.search(pattern, text)
        assert match.group('number') == number
        assert match.group('unit') == unit
def test_substitution__look_ahead_behind():
    """re.sub with lookbehind/lookahead replaces only the word between them."""
    origin = 'Hello, World! Have a nice day.'
    # The anchors (?<=...) and (?=...) are zero-width, so only the \w+ in
    # the middle ("World") is consumed and replaced.
    pattern = r'(?<=Hello, )\w+(?=\! .+)'
    replaced = re.sub(pattern, 'Regex', origin)
    assert replaced == 'Hello, Regex! Have a nice day.'
| true |
30bd80b3d8a95aa682a0ac02053101e5d1d387ef | Python | wattaihei/ProgrammingContest | /Codeforces/ECR67/probE.py | UTF-8 | 998 | 2.953125 | 3 | [] | no_license | import sys
import threading
input = sys.stdin.readline
sys.setrecursionlimit(10**7)
# Read the tree: N nodes, then N-1 undirected edges (1-indexed in the input).
N = int(input())
graph = [[] for _ in range(N)]
for _ in range(N-1):
    a, b = map(int, input().split())
    graph[a-1].append(b-1)
    graph[b-1].append(a-1)
# dp[v]: size of v's subtree when the tree is rooted at node 0.
dp = [1]*N
checked1 = [False]*N  # visited flags for the sizing DFS
checked = [False]*N   # visited flags for the rerooting DFS
ans = 0
def dfs(p):
    """Fill dp[] with subtree sizes by DFS from p (initially root 0)."""
    checked1[p] = True
    for np in graph[p]:
        if not checked1[np]:
            dfs(np)
            dp[p] += dp[np]
def reroot(p, score):
    """Rerooting DFS.

    ``score`` is the sum of subtree sizes with the tree rooted at p;
    tracks the maximum over all roots in the global ``ans``.
    """
    global ans
    ans = max(ans, score)
    checked[p] = True
    for np in graph[p]:
        if not checked[np]:
            # Move the root from p to np: np inherits the whole tree size,
            # p's size shrinks by np's old subtree; score changes by
            # root - 2*goto. Restored after the recursive call.
            root = dp[p]
            goto = dp[np]
            dp[np] = root
            dp[p] = root - goto
            reroot(np, score + root - 2*goto)
            dp[np] = goto
            dp[p] = root
def main():
    """Size the tree, reroot over every node, print the best score."""
    dfs(0)
    reroot(0, sum(dp))
    print(ans)
if __name__ == "__main__":
    # Deep recursion (e.g. a path graph) needs a large thread stack.
    threading.stack_size(1024 * 100000)
    thread = threading.Thread(target=main)
    thread.start()
    thread.join()
66efe51742eb14daa20902df467cce3f7b65980e | Python | Jsonghh/leetcode | /200114/Closest_Binary_Search_Tree_Value_II.py | UTF-8 | 2,619 | 3.234375 | 3 | [] | no_license | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import heapq
class Solution:
    """LeetCode 272: return the k BST values closest to ``target``."""

    def closestKValues(self, root: TreeNode, target: float, k: int) -> List[int]:
        # O(klogk) + O(N)
        # Max-heap by negated distance: the root holds the *worst* of the
        # k closest values seen so far, so it can be evicted cheaply.
        self.min_diffs = []
        self.helper(root, target, k)
        ans = []
        for diff, node_val in self.min_diffs:
            ans.append(node_val)
        return ans

    def helper(self, node, target, k):
        """Traverse the BST visiting values nearest ``target`` first,
        keeping only the k best entries in the self.min_diffs heap."""
        if not node:
            return
        # Descend toward the target side first so closer values are seen
        # before farther ones.
        if node.val <= target and node.right:
            self.helper(node.right, target, k)
        if node.val >= target and node.left:
            self.helper(node.left, target, k)
        diff = abs(node.val - target)
        # Accept if we hold fewer than k values, or this one beats the
        # current worst (heap root = largest distance, negated).
        if len(self.min_diffs) < k or -diff > self.min_diffs[0][0]:
            heapq.heappush(self.min_diffs, (-diff, node.val))
            if len(self.min_diffs) > k:
                heapq.heappop(self.min_diffs)
        # Then explore the far side, which may still hold closer values.
        if node.val < target and node.left:
            self.helper(node.left, target, k)
        if node.val > target and node.right:
            self.helper(node.right, target, k)
        return

    # NOTE(review): an earlier alternative was left here commented out —
    # convert the tree to a sorted list via an iterative inorder traversal,
    # then expand two pointers outward around the insertion point of
    # ``target``, taking the closer side k times. Removed for clarity.
| true |
bbfc7fc0e55043f496256c2b6e2a078653d6159f | Python | keinam53/Python_Poczatek | /Programowanie_obiektowe/2_Konstruktor i pola obiektu/Dodawanie_studentow.py | UTF-8 | 606 | 3.84375 | 4 | [] | no_license | class Student:
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
self.promotion = False
def print_student(student):
    """Print one line describing the student's name and promotion state."""
    info = "Imię: {} {}, Promocja: {}".format(
        student.first_name, student.last_name, student.promotion)
    print(info)
def promote_student(student):
    """Mark the given student as promoted (mutates in place)."""
    student.promotion = True
def run_example():
    """Demonstrate creating, printing and promoting students."""
    ola = Student(first_name="Ola", last_name="Bąk")
    print_student(ola)
    jan = Student("Jan", "Kowal")
    print_student(jan)
    # Promotion flips the flag in place; printed again to show the change.
    promote_student(ola)
    print_student(ola)
if __name__ == "__main__":
    run_example()
| true |
3682157a0898797b117cb184cea458f7a0121fb3 | Python | alanx3x7/SudokuSolver | /test_scripts/test.py | UTF-8 | 1,978 | 2.921875 | 3 | [] | no_license | from SudokuRecursiveSolver import SudokuRecursiveSolver
from SudokuScreenReader import SudokuScreenReader
from SudokuScreenWriter import SudokuScreenWriter
from sudoku_boards import board
from sudoku_base_solver_v3 import SudokuRecursiveSolver3
from sudoku_base_solver_v4 import SudokuRecursiveSolver4
from sudoku_base_solver_v5 import SudokuRecursiveSolver5
import time
import numpy as np
def test_with_screen():
    """End-to-end: read a sudoku from the screen, solve it, write it back."""
    reader = SudokuScreenReader()
    # Capture region: x=220, y=320, 450x450 pixels.
    reader.get_sudoku_board(220, 320, 450, 450)
    print(reader.game_board)
    solver = SudokuRecursiveSolver()
    solver.load_board(reader.game_board)
    print(solver.board)
    solver.solve_sudoku()
    print(solver.solution)
    # Write the solved digits back into the on-screen grid cells.
    writer = SudokuScreenWriter()
    writer.load_solution_board(220, 320, solver.solution, reader.game_board_contours)
    writer.find_game_board_centers()
    writer.write_in_sudoku()
def test_with_board():
    """Time solver v5 on the first six canned boards."""
    solver = SudokuRecursiveSolver5()
    for i in range(6):
        start = time.time()
        solver.load_board(board[i])
        solver.solve_sudoku()
        print(time.time() - start)
        print(solver.solution)
if __name__ == "__main__":
    # Benchmark: solve each of the 9 boards 10 times with solver v5 and
    # record wall-clock time per board.
    solver = SudokuRecursiveSolver5()
    v4_times = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    for i in range(9):
        time_start = time.time()
        for k in range(10):
            solver.load_board(board[i])
            solver.solve_sudoku()
        print(solver.solution)
        print(time.time() - time_start)
        v4_times[i] = time.time() - time_start
    # NOTE(review): array is named v4_times but the label printed is "v4"
    # while the solver used above is v5 — confirm which version is meant.
    print("v4:")
    print(v4_times)
    # solver3 = SudokuRecursiveSolver3()
    # full_start3 = time.time()
    # for i in range(9):
    #     print("Solving board number " + str(i))
    #     solver3.load_board(board[i])
    #     solver3.solve_sudoku()
    #     print(solver3.solution)
    # print("Total time to solve using v3")
    # print(time.time() - full_start3)
| true |
061e1fa770735652475ae42f6230185a31683472 | Python | antonylu/leetcode2 | /Python/053_maximum-subarray.py | UTF-8 | 1,845 | 4.0625 | 4 | [] | no_license | """
https://leetcode.com/problems/maximum-subarray/description/
Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.
Example:
Input: [-2,1,-3,4,-1,2,1,-5,4],
Output: 6
Explanation: [4,-1,2,1] has the largest sum = 6.
Follow up:
If you have figured out the O(n) solution, try coding another solution using the divide and conquer approach, which is more subtle.
"""
class Solution:
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# Solution 1: brute force, enumerate all sub array and calculate the value, return maximum value, O(n^2)
#
# Solution 2: Dynamic programming
# divide and conqure, the final optimization is determined by every optimized solution of each sub-question
# > For each item in the array, find the largest sum of any subarray ended in that item
# > and it's either the current item alone [c] or the previous+current item [..c]
# > Save the result in-place, so we just need to save the maximum of the current item and its previous+current item
# > return the maximum of the array
# Time O(n), 87% 46ms
r = nums[0]
for i in range(1, len(nums)):
j = nums[i] + nums[i-1]
if j > nums[i]: nums[i] = j
if r < nums[i]: r = nums[i]
return r
# Solution 3: accumulate sum, remember the max, re-accumate if sum < 0
# O(n), 36% 54ms
"""
sum = 0
maximum = nums[0]
for i in nums:
sum += i
if maximum < sum: maximum = sum
if sum <0: sum = 0
#print(maximum)
return maximum
"""
s = Solution()
print(s.maxSubArray([-2,1,-3,4,-1,2,1,-5,4]))
| true |
281bf51a62edfd35f3bc51e26711f454a47ed42e | Python | brilliantFire/deep-learning | /Kaggle_MNIST_VGGnet_w_aug_data.py | UTF-8 | 5,252 | 2.921875 | 3 | [] | no_license | """
~*~* VGGnet-like CNN with Augmented Data using Keras/Tensorflow *~*~
Code for a CNN with VGGnet-like architecture and augmented training data
as classifier for the MNIST handwritten digit data in the context of Kaggle's
'Digit Recognizer' competition. See https://arxiv.org/pdf/1409.1556.pdf for
more detail on VGGnet.
"""
import pandas as pd
import numpy as np
import os
from keras.utils.np_utils import to_categorical # for labels
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras import backend as K
K.set_image_dim_ordering('th')
np.random.seed(237)
# Set the working directory
os.chdir('C:\\Users\\rlvis\\work_MSPA\\MNIST') # desktop
train_orig = pd.read_csv('data/train.csv')
X_test = pd.read_csv('data/test.csv')
# Hold out 4200 random images (10%) as a validation set
valid = train_orig.sample(n = 4200, random_state = 555)
train = train_orig.loc[~train_orig.index.isin(valid.index)]
# delete original train set
del train_orig
# separate images & labels
X_train = train.drop(['label'], axis=1)
labels_train = train['label']
X_valid = valid.drop(['label'], axis=1)
labels_valid = valid['label']
# clear more space
del train, valid
# Normalize pixel values to [0, 1] and reshape to (n, channels=1, 28, 28)
X_train = X_train.astype('float32') / 255.
X_train = X_train.values.reshape(X_train.shape[0], 1, 28, 28).astype('float32')
X_valid = X_valid.astype('float32') / 255.
X_valid = X_valid.values.reshape(X_valid.shape[0], 1, 28, 28).astype('float32')
X_test = X_test.astype('float32') / 255.
X_test = X_test.values.reshape(X_test.shape[0], 1, 28, 28).astype('float32')
# one hot encoding of digit labels
labels_train = to_categorical(labels_train)
labels_valid = to_categorical(labels_valid)
# K = 10 digit classes
K_classes = 10
# NOTE(review): after the reshape above, X_train.shape[1] is the channel
# axis (1), not 784 pixels as originally commented; `px` appears unused below.
px = X_train.shape[1]
convnet3 = Sequential()
# [Conv2D]*2 -> MaxPool -> Dropout #1
convnet3.add(Conv2D(32, (3, 3),
                    input_shape=(1, 28, 28),
                    padding = 'same',
                    activation='relu'))
convnet3.add(Conv2D(32, (3, 3),
                    padding = 'same',
                    activation='relu'))
convnet3.add(MaxPooling2D(pool_size=(2, 2)))
convnet3.add(Dropout(0.10))
# [Conv2D]*2 -> MaxPool -> Dropout #2
convnet3.add(Conv2D(64, (3, 3),
                    strides = (2, 2),
                    padding = 'same',
                    activation = 'relu'))
convnet3.add(Conv2D(64, (3, 3),
                    padding = 'same',
                    activation='relu'))
convnet3.add(MaxPooling2D(pool_size = (2, 2)))
convnet3.add(Dropout(0.10))
# [Conv2D]*2 -> MaxPool -> Dropout #3
convnet3.add(Conv2D(64, (3, 3),
                    strides = (2, 2),
                    padding = 'same',
                    activation = 'relu'))
convnet3.add(Conv2D(64, (3, 3),
                    padding = 'same',
                    activation='relu'))
convnet3.add(MaxPooling2D(pool_size = (2, 2)))
convnet3.add(Dropout(0.10))
# Flatten -> Dense -> Dense -> Out
convnet3.add(Flatten())
convnet3.add(Dense(256, activation='relu'))
convnet3.add(Dense(128, activation='relu'))
convnet3.add(Dense(K_classes, activation='softmax'))
# Define stochastic gradient descent optimizer parameters & compile
sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True)
convnet3.compile(loss='categorical_crossentropy',
                 optimizer=sgd,
                 metrics=['accuracy'])
# Data augmentation: random rotations, zooms and shifts (no flips — digits
# are not flip-invariant).
augdata = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=20,
    zoom_range = 0.2,
    width_shift_range=0.15,
    height_shift_range=0.15,
    horizontal_flip=False,
    vertical_flip=False)
augdata.fit(X_train)
# Fit the model (use fit_generator for batch fitting as data is generated)
convnet3_fit = convnet3.fit_generator(augdata.flow(X_train,labels_train,
                                                   batch_size=100),
                                      epochs = 30,
                                      validation_data = (X_valid,labels_valid),
                                      verbose = 2)
# make predictions on test
convnet3_test_preds = convnet3.predict(X_test)
# predict as the class with highest probability
convnet3_test_preds = np.argmax(convnet3_test_preds, axis = 1)
# put predictions in pandas Series
convnet3_test_preds = pd.Series(convnet3_test_preds, name='label')
# Add 'ImageId' column
convnet3_for_csv = pd.concat([pd.Series(range(1,28001), name='ImageId'),
                              convnet3_test_preds], axis=1)
# write to csv for submission
convnet3_for_csv.to_csv('../output/csv/convnet3_keras.csv', index=False)
| true |
a5ddb6d60802f189b5ef2655cc3ecfdc75e9d1d5 | Python | saipoojavr/saipoojacodekata | /posinnum.py | UTF-8 | 100 | 3.15625 | 3 | [] | no_license | num1,num2,num3=input().split()
num1=str(num1)
num2=int(num2)
num3=int(num3)
print(num1[num2+num3-1]) | true |
a381971e4dfe3451a5431b1e1669733b34f8a4a8 | Python | elzaharieva/hackBulgaria | /week2/crash_desk.py | UTF-8 | 1,209 | 3.5625 | 4 | [] | no_license | class Bill:
    def __init__(self, amount):
        # Denomination of the bill, e.g. 20 for a $20 bill.
        self.amount=amount
def __str__(self):
return "A {}$ bill".format(self.amount)
def __repr__(self):
return self.__str__()
    def __int__(self):
        # Allow int(bill) so denominations can be summed.
        return self.amount
def __eq__(self, other):
return self.amount==other.amount
    def __hash__(self):
        # Hash on the string form of the amount; consistent with __eq__
        # (equal amounts -> equal hashes), so bills can be dict keys.
        return hash(str(self.amount))
class BatchBill():
    """An ordered batch of bills (anything supporting ``int()``)."""

    def __init__(self, batch):
        # Keep a reference to the caller's list of bills.
        self.batch = batch

    def __len__(self):
        """Number of bills in the batch."""
        return len(self.batch)

    def total(self):
        """Sum of the batch's denominations.

        BUG FIX: the original returned ``sum(self.batch)``, which raises
        TypeError for Bill objects (they define __int__ but not __add__);
        converting each element first works for Bills and plain ints alike.
        """
        return sum(int(bill) for bill in self.batch)

    def __getitem__(self, index):
        """Index straight into the underlying list."""
        return self.batch[index]
class CashDesk():
    """Tracks the bills received by a cash desk."""

    def __init__(self):
        # Flat list of every bill taken so far.
        self.money = []

    def take_money(self, money):
        """Accept a single Bill or any iterable of bills (e.g. a BatchBill).

        Returns the internal list of all bills held so far.
        """
        if isinstance(money, Bill):
            self.money.append(money)
        else:
            # BatchBill supports __getitem__/__len__, so it iterates fine.
            self.money.extend(money)
        return self.money

    def total(self):
        """Total value (in $) of all bills in the desk."""
        return sum(int(bill) for bill in self.money)

    def inspect(self):
        """Print a summary of the desk contents; return a bill->count dict.

        Renamed the original local ``dict`` — it shadowed the builtin.
        """
        counts = {}
        for bill in self.money:
            counts[bill] = counts.get(bill, 0) + 1
        print("We have a total of {}$ in the desk".format(self.total()))
        print("We have the following count of bills, sorted in ascending order:")
        # NOTE(review): iteration is insertion order, not actually sorted;
        # preserved as-is so the printed output does not change.
        for bill in counts:
            # str(bill) is "A <n>$ bill"; [2:] strips the leading "A ".
            print(str(bill)[2:] + "s - " + str(counts[bill]))
        return counts
| true |
c6395d2585e7ccf28c39129c7716aa3d8e6a6de9 | Python | data4help/crispy-train | /src/vae_model/decoder.py | UTF-8 | 3,861 | 2.828125 | 3 | [
"MIT"
] | permissive | # %% Packages
from ast import literal_eval as make_tuple
import numpy as np
from tensorflow.keras import Model, layers
# %% Code
class CreateDecoder:
    """Builds the decoder half of a convolutional (variational) autoencoder.

    A dense layer expands the latent vector back to the conv feature-map
    volume, which transposed-conv layers upsample to the input image shape.
    """

    def __init__(
        self, input_shape, conv_filters, conv_kernels, conv_strides, latent_space_dim
    ):
        self.input_shape = input_shape
        self.conv_filters = conv_filters
        # Kernel/stride specs arrive as strings like "(3, 3)"; parse to tuples.
        self.conv_kernels = [make_tuple(x) for x in conv_kernels]
        self.conv_strides = [make_tuple(x) for x in conv_strides]
        self.latent_space_dim = latent_space_dim
        self.model = None
        self._model_input = None
        # Feature-map shape at the encoder bottleneck, inferred from strides.
        self._shape_before_bottleneck = self._conv_arithmetic()
        self._num_conv_layers = len(conv_filters)
        self._build_decoder()

    def summary(self):
        """Print the Keras model summary."""
        self.model.summary()

    def _build_decoder(self):
        """Assemble input -> dense -> reshape -> conv-transpose stack -> output."""
        decoder_input = self._add_decoder_input()
        dense_layers = self._add_dense_layer(decoder_input)
        reshape_layer = self._add_reshape_layer(dense_layers)
        conv_transpose_layers = self._add_conv_transpose_layers(reshape_layer)
        decoder_output = self._add_decoder_output(conv_transpose_layers)
        self.model = Model(decoder_input, decoder_output, name="decoder")

    def _add_decoder_input(self):
        # Latent vector is the decoder's only input.
        return layers.Input(shape=self.latent_space_dim, name="decoder_input")

    def _calculate_input_size(self, input_size, number_of_iteration, position):
        """Divide one spatial dimension by the stride of one conv layer.

        ``position`` selects width (0) or height (1) from the stride tuple.
        NOTE(review): integer truncation assumes sizes divide evenly by the
        strides — confirm for non-power-of-two inputs.
        """
        conv_strides_tuple = self.conv_strides[number_of_iteration]
        conv_strides_number = conv_strides_tuple[position]
        input_size = int(input_size / conv_strides_number)
        return input_size

    def _conv_arithmetic(self):
        """Compute the encoder's bottleneck feature-map shape [w, h, filters]."""
        input_width = self.input_shape[0]
        input_height = self.input_shape[1]
        for i in range(len(self.conv_filters)):
            input_width = self._calculate_input_size(input_width, i, 0)
            input_height = self._calculate_input_size(input_height, i, 1)
        shape_before_bottleneck = [input_width, input_height, self.conv_filters[-1]]
        return shape_before_bottleneck

    def _add_dense_layer(self, decoder_input):
        """Expand the latent vector to the flattened bottleneck volume."""
        shape_before_bottleneck = self._conv_arithmetic()
        number_of_neurons = np.prod(shape_before_bottleneck)
        dense_layers = layers.Dense(number_of_neurons)(decoder_input)
        return dense_layers

    def _add_reshape_layer(self, dense_layer):
        # Un-flatten back into the bottleneck feature-map shape.
        return layers.Reshape(self._shape_before_bottleneck)(dense_layer)

    def _add_conv_transpose_layers(self, x):
        """Add conv transpose blocks"""
        # Loop through all the conv layers in reverse order and stop at the first layer
        for layer_index in reversed(range(1, self._num_conv_layers)):
            x = self._add_conv_transpose_layer(layer_index, x)
        return x

    def _add_conv_transpose_layer(self, layer_index, x):
        """One upsampling block: Conv2DTranspose -> ReLU -> BatchNorm."""
        layer_num = self._num_conv_layers - layer_index
        conv_transpose_layer = layers.Conv2DTranspose(
            filters=self.conv_filters[layer_index],
            kernel_size=self.conv_kernels[layer_index],
            strides=self.conv_strides[layer_index],
            padding="same",
            name=f"decoder_conv_transpose_layer_{layer_num}",
        )
        x = conv_transpose_layer(x)
        x = layers.ReLU(name=f"decoder_relu_{layer_num}")(x)
        x = layers.BatchNormalization(name=f"decoder_bn_{layer_num}")(x)
        return x

    def _add_decoder_output(self, x):
        """Final upsampling back to the input channel count + sigmoid output."""
        conv_transpose_layer = layers.Conv2DTranspose(
            filters=self.input_shape[-1],
            kernel_size=self.conv_kernels[0],
            strides=self.conv_strides[0],
            padding="same",
            name=f"decoder_conv_transpose_layer_{self._num_conv_layers}",
        )
        x = conv_transpose_layer(x)
        # Sigmoid keeps pixel outputs in [0, 1].
        output_layer = layers.Activation("sigmoid", name="sigmoid_layer")(x)
        return output_layer
| true |
fbd3029cde52d41714ba0add5122957495b09359 | Python | alexgpitts/CS362-Homework4-Question-1 | /volume_of_a_cube.py | UTF-8 | 448 | 3.5625 | 4 | [] | no_license | '''
Author: Alex Pitts
Date: 2/7/21
Class: CS 362
Description: This program simply calculates the
volume of a cube with the function "volume_of_cube()"
'''
def volume_of_cube(edge):
    """Return edge**3 for a cube, or -1 when the int-converted edge is < 1."""
    edge = int(edge)
    if edge < 1:
        # Error sentinel for non-positive edge lengths.
        return -1
    return edge ** 3
| true |
847371f4ad1e8e831cd366a99d6c03bf34baa949 | Python | iandioch/solutions | /advent_of_code/2022/01/part2.py | UTF-8 | 279 | 2.953125 | 3 | [] | no_license | import sys
# Advent of Code 2022 day 1 part 2: blank lines separate one elf's snack
# calories from the next; print the combined total of the top three elves.
elves = []
current = []
for raw in sys.stdin.readlines():
    if raw.strip():
        current.append(int(raw))
    else:
        elves.append(current)
        current = []
elves.append(current)
totals = sorted((sum(group) for group in elves), reverse=True)
print(sum(totals[:3]))
| true |
52928c7b8a2f982c4e3eaefd0031e37334d08ee4 | Python | itallocastro/projeto_ia | /model.py | UTF-8 | 2,935 | 2.734375 | 3 | [] | no_license | import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import _tree
import numpy as np
def replaceVirg(x):
    """Normalize a Brazilian-format price string to digits only.

    Keeps the part before the first comma (drops the cents) and removes
    the thousands-separator dots, e.g. "1.299,00" -> "1299".
    """
    integer_part = x.split(',')[0]
    return integer_part.replace('.', '')
class DecisionTreePhone:
    """Recommends a phone by fitting a decision tree to 'dadosia.csv' and
    exporting the tree as executable if/else rules (rules.txt)."""

    def __init__(self):
        self.dataset = pd.read_csv('dadosia.csv', sep=',')
        self.replaceData()

    def createModel(self):
        """Return an (unfitted) deterministic decision tree classifier."""
        return DecisionTreeClassifier(random_state=0)

    def tree_to_code(self,tree, feature_names):
        """Write the fitted tree as nested Python if/else code into rules.txt.

        Leaves assign the matching phone name (dataset 'Nome' column) to the
        module-global ``x``, which predict() reads after exec'ing the file.
        """
        tree_ = tree.tree_
        feature_name = [
            feature_names[i] if i != _tree.TREE_UNDEFINED else "undefined!"
            for i in tree_.feature
        ]
        # NOTE(review): [:-5] trims the last five chars of each name and the
        # result is never used afterwards — looks like leftover code.
        feature_names = [f.replace(" ", "_")[:-5] for f in feature_names]
        def recurse(f,node, depth):
            # Emit one indented if/else per internal node, recursively.
            indent = " " * depth
            if tree_.feature[node] != _tree.TREE_UNDEFINED:
                name = feature_name[node]
                threshold = tree_.threshold[node]
                f.write("{}if {} <= {}:\n".format(indent, name, np.round(threshold,2)))
                recurse(f,tree_.children_left[node], depth + 1)
                f.write("{}else:\n".format(indent, name, np.round(threshold,2)))
                recurse(f,tree_.children_right[node], depth + 1)
            else:
                # Leaf: emit an assignment for every class with samples here.
                for i in range(0,len(tree_.value[node][0])):
                    if(tree_.value[node][0][i] >= 1):
                        name = self.dataset['Nome'][i]
                        f.write("{}x = {}\n".format(indent, str('"'+name+'"')))
        with open('rules.txt', 'w') as f:
            f.write('global x\n')
            recurse(f,0, 0)

    def predict(self,Preço, Resistência, Câmera, Desempenho, fiveG, RAM, Armazenamento, Tela, Digital, Bateria, file):
        # SECURITY NOTE(review): exec() runs arbitrary code from `file`;
        # only acceptable because rules.txt is generated locally above.
        # The parameters are referenced by the exec'd rules, which set the
        # module-global `x` returned below.
        exec(file.read())
        return x

    def fitModel(self):
        """Fit the tree on the feature columns and export it to rules.txt."""
        model = self.createModel()
        model.fit(self.dataset[['Preço', 'Resistência', 'Câmera', 'Desempenho', 'fiveG', 'RAM', 'Armazenamento', 'Tela', 'Digital', 'Bateria']],self.dataset['Nome'])
        self.tree_to_code(model, list(self.dataset.columns[1:]))
        return model

    def replaceData(self):
        """Encode categorical columns in place (LabelEncoder / comma prices)."""
        le = LabelEncoder()
        self.dataset["Preço"] = self.dataset["Preço"].apply(lambda x: replaceVirg(x))
        self.dataset = self.dataset.rename(columns={'Resistência a água': 'Resistência', '5G': 'fiveG'})
        # sim = 1, não = 0
        self.dataset['Resistência'] = le.fit_transform(self.dataset['Resistência'])
        # bom = 0, medio = 1, ruim = 2
        self.dataset['Câmera'] = le.fit_transform(self.dataset['Câmera'])
        # bom = 0, medio = 1, ruim = 2
        self.dataset['Desempenho'] = le.fit_transform(self.dataset['Desempenho'])
        # sim = 1, não = 0
        self.dataset['fiveG'] = le.fit_transform(self.dataset['fiveG'])
        # sim = 1, não = 0
        self.dataset['Digital'] = le.fit_transform(self.dataset['Digital'])
| true |
ee9b83c61e8c6309aa8939d0472c633ef63bd1be | Python | zeroam/TIL | /python/pyqt/LearnPyQt/q_dialog.py | UTF-8 | 1,285 | 2.765625 | 3 | [
"MIT"
] | permissive | import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
QApplication,
QMainWindow,
QDialog,
QDialogButtonBox,
QPushButton,
QVBoxLayout,
)
class CustomDialog(QDialog):
    """Modal dialog with OK/Cancel buttons wired to accept()/reject()."""

    def __init__(self, *args, **kwargs):
        super(CustomDialog, self).__init__(*args, **kwargs)
        self.setWindowTitle('Hello!')
        # Standard OK + Cancel button box.
        QBtn = QDialogButtonBox.Ok | QDialogButtonBox.Cancel
        self.buttonBox = QDialogButtonBox(QBtn)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.buttonBox)
        self.setLayout(self.layout)
class MainWindow(QMainWindow):
    """Main window with a single 'Hello' button that opens a CustomDialog."""

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original called super(QMainWindow, self).__init__,
        # which starts the MRO lookup *after* QMainWindow and so skips
        # QMainWindow's own initializer; super() must start from this class.
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setWindowTitle("Jayone's Awesome App")
        widget = QPushButton('Hello')
        widget.clicked.connect(self.onMyToolBarButtonClick)
        self.setCentralWidget(widget)

    def onMyToolBarButtonClick(self, s):
        """Show the modal dialog and report whether it was accepted."""
        print('click', s)
        dlg = CustomDialog(self)
        if dlg.exec_():
            print('Success')
        else:
            print('Cancel')
# Create the application, show the main window and enter the Qt event loop.
app = QApplication(sys.argv)
window = MainWindow()
window.show()
app.exec_()
| true |
2b0047806af23b217f298a64483f0b8292a46ba2 | Python | gntpapa/prep_docker | /src/checker.py | UTF-8 | 3,295 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python3.6
import threading
import time
import sys
import os
import requests,json
try:
# Python 2.x
from SocketServer import ThreadingMixIn
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
except ImportError:
# Python 3.x
from socketserver import ThreadingMixIn
from http.server import SimpleHTTPRequestHandler, HTTPServer
class ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
    # HTTPServer that handles each request in its own thread.
    pass
class MyRequestHandler(SimpleHTTPRequestHandler):
    """Handler that answers GET /check with a small JSON status payload."""

    def do_GET(self):
        if self.path == '/check':
            # Create the response
            response = {
                "result": "OK"
            }
            self.protocol_version = 'HTTP/1.1'
            self.send_response(200, 'OK')
            self.send_header('Content-type', 'application/json')
            self.end_headers()
            # BUG FIX: bytes(str) without an encoding raises TypeError on
            # Python 3 (the shebang targets 3.6); encode the JSON explicitly.
            self.wfile.write(json.dumps(response).encode('utf-8'))
            return
        # NOTE(review): non-/check requests send no response at all — the
        # original delegation to SimpleHTTPRequestHandler is commented out;
        # preserved as-is to avoid changing observable behavior.
class ThreadingChecker(object):
    """ Threading example class
    The run() method will be started and it will run in the background
    until the application exits.
    """

    def __init__(self, interval=1):
        """ Constructor
        :type interval: int
        :param interval: Check interval, in seconds
        """
        self.interval = interval
        thread = threading.Thread(target=self.run, args=())
        thread.daemon = True  # Daemonize thread
        thread.start()  # Start the execution

    def run(self):
        """ Method that runs forever """
        while True:
            # Do something
            print('Doing something imporant in the background')
            time.sleep(self.interval)
            # Poll our own /check endpoint; once it answers OK, terminate
            # the whole process (os._exit skips cleanup handlers on purpose).
            response = requests.get("http://localhost:8000/check",verify=False)
            # print(response["data"].json().get("result",""))
            res_json = response.json().get("result","")
            if res_json == "OK":
                os._exit(0)
def getExtIPaddr():
    """Return this machine's external IP from ipinfo.io ("[NO DATA]" if absent).

    Exits the process if the HTTP request itself fails.
    """
    url = "https://ipinfo.io/"
    try:
        reply = requests.get(url,verify=False)
    except:
        print("error while querying info...")
        sys.exit()
    info = json.loads(reply.text)
    return info.get("ip", "[NO DATA]")
# Parse command line: arg1 is "host:port" or a bare port, arg2 an optional
# directory to serve from.
if sys.argv[1:]:
    address = sys.argv[1]
    if (':' in address):
        interface = address.split(':')[0]
        port = int(address.split(':')[1])
    else:
        interface = '0.0.0.0'
        port = int(address)
else:
    port = 8000
    interface = '0.0.0.0'
if sys.argv[2:]:
    os.chdir(sys.argv[2])
print('Started HTTP server on ' + interface + ':' + str(port))
# NOTE(review): rebinding this module-level name has no effect on the server
# below, which is constructed with MyRequestHandler explicitly.
SimpleHTTPRequestHandler = MyRequestHandler
server = ThreadingSimpleServer((interface, port), SimpleHTTPRequestHandler)
# Background thread that polls /check and kills the process once it answers.
checker = ThreadingChecker()
# time.sleep(3)
# print('Checkpoint')
# time.sleep(2)
# print('Bye')
print( "Your externel IP : " + getExtIPaddr())
# data = requests.post(url, json=payload, verify=False)
# print(getExtIPaddr())
# requests.get("http://localhost:8000/check",verify=False)
try:
    # Serve one request at a time until interrupted (or the checker exits us).
    while 1:
        # print("start")
        sys.stdout.flush()
        server.handle_request()
except KeyboardInterrupt:
    print('Finished.')
| true |
bb76848c458c703ad89dcbd538dfaef71f0e859b | Python | pabloppp/MNIST-Kaggle-Compo | /main.py | UTF-8 | 3,604 | 2.765625 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import deep_nn as nn
import time
np.random.seed(21071969)
# Kaggle MNIST train.csv: one row per image, first column is the label.
# Transposed so samples are columns (shape (785, 42000) before the split).
ground_truth = pd.read_csv("train.csv").as_matrix().T
X = ground_truth[1:] / 255          # pixel values scaled into [0, 1]
Y_raw = ground_truth[:1]            # labels as a (1, m) row of digits 0..9
Y = np.eye(10)[Y_raw.reshape(-1)].T # .map()  -- one-hot encode, shape (10, m)
# Hold out the last 4000 samples as a local test set.
train_set_size = 38000
X_train = X[:, :train_set_size]
Y_train = Y[:, :train_set_size]
Y_raw_train = Y_raw[:, :train_set_size]
X_test = X[:, train_set_size:]
Y_test = Y[:, train_set_size:]
Y_raw_test = Y_raw[:, train_set_size:]
m = Y.shape[1]
# NOTE(review): m_train is taken from Y_test, not Y_train -- looks like a
# copy/paste slip (all three counters are unused below, so it is harmless).
m_train = Y_test.shape[1]
m_test = Y_test.shape[1]
print("Shape of X", X.shape)
print("Shape of Y", Y.shape)
assert (X.shape == (784, 42000)) # 784 == 28x28
assert (Y.shape == (10, 42000)) # the output is a single label from 0 to 9
# print(Y_raw[:, :5])
# print(Y[:, :5])
# Network layout: 784 inputs -> two hidden layers (1200, 700) -> 10 outputs.
input_layer_dims = X.shape[0]
output_layer_dims = Y.shape[0]
# SIMPLE LOGISTIC REGRESSION
parameters = nn.initialize_parameters([input_layer_dims, 1200, 700, output_layer_dims])
costs = []
accuracies_train = []
accuracies_test = []
print("Shape of W1", parameters["W1"].shape)
print("Shape of b1", parameters["b1"].shape)
# Hyper-parameters: SGD step size, L2 regularisation strength, and how often
# (in iterations) the accuracy snapshot is taken.
learning_rate = 0.04
L2_lambd = 0.7
epoch_size = 200
batch_size = train_set_size // 200 # mini batches for the full test data
batch_count = X_train.shape[1] // batch_size
print("batch_size", batch_size)
print("batch_count", batch_count)
last_epoch_time = time.time()
# Mini-batch gradient descent; batches cycle over the training set.
for i in range(30000):
    # Generate minibatch
    batch_index_start = (i % batch_count) * batch_size
    batch_index_end = batch_index_start + batch_size
    X_batch = X_train[:, batch_index_start:batch_index_end]
    Y_batch = Y_train[:, batch_index_start:batch_index_end]
    # forward propagation
    AL, caches = nn.L_model_forward(X_batch, parameters)
    # calculate costs
    if L2_lambd == 0:
        cost = nn.compute_cost(AL, Y_batch)
    else:
        cost = nn.compute_cost_with_regularization(AL, Y_batch, parameters, L2_lambd)
    costs.append(cost)
    # backward propagation
    grads = nn.L_model_backward(AL, Y_batch, caches, L2_lambd)
    # update props
    parameters = nn.update_parameters(parameters, grads, learning_rate)
    # epoch: every epoch_size iterations, report timing and accuracies.
    if i % epoch_size == 0:
        print("Iteration", i)
        elapsed_time = time.time() - last_epoch_time
        print("Elapsed time since las epoch", "{0:.2f}".format(elapsed_time) + "s",
              "(" + "{0:.2f}".format(elapsed_time / epoch_size) + "s/iter)")
        last_epoch_time = time.time()
        prediction_test = nn.predict(X_test, parameters)
        prediction_train = nn.predict(X_train, parameters)
        print(prediction_test[:, :30])
        print(Y_raw_test[:, :30])
        accuracy_test = np.sum(prediction_test == Y_raw_test) / prediction_test.shape[1]
        accuracies_test.append(accuracy_test)
        accuracy_train = np.sum(prediction_train == Y_raw_train) / prediction_train.shape[1]
        accuracies_train.append(accuracy_train)
        print("Cost:", cost)
        print("Accuracy test:", accuracy_test, "train:", accuracy_train)
        print("--------")
# Predict the competition test set and write the Kaggle submission file.
compo_data = pd.read_csv("test.csv").as_matrix().T
X_compo = compo_data / 255
print("X_compo", X_compo.shape)
prediction = nn.predict(X_compo, parameters)
df = pd.DataFrame(prediction.T, columns=['Label'])
df.index += 1  # Kaggle ImageId is 1-based
df.to_csv('submission.csv', index=True, index_label='ImageId')
print("DONE!")
# Diagnostic plots: cost curve, then train/test accuracy curves.
plt.figure()
plt.plot(costs) # plott cost fn
plt.legend(['cost'])
plt.figure()
plt.plot(accuracies_train) # plott accuracy fn
plt.plot(accuracies_test) # plott accuracy fn
plt.legend(['accuracy train', 'accuracy test'])
plt.show()
| true |
762076a2482b57e125f2db2a23d01f690ffa3180 | Python | mishrakeshav/Competitive-Programming | /SPOJ/Dynamic Programming/ACODE.py | UTF-8 | 366 | 2.734375 | 3 | [
"MIT"
] | permissive |
def solve(code):
    """Return the number of ways *code* decodes as letters (A=1 ... Z=26).

    Classic "decode ways" dynamic programming (SPOJ ACODE): dp[i] counts the
    decodings of the first i digits.  A digit stands alone when it is not
    '0'; a two-digit pair contributes when it lies in 10..26.

    The original body returned len(code) for short inputs and None otherwise,
    which is not the decoding count (e.g. "27" has 1 decoding, not 2).
    """
    n = len(code)
    if n == 0:
        return 1  # the empty string decodes exactly one way
    dp = [0] * (n + 1)
    dp[0] = 1
    dp[1] = 1 if code[0] != '0' else 0
    for i in range(2, n + 1):
        if code[i - 1] != '0':               # single digit
            dp[i] += dp[i - 1]
        if 10 <= int(code[i - 2:i]) <= 26:   # pair with the previous digit
            dp[i] += dp[i - 2]
    return dp[n]
if __name__ == '__main__':
    mem = {}  # NOTE(review): never used -- looks like leftover memoisation scaffolding
    # Read codes until the '0' sentinel terminates the input.
    while True:
        code = input()
        if code == '0':
            break
        solve(code)  # NOTE(review): result is discarded; nothing is printed
a1db43d5041d52a0db4e18774654d2d4e2e34c90 | Python | kumarsaurabh20/Omics_data_integration_utilities | /integrative_omics/db.py | UTF-8 | 3,996 | 3.8125 | 4 | [] | no_license | """
author: Kumar Saurabh Singh
url: https://kumarsaurabhsingh.com
creation date: 09-02-2022
"""
import sqlite3
from sqlite3 import Error
class RowsKeys:
    """Small SQLite demo: creates a throwaway `addresses` table and shows
    dict-style row access via sqlite3.Row."""
    def __init__(self):
        # Build (and rebuild) the demo database on every instantiation.
        self.sqlite_file = "mvc.db"
        self.conn = None # set the placeholder for the connection
        self.create_connection() # create the connection
        self.drop_table() # drop the table if it exists
        self.create_table() # creation the dummy table
        self.create_data() # for filling up the database with dummy data
    def create_connection(self):
        """ create a database connection to the SQLite database
            specified by db_file
        :db_file: self.sqlite_file
        :creates : self.conn  Connection object
        """
        try:
            self.conn = sqlite3.connect(self.sqlite_file)
            self.conn.row_factory = sqlite3.Row #this for getting the column names!
        except Error as e:
            print("create_connection: {}".format(e))
        else:
            print("Database connection created!")
    def drop_table(self):
        """
        small function to drop the dummy table
        """
        sql = '''DROP TABLE IF EXISTS `addresses` '''
        try:
            self.conn.execute(sql)
        except Error as e:
            # NOTE(review): the label says "create_table" in the error
            # message although this is drop_table -- confirm intent.
            print("create_table: {}".format(e))
        else:
            print("Table dropped")
    def create_table(self):
        """
        small function to create a dummy table
        """
        sql = '''CREATE TABLE IF NOT EXISTS `addresses` (`id` integer PRIMARY KEY,
                        `name` TEXT,
                        `address` TEXT,
                        `city` TEXT)'''
        try:
            self.conn.execute(sql)
        except Error as e:
            print("create_table: {}".format(e))
        else:
            print("Table created!")
    def create_data(self):
        # Fixed set of fake address rows; `id` autoincrements.
        addresses = [("Jansen", "Blaak 928", "Rotterdam"), ("Klaasen", "Maasberglaan 23", "Rotterdam"),
                     ("Sluijsen", "Maasstraat 25", "Barendrecht"), ("de Vos", "Meent 198", "Rotterdam"),
                     ("De Notenkraker", "Pennylane 15", "Amsterdam")]
        sql = """INSERT INTO `addresses` (`name`, `address`, `city`)
                 VALUES (?, ?, ?)"""
        try:
            cur = self.conn.cursor()
            cur.executemany(sql, addresses)
            self.conn.commit()
        except Error as e:
            print("create_table: {}".format(e))
        else:
            print("Insert of fake data!")
    def get_rows(self, fields):
        """
        Small function for getting multiple rows
        :param fields:
        :return: rows
        """
        try:
            sql = '''SELECT `name`, `address`, `city`
                     FROM `addresses` WHERE `city` = ?'''
            cur = self.conn.cursor()
            cur.execute(sql, fields)
            return cur.fetchall()
        except Error as e:
            # On error this returns None implicitly.
            print("get_row: {}".format(e))
    def get_row(self, fields):
        # Same query as get_rows but returns only the first matching row.
        try:
            sql = '''SELECT `name`, `address`, `city`
                     FROM `addresses` WHERE `city` = ?'''
            cur = self.conn.cursor()
            cur.execute(sql, fields)
            return cur.fetchone()
        except Error as e:
            print("get_row: {}".format(e))
    def close_conn(self):
        # Close the underlying sqlite3 connection.
        try:
            self.conn.close()
        except Error as e:
            print("close_conn: {}".format(e))
        else:
            print("Connection closed!")
if __name__ == "__main__":
    # Demo run: build the database, then show single- and multi-row lookups.
    s = RowsKeys()
    # get one row and print as dictionary
    print("Return one Row")
    fields = ["Barendrecht"]
    data = s.get_row(fields)
    print(dict(data))
    print("==============")
    print("Return multiple Rows")
    # get multiple rows and print as dictionary
    fields = ["Rotterdam"]
    rows = s.get_rows(fields)
    for row in rows:
        print(dict(row))
        print()
    s.close_conn()
| true |
4f7d6791c38622ed49f78b1bd9f508747293a98a | Python | rasteiro11/Python | /Coding_Bat_Python_Solution/pos_neg.py | UTF-8 | 227 | 3.234375 | 3 | [] | no_license | def pos_neg(a, b, negative):
    # When "negative" is requested: True only if both values are negative.
    if negative == True and (a < 0 and b < 0):
        return True
    # Otherwise require exactly one strictly-negative and one strictly-positive
    # value; zero counts as neither, so e.g. (0, -1) yields False.
    if negative == False and ((a > 0 and b < 0) or (a < 0 and b > 0)):
        return True
    else:
        return False
| true |
18b5f38db9a7754425645d4131cf791b06ccbc09 | Python | VoidSec/Exploit-Development | /fuzzers/SimpleCommandFuzzer/SCF.py | UTF-8 | 4,736 | 3.0625 | 3 | [] | no_license | """
Full title: Simple Command Fuzzer
Author: Paolo Stagno - voidsec@voidsec.com - https://voidsec.com
Usage: Provide this script with a target IP and port. It will start sending a raw buffer with length 50.
It will then increment the size at every cicle until the target software will crash.
"""
#/usr/bin/env python
import argparse, random, socket, string, sys
from termcolor import cprint
from time import sleep
def str_generator(size=100, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits + string.punctuation):
    """
    Pseudo-random string generator.

    :param size: length of the generated string (default 100)
    :param chars: alphabet to draw from (default: ASCII upper/lower case
                  letters, digits and punctuation)
    :return: the generated string
    """
    picked = [random.choice(chars) for _ in range(size)]
    return "".join(picked)
# Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(prog="SCF.py", description="Simple Command Fuzzer")
parser.add_argument("-t", "--target", dest="target", default=None, required=True, help="Remote target server")
parser.add_argument("-p", "--port", dest="port", default=None, required=True, type=int, help="Remote target port")
parser.add_argument("--treshold", dest="treshold", default=9000, type=int, help="Max bufer length. Once hit, it will switch to another command")
parser.add_argument("--timeout", dest="timeout", default=2, type=int, help="Socket timeout. Used to determine if theremote target has crashed")
parser.add_argument("--size", dest="size", default=100, type=int, help="buff incrementing size. Every cycle the buff will be incremented of the specified size")
args = parser.parse_args()
# Commands & PoC Config options
commands = ["STATS", "RTIME", "LTIME", "SRUN", "TRUN", "GMON", "GDOG", "KSTET", "GTER", "HTER", "LTER", "KSTAN"]
poc_file = open("SCF_PoC.txt", "w")
#-------------------------------------------
target = args.target
port = args.port
treshold = args.treshold
timeout = args.timeout
size = args.size
vulnerable = []     # (command, crash length, payload) triples recorded so far
treshold_hit = []   # commands that never crashed below the treshold
cprint("Simple Command Fuzzer (SCF) by VoidSec","magenta")
# Main fuzzing loop (Python 2 script: note raw_input below).  For every
# command, keep sending an ever-growing random buffer until the target
# stops answering (assumed crash) or the treshold is reached.
try:
    for command in commands:
        buff = str_generator()
        user_input = ""
        cprint("\n[>] Testing {} command:".format(command),"blue")
        while (len(buff)<=treshold):
            s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
            s.settimeout(timeout)
            try:
                s.connect((target,port))
                s.recv(1024)
                #cprint("Sending {} buff with length: {}".format(command, len(buff)), "blue")
                sys.stdout.write("\r[-] Sending buffer of {} bytes".format(len(buff)))
                sys.stdout.flush()
                s.send(command + " " + buff + "\r\n")
                s.close()
            except socket.timeout: # If we fail to send new command to the server, we'll assume it is crashed
                # Record the last length that still worked (len(buff)-size).
                vulnerable.append((command, len(buff)-size, buff))
                cprint("\n[+] Crash occured with {} command and buffer length of {}".format(command, len(buff)-size), "green")
                while (user_input != "C"):
                    cprint("[!] Restart the server, than press [C] to continue:","yellow")
                    user_input=raw_input().upper()
                break
            except socket.error as err:
                # Connection-level failure: let the operator decide what to do.
                cprint("\n{}".format(err),"red")
                user_input=""
                while ((user_input != "K") and (user_input != "C") and (user_input != "S")):
                    cprint("[!] Restart the server, than press [C] to continue fuzzing this command, [S] to skip this command or [K] to abort:","yellow")
                    user_input=raw_input().upper()
                if (user_input.upper() == "K"):
                    sys.exit(1)
                elif (user_input.upper() == "C"):
                    cprint("{} bytes buffer size was skipped".format(len(buff)-size),"red")
                    pass
                elif (user_input.upper() == "S"):
                    user_input=""
                    cprint("Do you want to save this crash? [Y/N]","yellow")
                    user_input=raw_input().upper()
                    if(user_input=="Y"):
                        vulnerable.append((command, len(buff)-size, buff))
                        cprint("[+] Crash occured with {} command and buffer length of {}".format(command, len(buff)-size), "green")
                    cprint("{} command was skipped".format(command),"red")
                    break
            sleep(1)
            buff = buff + str_generator(size)
        if(len(buff)>treshold):
            treshold_hit.append(command)
    # Summary: print the findings and mirror them into the PoC file.
    cprint("\nVulnerable Commands:\n----------------------","green")
    poc_file.write("Vulnerable Commands:\n----------------------\n")
    for i in vulnerable:
        cprint("- {} crashed with {} bytes".format(i[0], i[1]),"white")
        cprint("PoC: {}\n".format(i[2]),"grey")
        poc_file.write("- {} crashed with {} bytes\nPoC: {}\n\n".format(i[0], i[1], i[2]))
    cprint("\nTreshold hit on:\n----------------------","white")
    poc_file.write("\nTreshold hit on:\n----------------------\n")
    for i in treshold_hit:
        cprint("- {}".format(i),"grey")
        poc_file.write("- {}\n".format(i))
    poc_file.close()
except KeyboardInterrupt:
    cprint("Exiting...","red")
    sys.exit(0)
52ed6b68652777938fd44d47c33bde7c54d1082b | Python | livmortis/ImageStyleDetection | /symmetrical_by_hist.py | UTF-8 | 9,897 | 2.578125 | 3 | [] | no_license | import cv2
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import config as cf
from utils.exception import SymmetricalException
# 2020-06-02: cosine distance, threshold 0.1
ori_path = "../data/testsym/ori/"
TEST = False
TRESHOD_TYPE = 2 # 0: fixed threshold, 1: adaptive threshold, 2: no thresholding
DIST_ALGO = 1 # 0: Euclidean distance, 1: cosine similarity
np.set_printoptions(threshold=np.inf)
def alpha_bg_to_white(img):
    """Blend a BGRA image onto white where needed and return a grayscale copy.

    NOTE(review): assumes *img* has four channels (loaded with
    IMREAD_UNCHANGED) -- a 3-channel input would fail on img[:, :, 3].
    """
    B_channel = img[:,:,0]
    G_channel = img[:,:,1]
    R_channel = img[:,:,2]
    A_channel = img[:,:,3]
    # print(np.sum([B_channel, G_channel, R_channel]) )
    # print(B_channel)
    # print(np.sum(B_channel))
    # print(np.sum(G_channel))
    # print(np.sum(R_channel))
    # print(np.sum(A_channel))
    # All three BGR channels black (or B == G): the alpha channel carries the
    # image shape, so flatten it onto a white background first.
    if (np.sum([B_channel, G_channel, R_channel]) == 0) or \
            (np.sum(B_channel) == np.sum(G_channel) ):
        # print('it is')
        b_converted = 255-img[:, :, 3]+img[:,:,0]
        g_converted = 255-img[:, :, 3]+img[:,:,1]
        r_converted = 255-img[:, :, 3]+img[:,:,2]
        img[:, :, 0] = b_converted
        img[:, :, 0][b_converted > 255] = 255
        img[:, :, 1] = g_converted
        img[:, :, 1][g_converted > 255] = 255
        img[:, :, 2] = r_converted
        img[:, :, 2][r_converted > 255] = 255
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if TRESHOD_TYPE == 0 :
        ret, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY)
        # fixed threshold
    elif TRESHOD_TYPE == 1:
        img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.ADAPTIVE_THRESH_MEAN_C, 99 , 2)
        # adaptive threshold; known misclassifications: 1001.png, 1000.png,
        # 10634.png, 10023.png etc. where the left/right colours differ
    return img
def compute_cosin_distance(vec1, vec2):
    """Cosine distance (1 - cosine similarity) between two vectors.

    Each vector is first scaled by its own maximum, and small 0.01 epsilons
    guard against division by zero.
    """
    scaled_a = vec1 / (np.max(vec1) + 0.01)
    scaled_b = vec2 / (np.max(vec2) + 0.01)
    denom = np.linalg.norm(scaled_a) * np.linalg.norm(scaled_b) + 0.01
    similarity = np.dot(scaled_a, scaled_b) / denom
    return 1 - similarity
def eucliDist(A, B):
    """Euclidean distance between two equal-length vectors."""
    diff = A - B
    return np.sqrt(sum(diff * diff))
def hist_method(img_name,img,H,W):
    """Judge left/right symmetry by comparing grayscale histograms.

    Splits the image into left/right halves (and upper/lower quadrants),
    compares the halves' histograms with Pearson correlation, and averages
    the three scores.

    NOTE(review): this function reads the module-global `mode` (defined in
    the __main__ block), not a parameter -- it will NameError if called
    before `mode` is set.  Confirm this is intentional.
    """
    # Split into left and right halves.
    half_left = img[:, 0:int(W / 2)]
    if W % 2 == 0:
        half_right = img[:, int(W / 2):W]
    else:
        half_right = img[:, int(W / 2) + 1:W]
    # Split each half again into upper and lower quadrants.
    if H % 2 == 0:
        H_half = int(H / 2)
    else:
        H_half = int(H / 2) + 1
    up_left = half_left[:int(H / 2), :]
    down_left = half_left[H_half:H, :]
    up_right = half_right[:int(H / 2), :]
    down_right = half_right[H_half:H, :]
    if TEST:
        cv2.imshow('half_left', half_left)
        cv2.imshow('half_right', half_right)
        cv2.imshow('up_left', up_left)
        cv2.imshow('up_right', up_right)
        cv2.imshow('down_left', down_left)
        cv2.imshow('down_right', down_right)
        cv2.waitKey(0)
    # Left half versus right half.
    left_hist = cv2.calcHist([half_left], [0], None, [256], [0, 256])
    right_hist = cv2.calcHist([half_right], [0], None, [256], [0, 256])
    # print(left_hist)
    # print(right_hist)
    dist_cor = cv2.compareHist(left_hist, right_hist, cv2.HISTCMP_CORREL)
    # cv2.HISTCMP_CORREL: 皮尔逊相关系数,1最高 ; cv2.HISTCMP_CHISQR: 卡方检验,0最高
    # cv2.HISTCMP_INTERSECT: 十字交叉性,0最高; cv2.HISTCMP_BHATTACHARYYA: 巴氏距离,1最高
    # Upper-left versus upper-right.
    up_left_hist = cv2.calcHist([up_left], [0], None, [256], [0, 256])
    up_right_hist = cv2.calcHist([up_right], [0], None, [256], [0, 256])
    dist_cor_up = cv2.compareHist(up_left_hist, up_right_hist, cv2.HISTCMP_CORREL)
    # Lower-left versus lower-right.
    down_left_hist = cv2.calcHist([down_left], [0], None, [256], [0, 256])
    down_right_hist = cv2.calcHist([down_right], [0], None, [256], [0, 256])
    dist_cor_down = cv2.compareHist(down_left_hist, down_right_hist, cv2.HISTCMP_CORREL)
    mean_score = np.mean([dist_cor, dist_cor_up, dist_cor_down])
    if TEST:
        print(dist_cor)
        print(dist_cor_up)
        print(dist_cor_down)
        print(mean_score)
        print('\n')
    else:
        # Batch mode (mode truthy): copy the file into the verdict folder;
        # otherwise return the verdict string.
        if mean_score > 0.9999:
            if mode:
                os.system('cp %s %s' % (str(ori_path) + str(img_name), '../data/testsym/symm_by_cor'))
            else:
                return "对称"
        else:
            if mode:
                os.system('cp %s %s' % (str(ori_path) + str(img_name), '../data/testsym/nosymm_by_cor'))
            else:
                return "不对称"
def pixel_cosin_method(mode, img_name,img): # cosine distance vs. Euclidean distance
    """Judge mirror symmetry by row-wise distance between the left half and
    the flipped right half, trying both the original orientation and a
    transposed copy.

    mode truthy: copy the file into a verdict folder; falsy: return the
    verdict string ("对称" / "不对称").
    """
    # Build a transposed (axes-swapped) copy so vertical symmetry is also tried.
    rota90_img = np.zeros([img.shape[1], img.shape[0]])
    for y in range(img.shape[0]):
        for x in range(img.shape[1]):
            a = img[y, x]
            rota90_img[x, y] = a
    multi_angle_imgs = [img, rota90_img ]
    dists_list_with_multi_angle = []
    for i, theImg in enumerate(multi_angle_imgs):
        W = theImg.shape[1]
        # Split into left and right halves.
        half_left = theImg[:, 0:int(W / 2)]
        if W % 2 == 0:
            half_right = theImg[:, int(W / 2):W]
        else:
            half_right = theImg[:, int(W / 2) + 1:W]
        # Reverse every row of the right half (mirror it horizontally).
        new_right = []
        for row in half_right:
            new_row = row[::-1]
            new_right.append(new_row)
        new_right = np.array(new_right)
        if TEST:
            cv2.imshow("a",half_left)
            cv2.imshow("b",new_right)
            cv2.waitKey(0)
        '''1、sift特征点匹配法, 弃用'''
        # # sift左右分别检测特征点
        # sift = cv2.xfeatures2d.SIFT_create()
        # l_kpt, l_dscp = sift.detectAndCompute(half_left, None)
        # r_kpt, r_dscp = sift.detectAndCompute(new_right, None)
        # # 匹配描述子
        # bfmatcher = cv2.BFMatcher()
        # dmatches = bfmatcher.match(l_dscp,r_dscp)
        # dists = []
        # for d in dmatches:
        #     dists.append(d.distance)
        # dist_mean = np.mean(dists)
        # if dist_mean<250:
        #     os.system('cp %s %s' % (str(ori_path) + str(img_name), '../data/testsym/symm_by_cor'))
        # else:
        #     os.system('cp %s %s' % (str(ori_path) + str(img_name), '../data/testsym/nosymm_by_cor'))
        '''2、直接求像素值余弦距离法'''
        # Row-by-row distance between the two halves, then averaged.
        dists = []
        for l, r in zip(half_left,new_right):
            # print(l)
            # print(r)
            dist = compute_cosin_distance(l,r) if DIST_ALGO else eucliDist(l,r)
            dists.append(dist)
        dists = np.array(dists)
        dists_mean = np.mean(dists)
        if TEST:
            print('余弦距离:'+str(dists_mean)) if DIST_ALGO else print('欧式距离:'+str(dists_mean))
        threshold = 0.1 if DIST_ALGO else 30 # cosine threshold 0.1, Euclidean threshold 30
        if dists_mean < threshold:
            # print("对称")
            if mode:
                os.system('cp %s %s' % (str(ori_path) + str(img_name), '../data/testsym/symm_by_cor'))
            else:
                return "对称"
            break
        else:
            # Only declare "not symmetric" after both orientations failed.
            if i==1:
                # print("不对称")
                if mode:
                    os.system('cp %s %s' % (str(ori_path) + str(img_name), '../data/testsym/nosymm_by_cor'))
                else:
                    return "不对称"
            else:
                continue
def judgeSym(listdir, mode, gy=-1, gyid=-1):
    """Classify the given images as symmetric ("对称") or not ("不对称").

    :param listdir: iterable of image names (mode truthy) or full image
                    paths (mode falsy)
    :param mode: truthy -> batch mode reading from ori_path; falsy ->
                 single-image mode returning the verdict string
    :param gy: optional process-type code; some codes are symmetric by
               definition and short-circuit the pixel analysis.  Defaults
               to -1 ("not supplied") so legacy two-argument calls such as
               judgeSym(listdir, mode) in the __main__ block keep working
               (the original signature made that call a TypeError).
    :param gyid: optional sub-type id, only consulted when gy == cf.TD.
    :raises SymmetricalException: wrapping any error raised while processing.
    """
    try:
        result = ''
        # Some process types are symmetric by construction -- no need to
        # look at the pixels at all.
        if gy != -1:
            if gy == cf.XTYS or gy == cf.CF:
                return "对称"
            elif gy == cf.TD:
                if gyid in [8,9,10,11] or gyid in [26,27,28,29] or gyid in [44,45,46,47] :
                    return "对称"
        for img_name in listdir:
            if mode:
                img_path = str(ori_path) + str(img_name)
            else:
                img_path = img_name
            img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED) # 1 3 6 10
            imgshape = img.shape
            # img = cv2.resize(img, (512, int(512 * (imgshape[0]/imgshape[1])) ))
            # img = cv2.imread(str(ori_path)+str('10222.png'), cv2.IMREAD_UNCHANGED)
            if TEST:
                cv2.imshow('img before abtw', img)
                cv2.waitKey(0)
            img = alpha_bg_to_white(img)
            if TEST:
                cv2.imshow('img after abtw', img)
                cv2.waitKey(0)
            # print('\n')
            # print(img_name)
            # print(img.shape)
            # hist_method(img_name,img,H,W)  # histogram comparison (unused)
            result = pixel_cosin_method(mode, img_name,img)  # distance-based comparison
        return result
    except Exception as e:
        raise SymmetricalException(str(e))
if __name__ == "__main__":
    listdir = os.listdir(ori_path)
    mode = 1 # 0: demo a single image, 1: batch over many images
    if TEST:
        # Hand-picked debug samples (kept from earlier runs):
        # listdir = ['10389.png' , '10525.png', '10235.png', '10085.png']
        # listdir = ['10206.png']
        # listdir = ['10253.png']
        # listdir = ['10320.png']  # dog, should be asymmetric; typical "transparent background, black foreground" image
        # listdir = ['10987.png']
        # listdir = ['379253.png']  # should be symmetric
        # listdir = ['379253.png']
        # listdir = ['379278.png']  # should be asymmetric; typical "transparent background, black-and-white foreground" image
        # listdir = ['11014.png']  # asymmetric high heel: cosine 0.11, Euclidean 1723
        # listdir = ['10527.png']  # asymmetric elephant: cosine 0.47, Euclidean 32
        # listdir = ['10335.png']  # asymmetric fox: cosine 0.37, Euclidean 34
        # listdir = ['10295.png']  # asymmetric squirrel: cosine 0.56, Euclidean 29
        # listdir = ['10901.png']  # fully asymmetric deer: cosine 0.21, Euclidean 25
        # listdir = ['10452.png']  # slightly asymmetric seahorse: cosine 0.098, Euclidean 25 -- Euclidean distance abandoned
        listdir = ['236852.png']
    # NOTE(review): judgeSym is declared with four parameters but called
    # here with two -- confirm gy/gyid defaults exist (or add them).
    judgeSym(listdir, mode)
f07814684a4daab1dd6817b1003589ad952e2b19 | Python | BrineFast/python-third-course | /Analysis/matplotlib/13_2.py | UTF-8 | 4,379 | 3.8125 | 4 | [] | no_license | import math
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
"""
Задание 1.
Создайте график функции sin(x) на отрезке [0; 2pi]. В качестве значений используйте массив x размером в 100
элементов. Для создания массива значений используйте функции пакета numpy. График должен иметь видимую координатную
сетку, подписанные координатные оси и заголовок. Измерения должны в точности совпадать значениям графика.
Подсказка: смотрите документацию функции plt.axis(...). График сохраните в JPG-файл.
"""
# Task 1: plot sin(x) over [0, 2*pi] with grid, labels and a title; save as JPG.
x = np.linspace(0, 2 * math.pi, num=100)
y = np.sin(x)
fig, ax = plt.subplots()
plt.title("Синусоида")
plt.grid()
plt.xlabel("x")
plt.ylabel("y")
ax.plot(x, y)
fig.savefig('Sin_x.jpeg')
"""
Задание 2
Создайте матрицу случайных целых чисел от 0 до 10000 размером (10, 1000). Просуммируйте используя параметр axis=0 и
получите одномерный массив целых чисел. Создайте кумулятивную гистограмму полученного массива. Отобразите ее на экране.
"""
# Task 2: sum a random (10, 1000) matrix along axis 0 and show a histogram.
# NOTE(review): the task asks for a *cumulative* histogram, but plt.hist is
# called without cumulative=True -- confirm intent.
plt.close(fig)
arr = np.random.uniform(0, 10000, (10, 1000))
arr_sum = arr.sum(axis=0)
plt.hist(arr_sum)
plt.show()
"""
Задание 3
Загрузите датасет “tips” из пакета seaborn. Выведите список колонок и их типы данных. Выведите размер датасета.
Выведите последние пять элементов датасета.
"""
# Task 3: load tips.csv, show column dtypes, shape and the last five rows.
print("Задание 3")
df = pd.read_csv("tips.csv")
print("Колонки: ")
print(df.dtypes)
print()
print(f"Размер:{df.shape}")
print()
print("5 последних: ")
print(df.tail())
"""
Задание 4
Найдите среднюю стоимость в чеке за каждое значение дня недели, времени и колонки, указывающей, был ли столик в зоне
для курения или нет. (Подсказка: сгруппируйте значения из датасета по дню недели, времени и “smoker”).
"""
# Task 4: mean total_bill grouped by (day, time, smoker).
print("Задание 4")
df = df.groupby(["day", "time", "smoker"])["total_bill"].mean().to_frame()
print(df)
"""
Задание 5
Сформируйте гистограмму значений стоимости в чеке в полученной датасете средствами pandas.
Сохраните полученную гистограмму в PNG-файл.
"""
# Task 5: histogram of the grouped means, saved to Bill.png.
fig, ax = plt.subplots()
ax.hist(df)
fig.savefig("Bill")
"""
Задание 6
Загрузите датасет из файла table2.csv. Датасет представляет собой записи о выявлении случаев заражения вирусом X в
разных странах с 1999 по 2000 годы. Колонка rate представляет собой строку, где через дробную черту записано отношения
выявленных случаев к общему населению. Выведите информацию об этом датасете и его индексе.
"""
# Task 6: load table2.csv and show its info and index.
print("Задание 6")
df2 = pd.read_csv("table2.csv")
print("Информация: ")
# NOTE(review): df2.info is printed without calling it, so this prints the
# bound method rather than the summary -- likely meant df2.info().
print(df2.info)
print()
print(f"Индекс: {df2.index}")
"""
Задание 7
Преобразуйте датасет в более широкий вид, сохранив колонки с записями о стране и годе. Из колонки rate должно получится
две дополнительные колонки cases и population, значения которых следует брать из колонки rate, разделив ее значения по
дробной черте на две части. Выведите полученный датасет.
"""
# Task 7: split "rate" on '/' into cases and population, drop rate, print.
df2[["cases", "population"]] = df2["rate"].str.split("/", expand=True)
del df2["rate"]
print("Задание 7")
print(df2)
aad0e5358942ebedd0d9a21afead9b7ebb9a7a5a | Python | chenlangping/script | /docs/python/video/mp4_all_frames.py | UTF-8 | 333 | 2.8125 | 3 | [] | no_license | # pip install opencv-python
import cv2
# Open the video file
video = cv2.VideoCapture('test.flv')
# Read frame by frame: ret stays True while frames remain, frame is the
# current frame as an ndarray
ret, frame = video.read()
i = 0
# Loop over every frame, dumping each one to picture_<n>.jpg
while ret:
    i += 1
    cv2.imwrite('picture_'+str(i) + '.jpg', frame)
    ret, frame = video.read()
c67d80fcebc96f5c4fc6c876ca33b32f2b674a73 | Python | amansh95/2041-Scripting | /week06/digits.py | UTF-8 | 178 | 2.671875 | 3 | [] | no_license | #!/usr/bin/python3
from __future__ import print_function
from re import sub;
import sys;
# Map each digit below 5 to '<' and each digit above 5 to '>' ('5' is kept),
# echoing every stdin line otherwise unchanged.
DIGIT_TABLE = str.maketrans("01234" + "6789", "<" * 5 + ">" * 4)
for text_line in sys.stdin:
    print(text_line.translate(DIGIT_TABLE), end="")
| true |
1eaff3fe70154753a434fffb1438080665ec5539 | Python | jonathantsang/CompetitiveProgramming | /kattis/tritiling/tritiling.py | UTF-8 | 324 | 3.21875 | 3 | [] | no_license | #! /usr/bin/python3
import sys
import math
# Precomputed table; the recurrences f[n] = f[n-2] + 2*g[n-1] and
# g[n] = f[n-1] + g[n-2] match the classic 3 x n domino-tiling counting
# (f = full rectangle, g = rectangle with one corner cell missing).
# NOTE(review): only n <= 30 is tabulated -- larger inputs would raise
# IndexError; confirm against the problem bounds.
f = [1, 0]
g = [0, 1]
for i in range(2, 31):
    f.append(f[i-2] + 2*g[i-1])
    g.append(f[i-1] + g[i-2])
# Read widths until the -1 sentinel; odd widths admit no tiling.
for line in sys.stdin:
    l = int(line)
    if l == -1:
        break
    if l % 2 == 1:
        print(0)
    else:
        # The l == 0 / l == 1 special cases are covered by f[l] anyway.
        if l == 0:
            print(f[0])
        elif l == 1:
            print(f[1])
        else:
            print(f[l])
| true |
c8a53b4528c4704767d840dba4a60f7fc5b18f7b | Python | sarias12/holbertonschool-higher_level_programming | /0x0A-python-inheritance/6-base_geometry.py | UTF-8 | 334 | 3.390625 | 3 | [] | no_license | #!/usr/bin/python3
"""
Module With BaseGeometry Class
"""
class BaseGeometry():
    """Base class for geometry objects; area() is abstract in spirit."""

    def area(self):
        """Always raise, since subclasses are expected to override area().

        Raises:
            Exception: always, with the message "area() is not implemented".
        """
        raise Exception("area() is not implemented")
| true |
586068a23ff43a779d802356a4c4f07e4986b56f | Python | fahad97azawi/snake-game | /main.py | UTF-8 | 4,061 | 2.84375 | 3 | [] | no_license | import pygame
import random
# Initialise pygame and its font subsystem.  pygame.init is a function and
# must be *called* -- the bare attribute access in the original did nothing.
pygame.init()
pygame.font.init()
# Window geometry: 600x600 playfield on a 15px grid plus a 45px-high strip
# at the bottom for the score text.
WIN_WIDTH = 600 + 15
WIN_HEGIHT = 600 + 45
# Basic RGB colours.
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
BLUE = ( 0, 0, 255)
GREEN = ( 0, 255, 0)
RED = (255, 0, 0)
score = 0
# Sprite images for the snake segments and the bait.
SNAKE_SEG = pygame.image.load('Red_block.png')
BAIT = pygame.image.load('Green_block.png')
screen = pygame.display.set_mode((WIN_WIDTH, WIN_HEGIHT))
running = True
myfont = pygame.font.SysFont('score font', 30, True)
seglist = []  # body segments trailing the snake's head
def draw_grid():
    """Clear the screen, draw the 15px board grid and the current score."""
    screen.fill(WHITE)
    # Vertical and horizontal lines 15 pixels apart across the playfield.
    for i in range(0, 42):
        pygame.draw.line(screen, BLACK, (i*15, 0), (i*15, 615))
        pygame.draw.line(screen, BLACK, (0, i*15), (615, i*15))
    score_text = myfont.render(f'score = {score}', True, BLACK)
    screen.blit(score_text, (10, 625))
class Snake:
    """The player-controlled snake head.

    Direction is a (dirX, dirY) pixel delta of one 15px grid cell per tick.
    """
    # NOTE(review): evaluated once at class-definition time, so the speed
    # never actually grows with the score -- confirm intent.
    VEL = score + 5
    def __init__(self, x = 300, y = 300, dirX = 0, dirY = 0):
        self.x = x
        self.y = y
        self.dirX = dirX
        self.dirY = dirY
    def move(self, event):
        """Update the direction from an arrow-key event, forbidding a
        direct reversal into the snake's own body."""
        if event.key == pygame.K_UP and self.dirY != 15:
            self.dirX = 0
            self.dirY = -15
        elif event.key == pygame.K_DOWN and self.dirY != -15:
            self.dirX = 0
            self.dirY = 15
        elif event.key == pygame.K_RIGHT and self.dirX != -15:
            self.dirX = 15
            self.dirY = 0
        elif event.key == pygame.K_LEFT and self.dirX != 15:
            self.dirX = -15
            self.dirY = 0
    def draw(self):
        """Draw the head, wrap at the playfield edges, then advance it."""
        Snake.snake_target(self)
        screen.blit(SNAKE_SEG, (self.x, self.y))
        # Wrap-around at the four playfield edges.
        if self.x > 600:
            self.x = -15
        elif self.x < 0:
            self.x = 615
        elif self.y >= 615:
            self.y = -15
        elif self.y < 0:
            self.y = 615
        self.x = self.x + self.dirX
        self.y = self.y + self.dirY
        # Body.draw(self)
    def snake_target(self):
        """Draw blue cross-hair lines through the head's current cell."""
        pygame.draw.line(screen, BLUE, (self.x + 7, 0), (self.x + 7, 615), 2)
        pygame.draw.line(screen, BLUE, (0, self.y + 7), (615, self.y + 7), 2)
class Bait:
    """The green bait block the snake tries to eat."""
    # NOTE(review): the x/y defaults are evaluated once at definition time
    # and are not grid-aligned; generator() re-rolls them anyway.
    def __init__(self, eaten = True, x = random.randint(0, 615), y = random.randint(0, 615)):
        self.x = x
        self.y = y
        self.eaten = eaten
    def generator(self):
        """Re-roll a grid-aligned bait position once it has been eaten."""
        if self.eaten == True:
            # while self.x != snake.x and self.y != snake.y:
            self.x = random.randint(1, 40)*15
            self.y = random.randint(1, 40)*15
            self.eaten = False
        return self.x, self.y
    def draw(self):
        # generator() also refreshes the position when needed.
        screen.blit(BAIT, Bait.generator(self))
    def check_eaten(self):
        """If the head sits on the bait: mark it eaten, bump the score and
        append a new body segment to the global seglist."""
        global score
        if self.x == snake.x and self.y == snake.y:
            self.eaten = True
            score += 1
            seglist.append(Body(body.add_seg()[0], body.add_seg()[1]))
class Body:
    """A single trailing body segment of the snake."""
    def __init__(self, x = None, y = None):
        self.x = x
        self.y = y
    def draw(self):
        """Reposition and draw every segment behind the snake's head."""
        for seg in seglist:
            # NOTE(review): this branch can never execute -- the loop body
            # only runs when seglist is non-empty.
            if len(seglist) == 0:
                seg.x = snake.x - snake.dirX
                seg.y = snake.y - snake.dirY
            else:
                last_seg = len(seglist) - 1
                seg.x = seglist[last_seg].x - snake.dirX
                seg.y = seglist[last_seg].y - snake.dirY
            screen.blit(SNAKE_SEG, (seg.x, seg.y))
    def add_seg(self):
        """Compute the grid position for a newly appended segment, one cell
        behind the head (or behind the current last segment)."""
        if len(seglist) == 0:
            self.x = snake.x - snake.dirX
            self.y = snake.y - snake.dirY
        else:
            last_seg = len(seglist) - 1
            self.x = seglist[last_seg].x - snake.dirX
            self.y = seglist[last_seg].y - snake.dirY
        return self.x, self.y
clock = pygame.time.Clock()
snake = Snake()
bait = Bait()
body = Body()
# Main game loop: tick at the snake's speed, handle input, redraw everything.
while running:
    clock.tick(snake.VEL)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.KEYDOWN:
            snake.move(event)
    draw_grid()
    bait.draw()
    snake.draw()
    body.draw()
    bait.check_eaten()
    pygame.display.update()
| true |
8d7c6c45e69316fb72810852cbbc9677865571a4 | Python | kdeloach/localmusic | /tests/tests.py | UTF-8 | 2,438 | 2.9375 | 3 | [] | no_license | import unittest
from catalog import SearchTerm
from catalog import SearchTermSql
class SearchTermTest(unittest.TestCase):
    """Tests for SearchTerm's tokeniser: bare terms, '-' negation,
    quoted exact phrases, and escape handling inside quotes."""
    def test_positive(self):
        tokens = list(SearchTerm('a'))
        self.assertEqual(tokens, [('POSITIVE', ('TERM', 'a'))])
    def test_negative(self):
        tokens = list(SearchTerm('-a'))
        self.assertEqual(tokens, [('NEGATIVE', ('POSITIVE', ('TERM', 'a')))])
    def test_positive_negative(self):
        tokens = list(SearchTerm('a -a'))
        self.assertEqual(tokens, [('POSITIVE', ('TERM', 'a')),
                                  ('NEGATIVE', ('POSITIVE', ('TERM', 'a')))])
    def test_negative_positive(self):
        tokens = list(SearchTerm('-a a'))
        self.assertEqual(tokens, [('NEGATIVE', ('POSITIVE', ('TERM', 'a'))),
                                  ('POSITIVE', ('TERM', 'a'))])
    def test_multiple(self):
        # A quoted phrase becomes a single EXACT group of terms.
        tokens = list(SearchTerm('"a b"'))
        self.assertEqual(tokens, [('POSITIVE', ('EXACT', [('TERM', 'a'),
                                                          ('TERM', 'b')]))])
    def test_escaping0(self):
        # Escaped double quote inside a double-quoted phrase.
        tokens = list(SearchTerm("\"a\\\"b\""))
        self.assertEqual(tokens, [('POSITIVE', ('EXACT', [('TERM', 'a"b')]))])
    def test_escaping1(self):
        # Escaped single quote inside a single-quoted phrase.
        tokens = list(SearchTerm("'a\\\'b'"))
        self.assertEqual(tokens, [('POSITIVE', ('EXACT', [('TERM', 'a\'b')]))])
    def test_escaping2(self):
        # Unescaped single quote inside a double-quoted phrase.
        tokens = list(SearchTerm("\"a'b\""))
        self.assertEqual(tokens, [('POSITIVE', ('EXACT', [('TERM', "a'b")]))])
    def test_negative_exact(self):
        tokens = list(SearchTerm('-"a b"'))
        self.assertEqual(tokens, [('NEGATIVE', ('POSITIVE', ('EXACT', [('TERM', 'a'),
                                                                       ('TERM', 'b')])))])
    def test_mixed(self):
        tokens = list(SearchTerm('a -"b c" "d" -e f'))
        self.assertEqual(tokens, [('POSITIVE', ('TERM', 'a')),
                                  ('NEGATIVE', ('POSITIVE', ('EXACT', [('TERM', 'b'),
                                                                      ('TERM', 'c')]))),
                                  ('POSITIVE', ('EXACT', [('TERM', 'd')])),
                                  ('NEGATIVE', ('POSITIVE', ('TERM', 'e'))),
                                  ('POSITIVE', ('TERM', 'f'))])
    def test_empty(self):
        tokens = list(SearchTerm(''))
        self.assertEqual(tokens, [])
| true |
370cb48772d415e7b4f743086e69bcbc79b19b4e | Python | DomenicD/domenic | /python/cracking_the_code/data_structures/linked_list_test.py | UTF-8 | 739 | 3.484375 | 3 | [] | no_license | import unittest
from data_structures.linked_list import SingleLinkList, kth_from_end
class LinkListTest(unittest.TestCase):
    """Tests kth_from_end on a singly linked list holding 0..6."""
    def test_kth_from_end(self):
        link_list = SingleLinkList() # SingleLinkList[int]
        for i in range(7):
            link_list.add(i)
        # k counts back from the last element (6); out-of-range k -> None.
        self.assertEqual(kth_from_end(link_list, -1), None)
        self.assertEqual(kth_from_end(link_list, 0), 6)
        self.assertEqual(kth_from_end(link_list, 2), 4)
        self.assertEqual(kth_from_end(link_list, 5), 1)
        self.assertEqual(kth_from_end(link_list, 6), 0)
        self.assertEqual(kth_from_end(link_list, 7), None)
        self.assertEqual(kth_from_end(link_list, 8), None)
        self.assertEqual(kth_from_end(link_list, 10), None)
| true |
9bfa413b70ac6310efe9bc05954d8ba5d28160fc | Python | FergusCurrie/SummerResearch | /SLiM/py_scripts/hypervolume.py | UTF-8 | 3,149 | 2.640625 | 3 | [] | no_license | from pymoo.factory import get_performance_indicator
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
def find_hyper(code):
    """Rank result CSVs by pymoo hypervolume and return the five best names.

    NOTE(review): the *code* parameter is never used inside the function.
    NOTE(review): pandas iterrows() yields row copies, so the "Convert to
    float" and "Normalise" loops below do not actually modify df -- verify
    whether the normalisation was ever effective.
    """
    # Find nader point
    hyper = [10000,111110]
    for x in os.listdir('../data/result_cleaned/'):
        df = pd.read_csv('../data/result_cleaned/'+x)
        # Convert to float
        for i, row in df.iterrows():
            row['Early Objective'] = float(row['Early Objective'])
            row['Late Objective'] = float(row['Early Objective'])
        # Normalise
        for i,row in df.iterrows():
            row['Early Objective'] = (row['Early Objective'] - df['Early Objective'].min()) / (df['Early Objective'].max() - df['Early Objective'].min())
            row['Late Objective'] = (row['Late Objective'] - df['Late Objective'].min()) / (df['Late Objective'].max() - df['Late Objective'].min())
        # Find nadir point
        for i,row in df.iterrows():
            if float(row["Early Objective"]) < hyper[0]:
                hyper[0] = float(row["Early Objective"])
            if float(row["Late Objective"]) < hyper[1]:
                hyper[1] = float(row["Late Objective"])
    # Get dict of name -> hyper, normalised
    di = {}
    for x in os.listdir('../data/result_cleaned/'):
        df = pd.read_csv('../data/result_cleaned/'+x)
        # Convert to float
        for i, row in df.iterrows():
            row['Early Objective'] = float(row['Early Objective'])
            row['Late Objective'] = float(row['Early Objective'])
        # Normalise
        for i, row in df.iterrows():
            row['Early Objective'] = (row['Early Objective'] - df['Early Objective'].min()) / (df['Early Objective'].max() - df['Early Objective'].min())
            row['Late Objective'] = (row['Late Objective'] - df['Late Objective'].min()) / (df['Late Objective'].max() - df['Late Objective'].min())
        # Objectives are negated so that "larger is better" fits the fixed
        # reference point (-1, -1) below.
        data = []
        for i,row in df.iterrows():
            data.append([-float(row["Early Objective"]), -float(row["Late Objective"])])
        A = np.array(data)
        #print(A)
        hv = get_performance_indicator("hv", ref_point=np.array([-1,-1]))
        #print("hv", hv.calc(A))
        di[x] = hv.calc(A)
    # Take the five largest hypervolumes and map them back to file names.
    l = []
    for key in di:
        l.append(di[key])
    l.sort(reverse=True)
    l = (l[:5])
    best_solutions = []
    for x in l:
        for key in di:
            if di[key] == x:
                best_solutions.append(key)
    return best_solutions
def vr(nm):
    """Scatter-plot Early vs Late Objective for one cleaned result file.

    nm -- file name inside ../data/result_cleaned/.  The digits embedded in
    the file name are used as the plot title, and the points are coloured
    by the 'Selection Coefficient' column.  Blocks until the plot window is
    closed.

    Cleanup: removed the unreachable ``if False:`` zoom block and the
    commented-out duplicate scatter call.
    """
    df = pd.read_csv("../data/result_cleaned/" + nm)
    # The run id (the digits in the file name) doubles as the plot title.
    title = "".join(ch for ch in nm if ch.isnumeric())
    df.plot.scatter(x="Early Objective", y="Late Objective",
                    c="Selection Coefficient", s=5, title=title)
    plt.show(block=True)
# Rank every cleaned result file by hypervolume (the numeric argument is not
# used by find_hyper) and plot the single best run.
bs = find_hyper(437888)
print(bs[:5])
for x in bs[:1]:
    vr(x)
f77e0e717734af1a0636c41467520c4f3a2d5fe0 | Python | NOAA-ORR-ERD/GnomeTools | /batch_gnome/batch_gnome/TAP/TAP_ext/test_TAP_ext.py | UTF-8 | 7,325 | 3.15625 | 3 | [
"LicenseRef-scancode-public-domain",
"Unlicense"
] | permissive | #!/usr/bin/env python2.3
"""
A simple test routine that tests at least some of the TAP_ext package
"""
import unittest
from Numeric import *
class CalcPolygonsTestCase(unittest.TestCase):
    """Placeholder suite for CalcPolygons -- no assertions implemented yet."""

    def testCalcPolygons(self):
        # TODO: exercise CalcPolygons once reference output is available.
        pass
class check_receptorsTestCase(unittest.TestCase):
    # Python 2-era test: relies on the legacy Numeric package (imported with
    # "from Numeric import *" at module top) and on RandomArray / TAP_ext.
    def test_check_receptors(self):
        """
        The Python and C versions of the receptor site hit test should return the same result
        """
        from RandomArray import uniform, seed
        from TAP_ext import check_receptors
        from time import time
        area = 200          # LE positions drawn uniformly from [0, area)
        num_LEs = 100
        num_times = 10
        num_sites = 4
        # num_sites copies of one concave polygon; the ring is closed (the
        # last vertex repeats the first), as hit_test's segment walk expects.
        sites = [array([(20,65),(40,35),(70,25),(75,45),(55,50),(45,75),(20,65)],Float)]*num_sites
        # build site bounding boxes
        BBs = []
        for site in sites:
            # Seed the extrema with the first vertex, then fold in the whole
            # column (the seed is redundant but harmless; kept as written).
            max_x = site[0,0]
            min_x = site[0,0]
            max_y = site[0,1]
            min_y = site[0,1]
            max_x = max(max_x, max(site[:,0]))
            min_x = min(min_x, min(site[:,0]))
            max_y = max(max_y, max(site[:,1]))
            min_y = min(min_y, min(site[:,1]))
            # Layout expected by hit_test: (max_x, min_x, max_y, min_y).
            BBs.append(array((max_x,min_x,max_y,min_y),Float))
        # Random tracks: num_times positions for each of num_LEs particles.
        LEs = uniform(0,area,(num_times,num_LEs,2))
        Hit_Table1 = zeros((num_LEs, num_sites),Int)
        start = time()
        hit_test(LEs,sites,BBs,Hit_Table1,0)
        print "Python version took %.3f seconds"%(time()-start)
        Hit_Table2 = zeros((num_LEs, num_sites),Int)
        start = time()
        check_receptors.hit_test(LEs,sites,BBs,Hit_Table2,0)
        print "c version took %.3f seconds"%(time()-start)
        # Both implementations must record identical (LE, site) first-hit steps.
        assert alltrue(equal(Hit_Table1,Hit_Table2)), "Python and C version gave different results"
from TAP_ext import NumericExtras as NE
class NumericExtrasTestCase(unittest.TestCase):
    # Tests for the C helpers in TAP_ext.NumericExtras (imported as NE above).
    # Python 2 syntax (print statements); relies on the legacy Numeric package.
    def testFastclip(self):
        # fastclip clips in place; compare against Numeric's copying clip().
        print "testing fastclip"
        A = arange(0,10,1,Float)
        B = clip(A, 3, 5)
        NE.fastclip(A, 3, 5)
        assert alltrue(A == B), "fastclip and clip gave different answers"
    def testByteswap(self):
        # An in-place byteswap followed by Numeric's copying byteswapped()
        # must round-trip back to the original values.
        A = arange(10)
        B = A.copy()
        NE.byteswap(B)
        B = B.byteswapped()
        assert alltrue(A == B), "NE.byteswap and Numeric.array.byteswapped gave different results"
    def testChangetypeA(self):
        """
        changetype should fail for non-contiguous arrays
        """
        A = arange(18)
        A.shape = 3,6
        B = A[:,3]  # column slice: a non-contiguous view of A
        self.assertRaises(ValueError,NE.changetype,B,Float)
    def testChangetypeB(self):
        """
        changetype should fail for arrays the wrong size for the type
        """
        A = arange(25)  # total byte length not reinterpretable as Float (per docstring)
        self.assertRaises(ValueError,NE.changetype,A,Float)
    def testChangetypeC(self):
        """
        changetype(m,typecode) should have the same result as:
        m = fromstring(m.tostring(),typecode)
        """
        A = arange(26)
        B = A.copy()
        NE.changetype(A,Float)
        assert alltrue (A == fromstring(B.tostring(),Float))
## This is the Python version of the check_receptors code, used by the test code above
def hit_test(LEs,sites,BBs,Hit_Table,Start_step):
    """
    hit_test computes the receptor site hits given a set of LE positions,
    LEs, and the receptor sites, and the bounding boxes of the receptor sites.

    LEs is a M X N X 2 NumPy array (of Floats ?)
        M is the number of timesteps (must be at least 2)
        N is the number of LEs (Num_LEs)

    sites is a list of N X 2 NumPy arrays (of Floats)
        N is the number of points in a receptor polygon

    BBs is a list of 4 X 1 NumPy arrays (of Floats) of the bounding box of
    the sites (max_x,min_x,max_y,min_y)

    Hit_Table is a NumPy array of Int16 (short) of size (Num_LEs, Num_sites);
    it holds the value of the first timestep that the site was hit by a given
    LE.  ***Hit_Table is ALTERED by this function!!!***

    The function returns None.

    Fix: the Python-2-only ``map(None, a, b)`` pairing idiom is replaced by
    ``zip(a, b)``; the two vertex slices always have equal length, so the
    pairing is identical, and the code now also runs under Python 3.
    """
    N_LEs = LEs.shape[1]
    N_times = LEs.shape[0]
    N_sites = len(sites)
    for T_ind in range(1,N_times): # loop over timesteps
        for LE_ind in range(N_LEs): # loop over LEs
            # Segment travelled by this LE during the timestep.
            LE_line = (tuple(LEs[T_ind-1,LE_ind,:]),tuple(LEs[T_ind,LE_ind,:]))
            # did the LE move?
            if (LE_line[0] != LE_line[1]):
                # Bounding box of the movement segment: (max_x, min_x, max_y, min_y).
                bb_LE = (max(LE_line[0][0],LE_line[1][0]),min(LE_line[0][0],LE_line[1][0]),
                         max(LE_line[0][1],LE_line[1][1]),min(LE_line[0][1],LE_line[1][1]))
                for site_ind in range(N_sites): # loop over sites
                    if BB_check(BBs[site_ind],bb_LE):
                        # Walk consecutive polygon edges and look for a crossing.
                        for segment in zip(sites[site_ind][:-1], sites[site_ind][1:]):
                            if LCross(LE_line,segment):
                                # Record only the FIRST timestep that hits the site.
                                if not Hit_Table[LE_ind,site_ind]:
                                    Hit_Table[LE_ind,site_ind] = Start_step + T_ind
                                break
    return None
def BB_check(bb_1, bb_2):
    """Intersection test for two axis-aligned bounding boxes.

    Each box is a 4-element sequence laid out as (max_x, min_x, max_y, min_y).
    Returns 1 when the boxes overlap, 0 otherwise.
    """
    overlap_x = bb_1[0] > bb_2[1] and bb_1[1] < bb_2[0]
    overlap_y = bb_1[2] > bb_2[3] and bb_1[3] < bb_2[2]
    return 1 if (overlap_x and overlap_y) else 0
def LCross(S1,S2):
    """Segment-intersection test.

    S1 and S2 are each a pair of (x, y) endpoint tuples.  Returns 1 when the
    two segments cross, 0 when they do not.

    The test uses signed cross products: for each segment, check on which
    side of its supporting line the other segment's endpoints lie.  If both
    endpoints of either segment are strictly on the same side of the other,
    the segments cannot cross.

    NOTE: when all four points are collinear the signed areas are all zero,
    so the function reports a crossing (returns 1) even for disjoint
    collinear segments -- identical to the original behaviour.
    """
    ((x1, y1), (x2, y2)) = S1
    ((x3, y3), (x4, y4)) = S2

    def side(ax, ay, bx, by, px, py):
        # Signed area of (b-a) x (p-a): positive when P is left of A->B,
        # negative when right, zero when P lies on the line through A and B.
        return (bx - ax) * (py - ay) - (by - ay) * (px - ax)

    # Both endpoints of S2 strictly on one side of S1's line: no crossing.
    if side(x1, y1, x2, y2, x3, y3) * side(x1, y1, x2, y2, x4, y4) > 0:
        return 0
    # Both endpoints of S1 strictly on one side of S2's line: no crossing.
    if side(x3, y3, x4, y4, x1, y1) * side(x3, y3, x4, y4, x2, y2) > 0:
        return 0
    # Otherwise the segments straddle each other (or are collinear): crossing.
    return 1
if __name__ == "__main__":
    ##    suite()
    # Discover and run every TestCase class in this module.
    unittest.main()
| true |
25ea11c4832904c6426de413beac69acc90ace0b | Python | johndoknjas/Socket-Programming | /asn2_sender.py | UTF-8 | 3,003 | 2.765625 | 3 | [] | no_license | import time
from common.net import Client
from common.random import Random
from common.asn2 import ASN2_PROTOCOL, Packet0Data, Packet1Suicide
from common.print import debug
import common.print as printer
# Tag this process's output in the shared debug helper; verbose logging off.
debug.name = "sender"
debug.enabled = False
class Config:
    """Run-time parameters for the sender, normally loaded from stdin."""

    def __init__(self):
        # Defaults; overwritten by read_from_stdin() in normal operation.
        self.timing_random = Random()
        self.segments = 5
        self.corruption_random = Random()
        self.corruption_probability = 0.5
        self.data_random = Random()
        self.rtt = 3

    def read_from_stdin(self):
        """Read the six configuration values, one per line, in fixed order."""
        self.timing_random.seed = input()
        self.segments = int(input())
        self.corruption_random.seed = input()
        self.corruption_probability = float(input())
        self.data_random.seed = input()
        self.rtt = float(input())

    def is_corrupted(self):
        """Decide whether the next received packet counts as corrupted.

        Per the assignment spec, a packet is corrupted when the generated
        number is *less than* the corruption probability.  next_bool() is
        true when the number is greater than or equal to that threshold, so
        the corrupted case is its inverse.
        """
        survived = self.corruption_random.next_bool(self.corruption_probability)
        return not survived

    def generate_data(self):
        """Random payload value drawn via Random.next_int(0, 1024)."""
        return self.data_random.next_int(0, 1024)

    def generate_delay(self):
        """Random artificial send delay drawn via Random.next_float(0, 5)."""
        return self.timing_random.next_float(0, 5)
def wait():
    """Block until the module-global client's delayed_until time has passed."""
    pause = client.delayed_until - time.time()
    if pause <= 0:
        return
    debug("Delayed for " + str(pause) + " seconds.")
    time.sleep(pause)
def STATE_SEND_PACKET(client):
    """Build and send the next data packet.

    Returns the next state function (STATE_WAIT_FOR_ACK), or None when all
    configured segments have been sent and the receiver has been told to
    shut down.  Uses the module globals `config`, `printer` and `wait`.
    """
    # Increment the sequence.
    client.last_sequence += 1
    # Exit if the number of segments has been sent is reached.
    if client.last_sequence == config.segments:
        # Packet1Suicide tells the receiver to stop; returning None ends the
        # sender's state machine.
        client.send(Packet1Suicide())
        return None
    # Create the packet to send.
    packet = Packet0Data()
    packet.data = config.generate_data()
    # Alternating sequence bit (0/1) derived from the running counter.
    packet.sequence_segment = client.last_sequence % 2
    # Remembered so STATE_RESEND_PACKET can retransmit it verbatim.
    client.last_packet = packet
    # Print the state change message.
    print("The sender is moving to state WAIT FOR CALL " + str(packet.sequence_segment) + " FROM ABOVE")
    # Send the packet (after honouring any pending artificial delay).
    wait()
    printer.packet_sent(packet)
    client.send(packet)
    # Schedule a random artificial delay before the next transmission.
    client.delayed_until = time.time() + config.generate_delay()
    print("The sender is moving to state WAIT FOR ACK " + str(client.last_sequence % 2))
    return STATE_WAIT_FOR_ACK
def STATE_RESEND_PACKET(client):
    """Retransmit the last data packet (after a timeout or corrupted ACK)."""
    printer.packet_sent_duplicate(client.last_packet)
    client.send(client.last_packet)
    print("The sender is moving back to state WAIT FOR ACK " + str(client.last_sequence % 2))
    return STATE_WAIT_FOR_ACK
def STATE_WAIT_FOR_ACK(receiver):
    """Wait up to one RTT for an ACK; resend on timeout or simulated corruption.

    `receiver` is the same client object the other state functions receive.
    Returns the next state function.
    """
    packet = receiver.recv(timeout=config.rtt)
    # Handle timeout.
    if packet is None:
        return STATE_RESEND_PACKET
    # Handle corrupted packets.  Corruption is simulated by the configured
    # probability, independent of the packet contents actually received.
    if config.is_corrupted():
        printer.packet_received_corrupt(True)
        return STATE_RESEND_PACKET
    # Next.
    printer.packet_received(packet)
    return STATE_SEND_PACKET
# --- Script entry: load configuration from stdin and run the state machine ---
config = Config()
config.read_from_stdin()
client = Client(ASN2_PROTOCOL)
# Per-connection state threaded through the state functions above.
client.delayed_until = 0     # earliest time the next send may happen
client.last_sequence = -1    # incremented to 0 by the first STATE_SEND_PACKET
client.last_packet = None    # kept for retransmission
client.start(STATE_SEND_PACKET)
| true |
2a3c34b9f7a19597aa640c64242f91d926192b02 | Python | nandoltw/rosebotics2 | /src/harrisap.py | UTF-8 | 266 | 2.71875 | 3 | [] | no_license | """
Capstone Project. Code written by PUT_YOUR_NAME_HERE.
Fall term, 2018-2019.
"""
import rosebotics as rb
import time
def main():
    """Runs YOUR specific part of the project: spin the robot in place."""
    drive = rb.DriveSystem()
    drive.spin_in_place_degrees(50)


main()
41f6aab96b0efbbc2e694aaa662dc6925d0925cd | Python | Pamela-anthonydas/python_challenge | /PyPoll/main.py | UTF-8 | 3,487 | 3.65625 | 4 | [] | no_license |
import os
import csv
#Function to calculate candidate vote % ( using candidate vote_count list) and print overall candidate statistics
#This code can definitely be placed inside the main code, but I am practicing functions
def candidateStats(unique_candidate, vote_count, count):
    """Print each candidate's vote share and return the percentage list.

    unique_candidate -- candidate names, parallel to vote_count
    vote_count       -- votes received per candidate
    count            -- total number of votes cast
    Returns the per-candidate percentages, rounded to 2 decimal places.
    """
    percentages = []
    for name, votes in zip(unique_candidate, vote_count):
        share = round(votes / count * 100, 2)
        percentages.append(share)
        print(f"{name} : {share} % ({votes})")
    return percentages
# Define file path to the raw election data.
# NOTE(review): column layout inferred from row[2] usage below (the
# candidate name is assumed to be the third column) -- confirm.
csvpath = os.path.join("Resources","election_data.csv")
# Initialize counters and lists
rowcount = 0
candidatelist = []
unique_candidatelist = []
candidate_count = []
candidate_percent = []
# Open the CSV using the set path csvpath
with open(csvpath, newline="") as csvfile:
    csvreader = csv.reader(csvfile, delimiter=",")
    csv_header = next(csvreader)  # skip the header row
    # Loop through all rows in csv file
    for row in csvreader:
        # Count the total vote number
        rowcount = rowcount + 1
        # Set the candidate names to candidatelist
        candidatelist.append(row[2])
# Find the unique candidates from the candidatelist using set, convert it into a list and assign it to unique_candidatelist
unique_candidatelist=list(set(candidatelist))
# Loop through each candidate in the unique candidate list and count the votes and append to candidate_count list.
for i in unique_candidatelist:
    y=candidatelist.count(i)
    candidate_count.append(y)
# As mentioned above, candidate vote percent can be calculated inline as below,
# however the code lives in the candidateStats function instead:
# z=round((y/rowcount)*100,2)
# candidate_percent.append(z)
# Create a dictionary by zipping unique_candidatelist and candidate_count
candidateDict=dict(zip(unique_candidatelist,candidate_count))
# Print results to terminal
print("Election Results")
print("------------------------------")
print(f"Total votes cast : {rowcount}")
print("------------------------------")
# Call to function : candidateStats with unique_candidatelist , candidate_count list and total votecount as arguments.
# Candidate stats will be printed in the function
candidate_percent=candidateStats(unique_candidatelist,candidate_count,rowcount)
print("------------------------------")
# Declare winner by finding the key with the maximum value in candidateDict
winner=max(candidateDict,key=lambda key:candidateDict[key])
print(f" The winner is : {winner}")
print("------------------------------")
# Write the same results to a text file
output_path = os.path.join("output", "new.txt")
# Open the file using "write" mode.
with open(output_path, 'w', newline='') as csvfile:
    csvfile.write("Election Results\n")
    csvfile.write("------------------------------ \n")
    csvfile.write(f"Total votes cast : {rowcount}\n")
    csvfile.write("------------------------------\n")
    # Loop through the length of the unique_candidatelist and write the candidate name , candidate vote_percent and candidate vote count
    for i in range(len(unique_candidatelist)):
        csvfile.write(f"{unique_candidatelist[i]} : {candidate_percent[i]} % ({candidate_count[i]})\n")
    # Declare winner and write
    csvfile.write("------------------------------\n")
    csvfile.write(f" The winner is : {winner}\n")
    csvfile.write("------------------------------\n")
| true |
0438bead0d4b05b869d5a14f0a759e65bca582a7 | Python | ifirat/14.Hafta-Odevler | /Climbing the Leaderboard.py | UTF-8 | 496 | 2.546875 | 3 | [] | no_license | import bisect
# Climbing the Leaderboard: print Alice's dense rank after each of her games.
scores = [100, 100, 100, 50, 50, 50, 40, 40, 20, 10]
alice = [5, 25, 50, 120]

# Dense ranking only looks at distinct scores, in ascending order.
scores = sorted(set(scores))
for score in alice:
    # bisect counts the distinct scores <= this one; the rank is one more
    # than the number of distinct scores strictly above it.
    print(len(scores) + 1 - bisect.bisect(scores, score))

## Hacker rank version of the same solution, reading from stdin:
# import bisect
# scores_count = int(input())
# scores = list(map(int, input().rstrip().split()))
# alice_count = int(input())
# alice = list(map(int, input().rstrip().split()))
# scores = sorted(list(set(scores)))
# [print(abs(bisect.bisect(scores,i)-(len(scores)+1))) for i in alice]
65e6be97cdb7b98ceecd3a74bcb999e0ee0997cf | Python | jay-munjapara/CSES | /Introductory Problems/Weird Algorithm/Weird Algorithm Vedant Kokate.py | UTF-8 | 86 | 3.515625 | 4 | [] | no_license | n=int(input())*2
# Collatz ("weird algorithm") sequence printer.  n was seeded above with
# twice the input, so the first iteration halves the (even) value back to
# the original input -- the starting number itself is printed first.
while n!=1:
    # Halve when even, otherwise 3n+1.
    n=n>>1 if n%2==0 else n*3+1
    print(n,end=" ")
| true |
c646b9ff4904ed4445ea43edf9126c557096701e | Python | sidhantp1906/googlekickstart2020 | /workout.py | UTF-8 | 672 | 3 | 3 | [
"Apache-2.0"
] | permissive | listN = []
listK = []   # K value recorded for each test case read below
lists = []   # module-level scratch list
def calc(listM):
    """Return the largest difference between consecutive elements of listM.

    Bug fix: the original appended the differences to the module-level
    ``lists``, which was never cleared, so every call after the first could
    return a stale maximum left over from a previous test case.  A local
    list makes each call independent.  (The original also shadowed the
    builtin ``max`` and sorted the list needlessly; both avoided here.)
    """
    gaps = [listM[m + 1] - listM[m] for m in range(len(listM) - 1)]
    return max(gaps)
# Read T test cases from stdin.  Each case gives N session values, then K
# (value, index) pairs to insert, and finally prints calc() of the result.
T = int(input('tc: '))
for i in range(T):
    N = int(input('ses: '))
    listN.append(N)
    K = int(input('add: '))
    listK.append(K)
    listM = []
    # Read the N base values for this case.
    for j in range(N):
        M = int(input('hr: '))
        listM.append(M)
    # Apply the K insertions at the requested indices.
    for k in range(K):
        A = int(input('addt: '))
        ind = int(input('ind: '))
        listM.insert(ind, A)
    print(listM)
    print(f'case #{i+1}: {calc(listM)}')
cbc1fd15e0a7153629275bd4b5743becddcc9269 | Python | jchacon4/yuka4 | /camera3/run.py | UTF-8 | 1,163 | 3.234375 | 3 | [] | no_license | import threading
import time
from fire import readFile
import json
datos = []  # in-memory buffer that writeFile2() serialises to data.json
def writeFile2():
    """Overwrite data.json with the JSON-serialised contents of `datos`.

    NOTE(review): the json.load() result is immediately discarded; as
    written, the read only makes an empty or invalid data.json raise before
    anything is overwritten -- confirm whether that is intentional.
    """
    #print(len(datos))
    with open('data.json', 'r+') as f:
        json_data = json.load(f)
        json_data = datos  # replace the loaded content wholesale
        f.seek(0)
        f.write(json.dumps(json_data))
        f.truncate()  # drop leftover bytes if the old file was longer
#subprocess.call('python3 fire.py', shell=True)
# Write the (currently empty) buffer once at import time.
writeFile2()
class ThreadingExample(object):
    """Runs readFile() forever on a background daemon thread.

    The worker thread is started from the constructor and dies
    automatically when the main program exits (daemon thread).
    """

    def __init__(self, interval=0):
        """Start the background worker.

        :type interval: int
        :param interval: Check interval, in seconds (currently unused; the
            sleep call in run() is commented out).
        """
        self.interval = interval
        worker = threading.Thread(target=self.run, args=())
        worker.daemon = True  # daemonize so it never blocks interpreter exit
        worker.start()

    def run(self):
        """Poll readFile() in an endless loop."""
        print("!")
        while True:
            readFile()
            #time.sleep(self.interval)


example = ThreadingExample()
| true |
62da236b111f6e0a5b7ef30c3f0232c085c29ded | Python | SurajKotagi/coding | /COMPETATIVE SOLUTIONS/Codeforces/fair_number.py | UTF-8 | 332 | 2.953125 | 3 | [] | no_license | b = str(input())
def answer_giver(user_input):
    """Print the smallest "fair" number >= user_input.

    A number is fair when it is divisible by every one of its non-zero
    digits.  `user_input` may be a string (as read from stdin) or an int.

    Bug fix: the original iterated ``range(int(user_input))`` -- over the
    VALUE rather than the digits -- and recursed with an int, which is not
    subscriptable, so it crashed on any non-fair input and printed once per
    qualifying digit on fair ones.  This version scans the digits of each
    candidate and prints the first fair number exactly once.
    """
    candidate = int(user_input)
    while True:
        digits = [int(ch) for ch in str(candidate) if ch != '0']
        if all(candidate % d == 0 for d in digits):
            print(candidate)
            return
        candidate += 1
answer_giver(b) | true |
0d54297bd921cfaffae4dce0ce8caa8df5760779 | Python | bmoe/AOC2018 | /10-prog.py | UTF-8 | 2,996 | 3.453125 | 3 | [] | no_license | import re
import sys
test_file = '10-input-test'
data_file = '10-input'
# Python version shenanigans
try:
raw_input # noqa
except NameError:
raw_input = input
def parse(data):
pat = re.compile(
r'position=<([^,]+),([^>]+)> *velocity=<([^,]+),([^>]+)>.*'
)
return [map(int, pat.match(line).groups()) for line in data]
class UFO:
def __init__(self, x, y, xv, yv):
self.x, self.y = x, y
self.xv, self.yv = xv, yv
def move(self, nsec):
self.x += nsec * self.xv
self.y += nsec * self.yv
class Sky:
def __init__(self, ufos):
self.ufos = ufos
self.time = 0
def move(self, nsec=1):
self.time += nsec
map(lambda x: x.move(nsec), self.ufos)
def map(self):
return {(ufo.x, ufo.y): True for ufo in self.ufos}
def ranges(self):
x0, xn, y0, yn = self.maxes()
return (xn - x0, yn - y0)
def maxes(self):
maxx = minx = self.ufos[0].x
miny = maxy = self.ufos[0].y
for ufo in self.ufos:
minx, maxx = min(ufo.x, minx), max(ufo.x, maxx)
miny, maxy = min(ufo.y, miny), max(ufo.y, maxy)
return minx, maxx, miny, maxy
def show(self, x0, xn, y0, yn):
map = self.map()
for y in range(y0, yn+1):
for x in range(x0, xn+1):
if (x, y) in map:
sys.stdout.write('*')
else:
sys.stdout.write(' ')
sys.stdout.write('\n')
def explore(fname):
""" Browse around time and find the message in the stars.
"""
datas = parse(open(fname, 'r').readlines())
sky = Sky([
UFO(*data) for data in datas
])
an_ufo = [ufo for ufo in sky.ufos if ufo.xv != 0][0] # get a moving one!
guess = an_ufo.x / an_ufo.xv
print('One guy gets to zero at about {} seconds. Just sayin'.format(
guess))
# Prompt shows time t, and the magnitude of x and y ranges.
# Which is basically the size of the sky.
# If all the stars in in the message, the message will appear
# sometime around the minimum of those ranges.
cmd = raw_input('{}/{} - '.format(sky.time, sky.ranges())).strip()
while cmd not in ['q', 'Q']:
if cmd == 's': # Careful!
sky.show(*sky.maxes())
if cmd == 'm':
print(sky.maxes())
if cmd.startswith('g'): # move in time!
seconds = int(cmd[1:])
sky.move(seconds)
# Show the sky when the sky is small enough.
if cmd == '-': # back one second
sky.move(-1)
xr, yr = sky.ranges()
if xr < 200 and yr < 200:
sky.show(*sky.maxes())
if cmd == '': # forward one second
sky.move()
xr, yr = sky.ranges()
if xr < 200 and yr < 200:
sky.show(*sky.maxes())
cmd = raw_input('{}/{} - '.format(sky.time, sky.ranges())).strip()
print('Bye!')
| true |
b8bdb8a0d8538a7499a5d01c789fa13702b076be | Python | aliyun/linkedge-thing-access-sdk-python | /examples/LightSensor/index.py | UTF-8 | 2,329 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
import logging
import lethingaccesssdk
from threading import Timer
class Light_Sensor(object):
def __init__(self):
self._illuminance = 200
self._delta = 100
self._callback = None
@property
def illuminance(self):
return self._illuminance
def start(self):
illuminance = self._illuminance
delta = self._delta
if (illuminance >= 600 or illuminance <= 100):
delta = -delta;
illuminance += delta
self._delta = delta
self._illuminance = illuminance
if self._callback is not None:
data = {"properties": illuminance}
self._callback(data)
t = Timer(2, self.start, ())
t.start()
def stop(self):
self._callback = None
def listen(self, callback):
if callback is None:
self.stop()
else:
self._callback = callback
self.start()
class Connector(lethingaccesssdk.ThingCallback):
def __init__(self, config, lightSensor):
self.lightSensor = lightSensor
self._client = lethingaccesssdk.ThingAccessClient(config)
def listenCallback(self, data):
self._client.reportProperties({'MeasuredIlluminance': data["properties"]})
def connect(self):
self._client.registerAndOnline(self)
self.lightSensor.listen(self.listenCallback)
def disconnect(self):
self._client.offline()
self.lightSensor.listen(None)
def callService(self, name, input_value):
if name == "yourFunc":
#do something
return 0, {}
return 100001, {}
def getProperties(self, input_value):
retDict = {}
if 'MeasuredIlluminance' in input_value:
retDict['MeasuredIlluminance'] = self.lightSensor.illuminance
return 0, retDict
def setProperties(self, input_value):
logging.error("can't set value")
return 100001, {}
infos = lethingaccesssdk.Config().getThingInfos()
for info in infos:
print(info)
try:
lightSensor = Light_Sensor()
connector = Connector(info, lightSensor)
connector.connect()
except Exception as e:
logging.error(e)
# don't remove this function
def handler(event, context):
return 'hello world'
| true |
ae437ba621787ad4cb8cdb1d630b13fc46930534 | Python | Abhishek-7017/Snake_game-repo | /Snake_game.py | UTF-8 | 796 | 2.671875 | 3 | [] | no_license | import pygame
import game_functions as gf
from Settings import Settings
from Snake import Snake
from fruit import Fruit
from scoreboard import Score
from game_stats import GameStats
def run_game():
# Initialize the game configuration
pygame.init()
ai_settings = Settings()
screen = pygame.display.set_mode((ai_settings.screen_width, ai_settings.screen_height))
pygame.display.set_caption("Snake_game")
sn = Snake(ai_settings, screen)
fr = Fruit(ai_settings, screen)
stats = GameStats(ai_settings)
sb = Score(ai_settings, screen, stats)
while True:
gf.check_event(sn)
sn.update()
gf.update_fruit(ai_settings, sn, fr, stats, sb)
gf.update_screen(ai_settings, screen, sn, fr, sb)
if __name__ == "__main__":
run_game()
| true |
67c090047c2abc812b711976a53596d09fd2fb8e | Python | SonQBChau/Natural_Language_Processing | /perceptron_and_embeddings/perceptron.py | UTF-8 | 2,962 | 3.109375 | 3 | [] | no_license | import sys
import os
import re
import math
import logging
import argparse
import math
import numpy as np
def read_data(file_name):
# Reads .dat files
f = open(file_name, 'r')
data = []
f.readline()
for instance in f.readlines():
if not re.search('\t', instance): continue
data += [list(map(int, instance.strip().split('\t') ))]
return data
def dot_product(array1, array2):
# Returns the dot product between array1 and array2
result = sum([i*j for (i, j) in zip(array1, array2)])
return result
def sigmoid(x):
# Returns sigmoid of x
result = 1/(1+math.exp(-x))
return result
def predict(weights, instance):
output = sigmoid(dot_product(weights, instance))
if output > 0.5:
return 1
return 0
def derivative_sigmoid(x):
return sigmoid(x)*(1-sigmoid(x))
def train_perceptron(instances, lr, epochs):
# Train (calculates weights) for a sigmoid perceptron
weights = [0] * (len(instances[0])-1)
for epoch in range(epochs):
for ins in instances:
output = sigmoid(dot_product(weights, ins))
error = ins[-1] - output
for i in range(len(weights)):
# update weight according to the weight update rule
derivative_output = derivative_sigmoid(output)
weighted_derv = error * derivative_output
weights[i] = weights[i] + lr * weighted_derv * ins[i]
return weights
def get_accuracy(weights, instances):
###########################
# DO NOT CHANGE THIS METHOD
###########################
# Predict instances and return accuracy
error = 0
for instance in instances:
prediction = predict(weights, instance)
error += abs(instance[-1] - prediction)
accuracy = float(len(instances)-error) / len(instances)
return accuracy * 100
def main(file_tr, file_te, lr, epochs):
###########################
# DO NOT CHANGE THIS METHOD
###########################
instances_tr = read_data(file_tr)
instances_te = read_data(file_te)
# Training: calculate weights
weights = train_perceptron(instances_tr, lr, epochs)
# Testing: calculate accuracy in the test set
accuracy = get_accuracy(weights, instances_te)
print(f"Accuracy on test set ({len(instances_te)} instances): {accuracy}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("PATH_TR", help="Path to train file with POS annotations")
parser.add_argument("PATH_TE", help="Path to test file (POS tags only used for evaluation)")
parser.add_argument("lr", type=float, default=0.1, help="Learning rate")
parser.add_argument("epochs", type=int, default=3, help="Number of epochs")
args = parser.parse_args()
print(' '.join(map(str, [args.PATH_TR, args.PATH_TE, args.lr, args.epochs])))
main(args.PATH_TR, args.PATH_TE, args.lr, args.epochs)
| true |
f6983cf96086c111ef76163f14d2c91ad851c11a | Python | ashengtx/algorithms | /basic/stack/fix_converter.py | UTF-8 | 1,804 | 3.8125 | 4 | [] | no_license | from stack import Stack
def infix2Postfix(infix_expr):
prec = {}
prec['*'] = 3
prec['/'] = 3
prec['+'] = 2
prec['-'] = 2
prec['('] = 1
token_list = infix_expr.split(' ')
output_list = []
operator_stack = Stack()
for token in token_list:
if token in "ABCDEFGHIJKLMNOPQRSTUVWXYZ" or token in "0123456789":
# 直接输出
output_list.append(token)
elif token == '(':
# 直接入栈
operator_stack.push(token)
elif token == ')':
# 出栈至output_list,直到遇到'('
top = operator_stack.pop()
while top != '(':
output_list.append(top)
top = operator_stack.pop()
else:
# 和栈顶的操作符比较优先级,若高于,则入栈,若低于,则先出栈,再入栈
while (not operator_stack.isEmpty()) and prec[token] < prec[operator_stack.peek()]:
top = operator_stack.pop()
output_list.append(top)
operator_stack.push(token)
while not operator_stack.isEmpty():
output_list.append(operator_stack.pop())
return ' '.join(output_list)
def infix2Prefix(infix_expr):
prec = {}
prec['*'] = 3
prec['/'] = 3
prec['+'] = 2
prec['-'] = 2
prec['('] = 1
token_list = infix_expr.split(' ')
output_list = []
operator_stack = Stack()
for token in token_list:
# todo
while not operator_stack.isEmpty():
output_list.append(operator_stack.pop())
return ' '.join(output_list)
if __name__ == '__main__':
print(infix2Postfix("A + B * C + D")) # A B C * D + +
print(infix2Postfix("A * B + C * D")) # A B * C D * +
print(infix2Postfix("( A + B ) * ( C + D )")) # A B + C D + *
| true |
e153e0af213f7368192c399dbf73b92449e08e32 | Python | jgonsior/reef | /program_synthesis/label_aggregator.py | UTF-8 | 5,893 | 3.296875 | 3 | [
"Apache-2.0"
] | permissive | import numpy as np
from scipy import sparse
from pprint import pprint
def log_odds(p):
"""This is the logit function"""
return np.log(p / (1.0 - p))
def odds_to_prob(l):
"""
This is the inverse logit function logit^{-1}:
l = \log\frac{p}{1-p}
\exp(l) = \frac{p}{1-p}
p = \frac{\exp(l)}{1 + \exp(l)}
"""
return np.exp(l) / (1.0 + np.exp(l))
def sample_data(X, w, n_samples):
"""
Here we do Gibbs sampling over the decision variables (representing our objects), o_j
corresponding to the columns of X
The model is just logistic regression, e.g.
P(o_j=1 | X_{*,j}; w) = logit^{-1}(w \dot X_{*,j})
This can be calculated exactly, so this is essentially a noisy version of the exact calc...
"""
N, R = X.shape
t = np.zeros(N)
f = np.zeros(N)
# Take samples of random variables
idxs = np.round(np.random.rand(n_samples) * (N - 1)).astype(int)
ct = np.bincount(idxs)
# Estimate probability of correct assignment
increment = np.random.rand(n_samples) < odds_to_prob(X[idxs, :].dot(w))
increment_f = -1. * (increment - 1)
t[idxs] = increment * ct[idxs]
f[idxs] = increment_f * ct[idxs]
return t, f
def exact_data(X, w, evidence=None):
"""
We calculate the exact conditional probability of the decision variables in
logistic regression; see sample_data
"""
t = odds_to_prob(X.dot(w))
if evidence is not None:
t[evidence > 0.0] = 1.0
t[evidence < 0.0] = 0.0
return t, 1 - t
def transform_sample_stats(Xt, t, f, Xt_abs=None):
"""
Here we calculate the expected accuracy of each LF/feature
(corresponding to the rows of X) wrt to the distribution of samples S:
E_S[ accuracy_i ] = E_(t,f)[ \frac{TP + TN}{TP + FP + TN + FN} ]
= \frac{X_{i|x_{ij}>0}*t - X_{i|x_{ij}<0}*f}{t+f}
= \frac12\left(\frac{X*(t-f)}{t+f} + 1\right)
"""
if Xt_abs is None:
Xt_abs = sparse_abs(Xt) if sparse.issparse(Xt) else abs(Xt)
n_pred = Xt_abs.dot(t + f)
m = (1. / (n_pred + 1e-8)) * (Xt.dot(t) - Xt.dot(f))
p_correct = (m + 1) / 2
return p_correct, n_pred
class LabelAggregator(object):
"""LabelAggregator Object that learns the accuracies for the heuristics.
Copied from Snorkel v0.4 NaiveBayes Model with minor changes for simplicity"""
def __init__(self, bias_term=False):
self.w = None
self.bias_term = bias_term
def train(self, X, n_iter=1000, w0=None, rate=0.01, alpha=0.5, mu=1e-6, \
sample=False, n_samples=100, evidence=None, warm_starts=False, tol=1e-6, verbose=True):
"""
Perform SGD wrt the weights w
* n_iter: Number of steps of SGD
* w0: Initial value for weights w
* rate: I.e. the SGD step size
* alpha: Elastic net penalty mixing parameter (0=ridge, 1=lasso)
* mu: Elastic net penalty
* sample: Whether to sample or not
* n_samples: Number of samples per SGD step
* evidence: Ground truth to condition on
* warm_starts:
* tol: For testing for SGD convergence, i.e. stopping threshold
"""
self.X_train = X
# Set up stuff
N, M = X.shape
if verbose:
print("=" * 80)
print("Training marginals (!= 0.5):\t%s" % N)
print("Features:\t\t\t%s" % M)
print("=" * 80)
Xt = X.transpose()
Xt_abs = np.abs(Xt)
w0 = w0 if w0 is not None else np.ones(M)
# Initialize training
w = w0.copy()
g = np.zeros(M)
l = np.zeros(M)
g_size = 0
# Gradient descent
if verbose:
print("Begin training for rate={}, mu={}".format(rate, mu))
for step in range(n_iter):
# Get the expected LF accuracy
t, f = sample_data(X, w,
n_samples=n_samples) if sample else exact_data(
X, w, evidence)
p_correct, n_pred = transform_sample_stats(Xt, t, f, Xt_abs)
# Get the "empirical log odds"; NB: this assumes one is correct, clamp is for sampling...
l = np.clip(log_odds(p_correct), -10, 10)
# SGD step with normalization by the number of samples
g0 = (n_pred * (w - l)) / np.sum(n_pred)
# Momentum term for faster training
g = 0.95 * g0 + 0.05 * g
# Check for convergence
wn = np.linalg.norm(w, ord=2)
g_size = np.linalg.norm(g, ord=2)
if step % 250 == 0 and verbose:
print("\tLearning epoch = {}\tGradient mag. = {:.6f}".format(
step, g_size))
if (wn < 1e-12 or g_size / wn < tol) and step >= 10:
if verbose:
print("SGD converged for mu={} after {} steps".format(
mu, step))
break
# Update weights
w -= rate * g
# Apply elastic net penalty
w_bias = w[-1]
soft = np.abs(w) - mu
ridge_pen = (1 + (1 - alpha) * mu)
# \ell_1 penalty by soft thresholding | \ell_2 penalty
w = (np.sign(w) *
np.select([soft > 0], [soft], default=0)) / ridge_pen
# Don't regularize the bias term
if self.bias_term:
w[-1] = w_bias
# SGD did not converge
else:
if verbose:
print("Final gradient magnitude for rate={}, mu={}: {:.3f}".
format(rate, mu, g_size))
# Return learned weights
self.w = w
def marginals(self, X):
X = X.todense()
marginals = odds_to_prob(X.dot(self.w))
return np.array(marginals)[0]
| true |
d693ea2eed26787f515e073a3f854bc66ab3625b | Python | Al153/Programming | /Python/CPU project/CPU 10 archive/CPU 10 - Optimised/Forth compiler.py | UTF-8 | 9,127 | 3.0625 | 3 | [] | no_license | #imma pop some stacks, only got operands in my pocket
#shunt-shunt-shunting, this is freaking CompSci
import sys
#________________________ basic syntax ______________________
fib:
. . . n ==> . . . fib n
fib
dup 1 = if
drop 1 return
else
dup 0 = if
drop 1 return
endif
endif
dup 1 - fib
2 - fib n
+
;
#____________________ commands _____________________
_________ run_time_words : _________
+ . . . a b ==> . . . a+b
-
*
/
%
&
|
^
~
drop . . . a ==> . . .
swap . . . a b ==> . . . b a
dup . . . a ==> . . . a a
echo . . . 'a' ==> . . . outputs 'a'
print . . . a ==> . . . outputs integer value of a
= . . . a b ==> . . . c where c is result of (a == b)
< . . . a b ==> . . . c where c is result of (a < b)
> . . . a b ==> . . . c where c is result of (a > b)
<= . . . a b ==> . . . c where c is result of (a <= b)
>= . . . a b ==> . . . c where c is result of (a >= b)
index . . . list index ==> . . . value of list [index*4]
or
. . . str index ==> . . . value of str [index]
return returns on the stack
_____________________ Compile_time_words _____________________
global creates a global variable/list etc
ptr <name> variable creates pointer variable to a variable
int <name> . . . a ==> . . . Loads local int of name name with value a
char <name> . . . 'a' ==> . . . Loads local char of name name with value a&255
list <name> [ . . .] creates a list
str <name> "blah blah blah" creates a string
if . . . a ==> . . . carries on if a is True, else go to corresponding else or end if
else . . . ==> . . . if in a positive if, then goto corresponding endif
endif . . . ==> . . . pass
while			. . .     ==>   . . .			if top of stack, then carry on, else go to corresponding loop
loop . . . ==> . . . go to corresponding while
_______________ semi_compile_time_words _______________
<const>
<var name>
def compile_header():
    """Return the assembly header emitted at the top of every compiled program.

    The header imports the stack runtime and defines the shared comparison
    subroutines (=, <, >, <=, >=) that compiled words invoke through the
    FORTH.* labels, plus the push0/push1 helpers they jump to.
    """
    assembly = '''
#_______________ HEADER _______________
Import Stack
ptr FORTH.greater_or_equal
ptr FORTH.less_or_equal
ptr FORTH.Equal
ptr FORTH.Greater
ptr FORTH.Less
ptr FORTH.push0
ptr FORTH.push1
Pop gp1 %FORTH.Equal
Pop gp0
Compare gp0 gp1
if Equal then Goto FORTH.push1
Goto FORTH.push0
Pop gp1 %FORTH.Greater
Pop gp0
Compare gp0 gp1
if Greater then Goto FORTH.push1
Goto FORTH.push0
Pop gp1 %FORTH.Less
Pop gp0
Compare gp0 gp1
if Less then Goto FORTH.push1
Goto FORTH.push0
Pop gp1 %FORTH.greater_or_equal
Pop gp0
Compare gp0 gp1
if Less then Goto FORTH.push0
Goto FORTH.push1
Pop gp1 %FORTH.less_or_equal
Pop gp0
Compare gp0 gp1
if Greater then Goto FORTH.push0
Goto FORTH.push1
Push One %FORTH.push1
Return
Push Zero %FORTH.push0
Return
#_______________ END HEADER _______________
'''
    # Bug fix: the header text was built but never returned (caller got None).
    return assembly
def compile_add():
    """Emit assembly for '+' ( a b -- a+b )."""
    return "Pop gp0\nPop gp1\nADD gp0 gp1\nPush gp0\n"
def compile_sub():
    """Emit assembly for '-'."""
    return "Pop gp0\nPop gp1\nSUB gp0 gp1\nPush gp0\n"
def compile_mul():
    """Emit assembly for '*'."""
    return "Pop gp0\nPop gp1\nMUL gp0 gp1\nPush gp0\n"
def compile_div():
    """Emit assembly for '/'."""
    return "Pop gp0\nPop gp1\nDIV gp0 gp1\nPush gp0\n"
def compile_mod():
    """Emit assembly for '%'."""
    return "Pop gp0\nPop gp1\nMOD gp0 gp1\nPush gp0\n"
def compile_and():
    """Emit assembly for '&'."""
    return "Pop gp0\nPop gp1\nAND gp0 gp1\nPush gp0\n"
def compile_or():
    """Emit assembly for '|'."""
    return "Pop gp0\nPop gp1\nOR gp0 gp1\nPush gp0\n"
def compile_xor():
    """Emit assembly for '^'."""
    # Bug fix: the assembly string was built but never returned, so '^'
    # compiled to None while every sibling emitter returned its code.
    return "Pop gp0\nPop gp1\nXOR gp0 gp1\nPush gp0\n"
def compile_not():
    """Emit assembly for '~'.

    NOTE(review): '~' is documented above as unary, yet this pops two
    values and pushes one, silently dropping a stack entry -- it looks
    copy-pasted from a binary op.  Left unchanged pending confirmation of
    the NOT instruction's operand semantics.
    """
    return "Pop gp0\nPop gp1\nNOT gp0 gp1\nPush gp0\n"
def compile_drop():
    """Emit assembly for 'drop' ( a -- )."""
    return "Pop gp0\n"
def compile_swap():
    """Emit assembly for 'swap' ( a b -- b a )."""
    return "Pop gp0\nPop gp1\nPush gp0\nPush gp1\n"
def compile_dup():
    """Emit assembly for 'dup' ( a -- a a )."""
    return "Pop gp0\nPush gp0\nPush gp0 \n"
def compile_echo():
    """Emit assembly for 'echo' (output a character)."""
    return "Pop gp0\nOut gp0\n"
def compile_print():
    """Emit assembly for 'print' (output an integer)."""
    return "Pop gp0\nOutd gp0\n"
def compile_equal():
    """Emit a call to the shared '=' comparison subroutine."""
    return "Call FORTH.Equal\n"
def compile_greater():
    """Emit a call to the shared '>' comparison subroutine."""
    return "Call FORTH.Greater\n"
def compile_less():
    """Emit a call to the shared '<' comparison subroutine."""
    return "Call FORTH.Less\n"
def compile_greater_or_equal():
    """Emit a call to the shared '>=' comparison subroutine."""
    return "Call FORTH.greater_or_equal\n"
def compile_less_or_equal():
    """Emit a call to the shared '<=' comparison subroutine."""
    return "Call FORTH.less_or_equal\n"
def compile_index(token_type):
    """Emit assembly for 'index' ( base idx -- value ).

    For token_type "int" the index is added to the base address directly;
    for any other type it is first scaled by 4 before the load.
    """
    if token_type == "int":
        return "Pop gp0\nPop gp1\nADD gp0 gp1\nLoad gp0 0 [gp0]\nPush gp0\n"
    return "Pop gp0\nPop gp1\nMUL gp0 @4\nADD gp0 gp1\nLoad gp0 0 [gp0]\nPush gp0\n"
def compile_return():
    """Emit assembly for 'return'."""
    return "Return\n"
def compile():
    """Top-level driver: read the source file named on the command line,
    tokenize it, gather and compile its word definitions, then hand the
    assembly to the assembler.

    NOTE: shadows the builtin compile(); name kept for compatibility.
    """
    source = get_code()
    # Bug fix: the tokenizer is defined as `tokenize` in this module;
    # `tokenise` was an unresolved name (NameError at runtime).
    tokens = tokenize(source)
    words = get_words(tokens, '')
    assembly = compile_words(words)
    # run_assembler is expected to be supplied elsewhere (it is not defined
    # in this file) -- TODO confirm.
    run_assembler(assembly)
def get_code():
    """Open the source file named as the first command-line argument.

    Tries the name exactly as given, then with a '.fth' extension
    appended.  Exits the program if neither file can be opened.
    """
    file_name = sys.argv[1]
    try:
        return open(file_name)
    except IOError:
        try:
            return open(file_name + ".fth")
        except IOError:
            # print(...) with a single argument is valid in both Python 2
            # and 3; the old print-statement form broke under Python 3.
            print("Error: Invalid file name, please try again")
            # sys.exit is always available; quit() only exists when the
            # `site` module has been loaded (e.g. not under `python -S`).
            sys.exit(1)
def tokenize(text_file):
    """Split a pyfth source file into a flat list of tokens.

    Tokens are whitespace-separated words; '#' at the start of a token
    comments out the rest of the line; a double-quoted string becomes a
    single token, honoring the escapes \\" and \\n (strings may span lines).
    NOTE(review): any other escaped character (e.g. an escaped backslash)
    is silently dropped -- confirm whether that is intended.
    """
    token_list = []
    # string: currently inside a double-quoted string literal
    # escaped: previous character was a backslash (only meaningful in a string)
    string = 0
    escaped = 0
    for line in text_file:
        if not string:
            # Fresh line outside a string: start a new per-line token buffer.
            line_tokens= []
            current_token = ''
        for character in line:
            if not string:
                if character == '"' and current_token == '':
                    # Opening quote: flush tokens collected so far, enter string mode.
                    string = 1
                    token_list += line_tokens
                    line_tokens = []
                elif character == "#" and current_token == '':
                    # Comment: ignore the remainder of the line.
                    break
                elif character != " " and character != "\t" and character != "\n":
                    current_token += character
                else:
                    # Whitespace terminates the current token (if any).
                    if current_token != "":
                        line_tokens.append(current_token)
                        #print line_tokens
                        current_token = ""
            else:
                if not escaped:
                    if character == '"':
                        # Closing quote: the accumulated string is one token.
                        string = 0
                        token_list.append(current_token)
                        current_token = ''
                    elif character == "\\":
                        escaped = 1
                    else:
                        current_token += character
                else:
                    # Resolve a backslash escape: only \" and \n are handled.
                    escaped = 0
                    if character == '"':
                        current_token += '"'
                    if character == "n":
                        current_token += "\n"
        if not string:
            # End of line: flush any trailing token and the line's tokens.
            if current_token != "" and current_token != "\t":
                line_tokens.append(current_token)
            if line_tokens != []:
                token_list+=line_tokens
    #print token_list
    return token_list
def get_words(tokens, name_space):
    """Gather word definitions from a token stream.

    Returns a dict mapping the fully-qualified word name
    (name_space + name) to its token list, with an implicit trailing
    "return" appended.  An "import" token makes the next token be read as
    a file name ("..\\<name>.pyfth"), tokenized and gathered recursively
    under its own namespace.
    """
    defined_words = {}
    current_word = []
    found_word = 0
    import_called = 0
    for token in tokens:
        if import_called:
            import_called = 0
            # NOTE(review): Windows-style relative path -- confirm portability.
            token = "..\\" + token + ".pyfth"
            imported_program = open(token)
            # Bug fix: the tokenizer is the module-level `tokenize`;
            # `Tokenizer.tokenize` was an unresolved name (NameError).
            defined_words.update(get_words(tokenize(imported_program), name_space + token + "."))
            imported_program.close()
        elif found_word:
            if token == ";":
                # ';' terminates the current word definition.
                found_word = 0
                current_word.append("return")
                defined_words[current_word_name] = current_word
                current_word = []
            else:
                current_word.append(token)
        elif token == "import":
            import_called = 1
        else:
            # Any other top-level token starts a new word definition.
            found_word = 1
            current_word_name = name_space + token
    return defined_words
def compile_words(words):
    """Compile every defined word into one assembly listing.

    The listing starts with a jump to @main so execution skips over the
    word bodies.
    """
    names = list(words)
    globals_ = []
    parts = ['Goto @main\n']
    for word_name in words:
        parts.append(compile_word(words[word_name], word_name, names, globals_))
    return ''.join(parts)
# NOTE(review): everything below this point is unfinished work-in-progress.
# compile_word's loop has no body and compile_block contains several
# incomplete statements, so this region does not parse -- the comments below
# only record the apparent intent.
def compile_word(word,name,word_names,global_variables):
    # Intended to translate one word's token list into assembly; currently
    # only sets up its bookkeeping and an empty token loop (no body yet).
    local_variables = []
    assembly = ''
    if_stack = []
    for token in word:
def compile_block(name,block,return1,return2,word_names,global_variables):
    # Scaffolding for a block/branch tree used to compile if/else/endif;
    # the nested node classes are only partially written.
    class Start:
        # Root node: delegates blockify/compile to its single child.
        def __init__(self,suboutine_name,child):
            self.name = suboutine_name
            self.child = child
        def blockify(self):
            # NOTE(review): missing the call parentheses -- this only
            # references the bound method, it never invokes it.
            self.child.blockify
        def compile(self):
            return self.child.compile()
    class Return:
        # Leaf node emitting a bare subroutine return.
        def __init__(self):
            pass
        def blockify(self):
            pass
        def compile(self):
            return "Return\n"
    class Block:
        # A straight-line run of tokens; blockify is meant to split it at
        # the first "if" into this block plus a child branch (incomplete).
        def __init__(self,name_space,subroutine_number,tokens,child):
            self.name_space = name_space
            self.subroutine_number = subroutine_number
            self.tokens = tokens
            self.child = child
        def blockify(self):
            new_tokens = []
            child_tokens = []
            hit_if = 0
            for token in self.tokens:
                if not hit_if:
                    if token == "if":
                        hit_if = 1
                    else:
                        new_tokens.append(token)
                else:
                    child_tokens.append(token)
            if hit_if:
                if_counter = 1
                # Unfinished: loop header below is missing its ':' and body.
                for token in child_tokens
        def compile(self):
        def expand(self):
    class Branch:
        # Two-way branch node (if/else); bodies not written yet.
        def __init__(self,name_space,subroutine_number,tokens,child0,child1):
            pass
        def blockify(self):
        def compile(self):
| true |
2fdfb2a57cdf7fb3a482d3d4cbf33ba92dd823d9 | Python | iam-amitkumar/BridgeLabz | /AlgorithmPrograms/Problem2_Prime.py | UTF-8 | 673 | 4.15625 | 4 | [] | no_license | """Prime program prints all the prime numbers up to a
user-input number.
@author Amit Kumar
@version 1.0
@since 02/01/2019
"""
# importing the project helper modules
import utility.Utility
import util.Util
import sys

# Read the upper bound from the user; stop cleanly if it cannot be read.
try:
    number = utility.Utility.get_integer()
except Exception as e:
    # Previously execution continued after printing the error and crashed
    # with a NameError because `number` was never assigned; exit instead.
    # (The old module-level `global number` statement was a no-op.)
    print(e)
    sys.exit(1)
print("\nPrime numbers up to ", number, ": ")
for i in range(2, number):  # check every candidate from 2 up to (excluding) number
    # util.Util.is_prime is assumed to return a bool -- TODO confirm
    if util.Util.is_prime(i) is True:
        print(i, end=" ")
print()  # final newline after the space-separated primes
| true |
4b7852826dc3c79ef60e668db5d4bd24a4143ccf | Python | jooh/sana | /tests/test_backend_agreement.py | UTF-8 | 1,313 | 2.59375 | 3 | [
"MIT"
] | permissive | """test agreement between corresponding functionality across backends"""
import numpy as np
from sana.backend import npbased, tfbased
import test_tfbased
import util
# there are surprisingly substantial float precision discrepancies between numpy and tf
# computations
ATOL = 1e-4
RTOL = 1e-14
def test_square2vec():
    """square2vec must produce identical output in both backends."""
    mat = np.random.rand(10, 10).astype("float32")
    expected = npbased.square2vec(mat)
    actual = tfbased.square2vec(mat)
    np.testing.assert_array_equal(expected, actual)
def test_euclideansq():
    """Squared Euclidean distances must agree across backends."""
    # cast to common precision (tf doesn't do double)
    data = util.responses(10).astype("float32")
    result_np = npbased.euclideansq(data)
    result_tf = tfbased.euclideansq(data)
    np.testing.assert_allclose(result_np, result_tf, rtol=RTOL, atol=ATOL)
def test_zscore():
    """zscore must agree across backends for default, row and column axes."""
    data = util.responses(10).astype("float32")
    for kwargs in ({}, {"axis": 1}, {"axis": 0}):
        np.testing.assert_allclose(
            npbased.zscore(data, **kwargs),
            tfbased.zscore(data, **kwargs),
            rtol=RTOL,
            atol=ATOL,
        )
| true |
9043c5a923f6aaec7c8e744fcec8f515501d0497 | Python | guogander/python100 | /lesson_15.py | UTF-8 | 352 | 4.59375 | 5 | [] | no_license | # 题目:利用条件运算符的嵌套来完成此题:学习成绩>=90分的同学用A表示,60-89分之间的用B表示,60分以下的用C表示。
# Grade a score read from the user:
# 90-100 -> A, 60-89 -> B, 0-59 -> C, anything else is invalid input.
score = int(input("请输入分数:"))
if 90 <= score <= 100:
    print("A")
elif 60 <= score <= 89:
    print("B")
elif 0 <= score < 60:
    print("C")
else:
    print("输入有误!")
fd9240fea4bec0738675223294c8c62a71f0f718 | Python | Qingliny/Yahaha_secondary_online_market | /lambda/customerService.py | UTF-8 | 10,280 | 2.609375 | 3 | [] | no_license | import math
import dateutil.parser
import datetime
import time
import os
import logging
import boto3
import json
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
""" --- Helpers to build responses which match the structure of the necessary dialog actions --- """
def get_slots(intent_request):
    """Return the slot dictionary of the request's current intent."""
    current_intent = intent_request['currentIntent']
    return current_intent['slots']
def elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):
    """Build a Lex ElicitSlot dialog-action response."""
    action = {
        'type': 'ElicitSlot',
        'intentName': intent_name,
        'slots': slots,
        'slotToElicit': slot_to_elicit,
        'message': message
    }
    return {'sessionAttributes': session_attributes, 'dialogAction': action}
def close(session_attributes, fulfillment_state, message):
    """Build a Lex Close dialog-action response."""
    action = {
        'type': 'Close',
        'fulfillmentState': fulfillment_state,
        'message': message
    }
    return {'sessionAttributes': session_attributes, 'dialogAction': action}
def delegate(session_attributes, slots):
    """Build a Lex Delegate dialog-action response."""
    action = {'type': 'Delegate', 'slots': slots}
    return {'sessionAttributes': session_attributes, 'dialogAction': action}
""" --- Helper Functions --- """
def parse_int(n):
    """Parse n as an int, returning NaN when it cannot be parsed.

    Robustness fix: also catch TypeError so non-string/non-numeric inputs
    (e.g. None from an unfilled Lex slot) yield NaN instead of raising.
    """
    try:
        return int(n)
    except (ValueError, TypeError):
        return float('nan')
def build_validation_result(is_valid, violated_slot, message_content):
    """Build the slot-validation dict consumed by the dialog code hooks.

    The 'message' key is included only when message_content is provided.
    """
    if message_content is None:
        return {
            "isValid": is_valid,
            "violatedSlot": violated_slot,
        }
    return {
        'isValid': is_valid,
        'violatedSlot': violated_slot,
        'message': {'contentType': 'PlainText', 'content': message_content}
    }
def isvalid_date(date):
    """Return True when *date* parses as a date string, False otherwise."""
    try:
        dateutil.parser.parse(date)
    except ValueError:
        return False
    return True
def validate_asking_config(intent_request, book, album, drink, game):
    """Check whether the requested item exists in the Products table.

    Exactly one of book/album/drink/game is expected to be non-None (the
    caller guarantees at least one; if several are set the last wins).
    When the item is not found, the search term is stashed in the session
    attributes and an invalid result is returned so the bot elicits the
    user's phone number instead.
    """
    # Pick the search term from whichever slot was filled.
    # NOTE(review): if all four are None this raises NameError on `notnone`
    # below -- the caller (asking) currently guards against that case.
    if book is not None:
        notnone = book
    if album is not None:
        notnone = album
    if drink is not None:
        notnone = drink
    if game is not None:
        notnone = game
    intent_request['sessionAttributes']=intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}
    dynamo = boto3.client("dynamodb")
    # NOTE(review): full table scan on every validation call -- fine for a
    # demo-sized table, expensive otherwise.
    response = dynamo.scan(TableName='Products')
    if response['Count']!=0:
        # str.find returns 0 only when the term is a prefix, so "0 not in"
        # below means: no product name/description *starts with* the term.
        findifnameexist = []
        findifdiscexist = []
        for i in range(response['Count']):
            print(response['Items'][i])
            findifnameexist.append(response['Items'][i]['ProductName']['S'].find(notnone))
            findifdiscexist.append(response['Items'][i]['Description']['S'].find(notnone))
        print(findifnameexist)
        print(findifdiscexist)
        if book is not None and (0 not in findifnameexist and 0 not in findifdiscexist):
            intent_request['sessionAttributes']['book']=book
            return build_validation_result(False,
                                           'book',
                                           'I am sorry that we dont\'t have {}, but we will publish to help you find it! We will inform you as long as there are similar books available! Could you tell us your phone number?'.format(book))
        if album is not None and (0 not in findifnameexist and 0 not in findifdiscexist):
            intent_request['sessionAttributes']['album']=album
            return build_validation_result(False,
                                           'album',
                                           'I am sorry that we dont\'t have {}, but we will publish to help you find it! We will inform you as long as it\'s available! Could you tell us your phone number?'.format(album))
        if drink is not None and (0 not in findifnameexist and 0 not in findifdiscexist):
            intent_request['sessionAttributes']['drink']=drink
            return build_validation_result(False,
                                           'drink',
                                           'I am sorry that {} are sold out, but we will advocate people to sell it more! Next time seize your chance! Could you tell us your phone number?'.format(drink))
        if game is not None and (0 not in findifnameexist and 0 not in findifdiscexist):
            intent_request['sessionAttributes']['game']=game
            return build_validation_result(False,
                                           'game',
                                           'It seems {} are really hot, we will keeping looking for information for you and inform you as long as it\'s available! Could you tell us your phone number?'.format(game))
    else:
        # Empty table: nothing is for sale at all.
        return build_validation_result(False,
                                       'phone',
                                       'Buddy bad luck. We are out of everything now. But we will refill our repository soon! Try some luck next time! Could you tell us your phone number? We will message you whenever there is somthing posted!')
    return build_validation_result(True, None, None)
""" --- Functions that control the bot's behavior --- """
def say_hi(intent_request):
    """Handle GreetingIntent with a fixed welcome message."""
    greeting = {'contentType': 'PlainText',
                'content': 'Hi there! How can I help you?'}
    return close(intent_request['sessionAttributes'], 'Fulfilled', greeting)
def say_bye(intent_request):
    """Handle ThankyouIntent with a fixed goodbye message."""
    farewell = {'contentType': 'PlainText',
                'content': 'Bye, have a good day :p'}
    return close(intent_request['sessionAttributes'], 'Fulfilled', farewell)
def asking(intent_request):
    """Handle ReturnIntent: look for a wanted item, or record a demand.

    During the DialogCodeHook phase this validates the filled slot against
    the Products table; once a phone number has been collected it stores
    the whole demand in the 'demand' DynamoDB table.  If validation
    succeeded, the final fulfillment message tells the user to search.
    """
    book = get_slots(intent_request)["book"]
    album = get_slots(intent_request)["album"]
    drink = get_slots(intent_request)["drink"]
    game = get_slots(intent_request)["game"]
    phone = get_slots(intent_request)["phone"]
    source = intent_request['invocationSource']
    if source == 'DialogCodeHook':
        slots = get_slots(intent_request)
        if phone is None:
            # No item slot recognized at all: re-elicit (the 'game' slot is
            # used as a catch-all prompt target here).
            if book is None and album is None and drink is None and game is None:
                return elicit_slot(intent_request['sessionAttributes'],
                                   intent_request['currentIntent']['name'],
                                   slots,
                                   'game',
                                   {'contentType': 'PlainText', 'content': 'sorry can you please change another word? I don\'t recognize that'})
            validation_result = validate_asking_config(intent_request, book, album, drink, game)
            if not validation_result['isValid']:
                # Item not found: clear the violated slot and ask for the
                # phone number (or a new term) per the validation message.
                slots[validation_result['violatedSlot']] = None
                output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}
                return elicit_slot(output_session_attributes,
                                   intent_request['currentIntent']['name'],
                                   slots,
                                   validation_result['violatedSlot'],
                                   validation_result['message'])
        else:
            # Phone number collected: persist the demand.  The wanted item
            # was stashed in the session attributes by the validator; "-"
            # marks the categories that were not requested.
            dynamo = boto3.client("dynamodb")
            if 'book' in intent_request['sessionAttributes'].keys():
                book = intent_request['sessionAttributes']['book']
            else:
                book = "-"
            if 'album' in intent_request['sessionAttributes'].keys():
                album = intent_request['sessionAttributes']['album']
            else:
                album = "-"
            if 'game' in intent_request['sessionAttributes'].keys():
                game = intent_request['sessionAttributes']['game']
            else:
                game = "-"
            if 'drink' in intent_request['sessionAttributes'].keys():
                drink = intent_request['sessionAttributes']['drink']
            else:
                drink = "-"
            response = dynamo.put_item(TableName='demand', Item={
                "phone":{
                    'S':phone
                },
                "book":{
                    'S':book
                },
                "album":{
                    'S':album
                },
                "game":{
                    'S':game
                },
                "drink":{
                    'S':drink
                }
            })
            return {
                # 'sessionAttributes': event['sessionAttributes'],
                'dialogAction': {
                    'type': 'Close',
                    'fulfillmentState': 'Fulfilled',
                    'message':{
                        "contentType": "PlainText",
                        "content": "Gotcha! Yahaha you wil find it!"
                    }
                }
            }
    # Default path: the item exists, tell the user to go search for it.
    response = {
        # 'sessionAttributes': event['sessionAttributes'],
        'dialogAction': {
            'type': 'Close',
            'fulfillmentState': 'Fulfilled',
            'message':{
                "contentType": "PlainText",
                "content": "Yahaha you found it! Just search it, someone is selling!"
            }
        }
    }
    return response
""" --- Intents --- """
def dispatch(intent_request):
    """
    Called when the user specifies an intent for this bot.
    Routes the request to the handler registered for that intent name.
    """
    logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))
    intent_name = intent_request['currentIntent']['name']
    # Dispatch table instead of an if/elif chain.
    handlers = {
        'GreetingIntent': say_hi,
        'ThankyouIntent': say_bye,
        'ReturnIntent': asking,
    }
    handler = handlers.get(intent_name)
    if handler is None:
        raise Exception('Intent with name ' + intent_name + ' not supported')
    return handler(intent_request)
""" --- Main handler --- """
def lambda_handler(event, context):
    """
    Route the incoming request based on intent.
    The JSON body of the request is provided in the event slot.
    """
    # By default, treat the user request as coming from the America/New_York time zone.
    # NOTE(review): time.tzset() exists only on Unix -- fine for AWS Lambda.
    os.environ['TZ'] = 'America/New_York'
    time.tzset()
    logger.debug('event.bot.name={}'.format(event['bot']['name']))
    return dispatch(event)
| true |
1802e7f915321879f3954a27a5c943bf5667bfb4 | Python | IamGianluca/checkio-solutions | /checkio/deprecated/house_password.py | UTF-8 | 661 | 3.734375 | 4 | [] | no_license | import re
DIGIT_RE = re.compile(r"\d")
LOWER_RE = re.compile(r"[a-z]")
UPPER_RE = re.compile(r"[A-Z]")


def checkio(data):
    """Check if a password is strong.

    Args:
        data [str]: The password to check.
    Returns:
        [bool] True if the password is strong and False if it's not.

    A password is strong if it contains at least 10 symbols, and
    one digit, one upper case and one lower case letter.
    """
    if len(data) < 10:
        return False
    return all(rx.search(data) for rx in (DIGIT_RE, LOWER_RE, UPPER_RE))
| true |
4314ccddb7e35b0d7d265f49e8b715dc834a82b0 | Python | profHydee/mypython.com | /Ticket-Reservation-App/deleteReservation.py | UTF-8 | 1,111 | 2.984375 | 3 | [] | no_license | from tkinter import *
from tkinter import ttk
from Mydatabase import MyReservation
from tkinter import messagebox
class DeleteTickets:
    """Tkinter dialog that deletes a reservation by its ID.

    NOTE(review): constructing this class opens the window and blocks in
    Tk's mainloop until the window is closed.
    """
    def __init__(self):
        """Build the form, wire up the delete button, and run the GUI loop."""
        self._myreservation=MyReservation()
        self._root=Tk()
        self._root.title("Delete Reservation")
        #Adding the entry field for data needed to delete a ticket
        ttk.Label(self._root, text="Please Enter the details below to delete your reservation").grid(row=0, column=0, columnspan=3, pady=8, padx=8, sticky="w")
        ttk.Label(self._root, text="ID").grid(row=1, column=0, sticky="e")
        IdEntry=ttk.Entry(self._root, width=30)
        IdEntry.grid(row=1, column=1)
        DelButton=ttk.Button(self._root, text="Delete Ticket")
        DelButton.grid(row=2, column=1)
        def BuClick():
            # Button callback: delete by the entered ID, show the backend's
            # message, then clear the entry for the next use.
            #print("hope it works")
            msg=self._myreservation.DeleteMyTickets(IdEntry.get())
            messagebox.showinfo(title="Delete Reservation", message=msg)
            IdEntry.delete(0, 'end')
        DelButton.config(command=BuClick)
        self._root.mainloop()
| true |
fbeeb8d000bd0ef27dd1d87b2fa5cec82ed509f5 | Python | dancaps/python_playground | /cw_maximumSubarraySum.py | UTF-8 | 1,160 | 4.34375 | 4 | [] | no_license | #!/urs/bin/env python3
'''http://www.codewars.com/kata/54521e9ec8e60bc4de000d6c/train/python
Maximum subarray sum
The maximum sum subarray problem consists in finding the maximum sum of a contiguous subsequence in an array or list of integers:
maxSequence([-2, 1, -3, 4, -1, 2, 1, -5, 4])
# should be 6: [4, -1, 2, 1]
Easy case is when the list is made up of only positive numbers and the maximum sum is the sum of the whole array. If the list is made up of only negative numbers, return 0 instead.
Empty list is considered to have zero greatest sum. Note that the empty list or array is also a valid sublist/subarray.
'''
import itertools
def maxSequence(arr):
    """Return the maximum sum of a contiguous subarray of arr.

    The empty subarray (sum 0) is always allowed, so an empty or
    all-negative input yields 0.

    Uses Kadane's algorithm, O(n) time / O(1) space.  Bug fix: the old
    implementation enumerated 2-element permutations, so it only ever
    summed pairs -- for the sample input it answered 8 instead of the
    expected 6 -- and it also printed debug output and materialized
    O(n^2) tuples.
    """
    best = 0
    current = 0
    for value in arr:
        # Extend the running subarray, or restart at empty if it went negative.
        current = max(0, current + value)
        best = max(best, current)
    return best

print(maxSequence([-2, 1, -3, 4, -1, 2, 1, -5, 4]))
print(maxSequence([]))
'''
test.describe("Tests")
test.it('should work on an empty array')
test.assert_equals(maxSequence([]), 0)
test.it('should work on the example')
test.assert_equals(maxSequence([-2, 1, -3, 4, -1, 2, 1, -5, 4]), 6)
''' | true |
8a84b660b6a3edbb965d1d434637c3d26d8a9c60 | Python | giansegato/clue-hackathon | /src/train_predict.py | UTF-8 | 1,229 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | import sys
import pandas as pd
from os.path import join
def run(data_fname):
    """Compute per-user symptom probabilities and write them to ../result.txt.

    Assumes the CSV at *data_fname* has at least the columns user_id,
    cycle_id, symptom and day_in_cycle -- TODO confirm schema.
    """
    # load data
    df = pd.read_csv(data_fname)
    # extract symptoms to predict
    symptoms = ['happy', 'pms', 'sad', 'sensitive_emotion', 'energized', 'exhausted',
                'high_energy', 'low_energy', 'cramps', 'headache', 'ovulation_pain',
                'tender_breasts', 'acne_skin', 'good_skin', 'oily_skin', 'dry_skin']
    df = df[df.symptom.isin(symptoms)]
    # predictions are simply the averages per user/day_of_cycle/symptom of the training data
    # (occurrence count divided by the user's max cycle_id; presumably
    # cycle_id counts cycles from 1 -- TODO confirm)
    user_num_cycles = df[['user_id', 'cycle_id']].groupby(['user_id']).max()
    user_symptoms_total = df.groupby(['user_id', 'symptom', 'day_in_cycle']).count()[['cycle_id']]
    user_symptoms_rel = user_symptoms_total / user_num_cycles
    # create results
    user_symptoms_rel.reset_index(inplace=True)
    user_symptoms_rel = user_symptoms_rel.rename(columns={'cycle_id': 'probability'})
    results = user_symptoms_rel[['user_id', 'day_in_cycle', 'symptom', 'probability']]
    # save results in the correct format.
    results.to_csv('../result.txt', index=None)
if __name__ == '__main__':
    # Entry point: the data file path is the last command-line argument.
    print('run training')
    data_fname = sys.argv[-1]
    run(data_fname)
| true |
1a496de69a7b52c4d6fa195eba9e770f64120f18 | Python | mindspore-ai/mindspore | /mindspore/python/mindspore/dataset/__init__.py | UTF-8 | 4,755 | 2.625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-f... | permissive | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides APIs to load and process various common datasets such as MNIST,
CIFAR-10, CIFAR-100, VOC, COCO, ImageNet, CelebA, CLUE, etc. It also supports datasets
in standard format, including MindRecord, TFRecord, Manifest, etc. Users can also define
their own datasets with this module.
Besides, this module provides APIs to sample data while loading.
We can enable cache in most of the dataset with its key arguments 'cache'. Please notice that cache is not supported
on Windows platform yet. Do not use it while loading and processing data on Windows. More introductions and limitations
can refer `Single-Node Tensor Cache <https://www.mindspore.cn/tutorials/experts/en/master/dataset/cache.html>`_ .
Common imported modules in corresponding API examples are as follows:
.. code-block::
import mindspore.dataset as ds
import mindspore.dataset.transforms as transforms
import mindspore.dataset.vision as vision
Descriptions of common dataset terms are as follows:
- Dataset, the base class of all the datasets. It provides data processing methods to help preprocess the data.
- SourceDataset, an abstract class to represent the source of dataset pipeline which produces data from data
sources such as files and databases.
- MappableDataset, an abstract class to represent a source dataset which supports for random access.
- Iterator, the base class of dataset iterator for enumerating elements.
Introduction to data processing pipeline
----------------------------------------
.. image:: dataset_pipeline_en.png
As shown in the above figure, the mindspore dataset module makes it easy for users to define data preprocessing
pipelines and transform samples in the dataset in the most efficient (multi-process / multi-thread) manner.
The specific steps are as follows:
- Loading datasets: Users can easily load supported datasets using the `*Dataset` class, or load Python layer
customized datasets through `UDF Loader` + `GeneratorDataset` . At the same time, the loading class method can
accept a variety of parameters such as sampler, data slicing, and data shuffle;
- Dataset operation: The user uses the dataset object method `.shuffle` / `.filter` / `.skip` / `.split` /
`.take` / ... to further shuffle, filter, skip, and obtain the maximum number of samples of datasets;
- Dataset sample transform operation: The user can add data transform operations
( `vision transform <https://mindspore.cn/docs/en/master/api_python/mindspore.\
dataset.transforms.html#module-mindspore.dataset.vision>`_ ,
`NLP transform <https://mindspore.cn/docs/en/master/api_python/mindspore.\
dataset.transforms.html#module-mindspore.dataset.text>`_ ,
`audio transform <https://mindspore.cn/docs/en/master/api_python/mindspore.\
dataset.transforms.html#module-mindspore.dataset.audio>`_ ) to the map
operation to perform transformations. During data preprocessing, multiple map operations can be defined to
perform different transform operations to different fields. The data transform operation can also be a
user-defined transform `pyfunc` (Python function);
- Batch: After the transformation of the samples, the user can use the batch operation to organize multiple samples
into batches, or use self-defined batch logic with the parameter `per_batch_map` applied;
- Iterator: Finally, the user can use the dataset object method `create_dict_iterator` to create an
iterator, which can output the preprocessed data cyclically.
Quick start of Dataset Pipeline
-------------------------------
For a quick start of using Dataset Pipeline, download `Load & Process Data With Dataset Pipeline
<https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/dataset_gallery.html>`_
to local and run in sequence.
"""
from .core import config
from .engine import *
from .engine.cache_client import DatasetCache
from .engine.datasets import *
from .engine.graphdata import GraphData, SamplingStrategy, OutputFormat
from .engine.samplers import *
from .engine.serializer_deserializer import compare, deserialize, serialize, show
from .utils.line_reader import LineReader
__all__ = []
__all__.extend(engine.__all__)
| true |
fbe25f17904718920494bbe92a1a85b7dade2eb8 | Python | ryansturmer/CraftBirds | /boids.py | UTF-8 | 10,137 | 2.546875 | 3 | [] | no_license | from client import Client, thread
import random, time
from numpy import array, add
from numpy.linalg import norm
from math import sin, cos, atan2, hypot, pi
import threading
HOST = '184.173.122.170'
PORT = 30624
LOC = (2380, 52, 4283)
#HOST = 'localhost'
#PORT = 4080
#LOC = (0,50,0)
with open('common.txt') as fp:
BOID_NAMES = [name.strip() for name in fp.readlines() if ' ' not in name and "'" not in name]
BOID_INITIAL_POSITION = LOC
BOID_INITIAL_SPREAD = 30.0 # Boids start in a random cloud with this diameter
BOID_GRAVITY = 0.1 # Force pulling boids inward towards their collective centers of mass
BOID_TARGET_GRAVITY = 0.1 # Flocking towards a target
BOID_PERSONAL_SPACE = 4.0 # How close a boid has to be to another boid to provoke it to start steering away
BOID_OBEDIENCE = 0.01 # Boid tendency to match speed with other boids in the flock
BOID_COUNT = 20 # Number of boids in the flock
BOID_STEPS_PER_UPDATE = 20 # Simulation timestep
BOID_UPDATE_INTERVAL = 0.5 # Update interval
BOID_SCALE = 1.0 # Scale factor that translates the simulated velocities
BOID_BOUNDS_CENTER = LOC # Center of the boid boundary area
BOID_BOUNDS_X = 100 # X extent of the boundary area
BOID_BOUNDS_Y = 35 # Y extent of the boundary area
BOID_BOUNDS_Z = 100 # Z extent of the boundary area
BOID_BOUNDS_REPULSION = 10.0 # How strongly boids are repelled from the boundaries
BOID_MAX_SPEED = 50.0 # Maximum speed of any boid
BOID_MIN_SPEED = 10.0 # Minimum speed of any boid
BOID_REPULSION = 1.0 # Boid repulsion from each other
BOID_USERNAME = 'birdmaster'
class Boid(object):
    """A single simulated bird: a 3-D position and velocity (numpy arrays)."""

    def __init__(self, position=None, velocity=None):
        # Explicit None checks (instead of the old `position or [...]`) so a
        # legitimately falsy sequence is never silently replaced by the
        # default.  Float defaults keep the arrays float-typed, so later
        # in-place updates like `velocity += dv` never hit int-cast errors.
        self.position = array([0.0, 0.0, 0.0] if position is None else position)
        self.velocity = array([0.0, 0.0, 0.0] if velocity is None else velocity)
class BoidsModel(object):
def __init__(self):
self.boids = []
self.rules = []
self.update_function = None
self.setup_rules()
self.create_boids(BOID_COUNT)
self.gravity = BOID_GRAVITY
self.flock_target = None
self.bound = True
self.bounds_center = BOID_BOUNDS_CENTER
self.lock = threading.RLock()
def set_update_function(self, func):
self.update_function = func
def update(self):
if self.update_function:
self.update_function(self)
def setup_rules(self):
self.rules = [ self.rule_center_of_mass,
self.rule_avoid_other_boids,
self.rule_match_velocities,
self.rule_limit_bounds,
self.rule_flock_target]
def disperse(self):
with self.lock:
self.flock_target = None
self.gravity = 0
self.bound = True
def regroup(self):
with self.lock:
self.bound = True
self.flock_target = None
self.gravity = BOID_GRAVITY
def set_flock_target(self, v):
with self.lock:
self.disperse()
self.flock_target = v
self.bound = False
def adjust_velocity(self, x):
with self.lock:
for boid in self.boids:
boid.velocity *= float(x)
def recenter(self, v):
with self.lock:
self.bounds_center = array(v)
def adjust_gravity(self, x):
with self.lock:
self.gravity *= float(x)
def run(self, iterations=None):
# Evaluation means run a simulation timestep
# Update means run the self.update_function to update the view
self.evaluate()
t = time.time()
self.update()
last_update = t
i = 0
while True:
t1 = time.time()
with self.lock:
for step in range(BOID_STEPS_PER_UPDATE):
self.evaluate()
i+=1
t2 = time.time()
while t < last_update + BOID_UPDATE_INTERVAL:
t = time.time()
# Print time between updates / time it took to simulate
#print '%s/%s' % (t - last_update, t2-t1)
self.update()
last_update = t
if iterations and i >= iterations:
break
def evaluate(self):
for boid in self.boids:
# Apply all the rules in the model to this boid
#vectors = [rule(boid) for rule in self.rules]
dv = array([0,0,0])
for rule in self.rules:
dv += rule(boid)
boid.velocity += dv
# Enforce a speed limit
speed = norm(boid.velocity)
if speed > BOID_MAX_SPEED:
boid.velocity = BOID_MAX_SPEED*boid.velocity/speed
elif speed < BOID_MIN_SPEED:
boid.velocity = BOID_MIN_SPEED*boid.velocity/speed
# Update the boid's position
boid.position = (boid.position + BOID_SCALE*boid.velocity/BOID_STEPS_PER_UPDATE)
def rule_center_of_mass(self, me):
'Velocity vector driving all boids to their collective center of mass'
center_of_mass = array([0,0,0])
for boid in self.boids:
if boid is not me:
center_of_mass = center_of_mass + boid.position
center_of_mass = center_of_mass/float(len(self.boids)-1)
v = (center_of_mass - me.position)*self.gravity
return v
    def rule_avoid_other_boids(self, me):
        'Repulsion bump for when boids are too close'
        v = array([0,0,0])
        for boid in self.boids:
            if boid is not me:
                delta = boid.position - me.position
                dist = abs(norm(delta))  # norm is already non-negative
                if dist < BOID_PERSONAL_SPACE:
                    # Push away from the intruding neighbor.
                    v = v - delta
        return v*BOID_REPULSION
    def rule_match_velocities(self, me):
        'Observe the velocities of other boids, and adjust speed to match gradually'
        v = array([0,0,0])
        for boid in self.boids:
            if boid is not me:
                v = v + boid.velocity
        # Average neighbor velocity; BOID_OBEDIENCE controls how quickly
        # this boid converges toward it.
        v /= float(len(self.boids)-1)
        return BOID_OBEDIENCE*(v - me.velocity)
def rule_flock_target(self, me):
if self.flock_target != None:
return -(me.position - self.flock_target)*BOID_TARGET_GRAVITY
else:
return array([0,0,0])
def rule_limit_bounds(self, me):
'Nudge back in bounds if boid strays outside the bounding box'
if not self.bound:
return array([0,0,0])
x,y,z = me.position
dx,dy,dz = 0,0,0
cx,cy,cz = self.bounds_center
if x < cx - BOID_BOUNDS_X:
dx = BOID_BOUNDS_REPULSION
elif x > cx + BOID_BOUNDS_X:
dx = -BOID_BOUNDS_REPULSION
if y < cy - BOID_BOUNDS_Y:
dy = BOID_BOUNDS_REPULSION
elif y > cy + BOID_BOUNDS_Y:
dy = -BOID_BOUNDS_REPULSION
if z < cz - BOID_BOUNDS_Z:
dz = BOID_BOUNDS_REPULSION
elif z > cz + BOID_BOUNDS_Z:
dz = -BOID_BOUNDS_REPULSION
return array([dx,dy,dz])
    def create_boids(self, count):
        """Populate self.boids with `count` boids around BOID_INITIAL_POSITION."""
        # Start the boids with a little spread and an initial velocity to keep things interesting
        # All boids share one random initial velocity; positions are jittered
        # uniformly within BOID_INITIAL_SPREAD around the initial position.
        dx,dy,dz = random.uniform(50, 75.0),random.uniform(10.0, 20.0),random.uniform(50, 75.0)
        for i in range(count):
            x,y,z = BOID_INITIAL_POSITION
            x += random.uniform(-BOID_INITIAL_SPREAD/2.0, BOID_INITIAL_SPREAD/2.0)
            y += random.uniform(-BOID_INITIAL_SPREAD/2.0, BOID_INITIAL_SPREAD/2.0)
            z += random.uniform(-BOID_INITIAL_SPREAD/2.0, BOID_INITIAL_SPREAD/2.0)
            self.boids.append(Boid(position=(x,y,z), velocity=(dx,dy,dz)))
# Thanks, Fogle!
def r2v(rx, ry):
    """Convert a (yaw rx, pitch ry) rotation in radians to a unit direction vector."""
    horizontal = cos(ry)  # length of the projection onto the XZ plane
    return (cos(rx - pi / 2) * horizontal,
            sin(ry),
            sin(rx - pi / 2) * horizontal)
def v2r(x, y, z):
    """Convert a direction vector back to a (yaw, pitch) rotation in radians."""
    yaw = atan2(z, x) + pi / 2
    pitch = atan2(y, hypot(x, z))
    return (yaw, pitch)
if __name__ == '__main__':
    # NOTE: this file is Python 2 (print statements, `except Exception, e`).
    model = BoidsModel()
    client = Client(HOST, PORT)
    # One named player per boid.
    for boid in model.boids:
        name = random.choice(BOID_NAMES)
        client.add_player(name)
    def command_handler(s):
        # Parse chat commands of the form "<player> birds <cmd> [args...]".
        print 'handling command %s' % s
        try:
            s = s.lower().split()
            player = s[0].rstrip('>')
            target = s[1]
            cmd = s[2]
            args = s[3:]
            if player.startswith('bird') and target == 'birds':
                if cmd == 'disperse':
                    model.disperse()
                elif cmd == 'regroup':
                    model.regroup()
                elif cmd == 'attack':
                    # Fly the flock at a named opponent's last known position.
                    nick = args[0]
                    pos = client.opponent_positions.get(nick, None)
                    if pos:
                        x,y,z,rx,ry = pos
                        model.set_flock_target(array([x,y,z]))
                elif cmd == 'come':
                    # Re-center the bounding box on the commanding player.
                    player = client.get_player_by_nick(player)
                    if player:
                        x,y,z,rx,ry = player.position
                        model.recenter(array([x,y,z]))
                elif cmd == 'huddleup':
                    model.adjust_gravity(2.0)
                elif cmd == 'breakup':
                    model.adjust_gravity(0.5)
                elif cmd == 'slowdown':
                    model.adjust_velocity(0.5)
                elif cmd == 'speedup':
                    model.adjust_velocity(2.0)
        except Exception, e:
            # Malformed commands are reported and otherwise ignored.
            print "Invalid command: '%s'" % s
            print e
    def update(model):
        # Push each boid's position/heading to its corresponding player.
        for boid, player in zip(model.boids, client.players.values()):
            x,y,z = boid.position
            dx,dy,dz = boid.velocity
            rx,ry = v2r(dx,dy,dz)
            player.set_position(x,y,z,rx,ry)
    client.add_talk_handler(command_handler)
    model.set_update_function(update)
    # Wait for client to be ready
    # NOTE(review): busy-wait spin; pegs a core until the client connects.
    while not client.ready:
        pass
    model.run()
| true |
4846db35e0b4a6e8e6586cc86941722649edd333 | Python | xiangzhaocheng12/Swiper | /sms_test.py | UTF-8 | 823 | 2.609375 | 3 | [] | no_license | """
如果必须的参数都给全了, 还是返回错误提示,
那么就把非必须参数都写进去
"""
import time
import requests
import json
import hashlib
# Submail "xsend" SMS API test script.
# NOTE(review): a real-looking appkey is committed here -- this credential
# should be rotated and loaded from the environment instead.
api = 'https://api.mysubmail.com/message/xsend'
appid = '52461'
appkey = 'b2a7e3b4f77d220a5c26f3625db7b368'
# Request parameters (appid/to/project/vars plus signing fields).
args = {
    'appid': appid,
    'to': '13567943726',
    'project': 'axSpR',
    'vars': json.dumps({'code': '123456', 'time': '5分钟'}),
    'timestamp': int(time.time()),
    'sign_type': 'md5',
}
# Digital signature: md5(appid + appkey + sorted "k=v&..." string + appid + appkey).
signature_str = \
    '&'.join([f'{k}={v}' for k, v in sorted(args.items())])
string = f'{appid}{appkey}{signature_str}{appid}{appkey}'
signature_str = hashlib.md5(string.encode('utf8')).hexdigest()
args['signature'] = signature_str
response = requests.post(api, data=args)
print(response.content)
print(response.status_code)
| true |
c7e7ca8794c5778f8d0cf58aeb897fd79f5e9660 | Python | AlessandroFMello/EstruturaDeRepeticao | /Exercício008.py | UTF-8 | 519 | 3.59375 | 4 | [] | no_license | import msvcrt
# Read 5 numbers, print their sum and average, repeat until SPACE is pressed.
# NOTE: msvcrt is Windows-only; prompts/messages are in Portuguese.
while True:
    arr = []
    contador = 0
    arr.append(float(input('Digite um número:\n')))
    # Four more numbers, for five in total.
    while contador < 4:
        arr.append(float(input('Digite outro número\n')))
        contador += 1
    print('A soma dos números %s é: %s' % (arr, sum(arr)))
    media = sum(arr)/len(arr)
    print('\nA média dos números %s é: %s' % (arr, media))
    print('Pressione qualquer tecla para repetir ou ESPAÇO para sair')
    key = msvcrt.getch()
    if key == b' ':
        break
| true |
1f974f206e62cb7dd1ade61edbf80e9c996403b9 | Python | AlexKucera/babylondreams-modo | /bd_tools/bd_overscan.py | UTF-8 | 3,320 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env python
# encoding: utf-8
# Alexander Kucera
# babylondreams.de
# Description
"""
babylondreams - bd_overscan
Release Notes:
V0.1 Initial Release - 2017-08-28
"""
import re
import bd_helpers
import modo
import lx
import traceback
from var import *
# FUNCTIONS -----------------------------------------------
# END FUNCTIONS -----------------------------------------------
# MAIN PROGRAM --------------------------------------------
def main(mode='resolution',
         newsize='2016x1134'):
    """Apply an "overscan" resize to the current modo scene.

    mode='resolution': `newsize` is "<width>x<height>" in pixels.
    mode='scale': `newsize` is a float scale factor applied to both axes.
    The camera aperture is scaled proportionally so framing is preserved.
    """
    regex = "(\d+)(?:\D+)(\d+)"
    start_timer = bd_helpers.timer()
    scene = modo.Scene()
    # Get current scene values
    old_width = scene.renderItem.channel(lx.symbol.sICHAN_POLYRENDER_RESX).get()
    old_height = scene.renderItem.channel(lx.symbol.sICHAN_POLYRENDER_RESY).get()
    old_apertureX = scene.renderCamera.channel(lx.symbol.sICHAN_CAMERA_APERTUREX).get()
    old_apertureY = scene.renderCamera.channel(lx.symbol.sICHAN_CAMERA_APERTUREY).get()
    print("Old Render Resolution: {}x{}px".format(old_width, old_height))
    print("Old Camera Aperture: {}mmx{}mm".format(old_apertureX, old_apertureY))
    if mode == "scale":
        try:
            # Accept comma as decimal separator.
            scale = float(newsize.replace(",", "."))
            new_width = int(old_width * scale)
            new_height = int(old_height * scale)
        except:
            # NOTE(review): bare except; also, after this alert the code
            # still falls through to use new_width/new_height, which are
            # undefined here -> NameError. Should return after alerting.
            modo.dialogs.alert("Wrong Scale Format",
                               "Unable to parse the given scale. Please use a floating point number.",
                               dtype='error')
    else:
        try:
            # NOTE(review): if `newsize` parses as a plain float here, no
            # branch ever assigns new_width/new_height -> NameError below.
            float(newsize.replace(",", "."))
        except:
            match = re.match(regex, newsize)
            if match and len(match.groups()) == 2:
                new_width = int(match.group(1))
                new_height = int(match.group(2))
            else:
                modo.dialogs.alert(
                    "Wrong Resolution Format",
                    "Unable to parse the given resolution. Please use the format <width> x <height>.",
                    dtype='error'
                )
    # Apply Overscan formula to width and height
    # (float() keeps the division true division under Python 2)
    new_apertureX = old_apertureX * (new_width / float(old_width))
    new_apertureY = old_apertureY * (new_height / float(old_height))
    # Set new scene values
    scene.renderItem.channel(lx.symbol.sICHAN_POLYRENDER_RESX).set(new_width)
    scene.renderItem.channel(lx.symbol.sICHAN_POLYRENDER_RESY).set(new_height)
    scene.renderCamera.channel(lx.symbol.sICHAN_CAMERA_APERTUREX).set(new_apertureX)
    scene.renderCamera.channel(lx.symbol.sICHAN_CAMERA_APERTUREY).set(new_apertureY)
    print("New Render Resolution: {}x{}px".format(new_width, new_height))
    print("New Camera Aperture: {}mmx{}mm".format(new_apertureX, new_apertureY))
    bd_helpers.timer(start_timer, ' Overall')
# END MAIN PROGRAM -----------------------------------------------
if __name__ == '__main__':
    # Argument parsing is available through the
    # lx.arg and lx.args methods. lx.arg returns
    # the raw argument string that was passed into
    # the script. lx.args parses the argument string
    # and returns an array of arguments for easier
    # processing.
    # NOTE(review): the parsed args are never forwarded to main().
    argsAsString = lx.arg()
    argsAsTuple = lx.args()
    try:
        main()
    except:
        # Top-level boundary: report the traceback inside modo (Python 2 print).
        print traceback.format_exc()
| true |
a0a8633e91cd4481446307aaf35d09d6360648c9 | Python | honovation/veil | /src/veil/utility/encoding/encoding.py | UTF-8 | 1,968 | 2.625 | 3 | [] | no_license | from __future__ import unicode_literals, print_function, division
import logging
LOGGER = logging.getLogger(__name__)
def to_str(s):
    """Coerce any value to a byte string (Python 2 semantics).

    str/bytes pass through; unicode is UTF-8 encoded; everything else
    goes through str().
    """
    if isinstance(s, (str, bytes)):
        return s
    if isinstance(s, unicode):
        return s.encode('UTF-8')
    return str(s)
def to_unicode(s, encoding='UTF-8', remedial_encodings=('gb18030',), strict=True, additional=None):
    """Best-effort conversion of `s` to unicode (Python 2 only).

    Byte strings are decoded with `encoding` first, then each remedial
    encoding. Tuples/lists/dicts are converted recursively, then
    stringified. On decode failure: logs a warning and either re-raises
    (strict=True) or falls back to the escaped repr of the bytes.
    `additional` is extra context included in the warning log.
    """
    if isinstance(s, unicode):
        return s
    if isinstance(s, (str, bytes)):
        if encoding in remedial_encodings:
            encodings = remedial_encodings
        else:
            encodings = (encoding, ) + remedial_encodings
        for e in encodings:
            try:
                return unicode(s, encoding=e)
            except UnicodeDecodeError:
                # Only give up after the last candidate encoding failed.
                if e == encodings[-1]:
                    u = unicode(repr(s)[1:-1])
                    LOGGER.warning('to_unicode failed: %(tried_encodings)s, %(u)s, %(additional)s', {
                        'tried_encodings': encodings,
                        'u': u,
                        'additional': additional
                    })
                    if strict:
                        raise
                    else:
                        return u
    if isinstance(s, (tuple, list, dict)):
        # Convert elements recursively, then return the unicode repr of
        # the rebuilt container.
        kwargs = dict(encoding=encoding, remedial_encodings=remedial_encodings, strict=strict, additional=additional)
        if isinstance(s, tuple):
            return unicode(tuple(to_unicode(e, **kwargs) for e in s))
        elif isinstance(s, list):
            return unicode([to_unicode(e, **kwargs) for e in s])
        else:
            return unicode({to_unicode(k, **kwargs): to_unicode(v, **kwargs) for k, v in s.items()})
    try:
        return unicode(s)
    except UnicodeDecodeError:
        u = unicode(repr(s)[1:-1])
        LOGGER.warning('to_unicode failed: %(u)s, %(additional)s', {'u': u, 'additional': additional})
        if strict:
            raise
        else:
            return u
| true |
9cd16e32fc72d5e1bcfdc6e004d87e8466c82c53 | Python | ktp-forked-repos/esperanto-analyzer | /esperanto_analyzer/morphological_sentence_analyzer.py | UTF-8 | 2,218 | 3.0625 | 3 | [
"BSD-2-Clause",
"MIT"
] | permissive | """
"""
# pylint: disable=too-few-public-methods,missing-docstring
import re
from esperanto_analyzer.analyzers import MorphologicalAnalyzer
class MorphologicalSentenceAnalyzer:
    """Tokenize a sentence and run a `MorphologicalAnalyzer` on each word.

    Results are computed once by `analyze()` and cached; the accessor
    methods return None until `analyze()` has been called.
    """

    # The same as `string.punctuation`
    SENTENCE_CLEAN_REGEXP = re.compile('[!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~]')

    def __init__(self, sentence):
        self.sentence = sentence
        self.sentence_words = self._split_sentence(sentence)
        self.processed = False
        self.internal_results = None

    def analyze(self):
        """Run the per-word analyzers; True on first call, None when cached."""
        # Avoid running the same thing many times; keep the cached results.
        if self.processed is True:
            return None
        # Cache the results
        self.internal_results = self._process_words(self.sentence_words)
        self.processed = True
        return True

    def analyzes_results(self):
        """List of raw per-word analyzer results, or None before analyze()."""
        if not self.processed:
            return None
        return [result.results for result in self.internal_results]

    def simple_results(self):
        """[[word, part-of-speech name], ...] for the analyzed sentence."""
        # NOTE(review): results() is None before analyze() runs, which makes
        # this raise TypeError -- confirm callers always analyze first.
        return self._format_simple_results(self.results())

    def results(self):
        """[[raw_word, analyze_object], ...], or None before analyze()."""
        if not self.processed:
            return None
        results = []
        for analyze in self.analyzes_results():
            results.append([analyze.raw_word, analyze])
        return results

    def _split_sentence(self, sentence):
        """Strip punctuation, then split on whitespace."""
        clean_sentence = self._clean_sentence(sentence)
        return clean_sentence.split()

    def _clean_sentence(self, sentence):
        """Remove all ASCII punctuation characters from `sentence`."""
        return re.sub(self.SENTENCE_CLEAN_REGEXP, '', sentence)

    def _process_words(self, words):
        """Run a `MorphologicalAnalyzer` over each word; return the analyzers."""
        results = []
        for word in words:
            analyzer = MorphologicalAnalyzer(word)
            analyzer.analyze()
            results.append(analyzer)
        return results

    def _format_simple_results(self, results):
        """Reduce full analyze results to [[word, pos_name], ...] pairs."""
        out_data = []
        for data in results:
            try:
                # Get the current 'Part of Speech' name, such as: 'Adverb', 'Noun'
                pos_name = data[1].result.word.__class__.__name__
            except Exception:
                # Fix: was a bare `except:`, which also swallowed SystemExit
                # and KeyboardInterrupt; keep the 'Undefined' fallback for
                # ordinary failures only.
                pos_name = 'Undefined'
            out_data.append([
                data[0],
                pos_name
            ])
        return out_data
| true |
6e100375b6cc988248d2a272bbbf8147b0a41306 | Python | treyvian/CDMO-module-1 | /SAT/src/sat.py | UTF-8 | 4,538 | 3.140625 | 3 | [] | no_license | from z3 import *
import matplotlib.pyplot as plt
from itertools import combinations
import time
import fileinput
import os
# Input the number of the instance from keyboard
num = input("Please enter the number of the instance:")
# Reading the instance
def read_instance(i):
    """Read instance file ../../instances/ins-<i>.txt and return its lines.

    The path is resolved relative to this script's directory.
    """
    dirname = os.path.dirname(__file__)
    filename = os.path.join(dirname, "../../instances/ins-{}.txt".format(i))
    # Fix: the original iterated the file with fileinput and never closed
    # the stream; a context manager guarantees the handle is released.
    with open(filename) as f:
        return f.read().splitlines()
s = read_instance(num)
# Method used to plot the solution
# Method used to plot the solution
def plot_solution(width, n_rets, sizes, positions, height):
    """Draw each placed rectangle as a colored bar on a width x height grid."""
    print("Sizes: ",sizes)
    print("Positions: ", positions)
    fig, ax = plt.subplots()
    # Colors cycle when there are more rectangles than entries.
    colors = ['tab:blue','tab:orange', 'tab:green', 'tab:red','tab:grey','tab:purple','tab:brown', 'black', 'yellow', 'gold']
    for i in range(n_rets):
        ax.broken_barh([(positions[i][0], sizes[i][0])], (positions[i][1], sizes[i][1]), facecolors=colors[i%len(colors)],edgecolors=("black",),linewidths=(1,),)
    ax.set_ylim(0, height+1)
    ax.set_xlim(0, width)
    ax.set_xticks(range(width+1))
    ax.set_yticks(range(height+1))
    ax.set_xlabel('width')
    ax.set_ylabel('height')
    plt.show()
# Output the result in txt file
def write_solution(num, width, height, n_rets, sizes, positions):
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, "../out/out−{}.txt".format(num))
f= open(filename,"w+")
f.write("{} {}\n".format(width, height))
f.write("{}\n".format(n_rets))
for i in range(len(sizes)):
f.write("{} {} {} {}\n".format(sizes[i][0], sizes[i][1], positions[i][0], positions[i][1]))
f.close()
# Formatting the file
# First two lines: board width and rectangle count; last n_rets lines: sizes.
width = int(s[0])
n_rets = int(s[1])
# Splitting the list and casting the string to int
sizes = [i.split() for i in s[-n_rets:]]
sizes = [[int(sizes[i][j]) for j in range(2)] for i in range(n_rets)]
# Calculating the max height that the model can reach
# (sum of all heights = trivial upper bound; tallest rectangle = lower bound)
max_h = 0
for i in range(n_rets):
    max_h = max_h + sizes[i][1]
min_h = max([sizes[i][1] for i in range(n_rets)])
############################### SAT MODEL ######################################
# Functions
def at_least_one(bool_vars):
    # z3 disjunction: true iff at least one literal is true.
    return Or(bool_vars)
def at_most_one(bool_vars):
    # Pairwise encoding: forbid every pair being true at once (O(n^2) clauses).
    return [Not(And(pair[0], pair[1])) for pair in combinations(bool_vars, 2)]
def exactly_one(solver, bool_vars):
    # Exactly-one = all at-most-one pairs plus one at-least-one clause.
    solver.add(at_most_one(bool_vars))
    solver.add(at_least_one(bool_vars))
def vlsi(s, height):
    """Try to place all rectangles on a width x `height` grid with solver `s`.

    Cell variable p[i][j][n]: n < n_rets marks the top-left corner of
    rectangle n; n_rets <= n < 2*n_rets marks a cell covered by rectangle
    n - n_rets; index 2*n_rets is the "empty" value. Returns the per-column
    lists of assigned values, or [] if unsat/timeout.
    """
    # Variables
    p = [[[Bool(f"x_{i}_{j}_{n}") for n in range((2*n_rets)+1)] for j in range(height)] for i in range(width)]
    # A cell has only one value
    for i in range(width):
        for j in range(height):
            exactly_one(s, p[i][j])
    for n in range(n_rets):
        # A rectangle has only one position
        exactly_one(s, [p[i][j][n] for i in range(width) for j in range(height)])
        # Position should respect width and the height
        s.add(at_least_one([p[i][j][n] for i in range(width-sizes[n][0]+1) for j in range(height-sizes[n][1]+1)]))
    # Solving overlapping
    # A corner at (i, j) implies every other covered cell carries the
    # rectangle's "occupied" marker n + n_rets.
    for n in range(n_rets):
        for i in range(width-sizes[n][0]+1):
            for j in range(height-sizes[n][1]+1):
                for k in range(i, i + sizes[n][0]):
                    for u in range(j, j + sizes[n][1]):
                        if(k != i or u != j):
                            s.add(Implies(p[i][j][n], p[k][u][n+n_rets]))
    sol = []
    if s.check() == sat:
        m = s.model()
        for i in range(width):
            sol.append([])
            for j in range(height):
                for k in range((2*n_rets)+1):
                    if m.evaluate(p[i][j][k]):
                        sol[i].append(k)
    elif s.reason_unknown() == "timeout":
        print("Solver timeout")
    else:
        print("Failed to solve at height {}".format(height))
    return sol
# Try increasing heights until the solver finds a placement.
start = time.time()
for i in range(min_h, max_h):
    # Solver
    # NOTE(review): rebinding `s` shadows the instance-lines list parsed
    # above (parsing already happened, so this is safe but confusing).
    s = Solver()
    # Time limit 5 minutes
    times = 300000 # in milliseconds
    s.set(timeout=times)
    m = vlsi(s, i)
    if m :
        positions = []
        # NOTE(review): the inner `for i in range(len(m))` shadows the
        # height loop variable, so plot_solution/write_solution below
        # receive the last column index, not the solved height -- confirm.
        for n in range(n_rets):
            for i in range(len(m)):
                for j in range(len(m[0])):
                    if m[i][j] == n:
                        positions.append([i,j])
        plot_solution(width, n_rets, sizes, positions, i)
        write_solution(num, width, i+1, n_rets, sizes, positions)
        break
end = time.time()
print("Time elapsed: {} seconds".format(end - start))
| true |
b0e3aff235aedf2885684f77efe67d4a0cd42a47 | Python | salhasalman/Authorship_Attribution | /inversion_word2vec.py | UTF-8 | 5,016 | 2.703125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 27 01:52:08 2018
@author: mgungor
"""
from nltk.corpus import stopwords
eng_stopwords = set(stopwords.words("english"))
import time
import multiprocessing
from gensim.models import Word2Vec
from sklearn.metrics import f1_score, accuracy_score
import scipy.io as sc
cores = multiprocessing.cpu_count()
import pandas as pd
import numpy as np
# Per-author Word2Vec scoring: train one model per author, score each test
# document (in windows of w tokens stepped by s), pick the max-likelihood author.
# NOTE(review): time.clock() was removed in Python 3.8; `tmp_txt3 += x`
# concatenates words with no separator -- presumably intentional for scoring,
# confirm; Word2Vec(size=..., iter=...) is the pre-4.0 gensim API.
train = pd.read_csv('train1.csv')
test = pd.read_csv('test1.csv')
ytrain = train.author.values
test_author = sc.loadmat("test_author.mat")["test_author"]
#Unique authors in the training set
unique_author = np.unique(train.author.values)
w=500
s=250
all_probs = np.zeros((len(unique_author), len(test.text)))
for k in range(len(unique_author)):
    author = unique_author[k]
    sentences = train[train.author == author]["text"].ravel().tolist()
    new_sentences = []
    for x in sentences:
        new_sentences.append(x.split())
    ## create a w2v learner
    size = 300
    basemodel = Word2Vec(
        workers=multiprocessing.cpu_count(), size=size, # use your cores
        iter=3, # iter = sweeps of SGD through the data; more is better
        hs=1, negative=0 # we only have scoring for the hierarchical softmax setup
    )
    basemodel.build_vocab(new_sentences)
    print(basemodel)
    basemodel.train(new_sentences, total_examples=len(new_sentences),epochs=basemodel.epochs)
    print(author)
    tic = time.clock()
    for i in range(len(test.text)):
        tmp_txt = test.text[i]
        tmp_txt = tmp_txt.split()
        b = len(tmp_txt)
        tmp2 = int(b/s-1)
        if tmp2 ==0:
            # Short document: score it whole (stopwords removed).
            tmp_txt3 = ''
            for x in tmp_txt:
                if x not in eng_stopwords:
                    tmp_txt3 += x
            llhd = basemodel.score([tmp_txt3])[0]
            if np.isnan(np.log(llhd)):
                all_probs[k][i] =0
            else:
                all_probs[k][i] =np.log(llhd)
        else:
            # Long document: score overlapping windows and sum log-likelihoods.
            probs = np.zeros((1,int(b/s-1)))
            for j in range(int(b/s-1)):
                tmp_txt2 = tmp_txt[s*j:s*j+w]
                tmp_txt3 = ''
                for x in tmp_txt2:
                    if x not in eng_stopwords:
                        tmp_txt3 += x
                probs[0,j] = basemodel.score([tmp_txt3])[0]
            #Standardize it
            lhd = np.exp(probs[0,:] - probs[0,:].max(axis=0))
            #Log likelihood
            llhd = basemodel.score([tmp_txt3])[0]
            if np.isnan(np.log(llhd)):
                all_probs[k][i] =0
            else:
                llhd = np.sum(np.log(lhd[lhd!=0]))
                all_probs[k][i] =llhd
    toc = time.clock()
    print(toc - tic)
predicted_author = np.zeros((len(test.text),1))
#Now Loop through the whole sequence
#Label the max value as the prediction
for k in range(np.shape(all_probs)[1]):
    at = all_probs[:,k]/np.sum(all_probs[:,k])
    #Divide By the sum of all probabilities
    predicted_author[k] = unique_author[np.argmax(at)]
print("f1 Score")
print(f1_score(test_author, predicted_author, average='micro'))
print("Accuracy")
print(accuracy_score(test_author, predicted_author))
##############################################################################
#Let's change w and s
# Second experiment: larger window parameters, whole-document scoring only.
# NOTE(review): this pass uses epochs=basemodel.iter while the first pass
# used basemodel.epochs -- inconsistent gensim API usage, confirm intent.
w=1000
s=500
all_probs = np.zeros((len(unique_author), len(test.text)))
for k in range(len(unique_author)):
    author = unique_author[k]
    sentences = train[train.author == author]["text"].ravel().tolist()
    new_sentences = []
    for x in sentences:
        new_sentences.append(x.split())
    ## create a w2v learner
    size = 300
    basemodel = Word2Vec(
        workers=multiprocessing.cpu_count(), size=size, # use your cores
        iter=3, # iter = sweeps of SGD through the data; more is better
        hs=1, negative=0 # we only have scoring for the hierarchical softmax setup
    )
    basemodel.build_vocab(new_sentences)
    print(basemodel)
    basemodel.train(new_sentences, total_examples=len(new_sentences),epochs=basemodel.iter)
    print(author)
    tic = time.clock()
    for i in range(len(test.text)):
        tmp_txt = test.text[i]
        tmp_txt = tmp_txt.split()
        tmp_txt3 = ''
        for x in tmp_txt:
            if x not in eng_stopwords:
                tmp_txt3 += x
        llhd = basemodel.score([tmp_txt3])[0]
        all_probs[k][i] =llhd
    toc = time.clock()
    print(toc - tic)
predicted_author = np.zeros((len(test.text),1))
#Now Loop through the whole sequence
#Label the max value as the prediction
for k in range(np.shape(all_probs)[1]):
    at = all_probs[:,k]/np.sum(all_probs[:,k])
    #Divide By the sum of all probabilities
    predicted_author[k] = unique_author[np.argmax(at)]
print("f1 Score")
print(f1_score(test_author, predicted_author, average='micro'))
print("Accuracy")
print(accuracy_score(test_author, predicted_author))
7ff927da5c44447bfbc4d6f3b48382ae9be63879 | Python | chris88herr/424-Assignment-3 | /generics_sorting.py | UTF-8 | 1,559 | 3.984375 | 4 | [] | no_license | ##python uses a key parameter in the sorted function
#which takes a function with a single argument and returns a key
# to use for sorting purposes
#operator module functions
#this allows us to do multiple levels of sorting
from operator import attrgetter
#Person class definition
class Person:
    """Simple record of a person's name and age."""
    def __init__(self, name, age):
        self.name = name
        self.age = age
    def __repr__(self):
        # repr of the (name, age) tuple, e.g. "('Mac', 19)"
        return repr((self.name, self.age))
## generic sorting functio that takes the objexts and a key function
## whose job is tu return a value to base the sorting on
def mySortFunction(objects, key):
    """Return `objects` sorted ascending by the one-argument key function."""
    return sorted(objects, key=key)
def float_comp(self): ##identity key for floats: sort by the value itself
    # NOTE(review): `self` here is a plain value, not an instance -- the
    # parameter name is misleading but part of the public signature.
    return self
def people_comp(obj):
    """Sort key for Person-like objects: order by age first, then by name."""
    return (obj.age, obj.name)
#main
#main
def main():
    """Demo: sort a set of floats by value and a list of people by (age, name)."""
    floats = {645.32, 37.40, 76.30, 5.40, -34.23, 1.11, -34.94, 23.37, 635.46, -876.22, 467.73, 62.26}
    print(mySortFunction(floats, float_comp))
    people = [Person("Mac", 19), Person("Hal", 20) , Person("Susann", 31) , Person("Dwight", 19), Person("Kassandra", 21), Person("Lawrence", 25), Person("Cindy", 22), Person("Cory", 27), Person("Romana", 27), Person("Doretha", 32), Person("Danna", 20), Person("Zara", 23), Person("Rosalyn", 26), Person("Risa", 24), Person("Benny", 28), Person("Juan", 33), Person("Natalie", 25)]
    # Note: key passed positionally above, by keyword here -- both are valid.
    print(mySortFunction(people, key=people_comp))
#driver
if __name__ == "__main__":
    main()
46f7179fcd60efcb11da34388562ac720108a241 | Python | masaharu-kato/pipeit | /samples/project01/src/elements.py | UTF-8 | 651 | 2.953125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python3
import argparse
import pipeit
from typing import List, Tuple
def elements(prefix:str, n:int) -> List[str]:
    """Build the element names prefix+'1' through prefix+'n', in order."""
    return [f'{prefix}{index}' for index in range(1, n + 1)]
def main():
    """CLI entry: create `number` elements named after `prefix` via pipeit."""
    argp = argparse.ArgumentParser(description='Create elements')
    argp.add_argument('prefix', type=str, help='Prefix of element name')
    argp.add_argument('number', type=int, help='Number of elements')
    args = argp.parse_args()
    def process(_):
        # pipeit callback: ignores its input, emits the elements list.
        data = {}
        data['elements'] = elements(args.prefix, args.number)
        return data
    return pipeit.pipeit_with_json(args, process)
if __name__ == "__main__":
    main()
| true |
3a397f939e5791d2a8b0797aca7bc3fc9db45573 | Python | dakotagporter/med-cabinet | /med-cabinet/cannabis.py | UTF-8 | 2,219 | 2.875 | 3 | [
"MIT"
] | permissive | import json
import pickle
# NOTE(review): file handle is never closed, and pickle.load must only ever
# see this trusted local file -- unpickling untrusted data executes code.
EFFECTS = pickle.load(open('med-cabinet/static/data/effects_list.pkl', 'rb'))
# User-facing error text returned when a strain lookup fails.
ERROR = """Error: Strain Not Found.
Please Try Again!"""
# Ordered keys extracted from each strain record.
KEYS = ['Strain', 'Type', 'Rating', 'Effects', 'Flavor', 'Description']
def load_json():
    """Load and return the cannabis strain database from its JSON file."""
    # Context manager replaces the manual open()/close() pair so the file
    # handle is released even if json.load raises.
    with open('med-cabinet/static/data/cannabis.json', 'r') as f:
        return json.load(f)
def search_strains(strain):
    """Look up a strain by (normalized) name; ERROR string if not found."""
    cannabis = load_json()
    strain = transform_query(strain)
    for entry in cannabis:
        # Mirrors the original behavior: the query matches against *any*
        # field value of the record, not only the 'Strain' field.
        if any(value == strain for value in entry.values()):
            return parse_values(entry)
    return ERROR
def query_results(effects, strain_type, prediction):
    """Filter strains by desired effects and (optionally) type.

    If `prediction` (a list of database indices) is given, only those
    strains are considered; otherwise the whole database is scanned.
    A strain matches when it has at least one requested effect and,
    when `strain_type` is truthy, its 'Type' equals `strain_type`.
    """
    cannabis = load_json()
    strains = []
    filtered = []
    """
    Explain section below
    """
    if prediction:
        # Restrict the candidate pool to the predicted indices.
        for i in prediction:
            strains.append(cannabis[i])
        for strain in strains:
            if any([effect in strain['Effects'] for effect in effects]):
                if strain_type and (strain_type != strain['Type']):
                    pass
                else:
                    filtered.append(strain)
    else:
        # No prediction: scan every strain in the database.
        for strain in cannabis:
            if (any(elem in strain['Effects'] for elem in effects)):
                if strain_type and (strain_type != strain['Type']):
                    pass
                else:
                    filtered.append(strain)
    return parse_json(filtered)
def parse_json(json):
    """Run parse_values over every strain dict and return the results list."""
    # NOTE(review): the parameter name shadows the stdlib `json` module
    # (not used inside this function); kept for interface compatibility.
    return [parse_values(entry) for entry in json]
def transform_query(query):
    """Transform a user query into database-name format.

    e.g. 'blue dream' -> 'Blue-Dream'; an empty query stays ''.
    """
    # Title-case each word and join with hyphens; str.join replaces the
    # original manual loop and its trailing-hyphen [:-1] bookkeeping.
    return '-'.join(query.title().split())
def parse_values(dic):
    """Extract the KEYS fields, in order, from a strain dictionary.

    Passes the ERROR sentinel string through unchanged.
    """
    if dic == ERROR:
        return ERROR
    values = []
    for key in KEYS:
        # NOTE(review): a record missing any KEYS entry raises KeyError --
        # presumably the database is complete; confirm.
        values.append(dic[key])
    return values
| true |
a775659bfd59688ef6ef62ceaee6a5d3cec32431 | Python | Jonathancui123/cccPractice | /s32016.py | UTF-8 | 260 | 2.796875 | 3 | [] | no_license | # ?????????????????????????????? idk what im doing
class Node:
    """A tree node holding a location and its child branches."""

    def __init__(self, loc, branches=None):
        self.loc = loc
        # Fix: the original default `branches=[]` is a mutable default
        # argument -- one shared list across every Node created without an
        # explicit `branches`. Use a None sentinel and build a fresh list.
        self.branches = [] if branches is None else branches
mn = input()
n = int(mn[0])
m = int(mn[2])
pholocs = list(map(int, input().split(' ')))
for i in range(n-1):
| true |
5fdfc4112e3343b64de3ee08560e7c68e49bc9c8 | Python | halcnf/PyPrac | /LoopbyFor.py | UTF-8 | 110 | 3.765625 | 4 | [] | no_license | number = 5
for x in range(1,11):
n1 = number * x
print(str(number) + " X " + str(x) + " = " + str(n1)) | true |
8c2053af4cd25a7a60be199f4939ebca9250c2a5 | Python | newton-li/GIS6345 | /Week8_Part2.py | UTF-8 | 408 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
import geopandas as gpd
import matplotlib.pyplot as plt
# NOTE(review): hard-coded Windows path; data.head() result is discarded.
shp = "C:/osds.shp/osds_oah.shp"
data = gpd.read_file(shp)
data.head()
# Bar chart of record counts per sewage-disposal TYPE.
fig, ax = plt.subplots()
data['TYPE'].value_counts().plot.bar()
plt.xticks(rotation=0)
ax.set_xlabel('Sewage Disposal Type')
ax.set_ylabel('Count')
ax.set_title('On-Site Sewage Disposal Systems for the Island of Oahu, HI')
plt.show()
| true |
48961e87a6196c90fa80fbc6383c330a78a21249 | Python | shams33/Python_3_a_tutorial_for_dumbs_by_shams | /Input_Output.py | UTF-8 | 2,806 | 4.28125 | 4 | [] | no_license | """lets talk about first ouput statement python is very straightforward its like if you want to propose a girl then python will tell direct I LVE YOU instead of saying any boring compliments
okay lets go to the point directly okay python use print function to print an out put"""
##print(values,sep,end,file,flush) this is what a print function consists of
##values: the thing you wanna say its like eg:
print("I love you")
# Sep: it means separator which is used to provide or adding separator into the string provided by the way by default it will take only space but you can give whatever you want
#eg:
print(1,2,3,4,4,sep=" ")##with the values
print('i','love','you',sep="_")##for strings
"""end: after all vallues are printed end will be printed,bydefault it will print new line"""
print("I love you","but as a friend",sep=" " ,end="***fuck")
###
# file is the object where the values are printed and its default value is sys.stdout(screen)
print("Hello world!", flush=True)
"""The file argument must be an object with a write(string) method; if it is not present or None, sys.stdout will be used. Whether output is buffered is usually determined by file, but if the flush keyword argument is true, the stream is forcibly flushed.
"""
"""formatter:
str.format() is used for string formatting methods in Python3,which allows multiple substituitions and value for matting .This method lets us concatenate elements within
a string through positional formatting .
##using a single formatter:
formatters are actually like place holder like you are in a Indian bus and you told your friend to
please reserve some seat for you and he used his handkerchief to reserve the seat
for you .its like place holder
{}.format(value)
placeholders defined by a pair of curly braces{} into a string and calling the str.format()."""
"""parameters :
(value ): can be an integer,floating point numeric constant,string charecters or even variables .
return type :returns a formatted string with the value passed aas parameter in the placeholder position
"""
print("{},but as a friend".format("i love you but "))
print("Hello,I am {}".format(21))
print("{1}{0}".format("dk","bose"))
"""now its different for different cases so lets seee what sthe differences
"""
print("the number is differnet is decimal :{:d}".format(123))
print("the number is float :{:f}".format(212.22))
print("bin {0:b},oct:{0:o},hex:{0:x}".format(9))
print("{:6d}".format(9))####it is used for padding purpose
print("{:<04d}".format(12))
print("{:^9.3f}".format(12.23))##padding the value
print("{:*^5}".format("act"))##padding the value
"""
>for right alignment
<for left alignment
^for central alignment
= for left most alignment of (+)(-) values
"""
#INPUT
"""for user input we use
input() method """
print(input("enter the number"))
| true |
cb2505b65c3186e2d9089e97f81e4ccb6026e126 | Python | Deltaix/UGR-TFG | /Scripts/scriptpython.py | UTF-8 | 1,765 | 2.78125 | 3 | [] | no_license | import numpy as np
import cv2
from keras.preprocessing import image
# Choose whether to use CUDA or not
cuda = False
# If using CUDA, prepare the Keras session to use the GPU
if cuda:
    import tensorflow as tf
    sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(log_device_placement=True))
# Haar cascade face classifier
# NOTE(review): hard-coded absolute Windows paths throughout this script.
face_cascade = cv2.CascadeClassifier('D:/Users/JP/PycharmProjects/pythonProject/haarcascade_frontalface_default.xml')
# Video input (default webcam)
cap = cv2.VideoCapture(0)
# Load the model architecture and its weights
from keras.models import model_from_json
model = model_from_json(open("D:/Users/JP/PycharmProjects/pythonProject/clairvoyant.json", "r").read())
model.load_weights('D:/Users/JP/PycharmProjects/pythonProject/clairvoyant.h5')
emotions = ('angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral')
# NOTE(review): this loop never breaks, so the release/destroy calls below
# are unreachable; the script must be killed externally.
while(True):
    # Capture the frame seen by the camera
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect the face
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        # Crop the face and resize it to 48x48 pixels (model input size)
        detected_face = img[int(y):int(y+h), int(x):int(x+w)]
        detected_face = cv2.cvtColor(detected_face, cv2.COLOR_BGR2GRAY)
        detected_face = cv2.resize(detected_face, (48, 48))
        img_pixels = image.img_to_array(detected_face)
        img_pixels = np.expand_dims(img_pixels, axis=0)
        img_pixels /= 255
        # Predict the expression
        predictions = model.predict(img_pixels)
        max_index = np.argmax(predictions[0])
        emotion = emotions[max_index]
        print(emotion + "\n")
cap.release()
cv2.destroyAllWindows()
| true |
57b68affc04cbbf417f505d042dcf7ba7a49e050 | Python | Aasthaengg/IBMdataset | /Python_codes/p02970/s788799308.py | UTF-8 | 132 | 2.953125 | 3 | [] | no_license | n, d = map(int, input().split())
# Each inspector covers 2d+1 trees; answer is ceil(n / (2d+1)).
# `n` and `d` are read on the preceding line of this file.
area = (d*2)+1
if n%area == 0:
    ans = int(n//area)
else:
    ans = int(n//area) + 1
print(ans)
ee2745edb8d3781e847bf9027b7f14c5c5e39d00 | Python | jdanray/leetcode | /totalFruit.py | UTF-8 | 366 | 3.21875 | 3 | [] | no_license | # https://leetcode.com/problems/fruit-into-baskets/
class Solution(object):
def totalFruit(self, fruits):
i = 0
res = 0
count = collections.Counter()
for j, n in enumerate(fruits):
count[n] += 1
while len(count) > 2:
f = fruits[i]
count[f] -= 1
if count[f] == 0:
del count[f]
i += 1
res = max(res, j - i + 1)
return res
| true |
53022666c744f920ac864c0d281bd4dfc0894582 | Python | margaritapetrosova/Simple-project | /homework 2/task 5.py | UTF-8 | 401 | 3.5 | 4 | [] | no_license | word1 = 'Smth'
featureVector1 = []
for letter in word1:
featureVector1.append(ord(letter))
print(featureVector1)
word2 = 'Smth'
featureVector2 = []
for letter in word1:
featureVector2.append(ord(letter))
print(featureVector2)
sum = 0
for num in range(len(word1)):
res = (featureVector2[num] - featureVector1[num]) ** 2
sum = res + sum
euc = (sum) ** 0.5
print(euc) | true |
8b3b93bcfec885137324ecb2615c787a24f9f34a | Python | LinusThorsell/robtroller-server | /servocontrols.py | UTF-8 | 6,532 | 2.84375 | 3 | [] | no_license | # Initialize servos
import threading, time
import RPi.GPIO as GPIO
# Physical (BOARD) pin numbers for the eight leg servos.
servoPIN1 = 3
servoPIN2 = 5
servoPIN3 = 7
servoPIN4 = 11
servoPIN5 = 13
servoPIN6 = 15
servoPIN7 = 19
servoPIN8 = 21
GPIO.setmode(GPIO.BOARD)
GPIO.setup(servoPIN1, GPIO.OUT)
GPIO.setup(servoPIN2, GPIO.OUT)
GPIO.setup(servoPIN3, GPIO.OUT)
GPIO.setup(servoPIN4, GPIO.OUT)
GPIO.setup(servoPIN5, GPIO.OUT)
GPIO.setup(servoPIN6, GPIO.OUT)
GPIO.setup(servoPIN7, GPIO.OUT)
GPIO.setup(servoPIN8, GPIO.OUT)
# 50 Hz PWM channels (standard hobby-servo refresh rate), started at 0 duty.
p1 = GPIO.PWM(servoPIN1, 50)
p1.start(0)
p2 = GPIO.PWM(servoPIN2, 50)
p2.start(0)
p3 = GPIO.PWM(servoPIN3, 50)
p3.start(0)
p4 = GPIO.PWM(servoPIN4, 50)
p4.start(0)
p5 = GPIO.PWM(servoPIN5, 50)
p5.start(0)
p6 = GPIO.PWM(servoPIN6, 50)
p6.start(0)
p7 = GPIO.PWM(servoPIN7, 50)
p7.start(0)
p8 = GPIO.PWM(servoPIN8, 50)
p8.start(0)
# setup inital positions
p1.ChangeDutyCycle(8.5)
p2.ChangeDutyCycle(8.5)
p3.ChangeDutyCycle(6.75)
p4.ChangeDutyCycle(6.75)
p5.ChangeDutyCycle(8.5)
p6.ChangeDutyCycle(8.5)
p7.ChangeDutyCycle(6.75)
p8.ChangeDutyCycle(6.75)
# Give the servos time to reach the initial pose.
time.sleep(1)
def servos_sleep():
p1.ChangeDutyCycle(0)
p2.ChangeDutyCycle(0)
p3.ChangeDutyCycle(0)
p4.ChangeDutyCycle(0)
p5.ChangeDutyCycle(0)
p6.ChangeDutyCycle(0)
p7.ChangeDutyCycle(0)
p8.ChangeDutyCycle(0)
servos_sleep()
# Shared FIFO of pending text commands; producers call add_commands() and the
# background worker thread consumes from the front.
command_queue = []
def add_commands(commands):
    """Append a batch of command strings to the shared queue."""
    command_queue.extend(commands)
def command_worker():
    """Poll the queue every 2 seconds and execute commands one at a time."""
    def run_next():
        print(command_queue[0])
        do_command(command_queue[0])
        command_queue.pop(0)  # NOTE(review): list.pop(0) is O(n); a deque would be O(1)
    print("[command_worker]: starting...")
    while(1):
        time.sleep(2)
        if (len(command_queue) > 0):
            print("[command_worker]: running next command...")
            run_next()
# NOTE(review): the thread is not marked daemon, so the process cannot exit
# while this loop is running.
t = threading.Thread(target=command_worker)
t.start()
walkstage = 0.15  # seconds each gait phase is held
def walk_forward():
    """Step the eight leg servos through one forward walking gait cycle.

    Each phase writes a duty cycle to every servo (p1..p8) and holds the
    pose for `walkstage` seconds; the servos are then de-energised via
    servos_sleep().
    """
    # One tuple per gait phase: duty cycles for (p1, p2, p3, p4, p5, p6, p7, p8).
    gait = [
        (8.5, 8.5, 6.75, 6.75, 8.5, 8.5, 6.75, 6.75),   # neutral stance
        (3, 3, 10, 10, 6, 6, 6.75, 6.75),
        (3, 3, 10, 10, 4, 4, 6.75, 6.75),
        (5, 5, 6.75, 6.75, 5, 5, 6.75, 6.75),
        (7.5, 7.5, 6.75, 6.75, 10.5, 10.5, 3.5, 3.5),
        (9.5, 9.5, 6.75, 6.75, 10.5, 10.5, 3.5, 3.5),
        (8.5, 8.5, 6.75, 6.75, 8.5, 8.5, 6.75, 6.75),   # back to neutral
    ]
    servos = (p1, p2, p3, p4, p5, p6, p7, p8)
    for phase in gait:
        for servo, duty in zip(servos, phase):
            servo.ChangeDutyCycle(duty)
        time.sleep(walkstage)
    servos_sleep()
def walk_backwards():
    """Step the eight leg servos through one backward gait cycle.

    Runs the forward gait's phases in reverse order, holding each pose
    for `walkstage` seconds, then de-energises the servos.
    """
    # One tuple per gait phase: duty cycles for (p1, p2, p3, p4, p5, p6, p7, p8).
    gait = [
        (9.5, 9.5, 6.75, 6.75, 10.5, 10.5, 3.5, 3.5),
        (7.5, 7.5, 6.75, 6.75, 10.5, 10.5, 3.5, 3.5),
        (5, 5, 6.75, 6.75, 5, 5, 6.75, 6.75),
        (3, 3, 10, 10, 4, 4, 6.75, 6.75),
        (3, 3, 10, 10, 6, 6, 6.75, 6.75),
        (8.5, 8.5, 6.75, 6.75, 8.5, 8.5, 6.75, 6.75),   # back to neutral
    ]
    servos = (p1, p2, p3, p4, p5, p6, p7, p8)
    for phase in gait:
        for servo, duty in zip(servos, phase):
            servo.ChangeDutyCycle(duty)
        time.sleep(walkstage)
    servos_sleep()
def spin():
    """Hold a single asymmetric pose for `walkstage` seconds (turn-in-place).

    Unlike the walk gaits, the servos are left energised afterwards.
    """
    pose = (9.5, 9.5, 6.75, 6.75, 10.5, 10.5, 3.5, 3.5)
    for servo, duty in zip((p1, p2, p3, p4, p5, p6, p7, p8), pose):
        servo.ChangeDutyCycle(duty)
    time.sleep(walkstage)
def do_command(command):
    """Dispatch one text command to the matching robot action.

    Matching is case-insensitive substring search; the checks are not
    exclusive, so a single string may trigger several actions.
    """
    command = command.lower()
    if "walk" in command and "forward" in command:
        print("walk forward")
        walk_forward()
    if "walk" in command and "backwards" in command:
        print("move backwards")
        walk_backwards()
    if "spin" in command and "left" in command:
        print("spin left")
    if "spin" in command and "right" in command:
        print("spin right")
    if "head" in command and "right" in command:
        print("head look right")
    if "head" in command and "left" in command:
        print("head look left")
    if "head" in command and "center" in command:
        print("head look straight")
    if "debug" in command:
        print("debug command received")
a9e81b9cd4dded1cff3680cca5842d1f558762eb | Python | ming-log/python_threading | /redis_learn/redis_learn_other.py | UTF-8 | 2,264 | 2.953125 | 3 | [] | no_license | # !/usr/bin/python3
# -*- coding:utf-8 -*-
# author: Ming Luo
# time: 2020/8/1 10:05
import redis
import time
pool = redis.ConnectionPool(host='localhost', port=6379, decode_responses=True)
r = redis.Redis(connection_pool=pool)
# 1. delete(*names) 删除
# 根据删除redis中的任意数据类型(string、hash、list、set、有序set)
r.delete("gender") # 删除key为gender的键值对
# 2. exists(name) 检查名字是否存在
# 检测redis的name是否存在,存在就是True,False 不存在
print(r.exists("zset1"))
# 3. keys(pattern='') 模糊匹配
# 根据模型获取redis的name
# 更多:
# KEYS * 匹配数据库中所有 key 。
# KEYS h?llo 匹配 hello , hallo 和 hxllo 等。
# KEYS hllo 匹配 hllo 和 heeeeello 等。
# KEYS h[ae]llo 匹配 hello 和 hallo ,但不匹配 hillo
print(r.keys("foo*"))
# 4. expire(name ,time) 设置超时时间
# 为某个redis的某个name设置超时时间
r.lpush("list5", 11, 22)
r.expire("list5", time=3)
print(r.lrange("list5", 0, -1))
time.sleep(3)
print(r.lrange("list5", 0, -1))
# 5. rename(src, dst) 重命名
# 对redis的name重命名
r.lpush("list5", 11, 22)
r.rename("list5", "list5-1")
# 6. randomkey() 随机获取name
# 随机获取一个redis的name(不删除)
print(r.randomkey())
# 7. type(name) 获取类型
# 获取name对应值的类型
print(r.type("set1"))
print(r.type("hash2"))
# 8. 查看所有元素
r.scan(cursor=0, match=None, count=None)
print(r.hscan("hash2"))
print(r.sscan("set3"))
print(r.zscan("zset2"))
print(r.getrange("foo1", 0, -1))
print(r.lrange("list2", 0, -1))
print(r.smembers("set3"))
print(r.zrange("zset3", 0, -1))
print(r.hgetall("hash1"))
# 9. 查看所有元素--迭代器
r.scan_iter(match=None, count=None)
for i in r.hscan_iter("hash1"):
print(i)
for i in r.sscan_iter("set3"):
print(i)
for i in r.zscan_iter("zset3"):
print(i)
# other 方法
print(r.get('name')) # 查询key为name的值
r.delete("gender") # 删除key为gender的键值对
print(r.keys()) # 查询所有的Key
print(r.dbsize()) # 当前redis包含多少条数据
r.save() # 执行"检查点"操作,将数据写回磁盘。保存时阻塞
# r.flushdb() # 清空r中的所有数据
# 管道(pipeline)
| true |
7e363006f3c9d29b9094fa1f5d09297ea0bf5085 | Python | tangaw/data-structures | /src/algo/hash_table.py | UTF-8 | 1,412 | 3.875 | 4 | [
"MIT"
] | permissive | import string
def intersection(arr1, arr2):
hashTable = {}
cross = []
for e in arr1:
hashTable[e] = True
for f in arr2:
if f in hashTable.keys():
cross.append(f)
return cross
def duplicate_value(arr):
hashTable = {}
repeat = []
for e in arr:
if e not in hashTable.keys():
hashTable[e] = True
else:
repeat.append(e)
return repeat
def missing_letter(text):
deconstructed = list(text)
existing = {}
for e in deconstructed:
existing[e] = True
for a in list(string.ascii_lowercase):
if a not in existing.keys():
return a
def first_non_duplicate(text):
string_arr = list(text)
counts = {}
for e in string_arr:
if e not in counts.keys():
counts[e] = 1
else:
counts[e] += 1
for e in string_arr:
if counts[e] == 1:
return e
return "No singled out letters!"
if __name__ == "__main__":
### Testing for intersection() ###
# arr1 = [1, 2, 3, 4, 5]
# arr2 = [2, 4, 6]
# print(intersection(arr1, arr2))
### Testing for duplicate_value() ###
# letters = ['a', 'b', 'c', 'd', 'b', 'e', 'c', 'f', 'g', 'e']
# print(duplicate_value(letters))
### Testing for missing_letter() ###
# text = "the quick brown box jumps over a lazy dog"
# print(missing_letter(text))
### Testing for first_non_duplicate ###
text = 'minimum'
print(first_non_duplicate(text)) | true |
3eb7d2771c9b2e4ea38f0a1e9358c0a4a2dc3166 | Python | BigFloppa456/ROS | /hackathon/scripts/RRT_test.py | UTF-8 | 2,785 | 3.015625 | 3 | [] | no_license | import numpy as np
import random
import math
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
# adapted from psuedo code in given link:
# https://theclassytim.medium.com/robotic-path-planning-rrt-and-rrt-212319121378
obstacles = np.array([ [0, 1.5], [0, 3], [0, 4.5],
[1.5, 0], [1.5, 1.5], [1.5, 3], [1.5, 4.5],
[3, 0], [3, 1.5], [3, 3], [3, 4.5],
[4.5, 0], [4.5, 1.5], [4.5, 3], [4.5, 4.5] ],np.float32) #only centres right now
x=[]
y=[]
for i in range(len(obstacles)):
x.append(obstacles[i][0])
y.append(obstacles[i][1])
plt.scatter(x,y)
#Function declarations:
def shortest_distance(x1, y1, a, b, c):
d = abs((a * x1 + b * y1 + c)) / (math.sqrt(a * a + b * b))
return d
def genNewConfig(goal):
a = random.uniform(0,goal[0])
b = random.uniform(0,goal[1])
point = [a,b]
return point
def Nearest(a, point):
near = a[0]
dist_near = math.sqrt((point[0] - near[0])**2 + (point[1] - near[1])**2)
for pt in a:
del_x = point[0] - pt[0]
del_y = point[1] - pt[1]
a = del_x**2
b = del_y**2
dist = math.sqrt(a+b)
if dist < dist_near:
near = pt
return pt
def TRAVERSABLE(X,obs):
x_nearest = Nearest(obs,X)
dist = distance(x_nearest,X)
if X in obs:
return False
#elif dist <= 0.25 :
# return False
else:
return True
def join(p1,p2):
temp_x = []
temp_y = []
temp_x.append(p1[0])
temp_x.append(p2[0])
temp_y.append(p1[1])
temp_y.append(p2[1])
plt.plot(temp_x,temp_y)
def distance(p1,p2):
del_x = p1[0] - p2[0]
del_y = p1[1] - p2[1]
a = del_x**2
b = del_y**2
dist = math.sqrt(a+b)
return dist
#def path_dist(path):
# for i in range(len(path)):
def RRT(start, goal,obst):
radius = 0.25
graph = []
graph.append(start)
#for x in range(100):
for a in range(500):
Xnew = genNewConfig(goal)
if TRAVERSABLE(Xnew,obst)==False:
continue
x_nearest = Nearest(graph,Xnew)
if (Xnew[0]>=x_nearest[0]) and (Xnew[1] >= x_nearest[1]):
join(x_nearest,Xnew)
graph.append(Xnew)
#RRT(x_nearest,goal,obst)
if Xnew in [goal]:
#plt.show(block=True)
graph.append(Xnew)
#return graph
#if distance(Xnew,goal)<distance(Xnew,x_nearest):
#join(Xnew,goal)
#plt.grid()
#plt.show(block=True)
return graph
"""S = [0,0]
G = [5,5]
path = RRT(S,G,obstacles)
print (path)"""
| true |
c467d949df1a92a432d3374e8306e26bfb6b73d8 | Python | ThanushCode/thanushcode.github.io | /Python/inputprogram.py | UTF-8 | 135 | 3.9375 | 4 | [] | no_license | firstName = input("Enter Your First Name")
# Read the user's last name (the first name is read above) and echo the full name.
lastName = input ("Enter Your Last Name")
print ("Your full Name is ",firstName,lastName )
f47b835405825975eddabc16206fb3c278867277 | Python | nupnik267/Deep-Learning-Models | /new/augmentation.py | UTF-8 | 10,670 | 2.859375 | 3 | [
"MIT"
] | permissive | import cv2
from PIL import Image, ImageEnhance, ImageFilter, ImageOps
import random
import numpy as np
import math
def yoloFormattocv(x1, y1, x2, y2, H, W):
    """Convert a YOLO box (normalised centre x/y, width, height) into pixel
    corner coordinates [xmin, ymin, xmax, ymax] for an H x W image.
    Coordinates are truncated to int.
    """
    cx = x1 * W
    cy = y1 * H
    half_w = (x2 * W) / 2
    half_h = (y2 * H) / 2
    corners = [cx - half_w, cy - half_h, cx + half_w, cy + half_h]
    return [int(c) for c in corners]
def cvFormattoYolo(corner, H, W):
    """Convert [label, xmin, ymin, xmax, ymax] pixel corners back into the
    YOLO tuple (label, cx/W, cy/H, w/W, h/H), each rounded to 6 decimals.
    """
    label, xmin, ymin, xmax, ymax = corner[0], corner[1], corner[2], corner[3], corner[4]
    box_w = xmax - xmin
    box_h = ymax - ymin
    cx = (xmin + xmax) / 2
    cy = (ymin + ymax) / 2
    return label, round(cx / W, 6), round(cy / H, 6), round(box_w / W, 6), round(box_h / H, 6)
def box_nochange(file_num, text):
    """Write the label text unchanged to '<file_num>.txt' and return it."""
    out_path = file_num + ".txt"
    with open(out_path, 'w') as handle:
        handle.write(text)
    return text
def shear_box(direc, file_num, text, shearfactor):
    """Adjust YOLO bounding boxes to follow a sheared image and save them.

    direc selects the axis (0 = x-shear, anything else = y-shear); the
    adjusted label text is written to '<file_num>.txt' and returned.

    NOTE(review): the geometry below looks suspicious — `shearfactor // 2`
    is integer (floor) division, which is 0 for any shearfactor < 2,
    turning several terms into no-ops. Confirm against the factors
    actually produced by shear_gen() (1 + value/10).
    """
    coordinates = []
    all = text.splitlines()  # NOTE(review): shadows the builtin all()
    for k in all:
        coordinates.append(k.split())
    for coordinate in coordinates:
        coordinate[0] = int(coordinate[0])  # class id
        for i in range(1, len(coordinate)):
            coordinate[i] = float(coordinate[i])  # cx, cy, w, h (normalised)
    if direc == 0: # x - shearing
        for coordinate in coordinates:
            # Centre shifts horizontally in proportion to its y position.
            x_mid = coordinate[1] * 1 + coordinate[2] * (shearfactor // 2)
            y_mid = coordinate[2]
            width = coordinate[3] * (1 - shearfactor / 3)
            height = coordinate[4]
            coordinate[1], coordinate[2] = x_mid, y_mid
            coordinate[3], coordinate[4] = width, height
    else: # y -shearing
        for coordinate in coordinates:
            x_mid = coordinate[1]
            # Centre shifts vertically in proportion to its x position.
            y_mid = (coordinate[1] * (shearfactor // 2)) + coordinate[2] * 1
            width = (1 + shearfactor // 2) * coordinate[3]
            height = coordinate[4] * abs(1 - (coordinate[4] / shearfactor) / 2)
            coordinate[1], coordinate[2] = x_mid, y_mid
            coordinate[3], coordinate[4] = width, height
    with open(file_num+".txt", 'w') as file:
        coordinates_text = ""
        for coordinate in coordinates:
            coordinates_text += "%d %.6f %.6f %.6f %.6f\n" % (coordinate[0], coordinate[1], coordinate[2],
                                                              coordinate[3], coordinate[4])
        file.write(coordinates_text)
    return coordinates_text
def rotate_box(filename, image, file_num, text, angle):
    """Recompute YOLO boxes for an image rotated by `angle` degrees.

    `filename` is the original image on disk (used only for its W/H);
    `image` is the already-rotated PIL image (for the new W/H). Each box
    is converted to pixel corners, all four corners are rotated about the
    image centre, and the rotated box is replaced by its axis-aligned
    bounding box. The new labels are written to '<file_num>.txt' and the
    label text is returned.
    """
    org_image = Image.open(filename)
    rotation_angle = angle * np.pi / 180
    # Standard 2D rotation matrix for the given angle.
    rot_matrix = np.array(
        [[np.cos(rotation_angle), -np.sin(rotation_angle)], [np.sin(rotation_angle), np.cos(rotation_angle)]])
    W, H = org_image.size[:2]
    new_width, new_height = image.size[:2]
    bbox = []
    new_bbox = []
    all = text.splitlines()  # NOTE(review): shadows the builtin all()
    for k in all:
        bbox.append(k.split())
    for x in bbox:
        # NOTE(review): misleading names — yoloFormattocv returns corners, so
        # center_x/center_y are really x_min/y_min and bbox_width/bbox_height
        # are really x_max/y_max here.
        (center_x, center_y, bbox_width, bbox_height) = yoloFormattocv(float(x[1]), float(x[2]),
                                                                       float(x[3]), float(x[4]), H, W)
        # Shift each corner so the rotation pivot (image centre) is the origin.
        upper_left_corner_shift = (center_x - W / 2, -H / 2 + center_y)
        upper_right_corner_shift = (bbox_width - W / 2, -H / 2 + center_y)
        lower_left_corner_shift = (center_x - W / 2, -H / 2 + bbox_height)
        lower_right_corner_shift = (bbox_width - W / 2, -H / 2 + bbox_height)
        new_lower_right_corner = [-1, -1]
        new_upper_left_corner = []
        # Rotate the four corners and track the min/max extents.
        for i in (upper_left_corner_shift, upper_right_corner_shift, lower_left_corner_shift,
                  lower_right_corner_shift):
            new_coords = np.matmul(rot_matrix, np.array((i[0], -i[1])))
            x_prime, y_prime = new_width / 2 + new_coords[0], new_height / 2 - new_coords[1]
            if new_lower_right_corner[0] < x_prime:
                new_lower_right_corner[0] = x_prime
            if new_lower_right_corner[1] < y_prime:
                new_lower_right_corner[1] = y_prime
            if len(new_upper_left_corner) > 0:
                if new_upper_left_corner[0] > x_prime:
                    new_upper_left_corner[0] = x_prime
                if new_upper_left_corner[1] > y_prime:
                    new_upper_left_corner[1] = y_prime
            else:
                new_upper_left_corner.append(x_prime)
                new_upper_left_corner.append(y_prime)
        new_bbox.append([int(x[0]), new_upper_left_corner[0], new_upper_left_corner[1],
                         new_lower_right_corner[0], new_lower_right_corner[1]])
    # Convert the pixel corners back to normalised YOLO format.
    final_bbox = []
    for x in new_bbox:
        final_bbox.append(cvFormattoYolo(x, image.size[1], image.size[0]))
    with open(file_num + ".txt", 'w') as file:
        coordinates_text = ""
        for x in final_bbox:
            coordinates_text += "%d %.6f %.6f %.6f %.6f\n" % (x[0], x[1], x[2],
                                                              x[3], x[4])
        file.write(coordinates_text)
    return coordinates_text
def hrFlip_box(file_num, text):
    """Mirror YOLO label lines horizontally and persist them.

    Each line of `text` is "<class> <cx> <cy> <w> <h>" with normalised
    coordinates; only the x centre changes (cx -> |1 - cx|). The rewritten
    label text is saved to '<file_num>.txt' and returned.
    """
    rows = []
    for line in text.splitlines():
        fields = line.split()
        row = [int(fields[0])] + [float(v) for v in fields[1:]]
        row[1] = abs(1 - row[1])  # mirror the x centre across the vertical axis
        rows.append(row)
    flipped = "".join("%d %.6f %.6f %.6f %.6f\n" % tuple(row[:5]) for row in rows)
    with open(file_num + ".txt", 'w') as handle:
        handle.write(flipped)
    return flipped
def vrFlip_box(file_num, text):
    """Mirror YOLO label lines vertically and persist them.

    Each line of `text` is "<class> <cx> <cy> <w> <h>" with normalised
    coordinates; only the y centre changes (cy -> |1 - cy|). The rewritten
    label text is saved to '<file_num>.txt' and returned.
    """
    rows = []
    for line in text.splitlines():
        fields = line.split()
        row = [int(fields[0])] + [float(v) for v in fields[1:]]
        row[2] = abs(1 - row[2])  # mirror the y centre across the horizontal axis
        rows.append(row)
    flipped = "".join("%d %.6f %.6f %.6f %.6f\n" % tuple(row[:5]) for row in rows)
    with open(file_num + ".txt", 'w') as handle:
        handle.write(flipped)
    return flipped
def blur_gen(image, value):
    """Return the PIL image blurred with a Gaussian kernel of radius value/4."""
    return image.filter(ImageFilter.GaussianBlur(value / 4))
def rotation_gen(image, value):
    """Rotate a PIL image by `value` degrees, enlarging the canvas so no
    corner is clipped. Returns (rotated PIL image, value)."""
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    height, width = image.shape[:2]
    image_center = (width / 2, height / 2) # getRotationMatrix2D needs coordinates in reverse order (width, height) compared to shape
    rotation_mat = cv2.getRotationMatrix2D(image_center, value, 1.)
    # rotation calculates the cos and sin, taking absolutes of those.
    abs_cos = abs(rotation_mat[0, 0])
    abs_sin = abs(rotation_mat[0, 1])
    # find the new width and height bounds
    bound_w = int(height * abs_sin + width * abs_cos)
    bound_h = int(height * abs_cos + width * abs_sin)
    # subtract old image center (bringing image back to origin) and adding the new image center coordinates
    rotation_mat[0, 2] += bound_w / 2 - image_center[0]
    rotation_mat[1, 2] += bound_h / 2 - image_center[1]
    # rotate image with the new bounds and translated rotation matrix
    rotated_mat = cv2.warpAffine(image, rotation_mat, (bound_w, bound_h))
    rgb = cv2.cvtColor(rotated_mat, cv2.COLOR_BGR2RGB)
    return Image.fromarray(rgb), value
def shear_gen(image, value):
    """Shear the image along one randomly chosen axis by factor value/10.

    Builds both the x-sheared and y-sheared variants, picks one at random,
    and returns (axis_index, sheared PIL image, 1 + value/10) where
    axis_index is 0 for x-shear and 1 for y-shear.
    """
    l = []
    cv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    x_m = np.float32([[1, value / 10, 0], [0, 1, 0], [0, 0, 1]]) # transformation matrix for x shearing
    x_factor = 1 + value / 10 # factor for changing the size of image
    x_sheared_img = cv2.warpPerspective(cv_image, x_m, (int(cv_image.shape[1] * x_factor), int(cv_image.shape[0])))
    y_m = np.float32([[1, 0, 0], [value / 10, 1, 0], [0, 0, 1]])
    y_factor = 1 + value / 10
    y_sheared_img = cv2.warpPerspective(cv_image, y_m, (int(cv_image.shape[1]), int(cv_image.shape[0] * y_factor)))
    x_rgb = cv2.cvtColor(x_sheared_img, cv2.COLOR_BGR2RGB)
    y_rgb = cv2.cvtColor(y_sheared_img, cv2.COLOR_BGR2RGB)
    x_img_pil = Image.fromarray(x_rgb)
    y_img_pil = Image.fromarray(y_rgb)
    l.extend([x_img_pil, y_img_pil])
    res = random.choice(l)
    return l.index(res), res, (1+(value/10))
def horizontal_flip_gen(image):
    """Return the PIL image mirrored left-right."""
    return image.transpose(Image.FLIP_LEFT_RIGHT)
def vertical_flip_gen(image):
    """Return the PIL image mirrored top-bottom."""
    return image.transpose(Image.FLIP_TOP_BOTTOM)
def darken_gen(image, value):
    """Darken the image: brightness scaled to (1 - value/100)."""
    return ImageEnhance.Brightness(image).enhance(1.0 - value / 100)
def brighten_gen(image, value):
    """Brighten the image: brightness scaled to (1 + value/30)."""
    return ImageEnhance.Brightness(image).enhance(1.0 + value / 30)
def noise_gen(image, value):
    """Apply salt-and-pepper noise; `value` is the percentage of pixels hit.

    NOTE(review): the per-pixel Python loop is slow for large images; a
    vectorised NumPy mask would be much faster.
    """
    img = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    rows, columns, channels = img.shape
    output = np.zeros(img.shape, np.uint8)
    value = value / 100   # probability of "pepper" (black)
    threshold = 1 - value  # draws above this become "salt" (white)
    for i in range(rows):
        for j in range(columns):
            r = random.random()
            if r < value:
                output[i][j] = [0, 0, 0]
            elif r > threshold:
                output[i][j] = [255, 255, 255]
            else:
                output[i][j] = img[i][j]
    rgb1 = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
    img_pil = Image.fromarray(rgb1)
    return img_pil
def saturation_gen(image, value):
    """Randomly either desaturate (1 - value/100) or oversaturate (1 + value/20)."""
    l = []
    converter = ImageEnhance.Color(image)
    neg = converter.enhance(1.0 - value / 100)
    pos = converter.enhance(1.0 + value / 20)
    l.extend([neg, pos])
    return random.choice(l)
def grayscale_gen(image):
    """Return a single-channel grayscale copy of the image."""
    return ImageOps.grayscale(image)
def cutout_gen(image):
    """Black out one random rectangle (up to 50x50 px) in the image.

    NOTE(review): the rectangle origin is drawn from [0, 125] and the loop
    indexes rows/columns directly, so this assumes the image is at least
    175x175 px — confirm against the input sizes actually used.
    """
    cv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    x = random.randint(0, 125)  # NOTE: used as a row index below, not an x coordinate
    y = random.randint(0, 125)  # NOTE: used as a column index below
    w = random.randint(0, 50)
    h = random.randint(0, 50)
    for i in range(x, x + w):
        for j in range(y, y + h):
            cv_image[i][j] = 0
    img = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
    img_pil = Image.fromarray(img)
    return img_pil
def bw_gen(image, value):
    """Binarise: convert to grayscale, then threshold at `value` to pure black/white."""
    return ImageOps.grayscale(image).point(lambda x: 0 if x < value else 255, '1')
| true |
b710ea5ccc9de41d3d69e41063ac50aab0413929 | Python | Tecnoll/plover_windows_brightness | /plover_windows_brightness.py | UTF-8 | 1,023 | 2.75 | 3 | [] | no_license | from wmi import WMI
# Shared WMI connection to the root/wmi namespace (Windows monitor control).
WMI_CONNECTION = WMI(moniker='root/wmi')
def _set_brightness(brightness):
    """Apply an absolute brightness level to the first monitor via WMI."""
    WMI_CONNECTION.WmiMonitorBrightnessMethods()[0].WmiSetBrightness(brightness, 0)
def _get_brightness():
    """Return the first monitor's current brightness as reported by WMI."""
    return WMI_CONNECTION.WmiMonitorBrightness()[0].CurrentBrightness
def set(engine, brightness):
    """Plover command: set the display brightness to an absolute level (0-100)."""
    level = int(brightness)
    assert 0 <= level <= 100, 'Brightness can only be 0 to 100'
    _set_brightness(level)
def up(engine, increase=10):
    """Plover command: raise the display brightness, clamped at 100.

    CONSISTENCY FIX: `increase` now has a default of 10, matching the
    sibling `down(engine, decrease=10)` — previously calling up() with no
    argument raised TypeError even though the body already falls back to
    10 for falsy values (presumably an empty string from Plover —
    TODO confirm how the engine passes the argument).
    """
    increase = int(increase) if increase else 10
    assert increase > 0, 'Brightness increase amount must be a positive integer'
    new_brightness = (_get_brightness() + increase)
    new_brightness = min(new_brightness, 100)  # never exceed 100%
    _set_brightness(new_brightness)
def down(engine, decrease=10):
    """Plover command: lower the display brightness, clamped at 0.

    A falsy `decrease` (e.g. empty string) falls back to the default step of 10.
    """
    step = int(decrease) if decrease else 10
    assert step > 0, 'Brightness decrease amount must be a positive integer'
    lowered = _get_brightness() - step
    _set_brightness(max(lowered, 0))
| true |
ff85793a5c79079dfc2f395fba13faa9d9d9f8ed | Python | ManavParmar1609/OpenOctober | /Miscellaneouos/Python/Minimum Cost Path.py | UTF-8 | 536 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | # Minimum Cost Path
import sys
def findMinprice(price, x=None, y=None):
    """Return the minimum cost of a monotone (right/down) path from cell
    (1, 1) to cell (x, y) of the cost grid `price` (1-based coordinates).

    x/y default to the full grid dimensions. Out-of-grid targets (x == 0
    or y == 0) return sys.maxsize, matching the original sentinel. An
    empty grid returns 0.

    FIXES vs. the original:
    - `if not x and not y` treated an explicit (0, 0) as "use defaults";
      `is None` checks are used instead.
    - An empty grid crashed with IndexError (len(price[0]) was evaluated
      before the emptiness check); it is now checked first.
    - The plain double recursion was exponential; results are memoised,
      making it O(x * y).
    """
    if not price or not price[0]:
        return 0
    if x is None:
        x = len(price)
    if y is None:
        y = len(price[0])
    memo = {}

    def cost(i, j):
        # Stepping off the top/left edge: unreachable, so an "infinite" cost.
        if i == 0 or j == 0:
            return sys.maxsize
        if i == 1 and j == 1:
            return price[0][0]
        if (i, j) not in memo:
            memo[(i, j)] = min(cost(i - 1, j), cost(i, j - 1)) + price[i - 1][j - 1]
        return memo[(i, j)]

    return cost(x, y)
if __name__ == '__main__':
    # 5x5 demo cost grid; the cheapest top-left -> bottom-right path
    # (moving only right/down) costs 36.
    price = [
        [4, 7, 8, 6, 4],
        [6, 7, 3, 9, 2],
        [3, 8, 1, 2, 4],
        [7, 1, 7, 3, 7],
        [2, 9, 8, 9, 3]
    ]
    print('The minimum price is : ', findMinprice(price))
0097494317f6bbd60602bb206533c00afcc8ab8c | Python | dalinnn/RaspiRobot-Rover | /02. Robot Rover and Wii Remote Controller | UTF-8 | 2,925 | 3.203125 | 3 | [] | no_license | #!/usr/bin/python
#
# wiiRemote04.py
# Connect a Nintendo Wii Remote via Bluetooth
# and read the button states in Python.
#
# Based on Matt Hawkins Project (dated 30/01/2013):
# http://www.raspberrypi-spy.co.uk/2013/02/nintendo-wii-remote-python-and-the-raspberry-pi/
# -----------------------
# Import required Python libraries
# -----------------------
from rrb3 import *
import cwiid
import time
rr = RRB3(9.0, 6.0) # battery, motor
motor_speed = 0.4     # initial drive speed
button_delay = 0.1    # debounce delay after movement buttons
button_delay2 = 0.3   # longer debounce for the speed -/+ buttons

print 'Press 1 + 2 together on your Wii Remote now ...'
time.sleep(1)

# Connect to the Wii Remote. If it times out
# then quit.
try:
  wii=cwiid.Wiimote()
except RuntimeError:
  print "Error opening wiimote connection"
  quit()

# Turn on LED 1 on the Wii Remote to show it is connected.
wii.led = 1

print 'Wii Remote connected...\n'
print 'Press A and B buttons together to disconnect and quit.\n'
print 'Use Left, Right, Up & Down to control direction.\n'
print 'Use - & + to control speed.\n'
print 'Press button B to stop motors.\n'

wii.rpt_mode = cwiid.RPT_BTN  # report button state changes
def readDirection():
    """Re-issue the motor command for the last pressed direction.

    Called after a speed change so the new `motor_speed` takes effect
    while the rover keeps moving the same way. `direction` is a
    module-level global assigned only in the button loop below.

    NOTE(review): pressing - or + before any direction button leaves
    `direction` undefined and this raises NameError — consider
    initialising `direction` near the top of the script.
    """
    if direction == 'LEFT':
        rr.left(0, motor_speed)
    if direction == 'RIGHT':
        rr.right(0, motor_speed)
    if direction == 'UP':
        rr.forward(0, motor_speed)
    if direction == 'DOWN':
        rr.reverse(0, motor_speed)
# Main polling loop: read the button bitmask and act on each pressed button.
while True:
  buttons = wii.state['buttons']

  # If A and B buttons pressed
  # together then rumble and quit.
  if (buttons - cwiid.BTN_A - cwiid.BTN_B == 0):
    print '\nClosing connection ...'
    wii.rumble = 1   # short rumble as a goodbye signal
    time.sleep(1)
    wii.rumble = 0
    exit(wii)  # NOTE(review): exit() expects an exit status, not the Wiimote object
  # Check if other buttons are pressed by
  # doing a 'bitwise AND' of the buttons number
  # and the predefined constant for that button.
  if (buttons & cwiid.BTN_LEFT):
    rr.left(0, motor_speed)
    direction = 'LEFT'   # remember heading so readDirection() can re-issue it
    print 'Left pressed'
    time.sleep(button_delay)
  if(buttons & cwiid.BTN_RIGHT):
    rr.right(0, motor_speed)
    direction = 'RIGHT'
    print 'Right pressed'
    time.sleep(button_delay)
  if (buttons & cwiid.BTN_UP):
    rr.forward(0, motor_speed) # duration 0 keeps the motors running until stopped
    direction = 'UP'
    print 'Up pressed'
    time.sleep(button_delay)
  if (buttons & cwiid.BTN_DOWN):
    rr.reverse(0, motor_speed)
    direction = 'DOWN'
    print 'Down pressed'
    time.sleep(button_delay)
  if (buttons & cwiid.BTN_B):
    rr.stop()
    print 'Button B pressed'
    time.sleep(button_delay)
  if (buttons & cwiid.BTN_MINUS):
    # NOTE(review): no lower bound — repeated presses can drive the speed negative.
    motor_speed = motor_speed - 0.1
    readDirection()
    print 'Slower'
    time.sleep(button_delay2)
  if (buttons & cwiid.BTN_PLUS):
    # NOTE(review): no upper bound — motor_speed can exceed the driver's valid range.
    motor_speed = motor_speed + 0.1
    readDirection()
    print 'Faster'
    time.sleep(button_delay2)
87109152bff5a56eebd5b6178e8f90264541ff62 | Python | takeshitakenji/bPlaylist | /trplay.py | UTF-8 | 3,465 | 2.71875 | 3 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env python2
from mfind import get_tags, fwalk
from codecs import open as codecs_open
from contextlib import closing
from sys import argv, stdin, stderr, stdout, exit
from codecs import getwriter, getreader
# Python 2: decode argv and re-wrap the standard streams so all text I/O
# goes through UTF-8 codecs.
argv = [x.decode('utf8') for x in argv]
stdin = getreader('utf8')(stdin)
stdout, stderr = [getwriter('utf8')(x) for x in stdout, stderr]
class Tree(object):
	"""An unbalanced binary search tree mapping keys to values.

	Insertion is via tree[key] = value (duplicate keys raise KeyError);
	lookup via get(), optionally accepting the nearest node found when
	the exact key is absent. Iteration yields (key, value) in key order.
	"""
	class Node(object):
		"""A single tree node holding a key/value pair and child links."""
		__slots__ = '__parent', '__left', '__right', '__key', '__value'
		def __init__(self, key, value, parent = None):
			self.__parent = parent
			self.__left, self.__right = None, None
			self.__key, self.__value = key, value
		@property
		def parent(self):
			return self.__parent
		@parent.setter
		def parent(self, value):
			self.__parent = value
		@property
		def left(self):
			return self.__left
		@left.setter
		def left(self, value):
			self.__left = value
		@property
		def right(self):
			return self.__right
		@right.setter
		def right(self, value):
			self.__right = value
		@property
		def key(self):
			return self.__key
		@property
		def value(self):
			return self.__value
		def __iter__(self):
			# In-order traversal: left subtree, self, right subtree.
			if self.__left is not None:
				for x in self.__left:
					yield x
			yield self
			if self.__right is not None:
				for x in self.__right:
					yield x
		def __repr__(self):
			return '<Node %s=%s>' % (repr(self.__key), repr(self.__value))
	# Status codes returned by __get_node: empty tree, nearest node found,
	# exact key match.
	EMPTY_TREE, NODE_APPROXIMATE, NODE_EXACT = xrange(3)
	__slots__ = '__root',
	def __init__(self):
		self.__root = None
	def __repr__(self):
		if self.__root is None:
			return '<Tree 0>'
		else:
			return '<Tree %s>' % repr(self.__root)
	def __get_node(self, key):
		"""Walk from the root towards key; return (status, node).

		status is NODE_EXACT with the matching node, NODE_APPROXIMATE with
		the last node visited before a missing child slot, or EMPTY_TREE
		with None.
		"""
		if self.__root is None:
			return self.EMPTY_TREE, None
		node = self.__root
		while True:
			if node.key == key:
				return self.NODE_EXACT, node
			elif key < node.key:
				if node.left is None:
					return self.NODE_APPROXIMATE, node
				node = node.left
			else:
				if node.right is None:
					return self.NODE_APPROXIMATE, node
				node = node.right
		# Unreachable: the loop always returns.
		raise RuntimeError
	def __setitem__(self, key, value):
		"""Insert key -> value; raises KeyError if the key already exists."""
		match, parent = self.__get_node(key)
		if match is self.EMPTY_TREE:
			self.__root = self.Node(key, value, None)
		elif match is self.NODE_EXACT:
			raise KeyError('Key already exists: %s' % key)
		else:
			# Attach below the approximate parent on the side the key sorts to;
			# the slot is guaranteed free by __get_node, so a filled slot here
			# would indicate a logic error.
			node = self.Node(key, value, parent)
			if node.key < parent.key:
				if parent.left is not None:
					raise RuntimeError
				parent.left = node
			else:
				if parent.right is not None:
					raise RuntimeError
				parent.right = node
	def __iter__(self):
		# Yields (key, value) pairs in ascending key order.
		if self.__root is not None:
			for node in self.__root:
				yield node.key, node.value
	def get(self, key, approximate = False):
		"""Return the value for key; with approximate=True, fall back to the
		nearest node visited during the search instead of raising KeyError."""
		match, node = self.__get_node(key)
		if match is self.NODE_EXACT:
			return node.value
		elif match is self.NODE_APPROXIMATE and approximate:
			return node.value
		else:
			raise KeyError('No such key: %s' % key)
if __name__ == '__main__':
	# Index every file under root by a hash of its tags, then grow a
	# 50-track playlist by hashing 2KB chunks of the previous track and
	# doing an approximate lookup in the tree.
	root = u'/home/music'
	playlist = []
	tree = Tree()
	for f in fwalk(root):
		try:
			tags = tuple([(key, tuple(value)) for key, value in get_tags(f)])
		except ValueError:
			# File without readable tags: skip it.
			continue
		h = hash(tags)
		# Duplicate hash keys: probe linearly away from zero until free.
		while True:
			try:
				tree[h] = f
				break
			except KeyError:
				h = h + 1 if h > 0 else h - 1
		if not len(playlist):
			playlist.append(f)  # seed the playlist with the first indexed file
	for i in xrange(50):
		with open(playlist[-1], 'rb') as f:
			# Hash successive 2KB chunks until the approximate lookup yields
			# a file not already in the playlist.
			# NOTE(review): if the file is exhausted first, f.read() returns ''
			# forever and this inner loop never terminates.
			while True:
				key = hash(f.read(2048))
				g = tree.get(key, True)
				if g not in playlist:
					playlist.append(g)
					break
	for i in playlist:
		print >>stdout, i
| true |
6f4000fd970e4edfcd2820fa320a124d3e39078e | Python | shaymcb/unit5 | /displayDate.py | UTF-8 | 430 | 3.828125 | 4 | [] | no_license | #Shaylee McBride
#26Apr2018
#displayDate.py - tells you the date
from datetime import *
# Look-up tables mapping date.weekday() (0 = Monday) and date.month
# (1 = January) to human-readable names.
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
months = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']

today = date.today()
weekday = days[today.weekday()]
month = months[today.month - 1]
# Assemble the message explicitly instead of relying on print's separator.
message = 'Today is ' + weekday + ', ' + month + ' ' + str(today.day) + ' ' + str(today.year)
print(message)
ebcd979c5ea6e799331b564b14300bb2eb311fc9 | Python | jiangqn/TextVAE | /src/module/layer/positional_encoding.py | UTF-8 | 1,860 | 3.046875 | 3 | [
"MIT"
] | permissive | import torch
from torch import nn
import math
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encodings, pre-computed up to ``max_len``.

    Follows the Transformer scheme: even channels hold sin, odd channels
    hold cos, with wavelengths in a geometric progression. ``forward``
    scales the embedding by sqrt(size) before adding the encodings.
    Implementation based on OpenNMT-py (https://github.com/OpenNMT/OpenNMT-py).
    """

    def __init__(self, size: int = 0, max_len: int = 5000) -> None:
        """Pre-compute the (1, max_len, size) encoding table.

        :param size: embedding dimensionality; must be even.
        :param max_len: maximum sequence length supported.
        :raises ValueError: if ``size`` is odd.
        """
        if size % 2 != 0:
            raise ValueError("Cannot use sin/cos positional encoding with "
                             "odd dim (got dim={:d})".format(size))
        super().__init__()
        positions = torch.arange(0, max_len).unsqueeze(1).float()
        frequencies = torch.exp(torch.arange(0, size, 2, dtype=torch.float) *
                                -(math.log(10000.0) / size))
        angles = positions * frequencies
        table = torch.zeros(max_len, size)
        table[:, 0::2] = torch.sin(angles)  # even channels
        table[:, 1::2] = torch.cos(angles)  # odd channels
        # Buffer (not a Parameter): moves with .to(device) but is not trained.
        self.register_buffer("pe", table.unsqueeze(0))  # shape: [1, max_len, size]
        self.dim = size

    def forward(self, emb: torch.Tensor) -> torch.Tensor:
        """Add positional encodings to ``emb``.

        Args:
            emb (FloatTensor): sequence of word vectors
                ``(batch_size, seq_len, self.dim)``
        """
        seq_len = emb.size(1)
        return emb * math.sqrt(self.dim) + self.pe[:, :seq_len]

    def efficient_forward(self, emb: torch.Tensor, index: int) -> torch.Tensor:
        """Add the encoding for a single position ``index`` (step-wise decoding).

        :param emb: torch.FloatTensor (batch_size, 1, size)
        :param index: int position of this step
        :return: torch.FloatTensor (batch_size, 1, size)
        """
        return emb * math.sqrt(self.dim) + self.pe[:, index: index + 1]
0d5d154d00105d6d46e636f7df8a75febded1d17 | Python | seeming-amusing/advent | /16/classes.py | UTF-8 | 984 | 3.28125 | 3 | [] | no_license | def parse(move):
if move[0] == 's':
return Spin(move)
elif move[0] == 'x':
return Exchange(move)
elif move[0] == 'p':
return Partner(move)
class Spin:
    """Dance move sX: the last ``num`` dancers wrap around to the front."""

    def __init__(self, move):
        self.num = int(move[1:])

    def __repr__(self):
        return "spin {}".format(self.num)

    def dance(self, dancers):
        """Return a new line with the last ``num`` dancers moved to the front."""
        tail = dancers[-self.num:]
        head = dancers[:-self.num]
        return tail + head
class Exchange:
    """Dance move xA/B: the dancers at positions A and B trade places."""

    def __init__(self, move):
        first, second = move[1:].split('/')
        self.a = int(first)
        self.b = int(second)

    def __repr__(self):
        return "exchange {} <-> {}".format(self.a, self.b)

    def dance(self, dancers):
        """Swap positions a and b in place and return the same list."""
        held = dancers[self.a]
        dancers[self.a] = dancers[self.b]
        dancers[self.b] = held
        return dancers
class Partner:
    """Dance move pA/B: the dancers named A and B trade places."""

    def __init__(self, move):
        self.a, self.b = move[1:].split('/')

    def __repr__(self):
        return "partner {} <-> {}".format(self.a, self.b)

    def dance(self, dancers):
        """Swap the positions of the dancers named a and b, in place."""
        pos_a = dancers.index(self.a)
        pos_b = dancers.index(self.b)
        dancers[pos_a], dancers[pos_b] = dancers[pos_b], dancers[pos_a]
        return dancers
1f0f3f8b1e80f1eb9272439816bb458a9af273af | Python | professorgilzamir/artificialintelligence | /teste_progress.py | UTF-8 | 461 | 3.765625 | 4 | [] | no_license | #import adiciona novos módulos ao programa.
# Modules contain functions and objects that can be reused.
import sys  # provides access to the interpreter/system
import time  # provides, among other things, sleep(), which pauses the current process
# Animate a simple text spinner: alternate '/' and '\' before the counter,
# using '\r' to return the cursor to the start of the line each step.
for i in range(100):
    if i % 2 == 0:
        print('\r', end='')
        print('/ ', i, end=' ')
    else:
        print('\r', end='')
        print('\\ ', i, end=' ')
    #sys.stdout.flush()
    time.sleep(0.4)
print('the end')
| true |
5c4fe329c02a2d9bfe9a592a9c5a74f4660327f0 | Python | Mackachoo/Natural-Computing | /Task 3.py | UTF-8 | 6,005 | 2.703125 | 3 | [] | no_license | import numpy as np
import random as r
import time as t
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split as testSplit
import warnings
from sklearn.exceptions import ConvergenceWarning
from tqdm import tqdm
### Functions ----------------------------------------------------------------------------
def createInitials(N, dict):
    """Create an initial population of N random genomes.

    Each genome is [hyper_params, hidden_layers] where hyper_params picks
    one option per key of `dict` (which shadows the builtin) and
    hidden_layers is 1-6 distinct layer widths drawn from 1-7.
    """
    population = []
    for _ in range(N):
        params = {key: r.choice(options) for key, options in dict.items()}
        layers = r.sample(range(1, 8), r.randint(1, 6))
        population.append([params, layers])
    return population
def createTestSpirals(n, type='lin'):
    """Generate a noisy two-class spiral dataset of n points.

    The second class is the point-wise mirror (negation) of the first.
    `type` (shadows the builtin) may contain 'sin' and/or 'squ' to append
    extra sin(x)/sin(y) and x^2/y^2 feature columns.
    Returns (positions, values) with values 0 for the first half and 1
    for the second.
    """
    half = n // 2
    theta = (720) * np.sqrt(np.random.rand(half, 1)) * (np.pi) / 180
    xs = -theta * np.cos(theta) + np.random.rand(half, 1)
    ys = theta * np.sin(theta) + np.random.rand(half, 1)
    feature_cols = [xs, ys]
    if 'sin' in type:
        feature_cols += [np.sin(xs), np.sin(ys)]
    if 'squ' in type:
        feature_cols += [xs ** 2, ys ** 2]
    branch = np.hstack(tuple(feature_cols))
    positions = np.vstack((branch, -branch))
    values = np.hstack((np.zeros(half), np.ones(half))).astype(int)
    return (positions, values)
def mutate(input, dict, mR):
pr = input[0]
nw = input[1]
for i in dict:
if r.random() <= mR:
pr[i] = r.choice(dict[i])
for i in range(len(nw)):
if r.random() <= mR:
if r.random() > 0.5:
nw[i] += 1
else:
nw[i] -= 1
if r.random() <= mR and r.random() > 0.5:
nw.append(1)
return [pr, [x for x in nw if x != 0]]
def crossover(input1, input2, cR):
pr1 = input1[0]
nw1 = input1[1]
pr2 = input2[0]
nw2 = input2[1]
for i in pr1:
if r.random() <= cR:
pr1[i], pr2[i] = pr2[i], pr1[i]
for i in range(max(len(nw1),len(nw2))):
if r.random() <= cR:
if len(nw1) <= i:
nw1.append(nw2[i])
nw2[i] = 0
elif len(nw2) <= i:
nw2.append(nw1[i])
nw1[i] = 0
else:
nw1[i], nw2[i] = nw2[i], nw1[i]
return [pr1, [x for x in nw1 if x != 0]], [pr2, [x for x in nw2 if x != 0]]
def scores(nws, maxIt, type='lin'):
    """Evaluate fitness: train one MLP per individual and score it on held-out data.

    A fresh spiral training set and test set (1000 points each) are generated
    per call, shared by all individuals.

    Args:
        nws: list of [hyperparameters, layer_sizes] individuals.
        maxIt: max training iterations for every MLP.
        type: spiral feature set forwarded to createTestSpirals.

    Returns:
        list of (individual, test-set accuracy) pairs.

    NOTE(review): `type` shadows the builtin. The local `alpha` is fed to
    MLPClassifier's learning_rate_init, not its alpha (L2) parameter —
    confirm which was intended.
    """
    nwSc = []
    posTrain, valueTrain = createTestSpirals(1000,type)
    posTest, valueTest = createTestSpirals(1000,type)
    for nw in nws:
        shape = nw[1]
        activ = 'tanh'
        alpha = 0.0001
        if 'activation' in nw[0]:
            activ = nw[0]['activation']
        #if 'maxIter' in nw[0]:
        #    maxIt = nw[0]['maxIter']
        #if 'alpha' in nw[0]:
        #    alpha = nw[0]['alpha']
        # maxIt is deliberately tiny, so silence sklearn's non-convergence noise.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=ConvergenceWarning, module='sklearn')
            mlp = MLPClassifier(shape, max_iter=maxIt, solver='adam', random_state=0, activation=activ, learning_rate_init=alpha,)
            mlp.fit(posTrain,valueTrain)
            nwSc.append(mlp.score(posTest, valueTest))
            #print(nw," score - ",mlp.score(posTest, valueTest))
    return list(zip(nws,nwSc))
def selection(scored, pairs, elitism):
    """Rank-based roulette selection of 2*pairs parents.

    Individual with ascending-score rank i (0-based) receives i tickets, so
    the worst individual gets none and the best gets the most.

    Args:
        scored: list of (individual, score) pairs.
        pairs: number of parent pairs to draw.
        elitism: when True, the best individual is guaranteed one parent slot.
    """
    sortScores = sorted(scored, key=lambda scored: scored[1])
    roulette = []
    for i in range(len(sortScores)):
        for _ in range(i):
            roulette.append(sortScores[i][0])
    selected = []
    if elitism:
        # The last ticket appended always belongs to the top-ranked individual.
        selected.append(roulette[-1])
    while len(selected) < 2*pairs:
        selected.append(r.choice(roulette))
    return selected
def oldSelection(scored, pairs, elitism):
    """Legacy roulette selection of 2*pairs parents based on raw scores.

    NOTE(review): the spin `rInt = r.random()*sortScores[-1][1]` ranges only up
    to the single best score, while `val` is the cumulative sum of *all* scores
    — so the first (lowest-scoring) entries absorb nearly every draw. A true
    fitness-proportional wheel would scale by `val[-1]` instead; confirm intent
    before relying on this path.
    """
    sortScores = sorted(scored, key=lambda scored: scored[1])
    nws = [x[0] for x in sortScores]
    val = np.cumsum(np.array([x[1] for x in sortScores]))
    selected = []
    if elitism:
        selected.append(sortScores[-1][0])
    while len(selected) < 2*pairs:
        rInt = r.random()*sortScores[-1][1]
        for i in range(len(nws)):
            if rInt < val[i]:
                selected.append(nws[i])
                break
    return selected
def varianceCalc(scored):
    """Return the mean of the squared scores in `scored`.

    Despite the name this is the second moment, not the variance (the mean is
    never subtracted).

    Args:
        scored: list of (individual, score) pairs.
    """
    total = 0.0
    for _, score in scored:
        total += score ** 2
    return total / len(scored)
### Main Program -------------------------------------------------------------------------
def main(crossoverRate=0.7, mutationRate=0.01, iterations=50, numInitials=100, survivalRate=0.5, elitism=True, maxMLPit=2, MLPtype='square', oldSelect=False):
    """Run the genetic algorithm over MLP hyperparameters and architectures.

    Each generation: score every network, select parents (rank roulette, or
    the legacy scheme when oldSelect=True), then rebuild the population via
    crossover and mutation.

    Returns:
        dict with 'nwSets' (population per generation, including the initial
        one) and 'scoreList' (scores per generation).

    NOTE(review): the log file name is hard-coded to logFiles/log123s.txt and
    opened in append mode, so successive runs accumulate in one file — confirm
    this is intended.
    """
    logger = "LOG FILE -------------------\n\n"
    logFile = open("logFiles/log"+str(123)+"s.txt",'a')
    gpDict = {'activation':['identity','logistic','tanh','relu']}
    # Richer search space, currently disabled:
    # gpDict = {'activation':['identity','logistic','tanh','relu'], 'alpha':[1.0,0.1,0.01,0.001,0.0001], 'iterations':[1,2,3,4], 'type':['lin','squ','sin','sinsqu']}
    networkSet = createInitials(numInitials, gpDict)
    scoreList = []
    nwSets = [networkSet]
    for _ in tqdm(range(iterations)):
        scored = scores(networkSet, maxMLPit, MLPtype)
        scoreList.append([x[1] for x in scored])
        if oldSelect:
            selected = oldSelection(scored, int(len(networkSet)*survivalRate), elitism)
        else:
            selected = selection(scored, int(len(networkSet)*survivalRate), elitism)
        networkSet = []
        # Each parent pair produces int(0.5/survivalRate) child pairs so the
        # population size stays (approximately) constant across generations.
        for pair in range(len(selected)//2):
            for _ in range(int(0.5/survivalRate)):
                nwP1, nwP2 = crossover(selected[2*pair], selected[2*pair+1], crossoverRate)
                networkSet.append(mutate(nwP1, gpDict, mutationRate))
                networkSet.append(mutate(nwP2, gpDict, mutationRate))
        nwSets.append(networkSet)
    logger+= "\nOut lists -------\n\nNetwork list ~: "+str(nwSets)+"\n\nScores List : "+str(scoreList)+"\n"
    logFile.write(logger)
    logFile.close()
    return {'nwSets':nwSets, 'scoreList':scoreList}
#run = main(oldSelect=True, numInitials=100)
#print(run['nwSets'])
#print(run['scoreList'])
| true |
e619efd2ef9c838c7c391e9429a32a4bc83d355d | Python | Nyxelio/experiments | /python/remote_freebox/liste.py | UTF-8 | 566 | 2.609375 | 3 | [] | no_license | import urllib
import json
def get(url):
    """Fetch `url` and return its response body parsed as JSON.

    NOTE(review): urllib.urlopen exists only under Python 2; on Python 3 this
    would need urllib.request.urlopen — confirm the target interpreter.
    """
    response = urllib.urlopen(url)
    html = response.read().decode("utf-8")
    return json.loads(html)
# Fetch the full channel table (uuid -> channel info) from the local Freebox API.
channels = get("http://mafreebox.freebox.fr/api/v3/tv/channels")["result"]
print(channels)

# List the channels of bouquet 49, sorted by channel number.
obj = get("http://mafreebox.freebox.fr/api/v3/tv/bouquets/49/channels")
if obj["success"]:
    for r in sorted(obj["result"], key=lambda x: x['number']):
        uuid = r["uuid"]
        # Resolve the channel name through the uuid-indexed table fetched above.
        print(r["number"], channels[uuid]["name"])
else:
    print("Error")

# http://mafreebox.freebox.fr/api/v3/tv/bouquets/freeboxtv/channels
a0b44f111db6dc9c8f64ba0411b4f5dc672ffa9d | Python | FrancoNegri/habla_utils | /utils/fon_to_lab/fon_to_lab.py | UTF-8 | 1,173 | 2.546875 | 3 | [] | no_license | #!/usr/bin/python
#converts fon format to mono lab
import sys
import re
import subprocess
usage = 'Usage: ./lab_to_textgrid.py input.fon input.wav output.lab'

if len(sys.argv) != 4:
    print(usage)
    exit()

ifname = sys.argv[1]
wavname = sys.argv[2]
ofname = sys.argv[3]

inf = open(ifname, 'r')
outf = open(ofname, 'w')

# Parse the .fon file: skip everything up to the "Tiempo Color Label" header
# row, then collect (time, label) pairs from the remaining lines.
start_parsing = False
labs = [("0", "#")]  # leading "#" marks the start-of-utterance boundary
for line in inf:
    if not re.search(r'^\s*Tiempo\s*Color\s*Label', line) and not start_parsing:
        continue
    else:
        if not start_parsing:
            start_parsing = True
            continue
        tokens = line.split()
        label = tokens[2].strip()
        time = tokens[0].strip()
        # BUGFIX: the original called string(...), which does not exist in
        # Python — the builtin is str(). Times are scaled by 1e6 before
        # formatting, as lab files expect.
        labs.append((str(float(time)*1000000), label))

# Ask praat for the total duration of the wav; its stdout goes to temp.txt,
# whose first whitespace-separated token is the end time in seconds.
f = open("temp.txt", "w+")
subprocess.call(['/home/franckn/htk/habla_utils/utils/praat/praat', '--run', 'praat_script', wavname],stdout=f)
f.seek(0)
end_time = f.read().split()[0]
labs.append((str(float(end_time)*1000000),"#"))  # closing "#" boundary

# Emit one line per segment: start time, end time (next boundary), label.
width = 8
for i in range(0,len(labs)-1):
    outf.write(' ' + labs[i][0].ljust(width," ") + ' ' + labs[i+1][0].ljust(width," ") + ' ' + labs[i][1] + '\n')

inf.close()
outf.close()
bf33d1257c6717c6888fc29d118bcd4f65024890 | Python | zhangyuyu0222/mtx0117 | /case/test_addaddr.py | UTF-8 | 1,517 | 2.734375 | 3 | [] | no_license | import time
import allure
from lesson13.lesson13_1买裙子调用类.base.driver import Driver
from lesson13.lesson13_1买裙子调用类.pageAction.login_action import Login
from lesson13.lesson13_1买裙子调用类.base.baseApi import Base
from lesson13.lesson13_1买裙子调用类.pageAction.addaddress_action import Add_addr
'''
登录用例的参数化数据如何组织?如何断言?
1.前面是登录失败的,后一条是登陆成功的
2.没有顺序,随便写
2.1 判断:如果成功了,先退出。判断是否退出成功,退出成功在继续参数化。
前提条件:类级别的装置函数(setup_class,teardown_class)
2.2 前提条件:装置函数可以用方法级别的
'''
@allure.feature('新增地址测试')
class TestAddAddr:
    # End-to-end UI test: log in, run the add-address flow, and assert the
    # success toast appears in the page source.
    def setup_class(self):
        """
        Initialize the Chrome driver object and log in before any test runs.
        :return:
        """
        # Create the driver object
        self.driver = Driver.get_driver()
        # Create the login business object and perform a successful login
        Login(self.driver).login_success()
        # Create the base object, used to read the page source for assertions
        self.base = Base(self.driver)
        self.addaddr = Add_addr(self.driver)
    @allure.title('新增地址测试用例')
    def test_addaddr(self):
        # Run the add-address business flow, then verify the success message
        # ('新增成功' = "added successfully") is present in the page source.
        self.addaddr.add_addr_business()
        time.sleep(3)
        assert '新增成功' in self.base.base_page_source
    def teardown_class(self):
        """
        Close the browser after all test cases in this class have finished.
        :return:
        """
        Driver.close_driver()
e40b31904810fdf0667146beafbfe8754cf82a9f | Python | zandalman/seneca-database | /functions.py | UTF-8 | 11,630 | 3.171875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# import classess
from classes import *
@contextmanager
def session_scope(Session):
    """
    Automatically manage sessions.

    Args:
        Session: SQLAlchemy sessionmaker object.

    Yields:
        SQLAlchemy session object.

    Examples:
        >>> with session_scope(Session) as session:
        ...     session.add(Gateware())
    """
    session = Session()
    try:
        yield session
        # Commit only when the caller's block completed without raising.
        session.commit()
    # NOTE(review): a bare except also catches KeyboardInterrupt/SystemExit;
    # arguably desirable here so the session always rolls back before the
    # exception propagates (it is re-raised below).
    except:
        session.rollback()
        raise
    finally:
        # Always release the session, on success and failure alike.
        session.close()
def clear_database(engine):
    """
    Clear all objects from the database

    Args:
        engine: SQLAlchemy engine object.
    """
    # Destructive: drops every table registered on Base.metadata, then
    # recreates them empty. All stored rows are lost.
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)
# Registry mapping the short type names accepted by this module's public API
# (e.g. "gateware", "sequence") to the corresponding ORM model classes.
obj_types = {"gateware": Gateware, "devicedb": DeviceDB, "project": Project, "pipeline": Pipeline, "sequence": Sequence, "measurement": Measurement}
def search_objects(session, obj_type, **kwargs):
    """Return all objects of class `obj_type` whose attributes match **kwargs.

    Args:
        session: SQLAlchemy session object.
        obj_type (str): One of "gateware", "devicedb", "project", "pipeline",
            "sequence", "measurement".
        **kwargs: attribute=value equality filters, all of which must match.

    Returns:
        list: Matching objects ordered by id; empty when nothing matches.

    Examples:
        >>> search_objects(session, "gateware", name="random gateware")
        []
    """
    model = obj_types[obj_type]
    query = session.query(model).order_by(model.id)
    for attr, wanted in kwargs.items():
        query = query.filter(getattr(model, attr) == wanted)
    return query.all()
def get_object_by_id(session, obj_type, id):
    """Return the single object of class `obj_type` with primary key `id`.

    Args:
        session: SQLAlchemy session object.
        obj_type (str): One of "gateware", "devicedb", "project", "pipeline",
            "sequence", "measurement".
        id (int): Id of the object to return.

    Raises:
        MultipleResultsFound / NoResultFound: via Query.one() when the id does
            not identify exactly one row.
    """
    model = obj_types[obj_type]
    return session.query(model).filter(model.id == id).one()
def add_object(session, obj_type, duplicates=False, **kwargs):
    """
    Add an object to the database and return its id.

    Objects can be related to other objects by specifying "gateware_id",
    "devicedb_id", "project_id", "pipeline_id", or "sequence_id" in **kwargs;
    each such id is validated before the object is created.

    Args:
        session: SQLAlchemy session object.
        obj_type (str): Class of the object to add.
            Options are "gateware", "devicedb", "project", "pipeline",
            "sequence", "measurement".
        duplicates (bool): Allow duplicate objects to be added. Defaults to False.
        **kwargs: Attributes of the object to add.

    Returns:
        int: Id of the added object, or 0 when a duplicate is rejected or a
        relationship id does not exist.
    """
    # Validate every relationship id present in kwargs before creating anything.
    for related_type in obj_types.keys():
        key = related_type + "_id"
        if key in kwargs.keys():
            if len(search_objects(session, related_type, id=kwargs[key])) == 0:
                print("Warning: %s linked to undefined %s with id %s" % (obj_type, related_type, kwargs[key]))
                return 0
    if not duplicates:
        if len(search_objects(session, obj_type, version=1, **kwargs)) > 0:
            print("Warning: duplicate %s detected; object not added" % obj_type)
            return 0
    time = datetime.datetime.utcnow()
    new_obj = obj_types[obj_type](version=1, time=time, **kwargs)
    session.add(new_obj)
    # Flush so the database assigns the primary key, then read it directly.
    # The previous re-query by (version, time, **kwargs) was both an extra
    # round trip and fragile: with duplicates=True, two identical objects would
    # always resolve to the first row's id.
    session.flush()
    return new_obj.id
def delete_object(session, obj_type, id):
    """Delete the object of class `obj_type` with primary key `id`.

    Args:
        session: SQLAlchemy session object.
        obj_type (string): One of "gateware", "devicedb", "project",
            "pipeline", "sequence", "measurement".
        id (int): Id of the object to delete.

    Raises:
        MultipleResultsFound / NoResultFound: propagated from
            get_object_by_id when the id does not identify exactly one row.
    """
    session.delete(get_object_by_id(session, obj_type, id))
def update_object(session, obj_type, id, update_time=True, **kwargs):
    """Update an object's attributes and bump its version counter.

    Only the attributes named in **kwargs are touched. Do not use this to set
    id, version, or time directly. Relationship columns ("gateware_id",
    "devicedb_id", "project_id", "sequence_id") may be updated via **kwargs.

    Args:
        session: SQLAlchemy session object.
        obj_type (string): One of "gateware", "devicedb", "project",
            "pipeline", "sequence", "measurement".
        id (int): Id of the object to update.
        update_time (bool): Refresh the object's time attribute with the
            current UTC time. Defaults to True.
        **kwargs: Attributes to update.

    Raises:
        MultipleResultsFound / NoResultFound: propagated from get_object_by_id.
    """
    obj = get_object_by_id(session, obj_type, id)
    for attr, value in kwargs.items():
        setattr(obj, attr, value)
    obj.version = obj.version + 1
    if update_time:
        obj.time = datetime.datetime.utcnow()
def write_data(data, path, outfile):
    """Serialize `data` as JSON into the file `outfile` under `path`.

    Args:
        data (dict): JSON-compatible data to write. Values the json module
            cannot encode natively (e.g. datetimes) are stringified via
            default=str.
        path (str): Directory in which to create the file.
        outfile (str): File name. Usually a text file.
    """
    target = os.path.join(path, outfile)
    with open(target, 'w') as handle:
        json.dump(data, handle, default=str)
def write_object_info(session, obj_type, id, outfile, path=os.getcwd()):
    """Dump one object's attributes to a file as JSON.

    Args:
        session: SQLAlchemy session object.
        obj_type (string): One of "gateware", "devicedb", "project",
            "pipeline", "sequence", "measurement".
        id (int): Id of the object to write.
        outfile (str): File name. Usually a text file.
        path (str): Destination directory. Defaults to the directory that was
            current when this module was imported (default evaluated once).

    Raises:
        MultipleResultsFound / NoResultFound: propagated from get_object_by_id.
    """
    write_data(get_object_by_id(session, obj_type, id).asdict(), path, outfile)
def write_database(session, outfile, path=os.getcwd()):
    """
    Write all data in the database to a file as JSON.

    The output is nested gateware -> devicedb -> projects -> pipelines ->
    sequences -> measurements, mirroring the class hierarchy.

    Args:
        session: SQLAlchemy session object.
        outfile (str): File name. Usually a text file.
        path (str): Path of the file to write. Defaults to the current directory.
    """
    data = {"gateware": []}
    for gateware in search_objects(session, "gateware"):
        gw_dict = gateware.asdict()
        data["gateware"].append(gw_dict)
        # BUGFIX: search_objects returns a *list*, but the old code called
        # .asdict() on the list itself, which raised AttributeError. Take the
        # first match; the output schema assumes one devicedb per gateware —
        # TODO confirm that assumption against classes.py.
        devicedbs = search_objects(session, "devicedb", gateware_id=gateware.id)
        if not devicedbs:
            continue
        devicedb = devicedbs[0]
        db_dict = devicedb.asdict()
        gw_dict["devicedb"] = db_dict
        db_dict["projects"] = []
        for project in search_objects(session, "project", devicedb_id=devicedb.id):
            proj_dict = project.asdict()
            db_dict["projects"].append(proj_dict)
            proj_dict["pipelines"] = []
            for pipeline in search_objects(session, "pipeline", project_id=project.id):
                pipe_dict = pipeline.asdict()
                proj_dict["pipelines"].append(pipe_dict)
                pipe_dict["sequences"] = []
                for sequence in search_objects(session, "sequence", pipeline_id=pipeline.id):
                    seq_dict = sequence.asdict()
                    pipe_dict["sequences"].append(seq_dict)
                    seq_dict["measurements"] = [
                        measurement.asdict()
                        for measurement in search_objects(session, "measurement", sequence_id=sequence.id)
                    ]
    if not data["gateware"]:
        print("Warning: database is empty")
    write_data(data, path, outfile)
def print_info(session, detailed=False):
    """
    Print the database contents in a human-readable way. Mostly for debugging.

    Args:
        session: SQLAlchemy session object.
        detailed (bool): Print a per-project sequence table instead of the
            one-line project summary. Defaults to False.
    """
    # BUGFIX: the original filtered on Sequence.pipeline.project /
    # Sequence.pipeline.project_id. Traversing a relationship attribute like
    # that is not valid SQLAlchemy and raised at query-construction time; the
    # queries below use explicit joins through Pipeline instead.
    # Assumes Sequence.pipeline_id and Pipeline.project_id columns exist —
    # consistent with the *_id keys used by add_object; TODO confirm against
    # classes.py.
    if detailed:
        for proj in session.query(Project).order_by(Project.id):
            print("\nProject: %s\nDeviceDB: %s\nGateware: %s\nDescription: %s" % (proj.name, proj.devicedb.name, proj.devicedb.gateware.name, proj.description))
            table = []
            # Measurement count per sequence, as a subquery joined below.
            mes_cnt_query = session.query(Measurement.sequence_id, func.count('*').label('measurement_count')).group_by(Measurement.sequence_id).subquery()
            seq_query = (session.query(Sequence, mes_cnt_query.c.measurement_count)
                         .outerjoin(mes_cnt_query, Sequence.id == mes_cnt_query.c.sequence_id)
                         .join(Pipeline, Sequence.pipeline_id == Pipeline.id)
                         .filter(Pipeline.project_id == proj.id)
                         .order_by(Sequence.id))
            for seq, mes_cnt in seq_query:
                table.append([seq.name, mes_cnt])
            print(tabulate(table, headers=['Sequence', 'Measurements']))
    else:
        table = []
        # Sequence count per project, joining sequences through their pipeline.
        seq_cnt_query = (session.query(Pipeline.project_id, func.count('*').label('sequence_count'))
                         .join(Sequence, Sequence.pipeline_id == Pipeline.id)
                         .group_by(Pipeline.project_id)
                         .subquery())
        for proj, seq_cnt in session.query(Project, seq_cnt_query.c.sequence_count).outerjoin(seq_cnt_query, Project.id == seq_cnt_query.c.project_id).order_by(Project.id):
            table.append([proj.name, proj.devicedb.name, proj.devicedb.gateware.name, seq_cnt])
        print(tabulate(table, headers=['Project', 'Device Database', 'Gateware', 'Sequences']))
def init_db(name="test.db", memory=False, echo=False):
    """Create the schema on a fresh SQLite engine and return a sessionmaker.

    Args:
        name (str): The filename of the database. Can also be a path.
            Ignored when memory is True. Defaults to "test.db".
        memory (bool): Create an in-memory only database. Defaults to False.
        echo (bool): Print all generated SQL. Defaults to False.

    Returns:
        A sessionmaker bound to the newly created engine.
    """
    target = ":memory:" if memory else name
    engine = create_engine('sqlite:///%s' % target, echo=echo)
    Base.metadata.create_all(engine)
    return sessionmaker(bind=engine)
| true |
a2e415b272be5d23906f9f6d870bb62e86f4ce3f | Python | saunter999/Simplemodel | /Hubbard/atomiclimit/atomiclim.py | UTF-8 | 2,864 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env python
from scipy import *
from pylab import *
def EigenE():
    """Plot the three eigenenergies of the atomic-limit Hubbard site vs mu.

    Holon (empty site): E = 0; single electron: E = -mu; doublon (double
    occupancy): E = U - 2*mu. Saves the figure to EigenE.pdf.
    """
    print "--------EigenEnergy---------"
    clf();
    U=2.0;
    muls=linspace(-1.9*U,1.9*U,100)
    E0=[];E1=[];E3=[];
    for mu in muls:
        E0.append(0.0)
        E1.append(-mu)
        E3.append(U-2.*mu)
    plot(muls,E0,label="holon")
    plot(muls,E1,label="eletron")
    plot(muls,E3,label="doublon")
    legend(loc=0)
    xlabel("mu",size=19)
    ylabel("E",size=19)
    grid()
    savefig("EigenE.pdf")
def occ():
    """Plot the thermal average occupancy <n> vs chemical potential mu.

    Atomic-limit Hubbard site at U=3.5, T=0.1. Saves the figure to occ.pdf.
    """
    print "--------occupancy---------"
    clf();
    U=3.5;T=0.1;
    beta=1./T
    print "%8s %8s %8s" % ('U','T','beta')
    print "%8.4f %8.4f %8.4f" % (U,T,beta)
    muls=linspace(-1.9*U,1.9*U,100)
    nls=[]
    b=exp(-beta*U)
    for mu in muls:
        # Branch on the sign of mu so that every exp() argument is <= 0 and the
        # Boltzmann ratios stay bounded (no overflow at large beta*|mu|).
        if mu<0:
            a=exp(beta*mu)
            nave=2.*(a+a**2*b)/(1.+2.*a+a**2*b)
        else:
            a=exp(-beta*mu)
            nave=2.*(a+b)/(a**2+2.*a+b)
        nls.append(nave)
    plot(muls,nls,"*",c='g')
    xlabel("mu",size=19)
    ylabel("n_occ",size=19)
    # Guide lines: half filling (n=1) and the particle-hole symmetric point mu=U/2.
    axhline(y=1,ls='--',c='b')
    axvline(x=U/2.,ls='--',c='b')
    axvline(x=U/4.,ls='-',c='b')
    axvline(x=3.*U/4.,ls='-',c='b')
    savefig("occ.pdf")
def Energy():
    """Plot the thermal average energy <E> vs chemical potential mu.

    Atomic-limit Hubbard site at U=0.5, T=0.1. Saves the figure to Energy.pdf.
    """
    clf();
    U=0.5;T=0.1;
    beta=1./T
    print "--------Energy---------"
    print "%8s %8s %8s" % ('U','T','beta')
    print "%8.4f %8.4f %8.4f" % (U,T,beta)
    muls=linspace(-1.9*U,1.9*U,100)
    E=[]
    b=exp(-beta*U)
    for mu in muls:
        # Branch on the sign of mu so every exp() argument is <= 0 (bounded).
        if mu<0:
            a=exp(beta*mu)
            Eave=( (U-2.*mu)*a**2*b-2.0*mu*a)/(1.+2.*a+a**2*b)
        else:
            a=exp(-beta*mu)
            Eave=((U-2.*mu)*b-2.0*mu*a)/(a**2+2.*a+b)
        E.append(Eave)
    plot(muls,E,"*",c='g')
    xlabel("mu",size=19)
    ylabel("E_ave",size=19)
    grid()
    axvline(x=U/2.,ls='--',c='b')
    savefig("Energy.pdf")
def doubleocc():
    """Plot the double occupancy <n_up n_dn> vs chemical potential mu.

    Atomic-limit Hubbard site at U=0.5, T=1. Saves the figure to double_occ.pdf.
    """
    clf();
    U=0.5;T=1.;
    beta=1./T
    print "--------double occ---------"
    print "%8s %8s %8s" % ('U','T','beta')
    print "%8.4f %8.4f %8.4f" % (U,T,beta)
    muls=linspace(-3.9*U,3.9*U,100)
    dbocc=[]
    b=exp(-beta*U)
    for mu in muls:
        # Branch on the sign of mu so every exp() argument is <= 0 (bounded).
        if mu<0:
            a=exp(beta*mu)
            db=(a**2*b)/(1.+2.*a+a**2*b)
        else:
            a=exp(-beta*mu)
            db=b/(a**2+2.*a+b)
        dbocc.append(db)
    plot(muls,dbocc,"*",c='g')
    xlabel("mu",size=19)
    ylabel("Double_occ",size=19)
    grid()
    axvline(x=U/2.,ls='--',c='b')
    axhline(y=0.25,ls='--',c='b')
    savefig("double_occ.pdf")
def magnet_mom():
    """Plot the local moment <m^2> (singly-occupied weight) vs mu.

    Atomic-limit Hubbard site at U=0.9, T=0.1. Saves the figure to
    magnet_mom.pdf.
    """
    clf();
    U=0.9;T=0.1;
    beta=1./T
    print "--------magnetic moment---------"
    print "%8s %8s %8s" % ('U','T','beta')
    print "%8.4f %8.4f %8.4f" % (U,T,beta)
    muls=linspace(-5.9*U,5.9*U,100)
    #muls=linspace(-2.,2.,100)
    m2ls=[]
    b=exp(-beta*U)
    for mu in muls:
        # Branch on the sign of mu so every exp() argument is <= 0 (bounded).
        if mu<0:
            a=exp(beta*mu)
            m2=(2.*a)/(1.+2.*a+a**2*b)
        else:
            a=exp(-beta*mu)
            m2=2.*a/(a**2+2.*a+b)
        m2ls.append(m2)
    plot(muls,m2ls,"*",c='g')
    xlabel("mu",size=19)
    ylabel("m^2",size=19)
    grid()
    axvline(x=U/2.,ls='--',c='b')
    #axhline(y=0.25,ls='--',c='b')
    savefig("magnet_mom.pdf")
if __name__=="__main__":
EigenE();
occ();
Energy();
doubleocc();
magnet_mom();
show()
| true |
8585bfa4b0ce627f1e10a5c7fc3da6bf0d865bf3 | Python | AP-MI-2021/lab-567-dariussandru | /Logic/cheltuiala_mare.py | UTF-8 | 558 | 2.96875 | 3 | [] | no_license | from Domain.cheltuieli import get_tipul, get_suma
def cea_mai_mare_cheltuiala(lst_cheltuiala):
    """For each expense type, keep the expense with the highest amount.

    Args:
        lst_cheltuiala (list): expenses readable via get_tipul / get_suma.

    Returns:
        dict: type -> the most expensive expense of that type. On equal
        amounts the first one encountered wins (strict comparison).
    """
    best = {}
    for cheltuiala in lst_cheltuiala:
        tip = get_tipul(cheltuiala)
        if tip not in best or get_suma(cheltuiala) > get_suma(best[tip]):
            best[tip] = cheltuiala
    return best
| true |
d46193835a8665605c6b1da3b5ec2aac7fd4a726 | Python | JesusDzul111498/ESTRUCTURA_DE_DATOS | /unidad 2/Monedas/mone.py | UTF-8 | 980 | 3.296875 | 3 | [] | no_license | print ("DETERMINAR EL CAMBIO")
costo = int(input("Cuanto es lo que costo lo que compro lo que compro\n"))
pago = int(input("Digite la cantidad de dinero con lo que usted pago\n"))
dinero = pago-costo
def cambio(cam):
    """Print a greedy bill/coin breakdown of `cam` pesos, one line per unit.

    Prints "0 pesos" once the remaining amount reaches exactly zero and
    returns None (the caller then prints that None, as in the original
    recursive version). A negative or fractional remainder matches no
    denomination and the function returns None without the closing line.
    """
    # Denominations from largest to smallest, paired with the message printed
    # each time one is handed out.
    denominaciones = [
        (200, "Un Billete de 200 mas"),
        (100, "Un Billete de 100 mas"),
        (50, "Un Billete de 50 mas"),
        (20, "Un Billete de 20 mas"),
        (10, "Una moneda de 10 mas"),
        (5, "Una moneda de 5 mas"),
        (2, "Una moneda de 2 mas"),
        (1, "Una moneda de 1 mas"),
    ]
    while cam != 0:
        for valor, mensaje in denominaciones:
            if cam >= valor:
                print(mensaje)
                cam -= valor
                break
        else:
            # No denomination fits (cam < 1): mirror the original recursion,
            # which fell through every branch and returned None silently.
            return None
    print("0 pesos")
    return None
# cambio() prints the breakdown itself and has no return value, so this line
# also prints "None" after the denomination lines.
print(cambio(dinero))
print ("Su cambio es : "+str(dinero))
"""
Dzul Canul Jesus Geovany
Coba Kauil Luis Enrique
Camacho Reyes Hector Alejandro
Kauil Dzib Alexis Eliseo
"""
"""
| true |